git.openfabrics.org - compat-rdma/linux-4.8.git/commitdiff
Added QED driver
author Vladimir Sokolovsky <vlad@mellanox.com>
Fri, 5 May 2017 15:40:14 +0000 (18:40 +0300)
committer Vladimir Sokolovsky <vlad@mellanox.com>
Fri, 5 May 2017 15:40:14 +0000 (18:40 +0300)
Signed-off-by: Vladimir Sokolovsky <vlad@mellanox.com>
87 files changed:
drivers/net/ethernet/qlogic/Kconfig [new file with mode: 0644]
drivers/net/ethernet/qlogic/Makefile [new file with mode: 0644]
drivers/net/ethernet/qlogic/netxen/Makefile [new file with mode: 0644]
drivers/net/ethernet/qlogic/netxen/netxen_nic.h [new file with mode: 0644]
drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c [new file with mode: 0644]
drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c [new file with mode: 0644]
drivers/net/ethernet/qlogic/netxen/netxen_nic_hdr.h [new file with mode: 0644]
drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c [new file with mode: 0644]
drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.h [new file with mode: 0644]
drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c [new file with mode: 0644]
drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c [new file with mode: 0644]
drivers/net/ethernet/qlogic/qed/Makefile [new file with mode: 0644]
drivers/net/ethernet/qlogic/qed/qed.h [new file with mode: 0644]
drivers/net/ethernet/qlogic/qed/qed_cxt.c [new file with mode: 0644]
drivers/net/ethernet/qlogic/qed/qed_cxt.h [new file with mode: 0644]
drivers/net/ethernet/qlogic/qed/qed_dcbx.c [new file with mode: 0644]
drivers/net/ethernet/qlogic/qed/qed_dcbx.h [new file with mode: 0644]
drivers/net/ethernet/qlogic/qed/qed_dev.c [new file with mode: 0644]
drivers/net/ethernet/qlogic/qed/qed_dev_api.h [new file with mode: 0644]
drivers/net/ethernet/qlogic/qed/qed_hsi.h [new file with mode: 0644]
drivers/net/ethernet/qlogic/qed/qed_hw.c [new file with mode: 0644]
drivers/net/ethernet/qlogic/qed/qed_hw.h [new file with mode: 0644]
drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c [new file with mode: 0644]
drivers/net/ethernet/qlogic/qed/qed_init_ops.c [new file with mode: 0644]
drivers/net/ethernet/qlogic/qed/qed_init_ops.h [new file with mode: 0644]
drivers/net/ethernet/qlogic/qed/qed_int.c [new file with mode: 0644]
drivers/net/ethernet/qlogic/qed/qed_int.h [new file with mode: 0644]
drivers/net/ethernet/qlogic/qed/qed_l2.c [new file with mode: 0644]
drivers/net/ethernet/qlogic/qed/qed_l2.h [new file with mode: 0644]
drivers/net/ethernet/qlogic/qed/qed_main.c [new file with mode: 0644]
drivers/net/ethernet/qlogic/qed/qed_mcp.c [new file with mode: 0644]
drivers/net/ethernet/qlogic/qed/qed_mcp.h [new file with mode: 0644]
drivers/net/ethernet/qlogic/qed/qed_reg_addr.h [new file with mode: 0644]
drivers/net/ethernet/qlogic/qed/qed_selftest.c [new file with mode: 0644]
drivers/net/ethernet/qlogic/qed/qed_selftest.h [new file with mode: 0644]
drivers/net/ethernet/qlogic/qed/qed_sp.h [new file with mode: 0644]
drivers/net/ethernet/qlogic/qed/qed_sp_commands.c [new file with mode: 0644]
drivers/net/ethernet/qlogic/qed/qed_spq.c [new file with mode: 0644]
drivers/net/ethernet/qlogic/qed/qed_sriov.c [new file with mode: 0644]
drivers/net/ethernet/qlogic/qed/qed_sriov.h [new file with mode: 0644]
drivers/net/ethernet/qlogic/qed/qed_vf.c [new file with mode: 0644]
drivers/net/ethernet/qlogic/qed/qed_vf.h [new file with mode: 0644]
drivers/net/ethernet/qlogic/qede/Makefile [new file with mode: 0644]
drivers/net/ethernet/qlogic/qede/qede.h [new file with mode: 0644]
drivers/net/ethernet/qlogic/qede/qede_dcbnl.c [new file with mode: 0644]
drivers/net/ethernet/qlogic/qede/qede_ethtool.c [new file with mode: 0644]
drivers/net/ethernet/qlogic/qede/qede_main.c [new file with mode: 0644]
drivers/net/ethernet/qlogic/qla3xxx.c [new file with mode: 0644]
drivers/net/ethernet/qlogic/qla3xxx.h [new file with mode: 0644]
drivers/net/ethernet/qlogic/qlcnic/Makefile [new file with mode: 0644]
drivers/net/ethernet/qlogic/qlcnic/qlcnic.h [new file with mode: 0644]
drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c [new file with mode: 0644]
drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h [new file with mode: 0644]
drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c [new file with mode: 0644]
drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c [new file with mode: 0644]
drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c [new file with mode: 0644]
drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c [new file with mode: 0644]
drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.h [new file with mode: 0644]
drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c [new file with mode: 0644]
drivers/net/ethernet/qlogic/qlcnic/qlcnic_hdr.h [new file with mode: 0644]
drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c [new file with mode: 0644]
drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h [new file with mode: 0644]
drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c [new file with mode: 0644]
drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c [new file with mode: 0644]
drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c [new file with mode: 0644]
drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c [new file with mode: 0644]
drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h [new file with mode: 0644]
drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c [new file with mode: 0644]
drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c [new file with mode: 0644]
drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c [new file with mode: 0644]
drivers/net/ethernet/qlogic/qlge/Makefile [new file with mode: 0644]
drivers/net/ethernet/qlogic/qlge/qlge.h [new file with mode: 0644]
drivers/net/ethernet/qlogic/qlge/qlge_dbg.c [new file with mode: 0644]
drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c [new file with mode: 0644]
drivers/net/ethernet/qlogic/qlge/qlge_main.c [new file with mode: 0644]
drivers/net/ethernet/qlogic/qlge/qlge_mpi.c [new file with mode: 0644]
include/linux/qed/common_hsi.h [new file with mode: 0644]
include/linux/qed/eth_common.h [new file with mode: 0644]
include/linux/qed/iscsi_common.h [new file with mode: 0644]
include/linux/qed/qed_chain.h [new file with mode: 0644]
include/linux/qed/qed_eth_if.h [new file with mode: 0644]
include/linux/qed/qed_if.h [new file with mode: 0644]
include/linux/qed/qed_iov_if.h [new file with mode: 0644]
include/linux/qed/rdma_common.h [new file with mode: 0644]
include/linux/qed/roce_common.h [new file with mode: 0644]
include/linux/qed/storage_common.h [new file with mode: 0644]
include/linux/qed/tcp_common.h [new file with mode: 0644]

diff --git a/drivers/net/ethernet/qlogic/Kconfig b/drivers/net/ethernet/qlogic/Kconfig
new file mode 100644 (file)
index 0000000..6ba4840
--- /dev/null
@@ -0,0 +1,107 @@
+#
+# QLogic network device configuration
+#
+
+config NET_VENDOR_QLOGIC
+       bool "QLogic devices"
+       default y
+       depends on PCI
+       ---help---
+         If you have a network (Ethernet) card belonging to this class, say Y.
+
+         Note that the answer to this question doesn't directly affect the
+         kernel: saying N will just cause the configurator to skip all
+         the questions about QLogic cards. If you say Y, you will be asked for
+         your specific card in the following questions.
+
+if NET_VENDOR_QLOGIC
+
+config QLA3XXX
+       tristate "QLogic QLA3XXX Network Driver Support"
+       depends on PCI
+       ---help---
+         This driver supports QLogic ISP3XXX gigabit Ethernet cards.
+
+         To compile this driver as a module, choose M here: the module
+         will be called qla3xxx.
+
+config QLCNIC
+       tristate "QLOGIC QLCNIC 1/10Gb Converged Ethernet NIC Support"
+       depends on PCI
+       select FW_LOADER
+       ---help---
+         This driver supports QLogic QLE8240 and QLE8242 Converged Ethernet
+         devices.
+
+config QLCNIC_SRIOV
+       bool "QLOGIC QLCNIC 83XX family SR-IOV Support"
+       depends on QLCNIC && PCI_IOV
+       default y
+       ---help---
+         This configuration parameter enables Single Root Input Output
+         Virtualization support for QLE83XX Converged Ethernet devices.
+         This allows for virtual function acceleration in virtualized
+         environments.
+
+config QLCNIC_DCB
+       bool "QLOGIC QLCNIC 82XX and 83XX family DCB Support"
+       depends on QLCNIC && DCB
+       default y
+       ---help---
+         This configuration parameter enables DCB support in QLE83XX
+         and QLE82XX Converged Ethernet devices. This allows for DCB
+         get operations support through rtNetlink interface. Only CEE
+         mode of DCB is supported. PG and PFC values are related only
+         to Tx.
+
+config QLCNIC_HWMON
+       bool "QLOGIC QLCNIC 82XX and 83XX family HWMON support"
+       depends on QLCNIC && HWMON && !(QLCNIC=y && HWMON=m)
+       default y
+       ---help---
+         This configuration parameter can be used to read the
+         board temperature in Converged Ethernet devices
+         supported by qlcnic.
+
+         This data is available via the hwmon sysfs interface.
+
+config QLGE
+       tristate "QLogic QLGE 10Gb Ethernet Driver Support"
+       depends on PCI
+       ---help---
+         This driver supports QLogic ISP8XXX 10Gb Ethernet cards.
+
+         To compile this driver as a module, choose M here: the module
+         will be called qlge.
+
+config NETXEN_NIC
+       tristate "NetXen Multi port (1/10) Gigabit Ethernet NIC"
+       depends on PCI
+       select FW_LOADER
+       ---help---
+         This enables the support for NetXen's Gigabit Ethernet card.
+
+config QED
+       tristate "QLogic QED 25/40/100Gb core driver"
+       depends on PCI
+       select ZLIB_INFLATE
+       ---help---
+         This enables the support for ...
+
+config QED_SRIOV
+       bool "QLogic QED 25/40/100Gb SR-IOV support"
+       depends on QED && PCI_IOV
+       default y
+       ---help---
+         This configuration parameter enables Single Root Input Output
+         Virtualization support for QED devices.
+         This allows for virtual function acceleration in virtualized
+         environments.
+
+config QEDE
+       tristate "QLogic QED 25/40/100Gb Ethernet NIC"
+       depends on QED
+       ---help---
+         This enables the support for ...
+
+endif # NET_VENDOR_QLOGIC
diff --git a/drivers/net/ethernet/qlogic/Makefile b/drivers/net/ethernet/qlogic/Makefile
new file mode 100644 (file)
index 0000000..cee90e0
--- /dev/null
@@ -0,0 +1,10 @@
+#
+# Makefile for the QLogic network device drivers.
+#
+
+obj-$(CONFIG_QLA3XXX) += qla3xxx.o
+obj-$(CONFIG_QLCNIC) += qlcnic/
+obj-$(CONFIG_QLGE) += qlge/
+obj-$(CONFIG_NETXEN_NIC) += netxen/
+obj-$(CONFIG_QED) += qed/
+obj-$(CONFIG_QEDE)+= qede/
diff --git a/drivers/net/ethernet/qlogic/netxen/Makefile b/drivers/net/ethernet/qlogic/netxen/Makefile
new file mode 100644 (file)
index 0000000..e14e60c
--- /dev/null
@@ -0,0 +1,27 @@
+# Copyright (C) 2003 - 2009 NetXen, Inc.
+# Copyright (C) 2009 - QLogic Corporation.
+# All rights reserved.
+# 
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+#                            
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#                                   
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, see <http://www.gnu.org/licenses/>.
+# 
+# The full GNU General Public License is included in this distribution
+# in the file called "COPYING".
+# 
+#
+
+
+obj-$(CONFIG_NETXEN_NIC) := netxen_nic.o
+
+netxen_nic-y := netxen_nic_hw.o netxen_nic_main.o netxen_nic_init.o \
+       netxen_nic_ethtool.o netxen_nic_ctx.o
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic.h b/drivers/net/ethernet/qlogic/netxen/netxen_nic.h
new file mode 100644 (file)
index 0000000..0a5e204
--- /dev/null
@@ -0,0 +1,1889 @@
+/*
+ * Copyright (C) 2003 - 2009 NetXen, Inc.
+ * Copyright (C) 2009 - QLogic Corporation.
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called "COPYING".
+ *
+ */
+
+#ifndef _NETXEN_NIC_H_
+#define _NETXEN_NIC_H_
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/ioport.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/ip.h>
+#include <linux/in.h>
+#include <linux/tcp.h>
+#include <linux/skbuff.h>
+#include <linux/firmware.h>
+
+#include <linux/ethtool.h>
+#include <linux/mii.h>
+#include <linux/timer.h>
+
+#include <linux/vmalloc.h>
+
+#include <asm/io.h>
+#include <asm/byteorder.h>
+
+#include "netxen_nic_hdr.h"
+#include "netxen_nic_hw.h"
+
+#define _NETXEN_NIC_LINUX_MAJOR 4
+#define _NETXEN_NIC_LINUX_MINOR 0
+#define _NETXEN_NIC_LINUX_SUBVERSION 82
+#define NETXEN_NIC_LINUX_VERSIONID  "4.0.82"
+
+#define NETXEN_VERSION_CODE(a, b, c)   (((a) << 24) + ((b) << 16) + (c))
+#define _major(v)      (((v) >> 24) & 0xff)
+#define _minor(v)      (((v) >> 16) & 0xff)
+#define _build(v)      ((v) & 0xffff)
+
+/* version in image has weird encoding:
+ *  7:0  - major
+ * 15:8  - minor
+ * 31:16 - build (little endian)
+ */
+#define NETXEN_DECODE_VERSION(v) \
+       NETXEN_VERSION_CODE(((v) & 0xff), (((v) >> 8) & 0xff), ((v) >> 16))
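As a side note, a minimal user-space sketch (with a hypothetical raw value) of how these macros decode the version word stored in the image; the macros are copied from the lines above:

#include <stdio.h>
#include <stdint.h>

#define NETXEN_VERSION_CODE(a, b, c)	(((a) << 24) + ((b) << 16) + (c))
#define NETXEN_DECODE_VERSION(v) \
	NETXEN_VERSION_CODE(((v) & 0xff), (((v) >> 8) & 0xff), ((v) >> 16))
#define _major(v)	(((v) >> 24) & 0xff)
#define _minor(v)	(((v) >> 16) & 0xff)
#define _build(v)	((v) & 0xffff)

int main(void)
{
	/* hypothetical raw word as stored in flash:
	 * 7:0 major = 4, 15:8 minor = 0, 31:16 build = 0x0052 (82) */
	uint32_t raw = 0x00520004;
	uint32_t v = NETXEN_DECODE_VERSION(raw);

	printf("%u.%u.%u\n", _major(v), _minor(v), _build(v));	/* prints 4.0.82 */
	return 0;
}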
+
+#define NETXEN_NUM_FLASH_SECTORS (64)
+#define NETXEN_FLASH_SECTOR_SIZE (64 * 1024)
+#define NETXEN_FLASH_TOTAL_SIZE  (NETXEN_NUM_FLASH_SECTORS \
+                                       * NETXEN_FLASH_SECTOR_SIZE)
+
+#define RCV_DESC_RINGSIZE(rds_ring)    \
+       (sizeof(struct rcv_desc) * (rds_ring)->num_desc)
+#define RCV_BUFF_RINGSIZE(rds_ring)    \
+       (sizeof(struct netxen_rx_buffer) * rds_ring->num_desc)
+#define STATUS_DESC_RINGSIZE(sds_ring) \
+       (sizeof(struct status_desc) * (sds_ring)->num_desc)
+#define TX_BUFF_RINGSIZE(tx_ring)      \
+       (sizeof(struct netxen_cmd_buffer) * tx_ring->num_desc)
+#define TX_DESC_RINGSIZE(tx_ring)      \
+       (sizeof(struct cmd_desc_type0) * tx_ring->num_desc)
+
+#define find_diff_among(a,b,range) ((a)<(b)?((b)-(a)):((b)+(range)-(a)))
+
+#define NETXEN_RCV_PRODUCER_OFFSET     0
+#define NETXEN_RCV_PEG_DB_ID           2
+#define NETXEN_HOST_DUMMY_DMA_SIZE 1024
+#define FLASH_SUCCESS 0
+
+#define ADDR_IN_WINDOW1(off)   \
+       ((off > NETXEN_CRB_PCIX_HOST2) && (off < NETXEN_CRB_MAX)) ? 1 : 0
+
+#define ADDR_IN_RANGE(addr, low, high) \
+       (((addr) < (high)) && ((addr) >= (low)))
+
+/*
+ * normalize a 64MB crb address to 32MB PCI window
+ * To use NETXEN_CRB_NORMALIZE, window _must_ be set to 1
+ */
+#define NETXEN_CRB_NORMAL(reg) \
+       ((reg) - NETXEN_CRB_PCIX_HOST2 + NETXEN_CRB_PCIX_HOST)
+
+#define NETXEN_CRB_NORMALIZE(adapter, reg) \
+       pci_base_offset(adapter, NETXEN_CRB_NORMAL(reg))
+
+#define DB_NORMALIZE(adapter, off) \
+       (adapter->ahw.db_base + (off))
+
+#define NX_P2_C0               0x24
+#define NX_P2_C1               0x25
+#define NX_P3_A0               0x30
+#define NX_P3_A2               0x30
+#define NX_P3_B0               0x40
+#define NX_P3_B1               0x41
+#define NX_P3_B2               0x42
+#define NX_P3P_A0              0x50
+
+#define NX_IS_REVISION_P2(REVISION)     (REVISION <= NX_P2_C1)
+#define NX_IS_REVISION_P3(REVISION)     (REVISION >= NX_P3_A0)
+#define NX_IS_REVISION_P3P(REVISION)     (REVISION >= NX_P3P_A0)
+
+#define FIRST_PAGE_GROUP_START 0
+#define FIRST_PAGE_GROUP_END   0x100000
+
+#define SECOND_PAGE_GROUP_START        0x6000000
+#define SECOND_PAGE_GROUP_END  0x68BC000
+
+#define THIRD_PAGE_GROUP_START 0x70E4000
+#define THIRD_PAGE_GROUP_END   0x8000000
+
+#define FIRST_PAGE_GROUP_SIZE  FIRST_PAGE_GROUP_END - FIRST_PAGE_GROUP_START
+#define SECOND_PAGE_GROUP_SIZE SECOND_PAGE_GROUP_END - SECOND_PAGE_GROUP_START
+#define THIRD_PAGE_GROUP_SIZE  THIRD_PAGE_GROUP_END - THIRD_PAGE_GROUP_START
+
+#define P2_MAX_MTU                     (8000)
+#define P3_MAX_MTU                     (9600)
+#define NX_ETHERMTU                    1500
+#define NX_MAX_ETHERHDR                32 /* This contains some padding */
+
+#define NX_P2_RX_BUF_MAX_LEN           1760
+#define NX_P3_RX_BUF_MAX_LEN           (NX_MAX_ETHERHDR + NX_ETHERMTU)
+#define NX_P2_RX_JUMBO_BUF_MAX_LEN     (NX_MAX_ETHERHDR + P2_MAX_MTU)
+#define NX_P3_RX_JUMBO_BUF_MAX_LEN     (NX_MAX_ETHERHDR + P3_MAX_MTU)
+#define NX_CT_DEFAULT_RX_BUF_LEN       2048
+#define NX_LRO_BUFFER_EXTRA            2048
+
+#define NX_RX_LRO_BUFFER_LENGTH                (8060)
+
+/*
+ * Maximum number of ring contexts
+ */
+#define MAX_RING_CTX 1
+
+/* Opcodes to be used with the commands */
+#define TX_ETHER_PKT   0x01
+#define TX_TCP_PKT     0x02
+#define TX_UDP_PKT     0x03
+#define TX_IP_PKT      0x04
+#define TX_TCP_LSO     0x05
+#define TX_TCP_LSO6    0x06
+#define TX_IPSEC       0x07
+#define TX_IPSEC_CMD   0x0a
+#define TX_TCPV6_PKT   0x0b
+#define TX_UDPV6_PKT   0x0c
+
+/* The following opcodes are for internal consumption. */
+#define NETXEN_CONTROL_OP      0x10
+#define PEGNET_REQUEST         0x11
+
+#define        MAX_NUM_CARDS           4
+
+#define NETXEN_MAX_FRAGS_PER_TX        14
+#define MAX_TSO_HEADER_DESC    2
+#define MGMT_CMD_DESC_RESV     4
+#define TX_STOP_THRESH         ((MAX_SKB_FRAGS >> 2) + MAX_TSO_HEADER_DESC \
+                                                       + MGMT_CMD_DESC_RESV)
+#define NX_MAX_TX_TIMEOUTS     2
+
+/*
+ * Following are the states of the Phantom. Phantom will set them and
+ * Host will read to check if the fields are correct.
+ */
+#define PHAN_INITIALIZE_START          0xff00
+#define PHAN_INITIALIZE_FAILED         0xffff
+#define PHAN_INITIALIZE_COMPLETE       0xff01
+
+/* Host writes the following to notify that it has done the init-handshake */
+#define PHAN_INITIALIZE_ACK    0xf00f
+
+#define NUM_RCV_DESC_RINGS     3
+#define NUM_STS_DESC_RINGS     4
+
+#define RCV_RING_NORMAL        0
+#define RCV_RING_JUMBO 1
+#define RCV_RING_LRO   2
+
+#define MIN_CMD_DESCRIPTORS            64
+#define MIN_RCV_DESCRIPTORS            64
+#define MIN_JUMBO_DESCRIPTORS          32
+
+#define MAX_CMD_DESCRIPTORS            1024
+#define MAX_RCV_DESCRIPTORS_1G         4096
+#define MAX_RCV_DESCRIPTORS_10G                8192
+#define MAX_JUMBO_RCV_DESCRIPTORS_1G   512
+#define MAX_JUMBO_RCV_DESCRIPTORS_10G  1024
+#define MAX_LRO_RCV_DESCRIPTORS                8
+
+#define DEFAULT_RCV_DESCRIPTORS_1G     2048
+#define DEFAULT_RCV_DESCRIPTORS_10G    4096
+
+#define NETXEN_CTX_SIGNATURE   0xdee0
+#define NETXEN_CTX_SIGNATURE_V2        0x0002dee0
+#define NETXEN_CTX_RESET       0xbad0
+#define NETXEN_CTX_D3_RESET    0xacc0
+#define NETXEN_RCV_PRODUCER(ringid)    (ringid)
+
+#define PHAN_PEG_RCV_INITIALIZED       0xff01
+#define PHAN_PEG_RCV_START_INITIALIZE  0xff00
+
+#define get_next_index(index, length)  \
+       (((index) + 1) & ((length) - 1))
+
+#define get_index_range(index,length,count)    \
+       (((index) + (count)) & ((length) - 1))
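Both helpers assume the ring length is a power of two, so the mask wraps the index. A tiny user-space sketch with a hypothetical 1024-entry ring:

#include <stdio.h>

#define get_next_index(index, length)  \
	(((index) + 1) & ((length) - 1))
#define get_index_range(index, length, count)    \
	(((index) + (count)) & ((length) - 1))

int main(void)
{
	unsigned int idx = 1023;

	idx = get_next_index(idx, 1024);	/* wraps to 0 */
	printf("%u %u\n", idx, get_index_range(1020, 1024, 8));	/* prints "0 4" */
	return 0;
}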
+
+#define MPORT_SINGLE_FUNCTION_MODE 0x1111
+#define MPORT_MULTI_FUNCTION_MODE 0x2222
+
+#define NX_MAX_PCI_FUNC                8
+
+/*
+ * NetXen host-peg signal message structure
+ *
+ *     Bit 0-1         : peg_id => 0x2 for tx and 0x1 for rx
+ *     Bit 2           : priv_id => must be 1
+ *     Bit 3-17        : count => for doorbell
+ *     Bit 18-27       : ctx_id => Context id
+ *     Bit 28-31       : opcode
+ */
+
+typedef u32 netxen_ctx_msg;
+
+#define netxen_set_msg_peg_id(config_word, val)        \
+       ((config_word) &= ~3, (config_word) |= val & 3)
+#define netxen_set_msg_privid(config_word)     \
+       ((config_word) |= 1 << 2)
+#define netxen_set_msg_count(config_word, val) \
+       ((config_word) &= ~(0x7fff<<3), (config_word) |= (val & 0x7fff) << 3)
+#define netxen_set_msg_ctxid(config_word, val) \
+       ((config_word) &= ~(0x3ff<<18), (config_word) |= (val & 0x3ff) << 18)
+#define netxen_set_msg_opcode(config_word, val)        \
+       ((config_word) &= ~(0xf<<28), (config_word) |= (val & 0xf) << 28)
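A small user-space sketch (all values hypothetical) of how the setter macros pack a host-to-peg doorbell word according to the bit layout described above:

#include <stdio.h>
#include <stdint.h>

typedef uint32_t netxen_ctx_msg;

#define netxen_set_msg_peg_id(config_word, val)        \
	((config_word) &= ~3, (config_word) |= val & 3)
#define netxen_set_msg_privid(config_word)     \
	((config_word) |= 1 << 2)
#define netxen_set_msg_count(config_word, val) \
	((config_word) &= ~(0x7fff<<3), (config_word) |= (val & 0x7fff) << 3)
#define netxen_set_msg_ctxid(config_word, val) \
	((config_word) &= ~(0x3ff<<18), (config_word) |= (val & 0x3ff) << 18)
#define netxen_set_msg_opcode(config_word, val)        \
	((config_word) &= ~(0xf<<28), (config_word) |= (val & 0xf) << 28)

int main(void)
{
	netxen_ctx_msg msg = 0;

	netxen_set_msg_peg_id(msg, 1);	/* 0x1 = rx peg */
	netxen_set_msg_privid(msg);	/* priv_id must be 1 */
	netxen_set_msg_count(msg, 32);	/* doorbell count */
	netxen_set_msg_ctxid(msg, 0);	/* context id 0 */
	netxen_set_msg_opcode(msg, 2);	/* hypothetical opcode */

	printf("msg = 0x%08x\n", msg);	/* prints 0x20000105 */
	return 0;
}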
+
+struct netxen_rcv_ring {
+       __le64 addr;
+       __le32 size;
+       __le32 rsrvd;
+};
+
+struct netxen_sts_ring {
+       __le64 addr;
+       __le32 size;
+       __le16 msi_index;
+       __le16 rsvd;
+} ;
+
+struct netxen_ring_ctx {
+
+       /* one command ring */
+       __le64 cmd_consumer_offset;
+       __le64 cmd_ring_addr;
+       __le32 cmd_ring_size;
+       __le32 rsrvd;
+
+       /* three receive rings */
+       struct netxen_rcv_ring rcv_rings[NUM_RCV_DESC_RINGS];
+
+       __le64 sts_ring_addr;
+       __le32 sts_ring_size;
+
+       __le32 ctx_id;
+
+       __le64 rsrvd_2[3];
+       __le32 sts_ring_count;
+       __le32 rsrvd_3;
+       struct netxen_sts_ring sts_rings[NUM_STS_DESC_RINGS];
+
+} __attribute__ ((aligned(64)));
+
+/*
+ * Following data structures describe the descriptors that will be used.
+ * Added fields tcpHdrSize and ipHdrSize; the driver needs to fill them only
+ * when we are doing LSO (packets larger than the standard 1500-byte size).
+ */
+
+/*
+ * The size of the reference handle has been changed to 16 bits to pass the
+ * MSS field for the LSO packet.
+ */
+
+#define FLAGS_CHECKSUM_ENABLED 0x01
+#define FLAGS_LSO_ENABLED      0x02
+#define FLAGS_IPSEC_SA_ADD     0x04
+#define FLAGS_IPSEC_SA_DELETE  0x08
+#define FLAGS_VLAN_TAGGED      0x10
+#define FLAGS_VLAN_OOB         0x40
+
+#define netxen_set_tx_vlan_tci(cmd_desc, v)    \
+       (cmd_desc)->vlan_TCI = cpu_to_le16(v);
+
+#define netxen_set_cmd_desc_port(cmd_desc, var)        \
+       ((cmd_desc)->port_ctxid |= ((var) & 0x0F))
+#define netxen_set_cmd_desc_ctxid(cmd_desc, var)       \
+       ((cmd_desc)->port_ctxid |= ((var) << 4 & 0xF0))
+
+#define netxen_set_tx_port(_desc, _port) \
+       (_desc)->port_ctxid = ((_port) & 0xf) | (((_port) << 4) & 0xf0)
+
+#define netxen_set_tx_flags_opcode(_desc, _flags, _opcode) \
+       (_desc)->flags_opcode = \
+       cpu_to_le16(((_flags) & 0x7f) | (((_opcode) & 0x3f) << 7))
+
+#define netxen_set_tx_frags_len(_desc, _frags, _len) \
+       (_desc)->nfrags__length = \
+       cpu_to_le32(((_frags) & 0xff) | (((_len) & 0xffffff) << 8))
+
+struct cmd_desc_type0 {
+       u8 tcp_hdr_offset;      /* For LSO only */
+       u8 ip_hdr_offset;       /* For LSO only */
+       __le16 flags_opcode;    /* 15:13 unused, 12:7 opcode, 6:0 flags */
+       __le32 nfrags__length;  /* 31:8 total len, 7:0 frag count */
+
+       __le64 addr_buffer2;
+
+       __le16 reference_handle;
+       __le16 mss;
+       u8 port_ctxid;          /* 7:4 ctxid 3:0 port */
+       u8 total_hdr_length;    /* LSO only : MAC+IP+TCP Hdr size */
+       __le16 conn_id;         /* IPSec offload only */
+
+       __le64 addr_buffer3;
+       __le64 addr_buffer1;
+
+       __le16 buffer_length[4];
+
+       __le64 addr_buffer4;
+
+       __le32 reserved2;
+       __le16 reserved;
+       __le16 vlan_TCI;
+
+} __attribute__ ((aligned(64)));
+
+/* Note: sizeof(rcv_desc) should always be a multiple of 2 */
+struct rcv_desc {
+       __le16 reference_handle;
+       __le16 reserved;
+       __le32 buffer_length;   /* allocated buffer length (usually 2K) */
+       __le64 addr_buffer;
+};
+
+/* opcode field in status_desc */
+#define NETXEN_NIC_SYN_OFFLOAD  0x03
+#define NETXEN_NIC_RXPKT_DESC  0x04
+#define NETXEN_OLD_RXPKT_DESC  0x3f
+#define NETXEN_NIC_RESPONSE_DESC 0x05
+#define NETXEN_NIC_LRO_DESC    0x12
+
+/* for status field in status_desc */
+#define STATUS_NEED_CKSUM      (1)
+#define STATUS_CKSUM_OK                (2)
+
+/* owner bits of status_desc */
+#define STATUS_OWNER_HOST      (0x1ULL << 56)
+#define STATUS_OWNER_PHANTOM   (0x2ULL << 56)
+
+/* Status descriptor:
+   0-3 port, 4-7 status, 8-11 type, 12-27 total_length
+   28-43 reference_handle, 44-47 protocol, 48-52 pkt_offset
+   53-55 desc_cnt, 56-57 owner, 58-63 opcode
+ */
+#define netxen_get_sts_port(sts_data)  \
+       ((sts_data) & 0x0F)
+#define netxen_get_sts_status(sts_data)        \
+       (((sts_data) >> 4) & 0x0F)
+#define netxen_get_sts_type(sts_data)  \
+       (((sts_data) >> 8) & 0x0F)
+#define netxen_get_sts_totallength(sts_data)   \
+       (((sts_data) >> 12) & 0xFFFF)
+#define netxen_get_sts_refhandle(sts_data)     \
+       (((sts_data) >> 28) & 0xFFFF)
+#define netxen_get_sts_prot(sts_data)  \
+       (((sts_data) >> 44) & 0x0F)
+#define netxen_get_sts_pkt_offset(sts_data)    \
+       (((sts_data) >> 48) & 0x1F)
+#define netxen_get_sts_desc_cnt(sts_data)      \
+       (((sts_data) >> 53) & 0x7)
+#define netxen_get_sts_opcode(sts_data)        \
+       (((sts_data) >> 58) & 0x03F)
+
+#define netxen_get_lro_sts_refhandle(sts_data)         \
+       ((sts_data) & 0x0FFFF)
+#define netxen_get_lro_sts_length(sts_data)    \
+       (((sts_data) >> 16) & 0x0FFFF)
+#define netxen_get_lro_sts_l2_hdr_offset(sts_data)     \
+       (((sts_data) >> 32) & 0x0FF)
+#define netxen_get_lro_sts_l4_hdr_offset(sts_data)     \
+       (((sts_data) >> 40) & 0x0FF)
+#define netxen_get_lro_sts_timestamp(sts_data) \
+       (((sts_data) >> 48) & 0x1)
+#define netxen_get_lro_sts_type(sts_data)      \
+       (((sts_data) >> 49) & 0x7)
+#define netxen_get_lro_sts_push_flag(sts_data)         \
+       (((sts_data) >> 52) & 0x1)
+#define netxen_get_lro_sts_seq_number(sts_data)                \
+       ((sts_data) & 0x0FFFFFFFF)
+#define netxen_get_lro_sts_mss(sts_data1)              \
+       ((sts_data1 >> 32) & 0x0FFFF)
+
+
+struct status_desc {
+       __le64 status_desc_data[2];
+} __attribute__ ((aligned(16)));
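A user-space sketch (hypothetical status word, as it would look after the driver converts status_desc_data[0] to CPU order) showing how the getter macros above pull the fields back out:

#include <stdio.h>
#include <stdint.h>

#define netxen_get_sts_port(sts_data)		((sts_data) & 0x0F)
#define netxen_get_sts_status(sts_data)		(((sts_data) >> 4) & 0x0F)
#define netxen_get_sts_totallength(sts_data)	(((sts_data) >> 12) & 0xFFFF)
#define netxen_get_sts_refhandle(sts_data)	(((sts_data) >> 28) & 0xFFFF)
#define netxen_get_sts_opcode(sts_data)		(((sts_data) >> 58) & 0x03F)

int main(void)
{
	/* hypothetical word: port 1, STATUS_CKSUM_OK (2), 1514-byte frame,
	 * reference handle 7, opcode NETXEN_NIC_RXPKT_DESC (0x04) */
	uint64_t sts = 1 | (2ULL << 4) | (1514ULL << 12) | (7ULL << 28) |
		       (0x04ULL << 58);

	printf("port=%u len=%u handle=%u opcode=0x%x\n",
	       (unsigned)netxen_get_sts_port(sts),
	       (unsigned)netxen_get_sts_totallength(sts),
	       (unsigned)netxen_get_sts_refhandle(sts),
	       (unsigned)netxen_get_sts_opcode(sts));
	return 0;
}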
+
+/* UNIFIED ROMIMAGE *************************/
+#define NX_UNI_DIR_SECT_PRODUCT_TBL    0x0
+#define NX_UNI_DIR_SECT_BOOTLD         0x6
+#define NX_UNI_DIR_SECT_FW             0x7
+
+/*Offsets */
+#define NX_UNI_CHIP_REV_OFF            10
+#define NX_UNI_FLAGS_OFF               11
+#define NX_UNI_BIOS_VERSION_OFF        12
+#define NX_UNI_BOOTLD_IDX_OFF          27
+#define NX_UNI_FIRMWARE_IDX_OFF        29
+
+struct uni_table_desc{
+       uint32_t        findex;
+       uint32_t        num_entries;
+       uint32_t        entry_size;
+       uint32_t        reserved[5];
+};
+
+struct uni_data_desc{
+       uint32_t        findex;
+       uint32_t        size;
+       uint32_t        reserved[5];
+};
+
+/* UNIFIED ROMIMAGE *************************/
+
+/* The version of the main data structure */
+#define        NETXEN_BDINFO_VERSION 1
+
+/* Magic number to let user know flash is programmed */
+#define        NETXEN_BDINFO_MAGIC 0x12345678
+
+/* Max number of Gig ports on a Phantom board */
+#define NETXEN_MAX_PORTS 4
+
+#define NETXEN_BRDTYPE_P1_BD           0x0000
+#define NETXEN_BRDTYPE_P1_SB           0x0001
+#define NETXEN_BRDTYPE_P1_SMAX         0x0002
+#define NETXEN_BRDTYPE_P1_SOCK         0x0003
+
+#define NETXEN_BRDTYPE_P2_SOCK_31      0x0008
+#define NETXEN_BRDTYPE_P2_SOCK_35      0x0009
+#define NETXEN_BRDTYPE_P2_SB35_4G      0x000a
+#define NETXEN_BRDTYPE_P2_SB31_10G     0x000b
+#define NETXEN_BRDTYPE_P2_SB31_2G      0x000c
+
+#define NETXEN_BRDTYPE_P2_SB31_10G_IMEZ                0x000d
+#define NETXEN_BRDTYPE_P2_SB31_10G_HMEZ                0x000e
+#define NETXEN_BRDTYPE_P2_SB31_10G_CX4         0x000f
+
+#define NETXEN_BRDTYPE_P3_REF_QG       0x0021
+#define NETXEN_BRDTYPE_P3_HMEZ         0x0022
+#define NETXEN_BRDTYPE_P3_10G_CX4_LP   0x0023
+#define NETXEN_BRDTYPE_P3_4_GB         0x0024
+#define NETXEN_BRDTYPE_P3_IMEZ         0x0025
+#define NETXEN_BRDTYPE_P3_10G_SFP_PLUS 0x0026
+#define NETXEN_BRDTYPE_P3_10000_BASE_T 0x0027
+#define NETXEN_BRDTYPE_P3_XG_LOM       0x0028
+#define NETXEN_BRDTYPE_P3_4_GB_MM      0x0029
+#define NETXEN_BRDTYPE_P3_10G_SFP_CT   0x002a
+#define NETXEN_BRDTYPE_P3_10G_SFP_QT   0x002b
+#define NETXEN_BRDTYPE_P3_10G_CX4      0x0031
+#define NETXEN_BRDTYPE_P3_10G_XFP      0x0032
+#define NETXEN_BRDTYPE_P3_10G_TP       0x0080
+
+/* Flash memory map */
+#define NETXEN_CRBINIT_START   0       /* crbinit section */
+#define NETXEN_BRDCFG_START    0x4000  /* board config */
+#define NETXEN_INITCODE_START  0x6000  /* pegtune code */
+#define NETXEN_BOOTLD_START    0x10000 /* bootld */
+#define NETXEN_IMAGE_START     0x43000 /* compressed image */
+#define NETXEN_SECONDARY_START 0x200000        /* backup images */
+#define NETXEN_PXE_START       0x3E0000        /* PXE boot rom */
+#define NETXEN_USER_START      0x3E8000        /* Firmware info */
+#define NETXEN_FIXED_START     0x3F0000        /* backup of crbinit */
+#define NETXEN_USER_START_OLD  NETXEN_PXE_START /* very old flash */
+
+#define NX_OLD_MAC_ADDR_OFFSET (NETXEN_USER_START)
+#define NX_FW_VERSION_OFFSET   (NETXEN_USER_START+0x408)
+#define NX_FW_SIZE_OFFSET      (NETXEN_USER_START+0x40c)
+#define NX_FW_MAC_ADDR_OFFSET  (NETXEN_USER_START+0x418)
+#define NX_FW_SERIAL_NUM_OFFSET        (NETXEN_USER_START+0x81c)
+#define NX_BIOS_VERSION_OFFSET (NETXEN_USER_START+0x83c)
+
+#define NX_HDR_VERSION_OFFSET  (NETXEN_BRDCFG_START)
+#define NX_BRDTYPE_OFFSET      (NETXEN_BRDCFG_START+0x8)
+#define NX_FW_MAGIC_OFFSET     (NETXEN_BRDCFG_START+0x128)
+
+#define NX_FW_MIN_SIZE         (0x3fffff)
+#define NX_P2_MN_ROMIMAGE      0
+#define NX_P3_CT_ROMIMAGE      1
+#define NX_P3_MN_ROMIMAGE      2
+#define NX_UNIFIED_ROMIMAGE    3
+#define NX_FLASH_ROMIMAGE      4
+#define NX_UNKNOWN_ROMIMAGE    0xff
+
+#define NX_P2_MN_ROMIMAGE_NAME         "nxromimg.bin"
+#define NX_P3_CT_ROMIMAGE_NAME         "nx3fwct.bin"
+#define NX_P3_MN_ROMIMAGE_NAME         "nx3fwmn.bin"
+#define NX_UNIFIED_ROMIMAGE_NAME       "phanfw.bin"
+#define NX_FLASH_ROMIMAGE_NAME         "flash"
+
+extern char netxen_nic_driver_name[];
+
+/* Number of status descriptors to handle per interrupt */
+#define MAX_STATUS_HANDLE      (64)
+
+/*
+ * netxen_skb_frag{} is to contain mapping info for each SG list. This
+ * has to be freed when DMA is complete. This is part of netxen_tx_buffer{}.
+ */
+struct netxen_skb_frag {
+       u64 dma;
+       u64 length;
+};
+
+struct netxen_recv_crb {
+       u32 crb_rcv_producer[NUM_RCV_DESC_RINGS];
+       u32 crb_sts_consumer[NUM_STS_DESC_RINGS];
+       u32 sw_int_mask[NUM_STS_DESC_RINGS];
+};
+
+/*    Following defines are for the state of the buffers    */
+#define        NETXEN_BUFFER_FREE      0
+#define        NETXEN_BUFFER_BUSY      1
+
+/*
+ * There will be one netxen_buffer per skb packet.    These will be
+ * used to save the dma info for pci_unmap_page()
+ */
+struct netxen_cmd_buffer {
+       struct sk_buff *skb;
+       struct netxen_skb_frag frag_array[MAX_SKB_FRAGS + 1];
+       u32 frag_count;
+};
+
+/* In rx_buffer, we do not need multiple fragments as it is a single buffer */
+struct netxen_rx_buffer {
+       struct list_head list;
+       struct sk_buff *skb;
+       u64 dma;
+       u16 ref_handle;
+       u16 state;
+};
+
+/* Board types */
+#define        NETXEN_NIC_GBE  0x01
+#define        NETXEN_NIC_XGBE 0x02
+
+/*
+ * One hardware_context{} per adapter
+ * contains interrupt info as well as shared hardware info.
+ */
+struct netxen_hardware_context {
+       void __iomem *pci_base0;
+       void __iomem *pci_base1;
+       void __iomem *pci_base2;
+       void __iomem *db_base;
+       void __iomem *ocm_win_crb;
+
+       unsigned long db_len;
+       unsigned long pci_len0;
+
+       u32 ocm_win;
+       u32 crb_win;
+
+       rwlock_t crb_lock;
+       spinlock_t mem_lock;
+
+       u8 cut_through;
+       u8 revision_id;
+       u8 pci_func;
+       u8 linkup;
+       u16 port_type;
+       u16 board_type;
+};
+
+#define MINIMUM_ETHERNET_FRAME_SIZE    64      /* With FCS */
+#define ETHERNET_FCS_SIZE              4
+
+struct netxen_adapter_stats {
+       u64  xmitcalled;
+       u64  xmitfinished;
+       u64  rxdropped;
+       u64  txdropped;
+       u64  csummed;
+       u64  rx_pkts;
+       u64  lro_pkts;
+       u64  rxbytes;
+       u64  txbytes;
+};
+
+/*
+ * Rcv Descriptor Context. One such per Rcv Descriptor. There may
+ * be one Rcv Descriptor for normal packets, one for jumbo, and possibly others.
+ */
+struct nx_host_rds_ring {
+       u32 producer;
+       u32 num_desc;
+       u32 dma_size;
+       u32 skb_size;
+       u32 flags;
+       void __iomem *crb_rcv_producer;
+       struct rcv_desc *desc_head;
+       struct netxen_rx_buffer *rx_buf_arr;
+       struct list_head free_list;
+       spinlock_t lock;
+       dma_addr_t phys_addr;
+};
+
+struct nx_host_sds_ring {
+       u32 consumer;
+       u32 num_desc;
+       void __iomem *crb_sts_consumer;
+       void __iomem *crb_intr_mask;
+
+       struct status_desc *desc_head;
+       struct netxen_adapter *adapter;
+       struct napi_struct napi;
+       struct list_head free_list[NUM_RCV_DESC_RINGS];
+
+       int irq;
+
+       dma_addr_t phys_addr;
+       char name[IFNAMSIZ+4];
+};
+
+struct nx_host_tx_ring {
+       u32 producer;
+       __le32 *hw_consumer;
+       u32 sw_consumer;
+       void __iomem *crb_cmd_producer;
+       void __iomem *crb_cmd_consumer;
+       u32 num_desc;
+
+       struct netdev_queue *txq;
+
+       struct netxen_cmd_buffer *cmd_buf_arr;
+       struct cmd_desc_type0 *desc_head;
+       dma_addr_t phys_addr;
+};
+
+/*
+ * Receive context. There is one such structure per instance of the
+ * receive processing. Any state information that is relevant to
+ * the receive processing must be in this structure. The global data may be
+ * present elsewhere.
+ */
+struct netxen_recv_context {
+       u32 state;
+       u16 context_id;
+       u16 virt_port;
+
+       struct nx_host_rds_ring *rds_rings;
+       struct nx_host_sds_ring *sds_rings;
+
+       struct netxen_ring_ctx *hwctx;
+       dma_addr_t phys_addr;
+};
+
+struct _cdrp_cmd {
+       u32 cmd;
+       u32 arg1;
+       u32 arg2;
+       u32 arg3;
+};
+
+struct netxen_cmd_args {
+       struct _cdrp_cmd req;
+       struct _cdrp_cmd rsp;
+};
+
+/* New HW context creation */
+
+#define NX_OS_CRB_RETRY_COUNT  4000
+#define NX_CDRP_SIGNATURE_MAKE(pcifn, version) \
+       (((pcifn) & 0xff) | (((version) & 0xff) << 8) | (0xcafe << 16))
+
+#define NX_CDRP_CLEAR          0x00000000
+#define NX_CDRP_CMD_BIT                0x80000000
+
+/*
+ * All responses must have the NX_CDRP_CMD_BIT cleared
+ * in the crb NX_CDRP_CRB_OFFSET.
+ */
+#define NX_CDRP_FORM_RSP(rsp)  (rsp)
+#define NX_CDRP_IS_RSP(rsp)    (((rsp) & NX_CDRP_CMD_BIT) == 0)
+
+#define NX_CDRP_RSP_OK         0x00000001
+#define NX_CDRP_RSP_FAIL       0x00000002
+#define NX_CDRP_RSP_TIMEOUT    0x00000003
+
+/*
+ * All commands must have the NX_CDRP_CMD_BIT set in
+ * the crb NX_CDRP_CRB_OFFSET.
+ */
+#define NX_CDRP_FORM_CMD(cmd)  (NX_CDRP_CMD_BIT | (cmd))
+#define NX_CDRP_IS_CMD(cmd)    (((cmd) & NX_CDRP_CMD_BIT) != 0)
+
+#define NX_CDRP_CMD_SUBMIT_CAPABILITIES     0x00000001
+#define NX_CDRP_CMD_READ_MAX_RDS_PER_CTX    0x00000002
+#define NX_CDRP_CMD_READ_MAX_SDS_PER_CTX    0x00000003
+#define NX_CDRP_CMD_READ_MAX_RULES_PER_CTX  0x00000004
+#define NX_CDRP_CMD_READ_MAX_RX_CTX         0x00000005
+#define NX_CDRP_CMD_READ_MAX_TX_CTX         0x00000006
+#define NX_CDRP_CMD_CREATE_RX_CTX           0x00000007
+#define NX_CDRP_CMD_DESTROY_RX_CTX          0x00000008
+#define NX_CDRP_CMD_CREATE_TX_CTX           0x00000009
+#define NX_CDRP_CMD_DESTROY_TX_CTX          0x0000000a
+#define NX_CDRP_CMD_SETUP_STATISTICS        0x0000000e
+#define NX_CDRP_CMD_GET_STATISTICS          0x0000000f
+#define NX_CDRP_CMD_DELETE_STATISTICS       0x00000010
+#define NX_CDRP_CMD_SET_MTU                 0x00000012
+#define NX_CDRP_CMD_READ_PHY                   0x00000013
+#define NX_CDRP_CMD_WRITE_PHY                  0x00000014
+#define NX_CDRP_CMD_READ_HW_REG                        0x00000015
+#define NX_CDRP_CMD_GET_FLOW_CTL               0x00000016
+#define NX_CDRP_CMD_SET_FLOW_CTL               0x00000017
+#define NX_CDRP_CMD_READ_MAX_MTU               0x00000018
+#define NX_CDRP_CMD_READ_MAX_LRO               0x00000019
+#define NX_CDRP_CMD_CONFIGURE_TOE              0x0000001a
+#define NX_CDRP_CMD_FUNC_ATTRIB                        0x0000001b
+#define NX_CDRP_CMD_READ_PEXQ_PARAMETERS       0x0000001c
+#define NX_CDRP_CMD_GET_LIC_CAPABILITIES       0x0000001d
+#define NX_CDRP_CMD_READ_MAX_LRO_PER_BOARD     0x0000001e
+#define NX_CDRP_CMD_CONFIG_GBE_PORT            0x0000001f
+#define NX_CDRP_CMD_MAX                                0x00000020
+
+#define NX_RCODE_SUCCESS               0
+#define NX_RCODE_NO_HOST_MEM           1
+#define NX_RCODE_NO_HOST_RESOURCE      2
+#define NX_RCODE_NO_CARD_CRB           3
+#define NX_RCODE_NO_CARD_MEM           4
+#define NX_RCODE_NO_CARD_RESOURCE      5
+#define NX_RCODE_INVALID_ARGS          6
+#define NX_RCODE_INVALID_ACTION                7
+#define NX_RCODE_INVALID_STATE         8
+#define NX_RCODE_NOT_SUPPORTED         9
+#define NX_RCODE_NOT_PERMITTED         10
+#define NX_RCODE_NOT_READY             11
+#define NX_RCODE_DOES_NOT_EXIST                12
+#define NX_RCODE_ALREADY_EXISTS                13
+#define NX_RCODE_BAD_SIGNATURE         14
+#define NX_RCODE_CMD_NOT_IMPL          15
+#define NX_RCODE_CMD_INVALID           16
+#define NX_RCODE_TIMEOUT               17
+#define NX_RCODE_CMD_FAILED            18
+#define NX_RCODE_MAX_EXCEEDED          19
+#define NX_RCODE_MAX                   20
+
+#define NX_DESTROY_CTX_RESET           0
+#define NX_DESTROY_CTX_D3_RESET                1
+#define NX_DESTROY_CTX_MAX             2
+
+/*
+ * Capabilities
+ */
+#define NX_CAP_BIT(class, bit)         (1 << bit)
+#define NX_CAP0_LEGACY_CONTEXT         NX_CAP_BIT(0, 0)
+#define NX_CAP0_MULTI_CONTEXT          NX_CAP_BIT(0, 1)
+#define NX_CAP0_LEGACY_MN              NX_CAP_BIT(0, 2)
+#define NX_CAP0_LEGACY_MS              NX_CAP_BIT(0, 3)
+#define NX_CAP0_CUT_THROUGH            NX_CAP_BIT(0, 4)
+#define NX_CAP0_LRO                    NX_CAP_BIT(0, 5)
+#define NX_CAP0_LSO                    NX_CAP_BIT(0, 6)
+#define NX_CAP0_JUMBO_CONTIGUOUS       NX_CAP_BIT(0, 7)
+#define NX_CAP0_LRO_CONTIGUOUS         NX_CAP_BIT(0, 8)
+#define NX_CAP0_HW_LRO                 NX_CAP_BIT(0, 10)
+#define NX_CAP0_HW_LRO_MSS             NX_CAP_BIT(0, 21)
+
+/*
+ * Context state
+ */
+#define NX_HOST_CTX_STATE_FREED                0
+#define NX_HOST_CTX_STATE_ALLOCATED    1
+#define NX_HOST_CTX_STATE_ACTIVE       2
+#define NX_HOST_CTX_STATE_DISABLED     3
+#define NX_HOST_CTX_STATE_QUIESCED     4
+#define NX_HOST_CTX_STATE_MAX          5
+
+/*
+ * Rx context
+ */
+
+typedef struct {
+       __le64 host_phys_addr;  /* Ring base addr */
+       __le32 ring_size;               /* Ring entries */
+       __le16 msi_index;
+       __le16 rsvd;            /* Padding */
+} nx_hostrq_sds_ring_t;
+
+typedef struct {
+       __le64 host_phys_addr;  /* Ring base addr */
+       __le64 buff_size;               /* Packet buffer size */
+       __le32 ring_size;               /* Ring entries */
+       __le32 ring_kind;               /* Class of ring */
+} nx_hostrq_rds_ring_t;
+
+typedef struct {
+       __le64 host_rsp_dma_addr;       /* Response dma'd here */
+       __le32 capabilities[4]; /* Flag bit vector */
+       __le32 host_int_crb_mode;       /* Interrupt crb usage */
+       __le32 host_rds_crb_mode;       /* RDS crb usage */
+       /* These ring offsets are relative to data[0] below */
+       __le32 rds_ring_offset; /* Offset to RDS config */
+       __le32 sds_ring_offset; /* Offset to SDS config */
+       __le16 num_rds_rings;   /* Count of RDS rings */
+       __le16 num_sds_rings;   /* Count of SDS rings */
+       __le16 rsvd1;           /* Padding */
+       __le16 rsvd2;           /* Padding */
+       u8  reserved[128];      /* reserve space for future expansion*/
+       /* MUST BE 64-bit aligned.
+          The following is packed:
+          - N hostrq_rds_rings
+          - N hostrq_sds_rings */
+       char data[0];
+} nx_hostrq_rx_ctx_t;
+
+typedef struct {
+       __le32 host_producer_crb;       /* Crb to use */
+       __le32 rsvd1;           /* Padding */
+} nx_cardrsp_rds_ring_t;
+
+typedef struct {
+       __le32 host_consumer_crb;       /* Crb to use */
+       __le32 interrupt_crb;   /* Crb to use */
+} nx_cardrsp_sds_ring_t;
+
+typedef struct {
+       /* These ring offsets are relative to data[0] below */
+       __le32 rds_ring_offset; /* Offset to RDS config */
+       __le32 sds_ring_offset; /* Offset to SDS config */
+       __le32 host_ctx_state;  /* Starting State */
+       __le32 num_fn_per_port; /* How many PCI fn share the port */
+       __le16 num_rds_rings;   /* Count of RDS rings */
+       __le16 num_sds_rings;   /* Count of SDS rings */
+       __le16 context_id;              /* Handle for context */
+       u8  phys_port;          /* Physical id of port */
+       u8  virt_port;          /* Virtual/Logical id of port */
+       u8  reserved[128];      /* save space for future expansion */
+       /*  MUST BE 64-bit aligned.
+          The following is packed:
+          - N cardrsp_rds_rings
+          - N cardrsp_sds_rings */
+       char data[0];
+} nx_cardrsp_rx_ctx_t;
+
+#define SIZEOF_HOSTRQ_RX(HOSTRQ_RX, rds_rings, sds_rings)      \
+       (sizeof(HOSTRQ_RX) +                                    \
+       (rds_rings)*(sizeof(nx_hostrq_rds_ring_t)) +            \
+       (sds_rings)*(sizeof(nx_hostrq_sds_ring_t)))
+
+#define SIZEOF_CARDRSP_RX(CARDRSP_RX, rds_rings, sds_rings)    \
+       (sizeof(CARDRSP_RX) +                                   \
+       (rds_rings)*(sizeof(nx_cardrsp_rds_ring_t)) +           \
+       (sds_rings)*(sizeof(nx_cardrsp_sds_ring_t)))
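Because the per-ring descriptors are packed after data[0], the request and response blocks are variable-sized, and these macros give the total allocation length. A hypothetical kernel-side sketch (the helper name is illustrative, not from this patch):

static void nx_show_rx_ctx_sizes(u16 nrds, u16 nsds)
{
	/* total bytes needed for the host request and card response blocks */
	size_t rq_size  = SIZEOF_HOSTRQ_RX(nx_hostrq_rx_ctx_t, nrds, nsds);
	size_t rsp_size = SIZEOF_CARDRSP_RX(nx_cardrsp_rx_ctx_t, nrds, nsds);

	pr_info("hostrq_rx: %zu bytes, cardrsp_rx: %zu bytes\n",
		rq_size, rsp_size);
}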
+
+/*
+ * Tx context
+ */
+
+typedef struct {
+       __le64 host_phys_addr;  /* Ring base addr */
+       __le32 ring_size;               /* Ring entries */
+       __le32 rsvd;            /* Padding */
+} nx_hostrq_cds_ring_t;
+
+typedef struct {
+       __le64 host_rsp_dma_addr;       /* Response dma'd here */
+       __le64 cmd_cons_dma_addr;       /*  */
+       __le64 dummy_dma_addr;  /*  */
+       __le32 capabilities[4]; /* Flag bit vector */
+       __le32 host_int_crb_mode;       /* Interrupt crb usage */
+       __le32 rsvd1;           /* Padding */
+       __le16 rsvd2;           /* Padding */
+       __le16 interrupt_ctl;
+       __le16 msi_index;
+       __le16 rsvd3;           /* Padding */
+       nx_hostrq_cds_ring_t cds_ring;  /* Desc of cds ring */
+       u8  reserved[128];      /* future expansion */
+} nx_hostrq_tx_ctx_t;
+
+typedef struct {
+       __le32 host_producer_crb;       /* Crb to use */
+       __le32 interrupt_crb;   /* Crb to use */
+} nx_cardrsp_cds_ring_t;
+
+typedef struct {
+       __le32 host_ctx_state;  /* Starting state */
+       __le16 context_id;              /* Handle for context */
+       u8  phys_port;          /* Physical id of port */
+       u8  virt_port;          /* Virtual/Logical id of port */
+       nx_cardrsp_cds_ring_t cds_ring; /* Card cds settings */
+       u8  reserved[128];      /* future expansion */
+} nx_cardrsp_tx_ctx_t;
+
+#define SIZEOF_HOSTRQ_TX(HOSTRQ_TX)    (sizeof(HOSTRQ_TX))
+#define SIZEOF_CARDRSP_TX(CARDRSP_TX)  (sizeof(CARDRSP_TX))
+
+/* CRB */
+
+#define NX_HOST_RDS_CRB_MODE_UNIQUE    0
+#define NX_HOST_RDS_CRB_MODE_SHARED    1
+#define NX_HOST_RDS_CRB_MODE_CUSTOM    2
+#define NX_HOST_RDS_CRB_MODE_MAX       3
+
+#define NX_HOST_INT_CRB_MODE_UNIQUE    0
+#define NX_HOST_INT_CRB_MODE_SHARED    1
+#define NX_HOST_INT_CRB_MODE_NORX      2
+#define NX_HOST_INT_CRB_MODE_NOTX      3
+#define NX_HOST_INT_CRB_MODE_NORXTX    4
+
+
+/* MAC */
+
+#define MC_COUNT_P2    16
+#define MC_COUNT_P3    38
+
+#define NETXEN_MAC_NOOP        0
+#define NETXEN_MAC_ADD 1
+#define NETXEN_MAC_DEL 2
+
+typedef struct nx_mac_list_s {
+       struct list_head list;
+       uint8_t mac_addr[ETH_ALEN+2];
+} nx_mac_list_t;
+
+struct nx_ip_list {
+       struct list_head list;
+       __be32 ip_addr;
+       bool master;
+};
+
+/*
+ * Interrupt coalescing defaults. The defaults are for 1500 MTU. They are
+ * adjusted based on the configured MTU.
+ */
+#define NETXEN_DEFAULT_INTR_COALESCE_RX_TIME_US        3
+#define NETXEN_DEFAULT_INTR_COALESCE_RX_PACKETS        256
+#define NETXEN_DEFAULT_INTR_COALESCE_TX_PACKETS        64
+#define NETXEN_DEFAULT_INTR_COALESCE_TX_TIME_US        4
+
+#define NETXEN_NIC_INTR_DEFAULT                        0x04
+
+typedef union {
+       struct {
+               uint16_t        rx_packets;
+               uint16_t        rx_time_us;
+               uint16_t        tx_packets;
+               uint16_t        tx_time_us;
+       } data;
+       uint64_t                word;
+} nx_nic_intr_coalesce_data_t;
+
+typedef struct {
+       uint16_t                        stats_time_us;
+       uint16_t                        rate_sample_time;
+       uint16_t                        flags;
+       uint16_t                        rsvd_1;
+       uint32_t                        low_threshold;
+       uint32_t                        high_threshold;
+       nx_nic_intr_coalesce_data_t     normal;
+       nx_nic_intr_coalesce_data_t     low;
+       nx_nic_intr_coalesce_data_t     high;
+       nx_nic_intr_coalesce_data_t     irq;
+} nx_nic_intr_coalesce_t;
+
+#define NX_HOST_REQUEST                0x13
+#define NX_NIC_REQUEST         0x14
+
+#define NX_MAC_EVENT           0x1
+
+#define NX_IP_UP               2
+#define NX_IP_DOWN             3
+
+/*
+ * Driver --> Firmware
+ */
+#define NX_NIC_H2C_OPCODE_START                                0
+#define NX_NIC_H2C_OPCODE_CONFIG_RSS                   1
+#define NX_NIC_H2C_OPCODE_CONFIG_RSS_TBL               2
+#define NX_NIC_H2C_OPCODE_CONFIG_INTR_COALESCE         3
+#define NX_NIC_H2C_OPCODE_CONFIG_LED                   4
+#define NX_NIC_H2C_OPCODE_CONFIG_PROMISCUOUS           5
+#define NX_NIC_H2C_OPCODE_CONFIG_L2_MAC                        6
+#define NX_NIC_H2C_OPCODE_LRO_REQUEST                  7
+#define NX_NIC_H2C_OPCODE_GET_SNMP_STATS               8
+#define NX_NIC_H2C_OPCODE_PROXY_START_REQUEST          9
+#define NX_NIC_H2C_OPCODE_PROXY_STOP_REQUEST           10
+#define NX_NIC_H2C_OPCODE_PROXY_SET_MTU                        11
+#define NX_NIC_H2C_OPCODE_PROXY_SET_VPORT_MISS_MODE    12
+#define NX_NIC_H2C_OPCODE_GET_FINGER_PRINT_REQUEST     13
+#define NX_NIC_H2C_OPCODE_INSTALL_LICENSE_REQUEST      14
+#define NX_NIC_H2C_OPCODE_GET_LICENSE_CAPABILITY_REQUEST       15
+#define NX_NIC_H2C_OPCODE_GET_NET_STATS                        16
+#define NX_NIC_H2C_OPCODE_PROXY_UPDATE_P2V             17
+#define NX_NIC_H2C_OPCODE_CONFIG_IPADDR                        18
+#define NX_NIC_H2C_OPCODE_CONFIG_LOOPBACK              19
+#define NX_NIC_H2C_OPCODE_PROXY_STOP_DONE              20
+#define NX_NIC_H2C_OPCODE_GET_LINKEVENT                        21
+#define NX_NIC_C2C_OPCODE                              22
+#define NX_NIC_H2C_OPCODE_CONFIG_BRIDGING               23
+#define NX_NIC_H2C_OPCODE_CONFIG_HW_LRO                        24
+#define NX_NIC_H2C_OPCODE_LAST                         25
+
+/*
+ * Firmware --> Driver
+ */
+
+#define NX_NIC_C2H_OPCODE_START                                128
+#define NX_NIC_C2H_OPCODE_CONFIG_RSS_RESPONSE          129
+#define NX_NIC_C2H_OPCODE_CONFIG_RSS_TBL_RESPONSE      130
+#define NX_NIC_C2H_OPCODE_CONFIG_MAC_RESPONSE          131
+#define NX_NIC_C2H_OPCODE_CONFIG_PROMISCUOUS_RESPONSE  132
+#define NX_NIC_C2H_OPCODE_CONFIG_L2_MAC_RESPONSE       133
+#define NX_NIC_C2H_OPCODE_LRO_DELETE_RESPONSE          134
+#define NX_NIC_C2H_OPCODE_LRO_ADD_FAILURE_RESPONSE     135
+#define NX_NIC_C2H_OPCODE_GET_SNMP_STATS               136
+#define NX_NIC_C2H_OPCODE_GET_FINGER_PRINT_REPLY       137
+#define NX_NIC_C2H_OPCODE_INSTALL_LICENSE_REPLY                138
+#define NX_NIC_C2H_OPCODE_GET_LICENSE_CAPABILITIES_REPLY 139
+#define NX_NIC_C2H_OPCODE_GET_NET_STATS_RESPONSE       140
+#define NX_NIC_C2H_OPCODE_GET_LINKEVENT_RESPONSE       141
+#define NX_NIC_C2H_OPCODE_LAST                         142
+
+#define VPORT_MISS_MODE_DROP           0 /* drop all unmatched */
+#define VPORT_MISS_MODE_ACCEPT_ALL     1 /* accept all packets */
+#define VPORT_MISS_MODE_ACCEPT_MULTI   2 /* accept unmatched multicast */
+
+#define NX_NIC_LRO_REQUEST_FIRST               0
+#define NX_NIC_LRO_REQUEST_ADD_FLOW            1
+#define NX_NIC_LRO_REQUEST_DELETE_FLOW         2
+#define NX_NIC_LRO_REQUEST_TIMER               3
+#define NX_NIC_LRO_REQUEST_CLEANUP             4
+#define NX_NIC_LRO_REQUEST_ADD_FLOW_SCHEDULED  5
+#define NX_TOE_LRO_REQUEST_ADD_FLOW            6
+#define NX_TOE_LRO_REQUEST_ADD_FLOW_RESPONSE   7
+#define NX_TOE_LRO_REQUEST_DELETE_FLOW         8
+#define NX_TOE_LRO_REQUEST_DELETE_FLOW_RESPONSE        9
+#define NX_TOE_LRO_REQUEST_TIMER               10
+#define NX_NIC_LRO_REQUEST_LAST                        11
+
+#define NX_FW_CAPABILITY_LINK_NOTIFICATION     (1 << 5)
+#define NX_FW_CAPABILITY_SWITCHING             (1 << 6)
+#define NX_FW_CAPABILITY_PEXQ                  (1 << 7)
+#define NX_FW_CAPABILITY_BDG                   (1 << 8)
+#define NX_FW_CAPABILITY_FVLANTX               (1 << 9)
+#define NX_FW_CAPABILITY_HW_LRO                        (1 << 10)
+#define NX_FW_CAPABILITY_GBE_LINK_CFG          (1 << 11)
+#define NX_FW_CAPABILITY_MORE_CAPS             (1 << 31)
+#define NX_FW_CAPABILITY_2_LRO_MAX_TCP_SEG     (1 << 2)
+
+/* module types */
+#define LINKEVENT_MODULE_NOT_PRESENT                   1
+#define LINKEVENT_MODULE_OPTICAL_UNKNOWN               2
+#define LINKEVENT_MODULE_OPTICAL_SRLR                  3
+#define LINKEVENT_MODULE_OPTICAL_LRM                   4
+#define LINKEVENT_MODULE_OPTICAL_SFP_1G                        5
+#define LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLE      6
+#define LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLELEN   7
+#define LINKEVENT_MODULE_TWINAX                                8
+
+#define LINKSPEED_10GBPS       10000
+#define LINKSPEED_1GBPS                1000
+#define LINKSPEED_100MBPS      100
+#define LINKSPEED_10MBPS       10
+
+#define LINKSPEED_ENCODED_10MBPS       0
+#define LINKSPEED_ENCODED_100MBPS      1
+#define LINKSPEED_ENCODED_1GBPS                2
+
+#define LINKEVENT_AUTONEG_DISABLED     0
+#define LINKEVENT_AUTONEG_ENABLED      1
+
+#define LINKEVENT_HALF_DUPLEX          0
+#define LINKEVENT_FULL_DUPLEX          1
+
+#define LINKEVENT_LINKSPEED_MBPS       0
+#define LINKEVENT_LINKSPEED_ENCODED    1
+
+#define AUTO_FW_RESET_ENABLED  0xEF10AF12
+#define AUTO_FW_RESET_DISABLED 0xDCBAAF12
+
+/* firmware response header:
+ *     63:58 - message type
+ *     57:56 - owner
+ *     55:53 - desc count
+ *     52:48 - reserved
+ *     47:40 - completion id
+ *     39:32 - opcode
+ *     31:16 - error code
+ *     15:00 - reserved
+ */
+#define netxen_get_nic_msgtype(msg_hdr)        \
+       ((msg_hdr >> 58) & 0x3F)
+#define netxen_get_nic_msg_compid(msg_hdr)     \
+       ((msg_hdr >> 40) & 0xFF)
+#define netxen_get_nic_msg_opcode(msg_hdr)     \
+       ((msg_hdr >> 32) & 0xFF)
+#define netxen_get_nic_msg_errcode(msg_hdr)    \
+       ((msg_hdr >> 16) & 0xFFFF)
+
+typedef struct {
+       union {
+               struct {
+                       u64 hdr;
+                       u64 body[7];
+               };
+               u64 words[8];
+       };
+} nx_fw_msg_t;
+
+typedef struct {
+       __le64 qhdr;
+       __le64 req_hdr;
+       __le64 words[6];
+} nx_nic_req_t;
+
+typedef struct {
+       u8 op;
+       u8 tag;
+       u8 mac_addr[6];
+} nx_mac_req_t;
+
+#define MAX_PENDING_DESC_BLOCK_SIZE    64
+
+#define NETXEN_NIC_MSI_ENABLED         0x02
+#define NETXEN_NIC_MSIX_ENABLED                0x04
+#define NETXEN_NIC_LRO_ENABLED         0x08
+#define NETXEN_NIC_LRO_DISABLED                0x00
+#define NETXEN_NIC_BRIDGE_ENABLED       0X10
+#define NETXEN_NIC_DIAG_ENABLED                0x20
+#define NETXEN_FW_RESET_OWNER           0x40
+#define NETXEN_FW_MSS_CAP              0x80
+#define NETXEN_IS_MSI_FAMILY(adapter) \
+       ((adapter)->flags & (NETXEN_NIC_MSI_ENABLED | NETXEN_NIC_MSIX_ENABLED))
+
+#define MSIX_ENTRIES_PER_ADAPTER       NUM_STS_DESC_RINGS
+#define NETXEN_MSIX_TBL_SPACE          8192
+#define NETXEN_PCI_REG_MSIX_TBL                0x44
+
+#define NETXEN_DB_MAPSIZE_BYTES        0x1000
+
+#define NETXEN_ADAPTER_UP_MAGIC 777
+#define NETXEN_NIC_PEG_TUNE 0
+
+#define __NX_FW_ATTACHED               0
+#define __NX_DEV_UP                    1
+#define __NX_RESETTING                 2
+
+/* Mini Coredump FW supported version */
+#define NX_MD_SUPPORT_MAJOR            4
+#define NX_MD_SUPPORT_MINOR            0
+#define NX_MD_SUPPORT_SUBVERSION       579
+
+#define LSW(x)  ((uint16_t)(x))
+#define LSD(x)  ((uint32_t)((uint64_t)(x)))
+#define MSD(x)  ((uint32_t)((((uint64_t)(x)) >> 16) >> 16))
+
+/* Mini Coredump mask level */
+#define        NX_DUMP_MASK_MIN        0x03
+#define        NX_DUMP_MASK_DEF        0x1f
+#define        NX_DUMP_MASK_MAX        0xff
+
+/* Mini Coredump CDRP commands */
+#define NX_CDRP_CMD_TEMP_SIZE           0x0000002f
+#define NX_CDRP_CMD_GET_TEMP_HDR        0x00000030
+
+
+#define NX_DUMP_STATE_ARRAY_LEN                16
+#define NX_DUMP_CAP_SIZE_ARRAY_LEN     8
+
+/* Mini Coredump sysfs entries flags*/
+#define NX_FORCE_FW_DUMP_KEY           0xdeadfeed
+#define NX_ENABLE_FW_DUMP               0xaddfeed
+#define NX_DISABLE_FW_DUMP              0xbadfeed
+#define NX_FORCE_FW_RESET               0xdeaddead
+
+
+/* Fw dump levels */
+static const u32 FW_DUMP_LEVELS[] = { 0x3, 0x7, 0xf, 0x1f, 0x3f, 0x7f, 0xff };
+
+/* Flash read/write address */
+#define NX_FW_DUMP_REG1         0x00130060
+#define NX_FW_DUMP_REG2         0x001e0000
+#define NX_FLASH_SEM2_LK        0x0013C010
+#define NX_FLASH_SEM2_ULK       0x0013C014
+#define NX_FLASH_LOCK_ID        0x001B2100
+#define FLASH_ROM_WINDOW        0x42110030
+#define FLASH_ROM_DATA          0x42150000
+
+/* Mini Coredump register read/write routine */
+#define NX_RD_DUMP_REG(addr, bar0, data) do {                   \
+       writel((addr & 0xFFFF0000), (void __iomem *) (bar0 +            \
+               NX_FW_DUMP_REG1));                                      \
+       readl((void __iomem *) (bar0 + NX_FW_DUMP_REG1));               \
+       *data = readl((void __iomem *) (bar0 + NX_FW_DUMP_REG2 +        \
+               LSW(addr)));                                            \
+} while (0)
+
+#define NX_WR_DUMP_REG(addr, bar0, data) do {                   \
+       writel((addr & 0xFFFF0000), (void __iomem *) (bar0 +            \
+               NX_FW_DUMP_REG1));                                      \
+       readl((void __iomem *) (bar0 + NX_FW_DUMP_REG1));                \
+       writel(data, (void __iomem *) (bar0 + NX_FW_DUMP_REG2 + LSW(addr)));\
+       readl((void __iomem *) (bar0 + NX_FW_DUMP_REG2 + LSW(addr)));  \
+} while (0)
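A hypothetical usage sketch (names are placeholders, not from this patch): the macros select a 64 KB window through NX_FW_DUMP_REG1 and then access the low 16 bits of the address through NX_FW_DUMP_REG2:

/* 'bar0' is assumed to be the adapter's ioremapped BAR0
 * (e.g. adapter->ahw.pci_base0). */
static u32 nx_md_read_dump_word(void __iomem *bar0, u32 addr)
{
	u32 data;

	NX_RD_DUMP_REG(addr, bar0, &data);
	return data;
}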
+
+
+/*
+Entry Type Defines
+*/
+
+#define RDNOP  0
+#define RDCRB  1
+#define RDMUX  2
+#define QUEUE  3
+#define BOARD  4
+#define RDSRE  5
+#define RDOCM  6
+#define PREGS  7
+#define L1DTG  8
+#define L1ITG  9
+#define CACHE  10
+
+#define L1DAT  11
+#define L1INS  12
+#define RDSTK  13
+#define RDCON  14
+
+#define L2DTG  21
+#define L2ITG  22
+#define L2DAT  23
+#define L2INS  24
+#define RDOC3  25
+
+#define MEMBK  32
+
+#define RDROM  71
+#define RDMEM  72
+#define RDMN   73
+
+#define INFOR  81
+#define CNTRL  98
+
+#define TLHDR  99
+#define RDEND  255
+
+#define PRIMQ  103
+#define SQG2Q  104
+#define SQG3Q  105
+
+/*
+* Opcodes for Control Entries.
+* These Flags are bit fields.
+*/
+#define NX_DUMP_WCRB           0x01
+#define NX_DUMP_RWCRB          0x02
+#define NX_DUMP_ANDCRB         0x04
+#define NX_DUMP_ORCRB          0x08
+#define NX_DUMP_POLLCRB                0x10
+#define NX_DUMP_RD_SAVE                0x20
+#define NX_DUMP_WRT_SAVED      0x40
+#define NX_DUMP_MOD_SAVE_ST    0x80
+
+/* Driver Flags */
+#define NX_DUMP_SKIP           0x80    /*  driver skipped this entry  */
+#define NX_DUMP_SIZE_ERR 0x40  /*entry size vs capture size mismatch*/
+
+#define NX_PCI_READ_32(ADDR)                   readl((ADDR))
+#define NX_PCI_WRITE_32(DATA, ADDR)    writel(DATA, (ADDR))
+
+
+
+struct netxen_minidump {
+       u32 pos;                        /* position in the dump buffer */
+       u8  fw_supports_md;             /* FW supports Mini Coredump */
+       u8  has_valid_dump;             /* indicates valid dump */
+       u8  md_capture_mask;            /* driver capture mask */
+       u8  md_enabled;                 /* Turn Mini Coredump on/off */
+       u32 md_dump_size;               /* Total FW Mini Coredump size */
+       u32 md_capture_size;            /* FW dump capture size */
+       u32 md_template_size;           /* FW template size */
+       u32 md_template_ver;            /* FW template version */
+       u64 md_timestamp;               /* FW Mini dump timestamp */
+       void *md_template;              /* FW template will be stored */
+       void *md_capture_buff;          /* FW dump will be stored */
+};
+
+
+
+struct netxen_minidump_template_hdr {
+       u32 entry_type;
+       u32 first_entry_offset;
+       u32 size_of_template;
+       u32 capture_mask;
+       u32 num_of_entries;
+       u32 version;
+       u32 driver_timestamp;
+       u32 checksum;
+       u32 driver_capture_mask;
+       u32 driver_info_word2;
+       u32 driver_info_word3;
+       u32 driver_info_word4;
+       u32 saved_state_array[NX_DUMP_STATE_ARRAY_LEN];
+       u32 capture_size_array[NX_DUMP_CAP_SIZE_ARRAY_LEN];
+       u32 rsvd[0];
+};
+
+/*
+ * Common Entry Header: common to all entry types.
+ * "Driver Code" is for the driver to write some info about the entry.
+ * Currently not used.
+ */
+
+struct netxen_common_entry_hdr {
+       u32 entry_type;
+       u32 entry_size;
+       u32 entry_capture_size;
+       union {
+               struct {
+                       u8 entry_capture_mask;
+                       u8 entry_code;
+                       u8 driver_code;
+                       u8 driver_flags;
+               };
+               u32 entry_ctrl_word;
+       };
+};
+
+
+/* Generic Entry Including Header */
+struct netxen_minidump_entry {
+       struct netxen_common_entry_hdr hdr;
+       u32 entry_data00;
+       u32 entry_data01;
+       u32 entry_data02;
+       u32 entry_data03;
+       u32 entry_data04;
+       u32 entry_data05;
+       u32 entry_data06;
+       u32 entry_data07;
+};
+
+/* Read ROM Header */
+struct netxen_minidump_entry_rdrom {
+       struct netxen_common_entry_hdr h;
+       union {
+               struct {
+                       u32 select_addr_reg;
+               };
+               u32 rsvd_0;
+       };
+       union {
+               struct {
+                       u8 addr_stride;
+                       u8 addr_cnt;
+                       u16 data_size;
+               };
+               u32 rsvd_1;
+       };
+       union {
+               struct {
+                       u32 op_count;
+               };
+               u32 rsvd_2;
+       };
+       union {
+               struct {
+                       u32 read_addr_reg;
+               };
+               u32 rsvd_3;
+       };
+       union {
+               struct {
+                       u32 write_mask;
+               };
+               u32 rsvd_4;
+       };
+       union {
+               struct {
+                       u32 read_mask;
+               };
+               u32 rsvd_5;
+       };
+       u32 read_addr;
+       u32 read_data_size;
+};
+
+
+/* Read CRB and Control Entry Header */
+struct netxen_minidump_entry_crb {
+       struct netxen_common_entry_hdr h;
+       u32 addr;
+       union {
+               struct {
+                       u8 addr_stride;
+                       u8 state_index_a;
+                       u16 poll_timeout;
+               };
+               u32 addr_cntrl;
+       };
+       u32 data_size;
+       u32 op_count;
+       union {
+               struct {
+                       u8 opcode;
+                       u8 state_index_v;
+                       u8 shl;
+                       u8 shr;
+               };
+               u32 control_value;
+       };
+       u32 value_1;
+       u32 value_2;
+       u32 value_3;
+};
+
+/* Read Memory and MN Header */
+struct netxen_minidump_entry_rdmem {
+       struct netxen_common_entry_hdr h;
+       union {
+               struct {
+                       u32 select_addr_reg;
+               };
+               u32 rsvd_0;
+       };
+       union {
+               struct {
+                       u8 addr_stride;
+                       u8 addr_cnt;
+                       u16 data_size;
+               };
+               u32 rsvd_1;
+       };
+       union {
+               struct {
+                       u32 op_count;
+               };
+               u32 rsvd_2;
+       };
+       union {
+               struct {
+                       u32 read_addr_reg;
+               };
+               u32 rsvd_3;
+       };
+       union {
+               struct {
+                       u32 cntrl_addr_reg;
+               };
+               u32 rsvd_4;
+       };
+       union {
+               struct {
+                       u8 wr_byte0;
+                       u8 wr_byte1;
+                       u8 poll_mask;
+                       u8 poll_cnt;
+               };
+               u32 rsvd_5;
+       };
+       u32 read_addr;
+       u32 read_data_size;
+};
+
+/* Read Cache L1 and L2 Header */
+struct netxen_minidump_entry_cache {
+       struct netxen_common_entry_hdr h;
+       u32 tag_reg_addr;
+       union {
+               struct {
+                       u16 tag_value_stride;
+                       u16 init_tag_value;
+               };
+               u32 select_addr_cntrl;
+       };
+       u32 data_size;
+       u32 op_count;
+       u32 control_addr;
+       union {
+               struct {
+                       u16 write_value;
+                       u8 poll_mask;
+                       u8 poll_wait;
+               };
+               u32 control_value;
+       };
+       u32 read_addr;
+       union {
+               struct {
+                       u8 read_addr_stride;
+                       u8 read_addr_cnt;
+                       u16 rsvd_1;
+               };
+               u32 read_addr_cntrl;
+       };
+};
+
+/* Read OCM Header */
+struct netxen_minidump_entry_rdocm {
+       struct netxen_common_entry_hdr h;
+       u32 rsvd_0;
+       union {
+               struct {
+                       u32 rsvd_1;
+               };
+               u32 select_addr_cntrl;
+       };
+       u32 data_size;
+       u32 op_count;
+       u32 rsvd_2;
+       u32 rsvd_3;
+       u32 read_addr;
+       union {
+               struct {
+                       u32 read_addr_stride;
+               };
+               u32 read_addr_cntrl;
+       };
+};
+
+/* Read MUX Header */
+struct netxen_minidump_entry_mux {
+       struct netxen_common_entry_hdr h;
+       u32 select_addr;
+       union {
+               struct {
+                       u32 rsvd_0;
+               };
+               u32 select_addr_cntrl;
+       };
+       u32 data_size;
+       u32 op_count;
+       u32 select_value;
+       u32 select_value_stride;
+       u32 read_addr;
+       u32 rsvd_1;
+};
+
+/* Read Queue Header */
+struct netxen_minidump_entry_queue {
+       struct netxen_common_entry_hdr h;
+       u32 select_addr;
+       union {
+               struct {
+                       u16 queue_id_stride;
+                       u16 rsvd_0;
+               };
+               u32 select_addr_cntrl;
+       };
+       u32 data_size;
+       u32 op_count;
+       u32 rsvd_1;
+       u32 rsvd_2;
+       u32 read_addr;
+       union {
+               struct {
+                       u8 read_addr_stride;
+                       u8 read_addr_cnt;
+                       u16 rsvd_3;
+               };
+               u32 read_addr_cntrl;
+       };
+};
+
+struct netxen_dummy_dma {
+       void *addr;
+       dma_addr_t phys_addr;
+};
+
+struct netxen_adapter {
+       struct netxen_hardware_context ahw;
+
+       struct net_device *netdev;
+       struct pci_dev *pdev;
+       struct list_head mac_list;
+       struct list_head ip_list;
+
+       spinlock_t tx_clean_lock;
+
+       u16 num_txd;
+       u16 num_rxd;
+       u16 num_jumbo_rxd;
+       u16 num_lro_rxd;
+
+       u8 max_rds_rings;
+       u8 max_sds_rings;
+       u8 driver_mismatch;
+       u8 msix_supported;
+       u8 __pad;
+       u8 pci_using_dac;
+       u8 portnum;
+       u8 physical_port;
+
+       u8 mc_enabled;
+       u8 max_mc_count;
+       u8 rss_supported;
+       u8 link_changed;
+       u8 fw_wait_cnt;
+       u8 fw_fail_cnt;
+       u8 tx_timeo_cnt;
+       u8 need_fw_reset;
+
+       u8 has_link_events;
+       u8 fw_type;
+       u16 tx_context_id;
+       u16 mtu;
+       u16 is_up;
+
+       u16 link_speed;
+       u16 link_duplex;
+       u16 link_autoneg;
+       u16 module_type;
+
+       u32 capabilities;
+       u32 flags;
+       u32 irq;
+       u32 temp;
+
+       u32 int_vec_bit;
+       u32 heartbit;
+
+       u8 mac_addr[ETH_ALEN];
+
+       struct netxen_adapter_stats stats;
+
+       struct netxen_recv_context recv_ctx;
+       struct nx_host_tx_ring *tx_ring;
+
+       int (*macaddr_set) (struct netxen_adapter *, u8 *);
+       int (*set_mtu) (struct netxen_adapter *, int);
+       int (*set_promisc) (struct netxen_adapter *, u32);
+       void (*set_multi) (struct net_device *);
+       int (*phy_read) (struct netxen_adapter *, u32 reg, u32 *);
+       int (*phy_write) (struct netxen_adapter *, u32 reg, u32 val);
+       int (*init_port) (struct netxen_adapter *, int);
+       int (*stop_port) (struct netxen_adapter *);
+
+       u32 (*crb_read)(struct netxen_adapter *, ulong);
+       int (*crb_write)(struct netxen_adapter *, ulong, u32);
+
+       int (*pci_mem_read)(struct netxen_adapter *, u64, u64 *);
+       int (*pci_mem_write)(struct netxen_adapter *, u64, u64);
+
+       int (*pci_set_window)(struct netxen_adapter *, u64, u32 *);
+
+       u32 (*io_read)(struct netxen_adapter *, void __iomem *);
+       void (*io_write)(struct netxen_adapter *, void __iomem *, u32);
+
+       void __iomem    *tgt_mask_reg;
+       void __iomem    *pci_int_reg;
+       void __iomem    *tgt_status_reg;
+       void __iomem    *crb_int_state_reg;
+       void __iomem    *isr_int_vec;
+
+       struct msix_entry msix_entries[MSIX_ENTRIES_PER_ADAPTER];
+
+       struct netxen_dummy_dma dummy_dma;
+
+       struct delayed_work fw_work;
+
+       struct work_struct  tx_timeout_task;
+
+       nx_nic_intr_coalesce_t coal;
+
+       unsigned long state;
+       __le32 file_prd_off;    /*File fw product offset*/
+       u32 fw_version;
+       const struct firmware *fw;
+       struct netxen_minidump mdump;   /* mdump ptr */
+       int fw_mdump_rdy;       /* for mdump ready */
+};
+
+int nx_fw_cmd_query_phy(struct netxen_adapter *adapter, u32 reg, u32 *val);
+int nx_fw_cmd_set_phy(struct netxen_adapter *adapter, u32 reg, u32 val);
+
+#define NXRD32(adapter, off) \
+       (adapter->crb_read(adapter, off))
+#define NXWR32(adapter, off, val) \
+       (adapter->crb_write(adapter, off, val))
+#define NXRDIO(adapter, addr) \
+       (adapter->io_read(adapter, addr))
+#define NXWRIO(adapter, addr, val) \
+       (adapter->io_write(adapter, addr, val))
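+/*
+ * Illustrative use: netxen_nic_get_drvinfo() in netxen_nic_ethtool.c reads
+ * the firmware version through the CRB accessor, e.g.
+ * NXRD32(adapter, NETXEN_FW_VERSION_MAJOR).
+ */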
+
+int netxen_pcie_sem_lock(struct netxen_adapter *, int, u32);
+void netxen_pcie_sem_unlock(struct netxen_adapter *, int);
+
+#define netxen_rom_lock(a)     \
+       netxen_pcie_sem_lock((a), 2, NETXEN_ROM_LOCK_ID)
+#define netxen_rom_unlock(a)   \
+       netxen_pcie_sem_unlock((a), 2)
+#define netxen_phy_lock(a)     \
+       netxen_pcie_sem_lock((a), 3, NETXEN_PHY_LOCK_ID)
+#define netxen_phy_unlock(a)   \
+       netxen_pcie_sem_unlock((a), 3)
+#define netxen_api_lock(a)     \
+       netxen_pcie_sem_lock((a), 5, 0)
+#define netxen_api_unlock(a)   \
+       netxen_pcie_sem_unlock((a), 5)
+#define netxen_sw_lock(a)      \
+       netxen_pcie_sem_lock((a), 6, 0)
+#define netxen_sw_unlock(a)    \
+       netxen_pcie_sem_unlock((a), 6)
+#define crb_win_lock(a)        \
+       netxen_pcie_sem_lock((a), 7, NETXEN_CRB_WIN_LOCK_ID)
+#define crb_win_unlock(a)      \
+       netxen_pcie_sem_unlock((a), 7)
+
+int netxen_nic_get_board_info(struct netxen_adapter *adapter);
+int netxen_nic_wol_supported(struct netxen_adapter *adapter);
+
+/* Functions from netxen_nic_init.c */
+int netxen_init_dummy_dma(struct netxen_adapter *adapter);
+void netxen_free_dummy_dma(struct netxen_adapter *adapter);
+
+int netxen_check_flash_fw_compatibility(struct netxen_adapter *adapter);
+int netxen_phantom_init(struct netxen_adapter *adapter, int pegtune_val);
+int netxen_load_firmware(struct netxen_adapter *adapter);
+int netxen_need_fw_reset(struct netxen_adapter *adapter);
+void netxen_request_firmware(struct netxen_adapter *adapter);
+void netxen_release_firmware(struct netxen_adapter *adapter);
+int netxen_pinit_from_rom(struct netxen_adapter *adapter);
+
+int netxen_rom_fast_read(struct netxen_adapter *adapter, int addr, int *valp);
+int netxen_rom_fast_read_words(struct netxen_adapter *adapter, int addr,
+                               u8 *bytes, size_t size);
+int netxen_rom_fast_write_words(struct netxen_adapter *adapter, int addr,
+                               u8 *bytes, size_t size);
+int netxen_flash_unlock(struct netxen_adapter *adapter);
+int netxen_backup_crbinit(struct netxen_adapter *adapter);
+int netxen_flash_erase_secondary(struct netxen_adapter *adapter);
+int netxen_flash_erase_primary(struct netxen_adapter *adapter);
+void netxen_halt_pegs(struct netxen_adapter *adapter);
+
+int netxen_rom_se(struct netxen_adapter *adapter, int addr);
+
+int netxen_alloc_sw_resources(struct netxen_adapter *adapter);
+void netxen_free_sw_resources(struct netxen_adapter *adapter);
+
+void netxen_setup_hwops(struct netxen_adapter *adapter);
+void __iomem *netxen_get_ioaddr(struct netxen_adapter *, u32);
+
+int netxen_alloc_hw_resources(struct netxen_adapter *adapter);
+void netxen_free_hw_resources(struct netxen_adapter *adapter);
+
+void netxen_release_rx_buffers(struct netxen_adapter *adapter);
+void netxen_release_tx_buffers(struct netxen_adapter *adapter);
+
+int netxen_init_firmware(struct netxen_adapter *adapter);
+void netxen_nic_clear_stats(struct netxen_adapter *adapter);
+void netxen_watchdog_task(struct work_struct *work);
+void netxen_post_rx_buffers(struct netxen_adapter *adapter, u32 ringid,
+               struct nx_host_rds_ring *rds_ring);
+int netxen_process_cmd_ring(struct netxen_adapter *adapter);
+int netxen_process_rcv_ring(struct nx_host_sds_ring *sds_ring, int max);
+
+void netxen_p3_free_mac_list(struct netxen_adapter *adapter);
+int netxen_config_intr_coalesce(struct netxen_adapter *adapter);
+int netxen_config_rss(struct netxen_adapter *adapter, int enable);
+int netxen_config_ipaddr(struct netxen_adapter *adapter, __be32 ip, int cmd);
+int netxen_linkevent_request(struct netxen_adapter *adapter, int enable);
+void netxen_advert_link_change(struct netxen_adapter *adapter, int linkup);
+void netxen_pci_camqm_read_2M(struct netxen_adapter *, u64, u64 *);
+void netxen_pci_camqm_write_2M(struct netxen_adapter *, u64, u64);
+
+int nx_fw_cmd_set_gbe_port(struct netxen_adapter *adapter,
+                               u32 speed, u32 duplex, u32 autoneg);
+int nx_fw_cmd_set_mtu(struct netxen_adapter *adapter, int mtu);
+int netxen_nic_change_mtu(struct net_device *netdev, int new_mtu);
+int netxen_config_hw_lro(struct netxen_adapter *adapter, int enable);
+int netxen_config_bridged_mode(struct netxen_adapter *adapter, int enable);
+int netxen_send_lro_cleanup(struct netxen_adapter *adapter);
+int netxen_setup_minidump(struct netxen_adapter *adapter);
+void netxen_dump_fw(struct netxen_adapter *adapter);
+void netxen_nic_update_cmd_producer(struct netxen_adapter *adapter,
+               struct nx_host_tx_ring *tx_ring);
+
+/* Functions from netxen_nic_main.c */
+int netxen_nic_reset_context(struct netxen_adapter *);
+
+int nx_dev_request_reset(struct netxen_adapter *adapter);
+
+/*
+ * NetXen Board information
+ */
+
+#define NETXEN_MAX_SHORT_NAME 32
+struct netxen_brdinfo {
+       int brdtype;    /* type of board */
+       long ports;             /* max number of physical ports */
+       char short_name[NETXEN_MAX_SHORT_NAME];
+};
+
+struct netxen_dimm_cfg {
+       u8 presence;
+       u8 mem_type;
+       u8 dimm_type;
+       u32 size;
+};
+
+static const struct netxen_brdinfo netxen_boards[] = {
+       {NETXEN_BRDTYPE_P2_SB31_10G_CX4, 1, "XGb CX4"},
+       {NETXEN_BRDTYPE_P2_SB31_10G_HMEZ, 1, "XGb HMEZ"},
+       {NETXEN_BRDTYPE_P2_SB31_10G_IMEZ, 2, "XGb IMEZ"},
+       {NETXEN_BRDTYPE_P2_SB31_10G, 1, "XGb XFP"},
+       {NETXEN_BRDTYPE_P2_SB35_4G, 4, "Quad Gb"},
+       {NETXEN_BRDTYPE_P2_SB31_2G, 2, "Dual Gb"},
+       {NETXEN_BRDTYPE_P3_REF_QG,  4, "Reference Quad Gig "},
+       {NETXEN_BRDTYPE_P3_HMEZ,    2, "Dual XGb HMEZ"},
+       {NETXEN_BRDTYPE_P3_10G_CX4_LP,   2, "Dual XGb CX4 LP"},
+       {NETXEN_BRDTYPE_P3_4_GB,    4, "Quad Gig LP"},
+       {NETXEN_BRDTYPE_P3_IMEZ,    2, "Dual XGb IMEZ"},
+       {NETXEN_BRDTYPE_P3_10G_SFP_PLUS, 2, "Dual XGb SFP+ LP"},
+       {NETXEN_BRDTYPE_P3_10000_BASE_T, 1, "XGB 10G BaseT LP"},
+       {NETXEN_BRDTYPE_P3_XG_LOM,  2, "Dual XGb LOM"},
+       {NETXEN_BRDTYPE_P3_4_GB_MM, 4, "NX3031 Gigabit Ethernet"},
+       {NETXEN_BRDTYPE_P3_10G_SFP_CT, 2, "NX3031 10 Gigabit Ethernet"},
+       {NETXEN_BRDTYPE_P3_10G_SFP_QT, 2, "Quanta Dual XGb SFP+"},
+       {NETXEN_BRDTYPE_P3_10G_CX4, 2, "Reference Dual CX4 Option"},
+       {NETXEN_BRDTYPE_P3_10G_XFP, 1, "Reference Single XFP Option"}
+};
+
+#define NUM_SUPPORTED_BOARDS ARRAY_SIZE(netxen_boards)
+
+static inline int netxen_nic_get_brd_name_by_type(u32 type, char *name)
+{
+       int i, found = 0;
+       for (i = 0; i < NUM_SUPPORTED_BOARDS; ++i) {
+               if (netxen_boards[i].brdtype == type) {
+                       strcpy(name, netxen_boards[i].short_name);
+                       found = 1;
+                       break;
+               }
+       }
+
+       if (!found) {
+               strcpy(name, "Unknown");
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+static inline u32 netxen_tx_avail(struct nx_host_tx_ring *tx_ring)
+{
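+       /* Full barrier so the producer and consumer indices read below are
+        * not reordered with earlier descriptor updates.
+        */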
+       smp_mb();
+       return find_diff_among(tx_ring->producer,
+                       tx_ring->sw_consumer, tx_ring->num_desc);
+}
+
+int netxen_get_flash_mac_addr(struct netxen_adapter *adapter, u64 *mac);
+int netxen_p3_get_mac_addr(struct netxen_adapter *adapter, u64 *mac);
+void netxen_change_ringparam(struct netxen_adapter *adapter);
+int netxen_rom_fast_read(struct netxen_adapter *adapter, int addr, int *valp);
+
+extern const struct ethtool_ops netxen_nic_ethtool_ops;
+
+#endif                         /* __NETXEN_NIC_H_ */
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c
new file mode 100644 (file)
index 0000000..b8d5270
--- /dev/null
@@ -0,0 +1,941 @@
+/*
+ * Copyright (C) 2003 - 2009 NetXen, Inc.
+ * Copyright (C) 2009 - QLogic Corporation.
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called "COPYING".
+ *
+ */
+
+#include "netxen_nic_hw.h"
+#include "netxen_nic.h"
+
+#define NXHAL_VERSION  1
+
+static u32
+netxen_poll_rsp(struct netxen_adapter *adapter)
+{
+       u32 rsp = NX_CDRP_RSP_OK;
+       int     timeout = 0;
+
+       do {
+               /* give at least 1 ms for firmware to respond */
+               msleep(1);
+
+               if (++timeout > NX_OS_CRB_RETRY_COUNT)
+                       return NX_CDRP_RSP_TIMEOUT;
+
+               rsp = NXRD32(adapter, NX_CDRP_CRB_OFFSET);
+       } while (!NX_CDRP_IS_RSP(rsp));
+
+       return rsp;
+}
+
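+/*
+ * CDRP mailbox handshake: take the API semaphore, write the signature, the
+ * three argument registers and the command into the CRB, poll until firmware
+ * posts a response code, then read any result arguments back.
+ */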
+static u32
+netxen_issue_cmd(struct netxen_adapter *adapter, struct netxen_cmd_args *cmd)
+{
+       u32 rsp;
+       u32 signature = 0;
+       u32 rcode = NX_RCODE_SUCCESS;
+
+       signature = NX_CDRP_SIGNATURE_MAKE(adapter->ahw.pci_func,
+                                               NXHAL_VERSION);
+       /* Acquire semaphore before accessing CRB */
+       if (netxen_api_lock(adapter))
+               return NX_RCODE_TIMEOUT;
+
+       NXWR32(adapter, NX_SIGN_CRB_OFFSET, signature);
+
+       NXWR32(adapter, NX_ARG1_CRB_OFFSET, cmd->req.arg1);
+
+       NXWR32(adapter, NX_ARG2_CRB_OFFSET, cmd->req.arg2);
+
+       NXWR32(adapter, NX_ARG3_CRB_OFFSET, cmd->req.arg3);
+
+       NXWR32(adapter, NX_CDRP_CRB_OFFSET, NX_CDRP_FORM_CMD(cmd->req.cmd));
+
+       rsp = netxen_poll_rsp(adapter);
+
+       if (rsp == NX_CDRP_RSP_TIMEOUT) {
+               printk(KERN_ERR "%s: card response timeout.\n",
+                               netxen_nic_driver_name);
+
+               rcode = NX_RCODE_TIMEOUT;
+       } else if (rsp == NX_CDRP_RSP_FAIL) {
+               rcode = NXRD32(adapter, NX_ARG1_CRB_OFFSET);
+
+               printk(KERN_ERR "%s: failed card response, code: 0x%x\n",
+                               netxen_nic_driver_name, rcode);
+       } else if (rsp == NX_CDRP_RSP_OK) {
+               cmd->rsp.cmd = NX_RCODE_SUCCESS;
+               if (cmd->rsp.arg2)
+                       cmd->rsp.arg2 = NXRD32(adapter, NX_ARG2_CRB_OFFSET);
+               if (cmd->rsp.arg3)
+                       cmd->rsp.arg3 = NXRD32(adapter, NX_ARG3_CRB_OFFSET);
+       }
+
+       if (cmd->rsp.arg1)
+               cmd->rsp.arg1 = NXRD32(adapter, NX_ARG1_CRB_OFFSET);
+       /* Release semaphore */
+       netxen_api_unlock(adapter);
+
+       return rcode;
+}
+
+static int
+netxen_get_minidump_template_size(struct netxen_adapter *adapter)
+{
+       struct netxen_cmd_args cmd;
+       memset(&cmd, 0, sizeof(cmd));
+       cmd.req.cmd = NX_CDRP_CMD_TEMP_SIZE;
+       memset(&cmd.rsp, 1, sizeof(struct _cdrp_cmd));
+       netxen_issue_cmd(adapter, &cmd);
+       if (cmd.rsp.cmd != NX_RCODE_SUCCESS) {
+               dev_info(&adapter->pdev->dev,
+                       "Can't get template size %d\n", cmd.rsp.cmd);
+               return -EIO;
+       }
+       adapter->mdump.md_template_size = cmd.rsp.arg2;
+       adapter->mdump.md_template_ver = cmd.rsp.arg3;
+       return 0;
+}
+
+static int
+netxen_get_minidump_template(struct netxen_adapter *adapter)
+{
+       dma_addr_t md_template_addr;
+       void *addr;
+       u32 size;
+       struct netxen_cmd_args cmd;
+       size = adapter->mdump.md_template_size;
+
+       if (size == 0) {
+               dev_err(&adapter->pdev->dev, "Cannot capture Minidump "
+                       "template: invalid template size.\n");
+               return NX_RCODE_INVALID_ARGS;
+       }
+
+       addr = pci_zalloc_consistent(adapter->pdev, size, &md_template_addr);
+       if (!addr) {
+               dev_err(&adapter->pdev->dev, "Unable to allocate DMA-able memory for template.\n");
+               return -ENOMEM;
+       }
+
+       memset(&cmd, 0, sizeof(cmd));
+       memset(&cmd.rsp, 1, sizeof(struct _cdrp_cmd));
+       cmd.req.cmd = NX_CDRP_CMD_GET_TEMP_HDR;
+       cmd.req.arg1 = LSD(md_template_addr);
+       cmd.req.arg2 = MSD(md_template_addr);
+       cmd.req.arg3 |= size;
+       netxen_issue_cmd(adapter, &cmd);
+
+       if ((cmd.rsp.cmd == NX_RCODE_SUCCESS) && (size == cmd.rsp.arg2)) {
+               memcpy(adapter->mdump.md_template, addr, size);
+       } else {
+               dev_err(&adapter->pdev->dev, "Failed to get minidump template, "
+                       "err_code: %d, requested_size: %d, actual_size: %d\n",
+                       cmd.rsp.cmd, size, cmd.rsp.arg2);
+       }
+       pci_free_consistent(adapter->pdev, size, addr, md_template_addr);
+       return 0;
+}
+
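+/*
+ * 32-bit one's-complement style sum over the whole template (including the
+ * stored checksum word): a valid template folds to all-ones, so the
+ * complement returned here is zero.
+ */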
+static u32
+netxen_check_template_checksum(struct netxen_adapter *adapter)
+{
+       u64 sum = 0;
+       u32 *buff = adapter->mdump.md_template;
+       int count = adapter->mdump.md_template_size / sizeof(uint32_t);
+
+       while (count-- > 0)
+               sum += *buff++;
+       while (sum >> 32)
+               sum = (sum & 0xFFFFFFFF) + (sum >> 32);
+
+       return ~sum;
+}
+
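+/*
+ * Query the template size from firmware, allocate a buffer, fetch the
+ * template over DMA, verify its checksum and convert it to host byte order;
+ * on success the adapter is marked as minidump-capable.
+ */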
+int
+netxen_setup_minidump(struct netxen_adapter *adapter)
+{
+       int err = 0, i;
+       u32 *template, *tmp_buf;
+       struct netxen_minidump_template_hdr *hdr;
+       err = netxen_get_minidump_template_size(adapter);
+       if (err) {
+               adapter->mdump.fw_supports_md = 0;
+               if ((err == NX_RCODE_CMD_INVALID) ||
+                       (err == NX_RCODE_CMD_NOT_IMPL)) {
+                       dev_info(&adapter->pdev->dev,
+                               "Flashed firmware version does not support minidump, "
+                               "minimum version required is [ %u.%u.%u ].\n",
+                               NX_MD_SUPPORT_MAJOR, NX_MD_SUPPORT_MINOR,
+                               NX_MD_SUPPORT_SUBVERSION);
+               }
+               return err;
+       }
+
+       if (!adapter->mdump.md_template_size) {
+               dev_err(&adapter->pdev->dev,
+                       "Invalid template size, should be non-zero.\n");
+               return -EIO;
+       }
+       adapter->mdump.md_template =
+               kmalloc(adapter->mdump.md_template_size, GFP_KERNEL);
+
+       if (!adapter->mdump.md_template)
+               return -ENOMEM;
+
+       err = netxen_get_minidump_template(adapter);
+       if (err) {
+               if (err == NX_RCODE_CMD_NOT_IMPL)
+                       adapter->mdump.fw_supports_md = 0;
+               goto free_template;
+       }
+
+       if (netxen_check_template_checksum(adapter)) {
+               dev_err(&adapter->pdev->dev, "Minidump template checksum Error\n");
+               err = -EIO;
+               goto free_template;
+       }
+
+       adapter->mdump.md_capture_mask = NX_DUMP_MASK_DEF;
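+       /* The template arrives little-endian from firmware; convert it to
+        * host byte order in place before touching the header fields.
+        */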
+       tmp_buf = (u32 *) adapter->mdump.md_template;
+       template = (u32 *) adapter->mdump.md_template;
+       for (i = 0; i < adapter->mdump.md_template_size/sizeof(u32); i++)
+               *template++ = __le32_to_cpu(*tmp_buf++);
+       hdr = (struct netxen_minidump_template_hdr *)
+                               adapter->mdump.md_template;
+       adapter->mdump.md_capture_buff = NULL;
+       adapter->mdump.fw_supports_md = 1;
+       adapter->mdump.md_enabled = 0;
+
+       return err;
+
+free_template:
+       kfree(adapter->mdump.md_template);
+       adapter->mdump.md_template = NULL;
+       return err;
+}
+
+
+int
+nx_fw_cmd_set_mtu(struct netxen_adapter *adapter, int mtu)
+{
+       u32 rcode = NX_RCODE_SUCCESS;
+       struct netxen_recv_context *recv_ctx = &adapter->recv_ctx;
+       struct netxen_cmd_args cmd;
+
+       memset(&cmd, 0, sizeof(cmd));
+       cmd.req.cmd = NX_CDRP_CMD_SET_MTU;
+       cmd.req.arg1 = recv_ctx->context_id;
+       cmd.req.arg2 = mtu;
+       cmd.req.arg3 = 0;
+
+       if (recv_ctx->state == NX_HOST_CTX_STATE_ACTIVE)
+               rcode = netxen_issue_cmd(adapter, &cmd);
+
+       if (rcode != NX_RCODE_SUCCESS)
+               return -EIO;
+
+       return 0;
+}
+
+int
+nx_fw_cmd_set_gbe_port(struct netxen_adapter *adapter,
+                       u32 speed, u32 duplex, u32 autoneg)
+{
+       struct netxen_cmd_args cmd;
+
+       memset(&cmd, 0, sizeof(cmd));
+       cmd.req.cmd = NX_CDRP_CMD_CONFIG_GBE_PORT;
+       cmd.req.arg1 = speed;
+       cmd.req.arg2 = duplex;
+       cmd.req.arg3 = autoneg;
+       return netxen_issue_cmd(adapter, &cmd);
+}
+
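+/*
+ * Build a host request describing the RDS/SDS rings in DMA-coherent memory,
+ * hand its address to firmware via NX_CDRP_CMD_CREATE_RX_CTX, then pick up
+ * the producer/consumer/interrupt CRB offsets from the card's response.
+ */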
+static int
+nx_fw_cmd_create_rx_ctx(struct netxen_adapter *adapter)
+{
+       void *addr;
+       nx_hostrq_rx_ctx_t *prq;
+       nx_cardrsp_rx_ctx_t *prsp;
+       nx_hostrq_rds_ring_t *prq_rds;
+       nx_hostrq_sds_ring_t *prq_sds;
+       nx_cardrsp_rds_ring_t *prsp_rds;
+       nx_cardrsp_sds_ring_t *prsp_sds;
+       struct nx_host_rds_ring *rds_ring;
+       struct nx_host_sds_ring *sds_ring;
+       struct netxen_cmd_args cmd;
+
+       dma_addr_t hostrq_phys_addr, cardrsp_phys_addr;
+       u64 phys_addr;
+
+       int i, nrds_rings, nsds_rings;
+       size_t rq_size, rsp_size;
+       u32 cap, reg, val;
+
+       int err;
+
+       struct netxen_recv_context *recv_ctx = &adapter->recv_ctx;
+
+       nrds_rings = adapter->max_rds_rings;
+       nsds_rings = adapter->max_sds_rings;
+
+       rq_size =
+               SIZEOF_HOSTRQ_RX(nx_hostrq_rx_ctx_t, nrds_rings, nsds_rings);
+       rsp_size =
+               SIZEOF_CARDRSP_RX(nx_cardrsp_rx_ctx_t, nrds_rings, nsds_rings);
+
+       addr = pci_alloc_consistent(adapter->pdev,
+                               rq_size, &hostrq_phys_addr);
+       if (addr == NULL)
+               return -ENOMEM;
+       prq = addr;
+
+       addr = pci_alloc_consistent(adapter->pdev,
+                       rsp_size, &cardrsp_phys_addr);
+       if (addr == NULL) {
+               err = -ENOMEM;
+               goto out_free_rq;
+       }
+       prsp = addr;
+
+       prq->host_rsp_dma_addr = cpu_to_le64(cardrsp_phys_addr);
+
+       cap = (NX_CAP0_LEGACY_CONTEXT | NX_CAP0_LEGACY_MN);
+       cap |= (NX_CAP0_JUMBO_CONTIGUOUS | NX_CAP0_LRO_CONTIGUOUS);
+
+       if (adapter->flags & NETXEN_FW_MSS_CAP)
+               cap |= NX_CAP0_HW_LRO_MSS;
+
+       prq->capabilities[0] = cpu_to_le32(cap);
+       prq->host_int_crb_mode =
+               cpu_to_le32(NX_HOST_INT_CRB_MODE_SHARED);
+       prq->host_rds_crb_mode =
+               cpu_to_le32(NX_HOST_RDS_CRB_MODE_UNIQUE);
+
+       prq->num_rds_rings = cpu_to_le16(nrds_rings);
+       prq->num_sds_rings = cpu_to_le16(nsds_rings);
+       prq->rds_ring_offset = cpu_to_le32(0);
+
+       val = le32_to_cpu(prq->rds_ring_offset) +
+               (sizeof(nx_hostrq_rds_ring_t) * nrds_rings);
+       prq->sds_ring_offset = cpu_to_le32(val);
+
+       prq_rds = (nx_hostrq_rds_ring_t *)(prq->data +
+                       le32_to_cpu(prq->rds_ring_offset));
+
+       for (i = 0; i < nrds_rings; i++) {
+
+               rds_ring = &recv_ctx->rds_rings[i];
+
+               prq_rds[i].host_phys_addr = cpu_to_le64(rds_ring->phys_addr);
+               prq_rds[i].ring_size = cpu_to_le32(rds_ring->num_desc);
+               prq_rds[i].ring_kind = cpu_to_le32(i);
+               prq_rds[i].buff_size = cpu_to_le64(rds_ring->dma_size);
+       }
+
+       prq_sds = (nx_hostrq_sds_ring_t *)(prq->data +
+                       le32_to_cpu(prq->sds_ring_offset));
+
+       for (i = 0; i < nsds_rings; i++) {
+
+               sds_ring = &recv_ctx->sds_rings[i];
+
+               prq_sds[i].host_phys_addr = cpu_to_le64(sds_ring->phys_addr);
+               prq_sds[i].ring_size = cpu_to_le32(sds_ring->num_desc);
+               prq_sds[i].msi_index = cpu_to_le16(i);
+       }
+
+       phys_addr = hostrq_phys_addr;
+       memset(&cmd, 0, sizeof(cmd));
+       cmd.req.arg1 = (u32)(phys_addr >> 32);
+       cmd.req.arg2 = (u32)(phys_addr & 0xffffffff);
+       cmd.req.arg3 = rq_size;
+       cmd.req.cmd = NX_CDRP_CMD_CREATE_RX_CTX;
+       err = netxen_issue_cmd(adapter, &cmd);
+       if (err) {
+               printk(KERN_WARNING
+                       "Failed to create rx ctx in firmware: %d\n", err);
+               goto out_free_rsp;
+       }
+
+
+       prsp_rds = ((nx_cardrsp_rds_ring_t *)
+                        &prsp->data[le32_to_cpu(prsp->rds_ring_offset)]);
+
+       for (i = 0; i < le16_to_cpu(prsp->num_rds_rings); i++) {
+               rds_ring = &recv_ctx->rds_rings[i];
+
+               reg = le32_to_cpu(prsp_rds[i].host_producer_crb);
+               rds_ring->crb_rcv_producer = netxen_get_ioaddr(adapter,
+                               NETXEN_NIC_REG(reg - 0x200));
+       }
+
+       prsp_sds = ((nx_cardrsp_sds_ring_t *)
+                       &prsp->data[le32_to_cpu(prsp->sds_ring_offset)]);
+
+       for (i = 0; i < le16_to_cpu(prsp->num_sds_rings); i++) {
+               sds_ring = &recv_ctx->sds_rings[i];
+
+               reg = le32_to_cpu(prsp_sds[i].host_consumer_crb);
+               sds_ring->crb_sts_consumer = netxen_get_ioaddr(adapter,
+                               NETXEN_NIC_REG(reg - 0x200));
+
+               reg = le32_to_cpu(prsp_sds[i].interrupt_crb);
+               sds_ring->crb_intr_mask = netxen_get_ioaddr(adapter,
+                               NETXEN_NIC_REG(reg - 0x200));
+       }
+
+       recv_ctx->state = le32_to_cpu(prsp->host_ctx_state);
+       recv_ctx->context_id = le16_to_cpu(prsp->context_id);
+       recv_ctx->virt_port = prsp->virt_port;
+
+out_free_rsp:
+       pci_free_consistent(adapter->pdev, rsp_size, prsp, cardrsp_phys_addr);
+out_free_rq:
+       pci_free_consistent(adapter->pdev, rq_size, prq, hostrq_phys_addr);
+       return err;
+}
+
+static void
+nx_fw_cmd_destroy_rx_ctx(struct netxen_adapter *adapter)
+{
+       struct netxen_recv_context *recv_ctx = &adapter->recv_ctx;
+       struct netxen_cmd_args cmd;
+
+       memset(&cmd, 0, sizeof(cmd));
+       cmd.req.arg1 = recv_ctx->context_id;
+       cmd.req.arg2 = NX_DESTROY_CTX_RESET;
+       cmd.req.arg3 = 0;
+       cmd.req.cmd = NX_CDRP_CMD_DESTROY_RX_CTX;
+
+       if (netxen_issue_cmd(adapter, &cmd)) {
+               printk(KERN_WARNING
+                       "%s: Failed to destroy rx ctx in firmware\n",
+                       netxen_nic_driver_name);
+       }
+}
+
+static int
+nx_fw_cmd_create_tx_ctx(struct netxen_adapter *adapter)
+{
+       nx_hostrq_tx_ctx_t      *prq;
+       nx_hostrq_cds_ring_t    *prq_cds;
+       nx_cardrsp_tx_ctx_t     *prsp;
+       void    *rq_addr, *rsp_addr;
+       size_t  rq_size, rsp_size;
+       u32     temp;
+       int     err = 0;
+       u64     offset, phys_addr;
+       dma_addr_t      rq_phys_addr, rsp_phys_addr;
+       struct nx_host_tx_ring *tx_ring = adapter->tx_ring;
+       struct netxen_recv_context *recv_ctx = &adapter->recv_ctx;
+       struct netxen_cmd_args cmd;
+
+       rq_size = SIZEOF_HOSTRQ_TX(nx_hostrq_tx_ctx_t);
+       rq_addr = pci_alloc_consistent(adapter->pdev,
+               rq_size, &rq_phys_addr);
+       if (!rq_addr)
+               return -ENOMEM;
+
+       rsp_size = SIZEOF_CARDRSP_TX(nx_cardrsp_tx_ctx_t);
+       rsp_addr = pci_alloc_consistent(adapter->pdev,
+               rsp_size, &rsp_phys_addr);
+       if (!rsp_addr) {
+               err = -ENOMEM;
+               goto out_free_rq;
+       }
+
+       memset(rq_addr, 0, rq_size);
+       prq = rq_addr;
+
+       memset(rsp_addr, 0, rsp_size);
+       prsp = rsp_addr;
+
+       prq->host_rsp_dma_addr = cpu_to_le64(rsp_phys_addr);
+
+       temp = (NX_CAP0_LEGACY_CONTEXT | NX_CAP0_LEGACY_MN | NX_CAP0_LSO);
+       prq->capabilities[0] = cpu_to_le32(temp);
+
+       prq->host_int_crb_mode =
+               cpu_to_le32(NX_HOST_INT_CRB_MODE_SHARED);
+
+       prq->interrupt_ctl = 0;
+       prq->msi_index = 0;
+
+       prq->dummy_dma_addr = cpu_to_le64(adapter->dummy_dma.phys_addr);
+
+       offset = recv_ctx->phys_addr + sizeof(struct netxen_ring_ctx);
+       prq->cmd_cons_dma_addr = cpu_to_le64(offset);
+
+       prq_cds = &prq->cds_ring;
+
+       prq_cds->host_phys_addr = cpu_to_le64(tx_ring->phys_addr);
+       prq_cds->ring_size = cpu_to_le32(tx_ring->num_desc);
+
+       phys_addr = rq_phys_addr;
+       memset(&cmd, 0, sizeof(cmd));
+       cmd.req.arg1 = (u32)(phys_addr >> 32);
+       cmd.req.arg2 = ((u32)phys_addr & 0xffffffff);
+       cmd.req.arg3 = rq_size;
+       cmd.req.cmd = NX_CDRP_CMD_CREATE_TX_CTX;
+       err = netxen_issue_cmd(adapter, &cmd);
+
+       if (err == NX_RCODE_SUCCESS) {
+               temp = le32_to_cpu(prsp->cds_ring.host_producer_crb);
+               tx_ring->crb_cmd_producer = netxen_get_ioaddr(adapter,
+                               NETXEN_NIC_REG(temp - 0x200));
+#if 0
+               adapter->tx_state =
+                       le32_to_cpu(prsp->host_ctx_state);
+#endif
+               adapter->tx_context_id =
+                       le16_to_cpu(prsp->context_id);
+       } else {
+               printk(KERN_WARNING
+                       "Failed to create tx ctx in firmware: %d\n", err);
+               err = -EIO;
+       }
+
+       pci_free_consistent(adapter->pdev, rsp_size, rsp_addr, rsp_phys_addr);
+
+out_free_rq:
+       pci_free_consistent(adapter->pdev, rq_size, rq_addr, rq_phys_addr);
+
+       return err;
+}
+
+static void
+nx_fw_cmd_destroy_tx_ctx(struct netxen_adapter *adapter)
+{
+       struct netxen_cmd_args cmd;
+
+       memset(&cmd, 0, sizeof(cmd));
+       cmd.req.arg1 = adapter->tx_context_id;
+       cmd.req.arg2 = NX_DESTROY_CTX_RESET;
+       cmd.req.arg3 = 0;
+       cmd.req.cmd = NX_CDRP_CMD_DESTROY_TX_CTX;
+       if (netxen_issue_cmd(adapter, &cmd)) {
+               printk(KERN_WARNING
+                       "%s: Failed to destroy tx ctx in firmware\n",
+                       netxen_nic_driver_name);
+       }
+}
+
+int
+nx_fw_cmd_query_phy(struct netxen_adapter *adapter, u32 reg, u32 *val)
+{
+       u32 rcode;
+       struct netxen_cmd_args cmd;
+
+       memset(&cmd, 0, sizeof(cmd));
+       cmd.req.arg1 = reg;
+       cmd.req.arg2 = 0;
+       cmd.req.arg3 = 0;
+       cmd.req.cmd = NX_CDRP_CMD_READ_PHY;
+       cmd.rsp.arg1 = 1;
+       rcode = netxen_issue_cmd(adapter, &cmd);
+       if (rcode != NX_RCODE_SUCCESS)
+               return -EIO;
+
+       if (val == NULL)
+               return -EIO;
+
+       *val = cmd.rsp.arg1;
+       return 0;
+}
+
+int
+nx_fw_cmd_set_phy(struct netxen_adapter *adapter, u32 reg, u32 val)
+{
+       u32 rcode;
+       struct netxen_cmd_args cmd;
+
+       memset(&cmd, 0, sizeof(cmd));
+       cmd.req.arg1 = reg;
+       cmd.req.arg2 = val;
+       cmd.req.arg3 = 0;
+       cmd.req.cmd = NX_CDRP_CMD_WRITE_PHY;
+       rcode = netxen_issue_cmd(adapter, &cmd);
+       if (rcode != NX_RCODE_SUCCESS)
+               return -EIO;
+
+       return 0;
+}
+
+static u64 ctx_addr_sig_regs[][3] = {
+       {NETXEN_NIC_REG(0x188), NETXEN_NIC_REG(0x18c), NETXEN_NIC_REG(0x1c0)},
+       {NETXEN_NIC_REG(0x190), NETXEN_NIC_REG(0x194), NETXEN_NIC_REG(0x1c4)},
+       {NETXEN_NIC_REG(0x198), NETXEN_NIC_REG(0x19c), NETXEN_NIC_REG(0x1c8)},
+       {NETXEN_NIC_REG(0x1a0), NETXEN_NIC_REG(0x1a4), NETXEN_NIC_REG(0x1cc)}
+};
+
+#define CRB_CTX_ADDR_REG_LO(FUNC_ID)   (ctx_addr_sig_regs[FUNC_ID][0])
+#define CRB_CTX_ADDR_REG_HI(FUNC_ID)   (ctx_addr_sig_regs[FUNC_ID][2])
+#define CRB_CTX_SIGNATURE_REG(FUNC_ID) (ctx_addr_sig_regs[FUNC_ID][1])
+
+#define lower32(x)     ((u32)((x) & 0xffffffff))
+#define upper32(x)     ((u32)(((u64)(x) >> 32) & 0xffffffff))
+
+static struct netxen_recv_crb recv_crb_registers[] = {
+       /* Instance 0 */
+       {
+               /* crb_rcv_producer: */
+               {
+                       NETXEN_NIC_REG(0x100),
+                       /* Jumbo frames */
+                       NETXEN_NIC_REG(0x110),
+                       /* LRO */
+                       NETXEN_NIC_REG(0x120)
+               },
+               /* crb_sts_consumer: */
+               {
+                       NETXEN_NIC_REG(0x138),
+                       NETXEN_NIC_REG_2(0x000),
+                       NETXEN_NIC_REG_2(0x004),
+                       NETXEN_NIC_REG_2(0x008),
+               },
+               /* sw_int_mask */
+               {
+                       CRB_SW_INT_MASK_0,
+                       NETXEN_NIC_REG_2(0x044),
+                       NETXEN_NIC_REG_2(0x048),
+                       NETXEN_NIC_REG_2(0x04c),
+               },
+       },
+       /* Instance 1 */
+       {
+               /* crb_rcv_producer: */
+               {
+                       NETXEN_NIC_REG(0x144),
+                       /* Jumbo frames */
+                       NETXEN_NIC_REG(0x154),
+                       /* LRO */
+                       NETXEN_NIC_REG(0x164)
+               },
+               /* crb_sts_consumer: */
+               {
+                       NETXEN_NIC_REG(0x17c),
+                       NETXEN_NIC_REG_2(0x020),
+                       NETXEN_NIC_REG_2(0x024),
+                       NETXEN_NIC_REG_2(0x028),
+               },
+               /* sw_int_mask */
+               {
+                       CRB_SW_INT_MASK_1,
+                       NETXEN_NIC_REG_2(0x064),
+                       NETXEN_NIC_REG_2(0x068),
+                       NETXEN_NIC_REG_2(0x06c),
+               },
+       },
+       /* Instance 2 */
+       {
+               /* crb_rcv_producer: */
+               {
+                       NETXEN_NIC_REG(0x1d8),
+                       /* Jumbo frames */
+                       NETXEN_NIC_REG(0x1f8),
+                       /* LRO */
+                       NETXEN_NIC_REG(0x208)
+               },
+               /* crb_sts_consumer: */
+               {
+                       NETXEN_NIC_REG(0x220),
+                       NETXEN_NIC_REG_2(0x03c),
+                       NETXEN_NIC_REG_2(0x03c),
+                       NETXEN_NIC_REG_2(0x03c),
+               },
+               /* sw_int_mask */
+               {
+                       CRB_SW_INT_MASK_2,
+                       NETXEN_NIC_REG_2(0x03c),
+                       NETXEN_NIC_REG_2(0x03c),
+                       NETXEN_NIC_REG_2(0x03c),
+               },
+       },
+       /* Instance 3 */
+       {
+               /* crb_rcv_producer: */
+               {
+                       NETXEN_NIC_REG(0x22c),
+                       /* Jumbo frames */
+                       NETXEN_NIC_REG(0x23c),
+                       /* LRO */
+                       NETXEN_NIC_REG(0x24c)
+               },
+               /* crb_sts_consumer: */
+               {
+                       NETXEN_NIC_REG(0x264),
+                       NETXEN_NIC_REG_2(0x03c),
+                       NETXEN_NIC_REG_2(0x03c),
+                       NETXEN_NIC_REG_2(0x03c),
+               },
+               /* sw_int_mask */
+               {
+                       CRB_SW_INT_MASK_3,
+                       NETXEN_NIC_REG_2(0x03c),
+                       NETXEN_NIC_REG_2(0x03c),
+                       NETXEN_NIC_REG_2(0x03c),
+               },
+       },
+};
+
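+/*
+ * Legacy (P2) context setup: the ring context lives in host memory and is
+ * handed to firmware by writing its DMA address and a signature into
+ * per-port CRB registers rather than through CDRP commands.
+ */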
+static int
+netxen_init_old_ctx(struct netxen_adapter *adapter)
+{
+       struct netxen_recv_context *recv_ctx;
+       struct nx_host_rds_ring *rds_ring;
+       struct nx_host_sds_ring *sds_ring;
+       struct nx_host_tx_ring *tx_ring;
+       int ring;
+       int port = adapter->portnum;
+       struct netxen_ring_ctx *hwctx;
+       u32 signature;
+
+       tx_ring = adapter->tx_ring;
+       recv_ctx = &adapter->recv_ctx;
+       hwctx = recv_ctx->hwctx;
+
+       hwctx->cmd_ring_addr = cpu_to_le64(tx_ring->phys_addr);
+       hwctx->cmd_ring_size = cpu_to_le32(tx_ring->num_desc);
+
+
+       for (ring = 0; ring < adapter->max_rds_rings; ring++) {
+               rds_ring = &recv_ctx->rds_rings[ring];
+
+               hwctx->rcv_rings[ring].addr =
+                       cpu_to_le64(rds_ring->phys_addr);
+               hwctx->rcv_rings[ring].size =
+                       cpu_to_le32(rds_ring->num_desc);
+       }
+
+       for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+               sds_ring = &recv_ctx->sds_rings[ring];
+
+               if (ring == 0) {
+                       hwctx->sts_ring_addr = cpu_to_le64(sds_ring->phys_addr);
+                       hwctx->sts_ring_size = cpu_to_le32(sds_ring->num_desc);
+               }
+               hwctx->sts_rings[ring].addr = cpu_to_le64(sds_ring->phys_addr);
+               hwctx->sts_rings[ring].size = cpu_to_le32(sds_ring->num_desc);
+               hwctx->sts_rings[ring].msi_index = cpu_to_le16(ring);
+       }
+       hwctx->sts_ring_count = cpu_to_le32(adapter->max_sds_rings);
+
+       signature = (adapter->max_sds_rings > 1) ?
+               NETXEN_CTX_SIGNATURE_V2 : NETXEN_CTX_SIGNATURE;
+
+       NXWR32(adapter, CRB_CTX_ADDR_REG_LO(port),
+                       lower32(recv_ctx->phys_addr));
+       NXWR32(adapter, CRB_CTX_ADDR_REG_HI(port),
+                       upper32(recv_ctx->phys_addr));
+       NXWR32(adapter, CRB_CTX_SIGNATURE_REG(port),
+                       signature | port);
+       return 0;
+}
+
+int netxen_alloc_hw_resources(struct netxen_adapter *adapter)
+{
+       void *addr;
+       int err = 0;
+       int ring;
+       struct netxen_recv_context *recv_ctx;
+       struct nx_host_rds_ring *rds_ring;
+       struct nx_host_sds_ring *sds_ring;
+       struct nx_host_tx_ring *tx_ring;
+
+       struct pci_dev *pdev = adapter->pdev;
+       struct net_device *netdev = adapter->netdev;
+       int port = adapter->portnum;
+
+       recv_ctx = &adapter->recv_ctx;
+       tx_ring = adapter->tx_ring;
+
+       addr = pci_alloc_consistent(pdev,
+                       sizeof(struct netxen_ring_ctx) + sizeof(uint32_t),
+                       &recv_ctx->phys_addr);
+       if (addr == NULL) {
+               dev_err(&pdev->dev, "failed to allocate hw context\n");
+               return -ENOMEM;
+       }
+
+       memset(addr, 0, sizeof(struct netxen_ring_ctx));
+       recv_ctx->hwctx = addr;
+       recv_ctx->hwctx->ctx_id = cpu_to_le32(port);
+       recv_ctx->hwctx->cmd_consumer_offset =
+               cpu_to_le64(recv_ctx->phys_addr +
+                       sizeof(struct netxen_ring_ctx));
+       tx_ring->hw_consumer =
+               (__le32 *)(((char *)addr) + sizeof(struct netxen_ring_ctx));
+
+       /* cmd desc ring */
+       addr = pci_alloc_consistent(pdev, TX_DESC_RINGSIZE(tx_ring),
+                       &tx_ring->phys_addr);
+
+       if (addr == NULL) {
+               dev_err(&pdev->dev, "%s: failed to allocate tx desc ring\n",
+                               netdev->name);
+               err = -ENOMEM;
+               goto err_out_free;
+       }
+
+       tx_ring->desc_head = addr;
+
+       for (ring = 0; ring < adapter->max_rds_rings; ring++) {
+               rds_ring = &recv_ctx->rds_rings[ring];
+               addr = pci_alloc_consistent(adapter->pdev,
+                               RCV_DESC_RINGSIZE(rds_ring),
+                               &rds_ring->phys_addr);
+               if (addr == NULL) {
+                       dev_err(&pdev->dev,
+                               "%s: failed to allocate rds ring [%d]\n",
+                               netdev->name, ring);
+                       err = -ENOMEM;
+                       goto err_out_free;
+               }
+               rds_ring->desc_head = addr;
+
+               if (NX_IS_REVISION_P2(adapter->ahw.revision_id))
+                       rds_ring->crb_rcv_producer =
+                               netxen_get_ioaddr(adapter,
+                       recv_crb_registers[port].crb_rcv_producer[ring]);
+       }
+
+       for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+               sds_ring = &recv_ctx->sds_rings[ring];
+
+               addr = pci_alloc_consistent(adapter->pdev,
+                               STATUS_DESC_RINGSIZE(sds_ring),
+                               &sds_ring->phys_addr);
+               if (addr == NULL) {
+                       dev_err(&pdev->dev,
+                               "%s: failed to allocate sds ring [%d]\n",
+                               netdev->name, ring);
+                       err = -ENOMEM;
+                       goto err_out_free;
+               }
+               sds_ring->desc_head = addr;
+
+               if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) {
+                       sds_ring->crb_sts_consumer =
+                               netxen_get_ioaddr(adapter,
+                               recv_crb_registers[port].crb_sts_consumer[ring]);
+
+                       sds_ring->crb_intr_mask =
+                               netxen_get_ioaddr(adapter,
+                               recv_crb_registers[port].sw_int_mask[ring]);
+               }
+       }
+
+
+       if (!NX_IS_REVISION_P2(adapter->ahw.revision_id)) {
+               if (test_and_set_bit(__NX_FW_ATTACHED, &adapter->state))
+                       goto done;
+               err = nx_fw_cmd_create_rx_ctx(adapter);
+               if (err)
+                       goto err_out_free;
+               err = nx_fw_cmd_create_tx_ctx(adapter);
+               if (err)
+                       goto err_out_free;
+       } else {
+               err = netxen_init_old_ctx(adapter);
+               if (err)
+                       goto err_out_free;
+       }
+
+done:
+       return 0;
+
+err_out_free:
+       netxen_free_hw_resources(adapter);
+       return err;
+}
+
+void netxen_free_hw_resources(struct netxen_adapter *adapter)
+{
+       struct netxen_recv_context *recv_ctx;
+       struct nx_host_rds_ring *rds_ring;
+       struct nx_host_sds_ring *sds_ring;
+       struct nx_host_tx_ring *tx_ring;
+       int ring;
+
+       int port = adapter->portnum;
+
+       if (!NX_IS_REVISION_P2(adapter->ahw.revision_id)) {
+               if (!test_and_clear_bit(__NX_FW_ATTACHED, &adapter->state))
+                       goto done;
+
+               nx_fw_cmd_destroy_rx_ctx(adapter);
+               nx_fw_cmd_destroy_tx_ctx(adapter);
+       } else {
+               netxen_api_lock(adapter);
+               NXWR32(adapter, CRB_CTX_SIGNATURE_REG(port),
+                               NETXEN_CTX_D3_RESET | port);
+               netxen_api_unlock(adapter);
+       }
+
+       /* Allow dma queues to drain after context reset */
+       msleep(20);
+
+done:
+       recv_ctx = &adapter->recv_ctx;
+
+       if (recv_ctx->hwctx != NULL) {
+               pci_free_consistent(adapter->pdev,
+                               sizeof(struct netxen_ring_ctx) +
+                               sizeof(uint32_t),
+                               recv_ctx->hwctx,
+                               recv_ctx->phys_addr);
+               recv_ctx->hwctx = NULL;
+       }
+
+       tx_ring = adapter->tx_ring;
+       if (tx_ring->desc_head != NULL) {
+               pci_free_consistent(adapter->pdev,
+                               TX_DESC_RINGSIZE(tx_ring),
+                               tx_ring->desc_head, tx_ring->phys_addr);
+               tx_ring->desc_head = NULL;
+       }
+
+       for (ring = 0; ring < adapter->max_rds_rings; ring++) {
+               rds_ring = &recv_ctx->rds_rings[ring];
+
+               if (rds_ring->desc_head != NULL) {
+                       pci_free_consistent(adapter->pdev,
+                                       RCV_DESC_RINGSIZE(rds_ring),
+                                       rds_ring->desc_head,
+                                       rds_ring->phys_addr);
+                       rds_ring->desc_head = NULL;
+               }
+       }
+
+       for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+               sds_ring = &recv_ctx->sds_rings[ring];
+
+               if (sds_ring->desc_head != NULL) {
+                       pci_free_consistent(adapter->pdev,
+                               STATUS_DESC_RINGSIZE(sds_ring),
+                               sds_ring->desc_head,
+                               sds_ring->phys_addr);
+                       sds_ring->desc_head = NULL;
+               }
+       }
+}
+
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c
new file mode 100644 (file)
index 0000000..f903446
--- /dev/null
@@ -0,0 +1,957 @@
+/*
+ * Copyright (C) 2003 - 2009 NetXen, Inc.
+ * Copyright (C) 2009 - QLogic Corporation.
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called "COPYING".
+ *
+ */
+
+#include <linux/types.h>
+#include <linux/delay.h>
+#include <linux/pci.h>
+#include <asm/io.h>
+#include <linux/netdevice.h>
+#include <linux/ethtool.h>
+
+#include "netxen_nic.h"
+#include "netxen_nic_hw.h"
+
+struct netxen_nic_stats {
+       char stat_string[ETH_GSTRING_LEN];
+       int sizeof_stat;
+       int stat_offset;
+};
+
+#define NETXEN_NIC_STAT(m) sizeof(((struct netxen_adapter *)0)->m), \
+                       offsetof(struct netxen_adapter, m)
+
+#define NETXEN_NIC_PORT_WINDOW 0x10000
+#define NETXEN_NIC_INVALID_DATA 0xDEADBEEF
+
+static const struct netxen_nic_stats netxen_nic_gstrings_stats[] = {
+       {"xmit_called", NETXEN_NIC_STAT(stats.xmitcalled)},
+       {"xmit_finished", NETXEN_NIC_STAT(stats.xmitfinished)},
+       {"rx_dropped", NETXEN_NIC_STAT(stats.rxdropped)},
+       {"tx_dropped", NETXEN_NIC_STAT(stats.txdropped)},
+       {"csummed", NETXEN_NIC_STAT(stats.csummed)},
+       {"rx_pkts", NETXEN_NIC_STAT(stats.rx_pkts)},
+       {"lro_pkts", NETXEN_NIC_STAT(stats.lro_pkts)},
+       {"rx_bytes", NETXEN_NIC_STAT(stats.rxbytes)},
+       {"tx_bytes", NETXEN_NIC_STAT(stats.txbytes)},
+};
+
+#define NETXEN_NIC_STATS_LEN   ARRAY_SIZE(netxen_nic_gstrings_stats)
+
+static const char netxen_nic_gstrings_test[][ETH_GSTRING_LEN] = {
+       "Register_Test_on_offline",
+       "Link_Test_on_offline"
+};
+
+#define NETXEN_NIC_TEST_LEN    ARRAY_SIZE(netxen_nic_gstrings_test)
+
+#define NETXEN_NIC_REGS_COUNT 30
+#define NETXEN_NIC_REGS_LEN (NETXEN_NIC_REGS_COUNT * sizeof(__le32))
+#define NETXEN_MAX_EEPROM_LEN   1024
+
+static int netxen_nic_get_eeprom_len(struct net_device *dev)
+{
+       return NETXEN_FLASH_TOTAL_SIZE;
+}
+
+static void
+netxen_nic_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo)
+{
+       struct netxen_adapter *adapter = netdev_priv(dev);
+       u32 fw_major = 0;
+       u32 fw_minor = 0;
+       u32 fw_build = 0;
+
+       strlcpy(drvinfo->driver, netxen_nic_driver_name,
+               sizeof(drvinfo->driver));
+       strlcpy(drvinfo->version, NETXEN_NIC_LINUX_VERSIONID,
+               sizeof(drvinfo->version));
+       fw_major = NXRD32(adapter, NETXEN_FW_VERSION_MAJOR);
+       fw_minor = NXRD32(adapter, NETXEN_FW_VERSION_MINOR);
+       fw_build = NXRD32(adapter, NETXEN_FW_VERSION_SUB);
+       snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
+               "%d.%d.%d", fw_major, fw_minor, fw_build);
+
+       strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
+               sizeof(drvinfo->bus_info));
+}
+
+static int
+netxen_nic_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
+{
+       struct netxen_adapter *adapter = netdev_priv(dev);
+       int check_sfp_module = 0;
+
+       /* read which mode */
+       if (adapter->ahw.port_type == NETXEN_NIC_GBE) {
+               ecmd->supported = (SUPPORTED_10baseT_Half |
+                                  SUPPORTED_10baseT_Full |
+                                  SUPPORTED_100baseT_Half |
+                                  SUPPORTED_100baseT_Full |
+                                  SUPPORTED_1000baseT_Half |
+                                  SUPPORTED_1000baseT_Full);
+
+               ecmd->advertising = (ADVERTISED_100baseT_Half |
+                                    ADVERTISED_100baseT_Full |
+                                    ADVERTISED_1000baseT_Half |
+                                    ADVERTISED_1000baseT_Full);
+
+               ecmd->port = PORT_TP;
+
+               ethtool_cmd_speed_set(ecmd, adapter->link_speed);
+               ecmd->duplex = adapter->link_duplex;
+               ecmd->autoneg = adapter->link_autoneg;
+
+       } else if (adapter->ahw.port_type == NETXEN_NIC_XGBE) {
+               u32 val;
+
+               val = NXRD32(adapter, NETXEN_PORT_MODE_ADDR);
+               if (val == NETXEN_PORT_MODE_802_3_AP) {
+                       ecmd->supported = SUPPORTED_1000baseT_Full;
+                       ecmd->advertising = ADVERTISED_1000baseT_Full;
+               } else {
+                       ecmd->supported = SUPPORTED_10000baseT_Full;
+                       ecmd->advertising = ADVERTISED_10000baseT_Full;
+               }
+
+               if (netif_running(dev) && adapter->has_link_events) {
+                       ethtool_cmd_speed_set(ecmd, adapter->link_speed);
+                       ecmd->autoneg = adapter->link_autoneg;
+                       ecmd->duplex = adapter->link_duplex;
+                       goto skip;
+               }
+
+               ecmd->port = PORT_TP;
+
+               if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) {
+                       u16 pcifn = adapter->ahw.pci_func;
+
+                       val = NXRD32(adapter, P3_LINK_SPEED_REG(pcifn));
+                       ethtool_cmd_speed_set(ecmd, P3_LINK_SPEED_MHZ *
+                                             P3_LINK_SPEED_VAL(pcifn, val));
+               } else
+                       ethtool_cmd_speed_set(ecmd, SPEED_10000);
+
+               ecmd->duplex = DUPLEX_FULL;
+               ecmd->autoneg = AUTONEG_DISABLE;
+       } else
+               return -EIO;
+
+skip:
+       ecmd->phy_address = adapter->physical_port;
+       ecmd->transceiver = XCVR_EXTERNAL;
+
+       switch (adapter->ahw.board_type) {
+       case NETXEN_BRDTYPE_P2_SB35_4G:
+       case NETXEN_BRDTYPE_P2_SB31_2G:
+       case NETXEN_BRDTYPE_P3_REF_QG:
+       case NETXEN_BRDTYPE_P3_4_GB:
+       case NETXEN_BRDTYPE_P3_4_GB_MM:
+
+               ecmd->supported |= SUPPORTED_Autoneg;
+               ecmd->advertising |= ADVERTISED_Autoneg;
+       case NETXEN_BRDTYPE_P2_SB31_10G_CX4:
+       case NETXEN_BRDTYPE_P3_10G_CX4:
+       case NETXEN_BRDTYPE_P3_10G_CX4_LP:
+       case NETXEN_BRDTYPE_P3_10000_BASE_T:
+               ecmd->supported |= SUPPORTED_TP;
+               ecmd->advertising |= ADVERTISED_TP;
+               ecmd->port = PORT_TP;
+               ecmd->autoneg = (adapter->ahw.board_type ==
+                                NETXEN_BRDTYPE_P2_SB31_10G_CX4) ?
+                   (AUTONEG_DISABLE) : (adapter->link_autoneg);
+               break;
+       case NETXEN_BRDTYPE_P2_SB31_10G_HMEZ:
+       case NETXEN_BRDTYPE_P2_SB31_10G_IMEZ:
+       case NETXEN_BRDTYPE_P3_IMEZ:
+       case NETXEN_BRDTYPE_P3_XG_LOM:
+       case NETXEN_BRDTYPE_P3_HMEZ:
+               ecmd->supported |= SUPPORTED_MII;
+               ecmd->advertising |= ADVERTISED_MII;
+               ecmd->port = PORT_MII;
+               ecmd->autoneg = AUTONEG_DISABLE;
+               break;
+       case NETXEN_BRDTYPE_P3_10G_SFP_PLUS:
+       case NETXEN_BRDTYPE_P3_10G_SFP_CT:
+       case NETXEN_BRDTYPE_P3_10G_SFP_QT:
+               ecmd->advertising |= ADVERTISED_TP;
+               ecmd->supported |= SUPPORTED_TP;
+               check_sfp_module = netif_running(dev) &&
+                       adapter->has_link_events;
+       case NETXEN_BRDTYPE_P2_SB31_10G:
+       case NETXEN_BRDTYPE_P3_10G_XFP:
+               ecmd->supported |= SUPPORTED_FIBRE;
+               ecmd->advertising |= ADVERTISED_FIBRE;
+               ecmd->port = PORT_FIBRE;
+               ecmd->autoneg = AUTONEG_DISABLE;
+               break;
+       case NETXEN_BRDTYPE_P3_10G_TP:
+               if (adapter->ahw.port_type == NETXEN_NIC_XGBE) {
+                       ecmd->autoneg = AUTONEG_DISABLE;
+                       ecmd->supported |= (SUPPORTED_FIBRE | SUPPORTED_TP);
+                       ecmd->advertising |=
+                               (ADVERTISED_FIBRE | ADVERTISED_TP);
+                       ecmd->port = PORT_FIBRE;
+                       check_sfp_module = netif_running(dev) &&
+                               adapter->has_link_events;
+               } else {
+                       ecmd->supported |= (SUPPORTED_TP | SUPPORTED_Autoneg);
+                       ecmd->advertising |=
+                               (ADVERTISED_TP | ADVERTISED_Autoneg);
+                       ecmd->port = PORT_TP;
+               }
+               break;
+       default:
+               printk(KERN_ERR "netxen-nic: Unsupported board model %d\n",
+                               adapter->ahw.board_type);
+               return -EIO;
+       }
+
+       if (check_sfp_module) {
+               switch (adapter->module_type) {
+               case LINKEVENT_MODULE_OPTICAL_UNKNOWN:
+               case LINKEVENT_MODULE_OPTICAL_SRLR:
+               case LINKEVENT_MODULE_OPTICAL_LRM:
+               case LINKEVENT_MODULE_OPTICAL_SFP_1G:
+                       ecmd->port = PORT_FIBRE;
+                       break;
+               case LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLE:
+               case LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLELEN:
+               case LINKEVENT_MODULE_TWINAX:
+                       ecmd->port = PORT_TP;
+                       break;
+               default:
+                       ecmd->port = -1;
+               }
+       }
+
+       if (!netif_running(dev) || !adapter->ahw.linkup) {
+               ecmd->duplex = DUPLEX_UNKNOWN;
+               ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN);
+       }
+
+       return 0;
+}
+
+static int
+netxen_nic_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
+{
+       struct netxen_adapter *adapter = netdev_priv(dev);
+       u32 speed = ethtool_cmd_speed(ecmd);
+       int ret;
+
+       if (adapter->ahw.port_type != NETXEN_NIC_GBE)
+               return -EOPNOTSUPP;
+
+       if (!(adapter->capabilities & NX_FW_CAPABILITY_GBE_LINK_CFG))
+               return -EOPNOTSUPP;
+
+       ret = nx_fw_cmd_set_gbe_port(adapter, speed, ecmd->duplex,
+                                    ecmd->autoneg);
+       if (ret == NX_RCODE_NOT_SUPPORTED)
+               return -EOPNOTSUPP;
+       else if (ret)
+               return -EIO;
+
+       adapter->link_speed = speed;
+       adapter->link_duplex = ecmd->duplex;
+       adapter->link_autoneg = ecmd->autoneg;
+
+       if (!netif_running(dev))
+               return 0;
+
+       dev->netdev_ops->ndo_stop(dev);
+       return dev->netdev_ops->ndo_open(dev);
+}
+
+static int netxen_nic_get_regs_len(struct net_device *dev)
+{
+       return NETXEN_NIC_REGS_LEN;
+}
+
+static void
+netxen_nic_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *p)
+{
+       struct netxen_adapter *adapter = netdev_priv(dev);
+       struct netxen_recv_context *recv_ctx = &adapter->recv_ctx;
+       struct nx_host_sds_ring *sds_ring;
+       u32 *regs_buff = p;
+       int ring, i = 0;
+       int port = adapter->physical_port;
+
+       memset(p, 0, NETXEN_NIC_REGS_LEN);
+
+       regs->version = (1 << 24) | (adapter->ahw.revision_id << 16) |
+           (adapter->pdev)->device;
+
+       if (adapter->is_up != NETXEN_ADAPTER_UP_MAGIC)
+               return;
+
+       regs_buff[i++] = NXRD32(adapter, CRB_CMDPEG_STATE);
+       regs_buff[i++] = NXRD32(adapter, CRB_RCVPEG_STATE);
+       regs_buff[i++] = NXRD32(adapter, CRB_FW_CAPABILITIES_1);
+       regs_buff[i++] = NXRDIO(adapter, adapter->crb_int_state_reg);
+       regs_buff[i++] = NXRD32(adapter, NX_CRB_DEV_REF_COUNT);
+       regs_buff[i++] = NXRD32(adapter, NX_CRB_DEV_STATE);
+       regs_buff[i++] = NXRD32(adapter, NETXEN_PEG_ALIVE_COUNTER);
+       regs_buff[i++] = NXRD32(adapter, NETXEN_PEG_HALT_STATUS1);
+       regs_buff[i++] = NXRD32(adapter, NETXEN_PEG_HALT_STATUS2);
+
+       regs_buff[i++] = NXRD32(adapter, NETXEN_CRB_PEG_NET_0+0x3c);
+       regs_buff[i++] = NXRD32(adapter, NETXEN_CRB_PEG_NET_1+0x3c);
+       regs_buff[i++] = NXRD32(adapter, NETXEN_CRB_PEG_NET_2+0x3c);
+       regs_buff[i++] = NXRD32(adapter, NETXEN_CRB_PEG_NET_3+0x3c);
+
+       if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) {
+
+               regs_buff[i++] = NXRD32(adapter, NETXEN_CRB_PEG_NET_4+0x3c);
+               i += 2;
+
+               regs_buff[i++] = NXRD32(adapter, CRB_XG_STATE_P3);
+               regs_buff[i++] = le32_to_cpu(*(adapter->tx_ring->hw_consumer));
+
+       } else {
+               i++;
+
+               regs_buff[i++] = NXRD32(adapter,
+                                       NETXEN_NIU_XGE_CONFIG_0+(0x10000*port));
+               regs_buff[i++] = NXRD32(adapter,
+                                       NETXEN_NIU_XGE_CONFIG_1+(0x10000*port));
+
+               regs_buff[i++] = NXRD32(adapter, CRB_XG_STATE);
+               regs_buff[i++] = NXRDIO(adapter,
+                                adapter->tx_ring->crb_cmd_consumer);
+       }
+
+       regs_buff[i++] = NXRDIO(adapter, adapter->tx_ring->crb_cmd_producer);
+
+       regs_buff[i++] = NXRDIO(adapter,
+                        recv_ctx->rds_rings[0].crb_rcv_producer);
+       regs_buff[i++] = NXRDIO(adapter,
+                        recv_ctx->rds_rings[1].crb_rcv_producer);
+
+       regs_buff[i++] = adapter->max_sds_rings;
+
+       for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+               sds_ring = &(recv_ctx->sds_rings[ring]);
+               regs_buff[i++] = NXRDIO(adapter,
+                                       sds_ring->crb_sts_consumer);
+       }
+}
+
+static u32 netxen_nic_test_link(struct net_device *dev)
+{
+       struct netxen_adapter *adapter = netdev_priv(dev);
+       u32 val, port;
+
+       port = adapter->physical_port;
+       if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) {
+               val = NXRD32(adapter, CRB_XG_STATE_P3);
+               val = XG_LINK_STATE_P3(adapter->ahw.pci_func, val);
+               return (val == XG_LINK_UP_P3) ? 0 : 1;
+       } else {
+               val = NXRD32(adapter, CRB_XG_STATE);
+               val = (val >> port*8) & 0xff;
+               return (val == XG_LINK_UP) ? 0 : 1;
+       }
+}
+
+static int
+netxen_nic_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
+                     u8 *bytes)
+{
+       struct netxen_adapter *adapter = netdev_priv(dev);
+       int offset;
+       int ret;
+
+       if (eeprom->len == 0)
+               return -EINVAL;
+
+       eeprom->magic = (adapter->pdev)->vendor |
+                       ((adapter->pdev)->device << 16);
+       offset = eeprom->offset;
+
+       ret = netxen_rom_fast_read_words(adapter, offset, bytes,
+                                               eeprom->len);
+       if (ret < 0)
+               return ret;
+
+       return 0;
+}
+
+static void
+netxen_nic_get_ringparam(struct net_device *dev,
+               struct ethtool_ringparam *ring)
+{
+       struct netxen_adapter *adapter = netdev_priv(dev);
+
+       ring->rx_pending = adapter->num_rxd;
+       ring->rx_jumbo_pending = adapter->num_jumbo_rxd;
+       ring->rx_jumbo_pending += adapter->num_lro_rxd;
+       ring->tx_pending = adapter->num_txd;
+
+       if (adapter->ahw.port_type == NETXEN_NIC_GBE) {
+               ring->rx_max_pending = MAX_RCV_DESCRIPTORS_1G;
+               ring->rx_jumbo_max_pending = MAX_JUMBO_RCV_DESCRIPTORS_1G;
+       } else {
+               ring->rx_max_pending = MAX_RCV_DESCRIPTORS_10G;
+               ring->rx_jumbo_max_pending = MAX_JUMBO_RCV_DESCRIPTORS_10G;
+       }
+
+       ring->tx_max_pending = MAX_CMD_DESCRIPTORS;
+}
+
+static u32
+netxen_validate_ringparam(u32 val, u32 min, u32 max, char *r_name)
+{
+       u32 num_desc;
+       num_desc = max(val, min);
+       num_desc = min(num_desc, max);
+       num_desc = roundup_pow_of_two(num_desc);
+
+       if (val != num_desc) {
+               printk(KERN_INFO "%s: setting %s ring size %d instead of %d\n",
+                      netxen_nic_driver_name, r_name, num_desc, val);
+       }
+
+       return num_desc;
+}
+
+static int
+netxen_nic_set_ringparam(struct net_device *dev,
+               struct ethtool_ringparam *ring)
+{
+       struct netxen_adapter *adapter = netdev_priv(dev);
+       u16 max_rcv_desc = MAX_RCV_DESCRIPTORS_10G;
+       u16 max_jumbo_desc = MAX_JUMBO_RCV_DESCRIPTORS_10G;
+       u16 num_rxd, num_jumbo_rxd, num_txd;
+
+       if (NX_IS_REVISION_P2(adapter->ahw.revision_id))
+               return -EOPNOTSUPP;
+
+       if (ring->rx_mini_pending)
+               return -EOPNOTSUPP;
+
+       if (adapter->ahw.port_type == NETXEN_NIC_GBE) {
+               max_rcv_desc = MAX_RCV_DESCRIPTORS_1G;
+               max_jumbo_desc = MAX_JUMBO_RCV_DESCRIPTORS_10G;
+       }
+
+       num_rxd = netxen_validate_ringparam(ring->rx_pending,
+                       MIN_RCV_DESCRIPTORS, max_rcv_desc, "rx");
+
+       num_jumbo_rxd = netxen_validate_ringparam(ring->rx_jumbo_pending,
+                       MIN_JUMBO_DESCRIPTORS, max_jumbo_desc, "rx jumbo");
+
+       num_txd = netxen_validate_ringparam(ring->tx_pending,
+                       MIN_CMD_DESCRIPTORS, MAX_CMD_DESCRIPTORS, "tx");
+
+       if (num_rxd == adapter->num_rxd && num_txd == adapter->num_txd &&
+                       num_jumbo_rxd == adapter->num_jumbo_rxd)
+               return 0;
+
+       adapter->num_rxd = num_rxd;
+       adapter->num_jumbo_rxd = num_jumbo_rxd;
+       adapter->num_txd = num_txd;
+
+       return netxen_nic_reset_context(adapter);
+}
+
+static void
+netxen_nic_get_pauseparam(struct net_device *dev,
+                         struct ethtool_pauseparam *pause)
+{
+       struct netxen_adapter *adapter = netdev_priv(dev);
+       __u32 val;
+       int port = adapter->physical_port;
+
+       pause->autoneg = 0;
+
+       if (adapter->ahw.port_type == NETXEN_NIC_GBE) {
+               if ((port < 0) || (port >= NETXEN_NIU_MAX_GBE_PORTS))
+                       return;
+               /* get flow control settings */
+               val = NXRD32(adapter, NETXEN_NIU_GB_MAC_CONFIG_0(port));
+               pause->rx_pause = netxen_gb_get_rx_flowctl(val);
+               val = NXRD32(adapter, NETXEN_NIU_GB_PAUSE_CTL);
+               switch (port) {
+               case 0:
+                       pause->tx_pause = !(netxen_gb_get_gb0_mask(val));
+                       break;
+               case 1:
+                       pause->tx_pause = !(netxen_gb_get_gb1_mask(val));
+                       break;
+               case 2:
+                       pause->tx_pause = !(netxen_gb_get_gb2_mask(val));
+                       break;
+               case 3:
+               default:
+                       pause->tx_pause = !(netxen_gb_get_gb3_mask(val));
+                       break;
+               }
+       } else if (adapter->ahw.port_type == NETXEN_NIC_XGBE) {
+               if ((port < 0) || (port >= NETXEN_NIU_MAX_XG_PORTS))
+                       return;
+               pause->rx_pause = 1;
+               val = NXRD32(adapter, NETXEN_NIU_XG_PAUSE_CTL);
+               if (port == 0)
+                       pause->tx_pause = !(netxen_xg_get_xg0_mask(val));
+               else
+                       pause->tx_pause = !(netxen_xg_get_xg1_mask(val));
+       } else {
+               printk(KERN_ERR"%s: Unknown board type: %x\n",
+                               netxen_nic_driver_name, adapter->ahw.port_type);
+       }
+}
+
+static int
+netxen_nic_set_pauseparam(struct net_device *dev,
+                         struct ethtool_pauseparam *pause)
+{
+       struct netxen_adapter *adapter = netdev_priv(dev);
+       __u32 val;
+       int port = adapter->physical_port;
+
+       /* not supported */
+       if (pause->autoneg)
+               return -EINVAL;
+
+       /* read mode */
+       if (adapter->ahw.port_type == NETXEN_NIC_GBE) {
+               if ((port < 0) || (port >= NETXEN_NIU_MAX_GBE_PORTS))
+                       return -EIO;
+               /* set flow control */
+               val = NXRD32(adapter, NETXEN_NIU_GB_MAC_CONFIG_0(port));
+
+               if (pause->rx_pause)
+                       netxen_gb_rx_flowctl(val);
+               else
+                       netxen_gb_unset_rx_flowctl(val);
+
+               NXWR32(adapter, NETXEN_NIU_GB_MAC_CONFIG_0(port),
+                               val);
+               /* set autoneg */
+               val = NXRD32(adapter, NETXEN_NIU_GB_PAUSE_CTL);
+               switch (port) {
+               case 0:
+                       if (pause->tx_pause)
+                               netxen_gb_unset_gb0_mask(val);
+                       else
+                               netxen_gb_set_gb0_mask(val);
+                       break;
+               case 1:
+                       if (pause->tx_pause)
+                               netxen_gb_unset_gb1_mask(val);
+                       else
+                               netxen_gb_set_gb1_mask(val);
+                       break;
+               case 2:
+                       if (pause->tx_pause)
+                               netxen_gb_unset_gb2_mask(val);
+                       else
+                               netxen_gb_set_gb2_mask(val);
+                       break;
+               case 3:
+               default:
+                       if (pause->tx_pause)
+                               netxen_gb_unset_gb3_mask(val);
+                       else
+                               netxen_gb_set_gb3_mask(val);
+                       break;
+               }
+               NXWR32(adapter, NETXEN_NIU_GB_PAUSE_CTL, val);
+       } else if (adapter->ahw.port_type == NETXEN_NIC_XGBE) {
+               if ((port < 0) || (port >= NETXEN_NIU_MAX_XG_PORTS))
+                       return -EIO;
+               val = NXRD32(adapter, NETXEN_NIU_XG_PAUSE_CTL);
+               if (port == 0) {
+                       if (pause->tx_pause)
+                               netxen_xg_unset_xg0_mask(val);
+                       else
+                               netxen_xg_set_xg0_mask(val);
+               } else {
+                       if (pause->tx_pause)
+                               netxen_xg_unset_xg1_mask(val);
+                       else
+                               netxen_xg_set_xg1_mask(val);
+               }
+               NXWR32(adapter, NETXEN_NIU_XG_PAUSE_CTL, val);
+       } else {
+               printk(KERN_ERR "%s: Unknown board type: %x\n",
+                               netxen_nic_driver_name,
+                               adapter->ahw.port_type);
+       }
+       return 0;
+}
+
+static int netxen_nic_reg_test(struct net_device *dev)
+{
+       struct netxen_adapter *adapter = netdev_priv(dev);
+       u32 data_read, data_written;
+
+       data_read = NXRD32(adapter, NETXEN_PCIX_PH_REG(0));
+       if ((data_read & 0xffff) != adapter->pdev->vendor)
+               return 1;
+
+       if (NX_IS_REVISION_P3(adapter->ahw.revision_id))
+               return 0;
+
+       data_written = (u32)0xa5a5a5a5;
+
+       NXWR32(adapter, CRB_SCRATCHPAD_TEST, data_written);
+       data_read = NXRD32(adapter, CRB_SCRATCHPAD_TEST);
+       if (data_written != data_read)
+               return 1;
+
+       return 0;
+}
+
+static int netxen_get_sset_count(struct net_device *dev, int sset)
+{
+       switch (sset) {
+       case ETH_SS_TEST:
+               return NETXEN_NIC_TEST_LEN;
+       case ETH_SS_STATS:
+               return NETXEN_NIC_STATS_LEN;
+       default:
+               return -EOPNOTSUPP;
+       }
+}
+
+static void
+netxen_nic_diag_test(struct net_device *dev, struct ethtool_test *eth_test,
+                    u64 *data)
+{
+       memset(data, 0, sizeof(uint64_t) * NETXEN_NIC_TEST_LEN);
+       if ((data[0] = netxen_nic_reg_test(dev)))
+               eth_test->flags |= ETH_TEST_FL_FAILED;
+       /* link test */
+       if ((data[1] = (u64) netxen_nic_test_link(dev)))
+               eth_test->flags |= ETH_TEST_FL_FAILED;
+}
+
+static void
+netxen_nic_get_strings(struct net_device *dev, u32 stringset, u8 *data)
+{
+       int index;
+
+       switch (stringset) {
+       case ETH_SS_TEST:
+               memcpy(data, *netxen_nic_gstrings_test,
+                      NETXEN_NIC_TEST_LEN * ETH_GSTRING_LEN);
+               break;
+       case ETH_SS_STATS:
+               for (index = 0; index < NETXEN_NIC_STATS_LEN; index++) {
+                       memcpy(data + index * ETH_GSTRING_LEN,
+                              netxen_nic_gstrings_stats[index].stat_string,
+                              ETH_GSTRING_LEN);
+               }
+               break;
+       }
+}
+
+static void
+netxen_nic_get_ethtool_stats(struct net_device *dev,
+                            struct ethtool_stats *stats, u64 *data)
+{
+       struct netxen_adapter *adapter = netdev_priv(dev);
+       int index;
+
+       for (index = 0; index < NETXEN_NIC_STATS_LEN; index++) {
+               char *p =
+                   (char *)adapter +
+                   netxen_nic_gstrings_stats[index].stat_offset;
+               data[index] =
+                   (netxen_nic_gstrings_stats[index].sizeof_stat ==
+                    sizeof(u64)) ? *(u64 *) p : *(u32 *) p;
+       }
+}
+
+static void
+netxen_nic_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
+{
+       struct netxen_adapter *adapter = netdev_priv(dev);
+       u32 wol_cfg = 0;
+
+       wol->supported = 0;
+       wol->wolopts = 0;
+
+       if (NX_IS_REVISION_P2(adapter->ahw.revision_id))
+               return;
+
+       wol_cfg = NXRD32(adapter, NETXEN_WOL_CONFIG_NV);
+       if (wol_cfg & (1UL << adapter->portnum))
+               wol->supported |= WAKE_MAGIC;
+
+       wol_cfg = NXRD32(adapter, NETXEN_WOL_CONFIG);
+       if (wol_cfg & (1UL << adapter->portnum))
+               wol->wolopts |= WAKE_MAGIC;
+}
+
+static int
+netxen_nic_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
+{
+       struct netxen_adapter *adapter = netdev_priv(dev);
+       u32 wol_cfg = 0;
+
+       if (NX_IS_REVISION_P2(adapter->ahw.revision_id))
+               return -EOPNOTSUPP;
+
+       if (wol->wolopts & ~WAKE_MAGIC)
+               return -EOPNOTSUPP;
+
+       wol_cfg = NXRD32(adapter, NETXEN_WOL_CONFIG_NV);
+       if (!(wol_cfg & (1 << adapter->portnum)))
+               return -EOPNOTSUPP;
+
+       wol_cfg = NXRD32(adapter, NETXEN_WOL_CONFIG);
+       if (wol->wolopts & WAKE_MAGIC)
+               wol_cfg |= 1UL << adapter->portnum;
+       else
+               wol_cfg &= ~(1UL << adapter->portnum);
+       NXWR32(adapter, NETXEN_WOL_CONFIG, wol_cfg);
+
+       return 0;
+}
+
+/*
+ * Set the coalescing parameters. Currently only normal is supported.
+ * If rx_coalesce_usecs == 0 or rx_max_coalesced_frames == 0 then set the
+ * firmware coalescing to default.
+ */
+static int netxen_set_intr_coalesce(struct net_device *netdev,
+                       struct ethtool_coalesce *ethcoal)
+{
+       struct netxen_adapter *adapter = netdev_priv(netdev);
+
+       if (!NX_IS_REVISION_P3(adapter->ahw.revision_id))
+               return -EINVAL;
+
+       if (adapter->is_up != NETXEN_ADAPTER_UP_MAGIC)
+               return -EINVAL;
+
+       /*
+       * Return Error if unsupported values or
+       * unsupported parameters are set.
+       */
+       if (ethcoal->rx_coalesce_usecs > 0xffff ||
+               ethcoal->rx_max_coalesced_frames > 0xffff ||
+               ethcoal->tx_coalesce_usecs > 0xffff ||
+               ethcoal->tx_max_coalesced_frames > 0xffff ||
+               ethcoal->rx_coalesce_usecs_irq ||
+               ethcoal->rx_max_coalesced_frames_irq ||
+               ethcoal->tx_coalesce_usecs_irq ||
+               ethcoal->tx_max_coalesced_frames_irq ||
+               ethcoal->stats_block_coalesce_usecs ||
+               ethcoal->use_adaptive_rx_coalesce ||
+               ethcoal->use_adaptive_tx_coalesce ||
+               ethcoal->pkt_rate_low ||
+               ethcoal->rx_coalesce_usecs_low ||
+               ethcoal->rx_max_coalesced_frames_low ||
+               ethcoal->tx_coalesce_usecs_low ||
+               ethcoal->tx_max_coalesced_frames_low ||
+               ethcoal->pkt_rate_high ||
+               ethcoal->rx_coalesce_usecs_high ||
+               ethcoal->rx_max_coalesced_frames_high ||
+               ethcoal->tx_coalesce_usecs_high ||
+               ethcoal->tx_max_coalesced_frames_high)
+               return -EINVAL;
+
+       if (!ethcoal->rx_coalesce_usecs ||
+               !ethcoal->rx_max_coalesced_frames) {
+               adapter->coal.flags = NETXEN_NIC_INTR_DEFAULT;
+               adapter->coal.normal.data.rx_time_us =
+                       NETXEN_DEFAULT_INTR_COALESCE_RX_TIME_US;
+               adapter->coal.normal.data.rx_packets =
+                       NETXEN_DEFAULT_INTR_COALESCE_RX_PACKETS;
+       } else {
+               adapter->coal.flags = 0;
+               adapter->coal.normal.data.rx_time_us =
+               ethcoal->rx_coalesce_usecs;
+               adapter->coal.normal.data.rx_packets =
+               ethcoal->rx_max_coalesced_frames;
+       }
+       adapter->coal.normal.data.tx_time_us = ethcoal->tx_coalesce_usecs;
+       adapter->coal.normal.data.tx_packets =
+       ethcoal->tx_max_coalesced_frames;
+
+       netxen_config_intr_coalesce(adapter);
+
+       return 0;
+}
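The comment above netxen_set_intr_coalesce notes that only rx_coalesce_usecs and rx_max_coalesced_frames are honoured, and that zeroing either one restores the firmware defaults; the handler also insists on a P3 revision device with the adapter up. As a rough illustration of how this handler is reached from user space, here is a hedged standalone sketch that drives the generic ETHTOOL_SCOALESCE ioctl; the interface name "eth0" and the 64 us / 32 frame values are arbitrary placeholders, not anything mandated by this driver.

/* Hedged sketch: exercise the ETHTOOL_GCOALESCE/SCOALESCE path from user
 * space. "eth0" and the chosen values are placeholders. */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
	struct ethtool_coalesce ecoal;
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return 1;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);

	/* Read the current settings first so the tx fields are preserved. */
	memset(&ecoal, 0, sizeof(ecoal));
	ecoal.cmd = ETHTOOL_GCOALESCE;
	ifr.ifr_data = (char *)&ecoal;
	if (ioctl(fd, SIOCETHTOOL, &ifr) < 0) {
		perror("ETHTOOL_GCOALESCE");
		close(fd);
		return 1;
	}

	/* Only the rx usec/frame knobs matter to this driver; setting either
	 * one to zero would fall back to the firmware defaults. */
	ecoal.cmd = ETHTOOL_SCOALESCE;
	ecoal.rx_coalesce_usecs = 64;
	ecoal.rx_max_coalesced_frames = 32;
	if (ioctl(fd, SIOCETHTOOL, &ifr) < 0)
		perror("ETHTOOL_SCOALESCE");

	close(fd);
	return 0;
}

The same request can be issued with the ethtool utility, e.g. "ethtool -C eth0 rx-usecs 64 rx-frames 32".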
+
+static int netxen_get_intr_coalesce(struct net_device *netdev,
+                       struct ethtool_coalesce *ethcoal)
+{
+       struct netxen_adapter *adapter = netdev_priv(netdev);
+
+       if (!NX_IS_REVISION_P3(adapter->ahw.revision_id))
+               return -EINVAL;
+
+       if (adapter->is_up != NETXEN_ADAPTER_UP_MAGIC)
+               return -EINVAL;
+
+       ethcoal->rx_coalesce_usecs = adapter->coal.normal.data.rx_time_us;
+       ethcoal->tx_coalesce_usecs = adapter->coal.normal.data.tx_time_us;
+       ethcoal->rx_max_coalesced_frames =
+               adapter->coal.normal.data.rx_packets;
+       ethcoal->tx_max_coalesced_frames =
+               adapter->coal.normal.data.tx_packets;
+
+       return 0;
+}
+
+static int
+netxen_get_dump_flag(struct net_device *netdev, struct ethtool_dump *dump)
+{
+       struct netxen_adapter *adapter = netdev_priv(netdev);
+       struct netxen_minidump *mdump = &adapter->mdump;
+       if (adapter->fw_mdump_rdy)
+               dump->len = mdump->md_dump_size;
+       else
+               dump->len = 0;
+
+       if (!mdump->md_enabled)
+               dump->flag = ETH_FW_DUMP_DISABLE;
+       else
+               dump->flag = mdump->md_capture_mask;
+
+       dump->version = adapter->fw_version;
+       return 0;
+}
+
+static int
+netxen_set_dump(struct net_device *netdev, struct ethtool_dump *val)
+{
+       int i;
+       struct netxen_adapter *adapter = netdev_priv(netdev);
+       struct netxen_minidump *mdump = &adapter->mdump;
+
+       switch (val->flag) {
+       case NX_FORCE_FW_DUMP_KEY:
+               if (!mdump->md_enabled) {
+                       netdev_info(netdev, "FW dump not enabled\n");
+                       return 0;
+               }
+               if (adapter->fw_mdump_rdy) {
+                       netdev_info(netdev, "Previous dump not cleared, not forcing dump\n");
+                       return 0;
+               }
+               netdev_info(netdev, "Forcing a fw dump\n");
+               nx_dev_request_reset(adapter);
+               break;
+       case NX_DISABLE_FW_DUMP:
+               if (mdump->md_enabled) {
+                       netdev_info(netdev, "Disabling FW Dump\n");
+                       mdump->md_enabled = 0;
+               }
+               break;
+       case NX_ENABLE_FW_DUMP:
+               if (!mdump->md_enabled) {
+                       netdev_info(netdev, "Enabling FW dump\n");
+                       mdump->md_enabled = 1;
+               }
+               break;
+       case NX_FORCE_FW_RESET:
+               netdev_info(netdev, "Forcing FW reset\n");
+               nx_dev_request_reset(adapter);
+               adapter->flags &= ~NETXEN_FW_RESET_OWNER;
+               break;
+       default:
+               for (i = 0; i < ARRAY_SIZE(FW_DUMP_LEVELS); i++) {
+                       if (val->flag == FW_DUMP_LEVELS[i]) {
+                               mdump->md_capture_mask = val->flag;
+                               netdev_info(netdev,
+                                       "Driver mask changed to: 0x%x\n",
+                                       mdump->md_capture_mask);
+                               return 0;
+                       }
+               }
+               netdev_info(netdev,
+                       "Invalid dump level: 0x%x\n", val->flag);
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+static int
+netxen_get_dump_data(struct net_device *netdev, struct ethtool_dump *dump,
+                       void *buffer)
+{
+       int i, copy_sz;
+       u32 *hdr_ptr, *data;
+       struct netxen_adapter *adapter = netdev_priv(netdev);
+       struct netxen_minidump *mdump = &adapter->mdump;
+
+
+       if (!adapter->fw_mdump_rdy) {
+               netdev_info(netdev, "Dump not available\n");
+               return -EINVAL;
+       }
+       /* Copy template header first */
+       copy_sz = mdump->md_template_size;
+       hdr_ptr = (u32 *) mdump->md_template;
+       data = buffer;
+       for (i = 0; i < copy_sz/sizeof(u32); i++)
+               *data++ = cpu_to_le32(*hdr_ptr++);
+
+       /* Copy captured dump data */
+       memcpy(buffer + copy_sz,
+               mdump->md_capture_buff + mdump->md_template_size,
+                       mdump->md_capture_size);
+       dump->len = copy_sz + mdump->md_capture_size;
+       dump->flag = mdump->md_capture_mask;
+
+       /* Free dump area once data has been captured */
+       vfree(mdump->md_capture_buff);
+       mdump->md_capture_buff = NULL;
+       adapter->fw_mdump_rdy = 0;
+       netdev_info(netdev, "extracted the fw dump Successfully\n");
+       return 0;
+}
+
+const struct ethtool_ops netxen_nic_ethtool_ops = {
+       .get_settings = netxen_nic_get_settings,
+       .set_settings = netxen_nic_set_settings,
+       .get_drvinfo = netxen_nic_get_drvinfo,
+       .get_regs_len = netxen_nic_get_regs_len,
+       .get_regs = netxen_nic_get_regs,
+       .get_link = ethtool_op_get_link,
+       .get_eeprom_len = netxen_nic_get_eeprom_len,
+       .get_eeprom = netxen_nic_get_eeprom,
+       .get_ringparam = netxen_nic_get_ringparam,
+       .set_ringparam = netxen_nic_set_ringparam,
+       .get_pauseparam = netxen_nic_get_pauseparam,
+       .set_pauseparam = netxen_nic_set_pauseparam,
+       .get_wol = netxen_nic_get_wol,
+       .set_wol = netxen_nic_set_wol,
+       .self_test = netxen_nic_diag_test,
+       .get_strings = netxen_nic_get_strings,
+       .get_ethtool_stats = netxen_nic_get_ethtool_stats,
+       .get_sset_count = netxen_get_sset_count,
+       .get_coalesce = netxen_get_intr_coalesce,
+       .set_coalesce = netxen_set_intr_coalesce,
+       .get_dump_flag = netxen_get_dump_flag,
+       .get_dump_data = netxen_get_dump_data,
+       .set_dump = netxen_set_dump,
+};
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_hdr.h b/drivers/net/ethernet/qlogic/netxen/netxen_nic_hdr.h
new file mode 100644 (file)
index 0000000..a310c2f
--- /dev/null
@@ -0,0 +1,1079 @@
+/*
+ * Copyright (C) 2003 - 2009 NetXen, Inc.
+ * Copyright (C) 2009 - QLogic Corporation.
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called "COPYING".
+ *
+ */
+
+#ifndef __NETXEN_NIC_HDR_H_
+#define __NETXEN_NIC_HDR_H_
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+
+/*
+ * The basic unit of access when reading/writing control registers.
+ */
+
+typedef __le32 netxen_crbword_t;       /* single word in CRB space */
+
+enum {
+       NETXEN_HW_H0_CH_HUB_ADR = 0x05,
+       NETXEN_HW_H1_CH_HUB_ADR = 0x0E,
+       NETXEN_HW_H2_CH_HUB_ADR = 0x03,
+       NETXEN_HW_H3_CH_HUB_ADR = 0x01,
+       NETXEN_HW_H4_CH_HUB_ADR = 0x06,
+       NETXEN_HW_H5_CH_HUB_ADR = 0x07,
+       NETXEN_HW_H6_CH_HUB_ADR = 0x08
+};
+
+/*  Hub 0 */
+enum {
+       NETXEN_HW_MN_CRB_AGT_ADR = 0x15,
+       NETXEN_HW_MS_CRB_AGT_ADR = 0x25
+};
+
+/*  Hub 1 */
+enum {
+       NETXEN_HW_PS_CRB_AGT_ADR = 0x73,
+       NETXEN_HW_SS_CRB_AGT_ADR = 0x20,
+       NETXEN_HW_RPMX3_CRB_AGT_ADR = 0x0b,
+       NETXEN_HW_QMS_CRB_AGT_ADR = 0x00,
+       NETXEN_HW_SQGS0_CRB_AGT_ADR = 0x01,
+       NETXEN_HW_SQGS1_CRB_AGT_ADR = 0x02,
+       NETXEN_HW_SQGS2_CRB_AGT_ADR = 0x03,
+       NETXEN_HW_SQGS3_CRB_AGT_ADR = 0x04,
+       NETXEN_HW_C2C0_CRB_AGT_ADR = 0x58,
+       NETXEN_HW_C2C1_CRB_AGT_ADR = 0x59,
+       NETXEN_HW_C2C2_CRB_AGT_ADR = 0x5a,
+       NETXEN_HW_RPMX2_CRB_AGT_ADR = 0x0a,
+       NETXEN_HW_RPMX4_CRB_AGT_ADR = 0x0c,
+       NETXEN_HW_RPMX7_CRB_AGT_ADR = 0x0f,
+       NETXEN_HW_RPMX9_CRB_AGT_ADR = 0x12,
+       NETXEN_HW_SMB_CRB_AGT_ADR = 0x18
+};
+
+/*  Hub 2 */
+enum {
+       NETXEN_HW_NIU_CRB_AGT_ADR = 0x31,
+       NETXEN_HW_I2C0_CRB_AGT_ADR = 0x19,
+       NETXEN_HW_I2C1_CRB_AGT_ADR = 0x29,
+
+       NETXEN_HW_SN_CRB_AGT_ADR = 0x10,
+       NETXEN_HW_I2Q_CRB_AGT_ADR = 0x20,
+       NETXEN_HW_LPC_CRB_AGT_ADR = 0x22,
+       NETXEN_HW_ROMUSB_CRB_AGT_ADR = 0x21,
+       NETXEN_HW_QM_CRB_AGT_ADR = 0x66,
+       NETXEN_HW_SQG0_CRB_AGT_ADR = 0x60,
+       NETXEN_HW_SQG1_CRB_AGT_ADR = 0x61,
+       NETXEN_HW_SQG2_CRB_AGT_ADR = 0x62,
+       NETXEN_HW_SQG3_CRB_AGT_ADR = 0x63,
+       NETXEN_HW_RPMX1_CRB_AGT_ADR = 0x09,
+       NETXEN_HW_RPMX5_CRB_AGT_ADR = 0x0d,
+       NETXEN_HW_RPMX6_CRB_AGT_ADR = 0x0e,
+       NETXEN_HW_RPMX8_CRB_AGT_ADR = 0x11
+};
+
+/*  Hub 3 */
+enum {
+       NETXEN_HW_PH_CRB_AGT_ADR = 0x1A,
+       NETXEN_HW_SRE_CRB_AGT_ADR = 0x50,
+       NETXEN_HW_EG_CRB_AGT_ADR = 0x51,
+       NETXEN_HW_RPMX0_CRB_AGT_ADR = 0x08
+};
+
+/*  Hub 4 */
+enum {
+       NETXEN_HW_PEGN0_CRB_AGT_ADR = 0x40,
+       NETXEN_HW_PEGN1_CRB_AGT_ADR,
+       NETXEN_HW_PEGN2_CRB_AGT_ADR,
+       NETXEN_HW_PEGN3_CRB_AGT_ADR,
+       NETXEN_HW_PEGNI_CRB_AGT_ADR,
+       NETXEN_HW_PEGND_CRB_AGT_ADR,
+       NETXEN_HW_PEGNC_CRB_AGT_ADR,
+       NETXEN_HW_PEGR0_CRB_AGT_ADR,
+       NETXEN_HW_PEGR1_CRB_AGT_ADR,
+       NETXEN_HW_PEGR2_CRB_AGT_ADR,
+       NETXEN_HW_PEGR3_CRB_AGT_ADR,
+       NETXEN_HW_PEGN4_CRB_AGT_ADR
+};
+
+/*  Hub 5 */
+enum {
+       NETXEN_HW_PEGS0_CRB_AGT_ADR = 0x40,
+       NETXEN_HW_PEGS1_CRB_AGT_ADR,
+       NETXEN_HW_PEGS2_CRB_AGT_ADR,
+       NETXEN_HW_PEGS3_CRB_AGT_ADR,
+       NETXEN_HW_PEGSI_CRB_AGT_ADR,
+       NETXEN_HW_PEGSD_CRB_AGT_ADR,
+       NETXEN_HW_PEGSC_CRB_AGT_ADR
+};
+
+/*  Hub 6 */
+enum {
+       NETXEN_HW_CAS0_CRB_AGT_ADR = 0x46,
+       NETXEN_HW_CAS1_CRB_AGT_ADR = 0x47,
+       NETXEN_HW_CAS2_CRB_AGT_ADR = 0x48,
+       NETXEN_HW_CAS3_CRB_AGT_ADR = 0x49,
+       NETXEN_HW_NCM_CRB_AGT_ADR = 0x16,
+       NETXEN_HW_TMR_CRB_AGT_ADR = 0x17,
+       NETXEN_HW_XDMA_CRB_AGT_ADR = 0x05,
+       NETXEN_HW_OCM0_CRB_AGT_ADR = 0x06,
+       NETXEN_HW_OCM1_CRB_AGT_ADR = 0x07
+};
+
+/*  Floaters - non existent modules */
+#define NETXEN_HW_EFC_RPMX0_CRB_AGT_ADR        0x67
+
+/*  This field defines PCI/X adr [25:20] of agents on the CRB */
+enum {
+       NETXEN_HW_PX_MAP_CRB_PH = 0,
+       NETXEN_HW_PX_MAP_CRB_PS,
+       NETXEN_HW_PX_MAP_CRB_MN,
+       NETXEN_HW_PX_MAP_CRB_MS,
+       NETXEN_HW_PX_MAP_CRB_PGR1,
+       NETXEN_HW_PX_MAP_CRB_SRE,
+       NETXEN_HW_PX_MAP_CRB_NIU,
+       NETXEN_HW_PX_MAP_CRB_QMN,
+       NETXEN_HW_PX_MAP_CRB_SQN0,
+       NETXEN_HW_PX_MAP_CRB_SQN1,
+       NETXEN_HW_PX_MAP_CRB_SQN2,
+       NETXEN_HW_PX_MAP_CRB_SQN3,
+       NETXEN_HW_PX_MAP_CRB_QMS,
+       NETXEN_HW_PX_MAP_CRB_SQS0,
+       NETXEN_HW_PX_MAP_CRB_SQS1,
+       NETXEN_HW_PX_MAP_CRB_SQS2,
+       NETXEN_HW_PX_MAP_CRB_SQS3,
+       NETXEN_HW_PX_MAP_CRB_PGN0,
+       NETXEN_HW_PX_MAP_CRB_PGN1,
+       NETXEN_HW_PX_MAP_CRB_PGN2,
+       NETXEN_HW_PX_MAP_CRB_PGN3,
+       NETXEN_HW_PX_MAP_CRB_PGND,
+       NETXEN_HW_PX_MAP_CRB_PGNI,
+       NETXEN_HW_PX_MAP_CRB_PGS0,
+       NETXEN_HW_PX_MAP_CRB_PGS1,
+       NETXEN_HW_PX_MAP_CRB_PGS2,
+       NETXEN_HW_PX_MAP_CRB_PGS3,
+       NETXEN_HW_PX_MAP_CRB_PGSD,
+       NETXEN_HW_PX_MAP_CRB_PGSI,
+       NETXEN_HW_PX_MAP_CRB_SN,
+       NETXEN_HW_PX_MAP_CRB_PGR2,
+       NETXEN_HW_PX_MAP_CRB_EG,
+       NETXEN_HW_PX_MAP_CRB_PH2,
+       NETXEN_HW_PX_MAP_CRB_PS2,
+       NETXEN_HW_PX_MAP_CRB_CAM,
+       NETXEN_HW_PX_MAP_CRB_CAS0,
+       NETXEN_HW_PX_MAP_CRB_CAS1,
+       NETXEN_HW_PX_MAP_CRB_CAS2,
+       NETXEN_HW_PX_MAP_CRB_C2C0,
+       NETXEN_HW_PX_MAP_CRB_C2C1,
+       NETXEN_HW_PX_MAP_CRB_TIMR,
+       NETXEN_HW_PX_MAP_CRB_PGR3,
+       NETXEN_HW_PX_MAP_CRB_RPMX1,
+       NETXEN_HW_PX_MAP_CRB_RPMX2,
+       NETXEN_HW_PX_MAP_CRB_RPMX3,
+       NETXEN_HW_PX_MAP_CRB_RPMX4,
+       NETXEN_HW_PX_MAP_CRB_RPMX5,
+       NETXEN_HW_PX_MAP_CRB_RPMX6,
+       NETXEN_HW_PX_MAP_CRB_RPMX7,
+       NETXEN_HW_PX_MAP_CRB_XDMA,
+       NETXEN_HW_PX_MAP_CRB_I2Q,
+       NETXEN_HW_PX_MAP_CRB_ROMUSB,
+       NETXEN_HW_PX_MAP_CRB_CAS3,
+       NETXEN_HW_PX_MAP_CRB_RPMX0,
+       NETXEN_HW_PX_MAP_CRB_RPMX8,
+       NETXEN_HW_PX_MAP_CRB_RPMX9,
+       NETXEN_HW_PX_MAP_CRB_OCM0,
+       NETXEN_HW_PX_MAP_CRB_OCM1,
+       NETXEN_HW_PX_MAP_CRB_SMB,
+       NETXEN_HW_PX_MAP_CRB_I2C0,
+       NETXEN_HW_PX_MAP_CRB_I2C1,
+       NETXEN_HW_PX_MAP_CRB_LPC,
+       NETXEN_HW_PX_MAP_CRB_PGNC,
+       NETXEN_HW_PX_MAP_CRB_PGR0
+};
+
+/*  This field defines CRB adr [31:20] of the agents */
+
+#define NETXEN_HW_CRB_HUB_AGT_ADR_MN   \
+       ((NETXEN_HW_H0_CH_HUB_ADR << 7) | NETXEN_HW_MN_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_PH   \
+       ((NETXEN_HW_H0_CH_HUB_ADR << 7) | NETXEN_HW_PH_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_MS   \
+       ((NETXEN_HW_H0_CH_HUB_ADR << 7) | NETXEN_HW_MS_CRB_AGT_ADR)
+
+#define NETXEN_HW_CRB_HUB_AGT_ADR_PS   \
+       ((NETXEN_HW_H1_CH_HUB_ADR << 7) | NETXEN_HW_PS_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_SS   \
+       ((NETXEN_HW_H1_CH_HUB_ADR << 7) | NETXEN_HW_SS_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_RPMX3        \
+       ((NETXEN_HW_H1_CH_HUB_ADR << 7) | NETXEN_HW_RPMX3_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_QMS  \
+       ((NETXEN_HW_H1_CH_HUB_ADR << 7) | NETXEN_HW_QMS_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_SQS0 \
+       ((NETXEN_HW_H1_CH_HUB_ADR << 7) | NETXEN_HW_SQGS0_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_SQS1 \
+       ((NETXEN_HW_H1_CH_HUB_ADR << 7) | NETXEN_HW_SQGS1_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_SQS2 \
+       ((NETXEN_HW_H1_CH_HUB_ADR << 7) | NETXEN_HW_SQGS2_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_SQS3 \
+       ((NETXEN_HW_H1_CH_HUB_ADR << 7) | NETXEN_HW_SQGS3_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_C2C0 \
+       ((NETXEN_HW_H1_CH_HUB_ADR << 7) | NETXEN_HW_C2C0_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_C2C1 \
+       ((NETXEN_HW_H1_CH_HUB_ADR << 7) | NETXEN_HW_C2C1_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_RPMX2        \
+       ((NETXEN_HW_H1_CH_HUB_ADR << 7) | NETXEN_HW_RPMX2_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_RPMX4        \
+       ((NETXEN_HW_H1_CH_HUB_ADR << 7) | NETXEN_HW_RPMX4_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_RPMX7        \
+       ((NETXEN_HW_H1_CH_HUB_ADR << 7) | NETXEN_HW_RPMX7_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_RPMX9        \
+       ((NETXEN_HW_H1_CH_HUB_ADR << 7) | NETXEN_HW_RPMX9_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_SMB  \
+       ((NETXEN_HW_H1_CH_HUB_ADR << 7) | NETXEN_HW_SMB_CRB_AGT_ADR)
+
+#define NETXEN_HW_CRB_HUB_AGT_ADR_NIU  \
+       ((NETXEN_HW_H2_CH_HUB_ADR << 7) | NETXEN_HW_NIU_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_I2C0 \
+       ((NETXEN_HW_H2_CH_HUB_ADR << 7) | NETXEN_HW_I2C0_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_I2C1 \
+       ((NETXEN_HW_H2_CH_HUB_ADR << 7) | NETXEN_HW_I2C1_CRB_AGT_ADR)
+
+#define NETXEN_HW_CRB_HUB_AGT_ADR_SRE  \
+       ((NETXEN_HW_H3_CH_HUB_ADR << 7) | NETXEN_HW_SRE_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_EG   \
+       ((NETXEN_HW_H3_CH_HUB_ADR << 7) | NETXEN_HW_EG_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_RPMX0        \
+       ((NETXEN_HW_H3_CH_HUB_ADR << 7) | NETXEN_HW_RPMX0_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_QMN  \
+       ((NETXEN_HW_H3_CH_HUB_ADR << 7) | NETXEN_HW_QM_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_SQN0 \
+       ((NETXEN_HW_H3_CH_HUB_ADR << 7) | NETXEN_HW_SQG0_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_SQN1 \
+       ((NETXEN_HW_H3_CH_HUB_ADR << 7) | NETXEN_HW_SQG1_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_SQN2 \
+       ((NETXEN_HW_H3_CH_HUB_ADR << 7) | NETXEN_HW_SQG2_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_SQN3 \
+       ((NETXEN_HW_H3_CH_HUB_ADR << 7) | NETXEN_HW_SQG3_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_RPMX1        \
+       ((NETXEN_HW_H3_CH_HUB_ADR << 7) | NETXEN_HW_RPMX1_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_RPMX5        \
+       ((NETXEN_HW_H3_CH_HUB_ADR << 7) | NETXEN_HW_RPMX5_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_RPMX6        \
+       ((NETXEN_HW_H3_CH_HUB_ADR << 7) | NETXEN_HW_RPMX6_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_RPMX8        \
+       ((NETXEN_HW_H3_CH_HUB_ADR << 7) | NETXEN_HW_RPMX8_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_CAS0 \
+       ((NETXEN_HW_H3_CH_HUB_ADR << 7) | NETXEN_HW_CAS0_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_CAS1 \
+       ((NETXEN_HW_H3_CH_HUB_ADR << 7) | NETXEN_HW_CAS1_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_CAS2 \
+       ((NETXEN_HW_H3_CH_HUB_ADR << 7) | NETXEN_HW_CAS2_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_CAS3 \
+       ((NETXEN_HW_H3_CH_HUB_ADR << 7) | NETXEN_HW_CAS3_CRB_AGT_ADR)
+
+#define NETXEN_HW_CRB_HUB_AGT_ADR_PGNI \
+       ((NETXEN_HW_H4_CH_HUB_ADR << 7) | NETXEN_HW_PEGNI_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_PGND \
+       ((NETXEN_HW_H4_CH_HUB_ADR << 7) | NETXEN_HW_PEGND_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_PGN0 \
+       ((NETXEN_HW_H4_CH_HUB_ADR << 7) | NETXEN_HW_PEGN0_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_PGN1 \
+       ((NETXEN_HW_H4_CH_HUB_ADR << 7) | NETXEN_HW_PEGN1_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_PGN2 \
+       ((NETXEN_HW_H4_CH_HUB_ADR << 7) | NETXEN_HW_PEGN2_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_PGN3 \
+       ((NETXEN_HW_H4_CH_HUB_ADR << 7) | NETXEN_HW_PEGN3_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_PGN4 \
+       ((NETXEN_HW_H4_CH_HUB_ADR << 7) | NETXEN_HW_PEGN4_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_PGNC \
+       ((NETXEN_HW_H4_CH_HUB_ADR << 7) | NETXEN_HW_PEGNC_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_PGR0 \
+       ((NETXEN_HW_H4_CH_HUB_ADR << 7) | NETXEN_HW_PEGR0_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_PGR1 \
+       ((NETXEN_HW_H4_CH_HUB_ADR << 7) | NETXEN_HW_PEGR1_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_PGR2 \
+       ((NETXEN_HW_H4_CH_HUB_ADR << 7) | NETXEN_HW_PEGR2_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_PGR3 \
+       ((NETXEN_HW_H4_CH_HUB_ADR << 7) | NETXEN_HW_PEGR3_CRB_AGT_ADR)
+
+#define NETXEN_HW_CRB_HUB_AGT_ADR_PGSI \
+       ((NETXEN_HW_H5_CH_HUB_ADR << 7) | NETXEN_HW_PEGSI_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_PGSD \
+       ((NETXEN_HW_H5_CH_HUB_ADR << 7) | NETXEN_HW_PEGSD_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_PGS0 \
+       ((NETXEN_HW_H5_CH_HUB_ADR << 7) | NETXEN_HW_PEGS0_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_PGS1 \
+       ((NETXEN_HW_H5_CH_HUB_ADR << 7) | NETXEN_HW_PEGS1_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_PGS2 \
+       ((NETXEN_HW_H5_CH_HUB_ADR << 7) | NETXEN_HW_PEGS2_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_PGS3 \
+       ((NETXEN_HW_H5_CH_HUB_ADR << 7) | NETXEN_HW_PEGS3_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_PGSC \
+       ((NETXEN_HW_H5_CH_HUB_ADR << 7) | NETXEN_HW_PEGSC_CRB_AGT_ADR)
+
+#define NETXEN_HW_CRB_HUB_AGT_ADR_CAM  \
+       ((NETXEN_HW_H6_CH_HUB_ADR << 7) | NETXEN_HW_NCM_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_TIMR \
+       ((NETXEN_HW_H6_CH_HUB_ADR << 7) | NETXEN_HW_TMR_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_XDMA \
+       ((NETXEN_HW_H6_CH_HUB_ADR << 7) | NETXEN_HW_XDMA_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_SN   \
+       ((NETXEN_HW_H6_CH_HUB_ADR << 7) | NETXEN_HW_SN_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_I2Q  \
+       ((NETXEN_HW_H6_CH_HUB_ADR << 7) | NETXEN_HW_I2Q_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_ROMUSB       \
+       ((NETXEN_HW_H6_CH_HUB_ADR << 7) | NETXEN_HW_ROMUSB_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_OCM0 \
+       ((NETXEN_HW_H6_CH_HUB_ADR << 7) | NETXEN_HW_OCM0_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_OCM1 \
+       ((NETXEN_HW_H6_CH_HUB_ADR << 7) | NETXEN_HW_OCM1_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_LPC  \
+       ((NETXEN_HW_H6_CH_HUB_ADR << 7) | NETXEN_HW_LPC_CRB_AGT_ADR)
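As the comment above the macro block says, each agent's CRB address field is built by placing the hub address above the 7-bit agent address: (hub << 7) | agent. A minimal standalone sketch of that composition, with the two constants copied from the hub-2 enums earlier in this header (the printed value is plain arithmetic, not something read from hardware):

/* Sketch of the (hub << 7) | agent composition used by the
 * NETXEN_HW_CRB_HUB_AGT_ADR_* macros above. */
#include <stdio.h>

#define HW_H2_CH_HUB_ADR    0x03   /* hub carrying the NIU agent */
#define HW_NIU_CRB_AGT_ADR  0x31   /* NIU agent address on that hub */

int main(void)
{
	unsigned int crb_agt = (HW_H2_CH_HUB_ADR << 7) | HW_NIU_CRB_AGT_ADR;

	/* (0x03 << 7) | 0x31 = 0x180 | 0x31 = 0x1b1 */
	printf("NIU hub/agent field: 0x%03x\n", crb_agt);
	return 0;
}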
+
+#define NETXEN_SRE_MISC                        (NETXEN_CRB_SRE + 0x0002c)
+#define NETXEN_SRE_INT_STATUS          (NETXEN_CRB_SRE + 0x00034)
+#define NETXEN_SRE_PBI_ACTIVE_STATUS   (NETXEN_CRB_SRE + 0x01014)
+#define NETXEN_SRE_L1RE_CTL            (NETXEN_CRB_SRE + 0x03000)
+#define NETXEN_SRE_L2RE_CTL            (NETXEN_CRB_SRE + 0x05000)
+#define NETXEN_SRE_BUF_CTL             (NETXEN_CRB_SRE + 0x01000)
+
+#define        NETXEN_DMA_BASE(U)      (NETXEN_CRB_PCIX_MD + 0x20000 + ((U)<<16))
+#define        NETXEN_DMA_COMMAND(U)   (NETXEN_DMA_BASE(U) + 0x00008)
+
+#define NETXEN_I2Q_CLR_PCI_HI  (NETXEN_CRB_I2Q + 0x00034)
+
+#define PEG_NETWORK_BASE(N)    (NETXEN_CRB_PEG_NET_0 + (((N)&3) << 20))
+#define CRB_REG_EX_PC          0x3c
+
+#define ROMUSB_GLB     (NETXEN_CRB_ROMUSB + 0x00000)
+#define ROMUSB_ROM     (NETXEN_CRB_ROMUSB + 0x10000)
+
+#define NETXEN_ROMUSB_GLB_STATUS       (ROMUSB_GLB + 0x0004)
+#define NETXEN_ROMUSB_GLB_SW_RESET     (ROMUSB_GLB + 0x0008)
+#define NETXEN_ROMUSB_GLB_PAD_GPIO_I   (ROMUSB_GLB + 0x000c)
+#define NETXEN_ROMUSB_GLB_CAS_RST      (ROMUSB_GLB + 0x0038)
+#define NETXEN_ROMUSB_GLB_TEST_MUX_SEL (ROMUSB_GLB + 0x0044)
+#define NETXEN_ROMUSB_GLB_PEGTUNE_DONE (ROMUSB_GLB + 0x005c)
+#define NETXEN_ROMUSB_GLB_CHIP_CLK_CTRL        (ROMUSB_GLB + 0x00A8)
+
+#define NETXEN_ROMUSB_GPIO(n)          (ROMUSB_GLB + 0x60 + (4 * (n)))
+
+#define NETXEN_ROMUSB_ROM_INSTR_OPCODE (ROMUSB_ROM + 0x0004)
+#define NETXEN_ROMUSB_ROM_ADDRESS      (ROMUSB_ROM + 0x0008)
+#define NETXEN_ROMUSB_ROM_WDATA                (ROMUSB_ROM + 0x000c)
+#define NETXEN_ROMUSB_ROM_ABYTE_CNT    (ROMUSB_ROM + 0x0010)
+#define NETXEN_ROMUSB_ROM_DUMMY_BYTE_CNT (ROMUSB_ROM + 0x0014)
+#define NETXEN_ROMUSB_ROM_RDATA                (ROMUSB_ROM + 0x0018)
+
+/* Lock IDs for ROM lock */
+#define ROM_LOCK_DRIVER        0x0d417340
+
+/******************************************************************************
+*
+*    Definitions specific to M25P flash
+*
+*******************************************************************************
+*   Instructions
+*/
+#define M25P_INSTR_WREN                0x06
+#define M25P_INSTR_WRDI                0x04
+#define M25P_INSTR_RDID                0x9f
+#define M25P_INSTR_RDSR                0x05
+#define M25P_INSTR_WRSR                0x01
+#define M25P_INSTR_READ                0x03
+#define M25P_INSTR_FAST_READ   0x0b
+#define M25P_INSTR_PP          0x02
+#define M25P_INSTR_SE          0xd8
+#define M25P_INSTR_BE          0xc7
+#define M25P_INSTR_DP          0xb9
+#define M25P_INSTR_RES         0xab
+
+/* all are 1MB windows */
+
+#define NETXEN_PCI_CRB_WINDOWSIZE      0x00100000
+#define NETXEN_PCI_CRB_WINDOW(A)       \
+       (NETXEN_PCI_CRBSPACE + (A)*NETXEN_PCI_CRB_WINDOWSIZE)
+
+#define NETXEN_CRB_NIU         NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_NIU)
+#define NETXEN_CRB_SRE         NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_SRE)
+#define NETXEN_CRB_ROMUSB      \
+       NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_ROMUSB)
+#define NETXEN_CRB_I2Q         NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_I2Q)
+#define NETXEN_CRB_I2C0                NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_I2C0)
+#define NETXEN_CRB_SMB         NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_SMB)
+#define NETXEN_CRB_MAX         NETXEN_PCI_CRB_WINDOW(64)
+
+#define NETXEN_CRB_PCIX_HOST   NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_PH)
+#define NETXEN_CRB_PCIX_HOST2  NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_PH2)
+#define NETXEN_CRB_PEG_NET_0   NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_PGN0)
+#define NETXEN_CRB_PEG_NET_1   NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_PGN1)
+#define NETXEN_CRB_PEG_NET_2   NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_PGN2)
+#define NETXEN_CRB_PEG_NET_3   NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_PGN3)
+#define NETXEN_CRB_PEG_NET_4   NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_SQS2)
+#define NETXEN_CRB_PEG_NET_D   NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_PGND)
+#define NETXEN_CRB_PEG_NET_I   NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_PGNI)
+#define NETXEN_CRB_DDR_NET     NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_MN)
+#define NETXEN_CRB_QDR_NET     NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_SN)
+
+#define NETXEN_CRB_PCIX_MD     NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_PS)
+#define NETXEN_CRB_PCIE                NETXEN_CRB_PCIX_MD
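Each of the CRB windows above is a 1 MB slice of the CRB space, selected by the NETXEN_HW_PX_MAP_CRB_* index. A small sketch of the NETXEN_PCI_CRB_WINDOW() arithmetic, with the base and window size copied from this header and the NIU index (6) taken from the PCI/X map enum earlier in the file:

/* Sketch of the 1 MB CRB window arithmetic; constants copied from this
 * header (CRB space base, window size, NIU map index = 6). */
#include <stdio.h>

#define PCI_CRBSPACE        0x06000000UL
#define PCI_CRB_WINDOWSIZE  0x00100000UL
#define PX_MAP_CRB_NIU      6

#define PCI_CRB_WINDOW(A)   (PCI_CRBSPACE + (A) * PCI_CRB_WINDOWSIZE)

int main(void)
{
	/* 0x06000000 + 6 * 0x00100000 = 0x06600000 */
	printf("NETXEN_CRB_NIU base: 0x%08lx\n", PCI_CRB_WINDOW(PX_MAP_CRB_NIU));
	return 0;
}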
+
+#define ISR_INT_VECTOR         (NETXEN_PCIX_PS_REG(PCIX_INT_VECTOR))
+#define ISR_INT_MASK           (NETXEN_PCIX_PS_REG(PCIX_INT_MASK))
+#define ISR_INT_MASK_SLOW      (NETXEN_PCIX_PS_REG(PCIX_INT_MASK))
+#define ISR_INT_TARGET_STATUS  (NETXEN_PCIX_PS_REG(PCIX_TARGET_STATUS))
+#define ISR_INT_TARGET_MASK    (NETXEN_PCIX_PS_REG(PCIX_TARGET_MASK))
+#define ISR_INT_TARGET_STATUS_F1   (NETXEN_PCIX_PS_REG(PCIX_TARGET_STATUS_F1))
+#define ISR_INT_TARGET_MASK_F1     (NETXEN_PCIX_PS_REG(PCIX_TARGET_MASK_F1))
+#define ISR_INT_TARGET_STATUS_F2   (NETXEN_PCIX_PS_REG(PCIX_TARGET_STATUS_F2))
+#define ISR_INT_TARGET_MASK_F2     (NETXEN_PCIX_PS_REG(PCIX_TARGET_MASK_F2))
+#define ISR_INT_TARGET_STATUS_F3   (NETXEN_PCIX_PS_REG(PCIX_TARGET_STATUS_F3))
+#define ISR_INT_TARGET_MASK_F3     (NETXEN_PCIX_PS_REG(PCIX_TARGET_MASK_F3))
+#define ISR_INT_TARGET_STATUS_F4   (NETXEN_PCIX_PS_REG(PCIX_TARGET_STATUS_F4))
+#define ISR_INT_TARGET_MASK_F4     (NETXEN_PCIX_PS_REG(PCIX_TARGET_MASK_F4))
+#define ISR_INT_TARGET_STATUS_F5   (NETXEN_PCIX_PS_REG(PCIX_TARGET_STATUS_F5))
+#define ISR_INT_TARGET_MASK_F5     (NETXEN_PCIX_PS_REG(PCIX_TARGET_MASK_F5))
+#define ISR_INT_TARGET_STATUS_F6   (NETXEN_PCIX_PS_REG(PCIX_TARGET_STATUS_F6))
+#define ISR_INT_TARGET_MASK_F6     (NETXEN_PCIX_PS_REG(PCIX_TARGET_MASK_F6))
+#define ISR_INT_TARGET_STATUS_F7   (NETXEN_PCIX_PS_REG(PCIX_TARGET_STATUS_F7))
+#define ISR_INT_TARGET_MASK_F7     (NETXEN_PCIX_PS_REG(PCIX_TARGET_MASK_F7))
+
+#define NETXEN_PCI_MAPSIZE     128
+#define NETXEN_PCI_DDR_NET     (0x00000000UL)
+#define NETXEN_PCI_QDR_NET     (0x04000000UL)
+#define NETXEN_PCI_DIRECT_CRB  (0x04400000UL)
+#define NETXEN_PCI_CAMQM       (0x04800000UL)
+#define NETXEN_PCI_CAMQM_MAX   (0x04ffffffUL)
+#define NETXEN_PCI_OCM0                (0x05000000UL)
+#define NETXEN_PCI_OCM0_MAX    (0x050fffffUL)
+#define NETXEN_PCI_OCM1                (0x05100000UL)
+#define NETXEN_PCI_OCM1_MAX    (0x051fffffUL)
+#define NETXEN_PCI_CRBSPACE    (0x06000000UL)
+#define NETXEN_PCI_128MB_SIZE  (0x08000000UL)
+#define NETXEN_PCI_32MB_SIZE   (0x02000000UL)
+#define NETXEN_PCI_2MB_SIZE    (0x00200000UL)
+
+#define NETXEN_PCI_MN_2M       (0)
+#define NETXEN_PCI_MS_2M       (0x80000)
+#define NETXEN_PCI_OCM0_2M     (0x000c0000UL)
+#define NETXEN_PCI_CAMQM_2M_BASE       (0x000ff800UL)
+#define NETXEN_PCI_CAMQM_2M_END                (0x04800800UL)
+
+#define NETXEN_CRB_CAM NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_CAM)
+
+#define NETXEN_ADDR_DDR_NET    (0x0000000000000000ULL)
+#define NETXEN_ADDR_DDR_NET_MAX (0x000000000fffffffULL)
+#define NETXEN_ADDR_OCM0       (0x0000000200000000ULL)
+#define NETXEN_ADDR_OCM0_MAX   (0x00000002000fffffULL)
+#define NETXEN_ADDR_OCM1       (0x0000000200400000ULL)
+#define NETXEN_ADDR_OCM1_MAX   (0x00000002004fffffULL)
+#define NETXEN_ADDR_QDR_NET    (0x0000000300000000ULL)
+#define NETXEN_ADDR_QDR_NET_MAX_P2 (0x00000003003fffffULL)
+#define NETXEN_ADDR_QDR_NET_MAX_P3 (0x0000000303ffffffULL)
+
+/*
+ *   Register offsets for MN
+ */
+#define        NETXEN_MIU_CONTROL      (0x000)
+#define        NETXEN_MIU_MN_CONTROL   (NETXEN_CRB_DDR_NET+NETXEN_MIU_CONTROL)
+
+       /* 200ms delay in each loop */
+#define        NETXEN_NIU_PHY_WAITLEN          200000
+       /* 10 seconds before we give up */
+#define        NETXEN_NIU_PHY_WAITMAX          50
+#define        NETXEN_NIU_MAX_GBE_PORTS        4
+#define        NETXEN_NIU_MAX_XG_PORTS         2
+
+#define        NETXEN_NIU_MODE                 (NETXEN_CRB_NIU + 0x00000)
+
+#define        NETXEN_NIU_XG_SINGLE_TERM       (NETXEN_CRB_NIU + 0x00004)
+#define        NETXEN_NIU_XG_DRIVE_HI          (NETXEN_CRB_NIU + 0x00008)
+#define        NETXEN_NIU_XG_DRIVE_LO          (NETXEN_CRB_NIU + 0x0000c)
+#define        NETXEN_NIU_XG_DTX               (NETXEN_CRB_NIU + 0x00010)
+#define        NETXEN_NIU_XG_DEQ               (NETXEN_CRB_NIU + 0x00014)
+#define        NETXEN_NIU_XG_WORD_ALIGN        (NETXEN_CRB_NIU + 0x00018)
+#define        NETXEN_NIU_XG_RESET             (NETXEN_CRB_NIU + 0x0001c)
+#define        NETXEN_NIU_XG_POWER_DOWN        (NETXEN_CRB_NIU + 0x00020)
+#define        NETXEN_NIU_XG_RESET_PLL         (NETXEN_CRB_NIU + 0x00024)
+#define        NETXEN_NIU_XG_SERDES_LOOPBACK   (NETXEN_CRB_NIU + 0x00028)
+#define        NETXEN_NIU_XG_DO_BYTE_ALIGN     (NETXEN_CRB_NIU + 0x0002c)
+#define        NETXEN_NIU_XG_TX_ENABLE         (NETXEN_CRB_NIU + 0x00030)
+#define        NETXEN_NIU_XG_RX_ENABLE         (NETXEN_CRB_NIU + 0x00034)
+#define        NETXEN_NIU_XG_STATUS            (NETXEN_CRB_NIU + 0x00038)
+#define        NETXEN_NIU_XG_PAUSE_THRESHOLD   (NETXEN_CRB_NIU + 0x0003c)
+#define        NETXEN_NIU_INT_MASK             (NETXEN_CRB_NIU + 0x00040)
+#define        NETXEN_NIU_ACTIVE_INT           (NETXEN_CRB_NIU + 0x00044)
+#define        NETXEN_NIU_MASKABLE_INT         (NETXEN_CRB_NIU + 0x00048)
+
+#define NETXEN_NIU_STRAP_VALUE_SAVE_HIGHER     (NETXEN_CRB_NIU + 0x0004c)
+
+#define        NETXEN_NIU_GB_SERDES_RESET      (NETXEN_CRB_NIU + 0x00050)
+#define        NETXEN_NIU_GB0_GMII_MODE        (NETXEN_CRB_NIU + 0x00054)
+#define        NETXEN_NIU_GB0_MII_MODE         (NETXEN_CRB_NIU + 0x00058)
+#define        NETXEN_NIU_GB1_GMII_MODE        (NETXEN_CRB_NIU + 0x0005c)
+#define        NETXEN_NIU_GB1_MII_MODE         (NETXEN_CRB_NIU + 0x00060)
+#define        NETXEN_NIU_GB2_GMII_MODE        (NETXEN_CRB_NIU + 0x00064)
+#define        NETXEN_NIU_GB2_MII_MODE         (NETXEN_CRB_NIU + 0x00068)
+#define        NETXEN_NIU_GB3_GMII_MODE        (NETXEN_CRB_NIU + 0x0006c)
+#define        NETXEN_NIU_GB3_MII_MODE         (NETXEN_CRB_NIU + 0x00070)
+#define        NETXEN_NIU_REMOTE_LOOPBACK      (NETXEN_CRB_NIU + 0x00074)
+#define        NETXEN_NIU_GB0_HALF_DUPLEX      (NETXEN_CRB_NIU + 0x00078)
+#define        NETXEN_NIU_GB1_HALF_DUPLEX      (NETXEN_CRB_NIU + 0x0007c)
+#define        NETXEN_NIU_RESET_SYS_FIFOS      (NETXEN_CRB_NIU + 0x00088)
+#define        NETXEN_NIU_GB_CRC_DROP          (NETXEN_CRB_NIU + 0x0008c)
+#define        NETXEN_NIU_GB_DROP_WRONGADDR    (NETXEN_CRB_NIU + 0x00090)
+#define        NETXEN_NIU_TEST_MUX_CTL         (NETXEN_CRB_NIU + 0x00094)
+#define        NETXEN_NIU_XG_PAUSE_CTL         (NETXEN_CRB_NIU + 0x00098)
+#define        NETXEN_NIU_XG_PAUSE_LEVEL       (NETXEN_CRB_NIU + 0x000dc)
+#define        NETXEN_NIU_FRAME_COUNT_SELECT   (NETXEN_CRB_NIU + 0x000ac)
+#define        NETXEN_NIU_FRAME_COUNT          (NETXEN_CRB_NIU + 0x000b0)
+#define        NETXEN_NIU_XG_SEL               (NETXEN_CRB_NIU + 0x00128)
+#define NETXEN_NIU_GB_PAUSE_CTL                (NETXEN_CRB_NIU + 0x0030c)
+
+#define NETXEN_NIU_FULL_LEVEL_XG       (NETXEN_CRB_NIU + 0x00450)
+
+#define NETXEN_NIU_XG1_RESET           (NETXEN_CRB_NIU + 0x0011c)
+#define NETXEN_NIU_XG1_POWER_DOWN      (NETXEN_CRB_NIU + 0x00120)
+#define NETXEN_NIU_XG1_RESET_PLL       (NETXEN_CRB_NIU + 0x00124)
+
+#define NETXEN_MAC_ADDR_CNTL_REG       (NETXEN_CRB_NIU + 0x1000)
+
+#define        NETXEN_MULTICAST_ADDR_HI_0      (NETXEN_CRB_NIU + 0x1010)
+#define NETXEN_MULTICAST_ADDR_HI_1     (NETXEN_CRB_NIU + 0x1014)
+#define NETXEN_MULTICAST_ADDR_HI_2     (NETXEN_CRB_NIU + 0x1018)
+#define NETXEN_MULTICAST_ADDR_HI_3     (NETXEN_CRB_NIU + 0x101c)
+
+#define NETXEN_UNICAST_ADDR_BASE       (NETXEN_CRB_NIU + 0x1080)
+#define        NETXEN_MULTICAST_ADDR_BASE      (NETXEN_CRB_NIU + 0x1100)
+
+#define        NETXEN_NIU_GB_MAC_CONFIG_0(I)           \
+       (NETXEN_CRB_NIU + 0x30000 + (I)*0x10000)
+#define        NETXEN_NIU_GB_MAC_CONFIG_1(I)           \
+       (NETXEN_CRB_NIU + 0x30004 + (I)*0x10000)
+#define        NETXEN_NIU_GB_MAC_IPG_IFG(I)            \
+       (NETXEN_CRB_NIU + 0x30008 + (I)*0x10000)
+#define        NETXEN_NIU_GB_HALF_DUPLEX_CTRL(I)       \
+       (NETXEN_CRB_NIU + 0x3000c + (I)*0x10000)
+#define        NETXEN_NIU_GB_MAX_FRAME_SIZE(I)         \
+       (NETXEN_CRB_NIU + 0x30010 + (I)*0x10000)
+#define        NETXEN_NIU_GB_TEST_REG(I)               \
+       (NETXEN_CRB_NIU + 0x3001c + (I)*0x10000)
+#define        NETXEN_NIU_GB_MII_MGMT_CONFIG(I)        \
+       (NETXEN_CRB_NIU + 0x30020 + (I)*0x10000)
+#define        NETXEN_NIU_GB_MII_MGMT_COMMAND(I)       \
+       (NETXEN_CRB_NIU + 0x30024 + (I)*0x10000)
+#define        NETXEN_NIU_GB_MII_MGMT_ADDR(I)          \
+       (NETXEN_CRB_NIU + 0x30028 + (I)*0x10000)
+#define        NETXEN_NIU_GB_MII_MGMT_CTRL(I)          \
+       (NETXEN_CRB_NIU + 0x3002c + (I)*0x10000)
+#define        NETXEN_NIU_GB_MII_MGMT_STATUS(I)        \
+       (NETXEN_CRB_NIU + 0x30030 + (I)*0x10000)
+#define        NETXEN_NIU_GB_MII_MGMT_INDICATE(I)      \
+       (NETXEN_CRB_NIU + 0x30034 + (I)*0x10000)
+#define        NETXEN_NIU_GB_INTERFACE_CTRL(I)         \
+       (NETXEN_CRB_NIU + 0x30038 + (I)*0x10000)
+#define        NETXEN_NIU_GB_INTERFACE_STATUS(I)       \
+       (NETXEN_CRB_NIU + 0x3003c + (I)*0x10000)
+#define        NETXEN_NIU_GB_STATION_ADDR_0(I)         \
+       (NETXEN_CRB_NIU + 0x30040 + (I)*0x10000)
+#define        NETXEN_NIU_GB_STATION_ADDR_1(I)         \
+       (NETXEN_CRB_NIU + 0x30044 + (I)*0x10000)
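+
+/*
+ * Each GbE port (GB0..GB3) has its own copy of the MAC register block
+ * above: (I) selects the port and successive ports are 0x10000 apart,
+ * so e.g. NETXEN_NIU_GB_MAC_CONFIG_0(1) is NETXEN_CRB_NIU + 0x40000.
+ */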
+
+#define        NETXEN_NIU_XGE_CONFIG_0                 (NETXEN_CRB_NIU + 0x70000)
+#define        NETXEN_NIU_XGE_CONFIG_1                 (NETXEN_CRB_NIU + 0x70004)
+#define        NETXEN_NIU_XGE_IPG                      (NETXEN_CRB_NIU + 0x70008)
+#define        NETXEN_NIU_XGE_STATION_ADDR_0_HI        (NETXEN_CRB_NIU + 0x7000c)
+#define        NETXEN_NIU_XGE_STATION_ADDR_0_1         (NETXEN_CRB_NIU + 0x70010)
+#define        NETXEN_NIU_XGE_STATION_ADDR_1_LO        (NETXEN_CRB_NIU + 0x70014)
+#define        NETXEN_NIU_XGE_STATUS                   (NETXEN_CRB_NIU + 0x70018)
+#define        NETXEN_NIU_XGE_MAX_FRAME_SIZE           (NETXEN_CRB_NIU + 0x7001c)
+#define        NETXEN_NIU_XGE_PAUSE_FRAME_VALUE        (NETXEN_CRB_NIU + 0x70020)
+#define        NETXEN_NIU_XGE_TX_BYTE_CNT              (NETXEN_CRB_NIU + 0x70024)
+#define        NETXEN_NIU_XGE_TX_FRAME_CNT             (NETXEN_CRB_NIU + 0x70028)
+#define        NETXEN_NIU_XGE_RX_BYTE_CNT              (NETXEN_CRB_NIU + 0x7002c)
+#define        NETXEN_NIU_XGE_RX_FRAME_CNT             (NETXEN_CRB_NIU + 0x70030)
+#define        NETXEN_NIU_XGE_AGGR_ERROR_CNT           (NETXEN_CRB_NIU + 0x70034)
+#define        NETXEN_NIU_XGE_MULTICAST_FRAME_CNT      (NETXEN_CRB_NIU + 0x70038)
+#define        NETXEN_NIU_XGE_UNICAST_FRAME_CNT        (NETXEN_CRB_NIU + 0x7003c)
+#define        NETXEN_NIU_XGE_CRC_ERROR_CNT            (NETXEN_CRB_NIU + 0x70040)
+#define        NETXEN_NIU_XGE_OVERSIZE_FRAME_ERR       (NETXEN_CRB_NIU + 0x70044)
+#define        NETXEN_NIU_XGE_UNDERSIZE_FRAME_ERR      (NETXEN_CRB_NIU + 0x70048)
+#define        NETXEN_NIU_XGE_LOCAL_ERROR_CNT          (NETXEN_CRB_NIU + 0x7004c)
+#define        NETXEN_NIU_XGE_REMOTE_ERROR_CNT         (NETXEN_CRB_NIU + 0x70050)
+#define        NETXEN_NIU_XGE_CONTROL_CHAR_CNT         (NETXEN_CRB_NIU + 0x70054)
+#define        NETXEN_NIU_XGE_PAUSE_FRAME_CNT          (NETXEN_CRB_NIU + 0x70058)
+#define NETXEN_NIU_XG1_CONFIG_0                        (NETXEN_CRB_NIU + 0x80000)
+#define NETXEN_NIU_XG1_CONFIG_1                        (NETXEN_CRB_NIU + 0x80004)
+#define NETXEN_NIU_XG1_IPG                     (NETXEN_CRB_NIU + 0x80008)
+#define NETXEN_NIU_XG1_STATION_ADDR_0_HI       (NETXEN_CRB_NIU + 0x8000c)
+#define NETXEN_NIU_XG1_STATION_ADDR_0_1                (NETXEN_CRB_NIU + 0x80010)
+#define NETXEN_NIU_XG1_STATION_ADDR_1_LO       (NETXEN_CRB_NIU + 0x80014)
+#define NETXEN_NIU_XG1_STATUS                  (NETXEN_CRB_NIU + 0x80018)
+#define NETXEN_NIU_XG1_MAX_FRAME_SIZE          (NETXEN_CRB_NIU + 0x8001c)
+#define NETXEN_NIU_XG1_PAUSE_FRAME_VALUE       (NETXEN_CRB_NIU + 0x80020)
+#define NETXEN_NIU_XG1_TX_BYTE_CNT             (NETXEN_CRB_NIU + 0x80024)
+#define NETXEN_NIU_XG1_TX_FRAME_CNT            (NETXEN_CRB_NIU + 0x80028)
+#define NETXEN_NIU_XG1_RX_BYTE_CNT             (NETXEN_CRB_NIU + 0x8002c)
+#define NETXEN_NIU_XG1_RX_FRAME_CNT            (NETXEN_CRB_NIU + 0x80030)
+#define NETXEN_NIU_XG1_AGGR_ERROR_CNT          (NETXEN_CRB_NIU + 0x80034)
+#define NETXEN_NIU_XG1_MULTICAST_FRAME_CNT     (NETXEN_CRB_NIU + 0x80038)
+#define NETXEN_NIU_XG1_UNICAST_FRAME_CNT       (NETXEN_CRB_NIU + 0x8003c)
+#define NETXEN_NIU_XG1_CRC_ERROR_CNT           (NETXEN_CRB_NIU + 0x80040)
+#define NETXEN_NIU_XG1_OVERSIZE_FRAME_ERR      (NETXEN_CRB_NIU + 0x80044)
+#define NETXEN_NIU_XG1_UNDERSIZE_FRAME_ERR     (NETXEN_CRB_NIU + 0x80048)
+#define NETXEN_NIU_XG1_LOCAL_ERROR_CNT         (NETXEN_CRB_NIU + 0x8004c)
+#define NETXEN_NIU_XG1_REMOTE_ERROR_CNT                (NETXEN_CRB_NIU + 0x80050)
+#define NETXEN_NIU_XG1_CONTROL_CHAR_CNT                (NETXEN_CRB_NIU + 0x80054)
+#define NETXEN_NIU_XG1_PAUSE_FRAME_CNT         (NETXEN_CRB_NIU + 0x80058)
+
+/* P3 802.3ap */
+#define NETXEN_NIU_AP_MAC_CONFIG_0(I)      (NETXEN_CRB_NIU+0xa0000+(I)*0x10000)
+#define NETXEN_NIU_AP_MAC_CONFIG_1(I)      (NETXEN_CRB_NIU+0xa0004+(I)*0x10000)
+#define NETXEN_NIU_AP_MAC_IPG_IFG(I)       (NETXEN_CRB_NIU+0xa0008+(I)*0x10000)
+#define NETXEN_NIU_AP_HALF_DUPLEX_CTRL(I)  (NETXEN_CRB_NIU+0xa000c+(I)*0x10000)
+#define NETXEN_NIU_AP_MAX_FRAME_SIZE(I)    (NETXEN_CRB_NIU+0xa0010+(I)*0x10000)
+#define NETXEN_NIU_AP_TEST_REG(I)          (NETXEN_CRB_NIU+0xa001c+(I)*0x10000)
+#define NETXEN_NIU_AP_MII_MGMT_CONFIG(I)   (NETXEN_CRB_NIU+0xa0020+(I)*0x10000)
+#define NETXEN_NIU_AP_MII_MGMT_COMMAND(I)  (NETXEN_CRB_NIU+0xa0024+(I)*0x10000)
+#define NETXEN_NIU_AP_MII_MGMT_ADDR(I)     (NETXEN_CRB_NIU+0xa0028+(I)*0x10000)
+#define NETXEN_NIU_AP_MII_MGMT_CTRL(I)     (NETXEN_CRB_NIU+0xa002c+(I)*0x10000)
+#define NETXEN_NIU_AP_MII_MGMT_STATUS(I)   (NETXEN_CRB_NIU+0xa0030+(I)*0x10000)
+#define NETXEN_NIU_AP_MII_MGMT_INDICATE(I) (NETXEN_CRB_NIU+0xa0034+(I)*0x10000)
+#define NETXEN_NIU_AP_INTERFACE_CTRL(I)    (NETXEN_CRB_NIU+0xa0038+(I)*0x10000)
+#define NETXEN_NIU_AP_INTERFACE_STATUS(I)  (NETXEN_CRB_NIU+0xa003c+(I)*0x10000)
+#define NETXEN_NIU_AP_STATION_ADDR_0(I)    (NETXEN_CRB_NIU+0xa0040+(I)*0x10000)
+#define NETXEN_NIU_AP_STATION_ADDR_1(I)    (NETXEN_CRB_NIU+0xa0044+(I)*0x10000)
+
+
+#define TEST_AGT_CTRL  (0x00)
+
+#define TA_CTL_START   1
+#define TA_CTL_ENABLE  2
+#define TA_CTL_WRITE   4
+#define TA_CTL_BUSY    8
+
+/*
+ *   Register offsets for MN
+ */
+#define MIU_TEST_AGT_BASE              (0x90)
+
+#define MIU_TEST_AGT_ADDR_LO           (0x04)
+#define MIU_TEST_AGT_ADDR_HI           (0x08)
+#define MIU_TEST_AGT_WRDATA_LO         (0x10)
+#define MIU_TEST_AGT_WRDATA_HI         (0x14)
+#define MIU_TEST_AGT_RDDATA_LO         (0x18)
+#define MIU_TEST_AGT_RDDATA_HI         (0x1c)
+
+#define MIU_TEST_AGT_ADDR_MASK         0xfffffff8
+#define MIU_TEST_AGT_UPPER_ADDR(off)   (0)
+
+/*
+ *   Register offsets for MS
+ */
+#define SIU_TEST_AGT_BASE              (0x60)
+
+#define SIU_TEST_AGT_ADDR_LO           (0x04)
+#define SIU_TEST_AGT_ADDR_HI           (0x18)
+#define SIU_TEST_AGT_WRDATA_LO         (0x08)
+#define SIU_TEST_AGT_WRDATA_HI         (0x0c)
+#define SIU_TEST_AGT_WRDATA(i)         (0x08+(4*(i)))
+#define SIU_TEST_AGT_RDDATA_LO         (0x10)
+#define SIU_TEST_AGT_RDDATA_HI         (0x14)
+#define SIU_TEST_AGT_RDDATA(i)         (0x10+(4*(i)))
+
+#define SIU_TEST_AGT_ADDR_MASK         0x3ffff8
+#define SIU_TEST_AGT_UPPER_ADDR(off)   ((off)>>22)
+
+/* XG Link status */
+#define XG_LINK_UP     0x10
+#define XG_LINK_DOWN   0x20
+
+#define XG_LINK_UP_P3  0x01
+#define XG_LINK_DOWN_P3        0x02
+#define XG_LINK_STATE_P3_MASK 0xf
+#define XG_LINK_STATE_P3(pcifn,val) \
+       (((val) >> ((pcifn) * 4)) & XG_LINK_STATE_P3_MASK)
+
+#define P3_LINK_SPEED_MHZ      100
+#define P3_LINK_SPEED_MASK     0xff
+#define P3_LINK_SPEED_REG(pcifn)       \
+       (CRB_PF_LINK_SPEED_1 + (((pcifn) / 4) * 4))
+#define P3_LINK_SPEED_VAL(pcifn, reg)  \
+       (((reg) >> (8 * ((pcifn) & 0x3))) & P3_LINK_SPEED_MASK)
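+
+/*
+ * Taken together, these macros suggest each PCI function owns one 4-bit
+ * link-state nibble in CRB_XG_STATE_P3 and one speed byte in a
+ * CRB_PF_LINK_SPEED_* register (functions 0-3 in _1, 4-7 in _2), with
+ * P3_LINK_SPEED_MHZ hinting that the byte is in 100 MHz units. E.g. for
+ * function 5, XG_LINK_STATE_P3(5, val) takes bits 23:20 of val and
+ * P3_LINK_SPEED_VAL(5, reg) takes bits 15:8 of CRB_PF_LINK_SPEED_2.
+ */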
+
+#define NETXEN_CAM_RAM_BASE    (NETXEN_CRB_CAM + 0x02000)
+#define NETXEN_CAM_RAM(reg)    (NETXEN_CAM_RAM_BASE + (reg))
+#define NETXEN_FW_VERSION_MAJOR (NETXEN_CAM_RAM(0x150))
+#define NETXEN_FW_VERSION_MINOR (NETXEN_CAM_RAM(0x154))
+#define NETXEN_FW_VERSION_SUB  (NETXEN_CAM_RAM(0x158))
+#define NETXEN_ROM_LOCK_ID     (NETXEN_CAM_RAM(0x100))
+#define NETXEN_PHY_LOCK_ID     (NETXEN_CAM_RAM(0x120))
+#define NETXEN_CRB_WIN_LOCK_ID (NETXEN_CAM_RAM(0x124))
+
+#define NIC_CRB_BASE           (NETXEN_CAM_RAM(0x200))
+#define NIC_CRB_BASE_2         (NETXEN_CAM_RAM(0x700))
+#define NETXEN_NIC_REG(X)      (NIC_CRB_BASE+(X))
+#define NETXEN_NIC_REG_2(X)    (NIC_CRB_BASE_2+(X))
+#define NETXEN_INTR_MODE_REG   NETXEN_NIC_REG(0x44)
+#define NETXEN_MSI_MODE                0x1
+#define NETXEN_INTX_MODE       0x2
+
+#define NX_CDRP_CRB_OFFSET             (NETXEN_NIC_REG(0x18))
+#define NX_ARG1_CRB_OFFSET             (NETXEN_NIC_REG(0x1c))
+#define NX_ARG2_CRB_OFFSET             (NETXEN_NIC_REG(0x20))
+#define NX_ARG3_CRB_OFFSET             (NETXEN_NIC_REG(0x24))
+#define NX_SIGN_CRB_OFFSET             (NETXEN_NIC_REG(0x28))
+
+#define CRB_HOST_DUMMY_BUF_ADDR_HI     (NETXEN_NIC_REG(0x3c))
+#define CRB_HOST_DUMMY_BUF_ADDR_LO     (NETXEN_NIC_REG(0x40))
+
+#define CRB_CMDPEG_STATE               (NETXEN_NIC_REG(0x50))
+#define CRB_RCVPEG_STATE               (NETXEN_NIC_REG(0x13c))
+
+#define CRB_XG_STATE                   (NETXEN_NIC_REG(0x94))
+#define CRB_XG_STATE_P3                        (NETXEN_NIC_REG(0x98))
+#define CRB_PF_LINK_SPEED_1            (NETXEN_NIC_REG(0xe8))
+#define CRB_PF_LINK_SPEED_2            (NETXEN_NIC_REG(0xec))
+
+#define CRB_MPORT_MODE                 (NETXEN_NIC_REG(0xc4))
+#define CRB_DMA_SHIFT                  (NETXEN_NIC_REG(0xcc))
+#define CRB_INT_VECTOR                 (NETXEN_NIC_REG(0xd4))
+
+#define CRB_CMD_PRODUCER_OFFSET                (NETXEN_NIC_REG(0x08))
+#define CRB_CMD_CONSUMER_OFFSET                (NETXEN_NIC_REG(0x0c))
+#define CRB_CMD_PRODUCER_OFFSET_1      (NETXEN_NIC_REG(0x1ac))
+#define CRB_CMD_CONSUMER_OFFSET_1      (NETXEN_NIC_REG(0x1b0))
+#define CRB_CMD_PRODUCER_OFFSET_2      (NETXEN_NIC_REG(0x1b8))
+#define CRB_CMD_CONSUMER_OFFSET_2      (NETXEN_NIC_REG(0x1bc))
+#define CRB_CMD_PRODUCER_OFFSET_3      (NETXEN_NIC_REG(0x1d0))
+#define CRB_CMD_CONSUMER_OFFSET_3      (NETXEN_NIC_REG(0x1d4))
+#define CRB_TEMP_STATE                 (NETXEN_NIC_REG(0x1b4))
+
+#define CRB_V2P_0                      (NETXEN_NIC_REG(0x290))
+#define CRB_V2P(port)                  (CRB_V2P_0+((port)*4))
+#define CRB_DRIVER_VERSION             (NETXEN_NIC_REG(0x2a0))
+
+#define CRB_SW_INT_MASK_0              (NETXEN_NIC_REG(0x1d8))
+#define CRB_SW_INT_MASK_1              (NETXEN_NIC_REG(0x1e0))
+#define CRB_SW_INT_MASK_2              (NETXEN_NIC_REG(0x1e4))
+#define CRB_SW_INT_MASK_3              (NETXEN_NIC_REG(0x1e8))
+
+#define CRB_FW_CAPABILITIES_1          (NETXEN_CAM_RAM(0x128))
+#define CRB_FW_CAPABILITIES_2          (NETXEN_CAM_RAM(0x12c))
+#define CRB_MAC_BLOCK_START            (NETXEN_CAM_RAM(0x1c0))
+
+/*
+ * Capabilities register; can be used to selectively enable or disable
+ * features for backward compatibility.
+ */
+#define CRB_NIC_CAPABILITIES_HOST      NETXEN_NIC_REG(0x1a8)
+#define CRB_NIC_MSI_MODE_HOST          NETXEN_NIC_REG(0x270)
+
+#define INTR_SCHEME_PERPORT            0x1
+#define MSI_MODE_MULTIFUNC             0x1
+
+/* used for ethtool tests */
+#define CRB_SCRATCHPAD_TEST        NETXEN_NIC_REG(0x280)
+
+/*
+ * CrbPortPhanCntrHi/Lo pass the address of the HostPhantomIndex, which the
+ * Phantom host reads to get producer/consumer indexes from Phantom/Casper.
+ * If it is not HOST_SHARED_MEMORY, the following registers hold the
+ * addresses of the ring's shared memory on the Phantom.
+ */
+
+#define nx_get_temp_val(x)             ((x) >> 16)
+#define nx_get_temp_state(x)           ((x) & 0xffff)
+#define nx_encode_temp(val, state)     (((val) << 16) | (state))
+
+/*
+ * Temperature control.
+ */
+enum {
+       NX_TEMP_NORMAL = 0x1,   /* Normal operating range */
+       NX_TEMP_WARN,           /* Sound alert, temperature getting high */
+       NX_TEMP_PANIC           /* Fatal error, hardware has shut down. */
+};
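+
+/*
+ * The temperature word packs the reading and the state together:
+ * nx_encode_temp(val, state) stores the value in the upper 16 bits and
+ * the NX_TEMP_* state in the lower 16, so nx_get_temp_val() and
+ * nx_get_temp_state() recover exactly what was encoded.
+ */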
+
+/* Lock IDs for PHY lock */
+#define PHY_LOCK_DRIVER                0x44524956
+
+/* Used for PS PCI Memory access */
+#define PCIX_PS_OP_ADDR_LO     (0x10000)
+/*   via CRB  (PS side only)     */
+#define PCIX_PS_OP_ADDR_HI     (0x10004)
+
+#define PCIX_INT_VECTOR                (0x10100)
+#define PCIX_INT_MASK          (0x10104)
+
+#define PCIX_CRB_WINDOW                (0x10210)
+#define PCIX_CRB_WINDOW_F0     (0x10210)
+#define PCIX_CRB_WINDOW_F1     (0x10230)
+#define PCIX_CRB_WINDOW_F2     (0x10250)
+#define PCIX_CRB_WINDOW_F3     (0x10270)
+#define PCIX_CRB_WINDOW_F4     (0x102ac)
+#define PCIX_CRB_WINDOW_F5     (0x102bc)
+#define PCIX_CRB_WINDOW_F6     (0x102cc)
+#define PCIX_CRB_WINDOW_F7     (0x102dc)
+#define PCIE_CRB_WINDOW_REG(func)      (((func) < 4) ? \
+               (PCIX_CRB_WINDOW_F0 + (0x20 * (func))) :\
+               (PCIX_CRB_WINDOW_F4 + (0x10 * ((func)-4))))
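+
+/*
+ * Functions 0-3 use the 0x20-spaced F0..F3 window registers, functions
+ * 4-7 the 0x10-spaced F4..F7 ones; e.g. PCIE_CRB_WINDOW_REG(5) is
+ * PCIX_CRB_WINDOW_F4 + 0x10 == 0x102bc == PCIX_CRB_WINDOW_F5. The MN
+ * and SN window selectors below follow the same per-function layout.
+ */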
+
+#define PCIX_MN_WINDOW         (0x10200)
+#define PCIX_MN_WINDOW_F0      (0x10200)
+#define PCIX_MN_WINDOW_F1      (0x10220)
+#define PCIX_MN_WINDOW_F2      (0x10240)
+#define PCIX_MN_WINDOW_F3      (0x10260)
+#define PCIX_MN_WINDOW_F4      (0x102a0)
+#define PCIX_MN_WINDOW_F5      (0x102b0)
+#define PCIX_MN_WINDOW_F6      (0x102c0)
+#define PCIX_MN_WINDOW_F7      (0x102d0)
+#define PCIE_MN_WINDOW_REG(func)       (((func) < 4) ? \
+               (PCIX_MN_WINDOW_F0 + (0x20 * (func))) :\
+               (PCIX_MN_WINDOW_F4 + (0x10 * ((func)-4))))
+
+#define PCIX_SN_WINDOW         (0x10208)
+#define PCIX_SN_WINDOW_F0      (0x10208)
+#define PCIX_SN_WINDOW_F1      (0x10228)
+#define PCIX_SN_WINDOW_F2      (0x10248)
+#define PCIX_SN_WINDOW_F3      (0x10268)
+#define PCIX_SN_WINDOW_F4      (0x102a8)
+#define PCIX_SN_WINDOW_F5      (0x102b8)
+#define PCIX_SN_WINDOW_F6      (0x102c8)
+#define PCIX_SN_WINDOW_F7      (0x102d8)
+#define PCIE_SN_WINDOW_REG(func)       (((func) < 4) ? \
+               (PCIX_SN_WINDOW_F0 + (0x20 * (func))) :\
+               (PCIX_SN_WINDOW_F4 + (0x10 * ((func)-4))))
+
+#define PCIX_OCM_WINDOW                (0x10800)
+#define PCIX_OCM_WINDOW_REG(func)      (PCIX_OCM_WINDOW + 0x20 * (func))
+
+#define PCIX_TARGET_STATUS     (0x10118)
+#define PCIX_TARGET_STATUS_F1  (0x10160)
+#define PCIX_TARGET_STATUS_F2  (0x10164)
+#define PCIX_TARGET_STATUS_F3  (0x10168)
+#define PCIX_TARGET_STATUS_F4  (0x10360)
+#define PCIX_TARGET_STATUS_F5  (0x10364)
+#define PCIX_TARGET_STATUS_F6  (0x10368)
+#define PCIX_TARGET_STATUS_F7  (0x1036c)
+
+#define PCIX_TARGET_MASK       (0x10128)
+#define PCIX_TARGET_MASK_F1    (0x10170)
+#define PCIX_TARGET_MASK_F2    (0x10174)
+#define PCIX_TARGET_MASK_F3    (0x10178)
+#define PCIX_TARGET_MASK_F4    (0x10370)
+#define PCIX_TARGET_MASK_F5    (0x10374)
+#define PCIX_TARGET_MASK_F6    (0x10378)
+#define PCIX_TARGET_MASK_F7    (0x1037c)
+
+#define PCIX_MSI_F0            (0x13000)
+#define PCIX_MSI_F1            (0x13004)
+#define PCIX_MSI_F2            (0x13008)
+#define PCIX_MSI_F3            (0x1300c)
+#define PCIX_MSI_F4            (0x13010)
+#define PCIX_MSI_F5            (0x13014)
+#define PCIX_MSI_F6            (0x13018)
+#define PCIX_MSI_F7            (0x1301c)
+#define PCIX_MSI_F(i)          (0x13000+((i)*4))
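+
+/*
+ * PCIX_MSI_F(i) generates the same addresses as the per-function
+ * defines above, e.g. PCIX_MSI_F(3) == 0x1300c == PCIX_MSI_F3.
+ */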
+
+#define PCIX_PS_MEM_SPACE      (0x90000)
+
+#define NETXEN_PCIX_PH_REG(reg)        (NETXEN_CRB_PCIE + (reg))
+#define NETXEN_PCIX_PS_REG(reg)        (NETXEN_CRB_PCIX_MD + (reg))
+
+#define NETXEN_PCIE_REG(reg)   (NETXEN_CRB_PCIE + (reg))
+
+#define PCIE_MAX_DMA_XFER_SIZE (0x1404c)
+
+#define PCIE_DCR               0x00d8
+
+#define PCIE_SEM0_LOCK         (0x1c000)
+#define PCIE_SEM0_UNLOCK       (0x1c004)
+#define PCIE_SEM1_LOCK         (0x1c008)
+#define PCIE_SEM1_UNLOCK       (0x1c00c)
+#define PCIE_SEM2_LOCK         (0x1c010)       /* Flash lock   */
+#define PCIE_SEM2_UNLOCK       (0x1c014)       /* Flash unlock */
+#define PCIE_SEM3_LOCK         (0x1c018)       /* Phy lock     */
+#define PCIE_SEM3_UNLOCK       (0x1c01c)       /* Phy unlock   */
+#define PCIE_SEM4_LOCK         (0x1c020)
+#define PCIE_SEM4_UNLOCK       (0x1c024)
+#define PCIE_SEM5_LOCK         (0x1c028)       /* API lock     */
+#define PCIE_SEM5_UNLOCK       (0x1c02c)       /* API unlock   */
+#define PCIE_SEM6_LOCK         (0x1c030)       /* sw lock      */
+#define PCIE_SEM6_UNLOCK       (0x1c034)       /* sw unlock    */
+#define PCIE_SEM7_LOCK         (0x1c038)       /* crb win lock */
+#define PCIE_SEM7_UNLOCK       (0x1c03c)       /* crbwin unlock*/
+#define PCIE_SEM_LOCK(N)       (PCIE_SEM0_LOCK + 8*(N))
+#define PCIE_SEM_UNLOCK(N)     (PCIE_SEM0_UNLOCK + 8*(N))
+
+#define PCIE_SETUP_FUNCTION    (0x12040)
+#define PCIE_SETUP_FUNCTION2   (0x12048)
+#define PCIE_MISCCFG_RC         (0x1206c)
+#define PCIE_TGT_SPLIT_CHICKEN (0x12080)
+#define PCIE_CHICKEN3          (0x120c8)
+
+#define ISR_INT_STATE_REG       (NETXEN_PCIX_PS_REG(PCIE_MISCCFG_RC))
+#define PCIE_MAX_MASTER_SPLIT  (0x14048)
+
+#define NETXEN_PORT_MODE_NONE          0
+#define NETXEN_PORT_MODE_XG            1
+#define NETXEN_PORT_MODE_GB            2
+#define NETXEN_PORT_MODE_802_3_AP      3
+#define NETXEN_PORT_MODE_AUTO_NEG      4
+#define NETXEN_PORT_MODE_AUTO_NEG_1G   5
+#define NETXEN_PORT_MODE_AUTO_NEG_XG   6
+#define NETXEN_PORT_MODE_ADDR          (NETXEN_CAM_RAM(0x24))
+#define NETXEN_WOL_PORT_MODE           (NETXEN_CAM_RAM(0x198))
+
+#define NETXEN_WOL_CONFIG_NV           (NETXEN_CAM_RAM(0x184))
+#define NETXEN_WOL_CONFIG              (NETXEN_CAM_RAM(0x188))
+
+#define NX_PEG_TUNE_MN_PRESENT         0x1
+#define NX_PEG_TUNE_CAPABILITY         (NETXEN_CAM_RAM(0x02c))
+
+#define NETXEN_DMA_WATCHDOG_CTRL       (NETXEN_CAM_RAM(0x14))
+#define NETXEN_PEG_ALIVE_COUNTER       (NETXEN_CAM_RAM(0xb0))
+#define NETXEN_PEG_HALT_STATUS1        (NETXEN_CAM_RAM(0xa8))
+#define NETXEN_PEG_HALT_STATUS2        (NETXEN_CAM_RAM(0xac))
+#define NX_CRB_DEV_REF_COUNT           (NETXEN_CAM_RAM(0x138))
+#define NX_CRB_DEV_STATE               (NETXEN_CAM_RAM(0x140))
+#define NETXEN_ULA_KEY                 (NETXEN_CAM_RAM(0x178))
+
+/* MiniDIMM related macros */
+#define NETXEN_DIMM_CAPABILITY         (NETXEN_CAM_RAM(0x258))
+#define NETXEN_DIMM_PRESENT                    0x1
+#define NETXEN_DIMM_MEMTYPE_DDR2_SDRAM 0x2
+#define NETXEN_DIMM_SIZE                       0x4
+#define NETXEN_DIMM_MEMTYPE(VAL)               ((VAL >> 3) & 0xf)
+#define        NETXEN_DIMM_NUMROWS(VAL)                ((VAL >> 7) & 0xf)
+#define        NETXEN_DIMM_NUMCOLS(VAL)                ((VAL >> 11) & 0xf)
+#define        NETXEN_DIMM_NUMRANKS(VAL)               ((VAL >> 15) & 0x3)
+#define NETXEN_DIMM_DATAWIDTH(VAL)             ((VAL >> 18) & 0x3)
+#define NETXEN_DIMM_NUMBANKS(VAL)              ((VAL >> 21) & 0xf)
+#define NETXEN_DIMM_TYPE(VAL)          ((VAL >> 25) & 0x3f)
+#define NETXEN_DIMM_VALID_FLAG         0x80000000
+
+#define NETXEN_DIMM_MEM_DDR2_SDRAM     0x8
+
+#define NETXEN_DIMM_STD_MEM_SIZE       512
+
+#define NETXEN_DIMM_TYPE_RDIMM 0x1
+#define NETXEN_DIMM_TYPE_UDIMM 0x2
+#define NETXEN_DIMM_TYPE_SO_DIMM       0x4
+#define NETXEN_DIMM_TYPE_Micro_DIMM    0x8
+#define NETXEN_DIMM_TYPE_Mini_RDIMM    0x10
+#define NETXEN_DIMM_TYPE_Mini_UDIMM    0x20
+
+/* Device State */
+#define NX_DEV_COLD            1
+#define NX_DEV_INITALIZING     2
+#define NX_DEV_READY           3
+#define NX_DEV_NEED_RESET      4
+#define NX_DEV_NEED_QUISCENT   5
+#define NX_DEV_NEED_AER        6
+#define NX_DEV_FAILED          7
+
+#define NX_RCODE_DRIVER_INFO           0x20000000
+#define NX_RCODE_DRIVER_CAN_RELOAD     0x40000000
+#define NX_RCODE_FATAL_ERROR           0x80000000
+#define NX_FWERROR_PEGNUM(code)                ((code) & 0xff)
+#define NX_FWERROR_CODE(code)          ((code >> 8) & 0xfffff)
+#define NX_FWERROR_PEGSTAT1(code)      ((code >> 8) & 0x1fffff)
+
+#define FW_POLL_DELAY                  (2 * HZ)
+#define FW_FAIL_THRESH                 3
+#define FW_POLL_THRESH                 10
+
+#define        ISR_MSI_INT_TRIGGER(FUNC) (NETXEN_PCIX_PS_REG(PCIX_MSI_F(FUNC)))
+#define ISR_LEGACY_INT_TRIGGERED(VAL)  (((VAL) & 0x300) == 0x200)
+
+/*
+ * PCI Interrupt Vector Values.
+ */
+#define        PCIX_INT_VECTOR_BIT_F0  0x0080
+#define        PCIX_INT_VECTOR_BIT_F1  0x0100
+#define        PCIX_INT_VECTOR_BIT_F2  0x0200
+#define        PCIX_INT_VECTOR_BIT_F3  0x0400
+#define        PCIX_INT_VECTOR_BIT_F4  0x0800
+#define        PCIX_INT_VECTOR_BIT_F5  0x1000
+#define        PCIX_INT_VECTOR_BIT_F6  0x2000
+#define        PCIX_INT_VECTOR_BIT_F7  0x4000
+
+struct netxen_legacy_intr_set {
+       uint32_t        int_vec_bit;
+       uint32_t        tgt_status_reg;
+       uint32_t        tgt_mask_reg;
+       uint32_t        pci_int_reg;
+};
+
+#define        NX_LEGACY_INTR_CONFIG                                           \
+{                                                                      \
+       {                                                               \
+               .int_vec_bit    =       PCIX_INT_VECTOR_BIT_F0,         \
+               .tgt_status_reg =       ISR_INT_TARGET_STATUS,          \
+               .tgt_mask_reg   =       ISR_INT_TARGET_MASK,            \
+               .pci_int_reg    =       ISR_MSI_INT_TRIGGER(0) },       \
+                                                                       \
+       {                                                               \
+               .int_vec_bit    =       PCIX_INT_VECTOR_BIT_F1,         \
+               .tgt_status_reg =       ISR_INT_TARGET_STATUS_F1,       \
+               .tgt_mask_reg   =       ISR_INT_TARGET_MASK_F1,         \
+               .pci_int_reg    =       ISR_MSI_INT_TRIGGER(1) },       \
+                                                                       \
+       {                                                               \
+               .int_vec_bit    =       PCIX_INT_VECTOR_BIT_F2,         \
+               .tgt_status_reg =       ISR_INT_TARGET_STATUS_F2,       \
+               .tgt_mask_reg   =       ISR_INT_TARGET_MASK_F2,         \
+               .pci_int_reg    =       ISR_MSI_INT_TRIGGER(2) },       \
+                                                                       \
+       {                                                               \
+               .int_vec_bit    =       PCIX_INT_VECTOR_BIT_F3,         \
+               .tgt_status_reg =       ISR_INT_TARGET_STATUS_F3,       \
+               .tgt_mask_reg   =       ISR_INT_TARGET_MASK_F3,         \
+               .pci_int_reg    =       ISR_MSI_INT_TRIGGER(3) },       \
+                                                                       \
+       {                                                               \
+               .int_vec_bit    =       PCIX_INT_VECTOR_BIT_F4,         \
+               .tgt_status_reg =       ISR_INT_TARGET_STATUS_F4,       \
+               .tgt_mask_reg   =       ISR_INT_TARGET_MASK_F4,         \
+               .pci_int_reg    =       ISR_MSI_INT_TRIGGER(4) },       \
+                                                                       \
+       {                                                               \
+               .int_vec_bit    =       PCIX_INT_VECTOR_BIT_F5,         \
+               .tgt_status_reg =       ISR_INT_TARGET_STATUS_F5,       \
+               .tgt_mask_reg   =       ISR_INT_TARGET_MASK_F5,         \
+               .pci_int_reg    =       ISR_MSI_INT_TRIGGER(5) },       \
+                                                                       \
+       {                                                               \
+               .int_vec_bit    =       PCIX_INT_VECTOR_BIT_F6,         \
+               .tgt_status_reg =       ISR_INT_TARGET_STATUS_F6,       \
+               .tgt_mask_reg   =       ISR_INT_TARGET_MASK_F6,         \
+               .pci_int_reg    =       ISR_MSI_INT_TRIGGER(6) },       \
+                                                                       \
+       {                                                               \
+               .int_vec_bit    =       PCIX_INT_VECTOR_BIT_F7,         \
+               .tgt_status_reg =       ISR_INT_TARGET_STATUS_F7,       \
+               .tgt_mask_reg   =       ISR_INT_TARGET_MASK_F7,         \
+               .pci_int_reg    =       ISR_MSI_INT_TRIGGER(7) },       \
+}
+
+#endif                         /* __NETXEN_NIC_HDR_H_ */
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c
new file mode 100644 (file)
index 0000000..2b10f1b
--- /dev/null
@@ -0,0 +1,2597 @@
+/*
+ * Copyright (C) 2003 - 2009 NetXen, Inc.
+ * Copyright (C) 2009 - QLogic Corporation.
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called "COPYING".
+ *
+ */
+
+#include <linux/slab.h>
+#include "netxen_nic.h"
+#include "netxen_nic_hw.h"
+
+#include <net/ip.h>
+
+#define MASK(n) ((1ULL<<(n))-1)
+#define MN_WIN(addr) (((addr & 0x1fc0000) >> 1) | ((addr >> 25) & 0x3ff))
+#define OCM_WIN(addr) (((addr & 0x1ff0000) >> 1) | ((addr >> 25) & 0x3ff))
+#define MS_WIN(addr) (addr & 0x0ffc0000)
+
+#define GET_MEM_OFFS_2M(addr) (addr & MASK(18))
+
+#define CRB_BLK(off)   ((off >> 20) & 0x3f)
+#define CRB_SUBBLK(off)        ((off >> 16) & 0xf)
+#define CRB_WINDOW_2M  (0x130060)
+#define CRB_HI(off)    ((crb_hub_agt[CRB_BLK(off)] << 20) | ((off) & 0xf0000))
+#define CRB_INDIRECT_2M        (0x1e0000UL)
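+
+/*
+ * Helpers for the 2M-BAR CRB addressing scheme: CRB_BLK() takes bits
+ * 25:20 of a CRB offset (the index into the 64-entry crb_128M_2M_map
+ * and crb_hub_agt tables below), CRB_SUBBLK() takes bits 19:16, and
+ * CRB_HI() combines the block's hub/agent id with bits 19:16 of the
+ * offset. GET_MEM_OFFS_2M() keeps the low 18 bits of a memory address.
+ */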
+
+static void netxen_nic_io_write_128M(struct netxen_adapter *adapter,
+               void __iomem *addr, u32 data);
+static u32 netxen_nic_io_read_128M(struct netxen_adapter *adapter,
+               void __iomem *addr);
+#ifndef readq
+static inline u64 readq(void __iomem *addr)
+{
+       return readl(addr) | (((u64) readl(addr + 4)) << 32LL);
+}
+#endif
+
+#ifndef writeq
+static inline void writeq(u64 val, void __iomem *addr)
+{
+       writel(((u32) (val)), (addr));
+       writel(((u32) (val >> 32)), (addr + 4));
+}
+#endif
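+
+/*
+ * The readq/writeq fallbacks above synthesize a 64-bit MMIO access from
+ * two 32-bit accesses: low dword at addr, high dword at addr + 4.
+ */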
+
+#define PCI_OFFSET_FIRST_RANGE(adapter, off)    \
+       ((adapter)->ahw.pci_base0 + (off))
+#define PCI_OFFSET_SECOND_RANGE(adapter, off)   \
+       ((adapter)->ahw.pci_base1 + (off) - SECOND_PAGE_GROUP_START)
+#define PCI_OFFSET_THIRD_RANGE(adapter, off)    \
+       ((adapter)->ahw.pci_base2 + (off) - THIRD_PAGE_GROUP_START)
+
+static void __iomem *pci_base_offset(struct netxen_adapter *adapter,
+                                           unsigned long off)
+{
+       if (ADDR_IN_RANGE(off, FIRST_PAGE_GROUP_START, FIRST_PAGE_GROUP_END))
+               return PCI_OFFSET_FIRST_RANGE(adapter, off);
+
+       if (ADDR_IN_RANGE(off, SECOND_PAGE_GROUP_START, SECOND_PAGE_GROUP_END))
+               return PCI_OFFSET_SECOND_RANGE(adapter, off);
+
+       if (ADDR_IN_RANGE(off, THIRD_PAGE_GROUP_START, THIRD_PAGE_GROUP_END))
+               return PCI_OFFSET_THIRD_RANGE(adapter, off);
+
+       return NULL;
+}
+
+static crb_128M_2M_block_map_t
+crb_128M_2M_map[64] __cacheline_aligned_in_smp = {
+    {{{0, 0,         0,         0} } },                /* 0: PCI */
+    {{{1, 0x0100000, 0x0102000, 0x120000},     /* 1: PCIE */
+         {1, 0x0110000, 0x0120000, 0x130000},
+         {1, 0x0120000, 0x0122000, 0x124000},
+         {1, 0x0130000, 0x0132000, 0x126000},
+         {1, 0x0140000, 0x0142000, 0x128000},
+         {1, 0x0150000, 0x0152000, 0x12a000},
+         {1, 0x0160000, 0x0170000, 0x110000},
+         {1, 0x0170000, 0x0172000, 0x12e000},
+         {0, 0x0000000, 0x0000000, 0x000000},
+         {0, 0x0000000, 0x0000000, 0x000000},
+         {0, 0x0000000, 0x0000000, 0x000000},
+         {0, 0x0000000, 0x0000000, 0x000000},
+         {0, 0x0000000, 0x0000000, 0x000000},
+         {0, 0x0000000, 0x0000000, 0x000000},
+         {1, 0x01e0000, 0x01e0800, 0x122000},
+         {0, 0x0000000, 0x0000000, 0x000000} } },
+       {{{1, 0x0200000, 0x0210000, 0x180000} } },/* 2: MN */
+    {{{0, 0,         0,         0} } },            /* 3: */
+    {{{1, 0x0400000, 0x0401000, 0x169000} } },/* 4: P2NR1 */
+    {{{1, 0x0500000, 0x0510000, 0x140000} } },/* 5: SRE   */
+    {{{1, 0x0600000, 0x0610000, 0x1c0000} } },/* 6: NIU   */
+    {{{1, 0x0700000, 0x0704000, 0x1b8000} } },/* 7: QM    */
+    {{{1, 0x0800000, 0x0802000, 0x170000},  /* 8: SQM0  */
+      {0, 0x0000000, 0x0000000, 0x000000},
+      {0, 0x0000000, 0x0000000, 0x000000},
+      {0, 0x0000000, 0x0000000, 0x000000},
+      {0, 0x0000000, 0x0000000, 0x000000},
+      {0, 0x0000000, 0x0000000, 0x000000},
+      {0, 0x0000000, 0x0000000, 0x000000},
+      {0, 0x0000000, 0x0000000, 0x000000},
+      {0, 0x0000000, 0x0000000, 0x000000},
+      {0, 0x0000000, 0x0000000, 0x000000},
+      {0, 0x0000000, 0x0000000, 0x000000},
+      {0, 0x0000000, 0x0000000, 0x000000},
+      {0, 0x0000000, 0x0000000, 0x000000},
+      {0, 0x0000000, 0x0000000, 0x000000},
+      {0, 0x0000000, 0x0000000, 0x000000},
+      {1, 0x08f0000, 0x08f2000, 0x172000} } },
+    {{{1, 0x0900000, 0x0902000, 0x174000},     /* 9: SQM1*/
+      {0, 0x0000000, 0x0000000, 0x000000},
+      {0, 0x0000000, 0x0000000, 0x000000},
+      {0, 0x0000000, 0x0000000, 0x000000},
+      {0, 0x0000000, 0x0000000, 0x000000},
+      {0, 0x0000000, 0x0000000, 0x000000},
+      {0, 0x0000000, 0x0000000, 0x000000},
+      {0, 0x0000000, 0x0000000, 0x000000},
+      {0, 0x0000000, 0x0000000, 0x000000},
+      {0, 0x0000000, 0x0000000, 0x000000},
+      {0, 0x0000000, 0x0000000, 0x000000},
+      {0, 0x0000000, 0x0000000, 0x000000},
+      {0, 0x0000000, 0x0000000, 0x000000},
+      {0, 0x0000000, 0x0000000, 0x000000},
+      {0, 0x0000000, 0x0000000, 0x000000},
+      {1, 0x09f0000, 0x09f2000, 0x176000} } },
+    {{{0, 0x0a00000, 0x0a02000, 0x178000},     /* 10: SQM2*/
+      {0, 0x0000000, 0x0000000, 0x000000},
+      {0, 0x0000000, 0x0000000, 0x000000},
+      {0, 0x0000000, 0x0000000, 0x000000},
+      {0, 0x0000000, 0x0000000, 0x000000},
+      {0, 0x0000000, 0x0000000, 0x000000},
+      {0, 0x0000000, 0x0000000, 0x000000},
+      {0, 0x0000000, 0x0000000, 0x000000},
+      {0, 0x0000000, 0x0000000, 0x000000},
+      {0, 0x0000000, 0x0000000, 0x000000},
+      {0, 0x0000000, 0x0000000, 0x000000},
+      {0, 0x0000000, 0x0000000, 0x000000},
+      {0, 0x0000000, 0x0000000, 0x000000},
+      {0, 0x0000000, 0x0000000, 0x000000},
+      {0, 0x0000000, 0x0000000, 0x000000},
+      {1, 0x0af0000, 0x0af2000, 0x17a000} } },
+    {{{0, 0x0b00000, 0x0b02000, 0x17c000},     /* 11: SQM3*/
+      {0, 0x0000000, 0x0000000, 0x000000},
+      {0, 0x0000000, 0x0000000, 0x000000},
+      {0, 0x0000000, 0x0000000, 0x000000},
+      {0, 0x0000000, 0x0000000, 0x000000},
+      {0, 0x0000000, 0x0000000, 0x000000},
+      {0, 0x0000000, 0x0000000, 0x000000},
+      {0, 0x0000000, 0x0000000, 0x000000},
+      {0, 0x0000000, 0x0000000, 0x000000},
+      {0, 0x0000000, 0x0000000, 0x000000},
+      {0, 0x0000000, 0x0000000, 0x000000},
+      {0, 0x0000000, 0x0000000, 0x000000},
+      {0, 0x0000000, 0x0000000, 0x000000},
+      {0, 0x0000000, 0x0000000, 0x000000},
+      {0, 0x0000000, 0x0000000, 0x000000},
+      {1, 0x0bf0000, 0x0bf2000, 0x17e000} } },
+       {{{1, 0x0c00000, 0x0c04000, 0x1d4000} } },/* 12: I2Q */
+       {{{1, 0x0d00000, 0x0d04000, 0x1a4000} } },/* 13: TMR */
+       {{{1, 0x0e00000, 0x0e04000, 0x1a0000} } },/* 14: ROMUSB */
+       {{{1, 0x0f00000, 0x0f01000, 0x164000} } },/* 15: PEG4 */
+       {{{0, 0x1000000, 0x1004000, 0x1a8000} } },/* 16: XDMA */
+       {{{1, 0x1100000, 0x1101000, 0x160000} } },/* 17: PEG0 */
+       {{{1, 0x1200000, 0x1201000, 0x161000} } },/* 18: PEG1 */
+       {{{1, 0x1300000, 0x1301000, 0x162000} } },/* 19: PEG2 */
+       {{{1, 0x1400000, 0x1401000, 0x163000} } },/* 20: PEG3 */
+       {{{1, 0x1500000, 0x1501000, 0x165000} } },/* 21: P2ND */
+       {{{1, 0x1600000, 0x1601000, 0x166000} } },/* 22: P2NI */
+       {{{0, 0,         0,         0} } },     /* 23: */
+       {{{0, 0,         0,         0} } },     /* 24: */
+       {{{0, 0,         0,         0} } },     /* 25: */
+       {{{0, 0,         0,         0} } },     /* 26: */
+       {{{0, 0,         0,         0} } },     /* 27: */
+       {{{0, 0,         0,         0} } },     /* 28: */
+       {{{1, 0x1d00000, 0x1d10000, 0x190000} } },/* 29: MS */
+    {{{1, 0x1e00000, 0x1e01000, 0x16a000} } },/* 30: P2NR2 */
+    {{{1, 0x1f00000, 0x1f10000, 0x150000} } },/* 31: EPG */
+       {{{0} } },                              /* 32: PCI */
+       {{{1, 0x2100000, 0x2102000, 0x120000},  /* 33: PCIE */
+         {1, 0x2110000, 0x2120000, 0x130000},
+         {1, 0x2120000, 0x2122000, 0x124000},
+         {1, 0x2130000, 0x2132000, 0x126000},
+         {1, 0x2140000, 0x2142000, 0x128000},
+         {1, 0x2150000, 0x2152000, 0x12a000},
+         {1, 0x2160000, 0x2170000, 0x110000},
+         {1, 0x2170000, 0x2172000, 0x12e000},
+         {0, 0x0000000, 0x0000000, 0x000000},
+         {0, 0x0000000, 0x0000000, 0x000000},
+         {0, 0x0000000, 0x0000000, 0x000000},
+         {0, 0x0000000, 0x0000000, 0x000000},
+         {0, 0x0000000, 0x0000000, 0x000000},
+         {0, 0x0000000, 0x0000000, 0x000000},
+         {0, 0x0000000, 0x0000000, 0x000000},
+         {0, 0x0000000, 0x0000000, 0x000000} } },
+       {{{1, 0x2200000, 0x2204000, 0x1b0000} } },/* 34: CAM */
+       {{{0} } },                              /* 35: */
+       {{{0} } },                              /* 36: */
+       {{{0} } },                              /* 37: */
+       {{{0} } },                              /* 38: */
+       {{{0} } },                              /* 39: */
+       {{{1, 0x2800000, 0x2804000, 0x1a4000} } },/* 40: TMR */
+       {{{1, 0x2900000, 0x2901000, 0x16b000} } },/* 41: P2NR3 */
+       {{{1, 0x2a00000, 0x2a00400, 0x1ac400} } },/* 42: RPMX1 */
+       {{{1, 0x2b00000, 0x2b00400, 0x1ac800} } },/* 43: RPMX2 */
+       {{{1, 0x2c00000, 0x2c00400, 0x1acc00} } },/* 44: RPMX3 */
+       {{{1, 0x2d00000, 0x2d00400, 0x1ad000} } },/* 45: RPMX4 */
+       {{{1, 0x2e00000, 0x2e00400, 0x1ad400} } },/* 46: RPMX5 */
+       {{{1, 0x2f00000, 0x2f00400, 0x1ad800} } },/* 47: RPMX6 */
+       {{{1, 0x3000000, 0x3000400, 0x1adc00} } },/* 48: RPMX7 */
+       {{{0, 0x3100000, 0x3104000, 0x1a8000} } },/* 49: XDMA */
+       {{{1, 0x3200000, 0x3204000, 0x1d4000} } },/* 50: I2Q */
+       {{{1, 0x3300000, 0x3304000, 0x1a0000} } },/* 51: ROMUSB */
+       {{{0} } },                              /* 52: */
+       {{{1, 0x3500000, 0x3500400, 0x1ac000} } },/* 53: RPMX0 */
+       {{{1, 0x3600000, 0x3600400, 0x1ae000} } },/* 54: RPMX8 */
+       {{{1, 0x3700000, 0x3700400, 0x1ae400} } },/* 55: RPMX9 */
+       {{{1, 0x3800000, 0x3804000, 0x1d0000} } },/* 56: OCM0 */
+       {{{1, 0x3900000, 0x3904000, 0x1b4000} } },/* 57: CRYPTO */
+       {{{1, 0x3a00000, 0x3a04000, 0x1d8000} } },/* 58: SMB */
+       {{{0} } },                              /* 59: I2C0 */
+       {{{0} } },                              /* 60: I2C1 */
+       {{{1, 0x3d00000, 0x3d04000, 0x1d8000} } },/* 61: LPC */
+       {{{1, 0x3e00000, 0x3e01000, 0x167000} } },/* 62: P2NC */
+       {{{1, 0x3f00000, 0x3f01000, 0x168000} } }       /* 63: P2NR0 */
+};
+
+/*
+ * top 12 bits of crb internal address (hub, agent)
+ */
+static unsigned crb_hub_agt[64] =
+{
+       0,
+       NETXEN_HW_CRB_HUB_AGT_ADR_PS,
+       NETXEN_HW_CRB_HUB_AGT_ADR_MN,
+       NETXEN_HW_CRB_HUB_AGT_ADR_MS,
+       0,
+       NETXEN_HW_CRB_HUB_AGT_ADR_SRE,
+       NETXEN_HW_CRB_HUB_AGT_ADR_NIU,
+       NETXEN_HW_CRB_HUB_AGT_ADR_QMN,
+       NETXEN_HW_CRB_HUB_AGT_ADR_SQN0,
+       NETXEN_HW_CRB_HUB_AGT_ADR_SQN1,
+       NETXEN_HW_CRB_HUB_AGT_ADR_SQN2,
+       NETXEN_HW_CRB_HUB_AGT_ADR_SQN3,
+       NETXEN_HW_CRB_HUB_AGT_ADR_I2Q,
+       NETXEN_HW_CRB_HUB_AGT_ADR_TIMR,
+       NETXEN_HW_CRB_HUB_AGT_ADR_ROMUSB,
+       NETXEN_HW_CRB_HUB_AGT_ADR_PGN4,
+       NETXEN_HW_CRB_HUB_AGT_ADR_XDMA,
+       NETXEN_HW_CRB_HUB_AGT_ADR_PGN0,
+       NETXEN_HW_CRB_HUB_AGT_ADR_PGN1,
+       NETXEN_HW_CRB_HUB_AGT_ADR_PGN2,
+       NETXEN_HW_CRB_HUB_AGT_ADR_PGN3,
+       NETXEN_HW_CRB_HUB_AGT_ADR_PGND,
+       NETXEN_HW_CRB_HUB_AGT_ADR_PGNI,
+       NETXEN_HW_CRB_HUB_AGT_ADR_PGS0,
+       NETXEN_HW_CRB_HUB_AGT_ADR_PGS1,
+       NETXEN_HW_CRB_HUB_AGT_ADR_PGS2,
+       NETXEN_HW_CRB_HUB_AGT_ADR_PGS3,
+       0,
+       NETXEN_HW_CRB_HUB_AGT_ADR_PGSI,
+       NETXEN_HW_CRB_HUB_AGT_ADR_SN,
+       0,
+       NETXEN_HW_CRB_HUB_AGT_ADR_EG,
+       0,
+       NETXEN_HW_CRB_HUB_AGT_ADR_PS,
+       NETXEN_HW_CRB_HUB_AGT_ADR_CAM,
+       0,
+       0,
+       0,
+       0,
+       0,
+       NETXEN_HW_CRB_HUB_AGT_ADR_TIMR,
+       0,
+       NETXEN_HW_CRB_HUB_AGT_ADR_RPMX1,
+       NETXEN_HW_CRB_HUB_AGT_ADR_RPMX2,
+       NETXEN_HW_CRB_HUB_AGT_ADR_RPMX3,
+       NETXEN_HW_CRB_HUB_AGT_ADR_RPMX4,
+       NETXEN_HW_CRB_HUB_AGT_ADR_RPMX5,
+       NETXEN_HW_CRB_HUB_AGT_ADR_RPMX6,
+       NETXEN_HW_CRB_HUB_AGT_ADR_RPMX7,
+       NETXEN_HW_CRB_HUB_AGT_ADR_XDMA,
+       NETXEN_HW_CRB_HUB_AGT_ADR_I2Q,
+       NETXEN_HW_CRB_HUB_AGT_ADR_ROMUSB,
+       0,
+       NETXEN_HW_CRB_HUB_AGT_ADR_RPMX0,
+       NETXEN_HW_CRB_HUB_AGT_ADR_RPMX8,
+       NETXEN_HW_CRB_HUB_AGT_ADR_RPMX9,
+       NETXEN_HW_CRB_HUB_AGT_ADR_OCM0,
+       0,
+       NETXEN_HW_CRB_HUB_AGT_ADR_SMB,
+       NETXEN_HW_CRB_HUB_AGT_ADR_I2C0,
+       NETXEN_HW_CRB_HUB_AGT_ADR_I2C1,
+       0,
+       NETXEN_HW_CRB_HUB_AGT_ADR_PGNC,
+       0,
+};
+
+/*  PCI Windowing for DDR regions.  */
+
+#define NETXEN_WINDOW_ONE      0x2000000 /*CRB Window: bit 25 of CRB address */
+
+#define NETXEN_PCIE_SEM_TIMEOUT        10000
+
+static int netxen_nic_set_mtu_xgb(struct netxen_adapter *adapter, int new_mtu);
+
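+/*
+ * netxen_pcie_sem_lock() polls the semaphore's LOCK register until it
+ * reads 1 (lock granted), giving up with -EIO after
+ * NETXEN_PCIE_SEM_TIMEOUT 1 ms polls; on success it optionally records
+ * adapter->portnum in id_reg. netxen_pcie_sem_unlock() releases the
+ * semaphore by reading the matching UNLOCK register.
+ */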
+int
+netxen_pcie_sem_lock(struct netxen_adapter *adapter, int sem, u32 id_reg)
+{
+       int done = 0, timeout = 0;
+
+       while (!done) {
+               done = NXRD32(adapter, NETXEN_PCIE_REG(PCIE_SEM_LOCK(sem)));
+               if (done == 1)
+                       break;
+               if (++timeout >= NETXEN_PCIE_SEM_TIMEOUT)
+                       return -EIO;
+               msleep(1);
+       }
+
+       if (id_reg)
+               NXWR32(adapter, id_reg, adapter->portnum);
+
+       return 0;
+}
+
+void
+netxen_pcie_sem_unlock(struct netxen_adapter *adapter, int sem)
+{
+       NXRD32(adapter, NETXEN_PCIE_REG(PCIE_SEM_UNLOCK(sem)));
+}
+
+static int netxen_niu_xg_init_port(struct netxen_adapter *adapter, int port)
+{
+       if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) {
+               NXWR32(adapter, NETXEN_NIU_XGE_CONFIG_1+(0x10000*port), 0x1447);
+               NXWR32(adapter, NETXEN_NIU_XGE_CONFIG_0+(0x10000*port), 0x5);
+       }
+
+       return 0;
+}
+
+/* Disable an XG interface */
+static int netxen_niu_disable_xg_port(struct netxen_adapter *adapter)
+{
+       __u32 mac_cfg;
+       u32 port = adapter->physical_port;
+
+       if (NX_IS_REVISION_P3(adapter->ahw.revision_id))
+               return 0;
+
+       if (port >= NETXEN_NIU_MAX_XG_PORTS)
+               return -EINVAL;
+
+       mac_cfg = 0;
+       if (NXWR32(adapter,
+                       NETXEN_NIU_XGE_CONFIG_0 + (0x10000 * port), mac_cfg))
+               return -EIO;
+       return 0;
+}
+
+#define NETXEN_UNICAST_ADDR(port, index) \
+       (NETXEN_UNICAST_ADDR_BASE+(port*32)+(index*8))
+#define NETXEN_MCAST_ADDR(port, index) \
+       (NETXEN_MULTICAST_ADDR_BASE+(port*0x80)+(index*8))
+#define MAC_HI(addr) \
+       ((addr[2] << 16) | (addr[1] << 8) | (addr[0]))
+#define MAC_LO(addr) \
+       ((addr[5] << 16) | (addr[4] << 8) | (addr[3]))
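+
+/*
+ * Per-port filter layout implied by the macros above: each port has a
+ * 32-byte unicast block (four 8-byte entries) and a 0x80-byte multicast
+ * block. MAC_HI/MAC_LO split a byte-array MAC into two 24-bit register
+ * halves, e.g. for 00:11:22:33:44:55 MAC_HI(addr) is 0x221100 and
+ * MAC_LO(addr) is 0x554433.
+ */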
+
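+/*
+ * Quiesce the XGE MAC (clear bit 2 of CONFIG_0), select this port's
+ * frame counter and wait up to roughly 200 ms for it to drain, then set
+ * or clear the promiscuous (0x2000) and accept-all-multicast (0x1000)
+ * bits in CONFIG_1 before re-enabling the MAC.
+ */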
+static int netxen_p2_nic_set_promisc(struct netxen_adapter *adapter, u32 mode)
+{
+       u32 mac_cfg;
+       u32 cnt = 0;
+       __u32 reg = 0x0200;
+       u32 port = adapter->physical_port;
+       u16 board_type = adapter->ahw.board_type;
+
+       if (port >= NETXEN_NIU_MAX_XG_PORTS)
+               return -EINVAL;
+
+       mac_cfg = NXRD32(adapter, NETXEN_NIU_XGE_CONFIG_0 + (0x10000 * port));
+       mac_cfg &= ~0x4;
+       NXWR32(adapter, NETXEN_NIU_XGE_CONFIG_0 + (0x10000 * port), mac_cfg);
+
+       if ((board_type == NETXEN_BRDTYPE_P2_SB31_10G_IMEZ) ||
+                       (board_type == NETXEN_BRDTYPE_P2_SB31_10G_HMEZ))
+               reg = (0x20 << port);
+
+       NXWR32(adapter, NETXEN_NIU_FRAME_COUNT_SELECT, reg);
+
+       mdelay(10);
+
+       while (NXRD32(adapter, NETXEN_NIU_FRAME_COUNT) && ++cnt < 20)
+               mdelay(10);
+
+       if (cnt < 20) {
+
+               reg = NXRD32(adapter,
+                       NETXEN_NIU_XGE_CONFIG_1 + (0x10000 * port));
+
+               if (mode == NETXEN_NIU_PROMISC_MODE)
+                       reg = (reg | 0x2000UL);
+               else
+                       reg = (reg & ~0x2000UL);
+
+               if (mode == NETXEN_NIU_ALLMULTI_MODE)
+                       reg = (reg | 0x1000UL);
+               else
+                       reg = (reg & ~0x1000UL);
+
+               NXWR32(adapter,
+                       NETXEN_NIU_XGE_CONFIG_1 + (0x10000 * port), reg);
+       }
+
+       mac_cfg |= 0x4;
+       NXWR32(adapter, NETXEN_NIU_XGE_CONFIG_0 + (0x10000 * port), mac_cfg);
+
+       return 0;
+}
+
+static int netxen_p2_nic_set_mac_addr(struct netxen_adapter *adapter, u8 *addr)
+{
+       u32 mac_hi, mac_lo;
+       u32 reg_hi, reg_lo;
+
+       u8 phy = adapter->physical_port;
+
+       if (phy >= NETXEN_NIU_MAX_XG_PORTS)
+               return -EINVAL;
+
+       mac_lo = ((u32)addr[0] << 16) | ((u32)addr[1] << 24);
+       mac_hi = addr[2] | ((u32)addr[3] << 8) |
+               ((u32)addr[4] << 16) | ((u32)addr[5] << 24);
+
+       reg_lo = NETXEN_NIU_XGE_STATION_ADDR_0_1 + (0x10000 * phy);
+       reg_hi = NETXEN_NIU_XGE_STATION_ADDR_0_HI + (0x10000 * phy);
+
+       /* write twice to flush */
+       if (NXWR32(adapter, reg_lo, mac_lo) || NXWR32(adapter, reg_hi, mac_hi))
+               return -EIO;
+       if (NXWR32(adapter, reg_lo, mac_lo) || NXWR32(adapter, reg_hi, mac_hi))
+               return -EIO;
+
+       return 0;
+}
+
+static int
+netxen_nic_enable_mcast_filter(struct netxen_adapter *adapter)
+{
+       u32     val = 0;
+       u16 port = adapter->physical_port;
+       u8 *addr = adapter->mac_addr;
+
+       if (adapter->mc_enabled)
+               return 0;
+
+       val = NXRD32(adapter, NETXEN_MAC_ADDR_CNTL_REG);
+       val |= (1UL << (28+port));
+       NXWR32(adapter, NETXEN_MAC_ADDR_CNTL_REG, val);
+
+       /* add broadcast addr to filter */
+       val = 0xffffff;
+       NXWR32(adapter, NETXEN_UNICAST_ADDR(port, 0), val);
+       NXWR32(adapter, NETXEN_UNICAST_ADDR(port, 0)+4, val);
+
+       /* add station addr to filter */
+       val = MAC_HI(addr);
+       NXWR32(adapter, NETXEN_UNICAST_ADDR(port, 1), val);
+       val = MAC_LO(addr);
+       NXWR32(adapter, NETXEN_UNICAST_ADDR(port, 1)+4, val);
+
+       adapter->mc_enabled = 1;
+       return 0;
+}
+
+static int
+netxen_nic_disable_mcast_filter(struct netxen_adapter *adapter)
+{
+       u32     val = 0;
+       u16 port = adapter->physical_port;
+       u8 *addr = adapter->mac_addr;
+
+       if (!adapter->mc_enabled)
+               return 0;
+
+       val = NXRD32(adapter, NETXEN_MAC_ADDR_CNTL_REG);
+       val &= ~(1UL << (28+port));
+       NXWR32(adapter, NETXEN_MAC_ADDR_CNTL_REG, val);
+
+       val = MAC_HI(addr);
+       NXWR32(adapter, NETXEN_UNICAST_ADDR(port, 0), val);
+       val = MAC_LO(addr);
+       NXWR32(adapter, NETXEN_UNICAST_ADDR(port, 0)+4, val);
+
+       NXWR32(adapter, NETXEN_UNICAST_ADDR(port, 1), 0);
+       NXWR32(adapter, NETXEN_UNICAST_ADDR(port, 1)+4, 0);
+
+       adapter->mc_enabled = 0;
+       return 0;
+}
+
+static int
+netxen_nic_set_mcast_addr(struct netxen_adapter *adapter,
+               int index, u8 *addr)
+{
+       u32 hi = 0, lo = 0;
+       u16 port = adapter->physical_port;
+
+       lo = MAC_LO(addr);
+       hi = MAC_HI(addr);
+
+       NXWR32(adapter, NETXEN_MCAST_ADDR(port, index), hi);
+       NXWR32(adapter, NETXEN_MCAST_ADDR(port, index)+4, lo);
+
+       return 0;
+}
+
+static void netxen_p2_nic_set_multi(struct net_device *netdev)
+{
+       struct netxen_adapter *adapter = netdev_priv(netdev);
+       struct netdev_hw_addr *ha;
+       u8 null_addr[ETH_ALEN];
+       int i;
+
+       eth_zero_addr(null_addr);
+
+       if (netdev->flags & IFF_PROMISC) {
+
+               adapter->set_promisc(adapter,
+                               NETXEN_NIU_PROMISC_MODE);
+
+               /* Full promiscuous mode */
+               netxen_nic_disable_mcast_filter(adapter);
+
+               return;
+       }
+
+       if (netdev_mc_empty(netdev)) {
+               adapter->set_promisc(adapter,
+                               NETXEN_NIU_NON_PROMISC_MODE);
+               netxen_nic_disable_mcast_filter(adapter);
+               return;
+       }
+
+       adapter->set_promisc(adapter, NETXEN_NIU_ALLMULTI_MODE);
+       if (netdev->flags & IFF_ALLMULTI ||
+                       netdev_mc_count(netdev) > adapter->max_mc_count) {
+               netxen_nic_disable_mcast_filter(adapter);
+               return;
+       }
+
+       netxen_nic_enable_mcast_filter(adapter);
+
+       i = 0;
+       netdev_for_each_mc_addr(ha, netdev)
+               netxen_nic_set_mcast_addr(adapter, i++, ha->addr);
+
+       /* Clear out remaining addresses */
+       while (i < adapter->max_mc_count)
+               netxen_nic_set_mcast_addr(adapter, i++, null_addr);
+}
+
+static int
+netxen_send_cmd_descs(struct netxen_adapter *adapter,
+               struct cmd_desc_type0 *cmd_desc_arr, int nr_desc)
+{
+       u32 i, producer, consumer;
+       struct netxen_cmd_buffer *pbuf;
+       struct cmd_desc_type0 *cmd_desc;
+       struct nx_host_tx_ring *tx_ring;
+
+       i = 0;
+
+       if (adapter->is_up != NETXEN_ADAPTER_UP_MAGIC)
+               return -EIO;
+
+       tx_ring = adapter->tx_ring;
+       __netif_tx_lock_bh(tx_ring->txq);
+
+       producer = tx_ring->producer;
+       consumer = tx_ring->sw_consumer;
+
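+       /*
+        * Not enough room: stop the queue, then re-check after the
+        * barrier in case the consumer advanced meanwhile; continue if
+        * space freed up (waking the queue once it is above
+        * TX_STOP_THRESH), otherwise bail out with -EBUSY.
+        */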
+       if (nr_desc >= netxen_tx_avail(tx_ring)) {
+               netif_tx_stop_queue(tx_ring->txq);
+               smp_mb();
+               if (netxen_tx_avail(tx_ring) > nr_desc) {
+                       if (netxen_tx_avail(tx_ring) > TX_STOP_THRESH)
+                               netif_tx_wake_queue(tx_ring->txq);
+               } else {
+                       __netif_tx_unlock_bh(tx_ring->txq);
+                       return -EBUSY;
+               }
+       }
+
+       do {
+               cmd_desc = &cmd_desc_arr[i];
+
+               pbuf = &tx_ring->cmd_buf_arr[producer];
+               pbuf->skb = NULL;
+               pbuf->frag_count = 0;
+
+               memcpy(&tx_ring->desc_head[producer],
+                       &cmd_desc_arr[i], sizeof(struct cmd_desc_type0));
+
+               producer = get_next_index(producer, tx_ring->num_desc);
+               i++;
+
+       } while (i != nr_desc);
+
+       tx_ring->producer = producer;
+
+       netxen_nic_update_cmd_producer(adapter, tx_ring);
+
+       __netif_tx_unlock_bh(tx_ring->txq);
+
+       return 0;
+}
+
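+/*
+ * Post a MAC add/delete request to the firmware: build an nx_nic_req_t
+ * carrying an nx_mac_req_t (op code plus address) and queue it on the
+ * tx ring as a single command descriptor via netxen_send_cmd_descs().
+ */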
+static int
+nx_p3_sre_macaddr_change(struct netxen_adapter *adapter, u8 *addr, unsigned op)
+{
+       nx_nic_req_t req;
+       nx_mac_req_t *mac_req;
+       u64 word;
+
+       memset(&req, 0, sizeof(nx_nic_req_t));
+       req.qhdr = cpu_to_le64(NX_NIC_REQUEST << 23);
+
+       word = NX_MAC_EVENT | ((u64)adapter->portnum << 16);
+       req.req_hdr = cpu_to_le64(word);
+
+       mac_req = (nx_mac_req_t *)&req.words[0];
+       mac_req->op = op;
+       memcpy(mac_req->mac_addr, addr, ETH_ALEN);
+
+       return netxen_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
+}
+
+static int nx_p3_nic_add_mac(struct netxen_adapter *adapter,
+               const u8 *addr, struct list_head *del_list)
+{
+       struct list_head *head;
+       nx_mac_list_t *cur;
+
+       /* look up if already exists */
+       list_for_each(head, del_list) {
+               cur = list_entry(head, nx_mac_list_t, list);
+
+               if (ether_addr_equal(addr, cur->mac_addr)) {
+                       list_move_tail(head, &adapter->mac_list);
+                       return 0;
+               }
+       }
+
+       cur = kzalloc(sizeof(nx_mac_list_t), GFP_ATOMIC);
+       if (cur == NULL)
+               return -ENOMEM;
+
+       memcpy(cur->mac_addr, addr, ETH_ALEN);
+       list_add_tail(&cur->list, &adapter->mac_list);
+       return nx_p3_sre_macaddr_change(adapter,
+                               cur->mac_addr, NETXEN_MAC_ADD);
+}
+
+static void netxen_p3_nic_set_multi(struct net_device *netdev)
+{
+       struct netxen_adapter *adapter = netdev_priv(netdev);
+       struct netdev_hw_addr *ha;
+       static const u8 bcast_addr[ETH_ALEN] = {
+               0xff, 0xff, 0xff, 0xff, 0xff, 0xff
+       };
+       u32 mode = VPORT_MISS_MODE_DROP;
+       LIST_HEAD(del_list);
+       struct list_head *head;
+       nx_mac_list_t *cur;
+
+       if (adapter->is_up != NETXEN_ADAPTER_UP_MAGIC)
+               return;
+
+       list_splice_tail_init(&adapter->mac_list, &del_list);
+
+       nx_p3_nic_add_mac(adapter, adapter->mac_addr, &del_list);
+       nx_p3_nic_add_mac(adapter, bcast_addr, &del_list);
+
+       if (netdev->flags & IFF_PROMISC) {
+               mode = VPORT_MISS_MODE_ACCEPT_ALL;
+               goto send_fw_cmd;
+       }
+
+       if ((netdev->flags & IFF_ALLMULTI) ||
+                       (netdev_mc_count(netdev) > adapter->max_mc_count)) {
+               mode = VPORT_MISS_MODE_ACCEPT_MULTI;
+               goto send_fw_cmd;
+       }
+
+       if (!netdev_mc_empty(netdev)) {
+               netdev_for_each_mc_addr(ha, netdev)
+                       nx_p3_nic_add_mac(adapter, ha->addr, &del_list);
+       }
+
+send_fw_cmd:
+       adapter->set_promisc(adapter, mode);
+       head = &del_list;
+       while (!list_empty(head)) {
+               cur = list_entry(head->next, nx_mac_list_t, list);
+
+               nx_p3_sre_macaddr_change(adapter,
+                               cur->mac_addr, NETXEN_MAC_DEL);
+               list_del(&cur->list);
+               kfree(cur);
+       }
+}
+
+static int netxen_p3_nic_set_promisc(struct netxen_adapter *adapter, u32 mode)
+{
+       nx_nic_req_t req;
+       u64 word;
+
+       memset(&req, 0, sizeof(nx_nic_req_t));
+
+       req.qhdr = cpu_to_le64(NX_HOST_REQUEST << 23);
+
+       word = NX_NIC_H2C_OPCODE_PROXY_SET_VPORT_MISS_MODE |
+                       ((u64)adapter->portnum << 16);
+       req.req_hdr = cpu_to_le64(word);
+
+       req.words[0] = cpu_to_le64(mode);
+
+       return netxen_send_cmd_descs(adapter,
+                               (struct cmd_desc_type0 *)&req, 1);
+}
+
+void netxen_p3_free_mac_list(struct netxen_adapter *adapter)
+{
+       nx_mac_list_t *cur;
+       struct list_head *head = &adapter->mac_list;
+
+       while (!list_empty(head)) {
+               cur = list_entry(head->next, nx_mac_list_t, list);
+               nx_p3_sre_macaddr_change(adapter,
+                               cur->mac_addr, NETXEN_MAC_DEL);
+               list_del(&cur->list);
+               kfree(cur);
+       }
+}
+
+static int netxen_p3_nic_set_mac_addr(struct netxen_adapter *adapter, u8 *addr)
+{
+       /* assuming caller has already copied new addr to netdev */
+       netxen_p3_nic_set_multi(adapter->netdev);
+       return 0;
+}
+
+#define        NETXEN_CONFIG_INTR_COALESCE     3
+
+/*
+ * Send the interrupt coalescing parameter set by ethtool to the card.
+ */
+int netxen_config_intr_coalesce(struct netxen_adapter *adapter)
+{
+       nx_nic_req_t req;
+       u64 word[6];
+       int rv, i;
+
+       memset(&req, 0, sizeof(nx_nic_req_t));
+       memset(word, 0, sizeof(word));
+
+       req.qhdr = cpu_to_le64(NX_HOST_REQUEST << 23);
+
+       word[0] = NETXEN_CONFIG_INTR_COALESCE | ((u64)adapter->portnum << 16);
+       req.req_hdr = cpu_to_le64(word[0]);
+
+       memcpy(&word[0], &adapter->coal, sizeof(adapter->coal));
+       for (i = 0; i < 6; i++)
+               req.words[i] = cpu_to_le64(word[i]);
+
+       rv = netxen_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
+       if (rv != 0) {
+               printk(KERN_ERR "ERROR. Could not send "
+                       "interrupt coalescing parameters\n");
+       }
+
+       return rv;
+}
+
+int netxen_config_hw_lro(struct netxen_adapter *adapter, int enable)
+{
+       nx_nic_req_t req;
+       u64 word;
+       int rv = 0;
+
+       if (!test_bit(__NX_FW_ATTACHED, &adapter->state))
+               return 0;
+
+       memset(&req, 0, sizeof(nx_nic_req_t));
+
+       req.qhdr = cpu_to_le64(NX_HOST_REQUEST << 23);
+
+       word = NX_NIC_H2C_OPCODE_CONFIG_HW_LRO | ((u64)adapter->portnum << 16);
+       req.req_hdr = cpu_to_le64(word);
+
+       req.words[0] = cpu_to_le64(enable);
+
+       rv = netxen_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
+       if (rv != 0) {
+               printk(KERN_ERR "ERROR. Could not send "
+                       "configure hw lro request\n");
+       }
+
+       return rv;
+}
+
+int netxen_config_bridged_mode(struct netxen_adapter *adapter, int enable)
+{
+       nx_nic_req_t req;
+       u64 word;
+       int rv = 0;
+
+       if (!!(adapter->flags & NETXEN_NIC_BRIDGE_ENABLED) == enable)
+               return rv;
+
+       memset(&req, 0, sizeof(nx_nic_req_t));
+
+       req.qhdr = cpu_to_le64(NX_HOST_REQUEST << 23);
+
+       word = NX_NIC_H2C_OPCODE_CONFIG_BRIDGING |
+               ((u64)adapter->portnum << 16);
+       req.req_hdr = cpu_to_le64(word);
+
+       req.words[0] = cpu_to_le64(enable);
+
+       rv = netxen_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
+       if (rv != 0) {
+               printk(KERN_ERR "ERROR. Could not send "
+                               "configure bridge mode request\n");
+       }
+
+       adapter->flags ^= NETXEN_NIC_BRIDGE_ENABLED;
+
+       return rv;
+}
+
+
+#define RSS_HASHTYPE_IP_TCP    0x3
+
+int netxen_config_rss(struct netxen_adapter *adapter, int enable)
+{
+       nx_nic_req_t req;
+       u64 word;
+       int i, rv;
+
+       static const u64 key[] = {
+               0xbeac01fa6a42b73bULL, 0x8030f20c77cb2da3ULL,
+               0xae7b30b4d0ca2bcbULL, 0x43a38fb04167253dULL,
+               0x255b0ec26d5a56daULL
+       };
+
+
+       memset(&req, 0, sizeof(nx_nic_req_t));
+       req.qhdr = cpu_to_le64(NX_HOST_REQUEST << 23);
+
+       word = NX_NIC_H2C_OPCODE_CONFIG_RSS | ((u64)adapter->portnum << 16);
+       req.req_hdr = cpu_to_le64(word);
+
+       /*
+        * RSS request:
+        * bits 3-0: hash_method
+        *      5-4: hash_type_ipv4
+        *      7-6: hash_type_ipv6
+        *        8: enable
+        *        9: use indirection table
+        *    47-10: reserved
+        *    63-48: indirection table mask
+        */
+       word =  ((u64)(RSS_HASHTYPE_IP_TCP & 0x3) << 4) |
+               ((u64)(RSS_HASHTYPE_IP_TCP & 0x3) << 6) |
+               ((u64)(enable & 0x1) << 8) |
+               ((0x7ULL) << 48);
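+       /*
+        * e.g. with enable == 1 this yields 0x00070000000001f0:
+        * TCP/IP hashing for both IPv4 and IPv6, enable bit set,
+        * indirection table mask 0x7.
+        */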
+       req.words[0] = cpu_to_le64(word);
+       for (i = 0; i < ARRAY_SIZE(key); i++)
+               req.words[i+1] = cpu_to_le64(key[i]);
+
+
+       rv = netxen_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
+       if (rv != 0) {
+               printk(KERN_ERR "%s: could not configure RSS\n",
+                               adapter->netdev->name);
+       }
+
+       return rv;
+}
+
+int netxen_config_ipaddr(struct netxen_adapter *adapter, __be32 ip, int cmd)
+{
+       nx_nic_req_t req;
+       u64 word;
+       int rv;
+
+       memset(&req, 0, sizeof(nx_nic_req_t));
+       req.qhdr = cpu_to_le64(NX_HOST_REQUEST << 23);
+
+       word = NX_NIC_H2C_OPCODE_CONFIG_IPADDR | ((u64)adapter->portnum << 16);
+       req.req_hdr = cpu_to_le64(word);
+
+       req.words[0] = cpu_to_le64(cmd);
+       memcpy(&req.words[1], &ip, sizeof(u32));
+
+       rv = netxen_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
+       if (rv != 0) {
+               printk(KERN_ERR "%s: could not notify %s IP 0x%x request\n",
+                               adapter->netdev->name,
+                               (cmd == NX_IP_UP) ? "Add" : "Remove", ip);
+       }
+       return rv;
+}
+
+int netxen_linkevent_request(struct netxen_adapter *adapter, int enable)
+{
+       nx_nic_req_t req;
+       u64 word;
+       int rv;
+
+       memset(&req, 0, sizeof(nx_nic_req_t));
+       req.qhdr = cpu_to_le64(NX_HOST_REQUEST << 23);
+
+       word = NX_NIC_H2C_OPCODE_GET_LINKEVENT | ((u64)adapter->portnum << 16);
+       req.req_hdr = cpu_to_le64(word);
+       req.words[0] = cpu_to_le64(enable | (enable << 8));
+
+       rv = netxen_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
+       if (rv != 0) {
+               printk(KERN_ERR "%s: could not configure link notification\n",
+                               adapter->netdev->name);
+       }
+
+       return rv;
+}
+
+int netxen_send_lro_cleanup(struct netxen_adapter *adapter)
+{
+       nx_nic_req_t req;
+       u64 word;
+       int rv;
+
+       if (!test_bit(__NX_FW_ATTACHED, &adapter->state))
+               return 0;
+
+       memset(&req, 0, sizeof(nx_nic_req_t));
+       req.qhdr = cpu_to_le64(NX_HOST_REQUEST << 23);
+
+       word = NX_NIC_H2C_OPCODE_LRO_REQUEST |
+               ((u64)adapter->portnum << 16) |
+               ((u64)NX_NIC_LRO_REQUEST_CLEANUP << 56);
+
+       req.req_hdr = cpu_to_le64(word);
+
+       rv = netxen_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
+       if (rv != 0) {
+               printk(KERN_ERR "%s: could not cleanup lro flows\n",
+                               adapter->netdev->name);
+       }
+       return rv;
+}
+
+/*
+ * netxen_nic_change_mtu - Change the Maximum Transmission Unit
+ * @returns 0 on success, negative on failure
+ */
+
+#define MTU_FUDGE_FACTOR       100
+
+int netxen_nic_change_mtu(struct net_device *netdev, int mtu)
+{
+       struct netxen_adapter *adapter = netdev_priv(netdev);
+       int max_mtu;
+       int rc = 0;
+
+       if (NX_IS_REVISION_P3(adapter->ahw.revision_id))
+               max_mtu = P3_MAX_MTU;
+       else
+               max_mtu = P2_MAX_MTU;
+
+       if (mtu > max_mtu) {
+               printk(KERN_ERR "%s: mtu > %d bytes unsupported\n",
+                               netdev->name, max_mtu);
+               return -EINVAL;
+       }
+
+       if (adapter->set_mtu)
+               rc = adapter->set_mtu(adapter, mtu);
+
+       if (!rc)
+               netdev->mtu = mtu;
+
+       return rc;
+}
+
+static int netxen_get_flash_block(struct netxen_adapter *adapter, int base,
+                                 int size, __le32 *buf)
+{
+       int i, v, addr;
+       __le32 *ptr32;
+       int ret;
+
+       addr = base;
+       ptr32 = buf;
+       for (i = 0; i < size / sizeof(u32); i++) {
+               ret = netxen_rom_fast_read(adapter, addr, &v);
+               if (ret)
+                       return ret;
+
+               *ptr32 = cpu_to_le32(v);
+               ptr32++;
+               addr += sizeof(u32);
+       }
+       if ((char *)buf + size > (char *)ptr32) {
+               __le32 local;
+               ret = netxen_rom_fast_read(adapter, addr, &v);
+               if (ret)
+                       return ret;
+               local = cpu_to_le32(v);
+               memcpy(ptr32, &local, (char *)buf + size - (char *)ptr32);
+       }
+
+       return 0;
+}
+
+int netxen_get_flash_mac_addr(struct netxen_adapter *adapter, u64 *mac)
+{
+       __le32 *pmac = (__le32 *) mac;
+       u32 offset;
+
+       offset = NX_FW_MAC_ADDR_OFFSET + (adapter->portnum * sizeof(u64));
+
+       if (netxen_get_flash_block(adapter, offset, sizeof(u64), pmac) == -1)
+               return -1;
+
+       if (*mac == ~0ULL) {
+
+               offset = NX_OLD_MAC_ADDR_OFFSET +
+                       (adapter->portnum * sizeof(u64));
+
+               if (netxen_get_flash_block(adapter,
+                                       offset, sizeof(u64), pmac) == -1)
+                       return -1;
+
+               if (*mac == ~0ULL)
+                       return -1;
+       }
+       return 0;
+}
+
+int netxen_p3_get_mac_addr(struct netxen_adapter *adapter, u64 *mac)
+{
+       uint32_t crbaddr, mac_hi, mac_lo;
+       int pci_func = adapter->ahw.pci_func;
+
+       crbaddr = CRB_MAC_BLOCK_START +
+               (4 * ((pci_func/2) * 3)) + (4 * (pci_func & 1));
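+       /*
+        * Each pair of PCI functions shares a block of three 32-bit CRB
+        * words holding two 48-bit MAC addresses back to back: the even
+        * function's address sits in words 0-1, the odd function's in the
+        * top half of word 1 plus word 2, hence the two extractions below.
+        */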
+
+       mac_lo = NXRD32(adapter, crbaddr);
+       mac_hi = NXRD32(adapter, crbaddr+4);
+
+       if (pci_func & 1)
+               *mac = le64_to_cpu((mac_lo >> 16) | ((u64)mac_hi << 16));
+       else
+               *mac = le64_to_cpu((u64)mac_lo | ((u64)mac_hi << 32));
+
+       return 0;
+}
+
+/*
+ * Changes the CRB window to the specified window.
+ */
+static void
+netxen_nic_pci_set_crbwindow_128M(struct netxen_adapter *adapter,
+               u32 window)
+{
+       void __iomem *offset;
+       int count = 10;
+       u8 func = adapter->ahw.pci_func;
+
+       if (adapter->ahw.crb_win == window)
+               return;
+
+       offset = PCI_OFFSET_SECOND_RANGE(adapter,
+                       NETXEN_PCIX_PH_REG(PCIE_CRB_WINDOW_REG(func)));
+
+       writel(window, offset);
+       do {
+               if (window == readl(offset))
+                       break;
+
+               if (printk_ratelimit())
+                       dev_warn(&adapter->pdev->dev,
+                                       "failed to set CRB window to %d\n",
+                                       (window == NETXEN_WINDOW_ONE));
+               udelay(1);
+
+       } while (--count > 0);
+
+       if (count > 0)
+               adapter->ahw.crb_win = window;
+}
+
+/*
+ * Returns < 0 if off is not valid,
+ *      1 if window access is needed. 'off' is set to offset from
+ *        CRB space in 128M pci map
+ *      0 if no window access is needed. 'off' is set to 2M addr
+ * In: 'off' is offset from base in 128M pci map
+ */
+static int
+netxen_nic_pci_get_crb_addr_2M(struct netxen_adapter *adapter,
+               ulong off, void __iomem **addr)
+{
+       crb_128M_2M_sub_block_map_t *m;
+
+
+       if ((off >= NETXEN_CRB_MAX) || (off < NETXEN_PCI_CRBSPACE))
+               return -EINVAL;
+
+       off -= NETXEN_PCI_CRBSPACE;
+
+       /*
+        * Try direct map
+        */
+       m = &crb_128M_2M_map[CRB_BLK(off)].sub_block[CRB_SUBBLK(off)];
+
+       if (m->valid && (m->start_128M <= off) && (m->end_128M > off)) {
+               *addr = adapter->ahw.pci_base0 + m->start_2M +
+                       (off - m->start_128M);
+               return 0;
+       }
+
+       /*
+        * Not in direct map, use crb window
+        */
+       *addr = adapter->ahw.pci_base0 + CRB_INDIRECT_2M +
+               (off & MASK(16));
+       return 1;
+}
+
+/*
+ * In: 'off' is offset from CRB space in 128M pci map
+ * Out: 'off' is 2M pci map addr
+ * side effect: lock crb window
+ */
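+/*
+ * CRB offsets that fall outside the direct map are reached through a
+ * sliding window: the value written at CRB_WINDOW_2M selects which region
+ * (roughly 64KB, given the 16-bit offset mask used above) is visible at
+ * CRB_INDIRECT_2M, and the low 16 bits of 'off' index into it.
+ */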
+static void
+netxen_nic_pci_set_crbwindow_2M(struct netxen_adapter *adapter, ulong off)
+{
+       u32 window;
+       void __iomem *addr = adapter->ahw.pci_base0 + CRB_WINDOW_2M;
+
+       off -= NETXEN_PCI_CRBSPACE;
+
+       window = CRB_HI(off);
+
+       writel(window, addr);
+       if (readl(addr) != window) {
+               if (printk_ratelimit())
+                       dev_warn(&adapter->pdev->dev,
+                               "failed to set CRB window to %d off 0x%lx\n",
+                               window, off);
+       }
+}
+
+static void __iomem *
+netxen_nic_map_indirect_address_128M(struct netxen_adapter *adapter,
+               ulong win_off, void __iomem **mem_ptr)
+{
+       ulong off = win_off;
+       void __iomem *addr;
+       resource_size_t mem_base;
+
+       if (ADDR_IN_WINDOW1(win_off))
+               off = NETXEN_CRB_NORMAL(win_off);
+
+       addr = pci_base_offset(adapter, off);
+       if (addr)
+               return addr;
+
+       if (adapter->ahw.pci_len0 == 0)
+               off -= NETXEN_PCI_CRBSPACE;
+
+       mem_base = pci_resource_start(adapter->pdev, 0);
+       *mem_ptr = ioremap(mem_base + (off & PAGE_MASK), PAGE_SIZE);
+       if (*mem_ptr)
+               addr = *mem_ptr + (off & (PAGE_SIZE - 1));
+
+       return addr;
+}
+
+static int
+netxen_nic_hw_write_wx_128M(struct netxen_adapter *adapter, ulong off, u32 data)
+{
+       unsigned long flags;
+       void __iomem *addr, *mem_ptr = NULL;
+
+       addr = netxen_nic_map_indirect_address_128M(adapter, off, &mem_ptr);
+       if (!addr)
+               return -EIO;
+
+       if (ADDR_IN_WINDOW1(off)) { /* Window 1 */
+               netxen_nic_io_write_128M(adapter, addr, data);
+       } else {        /* Window 0 */
+               write_lock_irqsave(&adapter->ahw.crb_lock, flags);
+               netxen_nic_pci_set_crbwindow_128M(adapter, 0);
+               writel(data, addr);
+               netxen_nic_pci_set_crbwindow_128M(adapter,
+                               NETXEN_WINDOW_ONE);
+               write_unlock_irqrestore(&adapter->ahw.crb_lock, flags);
+       }
+
+       if (mem_ptr)
+               iounmap(mem_ptr);
+
+       return 0;
+}
+
+static u32
+netxen_nic_hw_read_wx_128M(struct netxen_adapter *adapter, ulong off)
+{
+       unsigned long flags;
+       void __iomem *addr, *mem_ptr = NULL;
+       u32 data;
+
+       addr = netxen_nic_map_indirect_address_128M(adapter, off, &mem_ptr);
+       if (!addr)
+               return -EIO;
+
+       if (ADDR_IN_WINDOW1(off)) { /* Window 1 */
+               data = netxen_nic_io_read_128M(adapter, addr);
+       } else {        /* Window 0 */
+               write_lock_irqsave(&adapter->ahw.crb_lock, flags);
+               netxen_nic_pci_set_crbwindow_128M(adapter, 0);
+               data = readl(addr);
+               netxen_nic_pci_set_crbwindow_128M(adapter,
+                               NETXEN_WINDOW_ONE);
+               write_unlock_irqrestore(&adapter->ahw.crb_lock, flags);
+       }
+
+       if (mem_ptr)
+               iounmap(mem_ptr);
+
+       return data;
+}
+
+static int
+netxen_nic_hw_write_wx_2M(struct netxen_adapter *adapter, ulong off, u32 data)
+{
+       unsigned long flags;
+       int rv;
+       void __iomem *addr = NULL;
+
+       rv = netxen_nic_pci_get_crb_addr_2M(adapter, off, &addr);
+
+       if (rv == 0) {
+               writel(data, addr);
+               return 0;
+       }
+
+       if (rv > 0) {
+               /* indirect access */
+               write_lock_irqsave(&adapter->ahw.crb_lock, flags);
+               crb_win_lock(adapter);
+               netxen_nic_pci_set_crbwindow_2M(adapter, off);
+               writel(data, addr);
+               crb_win_unlock(adapter);
+               write_unlock_irqrestore(&adapter->ahw.crb_lock, flags);
+               return 0;
+       }
+
+       dev_err(&adapter->pdev->dev,
+                       "%s: invalid offset: 0x%016lx\n", __func__, off);
+       dump_stack();
+       return -EIO;
+}
+
+static u32
+netxen_nic_hw_read_wx_2M(struct netxen_adapter *adapter, ulong off)
+{
+       unsigned long flags;
+       int rv;
+       u32 data;
+       void __iomem *addr = NULL;
+
+       rv = netxen_nic_pci_get_crb_addr_2M(adapter, off, &addr);
+
+       if (rv == 0)
+               return readl(addr);
+
+       if (rv > 0) {
+               /* indirect access */
+               write_lock_irqsave(&adapter->ahw.crb_lock, flags);
+               crb_win_lock(adapter);
+               netxen_nic_pci_set_crbwindow_2M(adapter, off);
+               data = readl(addr);
+               crb_win_unlock(adapter);
+               write_unlock_irqrestore(&adapter->ahw.crb_lock, flags);
+               return data;
+       }
+
+       dev_err(&adapter->pdev->dev,
+                       "%s: invalid offset: 0x%016lx\n", __func__, off);
+       dump_stack();
+       return -1;
+}
+
+/* window 1 registers only */
+static void netxen_nic_io_write_128M(struct netxen_adapter *adapter,
+               void __iomem *addr, u32 data)
+{
+       read_lock(&adapter->ahw.crb_lock);
+       writel(data, addr);
+       read_unlock(&adapter->ahw.crb_lock);
+}
+
+static u32 netxen_nic_io_read_128M(struct netxen_adapter *adapter,
+               void __iomem *addr)
+{
+       u32 val;
+
+       read_lock(&adapter->ahw.crb_lock);
+       val = readl(addr);
+       read_unlock(&adapter->ahw.crb_lock);
+
+       return val;
+}
+
+static void netxen_nic_io_write_2M(struct netxen_adapter *adapter,
+               void __iomem *addr, u32 data)
+{
+       writel(data, addr);
+}
+
+static u32 netxen_nic_io_read_2M(struct netxen_adapter *adapter,
+               void __iomem *addr)
+{
+       return readl(addr);
+}
+
+void __iomem *
+netxen_get_ioaddr(struct netxen_adapter *adapter, u32 offset)
+{
+       void __iomem *addr = NULL;
+
+       if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) {
+               if ((offset < NETXEN_CRB_PCIX_HOST2) &&
+                               (offset > NETXEN_CRB_PCIX_HOST))
+                       addr = PCI_OFFSET_SECOND_RANGE(adapter, offset);
+               else
+                       addr = NETXEN_CRB_NORMALIZE(adapter, offset);
+       } else {
+               WARN_ON(netxen_nic_pci_get_crb_addr_2M(adapter,
+                                       offset, &addr));
+       }
+
+       return addr;
+}
+
+static int
+netxen_nic_pci_set_window_128M(struct netxen_adapter *adapter,
+               u64 addr, u32 *start)
+{
+       if (ADDR_IN_RANGE(addr, NETXEN_ADDR_OCM0, NETXEN_ADDR_OCM0_MAX)) {
+               *start = (addr - NETXEN_ADDR_OCM0  + NETXEN_PCI_OCM0);
+               return 0;
+       } else if (ADDR_IN_RANGE(addr,
+                               NETXEN_ADDR_OCM1, NETXEN_ADDR_OCM1_MAX)) {
+               *start = (addr - NETXEN_ADDR_OCM1 + NETXEN_PCI_OCM1);
+               return 0;
+       }
+
+       return -EIO;
+}
+
+static int
+netxen_nic_pci_set_window_2M(struct netxen_adapter *adapter,
+               u64 addr, u32 *start)
+{
+       u32 window;
+
+       window = OCM_WIN(addr);
+
+       writel(window, adapter->ahw.ocm_win_crb);
+       /* read back to flush */
+       readl(adapter->ahw.ocm_win_crb);
+
+       adapter->ahw.ocm_win = window;
+       *start = NETXEN_PCI_OCM0_2M + GET_MEM_OFFS_2M(addr);
+       return 0;
+}
+
+static int
+netxen_nic_pci_mem_access_direct(struct netxen_adapter *adapter, u64 off,
+               u64 *data, int op)
+{
+       void __iomem *addr, *mem_ptr = NULL;
+       resource_size_t mem_base;
+       int ret;
+       u32 start;
+
+       spin_lock(&adapter->ahw.mem_lock);
+
+       ret = adapter->pci_set_window(adapter, off, &start);
+       if (ret != 0)
+               goto unlock;
+
+       if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) {
+               addr = adapter->ahw.pci_base0 + start;
+       } else {
+               addr = pci_base_offset(adapter, start);
+               if (addr)
+                       goto noremap;
+
+               mem_base = pci_resource_start(adapter->pdev, 0) +
+                                       (start & PAGE_MASK);
+               mem_ptr = ioremap(mem_base, PAGE_SIZE);
+               if (mem_ptr == NULL) {
+                       ret = -EIO;
+                       goto unlock;
+               }
+
+               addr = mem_ptr + (start & (PAGE_SIZE-1));
+       }
+noremap:
+       if (op == 0)    /* read */
+               *data = readq(addr);
+       else            /* write */
+               writeq(*data, addr);
+
+unlock:
+       spin_unlock(&adapter->ahw.mem_lock);
+
+       if (mem_ptr)
+               iounmap(mem_ptr);
+       return ret;
+}
+
+void
+netxen_pci_camqm_read_2M(struct netxen_adapter *adapter, u64 off, u64 *data)
+{
+       void __iomem *addr = adapter->ahw.pci_base0 +
+               NETXEN_PCI_CAMQM_2M_BASE + (off - NETXEN_PCI_CAMQM);
+
+       spin_lock(&adapter->ahw.mem_lock);
+       *data = readq(addr);
+       spin_unlock(&adapter->ahw.mem_lock);
+}
+
+void
+netxen_pci_camqm_write_2M(struct netxen_adapter *adapter, u64 off, u64 data)
+{
+       void __iomem *addr = adapter->ahw.pci_base0 +
+               NETXEN_PCI_CAMQM_2M_BASE + (off - NETXEN_PCI_CAMQM);
+
+       spin_lock(&adapter->ahw.mem_lock);
+       writeq(data, addr);
+       spin_unlock(&adapter->ahw.mem_lock);
+}
+
+#define MAX_CTL_CHECK   1000
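+/*
+ * The DDR/QDR accessors below go through the MIU/SIU test agents: program
+ * the target address and data registers, kick TA_CTL_START, then poll
+ * TEST_AGT_CTRL up to MAX_CTL_CHECK times for TA_CTL_BUSY to clear.
+ */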
+
+static int
+netxen_nic_pci_mem_write_128M(struct netxen_adapter *adapter,
+               u64 off, u64 data)
+{
+       int j, ret;
+       u32 temp, off_lo, off_hi, addr_hi, data_hi, data_lo;
+       void __iomem *mem_crb;
+
+       /* Only 64-bit aligned access */
+       if (off & 7)
+               return -EIO;
+
+       /* P2 has different SIU and MIU test agent base addr */
+       if (ADDR_IN_RANGE(off, NETXEN_ADDR_QDR_NET,
+                               NETXEN_ADDR_QDR_NET_MAX_P2)) {
+               mem_crb = pci_base_offset(adapter,
+                               NETXEN_CRB_QDR_NET+SIU_TEST_AGT_BASE);
+               addr_hi = SIU_TEST_AGT_ADDR_HI;
+               data_lo = SIU_TEST_AGT_WRDATA_LO;
+               data_hi = SIU_TEST_AGT_WRDATA_HI;
+               off_lo = off & SIU_TEST_AGT_ADDR_MASK;
+               off_hi = SIU_TEST_AGT_UPPER_ADDR(off);
+               goto correct;
+       }
+
+       if (ADDR_IN_RANGE(off, NETXEN_ADDR_DDR_NET, NETXEN_ADDR_DDR_NET_MAX)) {
+               mem_crb = pci_base_offset(adapter,
+                               NETXEN_CRB_DDR_NET+MIU_TEST_AGT_BASE);
+               addr_hi = MIU_TEST_AGT_ADDR_HI;
+               data_lo = MIU_TEST_AGT_WRDATA_LO;
+               data_hi = MIU_TEST_AGT_WRDATA_HI;
+               off_lo = off & MIU_TEST_AGT_ADDR_MASK;
+               off_hi = 0;
+               goto correct;
+       }
+
+       if (ADDR_IN_RANGE(off, NETXEN_ADDR_OCM0, NETXEN_ADDR_OCM0_MAX) ||
+               ADDR_IN_RANGE(off, NETXEN_ADDR_OCM1, NETXEN_ADDR_OCM1_MAX)) {
+               if (adapter->ahw.pci_len0 != 0) {
+                       return netxen_nic_pci_mem_access_direct(adapter,
+                                       off, &data, 1);
+               }
+       }
+
+       return -EIO;
+
+correct:
+       spin_lock(&adapter->ahw.mem_lock);
+       netxen_nic_pci_set_crbwindow_128M(adapter, 0);
+
+       writel(off_lo, (mem_crb + MIU_TEST_AGT_ADDR_LO));
+       writel(off_hi, (mem_crb + addr_hi));
+       writel(data & 0xffffffff, (mem_crb + data_lo));
+       writel((data >> 32) & 0xffffffff, (mem_crb + data_hi));
+       writel((TA_CTL_ENABLE | TA_CTL_WRITE), (mem_crb + TEST_AGT_CTRL));
+       writel((TA_CTL_START | TA_CTL_ENABLE | TA_CTL_WRITE),
+                       (mem_crb + TEST_AGT_CTRL));
+
+       for (j = 0; j < MAX_CTL_CHECK; j++) {
+               temp = readl((mem_crb + TEST_AGT_CTRL));
+               if ((temp & TA_CTL_BUSY) == 0)
+                       break;
+       }
+
+       if (j >= MAX_CTL_CHECK) {
+               if (printk_ratelimit())
+                       dev_err(&adapter->pdev->dev,
+                                       "failed to write through agent\n");
+               ret = -EIO;
+       } else
+               ret = 0;
+
+       netxen_nic_pci_set_crbwindow_128M(adapter, NETXEN_WINDOW_ONE);
+       spin_unlock(&adapter->ahw.mem_lock);
+       return ret;
+}
+
+static int
+netxen_nic_pci_mem_read_128M(struct netxen_adapter *adapter,
+               u64 off, u64 *data)
+{
+       int j, ret;
+       u32 temp, off_lo, off_hi, addr_hi, data_hi, data_lo;
+       u64 val;
+       void __iomem *mem_crb;
+
+       /* Only 64-bit aligned access */
+       if (off & 7)
+               return -EIO;
+
+       /* P2 has different SIU and MIU test agent base addr */
+       if (ADDR_IN_RANGE(off, NETXEN_ADDR_QDR_NET,
+                               NETXEN_ADDR_QDR_NET_MAX_P2)) {
+               mem_crb = pci_base_offset(adapter,
+                               NETXEN_CRB_QDR_NET+SIU_TEST_AGT_BASE);
+               addr_hi = SIU_TEST_AGT_ADDR_HI;
+               data_lo = SIU_TEST_AGT_RDDATA_LO;
+               data_hi = SIU_TEST_AGT_RDDATA_HI;
+               off_lo = off & SIU_TEST_AGT_ADDR_MASK;
+               off_hi = SIU_TEST_AGT_UPPER_ADDR(off);
+               goto correct;
+       }
+
+       if (ADDR_IN_RANGE(off, NETXEN_ADDR_DDR_NET, NETXEN_ADDR_DDR_NET_MAX)) {
+               mem_crb = pci_base_offset(adapter,
+                               NETXEN_CRB_DDR_NET+MIU_TEST_AGT_BASE);
+               addr_hi = MIU_TEST_AGT_ADDR_HI;
+               data_lo = MIU_TEST_AGT_RDDATA_LO;
+               data_hi = MIU_TEST_AGT_RDDATA_HI;
+               off_lo = off & MIU_TEST_AGT_ADDR_MASK;
+               off_hi = 0;
+               goto correct;
+       }
+
+       if (ADDR_IN_RANGE(off, NETXEN_ADDR_OCM0, NETXEN_ADDR_OCM0_MAX) ||
+               ADDR_IN_RANGE(off, NETXEN_ADDR_OCM1, NETXEN_ADDR_OCM1_MAX)) {
+               if (adapter->ahw.pci_len0 != 0) {
+                       return netxen_nic_pci_mem_access_direct(adapter,
+                                       off, data, 0);
+               }
+       }
+
+       return -EIO;
+
+correct:
+       spin_lock(&adapter->ahw.mem_lock);
+       netxen_nic_pci_set_crbwindow_128M(adapter, 0);
+
+       writel(off_lo, (mem_crb + MIU_TEST_AGT_ADDR_LO));
+       writel(off_hi, (mem_crb + addr_hi));
+       writel(TA_CTL_ENABLE, (mem_crb + TEST_AGT_CTRL));
+       writel((TA_CTL_START|TA_CTL_ENABLE), (mem_crb + TEST_AGT_CTRL));
+
+       for (j = 0; j < MAX_CTL_CHECK; j++) {
+               temp = readl(mem_crb + TEST_AGT_CTRL);
+               if ((temp & TA_CTL_BUSY) == 0)
+                       break;
+       }
+
+       if (j >= MAX_CTL_CHECK) {
+               if (printk_ratelimit())
+                       dev_err(&adapter->pdev->dev,
+                                       "failed to read through agent\n");
+               ret = -EIO;
+       } else {
+
+               temp = readl(mem_crb + data_hi);
+               val = ((u64)temp << 32);
+               val |= readl(mem_crb + data_lo);
+               *data = val;
+               ret = 0;
+       }
+
+       netxen_nic_pci_set_crbwindow_128M(adapter, NETXEN_WINDOW_ONE);
+       spin_unlock(&adapter->ahw.mem_lock);
+
+       return ret;
+}
+
+static int
+netxen_nic_pci_mem_write_2M(struct netxen_adapter *adapter,
+               u64 off, u64 data)
+{
+       int j, ret;
+       u32 temp, off8;
+       void __iomem *mem_crb;
+
+       /* Only 64-bit aligned access */
+       if (off & 7)
+               return -EIO;
+
+       /* P3 onward, test agent base for MIU and SIU is same */
+       if (ADDR_IN_RANGE(off, NETXEN_ADDR_QDR_NET,
+                               NETXEN_ADDR_QDR_NET_MAX_P3)) {
+               mem_crb = netxen_get_ioaddr(adapter,
+                               NETXEN_CRB_QDR_NET+MIU_TEST_AGT_BASE);
+               goto correct;
+       }
+
+       if (ADDR_IN_RANGE(off, NETXEN_ADDR_DDR_NET, NETXEN_ADDR_DDR_NET_MAX)) {
+               mem_crb = netxen_get_ioaddr(adapter,
+                               NETXEN_CRB_DDR_NET+MIU_TEST_AGT_BASE);
+               goto correct;
+       }
+
+       if (ADDR_IN_RANGE(off, NETXEN_ADDR_OCM0, NETXEN_ADDR_OCM0_MAX))
+               return netxen_nic_pci_mem_access_direct(adapter, off, &data, 1);
+
+       return -EIO;
+
+correct:
+       off8 = off & 0xfffffff8;
+
+       spin_lock(&adapter->ahw.mem_lock);
+
+       writel(off8, (mem_crb + MIU_TEST_AGT_ADDR_LO));
+       writel(0, (mem_crb + MIU_TEST_AGT_ADDR_HI));
+
+       writel(data & 0xffffffff,
+                       mem_crb + MIU_TEST_AGT_WRDATA_LO);
+       writel((data >> 32) & 0xffffffff,
+                       mem_crb + MIU_TEST_AGT_WRDATA_HI);
+
+       writel((TA_CTL_ENABLE | TA_CTL_WRITE), (mem_crb + TEST_AGT_CTRL));
+       writel((TA_CTL_START | TA_CTL_ENABLE | TA_CTL_WRITE),
+                       (mem_crb + TEST_AGT_CTRL));
+
+       for (j = 0; j < MAX_CTL_CHECK; j++) {
+               temp = readl(mem_crb + TEST_AGT_CTRL);
+               if ((temp & TA_CTL_BUSY) == 0)
+                       break;
+       }
+
+       if (j >= MAX_CTL_CHECK) {
+               if (printk_ratelimit())
+                       dev_err(&adapter->pdev->dev,
+                                       "failed to write through agent\n");
+               ret = -EIO;
+       } else
+               ret = 0;
+
+       spin_unlock(&adapter->ahw.mem_lock);
+
+       return ret;
+}
+
+static int
+netxen_nic_pci_mem_read_2M(struct netxen_adapter *adapter,
+               u64 off, u64 *data)
+{
+       int j, ret;
+       u32 temp, off8;
+       u64 val;
+       void __iomem *mem_crb;
+
+       /* Only 64-bit aligned access */
+       if (off & 7)
+               return -EIO;
+
+       /* P3 onward, test agent base for MIU and SIU is same */
+       if (ADDR_IN_RANGE(off, NETXEN_ADDR_QDR_NET,
+                               NETXEN_ADDR_QDR_NET_MAX_P3)) {
+               mem_crb = netxen_get_ioaddr(adapter,
+                               NETXEN_CRB_QDR_NET+MIU_TEST_AGT_BASE);
+               goto correct;
+       }
+
+       if (ADDR_IN_RANGE(off, NETXEN_ADDR_DDR_NET, NETXEN_ADDR_DDR_NET_MAX)) {
+               mem_crb = netxen_get_ioaddr(adapter,
+                               NETXEN_CRB_DDR_NET+MIU_TEST_AGT_BASE);
+               goto correct;
+       }
+
+       if (ADDR_IN_RANGE(off, NETXEN_ADDR_OCM0, NETXEN_ADDR_OCM0_MAX)) {
+               return netxen_nic_pci_mem_access_direct(adapter,
+                               off, data, 0);
+       }
+
+       return -EIO;
+
+correct:
+       off8 = off & 0xfffffff8;
+
+       spin_lock(&adapter->ahw.mem_lock);
+
+       writel(off8, (mem_crb + MIU_TEST_AGT_ADDR_LO));
+       writel(0, (mem_crb + MIU_TEST_AGT_ADDR_HI));
+       writel(TA_CTL_ENABLE, (mem_crb + TEST_AGT_CTRL));
+       writel((TA_CTL_START | TA_CTL_ENABLE), (mem_crb + TEST_AGT_CTRL));
+
+       for (j = 0; j < MAX_CTL_CHECK; j++) {
+               temp = readl(mem_crb + TEST_AGT_CTRL);
+               if ((temp & TA_CTL_BUSY) == 0)
+                       break;
+       }
+
+       if (j >= MAX_CTL_CHECK) {
+               if (printk_ratelimit())
+                       dev_err(&adapter->pdev->dev,
+                                       "failed to read through agent\n");
+               ret = -EIO;
+       } else {
+               val = (u64)(readl(mem_crb + MIU_TEST_AGT_RDDATA_HI)) << 32;
+               val |= readl(mem_crb + MIU_TEST_AGT_RDDATA_LO);
+               *data = val;
+               ret = 0;
+       }
+
+       spin_unlock(&adapter->ahw.mem_lock);
+
+       return ret;
+}
+
+void
+netxen_setup_hwops(struct netxen_adapter *adapter)
+{
+       adapter->init_port = netxen_niu_xg_init_port;
+       adapter->stop_port = netxen_niu_disable_xg_port;
+
+       if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) {
+               adapter->crb_read = netxen_nic_hw_read_wx_128M;
+               adapter->crb_write = netxen_nic_hw_write_wx_128M;
+               adapter->pci_set_window = netxen_nic_pci_set_window_128M;
+               adapter->pci_mem_read = netxen_nic_pci_mem_read_128M;
+               adapter->pci_mem_write = netxen_nic_pci_mem_write_128M;
+               adapter->io_read = netxen_nic_io_read_128M;
+               adapter->io_write = netxen_nic_io_write_128M;
+
+               adapter->macaddr_set = netxen_p2_nic_set_mac_addr;
+               adapter->set_multi = netxen_p2_nic_set_multi;
+               adapter->set_mtu = netxen_nic_set_mtu_xgb;
+               adapter->set_promisc = netxen_p2_nic_set_promisc;
+
+       } else {
+               adapter->crb_read = netxen_nic_hw_read_wx_2M;
+               adapter->crb_write = netxen_nic_hw_write_wx_2M;
+               adapter->pci_set_window = netxen_nic_pci_set_window_2M;
+               adapter->pci_mem_read = netxen_nic_pci_mem_read_2M;
+               adapter->pci_mem_write = netxen_nic_pci_mem_write_2M;
+               adapter->io_read = netxen_nic_io_read_2M;
+               adapter->io_write = netxen_nic_io_write_2M;
+
+               adapter->set_mtu = nx_fw_cmd_set_mtu;
+               adapter->set_promisc = netxen_p3_nic_set_promisc;
+               adapter->macaddr_set = netxen_p3_nic_set_mac_addr;
+               adapter->set_multi = netxen_p3_nic_set_multi;
+
+               adapter->phy_read = nx_fw_cmd_query_phy;
+               adapter->phy_write = nx_fw_cmd_set_phy;
+       }
+}
+
+int netxen_nic_get_board_info(struct netxen_adapter *adapter)
+{
+       int offset, board_type, magic;
+       struct pci_dev *pdev = adapter->pdev;
+
+       offset = NX_FW_MAGIC_OFFSET;
+       if (netxen_rom_fast_read(adapter, offset, &magic))
+               return -EIO;
+
+       if (magic != NETXEN_BDINFO_MAGIC) {
+               dev_err(&pdev->dev, "invalid board config, magic=%08x\n",
+                       magic);
+               return -EIO;
+       }
+
+       offset = NX_BRDTYPE_OFFSET;
+       if (netxen_rom_fast_read(adapter, offset, &board_type))
+               return -EIO;
+
+       if (board_type == NETXEN_BRDTYPE_P3_4_GB_MM) {
+               u32 gpio = NXRD32(adapter, NETXEN_ROMUSB_GLB_PAD_GPIO_I);
+               if ((gpio & 0x8000) == 0)
+                       board_type = NETXEN_BRDTYPE_P3_10G_TP;
+       }
+
+       adapter->ahw.board_type = board_type;
+
+       switch (board_type) {
+       case NETXEN_BRDTYPE_P2_SB35_4G:
+               adapter->ahw.port_type = NETXEN_NIC_GBE;
+               break;
+       case NETXEN_BRDTYPE_P2_SB31_10G:
+       case NETXEN_BRDTYPE_P2_SB31_10G_IMEZ:
+       case NETXEN_BRDTYPE_P2_SB31_10G_HMEZ:
+       case NETXEN_BRDTYPE_P2_SB31_10G_CX4:
+       case NETXEN_BRDTYPE_P3_HMEZ:
+       case NETXEN_BRDTYPE_P3_XG_LOM:
+       case NETXEN_BRDTYPE_P3_10G_CX4:
+       case NETXEN_BRDTYPE_P3_10G_CX4_LP:
+       case NETXEN_BRDTYPE_P3_IMEZ:
+       case NETXEN_BRDTYPE_P3_10G_SFP_PLUS:
+       case NETXEN_BRDTYPE_P3_10G_SFP_CT:
+       case NETXEN_BRDTYPE_P3_10G_SFP_QT:
+       case NETXEN_BRDTYPE_P3_10G_XFP:
+       case NETXEN_BRDTYPE_P3_10000_BASE_T:
+               adapter->ahw.port_type = NETXEN_NIC_XGBE;
+               break;
+       case NETXEN_BRDTYPE_P1_BD:
+       case NETXEN_BRDTYPE_P1_SB:
+       case NETXEN_BRDTYPE_P1_SMAX:
+       case NETXEN_BRDTYPE_P1_SOCK:
+       case NETXEN_BRDTYPE_P3_REF_QG:
+       case NETXEN_BRDTYPE_P3_4_GB:
+       case NETXEN_BRDTYPE_P3_4_GB_MM:
+               adapter->ahw.port_type = NETXEN_NIC_GBE;
+               break;
+       case NETXEN_BRDTYPE_P3_10G_TP:
+               adapter->ahw.port_type = (adapter->portnum < 2) ?
+                       NETXEN_NIC_XGBE : NETXEN_NIC_GBE;
+               break;
+       default:
+               dev_err(&pdev->dev, "unknown board type %x\n", board_type);
+               adapter->ahw.port_type = NETXEN_NIC_XGBE;
+               break;
+       }
+
+       return 0;
+}
+
+/* NIU access sections */
+static int netxen_nic_set_mtu_xgb(struct netxen_adapter *adapter, int new_mtu)
+{
+       new_mtu += MTU_FUDGE_FACTOR;
+       if (adapter->physical_port == 0)
+               NXWR32(adapter, NETXEN_NIU_XGE_MAX_FRAME_SIZE, new_mtu);
+       else
+               NXWR32(adapter, NETXEN_NIU_XG1_MAX_FRAME_SIZE, new_mtu);
+       return 0;
+}
+
+void netxen_nic_set_link_parameters(struct netxen_adapter *adapter)
+{
+       __u32 status;
+       __u32 autoneg;
+       __u32 port_mode;
+
+       if (!netif_carrier_ok(adapter->netdev)) {
+               adapter->link_speed   = 0;
+               adapter->link_duplex  = -1;
+               adapter->link_autoneg = AUTONEG_ENABLE;
+               return;
+       }
+
+       if (adapter->ahw.port_type == NETXEN_NIC_GBE) {
+               port_mode = NXRD32(adapter, NETXEN_PORT_MODE_ADDR);
+               if (port_mode == NETXEN_PORT_MODE_802_3_AP) {
+                       adapter->link_speed   = SPEED_1000;
+                       adapter->link_duplex  = DUPLEX_FULL;
+                       adapter->link_autoneg = AUTONEG_DISABLE;
+                       return;
+               }
+
+               if (adapter->phy_read &&
+                   adapter->phy_read(adapter,
+                                     NETXEN_NIU_GB_MII_MGMT_ADDR_PHY_STATUS,
+                                     &status) == 0) {
+                       if (netxen_get_phy_link(status)) {
+                               switch (netxen_get_phy_speed(status)) {
+                               case 0:
+                                       adapter->link_speed = SPEED_10;
+                                       break;
+                               case 1:
+                                       adapter->link_speed = SPEED_100;
+                                       break;
+                               case 2:
+                                       adapter->link_speed = SPEED_1000;
+                                       break;
+                               default:
+                                       adapter->link_speed = 0;
+                                       break;
+                               }
+                               switch (netxen_get_phy_duplex(status)) {
+                               case 0:
+                                       adapter->link_duplex = DUPLEX_HALF;
+                                       break;
+                               case 1:
+                                       adapter->link_duplex = DUPLEX_FULL;
+                                       break;
+                               default:
+                                       adapter->link_duplex = -1;
+                                       break;
+                               }
+                               if (adapter->phy_read &&
+                                   adapter->phy_read(adapter,
+                                                     NETXEN_NIU_GB_MII_MGMT_ADDR_AUTONEG,
+                                                     &autoneg) == 0)
+                                       adapter->link_autoneg = autoneg;
+                       } else
+                               goto link_down;
+               } else {
+                     link_down:
+                       adapter->link_speed = 0;
+                       adapter->link_duplex = -1;
+               }
+       }
+}
+
+int
+netxen_nic_wol_supported(struct netxen_adapter *adapter)
+{
+       u32 wol_cfg;
+
+       if (NX_IS_REVISION_P2(adapter->ahw.revision_id))
+               return 0;
+
+       wol_cfg = NXRD32(adapter, NETXEN_WOL_CONFIG_NV);
+       if (wol_cfg & (1UL << adapter->portnum)) {
+               wol_cfg = NXRD32(adapter, NETXEN_WOL_CONFIG);
+               if (wol_cfg & (1 << adapter->portnum))
+                       return 1;
+       }
+
+       return 0;
+}
+
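+/*
+ * Process a minidump CRB control entry: for each of op_count addresses
+ * (stepping by addr_stride), apply every operation whose bit is set in
+ * the entry's opcode mask (plain writes, read-modify-writes, polls and
+ * save/restore operations against the template's saved_state_array).
+ */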
+static u32 netxen_md_cntrl(struct netxen_adapter *adapter,
+                       struct netxen_minidump_template_hdr *template_hdr,
+                       struct netxen_minidump_entry_crb *crtEntry)
+{
+       int loop_cnt, i, rv = 0, timeout_flag;
+       u32 op_count, stride;
+       u32 opcode, read_value, addr;
+       unsigned long timeout, timeout_jiffies;
+       addr = crtEntry->addr;
+       op_count = crtEntry->op_count;
+       stride = crtEntry->addr_stride;
+
+       for (loop_cnt = 0; loop_cnt < op_count; loop_cnt++) {
+               for (i = 0; i < sizeof(crtEntry->opcode) * 8; i++) {
+                       opcode = (crtEntry->opcode & (0x1 << i));
+                       if (opcode) {
+                               switch (opcode) {
+                               case NX_DUMP_WCRB:
+                                       NX_WR_DUMP_REG(addr,
+                                               adapter->ahw.pci_base0,
+                                                       crtEntry->value_1);
+                                       break;
+                               case NX_DUMP_RWCRB:
+                                       NX_RD_DUMP_REG(addr,
+                                               adapter->ahw.pci_base0,
+                                                               &read_value);
+                                       NX_WR_DUMP_REG(addr,
+                                               adapter->ahw.pci_base0,
+                                                               read_value);
+                                       break;
+                               case NX_DUMP_ANDCRB:
+                                       NX_RD_DUMP_REG(addr,
+                                               adapter->ahw.pci_base0,
+                                                               &read_value);
+                                       read_value &= crtEntry->value_2;
+                                       NX_WR_DUMP_REG(addr,
+                                               adapter->ahw.pci_base0,
+                                                               read_value);
+                                       break;
+                               case NX_DUMP_ORCRB:
+                                       NX_RD_DUMP_REG(addr,
+                                               adapter->ahw.pci_base0,
+                                                               &read_value);
+                                       read_value |= crtEntry->value_3;
+                                       NX_WR_DUMP_REG(addr,
+                                               adapter->ahw.pci_base0,
+                                                               read_value);
+                                       break;
+                               case NX_DUMP_POLLCRB:
+                                       timeout = crtEntry->poll_timeout;
+                                       NX_RD_DUMP_REG(addr,
+                                               adapter->ahw.pci_base0,
+                                                               &read_value);
+                                       timeout_jiffies =
+                                       msecs_to_jiffies(timeout) + jiffies;
+                                       for (timeout_flag = 0;
+                                               !timeout_flag
+                                       && ((read_value & crtEntry->value_2)
+                                       != crtEntry->value_1);) {
+                                               if (time_after(jiffies,
+                                                       timeout_jiffies))
+                                                       timeout_flag = 1;
+                                       NX_RD_DUMP_REG(addr,
+                                                       adapter->ahw.pci_base0,
+                                                               &read_value);
+                                       }
+
+                                       if (timeout_flag) {
+                                               dev_err(&adapter->pdev->dev, "%s : "
+                                                       "Timeout in poll_crb control operation.\n"
+                                                               , __func__);
+                                               return -1;
+                                       }
+                                       break;
+                               case NX_DUMP_RD_SAVE:
+                                       /* Decide which address to use */
+                                       if (crtEntry->state_index_a)
+                                               addr =
+                                               template_hdr->saved_state_array
+                                               [crtEntry->state_index_a];
+                                       NX_RD_DUMP_REG(addr,
+                                               adapter->ahw.pci_base0,
+                                                               &read_value);
+                                       template_hdr->saved_state_array
+                                       [crtEntry->state_index_v]
+                                               = read_value;
+                                       break;
+                               case NX_DUMP_WRT_SAVED:
+                                       /* Decide which value to use */
+                                       if (crtEntry->state_index_v)
+                                               read_value =
+                                               template_hdr->saved_state_array
+                                               [crtEntry->state_index_v];
+                                       else
+                                               read_value = crtEntry->value_1;
+
+                                       /* Decide which address to use */
+                                       if (crtEntry->state_index_a)
+                                               addr =
+                                               template_hdr->saved_state_array
+                                               [crtEntry->state_index_a];
+
+                                       NX_WR_DUMP_REG(addr,
+                                               adapter->ahw.pci_base0,
+                                                               read_value);
+                                       break;
+                               case NX_DUMP_MOD_SAVE_ST:
+                                       read_value =
+                                       template_hdr->saved_state_array
+                                               [crtEntry->state_index_v];
+                                       read_value <<= crtEntry->shl;
+                                       read_value >>= crtEntry->shr;
+                                       if (crtEntry->value_2)
+                                               read_value &=
+                                               crtEntry->value_2;
+                                       read_value |= crtEntry->value_3;
+                                       read_value += crtEntry->value_1;
+                                       /* Write value back to state area.*/
+                                       template_hdr->saved_state_array
+                                               [crtEntry->state_index_v]
+                                                       = read_value;
+                                       break;
+                               default:
+                                       rv = 1;
+                                       break;
+                               }
+                       }
+               }
+               addr = addr + stride;
+       }
+       return rv;
+}
+
+/* Read memory or MN */
+static u32
+netxen_md_rdmem(struct netxen_adapter *adapter,
+               struct netxen_minidump_entry_rdmem
+                       *memEntry, u64 *data_buff)
+{
+       u64 addr, value = 0;
+       int i = 0, loop_cnt;
+
+       addr = (u64)memEntry->read_addr;
+       loop_cnt = memEntry->read_data_size;    /* This is size in bytes */
+       loop_cnt /= sizeof(value);
+
+       for (i = 0; i < loop_cnt; i++) {
+               if (netxen_nic_pci_mem_read_2M(adapter, addr, &value))
+                       goto out;
+               *data_buff++ = value;
+               addr += sizeof(value);
+       }
+out:
+       return i * sizeof(value);
+}
+
+/* Read CRB operation */
+static u32 netxen_md_rd_crb(struct netxen_adapter *adapter,
+                       struct netxen_minidump_entry_crb
+                               *crbEntry, u32 *data_buff)
+{
+       int loop_cnt;
+       u32 op_count, addr, stride, value;
+
+       addr = crbEntry->addr;
+       op_count = crbEntry->op_count;
+       stride = crbEntry->addr_stride;
+
+       for (loop_cnt = 0; loop_cnt < op_count; loop_cnt++) {
+               NX_RD_DUMP_REG(addr, adapter->ahw.pci_base0, &value);
+               *data_buff++ = addr;
+               *data_buff++ = value;
+               addr = addr + stride;
+       }
+       return loop_cnt * (2 * sizeof(u32));
+}
+
+/* Read ROM */
+static u32
+netxen_md_rdrom(struct netxen_adapter *adapter,
+                       struct netxen_minidump_entry_rdrom
+                               *romEntry, __le32 *data_buff)
+{
+       int i, count = 0;
+       u32 size, lck_val;
+       u32 val;
+       u32 fl_addr, waddr, raddr;
+       fl_addr = romEntry->read_addr;
+       size = romEntry->read_data_size/4;
+lock_try:
+       lck_val = readl((void __iomem *)(adapter->ahw.pci_base0 +
+                                                       NX_FLASH_SEM2_LK));
+       if (!lck_val && count < MAX_CTL_CHECK) {
+               msleep(20);
+               count++;
+               goto lock_try;
+       }
+       writel(adapter->ahw.pci_func, (void __iomem *)(adapter->ahw.pci_base0 +
+                                                       NX_FLASH_LOCK_ID));
+       for (i = 0; i < size; i++) {
+               waddr = fl_addr & 0xFFFF0000;
+               NX_WR_DUMP_REG(FLASH_ROM_WINDOW, adapter->ahw.pci_base0, waddr);
+               raddr = FLASH_ROM_DATA + (fl_addr & 0x0000FFFF);
+               NX_RD_DUMP_REG(raddr, adapter->ahw.pci_base0, &val);
+               *data_buff++ = cpu_to_le32(val);
+               fl_addr += sizeof(val);
+       }
+       readl((void __iomem *)(adapter->ahw.pci_base0 + NX_FLASH_SEM2_ULK));
+       return romEntry->read_data_size;
+}
+
+/* Handle L2 Cache */
+static u32
+netxen_md_L2Cache(struct netxen_adapter *adapter,
+                               struct netxen_minidump_entry_cache
+                                       *cacheEntry, u32 *data_buff)
+{
+       int loop_cnt, i, k, timeout_flag = 0;
+       u32 addr, read_addr, read_value, cntrl_addr, tag_reg_addr;
+       u32 tag_value, read_cnt;
+       u8 cntl_value_w, cntl_value_r;
+       unsigned long timeout, timeout_jiffies;
+
+       loop_cnt = cacheEntry->op_count;
+       read_addr = cacheEntry->read_addr;
+       cntrl_addr = cacheEntry->control_addr;
+       cntl_value_w = (u32) cacheEntry->write_value;
+       tag_reg_addr = cacheEntry->tag_reg_addr;
+       tag_value = cacheEntry->init_tag_value;
+       read_cnt = cacheEntry->read_addr_cnt;
+
+       for (i = 0; i < loop_cnt; i++) {
+               NX_WR_DUMP_REG(tag_reg_addr, adapter->ahw.pci_base0, tag_value);
+               if (cntl_value_w)
+                       NX_WR_DUMP_REG(cntrl_addr, adapter->ahw.pci_base0,
+                                       (u32)cntl_value_w);
+               if (cacheEntry->poll_mask) {
+                       timeout = cacheEntry->poll_wait;
+                       NX_RD_DUMP_REG(cntrl_addr, adapter->ahw.pci_base0,
+                                                       &cntl_value_r);
+                       timeout_jiffies = msecs_to_jiffies(timeout) + jiffies;
+                       for (timeout_flag = 0; !timeout_flag &&
+                       ((cntl_value_r & cacheEntry->poll_mask) != 0);) {
+                               if (time_after(jiffies, timeout_jiffies))
+                                       timeout_flag = 1;
+                               NX_RD_DUMP_REG(cntrl_addr,
+                                       adapter->ahw.pci_base0,
+                                                       &cntl_value_r);
+                       }
+                       if (timeout_flag) {
+                               dev_err(&adapter->pdev->dev,
+                                               "Timeout in processing L2 Tag poll.\n");
+                               return -1;
+                       }
+               }
+               addr = read_addr;
+               for (k = 0; k < read_cnt; k++) {
+                       NX_RD_DUMP_REG(addr, adapter->ahw.pci_base0,
+                                       &read_value);
+                       *data_buff++ = read_value;
+                       addr += cacheEntry->read_addr_stride;
+               }
+               tag_value += cacheEntry->tag_value_stride;
+       }
+       return read_cnt * loop_cnt * sizeof(read_value);
+}
+
+
+/* Handle L1 Cache */
+static u32 netxen_md_L1Cache(struct netxen_adapter *adapter,
+                               struct netxen_minidump_entry_cache
+                                       *cacheEntry, u32 *data_buff)
+{
+       int i, k, loop_cnt;
+       u32 addr, read_addr, read_value, cntrl_addr, tag_reg_addr;
+       u32 tag_value, read_cnt;
+       u8 cntl_value_w;
+
+       loop_cnt = cacheEntry->op_count;
+       read_addr = cacheEntry->read_addr;
+       cntrl_addr = cacheEntry->control_addr;
+       cntl_value_w = (u32) cacheEntry->write_value;
+       tag_reg_addr = cacheEntry->tag_reg_addr;
+       tag_value = cacheEntry->init_tag_value;
+       read_cnt = cacheEntry->read_addr_cnt;
+
+       for (i = 0; i < loop_cnt; i++) {
+               NX_WR_DUMP_REG(tag_reg_addr, adapter->ahw.pci_base0, tag_value);
+               NX_WR_DUMP_REG(cntrl_addr, adapter->ahw.pci_base0,
+                                               (u32) cntl_value_w);
+               addr = read_addr;
+               for (k = 0; k < read_cnt; k++) {
+                       NX_RD_DUMP_REG(addr,
+                               adapter->ahw.pci_base0,
+                                               &read_value);
+                       *data_buff++ = read_value;
+                       addr += cacheEntry->read_addr_stride;
+               }
+               tag_value += cacheEntry->tag_value_stride;
+       }
+       return read_cnt * loop_cnt * sizeof(read_value);
+}
+
+/* Reading OCM memory */
+static u32
+netxen_md_rdocm(struct netxen_adapter *adapter,
+                               struct netxen_minidump_entry_rdocm
+                                       *ocmEntry, u32 *data_buff)
+{
+       int i, loop_cnt;
+       u32 value;
+       void __iomem *addr;
+       addr = (ocmEntry->read_addr + adapter->ahw.pci_base0);
+       loop_cnt = ocmEntry->op_count;
+
+       for (i = 0; i < loop_cnt; i++) {
+               value = readl(addr);
+               *data_buff++ = value;
+               addr += ocmEntry->read_addr_stride;
+       }
+       return i * sizeof(u32);
+}
+
+/* Read MUX data */
+static u32
+netxen_md_rdmux(struct netxen_adapter *adapter, struct netxen_minidump_entry_mux
+                                       *muxEntry, u32 *data_buff)
+{
+       int loop_cnt = 0;
+       u32 read_addr, read_value, select_addr, sel_value;
+
+       read_addr = muxEntry->read_addr;
+       sel_value = muxEntry->select_value;
+       select_addr = muxEntry->select_addr;
+
+       for (loop_cnt = 0; loop_cnt < muxEntry->op_count; loop_cnt++) {
+               NX_WR_DUMP_REG(select_addr, adapter->ahw.pci_base0, sel_value);
+               NX_RD_DUMP_REG(read_addr, adapter->ahw.pci_base0, &read_value);
+               *data_buff++ = sel_value;
+               *data_buff++ = read_value;
+               sel_value += muxEntry->select_value_stride;
+       }
+       return loop_cnt * (2 * sizeof(u32));
+}
+
+/* Handling Queue State Reads */
+static u32
+netxen_md_rdqueue(struct netxen_adapter *adapter,
+                               struct netxen_minidump_entry_queue
+                                       *queueEntry, u32 *data_buff)
+{
+       int loop_cnt, k;
+       u32 queue_id, read_addr, read_value, read_stride, select_addr, read_cnt;
+
+       read_cnt = queueEntry->read_addr_cnt;
+       read_stride = queueEntry->read_addr_stride;
+       select_addr = queueEntry->select_addr;
+
+       for (loop_cnt = 0, queue_id = 0; loop_cnt < queueEntry->op_count;
+                                loop_cnt++) {
+               NX_WR_DUMP_REG(select_addr, adapter->ahw.pci_base0, queue_id);
+               read_addr = queueEntry->read_addr;
+               for (k = 0; k < read_cnt; k++) {
+                       NX_RD_DUMP_REG(read_addr, adapter->ahw.pci_base0,
+                                                       &read_value);
+                       *data_buff++ = read_value;
+                       read_addr += read_stride;
+               }
+               queue_id += queueEntry->queue_id_stride;
+       }
+       return loop_cnt * (read_cnt * sizeof(read_value));
+}
+
+
+/*
+ * Catch the case where the driver does not read as much data from an
+ * entry as the template expects it to capture.
+ */
+
+static int netxen_md_entry_err_chk(struct netxen_adapter *adapter,
+                               struct netxen_minidump_entry *entry, int esize)
+{
+       if (esize < 0) {
+               entry->hdr.driver_flags |= NX_DUMP_SKIP;
+               return esize;
+       }
+       if (esize != entry->hdr.entry_capture_size) {
+               entry->hdr.entry_capture_size = esize;
+               entry->hdr.driver_flags |= NX_DUMP_SIZE_ERR;
+               dev_info(&adapter->pdev->dev,
+                       "Invalidate dump, Type:%d\tMask:%d\tSize:%d\tCap_size:%d\n",
+                       entry->hdr.entry_type, entry->hdr.entry_capture_mask,
+                       esize, entry->hdr.entry_capture_size);
+               dev_info(&adapter->pdev->dev, "Aborting further dump capture\n");
+       }
+       return 0;
+}
+
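+/*
+ * Walk the firmware minidump template: the raw template is first copied
+ * to the start of the capture buffer, then every entry whose capture mask
+ * matches is dispatched on its type and its captured data appended after
+ * the template copy; entries that fail or do not match are marked
+ * NX_DUMP_SKIP.
+ */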
+static int netxen_parse_md_template(struct netxen_adapter *adapter)
+{
+       int num_of_entries, buff_level, e_cnt, esize;
+       int end_cnt = 0, rv = 0, sane_start = 0, sane_end = 0;
+       char *dbuff;
+       void *template_buff = adapter->mdump.md_template;
+       char *dump_buff = adapter->mdump.md_capture_buff;
+       int capture_mask = adapter->mdump.md_capture_mask;
+       struct netxen_minidump_template_hdr *template_hdr;
+       struct netxen_minidump_entry *entry;
+
+       if ((capture_mask & 0x3) != 0x3) {
+               dev_err(&adapter->pdev->dev, "Capture mask %02x below minimum needed "
+                       "for valid firmware dump\n", capture_mask);
+               return -EINVAL;
+       }
+       template_hdr = (struct netxen_minidump_template_hdr *) template_buff;
+       num_of_entries = template_hdr->num_of_entries;
+       entry = (struct netxen_minidump_entry *) ((char *) template_buff +
+                               template_hdr->first_entry_offset);
+       memcpy(dump_buff, template_buff, adapter->mdump.md_template_size);
+       dump_buff = dump_buff + adapter->mdump.md_template_size;
+
+       if (template_hdr->entry_type == TLHDR)
+               sane_start = 1;
+
+       for (e_cnt = 0, buff_level = 0; e_cnt < num_of_entries; e_cnt++) {
+               if (!(entry->hdr.entry_capture_mask & capture_mask)) {
+                       entry->hdr.driver_flags |= NX_DUMP_SKIP;
+                       entry = (struct netxen_minidump_entry *)
+                               ((char *) entry + entry->hdr.entry_size);
+                       continue;
+               }
+               switch (entry->hdr.entry_type) {
+               case RDNOP:
+                       entry->hdr.driver_flags |= NX_DUMP_SKIP;
+                       break;
+               case RDEND:
+                       entry->hdr.driver_flags |= NX_DUMP_SKIP;
+                       if (!sane_end)
+                               end_cnt = e_cnt;
+                       sane_end += 1;
+                       break;
+               case CNTRL:
+                       rv = netxen_md_cntrl(adapter,
+                               template_hdr, (void *)entry);
+                       if (rv)
+                               entry->hdr.driver_flags |= NX_DUMP_SKIP;
+                       break;
+               case RDCRB:
+                       dbuff = dump_buff + buff_level;
+                       esize = netxen_md_rd_crb(adapter,
+                                       (void *) entry, (void *) dbuff);
+                       rv = netxen_md_entry_err_chk
+                               (adapter, entry, esize);
+                       if (rv < 0)
+                               break;
+                       buff_level += esize;
+                       break;
+               case RDMN:
+               case RDMEM:
+                       dbuff = dump_buff + buff_level;
+                       esize = netxen_md_rdmem(adapter,
+                               (void *) entry, (void *) dbuff);
+                       rv = netxen_md_entry_err_chk
+                               (adapter, entry, esize);
+                       if (rv < 0)
+                               break;
+                       buff_level += esize;
+                       break;
+               case BOARD:
+               case RDROM:
+                       dbuff = dump_buff + buff_level;
+                       esize = netxen_md_rdrom(adapter,
+                               (void *) entry, (void *) dbuff);
+                       rv = netxen_md_entry_err_chk
+                               (adapter, entry, esize);
+                       if (rv < 0)
+                               break;
+                       buff_level += esize;
+                       break;
+               case L2ITG:
+               case L2DTG:
+               case L2DAT:
+               case L2INS:
+                       dbuff = dump_buff + buff_level;
+                       esize = netxen_md_L2Cache(adapter,
+                               (void *) entry, (void *) dbuff);
+                       rv = netxen_md_entry_err_chk
+                               (adapter, entry, esize);
+                       if (rv < 0)
+                               break;
+                       buff_level += esize;
+                       break;
+               case L1DAT:
+               case L1INS:
+                       dbuff = dump_buff + buff_level;
+                       esize = netxen_md_L1Cache(adapter,
+                               (void *) entry, (void *) dbuff);
+                       rv = netxen_md_entry_err_chk
+                               (adapter, entry, esize);
+                       if (rv < 0)
+                               break;
+                       buff_level += esize;
+                       break;
+               case RDOCM:
+                       dbuff = dump_buff + buff_level;
+                       esize = netxen_md_rdocm(adapter,
+                               (void *) entry, (void *) dbuff);
+                       rv = netxen_md_entry_err_chk
+                               (adapter, entry, esize);
+                       if (rv < 0)
+                               break;
+                       buff_level += esize;
+                       break;
+               case RDMUX:
+                       dbuff = dump_buff + buff_level;
+                       esize = netxen_md_rdmux(adapter,
+                               (void *) entry, (void *) dbuff);
+                       rv = netxen_md_entry_err_chk
+                               (adapter, entry, esize);
+                       if (rv < 0)
+                               break;
+                       buff_level += esize;
+                       break;
+               case QUEUE:
+                       dbuff = dump_buff + buff_level;
+                       esize = netxen_md_rdqueue(adapter,
+                               (void *) entry, (void *) dbuff);
+                       rv = netxen_md_entry_err_chk
+                               (adapter, entry, esize);
+                       if (rv  < 0)
+                               break;
+                       buff_level += esize;
+                       break;
+               default:
+                       entry->hdr.driver_flags |= NX_DUMP_SKIP;
+                       break;
+               }
+               /* Next entry in the template */
+               entry = (struct netxen_minidump_entry *)
+                       ((char *) entry + entry->hdr.entry_size);
+       }
+       if (!sane_start || sane_end > 1) {
+               dev_err(&adapter->pdev->dev,
+                               "Firmware minidump template configuration error.\n");
+       }
+       return 0;
+}
+
+static int
+netxen_collect_minidump(struct netxen_adapter *adapter)
+{
+       int ret = 0;
+       struct netxen_minidump_template_hdr *hdr;
+       struct timespec val;
+       hdr = (struct netxen_minidump_template_hdr *)
+                               adapter->mdump.md_template;
+       hdr->driver_capture_mask = adapter->mdump.md_capture_mask;
+       jiffies_to_timespec(jiffies, &val);
+       hdr->driver_timestamp = (u32) val.tv_sec;
+       hdr->driver_info_word2 = adapter->fw_version;
+       hdr->driver_info_word3 = NXRD32(adapter, CRB_DRIVER_VERSION);
+       ret = netxen_parse_md_template(adapter);
+       if (ret)
+               return ret;
+
+       return ret;
+}
+
+
+void
+netxen_dump_fw(struct netxen_adapter *adapter)
+{
+       struct netxen_minidump_template_hdr *hdr;
+       int i, k, data_size = 0;
+       u32 capture_mask;
+       hdr = (struct netxen_minidump_template_hdr *)
+                               adapter->mdump.md_template;
+       capture_mask = adapter->mdump.md_capture_mask;
+
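+       /*
+        * Each set bit in the capture mask selects one dump region; the
+        * matching entry in capture_size_array[] gives that region's size,
+        * so the total capture size is the sum over the enabled bits.
+        */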
+       for (i = 0x2, k = 1; (i & NX_DUMP_MASK_MAX); i <<= 1, k++) {
+               if (i & capture_mask)
+                       data_size += hdr->capture_size_array[k];
+       }
+       if (!data_size) {
+               dev_err(&adapter->pdev->dev,
+                               "Invalid capture sizes for capture_mask=0x%x\n",
+                       adapter->mdump.md_capture_mask);
+               return;
+       }
+       adapter->mdump.md_capture_size = data_size;
+       adapter->mdump.md_dump_size = adapter->mdump.md_template_size +
+                                       adapter->mdump.md_capture_size;
+       if (!adapter->mdump.md_capture_buff) {
+               adapter->mdump.md_capture_buff =
+                               vzalloc(adapter->mdump.md_dump_size);
+               if (!adapter->mdump.md_capture_buff)
+                       return;
+
+               if (netxen_collect_minidump(adapter)) {
+                       adapter->mdump.has_valid_dump = 0;
+                       adapter->mdump.md_dump_size = 0;
+                       vfree(adapter->mdump.md_capture_buff);
+                       adapter->mdump.md_capture_buff = NULL;
+                       dev_err(&adapter->pdev->dev,
+                               "Error in collecting firmware minidump.\n");
+               } else {
+                       adapter->mdump.md_timestamp = jiffies;
+                       adapter->mdump.has_valid_dump = 1;
+                       adapter->fw_mdump_rdy = 1;
+                       dev_info(&adapter->pdev->dev, "%s Successfully "
+                               "collected fw dump.\n", adapter->netdev->name);
+               }
+
+       } else {
+               dev_info(&adapter->pdev->dev,
+                                       "Cannot overwrite previously collected "
+                                                       "firmware minidump.\n");
+               adapter->fw_mdump_rdy = 1;
+               return;
+       }
+}
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.h b/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.h
new file mode 100644 (file)
index 0000000..7433c4d
--- /dev/null
@@ -0,0 +1,285 @@
+/*
+ * Copyright (C) 2003 - 2009 NetXen, Inc.
+ * Copyright (C) 2009 - QLogic Corporation.
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called "COPYING".
+ *
+ */
+
+#ifndef __NETXEN_NIC_HW_H_
+#define __NETXEN_NIC_HW_H_
+
+/* Hardware memory size of 128 meg */
+#define NETXEN_MEMADDR_MAX (128 * 1024 * 1024)
+
+struct netxen_adapter;
+
+#define NETXEN_PCI_MAPSIZE_BYTES  (NETXEN_PCI_MAPSIZE << 20)
+
+void netxen_nic_set_link_parameters(struct netxen_adapter *adapter);
+
+/* Nibble or Byte mode for phy interface (GbE mode only) */
+
+#define _netxen_crb_get_bit(var, bit)  (((var) >> (bit)) & 0x1)
+
+/*
+ * NIU GB MAC Config Register 0 (applies to GB0, GB1, GB2, GB3)
+ *
+ *     Bit 0 : enable_tx => 1:enable frame xmit, 0:disable
+ *     Bit 1 : tx_synced => R/O: xmit enable synched to xmit stream
+ *     Bit 2 : enable_rx => 1:enable frame recv, 0:disable
+ *     Bit 3 : rx_synced => R/O: recv enable synched to recv stream
+ *     Bit 4 : tx_flowctl => 1:enable pause frame generation, 0:disable
+ *     Bit 5 : rx_flowctl => 1:act on recv'd pause frames, 0:ignore
+ *     Bit 8 : loopback => 1:loop MAC xmits to MAC recvs, 0:normal
+ *     Bit 16: tx_reset_pb => 1:reset frame xmit protocol blk, 0:no-op
+ *     Bit 17: rx_reset_pb => 1:reset frame recv protocol blk, 0:no-op
+ *     Bit 18: tx_reset_mac => 1:reset data/ctl multiplexer blk, 0:no-op
+ *     Bit 19: rx_reset_mac => 1:reset ctl frames & timers blk, 0:no-op
+ *     Bit 31: soft_reset => 1:reset the MAC and the SERDES, 0:no-op
+ */
+
+#define netxen_gb_tx_flowctl(config_word)      \
+       ((config_word) |= 1 << 4)
+#define netxen_gb_rx_flowctl(config_word)      \
+       ((config_word) |= 1 << 5)
+#define netxen_gb_tx_reset_pb(config_word)     \
+       ((config_word) |= 1 << 16)
+#define netxen_gb_rx_reset_pb(config_word)     \
+       ((config_word) |= 1 << 17)
+#define netxen_gb_tx_reset_mac(config_word)    \
+       ((config_word) |= 1 << 18)
+#define netxen_gb_rx_reset_mac(config_word)    \
+       ((config_word) |= 1 << 19)
+
+#define netxen_gb_unset_tx_flowctl(config_word)        \
+       ((config_word) &= ~(1 << 4))
+#define netxen_gb_unset_rx_flowctl(config_word)        \
+       ((config_word) &= ~(1 << 5))
+
+#define netxen_gb_get_tx_synced(config_word)   \
+               _netxen_crb_get_bit((config_word), 1)
+#define netxen_gb_get_rx_synced(config_word)   \
+               _netxen_crb_get_bit((config_word), 3)
+#define netxen_gb_get_tx_flowctl(config_word)  \
+               _netxen_crb_get_bit((config_word), 4)
+#define netxen_gb_get_rx_flowctl(config_word)  \
+               _netxen_crb_get_bit((config_word), 5)
+#define netxen_gb_get_soft_reset(config_word)  \
+               _netxen_crb_get_bit((config_word), 31)
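+
+/*
+ * Illustrative sketch (not part of the original patch): callers typically
+ * read the MAC config word, flip bits with the helpers above and write it
+ * back.  The register accessor names below are assumptions modelled on the
+ * surrounding driver (NXRD32/NXWR32 and the netxen_nic_hdr.h register macros):
+ *
+ *     u32 mac_cfg = NXRD32(adapter, NETXEN_NIU_GB_MAC_CONFIG_0(port));
+ *     netxen_gb_tx_flowctl(mac_cfg);
+ *     netxen_gb_rx_flowctl(mac_cfg);
+ *     NXWR32(adapter, NETXEN_NIU_GB_MAC_CONFIG_0(port), mac_cfg);
+ */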
+
+#define netxen_gb_get_stationaddress_low(config_word) ((config_word) >> 16)
+
+#define netxen_gb_set_mii_mgmt_clockselect(config_word, val)   \
+               ((config_word) |= ((val) & 0x07))
+#define netxen_gb_mii_mgmt_reset(config_word)  \
+               ((config_word) |= 1 << 31)
+#define netxen_gb_mii_mgmt_unset(config_word)  \
+               ((config_word) &= ~(1 << 31))
+
+/*
+ * NIU GB MII Mgmt Command Register (applies to GB0, GB1, GB2, GB3)
+ * Bit 0 : read_cycle => 1:perform single read cycle, 0:no-op
+ * Bit 1 : scan_cycle => 1:perform continuous read cycles, 0:no-op
+ */
+
+#define netxen_gb_mii_mgmt_set_read_cycle(config_word) \
+               ((config_word) |= 1 << 0)
+#define netxen_gb_mii_mgmt_reg_addr(config_word, val)  \
+               ((config_word) |= ((val) & 0x1F))
+#define netxen_gb_mii_mgmt_phy_addr(config_word, val)  \
+               ((config_word) |= (((val) & 0x1F) << 8))
+
+/*
+ * NIU GB MII Mgmt Indicators Register (applies to GB0, GB1, GB2, GB3)
+ * Read-only register.
+ * Bit 0 : busy => 1:performing an MII mgmt cycle, 0:idle
+ * Bit 1 : scanning => 1:scan operation in progress, 0:idle
+ * Bit 2 : notvalid => 1:mgmt result data not yet valid, 0:idle
+ */
+#define netxen_get_gb_mii_mgmt_busy(config_word)       \
+               _netxen_crb_get_bit(config_word, 0)
+#define netxen_get_gb_mii_mgmt_scanning(config_word)   \
+               _netxen_crb_get_bit(config_word, 1)
+#define netxen_get_gb_mii_mgmt_notvalid(config_word)   \
+               _netxen_crb_get_bit(config_word, 2)
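+
+/*
+ * Illustrative sketch (not part of the original patch): a PHY register read
+ * is started through the MII mgmt command register and the indicators
+ * register is then polled until the busy bit clears.  The register names
+ * here are assumptions modelled on netxen_nic_hdr.h:
+ *
+ *     netxen_gb_mii_mgmt_set_read_cycle(cmd);
+ *     NXWR32(adapter, NETXEN_NIU_GB_MII_MGMT_COMMAND(port), cmd);
+ *     do {
+ *             status = NXRD32(adapter, NETXEN_NIU_GB_MII_MGMT_INDICATE(port));
+ *     } while (netxen_get_gb_mii_mgmt_busy(status));
+ */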
+/*
+ * NIU XG Pause Ctl Register
+ *
+ *      Bit 0       : xg0_mask => 1:disable tx pause frames
+ *      Bit 1       : xg0_request => 1:request single pause frame
+ *      Bit 2       : xg0_on_off => 1:request is pause on, 0:off
+ *      Bit 3       : xg1_mask => 1:disable tx pause frames
+ *      Bit 4       : xg1_request => 1:request single pause frame
+ *      Bit 5       : xg1_on_off => 1:request is pause on, 0:off
+ */
+
+#define netxen_xg_set_xg0_mask(config_word)    \
+       ((config_word) |= 1 << 0)
+#define netxen_xg_set_xg1_mask(config_word)    \
+       ((config_word) |= 1 << 3)
+
+#define netxen_xg_get_xg0_mask(config_word)    \
+       _netxen_crb_get_bit((config_word), 0)
+#define netxen_xg_get_xg1_mask(config_word)    \
+       _netxen_crb_get_bit((config_word), 3)
+
+#define netxen_xg_unset_xg0_mask(config_word)  \
+       ((config_word) &= ~(1 << 0))
+#define netxen_xg_unset_xg1_mask(config_word)  \
+       ((config_word) &= ~(1 << 3))
+
+/*
+ * NIU GB Pause Ctl Register
+ *
+ *      Bit 0       : gb0_mask => 1:disable tx pause frames
+ *      Bit 2       : gb1_mask => 1:disable tx pause frames
+ *      Bit 4       : gb2_mask => 1:disable tx pause frames
+ *      Bit 6       : gb3_mask => 1:disable tx pause frames
+ */
+#define netxen_gb_set_gb0_mask(config_word)    \
+       ((config_word) |= 1 << 0)
+#define netxen_gb_set_gb1_mask(config_word)    \
+       ((config_word) |= 1 << 2)
+#define netxen_gb_set_gb2_mask(config_word)    \
+       ((config_word) |= 1 << 4)
+#define netxen_gb_set_gb3_mask(config_word)    \
+       ((config_word) |= 1 << 6)
+
+#define netxen_gb_get_gb0_mask(config_word)    \
+       _netxen_crb_get_bit((config_word), 0)
+#define netxen_gb_get_gb1_mask(config_word)    \
+       _netxen_crb_get_bit((config_word), 2)
+#define netxen_gb_get_gb2_mask(config_word)    \
+       _netxen_crb_get_bit((config_word), 4)
+#define netxen_gb_get_gb3_mask(config_word)    \
+       _netxen_crb_get_bit((config_word), 6)
+
+#define netxen_gb_unset_gb0_mask(config_word)  \
+       ((config_word) &= ~(1 << 0))
+#define netxen_gb_unset_gb1_mask(config_word)  \
+       ((config_word) &= ~(1 << 2))
+#define netxen_gb_unset_gb2_mask(config_word)  \
+       ((config_word) &= ~(1 << 4))
+#define netxen_gb_unset_gb3_mask(config_word)  \
+       ((config_word) &= ~(1 << 6))
+
+
+/*
+ * PHY-Specific MII control/status registers.
+ */
+#define NETXEN_NIU_GB_MII_MGMT_ADDR_CONTROL            0
+#define NETXEN_NIU_GB_MII_MGMT_ADDR_STATUS             1
+#define NETXEN_NIU_GB_MII_MGMT_ADDR_PHY_ID_0           2
+#define NETXEN_NIU_GB_MII_MGMT_ADDR_PHY_ID_1           3
+#define NETXEN_NIU_GB_MII_MGMT_ADDR_AUTONEG            4
+#define NETXEN_NIU_GB_MII_MGMT_ADDR_LNKPART            5
+#define NETXEN_NIU_GB_MII_MGMT_ADDR_AUTONEG_MORE       6
+#define NETXEN_NIU_GB_MII_MGMT_ADDR_NEXTPAGE_XMIT      7
+#define NETXEN_NIU_GB_MII_MGMT_ADDR_LNKPART_NEXTPAGE   8
+#define NETXEN_NIU_GB_MII_MGMT_ADDR_1000BT_CONTROL     9
+#define NETXEN_NIU_GB_MII_MGMT_ADDR_1000BT_STATUS      10
+#define NETXEN_NIU_GB_MII_MGMT_ADDR_EXTENDED_STATUS    15
+#define NETXEN_NIU_GB_MII_MGMT_ADDR_PHY_CONTROL                16
+#define NETXEN_NIU_GB_MII_MGMT_ADDR_PHY_STATUS         17
+#define NETXEN_NIU_GB_MII_MGMT_ADDR_INT_ENABLE         18
+#define NETXEN_NIU_GB_MII_MGMT_ADDR_INT_STATUS         19
+#define NETXEN_NIU_GB_MII_MGMT_ADDR_PHY_CONTROL_MORE   20
+#define NETXEN_NIU_GB_MII_MGMT_ADDR_RECV_ERROR_COUNT   21
+#define NETXEN_NIU_GB_MII_MGMT_ADDR_LED_CONTROL                24
+#define NETXEN_NIU_GB_MII_MGMT_ADDR_LED_OVERRIDE       25
+#define NETXEN_NIU_GB_MII_MGMT_ADDR_PHY_CONTROL_MORE_YET       26
+#define NETXEN_NIU_GB_MII_MGMT_ADDR_PHY_STATUS_MORE    27
+
+/*
+ * PHY-Specific Status Register (reg 17).
+ *
+ * Bit 0      : jabber => 1:jabber detected, 0:not
+ * Bit 1      : polarity => 1:polarity reversed, 0:normal
+ * Bit 2      : recvpause => 1:receive pause enabled, 0:disabled
+ * Bit 3      : xmitpause => 1:transmit pause enabled, 0:disabled
+ * Bit 4      : energydetect => 1:sleep, 0:active
+ * Bit 5      : downshift => 1:downshift, 0:no downshift
+ * Bit 6      : crossover => 1:MDIX (crossover), 0:MDI (no crossover)
+ * Bits 7-9   : cablelen => not valid in 10Mb/s mode
+ *                     0:<50m, 1:50-80m, 2:80-110m, 3:110-140m, 4:>140m
+ * Bit 10     : link => 1:link up, 0:link down
+ * Bit 11     : resolved => 1:speed and duplex resolved, 0:not yet
+ * Bit 12     : pagercvd => 1:page received, 0:page not received
+ * Bit 13     : duplex => 1:full duplex, 0:half duplex
+ * Bits 14-15 : speed => 0:10Mb/s, 1:100Mb/s, 2:1000Mb/s, 3:rsvd
+ */
+
+#define netxen_get_phy_speed(config_word) (((config_word) >> 14) & 0x03)
+
+#define netxen_set_phy_speed(config_word, val) \
+               ((config_word) |= ((val & 0x03) << 14))
+#define netxen_set_phy_duplex(config_word)     \
+               ((config_word) |= 1 << 13)
+#define netxen_clear_phy_duplex(config_word)   \
+               ((config_word) &= ~(1 << 13))
+
+#define netxen_get_phy_link(config_word)       \
+               _netxen_crb_get_bit(config_word, 10)
+#define netxen_get_phy_duplex(config_word)     \
+               _netxen_crb_get_bit(config_word, 13)
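+
+/*
+ * Illustrative sketch (not part of the original patch): once autonegotiation
+ * has resolved, link parameters can be decoded from the PHY status word with
+ * the accessors above:
+ *
+ *     if (netxen_get_phy_link(status)) {
+ *             speed  = netxen_get_phy_speed(status);   // 0:10, 1:100, 2:1000 Mb/s
+ *             duplex = netxen_get_phy_duplex(status);  // 1:full, 0:half
+ *     }
+ */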
+
+/*
+ * NIU Mode Register.
+ * Bit 0 : enable FibreChannel
+ * Bit 1 : enable 10/100/1000 Ethernet
+ * Bit 2 : enable 10Gb Ethernet
+ */
+
+#define netxen_get_niu_enable_ge(config_word)  \
+               _netxen_crb_get_bit(config_word, 1)
+
+#define NETXEN_NIU_NON_PROMISC_MODE    0
+#define NETXEN_NIU_PROMISC_MODE                1
+#define NETXEN_NIU_ALLMULTI_MODE       2
+
+/*
+ * NIU XG MAC Config Register
+ *
+ * Bit 0 : tx_enable => 1:enable frame xmit, 0:disable
+ * Bit 2 : rx_enable => 1:enable frame recv, 0:disable
+ * Bit 4 : soft_reset => 1:reset the MAC , 0:no-op
+ * Bit 27: xaui_framer_reset
+ * Bit 28: xaui_rx_reset
+ * Bit 29: xaui_tx_reset
+ * Bit 30: xg_ingress_afifo_reset
+ * Bit 31: xg_egress_afifo_reset
+ */
+
+#define netxen_xg_soft_reset(config_word)      \
+               ((config_word) |= 1 << 4)
+
+typedef struct {
+       unsigned valid;
+       unsigned start_128M;
+       unsigned end_128M;
+       unsigned start_2M;
+} crb_128M_2M_sub_block_map_t;
+
+typedef struct {
+       crb_128M_2M_sub_block_map_t sub_block[16];
+} crb_128M_2M_block_map_t;
+
+#endif                         /* __NETXEN_NIC_HW_H_ */
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c
new file mode 100644 (file)
index 0000000..7b43a3b
--- /dev/null
@@ -0,0 +1,1933 @@
+/*
+ * Copyright (C) 2003 - 2009 NetXen, Inc.
+ * Copyright (C) 2009 - QLogic Corporation.
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called "COPYING".
+ *
+ */
+
+#include <linux/netdevice.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/if_vlan.h>
+#include <net/checksum.h>
+#include "netxen_nic.h"
+#include "netxen_nic_hw.h"
+
+struct crb_addr_pair {
+       u32 addr;
+       u32 data;
+};
+
+#define NETXEN_MAX_CRB_XFORM 60
+static unsigned int crb_addr_xform[NETXEN_MAX_CRB_XFORM];
+#define NETXEN_ADDR_ERROR (0xffffffff)
+
+#define crb_addr_transform(name) \
+       crb_addr_xform[NETXEN_HW_PX_MAP_CRB_##name] = \
+       NETXEN_HW_CRB_HUB_AGT_ADR_##name << 20
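+
+/*
+ * For example, crb_addr_transform(XDMA) expands to:
+ *
+ *     crb_addr_xform[NETXEN_HW_PX_MAP_CRB_XDMA] =
+ *             NETXEN_HW_CRB_HUB_AGT_ADR_XDMA << 20;
+ *
+ * i.e. the table records the internal Phantom CRB base address of each
+ * agent, which netxen_decode_crb_addr() below matches against to derive
+ * the external PCI CRB offset.
+ */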
+
+#define NETXEN_NIC_XDMA_RESET 0x8000ff
+
+static void
+netxen_post_rx_buffers_nodb(struct netxen_adapter *adapter,
+               struct nx_host_rds_ring *rds_ring);
+static int netxen_p3_has_mn(struct netxen_adapter *adapter);
+
+static void crb_addr_transform_setup(void)
+{
+       crb_addr_transform(XDMA);
+       crb_addr_transform(TIMR);
+       crb_addr_transform(SRE);
+       crb_addr_transform(SQN3);
+       crb_addr_transform(SQN2);
+       crb_addr_transform(SQN1);
+       crb_addr_transform(SQN0);
+       crb_addr_transform(SQS3);
+       crb_addr_transform(SQS2);
+       crb_addr_transform(SQS1);
+       crb_addr_transform(SQS0);
+       crb_addr_transform(RPMX7);
+       crb_addr_transform(RPMX6);
+       crb_addr_transform(RPMX5);
+       crb_addr_transform(RPMX4);
+       crb_addr_transform(RPMX3);
+       crb_addr_transform(RPMX2);
+       crb_addr_transform(RPMX1);
+       crb_addr_transform(RPMX0);
+       crb_addr_transform(ROMUSB);
+       crb_addr_transform(SN);
+       crb_addr_transform(QMN);
+       crb_addr_transform(QMS);
+       crb_addr_transform(PGNI);
+       crb_addr_transform(PGND);
+       crb_addr_transform(PGN3);
+       crb_addr_transform(PGN2);
+       crb_addr_transform(PGN1);
+       crb_addr_transform(PGN0);
+       crb_addr_transform(PGSI);
+       crb_addr_transform(PGSD);
+       crb_addr_transform(PGS3);
+       crb_addr_transform(PGS2);
+       crb_addr_transform(PGS1);
+       crb_addr_transform(PGS0);
+       crb_addr_transform(PS);
+       crb_addr_transform(PH);
+       crb_addr_transform(NIU);
+       crb_addr_transform(I2Q);
+       crb_addr_transform(EG);
+       crb_addr_transform(MN);
+       crb_addr_transform(MS);
+       crb_addr_transform(CAS2);
+       crb_addr_transform(CAS1);
+       crb_addr_transform(CAS0);
+       crb_addr_transform(CAM);
+       crb_addr_transform(C2C1);
+       crb_addr_transform(C2C0);
+       crb_addr_transform(SMB);
+       crb_addr_transform(OCM0);
+       crb_addr_transform(I2C0);
+}
+
+void netxen_release_rx_buffers(struct netxen_adapter *adapter)
+{
+       struct netxen_recv_context *recv_ctx;
+       struct nx_host_rds_ring *rds_ring;
+       struct netxen_rx_buffer *rx_buf;
+       int i, ring;
+
+       recv_ctx = &adapter->recv_ctx;
+       for (ring = 0; ring < adapter->max_rds_rings; ring++) {
+               rds_ring = &recv_ctx->rds_rings[ring];
+               for (i = 0; i < rds_ring->num_desc; ++i) {
+                       rx_buf = &(rds_ring->rx_buf_arr[i]);
+                       if (rx_buf->state == NETXEN_BUFFER_FREE)
+                               continue;
+                       pci_unmap_single(adapter->pdev,
+                                       rx_buf->dma,
+                                       rds_ring->dma_size,
+                                       PCI_DMA_FROMDEVICE);
+                       if (rx_buf->skb != NULL)
+                               dev_kfree_skb_any(rx_buf->skb);
+               }
+       }
+}
+
+void netxen_release_tx_buffers(struct netxen_adapter *adapter)
+{
+       struct netxen_cmd_buffer *cmd_buf;
+       struct netxen_skb_frag *buffrag;
+       int i, j;
+       struct nx_host_tx_ring *tx_ring = adapter->tx_ring;
+
+       spin_lock_bh(&adapter->tx_clean_lock);
+       cmd_buf = tx_ring->cmd_buf_arr;
+       for (i = 0; i < tx_ring->num_desc; i++) {
+               buffrag = cmd_buf->frag_array;
+               if (buffrag->dma) {
+                       pci_unmap_single(adapter->pdev, buffrag->dma,
+                                        buffrag->length, PCI_DMA_TODEVICE);
+                       buffrag->dma = 0ULL;
+               }
+               for (j = 1; j < cmd_buf->frag_count; j++) {
+                       buffrag++;
+                       if (buffrag->dma) {
+                               pci_unmap_page(adapter->pdev, buffrag->dma,
+                                              buffrag->length,
+                                              PCI_DMA_TODEVICE);
+                               buffrag->dma = 0ULL;
+                       }
+               }
+               if (cmd_buf->skb) {
+                       dev_kfree_skb_any(cmd_buf->skb);
+                       cmd_buf->skb = NULL;
+               }
+               cmd_buf++;
+       }
+       spin_unlock_bh(&adapter->tx_clean_lock);
+}
+
+void netxen_free_sw_resources(struct netxen_adapter *adapter)
+{
+       struct netxen_recv_context *recv_ctx;
+       struct nx_host_rds_ring *rds_ring;
+       struct nx_host_tx_ring *tx_ring;
+       int ring;
+
+       recv_ctx = &adapter->recv_ctx;
+
+       if (recv_ctx->rds_rings == NULL)
+               goto skip_rds;
+
+       for (ring = 0; ring < adapter->max_rds_rings; ring++) {
+               rds_ring = &recv_ctx->rds_rings[ring];
+               vfree(rds_ring->rx_buf_arr);
+               rds_ring->rx_buf_arr = NULL;
+       }
+       kfree(recv_ctx->rds_rings);
+
+skip_rds:
+       if (adapter->tx_ring == NULL)
+               return;
+
+       tx_ring = adapter->tx_ring;
+       vfree(tx_ring->cmd_buf_arr);
+       kfree(tx_ring);
+       adapter->tx_ring = NULL;
+}
+
+int netxen_alloc_sw_resources(struct netxen_adapter *adapter)
+{
+       struct netxen_recv_context *recv_ctx;
+       struct nx_host_rds_ring *rds_ring;
+       struct nx_host_sds_ring *sds_ring;
+       struct nx_host_tx_ring *tx_ring;
+       struct netxen_rx_buffer *rx_buf;
+       int ring, i;
+
+       struct netxen_cmd_buffer *cmd_buf_arr;
+       struct net_device *netdev = adapter->netdev;
+
+       tx_ring = kzalloc(sizeof(struct nx_host_tx_ring), GFP_KERNEL);
+       if (tx_ring == NULL)
+               return -ENOMEM;
+
+       adapter->tx_ring = tx_ring;
+
+       tx_ring->num_desc = adapter->num_txd;
+       tx_ring->txq = netdev_get_tx_queue(netdev, 0);
+
+       cmd_buf_arr = vzalloc(TX_BUFF_RINGSIZE(tx_ring));
+       if (cmd_buf_arr == NULL)
+               goto err_out;
+
+       tx_ring->cmd_buf_arr = cmd_buf_arr;
+
+       recv_ctx = &adapter->recv_ctx;
+
+       rds_ring = kcalloc(adapter->max_rds_rings,
+                          sizeof(struct nx_host_rds_ring), GFP_KERNEL);
+       if (rds_ring == NULL)
+               goto err_out;
+
+       recv_ctx->rds_rings = rds_ring;
+
+       for (ring = 0; ring < adapter->max_rds_rings; ring++) {
+               rds_ring = &recv_ctx->rds_rings[ring];
+               switch (ring) {
+               case RCV_RING_NORMAL:
+                       rds_ring->num_desc = adapter->num_rxd;
+                       if (adapter->ahw.cut_through) {
+                               rds_ring->dma_size =
+                                       NX_CT_DEFAULT_RX_BUF_LEN;
+                               rds_ring->skb_size =
+                                       NX_CT_DEFAULT_RX_BUF_LEN;
+                       } else {
+                               if (NX_IS_REVISION_P3(adapter->ahw.revision_id))
+                                       rds_ring->dma_size =
+                                               NX_P3_RX_BUF_MAX_LEN;
+                               else
+                                       rds_ring->dma_size =
+                                               NX_P2_RX_BUF_MAX_LEN;
+                               rds_ring->skb_size =
+                                       rds_ring->dma_size + NET_IP_ALIGN;
+                       }
+                       break;
+
+               case RCV_RING_JUMBO:
+                       rds_ring->num_desc = adapter->num_jumbo_rxd;
+                       if (NX_IS_REVISION_P3(adapter->ahw.revision_id))
+                               rds_ring->dma_size =
+                                       NX_P3_RX_JUMBO_BUF_MAX_LEN;
+                       else
+                               rds_ring->dma_size =
+                                       NX_P2_RX_JUMBO_BUF_MAX_LEN;
+
+                       if (adapter->capabilities & NX_CAP0_HW_LRO)
+                               rds_ring->dma_size += NX_LRO_BUFFER_EXTRA;
+
+                       rds_ring->skb_size =
+                               rds_ring->dma_size + NET_IP_ALIGN;
+                       break;
+
+               case RCV_RING_LRO:
+                       rds_ring->num_desc = adapter->num_lro_rxd;
+                       rds_ring->dma_size = NX_RX_LRO_BUFFER_LENGTH;
+                       rds_ring->skb_size = rds_ring->dma_size + NET_IP_ALIGN;
+                       break;
+
+               }
+               rds_ring->rx_buf_arr = vzalloc(RCV_BUFF_RINGSIZE(rds_ring));
+               if (rds_ring->rx_buf_arr == NULL)
+                       /* free whatever was already allocated */
+                       goto err_out;
+
+               INIT_LIST_HEAD(&rds_ring->free_list);
+               /*
+                * Now go through all of them, set reference handles
+                * and put them in the queues.
+                */
+               rx_buf = rds_ring->rx_buf_arr;
+               for (i = 0; i < rds_ring->num_desc; i++) {
+                       list_add_tail(&rx_buf->list,
+                                       &rds_ring->free_list);
+                       rx_buf->ref_handle = i;
+                       rx_buf->state = NETXEN_BUFFER_FREE;
+                       rx_buf++;
+               }
+               spin_lock_init(&rds_ring->lock);
+       }
+
+       for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+               sds_ring = &recv_ctx->sds_rings[ring];
+               sds_ring->irq = adapter->msix_entries[ring].vector;
+               sds_ring->adapter = adapter;
+               sds_ring->num_desc = adapter->num_rxd;
+
+               for (i = 0; i < NUM_RCV_DESC_RINGS; i++)
+                       INIT_LIST_HEAD(&sds_ring->free_list[i]);
+       }
+
+       return 0;
+
+err_out:
+       netxen_free_sw_resources(adapter);
+       return -ENOMEM;
+}
+
+/*
+ * netxen_decode_crb_addr() - utility to translate from internal Phantom CRB
+ * address to external PCI CRB address.
+ */
+static u32 netxen_decode_crb_addr(u32 addr)
+{
+       int i;
+       u32 base_addr, offset, pci_base;
+
+       crb_addr_transform_setup();
+
+       pci_base = NETXEN_ADDR_ERROR;
+       base_addr = addr & 0xfff00000;
+       offset = addr & 0x000fffff;
+
+       for (i = 0; i < NETXEN_MAX_CRB_XFORM; i++) {
+               if (crb_addr_xform[i] == base_addr) {
+                       pci_base = i << 20;
+                       break;
+               }
+       }
+       if (pci_base == NETXEN_ADDR_ERROR)
+               return pci_base;
+       else
+               return pci_base + offset;
+}
+
+#define NETXEN_MAX_ROM_WAIT_USEC       100
+
+static int netxen_wait_rom_done(struct netxen_adapter *adapter)
+{
+       long timeout = 0;
+       long done = 0;
+
+       cond_resched();
+
+       while (done == 0) {
+               done = NXRD32(adapter, NETXEN_ROMUSB_GLB_STATUS);
+               done &= 2;
+               if (++timeout >= NETXEN_MAX_ROM_WAIT_USEC) {
+                       dev_err(&adapter->pdev->dev,
+                               "Timeout reached waiting for rom done\n");
+                       return -EIO;
+               }
+               udelay(1);
+       }
+       return 0;
+}
+
+static int do_rom_fast_read(struct netxen_adapter *adapter,
+                           int addr, int *valp)
+{
+       NXWR32(adapter, NETXEN_ROMUSB_ROM_ADDRESS, addr);
+       NXWR32(adapter, NETXEN_ROMUSB_ROM_DUMMY_BYTE_CNT, 0);
+       NXWR32(adapter, NETXEN_ROMUSB_ROM_ABYTE_CNT, 3);
+       NXWR32(adapter, NETXEN_ROMUSB_ROM_INSTR_OPCODE, 0xb);
+       if (netxen_wait_rom_done(adapter)) {
+               printk(KERN_ERR "Error waiting for rom done\n");
+               return -EIO;
+       }
+       /* reset abyte_cnt and dummy_byte_cnt */
+       NXWR32(adapter, NETXEN_ROMUSB_ROM_ABYTE_CNT, 0);
+       udelay(10);
+       NXWR32(adapter, NETXEN_ROMUSB_ROM_DUMMY_BYTE_CNT, 0);
+
+       *valp = NXRD32(adapter, NETXEN_ROMUSB_ROM_RDATA);
+       return 0;
+}
+
+static int do_rom_fast_read_words(struct netxen_adapter *adapter, int addr,
+                                 u8 *bytes, size_t size)
+{
+       int addridx;
+       int ret = 0;
+
+       for (addridx = addr; addridx < (addr + size); addridx += 4) {
+               int v;
+               ret = do_rom_fast_read(adapter, addridx, &v);
+               if (ret != 0)
+                       break;
+               *(__le32 *)bytes = cpu_to_le32(v);
+               bytes += 4;
+       }
+
+       return ret;
+}
+
+int
+netxen_rom_fast_read_words(struct netxen_adapter *adapter, int addr,
+                               u8 *bytes, size_t size)
+{
+       int ret;
+
+       ret = netxen_rom_lock(adapter);
+       if (ret < 0)
+               return ret;
+
+       ret = do_rom_fast_read_words(adapter, addr, bytes, size);
+
+       netxen_rom_unlock(adapter);
+       return ret;
+}
+
+int netxen_rom_fast_read(struct netxen_adapter *adapter, int addr, int *valp)
+{
+       int ret;
+
+       if (netxen_rom_lock(adapter) != 0)
+               return -EIO;
+
+       ret = do_rom_fast_read(adapter, addr, valp);
+       netxen_rom_unlock(adapter);
+       return ret;
+}
+
+#define NETXEN_BOARDTYPE               0x4008
+#define NETXEN_BOARDNUM                0x400c
+#define NETXEN_CHIPNUM                 0x4010
+
+int netxen_pinit_from_rom(struct netxen_adapter *adapter)
+{
+       int addr, val;
+       int i, n, init_delay = 0;
+       struct crb_addr_pair *buf;
+       unsigned offset;
+       u32 off;
+
+       /* resetall */
+       netxen_rom_lock(adapter);
+       NXWR32(adapter, NETXEN_ROMUSB_GLB_SW_RESET, 0xfeffffff);
+       netxen_rom_unlock(adapter);
+
+       if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) {
+               if (netxen_rom_fast_read(adapter, 0, &n) != 0 ||
+                       (n != 0xcafecafe) ||
+                       netxen_rom_fast_read(adapter, 4, &n) != 0) {
+                       printk(KERN_ERR "%s: ERROR Reading crb_init area: "
+                                       "n: %08x\n", netxen_nic_driver_name, n);
+                       return -EIO;
+               }
+               offset = n & 0xffffU;
+               n = (n >> 16) & 0xffffU;
+       } else {
+               if (netxen_rom_fast_read(adapter, 0, &n) != 0 ||
+                       !(n & 0x80000000)) {
+                       printk(KERN_ERR "%s: ERROR Reading crb_init area: "
+                                       "n: %08x\n", netxen_nic_driver_name, n);
+                       return -EIO;
+               }
+               offset = 1;
+               n &= ~0x80000000;
+       }
+
+       if (n >= 1024) {
+               printk(KERN_ERR "%s: n=0x%x Error! NetXen card flash not"
+                      " initialized.\n", __func__, n);
+               return -EIO;
+       }
+
+       buf = kcalloc(n, sizeof(struct crb_addr_pair), GFP_KERNEL);
+       if (buf == NULL)
+               return -ENOMEM;
+
+       for (i = 0; i < n; i++) {
+               if (netxen_rom_fast_read(adapter, 8*i + 4*offset, &val) != 0 ||
+               netxen_rom_fast_read(adapter, 8*i + 4*offset + 4, &addr) != 0) {
+                       kfree(buf);
+                       return -EIO;
+               }
+
+               buf[i].addr = addr;
+               buf[i].data = val;
+
+       }
+
+       for (i = 0; i < n; i++) {
+
+               off = netxen_decode_crb_addr(buf[i].addr);
+               if (off == NETXEN_ADDR_ERROR) {
+                       printk(KERN_ERR "CRB init value out of range %x\n",
+                                       buf[i].addr);
+                       continue;
+               }
+               off += NETXEN_PCI_CRBSPACE;
+
+               if (off & 1)
+                       continue;
+
+               /* skipping cold reboot MAGIC */
+               if (off == NETXEN_CAM_RAM(0x1fc))
+                       continue;
+
+               if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) {
+                       if (off == (NETXEN_CRB_I2C0 + 0x1c))
+                               continue;
+                       /* do not reset PCI */
+                       if (off == (ROMUSB_GLB + 0xbc))
+                               continue;
+                       if (off == (ROMUSB_GLB + 0xa8))
+                               continue;
+                       if (off == (ROMUSB_GLB + 0xc8)) /* core clock */
+                               continue;
+                       if (off == (ROMUSB_GLB + 0x24)) /* MN clock */
+                               continue;
+                       if (off == (ROMUSB_GLB + 0x1c)) /* MS clock */
+                               continue;
+                       if ((off & 0x0ff00000) == NETXEN_CRB_DDR_NET)
+                               continue;
+                       if (off == (NETXEN_CRB_PEG_NET_1 + 0x18) &&
+                               !NX_IS_REVISION_P3P(adapter->ahw.revision_id))
+                               buf[i].data = 0x1020;
+                       /* skip the function enable register */
+                       if (off == NETXEN_PCIE_REG(PCIE_SETUP_FUNCTION))
+                               continue;
+                       if (off == NETXEN_PCIE_REG(PCIE_SETUP_FUNCTION2))
+                               continue;
+                       if ((off & 0x0ff00000) == NETXEN_CRB_SMB)
+                               continue;
+               }
+
+               init_delay = 1;
+               /* After writing this register, HW needs time for CRB */
+               /* to quiet down (else crb_window returns 0xffffffff) */
+               if (off == NETXEN_ROMUSB_GLB_SW_RESET) {
+                       init_delay = 1000;
+                       if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) {
+                               /* hold xdma in reset also */
+                               buf[i].data = NETXEN_NIC_XDMA_RESET;
+                       }
+               }
+
+               NXWR32(adapter, off, buf[i].data);
+
+               msleep(init_delay);
+       }
+       kfree(buf);
+
+       /* disable_peg_cache_all */
+
+       /* unreset_net_cache */
+       if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) {
+               val = NXRD32(adapter, NETXEN_ROMUSB_GLB_SW_RESET);
+               NXWR32(adapter, NETXEN_ROMUSB_GLB_SW_RESET, (val & 0xffffff0f));
+       }
+
+       /* p2dn replyCount */
+       NXWR32(adapter, NETXEN_CRB_PEG_NET_D + 0xec, 0x1e);
+       /* disable_peg_cache 0 */
+       NXWR32(adapter, NETXEN_CRB_PEG_NET_D + 0x4c, 8);
+       /* disable_peg_cache 1 */
+       NXWR32(adapter, NETXEN_CRB_PEG_NET_I + 0x4c, 8);
+
+       /* peg_clr_all */
+
+       /* peg_clr 0 */
+       NXWR32(adapter, NETXEN_CRB_PEG_NET_0 + 0x8, 0);
+       NXWR32(adapter, NETXEN_CRB_PEG_NET_0 + 0xc, 0);
+       /* peg_clr 1 */
+       NXWR32(adapter, NETXEN_CRB_PEG_NET_1 + 0x8, 0);
+       NXWR32(adapter, NETXEN_CRB_PEG_NET_1 + 0xc, 0);
+       /* peg_clr 2 */
+       NXWR32(adapter, NETXEN_CRB_PEG_NET_2 + 0x8, 0);
+       NXWR32(adapter, NETXEN_CRB_PEG_NET_2 + 0xc, 0);
+       /* peg_clr 3 */
+       NXWR32(adapter, NETXEN_CRB_PEG_NET_3 + 0x8, 0);
+       NXWR32(adapter, NETXEN_CRB_PEG_NET_3 + 0xc, 0);
+       return 0;
+}
+
+static struct uni_table_desc *nx_get_table_desc(const u8 *unirom, int section)
+{
+       uint32_t i;
+       struct uni_table_desc *directory = (struct uni_table_desc *) &unirom[0];
+       __le32 entries = cpu_to_le32(directory->num_entries);
+
+       for (i = 0; i < entries; i++) {
+
+               __le32 offs = cpu_to_le32(directory->findex) +
+                               (i * cpu_to_le32(directory->entry_size));
+               __le32 tab_type = cpu_to_le32(*((u32 *)&unirom[offs] + 8));
+
+               if (tab_type == section)
+                       return (struct uni_table_desc *) &unirom[offs];
+       }
+
+       return NULL;
+}
+
+#define QLCNIC_FILEHEADER_SIZE (14 * 4)
+
+static int
+netxen_nic_validate_header(struct netxen_adapter *adapter)
+{
+       const u8 *unirom = adapter->fw->data;
+       struct uni_table_desc *directory = (struct uni_table_desc *) &unirom[0];
+       u32 fw_file_size = adapter->fw->size;
+       u32 tab_size;
+       __le32 entries;
+       __le32 entry_size;
+
+       if (fw_file_size < QLCNIC_FILEHEADER_SIZE)
+               return -EINVAL;
+
+       entries = cpu_to_le32(directory->num_entries);
+       entry_size = cpu_to_le32(directory->entry_size);
+       tab_size = cpu_to_le32(directory->findex) + (entries * entry_size);
+
+       if (fw_file_size < tab_size)
+               return -EINVAL;
+
+       return 0;
+}
+
+static int
+netxen_nic_validate_bootld(struct netxen_adapter *adapter)
+{
+       struct uni_table_desc *tab_desc;
+       struct uni_data_desc *descr;
+       const u8 *unirom = adapter->fw->data;
+       __le32 idx = cpu_to_le32(*((int *)&unirom[adapter->file_prd_off] +
+                               NX_UNI_BOOTLD_IDX_OFF));
+       u32 offs;
+       u32 tab_size;
+       u32 data_size;
+
+       tab_desc = nx_get_table_desc(unirom, NX_UNI_DIR_SECT_BOOTLD);
+
+       if (!tab_desc)
+               return -EINVAL;
+
+       tab_size = cpu_to_le32(tab_desc->findex) +
+                       (cpu_to_le32(tab_desc->entry_size) * (idx + 1));
+
+       if (adapter->fw->size < tab_size)
+               return -EINVAL;
+
+       offs = cpu_to_le32(tab_desc->findex) +
+               (cpu_to_le32(tab_desc->entry_size) * (idx));
+       descr = (struct uni_data_desc *)&unirom[offs];
+
+       data_size = cpu_to_le32(descr->findex) + cpu_to_le32(descr->size);
+
+       if (adapter->fw->size < data_size)
+               return -EINVAL;
+
+       return 0;
+}
+
+static int
+netxen_nic_validate_fw(struct netxen_adapter *adapter)
+{
+       struct uni_table_desc *tab_desc;
+       struct uni_data_desc *descr;
+       const u8 *unirom = adapter->fw->data;
+       __le32 idx = cpu_to_le32(*((int *)&unirom[adapter->file_prd_off] +
+                               NX_UNI_FIRMWARE_IDX_OFF));
+       u32 offs;
+       u32 tab_size;
+       u32 data_size;
+
+       tab_desc = nx_get_table_desc(unirom, NX_UNI_DIR_SECT_FW);
+
+       if (!tab_desc)
+               return -EINVAL;
+
+       tab_size = cpu_to_le32(tab_desc->findex) +
+                       (cpu_to_le32(tab_desc->entry_size) * (idx + 1));
+
+       if (adapter->fw->size < tab_size)
+               return -EINVAL;
+
+       offs = cpu_to_le32(tab_desc->findex) +
+               (cpu_to_le32(tab_desc->entry_size) * (idx));
+       descr = (struct uni_data_desc *)&unirom[offs];
+       data_size = cpu_to_le32(descr->findex) + cpu_to_le32(descr->size);
+
+       if (adapter->fw->size < data_size)
+               return -EINVAL;
+
+       return 0;
+}
+
+
+static int
+netxen_nic_validate_product_offs(struct netxen_adapter *adapter)
+{
+       struct uni_table_desc *ptab_descr;
+       const u8 *unirom = adapter->fw->data;
+       int mn_present = (NX_IS_REVISION_P2(adapter->ahw.revision_id)) ?
+                       1 : netxen_p3_has_mn(adapter);
+       __le32 entries;
+       __le32 entry_size;
+       u32 tab_size;
+       u32 i;
+
+       ptab_descr = nx_get_table_desc(unirom, NX_UNI_DIR_SECT_PRODUCT_TBL);
+       if (ptab_descr == NULL)
+               return -EINVAL;
+
+       entries = cpu_to_le32(ptab_descr->num_entries);
+       entry_size = cpu_to_le32(ptab_descr->entry_size);
+       tab_size = cpu_to_le32(ptab_descr->findex) + (entries * entry_size);
+
+       if (adapter->fw->size < tab_size)
+               return -EINVAL;
+
+nomn:
+       for (i = 0; i < entries; i++) {
+
+               __le32 flags, file_chiprev, offs;
+               u8 chiprev = adapter->ahw.revision_id;
+               uint32_t flagbit;
+
+               offs = cpu_to_le32(ptab_descr->findex) +
+                               (i * cpu_to_le32(ptab_descr->entry_size));
+               flags = cpu_to_le32(*((int *)&unirom[offs] + NX_UNI_FLAGS_OFF));
+               file_chiprev = cpu_to_le32(*((int *)&unirom[offs] +
+                                                       NX_UNI_CHIP_REV_OFF));
+
+               flagbit = mn_present ? 1 : 2;
+
+               if ((chiprev == file_chiprev) &&
+                                       ((1ULL << flagbit) & flags)) {
+                       adapter->file_prd_off = offs;
+                       return 0;
+               }
+       }
+
+       if (mn_present && NX_IS_REVISION_P3(adapter->ahw.revision_id)) {
+               mn_present = 0;
+               goto nomn;
+       }
+
+       return -EINVAL;
+}
+
+static int
+netxen_nic_validate_unified_romimage(struct netxen_adapter *adapter)
+{
+       if (netxen_nic_validate_header(adapter)) {
+               dev_err(&adapter->pdev->dev,
+                               "unified image: header validation failed\n");
+               return -EINVAL;
+       }
+
+       if (netxen_nic_validate_product_offs(adapter)) {
+               dev_err(&adapter->pdev->dev,
+                               "unified image: product validation failed\n");
+               return -EINVAL;
+       }
+
+       if (netxen_nic_validate_bootld(adapter)) {
+               dev_err(&adapter->pdev->dev,
+                               "unified image: bootld validation failed\n");
+               return -EINVAL;
+       }
+
+       if (netxen_nic_validate_fw(adapter)) {
+               dev_err(&adapter->pdev->dev,
+                               "unified image: firmware validation failed\n");
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+static struct uni_data_desc *nx_get_data_desc(struct netxen_adapter *adapter,
+                       u32 section, u32 idx_offset)
+{
+       const u8 *unirom = adapter->fw->data;
+       int idx = cpu_to_le32(*((int *)&unirom[adapter->file_prd_off] +
+                                                               idx_offset));
+       struct uni_table_desc *tab_desc;
+       __le32 offs;
+
+       tab_desc = nx_get_table_desc(unirom, section);
+
+       if (tab_desc == NULL)
+               return NULL;
+
+       offs = cpu_to_le32(tab_desc->findex) +
+                       (cpu_to_le32(tab_desc->entry_size) * idx);
+
+       return (struct uni_data_desc *)&unirom[offs];
+}
+
+static u8 *
+nx_get_bootld_offs(struct netxen_adapter *adapter)
+{
+       u32 offs = NETXEN_BOOTLD_START;
+
+       if (adapter->fw_type == NX_UNIFIED_ROMIMAGE)
+               offs = cpu_to_le32((nx_get_data_desc(adapter,
+                                       NX_UNI_DIR_SECT_BOOTLD,
+                                       NX_UNI_BOOTLD_IDX_OFF))->findex);
+
+       return (u8 *)&adapter->fw->data[offs];
+}
+
+static u8 *
+nx_get_fw_offs(struct netxen_adapter *adapter)
+{
+       u32 offs = NETXEN_IMAGE_START;
+
+       if (adapter->fw_type == NX_UNIFIED_ROMIMAGE)
+               offs = cpu_to_le32((nx_get_data_desc(adapter,
+                                       NX_UNI_DIR_SECT_FW,
+                                       NX_UNI_FIRMWARE_IDX_OFF))->findex);
+
+       return (u8 *)&adapter->fw->data[offs];
+}
+
+static __le32
+nx_get_fw_size(struct netxen_adapter *adapter)
+{
+       if (adapter->fw_type == NX_UNIFIED_ROMIMAGE)
+               return cpu_to_le32((nx_get_data_desc(adapter,
+                                       NX_UNI_DIR_SECT_FW,
+                                       NX_UNI_FIRMWARE_IDX_OFF))->size);
+       else
+               return cpu_to_le32(
+                               *(u32 *)&adapter->fw->data[NX_FW_SIZE_OFFSET]);
+}
+
+static __le32
+nx_get_fw_version(struct netxen_adapter *adapter)
+{
+       struct uni_data_desc *fw_data_desc;
+       const struct firmware *fw = adapter->fw;
+       __le32 major, minor, sub;
+       const u8 *ver_str;
+       int i, ret = 0;
+
+       if (adapter->fw_type == NX_UNIFIED_ROMIMAGE) {
+
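+               /*
+                * The unified image carries a human readable "REV=a.b.c"
+                * string near the end of the firmware section; locate it
+                * and decode the numeric version from it.
+                */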
+               fw_data_desc = nx_get_data_desc(adapter,
+                               NX_UNI_DIR_SECT_FW, NX_UNI_FIRMWARE_IDX_OFF);
+               ver_str = fw->data + cpu_to_le32(fw_data_desc->findex) +
+                               cpu_to_le32(fw_data_desc->size) - 17;
+
+               for (i = 0; i < 12; i++) {
+                       if (!strncmp(&ver_str[i], "REV=", 4)) {
+                               ret = sscanf(&ver_str[i+4], "%u.%u.%u ",
+                                                       &major, &minor, &sub);
+                               break;
+                       }
+               }
+
+               if (ret != 3)
+                       return 0;
+
+               return major + (minor << 8) + (sub << 16);
+
+       } else
+               return cpu_to_le32(*(u32 *)&fw->data[NX_FW_VERSION_OFFSET]);
+}
+
+static __le32
+nx_get_bios_version(struct netxen_adapter *adapter)
+{
+       const struct firmware *fw = adapter->fw;
+       __le32 bios_ver, prd_off = adapter->file_prd_off;
+
+       if (adapter->fw_type == NX_UNIFIED_ROMIMAGE) {
+               bios_ver = cpu_to_le32(*((u32 *) (&fw->data[prd_off])
+                                               + NX_UNI_BIOS_VERSION_OFF));
+               return (bios_ver << 16) + ((bios_ver >> 8) & 0xff00) +
+                                                       (bios_ver >> 24);
+       } else
+               return cpu_to_le32(*(u32 *)&fw->data[NX_BIOS_VERSION_OFFSET]);
+
+}
+
+int
+netxen_need_fw_reset(struct netxen_adapter *adapter)
+{
+       u32 count, old_count;
+       u32 val, version, major, minor, build;
+       int i, timeout;
+       u8 fw_type;
+
+       /* NX2031 firmware doesn't support heartbeat */
+       if (NX_IS_REVISION_P2(adapter->ahw.revision_id))
+               return 1;
+
+       if (adapter->need_fw_reset)
+               return 1;
+
+       /* last attempt had failed */
+       if (NXRD32(adapter, CRB_CMDPEG_STATE) == PHAN_INITIALIZE_FAILED)
+               return 1;
+
+       old_count = NXRD32(adapter, NETXEN_PEG_ALIVE_COUNTER);
+
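+       /* poll the firmware heartbeat counter for up to ~2 seconds (10 x 200 ms) */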
+       for (i = 0; i < 10; i++) {
+
+               timeout = msleep_interruptible(200);
+               if (timeout) {
+                       NXWR32(adapter, CRB_CMDPEG_STATE,
+                                       PHAN_INITIALIZE_FAILED);
+                       return -EINTR;
+               }
+
+               count = NXRD32(adapter, NETXEN_PEG_ALIVE_COUNTER);
+               if (count != old_count)
+                       break;
+       }
+
+       /* firmware is dead */
+       if (count == old_count)
+               return 1;
+
+       /* check if we have a newer or different firmware file */
+       if (adapter->fw) {
+
+               val = nx_get_fw_version(adapter);
+
+               version = NETXEN_DECODE_VERSION(val);
+
+               major = NXRD32(adapter, NETXEN_FW_VERSION_MAJOR);
+               minor = NXRD32(adapter, NETXEN_FW_VERSION_MINOR);
+               build = NXRD32(adapter, NETXEN_FW_VERSION_SUB);
+
+               if (version > NETXEN_VERSION_CODE(major, minor, build))
+                       return 1;
+
+               if (version == NETXEN_VERSION_CODE(major, minor, build) &&
+                       adapter->fw_type != NX_UNIFIED_ROMIMAGE) {
+
+                       val = NXRD32(adapter, NETXEN_MIU_MN_CONTROL);
+                       fw_type = (val & 0x4) ?
+                               NX_P3_CT_ROMIMAGE : NX_P3_MN_ROMIMAGE;
+
+                       if (adapter->fw_type != fw_type)
+                               return 1;
+               }
+       }
+
+       return 0;
+}
+
+#define NETXEN_MIN_P3_FW_SUPP  NETXEN_VERSION_CODE(4, 0, 505)
+
+int
+netxen_check_flash_fw_compatibility(struct netxen_adapter *adapter)
+{
+       u32 flash_fw_ver, min_fw_ver;
+
+       if (NX_IS_REVISION_P2(adapter->ahw.revision_id))
+               return 0;
+
+       if (netxen_rom_fast_read(adapter,
+                       NX_FW_VERSION_OFFSET, (int *)&flash_fw_ver)) {
+               dev_err(&adapter->pdev->dev,
+                       "Unable to read flash fw version\n");
+               return -EIO;
+       }
+
+       flash_fw_ver = NETXEN_DECODE_VERSION(flash_fw_ver);
+       min_fw_ver = NETXEN_MIN_P3_FW_SUPP;
+       if (flash_fw_ver >= min_fw_ver)
+               return 0;
+
+       dev_info(&adapter->pdev->dev, "Flash fw[%d.%d.%d] is < min fw supported "
+               "[4.0.505]. Please update firmware on flash\n",
+               _major(flash_fw_ver), _minor(flash_fw_ver),
+               _build(flash_fw_ver));
+       return -EINVAL;
+}
+
+static char *fw_name[] = {
+       NX_P2_MN_ROMIMAGE_NAME,
+       NX_P3_CT_ROMIMAGE_NAME,
+       NX_P3_MN_ROMIMAGE_NAME,
+       NX_UNIFIED_ROMIMAGE_NAME,
+       NX_FLASH_ROMIMAGE_NAME,
+};
+
+int
+netxen_load_firmware(struct netxen_adapter *adapter)
+{
+       u64 *ptr64;
+       u32 i, flashaddr, size;
+       const struct firmware *fw = adapter->fw;
+       struct pci_dev *pdev = adapter->pdev;
+
+       dev_info(&pdev->dev, "loading firmware from %s\n",
+                       fw_name[adapter->fw_type]);
+
+       if (NX_IS_REVISION_P2(adapter->ahw.revision_id))
+               NXWR32(adapter, NETXEN_ROMUSB_GLB_CAS_RST, 1);
+
+       if (fw) {
+               __le64 data;
+
+               size = (NETXEN_IMAGE_START - NETXEN_BOOTLD_START) / 8;
+
+               ptr64 = (u64 *)nx_get_bootld_offs(adapter);
+               flashaddr = NETXEN_BOOTLD_START;
+
+               for (i = 0; i < size; i++) {
+                       data = cpu_to_le64(ptr64[i]);
+
+                       if (adapter->pci_mem_write(adapter, flashaddr, data))
+                               return -EIO;
+
+                       flashaddr += 8;
+               }
+
+               size = (__force u32)nx_get_fw_size(adapter) / 8;
+
+               ptr64 = (u64 *)nx_get_fw_offs(adapter);
+               flashaddr = NETXEN_IMAGE_START;
+
+               for (i = 0; i < size; i++) {
+                       data = cpu_to_le64(ptr64[i]);
+
+                       if (adapter->pci_mem_write(adapter,
+                                               flashaddr, data))
+                               return -EIO;
+
+                       flashaddr += 8;
+               }
+
+               size = (__force u32)nx_get_fw_size(adapter) % 8;
+               if (size) {
+                       data = cpu_to_le64(ptr64[i]);
+
+                       if (adapter->pci_mem_write(adapter,
+                                               flashaddr, data))
+                               return -EIO;
+               }
+
+       } else {
+               u64 data;
+               u32 hi, lo;
+
+               size = (NETXEN_IMAGE_START - NETXEN_BOOTLD_START) / 8;
+               flashaddr = NETXEN_BOOTLD_START;
+
+               for (i = 0; i < size; i++) {
+                       if (netxen_rom_fast_read(adapter,
+                                       flashaddr, (int *)&lo) != 0)
+                               return -EIO;
+                       if (netxen_rom_fast_read(adapter,
+                                       flashaddr + 4, (int *)&hi) != 0)
+                               return -EIO;
+
+                       /* hi, lo are already in host endian byteorder */
+                       data = (((u64)hi << 32) | lo);
+
+                       if (adapter->pci_mem_write(adapter,
+                                               flashaddr, data))
+                               return -EIO;
+
+                       flashaddr += 8;
+               }
+       }
+       msleep(1);
+
+       if (NX_IS_REVISION_P3P(adapter->ahw.revision_id)) {
+               NXWR32(adapter, NETXEN_CRB_PEG_NET_0 + 0x18, 0x1020);
+               NXWR32(adapter, NETXEN_ROMUSB_GLB_SW_RESET, 0x80001e);
+       } else if (NX_IS_REVISION_P3(adapter->ahw.revision_id))
+               NXWR32(adapter, NETXEN_ROMUSB_GLB_SW_RESET, 0x80001d);
+       else {
+               NXWR32(adapter, NETXEN_ROMUSB_GLB_CHIP_CLK_CTRL, 0x3fff);
+               NXWR32(adapter, NETXEN_ROMUSB_GLB_CAS_RST, 0);
+       }
+
+       return 0;
+}
+
+static int
+netxen_validate_firmware(struct netxen_adapter *adapter)
+{
+       __le32 val;
+       __le32 flash_fw_ver;
+       u32 file_fw_ver, min_ver, bios;
+       struct pci_dev *pdev = adapter->pdev;
+       const struct firmware *fw = adapter->fw;
+       u8 fw_type = adapter->fw_type;
+       u32 crbinit_fix_fw;
+
+       if (fw_type == NX_UNIFIED_ROMIMAGE) {
+               if (netxen_nic_validate_unified_romimage(adapter))
+                       return -EINVAL;
+       } else {
+               val = cpu_to_le32(*(u32 *)&fw->data[NX_FW_MAGIC_OFFSET]);
+               if ((__force u32)val != NETXEN_BDINFO_MAGIC)
+                       return -EINVAL;
+
+               if (fw->size < NX_FW_MIN_SIZE)
+                       return -EINVAL;
+       }
+
+       val = nx_get_fw_version(adapter);
+
+       if (NX_IS_REVISION_P3(adapter->ahw.revision_id))
+               min_ver = NETXEN_MIN_P3_FW_SUPP;
+       else
+               min_ver = NETXEN_VERSION_CODE(3, 4, 216);
+
+       file_fw_ver = NETXEN_DECODE_VERSION(val);
+
+       if ((_major(file_fw_ver) > _NETXEN_NIC_LINUX_MAJOR) ||
+           (file_fw_ver < min_ver)) {
+               dev_err(&pdev->dev,
+                       "%s: firmware version %d.%d.%d unsupported\n",
+                       fw_name[fw_type], _major(file_fw_ver),
+                       _minor(file_fw_ver), _build(file_fw_ver));
+               return -EINVAL;
+       }
+       val = nx_get_bios_version(adapter);
+       netxen_rom_fast_read(adapter, NX_BIOS_VERSION_OFFSET, (int *)&bios);
+       if ((__force u32)val != bios) {
+               dev_err(&pdev->dev, "%s: firmware bios is incompatible\n",
+                               fw_name[fw_type]);
+               return -EINVAL;
+       }
+
+       if (netxen_rom_fast_read(adapter,
+                       NX_FW_VERSION_OFFSET, (int *)&flash_fw_ver)) {
+               dev_err(&pdev->dev, "Unable to read flash fw version\n");
+               return -EIO;
+       }
+       flash_fw_ver = NETXEN_DECODE_VERSION(flash_fw_ver);
+
+       /* New fw from file is not allowed if fw on flash is < 4.0.554 */
+       crbinit_fix_fw = NETXEN_VERSION_CODE(4, 0, 554);
+       if (file_fw_ver >= crbinit_fix_fw && flash_fw_ver < crbinit_fix_fw &&
+           NX_IS_REVISION_P3(adapter->ahw.revision_id)) {
+               dev_err(&pdev->dev, "Incompatibility detected between driver "
+                       "and firmware version on flash. This configuration "
+                       "is not recommended. Please update the firmware on "
+                       "flash immediately\n");
+               return -EINVAL;
+       }
+
+       /* check if flashed firmware is newer, only for no-mn and P2 cases */
+       if (!netxen_p3_has_mn(adapter) ||
+           NX_IS_REVISION_P2(adapter->ahw.revision_id)) {
+               if (flash_fw_ver > file_fw_ver) {
+                       dev_info(&pdev->dev, "%s: firmware is older than flash\n",
+                               fw_name[fw_type]);
+                       return -EINVAL;
+               }
+       }
+
+       NXWR32(adapter, NETXEN_CAM_RAM(0x1fc), NETXEN_BDINFO_MAGIC);
+       return 0;
+}
+
+static void
+nx_get_next_fwtype(struct netxen_adapter *adapter)
+{
+       u8 fw_type;
+
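+       /*
+        * Fallback order: unified ROM image first, then the chip-specific
+        * image(s), and finally the image already on flash.
+        */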
+       switch (adapter->fw_type) {
+       case NX_UNKNOWN_ROMIMAGE:
+               fw_type = NX_UNIFIED_ROMIMAGE;
+               break;
+
+       case NX_UNIFIED_ROMIMAGE:
+               if (NX_IS_REVISION_P3P(adapter->ahw.revision_id))
+                       fw_type = NX_FLASH_ROMIMAGE;
+               else if (NX_IS_REVISION_P2(adapter->ahw.revision_id))
+                       fw_type = NX_P2_MN_ROMIMAGE;
+               else if (netxen_p3_has_mn(adapter))
+                       fw_type = NX_P3_MN_ROMIMAGE;
+               else
+                       fw_type = NX_P3_CT_ROMIMAGE;
+               break;
+
+       case NX_P3_MN_ROMIMAGE:
+               fw_type = NX_P3_CT_ROMIMAGE;
+               break;
+
+       case NX_P2_MN_ROMIMAGE:
+       case NX_P3_CT_ROMIMAGE:
+       default:
+               fw_type = NX_FLASH_ROMIMAGE;
+               break;
+       }
+
+       adapter->fw_type = fw_type;
+}
+
+static int
+netxen_p3_has_mn(struct netxen_adapter *adapter)
+{
+       u32 capability, flashed_ver;
+       capability = 0;
+
+       /* NX2031 always had MN */
+       if (NX_IS_REVISION_P2(adapter->ahw.revision_id))
+               return 1;
+
+       netxen_rom_fast_read(adapter,
+                       NX_FW_VERSION_OFFSET, (int *)&flashed_ver);
+       flashed_ver = NETXEN_DECODE_VERSION(flashed_ver);
+
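+       /* only firmware 4.0.220 and newer reports MN presence */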
+       if (flashed_ver >= NETXEN_VERSION_CODE(4, 0, 220)) {
+
+               capability = NXRD32(adapter, NX_PEG_TUNE_CAPABILITY);
+               if (capability & NX_PEG_TUNE_MN_PRESENT)
+                       return 1;
+       }
+       return 0;
+}
+
+void netxen_request_firmware(struct netxen_adapter *adapter)
+{
+       struct pci_dev *pdev = adapter->pdev;
+       int rc = 0;
+
+       adapter->fw_type = NX_UNKNOWN_ROMIMAGE;
+
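+       /*
+        * Try each firmware type in turn; NX_FLASH_ROMIMAGE means boot the
+        * image already on flash, so no firmware file is requested.
+        */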
+next:
+       nx_get_next_fwtype(adapter);
+
+       if (adapter->fw_type == NX_FLASH_ROMIMAGE) {
+               adapter->fw = NULL;
+       } else {
+               rc = request_firmware(&adapter->fw,
+                               fw_name[adapter->fw_type], &pdev->dev);
+               if (rc != 0)
+                       goto next;
+
+               rc = netxen_validate_firmware(adapter);
+               if (rc != 0) {
+                       release_firmware(adapter->fw);
+                       msleep(1);
+                       goto next;
+               }
+       }
+}
+
+
+void
+netxen_release_firmware(struct netxen_adapter *adapter)
+{
+       release_firmware(adapter->fw);
+       adapter->fw = NULL;
+}
+
+int netxen_init_dummy_dma(struct netxen_adapter *adapter)
+{
+       u64 addr;
+       u32 hi, lo;
+
+       if (!NX_IS_REVISION_P2(adapter->ahw.revision_id))
+               return 0;
+
+       adapter->dummy_dma.addr = pci_alloc_consistent(adapter->pdev,
+                                NETXEN_HOST_DUMMY_DMA_SIZE,
+                                &adapter->dummy_dma.phys_addr);
+       if (adapter->dummy_dma.addr == NULL) {
+               dev_err(&adapter->pdev->dev,
+                       "ERROR: Could not allocate dummy DMA memory\n");
+               return -ENOMEM;
+       }
+
+       addr = (uint64_t) adapter->dummy_dma.phys_addr;
+       hi = (addr >> 32) & 0xffffffff;
+       lo = addr & 0xffffffff;
+
+       NXWR32(adapter, CRB_HOST_DUMMY_BUF_ADDR_HI, hi);
+       NXWR32(adapter, CRB_HOST_DUMMY_BUF_ADDR_LO, lo);
+
+       return 0;
+}
+
+/*
+ * NetXen DMA watchdog control:
+ *
+ *     Bit 0           : enabled => R/O: 1 watchdog active, 0 inactive
+ *     Bit 1           : disable_request => 1 req disable dma watchdog
+ *     Bit 2           : enable_request =>  1 req enable dma watchdog
+ *     Bits 3-31       : unused
+ */
+void netxen_free_dummy_dma(struct netxen_adapter *adapter)
+{
+       int i = 100;
+       u32 ctrl;
+
+       if (!NX_IS_REVISION_P2(adapter->ahw.revision_id))
+               return;
+
+       if (!adapter->dummy_dma.addr)
+               return;
+
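+       /*
+        * Request watchdog shutdown (bit 1) and poll the active bit (bit 0)
+        * for up to 100 * 50ms before freeing the buffer.
+        */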
+       ctrl = NXRD32(adapter, NETXEN_DMA_WATCHDOG_CTRL);
+       if ((ctrl & 0x1) != 0) {
+               NXWR32(adapter, NETXEN_DMA_WATCHDOG_CTRL, (ctrl | 0x2));
+
+               while ((ctrl & 0x1) != 0) {
+
+                       msleep(50);
+
+                       ctrl = NXRD32(adapter, NETXEN_DMA_WATCHDOG_CTRL);
+
+                       if (--i == 0)
+                               break;
+               }
+       }
+
+       if (i) {
+               pci_free_consistent(adapter->pdev,
+                           NETXEN_HOST_DUMMY_DMA_SIZE,
+                           adapter->dummy_dma.addr,
+                           adapter->dummy_dma.phys_addr);
+               adapter->dummy_dma.addr = NULL;
+       } else
+               dev_err(&adapter->pdev->dev, "dma_watchdog_shutdown failed\n");
+}
+
+int netxen_phantom_init(struct netxen_adapter *adapter, int pegtune_val)
+{
+       u32 val = 0;
+       int retries = 60;
+
+       if (pegtune_val)
+               return 0;
+
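+       /* poll the command peg state for up to 60 * 500ms */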
+       do {
+               val = NXRD32(adapter, CRB_CMDPEG_STATE);
+               switch (val) {
+               case PHAN_INITIALIZE_COMPLETE:
+               case PHAN_INITIALIZE_ACK:
+                       return 0;
+               case PHAN_INITIALIZE_FAILED:
+                       goto out_err;
+               default:
+                       break;
+               }
+
+               msleep(500);
+
+       } while (--retries);
+
+       NXWR32(adapter, CRB_CMDPEG_STATE, PHAN_INITIALIZE_FAILED);
+
+out_err:
+       dev_warn(&adapter->pdev->dev, "firmware init failed\n");
+       return -EIO;
+}
+
+static int
+netxen_receive_peg_ready(struct netxen_adapter *adapter)
+{
+       u32 val = 0;
+       int retries = 2000;
+
+       do {
+               val = NXRD32(adapter, CRB_RCVPEG_STATE);
+
+               if (val == PHAN_PEG_RCV_INITIALIZED)
+                       return 0;
+
+               msleep(10);
+
+       } while (--retries);
+
+       if (!retries) {
+               printk(KERN_ERR "Receive Peg initialization not "
+                             "complete, state: 0x%x.\n", val);
+               return -EIO;
+       }
+
+       return 0;
+}
+
+int netxen_init_firmware(struct netxen_adapter *adapter)
+{
+       int err;
+
+       err = netxen_receive_peg_ready(adapter);
+       if (err)
+               return err;
+
+       NXWR32(adapter, CRB_NIC_CAPABILITIES_HOST, INTR_SCHEME_PERPORT);
+       NXWR32(adapter, CRB_MPORT_MODE, MPORT_MULTI_FUNCTION_MODE);
+       NXWR32(adapter, CRB_CMDPEG_STATE, PHAN_INITIALIZE_ACK);
+
+       if (NX_IS_REVISION_P2(adapter->ahw.revision_id))
+               NXWR32(adapter, CRB_NIC_MSI_MODE_HOST, MSI_MODE_MULTIFUNC);
+
+       return err;
+}
+
+static void
+netxen_handle_linkevent(struct netxen_adapter *adapter, nx_fw_msg_t *msg)
+{
+       u32 cable_OUI;
+       u16 cable_len;
+       u16 link_speed;
+       u8  link_status, module, duplex, autoneg;
+       struct net_device *netdev = adapter->netdev;
+
+       adapter->has_link_events = 1;
+
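+       /*
+        * body[1]: cable OUI (31:0), cable length (47:32), link speed (63:48)
+        * body[2]: link status (7:0), module (15:8), duplex (23:16),
+        *          autoneg (31:24)
+        */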
+       cable_OUI = msg->body[1] & 0xffffffff;
+       cable_len = (msg->body[1] >> 32) & 0xffff;
+       link_speed = (msg->body[1] >> 48) & 0xffff;
+
+       link_status = msg->body[2] & 0xff;
+       duplex = (msg->body[2] >> 16) & 0xff;
+       autoneg = (msg->body[2] >> 24) & 0xff;
+
+       module = (msg->body[2] >> 8) & 0xff;
+       if (module == LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLE) {
+               printk(KERN_INFO "%s: unsupported cable: OUI 0x%x, length %d\n",
+                               netdev->name, cable_OUI, cable_len);
+       } else if (module == LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLELEN) {
+               printk(KERN_INFO "%s: unsupported cable length %d\n",
+                               netdev->name, cable_len);
+       }
+
+       /* update link parameters */
+       if (duplex == LINKEVENT_FULL_DUPLEX)
+               adapter->link_duplex = DUPLEX_FULL;
+       else
+               adapter->link_duplex = DUPLEX_HALF;
+       adapter->module_type = module;
+       adapter->link_autoneg = autoneg;
+       adapter->link_speed = link_speed;
+
+       netxen_advert_link_change(adapter, link_status);
+}
+
+static void
+netxen_handle_fw_message(int desc_cnt, int index,
+               struct nx_host_sds_ring *sds_ring)
+{
+       nx_fw_msg_t msg;
+       struct status_desc *desc;
+       int i = 0, opcode;
+
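+       /*
+        * A firmware message spans desc_cnt status descriptors, two 64-bit
+        * words each; at most 8 words are collected.
+        */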
+       while (desc_cnt > 0 && i < 8) {
+               desc = &sds_ring->desc_head[index];
+               msg.words[i++] = le64_to_cpu(desc->status_desc_data[0]);
+               msg.words[i++] = le64_to_cpu(desc->status_desc_data[1]);
+
+               index = get_next_index(index, sds_ring->num_desc);
+               desc_cnt--;
+       }
+
+       opcode = netxen_get_nic_msg_opcode(msg.body[0]);
+       switch (opcode) {
+       case NX_NIC_C2H_OPCODE_GET_LINKEVENT_RESPONSE:
+               netxen_handle_linkevent(sds_ring->adapter, &msg);
+               break;
+       default:
+               break;
+       }
+}
+
+static int
+netxen_alloc_rx_skb(struct netxen_adapter *adapter,
+               struct nx_host_rds_ring *rds_ring,
+               struct netxen_rx_buffer *buffer)
+{
+       struct sk_buff *skb;
+       dma_addr_t dma;
+       struct pci_dev *pdev = adapter->pdev;
+
+       buffer->skb = netdev_alloc_skb(adapter->netdev, rds_ring->skb_size);
+       if (!buffer->skb)
+               return 1;
+
+       skb = buffer->skb;
+
+       if (!adapter->ahw.cut_through)
+               skb_reserve(skb, 2);
+
+       dma = pci_map_single(pdev, skb->data,
+                       rds_ring->dma_size, PCI_DMA_FROMDEVICE);
+
+       if (pci_dma_mapping_error(pdev, dma)) {
+               dev_kfree_skb_any(skb);
+               buffer->skb = NULL;
+               return 1;
+       }
+
+       buffer->skb = skb;
+       buffer->dma = dma;
+       buffer->state = NETXEN_BUFFER_BUSY;
+
+       return 0;
+}
+
+static struct sk_buff *netxen_process_rxbuf(struct netxen_adapter *adapter,
+               struct nx_host_rds_ring *rds_ring, u16 index, u16 cksum)
+{
+       struct netxen_rx_buffer *buffer;
+       struct sk_buff *skb;
+
+       buffer = &rds_ring->rx_buf_arr[index];
+
+       pci_unmap_single(adapter->pdev, buffer->dma, rds_ring->dma_size,
+                       PCI_DMA_FROMDEVICE);
+
+       skb = buffer->skb;
+       if (!skb)
+               goto no_skb;
+
+       if (likely((adapter->netdev->features & NETIF_F_RXCSUM)
+           && cksum == STATUS_CKSUM_OK)) {
+               adapter->stats.csummed++;
+               skb->ip_summed = CHECKSUM_UNNECESSARY;
+       } else
+               skb->ip_summed = CHECKSUM_NONE;
+
+       buffer->skb = NULL;
+no_skb:
+       buffer->state = NETXEN_BUFFER_FREE;
+       return skb;
+}
+
+static struct netxen_rx_buffer *
+netxen_process_rcv(struct netxen_adapter *adapter,
+               struct nx_host_sds_ring *sds_ring,
+               int ring, u64 sts_data0)
+{
+       struct net_device *netdev = adapter->netdev;
+       struct netxen_recv_context *recv_ctx = &adapter->recv_ctx;
+       struct netxen_rx_buffer *buffer;
+       struct sk_buff *skb;
+       struct nx_host_rds_ring *rds_ring;
+       int index, length, cksum, pkt_offset;
+
+       if (unlikely(ring >= adapter->max_rds_rings))
+               return NULL;
+
+       rds_ring = &recv_ctx->rds_rings[ring];
+
+       index = netxen_get_sts_refhandle(sts_data0);
+       if (unlikely(index >= rds_ring->num_desc))
+               return NULL;
+
+       buffer = &rds_ring->rx_buf_arr[index];
+
+       length = netxen_get_sts_totallength(sts_data0);
+       cksum  = netxen_get_sts_status(sts_data0);
+       pkt_offset = netxen_get_sts_pkt_offset(sts_data0);
+
+       skb = netxen_process_rxbuf(adapter, rds_ring, index, cksum);
+       if (!skb)
+               return buffer;
+
+       if (length > rds_ring->skb_size)
+               skb_put(skb, rds_ring->skb_size);
+       else
+               skb_put(skb, length);
+
+
+       if (pkt_offset)
+               skb_pull(skb, pkt_offset);
+
+       skb->protocol = eth_type_trans(skb, netdev);
+
+       napi_gro_receive(&sds_ring->napi, skb);
+
+       adapter->stats.rx_pkts++;
+       adapter->stats.rxbytes += length;
+
+       return buffer;
+}
+
+#define TCP_HDR_SIZE            20
+#define TCP_TS_OPTION_SIZE      12
+#define TCP_TS_HDR_SIZE         (TCP_HDR_SIZE + TCP_TS_OPTION_SIZE)
+
+static struct netxen_rx_buffer *
+netxen_process_lro(struct netxen_adapter *adapter,
+               struct nx_host_sds_ring *sds_ring,
+               int ring, u64 sts_data0, u64 sts_data1)
+{
+       struct net_device *netdev = adapter->netdev;
+       struct netxen_recv_context *recv_ctx = &adapter->recv_ctx;
+       struct netxen_rx_buffer *buffer;
+       struct sk_buff *skb;
+       struct nx_host_rds_ring *rds_ring;
+       struct iphdr *iph;
+       struct tcphdr *th;
+       bool push, timestamp;
+       int l2_hdr_offset, l4_hdr_offset;
+       int index;
+       u16 lro_length, length, data_offset;
+       u32 seq_number;
+       u8 vhdr_len = 0;
+
+       if (unlikely(ring >= adapter->max_rds_rings))
+               return NULL;
+
+       rds_ring = &recv_ctx->rds_rings[ring];
+
+       index = netxen_get_lro_sts_refhandle(sts_data0);
+       if (unlikely(index >= rds_ring->num_desc))
+               return NULL;
+
+       buffer = &rds_ring->rx_buf_arr[index];
+
+       timestamp = netxen_get_lro_sts_timestamp(sts_data0);
+       lro_length = netxen_get_lro_sts_length(sts_data0);
+       l2_hdr_offset = netxen_get_lro_sts_l2_hdr_offset(sts_data0);
+       l4_hdr_offset = netxen_get_lro_sts_l4_hdr_offset(sts_data0);
+       push = netxen_get_lro_sts_push_flag(sts_data0);
+       seq_number = netxen_get_lro_sts_seq_number(sts_data1);
+
+       skb = netxen_process_rxbuf(adapter, rds_ring, index, STATUS_CKSUM_OK);
+       if (!skb)
+               return buffer;
+
+       if (timestamp)
+               data_offset = l4_hdr_offset + TCP_TS_HDR_SIZE;
+       else
+               data_offset = l4_hdr_offset + TCP_HDR_SIZE;
+
+       skb_put(skb, lro_length + data_offset);
+
+       skb_pull(skb, l2_hdr_offset);
+       skb->protocol = eth_type_trans(skb, netdev);
+
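+       /*
+        * The aggregate carries the first segment's headers; patch the IP
+        * total length/checksum and the TCP PSH/sequence fields to match.
+        */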
+       if (skb->protocol == htons(ETH_P_8021Q))
+               vhdr_len = VLAN_HLEN;
+       iph = (struct iphdr *)(skb->data + vhdr_len);
+       th = (struct tcphdr *)((skb->data + vhdr_len) + (iph->ihl << 2));
+
+       length = (iph->ihl << 2) + (th->doff << 2) + lro_length;
+       csum_replace2(&iph->check, iph->tot_len, htons(length));
+       iph->tot_len = htons(length);
+       th->psh = push;
+       th->seq = htonl(seq_number);
+
+       length = skb->len;
+
+       if (adapter->flags & NETXEN_FW_MSS_CAP)
+               skb_shinfo(skb)->gso_size  =  netxen_get_lro_sts_mss(sts_data1);
+
+       netif_receive_skb(skb);
+
+       adapter->stats.lro_pkts++;
+       adapter->stats.rxbytes += length;
+
+       return buffer;
+}
+
+#define netxen_merge_rx_buffers(list, head) \
+       do { list_splice_tail_init(list, head); } while (0)
+
+int
+netxen_process_rcv_ring(struct nx_host_sds_ring *sds_ring, int max)
+{
+       struct netxen_adapter *adapter = sds_ring->adapter;
+
+       struct list_head *cur;
+
+       struct status_desc *desc;
+       struct netxen_rx_buffer *rxbuf;
+
+       u32 consumer = sds_ring->consumer;
+
+       int count = 0;
+       u64 sts_data0, sts_data1;
+       int opcode, ring = 0, desc_cnt;
+
+       while (count < max) {
+               desc = &sds_ring->desc_head[consumer];
+               sts_data0 = le64_to_cpu(desc->status_desc_data[0]);
+
+               if (!(sts_data0 & STATUS_OWNER_HOST))
+                       break;
+
+               desc_cnt = netxen_get_sts_desc_cnt(sts_data0);
+
+               opcode = netxen_get_sts_opcode(sts_data0);
+
+               switch (opcode) {
+               case NETXEN_NIC_RXPKT_DESC:
+               case NETXEN_OLD_RXPKT_DESC:
+               case NETXEN_NIC_SYN_OFFLOAD:
+                       ring = netxen_get_sts_type(sts_data0);
+                       rxbuf = netxen_process_rcv(adapter, sds_ring,
+                                       ring, sts_data0);
+                       break;
+               case NETXEN_NIC_LRO_DESC:
+                       ring = netxen_get_lro_sts_type(sts_data0);
+                       sts_data1 = le64_to_cpu(desc->status_desc_data[1]);
+                       rxbuf = netxen_process_lro(adapter, sds_ring,
+                                       ring, sts_data0, sts_data1);
+                       break;
+               case NETXEN_NIC_RESPONSE_DESC:
+                       netxen_handle_fw_message(desc_cnt, consumer, sds_ring);
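+                       /* fall through */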
+               default:
+                       goto skip;
+               }
+
+               WARN_ON(desc_cnt > 1);
+
+               if (rxbuf)
+                       list_add_tail(&rxbuf->list, &sds_ring->free_list[ring]);
+
+skip:
+               for (; desc_cnt > 0; desc_cnt--) {
+                       desc = &sds_ring->desc_head[consumer];
+                       desc->status_desc_data[0] =
+                               cpu_to_le64(STATUS_OWNER_PHANTOM);
+                       consumer = get_next_index(consumer, sds_ring->num_desc);
+               }
+               count++;
+       }
+
+       for (ring = 0; ring < adapter->max_rds_rings; ring++) {
+               struct nx_host_rds_ring *rds_ring =
+                       &adapter->recv_ctx.rds_rings[ring];
+
+               if (!list_empty(&sds_ring->free_list[ring])) {
+                       list_for_each(cur, &sds_ring->free_list[ring]) {
+                               rxbuf = list_entry(cur,
+                                               struct netxen_rx_buffer, list);
+                               netxen_alloc_rx_skb(adapter, rds_ring, rxbuf);
+                       }
+                       spin_lock(&rds_ring->lock);
+                       netxen_merge_rx_buffers(&sds_ring->free_list[ring],
+                                               &rds_ring->free_list);
+                       spin_unlock(&rds_ring->lock);
+               }
+
+               netxen_post_rx_buffers_nodb(adapter, rds_ring);
+       }
+
+       if (count) {
+               sds_ring->consumer = consumer;
+               NXWRIO(adapter, sds_ring->crb_sts_consumer, consumer);
+       }
+
+       return count;
+}
+
+/* Process Command status ring */
+int netxen_process_cmd_ring(struct netxen_adapter *adapter)
+{
+       u32 sw_consumer, hw_consumer;
+       int count = 0, i;
+       struct netxen_cmd_buffer *buffer;
+       struct pci_dev *pdev = adapter->pdev;
+       struct net_device *netdev = adapter->netdev;
+       struct netxen_skb_frag *frag;
+       int done = 0;
+       struct nx_host_tx_ring *tx_ring = adapter->tx_ring;
+
+       if (!spin_trylock_bh(&adapter->tx_clean_lock))
+               return 1;
+
+       sw_consumer = tx_ring->sw_consumer;
+       hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer));
+
+       while (sw_consumer != hw_consumer) {
+               buffer = &tx_ring->cmd_buf_arr[sw_consumer];
+               if (buffer->skb) {
+                       frag = &buffer->frag_array[0];
+                       pci_unmap_single(pdev, frag->dma, frag->length,
+                                        PCI_DMA_TODEVICE);
+                       frag->dma = 0ULL;
+                       for (i = 1; i < buffer->frag_count; i++) {
+                               frag++; /* Get the next frag */
+                               pci_unmap_page(pdev, frag->dma, frag->length,
+                                              PCI_DMA_TODEVICE);
+                               frag->dma = 0ULL;
+                       }
+
+                       adapter->stats.xmitfinished++;
+                       dev_kfree_skb_any(buffer->skb);
+                       buffer->skb = NULL;
+               }
+
+               sw_consumer = get_next_index(sw_consumer, tx_ring->num_desc);
+               if (++count >= MAX_STATUS_HANDLE)
+                       break;
+       }
+
+       tx_ring->sw_consumer = sw_consumer;
+
+       if (count && netif_running(netdev)) {
+               smp_mb();
+
+               if (netif_queue_stopped(netdev) && netif_carrier_ok(netdev))
+                       if (netxen_tx_avail(tx_ring) > TX_STOP_THRESH)
+                               netif_wake_queue(netdev);
+               adapter->tx_timeo_cnt = 0;
+       }
+       /*
+        * If everything is freed up to consumer then check if the ring is
+        * full. If the ring is full then check if more needs to be freed and
+        * schedule the callback again.
+        *
+        * This happens when there are 2 CPUs. One could be freeing and the
+        * other filling it. If the ring is full when we get out of here and
+        * the card has already interrupted the host then the host can miss the
+        * interrupt.
+        *
+        * There is still a possible race condition and the host could miss an
+        * interrupt. The card has to take care of this.
+        */
+       hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer));
+       done = (sw_consumer == hw_consumer);
+       spin_unlock_bh(&adapter->tx_clean_lock);
+
+       return done;
+}
+
+void
+netxen_post_rx_buffers(struct netxen_adapter *adapter, u32 ringid,
+       struct nx_host_rds_ring *rds_ring)
+{
+       struct rcv_desc *pdesc;
+       struct netxen_rx_buffer *buffer;
+       int producer, count = 0;
+       netxen_ctx_msg msg = 0;
+       struct list_head *head;
+
+       producer = rds_ring->producer;
+
+       head = &rds_ring->free_list;
+       while (!list_empty(head)) {
+
+               buffer = list_entry(head->next, struct netxen_rx_buffer, list);
+
+               if (!buffer->skb) {
+                       if (netxen_alloc_rx_skb(adapter, rds_ring, buffer))
+                               break;
+               }
+
+               count++;
+               list_del(&buffer->list);
+
+               /* make a rcv descriptor  */
+               pdesc = &rds_ring->desc_head[producer];
+               pdesc->addr_buffer = cpu_to_le64(buffer->dma);
+               pdesc->reference_handle = cpu_to_le16(buffer->ref_handle);
+               pdesc->buffer_length = cpu_to_le32(rds_ring->dma_size);
+
+               producer = get_next_index(producer, rds_ring->num_desc);
+       }
+
+       if (count) {
+               rds_ring->producer = producer;
+               NXWRIO(adapter, rds_ring->crb_rcv_producer,
+                               (producer-1) & (rds_ring->num_desc-1));
+
+               if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) {
+                       /*
+                        * Write a doorbell msg to tell phanmon of change in
+                        * receive ring producer
+                        * Only for firmware version < 4.0.0
+                        */
+                       netxen_set_msg_peg_id(msg, NETXEN_RCV_PEG_DB_ID);
+                       netxen_set_msg_privid(msg);
+                       netxen_set_msg_count(msg,
+                                            ((producer - 1) &
+                                             (rds_ring->num_desc - 1)));
+                       netxen_set_msg_ctxid(msg, adapter->portnum);
+                       netxen_set_msg_opcode(msg, NETXEN_RCV_PRODUCER(ringid));
+                       NXWRIO(adapter, DB_NORMALIZE(adapter,
+                                       NETXEN_RCV_PRODUCER_OFFSET), msg);
+               }
+       }
+}
+
+static void
+netxen_post_rx_buffers_nodb(struct netxen_adapter *adapter,
+               struct nx_host_rds_ring *rds_ring)
+{
+       struct rcv_desc *pdesc;
+       struct netxen_rx_buffer *buffer;
+       int producer, count = 0;
+       struct list_head *head;
+
+       if (!spin_trylock(&rds_ring->lock))
+               return;
+
+       producer = rds_ring->producer;
+
+       head = &rds_ring->free_list;
+       while (!list_empty(head)) {
+
+               buffer = list_entry(head->next, struct netxen_rx_buffer, list);
+
+               if (!buffer->skb) {
+                       if (netxen_alloc_rx_skb(adapter, rds_ring, buffer))
+                               break;
+               }
+
+               count++;
+               list_del(&buffer->list);
+
+               /* make a rcv descriptor  */
+               pdesc = &rds_ring->desc_head[producer];
+               pdesc->reference_handle = cpu_to_le16(buffer->ref_handle);
+               pdesc->buffer_length = cpu_to_le32(rds_ring->dma_size);
+               pdesc->addr_buffer = cpu_to_le64(buffer->dma);
+
+               producer = get_next_index(producer, rds_ring->num_desc);
+       }
+
+       if (count) {
+               rds_ring->producer = producer;
+               NXWRIO(adapter, rds_ring->crb_rcv_producer,
+                               (producer - 1) & (rds_ring->num_desc - 1));
+       }
+       spin_unlock(&rds_ring->lock);
+}
+
+void netxen_nic_clear_stats(struct netxen_adapter *adapter)
+{
+       memset(&adapter->stats, 0, sizeof(adapter->stats));
+}
+
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
new file mode 100644 (file)
index 0000000..7a0281a
--- /dev/null
@@ -0,0 +1,3527 @@
+/*
+ * Copyright (C) 2003 - 2009 NetXen, Inc.
+ * Copyright (C) 2009 - QLogic Corporation.
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called "COPYING".
+ *
+ */
+
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/interrupt.h>
+#include "netxen_nic_hw.h"
+
+#include "netxen_nic.h"
+
+#include <linux/dma-mapping.h>
+#include <linux/if_vlan.h>
+#include <net/ip.h>
+#include <linux/ipv6.h>
+#include <linux/inetdevice.h>
+#include <linux/sysfs.h>
+#include <linux/aer.h>
+
+MODULE_DESCRIPTION("QLogic/NetXen (1/10) GbE Intelligent Ethernet Driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(NETXEN_NIC_LINUX_VERSIONID);
+MODULE_FIRMWARE(NX_UNIFIED_ROMIMAGE_NAME);
+
+char netxen_nic_driver_name[] = "netxen_nic";
+static char netxen_nic_driver_string[] = "QLogic/NetXen Network Driver v"
+    NETXEN_NIC_LINUX_VERSIONID;
+
+static int port_mode = NETXEN_PORT_MODE_AUTO_NEG;
+
+/* Default to restricted 1G auto-neg mode */
+static int wol_port_mode = 5;
+
+static int use_msi = 1;
+
+static int use_msi_x = 1;
+
+static int auto_fw_reset = AUTO_FW_RESET_ENABLED;
+module_param(auto_fw_reset, int, 0644);
+MODULE_PARM_DESC(auto_fw_reset, "Auto firmware reset (0=disabled, 1=enabled)");
+
+static int netxen_nic_probe(struct pci_dev *pdev,
+               const struct pci_device_id *ent);
+static void netxen_nic_remove(struct pci_dev *pdev);
+static int netxen_nic_open(struct net_device *netdev);
+static int netxen_nic_close(struct net_device *netdev);
+static netdev_tx_t netxen_nic_xmit_frame(struct sk_buff *,
+                                              struct net_device *);
+static void netxen_tx_timeout(struct net_device *netdev);
+static void netxen_tx_timeout_task(struct work_struct *work);
+static void netxen_fw_poll_work(struct work_struct *work);
+static void netxen_schedule_work(struct netxen_adapter *adapter,
+               work_func_t func, int delay);
+static void netxen_cancel_fw_work(struct netxen_adapter *adapter);
+static int netxen_nic_poll(struct napi_struct *napi, int budget);
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void netxen_nic_poll_controller(struct net_device *netdev);
+#endif
+
+static void netxen_create_sysfs_entries(struct netxen_adapter *adapter);
+static void netxen_remove_sysfs_entries(struct netxen_adapter *adapter);
+static void netxen_create_diag_entries(struct netxen_adapter *adapter);
+static void netxen_remove_diag_entries(struct netxen_adapter *adapter);
+static int nx_dev_request_aer(struct netxen_adapter *adapter);
+static int nx_decr_dev_ref_cnt(struct netxen_adapter *adapter);
+static int netxen_can_start_firmware(struct netxen_adapter *adapter);
+
+static irqreturn_t netxen_intr(int irq, void *data);
+static irqreturn_t netxen_msi_intr(int irq, void *data);
+static irqreturn_t netxen_msix_intr(int irq, void *data);
+
+static void netxen_free_ip_list(struct netxen_adapter *, bool);
+static void netxen_restore_indev_addr(struct net_device *dev, unsigned long);
+static struct rtnl_link_stats64 *netxen_nic_get_stats(struct net_device *dev,
+                                                     struct rtnl_link_stats64 *stats);
+static int netxen_nic_set_mac(struct net_device *netdev, void *p);
+
+/*  PCI Device ID Table  */
+#define ENTRY(device) \
+       {PCI_DEVICE(PCI_VENDOR_ID_NETXEN, (device)), \
+       .class = PCI_CLASS_NETWORK_ETHERNET << 8, .class_mask = ~0}
+
+static const struct pci_device_id netxen_pci_tbl[] = {
+       ENTRY(PCI_DEVICE_ID_NX2031_10GXSR),
+       ENTRY(PCI_DEVICE_ID_NX2031_10GCX4),
+       ENTRY(PCI_DEVICE_ID_NX2031_4GCU),
+       ENTRY(PCI_DEVICE_ID_NX2031_IMEZ),
+       ENTRY(PCI_DEVICE_ID_NX2031_HMEZ),
+       ENTRY(PCI_DEVICE_ID_NX2031_XG_MGMT),
+       ENTRY(PCI_DEVICE_ID_NX2031_XG_MGMT2),
+       ENTRY(PCI_DEVICE_ID_NX3031),
+       {0,}
+};
+
+MODULE_DEVICE_TABLE(pci, netxen_pci_tbl);
+
+static uint32_t crb_cmd_producer[4] = {
+       CRB_CMD_PRODUCER_OFFSET, CRB_CMD_PRODUCER_OFFSET_1,
+       CRB_CMD_PRODUCER_OFFSET_2, CRB_CMD_PRODUCER_OFFSET_3
+};
+
+void
+netxen_nic_update_cmd_producer(struct netxen_adapter *adapter,
+               struct nx_host_tx_ring *tx_ring)
+{
+       NXWRIO(adapter, tx_ring->crb_cmd_producer, tx_ring->producer);
+}
+
+static uint32_t crb_cmd_consumer[4] = {
+       CRB_CMD_CONSUMER_OFFSET, CRB_CMD_CONSUMER_OFFSET_1,
+       CRB_CMD_CONSUMER_OFFSET_2, CRB_CMD_CONSUMER_OFFSET_3
+};
+
+static inline void
+netxen_nic_update_cmd_consumer(struct netxen_adapter *adapter,
+               struct nx_host_tx_ring *tx_ring)
+{
+       NXWRIO(adapter, tx_ring->crb_cmd_consumer, tx_ring->sw_consumer);
+}
+
+static uint32_t msi_tgt_status[8] = {
+       ISR_INT_TARGET_STATUS, ISR_INT_TARGET_STATUS_F1,
+       ISR_INT_TARGET_STATUS_F2, ISR_INT_TARGET_STATUS_F3,
+       ISR_INT_TARGET_STATUS_F4, ISR_INT_TARGET_STATUS_F5,
+       ISR_INT_TARGET_STATUS_F6, ISR_INT_TARGET_STATUS_F7
+};
+
+static struct netxen_legacy_intr_set legacy_intr[] = NX_LEGACY_INTR_CONFIG;
+
+static inline void netxen_nic_disable_int(struct nx_host_sds_ring *sds_ring)
+{
+       struct netxen_adapter *adapter = sds_ring->adapter;
+
+       NXWRIO(adapter, sds_ring->crb_intr_mask, 0);
+}
+
+static inline void netxen_nic_enable_int(struct nx_host_sds_ring *sds_ring)
+{
+       struct netxen_adapter *adapter = sds_ring->adapter;
+
+       NXWRIO(adapter, sds_ring->crb_intr_mask, 0x1);
+
+       if (!NETXEN_IS_MSI_FAMILY(adapter))
+               NXWRIO(adapter, adapter->tgt_mask_reg, 0xfbff);
+}
+
+static int
+netxen_alloc_sds_rings(struct netxen_recv_context *recv_ctx, int count)
+{
+       int size = sizeof(struct nx_host_sds_ring) * count;
+
+       recv_ctx->sds_rings = kzalloc(size, GFP_KERNEL);
+
+       return recv_ctx->sds_rings == NULL;
+}
+
+static void
+netxen_free_sds_rings(struct netxen_recv_context *recv_ctx)
+{
+       kfree(recv_ctx->sds_rings);
+       recv_ctx->sds_rings = NULL;
+}
+
+static int
+netxen_napi_add(struct netxen_adapter *adapter, struct net_device *netdev)
+{
+       int ring;
+       struct nx_host_sds_ring *sds_ring;
+       struct netxen_recv_context *recv_ctx = &adapter->recv_ctx;
+
+       if (netxen_alloc_sds_rings(recv_ctx, adapter->max_sds_rings))
+               return -ENOMEM;
+
+       for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+               sds_ring = &recv_ctx->sds_rings[ring];
+               netif_napi_add(netdev, &sds_ring->napi,
+                               netxen_nic_poll, NAPI_POLL_WEIGHT);
+       }
+
+       return 0;
+}
+
+static void
+netxen_napi_del(struct netxen_adapter *adapter)
+{
+       int ring;
+       struct nx_host_sds_ring *sds_ring;
+       struct netxen_recv_context *recv_ctx = &adapter->recv_ctx;
+
+       for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+               sds_ring = &recv_ctx->sds_rings[ring];
+               netif_napi_del(&sds_ring->napi);
+       }
+
+       netxen_free_sds_rings(&adapter->recv_ctx);
+}
+
+static void
+netxen_napi_enable(struct netxen_adapter *adapter)
+{
+       int ring;
+       struct nx_host_sds_ring *sds_ring;
+       struct netxen_recv_context *recv_ctx = &adapter->recv_ctx;
+
+       for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+               sds_ring = &recv_ctx->sds_rings[ring];
+               napi_enable(&sds_ring->napi);
+               netxen_nic_enable_int(sds_ring);
+       }
+}
+
+static void
+netxen_napi_disable(struct netxen_adapter *adapter)
+{
+       int ring;
+       struct nx_host_sds_ring *sds_ring;
+       struct netxen_recv_context *recv_ctx = &adapter->recv_ctx;
+
+       for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+               sds_ring = &recv_ctx->sds_rings[ring];
+               netxen_nic_disable_int(sds_ring);
+               napi_synchronize(&sds_ring->napi);
+               napi_disable(&sds_ring->napi);
+       }
+}
+
+static int nx_set_dma_mask(struct netxen_adapter *adapter)
+{
+       struct pci_dev *pdev = adapter->pdev;
+       uint64_t mask, cmask;
+
+       adapter->pci_using_dac = 0;
+
+       mask = DMA_BIT_MASK(32);
+       cmask = DMA_BIT_MASK(32);
+
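+       /*
+        * NX2031 (P2) does 35-bit DMA (32-bit coherent, 32-bit on IA64);
+        * later chips handle 39-bit for both mappings.
+        */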
+       if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) {
+#ifndef CONFIG_IA64
+               mask = DMA_BIT_MASK(35);
+#endif
+       } else {
+               mask = DMA_BIT_MASK(39);
+               cmask = mask;
+       }
+
+       if (pci_set_dma_mask(pdev, mask) == 0 &&
+               pci_set_consistent_dma_mask(pdev, cmask) == 0) {
+               adapter->pci_using_dac = 1;
+               return 0;
+       }
+
+       return -EIO;
+}
+
+/* Update addressable range if firmware supports it */
+static int
+nx_update_dma_mask(struct netxen_adapter *adapter)
+{
+       int change, shift, err;
+       uint64_t mask, old_mask, old_cmask;
+       struct pci_dev *pdev = adapter->pdev;
+
+       change = 0;
+
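+       /* CRB_DMA_SHIFT advertises extra addressable bits beyond 32 */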
+       shift = NXRD32(adapter, CRB_DMA_SHIFT);
+       if (shift > 32)
+               return 0;
+
+       if (NX_IS_REVISION_P3(adapter->ahw.revision_id) && (shift > 9))
+               change = 1;
+       else if ((adapter->ahw.revision_id == NX_P2_C1) && (shift <= 4))
+               change = 1;
+
+       if (change) {
+               old_mask = pdev->dma_mask;
+               old_cmask = pdev->dev.coherent_dma_mask;
+
+               mask = DMA_BIT_MASK(32+shift);
+
+               err = pci_set_dma_mask(pdev, mask);
+               if (err)
+                       goto err_out;
+
+               if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) {
+
+                       err = pci_set_consistent_dma_mask(pdev, mask);
+                       if (err)
+                               goto err_out;
+               }
+               dev_info(&pdev->dev, "using %d-bit dma mask\n", 32+shift);
+       }
+
+       return 0;
+
+err_out:
+       pci_set_dma_mask(pdev, old_mask);
+       pci_set_consistent_dma_mask(pdev, old_cmask);
+       return err;
+}
+
+static int
+netxen_check_hw_init(struct netxen_adapter *adapter, int first_boot)
+{
+       u32 val, timeout;
+
+       if (first_boot == 0x55555555) {
+               /* This is the first boot after power up */
+               NXWR32(adapter, NETXEN_CAM_RAM(0x1fc), NETXEN_BDINFO_MAGIC);
+
+               if (!NX_IS_REVISION_P2(adapter->ahw.revision_id))
+                       return 0;
+
+               /* PCI bus master workaround */
+               first_boot = NXRD32(adapter, NETXEN_PCIE_REG(0x4));
+               if (!(first_boot & 0x4)) {
+                       first_boot |= 0x4;
+                       NXWR32(adapter, NETXEN_PCIE_REG(0x4), first_boot);
+                       NXRD32(adapter, NETXEN_PCIE_REG(0x4));
+               }
+
+               /* This is the first boot after power up */
+               first_boot = NXRD32(adapter, NETXEN_ROMUSB_GLB_SW_RESET);
+               if (first_boot != 0x80000f) {
+                       /* clear the register for future unloads/loads */
+                       NXWR32(adapter, NETXEN_CAM_RAM(0x1fc), 0);
+                       return -EIO;
+               }
+
+               /* Start P2 boot loader */
+               val = NXRD32(adapter, NETXEN_ROMUSB_GLB_PEGTUNE_DONE);
+               NXWR32(adapter, NETXEN_ROMUSB_GLB_PEGTUNE_DONE, val | 0x1);
+               timeout = 0;
+               do {
+                       msleep(1);
+                       val = NXRD32(adapter, NETXEN_CAM_RAM(0x1fc));
+
+                       if (++timeout > 5000)
+                               return -EIO;
+
+               } while (val == NETXEN_BDINFO_MAGIC);
+       }
+       return 0;
+}
+
+static void netxen_set_port_mode(struct netxen_adapter *adapter)
+{
+       u32 val, data;
+
+       val = adapter->ahw.board_type;
+       if ((val == NETXEN_BRDTYPE_P3_HMEZ) ||
+               (val == NETXEN_BRDTYPE_P3_XG_LOM)) {
+               if (port_mode == NETXEN_PORT_MODE_802_3_AP) {
+                       data = NETXEN_PORT_MODE_802_3_AP;
+                       NXWR32(adapter, NETXEN_PORT_MODE_ADDR, data);
+               } else if (port_mode == NETXEN_PORT_MODE_XG) {
+                       data = NETXEN_PORT_MODE_XG;
+                       NXWR32(adapter, NETXEN_PORT_MODE_ADDR, data);
+               } else if (port_mode == NETXEN_PORT_MODE_AUTO_NEG_1G) {
+                       data = NETXEN_PORT_MODE_AUTO_NEG_1G;
+                       NXWR32(adapter, NETXEN_PORT_MODE_ADDR, data);
+               } else if (port_mode == NETXEN_PORT_MODE_AUTO_NEG_XG) {
+                       data = NETXEN_PORT_MODE_AUTO_NEG_XG;
+                       NXWR32(adapter, NETXEN_PORT_MODE_ADDR, data);
+               } else {
+                       data = NETXEN_PORT_MODE_AUTO_NEG;
+                       NXWR32(adapter, NETXEN_PORT_MODE_ADDR, data);
+               }
+
+               if ((wol_port_mode != NETXEN_PORT_MODE_802_3_AP) &&
+                       (wol_port_mode != NETXEN_PORT_MODE_XG) &&
+                       (wol_port_mode != NETXEN_PORT_MODE_AUTO_NEG_1G) &&
+                       (wol_port_mode != NETXEN_PORT_MODE_AUTO_NEG_XG)) {
+                       wol_port_mode = NETXEN_PORT_MODE_AUTO_NEG;
+               }
+               NXWR32(adapter, NETXEN_WOL_PORT_MODE, wol_port_mode);
+       }
+}
+
+#define PCI_CAP_ID_GEN  0x10
+
+static void netxen_pcie_strap_init(struct netxen_adapter *adapter)
+{
+       u32 pdevfuncsave;
+       u32 c8c9value = 0;
+       u32 chicken = 0;
+       u32 control = 0;
+       int i, pos;
+       struct pci_dev *pdev;
+
+       pdev = adapter->pdev;
+
+       chicken = NXRD32(adapter, NETXEN_PCIE_REG(PCIE_CHICKEN3));
+       /* clear chicken3.25:24 */
+       chicken &= 0xFCFFFFFF;
+       /*
+        * Gen2 parts get 0xF1000 in the c8/c9 registers; Gen1 B0 parts get
+        * 0xF1020; otherwise the registers are left untouched.
+        */
+       pos = pci_find_capability(pdev, PCI_CAP_ID_GEN);
+       if (pos == 0xC0) {
+               pci_read_config_dword(pdev, pos + 0x10, &control);
+               if ((control & 0x000F0000) != 0x00020000) {
+                       /*  set chicken3.24 if gen1 */
+                       chicken |= 0x01000000;
+               }
+               dev_info(&adapter->pdev->dev, "Gen2 strapping detected\n");
+               c8c9value = 0xF1000;
+       } else {
+               /* set chicken3.24 if gen1 */
+               chicken |= 0x01000000;
+               dev_info(&adapter->pdev->dev, "Gen1 strapping detected\n");
+               if (adapter->ahw.revision_id == NX_P3_B0)
+                       c8c9value = 0xF1020;
+               else
+                       c8c9value = 0;
+       }
+
+       NXWR32(adapter, NETXEN_PCIE_REG(PCIE_CHICKEN3), chicken);
+
+       if (!c8c9value)
+               return;
+
+       pdevfuncsave = pdev->devfn;
+       if (pdevfuncsave & 0x07)
+               return;
+
+       for (i = 0; i < 8; i++) {
+               pci_read_config_dword(pdev, pos + 8, &control);
+               pci_read_config_dword(pdev, pos + 8, &control);
+               pci_write_config_dword(pdev, pos + 8, c8c9value);
+               pdev->devfn++;
+       }
+       pdev->devfn = pdevfuncsave;
+}
+
+static void netxen_set_msix_bit(struct pci_dev *pdev, int enable)
+{
+       u32 control;
+
+       if (pdev->msix_cap) {
+               pci_read_config_dword(pdev, pdev->msix_cap, &control);
+               if (enable)
+                       control |= PCI_MSIX_FLAGS_ENABLE;
+               else
+                       control = 0;
+               pci_write_config_dword(pdev, pdev->msix_cap, control);
+       }
+}
+
+static void netxen_init_msix_entries(struct netxen_adapter *adapter, int count)
+{
+       int i;
+
+       for (i = 0; i < count; i++)
+               adapter->msix_entries[i].entry = i;
+}
+
+static int
+netxen_read_mac_addr(struct netxen_adapter *adapter)
+{
+       int i;
+       unsigned char *p;
+       u64 mac_addr;
+       struct net_device *netdev = adapter->netdev;
+       struct pci_dev *pdev = adapter->pdev;
+
+       if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) {
+               if (netxen_p3_get_mac_addr(adapter, &mac_addr) != 0)
+                       return -EIO;
+       } else {
+               if (netxen_get_flash_mac_addr(adapter, &mac_addr) != 0)
+                       return -EIO;
+       }
+
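+       /* the MAC from flash/firmware is byte-reversed vs. netdev order */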
+       p = (unsigned char *)&mac_addr;
+       for (i = 0; i < 6; i++)
+               netdev->dev_addr[i] = *(p + 5 - i);
+
+       memcpy(adapter->mac_addr, netdev->dev_addr, netdev->addr_len);
+
+       /* set station address */
+
+       if (!is_valid_ether_addr(netdev->dev_addr))
+               dev_warn(&pdev->dev, "Bad MAC address %pM.\n", netdev->dev_addr);
+
+       return 0;
+}
+
+static int netxen_nic_set_mac(struct net_device *netdev, void *p)
+{
+       struct netxen_adapter *adapter = netdev_priv(netdev);
+       struct sockaddr *addr = p;
+
+       if (!is_valid_ether_addr(addr->sa_data))
+               return -EADDRNOTAVAIL;
+
+       if (netif_running(netdev)) {
+               netif_device_detach(netdev);
+               netxen_napi_disable(adapter);
+       }
+
+       memcpy(adapter->mac_addr, addr->sa_data, netdev->addr_len);
+       memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
+       adapter->macaddr_set(adapter, addr->sa_data);
+
+       if (netif_running(netdev)) {
+               netif_device_attach(netdev);
+               netxen_napi_enable(adapter);
+       }
+       return 0;
+}
+
+static void netxen_set_multicast_list(struct net_device *dev)
+{
+       struct netxen_adapter *adapter = netdev_priv(dev);
+
+       adapter->set_multi(dev);
+}
+
+static netdev_features_t netxen_fix_features(struct net_device *dev,
+       netdev_features_t features)
+{
+       if (!(features & NETIF_F_RXCSUM)) {
+               netdev_info(dev, "disabling LRO as RXCSUM is off\n");
+
+               features &= ~NETIF_F_LRO;
+       }
+
+       return features;
+}
+
+static int netxen_set_features(struct net_device *dev,
+       netdev_features_t features)
+{
+       struct netxen_adapter *adapter = netdev_priv(dev);
+       int hw_lro;
+
+       if (!((dev->features ^ features) & NETIF_F_LRO))
+               return 0;
+
+       hw_lro = (features & NETIF_F_LRO) ? NETXEN_NIC_LRO_ENABLED
+                : NETXEN_NIC_LRO_DISABLED;
+
+       if (netxen_config_hw_lro(adapter, hw_lro))
+               return -EIO;
+
+       if (!(features & NETIF_F_LRO) && netxen_send_lro_cleanup(adapter))
+               return -EIO;
+
+       return 0;
+}
+
+static const struct net_device_ops netxen_netdev_ops = {
+       .ndo_open          = netxen_nic_open,
+       .ndo_stop          = netxen_nic_close,
+       .ndo_start_xmit    = netxen_nic_xmit_frame,
+       .ndo_get_stats64   = netxen_nic_get_stats,
+       .ndo_validate_addr = eth_validate_addr,
+       .ndo_set_rx_mode   = netxen_set_multicast_list,
+       .ndo_set_mac_address    = netxen_nic_set_mac,
+       .ndo_change_mtu    = netxen_nic_change_mtu,
+       .ndo_tx_timeout    = netxen_tx_timeout,
+       .ndo_fix_features = netxen_fix_features,
+       .ndo_set_features = netxen_set_features,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+       .ndo_poll_controller = netxen_nic_poll_controller,
+#endif
+};
+
+static inline bool netxen_function_zero(struct pci_dev *pdev)
+{
+       return PCI_FUNC(pdev->devfn) == 0;
+}
+
+static inline void netxen_set_interrupt_mode(struct netxen_adapter *adapter,
+                                            u32 mode)
+{
+       NXWR32(adapter, NETXEN_INTR_MODE_REG, mode);
+}
+
+static inline u32 netxen_get_interrupt_mode(struct netxen_adapter *adapter)
+{
+       return NXRD32(adapter, NETXEN_INTR_MODE_REG);
+}
+
+static void
+netxen_initialize_interrupt_registers(struct netxen_adapter *adapter)
+{
+       struct netxen_legacy_intr_set *legacy_intrp;
+       u32 tgt_status_reg, int_state_reg;
+
+       if (adapter->ahw.revision_id >= NX_P3_B0)
+               legacy_intrp = &legacy_intr[adapter->ahw.pci_func];
+       else
+               legacy_intrp = &legacy_intr[0];
+
+       tgt_status_reg = legacy_intrp->tgt_status_reg;
+       int_state_reg = ISR_INT_STATE_REG;
+
+       adapter->int_vec_bit = legacy_intrp->int_vec_bit;
+       adapter->tgt_status_reg = netxen_get_ioaddr(adapter, tgt_status_reg);
+       adapter->tgt_mask_reg = netxen_get_ioaddr(adapter,
+                                                 legacy_intrp->tgt_mask_reg);
+       adapter->pci_int_reg = netxen_get_ioaddr(adapter,
+                                                legacy_intrp->pci_int_reg);
+       adapter->isr_int_vec = netxen_get_ioaddr(adapter, ISR_INT_VECTOR);
+
+       if (adapter->ahw.revision_id >= NX_P3_B1)
+               adapter->crb_int_state_reg = netxen_get_ioaddr(adapter,
+                                                              int_state_reg);
+       else
+               adapter->crb_int_state_reg = netxen_get_ioaddr(adapter,
+                                                              CRB_INT_VECTOR);
+}
+
+static int netxen_setup_msi_interrupts(struct netxen_adapter *adapter,
+                                      int num_msix)
+{
+       struct pci_dev *pdev = adapter->pdev;
+       u32 value;
+       int err;
+
+       if (adapter->msix_supported) {
+               netxen_init_msix_entries(adapter, num_msix);
+               err = pci_enable_msix_range(pdev, adapter->msix_entries,
+                                           num_msix, num_msix);
+               if (err > 0) {
+                       adapter->flags |= NETXEN_NIC_MSIX_ENABLED;
+                       netxen_set_msix_bit(pdev, 1);
+
+                       if (adapter->rss_supported)
+                               adapter->max_sds_rings = num_msix;
+
+                       dev_info(&pdev->dev, "using msi-x interrupts\n");
+                       return 0;
+               }
+               /* fall through for msi */
+       }
+
+       if (use_msi && !pci_enable_msi(pdev)) {
+               value = msi_tgt_status[adapter->ahw.pci_func];
+               adapter->flags |= NETXEN_NIC_MSI_ENABLED;
+               adapter->tgt_status_reg = netxen_get_ioaddr(adapter, value);
+               adapter->msix_entries[0].vector = pdev->irq;
+               dev_info(&pdev->dev, "using msi interrupts\n");
+               return 0;
+       }
+
+       dev_err(&pdev->dev, "Failed to acquire MSI-X/MSI interrupt vector\n");
+       return -EIO;
+}
+
+static int netxen_setup_intr(struct netxen_adapter *adapter)
+{
+       struct pci_dev *pdev = adapter->pdev;
+       int num_msix;
+
+       if (adapter->rss_supported)
+               num_msix = (num_online_cpus() >= MSIX_ENTRIES_PER_ADAPTER) ?
+                           MSIX_ENTRIES_PER_ADAPTER : 2;
+       else
+               num_msix = 1;
+
+       adapter->max_sds_rings = 1;
+       adapter->flags &= ~(NETXEN_NIC_MSI_ENABLED | NETXEN_NIC_MSIX_ENABLED);
+
+       netxen_initialize_interrupt_registers(adapter);
+       netxen_set_msix_bit(pdev, 0);
+
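+       /*
+        * Function 0 picks the interrupt mode and publishes it in
+        * NETXEN_INTR_MODE_REG; the other functions must follow it.
+        */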
+       if (netxen_function_zero(pdev)) {
+               if (!netxen_setup_msi_interrupts(adapter, num_msix))
+                       netxen_set_interrupt_mode(adapter, NETXEN_MSI_MODE);
+               else
+                       netxen_set_interrupt_mode(adapter, NETXEN_INTX_MODE);
+       } else {
+               if (netxen_get_interrupt_mode(adapter) == NETXEN_MSI_MODE &&
+                   netxen_setup_msi_interrupts(adapter, num_msix)) {
+                       dev_err(&pdev->dev, "Co-existence of MSI-X/MSI and INTx interrupts is not supported\n");
+                       return -EIO;
+               }
+       }
+
+       if (!NETXEN_IS_MSI_FAMILY(adapter)) {
+               adapter->msix_entries[0].vector = pdev->irq;
+               dev_info(&pdev->dev, "using legacy interrupts\n");
+       }
+       return 0;
+}
+
+static void
+netxen_teardown_intr(struct netxen_adapter *adapter)
+{
+       if (adapter->flags & NETXEN_NIC_MSIX_ENABLED)
+               pci_disable_msix(adapter->pdev);
+       if (adapter->flags & NETXEN_NIC_MSI_ENABLED)
+               pci_disable_msi(adapter->pdev);
+}
+
+static void
+netxen_cleanup_pci_map(struct netxen_adapter *adapter)
+{
+       if (adapter->ahw.db_base != NULL)
+               iounmap(adapter->ahw.db_base);
+       if (adapter->ahw.pci_base0 != NULL)
+               iounmap(adapter->ahw.pci_base0);
+       if (adapter->ahw.pci_base1 != NULL)
+               iounmap(adapter->ahw.pci_base1);
+       if (adapter->ahw.pci_base2 != NULL)
+               iounmap(adapter->ahw.pci_base2);
+}
+
+static int
+netxen_setup_pci_map(struct netxen_adapter *adapter)
+{
+       void __iomem *db_ptr = NULL;
+
+       resource_size_t mem_base, db_base;
+       unsigned long mem_len, db_len = 0;
+
+       struct pci_dev *pdev = adapter->pdev;
+       int pci_func = adapter->ahw.pci_func;
+       struct netxen_hardware_context *ahw = &adapter->ahw;
+
+       int err = 0;
+
+       /*
+        * Set the CRB window to invalid. If any register in window 0 is
+        * accessed it should set the window to 0 and then reset it to 1.
+        */
+       adapter->ahw.crb_win = -1;
+       adapter->ahw.ocm_win = -1;
+
+       /* remap phys address */
+       mem_base = pci_resource_start(pdev, 0); /* 0 is for BAR 0 */
+       mem_len = pci_resource_len(pdev, 0);
+
+       /* 128 Meg of memory */
+       if (mem_len == NETXEN_PCI_128MB_SIZE) {
+
+               ahw->pci_base0 = ioremap(mem_base, FIRST_PAGE_GROUP_SIZE);
+               ahw->pci_base1 = ioremap(mem_base + SECOND_PAGE_GROUP_START,
+                               SECOND_PAGE_GROUP_SIZE);
+               ahw->pci_base2 = ioremap(mem_base + THIRD_PAGE_GROUP_START,
+                               THIRD_PAGE_GROUP_SIZE);
+               if (ahw->pci_base0 == NULL || ahw->pci_base1 == NULL ||
+                                               ahw->pci_base2 == NULL) {
+                       dev_err(&pdev->dev, "failed to map PCI bar 0\n");
+                       err = -EIO;
+                       goto err_out;
+               }
+
+               ahw->pci_len0 = FIRST_PAGE_GROUP_SIZE;
+
+       } else if (mem_len == NETXEN_PCI_32MB_SIZE) {
+
+               ahw->pci_base1 = ioremap(mem_base, SECOND_PAGE_GROUP_SIZE);
+               ahw->pci_base2 = ioremap(mem_base + THIRD_PAGE_GROUP_START -
+                       SECOND_PAGE_GROUP_START, THIRD_PAGE_GROUP_SIZE);
+               if (ahw->pci_base1 == NULL || ahw->pci_base2 == NULL) {
+                       dev_err(&pdev->dev, "failed to map PCI bar 0\n");
+                       err = -EIO;
+                       goto err_out;
+               }
+
+       } else if (mem_len == NETXEN_PCI_2MB_SIZE) {
+
+               ahw->pci_base0 = pci_ioremap_bar(pdev, 0);
+               if (ahw->pci_base0 == NULL) {
+                       dev_err(&pdev->dev, "failed to map PCI bar 0\n");
+                       return -EIO;
+               }
+               ahw->pci_len0 = mem_len;
+       } else {
+               return -EIO;
+       }
+
+       netxen_setup_hwops(adapter);
+
+       dev_info(&pdev->dev, "%dMB memory map\n", (int)(mem_len>>20));
+
+       if (NX_IS_REVISION_P3P(adapter->ahw.revision_id)) {
+               adapter->ahw.ocm_win_crb = netxen_get_ioaddr(adapter,
+                       NETXEN_PCIX_PS_REG(PCIX_OCM_WINDOW_REG(pci_func)));
+
+       } else if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) {
+               adapter->ahw.ocm_win_crb = netxen_get_ioaddr(adapter,
+                       NETXEN_PCIX_PS_REG(PCIE_MN_WINDOW_REG(pci_func)));
+       }
+
+       if (NX_IS_REVISION_P3(adapter->ahw.revision_id))
+               goto skip_doorbell;
+
+       db_base = pci_resource_start(pdev, 4);  /* doorbell is on bar 4 */
+       db_len = pci_resource_len(pdev, 4);
+
+       if (db_len == 0) {
+               printk(KERN_ERR "%s: doorbell is disabled\n",
+                               netxen_nic_driver_name);
+               err = -EIO;
+               goto err_out;
+       }
+
+       db_ptr = ioremap(db_base, NETXEN_DB_MAPSIZE_BYTES);
+       if (!db_ptr) {
+               printk(KERN_ERR "%s: Failed to allocate doorbell map.\n",
+                               netxen_nic_driver_name);
+               err = -EIO;
+               goto err_out;
+       }
+
+skip_doorbell:
+       adapter->ahw.db_base = db_ptr;
+       adapter->ahw.db_len = db_len;
+       return 0;
+
+err_out:
+       netxen_cleanup_pci_map(adapter);
+       return err;
+}
+
+static void
+netxen_check_options(struct netxen_adapter *adapter)
+{
+       u32 fw_major, fw_minor, fw_build, prev_fw_version;
+       char brd_name[NETXEN_MAX_SHORT_NAME];
+       char serial_num[32];
+       int i, offset, val, err;
+       __le32 *ptr32;
+       struct pci_dev *pdev = adapter->pdev;
+
+       adapter->driver_mismatch = 0;
+
+       ptr32 = (__le32 *)&serial_num;
+       offset = NX_FW_SERIAL_NUM_OFFSET;
+       for (i = 0; i < 8; i++) {
+               err = netxen_rom_fast_read(adapter, offset, &val);
+               if (err) {
+                       dev_err(&pdev->dev, "error reading board info\n");
+                       adapter->driver_mismatch = 1;
+                       return;
+               }
+               ptr32[i] = cpu_to_le32(val);
+               offset += sizeof(u32);
+       }
+
+       fw_major = NXRD32(adapter, NETXEN_FW_VERSION_MAJOR);
+       fw_minor = NXRD32(adapter, NETXEN_FW_VERSION_MINOR);
+       fw_build = NXRD32(adapter, NETXEN_FW_VERSION_SUB);
+       prev_fw_version = adapter->fw_version;
+       adapter->fw_version = NETXEN_VERSION_CODE(fw_major, fw_minor, fw_build);
+
+       /* Get FW Mini Coredump template and store it */
+       if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) {
+               if (adapter->mdump.md_template == NULL ||
+                               adapter->fw_version > prev_fw_version) {
+                       kfree(adapter->mdump.md_template);
+                       adapter->mdump.md_template = NULL;
+                       err = netxen_setup_minidump(adapter);
+                       if (err)
+                               dev_err(&adapter->pdev->dev,
+                               "Failed to setup minidump rcode = %d\n", err);
+               }
+       }
+
+       if (adapter->portnum == 0) {
+               if (netxen_nic_get_brd_name_by_type(adapter->ahw.board_type,
+                                                   brd_name))
+                       strcpy(brd_name, "Unknown");
+
+               pr_info("%s: %s Board S/N %s  Chip rev 0x%x\n",
+                               module_name(THIS_MODULE),
+                               brd_name, serial_num, adapter->ahw.revision_id);
+       }
+
+       if (adapter->fw_version < NETXEN_VERSION_CODE(3, 4, 216)) {
+               adapter->driver_mismatch = 1;
+               dev_warn(&pdev->dev, "firmware version %d.%d.%d unsupported\n",
+                               fw_major, fw_minor, fw_build);
+               return;
+       }
+
+       if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) {
+               i = NXRD32(adapter, NETXEN_SRE_MISC);
+               adapter->ahw.cut_through = (i & 0x8000) ? 1 : 0;
+       }
+
+       dev_info(&pdev->dev, "Driver v%s, firmware v%d.%d.%d [%s]\n",
+                NETXEN_NIC_LINUX_VERSIONID, fw_major, fw_minor, fw_build,
+                adapter->ahw.cut_through ? "cut-through" : "legacy");
+
+       if (adapter->fw_version >= NETXEN_VERSION_CODE(4, 0, 222))
+               adapter->capabilities = NXRD32(adapter, CRB_FW_CAPABILITIES_1);
+
+       if (adapter->ahw.port_type == NETXEN_NIC_XGBE) {
+               adapter->num_rxd = DEFAULT_RCV_DESCRIPTORS_10G;
+               adapter->num_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_10G;
+       } else if (adapter->ahw.port_type == NETXEN_NIC_GBE) {
+               adapter->num_rxd = DEFAULT_RCV_DESCRIPTORS_1G;
+               adapter->num_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_1G;
+       }
+
+       adapter->msix_supported = 0;
+       if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) {
+               adapter->msix_supported = !!use_msi_x;
+               adapter->rss_supported = !!use_msi_x;
+       } else {
+               u32 flashed_ver = 0;
+               netxen_rom_fast_read(adapter,
+                               NX_FW_VERSION_OFFSET, (int *)&flashed_ver);
+               flashed_ver = NETXEN_DECODE_VERSION(flashed_ver);
+
+               if (flashed_ver >= NETXEN_VERSION_CODE(3, 4, 336)) {
+                       switch (adapter->ahw.board_type) {
+                       case NETXEN_BRDTYPE_P2_SB31_10G:
+                       case NETXEN_BRDTYPE_P2_SB31_10G_CX4:
+                               adapter->msix_supported = !!use_msi_x;
+                               adapter->rss_supported = !!use_msi_x;
+                               break;
+                       default:
+                               break;
+                       }
+               }
+       }
+
+       adapter->num_txd = MAX_CMD_DESCRIPTORS;
+
+       if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) {
+               adapter->num_lro_rxd = MAX_LRO_RCV_DESCRIPTORS;
+               adapter->max_rds_rings = 3;
+       } else {
+               adapter->num_lro_rxd = 0;
+               adapter->max_rds_rings = 2;
+       }
+}
+
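+/*
+ * Bring up the firmware: decide via netxen_can_start_firmware() whether this
+ * function owns the boot, run the HW init sequence and load the firmware if
+ * so, then handshake with the card before the netdev is registered.
+ */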
+static int
+netxen_start_firmware(struct netxen_adapter *adapter)
+{
+       int val, err, first_boot;
+       struct pci_dev *pdev = adapter->pdev;
+
+       /* required for NX2031 dummy dma */
+       err = nx_set_dma_mask(adapter);
+       if (err)
+               return err;
+
+       err = netxen_can_start_firmware(adapter);
+
+       if (err < 0)
+               return err;
+
+       if (!err)
+               goto wait_init;
+
+       first_boot = NXRD32(adapter, NETXEN_CAM_RAM(0x1fc));
+
+       err = netxen_check_hw_init(adapter, first_boot);
+       if (err) {
+               dev_err(&pdev->dev, "error in HW init sequence\n");
+               return err;
+       }
+
+       netxen_request_firmware(adapter);
+
+       err = netxen_need_fw_reset(adapter);
+       if (err < 0)
+               goto err_out;
+       if (err == 0)
+               goto pcie_strap_init;
+
+       if (first_boot != 0x55555555) {
+               NXWR32(adapter, CRB_CMDPEG_STATE, 0);
+               netxen_pinit_from_rom(adapter);
+               msleep(1);
+       }
+
+       NXWR32(adapter, CRB_DMA_SHIFT, 0x55555555);
+       NXWR32(adapter, NETXEN_PEG_HALT_STATUS1, 0);
+       NXWR32(adapter, NETXEN_PEG_HALT_STATUS2, 0);
+
+       if (NX_IS_REVISION_P3(adapter->ahw.revision_id))
+               netxen_set_port_mode(adapter);
+
+       err = netxen_load_firmware(adapter);
+       if (err)
+               goto err_out;
+
+       netxen_release_firmware(adapter);
+
+       if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) {
+
+               /* Initialize multicast addr pool owners */
+               val = 0x7654;
+               if (adapter->ahw.port_type == NETXEN_NIC_XGBE)
+                       val |= 0x0f000000;
+               NXWR32(adapter, NETXEN_MAC_ADDR_CNTL_REG, val);
+
+       }
+
+       err = netxen_init_dummy_dma(adapter);
+       if (err)
+               goto err_out;
+
+       /*
+        * Tell the hardware our version number.
+        */
+       val = (_NETXEN_NIC_LINUX_MAJOR << 16)
+               | ((_NETXEN_NIC_LINUX_MINOR << 8))
+               | (_NETXEN_NIC_LINUX_SUBVERSION);
+       NXWR32(adapter, CRB_DRIVER_VERSION, val);
+
+pcie_strap_init:
+       if (NX_IS_REVISION_P3(adapter->ahw.revision_id))
+               netxen_pcie_strap_init(adapter);
+
+wait_init:
+       /* Handshake with the card before we register the devices. */
+       err = netxen_phantom_init(adapter, NETXEN_NIC_PEG_TUNE);
+       if (err) {
+               netxen_free_dummy_dma(adapter);
+               goto err_out;
+       }
+
+       NXWR32(adapter, NX_CRB_DEV_STATE, NX_DEV_READY);
+
+       nx_update_dma_mask(adapter);
+
+       netxen_check_options(adapter);
+
+       adapter->need_fw_reset = 0;
+
+       /* fall through and release firmware */
+
+err_out:
+       netxen_release_firmware(adapter);
+       return err;
+}
+
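+/*
+ * Request one IRQ per status (SDS) ring, using the handler that matches
+ * the enabled interrupt mode (MSI-X, MSI or legacy INTx).
+ */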
+static int
+netxen_nic_request_irq(struct netxen_adapter *adapter)
+{
+       irq_handler_t handler;
+       struct nx_host_sds_ring *sds_ring;
+       int err, ring;
+
+       unsigned long flags = 0;
+       struct net_device *netdev = adapter->netdev;
+       struct netxen_recv_context *recv_ctx = &adapter->recv_ctx;
+
+       if (adapter->flags & NETXEN_NIC_MSIX_ENABLED)
+               handler = netxen_msix_intr;
+       else if (adapter->flags & NETXEN_NIC_MSI_ENABLED)
+               handler = netxen_msi_intr;
+       else {
+               flags |= IRQF_SHARED;
+               handler = netxen_intr;
+       }
+       adapter->irq = netdev->irq;
+
+       for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+               sds_ring = &recv_ctx->sds_rings[ring];
+               sprintf(sds_ring->name, "%s[%d]", netdev->name, ring);
+               err = request_irq(sds_ring->irq, handler,
+                                 flags, sds_ring->name, sds_ring);
+               if (err)
+                       return err;
+       }
+
+       return 0;
+}
+
+static void
+netxen_nic_free_irq(struct netxen_adapter *adapter)
+{
+       int ring;
+       struct nx_host_sds_ring *sds_ring;
+
+       struct netxen_recv_context *recv_ctx = &adapter->recv_ctx;
+
+       for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+               sds_ring = &recv_ctx->sds_rings[ring];
+               free_irq(sds_ring->irq, sds_ring);
+       }
+}
+
+static void
+netxen_nic_init_coalesce_defaults(struct netxen_adapter *adapter)
+{
+       adapter->coal.flags = NETXEN_NIC_INTR_DEFAULT;
+       adapter->coal.normal.data.rx_time_us =
+               NETXEN_DEFAULT_INTR_COALESCE_RX_TIME_US;
+       adapter->coal.normal.data.rx_packets =
+               NETXEN_DEFAULT_INTR_COALESCE_RX_PACKETS;
+       adapter->coal.normal.data.tx_time_us =
+               NETXEN_DEFAULT_INTR_COALESCE_TX_TIME_US;
+       adapter->coal.normal.data.tx_packets =
+               NETXEN_DEFAULT_INTR_COALESCE_TX_PACKETS;
+}
+
+/* with rtnl_lock */
+static int
+__netxen_nic_up(struct netxen_adapter *adapter, struct net_device *netdev)
+{
+       int err;
+
+       if (adapter->is_up != NETXEN_ADAPTER_UP_MAGIC)
+               return -EIO;
+
+       err = adapter->init_port(adapter, adapter->physical_port);
+       if (err) {
+               printk(KERN_ERR "%s: Failed to initialize port %d\n",
+                               netxen_nic_driver_name, adapter->portnum);
+               return err;
+       }
+       if (NX_IS_REVISION_P2(adapter->ahw.revision_id))
+               adapter->macaddr_set(adapter, adapter->mac_addr);
+
+       adapter->set_multi(netdev);
+       adapter->set_mtu(adapter, netdev->mtu);
+
+       adapter->ahw.linkup = 0;
+
+       if (adapter->max_sds_rings > 1)
+               netxen_config_rss(adapter, 1);
+
+       if (NX_IS_REVISION_P3(adapter->ahw.revision_id))
+               netxen_config_intr_coalesce(adapter);
+
+       if (netdev->features & NETIF_F_LRO)
+               netxen_config_hw_lro(adapter, NETXEN_NIC_LRO_ENABLED);
+
+       netxen_napi_enable(adapter);
+
+       if (adapter->capabilities & NX_FW_CAPABILITY_LINK_NOTIFICATION)
+               netxen_linkevent_request(adapter, 1);
+       else
+               netxen_nic_set_link_parameters(adapter);
+
+       set_bit(__NX_DEV_UP, &adapter->state);
+       return 0;
+}
+
+/* Used during resume and firmware recovery. */
+
+static inline int
+netxen_nic_up(struct netxen_adapter *adapter, struct net_device *netdev)
+{
+       int err = 0;
+
+       rtnl_lock();
+       if (netif_running(netdev))
+               err = __netxen_nic_up(adapter, netdev);
+       rtnl_unlock();
+
+       return err;
+}
+
+/* with rtnl_lock */
+static void
+__netxen_nic_down(struct netxen_adapter *adapter, struct net_device *netdev)
+{
+       if (adapter->is_up != NETXEN_ADAPTER_UP_MAGIC)
+               return;
+
+       if (!test_and_clear_bit(__NX_DEV_UP, &adapter->state))
+               return;
+
+       smp_mb();
+       netif_carrier_off(netdev);
+       netif_tx_disable(netdev);
+
+       if (adapter->capabilities & NX_FW_CAPABILITY_LINK_NOTIFICATION)
+               netxen_linkevent_request(adapter, 0);
+
+       if (adapter->stop_port)
+               adapter->stop_port(adapter);
+
+       if (NX_IS_REVISION_P3(adapter->ahw.revision_id))
+               netxen_p3_free_mac_list(adapter);
+
+       adapter->set_promisc(adapter, NETXEN_NIU_NON_PROMISC_MODE);
+
+       netxen_napi_disable(adapter);
+
+       netxen_release_tx_buffers(adapter);
+}
+
+/* Used during suspend and firmware recovery. */
+
+static inline void
+netxen_nic_down(struct netxen_adapter *adapter, struct net_device *netdev)
+{
+       rtnl_lock();
+       if (netif_running(netdev))
+               __netxen_nic_down(adapter, netdev);
+       rtnl_unlock();
+
+}
+
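+/*
+ * Allocate SW and HW resources, post RX buffers and request IRQs, bringing
+ * the adapter to the NETXEN_ADAPTER_UP_MAGIC state.  Undone by
+ * netxen_nic_detach().
+ */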
+static int
+netxen_nic_attach(struct netxen_adapter *adapter)
+{
+       struct net_device *netdev = adapter->netdev;
+       struct pci_dev *pdev = adapter->pdev;
+       int err, ring;
+       struct nx_host_rds_ring *rds_ring;
+       struct nx_host_tx_ring *tx_ring;
+       u32 capab2;
+
+       if (adapter->is_up == NETXEN_ADAPTER_UP_MAGIC)
+               return 0;
+
+       err = netxen_init_firmware(adapter);
+       if (err)
+               return err;
+
+       adapter->flags &= ~NETXEN_FW_MSS_CAP;
+       if (adapter->capabilities & NX_FW_CAPABILITY_MORE_CAPS) {
+               capab2 = NXRD32(adapter, CRB_FW_CAPABILITIES_2);
+               if (capab2 & NX_FW_CAPABILITY_2_LRO_MAX_TCP_SEG)
+                       adapter->flags |= NETXEN_FW_MSS_CAP;
+       }
+
+       err = netxen_napi_add(adapter, netdev);
+       if (err)
+               return err;
+
+       err = netxen_alloc_sw_resources(adapter);
+       if (err) {
+               printk(KERN_ERR "%s: Error in setting sw resources\n",
+                               netdev->name);
+               return err;
+       }
+
+       err = netxen_alloc_hw_resources(adapter);
+       if (err) {
+               printk(KERN_ERR "%s: Error in setting hw resources\n",
+                               netdev->name);
+               goto err_out_free_sw;
+       }
+
+       if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) {
+               tx_ring = adapter->tx_ring;
+               tx_ring->crb_cmd_producer = netxen_get_ioaddr(adapter,
+                               crb_cmd_producer[adapter->portnum]);
+               tx_ring->crb_cmd_consumer = netxen_get_ioaddr(adapter,
+                               crb_cmd_consumer[adapter->portnum]);
+
+               tx_ring->producer = 0;
+               tx_ring->sw_consumer = 0;
+
+               netxen_nic_update_cmd_producer(adapter, tx_ring);
+               netxen_nic_update_cmd_consumer(adapter, tx_ring);
+       }
+
+       for (ring = 0; ring < adapter->max_rds_rings; ring++) {
+               rds_ring = &adapter->recv_ctx.rds_rings[ring];
+               netxen_post_rx_buffers(adapter, ring, rds_ring);
+       }
+
+       err = netxen_nic_request_irq(adapter);
+       if (err) {
+               dev_err(&pdev->dev, "%s: failed to setup interrupt\n",
+                               netdev->name);
+               goto err_out_free_rxbuf;
+       }
+
+       if (NX_IS_REVISION_P3(adapter->ahw.revision_id))
+               netxen_nic_init_coalesce_defaults(adapter);
+
+       netxen_create_sysfs_entries(adapter);
+
+       adapter->is_up = NETXEN_ADAPTER_UP_MAGIC;
+       return 0;
+
+err_out_free_rxbuf:
+       netxen_release_rx_buffers(adapter);
+       netxen_free_hw_resources(adapter);
+err_out_free_sw:
+       netxen_free_sw_resources(adapter);
+       return err;
+}
+
+static void
+netxen_nic_detach(struct netxen_adapter *adapter)
+{
+       if (adapter->is_up != NETXEN_ADAPTER_UP_MAGIC)
+               return;
+
+       netxen_remove_sysfs_entries(adapter);
+
+       netxen_free_hw_resources(adapter);
+       netxen_release_rx_buffers(adapter);
+       netxen_nic_free_irq(adapter);
+       netxen_napi_del(adapter);
+       netxen_free_sw_resources(adapter);
+
+       adapter->is_up = 0;
+}
+
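+/*
+ * Tear down and re-create the HW context (used e.g. from the Tx timeout
+ * handler) while holding the __NX_RESETTING bit.
+ */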
+int
+netxen_nic_reset_context(struct netxen_adapter *adapter)
+{
+       int err = 0;
+       struct net_device *netdev = adapter->netdev;
+
+       if (test_and_set_bit(__NX_RESETTING, &adapter->state))
+               return -EBUSY;
+
+       if (adapter->is_up == NETXEN_ADAPTER_UP_MAGIC) {
+
+               netif_device_detach(netdev);
+
+               if (netif_running(netdev))
+                       __netxen_nic_down(adapter, netdev);
+
+               netxen_nic_detach(adapter);
+
+               if (netif_running(netdev)) {
+                       err = netxen_nic_attach(adapter);
+                       if (!err)
+                               err = __netxen_nic_up(adapter, netdev);
+
+                       if (err)
+                               goto done;
+               }
+
+               netif_device_attach(netdev);
+       }
+
+done:
+       clear_bit(__NX_RESETTING, &adapter->state);
+       return err;
+}
+
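+/*
+ * Fill in netdev ops, offload feature flags and the MAC address, then
+ * register the net device.
+ */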
+static int
+netxen_setup_netdev(struct netxen_adapter *adapter,
+               struct net_device *netdev)
+{
+       int err = 0;
+       struct pci_dev *pdev = adapter->pdev;
+
+       adapter->mc_enabled = 0;
+       if (NX_IS_REVISION_P3(adapter->ahw.revision_id))
+               adapter->max_mc_count = 38;
+       else
+               adapter->max_mc_count = 16;
+
+       netdev->netdev_ops         = &netxen_netdev_ops;
+       netdev->watchdog_timeo     = 5*HZ;
+
+       netxen_nic_change_mtu(netdev, netdev->mtu);
+
+       netdev->ethtool_ops = &netxen_nic_ethtool_ops;
+
+       netdev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO |
+                             NETIF_F_RXCSUM;
+
+       if (NX_IS_REVISION_P3(adapter->ahw.revision_id))
+               netdev->hw_features |= NETIF_F_IPV6_CSUM | NETIF_F_TSO6;
+
+       netdev->vlan_features |= netdev->hw_features;
+
+       if (adapter->pci_using_dac) {
+               netdev->features |= NETIF_F_HIGHDMA;
+               netdev->vlan_features |= NETIF_F_HIGHDMA;
+       }
+
+       if (adapter->capabilities & NX_FW_CAPABILITY_FVLANTX)
+               netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX;
+
+       if (adapter->capabilities & NX_FW_CAPABILITY_HW_LRO)
+               netdev->hw_features |= NETIF_F_LRO;
+
+       netdev->features |= netdev->hw_features;
+
+       netdev->irq = adapter->msix_entries[0].vector;
+
+       INIT_WORK(&adapter->tx_timeout_task, netxen_tx_timeout_task);
+
+       if (netxen_read_mac_addr(adapter))
+               dev_warn(&pdev->dev, "failed to read mac addr\n");
+
+       netif_carrier_off(netdev);
+
+       err = register_netdev(netdev);
+       if (err) {
+               dev_err(&pdev->dev, "failed to register net device\n");
+               return err;
+       }
+
+       return 0;
+}
+
+#define NETXEN_ULA_ADAPTER_KEY         (0xdaddad01)
+#define NETXEN_NON_ULA_ADAPTER_KEY     (0xdaddad00)
+
+static void netxen_read_ula_info(struct netxen_adapter *adapter)
+{
+       u32 temp;
+
+       /* Print ULA info only once for an adapter */
+       if (adapter->portnum != 0)
+               return;
+
+       temp = NXRD32(adapter, NETXEN_ULA_KEY);
+       switch (temp) {
+       case NETXEN_ULA_ADAPTER_KEY:
+               dev_info(&adapter->pdev->dev, "ULA adapter\n");
+               break;
+       case NETXEN_NON_ULA_ADAPTER_KEY:
+               dev_info(&adapter->pdev->dev, "non-ULA adapter\n");
+               break;
+       default:
+               break;
+       }
+
+       return;
+}
+
+#ifdef CONFIG_PCIEAER
+static void netxen_mask_aer_correctable(struct netxen_adapter *adapter)
+{
+       struct pci_dev *pdev = adapter->pdev;
+       struct pci_dev *root = pdev->bus->self;
+       u32 aer_pos;
+
+       /* root bus? */
+       if (!root)
+               return;
+
+       if (adapter->ahw.board_type != NETXEN_BRDTYPE_P3_4_GB_MM &&
+               adapter->ahw.board_type != NETXEN_BRDTYPE_P3_10G_TP)
+               return;
+
+       if (pci_pcie_type(root) != PCI_EXP_TYPE_ROOT_PORT)
+               return;
+
+       aer_pos = pci_find_ext_capability(root, PCI_EXT_CAP_ID_ERR);
+       if (!aer_pos)
+               return;
+
+       pci_write_config_dword(root, aer_pos + PCI_ERR_COR_MASK, 0xffff);
+}
+#endif
+
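+/*
+ * PCI probe entry point: map the register BARs, identify the board, start
+ * the firmware, set up interrupts and register the net device.
+ */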
+static int
+netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+       struct net_device *netdev = NULL;
+       struct netxen_adapter *adapter = NULL;
+       int i = 0, err;
+       int pci_func_id = PCI_FUNC(pdev->devfn);
+       uint8_t revision_id;
+       u32 val;
+
+       if (pdev->revision >= NX_P3_A0 && pdev->revision <= NX_P3_B1) {
+               pr_warn("%s: chip revisions between 0x%x-0x%x will not be enabled\n",
+                       module_name(THIS_MODULE), NX_P3_A0, NX_P3_B1);
+               return -ENODEV;
+       }
+
+       if ((err = pci_enable_device(pdev)))
+               return err;
+
+       if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
+               err = -ENODEV;
+               goto err_out_disable_pdev;
+       }
+
+       if ((err = pci_request_regions(pdev, netxen_nic_driver_name)))
+               goto err_out_disable_pdev;
+
+       if (NX_IS_REVISION_P3(pdev->revision))
+               pci_enable_pcie_error_reporting(pdev);
+
+       pci_set_master(pdev);
+
+       netdev = alloc_etherdev(sizeof(struct netxen_adapter));
+       if (!netdev) {
+               err = -ENOMEM;
+               goto err_out_free_res;
+       }
+
+       SET_NETDEV_DEV(netdev, &pdev->dev);
+
+       adapter = netdev_priv(netdev);
+       adapter->netdev  = netdev;
+       adapter->pdev    = pdev;
+       adapter->ahw.pci_func  = pci_func_id;
+
+       revision_id = pdev->revision;
+       adapter->ahw.revision_id = revision_id;
+
+       rwlock_init(&adapter->ahw.crb_lock);
+       spin_lock_init(&adapter->ahw.mem_lock);
+
+       spin_lock_init(&adapter->tx_clean_lock);
+       INIT_LIST_HEAD(&adapter->mac_list);
+       INIT_LIST_HEAD(&adapter->ip_list);
+
+       err = netxen_setup_pci_map(adapter);
+       if (err)
+               goto err_out_free_netdev;
+
+       /* This will be reset for mezz cards  */
+       adapter->portnum = pci_func_id;
+
+       err = netxen_nic_get_board_info(adapter);
+       if (err) {
+               dev_err(&pdev->dev, "Error getting board config info.\n");
+               goto err_out_iounmap;
+       }
+
+#ifdef CONFIG_PCIEAER
+       netxen_mask_aer_correctable(adapter);
+#endif
+
+       /* Mezz cards have PCI function 0,2,3 enabled */
+       switch (adapter->ahw.board_type) {
+       case NETXEN_BRDTYPE_P2_SB31_10G_IMEZ:
+       case NETXEN_BRDTYPE_P2_SB31_10G_HMEZ:
+               if (pci_func_id >= 2)
+                       adapter->portnum = pci_func_id - 2;
+               break;
+       default:
+               break;
+       }
+
+       err = netxen_check_flash_fw_compatibility(adapter);
+       if (err)
+               goto err_out_iounmap;
+
+       if (adapter->portnum == 0) {
+               val = NXRD32(adapter, NX_CRB_DEV_REF_COUNT);
+               if (val != 0xffffffff && val != 0) {
+                       NXWR32(adapter, NX_CRB_DEV_REF_COUNT, 0);
+                       adapter->need_fw_reset = 1;
+               }
+       }
+
+       err = netxen_start_firmware(adapter);
+       if (err)
+               goto err_out_decr_ref;
+
+       /*
+        * See if the firmware gave us a virtual-physical port mapping.
+        */
+       adapter->physical_port = adapter->portnum;
+       if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) {
+               i = NXRD32(adapter, CRB_V2P(adapter->portnum));
+               if (i != 0x55555555)
+                       adapter->physical_port = i;
+       }
+
+       netxen_nic_clear_stats(adapter);
+
+       err = netxen_setup_intr(adapter);
+
+       if (err) {
+               dev_err(&adapter->pdev->dev,
+                       "Failed to setup interrupts, error = %d\n", err);
+               goto err_out_disable_msi;
+       }
+
+       netxen_read_ula_info(adapter);
+
+       err = netxen_setup_netdev(adapter, netdev);
+       if (err)
+               goto err_out_disable_msi;
+
+       pci_set_drvdata(pdev, adapter);
+
+       netxen_schedule_work(adapter, netxen_fw_poll_work, FW_POLL_DELAY);
+
+       switch (adapter->ahw.port_type) {
+       case NETXEN_NIC_GBE:
+               dev_info(&adapter->pdev->dev, "%s: GbE port initialized\n",
+                               adapter->netdev->name);
+               break;
+       case NETXEN_NIC_XGBE:
+               dev_info(&adapter->pdev->dev, "%s: XGbE port initialized\n",
+                               adapter->netdev->name);
+               break;
+       }
+
+       netxen_create_diag_entries(adapter);
+
+       return 0;
+
+err_out_disable_msi:
+       netxen_teardown_intr(adapter);
+
+       netxen_free_dummy_dma(adapter);
+
+err_out_decr_ref:
+       nx_decr_dev_ref_cnt(adapter);
+
+err_out_iounmap:
+       netxen_cleanup_pci_map(adapter);
+
+err_out_free_netdev:
+       free_netdev(netdev);
+
+err_out_free_res:
+       pci_release_regions(pdev);
+
+err_out_disable_pdev:
+       pci_disable_device(pdev);
+       return err;
+}
+
+static
+void netxen_cleanup_minidump(struct netxen_adapter *adapter)
+{
+       kfree(adapter->mdump.md_template);
+       adapter->mdump.md_template = NULL;
+
+       if (adapter->mdump.md_capture_buff) {
+               vfree(adapter->mdump.md_capture_buff);
+               adapter->mdump.md_capture_buff = NULL;
+       }
+}
+
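+/*
+ * PCI remove entry point: unregister the netdev and tear down everything
+ * set up in netxen_nic_probe().
+ */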
+static void netxen_nic_remove(struct pci_dev *pdev)
+{
+       struct netxen_adapter *adapter;
+       struct net_device *netdev;
+
+       adapter = pci_get_drvdata(pdev);
+       if (adapter == NULL)
+               return;
+
+       netdev = adapter->netdev;
+
+       netxen_cancel_fw_work(adapter);
+
+       unregister_netdev(netdev);
+
+       cancel_work_sync(&adapter->tx_timeout_task);
+
+       netxen_free_ip_list(adapter, false);
+       netxen_nic_detach(adapter);
+
+       nx_decr_dev_ref_cnt(adapter);
+
+       if (adapter->portnum == 0)
+               netxen_free_dummy_dma(adapter);
+
+       clear_bit(__NX_RESETTING, &adapter->state);
+
+       netxen_teardown_intr(adapter);
+       netxen_set_interrupt_mode(adapter, 0);
+       netxen_remove_diag_entries(adapter);
+
+       netxen_cleanup_pci_map(adapter);
+
+       netxen_release_firmware(adapter);
+
+       if (NX_IS_REVISION_P3(pdev->revision)) {
+               netxen_cleanup_minidump(adapter);
+               pci_disable_pcie_error_reporting(pdev);
+       }
+
+       pci_release_regions(pdev);
+       pci_disable_device(pdev);
+
+       free_netdev(netdev);
+}
+
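+/*
+ * Quiesce the adapter; shared by the shutdown, suspend and AER error paths.
+ */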
+static void netxen_nic_detach_func(struct netxen_adapter *adapter)
+{
+       struct net_device *netdev = adapter->netdev;
+
+       netif_device_detach(netdev);
+
+       netxen_cancel_fw_work(adapter);
+
+       if (netif_running(netdev))
+               netxen_nic_down(adapter, netdev);
+
+       cancel_work_sync(&adapter->tx_timeout_task);
+
+       netxen_nic_detach(adapter);
+
+       if (adapter->portnum == 0)
+               netxen_free_dummy_dma(adapter);
+
+       nx_decr_dev_ref_cnt(adapter);
+
+       clear_bit(__NX_RESETTING, &adapter->state);
+}
+
+static int netxen_nic_attach_func(struct pci_dev *pdev)
+{
+       struct netxen_adapter *adapter = pci_get_drvdata(pdev);
+       struct net_device *netdev = adapter->netdev;
+       int err;
+
+       err = pci_enable_device(pdev);
+       if (err)
+               return err;
+
+       pci_set_power_state(pdev, PCI_D0);
+       pci_set_master(pdev);
+       pci_restore_state(pdev);
+
+       adapter->ahw.crb_win = -1;
+       adapter->ahw.ocm_win = -1;
+
+       err = netxen_start_firmware(adapter);
+       if (err) {
+               dev_err(&pdev->dev, "failed to start firmware\n");
+               return err;
+       }
+
+       if (netif_running(netdev)) {
+               err = netxen_nic_attach(adapter);
+               if (err)
+                       goto err_out;
+
+               err = netxen_nic_up(adapter, netdev);
+               if (err)
+                       goto err_out_detach;
+
+               netxen_restore_indev_addr(netdev, NETDEV_UP);
+       }
+
+       netif_device_attach(netdev);
+       netxen_schedule_work(adapter, netxen_fw_poll_work, FW_POLL_DELAY);
+       return 0;
+
+err_out_detach:
+       netxen_nic_detach(adapter);
+err_out:
+       nx_decr_dev_ref_cnt(adapter);
+       return err;
+}
+
+static pci_ers_result_t netxen_io_error_detected(struct pci_dev *pdev,
+                                               pci_channel_state_t state)
+{
+       struct netxen_adapter *adapter = pci_get_drvdata(pdev);
+
+       if (state == pci_channel_io_perm_failure)
+               return PCI_ERS_RESULT_DISCONNECT;
+
+       if (nx_dev_request_aer(adapter))
+               return PCI_ERS_RESULT_RECOVERED;
+
+       netxen_nic_detach_func(adapter);
+
+       pci_disable_device(pdev);
+
+       return PCI_ERS_RESULT_NEED_RESET;
+}
+
+static pci_ers_result_t netxen_io_slot_reset(struct pci_dev *pdev)
+{
+       int err = 0;
+
+       err = netxen_nic_attach_func(pdev);
+
+       return err ? PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED;
+}
+
+static void netxen_io_resume(struct pci_dev *pdev)
+{
+       pci_cleanup_aer_uncorrect_error_status(pdev);
+}
+
+static void netxen_nic_shutdown(struct pci_dev *pdev)
+{
+       struct netxen_adapter *adapter = pci_get_drvdata(pdev);
+
+       netxen_nic_detach_func(adapter);
+
+       if (pci_save_state(pdev))
+               return;
+
+       if (netxen_nic_wol_supported(adapter)) {
+               pci_enable_wake(pdev, PCI_D3cold, 1);
+               pci_enable_wake(pdev, PCI_D3hot, 1);
+       }
+
+       pci_disable_device(pdev);
+}
+
+#ifdef CONFIG_PM
+static int
+netxen_nic_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+       struct netxen_adapter *adapter = pci_get_drvdata(pdev);
+       int retval;
+
+       netxen_nic_detach_func(adapter);
+
+       retval = pci_save_state(pdev);
+       if (retval)
+               return retval;
+
+       if (netxen_nic_wol_supported(adapter)) {
+               pci_enable_wake(pdev, PCI_D3cold, 1);
+               pci_enable_wake(pdev, PCI_D3hot, 1);
+       }
+
+       pci_disable_device(pdev);
+       pci_set_power_state(pdev, pci_choose_state(pdev, state));
+
+       return 0;
+}
+
+static int
+netxen_nic_resume(struct pci_dev *pdev)
+{
+       return netxen_nic_attach_func(pdev);
+}
+#endif
+
+static int netxen_nic_open(struct net_device *netdev)
+{
+       struct netxen_adapter *adapter = netdev_priv(netdev);
+       int err = 0;
+
+       if (adapter->driver_mismatch)
+               return -EIO;
+
+       err = netxen_nic_attach(adapter);
+       if (err)
+               return err;
+
+       err = __netxen_nic_up(adapter, netdev);
+       if (err)
+               goto err_out;
+
+       netif_start_queue(netdev);
+
+       return 0;
+
+err_out:
+       netxen_nic_detach(adapter);
+       return err;
+}
+
+/*
+ * netxen_nic_close - disables a network interface
+ */
+static int netxen_nic_close(struct net_device *netdev)
+{
+       struct netxen_adapter *adapter = netdev_priv(netdev);
+
+       __netxen_nic_down(adapter, netdev);
+       return 0;
+}
+
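+/*
+ * Set the flags/opcode in the first Tx descriptor (VLAN tagging, checksum
+ * offload, LSO) and, for LSO, copy the MAC/IP/TCP headers into the
+ * descriptor ring.
+ */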
+static void
+netxen_tso_check(struct net_device *netdev,
+               struct nx_host_tx_ring *tx_ring,
+               struct cmd_desc_type0 *first_desc,
+               struct sk_buff *skb)
+{
+       u8 opcode = TX_ETHER_PKT;
+       __be16 protocol = skb->protocol;
+       u16 flags = 0, vid = 0;
+       u32 producer;
+       int copied, offset, copy_len, hdr_len = 0, tso = 0, vlan_oob = 0;
+       struct cmd_desc_type0 *hwdesc;
+       struct vlan_ethhdr *vh;
+
+       if (protocol == cpu_to_be16(ETH_P_8021Q)) {
+
+               vh = (struct vlan_ethhdr *)skb->data;
+               protocol = vh->h_vlan_encapsulated_proto;
+               flags = FLAGS_VLAN_TAGGED;
+
+       } else if (skb_vlan_tag_present(skb)) {
+               flags = FLAGS_VLAN_OOB;
+               vid = skb_vlan_tag_get(skb);
+               netxen_set_tx_vlan_tci(first_desc, vid);
+               vlan_oob = 1;
+       }
+
+       if ((netdev->features & (NETIF_F_TSO | NETIF_F_TSO6)) &&
+                       skb_shinfo(skb)->gso_size > 0) {
+
+               hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+
+               first_desc->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
+               first_desc->total_hdr_length = hdr_len;
+               if (vlan_oob) {
+                       first_desc->total_hdr_length += VLAN_HLEN;
+                       first_desc->tcp_hdr_offset = VLAN_HLEN;
+                       first_desc->ip_hdr_offset = VLAN_HLEN;
+                       /* Only in case of TSO on vlan device */
+                       flags |= FLAGS_VLAN_TAGGED;
+               }
+
+               opcode = (protocol == cpu_to_be16(ETH_P_IPV6)) ?
+                               TX_TCP_LSO6 : TX_TCP_LSO;
+               tso = 1;
+
+       } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
+               u8 l4proto;
+
+               if (protocol == cpu_to_be16(ETH_P_IP)) {
+                       l4proto = ip_hdr(skb)->protocol;
+
+                       if (l4proto == IPPROTO_TCP)
+                               opcode = TX_TCP_PKT;
+                       else if (l4proto == IPPROTO_UDP)
+                               opcode = TX_UDP_PKT;
+               } else if (protocol == cpu_to_be16(ETH_P_IPV6)) {
+                       l4proto = ipv6_hdr(skb)->nexthdr;
+
+                       if (l4proto == IPPROTO_TCP)
+                               opcode = TX_TCPV6_PKT;
+                       else if (l4proto == IPPROTO_UDP)
+                               opcode = TX_UDPV6_PKT;
+               }
+       }
+
+       first_desc->tcp_hdr_offset += skb_transport_offset(skb);
+       first_desc->ip_hdr_offset += skb_network_offset(skb);
+       netxen_set_tx_flags_opcode(first_desc, flags, opcode);
+
+       if (!tso)
+               return;
+
+       /* For LSO, we need to copy the MAC/IP/TCP headers into
+        * the descriptor ring
+        */
+       producer = tx_ring->producer;
+       copied = 0;
+       offset = 2;
+
+       if (vlan_oob) {
+               /* Create a TSO vlan header template for firmware */
+
+               hwdesc = &tx_ring->desc_head[producer];
+               tx_ring->cmd_buf_arr[producer].skb = NULL;
+
+               copy_len = min((int)sizeof(struct cmd_desc_type0) - offset,
+                               hdr_len + VLAN_HLEN);
+
+               vh = (struct vlan_ethhdr *)((char *)hwdesc + 2);
+               skb_copy_from_linear_data(skb, vh, 12);
+               vh->h_vlan_proto = htons(ETH_P_8021Q);
+               vh->h_vlan_TCI = htons(vid);
+               skb_copy_from_linear_data_offset(skb, 12,
+                               (char *)vh + 16, copy_len - 16);
+
+               copied = copy_len - VLAN_HLEN;
+               offset = 0;
+
+               producer = get_next_index(producer, tx_ring->num_desc);
+       }
+
+       while (copied < hdr_len) {
+
+               copy_len = min((int)sizeof(struct cmd_desc_type0) - offset,
+                               (hdr_len - copied));
+
+               hwdesc = &tx_ring->desc_head[producer];
+               tx_ring->cmd_buf_arr[producer].skb = NULL;
+
+               skb_copy_from_linear_data_offset(skb, copied,
+                                (char *)hwdesc + offset, copy_len);
+
+               copied += copy_len;
+               offset = 0;
+
+               producer = get_next_index(producer, tx_ring->num_desc);
+       }
+
+       tx_ring->producer = producer;
+       barrier();
+}
+
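+/*
+ * DMA-map the skb head and all of its fragments; on failure unwind any
+ * mappings already created and return -ENOMEM.
+ */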
+static int
+netxen_map_tx_skb(struct pci_dev *pdev,
+               struct sk_buff *skb, struct netxen_cmd_buffer *pbuf)
+{
+       struct netxen_skb_frag *nf;
+       struct skb_frag_struct *frag;
+       int i, nr_frags;
+       dma_addr_t map;
+
+       nr_frags = skb_shinfo(skb)->nr_frags;
+       nf = &pbuf->frag_array[0];
+
+       map = pci_map_single(pdev, skb->data,
+                       skb_headlen(skb), PCI_DMA_TODEVICE);
+       if (pci_dma_mapping_error(pdev, map))
+               goto out_err;
+
+       nf->dma = map;
+       nf->length = skb_headlen(skb);
+
+       for (i = 0; i < nr_frags; i++) {
+               frag = &skb_shinfo(skb)->frags[i];
+               nf = &pbuf->frag_array[i+1];
+
+               map = skb_frag_dma_map(&pdev->dev, frag, 0, skb_frag_size(frag),
+                                      DMA_TO_DEVICE);
+               if (dma_mapping_error(&pdev->dev, map))
+                       goto unwind;
+
+               nf->dma = map;
+               nf->length = skb_frag_size(frag);
+       }
+
+       return 0;
+
+unwind:
+       while (--i >= 0) {
+               nf = &pbuf->frag_array[i+1];
+               pci_unmap_page(pdev, nf->dma, nf->length, PCI_DMA_TODEVICE);
+               nf->dma = 0ULL;
+       }
+
+       nf = &pbuf->frag_array[0];
+       pci_unmap_single(pdev, nf->dma, skb_headlen(skb), PCI_DMA_TODEVICE);
+       nf->dma = 0ULL;
+
+out_err:
+       return -ENOMEM;
+}
+
+static inline void
+netxen_clear_cmddesc(u64 *desc)
+{
+       desc[0] = 0ULL;
+       desc[2] = 0ULL;
+}
+
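+/*
+ * Transmit path: pull excess fragments of non-TSO skbs into the linear
+ * area, map the buffers, fill command descriptors (4 buffers each) and
+ * kick the hardware by updating the command producer index.
+ */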
+static netdev_tx_t
+netxen_nic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
+{
+       struct netxen_adapter *adapter = netdev_priv(netdev);
+       struct nx_host_tx_ring *tx_ring = adapter->tx_ring;
+       struct netxen_cmd_buffer *pbuf;
+       struct netxen_skb_frag *buffrag;
+       struct cmd_desc_type0 *hwdesc, *first_desc;
+       struct pci_dev *pdev;
+       int i, k;
+       int delta = 0;
+       struct skb_frag_struct *frag;
+
+       u32 producer;
+       int frag_count, no_of_desc;
+       u32 num_txd = tx_ring->num_desc;
+
+       frag_count = skb_shinfo(skb)->nr_frags + 1;
+
+       /* 14 frags supported for normal packet and
+        * 32 frags supported for TSO packet
+        */
+       if (!skb_is_gso(skb) && frag_count > NETXEN_MAX_FRAGS_PER_TX) {
+
+               for (i = 0; i < (frag_count - NETXEN_MAX_FRAGS_PER_TX); i++) {
+                       frag = &skb_shinfo(skb)->frags[i];
+                       delta += skb_frag_size(frag);
+               }
+
+               if (!__pskb_pull_tail(skb, delta))
+                       goto drop_packet;
+
+               frag_count = 1 + skb_shinfo(skb)->nr_frags;
+       }
+       /* 4 fragments per cmd desc */
+       no_of_desc = (frag_count + 3) >> 2;
+
+       if (unlikely(netxen_tx_avail(tx_ring) <= TX_STOP_THRESH)) {
+               netif_stop_queue(netdev);
+               smp_mb();
+               if (netxen_tx_avail(tx_ring) > TX_STOP_THRESH)
+                       netif_start_queue(netdev);
+               else
+                       return NETDEV_TX_BUSY;
+       }
+
+       producer = tx_ring->producer;
+       pbuf = &tx_ring->cmd_buf_arr[producer];
+
+       pdev = adapter->pdev;
+
+       if (netxen_map_tx_skb(pdev, skb, pbuf))
+               goto drop_packet;
+
+       pbuf->skb = skb;
+       pbuf->frag_count = frag_count;
+
+       first_desc = hwdesc = &tx_ring->desc_head[producer];
+       netxen_clear_cmddesc((u64 *)hwdesc);
+
+       netxen_set_tx_frags_len(first_desc, frag_count, skb->len);
+       netxen_set_tx_port(first_desc, adapter->portnum);
+
+       for (i = 0; i < frag_count; i++) {
+
+               k = i % 4;
+
+               if ((k == 0) && (i > 0)) {
+                       /* move to next desc.*/
+                       producer = get_next_index(producer, num_txd);
+                       hwdesc = &tx_ring->desc_head[producer];
+                       netxen_clear_cmddesc((u64 *)hwdesc);
+                       tx_ring->cmd_buf_arr[producer].skb = NULL;
+               }
+
+               buffrag = &pbuf->frag_array[i];
+
+               hwdesc->buffer_length[k] = cpu_to_le16(buffrag->length);
+               switch (k) {
+               case 0:
+                       hwdesc->addr_buffer1 = cpu_to_le64(buffrag->dma);
+                       break;
+               case 1:
+                       hwdesc->addr_buffer2 = cpu_to_le64(buffrag->dma);
+                       break;
+               case 2:
+                       hwdesc->addr_buffer3 = cpu_to_le64(buffrag->dma);
+                       break;
+               case 3:
+                       hwdesc->addr_buffer4 = cpu_to_le64(buffrag->dma);
+                       break;
+               }
+       }
+
+       tx_ring->producer = get_next_index(producer, num_txd);
+
+       netxen_tso_check(netdev, tx_ring, first_desc, skb);
+
+       adapter->stats.txbytes += skb->len;
+       adapter->stats.xmitcalled++;
+
+       netxen_nic_update_cmd_producer(adapter, tx_ring);
+
+       return NETDEV_TX_OK;
+
+drop_packet:
+       adapter->stats.txdropped++;
+       dev_kfree_skb_any(skb);
+       return NETDEV_TX_OK;
+}
+
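+/*
+ * Read the device temperature state from CRB_TEMP_STATE; returns nonzero
+ * at the panic threshold so the caller can detach the device.
+ */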
+static int netxen_nic_check_temp(struct netxen_adapter *adapter)
+{
+       struct net_device *netdev = adapter->netdev;
+       uint32_t temp, temp_state, temp_val;
+       int rv = 0;
+
+       temp = NXRD32(adapter, CRB_TEMP_STATE);
+
+       temp_state = nx_get_temp_state(temp);
+       temp_val = nx_get_temp_val(temp);
+
+       if (temp_state == NX_TEMP_PANIC) {
+               printk(KERN_ALERT
+                      "%s: Device temperature %d degrees C exceeds"
+                      " maximum allowed. Hardware has been shut down.\n",
+                      netdev->name, temp_val);
+               rv = 1;
+       } else if (temp_state == NX_TEMP_WARN) {
+               if (adapter->temp == NX_TEMP_NORMAL) {
+                       printk(KERN_ALERT
+                              "%s: Device temperature %d degrees C "
+                              "exceeds operating range."
+                              " Immediate action needed.\n",
+                              netdev->name, temp_val);
+               }
+       } else {
+               if (adapter->temp == NX_TEMP_WARN) {
+                       printk(KERN_INFO
+                              "%s: Device temperature is now %d degrees C"
+                              " in normal range.\n", netdev->name,
+                              temp_val);
+               }
+       }
+       adapter->temp = temp_state;
+       return rv;
+}
+
+void netxen_advert_link_change(struct netxen_adapter *adapter, int linkup)
+{
+       struct net_device *netdev = adapter->netdev;
+
+       if (adapter->ahw.linkup && !linkup) {
+               printk(KERN_INFO "%s: %s NIC Link is down\n",
+                      netxen_nic_driver_name, netdev->name);
+               adapter->ahw.linkup = 0;
+               if (netif_running(netdev)) {
+                       netif_carrier_off(netdev);
+                       netif_stop_queue(netdev);
+               }
+               adapter->link_changed = !adapter->has_link_events;
+       } else if (!adapter->ahw.linkup && linkup) {
+               printk(KERN_INFO "%s: %s NIC Link is up\n",
+                      netxen_nic_driver_name, netdev->name);
+               adapter->ahw.linkup = 1;
+               if (netif_running(netdev)) {
+                       netif_carrier_on(netdev);
+                       netif_wake_queue(netdev);
+               }
+               adapter->link_changed = !adapter->has_link_events;
+       }
+}
+
+static void netxen_nic_handle_phy_intr(struct netxen_adapter *adapter)
+{
+       u32 val, port, linkup;
+
+       port = adapter->physical_port;
+
+       if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) {
+               val = NXRD32(adapter, CRB_XG_STATE_P3);
+               val = XG_LINK_STATE_P3(adapter->ahw.pci_func, val);
+               linkup = (val == XG_LINK_UP_P3);
+       } else {
+               val = NXRD32(adapter, CRB_XG_STATE);
+               val = (val >> port*8) & 0xff;
+               linkup = (val == XG_LINK_UP);
+       }
+
+       netxen_advert_link_change(adapter, linkup);
+}
+
+static void netxen_tx_timeout(struct net_device *netdev)
+{
+       struct netxen_adapter *adapter = netdev_priv(netdev);
+
+       if (test_bit(__NX_RESETTING, &adapter->state))
+               return;
+
+       dev_err(&netdev->dev, "transmit timeout, resetting.\n");
+       schedule_work(&adapter->tx_timeout_task);
+}
+
+static void netxen_tx_timeout_task(struct work_struct *work)
+{
+       struct netxen_adapter *adapter =
+               container_of(work, struct netxen_adapter, tx_timeout_task);
+
+       if (!netif_running(adapter->netdev))
+               return;
+
+       if (test_and_set_bit(__NX_RESETTING, &adapter->state))
+               return;
+
+       if (++adapter->tx_timeo_cnt >= NX_MAX_TX_TIMEOUTS)
+               goto request_reset;
+
+       rtnl_lock();
+       if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) {
+               /* try to scrub interrupt */
+               netxen_napi_disable(adapter);
+
+               netxen_napi_enable(adapter);
+
+               netif_wake_queue(adapter->netdev);
+
+               clear_bit(__NX_RESETTING, &adapter->state);
+       } else {
+               clear_bit(__NX_RESETTING, &adapter->state);
+               if (netxen_nic_reset_context(adapter)) {
+                       rtnl_unlock();
+                       goto request_reset;
+               }
+       }
+       netif_trans_update(adapter->netdev);
+       rtnl_unlock();
+       return;
+
+request_reset:
+       adapter->need_fw_reset = 1;
+       clear_bit(__NX_RESETTING, &adapter->state);
+}
+
+static struct rtnl_link_stats64 *netxen_nic_get_stats(struct net_device *netdev,
+                                                     struct rtnl_link_stats64 *stats)
+{
+       struct netxen_adapter *adapter = netdev_priv(netdev);
+
+       stats->rx_packets = adapter->stats.rx_pkts + adapter->stats.lro_pkts;
+       stats->tx_packets = adapter->stats.xmitfinished;
+       stats->rx_bytes = adapter->stats.rxbytes;
+       stats->tx_bytes = adapter->stats.txbytes;
+       stats->rx_dropped = adapter->stats.rxdropped;
+       stats->tx_dropped = adapter->stats.txdropped;
+
+       return stats;
+}
+
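+/*
+ * Legacy INTx handler: verify the interrupt belongs to this function,
+ * acknowledge it and hand processing over to NAPI.
+ */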
+static irqreturn_t netxen_intr(int irq, void *data)
+{
+       struct nx_host_sds_ring *sds_ring = data;
+       struct netxen_adapter *adapter = sds_ring->adapter;
+       u32 status = 0;
+
+       status = readl(adapter->isr_int_vec);
+
+       if (!(status & adapter->int_vec_bit))
+               return IRQ_NONE;
+
+       if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) {
+               /* check interrupt state machine, to be sure */
+               status = readl(adapter->crb_int_state_reg);
+               if (!ISR_LEGACY_INT_TRIGGERED(status))
+                       return IRQ_NONE;
+
+       } else {
+               unsigned long our_int = 0;
+
+               our_int = readl(adapter->crb_int_state_reg);
+
+               /* not our interrupt */
+               if (!test_and_clear_bit((7 + adapter->portnum), &our_int))
+                       return IRQ_NONE;
+
+               /* claim interrupt */
+               writel((our_int & 0xffffffff), adapter->crb_int_state_reg);
+
+               /* clear interrupt */
+               netxen_nic_disable_int(sds_ring);
+       }
+
+       writel(0xffffffff, adapter->tgt_status_reg);
+       /* read twice to ensure write is flushed */
+       readl(adapter->isr_int_vec);
+       readl(adapter->isr_int_vec);
+
+       napi_schedule(&sds_ring->napi);
+
+       return IRQ_HANDLED;
+}
+
+static irqreturn_t netxen_msi_intr(int irq, void *data)
+{
+       struct nx_host_sds_ring *sds_ring = data;
+       struct netxen_adapter *adapter = sds_ring->adapter;
+
+       /* clear interrupt */
+       writel(0xffffffff, adapter->tgt_status_reg);
+
+       napi_schedule(&sds_ring->napi);
+       return IRQ_HANDLED;
+}
+
+static irqreturn_t netxen_msix_intr(int irq, void *data)
+{
+       struct nx_host_sds_ring *sds_ring = data;
+
+       napi_schedule(&sds_ring->napi);
+       return IRQ_HANDLED;
+}
+
+static int netxen_nic_poll(struct napi_struct *napi, int budget)
+{
+       struct nx_host_sds_ring *sds_ring =
+               container_of(napi, struct nx_host_sds_ring, napi);
+
+       struct netxen_adapter *adapter = sds_ring->adapter;
+
+       int tx_complete;
+       int work_done;
+
+       tx_complete = netxen_process_cmd_ring(adapter);
+
+       work_done = netxen_process_rcv_ring(sds_ring, budget);
+
+       if (!tx_complete)
+               work_done = budget;
+
+       if (work_done < budget) {
+               napi_complete(&sds_ring->napi);
+               if (test_bit(__NX_DEV_UP, &adapter->state))
+                       netxen_nic_enable_int(sds_ring);
+       }
+
+       return work_done;
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void netxen_nic_poll_controller(struct net_device *netdev)
+{
+       int ring;
+       struct nx_host_sds_ring *sds_ring;
+       struct netxen_adapter *adapter = netdev_priv(netdev);
+       struct netxen_recv_context *recv_ctx = &adapter->recv_ctx;
+
+       disable_irq(adapter->irq);
+       for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+               sds_ring = &recv_ctx->sds_rings[ring];
+               netxen_intr(adapter->irq, sds_ring);
+       }
+       enable_irq(adapter->irq);
+}
+#endif
+
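+/*
+ * The CRB device reference count tracks how many PCI functions are using
+ * the firmware; when the last user drops it, the device state returns to
+ * NX_DEV_COLD (unless it has failed).
+ */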
+static int
+nx_incr_dev_ref_cnt(struct netxen_adapter *adapter)
+{
+       int count;
+       if (netxen_api_lock(adapter))
+               return -EIO;
+
+       count = NXRD32(adapter, NX_CRB_DEV_REF_COUNT);
+
+       NXWR32(adapter, NX_CRB_DEV_REF_COUNT, ++count);
+
+       netxen_api_unlock(adapter);
+       return count;
+}
+
+static int
+nx_decr_dev_ref_cnt(struct netxen_adapter *adapter)
+{
+       int count, state;
+       if (netxen_api_lock(adapter))
+               return -EIO;
+
+       count = NXRD32(adapter, NX_CRB_DEV_REF_COUNT);
+       WARN_ON(count == 0);
+
+       NXWR32(adapter, NX_CRB_DEV_REF_COUNT, --count);
+       state = NXRD32(adapter, NX_CRB_DEV_STATE);
+
+       if (count == 0 && state != NX_DEV_FAILED)
+               NXWR32(adapter, NX_CRB_DEV_STATE, NX_DEV_COLD);
+
+       netxen_api_unlock(adapter);
+       return count;
+}
+
+static int
+nx_dev_request_aer(struct netxen_adapter *adapter)
+{
+       u32 state;
+       int ret = -EINVAL;
+
+       if (netxen_api_lock(adapter))
+               return ret;
+
+       state = NXRD32(adapter, NX_CRB_DEV_STATE);
+
+       if (state == NX_DEV_NEED_AER)
+               ret = 0;
+       else if (state == NX_DEV_READY) {
+               NXWR32(adapter, NX_CRB_DEV_STATE, NX_DEV_NEED_AER);
+               ret = 0;
+       }
+
+       netxen_api_unlock(adapter);
+       return ret;
+}
+
+int
+nx_dev_request_reset(struct netxen_adapter *adapter)
+{
+       u32 state;
+       int ret = -EINVAL;
+
+       if (netxen_api_lock(adapter))
+               return ret;
+
+       state = NXRD32(adapter, NX_CRB_DEV_STATE);
+
+       if (state == NX_DEV_NEED_RESET || state == NX_DEV_FAILED)
+               ret = 0;
+       else if (state != NX_DEV_INITALIZING && state != NX_DEV_NEED_AER) {
+               NXWR32(adapter, NX_CRB_DEV_STATE, NX_DEV_NEED_RESET);
+               adapter->flags |= NETXEN_FW_RESET_OWNER;
+               ret = 0;
+       }
+
+       netxen_api_unlock(adapter);
+
+       return ret;
+}
+
+static int
+netxen_can_start_firmware(struct netxen_adapter *adapter)
+{
+       int count;
+       int can_start = 0;
+
+       if (netxen_api_lock(adapter)) {
+               nx_incr_dev_ref_cnt(adapter);
+               return -1;
+       }
+
+       count = NXRD32(adapter, NX_CRB_DEV_REF_COUNT);
+
+       if ((count < 0) || (count >= NX_MAX_PCI_FUNC))
+               count = 0;
+
+       if (count == 0) {
+               can_start = 1;
+               NXWR32(adapter, NX_CRB_DEV_STATE, NX_DEV_INITALIZING);
+       }
+
+       NXWR32(adapter, NX_CRB_DEV_REF_COUNT, ++count);
+
+       netxen_api_unlock(adapter);
+
+       return can_start;
+}
+
+static void
+netxen_schedule_work(struct netxen_adapter *adapter,
+               work_func_t func, int delay)
+{
+       INIT_DELAYED_WORK(&adapter->fw_work, func);
+       schedule_delayed_work(&adapter->fw_work, delay);
+}
+
+static void
+netxen_cancel_fw_work(struct netxen_adapter *adapter)
+{
+       while (test_and_set_bit(__NX_RESETTING, &adapter->state))
+               msleep(10);
+
+       cancel_delayed_work_sync(&adapter->fw_work);
+}
+
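+/*
+ * Firmware recovery runs netxen_detach_work -> netxen_fwinit_work ->
+ * netxen_attach_work via the fw_work delayed work.
+ */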
+static void
+netxen_attach_work(struct work_struct *work)
+{
+       struct netxen_adapter *adapter = container_of(work,
+                               struct netxen_adapter, fw_work.work);
+       struct net_device *netdev = adapter->netdev;
+       int err = 0;
+
+       if (netif_running(netdev)) {
+               err = netxen_nic_attach(adapter);
+               if (err)
+                       goto done;
+
+               err = netxen_nic_up(adapter, netdev);
+               if (err) {
+                       netxen_nic_detach(adapter);
+                       goto done;
+               }
+
+               netxen_restore_indev_addr(netdev, NETDEV_UP);
+       }
+
+       netif_device_attach(netdev);
+
+done:
+       adapter->fw_fail_cnt = 0;
+       clear_bit(__NX_RESETTING, &adapter->state);
+       netxen_schedule_work(adapter, netxen_fw_poll_work, FW_POLL_DELAY);
+}
+
+static void
+netxen_fwinit_work(struct work_struct *work)
+{
+       struct netxen_adapter *adapter = container_of(work,
+                               struct netxen_adapter, fw_work.work);
+       int dev_state;
+       int count;
+       dev_state = NXRD32(adapter, NX_CRB_DEV_STATE);
+       if (adapter->flags & NETXEN_FW_RESET_OWNER) {
+               count = NXRD32(adapter, NX_CRB_DEV_REF_COUNT);
+               WARN_ON(count == 0);
+               if (count == 1) {
+                       if (adapter->mdump.md_enabled) {
+                               rtnl_lock();
+                               netxen_dump_fw(adapter);
+                               rtnl_unlock();
+                       }
+                       adapter->flags &= ~NETXEN_FW_RESET_OWNER;
+                       if (netxen_api_lock(adapter)) {
+                               clear_bit(__NX_RESETTING, &adapter->state);
+                               NXWR32(adapter, NX_CRB_DEV_STATE,
+                                               NX_DEV_FAILED);
+                               return;
+                       }
+                       count = NXRD32(adapter, NX_CRB_DEV_REF_COUNT);
+                       NXWR32(adapter, NX_CRB_DEV_REF_COUNT, --count);
+                       NXWR32(adapter, NX_CRB_DEV_STATE, NX_DEV_COLD);
+                       dev_state = NX_DEV_COLD;
+                       netxen_api_unlock(adapter);
+               }
+       }
+
+       switch (dev_state) {
+       case NX_DEV_COLD:
+       case NX_DEV_READY:
+               if (!netxen_start_firmware(adapter)) {
+                       netxen_schedule_work(adapter, netxen_attach_work, 0);
+                       return;
+               }
+               break;
+
+       case NX_DEV_NEED_RESET:
+       case NX_DEV_INITALIZING:
+               netxen_schedule_work(adapter,
+                               netxen_fwinit_work, 2 * FW_POLL_DELAY);
+               return;
+
+       case NX_DEV_FAILED:
+       default:
+               nx_incr_dev_ref_cnt(adapter);
+               break;
+       }
+
+       if (netxen_api_lock(adapter)) {
+               clear_bit(__NX_RESETTING, &adapter->state);
+               return;
+       }
+       NXWR32(adapter, NX_CRB_DEV_STATE, NX_DEV_FAILED);
+       netxen_api_unlock(adapter);
+       dev_err(&adapter->pdev->dev, "%s: device initialization failed\n",
+                               adapter->netdev->name);
+
+       clear_bit(__NX_RESETTING, &adapter->state);
+}
+
+static void
+netxen_detach_work(struct work_struct *work)
+{
+       struct netxen_adapter *adapter = container_of(work,
+                               struct netxen_adapter, fw_work.work);
+       struct net_device *netdev = adapter->netdev;
+       int ref_cnt = 0, delay;
+       u32 status;
+
+       netif_device_detach(netdev);
+
+       netxen_nic_down(adapter, netdev);
+
+       rtnl_lock();
+       netxen_nic_detach(adapter);
+       rtnl_unlock();
+
+       status = NXRD32(adapter, NETXEN_PEG_HALT_STATUS1);
+
+       if (status & NX_RCODE_FATAL_ERROR)
+               goto err_ret;
+
+       if (adapter->temp == NX_TEMP_PANIC)
+               goto err_ret;
+
+       if (!(adapter->flags & NETXEN_FW_RESET_OWNER))
+               ref_cnt = nx_decr_dev_ref_cnt(adapter);
+
+       if (ref_cnt == -EIO)
+               goto err_ret;
+
+       delay = (ref_cnt == 0) ? 0 : (2 * FW_POLL_DELAY);
+
+       adapter->fw_wait_cnt = 0;
+       netxen_schedule_work(adapter, netxen_fwinit_work, delay);
+
+       return;
+
+err_ret:
+       clear_bit(__NX_RESETTING, &adapter->state);
+}
+
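+/*
+ * Periodic health check: monitor temperature, device state and the firmware
+ * heartbeat; schedule the detach/recovery cycle when a hang is detected.
+ */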
+static int
+netxen_check_health(struct netxen_adapter *adapter)
+{
+       u32 state, heartbit;
+       u32 peg_status;
+       struct net_device *netdev = adapter->netdev;
+
+       state = NXRD32(adapter, NX_CRB_DEV_STATE);
+       if (state == NX_DEV_NEED_AER)
+               return 0;
+
+       if (netxen_nic_check_temp(adapter))
+               goto detach;
+
+       if (adapter->need_fw_reset) {
+               if (nx_dev_request_reset(adapter))
+                       return 0;
+               goto detach;
+       }
+
+       /* NX_DEV_NEED_RESET can be set in two cases:
+        * 1. Tx timeout  2. FW hang
+        * A context-destroy request is needed only for a Tx timeout,
+        * not for a FW hang.
+        */
+       if (state == NX_DEV_NEED_RESET || state == NX_DEV_FAILED) {
+               adapter->need_fw_reset = 1;
+               if (NX_IS_REVISION_P2(adapter->ahw.revision_id))
+                       goto detach;
+       }
+
+       if (NX_IS_REVISION_P2(adapter->ahw.revision_id))
+               return 0;
+
+       heartbit = NXRD32(adapter, NETXEN_PEG_ALIVE_COUNTER);
+       if (heartbit != adapter->heartbit) {
+               adapter->heartbit = heartbit;
+               adapter->fw_fail_cnt = 0;
+               if (adapter->need_fw_reset)
+                       goto detach;
+               return 0;
+       }
+
+       if (++adapter->fw_fail_cnt < FW_FAIL_THRESH)
+               return 0;
+
+       if (nx_dev_request_reset(adapter))
+               return 0;
+
+       clear_bit(__NX_FW_ATTACHED, &adapter->state);
+
+       dev_err(&netdev->dev, "firmware hang detected\n");
+       peg_status = NXRD32(adapter, NETXEN_PEG_HALT_STATUS1);
+       dev_err(&adapter->pdev->dev, "Dumping hw/fw registers\n"
+                       "PEG_HALT_STATUS1: 0x%x, PEG_HALT_STATUS2: 0x%x,\n"
+                       "PEG_NET_0_PC: 0x%x, PEG_NET_1_PC: 0x%x,\n"
+                       "PEG_NET_2_PC: 0x%x, PEG_NET_3_PC: 0x%x,\n"
+                       "PEG_NET_4_PC: 0x%x\n",
+                       peg_status,
+                       NXRD32(adapter, NETXEN_PEG_HALT_STATUS2),
+                       NXRD32(adapter, NETXEN_CRB_PEG_NET_0 + 0x3c),
+                       NXRD32(adapter, NETXEN_CRB_PEG_NET_1 + 0x3c),
+                       NXRD32(adapter, NETXEN_CRB_PEG_NET_2 + 0x3c),
+                       NXRD32(adapter, NETXEN_CRB_PEG_NET_3 + 0x3c),
+                       NXRD32(adapter, NETXEN_CRB_PEG_NET_4 + 0x3c));
+       if (NX_FWERROR_PEGSTAT1(peg_status) == 0x67)
+               dev_err(&adapter->pdev->dev,
+                       "Firmware aborted with error code 0x00006700. "
+                               "Device is being reset.\n");
+detach:
+       if ((auto_fw_reset == AUTO_FW_RESET_ENABLED) &&
+                       !test_and_set_bit(__NX_RESETTING, &adapter->state))
+               netxen_schedule_work(adapter, netxen_detach_work, 0);
+       return 1;
+}
+
+static void
+netxen_fw_poll_work(struct work_struct *work)
+{
+       struct netxen_adapter *adapter = container_of(work,
+                               struct netxen_adapter, fw_work.work);
+
+       if (test_bit(__NX_RESETTING, &adapter->state))
+               goto reschedule;
+
+       if (test_bit(__NX_DEV_UP, &adapter->state) &&
+           !(adapter->capabilities & NX_FW_CAPABILITY_LINK_NOTIFICATION)) {
+               if (!adapter->has_link_events) {
+
+                       netxen_nic_handle_phy_intr(adapter);
+
+                       if (adapter->link_changed)
+                               netxen_nic_set_link_parameters(adapter);
+               }
+       }
+
+       if (netxen_check_health(adapter))
+               return;
+
+reschedule:
+       netxen_schedule_work(adapter, netxen_fw_poll_work, FW_POLL_DELAY);
+}
+
+static ssize_t
+netxen_store_bridged_mode(struct device *dev,
+               struct device_attribute *attr, const char *buf, size_t len)
+{
+       struct net_device *net = to_net_dev(dev);
+       struct netxen_adapter *adapter = netdev_priv(net);
+       unsigned long new;
+       int ret = -EINVAL;
+
+       if (!(adapter->capabilities & NX_FW_CAPABILITY_BDG))
+               goto err_out;
+
+       if (adapter->is_up != NETXEN_ADAPTER_UP_MAGIC)
+               goto err_out;
+
+       if (kstrtoul(buf, 2, &new))
+               goto err_out;
+
+       if (!netxen_config_bridged_mode(adapter, !!new))
+               ret = len;
+
+err_out:
+       return ret;
+}
+
+static ssize_t
+netxen_show_bridged_mode(struct device *dev,
+               struct device_attribute *attr, char *buf)
+{
+       struct net_device *net = to_net_dev(dev);
+       struct netxen_adapter *adapter;
+       int bridged_mode = 0;
+
+       adapter = netdev_priv(net);
+
+       if (adapter->capabilities & NX_FW_CAPABILITY_BDG)
+               bridged_mode = !!(adapter->flags & NETXEN_NIC_BRIDGE_ENABLED);
+
+       return sprintf(buf, "%d\n", bridged_mode);
+}
+
+static struct device_attribute dev_attr_bridged_mode = {
+       .attr = {.name = "bridged_mode", .mode = (S_IRUGO | S_IWUSR)},
+       .show = netxen_show_bridged_mode,
+       .store = netxen_store_bridged_mode,
+};
+
+static ssize_t
+netxen_store_diag_mode(struct device *dev,
+               struct device_attribute *attr, const char *buf, size_t len)
+{
+       struct netxen_adapter *adapter = dev_get_drvdata(dev);
+       unsigned long new;
+
+       if (kstrtoul(buf, 2, &new))
+               return -EINVAL;
+
+       if (!!new != !!(adapter->flags & NETXEN_NIC_DIAG_ENABLED))
+               adapter->flags ^= NETXEN_NIC_DIAG_ENABLED;
+
+       return len;
+}
+
+static ssize_t
+netxen_show_diag_mode(struct device *dev,
+               struct device_attribute *attr, char *buf)
+{
+       struct netxen_adapter *adapter = dev_get_drvdata(dev);
+
+       return sprintf(buf, "%d\n",
+                       !!(adapter->flags & NETXEN_NIC_DIAG_ENABLED));
+}
+
+static struct device_attribute dev_attr_diag_mode = {
+       .attr = {.name = "diag_mode", .mode = (S_IRUGO | S_IWUSR)},
+       .show = netxen_show_diag_mode,
+       .store = netxen_store_diag_mode,
+};
+
+static int
+netxen_sysfs_validate_crb(struct netxen_adapter *adapter,
+               loff_t offset, size_t size)
+{
+       size_t crb_size = 4;
+
+       if (!(adapter->flags & NETXEN_NIC_DIAG_ENABLED))
+               return -EIO;
+
+       if (offset < NETXEN_PCI_CRBSPACE) {
+               if (NX_IS_REVISION_P2(adapter->ahw.revision_id))
+                       return -EINVAL;
+
+               if (ADDR_IN_RANGE(offset, NETXEN_PCI_CAMQM,
+                                               NETXEN_PCI_CAMQM_2M_END))
+                       crb_size = 8;
+               else
+                       return -EINVAL;
+       }
+
+       if ((size != crb_size) || (offset & (crb_size-1)))
+               return  -EINVAL;
+
+       return 0;
+}
+
+static ssize_t
+netxen_sysfs_read_crb(struct file *filp, struct kobject *kobj,
+               struct bin_attribute *attr,
+               char *buf, loff_t offset, size_t size)
+{
+       struct device *dev = kobj_to_dev(kobj);
+       struct netxen_adapter *adapter = dev_get_drvdata(dev);
+       u32 data;
+       u64 qmdata;
+       int ret;
+
+       ret = netxen_sysfs_validate_crb(adapter, offset, size);
+       if (ret != 0)
+               return ret;
+
+       if (NX_IS_REVISION_P3(adapter->ahw.revision_id) &&
+               ADDR_IN_RANGE(offset, NETXEN_PCI_CAMQM,
+                                       NETXEN_PCI_CAMQM_2M_END)) {
+               netxen_pci_camqm_read_2M(adapter, offset, &qmdata);
+               memcpy(buf, &qmdata, size);
+       } else {
+               data = NXRD32(adapter, offset);
+               memcpy(buf, &data, size);
+       }
+
+       return size;
+}
+
+static ssize_t
+netxen_sysfs_write_crb(struct file *filp, struct kobject *kobj,
+               struct bin_attribute *attr,
+               char *buf, loff_t offset, size_t size)
+{
+       struct device *dev = kobj_to_dev(kobj);
+       struct netxen_adapter *adapter = dev_get_drvdata(dev);
+       u32 data;
+       u64 qmdata;
+       int ret;
+
+       ret = netxen_sysfs_validate_crb(adapter, offset, size);
+       if (ret != 0)
+               return ret;
+
+       if (NX_IS_REVISION_P3(adapter->ahw.revision_id) &&
+               ADDR_IN_RANGE(offset, NETXEN_PCI_CAMQM,
+                                       NETXEN_PCI_CAMQM_2M_END)) {
+               memcpy(&qmdata, buf, size);
+               netxen_pci_camqm_write_2M(adapter, offset, qmdata);
+       } else {
+               memcpy(&data, buf, size);
+               NXWR32(adapter, offset, data);
+       }
+
+       return size;
+}
+
+static int
+netxen_sysfs_validate_mem(struct netxen_adapter *adapter,
+               loff_t offset, size_t size)
+{
+       if (!(adapter->flags & NETXEN_NIC_DIAG_ENABLED))
+               return -EIO;
+
+       if ((size != 8) || (offset & 0x7))
+               return  -EIO;
+
+       return 0;
+}
+
+static ssize_t
+netxen_sysfs_read_mem(struct file *filp, struct kobject *kobj,
+               struct bin_attribute *attr,
+               char *buf, loff_t offset, size_t size)
+{
+       struct device *dev = kobj_to_dev(kobj);
+       struct netxen_adapter *adapter = dev_get_drvdata(dev);
+       u64 data;
+       int ret;
+
+       ret = netxen_sysfs_validate_mem(adapter, offset, size);
+       if (ret != 0)
+               return ret;
+
+       if (adapter->pci_mem_read(adapter, offset, &data))
+               return -EIO;
+
+       memcpy(buf, &data, size);
+
+       return size;
+}
+
+static ssize_t netxen_sysfs_write_mem(struct file *filp, struct kobject *kobj,
+               struct bin_attribute *attr, char *buf,
+               loff_t offset, size_t size)
+{
+       struct device *dev = kobj_to_dev(kobj);
+       struct netxen_adapter *adapter = dev_get_drvdata(dev);
+       u64 data;
+       int ret;
+
+       ret = netxen_sysfs_validate_mem(adapter, offset, size);
+       if (ret != 0)
+               return ret;
+
+       memcpy(&data, buf, size);
+
+       if (adapter->pci_mem_write(adapter, offset, data))
+               return -EIO;
+
+       return size;
+}
+
+
+static struct bin_attribute bin_attr_crb = {
+       .attr = {.name = "crb", .mode = (S_IRUGO | S_IWUSR)},
+       .size = 0,
+       .read = netxen_sysfs_read_crb,
+       .write = netxen_sysfs_write_crb,
+};
+
+static struct bin_attribute bin_attr_mem = {
+       .attr = {.name = "mem", .mode = (S_IRUGO | S_IWUSR)},
+       .size = 0,
+       .read = netxen_sysfs_read_mem,
+       .write = netxen_sysfs_write_mem,
+};
+
+static ssize_t
+netxen_sysfs_read_dimm(struct file *filp, struct kobject *kobj,
+               struct bin_attribute *attr,
+               char *buf, loff_t offset, size_t size)
+{
+       struct device *dev = kobj_to_dev(kobj);
+       struct netxen_adapter *adapter = dev_get_drvdata(dev);
+       struct net_device *netdev = adapter->netdev;
+       struct netxen_dimm_cfg dimm;
+       u8 dw, rows, cols, banks, ranks;
+       u32 val;
+
+       if (size < attr->size) {
+               netdev_err(netdev, "Invalid size\n");
+               return -EINVAL;
+       }
+
+       memset(&dimm, 0, sizeof(struct netxen_dimm_cfg));
+       val = NXRD32(adapter, NETXEN_DIMM_CAPABILITY);
+
+       /* Checks if DIMM info is valid. */
+       if (val & NETXEN_DIMM_VALID_FLAG) {
+               netdev_err(netdev, "Invalid DIMM flag\n");
+               dimm.presence = 0xff;
+               goto out;
+       }
+
+       rows = NETXEN_DIMM_NUMROWS(val);
+       cols = NETXEN_DIMM_NUMCOLS(val);
+       ranks = NETXEN_DIMM_NUMRANKS(val);
+       banks = NETXEN_DIMM_NUMBANKS(val);
+       dw = NETXEN_DIMM_DATAWIDTH(val);
+
+       dimm.presence = (val & NETXEN_DIMM_PRESENT);
+
+       /* Checks if DIMM info is present. */
+       if (!dimm.presence) {
+               netdev_err(netdev, "DIMM not present\n");
+               goto out;
+       }
+
+       dimm.dimm_type = NETXEN_DIMM_TYPE(val);
+
+       switch (dimm.dimm_type) {
+       case NETXEN_DIMM_TYPE_RDIMM:
+       case NETXEN_DIMM_TYPE_UDIMM:
+       case NETXEN_DIMM_TYPE_SO_DIMM:
+       case NETXEN_DIMM_TYPE_Micro_DIMM:
+       case NETXEN_DIMM_TYPE_Mini_RDIMM:
+       case NETXEN_DIMM_TYPE_Mini_UDIMM:
+               break;
+       default:
+               netdev_err(netdev, "Invalid DIMM type %x\n", dimm.dimm_type);
+               goto out;
+       }
+
+       if (val & NETXEN_DIMM_MEMTYPE_DDR2_SDRAM)
+               dimm.mem_type = NETXEN_DIMM_MEM_DDR2_SDRAM;
+       else
+               dimm.mem_type = NETXEN_DIMM_MEMTYPE(val);
+
+       if (val & NETXEN_DIMM_SIZE) {
+               dimm.size = NETXEN_DIMM_STD_MEM_SIZE;
+               goto out;
+       }
+
+       if (!rows) {
+               netdev_err(netdev, "Invalid no of rows %x\n", rows);
+               goto out;
+       }
+
+       if (!cols) {
+               netdev_err(netdev, "Invalid no of columns %x\n", cols);
+               goto out;
+       }
+
+       if (!banks) {
+               netdev_err(netdev, "Invalid no of banks %x\n", banks);
+               goto out;
+       }
+
+       ranks += 1;
+
+       switch (dw) {
+       case 0x0:
+               dw = 32;
+               break;
+       case 0x1:
+               dw = 33;
+               break;
+       case 0x2:
+               dw = 36;
+               break;
+       case 0x3:
+               dw = 64;
+               break;
+       case 0x4:
+               dw = 72;
+               break;
+       case 0x5:
+               dw = 80;
+               break;
+       case 0x6:
+               dw = 128;
+               break;
+       case 0x7:
+               dw = 144;
+               break;
+       default:
+               netdev_err(netdev, "Invalid data-width %x\n", dw);
+               goto out;
+       }
+
+       dimm.size = ((1 << rows) * (1 << cols) * dw * banks * ranks) / 8;
+       /* Size returned in MB. */
+       dimm.size = (dimm.size) / 0x100000;
+out:
+       memcpy(buf, &dimm, sizeof(struct netxen_dimm_cfg));
+       return sizeof(struct netxen_dimm_cfg);
+
+}
+
+static struct bin_attribute bin_attr_dimm = {
+       .attr = { .name = "dimm", .mode = (S_IRUGO | S_IWUSR) },
+       .size = sizeof(struct netxen_dimm_cfg),
+       .read = netxen_sysfs_read_dimm,
+};
+
+
+static void
+netxen_create_sysfs_entries(struct netxen_adapter *adapter)
+{
+       struct device *dev = &adapter->pdev->dev;
+
+       if (adapter->capabilities & NX_FW_CAPABILITY_BDG) {
+               /* bridged_mode control */
+               if (device_create_file(dev, &dev_attr_bridged_mode)) {
+                       dev_warn(dev,
+                               "failed to create bridged_mode sysfs entry\n");
+               }
+       }
+}
+
+static void
+netxen_remove_sysfs_entries(struct netxen_adapter *adapter)
+{
+       struct device *dev = &adapter->pdev->dev;
+
+       if (adapter->capabilities & NX_FW_CAPABILITY_BDG)
+               device_remove_file(dev, &dev_attr_bridged_mode);
+}
+
+static void
+netxen_create_diag_entries(struct netxen_adapter *adapter)
+{
+       struct pci_dev *pdev = adapter->pdev;
+       struct device *dev;
+
+       dev = &pdev->dev;
+       if (device_create_file(dev, &dev_attr_diag_mode))
+               dev_info(dev, "failed to create diag_mode sysfs entry\n");
+       if (device_create_bin_file(dev, &bin_attr_crb))
+               dev_info(dev, "failed to create crb sysfs entry\n");
+       if (device_create_bin_file(dev, &bin_attr_mem))
+               dev_info(dev, "failed to create mem sysfs entry\n");
+       if (device_create_bin_file(dev, &bin_attr_dimm))
+               dev_info(dev, "failed to create dimm sysfs entry\n");
+}
+
+
+static void
+netxen_remove_diag_entries(struct netxen_adapter *adapter)
+{
+       struct pci_dev *pdev = adapter->pdev;
+       struct device *dev = &pdev->dev;
+
+       device_remove_file(dev, &dev_attr_diag_mode);
+       device_remove_bin_file(dev, &bin_attr_crb);
+       device_remove_bin_file(dev, &bin_attr_mem);
+       device_remove_bin_file(dev, &bin_attr_dimm);
+}
+
+#ifdef CONFIG_INET
+
+#define is_netxen_netdev(dev) (dev->netdev_ops == &netxen_netdev_ops)
+
+static int
+netxen_destip_supported(struct netxen_adapter *adapter)
+{
+       if (NX_IS_REVISION_P2(adapter->ahw.revision_id))
+               return 0;
+
+       if (adapter->ahw.cut_through)
+               return 0;
+
+       return 1;
+}
+
+static void
+netxen_free_ip_list(struct netxen_adapter *adapter, bool master)
+{
+       struct nx_ip_list  *cur, *tmp_cur;
+
+       list_for_each_entry_safe(cur, tmp_cur, &adapter->ip_list, list) {
+               if (master) {
+                       if (cur->master) {
+                               netxen_config_ipaddr(adapter, cur->ip_addr,
+                                                    NX_IP_DOWN);
+                               list_del(&cur->list);
+                               kfree(cur);
+                       }
+               } else {
+                       netxen_config_ipaddr(adapter, cur->ip_addr, NX_IP_DOWN);
+                       list_del(&cur->list);
+                       kfree(cur);
+               }
+       }
+}
+
+static bool
+netxen_list_config_ip(struct netxen_adapter *adapter,
+               struct in_ifaddr *ifa, unsigned long event)
+{
+       struct net_device *dev;
+       struct nx_ip_list *cur, *tmp_cur;
+       struct list_head *head;
+       bool ret = false;
+
+       dev = ifa->ifa_dev ? ifa->ifa_dev->dev : NULL;
+
+       if (dev == NULL)
+               goto out;
+
+       switch (event) {
+       case NX_IP_UP:
+               list_for_each(head, &adapter->ip_list) {
+                       cur = list_entry(head, struct nx_ip_list, list);
+
+                       if (cur->ip_addr == ifa->ifa_address)
+                               goto out;
+               }
+
+               cur = kzalloc(sizeof(struct nx_ip_list), GFP_ATOMIC);
+               if (cur == NULL)
+                       goto out;
+               if (dev->priv_flags & IFF_802_1Q_VLAN)
+                       dev = vlan_dev_real_dev(dev);
+               cur->master = !!netif_is_bond_master(dev);
+               cur->ip_addr = ifa->ifa_address;
+               list_add_tail(&cur->list, &adapter->ip_list);
+               netxen_config_ipaddr(adapter, ifa->ifa_address, NX_IP_UP);
+               ret = true;
+               break;
+       case NX_IP_DOWN:
+               list_for_each_entry_safe(cur, tmp_cur,
+                                       &adapter->ip_list, list) {
+                       if (cur->ip_addr == ifa->ifa_address) {
+                               list_del(&cur->list);
+                               kfree(cur);
+                               netxen_config_ipaddr(adapter, ifa->ifa_address,
+                                                    NX_IP_DOWN);
+                               ret = true;
+                               break;
+                       }
+               }
+       }
+out:
+       return ret;
+}
+
+static void
+netxen_config_indev_addr(struct netxen_adapter *adapter,
+               struct net_device *dev, unsigned long event)
+{
+       struct in_device *indev;
+
+       if (!netxen_destip_supported(adapter))
+               return;
+
+       indev = in_dev_get(dev);
+       if (!indev)
+               return;
+
+       for_ifa(indev) {
+               switch (event) {
+               case NETDEV_UP:
+                       netxen_list_config_ip(adapter, ifa, NX_IP_UP);
+                       break;
+               case NETDEV_DOWN:
+                       netxen_list_config_ip(adapter, ifa, NX_IP_DOWN);
+                       break;
+               default:
+                       break;
+               }
+       } endfor_ifa(indev);
+
+       in_dev_put(indev);
+}
+
+static void
+netxen_restore_indev_addr(struct net_device *netdev, unsigned long event)
+
+{
+       struct netxen_adapter *adapter = netdev_priv(netdev);
+       struct nx_ip_list *pos, *tmp_pos;
+       unsigned long ip_event;
+
+       ip_event = (event == NETDEV_UP) ? NX_IP_UP : NX_IP_DOWN;
+       netxen_config_indev_addr(adapter, netdev, event);
+
+       list_for_each_entry_safe(pos, tmp_pos, &adapter->ip_list, list) {
+               netxen_config_ipaddr(adapter, pos->ip_addr, ip_event);
+       }
+}
+
+static inline bool
+netxen_config_checkdev(struct net_device *dev)
+{
+       struct netxen_adapter *adapter;
+
+       if (!is_netxen_netdev(dev))
+               return false;
+       adapter = netdev_priv(dev);
+       if (!adapter)
+               return false;
+       if (!netxen_destip_supported(adapter))
+               return false;
+       if (adapter->is_up != NETXEN_ADAPTER_UP_MAGIC)
+               return false;
+
+       return true;
+}
+
+/**
+ * netxen_config_master - configure addresses based on master
+ * @dev: netxen device
+ * @event: netdev event
+ */
+static void netxen_config_master(struct net_device *dev, unsigned long event)
+{
+       struct net_device *master, *slave;
+       struct netxen_adapter *adapter = netdev_priv(dev);
+
+       rcu_read_lock();
+       master = netdev_master_upper_dev_get_rcu(dev);
+       /*
+        * This is the case where the netxen nic is being
+        * enslaved and is dev_open()ed in bond_enslave()
+        * Now we should program the bond's (and its vlans')
+        * addresses in the netxen NIC.
+        */
+       if (master && netif_is_bond_master(master) &&
+           !netif_is_bond_slave(dev)) {
+               netxen_config_indev_addr(adapter, master, event);
+               for_each_netdev_rcu(&init_net, slave)
+                       if (slave->priv_flags & IFF_802_1Q_VLAN &&
+                           vlan_dev_real_dev(slave) == master)
+                               netxen_config_indev_addr(adapter, slave, event);
+       }
+       rcu_read_unlock();
+       /*
+        * This is the case where the netxen nic is being
+        * released and is dev_close()ed in bond_release()
+        * just before IFF_BONDING is stripped.
+        */
+       if (!master && dev->priv_flags & IFF_BONDING)
+               netxen_free_ip_list(adapter, true);
+}
+
+static int netxen_netdev_event(struct notifier_block *this,
+                                unsigned long event, void *ptr)
+{
+       struct netxen_adapter *adapter;
+       struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+       struct net_device *orig_dev = dev;
+       struct net_device *slave;
+
+recheck:
+       if (dev == NULL)
+               goto done;
+
+       if (dev->priv_flags & IFF_802_1Q_VLAN) {
+               dev = vlan_dev_real_dev(dev);
+               goto recheck;
+       }
+       if (event == NETDEV_UP || event == NETDEV_DOWN) {
+               /* If this is a bonding device, look for netxen-based slaves*/
+               if (netif_is_bond_master(dev)) {
+                       rcu_read_lock();
+                       for_each_netdev_in_bond_rcu(dev, slave) {
+                               if (!netxen_config_checkdev(slave))
+                                       continue;
+                               adapter = netdev_priv(slave);
+                               netxen_config_indev_addr(adapter,
+                                                        orig_dev, event);
+                       }
+                       rcu_read_unlock();
+               } else {
+                       if (!netxen_config_checkdev(dev))
+                               goto done;
+                       adapter = netdev_priv(dev);
+                       /* Act only if the actual netxen is the target */
+                       if (orig_dev == dev)
+                               netxen_config_master(dev, event);
+                       netxen_config_indev_addr(adapter, orig_dev, event);
+               }
+       }
+done:
+       return NOTIFY_DONE;
+}
+
+static int
+netxen_inetaddr_event(struct notifier_block *this,
+               unsigned long event, void *ptr)
+{
+       struct netxen_adapter *adapter;
+       struct net_device *dev, *slave;
+       struct in_ifaddr *ifa = (struct in_ifaddr *)ptr;
+       unsigned long ip_event;
+
+       dev = ifa->ifa_dev ? ifa->ifa_dev->dev : NULL;
+       ip_event = (event == NETDEV_UP) ? NX_IP_UP : NX_IP_DOWN;
+recheck:
+       if (dev == NULL)
+               goto done;
+
+       if (dev->priv_flags & IFF_802_1Q_VLAN) {
+               dev = vlan_dev_real_dev(dev);
+               goto recheck;
+       }
+       if (event == NETDEV_UP || event == NETDEV_DOWN) {
+               /* If this is a bonding device, look for netxen-based slaves*/
+               if (netif_is_bond_master(dev)) {
+                       rcu_read_lock();
+                       for_each_netdev_in_bond_rcu(dev, slave) {
+                               if (!netxen_config_checkdev(slave))
+                                       continue;
+                               adapter = netdev_priv(slave);
+                               netxen_list_config_ip(adapter, ifa, ip_event);
+                       }
+                       rcu_read_unlock();
+               } else {
+                       if (!netxen_config_checkdev(dev))
+                               goto done;
+                       adapter = netdev_priv(dev);
+                       netxen_list_config_ip(adapter, ifa, ip_event);
+               }
+       }
+done:
+       return NOTIFY_DONE;
+}
+
+static struct notifier_block   netxen_netdev_cb = {
+       .notifier_call = netxen_netdev_event,
+};
+
+static struct notifier_block netxen_inetaddr_cb = {
+       .notifier_call = netxen_inetaddr_event,
+};
+#else
+static void
+netxen_restore_indev_addr(struct net_device *dev, unsigned long event)
+{ }
+static void
+netxen_free_ip_list(struct netxen_adapter *adapter, bool master)
+{ }
+#endif
+
+static const struct pci_error_handlers netxen_err_handler = {
+       .error_detected = netxen_io_error_detected,
+       .slot_reset = netxen_io_slot_reset,
+       .resume = netxen_io_resume,
+};
+
+static struct pci_driver netxen_driver = {
+       .name = netxen_nic_driver_name,
+       .id_table = netxen_pci_tbl,
+       .probe = netxen_nic_probe,
+       .remove = netxen_nic_remove,
+#ifdef CONFIG_PM
+       .suspend = netxen_nic_suspend,
+       .resume = netxen_nic_resume,
+#endif
+       .shutdown = netxen_nic_shutdown,
+       .err_handler = &netxen_err_handler
+};
+
+static int __init netxen_init_module(void)
+{
+       printk(KERN_INFO "%s\n", netxen_nic_driver_string);
+
+#ifdef CONFIG_INET
+       register_netdevice_notifier(&netxen_netdev_cb);
+       register_inetaddr_notifier(&netxen_inetaddr_cb);
+#endif
+       return pci_register_driver(&netxen_driver);
+}
+
+module_init(netxen_init_module);
+
+static void __exit netxen_exit_module(void)
+{
+       pci_unregister_driver(&netxen_driver);
+
+#ifdef CONFIG_INET
+       unregister_inetaddr_notifier(&netxen_inetaddr_cb);
+       unregister_netdevice_notifier(&netxen_netdev_cb);
+#endif
+}
+
+module_exit(netxen_exit_module);
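
The init and exit paths above follow a common Linux driver pattern: register the netdev and inetaddr notifier callbacks before registering the PCI driver, then tear them down again after pci_unregister_driver() on exit, in the reverse order of registration. Below is a minimal, self-contained sketch of that registration pattern only; the example_* names and the empty callback bodies are placeholders for illustration and are not part of the netxen driver.

#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/notifier.h>

/* Placeholder handlers: a real driver would inspect 'event' and 'ptr'
 * (the netdev or in_ifaddr payload) before returning NOTIFY_DONE. */
static int example_netdev_event(struct notifier_block *nb,
                                unsigned long event, void *ptr)
{
        return NOTIFY_DONE;
}

static int example_inetaddr_event(struct notifier_block *nb,
                                  unsigned long event, void *ptr)
{
        return NOTIFY_DONE;
}

static struct notifier_block example_netdev_cb = {
        .notifier_call = example_netdev_event,
};

static struct notifier_block example_inetaddr_cb = {
        .notifier_call = example_inetaddr_event,
};

static int __init example_init(void)
{
        /* Register notifiers before the bus driver so no address
         * events are missed once devices start probing. */
        register_netdevice_notifier(&example_netdev_cb);
        register_inetaddr_notifier(&example_inetaddr_cb);
        return 0; /* a real driver would return pci_register_driver(...) */
}

static void __exit example_exit(void)
{
        /* Tear down in the reverse order of registration. */
        unregister_inetaddr_notifier(&example_inetaddr_cb);
        unregister_netdevice_notifier(&example_netdev_cb);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");
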
diff --git a/drivers/net/ethernet/qlogic/qed/Makefile b/drivers/net/ethernet/qlogic/qed/Makefile
new file mode 100644 (file)
index 0000000..d1f157e
--- /dev/null
@@ -0,0 +1,6 @@
+obj-$(CONFIG_QED) := qed.o
+
+qed-y := qed_cxt.o qed_dev.o qed_hw.o qed_init_fw_funcs.o qed_init_ops.o \
+        qed_int.o qed_main.o qed_mcp.o qed_sp_commands.o qed_spq.o qed_l2.o \
+        qed_selftest.o qed_dcbx.o
+qed-$(CONFIG_QED_SRIOV) += qed_sriov.o qed_vf.o
diff --git a/drivers/net/ethernet/qlogic/qed/qed.h b/drivers/net/ethernet/qlogic/qed/qed.h
new file mode 100644 (file)
index 0000000..45ab746
--- /dev/null
@@ -0,0 +1,612 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015 QLogic Corporation
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#ifndef _QED_H
+#define _QED_H
+
+#include <linux/types.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/firmware.h>
+#include <linux/interrupt.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/workqueue.h>
+#include <linux/zlib.h>
+#include <linux/hashtable.h>
+#include <linux/qed/qed_if.h>
+#include "qed_hsi.h"
+
+extern const struct qed_common_ops qed_common_ops_pass;
+#define DRV_MODULE_VERSION "8.7.1.20"
+
+#define MAX_HWFNS_PER_DEVICE    (4)
+#define NAME_SIZE 16
+#define VER_SIZE 16
+
+#define QED_WFQ_UNIT   100
+
+/* cau states */
+enum qed_coalescing_mode {
+       QED_COAL_MODE_DISABLE,
+       QED_COAL_MODE_ENABLE
+};
+
+struct qed_eth_cb_ops;
+struct qed_dev_info;
+
+/* helpers */
+static inline u32 qed_db_addr(u32 cid, u32 DEMS)
+{
+       u32 db_addr = FIELD_VALUE(DB_LEGACY_ADDR_DEMS, DEMS) |
+                     FIELD_VALUE(DB_LEGACY_ADDR_ICID, cid);
+
+       return db_addr;
+}
+
+#define ALIGNED_TYPE_SIZE(type_name, p_hwfn)                                \
+       ((sizeof(type_name) + (u32)(1 << (p_hwfn->cdev->cache_shift)) - 1) & \
+        ~((1 << (p_hwfn->cdev->cache_shift)) - 1))
+
+#define for_each_hwfn(cdev, i)  for (i = 0; i < cdev->num_hwfns; i++)
+
+#define D_TRINE(val, cond1, cond2, true1, true2, def) \
+       (val == (cond1) ? true1 :                     \
+        (val == (cond2) ? true2 : def))
+
+/* forward */
+struct qed_ptt_pool;
+struct qed_spq;
+struct qed_sb_info;
+struct qed_sb_attn_info;
+struct qed_cxt_mngr;
+struct qed_sb_sp_info;
+struct qed_mcp_info;
+
+struct qed_rt_data {
+       u32     *init_val;
+       bool    *b_valid;
+};
+
+enum qed_tunn_mode {
+       QED_MODE_L2GENEVE_TUNN,
+       QED_MODE_IPGENEVE_TUNN,
+       QED_MODE_L2GRE_TUNN,
+       QED_MODE_IPGRE_TUNN,
+       QED_MODE_VXLAN_TUNN,
+};
+
+enum qed_tunn_clss {
+       QED_TUNN_CLSS_MAC_VLAN,
+       QED_TUNN_CLSS_MAC_VNI,
+       QED_TUNN_CLSS_INNER_MAC_VLAN,
+       QED_TUNN_CLSS_INNER_MAC_VNI,
+       MAX_QED_TUNN_CLSS,
+};
+
+struct qed_tunn_start_params {
+       unsigned long   tunn_mode;
+       u16             vxlan_udp_port;
+       u16             geneve_udp_port;
+       u8              update_vxlan_udp_port;
+       u8              update_geneve_udp_port;
+       u8              tunn_clss_vxlan;
+       u8              tunn_clss_l2geneve;
+       u8              tunn_clss_ipgeneve;
+       u8              tunn_clss_l2gre;
+       u8              tunn_clss_ipgre;
+};
+
+struct qed_tunn_update_params {
+       unsigned long   tunn_mode_update_mask;
+       unsigned long   tunn_mode;
+       u16             vxlan_udp_port;
+       u16             geneve_udp_port;
+       u8              update_rx_pf_clss;
+       u8              update_tx_pf_clss;
+       u8              update_vxlan_udp_port;
+       u8              update_geneve_udp_port;
+       u8              tunn_clss_vxlan;
+       u8              tunn_clss_l2geneve;
+       u8              tunn_clss_ipgeneve;
+       u8              tunn_clss_l2gre;
+       u8              tunn_clss_ipgre;
+};
+
+/* The PCI personality is not quite synonymous to protocol ID:
+ * 1. All personalities need CORE connections
+ * 2. The Ethernet personality may support also the RoCE protocol
+ */
+enum qed_pci_personality {
+       QED_PCI_ETH,
+       QED_PCI_ISCSI,
+       QED_PCI_ETH_ROCE,
+       QED_PCI_DEFAULT /* default in shmem */
+};
+
+/* All VFs are symmetric, all counters are PF + all VFs */
+struct qed_qm_iids {
+       u32 cids;
+       u32 vf_cids;
+       u32 tids;
+};
+
+enum QED_RESOURCES {
+       QED_SB,
+       QED_L2_QUEUE,
+       QED_VPORT,
+       QED_RSS_ENG,
+       QED_PQ,
+       QED_RL,
+       QED_MAC,
+       QED_VLAN,
+       QED_ILT,
+       QED_MAX_RESC,
+};
+
+enum QED_FEATURE {
+       QED_PF_L2_QUE,
+       QED_VF,
+       QED_MAX_FEATURES,
+};
+
+enum QED_PORT_MODE {
+       QED_PORT_MODE_DE_2X40G,
+       QED_PORT_MODE_DE_2X50G,
+       QED_PORT_MODE_DE_1X100G,
+       QED_PORT_MODE_DE_4X10G_F,
+       QED_PORT_MODE_DE_4X10G_E,
+       QED_PORT_MODE_DE_4X20G,
+       QED_PORT_MODE_DE_1X40G,
+       QED_PORT_MODE_DE_2X25G,
+       QED_PORT_MODE_DE_1X25G
+};
+
+enum qed_dev_cap {
+       QED_DEV_CAP_ETH,
+       QED_DEV_CAP_ISCSI,
+       QED_DEV_CAP_ROCE,
+};
+
+struct qed_hw_info {
+       /* PCI personality */
+       enum qed_pci_personality        personality;
+
+       /* Resource Allocation scheme results */
+       u32                             resc_start[QED_MAX_RESC];
+       u32                             resc_num[QED_MAX_RESC];
+       u32                             feat_num[QED_MAX_FEATURES];
+
+#define RESC_START(_p_hwfn, resc) ((_p_hwfn)->hw_info.resc_start[resc])
+#define RESC_NUM(_p_hwfn, resc) ((_p_hwfn)->hw_info.resc_num[resc])
+#define RESC_END(_p_hwfn, resc) (RESC_START(_p_hwfn, resc) + \
+                                RESC_NUM(_p_hwfn, resc))
+#define FEAT_NUM(_p_hwfn, resc) ((_p_hwfn)->hw_info.feat_num[resc])
+
+       u8                              num_tc;
+       u8                              offload_tc;
+       u8                              non_offload_tc;
+
+       u32                             concrete_fid;
+       u16                             opaque_fid;
+       u16                             ovlan;
+       u32                             part_num[4];
+
+       unsigned char                   hw_mac_addr[ETH_ALEN];
+
+       struct qed_igu_info             *p_igu_info;
+
+       u32                             port_mode;
+       u32                             hw_mode;
+       unsigned long           device_capabilities;
+};
+
+struct qed_hw_cid_data {
+       u32     cid;
+       bool    b_cid_allocated;
+
+       /* Additional identifiers */
+       u16     opaque_fid;
+       u8      vport_id;
+};
+
+/* maximum size of read/write commands (HW limit) */
+#define DMAE_MAX_RW_SIZE        0x2000
+
+struct qed_dmae_info {
+       /* Mutex for synchronizing access to functions */
+       struct mutex    mutex;
+
+       u8              channel;
+
+       dma_addr_t      completion_word_phys_addr;
+
+       /* The memory location where the DMAE writes the completion
+        * value when an operation is finished on this context.
+        */
+       u32             *p_completion_word;
+
+       dma_addr_t      intermediate_buffer_phys_addr;
+
+       /* An intermediate buffer for DMAE operations that use virtual
+        * addresses - data is DMA'd to/from this buffer and then
+        * memcpy'd to/from the virtual address
+        */
+       u32             *p_intermediate_buffer;
+
+       dma_addr_t      dmae_cmd_phys_addr;
+       struct dmae_cmd *p_dmae_cmd;
+};
+
+struct qed_wfq_data {
+       /* when feature is configured for at least 1 vport */
+       u32     min_speed;
+       bool    configured;
+};
+
+struct qed_qm_info {
+       struct init_qm_pq_params        *qm_pq_params;
+       struct init_qm_vport_params     *qm_vport_params;
+       struct init_qm_port_params      *qm_port_params;
+       u16                             start_pq;
+       u8                              start_vport;
+       u8                              pure_lb_pq;
+       u8                              offload_pq;
+       u8                              pure_ack_pq;
+       u8 ooo_pq;
+       u8                              vf_queues_offset;
+       u16                             num_pqs;
+       u16                             num_vf_pqs;
+       u8                              num_vports;
+       u8                              max_phys_tcs_per_port;
+       bool                            pf_rl_en;
+       bool                            pf_wfq_en;
+       bool                            vport_rl_en;
+       bool                            vport_wfq_en;
+       u8                              pf_wfq;
+       u32                             pf_rl;
+       struct qed_wfq_data             *wfq_data;
+       u8 num_pf_rls;
+};
+
+struct storm_stats {
+       u32     address;
+       u32     len;
+};
+
+struct qed_storm_stats {
+       struct storm_stats mstats;
+       struct storm_stats pstats;
+       struct storm_stats tstats;
+       struct storm_stats ustats;
+};
+
+struct qed_fw_data {
+       struct fw_ver_info      *fw_ver_info;
+       const u8                *modes_tree_buf;
+       union init_op           *init_ops;
+       const u32               *arr_data;
+       u32                     init_ops_size;
+};
+
+struct qed_simd_fp_handler {
+       void    *token;
+       void    (*func)(void *);
+};
+
+struct qed_hwfn {
+       struct qed_dev                  *cdev;
+       u8                              my_id;          /* ID inside the PF */
+#define IS_LEAD_HWFN(edev)              (!((edev)->my_id))
+       u8                              rel_pf_id;      /* Relative to engine*/
+       u8                              abs_pf_id;
+#define QED_PATH_ID(_p_hwfn)           ((_p_hwfn)->abs_pf_id & 1)
+       u8                              port_id;
+       bool                            b_active;
+
+       u32                             dp_module;
+       u8                              dp_level;
+       char                            name[NAME_SIZE];
+
+       bool                            first_on_engine;
+       bool                            hw_init_done;
+
+       u8                              num_funcs_on_engine;
+       u8 enabled_func_idx;
+
+       /* BAR access */
+       void __iomem                    *regview;
+       void __iomem                    *doorbells;
+       u64                             db_phys_addr;
+       unsigned long                   db_size;
+
+       /* PTT pool */
+       struct qed_ptt_pool             *p_ptt_pool;
+
+       /* HW info */
+       struct qed_hw_info              hw_info;
+
+       /* rt_array (for init-tool) */
+       struct qed_rt_data              rt_data;
+
+       /* SPQ */
+       struct qed_spq                  *p_spq;
+
+       /* EQ */
+       struct qed_eq                   *p_eq;
+
+       /* Consolidate Q*/
+       struct qed_consq                *p_consq;
+
+       /* Slow-Path definitions */
+       struct tasklet_struct           *sp_dpc;
+       bool                            b_sp_dpc_enabled;
+
+       struct qed_ptt                  *p_main_ptt;
+       struct qed_ptt                  *p_dpc_ptt;
+
+       struct qed_sb_sp_info           *p_sp_sb;
+       struct qed_sb_attn_info         *p_sb_attn;
+
+       /* Protocol related */
+       struct qed_pf_params            pf_params;
+
+       bool b_rdma_enabled_in_prs;
+       u32 rdma_prs_search_reg;
+
+       /* Array of sb_info of all status blocks */
+       struct qed_sb_info              *sbs_info[MAX_SB_PER_PF_MIMD];
+       u16                             num_sbs;
+
+       struct qed_cxt_mngr             *p_cxt_mngr;
+
+       /* Flag indicating whether interrupts are enabled or not*/
+       bool                            b_int_enabled;
+       bool                            b_int_requested;
+
+       /* True if the driver requests for the link */
+       bool                            b_drv_link_init;
+
+       struct qed_vf_iov               *vf_iov_info;
+       struct qed_pf_iov               *pf_iov_info;
+       struct qed_mcp_info             *mcp_info;
+
+       struct qed_dcbx_info            *p_dcbx_info;
+
+       struct qed_hw_cid_data          *p_tx_cids;
+       struct qed_hw_cid_data          *p_rx_cids;
+
+       struct qed_dmae_info            dmae_info;
+
+       /* QM init */
+       struct qed_qm_info              qm_info;
+       struct qed_storm_stats          storm_stats;
+
+       /* Buffer for unzipping firmware data */
+       void                            *unzip_buf;
+
+       struct qed_simd_fp_handler      simd_proto_handler[64];
+
+#ifdef CONFIG_QED_SRIOV
+       struct workqueue_struct *iov_wq;
+       struct delayed_work iov_task;
+       unsigned long iov_task_flags;
+#endif
+
+       struct z_stream_s               *stream;
+};
+
+struct pci_params {
+       int             pm_cap;
+
+       unsigned long   mem_start;
+       unsigned long   mem_end;
+       unsigned int    irq;
+       u8              pf_num;
+};
+
+struct qed_int_param {
+       u32     int_mode;
+       u8      num_vectors;
+       u8      min_msix_cnt; /* for minimal functionality */
+};
+
+struct qed_int_params {
+       struct qed_int_param    in;
+       struct qed_int_param    out;
+       struct msix_entry       *msix_table;
+       bool                    fp_initialized;
+       u8                      fp_msix_base;
+       u8                      fp_msix_cnt;
+};
+
+struct qed_dev {
+       u32     dp_module;
+       u8      dp_level;
+       char    name[NAME_SIZE];
+
+       u8      type;
+#define QED_DEV_TYPE_BB (0 << 0)
+#define QED_DEV_TYPE_AH BIT(0)
+/* Translate type/revision combo into the proper conditions */
+#define QED_IS_BB(dev)  ((dev)->type == QED_DEV_TYPE_BB)
+#define QED_IS_BB_A0(dev)       (QED_IS_BB(dev) && \
+                                CHIP_REV_IS_A0(dev))
+#define QED_IS_BB_B0(dev)       (QED_IS_BB(dev) && \
+                                CHIP_REV_IS_B0(dev))
+
+#define QED_GET_TYPE(dev)       (QED_IS_BB_A0(dev) ? CHIP_BB_A0 : \
+                                QED_IS_BB_B0(dev) ? CHIP_BB_B0 : CHIP_K2)
+
+       u16     vendor_id;
+       u16     device_id;
+
+       u16     chip_num;
+#define CHIP_NUM_MASK                   0xffff
+#define CHIP_NUM_SHIFT                  16
+
+       u16     chip_rev;
+#define CHIP_REV_MASK                   0xf
+#define CHIP_REV_SHIFT                  12
+#define CHIP_REV_IS_A0(_cdev)   (!(_cdev)->chip_rev)
+#define CHIP_REV_IS_B0(_cdev)   ((_cdev)->chip_rev == 1)
+
+       u16                             chip_metal;
+#define CHIP_METAL_MASK                 0xff
+#define CHIP_METAL_SHIFT                4
+
+       u16                             chip_bond_id;
+#define CHIP_BOND_ID_MASK               0xf
+#define CHIP_BOND_ID_SHIFT              0
+
+       u8                              num_engines;
+       u8                              num_ports_in_engines;
+       u8                              num_funcs_in_port;
+
+       u8                              path_id;
+       enum qed_mf_mode                mf_mode;
+#define IS_MF_DEFAULT(_p_hwfn)  (((_p_hwfn)->cdev)->mf_mode == QED_MF_DEFAULT)
+#define IS_MF_SI(_p_hwfn)       (((_p_hwfn)->cdev)->mf_mode == QED_MF_NPAR)
+#define IS_MF_SD(_p_hwfn)       (((_p_hwfn)->cdev)->mf_mode == QED_MF_OVLAN)
+
+       int                             pcie_width;
+       int                             pcie_speed;
+       u8                              ver_str[VER_SIZE];
+
+       /* Add MF related configuration */
+       u8                              mcp_rev;
+       u8                              boot_mode;
+
+       u8                              wol;
+
+       u32                             int_mode;
+       enum qed_coalescing_mode        int_coalescing_mode;
+       u16                             rx_coalesce_usecs;
+       u16                             tx_coalesce_usecs;
+
+       /* Start Bar offset of first hwfn */
+       void __iomem                    *regview;
+       void __iomem                    *doorbells;
+       u64                             db_phys_addr;
+       unsigned long                   db_size;
+
+       /* PCI */
+       u8                              cache_shift;
+
+       /* Init */
+       const struct iro                *iro_arr;
+#define IRO (p_hwfn->cdev->iro_arr)
+
+       /* HW functions */
+       u8                              num_hwfns;
+       struct qed_hwfn                 hwfns[MAX_HWFNS_PER_DEVICE];
+
+       /* SRIOV */
+       struct qed_hw_sriov_info *p_iov_info;
+#define IS_QED_SRIOV(cdev)              (!!(cdev)->p_iov_info)
+
+       unsigned long                   tunn_mode;
+
+       bool                            b_is_vf;
+       u32                             drv_type;
+
+       struct qed_eth_stats            *reset_stats;
+       struct qed_fw_data              *fw_data;
+
+       u32                             mcp_nvm_resp;
+
+       /* Linux specific here */
+       struct  qede_dev                *edev;
+       struct  pci_dev                 *pdev;
+       int                             msg_enable;
+
+       struct pci_params               pci_params;
+
+       struct qed_int_params           int_params;
+
+       u8                              protocol;
+#define IS_QED_ETH_IF(cdev)     ((cdev)->protocol == QED_PROTOCOL_ETH)
+
+       /* Callbacks to protocol driver */
+       union {
+               struct qed_common_cb_ops        *common;
+               struct qed_eth_cb_ops           *eth;
+       } protocol_ops;
+       void                            *ops_cookie;
+
+       const struct firmware           *firmware;
+};
+
+#define NUM_OF_VFS(dev)         MAX_NUM_VFS_BB
+#define NUM_OF_L2_QUEUES(dev)  MAX_NUM_L2_QUEUES_BB
+#define NUM_OF_SBS(dev)         MAX_SB_PER_PATH_BB
+#define NUM_OF_ENG_PFS(dev)     MAX_NUM_PFS_BB
+
+/**
+ * @brief qed_concrete_to_sw_fid - get the sw function id from
+ *        the concrete value.
+ *
+ * @param concrete_fid
+ *
+ * @return inline u8
+ */
+static inline u8 qed_concrete_to_sw_fid(struct qed_dev *cdev,
+                                       u32 concrete_fid)
+{
+       u8 vfid = GET_FIELD(concrete_fid, PXP_CONCRETE_FID_VFID);
+       u8 pfid = GET_FIELD(concrete_fid, PXP_CONCRETE_FID_PFID);
+       u8 vf_valid = GET_FIELD(concrete_fid,
+                               PXP_CONCRETE_FID_VFVALID);
+       u8 sw_fid;
+
+       if (vf_valid)
+               sw_fid = vfid + MAX_NUM_PFS;
+       else
+               sw_fid = pfid;
+
+       return sw_fid;
+}
+
+#define PURE_LB_TC 8
+#define OOO_LB_TC 9
+
+int qed_configure_vport_wfq(struct qed_dev *cdev, u16 vp_id, u32 rate);
+void qed_configure_vp_wfq_on_link_change(struct qed_dev *cdev, u32 min_pf_rate);
+
+void qed_clean_wfq_db(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
+#define QED_LEADING_HWFN(dev)   (&dev->hwfns[0])
+
+/* Other Linux specific common definitions */
+#define DP_NAME(cdev) ((cdev)->name)
+
+#define REG_ADDR(cdev, offset)          (void __iomem *)((u8 __iomem *)\
+                                               (cdev->regview) + \
+                                                        (offset))
+
+#define REG_RD(cdev, offset)            readl(REG_ADDR(cdev, offset))
+#define REG_WR(cdev, offset, val)       writel((u32)val, REG_ADDR(cdev, offset))
+#define REG_WR16(cdev, offset, val)     writew((u16)val, REG_ADDR(cdev, offset))
+
+#define DOORBELL(cdev, db_addr, val)                    \
+       writel((u32)val, (void __iomem *)((u8 __iomem *)\
+                                         (cdev->doorbells) + (db_addr)))
+
+/* Prototypes */
+int qed_fill_dev_info(struct qed_dev *cdev,
+                     struct qed_dev_info *dev_info);
+void qed_link_update(struct qed_hwfn *hwfn);
+u32 qed_unzip_data(struct qed_hwfn *p_hwfn,
+                  u32 input_len, u8 *input_buf,
+                  u32 max_size, u8 *unzip_buf);
+
+int qed_slowpath_irq_req(struct qed_hwfn *hwfn);
+
+#endif /* _QED_H */
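
The resource-allocation macros in qed_hw_info above (RESC_START, RESC_NUM, RESC_END) derive each function's slice of a shared resource from two arrays: resc_start[] holds the first element owned by the function and resc_num[] holds how many it owns, so the end of the window is simply start + num. The short user-space sketch below reproduces only that arithmetic; the struct name, the queue counts and the two-function split are illustrative values, not taken from the driver or real hardware.

#include <stdio.h>

/* Mirrors the resc_start[]/resc_num[] bookkeeping kept in qed_hw_info. */
struct resc_window {
        unsigned int start; /* first element owned by this function */
        unsigned int num;   /* number of elements it owns */
};

/* Same derivation as the RESC_END() macro: start + num. */
static unsigned int resc_end(const struct resc_window *w)
{
        return w->start + w->num;
}

int main(void)
{
        /* Illustrative split of 16 L2 queues between two PCI functions. */
        struct resc_window pf0 = { .start = 0, .num = 8 };
        struct resc_window pf1 = { .start = 8, .num = 8 };

        printf("PF0 L2 queues: [%u, %u)\n", pf0.start, resc_end(&pf0));
        printf("PF1 L2 queues: [%u, %u)\n", pf1.start, resc_end(&pf1));
        return 0;
}
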
diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.c b/drivers/net/ethernet/qlogic/qed/qed_cxt.c
new file mode 100644 (file)
index 0000000..1c35f37
--- /dev/null
@@ -0,0 +1,2257 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015 QLogic Corporation
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#include <linux/types.h>
+#include <linux/bitops.h>
+#include <linux/dma-mapping.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/log2.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/bitops.h>
+#include "qed.h"
+#include "qed_cxt.h"
+#include "qed_dev_api.h"
+#include "qed_hsi.h"
+#include "qed_hw.h"
+#include "qed_init_ops.h"
+#include "qed_reg_addr.h"
+#include "qed_sriov.h"
+
+/* Max number of connection types in HW (DQ/CDU etc.) */
+#define MAX_CONN_TYPES         PROTOCOLID_COMMON
+#define NUM_TASK_TYPES         2
+#define NUM_TASK_PF_SEGMENTS   4
+#define NUM_TASK_VF_SEGMENTS   1
+
+/* QM constants */
+#define QM_PQ_ELEMENT_SIZE     4 /* in bytes */
+
+/* Doorbell-Queue constants */
+#define DQ_RANGE_SHIFT         4
+#define DQ_RANGE_ALIGN         BIT(DQ_RANGE_SHIFT)
+
+/* Searcher constants */
+#define SRC_MIN_NUM_ELEMS 256
+
+/* Timers constants */
+#define TM_SHIFT        7
+#define TM_ALIGN        BIT(TM_SHIFT)
+#define TM_ELEM_SIZE    4
+
+/* ILT constants */
+#define ILT_DEFAULT_HW_P_SIZE          3
+#define ILT_PAGE_IN_BYTES(hw_p_size)   (1U << ((hw_p_size) + 12))
+#define ILT_CFG_REG(cli, reg)  PSWRQ2_REG_ ## cli ## _ ## reg ## _RT_OFFSET
+
+/* ILT entry structure */
+#define ILT_ENTRY_PHY_ADDR_MASK                0x000FFFFFFFFFFFULL
+#define ILT_ENTRY_PHY_ADDR_SHIFT       0
+#define ILT_ENTRY_VALID_MASK           0x1ULL
+#define ILT_ENTRY_VALID_SHIFT          52
+#define ILT_ENTRY_IN_REGS              2
+#define ILT_REG_SIZE_IN_BYTES          4
+
+/* connection context union */
+union conn_context {
+       struct core_conn_context core_ctx;
+       struct eth_conn_context eth_ctx;
+       struct iscsi_conn_context iscsi_ctx;
+       struct roce_conn_context roce_ctx;
+};
+
+/* TYPE-0 task context - iSCSI */
+union type0_task_context {
+       struct iscsi_task_context iscsi_ctx;
+};
+
+/* TYPE-1 task context - ROCE */
+union type1_task_context {
+       struct rdma_task_context roce_ctx;
+};
+
+struct src_ent {
+       u8 opaque[56];
+       u64 next;
+};
+
+#define CDUT_SEG_ALIGNMET 3    /* in 4k chunks */
+#define CDUT_SEG_ALIGNMET_IN_BYTES (1 << (CDUT_SEG_ALIGNMET + 12))
+
+#define CONN_CXT_SIZE(p_hwfn) \
+       ALIGNED_TYPE_SIZE(union conn_context, p_hwfn)
+
+#define SRQ_CXT_SIZE (sizeof(struct rdma_srq_context))
+
+#define TYPE0_TASK_CXT_SIZE(p_hwfn) \
+       ALIGNED_TYPE_SIZE(union type0_task_context, p_hwfn)
+
+/* Alignment is inherent to the type1_task_context structure */
+#define TYPE1_TASK_CXT_SIZE(p_hwfn) sizeof(union type1_task_context)
+
+/* PF per protocol configuration object */
+#define TASK_SEGMENTS   (NUM_TASK_PF_SEGMENTS + NUM_TASK_VF_SEGMENTS)
+#define TASK_SEGMENT_VF (NUM_TASK_PF_SEGMENTS)
+
+struct qed_tid_seg {
+       u32 count;
+       u8 type;
+       bool has_fl_mem;
+};
+
+struct qed_conn_type_cfg {
+       u32 cid_count;
+       u32 cid_start;
+       u32 cids_per_vf;
+       struct qed_tid_seg tid_seg[TASK_SEGMENTS];
+};
+
+/* ILT Client configuration, Per connection type (protocol) resources. */
+#define ILT_CLI_PF_BLOCKS      (1 + NUM_TASK_PF_SEGMENTS * 2)
+#define ILT_CLI_VF_BLOCKS       (1 + NUM_TASK_VF_SEGMENTS * 2)
+#define CDUC_BLK               (0)
+#define SRQ_BLK                 (0)
+#define CDUT_SEG_BLK(n)         (1 + (u8)(n))
+#define CDUT_FL_SEG_BLK(n, X)   (1 + (n) + NUM_TASK_ ## X ## _SEGMENTS)
+
+enum ilt_clients {
+       ILT_CLI_CDUC,
+       ILT_CLI_CDUT,
+       ILT_CLI_QM,
+       ILT_CLI_TM,
+       ILT_CLI_SRC,
+       ILT_CLI_TSDM,
+       ILT_CLI_MAX
+};
+
+struct ilt_cfg_pair {
+       u32 reg;
+       u32 val;
+};
+
+struct qed_ilt_cli_blk {
+       u32 total_size; /* 0 means not active */
+       u32 real_size_in_page;
+       u32 start_line;
+       u32 dynamic_line_cnt;
+};
+
+struct qed_ilt_client_cfg {
+       bool active;
+
+       /* ILT boundaries */
+       struct ilt_cfg_pair first;
+       struct ilt_cfg_pair last;
+       struct ilt_cfg_pair p_size;
+
+       /* ILT client blocks for PF */
+       struct qed_ilt_cli_blk pf_blks[ILT_CLI_PF_BLOCKS];
+       u32 pf_total_lines;
+
+       /* ILT client blocks for VFs */
+       struct qed_ilt_cli_blk vf_blks[ILT_CLI_VF_BLOCKS];
+       u32 vf_total_lines;
+};
+
+/* Per Path -
+ *      ILT shadow table
+ *      Protocol acquired CID lists
+ *      PF start line in ILT
+ */
+struct qed_dma_mem {
+       dma_addr_t p_phys;
+       void *p_virt;
+       size_t size;
+};
+
+struct qed_cid_acquired_map {
+       u32             start_cid;
+       u32             max_count;
+       unsigned long   *cid_map;
+};
+
+struct qed_cxt_mngr {
+       /* Per protocol configuration */
+       struct qed_conn_type_cfg        conn_cfg[MAX_CONN_TYPES];
+
+       /* computed ILT structure */
+       struct qed_ilt_client_cfg       clients[ILT_CLI_MAX];
+
+       /* Task type sizes */
+       u32 task_type_size[NUM_TASK_TYPES];
+
+       /* total number of VFs for this hwfn -
+        * ALL VFs are symmetric in terms of HW resources
+        */
+       u32                             vf_count;
+
+       /* total number of SRQ's for this hwfn */
+       u32 srq_count;
+
+       /* Acquired CIDs */
+       struct qed_cid_acquired_map     acquired[MAX_CONN_TYPES];
+
+       /* ILT  shadow table */
+       struct qed_dma_mem              *ilt_shadow;
+       u32                             pf_start_line;
+
+       /* Mutex for a dynamic ILT allocation */
+       struct mutex mutex;
+
+       /* SRC T2 */
+       struct qed_dma_mem *t2;
+       u32 t2_num_pages;
+       u64 first_free;
+       u64 last_free;
+};
+static bool src_proto(enum protocol_type type)
+{
+       return type == PROTOCOLID_ISCSI ||
+              type == PROTOCOLID_ROCE;
+}
+
+static bool tm_cid_proto(enum protocol_type type)
+{
+       return type == PROTOCOLID_ISCSI ||
+              type == PROTOCOLID_ROCE;
+}
+
+/* counts the iids for the CDU/CDUC ILT client configuration */
+struct qed_cdu_iids {
+       u32 pf_cids;
+       u32 per_vf_cids;
+};
+
+static void qed_cxt_cdu_iids(struct qed_cxt_mngr *p_mngr,
+                            struct qed_cdu_iids *iids)
+{
+       u32 type;
+
+       for (type = 0; type < MAX_CONN_TYPES; type++) {
+               iids->pf_cids += p_mngr->conn_cfg[type].cid_count;
+               iids->per_vf_cids += p_mngr->conn_cfg[type].cids_per_vf;
+       }
+}
+
+/* counts the iids for the Searcher block configuration */
+struct qed_src_iids {
+       u32 pf_cids;
+       u32 per_vf_cids;
+};
+
+static void qed_cxt_src_iids(struct qed_cxt_mngr *p_mngr,
+                            struct qed_src_iids *iids)
+{
+       u32 i;
+
+       for (i = 0; i < MAX_CONN_TYPES; i++) {
+               if (!src_proto(i))
+                       continue;
+
+               iids->pf_cids += p_mngr->conn_cfg[i].cid_count;
+               iids->per_vf_cids += p_mngr->conn_cfg[i].cids_per_vf;
+       }
+}
+
+/* counts the iids for the Timers block configuration */
+struct qed_tm_iids {
+       u32 pf_cids;
+       u32 pf_tids[NUM_TASK_PF_SEGMENTS];      /* per segment */
+       u32 pf_tids_total;
+       u32 per_vf_cids;
+       u32 per_vf_tids;
+};
+
+static void qed_cxt_tm_iids(struct qed_cxt_mngr *p_mngr,
+                           struct qed_tm_iids *iids)
+{
+       u32 i, j;
+
+       for (i = 0; i < MAX_CONN_TYPES; i++) {
+               struct qed_conn_type_cfg *p_cfg = &p_mngr->conn_cfg[i];
+
+               if (tm_cid_proto(i)) {
+                       iids->pf_cids += p_cfg->cid_count;
+                       iids->per_vf_cids += p_cfg->cids_per_vf;
+               }
+       }
+
+       iids->pf_cids = roundup(iids->pf_cids, TM_ALIGN);
+       iids->per_vf_cids = roundup(iids->per_vf_cids, TM_ALIGN);
+       iids->per_vf_tids = roundup(iids->per_vf_tids, TM_ALIGN);
+
+       for (iids->pf_tids_total = 0, j = 0; j < NUM_TASK_PF_SEGMENTS; j++) {
+               iids->pf_tids[j] = roundup(iids->pf_tids[j], TM_ALIGN);
+               iids->pf_tids_total += iids->pf_tids[j];
+       }
+}
+
+static void qed_cxt_qm_iids(struct qed_hwfn *p_hwfn,
+                           struct qed_qm_iids *iids)
+{
+       struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+       struct qed_tid_seg *segs;
+       u32 vf_cids = 0, type, j;
+       u32 vf_tids = 0;
+
+       for (type = 0; type < MAX_CONN_TYPES; type++) {
+               iids->cids += p_mngr->conn_cfg[type].cid_count;
+               vf_cids += p_mngr->conn_cfg[type].cids_per_vf;
+
+               segs = p_mngr->conn_cfg[type].tid_seg;
+               /* for each segment there is at most one
+                * protocol for which count is not 0.
+                */
+               for (j = 0; j < NUM_TASK_PF_SEGMENTS; j++)
+                       iids->tids += segs[j].count;
+
+               /* The last array element is for the VFs. As for PF
+                * segments there can be only one protocol for
+                * which this value is not 0.
+                */
+               vf_tids += segs[NUM_TASK_PF_SEGMENTS].count;
+       }
+
+       iids->vf_cids += vf_cids * p_mngr->vf_count;
+       iids->tids += vf_tids * p_mngr->vf_count;
+
+       DP_VERBOSE(p_hwfn, QED_MSG_ILT,
+                  "iids: CIDS %08x vf_cids %08x tids %08x vf_tids %08x\n",
+                  iids->cids, iids->vf_cids, iids->tids, vf_tids);
+}
+
+static struct qed_tid_seg *qed_cxt_tid_seg_info(struct qed_hwfn *p_hwfn,
+                                               u32 seg)
+{
+       struct qed_cxt_mngr *p_cfg = p_hwfn->p_cxt_mngr;
+       u32 i;
+
+       /* Find the protocol with tid count > 0 for this segment.
+        * Note: there can only be one and this is already validated.
+        */
+       for (i = 0; i < MAX_CONN_TYPES; i++)
+               if (p_cfg->conn_cfg[i].tid_seg[seg].count)
+                       return &p_cfg->conn_cfg[i].tid_seg[seg];
+       return NULL;
+}
+
+void qed_cxt_set_srq_count(struct qed_hwfn *p_hwfn, u32 num_srqs)
+{
+       struct qed_cxt_mngr *p_mgr = p_hwfn->p_cxt_mngr;
+
+       p_mgr->srq_count = num_srqs;
+}
+
+u32 qed_cxt_get_srq_count(struct qed_hwfn *p_hwfn)
+{
+       struct qed_cxt_mngr *p_mgr = p_hwfn->p_cxt_mngr;
+
+       return p_mgr->srq_count;
+}
+
+/* set the iids count per protocol */
+static void qed_cxt_set_proto_cid_count(struct qed_hwfn *p_hwfn,
+                                       enum protocol_type type,
+                                       u32 cid_count, u32 vf_cid_cnt)
+{
+       struct qed_cxt_mngr *p_mgr = p_hwfn->p_cxt_mngr;
+       struct qed_conn_type_cfg *p_conn = &p_mgr->conn_cfg[type];
+
+       p_conn->cid_count = roundup(cid_count, DQ_RANGE_ALIGN);
+       p_conn->cids_per_vf = roundup(vf_cid_cnt, DQ_RANGE_ALIGN);
+
+       if (type == PROTOCOLID_ROCE) {
+               u32 page_sz = p_mgr->clients[ILT_CLI_CDUC].p_size.val;
+               u32 cxt_size = CONN_CXT_SIZE(p_hwfn);
+               u32 elems_per_page = ILT_PAGE_IN_BYTES(page_sz) / cxt_size;
+
+               p_conn->cid_count = roundup(p_conn->cid_count, elems_per_page);
+       }
+}
+
+u32 qed_cxt_get_proto_cid_count(struct qed_hwfn                *p_hwfn,
+                               enum protocol_type      type,
+                               u32                     *vf_cid)
+{
+       if (vf_cid)
+               *vf_cid = p_hwfn->p_cxt_mngr->conn_cfg[type].cids_per_vf;
+
+       return p_hwfn->p_cxt_mngr->conn_cfg[type].cid_count;
+}
+
+u32 qed_cxt_get_proto_cid_start(struct qed_hwfn *p_hwfn,
+                               enum protocol_type type)
+{
+       return p_hwfn->p_cxt_mngr->acquired[type].start_cid;
+}
+
+u32 qed_cxt_get_proto_tid_count(struct qed_hwfn *p_hwfn,
+                               enum protocol_type type)
+{
+       u32 cnt = 0;
+       int i;
+
+       for (i = 0; i < TASK_SEGMENTS; i++)
+               cnt += p_hwfn->p_cxt_mngr->conn_cfg[type].tid_seg[i].count;
+
+       return cnt;
+}
+
+static void
+qed_cxt_set_proto_tid_count(struct qed_hwfn *p_hwfn,
+                           enum protocol_type proto,
+                           u8 seg, u8 seg_type, u32 count, bool has_fl)
+{
+       struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+       struct qed_tid_seg *p_seg = &p_mngr->conn_cfg[proto].tid_seg[seg];
+
+       p_seg->count = count;
+       p_seg->has_fl_mem = has_fl;
+       p_seg->type = seg_type;
+}
+
+static void qed_ilt_cli_blk_fill(struct qed_ilt_client_cfg *p_cli,
+                                struct qed_ilt_cli_blk *p_blk,
+                                u32 start_line, u32 total_size,
+                                u32 elem_size)
+{
+       u32 ilt_size = ILT_PAGE_IN_BYTES(p_cli->p_size.val);
+
+       /* verify that it is called only once for each block */
+       if (p_blk->total_size)
+               return;
+
+       p_blk->total_size = total_size;
+       p_blk->real_size_in_page = 0;
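+       /* real_size_in_page is the usable part of an ILT page: only whole
+        * elements are packed into a page, the remainder is wasted.
+        */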
+       if (elem_size)
+               p_blk->real_size_in_page = (ilt_size / elem_size) * elem_size;
+       p_blk->start_line = start_line;
+}
+
+static void qed_ilt_cli_adv_line(struct qed_hwfn *p_hwfn,
+                                struct qed_ilt_client_cfg *p_cli,
+                                struct qed_ilt_cli_blk *p_blk,
+                                u32 *p_line, enum ilt_clients client_id)
+{
+       if (!p_blk->total_size)
+               return;
+
+       if (!p_cli->active)
+               p_cli->first.val = *p_line;
+
+       p_cli->active = true;
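+       /* Advance the running line counter by the number of ILT lines this
+        * block occupies.
+        */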
+       *p_line += DIV_ROUND_UP(p_blk->total_size,
+                               p_blk->real_size_in_page);
+       p_cli->last.val = *p_line - 1;
+
+       DP_VERBOSE(p_hwfn, QED_MSG_ILT,
+                  "ILT[Client %d] - Lines: [%08x - %08x]. Block - Size %08x [Real %08x] Start line %d\n",
+                  client_id, p_cli->first.val,
+                  p_cli->last.val, p_blk->total_size,
+                  p_blk->real_size_in_page, p_blk->start_line);
+}
+
+static u32 qed_ilt_get_dynamic_line_cnt(struct qed_hwfn *p_hwfn,
+                                       enum ilt_clients ilt_client)
+{
+       u32 cid_count = p_hwfn->p_cxt_mngr->conn_cfg[PROTOCOLID_ROCE].cid_count;
+       struct qed_ilt_client_cfg *p_cli;
+       u32 lines_to_skip = 0;
+       u32 cxts_per_p;
+
+       if (ilt_client == ILT_CLI_CDUC) {
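+               /* CDUC lines holding RoCE connection contexts are not backed
+                * with memory up front; they are counted here and skipped by
+                * qed_ilt_blk_alloc() so they can be allocated dynamically.
+                */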
+               p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUC];
+
+               cxts_per_p = ILT_PAGE_IN_BYTES(p_cli->p_size.val) /
+                   (u32) CONN_CXT_SIZE(p_hwfn);
+
+               lines_to_skip = cid_count / cxts_per_p;
+       }
+
+       return lines_to_skip;
+}
+
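+/* Compute the ILT line layout for this PF: walk the CDUC, CDUT, QM, SRC,
+ * TM and TSDM clients in order, fill each client's blocks while advancing
+ * the running line counter, then verify the result fits the PF's ILT quota.
+ */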
+int qed_cxt_cfg_ilt_compute(struct qed_hwfn *p_hwfn)
+{
+       struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+       u32 curr_line, total, i, task_size, line;
+       struct qed_ilt_client_cfg *p_cli;
+       struct qed_ilt_cli_blk *p_blk;
+       struct qed_cdu_iids cdu_iids;
+       struct qed_src_iids src_iids;
+       struct qed_qm_iids qm_iids;
+       struct qed_tm_iids tm_iids;
+       struct qed_tid_seg *p_seg;
+
+       memset(&qm_iids, 0, sizeof(qm_iids));
+       memset(&cdu_iids, 0, sizeof(cdu_iids));
+       memset(&src_iids, 0, sizeof(src_iids));
+       memset(&tm_iids, 0, sizeof(tm_iids));
+
+       p_mngr->pf_start_line = RESC_START(p_hwfn, QED_ILT);
+
+       DP_VERBOSE(p_hwfn, QED_MSG_ILT,
+                  "hwfn [%d] - Set context manager starting line to be 0x%08x\n",
+                  p_hwfn->my_id, p_hwfn->p_cxt_mngr->pf_start_line);
+
+       /* CDUC */
+       p_cli = &p_mngr->clients[ILT_CLI_CDUC];
+       curr_line = p_mngr->pf_start_line;
+
+       /* CDUC PF */
+       p_cli->pf_total_lines = 0;
+
+       /* get the counters for the CDUC and QM clients  */
+       qed_cxt_cdu_iids(p_mngr, &cdu_iids);
+
+       p_blk = &p_cli->pf_blks[CDUC_BLK];
+
+       total = cdu_iids.pf_cids * CONN_CXT_SIZE(p_hwfn);
+
+       qed_ilt_cli_blk_fill(p_cli, p_blk, curr_line,
+                            total, CONN_CXT_SIZE(p_hwfn));
+
+       qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line, ILT_CLI_CDUC);
+       p_cli->pf_total_lines = curr_line - p_blk->start_line;
+
+       p_blk->dynamic_line_cnt = qed_ilt_get_dynamic_line_cnt(p_hwfn,
+                                                              ILT_CLI_CDUC);
+
+       /* CDUC VF */
+       p_blk = &p_cli->vf_blks[CDUC_BLK];
+       total = cdu_iids.per_vf_cids * CONN_CXT_SIZE(p_hwfn);
+
+       qed_ilt_cli_blk_fill(p_cli, p_blk, curr_line,
+                            total, CONN_CXT_SIZE(p_hwfn));
+
+       qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line, ILT_CLI_CDUC);
+       p_cli->vf_total_lines = curr_line - p_blk->start_line;
+
+       for (i = 1; i < p_mngr->vf_count; i++)
+               qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
+                                    ILT_CLI_CDUC);
+
+       /* CDUT PF */
+       p_cli = &p_mngr->clients[ILT_CLI_CDUT];
+       p_cli->first.val = curr_line;
+
+       /* first the 'working' task memory */
+       for (i = 0; i < NUM_TASK_PF_SEGMENTS; i++) {
+               p_seg = qed_cxt_tid_seg_info(p_hwfn, i);
+               if (!p_seg || p_seg->count == 0)
+                       continue;
+
+               p_blk = &p_cli->pf_blks[CDUT_SEG_BLK(i)];
+               total = p_seg->count * p_mngr->task_type_size[p_seg->type];
+               qed_ilt_cli_blk_fill(p_cli, p_blk, curr_line, total,
+                                    p_mngr->task_type_size[p_seg->type]);
+
+               qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
+                                    ILT_CLI_CDUT);
+       }
+
+       /* next the 'init' task memory (forced load memory) */
+       for (i = 0; i < NUM_TASK_PF_SEGMENTS; i++) {
+               p_seg = qed_cxt_tid_seg_info(p_hwfn, i);
+               if (!p_seg || p_seg->count == 0)
+                       continue;
+
+               p_blk = &p_cli->pf_blks[CDUT_FL_SEG_BLK(i, PF)];
+
+               if (!p_seg->has_fl_mem) {
+                       /* The segment is active (total size of 'working'
+                        * memory is > 0) but has no FL (forced-load, Init)
+                        * memory. Thus:
+                        *
+                        * 1.   The total-size in the corresponding FL block of
+                        *      the ILT client is set to 0 - no ILT lines are
+                        *      provisioned and no ILT memory allocated.
+                        *
+                        * 2.   The start-line of said block is set to the
+                        *      start line of the matching working memory
+                        *      block in the ILT client. This is later used to
+                        *      configure the CDU segment offset registers, so
+                        *      an FL command for TIDs of this segment behaves
+                        *      as a regular load command (loading TIDs from
+                        *      the working memory).
+                        */
+                       line = p_cli->pf_blks[CDUT_SEG_BLK(i)].start_line;
+
+                       qed_ilt_cli_blk_fill(p_cli, p_blk, line, 0, 0);
+                       continue;
+               }
+               total = p_seg->count * p_mngr->task_type_size[p_seg->type];
+
+               qed_ilt_cli_blk_fill(p_cli, p_blk,
+                                    curr_line, total,
+                                    p_mngr->task_type_size[p_seg->type]);
+
+               qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
+                                    ILT_CLI_CDUT);
+       }
+       p_cli->pf_total_lines = curr_line - p_cli->pf_blks[0].start_line;
+
+       /* CDUT VF */
+       p_seg = qed_cxt_tid_seg_info(p_hwfn, TASK_SEGMENT_VF);
+       if (p_seg && p_seg->count) {
+               /* Strictly speaking we need to iterate over all VF
+                * task segment types, but a VF has only 1 segment
+                */
+
+               /* 'working' memory */
+               total = p_seg->count * p_mngr->task_type_size[p_seg->type];
+
+               p_blk = &p_cli->vf_blks[CDUT_SEG_BLK(0)];
+               qed_ilt_cli_blk_fill(p_cli, p_blk,
+                                    curr_line, total,
+                                    p_mngr->task_type_size[p_seg->type]);
+
+               qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
+                                    ILT_CLI_CDUT);
+
+               /* 'init' memory */
+               p_blk = &p_cli->vf_blks[CDUT_FL_SEG_BLK(0, VF)];
+               if (!p_seg->has_fl_mem) {
+                       /* see comment above */
+                       line = p_cli->vf_blks[CDUT_SEG_BLK(0)].start_line;
+                       qed_ilt_cli_blk_fill(p_cli, p_blk, line, 0, 0);
+               } else {
+                       task_size = p_mngr->task_type_size[p_seg->type];
+                       qed_ilt_cli_blk_fill(p_cli, p_blk,
+                                            curr_line, total, task_size);
+                       qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
+                                            ILT_CLI_CDUT);
+               }
+               p_cli->vf_total_lines = curr_line -
+                   p_cli->vf_blks[0].start_line;
+
+               /* Now for the rest of the VFs */
+               for (i = 1; i < p_mngr->vf_count; i++) {
+                       p_blk = &p_cli->vf_blks[CDUT_SEG_BLK(0)];
+                       qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
+                                            ILT_CLI_CDUT);
+
+                       p_blk = &p_cli->vf_blks[CDUT_FL_SEG_BLK(0, VF)];
+                       qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
+                                            ILT_CLI_CDUT);
+               }
+       }
+
+       /* QM */
+       p_cli = &p_mngr->clients[ILT_CLI_QM];
+       p_blk = &p_cli->pf_blks[0];
+
+       qed_cxt_qm_iids(p_hwfn, &qm_iids);
+       total = qed_qm_pf_mem_size(p_hwfn->rel_pf_id, qm_iids.cids,
+                                  qm_iids.vf_cids, qm_iids.tids,
+                                  p_hwfn->qm_info.num_pqs,
+                                  p_hwfn->qm_info.num_vf_pqs);
+
+       DP_VERBOSE(p_hwfn,
+                  QED_MSG_ILT,
+                  "QM ILT Info, (cids=%d, vf_cids=%d, tids=%d, num_pqs=%d, num_vf_pqs=%d, memory_size=%d)\n",
+                  qm_iids.cids,
+                  qm_iids.vf_cids,
+                  qm_iids.tids,
+                  p_hwfn->qm_info.num_pqs, p_hwfn->qm_info.num_vf_pqs, total);
+
+       qed_ilt_cli_blk_fill(p_cli, p_blk,
+                            curr_line, total * 0x1000,
+                            QM_PQ_ELEMENT_SIZE);
+
+       qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line, ILT_CLI_QM);
+       p_cli->pf_total_lines = curr_line - p_blk->start_line;
+
+       /* SRC */
+       p_cli = &p_mngr->clients[ILT_CLI_SRC];
+       qed_cxt_src_iids(p_mngr, &src_iids);
+
+       /* Both the PF and VFs searcher connections are stored in the per PF
+        * database. Thus sum the PF searcher cids and all the VFs searcher
+        * cids.
+        */
+       total = src_iids.pf_cids + src_iids.per_vf_cids * p_mngr->vf_count;
+       if (total) {
+               u32 local_max = max_t(u32, total,
+                                     SRC_MIN_NUM_ELEMS);
+
+               total = roundup_pow_of_two(local_max);
+
+               p_blk = &p_cli->pf_blks[0];
+               qed_ilt_cli_blk_fill(p_cli, p_blk, curr_line,
+                                    total * sizeof(struct src_ent),
+                                    sizeof(struct src_ent));
+
+               qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
+                                    ILT_CLI_SRC);
+               p_cli->pf_total_lines = curr_line - p_blk->start_line;
+       }
+
+       /* TM PF */
+       p_cli = &p_mngr->clients[ILT_CLI_TM];
+       qed_cxt_tm_iids(p_mngr, &tm_iids);
+       total = tm_iids.pf_cids + tm_iids.pf_tids_total;
+       if (total) {
+               p_blk = &p_cli->pf_blks[0];
+               qed_ilt_cli_blk_fill(p_cli, p_blk, curr_line,
+                                    total * TM_ELEM_SIZE, TM_ELEM_SIZE);
+
+               qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
+                                    ILT_CLI_TM);
+               p_cli->pf_total_lines = curr_line - p_blk->start_line;
+       }
+
+       /* TM VF */
+       total = tm_iids.per_vf_cids + tm_iids.per_vf_tids;
+       if (total) {
+               p_blk = &p_cli->vf_blks[0];
+               qed_ilt_cli_blk_fill(p_cli, p_blk, curr_line,
+                                    total * TM_ELEM_SIZE, TM_ELEM_SIZE);
+
+               qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
+                                    ILT_CLI_TM);
+               p_cli->pf_total_lines = curr_line - p_blk->start_line;
+
+               for (i = 1; i < p_mngr->vf_count; i++)
+                       qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
+                                            ILT_CLI_TM);
+       }
+
+       /* TSDM (SRQ CONTEXT) */
+       total = qed_cxt_get_srq_count(p_hwfn);
+
+       if (total) {
+               p_cli = &p_mngr->clients[ILT_CLI_TSDM];
+               p_blk = &p_cli->pf_blks[SRQ_BLK];
+               qed_ilt_cli_blk_fill(p_cli, p_blk, curr_line,
+                                    total * SRQ_CXT_SIZE, SRQ_CXT_SIZE);
+
+               qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
+                                    ILT_CLI_TSDM);
+               p_cli->pf_total_lines = curr_line - p_blk->start_line;
+       }
+
+       if (curr_line - p_hwfn->p_cxt_mngr->pf_start_line >
+           RESC_NUM(p_hwfn, QED_ILT)) {
+               DP_ERR(p_hwfn, "too many ilt lines...#lines=%d\n",
+                      curr_line - p_hwfn->p_cxt_mngr->pf_start_line);
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+static void qed_cxt_src_t2_free(struct qed_hwfn *p_hwfn)
+{
+       struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+       u32 i;
+
+       if (!p_mngr->t2)
+               return;
+
+       for (i = 0; i < p_mngr->t2_num_pages; i++)
+               if (p_mngr->t2[i].p_virt)
+                       dma_free_coherent(&p_hwfn->cdev->pdev->dev,
+                                         p_mngr->t2[i].size,
+                                         p_mngr->t2[i].p_virt,
+                                         p_mngr->t2[i].p_phys);
+
+       kfree(p_mngr->t2);
+       p_mngr->t2 = NULL;
+}
+
+static int qed_cxt_src_t2_alloc(struct qed_hwfn *p_hwfn)
+{
+       struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+       u32 conn_num, total_size, ent_per_page, psz, i;
+       struct qed_ilt_client_cfg *p_src;
+       struct qed_src_iids src_iids;
+       struct qed_dma_mem *p_t2;
+       int rc;
+
+       memset(&src_iids, 0, sizeof(src_iids));
+
+       /* If the SRC ILT client is inactive - there are no connections
+        * requiring the searcher, leave.
+        */
+       p_src = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_SRC];
+       if (!p_src->active)
+               return 0;
+
+       qed_cxt_src_iids(p_mngr, &src_iids);
+       conn_num = src_iids.pf_cids + src_iids.per_vf_cids * p_mngr->vf_count;
+       total_size = conn_num * sizeof(struct src_ent);
+
+       /* use the same page size as the SRC ILT client */
+       psz = ILT_PAGE_IN_BYTES(p_src->p_size.val);
+       p_mngr->t2_num_pages = DIV_ROUND_UP(total_size, psz);
+
+       /* allocate t2 */
+       p_mngr->t2 = kzalloc(p_mngr->t2_num_pages * sizeof(struct qed_dma_mem),
+                            GFP_KERNEL);
+       if (!p_mngr->t2) {
+               DP_NOTICE(p_hwfn, "Failed to allocate t2 table\n");
+               rc = -ENOMEM;
+               goto t2_fail;
+       }
+
+       /* allocate t2 pages */
+       for (i = 0; i < p_mngr->t2_num_pages; i++) {
+               u32 size = min_t(u32, total_size, psz);
+               void **p_virt = &p_mngr->t2[i].p_virt;
+
+               *p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
+                                            size,
+                                            &p_mngr->t2[i].p_phys, GFP_KERNEL);
+               if (!p_mngr->t2[i].p_virt) {
+                       rc = -ENOMEM;
+                       goto t2_fail;
+               }
+               memset(*p_virt, 0, size);
+               p_mngr->t2[i].size = size;
+               total_size -= size;
+       }
+
+       /* Set the t2 pointers */
+
+       /* entries per page - must be a power of two */
+       ent_per_page = psz / sizeof(struct src_ent);
+
+       p_mngr->first_free = (u64) p_mngr->t2[0].p_phys;
+
+       p_t2 = &p_mngr->t2[(conn_num - 1) / ent_per_page];
+       p_mngr->last_free = (u64) p_t2->p_phys +
+           ((conn_num - 1) & (ent_per_page - 1)) * sizeof(struct src_ent);
+
+       for (i = 0; i < p_mngr->t2_num_pages; i++) {
+               u32 ent_num = min_t(u32,
+                                   ent_per_page,
+                                   conn_num);
+               struct src_ent *entries = p_mngr->t2[i].p_virt;
+               u64 p_ent_phys = (u64) p_mngr->t2[i].p_phys, val;
+               u32 j;
+
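+               /* Chain the free list: within a page each entry points to the
+                * next one, and the last entry points to the first entry of
+                * the following page (0 terminates the final page).
+                */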
+               for (j = 0; j < ent_num - 1; j++) {
+                       val = p_ent_phys + (j + 1) * sizeof(struct src_ent);
+                       entries[j].next = cpu_to_be64(val);
+               }
+
+               if (i < p_mngr->t2_num_pages - 1)
+                       val = (u64) p_mngr->t2[i + 1].p_phys;
+               else
+                       val = 0;
+               entries[j].next = cpu_to_be64(val);
+
+               conn_num -= ent_num;
+       }
+
+       return 0;
+
+t2_fail:
+       qed_cxt_src_t2_free(p_hwfn);
+       return rc;
+}
+
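+/* Iterate only over active ILT clients; the trailing 'else' makes the
+ * statement that follows the macro act as the loop body for active clients.
+ */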
+#define for_each_ilt_valid_client(pos, clients)        \
+       for (pos = 0; pos < ILT_CLI_MAX; pos++) \
+               if (!clients[pos].active) {     \
+                       continue;               \
+               } else                          \
+
+/* Total number of ILT lines used by this PF */
+static u32 qed_cxt_ilt_shadow_size(struct qed_ilt_client_cfg *ilt_clients)
+{
+       u32 size = 0;
+       u32 i;
+
+       for_each_ilt_valid_client(i, ilt_clients)
+           size += (ilt_clients[i].last.val - ilt_clients[i].first.val + 1);
+
+       return size;
+}
+
+static void qed_ilt_shadow_free(struct qed_hwfn *p_hwfn)
+{
+       struct qed_ilt_client_cfg *p_cli = p_hwfn->p_cxt_mngr->clients;
+       struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+       u32 ilt_size, i;
+
+       ilt_size = qed_cxt_ilt_shadow_size(p_cli);
+
+       for (i = 0; p_mngr->ilt_shadow && i < ilt_size; i++) {
+               struct qed_dma_mem *p_dma = &p_mngr->ilt_shadow[i];
+
+               if (p_dma->p_virt)
+                       dma_free_coherent(&p_hwfn->cdev->pdev->dev,
+                                         p_dma->size, p_dma->p_virt,
+                                         p_dma->p_phys);
+               p_dma->p_virt = NULL;
+       }
+       kfree(p_mngr->ilt_shadow);
+}
+
+static int qed_ilt_blk_alloc(struct qed_hwfn *p_hwfn,
+                            struct qed_ilt_cli_blk *p_blk,
+                            enum ilt_clients ilt_client,
+                            u32 start_line_offset)
+{
+       struct qed_dma_mem *ilt_shadow = p_hwfn->p_cxt_mngr->ilt_shadow;
+       u32 lines, line, sz_left, lines_to_skip = 0;
+
+       /* Special handling for RoCE that supports dynamic allocation */
+       if ((p_hwfn->hw_info.personality == QED_PCI_ETH_ROCE) &&
+           ((ilt_client == ILT_CLI_CDUT) || ilt_client == ILT_CLI_TSDM))
+               return 0;
+
+       lines_to_skip = p_blk->dynamic_line_cnt;
+
+       if (!p_blk->total_size)
+               return 0;
+
+       sz_left = p_blk->total_size;
+       lines = DIV_ROUND_UP(sz_left, p_blk->real_size_in_page) - lines_to_skip;
+       line = p_blk->start_line + start_line_offset -
+           p_hwfn->p_cxt_mngr->pf_start_line + lines_to_skip;
+
+       for (; lines; lines--) {
+               dma_addr_t p_phys;
+               void *p_virt;
+               u32 size;
+
+               size = min_t(u32, sz_left,
+                            p_blk->real_size_in_page);
+               p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
+                                           size,
+                                           &p_phys,
+                                           GFP_KERNEL);
+               if (!p_virt)
+                       return -ENOMEM;
+               memset(p_virt, 0, size);
+
+               ilt_shadow[line].p_phys = p_phys;
+               ilt_shadow[line].p_virt = p_virt;
+               ilt_shadow[line].size = size;
+
+               DP_VERBOSE(p_hwfn, QED_MSG_ILT,
+                          "ILT shadow: Line [%d] Physical 0x%llx Virtual %p Size %d\n",
+                           line, (u64)p_phys, p_virt, size);
+
+               sz_left -= size;
+               line++;
+       }
+
+       return 0;
+}
+
+static int qed_ilt_shadow_alloc(struct qed_hwfn *p_hwfn)
+{
+       struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+       struct qed_ilt_client_cfg *clients = p_mngr->clients;
+       struct qed_ilt_cli_blk *p_blk;
+       u32 size, i, j, k;
+       int rc;
+
+       size = qed_cxt_ilt_shadow_size(clients);
+       p_mngr->ilt_shadow = kcalloc(size, sizeof(struct qed_dma_mem),
+                                    GFP_KERNEL);
+       if (!p_mngr->ilt_shadow) {
+               DP_NOTICE(p_hwfn, "Failed to allocate ilt shadow table\n");
+               rc = -ENOMEM;
+               goto ilt_shadow_fail;
+       }
+
+       DP_VERBOSE(p_hwfn, QED_MSG_ILT,
+                  "Allocated 0x%x bytes for ilt shadow\n",
+                  (u32)(size * sizeof(struct qed_dma_mem)));
+
+       for_each_ilt_valid_client(i, clients) {
+               for (j = 0; j < ILT_CLI_PF_BLOCKS; j++) {
+                       p_blk = &clients[i].pf_blks[j];
+                       rc = qed_ilt_blk_alloc(p_hwfn, p_blk, i, 0);
+                       if (rc != 0)
+                               goto ilt_shadow_fail;
+               }
+               for (k = 0; k < p_mngr->vf_count; k++) {
+                       for (j = 0; j < ILT_CLI_VF_BLOCKS; j++) {
+                               u32 lines = clients[i].vf_total_lines * k;
+
+                               p_blk = &clients[i].vf_blks[j];
+                               rc = qed_ilt_blk_alloc(p_hwfn, p_blk, i, lines);
+                               if (rc != 0)
+                                       goto ilt_shadow_fail;
+                       }
+               }
+       }
+
+       return 0;
+
+ilt_shadow_fail:
+       qed_ilt_shadow_free(p_hwfn);
+       return rc;
+}
+
+static void qed_cid_map_free(struct qed_hwfn *p_hwfn)
+{
+       struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+       u32 type;
+
+       for (type = 0; type < MAX_CONN_TYPES; type++) {
+               kfree(p_mngr->acquired[type].cid_map);
+               p_mngr->acquired[type].max_count = 0;
+               p_mngr->acquired[type].start_cid = 0;
+       }
+}
+
+static int qed_cid_map_alloc(struct qed_hwfn *p_hwfn)
+{
+       struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+       u32 start_cid = 0;
+       u32 type;
+
+       for (type = 0; type < MAX_CONN_TYPES; type++) {
+               u32 cid_cnt = p_hwfn->p_cxt_mngr->conn_cfg[type].cid_count;
+               u32 size;
+
+               if (cid_cnt == 0)
+                       continue;
+
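+               /* One bit per CID, rounded up to a whole number of
+                * unsigned longs for the bitmap.
+                */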
+               size = DIV_ROUND_UP(cid_cnt,
+                                   sizeof(unsigned long) * BITS_PER_BYTE) *
+                      sizeof(unsigned long);
+               p_mngr->acquired[type].cid_map = kzalloc(size, GFP_KERNEL);
+               if (!p_mngr->acquired[type].cid_map)
+                       goto cid_map_fail;
+
+               p_mngr->acquired[type].max_count = cid_cnt;
+               p_mngr->acquired[type].start_cid = start_cid;
+
+               p_hwfn->p_cxt_mngr->conn_cfg[type].cid_start = start_cid;
+
+               DP_VERBOSE(p_hwfn, QED_MSG_CXT,
+                          "Type %08x start: %08x count %08x\n",
+                          type, p_mngr->acquired[type].start_cid,
+                          p_mngr->acquired[type].max_count);
+               start_cid += cid_cnt;
+       }
+
+       return 0;
+
+cid_map_fail:
+       qed_cid_map_free(p_hwfn);
+       return -ENOMEM;
+}
+
+int qed_cxt_mngr_alloc(struct qed_hwfn *p_hwfn)
+{
+       struct qed_ilt_client_cfg *clients;
+       struct qed_cxt_mngr *p_mngr;
+       u32 i;
+
+       p_mngr = kzalloc(sizeof(*p_mngr), GFP_KERNEL);
+       if (!p_mngr) {
+               DP_NOTICE(p_hwfn, "Failed to allocate `struct qed_cxt_mngr'\n");
+               return -ENOMEM;
+       }
+
+       /* Initialize ILT client registers */
+       clients = p_mngr->clients;
+       clients[ILT_CLI_CDUC].first.reg = ILT_CFG_REG(CDUC, FIRST_ILT);
+       clients[ILT_CLI_CDUC].last.reg = ILT_CFG_REG(CDUC, LAST_ILT);
+       clients[ILT_CLI_CDUC].p_size.reg = ILT_CFG_REG(CDUC, P_SIZE);
+
+       clients[ILT_CLI_QM].first.reg = ILT_CFG_REG(QM, FIRST_ILT);
+       clients[ILT_CLI_QM].last.reg = ILT_CFG_REG(QM, LAST_ILT);
+       clients[ILT_CLI_QM].p_size.reg = ILT_CFG_REG(QM, P_SIZE);
+
+       clients[ILT_CLI_TM].first.reg = ILT_CFG_REG(TM, FIRST_ILT);
+       clients[ILT_CLI_TM].last.reg = ILT_CFG_REG(TM, LAST_ILT);
+       clients[ILT_CLI_TM].p_size.reg = ILT_CFG_REG(TM, P_SIZE);
+
+       clients[ILT_CLI_SRC].first.reg = ILT_CFG_REG(SRC, FIRST_ILT);
+       clients[ILT_CLI_SRC].last.reg = ILT_CFG_REG(SRC, LAST_ILT);
+       clients[ILT_CLI_SRC].p_size.reg = ILT_CFG_REG(SRC, P_SIZE);
+
+       clients[ILT_CLI_CDUT].first.reg = ILT_CFG_REG(CDUT, FIRST_ILT);
+       clients[ILT_CLI_CDUT].last.reg = ILT_CFG_REG(CDUT, LAST_ILT);
+       clients[ILT_CLI_CDUT].p_size.reg = ILT_CFG_REG(CDUT, P_SIZE);
+
+       clients[ILT_CLI_TSDM].first.reg = ILT_CFG_REG(TSDM, FIRST_ILT);
+       clients[ILT_CLI_TSDM].last.reg = ILT_CFG_REG(TSDM, LAST_ILT);
+       clients[ILT_CLI_TSDM].p_size.reg = ILT_CFG_REG(TSDM, P_SIZE);
+       /* default ILT page size for all clients is 32K */
+       for (i = 0; i < ILT_CLI_MAX; i++)
+               p_mngr->clients[i].p_size.val = ILT_DEFAULT_HW_P_SIZE;
+
+       /* Initialize task sizes */
+       p_mngr->task_type_size[0] = TYPE0_TASK_CXT_SIZE(p_hwfn);
+       p_mngr->task_type_size[1] = TYPE1_TASK_CXT_SIZE(p_hwfn);
+
+       if (p_hwfn->cdev->p_iov_info)
+               p_mngr->vf_count = p_hwfn->cdev->p_iov_info->total_vfs;
+       /* Initialize the dynamic ILT allocation mutex */
+       mutex_init(&p_mngr->mutex);
+
+       /* Set the cxt manager pointer prior to further allocations */
+       p_hwfn->p_cxt_mngr = p_mngr;
+
+       return 0;
+}
+
+int qed_cxt_tables_alloc(struct qed_hwfn *p_hwfn)
+{
+       int rc;
+
+       /* Allocate the ILT shadow table */
+       rc = qed_ilt_shadow_alloc(p_hwfn);
+       if (rc) {
+               DP_NOTICE(p_hwfn, "Failed to allocate ilt memory\n");
+               goto tables_alloc_fail;
+       }
+
+       /* Allocate the T2  table */
+       rc = qed_cxt_src_t2_alloc(p_hwfn);
+       if (rc) {
+               DP_NOTICE(p_hwfn, "Failed to allocate T2 memory\n");
+               goto tables_alloc_fail;
+       }
+
+       /* Allocate and initialize the acquired cids bitmaps */
+       rc = qed_cid_map_alloc(p_hwfn);
+       if (rc) {
+               DP_NOTICE(p_hwfn, "Failed to allocate cid maps\n");
+               goto tables_alloc_fail;
+       }
+
+       return 0;
+
+tables_alloc_fail:
+       qed_cxt_mngr_free(p_hwfn);
+       return rc;
+}
+
+void qed_cxt_mngr_free(struct qed_hwfn *p_hwfn)
+{
+       if (!p_hwfn->p_cxt_mngr)
+               return;
+
+       qed_cid_map_free(p_hwfn);
+       qed_cxt_src_t2_free(p_hwfn);
+       qed_ilt_shadow_free(p_hwfn);
+       kfree(p_hwfn->p_cxt_mngr);
+
+       p_hwfn->p_cxt_mngr = NULL;
+}
+
+void qed_cxt_mngr_setup(struct qed_hwfn *p_hwfn)
+{
+       struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+       int type;
+
+       /* Reset acquired cids */
+       for (type = 0; type < MAX_CONN_TYPES; type++) {
+               u32 cid_cnt = p_hwfn->p_cxt_mngr->conn_cfg[type].cid_count;
+
+               if (cid_cnt == 0)
+                       continue;
+
+               memset(p_mngr->acquired[type].cid_map, 0,
+                      DIV_ROUND_UP(cid_cnt,
+                                   sizeof(unsigned long) * BITS_PER_BYTE) *
+                      sizeof(unsigned long));
+       }
+}
+
+/* CDU Common */
+#define CDUC_CXT_SIZE_SHIFT \
+       CDU_REG_CID_ADDR_PARAMS_CONTEXT_SIZE_SHIFT
+
+#define CDUC_CXT_SIZE_MASK \
+       (CDU_REG_CID_ADDR_PARAMS_CONTEXT_SIZE >> CDUC_CXT_SIZE_SHIFT)
+
+#define CDUC_BLOCK_WASTE_SHIFT \
+       CDU_REG_CID_ADDR_PARAMS_BLOCK_WASTE_SHIFT
+
+#define CDUC_BLOCK_WASTE_MASK \
+       (CDU_REG_CID_ADDR_PARAMS_BLOCK_WASTE >> CDUC_BLOCK_WASTE_SHIFT)
+
+#define CDUC_NCIB_SHIFT        \
+       CDU_REG_CID_ADDR_PARAMS_NCIB_SHIFT
+
+#define CDUC_NCIB_MASK \
+       (CDU_REG_CID_ADDR_PARAMS_NCIB >> CDUC_NCIB_SHIFT)
+
+#define CDUT_TYPE0_CXT_SIZE_SHIFT \
+       CDU_REG_SEGMENT0_PARAMS_T0_TID_SIZE_SHIFT
+
+#define CDUT_TYPE0_CXT_SIZE_MASK               \
+       (CDU_REG_SEGMENT0_PARAMS_T0_TID_SIZE >> \
+        CDUT_TYPE0_CXT_SIZE_SHIFT)
+
+#define CDUT_TYPE0_BLOCK_WASTE_SHIFT \
+       CDU_REG_SEGMENT0_PARAMS_T0_TID_BLOCK_WASTE_SHIFT
+
+#define CDUT_TYPE0_BLOCK_WASTE_MASK                   \
+       (CDU_REG_SEGMENT0_PARAMS_T0_TID_BLOCK_WASTE >> \
+        CDUT_TYPE0_BLOCK_WASTE_SHIFT)
+
+#define CDUT_TYPE0_NCIB_SHIFT \
+       CDU_REG_SEGMENT0_PARAMS_T0_NUM_TIDS_IN_BLOCK_SHIFT
+
+#define CDUT_TYPE0_NCIB_MASK                            \
+       (CDU_REG_SEGMENT0_PARAMS_T0_NUM_TIDS_IN_BLOCK >> \
+        CDUT_TYPE0_NCIB_SHIFT)
+
+#define CDUT_TYPE1_CXT_SIZE_SHIFT \
+       CDU_REG_SEGMENT1_PARAMS_T1_TID_SIZE_SHIFT
+
+#define CDUT_TYPE1_CXT_SIZE_MASK               \
+       (CDU_REG_SEGMENT1_PARAMS_T1_TID_SIZE >> \
+        CDUT_TYPE1_CXT_SIZE_SHIFT)
+
+#define CDUT_TYPE1_BLOCK_WASTE_SHIFT \
+       CDU_REG_SEGMENT1_PARAMS_T1_TID_BLOCK_WASTE_SHIFT
+
+#define CDUT_TYPE1_BLOCK_WASTE_MASK                   \
+       (CDU_REG_SEGMENT1_PARAMS_T1_TID_BLOCK_WASTE >> \
+        CDUT_TYPE1_BLOCK_WASTE_SHIFT)
+
+#define CDUT_TYPE1_NCIB_SHIFT \
+       CDU_REG_SEGMENT1_PARAMS_T1_NUM_TIDS_IN_BLOCK_SHIFT
+
+#define CDUT_TYPE1_NCIB_MASK                            \
+       (CDU_REG_SEGMENT1_PARAMS_T1_NUM_TIDS_IN_BLOCK >> \
+        CDUT_TYPE1_NCIB_SHIFT)
+
+static void qed_cdu_init_common(struct qed_hwfn *p_hwfn)
+{
+       u32 page_sz, elems_per_page, block_waste, cxt_size, cdu_params = 0;
+
+       /* CDUC - connection configuration */
+       page_sz = p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUC].p_size.val;
+       cxt_size = CONN_CXT_SIZE(p_hwfn);
+       elems_per_page = ILT_PAGE_IN_BYTES(page_sz) / cxt_size;
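+       /* block_waste is the number of bytes left unused at the end of each
+        * ILT page after packing whole contexts into it.
+        */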
+       block_waste = ILT_PAGE_IN_BYTES(page_sz) - elems_per_page * cxt_size;
+
+       SET_FIELD(cdu_params, CDUC_CXT_SIZE, cxt_size);
+       SET_FIELD(cdu_params, CDUC_BLOCK_WASTE, block_waste);
+       SET_FIELD(cdu_params, CDUC_NCIB, elems_per_page);
+       STORE_RT_REG(p_hwfn, CDU_REG_CID_ADDR_PARAMS_RT_OFFSET, cdu_params);
+
+       /* CDUT - type-0 tasks configuration */
+       page_sz = p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUT].p_size.val;
+       cxt_size = p_hwfn->p_cxt_mngr->task_type_size[0];
+       elems_per_page = ILT_PAGE_IN_BYTES(page_sz) / cxt_size;
+       block_waste = ILT_PAGE_IN_BYTES(page_sz) - elems_per_page * cxt_size;
+
+       /* cxt size and block-waste are multiples of 8 */
+       cdu_params = 0;
+       SET_FIELD(cdu_params, CDUT_TYPE0_CXT_SIZE, (cxt_size >> 3));
+       SET_FIELD(cdu_params, CDUT_TYPE0_BLOCK_WASTE, (block_waste >> 3));
+       SET_FIELD(cdu_params, CDUT_TYPE0_NCIB, elems_per_page);
+       STORE_RT_REG(p_hwfn, CDU_REG_SEGMENT0_PARAMS_RT_OFFSET, cdu_params);
+
+       /* CDUT - type-1 tasks configuration */
+       cxt_size = p_hwfn->p_cxt_mngr->task_type_size[1];
+       elems_per_page = ILT_PAGE_IN_BYTES(page_sz) / cxt_size;
+       block_waste = ILT_PAGE_IN_BYTES(page_sz) - elems_per_page * cxt_size;
+
+       /* cxt size and block-waste are multiples of 8 */
+       cdu_params = 0;
+       SET_FIELD(cdu_params, CDUT_TYPE1_CXT_SIZE, (cxt_size >> 3));
+       SET_FIELD(cdu_params, CDUT_TYPE1_BLOCK_WASTE, (block_waste >> 3));
+       SET_FIELD(cdu_params, CDUT_TYPE1_NCIB, elems_per_page);
+       STORE_RT_REG(p_hwfn, CDU_REG_SEGMENT1_PARAMS_RT_OFFSET, cdu_params);
+}
+
+/* CDU PF */
+#define CDU_SEG_REG_TYPE_SHIFT          CDU_SEG_TYPE_OFFSET_REG_TYPE_SHIFT
+#define CDU_SEG_REG_TYPE_MASK           0x1
+#define CDU_SEG_REG_OFFSET_SHIFT        0
+#define CDU_SEG_REG_OFFSET_MASK         CDU_SEG_TYPE_OFFSET_REG_OFFSET_MASK
+
+static void qed_cdu_init_pf(struct qed_hwfn *p_hwfn)
+{
+       struct qed_ilt_client_cfg *p_cli;
+       struct qed_tid_seg *p_seg;
+       u32 cdu_seg_params, offset;
+       int i;
+
+       static const u32 rt_type_offset_arr[] = {
+               CDU_REG_PF_SEG0_TYPE_OFFSET_RT_OFFSET,
+               CDU_REG_PF_SEG1_TYPE_OFFSET_RT_OFFSET,
+               CDU_REG_PF_SEG2_TYPE_OFFSET_RT_OFFSET,
+               CDU_REG_PF_SEG3_TYPE_OFFSET_RT_OFFSET
+       };
+
+       static const u32 rt_type_offset_fl_arr[] = {
+               CDU_REG_PF_FL_SEG0_TYPE_OFFSET_RT_OFFSET,
+               CDU_REG_PF_FL_SEG1_TYPE_OFFSET_RT_OFFSET,
+               CDU_REG_PF_FL_SEG2_TYPE_OFFSET_RT_OFFSET,
+               CDU_REG_PF_FL_SEG3_TYPE_OFFSET_RT_OFFSET
+       };
+
+       p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUT];
+
+       /* There are initializations only for CDUT during the PF phase */
+       for (i = 0; i < NUM_TASK_PF_SEGMENTS; i++) {
+               /* Segment 0 */
+               p_seg = qed_cxt_tid_seg_info(p_hwfn, i);
+               if (!p_seg)
+                       continue;
+
+               /* Note: start_line is already adjusted for the CDU
+                * segment register granularity, so we just need to
+                * divide. Adjustment is implicit as we assume ILT
+                * Page size is larger than 32K!
+                */
+               offset = (ILT_PAGE_IN_BYTES(p_cli->p_size.val) *
+                         (p_cli->pf_blks[CDUT_SEG_BLK(i)].start_line -
+                          p_cli->first.val)) / CDUT_SEG_ALIGNMET_IN_BYTES;
+
+               cdu_seg_params = 0;
+               SET_FIELD(cdu_seg_params, CDU_SEG_REG_TYPE, p_seg->type);
+               SET_FIELD(cdu_seg_params, CDU_SEG_REG_OFFSET, offset);
+               STORE_RT_REG(p_hwfn, rt_type_offset_arr[i], cdu_seg_params);
+
+               offset = (ILT_PAGE_IN_BYTES(p_cli->p_size.val) *
+                         (p_cli->pf_blks[CDUT_FL_SEG_BLK(i, PF)].start_line -
+                          p_cli->first.val)) / CDUT_SEG_ALIGNMET_IN_BYTES;
+
+               cdu_seg_params = 0;
+               SET_FIELD(cdu_seg_params, CDU_SEG_REG_TYPE, p_seg->type);
+               SET_FIELD(cdu_seg_params, CDU_SEG_REG_OFFSET, offset);
+               STORE_RT_REG(p_hwfn, rt_type_offset_fl_arr[i], cdu_seg_params);
+       }
+}
+
+void qed_qm_init_pf(struct qed_hwfn *p_hwfn)
+{
+       struct qed_qm_pf_rt_init_params params;
+       struct qed_qm_info *qm_info = &p_hwfn->qm_info;
+       struct qed_qm_iids iids;
+
+       memset(&iids, 0, sizeof(iids));
+       qed_cxt_qm_iids(p_hwfn, &iids);
+
+       memset(&params, 0, sizeof(params));
+       params.port_id = p_hwfn->port_id;
+       params.pf_id = p_hwfn->rel_pf_id;
+       params.max_phys_tcs_per_port = qm_info->max_phys_tcs_per_port;
+       params.is_first_pf = p_hwfn->first_on_engine;
+       params.num_pf_cids = iids.cids;
+       params.num_vf_cids = iids.vf_cids;
+       params.start_pq = qm_info->start_pq;
+       params.num_pf_pqs = qm_info->num_pqs - qm_info->num_vf_pqs;
+       params.num_vf_pqs = qm_info->num_vf_pqs;
+       params.start_vport = qm_info->start_vport;
+       params.num_vports = qm_info->num_vports;
+       params.pf_wfq = qm_info->pf_wfq;
+       params.pf_rl = qm_info->pf_rl;
+       params.pq_params = qm_info->qm_pq_params;
+       params.vport_params = qm_info->qm_vport_params;
+
+       qed_qm_pf_rt_init(p_hwfn, p_hwfn->p_main_ptt, &params);
+}
+
+/* CM PF */
+static int qed_cm_init_pf(struct qed_hwfn *p_hwfn)
+{
+       union qed_qm_pq_params pq_params;
+       u16 pq;
+
+       /* XCM pure-LB queue */
+       memset(&pq_params, 0, sizeof(pq_params));
+       pq_params.core.tc = LB_TC;
+       pq = qed_get_qm_pq(p_hwfn, PROTOCOLID_CORE, &pq_params);
+       STORE_RT_REG(p_hwfn, XCM_REG_CON_PHY_Q3_RT_OFFSET, pq);
+
+       return 0;
+}
+
+/* DQ PF */
+static void qed_dq_init_pf(struct qed_hwfn *p_hwfn)
+{
+       struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+       u32 dq_pf_max_cid = 0, dq_vf_max_cid = 0;
+
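+       /* The DORQ max-ICID registers are cumulative: register N holds the
+        * running total of CIDs of connection types 0..N, shifted by
+        * DQ_RANGE_SHIFT.
+        */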
+       dq_pf_max_cid += (p_mngr->conn_cfg[0].cid_count >> DQ_RANGE_SHIFT);
+       STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_0_RT_OFFSET, dq_pf_max_cid);
+
+       dq_vf_max_cid += (p_mngr->conn_cfg[0].cids_per_vf >> DQ_RANGE_SHIFT);
+       STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_0_RT_OFFSET, dq_vf_max_cid);
+
+       dq_pf_max_cid += (p_mngr->conn_cfg[1].cid_count >> DQ_RANGE_SHIFT);
+       STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_1_RT_OFFSET, dq_pf_max_cid);
+
+       dq_vf_max_cid += (p_mngr->conn_cfg[1].cids_per_vf >> DQ_RANGE_SHIFT);
+       STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_1_RT_OFFSET, dq_vf_max_cid);
+
+       dq_pf_max_cid += (p_mngr->conn_cfg[2].cid_count >> DQ_RANGE_SHIFT);
+       STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_2_RT_OFFSET, dq_pf_max_cid);
+
+       dq_vf_max_cid += (p_mngr->conn_cfg[2].cids_per_vf >> DQ_RANGE_SHIFT);
+       STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_2_RT_OFFSET, dq_vf_max_cid);
+
+       dq_pf_max_cid += (p_mngr->conn_cfg[3].cid_count >> DQ_RANGE_SHIFT);
+       STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_3_RT_OFFSET, dq_pf_max_cid);
+
+       dq_vf_max_cid += (p_mngr->conn_cfg[3].cids_per_vf >> DQ_RANGE_SHIFT);
+       STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_3_RT_OFFSET, dq_vf_max_cid);
+
+       dq_pf_max_cid += (p_mngr->conn_cfg[4].cid_count >> DQ_RANGE_SHIFT);
+       STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_4_RT_OFFSET, dq_pf_max_cid);
+
+       dq_vf_max_cid += (p_mngr->conn_cfg[4].cids_per_vf >> DQ_RANGE_SHIFT);
+       STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_4_RT_OFFSET, dq_vf_max_cid);
+
+       dq_pf_max_cid += (p_mngr->conn_cfg[5].cid_count >> DQ_RANGE_SHIFT);
+       STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_5_RT_OFFSET, dq_pf_max_cid);
+
+       dq_vf_max_cid += (p_mngr->conn_cfg[5].cids_per_vf >> DQ_RANGE_SHIFT);
+       STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_5_RT_OFFSET, dq_vf_max_cid);
+
+       /* Connection types 6 & 7 are not in use, yet they must be configured
+        * as the highest possible connection. Not configuring them means the
+        * defaults will be used, and with a large number of cids a bug may
+        * occur if the defaults are smaller than dq_pf_max_cid /
+        * dq_vf_max_cid.
+        */
+       STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_6_RT_OFFSET, dq_pf_max_cid);
+       STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_6_RT_OFFSET, dq_vf_max_cid);
+
+       STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_7_RT_OFFSET, dq_pf_max_cid);
+       STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_7_RT_OFFSET, dq_vf_max_cid);
+}
+
+static void qed_ilt_bounds_init(struct qed_hwfn *p_hwfn)
+{
+       struct qed_ilt_client_cfg *ilt_clients;
+       int i;
+
+       ilt_clients = p_hwfn->p_cxt_mngr->clients;
+       for_each_ilt_valid_client(i, ilt_clients) {
+               STORE_RT_REG(p_hwfn,
+                            ilt_clients[i].first.reg,
+                            ilt_clients[i].first.val);
+               STORE_RT_REG(p_hwfn,
+                            ilt_clients[i].last.reg, ilt_clients[i].last.val);
+               STORE_RT_REG(p_hwfn,
+                            ilt_clients[i].p_size.reg,
+                            ilt_clients[i].p_size.val);
+       }
+}
+
+static void qed_ilt_vf_bounds_init(struct qed_hwfn *p_hwfn)
+{
+       struct qed_ilt_client_cfg *p_cli;
+       u32 blk_factor;
+
+       /* For simplicity we set the 'block' to be an ILT page */
+       if (p_hwfn->cdev->p_iov_info) {
+               struct qed_hw_sriov_info *p_iov = p_hwfn->cdev->p_iov_info;
+
+               STORE_RT_REG(p_hwfn,
+                            PSWRQ2_REG_VF_BASE_RT_OFFSET,
+                            p_iov->first_vf_in_pf);
+               STORE_RT_REG(p_hwfn,
+                            PSWRQ2_REG_VF_LAST_ILT_RT_OFFSET,
+                            p_iov->first_vf_in_pf + p_iov->total_vfs);
+       }
+
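+       /* The block factor programmed below is log2 of the ILT page size
+        * expressed in 1KB units.
+        */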
+       p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUC];
+       blk_factor = ilog2(ILT_PAGE_IN_BYTES(p_cli->p_size.val) >> 10);
+       if (p_cli->active) {
+               STORE_RT_REG(p_hwfn,
+                            PSWRQ2_REG_CDUC_BLOCKS_FACTOR_RT_OFFSET,
+                            blk_factor);
+               STORE_RT_REG(p_hwfn,
+                            PSWRQ2_REG_CDUC_NUMBER_OF_PF_BLOCKS_RT_OFFSET,
+                            p_cli->pf_total_lines);
+               STORE_RT_REG(p_hwfn,
+                            PSWRQ2_REG_CDUC_VF_BLOCKS_RT_OFFSET,
+                            p_cli->vf_total_lines);
+       }
+
+       p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUT];
+       blk_factor = ilog2(ILT_PAGE_IN_BYTES(p_cli->p_size.val) >> 10);
+       if (p_cli->active) {
+               STORE_RT_REG(p_hwfn,
+                            PSWRQ2_REG_CDUT_BLOCKS_FACTOR_RT_OFFSET,
+                            blk_factor);
+               STORE_RT_REG(p_hwfn,
+                            PSWRQ2_REG_CDUT_NUMBER_OF_PF_BLOCKS_RT_OFFSET,
+                            p_cli->pf_total_lines);
+               STORE_RT_REG(p_hwfn,
+                            PSWRQ2_REG_CDUT_VF_BLOCKS_RT_OFFSET,
+                            p_cli->vf_total_lines);
+       }
+
+       p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_TM];
+       blk_factor = ilog2(ILT_PAGE_IN_BYTES(p_cli->p_size.val) >> 10);
+       if (p_cli->active) {
+               STORE_RT_REG(p_hwfn,
+                            PSWRQ2_REG_TM_BLOCKS_FACTOR_RT_OFFSET, blk_factor);
+               STORE_RT_REG(p_hwfn,
+                            PSWRQ2_REG_TM_NUMBER_OF_PF_BLOCKS_RT_OFFSET,
+                            p_cli->pf_total_lines);
+               STORE_RT_REG(p_hwfn,
+                            PSWRQ2_REG_TM_VF_BLOCKS_RT_OFFSET,
+                            p_cli->vf_total_lines);
+       }
+}
+
+/* ILT (PSWRQ2) PF */
+static void qed_ilt_init_pf(struct qed_hwfn *p_hwfn)
+{
+       struct qed_ilt_client_cfg *clients;
+       struct qed_cxt_mngr *p_mngr;
+       struct qed_dma_mem *p_shdw;
+       u32 line, rt_offst, i;
+
+       qed_ilt_bounds_init(p_hwfn);
+       qed_ilt_vf_bounds_init(p_hwfn);
+
+       p_mngr = p_hwfn->p_cxt_mngr;
+       p_shdw = p_mngr->ilt_shadow;
+       clients = p_hwfn->p_cxt_mngr->clients;
+
+       for_each_ilt_valid_client(i, clients) {
+               /* Client's 1st val and RT array are absolute, ILT shadows'
+                * lines are relative.
+                */
+               line = clients[i].first.val - p_mngr->pf_start_line;
+               rt_offst = PSWRQ2_REG_ILT_MEMORY_RT_OFFSET +
+                          clients[i].first.val * ILT_ENTRY_IN_REGS;
+
+               for (; line <= clients[i].last.val - p_mngr->pf_start_line;
+                    line++, rt_offst += ILT_ENTRY_IN_REGS) {
+                       u64 ilt_hw_entry = 0;
+
+                       /* p_virt could be NULL in case of dynamic
+                        * allocation
+                        */
+                       if (p_shdw[line].p_virt) {
+                               SET_FIELD(ilt_hw_entry, ILT_ENTRY_VALID, 1ULL);
+                               SET_FIELD(ilt_hw_entry, ILT_ENTRY_PHY_ADDR,
+                                         (p_shdw[line].p_phys >> 12));
+
+                               DP_VERBOSE(p_hwfn, QED_MSG_ILT,
+                                          "Setting RT[0x%08x] from ILT[0x%08x] [Client is %d] to Physical addr: 0x%llx\n",
+                                          rt_offst, line, i,
+                                          (u64)(p_shdw[line].p_phys >> 12));
+                       }
+
+                       STORE_RT_REG_AGG(p_hwfn, rt_offst, ilt_hw_entry);
+               }
+       }
+}
+
+/* SRC (Searcher) PF */
+static void qed_src_init_pf(struct qed_hwfn *p_hwfn)
+{
+       struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+       u32 rounded_conn_num, conn_num, conn_max;
+       struct qed_src_iids src_iids;
+
+       memset(&src_iids, 0, sizeof(src_iids));
+       qed_cxt_src_iids(p_mngr, &src_iids);
+       conn_num = src_iids.pf_cids + src_iids.per_vf_cids * p_mngr->vf_count;
+       if (!conn_num)
+               return;
+
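+       /* The number of searcher hash bits is derived from the connection
+        * count rounded up to the next power of two, with a floor of
+        * SRC_MIN_NUM_ELEMS.
+        */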
+       conn_max = max_t(u32, conn_num, SRC_MIN_NUM_ELEMS);
+       rounded_conn_num = roundup_pow_of_two(conn_max);
+
+       STORE_RT_REG(p_hwfn, SRC_REG_COUNTFREE_RT_OFFSET, conn_num);
+       STORE_RT_REG(p_hwfn, SRC_REG_NUMBER_HASH_BITS_RT_OFFSET,
+                    ilog2(rounded_conn_num));
+
+       STORE_RT_REG_AGG(p_hwfn, SRC_REG_FIRSTFREE_RT_OFFSET,
+                        p_hwfn->p_cxt_mngr->first_free);
+       STORE_RT_REG_AGG(p_hwfn, SRC_REG_LASTFREE_RT_OFFSET,
+                        p_hwfn->p_cxt_mngr->last_free);
+}
+
+/* Timers PF */
+#define TM_CFG_NUM_IDS_SHIFT            0
+#define TM_CFG_NUM_IDS_MASK             0xFFFFULL
+#define TM_CFG_PRE_SCAN_OFFSET_SHIFT    16
+#define TM_CFG_PRE_SCAN_OFFSET_MASK     0x1FFULL
+#define TM_CFG_PARENT_PF_SHIFT          25
+#define TM_CFG_PARENT_PF_MASK           0x7ULL
+
+#define TM_CFG_CID_PRE_SCAN_ROWS_SHIFT  30
+#define TM_CFG_CID_PRE_SCAN_ROWS_MASK   0x1FFULL
+
+#define TM_CFG_TID_OFFSET_SHIFT         30
+#define TM_CFG_TID_OFFSET_MASK          0x7FFFFULL
+#define TM_CFG_TID_PRE_SCAN_ROWS_SHIFT  49
+#define TM_CFG_TID_PRE_SCAN_ROWS_MASK   0x1FFULL
+
+static void qed_tm_init_pf(struct qed_hwfn *p_hwfn)
+{
+       struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+       u32 active_seg_mask = 0, tm_offset, rt_reg;
+       struct qed_tm_iids tm_iids;
+       u64 cfg_word;
+       u8 i;
+
+       memset(&tm_iids, 0, sizeof(tm_iids));
+       qed_cxt_tm_iids(p_mngr, &tm_iids);
+
+       /* @@@TBD No pre-scan for now */
+
+       /* Note: We assume consecutive VFs for a PF */
+       for (i = 0; i < p_mngr->vf_count; i++) {
+               cfg_word = 0;
+               SET_FIELD(cfg_word, TM_CFG_NUM_IDS, tm_iids.per_vf_cids);
+               SET_FIELD(cfg_word, TM_CFG_PRE_SCAN_OFFSET, 0);
+               SET_FIELD(cfg_word, TM_CFG_PARENT_PF, p_hwfn->rel_pf_id);
+               SET_FIELD(cfg_word, TM_CFG_CID_PRE_SCAN_ROWS, 0);
+               rt_reg = TM_REG_CONFIG_CONN_MEM_RT_OFFSET +
+                   (sizeof(cfg_word) / sizeof(u32)) *
+                   (p_hwfn->cdev->p_iov_info->first_vf_in_pf + i);
+               STORE_RT_REG_AGG(p_hwfn, rt_reg, cfg_word);
+       }
+
+       cfg_word = 0;
+       SET_FIELD(cfg_word, TM_CFG_NUM_IDS, tm_iids.pf_cids);
+       SET_FIELD(cfg_word, TM_CFG_PRE_SCAN_OFFSET, 0);
+       SET_FIELD(cfg_word, TM_CFG_PARENT_PF, 0);       /* n/a for PF */
+       SET_FIELD(cfg_word, TM_CFG_CID_PRE_SCAN_ROWS, 0);       /* scan all   */
+
+       rt_reg = TM_REG_CONFIG_CONN_MEM_RT_OFFSET +
+           (sizeof(cfg_word) / sizeof(u32)) *
+           (NUM_OF_VFS(p_hwfn->cdev) + p_hwfn->rel_pf_id);
+       STORE_RT_REG_AGG(p_hwfn, rt_reg, cfg_word);
+
+       /* enable scan */
+       STORE_RT_REG(p_hwfn, TM_REG_PF_ENABLE_CONN_RT_OFFSET,
+                    tm_iids.pf_cids ? 0x1 : 0x0);
+
+       /* @@@TBD how to enable the scan for the VFs */
+
+       tm_offset = tm_iids.per_vf_cids;
+
+       /* Note: We assume consecutive VFs for a PF */
+       for (i = 0; i < p_mngr->vf_count; i++) {
+               cfg_word = 0;
+               SET_FIELD(cfg_word, TM_CFG_NUM_IDS, tm_iids.per_vf_tids);
+               SET_FIELD(cfg_word, TM_CFG_PRE_SCAN_OFFSET, 0);
+               SET_FIELD(cfg_word, TM_CFG_PARENT_PF, p_hwfn->rel_pf_id);
+               SET_FIELD(cfg_word, TM_CFG_TID_OFFSET, tm_offset);
+               SET_FIELD(cfg_word, TM_CFG_TID_PRE_SCAN_ROWS, (u64) 0);
+
+               rt_reg = TM_REG_CONFIG_TASK_MEM_RT_OFFSET +
+                   (sizeof(cfg_word) / sizeof(u32)) *
+                   (p_hwfn->cdev->p_iov_info->first_vf_in_pf + i);
+
+               STORE_RT_REG_AGG(p_hwfn, rt_reg, cfg_word);
+       }
+
+       tm_offset = tm_iids.pf_cids;
+       for (i = 0; i < NUM_TASK_PF_SEGMENTS; i++) {
+               cfg_word = 0;
+               SET_FIELD(cfg_word, TM_CFG_NUM_IDS, tm_iids.pf_tids[i]);
+               SET_FIELD(cfg_word, TM_CFG_PRE_SCAN_OFFSET, 0);
+               SET_FIELD(cfg_word, TM_CFG_PARENT_PF, 0);
+               SET_FIELD(cfg_word, TM_CFG_TID_OFFSET, tm_offset);
+               SET_FIELD(cfg_word, TM_CFG_TID_PRE_SCAN_ROWS, (u64) 0);
+
+               rt_reg = TM_REG_CONFIG_TASK_MEM_RT_OFFSET +
+                   (sizeof(cfg_word) / sizeof(u32)) *
+                   (NUM_OF_VFS(p_hwfn->cdev) +
+                    p_hwfn->rel_pf_id * NUM_TASK_PF_SEGMENTS + i);
+
+               STORE_RT_REG_AGG(p_hwfn, rt_reg, cfg_word);
+               active_seg_mask |= (tm_iids.pf_tids[i] ? (1 << i) : 0);
+
+               tm_offset += tm_iids.pf_tids[i];
+       }
+
+       if (p_hwfn->hw_info.personality == QED_PCI_ETH_ROCE)
+               active_seg_mask = 0;
+
+       STORE_RT_REG(p_hwfn, TM_REG_PF_ENABLE_TASK_RT_OFFSET, active_seg_mask);
+
+       /* @@@TBD how to enable the scan for the VFs */
+}
+
+void qed_cxt_hw_init_common(struct qed_hwfn *p_hwfn)
+{
+       qed_cdu_init_common(p_hwfn);
+}
+
+void qed_cxt_hw_init_pf(struct qed_hwfn *p_hwfn)
+{
+       qed_qm_init_pf(p_hwfn);
+       qed_cm_init_pf(p_hwfn);
+       qed_dq_init_pf(p_hwfn);
+       qed_cdu_init_pf(p_hwfn);
+       qed_ilt_init_pf(p_hwfn);
+       qed_src_init_pf(p_hwfn);
+       qed_tm_init_pf(p_hwfn);
+}
+
+int qed_cxt_acquire_cid(struct qed_hwfn *p_hwfn,
+                       enum protocol_type type,
+                       u32 *p_cid)
+{
+       struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+       u32 rel_cid;
+
+       if (type >= MAX_CONN_TYPES || !p_mngr->acquired[type].cid_map) {
+               DP_NOTICE(p_hwfn, "Invalid protocol type %d", type);
+               return -EINVAL;
+       }
+
+       rel_cid = find_first_zero_bit(p_mngr->acquired[type].cid_map,
+                                     p_mngr->acquired[type].max_count);
+
+       if (rel_cid >= p_mngr->acquired[type].max_count) {
+               DP_NOTICE(p_hwfn, "no CID available for protocol %d\n",
+                         type);
+               return -EINVAL;
+       }
+
+       __set_bit(rel_cid, p_mngr->acquired[type].cid_map);
+
+       *p_cid = rel_cid + p_mngr->acquired[type].start_cid;
+
+       return 0;
+}
+
+static bool qed_cxt_test_cid_acquired(struct qed_hwfn *p_hwfn,
+                                     u32 cid,
+                                     enum protocol_type *p_type)
+{
+       struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+       struct qed_cid_acquired_map *p_map;
+       enum protocol_type p;
+       u32 rel_cid;
+
+       /* Iterate over protocols and find matching cid range */
+       for (p = 0; p < MAX_CONN_TYPES; p++) {
+               p_map = &p_mngr->acquired[p];
+
+               if (!p_map->cid_map)
+                       continue;
+               if (cid >= p_map->start_cid &&
+                   cid < p_map->start_cid + p_map->max_count)
+                       break;
+       }
+       *p_type = p;
+
+       if (p == MAX_CONN_TYPES) {
+               DP_NOTICE(p_hwfn, "Invalid CID %d", cid);
+               return false;
+       }
+
+       rel_cid = cid - p_map->start_cid;
+       if (!test_bit(rel_cid, p_map->cid_map)) {
+               DP_NOTICE(p_hwfn, "CID %d not acquired", cid);
+               return false;
+       }
+       return true;
+}
+
+void qed_cxt_release_cid(struct qed_hwfn *p_hwfn,
+                        u32 cid)
+{
+       struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+       enum protocol_type type;
+       bool b_acquired;
+       u32 rel_cid;
+
+       /* Test acquired and find matching per-protocol map */
+       b_acquired = qed_cxt_test_cid_acquired(p_hwfn, cid, &type);
+
+       if (!b_acquired)
+               return;
+
+       rel_cid = cid - p_mngr->acquired[type].start_cid;
+       __clear_bit(rel_cid, p_mngr->acquired[type].cid_map);
+}
+
+int qed_cxt_get_cid_info(struct qed_hwfn *p_hwfn,
+                        struct qed_cxt_info *p_info)
+{
+       struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+       u32 conn_cxt_size, hw_p_size, cxts_per_p, line;
+       enum protocol_type type;
+       bool b_acquired;
+
+       /* Test acquired and find matching per-protocol map */
+       b_acquired = qed_cxt_test_cid_acquired(p_hwfn, p_info->iid, &type);
+
+       if (!b_acquired)
+               return -EINVAL;
+
+       /* set the protocol type */
+       p_info->type = type;
+
+       /* compute context virtual pointer */
+       hw_p_size = p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUC].p_size.val;
+
+       conn_cxt_size = CONN_CXT_SIZE(p_hwfn);
+       cxts_per_p = ILT_PAGE_IN_BYTES(hw_p_size) / conn_cxt_size;
+       line = p_info->iid / cxts_per_p;
+
+       /* Make sure context is allocated (dynamic allocation) */
+       if (!p_mngr->ilt_shadow[line].p_virt)
+               return -EINVAL;
+
+       p_info->p_cxt = p_mngr->ilt_shadow[line].p_virt +
+                       p_info->iid % cxts_per_p * conn_cxt_size;
+
+       DP_VERBOSE(p_hwfn, (QED_MSG_ILT | QED_MSG_CXT),
+                  "Accessing ILT shadow[%d]: CXT pointer is at %p (for iid %d)\n",
+                  p_info->iid / cxts_per_p, p_info->p_cxt, p_info->iid);
+
+       return 0;
+}
+
+void qed_rdma_set_pf_params(struct qed_hwfn *p_hwfn,
+                           struct qed_rdma_pf_params *p_params)
+{
+       u32 num_cons, num_tasks, num_qps, num_mrs, num_srqs;
+       enum protocol_type proto;
+
+       num_mrs = min_t(u32, RDMA_MAX_TIDS, p_params->num_mrs);
+       num_tasks = num_mrs;    /* each mr uses a single task id */
+       num_srqs = min_t(u32, 32 * 1024, p_params->num_srqs);
+
+       switch (p_hwfn->hw_info.personality) {
+       case QED_PCI_ETH_ROCE:
+               num_qps = min_t(u32, ROCE_MAX_QPS, p_params->num_qps);
+               num_cons = num_qps * 2; /* each QP requires two connections */
+               proto = PROTOCOLID_ROCE;
+               break;
+       default:
+               return;
+       }
+
+       if (num_cons && num_tasks) {
+               qed_cxt_set_proto_cid_count(p_hwfn, proto, num_cons, 0);
+
+               /* Deliberately passing ROCE for the task id. This is because
+                * iWARP / RoCE share the task id.
+                */
+               qed_cxt_set_proto_tid_count(p_hwfn, PROTOCOLID_ROCE,
+                                           QED_CXT_ROCE_TID_SEG, 1,
+                                           num_tasks, false);
+               qed_cxt_set_srq_count(p_hwfn, num_srqs);
+       } else {
+               DP_INFO(p_hwfn->cdev,
+                       "RDMA personality used without setting params!\n");
+       }
+}
+
+int qed_cxt_set_pf_params(struct qed_hwfn *p_hwfn)
+{
+       /* Set the number of required CORE connections */
+       u32 core_cids = 1; /* SPQ */
+
+       qed_cxt_set_proto_cid_count(p_hwfn, PROTOCOLID_CORE, core_cids, 0);
+
+       switch (p_hwfn->hw_info.personality) {
+       case QED_PCI_ETH_ROCE:
+       {
+               qed_rdma_set_pf_params(p_hwfn,
+                                      &p_hwfn->
+                                      pf_params.rdma_pf_params);
+               /* no need for break since RoCE coexists with Ethernet */
+       }
+       case QED_PCI_ETH:
+       {
+               struct qed_eth_pf_params *p_params =
+                   &p_hwfn->pf_params.eth_pf_params;
+
+               qed_cxt_set_proto_cid_count(p_hwfn, PROTOCOLID_ETH,
+                                           p_params->num_cons, 1);
+               break;
+       }
+       case QED_PCI_ISCSI:
+       {
+               struct qed_iscsi_pf_params *p_params;
+
+               p_params = &p_hwfn->pf_params.iscsi_pf_params;
+
+               if (p_params->num_cons && p_params->num_tasks) {
+                       qed_cxt_set_proto_cid_count(p_hwfn,
+                                                   PROTOCOLID_ISCSI,
+                                                   p_params->num_cons,
+                                                   0);
+
+                       qed_cxt_set_proto_tid_count(p_hwfn,
+                                                   PROTOCOLID_ISCSI,
+                                                   QED_CXT_ISCSI_TID_SEG,
+                                                   0,
+                                                   p_params->num_tasks,
+                                                   true);
+               } else {
+                       DP_INFO(p_hwfn->cdev,
+                               "Iscsi personality used without setting params!\n");
+               }
+               break;
+       }
+       default:
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+int qed_cxt_get_tid_mem_info(struct qed_hwfn *p_hwfn,
+                            struct qed_tid_mem *p_info)
+{
+       struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+       u32 proto, seg, total_lines, i, shadow_line;
+       struct qed_ilt_client_cfg *p_cli;
+       struct qed_ilt_cli_blk *p_fl_seg;
+       struct qed_tid_seg *p_seg_info;
+
+       /* Verify the personality */
+       switch (p_hwfn->hw_info.personality) {
+       case QED_PCI_ISCSI:
+               proto = PROTOCOLID_ISCSI;
+               seg = QED_CXT_ISCSI_TID_SEG;
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       p_cli = &p_mngr->clients[ILT_CLI_CDUT];
+       if (!p_cli->active)
+               return -EINVAL;
+
+       p_seg_info = &p_mngr->conn_cfg[proto].tid_seg[seg];
+       if (!p_seg_info->has_fl_mem)
+               return -EINVAL;
+
+       p_fl_seg = &p_cli->pf_blks[CDUT_FL_SEG_BLK(seg, PF)];
+       total_lines = DIV_ROUND_UP(p_fl_seg->total_size,
+                                  p_fl_seg->real_size_in_page);
+
+       for (i = 0; i < total_lines; i++) {
+               shadow_line = i + p_fl_seg->start_line -
+                   p_hwfn->p_cxt_mngr->pf_start_line;
+               p_info->blocks[i] = p_mngr->ilt_shadow[shadow_line].p_virt;
+       }
+       p_info->waste = ILT_PAGE_IN_BYTES(p_cli->p_size.val) -
+           p_fl_seg->real_size_in_page;
+       p_info->tid_size = p_mngr->task_type_size[p_seg_info->type];
+       p_info->num_tids_per_block = p_fl_seg->real_size_in_page /
+           p_info->tid_size;
+
+       return 0;
+}
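For illustration, a standalone sketch of the total_lines/waste/tids-per-block arithmetic above; the sizes are hypothetical stand-ins for the CDUT client configuration:

#include <stdint.h>
#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	uint32_t page_bytes = 4096;		/* hypothetical ILT page size */
	uint32_t real_size_in_page = 3840;	/* usable bytes per ILT page */
	uint32_t total_size = 100000;		/* bytes needed by the segment */
	uint32_t tid_size = 128;		/* bytes per task context */

	uint32_t total_lines = DIV_ROUND_UP(total_size, real_size_in_page); /* 27 */
	uint32_t waste = page_bytes - real_size_in_page;		     /* 256 */
	uint32_t tids_per_block = real_size_in_page / tid_size;	     /* 30 */

	printf("lines %u, waste %u bytes, tids per block %u\n",
	       total_lines, waste, tids_per_block);
	return 0;
}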
+
+/* This function is very RoCE-oriented; if another protocol needs this
+ * feature in the future, the function will have to be made more generic.
+ */
+int
+qed_cxt_dynamic_ilt_alloc(struct qed_hwfn *p_hwfn,
+                         enum qed_cxt_elem_type elem_type, u32 iid)
+{
+       u32 reg_offset, shadow_line, elem_size, hw_p_size, elems_per_p, line;
+       struct qed_ilt_client_cfg *p_cli;
+       struct qed_ilt_cli_blk *p_blk;
+       struct qed_ptt *p_ptt;
+       dma_addr_t p_phys;
+       u64 ilt_hw_entry;
+       void *p_virt;
+       int rc = 0;
+
+       switch (elem_type) {
+       case QED_ELEM_CXT:
+               p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUC];
+               elem_size = CONN_CXT_SIZE(p_hwfn);
+               p_blk = &p_cli->pf_blks[CDUC_BLK];
+               break;
+       case QED_ELEM_SRQ:
+               p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_TSDM];
+               elem_size = SRQ_CXT_SIZE;
+               p_blk = &p_cli->pf_blks[SRQ_BLK];
+               break;
+       case QED_ELEM_TASK:
+               p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUT];
+               elem_size = TYPE1_TASK_CXT_SIZE(p_hwfn);
+               p_blk = &p_cli->pf_blks[CDUT_SEG_BLK(QED_CXT_ROCE_TID_SEG)];
+               break;
+       default:
+               DP_NOTICE(p_hwfn, "-EINVALID elem type = %d", elem_type);
+               return -EINVAL;
+       }
+
+       /* Calculate line in ilt */
+       hw_p_size = p_cli->p_size.val;
+       elems_per_p = ILT_PAGE_IN_BYTES(hw_p_size) / elem_size;
+       line = p_blk->start_line + (iid / elems_per_p);
+       shadow_line = line - p_hwfn->p_cxt_mngr->pf_start_line;
+
+       /* If the line is already allocated, do nothing; otherwise allocate it
+        * and write it to the PSWRQ2 registers.
+        * This section can run in parallel from different contexts, so mutex
+        * protection is needed.
+        */
+
+       mutex_lock(&p_hwfn->p_cxt_mngr->mutex);
+
+       if (p_hwfn->p_cxt_mngr->ilt_shadow[shadow_line].p_virt)
+               goto out0;
+
+       p_ptt = qed_ptt_acquire(p_hwfn);
+       if (!p_ptt) {
+               DP_NOTICE(p_hwfn,
+                         "QED_TIME_OUT on ptt acquire - dynamic allocation");
+               rc = -EBUSY;
+               goto out0;
+       }
+
+       p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
+                                   p_blk->real_size_in_page,
+                                   &p_phys, GFP_KERNEL);
+       if (!p_virt) {
+               rc = -ENOMEM;
+               goto out1;
+       }
+       memset(p_virt, 0, p_blk->real_size_in_page);
+
+       /* Setting refTagMask to 0xF is required only for RoCE DIF MRs, to
+        * compensate for a HW bug, but it is configured even if DIF is not
+        * enabled. This is harmless and allows us to avoid a dedicated API. We
+        * configure the field for all of the contexts on the newly allocated
+        * page.
+        */
+       if (elem_type == QED_ELEM_TASK) {
+               u32 elem_i;
+               u8 *elem_start = (u8 *)p_virt;
+               union type1_task_context *elem;
+
+               for (elem_i = 0; elem_i < elems_per_p; elem_i++) {
+                       elem = (union type1_task_context *)elem_start;
+                       SET_FIELD(elem->roce_ctx.tdif_context.flags1,
+                                 TDIF_TASK_CONTEXT_REFTAGMASK, 0xf);
+                       elem_start += TYPE1_TASK_CXT_SIZE(p_hwfn);
+               }
+       }
+
+       p_hwfn->p_cxt_mngr->ilt_shadow[shadow_line].p_virt = p_virt;
+       p_hwfn->p_cxt_mngr->ilt_shadow[shadow_line].p_phys = p_phys;
+       p_hwfn->p_cxt_mngr->ilt_shadow[shadow_line].size =
+           p_blk->real_size_in_page;
+
+       /* compute absolute offset */
+       reg_offset = PSWRQ2_REG_ILT_MEMORY +
+           (line * ILT_REG_SIZE_IN_BYTES * ILT_ENTRY_IN_REGS);
+
+       ilt_hw_entry = 0;
+       SET_FIELD(ilt_hw_entry, ILT_ENTRY_VALID, 1ULL);
+       SET_FIELD(ilt_hw_entry,
+                 ILT_ENTRY_PHY_ADDR,
+                 (p_hwfn->p_cxt_mngr->ilt_shadow[shadow_line].p_phys >> 12));
+
+       /* Write via DMAE since the PSWRQ2_REG_ILT_MEMORY line is a wide-bus */
+       qed_dmae_host2grc(p_hwfn, p_ptt, (u64) (uintptr_t)&ilt_hw_entry,
+                         reg_offset, sizeof(ilt_hw_entry) / sizeof(u32), 0);
+
+       if (elem_type == QED_ELEM_CXT) {
+               u32 last_cid_allocated = (1 + (iid / elems_per_p)) *
+                   elems_per_p;
+
+               /* Update the relevant register in the parser */
+               qed_wr(p_hwfn, p_ptt, PRS_REG_ROCE_DEST_QP_MAX_PF,
+                      last_cid_allocated - 1);
+
+               if (!p_hwfn->b_rdma_enabled_in_prs) {
+                       /* Enable RoCE search */
+                       qed_wr(p_hwfn, p_ptt, p_hwfn->rdma_prs_search_reg, 1);
+                       p_hwfn->b_rdma_enabled_in_prs = true;
+               }
+       }
+
+out1:
+       qed_ptt_release(p_hwfn, p_ptt);
+out0:
+       mutex_unlock(&p_hwfn->p_cxt_mngr->mutex);
+
+       return rc;
+}
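For illustration, a standalone sketch of the ILT line / shadow line computation performed above for a dynamically allocated element; all numeric values are hypothetical stand-ins for the ILT client configuration:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t page_bytes = 4096;	/* hypothetical ILT page size */
	uint32_t elem_size = 256;	/* hypothetical context element size */
	uint32_t blk_start_line = 1000;	/* hypothetical p_blk->start_line */
	uint32_t pf_start_line = 900;	/* hypothetical pf_start_line */
	uint32_t iid = 500;

	uint32_t elems_per_page = page_bytes / elem_size;	/* 16 */
	uint32_t line = blk_start_line + iid / elems_per_page;	/* 1031 */
	uint32_t shadow_line = line - pf_start_line;		/* 131 */

	printf("iid %u -> ILT line %u, shadow line %u\n", iid, line, shadow_line);
	return 0;
}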
+
+/* This function is very RoCE-oriented; if another protocol needs this
+ * feature in the future, the function will have to be made more generic.
+ */
+static int
+qed_cxt_free_ilt_range(struct qed_hwfn *p_hwfn,
+                      enum qed_cxt_elem_type elem_type,
+                      u32 start_iid, u32 count)
+{
+       u32 start_line, end_line, shadow_start_line, shadow_end_line;
+       u32 reg_offset, elem_size, hw_p_size, elems_per_p;
+       struct qed_ilt_client_cfg *p_cli;
+       struct qed_ilt_cli_blk *p_blk;
+       u32 end_iid = start_iid + count;
+       struct qed_ptt *p_ptt;
+       u64 ilt_hw_entry = 0;
+       u32 i;
+
+       switch (elem_type) {
+       case QED_ELEM_CXT:
+               p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUC];
+               elem_size = CONN_CXT_SIZE(p_hwfn);
+               p_blk = &p_cli->pf_blks[CDUC_BLK];
+               break;
+       case QED_ELEM_SRQ:
+               p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_TSDM];
+               elem_size = SRQ_CXT_SIZE;
+               p_blk = &p_cli->pf_blks[SRQ_BLK];
+               break;
+       case QED_ELEM_TASK:
+               p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUT];
+               elem_size = TYPE1_TASK_CXT_SIZE(p_hwfn);
+               p_blk = &p_cli->pf_blks[CDUT_SEG_BLK(QED_CXT_ROCE_TID_SEG)];
+               break;
+       default:
+               DP_NOTICE(p_hwfn, "-EINVALID elem type = %d", elem_type);
+               return -EINVAL;
+       }
+
+       /* Calculate line in ilt */
+       hw_p_size = p_cli->p_size.val;
+       elems_per_p = ILT_PAGE_IN_BYTES(hw_p_size) / elem_size;
+       start_line = p_blk->start_line + (start_iid / elems_per_p);
+       end_line = p_blk->start_line + (end_iid / elems_per_p);
+       if (((end_iid + 1) / elems_per_p) != (end_iid / elems_per_p))
+               end_line--;
+
+       shadow_start_line = start_line - p_hwfn->p_cxt_mngr->pf_start_line;
+       shadow_end_line = end_line - p_hwfn->p_cxt_mngr->pf_start_line;
+
+       p_ptt = qed_ptt_acquire(p_hwfn);
+       if (!p_ptt) {
+               DP_NOTICE(p_hwfn,
+                         "QED_TIME_OUT on ptt acquire - dynamic allocation");
+               return -EBUSY;
+       }
+
+       for (i = shadow_start_line; i < shadow_end_line; i++) {
+               if (!p_hwfn->p_cxt_mngr->ilt_shadow[i].p_virt)
+                       continue;
+
+               dma_free_coherent(&p_hwfn->cdev->pdev->dev,
+                                 p_hwfn->p_cxt_mngr->ilt_shadow[i].size,
+                                 p_hwfn->p_cxt_mngr->ilt_shadow[i].p_virt,
+                                 p_hwfn->p_cxt_mngr->ilt_shadow[i].p_phys);
+
+               p_hwfn->p_cxt_mngr->ilt_shadow[i].p_virt = NULL;
+               p_hwfn->p_cxt_mngr->ilt_shadow[i].p_phys = 0;
+               p_hwfn->p_cxt_mngr->ilt_shadow[i].size = 0;
+
+               /* compute absolute offset */
+               reg_offset = PSWRQ2_REG_ILT_MEMORY +
+                   ((start_line++) * ILT_REG_SIZE_IN_BYTES *
+                    ILT_ENTRY_IN_REGS);
+
+               /* Write via DMAE since the PSWRQ2_REG_ILT_MEMORY line is a
+                * wide-bus.
+                */
+               qed_dmae_host2grc(p_hwfn, p_ptt,
+                                 (u64) (uintptr_t) &ilt_hw_entry,
+                                 reg_offset,
+                                 sizeof(ilt_hw_entry) / sizeof(u32),
+                                 0);
+       }
+
+       qed_ptt_release(p_hwfn, p_ptt);
+
+       return 0;
+}
+
+int qed_cxt_free_proto_ilt(struct qed_hwfn *p_hwfn, enum protocol_type proto)
+{
+       int rc;
+       u32 cid;
+
+       /* Free Connection CXT */
+       rc = qed_cxt_free_ilt_range(p_hwfn, QED_ELEM_CXT,
+                                   qed_cxt_get_proto_cid_start(p_hwfn,
+                                                               proto),
+                                   qed_cxt_get_proto_cid_count(p_hwfn,
+                                                               proto, &cid));
+
+       if (rc)
+               return rc;
+
+       /* Free Task CXT */
+       rc = qed_cxt_free_ilt_range(p_hwfn, QED_ELEM_TASK, 0,
+                                   qed_cxt_get_proto_tid_count(p_hwfn, proto));
+       if (rc)
+               return rc;
+
+       /* Free TSDM CXT */
+       rc = qed_cxt_free_ilt_range(p_hwfn, QED_ELEM_SRQ, 0,
+                                   qed_cxt_get_srq_count(p_hwfn));
+
+       return rc;
+}
+
+int qed_cxt_get_task_ctx(struct qed_hwfn *p_hwfn,
+                        u32 tid, u8 ctx_type, void **pp_task_ctx)
+{
+       struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+       struct qed_ilt_client_cfg *p_cli;
+       struct qed_ilt_cli_blk *p_seg;
+       struct qed_tid_seg *p_seg_info;
+       u32 proto, seg;
+       u32 total_lines;
+       u32 tid_size, ilt_idx;
+       u32 num_tids_per_block;
+
+       /* Verify the personality */
+       switch (p_hwfn->hw_info.personality) {
+       case QED_PCI_ISCSI:
+               proto = PROTOCOLID_ISCSI;
+               seg = QED_CXT_ISCSI_TID_SEG;
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       p_cli = &p_mngr->clients[ILT_CLI_CDUT];
+       if (!p_cli->active)
+               return -EINVAL;
+
+       p_seg_info = &p_mngr->conn_cfg[proto].tid_seg[seg];
+
+       if (ctx_type == QED_CTX_WORKING_MEM) {
+               p_seg = &p_cli->pf_blks[CDUT_SEG_BLK(seg)];
+       } else if (ctx_type == QED_CTX_FL_MEM) {
+               if (!p_seg_info->has_fl_mem)
+                       return -EINVAL;
+               p_seg = &p_cli->pf_blks[CDUT_FL_SEG_BLK(seg, PF)];
+       } else {
+               return -EINVAL;
+       }
+       total_lines = DIV_ROUND_UP(p_seg->total_size, p_seg->real_size_in_page);
+       tid_size = p_mngr->task_type_size[p_seg_info->type];
+       num_tids_per_block = p_seg->real_size_in_page / tid_size;
+
+       if (total_lines < tid / num_tids_per_block)
+               return -EINVAL;
+
+       ilt_idx = tid / num_tids_per_block + p_seg->start_line -
+                 p_mngr->pf_start_line;
+       *pp_task_ctx = (u8 *)p_mngr->ilt_shadow[ilt_idx].p_virt +
+                      (tid % num_tids_per_block) * tid_size;
+
+       return 0;
+}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.h b/drivers/net/ethernet/qlogic/qed/qed_cxt.h
new file mode 100644 (file)
index 0000000..c6f6f2e
--- /dev/null
@@ -0,0 +1,176 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015 QLogic Corporation
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#ifndef _QED_CXT_H
+#define _QED_CXT_H
+
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/qed/qed_if.h>
+#include "qed_hsi.h"
+#include "qed.h"
+
+struct qed_cxt_info {
+       void                    *p_cxt;
+       u32                     iid;
+       enum protocol_type      type;
+};
+
+#define MAX_TID_BLOCKS                  512
+struct qed_tid_mem {
+       u32 tid_size;
+       u32 num_tids_per_block;
+       u32 waste;
+       u8 *blocks[MAX_TID_BLOCKS];     /* 4K */
+};
+
+/**
+ * @brief qed_cxt_acquire_cid - Acquire a new cid of a specific protocol type
+ *
+ * @param p_hwfn
+ * @param type
+ * @param p_cid
+ *
+ * @return int
+ */
+int qed_cxt_acquire_cid(struct qed_hwfn *p_hwfn,
+                       enum protocol_type type,
+                       u32 *p_cid);
+
+/**
+ * @brief qed_cxt_get_cid_info - Returns the context info for a specific cid
+ *
+ *
+ * @param p_hwfn
+ * @param p_info in/out
+ *
+ * @return int
+ */
+int qed_cxt_get_cid_info(struct qed_hwfn *p_hwfn,
+                        struct qed_cxt_info *p_info);
+
+/**
+ * @brief qed_cxt_get_tid_mem_info
+ *
+ * @param p_hwfn
+ * @param p_info
+ *
+ * @return int
+ */
+int qed_cxt_get_tid_mem_info(struct qed_hwfn *p_hwfn,
+                            struct qed_tid_mem *p_info);
+
+#define QED_CXT_ISCSI_TID_SEG  PROTOCOLID_ISCSI
+#define QED_CXT_ROCE_TID_SEG   PROTOCOLID_ROCE
+enum qed_cxt_elem_type {
+       QED_ELEM_CXT,
+       QED_ELEM_SRQ,
+       QED_ELEM_TASK
+};
+
+u32 qed_cxt_get_proto_cid_count(struct qed_hwfn *p_hwfn,
+                               enum protocol_type type, u32 *vf_cid);
+
+/**
+ * @brief qed_cxt_set_pf_params - Set the PF params for cxt init
+ *
+ * @param p_hwfn
+ *
+ * @return int
+ */
+int qed_cxt_set_pf_params(struct qed_hwfn *p_hwfn);
+
+/**
+ * @brief qed_cxt_cfg_ilt_compute - compute ILT init parameters
+ *
+ * @param p_hwfn
+ *
+ * @return int
+ */
+int qed_cxt_cfg_ilt_compute(struct qed_hwfn *p_hwfn);
+
+/**
+ * @brief qed_cxt_mngr_alloc - Allocate and init the context manager struct
+ *
+ * @param p_hwfn
+ *
+ * @return int
+ */
+int qed_cxt_mngr_alloc(struct qed_hwfn *p_hwfn);
+
+/**
+ * @brief qed_cxt_mngr_free
+ *
+ * @param p_hwfn
+ */
+void qed_cxt_mngr_free(struct qed_hwfn *p_hwfn);
+
+/**
+ * @brief qed_cxt_tables_alloc - Allocate ILT shadow, Searcher T2, acquired map
+ *
+ * @param p_hwfn
+ *
+ * @return int
+ */
+int qed_cxt_tables_alloc(struct qed_hwfn *p_hwfn);
+
+/**
+ * @brief qed_cxt_mngr_setup - Reset the acquired CIDs
+ *
+ * @param p_hwfn
+ */
+void qed_cxt_mngr_setup(struct qed_hwfn *p_hwfn);
+
+/**
+ * @brief qed_cxt_hw_init_common - Initialize ILT and DQ, common phase, per path.
+ *
+ *
+ *
+ * @param p_hwfn
+ */
+void qed_cxt_hw_init_common(struct qed_hwfn *p_hwfn);
+
+/**
+ * @brief qed_cxt_hw_init_pf - Initialize ILT and DQ, PF phase, per path.
+ *
+ *
+ *
+ * @param p_hwfn
+ */
+void qed_cxt_hw_init_pf(struct qed_hwfn *p_hwfn);
+
+/**
+ * @brief qed_qm_init_pf - Initialize the QM PF phase, per path
+ *
+ * @param p_hwfn
+ */
+
+void qed_qm_init_pf(struct qed_hwfn *p_hwfn);
+
+/**
+ * @brief Reconfigures QM pf on the fly
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ *
+ * @return int
+ */
+int qed_qm_reconf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
+
+/**
+ * @brief qed_cxt_release - Release a cid
+ *
+ * @param p_hwfn
+ * @param cid
+ */
+void qed_cxt_release_cid(struct qed_hwfn *p_hwfn,
+                        u32 cid);
+
+#define QED_CTX_WORKING_MEM 0
+#define QED_CTX_FL_MEM 1
+#endif
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
new file mode 100644 (file)
index 0000000..3656d2f
--- /dev/null
@@ -0,0 +1,2313 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015 QLogic Corporation
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#include <linux/types.h>
+#include <asm/byteorder.h>
+#include <linux/bitops.h>
+#include <linux/dcbnl.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include "qed.h"
+#include "qed_cxt.h"
+#include "qed_dcbx.h"
+#include "qed_hsi.h"
+#include "qed_sp.h"
+#include "qed_sriov.h"
+#ifdef CONFIG_DCB
+#include <linux/qed/qed_eth_if.h>
+#endif
+
+#define QED_DCBX_MAX_MIB_READ_TRY       (100)
+#define QED_ETH_TYPE_DEFAULT            (0)
+#define QED_ETH_TYPE_ROCE               (0x8915)
+#define QED_UDP_PORT_TYPE_ROCE_V2       (0x12B7)
+#define QED_ETH_TYPE_FCOE               (0x8906)
+#define QED_TCP_PORT_ISCSI              (0xCBC)
+
+#define QED_DCBX_INVALID_PRIORITY       0xFF
+
+/* Get the Traffic Class from the priority-to-TC table; each 4-bit nibble
+ * holds the traffic class corresponding to one priority.
+ */
+#define QED_DCBX_PRIO2TC(prio_tc_tbl, prio) \
+       ((u32)(prio_tc_tbl >> ((7 - prio) * 4)) & 0x7)
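For illustration, a standalone example of the nibble lookup that QED_DCBX_PRIO2TC performs; the macro is restated locally so the sketch compiles on its own, and the table value is hypothetical:

#include <stdint.h>
#include <stdio.h>

/* local restatement of QED_DCBX_PRIO2TC for a self-contained example */
#define PRIO2TC(tbl, prio) ((uint32_t)((tbl) >> ((7 - (prio)) * 4)) & 0x7)

int main(void)
{
	uint32_t prio_tc_tbl = 0x76543210;	/* priority 0 in the top nibble */
	int prio;

	for (prio = 0; prio < 8; prio++)
		printf("prio %d -> tc %u\n", prio, PRIO2TC(prio_tc_tbl, prio));
	return 0;
}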
+
+static const struct qed_dcbx_app_metadata qed_dcbx_app_update[] = {
+       {DCBX_PROTOCOL_ISCSI, "ISCSI", QED_PCI_DEFAULT},
+       {DCBX_PROTOCOL_FCOE, "FCOE", QED_PCI_DEFAULT},
+       {DCBX_PROTOCOL_ROCE, "ROCE", QED_PCI_DEFAULT},
+       {DCBX_PROTOCOL_ROCE_V2, "ROCE_V2", QED_PCI_DEFAULT},
+       {DCBX_PROTOCOL_ETH, "ETH", QED_PCI_ETH}
+};
+
+static bool qed_dcbx_app_ethtype(u32 app_info_bitmap)
+{
+       return !!(QED_MFW_GET_FIELD(app_info_bitmap, DCBX_APP_SF) ==
+                 DCBX_APP_SF_ETHTYPE);
+}
+
+static bool qed_dcbx_ieee_app_ethtype(u32 app_info_bitmap)
+{
+       u8 mfw_val = QED_MFW_GET_FIELD(app_info_bitmap, DCBX_APP_SF_IEEE);
+
+       /* Old MFW */
+       if (mfw_val == DCBX_APP_SF_IEEE_RESERVED)
+               return qed_dcbx_app_ethtype(app_info_bitmap);
+
+       return !!(mfw_val == DCBX_APP_SF_IEEE_ETHTYPE);
+}
+
+static bool qed_dcbx_app_port(u32 app_info_bitmap)
+{
+       return !!(QED_MFW_GET_FIELD(app_info_bitmap, DCBX_APP_SF) ==
+                 DCBX_APP_SF_PORT);
+}
+
+static bool qed_dcbx_ieee_app_port(u32 app_info_bitmap, u8 type)
+{
+       u8 mfw_val = QED_MFW_GET_FIELD(app_info_bitmap, DCBX_APP_SF_IEEE);
+
+       /* Old MFW */
+       if (mfw_val == DCBX_APP_SF_IEEE_RESERVED)
+               return qed_dcbx_app_port(app_info_bitmap);
+
+       return !!(mfw_val == type || mfw_val == DCBX_APP_SF_IEEE_TCP_UDP_PORT);
+}
+
+static bool qed_dcbx_default_tlv(u32 app_info_bitmap, u16 proto_id, bool ieee)
+{
+       bool ethtype;
+
+       if (ieee)
+               ethtype = qed_dcbx_ieee_app_ethtype(app_info_bitmap);
+       else
+               ethtype = qed_dcbx_app_ethtype(app_info_bitmap);
+
+       return !!(ethtype && (proto_id == QED_ETH_TYPE_DEFAULT));
+}
+
+static bool qed_dcbx_iscsi_tlv(u32 app_info_bitmap, u16 proto_id, bool ieee)
+{
+       bool port;
+
+       if (ieee)
+               port = qed_dcbx_ieee_app_port(app_info_bitmap,
+                                             DCBX_APP_SF_IEEE_TCP_PORT);
+       else
+               port = qed_dcbx_app_port(app_info_bitmap);
+
+       return !!(port && (proto_id == QED_TCP_PORT_ISCSI));
+}
+
+static bool qed_dcbx_fcoe_tlv(u32 app_info_bitmap, u16 proto_id, bool ieee)
+{
+       bool ethtype;
+
+       if (ieee)
+               ethtype = qed_dcbx_ieee_app_ethtype(app_info_bitmap);
+       else
+               ethtype = qed_dcbx_app_ethtype(app_info_bitmap);
+
+       return !!(ethtype && (proto_id == QED_ETH_TYPE_FCOE));
+}
+
+static bool qed_dcbx_roce_tlv(u32 app_info_bitmap, u16 proto_id, bool ieee)
+{
+       bool ethtype;
+
+       if (ieee)
+               ethtype = qed_dcbx_ieee_app_ethtype(app_info_bitmap);
+       else
+               ethtype = qed_dcbx_app_ethtype(app_info_bitmap);
+
+       return !!(ethtype && (proto_id == QED_ETH_TYPE_ROCE));
+}
+
+static bool qed_dcbx_roce_v2_tlv(u32 app_info_bitmap, u16 proto_id, bool ieee)
+{
+       bool port;
+
+       if (ieee)
+               port = qed_dcbx_ieee_app_port(app_info_bitmap,
+                                             DCBX_APP_SF_IEEE_UDP_PORT);
+       else
+               port = qed_dcbx_app_port(app_info_bitmap);
+
+       return !!(port && (proto_id == QED_UDP_PORT_TYPE_ROCE_V2));
+}
+
+static void
+qed_dcbx_dp_protocol(struct qed_hwfn *p_hwfn, struct qed_dcbx_results *p_data)
+{
+       enum dcbx_protocol_type id;
+       int i;
+
+       DP_VERBOSE(p_hwfn, QED_MSG_DCB, "DCBX negotiated: %d\n",
+                  p_data->dcbx_enabled);
+
+       for (i = 0; i < ARRAY_SIZE(qed_dcbx_app_update); i++) {
+               id = qed_dcbx_app_update[i].id;
+
+               DP_VERBOSE(p_hwfn, QED_MSG_DCB,
+                          "%s info: update %d, enable %d, prio %d, tc %d, num_tc %d\n",
+                          qed_dcbx_app_update[i].name, p_data->arr[id].update,
+                          p_data->arr[id].enable, p_data->arr[id].priority,
+                          p_data->arr[id].tc, p_hwfn->hw_info.num_tc);
+       }
+}
+
+static void
+qed_dcbx_set_params(struct qed_dcbx_results *p_data,
+                   struct qed_hw_info *p_info,
+                   bool enable,
+                   bool update,
+                   u8 prio,
+                   u8 tc,
+                   enum dcbx_protocol_type type,
+                   enum qed_pci_personality personality)
+{
+       /* PF update ramrod data */
+       p_data->arr[type].update = update;
+       p_data->arr[type].enable = enable;
+       p_data->arr[type].priority = prio;
+       p_data->arr[type].tc = tc;
+
+       /* QM reconf data */
+       if (p_info->personality == personality) {
+               if (personality == QED_PCI_ETH)
+                       p_info->non_offload_tc = tc;
+               else
+                       p_info->offload_tc = tc;
+       }
+}
+
+/* Update app protocol data and hw_info fields with the TLV info */
+static void
+qed_dcbx_update_app_info(struct qed_dcbx_results *p_data,
+                        struct qed_hwfn *p_hwfn,
+                        bool enable,
+                        bool update,
+                        u8 prio, u8 tc, enum dcbx_protocol_type type)
+{
+       struct qed_hw_info *p_info = &p_hwfn->hw_info;
+       enum qed_pci_personality personality;
+       enum dcbx_protocol_type id;
+       char *name;
+       int i;
+
+       for (i = 0; i < ARRAY_SIZE(qed_dcbx_app_update); i++) {
+               id = qed_dcbx_app_update[i].id;
+
+               if (type != id)
+                       continue;
+
+               personality = qed_dcbx_app_update[i].personality;
+               name = qed_dcbx_app_update[i].name;
+
+               qed_dcbx_set_params(p_data, p_info, enable, update,
+                                   prio, tc, type, personality);
+       }
+}
+
+static bool
+qed_dcbx_get_app_protocol_type(struct qed_hwfn *p_hwfn,
+                              u32 app_prio_bitmap,
+                              u16 id, enum dcbx_protocol_type *type, bool ieee)
+{
+       if (qed_dcbx_fcoe_tlv(app_prio_bitmap, id, ieee)) {
+               *type = DCBX_PROTOCOL_FCOE;
+       } else if (qed_dcbx_roce_tlv(app_prio_bitmap, id, ieee)) {
+               *type = DCBX_PROTOCOL_ROCE;
+       } else if (qed_dcbx_iscsi_tlv(app_prio_bitmap, id, ieee)) {
+               *type = DCBX_PROTOCOL_ISCSI;
+       } else if (qed_dcbx_default_tlv(app_prio_bitmap, id, ieee)) {
+               *type = DCBX_PROTOCOL_ETH;
+       } else if (qed_dcbx_roce_v2_tlv(app_prio_bitmap, id, ieee)) {
+               *type = DCBX_PROTOCOL_ROCE_V2;
+       } else {
+               *type = DCBX_MAX_PROTOCOL_TYPE;
+               DP_ERR(p_hwfn,
+                      "No action required, App TLV id = 0x%x app_prio_bitmap = 0x%x\n",
+                      id, app_prio_bitmap);
+               return false;
+       }
+
+       return true;
+}
+
+/* Parse app TLVs to update the TC information in the hw_info structure for
+ * reconfiguring QM. Get protocol-specific data for the PF update ramrod
+ * command.
+ */
+static int
+qed_dcbx_process_tlv(struct qed_hwfn *p_hwfn,
+                    struct qed_dcbx_results *p_data,
+                    struct dcbx_app_priority_entry *p_tbl,
+                    u32 pri_tc_tbl, int count, u8 dcbx_version)
+{
+       u8 tc, priority_map;
+       enum dcbx_protocol_type type;
+       bool enable, ieee;
+       u16 protocol_id;
+       int priority;
+       int i;
+
+       DP_VERBOSE(p_hwfn, QED_MSG_DCB, "Num APP entries = %d\n", count);
+
+       ieee = (dcbx_version == DCBX_CONFIG_VERSION_IEEE);
+       /* Parse APP TLV */
+       for (i = 0; i < count; i++) {
+               protocol_id = QED_MFW_GET_FIELD(p_tbl[i].entry,
+                                               DCBX_APP_PROTOCOL_ID);
+               priority_map = QED_MFW_GET_FIELD(p_tbl[i].entry,
+                                                DCBX_APP_PRI_MAP);
+               priority = ffs(priority_map) - 1;
+               if (priority < 0) {
+                       DP_ERR(p_hwfn, "Invalid priority\n");
+                       return -EINVAL;
+               }
+
+               tc = QED_DCBX_PRIO2TC(pri_tc_tbl, priority);
+               if (qed_dcbx_get_app_protocol_type(p_hwfn, p_tbl[i].entry,
+                                                  protocol_id, &type, ieee)) {
+                       /* ETH always has the enable bit reset, as it gets
+                        * vlan information per packet. For other protocols,
+                        * the bit should be set according to the dcbx_enabled
+                        * indication, but we only get here if there was an
+                        * app tlv for the protocol, so dcbx must be enabled.
+                        */
+                       enable = !(type == DCBX_PROTOCOL_ETH);
+
+                       qed_dcbx_update_app_info(p_data, p_hwfn, enable, true,
+                                                priority, tc, type);
+               }
+       }
+
+       /* If a RoCE-v2 TLV is not detected, the driver needs to use the RoCE
+        * app data for RoCE-v2, not the default app data.
+        */
+       if (!p_data->arr[DCBX_PROTOCOL_ROCE_V2].update &&
+           p_data->arr[DCBX_PROTOCOL_ROCE].update) {
+               tc = p_data->arr[DCBX_PROTOCOL_ROCE].tc;
+               priority = p_data->arr[DCBX_PROTOCOL_ROCE].priority;
+               qed_dcbx_update_app_info(p_data, p_hwfn, true, true,
+                                        priority, tc, DCBX_PROTOCOL_ROCE_V2);
+       }
+
+       /* Update ramrod protocol data and hw_info fields with default info
+        * when the corresponding APP TLVs are not detected. The enabled field
+        * follows different logic for ethernet: only for ethernet should dcb
+        * be disabled by default, since that information arrives from the OS
+        * (unless an explicit app tlv was present).
+        */
+       tc = p_data->arr[DCBX_PROTOCOL_ETH].tc;
+       priority = p_data->arr[DCBX_PROTOCOL_ETH].priority;
+       for (type = 0; type < DCBX_MAX_PROTOCOL_TYPE; type++) {
+               if (p_data->arr[type].update)
+                       continue;
+
+               enable = !(type == DCBX_PROTOCOL_ETH);
+               qed_dcbx_update_app_info(p_data, p_hwfn, enable, true,
+                                        priority, tc, type);
+       }
+
+       return 0;
+}
+
+/* Parse app TLVs to update the TC information in the hw_info structure for
+ * reconfiguring QM. Get protocol-specific data for the PF update ramrod
+ * command.
+ */
+static int qed_dcbx_process_mib_info(struct qed_hwfn *p_hwfn)
+{
+       struct dcbx_app_priority_feature *p_app;
+       struct dcbx_app_priority_entry *p_tbl;
+       struct qed_dcbx_results data = { 0 };
+       struct dcbx_ets_feature *p_ets;
+       struct qed_hw_info *p_info;
+       u32 pri_tc_tbl, flags;
+       u8 dcbx_version;
+       int num_entries;
+       int rc = 0;
+
+       flags = p_hwfn->p_dcbx_info->operational.flags;
+       dcbx_version = QED_MFW_GET_FIELD(flags, DCBX_CONFIG_VERSION);
+
+       p_app = &p_hwfn->p_dcbx_info->operational.features.app;
+       p_tbl = p_app->app_pri_tbl;
+
+       p_ets = &p_hwfn->p_dcbx_info->operational.features.ets;
+       pri_tc_tbl = p_ets->pri_tc_tbl[0];
+
+       p_info = &p_hwfn->hw_info;
+       num_entries = QED_MFW_GET_FIELD(p_app->flags, DCBX_APP_NUM_ENTRIES);
+
+       rc = qed_dcbx_process_tlv(p_hwfn, &data, p_tbl, pri_tc_tbl,
+                                 num_entries, dcbx_version);
+       if (rc)
+               return rc;
+
+       p_info->num_tc = QED_MFW_GET_FIELD(p_ets->flags, DCBX_ETS_MAX_TCS);
+       data.pf_id = p_hwfn->rel_pf_id;
+       data.dcbx_enabled = !!dcbx_version;
+
+       qed_dcbx_dp_protocol(p_hwfn, &data);
+
+       memcpy(&p_hwfn->p_dcbx_info->results, &data,
+              sizeof(struct qed_dcbx_results));
+
+       return 0;
+}
+
+static int
+qed_dcbx_copy_mib(struct qed_hwfn *p_hwfn,
+                 struct qed_ptt *p_ptt,
+                 struct qed_dcbx_mib_meta_data *p_data,
+                 enum qed_mib_read_type type)
+{
+       u32 prefix_seq_num, suffix_seq_num;
+       int read_count = 0;
+       int rc = 0;
+
+       /* The data is considered to be valid only if both sequence numbers are
+        * the same.
+        */
+       do {
+               if (type == QED_DCBX_REMOTE_LLDP_MIB) {
+                       qed_memcpy_from(p_hwfn, p_ptt, p_data->lldp_remote,
+                                       p_data->addr, p_data->size);
+                       prefix_seq_num = p_data->lldp_remote->prefix_seq_num;
+                       suffix_seq_num = p_data->lldp_remote->suffix_seq_num;
+               } else {
+                       qed_memcpy_from(p_hwfn, p_ptt, p_data->mib,
+                                       p_data->addr, p_data->size);
+                       prefix_seq_num = p_data->mib->prefix_seq_num;
+                       suffix_seq_num = p_data->mib->suffix_seq_num;
+               }
+               read_count++;
+
+               DP_VERBOSE(p_hwfn,
+                          QED_MSG_DCB,
+                          "mib type = %d, try count = %d prefix seq num  = %d suffix seq num = %d\n",
+                          type, read_count, prefix_seq_num, suffix_seq_num);
+       } while ((prefix_seq_num != suffix_seq_num) &&
+                (read_count < QED_DCBX_MAX_MIB_READ_TRY));
+
+       if (read_count >= QED_DCBX_MAX_MIB_READ_TRY) {
+               DP_ERR(p_hwfn,
+                      "MIB read err, mib type = %d, try count = %d prefix seq num = %d suffix seq num = %d\n",
+                      type, read_count, prefix_seq_num, suffix_seq_num);
+               rc = -EIO;
+       }
+
+       return rc;
+}
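For illustration, a self-contained sketch of the prefix/suffix sequence-number retry loop above; snapshot_mib() is a stand-in for the qed_memcpy_from() copy of the shared MIB, and the structure layout is hypothetical:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define MAX_MIB_READ_TRY 100

struct mib_snapshot {
	uint32_t prefix_seq_num;
	uint32_t payload[4];
	uint32_t suffix_seq_num;
};

/* stand-in for the shared memory region maintained by the management FW */
static struct mib_snapshot shared = { 3, { 10, 20, 30, 40 }, 3 };

/* stand-in for qed_memcpy_from(): copy the current shared contents */
static void snapshot_mib(struct mib_snapshot *dst)
{
	memcpy(dst, &shared, sizeof(*dst));
}

int main(void)
{
	struct mib_snapshot local;
	int tries = 0;

	/* Re-read until both sequence numbers match, i.e. no update raced
	 * with the copy, giving up after MAX_MIB_READ_TRY attempts.
	 */
	do {
		snapshot_mib(&local);
		tries++;
	} while (local.prefix_seq_num != local.suffix_seq_num &&
		 tries < MAX_MIB_READ_TRY);

	if (local.prefix_seq_num != local.suffix_seq_num)
		fprintf(stderr, "MIB read failed after %d tries\n", tries);
	else
		printf("consistent snapshot after %d read(s)\n", tries);
	return 0;
}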
+
+#ifdef CONFIG_DCB
+static void
+qed_dcbx_get_priority_info(struct qed_hwfn *p_hwfn,
+                          struct qed_dcbx_app_prio *p_prio,
+                          struct qed_dcbx_results *p_results)
+{
+       u8 val;
+
+       p_prio->roce = QED_DCBX_INVALID_PRIORITY;
+       p_prio->roce_v2 = QED_DCBX_INVALID_PRIORITY;
+       p_prio->iscsi = QED_DCBX_INVALID_PRIORITY;
+       p_prio->fcoe = QED_DCBX_INVALID_PRIORITY;
+
+       if (p_results->arr[DCBX_PROTOCOL_ROCE].update &&
+           p_results->arr[DCBX_PROTOCOL_ROCE].enable)
+               p_prio->roce = p_results->arr[DCBX_PROTOCOL_ROCE].priority;
+
+       if (p_results->arr[DCBX_PROTOCOL_ROCE_V2].update &&
+           p_results->arr[DCBX_PROTOCOL_ROCE_V2].enable) {
+               val = p_results->arr[DCBX_PROTOCOL_ROCE_V2].priority;
+               p_prio->roce_v2 = val;
+       }
+
+       if (p_results->arr[DCBX_PROTOCOL_ISCSI].update &&
+           p_results->arr[DCBX_PROTOCOL_ISCSI].enable)
+               p_prio->iscsi = p_results->arr[DCBX_PROTOCOL_ISCSI].priority;
+
+       if (p_results->arr[DCBX_PROTOCOL_FCOE].update &&
+           p_results->arr[DCBX_PROTOCOL_FCOE].enable)
+               p_prio->fcoe = p_results->arr[DCBX_PROTOCOL_FCOE].priority;
+
+       if (p_results->arr[DCBX_PROTOCOL_ETH].update &&
+           p_results->arr[DCBX_PROTOCOL_ETH].enable)
+               p_prio->eth = p_results->arr[DCBX_PROTOCOL_ETH].priority;
+
+       DP_VERBOSE(p_hwfn, QED_MSG_DCB,
+                  "Priorities: iscsi %d, roce %d, roce v2 %d, fcoe %d, eth %d\n",
+                  p_prio->iscsi, p_prio->roce, p_prio->roce_v2, p_prio->fcoe,
+                  p_prio->eth);
+}
+
+static void
+qed_dcbx_get_app_data(struct qed_hwfn *p_hwfn,
+                     struct dcbx_app_priority_feature *p_app,
+                     struct dcbx_app_priority_entry *p_tbl,
+                     struct qed_dcbx_params *p_params, bool ieee)
+{
+       struct qed_app_entry *entry;
+       u8 pri_map;
+       int i;
+
+       p_params->app_willing = QED_MFW_GET_FIELD(p_app->flags,
+                                                 DCBX_APP_WILLING);
+       p_params->app_valid = QED_MFW_GET_FIELD(p_app->flags, DCBX_APP_ENABLED);
+       p_params->app_error = QED_MFW_GET_FIELD(p_app->flags, DCBX_APP_ERROR);
+       p_params->num_app_entries = QED_MFW_GET_FIELD(p_app->flags,
+                                                     DCBX_APP_NUM_ENTRIES);
+       for (i = 0; i < DCBX_MAX_APP_PROTOCOL; i++) {
+               entry = &p_params->app_entry[i];
+               if (ieee) {
+                       u8 sf_ieee;
+                       u32 val;
+
+                       sf_ieee = QED_MFW_GET_FIELD(p_tbl[i].entry,
+                                                   DCBX_APP_SF_IEEE);
+                       switch (sf_ieee) {
+                       case DCBX_APP_SF_IEEE_RESERVED:
+                               /* Old MFW */
+                               val = QED_MFW_GET_FIELD(p_tbl[i].entry,
+                                                       DCBX_APP_SF);
+                               entry->sf_ieee = val ?
+                                   QED_DCBX_SF_IEEE_TCP_UDP_PORT :
+                                   QED_DCBX_SF_IEEE_ETHTYPE;
+                               break;
+                       case DCBX_APP_SF_IEEE_ETHTYPE:
+                               entry->sf_ieee = QED_DCBX_SF_IEEE_ETHTYPE;
+                               break;
+                       case DCBX_APP_SF_IEEE_TCP_PORT:
+                               entry->sf_ieee = QED_DCBX_SF_IEEE_TCP_PORT;
+                               break;
+                       case DCBX_APP_SF_IEEE_UDP_PORT:
+                               entry->sf_ieee = QED_DCBX_SF_IEEE_UDP_PORT;
+                               break;
+                       case DCBX_APP_SF_IEEE_TCP_UDP_PORT:
+                               entry->sf_ieee = QED_DCBX_SF_IEEE_TCP_UDP_PORT;
+                               break;
+                       }
+               } else {
+                       entry->ethtype = !(QED_MFW_GET_FIELD(p_tbl[i].entry,
+                                                            DCBX_APP_SF));
+               }
+
+               pri_map = QED_MFW_GET_FIELD(p_tbl[i].entry, DCBX_APP_PRI_MAP);
+               entry->prio = ffs(pri_map) - 1;
+               entry->proto_id = QED_MFW_GET_FIELD(p_tbl[i].entry,
+                                                   DCBX_APP_PROTOCOL_ID);
+               qed_dcbx_get_app_protocol_type(p_hwfn, p_tbl[i].entry,
+                                              entry->proto_id,
+                                              &entry->proto_type, ieee);
+       }
+
+       DP_VERBOSE(p_hwfn, QED_MSG_DCB,
+                  "APP params: willing %d, valid %d error = %d\n",
+                  p_params->app_willing, p_params->app_valid,
+                  p_params->app_error);
+}
+
+static void
+qed_dcbx_get_pfc_data(struct qed_hwfn *p_hwfn,
+                     u32 pfc, struct qed_dcbx_params *p_params)
+{
+       u8 pfc_map;
+
+       p_params->pfc.willing = QED_MFW_GET_FIELD(pfc, DCBX_PFC_WILLING);
+       p_params->pfc.max_tc = QED_MFW_GET_FIELD(pfc, DCBX_PFC_CAPS);
+       p_params->pfc.enabled = QED_MFW_GET_FIELD(pfc, DCBX_PFC_ENABLED);
+       pfc_map = QED_MFW_GET_FIELD(pfc, DCBX_PFC_PRI_EN_BITMAP);
+       p_params->pfc.prio[0] = !!(pfc_map & DCBX_PFC_PRI_EN_BITMAP_PRI_0);
+       p_params->pfc.prio[1] = !!(pfc_map & DCBX_PFC_PRI_EN_BITMAP_PRI_1);
+       p_params->pfc.prio[2] = !!(pfc_map & DCBX_PFC_PRI_EN_BITMAP_PRI_2);
+       p_params->pfc.prio[3] = !!(pfc_map & DCBX_PFC_PRI_EN_BITMAP_PRI_3);
+       p_params->pfc.prio[4] = !!(pfc_map & DCBX_PFC_PRI_EN_BITMAP_PRI_4);
+       p_params->pfc.prio[5] = !!(pfc_map & DCBX_PFC_PRI_EN_BITMAP_PRI_5);
+       p_params->pfc.prio[6] = !!(pfc_map & DCBX_PFC_PRI_EN_BITMAP_PRI_6);
+       p_params->pfc.prio[7] = !!(pfc_map & DCBX_PFC_PRI_EN_BITMAP_PRI_7);
+
+       DP_VERBOSE(p_hwfn, QED_MSG_DCB,
+                  "PFC params: willing %d, pfc_bitmap %d\n",
+                  p_params->pfc.willing, pfc_map);
+}
+
+static void
+qed_dcbx_get_ets_data(struct qed_hwfn *p_hwfn,
+                     struct dcbx_ets_feature *p_ets,
+                     struct qed_dcbx_params *p_params)
+{
+       u32 bw_map[2], tsa_map[2], pri_map;
+       int i;
+
+       p_params->ets_willing = QED_MFW_GET_FIELD(p_ets->flags,
+                                                 DCBX_ETS_WILLING);
+       p_params->ets_enabled = QED_MFW_GET_FIELD(p_ets->flags,
+                                                 DCBX_ETS_ENABLED);
+       p_params->ets_cbs = QED_MFW_GET_FIELD(p_ets->flags, DCBX_ETS_CBS);
+       p_params->max_ets_tc = QED_MFW_GET_FIELD(p_ets->flags,
+                                                DCBX_ETS_MAX_TCS);
+       DP_VERBOSE(p_hwfn, QED_MSG_DCB,
+                  "ETS params: willing %d, ets_cbs %d pri_tc_tbl_0 %x max_ets_tc %d\n",
+                  p_params->ets_willing,
+                  p_params->ets_cbs,
+                  p_ets->pri_tc_tbl[0], p_params->max_ets_tc);
+
+       /* The 8-bit tsa and bw values corresponding to each of the 8 TCs are
+        * encoded in u32 arrays of size 2.
+        */
+       bw_map[0] = be32_to_cpu(p_ets->tc_bw_tbl[0]);
+       bw_map[1] = be32_to_cpu(p_ets->tc_bw_tbl[1]);
+       tsa_map[0] = be32_to_cpu(p_ets->tc_tsa_tbl[0]);
+       tsa_map[1] = be32_to_cpu(p_ets->tc_tsa_tbl[1]);
+       pri_map = p_ets->pri_tc_tbl[0];
+       for (i = 0; i < QED_MAX_PFC_PRIORITIES; i++) {
+               p_params->ets_tc_bw_tbl[i] = ((u8 *)bw_map)[i];
+               p_params->ets_tc_tsa_tbl[i] = ((u8 *)tsa_map)[i];
+               p_params->ets_pri_tc_tbl[i] = QED_DCBX_PRIO2TC(pri_map, i);
+               DP_VERBOSE(p_hwfn, QED_MSG_DCB,
+                          "elem %d  bw_tbl %x tsa_tbl %x\n",
+                          i, p_params->ets_tc_bw_tbl[i],
+                          p_params->ets_tc_tsa_tbl[i]);
+       }
+}
+
+static void
+qed_dcbx_get_common_params(struct qed_hwfn *p_hwfn,
+                          struct dcbx_app_priority_feature *p_app,
+                          struct dcbx_app_priority_entry *p_tbl,
+                          struct dcbx_ets_feature *p_ets,
+                          u32 pfc, struct qed_dcbx_params *p_params, bool ieee)
+{
+       qed_dcbx_get_app_data(p_hwfn, p_app, p_tbl, p_params, ieee);
+       qed_dcbx_get_ets_data(p_hwfn, p_ets, p_params);
+       qed_dcbx_get_pfc_data(p_hwfn, pfc, p_params);
+}
+
+static void
+qed_dcbx_get_local_params(struct qed_hwfn *p_hwfn,
+                         struct qed_ptt *p_ptt, struct qed_dcbx_get *params)
+{
+       struct dcbx_features *p_feat;
+
+       p_feat = &p_hwfn->p_dcbx_info->local_admin.features;
+       qed_dcbx_get_common_params(p_hwfn, &p_feat->app,
+                                  p_feat->app.app_pri_tbl, &p_feat->ets,
+                                  p_feat->pfc, &params->local.params, false);
+       params->local.valid = true;
+}
+
+static void
+qed_dcbx_get_remote_params(struct qed_hwfn *p_hwfn,
+                          struct qed_ptt *p_ptt, struct qed_dcbx_get *params)
+{
+       struct dcbx_features *p_feat;
+
+       p_feat = &p_hwfn->p_dcbx_info->remote.features;
+       qed_dcbx_get_common_params(p_hwfn, &p_feat->app,
+                                  p_feat->app.app_pri_tbl, &p_feat->ets,
+                                  p_feat->pfc, &params->remote.params, false);
+       params->remote.valid = true;
+}
+
+static void
+qed_dcbx_get_operational_params(struct qed_hwfn *p_hwfn,
+                               struct qed_ptt *p_ptt,
+                               struct qed_dcbx_get *params)
+{
+       struct qed_dcbx_operational_params *p_operational;
+       struct qed_dcbx_results *p_results;
+       struct dcbx_features *p_feat;
+       bool enabled, err;
+       u32 flags;
+       bool val;
+
+       flags = p_hwfn->p_dcbx_info->operational.flags;
+
+       /* If the DCBx version is non-zero, then negotiation
+        * was successfully performed.
+        */
+       p_operational = &params->operational;
+       enabled = !!(QED_MFW_GET_FIELD(flags, DCBX_CONFIG_VERSION) !=
+                    DCBX_CONFIG_VERSION_DISABLED);
+       if (!enabled) {
+               p_operational->enabled = enabled;
+               p_operational->valid = false;
+               return;
+       }
+
+       p_feat = &p_hwfn->p_dcbx_info->operational.features;
+       p_results = &p_hwfn->p_dcbx_info->results;
+
+       val = !!(QED_MFW_GET_FIELD(flags, DCBX_CONFIG_VERSION) ==
+                DCBX_CONFIG_VERSION_IEEE);
+       p_operational->ieee = val;
+       val = !!(QED_MFW_GET_FIELD(flags, DCBX_CONFIG_VERSION) ==
+                DCBX_CONFIG_VERSION_CEE);
+       p_operational->cee = val;
+
+       DP_VERBOSE(p_hwfn, QED_MSG_DCB, "Version support: ieee %d, cee %d\n",
+                  p_operational->ieee, p_operational->cee);
+
+       qed_dcbx_get_common_params(p_hwfn, &p_feat->app,
+                                  p_feat->app.app_pri_tbl, &p_feat->ets,
+                                  p_feat->pfc, &params->operational.params,
+                                  p_operational->ieee);
+       qed_dcbx_get_priority_info(p_hwfn, &p_operational->app_prio, p_results);
+       err = QED_MFW_GET_FIELD(p_feat->app.flags, DCBX_APP_ERROR);
+       p_operational->err = err;
+       p_operational->enabled = enabled;
+       p_operational->valid = true;
+}
+
+static void
+qed_dcbx_get_local_lldp_params(struct qed_hwfn *p_hwfn,
+                              struct qed_ptt *p_ptt,
+                              struct qed_dcbx_get *params)
+{
+       struct lldp_config_params_s *p_local;
+
+       p_local = &p_hwfn->p_dcbx_info->lldp_local[LLDP_NEAREST_BRIDGE];
+
+       memcpy(params->lldp_local.local_chassis_id, p_local->local_chassis_id,
+              ARRAY_SIZE(p_local->local_chassis_id));
+       memcpy(params->lldp_local.local_port_id, p_local->local_port_id,
+              ARRAY_SIZE(p_local->local_port_id));
+}
+
+static void
+qed_dcbx_get_remote_lldp_params(struct qed_hwfn *p_hwfn,
+                               struct qed_ptt *p_ptt,
+                               struct qed_dcbx_get *params)
+{
+       struct lldp_status_params_s *p_remote;
+
+       p_remote = &p_hwfn->p_dcbx_info->lldp_remote[LLDP_NEAREST_BRIDGE];
+
+       memcpy(params->lldp_remote.peer_chassis_id, p_remote->peer_chassis_id,
+              ARRAY_SIZE(p_remote->peer_chassis_id));
+       memcpy(params->lldp_remote.peer_port_id, p_remote->peer_port_id,
+              ARRAY_SIZE(p_remote->peer_port_id));
+}
+
+static int
+qed_dcbx_get_params(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
+                   struct qed_dcbx_get *p_params,
+                   enum qed_mib_read_type type)
+{
+       switch (type) {
+       case QED_DCBX_REMOTE_MIB:
+               qed_dcbx_get_remote_params(p_hwfn, p_ptt, p_params);
+               break;
+       case QED_DCBX_LOCAL_MIB:
+               qed_dcbx_get_local_params(p_hwfn, p_ptt, p_params);
+               break;
+       case QED_DCBX_OPERATIONAL_MIB:
+               qed_dcbx_get_operational_params(p_hwfn, p_ptt, p_params);
+               break;
+       case QED_DCBX_REMOTE_LLDP_MIB:
+               qed_dcbx_get_remote_lldp_params(p_hwfn, p_ptt, p_params);
+               break;
+       case QED_DCBX_LOCAL_LLDP_MIB:
+               qed_dcbx_get_local_lldp_params(p_hwfn, p_ptt, p_params);
+               break;
+       default:
+               DP_ERR(p_hwfn, "MIB read err, unknown mib type %d\n", type);
+               return -EINVAL;
+       }
+
+       return 0;
+}
+#endif
+
+static int
+qed_dcbx_read_local_lldp_mib(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+{
+       struct qed_dcbx_mib_meta_data data;
+       int rc = 0;
+
+       memset(&data, 0, sizeof(data));
+       data.addr = p_hwfn->mcp_info->port_addr + offsetof(struct public_port,
+                                                          lldp_config_params);
+       data.lldp_local = p_hwfn->p_dcbx_info->lldp_local;
+       data.size = sizeof(struct lldp_config_params_s);
+       qed_memcpy_from(p_hwfn, p_ptt, data.lldp_local, data.addr, data.size);
+
+       return rc;
+}
+
+static int
+qed_dcbx_read_remote_lldp_mib(struct qed_hwfn *p_hwfn,
+                             struct qed_ptt *p_ptt,
+                             enum qed_mib_read_type type)
+{
+       struct qed_dcbx_mib_meta_data data;
+       int rc = 0;
+
+       memset(&data, 0, sizeof(data));
+       data.addr = p_hwfn->mcp_info->port_addr + offsetof(struct public_port,
+                                                          lldp_status_params);
+       data.lldp_remote = p_hwfn->p_dcbx_info->lldp_remote;
+       data.size = sizeof(struct lldp_status_params_s);
+       rc = qed_dcbx_copy_mib(p_hwfn, p_ptt, &data, type);
+
+       return rc;
+}
+
+static int
+qed_dcbx_read_operational_mib(struct qed_hwfn *p_hwfn,
+                             struct qed_ptt *p_ptt,
+                             enum qed_mib_read_type type)
+{
+       struct qed_dcbx_mib_meta_data data;
+       int rc = 0;
+
+       memset(&data, 0, sizeof(data));
+       data.addr = p_hwfn->mcp_info->port_addr +
+                   offsetof(struct public_port, operational_dcbx_mib);
+       data.mib = &p_hwfn->p_dcbx_info->operational;
+       data.size = sizeof(struct dcbx_mib);
+       rc = qed_dcbx_copy_mib(p_hwfn, p_ptt, &data, type);
+
+       return rc;
+}
+
+static int
+qed_dcbx_read_remote_mib(struct qed_hwfn *p_hwfn,
+                        struct qed_ptt *p_ptt, enum qed_mib_read_type type)
+{
+       struct qed_dcbx_mib_meta_data data;
+       int rc = 0;
+
+       memset(&data, 0, sizeof(data));
+       data.addr = p_hwfn->mcp_info->port_addr +
+                   offsetof(struct public_port, remote_dcbx_mib);
+       data.mib = &p_hwfn->p_dcbx_info->remote;
+       data.size = sizeof(struct dcbx_mib);
+       rc = qed_dcbx_copy_mib(p_hwfn, p_ptt, &data, type);
+
+       return rc;
+}
+
+static int
+qed_dcbx_read_local_mib(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+{
+       struct qed_dcbx_mib_meta_data data;
+       int rc = 0;
+
+       memset(&data, 0, sizeof(data));
+       data.addr = p_hwfn->mcp_info->port_addr +
+                   offsetof(struct public_port, local_admin_dcbx_mib);
+       data.local_admin = &p_hwfn->p_dcbx_info->local_admin;
+       data.size = sizeof(struct dcbx_local_params);
+       qed_memcpy_from(p_hwfn, p_ptt, data.local_admin, data.addr, data.size);
+
+       return rc;
+}
+
+static int qed_dcbx_read_mib(struct qed_hwfn *p_hwfn,
+                            struct qed_ptt *p_ptt, enum qed_mib_read_type type)
+{
+       int rc = -EINVAL;
+
+       switch (type) {
+       case QED_DCBX_OPERATIONAL_MIB:
+               rc = qed_dcbx_read_operational_mib(p_hwfn, p_ptt, type);
+               break;
+       case QED_DCBX_REMOTE_MIB:
+               rc = qed_dcbx_read_remote_mib(p_hwfn, p_ptt, type);
+               break;
+       case QED_DCBX_LOCAL_MIB:
+               rc = qed_dcbx_read_local_mib(p_hwfn, p_ptt);
+               break;
+       case QED_DCBX_REMOTE_LLDP_MIB:
+               rc = qed_dcbx_read_remote_lldp_mib(p_hwfn, p_ptt, type);
+               break;
+       case QED_DCBX_LOCAL_LLDP_MIB:
+               rc = qed_dcbx_read_local_lldp_mib(p_hwfn, p_ptt);
+               break;
+       default:
+               DP_ERR(p_hwfn, "MIB read err, unknown mib type %d\n", type);
+       }
+
+       return rc;
+}
+
+/* Read the updated MIB.
+ * Reconfigure QM and invoke the PF update ramrod command if an operational
+ * MIB change is detected.
+ */
+int
+qed_dcbx_mib_update_event(struct qed_hwfn *p_hwfn,
+                         struct qed_ptt *p_ptt, enum qed_mib_read_type type)
+{
+       int rc = 0;
+
+       rc = qed_dcbx_read_mib(p_hwfn, p_ptt, type);
+       if (rc)
+               return rc;
+
+       if (type == QED_DCBX_OPERATIONAL_MIB) {
+               rc = qed_dcbx_process_mib_info(p_hwfn);
+               if (!rc) {
+                       /* reconfigure tcs of QM queues according
+                        * to negotiation results
+                        */
+                       qed_qm_reconf(p_hwfn, p_ptt);
+
+                       /* update storm FW with negotiation results */
+                       qed_sp_pf_update(p_hwfn);
+               }
+       }
+
+       return rc;
+}
+
+int qed_dcbx_info_alloc(struct qed_hwfn *p_hwfn)
+{
+       int rc = 0;
+
+       p_hwfn->p_dcbx_info = kzalloc(sizeof(*p_hwfn->p_dcbx_info), GFP_KERNEL);
+       if (!p_hwfn->p_dcbx_info) {
+               DP_NOTICE(p_hwfn,
+                         "Failed to allocate 'struct qed_dcbx_info'\n");
+               rc = -ENOMEM;
+       }
+
+       return rc;
+}
+
+void qed_dcbx_info_free(struct qed_hwfn *p_hwfn,
+                       struct qed_dcbx_info *p_dcbx_info)
+{
+       kfree(p_hwfn->p_dcbx_info);
+}
+
+static void qed_dcbx_update_protocol_data(struct protocol_dcb_data *p_data,
+                                         struct qed_dcbx_results *p_src,
+                                         enum dcbx_protocol_type type)
+{
+       p_data->dcb_enable_flag = p_src->arr[type].enable;
+       p_data->dcb_priority = p_src->arr[type].priority;
+       p_data->dcb_tc = p_src->arr[type].tc;
+}
+
+/* Set pf update ramrod command params */
+void qed_dcbx_set_pf_update_params(struct qed_dcbx_results *p_src,
+                                  struct pf_update_ramrod_data *p_dest)
+{
+       struct protocol_dcb_data *p_dcb_data;
+       bool update_flag = false;
+
+       p_dest->pf_id = p_src->pf_id;
+
+       update_flag = p_src->arr[DCBX_PROTOCOL_FCOE].update;
+       p_dest->update_fcoe_dcb_data_flag = update_flag;
+
+       update_flag = p_src->arr[DCBX_PROTOCOL_ROCE].update;
+       p_dest->update_roce_dcb_data_flag = update_flag;
+       update_flag = p_src->arr[DCBX_PROTOCOL_ROCE_V2].update;
+       p_dest->update_roce_dcb_data_flag = update_flag;
+
+       update_flag = p_src->arr[DCBX_PROTOCOL_ISCSI].update;
+       p_dest->update_iscsi_dcb_data_flag = update_flag;
+       update_flag = p_src->arr[DCBX_PROTOCOL_ETH].update;
+       p_dest->update_eth_dcb_data_flag = update_flag;
+
+       p_dcb_data = &p_dest->fcoe_dcb_data;
+       qed_dcbx_update_protocol_data(p_dcb_data, p_src, DCBX_PROTOCOL_FCOE);
+       p_dcb_data = &p_dest->roce_dcb_data;
+
+       if (p_src->arr[DCBX_PROTOCOL_ROCE].update)
+               qed_dcbx_update_protocol_data(p_dcb_data, p_src,
+                                             DCBX_PROTOCOL_ROCE);
+       if (p_src->arr[DCBX_PROTOCOL_ROCE_V2].update)
+               qed_dcbx_update_protocol_data(p_dcb_data, p_src,
+                                             DCBX_PROTOCOL_ROCE_V2);
+
+       p_dcb_data = &p_dest->iscsi_dcb_data;
+       qed_dcbx_update_protocol_data(p_dcb_data, p_src, DCBX_PROTOCOL_ISCSI);
+       p_dcb_data = &p_dest->eth_dcb_data;
+       qed_dcbx_update_protocol_data(p_dcb_data, p_src, DCBX_PROTOCOL_ETH);
+}
+
+#ifdef CONFIG_DCB
+static int qed_dcbx_query_params(struct qed_hwfn *p_hwfn,
+                                struct qed_dcbx_get *p_get,
+                                enum qed_mib_read_type type)
+{
+       struct qed_ptt *p_ptt;
+       int rc;
+
+       if (IS_VF(p_hwfn->cdev))
+               return -EINVAL;
+
+       p_ptt = qed_ptt_acquire(p_hwfn);
+       if (!p_ptt)
+               return -EBUSY;
+
+       rc = qed_dcbx_read_mib(p_hwfn, p_ptt, type);
+       if (rc)
+               goto out;
+
+       rc = qed_dcbx_get_params(p_hwfn, p_ptt, p_get, type);
+
+out:
+       qed_ptt_release(p_hwfn, p_ptt);
+       return rc;
+}
+
+static void
+qed_dcbx_set_pfc_data(struct qed_hwfn *p_hwfn,
+                     u32 *pfc, struct qed_dcbx_params *p_params)
+{
+       u8 pfc_map = 0;
+       int i;
+
+       if (p_params->pfc.willing)
+               *pfc |= DCBX_PFC_WILLING_MASK;
+       else
+               *pfc &= ~DCBX_PFC_WILLING_MASK;
+
+       if (p_params->pfc.enabled)
+               *pfc |= DCBX_PFC_ENABLED_MASK;
+       else
+               *pfc &= ~DCBX_PFC_ENABLED_MASK;
+
+       *pfc &= ~DCBX_PFC_CAPS_MASK;
+       *pfc |= (u32)p_params->pfc.max_tc << DCBX_PFC_CAPS_SHIFT;
+
+       for (i = 0; i < QED_MAX_PFC_PRIORITIES; i++)
+               if (p_params->pfc.prio[i])
+                       pfc_map |= BIT(i);
+
+       *pfc &= ~DCBX_PFC_PRI_EN_BITMAP_MASK;
+       *pfc |= (pfc_map << DCBX_PFC_PRI_EN_BITMAP_SHIFT);
+
+       DP_VERBOSE(p_hwfn, QED_MSG_DCB, "pfc = 0x%x\n", *pfc);
+}
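For illustration, a standalone sketch of how the priority-enable bitmap above is built from the eight per-priority booleans; the mask and shift are placeholders, not the real DCBX_PFC_PRI_EN_BITMAP_* values:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_PFC_PRIORITIES	8
/* placeholder field layout, standing in for DCBX_PFC_PRI_EN_BITMAP_* */
#define PRI_EN_BITMAP_MASK	0x0000ff00
#define PRI_EN_BITMAP_SHIFT	8

int main(void)
{
	bool prio[MAX_PFC_PRIORITIES] = { true, false, false, true,
					  false, false, false, false };
	uint32_t pfc = 0;
	uint8_t pfc_map = 0;
	int i;

	for (i = 0; i < MAX_PFC_PRIORITIES; i++)
		if (prio[i])
			pfc_map |= 1u << i;	/* BIT(i) */

	pfc &= ~PRI_EN_BITMAP_MASK;
	pfc |= (uint32_t)pfc_map << PRI_EN_BITMAP_SHIFT;

	printf("pfc_map 0x%02x, pfc word 0x%08x\n", pfc_map, pfc);
	return 0;
}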
+
+static void
+qed_dcbx_set_ets_data(struct qed_hwfn *p_hwfn,
+                     struct dcbx_ets_feature *p_ets,
+                     struct qed_dcbx_params *p_params)
+{
+       u8 *bw_map, *tsa_map;
+       u32 val;
+       int i;
+
+       if (p_params->ets_willing)
+               p_ets->flags |= DCBX_ETS_WILLING_MASK;
+       else
+               p_ets->flags &= ~DCBX_ETS_WILLING_MASK;
+
+       if (p_params->ets_cbs)
+               p_ets->flags |= DCBX_ETS_CBS_MASK;
+       else
+               p_ets->flags &= ~DCBX_ETS_CBS_MASK;
+
+       if (p_params->ets_enabled)
+               p_ets->flags |= DCBX_ETS_ENABLED_MASK;
+       else
+               p_ets->flags &= ~DCBX_ETS_ENABLED_MASK;
+
+       p_ets->flags &= ~DCBX_ETS_MAX_TCS_MASK;
+       p_ets->flags |= (u32)p_params->max_ets_tc << DCBX_ETS_MAX_TCS_SHIFT;
+
+       bw_map = (u8 *)&p_ets->tc_bw_tbl[0];
+       tsa_map = (u8 *)&p_ets->tc_tsa_tbl[0];
+       p_ets->pri_tc_tbl[0] = 0;
+       for (i = 0; i < QED_MAX_PFC_PRIORITIES; i++) {
+               bw_map[i] = p_params->ets_tc_bw_tbl[i];
+               tsa_map[i] = p_params->ets_tc_tsa_tbl[i];
+               /* Copy the priority value to the corresponding 4 bits in the
+                * traffic class table.
+                */
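+               /* e.g. the priority-0 nibble lands in bits 31:28 and the
+                * priority-7 nibble in bits 3:0 of pri_tc_tbl[0].
+                */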
+               val = (((u32)p_params->ets_pri_tc_tbl[i]) << ((7 - i) * 4));
+               p_ets->pri_tc_tbl[0] |= val;
+       }
+       for (i = 0; i < 2; i++) {
+               p_ets->tc_bw_tbl[i] = cpu_to_be32(p_ets->tc_bw_tbl[i]);
+               p_ets->tc_tsa_tbl[i] = cpu_to_be32(p_ets->tc_tsa_tbl[i]);
+       }
+}
+
+static void
+qed_dcbx_set_app_data(struct qed_hwfn *p_hwfn,
+                     struct dcbx_app_priority_feature *p_app,
+                     struct qed_dcbx_params *p_params, bool ieee)
+{
+       u32 *entry;
+       int i;
+
+       if (p_params->app_willing)
+               p_app->flags |= DCBX_APP_WILLING_MASK;
+       else
+               p_app->flags &= ~DCBX_APP_WILLING_MASK;
+
+       if (p_params->app_valid)
+               p_app->flags |= DCBX_APP_ENABLED_MASK;
+       else
+               p_app->flags &= ~DCBX_APP_ENABLED_MASK;
+
+       p_app->flags &= ~DCBX_APP_NUM_ENTRIES_MASK;
+       p_app->flags |= (u32)p_params->num_app_entries <<
+           DCBX_APP_NUM_ENTRIES_SHIFT;
+
+       for (i = 0; i < DCBX_MAX_APP_PROTOCOL; i++) {
+               entry = &p_app->app_pri_tbl[i].entry;
+               *entry = 0;
+               if (ieee) {
+                       *entry &= ~(DCBX_APP_SF_IEEE_MASK | DCBX_APP_SF_MASK);
+                       switch (p_params->app_entry[i].sf_ieee) {
+                       case QED_DCBX_SF_IEEE_ETHTYPE:
+                               *entry |= ((u32)DCBX_APP_SF_IEEE_ETHTYPE <<
+                                          DCBX_APP_SF_IEEE_SHIFT);
+                               *entry |= ((u32)DCBX_APP_SF_ETHTYPE <<
+                                          DCBX_APP_SF_SHIFT);
+                               break;
+                       case QED_DCBX_SF_IEEE_TCP_PORT:
+                               *entry |= ((u32)DCBX_APP_SF_IEEE_TCP_PORT <<
+                                          DCBX_APP_SF_IEEE_SHIFT);
+                               *entry |= ((u32)DCBX_APP_SF_PORT <<
+                                          DCBX_APP_SF_SHIFT);
+                               break;
+                       case QED_DCBX_SF_IEEE_UDP_PORT:
+                               *entry |= ((u32)DCBX_APP_SF_IEEE_UDP_PORT <<
+                                          DCBX_APP_SF_IEEE_SHIFT);
+                               *entry |= ((u32)DCBX_APP_SF_PORT <<
+                                          DCBX_APP_SF_SHIFT);
+                               break;
+                       case QED_DCBX_SF_IEEE_TCP_UDP_PORT:
+                               *entry |= ((u32)DCBX_APP_SF_IEEE_TCP_UDP_PORT <<
+                                          DCBX_APP_SF_IEEE_SHIFT);
+                               *entry |= ((u32)DCBX_APP_SF_PORT <<
+                                          DCBX_APP_SF_SHIFT);
+                               break;
+                       }
+               } else {
+                       *entry &= ~DCBX_APP_SF_MASK;
+                       if (p_params->app_entry[i].ethtype)
+                               *entry |= ((u32)DCBX_APP_SF_ETHTYPE <<
+                                          DCBX_APP_SF_SHIFT);
+                       else
+                               *entry |= ((u32)DCBX_APP_SF_PORT <<
+                                          DCBX_APP_SF_SHIFT);
+               }
+
+               *entry &= ~DCBX_APP_PROTOCOL_ID_MASK;
+               *entry |= ((u32)p_params->app_entry[i].proto_id <<
+                          DCBX_APP_PROTOCOL_ID_SHIFT);
+               *entry &= ~DCBX_APP_PRI_MAP_MASK;
+               *entry |= ((u32)(p_params->app_entry[i].prio) <<
+                          DCBX_APP_PRI_MAP_SHIFT);
+       }
+}
+
+static void
+qed_dcbx_set_local_params(struct qed_hwfn *p_hwfn,
+                         struct dcbx_local_params *local_admin,
+                         struct qed_dcbx_set *params)
+{
+       bool ieee = false;
+
+       local_admin->flags = 0;
+       memcpy(&local_admin->features,
+              &p_hwfn->p_dcbx_info->operational.features,
+              sizeof(local_admin->features));
+
+       if (params->enabled) {
+               local_admin->config = params->ver_num;
+               ieee = !!(params->ver_num & DCBX_CONFIG_VERSION_IEEE);
+       } else {
+               local_admin->config = DCBX_CONFIG_VERSION_DISABLED;
+       }
+
+       if (params->override_flags & QED_DCBX_OVERRIDE_PFC_CFG)
+               qed_dcbx_set_pfc_data(p_hwfn, &local_admin->features.pfc,
+                                     &params->config.params);
+
+       if (params->override_flags & QED_DCBX_OVERRIDE_ETS_CFG)
+               qed_dcbx_set_ets_data(p_hwfn, &local_admin->features.ets,
+                                     &params->config.params);
+
+       if (params->override_flags & QED_DCBX_OVERRIDE_APP_CFG)
+               qed_dcbx_set_app_data(p_hwfn, &local_admin->features.app,
+                                     &params->config.params, ieee);
+}
+
+int qed_dcbx_config_params(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
+                          struct qed_dcbx_set *params, bool hw_commit)
+{
+       struct dcbx_local_params local_admin;
+       struct qed_dcbx_mib_meta_data data;
+       u32 resp = 0, param = 0;
+       int rc = 0;
+
+       if (!hw_commit) {
+               memcpy(&p_hwfn->p_dcbx_info->set, params,
+                      sizeof(struct qed_dcbx_set));
+               return 0;
+       }
+
+       /* clear set-params cache */
+       memset(&p_hwfn->p_dcbx_info->set, 0, sizeof(p_hwfn->p_dcbx_info->set));
+
+       memset(&local_admin, 0, sizeof(local_admin));
+       qed_dcbx_set_local_params(p_hwfn, &local_admin, params);
+
+       data.addr = p_hwfn->mcp_info->port_addr +
+           offsetof(struct public_port, local_admin_dcbx_mib);
+       data.local_admin = &local_admin;
+       data.size = sizeof(struct dcbx_local_params);
+       qed_memcpy_to(p_hwfn, p_ptt, data.addr, data.local_admin, data.size);
+
+       rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_SET_DCBX,
+                        1 << DRV_MB_PARAM_LLDP_SEND_SHIFT, &resp, &param);
+       if (rc)
+               DP_NOTICE(p_hwfn, "Failed to send DCBX update request\n");
+
+       return rc;
+}
+
+int qed_dcbx_get_config_params(struct qed_hwfn *p_hwfn,
+                              struct qed_dcbx_set *params)
+{
+       struct qed_dcbx_get *dcbx_info;
+       int rc;
+
+       if (p_hwfn->p_dcbx_info->set.config.valid) {
+               memcpy(params, &p_hwfn->p_dcbx_info->set,
+                      sizeof(struct qed_dcbx_set));
+               return 0;
+       }
+
+       dcbx_info = kzalloc(sizeof(*dcbx_info), GFP_KERNEL);
+       if (!dcbx_info) {
+               DP_ERR(p_hwfn, "Failed to allocate struct qed_dcbx_get\n");
+               return -ENOMEM;
+       }
+
+       rc = qed_dcbx_query_params(p_hwfn, dcbx_info, QED_DCBX_OPERATIONAL_MIB);
+       if (rc) {
+               kfree(dcbx_info);
+               return rc;
+       }
+
+       p_hwfn->p_dcbx_info->set.override_flags = 0;
+       p_hwfn->p_dcbx_info->set.ver_num = DCBX_CONFIG_VERSION_DISABLED;
+       if (dcbx_info->operational.cee)
+               p_hwfn->p_dcbx_info->set.ver_num |= DCBX_CONFIG_VERSION_CEE;
+       if (dcbx_info->operational.ieee)
+               p_hwfn->p_dcbx_info->set.ver_num |= DCBX_CONFIG_VERSION_IEEE;
+
+       p_hwfn->p_dcbx_info->set.enabled = dcbx_info->operational.enabled;
+       memcpy(&p_hwfn->p_dcbx_info->set.config.params,
+              &dcbx_info->operational.params,
+              sizeof(struct qed_dcbx_admin_params));
+       p_hwfn->p_dcbx_info->set.config.valid = true;
+
+       memcpy(params, &p_hwfn->p_dcbx_info->set, sizeof(struct qed_dcbx_set));
+
+       kfree(dcbx_info);
+
+       return 0;
+}
+
+static struct qed_dcbx_get *qed_dcbnl_get_dcbx(struct qed_hwfn *hwfn,
+                                              enum qed_mib_read_type type)
+{
+       struct qed_dcbx_get *dcbx_info;
+
+       dcbx_info = kzalloc(sizeof(*dcbx_info), GFP_KERNEL);
+       if (!dcbx_info) {
+               DP_ERR(hwfn->cdev, "Failed to allocate memory for dcbx_info\n");
+               return NULL;
+       }
+
+       if (qed_dcbx_query_params(hwfn, dcbx_info, type)) {
+               kfree(dcbx_info);
+               return NULL;
+       }
+
+       if ((type == QED_DCBX_OPERATIONAL_MIB) &&
+           !dcbx_info->operational.enabled) {
+               DP_INFO(hwfn, "DCBX is not enabled/operational\n");
+               kfree(dcbx_info);
+               return NULL;
+       }
+
+       return dcbx_info;
+}
+
+static u8 qed_dcbnl_getstate(struct qed_dev *cdev)
+{
+       struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+       struct qed_dcbx_get *dcbx_info;
+       bool enabled;
+
+       dcbx_info = qed_dcbnl_get_dcbx(hwfn, QED_DCBX_OPERATIONAL_MIB);
+       if (!dcbx_info)
+               return 0;
+
+       enabled = dcbx_info->operational.enabled;
+       DP_VERBOSE(hwfn, QED_MSG_DCB, "DCB state = %d\n", enabled);
+       kfree(dcbx_info);
+
+       return enabled;
+}
+
+static u8 qed_dcbnl_setstate(struct qed_dev *cdev, u8 state)
+{
+       struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+       struct qed_dcbx_set dcbx_set;
+       struct qed_ptt *ptt;
+       int rc;
+
+       DP_VERBOSE(hwfn, QED_MSG_DCB, "DCB state = %d\n", state);
+
+       memset(&dcbx_set, 0, sizeof(dcbx_set));
+       rc = qed_dcbx_get_config_params(hwfn, &dcbx_set);
+       if (rc)
+               return 1;
+
+       dcbx_set.enabled = !!state;
+
+       ptt = qed_ptt_acquire(hwfn);
+       if (!ptt)
+               return 1;
+
+       rc = qed_dcbx_config_params(hwfn, ptt, &dcbx_set, 0);
+
+       qed_ptt_release(hwfn, ptt);
+
+       return rc ? 1 : 0;
+}
+
+static void qed_dcbnl_getpgtccfgtx(struct qed_dev *cdev, int tc, u8 *prio_type,
+                                  u8 *pgid, u8 *bw_pct, u8 *up_map)
+{
+       struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+       struct qed_dcbx_get *dcbx_info;
+
+       DP_VERBOSE(hwfn, QED_MSG_DCB, "tc = %d\n", tc);
+       *prio_type = *pgid = *bw_pct = *up_map = 0;
+       if (tc < 0 || tc >= QED_MAX_PFC_PRIORITIES) {
+               DP_INFO(hwfn, "Invalid tc %d\n", tc);
+               return;
+       }
+
+       dcbx_info = qed_dcbnl_get_dcbx(hwfn, QED_DCBX_OPERATIONAL_MIB);
+       if (!dcbx_info)
+               return;
+
+       *pgid = dcbx_info->operational.params.ets_pri_tc_tbl[tc];
+       kfree(dcbx_info);
+}
+
+static void qed_dcbnl_getpgbwgcfgtx(struct qed_dev *cdev, int pgid, u8 *bw_pct)
+{
+       struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+       struct qed_dcbx_get *dcbx_info;
+
+       *bw_pct = 0;
+       DP_VERBOSE(hwfn, QED_MSG_DCB, "pgid = %d\n", pgid);
+       if (pgid < 0 || pgid >= QED_MAX_PFC_PRIORITIES) {
+               DP_INFO(hwfn, "Invalid pgid %d\n", pgid);
+               return;
+       }
+
+       dcbx_info = qed_dcbnl_get_dcbx(hwfn, QED_DCBX_OPERATIONAL_MIB);
+       if (!dcbx_info)
+               return;
+
+       *bw_pct = dcbx_info->operational.params.ets_tc_bw_tbl[pgid];
+       DP_VERBOSE(hwfn, QED_MSG_DCB, "bw_pct = %d\n", *bw_pct);
+       kfree(dcbx_info);
+}
+
+static void qed_dcbnl_getpgtccfgrx(struct qed_dev *cdev, int tc, u8 *prio,
+                                  u8 *bwg_id, u8 *bw_pct, u8 *up_map)
+{
+       DP_INFO(QED_LEADING_HWFN(cdev), "Rx ETS is not supported\n");
+       *prio = *bwg_id = *bw_pct = *up_map = 0;
+}
+
+static void qed_dcbnl_getpgbwgcfgrx(struct qed_dev *cdev,
+                                   int bwg_id, u8 *bw_pct)
+{
+       DP_INFO(QED_LEADING_HWFN(cdev), "Rx ETS is not supported\n");
+       *bw_pct = 0;
+}
+
+static void qed_dcbnl_getpfccfg(struct qed_dev *cdev,
+                               int priority, u8 *setting)
+{
+       struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+       struct qed_dcbx_get *dcbx_info;
+
+       DP_VERBOSE(hwfn, QED_MSG_DCB, "priority = %d\n", priority);
+       if (priority < 0 || priority >= QED_MAX_PFC_PRIORITIES) {
+               DP_INFO(hwfn, "Invalid priority %d\n", priority);
+               return;
+       }
+
+       dcbx_info = qed_dcbnl_get_dcbx(hwfn, QED_DCBX_OPERATIONAL_MIB);
+       if (!dcbx_info)
+               return;
+
+       *setting = dcbx_info->operational.params.pfc.prio[priority];
+       DP_VERBOSE(hwfn, QED_MSG_DCB, "setting = %d\n", *setting);
+       kfree(dcbx_info);
+}
+
+static void qed_dcbnl_setpfccfg(struct qed_dev *cdev, int priority, u8 setting)
+{
+       struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+       struct qed_dcbx_set dcbx_set;
+       struct qed_ptt *ptt;
+       int rc;
+
+       DP_VERBOSE(hwfn, QED_MSG_DCB, "priority = %d setting = %d\n",
+                  priority, setting);
+       if (priority < 0 || priority >= QED_MAX_PFC_PRIORITIES) {
+               DP_INFO(hwfn, "Invalid priority %d\n", priority);
+               return;
+       }
+
+       memset(&dcbx_set, 0, sizeof(dcbx_set));
+       rc = qed_dcbx_get_config_params(hwfn, &dcbx_set);
+       if (rc)
+               return;
+
+       dcbx_set.override_flags |= QED_DCBX_OVERRIDE_PFC_CFG;
+       dcbx_set.config.params.pfc.prio[priority] = !!setting;
+
+       ptt = qed_ptt_acquire(hwfn);
+       if (!ptt)
+               return;
+
+       rc = qed_dcbx_config_params(hwfn, ptt, &dcbx_set, 0);
+
+       qed_ptt_release(hwfn, ptt);
+}
+
+static u8 qed_dcbnl_getcap(struct qed_dev *cdev, int capid, u8 *cap)
+{
+       struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+       struct qed_dcbx_get *dcbx_info;
+       int rc = 0;
+
+       DP_VERBOSE(hwfn, QED_MSG_DCB, "capid = %d\n", capid);
+       dcbx_info = qed_dcbnl_get_dcbx(hwfn, QED_DCBX_OPERATIONAL_MIB);
+       if (!dcbx_info)
+               return 1;
+
+       switch (capid) {
+       case DCB_CAP_ATTR_PG:
+       case DCB_CAP_ATTR_PFC:
+       case DCB_CAP_ATTR_UP2TC:
+       case DCB_CAP_ATTR_GSP:
+               *cap = true;
+               break;
+       case DCB_CAP_ATTR_PG_TCS:
+       case DCB_CAP_ATTR_PFC_TCS:
+               *cap = 0x80;
+               break;
+       case DCB_CAP_ATTR_DCBX:
+               *cap = (DCB_CAP_DCBX_LLD_MANAGED | DCB_CAP_DCBX_VER_CEE |
+                       DCB_CAP_DCBX_VER_IEEE);
+               break;
+       default:
+               *cap = false;
+               rc = 1;
+       }
+
+       DP_VERBOSE(hwfn, QED_MSG_DCB, "id = %d caps = %d\n", capid, *cap);
+       kfree(dcbx_info);
+
+       return rc;
+}
+
+static int qed_dcbnl_getnumtcs(struct qed_dev *cdev, int tcid, u8 *num)
+{
+       struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+       struct qed_dcbx_get *dcbx_info;
+       int rc = 0;
+
+       DP_VERBOSE(hwfn, QED_MSG_DCB, "tcid = %d\n", tcid);
+       dcbx_info = qed_dcbnl_get_dcbx(hwfn, QED_DCBX_OPERATIONAL_MIB);
+       if (!dcbx_info)
+               return -EINVAL;
+
+       switch (tcid) {
+       case DCB_NUMTCS_ATTR_PG:
+               *num = dcbx_info->operational.params.max_ets_tc;
+               break;
+       case DCB_NUMTCS_ATTR_PFC:
+               *num = dcbx_info->operational.params.pfc.max_tc;
+               break;
+       default:
+               rc = -EINVAL;
+       }
+
+       kfree(dcbx_info);
+       DP_VERBOSE(hwfn, QED_MSG_DCB, "numtcs = %d\n", *num);
+
+       return rc;
+}
+
+static u8 qed_dcbnl_getpfcstate(struct qed_dev *cdev)
+{
+       struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+       struct qed_dcbx_get *dcbx_info;
+       bool enabled;
+
+       dcbx_info = qed_dcbnl_get_dcbx(hwfn, QED_DCBX_OPERATIONAL_MIB);
+       if (!dcbx_info)
+               return 0;
+
+       enabled = dcbx_info->operational.params.pfc.enabled;
+       DP_VERBOSE(hwfn, QED_MSG_DCB, "pfc state = %d\n", enabled);
+       kfree(dcbx_info);
+
+       return enabled;
+}
+
+static u8 qed_dcbnl_getdcbx(struct qed_dev *cdev)
+{
+       struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+       struct qed_dcbx_get *dcbx_info;
+       u8 mode = 0;
+
+       dcbx_info = qed_dcbnl_get_dcbx(hwfn, QED_DCBX_OPERATIONAL_MIB);
+       if (!dcbx_info)
+               return 0;
+
+       if (dcbx_info->operational.enabled)
+               mode |= DCB_CAP_DCBX_LLD_MANAGED;
+       if (dcbx_info->operational.ieee)
+               mode |= DCB_CAP_DCBX_VER_IEEE;
+       if (dcbx_info->operational.cee)
+               mode |= DCB_CAP_DCBX_VER_CEE;
+
+       DP_VERBOSE(hwfn, QED_MSG_DCB, "dcb mode = %d\n", mode);
+       kfree(dcbx_info);
+
+       return mode;
+}
+
+static void qed_dcbnl_setpgtccfgtx(struct qed_dev *cdev,
+                                  int tc,
+                                  u8 pri_type, u8 pgid, u8 bw_pct, u8 up_map)
+{
+       struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+       struct qed_dcbx_set dcbx_set;
+       struct qed_ptt *ptt;
+       int rc;
+
+       DP_VERBOSE(hwfn, QED_MSG_DCB,
+                  "tc = %d pri_type = %d pgid = %d bw_pct = %d up_map = %d\n",
+                  tc, pri_type, pgid, bw_pct, up_map);
+
+       if (tc < 0 || tc >= QED_MAX_PFC_PRIORITIES) {
+               DP_INFO(hwfn, "Invalid tc %d\n", tc);
+               return;
+       }
+
+       memset(&dcbx_set, 0, sizeof(dcbx_set));
+       rc = qed_dcbx_get_config_params(hwfn, &dcbx_set);
+       if (rc)
+               return;
+
+       dcbx_set.override_flags |= QED_DCBX_OVERRIDE_ETS_CFG;
+       dcbx_set.config.params.ets_pri_tc_tbl[tc] = pgid;
+
+       ptt = qed_ptt_acquire(hwfn);
+       if (!ptt)
+               return;
+
+       rc = qed_dcbx_config_params(hwfn, ptt, &dcbx_set, 0);
+
+       qed_ptt_release(hwfn, ptt);
+}
+
+static void qed_dcbnl_setpgtccfgrx(struct qed_dev *cdev, int prio,
+                                  u8 pri_type, u8 pgid, u8 bw_pct, u8 up_map)
+{
+       DP_INFO(QED_LEADING_HWFN(cdev), "Rx ETS is not supported\n");
+}
+
+static void qed_dcbnl_setpgbwgcfgtx(struct qed_dev *cdev, int pgid, u8 bw_pct)
+{
+       struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+       struct qed_dcbx_set dcbx_set;
+       struct qed_ptt *ptt;
+       int rc;
+
+       DP_VERBOSE(hwfn, QED_MSG_DCB, "pgid = %d bw_pct = %d\n", pgid, bw_pct);
+       if (pgid < 0 || pgid >= QED_MAX_PFC_PRIORITIES) {
+               DP_INFO(hwfn, "Invalid pgid %d\n", pgid);
+               return;
+       }
+
+       memset(&dcbx_set, 0, sizeof(dcbx_set));
+       rc = qed_dcbx_get_config_params(hwfn, &dcbx_set);
+       if (rc)
+               return;
+
+       dcbx_set.override_flags |= QED_DCBX_OVERRIDE_ETS_CFG;
+       dcbx_set.config.params.ets_tc_bw_tbl[pgid] = bw_pct;
+
+       ptt = qed_ptt_acquire(hwfn);
+       if (!ptt)
+               return;
+
+       rc = qed_dcbx_config_params(hwfn, ptt, &dcbx_set, 0);
+
+       qed_ptt_release(hwfn, ptt);
+}
+
+static void qed_dcbnl_setpgbwgcfgrx(struct qed_dev *cdev, int pgid, u8 bw_pct)
+{
+       DP_INFO(QED_LEADING_HWFN(cdev), "Rx ETS is not supported\n");
+}
+
+static u8 qed_dcbnl_setall(struct qed_dev *cdev)
+{
+       struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+       struct qed_dcbx_set dcbx_set;
+       struct qed_ptt *ptt;
+       int rc;
+
+       memset(&dcbx_set, 0, sizeof(dcbx_set));
+       rc = qed_dcbx_get_config_params(hwfn, &dcbx_set);
+       if (rc)
+               return 1;
+
+       ptt = qed_ptt_acquire(hwfn);
+       if (!ptt)
+               return 1;
+
+       rc = qed_dcbx_config_params(hwfn, ptt, &dcbx_set, 1);
+
+       qed_ptt_release(hwfn, ptt);
+
+       return rc;
+}
+
+static int qed_dcbnl_setnumtcs(struct qed_dev *cdev, int tcid, u8 num)
+{
+       struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+       struct qed_dcbx_set dcbx_set;
+       struct qed_ptt *ptt;
+       int rc;
+
+       DP_VERBOSE(hwfn, QED_MSG_DCB, "tcid = %d num = %d\n", tcid, num);
+       memset(&dcbx_set, 0, sizeof(dcbx_set));
+       rc = qed_dcbx_get_config_params(hwfn, &dcbx_set);
+       if (rc)
+               return 1;
+
+       switch (tcid) {
+       case DCB_NUMTCS_ATTR_PG:
+               dcbx_set.override_flags |= QED_DCBX_OVERRIDE_ETS_CFG;
+               dcbx_set.config.params.max_ets_tc = num;
+               break;
+       case DCB_NUMTCS_ATTR_PFC:
+               dcbx_set.override_flags |= QED_DCBX_OVERRIDE_PFC_CFG;
+               dcbx_set.config.params.pfc.max_tc = num;
+               break;
+       default:
+               DP_INFO(hwfn, "Invalid tcid %d\n", tcid);
+               return -EINVAL;
+       }
+
+       ptt = qed_ptt_acquire(hwfn);
+       if (!ptt)
+               return -EINVAL;
+
+       rc = qed_dcbx_config_params(hwfn, ptt, &dcbx_set, 0);
+
+       qed_ptt_release(hwfn, ptt);
+
+       return rc;
+}
+
+static void qed_dcbnl_setpfcstate(struct qed_dev *cdev, u8 state)
+{
+       struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+       struct qed_dcbx_set dcbx_set;
+       struct qed_ptt *ptt;
+       int rc;
+
+       DP_VERBOSE(hwfn, QED_MSG_DCB, "new state = %d\n", state);
+
+       memset(&dcbx_set, 0, sizeof(dcbx_set));
+       rc = qed_dcbx_get_config_params(hwfn, &dcbx_set);
+       if (rc)
+               return;
+
+       dcbx_set.override_flags |= QED_DCBX_OVERRIDE_PFC_CFG;
+       dcbx_set.config.params.pfc.enabled = !!state;
+
+       ptt = qed_ptt_acquire(hwfn);
+       if (!ptt)
+               return;
+
+       rc = qed_dcbx_config_params(hwfn, ptt, &dcbx_set, 0);
+
+       qed_ptt_release(hwfn, ptt);
+}
+
+static int qed_dcbnl_getapp(struct qed_dev *cdev, u8 idtype, u16 idval)
+{
+       struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+       struct qed_dcbx_get *dcbx_info;
+       struct qed_app_entry *entry;
+       bool ethtype;
+       u8 prio = 0;
+       int i;
+
+       dcbx_info = qed_dcbnl_get_dcbx(hwfn, QED_DCBX_OPERATIONAL_MIB);
+       if (!dcbx_info)
+               return -EINVAL;
+
+       ethtype = !!(idtype == DCB_APP_IDTYPE_ETHTYPE);
+       for (i = 0; i < QED_DCBX_MAX_APP_PROTOCOL; i++) {
+               entry = &dcbx_info->operational.params.app_entry[i];
+               if ((entry->ethtype == ethtype) && (entry->proto_id == idval)) {
+                       prio = entry->prio;
+                       break;
+               }
+       }
+
+       if (i == QED_DCBX_MAX_APP_PROTOCOL) {
+               DP_ERR(cdev, "App entry (%d, %d) not found\n", idtype, idval);
+               kfree(dcbx_info);
+               return -EINVAL;
+       }
+
+       kfree(dcbx_info);
+
+       return prio;
+}
+
+static int qed_dcbnl_setapp(struct qed_dev *cdev,
+                           u8 idtype, u16 idval, u8 pri_map)
+{
+       struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+       struct qed_dcbx_set dcbx_set;
+       struct qed_app_entry *entry;
+       struct qed_ptt *ptt;
+       bool ethtype;
+       int rc, i;
+
+       memset(&dcbx_set, 0, sizeof(dcbx_set));
+       rc = qed_dcbx_get_config_params(hwfn, &dcbx_set);
+       if (rc)
+               return -EINVAL;
+
+       ethtype = !!(idtype == DCB_APP_IDTYPE_ETHTYPE);
+       for (i = 0; i < QED_DCBX_MAX_APP_PROTOCOL; i++) {
+               entry = &dcbx_set.config.params.app_entry[i];
+               if ((entry->ethtype == ethtype) && (entry->proto_id == idval))
+                       break;
+               /* First empty slot */
+               if (!entry->proto_id) {
+                       dcbx_set.config.params.num_app_entries++;
+                       break;
+               }
+       }
+
+       if (i == QED_DCBX_MAX_APP_PROTOCOL) {
+               DP_ERR(cdev, "App table is full\n");
+               return -EBUSY;
+       }
+
+       dcbx_set.override_flags |= QED_DCBX_OVERRIDE_APP_CFG;
+       dcbx_set.config.params.app_entry[i].ethtype = ethtype;
+       dcbx_set.config.params.app_entry[i].proto_id = idval;
+       dcbx_set.config.params.app_entry[i].prio = pri_map;
+
+       ptt = qed_ptt_acquire(hwfn);
+       if (!ptt)
+               return -EBUSY;
+
+       rc = qed_dcbx_config_params(hwfn, ptt, &dcbx_set, 0);
+
+       qed_ptt_release(hwfn, ptt);
+
+       return rc;
+}
+
+static u8 qed_dcbnl_setdcbx(struct qed_dev *cdev, u8 mode)
+{
+       struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+       struct qed_dcbx_set dcbx_set;
+       struct qed_ptt *ptt;
+       int rc;
+
+       DP_VERBOSE(hwfn, QED_MSG_DCB, "new mode = %x\n", mode);
+
+       if (!(mode & DCB_CAP_DCBX_VER_IEEE) && !(mode & DCB_CAP_DCBX_VER_CEE)) {
+               DP_INFO(hwfn, "Allowed mode is cee, ieee or both\n");
+               return 1;
+       }
+
+       memset(&dcbx_set, 0, sizeof(dcbx_set));
+       rc = qed_dcbx_get_config_params(hwfn, &dcbx_set);
+       if (rc)
+               return 1;
+
+       dcbx_set.ver_num = 0;
+       if (mode & DCB_CAP_DCBX_VER_CEE) {
+               dcbx_set.ver_num |= DCBX_CONFIG_VERSION_CEE;
+               dcbx_set.enabled = true;
+       }
+
+       if (mode & DCB_CAP_DCBX_VER_IEEE) {
+               dcbx_set.ver_num |= DCBX_CONFIG_VERSION_IEEE;
+               dcbx_set.enabled = true;
+       }
+
+       ptt = qed_ptt_acquire(hwfn);
+       if (!ptt)
+               return 1;
+
+       rc = qed_dcbx_config_params(hwfn, ptt, &dcbx_set, 0);
+
+       qed_ptt_release(hwfn, ptt);
+
+       return 0;
+}
+
+static u8 qed_dcbnl_getfeatcfg(struct qed_dev *cdev, int featid, u8 *flags)
+{
+       struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+       struct qed_dcbx_get *dcbx_info;
+
+       DP_VERBOSE(hwfn, QED_MSG_DCB, "Feature id  = %d\n", featid);
+       dcbx_info = qed_dcbnl_get_dcbx(hwfn, QED_DCBX_OPERATIONAL_MIB);
+       if (!dcbx_info)
+               return 1;
+
+       *flags = 0;
+       switch (featid) {
+       case DCB_FEATCFG_ATTR_PG:
+               if (dcbx_info->operational.params.ets_enabled)
+                       *flags = DCB_FEATCFG_ENABLE;
+               else
+                       *flags = DCB_FEATCFG_ERROR;
+               break;
+       case DCB_FEATCFG_ATTR_PFC:
+               if (dcbx_info->operational.params.pfc.enabled)
+                       *flags = DCB_FEATCFG_ENABLE;
+               else
+                       *flags = DCB_FEATCFG_ERROR;
+               break;
+       case DCB_FEATCFG_ATTR_APP:
+               if (dcbx_info->operational.params.app_valid)
+                       *flags = DCB_FEATCFG_ENABLE;
+               else
+                       *flags = DCB_FEATCFG_ERROR;
+               break;
+       default:
+               DP_INFO(hwfn, "Invalid feature-ID %d\n", featid);
+               kfree(dcbx_info);
+               return 1;
+       }
+
+       DP_VERBOSE(hwfn, QED_MSG_DCB, "flags = %d\n", *flags);
+       kfree(dcbx_info);
+
+       return 0;
+}
+
+static u8 qed_dcbnl_setfeatcfg(struct qed_dev *cdev, int featid, u8 flags)
+{
+       struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+       struct qed_dcbx_set dcbx_set;
+       bool enabled, willing;
+       struct qed_ptt *ptt;
+       int rc;
+
+       DP_VERBOSE(hwfn, QED_MSG_DCB, "featid = %d flags = %d\n",
+                  featid, flags);
+       memset(&dcbx_set, 0, sizeof(dcbx_set));
+       rc = qed_dcbx_get_config_params(hwfn, &dcbx_set);
+       if (rc)
+               return 1;
+
+       enabled = !!(flags & DCB_FEATCFG_ENABLE);
+       willing = !!(flags & DCB_FEATCFG_WILLING);
+       switch (featid) {
+       case DCB_FEATCFG_ATTR_PG:
+               dcbx_set.override_flags |= QED_DCBX_OVERRIDE_ETS_CFG;
+               dcbx_set.config.params.ets_enabled = enabled;
+               dcbx_set.config.params.ets_willing = willing;
+               break;
+       case DCB_FEATCFG_ATTR_PFC:
+               dcbx_set.override_flags |= QED_DCBX_OVERRIDE_PFC_CFG;
+               dcbx_set.config.params.pfc.enabled = enabled;
+               dcbx_set.config.params.pfc.willing = willing;
+               break;
+       case DCB_FEATCFG_ATTR_APP:
+               dcbx_set.override_flags |= QED_DCBX_OVERRIDE_APP_CFG;
+               dcbx_set.config.params.app_willing = willing;
+               break;
+       default:
+               DP_INFO(hwfn, "Invalid feature-ID %d\n", featid);
+               return 1;
+       }
+
+       ptt = qed_ptt_acquire(hwfn);
+       if (!ptt)
+               return 1;
+
+       rc = qed_dcbx_config_params(hwfn, ptt, &dcbx_set, 0);
+
+       qed_ptt_release(hwfn, ptt);
+
+       return 0;
+}
+
+static int qed_dcbnl_peer_getappinfo(struct qed_dev *cdev,
+                                    struct dcb_peer_app_info *info,
+                                    u16 *app_count)
+{
+       struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+       struct qed_dcbx_get *dcbx_info;
+
+       dcbx_info = qed_dcbnl_get_dcbx(hwfn, QED_DCBX_REMOTE_MIB);
+       if (!dcbx_info)
+               return -EINVAL;
+
+       info->willing = dcbx_info->remote.params.app_willing;
+       info->error = dcbx_info->remote.params.app_error;
+       *app_count = dcbx_info->remote.params.num_app_entries;
+       kfree(dcbx_info);
+
+       return 0;
+}
+
+static int qed_dcbnl_peer_getapptable(struct qed_dev *cdev,
+                                     struct dcb_app *table)
+{
+       struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+       struct qed_dcbx_get *dcbx_info;
+       int i;
+
+       dcbx_info = qed_dcbnl_get_dcbx(hwfn, QED_DCBX_REMOTE_MIB);
+       if (!dcbx_info)
+               return -EINVAL;
+
+       for (i = 0; i < dcbx_info->remote.params.num_app_entries; i++) {
+               if (dcbx_info->remote.params.app_entry[i].ethtype)
+                       table[i].selector = DCB_APP_IDTYPE_ETHTYPE;
+               else
+                       table[i].selector = DCB_APP_IDTYPE_PORTNUM;
+               table[i].priority = dcbx_info->remote.params.app_entry[i].prio;
+               table[i].protocol =
+                   dcbx_info->remote.params.app_entry[i].proto_id;
+       }
+
+       kfree(dcbx_info);
+
+       return 0;
+}
+
+static int qed_dcbnl_cee_peer_getpfc(struct qed_dev *cdev, struct cee_pfc *pfc)
+{
+       struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+       struct qed_dcbx_get *dcbx_info;
+       int i;
+
+       dcbx_info = qed_dcbnl_get_dcbx(hwfn, QED_DCBX_REMOTE_MIB);
+       if (!dcbx_info)
+               return -EINVAL;
+
+       for (i = 0; i < QED_MAX_PFC_PRIORITIES; i++)
+               if (dcbx_info->remote.params.pfc.prio[i])
+                       pfc->pfc_en |= BIT(i);
+
+       pfc->tcs_supported = dcbx_info->remote.params.pfc.max_tc;
+       DP_VERBOSE(hwfn, QED_MSG_DCB, "pfc state = %d tcs_supported = %d\n",
+                  pfc->pfc_en, pfc->tcs_supported);
+       kfree(dcbx_info);
+
+       return 0;
+}
+
+static int qed_dcbnl_cee_peer_getpg(struct qed_dev *cdev, struct cee_pg *pg)
+{
+       struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+       struct qed_dcbx_get *dcbx_info;
+       int i;
+
+       dcbx_info = qed_dcbnl_get_dcbx(hwfn, QED_DCBX_REMOTE_MIB);
+       if (!dcbx_info)
+               return -EINVAL;
+
+       pg->willing = dcbx_info->remote.params.ets_willing;
+       for (i = 0; i < QED_MAX_PFC_PRIORITIES; i++) {
+               pg->pg_bw[i] = dcbx_info->remote.params.ets_tc_bw_tbl[i];
+               pg->prio_pg[i] = dcbx_info->remote.params.ets_pri_tc_tbl[i];
+       }
+
+       DP_VERBOSE(hwfn, QED_MSG_DCB, "willing = %d", pg->willing);
+       kfree(dcbx_info);
+
+       return 0;
+}
+
+static int qed_dcbnl_get_ieee_pfc(struct qed_dev *cdev,
+                                 struct ieee_pfc *pfc, bool remote)
+{
+       struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+       struct qed_dcbx_params *params;
+       struct qed_dcbx_get *dcbx_info;
+       int rc, i;
+
+       dcbx_info = qed_dcbnl_get_dcbx(hwfn, QED_DCBX_OPERATIONAL_MIB);
+       if (!dcbx_info)
+               return -EINVAL;
+
+       if (!dcbx_info->operational.ieee) {
+               DP_INFO(hwfn, "DCBX is not enabled/operational in IEEE mode\n");
+               kfree(dcbx_info);
+               return -EINVAL;
+       }
+
+       if (remote) {
+               memset(dcbx_info, 0, sizeof(*dcbx_info));
+               rc = qed_dcbx_query_params(hwfn, dcbx_info,
+                                          QED_DCBX_REMOTE_MIB);
+               if (rc) {
+                       kfree(dcbx_info);
+                       return -EINVAL;
+               }
+
+               params = &dcbx_info->remote.params;
+       } else {
+               params = &dcbx_info->operational.params;
+       }
+
+       pfc->pfc_cap = params->pfc.max_tc;
+       pfc->pfc_en = 0;
+       for (i = 0; i < QED_MAX_PFC_PRIORITIES; i++)
+               if (params->pfc.prio[i])
+                       pfc->pfc_en |= BIT(i);
+
+       kfree(dcbx_info);
+
+       return 0;
+}
+
+static int qed_dcbnl_ieee_getpfc(struct qed_dev *cdev, struct ieee_pfc *pfc)
+{
+       return qed_dcbnl_get_ieee_pfc(cdev, pfc, false);
+}
+
+static int qed_dcbnl_ieee_setpfc(struct qed_dev *cdev, struct ieee_pfc *pfc)
+{
+       struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+       struct qed_dcbx_get *dcbx_info;
+       struct qed_dcbx_set dcbx_set;
+       struct qed_ptt *ptt;
+       int rc, i;
+
+       dcbx_info = qed_dcbnl_get_dcbx(hwfn, QED_DCBX_OPERATIONAL_MIB);
+       if (!dcbx_info)
+               return -EINVAL;
+
+       if (!dcbx_info->operational.ieee) {
+               DP_INFO(hwfn, "DCBX is not enabled/operational in IEEE mode\n");
+               kfree(dcbx_info);
+               return -EINVAL;
+       }
+
+       kfree(dcbx_info);
+
+       memset(&dcbx_set, 0, sizeof(dcbx_set));
+       rc = qed_dcbx_get_config_params(hwfn, &dcbx_set);
+       if (rc)
+               return -EINVAL;
+
+       dcbx_set.override_flags |= QED_DCBX_OVERRIDE_PFC_CFG;
+       for (i = 0; i < QED_MAX_PFC_PRIORITIES; i++)
+               dcbx_set.config.params.pfc.prio[i] = !!(pfc->pfc_en & BIT(i));
+
+       ptt = qed_ptt_acquire(hwfn);
+       if (!ptt)
+               return -EINVAL;
+
+       rc = qed_dcbx_config_params(hwfn, ptt, &dcbx_set, 0);
+
+       qed_ptt_release(hwfn, ptt);
+
+       return rc;
+}
+
+static int qed_dcbnl_get_ieee_ets(struct qed_dev *cdev,
+                                 struct ieee_ets *ets, bool remote)
+{
+       struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+       struct qed_dcbx_get *dcbx_info;
+       struct qed_dcbx_params *params;
+       int rc;
+
+       dcbx_info = qed_dcbnl_get_dcbx(hwfn, QED_DCBX_OPERATIONAL_MIB);
+       if (!dcbx_info)
+               return -EINVAL;
+
+       if (!dcbx_info->operational.ieee) {
+               DP_INFO(hwfn, "DCBX is not enabled/operational in IEEE mode\n");
+               kfree(dcbx_info);
+               return -EINVAL;
+       }
+
+       if (remote) {
+               memset(dcbx_info, 0, sizeof(*dcbx_info));
+               rc = qed_dcbx_query_params(hwfn, dcbx_info,
+                                          QED_DCBX_REMOTE_MIB);
+               if (rc) {
+                       kfree(dcbx_info);
+                       return -EINVAL;
+               }
+
+               params = &dcbx_info->remote.params;
+       } else {
+               params = &dcbx_info->operational.params;
+       }
+
+       ets->ets_cap = params->max_ets_tc;
+       ets->willing = params->ets_willing;
+       ets->cbs = params->ets_cbs;
+       memcpy(ets->tc_tx_bw, params->ets_tc_bw_tbl, sizeof(ets->tc_tx_bw));
+       memcpy(ets->tc_tsa, params->ets_tc_tsa_tbl, sizeof(ets->tc_tsa));
+       memcpy(ets->prio_tc, params->ets_pri_tc_tbl, sizeof(ets->prio_tc));
+       kfree(dcbx_info);
+
+       return 0;
+}
+
+static int qed_dcbnl_ieee_getets(struct qed_dev *cdev, struct ieee_ets *ets)
+{
+       return qed_dcbnl_get_ieee_ets(cdev, ets, false);
+}
+
+static int qed_dcbnl_ieee_setets(struct qed_dev *cdev, struct ieee_ets *ets)
+{
+       struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+       struct qed_dcbx_get *dcbx_info;
+       struct qed_dcbx_set dcbx_set;
+       struct qed_ptt *ptt;
+       int rc;
+
+       dcbx_info = qed_dcbnl_get_dcbx(hwfn, QED_DCBX_OPERATIONAL_MIB);
+       if (!dcbx_info)
+               return -EINVAL;
+
+       if (!dcbx_info->operational.ieee) {
+               DP_INFO(hwfn, "DCBX is not enabled/operational in IEEE mode\n");
+               kfree(dcbx_info);
+               return -EINVAL;
+       }
+
+       kfree(dcbx_info);
+
+       memset(&dcbx_set, 0, sizeof(dcbx_set));
+       rc = qed_dcbx_get_config_params(hwfn, &dcbx_set);
+       if (rc)
+               return -EINVAL;
+
+       dcbx_set.override_flags |= QED_DCBX_OVERRIDE_ETS_CFG;
+       dcbx_set.config.params.max_ets_tc = ets->ets_cap;
+       dcbx_set.config.params.ets_willing = ets->willing;
+       dcbx_set.config.params.ets_cbs = ets->cbs;
+       memcpy(dcbx_set.config.params.ets_tc_bw_tbl, ets->tc_tx_bw,
+              sizeof(ets->tc_tx_bw));
+       memcpy(dcbx_set.config.params.ets_tc_tsa_tbl, ets->tc_tsa,
+              sizeof(ets->tc_tsa));
+       memcpy(dcbx_set.config.params.ets_pri_tc_tbl, ets->prio_tc,
+              sizeof(ets->prio_tc));
+
+       ptt = qed_ptt_acquire(hwfn);
+       if (!ptt)
+               return -EINVAL;
+
+       rc = qed_dcbx_config_params(hwfn, ptt, &dcbx_set, 0);
+
+       qed_ptt_release(hwfn, ptt);
+
+       return rc;
+}
+
+int qed_dcbnl_ieee_peer_getets(struct qed_dev *cdev, struct ieee_ets *ets)
+{
+       return qed_dcbnl_get_ieee_ets(cdev, ets, true);
+}
+
+int qed_dcbnl_ieee_peer_getpfc(struct qed_dev *cdev, struct ieee_pfc *pfc)
+{
+       return qed_dcbnl_get_ieee_pfc(cdev, pfc, true);
+}
+
+int qed_dcbnl_ieee_getapp(struct qed_dev *cdev, struct dcb_app *app)
+{
+       struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+       struct qed_dcbx_get *dcbx_info;
+       struct qed_app_entry *entry;
+       bool ethtype;
+       u8 prio = 0;
+       int i;
+
+       dcbx_info = qed_dcbnl_get_dcbx(hwfn, QED_DCBX_OPERATIONAL_MIB);
+       if (!dcbx_info)
+               return -EINVAL;
+
+       if (!dcbx_info->operational.ieee) {
+               DP_INFO(hwfn, "DCBX is not enabled/operational in IEEE mode\n");
+               kfree(dcbx_info);
+               return -EINVAL;
+       }
+
+       /* ieee defines the selector field value for ethertype to be 1 */
+       ethtype = !!((app->selector - 1) == DCB_APP_IDTYPE_ETHTYPE);
+       for (i = 0; i < QED_DCBX_MAX_APP_PROTOCOL; i++) {
+               entry = &dcbx_info->operational.params.app_entry[i];
+               if ((entry->ethtype == ethtype) &&
+                   (entry->proto_id == app->protocol)) {
+                       prio = entry->prio;
+                       break;
+               }
+       }
+
+       if (i == QED_DCBX_MAX_APP_PROTOCOL) {
+               DP_ERR(cdev, "App entry (%d, %d) not found\n", app->selector,
+                      app->protocol);
+               kfree(dcbx_info);
+               return -EINVAL;
+       }
+
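+       /* entry->prio is kept as a priority bitmap (qed_dcbnl_ieee_setapp
+        * stores BIT(priority)); report the lowest set priority to the stack.
+        */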
+       app->priority = ffs(prio) - 1;
+
+       kfree(dcbx_info);
+
+       return 0;
+}
+
+int qed_dcbnl_ieee_setapp(struct qed_dev *cdev, struct dcb_app *app)
+{
+       struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+       struct qed_dcbx_get *dcbx_info;
+       struct qed_dcbx_set dcbx_set;
+       struct qed_app_entry *entry;
+       struct qed_ptt *ptt;
+       bool ethtype;
+       int rc, i;
+
+       if (app->priority < 0 || app->priority >= QED_MAX_PFC_PRIORITIES) {
+               DP_INFO(hwfn, "Invalid priority %d\n", app->priority);
+               return -EINVAL;
+       }
+
+       dcbx_info = qed_dcbnl_get_dcbx(hwfn, QED_DCBX_OPERATIONAL_MIB);
+       if (!dcbx_info)
+               return -EINVAL;
+
+       if (!dcbx_info->operational.ieee) {
+               DP_INFO(hwfn, "DCBX is not enabled/operational in IEEE mode\n");
+               kfree(dcbx_info);
+               return -EINVAL;
+       }
+
+       kfree(dcbx_info);
+
+       memset(&dcbx_set, 0, sizeof(dcbx_set));
+       rc = qed_dcbx_get_config_params(hwfn, &dcbx_set);
+       if (rc)
+               return -EINVAL;
+
+       /* ieee defines the selector field value for ethertype to be 1 */
+       ethtype = !!((app->selector - 1) == DCB_APP_IDTYPE_ETHTYPE);
+       for (i = 0; i < QED_DCBX_MAX_APP_PROTOCOL; i++) {
+               entry = &dcbx_set.config.params.app_entry[i];
+               if ((entry->ethtype == ethtype) &&
+                   (entry->proto_id == app->protocol))
+                       break;
+               /* First empty slot */
+               if (!entry->proto_id) {
+                       dcbx_set.config.params.num_app_entries++;
+                       break;
+               }
+       }
+
+       if (i == QED_DCBX_MAX_APP_PROTOCOL) {
+               DP_ERR(cdev, "App table is full\n");
+               return -EBUSY;
+       }
+
+       dcbx_set.override_flags |= QED_DCBX_OVERRIDE_APP_CFG;
+       dcbx_set.config.params.app_entry[i].ethtype = ethtype;
+       dcbx_set.config.params.app_entry[i].proto_id = app->protocol;
+       dcbx_set.config.params.app_entry[i].prio = BIT(app->priority);
+
+       ptt = qed_ptt_acquire(hwfn);
+       if (!ptt)
+               return -EBUSY;
+
+       rc = qed_dcbx_config_params(hwfn, ptt, &dcbx_set, 0);
+
+       qed_ptt_release(hwfn, ptt);
+
+       return rc;
+}
+
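+/* dcbnl callback table exported through qed's ethernet ops; the qede driver
+ * is expected to wire these into the kernel dcbnl framework.
+ */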
+const struct qed_eth_dcbnl_ops qed_dcbnl_ops_pass = {
+       .getstate = qed_dcbnl_getstate,
+       .setstate = qed_dcbnl_setstate,
+       .getpgtccfgtx = qed_dcbnl_getpgtccfgtx,
+       .getpgbwgcfgtx = qed_dcbnl_getpgbwgcfgtx,
+       .getpgtccfgrx = qed_dcbnl_getpgtccfgrx,
+       .getpgbwgcfgrx = qed_dcbnl_getpgbwgcfgrx,
+       .getpfccfg = qed_dcbnl_getpfccfg,
+       .setpfccfg = qed_dcbnl_setpfccfg,
+       .getcap = qed_dcbnl_getcap,
+       .getnumtcs = qed_dcbnl_getnumtcs,
+       .getpfcstate = qed_dcbnl_getpfcstate,
+       .getdcbx = qed_dcbnl_getdcbx,
+       .setpgtccfgtx = qed_dcbnl_setpgtccfgtx,
+       .setpgtccfgrx = qed_dcbnl_setpgtccfgrx,
+       .setpgbwgcfgtx = qed_dcbnl_setpgbwgcfgtx,
+       .setpgbwgcfgrx = qed_dcbnl_setpgbwgcfgrx,
+       .setall = qed_dcbnl_setall,
+       .setnumtcs = qed_dcbnl_setnumtcs,
+       .setpfcstate = qed_dcbnl_setpfcstate,
+       .setapp = qed_dcbnl_setapp,
+       .setdcbx = qed_dcbnl_setdcbx,
+       .setfeatcfg = qed_dcbnl_setfeatcfg,
+       .getfeatcfg = qed_dcbnl_getfeatcfg,
+       .getapp = qed_dcbnl_getapp,
+       .peer_getappinfo = qed_dcbnl_peer_getappinfo,
+       .peer_getapptable = qed_dcbnl_peer_getapptable,
+       .cee_peer_getpfc = qed_dcbnl_cee_peer_getpfc,
+       .cee_peer_getpg = qed_dcbnl_cee_peer_getpg,
+       .ieee_getpfc = qed_dcbnl_ieee_getpfc,
+       .ieee_setpfc = qed_dcbnl_ieee_setpfc,
+       .ieee_getets = qed_dcbnl_ieee_getets,
+       .ieee_setets = qed_dcbnl_ieee_setets,
+       .ieee_peer_getpfc = qed_dcbnl_ieee_peer_getpfc,
+       .ieee_peer_getets = qed_dcbnl_ieee_peer_getets,
+       .ieee_getapp = qed_dcbnl_ieee_getapp,
+       .ieee_setapp = qed_dcbnl_ieee_setapp,
+};
+
+#endif
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dcbx.h b/drivers/net/ethernet/qlogic/qed/qed_dcbx.h
new file mode 100644 (file)
index 0000000..9ba6816
--- /dev/null
@@ -0,0 +1,108 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015 QLogic Corporation
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#ifndef _QED_DCBX_H
+#define _QED_DCBX_H
+#include <linux/types.h>
+#include <linux/slab.h>
+#include "qed.h"
+#include "qed_hsi.h"
+#include "qed_hw.h"
+#include "qed_mcp.h"
+#include "qed_reg_addr.h"
+
+#define DCBX_CONFIG_MAX_APP_PROTOCOL    4
+
+enum qed_mib_read_type {
+       QED_DCBX_OPERATIONAL_MIB,
+       QED_DCBX_REMOTE_MIB,
+       QED_DCBX_LOCAL_MIB,
+       QED_DCBX_REMOTE_LLDP_MIB,
+       QED_DCBX_LOCAL_LLDP_MIB
+};
+
+struct qed_dcbx_app_data {
+       bool enable;            /* DCB enabled */
+       bool update;            /* Update indication */
+       u8 priority;            /* Priority */
+       u8 tc;                  /* Traffic Class */
+};
+
+#ifdef CONFIG_DCB
+#define QED_DCBX_VERSION_DISABLED       0
+#define QED_DCBX_VERSION_IEEE           1
+#define QED_DCBX_VERSION_CEE            2
+
+struct qed_dcbx_set {
+#define QED_DCBX_OVERRIDE_STATE                BIT(0)
+#define QED_DCBX_OVERRIDE_PFC_CFG       BIT(1)
+#define QED_DCBX_OVERRIDE_ETS_CFG       BIT(2)
+#define QED_DCBX_OVERRIDE_APP_CFG       BIT(3)
+#define QED_DCBX_OVERRIDE_DSCP_CFG      BIT(4)
+       u32 override_flags;
+       bool enabled;
+       struct qed_dcbx_admin_params config;
+       u32 ver_num;
+};
+#endif
+
+struct qed_dcbx_results {
+       bool dcbx_enabled;
+       u8 pf_id;
+       struct qed_dcbx_app_data arr[DCBX_MAX_PROTOCOL_TYPE];
+};
+
+struct qed_dcbx_app_metadata {
+       enum dcbx_protocol_type id;
+       char *name;
+       enum qed_pci_personality personality;
+};
+
+#define QED_MFW_GET_FIELD(name, field) \
+       (((name) & (field ## _MASK)) >> (field ## _SHIFT))
+
+struct qed_dcbx_info {
+       struct lldp_status_params_s lldp_remote[LLDP_MAX_LLDP_AGENTS];
+       struct lldp_config_params_s lldp_local[LLDP_MAX_LLDP_AGENTS];
+       struct dcbx_local_params local_admin;
+       struct qed_dcbx_results results;
+       struct dcbx_mib operational;
+       struct dcbx_mib remote;
+#ifdef CONFIG_DCB
+       struct qed_dcbx_set set;
+#endif
+       u8 dcbx_cap;
+};
+
+struct qed_dcbx_mib_meta_data {
+       struct lldp_config_params_s *lldp_local;
+       struct lldp_status_params_s *lldp_remote;
+       struct dcbx_local_params *local_admin;
+       struct dcbx_mib *mib;
+       size_t size;
+       u32 addr;
+};
+
+#ifdef CONFIG_DCB
+int qed_dcbx_get_config_params(struct qed_hwfn *, struct qed_dcbx_set *);
+
+int qed_dcbx_config_params(struct qed_hwfn *,
+                          struct qed_ptt *, struct qed_dcbx_set *, bool);
+#endif
+
+/* QED local interface routines */
+int
+qed_dcbx_mib_update_event(struct qed_hwfn *,
+                         struct qed_ptt *, enum qed_mib_read_type);
+
+int qed_dcbx_info_alloc(struct qed_hwfn *p_hwfn);
+void qed_dcbx_info_free(struct qed_hwfn *, struct qed_dcbx_info *);
+void qed_dcbx_set_pf_update_params(struct qed_dcbx_results *p_src,
+                                  struct pf_update_ramrod_data *p_dest);
+
+#endif
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c
new file mode 100644 (file)
index 0000000..0e4f4a9
--- /dev/null
@@ -0,0 +1,2720 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015 QLogic Corporation
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#include <linux/types.h>
+#include <asm/byteorder.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/mutex.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/vmalloc.h>
+#include <linux/etherdevice.h>
+#include <linux/qed/qed_chain.h>
+#include <linux/qed/qed_if.h>
+#include "qed.h"
+#include "qed_cxt.h"
+#include "qed_dcbx.h"
+#include "qed_dev_api.h"
+#include "qed_hsi.h"
+#include "qed_hw.h"
+#include "qed_init_ops.h"
+#include "qed_int.h"
+#include "qed_mcp.h"
+#include "qed_reg_addr.h"
+#include "qed_sp.h"
+#include "qed_sriov.h"
+#include "qed_vf.h"
+
+static spinlock_t qm_lock;
+static bool qm_lock_init = false;
+
+/* API common to all protocols */
+enum BAR_ID {
+       BAR_ID_0,       /* used for GRC */
+       BAR_ID_1        /* Used for doorbells */
+};
+
+static u32 qed_hw_bar_size(struct qed_hwfn     *p_hwfn,
+                          enum BAR_ID          bar_id)
+{
+       u32 bar_reg = (bar_id == BAR_ID_0 ?
+                      PGLUE_B_REG_PF_BAR0_SIZE : PGLUE_B_REG_PF_BAR1_SIZE);
+       u32 val;
+
+       if (IS_VF(p_hwfn->cdev))
+               return 1 << 17;
+
+       val = qed_rd(p_hwfn, p_hwfn->p_main_ptt, bar_reg);
+       if (val)
+               return 1 << (val + 15);
+
+       /* Old MFW initialized the above register only conditionally */
+       if (p_hwfn->cdev->num_hwfns > 1) {
+               DP_INFO(p_hwfn,
+                       "BAR size not configured. Assuming BAR size of 256kB for GRC and 512kB for DB\n");
+               return bar_id == BAR_ID_0 ? 256 * 1024 : 512 * 1024;
+       } else {
+               DP_INFO(p_hwfn,
+                       "BAR size not configured. Assuming BAR size of 512kB for GRC and 512kB for DB\n");
+               return 512 * 1024;
+       }
+}
+
+void qed_init_dp(struct qed_dev *cdev,
+                u32 dp_module, u8 dp_level)
+{
+       u32 i;
+
+       cdev->dp_level = dp_level;
+       cdev->dp_module = dp_module;
+       for (i = 0; i < MAX_HWFNS_PER_DEVICE; i++) {
+               struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+
+               p_hwfn->dp_level = dp_level;
+               p_hwfn->dp_module = dp_module;
+       }
+}
+
+void qed_init_struct(struct qed_dev *cdev)
+{
+       u8 i;
+
+       for (i = 0; i < MAX_HWFNS_PER_DEVICE; i++) {
+               struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+
+               p_hwfn->cdev = cdev;
+               p_hwfn->my_id = i;
+               p_hwfn->b_active = false;
+
+               mutex_init(&p_hwfn->dmae_info.mutex);
+       }
+
+       /* hwfn 0 is always active */
+       cdev->hwfns[0].b_active = true;
+
+       /* set the default cache alignment to 128 */
+       cdev->cache_shift = 7;
+}
+
+static void qed_qm_info_free(struct qed_hwfn *p_hwfn)
+{
+       struct qed_qm_info *qm_info = &p_hwfn->qm_info;
+
+       kfree(qm_info->qm_pq_params);
+       qm_info->qm_pq_params = NULL;
+       kfree(qm_info->qm_vport_params);
+       qm_info->qm_vport_params = NULL;
+       kfree(qm_info->qm_port_params);
+       qm_info->qm_port_params = NULL;
+       kfree(qm_info->wfq_data);
+       qm_info->wfq_data = NULL;
+}
+
+void qed_resc_free(struct qed_dev *cdev)
+{
+       int i;
+
+       if (IS_VF(cdev))
+               return;
+
+       kfree(cdev->fw_data);
+       cdev->fw_data = NULL;
+
+       kfree(cdev->reset_stats);
+
+       for_each_hwfn(cdev, i) {
+               struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+
+               kfree(p_hwfn->p_tx_cids);
+               p_hwfn->p_tx_cids = NULL;
+               kfree(p_hwfn->p_rx_cids);
+               p_hwfn->p_rx_cids = NULL;
+       }
+
+       for_each_hwfn(cdev, i) {
+               struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+
+               qed_cxt_mngr_free(p_hwfn);
+               qed_qm_info_free(p_hwfn);
+               qed_spq_free(p_hwfn);
+               qed_eq_free(p_hwfn, p_hwfn->p_eq);
+               qed_consq_free(p_hwfn, p_hwfn->p_consq);
+               qed_int_free(p_hwfn);
+               qed_iov_free(p_hwfn);
+               qed_dmae_info_free(p_hwfn);
+               qed_dcbx_info_free(p_hwfn, p_hwfn->p_dcbx_info);
+       }
+}
+
+static int qed_init_qm_info(struct qed_hwfn *p_hwfn, bool b_sleepable)
+{
+       u8 num_vports, vf_offset = 0, i, vport_id, num_ports, curr_queue = 0;
+       struct qed_qm_info *qm_info = &p_hwfn->qm_info;
+       struct init_qm_port_params *p_qm_port;
+       bool init_rdma_offload_pq = false;
+       bool init_pure_ack_pq = false;
+       bool init_ooo_pq = false;
+       u16 num_pqs, multi_cos_tcs = 1;
+       u8 pf_wfq = qm_info->pf_wfq;
+       u32 pf_rl = qm_info->pf_rl;
+       u16 num_pf_rls = 0;
+       u16 num_vfs = 0;
+
+#ifdef CONFIG_QED_SRIOV
+       if (p_hwfn->cdev->p_iov_info)
+               num_vfs = p_hwfn->cdev->p_iov_info->total_vfs;
+#endif
+       memset(qm_info, 0, sizeof(*qm_info));
+
+       num_pqs = multi_cos_tcs + num_vfs + 1;  /* The '1' is for pure-LB */
+       num_vports = (u8)RESC_NUM(p_hwfn, QED_VPORT);
+
+       if (p_hwfn->hw_info.personality == QED_PCI_ETH_ROCE) {
+               num_pqs++;      /* for RoCE queue */
+               init_rdma_offload_pq = true;
+               /* we subtract num_vfs because each requires a rate limiter,
+                * plus one for the default rate limiter
+                */
+               if (p_hwfn->pf_params.rdma_pf_params.enable_dcqcn)
+                       num_pf_rls = RESC_NUM(p_hwfn, QED_RL) - num_vfs - 1;
+
+               num_pqs += num_pf_rls;
+               qm_info->num_pf_rls = (u8) num_pf_rls;
+       }
+
+       if (p_hwfn->hw_info.personality == QED_PCI_ISCSI) {
+               num_pqs += 2;   /* for iSCSI pure-ACK / OOO queue */
+               init_pure_ack_pq = true;
+               init_ooo_pq = true;
+       }
+
+       /* Sanity checking that setup requires legal number of resources */
+       if (num_pqs > RESC_NUM(p_hwfn, QED_PQ)) {
+               DP_ERR(p_hwfn,
+                      "Need too many Physical queues - 0x%04x when only %04x are available\n",
+                      num_pqs, RESC_NUM(p_hwfn, QED_PQ));
+               return -EINVAL;
+       }
+
+       /* PQs will be arranged as follows: first the rate-limited PQs, then
+        * the per-TC PQs and the pure-LB PQ, followed by any offload,
+        * pure-ACK and OOO PQs, and finally the per-VF PQs.
+        */
+       qm_info->qm_pq_params = kcalloc(num_pqs,
+                                       sizeof(struct init_qm_pq_params),
+                                       b_sleepable ? GFP_KERNEL : GFP_ATOMIC);
+       if (!qm_info->qm_pq_params)
+               goto alloc_err;
+
+       qm_info->qm_vport_params = kcalloc(num_vports,
+                                          sizeof(struct init_qm_vport_params),
+                                          b_sleepable ? GFP_KERNEL
+                                                      : GFP_ATOMIC);
+       if (!qm_info->qm_vport_params)
+               goto alloc_err;
+
+       qm_info->qm_port_params = kcalloc(MAX_NUM_PORTS,
+                                         sizeof(struct init_qm_port_params),
+                                         b_sleepable ? GFP_KERNEL
+                                                     : GFP_ATOMIC);
+       if (!qm_info->qm_port_params)
+               goto alloc_err;
+
+       qm_info->wfq_data = kcalloc(num_vports, sizeof(struct qed_wfq_data),
+                                   b_sleepable ? GFP_KERNEL : GFP_ATOMIC);
+       if (!qm_info->wfq_data)
+               goto alloc_err;
+
+       vport_id = (u8)RESC_START(p_hwfn, QED_VPORT);
+
+       /* First init rate limited queues */
+       for (curr_queue = 0; curr_queue < num_pf_rls; curr_queue++) {
+               qm_info->qm_pq_params[curr_queue].vport_id = vport_id++;
+               qm_info->qm_pq_params[curr_queue].tc_id =
+                   p_hwfn->hw_info.non_offload_tc;
+               qm_info->qm_pq_params[curr_queue].wrr_group = 1;
+               qm_info->qm_pq_params[curr_queue].rl_valid = 1;
+       }
+
+       /* First init per-TC PQs */
+       for (i = 0; i < multi_cos_tcs; i++) {
+               struct init_qm_pq_params *params =
+                   &qm_info->qm_pq_params[curr_queue++];
+
+               if (p_hwfn->hw_info.personality == QED_PCI_ETH_ROCE ||
+                   p_hwfn->hw_info.personality == QED_PCI_ETH) {
+                       params->vport_id = vport_id;
+                       params->tc_id = p_hwfn->hw_info.non_offload_tc;
+                       params->wrr_group = 1;
+               } else {
+                       params->vport_id = vport_id;
+                       params->tc_id = p_hwfn->hw_info.offload_tc;
+                       params->wrr_group = 1;
+               }
+       }
+
+       /* Then init pure-LB PQ */
+       qm_info->pure_lb_pq = curr_queue;
+       qm_info->qm_pq_params[curr_queue].vport_id =
+           (u8) RESC_START(p_hwfn, QED_VPORT);
+       qm_info->qm_pq_params[curr_queue].tc_id = PURE_LB_TC;
+       qm_info->qm_pq_params[curr_queue].wrr_group = 1;
+       curr_queue++;
+
+       qm_info->offload_pq = 0;
+       if (init_rdma_offload_pq) {
+               qm_info->offload_pq = curr_queue;
+               qm_info->qm_pq_params[curr_queue].vport_id = vport_id;
+               qm_info->qm_pq_params[curr_queue].tc_id =
+                   p_hwfn->hw_info.offload_tc;
+               qm_info->qm_pq_params[curr_queue].wrr_group = 1;
+               curr_queue++;
+       }
+
+       if (init_pure_ack_pq) {
+               qm_info->pure_ack_pq = curr_queue;
+               qm_info->qm_pq_params[curr_queue].vport_id = vport_id;
+               qm_info->qm_pq_params[curr_queue].tc_id =
+                   p_hwfn->hw_info.offload_tc;
+               qm_info->qm_pq_params[curr_queue].wrr_group = 1;
+               curr_queue++;
+       }
+
+       if (init_ooo_pq) {
+               qm_info->ooo_pq = curr_queue;
+               qm_info->qm_pq_params[curr_queue].vport_id = vport_id;
+               qm_info->qm_pq_params[curr_queue].tc_id = DCBX_ISCSI_OOO_TC;
+               qm_info->qm_pq_params[curr_queue].wrr_group = 1;
+               curr_queue++;
+       }
+
+       /* Then init per-VF PQs */
+       vf_offset = curr_queue;
+       for (i = 0; i < num_vfs; i++) {
+               /* First vport is used by the PF */
+               qm_info->qm_pq_params[curr_queue].vport_id = vport_id + i + 1;
+               qm_info->qm_pq_params[curr_queue].tc_id =
+                   p_hwfn->hw_info.non_offload_tc;
+               qm_info->qm_pq_params[curr_queue].wrr_group = 1;
+               qm_info->qm_pq_params[curr_queue].rl_valid = 1;
+               curr_queue++;
+       }
+
+       qm_info->vf_queues_offset = vf_offset;
+       qm_info->num_pqs = num_pqs;
+       qm_info->num_vports = num_vports;
+
+       /* Initialize qm port parameters */
+       num_ports = p_hwfn->cdev->num_ports_in_engines;
+       for (i = 0; i < num_ports; i++) {
+               p_qm_port = &qm_info->qm_port_params[i];
+               p_qm_port->active = 1;
+               if (num_ports == 4)
+                       p_qm_port->active_phys_tcs = 0x7;
+               else
+                       p_qm_port->active_phys_tcs = 0x9f;
+               p_qm_port->num_pbf_cmd_lines = PBF_MAX_CMD_LINES / num_ports;
+               p_qm_port->num_btb_blocks = BTB_MAX_BLOCKS / num_ports;
+       }
+
+       qm_info->max_phys_tcs_per_port = NUM_OF_PHYS_TCS;
+
+       qm_info->start_pq = (u16)RESC_START(p_hwfn, QED_PQ);
+
+       qm_info->num_vf_pqs = num_vfs;
+       qm_info->start_vport = (u8) RESC_START(p_hwfn, QED_VPORT);
+
+       for (i = 0; i < qm_info->num_vports; i++)
+               qm_info->qm_vport_params[i].vport_wfq = 1;
+
+       qm_info->vport_rl_en = 1;
+       qm_info->vport_wfq_en = 1;
+       qm_info->pf_rl = pf_rl;
+       qm_info->pf_wfq = pf_wfq;
+
+       return 0;
+
+alloc_err:
+       DP_NOTICE(p_hwfn, "Failed to allocate memory for QM params\n");
+       qed_qm_info_free(p_hwfn);
+       return -ENOMEM;
+}
+
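+/* PQ index layout produced by qed_init_qm_info(), for reference:
+ *
+ *	[0 .. num_pf_rls)                 - per-PF rate-limited PQs
+ *	[.. + multi_cos_tcs)              - per-TC PQs
+ *	pure_lb_pq                        - pure loopback PQ
+ *	offload_pq / pure_ack_pq / ooo_pq - optional, personality dependent
+ *	[vf_queues_offset .. + num_vfs)   - one PQ per VF
+ *
+ * b_sleepable only selects GFP_KERNEL vs. GFP_ATOMIC for the allocations,
+ * so the same routine can also serve the non-sleeping reconfiguration path.
+ */
+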
+/* This function reconfigures the QM pf on the fly.
+ * For this purpose we:
+ * 1. reconfigure the QM database
+ * 2. set new values to runtime array
+ * 3. send an sdm_qm_cmd through the rbc interface to stop the QM
+ * 4. activate init tool in QM_PF stage
+ * 5. send an sdm_qm_cmd through rbc interface to release the QM
+ */
+int qed_qm_reconf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+{
+       struct qed_qm_info *qm_info = &p_hwfn->qm_info;
+       bool b_rc;
+       int rc;
+
+       /* qm_info is allocated in qed_init_qm_info() which is already called
+        * from qed_resc_alloc() or previous call of qed_qm_reconf().
+        * The allocated size may change each init, so we free it before next
+        * allocation.
+        */
+       qed_qm_info_free(p_hwfn);
+
+       /* initialize qed's qm data structure */
+       rc = qed_init_qm_info(p_hwfn, false);
+       if (rc)
+               return rc;
+
+       /* stop PF's qm queues */
+       spin_lock_bh(&qm_lock);
+       b_rc = qed_send_qm_stop_cmd(p_hwfn, p_ptt, false, true,
+                                   qm_info->start_pq, qm_info->num_pqs);
+       spin_unlock_bh(&qm_lock);
+       if (!b_rc)
+               return -EINVAL;
+
+       /* clear the QM_PF runtime phase leftovers from previous init */
+       qed_init_clear_rt_data(p_hwfn);
+
+       /* prepare QM portion of runtime array */
+       qed_qm_init_pf(p_hwfn);
+
+       /* activate init tool on runtime array */
+       rc = qed_init_run(p_hwfn, p_ptt, PHASE_QM_PF, p_hwfn->rel_pf_id,
+                         p_hwfn->hw_info.hw_mode);
+       if (rc)
+               return rc;
+
+       /* start PF's qm queues */
+       spin_lock_bh(&qm_lock);
+       b_rc = qed_send_qm_stop_cmd(p_hwfn, p_ptt, true, true,
+                                   qm_info->start_pq, qm_info->num_pqs);
+       spin_unlock_bh(&qm_lock);
+       if (!b_rc)
+               return -EINVAL;
+
+       return 0;
+}
+
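+/* Sketch of a hypothetical caller (e.g. a DCBX update handler); it assumes
+ * the usual qed_ptt_acquire()/qed_ptt_release() pairing around the call:
+ *
+ *	struct qed_ptt *p_ptt = qed_ptt_acquire(p_hwfn);
+ *
+ *	if (!p_ptt)
+ *		return -EAGAIN;
+ *	rc = qed_qm_reconf(p_hwfn, p_ptt);
+ *	qed_ptt_release(p_hwfn, p_ptt);
+ */
+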
+int qed_resc_alloc(struct qed_dev *cdev)
+{
+       struct qed_consq *p_consq;
+       struct qed_eq *p_eq;
+       int i, rc = 0;
+
+       if (IS_VF(cdev))
+               return rc;
+
+       cdev->fw_data = kzalloc(sizeof(*cdev->fw_data), GFP_KERNEL);
+       if (!cdev->fw_data)
+               return -ENOMEM;
+
+       /* Allocate Memory for the Queue->CID mapping */
+       for_each_hwfn(cdev, i) {
+               struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+               int tx_size = sizeof(struct qed_hw_cid_data) *
+                                    RESC_NUM(p_hwfn, QED_L2_QUEUE);
+               int rx_size = sizeof(struct qed_hw_cid_data) *
+                                    RESC_NUM(p_hwfn, QED_L2_QUEUE);
+
+               p_hwfn->p_tx_cids = kzalloc(tx_size, GFP_KERNEL);
+               if (!p_hwfn->p_tx_cids) {
+                       DP_NOTICE(p_hwfn,
+                                 "Failed to allocate memory for Tx Cids\n");
+                       goto alloc_no_mem;
+               }
+
+               p_hwfn->p_rx_cids = kzalloc(rx_size, GFP_KERNEL);
+               if (!p_hwfn->p_rx_cids) {
+                       DP_NOTICE(p_hwfn,
+                                 "Failed to allocate memory for Rx Cids\n");
+                       goto alloc_no_mem;
+               }
+       }
+
+       for_each_hwfn(cdev, i) {
+               struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+               u32 n_eqes, num_cons;
+
+               /* First allocate the context manager structure */
+               rc = qed_cxt_mngr_alloc(p_hwfn);
+               if (rc)
+                       goto alloc_err;
+
+               /* Set the HW cid/tid numbers (in the context manager)
+                * Must be done prior to any further computations.
+                */
+               rc = qed_cxt_set_pf_params(p_hwfn);
+               if (rc)
+                       goto alloc_err;
+
+               /* Prepare and process QM requirements */
+               rc = qed_init_qm_info(p_hwfn, true);
+               if (rc)
+                       goto alloc_err;
+
+               /* Compute the ILT client partition */
+               rc = qed_cxt_cfg_ilt_compute(p_hwfn);
+               if (rc)
+                       goto alloc_err;
+
+               /* CID map / ILT shadow table / T2
+                * The table sizes are determined by the computations above
+                */
+               rc = qed_cxt_tables_alloc(p_hwfn);
+               if (rc)
+                       goto alloc_err;
+
+               /* SPQ, must follow ILT because it initializes the SPQ context */
+               rc = qed_spq_alloc(p_hwfn);
+               if (rc)
+                       goto alloc_err;
+
+               /* SP status block allocation */
+               p_hwfn->p_dpc_ptt = qed_get_reserved_ptt(p_hwfn,
+                                                        RESERVED_PTT_DPC);
+
+               rc = qed_int_alloc(p_hwfn, p_hwfn->p_main_ptt);
+               if (rc)
+                       goto alloc_err;
+
+               rc = qed_iov_alloc(p_hwfn);
+               if (rc)
+                       goto alloc_err;
+
+               /* EQ */
+               n_eqes = qed_chain_get_capacity(&p_hwfn->p_spq->chain);
+               if (p_hwfn->hw_info.personality == QED_PCI_ETH_ROCE) {
+                       num_cons = qed_cxt_get_proto_cid_count(p_hwfn,
+                                                              PROTOCOLID_ROCE,
+                                                              0) * 2;
+                       n_eqes += num_cons + 2 * MAX_NUM_VFS_BB;
+               } else if (p_hwfn->hw_info.personality == QED_PCI_ISCSI) {
+                       num_cons =
+                           qed_cxt_get_proto_cid_count(p_hwfn,
+                                                       PROTOCOLID_ISCSI, 0);
+                       n_eqes += 2 * num_cons;
+               }
+
+               if (n_eqes > 0xFFFF) {
+                       DP_ERR(p_hwfn,
+                              "Cannot allocate 0x%x EQ elements. The maximum of a u16 chain is 0x%x\n",
+                              n_eqes, 0xFFFF);
+                       rc = -EINVAL;
+                       goto alloc_err;
+               }
+
+               p_eq = qed_eq_alloc(p_hwfn, (u16) n_eqes);
+               if (!p_eq)
+                       goto alloc_no_mem;
+               p_hwfn->p_eq = p_eq;
+
+               p_consq = qed_consq_alloc(p_hwfn);
+               if (!p_consq)
+                       goto alloc_no_mem;
+               p_hwfn->p_consq = p_consq;
+
+               /* DMA info initialization */
+               rc = qed_dmae_info_alloc(p_hwfn);
+               if (rc) {
+                       DP_NOTICE(p_hwfn,
+                                 "Failed to allocate memory for dmae_info structure\n");
+                       goto alloc_err;
+               }
+
+               /* DCBX initialization */
+               rc = qed_dcbx_info_alloc(p_hwfn);
+               if (rc) {
+                       DP_NOTICE(p_hwfn,
+                                 "Failed to allocate memory for dcbx structure\n");
+                       goto alloc_err;
+               }
+       }
+
+       cdev->reset_stats = kzalloc(sizeof(*cdev->reset_stats), GFP_KERNEL);
+       if (!cdev->reset_stats) {
+               DP_NOTICE(cdev, "Failed to allocate reset statistics\n");
+               rc = -ENOMEM;
+               goto alloc_err;
+       }
+
+       return 0;
+
+alloc_no_mem:
+       rc = -ENOMEM;
+alloc_err:
+       qed_resc_free(cdev);
+       return rc;
+}
+
+void qed_resc_setup(struct qed_dev *cdev)
+{
+       int i;
+
+       if (IS_VF(cdev))
+               return;
+
+       for_each_hwfn(cdev, i) {
+               struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+
+               qed_cxt_mngr_setup(p_hwfn);
+               qed_spq_setup(p_hwfn);
+               qed_eq_setup(p_hwfn, p_hwfn->p_eq);
+               qed_consq_setup(p_hwfn, p_hwfn->p_consq);
+
+               /* Read shadow of current MFW mailbox */
+               qed_mcp_read_mb(p_hwfn, p_hwfn->p_main_ptt);
+               memcpy(p_hwfn->mcp_info->mfw_mb_shadow,
+                      p_hwfn->mcp_info->mfw_mb_cur,
+                      p_hwfn->mcp_info->mfw_mb_length);
+
+               qed_int_setup(p_hwfn, p_hwfn->p_main_ptt);
+
+               qed_iov_setup(p_hwfn, p_hwfn->p_main_ptt);
+       }
+}
+
+#define FINAL_CLEANUP_POLL_CNT          (100)
+#define FINAL_CLEANUP_POLL_TIME         (10)
+int qed_final_cleanup(struct qed_hwfn *p_hwfn,
+                     struct qed_ptt *p_ptt, u16 id, bool is_vf)
+{
+       u32 command = 0, addr, count = FINAL_CLEANUP_POLL_CNT;
+       int rc = -EBUSY;
+
+       addr = GTT_BAR0_MAP_REG_USDM_RAM +
+               USTORM_FLR_FINAL_ACK_OFFSET(p_hwfn->rel_pf_id);
+
+       if (is_vf)
+               id += 0x10;
+
+       command |= X_FINAL_CLEANUP_AGG_INT <<
+               SDM_AGG_INT_COMP_PARAMS_AGG_INT_INDEX_SHIFT;
+       command |= 1 << SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_ENABLE_SHIFT;
+       command |= id << SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_BIT_SHIFT;
+       command |= SDM_COMP_TYPE_AGG_INT << SDM_OP_GEN_COMP_TYPE_SHIFT;
+
+       /* Make sure notification is not set before initiating final cleanup */
+       if (REG_RD(p_hwfn, addr)) {
+               DP_NOTICE(p_hwfn,
+                         "Unexpected; Found final cleanup notification before initiating final cleanup\n");
+               REG_WR(p_hwfn, addr, 0);
+       }
+
+       DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+                  "Sending final cleanup for PFVF[%d] [Command %08x\n]",
+                  id, command);
+
+       qed_wr(p_hwfn, p_ptt, XSDM_REG_OPERATION_GEN, command);
+
+       /* Poll until completion */
+       while (!REG_RD(p_hwfn, addr) && count--)
+               msleep(FINAL_CLEANUP_POLL_TIME);
+
+       if (REG_RD(p_hwfn, addr))
+               rc = 0;
+       else
+               DP_NOTICE(p_hwfn,
+                         "Failed to receive FW final cleanup notification\n");
+
+       /* Cleanup afterwards */
+       REG_WR(p_hwfn, addr, 0);
+
+       return rc;
+}
+
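+/* For reference: the completion above is polled FINAL_CLEANUP_POLL_CNT (100)
+ * times with FINAL_CLEANUP_POLL_TIME (10) ms sleeps, i.e. roughly one second
+ * in total. For VFs the id written into the aggregated-interrupt command is
+ * offset by 0x10 before being encoded into the vector-bit field.
+ */
+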
+static void qed_calc_hw_mode(struct qed_hwfn *p_hwfn)
+{
+       int hw_mode = 0;
+
+       hw_mode = (1 << MODE_BB_B0);
+
+       switch (p_hwfn->cdev->num_ports_in_engines) {
+       case 1:
+               hw_mode |= 1 << MODE_PORTS_PER_ENG_1;
+               break;
+       case 2:
+               hw_mode |= 1 << MODE_PORTS_PER_ENG_2;
+               break;
+       case 4:
+               hw_mode |= 1 << MODE_PORTS_PER_ENG_4;
+               break;
+       default:
+               DP_NOTICE(p_hwfn, "num_ports_in_engine = %d not supported\n",
+                         p_hwfn->cdev->num_ports_in_engines);
+               return;
+       }
+
+       switch (p_hwfn->cdev->mf_mode) {
+       case QED_MF_DEFAULT:
+       case QED_MF_NPAR:
+               hw_mode |= 1 << MODE_MF_SI;
+               break;
+       case QED_MF_OVLAN:
+               hw_mode |= 1 << MODE_MF_SD;
+               break;
+       default:
+               DP_NOTICE(p_hwfn, "Unsupported MF mode, init as DEFAULT\n");
+               hw_mode |= 1 << MODE_MF_SI;
+       }
+
+       hw_mode |= 1 << MODE_ASIC;
+
+       if (p_hwfn->cdev->num_hwfns > 1)
+               hw_mode |= 1 << MODE_100G;
+
+       p_hwfn->hw_info.hw_mode = hw_mode;
+
+       DP_VERBOSE(p_hwfn, (NETIF_MSG_PROBE | NETIF_MSG_IFUP),
+                  "Configuring function for hw_mode: 0x%08x\n",
+                  p_hwfn->hw_info.hw_mode);
+}
+
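+/* Worked example (illustrative): a 2-port engine in NPAR mode ends up with
+ *
+ *	hw_mode = BIT(MODE_BB_B0) | BIT(MODE_PORTS_PER_ENG_2) |
+ *		  BIT(MODE_MF_SI)  | BIT(MODE_ASIC);
+ *
+ * plus BIT(MODE_100G) when both hw-functions of a CMT device are in use.
+ */
+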
+/* Init run time data for all PFs on an engine. */
+static void qed_init_cau_rt_data(struct qed_dev *cdev)
+{
+       u32 offset = CAU_REG_SB_VAR_MEMORY_RT_OFFSET;
+       int i, sb_id;
+
+       for_each_hwfn(cdev, i) {
+               struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+               struct qed_igu_info *p_igu_info;
+               struct qed_igu_block *p_block;
+               struct cau_sb_entry sb_entry;
+
+               p_igu_info = p_hwfn->hw_info.p_igu_info;
+
+               for (sb_id = 0; sb_id < QED_MAPPING_MEMORY_SIZE(cdev);
+                    sb_id++) {
+                       p_block = &p_igu_info->igu_map.igu_blocks[sb_id];
+                       if (!p_block->is_pf)
+                               continue;
+
+                       qed_init_cau_sb_entry(p_hwfn, &sb_entry,
+                                             p_block->function_id,
+                                             0, 0);
+                       STORE_RT_REG_AGG(p_hwfn, offset + sb_id * 2,
+                                        sb_entry);
+               }
+       }
+}
+
+static int qed_hw_init_common(struct qed_hwfn *p_hwfn,
+                             struct qed_ptt *p_ptt,
+                             int hw_mode)
+{
+       struct qed_qm_info *qm_info = &p_hwfn->qm_info;
+       struct qed_qm_common_rt_init_params params;
+       struct qed_dev *cdev = p_hwfn->cdev;
+       u16 num_pfs, pf_id;
+       u32 concrete_fid;
+       int rc = 0;
+       u8 vf_id;
+
+       qed_init_cau_rt_data(cdev);
+
+       /* Program GTT windows */
+       qed_gtt_init(p_hwfn);
+
+       if (p_hwfn->mcp_info) {
+               if (p_hwfn->mcp_info->func_info.bandwidth_max)
+                       qm_info->pf_rl_en = 1;
+               if (p_hwfn->mcp_info->func_info.bandwidth_min)
+                       qm_info->pf_wfq_en = 1;
+       }
+
+       memset(&params, 0, sizeof(params));
+       params.max_ports_per_engine = p_hwfn->cdev->num_ports_in_engines;
+       params.max_phys_tcs_per_port = qm_info->max_phys_tcs_per_port;
+       params.pf_rl_en = qm_info->pf_rl_en;
+       params.pf_wfq_en = qm_info->pf_wfq_en;
+       params.vport_rl_en = qm_info->vport_rl_en;
+       params.vport_wfq_en = qm_info->vport_wfq_en;
+       params.port_params = qm_info->qm_port_params;
+
+       qed_qm_common_rt_init(p_hwfn, &params);
+
+       qed_cxt_hw_init_common(p_hwfn);
+
+       /* Close gate from NIG to BRB/Storm; By default they are open, but
+        * we close them to prevent NIG from passing data to reset blocks.
+        * Should have been done in the ENGINE phase, but init-tool lacks
+        * proper port-pretend capabilities.
+        */
+       qed_wr(p_hwfn, p_ptt, NIG_REG_RX_BRB_OUT_EN, 0);
+       qed_wr(p_hwfn, p_ptt, NIG_REG_STORM_OUT_EN, 0);
+       qed_port_pretend(p_hwfn, p_ptt, p_hwfn->port_id ^ 1);
+       qed_wr(p_hwfn, p_ptt, NIG_REG_RX_BRB_OUT_EN, 0);
+       qed_wr(p_hwfn, p_ptt, NIG_REG_STORM_OUT_EN, 0);
+       qed_port_unpretend(p_hwfn, p_ptt);
+
+       rc = qed_init_run(p_hwfn, p_ptt, PHASE_ENGINE, ANY_PHASE_ID, hw_mode);
+       if (rc != 0)
+               return rc;
+
+       qed_wr(p_hwfn, p_ptt, PSWRQ2_REG_L2P_VALIDATE_VFID, 0);
+       qed_wr(p_hwfn, p_ptt, PGLUE_B_REG_USE_CLIENTID_IN_TAG, 1);
+
+       if (QED_IS_BB(p_hwfn->cdev)) {
+               num_pfs = NUM_OF_ENG_PFS(p_hwfn->cdev);
+               for (pf_id = 0; pf_id < num_pfs; pf_id++) {
+                       qed_fid_pretend(p_hwfn, p_ptt, pf_id);
+                       qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_ROCE, 0x0);
+                       qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_TCP, 0x0);
+               }
+               /* pretend to original PF */
+               qed_fid_pretend(p_hwfn, p_ptt, p_hwfn->rel_pf_id);
+       }
+
+       for (vf_id = 0; vf_id < MAX_NUM_VFS_BB; vf_id++) {
+               concrete_fid = qed_vfid_to_concrete(p_hwfn, vf_id);
+               qed_fid_pretend(p_hwfn, p_ptt, (u16) concrete_fid);
+               qed_wr(p_hwfn, p_ptt, CCFC_REG_STRONG_ENABLE_VF, 0x1);
+       }
+       /* pretend to original PF */
+       qed_fid_pretend(p_hwfn, p_ptt, p_hwfn->rel_pf_id);
+
+       return rc;
+}
+
+static int qed_hw_init_port(struct qed_hwfn *p_hwfn,
+                           struct qed_ptt *p_ptt,
+                           int hw_mode)
+{
+       int rc = 0;
+
+       rc = qed_init_run(p_hwfn, p_ptt, PHASE_PORT, p_hwfn->port_id, hw_mode);
+       if (rc != 0)
+               return rc;
+
+       if (hw_mode & (1 << MODE_MF_SI)) {
+               u8 pf_id = 0;
+
+               if (!qed_hw_init_first_eth(p_hwfn, p_ptt, &pf_id)) {
+                       DP_VERBOSE(p_hwfn, NETIF_MSG_IFUP,
+                                  "PF[%08x] is first eth on engine\n", pf_id);
+
+                       /* We should have configured BIT for ppfid, i.e., the
+                        * relative function number in the port. But there's a
+                        * bug in LLH in BB where the ppfid is actually engine
+                        * based, so we need to take this into account.
+                        */
+                       qed_wr(p_hwfn, p_ptt,
+                              NIG_REG_LLH_TAGMAC_DEF_PF_VECTOR, 1 << pf_id);
+               }
+
+               /* Take the protocol-based hit vector if there is a hit,
+                * otherwise take the other vector.
+                */
+               qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_CLS_TYPE_DUALMODE, 0x2);
+       }
+       return rc;
+}
+
+static int qed_hw_init_pf(struct qed_hwfn *p_hwfn,
+                         struct qed_ptt *p_ptt,
+                         struct qed_tunn_start_params *p_tunn,
+                         int hw_mode,
+                         bool b_hw_start,
+                         enum qed_int_mode int_mode,
+                         bool allow_npar_tx_switch)
+{
+       u8 rel_pf_id = p_hwfn->rel_pf_id;
+       int rc = 0;
+
+       if (p_hwfn->mcp_info) {
+               struct qed_mcp_function_info *p_info;
+
+               p_info = &p_hwfn->mcp_info->func_info;
+               if (p_info->bandwidth_min)
+                       p_hwfn->qm_info.pf_wfq = p_info->bandwidth_min;
+
+               /* Update rate limit once we actually have a link */
+               p_hwfn->qm_info.pf_rl = 100000;
+       }
+
+       qed_cxt_hw_init_pf(p_hwfn);
+
+       qed_int_igu_init_rt(p_hwfn);
+
+       /* Set VLAN in NIG if needed */
+       if (hw_mode & (1 << MODE_MF_SD)) {
+               DP_VERBOSE(p_hwfn, NETIF_MSG_HW, "Configuring LLH_FUNC_TAG\n");
+               STORE_RT_REG(p_hwfn, NIG_REG_LLH_FUNC_TAG_EN_RT_OFFSET, 1);
+               STORE_RT_REG(p_hwfn, NIG_REG_LLH_FUNC_TAG_VALUE_RT_OFFSET,
+                            p_hwfn->hw_info.ovlan);
+       }
+
+       /* Enable classification by MAC if needed */
+       if (hw_mode & (1 << MODE_MF_SI)) {
+               DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
+                          "Configuring TAGMAC_CLS_TYPE\n");
+               STORE_RT_REG(p_hwfn,
+                            NIG_REG_LLH_FUNC_TAGMAC_CLS_TYPE_RT_OFFSET, 1);
+       }
+
+       /* Protocol configuration */
+       STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_TCP_RT_OFFSET,
+                    (p_hwfn->hw_info.personality == QED_PCI_ISCSI) ? 1 : 0);
+       STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_FCOE_RT_OFFSET, 0);
+       STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_ROCE_RT_OFFSET, 0);
+
+       /* Clean up the chip in case remnants of a previous driver remain */
+       rc = qed_final_cleanup(p_hwfn, p_ptt, rel_pf_id, false);
+       if (rc != 0)
+               return rc;
+
+       /* PF Init sequence */
+       rc = qed_init_run(p_hwfn, p_ptt, PHASE_PF, rel_pf_id, hw_mode);
+       if (rc)
+               return rc;
+
+       /* QM_PF Init sequence (may be invoked separately e.g. for DCB) */
+       rc = qed_init_run(p_hwfn, p_ptt, PHASE_QM_PF, rel_pf_id, hw_mode);
+       if (rc)
+               return rc;
+
+       /* Pure runtime initializations - directly to the HW  */
+       qed_int_igu_init_pure_rt(p_hwfn, p_ptt, true, true);
+
+       if (hw_mode & (1 << MODE_MF_SI)) {
+               u8 pf_id = 0;
+               u32 val = 0;
+
+               if (!qed_hw_init_first_eth(p_hwfn, p_ptt, &pf_id)) {
+                       if (p_hwfn->rel_pf_id == pf_id) {
+                               DP_VERBOSE(p_hwfn, NETIF_MSG_IFUP,
+                                          "PF[%d] is first ETH on engine\n",
+                                          pf_id);
+                               val = 1;
+                       }
+                       qed_wr(p_hwfn, p_ptt, PRS_REG_MSG_INFO, val);
+               }
+       }
+
+       if (b_hw_start) {
+               /* enable interrupts */
+               qed_int_igu_enable(p_hwfn, p_ptt, int_mode);
+
+               /* send function start command */
+               rc = qed_sp_pf_start(p_hwfn, p_tunn, p_hwfn->cdev->mf_mode,
+                                    allow_npar_tx_switch);
+               if (rc)
+                       DP_NOTICE(p_hwfn, "Function start ramrod failed\n");
+       }
+       return rc;
+}
+
+static int qed_change_pci_hwfn(struct qed_hwfn *p_hwfn,
+                              struct qed_ptt *p_ptt,
+                              u8 enable)
+{
+       u32 delay_idx = 0, val, set_val = enable ? 1 : 0;
+
+       /* Change PF in PXP */
+       qed_wr(p_hwfn, p_ptt,
+              PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, set_val);
+
+       /* wait until value is set - try for 1 second every 50us */
+       for (delay_idx = 0; delay_idx < 20000; delay_idx++) {
+               val = qed_rd(p_hwfn, p_ptt,
+                            PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER);
+               if (val == set_val)
+                       break;
+
+               usleep_range(50, 60);
+       }
+
+       if (val != set_val) {
+               DP_NOTICE(p_hwfn,
+                         "PFID_ENABLE_MASTER wasn't changed after a second\n");
+               return -EAGAIN;
+       }
+
+       return 0;
+}
+
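+/* The polling loop above retries 20000 times with a 50-60us sleep per
+ * iteration, i.e. it waits roughly one second before giving up with -EAGAIN.
+ */
+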
+static void qed_reset_mb_shadow(struct qed_hwfn *p_hwfn,
+                               struct qed_ptt *p_main_ptt)
+{
+       /* Read shadow of current MFW mailbox */
+       qed_mcp_read_mb(p_hwfn, p_main_ptt);
+       memcpy(p_hwfn->mcp_info->mfw_mb_shadow,
+              p_hwfn->mcp_info->mfw_mb_cur,
+              p_hwfn->mcp_info->mfw_mb_length);
+}
+
+int qed_hw_init(struct qed_dev *cdev,
+               struct qed_tunn_start_params *p_tunn,
+               bool b_hw_start,
+               enum qed_int_mode int_mode,
+               bool allow_npar_tx_switch,
+               const u8 *bin_fw_data)
+{
+       u32 load_code, param;
+       int rc, mfw_rc, i;
+
+       if ((int_mode == QED_INT_MODE_MSI) && (cdev->num_hwfns > 1)) {
+               DP_NOTICE(cdev, "MSI mode is not supported for CMT devices\n");
+               return -EINVAL;
+       }
+
+       if (IS_PF(cdev)) {
+               rc = qed_init_fw_data(cdev, bin_fw_data);
+               if (rc != 0)
+                       return rc;
+       }
+
+       for_each_hwfn(cdev, i) {
+               struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+
+               if (IS_VF(cdev)) {
+                       p_hwfn->b_int_enabled = 1;
+                       continue;
+               }
+
+               /* Enable DMAE in PXP */
+               rc = qed_change_pci_hwfn(p_hwfn, p_hwfn->p_main_ptt, true);
+
+               qed_calc_hw_mode(p_hwfn);
+
+               rc = qed_mcp_load_req(p_hwfn, p_hwfn->p_main_ptt,
+                                     &load_code);
+               if (rc) {
+                       DP_NOTICE(p_hwfn, "Failed sending LOAD_REQ command\n");
+                       return rc;
+               }
+
+               qed_reset_mb_shadow(p_hwfn, p_hwfn->p_main_ptt);
+
+               DP_VERBOSE(p_hwfn, QED_MSG_SP,
+                          "Load request was sent. Resp:0x%x, Load code: 0x%x\n",
+                          rc, load_code);
+
+               p_hwfn->first_on_engine = (load_code ==
+                                          FW_MSG_CODE_DRV_LOAD_ENGINE);
+
+               if (!qm_lock_init) {
+                       spin_lock_init(&qm_lock);
+                       qm_lock_init = true;
+               }
+
+               switch (load_code) {
+               case FW_MSG_CODE_DRV_LOAD_ENGINE:
+                       rc = qed_hw_init_common(p_hwfn, p_hwfn->p_main_ptt,
+                                               p_hwfn->hw_info.hw_mode);
+                       if (rc)
+                               break;
+               /* Fall through */
+               case FW_MSG_CODE_DRV_LOAD_PORT:
+                       rc = qed_hw_init_port(p_hwfn, p_hwfn->p_main_ptt,
+                                             p_hwfn->hw_info.hw_mode);
+                       if (rc)
+                               break;
+
+               /* Fall through */
+               case FW_MSG_CODE_DRV_LOAD_FUNCTION:
+                       rc = qed_hw_init_pf(p_hwfn, p_hwfn->p_main_ptt,
+                                           p_tunn, p_hwfn->hw_info.hw_mode,
+                                           b_hw_start, int_mode,
+                                           allow_npar_tx_switch);
+                       break;
+               default:
+                       rc = -EINVAL;
+                       break;
+               }
+
+               if (rc)
+                       DP_NOTICE(p_hwfn,
+                                 "init phase failed for loadcode 0x%x (rc %d)\n",
+                                  load_code, rc);
+
+               /* ACK mfw regardless of success or failure of initialization */
+               mfw_rc = qed_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt,
+                                    DRV_MSG_CODE_LOAD_DONE,
+                                    0, &load_code, &param);
+               if (rc)
+                       return rc;
+               if (mfw_rc) {
+                       DP_NOTICE(p_hwfn, "Failed sending LOAD_DONE command\n");
+                       return mfw_rc;
+               }
+
+               /* send DCBX attention request command */
+               DP_VERBOSE(p_hwfn,
+                          QED_MSG_DCB,
+                          "sending phony dcbx set command to trigger DCBx attention handling\n");
+               mfw_rc = qed_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt,
+                                    DRV_MSG_CODE_SET_DCBX,
+                                    1 << DRV_MB_PARAM_DCBX_NOTIFY_SHIFT,
+                                    &load_code, &param);
+               if (mfw_rc) {
+                       DP_NOTICE(p_hwfn,
+                                 "Failed to send DCBX attention request\n");
+                       return mfw_rc;
+               }
+
+               p_hwfn->hw_init_done = true;
+       }
+
+       return 0;
+}
+
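+/* The switch above deliberately falls through, so the init scope depends on
+ * what the MFW granted in the load response:
+ *
+ *	FW_MSG_CODE_DRV_LOAD_ENGINE   -> common + port + pf phases
+ *	FW_MSG_CODE_DRV_LOAD_PORT     -> port + pf phases
+ *	FW_MSG_CODE_DRV_LOAD_FUNCTION -> pf phase only
+ */
+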
+#define QED_HW_STOP_RETRY_LIMIT (10)
+static inline void qed_hw_timers_stop(struct qed_dev *cdev,
+                                     struct qed_hwfn *p_hwfn,
+                                     struct qed_ptt *p_ptt)
+{
+       int i;
+
+       /* close timers */
+       qed_wr(p_hwfn, p_ptt, TM_REG_PF_ENABLE_CONN, 0x0);
+       qed_wr(p_hwfn, p_ptt, TM_REG_PF_ENABLE_TASK, 0x0);
+
+       for (i = 0; i < QED_HW_STOP_RETRY_LIMIT; i++) {
+               if ((!qed_rd(p_hwfn, p_ptt,
+                            TM_REG_PF_SCAN_ACTIVE_CONN)) &&
+                   (!qed_rd(p_hwfn, p_ptt,
+                            TM_REG_PF_SCAN_ACTIVE_TASK)))
+                       break;
+
+               /* Depending on the number of connections/tasks, a 1ms sleep
+                * may be required between polls
+                */
+               usleep_range(1000, 2000);
+       }
+
+       if (i < QED_HW_STOP_RETRY_LIMIT)
+               return;
+
+       DP_NOTICE(p_hwfn,
+                 "Timers linear scans are not over [Connection %02x Tasks %02x]\n",
+                 (u8)qed_rd(p_hwfn, p_ptt, TM_REG_PF_SCAN_ACTIVE_CONN),
+                 (u8)qed_rd(p_hwfn, p_ptt, TM_REG_PF_SCAN_ACTIVE_TASK));
+}
+
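+/* The scan-active poll above is bounded by QED_HW_STOP_RETRY_LIMIT (10)
+ * iterations of a 1-2ms sleep, i.e. the timers get at most ~20ms to drain
+ * before the warning is printed.
+ */
+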
+void qed_hw_timers_stop_all(struct qed_dev *cdev)
+{
+       int j;
+
+       for_each_hwfn(cdev, j) {
+               struct qed_hwfn *p_hwfn = &cdev->hwfns[j];
+               struct qed_ptt *p_ptt = p_hwfn->p_main_ptt;
+
+               qed_hw_timers_stop(cdev, p_hwfn, p_ptt);
+       }
+}
+
+int qed_hw_stop(struct qed_dev *cdev)
+{
+       int rc = 0, t_rc;
+       int j;
+
+       for_each_hwfn(cdev, j) {
+               struct qed_hwfn *p_hwfn = &cdev->hwfns[j];
+               struct qed_ptt *p_ptt = p_hwfn->p_main_ptt;
+
+               DP_VERBOSE(p_hwfn, NETIF_MSG_IFDOWN, "Stopping hw/fw\n");
+
+               if (IS_VF(cdev)) {
+                       qed_vf_pf_int_cleanup(p_hwfn);
+                       continue;
+               }
+
+               /* mark the hw as uninitialized... */
+               p_hwfn->hw_init_done = false;
+
+               rc = qed_sp_pf_stop(p_hwfn);
+               if (rc)
+                       DP_NOTICE(p_hwfn,
+                                 "Failed to close PF against FW. Continue to stop HW to prevent illegal host access by the device\n");
+
+               qed_wr(p_hwfn, p_ptt,
+                      NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF, 0x1);
+
+               qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_TCP, 0x0);
+               qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_UDP, 0x0);
+               qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_FCOE, 0x0);
+               qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_ROCE, 0x0);
+               qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_OPENFLOW, 0x0);
+
+               qed_hw_timers_stop(cdev, p_hwfn, p_ptt);
+
+               /* Disable Attention Generation */
+               qed_int_igu_disable_int(p_hwfn, p_ptt);
+
+               qed_wr(p_hwfn, p_ptt, IGU_REG_LEADING_EDGE_LATCH, 0);
+               qed_wr(p_hwfn, p_ptt, IGU_REG_TRAILING_EDGE_LATCH, 0);
+
+               qed_int_igu_init_pure_rt(p_hwfn, p_ptt, false, true);
+
+               /* Need to wait 1ms to guarantee SBs are cleared */
+               usleep_range(1000, 2000);
+       }
+
+       if (IS_PF(cdev)) {
+               /* Disable DMAE in PXP - in CMT, this should only be done for
+                * first hw-function, and only after all transactions have
+                * stopped for all active hw-functions.
+                */
+               t_rc = qed_change_pci_hwfn(&cdev->hwfns[0],
+                                          cdev->hwfns[0].p_main_ptt, false);
+               if (t_rc != 0)
+                       rc = t_rc;
+       }
+
+       return rc;
+}
+
+void qed_hw_stop_fastpath(struct qed_dev *cdev)
+{
+       int j;
+
+       for_each_hwfn(cdev, j) {
+               struct qed_hwfn *p_hwfn = &cdev->hwfns[j];
+               struct qed_ptt *p_ptt = p_hwfn->p_main_ptt;
+
+               if (IS_VF(cdev)) {
+                       qed_vf_pf_int_cleanup(p_hwfn);
+                       continue;
+               }
+
+               DP_VERBOSE(p_hwfn,
+                          NETIF_MSG_IFDOWN,
+                          "Shutting down the fastpath\n");
+
+               qed_wr(p_hwfn, p_ptt,
+                      NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF, 0x1);
+
+               qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_TCP, 0x0);
+               qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_UDP, 0x0);
+               qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_FCOE, 0x0);
+               qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_ROCE, 0x0);
+               qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_OPENFLOW, 0x0);
+
+               qed_int_igu_init_pure_rt(p_hwfn, p_ptt, false, false);
+
+               /* Need to wait 1ms to guarantee SBs are cleared */
+               usleep_range(1000, 2000);
+       }
+}
+
+void qed_hw_start_fastpath(struct qed_hwfn *p_hwfn)
+{
+       if (IS_VF(p_hwfn->cdev))
+               return;
+
+       /* Re-open incoming traffic */
+       qed_wr(p_hwfn, p_hwfn->p_main_ptt,
+              NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF, 0x0);
+}
+
+static int qed_reg_assert(struct qed_hwfn *hwfn,
+                         struct qed_ptt *ptt, u32 reg,
+                         bool expected)
+{
+       u32 assert_val = qed_rd(hwfn, ptt, reg);
+
+       if (assert_val != expected) {
+               DP_NOTICE(hwfn, "Value at address 0x%x != 0x%08x\n",
+                         reg, expected);
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+int qed_hw_reset(struct qed_dev *cdev)
+{
+       int rc = 0;
+       u32 unload_resp, unload_param;
+       int i;
+
+       for_each_hwfn(cdev, i) {
+               struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+
+               if (IS_VF(cdev)) {
+                       rc = qed_vf_pf_reset(p_hwfn);
+                       if (rc)
+                               return rc;
+                       continue;
+               }
+
+               DP_VERBOSE(p_hwfn, NETIF_MSG_IFDOWN, "Resetting hw/fw\n");
+
+               /* Check for incorrect states */
+               qed_reg_assert(p_hwfn, p_hwfn->p_main_ptt,
+                              QM_REG_USG_CNT_PF_TX, 0);
+               qed_reg_assert(p_hwfn, p_hwfn->p_main_ptt,
+                              QM_REG_USG_CNT_PF_OTHER, 0);
+
+               /* Disable PF in HW blocks */
+               qed_wr(p_hwfn, p_hwfn->p_main_ptt, DORQ_REG_PF_DB_ENABLE, 0);
+               qed_wr(p_hwfn, p_hwfn->p_main_ptt, QM_REG_PF_EN, 0);
+               qed_wr(p_hwfn, p_hwfn->p_main_ptt,
+                      TCFC_REG_STRONG_ENABLE_PF, 0);
+               qed_wr(p_hwfn, p_hwfn->p_main_ptt,
+                      CCFC_REG_STRONG_ENABLE_PF, 0);
+
+               /* Send unload command to MCP */
+               rc = qed_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt,
+                                DRV_MSG_CODE_UNLOAD_REQ,
+                                DRV_MB_PARAM_UNLOAD_WOL_MCP,
+                                &unload_resp, &unload_param);
+               if (rc) {
+                       DP_NOTICE(p_hwfn, "qed_hw_reset: UNLOAD_REQ failed\n");
+                       unload_resp = FW_MSG_CODE_DRV_UNLOAD_ENGINE;
+               }
+
+               rc = qed_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt,
+                                DRV_MSG_CODE_UNLOAD_DONE,
+                                0, &unload_resp, &unload_param);
+               if (rc) {
+                       DP_NOTICE(p_hwfn, "qed_hw_reset: UNLOAD_DONE failed\n");
+                       return rc;
+               }
+       }
+
+       return rc;
+}
+
+/* Free hwfn memory and resources acquired in hw_hwfn_prepare */
+static void qed_hw_hwfn_free(struct qed_hwfn *p_hwfn)
+{
+       qed_ptt_pool_free(p_hwfn);
+       kfree(p_hwfn->hw_info.p_igu_info);
+}
+
+/* Setup bar access */
+static void qed_hw_hwfn_prepare(struct qed_hwfn *p_hwfn)
+{
+       /* clear indirect access */
+       qed_wr(p_hwfn, p_hwfn->p_main_ptt, PGLUE_B_REG_PGL_ADDR_88_F0, 0);
+       qed_wr(p_hwfn, p_hwfn->p_main_ptt, PGLUE_B_REG_PGL_ADDR_8C_F0, 0);
+       qed_wr(p_hwfn, p_hwfn->p_main_ptt, PGLUE_B_REG_PGL_ADDR_90_F0, 0);
+       qed_wr(p_hwfn, p_hwfn->p_main_ptt, PGLUE_B_REG_PGL_ADDR_94_F0, 0);
+
+       /* Clean previous errors if any exist */
+       qed_wr(p_hwfn, p_hwfn->p_main_ptt,
+              PGLUE_B_REG_WAS_ERROR_PF_31_0_CLR,
+              1 << p_hwfn->abs_pf_id);
+
+       /* enable internal target-read */
+       qed_wr(p_hwfn, p_hwfn->p_main_ptt,
+              PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1);
+}
+
+static void get_function_id(struct qed_hwfn *p_hwfn)
+{
+       /* ME Register */
+       p_hwfn->hw_info.opaque_fid = (u16)REG_RD(p_hwfn, PXP_PF_ME_OPAQUE_ADDR);
+
+       p_hwfn->hw_info.concrete_fid = REG_RD(p_hwfn, PXP_PF_ME_CONCRETE_ADDR);
+
+       p_hwfn->abs_pf_id = (p_hwfn->hw_info.concrete_fid >> 16) & 0xf;
+       p_hwfn->rel_pf_id = GET_FIELD(p_hwfn->hw_info.concrete_fid,
+                                     PXP_CONCRETE_FID_PFID);
+       p_hwfn->port_id = GET_FIELD(p_hwfn->hw_info.concrete_fid,
+                                   PXP_CONCRETE_FID_PORT);
+}
+
+static void qed_hw_set_feat(struct qed_hwfn *p_hwfn)
+{
+       u32 *feat_num = p_hwfn->hw_info.feat_num;
+       int num_features = 1;
+
+       feat_num[QED_PF_L2_QUE] = min_t(u32, RESC_NUM(p_hwfn, QED_SB) /
+                                               num_features,
+                                       RESC_NUM(p_hwfn, QED_L2_QUEUE));
+       DP_VERBOSE(p_hwfn, NETIF_MSG_PROBE,
+                  "#PF_L2_QUEUES=%d #SBS=%d num_features=%d\n",
+                  feat_num[QED_PF_L2_QUE], RESC_NUM(p_hwfn, QED_SB),
+                  num_features);
+}
+
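+/* Worked example (illustrative, assumed numbers): with num_features == 1,
+ * 64 status blocks and 32 L2 queues available, feat_num[QED_PF_L2_QUE]
+ * becomes min(64 / 1, 32) == 32, i.e. the L2 queue count is capped by
+ * whichever resource is scarcer.
+ */
+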
+static int qed_hw_get_resc(struct qed_hwfn *p_hwfn)
+{
+       u8 enabled_func_idx = p_hwfn->enabled_func_idx;
+       u32 *resc_start = p_hwfn->hw_info.resc_start;
+       u8 num_funcs = p_hwfn->num_funcs_on_engine;
+       u32 *resc_num = p_hwfn->hw_info.resc_num;
+       struct qed_sb_cnt_info sb_cnt_info;
+       int i, max_vf_vlan_filters;
+
+       memset(&sb_cnt_info, 0, sizeof(sb_cnt_info));
+
+#ifdef CONFIG_QED_SRIOV
+       max_vf_vlan_filters = QED_ETH_MAX_VF_NUM_VLAN_FILTERS;
+#else
+       max_vf_vlan_filters = 0;
+#endif
+
+       qed_int_get_num_sbs(p_hwfn, &sb_cnt_info);
+
+       resc_num[QED_SB] = min_t(u32,
+                                (MAX_SB_PER_PATH_BB / num_funcs),
+                                sb_cnt_info.sb_cnt);
+       resc_num[QED_L2_QUEUE] = MAX_NUM_L2_QUEUES_BB / num_funcs;
+       resc_num[QED_VPORT] = MAX_NUM_VPORTS_BB / num_funcs;
+       resc_num[QED_RSS_ENG] = ETH_RSS_ENGINE_NUM_BB / num_funcs;
+       resc_num[QED_PQ] = MAX_QM_TX_QUEUES_BB / num_funcs;
+       resc_num[QED_RL] = min_t(u32, 64, resc_num[QED_VPORT]);
+       resc_num[QED_MAC] = ETH_NUM_MAC_FILTERS / num_funcs;
+       resc_num[QED_VLAN] = (ETH_NUM_VLAN_FILTERS - 1 /*For vlan0*/) /
+                            num_funcs;
+       resc_num[QED_ILT] = PXP_NUM_ILT_RECORDS_BB / num_funcs;
+
+       for (i = 0; i < QED_MAX_RESC; i++)
+               resc_start[i] = resc_num[i] * enabled_func_idx;
+
+       /* Sanity for ILT */
+       if (RESC_END(p_hwfn, QED_ILT) > PXP_NUM_ILT_RECORDS_BB) {
+               DP_NOTICE(p_hwfn, "Can't assign ILT pages [%08x,...,%08x]\n",
+                         RESC_START(p_hwfn, QED_ILT),
+                         RESC_END(p_hwfn, QED_ILT) - 1);
+               return -EINVAL;
+       }
+
+       qed_hw_set_feat(p_hwfn);
+
+       DP_VERBOSE(p_hwfn, NETIF_MSG_PROBE,
+                  "The numbers for each resource are:\n"
+                  "SB = %d start = %d\n"
+                  "L2_QUEUE = %d start = %d\n"
+                  "VPORT = %d start = %d\n"
+                  "PQ = %d start = %d\n"
+                  "RL = %d start = %d\n"
+                  "MAC = %d start = %d\n"
+                  "VLAN = %d start = %d\n"
+                  "ILT = %d start = %d\n",
+                  p_hwfn->hw_info.resc_num[QED_SB],
+                  p_hwfn->hw_info.resc_start[QED_SB],
+                  p_hwfn->hw_info.resc_num[QED_L2_QUEUE],
+                  p_hwfn->hw_info.resc_start[QED_L2_QUEUE],
+                  p_hwfn->hw_info.resc_num[QED_VPORT],
+                  p_hwfn->hw_info.resc_start[QED_VPORT],
+                  p_hwfn->hw_info.resc_num[QED_PQ],
+                  p_hwfn->hw_info.resc_start[QED_PQ],
+                  p_hwfn->hw_info.resc_num[QED_RL],
+                  p_hwfn->hw_info.resc_start[QED_RL],
+                  p_hwfn->hw_info.resc_num[QED_MAC],
+                  p_hwfn->hw_info.resc_start[QED_MAC],
+                  p_hwfn->hw_info.resc_num[QED_VLAN],
+                  p_hwfn->hw_info.resc_start[QED_VLAN],
+                  p_hwfn->hw_info.resc_num[QED_ILT],
+                  p_hwfn->hw_info.resc_start[QED_ILT]);
+
+       return 0;
+}
+
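+/* Illustrative partitioning: every resource is split evenly between the
+ * enabled PFs on the engine, and each PF takes the slice matching its
+ * enabled-function index, e.g. with two enabled PFs:
+ *
+ *	resc_num[QED_VPORT]   = MAX_NUM_VPORTS_BB / 2;
+ *	resc_start[QED_VPORT] = resc_num[QED_VPORT] * enabled_func_idx;
+ */
+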
+static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn,
+                              struct qed_ptt *p_ptt)
+{
+       u32 nvm_cfg1_offset, mf_mode, addr, generic_cont0, core_cfg;
+       u32 port_cfg_addr, link_temp, nvm_cfg_addr, device_capabilities;
+       struct qed_mcp_link_params *link;
+
+       /* Read global nvm_cfg address */
+       nvm_cfg_addr = qed_rd(p_hwfn, p_ptt, MISC_REG_GEN_PURP_CR0);
+
+       /* Verify MCP has initialized it */
+       if (!nvm_cfg_addr) {
+               DP_NOTICE(p_hwfn, "Shared memory not initialized\n");
+               return -EINVAL;
+       }
+
+       /* Read nvm_cfg1 (Note: this is just the offset, not the offsize (TBD)) */
+       nvm_cfg1_offset = qed_rd(p_hwfn, p_ptt, nvm_cfg_addr + 4);
+
+       addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
+              offsetof(struct nvm_cfg1, glob) +
+              offsetof(struct nvm_cfg1_glob, core_cfg);
+
+       core_cfg = qed_rd(p_hwfn, p_ptt, addr);
+
+       switch ((core_cfg & NVM_CFG1_GLOB_NETWORK_PORT_MODE_MASK) >>
+               NVM_CFG1_GLOB_NETWORK_PORT_MODE_OFFSET) {
+       case NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_2X40G:
+               p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_2X40G;
+               break;
+       case NVM_CFG1_GLOB_NETWORK_PORT_MODE_2X50G:
+               p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_2X50G;
+               break;
+       case NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_1X100G:
+               p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_1X100G;
+               break;
+       case NVM_CFG1_GLOB_NETWORK_PORT_MODE_4X10G_F:
+               p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_4X10G_F;
+               break;
+       case NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_4X10G_E:
+               p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_4X10G_E;
+               break;
+       case NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_4X20G:
+               p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_4X20G;
+               break;
+       case NVM_CFG1_GLOB_NETWORK_PORT_MODE_1X40G:
+               p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_1X40G;
+               break;
+       case NVM_CFG1_GLOB_NETWORK_PORT_MODE_2X25G:
+               p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_2X25G;
+               break;
+       case NVM_CFG1_GLOB_NETWORK_PORT_MODE_1X25G:
+               p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_1X25G;
+               break;
+       default:
+               DP_NOTICE(p_hwfn, "Unknown port mode in 0x%08x\n",
+                         core_cfg);
+               break;
+       }
+
+       /* Read default link configuration */
+       link = &p_hwfn->mcp_info->link_input;
+       port_cfg_addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
+                       offsetof(struct nvm_cfg1, port[MFW_PORT(p_hwfn)]);
+       link_temp = qed_rd(p_hwfn, p_ptt,
+                          port_cfg_addr +
+                          offsetof(struct nvm_cfg1_port, speed_cap_mask));
+       link->speed.advertised_speeds =
+               link_temp & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_MASK;
+
+       p_hwfn->mcp_info->link_capabilities.speed_capabilities =
+                                               link->speed.advertised_speeds;
+
+       link_temp = qed_rd(p_hwfn, p_ptt,
+                          port_cfg_addr +
+                          offsetof(struct nvm_cfg1_port, link_settings));
+       switch ((link_temp & NVM_CFG1_PORT_DRV_LINK_SPEED_MASK) >>
+               NVM_CFG1_PORT_DRV_LINK_SPEED_OFFSET) {
+       case NVM_CFG1_PORT_DRV_LINK_SPEED_AUTONEG:
+               link->speed.autoneg = true;
+               break;
+       case NVM_CFG1_PORT_DRV_LINK_SPEED_1G:
+               link->speed.forced_speed = 1000;
+               break;
+       case NVM_CFG1_PORT_DRV_LINK_SPEED_10G:
+               link->speed.forced_speed = 10000;
+               break;
+       case NVM_CFG1_PORT_DRV_LINK_SPEED_25G:
+               link->speed.forced_speed = 25000;
+               break;
+       case NVM_CFG1_PORT_DRV_LINK_SPEED_40G:
+               link->speed.forced_speed = 40000;
+               break;
+       case NVM_CFG1_PORT_DRV_LINK_SPEED_50G:
+               link->speed.forced_speed = 50000;
+               break;
+       case NVM_CFG1_PORT_DRV_LINK_SPEED_BB_100G:
+               link->speed.forced_speed = 100000;
+               break;
+       default:
+               DP_NOTICE(p_hwfn, "Unknown Speed in 0x%08x\n",
+                         link_temp);
+       }
+
+       link_temp &= NVM_CFG1_PORT_DRV_FLOW_CONTROL_MASK;
+       link_temp >>= NVM_CFG1_PORT_DRV_FLOW_CONTROL_OFFSET;
+       link->pause.autoneg = !!(link_temp &
+                                NVM_CFG1_PORT_DRV_FLOW_CONTROL_AUTONEG);
+       link->pause.forced_rx = !!(link_temp &
+                                  NVM_CFG1_PORT_DRV_FLOW_CONTROL_RX);
+       link->pause.forced_tx = !!(link_temp &
+                                  NVM_CFG1_PORT_DRV_FLOW_CONTROL_TX);
+       link->loopback_mode = 0;
+
+       DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
+                  "Read default link: Speed 0x%08x, Adv. Speed 0x%08x, AN: 0x%02x, PAUSE AN: 0x%02x\n",
+                  link->speed.forced_speed, link->speed.advertised_speeds,
+                  link->speed.autoneg, link->pause.autoneg);
+
+       /* Read Multi-function information from shmem */
+       addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
+              offsetof(struct nvm_cfg1, glob) +
+              offsetof(struct nvm_cfg1_glob, generic_cont0);
+
+       generic_cont0 = qed_rd(p_hwfn, p_ptt, addr);
+
+       mf_mode = (generic_cont0 & NVM_CFG1_GLOB_MF_MODE_MASK) >>
+                 NVM_CFG1_GLOB_MF_MODE_OFFSET;
+
+       switch (mf_mode) {
+       case NVM_CFG1_GLOB_MF_MODE_MF_ALLOWED:
+               p_hwfn->cdev->mf_mode = QED_MF_OVLAN;
+               break;
+       case NVM_CFG1_GLOB_MF_MODE_NPAR1_0:
+               p_hwfn->cdev->mf_mode = QED_MF_NPAR;
+               break;
+       case NVM_CFG1_GLOB_MF_MODE_DEFAULT:
+               p_hwfn->cdev->mf_mode = QED_MF_DEFAULT;
+               break;
+       }
+       DP_INFO(p_hwfn, "Multi function mode is %08x\n",
+               p_hwfn->cdev->mf_mode);
+
+       /* Read device capabilities from shmem */
+       addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
+               offsetof(struct nvm_cfg1, glob) +
+               offsetof(struct nvm_cfg1_glob, device_capabilities);
+
+       device_capabilities = qed_rd(p_hwfn, p_ptt, addr);
+       if (device_capabilities & NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ETHERNET)
+               __set_bit(QED_DEV_CAP_ETH,
+                         &p_hwfn->hw_info.device_capabilities);
+       if (device_capabilities & NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ISCSI)
+               __set_bit(QED_DEV_CAP_ISCSI,
+                         &p_hwfn->hw_info.device_capabilities);
+       if (device_capabilities & NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ROCE)
+               __set_bit(QED_DEV_CAP_ROCE,
+                         &p_hwfn->hw_info.device_capabilities);
+
+       return qed_mcp_fill_shmem_func_info(p_hwfn, p_ptt);
+}
+
+static void qed_get_num_funcs(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+{
+       u8 num_funcs, enabled_func_idx = p_hwfn->rel_pf_id;
+       u32 reg_function_hide, tmp, eng_mask, low_pfs_mask;
+
+       num_funcs = MAX_NUM_PFS_BB;
+
+       /* Bit 0 of MISCS_REG_FUNCTION_HIDE indicates whether the bypass values
+        * in the other bits are selected.
+        * Bits 1-15 are for functions 1-15, respectively, and their value is
+        * '0' only for enabled functions (function 0 always exists and is
+        * enabled).
+        * In case of CMT, only the "even" functions are enabled, and thus the
+        * number of functions for both hwfns is learnt from the same bits.
+        */
+       reg_function_hide = qed_rd(p_hwfn, p_ptt, MISCS_REG_FUNCTION_HIDE);
+
+       if (reg_function_hide & 0x1) {
+               if (QED_PATH_ID(p_hwfn) && p_hwfn->cdev->num_hwfns == 1) {
+                       num_funcs = 0;
+                       eng_mask = 0xaaaa;
+               } else {
+                       num_funcs = 1;
+                       eng_mask = 0x5554;
+               }
+
+               /* Get the number of the enabled functions on the engine */
+               tmp = (reg_function_hide ^ 0xffffffff) & eng_mask;
+               while (tmp) {
+                       if (tmp & 0x1)
+                               num_funcs++;
+                       tmp >>= 0x1;
+               }
+
+               /* Get the PF index within the enabled functions */
+               low_pfs_mask = (0x1 << p_hwfn->abs_pf_id) - 1;
+               tmp = reg_function_hide & eng_mask & low_pfs_mask;
+               while (tmp) {
+                       if (tmp & 0x1)
+                               enabled_func_idx--;
+                       tmp >>= 0x1;
+               }
+       }
+
+       p_hwfn->num_funcs_on_engine = num_funcs;
+       p_hwfn->enabled_func_idx = enabled_func_idx;
+
+       DP_VERBOSE(p_hwfn,
+                  NETIF_MSG_PROBE,
+                  "PF [rel_id %d, abs_id %d] within the %d enabled functions on the engine\n",
+                  p_hwfn->rel_pf_id,
+                  p_hwfn->abs_pf_id,
+                  p_hwfn->num_funcs_on_engine);
+}
+
+static int
+qed_get_hw_info(struct qed_hwfn *p_hwfn,
+               struct qed_ptt *p_ptt,
+               enum qed_pci_personality personality)
+{
+       u32 port_mode;
+       int rc;
+
+       /* Since all information is common, only the first hwfn should do this */
+       if (IS_LEAD_HWFN(p_hwfn)) {
+               rc = qed_iov_hw_info(p_hwfn);
+               if (rc)
+                       return rc;
+       }
+
+       /* Read the port mode */
+       port_mode = qed_rd(p_hwfn, p_ptt,
+                          CNIG_REG_NW_PORT_MODE_BB_B0);
+
+       if (port_mode < 3) {
+               p_hwfn->cdev->num_ports_in_engines = 1;
+       } else if (port_mode <= 5) {
+               p_hwfn->cdev->num_ports_in_engines = 2;
+       } else {
+               DP_NOTICE(p_hwfn, "PORT MODE: %d not supported\n",
+                         p_hwfn->cdev->num_ports_in_engines);
+
+               /* Default num_ports_in_engines to 1 */
+               p_hwfn->cdev->num_ports_in_engines = 1;
+       }
+
+       qed_hw_get_nvm_info(p_hwfn, p_ptt);
+
+       rc = qed_int_igu_read_cam(p_hwfn, p_ptt);
+       if (rc)
+               return rc;
+
+       if (qed_mcp_is_init(p_hwfn))
+               ether_addr_copy(p_hwfn->hw_info.hw_mac_addr,
+                               p_hwfn->mcp_info->func_info.mac);
+       else
+               eth_random_addr(p_hwfn->hw_info.hw_mac_addr);
+
+       if (qed_mcp_is_init(p_hwfn)) {
+               if (p_hwfn->mcp_info->func_info.ovlan != QED_MCP_VLAN_UNSET)
+                       p_hwfn->hw_info.ovlan =
+                               p_hwfn->mcp_info->func_info.ovlan;
+
+               qed_mcp_cmd_port_init(p_hwfn, p_ptt);
+       }
+
+       if (qed_mcp_is_init(p_hwfn)) {
+               enum qed_pci_personality protocol;
+
+               protocol = p_hwfn->mcp_info->func_info.protocol;
+               p_hwfn->hw_info.personality = protocol;
+       }
+
+       qed_get_num_funcs(p_hwfn, p_ptt);
+
+       return qed_hw_get_resc(p_hwfn);
+}
+
+static int qed_get_dev_info(struct qed_dev *cdev)
+{
+       struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
+       u32 tmp;
+
+       /* Read Vendor Id / Device Id */
+       pci_read_config_word(cdev->pdev, PCI_VENDOR_ID,
+                            &cdev->vendor_id);
+       pci_read_config_word(cdev->pdev, PCI_DEVICE_ID,
+                            &cdev->device_id);
+       cdev->chip_num = (u16)qed_rd(p_hwfn, p_hwfn->p_main_ptt,
+                                    MISCS_REG_CHIP_NUM);
+       cdev->chip_rev = (u16)qed_rd(p_hwfn, p_hwfn->p_main_ptt,
+                                    MISCS_REG_CHIP_REV);
+       MASK_FIELD(CHIP_REV, cdev->chip_rev);
+
+       cdev->type = QED_DEV_TYPE_BB;
+       /* Learn number of HW-functions */
+       tmp = qed_rd(p_hwfn, p_hwfn->p_main_ptt,
+                    MISCS_REG_CMT_ENABLED_FOR_PAIR);
+
+       if (tmp & (1 << p_hwfn->rel_pf_id)) {
+               DP_NOTICE(cdev->hwfns, "device in CMT mode\n");
+               cdev->num_hwfns = 2;
+       } else {
+               cdev->num_hwfns = 1;
+       }
+
+       cdev->chip_bond_id = qed_rd(p_hwfn, p_hwfn->p_main_ptt,
+                                   MISCS_REG_CHIP_TEST_REG) >> 4;
+       MASK_FIELD(CHIP_BOND_ID, cdev->chip_bond_id);
+       cdev->chip_metal = (u16)qed_rd(p_hwfn, p_hwfn->p_main_ptt,
+                                      MISCS_REG_CHIP_METAL);
+       MASK_FIELD(CHIP_METAL, cdev->chip_metal);
+
+       DP_INFO(cdev->hwfns,
+               "Chip details - Num: %04x Rev: %04x Bond id: %04x Metal: %04x\n",
+               cdev->chip_num, cdev->chip_rev,
+               cdev->chip_bond_id, cdev->chip_metal);
+
+       if (QED_IS_BB(cdev) && CHIP_REV_IS_A0(cdev)) {
+               DP_NOTICE(cdev->hwfns,
+                         "The chip type/rev (BB A0) is not supported!\n");
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+static int qed_hw_prepare_single(struct qed_hwfn *p_hwfn,
+                                void __iomem *p_regview,
+                                void __iomem *p_doorbells,
+                                enum qed_pci_personality personality)
+{
+       int rc = 0;
+
+       /* Split PCI bars evenly between hwfns */
+       p_hwfn->regview = p_regview;
+       p_hwfn->doorbells = p_doorbells;
+
+       if (IS_VF(p_hwfn->cdev))
+               return qed_vf_hw_prepare(p_hwfn);
+
+       /* Validate that chip access is feasible */
+       if (REG_RD(p_hwfn, PXP_PF_ME_OPAQUE_ADDR) == 0xffffffff) {
+               DP_ERR(p_hwfn,
+                      "Reading the ME register returns all Fs; Preventing further chip access\n");
+               return -EINVAL;
+       }
+
+       get_function_id(p_hwfn);
+
+       /* Allocate PTT pool */
+       rc = qed_ptt_pool_alloc(p_hwfn);
+       if (rc) {
+               DP_NOTICE(p_hwfn, "Failed to prepare hwfn's hw\n");
+               goto err0;
+       }
+
+       /* Allocate the main PTT */
+       p_hwfn->p_main_ptt = qed_get_reserved_ptt(p_hwfn, RESERVED_PTT_MAIN);
+
+       /* First hwfn learns basic information, e.g., number of hwfns */
+       if (!p_hwfn->my_id) {
+               rc = qed_get_dev_info(p_hwfn->cdev);
+               if (rc != 0)
+                       goto err1;
+       }
+
+       qed_hw_hwfn_prepare(p_hwfn);
+
+       /* Initialize MCP structure */
+       rc = qed_mcp_cmd_init(p_hwfn, p_hwfn->p_main_ptt);
+       if (rc) {
+               DP_NOTICE(p_hwfn, "Failed initializing mcp command\n");
+               goto err1;
+       }
+
+       /* Read the device configuration information from the HW and SHMEM */
+       rc = qed_get_hw_info(p_hwfn, p_hwfn->p_main_ptt, personality);
+       if (rc) {
+               DP_NOTICE(p_hwfn, "Failed to get HW information\n");
+               goto err2;
+       }
+
+       /* Allocate the init RT array and initialize the init-ops engine */
+       rc = qed_init_alloc(p_hwfn);
+       if (rc) {
+               DP_NOTICE(p_hwfn, "Failed to allocate the init array\n");
+               goto err2;
+       }
+
+       return rc;
+err2:
+       if (IS_LEAD_HWFN(p_hwfn))
+               qed_iov_free_hw_info(p_hwfn->cdev);
+       qed_mcp_free(p_hwfn);
+err1:
+       qed_hw_hwfn_free(p_hwfn);
+err0:
+       return rc;
+}
+
+int qed_hw_prepare(struct qed_dev *cdev,
+                  int personality)
+{
+       struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
+       int rc;
+
+       /* Store the precompiled init data ptrs */
+       if (IS_PF(cdev))
+               qed_init_iro_array(cdev);
+
+       /* Initialize the first hwfn - will learn number of hwfns */
+       rc = qed_hw_prepare_single(p_hwfn,
+                                  cdev->regview,
+                                  cdev->doorbells, personality);
+       if (rc)
+               return rc;
+
+       personality = p_hwfn->hw_info.personality;
+
+       /* Initialize the rest of the hwfns */
+       if (cdev->num_hwfns > 1) {
+               void __iomem *p_regview, *p_doorbell;
+               u8 __iomem *addr;
+
+               /* adjust bar offset for second engine */
+               addr = cdev->regview + qed_hw_bar_size(p_hwfn, BAR_ID_0) / 2;
+               p_regview = addr;
+
+               /* adjust doorbell bar offset for second engine */
+               addr = cdev->doorbells + qed_hw_bar_size(p_hwfn, BAR_ID_1) / 2;
+               p_doorbell = addr;
+
+               /* prepare second hw function */
+               rc = qed_hw_prepare_single(&cdev->hwfns[1], p_regview,
+                                          p_doorbell, personality);
+
+               /* In case of error, the previously initialized
+                * hwfn 0 needs to be freed.
+                */
+               if (rc) {
+                       if (IS_PF(cdev)) {
+                               qed_init_free(p_hwfn);
+                               qed_mcp_free(p_hwfn);
+                               qed_hw_hwfn_free(p_hwfn);
+                       }
+               }
+       }
+
+       return rc;
+}
+
+void qed_hw_remove(struct qed_dev *cdev)
+{
+       int i;
+
+       for_each_hwfn(cdev, i) {
+               struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+
+               if (IS_VF(cdev)) {
+                       qed_vf_pf_release(p_hwfn);
+                       continue;
+               }
+
+               qed_init_free(p_hwfn);
+               qed_hw_hwfn_free(p_hwfn);
+               qed_mcp_free(p_hwfn);
+       }
+
+       qed_iov_free_hw_info(cdev);
+}
+
+static void qed_chain_free_next_ptr(struct qed_dev *cdev,
+                                   struct qed_chain *p_chain)
+{
+       void *p_virt = p_chain->p_virt_addr, *p_virt_next = NULL;
+       dma_addr_t p_phys = p_chain->p_phys_addr, p_phys_next = 0;
+       struct qed_chain_next *p_next;
+       u32 size, i;
+
+       if (!p_virt)
+               return;
+
+       size = p_chain->elem_size * p_chain->usable_per_page;
+
+       for (i = 0; i < p_chain->page_cnt; i++) {
+               if (!p_virt)
+                       break;
+
+               p_next = (struct qed_chain_next *)((u8 *)p_virt + size);
+               p_virt_next = p_next->next_virt;
+               p_phys_next = HILO_DMA_REGPAIR(p_next->next_phys);
+
+               dma_free_coherent(&cdev->pdev->dev,
+                                 QED_CHAIN_PAGE_SIZE, p_virt, p_phys);
+
+               p_virt = p_virt_next;
+               p_phys = p_phys_next;
+       }
+}
+
+static void qed_chain_free_single(struct qed_dev *cdev,
+                                 struct qed_chain *p_chain)
+{
+       if (!p_chain->p_virt_addr)
+               return;
+
+       dma_free_coherent(&cdev->pdev->dev,
+                         QED_CHAIN_PAGE_SIZE,
+                         p_chain->p_virt_addr, p_chain->p_phys_addr);
+}
+
+static void qed_chain_free_pbl(struct qed_dev *cdev, struct qed_chain *p_chain)
+{
+       void **pp_virt_addr_tbl = p_chain->pbl.pp_virt_addr_tbl;
+       u32 page_cnt = p_chain->page_cnt, i, pbl_size;
+       u8 *p_pbl_virt = p_chain->pbl.p_virt_table;
+
+       if (!pp_virt_addr_tbl)
+               return;
+
+       if (!p_chain->pbl.p_virt_table)
+               goto out;
+
+       for (i = 0; i < page_cnt; i++) {
+               if (!pp_virt_addr_tbl[i])
+                       break;
+
+               dma_free_coherent(&cdev->pdev->dev,
+                                 QED_CHAIN_PAGE_SIZE,
+                                 pp_virt_addr_tbl[i],
+                                 *(dma_addr_t *)p_pbl_virt);
+
+               p_pbl_virt += QED_CHAIN_PBL_ENTRY_SIZE;
+       }
+
+       pbl_size = page_cnt * QED_CHAIN_PBL_ENTRY_SIZE;
+       dma_free_coherent(&cdev->pdev->dev,
+                         pbl_size,
+                         p_chain->pbl.p_virt_table, p_chain->pbl.p_phys_table);
+out:
+       vfree(p_chain->pbl.pp_virt_addr_tbl);
+}
+
+void qed_chain_free(struct qed_dev *cdev, struct qed_chain *p_chain)
+{
+       switch (p_chain->mode) {
+       case QED_CHAIN_MODE_NEXT_PTR:
+               qed_chain_free_next_ptr(cdev, p_chain);
+               break;
+       case QED_CHAIN_MODE_SINGLE:
+               qed_chain_free_single(cdev, p_chain);
+               break;
+       case QED_CHAIN_MODE_PBL:
+               qed_chain_free_pbl(cdev, p_chain);
+               break;
+       }
+}
+
+static int
+qed_chain_alloc_sanity_check(struct qed_dev *cdev,
+                            enum qed_chain_cnt_type cnt_type,
+                            size_t elem_size, u32 page_cnt)
+{
+       u64 chain_size = ELEMS_PER_PAGE(elem_size) * page_cnt;
+
+       /* The actual chain size can exceed the maximal possible value after
+        * rounding the requested number of elements up to whole pages and
+        * accounting for the unusable (next-ptr) elements.
+        * The size of a "u16" chain can be (U16_MAX + 1) since the chain
+        * size/capacity fields are of a u32 type.
+        */
+       if ((cnt_type == QED_CHAIN_CNT_TYPE_U16 &&
+            chain_size > 0x10000) ||
+           (cnt_type == QED_CHAIN_CNT_TYPE_U32 &&
+            chain_size > 0x100000000ULL)) {
+               DP_NOTICE(cdev,
+                         "The actual chain size (0x%llx) is larger than the maximal possible value\n",
+                         chain_size);
+               return -EINVAL;
+       }
+
+       return 0;
+}
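
For reference, a minimal userspace sketch of the same overflow check, assuming ELEMS_PER_PAGE() reduces to QED_CHAIN_PAGE_SIZE / elem_size and a 4KB chain page (both assumptions; the real macros live in qed_chain.h):

        #include <stdint.h>
        #include <stdio.h>

        #define CHAIN_PAGE_SIZE 0x1000UL                 /* assumed 4KB page */
        #define ELEMS_PER_PAGE(sz) (CHAIN_PAGE_SIZE / (sz))

        /* Returns 0 if a u16-counted chain of 'page_cnt' pages of 'elem_size'
         * byte elements fits, -1 otherwise (mirrors the driver's -EINVAL path).
         */
        static int chain_size_ok_u16(size_t elem_size, uint32_t page_cnt)
        {
                uint64_t chain_size = ELEMS_PER_PAGE(elem_size) * (uint64_t)page_cnt;

                return (chain_size > 0x10000) ? -1 : 0;
        }

        int main(void)
        {
                /* 8-byte elements -> 512 elems/page; 128 pages = 65536 elems: fits */
                printf("%d\n", chain_size_ok_u16(8, 128));   /* prints 0 */
                /* 129 pages = 66048 elems: exceeds U16_MAX + 1 */
                printf("%d\n", chain_size_ok_u16(8, 129));   /* prints -1 */
                return 0;
        }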
+
+static int
+qed_chain_alloc_next_ptr(struct qed_dev *cdev, struct qed_chain *p_chain)
+{
+       void *p_virt = NULL, *p_virt_prev = NULL;
+       dma_addr_t p_phys = 0;
+       u32 i;
+
+       for (i = 0; i < p_chain->page_cnt; i++) {
+               p_virt = dma_alloc_coherent(&cdev->pdev->dev,
+                                           QED_CHAIN_PAGE_SIZE,
+                                           &p_phys, GFP_KERNEL);
+               if (!p_virt) {
+                       DP_NOTICE(cdev, "Failed to allocate chain memory\n");
+                       return -ENOMEM;
+               }
+
+               if (i == 0) {
+                       qed_chain_init_mem(p_chain, p_virt, p_phys);
+                       qed_chain_reset(p_chain);
+               } else {
+                       qed_chain_init_next_ptr_elem(p_chain, p_virt_prev,
+                                                    p_virt, p_phys);
+               }
+
+               p_virt_prev = p_virt;
+       }
+       /* Last page's next element should point to the beginning of the
+        * chain.
+        */
+       qed_chain_init_next_ptr_elem(p_chain, p_virt_prev,
+                                    p_chain->p_virt_addr,
+                                    p_chain->p_phys_addr);
+
+       return 0;
+}
+
+static int
+qed_chain_alloc_single(struct qed_dev *cdev, struct qed_chain *p_chain)
+{
+       dma_addr_t p_phys = 0;
+       void *p_virt = NULL;
+
+       p_virt = dma_alloc_coherent(&cdev->pdev->dev,
+                                   QED_CHAIN_PAGE_SIZE, &p_phys, GFP_KERNEL);
+       if (!p_virt) {
+               DP_NOTICE(cdev, "Failed to allocate chain memory\n");
+               return -ENOMEM;
+       }
+
+       qed_chain_init_mem(p_chain, p_virt, p_phys);
+       qed_chain_reset(p_chain);
+
+       return 0;
+}
+
+static int qed_chain_alloc_pbl(struct qed_dev *cdev, struct qed_chain *p_chain)
+{
+       u32 page_cnt = p_chain->page_cnt, size, i;
+       dma_addr_t p_phys = 0, p_pbl_phys = 0;
+       void **pp_virt_addr_tbl = NULL;
+       u8 *p_pbl_virt = NULL;
+       void *p_virt = NULL;
+
+       size = page_cnt * sizeof(*pp_virt_addr_tbl);
+       pp_virt_addr_tbl = vmalloc(size);
+       if (!pp_virt_addr_tbl) {
+               DP_NOTICE(cdev,
+                         "Failed to allocate memory for the chain virtual addresses table\n");
+               return -ENOMEM;
+       }
+       memset(pp_virt_addr_tbl, 0, size);
+
+       /* The PBL table is allocated at its full size, since it is expected
+        * to be physically contiguous.
+        * qed_chain_init_pbl_mem() is called even if the allocation fails,
+        * since pp_virt_addr_tbl was already allocated and must be saved so
+        * it can be freed during the error flow.
+        */
+       size = page_cnt * QED_CHAIN_PBL_ENTRY_SIZE;
+       p_pbl_virt = dma_alloc_coherent(&cdev->pdev->dev,
+                                       size, &p_pbl_phys, GFP_KERNEL);
+       qed_chain_init_pbl_mem(p_chain, p_pbl_virt, p_pbl_phys,
+                              pp_virt_addr_tbl);
+       if (!p_pbl_virt) {
+               DP_NOTICE(cdev, "Failed to allocate chain pbl memory\n");
+               return -ENOMEM;
+       }
+
+       for (i = 0; i < page_cnt; i++) {
+               p_virt = dma_alloc_coherent(&cdev->pdev->dev,
+                                           QED_CHAIN_PAGE_SIZE,
+                                           &p_phys, GFP_KERNEL);
+               if (!p_virt) {
+                       DP_NOTICE(cdev, "Failed to allocate chain memory\n");
+                       return -ENOMEM;
+               }
+
+               if (i == 0) {
+                       qed_chain_init_mem(p_chain, p_virt, p_phys);
+                       qed_chain_reset(p_chain);
+               }
+
+               /* Fill the PBL table with the physical address of the page */
+               *(dma_addr_t *)p_pbl_virt = p_phys;
+               /* Keep the virtual address of the page */
+               p_chain->pbl.pp_virt_addr_tbl[i] = p_virt;
+
+               p_pbl_virt += QED_CHAIN_PBL_ENTRY_SIZE;
+       }
+
+       return 0;
+}
+
+int qed_chain_alloc(struct qed_dev *cdev,
+                   enum qed_chain_use_mode intended_use,
+                   enum qed_chain_mode mode,
+                   enum qed_chain_cnt_type cnt_type,
+                   u32 num_elems, size_t elem_size, struct qed_chain *p_chain)
+{
+       u32 page_cnt;
+       int rc = 0;
+
+       if (mode == QED_CHAIN_MODE_SINGLE)
+               page_cnt = 1;
+       else
+               page_cnt = QED_CHAIN_PAGE_CNT(num_elems, elem_size, mode);
+
+       rc = qed_chain_alloc_sanity_check(cdev, cnt_type, elem_size, page_cnt);
+       if (rc) {
+               DP_NOTICE(cdev,
+                         "Cannot allocate a chain with the given arguments:\n"
+                         "[use_mode %d, mode %d, cnt_type %d, num_elems %d, elem_size %zu]\n",
+                         intended_use, mode, cnt_type, num_elems, elem_size);
+               return rc;
+       }
+
+       qed_chain_init_params(p_chain, page_cnt, (u8) elem_size, intended_use,
+                             mode, cnt_type);
+
+       switch (mode) {
+       case QED_CHAIN_MODE_NEXT_PTR:
+               rc = qed_chain_alloc_next_ptr(cdev, p_chain);
+               break;
+       case QED_CHAIN_MODE_SINGLE:
+               rc = qed_chain_alloc_single(cdev, p_chain);
+               break;
+       case QED_CHAIN_MODE_PBL:
+               rc = qed_chain_alloc_pbl(cdev, p_chain);
+               break;
+       }
+       if (rc)
+               goto nomem;
+
+       return 0;
+
+nomem:
+       qed_chain_free(cdev, p_chain);
+       return rc;
+}
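
A minimal caller sketch for qed_chain_alloc()/qed_chain_free(), assuming a valid struct qed_dev; the element count and the eth_rx_bd element type are illustrative, not prescribed by the API:

        /* Illustrative only: allocate a 256-element PBL chain for Rx BDs and
         * release it again. Error handling mirrors the driver's convention of
         * returning negative errno values.
         */
        static int example_alloc_rx_chain(struct qed_dev *cdev,
                                          struct qed_chain *p_chain)
        {
                int rc;

                rc = qed_chain_alloc(cdev,
                                     QED_CHAIN_USE_TO_CONSUME_PRODUCE,
                                     QED_CHAIN_MODE_PBL,
                                     QED_CHAIN_CNT_TYPE_U16,
                                     256,                        /* num_elems */
                                     sizeof(struct eth_rx_bd),   /* elem_size */
                                     p_chain);
                if (rc)
                        return rc;

                /* ... use the chain ... */

                qed_chain_free(cdev, p_chain);
                return 0;
        }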
+
+int qed_fw_l2_queue(struct qed_hwfn *p_hwfn, u16 src_id, u16 *dst_id)
+{
+       if (src_id >= RESC_NUM(p_hwfn, QED_L2_QUEUE)) {
+               u16 min, max;
+
+               min = (u16) RESC_START(p_hwfn, QED_L2_QUEUE);
+               max = min + RESC_NUM(p_hwfn, QED_L2_QUEUE);
+               DP_NOTICE(p_hwfn,
+                         "l2_queue id [%d] is not valid, available indices [%d - %d]\n",
+                         src_id, min, max);
+
+               return -EINVAL;
+       }
+
+       *dst_id = RESC_START(p_hwfn, QED_L2_QUEUE) + src_id;
+
+       return 0;
+}
+
+int qed_fw_vport(struct qed_hwfn *p_hwfn,
+                u8 src_id, u8 *dst_id)
+{
+       if (src_id >= RESC_NUM(p_hwfn, QED_VPORT)) {
+               u8 min, max;
+
+               min = (u8)RESC_START(p_hwfn, QED_VPORT);
+               max = min + RESC_NUM(p_hwfn, QED_VPORT);
+               DP_NOTICE(p_hwfn,
+                         "vport id [%d] is not valid, available indices [%d - %d]\n",
+                         src_id, min, max);
+
+               return -EINVAL;
+       }
+
+       *dst_id = RESC_START(p_hwfn, QED_VPORT) + src_id;
+
+       return 0;
+}
+
+int qed_fw_rss_eng(struct qed_hwfn *p_hwfn,
+                  u8 src_id, u8 *dst_id)
+{
+       if (src_id >= RESC_NUM(p_hwfn, QED_RSS_ENG)) {
+               u8 min, max;
+
+               min = (u8)RESC_START(p_hwfn, QED_RSS_ENG);
+               max = min + RESC_NUM(p_hwfn, QED_RSS_ENG);
+               DP_NOTICE(p_hwfn,
+                         "rss_eng id [%d] is not valid, available indices [%d - %d]\n",
+                         src_id, min, max);
+
+               return -EINVAL;
+       }
+
+       *dst_id = RESC_START(p_hwfn, QED_RSS_ENG) + src_id;
+
+       return 0;
+}
+
+static int qed_set_coalesce(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
+                           u32 hw_addr, void *p_eth_qzone,
+                           size_t eth_qzone_size, u8 timeset)
+{
+       struct coalescing_timeset *p_coal_timeset;
+
+       if (p_hwfn->cdev->int_coalescing_mode != QED_COAL_MODE_ENABLE) {
+               DP_NOTICE(p_hwfn, "Coalescing configuration not enabled\n");
+               return -EINVAL;
+       }
+
+       p_coal_timeset = p_eth_qzone;
+       memset(p_coal_timeset, 0, eth_qzone_size);
+       SET_FIELD(p_coal_timeset->value, COALESCING_TIMESET_TIMESET, timeset);
+       SET_FIELD(p_coal_timeset->value, COALESCING_TIMESET_VALID, 1);
+       qed_memcpy_to(p_hwfn, p_ptt, hw_addr, p_eth_qzone, eth_qzone_size);
+
+       return 0;
+}
+
+int qed_set_rxq_coalesce(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
+                        u16 coalesce, u8 qid, u16 sb_id)
+{
+       struct ustorm_eth_queue_zone eth_qzone;
+       u8 timeset, timer_res;
+       u16 fw_qid = 0;
+       u32 address;
+       int rc;
+
+       /* Coalesce = (timeset << timer-resolution); timeset is 7 bits wide */
+       if (coalesce <= 0x7F) {
+               timer_res = 0;
+       } else if (coalesce <= 0xFF) {
+               timer_res = 1;
+       } else if (coalesce <= 0x1FF) {
+               timer_res = 2;
+       } else {
+               DP_ERR(p_hwfn, "Invalid coalesce value - %d\n", coalesce);
+               return -EINVAL;
+       }
+       timeset = (u8)(coalesce >> timer_res);
+
+       rc = qed_fw_l2_queue(p_hwfn, (u16)qid, &fw_qid);
+       if (rc)
+               return rc;
+
+       rc = qed_int_set_timer_res(p_hwfn, p_ptt, timer_res, sb_id, false);
+       if (rc)
+               goto out;
+
+       address = BAR0_MAP_REG_USDM_RAM + USTORM_ETH_QUEUE_ZONE_OFFSET(fw_qid);
+
+       rc = qed_set_coalesce(p_hwfn, p_ptt, address, &eth_qzone,
+                             sizeof(struct ustorm_eth_queue_zone), timeset);
+       if (rc)
+               goto out;
+
+       p_hwfn->cdev->rx_coalesce_usecs = coalesce;
+out:
+       return rc;
+}
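
To make the timeset/timer-resolution split above concrete, here is a small standalone sketch of the same arithmetic; the 0x7F/0xFF/0x1FF bounds come from the function above, everything else is illustrative:

        #include <stdint.h>
        #include <stdio.h>

        /* Split a coalesce value (usecs) into timer_res and a 7-bit timeset,
         * such that coalesce ~= timeset << timer_res.
         */
        static int split_coalesce(uint16_t coalesce, uint8_t *timer_res,
                                  uint8_t *timeset)
        {
                if (coalesce <= 0x7F)
                        *timer_res = 0;
                else if (coalesce <= 0xFF)
                        *timer_res = 1;
                else if (coalesce <= 0x1FF)
                        *timer_res = 2;
                else
                        return -1;              /* out of range, like -EINVAL */

                *timeset = (uint8_t)(coalesce >> *timer_res);
                return 0;
        }

        int main(void)
        {
                uint8_t res, set;

                split_coalesce(200, &res, &set);
                /* 200 usec -> timer_res=1, timeset=100, i.e. 100 << 1 = 200 */
                printf("timer_res=%u timeset=%u\n", res, set);
                return 0;
        }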
+
+int qed_set_txq_coalesce(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
+                        u16 coalesce, u8 qid, u16 sb_id)
+{
+       struct xstorm_eth_queue_zone eth_qzone;
+       u8 timeset, timer_res;
+       u16 fw_qid = 0;
+       u32 address;
+       int rc;
+
+       /* Coalesce = (timeset << timer-resolution); timeset is 7 bits wide */
+       if (coalesce <= 0x7F) {
+               timer_res = 0;
+       } else if (coalesce <= 0xFF) {
+               timer_res = 1;
+       } else if (coalesce <= 0x1FF) {
+               timer_res = 2;
+       } else {
+               DP_ERR(p_hwfn, "Invalid coalesce value - %d\n", coalesce);
+               return -EINVAL;
+       }
+       timeset = (u8)(coalesce >> timer_res);
+
+       rc = qed_fw_l2_queue(p_hwfn, (u16)qid, &fw_qid);
+       if (rc)
+               return rc;
+
+       rc = qed_int_set_timer_res(p_hwfn, p_ptt, timer_res, sb_id, true);
+       if (rc)
+               goto out;
+
+       address = BAR0_MAP_REG_XSDM_RAM + XSTORM_ETH_QUEUE_ZONE_OFFSET(fw_qid);
+
+       rc = qed_set_coalesce(p_hwfn, p_ptt, address, &eth_qzone,
+                             sizeof(struct xstorm_eth_queue_zone), timeset);
+       if (rc)
+               goto out;
+
+       p_hwfn->cdev->tx_coalesce_usecs = coalesce;
+out:
+       return rc;
+}
+
+/* Calculate final WFQ values for all vports and configure them.
+ * After this configuration each vport will have
+ * approx min rate = min_pf_rate * (vport_wfq / QED_WFQ_UNIT)
+ */
+static void qed_configure_wfq_for_all_vports(struct qed_hwfn *p_hwfn,
+                                            struct qed_ptt *p_ptt,
+                                            u32 min_pf_rate)
+{
+       struct init_qm_vport_params *vport_params;
+       int i;
+
+       vport_params = p_hwfn->qm_info.qm_vport_params;
+
+       for (i = 0; i < p_hwfn->qm_info.num_vports; i++) {
+               u32 wfq_speed = p_hwfn->qm_info.wfq_data[i].min_speed;
+
+               vport_params[i].vport_wfq = (wfq_speed * QED_WFQ_UNIT) /
+                                               min_pf_rate;
+               qed_init_vport_wfq(p_hwfn, p_ptt,
+                                  vport_params[i].first_tx_pq_id,
+                                  vport_params[i].vport_wfq);
+       }
+}
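
As a concrete check of the formula in the comment above (min rate is approximately min_pf_rate * vport_wfq / QED_WFQ_UNIT), a small standalone sketch, assuming QED_WFQ_UNIT is 100 as the "one percent" checks below suggest (an assumption; the real constant is defined elsewhere in the driver):

        #include <stdio.h>

        #define WFQ_UNIT 100    /* assumed value of QED_WFQ_UNIT */

        int main(void)
        {
                unsigned int min_pf_rate = 10000;   /* PF min rate, Mbps */
                unsigned int wfq_speed = 2500;      /* requested vport min rate */

                /* Same integer math as the driver: weight, then back to Mbps */
                unsigned int vport_wfq = (wfq_speed * WFQ_UNIT) / min_pf_rate;  /* 25 */
                unsigned int approx_rate = min_pf_rate * vport_wfq / WFQ_UNIT;  /* 2500 */

                printf("vport_wfq=%u -> approx min rate %u Mbps\n",
                       vport_wfq, approx_rate);
                return 0;
        }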
+
+static void qed_init_wfq_default_param(struct qed_hwfn *p_hwfn,
+                                      u32 min_pf_rate)
+
+{
+       int i;
+
+       for (i = 0; i < p_hwfn->qm_info.num_vports; i++)
+               p_hwfn->qm_info.qm_vport_params[i].vport_wfq = 1;
+}
+
+static void qed_disable_wfq_for_all_vports(struct qed_hwfn *p_hwfn,
+                                          struct qed_ptt *p_ptt,
+                                          u32 min_pf_rate)
+{
+       struct init_qm_vport_params *vport_params;
+       int i;
+
+       vport_params = p_hwfn->qm_info.qm_vport_params;
+
+       for (i = 0; i < p_hwfn->qm_info.num_vports; i++) {
+               qed_init_wfq_default_param(p_hwfn, min_pf_rate);
+               qed_init_vport_wfq(p_hwfn, p_ptt,
+                                  vport_params[i].first_tx_pq_id,
+                                  vport_params[i].vport_wfq);
+       }
+}
+
+/* This function performs several validations of the WFQ
+ * configuration and the requested min rate for a given vport:
+ * 1. req_rate must be at least one percent of min_pf_rate.
+ * 2. req_rate must not cause the rates of other vports [those not explicitly
+ *    configured for WFQ] to drop below one percent of min_pf_rate.
+ * 3. total_req_min_rate [the sum of all vports' min rates] must not exceed
+ *    min_pf_rate.
+ */
+static int qed_init_wfq_param(struct qed_hwfn *p_hwfn,
+                             u16 vport_id, u32 req_rate,
+                             u32 min_pf_rate)
+{
+       u32 total_req_min_rate = 0, total_left_rate = 0, left_rate_per_vp = 0;
+       int non_requested_count = 0, req_count = 0, i, num_vports;
+
+       num_vports = p_hwfn->qm_info.num_vports;
+
+       /* Accounting for the vports which are configured for WFQ explicitly */
+       for (i = 0; i < num_vports; i++) {
+               u32 tmp_speed;
+
+               if ((i != vport_id) &&
+                   p_hwfn->qm_info.wfq_data[i].configured) {
+                       req_count++;
+                       tmp_speed = p_hwfn->qm_info.wfq_data[i].min_speed;
+                       total_req_min_rate += tmp_speed;
+               }
+       }
+
+       /* Include current vport data as well */
+       req_count++;
+       total_req_min_rate += req_rate;
+       non_requested_count = num_vports - req_count;
+
+       if (req_rate < min_pf_rate / QED_WFQ_UNIT) {
+               DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
+                          "Vport [%d] - Requested rate[%d Mbps] is less than one percent of configured PF min rate[%d Mbps]\n",
+                          vport_id, req_rate, min_pf_rate);
+               return -EINVAL;
+       }
+
+       if (num_vports > QED_WFQ_UNIT) {
+               DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
+                          "Number of vports is greater than %d\n",
+                          QED_WFQ_UNIT);
+               return -EINVAL;
+       }
+
+       if (total_req_min_rate > min_pf_rate) {
+               DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
+                          "Total requested min rate for all vports[%d Mbps] is greater than configured PF min rate[%d Mbps]\n",
+                          total_req_min_rate, min_pf_rate);
+               return -EINVAL;
+       }
+
+       total_left_rate = min_pf_rate - total_req_min_rate;
+
+       left_rate_per_vp = total_left_rate / non_requested_count;
+       if (left_rate_per_vp <  min_pf_rate / QED_WFQ_UNIT) {
+               DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
+                          "Non WFQ configured vports rate [%d Mbps] is less than one percent of configured PF min rate[%d Mbps]\n",
+                          left_rate_per_vp, min_pf_rate);
+               return -EINVAL;
+       }
+
+       p_hwfn->qm_info.wfq_data[vport_id].min_speed = req_rate;
+       p_hwfn->qm_info.wfq_data[vport_id].configured = true;
+
+       for (i = 0; i < num_vports; i++) {
+               if (p_hwfn->qm_info.wfq_data[i].configured)
+                       continue;
+
+               p_hwfn->qm_info.wfq_data[i].min_speed = left_rate_per_vp;
+       }
+
+       return 0;
+}
+
+static int __qed_configure_vport_wfq(struct qed_hwfn *p_hwfn,
+                                    struct qed_ptt *p_ptt, u16 vp_id, u32 rate)
+{
+       struct qed_mcp_link_state *p_link;
+       int rc = 0;
+
+       p_link = &p_hwfn->cdev->hwfns[0].mcp_info->link_output;
+
+       if (!p_link->min_pf_rate) {
+               p_hwfn->qm_info.wfq_data[vp_id].min_speed = rate;
+               p_hwfn->qm_info.wfq_data[vp_id].configured = true;
+               return rc;
+       }
+
+       rc = qed_init_wfq_param(p_hwfn, vp_id, rate, p_link->min_pf_rate);
+
+       if (rc == 0)
+               qed_configure_wfq_for_all_vports(p_hwfn, p_ptt,
+                                                p_link->min_pf_rate);
+       else
+               DP_NOTICE(p_hwfn,
+                         "Validation failed while configuring min rate\n");
+
+       return rc;
+}
+
+static int __qed_configure_vp_wfq_on_link_change(struct qed_hwfn *p_hwfn,
+                                                struct qed_ptt *p_ptt,
+                                                u32 min_pf_rate)
+{
+       bool use_wfq = false;
+       int rc = 0;
+       u16 i;
+
+       /* Validate all pre configured vports for wfq */
+       for (i = 0; i < p_hwfn->qm_info.num_vports; i++) {
+               u32 rate;
+
+               if (!p_hwfn->qm_info.wfq_data[i].configured)
+                       continue;
+
+               rate = p_hwfn->qm_info.wfq_data[i].min_speed;
+               use_wfq = true;
+
+               rc = qed_init_wfq_param(p_hwfn, i, rate, min_pf_rate);
+               if (rc) {
+                       DP_NOTICE(p_hwfn,
+                                 "WFQ validation failed while configuring min rate\n");
+                       break;
+               }
+       }
+
+       if (!rc && use_wfq)
+               qed_configure_wfq_for_all_vports(p_hwfn, p_ptt, min_pf_rate);
+       else
+               qed_disable_wfq_for_all_vports(p_hwfn, p_ptt, min_pf_rate);
+
+       return rc;
+}
+
+/* Main API for qed clients to configure a vport's min rate.
+ * vp_id - vport id within the PF, in the range [0 - (total_num_vports_per_pf - 1)]
+ * rate - speed in Mbps to be assigned to the given vport.
+ */
+int qed_configure_vport_wfq(struct qed_dev *cdev, u16 vp_id, u32 rate)
+{
+       int i, rc = -EINVAL;
+
+       /* Currently not supported; Might change in future */
+       if (cdev->num_hwfns > 1) {
+               DP_NOTICE(cdev,
+                         "WFQ configuration is not supported for this device\n");
+               return rc;
+       }
+
+       for_each_hwfn(cdev, i) {
+               struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+               struct qed_ptt *p_ptt;
+
+               p_ptt = qed_ptt_acquire(p_hwfn);
+               if (!p_ptt)
+                       return -EBUSY;
+
+               rc = __qed_configure_vport_wfq(p_hwfn, p_ptt, vp_id, rate);
+
+               if (rc) {
+                       qed_ptt_release(p_hwfn, p_ptt);
+                       return rc;
+               }
+
+               qed_ptt_release(p_hwfn, p_ptt);
+       }
+
+       return rc;
+}
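
A minimal caller sketch for the API above (illustrative; it assumes a probed struct qed_dev and that vport 3 exists on this PF):

        /* Give vport 3 a guaranteed minimum of 1000 Mbps; a non-zero return
         * means the request was rejected (validation failure, busy PTT, etc.).
         */
        static int example_set_vport_min_rate(struct qed_dev *cdev)
        {
                return qed_configure_vport_wfq(cdev, 3, 1000);
        }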
+
+/* API to configure WFQ from mcp link change */
+void qed_configure_vp_wfq_on_link_change(struct qed_dev *cdev, u32 min_pf_rate)
+{
+       int i;
+
+       if (cdev->num_hwfns > 1) {
+               DP_VERBOSE(cdev,
+                          NETIF_MSG_LINK,
+                          "WFQ configuration is not supported for this device\n");
+               return;
+       }
+
+       for_each_hwfn(cdev, i) {
+               struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+
+               __qed_configure_vp_wfq_on_link_change(p_hwfn,
+                                                     p_hwfn->p_dpc_ptt,
+                                                     min_pf_rate);
+       }
+}
+
+int __qed_configure_pf_max_bandwidth(struct qed_hwfn *p_hwfn,
+                                    struct qed_ptt *p_ptt,
+                                    struct qed_mcp_link_state *p_link,
+                                    u8 max_bw)
+{
+       int rc = 0;
+
+       p_hwfn->mcp_info->func_info.bandwidth_max = max_bw;
+
+       if (!p_link->line_speed && (max_bw != 100))
+               return rc;
+
+       p_link->speed = (p_link->line_speed * max_bw) / 100;
+       p_hwfn->qm_info.pf_rl = p_link->speed;
+
+       /* Since the limiter also affects Tx-switched traffic, we don't want it
+        * to limit such traffic in case there's no actual limit.
+        * In that case, set limit to imaginary high boundary.
+        */
+       if (max_bw == 100)
+               p_hwfn->qm_info.pf_rl = 100000;
+
+       rc = qed_init_pf_rl(p_hwfn, p_ptt, p_hwfn->rel_pf_id,
+                           p_hwfn->qm_info.pf_rl);
+
+       DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
+                  "Configured MAX bandwidth to be %08x Mb/sec\n",
+                  p_link->speed);
+
+       return rc;
+}
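
The rate-limiter value derived above is simply a percentage of line speed, with 100% treated as "unlimited"; a standalone sketch of that arithmetic (the values are illustrative):

        #include <stdio.h>

        static unsigned int pf_rl_from_max_bw(unsigned int line_speed_mbps,
                                              unsigned int max_bw_pct)
        {
                unsigned int speed = line_speed_mbps * max_bw_pct / 100;

                /* 100% means "no real limit": use a high imaginary boundary,
                 * matching the 100000 Mb/s value used in the function above.
                 */
                return (max_bw_pct == 100) ? 100000 : speed;
        }

        int main(void)
        {
                printf("%u\n", pf_rl_from_max_bw(25000, 40));    /* 10000 */
                printf("%u\n", pf_rl_from_max_bw(25000, 100));   /* 100000 */
                return 0;
        }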
+
+/* Main API to configure PF max bandwidth where bw range is [1 - 100] */
+int qed_configure_pf_max_bandwidth(struct qed_dev *cdev, u8 max_bw)
+{
+       int i, rc = -EINVAL;
+
+       if (max_bw < 1 || max_bw > 100) {
+               DP_NOTICE(cdev, "PF max bw valid range is [1-100]\n");
+               return rc;
+       }
+
+       for_each_hwfn(cdev, i) {
+               struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+               struct qed_hwfn *p_lead = QED_LEADING_HWFN(cdev);
+               struct qed_mcp_link_state *p_link;
+               struct qed_ptt *p_ptt;
+
+               p_link = &p_lead->mcp_info->link_output;
+
+               p_ptt = qed_ptt_acquire(p_hwfn);
+               if (!p_ptt)
+                       return -EBUSY;
+
+               rc = __qed_configure_pf_max_bandwidth(p_hwfn, p_ptt,
+                                                     p_link, max_bw);
+
+               qed_ptt_release(p_hwfn, p_ptt);
+
+               if (rc)
+                       break;
+       }
+
+       return rc;
+}
+
+int __qed_configure_pf_min_bandwidth(struct qed_hwfn *p_hwfn,
+                                    struct qed_ptt *p_ptt,
+                                    struct qed_mcp_link_state *p_link,
+                                    u8 min_bw)
+{
+       int rc = 0;
+
+       p_hwfn->mcp_info->func_info.bandwidth_min = min_bw;
+       p_hwfn->qm_info.pf_wfq = min_bw;
+
+       if (!p_link->line_speed)
+               return rc;
+
+       p_link->min_pf_rate = (p_link->line_speed * min_bw) / 100;
+
+       rc = qed_init_pf_wfq(p_hwfn, p_ptt, p_hwfn->rel_pf_id, min_bw);
+
+       DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
+                  "Configured MIN bandwidth to be %d Mb/sec\n",
+                  p_link->min_pf_rate);
+
+       return rc;
+}
+
+/* Main API to configure PF min bandwidth where bw range is [1-100] */
+int qed_configure_pf_min_bandwidth(struct qed_dev *cdev, u8 min_bw)
+{
+       int i, rc = -EINVAL;
+
+       if (min_bw < 1 || min_bw > 100) {
+               DP_NOTICE(cdev, "PF min bw valid range is [1-100]\n");
+               return rc;
+       }
+
+       for_each_hwfn(cdev, i) {
+               struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+               struct qed_hwfn *p_lead = QED_LEADING_HWFN(cdev);
+               struct qed_mcp_link_state *p_link;
+               struct qed_ptt *p_ptt;
+
+               p_link = &p_lead->mcp_info->link_output;
+
+               p_ptt = qed_ptt_acquire(p_hwfn);
+               if (!p_ptt)
+                       return -EBUSY;
+
+               rc = __qed_configure_pf_min_bandwidth(p_hwfn, p_ptt,
+                                                     p_link, min_bw);
+               if (rc) {
+                       qed_ptt_release(p_hwfn, p_ptt);
+                       return rc;
+               }
+
+               if (p_link->min_pf_rate) {
+                       u32 min_rate = p_link->min_pf_rate;
+
+                       rc = __qed_configure_vp_wfq_on_link_change(p_hwfn,
+                                                                  p_ptt,
+                                                                  min_rate);
+               }
+
+               qed_ptt_release(p_hwfn, p_ptt);
+       }
+
+       return rc;
+}
+
+void qed_clean_wfq_db(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+{
+       struct qed_mcp_link_state *p_link;
+
+       p_link = &p_hwfn->mcp_info->link_output;
+
+       if (p_link->min_pf_rate)
+               qed_disable_wfq_for_all_vports(p_hwfn, p_ptt,
+                                              p_link->min_pf_rate);
+
+       memset(p_hwfn->qm_info.wfq_data, 0,
+              sizeof(*p_hwfn->qm_info.wfq_data) * p_hwfn->qm_info.num_vports);
+}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev_api.h b/drivers/net/ethernet/qlogic/qed/qed_dev_api.h
new file mode 100644 (file)
index 0000000..343bb03
--- /dev/null
@@ -0,0 +1,358 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015 QLogic Corporation
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#ifndef _QED_DEV_API_H
+#define _QED_DEV_API_H
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/qed/qed_chain.h>
+#include <linux/qed/qed_if.h>
+#include "qed_int.h"
+
+/**
+ * @brief qed_init_dp - initialize the debug level
+ *
+ * @param cdev
+ * @param dp_module
+ * @param dp_level
+ */
+void qed_init_dp(struct qed_dev *cdev,
+                u32 dp_module,
+                u8 dp_level);
+
+/**
+ * @brief qed_init_struct - initialize the device structure to
+ *        its defaults
+ *
+ * @param cdev
+ */
+void qed_init_struct(struct qed_dev *cdev);
+
+/**
+ * @brief qed_resc_free -
+ *
+ * @param cdev
+ */
+void qed_resc_free(struct qed_dev *cdev);
+
+/**
+ * @brief qed_resc_alloc -
+ *
+ * @param cdev
+ *
+ * @return int
+ */
+int qed_resc_alloc(struct qed_dev *cdev);
+
+/**
+ * @brief qed_resc_setup -
+ *
+ * @param cdev
+ */
+void qed_resc_setup(struct qed_dev *cdev);
+
+/**
+ * @brief qed_hw_init -
+ *
+ * @param cdev
+ * @param p_tunn
+ * @param b_hw_start
+ * @param int_mode - interrupt mode [msix, inta, etc.] to use.
+ * @param allow_npar_tx_switch - npar tx switching to be used
+ *       for vports configured for tx-switching.
+ * @param bin_fw_data - binary fw data pointer in binary fw file.
+ *                     Pass NULL if not using binary fw file.
+ *
+ * @return int
+ */
+int qed_hw_init(struct qed_dev *cdev,
+               struct qed_tunn_start_params *p_tunn,
+               bool b_hw_start,
+               enum qed_int_mode int_mode,
+               bool allow_npar_tx_switch,
+               const u8 *bin_fw_data);
+
+/**
+ * @brief qed_hw_timers_stop_all - stop the timers HW block
+ *
+ * @param cdev
+ *
+ * @return void
+ */
+void qed_hw_timers_stop_all(struct qed_dev *cdev);
+
+/**
+ * @brief qed_hw_stop -
+ *
+ * @param cdev
+ *
+ * @return int
+ */
+int qed_hw_stop(struct qed_dev *cdev);
+
+/**
+ * @brief qed_hw_stop_fastpath - should be called in case
+ *             slowpath is still required for the device,
+ *             but fastpath is not.
+ *
+ * @param cdev
+ *
+ */
+void qed_hw_stop_fastpath(struct qed_dev *cdev);
+
+/**
+ * @brief qed_hw_start_fastpath - restart fastpath traffic,
+ *             only if hw_stop_fastpath was called
+ *
+ * @param p_hwfn
+ *
+ */
+void qed_hw_start_fastpath(struct qed_hwfn *p_hwfn);
+
+/**
+ * @brief qed_hw_reset -
+ *
+ * @param cdev
+ *
+ * @return int
+ */
+int qed_hw_reset(struct qed_dev *cdev);
+
+/**
+ * @brief qed_hw_prepare -
+ *
+ * @param cdev
+ * @param personality - personality to initialize
+ *
+ * @return int
+ */
+int qed_hw_prepare(struct qed_dev *cdev,
+                  int personality);
+
+/**
+ * @brief qed_hw_remove -
+ *
+ * @param cdev
+ */
+void qed_hw_remove(struct qed_dev *cdev);
+
+/**
+ * @brief qed_ptt_acquire - Allocate a PTT window
+ *
+ * Should be called at the entry point to the driver (at the beginning of an
+ * exported function)
+ *
+ * @param p_hwfn
+ *
+ * @return struct qed_ptt
+ */
+struct qed_ptt *qed_ptt_acquire(struct qed_hwfn *p_hwfn);
+
+/**
+ * @brief qed_ptt_release - Release PTT Window
+ *
+ * Should be called at the end of a flow - at the end of the function that
+ * acquired the PTT.
+ *
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ */
+void qed_ptt_release(struct qed_hwfn *p_hwfn,
+                    struct qed_ptt *p_ptt);
+void qed_reset_vport_stats(struct qed_dev *cdev);
+
+enum qed_dmae_address_type_t {
+       QED_DMAE_ADDRESS_HOST_VIRT,
+       QED_DMAE_ADDRESS_HOST_PHYS,
+       QED_DMAE_ADDRESS_GRC
+};
+
+/* Values of flags: if the QED_DMAE_FLAG_RW_REPL_SRC flag is set and the
+ * source is a block of length DMAE_MAX_RW_SIZE while the destination is
+ * larger, the source block will be duplicated as many times as required
+ * to fill the destination block. This is used mostly to write a zeroed
+ * buffer to a destination address using DMA.
+ */
+#define QED_DMAE_FLAG_RW_REPL_SRC      0x00000001
+#define QED_DMAE_FLAG_VF_SRC           0x00000002
+#define QED_DMAE_FLAG_VF_DST           0x00000004
+#define QED_DMAE_FLAG_COMPLETION_DST   0x00000008
+
+struct qed_dmae_params {
+       u32 flags; /* consists of QED_DMAE_FLAG_* values */
+       u8 src_vfid;
+       u8 dst_vfid;
+};
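
A sketch of how a caller might fill struct qed_dmae_params for a PF-to-VF copy via qed_dmae_host2host() (illustrative; the VF index and buffer addresses are made up):

        /* Copy 'size_in_dwords' dwords from a PF buffer into VF 5's memory.
         * QED_DMAE_FLAG_VF_DST tells the DMAE engine the destination address
         * belongs to a VF rather than to the PF itself.
         */
        static int example_dmae_to_vf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
                                      dma_addr_t src, dma_addr_t dst,
                                      u32 size_in_dwords)
        {
                struct qed_dmae_params params = {
                        .flags = QED_DMAE_FLAG_VF_DST,
                        .dst_vfid = 5,          /* illustrative VF index */
                };

                return qed_dmae_host2host(p_hwfn, p_ptt, src, dst,
                                          size_in_dwords, &params);
        }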
+
+/**
+ * @brief qed_dmae_host2grc - copy data from source addr to
+ * dmae registers using the given ptt
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param source_addr
+ * @param grc_addr (dmae_data_offset)
+ * @param size_in_dwords
+ * @param flags (one of the flags defined above)
+ */
+int
+qed_dmae_host2grc(struct qed_hwfn *p_hwfn,
+                 struct qed_ptt *p_ptt,
+                 u64 source_addr,
+                 u32 grc_addr,
+                 u32 size_in_dwords,
+                 u32 flags);
+
+/**
+ * @brief qed_dmae_grc2host - Read data from the dmae data offset
+ * into a host destination address using the given ptt
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param grc_addr (dmae_data_offset)
+ * @param dest_addr
+ * @param size_in_dwords
+ * @param flags - one of the flags defined above
+ */
+int qed_dmae_grc2host(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
+                     u32 grc_addr, dma_addr_t dest_addr, u32 size_in_dwords,
+                     u32 flags);
+
+/**
+ * @brief qed_dmae_host2host - copy data from a source address
+ * to a destination address (for SRIOV) using the given ptt
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param source_addr
+ * @param dest_addr
+ * @param size_in_dwords
+ * @param params
+ */
+int qed_dmae_host2host(struct qed_hwfn *p_hwfn,
+                      struct qed_ptt *p_ptt,
+                      dma_addr_t source_addr,
+                      dma_addr_t dest_addr,
+                      u32 size_in_dwords, struct qed_dmae_params *p_params);
+
+/**
+ * @brief qed_chain_alloc - Allocate and initialize a chain
+ *
+ * @param p_hwfn
+ * @param intended_use
+ * @param mode
+ * @param num_elems
+ * @param elem_size
+ * @param p_chain
+ *
+ * @return int
+ */
+int
+qed_chain_alloc(struct qed_dev *cdev,
+               enum qed_chain_use_mode intended_use,
+               enum qed_chain_mode mode,
+               enum qed_chain_cnt_type cnt_type,
+               u32 num_elems, size_t elem_size, struct qed_chain *p_chain);
+
+/**
+ * @brief qed_chain_free - Free chain DMA memory
+ *
+ * @param p_hwfn
+ * @param p_chain
+ */
+void qed_chain_free(struct qed_dev *cdev, struct qed_chain *p_chain);
+
+/**
+ * @brief qed_fw_l2_queue - Get absolute L2 queue ID
+ *
+ *  @param p_hwfn
+ *  @param src_id - relative to p_hwfn
+ *  @param dst_id - absolute per engine
+ *
+ *  @return int
+ */
+int qed_fw_l2_queue(struct qed_hwfn *p_hwfn,
+                   u16 src_id,
+                   u16 *dst_id);
+
+/**
+ * @brief qed_fw_vport - Get absolute vport ID
+ *
+ *  @param p_hwfn
+ *  @param src_id - relative to p_hwfn
+ *  @param dst_id - absolute per engine
+ *
+ *  @return int
+ */
+int qed_fw_vport(struct qed_hwfn *p_hwfn,
+                u8 src_id,
+                u8 *dst_id);
+
+/**
+ * @brief qed_fw_rss_eng - Get absolute RSS engine ID
+ *
+ *  @param p_hwfn
+ *  @param src_id - relative to p_hwfn
+ *  @param dst_id - absolute per engine
+ *
+ *  @return int
+ */
+int qed_fw_rss_eng(struct qed_hwfn *p_hwfn,
+                  u8 src_id,
+                  u8 *dst_id);
+
+/**
+ * @brief qed_final_cleanup - Cleanup of previous driver remains prior to load
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param id - For PF, engine-relative. For VF, PF-relative.
+ * @param is_vf - true iff cleanup is made for a VF.
+ *
+ * @return int
+ */
+int qed_final_cleanup(struct qed_hwfn *p_hwfn,
+                     struct qed_ptt *p_ptt, u16 id, bool is_vf);
+
+/**
+ * @brief qed_set_rxq_coalesce - Configure coalesce parameters for an Rx queue
+ * Coalescing can be configured up to 511 microseconds, but with decreasing
+ * accuracy [the bigger the value, the less accurate]; the error can reach
+ * about 3 usec for the highest values.
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param coalesce - Coalesce value in microseconds.
+ * @param qid - Queue index.
+ * @param sb_id - SB Id
+ *
+ * @return int
+ */
+int qed_set_rxq_coalesce(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
+                        u16 coalesce, u8 qid, u16 sb_id);
+
+/**
+ * @brief qed_set_txq_coalesce - Configure coalesce parameters for a Tx queue
+ * While the API allows setting coalescing per-qid, all Tx queues sharing an
+ * SB should be in the same range [i.e., either 0-0x7f, 0x80-0xff or
+ * 0x100-0x1ff]; otherwise the configuration would break.
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param coalesce - Coalesce value in microseconds.
+ * @param qid - Queue index.
+ * @param sb_id - SB Id
+ *
+ * @return int
+ */
+int qed_set_txq_coalesce(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
+                        u16 coalesce, u8 qid, u16 sb_id);
+#endif
diff --git a/drivers/net/ethernet/qlogic/qed/qed_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_hsi.h
new file mode 100644 (file)
index 0000000..6f9d3b8
--- /dev/null
@@ -0,0 +1,7524 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015 QLogic Corporation
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#ifndef _QED_HSI_H
+#define _QED_HSI_H
+
+#include <linux/types.h>
+#include <linux/io.h>
+#include <linux/bitops.h>
+#include <linux/delay.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <linux/qed/common_hsi.h>
+#include <linux/qed/storage_common.h>
+#include <linux/qed/tcp_common.h>
+#include <linux/qed/eth_common.h>
+#include <linux/qed/iscsi_common.h>
+#include <linux/qed/rdma_common.h>
+#include <linux/qed/roce_common.h>
+
+struct qed_hwfn;
+struct qed_ptt;
+
+/* opcodes for the event ring */
+enum common_event_opcode {
+       COMMON_EVENT_PF_START,
+       COMMON_EVENT_PF_STOP,
+       COMMON_EVENT_VF_START,
+       COMMON_EVENT_VF_STOP,
+       COMMON_EVENT_VF_PF_CHANNEL,
+       COMMON_EVENT_VF_FLR,
+       COMMON_EVENT_PF_UPDATE,
+       COMMON_EVENT_MALICIOUS_VF,
+       COMMON_EVENT_RL_UPDATE,
+       COMMON_EVENT_EMPTY,
+       MAX_COMMON_EVENT_OPCODE
+};
+
+/* Common Ramrod Command IDs */
+enum common_ramrod_cmd_id {
+       COMMON_RAMROD_UNUSED,
+       COMMON_RAMROD_PF_START,
+       COMMON_RAMROD_PF_STOP,
+       COMMON_RAMROD_VF_START,
+       COMMON_RAMROD_VF_STOP,
+       COMMON_RAMROD_PF_UPDATE,
+       COMMON_RAMROD_RL_UPDATE,
+       COMMON_RAMROD_EMPTY,
+       MAX_COMMON_RAMROD_CMD_ID
+};
+
+/* The core storm context for the Ystorm */
+struct ystorm_core_conn_st_ctx {
+       __le32 reserved[4];
+};
+
+/* The core storm context for the Pstorm */
+struct pstorm_core_conn_st_ctx {
+       __le32 reserved[4];
+};
+
+/* Core Slowpath Connection storm context of Xstorm */
+struct xstorm_core_conn_st_ctx {
+       __le32 spq_base_lo;
+       __le32 spq_base_hi;
+       struct regpair consolid_base_addr;
+       __le16 spq_cons;
+       __le16 consolid_cons;
+       __le32 reserved0[55];
+};
+
+struct xstorm_core_conn_ag_ctx {
+       u8 reserved0;
+       u8 core_state;
+       u8 flags0;
+#define XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM0_MASK      0x1
+#define XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM0_SHIFT     0
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED1_MASK         0x1
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED1_SHIFT                1
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED2_MASK         0x1
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED2_SHIFT                2
+#define XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM3_MASK      0x1
+#define XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM3_SHIFT     3
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED3_MASK         0x1
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED3_SHIFT                4
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED4_MASK         0x1
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED4_SHIFT                5
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED5_MASK         0x1
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED5_SHIFT                6
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED6_MASK         0x1
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED6_SHIFT                7
+       u8 flags1;
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED7_MASK         0x1
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED7_SHIFT                0
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED8_MASK         0x1
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED8_SHIFT                1
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED9_MASK         0x1
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED9_SHIFT                2
+#define XSTORM_CORE_CONN_AG_CTX_BIT11_MASK             0x1
+#define XSTORM_CORE_CONN_AG_CTX_BIT11_SHIFT            3
+#define XSTORM_CORE_CONN_AG_CTX_BIT12_MASK             0x1
+#define XSTORM_CORE_CONN_AG_CTX_BIT12_SHIFT            4
+#define XSTORM_CORE_CONN_AG_CTX_BIT13_MASK             0x1
+#define XSTORM_CORE_CONN_AG_CTX_BIT13_SHIFT            5
+#define XSTORM_CORE_CONN_AG_CTX_TX_RULE_ACTIVE_MASK    0x1
+#define XSTORM_CORE_CONN_AG_CTX_TX_RULE_ACTIVE_SHIFT   6
+#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE_MASK      0x1
+#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE_SHIFT     7
+       u8 flags2;
+#define XSTORM_CORE_CONN_AG_CTX_CF0_MASK       0x3
+#define XSTORM_CORE_CONN_AG_CTX_CF0_SHIFT      0
+#define XSTORM_CORE_CONN_AG_CTX_CF1_MASK       0x3
+#define XSTORM_CORE_CONN_AG_CTX_CF1_SHIFT      2
+#define XSTORM_CORE_CONN_AG_CTX_CF2_MASK       0x3
+#define XSTORM_CORE_CONN_AG_CTX_CF2_SHIFT      4
+#define XSTORM_CORE_CONN_AG_CTX_CF3_MASK       0x3
+#define XSTORM_CORE_CONN_AG_CTX_CF3_SHIFT      6
+       u8 flags3;
+#define XSTORM_CORE_CONN_AG_CTX_CF4_MASK       0x3
+#define XSTORM_CORE_CONN_AG_CTX_CF4_SHIFT      0
+#define XSTORM_CORE_CONN_AG_CTX_CF5_MASK       0x3
+#define XSTORM_CORE_CONN_AG_CTX_CF5_SHIFT      2
+#define XSTORM_CORE_CONN_AG_CTX_CF6_MASK       0x3
+#define XSTORM_CORE_CONN_AG_CTX_CF6_SHIFT      4
+#define XSTORM_CORE_CONN_AG_CTX_CF7_MASK       0x3
+#define XSTORM_CORE_CONN_AG_CTX_CF7_SHIFT      6
+       u8 flags4;
+#define XSTORM_CORE_CONN_AG_CTX_CF8_MASK       0x3
+#define XSTORM_CORE_CONN_AG_CTX_CF8_SHIFT      0
+#define XSTORM_CORE_CONN_AG_CTX_CF9_MASK       0x3
+#define XSTORM_CORE_CONN_AG_CTX_CF9_SHIFT      2
+#define XSTORM_CORE_CONN_AG_CTX_CF10_MASK      0x3
+#define XSTORM_CORE_CONN_AG_CTX_CF10_SHIFT     4
+#define XSTORM_CORE_CONN_AG_CTX_CF11_MASK      0x3
+#define XSTORM_CORE_CONN_AG_CTX_CF11_SHIFT     6
+       u8 flags5;
+#define XSTORM_CORE_CONN_AG_CTX_CF12_MASK      0x3
+#define XSTORM_CORE_CONN_AG_CTX_CF12_SHIFT     0
+#define XSTORM_CORE_CONN_AG_CTX_CF13_MASK      0x3
+#define XSTORM_CORE_CONN_AG_CTX_CF13_SHIFT     2
+#define XSTORM_CORE_CONN_AG_CTX_CF14_MASK      0x3
+#define XSTORM_CORE_CONN_AG_CTX_CF14_SHIFT     4
+#define XSTORM_CORE_CONN_AG_CTX_CF15_MASK      0x3
+#define XSTORM_CORE_CONN_AG_CTX_CF15_SHIFT     6
+       u8 flags6;
+#define XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_MASK  0x3
+#define XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_SHIFT 0
+#define XSTORM_CORE_CONN_AG_CTX_CF17_MASK              0x3
+#define XSTORM_CORE_CONN_AG_CTX_CF17_SHIFT             2
+#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_MASK             0x3
+#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_SHIFT            4
+#define XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_MASK      0x3
+#define XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_SHIFT     6
+       u8 flags7;
+#define XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_MASK          0x3
+#define XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_SHIFT         0
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED10_MASK                0x3
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED10_SHIFT       2
+#define XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_MASK         0x3
+#define XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_SHIFT                4
+#define XSTORM_CORE_CONN_AG_CTX_CF0EN_MASK             0x1
+#define XSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT            6
+#define XSTORM_CORE_CONN_AG_CTX_CF1EN_MASK             0x1
+#define XSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT            7
+       u8 flags8;
+#define XSTORM_CORE_CONN_AG_CTX_CF2EN_MASK     0x1
+#define XSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT    0
+#define XSTORM_CORE_CONN_AG_CTX_CF3EN_MASK     0x1
+#define XSTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT    1
+#define XSTORM_CORE_CONN_AG_CTX_CF4EN_MASK     0x1
+#define XSTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT    2
+#define XSTORM_CORE_CONN_AG_CTX_CF5EN_MASK     0x1
+#define XSTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT    3
+#define XSTORM_CORE_CONN_AG_CTX_CF6EN_MASK     0x1
+#define XSTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT    4
+#define XSTORM_CORE_CONN_AG_CTX_CF7EN_MASK     0x1
+#define XSTORM_CORE_CONN_AG_CTX_CF7EN_SHIFT    5
+#define XSTORM_CORE_CONN_AG_CTX_CF8EN_MASK     0x1
+#define XSTORM_CORE_CONN_AG_CTX_CF8EN_SHIFT    6
+#define XSTORM_CORE_CONN_AG_CTX_CF9EN_MASK     0x1
+#define XSTORM_CORE_CONN_AG_CTX_CF9EN_SHIFT    7
+       u8 flags9;
+#define XSTORM_CORE_CONN_AG_CTX_CF10EN_MASK                    0x1
+#define XSTORM_CORE_CONN_AG_CTX_CF10EN_SHIFT                   0
+#define XSTORM_CORE_CONN_AG_CTX_CF11EN_MASK                    0x1
+#define XSTORM_CORE_CONN_AG_CTX_CF11EN_SHIFT                   1
+#define XSTORM_CORE_CONN_AG_CTX_CF12EN_MASK                    0x1
+#define XSTORM_CORE_CONN_AG_CTX_CF12EN_SHIFT                   2
+#define XSTORM_CORE_CONN_AG_CTX_CF13EN_MASK                    0x1
+#define XSTORM_CORE_CONN_AG_CTX_CF13EN_SHIFT                   3
+#define XSTORM_CORE_CONN_AG_CTX_CF14EN_MASK                    0x1
+#define XSTORM_CORE_CONN_AG_CTX_CF14EN_SHIFT                   4
+#define XSTORM_CORE_CONN_AG_CTX_CF15EN_MASK                    0x1
+#define XSTORM_CORE_CONN_AG_CTX_CF15EN_SHIFT                   5
+#define XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN_MASK       0x1
+#define XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN_SHIFT      6
+#define XSTORM_CORE_CONN_AG_CTX_CF17EN_MASK                    0x1
+#define XSTORM_CORE_CONN_AG_CTX_CF17EN_SHIFT                   7
+       u8 flags10;
+#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN_MASK          0x1
+#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN_SHIFT         0
+#define XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_EN_MASK   0x1
+#define XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_EN_SHIFT  1
+#define XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_EN_MASK       0x1
+#define XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT      2
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED11_MASK                0x1
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED11_SHIFT       3
+#define XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_EN_MASK      0x1
+#define XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_EN_SHIFT     4
+#define XSTORM_CORE_CONN_AG_CTX_CF23EN_MASK            0x1
+#define XSTORM_CORE_CONN_AG_CTX_CF23EN_SHIFT           5
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED12_MASK                0x1
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED12_SHIFT       6
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED13_MASK                0x1
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED13_SHIFT       7
+       u8 flags11;
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED14_MASK                0x1
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED14_SHIFT       0
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED15_MASK                0x1
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED15_SHIFT       1
+#define XSTORM_CORE_CONN_AG_CTX_TX_DEC_RULE_EN_MASK    0x1
+#define XSTORM_CORE_CONN_AG_CTX_TX_DEC_RULE_EN_SHIFT   2
+#define XSTORM_CORE_CONN_AG_CTX_RULE5EN_MASK           0x1
+#define XSTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT          3
+#define XSTORM_CORE_CONN_AG_CTX_RULE6EN_MASK           0x1
+#define XSTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT          4
+#define XSTORM_CORE_CONN_AG_CTX_RULE7EN_MASK           0x1
+#define XSTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT          5
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED1_MASK      0x1
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED1_SHIFT     6
+#define XSTORM_CORE_CONN_AG_CTX_RULE9EN_MASK           0x1
+#define XSTORM_CORE_CONN_AG_CTX_RULE9EN_SHIFT          7
+       u8 flags12;
+#define XSTORM_CORE_CONN_AG_CTX_RULE10EN_MASK          0x1
+#define XSTORM_CORE_CONN_AG_CTX_RULE10EN_SHIFT         0
+#define XSTORM_CORE_CONN_AG_CTX_RULE11EN_MASK          0x1
+#define XSTORM_CORE_CONN_AG_CTX_RULE11EN_SHIFT         1
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED2_MASK      0x1
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED2_SHIFT     2
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED3_MASK      0x1
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED3_SHIFT     3
+#define XSTORM_CORE_CONN_AG_CTX_RULE14EN_MASK          0x1
+#define XSTORM_CORE_CONN_AG_CTX_RULE14EN_SHIFT         4
+#define XSTORM_CORE_CONN_AG_CTX_RULE15EN_MASK          0x1
+#define XSTORM_CORE_CONN_AG_CTX_RULE15EN_SHIFT         5
+#define XSTORM_CORE_CONN_AG_CTX_RULE16EN_MASK          0x1
+#define XSTORM_CORE_CONN_AG_CTX_RULE16EN_SHIFT         6
+#define XSTORM_CORE_CONN_AG_CTX_RULE17EN_MASK          0x1
+#define XSTORM_CORE_CONN_AG_CTX_RULE17EN_SHIFT         7
+       u8 flags13;
+#define XSTORM_CORE_CONN_AG_CTX_RULE18EN_MASK          0x1
+#define XSTORM_CORE_CONN_AG_CTX_RULE18EN_SHIFT         0
+#define XSTORM_CORE_CONN_AG_CTX_RULE19EN_MASK          0x1
+#define XSTORM_CORE_CONN_AG_CTX_RULE19EN_SHIFT         1
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED4_MASK      0x1
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED4_SHIFT     2
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED5_MASK      0x1
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED5_SHIFT     3
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED6_MASK      0x1
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED6_SHIFT     4
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED7_MASK      0x1
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED7_SHIFT     5
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED8_MASK      0x1
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED8_SHIFT     6
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED9_MASK      0x1
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED9_SHIFT     7
+       u8 flags14;
+#define XSTORM_CORE_CONN_AG_CTX_BIT16_MASK     0x1
+#define XSTORM_CORE_CONN_AG_CTX_BIT16_SHIFT    0
+#define XSTORM_CORE_CONN_AG_CTX_BIT17_MASK     0x1
+#define XSTORM_CORE_CONN_AG_CTX_BIT17_SHIFT    1
+#define XSTORM_CORE_CONN_AG_CTX_BIT18_MASK     0x1
+#define XSTORM_CORE_CONN_AG_CTX_BIT18_SHIFT    2
+#define XSTORM_CORE_CONN_AG_CTX_BIT19_MASK     0x1
+#define XSTORM_CORE_CONN_AG_CTX_BIT19_SHIFT    3
+#define XSTORM_CORE_CONN_AG_CTX_BIT20_MASK     0x1
+#define XSTORM_CORE_CONN_AG_CTX_BIT20_SHIFT    4
+#define XSTORM_CORE_CONN_AG_CTX_BIT21_MASK     0x1
+#define XSTORM_CORE_CONN_AG_CTX_BIT21_SHIFT    5
+#define XSTORM_CORE_CONN_AG_CTX_CF23_MASK      0x3
+#define XSTORM_CORE_CONN_AG_CTX_CF23_SHIFT     6
+       u8 byte2;
+       __le16 physical_q0;
+       __le16 consolid_prod;
+       __le16 reserved16;
+       __le16 tx_bd_cons;
+       __le16 tx_bd_or_spq_prod;
+       __le16 word5;
+       __le16 conn_dpi;
+       u8 byte3;
+       u8 byte4;
+       u8 byte5;
+       u8 byte6;
+       __le32 reg0;
+       __le32 reg1;
+       __le32 reg2;
+       __le32 reg3;
+       __le32 reg4;
+       __le32 reg5;
+       __le32 reg6;
+       __le16 word7;
+       __le16 word8;
+       __le16 word9;
+       __le16 word10;
+       __le32 reg7;
+       __le32 reg8;
+       __le32 reg9;
+       u8 byte7;
+       u8 byte8;
+       u8 byte9;
+       u8 byte10;
+       u8 byte11;
+       u8 byte12;
+       u8 byte13;
+       u8 byte14;
+       u8 byte15;
+       u8 byte16;
+       __le16 word11;
+       __le32 reg10;
+       __le32 reg11;
+       __le32 reg12;
+       __le32 reg13;
+       __le32 reg14;
+       __le32 reg15;
+       __le32 reg16;
+       __le32 reg17;
+       __le32 reg18;
+       __le32 reg19;
+       __le16 word12;
+       __le16 word13;
+       __le16 word14;
+       __le16 word15;
+};
+
+struct tstorm_core_conn_ag_ctx {
+       u8 byte0;
+       u8 byte1;
+       u8 flags0;
+#define TSTORM_CORE_CONN_AG_CTX_BIT0_MASK      0x1
+#define TSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT     0
+#define TSTORM_CORE_CONN_AG_CTX_BIT1_MASK      0x1
+#define TSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT     1
+#define TSTORM_CORE_CONN_AG_CTX_BIT2_MASK      0x1
+#define TSTORM_CORE_CONN_AG_CTX_BIT2_SHIFT     2
+#define TSTORM_CORE_CONN_AG_CTX_BIT3_MASK      0x1
+#define TSTORM_CORE_CONN_AG_CTX_BIT3_SHIFT     3
+#define TSTORM_CORE_CONN_AG_CTX_BIT4_MASK      0x1
+#define TSTORM_CORE_CONN_AG_CTX_BIT4_SHIFT     4
+#define TSTORM_CORE_CONN_AG_CTX_BIT5_MASK      0x1
+#define TSTORM_CORE_CONN_AG_CTX_BIT5_SHIFT     5
+#define TSTORM_CORE_CONN_AG_CTX_CF0_MASK       0x3
+#define TSTORM_CORE_CONN_AG_CTX_CF0_SHIFT      6
+       u8 flags1;
+#define TSTORM_CORE_CONN_AG_CTX_CF1_MASK       0x3
+#define TSTORM_CORE_CONN_AG_CTX_CF1_SHIFT      0
+#define TSTORM_CORE_CONN_AG_CTX_CF2_MASK       0x3
+#define TSTORM_CORE_CONN_AG_CTX_CF2_SHIFT      2
+#define TSTORM_CORE_CONN_AG_CTX_CF3_MASK       0x3
+#define TSTORM_CORE_CONN_AG_CTX_CF3_SHIFT      4
+#define TSTORM_CORE_CONN_AG_CTX_CF4_MASK       0x3
+#define TSTORM_CORE_CONN_AG_CTX_CF4_SHIFT      6
+       u8 flags2;
+#define TSTORM_CORE_CONN_AG_CTX_CF5_MASK       0x3
+#define TSTORM_CORE_CONN_AG_CTX_CF5_SHIFT      0
+#define TSTORM_CORE_CONN_AG_CTX_CF6_MASK       0x3
+#define TSTORM_CORE_CONN_AG_CTX_CF6_SHIFT      2
+#define TSTORM_CORE_CONN_AG_CTX_CF7_MASK       0x3
+#define TSTORM_CORE_CONN_AG_CTX_CF7_SHIFT      4
+#define TSTORM_CORE_CONN_AG_CTX_CF8_MASK       0x3
+#define TSTORM_CORE_CONN_AG_CTX_CF8_SHIFT      6
+       u8 flags3;
+#define TSTORM_CORE_CONN_AG_CTX_CF9_MASK       0x3
+#define TSTORM_CORE_CONN_AG_CTX_CF9_SHIFT      0
+#define TSTORM_CORE_CONN_AG_CTX_CF10_MASK      0x3
+#define TSTORM_CORE_CONN_AG_CTX_CF10_SHIFT     2
+#define TSTORM_CORE_CONN_AG_CTX_CF0EN_MASK     0x1
+#define TSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT    4
+#define TSTORM_CORE_CONN_AG_CTX_CF1EN_MASK     0x1
+#define TSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT    5
+#define TSTORM_CORE_CONN_AG_CTX_CF2EN_MASK     0x1
+#define TSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT    6
+#define TSTORM_CORE_CONN_AG_CTX_CF3EN_MASK     0x1
+#define TSTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT    7
+       u8 flags4;
+#define TSTORM_CORE_CONN_AG_CTX_CF4EN_MASK     0x1
+#define TSTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT    0
+#define TSTORM_CORE_CONN_AG_CTX_CF5EN_MASK     0x1
+#define TSTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT    1
+#define TSTORM_CORE_CONN_AG_CTX_CF6EN_MASK     0x1
+#define TSTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT    2
+#define TSTORM_CORE_CONN_AG_CTX_CF7EN_MASK     0x1
+#define TSTORM_CORE_CONN_AG_CTX_CF7EN_SHIFT    3
+#define TSTORM_CORE_CONN_AG_CTX_CF8EN_MASK     0x1
+#define TSTORM_CORE_CONN_AG_CTX_CF8EN_SHIFT    4
+#define TSTORM_CORE_CONN_AG_CTX_CF9EN_MASK     0x1
+#define TSTORM_CORE_CONN_AG_CTX_CF9EN_SHIFT    5
+#define TSTORM_CORE_CONN_AG_CTX_CF10EN_MASK    0x1
+#define TSTORM_CORE_CONN_AG_CTX_CF10EN_SHIFT   6
+#define TSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK   0x1
+#define TSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT  7
+       u8 flags5;
+#define TSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK   0x1
+#define TSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT  0
+#define TSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK   0x1
+#define TSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT  1
+#define TSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK   0x1
+#define TSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT  2
+#define TSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK   0x1
+#define TSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT  3
+#define TSTORM_CORE_CONN_AG_CTX_RULE5EN_MASK   0x1
+#define TSTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT  4
+#define TSTORM_CORE_CONN_AG_CTX_RULE6EN_MASK   0x1
+#define TSTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT  5
+#define TSTORM_CORE_CONN_AG_CTX_RULE7EN_MASK   0x1
+#define TSTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT  6
+#define TSTORM_CORE_CONN_AG_CTX_RULE8EN_MASK   0x1
+#define TSTORM_CORE_CONN_AG_CTX_RULE8EN_SHIFT  7
+       __le32 reg0;
+       __le32 reg1;
+       __le32 reg2;
+       __le32 reg3;
+       __le32 reg4;
+       __le32 reg5;
+       __le32 reg6;
+       __le32 reg7;
+       __le32 reg8;
+       u8 byte2;
+       u8 byte3;
+       __le16 word0;
+       u8 byte4;
+       u8 byte5;
+       __le16 word1;
+       __le16 word2;
+       __le16 word3;
+       __le32 reg9;
+       __le32 reg10;
+};
+
+struct ustorm_core_conn_ag_ctx {
+       u8 reserved;
+       u8 byte1;
+       u8 flags0;
+#define USTORM_CORE_CONN_AG_CTX_BIT0_MASK      0x1
+#define USTORM_CORE_CONN_AG_CTX_BIT0_SHIFT     0
+#define USTORM_CORE_CONN_AG_CTX_BIT1_MASK      0x1
+#define USTORM_CORE_CONN_AG_CTX_BIT1_SHIFT     1
+#define USTORM_CORE_CONN_AG_CTX_CF0_MASK       0x3
+#define USTORM_CORE_CONN_AG_CTX_CF0_SHIFT      2
+#define USTORM_CORE_CONN_AG_CTX_CF1_MASK       0x3
+#define USTORM_CORE_CONN_AG_CTX_CF1_SHIFT      4
+#define USTORM_CORE_CONN_AG_CTX_CF2_MASK       0x3
+#define USTORM_CORE_CONN_AG_CTX_CF2_SHIFT      6
+       u8 flags1;
+#define USTORM_CORE_CONN_AG_CTX_CF3_MASK       0x3
+#define USTORM_CORE_CONN_AG_CTX_CF3_SHIFT      0
+#define USTORM_CORE_CONN_AG_CTX_CF4_MASK       0x3
+#define USTORM_CORE_CONN_AG_CTX_CF4_SHIFT      2
+#define USTORM_CORE_CONN_AG_CTX_CF5_MASK       0x3
+#define USTORM_CORE_CONN_AG_CTX_CF5_SHIFT      4
+#define USTORM_CORE_CONN_AG_CTX_CF6_MASK       0x3
+#define USTORM_CORE_CONN_AG_CTX_CF6_SHIFT      6
+       u8 flags2;
+#define USTORM_CORE_CONN_AG_CTX_CF0EN_MASK     0x1
+#define USTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT    0
+#define USTORM_CORE_CONN_AG_CTX_CF1EN_MASK     0x1
+#define USTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT    1
+#define USTORM_CORE_CONN_AG_CTX_CF2EN_MASK     0x1
+#define USTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT    2
+#define USTORM_CORE_CONN_AG_CTX_CF3EN_MASK     0x1
+#define USTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT    3
+#define USTORM_CORE_CONN_AG_CTX_CF4EN_MASK     0x1
+#define USTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT    4
+#define USTORM_CORE_CONN_AG_CTX_CF5EN_MASK     0x1
+#define USTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT    5
+#define USTORM_CORE_CONN_AG_CTX_CF6EN_MASK     0x1
+#define USTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT    6
+#define USTORM_CORE_CONN_AG_CTX_RULE0EN_MASK   0x1
+#define USTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT  7
+       u8 flags3;
+#define USTORM_CORE_CONN_AG_CTX_RULE1EN_MASK   0x1
+#define USTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT  0
+#define USTORM_CORE_CONN_AG_CTX_RULE2EN_MASK   0x1
+#define USTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT  1
+#define USTORM_CORE_CONN_AG_CTX_RULE3EN_MASK   0x1
+#define USTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT  2
+#define USTORM_CORE_CONN_AG_CTX_RULE4EN_MASK   0x1
+#define USTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT  3
+#define USTORM_CORE_CONN_AG_CTX_RULE5EN_MASK   0x1
+#define USTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT  4
+#define USTORM_CORE_CONN_AG_CTX_RULE6EN_MASK   0x1
+#define USTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT  5
+#define USTORM_CORE_CONN_AG_CTX_RULE7EN_MASK   0x1
+#define USTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT  6
+#define USTORM_CORE_CONN_AG_CTX_RULE8EN_MASK   0x1
+#define USTORM_CORE_CONN_AG_CTX_RULE8EN_SHIFT  7
+       u8 byte2;
+       u8 byte3;
+       __le16 word0;
+       __le16 word1;
+       __le32 rx_producers;
+       __le32 reg1;
+       __le32 reg2;
+       __le32 reg3;
+       __le16 word2;
+       __le16 word3;
+};
+
+/* The core storm context for the Mstorm */
+struct mstorm_core_conn_st_ctx {
+       __le32 reserved[24];
+};
+
+/* The core storm context for the Ustorm */
+struct ustorm_core_conn_st_ctx {
+       __le32 reserved[4];
+};
+
+/* core connection context */
+struct core_conn_context {
+       struct ystorm_core_conn_st_ctx ystorm_st_context;
+       struct regpair ystorm_st_padding[2];
+       struct pstorm_core_conn_st_ctx pstorm_st_context;
+       struct regpair pstorm_st_padding[2];
+       struct xstorm_core_conn_st_ctx xstorm_st_context;
+       struct xstorm_core_conn_ag_ctx xstorm_ag_context;
+       struct tstorm_core_conn_ag_ctx tstorm_ag_context;
+       struct ustorm_core_conn_ag_ctx ustorm_ag_context;
+       struct mstorm_core_conn_st_ctx mstorm_st_context;
+       struct ustorm_core_conn_st_ctx ustorm_st_context;
+       struct regpair ustorm_st_padding[2];
+};
+
+struct eth_mstorm_per_pf_stat {
+       struct regpair gre_discard_pkts;
+       struct regpair vxlan_discard_pkts;
+       struct regpair geneve_discard_pkts;
+       struct regpair lb_discard_pkts;
+};
+
+struct eth_mstorm_per_queue_stat {
+       struct regpair ttl0_discard;
+       struct regpair packet_too_big_discard;
+       struct regpair no_buff_discard;
+       struct regpair not_active_discard;
+       struct regpair tpa_coalesced_pkts;
+       struct regpair tpa_coalesced_events;
+       struct regpair tpa_aborts_num;
+       struct regpair tpa_coalesced_bytes;
+};
+
+/* Ethernet TX Per PF */
+struct eth_pstorm_per_pf_stat {
+       struct regpair sent_lb_ucast_bytes;
+       struct regpair sent_lb_mcast_bytes;
+       struct regpair sent_lb_bcast_bytes;
+       struct regpair sent_lb_ucast_pkts;
+       struct regpair sent_lb_mcast_pkts;
+       struct regpair sent_lb_bcast_pkts;
+       struct regpair sent_gre_bytes;
+       struct regpair sent_vxlan_bytes;
+       struct regpair sent_geneve_bytes;
+       struct regpair sent_gre_pkts;
+       struct regpair sent_vxlan_pkts;
+       struct regpair sent_geneve_pkts;
+       struct regpair gre_drop_pkts;
+       struct regpair vxlan_drop_pkts;
+       struct regpair geneve_drop_pkts;
+};
+
+/* Ethernet TX Per Queue Stats */
+struct eth_pstorm_per_queue_stat {
+       struct regpair sent_ucast_bytes;
+       struct regpair sent_mcast_bytes;
+       struct regpair sent_bcast_bytes;
+       struct regpair sent_ucast_pkts;
+       struct regpair sent_mcast_pkts;
+       struct regpair sent_bcast_pkts;
+       struct regpair error_drop_pkts;
+};
+
+/* ETH Rx producers data */
+struct eth_rx_rate_limit {
+       __le16 mult;
+       __le16 cnst;
+       u8 add_sub_cnst;
+       u8 reserved0;
+       __le16 reserved1;
+};
+
+struct eth_ustorm_per_pf_stat {
+       struct regpair rcv_lb_ucast_bytes;
+       struct regpair rcv_lb_mcast_bytes;
+       struct regpair rcv_lb_bcast_bytes;
+       struct regpair rcv_lb_ucast_pkts;
+       struct regpair rcv_lb_mcast_pkts;
+       struct regpair rcv_lb_bcast_pkts;
+       struct regpair rcv_gre_bytes;
+       struct regpair rcv_vxlan_bytes;
+       struct regpair rcv_geneve_bytes;
+       struct regpair rcv_gre_pkts;
+       struct regpair rcv_vxlan_pkts;
+       struct regpair rcv_geneve_pkts;
+};
+
+struct eth_ustorm_per_queue_stat {
+       struct regpair rcv_ucast_bytes;
+       struct regpair rcv_mcast_bytes;
+       struct regpair rcv_bcast_bytes;
+       struct regpair rcv_ucast_pkts;
+       struct regpair rcv_mcast_pkts;
+       struct regpair rcv_bcast_pkts;
+};
+
+/* Event Ring Next Page Address */
+struct event_ring_next_addr {
+       struct regpair addr;
+       __le32 reserved[2];
+};
+
+/* Event Ring Element */
+union event_ring_element {
+       struct event_ring_entry entry;
+       struct event_ring_next_addr next_addr;
+};
+
+/* Major and Minor hsi Versions */
+struct hsi_fp_ver_struct {
+       u8 minor_ver_arr[2];
+       u8 major_ver_arr[2];
+};
+
+/* Mstorm non-triggering VF zone */
+struct mstorm_non_trigger_vf_zone {
+       struct eth_mstorm_per_queue_stat eth_queue_stat;
+       struct eth_rx_prod_data eth_rx_queue_producers[ETH_MAX_NUM_RX_QUEUES_PER_VF];
+};
+
+/* Mstorm VF zone */
+struct mstorm_vf_zone {
+       struct mstorm_non_trigger_vf_zone non_trigger;
+};
+
+/* personality per PF */
+enum personality_type {
+       BAD_PERSONALITY_TYP,
+       PERSONALITY_ISCSI,
+       PERSONALITY_RESERVED2,
+       PERSONALITY_RDMA_AND_ETH,
+       PERSONALITY_RESERVED3,
+       PERSONALITY_CORE,
+       PERSONALITY_ETH,
+       PERSONALITY_RESERVED4,
+       MAX_PERSONALITY_TYPE
+};
+
+/* tunnel configuration */
+struct pf_start_tunnel_config {
+       u8 set_vxlan_udp_port_flg;
+       u8 set_geneve_udp_port_flg;
+       u8 tx_enable_vxlan;
+       u8 tx_enable_l2geneve;
+       u8 tx_enable_ipgeneve;
+       u8 tx_enable_l2gre;
+       u8 tx_enable_ipgre;
+       u8 tunnel_clss_vxlan;
+       u8 tunnel_clss_l2geneve;
+       u8 tunnel_clss_ipgeneve;
+       u8 tunnel_clss_l2gre;
+       u8 tunnel_clss_ipgre;
+       __le16 vxlan_udp_port;
+       __le16 geneve_udp_port;
+};
+
+/* Ramrod data for PF start ramrod */
+struct pf_start_ramrod_data {
+       struct regpair event_ring_pbl_addr;
+       struct regpair consolid_q_pbl_addr;
+       struct pf_start_tunnel_config tunnel_config;
+       __le16 event_ring_sb_id;
+       u8 base_vf_id;
+       u8 num_vfs;
+       u8 event_ring_num_pages;
+       u8 event_ring_sb_index;
+       u8 path_id;
+       u8 warning_as_error;
+       u8 dont_log_ramrods;
+       u8 personality;
+       __le16 log_type_mask;
+       u8 mf_mode;
+       u8 integ_phase;
+       u8 allow_npar_tx_switching;
+       u8 inner_to_outer_pri_map[8];
+       u8 pri_map_valid;
+       __le32 outer_tag;
+       struct hsi_fp_ver_struct hsi_fp_ver;
+};
+
+struct protocol_dcb_data {
+       u8 dcb_enable_flag;
+       u8 dcb_priority;
+       u8 dcb_tc;
+       u8 reserved;
+};
+
+struct pf_update_tunnel_config {
+       u8 update_rx_pf_clss;
+       u8 update_tx_pf_clss;
+       u8 set_vxlan_udp_port_flg;
+       u8 set_geneve_udp_port_flg;
+       u8 tx_enable_vxlan;
+       u8 tx_enable_l2geneve;
+       u8 tx_enable_ipgeneve;
+       u8 tx_enable_l2gre;
+       u8 tx_enable_ipgre;
+       u8 tunnel_clss_vxlan;
+       u8 tunnel_clss_l2geneve;
+       u8 tunnel_clss_ipgeneve;
+       u8 tunnel_clss_l2gre;
+       u8 tunnel_clss_ipgre;
+       __le16 vxlan_udp_port;
+       __le16 geneve_udp_port;
+       __le16 reserved[3];
+};
+
+struct pf_update_ramrod_data {
+       u8 pf_id;
+       u8 update_eth_dcb_data_flag;
+       u8 update_fcoe_dcb_data_flag;
+       u8 update_iscsi_dcb_data_flag;
+       u8 update_roce_dcb_data_flag;
+       u8 update_iwarp_dcb_data_flag;
+       u8 update_mf_vlan_flag;
+       u8 reserved;
+       struct protocol_dcb_data eth_dcb_data;
+       struct protocol_dcb_data fcoe_dcb_data;
+       struct protocol_dcb_data iscsi_dcb_data;
+       struct protocol_dcb_data roce_dcb_data;
+       struct protocol_dcb_data iwarp_dcb_data;
+       __le16 mf_vlan;
+       __le16 reserved2;
+       struct pf_update_tunnel_config tunnel_config;
+};
+
+/* Ports mode */
+enum ports_mode {
+       ENGX2_PORTX1,
+       ENGX2_PORTX2,
+       ENGX1_PORTX1,
+       ENGX1_PORTX2,
+       ENGX1_PORTX4,
+       MAX_PORTS_MODE
+};
+
+/* use to index in hsi_fp_[major|minor]_ver_arr per protocol */
+enum protocol_version_array_key {
+       ETH_VER_KEY = 0,
+       ROCE_VER_KEY,
+       MAX_PROTOCOL_VERSION_ARRAY_KEY
+};
+
+/* Pstorm non-triggering VF zone */
+struct pstorm_non_trigger_vf_zone {
+       struct eth_pstorm_per_queue_stat eth_queue_stat;
+       struct regpair reserved[2];
+};
+
+/* Pstorm VF zone */
+struct pstorm_vf_zone {
+       struct pstorm_non_trigger_vf_zone non_trigger;
+       struct regpair reserved[7];
+};
+
+/* Ramrod Header of SPQE */
+struct ramrod_header {
+       __le32 cid;
+       u8 cmd_id;
+       u8 protocol_id;
+       __le16 echo;
+};
+
+/* Slowpath Element (SPQE) */
+struct slow_path_element {
+       struct ramrod_header hdr;
+       struct regpair data_ptr;
+};
+
+/* Tstorm non-triggering VF zone */
+struct tstorm_non_trigger_vf_zone {
+       struct regpair reserved[2];
+};
+
+struct tstorm_per_port_stat {
+       struct regpair trunc_error_discard;
+       struct regpair mac_error_discard;
+       struct regpair mftag_filter_discard;
+       struct regpair eth_mac_filter_discard;
+       struct regpair reserved[5];
+       struct regpair eth_irregular_pkt;
+       struct regpair reserved1[2];
+       struct regpair eth_gre_tunn_filter_discard;
+       struct regpair eth_vxlan_tunn_filter_discard;
+       struct regpair eth_geneve_tunn_filter_discard;
+};
+
+/* Tstorm VF zone */
+struct tstorm_vf_zone {
+       struct tstorm_non_trigger_vf_zone non_trigger;
+};
+
+/* Tunnel classification scheme */
+enum tunnel_clss {
+       TUNNEL_CLSS_MAC_VLAN = 0,
+       TUNNEL_CLSS_MAC_VNI,
+       TUNNEL_CLSS_INNER_MAC_VLAN,
+       TUNNEL_CLSS_INNER_MAC_VNI,
+       TUNNEL_CLSS_MAC_VLAN_DUAL_STAGE,
+       MAX_TUNNEL_CLSS
+};
+
+/* Ustorm non-triggering VF zone */
+struct ustorm_non_trigger_vf_zone {
+       struct eth_ustorm_per_queue_stat eth_queue_stat;
+       struct regpair vf_pf_msg_addr;
+};
+
+/* Ustorm triggering VF zone */
+struct ustorm_trigger_vf_zone {
+       u8 vf_pf_msg_valid;
+       u8 reserved[7];
+};
+
+/* Ustorm VF zone */
+struct ustorm_vf_zone {
+       struct ustorm_non_trigger_vf_zone non_trigger;
+       struct ustorm_trigger_vf_zone trigger;
+};
+
+/* VF-PF channel data */
+struct vf_pf_channel_data {
+       __le32 ready;
+       u8 valid;
+       u8 reserved0;
+       __le16 reserved1;
+};
+
+/* Ramrod data for VF start ramrod */
+struct vf_start_ramrod_data {
+       u8 vf_id;
+       u8 enable_flr_ack;
+       __le16 opaque_fid;
+       u8 personality;
+       u8 reserved[7];
+       struct hsi_fp_ver_struct hsi_fp_ver;
+};
+
+/* Ramrod data for VF stop ramrod */
+struct vf_stop_ramrod_data {
+       u8 vf_id;
+       u8 reserved0;
+       __le16 reserved1;
+       __le32 reserved2;
+};
+
+/* Attentions status block */
+struct atten_status_block {
+       __le32 atten_bits;
+       __le32 atten_ack;
+       __le16 reserved0;
+       __le16 sb_index;
+       __le32 reserved1;
+};
+
+enum command_type_bit {
+       IGU_COMMAND_TYPE_NOP = 0,
+       IGU_COMMAND_TYPE_SET = 1,
+       MAX_COMMAND_TYPE_BIT
+};
+
+/* DMAE command */
+struct dmae_cmd {
+       __le32 opcode;
+#define DMAE_CMD_SRC_MASK              0x1
+#define DMAE_CMD_SRC_SHIFT             0
+#define DMAE_CMD_DST_MASK              0x3
+#define DMAE_CMD_DST_SHIFT             1
+#define DMAE_CMD_C_DST_MASK            0x1
+#define DMAE_CMD_C_DST_SHIFT           3
+#define DMAE_CMD_CRC_RESET_MASK                0x1
+#define DMAE_CMD_CRC_RESET_SHIFT       4
+#define DMAE_CMD_SRC_ADDR_RESET_MASK   0x1
+#define DMAE_CMD_SRC_ADDR_RESET_SHIFT  5
+#define DMAE_CMD_DST_ADDR_RESET_MASK   0x1
+#define DMAE_CMD_DST_ADDR_RESET_SHIFT  6
+#define DMAE_CMD_COMP_FUNC_MASK                0x1
+#define DMAE_CMD_COMP_FUNC_SHIFT       7
+#define DMAE_CMD_COMP_WORD_EN_MASK     0x1
+#define DMAE_CMD_COMP_WORD_EN_SHIFT    8
+#define DMAE_CMD_COMP_CRC_EN_MASK      0x1
+#define DMAE_CMD_COMP_CRC_EN_SHIFT     9
+#define DMAE_CMD_COMP_CRC_OFFSET_MASK  0x7
+#define DMAE_CMD_COMP_CRC_OFFSET_SHIFT 10
+#define DMAE_CMD_RESERVED1_MASK                0x1
+#define DMAE_CMD_RESERVED1_SHIFT       13
+#define DMAE_CMD_ENDIANITY_MODE_MASK   0x3
+#define DMAE_CMD_ENDIANITY_MODE_SHIFT  14
+#define DMAE_CMD_ERR_HANDLING_MASK     0x3
+#define DMAE_CMD_ERR_HANDLING_SHIFT    16
+#define DMAE_CMD_PORT_ID_MASK          0x3
+#define DMAE_CMD_PORT_ID_SHIFT         18
+#define DMAE_CMD_SRC_PF_ID_MASK                0xF
+#define DMAE_CMD_SRC_PF_ID_SHIFT       20
+#define DMAE_CMD_DST_PF_ID_MASK                0xF
+#define DMAE_CMD_DST_PF_ID_SHIFT       24
+#define DMAE_CMD_SRC_VF_ID_VALID_MASK  0x1
+#define DMAE_CMD_SRC_VF_ID_VALID_SHIFT 28
+#define DMAE_CMD_DST_VF_ID_VALID_MASK  0x1
+#define DMAE_CMD_DST_VF_ID_VALID_SHIFT 29
+#define DMAE_CMD_RESERVED2_MASK                0x3
+#define DMAE_CMD_RESERVED2_SHIFT       30
+       __le32 src_addr_lo;
+       __le32 src_addr_hi;
+       __le32 dst_addr_lo;
+       __le32 dst_addr_hi;
+       __le16 length_dw;
+       __le16 opcode_b;
+#define DMAE_CMD_SRC_VF_ID_MASK                0xFF
+#define DMAE_CMD_SRC_VF_ID_SHIFT       0
+#define DMAE_CMD_DST_VF_ID_MASK                0xFF
+#define DMAE_CMD_DST_VF_ID_SHIFT       8
+       __le32 comp_addr_lo;
+       __le32 comp_addr_hi;
+       __le32 comp_val;
+       __le32 crc32;
+       __le32 crc_32_c;
+       __le16 crc16;
+       __le16 crc16_c;
+       __le16 crc10;
+       __le16 reserved;
+       __le16 xsum16;
+       __le16 xsum8;
+};
+
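Every flags/opcode word in these HSI structures follows the same <FIELD>_MASK / <FIELD>_SHIFT convention: a field value is masked and shifted into its slot in the word. The standalone sketch below only illustrates that packing for two struct dmae_cmd fields; Q_SET_FIELD is a local stand-in rather than the driver's own helper, and in the driver the resulting word would still need a cpu_to_le32() conversion since the struct members are __le32.

	#include <stdint.h>
	#include <stdio.h>

	/* Local stand-in for the MASK/SHIFT packing convention used above. */
	#define Q_SET_FIELD(word, name, val)                                        \
		((word) = ((word) & ~((uint32_t)(name##_MASK) << (name##_SHIFT))) | \
			  (((uint32_t)(val) & (name##_MASK)) << (name##_SHIFT)))

	/* Field layout copied from struct dmae_cmd. */
	#define DMAE_CMD_DST_MASK      0x3
	#define DMAE_CMD_DST_SHIFT     1
	#define DMAE_CMD_PORT_ID_MASK  0x3
	#define DMAE_CMD_PORT_ID_SHIFT 18

	int main(void)
	{
		uint32_t opcode = 0;

		Q_SET_FIELD(opcode, DMAE_CMD_DST, 2);     /* destination: GRC (see dmae_cmd_dst_enum) */
		Q_SET_FIELD(opcode, DMAE_CMD_PORT_ID, 1); /* engine port 1 */
		printf("opcode = 0x%08x\n", opcode);      /* prints 0x00040004 */
		return 0;
	}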
+enum dmae_cmd_comp_crc_en_enum {
+       dmae_cmd_comp_crc_disabled,
+       dmae_cmd_comp_crc_enabled,
+       MAX_DMAE_CMD_COMP_CRC_EN_ENUM
+};
+
+enum dmae_cmd_comp_func_enum {
+       dmae_cmd_comp_func_to_src,
+       dmae_cmd_comp_func_to_dst,
+       MAX_DMAE_CMD_COMP_FUNC_ENUM
+};
+
+enum dmae_cmd_comp_word_en_enum {
+       dmae_cmd_comp_word_disabled,
+       dmae_cmd_comp_word_enabled,
+       MAX_DMAE_CMD_COMP_WORD_EN_ENUM
+};
+
+enum dmae_cmd_c_dst_enum {
+       dmae_cmd_c_dst_pcie,
+       dmae_cmd_c_dst_grc,
+       MAX_DMAE_CMD_C_DST_ENUM
+};
+
+enum dmae_cmd_dst_enum {
+       dmae_cmd_dst_none_0,
+       dmae_cmd_dst_pcie,
+       dmae_cmd_dst_grc,
+       dmae_cmd_dst_none_3,
+       MAX_DMAE_CMD_DST_ENUM
+};
+
+enum dmae_cmd_error_handling_enum {
+       dmae_cmd_error_handling_send_regular_comp,
+       dmae_cmd_error_handling_send_comp_with_err,
+       dmae_cmd_error_handling_dont_send_comp,
+       MAX_DMAE_CMD_ERROR_HANDLING_ENUM
+};
+
+enum dmae_cmd_src_enum {
+       dmae_cmd_src_pcie,
+       dmae_cmd_src_grc,
+       MAX_DMAE_CMD_SRC_ENUM
+};
+
+/* IGU cleanup command */
+struct igu_cleanup {
+       __le32 sb_id_and_flags;
+#define IGU_CLEANUP_RESERVED0_MASK     0x7FFFFFF
+#define IGU_CLEANUP_RESERVED0_SHIFT    0
+#define IGU_CLEANUP_CLEANUP_SET_MASK   0x1
+#define IGU_CLEANUP_CLEANUP_SET_SHIFT  27
+#define IGU_CLEANUP_CLEANUP_TYPE_MASK  0x7
+#define IGU_CLEANUP_CLEANUP_TYPE_SHIFT 28
+#define IGU_CLEANUP_COMMAND_TYPE_MASK  0x1
+#define IGU_CLEANUP_COMMAND_TYPE_SHIFT 31
+       __le32 reserved1;
+};
+
+/* IGU firmware driver command */
+union igu_command {
+       struct igu_prod_cons_update prod_cons_update;
+       struct igu_cleanup cleanup;
+};
+
+/* IGU firmware driver command */
+struct igu_command_reg_ctrl {
+       __le16 opaque_fid;
+       __le16 igu_command_reg_ctrl_fields;
+#define IGU_COMMAND_REG_CTRL_PXP_BAR_ADDR_MASK 0xFFF
+#define IGU_COMMAND_REG_CTRL_PXP_BAR_ADDR_SHIFT        0
+#define IGU_COMMAND_REG_CTRL_RESERVED_MASK     0x7
+#define IGU_COMMAND_REG_CTRL_RESERVED_SHIFT    12
+#define IGU_COMMAND_REG_CTRL_COMMAND_TYPE_MASK 0x1
+#define IGU_COMMAND_REG_CTRL_COMMAND_TYPE_SHIFT        15
+};
+
+/* IGU mapping line structure */
+struct igu_mapping_line {
+       __le32 igu_mapping_line_fields;
+#define IGU_MAPPING_LINE_VALID_MASK            0x1
+#define IGU_MAPPING_LINE_VALID_SHIFT           0
+#define IGU_MAPPING_LINE_VECTOR_NUMBER_MASK    0xFF
+#define IGU_MAPPING_LINE_VECTOR_NUMBER_SHIFT   1
+#define IGU_MAPPING_LINE_FUNCTION_NUMBER_MASK  0xFF
+#define IGU_MAPPING_LINE_FUNCTION_NUMBER_SHIFT 9
+#define IGU_MAPPING_LINE_PF_VALID_MASK         0x1
+#define IGU_MAPPING_LINE_PF_VALID_SHIFT                17
+#define IGU_MAPPING_LINE_IPS_GROUP_MASK                0x3F
+#define IGU_MAPPING_LINE_IPS_GROUP_SHIFT       18
+#define IGU_MAPPING_LINE_RESERVED_MASK         0xFF
+#define IGU_MAPPING_LINE_RESERVED_SHIFT                24
+};
+
+/* IGU MSIX line structure */
+struct igu_msix_vector {
+       struct regpair address;
+       __le32 data;
+       __le32 msix_vector_fields;
+#define IGU_MSIX_VECTOR_MASK_BIT_MASK          0x1
+#define IGU_MSIX_VECTOR_MASK_BIT_SHIFT         0
+#define IGU_MSIX_VECTOR_RESERVED0_MASK         0x7FFF
+#define IGU_MSIX_VECTOR_RESERVED0_SHIFT                1
+#define IGU_MSIX_VECTOR_STEERING_TAG_MASK      0xFF
+#define IGU_MSIX_VECTOR_STEERING_TAG_SHIFT     16
+#define IGU_MSIX_VECTOR_RESERVED1_MASK         0xFF
+#define IGU_MSIX_VECTOR_RESERVED1_SHIFT                24
+};
+
+struct mstorm_core_conn_ag_ctx {
+       u8 byte0;
+       u8 byte1;
+       u8 flags0;
+#define MSTORM_CORE_CONN_AG_CTX_BIT0_MASK      0x1
+#define MSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT     0
+#define MSTORM_CORE_CONN_AG_CTX_BIT1_MASK      0x1
+#define MSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT     1
+#define MSTORM_CORE_CONN_AG_CTX_CF0_MASK       0x3
+#define MSTORM_CORE_CONN_AG_CTX_CF0_SHIFT      2
+#define MSTORM_CORE_CONN_AG_CTX_CF1_MASK       0x3
+#define MSTORM_CORE_CONN_AG_CTX_CF1_SHIFT      4
+#define MSTORM_CORE_CONN_AG_CTX_CF2_MASK       0x3
+#define MSTORM_CORE_CONN_AG_CTX_CF2_SHIFT      6
+       u8 flags1;
+#define MSTORM_CORE_CONN_AG_CTX_CF0EN_MASK     0x1
+#define MSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT    0
+#define MSTORM_CORE_CONN_AG_CTX_CF1EN_MASK     0x1
+#define MSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT    1
+#define MSTORM_CORE_CONN_AG_CTX_CF2EN_MASK     0x1
+#define MSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT    2
+#define MSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK   0x1
+#define MSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT  3
+#define MSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK   0x1
+#define MSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT  4
+#define MSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK   0x1
+#define MSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT  5
+#define MSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK   0x1
+#define MSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT  6
+#define MSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK   0x1
+#define MSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT  7
+       __le16 word0;
+       __le16 word1;
+       __le32 reg0;
+       __le32 reg1;
+};
+
+/* per encapsulation type enabling flags */
+struct prs_reg_encapsulation_type_en {
+       u8 flags;
+#define PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GRE_ENABLE_MASK         0x1
+#define PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GRE_ENABLE_SHIFT                0
+#define PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GRE_ENABLE_MASK          0x1
+#define PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GRE_ENABLE_SHIFT         1
+#define PRS_REG_ENCAPSULATION_TYPE_EN_VXLAN_ENABLE_MASK                        0x1
+#define PRS_REG_ENCAPSULATION_TYPE_EN_VXLAN_ENABLE_SHIFT               2
+#define PRS_REG_ENCAPSULATION_TYPE_EN_T_TAG_ENABLE_MASK                        0x1
+#define PRS_REG_ENCAPSULATION_TYPE_EN_T_TAG_ENABLE_SHIFT               3
+#define PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GENEVE_ENABLE_MASK      0x1
+#define PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GENEVE_ENABLE_SHIFT     4
+#define PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GENEVE_ENABLE_MASK       0x1
+#define PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GENEVE_ENABLE_SHIFT      5
+#define PRS_REG_ENCAPSULATION_TYPE_EN_RESERVED_MASK                    0x3
+#define PRS_REG_ENCAPSULATION_TYPE_EN_RESERVED_SHIFT                   6
+};
+
+enum pxp_tph_st_hint {
+       TPH_ST_HINT_BIDIR,
+       TPH_ST_HINT_REQUESTER,
+       TPH_ST_HINT_TARGET,
+       TPH_ST_HINT_TARGET_PRIO,
+       MAX_PXP_TPH_ST_HINT
+};
+
+/* QM hardware structure of enable bypass credit mask */
+struct qm_rf_bypass_mask {
+       u8 flags;
+#define QM_RF_BYPASS_MASK_LINEVOQ_MASK         0x1
+#define QM_RF_BYPASS_MASK_LINEVOQ_SHIFT                0
+#define QM_RF_BYPASS_MASK_RESERVED0_MASK       0x1
+#define QM_RF_BYPASS_MASK_RESERVED0_SHIFT      1
+#define QM_RF_BYPASS_MASK_PFWFQ_MASK           0x1
+#define QM_RF_BYPASS_MASK_PFWFQ_SHIFT          2
+#define QM_RF_BYPASS_MASK_VPWFQ_MASK           0x1
+#define QM_RF_BYPASS_MASK_VPWFQ_SHIFT          3
+#define QM_RF_BYPASS_MASK_PFRL_MASK            0x1
+#define QM_RF_BYPASS_MASK_PFRL_SHIFT           4
+#define QM_RF_BYPASS_MASK_VPQCNRL_MASK         0x1
+#define QM_RF_BYPASS_MASK_VPQCNRL_SHIFT                5
+#define QM_RF_BYPASS_MASK_FWPAUSE_MASK         0x1
+#define QM_RF_BYPASS_MASK_FWPAUSE_SHIFT                6
+#define QM_RF_BYPASS_MASK_RESERVED1_MASK       0x1
+#define QM_RF_BYPASS_MASK_RESERVED1_SHIFT      7
+};
+
+/* QM hardware structure of opportunistic credit mask */
+struct qm_rf_opportunistic_mask {
+       __le16 flags;
+#define QM_RF_OPPORTUNISTIC_MASK_LINEVOQ_MASK          0x1
+#define QM_RF_OPPORTUNISTIC_MASK_LINEVOQ_SHIFT         0
+#define QM_RF_OPPORTUNISTIC_MASK_BYTEVOQ_MASK          0x1
+#define QM_RF_OPPORTUNISTIC_MASK_BYTEVOQ_SHIFT         1
+#define QM_RF_OPPORTUNISTIC_MASK_PFWFQ_MASK            0x1
+#define QM_RF_OPPORTUNISTIC_MASK_PFWFQ_SHIFT           2
+#define QM_RF_OPPORTUNISTIC_MASK_VPWFQ_MASK            0x1
+#define QM_RF_OPPORTUNISTIC_MASK_VPWFQ_SHIFT           3
+#define QM_RF_OPPORTUNISTIC_MASK_PFRL_MASK             0x1
+#define QM_RF_OPPORTUNISTIC_MASK_PFRL_SHIFT            4
+#define QM_RF_OPPORTUNISTIC_MASK_VPQCNRL_MASK          0x1
+#define QM_RF_OPPORTUNISTIC_MASK_VPQCNRL_SHIFT         5
+#define QM_RF_OPPORTUNISTIC_MASK_FWPAUSE_MASK          0x1
+#define QM_RF_OPPORTUNISTIC_MASK_FWPAUSE_SHIFT         6
+#define QM_RF_OPPORTUNISTIC_MASK_RESERVED0_MASK                0x1
+#define QM_RF_OPPORTUNISTIC_MASK_RESERVED0_SHIFT       7
+#define QM_RF_OPPORTUNISTIC_MASK_QUEUEEMPTY_MASK       0x1
+#define QM_RF_OPPORTUNISTIC_MASK_QUEUEEMPTY_SHIFT      8
+#define QM_RF_OPPORTUNISTIC_MASK_RESERVED1_MASK                0x7F
+#define QM_RF_OPPORTUNISTIC_MASK_RESERVED1_SHIFT       9
+};
+
+/* QM hardware structure of QM map memory */
+struct qm_rf_pq_map {
+       __le32 reg;
+#define QM_RF_PQ_MAP_PQ_VALID_MASK             0x1
+#define QM_RF_PQ_MAP_PQ_VALID_SHIFT            0
+#define QM_RF_PQ_MAP_RL_ID_MASK                        0xFF
+#define QM_RF_PQ_MAP_RL_ID_SHIFT               1
+#define QM_RF_PQ_MAP_VP_PQ_ID_MASK             0x1FF
+#define QM_RF_PQ_MAP_VP_PQ_ID_SHIFT            9
+#define QM_RF_PQ_MAP_VOQ_MASK                  0x1F
+#define QM_RF_PQ_MAP_VOQ_SHIFT                 18
+#define QM_RF_PQ_MAP_WRR_WEIGHT_GROUP_MASK     0x3
+#define QM_RF_PQ_MAP_WRR_WEIGHT_GROUP_SHIFT    23
+#define QM_RF_PQ_MAP_RL_VALID_MASK             0x1
+#define QM_RF_PQ_MAP_RL_VALID_SHIFT            25
+#define QM_RF_PQ_MAP_RESERVED_MASK             0x3F
+#define QM_RF_PQ_MAP_RESERVED_SHIFT            26
+};
+
+/* Completion params for aggregated interrupt completion */
+struct sdm_agg_int_comp_params {
+       __le16 params;
+#define SDM_AGG_INT_COMP_PARAMS_AGG_INT_INDEX_MASK     0x3F
+#define SDM_AGG_INT_COMP_PARAMS_AGG_INT_INDEX_SHIFT    0
+#define SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_ENABLE_MASK 0x1
+#define SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_ENABLE_SHIFT        6
+#define SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_BIT_MASK    0x1FF
+#define SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_BIT_SHIFT   7
+};
+
+/* SDM operation gen command (generate aggregative interrupt) */
+struct sdm_op_gen {
+       __le32 command;
+#define SDM_OP_GEN_COMP_PARAM_MASK     0xFFFF
+#define SDM_OP_GEN_COMP_PARAM_SHIFT    0
+#define SDM_OP_GEN_COMP_TYPE_MASK      0xF
+#define SDM_OP_GEN_COMP_TYPE_SHIFT     16
+#define SDM_OP_GEN_RESERVED_MASK       0xFFF
+#define SDM_OP_GEN_RESERVED_SHIFT      20
+};
+
+struct ystorm_core_conn_ag_ctx {
+       u8 byte0;
+       u8 byte1;
+       u8 flags0;
+#define YSTORM_CORE_CONN_AG_CTX_BIT0_MASK      0x1
+#define YSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT     0
+#define YSTORM_CORE_CONN_AG_CTX_BIT1_MASK      0x1
+#define YSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT     1
+#define YSTORM_CORE_CONN_AG_CTX_CF0_MASK       0x3
+#define YSTORM_CORE_CONN_AG_CTX_CF0_SHIFT      2
+#define YSTORM_CORE_CONN_AG_CTX_CF1_MASK       0x3
+#define YSTORM_CORE_CONN_AG_CTX_CF1_SHIFT      4
+#define YSTORM_CORE_CONN_AG_CTX_CF2_MASK       0x3
+#define YSTORM_CORE_CONN_AG_CTX_CF2_SHIFT      6
+       u8 flags1;
+#define YSTORM_CORE_CONN_AG_CTX_CF0EN_MASK     0x1
+#define YSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT    0
+#define YSTORM_CORE_CONN_AG_CTX_CF1EN_MASK     0x1
+#define YSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT    1
+#define YSTORM_CORE_CONN_AG_CTX_CF2EN_MASK     0x1
+#define YSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT    2
+#define YSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK   0x1
+#define YSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT  3
+#define YSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK   0x1
+#define YSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT  4
+#define YSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK   0x1
+#define YSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT  5
+#define YSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK   0x1
+#define YSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT  6
+#define YSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK   0x1
+#define YSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT  7
+       u8 byte2;
+       u8 byte3;
+       __le16 word0;
+       __le32 reg0;
+       __le32 reg1;
+       __le16 word1;
+       __le16 word2;
+       __le16 word3;
+       __le16 word4;
+       __le32 reg2;
+       __le32 reg3;
+};
+
+/****************************************/
+/* Debug Tools HSI constants and macros */
+/****************************************/
+
+enum block_addr {
+       GRCBASE_GRC = 0x50000,
+       GRCBASE_MISCS = 0x9000,
+       GRCBASE_MISC = 0x8000,
+       GRCBASE_DBU = 0xa000,
+       GRCBASE_PGLUE_B = 0x2a8000,
+       GRCBASE_CNIG = 0x218000,
+       GRCBASE_CPMU = 0x30000,
+       GRCBASE_NCSI = 0x40000,
+       GRCBASE_OPTE = 0x53000,
+       GRCBASE_BMB = 0x540000,
+       GRCBASE_PCIE = 0x54000,
+       GRCBASE_MCP = 0xe00000,
+       GRCBASE_MCP2 = 0x52000,
+       GRCBASE_PSWHST = 0x2a0000,
+       GRCBASE_PSWHST2 = 0x29e000,
+       GRCBASE_PSWRD = 0x29c000,
+       GRCBASE_PSWRD2 = 0x29d000,
+       GRCBASE_PSWWR = 0x29a000,
+       GRCBASE_PSWWR2 = 0x29b000,
+       GRCBASE_PSWRQ = 0x280000,
+       GRCBASE_PSWRQ2 = 0x240000,
+       GRCBASE_PGLCS = 0x0,
+       GRCBASE_DMAE = 0xc000,
+       GRCBASE_PTU = 0x560000,
+       GRCBASE_TCM = 0x1180000,
+       GRCBASE_MCM = 0x1200000,
+       GRCBASE_UCM = 0x1280000,
+       GRCBASE_XCM = 0x1000000,
+       GRCBASE_YCM = 0x1080000,
+       GRCBASE_PCM = 0x1100000,
+       GRCBASE_QM = 0x2f0000,
+       GRCBASE_TM = 0x2c0000,
+       GRCBASE_DORQ = 0x100000,
+       GRCBASE_BRB = 0x340000,
+       GRCBASE_SRC = 0x238000,
+       GRCBASE_PRS = 0x1f0000,
+       GRCBASE_TSDM = 0xfb0000,
+       GRCBASE_MSDM = 0xfc0000,
+       GRCBASE_USDM = 0xfd0000,
+       GRCBASE_XSDM = 0xf80000,
+       GRCBASE_YSDM = 0xf90000,
+       GRCBASE_PSDM = 0xfa0000,
+       GRCBASE_TSEM = 0x1700000,
+       GRCBASE_MSEM = 0x1800000,
+       GRCBASE_USEM = 0x1900000,
+       GRCBASE_XSEM = 0x1400000,
+       GRCBASE_YSEM = 0x1500000,
+       GRCBASE_PSEM = 0x1600000,
+       GRCBASE_RSS = 0x238800,
+       GRCBASE_TMLD = 0x4d0000,
+       GRCBASE_MULD = 0x4e0000,
+       GRCBASE_YULD = 0x4c8000,
+       GRCBASE_XYLD = 0x4c0000,
+       GRCBASE_PRM = 0x230000,
+       GRCBASE_PBF_PB1 = 0xda0000,
+       GRCBASE_PBF_PB2 = 0xda4000,
+       GRCBASE_RPB = 0x23c000,
+       GRCBASE_BTB = 0xdb0000,
+       GRCBASE_PBF = 0xd80000,
+       GRCBASE_RDIF = 0x300000,
+       GRCBASE_TDIF = 0x310000,
+       GRCBASE_CDU = 0x580000,
+       GRCBASE_CCFC = 0x2e0000,
+       GRCBASE_TCFC = 0x2d0000,
+       GRCBASE_IGU = 0x180000,
+       GRCBASE_CAU = 0x1c0000,
+       GRCBASE_UMAC = 0x51000,
+       GRCBASE_XMAC = 0x210000,
+       GRCBASE_DBG = 0x10000,
+       GRCBASE_NIG = 0x500000,
+       GRCBASE_WOL = 0x600000,
+       GRCBASE_BMBN = 0x610000,
+       GRCBASE_IPC = 0x20000,
+       GRCBASE_NWM = 0x800000,
+       GRCBASE_NWS = 0x700000,
+       GRCBASE_MS = 0x6a0000,
+       GRCBASE_PHY_PCIE = 0x620000,
+       GRCBASE_LED = 0x6b8000,
+       GRCBASE_MISC_AEU = 0x8000,
+       GRCBASE_BAR0_MAP = 0x1c00000,
+       MAX_BLOCK_ADDR
+};
+
+enum block_id {
+       BLOCK_GRC,
+       BLOCK_MISCS,
+       BLOCK_MISC,
+       BLOCK_DBU,
+       BLOCK_PGLUE_B,
+       BLOCK_CNIG,
+       BLOCK_CPMU,
+       BLOCK_NCSI,
+       BLOCK_OPTE,
+       BLOCK_BMB,
+       BLOCK_PCIE,
+       BLOCK_MCP,
+       BLOCK_MCP2,
+       BLOCK_PSWHST,
+       BLOCK_PSWHST2,
+       BLOCK_PSWRD,
+       BLOCK_PSWRD2,
+       BLOCK_PSWWR,
+       BLOCK_PSWWR2,
+       BLOCK_PSWRQ,
+       BLOCK_PSWRQ2,
+       BLOCK_PGLCS,
+       BLOCK_DMAE,
+       BLOCK_PTU,
+       BLOCK_TCM,
+       BLOCK_MCM,
+       BLOCK_UCM,
+       BLOCK_XCM,
+       BLOCK_YCM,
+       BLOCK_PCM,
+       BLOCK_QM,
+       BLOCK_TM,
+       BLOCK_DORQ,
+       BLOCK_BRB,
+       BLOCK_SRC,
+       BLOCK_PRS,
+       BLOCK_TSDM,
+       BLOCK_MSDM,
+       BLOCK_USDM,
+       BLOCK_XSDM,
+       BLOCK_YSDM,
+       BLOCK_PSDM,
+       BLOCK_TSEM,
+       BLOCK_MSEM,
+       BLOCK_USEM,
+       BLOCK_XSEM,
+       BLOCK_YSEM,
+       BLOCK_PSEM,
+       BLOCK_RSS,
+       BLOCK_TMLD,
+       BLOCK_MULD,
+       BLOCK_YULD,
+       BLOCK_XYLD,
+       BLOCK_PRM,
+       BLOCK_PBF_PB1,
+       BLOCK_PBF_PB2,
+       BLOCK_RPB,
+       BLOCK_BTB,
+       BLOCK_PBF,
+       BLOCK_RDIF,
+       BLOCK_TDIF,
+       BLOCK_CDU,
+       BLOCK_CCFC,
+       BLOCK_TCFC,
+       BLOCK_IGU,
+       BLOCK_CAU,
+       BLOCK_UMAC,
+       BLOCK_XMAC,
+       BLOCK_DBG,
+       BLOCK_NIG,
+       BLOCK_WOL,
+       BLOCK_BMBN,
+       BLOCK_IPC,
+       BLOCK_NWM,
+       BLOCK_NWS,
+       BLOCK_MS,
+       BLOCK_PHY_PCIE,
+       BLOCK_LED,
+       BLOCK_MISC_AEU,
+       BLOCK_BAR0_MAP,
+       MAX_BLOCK_ID
+};
+
+/* binary debug buffer types */
+enum bin_dbg_buffer_type {
+       BIN_BUF_DBG_MODE_TREE,
+       BIN_BUF_DBG_DUMP_REG,
+       BIN_BUF_DBG_DUMP_MEM,
+       BIN_BUF_DBG_IDLE_CHK_REGS,
+       BIN_BUF_DBG_IDLE_CHK_IMMS,
+       BIN_BUF_DBG_IDLE_CHK_RULES,
+       BIN_BUF_DBG_IDLE_CHK_PARSING_DATA,
+       BIN_BUF_DBG_ATTN_BLOCKS,
+       BIN_BUF_DBG_ATTN_REGS,
+       BIN_BUF_DBG_ATTN_INDEXES,
+       BIN_BUF_DBG_ATTN_NAME_OFFSETS,
+       BIN_BUF_DBG_PARSING_STRINGS,
+       MAX_BIN_DBG_BUFFER_TYPE
+};
+
+/* Chip IDs */
+enum chip_ids {
+       CHIP_RESERVED,
+       CHIP_BB_B0,
+       CHIP_RESERVED2,
+       MAX_CHIP_IDS
+};
+
+/* Attention bit mapping */
+struct dbg_attn_bit_mapping {
+       __le16 data;
+#define DBG_ATTN_BIT_MAPPING_VAL_MASK                  0x7FFF
+#define DBG_ATTN_BIT_MAPPING_VAL_SHIFT                 0
+#define DBG_ATTN_BIT_MAPPING_IS_UNUSED_BIT_CNT_MASK    0x1
+#define DBG_ATTN_BIT_MAPPING_IS_UNUSED_BIT_CNT_SHIFT   15
+};
+
+/* Attention block per-type data */
+struct dbg_attn_block_type_data {
+       __le16 names_offset;
+       __le16 reserved1;
+       u8 num_regs;
+       u8 reserved2;
+       __le16 regs_offset;
+};
+
+/* Block attentions */
+struct dbg_attn_block {
+       struct dbg_attn_block_type_data per_type_data[2];
+};
+
+/* Attention register result */
+struct dbg_attn_reg_result {
+       __le32 data;
+#define DBG_ATTN_REG_RESULT_STS_ADDRESS_MASK   0xFFFFFF
+#define DBG_ATTN_REG_RESULT_STS_ADDRESS_SHIFT  0
+#define DBG_ATTN_REG_RESULT_NUM_ATTN_IDX_MASK  0xFF
+#define DBG_ATTN_REG_RESULT_NUM_ATTN_IDX_SHIFT 24
+       __le16 attn_idx_offset;
+       __le16 reserved;
+       __le32 sts_val;
+       __le32 mask_val;
+};
+
+/* Attention block result */
+struct dbg_attn_block_result {
+       u8 block_id;
+       u8 data;
+#define DBG_ATTN_BLOCK_RESULT_ATTN_TYPE_MASK   0x3
+#define DBG_ATTN_BLOCK_RESULT_ATTN_TYPE_SHIFT  0
+#define DBG_ATTN_BLOCK_RESULT_NUM_REGS_MASK    0x3F
+#define DBG_ATTN_BLOCK_RESULT_NUM_REGS_SHIFT   2
+       __le16 names_offset;
+       struct dbg_attn_reg_result reg_results[15];
+};
+
+/* mode header */
+struct dbg_mode_hdr {
+       __le16 data;
+#define DBG_MODE_HDR_EVAL_MODE_MASK            0x1
+#define DBG_MODE_HDR_EVAL_MODE_SHIFT           0
+#define DBG_MODE_HDR_MODES_BUF_OFFSET_MASK     0x7FFF
+#define DBG_MODE_HDR_MODES_BUF_OFFSET_SHIFT    1
+};
+
+/* Attention register */
+struct dbg_attn_reg {
+       struct dbg_mode_hdr mode;
+       __le16 attn_idx_offset;
+       __le32 data;
+#define DBG_ATTN_REG_STS_ADDRESS_MASK  0xFFFFFF
+#define DBG_ATTN_REG_STS_ADDRESS_SHIFT 0
+#define DBG_ATTN_REG_NUM_ATTN_IDX_MASK 0xFF
+#define DBG_ATTN_REG_NUM_ATTN_IDX_SHIFT        24
+       __le32 sts_clr_address;
+       __le32 mask_address;
+};
+
+/* attention types */
+enum dbg_attn_type {
+       ATTN_TYPE_INTERRUPT,
+       ATTN_TYPE_PARITY,
+       MAX_DBG_ATTN_TYPE
+};
+
+/* Debug status codes */
+enum dbg_status {
+       DBG_STATUS_OK,
+       DBG_STATUS_APP_VERSION_NOT_SET,
+       DBG_STATUS_UNSUPPORTED_APP_VERSION,
+       DBG_STATUS_DBG_BLOCK_NOT_RESET,
+       DBG_STATUS_INVALID_ARGS,
+       DBG_STATUS_OUTPUT_ALREADY_SET,
+       DBG_STATUS_INVALID_PCI_BUF_SIZE,
+       DBG_STATUS_PCI_BUF_ALLOC_FAILED,
+       DBG_STATUS_PCI_BUF_NOT_ALLOCATED,
+       DBG_STATUS_TOO_MANY_INPUTS,
+       DBG_STATUS_INPUT_OVERLAP,
+       DBG_STATUS_HW_ONLY_RECORDING,
+       DBG_STATUS_STORM_ALREADY_ENABLED,
+       DBG_STATUS_STORM_NOT_ENABLED,
+       DBG_STATUS_BLOCK_ALREADY_ENABLED,
+       DBG_STATUS_BLOCK_NOT_ENABLED,
+       DBG_STATUS_NO_INPUT_ENABLED,
+       DBG_STATUS_NO_FILTER_TRIGGER_64B,
+       DBG_STATUS_FILTER_ALREADY_ENABLED,
+       DBG_STATUS_TRIGGER_ALREADY_ENABLED,
+       DBG_STATUS_TRIGGER_NOT_ENABLED,
+       DBG_STATUS_CANT_ADD_CONSTRAINT,
+       DBG_STATUS_TOO_MANY_TRIGGER_STATES,
+       DBG_STATUS_TOO_MANY_CONSTRAINTS,
+       DBG_STATUS_RECORDING_NOT_STARTED,
+       DBG_STATUS_DATA_DIDNT_TRIGGER,
+       DBG_STATUS_NO_DATA_RECORDED,
+       DBG_STATUS_DUMP_BUF_TOO_SMALL,
+       DBG_STATUS_DUMP_NOT_CHUNK_ALIGNED,
+       DBG_STATUS_UNKNOWN_CHIP,
+       DBG_STATUS_VIRT_MEM_ALLOC_FAILED,
+       DBG_STATUS_BLOCK_IN_RESET,
+       DBG_STATUS_INVALID_TRACE_SIGNATURE,
+       DBG_STATUS_INVALID_NVRAM_BUNDLE,
+       DBG_STATUS_NVRAM_GET_IMAGE_FAILED,
+       DBG_STATUS_NON_ALIGNED_NVRAM_IMAGE,
+       DBG_STATUS_NVRAM_READ_FAILED,
+       DBG_STATUS_IDLE_CHK_PARSE_FAILED,
+       DBG_STATUS_MCP_TRACE_BAD_DATA,
+       DBG_STATUS_MCP_TRACE_NO_META,
+       DBG_STATUS_MCP_COULD_NOT_HALT,
+       DBG_STATUS_MCP_COULD_NOT_RESUME,
+       DBG_STATUS_DMAE_FAILED,
+       DBG_STATUS_SEMI_FIFO_NOT_EMPTY,
+       DBG_STATUS_IGU_FIFO_BAD_DATA,
+       DBG_STATUS_MCP_COULD_NOT_MASK_PRTY,
+       DBG_STATUS_FW_ASSERTS_PARSE_FAILED,
+       DBG_STATUS_REG_FIFO_BAD_DATA,
+       DBG_STATUS_PROTECTION_OVERRIDE_BAD_DATA,
+       DBG_STATUS_DBG_ARRAY_NOT_SET,
+       MAX_DBG_STATUS
+};
+
+/********************************/
+/* HSI Init Functions constants */
+/********************************/
+
+/* Number of VLAN priorities */
+#define NUM_OF_VLAN_PRIORITIES 8
+
+/* QM per-port init parameters */
+struct init_qm_port_params {
+       u8 active;
+       u8 active_phys_tcs;
+       __le16 num_pbf_cmd_lines;
+       __le16 num_btb_blocks;
+       __le16 reserved;
+};
+
+/* QM per-PQ init parameters */
+struct init_qm_pq_params {
+       u8 vport_id;
+       u8 tc_id;
+       u8 wrr_group;
+       u8 rl_valid;
+};
+
+/* QM per-vport init parameters */
+struct init_qm_vport_params {
+       __le32 vport_rl;
+       __le16 vport_wfq;
+       __le16 first_tx_pq_id[NUM_OF_TCS];
+};
+
+/**************************************/
+/* Init Tool HSI constants and macros */
+/**************************************/
+
+/* Width of GRC address in bits (addresses are specified in dwords) */
+#define GRC_ADDR_BITS  23
+#define MAX_GRC_ADDR   ((1 << GRC_ADDR_BITS) - 1)
+
+/* indicates an init that should be applied to any phase ID */
+#define ANY_PHASE_ID   0xffff
+
+/* Max size in dwords of a zipped array */
+#define MAX_ZIPPED_SIZE        8192
+
+enum init_modes {
+       MODE_RESERVED,
+       MODE_BB_B0,
+       MODE_RESERVED2,
+       MODE_ASIC,
+       MODE_RESERVED3,
+       MODE_RESERVED4,
+       MODE_RESERVED5,
+       MODE_RESERVED6,
+       MODE_SF,
+       MODE_MF_SD,
+       MODE_MF_SI,
+       MODE_PORTS_PER_ENG_1,
+       MODE_PORTS_PER_ENG_2,
+       MODE_PORTS_PER_ENG_4,
+       MODE_100G,
+       MODE_40G,
+       MODE_RESERVED7,
+       MAX_INIT_MODES
+};
+
+enum init_phases {
+       PHASE_ENGINE,
+       PHASE_PORT,
+       PHASE_PF,
+       PHASE_VF,
+       PHASE_QM_PF,
+       MAX_INIT_PHASES
+};
+
+enum init_split_types {
+       SPLIT_TYPE_NONE,
+       SPLIT_TYPE_PORT,
+       SPLIT_TYPE_PF,
+       SPLIT_TYPE_PORT_PF,
+       SPLIT_TYPE_VF,
+       MAX_INIT_SPLIT_TYPES
+};
+
+/* Binary buffer header */
+struct bin_buffer_hdr {
+       __le32 offset;
+       __le32 length;
+};
+
+/* binary init buffer types */
+enum bin_init_buffer_type {
+       BIN_BUF_FW_VER_INFO,
+       BIN_BUF_INIT_CMD,
+       BIN_BUF_INIT_VAL,
+       BIN_BUF_INIT_MODE_TREE,
+       BIN_BUF_IRO,
+       MAX_BIN_INIT_BUFFER_TYPE
+};
+
+/* init array header: raw */
+struct init_array_raw_hdr {
+       __le32 data;
+#define INIT_ARRAY_RAW_HDR_TYPE_MASK   0xF
+#define INIT_ARRAY_RAW_HDR_TYPE_SHIFT  0
+#define INIT_ARRAY_RAW_HDR_PARAMS_MASK 0xFFFFFFF
+#define INIT_ARRAY_RAW_HDR_PARAMS_SHIFT        4
+};
+
+/* init array header: standard */
+struct init_array_standard_hdr {
+       __le32 data;
+#define INIT_ARRAY_STANDARD_HDR_TYPE_MASK      0xF
+#define INIT_ARRAY_STANDARD_HDR_TYPE_SHIFT     0
+#define INIT_ARRAY_STANDARD_HDR_SIZE_MASK      0xFFFFFFF
+#define INIT_ARRAY_STANDARD_HDR_SIZE_SHIFT     4
+};
+
+/* init array header: zipped */
+struct init_array_zipped_hdr {
+       __le32 data;
+#define INIT_ARRAY_ZIPPED_HDR_TYPE_MASK                0xF
+#define INIT_ARRAY_ZIPPED_HDR_TYPE_SHIFT       0
+#define INIT_ARRAY_ZIPPED_HDR_ZIPPED_SIZE_MASK 0xFFFFFFF
+#define INIT_ARRAY_ZIPPED_HDR_ZIPPED_SIZE_SHIFT        4
+};
+
+/* init array header: pattern */
+struct init_array_pattern_hdr {
+       __le32 data;
+#define INIT_ARRAY_PATTERN_HDR_TYPE_MASK               0xF
+#define INIT_ARRAY_PATTERN_HDR_TYPE_SHIFT              0
+#define INIT_ARRAY_PATTERN_HDR_PATTERN_SIZE_MASK       0xF
+#define INIT_ARRAY_PATTERN_HDR_PATTERN_SIZE_SHIFT      4
+#define INIT_ARRAY_PATTERN_HDR_REPETITIONS_MASK                0xFFFFFF
+#define INIT_ARRAY_PATTERN_HDR_REPETITIONS_SHIFT       8
+};
+
+/* init array header union */
+union init_array_hdr {
+       struct init_array_raw_hdr raw;
+       struct init_array_standard_hdr standard;
+       struct init_array_zipped_hdr zipped;
+       struct init_array_pattern_hdr pattern;
+};
+
+/* init array types */
+enum init_array_types {
+       INIT_ARR_STANDARD,
+       INIT_ARR_ZIPPED,
+       INIT_ARR_PATTERN,
+       MAX_INIT_ARRAY_TYPES
+};
+
+/* init operation: callback */
+struct init_callback_op {
+       __le32 op_data;
+#define INIT_CALLBACK_OP_OP_MASK       0xF
+#define INIT_CALLBACK_OP_OP_SHIFT      0
+#define INIT_CALLBACK_OP_RESERVED_MASK 0xFFFFFFF
+#define INIT_CALLBACK_OP_RESERVED_SHIFT        4
+       __le16 callback_id;
+       __le16 block_id;
+};
+
+/* init operation: delay */
+struct init_delay_op {
+       __le32 op_data;
+#define INIT_DELAY_OP_OP_MASK          0xF
+#define INIT_DELAY_OP_OP_SHIFT         0
+#define INIT_DELAY_OP_RESERVED_MASK    0xFFFFFFF
+#define INIT_DELAY_OP_RESERVED_SHIFT   4
+       __le32 delay;
+};
+
+/* init operation: if_mode */
+struct init_if_mode_op {
+       __le32 op_data;
+#define INIT_IF_MODE_OP_OP_MASK                        0xF
+#define INIT_IF_MODE_OP_OP_SHIFT               0
+#define INIT_IF_MODE_OP_RESERVED1_MASK         0xFFF
+#define INIT_IF_MODE_OP_RESERVED1_SHIFT                4
+#define INIT_IF_MODE_OP_CMD_OFFSET_MASK                0xFFFF
+#define INIT_IF_MODE_OP_CMD_OFFSET_SHIFT       16
+       __le16 reserved2;
+       __le16 modes_buf_offset;
+};
+
+/* init operation: if_phase */
+struct init_if_phase_op {
+       __le32 op_data;
+#define INIT_IF_PHASE_OP_OP_MASK               0xF
+#define INIT_IF_PHASE_OP_OP_SHIFT              0
+#define INIT_IF_PHASE_OP_DMAE_ENABLE_MASK      0x1
+#define INIT_IF_PHASE_OP_DMAE_ENABLE_SHIFT     4
+#define INIT_IF_PHASE_OP_RESERVED1_MASK                0x7FF
+#define INIT_IF_PHASE_OP_RESERVED1_SHIFT       5
+#define INIT_IF_PHASE_OP_CMD_OFFSET_MASK       0xFFFF
+#define INIT_IF_PHASE_OP_CMD_OFFSET_SHIFT      16
+       __le32 phase_data;
+#define INIT_IF_PHASE_OP_PHASE_MASK            0xFF
+#define INIT_IF_PHASE_OP_PHASE_SHIFT           0
+#define INIT_IF_PHASE_OP_RESERVED2_MASK                0xFF
+#define INIT_IF_PHASE_OP_RESERVED2_SHIFT       8
+#define INIT_IF_PHASE_OP_PHASE_ID_MASK         0xFFFF
+#define INIT_IF_PHASE_OP_PHASE_ID_SHIFT                16
+};
+
+/* init mode operators */
+enum init_mode_ops {
+       INIT_MODE_OP_NOT,
+       INIT_MODE_OP_OR,
+       INIT_MODE_OP_AND,
+       MAX_INIT_MODE_OPS
+};
+
+/* init operation: raw */
+struct init_raw_op {
+       __le32 op_data;
+#define INIT_RAW_OP_OP_MASK            0xF
+#define INIT_RAW_OP_OP_SHIFT           0
+#define INIT_RAW_OP_PARAM1_MASK                0xFFFFFFF
+#define INIT_RAW_OP_PARAM1_SHIFT       4
+       __le32 param2;
+};
+
+/* init array params */
+struct init_op_array_params {
+       __le16 size;
+       __le16 offset;
+};
+
+/* Write init operation arguments */
+union init_write_args {
+       __le32 inline_val;
+       __le32 zeros_count;
+       __le32 array_offset;
+       struct init_op_array_params runtime;
+};
+
+/* init operation: write */
+struct init_write_op {
+       __le32 data;
+#define INIT_WRITE_OP_OP_MASK          0xF
+#define INIT_WRITE_OP_OP_SHIFT         0
+#define INIT_WRITE_OP_SOURCE_MASK      0x7
+#define INIT_WRITE_OP_SOURCE_SHIFT     4
+#define INIT_WRITE_OP_RESERVED_MASK    0x1
+#define INIT_WRITE_OP_RESERVED_SHIFT   7
+#define INIT_WRITE_OP_WIDE_BUS_MASK    0x1
+#define INIT_WRITE_OP_WIDE_BUS_SHIFT   8
+#define INIT_WRITE_OP_ADDRESS_MASK     0x7FFFFF
+#define INIT_WRITE_OP_ADDRESS_SHIFT    9
+       union init_write_args args;
+};
+
+/* init operation: read */
+struct init_read_op {
+       __le32 op_data;
+#define INIT_READ_OP_OP_MASK           0xF
+#define INIT_READ_OP_OP_SHIFT          0
+#define INIT_READ_OP_POLL_TYPE_MASK    0xF
+#define INIT_READ_OP_POLL_TYPE_SHIFT   4
+#define INIT_READ_OP_RESERVED_MASK     0x1
+#define INIT_READ_OP_RESERVED_SHIFT    8
+#define INIT_READ_OP_ADDRESS_MASK      0x7FFFFF
+#define INIT_READ_OP_ADDRESS_SHIFT     9
+       __le32 expected_val;
+};
+
+/* Init operations union */
+union init_op {
+       struct init_raw_op raw;
+       struct init_write_op write;
+       struct init_read_op read;
+       struct init_if_mode_op if_mode;
+       struct init_if_phase_op if_phase;
+       struct init_callback_op callback;
+       struct init_delay_op delay;
+};
+
+/* Init command operation types */
+enum init_op_types {
+       INIT_OP_READ,
+       INIT_OP_WRITE,
+       INIT_OP_IF_MODE,
+       INIT_OP_IF_PHASE,
+       INIT_OP_DELAY,
+       INIT_OP_CALLBACK,
+       MAX_INIT_OP_TYPES
+};
+
+/* init polling types */
+enum init_poll_types {
+       INIT_POLL_NONE,
+       INIT_POLL_EQ,
+       INIT_POLL_OR,
+       INIT_POLL_AND,
+       MAX_INIT_POLL_TYPES
+};
+
+/* init source types */
+enum init_source_types {
+       INIT_SRC_INLINE,
+       INIT_SRC_ZEROS,
+       INIT_SRC_ARRAY,
+       INIT_SRC_RUNTIME,
+       MAX_INIT_SOURCE_TYPES
+};
+
+/* Internal RAM Offsets macro data */
+struct iro {
+       __le32 base;
+       __le16 m1;
+       __le16 m2;
+       __le16 m3;
+       __le16 size;
+};
+
+/**
+ * @brief qed_dbg_print_attn - Prints attention register values from the specified results struct.
+ *
+ * @param p_hwfn
+ * @param results - Pointer to the attention read results
+ *
+ * @return error if the version wasn't set;
+ * otherwise, returns ok.
+ */
+enum dbg_status qed_dbg_print_attn(struct qed_hwfn *p_hwfn,
+                                  struct dbg_attn_block_result *results);
+
+#define MAX_NAME_LEN   16
+
+/* Win 2 */
+#define GTT_BAR0_MAP_REG_IGU_CMD \
+       0x00f000UL
+
+/* Win 3 */
+#define GTT_BAR0_MAP_REG_TSDM_RAM \
+       0x010000UL
+
+/* Win 4 */
+#define GTT_BAR0_MAP_REG_MSDM_RAM \
+       0x011000UL
+
+/* Win 5 */
+#define GTT_BAR0_MAP_REG_MSDM_RAM_1024 \
+       0x012000UL
+
+/* Win 6 */
+#define GTT_BAR0_MAP_REG_USDM_RAM \
+       0x013000UL
+
+/* Win 7 */
+#define GTT_BAR0_MAP_REG_USDM_RAM_1024 \
+       0x014000UL
+
+/* Win 8 */
+#define GTT_BAR0_MAP_REG_USDM_RAM_2048 \
+       0x015000UL
+
+/* Win 9 */
+#define GTT_BAR0_MAP_REG_XSDM_RAM \
+       0x016000UL
+
+/* Win 10 */
+#define GTT_BAR0_MAP_REG_YSDM_RAM \
+       0x017000UL
+
+/* Win 11 */
+#define GTT_BAR0_MAP_REG_PSDM_RAM \
+       0x018000UL
+
+/**
+ * @brief qed_qm_pf_mem_size - prepare QM ILT sizes
+ *
+ * Returns the required host memory size in 4KB units.
+ * Must be called before all QM init HSI functions.
+ *
+ * @param pf_id - physical function ID
+ * @param num_pf_cids - number of connections used by this PF
+ * @param num_vf_cids - number of connections used by VFs of this PF
+ * @param num_tids - number of tasks used by this PF
+ * @param num_pf_pqs - number of PQs used by this PF
+ * @param num_vf_pqs - number of PQs used by VFs of this PF
+ *
+ * @return The required host memory size in 4KB units.
+ */
+u32 qed_qm_pf_mem_size(u8 pf_id,
+                      u32 num_pf_cids,
+                      u32 num_vf_cids,
+                      u32 num_tids, u16 num_pf_pqs, u16 num_vf_pqs);
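As a hedged sketch of the intended call order (pf_id and the CID/TID/PQ counts are placeholders taken from the driver's resource accounting, not from this patch), the size query comes first and its result, in 4KB pages, sizes the QM ILT backing store:

	/* Illustrative only: counts come from the driver's resource accounting. */
	u32 qm_ilt_pages = qed_qm_pf_mem_size(pf_id, num_pf_cids, num_vf_cids,
					      num_tids, num_pf_pqs, num_vf_pqs);
	/* Allocate qm_ilt_pages * 4KB of host memory for the QM ILT before
	 * calling any of the qed_qm_*_rt_init() functions below. */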
+
+struct qed_qm_common_rt_init_params {
+       u8 max_ports_per_engine;
+       u8 max_phys_tcs_per_port;
+       bool pf_rl_en;
+       bool pf_wfq_en;
+       bool vport_rl_en;
+       bool vport_wfq_en;
+       struct init_qm_port_params *port_params;
+};
+
+int qed_qm_common_rt_init(struct qed_hwfn *p_hwfn,
+                         struct qed_qm_common_rt_init_params *p_params);
+
+struct qed_qm_pf_rt_init_params {
+       u8 port_id;
+       u8 pf_id;
+       u8 max_phys_tcs_per_port;
+       bool is_first_pf;
+       u32 num_pf_cids;
+       u32 num_vf_cids;
+       u32 num_tids;
+       u16 start_pq;
+       u16 num_pf_pqs;
+       u16 num_vf_pqs;
+       u8 start_vport;
+       u8 num_vports;
+       u8 pf_wfq;
+       u32 pf_rl;
+       struct init_qm_pq_params *pq_params;
+       struct init_qm_vport_params *vport_params;
+};
+
+int qed_qm_pf_rt_init(struct qed_hwfn *p_hwfn,
+       struct qed_ptt *p_ptt,
+       struct qed_qm_pf_rt_init_params *p_params);
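Putting the two parameter structures together, a plausible initialization sequence is to fill the common per-engine parameters once and then run the per-PF phase. This is only a hedged sketch: the numeric values, the port_params/pq_params/vport_params arrays, and the p_hwfn->rel_pf_id field are assumptions for illustration.

	struct qed_qm_common_rt_init_params common = {
		.max_ports_per_engine  = 2,            /* placeholder */
		.max_phys_tcs_per_port = 4,            /* placeholder */
		.pf_rl_en              = true,
		.pf_wfq_en             = true,
		.vport_rl_en           = true,
		.vport_wfq_en          = true,
		.port_params           = port_params,  /* array of init_qm_port_params */
	};
	struct qed_qm_pf_rt_init_params pf = {
		.port_id      = 0,
		.pf_id        = p_hwfn->rel_pf_id,     /* assumed hwfn field */
		.is_first_pf  = true,
		.num_pf_cids  = num_pf_cids,
		.num_pf_pqs   = num_pf_pqs,
		.pq_params    = pq_params,             /* per-PQ vport/tc mapping */
		.vport_params = vport_params,
	};

	if (qed_qm_common_rt_init(p_hwfn, &common))
		return -EINVAL;                        /* generic error for the sketch */
	return qed_qm_pf_rt_init(p_hwfn, p_ptt, &pf);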
+
+/**
+ * @brief qed_init_pf_wfq - Initializes the WFQ weight of the specified PF
+ *
+ * @param p_hwfn
+ * @param p_ptt - ptt window used for writing the registers
+ * @param pf_id - PF ID
+ * @param pf_wfq - WFQ weight. Must be non-zero.
+ *
+ * @return 0 on success, -1 on error.
+ */
+int qed_init_pf_wfq(struct qed_hwfn *p_hwfn,
+                   struct qed_ptt *p_ptt, u8 pf_id, u16 pf_wfq);
+
+/**
+ * @brief qed_init_pf_rl - Initializes the rate limit of the specified PF
+ *
+ * @param p_hwfn
+ * @param p_ptt - ptt window used for writing the registers
+ * @param pf_id - PF ID
+ * @param pf_rl - rate limit in Mb/sec units
+ *
+ * @return 0 on success, -1 on error.
+ */
+int qed_init_pf_rl(struct qed_hwfn *p_hwfn,
+                  struct qed_ptt *p_ptt, u8 pf_id, u32 pf_rl);
+
+/**
+ * @brief qed_init_vport_wfq - Initializes the WFQ weight of the specified VPORT
+ *
+ * @param p_hwfn
+ * @param p_ptt - ptt window used for writing the registers
+ * @param first_tx_pq_id - An array containing the first Tx PQ ID associated
+ *       with the VPORT for each TC. This array is filled by
+ *       qed_qm_pf_rt_init().
+ * @param vport_wfq - WFQ weight. Must be non-zero.
+ *
+ * @return 0 on success, -1 on error.
+ */
+int qed_init_vport_wfq(struct qed_hwfn *p_hwfn,
+                      struct qed_ptt *p_ptt,
+                      u16 first_tx_pq_id[NUM_OF_TCS], u16 vport_wfq);
+
+/**
+ * @brief qed_init_vport_rl - Initializes the rate limit of the specified VPORT
+ *
+ * @param p_hwfn
+ * @param p_ptt - ptt window used for writing the registers
+ * @param vport_id - VPORT ID
+ * @param vport_rl - rate limit in Mb/sec units
+ *
+ * @return 0 on success, -1 on error.
+ */
+int qed_init_vport_rl(struct qed_hwfn *p_hwfn,
+                     struct qed_ptt *p_ptt, u8 vport_id, u32 vport_rl);
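The four setters above share the same shape; a hedged usage sketch (pf_id, vport_id and the numeric weights/rates are placeholders, with rates in Mb/sec per the comments above) might look like:

	/* Per-PF: weight 100 in the WFQ arbiter, 10000 Mb/s rate limit. */
	if (qed_init_pf_wfq(p_hwfn, p_ptt, pf_id, 100) ||
	    qed_init_pf_rl(p_hwfn, p_ptt, pf_id, 10000))
		return -1;

	/* Per-VPORT: first_tx_pq_id was filled by qed_qm_pf_rt_init(). */
	if (qed_init_vport_wfq(p_hwfn, p_ptt, first_tx_pq_id, 50) ||
	    qed_init_vport_rl(p_hwfn, p_ptt, vport_id, 2500))
		return -1;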
+/**
+ * @brief qed_send_qm_stop_cmd - Sends a stop command to the QM
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param is_release_cmd - true for release, false for stop.
+ * @param is_tx_pq - true for Tx PQs, false for Other PQs.
+ * @param start_pq - first PQ ID to stop
+ * @param num_pqs - Number of PQs to stop, starting from start_pq.
+ *
+ * @return bool, true if successful, false if a timeout occurred while waiting for the QM command to complete.
+ */
+bool qed_send_qm_stop_cmd(struct qed_hwfn *p_hwfn,
+                         struct qed_ptt *p_ptt,
+                         bool is_release_cmd,
+                         bool is_tx_pq, u16 start_pq, u16 num_pqs);
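A hedged sketch of the stop/release pairing the parameters describe (start_pq, num_pqs and the error codes are placeholders): stop the Tx PQs first, then issue the release once they have drained.

	/* Stop the PF's Tx PQs... */
	if (!qed_send_qm_stop_cmd(p_hwfn, p_ptt, false /* stop */,
				  true /* Tx PQs */, start_pq, num_pqs))
		return -EBUSY;	/* timed out waiting for QM command done */

	/* ...and later release them, e.g. on PF unload. */
	if (!qed_send_qm_stop_cmd(p_hwfn, p_ptt, true /* release */,
				  true, start_pq, num_pqs))
		return -EBUSY;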
+
+/**
+ * @brief qed_set_vxlan_dest_port - initializes vxlan tunnel destination udp port
+ *
+ * @param p_ptt - ptt window used for writing the registers.
+ * @param dest_port - vxlan destination udp port.
+ */
+void qed_set_vxlan_dest_port(struct qed_hwfn *p_hwfn,
+                            struct qed_ptt *p_ptt, u16 dest_port);
+
+/**
+ * @brief qed_set_vxlan_enable - enable or disable VXLAN tunnel in HW
+ *
+ * @param p_ptt - ptt window used for writing the registers.
+ * @param vxlan_enable - vxlan enable flag.
+ */
+void qed_set_vxlan_enable(struct qed_hwfn *p_hwfn,
+                         struct qed_ptt *p_ptt, bool vxlan_enable);
+
+/**
+ * @brief qed_set_gre_enable - enable or disable GRE tunnel in HW
+ *
+ * @param p_ptt - ptt window used for writing the registers.
+ * @param eth_gre_enable - eth GRE enable flag.
+ * @param ip_gre_enable - IP GRE enable flag.
+ */
+void qed_set_gre_enable(struct qed_hwfn *p_hwfn,
+                       struct qed_ptt *p_ptt,
+                       bool eth_gre_enable, bool ip_gre_enable);
+
+/**
+ * @brief qed_set_geneve_dest_port - initializes geneve tunnel destination udp port
+ *
+ * @param p_ptt - ptt window used for writing the registers.
+ * @param dest_port - geneve destination udp port.
+ */
+void qed_set_geneve_dest_port(struct qed_hwfn *p_hwfn,
+                             struct qed_ptt *p_ptt, u16 dest_port);
+
+/**
+ * @brief qed_set_geneve_enable - enable or disable GENEVE tunnel in HW
+ *
+ * @param p_ptt - ptt window used for writing the registers.
+ * @param eth_geneve_enable - eth GENEVE enable flag.
+ * @param ip_geneve_enable - IP GENEVE enable flag.
+ */
+void qed_set_geneve_enable(struct qed_hwfn *p_hwfn,
+                          struct qed_ptt *p_ptt,
+                          bool eth_geneve_enable, bool ip_geneve_enable);
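Taken together, the tunnel helpers above would typically be driven from the PF tunnel configuration (compare struct pf_start_tunnel_config earlier in this header). The sequence below is only a hedged illustration; the port numbers are the conventional IANA defaults, not values mandated by this patch.

	/* VXLAN on UDP 4789, GENEVE on UDP 6081 (IANA defaults). */
	qed_set_vxlan_dest_port(p_hwfn, p_ptt, 4789);
	qed_set_vxlan_enable(p_hwfn, p_ptt, true);

	qed_set_geneve_dest_port(p_hwfn, p_ptt, 6081);
	qed_set_geneve_enable(p_hwfn, p_ptt, true /* eth */, true /* ip */);

	/* GRE needs no port; enable both ETH-over-GRE and IP-over-GRE. */
	qed_set_gre_enable(p_hwfn, p_ptt, true, true);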
+
+#define        YSTORM_FLOW_CONTROL_MODE_OFFSET                 (IRO[0].base)
+#define        YSTORM_FLOW_CONTROL_MODE_SIZE                   (IRO[0].size)
+#define        TSTORM_PORT_STAT_OFFSET(port_id) \
+       (IRO[1].base + ((port_id) * IRO[1].m1))
+#define        TSTORM_PORT_STAT_SIZE                           (IRO[1].size)
+#define        USTORM_VF_PF_CHANNEL_READY_OFFSET(vf_id) \
+       (IRO[3].base + ((vf_id) * IRO[3].m1))
+#define        USTORM_VF_PF_CHANNEL_READY_SIZE                 (IRO[3].size)
+#define        USTORM_FLR_FINAL_ACK_OFFSET(pf_id) \
+       (IRO[4].base + (pf_id) * IRO[4].m1)
+#define        USTORM_FLR_FINAL_ACK_SIZE                       (IRO[4].size)
+#define        USTORM_EQE_CONS_OFFSET(pf_id) \
+       (IRO[5].base + ((pf_id) * IRO[5].m1))
+#define        USTORM_EQE_CONS_SIZE                            (IRO[5].size)
+#define        USTORM_ETH_QUEUE_ZONE_OFFSET(queue_zone_id) \
+       (IRO[6].base + ((queue_zone_id) * IRO[6].m1))
+#define        USTORM_ETH_QUEUE_ZONE_SIZE                      (IRO[6].size)
+#define        USTORM_COMMON_QUEUE_CONS_OFFSET(queue_zone_id) \
+       (IRO[7].base + ((queue_zone_id) * IRO[7].m1))
+#define        USTORM_COMMON_QUEUE_CONS_SIZE                   (IRO[7].size)
+#define        MSTORM_QUEUE_STAT_OFFSET(stat_counter_id) \
+       (IRO[18].base + ((stat_counter_id) * IRO[18].m1))
+#define        MSTORM_QUEUE_STAT_SIZE                          (IRO[18].size)
+#define        MSTORM_ETH_PF_PRODS_OFFSET(queue_id) \
+       (IRO[19].base + ((queue_id) * IRO[19].m1))
+#define        MSTORM_ETH_PF_PRODS_SIZE                        (IRO[19].size)
+#define        MSTORM_TPA_TIMEOUT_US_OFFSET                    (IRO[20].base)
+#define        MSTORM_TPA_TIMEOUT_US_SIZE                      (IRO[20].size)
+#define        MSTORM_ETH_PF_STAT_OFFSET(pf_id) \
+       (IRO[21].base + ((pf_id) * IRO[21].m1))
+#define        MSTORM_ETH_PF_STAT_SIZE                         (IRO[21].size)
+#define        USTORM_QUEUE_STAT_OFFSET(stat_counter_id) \
+       (IRO[22].base + ((stat_counter_id) * IRO[22].m1))
+#define        USTORM_QUEUE_STAT_SIZE                          (IRO[22].size)
+#define        USTORM_ETH_PF_STAT_OFFSET(pf_id) \
+       (IRO[23].base + ((pf_id) * IRO[23].m1))
+#define        USTORM_ETH_PF_STAT_SIZE                         (IRO[23].size)
+#define        PSTORM_QUEUE_STAT_OFFSET(stat_counter_id) \
+       (IRO[24].base + ((stat_counter_id) * IRO[24].m1))
+#define        PSTORM_QUEUE_STAT_SIZE                          (IRO[24].size)
+#define        PSTORM_ETH_PF_STAT_OFFSET(pf_id) \
+       (IRO[25].base + ((pf_id) * IRO[25].m1))
+#define        PSTORM_ETH_PF_STAT_SIZE                         (IRO[25].size)
+#define        PSTORM_CTL_FRAME_ETHTYPE_OFFSET(ethtype) \
+       (IRO[26].base + ((ethtype) * IRO[26].m1))
+#define        PSTORM_CTL_FRAME_ETHTYPE_SIZE                   (IRO[26].size)
+#define        TSTORM_ETH_PRS_INPUT_OFFSET                     (IRO[27].base)
+#define        TSTORM_ETH_PRS_INPUT_SIZE                       (IRO[27].size)
+#define        ETH_RX_RATE_LIMIT_OFFSET(pf_id) \
+       (IRO[28].base + ((pf_id) * IRO[28].m1))
+#define        ETH_RX_RATE_LIMIT_SIZE                          (IRO[28].size)
+#define        XSTORM_ETH_QUEUE_ZONE_OFFSET(queue_id) \
+       (IRO[29].base + ((queue_id) * IRO[29].m1))
+#define        XSTORM_ETH_QUEUE_ZONE_SIZE                      (IRO[29].size)
+
+static const struct iro iro_arr[46] = {
+       {0x0, 0x0, 0x0, 0x0, 0x8},
+       {0x4cb0, 0x78, 0x0, 0x0, 0x78},
+       {0x6318, 0x20, 0x0, 0x0, 0x20},
+       {0xb00, 0x8, 0x0, 0x0, 0x4},
+       {0xa80, 0x8, 0x0, 0x0, 0x4},
+       {0x0, 0x8, 0x0, 0x0, 0x2},
+       {0x80, 0x8, 0x0, 0x0, 0x4},
+       {0x84, 0x8, 0x0, 0x0, 0x2},
+       {0x4bc0, 0x0, 0x0, 0x0, 0x78},
+       {0x3df0, 0x0, 0x0, 0x0, 0x78},
+       {0x29b0, 0x0, 0x0, 0x0, 0x78},
+       {0x4c38, 0x0, 0x0, 0x0, 0x78},
+       {0x4a48, 0x0, 0x0, 0x0, 0x78},
+       {0x7e48, 0x0, 0x0, 0x0, 0x78},
+       {0xa28, 0x8, 0x0, 0x0, 0x8},
+       {0x60f8, 0x10, 0x0, 0x0, 0x10},
+       {0xb820, 0x30, 0x0, 0x0, 0x30},
+       {0x95b8, 0x30, 0x0, 0x0, 0x30},
+       {0x4c18, 0x80, 0x0, 0x0, 0x40},
+       {0x1f8, 0x4, 0x0, 0x0, 0x4},
+       {0xc9a8, 0x0, 0x0, 0x0, 0x4},
+       {0x4c58, 0x80, 0x0, 0x0, 0x20},
+       {0x8050, 0x40, 0x0, 0x0, 0x30},
+       {0xe770, 0x60, 0x0, 0x0, 0x60},
+       {0x2b48, 0x80, 0x0, 0x0, 0x38},
+       {0xdf88, 0x78, 0x0, 0x0, 0x78},
+       {0x1f8, 0x4, 0x0, 0x0, 0x4},
+       {0xacf0, 0x0, 0x0, 0x0, 0xf0},
+       {0xade0, 0x8, 0x0, 0x0, 0x8},
+       {0x1f8, 0x8, 0x0, 0x0, 0x8},
+       {0xac0, 0x8, 0x0, 0x0, 0x8},
+       {0x2578, 0x8, 0x0, 0x0, 0x8},
+       {0x24f8, 0x8, 0x0, 0x0, 0x8},
+       {0x0, 0x8, 0x0, 0x0, 0x8},
+       {0x200, 0x10, 0x8, 0x0, 0x8},
+       {0xb78, 0x10, 0x8, 0x0, 0x2},
+       {0xd888, 0x38, 0x0, 0x0, 0x24},
+       {0x12120, 0x10, 0x0, 0x0, 0x8},
+       {0x11b20, 0x38, 0x0, 0x0, 0x18},
+       {0xa8c0, 0x30, 0x0, 0x0, 0x10},
+       {0x86f8, 0x28, 0x0, 0x0, 0x18},
+       {0xeff8, 0x10, 0x0, 0x0, 0x10},
+       {0xdd08, 0x48, 0x0, 0x0, 0x38},
+       {0xf460, 0x20, 0x0, 0x0, 0x20},
+       {0x2b80, 0x80, 0x0, 0x0, 0x10},
+       {0x5000, 0x10, 0x0, 0x0, 0x10},
+};
+
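Each *_OFFSET macro above expands to "base + index * m1" using one row of the iro_arr table, so the table fully determines the base, per-entry stride and size of every storm RAM region. A small sketch of that arithmetic, assuming struct iro carries the five columns in the order {base, m1, m2, m3, size} implied by the initializers:

/* Sketch only: compute the Mstorm per-queue statistics offset the same way
 * MSTORM_QUEUE_STAT_OFFSET() does, using row 18 of the table above
 * ({0x4c18, 0x80, ...}): counter 3 lands at 0x4c18 + 3 * 0x80. */
static u32 example_mstorm_queue_stat_offset(const struct iro *tbl, u16 id)
{
	return tbl[18].base + id * tbl[18].m1;
}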
+/* Runtime array offsets */
+#define DORQ_REG_PF_MAX_ICID_0_RT_OFFSET 0
+#define DORQ_REG_PF_MAX_ICID_1_RT_OFFSET 1
+#define DORQ_REG_PF_MAX_ICID_2_RT_OFFSET 2
+#define DORQ_REG_PF_MAX_ICID_3_RT_OFFSET 3
+#define DORQ_REG_PF_MAX_ICID_4_RT_OFFSET 4
+#define DORQ_REG_PF_MAX_ICID_5_RT_OFFSET 5
+#define DORQ_REG_PF_MAX_ICID_6_RT_OFFSET 6
+#define DORQ_REG_PF_MAX_ICID_7_RT_OFFSET 7
+#define DORQ_REG_VF_MAX_ICID_0_RT_OFFSET 8
+#define DORQ_REG_VF_MAX_ICID_1_RT_OFFSET 9
+#define DORQ_REG_VF_MAX_ICID_2_RT_OFFSET 10
+#define DORQ_REG_VF_MAX_ICID_3_RT_OFFSET 11
+#define DORQ_REG_VF_MAX_ICID_4_RT_OFFSET 12
+#define DORQ_REG_VF_MAX_ICID_5_RT_OFFSET 13
+#define DORQ_REG_VF_MAX_ICID_6_RT_OFFSET 14
+#define DORQ_REG_VF_MAX_ICID_7_RT_OFFSET 15
+#define DORQ_REG_PF_WAKE_ALL_RT_OFFSET 16
+#define DORQ_REG_TAG1_ETHERTYPE_RT_OFFSET 17
+#define IGU_REG_PF_CONFIGURATION_RT_OFFSET 18
+#define IGU_REG_VF_CONFIGURATION_RT_OFFSET 19
+#define IGU_REG_ATTN_MSG_ADDR_L_RT_OFFSET 20
+#define IGU_REG_ATTN_MSG_ADDR_H_RT_OFFSET 21
+#define IGU_REG_LEADING_EDGE_LATCH_RT_OFFSET 22
+#define IGU_REG_TRAILING_EDGE_LATCH_RT_OFFSET 23
+#define CAU_REG_CQE_AGG_UNIT_SIZE_RT_OFFSET 24
+#define CAU_REG_SB_VAR_MEMORY_RT_OFFSET 761
+#define CAU_REG_SB_VAR_MEMORY_RT_SIZE 736
+#define CAU_REG_SB_ADDR_MEMORY_RT_OFFSET 1497
+#define CAU_REG_SB_ADDR_MEMORY_RT_SIZE 736
+#define CAU_REG_PI_MEMORY_RT_OFFSET 2233
+#define CAU_REG_PI_MEMORY_RT_SIZE 4416
+#define PRS_REG_SEARCH_RESP_INITIATOR_TYPE_RT_OFFSET 6649
+#define PRS_REG_TASK_ID_MAX_INITIATOR_PF_RT_OFFSET 6650
+#define PRS_REG_TASK_ID_MAX_INITIATOR_VF_RT_OFFSET 6651
+#define PRS_REG_TASK_ID_MAX_TARGET_PF_RT_OFFSET 6652
+#define PRS_REG_TASK_ID_MAX_TARGET_VF_RT_OFFSET 6653
+#define PRS_REG_SEARCH_TCP_RT_OFFSET 6654
+#define PRS_REG_SEARCH_FCOE_RT_OFFSET 6655
+#define PRS_REG_SEARCH_ROCE_RT_OFFSET 6656
+#define PRS_REG_ROCE_DEST_QP_MAX_VF_RT_OFFSET 6657
+#define PRS_REG_ROCE_DEST_QP_MAX_PF_RT_OFFSET 6658
+#define PRS_REG_SEARCH_OPENFLOW_RT_OFFSET 6659
+#define PRS_REG_SEARCH_NON_IP_AS_OPENFLOW_RT_OFFSET 6660
+#define PRS_REG_OPENFLOW_SUPPORT_ONLY_KNOWN_OVER_IP_RT_OFFSET 6661
+#define PRS_REG_OPENFLOW_SEARCH_KEY_MASK_RT_OFFSET 6662
+#define PRS_REG_TAG_ETHERTYPE_0_RT_OFFSET 6663
+#define PRS_REG_LIGHT_L2_ETHERTYPE_EN_RT_OFFSET 6664
+#define SRC_REG_FIRSTFREE_RT_OFFSET 6665
+#define SRC_REG_FIRSTFREE_RT_SIZE 2
+#define SRC_REG_LASTFREE_RT_OFFSET 6667
+#define SRC_REG_LASTFREE_RT_SIZE 2
+#define SRC_REG_COUNTFREE_RT_OFFSET 6669
+#define SRC_REG_NUMBER_HASH_BITS_RT_OFFSET 6670
+#define PSWRQ2_REG_CDUT_P_SIZE_RT_OFFSET 6671
+#define PSWRQ2_REG_CDUC_P_SIZE_RT_OFFSET 6672
+#define PSWRQ2_REG_TM_P_SIZE_RT_OFFSET 6673
+#define PSWRQ2_REG_QM_P_SIZE_RT_OFFSET 6674
+#define PSWRQ2_REG_SRC_P_SIZE_RT_OFFSET 6675
+#define PSWRQ2_REG_TSDM_P_SIZE_RT_OFFSET 6676
+#define PSWRQ2_REG_TM_FIRST_ILT_RT_OFFSET 6677
+#define PSWRQ2_REG_TM_LAST_ILT_RT_OFFSET 6678
+#define PSWRQ2_REG_QM_FIRST_ILT_RT_OFFSET 6679
+#define PSWRQ2_REG_QM_LAST_ILT_RT_OFFSET 6680
+#define PSWRQ2_REG_SRC_FIRST_ILT_RT_OFFSET 6681
+#define PSWRQ2_REG_SRC_LAST_ILT_RT_OFFSET 6682
+#define PSWRQ2_REG_CDUC_FIRST_ILT_RT_OFFSET 6683
+#define PSWRQ2_REG_CDUC_LAST_ILT_RT_OFFSET 6684
+#define PSWRQ2_REG_CDUT_FIRST_ILT_RT_OFFSET 6685
+#define PSWRQ2_REG_CDUT_LAST_ILT_RT_OFFSET 6686
+#define PSWRQ2_REG_TSDM_FIRST_ILT_RT_OFFSET 6687
+#define PSWRQ2_REG_TSDM_LAST_ILT_RT_OFFSET 6688
+#define PSWRQ2_REG_TM_NUMBER_OF_PF_BLOCKS_RT_OFFSET 6689
+#define PSWRQ2_REG_CDUT_NUMBER_OF_PF_BLOCKS_RT_OFFSET 6690
+#define PSWRQ2_REG_CDUC_NUMBER_OF_PF_BLOCKS_RT_OFFSET 6691
+#define PSWRQ2_REG_TM_VF_BLOCKS_RT_OFFSET 6692
+#define PSWRQ2_REG_CDUT_VF_BLOCKS_RT_OFFSET 6693
+#define PSWRQ2_REG_CDUC_VF_BLOCKS_RT_OFFSET 6694
+#define PSWRQ2_REG_TM_BLOCKS_FACTOR_RT_OFFSET 6695
+#define PSWRQ2_REG_CDUT_BLOCKS_FACTOR_RT_OFFSET 6696
+#define PSWRQ2_REG_CDUC_BLOCKS_FACTOR_RT_OFFSET 6697
+#define PSWRQ2_REG_VF_BASE_RT_OFFSET 6698
+#define PSWRQ2_REG_VF_LAST_ILT_RT_OFFSET 6699
+#define PSWRQ2_REG_WR_MBS0_RT_OFFSET 6700
+#define PSWRQ2_REG_RD_MBS0_RT_OFFSET 6701
+#define PSWRQ2_REG_DRAM_ALIGN_WR_RT_OFFSET 6702
+#define PSWRQ2_REG_DRAM_ALIGN_RD_RT_OFFSET 6703
+#define PSWRQ2_REG_ILT_MEMORY_RT_OFFSET 6704
+#define PSWRQ2_REG_ILT_MEMORY_RT_SIZE 22000
+#define PGLUE_REG_B_VF_BASE_RT_OFFSET 28704
+#define PGLUE_REG_B_CACHE_LINE_SIZE_RT_OFFSET 28705
+#define PGLUE_REG_B_PF_BAR0_SIZE_RT_OFFSET 28706
+#define PGLUE_REG_B_PF_BAR1_SIZE_RT_OFFSET 28707
+#define PGLUE_REG_B_VF_BAR1_SIZE_RT_OFFSET 28708
+#define TM_REG_VF_ENABLE_CONN_RT_OFFSET 28709
+#define TM_REG_PF_ENABLE_CONN_RT_OFFSET 28710
+#define TM_REG_PF_ENABLE_TASK_RT_OFFSET 28711
+#define TM_REG_GROUP_SIZE_RESOLUTION_CONN_RT_OFFSET 28712
+#define TM_REG_GROUP_SIZE_RESOLUTION_TASK_RT_OFFSET 28713
+#define TM_REG_CONFIG_CONN_MEM_RT_OFFSET 28714
+#define TM_REG_CONFIG_CONN_MEM_RT_SIZE 416
+#define TM_REG_CONFIG_TASK_MEM_RT_OFFSET 29130
+#define TM_REG_CONFIG_TASK_MEM_RT_SIZE 512
+#define QM_REG_MAXPQSIZE_0_RT_OFFSET 29642
+#define QM_REG_MAXPQSIZE_1_RT_OFFSET 29643
+#define QM_REG_MAXPQSIZE_2_RT_OFFSET 29644
+#define QM_REG_MAXPQSIZETXSEL_0_RT_OFFSET 29645
+#define QM_REG_MAXPQSIZETXSEL_1_RT_OFFSET 29646
+#define QM_REG_MAXPQSIZETXSEL_2_RT_OFFSET 29647
+#define QM_REG_MAXPQSIZETXSEL_3_RT_OFFSET 29648
+#define QM_REG_MAXPQSIZETXSEL_4_RT_OFFSET 29649
+#define QM_REG_MAXPQSIZETXSEL_5_RT_OFFSET 29650
+#define QM_REG_MAXPQSIZETXSEL_6_RT_OFFSET 29651
+#define QM_REG_MAXPQSIZETXSEL_7_RT_OFFSET 29652
+#define QM_REG_MAXPQSIZETXSEL_8_RT_OFFSET 29653
+#define QM_REG_MAXPQSIZETXSEL_9_RT_OFFSET 29654
+#define QM_REG_MAXPQSIZETXSEL_10_RT_OFFSET 29655
+#define QM_REG_MAXPQSIZETXSEL_11_RT_OFFSET 29656
+#define QM_REG_MAXPQSIZETXSEL_12_RT_OFFSET 29657
+#define QM_REG_MAXPQSIZETXSEL_13_RT_OFFSET 29658
+#define QM_REG_MAXPQSIZETXSEL_14_RT_OFFSET 29659
+#define QM_REG_MAXPQSIZETXSEL_15_RT_OFFSET 29660
+#define QM_REG_MAXPQSIZETXSEL_16_RT_OFFSET 29661
+#define QM_REG_MAXPQSIZETXSEL_17_RT_OFFSET 29662
+#define QM_REG_MAXPQSIZETXSEL_18_RT_OFFSET 29663
+#define QM_REG_MAXPQSIZETXSEL_19_RT_OFFSET 29664
+#define QM_REG_MAXPQSIZETXSEL_20_RT_OFFSET 29665
+#define QM_REG_MAXPQSIZETXSEL_21_RT_OFFSET 29666
+#define QM_REG_MAXPQSIZETXSEL_22_RT_OFFSET 29667
+#define QM_REG_MAXPQSIZETXSEL_23_RT_OFFSET 29668
+#define QM_REG_MAXPQSIZETXSEL_24_RT_OFFSET 29669
+#define QM_REG_MAXPQSIZETXSEL_25_RT_OFFSET 29670
+#define QM_REG_MAXPQSIZETXSEL_26_RT_OFFSET 29671
+#define QM_REG_MAXPQSIZETXSEL_27_RT_OFFSET 29672
+#define QM_REG_MAXPQSIZETXSEL_28_RT_OFFSET 29673
+#define QM_REG_MAXPQSIZETXSEL_29_RT_OFFSET 29674
+#define QM_REG_MAXPQSIZETXSEL_30_RT_OFFSET 29675
+#define QM_REG_MAXPQSIZETXSEL_31_RT_OFFSET 29676
+#define QM_REG_MAXPQSIZETXSEL_32_RT_OFFSET 29677
+#define QM_REG_MAXPQSIZETXSEL_33_RT_OFFSET 29678
+#define QM_REG_MAXPQSIZETXSEL_34_RT_OFFSET 29679
+#define QM_REG_MAXPQSIZETXSEL_35_RT_OFFSET 29680
+#define QM_REG_MAXPQSIZETXSEL_36_RT_OFFSET 29681
+#define QM_REG_MAXPQSIZETXSEL_37_RT_OFFSET 29682
+#define QM_REG_MAXPQSIZETXSEL_38_RT_OFFSET 29683
+#define QM_REG_MAXPQSIZETXSEL_39_RT_OFFSET 29684
+#define QM_REG_MAXPQSIZETXSEL_40_RT_OFFSET 29685
+#define QM_REG_MAXPQSIZETXSEL_41_RT_OFFSET 29686
+#define QM_REG_MAXPQSIZETXSEL_42_RT_OFFSET 29687
+#define QM_REG_MAXPQSIZETXSEL_43_RT_OFFSET 29688
+#define QM_REG_MAXPQSIZETXSEL_44_RT_OFFSET 29689
+#define QM_REG_MAXPQSIZETXSEL_45_RT_OFFSET 29690
+#define QM_REG_MAXPQSIZETXSEL_46_RT_OFFSET 29691
+#define QM_REG_MAXPQSIZETXSEL_47_RT_OFFSET 29692
+#define QM_REG_MAXPQSIZETXSEL_48_RT_OFFSET 29693
+#define QM_REG_MAXPQSIZETXSEL_49_RT_OFFSET 29694
+#define QM_REG_MAXPQSIZETXSEL_50_RT_OFFSET 29695
+#define QM_REG_MAXPQSIZETXSEL_51_RT_OFFSET 29696
+#define QM_REG_MAXPQSIZETXSEL_52_RT_OFFSET 29697
+#define QM_REG_MAXPQSIZETXSEL_53_RT_OFFSET 29698
+#define QM_REG_MAXPQSIZETXSEL_54_RT_OFFSET 29699
+#define QM_REG_MAXPQSIZETXSEL_55_RT_OFFSET 29700
+#define QM_REG_MAXPQSIZETXSEL_56_RT_OFFSET 29701
+#define QM_REG_MAXPQSIZETXSEL_57_RT_OFFSET 29702
+#define QM_REG_MAXPQSIZETXSEL_58_RT_OFFSET 29703
+#define QM_REG_MAXPQSIZETXSEL_59_RT_OFFSET 29704
+#define QM_REG_MAXPQSIZETXSEL_60_RT_OFFSET 29705
+#define QM_REG_MAXPQSIZETXSEL_61_RT_OFFSET 29706
+#define QM_REG_MAXPQSIZETXSEL_62_RT_OFFSET 29707
+#define QM_REG_MAXPQSIZETXSEL_63_RT_OFFSET 29708
+#define QM_REG_BASEADDROTHERPQ_RT_OFFSET 29709
+#define QM_REG_BASEADDROTHERPQ_RT_SIZE 128
+#define QM_REG_VOQCRDLINE_RT_OFFSET 29837
+#define QM_REG_VOQCRDLINE_RT_SIZE 20
+#define QM_REG_VOQINITCRDLINE_RT_OFFSET 29857
+#define QM_REG_VOQINITCRDLINE_RT_SIZE 20
+#define QM_REG_AFULLQMBYPTHRPFWFQ_RT_OFFSET 29877
+#define QM_REG_AFULLQMBYPTHRVPWFQ_RT_OFFSET 29878
+#define QM_REG_AFULLQMBYPTHRPFRL_RT_OFFSET 29879
+#define QM_REG_AFULLQMBYPTHRGLBLRL_RT_OFFSET 29880
+#define QM_REG_AFULLOPRTNSTCCRDMASK_RT_OFFSET 29881
+#define QM_REG_WRROTHERPQGRP_0_RT_OFFSET 29882
+#define QM_REG_WRROTHERPQGRP_1_RT_OFFSET 29883
+#define QM_REG_WRROTHERPQGRP_2_RT_OFFSET 29884
+#define QM_REG_WRROTHERPQGRP_3_RT_OFFSET 29885
+#define QM_REG_WRROTHERPQGRP_4_RT_OFFSET 29886
+#define QM_REG_WRROTHERPQGRP_5_RT_OFFSET 29887
+#define QM_REG_WRROTHERPQGRP_6_RT_OFFSET 29888
+#define QM_REG_WRROTHERPQGRP_7_RT_OFFSET 29889
+#define QM_REG_WRROTHERPQGRP_8_RT_OFFSET 29890
+#define QM_REG_WRROTHERPQGRP_9_RT_OFFSET 29891
+#define QM_REG_WRROTHERPQGRP_10_RT_OFFSET 29892
+#define QM_REG_WRROTHERPQGRP_11_RT_OFFSET 29893
+#define QM_REG_WRROTHERPQGRP_12_RT_OFFSET 29894
+#define QM_REG_WRROTHERPQGRP_13_RT_OFFSET 29895
+#define QM_REG_WRROTHERPQGRP_14_RT_OFFSET 29896
+#define QM_REG_WRROTHERPQGRP_15_RT_OFFSET 29897
+#define QM_REG_WRROTHERGRPWEIGHT_0_RT_OFFSET 29898
+#define QM_REG_WRROTHERGRPWEIGHT_1_RT_OFFSET 29899
+#define QM_REG_WRROTHERGRPWEIGHT_2_RT_OFFSET 29900
+#define QM_REG_WRROTHERGRPWEIGHT_3_RT_OFFSET 29901
+#define QM_REG_WRRTXGRPWEIGHT_0_RT_OFFSET 29902
+#define QM_REG_WRRTXGRPWEIGHT_1_RT_OFFSET 29903
+#define QM_REG_PQTX2PF_0_RT_OFFSET 29904
+#define QM_REG_PQTX2PF_1_RT_OFFSET 29905
+#define QM_REG_PQTX2PF_2_RT_OFFSET 29906
+#define QM_REG_PQTX2PF_3_RT_OFFSET 29907
+#define QM_REG_PQTX2PF_4_RT_OFFSET 29908
+#define QM_REG_PQTX2PF_5_RT_OFFSET 29909
+#define QM_REG_PQTX2PF_6_RT_OFFSET 29910
+#define QM_REG_PQTX2PF_7_RT_OFFSET 29911
+#define QM_REG_PQTX2PF_8_RT_OFFSET 29912
+#define QM_REG_PQTX2PF_9_RT_OFFSET 29913
+#define QM_REG_PQTX2PF_10_RT_OFFSET 29914
+#define QM_REG_PQTX2PF_11_RT_OFFSET 29915
+#define QM_REG_PQTX2PF_12_RT_OFFSET 29916
+#define QM_REG_PQTX2PF_13_RT_OFFSET 29917
+#define QM_REG_PQTX2PF_14_RT_OFFSET 29918
+#define QM_REG_PQTX2PF_15_RT_OFFSET 29919
+#define QM_REG_PQTX2PF_16_RT_OFFSET 29920
+#define QM_REG_PQTX2PF_17_RT_OFFSET 29921
+#define QM_REG_PQTX2PF_18_RT_OFFSET 29922
+#define QM_REG_PQTX2PF_19_RT_OFFSET 29923
+#define QM_REG_PQTX2PF_20_RT_OFFSET 29924
+#define QM_REG_PQTX2PF_21_RT_OFFSET 29925
+#define QM_REG_PQTX2PF_22_RT_OFFSET 29926
+#define QM_REG_PQTX2PF_23_RT_OFFSET 29927
+#define QM_REG_PQTX2PF_24_RT_OFFSET 29928
+#define QM_REG_PQTX2PF_25_RT_OFFSET 29929
+#define QM_REG_PQTX2PF_26_RT_OFFSET 29930
+#define QM_REG_PQTX2PF_27_RT_OFFSET 29931
+#define QM_REG_PQTX2PF_28_RT_OFFSET 29932
+#define QM_REG_PQTX2PF_29_RT_OFFSET 29933
+#define QM_REG_PQTX2PF_30_RT_OFFSET 29934
+#define QM_REG_PQTX2PF_31_RT_OFFSET 29935
+#define QM_REG_PQTX2PF_32_RT_OFFSET 29936
+#define QM_REG_PQTX2PF_33_RT_OFFSET 29937
+#define QM_REG_PQTX2PF_34_RT_OFFSET 29938
+#define QM_REG_PQTX2PF_35_RT_OFFSET 29939
+#define QM_REG_PQTX2PF_36_RT_OFFSET 29940
+#define QM_REG_PQTX2PF_37_RT_OFFSET 29941
+#define QM_REG_PQTX2PF_38_RT_OFFSET 29942
+#define QM_REG_PQTX2PF_39_RT_OFFSET 29943
+#define QM_REG_PQTX2PF_40_RT_OFFSET 29944
+#define QM_REG_PQTX2PF_41_RT_OFFSET 29945
+#define QM_REG_PQTX2PF_42_RT_OFFSET 29946
+#define QM_REG_PQTX2PF_43_RT_OFFSET 29947
+#define QM_REG_PQTX2PF_44_RT_OFFSET 29948
+#define QM_REG_PQTX2PF_45_RT_OFFSET 29949
+#define QM_REG_PQTX2PF_46_RT_OFFSET 29950
+#define QM_REG_PQTX2PF_47_RT_OFFSET 29951
+#define QM_REG_PQTX2PF_48_RT_OFFSET 29952
+#define QM_REG_PQTX2PF_49_RT_OFFSET 29953
+#define QM_REG_PQTX2PF_50_RT_OFFSET 29954
+#define QM_REG_PQTX2PF_51_RT_OFFSET 29955
+#define QM_REG_PQTX2PF_52_RT_OFFSET 29956
+#define QM_REG_PQTX2PF_53_RT_OFFSET 29957
+#define QM_REG_PQTX2PF_54_RT_OFFSET 29958
+#define QM_REG_PQTX2PF_55_RT_OFFSET 29959
+#define QM_REG_PQTX2PF_56_RT_OFFSET 29960
+#define QM_REG_PQTX2PF_57_RT_OFFSET 29961
+#define QM_REG_PQTX2PF_58_RT_OFFSET 29962
+#define QM_REG_PQTX2PF_59_RT_OFFSET 29963
+#define QM_REG_PQTX2PF_60_RT_OFFSET 29964
+#define QM_REG_PQTX2PF_61_RT_OFFSET 29965
+#define QM_REG_PQTX2PF_62_RT_OFFSET 29966
+#define QM_REG_PQTX2PF_63_RT_OFFSET 29967
+#define QM_REG_PQOTHER2PF_0_RT_OFFSET 29968
+#define QM_REG_PQOTHER2PF_1_RT_OFFSET 29969
+#define QM_REG_PQOTHER2PF_2_RT_OFFSET 29970
+#define QM_REG_PQOTHER2PF_3_RT_OFFSET 29971
+#define QM_REG_PQOTHER2PF_4_RT_OFFSET 29972
+#define QM_REG_PQOTHER2PF_5_RT_OFFSET 29973
+#define QM_REG_PQOTHER2PF_6_RT_OFFSET 29974
+#define QM_REG_PQOTHER2PF_7_RT_OFFSET 29975
+#define QM_REG_PQOTHER2PF_8_RT_OFFSET 29976
+#define QM_REG_PQOTHER2PF_9_RT_OFFSET 29977
+#define QM_REG_PQOTHER2PF_10_RT_OFFSET 29978
+#define QM_REG_PQOTHER2PF_11_RT_OFFSET 29979
+#define QM_REG_PQOTHER2PF_12_RT_OFFSET 29980
+#define QM_REG_PQOTHER2PF_13_RT_OFFSET 29981
+#define QM_REG_PQOTHER2PF_14_RT_OFFSET 29982
+#define QM_REG_PQOTHER2PF_15_RT_OFFSET 29983
+#define QM_REG_RLGLBLPERIOD_0_RT_OFFSET 29984
+#define QM_REG_RLGLBLPERIOD_1_RT_OFFSET 29985
+#define QM_REG_RLGLBLPERIODTIMER_0_RT_OFFSET 29986
+#define QM_REG_RLGLBLPERIODTIMER_1_RT_OFFSET 29987
+#define QM_REG_RLGLBLPERIODSEL_0_RT_OFFSET 29988
+#define QM_REG_RLGLBLPERIODSEL_1_RT_OFFSET 29989
+#define QM_REG_RLGLBLPERIODSEL_2_RT_OFFSET 29990
+#define QM_REG_RLGLBLPERIODSEL_3_RT_OFFSET 29991
+#define QM_REG_RLGLBLPERIODSEL_4_RT_OFFSET 29992
+#define QM_REG_RLGLBLPERIODSEL_5_RT_OFFSET 29993
+#define QM_REG_RLGLBLPERIODSEL_6_RT_OFFSET 29994
+#define QM_REG_RLGLBLPERIODSEL_7_RT_OFFSET 29995
+#define QM_REG_RLGLBLINCVAL_RT_OFFSET 29996
+#define QM_REG_RLGLBLINCVAL_RT_SIZE 256
+#define QM_REG_RLGLBLUPPERBOUND_RT_OFFSET 30252
+#define QM_REG_RLGLBLUPPERBOUND_RT_SIZE 256
+#define QM_REG_RLGLBLCRD_RT_OFFSET 30508
+#define QM_REG_RLGLBLCRD_RT_SIZE 256
+#define QM_REG_RLGLBLENABLE_RT_OFFSET 30764
+#define QM_REG_RLPFPERIOD_RT_OFFSET 30765
+#define QM_REG_RLPFPERIODTIMER_RT_OFFSET 30766
+#define QM_REG_RLPFINCVAL_RT_OFFSET 30767
+#define QM_REG_RLPFINCVAL_RT_SIZE 16
+#define QM_REG_RLPFUPPERBOUND_RT_OFFSET 30783
+#define QM_REG_RLPFUPPERBOUND_RT_SIZE 16
+#define QM_REG_RLPFCRD_RT_OFFSET 30799
+#define QM_REG_RLPFCRD_RT_SIZE 16
+#define QM_REG_RLPFENABLE_RT_OFFSET 30815
+#define QM_REG_RLPFVOQENABLE_RT_OFFSET 30816
+#define QM_REG_WFQPFWEIGHT_RT_OFFSET 30817
+#define QM_REG_WFQPFWEIGHT_RT_SIZE 16
+#define QM_REG_WFQPFUPPERBOUND_RT_OFFSET 30833
+#define QM_REG_WFQPFUPPERBOUND_RT_SIZE 16
+#define QM_REG_WFQPFCRD_RT_OFFSET 30849
+#define QM_REG_WFQPFCRD_RT_SIZE 160
+#define QM_REG_WFQPFENABLE_RT_OFFSET 31009
+#define QM_REG_WFQVPENABLE_RT_OFFSET 31010
+#define QM_REG_BASEADDRTXPQ_RT_OFFSET 31011
+#define QM_REG_BASEADDRTXPQ_RT_SIZE 512
+#define QM_REG_TXPQMAP_RT_OFFSET 31523
+#define QM_REG_TXPQMAP_RT_SIZE 512
+#define QM_REG_WFQVPWEIGHT_RT_OFFSET 32035
+#define QM_REG_WFQVPWEIGHT_RT_SIZE 512
+#define QM_REG_WFQVPCRD_RT_OFFSET 32547
+#define QM_REG_WFQVPCRD_RT_SIZE 512
+#define QM_REG_WFQVPMAP_RT_OFFSET 33059
+#define QM_REG_WFQVPMAP_RT_SIZE 512
+#define QM_REG_WFQPFCRD_MSB_RT_OFFSET 33571
+#define QM_REG_WFQPFCRD_MSB_RT_SIZE 160
+#define NIG_REG_TAG_ETHERTYPE_0_RT_OFFSET 33731
+#define NIG_REG_OUTER_TAG_VALUE_LIST0_RT_OFFSET 33732
+#define NIG_REG_OUTER_TAG_VALUE_LIST1_RT_OFFSET 33733
+#define NIG_REG_OUTER_TAG_VALUE_LIST2_RT_OFFSET 33734
+#define NIG_REG_OUTER_TAG_VALUE_LIST3_RT_OFFSET 33735
+#define NIG_REG_OUTER_TAG_VALUE_MASK_RT_OFFSET 33736
+#define NIG_REG_LLH_FUNC_TAGMAC_CLS_TYPE_RT_OFFSET 33737
+#define NIG_REG_LLH_FUNC_TAG_EN_RT_OFFSET 33738
+#define NIG_REG_LLH_FUNC_TAG_EN_RT_SIZE 4
+#define NIG_REG_LLH_FUNC_TAG_HDR_SEL_RT_OFFSET 33742
+#define NIG_REG_LLH_FUNC_TAG_HDR_SEL_RT_SIZE 4
+#define NIG_REG_LLH_FUNC_TAG_VALUE_RT_OFFSET 33746
+#define NIG_REG_LLH_FUNC_TAG_VALUE_RT_SIZE 4
+#define NIG_REG_LLH_FUNC_NO_TAG_RT_OFFSET 33750
+#define NIG_REG_LLH_FUNC_FILTER_VALUE_RT_OFFSET 33751
+#define NIG_REG_LLH_FUNC_FILTER_VALUE_RT_SIZE 32
+#define NIG_REG_LLH_FUNC_FILTER_EN_RT_OFFSET 33783
+#define NIG_REG_LLH_FUNC_FILTER_EN_RT_SIZE 16
+#define NIG_REG_LLH_FUNC_FILTER_MODE_RT_OFFSET 33799
+#define NIG_REG_LLH_FUNC_FILTER_MODE_RT_SIZE 16
+#define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_RT_OFFSET 33815
+#define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_RT_SIZE 16
+#define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_OFFSET 33831
+#define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_SIZE 16
+#define NIG_REG_TX_EDPM_CTRL_RT_OFFSET 33847
+#define NIG_REG_ROCE_DUPLICATE_TO_HOST_RT_OFFSET 33848
+#define CDU_REG_CID_ADDR_PARAMS_RT_OFFSET 33849
+#define CDU_REG_SEGMENT0_PARAMS_RT_OFFSET 33850
+#define CDU_REG_SEGMENT1_PARAMS_RT_OFFSET 33851
+#define CDU_REG_PF_SEG0_TYPE_OFFSET_RT_OFFSET 33852
+#define CDU_REG_PF_SEG1_TYPE_OFFSET_RT_OFFSET 33853
+#define CDU_REG_PF_SEG2_TYPE_OFFSET_RT_OFFSET 33854
+#define CDU_REG_PF_SEG3_TYPE_OFFSET_RT_OFFSET 33855
+#define CDU_REG_PF_FL_SEG0_TYPE_OFFSET_RT_OFFSET 33856
+#define CDU_REG_PF_FL_SEG1_TYPE_OFFSET_RT_OFFSET 33857
+#define CDU_REG_PF_FL_SEG2_TYPE_OFFSET_RT_OFFSET 33858
+#define CDU_REG_PF_FL_SEG3_TYPE_OFFSET_RT_OFFSET 33859
+#define CDU_REG_VF_SEG_TYPE_OFFSET_RT_OFFSET 33860
+#define CDU_REG_VF_FL_SEG_TYPE_OFFSET_RT_OFFSET 33861
+#define PBF_REG_TAG_ETHERTYPE_0_RT_OFFSET 33862
+#define PBF_REG_BTB_SHARED_AREA_SIZE_RT_OFFSET 33863
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET 33864
+#define PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET 33865
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ0_RT_OFFSET 33866
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ1_RT_OFFSET 33867
+#define PBF_REG_BTB_GUARANTEED_VOQ1_RT_OFFSET 33868
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ1_RT_OFFSET 33869
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ2_RT_OFFSET 33870
+#define PBF_REG_BTB_GUARANTEED_VOQ2_RT_OFFSET 33871
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ2_RT_OFFSET 33872
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ3_RT_OFFSET 33873
+#define PBF_REG_BTB_GUARANTEED_VOQ3_RT_OFFSET 33874
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ3_RT_OFFSET 33875
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ4_RT_OFFSET 33876
+#define PBF_REG_BTB_GUARANTEED_VOQ4_RT_OFFSET 33877
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ4_RT_OFFSET 33878
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ5_RT_OFFSET 33879
+#define PBF_REG_BTB_GUARANTEED_VOQ5_RT_OFFSET 33880
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ5_RT_OFFSET 33881
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ6_RT_OFFSET 33882
+#define PBF_REG_BTB_GUARANTEED_VOQ6_RT_OFFSET 33883
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ6_RT_OFFSET 33884
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ7_RT_OFFSET 33885
+#define PBF_REG_BTB_GUARANTEED_VOQ7_RT_OFFSET 33886
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ7_RT_OFFSET 33887
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ8_RT_OFFSET 33888
+#define PBF_REG_BTB_GUARANTEED_VOQ8_RT_OFFSET 33889
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ8_RT_OFFSET 33890
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ9_RT_OFFSET 33891
+#define PBF_REG_BTB_GUARANTEED_VOQ9_RT_OFFSET 33892
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ9_RT_OFFSET 33893
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ10_RT_OFFSET 33894
+#define PBF_REG_BTB_GUARANTEED_VOQ10_RT_OFFSET 33895
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ10_RT_OFFSET 33896
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ11_RT_OFFSET 33897
+#define PBF_REG_BTB_GUARANTEED_VOQ11_RT_OFFSET 33898
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ11_RT_OFFSET 33899
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ12_RT_OFFSET 33900
+#define PBF_REG_BTB_GUARANTEED_VOQ12_RT_OFFSET 33901
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ12_RT_OFFSET 33902
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ13_RT_OFFSET 33903
+#define PBF_REG_BTB_GUARANTEED_VOQ13_RT_OFFSET 33904
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ13_RT_OFFSET 33905
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ14_RT_OFFSET 33906
+#define PBF_REG_BTB_GUARANTEED_VOQ14_RT_OFFSET 33907
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ14_RT_OFFSET 33908
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ15_RT_OFFSET 33909
+#define PBF_REG_BTB_GUARANTEED_VOQ15_RT_OFFSET 33910
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ15_RT_OFFSET 33911
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ16_RT_OFFSET 33912
+#define PBF_REG_BTB_GUARANTEED_VOQ16_RT_OFFSET 33913
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ16_RT_OFFSET 33914
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ17_RT_OFFSET 33915
+#define PBF_REG_BTB_GUARANTEED_VOQ17_RT_OFFSET 33916
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ17_RT_OFFSET 33917
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ18_RT_OFFSET 33918
+#define PBF_REG_BTB_GUARANTEED_VOQ18_RT_OFFSET 33919
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ18_RT_OFFSET 33920
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ19_RT_OFFSET 33921
+#define PBF_REG_BTB_GUARANTEED_VOQ19_RT_OFFSET 33922
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ19_RT_OFFSET 33923
+#define XCM_REG_CON_PHY_Q3_RT_OFFSET 33924
+
+#define RUNTIME_ARRAY_SIZE 33925
+
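The *_RT_OFFSET constants above index a flat runtime-init array of RUNTIME_ARRAY_SIZE 32-bit entries; values are staged into that array first and only reach the matching registers when the init code flushes it during hardware initialisation. A hypothetical staging sketch (the real driver keeps an equivalent buffer, together with per-entry valid bits, inside its init state):

/* Hypothetical staging buffer and helper; illustration only. */
static u32 example_rt_data[RUNTIME_ARRAY_SIZE];

static void example_stage_rt_value(void)
{
	/* Stage the doorbell "wake all" value; nothing touches the chip
	 * until the runtime array is flushed by the init code. */
	example_rt_data[DORQ_REG_PF_WAKE_ALL_RT_OFFSET] = 0x1;
}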
+/* The eth storm context for the Tstorm */
+struct tstorm_eth_conn_st_ctx {
+       __le32 reserved[4];
+};
+
+/* The eth storm context for the Pstorm */
+struct pstorm_eth_conn_st_ctx {
+       __le32 reserved[8];
+};
+
+/* The eth storm context for the Xstorm */
+struct xstorm_eth_conn_st_ctx {
+       __le32 reserved[60];
+};
+
+struct xstorm_eth_conn_ag_ctx {
+       u8 reserved0;
+       u8 eth_state;
+       u8 flags0;
+#define XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_MASK       0x1
+#define XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_SHIFT      0
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED1_MASK          0x1
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED1_SHIFT         1
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED2_MASK          0x1
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED2_SHIFT         2
+#define XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM3_MASK       0x1
+#define XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM3_SHIFT      3
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED3_MASK          0x1
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED3_SHIFT         4
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED4_MASK          0x1
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED4_SHIFT         5
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED5_MASK          0x1
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED5_SHIFT         6
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED6_MASK          0x1
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED6_SHIFT         7
+       u8 flags1;
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED7_MASK          0x1
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED7_SHIFT         0
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED8_MASK          0x1
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED8_SHIFT         1
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED9_MASK          0x1
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED9_SHIFT         2
+#define XSTORM_ETH_CONN_AG_CTX_BIT11_MASK              0x1
+#define XSTORM_ETH_CONN_AG_CTX_BIT11_SHIFT             3
+#define XSTORM_ETH_CONN_AG_CTX_BIT12_MASK              0x1
+#define XSTORM_ETH_CONN_AG_CTX_BIT12_SHIFT             4
+#define XSTORM_ETH_CONN_AG_CTX_BIT13_MASK              0x1
+#define XSTORM_ETH_CONN_AG_CTX_BIT13_SHIFT             5
+#define XSTORM_ETH_CONN_AG_CTX_TX_RULE_ACTIVE_MASK     0x1
+#define XSTORM_ETH_CONN_AG_CTX_TX_RULE_ACTIVE_SHIFT    6
+#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_ACTIVE_MASK       0x1
+#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_ACTIVE_SHIFT      7
+       u8 flags2;
+#define XSTORM_ETH_CONN_AG_CTX_CF0_MASK                0x3
+#define XSTORM_ETH_CONN_AG_CTX_CF0_SHIFT               0
+#define XSTORM_ETH_CONN_AG_CTX_CF1_MASK                0x3
+#define XSTORM_ETH_CONN_AG_CTX_CF1_SHIFT               2
+#define XSTORM_ETH_CONN_AG_CTX_CF2_MASK                0x3
+#define XSTORM_ETH_CONN_AG_CTX_CF2_SHIFT               4
+#define XSTORM_ETH_CONN_AG_CTX_CF3_MASK                0x3
+#define XSTORM_ETH_CONN_AG_CTX_CF3_SHIFT               6
+       u8 flags3;
+#define XSTORM_ETH_CONN_AG_CTX_CF4_MASK                0x3
+#define XSTORM_ETH_CONN_AG_CTX_CF4_SHIFT               0
+#define XSTORM_ETH_CONN_AG_CTX_CF5_MASK                0x3
+#define XSTORM_ETH_CONN_AG_CTX_CF5_SHIFT               2
+#define XSTORM_ETH_CONN_AG_CTX_CF6_MASK                0x3
+#define XSTORM_ETH_CONN_AG_CTX_CF6_SHIFT               4
+#define XSTORM_ETH_CONN_AG_CTX_CF7_MASK                0x3
+#define XSTORM_ETH_CONN_AG_CTX_CF7_SHIFT               6
+       u8 flags4;
+#define XSTORM_ETH_CONN_AG_CTX_CF8_MASK                0x3
+#define XSTORM_ETH_CONN_AG_CTX_CF8_SHIFT               0
+#define XSTORM_ETH_CONN_AG_CTX_CF9_MASK                0x3
+#define XSTORM_ETH_CONN_AG_CTX_CF9_SHIFT               2
+#define XSTORM_ETH_CONN_AG_CTX_CF10_MASK               0x3
+#define XSTORM_ETH_CONN_AG_CTX_CF10_SHIFT              4
+#define XSTORM_ETH_CONN_AG_CTX_CF11_MASK               0x3
+#define XSTORM_ETH_CONN_AG_CTX_CF11_SHIFT              6
+       u8 flags5;
+#define XSTORM_ETH_CONN_AG_CTX_CF12_MASK               0x3
+#define XSTORM_ETH_CONN_AG_CTX_CF12_SHIFT              0
+#define XSTORM_ETH_CONN_AG_CTX_CF13_MASK               0x3
+#define XSTORM_ETH_CONN_AG_CTX_CF13_SHIFT              2
+#define XSTORM_ETH_CONN_AG_CTX_CF14_MASK               0x3
+#define XSTORM_ETH_CONN_AG_CTX_CF14_SHIFT              4
+#define XSTORM_ETH_CONN_AG_CTX_CF15_MASK               0x3
+#define XSTORM_ETH_CONN_AG_CTX_CF15_SHIFT              6
+       u8 flags6;
+#define XSTORM_ETH_CONN_AG_CTX_GO_TO_BD_CONS_CF_MASK   0x3
+#define XSTORM_ETH_CONN_AG_CTX_GO_TO_BD_CONS_CF_SHIFT  0
+#define XSTORM_ETH_CONN_AG_CTX_MULTI_UNICAST_CF_MASK   0x3
+#define XSTORM_ETH_CONN_AG_CTX_MULTI_UNICAST_CF_SHIFT  2
+#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_MASK              0x3
+#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_SHIFT             4
+#define XSTORM_ETH_CONN_AG_CTX_TERMINATE_CF_MASK       0x3
+#define XSTORM_ETH_CONN_AG_CTX_TERMINATE_CF_SHIFT      6
+       u8 flags7;
+#define XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0_MASK           0x3
+#define XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0_SHIFT          0
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED10_MASK         0x3
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED10_SHIFT                2
+#define XSTORM_ETH_CONN_AG_CTX_SLOW_PATH_MASK          0x3
+#define XSTORM_ETH_CONN_AG_CTX_SLOW_PATH_SHIFT         4
+#define XSTORM_ETH_CONN_AG_CTX_CF0EN_MASK              0x1
+#define XSTORM_ETH_CONN_AG_CTX_CF0EN_SHIFT             6
+#define XSTORM_ETH_CONN_AG_CTX_CF1EN_MASK              0x1
+#define XSTORM_ETH_CONN_AG_CTX_CF1EN_SHIFT             7
+       u8 flags8;
+#define XSTORM_ETH_CONN_AG_CTX_CF2EN_MASK              0x1
+#define XSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT             0
+#define XSTORM_ETH_CONN_AG_CTX_CF3EN_MASK              0x1
+#define XSTORM_ETH_CONN_AG_CTX_CF3EN_SHIFT             1
+#define XSTORM_ETH_CONN_AG_CTX_CF4EN_MASK              0x1
+#define XSTORM_ETH_CONN_AG_CTX_CF4EN_SHIFT             2
+#define XSTORM_ETH_CONN_AG_CTX_CF5EN_MASK              0x1
+#define XSTORM_ETH_CONN_AG_CTX_CF5EN_SHIFT             3
+#define XSTORM_ETH_CONN_AG_CTX_CF6EN_MASK              0x1
+#define XSTORM_ETH_CONN_AG_CTX_CF6EN_SHIFT             4
+#define XSTORM_ETH_CONN_AG_CTX_CF7EN_MASK              0x1
+#define XSTORM_ETH_CONN_AG_CTX_CF7EN_SHIFT             5
+#define XSTORM_ETH_CONN_AG_CTX_CF8EN_MASK              0x1
+#define XSTORM_ETH_CONN_AG_CTX_CF8EN_SHIFT             6
+#define XSTORM_ETH_CONN_AG_CTX_CF9EN_MASK              0x1
+#define XSTORM_ETH_CONN_AG_CTX_CF9EN_SHIFT             7
+       u8 flags9;
+#define XSTORM_ETH_CONN_AG_CTX_CF10EN_MASK             0x1
+#define XSTORM_ETH_CONN_AG_CTX_CF10EN_SHIFT            0
+#define XSTORM_ETH_CONN_AG_CTX_CF11EN_MASK             0x1
+#define XSTORM_ETH_CONN_AG_CTX_CF11EN_SHIFT            1
+#define XSTORM_ETH_CONN_AG_CTX_CF12EN_MASK             0x1
+#define XSTORM_ETH_CONN_AG_CTX_CF12EN_SHIFT            2
+#define XSTORM_ETH_CONN_AG_CTX_CF13EN_MASK             0x1
+#define XSTORM_ETH_CONN_AG_CTX_CF13EN_SHIFT            3
+#define XSTORM_ETH_CONN_AG_CTX_CF14EN_MASK             0x1
+#define XSTORM_ETH_CONN_AG_CTX_CF14EN_SHIFT            4
+#define XSTORM_ETH_CONN_AG_CTX_CF15EN_MASK             0x1
+#define XSTORM_ETH_CONN_AG_CTX_CF15EN_SHIFT            5
+#define XSTORM_ETH_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_MASK        0x1
+#define XSTORM_ETH_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_SHIFT 6
+#define XSTORM_ETH_CONN_AG_CTX_MULTI_UNICAST_CF_EN_MASK        0x1
+#define XSTORM_ETH_CONN_AG_CTX_MULTI_UNICAST_CF_EN_SHIFT 7
+       u8 flags10;
+#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_EN_MASK           0x1
+#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_EN_SHIFT          0
+#define XSTORM_ETH_CONN_AG_CTX_TERMINATE_CF_EN_MASK    0x1
+#define XSTORM_ETH_CONN_AG_CTX_TERMINATE_CF_EN_SHIFT   1
+#define XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0_EN_MASK                0x1
+#define XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT       2
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED11_MASK         0x1
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED11_SHIFT                3
+#define XSTORM_ETH_CONN_AG_CTX_SLOW_PATH_EN_MASK       0x1
+#define XSTORM_ETH_CONN_AG_CTX_SLOW_PATH_EN_SHIFT      4
+#define XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_EN_RESERVED_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_EN_RESERVED_SHIFT 5
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED12_MASK         0x1
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED12_SHIFT                6
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED13_MASK         0x1
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED13_SHIFT                7
+       u8 flags11;
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED14_MASK         0x1
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED14_SHIFT                0
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED15_MASK         0x1
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED15_SHIFT                1
+#define XSTORM_ETH_CONN_AG_CTX_TX_DEC_RULE_EN_MASK     0x1
+#define XSTORM_ETH_CONN_AG_CTX_TX_DEC_RULE_EN_SHIFT    2
+#define XSTORM_ETH_CONN_AG_CTX_RULE5EN_MASK            0x1
+#define XSTORM_ETH_CONN_AG_CTX_RULE5EN_SHIFT           3
+#define XSTORM_ETH_CONN_AG_CTX_RULE6EN_MASK            0x1
+#define XSTORM_ETH_CONN_AG_CTX_RULE6EN_SHIFT           4
+#define XSTORM_ETH_CONN_AG_CTX_RULE7EN_MASK            0x1
+#define XSTORM_ETH_CONN_AG_CTX_RULE7EN_SHIFT           5
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED1_MASK       0x1
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED1_SHIFT      6
+#define XSTORM_ETH_CONN_AG_CTX_RULE9EN_MASK            0x1
+#define XSTORM_ETH_CONN_AG_CTX_RULE9EN_SHIFT           7
+       u8 flags12;
+#define XSTORM_ETH_CONN_AG_CTX_RULE10EN_MASK           0x1
+#define XSTORM_ETH_CONN_AG_CTX_RULE10EN_SHIFT          0
+#define XSTORM_ETH_CONN_AG_CTX_RULE11EN_MASK           0x1
+#define XSTORM_ETH_CONN_AG_CTX_RULE11EN_SHIFT          1
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED2_MASK       0x1
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED2_SHIFT      2
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED3_MASK       0x1
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED3_SHIFT      3
+#define XSTORM_ETH_CONN_AG_CTX_RULE14EN_MASK           0x1
+#define XSTORM_ETH_CONN_AG_CTX_RULE14EN_SHIFT          4
+#define XSTORM_ETH_CONN_AG_CTX_RULE15EN_MASK           0x1
+#define XSTORM_ETH_CONN_AG_CTX_RULE15EN_SHIFT          5
+#define XSTORM_ETH_CONN_AG_CTX_RULE16EN_MASK           0x1
+#define XSTORM_ETH_CONN_AG_CTX_RULE16EN_SHIFT          6
+#define XSTORM_ETH_CONN_AG_CTX_RULE17EN_MASK           0x1
+#define XSTORM_ETH_CONN_AG_CTX_RULE17EN_SHIFT          7
+       u8 flags13;
+#define XSTORM_ETH_CONN_AG_CTX_RULE18EN_MASK           0x1
+#define XSTORM_ETH_CONN_AG_CTX_RULE18EN_SHIFT          0
+#define XSTORM_ETH_CONN_AG_CTX_RULE19EN_MASK           0x1
+#define XSTORM_ETH_CONN_AG_CTX_RULE19EN_SHIFT          1
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED4_MASK       0x1
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED4_SHIFT      2
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED5_MASK       0x1
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED5_SHIFT      3
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED6_MASK       0x1
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED6_SHIFT      4
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED7_MASK       0x1
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED7_SHIFT      5
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED8_MASK       0x1
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED8_SHIFT      6
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED9_MASK       0x1
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED9_SHIFT      7
+       u8 flags14;
+#define XSTORM_ETH_CONN_AG_CTX_EDPM_USE_EXT_HDR_MASK   0x1
+#define XSTORM_ETH_CONN_AG_CTX_EDPM_USE_EXT_HDR_SHIFT  0
+#define XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_SHIFT        1
+#define XSTORM_ETH_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_SHIFT 2
+#define XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_SHIFT 3
+#define XSTORM_ETH_CONN_AG_CTX_L2_EDPM_ENABLE_MASK     0x1
+#define XSTORM_ETH_CONN_AG_CTX_L2_EDPM_ENABLE_SHIFT    4
+#define XSTORM_ETH_CONN_AG_CTX_ROCE_EDPM_ENABLE_MASK   0x1
+#define XSTORM_ETH_CONN_AG_CTX_ROCE_EDPM_ENABLE_SHIFT  5
+#define XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_MASK         0x3
+#define XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_SHIFT                6
+       u8 edpm_event_id;
+       __le16 physical_q0;
+       __le16 quota;
+       __le16 edpm_num_bds;
+       __le16 tx_bd_cons;
+       __le16 tx_bd_prod;
+       __le16 tx_class;
+       __le16 conn_dpi;
+       u8 byte3;
+       u8 byte4;
+       u8 byte5;
+       u8 byte6;
+       __le32 reg0;
+       __le32 reg1;
+       __le32 reg2;
+       __le32 reg3;
+       __le32 reg4;
+       __le32 reg5;
+       __le32 reg6;
+       __le16 word7;
+       __le16 word8;
+       __le16 word9;
+       __le16 word10;
+       __le32 reg7;
+       __le32 reg8;
+       __le32 reg9;
+       u8 byte7;
+       u8 byte8;
+       u8 byte9;
+       u8 byte10;
+       u8 byte11;
+       u8 byte12;
+       u8 byte13;
+       u8 byte14;
+       u8 byte15;
+       u8 byte16;
+       __le16 word11;
+       __le32 reg10;
+       __le32 reg11;
+       __le32 reg12;
+       __le32 reg13;
+       __le32 reg14;
+       __le32 reg15;
+       __le32 reg16;
+       __le32 reg17;
+       __le32 reg18;
+       __le32 reg19;
+       __le16 word12;
+       __le16 word13;
+       __le16 word14;
+       __le16 word15;
+};
+
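Every flagsN byte in the aggregative context above is split into sub-fields through paired _MASK/_SHIFT defines. The driver reads and writes them through its common SET_FIELD()/GET_FIELD() helpers; the long-hand macros below only illustrate the arithmetic those helpers perform and are not part of the interface.

/* Long-hand illustration of the MASK/SHIFT convention used above. */
#define EXAMPLE_SET_FIELD(value, name, val)				\
	((value) = ((value) & ~((name ## _MASK) << (name ## _SHIFT))) |	\
		   (((val) & (name ## _MASK)) << (name ## _SHIFT)))

#define EXAMPLE_GET_FIELD(value, name)					\
	(((value) >> (name ## _SHIFT)) & (name ## _MASK))

static void example_mark_tx_rule_active(struct xstorm_eth_conn_ag_ctx *ctx)
{
	/* TX_RULE_ACTIVE occupies bit 6 of flags1 (mask 0x1, shift 6). */
	EXAMPLE_SET_FIELD(ctx->flags1, XSTORM_ETH_CONN_AG_CTX_TX_RULE_ACTIVE, 1);
}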
+/* The eth storm context for the Ystorm */
+struct ystorm_eth_conn_st_ctx {
+       __le32 reserved[8];
+};
+
+struct ystorm_eth_conn_ag_ctx {
+       u8 byte0;
+       u8 state;
+       u8 flags0;
+#define YSTORM_ETH_CONN_AG_CTX_BIT0_MASK               0x1
+#define YSTORM_ETH_CONN_AG_CTX_BIT0_SHIFT              0
+#define YSTORM_ETH_CONN_AG_CTX_BIT1_MASK               0x1
+#define YSTORM_ETH_CONN_AG_CTX_BIT1_SHIFT              1
+#define YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_MASK  0x3
+#define YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_SHIFT 2
+#define YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_MASK   0x3
+#define YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_SHIFT  4
+#define YSTORM_ETH_CONN_AG_CTX_CF2_MASK                        0x3
+#define YSTORM_ETH_CONN_AG_CTX_CF2_SHIFT               6
+       u8 flags1;
+#define YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_MASK       0x1
+#define YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_SHIFT      0
+#define YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_EN_MASK                0x1
+#define YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_EN_SHIFT       1
+#define YSTORM_ETH_CONN_AG_CTX_CF2EN_MASK                      0x1
+#define YSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT                     2
+#define YSTORM_ETH_CONN_AG_CTX_RULE0EN_MASK                    0x1
+#define YSTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT                   3
+#define YSTORM_ETH_CONN_AG_CTX_RULE1EN_MASK                    0x1
+#define YSTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT                   4
+#define YSTORM_ETH_CONN_AG_CTX_RULE2EN_MASK                    0x1
+#define YSTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT                   5
+#define YSTORM_ETH_CONN_AG_CTX_RULE3EN_MASK                    0x1
+#define YSTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT                   6
+#define YSTORM_ETH_CONN_AG_CTX_RULE4EN_MASK                    0x1
+#define YSTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT                   7
+       u8 tx_q0_int_coallecing_timeset;
+       u8 byte3;
+       __le16 word0;
+       __le32 terminate_spqe;
+       __le32 reg1;
+       __le16 tx_bd_cons_upd;
+       __le16 word2;
+       __le16 word3;
+       __le16 word4;
+       __le32 reg2;
+       __le32 reg3;
+};
+
+struct tstorm_eth_conn_ag_ctx {
+       u8 byte0;
+       u8 byte1;
+       u8 flags0;
+#define TSTORM_ETH_CONN_AG_CTX_BIT0_MASK               0x1
+#define TSTORM_ETH_CONN_AG_CTX_BIT0_SHIFT              0
+#define TSTORM_ETH_CONN_AG_CTX_BIT1_MASK               0x1
+#define TSTORM_ETH_CONN_AG_CTX_BIT1_SHIFT              1
+#define TSTORM_ETH_CONN_AG_CTX_BIT2_MASK               0x1
+#define TSTORM_ETH_CONN_AG_CTX_BIT2_SHIFT              2
+#define TSTORM_ETH_CONN_AG_CTX_BIT3_MASK               0x1
+#define TSTORM_ETH_CONN_AG_CTX_BIT3_SHIFT              3
+#define TSTORM_ETH_CONN_AG_CTX_BIT4_MASK               0x1
+#define TSTORM_ETH_CONN_AG_CTX_BIT4_SHIFT              4
+#define TSTORM_ETH_CONN_AG_CTX_BIT5_MASK               0x1
+#define TSTORM_ETH_CONN_AG_CTX_BIT5_SHIFT              5
+#define TSTORM_ETH_CONN_AG_CTX_CF0_MASK                        0x3
+#define TSTORM_ETH_CONN_AG_CTX_CF0_SHIFT               6
+       u8 flags1;
+#define TSTORM_ETH_CONN_AG_CTX_CF1_MASK                        0x3
+#define TSTORM_ETH_CONN_AG_CTX_CF1_SHIFT               0
+#define TSTORM_ETH_CONN_AG_CTX_CF2_MASK                        0x3
+#define TSTORM_ETH_CONN_AG_CTX_CF2_SHIFT               2
+#define TSTORM_ETH_CONN_AG_CTX_CF3_MASK                        0x3
+#define TSTORM_ETH_CONN_AG_CTX_CF3_SHIFT               4
+#define TSTORM_ETH_CONN_AG_CTX_CF4_MASK                        0x3
+#define TSTORM_ETH_CONN_AG_CTX_CF4_SHIFT               6
+       u8 flags2;
+#define TSTORM_ETH_CONN_AG_CTX_CF5_MASK                        0x3
+#define TSTORM_ETH_CONN_AG_CTX_CF5_SHIFT               0
+#define TSTORM_ETH_CONN_AG_CTX_CF6_MASK                        0x3
+#define TSTORM_ETH_CONN_AG_CTX_CF6_SHIFT               2
+#define TSTORM_ETH_CONN_AG_CTX_CF7_MASK                        0x3
+#define TSTORM_ETH_CONN_AG_CTX_CF7_SHIFT               4
+#define TSTORM_ETH_CONN_AG_CTX_CF8_MASK                        0x3
+#define TSTORM_ETH_CONN_AG_CTX_CF8_SHIFT               6
+       u8 flags3;
+#define TSTORM_ETH_CONN_AG_CTX_CF9_MASK                        0x3
+#define TSTORM_ETH_CONN_AG_CTX_CF9_SHIFT               0
+#define TSTORM_ETH_CONN_AG_CTX_CF10_MASK               0x3
+#define TSTORM_ETH_CONN_AG_CTX_CF10_SHIFT              2
+#define TSTORM_ETH_CONN_AG_CTX_CF0EN_MASK              0x1
+#define TSTORM_ETH_CONN_AG_CTX_CF0EN_SHIFT             4
+#define TSTORM_ETH_CONN_AG_CTX_CF1EN_MASK              0x1
+#define TSTORM_ETH_CONN_AG_CTX_CF1EN_SHIFT             5
+#define TSTORM_ETH_CONN_AG_CTX_CF2EN_MASK              0x1
+#define TSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT             6
+#define TSTORM_ETH_CONN_AG_CTX_CF3EN_MASK              0x1
+#define TSTORM_ETH_CONN_AG_CTX_CF3EN_SHIFT             7
+       u8 flags4;
+#define TSTORM_ETH_CONN_AG_CTX_CF4EN_MASK              0x1
+#define TSTORM_ETH_CONN_AG_CTX_CF4EN_SHIFT             0
+#define TSTORM_ETH_CONN_AG_CTX_CF5EN_MASK              0x1
+#define TSTORM_ETH_CONN_AG_CTX_CF5EN_SHIFT             1
+#define TSTORM_ETH_CONN_AG_CTX_CF6EN_MASK              0x1
+#define TSTORM_ETH_CONN_AG_CTX_CF6EN_SHIFT             2
+#define TSTORM_ETH_CONN_AG_CTX_CF7EN_MASK              0x1
+#define TSTORM_ETH_CONN_AG_CTX_CF7EN_SHIFT             3
+#define TSTORM_ETH_CONN_AG_CTX_CF8EN_MASK              0x1
+#define TSTORM_ETH_CONN_AG_CTX_CF8EN_SHIFT             4
+#define TSTORM_ETH_CONN_AG_CTX_CF9EN_MASK              0x1
+#define TSTORM_ETH_CONN_AG_CTX_CF9EN_SHIFT             5
+#define TSTORM_ETH_CONN_AG_CTX_CF10EN_MASK             0x1
+#define TSTORM_ETH_CONN_AG_CTX_CF10EN_SHIFT            6
+#define TSTORM_ETH_CONN_AG_CTX_RULE0EN_MASK            0x1
+#define TSTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT           7
+       u8 flags5;
+#define TSTORM_ETH_CONN_AG_CTX_RULE1EN_MASK            0x1
+#define TSTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT           0
+#define TSTORM_ETH_CONN_AG_CTX_RULE2EN_MASK            0x1
+#define TSTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT           1
+#define TSTORM_ETH_CONN_AG_CTX_RULE3EN_MASK            0x1
+#define TSTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT           2
+#define TSTORM_ETH_CONN_AG_CTX_RULE4EN_MASK            0x1
+#define TSTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT           3
+#define TSTORM_ETH_CONN_AG_CTX_RULE5EN_MASK            0x1
+#define TSTORM_ETH_CONN_AG_CTX_RULE5EN_SHIFT           4
+#define TSTORM_ETH_CONN_AG_CTX_RX_BD_EN_MASK           0x1
+#define TSTORM_ETH_CONN_AG_CTX_RX_BD_EN_SHIFT          5
+#define TSTORM_ETH_CONN_AG_CTX_RULE7EN_MASK            0x1
+#define TSTORM_ETH_CONN_AG_CTX_RULE7EN_SHIFT           6
+#define TSTORM_ETH_CONN_AG_CTX_RULE8EN_MASK            0x1
+#define TSTORM_ETH_CONN_AG_CTX_RULE8EN_SHIFT           7
+       __le32 reg0;
+       __le32 reg1;
+       __le32 reg2;
+       __le32 reg3;
+       __le32 reg4;
+       __le32 reg5;
+       __le32 reg6;
+       __le32 reg7;
+       __le32 reg8;
+       u8 byte2;
+       u8 byte3;
+       __le16 rx_bd_cons;
+       u8 byte4;
+       u8 byte5;
+       __le16 rx_bd_prod;
+       __le16 word2;
+       __le16 word3;
+       __le32 reg9;
+       __le32 reg10;
+};
+
+struct ustorm_eth_conn_ag_ctx {
+       u8 byte0;
+       u8 byte1;
+       u8 flags0;
+#define USTORM_ETH_CONN_AG_CTX_BIT0_MASK                       0x1
+#define USTORM_ETH_CONN_AG_CTX_BIT0_SHIFT                      0
+#define USTORM_ETH_CONN_AG_CTX_BIT1_MASK                       0x1
+#define USTORM_ETH_CONN_AG_CTX_BIT1_SHIFT                      1
+#define USTORM_ETH_CONN_AG_CTX_TX_PMD_TERMINATE_CF_MASK                0x3
+#define USTORM_ETH_CONN_AG_CTX_TX_PMD_TERMINATE_CF_SHIFT       2
+#define USTORM_ETH_CONN_AG_CTX_RX_PMD_TERMINATE_CF_MASK                0x3
+#define USTORM_ETH_CONN_AG_CTX_RX_PMD_TERMINATE_CF_SHIFT       4
+#define USTORM_ETH_CONN_AG_CTX_CF2_MASK                                0x3
+#define USTORM_ETH_CONN_AG_CTX_CF2_SHIFT                       6
+       u8 flags1;
+#define USTORM_ETH_CONN_AG_CTX_CF3_MASK                                0x3
+#define USTORM_ETH_CONN_AG_CTX_CF3_SHIFT                       0
+#define USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_MASK                  0x3
+#define USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_SHIFT                 2
+#define USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_MASK                  0x3
+#define USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_SHIFT                 4
+#define USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_MASK          0x3
+#define USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_SHIFT         6
+       u8 flags2;
+#define USTORM_ETH_CONN_AG_CTX_TX_PMD_TERMINATE_CF_EN_MASK     0x1
+#define USTORM_ETH_CONN_AG_CTX_TX_PMD_TERMINATE_CF_EN_SHIFT    0
+#define USTORM_ETH_CONN_AG_CTX_RX_PMD_TERMINATE_CF_EN_MASK     0x1
+#define USTORM_ETH_CONN_AG_CTX_RX_PMD_TERMINATE_CF_EN_SHIFT    1
+#define USTORM_ETH_CONN_AG_CTX_CF2EN_MASK                      0x1
+#define USTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT                     2
+#define USTORM_ETH_CONN_AG_CTX_CF3EN_MASK                      0x1
+#define USTORM_ETH_CONN_AG_CTX_CF3EN_SHIFT                     3
+#define USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_EN_MASK               0x1
+#define USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_EN_SHIFT              4
+#define USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_EN_MASK               0x1
+#define USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_EN_SHIFT              5
+#define USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_MASK       0x1
+#define USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_SHIFT      6
+#define USTORM_ETH_CONN_AG_CTX_RULE0EN_MASK                    0x1
+#define USTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT                   7
+       u8 flags3;
+#define USTORM_ETH_CONN_AG_CTX_RULE1EN_MASK                    0x1
+#define USTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT                   0
+#define USTORM_ETH_CONN_AG_CTX_RULE2EN_MASK                    0x1
+#define USTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT                   1
+#define USTORM_ETH_CONN_AG_CTX_RULE3EN_MASK                    0x1
+#define USTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT                   2
+#define USTORM_ETH_CONN_AG_CTX_RULE4EN_MASK                    0x1
+#define USTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT                   3
+#define USTORM_ETH_CONN_AG_CTX_RULE5EN_MASK                    0x1
+#define USTORM_ETH_CONN_AG_CTX_RULE5EN_SHIFT                   4
+#define USTORM_ETH_CONN_AG_CTX_RULE6EN_MASK                    0x1
+#define USTORM_ETH_CONN_AG_CTX_RULE6EN_SHIFT                   5
+#define USTORM_ETH_CONN_AG_CTX_RULE7EN_MASK                    0x1
+#define USTORM_ETH_CONN_AG_CTX_RULE7EN_SHIFT                   6
+#define USTORM_ETH_CONN_AG_CTX_RULE8EN_MASK                    0x1
+#define USTORM_ETH_CONN_AG_CTX_RULE8EN_SHIFT                   7
+       u8 byte2;
+       u8 byte3;
+       __le16 word0;
+       __le16 tx_bd_cons;
+       __le32 reg0;
+       __le32 reg1;
+       __le32 reg2;
+       __le32 tx_int_coallecing_timeset;
+       __le16 tx_drv_bd_cons;
+       __le16 rx_drv_cqe_cons;
+};
+
+/* The eth storm context for the Ustorm */
+struct ustorm_eth_conn_st_ctx {
+       __le32 reserved[40];
+};
+
+/* The eth storm context for the Mstorm */
+struct mstorm_eth_conn_st_ctx {
+       __le32 reserved[8];
+};
+
+/* eth connection context */
+struct eth_conn_context {
+       struct tstorm_eth_conn_st_ctx tstorm_st_context;
+       struct regpair tstorm_st_padding[2];
+       struct pstorm_eth_conn_st_ctx pstorm_st_context;
+       struct xstorm_eth_conn_st_ctx xstorm_st_context;
+       struct xstorm_eth_conn_ag_ctx xstorm_ag_context;
+       struct ystorm_eth_conn_st_ctx ystorm_st_context;
+       struct ystorm_eth_conn_ag_ctx ystorm_ag_context;
+       struct tstorm_eth_conn_ag_ctx tstorm_ag_context;
+       struct ustorm_eth_conn_ag_ctx ustorm_ag_context;
+       struct ustorm_eth_conn_st_ctx ustorm_st_context;
+       struct mstorm_eth_conn_st_ctx mstorm_st_context;
+};
+
+/* opcodes for the event ring */
+enum eth_event_opcode {
+       ETH_EVENT_UNUSED,
+       ETH_EVENT_VPORT_START,
+       ETH_EVENT_VPORT_UPDATE,
+       ETH_EVENT_VPORT_STOP,
+       ETH_EVENT_TX_QUEUE_START,
+       ETH_EVENT_TX_QUEUE_STOP,
+       ETH_EVENT_RX_QUEUE_START,
+       ETH_EVENT_RX_QUEUE_UPDATE,
+       ETH_EVENT_RX_QUEUE_STOP,
+       ETH_EVENT_FILTERS_UPDATE,
+       ETH_EVENT_RESERVED,
+       ETH_EVENT_RESERVED2,
+       ETH_EVENT_RESERVED3,
+       ETH_EVENT_RX_ADD_UDP_FILTER,
+       ETH_EVENT_RX_DELETE_UDP_FILTER,
+       ETH_EVENT_RESERVED4,
+       ETH_EVENT_RESERVED5,
+       MAX_ETH_EVENT_OPCODE
+};
+
+/* Ethernet classification filter actions */
+enum eth_filter_action {
+       ETH_FILTER_ACTION_UNUSED,
+       ETH_FILTER_ACTION_REMOVE,
+       ETH_FILTER_ACTION_ADD,
+       ETH_FILTER_ACTION_REMOVE_ALL,
+       MAX_ETH_FILTER_ACTION
+};
+
+/* Command for adding/removing a classification rule $$KEEP_ENDIANNESS$$ */
+struct eth_filter_cmd {
+       u8 type;
+       u8 vport_id;
+       u8 action;
+       u8 reserved0;
+       __le32 vni;
+       __le16 mac_lsb;
+       __le16 mac_mid;
+       __le16 mac_msb;
+       __le16 vlan_id;
+};
+
+/*     $$KEEP_ENDIANNESS$$ */
+struct eth_filter_cmd_header {
+       u8 rx;
+       u8 tx;
+       u8 cmd_cnt;
+       u8 assert_on_error;
+       u8 reserved1[4];
+};
+
+/* Ethernet filter types: mac/vlan/pair */
+enum eth_filter_type {
+       ETH_FILTER_TYPE_UNUSED,
+       ETH_FILTER_TYPE_MAC,
+       ETH_FILTER_TYPE_VLAN,
+       ETH_FILTER_TYPE_PAIR,
+       ETH_FILTER_TYPE_INNER_MAC,
+       ETH_FILTER_TYPE_INNER_VLAN,
+       ETH_FILTER_TYPE_INNER_PAIR,
+       ETH_FILTER_TYPE_INNER_MAC_VNI_PAIR,
+       ETH_FILTER_TYPE_MAC_VNI_PAIR,
+       ETH_FILTER_TYPE_VNI,
+       MAX_ETH_FILTER_TYPE
+};
+
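A hedged sketch of how an "add unicast MAC" classification command could be populated from the action and type enums above; the byte-to-word packing of the MAC address shown here is an assumption for illustration, since the driver normally converts addresses through its own slow-path helpers.

/* Hypothetical example: stage one MAC "add" filter command. */
static void example_fill_mac_add(struct eth_filter_cmd *cmd,
				 const u8 *mac, u8 vport_id)
{
	memset(cmd, 0, sizeof(*cmd));
	cmd->type = ETH_FILTER_TYPE_MAC;
	cmd->action = ETH_FILTER_ACTION_ADD;
	cmd->vport_id = vport_id;
	/* Assumed packing: two MAC bytes per little-endian 16-bit word. */
	cmd->mac_msb = cpu_to_le16(((u16)mac[0] << 8) | mac[1]);
	cmd->mac_mid = cpu_to_le16(((u16)mac[2] << 8) | mac[3]);
	cmd->mac_lsb = cpu_to_le16(((u16)mac[4] << 8) | mac[5]);
}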
+/* Ethernet Ramrod Command IDs */
+enum eth_ramrod_cmd_id {
+       ETH_RAMROD_UNUSED,
+       ETH_RAMROD_VPORT_START,
+       ETH_RAMROD_VPORT_UPDATE,
+       ETH_RAMROD_VPORT_STOP,
+       ETH_RAMROD_RX_QUEUE_START,
+       ETH_RAMROD_RX_QUEUE_STOP,
+       ETH_RAMROD_TX_QUEUE_START,
+       ETH_RAMROD_TX_QUEUE_STOP,
+       ETH_RAMROD_FILTERS_UPDATE,
+       ETH_RAMROD_RX_QUEUE_UPDATE,
+       ETH_RAMROD_RX_CREATE_OPENFLOW_ACTION,
+       ETH_RAMROD_RX_ADD_OPENFLOW_FILTER,
+       ETH_RAMROD_RX_DELETE_OPENFLOW_FILTER,
+       ETH_RAMROD_RX_ADD_UDP_FILTER,
+       ETH_RAMROD_RX_DELETE_UDP_FILTER,
+       ETH_RAMROD_RX_CREATE_GFT_ACTION,
+       ETH_RAMROD_GFT_UPDATE_FILTER,
+       MAX_ETH_RAMROD_CMD_ID
+};
+
+/* return code from eth sp ramrods */
+struct eth_return_code {
+       u8 value;
+#define ETH_RETURN_CODE_ERR_CODE_MASK  0x1F
+#define ETH_RETURN_CODE_ERR_CODE_SHIFT 0
+#define ETH_RETURN_CODE_RESERVED_MASK  0x3
+#define ETH_RETURN_CODE_RESERVED_SHIFT 5
+#define ETH_RETURN_CODE_RX_TX_MASK     0x1
+#define ETH_RETURN_CODE_RX_TX_SHIFT    7
+};
+
+/* What to do in case an error occurs */
+enum eth_tx_err {
+       ETH_TX_ERR_DROP,
+       ETH_TX_ERR_ASSERT_MALICIOUS,
+       MAX_ETH_TX_ERR
+};
+
+/* Per-error-type Tx behavior flags */
+struct eth_tx_err_vals {
+       __le16 values;
+#define ETH_TX_ERR_VALS_ILLEGAL_VLAN_MODE_MASK                 0x1
+#define ETH_TX_ERR_VALS_ILLEGAL_VLAN_MODE_SHIFT                        0
+#define ETH_TX_ERR_VALS_PACKET_TOO_SMALL_MASK                  0x1
+#define ETH_TX_ERR_VALS_PACKET_TOO_SMALL_SHIFT                 1
+#define ETH_TX_ERR_VALS_ANTI_SPOOFING_ERR_MASK                 0x1
+#define ETH_TX_ERR_VALS_ANTI_SPOOFING_ERR_SHIFT                        2
+#define ETH_TX_ERR_VALS_ILLEGAL_INBAND_TAGS_MASK               0x1
+#define ETH_TX_ERR_VALS_ILLEGAL_INBAND_TAGS_SHIFT              3
+#define ETH_TX_ERR_VALS_VLAN_INSERTION_W_INBAND_TAG_MASK       0x1
+#define ETH_TX_ERR_VALS_VLAN_INSERTION_W_INBAND_TAG_SHIFT      4
+#define ETH_TX_ERR_VALS_MTU_VIOLATION_MASK                     0x1
+#define ETH_TX_ERR_VALS_MTU_VIOLATION_SHIFT                    5
+#define ETH_TX_ERR_VALS_ILLEGAL_CONTROL_FRAME_MASK             0x1
+#define ETH_TX_ERR_VALS_ILLEGAL_CONTROL_FRAME_SHIFT            6
+#define ETH_TX_ERR_VALS_RESERVED_MASK                          0x1FF
+#define ETH_TX_ERR_VALS_RESERVED_SHIFT                         7
+};
+
+/* vport rss configuration data */
+struct eth_vport_rss_config {
+       __le16 capabilities;
+#define ETH_VPORT_RSS_CONFIG_IPV4_CAPABILITY_MASK              0x1
+#define ETH_VPORT_RSS_CONFIG_IPV4_CAPABILITY_SHIFT             0
+#define ETH_VPORT_RSS_CONFIG_IPV6_CAPABILITY_MASK              0x1
+#define ETH_VPORT_RSS_CONFIG_IPV6_CAPABILITY_SHIFT             1
+#define ETH_VPORT_RSS_CONFIG_IPV4_TCP_CAPABILITY_MASK          0x1
+#define ETH_VPORT_RSS_CONFIG_IPV4_TCP_CAPABILITY_SHIFT         2
+#define ETH_VPORT_RSS_CONFIG_IPV6_TCP_CAPABILITY_MASK          0x1
+#define ETH_VPORT_RSS_CONFIG_IPV6_TCP_CAPABILITY_SHIFT         3
+#define ETH_VPORT_RSS_CONFIG_IPV4_UDP_CAPABILITY_MASK          0x1
+#define ETH_VPORT_RSS_CONFIG_IPV4_UDP_CAPABILITY_SHIFT         4
+#define ETH_VPORT_RSS_CONFIG_IPV6_UDP_CAPABILITY_MASK          0x1
+#define ETH_VPORT_RSS_CONFIG_IPV6_UDP_CAPABILITY_SHIFT         5
+#define ETH_VPORT_RSS_CONFIG_EN_5_TUPLE_CAPABILITY_MASK                0x1
+#define ETH_VPORT_RSS_CONFIG_EN_5_TUPLE_CAPABILITY_SHIFT       6
+#define ETH_VPORT_RSS_CONFIG_RESERVED0_MASK                    0x1FF
+#define ETH_VPORT_RSS_CONFIG_RESERVED0_SHIFT                   7
+       u8 rss_id;
+       u8 rss_mode;
+       u8 update_rss_key;
+       u8 update_rss_ind_table;
+       u8 update_rss_capabilities;
+       u8 tbl_size;
+       __le32 reserved2[2];
+       __le16 indirection_table[ETH_RSS_IND_TABLE_ENTRIES_NUM];
+
+       __le32 rss_key[ETH_RSS_KEY_SIZE_REGS];
+       __le32 reserved3[2];
+};
+
+/* eth vport RSS mode */
+enum eth_vport_rss_mode {
+       ETH_VPORT_RSS_MODE_DISABLED,
+       ETH_VPORT_RSS_MODE_REGULAR,
+       MAX_ETH_VPORT_RSS_MODE
+};
+
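As a usage sketch, the capability bits above combine into the little-endian capabilities word; requesting hashing for TCP over IPv4 and IPv6 in a regular-RSS vport might look like the following (field names come from the struct above, everything else is illustrative):

/* Illustration only: enable TCP/IPv4 and TCP/IPv6 RSS hashing. */
static void example_rss_tcp_only(struct eth_vport_rss_config *cfg)
{
	cfg->rss_mode = ETH_VPORT_RSS_MODE_REGULAR;
	cfg->update_rss_capabilities = 1;
	cfg->capabilities = cpu_to_le16(
		BIT(ETH_VPORT_RSS_CONFIG_IPV4_TCP_CAPABILITY_SHIFT) |
		BIT(ETH_VPORT_RSS_CONFIG_IPV6_TCP_CAPABILITY_SHIFT));
}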
+/* Command for setting classification flags for a vport $$KEEP_ENDIANNESS$$ */
+struct eth_vport_rx_mode {
+       __le16 state;
+#define ETH_VPORT_RX_MODE_UCAST_DROP_ALL_MASK          0x1
+#define ETH_VPORT_RX_MODE_UCAST_DROP_ALL_SHIFT         0
+#define ETH_VPORT_RX_MODE_UCAST_ACCEPT_ALL_MASK                0x1
+#define ETH_VPORT_RX_MODE_UCAST_ACCEPT_ALL_SHIFT       1
+#define ETH_VPORT_RX_MODE_UCAST_ACCEPT_UNMATCHED_MASK  0x1
+#define ETH_VPORT_RX_MODE_UCAST_ACCEPT_UNMATCHED_SHIFT 2
+#define ETH_VPORT_RX_MODE_MCAST_DROP_ALL_MASK          0x1
+#define ETH_VPORT_RX_MODE_MCAST_DROP_ALL_SHIFT         3
+#define ETH_VPORT_RX_MODE_MCAST_ACCEPT_ALL_MASK                0x1
+#define ETH_VPORT_RX_MODE_MCAST_ACCEPT_ALL_SHIFT       4
+#define ETH_VPORT_RX_MODE_BCAST_ACCEPT_ALL_MASK                0x1
+#define ETH_VPORT_RX_MODE_BCAST_ACCEPT_ALL_SHIFT       5
+#define ETH_VPORT_RX_MODE_RESERVED1_MASK               0x3FF
+#define ETH_VPORT_RX_MODE_RESERVED1_SHIFT              6
+       __le16 reserved2[3];
+};
+
+/* Command for setting tpa parameters */
+struct eth_vport_tpa_param {
+       u8 tpa_ipv4_en_flg;
+       u8 tpa_ipv6_en_flg;
+       u8 tpa_ipv4_tunn_en_flg;
+       u8 tpa_ipv6_tunn_en_flg;
+       u8 tpa_pkt_split_flg;
+       u8 tpa_hdr_data_split_flg;
+       u8 tpa_gro_consistent_flg;
+
+       u8 tpa_max_aggs_num;
+
+       __le16 tpa_max_size;
+       __le16 tpa_min_size_to_start;
+
+       __le16 tpa_min_size_to_cont;
+       u8 max_buff_num;
+       u8 reserved;
+};
+
+/* Command for setting classification flags for a vport $$KEEP_ENDIANNESS$$ */
+struct eth_vport_tx_mode {
+       __le16 state;
+#define ETH_VPORT_TX_MODE_UCAST_DROP_ALL_MASK          0x1
+#define ETH_VPORT_TX_MODE_UCAST_DROP_ALL_SHIFT         0
+#define ETH_VPORT_TX_MODE_UCAST_ACCEPT_ALL_MASK                0x1
+#define ETH_VPORT_TX_MODE_UCAST_ACCEPT_ALL_SHIFT       1
+#define ETH_VPORT_TX_MODE_MCAST_DROP_ALL_MASK          0x1
+#define ETH_VPORT_TX_MODE_MCAST_DROP_ALL_SHIFT         2
+#define ETH_VPORT_TX_MODE_MCAST_ACCEPT_ALL_MASK                0x1
+#define ETH_VPORT_TX_MODE_MCAST_ACCEPT_ALL_SHIFT       3
+#define ETH_VPORT_TX_MODE_BCAST_ACCEPT_ALL_MASK                0x1
+#define ETH_VPORT_TX_MODE_BCAST_ACCEPT_ALL_SHIFT       4
+#define ETH_VPORT_TX_MODE_RESERVED1_MASK               0x7FF
+#define ETH_VPORT_TX_MODE_RESERVED1_SHIFT              5
+       __le16 reserved2[3];
+};
+
+/* Ramrod data for rx queue start ramrod */
+struct rx_queue_start_ramrod_data {
+       __le16 rx_queue_id;
+       __le16 num_of_pbl_pages;
+       __le16 bd_max_bytes;
+       __le16 sb_id;
+       u8 sb_index;
+       u8 vport_id;
+       u8 default_rss_queue_flg;
+       u8 complete_cqe_flg;
+       u8 complete_event_flg;
+       u8 stats_counter_id;
+       u8 pin_context;
+       u8 pxp_tph_valid_bd;
+       u8 pxp_tph_valid_pkt;
+       u8 pxp_st_hint;
+
+       __le16 pxp_st_index;
+       u8 pmd_mode;
+
+       u8 notify_en;
+       u8 toggle_val;
+
+       u8 vf_rx_prod_index;
+
+       u8 reserved[6];
+       __le16 reserved1;
+       struct regpair cqe_pbl_addr;
+       struct regpair bd_base;
+       struct regpair reserved2;
+};
+
+/* Ramrod data for rx queue stop ramrod */
+struct rx_queue_stop_ramrod_data {
+       __le16 rx_queue_id;
+       u8 complete_cqe_flg;
+       u8 complete_event_flg;
+       u8 vport_id;
+       u8 reserved[3];
+};
+
+/* Ramrod data for rx queue update ramrod */
+struct rx_queue_update_ramrod_data {
+       __le16 rx_queue_id;
+       u8 complete_cqe_flg;
+       u8 complete_event_flg;
+       u8 vport_id;
+       u8 reserved[4];
+       u8 reserved1;
+       u8 reserved2;
+       u8 reserved3;
+       __le16 reserved4;
+       __le16 reserved5;
+       struct regpair reserved6;
+};
+
+/* Ramrod data for rx Add UDP Filter */
+struct rx_udp_filter_data {
+       __le16 action_icid;
+       __le16 vlan_id;
+       u8 ip_type;
+       u8 tenant_id_exists;
+       __le16 reserved1;
+       __le32 ip_dst_addr[4];
+       __le32 ip_src_addr[4];
+       __le16 udp_dst_port;
+       __le16 udp_src_port;
+       __le32 tenant_id;
+};
+
+/* Ramrod data for tx queue start ramrod */
+struct tx_queue_start_ramrod_data {
+       __le16 sb_id;
+       u8 sb_index;
+       u8 vport_id;
+       u8 reserved0;
+       u8 stats_counter_id;
+       __le16 qm_pq_id;
+       u8 flags;
+#define TX_QUEUE_START_RAMROD_DATA_DISABLE_OPPORTUNISTIC_MASK  0x1
+#define TX_QUEUE_START_RAMROD_DATA_DISABLE_OPPORTUNISTIC_SHIFT 0
+#define TX_QUEUE_START_RAMROD_DATA_TEST_MODE_PKT_DUP_MASK      0x1
+#define TX_QUEUE_START_RAMROD_DATA_TEST_MODE_PKT_DUP_SHIFT     1
+#define TX_QUEUE_START_RAMROD_DATA_TEST_MODE_TX_DEST_MASK      0x1
+#define TX_QUEUE_START_RAMROD_DATA_TEST_MODE_TX_DEST_SHIFT     2
+#define TX_QUEUE_START_RAMROD_DATA_PMD_MODE_MASK               0x1
+#define TX_QUEUE_START_RAMROD_DATA_PMD_MODE_SHIFT              3
+#define TX_QUEUE_START_RAMROD_DATA_NOTIFY_EN_MASK              0x1
+#define TX_QUEUE_START_RAMROD_DATA_NOTIFY_EN_SHIFT             4
+#define TX_QUEUE_START_RAMROD_DATA_PIN_CONTEXT_MASK            0x1
+#define TX_QUEUE_START_RAMROD_DATA_PIN_CONTEXT_SHIFT           5
+#define TX_QUEUE_START_RAMROD_DATA_RESERVED1_MASK              0x3
+#define TX_QUEUE_START_RAMROD_DATA_RESERVED1_SHIFT             6
+       u8 pxp_st_hint;
+       u8 pxp_tph_valid_bd;
+       u8 pxp_tph_valid_pkt;
+       __le16 pxp_st_index;
+       __le16 comp_agg_size;
+       __le16 queue_zone_id;
+       __le16 test_dup_count;
+       __le16 pbl_size;
+       __le16 tx_queue_id;
+
+       struct regpair pbl_base_addr;
+       struct regpair bd_cons_address;
+};
+
+/* Ramrod data for tx queue stop ramrod */
+struct tx_queue_stop_ramrod_data {
+       __le16 reserved[4];
+};
+
+/* Ramrod data for vport filter update ramrod */
+struct vport_filter_update_ramrod_data {
+       struct eth_filter_cmd_header filter_cmd_hdr;
+       struct eth_filter_cmd filter_cmds[ETH_FILTER_RULES_COUNT];
+};
+
+/* Ramrod data for vport start ramrod */
+struct vport_start_ramrod_data {
+       u8 vport_id;
+       u8 sw_fid;
+       __le16 mtu;
+       u8 drop_ttl0_en;
+       u8 inner_vlan_removal_en;
+       struct eth_vport_rx_mode rx_mode;
+       struct eth_vport_tx_mode tx_mode;
+       struct eth_vport_tpa_param tpa_param;
+       __le16 default_vlan;
+       u8 tx_switching_en;
+       u8 anti_spoofing_en;
+
+       u8 default_vlan_en;
+
+       u8 handle_ptp_pkts;
+       u8 silent_vlan_removal_en;
+       u8 untagged;
+       struct eth_tx_err_vals tx_err_behav;
+
+       u8 zero_placement_offset;
+       u8 ctl_frame_mac_check_en;
+       u8 ctl_frame_ethtype_check_en;
+       u8 reserved[5];
+};
+
+/* Ramrod data for vport stop ramrod */
+struct vport_stop_ramrod_data {
+       u8 vport_id;
+       u8 reserved[7];
+};
+
+/* Ramrod data for vport update ramrod (common part) */
+struct vport_update_ramrod_data_cmn {
+       u8 vport_id;
+       u8 update_rx_active_flg;
+       u8 rx_active_flg;
+       u8 update_tx_active_flg;
+       u8 tx_active_flg;
+       u8 update_rx_mode_flg;
+       u8 update_tx_mode_flg;
+       u8 update_approx_mcast_flg;
+
+       u8 update_rss_flg;
+       u8 update_inner_vlan_removal_en_flg;
+
+       u8 inner_vlan_removal_en;
+       u8 update_tpa_param_flg;
+       u8 update_tpa_en_flg;
+       u8 update_tx_switching_en_flg;
+
+       u8 tx_switching_en;
+       u8 update_anti_spoofing_en_flg;
+
+       u8 anti_spoofing_en;
+       u8 update_handle_ptp_pkts;
+
+       u8 handle_ptp_pkts;
+       u8 update_default_vlan_en_flg;
+
+       u8 default_vlan_en;
+
+       u8 update_default_vlan_flg;
+
+       __le16 default_vlan;
+       u8 update_accept_any_vlan_flg;
+
+       u8 accept_any_vlan;
+       u8 silent_vlan_removal_en;
+       u8 update_mtu_flg;
+
+       __le16 mtu;
+       u8 reserved[2];
+};
+
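+/* Approximate multicast bins for the vport update ramrod */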
+struct vport_update_ramrod_mcast {
+       __le32 bins[ETH_MULTICAST_MAC_BINS_IN_REGS];
+};
+
+/* Ramrod data for vport update ramrod */
+struct vport_update_ramrod_data {
+       struct vport_update_ramrod_data_cmn common;
+
+       struct eth_vport_rx_mode rx_mode;
+       struct eth_vport_tx_mode tx_mode;
+       struct eth_vport_tpa_param tpa_param;
+       struct vport_update_ramrod_mcast approx_mcast;
+       struct eth_vport_rss_config rss_config;
+};
+
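+/* mstorm RDMA task storm context */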
+struct mstorm_rdma_task_st_ctx {
+       struct regpair temp[4];
+};
+
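+/* Ramrod data for RDMA function close */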
+struct rdma_close_func_ramrod_data {
+       u8 cnq_start_offset;
+       u8 num_cnqs;
+       u8 vf_id;
+       u8 vf_valid;
+       u8 reserved[4];
+};
+
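+/* RDMA completion notification queue (CNQ) parameters */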
+struct rdma_cnq_params {
+       __le16 sb_num;
+       u8 sb_index;
+       u8 num_pbl_pages;
+       __le32 reserved;
+       struct regpair pbl_base_addr;
+       __le16 queue_zone_num;
+       u8 reserved1[6];
+};
+
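+/* Ramrod data for RDMA create CQ */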
+struct rdma_create_cq_ramrod_data {
+       struct regpair cq_handle;
+       struct regpair pbl_addr;
+       __le32 max_cqes;
+       __le16 pbl_num_pages;
+       __le16 dpi;
+       u8 is_two_level_pbl;
+       u8 cnq_id;
+       u8 pbl_log_page_size;
+       u8 toggle_bit;
+       __le16 int_timeout;
+       __le16 reserved1;
+};
+
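+/* Ramrod data for RDMA deregister TID */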
+struct rdma_deregister_tid_ramrod_data {
+       __le32 itid;
+       __le32 reserved;
+};
+
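+/* Output params for RDMA destroy CQ */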
+struct rdma_destroy_cq_output_params {
+       __le16 cnq_num;
+       __le16 reserved0;
+       __le32 reserved1;
+};
+
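+/* Ramrod data for RDMA destroy CQ */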
+struct rdma_destroy_cq_ramrod_data {
+       struct regpair output_params_addr;
+};
+
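+/* RDMA event opcodes */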
+enum rdma_event_opcode {
+       RDMA_EVENT_UNUSED,
+       RDMA_EVENT_FUNC_INIT,
+       RDMA_EVENT_FUNC_CLOSE,
+       RDMA_EVENT_REGISTER_MR,
+       RDMA_EVENT_DEREGISTER_MR,
+       RDMA_EVENT_CREATE_CQ,
+       RDMA_EVENT_RESIZE_CQ,
+       RDMA_EVENT_DESTROY_CQ,
+       RDMA_EVENT_CREATE_SRQ,
+       RDMA_EVENT_MODIFY_SRQ,
+       RDMA_EVENT_DESTROY_SRQ,
+       MAX_RDMA_EVENT_OPCODE
+};
+
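+/* RDMA FW return codes */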
+enum rdma_fw_return_code {
+       RDMA_RETURN_OK = 0,
+       RDMA_RETURN_REGISTER_MR_BAD_STATE_ERR,
+       RDMA_RETURN_DEREGISTER_MR_BAD_STATE_ERR,
+       RDMA_RETURN_RESIZE_CQ_ERR,
+       RDMA_RETURN_NIG_DRAIN_REQ,
+       MAX_RDMA_FW_RETURN_CODE
+};
+
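+/* Header for RDMA function init ramrod data */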
+struct rdma_init_func_hdr {
+       u8 cnq_start_offset;
+       u8 num_cnqs;
+       u8 cq_ring_mode;
+       u8 cnp_vlan_priority;
+       __le32 cnp_send_timeout;
+       u8 cnp_dscp;
+       u8 vf_id;
+       u8 vf_valid;
+       u8 reserved[5];
+};
+
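+/* Ramrod data for RDMA function init */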
+struct rdma_init_func_ramrod_data {
+       struct rdma_init_func_hdr params_header;
+       struct rdma_cnq_params cnq_params[NUM_OF_GLOBAL_QUEUES];
+};
+
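+/* RDMA ramrod command IDs */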
+enum rdma_ramrod_cmd_id {
+       RDMA_RAMROD_UNUSED,
+       RDMA_RAMROD_FUNC_INIT,
+       RDMA_RAMROD_FUNC_CLOSE,
+       RDMA_RAMROD_REGISTER_MR,
+       RDMA_RAMROD_DEREGISTER_MR,
+       RDMA_RAMROD_CREATE_CQ,
+       RDMA_RAMROD_RESIZE_CQ,
+       RDMA_RAMROD_DESTROY_CQ,
+       RDMA_RAMROD_CREATE_SRQ,
+       RDMA_RAMROD_MODIFY_SRQ,
+       RDMA_RAMROD_DESTROY_SRQ,
+       MAX_RDMA_RAMROD_CMD_ID
+};
+
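+/* Ramrod data for RDMA register TID */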
+struct rdma_register_tid_ramrod_data {
+       __le32 flags;
+#define RDMA_REGISTER_TID_RAMROD_DATA_MAX_ID_MASK             0x3FFFF
+#define RDMA_REGISTER_TID_RAMROD_DATA_MAX_ID_SHIFT            0
+#define RDMA_REGISTER_TID_RAMROD_DATA_PAGE_SIZE_LOG_MASK      0x1F
+#define RDMA_REGISTER_TID_RAMROD_DATA_PAGE_SIZE_LOG_SHIFT     18
+#define RDMA_REGISTER_TID_RAMROD_DATA_TWO_LEVEL_PBL_MASK      0x1
+#define RDMA_REGISTER_TID_RAMROD_DATA_TWO_LEVEL_PBL_SHIFT     23
+#define RDMA_REGISTER_TID_RAMROD_DATA_ZERO_BASED_MASK         0x1
+#define RDMA_REGISTER_TID_RAMROD_DATA_ZERO_BASED_SHIFT        24
+#define RDMA_REGISTER_TID_RAMROD_DATA_PHY_MR_MASK             0x1
+#define RDMA_REGISTER_TID_RAMROD_DATA_PHY_MR_SHIFT            25
+#define RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_READ_MASK        0x1
+#define RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_READ_SHIFT       26
+#define RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_WRITE_MASK       0x1
+#define RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_WRITE_SHIFT      27
+#define RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_ATOMIC_MASK      0x1
+#define RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_ATOMIC_SHIFT     28
+#define RDMA_REGISTER_TID_RAMROD_DATA_LOCAL_WRITE_MASK        0x1
+#define RDMA_REGISTER_TID_RAMROD_DATA_LOCAL_WRITE_SHIFT       29
+#define RDMA_REGISTER_TID_RAMROD_DATA_LOCAL_READ_MASK         0x1
+#define RDMA_REGISTER_TID_RAMROD_DATA_LOCAL_READ_SHIFT        30
+#define RDMA_REGISTER_TID_RAMROD_DATA_ENABLE_MW_BIND_MASK     0x1
+#define RDMA_REGISTER_TID_RAMROD_DATA_ENABLE_MW_BIND_SHIFT    31
+       u8 flags1;
+#define RDMA_REGISTER_TID_RAMROD_DATA_PBL_PAGE_SIZE_LOG_MASK  0x1F
+#define RDMA_REGISTER_TID_RAMROD_DATA_PBL_PAGE_SIZE_LOG_SHIFT 0
+#define RDMA_REGISTER_TID_RAMROD_DATA_TID_TYPE_MASK           0x7
+#define RDMA_REGISTER_TID_RAMROD_DATA_TID_TYPE_SHIFT          5
+       u8 flags2;
+#define RDMA_REGISTER_TID_RAMROD_DATA_DMA_MR_MASK             0x1
+#define RDMA_REGISTER_TID_RAMROD_DATA_DMA_MR_SHIFT            0
+#define RDMA_REGISTER_TID_RAMROD_DATA_DIF_ON_HOST_FLG_MASK    0x1
+#define RDMA_REGISTER_TID_RAMROD_DATA_DIF_ON_HOST_FLG_SHIFT   1
+#define RDMA_REGISTER_TID_RAMROD_DATA_RESERVED1_MASK          0x3F
+#define RDMA_REGISTER_TID_RAMROD_DATA_RESERVED1_SHIFT         2
+       u8 key;
+       u8 length_hi;
+       u8 vf_id;
+       u8 vf_valid;
+       __le16 pd;
+       __le32 length_lo;
+       __le32 itid;
+       __le32 reserved2;
+       struct regpair va;
+       struct regpair pbl_base;
+       struct regpair dif_error_addr;
+       struct regpair dif_runt_addr;
+       __le32 reserved3[2];
+};
+
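+/* Output params for RDMA resize CQ */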
+struct rdma_resize_cq_output_params {
+       __le32 old_cq_cons;
+       __le32 old_cq_prod;
+};
+
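+/* Ramrod data for RDMA resize CQ */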
+struct rdma_resize_cq_ramrod_data {
+       u8 flags;
+#define RDMA_RESIZE_CQ_RAMROD_DATA_TOGGLE_BIT_MASK        0x1
+#define RDMA_RESIZE_CQ_RAMROD_DATA_TOGGLE_BIT_SHIFT       0
+#define RDMA_RESIZE_CQ_RAMROD_DATA_IS_TWO_LEVEL_PBL_MASK  0x1
+#define RDMA_RESIZE_CQ_RAMROD_DATA_IS_TWO_LEVEL_PBL_SHIFT 1
+#define RDMA_RESIZE_CQ_RAMROD_DATA_RESERVED_MASK          0x3F
+#define RDMA_RESIZE_CQ_RAMROD_DATA_RESERVED_SHIFT         2
+       u8 pbl_log_page_size;
+       __le16 pbl_num_pages;
+       __le32 max_cqes;
+       struct regpair pbl_addr;
+       struct regpair output_params_addr;
+};
+
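+/* RDMA SRQ context */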
+struct rdma_srq_context {
+       struct regpair temp[8];
+};
+
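+/* Ramrod data for RDMA create SRQ */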
+struct rdma_srq_create_ramrod_data {
+       struct regpair pbl_base_addr;
+       __le16 pages_in_srq_pbl;
+       __le16 pd_id;
+       struct rdma_srq_id srq_id;
+       __le16 page_size;
+       __le16 reserved1;
+       __le32 reserved2;
+       struct regpair producers_addr;
+};
+
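+/* Ramrod data for RDMA destroy SRQ */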
+struct rdma_srq_destroy_ramrod_data {
+       struct rdma_srq_id srq_id;
+       __le32 reserved;
+};
+
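+/* Ramrod data for RDMA modify SRQ */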
+struct rdma_srq_modify_ramrod_data {
+       struct rdma_srq_id srq_id;
+       __le32 wqe_limit;
+};
+
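+/* ystorm RDMA task storm context */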
+struct ystorm_rdma_task_st_ctx {
+       struct regpair temp[4];
+};
+
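+/* ystorm RDMA task aggregative context */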
+struct ystorm_rdma_task_ag_ctx {
+       u8 reserved;
+       u8 byte1;
+       __le16 msem_ctx_upd_seq;
+       u8 flags0;
+#define YSTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_MASK  0xF
+#define YSTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0
+#define YSTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_MASK     0x1
+#define YSTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_SHIFT    4
+#define YSTORM_RDMA_TASK_AG_CTX_BIT1_MASK             0x1
+#define YSTORM_RDMA_TASK_AG_CTX_BIT1_SHIFT            5
+#define YSTORM_RDMA_TASK_AG_CTX_VALID_MASK            0x1
+#define YSTORM_RDMA_TASK_AG_CTX_VALID_SHIFT           6
+#define YSTORM_RDMA_TASK_AG_CTX_BIT3_MASK             0x1
+#define YSTORM_RDMA_TASK_AG_CTX_BIT3_SHIFT            7
+       u8 flags1;
+#define YSTORM_RDMA_TASK_AG_CTX_CF0_MASK              0x3
+#define YSTORM_RDMA_TASK_AG_CTX_CF0_SHIFT             0
+#define YSTORM_RDMA_TASK_AG_CTX_CF1_MASK              0x3
+#define YSTORM_RDMA_TASK_AG_CTX_CF1_SHIFT             2
+#define YSTORM_RDMA_TASK_AG_CTX_CF2SPECIAL_MASK       0x3
+#define YSTORM_RDMA_TASK_AG_CTX_CF2SPECIAL_SHIFT      4
+#define YSTORM_RDMA_TASK_AG_CTX_CF0EN_MASK            0x1
+#define YSTORM_RDMA_TASK_AG_CTX_CF0EN_SHIFT           6
+#define YSTORM_RDMA_TASK_AG_CTX_CF1EN_MASK            0x1
+#define YSTORM_RDMA_TASK_AG_CTX_CF1EN_SHIFT           7
+       u8 flags2;
+#define YSTORM_RDMA_TASK_AG_CTX_BIT4_MASK             0x1
+#define YSTORM_RDMA_TASK_AG_CTX_BIT4_SHIFT            0
+#define YSTORM_RDMA_TASK_AG_CTX_RULE0EN_MASK          0x1
+#define YSTORM_RDMA_TASK_AG_CTX_RULE0EN_SHIFT         1
+#define YSTORM_RDMA_TASK_AG_CTX_RULE1EN_MASK          0x1
+#define YSTORM_RDMA_TASK_AG_CTX_RULE1EN_SHIFT         2
+#define YSTORM_RDMA_TASK_AG_CTX_RULE2EN_MASK          0x1
+#define YSTORM_RDMA_TASK_AG_CTX_RULE2EN_SHIFT         3
+#define YSTORM_RDMA_TASK_AG_CTX_RULE3EN_MASK          0x1
+#define YSTORM_RDMA_TASK_AG_CTX_RULE3EN_SHIFT         4
+#define YSTORM_RDMA_TASK_AG_CTX_RULE4EN_MASK          0x1
+#define YSTORM_RDMA_TASK_AG_CTX_RULE4EN_SHIFT         5
+#define YSTORM_RDMA_TASK_AG_CTX_RULE5EN_MASK          0x1
+#define YSTORM_RDMA_TASK_AG_CTX_RULE5EN_SHIFT         6
+#define YSTORM_RDMA_TASK_AG_CTX_RULE6EN_MASK          0x1
+#define YSTORM_RDMA_TASK_AG_CTX_RULE6EN_SHIFT         7
+       u8 key;
+       __le32 mw_cnt;
+       u8 ref_cnt_seq;
+       u8 ctx_upd_seq;
+       __le16 dif_flags;
+       __le16 tx_ref_count;
+       __le16 last_used_ltid;
+       __le16 parent_mr_lo;
+       __le16 parent_mr_hi;
+       __le32 fbo_lo;
+       __le32 fbo_hi;
+};
+
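+/* mstorm RDMA task aggregative context */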
+struct mstorm_rdma_task_ag_ctx {
+       u8 reserved;
+       u8 byte1;
+       __le16 icid;
+       u8 flags0;
+#define MSTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_MASK  0xF
+#define MSTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0
+#define MSTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_MASK     0x1
+#define MSTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_SHIFT    4
+#define MSTORM_RDMA_TASK_AG_CTX_BIT1_MASK             0x1
+#define MSTORM_RDMA_TASK_AG_CTX_BIT1_SHIFT            5
+#define MSTORM_RDMA_TASK_AG_CTX_BIT2_MASK             0x1
+#define MSTORM_RDMA_TASK_AG_CTX_BIT2_SHIFT            6
+#define MSTORM_RDMA_TASK_AG_CTX_BIT3_MASK             0x1
+#define MSTORM_RDMA_TASK_AG_CTX_BIT3_SHIFT            7
+       u8 flags1;
+#define MSTORM_RDMA_TASK_AG_CTX_CF0_MASK              0x3
+#define MSTORM_RDMA_TASK_AG_CTX_CF0_SHIFT             0
+#define MSTORM_RDMA_TASK_AG_CTX_CF1_MASK              0x3
+#define MSTORM_RDMA_TASK_AG_CTX_CF1_SHIFT             2
+#define MSTORM_RDMA_TASK_AG_CTX_CF2_MASK              0x3
+#define MSTORM_RDMA_TASK_AG_CTX_CF2_SHIFT             4
+#define MSTORM_RDMA_TASK_AG_CTX_CF0EN_MASK            0x1
+#define MSTORM_RDMA_TASK_AG_CTX_CF0EN_SHIFT           6
+#define MSTORM_RDMA_TASK_AG_CTX_CF1EN_MASK            0x1
+#define MSTORM_RDMA_TASK_AG_CTX_CF1EN_SHIFT           7
+       u8 flags2;
+#define MSTORM_RDMA_TASK_AG_CTX_CF2EN_MASK            0x1
+#define MSTORM_RDMA_TASK_AG_CTX_CF2EN_SHIFT           0
+#define MSTORM_RDMA_TASK_AG_CTX_RULE0EN_MASK          0x1
+#define MSTORM_RDMA_TASK_AG_CTX_RULE0EN_SHIFT         1
+#define MSTORM_RDMA_TASK_AG_CTX_RULE1EN_MASK          0x1
+#define MSTORM_RDMA_TASK_AG_CTX_RULE1EN_SHIFT         2
+#define MSTORM_RDMA_TASK_AG_CTX_RULE2EN_MASK          0x1
+#define MSTORM_RDMA_TASK_AG_CTX_RULE2EN_SHIFT         3
+#define MSTORM_RDMA_TASK_AG_CTX_RULE3EN_MASK          0x1
+#define MSTORM_RDMA_TASK_AG_CTX_RULE3EN_SHIFT         4
+#define MSTORM_RDMA_TASK_AG_CTX_RULE4EN_MASK          0x1
+#define MSTORM_RDMA_TASK_AG_CTX_RULE4EN_SHIFT         5
+#define MSTORM_RDMA_TASK_AG_CTX_RULE5EN_MASK          0x1
+#define MSTORM_RDMA_TASK_AG_CTX_RULE5EN_SHIFT         6
+#define MSTORM_RDMA_TASK_AG_CTX_RULE6EN_MASK          0x1
+#define MSTORM_RDMA_TASK_AG_CTX_RULE6EN_SHIFT         7
+       u8 key;
+       __le32 mw_cnt;
+       u8 ref_cnt_seq;
+       u8 ctx_upd_seq;
+       __le16 dif_flags;
+       __le16 tx_ref_count;
+       __le16 last_used_ltid;
+       __le16 parent_mr_lo;
+       __le16 parent_mr_hi;
+       __le32 fbo_lo;
+       __le32 fbo_hi;
+};
+
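+/* ustorm RDMA task storm context */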
+struct ustorm_rdma_task_st_ctx {
+       struct regpair temp[2];
+};
+
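+/* ustorm RDMA task aggregative context */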
+struct ustorm_rdma_task_ag_ctx {
+       u8 reserved;
+       u8 byte1;
+       __le16 icid;
+       u8 flags0;
+#define USTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_MASK         0xF
+#define USTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_SHIFT        0
+#define USTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_MASK            0x1
+#define USTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_SHIFT           4
+#define USTORM_RDMA_TASK_AG_CTX_DIF_RUNT_VALID_MASK          0x1
+#define USTORM_RDMA_TASK_AG_CTX_DIF_RUNT_VALID_SHIFT         5
+#define USTORM_RDMA_TASK_AG_CTX_DIF_WRITE_RESULT_CF_MASK     0x3
+#define USTORM_RDMA_TASK_AG_CTX_DIF_WRITE_RESULT_CF_SHIFT    6
+       u8 flags1;
+#define USTORM_RDMA_TASK_AG_CTX_DIF_RESULT_TOGGLE_BIT_MASK   0x3
+#define USTORM_RDMA_TASK_AG_CTX_DIF_RESULT_TOGGLE_BIT_SHIFT  0
+#define USTORM_RDMA_TASK_AG_CTX_DIF_TX_IO_FLG_MASK           0x3
+#define USTORM_RDMA_TASK_AG_CTX_DIF_TX_IO_FLG_SHIFT          2
+#define USTORM_RDMA_TASK_AG_CTX_CF3_MASK                     0x3
+#define USTORM_RDMA_TASK_AG_CTX_CF3_SHIFT                    4
+#define USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_CF_MASK            0x3
+#define USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_CF_SHIFT           6
+       u8 flags2;
+#define USTORM_RDMA_TASK_AG_CTX_DIF_WRITE_RESULT_CF_EN_MASK  0x1
+#define USTORM_RDMA_TASK_AG_CTX_DIF_WRITE_RESULT_CF_EN_SHIFT 0
+#define USTORM_RDMA_TASK_AG_CTX_RESERVED2_MASK               0x1
+#define USTORM_RDMA_TASK_AG_CTX_RESERVED2_SHIFT              1
+#define USTORM_RDMA_TASK_AG_CTX_RESERVED3_MASK               0x1
+#define USTORM_RDMA_TASK_AG_CTX_RESERVED3_SHIFT              2
+#define USTORM_RDMA_TASK_AG_CTX_CF3EN_MASK                   0x1
+#define USTORM_RDMA_TASK_AG_CTX_CF3EN_SHIFT                  3
+#define USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_CF_EN_MASK         0x1
+#define USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_CF_EN_SHIFT        4
+#define USTORM_RDMA_TASK_AG_CTX_RULE0EN_MASK                 0x1
+#define USTORM_RDMA_TASK_AG_CTX_RULE0EN_SHIFT                5
+#define USTORM_RDMA_TASK_AG_CTX_RULE1EN_MASK                 0x1
+#define USTORM_RDMA_TASK_AG_CTX_RULE1EN_SHIFT                6
+#define USTORM_RDMA_TASK_AG_CTX_RULE2EN_MASK                 0x1
+#define USTORM_RDMA_TASK_AG_CTX_RULE2EN_SHIFT                7
+       u8 flags3;
+#define USTORM_RDMA_TASK_AG_CTX_RULE3EN_MASK                 0x1
+#define USTORM_RDMA_TASK_AG_CTX_RULE3EN_SHIFT                0
+#define USTORM_RDMA_TASK_AG_CTX_RULE4EN_MASK                 0x1
+#define USTORM_RDMA_TASK_AG_CTX_RULE4EN_SHIFT                1
+#define USTORM_RDMA_TASK_AG_CTX_RULE5EN_MASK                 0x1
+#define USTORM_RDMA_TASK_AG_CTX_RULE5EN_SHIFT                2
+#define USTORM_RDMA_TASK_AG_CTX_RULE6EN_MASK                 0x1
+#define USTORM_RDMA_TASK_AG_CTX_RULE6EN_SHIFT                3
+#define USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_TYPE_MASK          0xF
+#define USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_TYPE_SHIFT         4
+       __le32 dif_err_intervals;
+       __le32 dif_error_1st_interval;
+       __le32 reg2;
+       __le32 dif_runt_value;
+       __le32 reg4;
+       __le32 reg5;
+};
+
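+/* RDMA task context */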
+struct rdma_task_context {
+       struct ystorm_rdma_task_st_ctx ystorm_st_context;
+       struct ystorm_rdma_task_ag_ctx ystorm_ag_context;
+       struct tdif_task_context tdif_context;
+       struct mstorm_rdma_task_ag_ctx mstorm_ag_context;
+       struct mstorm_rdma_task_st_ctx mstorm_st_context;
+       struct rdif_task_context rdif_context;
+       struct ustorm_rdma_task_st_ctx ustorm_st_context;
+       struct regpair ustorm_st_padding[2];
+       struct ustorm_rdma_task_ag_ctx ustorm_ag_context;
+};
+
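+/* RDMA TID types */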
+enum rdma_tid_type {
+       RDMA_TID_REGISTERED_MR,
+       RDMA_TID_FMR,
+       RDMA_TID_MW_TYPE1,
+       RDMA_TID_MW_TYPE2A,
+       MAX_RDMA_TID_TYPE
+};
+
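+/* mstorm RDMA connection aggregative context */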
+struct mstorm_rdma_conn_ag_ctx {
+       u8 byte0;
+       u8 byte1;
+       u8 flags0;
+#define MSTORM_RDMA_CONN_AG_CTX_BIT0_MASK     0x1
+#define MSTORM_RDMA_CONN_AG_CTX_BIT0_SHIFT    0
+#define MSTORM_RDMA_CONN_AG_CTX_BIT1_MASK     0x1
+#define MSTORM_RDMA_CONN_AG_CTX_BIT1_SHIFT    1
+#define MSTORM_RDMA_CONN_AG_CTX_CF0_MASK      0x3
+#define MSTORM_RDMA_CONN_AG_CTX_CF0_SHIFT     2
+#define MSTORM_RDMA_CONN_AG_CTX_CF1_MASK      0x3
+#define MSTORM_RDMA_CONN_AG_CTX_CF1_SHIFT     4
+#define MSTORM_RDMA_CONN_AG_CTX_CF2_MASK      0x3
+#define MSTORM_RDMA_CONN_AG_CTX_CF2_SHIFT     6
+       u8 flags1;
+#define MSTORM_RDMA_CONN_AG_CTX_CF0EN_MASK    0x1
+#define MSTORM_RDMA_CONN_AG_CTX_CF0EN_SHIFT   0
+#define MSTORM_RDMA_CONN_AG_CTX_CF1EN_MASK    0x1
+#define MSTORM_RDMA_CONN_AG_CTX_CF1EN_SHIFT   1
+#define MSTORM_RDMA_CONN_AG_CTX_CF2EN_MASK    0x1
+#define MSTORM_RDMA_CONN_AG_CTX_CF2EN_SHIFT   2
+#define MSTORM_RDMA_CONN_AG_CTX_RULE0EN_MASK  0x1
+#define MSTORM_RDMA_CONN_AG_CTX_RULE0EN_SHIFT 3
+#define MSTORM_RDMA_CONN_AG_CTX_RULE1EN_MASK  0x1
+#define MSTORM_RDMA_CONN_AG_CTX_RULE1EN_SHIFT 4
+#define MSTORM_RDMA_CONN_AG_CTX_RULE2EN_MASK  0x1
+#define MSTORM_RDMA_CONN_AG_CTX_RULE2EN_SHIFT 5
+#define MSTORM_RDMA_CONN_AG_CTX_RULE3EN_MASK  0x1
+#define MSTORM_RDMA_CONN_AG_CTX_RULE3EN_SHIFT 6
+#define MSTORM_RDMA_CONN_AG_CTX_RULE4EN_MASK  0x1
+#define MSTORM_RDMA_CONN_AG_CTX_RULE4EN_SHIFT 7
+       __le16 word0;
+       __le16 word1;
+       __le32 reg0;
+       __le32 reg1;
+};
+
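+/* tstorm RDMA connection aggregative context */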
+struct tstorm_rdma_conn_ag_ctx {
+       u8 reserved0;
+       u8 byte1;
+       u8 flags0;
+#define TSTORM_RDMA_CONN_AG_CTX_EXIST_IN_QM0_MASK          0x1
+#define TSTORM_RDMA_CONN_AG_CTX_EXIST_IN_QM0_SHIFT         0
+#define TSTORM_RDMA_CONN_AG_CTX_BIT1_MASK                  0x1
+#define TSTORM_RDMA_CONN_AG_CTX_BIT1_SHIFT                 1
+#define TSTORM_RDMA_CONN_AG_CTX_BIT2_MASK                  0x1
+#define TSTORM_RDMA_CONN_AG_CTX_BIT2_SHIFT                 2
+#define TSTORM_RDMA_CONN_AG_CTX_BIT3_MASK                  0x1
+#define TSTORM_RDMA_CONN_AG_CTX_BIT3_SHIFT                 3
+#define TSTORM_RDMA_CONN_AG_CTX_BIT4_MASK                  0x1
+#define TSTORM_RDMA_CONN_AG_CTX_BIT4_SHIFT                 4
+#define TSTORM_RDMA_CONN_AG_CTX_BIT5_MASK                  0x1
+#define TSTORM_RDMA_CONN_AG_CTX_BIT5_SHIFT                 5
+#define TSTORM_RDMA_CONN_AG_CTX_CF0_MASK                   0x3
+#define TSTORM_RDMA_CONN_AG_CTX_CF0_SHIFT                  6
+       u8 flags1;
+#define TSTORM_RDMA_CONN_AG_CTX_CF1_MASK                   0x3
+#define TSTORM_RDMA_CONN_AG_CTX_CF1_SHIFT                  0
+#define TSTORM_RDMA_CONN_AG_CTX_CF2_MASK                   0x3
+#define TSTORM_RDMA_CONN_AG_CTX_CF2_SHIFT                  2
+#define TSTORM_RDMA_CONN_AG_CTX_TIMER_STOP_ALL_CF_MASK     0x3
+#define TSTORM_RDMA_CONN_AG_CTX_TIMER_STOP_ALL_CF_SHIFT    4
+#define TSTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_MASK           0x3
+#define TSTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT          6
+       u8 flags2;
+#define TSTORM_RDMA_CONN_AG_CTX_MSTORM_FLUSH_CF_MASK       0x3
+#define TSTORM_RDMA_CONN_AG_CTX_MSTORM_FLUSH_CF_SHIFT      0
+#define TSTORM_RDMA_CONN_AG_CTX_CF6_MASK                   0x3
+#define TSTORM_RDMA_CONN_AG_CTX_CF6_SHIFT                  2
+#define TSTORM_RDMA_CONN_AG_CTX_CF7_MASK                   0x3
+#define TSTORM_RDMA_CONN_AG_CTX_CF7_SHIFT                  4
+#define TSTORM_RDMA_CONN_AG_CTX_CF8_MASK                   0x3
+#define TSTORM_RDMA_CONN_AG_CTX_CF8_SHIFT                  6
+       u8 flags3;
+#define TSTORM_RDMA_CONN_AG_CTX_CF9_MASK                   0x3
+#define TSTORM_RDMA_CONN_AG_CTX_CF9_SHIFT                  0
+#define TSTORM_RDMA_CONN_AG_CTX_CF10_MASK                  0x3
+#define TSTORM_RDMA_CONN_AG_CTX_CF10_SHIFT                 2
+#define TSTORM_RDMA_CONN_AG_CTX_CF0EN_MASK                 0x1
+#define TSTORM_RDMA_CONN_AG_CTX_CF0EN_SHIFT                4
+#define TSTORM_RDMA_CONN_AG_CTX_CF1EN_MASK                 0x1
+#define TSTORM_RDMA_CONN_AG_CTX_CF1EN_SHIFT                5
+#define TSTORM_RDMA_CONN_AG_CTX_CF2EN_MASK                 0x1
+#define TSTORM_RDMA_CONN_AG_CTX_CF2EN_SHIFT                6
+#define TSTORM_RDMA_CONN_AG_CTX_TIMER_STOP_ALL_CF_EN_MASK  0x1
+#define TSTORM_RDMA_CONN_AG_CTX_TIMER_STOP_ALL_CF_EN_SHIFT 7
+       u8 flags4;
+#define TSTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK        0x1
+#define TSTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT       0
+#define TSTORM_RDMA_CONN_AG_CTX_MSTORM_FLUSH_CF_EN_MASK    0x1
+#define TSTORM_RDMA_CONN_AG_CTX_MSTORM_FLUSH_CF_EN_SHIFT   1
+#define TSTORM_RDMA_CONN_AG_CTX_CF6EN_MASK                 0x1
+#define TSTORM_RDMA_CONN_AG_CTX_CF6EN_SHIFT                2
+#define TSTORM_RDMA_CONN_AG_CTX_CF7EN_MASK                 0x1
+#define TSTORM_RDMA_CONN_AG_CTX_CF7EN_SHIFT                3
+#define TSTORM_RDMA_CONN_AG_CTX_CF8EN_MASK                 0x1
+#define TSTORM_RDMA_CONN_AG_CTX_CF8EN_SHIFT                4
+#define TSTORM_RDMA_CONN_AG_CTX_CF9EN_MASK                 0x1
+#define TSTORM_RDMA_CONN_AG_CTX_CF9EN_SHIFT                5
+#define TSTORM_RDMA_CONN_AG_CTX_CF10EN_MASK                0x1
+#define TSTORM_RDMA_CONN_AG_CTX_CF10EN_SHIFT               6
+#define TSTORM_RDMA_CONN_AG_CTX_RULE0EN_MASK               0x1
+#define TSTORM_RDMA_CONN_AG_CTX_RULE0EN_SHIFT              7
+       u8 flags5;
+#define TSTORM_RDMA_CONN_AG_CTX_RULE1EN_MASK               0x1
+#define TSTORM_RDMA_CONN_AG_CTX_RULE1EN_SHIFT              0
+#define TSTORM_RDMA_CONN_AG_CTX_RULE2EN_MASK               0x1
+#define TSTORM_RDMA_CONN_AG_CTX_RULE2EN_SHIFT              1
+#define TSTORM_RDMA_CONN_AG_CTX_RULE3EN_MASK               0x1
+#define TSTORM_RDMA_CONN_AG_CTX_RULE3EN_SHIFT              2
+#define TSTORM_RDMA_CONN_AG_CTX_RULE4EN_MASK               0x1
+#define TSTORM_RDMA_CONN_AG_CTX_RULE4EN_SHIFT              3
+#define TSTORM_RDMA_CONN_AG_CTX_RULE5EN_MASK               0x1
+#define TSTORM_RDMA_CONN_AG_CTX_RULE5EN_SHIFT              4
+#define TSTORM_RDMA_CONN_AG_CTX_RULE6EN_MASK               0x1
+#define TSTORM_RDMA_CONN_AG_CTX_RULE6EN_SHIFT              5
+#define TSTORM_RDMA_CONN_AG_CTX_RULE7EN_MASK               0x1
+#define TSTORM_RDMA_CONN_AG_CTX_RULE7EN_SHIFT              6
+#define TSTORM_RDMA_CONN_AG_CTX_RULE8EN_MASK               0x1
+#define TSTORM_RDMA_CONN_AG_CTX_RULE8EN_SHIFT              7
+       __le32 reg0;
+       __le32 reg1;
+       __le32 reg2;
+       __le32 reg3;
+       __le32 reg4;
+       __le32 reg5;
+       __le32 reg6;
+       __le32 reg7;
+       __le32 reg8;
+       u8 byte2;
+       u8 byte3;
+       __le16 word0;
+       u8 byte4;
+       u8 byte5;
+       __le16 word1;
+       __le16 word2;
+       __le16 word3;
+       __le32 reg9;
+       __le32 reg10;
+};
+
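+/* tstorm RDMA task aggregative context */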
+struct tstorm_rdma_task_ag_ctx {
+       u8 byte0;
+       u8 byte1;
+       __le16 word0;
+       u8 flags0;
+#define TSTORM_RDMA_TASK_AG_CTX_NIBBLE0_MASK  0xF
+#define TSTORM_RDMA_TASK_AG_CTX_NIBBLE0_SHIFT 0
+#define TSTORM_RDMA_TASK_AG_CTX_BIT0_MASK     0x1
+#define TSTORM_RDMA_TASK_AG_CTX_BIT0_SHIFT    4
+#define TSTORM_RDMA_TASK_AG_CTX_BIT1_MASK     0x1
+#define TSTORM_RDMA_TASK_AG_CTX_BIT1_SHIFT    5
+#define TSTORM_RDMA_TASK_AG_CTX_BIT2_MASK     0x1
+#define TSTORM_RDMA_TASK_AG_CTX_BIT2_SHIFT    6
+#define TSTORM_RDMA_TASK_AG_CTX_BIT3_MASK     0x1
+#define TSTORM_RDMA_TASK_AG_CTX_BIT3_SHIFT    7
+       u8 flags1;
+#define TSTORM_RDMA_TASK_AG_CTX_BIT4_MASK     0x1
+#define TSTORM_RDMA_TASK_AG_CTX_BIT4_SHIFT    0
+#define TSTORM_RDMA_TASK_AG_CTX_BIT5_MASK     0x1
+#define TSTORM_RDMA_TASK_AG_CTX_BIT5_SHIFT    1
+#define TSTORM_RDMA_TASK_AG_CTX_CF0_MASK      0x3
+#define TSTORM_RDMA_TASK_AG_CTX_CF0_SHIFT     2
+#define TSTORM_RDMA_TASK_AG_CTX_CF1_MASK      0x3
+#define TSTORM_RDMA_TASK_AG_CTX_CF1_SHIFT     4
+#define TSTORM_RDMA_TASK_AG_CTX_CF2_MASK      0x3
+#define TSTORM_RDMA_TASK_AG_CTX_CF2_SHIFT     6
+       u8 flags2;
+#define TSTORM_RDMA_TASK_AG_CTX_CF3_MASK      0x3
+#define TSTORM_RDMA_TASK_AG_CTX_CF3_SHIFT     0
+#define TSTORM_RDMA_TASK_AG_CTX_CF4_MASK      0x3
+#define TSTORM_RDMA_TASK_AG_CTX_CF4_SHIFT     2
+#define TSTORM_RDMA_TASK_AG_CTX_CF5_MASK      0x3
+#define TSTORM_RDMA_TASK_AG_CTX_CF5_SHIFT     4
+#define TSTORM_RDMA_TASK_AG_CTX_CF6_MASK      0x3
+#define TSTORM_RDMA_TASK_AG_CTX_CF6_SHIFT     6
+       u8 flags3;
+#define TSTORM_RDMA_TASK_AG_CTX_CF7_MASK      0x3
+#define TSTORM_RDMA_TASK_AG_CTX_CF7_SHIFT     0
+#define TSTORM_RDMA_TASK_AG_CTX_CF0EN_MASK    0x1
+#define TSTORM_RDMA_TASK_AG_CTX_CF0EN_SHIFT   2
+#define TSTORM_RDMA_TASK_AG_CTX_CF1EN_MASK    0x1
+#define TSTORM_RDMA_TASK_AG_CTX_CF1EN_SHIFT   3
+#define TSTORM_RDMA_TASK_AG_CTX_CF2EN_MASK    0x1
+#define TSTORM_RDMA_TASK_AG_CTX_CF2EN_SHIFT   4
+#define TSTORM_RDMA_TASK_AG_CTX_CF3EN_MASK    0x1
+#define TSTORM_RDMA_TASK_AG_CTX_CF3EN_SHIFT   5
+#define TSTORM_RDMA_TASK_AG_CTX_CF4EN_MASK    0x1
+#define TSTORM_RDMA_TASK_AG_CTX_CF4EN_SHIFT   6
+#define TSTORM_RDMA_TASK_AG_CTX_CF5EN_MASK    0x1
+#define TSTORM_RDMA_TASK_AG_CTX_CF5EN_SHIFT   7
+       u8 flags4;
+#define TSTORM_RDMA_TASK_AG_CTX_CF6EN_MASK    0x1
+#define TSTORM_RDMA_TASK_AG_CTX_CF6EN_SHIFT   0
+#define TSTORM_RDMA_TASK_AG_CTX_CF7EN_MASK    0x1
+#define TSTORM_RDMA_TASK_AG_CTX_CF7EN_SHIFT   1
+#define TSTORM_RDMA_TASK_AG_CTX_RULE0EN_MASK  0x1
+#define TSTORM_RDMA_TASK_AG_CTX_RULE0EN_SHIFT 2
+#define TSTORM_RDMA_TASK_AG_CTX_RULE1EN_MASK  0x1
+#define TSTORM_RDMA_TASK_AG_CTX_RULE1EN_SHIFT 3
+#define TSTORM_RDMA_TASK_AG_CTX_RULE2EN_MASK  0x1
+#define TSTORM_RDMA_TASK_AG_CTX_RULE2EN_SHIFT 4
+#define TSTORM_RDMA_TASK_AG_CTX_RULE3EN_MASK  0x1
+#define TSTORM_RDMA_TASK_AG_CTX_RULE3EN_SHIFT 5
+#define TSTORM_RDMA_TASK_AG_CTX_RULE4EN_MASK  0x1
+#define TSTORM_RDMA_TASK_AG_CTX_RULE4EN_SHIFT 6
+#define TSTORM_RDMA_TASK_AG_CTX_RULE5EN_MASK  0x1
+#define TSTORM_RDMA_TASK_AG_CTX_RULE5EN_SHIFT 7
+       u8 byte2;
+       __le16 word1;
+       __le32 reg0;
+       u8 byte3;
+       u8 byte4;
+       __le16 word2;
+       __le16 word3;
+       __le16 word4;
+       __le32 reg1;
+       __le32 reg2;
+};
+
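+/* ustorm RDMA connection aggregative context */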
+struct ustorm_rdma_conn_ag_ctx {
+       u8 reserved;
+       u8 byte1;
+       u8 flags0;
+#define USTORM_RDMA_CONN_AG_CTX_EXIST_IN_QM0_MASK     0x1
+#define USTORM_RDMA_CONN_AG_CTX_EXIST_IN_QM0_SHIFT    0
+#define USTORM_RDMA_CONN_AG_CTX_BIT1_MASK             0x1
+#define USTORM_RDMA_CONN_AG_CTX_BIT1_SHIFT            1
+#define USTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_MASK      0x3
+#define USTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT     2
+#define USTORM_RDMA_CONN_AG_CTX_CF1_MASK              0x3
+#define USTORM_RDMA_CONN_AG_CTX_CF1_SHIFT             4
+#define USTORM_RDMA_CONN_AG_CTX_CF2_MASK              0x3
+#define USTORM_RDMA_CONN_AG_CTX_CF2_SHIFT             6
+       u8 flags1;
+#define USTORM_RDMA_CONN_AG_CTX_CF3_MASK              0x3
+#define USTORM_RDMA_CONN_AG_CTX_CF3_SHIFT             0
+#define USTORM_RDMA_CONN_AG_CTX_CQ_ARM_SE_CF_MASK     0x3
+#define USTORM_RDMA_CONN_AG_CTX_CQ_ARM_SE_CF_SHIFT    2
+#define USTORM_RDMA_CONN_AG_CTX_CQ_ARM_CF_MASK        0x3
+#define USTORM_RDMA_CONN_AG_CTX_CQ_ARM_CF_SHIFT       4
+#define USTORM_RDMA_CONN_AG_CTX_CF6_MASK              0x3
+#define USTORM_RDMA_CONN_AG_CTX_CF6_SHIFT             6
+       u8 flags2;
+#define USTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK   0x1
+#define USTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT  0
+#define USTORM_RDMA_CONN_AG_CTX_CF1EN_MASK            0x1
+#define USTORM_RDMA_CONN_AG_CTX_CF1EN_SHIFT           1
+#define USTORM_RDMA_CONN_AG_CTX_CF2EN_MASK            0x1
+#define USTORM_RDMA_CONN_AG_CTX_CF2EN_SHIFT           2
+#define USTORM_RDMA_CONN_AG_CTX_CF3EN_MASK            0x1
+#define USTORM_RDMA_CONN_AG_CTX_CF3EN_SHIFT           3
+#define USTORM_RDMA_CONN_AG_CTX_CQ_ARM_SE_CF_EN_MASK  0x1
+#define USTORM_RDMA_CONN_AG_CTX_CQ_ARM_SE_CF_EN_SHIFT 4
+#define USTORM_RDMA_CONN_AG_CTX_CQ_ARM_CF_EN_MASK     0x1
+#define USTORM_RDMA_CONN_AG_CTX_CQ_ARM_CF_EN_SHIFT    5
+#define USTORM_RDMA_CONN_AG_CTX_CF6EN_MASK            0x1
+#define USTORM_RDMA_CONN_AG_CTX_CF6EN_SHIFT           6
+#define USTORM_RDMA_CONN_AG_CTX_CQ_SE_EN_MASK         0x1
+#define USTORM_RDMA_CONN_AG_CTX_CQ_SE_EN_SHIFT        7
+       u8 flags3;
+#define USTORM_RDMA_CONN_AG_CTX_CQ_EN_MASK            0x1
+#define USTORM_RDMA_CONN_AG_CTX_CQ_EN_SHIFT           0
+#define USTORM_RDMA_CONN_AG_CTX_RULE2EN_MASK          0x1
+#define USTORM_RDMA_CONN_AG_CTX_RULE2EN_SHIFT         1
+#define USTORM_RDMA_CONN_AG_CTX_RULE3EN_MASK          0x1
+#define USTORM_RDMA_CONN_AG_CTX_RULE3EN_SHIFT         2
+#define USTORM_RDMA_CONN_AG_CTX_RULE4EN_MASK          0x1
+#define USTORM_RDMA_CONN_AG_CTX_RULE4EN_SHIFT         3
+#define USTORM_RDMA_CONN_AG_CTX_RULE5EN_MASK          0x1
+#define USTORM_RDMA_CONN_AG_CTX_RULE5EN_SHIFT         4
+#define USTORM_RDMA_CONN_AG_CTX_RULE6EN_MASK          0x1
+#define USTORM_RDMA_CONN_AG_CTX_RULE6EN_SHIFT         5
+#define USTORM_RDMA_CONN_AG_CTX_RULE7EN_MASK          0x1
+#define USTORM_RDMA_CONN_AG_CTX_RULE7EN_SHIFT         6
+#define USTORM_RDMA_CONN_AG_CTX_RULE8EN_MASK          0x1
+#define USTORM_RDMA_CONN_AG_CTX_RULE8EN_SHIFT         7
+       u8 byte2;
+       u8 byte3;
+       __le16 conn_dpi;
+       __le16 word1;
+       __le32 cq_cons;
+       __le32 cq_se_prod;
+       __le32 cq_prod;
+       __le32 reg3;
+       __le16 int_timeout;
+       __le16 word3;
+};
+
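+/* xstorm RoCE connection aggregative context (DQ ext-load part) */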
+struct xstorm_roce_conn_ag_ctx_dq_ext_ld_part {
+       u8 reserved0;
+       u8 state;
+       u8 flags0;
+#define XSTORMROCECONNAGCTXDQEXTLDPART_EXIST_IN_QM0_MASK      0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_EXIST_IN_QM0_SHIFT     0
+#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT1_MASK              0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT1_SHIFT             1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT2_MASK              0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT2_SHIFT             2
+#define XSTORMROCECONNAGCTXDQEXTLDPART_EXIST_IN_QM3_MASK      0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_EXIST_IN_QM3_SHIFT     3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT4_MASK              0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT4_SHIFT             4
+#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT5_MASK              0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT5_SHIFT             5
+#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT6_MASK              0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT6_SHIFT             6
+#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT7_MASK              0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT7_SHIFT             7
+       u8 flags1;
+#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT8_MASK              0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT8_SHIFT             0
+#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT9_MASK              0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT9_SHIFT             1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT10_MASK             0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT10_SHIFT            2
+#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT11_MASK             0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT11_SHIFT            3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT12_MASK             0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT12_SHIFT            4
+#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT13_MASK             0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT13_SHIFT            5
+#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT14_MASK             0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT14_SHIFT            6
+#define XSTORMROCECONNAGCTXDQEXTLDPART_YSTORM_FLUSH_MASK      0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_YSTORM_FLUSH_SHIFT     7
+       u8 flags2;
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF0_MASK               0x3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF0_SHIFT              0
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF1_MASK               0x3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF1_SHIFT              2
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF2_MASK               0x3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF2_SHIFT              4
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF3_MASK               0x3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF3_SHIFT              6
+       u8 flags3;
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF4_MASK               0x3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF4_SHIFT              0
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF5_MASK               0x3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF5_SHIFT              2
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF6_MASK               0x3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF6_SHIFT              4
+#define XSTORMROCECONNAGCTXDQEXTLDPART_FLUSH_Q0_CF_MASK       0x3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_FLUSH_Q0_CF_SHIFT      6
+       u8 flags4;
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF8_MASK               0x3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF8_SHIFT              0
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF9_MASK               0x3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF9_SHIFT              2
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF10_MASK              0x3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF10_SHIFT             4
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF11_MASK              0x3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF11_SHIFT             6
+       u8 flags5;
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF12_MASK              0x3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF12_SHIFT             0
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF13_MASK              0x3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF13_SHIFT             2
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF14_MASK              0x3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF14_SHIFT             4
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF15_MASK              0x3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF15_SHIFT             6
+       u8 flags6;
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF16_MASK              0x3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF16_SHIFT             0
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF17_MASK              0x3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF17_SHIFT             2
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF18_MASK              0x3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF18_SHIFT             4
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF19_MASK              0x3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF19_SHIFT             6
+       u8 flags7;
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF20_MASK              0x3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF20_SHIFT             0
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF21_MASK              0x3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF21_SHIFT             2
+#define XSTORMROCECONNAGCTXDQEXTLDPART_SLOW_PATH_MASK         0x3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_SLOW_PATH_SHIFT        4
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF0EN_MASK             0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF0EN_SHIFT            6
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF1EN_MASK             0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF1EN_SHIFT            7
+       u8 flags8;
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF2EN_MASK             0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF2EN_SHIFT            0
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF3EN_MASK             0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF3EN_SHIFT            1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF4EN_MASK             0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF4EN_SHIFT            2
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF5EN_MASK             0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF5EN_SHIFT            3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF6EN_MASK             0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF6EN_SHIFT            4
+#define XSTORMROCECONNAGCTXDQEXTLDPART_FLUSH_Q0_CF_EN_MASK    0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_FLUSH_Q0_CF_EN_SHIFT   5
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF8EN_MASK             0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF8EN_SHIFT            6
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF9EN_MASK             0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF9EN_SHIFT            7
+       u8 flags9;
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF10EN_MASK            0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF10EN_SHIFT           0
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF11EN_MASK            0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF11EN_SHIFT           1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF12EN_MASK            0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF12EN_SHIFT           2
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF13EN_MASK            0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF13EN_SHIFT           3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF14EN_MASK            0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF14EN_SHIFT           4
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF15EN_MASK            0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF15EN_SHIFT           5
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF16EN_MASK            0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF16EN_SHIFT           6
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF17EN_MASK            0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF17EN_SHIFT           7
+       u8 flags10;
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF18EN_MASK            0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF18EN_SHIFT           0
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF19EN_MASK            0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF19EN_SHIFT           1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF20EN_MASK            0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF20EN_SHIFT           2
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF21EN_MASK            0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF21EN_SHIFT           3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_SLOW_PATH_EN_MASK      0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_SLOW_PATH_EN_SHIFT     4
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF23EN_MASK            0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF23EN_SHIFT           5
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE0EN_MASK           0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE0EN_SHIFT          6
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE1EN_MASK           0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE1EN_SHIFT          7
+       u8 flags11;
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE2EN_MASK           0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE2EN_SHIFT          0
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE3EN_MASK           0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE3EN_SHIFT          1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE4EN_MASK           0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE4EN_SHIFT          2
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE5EN_MASK           0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE5EN_SHIFT          3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE6EN_MASK           0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE6EN_SHIFT          4
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE7EN_MASK           0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE7EN_SHIFT          5
+#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED1_MASK      0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED1_SHIFT     6
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE9EN_MASK           0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE9EN_SHIFT          7
+       u8 flags12;
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE10EN_MASK          0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE10EN_SHIFT         0
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE11EN_MASK          0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE11EN_SHIFT         1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED2_MASK      0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED2_SHIFT     2
+#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED3_MASK      0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED3_SHIFT     3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE14EN_MASK          0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE14EN_SHIFT         4
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE15EN_MASK          0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE15EN_SHIFT         5
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE16EN_MASK          0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE16EN_SHIFT         6
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE17EN_MASK          0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE17EN_SHIFT         7
+       u8 flags13;
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE18EN_MASK          0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE18EN_SHIFT         0
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE19EN_MASK          0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE19EN_SHIFT         1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED4_MASK      0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED4_SHIFT     2
+#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED5_MASK      0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED5_SHIFT     3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED6_MASK      0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED6_SHIFT     4
+#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED7_MASK      0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED7_SHIFT     5
+#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED8_MASK      0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED8_SHIFT     6
+#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED9_MASK      0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED9_SHIFT     7
+       u8 flags14;
+#define XSTORMROCECONNAGCTXDQEXTLDPART_MIGRATION_MASK         0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_MIGRATION_SHIFT        0
+#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT17_MASK             0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT17_SHIFT            1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_DPM_PORT_NUM_MASK      0x3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_DPM_PORT_NUM_SHIFT     2
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RESERVED_MASK          0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RESERVED_SHIFT         4
+#define XSTORMROCECONNAGCTXDQEXTLDPART_ROCE_EDPM_ENABLE_MASK  0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_ROCE_EDPM_ENABLE_SHIFT 5
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF23_MASK              0x3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF23_SHIFT             6
+       u8 byte2;
+       __le16 physical_q0;
+       __le16 word1;
+       __le16 word2;
+       __le16 word3;
+       __le16 word4;
+       __le16 word5;
+       __le16 conn_dpi;
+       u8 byte3;
+       u8 byte4;
+       u8 byte5;
+       u8 byte6;
+       __le32 reg0;
+       __le32 reg1;
+       __le32 reg2;
+       __le32 snd_nxt_psn;
+       __le32 reg4;
+};
+
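+/* xstorm RDMA connection aggregative context */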
+struct xstorm_rdma_conn_ag_ctx {
+       u8 reserved0;
+       u8 state;
+       u8 flags0;
+#define XSTORM_RDMA_CONN_AG_CTX_EXIST_IN_QM0_MASK      0x1
+#define XSTORM_RDMA_CONN_AG_CTX_EXIST_IN_QM0_SHIFT     0
+#define XSTORM_RDMA_CONN_AG_CTX_BIT1_MASK              0x1
+#define XSTORM_RDMA_CONN_AG_CTX_BIT1_SHIFT             1
+#define XSTORM_RDMA_CONN_AG_CTX_BIT2_MASK              0x1
+#define XSTORM_RDMA_CONN_AG_CTX_BIT2_SHIFT             2
+#define XSTORM_RDMA_CONN_AG_CTX_EXIST_IN_QM3_MASK      0x1
+#define XSTORM_RDMA_CONN_AG_CTX_EXIST_IN_QM3_SHIFT     3
+#define XSTORM_RDMA_CONN_AG_CTX_BIT4_MASK              0x1
+#define XSTORM_RDMA_CONN_AG_CTX_BIT4_SHIFT             4
+#define XSTORM_RDMA_CONN_AG_CTX_BIT5_MASK              0x1
+#define XSTORM_RDMA_CONN_AG_CTX_BIT5_SHIFT             5
+#define XSTORM_RDMA_CONN_AG_CTX_BIT6_MASK              0x1
+#define XSTORM_RDMA_CONN_AG_CTX_BIT6_SHIFT             6
+#define XSTORM_RDMA_CONN_AG_CTX_BIT7_MASK              0x1
+#define XSTORM_RDMA_CONN_AG_CTX_BIT7_SHIFT             7
+       u8 flags1;
+#define XSTORM_RDMA_CONN_AG_CTX_BIT8_MASK              0x1
+#define XSTORM_RDMA_CONN_AG_CTX_BIT8_SHIFT             0
+#define XSTORM_RDMA_CONN_AG_CTX_BIT9_MASK              0x1
+#define XSTORM_RDMA_CONN_AG_CTX_BIT9_SHIFT             1
+#define XSTORM_RDMA_CONN_AG_CTX_BIT10_MASK             0x1
+#define XSTORM_RDMA_CONN_AG_CTX_BIT10_SHIFT            2
+#define XSTORM_RDMA_CONN_AG_CTX_BIT11_MASK             0x1
+#define XSTORM_RDMA_CONN_AG_CTX_BIT11_SHIFT            3
+#define XSTORM_RDMA_CONN_AG_CTX_BIT12_MASK             0x1
+#define XSTORM_RDMA_CONN_AG_CTX_BIT12_SHIFT            4
+#define XSTORM_RDMA_CONN_AG_CTX_BIT13_MASK             0x1
+#define XSTORM_RDMA_CONN_AG_CTX_BIT13_SHIFT            5
+#define XSTORM_RDMA_CONN_AG_CTX_BIT14_MASK             0x1
+#define XSTORM_RDMA_CONN_AG_CTX_BIT14_SHIFT            6
+#define XSTORM_RDMA_CONN_AG_CTX_YSTORM_FLUSH_MASK      0x1
+#define XSTORM_RDMA_CONN_AG_CTX_YSTORM_FLUSH_SHIFT     7
+       u8 flags2;
+#define XSTORM_RDMA_CONN_AG_CTX_CF0_MASK               0x3
+#define XSTORM_RDMA_CONN_AG_CTX_CF0_SHIFT              0
+#define XSTORM_RDMA_CONN_AG_CTX_CF1_MASK               0x3
+#define XSTORM_RDMA_CONN_AG_CTX_CF1_SHIFT              2
+#define XSTORM_RDMA_CONN_AG_CTX_CF2_MASK               0x3
+#define XSTORM_RDMA_CONN_AG_CTX_CF2_SHIFT              4
+#define XSTORM_RDMA_CONN_AG_CTX_CF3_MASK               0x3
+#define XSTORM_RDMA_CONN_AG_CTX_CF3_SHIFT              6
+       u8 flags3;
+#define XSTORM_RDMA_CONN_AG_CTX_CF4_MASK               0x3
+#define XSTORM_RDMA_CONN_AG_CTX_CF4_SHIFT              0
+#define XSTORM_RDMA_CONN_AG_CTX_CF5_MASK               0x3
+#define XSTORM_RDMA_CONN_AG_CTX_CF5_SHIFT              2
+#define XSTORM_RDMA_CONN_AG_CTX_CF6_MASK               0x3
+#define XSTORM_RDMA_CONN_AG_CTX_CF6_SHIFT              4
+#define XSTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_MASK       0x3
+#define XSTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT      6
+       u8 flags4;
+#define XSTORM_RDMA_CONN_AG_CTX_CF8_MASK               0x3
+#define XSTORM_RDMA_CONN_AG_CTX_CF8_SHIFT              0
+#define XSTORM_RDMA_CONN_AG_CTX_CF9_MASK               0x3
+#define XSTORM_RDMA_CONN_AG_CTX_CF9_SHIFT              2
+#define XSTORM_RDMA_CONN_AG_CTX_CF10_MASK              0x3
+#define XSTORM_RDMA_CONN_AG_CTX_CF10_SHIFT             4
+#define XSTORM_RDMA_CONN_AG_CTX_CF11_MASK              0x3
+#define XSTORM_RDMA_CONN_AG_CTX_CF11_SHIFT             6
+       u8 flags5;
+#define XSTORM_RDMA_CONN_AG_CTX_CF12_MASK              0x3
+#define XSTORM_RDMA_CONN_AG_CTX_CF12_SHIFT             0
+#define XSTORM_RDMA_CONN_AG_CTX_CF13_MASK              0x3
+#define XSTORM_RDMA_CONN_AG_CTX_CF13_SHIFT             2
+#define XSTORM_RDMA_CONN_AG_CTX_CF14_MASK              0x3
+#define XSTORM_RDMA_CONN_AG_CTX_CF14_SHIFT             4
+#define XSTORM_RDMA_CONN_AG_CTX_CF15_MASK              0x3
+#define XSTORM_RDMA_CONN_AG_CTX_CF15_SHIFT             6
+       u8 flags6;
+#define XSTORM_RDMA_CONN_AG_CTX_CF16_MASK              0x3
+#define XSTORM_RDMA_CONN_AG_CTX_CF16_SHIFT             0
+#define XSTORM_RDMA_CONN_AG_CTX_CF17_MASK              0x3
+#define XSTORM_RDMA_CONN_AG_CTX_CF17_SHIFT             2
+#define XSTORM_RDMA_CONN_AG_CTX_CF18_MASK              0x3
+#define XSTORM_RDMA_CONN_AG_CTX_CF18_SHIFT             4
+#define XSTORM_RDMA_CONN_AG_CTX_CF19_MASK              0x3
+#define XSTORM_RDMA_CONN_AG_CTX_CF19_SHIFT             6
+       u8 flags7;
+#define XSTORM_RDMA_CONN_AG_CTX_CF20_MASK              0x3
+#define XSTORM_RDMA_CONN_AG_CTX_CF20_SHIFT             0
+#define XSTORM_RDMA_CONN_AG_CTX_CF21_MASK              0x3
+#define XSTORM_RDMA_CONN_AG_CTX_CF21_SHIFT             2
+#define XSTORM_RDMA_CONN_AG_CTX_SLOW_PATH_MASK         0x3
+#define XSTORM_RDMA_CONN_AG_CTX_SLOW_PATH_SHIFT        4
+#define XSTORM_RDMA_CONN_AG_CTX_CF0EN_MASK             0x1
+#define XSTORM_RDMA_CONN_AG_CTX_CF0EN_SHIFT            6
+#define XSTORM_RDMA_CONN_AG_CTX_CF1EN_MASK             0x1
+#define XSTORM_RDMA_CONN_AG_CTX_CF1EN_SHIFT            7
+       u8 flags8;
+#define XSTORM_RDMA_CONN_AG_CTX_CF2EN_MASK             0x1
+#define XSTORM_RDMA_CONN_AG_CTX_CF2EN_SHIFT            0
+#define XSTORM_RDMA_CONN_AG_CTX_CF3EN_MASK             0x1
+#define XSTORM_RDMA_CONN_AG_CTX_CF3EN_SHIFT            1
+#define XSTORM_RDMA_CONN_AG_CTX_CF4EN_MASK             0x1
+#define XSTORM_RDMA_CONN_AG_CTX_CF4EN_SHIFT            2
+#define XSTORM_RDMA_CONN_AG_CTX_CF5EN_MASK             0x1
+#define XSTORM_RDMA_CONN_AG_CTX_CF5EN_SHIFT            3
+#define XSTORM_RDMA_CONN_AG_CTX_CF6EN_MASK             0x1
+#define XSTORM_RDMA_CONN_AG_CTX_CF6EN_SHIFT            4
+#define XSTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK    0x1
+#define XSTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT   5
+#define XSTORM_RDMA_CONN_AG_CTX_CF8EN_MASK             0x1
+#define XSTORM_RDMA_CONN_AG_CTX_CF8EN_SHIFT            6
+#define XSTORM_RDMA_CONN_AG_CTX_CF9EN_MASK             0x1
+#define XSTORM_RDMA_CONN_AG_CTX_CF9EN_SHIFT            7
+       u8 flags9;
+#define XSTORM_RDMA_CONN_AG_CTX_CF10EN_MASK            0x1
+#define XSTORM_RDMA_CONN_AG_CTX_CF10EN_SHIFT           0
+#define XSTORM_RDMA_CONN_AG_CTX_CF11EN_MASK            0x1
+#define XSTORM_RDMA_CONN_AG_CTX_CF11EN_SHIFT           1
+#define XSTORM_RDMA_CONN_AG_CTX_CF12EN_MASK            0x1
+#define XSTORM_RDMA_CONN_AG_CTX_CF12EN_SHIFT           2
+#define XSTORM_RDMA_CONN_AG_CTX_CF13EN_MASK            0x1
+#define XSTORM_RDMA_CONN_AG_CTX_CF13EN_SHIFT           3
+#define XSTORM_RDMA_CONN_AG_CTX_CF14EN_MASK            0x1
+#define XSTORM_RDMA_CONN_AG_CTX_CF14EN_SHIFT           4
+#define XSTORM_RDMA_CONN_AG_CTX_CF15EN_MASK            0x1
+#define XSTORM_RDMA_CONN_AG_CTX_CF15EN_SHIFT           5
+#define XSTORM_RDMA_CONN_AG_CTX_CF16EN_MASK            0x1
+#define XSTORM_RDMA_CONN_AG_CTX_CF16EN_SHIFT           6
+#define XSTORM_RDMA_CONN_AG_CTX_CF17EN_MASK            0x1
+#define XSTORM_RDMA_CONN_AG_CTX_CF17EN_SHIFT           7
+       u8 flags10;
+#define XSTORM_RDMA_CONN_AG_CTX_CF18EN_MASK            0x1
+#define XSTORM_RDMA_CONN_AG_CTX_CF18EN_SHIFT           0
+#define XSTORM_RDMA_CONN_AG_CTX_CF19EN_MASK            0x1
+#define XSTORM_RDMA_CONN_AG_CTX_CF19EN_SHIFT           1
+#define XSTORM_RDMA_CONN_AG_CTX_CF20EN_MASK            0x1
+#define XSTORM_RDMA_CONN_AG_CTX_CF20EN_SHIFT           2
+#define XSTORM_RDMA_CONN_AG_CTX_CF21EN_MASK            0x1
+#define XSTORM_RDMA_CONN_AG_CTX_CF21EN_SHIFT           3
+#define XSTORM_RDMA_CONN_AG_CTX_SLOW_PATH_EN_MASK      0x1
+#define XSTORM_RDMA_CONN_AG_CTX_SLOW_PATH_EN_SHIFT     4
+#define XSTORM_RDMA_CONN_AG_CTX_CF23EN_MASK            0x1
+#define XSTORM_RDMA_CONN_AG_CTX_CF23EN_SHIFT           5
+#define XSTORM_RDMA_CONN_AG_CTX_RULE0EN_MASK           0x1
+#define XSTORM_RDMA_CONN_AG_CTX_RULE0EN_SHIFT          6
+#define XSTORM_RDMA_CONN_AG_CTX_RULE1EN_MASK           0x1
+#define XSTORM_RDMA_CONN_AG_CTX_RULE1EN_SHIFT          7
+       u8 flags11;
+#define XSTORM_RDMA_CONN_AG_CTX_RULE2EN_MASK           0x1
+#define XSTORM_RDMA_CONN_AG_CTX_RULE2EN_SHIFT          0
+#define XSTORM_RDMA_CONN_AG_CTX_RULE3EN_MASK           0x1
+#define XSTORM_RDMA_CONN_AG_CTX_RULE3EN_SHIFT          1
+#define XSTORM_RDMA_CONN_AG_CTX_RULE4EN_MASK           0x1
+#define XSTORM_RDMA_CONN_AG_CTX_RULE4EN_SHIFT          2
+#define XSTORM_RDMA_CONN_AG_CTX_RULE5EN_MASK           0x1
+#define XSTORM_RDMA_CONN_AG_CTX_RULE5EN_SHIFT          3
+#define XSTORM_RDMA_CONN_AG_CTX_RULE6EN_MASK           0x1
+#define XSTORM_RDMA_CONN_AG_CTX_RULE6EN_SHIFT          4
+#define XSTORM_RDMA_CONN_AG_CTX_RULE7EN_MASK           0x1
+#define XSTORM_RDMA_CONN_AG_CTX_RULE7EN_SHIFT          5
+#define XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED1_MASK      0x1
+#define XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED1_SHIFT     6
+#define XSTORM_RDMA_CONN_AG_CTX_RULE9EN_MASK           0x1
+#define XSTORM_RDMA_CONN_AG_CTX_RULE9EN_SHIFT          7
+       u8 flags12;
+#define XSTORM_RDMA_CONN_AG_CTX_RULE10EN_MASK          0x1
+#define XSTORM_RDMA_CONN_AG_CTX_RULE10EN_SHIFT         0
+#define XSTORM_RDMA_CONN_AG_CTX_RULE11EN_MASK          0x1
+#define XSTORM_RDMA_CONN_AG_CTX_RULE11EN_SHIFT         1
+#define XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED2_MASK      0x1
+#define XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED2_SHIFT     2
+#define XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED3_MASK      0x1
+#define XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED3_SHIFT     3
+#define XSTORM_RDMA_CONN_AG_CTX_RULE14EN_MASK          0x1
+#define XSTORM_RDMA_CONN_AG_CTX_RULE14EN_SHIFT         4
+#define XSTORM_RDMA_CONN_AG_CTX_RULE15EN_MASK          0x1
+#define XSTORM_RDMA_CONN_AG_CTX_RULE15EN_SHIFT         5
+#define XSTORM_RDMA_CONN_AG_CTX_RULE16EN_MASK          0x1
+#define XSTORM_RDMA_CONN_AG_CTX_RULE16EN_SHIFT         6
+#define XSTORM_RDMA_CONN_AG_CTX_RULE17EN_MASK          0x1
+#define XSTORM_RDMA_CONN_AG_CTX_RULE17EN_SHIFT         7
+       u8 flags13;
+#define XSTORM_RDMA_CONN_AG_CTX_RULE18EN_MASK          0x1
+#define XSTORM_RDMA_CONN_AG_CTX_RULE18EN_SHIFT         0
+#define XSTORM_RDMA_CONN_AG_CTX_RULE19EN_MASK          0x1
+#define XSTORM_RDMA_CONN_AG_CTX_RULE19EN_SHIFT         1
+#define XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED4_MASK      0x1
+#define XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED4_SHIFT     2
+#define XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED5_MASK      0x1
+#define XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED5_SHIFT     3
+#define XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED6_MASK      0x1
+#define XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED6_SHIFT     4
+#define XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED7_MASK      0x1
+#define XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED7_SHIFT     5
+#define XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED8_MASK      0x1
+#define XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED8_SHIFT     6
+#define XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED9_MASK      0x1
+#define XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED9_SHIFT     7
+       u8 flags14;
+#define XSTORM_RDMA_CONN_AG_CTX_MIGRATION_MASK         0x1
+#define XSTORM_RDMA_CONN_AG_CTX_MIGRATION_SHIFT        0
+#define XSTORM_RDMA_CONN_AG_CTX_BIT17_MASK             0x1
+#define XSTORM_RDMA_CONN_AG_CTX_BIT17_SHIFT            1
+#define XSTORM_RDMA_CONN_AG_CTX_DPM_PORT_NUM_MASK      0x3
+#define XSTORM_RDMA_CONN_AG_CTX_DPM_PORT_NUM_SHIFT     2
+#define XSTORM_RDMA_CONN_AG_CTX_RESERVED_MASK          0x1
+#define XSTORM_RDMA_CONN_AG_CTX_RESERVED_SHIFT         4
+#define XSTORM_RDMA_CONN_AG_CTX_ROCE_EDPM_ENABLE_MASK  0x1
+#define XSTORM_RDMA_CONN_AG_CTX_ROCE_EDPM_ENABLE_SHIFT 5
+#define XSTORM_RDMA_CONN_AG_CTX_CF23_MASK              0x3
+#define XSTORM_RDMA_CONN_AG_CTX_CF23_SHIFT             6
+       u8 byte2;
+       __le16 physical_q0;
+       __le16 word1;
+       __le16 word2;
+       __le16 word3;
+       __le16 word4;
+       __le16 word5;
+       __le16 conn_dpi;
+       u8 byte3;
+       u8 byte4;
+       u8 byte5;
+       u8 byte6;
+       __le32 reg0;
+       __le32 reg1;
+       __le32 reg2;
+       __le32 snd_nxt_psn;
+       __le32 reg4;
+       __le32 reg5;
+       __le32 reg6;
+};
+
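Editor's note: each *_MASK/*_SHIFT pair above describes a sub-field packed into one of the flagsN bytes of the aggregation context. Below is a minimal, self-contained sketch of how such a field would be read and written; the EX_GET_FIELD()/EX_SET_FIELD() helpers and the EX_DPM_PORT_NUM field are illustrative stand-ins for this note only (the qed sources use their own GET_FIELD()/SET_FIELD()-style helpers), not code from this commit.

#include <stdint.h>
#include <stdio.h>

/* Illustrative mask/shift accessors (stand-ins, not the driver's helpers). */
#define EX_GET_FIELD(value, name) \
	(((value) >> name##_SHIFT) & name##_MASK)
#define EX_SET_FIELD(value, name, val) \
	do { \
		(value) &= ~(name##_MASK << name##_SHIFT); \
		(value) |= ((val) & name##_MASK) << name##_SHIFT; \
	} while (0)

/* Hypothetical 2-bit field at bit offset 2, mirroring DPM_PORT_NUM above. */
#define EX_DPM_PORT_NUM_MASK	0x3
#define EX_DPM_PORT_NUM_SHIFT	2

int main(void)
{
	uint8_t flags14 = 0;

	EX_SET_FIELD(flags14, EX_DPM_PORT_NUM, 1);
	printf("flags14=0x%02x, port=%u\n", flags14,
	       (unsigned int)EX_GET_FIELD(flags14, EX_DPM_PORT_NUM));
	return 0;
}
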
+struct ystorm_rdma_conn_ag_ctx {
+       u8 byte0;
+       u8 byte1;
+       u8 flags0;
+#define YSTORM_RDMA_CONN_AG_CTX_BIT0_MASK     0x1
+#define YSTORM_RDMA_CONN_AG_CTX_BIT0_SHIFT    0
+#define YSTORM_RDMA_CONN_AG_CTX_BIT1_MASK     0x1
+#define YSTORM_RDMA_CONN_AG_CTX_BIT1_SHIFT    1
+#define YSTORM_RDMA_CONN_AG_CTX_CF0_MASK      0x3
+#define YSTORM_RDMA_CONN_AG_CTX_CF0_SHIFT     2
+#define YSTORM_RDMA_CONN_AG_CTX_CF1_MASK      0x3
+#define YSTORM_RDMA_CONN_AG_CTX_CF1_SHIFT     4
+#define YSTORM_RDMA_CONN_AG_CTX_CF2_MASK      0x3
+#define YSTORM_RDMA_CONN_AG_CTX_CF2_SHIFT     6
+       u8 flags1;
+#define YSTORM_RDMA_CONN_AG_CTX_CF0EN_MASK    0x1
+#define YSTORM_RDMA_CONN_AG_CTX_CF0EN_SHIFT   0
+#define YSTORM_RDMA_CONN_AG_CTX_CF1EN_MASK    0x1
+#define YSTORM_RDMA_CONN_AG_CTX_CF1EN_SHIFT   1
+#define YSTORM_RDMA_CONN_AG_CTX_CF2EN_MASK    0x1
+#define YSTORM_RDMA_CONN_AG_CTX_CF2EN_SHIFT   2
+#define YSTORM_RDMA_CONN_AG_CTX_RULE0EN_MASK  0x1
+#define YSTORM_RDMA_CONN_AG_CTX_RULE0EN_SHIFT 3
+#define YSTORM_RDMA_CONN_AG_CTX_RULE1EN_MASK  0x1
+#define YSTORM_RDMA_CONN_AG_CTX_RULE1EN_SHIFT 4
+#define YSTORM_RDMA_CONN_AG_CTX_RULE2EN_MASK  0x1
+#define YSTORM_RDMA_CONN_AG_CTX_RULE2EN_SHIFT 5
+#define YSTORM_RDMA_CONN_AG_CTX_RULE3EN_MASK  0x1
+#define YSTORM_RDMA_CONN_AG_CTX_RULE3EN_SHIFT 6
+#define YSTORM_RDMA_CONN_AG_CTX_RULE4EN_MASK  0x1
+#define YSTORM_RDMA_CONN_AG_CTX_RULE4EN_SHIFT 7
+       u8 byte2;
+       u8 byte3;
+       __le16 word0;
+       __le32 reg0;
+       __le32 reg1;
+       __le16 word1;
+       __le16 word2;
+       __le16 word3;
+       __le16 word4;
+       __le32 reg2;
+       __le32 reg3;
+};
+
+struct mstorm_roce_conn_st_ctx {
+       struct regpair temp[6];
+};
+
+struct pstorm_roce_conn_st_ctx {
+       struct regpair temp[16];
+};
+
+struct ystorm_roce_conn_st_ctx {
+       struct regpair temp[2];
+};
+
+struct xstorm_roce_conn_st_ctx {
+       struct regpair temp[22];
+};
+
+struct tstorm_roce_conn_st_ctx {
+       struct regpair temp[30];
+};
+
+struct ustorm_roce_conn_st_ctx {
+       struct regpair temp[12];
+};
+
+struct roce_conn_context {
+       struct ystorm_roce_conn_st_ctx ystorm_st_context;
+       struct regpair ystorm_st_padding[2];
+       struct pstorm_roce_conn_st_ctx pstorm_st_context;
+       struct xstorm_roce_conn_st_ctx xstorm_st_context;
+       struct regpair xstorm_st_padding[2];
+       struct xstorm_rdma_conn_ag_ctx xstorm_ag_context;
+       struct tstorm_rdma_conn_ag_ctx tstorm_ag_context;
+       struct timers_context timer_context;
+       struct ustorm_rdma_conn_ag_ctx ustorm_ag_context;
+       struct tstorm_roce_conn_st_ctx tstorm_st_context;
+       struct mstorm_roce_conn_st_ctx mstorm_st_context;
+       struct ustorm_roce_conn_st_ctx ustorm_st_context;
+       struct regpair ustorm_st_padding[2];
+};
+
+struct roce_create_qp_req_ramrod_data {
+       __le16 flags;
+#define ROCE_CREATE_QP_REQ_RAMROD_DATA_ROCE_FLAVOR_MASK          0x3
+#define ROCE_CREATE_QP_REQ_RAMROD_DATA_ROCE_FLAVOR_SHIFT         0
+#define ROCE_CREATE_QP_REQ_RAMROD_DATA_FMR_AND_RESERVED_EN_MASK  0x1
+#define ROCE_CREATE_QP_REQ_RAMROD_DATA_FMR_AND_RESERVED_EN_SHIFT 2
+#define ROCE_CREATE_QP_REQ_RAMROD_DATA_SIGNALED_COMP_MASK        0x1
+#define ROCE_CREATE_QP_REQ_RAMROD_DATA_SIGNALED_COMP_SHIFT       3
+#define ROCE_CREATE_QP_REQ_RAMROD_DATA_PRI_MASK                  0x7
+#define ROCE_CREATE_QP_REQ_RAMROD_DATA_PRI_SHIFT                 4
+#define ROCE_CREATE_QP_REQ_RAMROD_DATA_RESERVED_MASK             0x1
+#define ROCE_CREATE_QP_REQ_RAMROD_DATA_RESERVED_SHIFT            7
+#define ROCE_CREATE_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT_MASK        0xF
+#define ROCE_CREATE_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT_SHIFT       8
+#define ROCE_CREATE_QP_REQ_RAMROD_DATA_RNR_NAK_CNT_MASK          0xF
+#define ROCE_CREATE_QP_REQ_RAMROD_DATA_RNR_NAK_CNT_SHIFT         12
+       u8 max_ord;
+       u8 traffic_class;
+       u8 hop_limit;
+       u8 orq_num_pages;
+       __le16 p_key;
+       __le32 flow_label;
+       __le32 dst_qp_id;
+       __le32 ack_timeout_val;
+       __le32 initial_psn;
+       __le16 mtu;
+       __le16 pd;
+       __le16 sq_num_pages;
+       __le16 reserved2;
+       struct regpair sq_pbl_addr;
+       struct regpair orq_pbl_addr;
+       __le16 local_mac_addr[3];
+       __le16 remote_mac_addr[3];
+       __le16 vlan_id;
+       __le16 udp_src_port;
+       __le32 src_gid[4];
+       __le32 dst_gid[4];
+       struct regpair qp_handle_for_cqe;
+       struct regpair qp_handle_for_async;
+       u8 stats_counter_id;
+       u8 reserved3[7];
+       __le32 cq_cid;
+       __le16 physical_queue0;
+       __le16 dpi;
+};
+
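Editor's note: roce_create_qp_req_ramrod_data packs several sub-fields into its 16-bit flags word using the masks and shifts defined above. The sketch below builds that word in host byte order with hypothetical values; in the driver the result would be stored little-endian (e.g. via cpu_to_le16()). The EX_* names are local illustrations for this note, not part of the commit.

#include <stdint.h>
#include <stdio.h>

/* Local mirrors of three of the flag fields defined above. */
#define EX_ROCE_FLAVOR_MASK	0x3
#define EX_ROCE_FLAVOR_SHIFT	0
#define EX_SIGNALED_COMP_MASK	0x1
#define EX_SIGNALED_COMP_SHIFT	3
#define EX_ERR_RETRY_CNT_MASK	0xF
#define EX_ERR_RETRY_CNT_SHIFT	8

/* Place a value into its field position within the flags word. */
#define EX_FIELD(name, val) \
	((uint16_t)(((val) & name##_MASK) << name##_SHIFT))

int main(void)
{
	/* Hypothetical settings: flavor 1 (RROCE_IPV4 in the roce_flavor
	 * enum later in this header), signaled completions, retry count 7.
	 */
	uint16_t flags = EX_FIELD(EX_ROCE_FLAVOR, 1) |
			 EX_FIELD(EX_SIGNALED_COMP, 1) |
			 EX_FIELD(EX_ERR_RETRY_CNT, 7);

	printf("flags=0x%04x\n", flags);	/* prints flags=0x0709 */
	return 0;
}
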
+struct roce_create_qp_resp_ramrod_data {
+       __le16 flags;
+#define ROCE_CREATE_QP_RESP_RAMROD_DATA_ROCE_FLAVOR_MASK          0x3
+#define ROCE_CREATE_QP_RESP_RAMROD_DATA_ROCE_FLAVOR_SHIFT         0
+#define ROCE_CREATE_QP_RESP_RAMROD_DATA_RDMA_RD_EN_MASK           0x1
+#define ROCE_CREATE_QP_RESP_RAMROD_DATA_RDMA_RD_EN_SHIFT          2
+#define ROCE_CREATE_QP_RESP_RAMROD_DATA_RDMA_WR_EN_MASK           0x1
+#define ROCE_CREATE_QP_RESP_RAMROD_DATA_RDMA_WR_EN_SHIFT          3
+#define ROCE_CREATE_QP_RESP_RAMROD_DATA_ATOMIC_EN_MASK            0x1
+#define ROCE_CREATE_QP_RESP_RAMROD_DATA_ATOMIC_EN_SHIFT           4
+#define ROCE_CREATE_QP_RESP_RAMROD_DATA_SRQ_FLG_MASK              0x1
+#define ROCE_CREATE_QP_RESP_RAMROD_DATA_SRQ_FLG_SHIFT             5
+#define ROCE_CREATE_QP_RESP_RAMROD_DATA_E2E_FLOW_CONTROL_EN_MASK  0x1
+#define ROCE_CREATE_QP_RESP_RAMROD_DATA_E2E_FLOW_CONTROL_EN_SHIFT 6
+#define ROCE_CREATE_QP_RESP_RAMROD_DATA_RESERVED0_MASK            0x1
+#define ROCE_CREATE_QP_RESP_RAMROD_DATA_RESERVED0_SHIFT           7
+#define ROCE_CREATE_QP_RESP_RAMROD_DATA_PRI_MASK                  0x7
+#define ROCE_CREATE_QP_RESP_RAMROD_DATA_PRI_SHIFT                 8
+#define ROCE_CREATE_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER_MASK    0x1F
+#define ROCE_CREATE_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER_SHIFT   11
+       u8 max_ird;
+       u8 traffic_class;
+       u8 hop_limit;
+       u8 irq_num_pages;
+       __le16 p_key;
+       __le32 flow_label;
+       __le32 dst_qp_id;
+       u8 stats_counter_id;
+       u8 reserved1;
+       __le16 mtu;
+       __le32 initial_psn;
+       __le16 pd;
+       __le16 rq_num_pages;
+       struct rdma_srq_id srq_id;
+       struct regpair rq_pbl_addr;
+       struct regpair irq_pbl_addr;
+       __le16 local_mac_addr[3];
+       __le16 remote_mac_addr[3];
+       __le16 vlan_id;
+       __le16 udp_src_port;
+       __le32 src_gid[4];
+       __le32 dst_gid[4];
+       struct regpair qp_handle_for_cqe;
+       struct regpair qp_handle_for_async;
+       __le32 reserved2[2];
+       __le32 cq_cid;
+       __le16 physical_queue0;
+       __le16 dpi;
+};
+
+struct roce_destroy_qp_req_output_params {
+       __le32 num_bound_mw;
+       __le32 reserved;
+};
+
+struct roce_destroy_qp_req_ramrod_data {
+       struct regpair output_params_addr;
+};
+
+struct roce_destroy_qp_resp_output_params {
+       __le32 num_invalidated_mw;
+       __le32 reserved;
+};
+
+struct roce_destroy_qp_resp_ramrod_data {
+       struct regpair output_params_addr;
+};
+
+enum roce_event_opcode {
+       ROCE_EVENT_CREATE_QP = 11,
+       ROCE_EVENT_MODIFY_QP,
+       ROCE_EVENT_QUERY_QP,
+       ROCE_EVENT_DESTROY_QP,
+       MAX_ROCE_EVENT_OPCODE
+};
+
+struct roce_modify_qp_req_ramrod_data {
+       __le16 flags;
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_MOVE_TO_ERR_FLG_MASK      0x1
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_MOVE_TO_ERR_FLG_SHIFT     0
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_MOVE_TO_SQD_FLG_MASK      0x1
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_MOVE_TO_SQD_FLG_SHIFT     1
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_EN_SQD_ASYNC_NOTIFY_MASK  0x1
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_EN_SQD_ASYNC_NOTIFY_SHIFT 2
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_P_KEY_FLG_MASK            0x1
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_P_KEY_FLG_SHIFT           3
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_ADDRESS_VECTOR_FLG_MASK   0x1
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_ADDRESS_VECTOR_FLG_SHIFT  4
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_MAX_ORD_FLG_MASK          0x1
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_MAX_ORD_FLG_SHIFT         5
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_RNR_NAK_CNT_FLG_MASK      0x1
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_RNR_NAK_CNT_FLG_SHIFT     6
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT_FLG_MASK    0x1
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT_FLG_SHIFT   7
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_ACK_TIMEOUT_FLG_MASK      0x1
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_ACK_TIMEOUT_FLG_SHIFT     8
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_PRI_FLG_MASK              0x1
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_PRI_FLG_SHIFT             9
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_PRI_MASK                  0x7
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_PRI_SHIFT                 10
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_RESERVED1_MASK            0x7
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_RESERVED1_SHIFT           13
+       u8 fields;
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT_MASK        0xF
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT_SHIFT       0
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_RNR_NAK_CNT_MASK          0xF
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_RNR_NAK_CNT_SHIFT         4
+       u8 max_ord;
+       u8 traffic_class;
+       u8 hop_limit;
+       __le16 p_key;
+       __le32 flow_label;
+       __le32 ack_timeout_val;
+       __le16 mtu;
+       __le16 reserved2;
+       __le32 reserved3[3];
+       __le32 src_gid[4];
+       __le32 dst_gid[4];
+};
+
+struct roce_modify_qp_resp_ramrod_data {
+       __le16 flags;
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_MOVE_TO_ERR_FLG_MASK        0x1
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_MOVE_TO_ERR_FLG_SHIFT       0
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_RD_EN_MASK             0x1
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_RD_EN_SHIFT            1
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_WR_EN_MASK             0x1
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_WR_EN_SHIFT            2
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_ATOMIC_EN_MASK              0x1
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_ATOMIC_EN_SHIFT             3
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_P_KEY_FLG_MASK              0x1
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_P_KEY_FLG_SHIFT             4
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_ADDRESS_VECTOR_FLG_MASK     0x1
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_ADDRESS_VECTOR_FLG_SHIFT    5
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_MAX_IRD_FLG_MASK            0x1
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_MAX_IRD_FLG_SHIFT           6
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_PRI_FLG_MASK                0x1
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_PRI_FLG_SHIFT               7
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER_FLG_MASK  0x1
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER_FLG_SHIFT 8
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_OPS_EN_FLG_MASK        0x1
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_OPS_EN_FLG_SHIFT       9
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_RESERVED1_MASK              0x3F
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_RESERVED1_SHIFT             10
+       u8 fields;
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_PRI_MASK                    0x7
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_PRI_SHIFT                   0
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER_MASK      0x1F
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER_SHIFT     3
+       u8 max_ird;
+       u8 traffic_class;
+       u8 hop_limit;
+       __le16 p_key;
+       __le32 flow_label;
+       __le16 mtu;
+       __le16 reserved2;
+       __le32 src_gid[4];
+       __le32 dst_gid[4];
+};
+
+struct roce_query_qp_req_output_params {
+       __le32 psn;
+       __le32 flags;
+#define ROCE_QUERY_QP_REQ_OUTPUT_PARAMS_ERR_FLG_MASK          0x1
+#define ROCE_QUERY_QP_REQ_OUTPUT_PARAMS_ERR_FLG_SHIFT         0
+#define ROCE_QUERY_QP_REQ_OUTPUT_PARAMS_SQ_DRAINING_FLG_MASK  0x1
+#define ROCE_QUERY_QP_REQ_OUTPUT_PARAMS_SQ_DRAINING_FLG_SHIFT 1
+#define ROCE_QUERY_QP_REQ_OUTPUT_PARAMS_RESERVED0_MASK        0x3FFFFFFF
+#define ROCE_QUERY_QP_REQ_OUTPUT_PARAMS_RESERVED0_SHIFT       2
+};
+
+struct roce_query_qp_req_ramrod_data {
+       struct regpair output_params_addr;
+};
+
+struct roce_query_qp_resp_output_params {
+       __le32 psn;
+       __le32 err_flag;
+#define ROCE_QUERY_QP_RESP_OUTPUT_PARAMS_ERROR_FLG_MASK  0x1
+#define ROCE_QUERY_QP_RESP_OUTPUT_PARAMS_ERROR_FLG_SHIFT 0
+#define ROCE_QUERY_QP_RESP_OUTPUT_PARAMS_RESERVED0_MASK  0x7FFFFFFF
+#define ROCE_QUERY_QP_RESP_OUTPUT_PARAMS_RESERVED0_SHIFT 1
+};
+
+struct roce_query_qp_resp_ramrod_data {
+       struct regpair output_params_addr;
+};
+
+enum roce_ramrod_cmd_id {
+       ROCE_RAMROD_CREATE_QP = 11,
+       ROCE_RAMROD_MODIFY_QP,
+       ROCE_RAMROD_QUERY_QP,
+       ROCE_RAMROD_DESTROY_QP,
+       MAX_ROCE_RAMROD_CMD_ID
+};
+
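Editor's note: the ramrod command ids above start at 11 and list the same operations, in the same order, as the roce_event_opcode enum defined earlier, so the corresponding values line up numerically. The stand-alone sketch below (with locally mirrored enums) only makes that correspondence explicit; it is an illustration, not code from the commit.

#include <assert.h>

/* Local mirrors of the two enums; both start counting at 11. */
enum ex_roce_event_opcode {
	EX_ROCE_EVENT_CREATE_QP = 11,
	EX_ROCE_EVENT_MODIFY_QP,
	EX_ROCE_EVENT_QUERY_QP,
	EX_ROCE_EVENT_DESTROY_QP,
};

enum ex_roce_ramrod_cmd_id {
	EX_ROCE_RAMROD_CREATE_QP = 11,
	EX_ROCE_RAMROD_MODIFY_QP,
	EX_ROCE_RAMROD_QUERY_QP,
	EX_ROCE_RAMROD_DESTROY_QP,
};

int main(void)
{
	/* Each command id equals the matching event opcode value. */
	assert((int)EX_ROCE_RAMROD_CREATE_QP == (int)EX_ROCE_EVENT_CREATE_QP);
	assert((int)EX_ROCE_RAMROD_MODIFY_QP == (int)EX_ROCE_EVENT_MODIFY_QP);
	assert((int)EX_ROCE_RAMROD_QUERY_QP == (int)EX_ROCE_EVENT_QUERY_QP);
	assert((int)EX_ROCE_RAMROD_DESTROY_QP == (int)EX_ROCE_EVENT_DESTROY_QP);
	return 0;
}
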
+struct mstorm_roce_req_conn_ag_ctx {
+       u8 byte0;
+       u8 byte1;
+       u8 flags0;
+#define MSTORM_ROCE_REQ_CONN_AG_CTX_BIT0_MASK     0x1
+#define MSTORM_ROCE_REQ_CONN_AG_CTX_BIT0_SHIFT    0
+#define MSTORM_ROCE_REQ_CONN_AG_CTX_BIT1_MASK     0x1
+#define MSTORM_ROCE_REQ_CONN_AG_CTX_BIT1_SHIFT    1
+#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF0_MASK      0x3
+#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF0_SHIFT     2
+#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF1_MASK      0x3
+#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF1_SHIFT     4
+#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF2_MASK      0x3
+#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF2_SHIFT     6
+       u8 flags1;
+#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_MASK    0x1
+#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_SHIFT   0
+#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_MASK    0x1
+#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_SHIFT   1
+#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_MASK    0x1
+#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_SHIFT   2
+#define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_MASK  0x1
+#define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_SHIFT 3
+#define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_MASK  0x1
+#define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_SHIFT 4
+#define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_MASK  0x1
+#define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_SHIFT 5
+#define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_MASK  0x1
+#define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_SHIFT 6
+#define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_MASK  0x1
+#define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_SHIFT 7
+       __le16 word0;
+       __le16 word1;
+       __le32 reg0;
+       __le32 reg1;
+};
+
+struct mstorm_roce_resp_conn_ag_ctx {
+       u8 byte0;
+       u8 byte1;
+       u8 flags0;
+#define MSTORM_ROCE_RESP_CONN_AG_CTX_BIT0_MASK     0x1
+#define MSTORM_ROCE_RESP_CONN_AG_CTX_BIT0_SHIFT    0
+#define MSTORM_ROCE_RESP_CONN_AG_CTX_BIT1_MASK     0x1
+#define MSTORM_ROCE_RESP_CONN_AG_CTX_BIT1_SHIFT    1
+#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF0_MASK      0x3
+#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF0_SHIFT     2
+#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF1_MASK      0x3
+#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF1_SHIFT     4
+#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF2_MASK      0x3
+#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF2_SHIFT     6
+       u8 flags1;
+#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_MASK    0x1
+#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_SHIFT   0
+#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_MASK    0x1
+#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_SHIFT   1
+#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_MASK    0x1
+#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_SHIFT   2
+#define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_MASK  0x1
+#define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_SHIFT 3
+#define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_MASK  0x1
+#define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_SHIFT 4
+#define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_MASK  0x1
+#define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_SHIFT 5
+#define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_MASK  0x1
+#define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_SHIFT 6
+#define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_MASK  0x1
+#define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_SHIFT 7
+       __le16 word0;
+       __le16 word1;
+       __le32 reg0;
+       __le32 reg1;
+};
+
+enum roce_flavor {
+       PLAIN_ROCE /* RoCE v1 */,
+       RROCE_IPV4 /* RoCE v2 (Routable RoCE) over ipv4 */,
+       RROCE_IPV6 /* RoCE v2 (Routable RoCE) over ipv6 */,
+       MAX_ROCE_FLAVOR
+};
+
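Editor's note: roce_flavor distinguishes plain RoCE v1 from the two routable RoCE v2 encapsulations. The helper below is a hypothetical illustration of how a caller might choose a flavor from the connection type and GID address family; it is not the qed driver's actual selection logic.

#include <stdbool.h>
#include <stdio.h>

/* Local mirror of the enum above. */
enum ex_roce_flavor { EX_PLAIN_ROCE, EX_RROCE_IPV4, EX_RROCE_IPV6 };

static enum ex_roce_flavor ex_pick_flavor(bool is_roce_v2, bool gid_is_ipv4)
{
	if (!is_roce_v2)
		return EX_PLAIN_ROCE;		/* RoCE v1 */
	return gid_is_ipv4 ? EX_RROCE_IPV4	/* RoCE v2 over IPv4 */
			   : EX_RROCE_IPV6;	/* RoCE v2 over IPv6 */
}

int main(void)
{
	printf("%d %d %d\n",
	       ex_pick_flavor(false, false),	/* 0: PLAIN_ROCE */
	       ex_pick_flavor(true, true),	/* 1: RROCE_IPV4 */
	       ex_pick_flavor(true, false));	/* 2: RROCE_IPV6 */
	return 0;
}
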
+struct tstorm_roce_req_conn_ag_ctx {
+       u8 reserved0;
+       u8 state;
+       u8 flags0;
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_EXIST_IN_QM0_MASK                0x1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_EXIST_IN_QM0_SHIFT               0
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_RX_ERROR_OCCURED_MASK            0x1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_RX_ERROR_OCCURED_SHIFT           1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_TX_CQE_ERROR_OCCURED_MASK        0x1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_TX_CQE_ERROR_OCCURED_SHIFT       2
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_BIT3_MASK                        0x1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_BIT3_SHIFT                       3
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_MSTORM_FLUSH_MASK                0x1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_MSTORM_FLUSH_SHIFT               4
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_CACHED_ORQ_MASK                  0x1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_CACHED_ORQ_SHIFT                 5
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_CF_MASK                    0x3
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_CF_SHIFT                   6
+       u8 flags1;
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_CF1_MASK                         0x3
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_CF1_SHIFT                        0
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_SQ_CF_MASK                 0x3
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_SQ_CF_SHIFT                2
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_STOP_ALL_CF_MASK           0x3
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_STOP_ALL_CF_SHIFT          4
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_MASK                 0x3
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT                6
+       u8 flags2;
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_MSTORM_FLUSH_CF_MASK             0x3
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_MSTORM_FLUSH_CF_SHIFT            0
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_SET_TIMER_CF_MASK                0x3
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_SET_TIMER_CF_SHIFT               2
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_TX_ASYNC_ERROR_CF_MASK           0x3
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_TX_ASYNC_ERROR_CF_SHIFT          4
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_RXMIT_DONE_CF_MASK               0x3
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_RXMIT_DONE_CF_SHIFT              6
+       u8 flags3;
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_ERROR_SCAN_COMPLETED_CF_MASK     0x3
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_ERROR_SCAN_COMPLETED_CF_SHIFT    0
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_SQ_DRAIN_COMPLETED_CF_MASK       0x3
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_SQ_DRAIN_COMPLETED_CF_SHIFT      2
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_CF_EN_MASK                 0x1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_CF_EN_SHIFT                4
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_MASK                       0x1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_SHIFT                      5
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_SQ_CF_EN_MASK              0x1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_SQ_CF_EN_SHIFT             6
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_STOP_ALL_CF_EN_MASK        0x1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_STOP_ALL_CF_EN_SHIFT       7
+       u8 flags4;
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK              0x1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT             0
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_MSTORM_FLUSH_CF_EN_MASK          0x1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_MSTORM_FLUSH_CF_EN_SHIFT         1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_SET_TIMER_CF_EN_MASK             0x1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_SET_TIMER_CF_EN_SHIFT            2
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_TX_ASYNC_ERROR_CF_EN_MASK        0x1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_TX_ASYNC_ERROR_CF_EN_SHIFT       3
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_RXMIT_DONE_CF_EN_MASK            0x1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_RXMIT_DONE_CF_EN_SHIFT           4
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_ERROR_SCAN_COMPLETED_CF_EN_MASK  0x1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_ERROR_SCAN_COMPLETED_CF_EN_SHIFT 5
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_SQ_DRAIN_COMPLETED_CF_EN_MASK    0x1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_SQ_DRAIN_COMPLETED_CF_EN_SHIFT   6
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_MASK                     0x1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_SHIFT                    7
+       u8 flags5;
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_MASK                     0x1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_SHIFT                    0
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_MASK                     0x1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_SHIFT                    1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_MASK                     0x1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_SHIFT                    2
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_MASK                     0x1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_SHIFT                    3
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE5EN_MASK                     0x1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE5EN_SHIFT                    4
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_SND_SQ_CONS_EN_MASK              0x1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_SND_SQ_CONS_EN_SHIFT             5
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE7EN_MASK                     0x1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE7EN_SHIFT                    6
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE8EN_MASK                     0x1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE8EN_SHIFT                    7
+       __le32 reg0;
+       __le32 snd_nxt_psn;
+       __le32 snd_max_psn;
+       __le32 orq_prod;
+       __le32 reg4;
+       __le32 reg5;
+       __le32 reg6;
+       __le32 reg7;
+       __le32 reg8;
+       u8 tx_cqe_error_type;
+       u8 orq_cache_idx;
+       __le16 snd_sq_cons_th;
+       u8 byte4;
+       u8 byte5;
+       __le16 snd_sq_cons;
+       __le16 word2;
+       __le16 word3;
+       __le32 reg9;
+       __le32 reg10;
+};
+
+struct tstorm_roce_resp_conn_ag_ctx {
+       u8 byte0;
+       u8 state;
+       u8 flags0;
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_EXIST_IN_QM0_MASK        0x1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_EXIST_IN_QM0_SHIFT       0
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_BIT1_MASK                0x1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_BIT1_SHIFT               1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_BIT2_MASK                0x1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_BIT2_SHIFT               2
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_BIT3_MASK                0x1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_BIT3_SHIFT               3
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_MSTORM_FLUSH_MASK        0x1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_MSTORM_FLUSH_SHIFT       4
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_BIT5_MASK                0x1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_BIT5_SHIFT               5
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF0_MASK                 0x3
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF0_SHIFT                6
+       u8 flags1;
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_MASK         0x3
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_SHIFT        0
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_TX_ERROR_CF_MASK         0x3
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_TX_ERROR_CF_SHIFT        2
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF3_MASK                 0x3
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF3_SHIFT                4
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_MASK         0x3
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT        6
+       u8 flags2;
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_MSTORM_FLUSH_CF_MASK     0x3
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_MSTORM_FLUSH_CF_SHIFT    0
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF6_MASK                 0x3
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF6_SHIFT                2
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF7_MASK                 0x3
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF7_SHIFT                4
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF8_MASK                 0x3
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF8_SHIFT                6
+       u8 flags3;
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF9_MASK                 0x3
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF9_SHIFT                0
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF10_MASK                0x3
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF10_SHIFT               2
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_MASK               0x1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_SHIFT              4
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_EN_MASK      0x1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_EN_SHIFT     5
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_TX_ERROR_CF_EN_MASK      0x1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_TX_ERROR_CF_EN_SHIFT     6
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF3EN_MASK               0x1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF3EN_SHIFT              7
+       u8 flags4;
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK      0x1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT     0
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_MSTORM_FLUSH_CF_EN_MASK  0x1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_MSTORM_FLUSH_CF_EN_SHIFT 1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF6EN_MASK               0x1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF6EN_SHIFT              2
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF7EN_MASK               0x1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF7EN_SHIFT              3
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF8EN_MASK               0x1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF8EN_SHIFT              4
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF9EN_MASK               0x1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF9EN_SHIFT              5
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF10EN_MASK              0x1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF10EN_SHIFT             6
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_MASK             0x1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_SHIFT            7
+       u8 flags5;
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_MASK             0x1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_SHIFT            0
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_MASK             0x1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_SHIFT            1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_MASK             0x1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_SHIFT            2
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_MASK             0x1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_SHIFT            3
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE5EN_MASK             0x1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE5EN_SHIFT            4
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_RQ_RULE_EN_MASK          0x1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_RQ_RULE_EN_SHIFT         5
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE7EN_MASK             0x1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE7EN_SHIFT            6
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE8EN_MASK             0x1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE8EN_SHIFT            7
+       __le32 psn_and_rxmit_id_echo;
+       __le32 reg1;
+       __le32 reg2;
+       __le32 reg3;
+       __le32 reg4;
+       __le32 reg5;
+       __le32 reg6;
+       __le32 reg7;
+       __le32 reg8;
+       u8 tx_async_error_type;
+       u8 byte3;
+       __le16 rq_cons;
+       u8 byte4;
+       u8 byte5;
+       __le16 rq_prod;
+       __le16 conn_dpi;
+       __le16 irq_cons;
+       __le32 num_invalidated_mw;
+       __le32 reg10;
+};
+
+struct ustorm_roce_req_conn_ag_ctx {
+       u8 byte0;
+       u8 byte1;
+       u8 flags0;
+#define USTORM_ROCE_REQ_CONN_AG_CTX_BIT0_MASK     0x1
+#define USTORM_ROCE_REQ_CONN_AG_CTX_BIT0_SHIFT    0
+#define USTORM_ROCE_REQ_CONN_AG_CTX_BIT1_MASK     0x1
+#define USTORM_ROCE_REQ_CONN_AG_CTX_BIT1_SHIFT    1
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF0_MASK      0x3
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF0_SHIFT     2
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF1_MASK      0x3
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF1_SHIFT     4
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF2_MASK      0x3
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF2_SHIFT     6
+       u8 flags1;
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF3_MASK      0x3
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF3_SHIFT     0
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF4_MASK      0x3
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF4_SHIFT     2
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF5_MASK      0x3
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF5_SHIFT     4
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF6_MASK      0x3
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF6_SHIFT     6
+       u8 flags2;
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_MASK    0x1
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_SHIFT   0
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_MASK    0x1
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_SHIFT   1
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_MASK    0x1
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_SHIFT   2
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF3EN_MASK    0x1
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF3EN_SHIFT   3
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF4EN_MASK    0x1
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF4EN_SHIFT   4
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF5EN_MASK    0x1
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF5EN_SHIFT   5
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF6EN_MASK    0x1
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF6EN_SHIFT   6
+#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_MASK  0x1
+#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_SHIFT 7
+       u8 flags3;
+#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_MASK  0x1
+#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_SHIFT 0
+#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_MASK  0x1
+#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_SHIFT 1
+#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_MASK  0x1
+#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_SHIFT 2
+#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_MASK  0x1
+#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_SHIFT 3
+#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE5EN_MASK  0x1
+#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE5EN_SHIFT 4
+#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE6EN_MASK  0x1
+#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE6EN_SHIFT 5
+#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE7EN_MASK  0x1
+#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE7EN_SHIFT 6
+#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE8EN_MASK  0x1
+#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE8EN_SHIFT 7
+       u8 byte2;
+       u8 byte3;
+       __le16 word0;
+       __le16 word1;
+       __le32 reg0;
+       __le32 reg1;
+       __le32 reg2;
+       __le32 reg3;
+       __le16 word2;
+       __le16 word3;
+};
+
+struct ustorm_roce_resp_conn_ag_ctx {
+       u8 byte0;
+       u8 byte1;
+       u8 flags0;
+#define USTORM_ROCE_RESP_CONN_AG_CTX_BIT0_MASK     0x1
+#define USTORM_ROCE_RESP_CONN_AG_CTX_BIT0_SHIFT    0
+#define USTORM_ROCE_RESP_CONN_AG_CTX_BIT1_MASK     0x1
+#define USTORM_ROCE_RESP_CONN_AG_CTX_BIT1_SHIFT    1
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF0_MASK      0x3
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF0_SHIFT     2
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF1_MASK      0x3
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF1_SHIFT     4
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF2_MASK      0x3
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF2_SHIFT     6
+       u8 flags1;
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF3_MASK      0x3
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF3_SHIFT     0
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF4_MASK      0x3
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF4_SHIFT     2
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF5_MASK      0x3
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF5_SHIFT     4
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF6_MASK      0x3
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF6_SHIFT     6
+       u8 flags2;
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_MASK    0x1
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_SHIFT   0
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_MASK    0x1
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_SHIFT   1
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_MASK    0x1
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_SHIFT   2
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF3EN_MASK    0x1
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF3EN_SHIFT   3
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF4EN_MASK    0x1
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF4EN_SHIFT   4
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF5EN_MASK    0x1
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF5EN_SHIFT   5
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF6EN_MASK    0x1
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF6EN_SHIFT   6
+#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_MASK  0x1
+#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_SHIFT 7
+       u8 flags3;
+#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_MASK  0x1
+#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_SHIFT 0
+#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_MASK  0x1
+#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_SHIFT 1
+#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_MASK  0x1
+#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_SHIFT 2
+#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_MASK  0x1
+#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_SHIFT 3
+#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE5EN_MASK  0x1
+#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE5EN_SHIFT 4
+#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE6EN_MASK  0x1
+#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE6EN_SHIFT 5
+#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE7EN_MASK  0x1
+#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE7EN_SHIFT 6
+#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE8EN_MASK  0x1
+#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE8EN_SHIFT 7
+       u8 byte2;
+       u8 byte3;
+       __le16 word0;
+       __le16 word1;
+       __le32 reg0;
+       __le32 reg1;
+       __le32 reg2;
+       __le32 reg3;
+       __le16 word2;
+       __le16 word3;
+};
+
+struct xstorm_roce_req_conn_ag_ctx {
+       u8 reserved0;
+       u8 state;
+       u8 flags0;
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_EXIST_IN_QM0_MASK        0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_EXIST_IN_QM0_SHIFT       0
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED1_MASK           0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED1_SHIFT          1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED2_MASK           0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED2_SHIFT          2
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_EXIST_IN_QM3_MASK        0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_EXIST_IN_QM3_SHIFT       3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED3_MASK           0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED3_SHIFT          4
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED4_MASK           0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED4_SHIFT          5
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED5_MASK           0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED5_SHIFT          6
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED6_MASK           0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED6_SHIFT          7
+       u8 flags1;
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED7_MASK           0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED7_SHIFT          0
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED8_MASK           0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED8_SHIFT          1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_BIT10_MASK               0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_BIT10_SHIFT              2
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_BIT11_MASK               0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_BIT11_SHIFT              3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_BIT12_MASK               0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_BIT12_SHIFT              4
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_BIT13_MASK               0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_BIT13_SHIFT              5
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_ERROR_STATE_MASK         0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_ERROR_STATE_SHIFT        6
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_YSTORM_FLUSH_MASK        0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_YSTORM_FLUSH_SHIFT       7
+       u8 flags2;
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF0_MASK                 0x3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF0_SHIFT                0
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF1_MASK                 0x3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF1_SHIFT                2
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF2_MASK                 0x3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF2_SHIFT                4
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF3_MASK                 0x3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF3_SHIFT                6
+       u8 flags3;
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_SQ_FLUSH_CF_MASK         0x3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_SQ_FLUSH_CF_SHIFT        0
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RX_ERROR_CF_MASK         0x3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RX_ERROR_CF_SHIFT        2
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_SND_RXMIT_CF_MASK        0x3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_SND_RXMIT_CF_SHIFT       4
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_MASK         0x3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT        6
+       u8 flags4;
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF8_MASK                 0x3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF8_SHIFT                0
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF9_MASK                 0x3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF9_SHIFT                2
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF10_MASK                0x3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF10_SHIFT               4
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF11_MASK                0x3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF11_SHIFT               6
+       u8 flags5;
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF12_MASK                0x3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF12_SHIFT               0
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF13_MASK                0x3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF13_SHIFT               2
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_FMR_ENDED_CF_MASK        0x3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_FMR_ENDED_CF_SHIFT       4
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF15_MASK                0x3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF15_SHIFT               6
+       u8 flags6;
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF16_MASK                0x3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF16_SHIFT               0
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF17_MASK                0x3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF17_SHIFT               2
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF18_MASK                0x3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF18_SHIFT               4
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF19_MASK                0x3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF19_SHIFT               6
+       u8 flags7;
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF20_MASK                0x3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF20_SHIFT               0
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF21_MASK                0x3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF21_SHIFT               2
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_SLOW_PATH_MASK           0x3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_SLOW_PATH_SHIFT          4
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_MASK               0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_SHIFT              6
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_MASK               0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_SHIFT              7
+       u8 flags8;
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_MASK               0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_SHIFT              0
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF3EN_MASK               0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF3EN_SHIFT              1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_SQ_FLUSH_CF_EN_MASK      0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_SQ_FLUSH_CF_EN_SHIFT     2
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RX_ERROR_CF_EN_MASK      0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RX_ERROR_CF_EN_SHIFT     3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_SND_RXMIT_CF_EN_MASK     0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_SND_RXMIT_CF_EN_SHIFT    4
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK      0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT     5
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF8EN_MASK               0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF8EN_SHIFT              6
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF9EN_MASK               0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF9EN_SHIFT              7
+       u8 flags9;
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF10EN_MASK              0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF10EN_SHIFT             0
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF11EN_MASK              0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF11EN_SHIFT             1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF12EN_MASK              0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF12EN_SHIFT             2
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF13EN_MASK              0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF13EN_SHIFT             3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_FMR_ENDED_CF_EN_MASK     0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_FMR_ENDED_CF_EN_SHIFT    4
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF15EN_MASK              0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF15EN_SHIFT             5
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF16EN_MASK              0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF16EN_SHIFT             6
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF17EN_MASK              0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF17EN_SHIFT             7
+       u8 flags10;
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF18EN_MASK              0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF18EN_SHIFT             0
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF19EN_MASK              0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF19EN_SHIFT             1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF20EN_MASK              0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF20EN_SHIFT             2
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF21EN_MASK              0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF21EN_SHIFT             3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_SLOW_PATH_EN_MASK        0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_SLOW_PATH_EN_SHIFT       4
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF23EN_MASK              0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF23EN_SHIFT             5
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_MASK             0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_SHIFT            6
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_MASK             0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_SHIFT            7
+       u8 flags11;
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_MASK             0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_SHIFT            0
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_MASK             0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_SHIFT            1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_MASK             0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_SHIFT            2
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE5EN_MASK             0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE5EN_SHIFT            3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE6EN_MASK             0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE6EN_SHIFT            4
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_E2E_CREDIT_RULE_EN_MASK  0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_E2E_CREDIT_RULE_EN_SHIFT 5
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED1_MASK        0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED1_SHIFT       6
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE9EN_MASK             0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE9EN_SHIFT            7
+       u8 flags12;
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_SQ_PROD_EN_MASK          0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_SQ_PROD_EN_SHIFT         0
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE11EN_MASK            0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE11EN_SHIFT           1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED2_MASK        0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED2_SHIFT       2
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED3_MASK        0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED3_SHIFT       3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_INV_FENCE_RULE_EN_MASK   0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_INV_FENCE_RULE_EN_SHIFT  4
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE15EN_MASK            0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE15EN_SHIFT           5
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_ORQ_FENCE_RULE_EN_MASK   0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_ORQ_FENCE_RULE_EN_SHIFT  6
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_MAX_ORD_RULE_EN_MASK     0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_MAX_ORD_RULE_EN_SHIFT    7
+       u8 flags13;
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE18EN_MASK            0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE18EN_SHIFT           0
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE19EN_MASK            0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE19EN_SHIFT           1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED4_MASK        0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED4_SHIFT       2
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED5_MASK        0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED5_SHIFT       3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED6_MASK        0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED6_SHIFT       4
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED7_MASK        0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED7_SHIFT       5
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED8_MASK        0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED8_SHIFT       6
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED9_MASK        0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED9_SHIFT       7
+       u8 flags14;
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_MIGRATION_FLAG_MASK      0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_MIGRATION_FLAG_SHIFT     0
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_BIT17_MASK               0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_BIT17_SHIFT              1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_DPM_PORT_NUM_MASK        0x3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_DPM_PORT_NUM_SHIFT       2
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED_MASK            0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED_SHIFT           4
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_ROCE_EDPM_ENABLE_MASK    0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_ROCE_EDPM_ENABLE_SHIFT   5
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF23_MASK                0x3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF23_SHIFT               6
+       u8 byte2;
+       __le16 physical_q0;
+       __le16 word1;
+       __le16 sq_cmp_cons;
+       __le16 sq_cons;
+       __le16 sq_prod;
+       __le16 word5;
+       __le16 conn_dpi;
+       u8 byte3;
+       u8 byte4;
+       u8 byte5;
+       u8 byte6;
+       __le32 lsn;
+       __le32 ssn;
+       __le32 snd_una_psn;
+       __le32 snd_nxt_psn;
+       __le32 reg4;
+       __le32 orq_cons_th;
+       __le32 orq_cons;
+};
+
+struct xstorm_roce_resp_conn_ag_ctx {
+       u8 reserved0;
+       u8 state;
+       u8 flags0;
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_EXIST_IN_QM0_MASK      0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_EXIST_IN_QM0_SHIFT     0
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED1_MASK         0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED1_SHIFT        1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED2_MASK         0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED2_SHIFT        2
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_EXIST_IN_QM3_MASK      0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_EXIST_IN_QM3_SHIFT     3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED3_MASK         0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED3_SHIFT        4
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED4_MASK         0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED4_SHIFT        5
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED5_MASK         0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED5_SHIFT        6
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED6_MASK         0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED6_SHIFT        7
+       u8 flags1;
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED7_MASK         0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED7_SHIFT        0
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED8_MASK         0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED8_SHIFT        1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT10_MASK             0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT10_SHIFT            2
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT11_MASK             0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT11_SHIFT            3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT12_MASK             0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT12_SHIFT            4
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT13_MASK             0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT13_SHIFT            5
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_ERROR_STATE_MASK       0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_ERROR_STATE_SHIFT      6
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_YSTORM_FLUSH_MASK      0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_YSTORM_FLUSH_SHIFT     7
+       u8 flags2;
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF0_MASK               0x3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF0_SHIFT              0
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF1_MASK               0x3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF1_SHIFT              2
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF2_MASK               0x3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF2_SHIFT              4
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF3_MASK               0x3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF3_SHIFT              6
+       u8 flags3;
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RXMIT_CF_MASK          0x3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RXMIT_CF_SHIFT         0
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_MASK       0x3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_SHIFT      2
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_FORCE_ACK_CF_MASK      0x3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_FORCE_ACK_CF_SHIFT     4
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_MASK       0x3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT      6
+       u8 flags4;
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF8_MASK               0x3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF8_SHIFT              0
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF9_MASK               0x3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF9_SHIFT              2
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF10_MASK              0x3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF10_SHIFT             4
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF11_MASK              0x3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF11_SHIFT             6
+       u8 flags5;
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF12_MASK              0x3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF12_SHIFT             0
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF13_MASK              0x3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF13_SHIFT             2
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF14_MASK              0x3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF14_SHIFT             4
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF15_MASK              0x3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF15_SHIFT             6
+       u8 flags6;
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF16_MASK              0x3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF16_SHIFT             0
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF17_MASK              0x3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF17_SHIFT             2
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF18_MASK              0x3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF18_SHIFT             4
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF19_MASK              0x3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF19_SHIFT             6
+       u8 flags7;
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF20_MASK              0x3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF20_SHIFT             0
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF21_MASK              0x3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF21_SHIFT             2
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_SLOW_PATH_MASK         0x3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_SLOW_PATH_SHIFT        4
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_MASK             0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_SHIFT            6
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_MASK             0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_SHIFT            7
+       u8 flags8;
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_MASK             0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_SHIFT            0
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF3EN_MASK             0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF3EN_SHIFT            1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RXMIT_CF_EN_MASK       0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RXMIT_CF_EN_SHIFT      2
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_EN_MASK    0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_EN_SHIFT   3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_FORCE_ACK_CF_EN_MASK   0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_FORCE_ACK_CF_EN_SHIFT  4
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK    0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT   5
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF8EN_MASK             0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF8EN_SHIFT            6
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF9EN_MASK             0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF9EN_SHIFT            7
+       u8 flags9;
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF10EN_MASK            0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF10EN_SHIFT           0
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF11EN_MASK            0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF11EN_SHIFT           1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF12EN_MASK            0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF12EN_SHIFT           2
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF13EN_MASK            0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF13EN_SHIFT           3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF14EN_MASK            0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF14EN_SHIFT           4
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF15EN_MASK            0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF15EN_SHIFT           5
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF16EN_MASK            0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF16EN_SHIFT           6
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF17EN_MASK            0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF17EN_SHIFT           7
+       u8 flags10;
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF18EN_MASK            0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF18EN_SHIFT           0
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF19EN_MASK            0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF19EN_SHIFT           1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF20EN_MASK            0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF20EN_SHIFT           2
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF21EN_MASK            0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF21EN_SHIFT           3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_SLOW_PATH_EN_MASK      0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_SLOW_PATH_EN_SHIFT     4
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF23EN_MASK            0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF23EN_SHIFT           5
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_MASK           0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_SHIFT          6
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_MASK           0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_SHIFT          7
+       u8 flags11;
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_MASK           0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_SHIFT          0
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_MASK           0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_SHIFT          1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_MASK           0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_SHIFT          2
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE5EN_MASK           0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE5EN_SHIFT          3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE6EN_MASK           0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE6EN_SHIFT          4
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE7EN_MASK           0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE7EN_SHIFT          5
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED1_MASK      0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED1_SHIFT     6
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE9EN_MASK           0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE9EN_SHIFT          7
+       u8 flags12;
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE10EN_MASK          0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE10EN_SHIFT         0
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_IRQ_PROD_RULE_EN_MASK  0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_IRQ_PROD_RULE_EN_SHIFT 1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED2_MASK      0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED2_SHIFT     2
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED3_MASK      0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED3_SHIFT     3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE14EN_MASK          0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE14EN_SHIFT         4
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE15EN_MASK          0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE15EN_SHIFT         5
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE16EN_MASK          0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE16EN_SHIFT         6
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE17EN_MASK          0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE17EN_SHIFT         7
+       u8 flags13;
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE18EN_MASK          0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE18EN_SHIFT         0
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE19EN_MASK          0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE19EN_SHIFT         1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED4_MASK      0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED4_SHIFT     2
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED5_MASK      0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED5_SHIFT     3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED6_MASK      0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED6_SHIFT     4
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED7_MASK      0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED7_SHIFT     5
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED8_MASK      0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED8_SHIFT     6
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED9_MASK      0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED9_SHIFT     7
+       u8 flags14;
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT16_MASK             0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT16_SHIFT            0
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT17_MASK             0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT17_SHIFT            1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT18_MASK             0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT18_SHIFT            2
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT19_MASK             0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT19_SHIFT            3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT20_MASK             0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT20_SHIFT            4
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT21_MASK             0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT21_SHIFT            5
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF23_MASK              0x3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF23_SHIFT             6
+       u8 byte2;
+       __le16 physical_q0;
+       __le16 word1;
+       __le16 irq_prod;
+       __le16 word3;
+       __le16 word4;
+       __le16 word5;
+       __le16 irq_cons;
+       u8 rxmit_opcode;
+       u8 byte4;
+       u8 byte5;
+       u8 byte6;
+       __le32 rxmit_psn_and_id;
+       __le32 rxmit_bytes_length;
+       __le32 psn;
+       __le32 reg3;
+       __le32 reg4;
+       __le32 reg5;
+       __le32 msn_and_syndrome;
+};
+
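/*
 * Editor's note -- illustrative sketch, not part of the committed sources:
 * the *_CONN_AG_CTX structures in this file pack several sub-fields into
 * each u8 "flagsN" byte.  For every sub-field, <NAME>_MASK gives its width
 * (0x1 for a single bit, 0x3 for a two-bit counter flag) and <NAME>_SHIFT
 * its position inside that byte, so a field is read as
 * (flags >> SHIFT) & MASK.  The helpers below are local stand-ins; the
 * driver is assumed to provide its own equivalents elsewhere.
 */
#define QED_EX_GET_FIELD(value, name) \
	(((value) >> name##_SHIFT) & name##_MASK)
#define QED_EX_SET_FIELD(value, name, val) \
	((value) = ((value) & ~(name##_MASK << name##_SHIFT)) | \
		   (((val) & name##_MASK) << name##_SHIFT))

/* e.g. arm the two-bit RXMIT counter flag held in bits 1:0 of flags3 */
static inline void example_arm_rxmit_cf(u8 *flags3)
{
	QED_EX_SET_FIELD(*flags3, XSTORM_ROCE_RESP_CONN_AG_CTX_RXMIT_CF, 2);
}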
+struct ystorm_roce_req_conn_ag_ctx {
+       u8 byte0;
+       u8 byte1;
+       u8 flags0;
+#define YSTORM_ROCE_REQ_CONN_AG_CTX_BIT0_MASK     0x1
+#define YSTORM_ROCE_REQ_CONN_AG_CTX_BIT0_SHIFT    0
+#define YSTORM_ROCE_REQ_CONN_AG_CTX_BIT1_MASK     0x1
+#define YSTORM_ROCE_REQ_CONN_AG_CTX_BIT1_SHIFT    1
+#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF0_MASK      0x3
+#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF0_SHIFT     2
+#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF1_MASK      0x3
+#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF1_SHIFT     4
+#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF2_MASK      0x3
+#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF2_SHIFT     6
+       u8 flags1;
+#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_MASK    0x1
+#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_SHIFT   0
+#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_MASK    0x1
+#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_SHIFT   1
+#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_MASK    0x1
+#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_SHIFT   2
+#define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_MASK  0x1
+#define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_SHIFT 3
+#define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_MASK  0x1
+#define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_SHIFT 4
+#define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_MASK  0x1
+#define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_SHIFT 5
+#define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_MASK  0x1
+#define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_SHIFT 6
+#define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_MASK  0x1
+#define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_SHIFT 7
+       u8 byte2;
+       u8 byte3;
+       __le16 word0;
+       __le32 reg0;
+       __le32 reg1;
+       __le16 word1;
+       __le16 word2;
+       __le16 word3;
+       __le16 word4;
+       __le32 reg2;
+       __le32 reg3;
+};
+
+struct ystorm_roce_resp_conn_ag_ctx {
+       u8 byte0;
+       u8 byte1;
+       u8 flags0;
+#define YSTORM_ROCE_RESP_CONN_AG_CTX_BIT0_MASK     0x1
+#define YSTORM_ROCE_RESP_CONN_AG_CTX_BIT0_SHIFT    0
+#define YSTORM_ROCE_RESP_CONN_AG_CTX_BIT1_MASK     0x1
+#define YSTORM_ROCE_RESP_CONN_AG_CTX_BIT1_SHIFT    1
+#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF0_MASK      0x3
+#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF0_SHIFT     2
+#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF1_MASK      0x3
+#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF1_SHIFT     4
+#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF2_MASK      0x3
+#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF2_SHIFT     6
+       u8 flags1;
+#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_MASK    0x1
+#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_SHIFT   0
+#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_MASK    0x1
+#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_SHIFT   1
+#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_MASK    0x1
+#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_SHIFT   2
+#define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_MASK  0x1
+#define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_SHIFT 3
+#define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_MASK  0x1
+#define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_SHIFT 4
+#define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_MASK  0x1
+#define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_SHIFT 5
+#define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_MASK  0x1
+#define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_SHIFT 6
+#define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_MASK  0x1
+#define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_SHIFT 7
+       u8 byte2;
+       u8 byte3;
+       __le16 word0;
+       __le32 reg0;
+       __le32 reg1;
+       __le16 word1;
+       __le16 word2;
+       __le16 word3;
+       __le16 word4;
+       __le32 reg2;
+       __le32 reg3;
+};
+
+struct ystorm_iscsi_conn_st_ctx {
+       __le32 reserved[4];
+};
+
+struct pstorm_iscsi_tcp_conn_st_ctx {
+       __le32 tcp[32];
+       __le32 iscsi[4];
+};
+
+struct xstorm_iscsi_tcp_conn_st_ctx {
+       __le32 reserved_iscsi[40];
+       __le32 reserved_tcp[4];
+};
+
+struct xstorm_iscsi_conn_ag_ctx {
+       u8 cdu_validation;
+       u8 state;
+       u8 flags0;
+#define XSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM0_MASK                0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM0_SHIFT               0
+#define XSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM1_MASK                0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM1_SHIFT               1
+#define XSTORM_ISCSI_CONN_AG_CTX_RESERVED1_MASK                   0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_RESERVED1_SHIFT                  2
+#define XSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM3_MASK                0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM3_SHIFT               3
+#define XSTORM_ISCSI_CONN_AG_CTX_BIT4_MASK                        0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_BIT4_SHIFT                       4
+#define XSTORM_ISCSI_CONN_AG_CTX_RESERVED2_MASK                   0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_RESERVED2_SHIFT                  5
+#define XSTORM_ISCSI_CONN_AG_CTX_BIT6_MASK                        0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_BIT6_SHIFT                       6
+#define XSTORM_ISCSI_CONN_AG_CTX_BIT7_MASK                        0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_BIT7_SHIFT                       7
+       u8 flags1;
+#define XSTORM_ISCSI_CONN_AG_CTX_BIT8_MASK                        0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_BIT8_SHIFT                       0
+#define XSTORM_ISCSI_CONN_AG_CTX_BIT9_MASK                        0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_BIT9_SHIFT                       1
+#define XSTORM_ISCSI_CONN_AG_CTX_BIT10_MASK                       0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_BIT10_SHIFT                      2
+#define XSTORM_ISCSI_CONN_AG_CTX_BIT11_MASK                       0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_BIT11_SHIFT                      3
+#define XSTORM_ISCSI_CONN_AG_CTX_BIT12_MASK                       0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_BIT12_SHIFT                      4
+#define XSTORM_ISCSI_CONN_AG_CTX_BIT13_MASK                       0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_BIT13_SHIFT                      5
+#define XSTORM_ISCSI_CONN_AG_CTX_BIT14_MASK                       0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_BIT14_SHIFT                      6
+#define XSTORM_ISCSI_CONN_AG_CTX_TX_TRUNCATE_MASK                 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_TX_TRUNCATE_SHIFT                7
+       u8 flags2;
+#define XSTORM_ISCSI_CONN_AG_CTX_CF0_MASK                         0x3
+#define XSTORM_ISCSI_CONN_AG_CTX_CF0_SHIFT                        0
+#define XSTORM_ISCSI_CONN_AG_CTX_CF1_MASK                         0x3
+#define XSTORM_ISCSI_CONN_AG_CTX_CF1_SHIFT                        2
+#define XSTORM_ISCSI_CONN_AG_CTX_CF2_MASK                         0x3
+#define XSTORM_ISCSI_CONN_AG_CTX_CF2_SHIFT                        4
+#define XSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_MASK              0x3
+#define XSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_SHIFT             6
+       u8 flags3;
+#define XSTORM_ISCSI_CONN_AG_CTX_CF4_MASK                         0x3
+#define XSTORM_ISCSI_CONN_AG_CTX_CF4_SHIFT                        0
+#define XSTORM_ISCSI_CONN_AG_CTX_CF5_MASK                         0x3
+#define XSTORM_ISCSI_CONN_AG_CTX_CF5_SHIFT                        2
+#define XSTORM_ISCSI_CONN_AG_CTX_CF6_MASK                         0x3
+#define XSTORM_ISCSI_CONN_AG_CTX_CF6_SHIFT                        4
+#define XSTORM_ISCSI_CONN_AG_CTX_CF7_MASK                         0x3
+#define XSTORM_ISCSI_CONN_AG_CTX_CF7_SHIFT                        6
+       u8 flags4;
+#define XSTORM_ISCSI_CONN_AG_CTX_CF8_MASK                         0x3
+#define XSTORM_ISCSI_CONN_AG_CTX_CF8_SHIFT                        0
+#define XSTORM_ISCSI_CONN_AG_CTX_CF9_MASK                         0x3
+#define XSTORM_ISCSI_CONN_AG_CTX_CF9_SHIFT                        2
+#define XSTORM_ISCSI_CONN_AG_CTX_CF10_MASK                        0x3
+#define XSTORM_ISCSI_CONN_AG_CTX_CF10_SHIFT                       4
+#define XSTORM_ISCSI_CONN_AG_CTX_CF11_MASK                        0x3
+#define XSTORM_ISCSI_CONN_AG_CTX_CF11_SHIFT                       6
+       u8 flags5;
+#define XSTORM_ISCSI_CONN_AG_CTX_CF12_MASK                        0x3
+#define XSTORM_ISCSI_CONN_AG_CTX_CF12_SHIFT                       0
+#define XSTORM_ISCSI_CONN_AG_CTX_CF13_MASK                        0x3
+#define XSTORM_ISCSI_CONN_AG_CTX_CF13_SHIFT                       2
+#define XSTORM_ISCSI_CONN_AG_CTX_CF14_MASK                        0x3
+#define XSTORM_ISCSI_CONN_AG_CTX_CF14_SHIFT                       4
+#define XSTORM_ISCSI_CONN_AG_CTX_UPDATE_STATE_TO_BASE_CF_MASK     0x3
+#define XSTORM_ISCSI_CONN_AG_CTX_UPDATE_STATE_TO_BASE_CF_SHIFT    6
+       u8 flags6;
+#define XSTORM_ISCSI_CONN_AG_CTX_CF16_MASK                        0x3
+#define XSTORM_ISCSI_CONN_AG_CTX_CF16_SHIFT                       0
+#define XSTORM_ISCSI_CONN_AG_CTX_CF17_MASK                        0x3
+#define XSTORM_ISCSI_CONN_AG_CTX_CF17_SHIFT                       2
+#define XSTORM_ISCSI_CONN_AG_CTX_CF18_MASK                        0x3
+#define XSTORM_ISCSI_CONN_AG_CTX_CF18_SHIFT                       4
+#define XSTORM_ISCSI_CONN_AG_CTX_DQ_FLUSH_MASK                    0x3
+#define XSTORM_ISCSI_CONN_AG_CTX_DQ_FLUSH_SHIFT                   6
+       u8 flags7;
+#define XSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q0_MASK                    0x3
+#define XSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q0_SHIFT                   0
+#define XSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q1_MASK                    0x3
+#define XSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q1_SHIFT                   2
+#define XSTORM_ISCSI_CONN_AG_CTX_SLOW_PATH_MASK                   0x3
+#define XSTORM_ISCSI_CONN_AG_CTX_SLOW_PATH_SHIFT                  4
+#define XSTORM_ISCSI_CONN_AG_CTX_CF0EN_MASK                       0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_CF0EN_SHIFT                      6
+#define XSTORM_ISCSI_CONN_AG_CTX_CF1EN_MASK                       0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_CF1EN_SHIFT                      7
+       u8 flags8;
+#define XSTORM_ISCSI_CONN_AG_CTX_CF2EN_MASK                       0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_CF2EN_SHIFT                      0
+#define XSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_EN_MASK           0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_EN_SHIFT          1
+#define XSTORM_ISCSI_CONN_AG_CTX_CF4EN_MASK                       0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_CF4EN_SHIFT                      2
+#define XSTORM_ISCSI_CONN_AG_CTX_CF5EN_MASK                       0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_CF5EN_SHIFT                      3
+#define XSTORM_ISCSI_CONN_AG_CTX_CF6EN_MASK                       0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_CF6EN_SHIFT                      4
+#define XSTORM_ISCSI_CONN_AG_CTX_CF7EN_MASK                       0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_CF7EN_SHIFT                      5
+#define XSTORM_ISCSI_CONN_AG_CTX_CF8EN_MASK                       0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_CF8EN_SHIFT                      6
+#define XSTORM_ISCSI_CONN_AG_CTX_CF9EN_MASK                       0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_CF9EN_SHIFT                      7
+       u8 flags9;
+#define XSTORM_ISCSI_CONN_AG_CTX_CF10EN_MASK                      0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_CF10EN_SHIFT                     0
+#define XSTORM_ISCSI_CONN_AG_CTX_CF11EN_MASK                      0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_CF11EN_SHIFT                     1
+#define XSTORM_ISCSI_CONN_AG_CTX_CF12EN_MASK                      0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_CF12EN_SHIFT                     2
+#define XSTORM_ISCSI_CONN_AG_CTX_CF13EN_MASK                      0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_CF13EN_SHIFT                     3
+#define XSTORM_ISCSI_CONN_AG_CTX_CF14EN_MASK                      0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_CF14EN_SHIFT                     4
+#define XSTORM_ISCSI_CONN_AG_CTX_UPDATE_STATE_TO_BASE_CF_EN_MASK  0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_UPDATE_STATE_TO_BASE_CF_EN_SHIFT 5
+#define XSTORM_ISCSI_CONN_AG_CTX_CF16EN_MASK                      0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_CF16EN_SHIFT                     6
+#define XSTORM_ISCSI_CONN_AG_CTX_CF17EN_MASK                      0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_CF17EN_SHIFT                     7
+       u8 flags10;
+#define XSTORM_ISCSI_CONN_AG_CTX_CF18EN_MASK                      0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_CF18EN_SHIFT                     0
+#define XSTORM_ISCSI_CONN_AG_CTX_DQ_FLUSH_EN_MASK                 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_DQ_FLUSH_EN_SHIFT                1
+#define XSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q0_EN_MASK                 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT                2
+#define XSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q1_EN_MASK                 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q1_EN_SHIFT                3
+#define XSTORM_ISCSI_CONN_AG_CTX_SLOW_PATH_EN_MASK                0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_SLOW_PATH_EN_SHIFT               4
+#define XSTORM_ISCSI_CONN_AG_CTX_PROC_ONLY_CLEANUP_EN_MASK        0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_PROC_ONLY_CLEANUP_EN_SHIFT       5
+#define XSTORM_ISCSI_CONN_AG_CTX_RULE0EN_MASK                     0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_RULE0EN_SHIFT                    6
+#define XSTORM_ISCSI_CONN_AG_CTX_MORE_TO_SEND_DEC_RULE_EN_MASK    0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_MORE_TO_SEND_DEC_RULE_EN_SHIFT   7
+       u8 flags11;
+#define XSTORM_ISCSI_CONN_AG_CTX_RULE2EN_MASK                     0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_RULE2EN_SHIFT                    0
+#define XSTORM_ISCSI_CONN_AG_CTX_RULE3EN_MASK                     0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_RULE3EN_SHIFT                    1
+#define XSTORM_ISCSI_CONN_AG_CTX_RESERVED3_MASK                   0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_RESERVED3_SHIFT                  2
+#define XSTORM_ISCSI_CONN_AG_CTX_RULE5EN_MASK                     0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_RULE5EN_SHIFT                    3
+#define XSTORM_ISCSI_CONN_AG_CTX_RULE6EN_MASK                     0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_RULE6EN_SHIFT                    4
+#define XSTORM_ISCSI_CONN_AG_CTX_RULE7EN_MASK                     0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_RULE7EN_SHIFT                    5
+#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED1_MASK                0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED1_SHIFT               6
+#define XSTORM_ISCSI_CONN_AG_CTX_RULE9EN_MASK                     0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_RULE9EN_SHIFT                    7
+       u8 flags12;
+#define XSTORM_ISCSI_CONN_AG_CTX_SQ_DEC_RULE_EN_MASK              0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_SQ_DEC_RULE_EN_SHIFT             0
+#define XSTORM_ISCSI_CONN_AG_CTX_RULE11EN_MASK                    0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_RULE11EN_SHIFT                   1
+#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED2_MASK                0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED2_SHIFT               2
+#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED3_MASK                0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED3_SHIFT               3
+#define XSTORM_ISCSI_CONN_AG_CTX_RULE14EN_MASK                    0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_RULE14EN_SHIFT                   4
+#define XSTORM_ISCSI_CONN_AG_CTX_RULE15EN_MASK                    0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_RULE15EN_SHIFT                   5
+#define XSTORM_ISCSI_CONN_AG_CTX_RULE16EN_MASK                    0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_RULE16EN_SHIFT                   6
+#define XSTORM_ISCSI_CONN_AG_CTX_RULE17EN_MASK                    0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_RULE17EN_SHIFT                   7
+       u8 flags13;
+#define XSTORM_ISCSI_CONN_AG_CTX_R2TQ_DEC_RULE_EN_MASK            0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_R2TQ_DEC_RULE_EN_SHIFT           0
+#define XSTORM_ISCSI_CONN_AG_CTX_HQ_DEC_RULE_EN_MASK              0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_HQ_DEC_RULE_EN_SHIFT             1
+#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED4_MASK                0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED4_SHIFT               2
+#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED5_MASK                0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED5_SHIFT               3
+#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED6_MASK                0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED6_SHIFT               4
+#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED7_MASK                0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED7_SHIFT               5
+#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED8_MASK                0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED8_SHIFT               6
+#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED9_MASK                0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED9_SHIFT               7
+       u8 flags14;
+#define XSTORM_ISCSI_CONN_AG_CTX_BIT16_MASK                       0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_BIT16_SHIFT                      0
+#define XSTORM_ISCSI_CONN_AG_CTX_BIT17_MASK                       0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_BIT17_SHIFT                      1
+#define XSTORM_ISCSI_CONN_AG_CTX_BIT18_MASK                       0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_BIT18_SHIFT                      2
+#define XSTORM_ISCSI_CONN_AG_CTX_BIT19_MASK                       0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_BIT19_SHIFT                      3
+#define XSTORM_ISCSI_CONN_AG_CTX_BIT20_MASK                       0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_BIT20_SHIFT                      4
+#define XSTORM_ISCSI_CONN_AG_CTX_DUMMY_READ_DONE_MASK             0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_DUMMY_READ_DONE_SHIFT            5
+#define XSTORM_ISCSI_CONN_AG_CTX_PROC_ONLY_CLEANUP_MASK           0x3
+#define XSTORM_ISCSI_CONN_AG_CTX_PROC_ONLY_CLEANUP_SHIFT          6
+       u8 byte2;
+       __le16 physical_q0;
+       __le16 physical_q1;
+       __le16 dummy_dorq_var;
+       __le16 sq_cons;
+       __le16 sq_prod;
+       __le16 word5;
+       __le16 slow_io_total_data_tx_update;
+       u8 byte3;
+       u8 byte4;
+       u8 byte5;
+       u8 byte6;
+       __le32 reg0;
+       __le32 reg1;
+       __le32 reg2;
+       __le32 more_to_send_seq;
+       __le32 reg4;
+       __le32 reg5;
+       __le32 hq_scan_next_relevant_ack;
+       __le16 r2tq_prod;
+       __le16 r2tq_cons;
+       __le16 hq_prod;
+       __le16 hq_cons;
+       __le32 remain_seq;
+       __le32 bytes_to_next_pdu;
+       __le32 hq_tcp_seq;
+       u8 byte7;
+       u8 byte8;
+       u8 byte9;
+       u8 byte10;
+       u8 byte11;
+       u8 byte12;
+       u8 byte13;
+       u8 byte14;
+       u8 byte15;
+       u8 byte16;
+       __le16 word11;
+       __le32 reg10;
+       __le32 reg11;
+       __le32 exp_stat_sn;
+       __le32 reg13;
+       __le32 reg14;
+       __le32 reg15;
+       __le32 reg16;
+       __le32 reg17;
+};
+
+struct tstorm_iscsi_conn_ag_ctx {
+       u8 reserved0;
+       u8 state;
+       u8 flags0;
+#define TSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM0_MASK       0x1
+#define TSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM0_SHIFT      0
+#define TSTORM_ISCSI_CONN_AG_CTX_BIT1_MASK               0x1
+#define TSTORM_ISCSI_CONN_AG_CTX_BIT1_SHIFT              1
+#define TSTORM_ISCSI_CONN_AG_CTX_BIT2_MASK               0x1
+#define TSTORM_ISCSI_CONN_AG_CTX_BIT2_SHIFT              2
+#define TSTORM_ISCSI_CONN_AG_CTX_BIT3_MASK               0x1
+#define TSTORM_ISCSI_CONN_AG_CTX_BIT3_SHIFT              3
+#define TSTORM_ISCSI_CONN_AG_CTX_BIT4_MASK               0x1
+#define TSTORM_ISCSI_CONN_AG_CTX_BIT4_SHIFT              4
+#define TSTORM_ISCSI_CONN_AG_CTX_BIT5_MASK               0x1
+#define TSTORM_ISCSI_CONN_AG_CTX_BIT5_SHIFT              5
+#define TSTORM_ISCSI_CONN_AG_CTX_CF0_MASK                0x3
+#define TSTORM_ISCSI_CONN_AG_CTX_CF0_SHIFT               6
+       u8 flags1;
+#define TSTORM_ISCSI_CONN_AG_CTX_CF1_MASK                0x3
+#define TSTORM_ISCSI_CONN_AG_CTX_CF1_SHIFT               0
+#define TSTORM_ISCSI_CONN_AG_CTX_CF2_MASK                0x3
+#define TSTORM_ISCSI_CONN_AG_CTX_CF2_SHIFT               2
+#define TSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_MASK     0x3
+#define TSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_SHIFT    4
+#define TSTORM_ISCSI_CONN_AG_CTX_CF4_MASK                0x3
+#define TSTORM_ISCSI_CONN_AG_CTX_CF4_SHIFT               6
+       u8 flags2;
+#define TSTORM_ISCSI_CONN_AG_CTX_CF5_MASK                0x3
+#define TSTORM_ISCSI_CONN_AG_CTX_CF5_SHIFT               0
+#define TSTORM_ISCSI_CONN_AG_CTX_CF6_MASK                0x3
+#define TSTORM_ISCSI_CONN_AG_CTX_CF6_SHIFT               2
+#define TSTORM_ISCSI_CONN_AG_CTX_CF7_MASK                0x3
+#define TSTORM_ISCSI_CONN_AG_CTX_CF7_SHIFT               4
+#define TSTORM_ISCSI_CONN_AG_CTX_CF8_MASK                0x3
+#define TSTORM_ISCSI_CONN_AG_CTX_CF8_SHIFT               6
+       u8 flags3;
+#define TSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q0_MASK           0x3
+#define TSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q0_SHIFT          0
+#define TSTORM_ISCSI_CONN_AG_CTX_CF10_MASK               0x3
+#define TSTORM_ISCSI_CONN_AG_CTX_CF10_SHIFT              2
+#define TSTORM_ISCSI_CONN_AG_CTX_CF0EN_MASK              0x1
+#define TSTORM_ISCSI_CONN_AG_CTX_CF0EN_SHIFT             4
+#define TSTORM_ISCSI_CONN_AG_CTX_CF1EN_MASK              0x1
+#define TSTORM_ISCSI_CONN_AG_CTX_CF1EN_SHIFT             5
+#define TSTORM_ISCSI_CONN_AG_CTX_CF2EN_MASK              0x1
+#define TSTORM_ISCSI_CONN_AG_CTX_CF2EN_SHIFT             6
+#define TSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_EN_MASK  0x1
+#define TSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_EN_SHIFT 7
+       u8 flags4;
+#define TSTORM_ISCSI_CONN_AG_CTX_CF4EN_MASK              0x1
+#define TSTORM_ISCSI_CONN_AG_CTX_CF4EN_SHIFT             0
+#define TSTORM_ISCSI_CONN_AG_CTX_CF5EN_MASK              0x1
+#define TSTORM_ISCSI_CONN_AG_CTX_CF5EN_SHIFT             1
+#define TSTORM_ISCSI_CONN_AG_CTX_CF6EN_MASK              0x1
+#define TSTORM_ISCSI_CONN_AG_CTX_CF6EN_SHIFT             2
+#define TSTORM_ISCSI_CONN_AG_CTX_CF7EN_MASK              0x1
+#define TSTORM_ISCSI_CONN_AG_CTX_CF7EN_SHIFT             3
+#define TSTORM_ISCSI_CONN_AG_CTX_CF8EN_MASK              0x1
+#define TSTORM_ISCSI_CONN_AG_CTX_CF8EN_SHIFT             4
+#define TSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q0_EN_MASK        0x1
+#define TSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT       5
+#define TSTORM_ISCSI_CONN_AG_CTX_CF10EN_MASK             0x1
+#define TSTORM_ISCSI_CONN_AG_CTX_CF10EN_SHIFT            6
+#define TSTORM_ISCSI_CONN_AG_CTX_RULE0EN_MASK            0x1
+#define TSTORM_ISCSI_CONN_AG_CTX_RULE0EN_SHIFT           7
+       u8 flags5;
+#define TSTORM_ISCSI_CONN_AG_CTX_RULE1EN_MASK            0x1
+#define TSTORM_ISCSI_CONN_AG_CTX_RULE1EN_SHIFT           0
+#define TSTORM_ISCSI_CONN_AG_CTX_RULE2EN_MASK            0x1
+#define TSTORM_ISCSI_CONN_AG_CTX_RULE2EN_SHIFT           1
+#define TSTORM_ISCSI_CONN_AG_CTX_RULE3EN_MASK            0x1
+#define TSTORM_ISCSI_CONN_AG_CTX_RULE3EN_SHIFT           2
+#define TSTORM_ISCSI_CONN_AG_CTX_RULE4EN_MASK            0x1
+#define TSTORM_ISCSI_CONN_AG_CTX_RULE4EN_SHIFT           3
+#define TSTORM_ISCSI_CONN_AG_CTX_RULE5EN_MASK            0x1
+#define TSTORM_ISCSI_CONN_AG_CTX_RULE5EN_SHIFT           4
+#define TSTORM_ISCSI_CONN_AG_CTX_RULE6EN_MASK            0x1
+#define TSTORM_ISCSI_CONN_AG_CTX_RULE6EN_SHIFT           5
+#define TSTORM_ISCSI_CONN_AG_CTX_RULE7EN_MASK            0x1
+#define TSTORM_ISCSI_CONN_AG_CTX_RULE7EN_SHIFT           6
+#define TSTORM_ISCSI_CONN_AG_CTX_RULE8EN_MASK            0x1
+#define TSTORM_ISCSI_CONN_AG_CTX_RULE8EN_SHIFT           7
+       __le32 reg0;
+       __le32 reg1;
+       __le32 reg2;
+       __le32 reg3;
+       __le32 reg4;
+       __le32 reg5;
+       __le32 reg6;
+       __le32 reg7;
+       __le32 reg8;
+       u8 byte2;
+       u8 byte3;
+       __le16 word0;
+};
+
+struct ustorm_iscsi_conn_ag_ctx {
+       u8 byte0;
+       u8 byte1;
+       u8 flags0;
+#define USTORM_ISCSI_CONN_AG_CTX_BIT0_MASK     0x1
+#define USTORM_ISCSI_CONN_AG_CTX_BIT0_SHIFT    0
+#define USTORM_ISCSI_CONN_AG_CTX_BIT1_MASK     0x1
+#define USTORM_ISCSI_CONN_AG_CTX_BIT1_SHIFT    1
+#define USTORM_ISCSI_CONN_AG_CTX_CF0_MASK      0x3
+#define USTORM_ISCSI_CONN_AG_CTX_CF0_SHIFT     2
+#define USTORM_ISCSI_CONN_AG_CTX_CF1_MASK      0x3
+#define USTORM_ISCSI_CONN_AG_CTX_CF1_SHIFT     4
+#define USTORM_ISCSI_CONN_AG_CTX_CF2_MASK      0x3
+#define USTORM_ISCSI_CONN_AG_CTX_CF2_SHIFT     6
+       u8 flags1;
+#define USTORM_ISCSI_CONN_AG_CTX_CF3_MASK      0x3
+#define USTORM_ISCSI_CONN_AG_CTX_CF3_SHIFT     0
+#define USTORM_ISCSI_CONN_AG_CTX_CF4_MASK      0x3
+#define USTORM_ISCSI_CONN_AG_CTX_CF4_SHIFT     2
+#define USTORM_ISCSI_CONN_AG_CTX_CF5_MASK      0x3
+#define USTORM_ISCSI_CONN_AG_CTX_CF5_SHIFT     4
+#define USTORM_ISCSI_CONN_AG_CTX_CF6_MASK      0x3
+#define USTORM_ISCSI_CONN_AG_CTX_CF6_SHIFT     6
+       u8 flags2;
+#define USTORM_ISCSI_CONN_AG_CTX_CF0EN_MASK    0x1
+#define USTORM_ISCSI_CONN_AG_CTX_CF0EN_SHIFT   0
+#define USTORM_ISCSI_CONN_AG_CTX_CF1EN_MASK    0x1
+#define USTORM_ISCSI_CONN_AG_CTX_CF1EN_SHIFT   1
+#define USTORM_ISCSI_CONN_AG_CTX_CF2EN_MASK    0x1
+#define USTORM_ISCSI_CONN_AG_CTX_CF2EN_SHIFT   2
+#define USTORM_ISCSI_CONN_AG_CTX_CF3EN_MASK    0x1
+#define USTORM_ISCSI_CONN_AG_CTX_CF3EN_SHIFT   3
+#define USTORM_ISCSI_CONN_AG_CTX_CF4EN_MASK    0x1
+#define USTORM_ISCSI_CONN_AG_CTX_CF4EN_SHIFT   4
+#define USTORM_ISCSI_CONN_AG_CTX_CF5EN_MASK    0x1
+#define USTORM_ISCSI_CONN_AG_CTX_CF5EN_SHIFT   5
+#define USTORM_ISCSI_CONN_AG_CTX_CF6EN_MASK    0x1
+#define USTORM_ISCSI_CONN_AG_CTX_CF6EN_SHIFT   6
+#define USTORM_ISCSI_CONN_AG_CTX_RULE0EN_MASK  0x1
+#define USTORM_ISCSI_CONN_AG_CTX_RULE0EN_SHIFT 7
+       u8 flags3;
+#define USTORM_ISCSI_CONN_AG_CTX_RULE1EN_MASK  0x1
+#define USTORM_ISCSI_CONN_AG_CTX_RULE1EN_SHIFT 0
+#define USTORM_ISCSI_CONN_AG_CTX_RULE2EN_MASK  0x1
+#define USTORM_ISCSI_CONN_AG_CTX_RULE2EN_SHIFT 1
+#define USTORM_ISCSI_CONN_AG_CTX_RULE3EN_MASK  0x1
+#define USTORM_ISCSI_CONN_AG_CTX_RULE3EN_SHIFT 2
+#define USTORM_ISCSI_CONN_AG_CTX_RULE4EN_MASK  0x1
+#define USTORM_ISCSI_CONN_AG_CTX_RULE4EN_SHIFT 3
+#define USTORM_ISCSI_CONN_AG_CTX_RULE5EN_MASK  0x1
+#define USTORM_ISCSI_CONN_AG_CTX_RULE5EN_SHIFT 4
+#define USTORM_ISCSI_CONN_AG_CTX_RULE6EN_MASK  0x1
+#define USTORM_ISCSI_CONN_AG_CTX_RULE6EN_SHIFT 5
+#define USTORM_ISCSI_CONN_AG_CTX_RULE7EN_MASK  0x1
+#define USTORM_ISCSI_CONN_AG_CTX_RULE7EN_SHIFT 6
+#define USTORM_ISCSI_CONN_AG_CTX_RULE8EN_MASK  0x1
+#define USTORM_ISCSI_CONN_AG_CTX_RULE8EN_SHIFT 7
+       u8 byte2;
+       u8 byte3;
+       __le16 word0;
+       __le16 word1;
+       __le32 reg0;
+       __le32 reg1;
+       __le32 reg2;
+       __le32 reg3;
+       __le16 word2;
+       __le16 word3;
+};
+
+struct tstorm_iscsi_conn_st_ctx {
+       __le32 reserved[40];
+};
+
+struct mstorm_iscsi_conn_ag_ctx {
+       u8 reserved;
+       u8 state;
+       u8 flags0;
+#define MSTORM_ISCSI_CONN_AG_CTX_BIT0_MASK     0x1
+#define MSTORM_ISCSI_CONN_AG_CTX_BIT0_SHIFT    0
+#define MSTORM_ISCSI_CONN_AG_CTX_BIT1_MASK     0x1
+#define MSTORM_ISCSI_CONN_AG_CTX_BIT1_SHIFT    1
+#define MSTORM_ISCSI_CONN_AG_CTX_CF0_MASK      0x3
+#define MSTORM_ISCSI_CONN_AG_CTX_CF0_SHIFT     2
+#define MSTORM_ISCSI_CONN_AG_CTX_CF1_MASK      0x3
+#define MSTORM_ISCSI_CONN_AG_CTX_CF1_SHIFT     4
+#define MSTORM_ISCSI_CONN_AG_CTX_CF2_MASK      0x3
+#define MSTORM_ISCSI_CONN_AG_CTX_CF2_SHIFT     6
+       u8 flags1;
+#define MSTORM_ISCSI_CONN_AG_CTX_CF0EN_MASK    0x1
+#define MSTORM_ISCSI_CONN_AG_CTX_CF0EN_SHIFT   0
+#define MSTORM_ISCSI_CONN_AG_CTX_CF1EN_MASK    0x1
+#define MSTORM_ISCSI_CONN_AG_CTX_CF1EN_SHIFT   1
+#define MSTORM_ISCSI_CONN_AG_CTX_CF2EN_MASK    0x1
+#define MSTORM_ISCSI_CONN_AG_CTX_CF2EN_SHIFT   2
+#define MSTORM_ISCSI_CONN_AG_CTX_RULE0EN_MASK  0x1
+#define MSTORM_ISCSI_CONN_AG_CTX_RULE0EN_SHIFT 3
+#define MSTORM_ISCSI_CONN_AG_CTX_RULE1EN_MASK  0x1
+#define MSTORM_ISCSI_CONN_AG_CTX_RULE1EN_SHIFT 4
+#define MSTORM_ISCSI_CONN_AG_CTX_RULE2EN_MASK  0x1
+#define MSTORM_ISCSI_CONN_AG_CTX_RULE2EN_SHIFT 5
+#define MSTORM_ISCSI_CONN_AG_CTX_RULE3EN_MASK  0x1
+#define MSTORM_ISCSI_CONN_AG_CTX_RULE3EN_SHIFT 6
+#define MSTORM_ISCSI_CONN_AG_CTX_RULE4EN_MASK  0x1
+#define MSTORM_ISCSI_CONN_AG_CTX_RULE4EN_SHIFT 7
+       __le16 word0;
+       __le16 word1;
+       __le32 reg0;
+       __le32 reg1;
+};
+
+struct mstorm_iscsi_tcp_conn_st_ctx {
+       __le32 reserved_tcp[20];
+       __le32 reserved_iscsi[8];
+};
+
+struct ustorm_iscsi_conn_st_ctx {
+       __le32 reserved[52];
+};
+
+struct iscsi_conn_context {
+       struct ystorm_iscsi_conn_st_ctx ystorm_st_context;
+       struct regpair ystorm_st_padding[2];
+       struct pstorm_iscsi_tcp_conn_st_ctx pstorm_st_context;
+       struct regpair pstorm_st_padding[2];
+       struct pb_context xpb2_context;
+       struct xstorm_iscsi_tcp_conn_st_ctx xstorm_st_context;
+       struct regpair xstorm_st_padding[2];
+       struct xstorm_iscsi_conn_ag_ctx xstorm_ag_context;
+       struct tstorm_iscsi_conn_ag_ctx tstorm_ag_context;
+       struct regpair tstorm_ag_padding[2];
+       struct timers_context timer_context;
+       struct ustorm_iscsi_conn_ag_ctx ustorm_ag_context;
+       struct pb_context upb_context;
+       struct tstorm_iscsi_conn_st_ctx tstorm_st_context;
+       struct regpair tstorm_st_padding[2];
+       struct mstorm_iscsi_conn_ag_ctx mstorm_ag_context;
+       struct mstorm_iscsi_tcp_conn_st_ctx mstorm_st_context;
+       struct ustorm_iscsi_conn_st_ctx ustorm_st_context;
+};
+
+struct iscsi_init_ramrod_params {
+       struct iscsi_spe_func_init iscsi_init_spe;
+       struct tcp_init_params tcp_init;
+};
+
+struct ystorm_iscsi_conn_ag_ctx {
+       u8 byte0;
+       u8 byte1;
+       u8 flags0;
+#define YSTORM_ISCSI_CONN_AG_CTX_BIT0_MASK     0x1
+#define YSTORM_ISCSI_CONN_AG_CTX_BIT0_SHIFT    0
+#define YSTORM_ISCSI_CONN_AG_CTX_BIT1_MASK     0x1
+#define YSTORM_ISCSI_CONN_AG_CTX_BIT1_SHIFT    1
+#define YSTORM_ISCSI_CONN_AG_CTX_CF0_MASK      0x3
+#define YSTORM_ISCSI_CONN_AG_CTX_CF0_SHIFT     2
+#define YSTORM_ISCSI_CONN_AG_CTX_CF1_MASK      0x3
+#define YSTORM_ISCSI_CONN_AG_CTX_CF1_SHIFT     4
+#define YSTORM_ISCSI_CONN_AG_CTX_CF2_MASK      0x3
+#define YSTORM_ISCSI_CONN_AG_CTX_CF2_SHIFT     6
+       u8 flags1;
+#define YSTORM_ISCSI_CONN_AG_CTX_CF0EN_MASK    0x1
+#define YSTORM_ISCSI_CONN_AG_CTX_CF0EN_SHIFT   0
+#define YSTORM_ISCSI_CONN_AG_CTX_CF1EN_MASK    0x1
+#define YSTORM_ISCSI_CONN_AG_CTX_CF1EN_SHIFT   1
+#define YSTORM_ISCSI_CONN_AG_CTX_CF2EN_MASK    0x1
+#define YSTORM_ISCSI_CONN_AG_CTX_CF2EN_SHIFT   2
+#define YSTORM_ISCSI_CONN_AG_CTX_RULE0EN_MASK  0x1
+#define YSTORM_ISCSI_CONN_AG_CTX_RULE0EN_SHIFT 3
+#define YSTORM_ISCSI_CONN_AG_CTX_RULE1EN_MASK  0x1
+#define YSTORM_ISCSI_CONN_AG_CTX_RULE1EN_SHIFT 4
+#define YSTORM_ISCSI_CONN_AG_CTX_RULE2EN_MASK  0x1
+#define YSTORM_ISCSI_CONN_AG_CTX_RULE2EN_SHIFT 5
+#define YSTORM_ISCSI_CONN_AG_CTX_RULE3EN_MASK  0x1
+#define YSTORM_ISCSI_CONN_AG_CTX_RULE3EN_SHIFT 6
+#define YSTORM_ISCSI_CONN_AG_CTX_RULE4EN_MASK  0x1
+#define YSTORM_ISCSI_CONN_AG_CTX_RULE4EN_SHIFT 7
+       u8 byte2;
+       u8 byte3;
+       __le16 word0;
+       __le32 reg0;
+       __le32 reg1;
+       __le16 word1;
+       __le16 word2;
+       __le16 word3;
+       __le16 word4;
+       __le32 reg2;
+       __le32 reg3;
+};
+#define VF_MAX_STATIC 192
+
+#define MCP_GLOB_PATH_MAX      2
+#define MCP_PORT_MAX           2
+#define MCP_GLOB_PORT_MAX      4
+#define MCP_GLOB_FUNC_MAX      16
+
+/* Offset from the beginning of the MCP scratchpad */
+#define OFFSIZE_OFFSET_SHIFT   0
+#define OFFSIZE_OFFSET_MASK    0x0000ffff
+/* Size of specific element (not the whole array if any) */
+#define OFFSIZE_SIZE_SHIFT     16
+#define OFFSIZE_SIZE_MASK      0xffff0000
+
+#define SECTION_OFFSET(_offsize) ((((_offsize &                        \
+                                    OFFSIZE_OFFSET_MASK) >>    \
+                                   OFFSIZE_OFFSET_SHIFT) << 2))
+
+#define QED_SECTION_SIZE(_offsize) (((_offsize &               \
+                                     OFFSIZE_SIZE_MASK) >>     \
+                                    OFFSIZE_SIZE_SHIFT) << 2)
+
+#define SECTION_ADDR(_offsize, idx) (MCP_REG_SCRATCH +                 \
+                                    SECTION_OFFSET(_offsize) +         \
+                                    (QED_SECTION_SIZE(_offsize) * idx))
+
+#define SECTION_OFFSIZE_ADDR(_pub_base, _section)      \
+       (_pub_base + offsetof(struct mcp_public_data, sections[_section]))
+
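/*
 * Editor's note -- worked example, not part of the committed sources:
 * an "offsize" word packs a 16-bit offset and a 16-bit per-element size,
 * both counted in dwords; the "<< 2" in the macros above converts them to
 * bytes, and SECTION_ADDR() then indexes into the array relative to the
 * MCP scratchpad.  For a made-up value 0x00200100 (size 0x20 dwords,
 * offset 0x100 dwords):
 *
 *   SECTION_OFFSET(0x00200100)   == 0x100 << 2 == 0x400 bytes
 *   QED_SECTION_SIZE(0x00200100) == 0x20  << 2 == 0x80  bytes
 *   SECTION_ADDR(0x00200100, 3)  == MCP_REG_SCRATCH + 0x400 + 3 * 0x80
 */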
+/* PHY configuration */
+struct eth_phy_cfg {
+       u32 speed;
+#define ETH_SPEED_AUTONEG      0
+#define ETH_SPEED_SMARTLINQ    0x8
+
+       u32 pause;
+#define ETH_PAUSE_NONE         0x0
+#define ETH_PAUSE_AUTONEG      0x1
+#define ETH_PAUSE_RX           0x2
+#define ETH_PAUSE_TX           0x4
+
+       u32 adv_speed;
+       u32 loopback_mode;
+#define ETH_LOOPBACK_NONE              (0)
+#define ETH_LOOPBACK_INT_PHY           (1)
+#define ETH_LOOPBACK_EXT_PHY           (2)
+#define ETH_LOOPBACK_EXT               (3)
+#define ETH_LOOPBACK_MAC               (4)
+
+       u32 feature_config_flags;
+#define ETH_EEE_MODE_ADV_LPI           (1 << 0)
+};
+
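/*
 * Editor's note -- illustrative sketch, not part of the committed sources:
 * this block appears to be what the driver hands to the management FW to
 * request a link configuration.  Using only the constants defined above,
 * an autonegotiated link with both pause directions advertised could be
 * described as below; the adv_speed bit encoding is firmware-defined and
 * is not covered by these defines.
 */
static inline void example_fill_phy_cfg(struct eth_phy_cfg *cfg)
{
	cfg->speed = ETH_SPEED_AUTONEG;		/* 0: let autoneg pick */
	cfg->pause = ETH_PAUSE_AUTONEG | ETH_PAUSE_RX | ETH_PAUSE_TX;
	cfg->adv_speed = 0;			/* encoding not shown here */
	cfg->loopback_mode = ETH_LOOPBACK_NONE;
	cfg->feature_config_flags = 0;
}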
+struct port_mf_cfg {
+       u32 dynamic_cfg;
+#define PORT_MF_CFG_OV_TAG_MASK                0x0000ffff
+#define PORT_MF_CFG_OV_TAG_SHIFT       0
+#define PORT_MF_CFG_OV_TAG_DEFAULT     PORT_MF_CFG_OV_TAG_MASK
+
+       u32 reserved[1];
+};
+
+struct eth_stats {
+       u64 r64;
+       u64 r127;
+       u64 r255;
+       u64 r511;
+       u64 r1023;
+       u64 r1518;
+       u64 r1522;
+       u64 r2047;
+       u64 r4095;
+       u64 r9216;
+       u64 r16383;
+       u64 rfcs;
+       u64 rxcf;
+       u64 rxpf;
+       u64 rxpp;
+       u64 raln;
+       u64 rfcr;
+       u64 rovr;
+       u64 rjbr;
+       u64 rund;
+       u64 rfrg;
+       u64 t64;
+       u64 t127;
+       u64 t255;
+       u64 t511;
+       u64 t1023;
+       u64 t1518;
+       u64 t2047;
+       u64 t4095;
+       u64 t9216;
+       u64 t16383;
+       u64 txpf;
+       u64 txpp;
+       u64 tlpiec;
+       u64 tncl;
+       u64 rbyte;
+       u64 rxuca;
+       u64 rxmca;
+       u64 rxbca;
+       u64 rxpok;
+       u64 tbyte;
+       u64 txuca;
+       u64 txmca;
+       u64 txbca;
+       u64 txcf;
+};
+
+struct brb_stats {
+       u64 brb_truncate[8];
+       u64 brb_discard[8];
+};
+
+struct port_stats {
+       struct brb_stats brb;
+       struct eth_stats eth;
+};
+
+struct couple_mode_teaming {
+       u8 port_cmt[MCP_GLOB_PORT_MAX];
+#define PORT_CMT_IN_TEAM       (1 << 0)
+
+#define PORT_CMT_PORT_ROLE     (1 << 1)
+#define PORT_CMT_PORT_INACTIVE (0 << 1)
+#define PORT_CMT_PORT_ACTIVE   (1 << 1)
+
+#define PORT_CMT_TEAM_MASK     (1 << 2)
+#define PORT_CMT_TEAM0         (0 << 2)
+#define PORT_CMT_TEAM1         (1 << 2)
+};
+
+#define LLDP_CHASSIS_ID_STAT_LEN       4
+#define LLDP_PORT_ID_STAT_LEN          4
+#define DCBX_MAX_APP_PROTOCOL          32
+#define MAX_SYSTEM_LLDP_TLV_DATA       32
+
+enum _lldp_agent {
+       LLDP_NEAREST_BRIDGE = 0,
+       LLDP_NEAREST_NON_TPMR_BRIDGE,
+       LLDP_NEAREST_CUSTOMER_BRIDGE,
+       LLDP_MAX_LLDP_AGENTS
+};
+
+struct lldp_config_params_s {
+       u32 config;
+#define LLDP_CONFIG_TX_INTERVAL_MASK   0x000000ff
+#define LLDP_CONFIG_TX_INTERVAL_SHIFT  0
+#define LLDP_CONFIG_HOLD_MASK          0x00000f00
+#define LLDP_CONFIG_HOLD_SHIFT         8
+#define LLDP_CONFIG_MAX_CREDIT_MASK    0x0000f000
+#define LLDP_CONFIG_MAX_CREDIT_SHIFT   12
+#define LLDP_CONFIG_ENABLE_RX_MASK     0x40000000
+#define LLDP_CONFIG_ENABLE_RX_SHIFT    30
+#define LLDP_CONFIG_ENABLE_TX_MASK     0x80000000
+#define LLDP_CONFIG_ENABLE_TX_SHIFT    31
+       u32 local_chassis_id[LLDP_CHASSIS_ID_STAT_LEN];
+       u32 local_port_id[LLDP_PORT_ID_STAT_LEN];
+};
+
+struct lldp_status_params_s {
+       u32 prefix_seq_num;
+       u32 status;
+       u32 peer_chassis_id[LLDP_CHASSIS_ID_STAT_LEN];
+       u32 peer_port_id[LLDP_PORT_ID_STAT_LEN];
+       u32 suffix_seq_num;
+};
+
+struct dcbx_ets_feature {
+       u32 flags;
+#define DCBX_ETS_ENABLED_MASK  0x00000001
+#define DCBX_ETS_ENABLED_SHIFT 0
+#define DCBX_ETS_WILLING_MASK  0x00000002
+#define DCBX_ETS_WILLING_SHIFT 1
+#define DCBX_ETS_ERROR_MASK    0x00000004
+#define DCBX_ETS_ERROR_SHIFT   2
+#define DCBX_ETS_CBS_MASK      0x00000008
+#define DCBX_ETS_CBS_SHIFT     3
+#define DCBX_ETS_MAX_TCS_MASK  0x000000f0
+#define DCBX_ETS_MAX_TCS_SHIFT 4
+#define DCBX_ISCSI_OOO_TC_MASK 0x00000f00
+#define DCBX_ISCSI_OOO_TC_SHIFT        8
+       u32 pri_tc_tbl[1];
+#define DCBX_ISCSI_OOO_TC      (4)
+
+#define NIG_ETS_ISCSI_OOO_CLIENT_OFFSET        (DCBX_ISCSI_OOO_TC + 1)
+#define DCBX_CEE_STRICT_PRIORITY       0xf
+       u32 tc_bw_tbl[2];
+       u32 tc_tsa_tbl[2];
+#define DCBX_ETS_TSA_STRICT    0
+#define DCBX_ETS_TSA_CBS       1
+#define DCBX_ETS_TSA_ETS       2
+};
+
+struct dcbx_app_priority_entry {
+       u32 entry;
+#define DCBX_APP_PRI_MAP_MASK          0x000000ff
+#define DCBX_APP_PRI_MAP_SHIFT         0
+#define DCBX_APP_PRI_0                 0x01
+#define DCBX_APP_PRI_1                 0x02
+#define DCBX_APP_PRI_2                 0x04
+#define DCBX_APP_PRI_3                 0x08
+#define DCBX_APP_PRI_4                 0x10
+#define DCBX_APP_PRI_5                 0x20
+#define DCBX_APP_PRI_6                 0x40
+#define DCBX_APP_PRI_7                 0x80
+#define DCBX_APP_SF_MASK               0x00000300
+#define DCBX_APP_SF_SHIFT              8
+#define DCBX_APP_SF_ETHTYPE            0
+#define DCBX_APP_SF_PORT               1
+#define DCBX_APP_SF_IEEE_MASK          0x0000f000
+#define DCBX_APP_SF_IEEE_SHIFT         12
+#define DCBX_APP_SF_IEEE_RESERVED      0
+#define DCBX_APP_SF_IEEE_ETHTYPE       1
+#define DCBX_APP_SF_IEEE_TCP_PORT      2
+#define DCBX_APP_SF_IEEE_UDP_PORT      3
+#define DCBX_APP_SF_IEEE_TCP_UDP_PORT  4
+
+#define DCBX_APP_PROTOCOL_ID_MASK      0xffff0000
+#define DCBX_APP_PROTOCOL_ID_SHIFT     16
+};
+
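/*
 * Editor's note -- illustrative sketch, not part of the committed sources:
 * unlike the per-byte AG-context fields earlier in this file, the DCBX and
 * LLDP masks above are already positioned within the 32-bit word, so a
 * field is extracted as (word & MASK) >> SHIFT.  A hypothetical decode of
 * one application-priority table entry:
 */
static inline void example_decode_app_entry(const struct dcbx_app_priority_entry *e,
					    u16 *proto_id, u8 *pri_map)
{
	*proto_id = (e->entry & DCBX_APP_PROTOCOL_ID_MASK) >>
		    DCBX_APP_PROTOCOL_ID_SHIFT;
	*pri_map = (e->entry & DCBX_APP_PRI_MAP_MASK) >>
		   DCBX_APP_PRI_MAP_SHIFT;
}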
+struct dcbx_app_priority_feature {
+       u32 flags;
+#define DCBX_APP_ENABLED_MASK          0x00000001
+#define DCBX_APP_ENABLED_SHIFT         0
+#define DCBX_APP_WILLING_MASK          0x00000002
+#define DCBX_APP_WILLING_SHIFT         1
+#define DCBX_APP_ERROR_MASK            0x00000004
+#define DCBX_APP_ERROR_SHIFT           2
+#define DCBX_APP_MAX_TCS_MASK          0x0000f000
+#define DCBX_APP_MAX_TCS_SHIFT         12
+#define DCBX_APP_NUM_ENTRIES_MASK      0x00ff0000
+#define DCBX_APP_NUM_ENTRIES_SHIFT     16
+       struct dcbx_app_priority_entry app_pri_tbl[DCBX_MAX_APP_PROTOCOL];
+};
+
+struct dcbx_features {
+       struct dcbx_ets_feature ets;
+       u32 pfc;
+#define DCBX_PFC_PRI_EN_BITMAP_MASK    0x000000ff
+#define DCBX_PFC_PRI_EN_BITMAP_SHIFT   0
+#define DCBX_PFC_PRI_EN_BITMAP_PRI_0   0x01
+#define DCBX_PFC_PRI_EN_BITMAP_PRI_1   0x02
+#define DCBX_PFC_PRI_EN_BITMAP_PRI_2   0x04
+#define DCBX_PFC_PRI_EN_BITMAP_PRI_3   0x08
+#define DCBX_PFC_PRI_EN_BITMAP_PRI_4   0x10
+#define DCBX_PFC_PRI_EN_BITMAP_PRI_5   0x20
+#define DCBX_PFC_PRI_EN_BITMAP_PRI_6   0x40
+#define DCBX_PFC_PRI_EN_BITMAP_PRI_7   0x80
+
+#define DCBX_PFC_FLAGS_MASK            0x0000ff00
+#define DCBX_PFC_FLAGS_SHIFT           8
+#define DCBX_PFC_CAPS_MASK             0x00000f00
+#define DCBX_PFC_CAPS_SHIFT            8
+#define DCBX_PFC_MBC_MASK              0x00004000
+#define DCBX_PFC_MBC_SHIFT             14
+#define DCBX_PFC_WILLING_MASK          0x00008000
+#define DCBX_PFC_WILLING_SHIFT         15
+#define DCBX_PFC_ENABLED_MASK          0x00010000
+#define DCBX_PFC_ENABLED_SHIFT         16
+#define DCBX_PFC_ERROR_MASK            0x00020000
+#define DCBX_PFC_ERROR_SHIFT           17
+
+       struct dcbx_app_priority_feature app;
+};
+
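/*
 * Editor's note -- illustrative sketch, not part of the committed sources:
 * the pfc word combines in-place enable/willing/error/caps fields with a
 * per-priority enable bitmap in its low byte.  A hypothetical check of
 * whether PFC is enabled for priority 3:
 */
static inline bool example_pfc_on_pri3(const struct dcbx_features *feat)
{
	if (!(feat->pfc & DCBX_PFC_ENABLED_MASK))
		return false;

	return !!(feat->pfc & DCBX_PFC_PRI_EN_BITMAP_PRI_3);
}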
+struct dcbx_local_params {
+       u32 config;
+#define DCBX_CONFIG_VERSION_MASK       0x00000007
+#define DCBX_CONFIG_VERSION_SHIFT      0
+#define DCBX_CONFIG_VERSION_DISABLED   0
+#define DCBX_CONFIG_VERSION_IEEE       1
+#define DCBX_CONFIG_VERSION_CEE                2
+#define DCBX_CONFIG_VERSION_STATIC     4
+
+       u32 flags;
+       struct dcbx_features features;
+};
+
+struct dcbx_mib {
+       u32 prefix_seq_num;
+       u32 flags;
+       struct dcbx_features features;
+       u32 suffix_seq_num;
+};
+
+struct lldp_system_tlvs_buffer_s {
+       u16 valid;
+       u16 length;
+       u32 data[MAX_SYSTEM_LLDP_TLV_DATA];
+};
+
+struct dcb_dscp_map {
+       u32 flags;
+#define DCB_DSCP_ENABLE_MASK   0x1
+#define DCB_DSCP_ENABLE_SHIFT  0
+#define DCB_DSCP_ENABLE        1
+       u32 dscp_pri_map[8];
+};
+
+struct public_global {
+       u32 max_path;
+       u32 max_ports;
+       u32 debug_mb_offset;
+       u32 phymod_dbg_mb_offset;
+       struct couple_mode_teaming cmt;
+       s32 internal_temperature;
+       u32 mfw_ver;
+       u32 running_bundle_id;
+       s32 external_temperature;
+       u32 mdump_reason;
+};
+
+struct fw_flr_mb {
+       u32 aggint;
+       u32 opgen_addr;
+       u32 accum_ack;
+};
+
+struct public_path {
+       struct fw_flr_mb flr_mb;
+       u32 mcp_vf_disabled[VF_MAX_STATIC / 32];
+
+       u32 process_kill;
+#define PROCESS_KILL_COUNTER_MASK      0x0000ffff
+#define PROCESS_KILL_COUNTER_SHIFT     0
+#define PROCESS_KILL_GLOB_AEU_BIT_MASK 0xffff0000
+#define PROCESS_KILL_GLOB_AEU_BIT_SHIFT        16
+#define GLOBAL_AEU_BIT(aeu_reg_id, aeu_bit) (aeu_reg_id * 32 + aeu_bit)
+};
+
+struct public_port {
+       u32 validity_map;
+
+       u32 link_status;
+#define LINK_STATUS_LINK_UP                    0x00000001
+#define LINK_STATUS_SPEED_AND_DUPLEX_MASK      0x0000001e
+#define LINK_STATUS_SPEED_AND_DUPLEX_1000THD   (1 << 1)
+#define LINK_STATUS_SPEED_AND_DUPLEX_1000TFD   (2 << 1)
+#define LINK_STATUS_SPEED_AND_DUPLEX_10G       (3 << 1)
+#define LINK_STATUS_SPEED_AND_DUPLEX_20G       (4 << 1)
+#define LINK_STATUS_SPEED_AND_DUPLEX_40G       (5 << 1)
+#define LINK_STATUS_SPEED_AND_DUPLEX_50G       (6 << 1)
+#define LINK_STATUS_SPEED_AND_DUPLEX_100G      (7 << 1)
+#define LINK_STATUS_SPEED_AND_DUPLEX_25G       (8 << 1)
+
+#define LINK_STATUS_AUTO_NEGOTIATE_ENABLED     0x00000020
+
+#define LINK_STATUS_AUTO_NEGOTIATE_COMPLETE    0x00000040
+#define LINK_STATUS_PARALLEL_DETECTION_USED    0x00000080
+
+#define LINK_STATUS_PFC_ENABLED                                0x00000100
+#define LINK_STATUS_LINK_PARTNER_1000TFD_CAPABLE 0x00000200
+#define LINK_STATUS_LINK_PARTNER_1000THD_CAPABLE 0x00000400
+#define LINK_STATUS_LINK_PARTNER_10G_CAPABLE           0x00000800
+#define LINK_STATUS_LINK_PARTNER_20G_CAPABLE           0x00001000
+#define LINK_STATUS_LINK_PARTNER_40G_CAPABLE           0x00002000
+#define LINK_STATUS_LINK_PARTNER_50G_CAPABLE           0x00004000
+#define LINK_STATUS_LINK_PARTNER_100G_CAPABLE          0x00008000
+#define LINK_STATUS_LINK_PARTNER_25G_CAPABLE           0x00010000
+
+#define LINK_STATUS_LINK_PARTNER_FLOW_CONTROL_MASK     0x000C0000
+#define LINK_STATUS_LINK_PARTNER_NOT_PAUSE_CAPABLE     (0 << 18)
+#define LINK_STATUS_LINK_PARTNER_SYMMETRIC_PAUSE       (1 << 18)
+#define LINK_STATUS_LINK_PARTNER_ASYMMETRIC_PAUSE      (2 << 18)
+#define LINK_STATUS_LINK_PARTNER_BOTH_PAUSE            (3 << 18)
+
+#define LINK_STATUS_SFP_TX_FAULT                       0x00100000
+#define LINK_STATUS_TX_FLOW_CONTROL_ENABLED            0x00200000
+#define LINK_STATUS_RX_FLOW_CONTROL_ENABLED            0x00400000
+#define LINK_STATUS_RX_SIGNAL_PRESENT                  0x00800000
+#define LINK_STATUS_MAC_LOCAL_FAULT                    0x01000000
+#define LINK_STATUS_MAC_REMOTE_FAULT                   0x02000000
+#define LINK_STATUS_UNSUPPORTED_SPD_REQ                        0x04000000
+
+       u32 link_status1;
+       u32 ext_phy_fw_version;
+       u32 drv_phy_cfg_addr;
+
+       u32 port_stx;
+
+       u32 stat_nig_timer;
+
+       struct port_mf_cfg port_mf_config;
+       struct port_stats stats;
+
+       u32 media_type;
+#define MEDIA_UNSPECIFIED      0x0
+#define MEDIA_SFPP_10G_FIBER   0x1
+#define MEDIA_XFP_FIBER                0x2
+#define MEDIA_DA_TWINAX                0x3
+#define MEDIA_BASE_T           0x4
+#define MEDIA_SFP_1G_FIBER     0x5
+#define MEDIA_MODULE_FIBER     0x6
+#define MEDIA_KR               0xf0
+#define MEDIA_NOT_PRESENT      0xff
+
+       u32 lfa_status;
+       u32 link_change_count;
+
+       struct lldp_config_params_s lldp_config_params[LLDP_MAX_LLDP_AGENTS];
+       struct lldp_status_params_s lldp_status_params[LLDP_MAX_LLDP_AGENTS];
+       struct lldp_system_tlvs_buffer_s system_lldp_tlvs_buf;
+
+       /* DCBX related MIB */
+       struct dcbx_local_params local_admin_dcbx_mib;
+       struct dcbx_mib remote_dcbx_mib;
+       struct dcbx_mib operational_dcbx_mib;
+
+       u32 reserved[2];
+       u32 transceiver_data;
+#define ETH_TRANSCEIVER_STATE_MASK     0x000000FF
+#define ETH_TRANSCEIVER_STATE_SHIFT    0x00000000
+#define ETH_TRANSCEIVER_STATE_UNPLUGGED        0x00000000
+#define ETH_TRANSCEIVER_STATE_PRESENT  0x00000001
+#define ETH_TRANSCEIVER_STATE_VALID    0x00000003
+#define ETH_TRANSCEIVER_STATE_UPDATING 0x00000008
+
+       u32 wol_info;
+       u32 wol_pkt_len;
+       u32 wol_pkt_details;
+       struct dcb_dscp_map dcb_dscp_map;
+};
+
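/*
 * Editor's note -- illustrative sketch, not part of the committed sources:
 * link_status mixes single flag bits (e.g. LINK_STATUS_LINK_UP) with small
 * enumerated fields whose values are defined above as pre-shifted
 * constants.  A hypothetical test for an established 40G link:
 */
static inline bool example_link_is_40g(u32 link_status)
{
	if (!(link_status & LINK_STATUS_LINK_UP))
		return false;

	return (link_status & LINK_STATUS_SPEED_AND_DUPLEX_MASK) ==
	       LINK_STATUS_SPEED_AND_DUPLEX_40G;
}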
+struct public_func {
+       u32 reserved0[2];
+
+       u32 mtu_size;
+
+       u32 reserved[7];
+
+       u32 config;
+#define FUNC_MF_CFG_FUNC_HIDE                  0x00000001
+#define FUNC_MF_CFG_PAUSE_ON_HOST_RING         0x00000002
+#define FUNC_MF_CFG_PAUSE_ON_HOST_RING_SHIFT   0x00000001
+
+#define FUNC_MF_CFG_PROTOCOL_MASK      0x000000f0
+#define FUNC_MF_CFG_PROTOCOL_SHIFT     4
+#define FUNC_MF_CFG_PROTOCOL_ETHERNET  0x00000000
+#define FUNC_MF_CFG_PROTOCOL_ISCSI              0x00000010
+#define FUNC_MF_CFG_PROTOCOL_ROCE               0x00000030
+#define FUNC_MF_CFG_PROTOCOL_MAX       0x00000030
+
+#define FUNC_MF_CFG_MIN_BW_MASK                0x0000ff00
+#define FUNC_MF_CFG_MIN_BW_SHIFT       8
+#define FUNC_MF_CFG_MIN_BW_DEFAULT     0x00000000
+#define FUNC_MF_CFG_MAX_BW_MASK                0x00ff0000
+#define FUNC_MF_CFG_MAX_BW_SHIFT       16
+#define FUNC_MF_CFG_MAX_BW_DEFAULT     0x00640000
+
+       u32 status;
+#define FUNC_STATUS_VLINK_DOWN         0x00000001
+
+       u32 mac_upper;
+#define FUNC_MF_CFG_UPPERMAC_MASK      0x0000ffff
+#define FUNC_MF_CFG_UPPERMAC_SHIFT     0
+#define FUNC_MF_CFG_UPPERMAC_DEFAULT   FUNC_MF_CFG_UPPERMAC_MASK
+       u32 mac_lower;
+#define FUNC_MF_CFG_LOWERMAC_DEFAULT   0xffffffff
+
+       u32 fcoe_wwn_port_name_upper;
+       u32 fcoe_wwn_port_name_lower;
+
+       u32 fcoe_wwn_node_name_upper;
+       u32 fcoe_wwn_node_name_lower;
+
+       u32 ovlan_stag;
+#define FUNC_MF_CFG_OV_STAG_MASK       0x0000ffff
+#define FUNC_MF_CFG_OV_STAG_SHIFT      0
+#define FUNC_MF_CFG_OV_STAG_DEFAULT    FUNC_MF_CFG_OV_STAG_MASK
+
+       u32 pf_allocation;
+
+       u32 preserve_data;
+
+       u32 driver_last_activity_ts;
+
+       u32 drv_ack_vf_disabled[VF_MAX_STATIC / 32];
+
+       u32 drv_id;
+#define DRV_ID_PDA_COMP_VER_MASK       0x0000ffff
+#define DRV_ID_PDA_COMP_VER_SHIFT      0
+
+#define DRV_ID_MCP_HSI_VER_MASK                0x00ff0000
+#define DRV_ID_MCP_HSI_VER_SHIFT       16
+#define DRV_ID_MCP_HSI_VER_CURRENT     (1 << DRV_ID_MCP_HSI_VER_SHIFT)
+
+#define DRV_ID_DRV_TYPE_MASK           0x7f000000
+#define DRV_ID_DRV_TYPE_SHIFT          24
+#define DRV_ID_DRV_TYPE_UNKNOWN                (0 << DRV_ID_DRV_TYPE_SHIFT)
+#define DRV_ID_DRV_TYPE_LINUX          (1 << DRV_ID_DRV_TYPE_SHIFT)
+
+#define DRV_ID_DRV_INIT_HW_MASK                0x80000000
+#define DRV_ID_DRV_INIT_HW_SHIFT       31
+#define DRV_ID_DRV_INIT_HW_FLAG                (1 << DRV_ID_DRV_INIT_HW_SHIFT)
+};
+
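/*
 * Editor's note -- illustrative sketch, not part of the committed sources:
 * the per-function config word uses in-place fields as well; the bandwidth
 * limits look like percentages, given that FUNC_MF_CFG_MAX_BW_DEFAULT puts
 * 0x64 (decimal 100) in the MAX_BW position.  A hypothetical decode:
 */
static inline void example_get_func_bw(const struct public_func *func,
				       u8 *min_bw, u8 *max_bw)
{
	*min_bw = (func->config & FUNC_MF_CFG_MIN_BW_MASK) >>
		  FUNC_MF_CFG_MIN_BW_SHIFT;
	*max_bw = (func->config & FUNC_MF_CFG_MAX_BW_MASK) >>
		  FUNC_MF_CFG_MAX_BW_SHIFT;
}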
+struct mcp_mac {
+       u32 mac_upper;
+       u32 mac_lower;
+};
+
+struct mcp_val64 {
+       u32 lo;
+       u32 hi;
+};
+
+struct mcp_file_att {
+       u32 nvm_start_addr;
+       u32 len;
+};
+
+struct bist_nvm_image_att {
+       u32 return_code;
+       u32 image_type;
+       u32 nvm_start_addr;
+       u32 len;
+};
+
+#define MCP_DRV_VER_STR_SIZE 16
+#define MCP_DRV_VER_STR_SIZE_DWORD (MCP_DRV_VER_STR_SIZE / sizeof(u32))
+#define MCP_DRV_NVM_BUF_LEN 32
+struct drv_version_stc {
+       u32 version;
+       u8 name[MCP_DRV_VER_STR_SIZE - 4];
+};
+
+struct lan_stats_stc {
+       u64 ucast_rx_pkts;
+       u64 ucast_tx_pkts;
+       u32 fcs_err;
+       u32 rserved;
+};
+
+struct ocbb_data_stc {
+       u32 ocbb_host_addr;
+       u32 ocsd_host_addr;
+       u32 ocsd_req_update_interval;
+};
+
+#define MAX_NUM_OF_SENSORS 7
+struct temperature_status_stc {
+       u32 num_of_sensors;
+       u32 sensor[MAX_NUM_OF_SENSORS];
+};
+
+/* crash dump configuration header */
+struct mdump_config_stc {
+       u32 version;
+       u32 config;
+       u32 epoc;
+       u32 num_of_logs;
+       u32 valid_logs;
+};
+
+union drv_union_data {
+       u32 ver_str[MCP_DRV_VER_STR_SIZE_DWORD];
+       struct mcp_mac wol_mac;
+
+       struct eth_phy_cfg drv_phy_cfg;
+
+       struct mcp_val64 val64;
+
+       u8 raw_data[MCP_DRV_NVM_BUF_LEN];
+
+       struct mcp_file_att file_att;
+
+       u32 ack_vf_disabled[VF_MAX_STATIC / 32];
+
+       struct drv_version_stc drv_version;
+
+       struct lan_stats_stc lan_stats;
+       u64 reserved_stats[11];
+       struct ocbb_data_stc ocbb_info;
+       struct temperature_status_stc temp_info;
+       struct bist_nvm_image_att nvm_image_att;
+       struct mdump_config_stc mdump_config;
+};
+
+struct public_drv_mb {
+       u32 drv_mb_header;
+#define DRV_MSG_CODE_MASK                      0xffff0000
+#define DRV_MSG_CODE_LOAD_REQ                  0x10000000
+#define DRV_MSG_CODE_LOAD_DONE                 0x11000000
+#define DRV_MSG_CODE_INIT_HW                   0x12000000
+#define DRV_MSG_CODE_UNLOAD_REQ                        0x20000000
+#define DRV_MSG_CODE_UNLOAD_DONE               0x21000000
+#define DRV_MSG_CODE_INIT_PHY                  0x22000000
+#define DRV_MSG_CODE_LINK_RESET                        0x23000000
+#define DRV_MSG_CODE_SET_DCBX                  0x25000000
+
+#define DRV_MSG_CODE_BW_UPDATE_ACK             0x32000000
+#define DRV_MSG_CODE_NIG_DRAIN                 0x30000000
+#define DRV_MSG_CODE_VF_DISABLED_DONE          0xc0000000
+#define DRV_MSG_CODE_CFG_VF_MSIX               0xc0010000
+#define DRV_MSG_CODE_MCP_RESET                 0x00090000
+#define DRV_MSG_CODE_SET_VERSION               0x000f0000
+
+#define DRV_MSG_CODE_BIST_TEST                 0x001e0000
+#define DRV_MSG_CODE_SET_LED_MODE              0x00200000
+
+#define DRV_MSG_SEQ_NUMBER_MASK                        0x0000ffff
+
+       u32 drv_mb_param;
+#define DRV_MB_PARAM_UNLOAD_WOL_MCP            0x00000001
+#define DRV_MB_PARAM_DCBX_NOTIFY_MASK          0x000000FF
+#define DRV_MB_PARAM_DCBX_NOTIFY_SHIFT         3
+#define DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_SHIFT   0
+#define DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_MASK    0x000000FF
+#define DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_SHIFT  8
+#define DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_MASK   0x0000FF00
+#define DRV_MB_PARAM_LLDP_SEND_MASK            0x00000001
+#define DRV_MB_PARAM_LLDP_SEND_SHIFT           0
+
+
+#define DRV_MB_PARAM_SET_LED_MODE_OPER         0x0
+#define DRV_MB_PARAM_SET_LED_MODE_ON           0x1
+#define DRV_MB_PARAM_SET_LED_MODE_OFF          0x2
+
+#define DRV_MB_PARAM_BIST_REGISTER_TEST                1
+#define DRV_MB_PARAM_BIST_CLOCK_TEST           2
+
+#define DRV_MB_PARAM_BIST_RC_UNKNOWN           0
+#define DRV_MB_PARAM_BIST_RC_PASSED            1
+#define DRV_MB_PARAM_BIST_RC_FAILED            2
+#define DRV_MB_PARAM_BIST_RC_INVALID_PARAMETER 3
+
+#define DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT     0
+#define DRV_MB_PARAM_BIST_TEST_INDEX_MASK      0x000000FF
+
+       u32 fw_mb_header;
+#define FW_MSG_CODE_MASK                       0xffff0000
+#define FW_MSG_CODE_DRV_LOAD_ENGINE            0x10100000
+#define FW_MSG_CODE_DRV_LOAD_PORT              0x10110000
+#define FW_MSG_CODE_DRV_LOAD_FUNCTION          0x10120000
+#define FW_MSG_CODE_DRV_LOAD_REFUSED_PDA       0x10200000
+#define FW_MSG_CODE_DRV_LOAD_REFUSED_HSI       0x10210000
+#define FW_MSG_CODE_DRV_LOAD_REFUSED_DIAG      0x10220000
+#define FW_MSG_CODE_DRV_LOAD_DONE              0x11100000
+#define FW_MSG_CODE_DRV_UNLOAD_ENGINE          0x20110000
+#define FW_MSG_CODE_DRV_UNLOAD_PORT            0x20120000
+#define FW_MSG_CODE_DRV_UNLOAD_FUNCTION                0x20130000
+#define FW_MSG_CODE_DRV_UNLOAD_DONE            0x21100000
+#define FW_MSG_CODE_DRV_CFG_VF_MSIX_DONE       0xb0010000
+#define FW_MSG_CODE_OK                         0x00160000
+
+#define FW_MSG_SEQ_NUMBER_MASK                 0x0000ffff
+
+       u32 fw_mb_param;
+
+       u32 drv_pulse_mb;
+#define DRV_PULSE_SEQ_MASK                     0x00007fff
+#define DRV_PULSE_SYSTEM_TIME_MASK             0xffff0000
+#define DRV_PULSE_ALWAYS_ALIVE                 0x00008000
+
+       u32 mcp_pulse_mb;
+#define MCP_PULSE_SEQ_MASK                     0x00007fff
+#define MCP_PULSE_ALWAYS_ALIVE                 0x00008000
+#define MCP_EVENT_MASK                         0xffff0000
+#define MCP_EVENT_OTHER_DRIVER_RESET_REQ       0x00010000
+
+       union drv_union_data union_data;
+};
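+
+/* Illustrative sketch (not part of this patch): a driver-to-MFW mailbox
+ * command is typically formed by OR-ing a DRV_MSG_CODE_* value with the
+ * rolling sequence number kept in the low 16 bits, e.g.:
+ *
+ *     drv_mb_header = DRV_MSG_CODE_LOAD_REQ |
+ *                     (seq_num & DRV_MSG_SEQ_NUMBER_MASK);
+ *
+ * The firmware response is then matched against FW_MSG_CODE_MASK and
+ * FW_MSG_SEQ_NUMBER_MASK in fw_mb_header; seq_num here is hypothetical.
+ */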
+
+enum MFW_DRV_MSG_TYPE {
+       MFW_DRV_MSG_LINK_CHANGE,
+       MFW_DRV_MSG_FLR_FW_ACK_FAILED,
+       MFW_DRV_MSG_VF_DISABLED,
+       MFW_DRV_MSG_LLDP_DATA_UPDATED,
+       MFW_DRV_MSG_DCBX_REMOTE_MIB_UPDATED,
+       MFW_DRV_MSG_DCBX_OPERATIONAL_MIB_UPDATED,
+       MFW_DRV_MSG_RESERVED4,
+       MFW_DRV_MSG_BW_UPDATE,
+       MFW_DRV_MSG_BW_UPDATE5,
+       MFW_DRV_MSG_BW_UPDATE6,
+       MFW_DRV_MSG_BW_UPDATE7,
+       MFW_DRV_MSG_BW_UPDATE8,
+       MFW_DRV_MSG_BW_UPDATE9,
+       MFW_DRV_MSG_BW_UPDATE10,
+       MFW_DRV_MSG_TRANSCEIVER_STATE_CHANGE,
+       MFW_DRV_MSG_BW_UPDATE11,
+       MFW_DRV_MSG_MAX
+};
+
+#define MFW_DRV_MSG_MAX_DWORDS(msgs)   (((msgs - 1) >> 2) + 1)
+#define MFW_DRV_MSG_DWORD(msg_id)      (msg_id >> 2)
+#define MFW_DRV_MSG_OFFSET(msg_id)     ((msg_id & 0x3) << 3)
+#define MFW_DRV_MSG_MASK(msg_id)       (0xff << MFW_DRV_MSG_OFFSET(msg_id))
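+
+/* Illustrative sketch (not part of this patch): a pending MFW message 'id'
+ * in the msg[] array of struct public_mfw_mb below can be tested with the
+ * helpers above, roughly:
+ *
+ *     if (msg[MFW_DRV_MSG_DWORD(id)] & MFW_DRV_MSG_MASK(id))
+ *             handle_and_ack(id);
+ *
+ * where handle_and_ack() is a hypothetical per-message handler.
+ */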
+
+struct public_mfw_mb {
+       u32 sup_msgs;
+       u32 msg[MFW_DRV_MSG_MAX_DWORDS(MFW_DRV_MSG_MAX)];
+       u32 ack[MFW_DRV_MSG_MAX_DWORDS(MFW_DRV_MSG_MAX)];
+};
+
+enum public_sections {
+       PUBLIC_DRV_MB,
+       PUBLIC_MFW_MB,
+       PUBLIC_GLOBAL,
+       PUBLIC_PATH,
+       PUBLIC_PORT,
+       PUBLIC_FUNC,
+       PUBLIC_MAX_SECTIONS
+};
+
+struct mcp_public_data {
+       u32 num_sections;
+       u32 sections[PUBLIC_MAX_SECTIONS];
+       struct public_drv_mb drv_mb[MCP_GLOB_FUNC_MAX];
+       struct public_mfw_mb mfw_mb[MCP_GLOB_FUNC_MAX];
+       struct public_global global;
+       struct public_path path[MCP_GLOB_PATH_MAX];
+       struct public_port port[MCP_GLOB_PORT_MAX];
+       struct public_func func[MCP_GLOB_FUNC_MAX];
+};
+
+struct nvm_cfg_mac_address {
+       u32 mac_addr_hi;
+#define NVM_CFG_MAC_ADDRESS_HI_MASK    0x0000FFFF
+#define NVM_CFG_MAC_ADDRESS_HI_OFFSET  0
+       u32 mac_addr_lo;
+};
+
+struct nvm_cfg1_glob {
+       u32 generic_cont0;
+#define NVM_CFG1_GLOB_MF_MODE_MASK             0x00000FF0
+#define NVM_CFG1_GLOB_MF_MODE_OFFSET           4
+#define NVM_CFG1_GLOB_MF_MODE_MF_ALLOWED       0x0
+#define NVM_CFG1_GLOB_MF_MODE_DEFAULT          0x1
+#define NVM_CFG1_GLOB_MF_MODE_SPIO4            0x2
+#define NVM_CFG1_GLOB_MF_MODE_NPAR1_0          0x3
+#define NVM_CFG1_GLOB_MF_MODE_NPAR1_5          0x4
+#define NVM_CFG1_GLOB_MF_MODE_NPAR2_0          0x5
+#define NVM_CFG1_GLOB_MF_MODE_BD               0x6
+#define NVM_CFG1_GLOB_MF_MODE_UFP              0x7
+       u32 engineering_change[3];
+       u32 manufacturing_id;
+       u32 serial_number[4];
+       u32 pcie_cfg;
+       u32 mgmt_traffic;
+       u32 core_cfg;
+#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_MASK           0x000000FF
+#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_OFFSET         0
+#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_2X40G       0x0
+#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_2X50G          0x1
+#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_1X100G      0x2
+#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_4X10G_F                0x3
+#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_4X10G_E     0x4
+#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_4X20G       0x5
+#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_1X40G          0xB
+#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_2X25G          0xC
+#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_1X25G          0xD
+#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_4X25G          0xE
+       u32 e_lane_cfg1;
+       u32 e_lane_cfg2;
+       u32 f_lane_cfg1;
+       u32 f_lane_cfg2;
+       u32 mps10_preemphasis;
+       u32 mps10_driver_current;
+       u32 mps25_preemphasis;
+       u32 mps25_driver_current;
+       u32 pci_id;
+       u32 pci_subsys_id;
+       u32 bar;
+       u32 mps10_txfir_main;
+       u32 mps10_txfir_post;
+       u32 mps25_txfir_main;
+       u32 mps25_txfir_post;
+       u32 manufacture_ver;
+       u32 manufacture_time;
+       u32 led_global_settings;
+       u32 generic_cont1;
+       u32 mbi_version;
+       u32 mbi_date;
+       u32 misc_sig;
+       u32 device_capabilities;
+#define NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ETHERNET     0x1
+#define NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ISCSI                0x4
+#define NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ROCE         0x8
+       u32 power_dissipated;
+       u32 power_consumed;
+       u32 efi_version;
+       u32 multi_network_modes_capability;
+       u32 reserved[41];
+};
+
+struct nvm_cfg1_path {
+       u32 reserved[30];
+};
+
+struct nvm_cfg1_port {
+       u32 reserved__m_relocated_to_option_123;
+       u32 reserved__m_relocated_to_option_124;
+       u32 generic_cont0;
+#define NVM_CFG1_PORT_DCBX_MODE_MASK                           0x000F0000
+#define NVM_CFG1_PORT_DCBX_MODE_OFFSET                         16
+#define NVM_CFG1_PORT_DCBX_MODE_DISABLED                       0x0
+#define NVM_CFG1_PORT_DCBX_MODE_IEEE                           0x1
+#define NVM_CFG1_PORT_DCBX_MODE_CEE                            0x2
+#define NVM_CFG1_PORT_DCBX_MODE_DYNAMIC                                0x3
+#define NVM_CFG1_PORT_DEFAULT_ENABLED_PROTOCOLS_MASK           0x00F00000
+#define NVM_CFG1_PORT_DEFAULT_ENABLED_PROTOCOLS_OFFSET         20
+#define NVM_CFG1_PORT_DEFAULT_ENABLED_PROTOCOLS_ETHERNET       0x1
+#define NVM_CFG1_PORT_DEFAULT_ENABLED_PROTOCOLS_FCOE           0x2
+#define NVM_CFG1_PORT_DEFAULT_ENABLED_PROTOCOLS_ISCSI          0x4
+       u32 pcie_cfg;
+       u32 features;
+       u32 speed_cap_mask;
+#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_MASK           0x0000FFFF
+#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_OFFSET         0
+#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G             0x1
+#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G            0x2
+#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G            0x8
+#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G            0x10
+#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G            0x20
+#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G                0x40
+       u32 link_settings;
+#define NVM_CFG1_PORT_DRV_LINK_SPEED_MASK                      0x0000000F
+#define NVM_CFG1_PORT_DRV_LINK_SPEED_OFFSET                    0
+#define NVM_CFG1_PORT_DRV_LINK_SPEED_AUTONEG                   0x0
+#define NVM_CFG1_PORT_DRV_LINK_SPEED_1G                                0x1
+#define NVM_CFG1_PORT_DRV_LINK_SPEED_10G                       0x2
+#define NVM_CFG1_PORT_DRV_LINK_SPEED_25G                       0x4
+#define NVM_CFG1_PORT_DRV_LINK_SPEED_40G                       0x5
+#define NVM_CFG1_PORT_DRV_LINK_SPEED_50G                       0x6
+#define NVM_CFG1_PORT_DRV_LINK_SPEED_BB_100G                   0x7
+#define NVM_CFG1_PORT_DRV_LINK_SPEED_SMARTLINQ                 0x8
+#define NVM_CFG1_PORT_DRV_FLOW_CONTROL_MASK                    0x00000070
+#define NVM_CFG1_PORT_DRV_FLOW_CONTROL_OFFSET                  4
+#define NVM_CFG1_PORT_DRV_FLOW_CONTROL_AUTONEG                 0x1
+#define NVM_CFG1_PORT_DRV_FLOW_CONTROL_RX                      0x2
+#define NVM_CFG1_PORT_DRV_FLOW_CONTROL_TX                      0x4
+       u32 phy_cfg;
+       u32 mgmt_traffic;
+       u32 ext_phy;
+       u32 mba_cfg1;
+       u32 mba_cfg2;
+       u32 vf_cfg;
+       struct nvm_cfg_mac_address lldp_mac_address;
+       u32 led_port_settings;
+       u32 transceiver_00;
+       u32 device_ids;
+       u32 board_cfg;
+       u32 mnm_10g_cap;
+       u32 mnm_10g_ctrl;
+       u32 mnm_10g_misc;
+       u32 mnm_25g_cap;
+       u32 mnm_25g_ctrl;
+       u32 mnm_25g_misc;
+       u32 mnm_40g_cap;
+       u32 mnm_40g_ctrl;
+       u32 mnm_40g_misc;
+       u32 mnm_50g_cap;
+       u32 mnm_50g_ctrl;
+       u32 mnm_50g_misc;
+       u32 mnm_100g_cap;
+       u32 mnm_100g_ctrl;
+       u32 mnm_100g_misc;
+       u32 reserved[116];
+};
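+
+/* Illustrative sketch (not part of this patch): NVM config fields are read
+ * by masking and shifting with the *_MASK / *_OFFSET pairs above, e.g. the
+ * requested link speed out of link_settings:
+ *
+ *     speed = (link_settings & NVM_CFG1_PORT_DRV_LINK_SPEED_MASK) >>
+ *             NVM_CFG1_PORT_DRV_LINK_SPEED_OFFSET;
+ */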
+
+struct nvm_cfg1_func {
+       struct nvm_cfg_mac_address mac_address;
+       u32 rsrv1;
+       u32 rsrv2;
+       u32 device_id;
+       u32 cmn_cfg;
+       u32 pci_cfg;
+       struct nvm_cfg_mac_address fcoe_node_wwn_mac_addr;
+       struct nvm_cfg_mac_address fcoe_port_wwn_mac_addr;
+       u32 preboot_generic_cfg;
+       u32 reserved[8];
+};
+
+struct nvm_cfg1 {
+       struct nvm_cfg1_glob glob;
+       struct nvm_cfg1_path path[MCP_GLOB_PATH_MAX];
+       struct nvm_cfg1_port port[MCP_GLOB_PORT_MAX];
+       struct nvm_cfg1_func func[MCP_GLOB_FUNC_MAX];
+};
+#endif
diff --git a/drivers/net/ethernet/qlogic/qed/qed_hw.c b/drivers/net/ethernet/qlogic/qed/qed_hw.c
new file mode 100644 (file)
index 0000000..e178853
--- /dev/null
@@ -0,0 +1,864 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015 QLogic Corporation
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#include <linux/types.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
+#include <linux/qed/qed_chain.h>
+#include "qed.h"
+#include "qed_hsi.h"
+#include "qed_hw.h"
+#include "qed_reg_addr.h"
+#include "qed_sriov.h"
+
+#define QED_BAR_ACQUIRE_TIMEOUT 1000
+
+/* Invalid values */
+#define QED_BAR_INVALID_OFFSET          (cpu_to_le32(-1))
+
+struct qed_ptt {
+       struct list_head        list_entry;
+       unsigned int            idx;
+       struct pxp_ptt_entry    pxp;
+};
+
+struct qed_ptt_pool {
+       struct list_head        free_list;
+       spinlock_t              lock; /* ptt synchronized access */
+       struct qed_ptt          ptts[PXP_EXTERNAL_BAR_PF_WINDOW_NUM];
+};
+
+int qed_ptt_pool_alloc(struct qed_hwfn *p_hwfn)
+{
+       struct qed_ptt_pool *p_pool = kmalloc(sizeof(*p_pool),
+                                             GFP_KERNEL);
+       int i;
+
+       if (!p_pool)
+               return -ENOMEM;
+
+       INIT_LIST_HEAD(&p_pool->free_list);
+       for (i = 0; i < PXP_EXTERNAL_BAR_PF_WINDOW_NUM; i++) {
+               p_pool->ptts[i].idx = i;
+               p_pool->ptts[i].pxp.offset = QED_BAR_INVALID_OFFSET;
+               p_pool->ptts[i].pxp.pretend.control = 0;
+               if (i >= RESERVED_PTT_MAX)
+                       list_add(&p_pool->ptts[i].list_entry,
+                                &p_pool->free_list);
+       }
+
+       p_hwfn->p_ptt_pool = p_pool;
+       spin_lock_init(&p_pool->lock);
+
+       return 0;
+}
+
+void qed_ptt_invalidate(struct qed_hwfn *p_hwfn)
+{
+       struct qed_ptt *p_ptt;
+       int i;
+
+       for (i = 0; i < PXP_EXTERNAL_BAR_PF_WINDOW_NUM; i++) {
+               p_ptt = &p_hwfn->p_ptt_pool->ptts[i];
+               p_ptt->pxp.offset = QED_BAR_INVALID_OFFSET;
+       }
+}
+
+void qed_ptt_pool_free(struct qed_hwfn *p_hwfn)
+{
+       kfree(p_hwfn->p_ptt_pool);
+       p_hwfn->p_ptt_pool = NULL;
+}
+
+struct qed_ptt *qed_ptt_acquire(struct qed_hwfn *p_hwfn)
+{
+       struct qed_ptt *p_ptt;
+       unsigned int i;
+
+       /* Take the free PTT from the list */
+       for (i = 0; i < QED_BAR_ACQUIRE_TIMEOUT; i++) {
+               spin_lock_bh(&p_hwfn->p_ptt_pool->lock);
+
+               if (!list_empty(&p_hwfn->p_ptt_pool->free_list)) {
+                       p_ptt = list_first_entry(&p_hwfn->p_ptt_pool->free_list,
+                                                struct qed_ptt, list_entry);
+                       list_del(&p_ptt->list_entry);
+
+                       spin_unlock_bh(&p_hwfn->p_ptt_pool->lock);
+
+                       DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
+                                  "allocated ptt %d\n", p_ptt->idx);
+                       return p_ptt;
+               }
+
+               spin_unlock_bh(&p_hwfn->p_ptt_pool->lock);
+               usleep_range(1000, 2000);
+       }
+
+       DP_NOTICE(p_hwfn, "PTT acquire timeout - failed to allocate PTT\n");
+       return NULL;
+}
+
+void qed_ptt_release(struct qed_hwfn *p_hwfn,
+                    struct qed_ptt *p_ptt)
+{
+       spin_lock_bh(&p_hwfn->p_ptt_pool->lock);
+       list_add(&p_ptt->list_entry, &p_hwfn->p_ptt_pool->free_list);
+       spin_unlock_bh(&p_hwfn->p_ptt_pool->lock);
+}
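+
+/* Illustrative sketch (not part of this patch): callers typically bracket
+ * register access with acquire/release, e.g.:
+ *
+ *     struct qed_ptt *p_ptt = qed_ptt_acquire(p_hwfn);
+ *
+ *     if (p_ptt) {
+ *             qed_wr(p_hwfn, p_ptt, reg_addr, val);
+ *             qed_ptt_release(p_hwfn, p_ptt);
+ *     }
+ *
+ * where reg_addr and val are hypothetical.
+ */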
+
+u32 qed_ptt_get_hw_addr(struct qed_hwfn *p_hwfn,
+                       struct qed_ptt *p_ptt)
+{
+       /* The HW is using DWORDS and we need to translate it to Bytes */
+       return le32_to_cpu(p_ptt->pxp.offset) << 2;
+}
+
+static u32 qed_ptt_config_addr(struct qed_ptt *p_ptt)
+{
+       return PXP_PF_WINDOW_ADMIN_PER_PF_START +
+              p_ptt->idx * sizeof(struct pxp_ptt_entry);
+}
+
+u32 qed_ptt_get_bar_addr(struct qed_ptt *p_ptt)
+{
+       return PXP_EXTERNAL_BAR_PF_WINDOW_START +
+              p_ptt->idx * PXP_EXTERNAL_BAR_PF_WINDOW_SINGLE_SIZE;
+}
+
+void qed_ptt_set_win(struct qed_hwfn *p_hwfn,
+                    struct qed_ptt *p_ptt,
+                    u32 new_hw_addr)
+{
+       u32 prev_hw_addr;
+
+       prev_hw_addr = qed_ptt_get_hw_addr(p_hwfn, p_ptt);
+
+       if (new_hw_addr == prev_hw_addr)
+               return;
+
+       /* Update PTT entry in admin window */
+       DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
+                  "Updating PTT entry %d to offset 0x%x\n",
+                  p_ptt->idx, new_hw_addr);
+
+       /* The HW is using DWORDS and the address is in Bytes */
+       p_ptt->pxp.offset = cpu_to_le32(new_hw_addr >> 2);
+
+       REG_WR(p_hwfn,
+              qed_ptt_config_addr(p_ptt) +
+              offsetof(struct pxp_ptt_entry, offset),
+              le32_to_cpu(p_ptt->pxp.offset));
+}
+
+static u32 qed_set_ptt(struct qed_hwfn *p_hwfn,
+                      struct qed_ptt *p_ptt,
+                      u32 hw_addr)
+{
+       u32 win_hw_addr = qed_ptt_get_hw_addr(p_hwfn, p_ptt);
+       u32 offset;
+
+       offset = hw_addr - win_hw_addr;
+
+       /* Verify the address is within the window */
+       if (hw_addr < win_hw_addr ||
+           offset >= PXP_EXTERNAL_BAR_PF_WINDOW_SINGLE_SIZE) {
+               qed_ptt_set_win(p_hwfn, p_ptt, hw_addr);
+               offset = 0;
+       }
+
+       return qed_ptt_get_bar_addr(p_ptt) + offset;
+}
+
+struct qed_ptt *qed_get_reserved_ptt(struct qed_hwfn *p_hwfn,
+                                    enum reserved_ptts ptt_idx)
+{
+       if (ptt_idx >= RESERVED_PTT_MAX) {
+               DP_NOTICE(p_hwfn,
+                         "Requested PTT %d is out of range\n", ptt_idx);
+               return NULL;
+       }
+
+       return &p_hwfn->p_ptt_pool->ptts[ptt_idx];
+}
+
+void qed_wr(struct qed_hwfn *p_hwfn,
+           struct qed_ptt *p_ptt,
+           u32 hw_addr, u32 val)
+{
+       u32 bar_addr = qed_set_ptt(p_hwfn, p_ptt, hw_addr);
+
+       REG_WR(p_hwfn, bar_addr, val);
+       DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
+                  "bar_addr 0x%x, hw_addr 0x%x, val 0x%x\n",
+                  bar_addr, hw_addr, val);
+}
+
+u32 qed_rd(struct qed_hwfn *p_hwfn,
+          struct qed_ptt *p_ptt,
+          u32 hw_addr)
+{
+       u32 bar_addr = qed_set_ptt(p_hwfn, p_ptt, hw_addr);
+       u32 val = REG_RD(p_hwfn, bar_addr);
+
+       DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
+                  "bar_addr 0x%x, hw_addr 0x%x, val 0x%x\n",
+                  bar_addr, hw_addr, val);
+
+       return val;
+}
+
+static void qed_memcpy_hw(struct qed_hwfn *p_hwfn,
+                         struct qed_ptt *p_ptt,
+                         void *addr,
+                         u32 hw_addr,
+                         size_t n,
+                         bool to_device)
+{
+       u32 dw_count, *host_addr, hw_offset;
+       size_t quota, done = 0;
+       u32 __iomem *reg_addr;
+
+       while (done < n) {
+               quota = min_t(size_t, n - done,
+                             PXP_EXTERNAL_BAR_PF_WINDOW_SINGLE_SIZE);
+
+               if (IS_PF(p_hwfn->cdev)) {
+                       qed_ptt_set_win(p_hwfn, p_ptt, hw_addr + done);
+                       hw_offset = qed_ptt_get_bar_addr(p_ptt);
+               } else {
+                       hw_offset = hw_addr + done;
+               }
+
+               dw_count = quota / 4;
+               host_addr = (u32 *)((u8 *)addr + done);
+               reg_addr = (u32 __iomem *)REG_ADDR(p_hwfn, hw_offset);
+               if (to_device)
+                       while (dw_count--)
+                               DIRECT_REG_WR(reg_addr++, *host_addr++);
+               else
+                       while (dw_count--)
+                               *host_addr++ = DIRECT_REG_RD(reg_addr++);
+
+               done += quota;
+       }
+}
+
+void qed_memcpy_from(struct qed_hwfn *p_hwfn,
+                    struct qed_ptt *p_ptt,
+                    void *dest, u32 hw_addr, size_t n)
+{
+       DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
+                  "hw_addr 0x%x, dest %p hw_addr 0x%x, size %lu\n",
+                  hw_addr, dest, hw_addr, (unsigned long)n);
+
+       qed_memcpy_hw(p_hwfn, p_ptt, dest, hw_addr, n, false);
+}
+
+void qed_memcpy_to(struct qed_hwfn *p_hwfn,
+                  struct qed_ptt *p_ptt,
+                  u32 hw_addr, void *src, size_t n)
+{
+       DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
+                  "hw_addr 0x%x, hw_addr 0x%x, src %p size %lu\n",
+                  hw_addr, hw_addr, src, (unsigned long)n);
+
+       qed_memcpy_hw(p_hwfn, p_ptt, src, hw_addr, n, true);
+}
+
+void qed_fid_pretend(struct qed_hwfn *p_hwfn,
+                    struct qed_ptt *p_ptt,
+                    u16 fid)
+{
+       u16 control = 0;
+
+       SET_FIELD(control, PXP_PRETEND_CMD_IS_CONCRETE, 1);
+       SET_FIELD(control, PXP_PRETEND_CMD_PRETEND_FUNCTION, 1);
+
+       /* Every pretend undoes previous pretends, including
+        * previous port pretend.
+        */
+       SET_FIELD(control, PXP_PRETEND_CMD_PORT, 0);
+       SET_FIELD(control, PXP_PRETEND_CMD_USE_PORT, 0);
+       SET_FIELD(control, PXP_PRETEND_CMD_PRETEND_PORT, 1);
+
+       if (!GET_FIELD(fid, PXP_CONCRETE_FID_VFVALID))
+               fid = GET_FIELD(fid, PXP_CONCRETE_FID_PFID);
+
+       p_ptt->pxp.pretend.control = cpu_to_le16(control);
+       p_ptt->pxp.pretend.fid.concrete_fid.fid = cpu_to_le16(fid);
+
+       REG_WR(p_hwfn,
+              qed_ptt_config_addr(p_ptt) +
+              offsetof(struct pxp_ptt_entry, pretend),
+              *(u32 *)&p_ptt->pxp.pretend);
+}
+
+void qed_port_pretend(struct qed_hwfn *p_hwfn,
+                     struct qed_ptt *p_ptt,
+                     u8 port_id)
+{
+       u16 control = 0;
+
+       SET_FIELD(control, PXP_PRETEND_CMD_PORT, port_id);
+       SET_FIELD(control, PXP_PRETEND_CMD_USE_PORT, 1);
+       SET_FIELD(control, PXP_PRETEND_CMD_PRETEND_PORT, 1);
+
+       p_ptt->pxp.pretend.control = cpu_to_le16(control);
+
+       REG_WR(p_hwfn,
+              qed_ptt_config_addr(p_ptt) +
+              offsetof(struct pxp_ptt_entry, pretend),
+              *(u32 *)&p_ptt->pxp.pretend);
+}
+
+void qed_port_unpretend(struct qed_hwfn *p_hwfn,
+                       struct qed_ptt *p_ptt)
+{
+       u16 control = 0;
+
+       SET_FIELD(control, PXP_PRETEND_CMD_PORT, 0);
+       SET_FIELD(control, PXP_PRETEND_CMD_USE_PORT, 0);
+       SET_FIELD(control, PXP_PRETEND_CMD_PRETEND_PORT, 1);
+
+       p_ptt->pxp.pretend.control = cpu_to_le16(control);
+
+       REG_WR(p_hwfn,
+              qed_ptt_config_addr(p_ptt) +
+              offsetof(struct pxp_ptt_entry, pretend),
+              *(u32 *)&p_ptt->pxp.pretend);
+}
+
+u32 qed_vfid_to_concrete(struct qed_hwfn *p_hwfn, u8 vfid)
+{
+       u32 concrete_fid = 0;
+
+       SET_FIELD(concrete_fid, PXP_CONCRETE_FID_PFID, p_hwfn->rel_pf_id);
+       SET_FIELD(concrete_fid, PXP_CONCRETE_FID_VFID, vfid);
+       SET_FIELD(concrete_fid, PXP_CONCRETE_FID_VFVALID, 1);
+
+       return concrete_fid;
+}
+
+/* DMAE */
+static void qed_dmae_opcode(struct qed_hwfn *p_hwfn,
+                           const u8 is_src_type_grc,
+                           const u8 is_dst_type_grc,
+                           struct qed_dmae_params *p_params)
+{
+       u16 opcode_b = 0;
+       u32 opcode = 0;
+
+       /* Whether the source is the PCIe or the GRC.
+        * 0- The source is the PCIe
+        * 1- The source is the GRC.
+        */
+       opcode |= (is_src_type_grc ? DMAE_CMD_SRC_MASK_GRC
+                                  : DMAE_CMD_SRC_MASK_PCIE) <<
+                  DMAE_CMD_SRC_SHIFT;
+       opcode |= ((p_hwfn->rel_pf_id & DMAE_CMD_SRC_PF_ID_MASK) <<
+                  DMAE_CMD_SRC_PF_ID_SHIFT);
+
+       /* The destination of the DMA can be: 0-None 1-PCIe 2-GRC 3-None */
+       opcode |= (is_dst_type_grc ? DMAE_CMD_DST_MASK_GRC
+                                  : DMAE_CMD_DST_MASK_PCIE) <<
+                  DMAE_CMD_DST_SHIFT;
+       opcode |= ((p_hwfn->rel_pf_id & DMAE_CMD_DST_PF_ID_MASK) <<
+                  DMAE_CMD_DST_PF_ID_SHIFT);
+
+       /* Whether to write a completion word to the completion destination:
+        * 0-Do not write a completion word
+        * 1-Write the completion word
+        */
+       opcode |= (DMAE_CMD_COMP_WORD_EN_MASK << DMAE_CMD_COMP_WORD_EN_SHIFT);
+       opcode |= (DMAE_CMD_SRC_ADDR_RESET_MASK <<
+                  DMAE_CMD_SRC_ADDR_RESET_SHIFT);
+
+       if (p_params->flags & QED_DMAE_FLAG_COMPLETION_DST)
+               opcode |= (1 << DMAE_CMD_COMP_FUNC_SHIFT);
+
+       opcode |= (DMAE_CMD_ENDIANITY << DMAE_CMD_ENDIANITY_MODE_SHIFT);
+
+       opcode |= ((p_hwfn->port_id) << DMAE_CMD_PORT_ID_SHIFT);
+
+       /* reset source address in next go */
+       opcode |= (DMAE_CMD_SRC_ADDR_RESET_MASK <<
+                  DMAE_CMD_SRC_ADDR_RESET_SHIFT);
+
+       /* reset dest address in next go */
+       opcode |= (DMAE_CMD_DST_ADDR_RESET_MASK <<
+                  DMAE_CMD_DST_ADDR_RESET_SHIFT);
+
+       /* SRC/DST VFID: all 1's - pf, otherwise VF id */
+       if (p_params->flags & QED_DMAE_FLAG_VF_SRC) {
+               opcode |= 1 << DMAE_CMD_SRC_VF_ID_VALID_SHIFT;
+               opcode_b |= p_params->src_vfid << DMAE_CMD_SRC_VF_ID_SHIFT;
+       } else {
+               opcode_b |= DMAE_CMD_SRC_VF_ID_MASK <<
+                           DMAE_CMD_SRC_VF_ID_SHIFT;
+       }
+
+       if (p_params->flags & QED_DMAE_FLAG_VF_DST) {
+               opcode |= 1 << DMAE_CMD_DST_VF_ID_VALID_SHIFT;
+               opcode_b |= p_params->dst_vfid << DMAE_CMD_DST_VF_ID_SHIFT;
+       } else {
+               opcode_b |= DMAE_CMD_DST_VF_ID_MASK << DMAE_CMD_DST_VF_ID_SHIFT;
+       }
+
+       p_hwfn->dmae_info.p_dmae_cmd->opcode = cpu_to_le32(opcode);
+       p_hwfn->dmae_info.p_dmae_cmd->opcode_b = cpu_to_le16(opcode_b);
+}
+
+u32 qed_dmae_idx_to_go_cmd(u8 idx)
+{
+       /* All the DMAE 'go' registers form an array in internal memory */
+       return DMAE_REG_GO_C0 + (idx << 2);
+}
+
+static int
+qed_dmae_post_command(struct qed_hwfn *p_hwfn,
+                     struct qed_ptt *p_ptt)
+{
+       struct dmae_cmd *command = p_hwfn->dmae_info.p_dmae_cmd;
+       u8 idx_cmd = p_hwfn->dmae_info.channel, i;
+       int qed_status = 0;
+
+       /* verify address is not NULL */
+       if ((((command->dst_addr_lo == 0) && (command->dst_addr_hi == 0)) ||
+            ((command->src_addr_lo == 0) && (command->src_addr_hi == 0)))) {
+               DP_NOTICE(p_hwfn,
+                         "source or destination address 0 idx_cmd=%d\n"
+                         "opcode = [0x%08x,0x%04x] len=0x%x src=0x%x:%x dst=0x%x:%x\n",
+                          idx_cmd,
+                          le32_to_cpu(command->opcode),
+                          le16_to_cpu(command->opcode_b),
+                          le16_to_cpu(command->length_dw),
+                          le32_to_cpu(command->src_addr_hi),
+                          le32_to_cpu(command->src_addr_lo),
+                          le32_to_cpu(command->dst_addr_hi),
+                          le32_to_cpu(command->dst_addr_lo));
+
+               return -EINVAL;
+       }
+
+       DP_VERBOSE(p_hwfn,
+                  NETIF_MSG_HW,
+                  "Posting DMAE command [idx %d]: opcode = [0x%08x,0x%04x] len=0x%x src=0x%x:%x dst=0x%x:%x\n",
+                  idx_cmd,
+                  le32_to_cpu(command->opcode),
+                  le16_to_cpu(command->opcode_b),
+                  le16_to_cpu(command->length_dw),
+                  le32_to_cpu(command->src_addr_hi),
+                  le32_to_cpu(command->src_addr_lo),
+                  le32_to_cpu(command->dst_addr_hi),
+                  le32_to_cpu(command->dst_addr_lo));
+
+       /* Copy the command to DMAE - this needs to be done before every call,
+        * since the source/dest addresses are not reset.
+        * The first 9 DWs are the command registers, the 10th DW is the
+        * GO register, and the rest are result registers
+        * (which are read only by the client).
+        */
+       for (i = 0; i < DMAE_CMD_SIZE; i++) {
+               u32 data = (i < DMAE_CMD_SIZE_TO_FILL) ?
+                          *(((u32 *)command) + i) : 0;
+
+               qed_wr(p_hwfn, p_ptt,
+                      DMAE_REG_CMD_MEM +
+                      (idx_cmd * DMAE_CMD_SIZE * sizeof(u32)) +
+                      (i * sizeof(u32)), data);
+       }
+
+       qed_wr(p_hwfn, p_ptt,
+              qed_dmae_idx_to_go_cmd(idx_cmd),
+              DMAE_GO_VALUE);
+
+       return qed_status;
+}
+
+int qed_dmae_info_alloc(struct qed_hwfn *p_hwfn)
+{
+       dma_addr_t *p_addr = &p_hwfn->dmae_info.completion_word_phys_addr;
+       struct dmae_cmd **p_cmd = &p_hwfn->dmae_info.p_dmae_cmd;
+       u32 **p_buff = &p_hwfn->dmae_info.p_intermediate_buffer;
+       u32 **p_comp = &p_hwfn->dmae_info.p_completion_word;
+
+       *p_comp = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
+                                    sizeof(u32),
+                                    p_addr,
+                                    GFP_KERNEL);
+       if (!*p_comp) {
+               DP_NOTICE(p_hwfn, "Failed to allocate `p_completion_word'\n");
+               goto err;
+       }
+
+       p_addr = &p_hwfn->dmae_info.dmae_cmd_phys_addr;
+       *p_cmd = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
+                                   sizeof(struct dmae_cmd),
+                                   p_addr, GFP_KERNEL);
+       if (!*p_cmd) {
+               DP_NOTICE(p_hwfn, "Failed to allocate `struct dmae_cmd'\n");
+               goto err;
+       }
+
+       p_addr = &p_hwfn->dmae_info.intermediate_buffer_phys_addr;
+       *p_buff = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
+                                    sizeof(u32) * DMAE_MAX_RW_SIZE,
+                                    p_addr, GFP_KERNEL);
+       if (!*p_buff) {
+               DP_NOTICE(p_hwfn, "Failed to allocate `intermediate_buffer'\n");
+               goto err;
+       }
+
+       p_hwfn->dmae_info.channel = p_hwfn->rel_pf_id;
+
+       return 0;
+err:
+       qed_dmae_info_free(p_hwfn);
+       return -ENOMEM;
+}
+
+void qed_dmae_info_free(struct qed_hwfn *p_hwfn)
+{
+       dma_addr_t p_phys;
+
+       /* Just make sure no one is in the middle */
+       mutex_lock(&p_hwfn->dmae_info.mutex);
+
+       if (p_hwfn->dmae_info.p_completion_word) {
+               p_phys = p_hwfn->dmae_info.completion_word_phys_addr;
+               dma_free_coherent(&p_hwfn->cdev->pdev->dev,
+                                 sizeof(u32),
+                                 p_hwfn->dmae_info.p_completion_word,
+                                 p_phys);
+               p_hwfn->dmae_info.p_completion_word = NULL;
+       }
+
+       if (p_hwfn->dmae_info.p_dmae_cmd) {
+               p_phys = p_hwfn->dmae_info.dmae_cmd_phys_addr;
+               dma_free_coherent(&p_hwfn->cdev->pdev->dev,
+                                 sizeof(struct dmae_cmd),
+                                 p_hwfn->dmae_info.p_dmae_cmd,
+                                 p_phys);
+               p_hwfn->dmae_info.p_dmae_cmd = NULL;
+       }
+
+       if (p_hwfn->dmae_info.p_intermediate_buffer) {
+               p_phys = p_hwfn->dmae_info.intermediate_buffer_phys_addr;
+               dma_free_coherent(&p_hwfn->cdev->pdev->dev,
+                                 sizeof(u32) * DMAE_MAX_RW_SIZE,
+                                 p_hwfn->dmae_info.p_intermediate_buffer,
+                                 p_phys);
+               p_hwfn->dmae_info.p_intermediate_buffer = NULL;
+       }
+
+       mutex_unlock(&p_hwfn->dmae_info.mutex);
+}
+
+static int qed_dmae_operation_wait(struct qed_hwfn *p_hwfn)
+{
+       u32 wait_cnt = 0;
+       u32 wait_cnt_limit = 10000;
+
+       int qed_status = 0;
+
+       barrier();
+       while (*p_hwfn->dmae_info.p_completion_word != DMAE_COMPLETION_VAL) {
+               udelay(DMAE_MIN_WAIT_TIME);
+               if (++wait_cnt > wait_cnt_limit) {
+                       DP_NOTICE(p_hwfn->cdev,
+                                 "Timed-out waiting for operation to complete. Completion word is 0x%08x expected 0x%08x.\n",
+                                 *p_hwfn->dmae_info.p_completion_word,
+                                DMAE_COMPLETION_VAL);
+                       qed_status = -EBUSY;
+                       break;
+               }
+
+               /* to sync the completion_word since we are not
+                * using the volatile keyword for p_completion_word
+                */
+               barrier();
+       }
+
+       if (qed_status == 0)
+               *p_hwfn->dmae_info.p_completion_word = 0;
+
+       return qed_status;
+}
+
+static int qed_dmae_execute_sub_operation(struct qed_hwfn *p_hwfn,
+                                         struct qed_ptt *p_ptt,
+                                         u64 src_addr,
+                                         u64 dst_addr,
+                                         u8 src_type,
+                                         u8 dst_type,
+                                         u32 length)
+{
+       dma_addr_t phys = p_hwfn->dmae_info.intermediate_buffer_phys_addr;
+       struct dmae_cmd *cmd = p_hwfn->dmae_info.p_dmae_cmd;
+       int qed_status = 0;
+
+       switch (src_type) {
+       case QED_DMAE_ADDRESS_GRC:
+       case QED_DMAE_ADDRESS_HOST_PHYS:
+               cmd->src_addr_hi = cpu_to_le32(upper_32_bits(src_addr));
+               cmd->src_addr_lo = cpu_to_le32(lower_32_bits(src_addr));
+               break;
+       /* for virtual source addresses we use the intermediate buffer. */
+       case QED_DMAE_ADDRESS_HOST_VIRT:
+               cmd->src_addr_hi = cpu_to_le32(upper_32_bits(phys));
+               cmd->src_addr_lo = cpu_to_le32(lower_32_bits(phys));
+               memcpy(&p_hwfn->dmae_info.p_intermediate_buffer[0],
+                      (void *)(uintptr_t)src_addr,
+                      length * sizeof(u32));
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       switch (dst_type) {
+       case QED_DMAE_ADDRESS_GRC:
+       case QED_DMAE_ADDRESS_HOST_PHYS:
+               cmd->dst_addr_hi = cpu_to_le32(upper_32_bits(dst_addr));
+               cmd->dst_addr_lo = cpu_to_le32(lower_32_bits(dst_addr));
+               break;
+       /* for virtual destination addresses we use the intermediate buffer. */
+       case QED_DMAE_ADDRESS_HOST_VIRT:
+               cmd->dst_addr_hi = cpu_to_le32(upper_32_bits(phys));
+               cmd->dst_addr_lo = cpu_to_le32(lower_32_bits(phys));
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       cmd->length_dw = cpu_to_le16((u16)length);
+
+       qed_dmae_post_command(p_hwfn, p_ptt);
+
+       qed_status = qed_dmae_operation_wait(p_hwfn);
+
+       if (qed_status) {
+               DP_NOTICE(p_hwfn,
+                         "qed_dmae_host2grc: Wait Failed. source_addr 0x%llx, grc_addr 0x%llx, size_in_dwords 0x%x\n",
+                         src_addr,
+                         dst_addr,
+                         length);
+               return qed_status;
+       }
+
+       if (dst_type == QED_DMAE_ADDRESS_HOST_VIRT)
+               memcpy((void *)(uintptr_t)(dst_addr),
+                      &p_hwfn->dmae_info.p_intermediate_buffer[0],
+                      length * sizeof(u32));
+
+       return 0;
+}
+
+static int qed_dmae_execute_command(struct qed_hwfn *p_hwfn,
+                                   struct qed_ptt *p_ptt,
+                                   u64 src_addr, u64 dst_addr,
+                                   u8 src_type, u8 dst_type,
+                                   u32 size_in_dwords,
+                                   struct qed_dmae_params *p_params)
+{
+       dma_addr_t phys = p_hwfn->dmae_info.completion_word_phys_addr;
+       u16 length_cur = 0, i = 0, cnt_split = 0, length_mod = 0;
+       struct dmae_cmd *cmd = p_hwfn->dmae_info.p_dmae_cmd;
+       u64 src_addr_split = 0, dst_addr_split = 0;
+       u16 length_limit = DMAE_MAX_RW_SIZE;
+       int qed_status = 0;
+       u32 offset = 0;
+
+       qed_dmae_opcode(p_hwfn,
+                       (src_type == QED_DMAE_ADDRESS_GRC),
+                       (dst_type == QED_DMAE_ADDRESS_GRC),
+                       p_params);
+
+       cmd->comp_addr_lo = cpu_to_le32(lower_32_bits(phys));
+       cmd->comp_addr_hi = cpu_to_le32(upper_32_bits(phys));
+       cmd->comp_val = cpu_to_le32(DMAE_COMPLETION_VAL);
+
+       /* Check that the grc_addr is valid (i.e. below MAX_GRC_OFFSET) */
+       cnt_split = size_in_dwords / length_limit;
+       length_mod = size_in_dwords % length_limit;
+
+       src_addr_split = src_addr;
+       dst_addr_split = dst_addr;
+
+       for (i = 0; i <= cnt_split; i++) {
+               offset = length_limit * i;
+
+               if (!(p_params->flags & QED_DMAE_FLAG_RW_REPL_SRC)) {
+                       if (src_type == QED_DMAE_ADDRESS_GRC)
+                               src_addr_split = src_addr + offset;
+                       else
+                               src_addr_split = src_addr + (offset * 4);
+               }
+
+               if (dst_type == QED_DMAE_ADDRESS_GRC)
+                       dst_addr_split = dst_addr + offset;
+               else
+                       dst_addr_split = dst_addr + (offset * 4);
+
+               length_cur = (cnt_split == i) ? length_mod : length_limit;
+
+               /* might be zero on last iteration */
+               if (!length_cur)
+                       continue;
+
+               qed_status = qed_dmae_execute_sub_operation(p_hwfn,
+                                                           p_ptt,
+                                                           src_addr_split,
+                                                           dst_addr_split,
+                                                           src_type,
+                                                           dst_type,
+                                                           length_cur);
+               if (qed_status) {
+                       DP_NOTICE(p_hwfn,
+                                 "qed_dmae_execute_sub_operation Failed with error 0x%x. source_addr 0x%llx, destination addr 0x%llx, size_in_dwords 0x%x\n",
+                                 qed_status,
+                                 src_addr,
+                                 dst_addr,
+                                 length_cur);
+                       break;
+               }
+       }
+
+       return qed_status;
+}
+
+int qed_dmae_host2grc(struct qed_hwfn *p_hwfn,
+                     struct qed_ptt *p_ptt,
+                     u64 source_addr,
+                     u32 grc_addr,
+                     u32 size_in_dwords,
+                     u32 flags)
+{
+       u32 grc_addr_in_dw = grc_addr / sizeof(u32);
+       struct qed_dmae_params params;
+       int rc;
+
+       memset(&params, 0, sizeof(struct qed_dmae_params));
+       params.flags = flags;
+
+       mutex_lock(&p_hwfn->dmae_info.mutex);
+
+       rc = qed_dmae_execute_command(p_hwfn, p_ptt, source_addr,
+                                     grc_addr_in_dw,
+                                     QED_DMAE_ADDRESS_HOST_VIRT,
+                                     QED_DMAE_ADDRESS_GRC,
+                                     size_in_dwords, &params);
+
+       mutex_unlock(&p_hwfn->dmae_info.mutex);
+
+       return rc;
+}
+
+int
+qed_dmae_grc2host(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 grc_addr,
+                 dma_addr_t dest_addr, u32 size_in_dwords, u32 flags)
+{
+       u32 grc_addr_in_dw = grc_addr / sizeof(u32);
+       struct qed_dmae_params params;
+       int rc;
+
+       memset(&params, 0, sizeof(struct qed_dmae_params));
+       params.flags = flags;
+
+       mutex_lock(&p_hwfn->dmae_info.mutex);
+
+       rc = qed_dmae_execute_command(p_hwfn, p_ptt, grc_addr_in_dw,
+                                     dest_addr, QED_DMAE_ADDRESS_GRC,
+                                     QED_DMAE_ADDRESS_HOST_VIRT,
+                                     size_in_dwords, &params);
+
+       mutex_unlock(&p_hwfn->dmae_info.mutex);
+
+       return rc;
+}
+
+int
+qed_dmae_host2host(struct qed_hwfn *p_hwfn,
+                  struct qed_ptt *p_ptt,
+                  dma_addr_t source_addr,
+                  dma_addr_t dest_addr,
+                  u32 size_in_dwords, struct qed_dmae_params *p_params)
+{
+       int rc;
+
+       mutex_lock(&(p_hwfn->dmae_info.mutex));
+
+       rc = qed_dmae_execute_command(p_hwfn, p_ptt, source_addr,
+                                     dest_addr,
+                                     QED_DMAE_ADDRESS_HOST_PHYS,
+                                     QED_DMAE_ADDRESS_HOST_PHYS,
+                                     size_in_dwords, p_params);
+
+       mutex_unlock(&(p_hwfn->dmae_info.mutex));
+
+       return rc;
+}
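+
+/* Illustrative sketch (not part of this patch): a typical host-to-GRC copy
+ * passes the virtual address of a host buffer together with a GRC offset,
+ * e.g.:
+ *
+ *     rc = qed_dmae_host2grc(p_hwfn, p_ptt, (u64)(uintptr_t)buf,
+ *                            grc_addr, size_in_dwords, 0);
+ *
+ * where buf, grc_addr and size_in_dwords are hypothetical; a flags value of
+ * 0 selects the default DMAE behaviour.
+ */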
+
+u16 qed_get_qm_pq(struct qed_hwfn *p_hwfn,
+                 enum protocol_type proto, union qed_qm_pq_params *p_params)
+{
+       u16 pq_id = 0;
+
+       if ((proto == PROTOCOLID_CORE ||
+            proto == PROTOCOLID_ETH ||
+            proto == PROTOCOLID_ISCSI ||
+            proto == PROTOCOLID_ROCE) && !p_params) {
+               DP_NOTICE(p_hwfn,
+                         "Protocol %d received NULL PQ params\n", proto);
+               return 0;
+       }
+
+       switch (proto) {
+       case PROTOCOLID_CORE:
+               if (p_params->core.tc == LB_TC)
+                       pq_id = p_hwfn->qm_info.pure_lb_pq;
+               else if (p_params->core.tc == OOO_LB_TC)
+                       pq_id = p_hwfn->qm_info.ooo_pq;
+               else
+                       pq_id = p_hwfn->qm_info.offload_pq;
+               break;
+       case PROTOCOLID_ETH:
+               pq_id = p_params->eth.tc;
+               if (p_params->eth.is_vf)
+                       pq_id += p_hwfn->qm_info.vf_queues_offset +
+                                p_params->eth.vf_id;
+               break;
+       case PROTOCOLID_ISCSI:
+               if (p_params->iscsi.q_idx == 1)
+                       pq_id = p_hwfn->qm_info.pure_ack_pq;
+               break;
+       case PROTOCOLID_ROCE:
+               if (p_params->roce.dcqcn)
+                       pq_id = p_params->roce.qpid;
+               else
+                       pq_id = p_hwfn->qm_info.offload_pq;
+               if (pq_id > p_hwfn->qm_info.num_pf_rls)
+                       pq_id = p_hwfn->qm_info.offload_pq;
+               break;
+       default:
+               pq_id = 0;
+       }
+
+       pq_id = CM_TX_PQ_BASE + pq_id + RESC_START(p_hwfn, QED_PQ);
+
+       return pq_id;
+}
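+
+/* Illustrative sketch (not part of this patch): an L2 queue start path could
+ * resolve its physical queue roughly like this:
+ *
+ *     union qed_qm_pq_params pq_params;
+ *
+ *     memset(&pq_params, 0, sizeof(pq_params));
+ *     pq_params.eth.tc = tc;
+ *     pq_id = qed_get_qm_pq(p_hwfn, PROTOCOLID_ETH, &pq_params);
+ *
+ * where tc is a hypothetical traffic-class index.
+ */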
diff --git a/drivers/net/ethernet/qlogic/qed/qed_hw.h b/drivers/net/ethernet/qlogic/qed/qed_hw.h
new file mode 100644 (file)
index 0000000..d015570
--- /dev/null
@@ -0,0 +1,281 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015 QLogic Corporation
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#ifndef _QED_HW_H
+#define _QED_HW_H
+
+#include <linux/types.h>
+#include <linux/bitops.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include "qed.h"
+#include "qed_dev_api.h"
+
+/* Forward declaration */
+struct qed_ptt;
+
+enum reserved_ptts {
+       RESERVED_PTT_EDIAG,
+       RESERVED_PTT_USER_SPACE,
+       RESERVED_PTT_MAIN,
+       RESERVED_PTT_DPC,
+       RESERVED_PTT_MAX
+};
+
+enum _dmae_cmd_dst_mask {
+       DMAE_CMD_DST_MASK_NONE  = 0,
+       DMAE_CMD_DST_MASK_PCIE  = 1,
+       DMAE_CMD_DST_MASK_GRC   = 2
+};
+
+enum _dmae_cmd_src_mask {
+       DMAE_CMD_SRC_MASK_PCIE  = 0,
+       DMAE_CMD_SRC_MASK_GRC   = 1
+};
+
+enum _dmae_cmd_crc_mask {
+       DMAE_CMD_COMP_CRC_EN_MASK_NONE  = 0,
+       DMAE_CMD_COMP_CRC_EN_MASK_SET   = 1
+};
+
+/* definitions for DMA constants */
+#define DMAE_GO_VALUE   0x1
+
+#define DMAE_COMPLETION_VAL     0xD1AE
+#define DMAE_CMD_ENDIANITY      0x2
+
+#define DMAE_CMD_SIZE   14
+#define DMAE_CMD_SIZE_TO_FILL   (DMAE_CMD_SIZE - 5)
+#define DMAE_MIN_WAIT_TIME      0x2
+#define DMAE_MAX_CLIENTS        32
+
+/**
+ * @brief qed_gtt_init - Initialize GTT windows
+ *
+ * @param p_hwfn
+ */
+void qed_gtt_init(struct qed_hwfn *p_hwfn);
+
+/**
+ * @brief qed_ptt_invalidate - Forces all ptt entries to be re-configured
+ *
+ * @param p_hwfn
+ */
+void qed_ptt_invalidate(struct qed_hwfn *p_hwfn);
+
+/**
+ * @brief qed_ptt_pool_alloc - Allocate and initialize PTT pool
+ *
+ * @param p_hwfn
+ *
+ * @return struct _qed_status - success (0), negative - error.
+ */
+int qed_ptt_pool_alloc(struct qed_hwfn *p_hwfn);
+
+/**
+ * @brief qed_ptt_pool_free -
+ *
+ * @param p_hwfn
+ */
+void qed_ptt_pool_free(struct qed_hwfn *p_hwfn);
+
+/**
+ * @brief qed_ptt_get_hw_addr - Get PTT's GRC/HW address
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ *
+ * @return u32
+ */
+u32 qed_ptt_get_hw_addr(struct qed_hwfn *p_hwfn,
+                       struct qed_ptt *p_ptt);
+
+/**
+ * @brief qed_ptt_get_bar_addr - Get PTT's external BAR address
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ *
+ * @return u32
+ */
+u32 qed_ptt_get_bar_addr(struct qed_ptt *p_ptt);
+
+/**
+ * @brief qed_ptt_set_win - Set PTT Window's GRC BAR address
+ *
+ * @param p_hwfn
+ * @param new_hw_addr
+ * @param p_ptt
+ */
+void qed_ptt_set_win(struct qed_hwfn *p_hwfn,
+                    struct qed_ptt *p_ptt,
+                    u32 new_hw_addr);
+
+/**
+ * @brief qed_get_reserved_ptt - Get a specific reserved PTT
+ *
+ * @param p_hwfn
+ * @param ptt_idx
+ *
+ * @return struct qed_ptt *
+ */
+struct qed_ptt *qed_get_reserved_ptt(struct qed_hwfn *p_hwfn,
+                                    enum reserved_ptts ptt_idx);
+
+/**
+ * @brief qed_wr - Write value to BAR using the given ptt
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param hw_addr
+ * @param val
+ */
+void qed_wr(struct qed_hwfn *p_hwfn,
+           struct qed_ptt *p_ptt,
+           u32 hw_addr,
+           u32 val);
+
+/**
+ * @brief qed_rd - Read value from BAR using the given ptt
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param hw_addr
+ *
+ * @return u32
+ */
+u32 qed_rd(struct qed_hwfn *p_hwfn,
+          struct qed_ptt *p_ptt,
+          u32 hw_addr);
+
+/**
+ * @brief qed_memcpy_from - copy n bytes from BAR using the given
+ *        ptt
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param dest
+ * @param hw_addr
+ * @param n
+ */
+void qed_memcpy_from(struct qed_hwfn *p_hwfn,
+                    struct qed_ptt *p_ptt,
+                    void *dest,
+                    u32 hw_addr,
+                    size_t n);
+
+/**
+ * @brief qed_memcpy_to - copy n bytes to BAR using the given
+ *        ptt
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param hw_addr
+ * @param src
+ * @param n
+ */
+void qed_memcpy_to(struct qed_hwfn *p_hwfn,
+                  struct qed_ptt *p_ptt,
+                  u32 hw_addr,
+                  void *src,
+                  size_t n);
+/**
+ * @brief qed_fid_pretend - pretend to another function when
+ *        accessing the ptt window. There is no way to unpretend
+ *        a function. The only way to cancel a pretend is to
+ *        pretend back to the original function.
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param fid - fid field of the pxp_pretend structure. Can contain
+ *            either a PF or a VF; the port/path fields are don't-care.
+ */
+void qed_fid_pretend(struct qed_hwfn *p_hwfn,
+                    struct qed_ptt *p_ptt,
+                    u16 fid);
+
+/**
+ * @brief qed_port_pretend - pretend to another port when
+ *        accessing the ptt window
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param port_id - the port to pretend to
+ */
+void qed_port_pretend(struct qed_hwfn *p_hwfn,
+                     struct qed_ptt *p_ptt,
+                     u8 port_id);
+
+/**
+ * @brief qed_port_unpretend - cancel any previously set port
+ *        pretend
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ */
+void qed_port_unpretend(struct qed_hwfn *p_hwfn,
+                       struct qed_ptt *p_ptt);
+
+/**
+ * @brief qed_vfid_to_concrete - build a concrete FID for a
+ *        given VF ID
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param vfid
+ */
+u32 qed_vfid_to_concrete(struct qed_hwfn *p_hwfn, u8 vfid);
+
+/**
+ * @brief qed_dmae_idx_to_go_cmd - map the idx to dmae cmd
+ * this is declared here since other files will require it.
+ * @param idx
+ */
+u32 qed_dmae_idx_to_go_cmd(u8 idx);
+
+/**
+ * @brief qed_dmae_info_alloc - Init the dmae_info structure
+ * which is part of p_hwfn.
+ * @param p_hwfn
+ */
+int qed_dmae_info_alloc(struct qed_hwfn *p_hwfn);
+
+/**
+ * @brief qed_dmae_info_free - Free the dmae_info structure
+ * which is part of p_hwfn
+ *
+ * @param p_hwfn
+ */
+void qed_dmae_info_free(struct qed_hwfn *p_hwfn);
+
+union qed_qm_pq_params {
+       struct {
+               u8 q_idx;
+       } iscsi;
+
+       struct {
+               u8 tc;
+       }       core;
+
+       struct {
+               u8      is_vf;
+               u8      vf_id;
+               u8      tc;
+       }       eth;
+
+       struct {
+               u8 dcqcn;
+               u8 qpid;        /* roce relative */
+       } roce;
+};
+
+u16 qed_get_qm_pq(struct qed_hwfn *p_hwfn,
+                 enum protocol_type proto, union qed_qm_pq_params *params);
+
+int qed_init_fw_data(struct qed_dev *cdev,
+                    const u8 *fw_data);
+#endif
diff --git a/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c b/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c
new file mode 100644 (file)
index 0000000..23e455f
--- /dev/null
@@ -0,0 +1,933 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015 QLogic Corporation
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#include <linux/types.h>
+#include <linux/delay.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include "qed_hsi.h"
+#include "qed_hw.h"
+#include "qed_init_ops.h"
+#include "qed_reg_addr.h"
+
+enum cminterface {
+       MCM_SEC,
+       MCM_PRI,
+       UCM_SEC,
+       UCM_PRI,
+       TCM_SEC,
+       TCM_PRI,
+       YCM_SEC,
+       YCM_PRI,
+       XCM_SEC,
+       XCM_PRI,
+       NUM_OF_CM_INTERFACES
+};
+
+/* general constants */
+#define QM_PQ_MEM_4KB(pq_size) (pq_size ? DIV_ROUND_UP((pq_size + 1) * \
+                                                       QM_PQ_ELEMENT_SIZE, \
+                                                       0x1000) : 0)
+#define QM_PQ_SIZE_256B(pq_size)       (pq_size ? DIV_ROUND_UP(pq_size, \
+                                                               0x100) - 1 : 0)
+#define QM_INVALID_PQ_ID                        0xffff
+/* feature enable */
+#define QM_BYPASS_EN                            1
+#define QM_BYTE_CRD_EN                          1
+/* other PQ constants */
+#define QM_OTHER_PQS_PER_PF                     4
+/* WFQ constants */
+#define QM_WFQ_UPPER_BOUND             62500000
+#define QM_WFQ_VP_PQ_VOQ_SHIFT          0
+#define QM_WFQ_VP_PQ_PF_SHIFT           5
+#define QM_WFQ_INC_VAL(weight)          ((weight) * 0x9000)
+#define QM_WFQ_MAX_INC_VAL                      43750000
+
+/* RL constants */
+#define QM_RL_UPPER_BOUND                       62500000
+#define QM_RL_PERIOD                            5               /* in us */
+#define QM_RL_PERIOD_CLK_25M            (25 * QM_RL_PERIOD)
+#define QM_RL_MAX_INC_VAL                       43750000
+#define QM_RL_INC_VAL(rate)            max_t(u32,      \
+                                             (u32)(((rate ? rate : \
+                                                     1000000) *    \
+                                                    QM_RL_PERIOD * \
+                                                    101) / (8 * 100)), 1)
+/* AFullOprtnstcCrdMask constants */
+#define QM_OPPOR_LINE_VOQ_DEF           1
+#define QM_OPPOR_FW_STOP_DEF            0
+#define QM_OPPOR_PQ_EMPTY_DEF           1
+/* Command Queue constants */
+#define PBF_CMDQ_PURE_LB_LINES                          150
+#define PBF_CMDQ_LINES_RT_OFFSET(voq)           (               \
+               PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET + voq * \
+               (PBF_REG_YCMD_QS_NUM_LINES_VOQ1_RT_OFFSET -      \
+                PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET))
+#define PBF_BTB_GUARANTEED_RT_OFFSET(voq)       (            \
+               PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET + voq * \
+               (PBF_REG_BTB_GUARANTEED_VOQ1_RT_OFFSET -      \
+                PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET))
+#define QM_VOQ_LINE_CRD(pbf_cmd_lines)          ((((pbf_cmd_lines) - \
+                                                  4) *              \
+                                                 2) | QM_LINE_CRD_REG_SIGN_BIT)
+/* BTB: blocks constants (block size = 256B) */
+#define BTB_JUMBO_PKT_BLOCKS            38
+#define BTB_HEADROOM_BLOCKS                     BTB_JUMBO_PKT_BLOCKS
+#define BTB_PURE_LB_FACTOR                      10
+#define BTB_PURE_LB_RATIO                       7
+/* QM stop command constants */
+#define QM_STOP_PQ_MASK_WIDTH                   32
+#define QM_STOP_CMD_ADDR                                0x2
+#define QM_STOP_CMD_STRUCT_SIZE                 2
+#define QM_STOP_CMD_PAUSE_MASK_OFFSET   0
+#define QM_STOP_CMD_PAUSE_MASK_SHIFT    0
+#define QM_STOP_CMD_PAUSE_MASK_MASK             -1
+#define QM_STOP_CMD_GROUP_ID_OFFSET             1
+#define QM_STOP_CMD_GROUP_ID_SHIFT              16
+#define QM_STOP_CMD_GROUP_ID_MASK               15
+#define QM_STOP_CMD_PQ_TYPE_OFFSET              1
+#define QM_STOP_CMD_PQ_TYPE_SHIFT               24
+#define QM_STOP_CMD_PQ_TYPE_MASK                1
+#define QM_STOP_CMD_MAX_POLL_COUNT              100
+#define QM_STOP_CMD_POLL_PERIOD_US              500
+/* QM command macros */
+#define QM_CMD_STRUCT_SIZE(cmd)                        cmd ## \
+       _STRUCT_SIZE
+#define QM_CMD_SET_FIELD(var, cmd, field,                                \
+                        value)        SET_FIELD(var[cmd ## _ ## field ## \
+                                                    _OFFSET],            \
+                                                cmd ## _ ## field,       \
+                                                value)
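+
+/* Illustrative sketch (not part of this patch): the QM stop-command helpers
+ * above are meant to operate on a local command buffer, roughly:
+ *
+ *     u32 cmd_arr[QM_CMD_STRUCT_SIZE(QM_STOP_CMD)] = { 0 };
+ *
+ *     QM_CMD_SET_FIELD(cmd_arr, QM_STOP_CMD, PAUSE_MASK, pause_mask);
+ *     QM_CMD_SET_FIELD(cmd_arr, QM_STOP_CMD, PQ_TYPE, is_tx_pq);
+ *
+ * where pause_mask and is_tx_pq are hypothetical.
+ */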
+/* QM: VOQ macros */
+#define PHYS_VOQ(port, tc, max_phys_tcs_per_port) ((port) *    \
+                                                  (max_phys_tcs_per_port) + \
+                                                  (tc))
+#define LB_VOQ(port)                           ( \
+               MAX_PHYS_VOQS + (port))
+#define VOQ(port, tc, max_phy_tcs_pr_port)     \
+       ((tc) <         \
+        LB_TC ? PHYS_VOQ(port,         \
+                         tc,                    \
+                         max_phy_tcs_pr_port) \
+               : LB_VOQ(port))
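+
+/* Illustrative sketch (not part of this patch): with the macros above, a
+ * physical TC maps to PHYS_VOQ(port, tc, max_phys_tcs_per_port) while the
+ * pure-LB TC of the same port maps to LB_VOQ(port); VOQ() picks between the
+ * two based on whether tc is below LB_TC, e.g.:
+ *
+ *     voq = VOQ(port_id, tc, max_phys_tcs_per_port);
+ */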
+/******************** INTERNAL IMPLEMENTATION *********************/
+/* Prepare PF RL enable/disable runtime init values */
+static void qed_enable_pf_rl(struct qed_hwfn *p_hwfn, bool pf_rl_en)
+{
+       STORE_RT_REG(p_hwfn, QM_REG_RLPFENABLE_RT_OFFSET, pf_rl_en ? 1 : 0);
+       if (pf_rl_en) {
+               /* enable RLs for all VOQs */
+               STORE_RT_REG(p_hwfn, QM_REG_RLPFVOQENABLE_RT_OFFSET,
+                            (1 << MAX_NUM_VOQS) - 1);
+               /* write RL period */
+               STORE_RT_REG(p_hwfn,
+                            QM_REG_RLPFPERIOD_RT_OFFSET, QM_RL_PERIOD_CLK_25M);
+               STORE_RT_REG(p_hwfn,
+                            QM_REG_RLPFPERIODTIMER_RT_OFFSET,
+                            QM_RL_PERIOD_CLK_25M);
+               /* set credit threshold for QM bypass flow */
+               if (QM_BYPASS_EN)
+                       STORE_RT_REG(p_hwfn,
+                                    QM_REG_AFULLQMBYPTHRPFRL_RT_OFFSET,
+                                    QM_RL_UPPER_BOUND);
+       }
+}
+
+/* Prepare PF WFQ enable/disable runtime init values */
+static void qed_enable_pf_wfq(struct qed_hwfn *p_hwfn, bool pf_wfq_en)
+{
+       STORE_RT_REG(p_hwfn, QM_REG_WFQPFENABLE_RT_OFFSET, pf_wfq_en ? 1 : 0);
+       /* set credit threshold for QM bypass flow */
+       if (pf_wfq_en && QM_BYPASS_EN)
+               STORE_RT_REG(p_hwfn,
+                            QM_REG_AFULLQMBYPTHRPFWFQ_RT_OFFSET,
+                            QM_WFQ_UPPER_BOUND);
+}
+
+/* Prepare VPORT RL enable/disable runtime init values */
+static void qed_enable_vport_rl(struct qed_hwfn *p_hwfn, bool vport_rl_en)
+{
+       STORE_RT_REG(p_hwfn, QM_REG_RLGLBLENABLE_RT_OFFSET,
+                    vport_rl_en ? 1 : 0);
+       if (vport_rl_en) {
+               /* write RL period (use timer 0 only) */
+               STORE_RT_REG(p_hwfn,
+                            QM_REG_RLGLBLPERIOD_0_RT_OFFSET,
+                            QM_RL_PERIOD_CLK_25M);
+               STORE_RT_REG(p_hwfn,
+                            QM_REG_RLGLBLPERIODTIMER_0_RT_OFFSET,
+                            QM_RL_PERIOD_CLK_25M);
+               /* set credit threshold for QM bypass flow */
+               if (QM_BYPASS_EN)
+                       STORE_RT_REG(p_hwfn,
+                                    QM_REG_AFULLQMBYPTHRGLBLRL_RT_OFFSET,
+                                    QM_RL_UPPER_BOUND);
+       }
+}
+
+/* Prepare VPORT WFQ enable/disable runtime init values */
+static void qed_enable_vport_wfq(struct qed_hwfn *p_hwfn, bool vport_wfq_en)
+{
+       STORE_RT_REG(p_hwfn, QM_REG_WFQVPENABLE_RT_OFFSET,
+                    vport_wfq_en ? 1 : 0);
+       /* set credit threshold for QM bypass flow */
+       if (vport_wfq_en && QM_BYPASS_EN)
+               STORE_RT_REG(p_hwfn,
+                            QM_REG_AFULLQMBYPTHRVPWFQ_RT_OFFSET,
+                            QM_WFQ_UPPER_BOUND);
+}
+
+/* Prepare runtime init values to allocate PBF command queue lines for
+ * the specified VOQ
+ */
+static void qed_cmdq_lines_voq_rt_init(struct qed_hwfn *p_hwfn,
+                                      u8 voq, u16 cmdq_lines)
+{
+       u32 qm_line_crd;
+
+       /* In A0 - limit the size of the pbf queue so that only 511 commands
+        * with the minimum size of 4 (FCoE minimum size) can fit in it
+        */
+       bool is_bb_a0 = QED_IS_BB_A0(p_hwfn->cdev);
+
+       if (is_bb_a0)
+               cmdq_lines = min_t(u32, cmdq_lines, 1022);
+       qm_line_crd = QM_VOQ_LINE_CRD(cmdq_lines);
+       OVERWRITE_RT_REG(p_hwfn, PBF_CMDQ_LINES_RT_OFFSET(voq),
+                        (u32)cmdq_lines);
+       STORE_RT_REG(p_hwfn, QM_REG_VOQCRDLINE_RT_OFFSET + voq, qm_line_crd);
+       STORE_RT_REG(p_hwfn, QM_REG_VOQINITCRDLINE_RT_OFFSET + voq,
+                    qm_line_crd);
+}
+
+/* Prepare runtime init values to allocate PBF command queue lines. */
+static void qed_cmdq_lines_rt_init(
+       struct qed_hwfn *p_hwfn,
+       u8 max_ports_per_engine,
+       u8 max_phys_tcs_per_port,
+       struct init_qm_port_params port_params[MAX_NUM_PORTS])
+{
+       u8 tc, voq, port_id, num_tcs_in_port;
+
+       /* clear PBF lines for all VOQs */
+       for (voq = 0; voq < MAX_NUM_VOQS; voq++)
+               STORE_RT_REG(p_hwfn, PBF_CMDQ_LINES_RT_OFFSET(voq), 0);
+       for (port_id = 0; port_id < max_ports_per_engine; port_id++) {
+               if (port_params[port_id].active) {
+                       u16 phys_lines, phys_lines_per_tc;
+
+                       /* find #lines to divide between active phys TCs */
+                       phys_lines = port_params[port_id].num_pbf_cmd_lines -
+                                    PBF_CMDQ_PURE_LB_LINES;
+                       /* find #lines per active physical TC */
+                       num_tcs_in_port = 0;
+                       for (tc = 0; tc < NUM_OF_PHYS_TCS; tc++) {
+                               if (((port_params[port_id].active_phys_tcs >>
+                                     tc) & 0x1) == 1)
+                                       num_tcs_in_port++;
+                       }
+
+                       phys_lines_per_tc = phys_lines / num_tcs_in_port;
+                       /* init registers per active TC */
+                       for (tc = 0; tc < NUM_OF_PHYS_TCS; tc++) {
+                               if (((port_params[port_id].active_phys_tcs >>
+                                     tc) & 0x1) != 1)
+                                       continue;
+
+                               voq = PHYS_VOQ(port_id, tc,
+                                              max_phys_tcs_per_port);
+                               qed_cmdq_lines_voq_rt_init(p_hwfn, voq,
+                                                          phys_lines_per_tc);
+                       }
+
+                       /* init registers for pure LB TC */
+                       qed_cmdq_lines_voq_rt_init(p_hwfn, LB_VOQ(port_id),
+                                                  PBF_CMDQ_PURE_LB_LINES);
+               }
+       }
+}
+
+static void qed_btb_blocks_rt_init(
+       struct qed_hwfn *p_hwfn,
+       u8 max_ports_per_engine,
+       u8 max_phys_tcs_per_port,
+       struct init_qm_port_params port_params[MAX_NUM_PORTS])
+{
+       u32 usable_blocks, pure_lb_blocks, phys_blocks;
+       u8 tc, voq, port_id, num_tcs_in_port;
+
+       for (port_id = 0; port_id < max_ports_per_engine; port_id++) {
+               u32 temp;
+
+               if (!port_params[port_id].active)
+                       continue;
+
+               /* subtract headroom blocks */
+               usable_blocks = port_params[port_id].num_btb_blocks -
+                               BTB_HEADROOM_BLOCKS;
+
+               /* find blocks per physical TC */
+               num_tcs_in_port = 0;
+               for (tc = 0; tc < NUM_OF_PHYS_TCS; tc++) {
+                       if (((port_params[port_id].active_phys_tcs >>
+                             tc) & 0x1) == 1)
+                               num_tcs_in_port++;
+               }
+
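+               /* split the usable blocks between the active physical TCs and
+                * the pure LB VOQ, guaranteeing the pure LB VOQ at least a
+                * jumbo packet worth of blocks
+                */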
+               pure_lb_blocks = (usable_blocks * BTB_PURE_LB_FACTOR) /
+                                (num_tcs_in_port * BTB_PURE_LB_FACTOR +
+                                 BTB_PURE_LB_RATIO);
+               pure_lb_blocks = max_t(u32, BTB_JUMBO_PKT_BLOCKS,
+                                      pure_lb_blocks / BTB_PURE_LB_FACTOR);
+               phys_blocks = (usable_blocks - pure_lb_blocks) /
+                             num_tcs_in_port;
+
+               /* init physical TCs */
+               for (tc = 0; tc < NUM_OF_PHYS_TCS; tc++) {
+                       if (((port_params[port_id].active_phys_tcs >>
+                             tc) & 0x1) != 1)
+                               continue;
+
+                       voq = PHYS_VOQ(port_id, tc,
+                                      max_phys_tcs_per_port);
+                       STORE_RT_REG(p_hwfn, PBF_BTB_GUARANTEED_RT_OFFSET(voq),
+                                    phys_blocks);
+               }
+
+               /* init pure LB TC */
+               temp = LB_VOQ(port_id);
+               STORE_RT_REG(p_hwfn, PBF_BTB_GUARANTEED_RT_OFFSET(temp),
+                            pure_lb_blocks);
+       }
+}
+
+/* Prepare Tx PQ mapping runtime init values for the specified PF */
+static void qed_tx_pq_map_rt_init(
+       struct qed_hwfn *p_hwfn,
+       struct qed_ptt *p_ptt,
+       struct qed_qm_pf_rt_init_params *p_params,
+       u32 base_mem_addr_4kb)
+{
+       struct init_qm_vport_params *vport_params = p_params->vport_params;
+       u16 num_pqs = p_params->num_pf_pqs + p_params->num_vf_pqs;
+       u16 first_pq_group = p_params->start_pq / QM_PF_QUEUE_GROUP_SIZE;
+       u16 last_pq_group = (p_params->start_pq + num_pqs - 1) /
+                           QM_PF_QUEUE_GROUP_SIZE;
+       bool is_bb_a0 = QED_IS_BB_A0(p_hwfn->cdev);
+       u16 i, pq_id, pq_group;
+
+       /* a bit per Tx PQ indicating if the PQ is associated with a VF */
+       u32 tx_pq_vf_mask[MAX_QM_TX_QUEUES / QM_PF_QUEUE_GROUP_SIZE] = { 0 };
+       u32 tx_pq_vf_mask_width = is_bb_a0 ? 32 : QM_PF_QUEUE_GROUP_SIZE;
+       u32 num_tx_pq_vf_masks = MAX_QM_TX_QUEUES / tx_pq_vf_mask_width;
+       u32 pq_mem_4kb = QM_PQ_MEM_4KB(p_params->num_pf_cids);
+       u32 vport_pq_mem_4kb = QM_PQ_MEM_4KB(p_params->num_vf_cids);
+       u32 mem_addr_4kb = base_mem_addr_4kb;
+
+       /* set mapping from PQ group to PF */
+       for (pq_group = first_pq_group; pq_group <= last_pq_group; pq_group++)
+               STORE_RT_REG(p_hwfn, QM_REG_PQTX2PF_0_RT_OFFSET + pq_group,
+                            (u32)(p_params->pf_id));
+       /* set PQ sizes */
+       STORE_RT_REG(p_hwfn, QM_REG_MAXPQSIZE_0_RT_OFFSET,
+                    QM_PQ_SIZE_256B(p_params->num_pf_cids));
+       STORE_RT_REG(p_hwfn, QM_REG_MAXPQSIZE_1_RT_OFFSET,
+                    QM_PQ_SIZE_256B(p_params->num_vf_cids));
+
+       /* go over all Tx PQs */
+       for (i = 0, pq_id = p_params->start_pq; i < num_pqs; i++, pq_id++) {
+               u8 voq = VOQ(p_params->port_id, p_params->pq_params[i].tc_id,
+                            p_params->max_phys_tcs_per_port);
+               bool is_vf_pq = (i >= p_params->num_pf_pqs);
+               struct qm_rf_pq_map tx_pq_map;
+
+               /* update first Tx PQ of VPORT/TC */
+               u8 vport_id_in_pf = p_params->pq_params[i].vport_id -
+                                   p_params->start_vport;
+               u16 *pq_ids = &vport_params[vport_id_in_pf].first_tx_pq_id[0];
+               u16 first_tx_pq_id = pq_ids[p_params->pq_params[i].tc_id];
+
+               if (first_tx_pq_id == QM_INVALID_PQ_ID) {
+                       /* create new VP PQ */
+                       pq_ids[p_params->pq_params[i].tc_id] = pq_id;
+                       first_tx_pq_id = pq_id;
+                       /* map VP PQ to VOQ and PF */
+                       STORE_RT_REG(p_hwfn,
+                                    QM_REG_WFQVPMAP_RT_OFFSET +
+                                    first_tx_pq_id,
+                                    (voq << QM_WFQ_VP_PQ_VOQ_SHIFT) |
+                                    (p_params->pf_id <<
+                                     QM_WFQ_VP_PQ_PF_SHIFT));
+               }
+               /* fill PQ map entry */
+               memset(&tx_pq_map, 0, sizeof(tx_pq_map));
+               SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_PQ_VALID, 1);
+               SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_RL_VALID,
+                         p_params->pq_params[i].rl_valid ? 1 : 0);
+               SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_VP_PQ_ID, first_tx_pq_id);
+               SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_RL_ID,
+                         p_params->pq_params[i].rl_valid ?
+                         p_params->pq_params[i].vport_id : 0);
+               SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_VOQ, voq);
+               SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_WRR_WEIGHT_GROUP,
+                         p_params->pq_params[i].wrr_group);
+               /* write PQ map entry to CAM */
+               STORE_RT_REG(p_hwfn, QM_REG_TXPQMAP_RT_OFFSET + pq_id,
+                            *((u32 *)&tx_pq_map));
+               /* set base address */
+               STORE_RT_REG(p_hwfn,
+                            QM_REG_BASEADDRTXPQ_RT_OFFSET + pq_id,
+                            mem_addr_4kb);
+               /* check if VF PQ */
+               if (is_vf_pq) {
+                       /* if PQ is associated with a VF, add indication
+                        * to PQ VF mask
+                        */
+                       tx_pq_vf_mask[pq_id / tx_pq_vf_mask_width] |=
+                               (1 << (pq_id % tx_pq_vf_mask_width));
+                       mem_addr_4kb += vport_pq_mem_4kb;
+               } else {
+                       mem_addr_4kb += pq_mem_4kb;
+               }
+       }
+
+       /* store Tx PQ VF mask to size select register */
+       for (i = 0; i < num_tx_pq_vf_masks; i++) {
+               if (tx_pq_vf_mask[i]) {
+                       u32 addr;
+
+                       addr = QM_REG_MAXPQSIZETXSEL_0_RT_OFFSET + i;
+                       STORE_RT_REG(p_hwfn, addr,
+                                    tx_pq_vf_mask[i]);
+               }
+       }
+}
+
+/* Prepare Other PQ mapping runtime init values for the specified PF */
+static void qed_other_pq_map_rt_init(struct qed_hwfn *p_hwfn,
+                                    u8 port_id,
+                                    u8 pf_id,
+                                    u32 num_pf_cids,
+                                    u32 num_tids, u32 base_mem_addr_4kb)
+{
+       u16 i, pq_id;
+
+       /* a single other PQ group is used in each PF,
+        * where PQ group i is used in PF i.
+        */
+       u16 pq_group = pf_id;
+       u32 pq_size = num_pf_cids + num_tids;
+       u32 pq_mem_4kb = QM_PQ_MEM_4KB(pq_size);
+       u32 mem_addr_4kb = base_mem_addr_4kb;
+
+       /* map PQ group to PF */
+       STORE_RT_REG(p_hwfn, QM_REG_PQOTHER2PF_0_RT_OFFSET + pq_group,
+                    (u32)(pf_id));
+       /* set PQ sizes */
+       STORE_RT_REG(p_hwfn, QM_REG_MAXPQSIZE_2_RT_OFFSET,
+                    QM_PQ_SIZE_256B(pq_size));
+       /* set base address */
+       for (i = 0, pq_id = pf_id * QM_PF_QUEUE_GROUP_SIZE;
+            i < QM_OTHER_PQS_PER_PF; i++, pq_id++) {
+               STORE_RT_REG(p_hwfn,
+                            QM_REG_BASEADDROTHERPQ_RT_OFFSET + pq_id,
+                            mem_addr_4kb);
+               mem_addr_4kb += pq_mem_4kb;
+       }
+}
+
+/* Prepare PF WFQ runtime init values for the specified PF.
+ * Return -1 on error.
+ */
+static int qed_pf_wfq_rt_init(struct qed_hwfn *p_hwfn,
+                             struct qed_qm_pf_rt_init_params *p_params)
+{
+       u16 num_tx_pqs = p_params->num_pf_pqs + p_params->num_vf_pqs;
+       u32 crd_reg_offset;
+       u32 inc_val;
+       u16 i;
+
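+       /* PFs with an id below MAX_NUM_PFS_BB use the base WFQPFCRD credit
+        * registers; higher PF ids use the MSB register block
+        */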
+       if (p_params->pf_id < MAX_NUM_PFS_BB)
+               crd_reg_offset = QM_REG_WFQPFCRD_RT_OFFSET;
+       else
+               crd_reg_offset = QM_REG_WFQPFCRD_MSB_RT_OFFSET +
+                                (p_params->pf_id % MAX_NUM_PFS_BB);
+
+       inc_val = QM_WFQ_INC_VAL(p_params->pf_wfq);
+       if (!inc_val || inc_val > QM_WFQ_MAX_INC_VAL) {
+               DP_NOTICE(p_hwfn, "Invalid PF WFQ weight configuration");
+               return -1;
+       }
+
+       for (i = 0; i < num_tx_pqs; i++) {
+               u8 voq = VOQ(p_params->port_id, p_params->pq_params[i].tc_id,
+                            p_params->max_phys_tcs_per_port);
+
+               OVERWRITE_RT_REG(p_hwfn,
+                                crd_reg_offset + voq * MAX_NUM_PFS_BB,
+                                QM_WFQ_CRD_REG_SIGN_BIT);
+       }
+
+       STORE_RT_REG(p_hwfn, QM_REG_WFQPFWEIGHT_RT_OFFSET + p_params->pf_id,
+                    inc_val);
+       STORE_RT_REG(p_hwfn,
+                    QM_REG_WFQPFUPPERBOUND_RT_OFFSET + p_params->pf_id,
+                    QM_WFQ_UPPER_BOUND | QM_WFQ_CRD_REG_SIGN_BIT);
+       return 0;
+}
+
+/* Prepare PF RL runtime init values for the specified PF.
+ * Return -1 on error.
+ */
+static int qed_pf_rl_rt_init(struct qed_hwfn *p_hwfn, u8 pf_id, u32 pf_rl)
+{
+       u32 inc_val = QM_RL_INC_VAL(pf_rl);
+
+       if (inc_val > QM_RL_MAX_INC_VAL) {
+               DP_NOTICE(p_hwfn, "Invalid PF rate limit configuration");
+               return -1;
+       }
+       STORE_RT_REG(p_hwfn, QM_REG_RLPFCRD_RT_OFFSET + pf_id,
+                    QM_RL_CRD_REG_SIGN_BIT);
+       STORE_RT_REG(p_hwfn, QM_REG_RLPFUPPERBOUND_RT_OFFSET + pf_id,
+                    QM_RL_UPPER_BOUND | QM_RL_CRD_REG_SIGN_BIT);
+       STORE_RT_REG(p_hwfn, QM_REG_RLPFINCVAL_RT_OFFSET + pf_id, inc_val);
+       return 0;
+}
+
+/* Prepare VPORT WFQ runtime init values for the specified VPORTs.
+ * Return -1 on error.
+ */
+static int qed_vp_wfq_rt_init(struct qed_hwfn *p_hwfn,
+                             u8 num_vports,
+                             struct init_qm_vport_params *vport_params)
+{
+       u32 inc_val;
+       u8 tc, i;
+
+       /* go over all PF VPORTs */
+       for (i = 0; i < num_vports; i++) {
+
+               if (!vport_params[i].vport_wfq)
+                       continue;
+
+               inc_val = QM_WFQ_INC_VAL(vport_params[i].vport_wfq);
+               if (inc_val > QM_WFQ_MAX_INC_VAL) {
+                       DP_NOTICE(p_hwfn,
+                                 "Invalid VPORT WFQ weight configuration");
+                       return -1;
+               }
+
+               /* each VPORT can have several VPORT PQ IDs for
+                * different TCs
+                */
+               for (tc = 0; tc < NUM_OF_TCS; tc++) {
+                       u16 vport_pq_id = vport_params[i].first_tx_pq_id[tc];
+
+                       if (vport_pq_id != QM_INVALID_PQ_ID) {
+                               STORE_RT_REG(p_hwfn,
+                                            QM_REG_WFQVPCRD_RT_OFFSET +
+                                            vport_pq_id,
+                                            QM_WFQ_CRD_REG_SIGN_BIT);
+                               STORE_RT_REG(p_hwfn,
+                                            QM_REG_WFQVPWEIGHT_RT_OFFSET +
+                                            vport_pq_id, inc_val);
+                       }
+               }
+       }
+
+       return 0;
+}
+
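+/* Prepare VPORT RL runtime init values for the specified VPORTs.
+ * Return -1 on error.
+ */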
+static int qed_vport_rl_rt_init(struct qed_hwfn *p_hwfn,
+                               u8 start_vport,
+                               u8 num_vports,
+                               struct init_qm_vport_params *vport_params)
+{
+       u8 i, vport_id;
+
+       /* go over all PF VPORTs */
+       for (i = 0, vport_id = start_vport; i < num_vports; i++, vport_id++) {
+               u32 inc_val = QM_RL_INC_VAL(vport_params[i].vport_rl);
+
+               if (inc_val > QM_RL_MAX_INC_VAL) {
+                       DP_NOTICE(p_hwfn,
+                                 "Invalid VPORT rate-limit configuration");
+                       return -1;
+               }
+
+               STORE_RT_REG(p_hwfn,
+                            QM_REG_RLGLBLCRD_RT_OFFSET + vport_id,
+                            QM_RL_CRD_REG_SIGN_BIT);
+               STORE_RT_REG(p_hwfn,
+                            QM_REG_RLGLBLUPPERBOUND_RT_OFFSET + vport_id,
+                            QM_RL_UPPER_BOUND | QM_RL_CRD_REG_SIGN_BIT);
+               STORE_RT_REG(p_hwfn,
+                            QM_REG_RLGLBLINCVAL_RT_OFFSET + vport_id,
+                            inc_val);
+       }
+
+       return 0;
+}
+
+static bool qed_poll_on_qm_cmd_ready(struct qed_hwfn *p_hwfn,
+                                    struct qed_ptt *p_ptt)
+{
+       u32 reg_val, i;
+
+       for (i = 0, reg_val = 0; i < QM_STOP_CMD_MAX_POLL_COUNT && reg_val == 0;
+            i++) {
+               udelay(QM_STOP_CMD_POLL_PERIOD_US);
+               reg_val = qed_rd(p_hwfn, p_ptt, QM_REG_SDMCMDREADY);
+       }
+
+       /* check if timeout while waiting for SDM command ready */
+       if (i == QM_STOP_CMD_MAX_POLL_COUNT) {
+               DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
+                          "Timeout when waiting for QM SDM command ready signal\n");
+               return false;
+       }
+
+       return true;
+}
+
+static bool qed_send_qm_cmd(struct qed_hwfn *p_hwfn,
+                           struct qed_ptt *p_ptt,
+                           u32 cmd_addr, u32 cmd_data_lsb, u32 cmd_data_msb)
+{
+       if (!qed_poll_on_qm_cmd_ready(p_hwfn, p_ptt))
+               return false;
+
+       qed_wr(p_hwfn, p_ptt, QM_REG_SDMCMDADDR, cmd_addr);
+       qed_wr(p_hwfn, p_ptt, QM_REG_SDMCMDDATALSB, cmd_data_lsb);
+       qed_wr(p_hwfn, p_ptt, QM_REG_SDMCMDDATAMSB, cmd_data_msb);
+       qed_wr(p_hwfn, p_ptt, QM_REG_SDMCMDGO, 1);
+       qed_wr(p_hwfn, p_ptt, QM_REG_SDMCMDGO, 0);
+
+       return qed_poll_on_qm_cmd_ready(p_hwfn, p_ptt);
+}
+
+/******************** INTERFACE IMPLEMENTATION *********************/
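+/* Returns the amount of PQ memory required by the PF, in 4KB units:
+ * PF Tx PQs + VF Tx PQs + Other PQs.
+ */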
+u32 qed_qm_pf_mem_size(u8 pf_id,
+                      u32 num_pf_cids,
+                      u32 num_vf_cids,
+                      u32 num_tids, u16 num_pf_pqs, u16 num_vf_pqs)
+{
+       return QM_PQ_MEM_4KB(num_pf_cids) * num_pf_pqs +
+              QM_PQ_MEM_4KB(num_vf_cids) * num_vf_pqs +
+              QM_PQ_MEM_4KB(num_pf_cids + num_tids) * QM_OTHER_PQS_PER_PF;
+}
+
+int qed_qm_common_rt_init(
+       struct qed_hwfn *p_hwfn,
+       struct qed_qm_common_rt_init_params *p_params)
+{
+       /* init AFullOprtnstcCrdMask */
+       u32 mask = (QM_OPPOR_LINE_VOQ_DEF <<
+                   QM_RF_OPPORTUNISTIC_MASK_LINEVOQ_SHIFT) |
+                  (QM_BYTE_CRD_EN << QM_RF_OPPORTUNISTIC_MASK_BYTEVOQ_SHIFT) |
+                  (p_params->pf_wfq_en <<
+                   QM_RF_OPPORTUNISTIC_MASK_PFWFQ_SHIFT) |
+                  (p_params->vport_wfq_en <<
+                   QM_RF_OPPORTUNISTIC_MASK_VPWFQ_SHIFT) |
+                  (p_params->pf_rl_en <<
+                   QM_RF_OPPORTUNISTIC_MASK_PFRL_SHIFT) |
+                  (p_params->vport_rl_en <<
+                   QM_RF_OPPORTUNISTIC_MASK_VPQCNRL_SHIFT) |
+                  (QM_OPPOR_FW_STOP_DEF <<
+                   QM_RF_OPPORTUNISTIC_MASK_FWPAUSE_SHIFT) |
+                  (QM_OPPOR_PQ_EMPTY_DEF <<
+                   QM_RF_OPPORTUNISTIC_MASK_QUEUEEMPTY_SHIFT);
+
+       STORE_RT_REG(p_hwfn, QM_REG_AFULLOPRTNSTCCRDMASK_RT_OFFSET, mask);
+       qed_enable_pf_rl(p_hwfn, p_params->pf_rl_en);
+       qed_enable_pf_wfq(p_hwfn, p_params->pf_wfq_en);
+       qed_enable_vport_rl(p_hwfn, p_params->vport_rl_en);
+       qed_enable_vport_wfq(p_hwfn, p_params->vport_wfq_en);
+       qed_cmdq_lines_rt_init(p_hwfn,
+                              p_params->max_ports_per_engine,
+                              p_params->max_phys_tcs_per_port,
+                              p_params->port_params);
+       qed_btb_blocks_rt_init(p_hwfn,
+                              p_params->max_ports_per_engine,
+                              p_params->max_phys_tcs_per_port,
+                              p_params->port_params);
+       return 0;
+}
+
+int qed_qm_pf_rt_init(struct qed_hwfn *p_hwfn,
+                     struct qed_ptt *p_ptt,
+                     struct qed_qm_pf_rt_init_params *p_params)
+{
+       struct init_qm_vport_params *vport_params = p_params->vport_params;
+       u32 other_mem_size_4kb = QM_PQ_MEM_4KB(p_params->num_pf_cids +
+                                              p_params->num_tids) *
+                                QM_OTHER_PQS_PER_PF;
+       u8 tc, i;
+
+       /* clear first Tx PQ ID array for each VPORT */
+       for (i = 0; i < p_params->num_vports; i++)
+               for (tc = 0; tc < NUM_OF_TCS; tc++)
+                       vport_params[i].first_tx_pq_id[tc] = QM_INVALID_PQ_ID;
+
+       /* map Other PQs (if any) */
+       qed_other_pq_map_rt_init(p_hwfn, p_params->port_id, p_params->pf_id,
+                                p_params->num_pf_cids, p_params->num_tids, 0);
+
+       /* map Tx PQs */
+       qed_tx_pq_map_rt_init(p_hwfn, p_ptt, p_params, other_mem_size_4kb);
+
+       if (p_params->pf_wfq)
+               if (qed_pf_wfq_rt_init(p_hwfn, p_params))
+                       return -1;
+
+       if (qed_pf_rl_rt_init(p_hwfn, p_params->pf_id, p_params->pf_rl))
+               return -1;
+
+       if (qed_vp_wfq_rt_init(p_hwfn, p_params->num_vports, vport_params))
+               return -1;
+
+       if (qed_vport_rl_rt_init(p_hwfn, p_params->start_vport,
+                                p_params->num_vports, vport_params))
+               return -1;
+
+       return 0;
+}
+
+int qed_init_pf_wfq(struct qed_hwfn *p_hwfn,
+                   struct qed_ptt *p_ptt, u8 pf_id, u16 pf_wfq)
+{
+       u32 inc_val = QM_WFQ_INC_VAL(pf_wfq);
+
+       if (!inc_val || inc_val > QM_WFQ_MAX_INC_VAL) {
+               DP_NOTICE(p_hwfn, "Invalid PF WFQ weight configuration");
+               return -1;
+       }
+
+       qed_wr(p_hwfn, p_ptt, QM_REG_WFQPFWEIGHT + pf_id * 4, inc_val);
+       return 0;
+}
+
+int qed_init_pf_rl(struct qed_hwfn *p_hwfn,
+                  struct qed_ptt *p_ptt, u8 pf_id, u32 pf_rl)
+{
+       u32 inc_val = QM_RL_INC_VAL(pf_rl);
+
+       if (inc_val > QM_RL_MAX_INC_VAL) {
+               DP_NOTICE(p_hwfn, "Invalid PF rate limit configuration");
+               return -1;
+       }
+
+       qed_wr(p_hwfn, p_ptt,
+              QM_REG_RLPFCRD + pf_id * 4,
+              QM_RL_CRD_REG_SIGN_BIT);
+       qed_wr(p_hwfn, p_ptt, QM_REG_RLPFINCVAL + pf_id * 4, inc_val);
+
+       return 0;
+}
+
+int qed_init_vport_wfq(struct qed_hwfn *p_hwfn,
+                      struct qed_ptt *p_ptt,
+                      u16 first_tx_pq_id[NUM_OF_TCS], u16 vport_wfq)
+{
+       u32 inc_val = QM_WFQ_INC_VAL(vport_wfq);
+       u8 tc;
+
+       if (!inc_val || inc_val > QM_WFQ_MAX_INC_VAL) {
+               DP_NOTICE(p_hwfn, "Invalid VPORT WFQ weight configuration");
+               return -1;
+       }
+
+       for (tc = 0; tc < NUM_OF_TCS; tc++) {
+               u16 vport_pq_id = first_tx_pq_id[tc];
+
+               if (vport_pq_id != QM_INVALID_PQ_ID)
+                       qed_wr(p_hwfn, p_ptt,
+                              QM_REG_WFQVPWEIGHT + vport_pq_id * 4,
+                              inc_val);
+       }
+
+       return 0;
+}
+
+int qed_init_vport_rl(struct qed_hwfn *p_hwfn,
+                     struct qed_ptt *p_ptt, u8 vport_id, u32 vport_rl)
+{
+       u32 inc_val = QM_RL_INC_VAL(vport_rl);
+
+       if (inc_val > QM_RL_MAX_INC_VAL) {
+               DP_NOTICE(p_hwfn, "Invalid VPORT rate-limit configuration");
+               return -1;
+       }
+
+       qed_wr(p_hwfn, p_ptt,
+              QM_REG_RLGLBLCRD + vport_id * 4,
+              QM_RL_CRD_REG_SIGN_BIT);
+       qed_wr(p_hwfn, p_ptt, QM_REG_RLGLBLINCVAL + vport_id * 4, inc_val);
+
+       return 0;
+}
+
+bool qed_send_qm_stop_cmd(struct qed_hwfn *p_hwfn,
+                         struct qed_ptt *p_ptt,
+                         bool is_release_cmd,
+                         bool is_tx_pq, u16 start_pq, u16 num_pqs)
+{
+       u32 cmd_arr[QM_CMD_STRUCT_SIZE(QM_STOP_CMD)] = { 0 };
+       u32 pq_mask = 0, last_pq = start_pq + num_pqs - 1, pq_id;
+
+       /* set command's PQ type */
+       QM_CMD_SET_FIELD(cmd_arr, QM_STOP_CMD, PQ_TYPE, is_tx_pq ? 0 : 1);
+
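+       /* PQs are grouped in masks of QM_STOP_PQ_MASK_WIDTH bits; a command
+        * is sent whenever a mask is filled or the last PQ is reached
+        */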
+       for (pq_id = start_pq; pq_id <= last_pq; pq_id++) {
+               /* set PQ bit in mask (stop command only) */
+               if (!is_release_cmd)
+                       pq_mask |= (1 << (pq_id % QM_STOP_PQ_MASK_WIDTH));
+
+               /* if last PQ or end of PQ mask, write command */
+               if ((pq_id == last_pq) ||
+                   (pq_id % QM_STOP_PQ_MASK_WIDTH ==
+                    (QM_STOP_PQ_MASK_WIDTH - 1))) {
+                       QM_CMD_SET_FIELD(cmd_arr, QM_STOP_CMD,
+                                        PAUSE_MASK, pq_mask);
+                       QM_CMD_SET_FIELD(cmd_arr, QM_STOP_CMD,
+                                        GROUP_ID,
+                                        pq_id / QM_STOP_PQ_MASK_WIDTH);
+                       if (!qed_send_qm_cmd(p_hwfn, p_ptt, QM_STOP_CMD_ADDR,
+                                            cmd_arr[0], cmd_arr[1]))
+                               return false;
+                       pq_mask = 0;
+               }
+       }
+
+       return true;
+}
+
+static void
+qed_set_tunnel_type_enable_bit(unsigned long *var, int bit, bool enable)
+{
+       if (enable)
+               set_bit(bit, var);
+       else
+               clear_bit(bit, var);
+}
+
+#define PRS_ETH_TUNN_FIC_FORMAT        -188897008
+
+void qed_set_vxlan_dest_port(struct qed_hwfn *p_hwfn,
+                            struct qed_ptt *p_ptt, u16 dest_port)
+{
+       qed_wr(p_hwfn, p_ptt, PRS_REG_VXLAN_PORT, dest_port);
+       qed_wr(p_hwfn, p_ptt, NIG_REG_VXLAN_CTRL, dest_port);
+       qed_wr(p_hwfn, p_ptt, PBF_REG_VXLAN_PORT, dest_port);
+}
+
+void qed_set_vxlan_enable(struct qed_hwfn *p_hwfn,
+                         struct qed_ptt *p_ptt, bool vxlan_enable)
+{
+       unsigned long reg_val = 0;
+       u8 shift;
+
+       reg_val = qed_rd(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN);
+       shift = PRS_REG_ENCAPSULATION_TYPE_EN_VXLAN_ENABLE_SHIFT;
+       qed_set_tunnel_type_enable_bit(&reg_val, shift, vxlan_enable);
+
+       qed_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val);
+
+       if (reg_val)
+               qed_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0,
+                      PRS_ETH_TUNN_FIC_FORMAT);
+
+       reg_val = qed_rd(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE);
+       shift = NIG_REG_ENC_TYPE_ENABLE_VXLAN_ENABLE_SHIFT;
+       qed_set_tunnel_type_enable_bit(&reg_val, shift, vxlan_enable);
+
+       qed_wr(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE, reg_val);
+
+       qed_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_VXLAN_EN,
+              vxlan_enable ? 1 : 0);
+}
+
+void qed_set_gre_enable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
+                       bool eth_gre_enable, bool ip_gre_enable)
+{
+       unsigned long reg_val = 0;
+       u8 shift;
+
+       reg_val = qed_rd(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN);
+       shift = PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GRE_ENABLE_SHIFT;
+       qed_set_tunnel_type_enable_bit(&reg_val, shift, eth_gre_enable);
+
+       shift = PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GRE_ENABLE_SHIFT;
+       qed_set_tunnel_type_enable_bit(&reg_val, shift, ip_gre_enable);
+       qed_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val);
+       if (reg_val)
+               qed_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0,
+                      PRS_ETH_TUNN_FIC_FORMAT);
+
+       reg_val = qed_rd(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE);
+       shift = NIG_REG_ENC_TYPE_ENABLE_ETH_OVER_GRE_ENABLE_SHIFT;
+       qed_set_tunnel_type_enable_bit(&reg_val, shift, eth_gre_enable);
+
+       shift = NIG_REG_ENC_TYPE_ENABLE_IP_OVER_GRE_ENABLE_SHIFT;
+       qed_set_tunnel_type_enable_bit(&reg_val, shift, ip_gre_enable);
+       qed_wr(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE, reg_val);
+
+       qed_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_GRE_ETH_EN,
+              eth_gre_enable ? 1 : 0);
+       qed_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_GRE_IP_EN,
+              ip_gre_enable ? 1 : 0);
+}
+
+void qed_set_geneve_dest_port(struct qed_hwfn *p_hwfn,
+                             struct qed_ptt *p_ptt, u16 dest_port)
+{
+       qed_wr(p_hwfn, p_ptt, PRS_REG_NGE_PORT, dest_port);
+       qed_wr(p_hwfn, p_ptt, NIG_REG_NGE_PORT, dest_port);
+       qed_wr(p_hwfn, p_ptt, PBF_REG_NGE_PORT, dest_port);
+}
+
+void qed_set_geneve_enable(struct qed_hwfn *p_hwfn,
+                          struct qed_ptt *p_ptt,
+                          bool eth_geneve_enable, bool ip_geneve_enable)
+{
+       unsigned long reg_val = 0;
+       u8 shift;
+
+       reg_val = qed_rd(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN);
+       shift = PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GENEVE_ENABLE_SHIFT;
+       qed_set_tunnel_type_enable_bit(&reg_val, shift, eth_geneve_enable);
+
+       shift = PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GENEVE_ENABLE_SHIFT;
+       qed_set_tunnel_type_enable_bit(&reg_val, shift, ip_geneve_enable);
+
+       qed_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val);
+       if (reg_val)
+               qed_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0,
+                      PRS_ETH_TUNN_FIC_FORMAT);
+
+       qed_wr(p_hwfn, p_ptt, NIG_REG_NGE_ETH_ENABLE,
+              eth_geneve_enable ? 1 : 0);
+       qed_wr(p_hwfn, p_ptt, NIG_REG_NGE_IP_ENABLE, ip_geneve_enable ? 1 : 0);
+
+       /* set NGE component version */
+       reg_val = (ip_geneve_enable || eth_geneve_enable) ? 1 : 0;
+       qed_wr(p_hwfn, p_ptt, NIG_REG_NGE_COMP_VER, reg_val);
+       qed_wr(p_hwfn, p_ptt, PBF_REG_NGE_COMP_VER, reg_val);
+       qed_wr(p_hwfn, p_ptt, PRS_REG_NGE_COMP_VER, reg_val);
+
+       /* EDPM with geneve tunnel not supported in BB_B0 */
+       if (QED_IS_BB_B0(p_hwfn->cdev))
+               return;
+
+       qed_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_NGE_ETH_EN,
+              eth_geneve_enable ? 1 : 0);
+       qed_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_NGE_IP_EN,
+              ip_geneve_enable ? 1 : 0);
+}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_init_ops.c b/drivers/net/ethernet/qlogic/qed/qed_init_ops.c
new file mode 100644 (file)
index 0000000..9866a20
--- /dev/null
@@ -0,0 +1,575 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015 QLogic Corporation
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#include <linux/types.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include "qed.h"
+#include "qed_hsi.h"
+#include "qed_hw.h"
+#include "qed_init_ops.h"
+#include "qed_reg_addr.h"
+#include "qed_sriov.h"
+
+#define QED_INIT_MAX_POLL_COUNT 100
+#define QED_INIT_POLL_PERIOD_US 500
+
+static u32 pxp_global_win[] = {
+       0,
+       0,
+       0x1c02, /* win 2: addr=0x1c02000, size=4096 bytes */
+       0x1c80, /* win 3: addr=0x1c80000, size=4096 bytes */
+       0x1d00, /* win 4: addr=0x1d00000, size=4096 bytes */
+       0x1d01, /* win 5: addr=0x1d01000, size=4096 bytes */
+       0x1d80, /* win 6: addr=0x1d80000, size=4096 bytes */
+       0x1d81, /* win 7: addr=0x1d81000, size=4096 bytes */
+       0x1d82, /* win 8: addr=0x1d82000, size=4096 bytes */
+       0x1e00, /* win 9: addr=0x1e00000, size=4096 bytes */
+       0x1e80, /* win 10: addr=0x1e80000, size=4096 bytes */
+       0x1f00, /* win 11: addr=0x1f00000, size=4096 bytes */
+       0,
+       0,
+       0,
+       0,
+       0,
+       0,
+       0,
+};
+
+void qed_init_iro_array(struct qed_dev *cdev)
+{
+       cdev->iro_arr = iro_arr;
+}
+
+/* Runtime configuration helpers */
+void qed_init_clear_rt_data(struct qed_hwfn *p_hwfn)
+{
+       int i;
+
+       for (i = 0; i < RUNTIME_ARRAY_SIZE; i++)
+               p_hwfn->rt_data.b_valid[i] = false;
+}
+
+void qed_init_store_rt_reg(struct qed_hwfn *p_hwfn,
+                          u32 rt_offset,
+                          u32 val)
+{
+       p_hwfn->rt_data.init_val[rt_offset] = val;
+       p_hwfn->rt_data.b_valid[rt_offset] = true;
+}
+
+void qed_init_store_rt_agg(struct qed_hwfn *p_hwfn,
+                          u32 rt_offset, u32 *p_val,
+                          size_t size)
+{
+       size_t i;
+
+       for (i = 0; i < size / sizeof(u32); i++) {
+               p_hwfn->rt_data.init_val[rt_offset + i] = p_val[i];
+               p_hwfn->rt_data.b_valid[rt_offset + i]  = true;
+       }
+}
+
+static int qed_init_rt(struct qed_hwfn *p_hwfn,
+                      struct qed_ptt *p_ptt,
+                      u32 addr,
+                      u16 rt_offset,
+                      u16 size,
+                      bool b_must_dmae)
+{
+       u32 *p_init_val = &p_hwfn->rt_data.init_val[rt_offset];
+       bool *p_valid = &p_hwfn->rt_data.b_valid[rt_offset];
+       u16 i, segment;
+       int rc = 0;
+
+       /* Since not all RT entries are initialized, go over the RT and
+        * for each segment of initialized values use DMA.
+        */
+       for (i = 0; i < size; i++) {
+               if (!p_valid[i])
+                       continue;
+
+               /* In case there isn't any wide-bus configuration here,
+                * simply write the data instead of using dmae.
+                */
+               if (!b_must_dmae) {
+                       qed_wr(p_hwfn, p_ptt, addr + (i << 2),
+                              p_init_val[i]);
+                       continue;
+               }
+
+               /* Start of a new segment */
+               for (segment = 1; i + segment < size; segment++)
+                       if (!p_valid[i + segment])
+                               break;
+
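+               /* DMA the whole contiguous segment of valid entries at once */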
+               rc = qed_dmae_host2grc(p_hwfn, p_ptt,
+                                      (uintptr_t)(p_init_val + i),
+                                      addr + (i << 2), segment, 0);
+               if (rc != 0)
+                       return rc;
+
+               /* Jump over the entire segment, including invalid entry */
+               i += segment;
+       }
+
+       return rc;
+}
+
+int qed_init_alloc(struct qed_hwfn *p_hwfn)
+{
+       struct qed_rt_data *rt_data = &p_hwfn->rt_data;
+
+       if (IS_VF(p_hwfn->cdev))
+               return 0;
+
+       rt_data->b_valid = kzalloc(sizeof(bool) * RUNTIME_ARRAY_SIZE,
+                                  GFP_KERNEL);
+       if (!rt_data->b_valid)
+               return -ENOMEM;
+
+       rt_data->init_val = kzalloc(sizeof(u32) * RUNTIME_ARRAY_SIZE,
+                                   GFP_KERNEL);
+       if (!rt_data->init_val) {
+               kfree(rt_data->b_valid);
+               return -ENOMEM;
+       }
+
+       return 0;
+}
+
+void qed_init_free(struct qed_hwfn *p_hwfn)
+{
+       kfree(p_hwfn->rt_data.init_val);
+       kfree(p_hwfn->rt_data.b_valid);
+}
+
+static int qed_init_array_dmae(struct qed_hwfn *p_hwfn,
+                              struct qed_ptt *p_ptt,
+                              u32 addr,
+                              u32 dmae_data_offset,
+                              u32 size,
+                              const u32 *buf,
+                              bool b_must_dmae,
+                              bool b_can_dmae)
+{
+       int rc = 0;
+
+       /* Perform DMAE only for lengthy enough sections or for wide-bus */
+       if (!b_can_dmae || (!b_must_dmae && (size < 16))) {
+               const u32 *data = buf + dmae_data_offset;
+               u32 i;
+
+               for (i = 0; i < size; i++)
+                       qed_wr(p_hwfn, p_ptt, addr + (i << 2), data[i]);
+       } else {
+               rc = qed_dmae_host2grc(p_hwfn, p_ptt,
+                                      (uintptr_t)(buf + dmae_data_offset),
+                                      addr, size, 0);
+       }
+
+       return rc;
+}
+
+static int qed_init_fill_dmae(struct qed_hwfn *p_hwfn,
+                             struct qed_ptt *p_ptt,
+                             u32 addr,
+                             u32 fill,
+                             u32 fill_count)
+{
+       static u32 zero_buffer[DMAE_MAX_RW_SIZE];
+
+       memset(zero_buffer, 0, sizeof(u32) * DMAE_MAX_RW_SIZE);
+
+       /* invoke the DMAE virtual/physical buffer API with
+        * 1. DMAE init channel
+        * 2. addr
+        * 3. zero_buffer (source data to replicate)
+        * 4. fill_count
+        */
+
+       return qed_dmae_host2grc(p_hwfn, p_ptt,
+                                (uintptr_t)(&zero_buffer[0]),
+                                addr, fill_count,
+                                QED_DMAE_FLAG_RW_REPL_SRC);
+}
+
+static void qed_init_fill(struct qed_hwfn *p_hwfn,
+                         struct qed_ptt *p_ptt,
+                         u32 addr,
+                         u32 fill,
+                         u32 fill_count)
+{
+       u32 i;
+
+       for (i = 0; i < fill_count; i++, addr += sizeof(u32))
+               qed_wr(p_hwfn, p_ptt, addr, fill);
+}
+
+static int qed_init_cmd_array(struct qed_hwfn *p_hwfn,
+                             struct qed_ptt *p_ptt,
+                             struct init_write_op *cmd,
+                             bool b_must_dmae,
+                             bool b_can_dmae)
+{
+       u32 data = le32_to_cpu(cmd->data);
+       u32 addr = GET_FIELD(data, INIT_WRITE_OP_ADDRESS) << 2;
+       u32 dmae_array_offset = le32_to_cpu(cmd->args.array_offset);
+       u32 offset, output_len, input_len, max_size;
+       struct qed_dev *cdev = p_hwfn->cdev;
+       union init_array_hdr *hdr;
+       const u32 *array_data;
+       int rc = 0;
+       u32 size;
+
+       array_data = cdev->fw_data->arr_data;
+
+       hdr = (union init_array_hdr *)(array_data +
+                                      dmae_array_offset);
+       data = le32_to_cpu(hdr->raw.data);
+       switch (GET_FIELD(data, INIT_ARRAY_RAW_HDR_TYPE)) {
+       case INIT_ARR_ZIPPED:
+               offset = dmae_array_offset + 1;
+               input_len = GET_FIELD(data,
+                                     INIT_ARRAY_ZIPPED_HDR_ZIPPED_SIZE);
+               max_size = MAX_ZIPPED_SIZE * 4;
+               memset(p_hwfn->unzip_buf, 0, max_size);
+
+               output_len = qed_unzip_data(p_hwfn, input_len,
+                                           (u8 *)&array_data[offset],
+                                           max_size, (u8 *)p_hwfn->unzip_buf);
+               if (output_len) {
+                       rc = qed_init_array_dmae(p_hwfn, p_ptt, addr, 0,
+                                                output_len,
+                                                p_hwfn->unzip_buf,
+                                                b_must_dmae, b_can_dmae);
+               } else {
+                       DP_NOTICE(p_hwfn, "Failed to unzip dmae data\n");
+                       rc = -EINVAL;
+               }
+               break;
+       case INIT_ARR_PATTERN:
+       {
+               u32 repeats = GET_FIELD(data,
+                                       INIT_ARRAY_PATTERN_HDR_REPETITIONS);
+               u32 i;
+
+               size = GET_FIELD(data, INIT_ARRAY_PATTERN_HDR_PATTERN_SIZE);
+
+               for (i = 0; i < repeats; i++, addr += size << 2) {
+                       rc = qed_init_array_dmae(p_hwfn, p_ptt, addr,
+                                                dmae_array_offset + 1,
+                                                size, array_data,
+                                                b_must_dmae, b_can_dmae);
+                       if (rc)
+                               break;
+               }
+               break;
+       }
+       case INIT_ARR_STANDARD:
+               size = GET_FIELD(data, INIT_ARRAY_STANDARD_HDR_SIZE);
+               rc = qed_init_array_dmae(p_hwfn, p_ptt, addr,
+                                        dmae_array_offset + 1,
+                                        size, array_data,
+                                        b_must_dmae, b_can_dmae);
+               break;
+       }
+
+       return rc;
+}
+
+/* init_ops write command */
+static int qed_init_cmd_wr(struct qed_hwfn *p_hwfn,
+                          struct qed_ptt *p_ptt,
+                          struct init_write_op *cmd,
+                          bool b_can_dmae)
+{
+       u32 data = le32_to_cpu(cmd->data);
+       u32 addr = GET_FIELD(data, INIT_WRITE_OP_ADDRESS) << 2;
+       bool b_must_dmae = GET_FIELD(data, INIT_WRITE_OP_WIDE_BUS);
+       union init_write_args *arg = &cmd->args;
+       int rc = 0;
+
+       /* Sanitize */
+       if (b_must_dmae && !b_can_dmae) {
+               DP_NOTICE(p_hwfn,
+                         "Need to write to %08x for Wide-bus but DMAE isn't allowed\n",
+                         addr);
+               return -EINVAL;
+       }
+
+       switch (GET_FIELD(data, INIT_WRITE_OP_SOURCE)) {
+       case INIT_SRC_INLINE:
+               qed_wr(p_hwfn, p_ptt, addr,
+                      le32_to_cpu(arg->inline_val));
+               break;
+       case INIT_SRC_ZEROS:
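+               /* use DMAE for wide-bus writes or large fills; otherwise
+                * write the zeros register by register
+                */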
+               if (b_must_dmae ||
+                   (b_can_dmae && (le32_to_cpu(arg->zeros_count) >= 64)))
+                       rc = qed_init_fill_dmae(p_hwfn, p_ptt, addr, 0,
+                                               le32_to_cpu(arg->zeros_count));
+               else
+                       qed_init_fill(p_hwfn, p_ptt, addr, 0,
+                                     le32_to_cpu(arg->zeros_count));
+               break;
+       case INIT_SRC_ARRAY:
+               rc = qed_init_cmd_array(p_hwfn, p_ptt, cmd,
+                                       b_must_dmae, b_can_dmae);
+               break;
+       case INIT_SRC_RUNTIME:
+               qed_init_rt(p_hwfn, p_ptt, addr,
+                           le16_to_cpu(arg->runtime.offset),
+                           le16_to_cpu(arg->runtime.size),
+                           b_must_dmae);
+               break;
+       }
+
+       return rc;
+}
+
+static inline bool comp_eq(u32 val, u32 expected_val)
+{
+       return val == expected_val;
+}
+
+static inline bool comp_and(u32 val, u32 expected_val)
+{
+       return (val & expected_val) == expected_val;
+}
+
+static inline bool comp_or(u32 val, u32 expected_val)
+{
+       return (val | expected_val) > 0;
+}
+
+/* init_ops read/poll commands */
+static void qed_init_cmd_rd(struct qed_hwfn *p_hwfn,
+                           struct qed_ptt *p_ptt,
+                           struct init_read_op *cmd)
+{
+       bool (*comp_check)(u32 val, u32 expected_val);
+       u32 delay = QED_INIT_POLL_PERIOD_US, val;
+       u32 data, addr, poll;
+       int i;
+
+       data = le32_to_cpu(cmd->op_data);
+       addr = GET_FIELD(data, INIT_READ_OP_ADDRESS) << 2;
+       poll = GET_FIELD(data, INIT_READ_OP_POLL_TYPE);
+
+       val = qed_rd(p_hwfn, p_ptt, addr);
+
+       if (poll == INIT_POLL_NONE)
+               return;
+
+       switch (poll) {
+       case INIT_POLL_EQ:
+               comp_check = comp_eq;
+               break;
+       case INIT_POLL_OR:
+               comp_check = comp_or;
+               break;
+       case INIT_POLL_AND:
+               comp_check = comp_and;
+               break;
+       default:
+               DP_ERR(p_hwfn, "Invalid poll comparison type %08x\n",
+                      cmd->op_data);
+               return;
+       }
+
+       data = le32_to_cpu(cmd->expected_val);
+       for (i = 0;
+            i < QED_INIT_MAX_POLL_COUNT && !comp_check(val, data);
+            i++) {
+               udelay(delay);
+               val = qed_rd(p_hwfn, p_ptt, addr);
+       }
+
+       if (i == QED_INIT_MAX_POLL_COUNT) {
+               DP_ERR(p_hwfn,
+                      "Timeout when polling reg: 0x%08x [ Waiting-for: %08x Got: %08x (comparison %08x)]\n",
+                      addr, le32_to_cpu(cmd->expected_val),
+                      val, le32_to_cpu(cmd->op_data));
+       }
+}
+
+/* init_ops callbacks entry point */
+static void qed_init_cmd_cb(struct qed_hwfn *p_hwfn,
+                           struct qed_ptt *p_ptt,
+                           struct init_callback_op *p_cmd)
+{
+       DP_NOTICE(p_hwfn, "Currently init values have no need of callbacks\n");
+}
+
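+/* Recursively evaluate the modes tree: each node is a NOT/OR/AND operator or
+ * a leaf holding a mode bit to test against the 'modes' bitmap.
+ */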
+static u8 qed_init_cmd_mode_match(struct qed_hwfn *p_hwfn,
+                                 u16 *offset,
+                                 int modes)
+{
+       struct qed_dev *cdev = p_hwfn->cdev;
+       const u8 *modes_tree_buf;
+       u8 arg1, arg2, tree_val;
+
+       modes_tree_buf = cdev->fw_data->modes_tree_buf;
+       tree_val = modes_tree_buf[(*offset)++];
+       switch (tree_val) {
+       case INIT_MODE_OP_NOT:
+               return qed_init_cmd_mode_match(p_hwfn, offset, modes) ^ 1;
+       case INIT_MODE_OP_OR:
+               arg1    = qed_init_cmd_mode_match(p_hwfn, offset, modes);
+               arg2    = qed_init_cmd_mode_match(p_hwfn, offset, modes);
+               return arg1 | arg2;
+       case INIT_MODE_OP_AND:
+               arg1    = qed_init_cmd_mode_match(p_hwfn, offset, modes);
+               arg2    = qed_init_cmd_mode_match(p_hwfn, offset, modes);
+               return arg1 & arg2;
+       default:
+               tree_val -= MAX_INIT_MODE_OPS;
+               return (modes & (1 << tree_val)) ? 1 : 0;
+       }
+}
+
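+/* Returns the number of init ops to skip when the mode expression does not
+ * match, or 0 when it matches.
+ */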
+static u32 qed_init_cmd_mode(struct qed_hwfn *p_hwfn,
+                            struct init_if_mode_op *p_cmd,
+                            int modes)
+{
+       u16 offset = le16_to_cpu(p_cmd->modes_buf_offset);
+
+       if (qed_init_cmd_mode_match(p_hwfn, &offset, modes))
+               return 0;
+       else
+               return GET_FIELD(le32_to_cpu(p_cmd->op_data),
+                                INIT_IF_MODE_OP_CMD_OFFSET);
+}
+
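+/* Returns the number of init ops to skip when the op's phase/phase-id does
+ * not match the current one, or 0 when it matches.
+ */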
+static u32 qed_init_cmd_phase(struct qed_hwfn *p_hwfn,
+                             struct init_if_phase_op *p_cmd,
+                             u32 phase,
+                             u32 phase_id)
+{
+       u32 data = le32_to_cpu(p_cmd->phase_data);
+       u32 op_data = le32_to_cpu(p_cmd->op_data);
+
+       if (!(GET_FIELD(data, INIT_IF_PHASE_OP_PHASE) == phase &&
+             (GET_FIELD(data, INIT_IF_PHASE_OP_PHASE_ID) == ANY_PHASE_ID ||
+              GET_FIELD(data, INIT_IF_PHASE_OP_PHASE_ID) == phase_id)))
+               return GET_FIELD(op_data, INIT_IF_PHASE_OP_CMD_OFFSET);
+       else
+               return 0;
+}
+
+int qed_init_run(struct qed_hwfn *p_hwfn,
+                struct qed_ptt *p_ptt,
+                int phase,
+                int phase_id,
+                int modes)
+{
+       struct qed_dev *cdev = p_hwfn->cdev;
+       u32 cmd_num, num_init_ops;
+       union init_op *init_ops;
+       bool b_dmae = false;
+       int rc = 0;
+
+       num_init_ops = cdev->fw_data->init_ops_size;
+       init_ops = cdev->fw_data->init_ops;
+
+       p_hwfn->unzip_buf = kzalloc(MAX_ZIPPED_SIZE * 4, GFP_ATOMIC);
+       if (!p_hwfn->unzip_buf) {
+               DP_NOTICE(p_hwfn, "Failed to allocate unzip buffer\n");
+               return -ENOMEM;
+       }
+
+       for (cmd_num = 0; cmd_num < num_init_ops; cmd_num++) {
+               union init_op *cmd = &init_ops[cmd_num];
+               u32 data = le32_to_cpu(cmd->raw.op_data);
+
+               switch (GET_FIELD(data, INIT_CALLBACK_OP_OP)) {
+               case INIT_OP_WRITE:
+                       rc = qed_init_cmd_wr(p_hwfn, p_ptt, &cmd->write,
+                                            b_dmae);
+                       break;
+               case INIT_OP_READ:
+                       qed_init_cmd_rd(p_hwfn, p_ptt, &cmd->read);
+                       break;
+               case INIT_OP_IF_MODE:
+                       cmd_num += qed_init_cmd_mode(p_hwfn, &cmd->if_mode,
+                                                    modes);
+                       break;
+               case INIT_OP_IF_PHASE:
+                       cmd_num += qed_init_cmd_phase(p_hwfn, &cmd->if_phase,
+                                                     phase, phase_id);
+                       b_dmae = GET_FIELD(data, INIT_IF_PHASE_OP_DMAE_ENABLE);
+                       break;
+               case INIT_OP_DELAY:
+                       /* qed_init_run is always invoked from
+                        * sleep-able context
+                        */
+                       udelay(le32_to_cpu(cmd->delay.delay));
+                       break;
+
+               case INIT_OP_CALLBACK:
+                       qed_init_cmd_cb(p_hwfn, p_ptt, &cmd->callback);
+                       break;
+               }
+
+               if (rc)
+                       break;
+       }
+
+       kfree(p_hwfn->unzip_buf);
+       return rc;
+}
+
+void qed_gtt_init(struct qed_hwfn *p_hwfn)
+{
+       u32 gtt_base;
+       u32 i;
+
+       /* Set the global windows */
+       gtt_base = PXP_PF_WINDOW_ADMIN_START + PXP_PF_WINDOW_ADMIN_GLOBAL_START;
+
+       for (i = 0; i < ARRAY_SIZE(pxp_global_win); i++)
+               if (pxp_global_win[i])
+                       REG_WR(p_hwfn, gtt_base + i * PXP_GLOBAL_ENTRY_SIZE,
+                              pxp_global_win[i]);
+}
+
+int qed_init_fw_data(struct qed_dev *cdev, const u8 *data)
+{
+       struct qed_fw_data *fw = cdev->fw_data;
+       struct bin_buffer_hdr *buf_hdr;
+       u32 offset, len;
+
+       if (!data) {
+               DP_NOTICE(cdev, "Invalid fw data\n");
+               return -EINVAL;
+       }
+
+       /* First Dword contains metadata and should be skipped */
+       buf_hdr = (struct bin_buffer_hdr *)(data + sizeof(u32));
+
+       offset = buf_hdr[BIN_BUF_FW_VER_INFO].offset;
+       fw->fw_ver_info = (struct fw_ver_info *)(data + offset);
+
+       offset = buf_hdr[BIN_BUF_INIT_CMD].offset;
+       fw->init_ops = (union init_op *)(data + offset);
+
+       offset = buf_hdr[BIN_BUF_INIT_VAL].offset;
+       fw->arr_data = (u32 *)(data + offset);
+
+       offset = buf_hdr[BIN_BUF_INIT_MODE_TREE].offset;
+       fw->modes_tree_buf = (u8 *)(data + offset);
+       len = buf_hdr[BIN_BUF_INIT_CMD].length;
+       fw->init_ops_size = len / sizeof(struct init_raw_op);
+
+       return 0;
+}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_init_ops.h b/drivers/net/ethernet/qlogic/qed/qed_init_ops.h
new file mode 100644 (file)
index 0000000..1e83204
--- /dev/null
@@ -0,0 +1,110 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015 QLogic Corporation
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#ifndef _QED_INIT_OPS_H
+#define _QED_INIT_OPS_H
+
+#include <linux/types.h>
+#include <linux/slab.h>
+#include "qed.h"
+
+/**
+ * @brief qed_init_iro_array - init iro_arr.
+ *
+ *
+ * @param cdev
+ */
+void qed_init_iro_array(struct qed_dev *cdev);
+
+/**
+ * @brief qed_init_run - Run the init-sequence.
+ *
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param phase
+ * @param phase_id
+ * @param modes
+ * @return int - 0 on success
+ */
+int qed_init_run(struct qed_hwfn *p_hwfn,
+                struct qed_ptt *p_ptt,
+                int phase,
+                int phase_id,
+                int modes);
+
+/**
+ * @brief qed_init_alloc - Allocate the RT array and store the 'values' ptrs.
+ *
+ *
+ * @param p_hwfn
+ *
+ * @return int - 0 on success, -ENOMEM on allocation failure
+ */
+int qed_init_alloc(struct qed_hwfn *p_hwfn);
+
+/**
+ * @brief qed_init_free - Free the RT array buffers.
+ *
+ *
+ * @param p_hwfn
+ */
+void qed_init_free(struct qed_hwfn *p_hwfn);
+
+/**
+ * @brief qed_init_clear_rt_data - Clears the runtime init array.
+ *
+ *
+ * @param p_hwfn
+ */
+void qed_init_clear_rt_data(struct qed_hwfn *p_hwfn);
+
+/**
+ * @brief qed_init_store_rt_reg - Store a configuration value in the RT array.
+ *
+ *
+ * @param p_hwfn
+ * @param rt_offset
+ * @param val
+ */
+void qed_init_store_rt_reg(struct qed_hwfn *p_hwfn,
+                          u32 rt_offset,
+                          u32 val);
+
+#define STORE_RT_REG(hwfn, offset, val)        \
+       qed_init_store_rt_reg(hwfn, offset, val)
+
+#define OVERWRITE_RT_REG(hwfn, offset, val) \
+       qed_init_store_rt_reg(hwfn, offset, val)
+
+/**
+ * @brief qed_init_store_rt_agg - Store an array of values in the RT array.
+ *
+ *
+ * @param p_hwfn
+ * @param rt_offset
+ * @param val
+ * @param size
+ */
+void qed_init_store_rt_agg(struct qed_hwfn *p_hwfn,
+                          u32 rt_offset,
+                          u32 *val,
+                          size_t size);
+
+#define STORE_RT_REG_AGG(hwfn, offset, val) \
+       qed_init_store_rt_agg(hwfn, offset, (u32 *)&val, sizeof(val))
+
+/**
+ * @brief
+ *      Initialize GTT global windows and set admin window
+ *      related params of GTT/PTT to default values.
+ *
+ * @param p_hwfn
+ */
+void qed_gtt_init(struct qed_hwfn *p_hwfn);
+#endif
diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.c b/drivers/net/ethernet/qlogic/qed/qed_int.c
new file mode 100644 (file)
index 0000000..8fa50fa
--- /dev/null
@@ -0,0 +1,3266 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015 QLogic Corporation
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#include <linux/types.h>
+#include <asm/byteorder.h>
+#include <linux/io.h>
+#include <linux/bitops.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include "qed.h"
+#include "qed_hsi.h"
+#include "qed_hw.h"
+#include "qed_init_ops.h"
+#include "qed_int.h"
+#include "qed_mcp.h"
+#include "qed_reg_addr.h"
+#include "qed_sp.h"
+#include "qed_sriov.h"
+#include "qed_vf.h"
+
+struct qed_pi_info {
+       qed_int_comp_cb_t       comp_cb;
+       void                    *cookie;
+};
+
+struct qed_sb_sp_info {
+       struct qed_sb_info      sb_info;
+
+       /* per protocol index data */
+       struct qed_pi_info      pi_info_arr[PIS_PER_SB];
+};
+
+enum qed_attention_type {
+       QED_ATTN_TYPE_ATTN,
+       QED_ATTN_TYPE_PARITY,
+};
+
+#define SB_ATTN_ALIGNED_SIZE(p_hwfn) \
+       ALIGNED_TYPE_SIZE(struct atten_status_block, p_hwfn)
+
+struct aeu_invert_reg_bit {
+       char bit_name[30];
+
+#define ATTENTION_PARITY                (1 << 0)
+
+#define ATTENTION_LENGTH_MASK           (0x00000ff0)
+#define ATTENTION_LENGTH_SHIFT          (4)
+#define ATTENTION_LENGTH(flags)         (((flags) & ATTENTION_LENGTH_MASK) >> \
+                                        ATTENTION_LENGTH_SHIFT)
+#define ATTENTION_SINGLE                (1 << ATTENTION_LENGTH_SHIFT)
+#define ATTENTION_PAR                   (ATTENTION_SINGLE | ATTENTION_PARITY)
+#define ATTENTION_PAR_INT               ((2 << ATTENTION_LENGTH_SHIFT) | \
+                                        ATTENTION_PARITY)
+
+/* Multiple bits start with this offset */
+#define ATTENTION_OFFSET_MASK           (0x000ff000)
+#define ATTENTION_OFFSET_SHIFT          (12)
+       unsigned int flags;
+
+       /* Callback to call when the attention is triggered */
+       int (*cb)(struct qed_hwfn *p_hwfn);
+
+       enum block_id block_index;
+};
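The flags word above packs several sub-fields: bit 0 marks a parity attention, bits 4-11 give how many consecutive AEU bits the entry describes (ATTENTION_SINGLE is simply a length of one), and bits 12-19 hold an extra numeric offset, presumably applied when formatting names that contain %d. A small decoding sketch under those assumptions:

/* Hedged sketch of decoding the packed 'flags' field; the offset semantics
 * (a base number for %d-style bit names) are an assumption, not taken from
 * this patch.
 */
static void example_decode_aeu_flags(unsigned int flags)
{
	unsigned int num_bits = ATTENTION_LENGTH(flags);
	unsigned int name_off = (flags & ATTENTION_OFFSET_MASK) >>
				ATTENTION_OFFSET_SHIFT;
	bool parity = !!(flags & ATTENTION_PARITY);

	pr_info("covers %u bit(s), name offset %u, parity %d\n",
		num_bits, name_off, parity);
}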
+
+struct aeu_invert_reg {
+       struct aeu_invert_reg_bit bits[32];
+};
+
+#define MAX_ATTN_GRPS           (8)
+#define NUM_ATTN_REGS           (9)
+
+/* HW Attention register */
+struct attn_hw_reg {
+       u16 reg_idx;             /* Index of this register in its block */
+       u16 num_of_bits;         /* number of valid attention bits */
+       u32 sts_addr;            /* Address of the STS register */
+       u32 sts_clr_addr;        /* Address of the STS_CLR register */
+       u32 sts_wr_addr;         /* Address of the STS_WR register */
+       u32 mask_addr;           /* Address of the MASK register */
+};
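Each descriptor names the four per-register addresses used by the attention path. A hedged sketch of reading one such status register with the qed_rd() accessor (whether the plain STS or the read-to-clear STS_CLR address is appropriate depends on the caller):

/* Illustrative helper, not part of the patch: read a block's attention
 * status and keep only the bits the descriptor declares as valid.
 */
static u32 example_read_attn_sts(struct qed_hwfn *p_hwfn,
				 struct qed_ptt *p_ptt,
				 struct attn_hw_reg *reg)
{
	u32 sts = qed_rd(p_hwfn, p_ptt, reg->sts_addr);
	u32 valid_mask = (reg->num_of_bits == 32) ?
			 0xffffffff : ((1U << reg->num_of_bits) - 1);

	return sts & valid_mask;
}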
+
+/* HW block attention registers */
+struct attn_hw_regs {
+       u16 num_of_int_regs;            /* Number of interrupt regs */
+       u16 num_of_prty_regs;           /* Number of parity regs */
+       struct attn_hw_reg **int_regs;  /* interrupt regs */
+       struct attn_hw_reg **prty_regs; /* parity regs */
+};
+
+/* HW block attention registers */
+struct attn_hw_block {
+       const char *name;                 /* Block name */
+       struct attn_hw_regs chip_regs[1];
+};
+
+static struct attn_hw_reg grc_int0_bb_b0 = {
+       0, 4, 0x50180, 0x5018c, 0x50188, 0x50184};
+
+static struct attn_hw_reg *grc_int_bb_b0_regs[1] = {
+       &grc_int0_bb_b0};
+
+static struct attn_hw_reg grc_prty1_bb_b0 = {
+       0, 2, 0x50200, 0x5020c, 0x50208, 0x50204};
+
+static struct attn_hw_reg *grc_prty_bb_b0_regs[1] = {
+       &grc_prty1_bb_b0};
+
+static struct attn_hw_reg miscs_int0_bb_b0 = {
+       0, 3, 0x9180, 0x918c, 0x9188, 0x9184};
+
+static struct attn_hw_reg miscs_int1_bb_b0 = {
+       1, 11, 0x9190, 0x919c, 0x9198, 0x9194};
+
+static struct attn_hw_reg *miscs_int_bb_b0_regs[2] = {
+       &miscs_int0_bb_b0, &miscs_int1_bb_b0};
+
+static struct attn_hw_reg miscs_prty0_bb_b0 = {
+       0, 1, 0x91a0, 0x91ac, 0x91a8, 0x91a4};
+
+static struct attn_hw_reg *miscs_prty_bb_b0_regs[1] = {
+       &miscs_prty0_bb_b0};
+
+static struct attn_hw_reg misc_int0_bb_b0 = {
+       0, 1, 0x8180, 0x818c, 0x8188, 0x8184};
+
+static struct attn_hw_reg *misc_int_bb_b0_regs[1] = {
+       &misc_int0_bb_b0};
+
+static struct attn_hw_reg pglue_b_int0_bb_b0 = {
+       0, 23, 0x2a8180, 0x2a818c, 0x2a8188, 0x2a8184};
+
+static struct attn_hw_reg *pglue_b_int_bb_b0_regs[1] = {
+       &pglue_b_int0_bb_b0};
+
+static struct attn_hw_reg pglue_b_prty0_bb_b0 = {
+       0, 1, 0x2a8190, 0x2a819c, 0x2a8198, 0x2a8194};
+
+static struct attn_hw_reg pglue_b_prty1_bb_b0 = {
+       1, 22, 0x2a8200, 0x2a820c, 0x2a8208, 0x2a8204};
+
+static struct attn_hw_reg *pglue_b_prty_bb_b0_regs[2] = {
+       &pglue_b_prty0_bb_b0, &pglue_b_prty1_bb_b0};
+
+static struct attn_hw_reg cnig_int0_bb_b0 = {
+       0, 6, 0x2182e8, 0x2182f4, 0x2182f0, 0x2182ec};
+
+static struct attn_hw_reg *cnig_int_bb_b0_regs[1] = {
+       &cnig_int0_bb_b0};
+
+static struct attn_hw_reg cnig_prty0_bb_b0 = {
+       0, 2, 0x218348, 0x218354, 0x218350, 0x21834c};
+
+static struct attn_hw_reg *cnig_prty_bb_b0_regs[1] = {
+       &cnig_prty0_bb_b0};
+
+static struct attn_hw_reg cpmu_int0_bb_b0 = {
+       0, 1, 0x303e0, 0x303ec, 0x303e8, 0x303e4};
+
+static struct attn_hw_reg *cpmu_int_bb_b0_regs[1] = {
+       &cpmu_int0_bb_b0};
+
+static struct attn_hw_reg ncsi_int0_bb_b0 = {
+       0, 1, 0x404cc, 0x404d8, 0x404d4, 0x404d0};
+
+static struct attn_hw_reg *ncsi_int_bb_b0_regs[1] = {
+       &ncsi_int0_bb_b0};
+
+static struct attn_hw_reg ncsi_prty1_bb_b0 = {
+       0, 1, 0x40000, 0x4000c, 0x40008, 0x40004};
+
+static struct attn_hw_reg *ncsi_prty_bb_b0_regs[1] = {
+       &ncsi_prty1_bb_b0};
+
+static struct attn_hw_reg opte_prty1_bb_b0 = {
+       0, 11, 0x53000, 0x5300c, 0x53008, 0x53004};
+
+static struct attn_hw_reg opte_prty0_bb_b0 = {
+       1, 1, 0x53208, 0x53214, 0x53210, 0x5320c};
+
+static struct attn_hw_reg *opte_prty_bb_b0_regs[2] = {
+       &opte_prty1_bb_b0, &opte_prty0_bb_b0};
+
+static struct attn_hw_reg bmb_int0_bb_b0 = {
+       0, 16, 0x5400c0, 0x5400cc, 0x5400c8, 0x5400c4};
+
+static struct attn_hw_reg bmb_int1_bb_b0 = {
+       1, 28, 0x5400d8, 0x5400e4, 0x5400e0, 0x5400dc};
+
+static struct attn_hw_reg bmb_int2_bb_b0 = {
+       2, 26, 0x5400f0, 0x5400fc, 0x5400f8, 0x5400f4};
+
+static struct attn_hw_reg bmb_int3_bb_b0 = {
+       3, 31, 0x540108, 0x540114, 0x540110, 0x54010c};
+
+static struct attn_hw_reg bmb_int4_bb_b0 = {
+       4, 27, 0x540120, 0x54012c, 0x540128, 0x540124};
+
+static struct attn_hw_reg bmb_int5_bb_b0 = {
+       5, 29, 0x540138, 0x540144, 0x540140, 0x54013c};
+
+static struct attn_hw_reg bmb_int6_bb_b0 = {
+       6, 30, 0x540150, 0x54015c, 0x540158, 0x540154};
+
+static struct attn_hw_reg bmb_int7_bb_b0 = {
+       7, 32, 0x540168, 0x540174, 0x540170, 0x54016c};
+
+static struct attn_hw_reg bmb_int8_bb_b0 = {
+       8, 32, 0x540184, 0x540190, 0x54018c, 0x540188};
+
+static struct attn_hw_reg bmb_int9_bb_b0 = {
+       9, 32, 0x54019c, 0x5401a8, 0x5401a4, 0x5401a0};
+
+static struct attn_hw_reg bmb_int10_bb_b0 = {
+       10, 3, 0x5401b4, 0x5401c0, 0x5401bc, 0x5401b8};
+
+static struct attn_hw_reg bmb_int11_bb_b0 = {
+       11, 4, 0x5401cc, 0x5401d8, 0x5401d4, 0x5401d0};
+
+static struct attn_hw_reg *bmb_int_bb_b0_regs[12] = {
+       &bmb_int0_bb_b0, &bmb_int1_bb_b0, &bmb_int2_bb_b0, &bmb_int3_bb_b0,
+       &bmb_int4_bb_b0, &bmb_int5_bb_b0, &bmb_int6_bb_b0, &bmb_int7_bb_b0,
+       &bmb_int8_bb_b0, &bmb_int9_bb_b0, &bmb_int10_bb_b0, &bmb_int11_bb_b0};
+
+static struct attn_hw_reg bmb_prty0_bb_b0 = {
+       0, 5, 0x5401dc, 0x5401e8, 0x5401e4, 0x5401e0};
+
+static struct attn_hw_reg bmb_prty1_bb_b0 = {
+       1, 31, 0x540400, 0x54040c, 0x540408, 0x540404};
+
+static struct attn_hw_reg bmb_prty2_bb_b0 = {
+       2, 15, 0x540410, 0x54041c, 0x540418, 0x540414};
+
+static struct attn_hw_reg *bmb_prty_bb_b0_regs[3] = {
+       &bmb_prty0_bb_b0, &bmb_prty1_bb_b0, &bmb_prty2_bb_b0};
+
+static struct attn_hw_reg pcie_prty1_bb_b0 = {
+       0, 17, 0x54000, 0x5400c, 0x54008, 0x54004};
+
+static struct attn_hw_reg *pcie_prty_bb_b0_regs[1] = {
+       &pcie_prty1_bb_b0};
+
+static struct attn_hw_reg mcp2_prty0_bb_b0 = {
+       0, 1, 0x52040, 0x5204c, 0x52048, 0x52044};
+
+static struct attn_hw_reg mcp2_prty1_bb_b0 = {
+       1, 12, 0x52204, 0x52210, 0x5220c, 0x52208};
+
+static struct attn_hw_reg *mcp2_prty_bb_b0_regs[2] = {
+       &mcp2_prty0_bb_b0, &mcp2_prty1_bb_b0};
+
+static struct attn_hw_reg pswhst_int0_bb_b0 = {
+       0, 18, 0x2a0180, 0x2a018c, 0x2a0188, 0x2a0184};
+
+static struct attn_hw_reg *pswhst_int_bb_b0_regs[1] = {
+       &pswhst_int0_bb_b0};
+
+static struct attn_hw_reg pswhst_prty0_bb_b0 = {
+       0, 1, 0x2a0190, 0x2a019c, 0x2a0198, 0x2a0194};
+
+static struct attn_hw_reg pswhst_prty1_bb_b0 = {
+       1, 17, 0x2a0200, 0x2a020c, 0x2a0208, 0x2a0204};
+
+static struct attn_hw_reg *pswhst_prty_bb_b0_regs[2] = {
+       &pswhst_prty0_bb_b0, &pswhst_prty1_bb_b0};
+
+static struct attn_hw_reg pswhst2_int0_bb_b0 = {
+       0, 5, 0x29e180, 0x29e18c, 0x29e188, 0x29e184};
+
+static struct attn_hw_reg *pswhst2_int_bb_b0_regs[1] = {
+       &pswhst2_int0_bb_b0};
+
+static struct attn_hw_reg pswhst2_prty0_bb_b0 = {
+       0, 1, 0x29e190, 0x29e19c, 0x29e198, 0x29e194};
+
+static struct attn_hw_reg *pswhst2_prty_bb_b0_regs[1] = {
+       &pswhst2_prty0_bb_b0};
+
+static struct attn_hw_reg pswrd_int0_bb_b0 = {
+       0, 3, 0x29c180, 0x29c18c, 0x29c188, 0x29c184};
+
+static struct attn_hw_reg *pswrd_int_bb_b0_regs[1] = {
+       &pswrd_int0_bb_b0};
+
+static struct attn_hw_reg pswrd_prty0_bb_b0 = {
+       0, 1, 0x29c190, 0x29c19c, 0x29c198, 0x29c194};
+
+static struct attn_hw_reg *pswrd_prty_bb_b0_regs[1] = {
+       &pswrd_prty0_bb_b0};
+
+static struct attn_hw_reg pswrd2_int0_bb_b0 = {
+       0, 5, 0x29d180, 0x29d18c, 0x29d188, 0x29d184};
+
+static struct attn_hw_reg *pswrd2_int_bb_b0_regs[1] = {
+       &pswrd2_int0_bb_b0};
+
+static struct attn_hw_reg pswrd2_prty0_bb_b0 = {
+       0, 1, 0x29d190, 0x29d19c, 0x29d198, 0x29d194};
+
+static struct attn_hw_reg pswrd2_prty1_bb_b0 = {
+       1, 31, 0x29d200, 0x29d20c, 0x29d208, 0x29d204};
+
+static struct attn_hw_reg pswrd2_prty2_bb_b0 = {
+       2, 3, 0x29d210, 0x29d21c, 0x29d218, 0x29d214};
+
+static struct attn_hw_reg *pswrd2_prty_bb_b0_regs[3] = {
+       &pswrd2_prty0_bb_b0, &pswrd2_prty1_bb_b0, &pswrd2_prty2_bb_b0};
+
+static struct attn_hw_reg pswwr_int0_bb_b0 = {
+       0, 16, 0x29a180, 0x29a18c, 0x29a188, 0x29a184};
+
+static struct attn_hw_reg *pswwr_int_bb_b0_regs[1] = {
+       &pswwr_int0_bb_b0};
+
+static struct attn_hw_reg pswwr_prty0_bb_b0 = {
+       0, 1, 0x29a190, 0x29a19c, 0x29a198, 0x29a194};
+
+static struct attn_hw_reg *pswwr_prty_bb_b0_regs[1] = {
+       &pswwr_prty0_bb_b0};
+
+static struct attn_hw_reg pswwr2_int0_bb_b0 = {
+       0, 19, 0x29b180, 0x29b18c, 0x29b188, 0x29b184};
+
+static struct attn_hw_reg *pswwr2_int_bb_b0_regs[1] = {
+       &pswwr2_int0_bb_b0};
+
+static struct attn_hw_reg pswwr2_prty0_bb_b0 = {
+       0, 1, 0x29b190, 0x29b19c, 0x29b198, 0x29b194};
+
+static struct attn_hw_reg pswwr2_prty1_bb_b0 = {
+       1, 31, 0x29b200, 0x29b20c, 0x29b208, 0x29b204};
+
+static struct attn_hw_reg pswwr2_prty2_bb_b0 = {
+       2, 31, 0x29b210, 0x29b21c, 0x29b218, 0x29b214};
+
+static struct attn_hw_reg pswwr2_prty3_bb_b0 = {
+       3, 31, 0x29b220, 0x29b22c, 0x29b228, 0x29b224};
+
+static struct attn_hw_reg pswwr2_prty4_bb_b0 = {
+       4, 20, 0x29b230, 0x29b23c, 0x29b238, 0x29b234};
+
+static struct attn_hw_reg *pswwr2_prty_bb_b0_regs[5] = {
+       &pswwr2_prty0_bb_b0, &pswwr2_prty1_bb_b0, &pswwr2_prty2_bb_b0,
+       &pswwr2_prty3_bb_b0, &pswwr2_prty4_bb_b0};
+
+static struct attn_hw_reg pswrq_int0_bb_b0 = {
+       0, 21, 0x280180, 0x28018c, 0x280188, 0x280184};
+
+static struct attn_hw_reg *pswrq_int_bb_b0_regs[1] = {
+       &pswrq_int0_bb_b0};
+
+static struct attn_hw_reg pswrq_prty0_bb_b0 = {
+       0, 1, 0x280190, 0x28019c, 0x280198, 0x280194};
+
+static struct attn_hw_reg *pswrq_prty_bb_b0_regs[1] = {
+       &pswrq_prty0_bb_b0};
+
+static struct attn_hw_reg pswrq2_int0_bb_b0 = {
+       0, 15, 0x240180, 0x24018c, 0x240188, 0x240184};
+
+static struct attn_hw_reg *pswrq2_int_bb_b0_regs[1] = {
+       &pswrq2_int0_bb_b0};
+
+static struct attn_hw_reg pswrq2_prty1_bb_b0 = {
+       0, 9, 0x240200, 0x24020c, 0x240208, 0x240204};
+
+static struct attn_hw_reg *pswrq2_prty_bb_b0_regs[1] = {
+       &pswrq2_prty1_bb_b0};
+
+static struct attn_hw_reg pglcs_int0_bb_b0 = {
+       0, 1, 0x1d00, 0x1d0c, 0x1d08, 0x1d04};
+
+static struct attn_hw_reg *pglcs_int_bb_b0_regs[1] = {
+       &pglcs_int0_bb_b0};
+
+static struct attn_hw_reg dmae_int0_bb_b0 = {
+       0, 2, 0xc180, 0xc18c, 0xc188, 0xc184};
+
+static struct attn_hw_reg *dmae_int_bb_b0_regs[1] = {
+       &dmae_int0_bb_b0};
+
+static struct attn_hw_reg dmae_prty1_bb_b0 = {
+       0, 3, 0xc200, 0xc20c, 0xc208, 0xc204};
+
+static struct attn_hw_reg *dmae_prty_bb_b0_regs[1] = {
+       &dmae_prty1_bb_b0};
+
+static struct attn_hw_reg ptu_int0_bb_b0 = {
+       0, 8, 0x560180, 0x56018c, 0x560188, 0x560184};
+
+static struct attn_hw_reg *ptu_int_bb_b0_regs[1] = {
+       &ptu_int0_bb_b0};
+
+static struct attn_hw_reg ptu_prty1_bb_b0 = {
+       0, 18, 0x560200, 0x56020c, 0x560208, 0x560204};
+
+static struct attn_hw_reg *ptu_prty_bb_b0_regs[1] = {
+       &ptu_prty1_bb_b0};
+
+static struct attn_hw_reg tcm_int0_bb_b0 = {
+       0, 8, 0x1180180, 0x118018c, 0x1180188, 0x1180184};
+
+static struct attn_hw_reg tcm_int1_bb_b0 = {
+       1, 32, 0x1180190, 0x118019c, 0x1180198, 0x1180194};
+
+static struct attn_hw_reg tcm_int2_bb_b0 = {
+       2, 1, 0x11801a0, 0x11801ac, 0x11801a8, 0x11801a4};
+
+static struct attn_hw_reg *tcm_int_bb_b0_regs[3] = {
+       &tcm_int0_bb_b0, &tcm_int1_bb_b0, &tcm_int2_bb_b0};
+
+static struct attn_hw_reg tcm_prty1_bb_b0 = {
+       0, 31, 0x1180200, 0x118020c, 0x1180208, 0x1180204};
+
+static struct attn_hw_reg tcm_prty2_bb_b0 = {
+       1, 2, 0x1180210, 0x118021c, 0x1180218, 0x1180214};
+
+static struct attn_hw_reg *tcm_prty_bb_b0_regs[2] = {
+       &tcm_prty1_bb_b0, &tcm_prty2_bb_b0};
+
+static struct attn_hw_reg mcm_int0_bb_b0 = {
+       0, 14, 0x1200180, 0x120018c, 0x1200188, 0x1200184};
+
+static struct attn_hw_reg mcm_int1_bb_b0 = {
+       1, 26, 0x1200190, 0x120019c, 0x1200198, 0x1200194};
+
+static struct attn_hw_reg mcm_int2_bb_b0 = {
+       2, 1, 0x12001a0, 0x12001ac, 0x12001a8, 0x12001a4};
+
+static struct attn_hw_reg *mcm_int_bb_b0_regs[3] = {
+       &mcm_int0_bb_b0, &mcm_int1_bb_b0, &mcm_int2_bb_b0};
+
+static struct attn_hw_reg mcm_prty1_bb_b0 = {
+       0, 31, 0x1200200, 0x120020c, 0x1200208, 0x1200204};
+
+static struct attn_hw_reg mcm_prty2_bb_b0 = {
+       1, 4, 0x1200210, 0x120021c, 0x1200218, 0x1200214};
+
+static struct attn_hw_reg *mcm_prty_bb_b0_regs[2] = {
+       &mcm_prty1_bb_b0, &mcm_prty2_bb_b0};
+
+static struct attn_hw_reg ucm_int0_bb_b0 = {
+       0, 17, 0x1280180, 0x128018c, 0x1280188, 0x1280184};
+
+static struct attn_hw_reg ucm_int1_bb_b0 = {
+       1, 29, 0x1280190, 0x128019c, 0x1280198, 0x1280194};
+
+static struct attn_hw_reg ucm_int2_bb_b0 = {
+       2, 1, 0x12801a0, 0x12801ac, 0x12801a8, 0x12801a4};
+
+static struct attn_hw_reg *ucm_int_bb_b0_regs[3] = {
+       &ucm_int0_bb_b0, &ucm_int1_bb_b0, &ucm_int2_bb_b0};
+
+static struct attn_hw_reg ucm_prty1_bb_b0 = {
+       0, 31, 0x1280200, 0x128020c, 0x1280208, 0x1280204};
+
+static struct attn_hw_reg ucm_prty2_bb_b0 = {
+       1, 7, 0x1280210, 0x128021c, 0x1280218, 0x1280214};
+
+static struct attn_hw_reg *ucm_prty_bb_b0_regs[2] = {
+       &ucm_prty1_bb_b0, &ucm_prty2_bb_b0};
+
+static struct attn_hw_reg xcm_int0_bb_b0 = {
+       0, 16, 0x1000180, 0x100018c, 0x1000188, 0x1000184};
+
+static struct attn_hw_reg xcm_int1_bb_b0 = {
+       1, 25, 0x1000190, 0x100019c, 0x1000198, 0x1000194};
+
+static struct attn_hw_reg xcm_int2_bb_b0 = {
+       2, 8, 0x10001a0, 0x10001ac, 0x10001a8, 0x10001a4};
+
+static struct attn_hw_reg *xcm_int_bb_b0_regs[3] = {
+       &xcm_int0_bb_b0, &xcm_int1_bb_b0, &xcm_int2_bb_b0};
+
+static struct attn_hw_reg xcm_prty1_bb_b0 = {
+       0, 31, 0x1000200, 0x100020c, 0x1000208, 0x1000204};
+
+static struct attn_hw_reg xcm_prty2_bb_b0 = {
+       1, 11, 0x1000210, 0x100021c, 0x1000218, 0x1000214};
+
+static struct attn_hw_reg *xcm_prty_bb_b0_regs[2] = {
+       &xcm_prty1_bb_b0, &xcm_prty2_bb_b0};
+
+static struct attn_hw_reg ycm_int0_bb_b0 = {
+       0, 13, 0x1080180, 0x108018c, 0x1080188, 0x1080184};
+
+static struct attn_hw_reg ycm_int1_bb_b0 = {
+       1, 23, 0x1080190, 0x108019c, 0x1080198, 0x1080194};
+
+static struct attn_hw_reg ycm_int2_bb_b0 = {
+       2, 1, 0x10801a0, 0x10801ac, 0x10801a8, 0x10801a4};
+
+static struct attn_hw_reg *ycm_int_bb_b0_regs[3] = {
+       &ycm_int0_bb_b0, &ycm_int1_bb_b0, &ycm_int2_bb_b0};
+
+static struct attn_hw_reg ycm_prty1_bb_b0 = {
+       0, 31, 0x1080200, 0x108020c, 0x1080208, 0x1080204};
+
+static struct attn_hw_reg ycm_prty2_bb_b0 = {
+       1, 3, 0x1080210, 0x108021c, 0x1080218, 0x1080214};
+
+static struct attn_hw_reg *ycm_prty_bb_b0_regs[2] = {
+       &ycm_prty1_bb_b0, &ycm_prty2_bb_b0};
+
+static struct attn_hw_reg pcm_int0_bb_b0 = {
+       0, 5, 0x1100180, 0x110018c, 0x1100188, 0x1100184};
+
+static struct attn_hw_reg pcm_int1_bb_b0 = {
+       1, 14, 0x1100190, 0x110019c, 0x1100198, 0x1100194};
+
+static struct attn_hw_reg pcm_int2_bb_b0 = {
+       2, 1, 0x11001a0, 0x11001ac, 0x11001a8, 0x11001a4};
+
+static struct attn_hw_reg *pcm_int_bb_b0_regs[3] = {
+       &pcm_int0_bb_b0, &pcm_int1_bb_b0, &pcm_int2_bb_b0};
+
+static struct attn_hw_reg pcm_prty1_bb_b0 = {
+       0, 11, 0x1100200, 0x110020c, 0x1100208, 0x1100204};
+
+static struct attn_hw_reg *pcm_prty_bb_b0_regs[1] = {
+       &pcm_prty1_bb_b0};
+
+static struct attn_hw_reg qm_int0_bb_b0 = {
+       0, 22, 0x2f0180, 0x2f018c, 0x2f0188, 0x2f0184};
+
+static struct attn_hw_reg *qm_int_bb_b0_regs[1] = {
+       &qm_int0_bb_b0};
+
+static struct attn_hw_reg qm_prty0_bb_b0 = {
+       0, 11, 0x2f0190, 0x2f019c, 0x2f0198, 0x2f0194};
+
+static struct attn_hw_reg qm_prty1_bb_b0 = {
+       1, 31, 0x2f0200, 0x2f020c, 0x2f0208, 0x2f0204};
+
+static struct attn_hw_reg qm_prty2_bb_b0 = {
+       2, 31, 0x2f0210, 0x2f021c, 0x2f0218, 0x2f0214};
+
+static struct attn_hw_reg qm_prty3_bb_b0 = {
+       3, 11, 0x2f0220, 0x2f022c, 0x2f0228, 0x2f0224};
+
+static struct attn_hw_reg *qm_prty_bb_b0_regs[4] = {
+       &qm_prty0_bb_b0, &qm_prty1_bb_b0, &qm_prty2_bb_b0, &qm_prty3_bb_b0};
+
+static struct attn_hw_reg tm_int0_bb_b0 = {
+       0, 32, 0x2c0180, 0x2c018c, 0x2c0188, 0x2c0184};
+
+static struct attn_hw_reg tm_int1_bb_b0 = {
+       1, 11, 0x2c0190, 0x2c019c, 0x2c0198, 0x2c0194};
+
+static struct attn_hw_reg *tm_int_bb_b0_regs[2] = {
+       &tm_int0_bb_b0, &tm_int1_bb_b0};
+
+static struct attn_hw_reg tm_prty1_bb_b0 = {
+       0, 17, 0x2c0200, 0x2c020c, 0x2c0208, 0x2c0204};
+
+static struct attn_hw_reg *tm_prty_bb_b0_regs[1] = {
+       &tm_prty1_bb_b0};
+
+static struct attn_hw_reg dorq_int0_bb_b0 = {
+       0, 9, 0x100180, 0x10018c, 0x100188, 0x100184};
+
+static struct attn_hw_reg *dorq_int_bb_b0_regs[1] = {
+       &dorq_int0_bb_b0};
+
+static struct attn_hw_reg dorq_prty0_bb_b0 = {
+       0, 1, 0x100190, 0x10019c, 0x100198, 0x100194};
+
+static struct attn_hw_reg dorq_prty1_bb_b0 = {
+       1, 6, 0x100200, 0x10020c, 0x100208, 0x100204};
+
+static struct attn_hw_reg *dorq_prty_bb_b0_regs[2] = {
+       &dorq_prty0_bb_b0, &dorq_prty1_bb_b0};
+
+static struct attn_hw_reg brb_int0_bb_b0 = {
+       0, 32, 0x3400c0, 0x3400cc, 0x3400c8, 0x3400c4};
+
+static struct attn_hw_reg brb_int1_bb_b0 = {
+       1, 30, 0x3400d8, 0x3400e4, 0x3400e0, 0x3400dc};
+
+static struct attn_hw_reg brb_int2_bb_b0 = {
+       2, 28, 0x3400f0, 0x3400fc, 0x3400f8, 0x3400f4};
+
+static struct attn_hw_reg brb_int3_bb_b0 = {
+       3, 31, 0x340108, 0x340114, 0x340110, 0x34010c};
+
+static struct attn_hw_reg brb_int4_bb_b0 = {
+       4, 27, 0x340120, 0x34012c, 0x340128, 0x340124};
+
+static struct attn_hw_reg brb_int5_bb_b0 = {
+       5, 1, 0x340138, 0x340144, 0x340140, 0x34013c};
+
+static struct attn_hw_reg brb_int6_bb_b0 = {
+       6, 8, 0x340150, 0x34015c, 0x340158, 0x340154};
+
+static struct attn_hw_reg brb_int7_bb_b0 = {
+       7, 32, 0x340168, 0x340174, 0x340170, 0x34016c};
+
+static struct attn_hw_reg brb_int8_bb_b0 = {
+       8, 17, 0x340184, 0x340190, 0x34018c, 0x340188};
+
+static struct attn_hw_reg brb_int9_bb_b0 = {
+       9, 1, 0x34019c, 0x3401a8, 0x3401a4, 0x3401a0};
+
+static struct attn_hw_reg brb_int10_bb_b0 = {
+       10, 14, 0x3401b4, 0x3401c0, 0x3401bc, 0x3401b8};
+
+static struct attn_hw_reg brb_int11_bb_b0 = {
+       11, 8, 0x3401cc, 0x3401d8, 0x3401d4, 0x3401d0};
+
+static struct attn_hw_reg *brb_int_bb_b0_regs[12] = {
+       &brb_int0_bb_b0, &brb_int1_bb_b0, &brb_int2_bb_b0, &brb_int3_bb_b0,
+       &brb_int4_bb_b0, &brb_int5_bb_b0, &brb_int6_bb_b0, &brb_int7_bb_b0,
+       &brb_int8_bb_b0, &brb_int9_bb_b0, &brb_int10_bb_b0, &brb_int11_bb_b0};
+
+static struct attn_hw_reg brb_prty0_bb_b0 = {
+       0, 5, 0x3401dc, 0x3401e8, 0x3401e4, 0x3401e0};
+
+static struct attn_hw_reg brb_prty1_bb_b0 = {
+       1, 31, 0x340400, 0x34040c, 0x340408, 0x340404};
+
+static struct attn_hw_reg brb_prty2_bb_b0 = {
+       2, 14, 0x340410, 0x34041c, 0x340418, 0x340414};
+
+static struct attn_hw_reg *brb_prty_bb_b0_regs[3] = {
+       &brb_prty0_bb_b0, &brb_prty1_bb_b0, &brb_prty2_bb_b0};
+
+static struct attn_hw_reg src_int0_bb_b0 = {
+       0, 1, 0x2381d8, 0x2381dc, 0x2381e0, 0x2381e4};
+
+static struct attn_hw_reg *src_int_bb_b0_regs[1] = {
+       &src_int0_bb_b0};
+
+static struct attn_hw_reg prs_int0_bb_b0 = {
+       0, 2, 0x1f0040, 0x1f004c, 0x1f0048, 0x1f0044};
+
+static struct attn_hw_reg *prs_int_bb_b0_regs[1] = {
+       &prs_int0_bb_b0};
+
+static struct attn_hw_reg prs_prty0_bb_b0 = {
+       0, 2, 0x1f0050, 0x1f005c, 0x1f0058, 0x1f0054};
+
+static struct attn_hw_reg prs_prty1_bb_b0 = {
+       1, 31, 0x1f0204, 0x1f0210, 0x1f020c, 0x1f0208};
+
+static struct attn_hw_reg prs_prty2_bb_b0 = {
+       2, 5, 0x1f0214, 0x1f0220, 0x1f021c, 0x1f0218};
+
+static struct attn_hw_reg *prs_prty_bb_b0_regs[3] = {
+       &prs_prty0_bb_b0, &prs_prty1_bb_b0, &prs_prty2_bb_b0};
+
+static struct attn_hw_reg tsdm_int0_bb_b0 = {
+       0, 26, 0xfb0040, 0xfb004c, 0xfb0048, 0xfb0044};
+
+static struct attn_hw_reg *tsdm_int_bb_b0_regs[1] = {
+       &tsdm_int0_bb_b0};
+
+static struct attn_hw_reg tsdm_prty1_bb_b0 = {
+       0, 10, 0xfb0200, 0xfb020c, 0xfb0208, 0xfb0204};
+
+static struct attn_hw_reg *tsdm_prty_bb_b0_regs[1] = {
+       &tsdm_prty1_bb_b0};
+
+static struct attn_hw_reg msdm_int0_bb_b0 = {
+       0, 26, 0xfc0040, 0xfc004c, 0xfc0048, 0xfc0044};
+
+static struct attn_hw_reg *msdm_int_bb_b0_regs[1] = {
+       &msdm_int0_bb_b0};
+
+static struct attn_hw_reg msdm_prty1_bb_b0 = {
+       0, 11, 0xfc0200, 0xfc020c, 0xfc0208, 0xfc0204};
+
+static struct attn_hw_reg *msdm_prty_bb_b0_regs[1] = {
+       &msdm_prty1_bb_b0};
+
+static struct attn_hw_reg usdm_int0_bb_b0 = {
+       0, 26, 0xfd0040, 0xfd004c, 0xfd0048, 0xfd0044};
+
+static struct attn_hw_reg *usdm_int_bb_b0_regs[1] = {
+       &usdm_int0_bb_b0};
+
+static struct attn_hw_reg usdm_prty1_bb_b0 = {
+       0, 10, 0xfd0200, 0xfd020c, 0xfd0208, 0xfd0204};
+
+static struct attn_hw_reg *usdm_prty_bb_b0_regs[1] = {
+       &usdm_prty1_bb_b0};
+
+static struct attn_hw_reg xsdm_int0_bb_b0 = {
+       0, 26, 0xf80040, 0xf8004c, 0xf80048, 0xf80044};
+
+static struct attn_hw_reg *xsdm_int_bb_b0_regs[1] = {
+       &xsdm_int0_bb_b0};
+
+static struct attn_hw_reg xsdm_prty1_bb_b0 = {
+       0, 10, 0xf80200, 0xf8020c, 0xf80208, 0xf80204};
+
+static struct attn_hw_reg *xsdm_prty_bb_b0_regs[1] = {
+       &xsdm_prty1_bb_b0};
+
+static struct attn_hw_reg ysdm_int0_bb_b0 = {
+       0, 26, 0xf90040, 0xf9004c, 0xf90048, 0xf90044};
+
+static struct attn_hw_reg *ysdm_int_bb_b0_regs[1] = {
+       &ysdm_int0_bb_b0};
+
+static struct attn_hw_reg ysdm_prty1_bb_b0 = {
+       0, 9, 0xf90200, 0xf9020c, 0xf90208, 0xf90204};
+
+static struct attn_hw_reg *ysdm_prty_bb_b0_regs[1] = {
+       &ysdm_prty1_bb_b0};
+
+static struct attn_hw_reg psdm_int0_bb_b0 = {
+       0, 26, 0xfa0040, 0xfa004c, 0xfa0048, 0xfa0044};
+
+static struct attn_hw_reg *psdm_int_bb_b0_regs[1] = {
+       &psdm_int0_bb_b0};
+
+static struct attn_hw_reg psdm_prty1_bb_b0 = {
+       0, 9, 0xfa0200, 0xfa020c, 0xfa0208, 0xfa0204};
+
+static struct attn_hw_reg *psdm_prty_bb_b0_regs[1] = {
+       &psdm_prty1_bb_b0};
+
+static struct attn_hw_reg tsem_int0_bb_b0 = {
+       0, 32, 0x1700040, 0x170004c, 0x1700048, 0x1700044};
+
+static struct attn_hw_reg tsem_int1_bb_b0 = {
+       1, 13, 0x1700050, 0x170005c, 0x1700058, 0x1700054};
+
+static struct attn_hw_reg tsem_fast_memory_int0_bb_b0 = {
+       2, 1, 0x1740040, 0x174004c, 0x1740048, 0x1740044};
+
+static struct attn_hw_reg *tsem_int_bb_b0_regs[3] = {
+       &tsem_int0_bb_b0, &tsem_int1_bb_b0, &tsem_fast_memory_int0_bb_b0};
+
+static struct attn_hw_reg tsem_prty0_bb_b0 = {
+       0, 3, 0x17000c8, 0x17000d4, 0x17000d0, 0x17000cc};
+
+static struct attn_hw_reg tsem_prty1_bb_b0 = {
+       1, 6, 0x1700200, 0x170020c, 0x1700208, 0x1700204};
+
+static struct attn_hw_reg tsem_fast_memory_vfc_config_prty1_bb_b0 = {
+       2, 6, 0x174a200, 0x174a20c, 0x174a208, 0x174a204};
+
+static struct attn_hw_reg *tsem_prty_bb_b0_regs[3] = {
+       &tsem_prty0_bb_b0, &tsem_prty1_bb_b0,
+       &tsem_fast_memory_vfc_config_prty1_bb_b0};
+
+static struct attn_hw_reg msem_int0_bb_b0 = {
+       0, 32, 0x1800040, 0x180004c, 0x1800048, 0x1800044};
+
+static struct attn_hw_reg msem_int1_bb_b0 = {
+       1, 13, 0x1800050, 0x180005c, 0x1800058, 0x1800054};
+
+static struct attn_hw_reg msem_fast_memory_int0_bb_b0 = {
+       2, 1, 0x1840040, 0x184004c, 0x1840048, 0x1840044};
+
+static struct attn_hw_reg *msem_int_bb_b0_regs[3] = {
+       &msem_int0_bb_b0, &msem_int1_bb_b0, &msem_fast_memory_int0_bb_b0};
+
+static struct attn_hw_reg msem_prty0_bb_b0 = {
+       0, 3, 0x18000c8, 0x18000d4, 0x18000d0, 0x18000cc};
+
+static struct attn_hw_reg msem_prty1_bb_b0 = {
+       1, 6, 0x1800200, 0x180020c, 0x1800208, 0x1800204};
+
+static struct attn_hw_reg *msem_prty_bb_b0_regs[2] = {
+       &msem_prty0_bb_b0, &msem_prty1_bb_b0};
+
+static struct attn_hw_reg usem_int0_bb_b0 = {
+       0, 32, 0x1900040, 0x190004c, 0x1900048, 0x1900044};
+
+static struct attn_hw_reg usem_int1_bb_b0 = {
+       1, 13, 0x1900050, 0x190005c, 0x1900058, 0x1900054};
+
+static struct attn_hw_reg usem_fast_memory_int0_bb_b0 = {
+       2, 1, 0x1940040, 0x194004c, 0x1940048, 0x1940044};
+
+static struct attn_hw_reg *usem_int_bb_b0_regs[3] = {
+       &usem_int0_bb_b0, &usem_int1_bb_b0, &usem_fast_memory_int0_bb_b0};
+
+static struct attn_hw_reg usem_prty0_bb_b0 = {
+       0, 3, 0x19000c8, 0x19000d4, 0x19000d0, 0x19000cc};
+
+static struct attn_hw_reg usem_prty1_bb_b0 = {
+       1, 6, 0x1900200, 0x190020c, 0x1900208, 0x1900204};
+
+static struct attn_hw_reg *usem_prty_bb_b0_regs[2] = {
+       &usem_prty0_bb_b0, &usem_prty1_bb_b0};
+
+static struct attn_hw_reg xsem_int0_bb_b0 = {
+       0, 32, 0x1400040, 0x140004c, 0x1400048, 0x1400044};
+
+static struct attn_hw_reg xsem_int1_bb_b0 = {
+       1, 13, 0x1400050, 0x140005c, 0x1400058, 0x1400054};
+
+static struct attn_hw_reg xsem_fast_memory_int0_bb_b0 = {
+       2, 1, 0x1440040, 0x144004c, 0x1440048, 0x1440044};
+
+static struct attn_hw_reg *xsem_int_bb_b0_regs[3] = {
+       &xsem_int0_bb_b0, &xsem_int1_bb_b0, &xsem_fast_memory_int0_bb_b0};
+
+static struct attn_hw_reg xsem_prty0_bb_b0 = {
+       0, 3, 0x14000c8, 0x14000d4, 0x14000d0, 0x14000cc};
+
+static struct attn_hw_reg xsem_prty1_bb_b0 = {
+       1, 7, 0x1400200, 0x140020c, 0x1400208, 0x1400204};
+
+static struct attn_hw_reg *xsem_prty_bb_b0_regs[2] = {
+       &xsem_prty0_bb_b0, &xsem_prty1_bb_b0};
+
+static struct attn_hw_reg ysem_int0_bb_b0 = {
+       0, 32, 0x1500040, 0x150004c, 0x1500048, 0x1500044};
+
+static struct attn_hw_reg ysem_int1_bb_b0 = {
+       1, 13, 0x1500050, 0x150005c, 0x1500058, 0x1500054};
+
+static struct attn_hw_reg ysem_fast_memory_int0_bb_b0 = {
+       2, 1, 0x1540040, 0x154004c, 0x1540048, 0x1540044};
+
+static struct attn_hw_reg *ysem_int_bb_b0_regs[3] = {
+       &ysem_int0_bb_b0, &ysem_int1_bb_b0, &ysem_fast_memory_int0_bb_b0};
+
+static struct attn_hw_reg ysem_prty0_bb_b0 = {
+       0, 3, 0x15000c8, 0x15000d4, 0x15000d0, 0x15000cc};
+
+static struct attn_hw_reg ysem_prty1_bb_b0 = {
+       1, 7, 0x1500200, 0x150020c, 0x1500208, 0x1500204};
+
+static struct attn_hw_reg *ysem_prty_bb_b0_regs[2] = {
+       &ysem_prty0_bb_b0, &ysem_prty1_bb_b0};
+
+static struct attn_hw_reg psem_int0_bb_b0 = {
+       0, 32, 0x1600040, 0x160004c, 0x1600048, 0x1600044};
+
+static struct attn_hw_reg psem_int1_bb_b0 = {
+       1, 13, 0x1600050, 0x160005c, 0x1600058, 0x1600054};
+
+static struct attn_hw_reg psem_fast_memory_int0_bb_b0 = {
+       2, 1, 0x1640040, 0x164004c, 0x1640048, 0x1640044};
+
+static struct attn_hw_reg *psem_int_bb_b0_regs[3] = {
+       &psem_int0_bb_b0, &psem_int1_bb_b0, &psem_fast_memory_int0_bb_b0};
+
+static struct attn_hw_reg psem_prty0_bb_b0 = {
+       0, 3, 0x16000c8, 0x16000d4, 0x16000d0, 0x16000cc};
+
+static struct attn_hw_reg psem_prty1_bb_b0 = {
+       1, 6, 0x1600200, 0x160020c, 0x1600208, 0x1600204};
+
+static struct attn_hw_reg psem_fast_memory_vfc_config_prty1_bb_b0 = {
+       2, 6, 0x164a200, 0x164a20c, 0x164a208, 0x164a204};
+
+static struct attn_hw_reg *psem_prty_bb_b0_regs[3] = {
+       &psem_prty0_bb_b0, &psem_prty1_bb_b0,
+       &psem_fast_memory_vfc_config_prty1_bb_b0};
+
+static struct attn_hw_reg rss_int0_bb_b0 = {
+       0, 12, 0x238980, 0x23898c, 0x238988, 0x238984};
+
+static struct attn_hw_reg *rss_int_bb_b0_regs[1] = {
+       &rss_int0_bb_b0};
+
+static struct attn_hw_reg rss_prty1_bb_b0 = {
+       0, 4, 0x238a00, 0x238a0c, 0x238a08, 0x238a04};
+
+static struct attn_hw_reg *rss_prty_bb_b0_regs[1] = {
+       &rss_prty1_bb_b0};
+
+static struct attn_hw_reg tmld_int0_bb_b0 = {
+       0, 6, 0x4d0180, 0x4d018c, 0x4d0188, 0x4d0184};
+
+static struct attn_hw_reg *tmld_int_bb_b0_regs[1] = {
+       &tmld_int0_bb_b0};
+
+static struct attn_hw_reg tmld_prty1_bb_b0 = {
+       0, 8, 0x4d0200, 0x4d020c, 0x4d0208, 0x4d0204};
+
+static struct attn_hw_reg *tmld_prty_bb_b0_regs[1] = {
+       &tmld_prty1_bb_b0};
+
+static struct attn_hw_reg muld_int0_bb_b0 = {
+       0, 6, 0x4e0180, 0x4e018c, 0x4e0188, 0x4e0184};
+
+static struct attn_hw_reg *muld_int_bb_b0_regs[1] = {
+       &muld_int0_bb_b0};
+
+static struct attn_hw_reg muld_prty1_bb_b0 = {
+       0, 10, 0x4e0200, 0x4e020c, 0x4e0208, 0x4e0204};
+
+static struct attn_hw_reg *muld_prty_bb_b0_regs[1] = {
+       &muld_prty1_bb_b0};
+
+static struct attn_hw_reg yuld_int0_bb_b0 = {
+       0, 6, 0x4c8180, 0x4c818c, 0x4c8188, 0x4c8184};
+
+static struct attn_hw_reg *yuld_int_bb_b0_regs[1] = {
+       &yuld_int0_bb_b0};
+
+static struct attn_hw_reg yuld_prty1_bb_b0 = {
+       0, 6, 0x4c8200, 0x4c820c, 0x4c8208, 0x4c8204};
+
+static struct attn_hw_reg *yuld_prty_bb_b0_regs[1] = {
+       &yuld_prty1_bb_b0};
+
+static struct attn_hw_reg xyld_int0_bb_b0 = {
+       0, 6, 0x4c0180, 0x4c018c, 0x4c0188, 0x4c0184};
+
+static struct attn_hw_reg *xyld_int_bb_b0_regs[1] = {
+       &xyld_int0_bb_b0};
+
+static struct attn_hw_reg xyld_prty1_bb_b0 = {
+       0, 9, 0x4c0200, 0x4c020c, 0x4c0208, 0x4c0204};
+
+static struct attn_hw_reg *xyld_prty_bb_b0_regs[1] = {
+       &xyld_prty1_bb_b0};
+
+static struct attn_hw_reg prm_int0_bb_b0 = {
+       0, 11, 0x230040, 0x23004c, 0x230048, 0x230044};
+
+static struct attn_hw_reg *prm_int_bb_b0_regs[1] = {
+       &prm_int0_bb_b0};
+
+static struct attn_hw_reg prm_prty0_bb_b0 = {
+       0, 1, 0x230050, 0x23005c, 0x230058, 0x230054};
+
+static struct attn_hw_reg prm_prty1_bb_b0 = {
+       1, 24, 0x230200, 0x23020c, 0x230208, 0x230204};
+
+static struct attn_hw_reg *prm_prty_bb_b0_regs[2] = {
+       &prm_prty0_bb_b0, &prm_prty1_bb_b0};
+
+static struct attn_hw_reg pbf_pb1_int0_bb_b0 = {
+       0, 9, 0xda0040, 0xda004c, 0xda0048, 0xda0044};
+
+static struct attn_hw_reg *pbf_pb1_int_bb_b0_regs[1] = {
+       &pbf_pb1_int0_bb_b0};
+
+static struct attn_hw_reg pbf_pb1_prty0_bb_b0 = {
+       0, 1, 0xda0050, 0xda005c, 0xda0058, 0xda0054};
+
+static struct attn_hw_reg *pbf_pb1_prty_bb_b0_regs[1] = {
+       &pbf_pb1_prty0_bb_b0};
+
+static struct attn_hw_reg pbf_pb2_int0_bb_b0 = {
+       0, 9, 0xda4040, 0xda404c, 0xda4048, 0xda4044};
+
+static struct attn_hw_reg *pbf_pb2_int_bb_b0_regs[1] = {
+       &pbf_pb2_int0_bb_b0};
+
+static struct attn_hw_reg pbf_pb2_prty0_bb_b0 = {
+       0, 1, 0xda4050, 0xda405c, 0xda4058, 0xda4054};
+
+static struct attn_hw_reg *pbf_pb2_prty_bb_b0_regs[1] = {
+       &pbf_pb2_prty0_bb_b0};
+
+static struct attn_hw_reg rpb_int0_bb_b0 = {
+       0, 9, 0x23c040, 0x23c04c, 0x23c048, 0x23c044};
+
+static struct attn_hw_reg *rpb_int_bb_b0_regs[1] = {
+       &rpb_int0_bb_b0};
+
+static struct attn_hw_reg rpb_prty0_bb_b0 = {
+       0, 1, 0x23c050, 0x23c05c, 0x23c058, 0x23c054};
+
+static struct attn_hw_reg *rpb_prty_bb_b0_regs[1] = {
+       &rpb_prty0_bb_b0};
+
+static struct attn_hw_reg btb_int0_bb_b0 = {
+       0, 16, 0xdb00c0, 0xdb00cc, 0xdb00c8, 0xdb00c4};
+
+static struct attn_hw_reg btb_int1_bb_b0 = {
+       1, 16, 0xdb00d8, 0xdb00e4, 0xdb00e0, 0xdb00dc};
+
+static struct attn_hw_reg btb_int2_bb_b0 = {
+       2, 4, 0xdb00f0, 0xdb00fc, 0xdb00f8, 0xdb00f4};
+
+static struct attn_hw_reg btb_int3_bb_b0 = {
+       3, 32, 0xdb0108, 0xdb0114, 0xdb0110, 0xdb010c};
+
+static struct attn_hw_reg btb_int4_bb_b0 = {
+       4, 23, 0xdb0120, 0xdb012c, 0xdb0128, 0xdb0124};
+
+static struct attn_hw_reg btb_int5_bb_b0 = {
+       5, 32, 0xdb0138, 0xdb0144, 0xdb0140, 0xdb013c};
+
+static struct attn_hw_reg btb_int6_bb_b0 = {
+       6, 1, 0xdb0150, 0xdb015c, 0xdb0158, 0xdb0154};
+
+static struct attn_hw_reg btb_int8_bb_b0 = {
+       7, 1, 0xdb0184, 0xdb0190, 0xdb018c, 0xdb0188};
+
+static struct attn_hw_reg btb_int9_bb_b0 = {
+       8, 1, 0xdb019c, 0xdb01a8, 0xdb01a4, 0xdb01a0};
+
+static struct attn_hw_reg btb_int10_bb_b0 = {
+       9, 1, 0xdb01b4, 0xdb01c0, 0xdb01bc, 0xdb01b8};
+
+static struct attn_hw_reg btb_int11_bb_b0 = {
+       10, 2, 0xdb01cc, 0xdb01d8, 0xdb01d4, 0xdb01d0};
+
+static struct attn_hw_reg *btb_int_bb_b0_regs[11] = {
+       &btb_int0_bb_b0, &btb_int1_bb_b0, &btb_int2_bb_b0, &btb_int3_bb_b0,
+       &btb_int4_bb_b0, &btb_int5_bb_b0, &btb_int6_bb_b0, &btb_int8_bb_b0,
+       &btb_int9_bb_b0, &btb_int10_bb_b0, &btb_int11_bb_b0};
+
+static struct attn_hw_reg btb_prty0_bb_b0 = {
+       0, 5, 0xdb01dc, 0xdb01e8, 0xdb01e4, 0xdb01e0};
+
+static struct attn_hw_reg btb_prty1_bb_b0 = {
+       1, 23, 0xdb0400, 0xdb040c, 0xdb0408, 0xdb0404};
+
+static struct attn_hw_reg *btb_prty_bb_b0_regs[2] = {
+       &btb_prty0_bb_b0, &btb_prty1_bb_b0};
+
+static struct attn_hw_reg pbf_int0_bb_b0 = {
+       0, 1, 0xd80180, 0xd8018c, 0xd80188, 0xd80184};
+
+static struct attn_hw_reg *pbf_int_bb_b0_regs[1] = {
+       &pbf_int0_bb_b0};
+
+static struct attn_hw_reg pbf_prty0_bb_b0 = {
+       0, 1, 0xd80190, 0xd8019c, 0xd80198, 0xd80194};
+
+static struct attn_hw_reg pbf_prty1_bb_b0 = {
+       1, 31, 0xd80200, 0xd8020c, 0xd80208, 0xd80204};
+
+static struct attn_hw_reg pbf_prty2_bb_b0 = {
+       2, 27, 0xd80210, 0xd8021c, 0xd80218, 0xd80214};
+
+static struct attn_hw_reg *pbf_prty_bb_b0_regs[3] = {
+       &pbf_prty0_bb_b0, &pbf_prty1_bb_b0, &pbf_prty2_bb_b0};
+
+static struct attn_hw_reg rdif_int0_bb_b0 = {
+       0, 8, 0x300180, 0x30018c, 0x300188, 0x300184};
+
+static struct attn_hw_reg *rdif_int_bb_b0_regs[1] = {
+       &rdif_int0_bb_b0};
+
+static struct attn_hw_reg rdif_prty0_bb_b0 = {
+       0, 1, 0x300190, 0x30019c, 0x300198, 0x300194};
+
+static struct attn_hw_reg *rdif_prty_bb_b0_regs[1] = {
+       &rdif_prty0_bb_b0};
+
+static struct attn_hw_reg tdif_int0_bb_b0 = {
+       0, 8, 0x310180, 0x31018c, 0x310188, 0x310184};
+
+static struct attn_hw_reg *tdif_int_bb_b0_regs[1] = {
+       &tdif_int0_bb_b0};
+
+static struct attn_hw_reg tdif_prty0_bb_b0 = {
+       0, 1, 0x310190, 0x31019c, 0x310198, 0x310194};
+
+static struct attn_hw_reg tdif_prty1_bb_b0 = {
+       1, 11, 0x310200, 0x31020c, 0x310208, 0x310204};
+
+static struct attn_hw_reg *tdif_prty_bb_b0_regs[2] = {
+       &tdif_prty0_bb_b0, &tdif_prty1_bb_b0};
+
+static struct attn_hw_reg cdu_int0_bb_b0 = {
+       0, 8, 0x5801c0, 0x5801c4, 0x5801c8, 0x5801cc};
+
+static struct attn_hw_reg *cdu_int_bb_b0_regs[1] = {
+       &cdu_int0_bb_b0};
+
+static struct attn_hw_reg cdu_prty1_bb_b0 = {
+       0, 5, 0x580200, 0x58020c, 0x580208, 0x580204};
+
+static struct attn_hw_reg *cdu_prty_bb_b0_regs[1] = {
+       &cdu_prty1_bb_b0};
+
+static struct attn_hw_reg ccfc_int0_bb_b0 = {
+       0, 2, 0x2e0180, 0x2e018c, 0x2e0188, 0x2e0184};
+
+static struct attn_hw_reg *ccfc_int_bb_b0_regs[1] = {
+       &ccfc_int0_bb_b0};
+
+static struct attn_hw_reg ccfc_prty1_bb_b0 = {
+       0, 2, 0x2e0200, 0x2e020c, 0x2e0208, 0x2e0204};
+
+static struct attn_hw_reg ccfc_prty0_bb_b0 = {
+       1, 6, 0x2e05e4, 0x2e05f0, 0x2e05ec, 0x2e05e8};
+
+static struct attn_hw_reg *ccfc_prty_bb_b0_regs[2] = {
+       &ccfc_prty1_bb_b0, &ccfc_prty0_bb_b0};
+
+static struct attn_hw_reg tcfc_int0_bb_b0 = {
+       0, 2, 0x2d0180, 0x2d018c, 0x2d0188, 0x2d0184};
+
+static struct attn_hw_reg *tcfc_int_bb_b0_regs[1] = {
+       &tcfc_int0_bb_b0};
+
+static struct attn_hw_reg tcfc_prty1_bb_b0 = {
+       0, 2, 0x2d0200, 0x2d020c, 0x2d0208, 0x2d0204};
+
+static struct attn_hw_reg tcfc_prty0_bb_b0 = {
+       1, 6, 0x2d05e4, 0x2d05f0, 0x2d05ec, 0x2d05e8};
+
+static struct attn_hw_reg *tcfc_prty_bb_b0_regs[2] = {
+       &tcfc_prty1_bb_b0, &tcfc_prty0_bb_b0};
+
+static struct attn_hw_reg igu_int0_bb_b0 = {
+       0, 11, 0x180180, 0x18018c, 0x180188, 0x180184};
+
+static struct attn_hw_reg *igu_int_bb_b0_regs[1] = {
+       &igu_int0_bb_b0};
+
+static struct attn_hw_reg igu_prty0_bb_b0 = {
+       0, 1, 0x180190, 0x18019c, 0x180198, 0x180194};
+
+static struct attn_hw_reg igu_prty1_bb_b0 = {
+       1, 31, 0x180200, 0x18020c, 0x180208, 0x180204};
+
+static struct attn_hw_reg igu_prty2_bb_b0 = {
+       2, 1, 0x180210, 0x18021c, 0x180218, 0x180214};
+
+static struct attn_hw_reg *igu_prty_bb_b0_regs[3] = {
+       &igu_prty0_bb_b0, &igu_prty1_bb_b0, &igu_prty2_bb_b0};
+
+static struct attn_hw_reg cau_int0_bb_b0 = {
+       0, 11, 0x1c00d4, 0x1c00d8, 0x1c00dc, 0x1c00e0};
+
+static struct attn_hw_reg *cau_int_bb_b0_regs[1] = {
+       &cau_int0_bb_b0};
+
+static struct attn_hw_reg cau_prty1_bb_b0 = {
+       0, 13, 0x1c0200, 0x1c020c, 0x1c0208, 0x1c0204};
+
+static struct attn_hw_reg *cau_prty_bb_b0_regs[1] = {
+       &cau_prty1_bb_b0};
+
+static struct attn_hw_reg dbg_int0_bb_b0 = {
+       0, 1, 0x10180, 0x1018c, 0x10188, 0x10184};
+
+static struct attn_hw_reg *dbg_int_bb_b0_regs[1] = {
+       &dbg_int0_bb_b0};
+
+static struct attn_hw_reg dbg_prty1_bb_b0 = {
+       0, 1, 0x10200, 0x1020c, 0x10208, 0x10204};
+
+static struct attn_hw_reg *dbg_prty_bb_b0_regs[1] = {
+       &dbg_prty1_bb_b0};
+
+static struct attn_hw_reg nig_int0_bb_b0 = {
+       0, 12, 0x500040, 0x50004c, 0x500048, 0x500044};
+
+static struct attn_hw_reg nig_int1_bb_b0 = {
+       1, 32, 0x500050, 0x50005c, 0x500058, 0x500054};
+
+static struct attn_hw_reg nig_int2_bb_b0 = {
+       2, 20, 0x500060, 0x50006c, 0x500068, 0x500064};
+
+static struct attn_hw_reg nig_int3_bb_b0 = {
+       3, 18, 0x500070, 0x50007c, 0x500078, 0x500074};
+
+static struct attn_hw_reg nig_int4_bb_b0 = {
+       4, 20, 0x500080, 0x50008c, 0x500088, 0x500084};
+
+static struct attn_hw_reg nig_int5_bb_b0 = {
+       5, 18, 0x500090, 0x50009c, 0x500098, 0x500094};
+
+static struct attn_hw_reg *nig_int_bb_b0_regs[6] = {
+       &nig_int0_bb_b0, &nig_int1_bb_b0, &nig_int2_bb_b0, &nig_int3_bb_b0,
+       &nig_int4_bb_b0, &nig_int5_bb_b0};
+
+static struct attn_hw_reg nig_prty0_bb_b0 = {
+       0, 1, 0x5000a0, 0x5000ac, 0x5000a8, 0x5000a4};
+
+static struct attn_hw_reg nig_prty1_bb_b0 = {
+       1, 31, 0x500200, 0x50020c, 0x500208, 0x500204};
+
+static struct attn_hw_reg nig_prty2_bb_b0 = {
+       2, 31, 0x500210, 0x50021c, 0x500218, 0x500214};
+
+static struct attn_hw_reg nig_prty3_bb_b0 = {
+       3, 31, 0x500220, 0x50022c, 0x500228, 0x500224};
+
+static struct attn_hw_reg nig_prty4_bb_b0 = {
+       4, 17, 0x500230, 0x50023c, 0x500238, 0x500234};
+
+static struct attn_hw_reg *nig_prty_bb_b0_regs[5] = {
+       &nig_prty0_bb_b0, &nig_prty1_bb_b0, &nig_prty2_bb_b0,
+       &nig_prty3_bb_b0, &nig_prty4_bb_b0};
+
+static struct attn_hw_reg ipc_int0_bb_b0 = {
+       0, 13, 0x2050c, 0x20518, 0x20514, 0x20510};
+
+static struct attn_hw_reg *ipc_int_bb_b0_regs[1] = {
+       &ipc_int0_bb_b0};
+
+static struct attn_hw_reg ipc_prty0_bb_b0 = {
+       0, 1, 0x2051c, 0x20528, 0x20524, 0x20520};
+
+static struct attn_hw_reg *ipc_prty_bb_b0_regs[1] = {
+       &ipc_prty0_bb_b0};
+
+static struct attn_hw_block attn_blocks[] = {
+       {"grc", {{1, 1, grc_int_bb_b0_regs, grc_prty_bb_b0_regs} } },
+       {"miscs", {{2, 1, miscs_int_bb_b0_regs, miscs_prty_bb_b0_regs} } },
+       {"misc", {{1, 0, misc_int_bb_b0_regs, NULL} } },
+       {"dbu", {{0, 0, NULL, NULL} } },
+       {"pglue_b", {{1, 2, pglue_b_int_bb_b0_regs,
+                     pglue_b_prty_bb_b0_regs} } },
+       {"cnig", {{1, 1, cnig_int_bb_b0_regs, cnig_prty_bb_b0_regs} } },
+       {"cpmu", {{1, 0, cpmu_int_bb_b0_regs, NULL} } },
+       {"ncsi", {{1, 1, ncsi_int_bb_b0_regs, ncsi_prty_bb_b0_regs} } },
+       {"opte", {{0, 2, NULL, opte_prty_bb_b0_regs} } },
+       {"bmb", {{12, 3, bmb_int_bb_b0_regs, bmb_prty_bb_b0_regs} } },
+       {"pcie", {{0, 1, NULL, pcie_prty_bb_b0_regs} } },
+       {"mcp", {{0, 0, NULL, NULL} } },
+       {"mcp2", {{0, 2, NULL, mcp2_prty_bb_b0_regs} } },
+       {"pswhst", {{1, 2, pswhst_int_bb_b0_regs, pswhst_prty_bb_b0_regs} } },
+       {"pswhst2", {{1, 1, pswhst2_int_bb_b0_regs,
+                     pswhst2_prty_bb_b0_regs} } },
+       {"pswrd", {{1, 1, pswrd_int_bb_b0_regs, pswrd_prty_bb_b0_regs} } },
+       {"pswrd2", {{1, 3, pswrd2_int_bb_b0_regs, pswrd2_prty_bb_b0_regs} } },
+       {"pswwr", {{1, 1, pswwr_int_bb_b0_regs, pswwr_prty_bb_b0_regs} } },
+       {"pswwr2", {{1, 5, pswwr2_int_bb_b0_regs, pswwr2_prty_bb_b0_regs} } },
+       {"pswrq", {{1, 1, pswrq_int_bb_b0_regs, pswrq_prty_bb_b0_regs} } },
+       {"pswrq2", {{1, 1, pswrq2_int_bb_b0_regs, pswrq2_prty_bb_b0_regs} } },
+       {"pglcs", {{1, 0, pglcs_int_bb_b0_regs, NULL} } },
+       {"dmae", {{1, 1, dmae_int_bb_b0_regs, dmae_prty_bb_b0_regs} } },
+       {"ptu", {{1, 1, ptu_int_bb_b0_regs, ptu_prty_bb_b0_regs} } },
+       {"tcm", {{3, 2, tcm_int_bb_b0_regs, tcm_prty_bb_b0_regs} } },
+       {"mcm", {{3, 2, mcm_int_bb_b0_regs, mcm_prty_bb_b0_regs} } },
+       {"ucm", {{3, 2, ucm_int_bb_b0_regs, ucm_prty_bb_b0_regs} } },
+       {"xcm", {{3, 2, xcm_int_bb_b0_regs, xcm_prty_bb_b0_regs} } },
+       {"ycm", {{3, 2, ycm_int_bb_b0_regs, ycm_prty_bb_b0_regs} } },
+       {"pcm", {{3, 1, pcm_int_bb_b0_regs, pcm_prty_bb_b0_regs} } },
+       {"qm", {{1, 4, qm_int_bb_b0_regs, qm_prty_bb_b0_regs} } },
+       {"tm", {{2, 1, tm_int_bb_b0_regs, tm_prty_bb_b0_regs} } },
+       {"dorq", {{1, 2, dorq_int_bb_b0_regs, dorq_prty_bb_b0_regs} } },
+       {"brb", {{12, 3, brb_int_bb_b0_regs, brb_prty_bb_b0_regs} } },
+       {"src", {{1, 0, src_int_bb_b0_regs, NULL} } },
+       {"prs", {{1, 3, prs_int_bb_b0_regs, prs_prty_bb_b0_regs} } },
+       {"tsdm", {{1, 1, tsdm_int_bb_b0_regs, tsdm_prty_bb_b0_regs} } },
+       {"msdm", {{1, 1, msdm_int_bb_b0_regs, msdm_prty_bb_b0_regs} } },
+       {"usdm", {{1, 1, usdm_int_bb_b0_regs, usdm_prty_bb_b0_regs} } },
+       {"xsdm", {{1, 1, xsdm_int_bb_b0_regs, xsdm_prty_bb_b0_regs} } },
+       {"ysdm", {{1, 1, ysdm_int_bb_b0_regs, ysdm_prty_bb_b0_regs} } },
+       {"psdm", {{1, 1, psdm_int_bb_b0_regs, psdm_prty_bb_b0_regs} } },
+       {"tsem", {{3, 3, tsem_int_bb_b0_regs, tsem_prty_bb_b0_regs} } },
+       {"msem", {{3, 2, msem_int_bb_b0_regs, msem_prty_bb_b0_regs} } },
+       {"usem", {{3, 2, usem_int_bb_b0_regs, usem_prty_bb_b0_regs} } },
+       {"xsem", {{3, 2, xsem_int_bb_b0_regs, xsem_prty_bb_b0_regs} } },
+       {"ysem", {{3, 2, ysem_int_bb_b0_regs, ysem_prty_bb_b0_regs} } },
+       {"psem", {{3, 3, psem_int_bb_b0_regs, psem_prty_bb_b0_regs} } },
+       {"rss", {{1, 1, rss_int_bb_b0_regs, rss_prty_bb_b0_regs} } },
+       {"tmld", {{1, 1, tmld_int_bb_b0_regs, tmld_prty_bb_b0_regs} } },
+       {"muld", {{1, 1, muld_int_bb_b0_regs, muld_prty_bb_b0_regs} } },
+       {"yuld", {{1, 1, yuld_int_bb_b0_regs, yuld_prty_bb_b0_regs} } },
+       {"xyld", {{1, 1, xyld_int_bb_b0_regs, xyld_prty_bb_b0_regs} } },
+       {"prm", {{1, 2, prm_int_bb_b0_regs, prm_prty_bb_b0_regs} } },
+       {"pbf_pb1", {{1, 1, pbf_pb1_int_bb_b0_regs,
+                     pbf_pb1_prty_bb_b0_regs} } },
+       {"pbf_pb2", {{1, 1, pbf_pb2_int_bb_b0_regs,
+                     pbf_pb2_prty_bb_b0_regs} } },
+       {"rpb", { {1, 1, rpb_int_bb_b0_regs, rpb_prty_bb_b0_regs} } },
+       {"btb", { {11, 2, btb_int_bb_b0_regs, btb_prty_bb_b0_regs} } },
+       {"pbf", { {1, 3, pbf_int_bb_b0_regs, pbf_prty_bb_b0_regs} } },
+       {"rdif", { {1, 1, rdif_int_bb_b0_regs, rdif_prty_bb_b0_regs} } },
+       {"tdif", { {1, 2, tdif_int_bb_b0_regs, tdif_prty_bb_b0_regs} } },
+       {"cdu", { {1, 1, cdu_int_bb_b0_regs, cdu_prty_bb_b0_regs} } },
+       {"ccfc", { {1, 2, ccfc_int_bb_b0_regs, ccfc_prty_bb_b0_regs} } },
+       {"tcfc", { {1, 2, tcfc_int_bb_b0_regs, tcfc_prty_bb_b0_regs} } },
+       {"igu", { {1, 3, igu_int_bb_b0_regs, igu_prty_bb_b0_regs} } },
+       {"cau", { {1, 1, cau_int_bb_b0_regs, cau_prty_bb_b0_regs} } },
+       {"umac", { {0, 0, NULL, NULL} } },
+       {"xmac", { {0, 0, NULL, NULL} } },
+       {"dbg", { {1, 1, dbg_int_bb_b0_regs, dbg_prty_bb_b0_regs} } },
+       {"nig", { {6, 5, nig_int_bb_b0_regs, nig_prty_bb_b0_regs} } },
+       {"wol", { {0, 0, NULL, NULL} } },
+       {"bmbn", { {0, 0, NULL, NULL} } },
+       {"ipc", { {1, 1, ipc_int_bb_b0_regs, ipc_prty_bb_b0_regs} } },
+       {"nwm", { {0, 0, NULL, NULL} } },
+       {"nws", { {0, 0, NULL, NULL} } },
+       {"ms", { {0, 0, NULL, NULL} } },
+       {"phy_pcie", { {0, 0, NULL, NULL} } },
+       {"misc_aeu", { {0, 0, NULL, NULL} } },
+       {"bar0_map", { {0, 0, NULL, NULL} } },};
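Taken together, the table above gives every block a name plus its interrupt and parity register lists for the BB B0 chip. A hedged sketch of walking it to report any latched interrupt status (this mirrors the table's intent, not a specific routine from the patch):

/* Illustrative dump of non-zero interrupt status across all blocks. */
static void example_dump_attn_blocks(struct qed_hwfn *p_hwfn,
				     struct qed_ptt *p_ptt)
{
	unsigned int i, j;

	for (i = 0; i < ARRAY_SIZE(attn_blocks); i++) {
		struct attn_hw_regs *regs = &attn_blocks[i].chip_regs[0];

		for (j = 0; j < regs->num_of_int_regs; j++) {
			struct attn_hw_reg *reg = regs->int_regs[j];
			u32 sts = qed_rd(p_hwfn, p_ptt, reg->sts_addr);

			if (sts)
				DP_INFO(p_hwfn, "%s int%d status: 0x%08x\n",
					attn_blocks[i].name, reg->reg_idx,
					sts);
		}
	}
}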
+
+/* Specific HW attention callbacks */
+static int qed_mcp_attn_cb(struct qed_hwfn *p_hwfn)
+{
+       u32 tmp = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, MCP_REG_CPU_STATE);
+
+       /* This might occur on certain instances; log it once, then mask it */
+       DP_INFO(p_hwfn->cdev, "MCP_REG_CPU_STATE: %08x - Masking...\n",
+               tmp);
+       qed_wr(p_hwfn, p_hwfn->p_dpc_ptt, MCP_REG_CPU_EVENT_MASK,
+              0xffffffff);
+
+       return 0;
+}
+
+#define QED_PSWHST_ATTENTION_INCORRECT_ACCESS          (0x1)
+#define ATTENTION_INCORRECT_ACCESS_WR_MASK             (0x1)
+#define ATTENTION_INCORRECT_ACCESS_WR_SHIFT            (0)
+#define ATTENTION_INCORRECT_ACCESS_CLIENT_MASK         (0xf)
+#define ATTENTION_INCORRECT_ACCESS_CLIENT_SHIFT                (1)
+#define ATTENTION_INCORRECT_ACCESS_VF_VALID_MASK       (0x1)
+#define ATTENTION_INCORRECT_ACCESS_VF_VALID_SHIFT      (5)
+#define ATTENTION_INCORRECT_ACCESS_VF_ID_MASK          (0xff)
+#define ATTENTION_INCORRECT_ACCESS_VF_ID_SHIFT         (6)
+#define ATTENTION_INCORRECT_ACCESS_PF_ID_MASK          (0xf)
+#define ATTENTION_INCORRECT_ACCESS_PF_ID_SHIFT         (14)
+#define ATTENTION_INCORRECT_ACCESS_BYTE_EN_MASK                (0xff)
+#define ATTENTION_INCORRECT_ACCESS_BYTE_EN_SHIFT       (18)
+static int qed_pswhst_attn_cb(struct qed_hwfn *p_hwfn)
+{
+       u32 tmp = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+                        PSWHST_REG_INCORRECT_ACCESS_VALID);
+
+       if (tmp & QED_PSWHST_ATTENTION_INCORRECT_ACCESS) {
+               u32 addr, data, length;
+
+               addr = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+                             PSWHST_REG_INCORRECT_ACCESS_ADDRESS);
+               data = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+                             PSWHST_REG_INCORRECT_ACCESS_DATA);
+               length = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+                               PSWHST_REG_INCORRECT_ACCESS_LENGTH);
+
+               DP_INFO(p_hwfn->cdev,
+                       "Incorrect access to %08x of length %08x - PF [%02x] VF [%04x] [valid %02x] client [%02x] write [%02x] Byte-Enable [%04x] [%08x]\n",
+                       addr, length,
+                       (u8) GET_FIELD(data, ATTENTION_INCORRECT_ACCESS_PF_ID),
+                       (u8) GET_FIELD(data, ATTENTION_INCORRECT_ACCESS_VF_ID),
+                       (u8) GET_FIELD(data,
+                                      ATTENTION_INCORRECT_ACCESS_VF_VALID),
+                       (u8) GET_FIELD(data,
+                                      ATTENTION_INCORRECT_ACCESS_CLIENT),
+                       (u8) GET_FIELD(data, ATTENTION_INCORRECT_ACCESS_WR),
+                       (u8) GET_FIELD(data,
+                                      ATTENTION_INCORRECT_ACCESS_BYTE_EN),
+                       data);
+       }
+
+       return 0;
+}
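The GET_FIELD() calls above pull sub-fields out of the raw details word using the *_MASK/*_SHIFT pairs defined just before the function; the helper itself lives in the driver's common headers and presumably token-pastes those suffixes. An equivalent open-coded extraction for one field, as a sketch:

/* Open-coded equivalent of GET_FIELD(data, ATTENTION_INCORRECT_ACCESS_PF_ID):
 * shift down to the field's position, then apply its (unshifted) width mask.
 */
static inline u8 example_incorrect_access_pf_id(u32 data)
{
	return (u8)((data >> ATTENTION_INCORRECT_ACCESS_PF_ID_SHIFT) &
		    ATTENTION_INCORRECT_ACCESS_PF_ID_MASK);
}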
+
+#define QED_GRC_ATTENTION_VALID_BIT    (1 << 0)
+#define QED_GRC_ATTENTION_ADDRESS_MASK (0x7fffff)
+#define QED_GRC_ATTENTION_ADDRESS_SHIFT        (0)
+#define QED_GRC_ATTENTION_RDWR_BIT     (1 << 23)
+#define QED_GRC_ATTENTION_MASTER_MASK  (0xf)
+#define QED_GRC_ATTENTION_MASTER_SHIFT (24)
+#define QED_GRC_ATTENTION_PF_MASK      (0xf)
+#define QED_GRC_ATTENTION_PF_SHIFT     (0)
+#define QED_GRC_ATTENTION_VF_MASK      (0xff)
+#define QED_GRC_ATTENTION_VF_SHIFT     (4)
+#define QED_GRC_ATTENTION_PRIV_MASK    (0x3)
+#define QED_GRC_ATTENTION_PRIV_SHIFT   (14)
+#define QED_GRC_ATTENTION_PRIV_VF      (0)
+static const char *attn_master_to_str(u8 master)
+{
+       switch (master) {
+       case 1: return "PXP";
+       case 2: return "MCP";
+       case 3: return "MSDM";
+       case 4: return "PSDM";
+       case 5: return "YSDM";
+       case 6: return "USDM";
+       case 7: return "TSDM";
+       case 8: return "XSDM";
+       case 9: return "DBU";
+       case 10: return "DMAE";
+       default:
+               return "Unknown";
+       }
+}
+
+static int qed_grc_attn_cb(struct qed_hwfn *p_hwfn)
+{
+       u32 tmp, tmp2;
+
+       /* We've already cleared the timeout interrupt register, so we learn
+        * of interrupts via the validity register
+        */
+       tmp = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+                    GRC_REG_TIMEOUT_ATTN_ACCESS_VALID);
+       if (!(tmp & QED_GRC_ATTENTION_VALID_BIT))
+               goto out;
+
+       /* Read the GRC timeout information */
+       tmp = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+                    GRC_REG_TIMEOUT_ATTN_ACCESS_DATA_0);
+       tmp2 = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+                     GRC_REG_TIMEOUT_ATTN_ACCESS_DATA_1);
+
+       DP_INFO(p_hwfn->cdev,
+               "GRC timeout [%08x:%08x] - %s Address [%08x] [Master %s] [PF: %02x %s %02x]\n",
+               tmp2, tmp,
+               (tmp & QED_GRC_ATTENTION_RDWR_BIT) ? "Write to" : "Read from",
+               GET_FIELD(tmp, QED_GRC_ATTENTION_ADDRESS) << 2,
+               attn_master_to_str(GET_FIELD(tmp, QED_GRC_ATTENTION_MASTER)),
+               GET_FIELD(tmp2, QED_GRC_ATTENTION_PF),
+               (GET_FIELD(tmp2, QED_GRC_ATTENTION_PRIV) ==
+                QED_GRC_ATTENTION_PRIV_VF) ? "VF" : "(Irrelevant)",
+               GET_FIELD(tmp2, QED_GRC_ATTENTION_VF));
+
+out:
+       /* Regardless of anything else, clear the validity bit */
+       qed_wr(p_hwfn, p_hwfn->p_dpc_ptt,
+              GRC_REG_TIMEOUT_ATTN_ACCESS_VALID, 0);
+       return 0;
+}
+
+#define PGLUE_ATTENTION_VALID                  (1 << 29)
+#define PGLUE_ATTENTION_RD_VALID               (1 << 26)
+#define PGLUE_ATTENTION_DETAILS_PFID_MASK      (0xf)
+#define PGLUE_ATTENTION_DETAILS_PFID_SHIFT     (20)
+#define PGLUE_ATTENTION_DETAILS_VF_VALID_MASK  (0x1)
+#define PGLUE_ATTENTION_DETAILS_VF_VALID_SHIFT (19)
+#define PGLUE_ATTENTION_DETAILS_VFID_MASK      (0xff)
+#define PGLUE_ATTENTION_DETAILS_VFID_SHIFT     (24)
+#define PGLUE_ATTENTION_DETAILS2_WAS_ERR_MASK  (0x1)
+#define PGLUE_ATTENTION_DETAILS2_WAS_ERR_SHIFT (21)
+#define PGLUE_ATTENTION_DETAILS2_BME_MASK      (0x1)
+#define PGLUE_ATTENTION_DETAILS2_BME_SHIFT     (22)
+#define PGLUE_ATTENTION_DETAILS2_FID_EN_MASK   (0x1)
+#define PGLUE_ATTENTION_DETAILS2_FID_EN_SHIFT  (23)
+#define PGLUE_ATTENTION_ICPL_VALID             (1 << 23)
+#define PGLUE_ATTENTION_ZLR_VALID              (1 << 25)
+#define PGLUE_ATTENTION_ILT_VALID              (1 << 23)
+static int qed_pglub_rbc_attn_cb(struct qed_hwfn *p_hwfn)
+{
+       u32 tmp;
+
+       tmp = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+                    PGLUE_B_REG_TX_ERR_WR_DETAILS2);
+       if (tmp & PGLUE_ATTENTION_VALID) {
+               u32 addr_lo, addr_hi, details;
+
+               addr_lo = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+                                PGLUE_B_REG_TX_ERR_WR_ADD_31_0);
+               addr_hi = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+                                PGLUE_B_REG_TX_ERR_WR_ADD_63_32);
+               details = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+                                PGLUE_B_REG_TX_ERR_WR_DETAILS);
+
+               DP_INFO(p_hwfn,
+                       "Illegal write by chip to [%08x:%08x] blocked.\n"
+                       "Details: %08x [PFID %02x, VFID %02x, VF_VALID %02x]\n"
+                       "Details2 %08x [Was_error %02x BME deassert %02x FID_enable deassert %02x]\n",
+                       addr_hi, addr_lo, details,
+                       (u8)GET_FIELD(details, PGLUE_ATTENTION_DETAILS_PFID),
+                       (u8)GET_FIELD(details, PGLUE_ATTENTION_DETAILS_VFID),
+                       GET_FIELD(details,
+                                 PGLUE_ATTENTION_DETAILS_VF_VALID) ? 1 : 0,
+                       tmp,
+                       GET_FIELD(tmp,
+                                 PGLUE_ATTENTION_DETAILS2_WAS_ERR) ? 1 : 0,
+                       GET_FIELD(tmp,
+                                 PGLUE_ATTENTION_DETAILS2_BME) ? 1 : 0,
+                       GET_FIELD(tmp,
+                                 PGLUE_ATTENTION_DETAILS2_FID_EN) ? 1 : 0);
+       }
+
+       tmp = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+                    PGLUE_B_REG_TX_ERR_RD_DETAILS2);
+       if (tmp & PGLUE_ATTENTION_RD_VALID) {
+               u32 addr_lo, addr_hi, details;
+
+               addr_lo = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+                                PGLUE_B_REG_TX_ERR_RD_ADD_31_0);
+               addr_hi = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+                                PGLUE_B_REG_TX_ERR_RD_ADD_63_32);
+               details = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+                                PGLUE_B_REG_TX_ERR_RD_DETAILS);
+
+               DP_INFO(p_hwfn,
+                       "Illegal read by chip from [%08x:%08x] blocked.\n"
+                       " Details: %08x [PFID %02x, VFID %02x, VF_VALID %02x]\n"
+                       " Details2 %08x [Was_error %02x BME deassert %02x FID_enable deassert %02x]\n",
+                       addr_hi, addr_lo, details,
+                       (u8)GET_FIELD(details, PGLUE_ATTENTION_DETAILS_PFID),
+                       (u8)GET_FIELD(details, PGLUE_ATTENTION_DETAILS_VFID),
+                       GET_FIELD(details,
+                                 PGLUE_ATTENTION_DETAILS_VF_VALID) ? 1 : 0,
+                       tmp,
+                       GET_FIELD(tmp, PGLUE_ATTENTION_DETAILS2_WAS_ERR) ? 1
+                                                                        : 0,
+                       GET_FIELD(tmp, PGLUE_ATTENTION_DETAILS2_BME) ? 1 : 0,
+                       GET_FIELD(tmp, PGLUE_ATTENTION_DETAILS2_FID_EN) ? 1
+                                                                       : 0);
+       }
+
+       tmp = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+                    PGLUE_B_REG_TX_ERR_WR_DETAILS_ICPL);
+       if (tmp & PGLUE_ATTENTION_ICPL_VALID)
+               DP_INFO(p_hwfn, "ICPL error - %08x\n", tmp);
+
+       tmp = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+                    PGLUE_B_REG_MASTER_ZLR_ERR_DETAILS);
+       if (tmp & PGLUE_ATTENTION_ZLR_VALID) {
+               u32 addr_hi, addr_lo;
+
+               addr_lo = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+                                PGLUE_B_REG_MASTER_ZLR_ERR_ADD_31_0);
+               addr_hi = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+                                PGLUE_B_REG_MASTER_ZLR_ERR_ADD_63_32);
+
+               DP_INFO(p_hwfn, "ZLR error - %08x [Address %08x:%08x]\n",
+                       tmp, addr_hi, addr_lo);
+       }
+
+       tmp = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+                    PGLUE_B_REG_VF_ILT_ERR_DETAILS2);
+       if (tmp & PGLUE_ATTENTION_ILT_VALID) {
+               u32 addr_hi, addr_lo, details;
+
+               addr_lo = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+                                PGLUE_B_REG_VF_ILT_ERR_ADD_31_0);
+               addr_hi = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+                                PGLUE_B_REG_VF_ILT_ERR_ADD_63_32);
+               details = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+                                PGLUE_B_REG_VF_ILT_ERR_DETAILS);
+
+               DP_INFO(p_hwfn,
+                       "ILT error - Details %08x Details2 %08x [Address %08x:%08x]\n",
+                       details, tmp, addr_hi, addr_lo);
+       }
+
+       /* Clear the indications */
+       qed_wr(p_hwfn, p_hwfn->p_dpc_ptt,
+              PGLUE_B_REG_LATCHED_ERRORS_CLR, (1 << 2));
+
+       return 0;
+}
+
+#define QED_DORQ_ATTENTION_REASON_MASK (0xfffff)
+#define QED_DORQ_ATTENTION_OPAQUE_MASK (0xffff)
+#define QED_DORQ_ATTENTION_SIZE_MASK   (0x7f)
+#define QED_DORQ_ATTENTION_SIZE_SHIFT  (16)
+static int qed_dorq_attn_cb(struct qed_hwfn *p_hwfn)
+{
+       u32 reason;
+
+       reason = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, DORQ_REG_DB_DROP_REASON) &
+                       QED_DORQ_ATTENTION_REASON_MASK;
+       if (reason) {
+               u32 details = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+                                    DORQ_REG_DB_DROP_DETAILS);
+
+               DP_INFO(p_hwfn->cdev,
+                       "DORQ db_drop: address 0x%08x Opaque FID 0x%04x Size [bytes] 0x%08x Reason: 0x%08x\n",
+                       qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+                              DORQ_REG_DB_DROP_DETAILS_ADDRESS),
+                       (u16)(details & QED_DORQ_ATTENTION_OPAQUE_MASK),
+                       GET_FIELD(details, QED_DORQ_ATTENTION_SIZE) * 4,
+                       reason);
+       }
+
+       return -EINVAL;
+}
+
+/* Note: aeu_invert_reg must be defined in the same bit order as the HW */
+static struct aeu_invert_reg aeu_descs[NUM_ATTN_REGS] = {
+       {
+               {       /* After Invert 1 */
+                       {"GPIO0 function%d",
+                        (32 << ATTENTION_LENGTH_SHIFT), NULL, MAX_BLOCK_ID},
+               }
+       },
+
+       {
+               {       /* After Invert 2 */
+                       {"PGLUE config_space", ATTENTION_SINGLE,
+                        NULL, MAX_BLOCK_ID},
+                       {"PGLUE misc_flr", ATTENTION_SINGLE,
+                        NULL, MAX_BLOCK_ID},
+                       {"PGLUE B RBC", ATTENTION_PAR_INT,
+                        qed_pglub_rbc_attn_cb, BLOCK_PGLUE_B},
+                       {"PGLUE misc_mctp", ATTENTION_SINGLE,
+                        NULL, MAX_BLOCK_ID},
+                       {"Flash event", ATTENTION_SINGLE, NULL, MAX_BLOCK_ID},
+                       {"SMB event", ATTENTION_SINGLE, NULL, MAX_BLOCK_ID},
+                       {"Main Power", ATTENTION_SINGLE, NULL, MAX_BLOCK_ID},
+                       {"SW timers #%d", (8 << ATTENTION_LENGTH_SHIFT) |
+                                         (1 << ATTENTION_OFFSET_SHIFT),
+                        NULL, MAX_BLOCK_ID},
+                       {"PCIE glue/PXP VPD %d",
+                        (16 << ATTENTION_LENGTH_SHIFT), NULL, BLOCK_PGLCS},
+               }
+       },
+
+       {
+               {       /* After Invert 3 */
+                       {"General Attention %d",
+                        (32 << ATTENTION_LENGTH_SHIFT), NULL, MAX_BLOCK_ID},
+               }
+       },
+
+       {
+               {       /* After Invert 4 */
+                       {"General Attention 32", ATTENTION_SINGLE,
+                        NULL, MAX_BLOCK_ID},
+                       {"General Attention %d",
+                        (2 << ATTENTION_LENGTH_SHIFT) |
+                        (33 << ATTENTION_OFFSET_SHIFT), NULL, MAX_BLOCK_ID},
+                       {"General Attention 35", ATTENTION_SINGLE,
+                        NULL, MAX_BLOCK_ID},
+                       {"CNIG port %d", (4 << ATTENTION_LENGTH_SHIFT),
+                        NULL, BLOCK_CNIG},
+                       {"MCP CPU", ATTENTION_SINGLE,
+                        qed_mcp_attn_cb, MAX_BLOCK_ID},
+                       {"MCP Watchdog timer", ATTENTION_SINGLE,
+                        NULL, MAX_BLOCK_ID},
+                       {"MCP M2P", ATTENTION_SINGLE, NULL, MAX_BLOCK_ID},
+                       {"AVS stop status ready", ATTENTION_SINGLE,
+                        NULL, MAX_BLOCK_ID},
+                       {"MSTAT", ATTENTION_PAR_INT, NULL, MAX_BLOCK_ID},
+                       {"MSTAT per-path", ATTENTION_PAR_INT,
+                        NULL, MAX_BLOCK_ID},
+                       {"Reserved %d", (6 << ATTENTION_LENGTH_SHIFT),
+                        NULL, MAX_BLOCK_ID},
+                       {"NIG", ATTENTION_PAR_INT, NULL, BLOCK_NIG},
+                       {"BMB/OPTE/MCP", ATTENTION_PAR_INT, NULL, BLOCK_BMB},
+                       {"BTB", ATTENTION_PAR_INT, NULL, BLOCK_BTB},
+                       {"BRB", ATTENTION_PAR_INT, NULL, BLOCK_BRB},
+                       {"PRS", ATTENTION_PAR_INT, NULL, BLOCK_PRS},
+               }
+       },
+
+       {
+               {       /* After Invert 5 */
+                       {"SRC", ATTENTION_PAR_INT, NULL, BLOCK_SRC},
+                       {"PB Client1", ATTENTION_PAR_INT, NULL, BLOCK_PBF_PB1},
+                       {"PB Client2", ATTENTION_PAR_INT, NULL, BLOCK_PBF_PB2},
+                       {"RPB", ATTENTION_PAR_INT, NULL, BLOCK_RPB},
+                       {"PBF", ATTENTION_PAR_INT, NULL, BLOCK_PBF},
+                       {"QM", ATTENTION_PAR_INT, NULL, BLOCK_QM},
+                       {"TM", ATTENTION_PAR_INT, NULL, BLOCK_TM},
+                       {"MCM",  ATTENTION_PAR_INT, NULL, BLOCK_MCM},
+                       {"MSDM", ATTENTION_PAR_INT, NULL, BLOCK_MSDM},
+                       {"MSEM", ATTENTION_PAR_INT, NULL, BLOCK_MSEM},
+                       {"PCM", ATTENTION_PAR_INT, NULL, BLOCK_PCM},
+                       {"PSDM", ATTENTION_PAR_INT, NULL, BLOCK_PSDM},
+                       {"PSEM", ATTENTION_PAR_INT, NULL, BLOCK_PSEM},
+                       {"TCM", ATTENTION_PAR_INT, NULL, BLOCK_TCM},
+                       {"TSDM", ATTENTION_PAR_INT, NULL, BLOCK_TSDM},
+                       {"TSEM", ATTENTION_PAR_INT, NULL, BLOCK_TSEM},
+               }
+       },
+
+       {
+               {       /* After Invert 6 */
+                       {"UCM", ATTENTION_PAR_INT, NULL, BLOCK_UCM},
+                       {"USDM", ATTENTION_PAR_INT, NULL, BLOCK_USDM},
+                       {"USEM", ATTENTION_PAR_INT, NULL, BLOCK_USEM},
+                       {"XCM", ATTENTION_PAR_INT, NULL, BLOCK_XCM},
+                       {"XSDM", ATTENTION_PAR_INT, NULL, BLOCK_XSDM},
+                       {"XSEM", ATTENTION_PAR_INT, NULL, BLOCK_XSEM},
+                       {"YCM", ATTENTION_PAR_INT, NULL, BLOCK_YCM},
+                       {"YSDM", ATTENTION_PAR_INT, NULL, BLOCK_YSDM},
+                       {"YSEM", ATTENTION_PAR_INT, NULL, BLOCK_YSEM},
+                       {"XYLD", ATTENTION_PAR_INT, NULL, BLOCK_XYLD},
+                       {"TMLD", ATTENTION_PAR_INT, NULL, BLOCK_TMLD},
+                       {"MYLD", ATTENTION_PAR_INT, NULL, BLOCK_MULD},
+                       {"YULD", ATTENTION_PAR_INT, NULL, BLOCK_YULD},
+                       {"DORQ", ATTENTION_PAR_INT,
+                        qed_dorq_attn_cb, BLOCK_DORQ},
+                       {"DBG", ATTENTION_PAR_INT, NULL, BLOCK_DBG},
+                       {"IPC", ATTENTION_PAR_INT, NULL, BLOCK_IPC},
+               }
+       },
+
+       {
+               {       /* After Invert 7 */
+                       {"CCFC", ATTENTION_PAR_INT, NULL, BLOCK_CCFC},
+                       {"CDU", ATTENTION_PAR_INT, NULL, BLOCK_CDU},
+                       {"DMAE", ATTENTION_PAR_INT, NULL, BLOCK_DMAE},
+                       {"IGU", ATTENTION_PAR_INT, NULL, BLOCK_IGU},
+                       {"ATC", ATTENTION_PAR_INT, NULL, MAX_BLOCK_ID},
+                       {"CAU", ATTENTION_PAR_INT, NULL, BLOCK_CAU},
+                       {"PTU", ATTENTION_PAR_INT, NULL, BLOCK_PTU},
+                       {"PRM", ATTENTION_PAR_INT, NULL, BLOCK_PRM},
+                       {"TCFC", ATTENTION_PAR_INT, NULL, BLOCK_TCFC},
+                       {"RDIF", ATTENTION_PAR_INT, NULL, BLOCK_RDIF},
+                       {"TDIF", ATTENTION_PAR_INT, NULL, BLOCK_TDIF},
+                       {"RSS", ATTENTION_PAR_INT, NULL, BLOCK_RSS},
+                       {"MISC", ATTENTION_PAR_INT, NULL, BLOCK_MISC},
+                       {"MISCS", ATTENTION_PAR_INT, NULL, BLOCK_MISCS},
+                       {"PCIE", ATTENTION_PAR, NULL, BLOCK_PCIE},
+                       {"Vaux PCI core", ATTENTION_SINGLE, NULL, BLOCK_PGLCS},
+                       {"PSWRQ", ATTENTION_PAR_INT, NULL, BLOCK_PSWRQ},
+               }
+       },
+
+       {
+               {       /* After Invert 8 */
+                       {"PSWRQ (pci_clk)", ATTENTION_PAR_INT,
+                        NULL, BLOCK_PSWRQ2},
+                       {"PSWWR", ATTENTION_PAR_INT, NULL, BLOCK_PSWWR},
+                       {"PSWWR (pci_clk)", ATTENTION_PAR_INT,
+                        NULL, BLOCK_PSWWR2},
+                       {"PSWRD", ATTENTION_PAR_INT, NULL, BLOCK_PSWRD},
+                       {"PSWRD (pci_clk)", ATTENTION_PAR_INT,
+                        NULL, BLOCK_PSWRD2},
+                       {"PSWHST", ATTENTION_PAR_INT,
+                        qed_pswhst_attn_cb, BLOCK_PSWHST},
+                       {"PSWHST (pci_clk)", ATTENTION_PAR_INT,
+                        NULL, BLOCK_PSWHST2},
+                       {"GRC", ATTENTION_PAR_INT,
+                        qed_grc_attn_cb, BLOCK_GRC},
+                       {"CPMU", ATTENTION_PAR_INT, NULL, BLOCK_CPMU},
+                       {"NCSI", ATTENTION_PAR_INT, NULL, BLOCK_NCSI},
+                       {"MSEM PRAM", ATTENTION_PAR, NULL, MAX_BLOCK_ID},
+                       {"PSEM PRAM", ATTENTION_PAR, NULL, MAX_BLOCK_ID},
+                       {"TSEM PRAM", ATTENTION_PAR, NULL, MAX_BLOCK_ID},
+                       {"USEM PRAM", ATTENTION_PAR, NULL, MAX_BLOCK_ID},
+                       {"XSEM PRAM", ATTENTION_PAR, NULL, MAX_BLOCK_ID},
+                       {"YSEM PRAM", ATTENTION_PAR, NULL, MAX_BLOCK_ID},
+                       {"pxp_misc_mps", ATTENTION_PAR, NULL, BLOCK_PGLCS},
+                       {"PCIE glue/PXP Exp. ROM", ATTENTION_SINGLE,
+                        NULL, BLOCK_PGLCS},
+                       {"PERST_B assertion", ATTENTION_SINGLE,
+                        NULL, MAX_BLOCK_ID},
+                       {"PERST_B deassertion", ATTENTION_SINGLE,
+                        NULL, MAX_BLOCK_ID},
+                       {"Reserved %d", (2 << ATTENTION_LENGTH_SHIFT),
+                        NULL, MAX_BLOCK_ID},
+               }
+       },
+
+       {
+               {       /* After Invert 9 */
+                       {"MCP Latched memory", ATTENTION_PAR,
+                        NULL, MAX_BLOCK_ID},
+                       {"MCP Latched scratchpad cache", ATTENTION_SINGLE,
+                        NULL, MAX_BLOCK_ID},
+                       {"MCP Latched ump_tx", ATTENTION_PAR,
+                        NULL, MAX_BLOCK_ID},
+                       {"MCP Latched scratchpad", ATTENTION_PAR,
+                        NULL, MAX_BLOCK_ID},
+                       {"Reserved %d", (28 << ATTENTION_LENGTH_SHIFT),
+                        NULL, MAX_BLOCK_ID},
+               }
+       },
+};
+
+#define ATTN_STATE_BITS         (0xfff)
+#define ATTN_BITS_MASKABLE      (0x3ff)
+struct qed_sb_attn_info {
+       /* Virtual & Physical address of the SB */
+       struct atten_status_block       *sb_attn;
+       dma_addr_t                      sb_phys;
+
+       /* Last seen running index */
+       u16                             index;
+
+       /* A mask of the AEU bits resulting in a parity error */
+       u32                             parity_mask[NUM_ATTN_REGS];
+
+       /* A pointer to the attention description structure */
+       struct aeu_invert_reg           *p_aeu_desc;
+
+       /* Previously asserted attentions, which are still unasserted */
+       u16                             known_attn;
+
+       /* Cleanup address for the link's general hw attention */
+       u32                             mfw_attn_addr;
+};
+
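+/* Sample the attention status block's running index; return QED_SB_ATT_IDX
+ * if it advanced since the last sample, 0 otherwise.
+ */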
+static inline u16 qed_attn_update_idx(struct qed_hwfn *p_hwfn,
+                                     struct qed_sb_attn_info   *p_sb_desc)
+{
+       u16     rc = 0;
+       u16     index;
+
+       /* Make certain the HW write took effect */
+       mmiowb();
+
+       index = le16_to_cpu(p_sb_desc->sb_attn->sb_index);
+       if (p_sb_desc->index != index) {
+               p_sb_desc->index = index;
+               rc = QED_SB_ATT_IDX;
+       }
+
+       /* Make certain we got a consistent view with HW */
+       mmiowb();
+
+       return rc;
+}
+
+/**
+ *  @brief qed_int_assertion - handles asserted attention bits
+ *
+ *  @param p_hwfn
+ *  @param asserted_bits newly asserted bits
+ *  @return int
+ */
+static int qed_int_assertion(struct qed_hwfn *p_hwfn,
+                            u16 asserted_bits)
+{
+       struct qed_sb_attn_info *sb_attn_sw = p_hwfn->p_sb_attn;
+       u32 igu_mask;
+
+       /* Mask the source of the attention in the IGU */
+       igu_mask = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+                         IGU_REG_ATTENTION_ENABLE);
+       DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, "IGU mask: 0x%08x --> 0x%08x\n",
+                  igu_mask, igu_mask & ~(asserted_bits & ATTN_BITS_MASKABLE));
+       igu_mask &= ~(asserted_bits & ATTN_BITS_MASKABLE);
+       qed_wr(p_hwfn, p_hwfn->p_dpc_ptt, IGU_REG_ATTENTION_ENABLE, igu_mask);
+
+       DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
+                  "inner known ATTN state: 0x%04x --> 0x%04x\n",
+                  sb_attn_sw->known_attn,
+                  sb_attn_sw->known_attn | asserted_bits);
+       sb_attn_sw->known_attn |= asserted_bits;
+
+       /* Handle MCP events */
+       if (asserted_bits & 0x100) {
+               qed_mcp_handle_events(p_hwfn, p_hwfn->p_dpc_ptt);
+               /* Clean the MCP attention */
+               qed_wr(p_hwfn, p_hwfn->p_dpc_ptt,
+                      sb_attn_sw->mfw_attn_addr, 0);
+       }
+
+       DIRECT_REG_WR((u8 __iomem *)p_hwfn->regview +
+                     GTT_BAR0_MAP_REG_IGU_CMD +
+                     ((IGU_CMD_ATTN_BIT_SET_UPPER -
+                       IGU_CMD_INT_ACK_BASE) << 3),
+                     (u32)asserted_bits);
+
+       DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, "set cmd IGU: 0x%04x\n",
+                  asserted_bits);
+
+       return 0;
+}
+
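+/* Print every asserted bit of a single interrupt/parity status register of a
+ * HW block, noting which of them are masked.
+ */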
+static void qed_int_deassertion_print_bit(struct qed_hwfn *p_hwfn,
+                                         struct attn_hw_reg *p_reg_desc,
+                                         struct attn_hw_block *p_block,
+                                         enum qed_attention_type type,
+                                         u32 val, u32 mask)
+{
+       int j;
+
+       for (j = 0; j < p_reg_desc->num_of_bits; j++) {
+               if (!(val & (1 << j)))
+                       continue;
+
+               DP_NOTICE(p_hwfn,
+                         "%s (%s): reg %d [0x%08x], bit %d [%s]\n",
+                         p_block->name,
+                         type == QED_ATTN_TYPE_ATTN ? "Interrupt" :
+                                                      "Parity",
+                         p_reg_desc->reg_idx, p_reg_desc->sts_addr,
+                         j, (mask & (1 << j)) ? " [MASKED]" : "");
+       }
+}
+
+/**
+ * @brief qed_int_deassertion_aeu_bit - handles the effects of a single
+ * cause of the attention
+ *
+ * @param p_hwfn
+ * @param p_aeu - descriptor of an AEU bit which caused the attention
+ * @param aeu_en_reg - register offset of the AEU enable reg. which routed
+ *  this bit to this group.
+ * @param bitmask - mask of the bit(s) that caused the attention
+ *
+ * @return int
+ */
+static int
+qed_int_deassertion_aeu_bit(struct qed_hwfn *p_hwfn,
+                           struct aeu_invert_reg_bit *p_aeu,
+                           u32 aeu_en_reg,
+                           u32 bitmask)
+{
+       int rc = -EINVAL;
+       u32 val;
+
+       DP_INFO(p_hwfn, "Deasserted attention `%s'[%08x]\n",
+               p_aeu->bit_name, bitmask);
+
+       /* Call callback before clearing the interrupt status */
+       if (p_aeu->cb) {
+               DP_INFO(p_hwfn, "`%s (attention)': Calling Callback function\n",
+                       p_aeu->bit_name);
+               rc = p_aeu->cb(p_hwfn);
+       }
+
+       /* Handle HW block interrupt registers */
+       if (p_aeu->block_index != MAX_BLOCK_ID) {
+               struct attn_hw_block *p_block;
+               u32 mask;
+               int i;
+
+               p_block = &attn_blocks[p_aeu->block_index];
+
+               /* Handle each interrupt register */
+               for (i = 0; i < p_block->chip_regs[0].num_of_int_regs; i++) {
+                       struct attn_hw_reg *p_reg_desc;
+                       u32 sts_addr;
+
+                       p_reg_desc = p_block->chip_regs[0].int_regs[i];
+
+                       /* In case of fatal attention, don't clear the status
+                        * so it would appear in following idle check.
+                        */
+                       if (rc == 0)
+                               sts_addr = p_reg_desc->sts_clr_addr;
+                       else
+                               sts_addr = p_reg_desc->sts_addr;
+
+                       val = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, sts_addr);
+                       mask = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+                                     p_reg_desc->mask_addr);
+                       qed_int_deassertion_print_bit(p_hwfn, p_reg_desc,
+                                                     p_block,
+                                                     QED_ATTN_TYPE_ATTN,
+                                                     val, mask);
+               }
+       }
+
+       /* If the attention is benign, no need to prevent it */
+       if (!rc)
+               goto out;
+
+       /* Prevent this Attention from being asserted in the future */
+       val = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en_reg);
+       qed_wr(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en_reg, (val & ~bitmask));
+       DP_INFO(p_hwfn, "`%s' - Disabled future attentions\n",
+               p_aeu->bit_name);
+
+out:
+       return rc;
+}
+
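+/* Walk all parity status registers of a HW block and print their set bits */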
+static void qed_int_parity_print(struct qed_hwfn *p_hwfn,
+                                struct aeu_invert_reg_bit *p_aeu,
+                                struct attn_hw_block *p_block,
+                                u8 bit_index)
+{
+       int i;
+
+       for (i = 0; i < p_block->chip_regs[0].num_of_prty_regs; i++) {
+               struct attn_hw_reg *p_reg_desc;
+               u32 val, mask;
+
+               p_reg_desc = p_block->chip_regs[0].prty_regs[i];
+
+               val = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+                            p_reg_desc->sts_clr_addr);
+               mask = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+                             p_reg_desc->mask_addr);
+               qed_int_deassertion_print_bit(p_hwfn, p_reg_desc,
+                                             p_block,
+                                             QED_ATTN_TYPE_PARITY,
+                                             val, mask);
+       }
+}
+
+/**
+ * @brief qed_int_deassertion_parity - handle a single parity AEU source
+ *
+ * @param p_hwfn
+ * @param p_aeu - descriptor of an AEU bit which caused the parity
+ * @param bit_index
+ */
+static void qed_int_deassertion_parity(struct qed_hwfn *p_hwfn,
+                                      struct aeu_invert_reg_bit *p_aeu,
+                                      u8 bit_index)
+{
+       u32 block_id = p_aeu->block_index;
+
+       DP_INFO(p_hwfn->cdev, "%s[%d] parity attention is set\n",
+               p_aeu->bit_name, bit_index);
+
+       if (block_id != MAX_BLOCK_ID) {
+               qed_int_parity_print(p_hwfn, p_aeu, &attn_blocks[block_id],
+                                    bit_index);
+
+               /* In BB, there's a single parity bit for several blocks */
+               if (block_id == BLOCK_BTB) {
+                       qed_int_parity_print(p_hwfn, p_aeu,
+                                            &attn_blocks[BLOCK_OPTE],
+                                            bit_index);
+                       qed_int_parity_print(p_hwfn, p_aeu,
+                                            &attn_blocks[BLOCK_MCP],
+                                            bit_index);
+               }
+       }
+}
+
+/**
+ * @brief - handles deassertion of previously asserted attentions.
+ *
+ * @param p_hwfn
+ * @param deasserted_bits - newly deasserted bits
+ * @return int
+ *
+ */
+static int qed_int_deassertion(struct qed_hwfn  *p_hwfn,
+                              u16 deasserted_bits)
+{
+       struct qed_sb_attn_info *sb_attn_sw = p_hwfn->p_sb_attn;
+       u32 aeu_inv_arr[NUM_ATTN_REGS], aeu_mask;
+       u8 i, j, k, bit_idx;
+       int rc = 0;
+
+       /* Read the attention registers in the AEU */
+       for (i = 0; i < NUM_ATTN_REGS; i++) {
+               aeu_inv_arr[i] = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+                                       MISC_REG_AEU_AFTER_INVERT_1_IGU +
+                                       i * 0x4);
+               DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
+                          "Deasserted bits [%d]: %08x\n",
+                          i, aeu_inv_arr[i]);
+       }
+
+       /* Find parity attentions first */
+       for (i = 0; i < NUM_ATTN_REGS; i++) {
+               struct aeu_invert_reg *p_aeu = &sb_attn_sw->p_aeu_desc[i];
+               u32 en = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+                               MISC_REG_AEU_ENABLE1_IGU_OUT_0 +
+                               i * sizeof(u32));
+               u32 parities;
+
+               /* Skip register in which no parity bit is currently set */
+               parities = sb_attn_sw->parity_mask[i] & aeu_inv_arr[i] & en;
+               if (!parities)
+                       continue;
+
+               for (j = 0, bit_idx = 0; bit_idx < 32; j++) {
+                       struct aeu_invert_reg_bit *p_bit = &p_aeu->bits[j];
+
+                       if ((p_bit->flags & ATTENTION_PARITY) &&
+                           !!(parities & (1 << bit_idx)))
+                               qed_int_deassertion_parity(p_hwfn, p_bit,
+                                                          bit_idx);
+
+                       bit_idx += ATTENTION_LENGTH(p_bit->flags);
+               }
+       }
+
+       /* Find non-parity cause for attention and act */
+       for (k = 0; k < MAX_ATTN_GRPS; k++) {
+               struct aeu_invert_reg_bit *p_aeu;
+
+               /* Handle only groups whose attention is currently deasserted */
+               if (!(deasserted_bits & (1 << k)))
+                       continue;
+
+               for (i = 0; i < NUM_ATTN_REGS; i++) {
+                       u32 aeu_en = MISC_REG_AEU_ENABLE1_IGU_OUT_0 +
+                                    i * sizeof(u32) +
+                                    k * sizeof(u32) * NUM_ATTN_REGS;
+                       u32 en, bits;
+
+                       en = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en);
+                       bits = aeu_inv_arr[i] & en;
+
+                       /* Skip if no bit from this group is currently set */
+                       if (!bits)
+                               continue;
+
+                       /* Find all set bits from current register which belong
+                        * to current group, making them responsible for the
+                        * previous assertion.
+                        */
+                       for (j = 0, bit_idx = 0; bit_idx < 32; j++) {
+                               u8 bit, bit_len;
+                               u32 bitmask;
+
+                               p_aeu = &sb_attn_sw->p_aeu_desc[i].bits[j];
+
+                               /* No need to handle parity-only bits */
+                               if (p_aeu->flags == ATTENTION_PAR)
+                                       continue;
+
+                               bit = bit_idx;
+                               bit_len = ATTENTION_LENGTH(p_aeu->flags);
+                               if (p_aeu->flags & ATTENTION_PAR_INT) {
+                                       /* Skip Parity */
+                                       bit++;
+                                       bit_len--;
+                               }
+
+                               bitmask = bits & (((1 << bit_len) - 1) << bit);
+                               if (bitmask) {
+                                       /* Handle source of the attention */
+                                       qed_int_deassertion_aeu_bit(p_hwfn,
+                                                                   p_aeu,
+                                                                   aeu_en,
+                                                                   bitmask);
+                               }
+
+                               bit_idx += ATTENTION_LENGTH(p_aeu->flags);
+                       }
+               }
+       }
+
+       /* Clear IGU indication for the deasserted bits */
+       DIRECT_REG_WR((u8 __iomem *)p_hwfn->regview +
+                                   GTT_BAR0_MAP_REG_IGU_CMD +
+                                   ((IGU_CMD_ATTN_BIT_CLR_UPPER -
+                                     IGU_CMD_INT_ACK_BASE) << 3),
+                                   ~((u32)deasserted_bits));
+
+       /* Unmask deasserted attentions in IGU */
+       aeu_mask = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+                         IGU_REG_ATTENTION_ENABLE);
+       aeu_mask |= (deasserted_bits & ATTN_BITS_MASKABLE);
+       qed_wr(p_hwfn, p_hwfn->p_dpc_ptt, IGU_REG_ATTENTION_ENABLE, aeu_mask);
+
+       /* Clear deassertion from inner state */
+       sb_attn_sw->known_attn &= ~deasserted_bits;
+
+       return rc;
+}
+
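+/* Read the attention status block, derive newly asserted/deasserted bits
+ * relative to the known state and dispatch them to the handlers above.
+ */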
+static int qed_int_attentions(struct qed_hwfn *p_hwfn)
+{
+       struct qed_sb_attn_info *p_sb_attn_sw = p_hwfn->p_sb_attn;
+       struct atten_status_block *p_sb_attn = p_sb_attn_sw->sb_attn;
+       u32 attn_bits = 0, attn_acks = 0;
+       u16 asserted_bits, deasserted_bits;
+       __le16 index;
+       int rc = 0;
+
+       /* Read current attention bits/acks - safeguard against attentions
+        * by guaranteeing work on a synchronized timeframe
+        */
+       do {
+               index = p_sb_attn->sb_index;
+               attn_bits = le32_to_cpu(p_sb_attn->atten_bits);
+               attn_acks = le32_to_cpu(p_sb_attn->atten_ack);
+       } while (index != p_sb_attn->sb_index);
+       p_sb_attn->sb_index = index;
+
+       /* Attention / Deassertion are meaningful (and in correct state)
+        * only when they differ and are consistent with the known state - deassertion
+        * when previous attention & current ack, and assertion when current
+        * attention with no previous attention
+        */
+       asserted_bits = (attn_bits & ~attn_acks & ATTN_STATE_BITS) &
+               ~p_sb_attn_sw->known_attn;
+       deasserted_bits = (~attn_bits & attn_acks & ATTN_STATE_BITS) &
+               p_sb_attn_sw->known_attn;
+
+       if ((asserted_bits & ~0x100) || (deasserted_bits & ~0x100)) {
+               DP_INFO(p_hwfn,
+                       "Attention: Index: 0x%04x, Bits: 0x%08x, Acks: 0x%08x, asserted: 0x%04x, De-asserted 0x%04x [Prev. known: 0x%04x]\n",
+                       index, attn_bits, attn_acks, asserted_bits,
+                       deasserted_bits, p_sb_attn_sw->known_attn);
+       } else if (asserted_bits == 0x100) {
+               DP_INFO(p_hwfn,
+                       "MFW indication via attention\n");
+       } else {
+               DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
+                          "MFW indication [deassertion]\n");
+       }
+
+       if (asserted_bits) {
+               rc = qed_int_assertion(p_hwfn, asserted_bits);
+               if (rc)
+                       return rc;
+       }
+
+       if (deasserted_bits) {
+               rc = qed_int_deassertion(p_hwfn, deasserted_bits);
+               if (rc)
+                       return rc;
+       }
+
+       return rc;
+}
+
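+/* Acknowledge the attention segment in the IGU up to 'ack_cons' */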
+static void qed_sb_ack_attn(struct qed_hwfn *p_hwfn,
+                           void __iomem *igu_addr,
+                           u32 ack_cons)
+{
+       struct igu_prod_cons_update igu_ack = { 0 };
+
+       igu_ack.sb_id_and_flags =
+               ((ack_cons << IGU_PROD_CONS_UPDATE_SB_INDEX_SHIFT) |
+                (1 << IGU_PROD_CONS_UPDATE_UPDATE_FLAG_SHIFT) |
+                (IGU_INT_NOP << IGU_PROD_CONS_UPDATE_ENABLE_INT_SHIFT) |
+                (IGU_SEG_ACCESS_ATTN <<
+                 IGU_PROD_CONS_UPDATE_SEGMENT_ACCESS_SHIFT));
+
+       DIRECT_REG_WR(igu_addr, igu_ack.sb_id_and_flags);
+
+       /* Both segments (interrupts & acks) are written to the same address;
+        * Need to guarantee all commands will be received (in-order) by HW.
+        */
+       mmiowb();
+       barrier();
+}
+
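+/* Slowpath DPC body - disable SB acks, sample the interrupt and attention
+ * indices, handle pending attentions and protocol-index callbacks, then
+ * re-arm the status block.
+ */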
+void qed_int_sp_dpc(unsigned long hwfn_cookie)
+{
+       struct qed_hwfn *p_hwfn = (struct qed_hwfn *)hwfn_cookie;
+       struct qed_pi_info *pi_info = NULL;
+       struct qed_sb_attn_info *sb_attn;
+       struct qed_sb_info *sb_info;
+       int arr_size;
+       u16 rc = 0;
+
+       if (!p_hwfn->p_sp_sb) {
+               DP_ERR(p_hwfn->cdev, "DPC called - no p_sp_sb\n");
+               return;
+       }
+
+       sb_info = &p_hwfn->p_sp_sb->sb_info;
+       arr_size = ARRAY_SIZE(p_hwfn->p_sp_sb->pi_info_arr);
+       if (!sb_info) {
+               DP_ERR(p_hwfn->cdev,
+                      "Status block is NULL - cannot ack interrupts\n");
+               return;
+       }
+
+       if (!p_hwfn->p_sb_attn) {
+               DP_ERR(p_hwfn->cdev, "DPC called - no p_sb_attn\n");
+               return;
+       }
+       sb_attn = p_hwfn->p_sb_attn;
+
+       DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, "DPC Called! (hwfn %p %d)\n",
+                  p_hwfn, p_hwfn->my_id);
+
+       /* Disable ack for def status block. Required both for msix and
+        * inta in non-mask mode; in inta mode it does no harm.
+        */
+       qed_sb_ack(sb_info, IGU_INT_DISABLE, 0);
+
+       /* Gather Interrupts/Attentions information */
+       if (!sb_info->sb_virt) {
+               DP_ERR(p_hwfn->cdev,
+                      "Interrupt Status block is NULL - cannot check for new interrupts!\n");
+       } else {
+               u32 tmp_index = sb_info->sb_ack;
+
+               rc = qed_sb_update_sb_idx(sb_info);
+               DP_VERBOSE(p_hwfn->cdev, NETIF_MSG_INTR,
+                          "Interrupt indices: 0x%08x --> 0x%08x\n",
+                          tmp_index, sb_info->sb_ack);
+       }
+
+       if (!sb_attn || !sb_attn->sb_attn) {
+               DP_ERR(p_hwfn->cdev,
+                      "Attentions Status block is NULL - cannot check for new attentions!\n");
+       } else {
+               u16 tmp_index = sb_attn->index;
+
+               rc |= qed_attn_update_idx(p_hwfn, sb_attn);
+               DP_VERBOSE(p_hwfn->cdev, NETIF_MSG_INTR,
+                          "Attention indices: 0x%08x --> 0x%08x\n",
+                          tmp_index, sb_attn->index);
+       }
+
+       /* Check if we expect interrupts at this time; if not, just ack them */
+       if (!(rc & QED_SB_EVENT_MASK)) {
+               qed_sb_ack(sb_info, IGU_INT_ENABLE, 1);
+               return;
+       }
+
+       /* Check the validity of the DPC ptt. If not valid, ack interrupts and fail */
+       if (!p_hwfn->p_dpc_ptt) {
+               DP_NOTICE(p_hwfn->cdev, "Failed to allocate PTT\n");
+               qed_sb_ack(sb_info, IGU_INT_ENABLE, 1);
+               return;
+       }
+
+       if (rc & QED_SB_ATT_IDX)
+               qed_int_attentions(p_hwfn);
+
+       if (rc & QED_SB_IDX) {
+               int pi;
+
+               /* Look for a free index */
+               for (pi = 0; pi < arr_size; pi++) {
+                       pi_info = &p_hwfn->p_sp_sb->pi_info_arr[pi];
+                       if (pi_info->comp_cb)
+                               pi_info->comp_cb(p_hwfn, pi_info->cookie);
+               }
+       }
+
+       if (sb_attn && (rc & QED_SB_ATT_IDX))
+               /* This should be done before the interrupts are enabled,
+                * since otherwise a new attention will be generated.
+                */
+               qed_sb_ack_attn(p_hwfn, sb_info->igu_addr, sb_attn->index);
+
+       qed_sb_ack(sb_info, IGU_INT_ENABLE, 1);
+}
+
+static void qed_int_sb_attn_free(struct qed_hwfn *p_hwfn)
+{
+       struct qed_sb_attn_info *p_sb = p_hwfn->p_sb_attn;
+
+       if (!p_sb)
+               return;
+
+       if (p_sb->sb_attn)
+               dma_free_coherent(&p_hwfn->cdev->pdev->dev,
+                                 SB_ATTN_ALIGNED_SIZE(p_hwfn),
+                                 p_sb->sb_attn,
+                                 p_sb->sb_phys);
+       kfree(p_sb);
+}
+
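+/* Reset the attention SB contents/SW state and point the IGU attention
+ * message address at its DMA buffer.
+ */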
+static void qed_int_sb_attn_setup(struct qed_hwfn *p_hwfn,
+                                 struct qed_ptt *p_ptt)
+{
+       struct qed_sb_attn_info *sb_info = p_hwfn->p_sb_attn;
+
+       memset(sb_info->sb_attn, 0, sizeof(*sb_info->sb_attn));
+
+       sb_info->index = 0;
+       sb_info->known_attn = 0;
+
+       /* Configure Attention Status Block in IGU */
+       qed_wr(p_hwfn, p_ptt, IGU_REG_ATTN_MSG_ADDR_L,
+              lower_32_bits(p_hwfn->p_sb_attn->sb_phys));
+       qed_wr(p_hwfn, p_ptt, IGU_REG_ATTN_MSG_ADDR_H,
+              upper_32_bits(p_hwfn->p_sb_attn->sb_phys));
+}
+
+static void qed_int_sb_attn_init(struct qed_hwfn *p_hwfn,
+                                struct qed_ptt *p_ptt,
+                                void *sb_virt_addr,
+                                dma_addr_t sb_phy_addr)
+{
+       struct qed_sb_attn_info *sb_info = p_hwfn->p_sb_attn;
+       int i, j, k;
+
+       sb_info->sb_attn = sb_virt_addr;
+       sb_info->sb_phys = sb_phy_addr;
+
+       /* Set the pointer to the AEU descriptors */
+       sb_info->p_aeu_desc = aeu_descs;
+
+       /* Calculate Parity Masks */
+       memset(sb_info->parity_mask, 0, sizeof(u32) * NUM_ATTN_REGS);
+       for (i = 0; i < NUM_ATTN_REGS; i++) {
+               /* j is array index, k is bit index */
+               for (j = 0, k = 0; k < 32; j++) {
+                       unsigned int flags = aeu_descs[i].bits[j].flags;
+
+                       if (flags & ATTENTION_PARITY)
+                               sb_info->parity_mask[i] |= 1 << k;
+
+                       k += ATTENTION_LENGTH(flags);
+               }
+               DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
+                          "Attn Mask [Reg %d]: 0x%08x\n",
+                          i, sb_info->parity_mask[i]);
+       }
+
+       /* Set the address of cleanup for the mcp attention */
+       sb_info->mfw_attn_addr = (p_hwfn->rel_pf_id << 3) +
+                                MISC_REG_AEU_GENERAL_ATTN_0;
+
+       qed_int_sb_attn_setup(p_hwfn, p_ptt);
+}
+
+static int qed_int_sb_attn_alloc(struct qed_hwfn *p_hwfn,
+                                struct qed_ptt *p_ptt)
+{
+       struct qed_dev *cdev = p_hwfn->cdev;
+       struct qed_sb_attn_info *p_sb;
+       void *p_virt;
+       dma_addr_t p_phys = 0;
+
+       /* SB struct */
+       p_sb = kmalloc(sizeof(*p_sb), GFP_KERNEL);
+       if (!p_sb) {
+               DP_NOTICE(cdev, "Failed to allocate `struct qed_sb_attn_info'\n");
+               return -ENOMEM;
+       }
+
+       /* SB ring  */
+       p_virt = dma_alloc_coherent(&cdev->pdev->dev,
+                                   SB_ATTN_ALIGNED_SIZE(p_hwfn),
+                                   &p_phys, GFP_KERNEL);
+
+       if (!p_virt) {
+               DP_NOTICE(cdev, "Failed to allocate status block (attentions)\n");
+               kfree(p_sb);
+               return -ENOMEM;
+       }
+
+       /* Attention setup */
+       p_hwfn->p_sb_attn = p_sb;
+       qed_int_sb_attn_init(p_hwfn, p_ptt, p_virt, p_phys);
+
+       return 0;
+}
+
+/* coalescing timeout = timeset << (timer_res + 1) */
+#define QED_CAU_DEF_RX_USECS 24
+#define QED_CAU_DEF_TX_USECS 48
+
+void qed_init_cau_sb_entry(struct qed_hwfn *p_hwfn,
+                          struct cau_sb_entry *p_sb_entry,
+                          u8 pf_id,
+                          u16 vf_number,
+                          u8 vf_valid)
+{
+       struct qed_dev *cdev = p_hwfn->cdev;
+       u32 cau_state;
+       u8 timer_res;
+
+       memset(p_sb_entry, 0, sizeof(*p_sb_entry));
+
+       SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_PF_NUMBER, pf_id);
+       SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_VF_NUMBER, vf_number);
+       SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_VF_VALID, vf_valid);
+       SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_SB_TIMESET0, 0x7F);
+       SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_SB_TIMESET1, 0x7F);
+
+       /* Set the timer resolution to a fixed value (= 1) */
+       SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_TIMER_RES0,
+                 QED_CAU_DEF_RX_TIMER_RES);
+       SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_TIMER_RES1,
+                 QED_CAU_DEF_TX_TIMER_RES);
+
+       cau_state = CAU_HC_DISABLE_STATE;
+
+       if (cdev->int_coalescing_mode == QED_COAL_MODE_ENABLE) {
+               cau_state = CAU_HC_ENABLE_STATE;
+               if (!cdev->rx_coalesce_usecs)
+                       cdev->rx_coalesce_usecs = QED_CAU_DEF_RX_USECS;
+               if (!cdev->tx_coalesce_usecs)
+                       cdev->tx_coalesce_usecs = QED_CAU_DEF_TX_USECS;
+       }
+
+       /* Coalesce = (timeset << timer-res), timeset is 7bit wide */
+       if (cdev->rx_coalesce_usecs <= 0x7F)
+               timer_res = 0;
+       else if (cdev->rx_coalesce_usecs <= 0xFF)
+               timer_res = 1;
+       else
+               timer_res = 2;
+       SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_TIMER_RES0, timer_res);
+
+       if (cdev->tx_coalesce_usecs <= 0x7F)
+               timer_res = 0;
+       else if (cdev->tx_coalesce_usecs <= 0xFF)
+               timer_res = 1;
+       else
+               timer_res = 2;
+       SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_TIMER_RES1, timer_res);
+
+       SET_FIELD(p_sb_entry->data, CAU_SB_ENTRY_STATE0, cau_state);
+       SET_FIELD(p_sb_entry->data, CAU_SB_ENTRY_STATE1, cau_state);
+}
+
+void qed_int_cau_conf_sb(struct qed_hwfn *p_hwfn,
+                        struct qed_ptt *p_ptt,
+                        dma_addr_t sb_phys,
+                        u16 igu_sb_id,
+                        u16 vf_number,
+                        u8 vf_valid)
+{
+       struct cau_sb_entry sb_entry;
+
+       qed_init_cau_sb_entry(p_hwfn, &sb_entry, p_hwfn->rel_pf_id,
+                             vf_number, vf_valid);
+
+       if (p_hwfn->hw_init_done) {
+               /* Wide-bus, initialize via DMAE */
+               u64 phys_addr = (u64)sb_phys;
+
+               qed_dmae_host2grc(p_hwfn, p_ptt, (u64)(uintptr_t)&phys_addr,
+                                 CAU_REG_SB_ADDR_MEMORY +
+                                 igu_sb_id * sizeof(u64), 2, 0);
+               qed_dmae_host2grc(p_hwfn, p_ptt, (u64)(uintptr_t)&sb_entry,
+                                 CAU_REG_SB_VAR_MEMORY +
+                                 igu_sb_id * sizeof(u64), 2, 0);
+       } else {
+               /* Initialize Status Block Address */
+               STORE_RT_REG_AGG(p_hwfn,
+                                CAU_REG_SB_ADDR_MEMORY_RT_OFFSET +
+                                igu_sb_id * 2,
+                                sb_phys);
+
+               STORE_RT_REG_AGG(p_hwfn,
+                                CAU_REG_SB_VAR_MEMORY_RT_OFFSET +
+                                igu_sb_id * 2,
+                                sb_entry);
+       }
+
+       /* Configure pi coalescing if set */
+       if (p_hwfn->cdev->int_coalescing_mode == QED_COAL_MODE_ENABLE) {
+               u8 timeset, timer_res;
+               u8 num_tc = 1, i;
+
+               /* timeset = (coalesce >> timer-res), timeset is 7bit wide */
+               if (p_hwfn->cdev->rx_coalesce_usecs <= 0x7F)
+                       timer_res = 0;
+               else if (p_hwfn->cdev->rx_coalesce_usecs <= 0xFF)
+                       timer_res = 1;
+               else
+                       timer_res = 2;
+               timeset = (u8)(p_hwfn->cdev->rx_coalesce_usecs >> timer_res);
+               qed_int_cau_conf_pi(p_hwfn, p_ptt, igu_sb_id, RX_PI,
+                                   QED_COAL_RX_STATE_MACHINE,
+                                   timeset);
+
+               if (p_hwfn->cdev->tx_coalesce_usecs <= 0x7F)
+                       timer_res = 0;
+               else if (p_hwfn->cdev->tx_coalesce_usecs <= 0xFF)
+                       timer_res = 1;
+               else
+                       timer_res = 2;
+               timeset = (u8)(p_hwfn->cdev->tx_coalesce_usecs >> timer_res);
+               for (i = 0; i < num_tc; i++) {
+                       qed_int_cau_conf_pi(p_hwfn, p_ptt,
+                                           igu_sb_id, TX_PI(i),
+                                           QED_COAL_TX_STATE_MACHINE,
+                                           timeset);
+               }
+       }
+}
+
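+/* Program a single CAU protocol-index entry (timeset + FSM selection),
+ * either directly via GRC or through the runtime init array, depending on
+ * whether HW init has completed.
+ */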
+void qed_int_cau_conf_pi(struct qed_hwfn *p_hwfn,
+                        struct qed_ptt *p_ptt,
+                        u16 igu_sb_id,
+                        u32 pi_index,
+                        enum qed_coalescing_fsm coalescing_fsm,
+                        u8 timeset)
+{
+       struct cau_pi_entry pi_entry;
+       u32 sb_offset;
+       u32 pi_offset;
+
+       if (IS_VF(p_hwfn->cdev))
+               return;
+
+       sb_offset = igu_sb_id * PIS_PER_SB;
+       memset(&pi_entry, 0, sizeof(struct cau_pi_entry));
+
+       SET_FIELD(pi_entry.prod, CAU_PI_ENTRY_PI_TIMESET, timeset);
+       if (coalescing_fsm == QED_COAL_RX_STATE_MACHINE)
+               SET_FIELD(pi_entry.prod, CAU_PI_ENTRY_FSM_SEL, 0);
+       else
+               SET_FIELD(pi_entry.prod, CAU_PI_ENTRY_FSM_SEL, 1);
+
+       pi_offset = sb_offset + pi_index;
+       if (p_hwfn->hw_init_done) {
+               qed_wr(p_hwfn, p_ptt,
+                      CAU_REG_PI_MEMORY + pi_offset * sizeof(u32),
+                      *((u32 *)&(pi_entry)));
+       } else {
+               STORE_RT_REG(p_hwfn,
+                            CAU_REG_PI_MEMORY_RT_OFFSET + pi_offset,
+                            *((u32 *)&(pi_entry)));
+       }
+}
+
+void qed_int_sb_setup(struct qed_hwfn *p_hwfn,
+                     struct qed_ptt *p_ptt,
+                     struct qed_sb_info *sb_info)
+{
+       /* zero status block and ack counter */
+       sb_info->sb_ack = 0;
+       memset(sb_info->sb_virt, 0, sizeof(*sb_info->sb_virt));
+
+       if (IS_PF(p_hwfn->cdev))
+               qed_int_cau_conf_sb(p_hwfn, p_ptt, sb_info->sb_phys,
+                                   sb_info->igu_sb_id, 0, 0);
+}
+
+/**
+ * @brief qed_get_igu_sb_id - given a sw sb_id return the
+ *        igu_sb_id
+ *
+ * @param p_hwfn
+ * @param sb_id
+ *
+ * @return u16
+ */
+static u16 qed_get_igu_sb_id(struct qed_hwfn *p_hwfn,
+                            u16 sb_id)
+{
+       u16 igu_sb_id;
+
+       /* Assume a contiguous set of IGU SBs dedicated to the given PF */
+       if (sb_id == QED_SP_SB_ID)
+               igu_sb_id = p_hwfn->hw_info.p_igu_info->igu_dsb_id;
+       else if (IS_PF(p_hwfn->cdev))
+               igu_sb_id = sb_id + p_hwfn->hw_info.p_igu_info->igu_base_sb;
+       else
+               igu_sb_id = qed_vf_get_igu_sb_id(p_hwfn, sb_id);
+
+       DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, "SB [%s] index is 0x%04x\n",
+                  (sb_id == QED_SP_SB_ID) ? "DSB" : "non-DSB", igu_sb_id);
+
+       return igu_sb_id;
+}
+
+int qed_int_sb_init(struct qed_hwfn *p_hwfn,
+                   struct qed_ptt *p_ptt,
+                   struct qed_sb_info *sb_info,
+                   void *sb_virt_addr,
+                   dma_addr_t sb_phy_addr,
+                   u16 sb_id)
+{
+       sb_info->sb_virt = sb_virt_addr;
+       sb_info->sb_phys = sb_phy_addr;
+
+       sb_info->igu_sb_id = qed_get_igu_sb_id(p_hwfn, sb_id);
+
+       if (sb_id != QED_SP_SB_ID) {
+               p_hwfn->sbs_info[sb_id] = sb_info;
+               p_hwfn->num_sbs++;
+       }
+
+       sb_info->cdev = p_hwfn->cdev;
+
+       /* The igu address will hold the absolute address that needs to be
+        * written to for a specific status block
+        */
+       if (IS_PF(p_hwfn->cdev)) {
+               sb_info->igu_addr = (u8 __iomem *)p_hwfn->regview +
+                                                 GTT_BAR0_MAP_REG_IGU_CMD +
+                                                 (sb_info->igu_sb_id << 3);
+       } else {
+               sb_info->igu_addr = (u8 __iomem *)p_hwfn->regview +
+                                                 PXP_VF_BAR0_START_IGU +
+                                                 ((IGU_CMD_INT_ACK_BASE +
+                                                   sb_info->igu_sb_id) << 3);
+       }
+
+       sb_info->flags |= QED_SB_INFO_INIT;
+
+       qed_int_sb_setup(p_hwfn, p_ptt, sb_info);
+
+       return 0;
+}
+
+int qed_int_sb_release(struct qed_hwfn *p_hwfn,
+                      struct qed_sb_info *sb_info,
+                      u16 sb_id)
+{
+       if (sb_id == QED_SP_SB_ID) {
+               DP_ERR(p_hwfn, "Do not free sp sb using this function\n");
+               return -EINVAL;
+       }
+
+       /* zero status block and ack counter */
+       sb_info->sb_ack = 0;
+       memset(sb_info->sb_virt, 0, sizeof(*sb_info->sb_virt));
+
+       if (p_hwfn->sbs_info[sb_id] != NULL) {
+               p_hwfn->sbs_info[sb_id] = NULL;
+               p_hwfn->num_sbs--;
+       }
+
+       return 0;
+}
+
+static void qed_int_sp_sb_free(struct qed_hwfn *p_hwfn)
+{
+       struct qed_sb_sp_info *p_sb = p_hwfn->p_sp_sb;
+
+       if (!p_sb)
+               return;
+
+       if (p_sb->sb_info.sb_virt)
+               dma_free_coherent(&p_hwfn->cdev->pdev->dev,
+                                 SB_ALIGNED_SIZE(p_hwfn),
+                                 p_sb->sb_info.sb_virt,
+                                 p_sb->sb_info.sb_phys);
+       kfree(p_sb);
+}
+
+static int qed_int_sp_sb_alloc(struct qed_hwfn *p_hwfn,
+                              struct qed_ptt *p_ptt)
+{
+       struct qed_sb_sp_info *p_sb;
+       dma_addr_t p_phys = 0;
+       void *p_virt;
+
+       /* SB struct */
+       p_sb = kmalloc(sizeof(*p_sb), GFP_KERNEL);
+       if (!p_sb) {
+               DP_NOTICE(p_hwfn, "Failed to allocate `struct qed_sb_info'\n");
+               return -ENOMEM;
+       }
+
+       /* SB ring  */
+       p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
+                                   SB_ALIGNED_SIZE(p_hwfn),
+                                   &p_phys, GFP_KERNEL);
+       if (!p_virt) {
+               DP_NOTICE(p_hwfn, "Failed to allocate status block\n");
+               kfree(p_sb);
+               return -ENOMEM;
+       }
+
+       /* Status Block setup */
+       p_hwfn->p_sp_sb = p_sb;
+       qed_int_sb_init(p_hwfn, p_ptt, &p_sb->sb_info, p_virt,
+                       p_phys, QED_SP_SB_ID);
+
+       memset(p_sb->pi_info_arr, 0, sizeof(p_sb->pi_info_arr));
+
+       return 0;
+}
+
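+/* Register a completion callback on a free protocol index of the slowpath
+ * SB; the chosen index and a pointer to its FW consumer are returned via
+ * the output parameters.
+ */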
+int qed_int_register_cb(struct qed_hwfn *p_hwfn,
+                       qed_int_comp_cb_t comp_cb,
+                       void *cookie,
+                       u8 *sb_idx,
+                       __le16 **p_fw_cons)
+{
+       struct qed_sb_sp_info *p_sp_sb = p_hwfn->p_sp_sb;
+       int rc = -ENOMEM;
+       u8 pi;
+
+       /* Look for a free index */
+       for (pi = 0; pi < ARRAY_SIZE(p_sp_sb->pi_info_arr); pi++) {
+               if (p_sp_sb->pi_info_arr[pi].comp_cb)
+                       continue;
+
+               p_sp_sb->pi_info_arr[pi].comp_cb = comp_cb;
+               p_sp_sb->pi_info_arr[pi].cookie = cookie;
+               *sb_idx = pi;
+               *p_fw_cons = &p_sp_sb->sb_info.sb_virt->pi_array[pi];
+               rc = 0;
+               break;
+       }
+
+       return rc;
+}
+
+int qed_int_unregister_cb(struct qed_hwfn *p_hwfn, u8 pi)
+{
+       struct qed_sb_sp_info *p_sp_sb = p_hwfn->p_sp_sb;
+
+       if (p_sp_sb->pi_info_arr[pi].comp_cb == NULL)
+               return -ENOMEM;
+
+       p_sp_sb->pi_info_arr[pi].comp_cb = NULL;
+       p_sp_sb->pi_info_arr[pi].cookie = NULL;
+
+       return 0;
+}
+
+u16 qed_int_get_sp_sb_id(struct qed_hwfn *p_hwfn)
+{
+       return p_hwfn->p_sp_sb->sb_info.igu_sb_id;
+}
+
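+/* Translate the requested interrupt mode into an IGU PF configuration word
+ * and write it to the IGU.
+ */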
+void qed_int_igu_enable_int(struct qed_hwfn *p_hwfn,
+                           struct qed_ptt *p_ptt,
+                           enum qed_int_mode int_mode)
+{
+       u32 igu_pf_conf = IGU_PF_CONF_FUNC_EN | IGU_PF_CONF_ATTN_BIT_EN;
+
+       p_hwfn->cdev->int_mode = int_mode;
+       switch (p_hwfn->cdev->int_mode) {
+       case QED_INT_MODE_INTA:
+               igu_pf_conf |= IGU_PF_CONF_INT_LINE_EN;
+               igu_pf_conf |= IGU_PF_CONF_SINGLE_ISR_EN;
+               break;
+
+       case QED_INT_MODE_MSI:
+               igu_pf_conf |= IGU_PF_CONF_MSI_MSIX_EN;
+               igu_pf_conf |= IGU_PF_CONF_SINGLE_ISR_EN;
+               break;
+
+       case QED_INT_MODE_MSIX:
+               igu_pf_conf |= IGU_PF_CONF_MSI_MSIX_EN;
+               break;
+       case QED_INT_MODE_POLL:
+               break;
+       }
+
+       qed_wr(p_hwfn, p_ptt, IGU_REG_PF_CONFIGURATION, igu_pf_conf);
+}
+
+int qed_int_igu_enable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
+                      enum qed_int_mode int_mode)
+{
+       int rc = 0;
+
+       /* Configure AEU signal change to produce attentions */
+       qed_wr(p_hwfn, p_ptt, IGU_REG_ATTENTION_ENABLE, 0);
+       qed_wr(p_hwfn, p_ptt, IGU_REG_LEADING_EDGE_LATCH, 0xfff);
+       qed_wr(p_hwfn, p_ptt, IGU_REG_TRAILING_EDGE_LATCH, 0xfff);
+       qed_wr(p_hwfn, p_ptt, IGU_REG_ATTENTION_ENABLE, 0xfff);
+
+       /* Flush the writes to IGU */
+       mmiowb();
+
+       /* Unmask AEU signals toward IGU */
+       qed_wr(p_hwfn, p_ptt, MISC_REG_AEU_MASK_ATTN_IGU, 0xff);
+       if ((int_mode != QED_INT_MODE_INTA) || IS_LEAD_HWFN(p_hwfn)) {
+               rc = qed_slowpath_irq_req(p_hwfn);
+               if (rc != 0) {
+                       DP_NOTICE(p_hwfn, "Slowpath IRQ request failed\n");
+                       return -EINVAL;
+               }
+               p_hwfn->b_int_requested = true;
+       }
+       /* Enable interrupt Generation */
+       qed_int_igu_enable_int(p_hwfn, p_ptt, int_mode);
+       p_hwfn->b_int_enabled = 1;
+
+       return rc;
+}
+
+void qed_int_igu_disable_int(struct qed_hwfn *p_hwfn,
+                            struct qed_ptt *p_ptt)
+{
+       p_hwfn->b_int_enabled = 0;
+
+       if (IS_VF(p_hwfn->cdev))
+               return;
+
+       qed_wr(p_hwfn, p_ptt, IGU_REG_PF_CONFIGURATION, 0);
+}
+
+#define IGU_CLEANUP_SLEEP_LENGTH                (1000)
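+/* Issue a cleanup set/clear command for a single SB through the IGU command
+ * registers and poll its cleanup status bit until it reaches the requested
+ * state or the retry budget expires.
+ */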
+static void qed_int_igu_cleanup_sb(struct qed_hwfn *p_hwfn,
+                                  struct qed_ptt *p_ptt,
+                                  u32 sb_id, bool cleanup_set, u16 opaque_fid)
+{
+       u32 cmd_ctrl = 0, val = 0, sb_bit = 0, sb_bit_addr = 0, data = 0;
+       u32 pxp_addr = IGU_CMD_INT_ACK_BASE + sb_id;
+       u32 sleep_cnt = IGU_CLEANUP_SLEEP_LENGTH;
+
+       /* Set the data field */
+       SET_FIELD(data, IGU_CLEANUP_CLEANUP_SET, cleanup_set ? 1 : 0);
+       SET_FIELD(data, IGU_CLEANUP_CLEANUP_TYPE, 0);
+       SET_FIELD(data, IGU_CLEANUP_COMMAND_TYPE, IGU_COMMAND_TYPE_SET);
+
+       /* Set the control register */
+       SET_FIELD(cmd_ctrl, IGU_CTRL_REG_PXP_ADDR, pxp_addr);
+       SET_FIELD(cmd_ctrl, IGU_CTRL_REG_FID, opaque_fid);
+       SET_FIELD(cmd_ctrl, IGU_CTRL_REG_TYPE, IGU_CTRL_CMD_TYPE_WR);
+
+       qed_wr(p_hwfn, p_ptt, IGU_REG_COMMAND_REG_32LSB_DATA, data);
+
+       barrier();
+
+       qed_wr(p_hwfn, p_ptt, IGU_REG_COMMAND_REG_CTRL, cmd_ctrl);
+
+       /* Flush the write to IGU */
+       mmiowb();
+
+       /* calculate where to read the status bit from */
+       sb_bit = 1 << (sb_id % 32);
+       sb_bit_addr = sb_id / 32 * sizeof(u32);
+
+       sb_bit_addr += IGU_REG_CLEANUP_STATUS_0;
+
+       /* Now wait for the command to complete */
+       do {
+               val = qed_rd(p_hwfn, p_ptt, sb_bit_addr);
+
+               if ((val & sb_bit) == (cleanup_set ? sb_bit : 0))
+                       break;
+
+               usleep_range(5000, 10000);
+       } while (--sleep_cnt);
+
+       if (!sleep_cnt)
+               DP_NOTICE(p_hwfn,
+                         "Timeout waiting for clear status 0x%08x [for sb %d]\n",
+                         val, sb_id);
+}
+
+void qed_int_igu_init_pure_rt_single(struct qed_hwfn *p_hwfn,
+                                    struct qed_ptt *p_ptt,
+                                    u32 sb_id, u16 opaque, bool b_set)
+{
+       int pi, i;
+
+       /* Set */
+       if (b_set)
+               qed_int_igu_cleanup_sb(p_hwfn, p_ptt, sb_id, 1, opaque);
+
+       /* Clear */
+       qed_int_igu_cleanup_sb(p_hwfn, p_ptt, sb_id, 0, opaque);
+
+       /* Wait for the IGU SB to cleanup */
+       for (i = 0; i < IGU_CLEANUP_SLEEP_LENGTH; i++) {
+               u32 val;
+
+               val = qed_rd(p_hwfn, p_ptt,
+                            IGU_REG_WRITE_DONE_PENDING + ((sb_id / 32) * 4));
+               if (val & (1 << (sb_id % 32)))
+                       usleep_range(10, 20);
+               else
+                       break;
+       }
+       if (i == IGU_CLEANUP_SLEEP_LENGTH)
+               DP_NOTICE(p_hwfn,
+                         "Failed SB[0x%08x] still appearing in WRITE_DONE_PENDING\n",
+                         sb_id);
+
+       /* Clear the CAU for the SB */
+       for (pi = 0; pi < 12; pi++)
+               qed_wr(p_hwfn, p_ptt,
+                      CAU_REG_PI_MEMORY + (sb_id * 12 + pi) * 4, 0);
+}
+
+void qed_int_igu_init_pure_rt(struct qed_hwfn *p_hwfn,
+                             struct qed_ptt *p_ptt,
+                             bool b_set, bool b_slowpath)
+{
+       u32 igu_base_sb = p_hwfn->hw_info.p_igu_info->igu_base_sb;
+       u32 igu_sb_cnt = p_hwfn->hw_info.p_igu_info->igu_sb_cnt;
+       u32 sb_id = 0, val = 0;
+
+       val = qed_rd(p_hwfn, p_ptt, IGU_REG_BLOCK_CONFIGURATION);
+       val |= IGU_REG_BLOCK_CONFIGURATION_VF_CLEANUP_EN;
+       val &= ~IGU_REG_BLOCK_CONFIGURATION_PXP_TPH_INTERFACE_EN;
+       qed_wr(p_hwfn, p_ptt, IGU_REG_BLOCK_CONFIGURATION, val);
+
+       DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
+                  "IGU cleaning SBs [%d,...,%d]\n",
+                  igu_base_sb, igu_base_sb + igu_sb_cnt - 1);
+
+       for (sb_id = igu_base_sb; sb_id < igu_base_sb + igu_sb_cnt; sb_id++)
+               qed_int_igu_init_pure_rt_single(p_hwfn, p_ptt, sb_id,
+                                               p_hwfn->hw_info.opaque_fid,
+                                               b_set);
+
+       if (!b_slowpath)
+               return;
+
+       sb_id = p_hwfn->hw_info.p_igu_info->igu_dsb_id;
+       DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
+                  "IGU cleaning slowpath SB [%d]\n", sb_id);
+       qed_int_igu_init_pure_rt_single(p_hwfn, p_ptt, sb_id,
+                                       p_hwfn->hw_info.opaque_fid, b_set);
+}
+
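+/* Read one IGU CAM line and, unless it is an invalid PF entry, cache its
+ * fields (function id, PF/VF indication, vector number) in the SW
+ * igu_blocks array.
+ */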
+static u32 qed_int_igu_read_cam_block(struct qed_hwfn  *p_hwfn,
+                                     struct qed_ptt    *p_ptt,
+                                     u16               sb_id)
+{
+       u32 val = qed_rd(p_hwfn, p_ptt,
+                        IGU_REG_MAPPING_MEMORY +
+                        sizeof(u32) * sb_id);
+       struct qed_igu_block *p_block;
+
+       p_block = &p_hwfn->hw_info.p_igu_info->igu_map.igu_blocks[sb_id];
+
+       /* Stop scanning when we hit the first invalid PF entry */
+       if (!GET_FIELD(val, IGU_MAPPING_LINE_VALID) &&
+           GET_FIELD(val, IGU_MAPPING_LINE_PF_VALID))
+               goto out;
+
+       /* Fill the block information */
+       p_block->status         = QED_IGU_STATUS_VALID;
+       p_block->function_id    = GET_FIELD(val,
+                                           IGU_MAPPING_LINE_FUNCTION_NUMBER);
+       p_block->is_pf          = GET_FIELD(val, IGU_MAPPING_LINE_PF_VALID);
+       p_block->vector_number  = GET_FIELD(val,
+                                           IGU_MAPPING_LINE_VECTOR_NUMBER);
+
+       DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
+                  "IGU_BLOCK: [SB 0x%04x, Value in CAM 0x%08x] func_id = %d is_pf = %d vector_num = 0x%x\n",
+                  sb_id, val, p_block->function_id,
+                  p_block->is_pf, p_block->vector_number);
+
+out:
+       return val;
+}
+
+int qed_int_igu_read_cam(struct qed_hwfn *p_hwfn,
+                        struct qed_ptt *p_ptt)
+{
+       struct qed_igu_info *p_igu_info;
+       u32 val, min_vf = 0, max_vf = 0;
+       u16 sb_id, last_iov_sb_id = 0;
+       struct qed_igu_block *blk;
+       u16 prev_sb_id = 0xFF;
+
+       p_hwfn->hw_info.p_igu_info = kzalloc(sizeof(*p_igu_info), GFP_KERNEL);
+
+       if (!p_hwfn->hw_info.p_igu_info)
+               return -ENOMEM;
+
+       p_igu_info = p_hwfn->hw_info.p_igu_info;
+
+       /* Initialize base sb / sb cnt for PFs and VFs */
+       p_igu_info->igu_base_sb         = 0xffff;
+       p_igu_info->igu_sb_cnt          = 0;
+       p_igu_info->igu_dsb_id          = 0xffff;
+       p_igu_info->igu_base_sb_iov     = 0xffff;
+
+       if (p_hwfn->cdev->p_iov_info) {
+               struct qed_hw_sriov_info *p_iov = p_hwfn->cdev->p_iov_info;
+
+               min_vf  = p_iov->first_vf_in_pf;
+               max_vf  = p_iov->first_vf_in_pf + p_iov->total_vfs;
+       }
+
+       for (sb_id = 0; sb_id < QED_MAPPING_MEMORY_SIZE(p_hwfn->cdev);
+            sb_id++) {
+               blk = &p_igu_info->igu_map.igu_blocks[sb_id];
+
+               val     = qed_int_igu_read_cam_block(p_hwfn, p_ptt, sb_id);
+
+               /* Stop scanning when we hit the first invalid PF entry */
+               if (!GET_FIELD(val, IGU_MAPPING_LINE_VALID) &&
+                   GET_FIELD(val, IGU_MAPPING_LINE_PF_VALID))
+                       break;
+
+               if (blk->is_pf) {
+                       if (blk->function_id == p_hwfn->rel_pf_id) {
+                               blk->status |= QED_IGU_STATUS_PF;
+
+                               if (blk->vector_number == 0) {
+                                       if (p_igu_info->igu_dsb_id == 0xffff)
+                                               p_igu_info->igu_dsb_id = sb_id;
+                               } else {
+                                       if (p_igu_info->igu_base_sb ==
+                                           0xffff) {
+                                               p_igu_info->igu_base_sb = sb_id;
+                                       } else if (prev_sb_id != sb_id - 1) {
+                                               DP_NOTICE(p_hwfn->cdev,
+                                                         "consecutive igu vectors for HWFN %x broken",
+                                                         p_hwfn->rel_pf_id);
+                                               break;
+                                       }
+                                       prev_sb_id = sb_id;
+                                       /* we don't count the default */
+                                       (p_igu_info->igu_sb_cnt)++;
+                               }
+                       }
+               } else {
+                       if ((blk->function_id >= min_vf) &&
+                           (blk->function_id < max_vf)) {
+                               /* Available for VFs of this PF */
+                               if (p_igu_info->igu_base_sb_iov == 0xffff) {
+                                       p_igu_info->igu_base_sb_iov = sb_id;
+                               } else if (last_iov_sb_id != sb_id - 1) {
+                                       if (!val) {
+                                               DP_VERBOSE(p_hwfn->cdev,
+                                                          NETIF_MSG_INTR,
+                                                          "First uninitialized IGU CAM entry at index 0x%04x\n",
+                                                          sb_id);
+                                       } else {
+                                               DP_NOTICE(p_hwfn->cdev,
+                                                         "Consecutive igu vectors for HWFN %x vfs is broken [jumps from %04x to %04x]\n",
+                                                         p_hwfn->rel_pf_id,
+                                                         last_iov_sb_id,
+                                                         sb_id);
+                                       }
+                                       break;
+                               }
+                               blk->status |= QED_IGU_STATUS_FREE;
+                               p_hwfn->hw_info.p_igu_info->free_blks++;
+                               last_iov_sb_id = sb_id;
+                       }
+               }
+       }
+       p_igu_info->igu_sb_cnt_iov = p_igu_info->free_blks;
+
+       DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
+                  "IGU igu_base_sb=0x%x [IOV 0x%x] igu_sb_cnt=%d [IOV 0x%x] igu_dsb_id=0x%x\n",
+                  p_igu_info->igu_base_sb, p_igu_info->igu_base_sb_iov,
+                  p_igu_info->igu_sb_cnt, p_igu_info->igu_sb_cnt_iov,
+                  p_igu_info->igu_dsb_id);
+
+       if (p_igu_info->igu_base_sb == 0xffff ||
+           p_igu_info->igu_dsb_id == 0xffff ||
+           p_igu_info->igu_sb_cnt == 0) {
+               DP_NOTICE(p_hwfn,
+                         "IGU CAM returned invalid values igu_base_sb=0x%x igu_sb_cnt=%d igu_dsb_id=0x%x\n",
+                          p_igu_info->igu_base_sb,
+                          p_igu_info->igu_sb_cnt,
+                          p_igu_info->igu_dsb_id);
+               return -EINVAL;
+       }
+
+       return 0;
+}
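+
+/* Illustrative example (not taken from a specific device): if the CAM scan
+ * above finds lines 0x10..0x13 owned by this PF with vector numbers 0..3,
+ * the function ends up with igu_dsb_id = 0x10 (the vector-0 line),
+ * igu_base_sb = 0x11 and igu_sb_cnt = 3, since the default SB is not
+ * counted among the regular status blocks.
+ */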
+
+/**
+ * @brief Initialize igu runtime registers
+ *
+ * @param p_hwfn
+ */
+void qed_int_igu_init_rt(struct qed_hwfn *p_hwfn)
+{
+       u32 igu_pf_conf = 0;
+
+       igu_pf_conf |= IGU_PF_CONF_FUNC_EN;
+
+       STORE_RT_REG(p_hwfn, IGU_REG_PF_CONFIGURATION_RT_OFFSET, igu_pf_conf);
+}
+
+u64 qed_int_igu_read_sisr_reg(struct qed_hwfn *p_hwfn)
+{
+       u64 intr_status = 0;
+       u32 intr_status_lo = 0;
+       u32 intr_status_hi = 0;
+       u32 lsb_igu_cmd_addr = IGU_REG_SISR_MDPC_WMASK_LSB_UPPER -
+                              IGU_CMD_INT_ACK_BASE;
+       u32 msb_igu_cmd_addr = IGU_REG_SISR_MDPC_WMASK_MSB_UPPER -
+                              IGU_CMD_INT_ACK_BASE;
+
+       intr_status_lo = REG_RD(p_hwfn,
+                               GTT_BAR0_MAP_REG_IGU_CMD +
+                               lsb_igu_cmd_addr * 8);
+       intr_status_hi = REG_RD(p_hwfn,
+                               GTT_BAR0_MAP_REG_IGU_CMD +
+                               msb_igu_cmd_addr * 8);
+       intr_status = ((u64)intr_status_hi << 32) + (u64)intr_status_lo;
+
+       return intr_status;
+}
+
+static void qed_int_sp_dpc_setup(struct qed_hwfn *p_hwfn)
+{
+       tasklet_init(p_hwfn->sp_dpc,
+                    qed_int_sp_dpc, (unsigned long)p_hwfn);
+       p_hwfn->b_sp_dpc_enabled = true;
+}
+
+static int qed_int_sp_dpc_alloc(struct qed_hwfn *p_hwfn)
+{
+       p_hwfn->sp_dpc = kmalloc(sizeof(*p_hwfn->sp_dpc), GFP_KERNEL);
+       if (!p_hwfn->sp_dpc)
+               return -ENOMEM;
+
+       return 0;
+}
+
+static void qed_int_sp_dpc_free(struct qed_hwfn *p_hwfn)
+{
+       kfree(p_hwfn->sp_dpc);
+}
+
+int qed_int_alloc(struct qed_hwfn *p_hwfn,
+                 struct qed_ptt *p_ptt)
+{
+       int rc = 0;
+
+       rc = qed_int_sp_dpc_alloc(p_hwfn);
+       if (rc) {
+               DP_ERR(p_hwfn->cdev, "Failed to allocate sp dpc mem\n");
+               return rc;
+       }
+       rc = qed_int_sp_sb_alloc(p_hwfn, p_ptt);
+       if (rc) {
+               DP_ERR(p_hwfn->cdev, "Failed to allocate sp sb mem\n");
+               return rc;
+       }
+       rc = qed_int_sb_attn_alloc(p_hwfn, p_ptt);
+       if (rc) {
+               DP_ERR(p_hwfn->cdev, "Failed to allocate sb attn mem\n");
+               return rc;
+       }
+       return rc;
+}
+
+void qed_int_free(struct qed_hwfn *p_hwfn)
+{
+       qed_int_sp_sb_free(p_hwfn);
+       qed_int_sb_attn_free(p_hwfn);
+       qed_int_sp_dpc_free(p_hwfn);
+}
+
+void qed_int_setup(struct qed_hwfn *p_hwfn,
+                  struct qed_ptt *p_ptt)
+{
+       qed_int_sb_setup(p_hwfn, p_ptt, &p_hwfn->p_sp_sb->sb_info);
+       qed_int_sb_attn_setup(p_hwfn, p_ptt);
+       qed_int_sp_dpc_setup(p_hwfn);
+}
+
+void qed_int_get_num_sbs(struct qed_hwfn       *p_hwfn,
+                        struct qed_sb_cnt_info *p_sb_cnt_info)
+{
+       struct qed_igu_info *info = p_hwfn->hw_info.p_igu_info;
+
+       if (!info || !p_sb_cnt_info)
+               return;
+
+       p_sb_cnt_info->sb_cnt           = info->igu_sb_cnt;
+       p_sb_cnt_info->sb_iov_cnt       = info->igu_sb_cnt_iov;
+       p_sb_cnt_info->sb_free_blk      = info->free_blks;
+}
+
+u16 qed_int_queue_id_from_sb_id(struct qed_hwfn *p_hwfn, u16 sb_id)
+{
+       struct qed_igu_info *p_info = p_hwfn->hw_info.p_igu_info;
+
+       /* Determine origin of SB id */
+       if ((sb_id >= p_info->igu_base_sb) &&
+           (sb_id < p_info->igu_base_sb + p_info->igu_sb_cnt)) {
+               return sb_id - p_info->igu_base_sb;
+       } else if ((sb_id >= p_info->igu_base_sb_iov) &&
+                  (sb_id < p_info->igu_base_sb_iov + p_info->igu_sb_cnt_iov)) {
+               return sb_id - p_info->igu_base_sb_iov + p_info->igu_sb_cnt;
+       } else {
+               DP_NOTICE(p_hwfn, "SB %d not in range for function\n", sb_id);
+               return 0;
+       }
+}
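+
+/* Illustrative example (values are hypothetical): with igu_base_sb = 0x11,
+ * igu_sb_cnt = 3 and igu_base_sb_iov = 0x20, an sb_id of 0x12 maps to
+ * queue index 1, while an sb_id of 0x21 (a VF-range SB) maps to
+ * 3 + (0x21 - 0x20) = 4, i.e. VF queues are indexed after the PF queues.
+ */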
+
+void qed_int_disable_post_isr_release(struct qed_dev *cdev)
+{
+       int i;
+
+       for_each_hwfn(cdev, i)
+               cdev->hwfns[i].b_int_requested = false;
+}
+
+int qed_int_set_timer_res(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
+                         u8 timer_res, u16 sb_id, bool tx)
+{
+       struct cau_sb_entry sb_entry;
+       int rc;
+
+       if (!p_hwfn->hw_init_done) {
+               DP_ERR(p_hwfn, "hardware not initialized yet\n");
+               return -EINVAL;
+       }
+
+       rc = qed_dmae_grc2host(p_hwfn, p_ptt, CAU_REG_SB_VAR_MEMORY +
+                              sb_id * sizeof(u64),
+                              (u64)(uintptr_t)&sb_entry, 2, 0);
+       if (rc) {
+               DP_ERR(p_hwfn, "dmae_grc2host failed %d\n", rc);
+               return rc;
+       }
+
+       if (tx)
+               SET_FIELD(sb_entry.params, CAU_SB_ENTRY_TIMER_RES1, timer_res);
+       else
+               SET_FIELD(sb_entry.params, CAU_SB_ENTRY_TIMER_RES0, timer_res);
+
+       rc = qed_dmae_host2grc(p_hwfn, p_ptt,
+                              (u64)(uintptr_t)&sb_entry,
+                              CAU_REG_SB_VAR_MEMORY +
+                              sb_id * sizeof(u64), 2, 0);
+       if (rc) {
+               DP_ERR(p_hwfn, "dmae_host2grc failed %d\n", rc);
+               return rc;
+       }
+
+       return rc;
+}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.h b/drivers/net/ethernet/qlogic/qed/qed_int.h
new file mode 100644 (file)
index 0000000..0948be6
--- /dev/null
@@ -0,0 +1,397 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015 QLogic Corporation
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#ifndef _QED_INT_H
+#define _QED_INT_H
+
+#include <linux/types.h>
+#include <linux/slab.h>
+#include "qed.h"
+
+/* Fields of IGU PF CONFIGURATION REGISTER */
+#define IGU_PF_CONF_FUNC_EN       (0x1 << 0)    /* function enable        */
+#define IGU_PF_CONF_MSI_MSIX_EN   (0x1 << 1)    /* MSI/MSIX enable        */
+#define IGU_PF_CONF_INT_LINE_EN   (0x1 << 2)    /* INT enable             */
+#define IGU_PF_CONF_ATTN_BIT_EN   (0x1 << 3)    /* attention enable       */
+#define IGU_PF_CONF_SINGLE_ISR_EN (0x1 << 4)    /* single ISR mode enable */
+#define IGU_PF_CONF_SIMD_MODE     (0x1 << 5)    /* simd all ones mode     */
+/* Fields of IGU VF CONFIGURATION REGISTER */
+#define IGU_VF_CONF_FUNC_EN        (0x1 << 0)  /* function enable        */
+#define IGU_VF_CONF_MSI_MSIX_EN    (0x1 << 1)  /* MSI/MSIX enable        */
+#define IGU_VF_CONF_SINGLE_ISR_EN  (0x1 << 4)  /* single ISR mode enable */
+#define IGU_VF_CONF_PARENT_MASK    (0xF)       /* Parent PF              */
+#define IGU_VF_CONF_PARENT_SHIFT   5           /* Parent PF              */
+
+/* IGU control commands */
+enum igu_ctrl_cmd {
+       IGU_CTRL_CMD_TYPE_RD,
+       IGU_CTRL_CMD_TYPE_WR,
+       MAX_IGU_CTRL_CMD
+};
+
+/* Control register for the IGU command register */
+struct igu_ctrl_reg {
+       u32 ctrl_data;
+#define IGU_CTRL_REG_FID_MASK           0xFFFF  /* Opaque_FID   */
+#define IGU_CTRL_REG_FID_SHIFT          0
+#define IGU_CTRL_REG_PXP_ADDR_MASK      0xFFF   /* Command address */
+#define IGU_CTRL_REG_PXP_ADDR_SHIFT     16
+#define IGU_CTRL_REG_RESERVED_MASK      0x1
+#define IGU_CTRL_REG_RESERVED_SHIFT     28
+#define IGU_CTRL_REG_TYPE_MASK          0x1 /* use enum igu_ctrl_cmd */
+#define IGU_CTRL_REG_TYPE_SHIFT         31
+};
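+
+/* Minimal sketch (not part of the driver): a CAM read command for this
+ * register could be composed with the generic SET_FIELD() macro using the
+ * field names above; 'fid' and 'igu_addr' are hypothetical locals:
+ *
+ *     struct igu_ctrl_reg ctl = { 0 };
+ *
+ *     SET_FIELD(ctl.ctrl_data, IGU_CTRL_REG_FID, fid);
+ *     SET_FIELD(ctl.ctrl_data, IGU_CTRL_REG_PXP_ADDR, igu_addr);
+ *     SET_FIELD(ctl.ctrl_data, IGU_CTRL_REG_TYPE, IGU_CTRL_CMD_TYPE_RD);
+ */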
+
+enum qed_coalescing_fsm {
+       QED_COAL_RX_STATE_MACHINE,
+       QED_COAL_TX_STATE_MACHINE
+};
+
+/**
+ * @brief qed_int_cau_conf_pi - configure cau for a given
+ *        status block
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param igu_sb_id
+ * @param pi_index
+ * @param state
+ * @param timeset
+ */
+void qed_int_cau_conf_pi(struct qed_hwfn *p_hwfn,
+                        struct qed_ptt *p_ptt,
+                        u16 igu_sb_id,
+                        u32 pi_index,
+                        enum qed_coalescing_fsm coalescing_fsm,
+                        u8 timeset);
+
+/**
+ * @brief qed_int_igu_enable_int - enable device interrupts
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param int_mode - interrupt mode to use
+ */
+void qed_int_igu_enable_int(struct qed_hwfn *p_hwfn,
+                           struct qed_ptt *p_ptt,
+                           enum qed_int_mode int_mode);
+
+/**
+ * @brief qed_int_igu_disable_int - disable device interrupts
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ */
+void qed_int_igu_disable_int(struct qed_hwfn *p_hwfn,
+                            struct qed_ptt *p_ptt);
+
+/**
+ * @brief qed_int_igu_read_sisr_reg - Reads the single isr multiple dpc
+ *        register from igu.
+ *
+ * @param p_hwfn
+ *
+ * @return u64
+ */
+u64 qed_int_igu_read_sisr_reg(struct qed_hwfn *p_hwfn);
+
+#define QED_SP_SB_ID 0xffff
+/**
+ * @brief qed_int_sb_init - Initializes the sb_info structure.
+ *
+ * once the structure is initialized, it can be passed to sb related functions.
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param sb_info      points to an uninitialized (but
+ *                     allocated) sb_info structure
+ * @param sb_virt_addr
+ * @param sb_phy_addr
+ * @param sb_id        the sb_id to be used (zero based in driver)
+ *                     should use QED_SP_SB_ID for SP Status block
+ *
+ * @return int
+ */
+int qed_int_sb_init(struct qed_hwfn *p_hwfn,
+                   struct qed_ptt *p_ptt,
+                   struct qed_sb_info *sb_info,
+                   void *sb_virt_addr,
+                   dma_addr_t sb_phy_addr,
+                   u16 sb_id);
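+
+/* Minimal usage sketch, assuming the caller already holds p_hwfn/p_ptt and
+ * allocates the status block memory with dma_alloc_coherent() (variable
+ * names are illustrative):
+ *
+ *     struct qed_sb_info *sb_info = kzalloc(sizeof(*sb_info), GFP_KERNEL);
+ *     dma_addr_t sb_phys;
+ *     void *sb_virt;
+ *
+ *     sb_virt = dma_alloc_coherent(&cdev->pdev->dev,
+ *                                  sizeof(struct status_block),
+ *                                  &sb_phys, GFP_KERNEL);
+ *     rc = qed_int_sb_init(p_hwfn, p_ptt, sb_info, sb_virt, sb_phys, sb_id);
+ */
+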
+/**
+ * @brief qed_int_sb_setup - Setup the sb.
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param sb_info      initialized sb_info structure
+ */
+void qed_int_sb_setup(struct qed_hwfn *p_hwfn,
+                     struct qed_ptt *p_ptt,
+                     struct qed_sb_info *sb_info);
+
+/**
+ * @brief qed_int_sb_release - releases the sb_info structure.
+ *
+ * once the structure is released, its memory can be freed
+ *
+ * @param p_hwfn
+ * @param sb_info      points to an allocated sb_info structure
+ * @param sb_id                the sb_id to be used (zero based in driver)
+ *                     should never be equal to QED_SP_SB_ID
+ *                     (SP Status block)
+ *
+ * @return int
+ */
+int qed_int_sb_release(struct qed_hwfn *p_hwfn,
+                      struct qed_sb_info *sb_info,
+                      u16 sb_id);
+
+/**
+ * @brief qed_int_sp_dpc - To be called when an interrupt is received on the
+ *        default status block.
+ *
+ * @param hwfn_cookie - opaque pointer to the hwfn, passed as unsigned long
+ *
+ */
+void qed_int_sp_dpc(unsigned long hwfn_cookie);
+
+/**
+ * @brief qed_int_get_num_sbs - get the number of status
+ *        blocks configured for this function in the igu.
+ *
+ * @param p_hwfn
+ * @param p_sb_cnt_info - OUT: filled with the configured SB counts
+ *
+ */
+void qed_int_get_num_sbs(struct qed_hwfn       *p_hwfn,
+                        struct qed_sb_cnt_info *p_sb_cnt_info);
+
+/**
+ * @brief qed_int_disable_post_isr_release - performs the cleanup post ISR
+ *        release. The API needs to be called after releasing all slowpath IRQs
+ *        of the device.
+ *
+ * @param cdev
+ *
+ */
+void qed_int_disable_post_isr_release(struct qed_dev *cdev);
+
+#define QED_CAU_DEF_RX_TIMER_RES 0
+#define QED_CAU_DEF_TX_TIMER_RES 0
+
+#define QED_SB_ATT_IDX  0x0001
+#define QED_SB_EVENT_MASK       0x0003
+
+#define SB_ALIGNED_SIZE(p_hwfn)        \
+       ALIGNED_TYPE_SIZE(struct status_block, p_hwfn)
+
+struct qed_igu_block {
+       u8      status;
+#define QED_IGU_STATUS_FREE     0x01
+#define QED_IGU_STATUS_VALID    0x02
+#define QED_IGU_STATUS_PF       0x04
+
+       u8      vector_number;
+       u8      function_id;
+       u8      is_pf;
+};
+
+struct qed_igu_map {
+       struct qed_igu_block igu_blocks[MAX_TOT_SB_PER_PATH];
+};
+
+struct qed_igu_info {
+       struct qed_igu_map      igu_map;
+       u16                     igu_dsb_id;
+       u16                     igu_base_sb;
+       u16                     igu_base_sb_iov;
+       u16                     igu_sb_cnt;
+       u16                     igu_sb_cnt_iov;
+       u16                     free_blks;
+};
+
+/* TODO Names of function may change... */
+void qed_int_igu_init_pure_rt(struct qed_hwfn *p_hwfn,
+                             struct qed_ptt *p_ptt,
+                             bool b_set,
+                             bool b_slowpath);
+
+void qed_int_igu_init_rt(struct qed_hwfn *p_hwfn);
+
+/**
+ * @brief qed_int_igu_read_cam - Reads the IGU CAM.
+ *     This function needs to be called during hardware
+ *     prepare. It reads the info from igu cam to know which
+ *     status block is the default / base status block etc.
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ *
+ * @return int
+ */
+int qed_int_igu_read_cam(struct qed_hwfn *p_hwfn,
+                        struct qed_ptt *p_ptt);
+
+typedef int (*qed_int_comp_cb_t)(struct qed_hwfn *p_hwfn,
+                                void *cookie);
+/**
+ * @brief qed_int_register_cb - Register a callback function for the
+ *      slowhwfn status block.
+ *
+ *     Every protocol that uses the slowhwfn status block
+ *     should register a callback function that will be called
+ *     once there is an update of the sp status block.
+ *
+ * @param p_hwfn
+ * @param comp_cb - function to be called when there is an
+ *                  interrupt on the sp sb
+ *
+ * @param cookie  - passed to the callback function
+ * @param sb_idx  - OUT parameter which gives the chosen index
+ *                  for this protocol.
+ * @param p_fw_cons  - pointer to the actual address of the
+ *                     consumer for this protocol.
+ *
+ * @return int
+ */
+int qed_int_register_cb(struct qed_hwfn *p_hwfn,
+                       qed_int_comp_cb_t comp_cb,
+                       void *cookie,
+                       u8 *sb_idx,
+                       __le16 **p_fw_cons);
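+
+/* Minimal usage sketch: a protocol that consumes the slowpath SB would
+ * typically register its handler once at init time (the my_proto_* names
+ * are hypothetical) and pair it with qed_int_unregister_cb() on teardown:
+ *
+ *     u8 sb_idx;
+ *     __le16 *p_fw_cons;
+ *
+ *     rc = qed_int_register_cb(p_hwfn, my_proto_comp_cb, p_my_proto,
+ *                              &sb_idx, &p_fw_cons);
+ */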
+
+/**
+ * @brief qed_int_unregister_cb - Unregisters callback
+ *      function from sp sb.
+ *      Partner of qed_int_register_cb -> should be called
+ *      when no longer required.
+ *
+ * @param p_hwfn
+ * @param pi
+ *
+ * @return int
+ */
+int qed_int_unregister_cb(struct qed_hwfn *p_hwfn,
+                         u8 pi);
+
+/**
+ * @brief qed_int_get_sp_sb_id - Get the slowhwfn sb id.
+ *
+ * @param p_hwfn
+ *
+ * @return u16
+ */
+u16 qed_int_get_sp_sb_id(struct qed_hwfn *p_hwfn);
+
+/**
+ * @brief Status block cleanup. Should be called for each status
+ *        block that will be used -> both PF / VF
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param sb_id                - igu status block id
+ * @param opaque       - opaque fid of the sb owner.
+ * @param b_set                - set(1) / clear(0)
+ */
+void qed_int_igu_init_pure_rt_single(struct qed_hwfn *p_hwfn,
+                                    struct qed_ptt *p_ptt,
+                                    u32 sb_id,
+                                    u16 opaque,
+                                    bool b_set);
+
+/**
+ * @brief qed_int_cau_conf_sb - configure cau for a given status
+ *        block
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param sb_phys
+ * @param igu_sb_id
+ * @param vf_number
+ * @param vf_valid
+ */
+void qed_int_cau_conf_sb(struct qed_hwfn *p_hwfn,
+                        struct qed_ptt *p_ptt,
+                        dma_addr_t sb_phys,
+                        u16 igu_sb_id,
+                        u16 vf_number,
+                        u8 vf_valid);
+
+/**
+ * @brief qed_int_alloc
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ *
+ * @return int
+ */
+int qed_int_alloc(struct qed_hwfn *p_hwfn,
+                 struct qed_ptt *p_ptt);
+
+/**
+ * @brief qed_int_free
+ *
+ * @param p_hwfn
+ */
+void qed_int_free(struct qed_hwfn *p_hwfn);
+
+/**
+ * @brief qed_int_setup
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ */
+void qed_int_setup(struct qed_hwfn *p_hwfn,
+                  struct qed_ptt *p_ptt);
+
+/**
+ * @brief - Returns an Rx queue index appropriate for usage with given SB.
+ *
+ * @param p_hwfn
+ * @param sb_id - absolute index of SB
+ *
+ * @return index of Rx queue
+ */
+u16 qed_int_queue_id_from_sb_id(struct qed_hwfn *p_hwfn, u16 sb_id);
+
+/**
+ * @brief - Enable Interrupt & Attention for hw function
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param int_mode
+ *
+ * @return int
+ */
+int qed_int_igu_enable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
+                      enum qed_int_mode int_mode);
+
+/**
+ * @brief - Initialize CAU status block entry
+ *
+ * @param p_hwfn
+ * @param p_sb_entry
+ * @param pf_id
+ * @param vf_number
+ * @param vf_valid
+ */
+void qed_init_cau_sb_entry(struct qed_hwfn *p_hwfn,
+                          struct cau_sb_entry *p_sb_entry,
+                          u8 pf_id,
+                          u16 vf_number,
+                          u8 vf_valid);
+
+int qed_int_set_timer_res(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
+                         u8 timer_res, u16 sb_id, bool tx);
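+
+/* Minimal usage sketch (the timer_res value is illustrative): coalescing
+ * code can update the Rx and Tx timer resolutions of one status block with
+ * two calls, differing only in the 'tx' flag:
+ *
+ *     rc = qed_int_set_timer_res(p_hwfn, p_ptt, 1, sb_id, false);
+ *     if (!rc)
+ *             rc = qed_int_set_timer_res(p_hwfn, p_ptt, 1, sb_id, true);
+ */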
+
+#define QED_MAPPING_MEMORY_SIZE(dev)   (NUM_OF_SBS(dev))
+
+#endif
diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.c b/drivers/net/ethernet/qlogic/qed/qed_l2.c
new file mode 100644 (file)
index 0000000..401e738
--- /dev/null
@@ -0,0 +1,2213 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015 QLogic Corporation
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#include <linux/types.h>
+#include <asm/byteorder.h>
+#include <asm/param.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/etherdevice.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include <linux/stddef.h>
+#include <linux/string.h>
+#include <linux/version.h>
+#include <linux/workqueue.h>
+#include <linux/bitops.h>
+#include <linux/bug.h>
+#include "qed.h"
+#include <linux/qed/qed_chain.h>
+#include "qed_cxt.h"
+#include "qed_dev_api.h"
+#include <linux/qed/qed_eth_if.h>
+#include "qed_hsi.h"
+#include "qed_hw.h"
+#include "qed_int.h"
+#include "qed_l2.h"
+#include "qed_mcp.h"
+#include "qed_reg_addr.h"
+#include "qed_sp.h"
+#include "qed_sriov.h"
+
+#define QED_MAX_SGES_NUM 16
+#define CRC32_POLY 0x1edc6f41
+
+int qed_sp_eth_vport_start(struct qed_hwfn *p_hwfn,
+                          struct qed_sp_vport_start_params *p_params)
+{
+       struct vport_start_ramrod_data *p_ramrod = NULL;
+       struct qed_spq_entry *p_ent =  NULL;
+       struct qed_sp_init_data init_data;
+       u8 abs_vport_id = 0;
+       int rc = -EINVAL;
+       u16 rx_mode = 0;
+
+       rc = qed_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id);
+       if (rc != 0)
+               return rc;
+
+       memset(&init_data, 0, sizeof(init_data));
+       init_data.cid = qed_spq_get_cid(p_hwfn);
+       init_data.opaque_fid = p_params->opaque_fid;
+       init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
+
+       rc = qed_sp_init_request(p_hwfn, &p_ent,
+                                ETH_RAMROD_VPORT_START,
+                                PROTOCOLID_ETH, &init_data);
+       if (rc)
+               return rc;
+
+       p_ramrod                = &p_ent->ramrod.vport_start;
+       p_ramrod->vport_id      = abs_vport_id;
+
+       p_ramrod->mtu                   = cpu_to_le16(p_params->mtu);
+       p_ramrod->inner_vlan_removal_en = p_params->remove_inner_vlan;
+       p_ramrod->drop_ttl0_en          = p_params->drop_ttl0;
+       p_ramrod->untagged              = p_params->only_untagged;
+
+       SET_FIELD(rx_mode, ETH_VPORT_RX_MODE_UCAST_DROP_ALL, 1);
+       SET_FIELD(rx_mode, ETH_VPORT_RX_MODE_MCAST_DROP_ALL, 1);
+
+       p_ramrod->rx_mode.state = cpu_to_le16(rx_mode);
+
+       /* TPA related fields */
+       memset(&p_ramrod->tpa_param, 0,
+              sizeof(struct eth_vport_tpa_param));
+
+       p_ramrod->tpa_param.max_buff_num = p_params->max_buffers_per_cqe;
+
+       switch (p_params->tpa_mode) {
+       case QED_TPA_MODE_GRO:
+               p_ramrod->tpa_param.tpa_max_aggs_num = ETH_TPA_MAX_AGGS_NUM;
+               p_ramrod->tpa_param.tpa_max_size = (u16)-1;
+               p_ramrod->tpa_param.tpa_min_size_to_cont = p_params->mtu / 2;
+               p_ramrod->tpa_param.tpa_min_size_to_start = p_params->mtu / 2;
+               p_ramrod->tpa_param.tpa_ipv4_en_flg = 1;
+               p_ramrod->tpa_param.tpa_ipv6_en_flg = 1;
+               p_ramrod->tpa_param.tpa_pkt_split_flg = 1;
+               p_ramrod->tpa_param.tpa_gro_consistent_flg = 1;
+               break;
+       default:
+               break;
+       }
+
+       p_ramrod->tx_switching_en = p_params->tx_switching;
+
+       /* Software Function ID in hwfn (PFs are 0 - 15, VFs are 16 - 135) */
+       p_ramrod->sw_fid = qed_concrete_to_sw_fid(p_hwfn->cdev,
+                                                 p_params->concrete_fid);
+
+       return qed_spq_post(p_hwfn, p_ent, NULL);
+}
+
+int qed_sp_vport_start(struct qed_hwfn *p_hwfn,
+                      struct qed_sp_vport_start_params *p_params)
+{
+       if (IS_VF(p_hwfn->cdev)) {
+               return qed_vf_pf_vport_start(p_hwfn, p_params->vport_id,
+                                            p_params->mtu,
+                                            p_params->remove_inner_vlan,
+                                            p_params->tpa_mode,
+                                            p_params->max_buffers_per_cqe,
+                                            p_params->only_untagged);
+       }
+
+       return qed_sp_eth_vport_start(p_hwfn, p_params);
+}
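+
+/* Minimal caller-side sketch (field values are illustrative; the FIDs are
+ * assumed to come from p_hwfn->hw_info): the L2 layer fills a
+ * qed_sp_vport_start_params before invoking the API above, roughly as:
+ *
+ *     struct qed_sp_vport_start_params start = { 0 };
+ *
+ *     start.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ *     start.concrete_fid = p_hwfn->hw_info.concrete_fid;
+ *     start.vport_id = 0;
+ *     start.mtu = 1500;
+ *     start.tpa_mode = QED_TPA_MODE_GRO;
+ *     start.max_buffers_per_cqe = 5;
+ *     rc = qed_sp_vport_start(p_hwfn, &start);
+ */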
+
+static int
+qed_sp_vport_update_rss(struct qed_hwfn *p_hwfn,
+                       struct vport_update_ramrod_data *p_ramrod,
+                       struct qed_rss_params *p_params)
+{
+       struct eth_vport_rss_config *rss = &p_ramrod->rss_config;
+       u16 abs_l2_queue = 0, capabilities = 0;
+       int rc = 0, i;
+
+       if (!p_params) {
+               p_ramrod->common.update_rss_flg = 0;
+               return rc;
+       }
+
+       BUILD_BUG_ON(QED_RSS_IND_TABLE_SIZE !=
+                    ETH_RSS_IND_TABLE_ENTRIES_NUM);
+
+       rc = qed_fw_rss_eng(p_hwfn, p_params->rss_eng_id, &rss->rss_id);
+       if (rc)
+               return rc;
+
+       p_ramrod->common.update_rss_flg = p_params->update_rss_config;
+       rss->update_rss_capabilities = p_params->update_rss_capabilities;
+       rss->update_rss_ind_table = p_params->update_rss_ind_table;
+       rss->update_rss_key = p_params->update_rss_key;
+
+       rss->rss_mode = p_params->rss_enable ?
+                       ETH_VPORT_RSS_MODE_REGULAR :
+                       ETH_VPORT_RSS_MODE_DISABLED;
+
+       SET_FIELD(capabilities,
+                 ETH_VPORT_RSS_CONFIG_IPV4_CAPABILITY,
+                 !!(p_params->rss_caps & QED_RSS_IPV4));
+       SET_FIELD(capabilities,
+                 ETH_VPORT_RSS_CONFIG_IPV6_CAPABILITY,
+                 !!(p_params->rss_caps & QED_RSS_IPV6));
+       SET_FIELD(capabilities,
+                 ETH_VPORT_RSS_CONFIG_IPV4_TCP_CAPABILITY,
+                 !!(p_params->rss_caps & QED_RSS_IPV4_TCP));
+       SET_FIELD(capabilities,
+                 ETH_VPORT_RSS_CONFIG_IPV6_TCP_CAPABILITY,
+                 !!(p_params->rss_caps & QED_RSS_IPV6_TCP));
+       SET_FIELD(capabilities,
+                 ETH_VPORT_RSS_CONFIG_IPV4_UDP_CAPABILITY,
+                 !!(p_params->rss_caps & QED_RSS_IPV4_UDP));
+       SET_FIELD(capabilities,
+                 ETH_VPORT_RSS_CONFIG_IPV6_UDP_CAPABILITY,
+                 !!(p_params->rss_caps & QED_RSS_IPV6_UDP));
+       rss->tbl_size = p_params->rss_table_size_log;
+
+       rss->capabilities = cpu_to_le16(capabilities);
+
+       DP_VERBOSE(p_hwfn, NETIF_MSG_IFUP,
+                  "update rss flag %d, rss_mode = %d, update_caps = %d, capabilities = %d, update_ind = %d, update_rss_key = %d\n",
+                  p_ramrod->common.update_rss_flg,
+                  rss->rss_mode, rss->update_rss_capabilities,
+                  capabilities, rss->update_rss_ind_table,
+                  rss->update_rss_key);
+
+       for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i++) {
+               rc = qed_fw_l2_queue(p_hwfn,
+                                    (u8)p_params->rss_ind_table[i],
+                                    &abs_l2_queue);
+               if (rc)
+                       return rc;
+
+               rss->indirection_table[i] = cpu_to_le16(abs_l2_queue);
+               DP_VERBOSE(p_hwfn, NETIF_MSG_IFUP, "i= %d, queue = %d\n",
+                          i, rss->indirection_table[i]);
+       }
+
+       for (i = 0; i < 10; i++)
+               rss->rss_key[i] = cpu_to_le32(p_params->rss_key[i]);
+
+       return rc;
+}
+
+static void
+qed_sp_update_accept_mode(struct qed_hwfn *p_hwfn,
+                         struct vport_update_ramrod_data *p_ramrod,
+                         struct qed_filter_accept_flags accept_flags)
+{
+       p_ramrod->common.update_rx_mode_flg =
+               accept_flags.update_rx_mode_config;
+
+       p_ramrod->common.update_tx_mode_flg =
+               accept_flags.update_tx_mode_config;
+
+       /* Set Rx mode accept flags */
+       if (p_ramrod->common.update_rx_mode_flg) {
+               u8 accept_filter = accept_flags.rx_accept_filter;
+               u16 state = 0;
+
+               SET_FIELD(state, ETH_VPORT_RX_MODE_UCAST_DROP_ALL,
+                         !(!!(accept_filter & QED_ACCEPT_UCAST_MATCHED) ||
+                           !!(accept_filter & QED_ACCEPT_UCAST_UNMATCHED)));
+
+               SET_FIELD(state, ETH_VPORT_RX_MODE_UCAST_ACCEPT_UNMATCHED,
+                         !!(accept_filter & QED_ACCEPT_UCAST_UNMATCHED));
+
+               SET_FIELD(state, ETH_VPORT_RX_MODE_MCAST_DROP_ALL,
+                         !(!!(accept_filter & QED_ACCEPT_MCAST_MATCHED) ||
+                           !!(accept_filter & QED_ACCEPT_MCAST_UNMATCHED)));
+
+               SET_FIELD(state, ETH_VPORT_RX_MODE_MCAST_ACCEPT_ALL,
+                         (!!(accept_filter & QED_ACCEPT_MCAST_MATCHED) &&
+                          !!(accept_filter & QED_ACCEPT_MCAST_UNMATCHED)));
+
+               SET_FIELD(state, ETH_VPORT_RX_MODE_BCAST_ACCEPT_ALL,
+                         !!(accept_filter & QED_ACCEPT_BCAST));
+
+               p_ramrod->rx_mode.state = cpu_to_le16(state);
+               DP_VERBOSE(p_hwfn, QED_MSG_SP,
+                          "p_ramrod->rx_mode.state = 0x%x\n", state);
+       }
+
+       /* Set Tx mode accept flags */
+       if (p_ramrod->common.update_tx_mode_flg) {
+               u8 accept_filter = accept_flags.tx_accept_filter;
+               u16 state = 0;
+
+               SET_FIELD(state, ETH_VPORT_TX_MODE_UCAST_DROP_ALL,
+                         !!(accept_filter & QED_ACCEPT_NONE));
+
+               SET_FIELD(state, ETH_VPORT_TX_MODE_MCAST_DROP_ALL,
+                         !!(accept_filter & QED_ACCEPT_NONE));
+
+               SET_FIELD(state, ETH_VPORT_TX_MODE_MCAST_ACCEPT_ALL,
+                         (!!(accept_filter & QED_ACCEPT_MCAST_MATCHED) &&
+                          !!(accept_filter & QED_ACCEPT_MCAST_UNMATCHED)));
+
+               SET_FIELD(state, ETH_VPORT_TX_MODE_BCAST_ACCEPT_ALL,
+                         !!(accept_filter & QED_ACCEPT_BCAST));
+
+               p_ramrod->tx_mode.state = cpu_to_le16(state);
+               DP_VERBOSE(p_hwfn, QED_MSG_SP,
+                          "p_ramrod->tx_mode.state = 0x%x\n", state);
+       }
+}
+
+static void
+qed_sp_vport_update_sge_tpa(struct qed_hwfn *p_hwfn,
+                           struct vport_update_ramrod_data *p_ramrod,
+                           struct qed_sge_tpa_params *p_params)
+{
+       struct eth_vport_tpa_param *p_tpa;
+
+       if (!p_params) {
+               p_ramrod->common.update_tpa_param_flg = 0;
+               p_ramrod->common.update_tpa_en_flg = 0;
+               return;
+       }
+
+       p_ramrod->common.update_tpa_en_flg = p_params->update_tpa_en_flg;
+       p_tpa = &p_ramrod->tpa_param;
+       p_tpa->tpa_ipv4_en_flg = p_params->tpa_ipv4_en_flg;
+       p_tpa->tpa_ipv6_en_flg = p_params->tpa_ipv6_en_flg;
+       p_tpa->tpa_ipv4_tunn_en_flg = p_params->tpa_ipv4_tunn_en_flg;
+       p_tpa->tpa_ipv6_tunn_en_flg = p_params->tpa_ipv6_tunn_en_flg;
+
+       p_ramrod->common.update_tpa_param_flg = p_params->update_tpa_param_flg;
+       p_tpa->max_buff_num = p_params->max_buffers_per_cqe;
+       p_tpa->tpa_pkt_split_flg = p_params->tpa_pkt_split_flg;
+       p_tpa->tpa_hdr_data_split_flg = p_params->tpa_hdr_data_split_flg;
+       p_tpa->tpa_gro_consistent_flg = p_params->tpa_gro_consistent_flg;
+       p_tpa->tpa_max_aggs_num = p_params->tpa_max_aggs_num;
+       p_tpa->tpa_max_size = p_params->tpa_max_size;
+       p_tpa->tpa_min_size_to_start = p_params->tpa_min_size_to_start;
+       p_tpa->tpa_min_size_to_cont = p_params->tpa_min_size_to_cont;
+}
+
+static void
+qed_sp_update_mcast_bin(struct qed_hwfn *p_hwfn,
+                       struct vport_update_ramrod_data *p_ramrod,
+                       struct qed_sp_vport_update_params *p_params)
+{
+       int i;
+
+       memset(&p_ramrod->approx_mcast.bins, 0,
+              sizeof(p_ramrod->approx_mcast.bins));
+
+       if (p_params->update_approx_mcast_flg) {
+               p_ramrod->common.update_approx_mcast_flg = 1;
+               for (i = 0; i < ETH_MULTICAST_MAC_BINS_IN_REGS; i++) {
+                       u32 *p_bins = (u32 *)p_params->bins;
+                       __le32 val = cpu_to_le32(p_bins[i]);
+
+                       p_ramrod->approx_mcast.bins[i] = val;
+               }
+       }
+}
+
+int qed_sp_vport_update(struct qed_hwfn *p_hwfn,
+                       struct qed_sp_vport_update_params *p_params,
+                       enum spq_mode comp_mode,
+                       struct qed_spq_comp_cb *p_comp_data)
+{
+       struct qed_rss_params *p_rss_params = p_params->rss_params;
+       struct vport_update_ramrod_data_cmn *p_cmn;
+       struct qed_sp_init_data init_data;
+       struct vport_update_ramrod_data *p_ramrod = NULL;
+       struct qed_spq_entry *p_ent = NULL;
+       u8 abs_vport_id = 0, val;
+       int rc = -EINVAL;
+
+       if (IS_VF(p_hwfn->cdev)) {
+               rc = qed_vf_pf_vport_update(p_hwfn, p_params);
+               return rc;
+       }
+
+       rc = qed_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id);
+       if (rc != 0)
+               return rc;
+
+       memset(&init_data, 0, sizeof(init_data));
+       init_data.cid = qed_spq_get_cid(p_hwfn);
+       init_data.opaque_fid = p_params->opaque_fid;
+       init_data.comp_mode = comp_mode;
+       init_data.p_comp_data = p_comp_data;
+
+       rc = qed_sp_init_request(p_hwfn, &p_ent,
+                                ETH_RAMROD_VPORT_UPDATE,
+                                PROTOCOLID_ETH, &init_data);
+       if (rc)
+               return rc;
+
+       /* Copy input params to ramrod according to FW struct */
+       p_ramrod = &p_ent->ramrod.vport_update;
+       p_cmn = &p_ramrod->common;
+
+       p_cmn->vport_id = abs_vport_id;
+       p_cmn->rx_active_flg = p_params->vport_active_rx_flg;
+       p_cmn->update_rx_active_flg = p_params->update_vport_active_rx_flg;
+       p_cmn->tx_active_flg = p_params->vport_active_tx_flg;
+       p_cmn->update_tx_active_flg = p_params->update_vport_active_tx_flg;
+       p_cmn->accept_any_vlan = p_params->accept_any_vlan;
+       p_cmn->update_accept_any_vlan_flg =
+                       p_params->update_accept_any_vlan_flg;
+
+       p_cmn->inner_vlan_removal_en = p_params->inner_vlan_removal_flg;
+       val = p_params->update_inner_vlan_removal_flg;
+       p_cmn->update_inner_vlan_removal_en_flg = val;
+
+       p_cmn->default_vlan_en = p_params->default_vlan_enable_flg;
+       val = p_params->update_default_vlan_enable_flg;
+       p_cmn->update_default_vlan_en_flg = val;
+
+       p_cmn->default_vlan = cpu_to_le16(p_params->default_vlan);
+       p_cmn->update_default_vlan_flg = p_params->update_default_vlan_flg;
+
+       p_cmn->silent_vlan_removal_en = p_params->silent_vlan_removal_flg;
+
+       p_ramrod->common.tx_switching_en = p_params->tx_switching_flg;
+       p_cmn->update_tx_switching_en_flg = p_params->update_tx_switching_flg;
+
+       p_cmn->anti_spoofing_en = p_params->anti_spoofing_en;
+       val = p_params->update_anti_spoofing_en_flg;
+       p_ramrod->common.update_anti_spoofing_en_flg = val;
+
+       rc = qed_sp_vport_update_rss(p_hwfn, p_ramrod, p_rss_params);
+       if (rc) {
+               /* Return spq entry which is taken in qed_sp_init_request()*/
+               qed_spq_return_entry(p_hwfn, p_ent);
+               return rc;
+       }
+
+       /* Update mcast bins for VFs, PF doesn't use this functionality */
+       qed_sp_update_mcast_bin(p_hwfn, p_ramrod, p_params);
+
+       qed_sp_update_accept_mode(p_hwfn, p_ramrod, p_params->accept_flags);
+       qed_sp_vport_update_sge_tpa(p_hwfn, p_ramrod, p_params->sge_tpa_params);
+       return qed_spq_post(p_hwfn, p_ent, NULL);
+}
+
+int qed_sp_vport_stop(struct qed_hwfn *p_hwfn, u16 opaque_fid, u8 vport_id)
+{
+       struct vport_stop_ramrod_data *p_ramrod;
+       struct qed_sp_init_data init_data;
+       struct qed_spq_entry *p_ent;
+       u8 abs_vport_id = 0;
+       int rc;
+
+       if (IS_VF(p_hwfn->cdev))
+               return qed_vf_pf_vport_stop(p_hwfn);
+
+       rc = qed_fw_vport(p_hwfn, vport_id, &abs_vport_id);
+       if (rc != 0)
+               return rc;
+
+       memset(&init_data, 0, sizeof(init_data));
+       init_data.cid = qed_spq_get_cid(p_hwfn);
+       init_data.opaque_fid = opaque_fid;
+       init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
+
+       rc = qed_sp_init_request(p_hwfn, &p_ent,
+                                ETH_RAMROD_VPORT_STOP,
+                                PROTOCOLID_ETH, &init_data);
+       if (rc)
+               return rc;
+
+       p_ramrod = &p_ent->ramrod.vport_stop;
+       p_ramrod->vport_id = abs_vport_id;
+
+       return qed_spq_post(p_hwfn, p_ent, NULL);
+}
+
+static int
+qed_vf_pf_accept_flags(struct qed_hwfn *p_hwfn,
+                      struct qed_filter_accept_flags *p_accept_flags)
+{
+       struct qed_sp_vport_update_params s_params;
+
+       memset(&s_params, 0, sizeof(s_params));
+       memcpy(&s_params.accept_flags, p_accept_flags,
+              sizeof(struct qed_filter_accept_flags));
+
+       return qed_vf_pf_vport_update(p_hwfn, &s_params);
+}
+
+static int qed_filter_accept_cmd(struct qed_dev *cdev,
+                                u8 vport,
+                                struct qed_filter_accept_flags accept_flags,
+                                u8 update_accept_any_vlan,
+                                u8 accept_any_vlan,
+                                enum spq_mode comp_mode,
+                                struct qed_spq_comp_cb *p_comp_data)
+{
+       struct qed_sp_vport_update_params vport_update_params;
+       int i, rc;
+
+       /* Prepare and send the vport rx_mode change */
+       memset(&vport_update_params, 0, sizeof(vport_update_params));
+       vport_update_params.vport_id = vport;
+       vport_update_params.accept_flags = accept_flags;
+       vport_update_params.update_accept_any_vlan_flg = update_accept_any_vlan;
+       vport_update_params.accept_any_vlan = accept_any_vlan;
+
+       for_each_hwfn(cdev, i) {
+               struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+
+               vport_update_params.opaque_fid = p_hwfn->hw_info.opaque_fid;
+
+               if (IS_VF(cdev)) {
+                       rc = qed_vf_pf_accept_flags(p_hwfn, &accept_flags);
+                       if (rc)
+                               return rc;
+                       continue;
+               }
+
+               rc = qed_sp_vport_update(p_hwfn, &vport_update_params,
+                                        comp_mode, p_comp_data);
+               if (rc != 0) {
+                       DP_ERR(cdev, "Update rx_mode failed %d\n", rc);
+                       return rc;
+               }
+
+               DP_VERBOSE(p_hwfn, QED_MSG_SP,
+                          "Accept filter configured, flags = [Rx]%x [Tx]%x\n",
+                          accept_flags.rx_accept_filter,
+                          accept_flags.tx_accept_filter);
+               if (update_accept_any_vlan)
+                       DP_VERBOSE(p_hwfn, QED_MSG_SP,
+                                  "accept_any_vlan=%d configured\n",
+                                  accept_any_vlan);
+       }
+
+       return 0;
+}
+
+static int qed_sp_release_queue_cid(struct qed_hwfn *p_hwfn,
+                                    struct qed_hw_cid_data *p_cid_data)
+{
+       if (!p_cid_data->b_cid_allocated)
+               return 0;
+
+       qed_cxt_release_cid(p_hwfn, p_cid_data->cid);
+
+       p_cid_data->b_cid_allocated = false;
+
+       return 0;
+}
+
+int qed_sp_eth_rxq_start_ramrod(struct qed_hwfn *p_hwfn,
+                               u16 opaque_fid,
+                               u32 cid,
+                               struct qed_queue_start_common_params *params,
+                               u8 stats_id,
+                               u16 bd_max_bytes,
+                               dma_addr_t bd_chain_phys_addr,
+                               dma_addr_t cqe_pbl_addr, u16 cqe_pbl_size)
+{
+       struct rx_queue_start_ramrod_data *p_ramrod = NULL;
+       struct qed_spq_entry *p_ent = NULL;
+       struct qed_sp_init_data init_data;
+       struct qed_hw_cid_data *p_rx_cid;
+       u16 abs_rx_q_id = 0;
+       u8 abs_vport_id = 0;
+       int rc = -EINVAL;
+
+       /* Store information for the stop */
+       p_rx_cid                = &p_hwfn->p_rx_cids[params->queue_id];
+       p_rx_cid->cid           = cid;
+       p_rx_cid->opaque_fid    = opaque_fid;
+       p_rx_cid->vport_id      = params->vport_id;
+
+       rc = qed_fw_vport(p_hwfn, params->vport_id, &abs_vport_id);
+       if (rc != 0)
+               return rc;
+
+       rc = qed_fw_l2_queue(p_hwfn, params->queue_id, &abs_rx_q_id);
+       if (rc != 0)
+               return rc;
+
+       DP_VERBOSE(p_hwfn, QED_MSG_SP,
+                  "opaque_fid=0x%x, cid=0x%x, rx_qid=0x%x, vport_id=0x%x, sb_id=0x%x\n",
+                  opaque_fid, cid, params->queue_id, params->vport_id,
+                  params->sb);
+
+       /* Get SPQ entry */
+       memset(&init_data, 0, sizeof(init_data));
+       init_data.cid = cid;
+       init_data.opaque_fid = opaque_fid;
+       init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
+
+       rc = qed_sp_init_request(p_hwfn, &p_ent,
+                                ETH_RAMROD_RX_QUEUE_START,
+                                PROTOCOLID_ETH, &init_data);
+       if (rc)
+               return rc;
+
+       p_ramrod = &p_ent->ramrod.rx_queue_start;
+
+       p_ramrod->sb_id                 = cpu_to_le16(params->sb);
+       p_ramrod->sb_index              = params->sb_idx;
+       p_ramrod->vport_id              = abs_vport_id;
+       p_ramrod->stats_counter_id      = stats_id;
+       p_ramrod->rx_queue_id           = cpu_to_le16(abs_rx_q_id);
+       p_ramrod->complete_cqe_flg      = 0;
+       p_ramrod->complete_event_flg    = 1;
+
+       p_ramrod->bd_max_bytes  = cpu_to_le16(bd_max_bytes);
+       DMA_REGPAIR_LE(p_ramrod->bd_base, bd_chain_phys_addr);
+
+       p_ramrod->num_of_pbl_pages      = cpu_to_le16(cqe_pbl_size);
+       DMA_REGPAIR_LE(p_ramrod->cqe_pbl_addr, cqe_pbl_addr);
+
+       p_ramrod->vf_rx_prod_index = params->vf_qid;
+       if (params->vf_qid)
+               DP_VERBOSE(p_hwfn, QED_MSG_SP,
+                          "Queue is meant for VF rxq[%04x]\n", params->vf_qid);
+
+       return qed_spq_post(p_hwfn, p_ent, NULL);
+}
+
+static int
+qed_sp_eth_rx_queue_start(struct qed_hwfn *p_hwfn,
+                         u16 opaque_fid,
+                         struct qed_queue_start_common_params *params,
+                         u16 bd_max_bytes,
+                         dma_addr_t bd_chain_phys_addr,
+                         dma_addr_t cqe_pbl_addr,
+                         u16 cqe_pbl_size, void __iomem **pp_prod)
+{
+       struct qed_hw_cid_data *p_rx_cid;
+       u32 init_prod_val = 0;
+       u16 abs_l2_queue = 0;
+       u8 abs_stats_id = 0;
+       int rc;
+
+       if (IS_VF(p_hwfn->cdev)) {
+               return qed_vf_pf_rxq_start(p_hwfn,
+                                          params->queue_id,
+                                          params->sb,
+                                          params->sb_idx,
+                                          bd_max_bytes,
+                                          bd_chain_phys_addr,
+                                          cqe_pbl_addr, cqe_pbl_size, pp_prod);
+       }
+
+       rc = qed_fw_l2_queue(p_hwfn, params->queue_id, &abs_l2_queue);
+       if (rc != 0)
+               return rc;
+
+       rc = qed_fw_vport(p_hwfn, params->vport_id, &abs_stats_id);
+       if (rc != 0)
+               return rc;
+
+       *pp_prod = (u8 __iomem *)p_hwfn->regview +
+                                GTT_BAR0_MAP_REG_MSDM_RAM +
+                                MSTORM_ETH_PF_PRODS_OFFSET(abs_l2_queue);
+
+       /* Init the rcq, rx bd and rx sge (if valid) producers to 0 */
+       __internal_ram_wr(p_hwfn, *pp_prod, sizeof(u32),
+                         (u32 *)(&init_prod_val));
+
+       /* Allocate a CID for the queue */
+       p_rx_cid = &p_hwfn->p_rx_cids[params->queue_id];
+       rc = qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_ETH,
+                                &p_rx_cid->cid);
+       if (rc) {
+               DP_NOTICE(p_hwfn, "Failed to acquire cid\n");
+               return rc;
+       }
+       p_rx_cid->b_cid_allocated = true;
+
+       rc = qed_sp_eth_rxq_start_ramrod(p_hwfn,
+                                        opaque_fid,
+                                        p_rx_cid->cid,
+                                        params,
+                                        abs_stats_id,
+                                        bd_max_bytes,
+                                        bd_chain_phys_addr,
+                                        cqe_pbl_addr,
+                                        cqe_pbl_size);
+
+       if (rc != 0)
+               qed_sp_release_queue_cid(p_hwfn, p_rx_cid);
+
+       return rc;
+}
+
+int qed_sp_eth_rx_queues_update(struct qed_hwfn *p_hwfn,
+                               u16 rx_queue_id,
+                               u8 num_rxqs,
+                               u8 complete_cqe_flg,
+                               u8 complete_event_flg,
+                               enum spq_mode comp_mode,
+                               struct qed_spq_comp_cb *p_comp_data)
+{
+       struct rx_queue_update_ramrod_data *p_ramrod = NULL;
+       struct qed_spq_entry *p_ent = NULL;
+       struct qed_sp_init_data init_data;
+       struct qed_hw_cid_data *p_rx_cid;
+       u16 qid, abs_rx_q_id = 0;
+       int rc = -EINVAL;
+       u8 i;
+
+       memset(&init_data, 0, sizeof(init_data));
+       init_data.comp_mode = comp_mode;
+       init_data.p_comp_data = p_comp_data;
+
+       for (i = 0; i < num_rxqs; i++) {
+               qid = rx_queue_id + i;
+               p_rx_cid = &p_hwfn->p_rx_cids[qid];
+
+               /* Get SPQ entry */
+               init_data.cid = p_rx_cid->cid;
+               init_data.opaque_fid = p_rx_cid->opaque_fid;
+
+               rc = qed_sp_init_request(p_hwfn, &p_ent,
+                                        ETH_RAMROD_RX_QUEUE_UPDATE,
+                                        PROTOCOLID_ETH, &init_data);
+               if (rc)
+                       return rc;
+
+               p_ramrod = &p_ent->ramrod.rx_queue_update;
+
+               qed_fw_vport(p_hwfn, p_rx_cid->vport_id, &p_ramrod->vport_id);
+               qed_fw_l2_queue(p_hwfn, qid, &abs_rx_q_id);
+               p_ramrod->rx_queue_id = cpu_to_le16(abs_rx_q_id);
+               p_ramrod->complete_cqe_flg = complete_cqe_flg;
+               p_ramrod->complete_event_flg = complete_event_flg;
+
+               rc = qed_spq_post(p_hwfn, p_ent, NULL);
+               if (rc)
+                       return rc;
+       }
+
+       return rc;
+}
+
+int qed_sp_eth_rx_queue_stop(struct qed_hwfn *p_hwfn,
+                            u16 rx_queue_id,
+                            bool eq_completion_only, bool cqe_completion)
+{
+       struct qed_hw_cid_data *p_rx_cid = &p_hwfn->p_rx_cids[rx_queue_id];
+       struct rx_queue_stop_ramrod_data *p_ramrod = NULL;
+       struct qed_spq_entry *p_ent = NULL;
+       struct qed_sp_init_data init_data;
+       u16 abs_rx_q_id = 0;
+       int rc = -EINVAL;
+
+       if (IS_VF(p_hwfn->cdev))
+               return qed_vf_pf_rxq_stop(p_hwfn, rx_queue_id, cqe_completion);
+
+       /* Get SPQ entry */
+       memset(&init_data, 0, sizeof(init_data));
+       init_data.cid = p_rx_cid->cid;
+       init_data.opaque_fid = p_rx_cid->opaque_fid;
+       init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
+
+       rc = qed_sp_init_request(p_hwfn, &p_ent,
+                                ETH_RAMROD_RX_QUEUE_STOP,
+                                PROTOCOLID_ETH, &init_data);
+       if (rc)
+               return rc;
+
+       p_ramrod = &p_ent->ramrod.rx_queue_stop;
+
+       qed_fw_vport(p_hwfn, p_rx_cid->vport_id, &p_ramrod->vport_id);
+       qed_fw_l2_queue(p_hwfn, rx_queue_id, &abs_rx_q_id);
+       p_ramrod->rx_queue_id = cpu_to_le16(abs_rx_q_id);
+
+       /* Cleaning the queue requires the completion to arrive there.
+        * In addition, VFs require the answer to come as eqe to PF.
+        */
+       p_ramrod->complete_cqe_flg =
+               (!!(p_rx_cid->opaque_fid == p_hwfn->hw_info.opaque_fid) &&
+                !eq_completion_only) || cqe_completion;
+       p_ramrod->complete_event_flg =
+               !(p_rx_cid->opaque_fid == p_hwfn->hw_info.opaque_fid) ||
+               eq_completion_only;
+
+       rc = qed_spq_post(p_hwfn, p_ent, NULL);
+       if (rc)
+               return rc;
+
+       return qed_sp_release_queue_cid(p_hwfn, p_rx_cid);
+}
+
+int qed_sp_eth_txq_start_ramrod(struct qed_hwfn  *p_hwfn,
+                               u16  opaque_fid,
+                               u32  cid,
+                               struct qed_queue_start_common_params *p_params,
+                               u8  stats_id,
+                               dma_addr_t pbl_addr,
+                               u16 pbl_size,
+                               union qed_qm_pq_params *p_pq_params)
+{
+       struct tx_queue_start_ramrod_data *p_ramrod = NULL;
+       struct qed_spq_entry *p_ent = NULL;
+       struct qed_sp_init_data init_data;
+       struct qed_hw_cid_data *p_tx_cid;
+       u16 pq_id, abs_tx_q_id = 0;
+       int rc = -EINVAL;
+       u8 abs_vport_id;
+
+       /* Store information for the stop */
+       p_tx_cid = &p_hwfn->p_tx_cids[p_params->queue_id];
+       p_tx_cid->cid           = cid;
+       p_tx_cid->opaque_fid    = opaque_fid;
+
+       rc = qed_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id);
+       if (rc)
+               return rc;
+
+       rc = qed_fw_l2_queue(p_hwfn, p_params->queue_id, &abs_tx_q_id);
+       if (rc)
+               return rc;
+
+       /* Get SPQ entry */
+       memset(&init_data, 0, sizeof(init_data));
+       init_data.cid = cid;
+       init_data.opaque_fid = opaque_fid;
+       init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
+
+       rc = qed_sp_init_request(p_hwfn, &p_ent,
+                                ETH_RAMROD_TX_QUEUE_START,
+                                PROTOCOLID_ETH, &init_data);
+       if (rc)
+               return rc;
+
+       p_ramrod                = &p_ent->ramrod.tx_queue_start;
+       p_ramrod->vport_id      = abs_vport_id;
+
+       p_ramrod->sb_id                 = cpu_to_le16(p_params->sb);
+       p_ramrod->sb_index              = p_params->sb_idx;
+       p_ramrod->stats_counter_id      = stats_id;
+
+       p_ramrod->queue_zone_id         = cpu_to_le16(abs_tx_q_id);
+       p_ramrod->pbl_size              = cpu_to_le16(pbl_size);
+       DMA_REGPAIR_LE(p_ramrod->pbl_base_addr, pbl_addr);
+
+       pq_id                   = qed_get_qm_pq(p_hwfn,
+                                               PROTOCOLID_ETH,
+                                               p_pq_params);
+       p_ramrod->qm_pq_id      = cpu_to_le16(pq_id);
+
+       return qed_spq_post(p_hwfn, p_ent, NULL);
+}
+
+static int
+qed_sp_eth_tx_queue_start(struct qed_hwfn *p_hwfn,
+                         u16 opaque_fid,
+                         struct qed_queue_start_common_params *p_params,
+                         dma_addr_t pbl_addr,
+                         u16 pbl_size, void __iomem **pp_doorbell)
+{
+       struct qed_hw_cid_data *p_tx_cid;
+       union qed_qm_pq_params pq_params;
+       u8 abs_stats_id = 0;
+       int rc;
+
+       if (IS_VF(p_hwfn->cdev)) {
+               return qed_vf_pf_txq_start(p_hwfn,
+                                          p_params->queue_id,
+                                          p_params->sb,
+                                          p_params->sb_idx,
+                                          pbl_addr, pbl_size, pp_doorbell);
+       }
+
+       rc = qed_fw_vport(p_hwfn, p_params->vport_id, &abs_stats_id);
+       if (rc)
+               return rc;
+
+       p_tx_cid = &p_hwfn->p_tx_cids[p_params->queue_id];
+       memset(p_tx_cid, 0, sizeof(*p_tx_cid));
+       memset(&pq_params, 0, sizeof(pq_params));
+
+       /* Allocate a CID for the queue */
+       rc = qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_ETH,
+                                &p_tx_cid->cid);
+       if (rc) {
+               DP_NOTICE(p_hwfn, "Failed to acquire cid\n");
+               return rc;
+       }
+       p_tx_cid->b_cid_allocated = true;
+
+       DP_VERBOSE(p_hwfn, QED_MSG_SP,
+                  "opaque_fid=0x%x, cid=0x%x, tx_qid=0x%x, vport_id=0x%x, sb_id=0x%x\n",
+                  opaque_fid, p_tx_cid->cid,
+                  p_params->queue_id, p_params->vport_id, p_params->sb);
+
+       rc = qed_sp_eth_txq_start_ramrod(p_hwfn,
+                                        opaque_fid,
+                                        p_tx_cid->cid,
+                                        p_params,
+                                        abs_stats_id,
+                                        pbl_addr,
+                                        pbl_size,
+                                        &pq_params);
+
+       *pp_doorbell = (u8 __iomem *)p_hwfn->doorbells +
+                                    qed_db_addr(p_tx_cid->cid, DQ_DEMS_LEGACY);
+
+       if (rc)
+               qed_sp_release_queue_cid(p_hwfn, p_tx_cid);
+
+       return rc;
+}
+
+int qed_sp_eth_tx_queue_stop(struct qed_hwfn *p_hwfn, u16 tx_queue_id)
+{
+       struct qed_hw_cid_data *p_tx_cid = &p_hwfn->p_tx_cids[tx_queue_id];
+       struct qed_spq_entry *p_ent = NULL;
+       struct qed_sp_init_data init_data;
+       int rc = -EINVAL;
+
+       if (IS_VF(p_hwfn->cdev))
+               return qed_vf_pf_txq_stop(p_hwfn, tx_queue_id);
+
+       /* Get SPQ entry */
+       memset(&init_data, 0, sizeof(init_data));
+       init_data.cid = p_tx_cid->cid;
+       init_data.opaque_fid = p_tx_cid->opaque_fid;
+       init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
+
+       rc = qed_sp_init_request(p_hwfn, &p_ent,
+                                ETH_RAMROD_TX_QUEUE_STOP,
+                                PROTOCOLID_ETH, &init_data);
+       if (rc)
+               return rc;
+
+       rc = qed_spq_post(p_hwfn, p_ent, NULL);
+       if (rc)
+               return rc;
+
+       return qed_sp_release_queue_cid(p_hwfn, p_tx_cid);
+}
+
+static enum eth_filter_action
+qed_filter_action(enum qed_filter_opcode opcode)
+{
+       enum eth_filter_action action = MAX_ETH_FILTER_ACTION;
+
+       switch (opcode) {
+       case QED_FILTER_ADD:
+               action = ETH_FILTER_ACTION_ADD;
+               break;
+       case QED_FILTER_REMOVE:
+               action = ETH_FILTER_ACTION_REMOVE;
+               break;
+       case QED_FILTER_FLUSH:
+               action = ETH_FILTER_ACTION_REMOVE_ALL;
+               break;
+       default:
+               action = MAX_ETH_FILTER_ACTION;
+       }
+
+       return action;
+}
+
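+/* Worked example: for a MAC of aa:bb:cc:dd:ee:ff the helper below stores
+ * the bytes as fw_msb = {bb, aa}, fw_mid = {dd, cc}, fw_lsb = {ff, ee}
+ * (memory order), so each little-endian 16-bit word reads back as 0xaabb,
+ * 0xccdd and 0xeeff respectively.
+ */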
+static void qed_set_fw_mac_addr(__le16 *fw_msb,
+                               __le16 *fw_mid,
+                               __le16 *fw_lsb,
+                               u8 *mac)
+{
+       ((u8 *)fw_msb)[0] = mac[1];
+       ((u8 *)fw_msb)[1] = mac[0];
+       ((u8 *)fw_mid)[0] = mac[3];
+       ((u8 *)fw_mid)[1] = mac[2];
+       ((u8 *)fw_lsb)[0] = mac[5];
+       ((u8 *)fw_lsb)[1] = mac[4];
+}
+
+static int
+qed_filter_ucast_common(struct qed_hwfn *p_hwfn,
+                       u16 opaque_fid,
+                       struct qed_filter_ucast *p_filter_cmd,
+                       struct vport_filter_update_ramrod_data **pp_ramrod,
+                       struct qed_spq_entry **pp_ent,
+                       enum spq_mode comp_mode,
+                       struct qed_spq_comp_cb *p_comp_data)
+{
+       u8 vport_to_add_to = 0, vport_to_remove_from = 0;
+       struct vport_filter_update_ramrod_data *p_ramrod;
+       struct eth_filter_cmd *p_first_filter;
+       struct eth_filter_cmd *p_second_filter;
+       struct qed_sp_init_data init_data;
+       enum eth_filter_action action;
+       int rc;
+
+       rc = qed_fw_vport(p_hwfn, p_filter_cmd->vport_to_remove_from,
+                         &vport_to_remove_from);
+       if (rc)
+               return rc;
+
+       rc = qed_fw_vport(p_hwfn, p_filter_cmd->vport_to_add_to,
+                         &vport_to_add_to);
+       if (rc)
+               return rc;
+
+       /* Get SPQ entry */
+       memset(&init_data, 0, sizeof(init_data));
+       init_data.cid = qed_spq_get_cid(p_hwfn);
+       init_data.opaque_fid = opaque_fid;
+       init_data.comp_mode = comp_mode;
+       init_data.p_comp_data = p_comp_data;
+
+       rc = qed_sp_init_request(p_hwfn, pp_ent,
+                                ETH_RAMROD_FILTERS_UPDATE,
+                                PROTOCOLID_ETH, &init_data);
+       if (rc)
+               return rc;
+
+       *pp_ramrod = &(*pp_ent)->ramrod.vport_filter_update;
+       p_ramrod = *pp_ramrod;
+       p_ramrod->filter_cmd_hdr.rx = p_filter_cmd->is_rx_filter ? 1 : 0;
+       p_ramrod->filter_cmd_hdr.tx = p_filter_cmd->is_tx_filter ? 1 : 0;
+
+       switch (p_filter_cmd->opcode) {
+       case QED_FILTER_REPLACE:
+       case QED_FILTER_MOVE:
+               p_ramrod->filter_cmd_hdr.cmd_cnt = 2; break;
+       default:
+               p_ramrod->filter_cmd_hdr.cmd_cnt = 1; break;
+       }
+
+       p_first_filter  = &p_ramrod->filter_cmds[0];
+       p_second_filter = &p_ramrod->filter_cmds[1];
+
+       switch (p_filter_cmd->type) {
+       case QED_FILTER_MAC:
+               p_first_filter->type = ETH_FILTER_TYPE_MAC; break;
+       case QED_FILTER_VLAN:
+               p_first_filter->type = ETH_FILTER_TYPE_VLAN; break;
+       case QED_FILTER_MAC_VLAN:
+               p_first_filter->type = ETH_FILTER_TYPE_PAIR; break;
+       case QED_FILTER_INNER_MAC:
+               p_first_filter->type = ETH_FILTER_TYPE_INNER_MAC; break;
+       case QED_FILTER_INNER_VLAN:
+               p_first_filter->type = ETH_FILTER_TYPE_INNER_VLAN; break;
+       case QED_FILTER_INNER_PAIR:
+               p_first_filter->type = ETH_FILTER_TYPE_INNER_PAIR; break;
+       case QED_FILTER_INNER_MAC_VNI_PAIR:
+               p_first_filter->type = ETH_FILTER_TYPE_INNER_MAC_VNI_PAIR;
+               break;
+       case QED_FILTER_MAC_VNI_PAIR:
+               p_first_filter->type = ETH_FILTER_TYPE_MAC_VNI_PAIR; break;
+       case QED_FILTER_VNI:
+               p_first_filter->type = ETH_FILTER_TYPE_VNI; break;
+       }
+
+       if ((p_first_filter->type == ETH_FILTER_TYPE_MAC) ||
+           (p_first_filter->type == ETH_FILTER_TYPE_PAIR) ||
+           (p_first_filter->type == ETH_FILTER_TYPE_INNER_MAC) ||
+           (p_first_filter->type == ETH_FILTER_TYPE_INNER_PAIR) ||
+           (p_first_filter->type == ETH_FILTER_TYPE_INNER_MAC_VNI_PAIR) ||
+           (p_first_filter->type == ETH_FILTER_TYPE_MAC_VNI_PAIR)) {
+               qed_set_fw_mac_addr(&p_first_filter->mac_msb,
+                                   &p_first_filter->mac_mid,
+                                   &p_first_filter->mac_lsb,
+                                   (u8 *)p_filter_cmd->mac);
+       }
+
+       if ((p_first_filter->type == ETH_FILTER_TYPE_VLAN) ||
+           (p_first_filter->type == ETH_FILTER_TYPE_PAIR) ||
+           (p_first_filter->type == ETH_FILTER_TYPE_INNER_VLAN) ||
+           (p_first_filter->type == ETH_FILTER_TYPE_INNER_PAIR))
+               p_first_filter->vlan_id = cpu_to_le16(p_filter_cmd->vlan);
+
+       if ((p_first_filter->type == ETH_FILTER_TYPE_INNER_MAC_VNI_PAIR) ||
+           (p_first_filter->type == ETH_FILTER_TYPE_MAC_VNI_PAIR) ||
+           (p_first_filter->type == ETH_FILTER_TYPE_VNI))
+               p_first_filter->vni = cpu_to_le32(p_filter_cmd->vni);
+
+       if (p_filter_cmd->opcode == QED_FILTER_MOVE) {
+               p_second_filter->type           = p_first_filter->type;
+               p_second_filter->mac_msb        = p_first_filter->mac_msb;
+               p_second_filter->mac_mid        = p_first_filter->mac_mid;
+               p_second_filter->mac_lsb        = p_first_filter->mac_lsb;
+               p_second_filter->vlan_id        = p_first_filter->vlan_id;
+               p_second_filter->vni            = p_first_filter->vni;
+
+               p_first_filter->action = ETH_FILTER_ACTION_REMOVE;
+
+               p_first_filter->vport_id = vport_to_remove_from;
+
+               p_second_filter->action         = ETH_FILTER_ACTION_ADD;
+               p_second_filter->vport_id       = vport_to_add_to;
+       } else if (p_filter_cmd->opcode == QED_FILTER_REPLACE) {
+               p_first_filter->vport_id = vport_to_add_to;
+               memcpy(p_second_filter, p_first_filter,
+                      sizeof(*p_second_filter));
+               p_first_filter->action  = ETH_FILTER_ACTION_REMOVE_ALL;
+               p_second_filter->action = ETH_FILTER_ACTION_ADD;
+       } else {
+               action = qed_filter_action(p_filter_cmd->opcode);
+
+               if (action == MAX_ETH_FILTER_ACTION) {
+                       DP_NOTICE(p_hwfn,
+                                 "%d is not supported yet\n",
+                                 p_filter_cmd->opcode);
+                       return -EINVAL;
+               }
+
+               p_first_filter->action = action;
+               p_first_filter->vport_id = (p_filter_cmd->opcode ==
+                                           QED_FILTER_REMOVE) ?
+                                          vport_to_remove_from :
+                                          vport_to_add_to;
+       }
+
+       return 0;
+}
+
+int qed_sp_eth_filter_ucast(struct qed_hwfn *p_hwfn,
+                           u16 opaque_fid,
+                           struct qed_filter_ucast *p_filter_cmd,
+                           enum spq_mode comp_mode,
+                           struct qed_spq_comp_cb *p_comp_data)
+{
+       struct vport_filter_update_ramrod_data  *p_ramrod       = NULL;
+       struct qed_spq_entry                    *p_ent          = NULL;
+       struct eth_filter_cmd_header            *p_header;
+       int                                     rc;
+
+       rc = qed_filter_ucast_common(p_hwfn, opaque_fid, p_filter_cmd,
+                                    &p_ramrod, &p_ent,
+                                    comp_mode, p_comp_data);
+       if (rc != 0) {
+               DP_ERR(p_hwfn, "Unicast filter command failed %d\n", rc);
+               return rc;
+       }
+       p_header = &p_ramrod->filter_cmd_hdr;
+       p_header->assert_on_error = p_filter_cmd->assert_on_error;
+
+       rc = qed_spq_post(p_hwfn, p_ent, NULL);
+       if (rc != 0) {
+               DP_ERR(p_hwfn,
+                      "Unicast filter ADD command failed %d\n",
+                      rc);
+               return rc;
+       }
+
+       DP_VERBOSE(p_hwfn, QED_MSG_SP,
+                  "Unicast filter configured, opcode = %s, type = %s, cmd_cnt = %d, is_rx_filter = %d, is_tx_filter = %d\n",
+                  (p_filter_cmd->opcode == QED_FILTER_ADD) ? "ADD" :
+                  ((p_filter_cmd->opcode == QED_FILTER_REMOVE) ?
+                  "REMOVE" :
+                  ((p_filter_cmd->opcode == QED_FILTER_MOVE) ?
+                   "MOVE" : "REPLACE")),
+                  (p_filter_cmd->type == QED_FILTER_MAC) ? "MAC" :
+                  ((p_filter_cmd->type == QED_FILTER_VLAN) ?
+                   "VLAN" : "MAC & VLAN"),
+                  p_ramrod->filter_cmd_hdr.cmd_cnt,
+                  p_filter_cmd->is_rx_filter,
+                  p_filter_cmd->is_tx_filter);
+       DP_VERBOSE(p_hwfn, QED_MSG_SP,
+                  "vport_to_add_to = %d, vport_to_remove_from = %d, mac = %02x:%02x:%02x:%02x:%02x:%02x, vlan = %d\n",
+                  p_filter_cmd->vport_to_add_to,
+                  p_filter_cmd->vport_to_remove_from,
+                  p_filter_cmd->mac[0],
+                  p_filter_cmd->mac[1],
+                  p_filter_cmd->mac[2],
+                  p_filter_cmd->mac[3],
+                  p_filter_cmd->mac[4],
+                  p_filter_cmd->mac[5],
+                  p_filter_cmd->vlan);
+
+       return 0;
+}
+
+/*******************************************************************************
+ * Description:
+ *         Calculate a CRC32 over a buffer.
+ *         Note: crc32_length MUST be a multiple of 8.
+ * Return:
+ *         The resulting CRC32 (the seed is returned unchanged on invalid input)
+ ******************************************************************************/
+static u32 qed_calc_crc32c(u8 *crc32_packet,
+                          u32 crc32_length,
+                          u32 crc32_seed,
+                          u8 complement)
+{
+       u32 byte = 0;
+       u32 bit = 0;
+       u8 msb = 0;
+       u8 current_byte = 0;
+       u32 crc32_result = crc32_seed;
+
+       if ((!crc32_packet) ||
+           (crc32_length == 0) ||
+           ((crc32_length % 8) != 0))
+               return crc32_result;
+       for (byte = 0; byte < crc32_length; byte++) {
+               current_byte = crc32_packet[byte];
+               for (bit = 0; bit < 8; bit++) {
+                       msb = (u8)(crc32_result >> 31);
+                       crc32_result = crc32_result << 1;
+                       if (msb != (0x1 & (current_byte >> bit))) {
+                               crc32_result = crc32_result ^ CRC32_POLY;
+                               crc32_result |= 1; /*crc32_result[0] = 1;*/
+                       }
+               }
+       }
+       return crc32_result;
+}
+
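+/* Pad the 6-byte MAC with zeroes to the 8-byte length qed_calc_crc32c()
+ * requires before computing the CRC.
+ */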
+static inline u32 qed_crc32c_le(u32 seed,
+                               u8 *mac,
+                               u32 len)
+{
+       u32 packet_buf[2] = { 0 };
+
+       memcpy((u8 *)(&packet_buf[0]), &mac[0], 6);
+       return qed_calc_crc32c((u8 *)packet_buf, 8, seed, 0);
+}
+
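+/* Hash a multicast MAC address into one of the 256 approximate-match bins */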
+u8 qed_mcast_bin_from_mac(u8 *mac)
+{
+       u32 crc = qed_crc32c_le(ETH_MULTICAST_BIN_FROM_MAC_SEED,
+                               mac, ETH_ALEN);
+
+       return crc & 0xff;
+}
+
+static int
+qed_sp_eth_filter_mcast(struct qed_hwfn *p_hwfn,
+                       u16 opaque_fid,
+                       struct qed_filter_mcast *p_filter_cmd,
+                       enum spq_mode comp_mode,
+                       struct qed_spq_comp_cb *p_comp_data)
+{
+       unsigned long bins[ETH_MULTICAST_MAC_BINS_IN_REGS];
+       struct vport_update_ramrod_data *p_ramrod = NULL;
+       struct qed_spq_entry *p_ent = NULL;
+       struct qed_sp_init_data init_data;
+       u8 abs_vport_id = 0;
+       int rc, i;
+
+       if (p_filter_cmd->opcode == QED_FILTER_ADD) {
+               rc = qed_fw_vport(p_hwfn, p_filter_cmd->vport_to_add_to,
+                                 &abs_vport_id);
+               if (rc)
+                       return rc;
+       } else {
+               rc = qed_fw_vport(p_hwfn, p_filter_cmd->vport_to_remove_from,
+                                 &abs_vport_id);
+               if (rc)
+                       return rc;
+       }
+
+       /* Get SPQ entry */
+       memset(&init_data, 0, sizeof(init_data));
+       init_data.cid = qed_spq_get_cid(p_hwfn);
+       init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+       init_data.comp_mode = comp_mode;
+       init_data.p_comp_data = p_comp_data;
+
+       rc = qed_sp_init_request(p_hwfn, &p_ent,
+                                ETH_RAMROD_VPORT_UPDATE,
+                                PROTOCOLID_ETH, &init_data);
+       if (rc) {
+               DP_ERR(p_hwfn, "Multi-cast command failed %d\n", rc);
+               return rc;
+       }
+
+       p_ramrod = &p_ent->ramrod.vport_update;
+       p_ramrod->common.update_approx_mcast_flg = 1;
+
+       /* explicitly clear out the entire vector */
+       memset(&p_ramrod->approx_mcast.bins, 0,
+              sizeof(p_ramrod->approx_mcast.bins));
+       memset(bins, 0, sizeof(bins));
+       /* The filter ADD operation is an explicit set operation; it removes
+        * any existing filters for the vport.
+        */
+       if (p_filter_cmd->opcode == QED_FILTER_ADD) {
+               for (i = 0; i < p_filter_cmd->num_mc_addrs; i++) {
+                       u32 bit;
+
+                       bit = qed_mcast_bin_from_mac(p_filter_cmd->mac[i]);
+                       __set_bit(bit, bins);
+               }
+
+               /* Convert to the correct endianness */
+               for (i = 0; i < ETH_MULTICAST_MAC_BINS_IN_REGS; i++) {
+                       u32 *p_bins = (u32 *)bins;
+                       struct vport_update_ramrod_mcast *approx_mcast;
+
+                       approx_mcast = &p_ramrod->approx_mcast;
+                       approx_mcast->bins[i] = cpu_to_le32(p_bins[i]);
+               }
+       }
+
+       p_ramrod->common.vport_id = abs_vport_id;
+
+       return qed_spq_post(p_hwfn, p_ent, NULL);
+}
+
+static int qed_filter_mcast_cmd(struct qed_dev *cdev,
+                               struct qed_filter_mcast *p_filter_cmd,
+                               enum spq_mode comp_mode,
+                               struct qed_spq_comp_cb *p_comp_data)
+{
+       int rc = 0;
+       int i;
+
+       /* Only ADD and REMOVE operations are supported for multicast */
+       if ((p_filter_cmd->opcode != QED_FILTER_ADD &&
+            p_filter_cmd->opcode != QED_FILTER_REMOVE) ||
+           (p_filter_cmd->num_mc_addrs > QED_MAX_MC_ADDRS))
+               return -EINVAL;
+
+       for_each_hwfn(cdev, i) {
+               struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+
+               u16 opaque_fid;
+
+               if (IS_VF(cdev)) {
+                       qed_vf_pf_filter_mcast(p_hwfn, p_filter_cmd);
+                       continue;
+               }
+
+               opaque_fid = p_hwfn->hw_info.opaque_fid;
+
+               rc = qed_sp_eth_filter_mcast(p_hwfn,
+                                            opaque_fid,
+                                            p_filter_cmd,
+                                            comp_mode,
+                                            p_comp_data);
+       }
+       return rc;
+}
+
+static int qed_filter_ucast_cmd(struct qed_dev *cdev,
+                               struct qed_filter_ucast *p_filter_cmd,
+                               enum spq_mode comp_mode,
+                               struct qed_spq_comp_cb *p_comp_data)
+{
+       int rc = 0;
+       int i;
+
+       for_each_hwfn(cdev, i) {
+               struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+               u16 opaque_fid;
+
+               if (IS_VF(cdev)) {
+                       rc = qed_vf_pf_filter_ucast(p_hwfn, p_filter_cmd);
+                       continue;
+               }
+
+               opaque_fid = p_hwfn->hw_info.opaque_fid;
+
+               rc = qed_sp_eth_filter_ucast(p_hwfn,
+                                            opaque_fid,
+                                            p_filter_cmd,
+                                            comp_mode,
+                                            p_comp_data);
+               if (rc != 0)
+                       break;
+       }
+
+       return rc;
+}
+
+/* Statistics related code */
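+/* Resolve the address/length of the Pstorm per-queue statistics: read from
+ * BAR0 SDM RAM for a PF, or from the PF's acquire response for a VF.
+ */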
+static void __qed_get_vport_pstats_addrlen(struct qed_hwfn *p_hwfn,
+                                          u32 *p_addr,
+                                          u32 *p_len, u16 statistics_bin)
+{
+       if (IS_PF(p_hwfn->cdev)) {
+               *p_addr = BAR0_MAP_REG_PSDM_RAM +
+                   PSTORM_QUEUE_STAT_OFFSET(statistics_bin);
+               *p_len = sizeof(struct eth_pstorm_per_queue_stat);
+       } else {
+               struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
+               struct pfvf_acquire_resp_tlv *p_resp = &p_iov->acquire_resp;
+
+               *p_addr = p_resp->pfdev_info.stats_info.pstats.address;
+               *p_len = p_resp->pfdev_info.stats_info.pstats.len;
+       }
+}
+
+static void __qed_get_vport_pstats(struct qed_hwfn *p_hwfn,
+                                  struct qed_ptt *p_ptt,
+                                  struct qed_eth_stats *p_stats,
+                                  u16 statistics_bin)
+{
+       struct eth_pstorm_per_queue_stat pstats;
+       u32 pstats_addr = 0, pstats_len = 0;
+
+       __qed_get_vport_pstats_addrlen(p_hwfn, &pstats_addr, &pstats_len,
+                                      statistics_bin);
+
+       memset(&pstats, 0, sizeof(pstats));
+       qed_memcpy_from(p_hwfn, p_ptt, &pstats, pstats_addr, pstats_len);
+
+       p_stats->tx_ucast_bytes += HILO_64_REGPAIR(pstats.sent_ucast_bytes);
+       p_stats->tx_mcast_bytes += HILO_64_REGPAIR(pstats.sent_mcast_bytes);
+       p_stats->tx_bcast_bytes += HILO_64_REGPAIR(pstats.sent_bcast_bytes);
+       p_stats->tx_ucast_pkts += HILO_64_REGPAIR(pstats.sent_ucast_pkts);
+       p_stats->tx_mcast_pkts += HILO_64_REGPAIR(pstats.sent_mcast_pkts);
+       p_stats->tx_bcast_pkts += HILO_64_REGPAIR(pstats.sent_bcast_pkts);
+       p_stats->tx_err_drop_pkts += HILO_64_REGPAIR(pstats.error_drop_pkts);
+}
+
+static void __qed_get_vport_tstats(struct qed_hwfn *p_hwfn,
+                                  struct qed_ptt *p_ptt,
+                                  struct qed_eth_stats *p_stats,
+                                  u16 statistics_bin)
+{
+       struct tstorm_per_port_stat tstats;
+       u32 tstats_addr, tstats_len;
+
+       if (IS_PF(p_hwfn->cdev)) {
+               tstats_addr = BAR0_MAP_REG_TSDM_RAM +
+                   TSTORM_PORT_STAT_OFFSET(MFW_PORT(p_hwfn));
+               tstats_len = sizeof(struct tstorm_per_port_stat);
+       } else {
+               struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
+               struct pfvf_acquire_resp_tlv *p_resp = &p_iov->acquire_resp;
+
+               tstats_addr = p_resp->pfdev_info.stats_info.tstats.address;
+               tstats_len = p_resp->pfdev_info.stats_info.tstats.len;
+       }
+
+       memset(&tstats, 0, sizeof(tstats));
+       qed_memcpy_from(p_hwfn, p_ptt, &tstats, tstats_addr, tstats_len);
+
+       p_stats->mftag_filter_discards +=
+               HILO_64_REGPAIR(tstats.mftag_filter_discard);
+       p_stats->mac_filter_discards +=
+               HILO_64_REGPAIR(tstats.eth_mac_filter_discard);
+}
+
+static void __qed_get_vport_ustats_addrlen(struct qed_hwfn *p_hwfn,
+                                          u32 *p_addr,
+                                          u32 *p_len, u16 statistics_bin)
+{
+       if (IS_PF(p_hwfn->cdev)) {
+               *p_addr = BAR0_MAP_REG_USDM_RAM +
+                   USTORM_QUEUE_STAT_OFFSET(statistics_bin);
+               *p_len = sizeof(struct eth_ustorm_per_queue_stat);
+       } else {
+               struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
+               struct pfvf_acquire_resp_tlv *p_resp = &p_iov->acquire_resp;
+
+               *p_addr = p_resp->pfdev_info.stats_info.ustats.address;
+               *p_len = p_resp->pfdev_info.stats_info.ustats.len;
+       }
+}
+
+static void __qed_get_vport_ustats(struct qed_hwfn *p_hwfn,
+                                  struct qed_ptt *p_ptt,
+                                  struct qed_eth_stats *p_stats,
+                                  u16 statistics_bin)
+{
+       struct eth_ustorm_per_queue_stat ustats;
+       u32 ustats_addr = 0, ustats_len = 0;
+
+       __qed_get_vport_ustats_addrlen(p_hwfn, &ustats_addr, &ustats_len,
+                                      statistics_bin);
+
+       memset(&ustats, 0, sizeof(ustats));
+       qed_memcpy_from(p_hwfn, p_ptt, &ustats, ustats_addr, ustats_len);
+
+       p_stats->rx_ucast_bytes += HILO_64_REGPAIR(ustats.rcv_ucast_bytes);
+       p_stats->rx_mcast_bytes += HILO_64_REGPAIR(ustats.rcv_mcast_bytes);
+       p_stats->rx_bcast_bytes += HILO_64_REGPAIR(ustats.rcv_bcast_bytes);
+       p_stats->rx_ucast_pkts += HILO_64_REGPAIR(ustats.rcv_ucast_pkts);
+       p_stats->rx_mcast_pkts += HILO_64_REGPAIR(ustats.rcv_mcast_pkts);
+       p_stats->rx_bcast_pkts += HILO_64_REGPAIR(ustats.rcv_bcast_pkts);
+}
+
+static void __qed_get_vport_mstats_addrlen(struct qed_hwfn *p_hwfn,
+                                          u32 *p_addr,
+                                          u32 *p_len, u16 statistics_bin)
+{
+       if (IS_PF(p_hwfn->cdev)) {
+               *p_addr = BAR0_MAP_REG_MSDM_RAM +
+                   MSTORM_QUEUE_STAT_OFFSET(statistics_bin);
+               *p_len = sizeof(struct eth_mstorm_per_queue_stat);
+       } else {
+               struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
+               struct pfvf_acquire_resp_tlv *p_resp = &p_iov->acquire_resp;
+
+               *p_addr = p_resp->pfdev_info.stats_info.mstats.address;
+               *p_len = p_resp->pfdev_info.stats_info.mstats.len;
+       }
+}
+
+static void __qed_get_vport_mstats(struct qed_hwfn *p_hwfn,
+                                  struct qed_ptt *p_ptt,
+                                  struct qed_eth_stats *p_stats,
+                                  u16 statistics_bin)
+{
+       struct eth_mstorm_per_queue_stat mstats;
+       u32 mstats_addr = 0, mstats_len = 0;
+
+       __qed_get_vport_mstats_addrlen(p_hwfn, &mstats_addr, &mstats_len,
+                                      statistics_bin);
+
+       memset(&mstats, 0, sizeof(mstats));
+       qed_memcpy_from(p_hwfn, p_ptt, &mstats, mstats_addr, mstats_len);
+
+       p_stats->no_buff_discards += HILO_64_REGPAIR(mstats.no_buff_discard);
+       p_stats->packet_too_big_discard +=
+               HILO_64_REGPAIR(mstats.packet_too_big_discard);
+       p_stats->ttl0_discard += HILO_64_REGPAIR(mstats.ttl0_discard);
+       p_stats->tpa_coalesced_pkts +=
+               HILO_64_REGPAIR(mstats.tpa_coalesced_pkts);
+       p_stats->tpa_coalesced_events +=
+               HILO_64_REGPAIR(mstats.tpa_coalesced_events);
+       p_stats->tpa_aborts_num += HILO_64_REGPAIR(mstats.tpa_aborts_num);
+       p_stats->tpa_coalesced_bytes +=
+               HILO_64_REGPAIR(mstats.tpa_coalesced_bytes);
+}
+
+static void __qed_get_vport_port_stats(struct qed_hwfn *p_hwfn,
+                                      struct qed_ptt *p_ptt,
+                                      struct qed_eth_stats *p_stats)
+{
+       struct port_stats port_stats;
+       int j;
+
+       memset(&port_stats, 0, sizeof(port_stats));
+
+       qed_memcpy_from(p_hwfn, p_ptt, &port_stats,
+                       p_hwfn->mcp_info->port_addr +
+                       offsetof(struct public_port, stats),
+                       sizeof(port_stats));
+
+       p_stats->rx_64_byte_packets             += port_stats.eth.r64;
+       p_stats->rx_65_to_127_byte_packets      += port_stats.eth.r127;
+       p_stats->rx_128_to_255_byte_packets     += port_stats.eth.r255;
+       p_stats->rx_256_to_511_byte_packets     += port_stats.eth.r511;
+       p_stats->rx_512_to_1023_byte_packets    += port_stats.eth.r1023;
+       p_stats->rx_1024_to_1518_byte_packets   += port_stats.eth.r1518;
+       p_stats->rx_1519_to_1522_byte_packets   += port_stats.eth.r1522;
+       p_stats->rx_1519_to_2047_byte_packets   += port_stats.eth.r2047;
+       p_stats->rx_2048_to_4095_byte_packets   += port_stats.eth.r4095;
+       p_stats->rx_4096_to_9216_byte_packets   += port_stats.eth.r9216;
+       p_stats->rx_9217_to_16383_byte_packets  += port_stats.eth.r16383;
+       p_stats->rx_crc_errors                  += port_stats.eth.rfcs;
+       p_stats->rx_mac_crtl_frames             += port_stats.eth.rxcf;
+       p_stats->rx_pause_frames                += port_stats.eth.rxpf;
+       p_stats->rx_pfc_frames                  += port_stats.eth.rxpp;
+       p_stats->rx_align_errors                += port_stats.eth.raln;
+       p_stats->rx_carrier_errors              += port_stats.eth.rfcr;
+       p_stats->rx_oversize_packets            += port_stats.eth.rovr;
+       p_stats->rx_jabbers                     += port_stats.eth.rjbr;
+       p_stats->rx_undersize_packets           += port_stats.eth.rund;
+       p_stats->rx_fragments                   += port_stats.eth.rfrg;
+       p_stats->tx_64_byte_packets             += port_stats.eth.t64;
+       p_stats->tx_65_to_127_byte_packets      += port_stats.eth.t127;
+       p_stats->tx_128_to_255_byte_packets     += port_stats.eth.t255;
+       p_stats->tx_256_to_511_byte_packets     += port_stats.eth.t511;
+       p_stats->tx_512_to_1023_byte_packets    += port_stats.eth.t1023;
+       p_stats->tx_1024_to_1518_byte_packets   += port_stats.eth.t1518;
+       p_stats->tx_1519_to_2047_byte_packets   += port_stats.eth.t2047;
+       p_stats->tx_2048_to_4095_byte_packets   += port_stats.eth.t4095;
+       p_stats->tx_4096_to_9216_byte_packets   += port_stats.eth.t9216;
+       p_stats->tx_9217_to_16383_byte_packets  += port_stats.eth.t16383;
+       p_stats->tx_pause_frames                += port_stats.eth.txpf;
+       p_stats->tx_pfc_frames                  += port_stats.eth.txpp;
+       p_stats->tx_lpi_entry_count             += port_stats.eth.tlpiec;
+       p_stats->tx_total_collisions            += port_stats.eth.tncl;
+       p_stats->rx_mac_bytes                   += port_stats.eth.rbyte;
+       p_stats->rx_mac_uc_packets              += port_stats.eth.rxuca;
+       p_stats->rx_mac_mc_packets              += port_stats.eth.rxmca;
+       p_stats->rx_mac_bc_packets              += port_stats.eth.rxbca;
+       p_stats->rx_mac_frames_ok               += port_stats.eth.rxpok;
+       p_stats->tx_mac_bytes                   += port_stats.eth.tbyte;
+       p_stats->tx_mac_uc_packets              += port_stats.eth.txuca;
+       p_stats->tx_mac_mc_packets              += port_stats.eth.txmca;
+       p_stats->tx_mac_bc_packets              += port_stats.eth.txbca;
+       p_stats->tx_mac_ctrl_frames             += port_stats.eth.txcf;
+       for (j = 0; j < 8; j++) {
+               p_stats->brb_truncates  += port_stats.brb.brb_truncate[j];
+               p_stats->brb_discards   += port_stats.brb.brb_discard[j];
+       }
+}
+
+static void __qed_get_vport_stats(struct qed_hwfn *p_hwfn,
+                                 struct qed_ptt *p_ptt,
+                                 struct qed_eth_stats *stats,
+                                 u16 statistics_bin, bool b_get_port_stats)
+{
+       __qed_get_vport_mstats(p_hwfn, p_ptt, stats, statistics_bin);
+       __qed_get_vport_ustats(p_hwfn, p_ptt, stats, statistics_bin);
+       __qed_get_vport_tstats(p_hwfn, p_ptt, stats, statistics_bin);
+       __qed_get_vport_pstats(p_hwfn, p_ptt, stats, statistics_bin);
+
+       if (b_get_port_stats && p_hwfn->mcp_info)
+               __qed_get_vport_port_stats(p_hwfn, p_ptt, stats);
+}
+
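+/* Accumulate the per-storm queue statistics (and, for PFs, the MAC/port
+ * statistics) of every hwfn into a single qed_eth_stats structure.
+ */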
+static void _qed_get_vport_stats(struct qed_dev *cdev,
+                                struct qed_eth_stats *stats)
+{
+       u8 fw_vport = 0;
+       int i;
+
+       memset(stats, 0, sizeof(*stats));
+
+       for_each_hwfn(cdev, i) {
+               struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+               struct qed_ptt *p_ptt = IS_PF(cdev) ? qed_ptt_acquire(p_hwfn)
+                                                   :  NULL;
+
+               if (IS_PF(cdev)) {
+                       /* The main vport is at relative index 0 */
+                       if (qed_fw_vport(p_hwfn, 0, &fw_vport)) {
+                               DP_ERR(p_hwfn, "No vport available!\n");
+                               goto out;
+                       }
+               }
+
+               if (IS_PF(cdev) && !p_ptt) {
+                       DP_ERR(p_hwfn, "Failed to acquire ptt\n");
+                       continue;
+               }
+
+               __qed_get_vport_stats(p_hwfn, p_ptt, stats, fw_vport,
+                                     IS_PF(cdev) ? true : false);
+
+out:
+               if (IS_PF(cdev) && p_ptt)
+                       qed_ptt_release(p_hwfn, p_ptt);
+       }
+}
+
+void qed_get_vport_stats(struct qed_dev *cdev,
+                        struct qed_eth_stats *stats)
+{
+       u32 i;
+
+       if (!cdev) {
+               memset(stats, 0, sizeof(*stats));
+               return;
+       }
+
+       _qed_get_vport_stats(cdev, stats);
+
+       if (!cdev->reset_stats)
+               return;
+
+       /* Reduce the statistics baseline */
+       for (i = 0; i < sizeof(struct qed_eth_stats) / sizeof(u64); i++)
+               ((u64 *)stats)[i] -= ((u64 *)cdev->reset_stats)[i];
+}
+
+/* Zeroes the V-PORT specific portion of stats (port stats remain untouched) */
+void qed_reset_vport_stats(struct qed_dev *cdev)
+{
+       int i;
+
+       for_each_hwfn(cdev, i) {
+               struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+               struct eth_mstorm_per_queue_stat mstats;
+               struct eth_ustorm_per_queue_stat ustats;
+               struct eth_pstorm_per_queue_stat pstats;
+               struct qed_ptt *p_ptt = IS_PF(cdev) ? qed_ptt_acquire(p_hwfn)
+                                                   : NULL;
+               u32 addr = 0, len = 0;
+
+               if (IS_PF(cdev) && !p_ptt) {
+                       DP_ERR(p_hwfn, "Failed to acquire ptt\n");
+                       continue;
+               }
+
+               memset(&mstats, 0, sizeof(mstats));
+               __qed_get_vport_mstats_addrlen(p_hwfn, &addr, &len, 0);
+               qed_memcpy_to(p_hwfn, p_ptt, addr, &mstats, len);
+
+               memset(&ustats, 0, sizeof(ustats));
+               __qed_get_vport_ustats_addrlen(p_hwfn, &addr, &len, 0);
+               qed_memcpy_to(p_hwfn, p_ptt, addr, &ustats, len);
+
+               memset(&pstats, 0, sizeof(pstats));
+               __qed_get_vport_pstats_addrlen(p_hwfn, &addr, &len, 0);
+               qed_memcpy_to(p_hwfn, p_ptt, addr, &pstats, len);
+
+               if (IS_PF(cdev))
+                       qed_ptt_release(p_hwfn, p_ptt);
+       }
+
+       /* PORT statistics are not necessarily reset, so we need to
+        * read and create a baseline for future statistics.
+        */
+       if (!cdev->reset_stats)
+               DP_INFO(cdev, "Reset stats not allocated\n");
+       else
+               _qed_get_vport_stats(cdev, cdev->reset_stats);
+}
+
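+/* Fill the L2-specific device information exposed to the protocol driver:
+ * number of queues and VLAN filters, and the port MAC address.
+ */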
+static int qed_fill_eth_dev_info(struct qed_dev *cdev,
+                                struct qed_dev_eth_info *info)
+{
+       int i;
+
+       memset(info, 0, sizeof(*info));
+
+       info->num_tc = 1;
+
+       if (IS_PF(cdev)) {
+               int max_vf_vlan_filters = 0;
+
+               if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
+                       for_each_hwfn(cdev, i)
+                           info->num_queues +=
+                           FEAT_NUM(&cdev->hwfns[i], QED_PF_L2_QUE);
+                       if (cdev->int_params.fp_msix_cnt)
+                               info->num_queues =
+                                   min_t(u8, info->num_queues,
+                                         cdev->int_params.fp_msix_cnt);
+               } else {
+                       info->num_queues = cdev->num_hwfns;
+               }
+
+               if (IS_QED_SRIOV(cdev))
+                       max_vf_vlan_filters = cdev->p_iov_info->total_vfs *
+                                             QED_ETH_VF_NUM_VLAN_FILTERS;
+               info->num_vlan_filters = RESC_NUM(&cdev->hwfns[0], QED_VLAN) -
+                                        max_vf_vlan_filters;
+
+               ether_addr_copy(info->port_mac,
+                               cdev->hwfns[0].hw_info.hw_mac_addr);
+       } else {
+               qed_vf_get_num_rxqs(QED_LEADING_HWFN(cdev), &info->num_queues);
+               if (cdev->num_hwfns > 1) {
+                       u8 queues = 0;
+
+                       qed_vf_get_num_rxqs(&cdev->hwfns[1], &queues);
+                       info->num_queues += queues;
+               }
+
+               qed_vf_get_num_vlan_filters(&cdev->hwfns[0],
+                                           &info->num_vlan_filters);
+               qed_vf_get_port_mac(&cdev->hwfns[0], info->port_mac);
+       }
+
+       qed_fill_dev_info(cdev, &info->common);
+
+       if (IS_VF(cdev))
+               memset(info->common.hw_mac, 0, ETH_ALEN);
+
+       return 0;
+}
+
+static void qed_register_eth_ops(struct qed_dev *cdev,
+                                struct qed_eth_cb_ops *ops, void *cookie)
+{
+       cdev->protocol_ops.eth = ops;
+       cdev->ops_cookie = cookie;
+
+       /* For VF, we start bulletin reading */
+       if (IS_VF(cdev))
+               qed_vf_start_iov_wq(cdev);
+}
+
+static bool qed_check_mac(struct qed_dev *cdev, u8 *mac)
+{
+       if (IS_PF(cdev))
+               return true;
+
+       return qed_vf_check_mac(&cdev->hwfns[0], mac);
+}
+
+static int qed_start_vport(struct qed_dev *cdev,
+                          struct qed_start_vport_params *params)
+{
+       int rc, i;
+
+       for_each_hwfn(cdev, i) {
+               struct qed_sp_vport_start_params start = { 0 };
+               struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+
+               start.tpa_mode = params->gro_enable ? QED_TPA_MODE_GRO :
+                                                       QED_TPA_MODE_NONE;
+               start.remove_inner_vlan = params->remove_inner_vlan;
+               start.only_untagged = true;     /* untagged only */
+               start.drop_ttl0 = params->drop_ttl0;
+               start.opaque_fid = p_hwfn->hw_info.opaque_fid;
+               start.concrete_fid = p_hwfn->hw_info.concrete_fid;
+               start.vport_id = params->vport_id;
+               start.max_buffers_per_cqe = 16;
+               start.mtu = params->mtu;
+
+               rc = qed_sp_vport_start(p_hwfn, &start);
+               if (rc) {
+                       DP_ERR(cdev, "Failed to start VPORT\n");
+                       return rc;
+               }
+
+               qed_hw_start_fastpath(p_hwfn);
+
+               DP_VERBOSE(cdev, (QED_MSG_SPQ | NETIF_MSG_IFUP),
+                          "Started V-PORT %d with MTU %d\n",
+                          start.vport_id, start.mtu);
+       }
+
+       if (params->clear_stats)
+               qed_reset_vport_stats(cdev);
+
+       return 0;
+}
+
+static int qed_stop_vport(struct qed_dev *cdev,
+                         u8 vport_id)
+{
+       int rc, i;
+
+       for_each_hwfn(cdev, i) {
+               struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+
+               rc = qed_sp_vport_stop(p_hwfn,
+                                      p_hwfn->hw_info.opaque_fid,
+                                      vport_id);
+
+               if (rc) {
+                       DP_ERR(cdev, "Failed to stop VPORT\n");
+                       return rc;
+               }
+       }
+       return 0;
+}
+
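+/* Translate the protocol driver's vport-update request into slow-path
+ * parameters (including the per-engine RSS remapping needed for CMT) and
+ * issue a vport-update ramrod on every hwfn.
+ */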
+static int qed_update_vport(struct qed_dev *cdev,
+                           struct qed_update_vport_params *params)
+{
+       struct qed_sp_vport_update_params sp_params;
+       struct qed_rss_params sp_rss_params;
+       int rc, i;
+
+       if (!cdev)
+               return -ENODEV;
+
+       memset(&sp_params, 0, sizeof(sp_params));
+       memset(&sp_rss_params, 0, sizeof(sp_rss_params));
+
+       /* Translate protocol params into sp params */
+       sp_params.vport_id = params->vport_id;
+       sp_params.update_vport_active_rx_flg =
+               params->update_vport_active_flg;
+       sp_params.update_vport_active_tx_flg =
+               params->update_vport_active_flg;
+       sp_params.vport_active_rx_flg = params->vport_active_flg;
+       sp_params.vport_active_tx_flg = params->vport_active_flg;
+       sp_params.update_tx_switching_flg = params->update_tx_switching_flg;
+       sp_params.tx_switching_flg = params->tx_switching_flg;
+       sp_params.accept_any_vlan = params->accept_any_vlan;
+       sp_params.update_accept_any_vlan_flg =
+               params->update_accept_any_vlan_flg;
+
+       /* RSS is a bit tricky, since the upper layer isn't familiar with
+        * hwfns; for CMT the RSS indirection values must be remapped per
+        * engine.
+        */
+       if (cdev->num_hwfns > 1 && params->update_rss_flg) {
+               struct qed_update_vport_rss_params *rss =
+                       &params->rss_params;
+               int k, max = 0;
+
+               /* Find largest entry, since it's possible RSS needs to
+                * be disabled [in case only 1 queue per-hwfn]
+                */
+               for (k = 0; k < QED_RSS_IND_TABLE_SIZE; k++)
+                       max = (max > rss->rss_ind_table[k]) ?
+                               max : rss->rss_ind_table[k];
+
+               /* Either fix RSS values or disable RSS */
+               if (cdev->num_hwfns < max + 1) {
+                       int divisor = (max + cdev->num_hwfns - 1) /
+                               cdev->num_hwfns;
+
+                       DP_VERBOSE(cdev, (QED_MSG_SPQ | NETIF_MSG_IFUP),
+                                  "CMT - fixing RSS values (modulo %02x)\n",
+                                  divisor);
+
+                       for (k = 0; k < QED_RSS_IND_TABLE_SIZE; k++)
+                               rss->rss_ind_table[k] =
+                                       rss->rss_ind_table[k] % divisor;
+               } else {
+                       DP_VERBOSE(cdev, (QED_MSG_SPQ | NETIF_MSG_IFUP),
+                                  "CMT - 1 queue per-hwfn; Disabling RSS\n");
+                       params->update_rss_flg = 0;
+               }
+       }
+
+       /* Now, update the RSS configuration for actual configuration */
+       if (params->update_rss_flg) {
+               sp_rss_params.update_rss_config = 1;
+               sp_rss_params.rss_enable = 1;
+               sp_rss_params.update_rss_capabilities = 1;
+               sp_rss_params.update_rss_ind_table = 1;
+               sp_rss_params.update_rss_key = 1;
+               sp_rss_params.rss_caps = params->rss_params.rss_caps;
+               sp_rss_params.rss_table_size_log = 7; /* 2^7 = 128 */
+               memcpy(sp_rss_params.rss_ind_table,
+                      params->rss_params.rss_ind_table,
+                      QED_RSS_IND_TABLE_SIZE * sizeof(u16));
+               memcpy(sp_rss_params.rss_key, params->rss_params.rss_key,
+                      QED_RSS_KEY_SIZE * sizeof(u32));
+       }
+       sp_params.rss_params = &sp_rss_params;
+
+       for_each_hwfn(cdev, i) {
+               struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+
+               sp_params.opaque_fid = p_hwfn->hw_info.opaque_fid;
+               rc = qed_sp_vport_update(p_hwfn, &sp_params,
+                                        QED_SPQ_MODE_EBLOCK,
+                                        NULL);
+               if (rc) {
+                       DP_ERR(cdev, "Failed to update VPORT\n");
+                       return rc;
+               }
+
+               DP_VERBOSE(cdev, (QED_MSG_SPQ | NETIF_MSG_IFUP),
+                          "Updated V-PORT %d: active_flag %d [update %d]\n",
+                          params->vport_id, params->vport_active_flg,
+                          params->update_vport_active_flg);
+       }
+
+       return 0;
+}
+
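+/* Start an Rx queue on the hwfn that owns it; in CMT (100g) mode queues are
+ * spread between the two engines, so the queue ID is rebased per engine.
+ */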
+static int qed_start_rxq(struct qed_dev *cdev,
+                        struct qed_queue_start_common_params *params,
+                        u16 bd_max_bytes,
+                        dma_addr_t bd_chain_phys_addr,
+                        dma_addr_t cqe_pbl_addr,
+                        u16 cqe_pbl_size,
+                        void __iomem **pp_prod)
+{
+       int rc, hwfn_index;
+       struct qed_hwfn *p_hwfn;
+
+       hwfn_index = params->rss_id % cdev->num_hwfns;
+       p_hwfn = &cdev->hwfns[hwfn_index];
+
+       /* Fix queue ID in 100g mode */
+       params->queue_id /= cdev->num_hwfns;
+
+       rc = qed_sp_eth_rx_queue_start(p_hwfn,
+                                      p_hwfn->hw_info.opaque_fid,
+                                      params,
+                                      bd_max_bytes,
+                                      bd_chain_phys_addr,
+                                      cqe_pbl_addr,
+                                      cqe_pbl_size,
+                                      pp_prod);
+
+       if (rc) {
+               DP_ERR(cdev, "Failed to start RXQ#%d\n", params->queue_id);
+               return rc;
+       }
+
+       DP_VERBOSE(cdev, (QED_MSG_SPQ | NETIF_MSG_IFUP),
+                  "Started RX-Q %d [rss %d] on V-PORT %d and SB %d\n",
+                  params->queue_id, params->rss_id, params->vport_id,
+                  params->sb);
+
+       return 0;
+}
+
+static int qed_stop_rxq(struct qed_dev *cdev,
+                       struct qed_stop_rxq_params *params)
+{
+       int rc, hwfn_index;
+       struct qed_hwfn *p_hwfn;
+
+       hwfn_index      = params->rss_id % cdev->num_hwfns;
+       p_hwfn          = &cdev->hwfns[hwfn_index];
+
+       rc = qed_sp_eth_rx_queue_stop(p_hwfn,
+                                     params->rx_queue_id / cdev->num_hwfns,
+                                     params->eq_completion_only,
+                                     false);
+       if (rc) {
+               DP_ERR(cdev, "Failed to stop RXQ#%d\n", params->rx_queue_id);
+               return rc;
+       }
+
+       return 0;
+}
+
+static int qed_start_txq(struct qed_dev *cdev,
+                        struct qed_queue_start_common_params *p_params,
+                        dma_addr_t pbl_addr,
+                        u16 pbl_size,
+                        void __iomem **pp_doorbell)
+{
+       struct qed_hwfn *p_hwfn;
+       int rc, hwfn_index;
+
+       hwfn_index      = p_params->rss_id % cdev->num_hwfns;
+       p_hwfn          = &cdev->hwfns[hwfn_index];
+
+       /* Fix queue ID in 100g mode */
+       p_params->queue_id /= cdev->num_hwfns;
+
+       rc = qed_sp_eth_tx_queue_start(p_hwfn,
+                                      p_hwfn->hw_info.opaque_fid,
+                                      p_params,
+                                      pbl_addr,
+                                      pbl_size,
+                                      pp_doorbell);
+
+       if (rc) {
+               DP_ERR(cdev, "Failed to start TXQ#%d\n", p_params->queue_id);
+               return rc;
+       }
+
+       DP_VERBOSE(cdev, (QED_MSG_SPQ | NETIF_MSG_IFUP),
+                  "Started TX-Q %d [rss %d] on V-PORT %d and SB %d\n",
+                  p_params->queue_id, p_params->rss_id, p_params->vport_id,
+                  p_params->sb);
+
+       return 0;
+}
+
+#define QED_HW_STOP_RETRY_LIMIT (10)
+static int qed_fastpath_stop(struct qed_dev *cdev)
+{
+       qed_hw_stop_fastpath(cdev);
+
+       return 0;
+}
+
+static int qed_stop_txq(struct qed_dev *cdev,
+                       struct qed_stop_txq_params *params)
+{
+       struct qed_hwfn *p_hwfn;
+       int rc, hwfn_index;
+
+       hwfn_index      = params->rss_id % cdev->num_hwfns;
+       p_hwfn          = &cdev->hwfns[hwfn_index];
+
+       rc = qed_sp_eth_tx_queue_stop(p_hwfn,
+                                     params->tx_queue_id / cdev->num_hwfns);
+       if (rc) {
+               DP_ERR(cdev, "Failed to stop TXQ#%d\n", params->tx_queue_id);
+               return rc;
+       }
+
+       return 0;
+}
+
+static int qed_tunn_configure(struct qed_dev *cdev,
+                             struct qed_tunn_params *tunn_params)
+{
+       struct qed_tunn_update_params tunn_info;
+       int i, rc;
+
+       if (IS_VF(cdev))
+               return 0;
+
+       memset(&tunn_info, 0, sizeof(tunn_info));
+       if (tunn_params->update_vxlan_port == 1) {
+               tunn_info.update_vxlan_udp_port = 1;
+               tunn_info.vxlan_udp_port = tunn_params->vxlan_port;
+       }
+
+       if (tunn_params->update_geneve_port == 1) {
+               tunn_info.update_geneve_udp_port = 1;
+               tunn_info.geneve_udp_port = tunn_params->geneve_port;
+       }
+
+       for_each_hwfn(cdev, i) {
+               struct qed_hwfn *hwfn = &cdev->hwfns[i];
+
+               rc = qed_sp_pf_update_tunn_cfg(hwfn, &tunn_info,
+                                              QED_SPQ_MODE_EBLOCK, NULL);
+
+               if (rc)
+                       return rc;
+       }
+
+       return 0;
+}
+
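+/* Translate the requested Rx mode into accept flags: matched unicast and
+ * multicast plus broadcast are always accepted; promiscuous modes also
+ * accept unmatched traffic.
+ */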
+static int qed_configure_filter_rx_mode(struct qed_dev *cdev,
+                                       enum qed_filter_rx_mode_type type)
+{
+       struct qed_filter_accept_flags accept_flags;
+
+       memset(&accept_flags, 0, sizeof(accept_flags));
+
+       accept_flags.update_rx_mode_config      = 1;
+       accept_flags.update_tx_mode_config      = 1;
+       accept_flags.rx_accept_filter           = QED_ACCEPT_UCAST_MATCHED |
+                                                 QED_ACCEPT_MCAST_MATCHED |
+                                                 QED_ACCEPT_BCAST;
+       accept_flags.tx_accept_filter = QED_ACCEPT_UCAST_MATCHED |
+                                       QED_ACCEPT_MCAST_MATCHED |
+                                       QED_ACCEPT_BCAST;
+
+       if (type == QED_FILTER_RX_MODE_TYPE_PROMISC)
+               accept_flags.rx_accept_filter |= QED_ACCEPT_UCAST_UNMATCHED |
+                                                QED_ACCEPT_MCAST_UNMATCHED;
+       else if (type == QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC)
+               accept_flags.rx_accept_filter |= QED_ACCEPT_MCAST_UNMATCHED;
+
+       return qed_filter_accept_cmd(cdev, 0, accept_flags, false, false,
+                                    QED_SPQ_MODE_CB, NULL);
+}
+
+static int qed_configure_filter_ucast(struct qed_dev *cdev,
+                                     struct qed_filter_ucast_params *params)
+{
+       struct qed_filter_ucast ucast;
+
+       if (!params->vlan_valid && !params->mac_valid) {
+               DP_NOTICE(cdev,
+                         "Tried configuring a unicast filter, but neither MAC nor VLAN is set\n");
+               return -EINVAL;
+       }
+
+       memset(&ucast, 0, sizeof(ucast));
+       switch (params->type) {
+       case QED_FILTER_XCAST_TYPE_ADD:
+               ucast.opcode = QED_FILTER_ADD;
+               break;
+       case QED_FILTER_XCAST_TYPE_DEL:
+               ucast.opcode = QED_FILTER_REMOVE;
+               break;
+       case QED_FILTER_XCAST_TYPE_REPLACE:
+               ucast.opcode = QED_FILTER_REPLACE;
+               break;
+       default:
+               DP_NOTICE(cdev, "Unknown unicast filter type %d\n",
+                         params->type);
+       }
+
+       if (params->vlan_valid && params->mac_valid) {
+               ucast.type = QED_FILTER_MAC_VLAN;
+               ether_addr_copy(ucast.mac, params->mac);
+               ucast.vlan = params->vlan;
+       } else if (params->mac_valid) {
+               ucast.type = QED_FILTER_MAC;
+               ether_addr_copy(ucast.mac, params->mac);
+       } else {
+               ucast.type = QED_FILTER_VLAN;
+               ucast.vlan = params->vlan;
+       }
+
+       ucast.is_rx_filter = true;
+       ucast.is_tx_filter = true;
+
+       return qed_filter_ucast_cmd(cdev, &ucast, QED_SPQ_MODE_CB, NULL);
+}
+
+static int qed_configure_filter_mcast(struct qed_dev *cdev,
+                                     struct qed_filter_mcast_params *params)
+{
+       struct qed_filter_mcast mcast;
+       int i;
+
+       memset(&mcast, 0, sizeof(mcast));
+       switch (params->type) {
+       case QED_FILTER_XCAST_TYPE_ADD:
+               mcast.opcode = QED_FILTER_ADD;
+               break;
+       case QED_FILTER_XCAST_TYPE_DEL:
+               mcast.opcode = QED_FILTER_REMOVE;
+               break;
+       default:
+               DP_NOTICE(cdev, "Unknown multicast filter type %d\n",
+                         params->type);
+       }
+
+       mcast.num_mc_addrs = params->num;
+       for (i = 0; i < mcast.num_mc_addrs; i++)
+               ether_addr_copy(mcast.mac[i], params->mac[i]);
+
+       return qed_filter_mcast_cmd(cdev, &mcast,
+                                   QED_SPQ_MODE_CB, NULL);
+}
+
+static int qed_configure_filter(struct qed_dev *cdev,
+                               struct qed_filter_params *params)
+{
+       enum qed_filter_rx_mode_type accept_flags;
+
+       switch (params->type) {
+       case QED_FILTER_TYPE_UCAST:
+               return qed_configure_filter_ucast(cdev, &params->filter.ucast);
+       case QED_FILTER_TYPE_MCAST:
+               return qed_configure_filter_mcast(cdev, &params->filter.mcast);
+       case QED_FILTER_TYPE_RX_MODE:
+               accept_flags = params->filter.accept_flags;
+               return qed_configure_filter_rx_mode(cdev, accept_flags);
+       default:
+               DP_NOTICE(cdev, "Unknown filter type %d\n",
+                         (int)params->type);
+               return -EINVAL;
+       }
+}
+
+static int qed_fp_cqe_completion(struct qed_dev *dev,
+                                u8 rss_id,
+                                struct eth_slow_path_rx_cqe *cqe)
+{
+       return qed_eth_cqe_completion(&dev->hwfns[rss_id % dev->num_hwfns],
+                                     cqe);
+}
+
+#ifdef CONFIG_QED_SRIOV
+extern const struct qed_iov_hv_ops qed_iov_ops_pass;
+#endif
+
+#ifdef CONFIG_DCB
+extern const struct qed_eth_dcbnl_ops qed_dcbnl_ops_pass;
+#endif
+
+static const struct qed_eth_ops qed_eth_ops_pass = {
+       .common = &qed_common_ops_pass,
+#ifdef CONFIG_QED_SRIOV
+       .iov = &qed_iov_ops_pass,
+#endif
+#ifdef CONFIG_DCB
+       .dcb = &qed_dcbnl_ops_pass,
+#endif
+       .fill_dev_info = &qed_fill_eth_dev_info,
+       .register_ops = &qed_register_eth_ops,
+       .check_mac = &qed_check_mac,
+       .vport_start = &qed_start_vport,
+       .vport_stop = &qed_stop_vport,
+       .vport_update = &qed_update_vport,
+       .q_rx_start = &qed_start_rxq,
+       .q_rx_stop = &qed_stop_rxq,
+       .q_tx_start = &qed_start_txq,
+       .q_tx_stop = &qed_stop_txq,
+       .filter_config = &qed_configure_filter,
+       .fastpath_stop = &qed_fastpath_stop,
+       .eth_cqe_completion = &qed_fp_cqe_completion,
+       .get_vport_stats = &qed_get_vport_stats,
+       .tunn_config = &qed_tunn_configure,
+};
+
+const struct qed_eth_ops *qed_get_eth_ops(void)
+{
+       return &qed_eth_ops_pass;
+}
+EXPORT_SYMBOL(qed_get_eth_ops);
+
+void qed_put_eth_ops(void)
+{
+       /* TODO - reference count for module? */
+}
+EXPORT_SYMBOL(qed_put_eth_ops);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.h b/drivers/net/ethernet/qlogic/qed/qed_l2.h
new file mode 100644 (file)
index 0000000..0021145
--- /dev/null
@@ -0,0 +1,239 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015 QLogic Corporation
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+#ifndef _QED_L2_H
+#define _QED_L2_H
+#include <linux/types.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/qed/qed_eth_if.h>
+#include "qed.h"
+#include "qed_hw.h"
+#include "qed_sp.h"
+
+struct qed_sge_tpa_params {
+       u8 max_buffers_per_cqe;
+
+       u8 update_tpa_en_flg;
+       u8 tpa_ipv4_en_flg;
+       u8 tpa_ipv6_en_flg;
+       u8 tpa_ipv4_tunn_en_flg;
+       u8 tpa_ipv6_tunn_en_flg;
+
+       u8 update_tpa_param_flg;
+       u8 tpa_pkt_split_flg;
+       u8 tpa_hdr_data_split_flg;
+       u8 tpa_gro_consistent_flg;
+       u8 tpa_max_aggs_num;
+       u16 tpa_max_size;
+       u16 tpa_min_size_to_start;
+       u16 tpa_min_size_to_cont;
+};
+
+enum qed_filter_opcode {
+       QED_FILTER_ADD,
+       QED_FILTER_REMOVE,
+       QED_FILTER_MOVE,
+       QED_FILTER_REPLACE,     /* Delete all MACs and add new one instead */
+       QED_FILTER_FLUSH,       /* Removes all filters */
+};
+
+enum qed_filter_ucast_type {
+       QED_FILTER_MAC,
+       QED_FILTER_VLAN,
+       QED_FILTER_MAC_VLAN,
+       QED_FILTER_INNER_MAC,
+       QED_FILTER_INNER_VLAN,
+       QED_FILTER_INNER_PAIR,
+       QED_FILTER_INNER_MAC_VNI_PAIR,
+       QED_FILTER_MAC_VNI_PAIR,
+       QED_FILTER_VNI,
+};
+
+struct qed_filter_ucast {
+       enum qed_filter_opcode opcode;
+       enum qed_filter_ucast_type type;
+       u8 is_rx_filter;
+       u8 is_tx_filter;
+       u8 vport_to_add_to;
+       u8 vport_to_remove_from;
+       unsigned char mac[ETH_ALEN];
+       u8 assert_on_error;
+       u16 vlan;
+       u32 vni;
+};
+
+struct qed_filter_mcast {
+       /* MOVE is not supported for multicast */
+       enum qed_filter_opcode opcode;
+       u8 vport_to_add_to;
+       u8 vport_to_remove_from;
+       u8 num_mc_addrs;
+#define QED_MAX_MC_ADDRS        64
+       unsigned char mac[QED_MAX_MC_ADDRS][ETH_ALEN];
+};
+
+int qed_sp_eth_rx_queue_stop(struct qed_hwfn *p_hwfn,
+                            u16 rx_queue_id,
+                            bool eq_completion_only, bool cqe_completion);
+
+int qed_sp_eth_tx_queue_stop(struct qed_hwfn *p_hwfn, u16 tx_queue_id);
+
+enum qed_tpa_mode {
+       QED_TPA_MODE_NONE,
+       QED_TPA_MODE_UNUSED,
+       QED_TPA_MODE_GRO,
+       QED_TPA_MODE_MAX
+};
+
+struct qed_sp_vport_start_params {
+       enum qed_tpa_mode tpa_mode;
+       bool remove_inner_vlan;
+       bool tx_switching;
+       bool only_untagged;
+       bool drop_ttl0;
+       u8 max_buffers_per_cqe;
+       u32 concrete_fid;
+       u16 opaque_fid;
+       u8 vport_id;
+       u16 mtu;
+};
+
+int qed_sp_eth_vport_start(struct qed_hwfn *p_hwfn,
+                          struct qed_sp_vport_start_params *p_params);
+
+struct qed_rss_params {
+       u8      update_rss_config;
+       u8      rss_enable;
+       u8      rss_eng_id;
+       u8      update_rss_capabilities;
+       u8      update_rss_ind_table;
+       u8      update_rss_key;
+       u8      rss_caps;
+       u8      rss_table_size_log;
+       u16     rss_ind_table[QED_RSS_IND_TABLE_SIZE];
+       u32     rss_key[QED_RSS_KEY_SIZE];
+};
+
+struct qed_filter_accept_flags {
+       u8      update_rx_mode_config;
+       u8      update_tx_mode_config;
+       u8      rx_accept_filter;
+       u8      tx_accept_filter;
+#define QED_ACCEPT_NONE         0x01
+#define QED_ACCEPT_UCAST_MATCHED        0x02
+#define QED_ACCEPT_UCAST_UNMATCHED      0x04
+#define QED_ACCEPT_MCAST_MATCHED        0x08
+#define QED_ACCEPT_MCAST_UNMATCHED      0x10
+#define QED_ACCEPT_BCAST                0x20
+};
+
+struct qed_sp_vport_update_params {
+       u16                             opaque_fid;
+       u8                              vport_id;
+       u8                              update_vport_active_rx_flg;
+       u8                              vport_active_rx_flg;
+       u8                              update_vport_active_tx_flg;
+       u8                              vport_active_tx_flg;
+       u8                              update_inner_vlan_removal_flg;
+       u8                              inner_vlan_removal_flg;
+       u8                              silent_vlan_removal_flg;
+       u8                              update_default_vlan_enable_flg;
+       u8                              default_vlan_enable_flg;
+       u8                              update_default_vlan_flg;
+       u16                             default_vlan;
+       u8                              update_tx_switching_flg;
+       u8                              tx_switching_flg;
+       u8                              update_approx_mcast_flg;
+       u8                              update_anti_spoofing_en_flg;
+       u8                              anti_spoofing_en;
+       u8                              update_accept_any_vlan_flg;
+       u8                              accept_any_vlan;
+       unsigned long                   bins[8];
+       struct qed_rss_params           *rss_params;
+       struct qed_filter_accept_flags  accept_flags;
+       struct qed_sge_tpa_params       *sge_tpa_params;
+};
+
+int qed_sp_vport_update(struct qed_hwfn *p_hwfn,
+                       struct qed_sp_vport_update_params *p_params,
+                       enum spq_mode comp_mode,
+                       struct qed_spq_comp_cb *p_comp_data);
+
+/**
+ * @brief qed_sp_vport_stop -
+ *
+ * This ramrod closes a VPort after all its RX and TX queues are terminated.
+ * An Assert is generated if any queues are left open.
+ *
+ * @param p_hwfn
+ * @param opaque_fid
+ * @param vport_id VPort ID
+ *
+ * @return int
+ */
+int qed_sp_vport_stop(struct qed_hwfn *p_hwfn, u16 opaque_fid, u8 vport_id);
+
+int qed_sp_eth_filter_ucast(struct qed_hwfn *p_hwfn,
+                           u16 opaque_fid,
+                           struct qed_filter_ucast *p_filter_cmd,
+                           enum spq_mode comp_mode,
+                           struct qed_spq_comp_cb *p_comp_data);
+
+/**
+ * @brief qed_sp_eth_rx_queues_update -
+ *
+ * This ramrod updates an RX queue. It is used for setting the active state
+ * of the queue and updating the TPA and SGE parameters.
+ *
+ * @note At the moment, this is only used by non-Linux VFs.
+ *
+ * @param p_hwfn
+ * @param rx_queue_id          RX Queue ID
+ * @param num_rxqs             Allows updating multiple RX
+ *                             queues, from rx_queue_id to
+ *                             (rx_queue_id + num_rxqs)
+ * @param complete_cqe_flg     Post completion to the CQE Ring if set
+ * @param complete_event_flg   Post completion to the Event Ring if set
+ *
+ * @return int
+ */
+
+int
+qed_sp_eth_rx_queues_update(struct qed_hwfn *p_hwfn,
+                           u16 rx_queue_id,
+                           u8 num_rxqs,
+                           u8 complete_cqe_flg,
+                           u8 complete_event_flg,
+                           enum spq_mode comp_mode,
+                           struct qed_spq_comp_cb *p_comp_data);
+
+int qed_sp_eth_vport_start(struct qed_hwfn *p_hwfn,
+                          struct qed_sp_vport_start_params *p_params);
+
+int qed_sp_eth_rxq_start_ramrod(struct qed_hwfn *p_hwfn,
+                               u16 opaque_fid,
+                               u32 cid,
+                               struct qed_queue_start_common_params *params,
+                               u8 stats_id,
+                               u16 bd_max_bytes,
+                               dma_addr_t bd_chain_phys_addr,
+                               dma_addr_t cqe_pbl_addr, u16 cqe_pbl_size);
+
+int qed_sp_eth_txq_start_ramrod(struct qed_hwfn  *p_hwfn,
+                               u16  opaque_fid,
+                               u32  cid,
+                               struct qed_queue_start_common_params *p_params,
+                               u8  stats_id,
+                               dma_addr_t pbl_addr,
+                               u16 pbl_size,
+                               union qed_qm_pq_params *p_pq_params);
+
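+/* Hashes a MAC address into the approximate-multicast bin index used when
+ * building the bins[] bitmap of qed_sp_vport_update_params.
+ */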
+u8 qed_mcast_bin_from_mac(u8 *mac);
+
+#endif /* _QED_L2_H */
diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c
new file mode 100644 (file)
index 0000000..c7dc34b
--- /dev/null
@@ -0,0 +1,1393 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015 QLogic Corporation
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#include <linux/stddef.h>
+#include <linux/pci.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/version.h>
+#include <linux/delay.h>
+#include <asm/byteorder.h>
+#include <linux/dma-mapping.h>
+#include <linux/string.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/workqueue.h>
+#include <linux/ethtool.h>
+#include <linux/etherdevice.h>
+#include <linux/vmalloc.h>
+#include <linux/qed/qed_if.h>
+
+#include "qed.h"
+#include "qed_sriov.h"
+#include "qed_sp.h"
+#include "qed_dev_api.h"
+#include "qed_mcp.h"
+#include "qed_hw.h"
+#include "qed_selftest.h"
+
+static char version[] =
+       "QLogic FastLinQ 4xxxx Core Module qed " DRV_MODULE_VERSION "\n";
+
+MODULE_DESCRIPTION("QLogic FastLinQ 4xxxx Core Module");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_MODULE_VERSION);
+
+#define FW_FILE_VERSION                                \
+       __stringify(FW_MAJOR_VERSION) "."       \
+       __stringify(FW_MINOR_VERSION) "."       \
+       __stringify(FW_REVISION_VERSION) "."    \
+       __stringify(FW_ENGINEERING_VERSION)
+
+#define QED_FW_FILE_NAME       \
+       "qed/qed_init_values_zipped-" FW_FILE_VERSION ".bin"
+
+MODULE_FIRMWARE(QED_FW_FILE_NAME);
+
+static int __init qed_init(void)
+{
+       pr_notice("qed_init called\n");
+
+       pr_info("%s", version);
+
+       return 0;
+}
+
+static void __exit qed_cleanup(void)
+{
+       pr_notice("qed_cleanup called\n");
+}
+
+module_init(qed_init);
+module_exit(qed_cleanup);
+
+/* Check if the DMA controller on the machine can properly handle the DMA
+ * addressing required by the device.
+ */
+static int qed_set_coherency_mask(struct qed_dev *cdev)
+{
+       struct device *dev = &cdev->pdev->dev;
+
+       if (dma_set_mask(dev, DMA_BIT_MASK(64)) == 0) {
+               if (dma_set_coherent_mask(dev, DMA_BIT_MASK(64)) != 0) {
+                       DP_NOTICE(cdev,
+                                 "Can't request 64-bit consistent allocations\n");
+                       return -EIO;
+               }
+       } else if (dma_set_mask(dev, DMA_BIT_MASK(32)) != 0) {
+               DP_NOTICE(cdev, "Can't request 64b/32b DMA addresses\n");
+               return -EIO;
+       }
+
+       return 0;
+}
+
+static void qed_free_pci(struct qed_dev *cdev)
+{
+       struct pci_dev *pdev = cdev->pdev;
+
+       if (cdev->doorbells)
+               iounmap(cdev->doorbells);
+       if (cdev->regview)
+               iounmap(cdev->regview);
+       if (atomic_read(&pdev->enable_cnt) == 1)
+               pci_release_regions(pdev);
+
+       pci_disable_device(pdev);
+}
+
+#define PCI_REVISION_ID_ERROR_VAL      0xff
+
+/* Performs PCI initialization and sets up PCI-related parameters
+ * in the device structure. Returns 0 on success.
+ */
+static int qed_init_pci(struct qed_dev *cdev,
+                       struct pci_dev *pdev)
+{
+       u8 rev_id;
+       int rc;
+
+       cdev->pdev = pdev;
+
+       rc = pci_enable_device(pdev);
+       if (rc) {
+               DP_NOTICE(cdev, "Cannot enable PCI device\n");
+               goto err0;
+       }
+
+       if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
+               DP_NOTICE(cdev, "No memory region found in bar #0\n");
+               rc = -EIO;
+               goto err1;
+       }
+
+       if (IS_PF(cdev) && !(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
+               DP_NOTICE(cdev, "No memory region found in bar #2\n");
+               rc = -EIO;
+               goto err1;
+       }
+
+       if (atomic_read(&pdev->enable_cnt) == 1) {
+               rc = pci_request_regions(pdev, "qed");
+               if (rc) {
+                       DP_NOTICE(cdev,
+                                 "Failed to request PCI memory resources\n");
+                       goto err1;
+               }
+               pci_set_master(pdev);
+               pci_save_state(pdev);
+       }
+
+       pci_read_config_byte(pdev, PCI_REVISION_ID, &rev_id);
+       if (rev_id == PCI_REVISION_ID_ERROR_VAL) {
+               DP_NOTICE(cdev,
+                         "Detected PCI device error [rev_id 0x%x]. Probably due to prior indication. Aborting.\n",
+                         rev_id);
+               rc = -ENODEV;
+               goto err2;
+       }
+       if (!pci_is_pcie(pdev)) {
+               DP_NOTICE(cdev, "The bus is not PCI Express\n");
+               rc = -EIO;
+               goto err2;
+       }
+
+       cdev->pci_params.pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
+       if (IS_PF(cdev) && !cdev->pci_params.pm_cap)
+               DP_NOTICE(cdev, "Cannot find power management capability\n");
+
+       rc = qed_set_coherency_mask(cdev);
+       if (rc)
+               goto err2;
+
+       cdev->pci_params.mem_start = pci_resource_start(pdev, 0);
+       cdev->pci_params.mem_end = pci_resource_end(pdev, 0);
+       cdev->pci_params.irq = pdev->irq;
+
+       cdev->regview = pci_ioremap_bar(pdev, 0);
+       if (!cdev->regview) {
+               DP_NOTICE(cdev, "Cannot map register space, aborting\n");
+               rc = -ENOMEM;
+               goto err2;
+       }
+
+       if (IS_PF(cdev)) {
+               cdev->db_phys_addr = pci_resource_start(cdev->pdev, 2);
+               cdev->db_size = pci_resource_len(cdev->pdev, 2);
+               cdev->doorbells = ioremap_wc(cdev->db_phys_addr, cdev->db_size);
+               if (!cdev->doorbells) {
+                       DP_NOTICE(cdev, "Cannot map doorbell space\n");
+                       return -ENOMEM;
+               }
+       }
+
+       return 0;
+
+err2:
+       pci_release_regions(pdev);
+err1:
+       pci_disable_device(pdev);
+err0:
+       return rc;
+}
+
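+/* Fills the protocol driver's qed_dev_info with hwfn count, PCI ranges, MAC
+ * address and firmware/MFW versions; PF-only data (MFW version, flash size)
+ * is read through a temporarily acquired PTT.
+ */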
+int qed_fill_dev_info(struct qed_dev *cdev,
+                     struct qed_dev_info *dev_info)
+{
+       struct qed_ptt  *ptt;
+
+       memset(dev_info, 0, sizeof(struct qed_dev_info));
+
+       dev_info->num_hwfns = cdev->num_hwfns;
+       dev_info->pci_mem_start = cdev->pci_params.mem_start;
+       dev_info->pci_mem_end = cdev->pci_params.mem_end;
+       dev_info->pci_irq = cdev->pci_params.irq;
+       dev_info->rdma_supported =
+           (cdev->hwfns[0].hw_info.personality == QED_PCI_ETH_ROCE);
+       dev_info->is_mf_default = IS_MF_DEFAULT(&cdev->hwfns[0]);
+       ether_addr_copy(dev_info->hw_mac, cdev->hwfns[0].hw_info.hw_mac_addr);
+
+       if (IS_PF(cdev)) {
+               dev_info->fw_major = FW_MAJOR_VERSION;
+               dev_info->fw_minor = FW_MINOR_VERSION;
+               dev_info->fw_rev = FW_REVISION_VERSION;
+               dev_info->fw_eng = FW_ENGINEERING_VERSION;
+               dev_info->mf_mode = cdev->mf_mode;
+               dev_info->tx_switching = true;
+       } else {
+               qed_vf_get_fw_version(&cdev->hwfns[0], &dev_info->fw_major,
+                                     &dev_info->fw_minor, &dev_info->fw_rev,
+                                     &dev_info->fw_eng);
+       }
+
+       if (IS_PF(cdev)) {
+               ptt = qed_ptt_acquire(QED_LEADING_HWFN(cdev));
+               if (ptt) {
+                       qed_mcp_get_mfw_ver(QED_LEADING_HWFN(cdev), ptt,
+                                           &dev_info->mfw_rev, NULL);
+
+                       qed_mcp_get_flash_size(QED_LEADING_HWFN(cdev), ptt,
+                                              &dev_info->flash_size);
+
+                       qed_ptt_release(QED_LEADING_HWFN(cdev), ptt);
+               }
+       } else {
+               qed_mcp_get_mfw_ver(QED_LEADING_HWFN(cdev), NULL,
+                                   &dev_info->mfw_rev, NULL);
+       }
+
+       return 0;
+}
+
+static void qed_free_cdev(struct qed_dev *cdev)
+{
+       kfree(cdev);
+}
+
+static struct qed_dev *qed_alloc_cdev(struct pci_dev *pdev)
+{
+       struct qed_dev *cdev;
+
+       cdev = kzalloc(sizeof(*cdev), GFP_KERNEL);
+       if (!cdev)
+               return cdev;
+
+       qed_init_struct(cdev);
+
+       return cdev;
+}
+
+/* Sets the requested power state */
+static int qed_set_power_state(struct qed_dev *cdev,
+                              pci_power_t state)
+{
+       if (!cdev)
+               return -ENODEV;
+
+       DP_VERBOSE(cdev, NETIF_MSG_DRV, "Omitting Power state change\n");
+       return 0;
+}
+
+/* probing */
+static struct qed_dev *qed_probe(struct pci_dev *pdev,
+                                struct qed_probe_params *params)
+{
+       struct qed_dev *cdev;
+       int rc;
+
+       cdev = qed_alloc_cdev(pdev);
+       if (!cdev)
+               goto err0;
+
+       cdev->protocol = params->protocol;
+
+       if (params->is_vf)
+               cdev->b_is_vf = true;
+
+       qed_init_dp(cdev, params->dp_module, params->dp_level);
+
+       rc = qed_init_pci(cdev, pdev);
+       if (rc) {
+               DP_ERR(cdev, "init pci failed\n");
+               goto err1;
+       }
+       DP_INFO(cdev, "PCI init completed successfully\n");
+
+       rc = qed_hw_prepare(cdev, QED_PCI_DEFAULT);
+       if (rc) {
+               DP_ERR(cdev, "hw prepare failed\n");
+               goto err2;
+       }
+
+       DP_INFO(cdev, "qed_probe completed successfully\n");
+
+       return cdev;
+
+err2:
+       qed_free_pci(cdev);
+err1:
+       qed_free_cdev(cdev);
+err0:
+       return NULL;
+}
+
+static void qed_remove(struct qed_dev *cdev)
+{
+       if (!cdev)
+               return;
+
+       qed_hw_remove(cdev);
+
+       qed_free_pci(cdev);
+
+       qed_set_power_state(cdev, PCI_D3hot);
+
+       qed_free_cdev(cdev);
+}
+
+static void qed_disable_msix(struct qed_dev *cdev)
+{
+       if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
+               pci_disable_msix(cdev->pdev);
+               kfree(cdev->int_params.msix_table);
+       } else if (cdev->int_params.out.int_mode == QED_INT_MODE_MSI) {
+               pci_disable_msi(cdev->pdev);
+       }
+
+       memset(&cdev->int_params.out, 0, sizeof(struct qed_int_param));
+}
+
+static int qed_enable_msix(struct qed_dev *cdev,
+                          struct qed_int_params *int_params)
+{
+       int i, rc, cnt;
+
+       cnt = int_params->in.num_vectors;
+
+       for (i = 0; i < cnt; i++)
+               int_params->msix_table[i].entry = i;
+
+       rc = pci_enable_msix_range(cdev->pdev, int_params->msix_table,
+                                  int_params->in.min_msix_cnt, cnt);
+       if (rc < cnt && rc >= int_params->in.min_msix_cnt &&
+           (rc % cdev->num_hwfns)) {
+               pci_disable_msix(cdev->pdev);
+
+               /* If the fastpath is initialized, we need at least one interrupt
+                * per hwfn [plus the slowpath interrupts]. The new requested
+                * number of vectors should be a multiple of the number of hwfns.
+                */
+               cnt = (rc / cdev->num_hwfns) * cdev->num_hwfns;
+               DP_NOTICE(cdev,
+                         "Trying to enable MSI-X with less vectors (%d out of %d)\n",
+                         cnt, int_params->in.num_vectors);
+               rc = pci_enable_msix_exact(cdev->pdev,
+                                          int_params->msix_table, cnt);
+               if (!rc)
+                       rc = cnt;
+       }
+
+       if (rc > 0) {
+               /* MSI-X configuration was achieved */
+               int_params->out.int_mode = QED_INT_MODE_MSIX;
+               int_params->out.num_vectors = rc;
+               rc = 0;
+       } else {
+               DP_NOTICE(cdev,
+                         "Failed to enable MSI-X [Requested %d vectors][rc %d]\n",
+                         cnt, rc);
+       }
+
+       return rc;
+}
+
+/* This function outputs the int mode and the number of enabled MSI-X vectors */
+static int qed_set_int_mode(struct qed_dev *cdev, bool force_mode)
+{
+       struct qed_int_params *int_params = &cdev->int_params;
+       struct msix_entry *tbl;
+       int rc = 0, cnt;
+
+       switch (int_params->in.int_mode) {
+       case QED_INT_MODE_MSIX:
+               /* Allocate MSIX table */
+               cnt = int_params->in.num_vectors;
+               int_params->msix_table = kcalloc(cnt, sizeof(*tbl), GFP_KERNEL);
+               if (!int_params->msix_table) {
+                       rc = -ENOMEM;
+                       goto out;
+               }
+
+               /* Enable MSIX */
+               rc = qed_enable_msix(cdev, int_params);
+               if (!rc)
+                       goto out;
+
+               DP_NOTICE(cdev, "Failed to enable MSI-X\n");
+               kfree(int_params->msix_table);
+               if (force_mode)
+                       goto out;
+               /* Fallthrough */
+
+       case QED_INT_MODE_MSI:
+               if (cdev->num_hwfns == 1) {
+                       rc = pci_enable_msi(cdev->pdev);
+                       if (!rc) {
+                               int_params->out.int_mode = QED_INT_MODE_MSI;
+                               goto out;
+                       }
+
+                       DP_NOTICE(cdev, "Failed to enable MSI\n");
+                       if (force_mode)
+                               goto out;
+               }
+               /* Fallthrough */
+
+       case QED_INT_MODE_INTA:
+               int_params->out.int_mode = QED_INT_MODE_INTA;
+               rc = 0;
+               goto out;
+       default:
+               DP_NOTICE(cdev, "Unknown int_mode value %d\n",
+                         int_params->in.int_mode);
+               rc = -EINVAL;
+       }
+
+out:
+       cdev->int_coalescing_mode = QED_COAL_MODE_ENABLE;
+
+       return rc;
+}
+
+static void qed_simd_handler_config(struct qed_dev *cdev, void *token,
+                                   int index, void(*handler)(void *))
+{
+       struct qed_hwfn *hwfn = &cdev->hwfns[index % cdev->num_hwfns];
+       int relative_idx = index / cdev->num_hwfns;
+
+       hwfn->simd_proto_handler[relative_idx].func = handler;
+       hwfn->simd_proto_handler[relative_idx].token = token;
+}
+
+static void qed_simd_handler_clean(struct qed_dev *cdev, int index)
+{
+       struct qed_hwfn *hwfn = &cdev->hwfns[index % cdev->num_hwfns];
+       int relative_idx = index / cdev->num_hwfns;
+
+       memset(&hwfn->simd_proto_handler[relative_idx], 0,
+              sizeof(struct qed_simd_fp_handler));
+}
+
+static irqreturn_t qed_msix_sp_int(int irq, void *tasklet)
+{
+       tasklet_schedule((struct tasklet_struct *)tasklet);
+       return IRQ_HANDLED;
+}
+
+static irqreturn_t qed_single_int(int irq, void *dev_instance)
+{
+       struct qed_dev *cdev = (struct qed_dev *)dev_instance;
+       struct qed_hwfn *hwfn;
+       irqreturn_t rc = IRQ_NONE;
+       u64 status;
+       int i, j;
+
+       for (i = 0; i < cdev->num_hwfns; i++) {
+               status = qed_int_igu_read_sisr_reg(&cdev->hwfns[i]);
+
+               if (!status)
+                       continue;
+
+               hwfn = &cdev->hwfns[i];
+
+               /* Slowpath interrupt */
+               if (unlikely(status & 0x1)) {
+                       tasklet_schedule(hwfn->sp_dpc);
+                       status &= ~0x1;
+                       rc = IRQ_HANDLED;
+               }
+
+               /* Fastpath interrupts */
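+               /* Bit 0 (slowpath) was handled above; the remaining status
+                * bits map to the per-protocol simd handlers, hence the
+                * (0x2ULL << j) test below.
+                */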
+               for (j = 0; j < 64; j++) {
+                       if ((0x2ULL << j) & status) {
+                               hwfn->simd_proto_handler[j].func(
+                                       hwfn->simd_proto_handler[j].token);
+                               status &= ~(0x2ULL << j);
+                               rc = IRQ_HANDLED;
+                       }
+               }
+
+               if (unlikely(status))
+                       DP_VERBOSE(hwfn, NETIF_MSG_INTR,
+                                  "got an unknown interrupt status 0x%llx\n",
+                                  status);
+       }
+
+       return rc;
+}
+
+int qed_slowpath_irq_req(struct qed_hwfn *hwfn)
+{
+       struct qed_dev *cdev = hwfn->cdev;
+       int rc = 0;
+       u8 id;
+
+       if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
+               id = hwfn->my_id;
+               snprintf(hwfn->name, NAME_SIZE, "sp-%d-%02x:%02x.%02x",
+                        id, cdev->pdev->bus->number,
+                        PCI_SLOT(cdev->pdev->devfn), hwfn->abs_pf_id);
+               rc = request_irq(cdev->int_params.msix_table[id].vector,
+                                qed_msix_sp_int, 0, hwfn->name, hwfn->sp_dpc);
+               if (!rc)
+                       DP_VERBOSE(hwfn, (NETIF_MSG_INTR | QED_MSG_SP),
+                                  "Requested slowpath MSI-X\n");
+       } else {
+               unsigned long flags = 0;
+
+               snprintf(cdev->name, NAME_SIZE, "%02x:%02x.%02x",
+                        cdev->pdev->bus->number, PCI_SLOT(cdev->pdev->devfn),
+                        PCI_FUNC(cdev->pdev->devfn));
+
+               if (cdev->int_params.out.int_mode == QED_INT_MODE_INTA)
+                       flags |= IRQF_SHARED;
+
+               rc = request_irq(cdev->pdev->irq, qed_single_int,
+                                flags, cdev->name, cdev);
+       }
+
+       return rc;
+}
+
+static void qed_slowpath_irq_free(struct qed_dev *cdev)
+{
+       int i;
+
+       if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
+               for_each_hwfn(cdev, i) {
+                       if (!cdev->hwfns[i].b_int_requested)
+                               break;
+                       synchronize_irq(cdev->int_params.msix_table[i].vector);
+                       free_irq(cdev->int_params.msix_table[i].vector,
+                                cdev->hwfns[i].sp_dpc);
+               }
+       } else {
+               if (QED_LEADING_HWFN(cdev)->b_int_requested)
+                       free_irq(cdev->pdev->irq, cdev);
+       }
+       qed_int_disable_post_isr_release(cdev);
+}
+
+static int qed_nic_stop(struct qed_dev *cdev)
+{
+       int i, rc;
+
+       rc = qed_hw_stop(cdev);
+
+       for (i = 0; i < cdev->num_hwfns; i++) {
+               struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+
+               if (p_hwfn->b_sp_dpc_enabled) {
+                       tasklet_disable(p_hwfn->sp_dpc);
+                       p_hwfn->b_sp_dpc_enabled = false;
+                       DP_VERBOSE(cdev, NETIF_MSG_IFDOWN,
+                                  "Disabled sp tasklet [hwfn %d] at %p\n",
+                                  i, p_hwfn->sp_dpc);
+               }
+       }
+
+       return rc;
+}
+
+static int qed_nic_reset(struct qed_dev *cdev)
+{
+       int rc;
+
+       rc = qed_hw_reset(cdev);
+       if (rc)
+               return rc;
+
+       qed_resc_free(cdev);
+
+       return 0;
+}
+
+static int qed_nic_setup(struct qed_dev *cdev)
+{
+       int rc;
+
+       rc = qed_resc_alloc(cdev);
+       if (rc)
+               return rc;
+
+       DP_INFO(cdev, "Allocated qed resources\n");
+
+       qed_resc_setup(cdev);
+
+       return rc;
+}
+
+static int qed_set_int_fp(struct qed_dev *cdev, u16 cnt)
+{
+       int limit = 0;
+
+       /* Mark the fastpath as free/used */
+       cdev->int_params.fp_initialized = cnt ? true : false;
+
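+       /* Without MSI-X, each hwfn shares a single interrupt whose status
+        * word carries one slowpath bit plus up to 63 fastpath bits (see
+        * qed_single_int()), which is where the 63-per-hwfn limit comes from.
+        */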
+       if (cdev->int_params.out.int_mode != QED_INT_MODE_MSIX)
+               limit = cdev->num_hwfns * 63;
+       else if (cdev->int_params.fp_msix_cnt)
+               limit = cdev->int_params.fp_msix_cnt;
+
+       if (!limit)
+               return -ENOMEM;
+
+       return min_t(int, cnt, limit);
+}
+
+static int qed_get_int_fp(struct qed_dev *cdev, struct qed_int_info *info)
+{
+       memset(info, 0, sizeof(struct qed_int_info));
+
+       if (!cdev->int_params.fp_initialized) {
+               DP_INFO(cdev,
+                       "Protocol driver requested interrupt information, but interrupt support is not yet configured\n");
+               return -EINVAL;
+       }
+
+       /* Need to expose only MSI-X information; Single IRQ is handled solely
+        * by qed.
+        */
+       if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
+               int msix_base = cdev->int_params.fp_msix_base;
+
+               info->msix_cnt = cdev->int_params.fp_msix_cnt;
+               info->msix = &cdev->int_params.msix_table[msix_base];
+       }
+
+       return 0;
+}
+
+static int qed_slowpath_setup_int(struct qed_dev *cdev,
+                                 enum qed_int_mode int_mode)
+{
+       struct qed_sb_cnt_info sb_cnt_info;
+       int rc;
+       int i;
+
+       if ((int_mode == QED_INT_MODE_MSI) && (cdev->num_hwfns > 1)) {
+               DP_NOTICE(cdev, "MSI mode is not supported for CMT devices\n");
+               return -EINVAL;
+       }
+
+       memset(&cdev->int_params, 0, sizeof(struct qed_int_params));
+       cdev->int_params.in.int_mode = int_mode;
+       for_each_hwfn(cdev, i) {
+               memset(&sb_cnt_info, 0, sizeof(sb_cnt_info));
+               qed_int_get_num_sbs(&cdev->hwfns[i], &sb_cnt_info);
+               cdev->int_params.in.num_vectors += sb_cnt_info.sb_cnt;
+               cdev->int_params.in.num_vectors++; /* slowpath */
+       }
+
+       /* We want a minimum of one slowpath and one fastpath vector per hwfn */
+       cdev->int_params.in.min_msix_cnt = cdev->num_hwfns * 2;
+
+       rc = qed_set_int_mode(cdev, false);
+       if (rc)  {
+               DP_ERR(cdev, "qed_slowpath_setup_int ERR\n");
+               return rc;
+       }
+
+       cdev->int_params.fp_msix_base = cdev->num_hwfns;
+       cdev->int_params.fp_msix_cnt = cdev->int_params.out.num_vectors -
+                                      cdev->num_hwfns;
+
+       return 0;
+}
+
+static int qed_slowpath_vf_setup_int(struct qed_dev *cdev)
+{
+       int rc;
+
+       memset(&cdev->int_params, 0, sizeof(struct qed_int_params));
+       cdev->int_params.in.int_mode = QED_INT_MODE_MSIX;
+
+       qed_vf_get_num_rxqs(QED_LEADING_HWFN(cdev),
+                           &cdev->int_params.in.num_vectors);
+       if (cdev->num_hwfns > 1) {
+               u8 vectors = 0;
+
+               qed_vf_get_num_rxqs(&cdev->hwfns[1], &vectors);
+               cdev->int_params.in.num_vectors += vectors;
+       }
+
+       /* We want a minimum of one fastpath vector per vf hwfn */
+       cdev->int_params.in.min_msix_cnt = cdev->num_hwfns;
+
+       rc = qed_set_int_mode(cdev, true);
+       if (rc)
+               return rc;
+
+       cdev->int_params.fp_msix_base = 0;
+       cdev->int_params.fp_msix_cnt = cdev->int_params.out.num_vectors;
+
+       return 0;
+}
+
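+/* Inflates a zlib-compressed firmware buffer into unzip_buf and returns the
+ * decompressed length in dwords, or 0 on any zlib failure.
+ */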
+u32 qed_unzip_data(struct qed_hwfn *p_hwfn, u32 input_len,
+                  u8 *input_buf, u32 max_size, u8 *unzip_buf)
+{
+       int rc;
+
+       p_hwfn->stream->next_in = input_buf;
+       p_hwfn->stream->avail_in = input_len;
+       p_hwfn->stream->next_out = unzip_buf;
+       p_hwfn->stream->avail_out = max_size;
+
+       rc = zlib_inflateInit2(p_hwfn->stream, MAX_WBITS);
+
+       if (rc != Z_OK) {
+               DP_VERBOSE(p_hwfn, NETIF_MSG_DRV, "zlib init failed, rc = %d\n",
+                          rc);
+               return 0;
+       }
+
+       rc = zlib_inflate(p_hwfn->stream, Z_FINISH);
+       zlib_inflateEnd(p_hwfn->stream);
+
+       if (rc != Z_OK && rc != Z_STREAM_END) {
+               DP_VERBOSE(p_hwfn, NETIF_MSG_DRV, "FW unzip error: %s, rc=%d\n",
+                          p_hwfn->stream->msg, rc);
+               return 0;
+       }
+
+       return p_hwfn->stream->total_out / 4;
+}
+
+static int qed_alloc_stream_mem(struct qed_dev *cdev)
+{
+       int i;
+       void *workspace;
+
+       for_each_hwfn(cdev, i) {
+               struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+
+               p_hwfn->stream = kzalloc(sizeof(*p_hwfn->stream), GFP_KERNEL);
+               if (!p_hwfn->stream)
+                       return -ENOMEM;
+
+               workspace = vzalloc(zlib_inflate_workspacesize());
+               if (!workspace)
+                       return -ENOMEM;
+               p_hwfn->stream->workspace = workspace;
+       }
+
+       return 0;
+}
+
+static void qed_free_stream_mem(struct qed_dev *cdev)
+{
+       int i;
+
+       for_each_hwfn(cdev, i) {
+               struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+
+               if (!p_hwfn->stream)
+                       return;
+
+               vfree(p_hwfn->stream->workspace);
+               kfree(p_hwfn->stream);
+       }
+}
+
+static void qed_update_pf_params(struct qed_dev *cdev,
+                                struct qed_pf_params *params)
+{
+       int i;
+
+       for (i = 0; i < cdev->num_hwfns; i++) {
+               struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+
+               p_hwfn->pf_params = *params;
+       }
+}
+
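+/* Brings up the slowpath: requests the firmware file (PF only), allocates and
+ * sets up resources, configures interrupts, initializes the HW with default
+ * tunnel classification and reports the driver version to the management FW.
+ */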
+static int qed_slowpath_start(struct qed_dev *cdev,
+                             struct qed_slowpath_params *params)
+{
+       struct qed_tunn_start_params tunn_info;
+       struct qed_mcp_drv_version drv_version;
+       const u8 *data = NULL;
+       struct qed_hwfn *hwfn;
+       int rc = -EINVAL;
+
+       if (qed_iov_wq_start(cdev))
+               goto err;
+
+       if (IS_PF(cdev)) {
+               rc = request_firmware(&cdev->firmware, QED_FW_FILE_NAME,
+                                     &cdev->pdev->dev);
+               if (rc) {
+                       DP_NOTICE(cdev,
+                                 "Failed to find fw file - /lib/firmware/%s\n",
+                                 QED_FW_FILE_NAME);
+                       goto err;
+               }
+       }
+
+       rc = qed_nic_setup(cdev);
+       if (rc)
+               goto err;
+
+       if (IS_PF(cdev))
+               rc = qed_slowpath_setup_int(cdev, params->int_mode);
+       else
+               rc = qed_slowpath_vf_setup_int(cdev);
+       if (rc)
+               goto err1;
+
+       if (IS_PF(cdev)) {
+               /* Allocate stream for unzipping */
+               rc = qed_alloc_stream_mem(cdev);
+               if (rc) {
+                       DP_NOTICE(cdev, "Failed to allocate stream memory\n");
+                       goto err2;
+               }
+
+               /* The first dword is used to differentiate between various sources */
+               data = cdev->firmware->data + sizeof(u32);
+       }
+
+       memset(&tunn_info, 0, sizeof(tunn_info));
+       tunn_info.tunn_mode |=  1 << QED_MODE_VXLAN_TUNN |
+                               1 << QED_MODE_L2GRE_TUNN |
+                               1 << QED_MODE_IPGRE_TUNN |
+                               1 << QED_MODE_L2GENEVE_TUNN |
+                               1 << QED_MODE_IPGENEVE_TUNN;
+
+       tunn_info.tunn_clss_vxlan = QED_TUNN_CLSS_MAC_VLAN;
+       tunn_info.tunn_clss_l2gre = QED_TUNN_CLSS_MAC_VLAN;
+       tunn_info.tunn_clss_ipgre = QED_TUNN_CLSS_MAC_VLAN;
+
+       /* Start the slowpath */
+       rc = qed_hw_init(cdev, &tunn_info, true,
+                        cdev->int_params.out.int_mode,
+                        true, data);
+       if (rc)
+               goto err2;
+
+       DP_INFO(cdev,
+               "HW initialization and function start completed successfully\n");
+
+       if (IS_PF(cdev)) {
+               hwfn = QED_LEADING_HWFN(cdev);
+               drv_version.version = (params->drv_major << 24) |
+                                     (params->drv_minor << 16) |
+                                     (params->drv_rev << 8) |
+                                     (params->drv_eng);
+               strlcpy(drv_version.name, params->name,
+                       MCP_DRV_VER_STR_SIZE - 4);
+               rc = qed_mcp_send_drv_version(hwfn, hwfn->p_main_ptt,
+                                             &drv_version);
+               if (rc) {
+                       DP_NOTICE(cdev, "Failed sending drv version command\n");
+                       return rc;
+               }
+       }
+
+       qed_reset_vport_stats(cdev);
+
+       return 0;
+
+err2:
+       qed_hw_timers_stop_all(cdev);
+       if (IS_PF(cdev))
+               qed_slowpath_irq_free(cdev);
+       qed_free_stream_mem(cdev);
+       qed_disable_msix(cdev);
+err1:
+       qed_resc_free(cdev);
+err:
+       if (IS_PF(cdev))
+               release_firmware(cdev->firmware);
+
+       qed_iov_wq_stop(cdev, false);
+
+       return rc;
+}
+
+static int qed_slowpath_stop(struct qed_dev *cdev)
+{
+       if (!cdev)
+               return -ENODEV;
+
+       if (IS_PF(cdev)) {
+               qed_free_stream_mem(cdev);
+               if (IS_QED_ETH_IF(cdev))
+                       qed_sriov_disable(cdev, true);
+
+               qed_nic_stop(cdev);
+               qed_slowpath_irq_free(cdev);
+       }
+
+       qed_disable_msix(cdev);
+       qed_nic_reset(cdev);
+
+       qed_iov_wq_stop(cdev, true);
+
+       if (IS_PF(cdev))
+               release_firmware(cdev->firmware);
+
+       return 0;
+}
+
+static void qed_set_id(struct qed_dev *cdev, char name[NAME_SIZE],
+                      char ver_str[VER_SIZE])
+{
+       int i;
+
+       memcpy(cdev->name, name, NAME_SIZE);
+       for_each_hwfn(cdev, i)
+               snprintf(cdev->hwfns[i].name, NAME_SIZE, "%s-%d", name, i);
+
+       memcpy(cdev->ver_str, ver_str, VER_SIZE);
+       cdev->drv_type = DRV_ID_DRV_TYPE_LINUX;
+}
+
+static u32 qed_sb_init(struct qed_dev *cdev,
+                      struct qed_sb_info *sb_info,
+                      void *sb_virt_addr,
+                      dma_addr_t sb_phy_addr, u16 sb_id,
+                      enum qed_sb_type type)
+{
+       struct qed_hwfn *p_hwfn;
+       int hwfn_index;
+       u16 rel_sb_id;
+       u8 n_hwfns;
+       u32 rc;
+
+       /* RoCE uses a single engine and CMT uses two engines. When using both,
+        * we force only a single engine. Storage also uses only engine 0.
+        */
+       if (type == QED_SB_TYPE_L2_QUEUE)
+               n_hwfns = cdev->num_hwfns;
+       else
+               n_hwfns = 1;
+
+       hwfn_index = sb_id % n_hwfns;
+       p_hwfn = &cdev->hwfns[hwfn_index];
+       rel_sb_id = sb_id / n_hwfns;
+
+       DP_VERBOSE(cdev, NETIF_MSG_INTR,
+                  "hwfn [%d] <--[init]-- SB %04x [0x%04x upper]\n",
+                  hwfn_index, rel_sb_id, sb_id);
+
+       rc = qed_int_sb_init(p_hwfn, p_hwfn->p_main_ptt, sb_info,
+                            sb_virt_addr, sb_phy_addr, rel_sb_id);
+
+       return rc;
+}
+
+static u32 qed_sb_release(struct qed_dev *cdev,
+                         struct qed_sb_info *sb_info,
+                         u16 sb_id)
+{
+       struct qed_hwfn *p_hwfn;
+       int hwfn_index;
+       u16 rel_sb_id;
+       u32 rc;
+
+       hwfn_index = sb_id % cdev->num_hwfns;
+       p_hwfn = &cdev->hwfns[hwfn_index];
+       rel_sb_id = sb_id / cdev->num_hwfns;
+
+       DP_VERBOSE(cdev, NETIF_MSG_INTR,
+                  "hwfn [%d] <--[release]-- SB %04x [0x%04x upper]\n",
+                  hwfn_index, rel_sb_id, sb_id);
+
+       rc = qed_int_sb_release(p_hwfn, sb_info, rel_sb_id);
+
+       return rc;
+}
+
+static bool qed_can_link_change(struct qed_dev *cdev)
+{
+       return true;
+}
+
+static int qed_set_link(struct qed_dev *cdev, struct qed_link_params *params)
+{
+       struct qed_hwfn *hwfn;
+       struct qed_mcp_link_params *link_params;
+       struct qed_ptt *ptt;
+       int rc;
+
+       if (!cdev)
+               return -ENODEV;
+
+       if (IS_VF(cdev))
+               return 0;
+
+       /* The link should be set only once per PF */
+       hwfn = &cdev->hwfns[0];
+
+       ptt = qed_ptt_acquire(hwfn);
+       if (!ptt)
+               return -EBUSY;
+
+       link_params = qed_mcp_get_link_params(hwfn);
+       if (params->override_flags & QED_LINK_OVERRIDE_SPEED_AUTONEG)
+               link_params->speed.autoneg = params->autoneg;
+       if (params->override_flags & QED_LINK_OVERRIDE_SPEED_ADV_SPEEDS) {
+               link_params->speed.advertised_speeds = 0;
+               if ((params->adv_speeds & SUPPORTED_1000baseT_Half) ||
+                   (params->adv_speeds & SUPPORTED_1000baseT_Full))
+                       link_params->speed.advertised_speeds |=
+                               NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G;
+               if (params->adv_speeds & SUPPORTED_10000baseKR_Full)
+                       link_params->speed.advertised_speeds |=
+                               NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G;
+               if (params->adv_speeds & SUPPORTED_40000baseLR4_Full)
+                       link_params->speed.advertised_speeds |=
+                               NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G;
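+               /* The two branches below can never match ('& 0'): the legacy
+                * SUPPORTED_* bitmask in this kernel has no 50G/100G speed
+                * bits to map from.
+                */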
+               if (params->adv_speeds & 0)
+                       link_params->speed.advertised_speeds |=
+                               NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G;
+               if (params->adv_speeds & 0)
+                       link_params->speed.advertised_speeds |=
+                           NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G;
+       }
+       if (params->override_flags & QED_LINK_OVERRIDE_SPEED_FORCED_SPEED)
+               link_params->speed.forced_speed = params->forced_speed;
+       if (params->override_flags & QED_LINK_OVERRIDE_PAUSE_CONFIG) {
+               if (params->pause_config & QED_LINK_PAUSE_AUTONEG_ENABLE)
+                       link_params->pause.autoneg = true;
+               else
+                       link_params->pause.autoneg = false;
+               if (params->pause_config & QED_LINK_PAUSE_RX_ENABLE)
+                       link_params->pause.forced_rx = true;
+               else
+                       link_params->pause.forced_rx = false;
+               if (params->pause_config & QED_LINK_PAUSE_TX_ENABLE)
+                       link_params->pause.forced_tx = true;
+               else
+                       link_params->pause.forced_tx = false;
+       }
+       if (params->override_flags & QED_LINK_OVERRIDE_LOOPBACK_MODE) {
+               switch (params->loopback_mode) {
+               case QED_LINK_LOOPBACK_INT_PHY:
+                       link_params->loopback_mode = ETH_LOOPBACK_INT_PHY;
+                       break;
+               case QED_LINK_LOOPBACK_EXT_PHY:
+                       link_params->loopback_mode = ETH_LOOPBACK_EXT_PHY;
+                       break;
+               case QED_LINK_LOOPBACK_EXT:
+                       link_params->loopback_mode = ETH_LOOPBACK_EXT;
+                       break;
+               case QED_LINK_LOOPBACK_MAC:
+                       link_params->loopback_mode = ETH_LOOPBACK_MAC;
+                       break;
+               default:
+                       link_params->loopback_mode = ETH_LOOPBACK_NONE;
+                       break;
+               }
+       }
+
+       rc = qed_mcp_set_link(hwfn, ptt, params->link_up);
+
+       qed_ptt_release(hwfn, ptt);
+
+       return rc;
+}
+
+static int qed_get_port_type(u32 media_type)
+{
+       int port_type;
+
+       switch (media_type) {
+       case MEDIA_SFPP_10G_FIBER:
+       case MEDIA_SFP_1G_FIBER:
+       case MEDIA_XFP_FIBER:
+       case MEDIA_MODULE_FIBER:
+       case MEDIA_KR:
+               port_type = PORT_FIBRE;
+               break;
+       case MEDIA_DA_TWINAX:
+               port_type = PORT_DA;
+               break;
+       case MEDIA_BASE_T:
+               port_type = PORT_TP;
+               break;
+       case MEDIA_NOT_PRESENT:
+               port_type = PORT_NONE;
+               break;
+       case MEDIA_UNSPECIFIED:
+       default:
+               port_type = PORT_OTHER;
+               break;
+       }
+       return port_type;
+}
+
+static int qed_get_link_data(struct qed_hwfn *hwfn,
+                            struct qed_mcp_link_params *params,
+                            struct qed_mcp_link_state *link,
+                            struct qed_mcp_link_capabilities *link_caps)
+{
+       void *p;
+
+       if (!IS_PF(hwfn->cdev)) {
+               qed_vf_get_link_params(hwfn, params);
+               qed_vf_get_link_state(hwfn, link);
+               qed_vf_get_link_caps(hwfn, link_caps);
+
+               return 0;
+       }
+
+       p = qed_mcp_get_link_params(hwfn);
+       if (!p)
+               return -ENXIO;
+       memcpy(params, p, sizeof(*params));
+
+       p = qed_mcp_get_link_state(hwfn);
+       if (!p)
+               return -ENXIO;
+       memcpy(link, p, sizeof(*link));
+
+       p = qed_mcp_get_link_capabilities(hwfn);
+       if (!p)
+               return -ENXIO;
+       memcpy(link_caps, p, sizeof(*link_caps));
+
+       return 0;
+}
+
+static void qed_fill_link(struct qed_hwfn *hwfn,
+                         struct qed_link_output *if_link)
+{
+       struct qed_mcp_link_params params;
+       struct qed_mcp_link_state link;
+       struct qed_mcp_link_capabilities link_caps;
+       u32 media_type;
+
+       memset(if_link, 0, sizeof(*if_link));
+
+       /* Prepare source inputs */
+       if (qed_get_link_data(hwfn, &params, &link, &link_caps)) {
+               dev_warn(&hwfn->cdev->pdev->dev, "no link data available\n");
+               return;
+       }
+
+       /* Set the link parameters to pass to protocol driver */
+       if (link.link_up)
+               if_link->link_up = true;
+
+       /* TODO - at the moment assume supported and advertised speed equal */
+       if_link->supported_caps = SUPPORTED_FIBRE;
+       if (params.speed.autoneg)
+               if_link->supported_caps |= SUPPORTED_Autoneg;
+       if (params.pause.autoneg ||
+           (params.pause.forced_rx && params.pause.forced_tx))
+               if_link->supported_caps |= SUPPORTED_Asym_Pause;
+       if (params.pause.autoneg || params.pause.forced_rx ||
+           params.pause.forced_tx)
+               if_link->supported_caps |= SUPPORTED_Pause;
+
+       if_link->advertised_caps = if_link->supported_caps;
+       if (params.speed.advertised_speeds &
+           NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G)
+               if_link->advertised_caps |= SUPPORTED_1000baseT_Half |
+                                          SUPPORTED_1000baseT_Full;
+       if (params.speed.advertised_speeds &
+           NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G)
+               if_link->advertised_caps |= SUPPORTED_10000baseKR_Full;
+       if (params.speed.advertised_speeds &
+               NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G)
+               if_link->advertised_caps |= SUPPORTED_40000baseLR4_Full;
+       if (params.speed.advertised_speeds &
+               NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G)
+               if_link->advertised_caps |= 0;
+       if (params.speed.advertised_speeds &
+           NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G)
+               if_link->advertised_caps |= 0;
+
+       if (link_caps.speed_capabilities &
+           NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G)
+               if_link->supported_caps |= SUPPORTED_1000baseT_Half |
+                                          SUPPORTED_1000baseT_Full;
+       if (link_caps.speed_capabilities &
+           NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G)
+               if_link->supported_caps |= SUPPORTED_10000baseKR_Full;
+       if (link_caps.speed_capabilities &
+               NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G)
+               if_link->supported_caps |= SUPPORTED_40000baseLR4_Full;
+       if (link_caps.speed_capabilities &
+               NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G)
+               if_link->supported_caps |= 0;
+       if (link_caps.speed_capabilities &
+           NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G)
+               if_link->supported_caps |= 0;
+
+       if (link.link_up)
+               if_link->speed = link.speed;
+
+       /* TODO - fill duplex properly */
+       if_link->duplex = DUPLEX_FULL;
+       qed_mcp_get_media_type(hwfn->cdev, &media_type);
+       if_link->port = qed_get_port_type(media_type);
+
+       if_link->autoneg = params.speed.autoneg;
+
+       if (params.pause.autoneg)
+               if_link->pause_config |= QED_LINK_PAUSE_AUTONEG_ENABLE;
+       if (params.pause.forced_rx)
+               if_link->pause_config |= QED_LINK_PAUSE_RX_ENABLE;
+       if (params.pause.forced_tx)
+               if_link->pause_config |= QED_LINK_PAUSE_TX_ENABLE;
+
+       /* Link partner capabilities */
+       if (link.partner_adv_speed &
+           QED_LINK_PARTNER_SPEED_1G_HD)
+               if_link->lp_caps |= SUPPORTED_1000baseT_Half;
+       if (link.partner_adv_speed &
+           QED_LINK_PARTNER_SPEED_1G_FD)
+               if_link->lp_caps |= SUPPORTED_1000baseT_Full;
+       if (link.partner_adv_speed &
+           QED_LINK_PARTNER_SPEED_10G)
+               if_link->lp_caps |= SUPPORTED_10000baseKR_Full;
+       if (link.partner_adv_speed &
+           QED_LINK_PARTNER_SPEED_40G)
+               if_link->lp_caps |= SUPPORTED_40000baseLR4_Full;
+       if (link.partner_adv_speed &
+           QED_LINK_PARTNER_SPEED_50G)
+               if_link->lp_caps |= 0;
+       if (link.partner_adv_speed &
+           QED_LINK_PARTNER_SPEED_100G)
+               if_link->lp_caps |= 0;
+
+       if (link.an_complete)
+               if_link->lp_caps |= SUPPORTED_Autoneg;
+
+       if (link.partner_adv_pause)
+               if_link->lp_caps |= SUPPORTED_Pause;
+       if (link.partner_adv_pause == QED_LINK_PARTNER_ASYMMETRIC_PAUSE ||
+           link.partner_adv_pause == QED_LINK_PARTNER_BOTH_PAUSE)
+               if_link->lp_caps |= SUPPORTED_Asym_Pause;
+}
+
+static void qed_get_current_link(struct qed_dev *cdev,
+                                struct qed_link_output *if_link)
+{
+       int i;
+
+       qed_fill_link(&cdev->hwfns[0], if_link);
+
+       for_each_hwfn(cdev, i)
+               qed_inform_vf_link_state(&cdev->hwfns[i]);
+}
+
+void qed_link_update(struct qed_hwfn *hwfn)
+{
+       void *cookie = hwfn->cdev->ops_cookie;
+       struct qed_common_cb_ops *op = hwfn->cdev->protocol_ops.common;
+       struct qed_link_output if_link;
+
+       qed_fill_link(hwfn, &if_link);
+       qed_inform_vf_link_state(hwfn);
+
+       if (IS_LEAD_HWFN(hwfn) && cookie)
+               op->link_update(cookie, &if_link);
+}
+
+static int qed_drain(struct qed_dev *cdev)
+{
+       struct qed_hwfn *hwfn;
+       struct qed_ptt *ptt;
+       int i, rc;
+
+       if (IS_VF(cdev))
+               return 0;
+
+       for_each_hwfn(cdev, i) {
+               hwfn = &cdev->hwfns[i];
+               ptt = qed_ptt_acquire(hwfn);
+               if (!ptt) {
+                       DP_NOTICE(hwfn, "Failed to drain NIG; No PTT\n");
+                       return -EBUSY;
+               }
+               rc = qed_mcp_drain(hwfn, ptt);
+               if (rc) {
+                       qed_ptt_release(hwfn, ptt);
+                       return rc;
+               }
+               qed_ptt_release(hwfn, ptt);
+       }
+
+       return 0;
+}
+
+static void qed_get_coalesce(struct qed_dev *cdev, u16 *rx_coal, u16 *tx_coal)
+{
+       *rx_coal = cdev->rx_coalesce_usecs;
+       *tx_coal = cdev->tx_coalesce_usecs;
+}
+
+static int qed_set_coalesce(struct qed_dev *cdev, u16 rx_coal, u16 tx_coal,
+                           u8 qid, u16 sb_id)
+{
+       struct qed_hwfn *hwfn;
+       struct qed_ptt *ptt;
+       int hwfn_index;
+       int status = 0;
+
+       hwfn_index = qid % cdev->num_hwfns;
+       hwfn = &cdev->hwfns[hwfn_index];
+       ptt = qed_ptt_acquire(hwfn);
+       if (!ptt)
+               return -EAGAIN;
+
+       status = qed_set_rxq_coalesce(hwfn, ptt, rx_coal,
+                                     qid / cdev->num_hwfns, sb_id);
+       if (status)
+               goto out;
+       status = qed_set_txq_coalesce(hwfn, ptt, tx_coal,
+                                     qid / cdev->num_hwfns, sb_id);
+out:
+       qed_ptt_release(hwfn, ptt);
+
+       return status;
+}
+
+static int qed_set_led(struct qed_dev *cdev, enum qed_led_mode mode)
+{
+       struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+       struct qed_ptt *ptt;
+       int status = 0;
+
+       ptt = qed_ptt_acquire(hwfn);
+       if (!ptt)
+               return -EAGAIN;
+
+       status = qed_mcp_set_led(hwfn, ptt, mode);
+
+       qed_ptt_release(hwfn, ptt);
+
+       return status;
+}
+
+struct qed_selftest_ops qed_selftest_ops_pass = {
+       .selftest_memory = &qed_selftest_memory,
+       .selftest_interrupt = &qed_selftest_interrupt,
+       .selftest_register = &qed_selftest_register,
+       .selftest_clock = &qed_selftest_clock,
+};
+
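+/* Common operations table exposed to the protocol driver (e.g. qede) via the
+ * qed_if interface.
+ */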
+const struct qed_common_ops qed_common_ops_pass = {
+       .selftest = &qed_selftest_ops_pass,
+       .probe = &qed_probe,
+       .remove = &qed_remove,
+       .set_power_state = &qed_set_power_state,
+       .set_id = &qed_set_id,
+       .update_pf_params = &qed_update_pf_params,
+       .slowpath_start = &qed_slowpath_start,
+       .slowpath_stop = &qed_slowpath_stop,
+       .set_fp_int = &qed_set_int_fp,
+       .get_fp_int = &qed_get_int_fp,
+       .sb_init = &qed_sb_init,
+       .sb_release = &qed_sb_release,
+       .simd_handler_config = &qed_simd_handler_config,
+       .simd_handler_clean = &qed_simd_handler_clean,
+       .can_link_change = &qed_can_link_change,
+       .set_link = &qed_set_link,
+       .get_link = &qed_get_current_link,
+       .drain = &qed_drain,
+       .update_msglvl = &qed_init_dp,
+       .chain_alloc = &qed_chain_alloc,
+       .chain_free = &qed_chain_free,
+       .get_coalesce = &qed_get_coalesce,
+       .set_coalesce = &qed_set_coalesce,
+       .set_led = &qed_set_led,
+};
diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
new file mode 100644 (file)
index 0000000..f776a77
--- /dev/null
@@ -0,0 +1,1238 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015 QLogic Corporation
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#include <linux/types.h>
+#include <asm/byteorder.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
+#include "qed.h"
+#include "qed_dcbx.h"
+#include "qed_hsi.h"
+#include "qed_hw.h"
+#include "qed_mcp.h"
+#include "qed_reg_addr.h"
+#include "qed_sriov.h"
+
+#define CHIP_MCP_RESP_ITER_US 10
+
+#define QED_DRV_MB_MAX_RETRIES (500 * 1000)    /* Account for 5 sec */
+#define QED_MCP_RESET_RETRIES  (50 * 1000)     /* Account for 500 msec */
+
+#define DRV_INNER_WR(_p_hwfn, _p_ptt, _ptr, _offset, _val)          \
+       qed_wr(_p_hwfn, _p_ptt, (_p_hwfn->mcp_info->_ptr + _offset), \
+              _val)
+
+#define DRV_INNER_RD(_p_hwfn, _p_ptt, _ptr, _offset) \
+       qed_rd(_p_hwfn, _p_ptt, (_p_hwfn->mcp_info->_ptr + _offset))
+
+#define DRV_MB_WR(_p_hwfn, _p_ptt, _field, _val)  \
+       DRV_INNER_WR(p_hwfn, _p_ptt, drv_mb_addr, \
+                    offsetof(struct public_drv_mb, _field), _val)
+
+#define DRV_MB_RD(_p_hwfn, _p_ptt, _field)        \
+       DRV_INNER_RD(_p_hwfn, _p_ptt, drv_mb_addr, \
+                    offsetof(struct public_drv_mb, _field))
+
+#define PDA_COMP (((FW_MAJOR_VERSION) + (FW_MINOR_VERSION << 8)) << \
+                 DRV_ID_PDA_COMP_VER_SHIFT)
+
+#define MCP_BYTES_PER_MBIT_SHIFT 17
+
+bool qed_mcp_is_init(struct qed_hwfn *p_hwfn)
+{
+       if (!p_hwfn->mcp_info || !p_hwfn->mcp_info->public_base)
+               return false;
+       return true;
+}
+
+void qed_mcp_cmd_port_init(struct qed_hwfn *p_hwfn,
+                          struct qed_ptt *p_ptt)
+{
+       u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
+                                       PUBLIC_PORT);
+       u32 mfw_mb_offsize = qed_rd(p_hwfn, p_ptt, addr);
+
+       p_hwfn->mcp_info->port_addr = SECTION_ADDR(mfw_mb_offsize,
+                                                  MFW_PORT(p_hwfn));
+       DP_VERBOSE(p_hwfn, QED_MSG_SP,
+                  "port_addr = 0x%x, port_id 0x%02x\n",
+                  p_hwfn->mcp_info->port_addr, MFW_PORT(p_hwfn));
+}
+
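+/* Copies the MFW message block from device shared memory into mfw_mb_cur,
+ * converting each dword from big-endian to CPU order.
+ */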
+void qed_mcp_read_mb(struct qed_hwfn *p_hwfn,
+                    struct qed_ptt *p_ptt)
+{
+       u32 length = MFW_DRV_MSG_MAX_DWORDS(p_hwfn->mcp_info->mfw_mb_length);
+       u32 tmp, i;
+
+       if (!p_hwfn->mcp_info->public_base)
+               return;
+
+       for (i = 0; i < length; i++) {
+               tmp = qed_rd(p_hwfn, p_ptt,
+                            p_hwfn->mcp_info->mfw_mb_addr +
+                            (i << 2) + sizeof(u32));
+
+               /* The MB data is actually BE; Need to force it to cpu */
+               ((u32 *)p_hwfn->mcp_info->mfw_mb_cur)[i] =
+                       be32_to_cpu((__force __be32)tmp);
+       }
+}
+
+int qed_mcp_free(struct qed_hwfn *p_hwfn)
+{
+       if (p_hwfn->mcp_info) {
+               kfree(p_hwfn->mcp_info->mfw_mb_cur);
+               kfree(p_hwfn->mcp_info->mfw_mb_shadow);
+       }
+       kfree(p_hwfn->mcp_info);
+
+       return 0;
+}
+
+static int qed_load_mcp_offsets(struct qed_hwfn *p_hwfn,
+                               struct qed_ptt *p_ptt)
+{
+       struct qed_mcp_info *p_info = p_hwfn->mcp_info;
+       u32 drv_mb_offsize, mfw_mb_offsize;
+       u32 mcp_pf_id = MCP_PF_ID(p_hwfn);
+
+       p_info->public_base = qed_rd(p_hwfn, p_ptt, MISC_REG_SHARED_MEM_ADDR);
+       if (!p_info->public_base)
+               return 0;
+
+       p_info->public_base |= GRCBASE_MCP;
+
+       /* Calculate the driver and MFW mailbox address */
+       drv_mb_offsize = qed_rd(p_hwfn, p_ptt,
+                               SECTION_OFFSIZE_ADDR(p_info->public_base,
+                                                    PUBLIC_DRV_MB));
+       p_info->drv_mb_addr = SECTION_ADDR(drv_mb_offsize, mcp_pf_id);
+       DP_VERBOSE(p_hwfn, QED_MSG_SP,
+                  "drv_mb_offsize = 0x%x, drv_mb_addr = 0x%x, mcp_pf_id = 0x%x\n",
+                  drv_mb_offsize, p_info->drv_mb_addr, mcp_pf_id);
+
+       /* Set the MFW MB address */
+       mfw_mb_offsize = qed_rd(p_hwfn, p_ptt,
+                               SECTION_OFFSIZE_ADDR(p_info->public_base,
+                                                    PUBLIC_MFW_MB));
+       p_info->mfw_mb_addr = SECTION_ADDR(mfw_mb_offsize, mcp_pf_id);
+       p_info->mfw_mb_length = (u16)qed_rd(p_hwfn, p_ptt, p_info->mfw_mb_addr);
+
+       /* Get the current driver mailbox sequence before sending
+        * the first command
+        */
+       p_info->drv_mb_seq = DRV_MB_RD(p_hwfn, p_ptt, drv_mb_header) &
+                            DRV_MSG_SEQ_NUMBER_MASK;
+
+       /* Get current FW pulse sequence */
+       p_info->drv_pulse_seq = DRV_MB_RD(p_hwfn, p_ptt, drv_pulse_mb) &
+                               DRV_PULSE_SEQ_MASK;
+
+       p_info->mcp_hist = (u16)qed_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0);
+
+       return 0;
+}
+
+int qed_mcp_cmd_init(struct qed_hwfn *p_hwfn,
+                    struct qed_ptt *p_ptt)
+{
+       struct qed_mcp_info *p_info;
+       u32 size;
+
+       /* Allocate mcp_info structure */
+       p_hwfn->mcp_info = kzalloc(sizeof(*p_hwfn->mcp_info), GFP_KERNEL);
+       if (!p_hwfn->mcp_info)
+               goto err;
+       p_info = p_hwfn->mcp_info;
+
+       if (qed_load_mcp_offsets(p_hwfn, p_ptt) != 0) {
+               DP_NOTICE(p_hwfn, "MCP is not initialized\n");
+               /* Do not free mcp_info here, since public_base indicates that
+                * the MCP is not initialized
+                */
+               return 0;
+       }
+
+       size = MFW_DRV_MSG_MAX_DWORDS(p_info->mfw_mb_length) * sizeof(u32);
+       p_info->mfw_mb_cur = kzalloc(size, GFP_KERNEL);
+       p_info->mfw_mb_shadow =
+               kzalloc(sizeof(u32) * MFW_DRV_MSG_MAX_DWORDS(
+                               p_info->mfw_mb_length), GFP_KERNEL);
+       if (!p_info->mfw_mb_shadow || !p_info->mfw_mb_addr)
+               goto err;
+
+       /* Initialize the MFW spinlock */
+       spin_lock_init(&p_info->lock);
+
+       return 0;
+
+err:
+       DP_NOTICE(p_hwfn, "Failed to allocate mcp memory\n");
+       qed_mcp_free(p_hwfn);
+       return -ENOMEM;
+}
+
+/* Locks the MFW mailbox of a PF to ensure a single access.
+ * The lock is achieved in most cases by holding a spinlock, causing other
+ * threads to wait until a previous access is done.
+ * In some cases (currently when [UN]LOAD_REQ commands are sent), the single
+ * access is achieved by setting a blocking flag, which causes other
+ * competing contexts' mailbox sends to fail.
+ */
+static int qed_mcp_mb_lock(struct qed_hwfn *p_hwfn,
+                          u32 cmd)
+{
+       spin_lock_bh(&p_hwfn->mcp_info->lock);
+
+       /* The spinlock shouldn't be acquired when the mailbox command is
+        * [UN]LOAD_REQ, since the engine is locked by the MFW, and a parallel
+        * pending [UN]LOAD_REQ command of another PF together with a spinlock
+        * (i.e. interrupts are disabled) - can lead to a deadlock.
+        * It is assumed that for a single PF, no other mailbox commands can be
+        * sent from another context while sending LOAD_REQ, and that any
+        * parallel commands to UNLOAD_REQ can be cancelled.
+        */
+       if (cmd == DRV_MSG_CODE_LOAD_DONE || cmd == DRV_MSG_CODE_UNLOAD_DONE)
+               p_hwfn->mcp_info->block_mb_sending = false;
+
+       if (p_hwfn->mcp_info->block_mb_sending) {
+               DP_NOTICE(p_hwfn,
+                         "Trying to send a MFW mailbox command [0x%x] in parallel to [UN]LOAD_REQ. Aborting.\n",
+                         cmd);
+               spin_unlock_bh(&p_hwfn->mcp_info->lock);
+               return -EBUSY;
+       }
+
+       if (cmd == DRV_MSG_CODE_LOAD_REQ || cmd == DRV_MSG_CODE_UNLOAD_REQ) {
+               p_hwfn->mcp_info->block_mb_sending = true;
+               spin_unlock_bh(&p_hwfn->mcp_info->lock);
+       }
+
+       return 0;
+}
+
+static void qed_mcp_mb_unlock(struct qed_hwfn  *p_hwfn,
+                             u32               cmd)
+{
+       if (cmd != DRV_MSG_CODE_LOAD_REQ && cmd != DRV_MSG_CODE_UNLOAD_REQ)
+               spin_unlock_bh(&p_hwfn->mcp_info->lock);
+}
+
+int qed_mcp_reset(struct qed_hwfn *p_hwfn,
+                 struct qed_ptt *p_ptt)
+{
+       u32 seq = ++p_hwfn->mcp_info->drv_mb_seq;
+       u8 delay = CHIP_MCP_RESP_ITER_US;
+       u32 org_mcp_reset_seq, cnt = 0;
+       int rc = 0;
+
+       /* Ensure that only a single thread is accessing the mailbox at any
+        * given time.
+        */
+       rc = qed_mcp_mb_lock(p_hwfn, DRV_MSG_CODE_MCP_RESET);
+       if (rc != 0)
+               return rc;
+
+       /* Set drv command along with the updated sequence */
+       org_mcp_reset_seq = qed_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0);
+       DRV_MB_WR(p_hwfn, p_ptt, drv_mb_header,
+                 (DRV_MSG_CODE_MCP_RESET | seq));
+
+       do {
+               /* Wait for MFW response */
+               udelay(delay);
+               /* Give the FW up to 0.5 seconds (50*1000*10usec) */
+       } while ((org_mcp_reset_seq == qed_rd(p_hwfn, p_ptt,
+                                             MISCS_REG_GENERIC_POR_0)) &&
+                (cnt++ < QED_MCP_RESET_RETRIES));
+
+       if (org_mcp_reset_seq !=
+           qed_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0)) {
+               DP_VERBOSE(p_hwfn, QED_MSG_SP,
+                          "MCP was reset after %d usec\n", cnt * delay);
+       } else {
+               DP_ERR(p_hwfn, "Failed to reset MCP\n");
+               rc = -EAGAIN;
+       }
+
+       qed_mcp_mb_unlock(p_hwfn, DRV_MSG_CODE_MCP_RESET);
+
+       return rc;
+}
+
+static int qed_do_mcp_cmd(struct qed_hwfn *p_hwfn,
+                         struct qed_ptt *p_ptt,
+                         u32 cmd,
+                         u32 param,
+                         u32 *o_mcp_resp,
+                         u32 *o_mcp_param)
+{
+       u8 delay = CHIP_MCP_RESP_ITER_US;
+       u32 seq, cnt = 1, actual_mb_seq;
+       int rc = 0;
+
+       /* Get actual driver mailbox sequence */
+       actual_mb_seq = DRV_MB_RD(p_hwfn, p_ptt, drv_mb_header) &
+                       DRV_MSG_SEQ_NUMBER_MASK;
+
+       /* Use MCP history register to check if MCP reset occurred between
+        * init time and now.
+        */
+       if (p_hwfn->mcp_info->mcp_hist !=
+           qed_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0)) {
+               DP_VERBOSE(p_hwfn, QED_MSG_SP, "Rereading MCP offsets\n");
+               qed_load_mcp_offsets(p_hwfn, p_ptt);
+               qed_mcp_cmd_port_init(p_hwfn, p_ptt);
+       }
+       seq = ++p_hwfn->mcp_info->drv_mb_seq;
+
+       /* Set drv param */
+       DRV_MB_WR(p_hwfn, p_ptt, drv_mb_param, param);
+
+       /* Set drv command along with the updated sequence */
+       DRV_MB_WR(p_hwfn, p_ptt, drv_mb_header, (cmd | seq));
+
+       DP_VERBOSE(p_hwfn, QED_MSG_SP,
+                  "wrote command (%x) to MFW MB param 0x%08x\n",
+                  (cmd | seq), param);
+
+       do {
+               /* Wait for MFW response */
+               udelay(delay);
+               *o_mcp_resp = DRV_MB_RD(p_hwfn, p_ptt, fw_mb_header);
+
+               /* Give the FW up to 5 seconds (500*10ms) */
+       } while ((seq != (*o_mcp_resp & FW_MSG_SEQ_NUMBER_MASK)) &&
+                (cnt++ < QED_DRV_MB_MAX_RETRIES));
+
+       DP_VERBOSE(p_hwfn, QED_MSG_SP,
+                  "[after %d usec] read (%x) seq is (%x) from FW MB\n",
+                  cnt * delay, *o_mcp_resp, seq);
+
+       /* Is this a reply to our command? */
+       if (seq == (*o_mcp_resp & FW_MSG_SEQ_NUMBER_MASK)) {
+               *o_mcp_resp &= FW_MSG_CODE_MASK;
+               /* Get the MCP param */
+               *o_mcp_param = DRV_MB_RD(p_hwfn, p_ptt, fw_mb_param);
+       } else {
+               /* FW BUG! */
+               DP_ERR(p_hwfn, "MFW failed to respond!\n");
+               *o_mcp_resp = 0;
+               rc = -EAGAIN;
+       }
+       return rc;
+}
+
+static int qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn,
+                                struct qed_ptt *p_ptt,
+                                struct qed_mcp_mb_params *p_mb_params)
+{
+       u32 union_data_addr;
+       int rc;
+
+       /* MCP not initialized */
+       if (!qed_mcp_is_init(p_hwfn)) {
+               DP_NOTICE(p_hwfn, "MFW is not initialized !\n");
+               return -EBUSY;
+       }
+
+       union_data_addr = p_hwfn->mcp_info->drv_mb_addr +
+                         offsetof(struct public_drv_mb, union_data);
+
+       /* Ensure that only a single thread is accessing the mailbox at a
+        * certain time.
+        */
+       rc = qed_mcp_mb_lock(p_hwfn, p_mb_params->cmd);
+       if (rc)
+               return rc;
+
+       if (p_mb_params->p_data_src != NULL)
+               qed_memcpy_to(p_hwfn, p_ptt, union_data_addr,
+                             p_mb_params->p_data_src,
+                             sizeof(*p_mb_params->p_data_src));
+
+       rc = qed_do_mcp_cmd(p_hwfn, p_ptt, p_mb_params->cmd,
+                           p_mb_params->param, &p_mb_params->mcp_resp,
+                           &p_mb_params->mcp_param);
+
+       if (p_mb_params->p_data_dst != NULL)
+               qed_memcpy_from(p_hwfn, p_ptt, p_mb_params->p_data_dst,
+                               union_data_addr,
+                               sizeof(*p_mb_params->p_data_dst));
+
+       qed_mcp_mb_unlock(p_hwfn, p_mb_params->cmd);
+
+       return rc;
+}
+
+int qed_mcp_cmd(struct qed_hwfn *p_hwfn,
+               struct qed_ptt *p_ptt,
+               u32 cmd,
+               u32 param,
+               u32 *o_mcp_resp,
+               u32 *o_mcp_param)
+{
+       struct qed_mcp_mb_params mb_params;
+       int rc;
+
+       memset(&mb_params, 0, sizeof(mb_params));
+       mb_params.cmd = cmd;
+       mb_params.param = param;
+       rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
+       if (rc)
+               return rc;
+
+       *o_mcp_resp = mb_params.mcp_resp;
+       *o_mcp_param = mb_params.mcp_param;
+
+       return 0;
+}
+
+int qed_mcp_load_req(struct qed_hwfn *p_hwfn,
+                    struct qed_ptt *p_ptt,
+                    u32 *p_load_code)
+{
+       struct qed_dev *cdev = p_hwfn->cdev;
+       struct qed_mcp_mb_params mb_params;
+       union drv_union_data union_data;
+       int rc;
+
+       memset(&mb_params, 0, sizeof(mb_params));
+       /* Load Request */
+       mb_params.cmd = DRV_MSG_CODE_LOAD_REQ;
+       mb_params.param = PDA_COMP | DRV_ID_MCP_HSI_VER_CURRENT |
+                         cdev->drv_type;
+       memcpy(&union_data.ver_str, cdev->ver_str, MCP_DRV_VER_STR_SIZE);
+       mb_params.p_data_src = &union_data;
+       rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
+
+       /* if mcp fails to respond we must abort */
+       if (rc) {
+               DP_ERR(p_hwfn, "MCP response failure, aborting\n");
+               return rc;
+       }
+
+       *p_load_code = mb_params.mcp_resp;
+
+       /* If the MFW refused the load request we must abort. This can happen
+        * in the following cases:
+        * - The other port is in diagnostic mode.
+        * - A previously loaded function on the engine is not compliant with
+        *   the requester.
+        * - The MFW cannot cope with the requester's DRV_MFW_HSI_VERSION.
+        */
+       if (!(*p_load_code) ||
+           ((*p_load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED_HSI) ||
+           ((*p_load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED_PDA) ||
+           ((*p_load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED_DIAG)) {
+               DP_ERR(p_hwfn, "MCP refused load request, aborting\n");
+               return -EBUSY;
+       }
+
+       return 0;
+}
+
+static void qed_mcp_handle_vf_flr(struct qed_hwfn *p_hwfn,
+                                 struct qed_ptt *p_ptt)
+{
+       u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
+                                       PUBLIC_PATH);
+       u32 mfw_path_offsize = qed_rd(p_hwfn, p_ptt, addr);
+       u32 path_addr = SECTION_ADDR(mfw_path_offsize,
+                                    QED_PATH_ID(p_hwfn));
+       u32 disabled_vfs[VF_MAX_STATIC / 32];
+       int i;
+
+       DP_VERBOSE(p_hwfn,
+                  QED_MSG_SP,
+                  "Reading Disabled VF information from [offset %08x], path_addr %08x\n",
+                  mfw_path_offsize, path_addr);
+
+       for (i = 0; i < (VF_MAX_STATIC / 32); i++) {
+               disabled_vfs[i] = qed_rd(p_hwfn, p_ptt,
+                                        path_addr +
+                                        offsetof(struct public_path,
+                                                 mcp_vf_disabled) +
+                                        sizeof(u32) * i);
+               DP_VERBOSE(p_hwfn, (QED_MSG_SP | QED_MSG_IOV),
+                          "FLR-ed VFs [%08x,...,%08x] - %08x\n",
+                          i * 32, (i + 1) * 32 - 1, disabled_vfs[i]);
+       }
+
+       if (qed_iov_mark_vf_flr(p_hwfn, disabled_vfs))
+               qed_schedule_iov(p_hwfn, QED_IOV_WQ_FLR_FLAG);
+}
+
+int qed_mcp_ack_vf_flr(struct qed_hwfn *p_hwfn,
+                      struct qed_ptt *p_ptt, u32 *vfs_to_ack)
+{
+       u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
+                                       PUBLIC_FUNC);
+       u32 mfw_func_offsize = qed_rd(p_hwfn, p_ptt, addr);
+       u32 func_addr = SECTION_ADDR(mfw_func_offsize,
+                                    MCP_PF_ID(p_hwfn));
+       struct qed_mcp_mb_params mb_params;
+       union drv_union_data union_data;
+       int rc;
+       int i;
+
+       for (i = 0; i < (VF_MAX_STATIC / 32); i++)
+               DP_VERBOSE(p_hwfn, (QED_MSG_SP | QED_MSG_IOV),
+                          "Acking VFs [%08x,...,%08x] - %08x\n",
+                          i * 32, (i + 1) * 32 - 1, vfs_to_ack[i]);
+
+       memset(&mb_params, 0, sizeof(mb_params));
+       mb_params.cmd = DRV_MSG_CODE_VF_DISABLED_DONE;
+       memcpy(&union_data.ack_vf_disabled, vfs_to_ack, VF_MAX_STATIC / 8);
+       mb_params.p_data_src = &union_data;
+       rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
+       if (rc) {
+               DP_NOTICE(p_hwfn, "Failed to pass ACK for VF flr to MFW\n");
+               return -EBUSY;
+       }
+
+       /* Clear the ACK bits */
+       for (i = 0; i < (VF_MAX_STATIC / 32); i++)
+               qed_wr(p_hwfn, p_ptt,
+                      func_addr +
+                      offsetof(struct public_func, drv_ack_vf_disabled) +
+                      i * sizeof(u32), 0);
+
+       return rc;
+}
+
+static void qed_mcp_handle_transceiver_change(struct qed_hwfn *p_hwfn,
+                                             struct qed_ptt *p_ptt)
+{
+       u32 transceiver_state;
+
+       transceiver_state = qed_rd(p_hwfn, p_ptt,
+                                  p_hwfn->mcp_info->port_addr +
+                                  offsetof(struct public_port,
+                                           transceiver_data));
+
+       DP_VERBOSE(p_hwfn,
+                  (NETIF_MSG_HW | QED_MSG_SP),
+                  "Received transceiver state update [0x%08x] from mfw [Addr 0x%x]\n",
+                  transceiver_state,
+                  (u32)(p_hwfn->mcp_info->port_addr +
+                        offsetof(struct public_port,
+                                 transceiver_data)));
+
+       transceiver_state = GET_FIELD(transceiver_state,
+                                     ETH_TRANSCEIVER_STATE);
+
+       if (transceiver_state == ETH_TRANSCEIVER_STATE_PRESENT)
+               DP_NOTICE(p_hwfn, "Transceiver is present.\n");
+       else
+               DP_NOTICE(p_hwfn, "Transceiver is unplugged.\n");
+}
+
+static void qed_mcp_handle_link_change(struct qed_hwfn *p_hwfn,
+                                      struct qed_ptt *p_ptt,
+                                      bool b_reset)
+{
+       struct qed_mcp_link_state *p_link;
+       u8 max_bw, min_bw;
+       u32 status = 0;
+
+       p_link = &p_hwfn->mcp_info->link_output;
+       memset(p_link, 0, sizeof(*p_link));
+       if (!b_reset) {
+               status = qed_rd(p_hwfn, p_ptt,
+                               p_hwfn->mcp_info->port_addr +
+                               offsetof(struct public_port, link_status));
+               DP_VERBOSE(p_hwfn, (NETIF_MSG_LINK | QED_MSG_SP),
+                          "Received link update [0x%08x] from mfw [Addr 0x%x]\n",
+                          status,
+                          (u32)(p_hwfn->mcp_info->port_addr +
+                                offsetof(struct public_port,
+                                         link_status)));
+       } else {
+               DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
+                          "Resetting link indications\n");
+               return;
+       }
+
+       if (p_hwfn->b_drv_link_init)
+               p_link->link_up = !!(status & LINK_STATUS_LINK_UP);
+       else
+               p_link->link_up = false;
+
+       p_link->full_duplex = true;
+       switch ((status & LINK_STATUS_SPEED_AND_DUPLEX_MASK)) {
+       case LINK_STATUS_SPEED_AND_DUPLEX_100G:
+               p_link->speed = 100000;
+               break;
+       case LINK_STATUS_SPEED_AND_DUPLEX_50G:
+               p_link->speed = 50000;
+               break;
+       case LINK_STATUS_SPEED_AND_DUPLEX_40G:
+               p_link->speed = 40000;
+               break;
+       case LINK_STATUS_SPEED_AND_DUPLEX_25G:
+               p_link->speed = 25000;
+               break;
+       case LINK_STATUS_SPEED_AND_DUPLEX_20G:
+               p_link->speed = 20000;
+               break;
+       case LINK_STATUS_SPEED_AND_DUPLEX_10G:
+               p_link->speed = 10000;
+               break;
+       case LINK_STATUS_SPEED_AND_DUPLEX_1000THD:
+               p_link->full_duplex = false;
+       /* Fall-through */
+       case LINK_STATUS_SPEED_AND_DUPLEX_1000TFD:
+               p_link->speed = 1000;
+               break;
+       default:
+               p_link->speed = 0;
+       }
+
+       if (p_link->link_up && p_link->speed)
+               p_link->line_speed = p_link->speed;
+       else
+               p_link->line_speed = 0;
+
+       max_bw = p_hwfn->mcp_info->func_info.bandwidth_max;
+       min_bw = p_hwfn->mcp_info->func_info.bandwidth_min;
+
+       /* Max bandwidth configuration */
+       __qed_configure_pf_max_bandwidth(p_hwfn, p_ptt, p_link, max_bw);
+
+       /* Min bandwidth configuration */
+       __qed_configure_pf_min_bandwidth(p_hwfn, p_ptt, p_link, min_bw);
+       qed_configure_vp_wfq_on_link_change(p_hwfn->cdev, p_link->min_pf_rate);
+
+       p_link->an = !!(status & LINK_STATUS_AUTO_NEGOTIATE_ENABLED);
+       p_link->an_complete = !!(status &
+                                LINK_STATUS_AUTO_NEGOTIATE_COMPLETE);
+       p_link->parallel_detection = !!(status &
+                                       LINK_STATUS_PARALLEL_DETECTION_USED);
+       p_link->pfc_enabled = !!(status & LINK_STATUS_PFC_ENABLED);
+
+       p_link->partner_adv_speed |=
+               (status & LINK_STATUS_LINK_PARTNER_1000TFD_CAPABLE) ?
+               QED_LINK_PARTNER_SPEED_1G_FD : 0;
+       p_link->partner_adv_speed |=
+               (status & LINK_STATUS_LINK_PARTNER_1000THD_CAPABLE) ?
+               QED_LINK_PARTNER_SPEED_1G_HD : 0;
+       p_link->partner_adv_speed |=
+               (status & LINK_STATUS_LINK_PARTNER_10G_CAPABLE) ?
+               QED_LINK_PARTNER_SPEED_10G : 0;
+       p_link->partner_adv_speed |=
+               (status & LINK_STATUS_LINK_PARTNER_20G_CAPABLE) ?
+               QED_LINK_PARTNER_SPEED_20G : 0;
+       p_link->partner_adv_speed |=
+               (status & LINK_STATUS_LINK_PARTNER_40G_CAPABLE) ?
+               QED_LINK_PARTNER_SPEED_40G : 0;
+       p_link->partner_adv_speed |=
+               (status & LINK_STATUS_LINK_PARTNER_50G_CAPABLE) ?
+               QED_LINK_PARTNER_SPEED_50G : 0;
+       p_link->partner_adv_speed |=
+               (status & LINK_STATUS_LINK_PARTNER_100G_CAPABLE) ?
+               QED_LINK_PARTNER_SPEED_100G : 0;
+
+       p_link->partner_tx_flow_ctrl_en =
+               !!(status & LINK_STATUS_TX_FLOW_CONTROL_ENABLED);
+       p_link->partner_rx_flow_ctrl_en =
+               !!(status & LINK_STATUS_RX_FLOW_CONTROL_ENABLED);
+
+       switch (status & LINK_STATUS_LINK_PARTNER_FLOW_CONTROL_MASK) {
+       case LINK_STATUS_LINK_PARTNER_SYMMETRIC_PAUSE:
+               p_link->partner_adv_pause = QED_LINK_PARTNER_SYMMETRIC_PAUSE;
+               break;
+       case LINK_STATUS_LINK_PARTNER_ASYMMETRIC_PAUSE:
+               p_link->partner_adv_pause = QED_LINK_PARTNER_ASYMMETRIC_PAUSE;
+               break;
+       case LINK_STATUS_LINK_PARTNER_BOTH_PAUSE:
+               p_link->partner_adv_pause = QED_LINK_PARTNER_BOTH_PAUSE;
+               break;
+       default:
+               p_link->partner_adv_pause = 0;
+       }
+
+       p_link->sfp_tx_fault = !!(status & LINK_STATUS_SFP_TX_FAULT);
+
+       qed_link_update(p_hwfn);
+}
+
+int qed_mcp_set_link(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, bool b_up)
+{
+       struct qed_mcp_link_params *params = &p_hwfn->mcp_info->link_input;
+       struct qed_mcp_mb_params mb_params;
+       union drv_union_data union_data;
+       struct eth_phy_cfg *phy_cfg;
+       int rc = 0;
+       u32 cmd;
+
+       /* Set the shmem configuration according to params */
+       phy_cfg = &union_data.drv_phy_cfg;
+       memset(phy_cfg, 0, sizeof(*phy_cfg));
+       cmd = b_up ? DRV_MSG_CODE_INIT_PHY : DRV_MSG_CODE_LINK_RESET;
+       if (!params->speed.autoneg)
+               phy_cfg->speed = params->speed.forced_speed;
+       phy_cfg->pause |= (params->pause.autoneg) ? ETH_PAUSE_AUTONEG : 0;
+       phy_cfg->pause |= (params->pause.forced_rx) ? ETH_PAUSE_RX : 0;
+       phy_cfg->pause |= (params->pause.forced_tx) ? ETH_PAUSE_TX : 0;
+       phy_cfg->adv_speed = params->speed.advertised_speeds;
+       phy_cfg->loopback_mode = params->loopback_mode;
+
+       p_hwfn->b_drv_link_init = b_up;
+
+       if (b_up) {
+               DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
+                          "Configuring Link: Speed 0x%08x, Pause 0x%08x, adv_speed 0x%08x, loopback 0x%08x, features 0x%08x\n",
+                          phy_cfg->speed,
+                          phy_cfg->pause,
+                          phy_cfg->adv_speed,
+                          phy_cfg->loopback_mode,
+                          phy_cfg->feature_config_flags);
+       } else {
+               DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
+                          "Resetting link\n");
+       }
+
+       memset(&mb_params, 0, sizeof(mb_params));
+       mb_params.cmd = cmd;
+       mb_params.p_data_src = &union_data;
+       rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
+
+       /* if mcp fails to respond we must abort */
+       if (rc) {
+               DP_ERR(p_hwfn, "MCP response failure, aborting\n");
+               return rc;
+       }
+
+       /* Reset the link status if needed */
+       if (!b_up)
+               qed_mcp_handle_link_change(p_hwfn, p_ptt, true);
+
+       return 0;
+}
+
+static void qed_read_pf_bandwidth(struct qed_hwfn *p_hwfn,
+                                 struct public_func *p_shmem_info)
+{
+       struct qed_mcp_function_info *p_info;
+
+       p_info = &p_hwfn->mcp_info->func_info;
+
+       p_info->bandwidth_min = (p_shmem_info->config &
+                                FUNC_MF_CFG_MIN_BW_MASK) >>
+                                       FUNC_MF_CFG_MIN_BW_SHIFT;
+       if (p_info->bandwidth_min < 1 || p_info->bandwidth_min > 100) {
+               DP_INFO(p_hwfn,
+                       "bandwidth minimum out of bounds [%02x]. Set to 1\n",
+                       p_info->bandwidth_min);
+               p_info->bandwidth_min = 1;
+       }
+
+       p_info->bandwidth_max = (p_shmem_info->config &
+                                FUNC_MF_CFG_MAX_BW_MASK) >>
+                                       FUNC_MF_CFG_MAX_BW_SHIFT;
+       if (p_info->bandwidth_max < 1 || p_info->bandwidth_max > 100) {
+               DP_INFO(p_hwfn,
+                       "bandwidth maximum out of bounds [%02x]. Set to 100\n",
+                       p_info->bandwidth_max);
+               p_info->bandwidth_max = 100;
+       }
+}
+
+static u32 qed_mcp_get_shmem_func(struct qed_hwfn *p_hwfn,
+                                 struct qed_ptt *p_ptt,
+                                 struct public_func *p_data,
+                                 int pfid)
+{
+       u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
+                                       PUBLIC_FUNC);
+       u32 mfw_path_offsize = qed_rd(p_hwfn, p_ptt, addr);
+       u32 func_addr = SECTION_ADDR(mfw_path_offsize, pfid);
+       u32 i, size;
+
+       memset(p_data, 0, sizeof(*p_data));
+
+       size = min_t(u32, sizeof(*p_data),
+                    QED_SECTION_SIZE(mfw_path_offsize));
+       for (i = 0; i < size / sizeof(u32); i++)
+               ((u32 *)p_data)[i] = qed_rd(p_hwfn, p_ptt,
+                                           func_addr + (i << 2));
+       return size;
+}
+
+int qed_hw_init_first_eth(struct qed_hwfn *p_hwfn,
+                         struct qed_ptt *p_ptt, u8 *p_pf)
+{
+       struct public_func shmem_info;
+       int i;
+
+       /* Find first Ethernet interface in port */
+       for (i = 0; i < NUM_OF_ENG_PFS(p_hwfn->cdev);
+            i += p_hwfn->cdev->num_ports_in_engines) {
+               qed_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info,
+                                      MCP_PF_ID_BY_REL(p_hwfn, i));
+
+               if (shmem_info.config & FUNC_MF_CFG_FUNC_HIDE)
+                       continue;
+
+               if ((shmem_info.config & FUNC_MF_CFG_PROTOCOL_MASK) ==
+                   FUNC_MF_CFG_PROTOCOL_ETHERNET) {
+                       *p_pf = (u8)i;
+                       return 0;
+               }
+       }
+
+       DP_NOTICE(p_hwfn,
+                 "Failed to find an ethernet interface on the port in MF_SI mode\n");
+
+       return -EINVAL;
+}
+
+static void qed_mcp_update_bw(struct qed_hwfn *p_hwfn,
+                             struct qed_ptt *p_ptt)
+{
+       struct qed_mcp_function_info *p_info;
+       struct public_func shmem_info;
+       u32 resp = 0, param = 0;
+
+       qed_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info,
+                              MCP_PF_ID(p_hwfn));
+
+       qed_read_pf_bandwidth(p_hwfn, &shmem_info);
+
+       p_info = &p_hwfn->mcp_info->func_info;
+
+       qed_configure_pf_min_bandwidth(p_hwfn->cdev, p_info->bandwidth_min);
+       qed_configure_pf_max_bandwidth(p_hwfn->cdev, p_info->bandwidth_max);
+
+       /* Acknowledge the MFW */
+       qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BW_UPDATE_ACK, 0, &resp,
+                   &param);
+}
+
+int qed_mcp_handle_events(struct qed_hwfn *p_hwfn,
+                         struct qed_ptt *p_ptt)
+{
+       struct qed_mcp_info *info = p_hwfn->mcp_info;
+       int rc = 0;
+       bool found = false;
+       u16 i;
+
+       DP_VERBOSE(p_hwfn, QED_MSG_SP, "Received message from MFW\n");
+
+       /* Read Messages from MFW */
+       qed_mcp_read_mb(p_hwfn, p_ptt);
+
+       /* Compare current messages to old ones */
+       for (i = 0; i < info->mfw_mb_length; i++) {
+               if (info->mfw_mb_cur[i] == info->mfw_mb_shadow[i])
+                       continue;
+
+               found = true;
+
+               DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
+                          "Msg [%d] - old CMD 0x%02x, new CMD 0x%02x\n",
+                          i, info->mfw_mb_shadow[i], info->mfw_mb_cur[i]);
+
+               switch (i) {
+               case MFW_DRV_MSG_LINK_CHANGE:
+                       qed_mcp_handle_link_change(p_hwfn, p_ptt, false);
+                       break;
+               case MFW_DRV_MSG_VF_DISABLED:
+                       qed_mcp_handle_vf_flr(p_hwfn, p_ptt);
+                       break;
+               case MFW_DRV_MSG_LLDP_DATA_UPDATED:
+                       qed_dcbx_mib_update_event(p_hwfn, p_ptt,
+                                                 QED_DCBX_REMOTE_LLDP_MIB);
+                       break;
+               case MFW_DRV_MSG_DCBX_REMOTE_MIB_UPDATED:
+                       qed_dcbx_mib_update_event(p_hwfn, p_ptt,
+                                                 QED_DCBX_REMOTE_MIB);
+                       break;
+               case MFW_DRV_MSG_DCBX_OPERATIONAL_MIB_UPDATED:
+                       qed_dcbx_mib_update_event(p_hwfn, p_ptt,
+                                                 QED_DCBX_OPERATIONAL_MIB);
+                       break;
+               case MFW_DRV_MSG_TRANSCEIVER_STATE_CHANGE:
+                       qed_mcp_handle_transceiver_change(p_hwfn, p_ptt);
+                       break;
+               case MFW_DRV_MSG_BW_UPDATE:
+                       qed_mcp_update_bw(p_hwfn, p_ptt);
+                       break;
+               default:
+                       DP_NOTICE(p_hwfn, "Unimplemented MFW message %d\n", i);
+                       rc = -EINVAL;
+               }
+       }
+
+       /* ACK everything */
+       for (i = 0; i < MFW_DRV_MSG_MAX_DWORDS(info->mfw_mb_length); i++) {
+               __be32 val = cpu_to_be32(((u32 *)info->mfw_mb_cur)[i]);
+
+               /* MFW expects the answer in BE, so we force write in that format */
+               qed_wr(p_hwfn, p_ptt,
+                      info->mfw_mb_addr + sizeof(u32) +
+                      MFW_DRV_MSG_MAX_DWORDS(info->mfw_mb_length) *
+                      sizeof(u32) + i * sizeof(u32),
+                      (__force u32)val);
+       }
+
+       if (!found) {
+               DP_NOTICE(p_hwfn,
+                         "Received an MFW message indication but no new message!\n");
+               rc = -EINVAL;
+       }
+
+       /* Copy the new mfw messages into the shadow */
+       memcpy(info->mfw_mb_shadow, info->mfw_mb_cur, info->mfw_mb_length);
+
+       return rc;
+}
+
+int qed_mcp_get_mfw_ver(struct qed_hwfn *p_hwfn,
+                       struct qed_ptt *p_ptt,
+                       u32 *p_mfw_ver, u32 *p_running_bundle_id)
+{
+       u32 global_offsize;
+
+       if (IS_VF(p_hwfn->cdev)) {
+               if (p_hwfn->vf_iov_info) {
+                       struct pfvf_acquire_resp_tlv *p_resp;
+
+                       p_resp = &p_hwfn->vf_iov_info->acquire_resp;
+                       *p_mfw_ver = p_resp->pfdev_info.mfw_ver;
+                       return 0;
+               } else {
+                       DP_VERBOSE(p_hwfn,
+                                  QED_MSG_IOV,
+                                  "VF requested MFW version prior to ACQUIRE\n");
+                       return -EINVAL;
+               }
+       }
+
+       global_offsize = qed_rd(p_hwfn, p_ptt,
+                               SECTION_OFFSIZE_ADDR(p_hwfn->
+                                                    mcp_info->public_base,
+                                                    PUBLIC_GLOBAL));
+       *p_mfw_ver =
+           qed_rd(p_hwfn, p_ptt,
+                  SECTION_ADDR(global_offsize,
+                               0) + offsetof(struct public_global, mfw_ver));
+
+       if (p_running_bundle_id != NULL) {
+               *p_running_bundle_id = qed_rd(p_hwfn, p_ptt,
+                                             SECTION_ADDR(global_offsize, 0) +
+                                             offsetof(struct public_global,
+                                                      running_bundle_id));
+       }
+
+       return 0;
+}
+
+int qed_mcp_get_media_type(struct qed_dev *cdev,
+                          u32 *p_media_type)
+{
+       struct qed_hwfn *p_hwfn = &cdev->hwfns[0];
+       struct qed_ptt  *p_ptt;
+
+       if (IS_VF(cdev))
+               return -EINVAL;
+
+       if (!qed_mcp_is_init(p_hwfn)) {
+               DP_NOTICE(p_hwfn, "MFW is not initialized !\n");
+               return -EBUSY;
+       }
+
+       *p_media_type = MEDIA_UNSPECIFIED;
+
+       p_ptt = qed_ptt_acquire(p_hwfn);
+       if (!p_ptt)
+               return -EBUSY;
+
+       *p_media_type = qed_rd(p_hwfn, p_ptt, p_hwfn->mcp_info->port_addr +
+                              offsetof(struct public_port, media_type));
+
+       qed_ptt_release(p_hwfn, p_ptt);
+
+       return 0;
+}
+
+static int
+qed_mcp_get_shmem_proto(struct qed_hwfn *p_hwfn,
+                       struct public_func *p_info,
+                       enum qed_pci_personality *p_proto)
+{
+       int rc = 0;
+
+       switch (p_info->config & FUNC_MF_CFG_PROTOCOL_MASK) {
+       case FUNC_MF_CFG_PROTOCOL_ETHERNET:
+               if (test_bit(QED_DEV_CAP_ROCE,
+                            &p_hwfn->hw_info.device_capabilities))
+                       *p_proto = QED_PCI_ETH_ROCE;
+               else
+                       *p_proto = QED_PCI_ETH;
+               break;
+       case FUNC_MF_CFG_PROTOCOL_ISCSI:
+               *p_proto = QED_PCI_ISCSI;
+               break;
+       case FUNC_MF_CFG_PROTOCOL_ROCE:
+               DP_NOTICE(p_hwfn, "RoCE personality is not a valid value!\n");
+               rc = -EINVAL;
+               break;
+       default:
+               rc = -EINVAL;
+       }
+
+       return rc;
+}
+
+int qed_mcp_fill_shmem_func_info(struct qed_hwfn *p_hwfn,
+                                struct qed_ptt *p_ptt)
+{
+       struct qed_mcp_function_info *info;
+       struct public_func shmem_info;
+
+       qed_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info,
+                              MCP_PF_ID(p_hwfn));
+       info = &p_hwfn->mcp_info->func_info;
+
+       info->pause_on_host = (shmem_info.config &
+                              FUNC_MF_CFG_PAUSE_ON_HOST_RING) ? 1 : 0;
+
+       if (qed_mcp_get_shmem_proto(p_hwfn, &shmem_info,
+                                   &info->protocol)) {
+               DP_ERR(p_hwfn, "Unknown personality %08x\n",
+                      (u32)(shmem_info.config & FUNC_MF_CFG_PROTOCOL_MASK));
+               return -EINVAL;
+       }
+
+       qed_read_pf_bandwidth(p_hwfn, &shmem_info);
+
+       if (shmem_info.mac_upper || shmem_info.mac_lower) {
+               info->mac[0] = (u8)(shmem_info.mac_upper >> 8);
+               info->mac[1] = (u8)(shmem_info.mac_upper);
+               info->mac[2] = (u8)(shmem_info.mac_lower >> 24);
+               info->mac[3] = (u8)(shmem_info.mac_lower >> 16);
+               info->mac[4] = (u8)(shmem_info.mac_lower >> 8);
+               info->mac[5] = (u8)(shmem_info.mac_lower);
+       } else {
+               DP_NOTICE(p_hwfn, "MAC is 0 in shmem\n");
+       }
+
+       info->wwn_port = (u64)shmem_info.fcoe_wwn_port_name_upper |
+                        (((u64)shmem_info.fcoe_wwn_port_name_lower) << 32);
+       info->wwn_node = (u64)shmem_info.fcoe_wwn_node_name_upper |
+                        (((u64)shmem_info.fcoe_wwn_node_name_lower) << 32);
+
+       info->ovlan = (u16)(shmem_info.ovlan_stag & FUNC_MF_CFG_OV_STAG_MASK);
+
+       DP_VERBOSE(p_hwfn, (QED_MSG_SP | NETIF_MSG_IFUP),
+                  "Read configuration from shmem: pause_on_host %02x protocol %02x BW [%02x - %02x] MAC %02x:%02x:%02x:%02x:%02x:%02x wwn port %llx node %llx ovlan %04x\n",
+               info->pause_on_host, info->protocol,
+               info->bandwidth_min, info->bandwidth_max,
+               info->mac[0], info->mac[1], info->mac[2],
+               info->mac[3], info->mac[4], info->mac[5],
+               info->wwn_port, info->wwn_node, info->ovlan);
+
+       return 0;
+}
+
+struct qed_mcp_link_params
+*qed_mcp_get_link_params(struct qed_hwfn *p_hwfn)
+{
+       if (!p_hwfn || !p_hwfn->mcp_info)
+               return NULL;
+       return &p_hwfn->mcp_info->link_input;
+}
+
+struct qed_mcp_link_state
+*qed_mcp_get_link_state(struct qed_hwfn *p_hwfn)
+{
+       if (!p_hwfn || !p_hwfn->mcp_info)
+               return NULL;
+       return &p_hwfn->mcp_info->link_output;
+}
+
+struct qed_mcp_link_capabilities
+*qed_mcp_get_link_capabilities(struct qed_hwfn *p_hwfn)
+{
+       if (!p_hwfn || !p_hwfn->mcp_info)
+               return NULL;
+       return &p_hwfn->mcp_info->link_capabilities;
+}
+
+int qed_mcp_drain(struct qed_hwfn *p_hwfn,
+                 struct qed_ptt *p_ptt)
+{
+       u32 resp = 0, param = 0;
+       int rc;
+
+       rc = qed_mcp_cmd(p_hwfn, p_ptt,
+                        DRV_MSG_CODE_NIG_DRAIN, 1000,
+                        &resp, &param);
+
+       /* Wait for the drain to complete before returning */
+       msleep(1020);
+
+       return rc;
+}
+
+int qed_mcp_get_flash_size(struct qed_hwfn *p_hwfn,
+                          struct qed_ptt *p_ptt,
+                          u32 *p_flash_size)
+{
+       u32 flash_size;
+
+       if (IS_VF(p_hwfn->cdev))
+               return -EINVAL;
+
+       flash_size = qed_rd(p_hwfn, p_ptt, MCP_REG_NVM_CFG4);
+       flash_size = (flash_size & MCP_REG_NVM_CFG4_FLASH_SIZE) >>
+                     MCP_REG_NVM_CFG4_FLASH_SIZE_SHIFT;
+       flash_size = (1 << (flash_size + MCP_BYTES_PER_MBIT_SHIFT));
+
+       *p_flash_size = flash_size;
+
+       return 0;
+}
+
+int qed_mcp_config_vf_msix(struct qed_hwfn *p_hwfn,
+                          struct qed_ptt *p_ptt, u8 vf_id, u8 num)
+{
+       u32 resp = 0, param = 0, rc_param = 0;
+       int rc;
+
+       /* Only Leader can configure MSIX, and need to take CMT into account */
+       if (!IS_LEAD_HWFN(p_hwfn))
+               return 0;
+       num *= p_hwfn->cdev->num_hwfns;
+
+       param |= (vf_id << DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_SHIFT) &
+                DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_MASK;
+       param |= (num << DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_SHIFT) &
+                DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_MASK;
+
+       rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_CFG_VF_MSIX, param,
+                        &resp, &rc_param);
+
+       if (resp != FW_MSG_CODE_DRV_CFG_VF_MSIX_DONE) {
+               DP_NOTICE(p_hwfn, "VF[%d]: MFW failed to set MSI-X\n", vf_id);
+               rc = -EINVAL;
+       } else {
+               DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+                          "Requested 0x%02x MSI-x interrupts from VF 0x%02x\n",
+                          num, vf_id);
+       }
+
+       return rc;
+}
+
+int
+qed_mcp_send_drv_version(struct qed_hwfn *p_hwfn,
+                        struct qed_ptt *p_ptt,
+                        struct qed_mcp_drv_version *p_ver)
+{
+       struct drv_version_stc *p_drv_version;
+       struct qed_mcp_mb_params mb_params;
+       union drv_union_data union_data;
+       __be32 val;
+       u32 i;
+       int rc;
+
+       p_drv_version = &union_data.drv_version;
+       p_drv_version->version = p_ver->version;
+
+       for (i = 0; i < (MCP_DRV_VER_STR_SIZE - 4) / sizeof(u32); i++) {
+               val = cpu_to_be32(*((u32 *)&p_ver->name[i * sizeof(u32)]));
+               *(__be32 *)&p_drv_version->name[i * sizeof(u32)] = val;
+       }
+
+       memset(&mb_params, 0, sizeof(mb_params));
+       mb_params.cmd = DRV_MSG_CODE_SET_VERSION;
+       mb_params.p_data_src = &union_data;
+       rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
+       if (rc)
+               DP_ERR(p_hwfn, "MCP response failure, aborting\n");
+
+       return rc;
+}
+
+int qed_mcp_set_led(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
+                   enum qed_led_mode mode)
+{
+       u32 resp = 0, param = 0, drv_mb_param;
+       int rc;
+
+       switch (mode) {
+       case QED_LED_MODE_ON:
+               drv_mb_param = DRV_MB_PARAM_SET_LED_MODE_ON;
+               break;
+       case QED_LED_MODE_OFF:
+               drv_mb_param = DRV_MB_PARAM_SET_LED_MODE_OFF;
+               break;
+       case QED_LED_MODE_RESTORE:
+               drv_mb_param = DRV_MB_PARAM_SET_LED_MODE_OPER;
+               break;
+       default:
+               DP_NOTICE(p_hwfn, "Invalid LED mode %d\n", mode);
+               return -EINVAL;
+       }
+
+       rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_SET_LED_MODE,
+                        drv_mb_param, &resp, &param);
+
+       return rc;
+}
+
+int qed_mcp_bist_register_test(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+{
+       u32 drv_mb_param = 0, rsp, param;
+       int rc = 0;
+
+       drv_mb_param = (DRV_MB_PARAM_BIST_REGISTER_TEST <<
+                       DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT);
+
+       rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST,
+                        drv_mb_param, &rsp, &param);
+
+       if (rc)
+               return rc;
+
+       if (((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK) ||
+           (param != DRV_MB_PARAM_BIST_RC_PASSED))
+               rc = -EAGAIN;
+
+       return rc;
+}
+
+int qed_mcp_bist_clock_test(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+{
+       u32 drv_mb_param, rsp, param;
+       int rc = 0;
+
+       drv_mb_param = (DRV_MB_PARAM_BIST_CLOCK_TEST <<
+                       DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT);
+
+       rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST,
+                        drv_mb_param, &rsp, &param);
+
+       if (rc)
+               return rc;
+
+       if (((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK) ||
+           (param != DRV_MB_PARAM_BIST_RC_PASSED))
+               rc = -EAGAIN;
+
+       return rc;
+}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.h b/drivers/net/ethernet/qlogic/qed/qed_mcp.h
new file mode 100644 (file)
index 0000000..7f319aa
--- /dev/null
@@ -0,0 +1,463 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015 QLogic Corporation
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#ifndef _QED_MCP_H
+#define _QED_MCP_H
+
+#include <linux/types.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include "qed_hsi.h"
+
+struct qed_mcp_link_speed_params {
+       bool    autoneg;
+       u32     advertised_speeds;      /* bitmask of DRV_SPEED_CAPABILITY */
+       u32     forced_speed;      /* In Mb/s */
+};
+
+struct qed_mcp_link_pause_params {
+       bool    autoneg;
+       bool    forced_rx;
+       bool    forced_tx;
+};
+
+struct qed_mcp_link_params {
+       struct qed_mcp_link_speed_params        speed;
+       struct qed_mcp_link_pause_params        pause;
+       u32                                  loopback_mode;
+};
+
+struct qed_mcp_link_capabilities {
+       u32 speed_capabilities;
+};
+
+struct qed_mcp_link_state {
+       bool    link_up;
+
+       u32     min_pf_rate;
+
+       /* Actual link speed in Mb/s */
+       u32     line_speed;
+
+       /* PF max speed in Mb/s, deduced from line_speed
+        * according to PF max bandwidth configuration.
+        */
+       u32     speed;
+       bool    full_duplex;
+
+       bool    an;
+       bool    an_complete;
+       bool    parallel_detection;
+       bool    pfc_enabled;
+
+#define QED_LINK_PARTNER_SPEED_1G_HD    BIT(0)
+#define QED_LINK_PARTNER_SPEED_1G_FD    BIT(1)
+#define QED_LINK_PARTNER_SPEED_10G      BIT(2)
+#define QED_LINK_PARTNER_SPEED_20G      BIT(3)
+#define QED_LINK_PARTNER_SPEED_40G      BIT(4)
+#define QED_LINK_PARTNER_SPEED_50G      BIT(5)
+#define QED_LINK_PARTNER_SPEED_100G     BIT(6)
+       u32     partner_adv_speed;
+
+       bool    partner_tx_flow_ctrl_en;
+       bool    partner_rx_flow_ctrl_en;
+
+#define QED_LINK_PARTNER_SYMMETRIC_PAUSE (1)
+#define QED_LINK_PARTNER_ASYMMETRIC_PAUSE (2)
+#define QED_LINK_PARTNER_BOTH_PAUSE (3)
+       u8      partner_adv_pause;
+
+       bool    sfp_tx_fault;
+};
+
+struct qed_mcp_function_info {
+       u8                              pause_on_host;
+
+       enum qed_pci_personality        protocol;
+
+       u8                              bandwidth_min;
+       u8                              bandwidth_max;
+
+       u8                              mac[ETH_ALEN];
+
+       u64                             wwn_port;
+       u64                             wwn_node;
+
+#define QED_MCP_VLAN_UNSET              (0xffff)
+       u16                             ovlan;
+};
+
+struct qed_mcp_nvm_common {
+       u32     offset;
+       u32     param;
+       u32     resp;
+       u32     cmd;
+};
+
+struct qed_mcp_drv_version {
+       u32     version;
+       u8      name[MCP_DRV_VER_STR_SIZE - 4];
+};
+
+/**
+ * @brief - returns the link params of the hw function
+ *
+ * @param p_hwfn
+ *
+ * @returns pointer to link params
+ */
+struct qed_mcp_link_params *qed_mcp_get_link_params(struct qed_hwfn *);
+
+/**
+ * @brief - return the link state of the hw function
+ *
+ * @param p_hwfn
+ *
+ * @returns pointer to link state
+ */
+struct qed_mcp_link_state *qed_mcp_get_link_state(struct qed_hwfn *);
+
+/**
+ * @brief - return the link capabilities of the hw function
+ *
+ * @param p_hwfn
+ *
+ * @returns pointer to link capabilities
+ */
+struct qed_mcp_link_capabilities
+       *qed_mcp_get_link_capabilities(struct qed_hwfn *p_hwfn);
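+
+/* Illustrative usage sketch (not part of the driver): once the MCP interface
+ * has been initialized, the getters above can be used to inspect the current
+ * link. The printing below is only an example of consuming the returned
+ * structure; the calling context is assumed.
+ *
+ *	struct qed_mcp_link_state *p_link = qed_mcp_get_link_state(p_hwfn);
+ *
+ *	if (p_link && p_link->link_up)
+ *		pr_info("link up at %u Mb/s%s\n", p_link->line_speed,
+ *			p_link->full_duplex ? "" : ", half duplex");
+ *	else
+ *		pr_info("link down or MCP not initialized\n");
+ */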
+
+/**
+ * @brief Request the MFW to set the link according to 'link_input'.
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param b_up - raise link if `true'. Reset link if `false'.
+ *
+ * @return int
+ */
+int qed_mcp_set_link(struct qed_hwfn   *p_hwfn,
+                    struct qed_ptt     *p_ptt,
+                    bool               b_up);
+
+/**
+ * @brief Get the management firmware version value
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param p_mfw_ver    - mfw version value
+ * @param p_running_bundle_id  - image id in nvram; Optional.
+ *
+ * @return int - 0 - operation was successful.
+ */
+int qed_mcp_get_mfw_ver(struct qed_hwfn *p_hwfn,
+                       struct qed_ptt *p_ptt,
+                       u32 *p_mfw_ver, u32 *p_running_bundle_id);
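+
+/* Illustrative sketch (not part of the driver): reading the MFW version on a
+ * PF. The byte decoding below (major.minor.rev.eng packed from the most
+ * significant byte down) is an assumption made for the example and should be
+ * verified against the management firmware documentation.
+ *
+ *	u32 mfw_ver = 0;
+ *
+ *	if (!qed_mcp_get_mfw_ver(p_hwfn, p_ptt, &mfw_ver, NULL))
+ *		pr_info("MFW %d.%d.%d.%d\n",
+ *			(mfw_ver >> 24) & 0xff, (mfw_ver >> 16) & 0xff,
+ *			(mfw_ver >> 8) & 0xff, mfw_ver & 0xff);
+ */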
+
+/**
+ * @brief Get the media type value of the port.
+ *
+ * @param cdev        - qed dev pointer
+ * @param media_type  - media type value
+ *
+ * @return int -
+ *      0 - Operation was successful.
+ *      -EBUSY - Operation failed
+ */
+int qed_mcp_get_media_type(struct qed_dev      *cdev,
+                          u32                  *media_type);
+
+/**
+ * @brief General function for sending commands to the MCP
+ *        mailbox. It holds the mailbox lock for the entire
+ *        operation, from sending the request until the MCP
+ *        response is received. The response is polled for up
+ *        to 5 seconds before timing out.
+ *
+ * @param p_hwfn     - hw function
+ * @param p_ptt      - PTT required for register access
+ * @param cmd        - command to be sent to the MCP.
+ * @param param      - Optional param
+ * @param o_mcp_resp - The MCP response code (excluding the sequence).
+ * @param o_mcp_param - Optional parameter provided by the MCP
+ *                      response
+ * @return int - 0 - operation was successful.
+ */
+int qed_mcp_cmd(struct qed_hwfn *p_hwfn,
+               struct qed_ptt *p_ptt,
+               u32 cmd,
+               u32 param,
+               u32 *o_mcp_resp,
+               u32 *o_mcp_param);
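+
+/* Illustrative sketch (not part of the driver): a typical mailbox exchange
+ * from a PF context that already holds a PTT. The command, parameter and
+ * success check mirror the BIST register-test helper in qed_mcp.c; which
+ * response code counts as success depends on the command being sent.
+ *
+ *	u32 resp = 0, param = 0;
+ *	int rc;
+ *
+ *	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST,
+ *			 DRV_MB_PARAM_BIST_REGISTER_TEST <<
+ *			 DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT,
+ *			 &resp, &param);
+ *	if (rc)
+ *		return rc;
+ *	if ((resp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK)
+ *		return -EAGAIN;
+ */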
+
+/**
+ * @brief - drains the nig, allowing completion to pass in case of pauses.
+ *          (Should be called only from sleepable context)
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ */
+int qed_mcp_drain(struct qed_hwfn *p_hwfn,
+                 struct qed_ptt *p_ptt);
+
+/**
+ * @brief Get the flash size value
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param p_flash_size  - flash size in bytes to be filled.
+ *
+ * @return int - 0 - operation was successful.
+ */
+int qed_mcp_get_flash_size(struct qed_hwfn     *p_hwfn,
+                          struct qed_ptt       *p_ptt,
+                          u32 *p_flash_size);
+
+/**
+ * @brief Send driver version to MFW
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param version - Version value
+ * @param name - Protocol driver name
+ *
+ * @return int - 0 - operation was successful.
+ */
+int
+qed_mcp_send_drv_version(struct qed_hwfn *p_hwfn,
+                        struct qed_ptt *p_ptt,
+                        struct qed_mcp_drv_version *p_ver);
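+
+/* Illustrative sketch (not part of the driver): building the version
+ * structure before sending it. Both the numeric encoding of 'version' and
+ * the name string are example values, not something mandated by the MFW
+ * interface.
+ *
+ *	struct qed_mcp_drv_version drv_version;
+ *	int rc;
+ *
+ *	memset(&drv_version, 0, sizeof(drv_version));
+ *	drv_version.version = (8 << 24) | (10 << 16) | (9 << 8) | 20;
+ *	snprintf((char *)drv_version.name, sizeof(drv_version.name),
+ *		 "qede 8.10.9.20");
+ *	rc = qed_mcp_send_drv_version(p_hwfn, p_ptt, &drv_version);
+ */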
+
+/**
+ * @brief Set LED status
+ *
+ *  @param p_hwfn
+ *  @param p_ptt
+ *  @param mode - LED mode
+ *
+ * @return int - 0 - operation was successful.
+ */
+int qed_mcp_set_led(struct qed_hwfn *p_hwfn,
+                   struct qed_ptt *p_ptt,
+                   enum qed_led_mode mode);
+
+/**
+ * @brief Bist register test
+ *
+ *  @param p_hwfn    - hw function
+ *  @param p_ptt     - PTT required for register access
+ *
+ * @return int - 0 - operation was successful.
+ */
+int qed_mcp_bist_register_test(struct qed_hwfn *p_hwfn,
+                              struct qed_ptt *p_ptt);
+
+/**
+ * @brief Bist clock test
+ *
+ *  @param p_hwfn    - hw function
+ *  @param p_ptt     - PTT required for register access
+ *
+ * @return int - 0 - operation was successful.
+ */
+int qed_mcp_bist_clock_test(struct qed_hwfn *p_hwfn,
+                           struct qed_ptt *p_ptt);
+
+/* Using the hwfn number (and not pf_num) is required since in CMT mode
+ * the same pf_num may be used by two different hwfns.
+ * TODO - this shouldn't really be in a .h file, but until all fields
+ * required during hw-init are placed in their correct place in shmem
+ * we need it in qed_dev.c [for reading the nvram reflection in shmem].
+ */
+#define MCP_PF_ID_BY_REL(p_hwfn, rel_pfid) (QED_IS_BB((p_hwfn)->cdev) ?               \
+                                           ((rel_pfid) |                      \
+                                            ((p_hwfn)->abs_pf_id & 1) << 3) : \
+                                           rel_pfid)
+#define MCP_PF_ID(p_hwfn) MCP_PF_ID_BY_REL(p_hwfn, (p_hwfn)->rel_pf_id)
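+
+/* Worked example for the macros above (illustrative only): in CMT mode two
+ * different hwfns can share the same rel_pf_id, so the parity of abs_pf_id
+ * is folded into bit 3. A hwfn with rel_pf_id 0 and abs_pf_id 1 therefore
+ * maps to MCP PF ID (0 | ((1 & 1) << 3)) = 8, while rel_pf_id 0 on a hwfn
+ * with an even abs_pf_id maps to 0. On non-BB devices the relative ID is
+ * used as-is.
+ */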
+
+/* TODO - this is only correct as long as only BB is supported, and
+ * no port-swapping is implemented; Afterwards we'll need to fix it.
+ */
+#define MFW_PORT(_p_hwfn)       ((_p_hwfn)->abs_pf_id %        \
+                                ((_p_hwfn)->cdev->num_ports_in_engines * 2))
+struct qed_mcp_info {
+       spinlock_t                              lock;
+       bool                                    block_mb_sending;
+       u32                                     public_base;
+       u32                                     drv_mb_addr;
+       u32                                     mfw_mb_addr;
+       u32                                     port_addr;
+       u16                                     drv_mb_seq;
+       u16                                     drv_pulse_seq;
+       struct qed_mcp_link_params              link_input;
+       struct qed_mcp_link_state               link_output;
+       struct qed_mcp_link_capabilities        link_capabilities;
+       struct qed_mcp_function_info            func_info;
+       u8                                      *mfw_mb_cur;
+       u8                                      *mfw_mb_shadow;
+       u16                                     mfw_mb_length;
+       u16                                     mcp_hist;
+};
+
+struct qed_mcp_mb_params {
+       u32                     cmd;
+       u32                     param;
+       union drv_union_data    *p_data_src;
+       union drv_union_data    *p_data_dst;
+       u32                     mcp_resp;
+       u32                     mcp_param;
+};
+
+/**
+ * @brief Initialize the interface with the MCP
+ *
+ * @param p_hwfn - HW func
+ * @param p_ptt - PTT required for register access
+ *
+ * @return int
+ */
+int qed_mcp_cmd_init(struct qed_hwfn *p_hwfn,
+                    struct qed_ptt *p_ptt);
+
+/**
+ * @brief Initialize the port interface with the MCP
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * Can only be called after `num_ports_in_engines' is set
+ */
+void qed_mcp_cmd_port_init(struct qed_hwfn *p_hwfn,
+                          struct qed_ptt *p_ptt);
+/**
+ * @brief Releases resources allocated during the init process.
+ *
+ * @param p_hwfn - HW func
+ * @param p_ptt - PTT required for register access
+ *
+ * @return int
+ */
+
+int qed_mcp_free(struct qed_hwfn *p_hwfn);
+
+/**
+ * @brief This function is called from the DPC context. After
+ * pointing the PTT to the mfw mb, it checks for events sent by the
+ * MCP to the driver and acks them. If a critical event is detected,
+ * it is handled here; otherwise the work is queued to a sleepable
+ * work-queue.
+ *
+ * @param p_hwfn - HW function
+ * @param p_ptt - PTT required for register access
+ * @return int - 0 - operation was successful.
+ */
+int qed_mcp_handle_events(struct qed_hwfn *p_hwfn,
+                         struct qed_ptt *p_ptt);
+
+/**
+ * @brief Sends a LOAD_REQ to the MFW and, if the operation
+ *        succeeds, returns whether this PF is the first on the
+ *        engine or port, or just an additional function. This
+ *        function should be called when the driver is ready to
+ *        accept MFW events, after the Storm initializations are done.
+ *
+ * @param p_hwfn       - hw function
+ * @param p_ptt        - PTT required for register access
+ * @param p_load_code  - The MCP response param containing one
+ *      of the following:
+ *      FW_MSG_CODE_DRV_LOAD_ENGINE
+ *      FW_MSG_CODE_DRV_LOAD_PORT
+ *      FW_MSG_CODE_DRV_LOAD_FUNCTION
+ * @return int -
+ *      0 - Operation was successful.
+ *      -EBUSY - Operation failed
+ */
+int qed_mcp_load_req(struct qed_hwfn *p_hwfn,
+                    struct qed_ptt *p_ptt,
+                    u32 *p_load_code);
+
+/**
+ * @brief Read the MFW mailbox into Current buffer.
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ */
+void qed_mcp_read_mb(struct qed_hwfn *p_hwfn,
+                    struct qed_ptt *p_ptt);
+
+/**
+ * @brief Ack to the MFW that the driver has finished the FLR process for VFs
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param vfs_to_ack - bit mask of all engine VFs for which the PF acks.
+ *
+ * @return int - 0 upon success.
+ */
+int qed_mcp_ack_vf_flr(struct qed_hwfn *p_hwfn,
+                      struct qed_ptt *p_ptt, u32 *vfs_to_ack);
+
+/**
+ * @brief - called during init to read the shmem of all function-related info.
+ *
+ * @param p_hwfn
+ *
+ * @return 0 upon success.
+ */
+int qed_mcp_fill_shmem_func_info(struct qed_hwfn *p_hwfn,
+                                struct qed_ptt *p_ptt);
+
+/**
+ * @brief - Reset the MCP using mailbox command.
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ *
+ * @return 0 upon success.
+ */
+int qed_mcp_reset(struct qed_hwfn *p_hwfn,
+                 struct qed_ptt *p_ptt);
+
+/**
+ * @brief indicates whether the MFW objects [under mcp_info] are accessible
+ *
+ * @param p_hwfn
+ *
+ * @return true iff MFW is running and mcp_info is initialized
+ */
+bool qed_mcp_is_init(struct qed_hwfn *p_hwfn);
+
+/**
+ * @brief request MFW to configure MSI-X for a VF
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param vf_id - absolute VF ID inside the engine
+ * @param num - number of SBs (MSI-X entries) to request
+ *
+ * @return int
+ */
+int qed_mcp_config_vf_msix(struct qed_hwfn *p_hwfn,
+                          struct qed_ptt *p_ptt, u8 vf_id, u8 num);
+
+int qed_configure_pf_min_bandwidth(struct qed_dev *cdev, u8 min_bw);
+int qed_configure_pf_max_bandwidth(struct qed_dev *cdev, u8 max_bw);
+int __qed_configure_pf_max_bandwidth(struct qed_hwfn *p_hwfn,
+                                    struct qed_ptt *p_ptt,
+                                    struct qed_mcp_link_state *p_link,
+                                    u8 max_bw);
+int __qed_configure_pf_min_bandwidth(struct qed_hwfn *p_hwfn,
+                                    struct qed_ptt *p_ptt,
+                                    struct qed_mcp_link_state *p_link,
+                                    u8 min_bw);
+
+int qed_hw_init_first_eth(struct qed_hwfn *p_hwfn,
+                         struct qed_ptt *p_ptt, u8 *p_pf);
+#endif
diff --git a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h
new file mode 100644 (file)
index 0000000..f6b86ca
--- /dev/null
@@ -0,0 +1,524 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015 QLogic Corporation
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#ifndef REG_ADDR_H
+#define REG_ADDR_H
+
+#define  CDU_REG_CID_ADDR_PARAMS_CONTEXT_SIZE_SHIFT \
+       0
+
+#define  CDU_REG_CID_ADDR_PARAMS_CONTEXT_SIZE          ( \
+               0xfff << 0)
+
+#define  CDU_REG_CID_ADDR_PARAMS_BLOCK_WASTE_SHIFT \
+       12
+
+#define  CDU_REG_CID_ADDR_PARAMS_BLOCK_WASTE           ( \
+               0xfff << 12)
+
+#define  CDU_REG_CID_ADDR_PARAMS_NCIB_SHIFT \
+       24
+
+#define  CDU_REG_CID_ADDR_PARAMS_NCIB                  ( \
+               0xff << 24)
+
+#define CDU_REG_SEGMENT0_PARAMS        \
+       0x580904UL
+#define CDU_REG_SEGMENT0_PARAMS_T0_NUM_TIDS_IN_BLOCK \
+       (0xfff << 0)
+#define CDU_REG_SEGMENT0_PARAMS_T0_NUM_TIDS_IN_BLOCK_SHIFT \
+       0
+#define CDU_REG_SEGMENT0_PARAMS_T0_TID_BLOCK_WASTE \
+       (0xff << 16)
+#define CDU_REG_SEGMENT0_PARAMS_T0_TID_BLOCK_WASTE_SHIFT \
+       16
+#define CDU_REG_SEGMENT0_PARAMS_T0_TID_SIZE \
+       (0xff << 24)
+#define CDU_REG_SEGMENT0_PARAMS_T0_TID_SIZE_SHIFT \
+       24
+#define CDU_REG_SEGMENT1_PARAMS        \
+       0x580908UL
+#define CDU_REG_SEGMENT1_PARAMS_T1_NUM_TIDS_IN_BLOCK \
+       (0xfff << 0)
+#define CDU_REG_SEGMENT1_PARAMS_T1_NUM_TIDS_IN_BLOCK_SHIFT \
+       0
+#define CDU_REG_SEGMENT1_PARAMS_T1_TID_BLOCK_WASTE \
+       (0xff << 16)
+#define CDU_REG_SEGMENT1_PARAMS_T1_TID_BLOCK_WASTE_SHIFT \
+       16
+#define CDU_REG_SEGMENT1_PARAMS_T1_TID_SIZE \
+       (0xff << 24)
+#define CDU_REG_SEGMENT1_PARAMS_T1_TID_SIZE_SHIFT \
+       24
+
+#define  XSDM_REG_OPERATION_GEN \
+       0xf80408UL
+#define  NIG_REG_RX_BRB_OUT_EN \
+       0x500e18UL
+#define  NIG_REG_STORM_OUT_EN \
+       0x500e08UL
+#define  PSWRQ2_REG_L2P_VALIDATE_VFID \
+       0x240c50UL
+#define  PGLUE_B_REG_USE_CLIENTID_IN_TAG       \
+       0x2aae04UL
+#define  PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER       \
+       0x2aa16cUL
+#define PGLUE_B_REG_WAS_ERROR_VF_31_0_CLR \
+       0x2aa118UL
+#define PSWHST_REG_ZONE_PERMISSION_TABLE \
+       0x2a0800UL
+#define  BAR0_MAP_REG_MSDM_RAM \
+       0x1d00000UL
+#define  BAR0_MAP_REG_USDM_RAM \
+       0x1d80000UL
+#define  BAR0_MAP_REG_PSDM_RAM \
+       0x1f00000UL
+#define  BAR0_MAP_REG_TSDM_RAM \
+       0x1c80000UL
+#define BAR0_MAP_REG_XSDM_RAM \
+       0x1e00000UL
+#define  NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF \
+       0x5011f4UL
+#define  PRS_REG_SEARCH_TCP \
+       0x1f0400UL
+#define  PRS_REG_SEARCH_UDP \
+       0x1f0404UL
+#define  PRS_REG_SEARCH_FCOE \
+       0x1f0408UL
+#define  PRS_REG_SEARCH_ROCE \
+       0x1f040cUL
+#define  PRS_REG_SEARCH_OPENFLOW       \
+       0x1f0434UL
+#define  TM_REG_PF_ENABLE_CONN \
+       0x2c043cUL
+#define  TM_REG_PF_ENABLE_TASK \
+       0x2c0444UL
+#define  TM_REG_PF_SCAN_ACTIVE_CONN \
+       0x2c04fcUL
+#define  TM_REG_PF_SCAN_ACTIVE_TASK \
+       0x2c0500UL
+#define  IGU_REG_LEADING_EDGE_LATCH \
+       0x18082cUL
+#define  IGU_REG_TRAILING_EDGE_LATCH \
+       0x180830UL
+#define  QM_REG_USG_CNT_PF_TX \
+       0x2f2eacUL
+#define  QM_REG_USG_CNT_PF_OTHER       \
+       0x2f2eb0UL
+#define  DORQ_REG_PF_DB_ENABLE \
+       0x100508UL
+#define DORQ_REG_VF_USAGE_CNT \
+       0x1009c4UL
+#define  QM_REG_PF_EN \
+       0x2f2ea4UL
+#define  TCFC_REG_STRONG_ENABLE_PF \
+       0x2d0708UL
+#define  CCFC_REG_STRONG_ENABLE_PF \
+       0x2e0708UL
+#define  PGLUE_B_REG_PGL_ADDR_88_F0 \
+       0x2aa404UL
+#define  PGLUE_B_REG_PGL_ADDR_8C_F0 \
+       0x2aa408UL
+#define  PGLUE_B_REG_PGL_ADDR_90_F0 \
+       0x2aa40cUL
+#define  PGLUE_B_REG_PGL_ADDR_94_F0 \
+       0x2aa410UL
+#define  PGLUE_B_REG_WAS_ERROR_PF_31_0_CLR \
+       0x2aa138UL
+#define  PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ \
+       0x2aa174UL
+#define  MISC_REG_GEN_PURP_CR0 \
+       0x008c80UL
+#define  MCP_REG_SCRATCH       \
+       0xe20000UL
+#define  CNIG_REG_NW_PORT_MODE_BB_B0 \
+       0x218200UL
+#define  MISCS_REG_CHIP_NUM \
+       0x00976cUL
+#define  MISCS_REG_CHIP_REV \
+       0x009770UL
+#define  MISCS_REG_CMT_ENABLED_FOR_PAIR \
+       0x00971cUL
+#define  MISCS_REG_CHIP_TEST_REG       \
+       0x009778UL
+#define  MISCS_REG_CHIP_METAL \
+       0x009774UL
+#define MISCS_REG_FUNCTION_HIDE \
+       0x0096f0UL
+#define  BRB_REG_HEADER_SIZE \
+       0x340804UL
+#define  BTB_REG_HEADER_SIZE \
+       0xdb0804UL
+#define  CAU_REG_LONG_TIMEOUT_THRESHOLD \
+       0x1c0708UL
+#define  CCFC_REG_ACTIVITY_COUNTER \
+       0x2e8800UL
+#define CCFC_REG_STRONG_ENABLE_VF \
+       0x2e070cUL
+#define  CDU_REG_CID_ADDR_PARAMS       \
+       0x580900UL
+#define  DBG_REG_CLIENT_ENABLE \
+       0x010004UL
+#define  DMAE_REG_INIT \
+       0x00c000UL
+#define  DORQ_REG_IFEN \
+       0x100040UL
+#define DORQ_REG_DB_DROP_REASON \
+       0x100a2cUL
+#define DORQ_REG_DB_DROP_DETAILS \
+       0x100a24UL
+#define DORQ_REG_DB_DROP_DETAILS_ADDRESS \
+       0x100a1cUL
+#define  GRC_REG_TIMEOUT_EN \
+       0x050404UL
+#define GRC_REG_TIMEOUT_ATTN_ACCESS_VALID \
+       0x050054UL
+#define GRC_REG_TIMEOUT_ATTN_ACCESS_DATA_0 \
+       0x05004cUL
+#define GRC_REG_TIMEOUT_ATTN_ACCESS_DATA_1 \
+       0x050050UL
+#define  IGU_REG_BLOCK_CONFIGURATION \
+       0x180040UL
+#define  MCM_REG_INIT \
+       0x1200000UL
+#define  MCP2_REG_DBG_DWORD_ENABLE \
+       0x052404UL
+#define  MISC_REG_PORT_MODE \
+       0x008c00UL
+#define  MISCS_REG_CLK_100G_MODE       \
+       0x009070UL
+#define  MSDM_REG_ENABLE_IN1 \
+       0xfc0004UL
+#define  MSEM_REG_ENABLE_IN \
+       0x1800004UL
+#define  NIG_REG_CM_HDR \
+       0x500840UL
+#define NIG_REG_LLH_TAGMAC_DEF_PF_VECTOR \
+       0x50196cUL
+#define NIG_REG_LLH_CLS_TYPE_DUALMODE \
+       0x501964UL
+#define  NCSI_REG_CONFIG       \
+       0x040200UL
+#define  PBF_REG_INIT \
+       0xd80000UL
+#define PBF_REG_NUM_BLOCKS_ALLOCATED_PROD_VOQ0 \
+       0xd806c8UL
+#define PBF_REG_NUM_BLOCKS_ALLOCATED_CONS_VOQ0 \
+       0xd806ccUL
+#define  PTU_REG_ATC_INIT_ARRAY \
+       0x560000UL
+#define  PCM_REG_INIT \
+       0x1100000UL
+#define  PGLUE_B_REG_ADMIN_PER_PF_REGION       \
+       0x2a9000UL
+#define PGLUE_B_REG_TX_ERR_WR_DETAILS2 \
+       0x2aa150UL
+#define PGLUE_B_REG_TX_ERR_WR_ADD_31_0 \
+       0x2aa144UL
+#define PGLUE_B_REG_TX_ERR_WR_ADD_63_32 \
+       0x2aa148UL
+#define PGLUE_B_REG_TX_ERR_WR_DETAILS \
+       0x2aa14cUL
+#define PGLUE_B_REG_TX_ERR_RD_ADD_31_0 \
+       0x2aa154UL
+#define PGLUE_B_REG_TX_ERR_RD_ADD_63_32 \
+       0x2aa158UL
+#define PGLUE_B_REG_TX_ERR_RD_DETAILS \
+       0x2aa15cUL
+#define PGLUE_B_REG_TX_ERR_RD_DETAILS2 \
+       0x2aa160UL
+#define PGLUE_B_REG_TX_ERR_WR_DETAILS_ICPL \
+       0x2aa164UL
+#define PGLUE_B_REG_MASTER_ZLR_ERR_DETAILS \
+       0x2aa54cUL
+#define PGLUE_B_REG_MASTER_ZLR_ERR_ADD_31_0 \
+       0x2aa544UL
+#define PGLUE_B_REG_MASTER_ZLR_ERR_ADD_63_32 \
+       0x2aa548UL
+#define PGLUE_B_REG_VF_ILT_ERR_ADD_31_0 \
+       0x2aae74UL
+#define PGLUE_B_REG_VF_ILT_ERR_ADD_63_32 \
+       0x2aae78UL
+#define PGLUE_B_REG_VF_ILT_ERR_DETAILS \
+       0x2aae7cUL
+#define PGLUE_B_REG_VF_ILT_ERR_DETAILS2 \
+       0x2aae80UL
+#define PGLUE_B_REG_LATCHED_ERRORS_CLR \
+       0x2aa3bcUL
+#define  PRM_REG_DISABLE_PRM \
+       0x230000UL
+#define  PRS_REG_SOFT_RST \
+       0x1f0000UL
+#define PRS_REG_MSG_INFO \
+       0x1f0a1cUL
+#define PRS_REG_ROCE_DEST_QP_MAX_PF \
+       0x1f0430UL
+#define  PSDM_REG_ENABLE_IN1 \
+       0xfa0004UL
+#define  PSEM_REG_ENABLE_IN \
+       0x1600004UL
+#define  PSWRQ_REG_DBG_SELECT \
+       0x280020UL
+#define  PSWRQ2_REG_CDUT_P_SIZE \
+       0x24000cUL
+#define PSWRQ2_REG_ILT_MEMORY \
+       0x260000UL
+#define  PSWHST_REG_DISCARD_INTERNAL_WRITES \
+       0x2a0040UL
+#define  PSWHST2_REG_DBGSYN_ALMOST_FULL_THR \
+       0x29e050UL
+#define PSWHST_REG_INCORRECT_ACCESS_VALID \
+       0x2a0070UL
+#define PSWHST_REG_INCORRECT_ACCESS_ADDRESS \
+       0x2a0074UL
+#define PSWHST_REG_INCORRECT_ACCESS_DATA \
+       0x2a0068UL
+#define PSWHST_REG_INCORRECT_ACCESS_LENGTH \
+       0x2a006cUL
+#define  PSWRD_REG_DBG_SELECT \
+       0x29c040UL
+#define  PSWRD2_REG_CONF11 \
+       0x29d064UL
+#define  PSWWR_REG_USDM_FULL_TH \
+       0x29a040UL
+#define  PSWWR2_REG_CDU_FULL_TH2       \
+       0x29b040UL
+#define  QM_REG_MAXPQSIZE_0 \
+       0x2f0434UL
+#define  RSS_REG_RSS_INIT_EN \
+       0x238804UL
+#define  RDIF_REG_STOP_ON_ERROR \
+       0x300040UL
+#define  SRC_REG_SOFT_RST \
+       0x23874cUL
+#define  TCFC_REG_ACTIVITY_COUNTER \
+       0x2d8800UL
+#define  TCM_REG_INIT \
+       0x1180000UL
+#define  TM_REG_PXP_READ_DATA_FIFO_INIT \
+       0x2c0014UL
+#define  TSDM_REG_ENABLE_IN1 \
+       0xfb0004UL
+#define  TSEM_REG_ENABLE_IN \
+       0x1700004UL
+#define  TDIF_REG_STOP_ON_ERROR \
+       0x310040UL
+#define  UCM_REG_INIT \
+       0x1280000UL
+#define  UMAC_REG_IPG_HD_BKP_CNTL_BB_B0 \
+       0x051004UL
+#define  USDM_REG_ENABLE_IN1 \
+       0xfd0004UL
+#define  USEM_REG_ENABLE_IN \
+       0x1900004UL
+#define  XCM_REG_INIT \
+       0x1000000UL
+#define  XSDM_REG_ENABLE_IN1 \
+       0xf80004UL
+#define  XSEM_REG_ENABLE_IN \
+       0x1400004UL
+#define  YCM_REG_INIT \
+       0x1080000UL
+#define  YSDM_REG_ENABLE_IN1 \
+       0xf90004UL
+#define  YSEM_REG_ENABLE_IN \
+       0x1500004UL
+#define  XYLD_REG_SCBD_STRICT_PRIO \
+       0x4c0000UL
+#define  TMLD_REG_SCBD_STRICT_PRIO \
+       0x4d0000UL
+#define  MULD_REG_SCBD_STRICT_PRIO \
+       0x4e0000UL
+#define  YULD_REG_SCBD_STRICT_PRIO \
+       0x4c8000UL
+#define  MISC_REG_SHARED_MEM_ADDR \
+       0x008c20UL
+#define  DMAE_REG_GO_C0 \
+       0x00c048UL
+#define  DMAE_REG_GO_C1 \
+       0x00c04cUL
+#define  DMAE_REG_GO_C2 \
+       0x00c050UL
+#define  DMAE_REG_GO_C3 \
+       0x00c054UL
+#define  DMAE_REG_GO_C4 \
+       0x00c058UL
+#define  DMAE_REG_GO_C5 \
+       0x00c05cUL
+#define  DMAE_REG_GO_C6 \
+       0x00c060UL
+#define  DMAE_REG_GO_C7 \
+       0x00c064UL
+#define  DMAE_REG_GO_C8 \
+       0x00c068UL
+#define  DMAE_REG_GO_C9 \
+       0x00c06cUL
+#define  DMAE_REG_GO_C10       \
+       0x00c070UL
+#define  DMAE_REG_GO_C11       \
+       0x00c074UL
+#define  DMAE_REG_GO_C12       \
+       0x00c078UL
+#define  DMAE_REG_GO_C13       \
+       0x00c07cUL
+#define  DMAE_REG_GO_C14       \
+       0x00c080UL
+#define  DMAE_REG_GO_C15       \
+       0x00c084UL
+#define  DMAE_REG_GO_C16       \
+       0x00c088UL
+#define  DMAE_REG_GO_C17       \
+       0x00c08cUL
+#define  DMAE_REG_GO_C18       \
+       0x00c090UL
+#define  DMAE_REG_GO_C19       \
+       0x00c094UL
+#define  DMAE_REG_GO_C20       \
+       0x00c098UL
+#define  DMAE_REG_GO_C21       \
+       0x00c09cUL
+#define  DMAE_REG_GO_C22       \
+       0x00c0a0UL
+#define  DMAE_REG_GO_C23       \
+       0x00c0a4UL
+#define  DMAE_REG_GO_C24       \
+       0x00c0a8UL
+#define  DMAE_REG_GO_C25       \
+       0x00c0acUL
+#define  DMAE_REG_GO_C26       \
+       0x00c0b0UL
+#define  DMAE_REG_GO_C27       \
+       0x00c0b4UL
+#define  DMAE_REG_GO_C28       \
+       0x00c0b8UL
+#define  DMAE_REG_GO_C29       \
+       0x00c0bcUL
+#define  DMAE_REG_GO_C30       \
+       0x00c0c0UL
+#define  DMAE_REG_GO_C31       \
+       0x00c0c4UL
+#define  DMAE_REG_CMD_MEM \
+       0x00c800UL
+#define  QM_REG_MAXPQSIZETXSEL_0       \
+       0x2f0440UL
+#define  QM_REG_SDMCMDREADY \
+       0x2f1e10UL
+#define  QM_REG_SDMCMDADDR \
+       0x2f1e04UL
+#define  QM_REG_SDMCMDDATALSB \
+       0x2f1e08UL
+#define  QM_REG_SDMCMDDATAMSB \
+       0x2f1e0cUL
+#define  QM_REG_SDMCMDGO       \
+       0x2f1e14UL
+#define  QM_REG_RLPFCRD \
+       0x2f4d80UL
+#define  QM_REG_RLPFINCVAL \
+       0x2f4c80UL
+#define  QM_REG_RLGLBLCRD \
+       0x2f4400UL
+#define  QM_REG_RLGLBLINCVAL \
+       0x2f3400UL
+#define  IGU_REG_ATTENTION_ENABLE \
+       0x18083cUL
+#define  IGU_REG_ATTN_MSG_ADDR_L       \
+       0x180820UL
+#define  IGU_REG_ATTN_MSG_ADDR_H       \
+       0x180824UL
+#define  MISC_REG_AEU_GENERAL_ATTN_0 \
+       0x008400UL
+#define  CAU_REG_SB_ADDR_MEMORY \
+       0x1c8000UL
+#define  CAU_REG_SB_VAR_MEMORY \
+       0x1c6000UL
+#define  CAU_REG_PI_MEMORY \
+       0x1d0000UL
+#define  IGU_REG_PF_CONFIGURATION \
+       0x180800UL
+#define IGU_REG_VF_CONFIGURATION \
+       0x180804UL
+#define  MISC_REG_AEU_ENABLE1_IGU_OUT_0 \
+       0x00849cUL
+#define MISC_REG_AEU_AFTER_INVERT_1_IGU        \
+       0x0087b4UL
+#define  MISC_REG_AEU_MASK_ATTN_IGU \
+       0x008494UL
+#define  IGU_REG_CLEANUP_STATUS_0 \
+       0x180980UL
+#define  IGU_REG_CLEANUP_STATUS_1 \
+       0x180a00UL
+#define  IGU_REG_CLEANUP_STATUS_2 \
+       0x180a80UL
+#define  IGU_REG_CLEANUP_STATUS_3 \
+       0x180b00UL
+#define  IGU_REG_CLEANUP_STATUS_4 \
+       0x180b80UL
+#define  IGU_REG_COMMAND_REG_32LSB_DATA \
+       0x180840UL
+#define  IGU_REG_COMMAND_REG_CTRL \
+       0x180848UL
+#define  IGU_REG_BLOCK_CONFIGURATION_VF_CLEANUP_EN     ( \
+               0x1 << 1)
+#define  IGU_REG_BLOCK_CONFIGURATION_PXP_TPH_INTERFACE_EN      ( \
+               0x1 << 0)
+#define  IGU_REG_MAPPING_MEMORY \
+       0x184000UL
+#define IGU_REG_STATISTIC_NUM_VF_MSG_SENT \
+       0x180408UL
+#define IGU_REG_WRITE_DONE_PENDING \
+       0x180900UL
+#define  MISCS_REG_GENERIC_POR_0       \
+       0x0096d4UL
+#define  MCP_REG_NVM_CFG4 \
+       0xe0642cUL
+#define  MCP_REG_NVM_CFG4_FLASH_SIZE   ( \
+               0x7 << 0)
+#define  MCP_REG_NVM_CFG4_FLASH_SIZE_SHIFT \
+       0
+#define MCP_REG_CPU_STATE \
+       0xe05004UL
+#define MCP_REG_CPU_EVENT_MASK \
+       0xe05008UL
+#define PGLUE_B_REG_PF_BAR0_SIZE \
+       0x2aae60UL
+#define PGLUE_B_REG_PF_BAR1_SIZE \
+       0x2aae64UL
+#define PRS_REG_ENCAPSULATION_TYPE_EN  0x1f0730UL
+#define PRS_REG_GRE_PROTOCOL           0x1f0734UL
+#define PRS_REG_VXLAN_PORT             0x1f0738UL
+#define PRS_REG_OUTPUT_FORMAT_4_0      0x1f099cUL
+#define NIG_REG_ENC_TYPE_ENABLE                0x501058UL
+
+#define NIG_REG_ENC_TYPE_ENABLE_ETH_OVER_GRE_ENABLE            (0x1 << 0)
+#define NIG_REG_ENC_TYPE_ENABLE_ETH_OVER_GRE_ENABLE_SHIFT      0
+#define NIG_REG_ENC_TYPE_ENABLE_IP_OVER_GRE_ENABLE             (0x1 << 1)
+#define NIG_REG_ENC_TYPE_ENABLE_IP_OVER_GRE_ENABLE_SHIFT       1
+#define NIG_REG_ENC_TYPE_ENABLE_VXLAN_ENABLE                   (0x1 << 2)
+#define NIG_REG_ENC_TYPE_ENABLE_VXLAN_ENABLE_SHIFT             2
+
+#define NIG_REG_VXLAN_CTRL             0x50105cUL
+#define PBF_REG_VXLAN_PORT             0xd80518UL
+#define PBF_REG_NGE_PORT               0xd8051cUL
+#define PRS_REG_NGE_PORT               0x1f086cUL
+#define NIG_REG_NGE_PORT               0x508b38UL
+
+#define DORQ_REG_L2_EDPM_TUNNEL_GRE_ETH_EN     0x10090cUL
+#define DORQ_REG_L2_EDPM_TUNNEL_GRE_IP_EN      0x100910UL
+#define DORQ_REG_L2_EDPM_TUNNEL_VXLAN_EN       0x100914UL
+#define DORQ_REG_L2_EDPM_TUNNEL_NGE_IP_EN      0x10092cUL
+#define DORQ_REG_L2_EDPM_TUNNEL_NGE_ETH_EN     0x100930UL
+
+#define NIG_REG_NGE_IP_ENABLE                  0x508b28UL
+#define NIG_REG_NGE_ETH_ENABLE                 0x508b2cUL
+#define NIG_REG_NGE_COMP_VER                   0x508b30UL
+#define PBF_REG_NGE_COMP_VER                   0xd80524UL
+#define PRS_REG_NGE_COMP_VER                   0x1f0878UL
+
+#define QM_REG_WFQPFWEIGHT     0x2f4e80UL
+#define QM_REG_WFQVPWEIGHT     0x2fa000UL
+#endif
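The block above closes qed_reg_addr.h: a flat list of GRC/BAR0 register offsets that the rest of the driver reaches through the PTT-based accessors declared in qed_hw.h. As a hedged illustration only (the wrapper function below is not part of this commit, and assumes the qed_rd()/qed_wr() helpers plus an already-acquired PTT window):

static void example_touch_regs(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
        u32 chip_num;

        /* Read a MISCS identification register through the PTT window */
        chip_num = qed_rd(p_hwfn, p_ptt, MISCS_REG_CHIP_NUM);

        /* DMAE "GO" registers are written to kick the matching channel */
        qed_wr(p_hwfn, p_ptt, DMAE_REG_GO_C0, 1);

        DP_VERBOSE(p_hwfn, NETIF_MSG_HW, "chip num 0x%08x\n", chip_num);
}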
diff --git a/drivers/net/ethernet/qlogic/qed/qed_selftest.c b/drivers/net/ethernet/qlogic/qed/qed_selftest.c
new file mode 100644 (file)
index 0000000..a342bfe
--- /dev/null
@@ -0,0 +1,76 @@
+#include "qed.h"
+#include "qed_dev_api.h"
+#include "qed_mcp.h"
+#include "qed_sp.h"
+
+int qed_selftest_memory(struct qed_dev *cdev)
+{
+       int rc = 0, i;
+
+       for_each_hwfn(cdev, i) {
+               rc = qed_sp_heartbeat_ramrod(&cdev->hwfns[i]);
+               if (rc)
+                       return rc;
+       }
+
+       return rc;
+}
+
+int qed_selftest_interrupt(struct qed_dev *cdev)
+{
+       int rc = 0, i;
+
+       for_each_hwfn(cdev, i) {
+               rc = qed_sp_heartbeat_ramrod(&cdev->hwfns[i]);
+               if (rc)
+                       return rc;
+       }
+
+       return rc;
+}
+
+int qed_selftest_register(struct qed_dev *cdev)
+{
+       struct qed_hwfn *p_hwfn;
+       struct qed_ptt *p_ptt;
+       int rc = 0, i;
+
+       /* although performed by MCP, this test is per engine */
+       for_each_hwfn(cdev, i) {
+               p_hwfn = &cdev->hwfns[i];
+               p_ptt = qed_ptt_acquire(p_hwfn);
+               if (!p_ptt) {
+                       DP_ERR(p_hwfn, "failed to acquire ptt\n");
+                       return -EBUSY;
+               }
+               rc = qed_mcp_bist_register_test(p_hwfn, p_ptt);
+               qed_ptt_release(p_hwfn, p_ptt);
+               if (rc)
+                       break;
+       }
+
+       return rc;
+}
+
+int qed_selftest_clock(struct qed_dev *cdev)
+{
+       struct qed_hwfn *p_hwfn;
+       struct qed_ptt *p_ptt;
+       int rc = 0, i;
+
+       /* although performed by MCP, this test is per engine */
+       for_each_hwfn(cdev, i) {
+               p_hwfn = &cdev->hwfns[i];
+               p_ptt = qed_ptt_acquire(p_hwfn);
+               if (!p_ptt) {
+                       DP_ERR(p_hwfn, "failed to acquire ptt\n");
+                       return -EBUSY;
+               }
+               rc = qed_mcp_bist_clock_test(p_hwfn, p_ptt);
+               qed_ptt_release(p_hwfn, p_ptt);
+               if (rc)
+                       break;
+       }
+
+       return rc;
+}
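Each selftest above returns 0 on success and propagates the first per-hwfn error it sees, so callers can treat any non-zero value as a failed test. A minimal sketch of driving all four tests in sequence (the wrapper itself is illustrative and not part of this commit):

static int example_run_selftests(struct qed_dev *cdev)
{
        int rc;

        /* Memory and interrupt tests are exercised via the heartbeat ramrod,
         * register and clock tests via the management FW BIST commands.
         */
        rc = qed_selftest_memory(cdev);
        if (rc)
                return rc;

        rc = qed_selftest_interrupt(cdev);
        if (rc)
                return rc;

        rc = qed_selftest_register(cdev);
        if (rc)
                return rc;

        return qed_selftest_clock(cdev);
}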
diff --git a/drivers/net/ethernet/qlogic/qed/qed_selftest.h b/drivers/net/ethernet/qlogic/qed/qed_selftest.h
new file mode 100644 (file)
index 0000000..50eb0b4
--- /dev/null
@@ -0,0 +1,40 @@
+#ifndef _QED_SELFTEST_API_H
+#define _QED_SELFTEST_API_H
+#include <linux/types.h>
+
+/**
+ * @brief qed_selftest_memory - Perform memory test
+ *
+ * @param cdev
+ *
+ * @return int
+ */
+int qed_selftest_memory(struct qed_dev *cdev);
+
+/**
+ * @brief qed_selftest_interrupt - Perform interrupt test
+ *
+ * @param cdev
+ *
+ * @return int
+ */
+int qed_selftest_interrupt(struct qed_dev *cdev);
+
+/**
+ * @brief qed_selftest_register - Perform register test
+ *
+ * @param cdev
+ *
+ * @return int
+ */
+int qed_selftest_register(struct qed_dev *cdev);
+
+/**
+ * @brief qed_selftest_clock - Perform clock test
+ *
+ * @param cdev
+ *
+ * @return int
+ */
+int qed_selftest_clock(struct qed_dev *cdev);
+#endif
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp.h b/drivers/net/ethernet/qlogic/qed/qed_sp.h
new file mode 100644 (file)
index 0000000..a548504
--- /dev/null
@@ -0,0 +1,425 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015 QLogic Corporation
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#ifndef _QED_SP_H
+#define _QED_SP_H
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/qed/qed_chain.h>
+#include "qed.h"
+#include "qed_hsi.h"
+
+enum spq_mode {
+       QED_SPQ_MODE_BLOCK,     /* Client will poll a designated mem. address */
+       QED_SPQ_MODE_CB,        /* Client supplies a callback */
+       QED_SPQ_MODE_EBLOCK,    /* QED should block until completion */
+};
+
+struct qed_spq_comp_cb {
+       void    (*function)(struct qed_hwfn *,
+                           void *,
+                           union event_ring_data *,
+                           u8 fw_return_code);
+       void    *cookie;
+};
+
+/**
+ * @brief qed_eth_cqe_completion - handles the completion of a
+ *        ramrod on the cqe ring
+ *
+ * @param p_hwfn
+ * @param cqe
+ *
+ * @return int
+ */
+int qed_eth_cqe_completion(struct qed_hwfn *p_hwfn,
+                          struct eth_slow_path_rx_cqe *cqe);
+
+/**
+ *  @file
+ *
+ *  QED Slow-hwfn queue interface
+ */
+
+union ramrod_data {
+       struct pf_start_ramrod_data pf_start;
+       struct pf_update_ramrod_data pf_update;
+       struct rx_queue_start_ramrod_data rx_queue_start;
+       struct rx_queue_update_ramrod_data rx_queue_update;
+       struct rx_queue_stop_ramrod_data rx_queue_stop;
+       struct tx_queue_start_ramrod_data tx_queue_start;
+       struct tx_queue_stop_ramrod_data tx_queue_stop;
+       struct vport_start_ramrod_data vport_start;
+       struct vport_stop_ramrod_data vport_stop;
+       struct vport_update_ramrod_data vport_update;
+       struct vport_filter_update_ramrod_data vport_filter_update;
+
+       struct rdma_init_func_ramrod_data rdma_init_func;
+       struct rdma_close_func_ramrod_data rdma_close_func;
+       struct rdma_register_tid_ramrod_data rdma_register_tid;
+       struct rdma_deregister_tid_ramrod_data rdma_deregister_tid;
+       struct roce_create_qp_resp_ramrod_data roce_create_qp_resp;
+       struct roce_create_qp_req_ramrod_data roce_create_qp_req;
+       struct roce_modify_qp_resp_ramrod_data roce_modify_qp_resp;
+       struct roce_modify_qp_req_ramrod_data roce_modify_qp_req;
+       struct roce_query_qp_resp_ramrod_data roce_query_qp_resp;
+       struct roce_query_qp_req_ramrod_data roce_query_qp_req;
+       struct roce_destroy_qp_resp_ramrod_data roce_destroy_qp_resp;
+       struct roce_destroy_qp_req_ramrod_data roce_destroy_qp_req;
+       struct rdma_create_cq_ramrod_data rdma_create_cq;
+       struct rdma_resize_cq_ramrod_data rdma_resize_cq;
+       struct rdma_destroy_cq_ramrod_data rdma_destroy_cq;
+       struct rdma_srq_create_ramrod_data rdma_create_srq;
+       struct rdma_srq_destroy_ramrod_data rdma_destroy_srq;
+       struct rdma_srq_modify_ramrod_data rdma_modify_srq;
+
+       struct iscsi_slow_path_hdr iscsi_empty;
+       struct iscsi_init_ramrod_params iscsi_init;
+       struct iscsi_spe_func_dstry iscsi_destroy;
+       struct iscsi_spe_conn_offload iscsi_conn_offload;
+       struct iscsi_conn_update_ramrod_params iscsi_conn_update;
+       struct iscsi_spe_conn_termination iscsi_conn_terminate;
+
+       struct vf_start_ramrod_data vf_start;
+       struct vf_stop_ramrod_data vf_stop;
+};
+
+#define EQ_MAX_CREDIT   0xffffffff
+
+enum spq_priority {
+       QED_SPQ_PRIORITY_NORMAL,
+       QED_SPQ_PRIORITY_HIGH,
+};
+
+union qed_spq_req_comp {
+       struct qed_spq_comp_cb  cb;
+       u64                     *done_addr;
+};
+
+struct qed_spq_comp_done {
+       u64     done;
+       u8      fw_return_code;
+};
+
+struct qed_spq_entry {
+       struct list_head                list;
+
+       u8                              flags;
+
+       /* HSI slow path element */
+       struct slow_path_element        elem;
+
+       union ramrod_data               ramrod;
+
+       enum spq_priority               priority;
+
+       /* pending queue for this entry */
+       struct list_head                *queue;
+
+       enum spq_mode                   comp_mode;
+       struct qed_spq_comp_cb          comp_cb;
+       struct qed_spq_comp_done        comp_done; /* SPQ_MODE_EBLOCK */
+};
+
+struct qed_eq {
+       struct qed_chain        chain;
+       u8                      eq_sb_index;    /* index within the SB */
+       __le16                  *p_fw_cons;     /* ptr to index value */
+};
+
+struct qed_consq {
+       struct qed_chain chain;
+};
+
+struct qed_spq {
+       spinlock_t              lock; /* SPQ lock */
+
+       struct list_head        unlimited_pending;
+       struct list_head        pending;
+       struct list_head        completion_pending;
+       struct list_head        free_pool;
+
+       struct qed_chain        chain;
+
+       /* allocated dma-able memory for spq entries (+ramrod data) */
+       dma_addr_t              p_phys;
+       struct qed_spq_entry    *p_virt;
+
+#define SPQ_RING_SIZE \
+       (CORE_SPQE_PAGE_SIZE_BYTES / sizeof(struct slow_path_element))
+
+       /* Bitmap for handling out-of-order completions */
+       DECLARE_BITMAP(p_comp_bitmap, SPQ_RING_SIZE);
+       u8                      comp_bitmap_idx;
+
+       /* Statistics */
+       u32                     unlimited_pending_count;
+       u32                     normal_count;
+       u32                     high_count;
+       u32                     comp_sent_count;
+       u32                     comp_count;
+
+       u32                     cid;
+};
+
+/**
+ * @brief qed_spq_post - Posts a Slow hwfn request to FW, or lacking that,
+ *        pends it to the pending list.
+ *
+ * @param p_hwfn
+ * @param p_ent
+ * @param fw_return_code
+ *
+ * @return int
+ */
+int qed_spq_post(struct qed_hwfn *p_hwfn,
+                struct qed_spq_entry *p_ent,
+                u8 *fw_return_code);
+
+/**
+ * @brief qed_spq_alloc - Allocates & initializes the SPQ.
+ *
+ * @param p_hwfn
+ *
+ * @return int
+ */
+int qed_spq_alloc(struct qed_hwfn *p_hwfn);
+
+/**
+ * @brief qed_spq_setup - Reset the SPQ to its start state.
+ *
+ * @param p_hwfn
+ */
+void qed_spq_setup(struct qed_hwfn *p_hwfn);
+
+/**
+ * @brief qed_spq_free - Frees the given SPQ struct.
+ *
+ * @param p_hwfn
+ */
+void qed_spq_free(struct qed_hwfn *p_hwfn);
+
+/**
+ * @brief qed_spq_get_entry - Obtain an entry from the spq
+ *        free pool list.
+ *
+ * @param p_hwfn
+ * @param pp_ent
+ *
+ * @return int
+ */
+int
+qed_spq_get_entry(struct qed_hwfn *p_hwfn,
+                 struct qed_spq_entry **pp_ent);
+
+/**
+ * @brief qed_spq_return_entry - Return an entry to spq free
+ *                                 pool list
+ *
+ * @param p_hwfn
+ * @param p_ent
+ */
+void qed_spq_return_entry(struct qed_hwfn *p_hwfn,
+                         struct qed_spq_entry *p_ent);
+
+/**
+ * @brief qed_eq_alloc - Allocates & initializes an EQ struct
+ *
+ * @param p_hwfn
+ * @param num_elem number of elements in the eq
+ *
+ * @return struct qed_eq* - a newly allocated structure; NULL upon error.
+ */
+struct qed_eq *qed_eq_alloc(struct qed_hwfn *p_hwfn,
+                           u16 num_elem);
+
+/**
+ * @brief qed_eq_setup - Reset the EQ to its start state.
+ *
+ * @param p_hwfn
+ * @param p_eq
+ */
+void qed_eq_setup(struct qed_hwfn *p_hwfn,
+                 struct qed_eq *p_eq);
+
+/**
+ * @brief qed_eq_free - Frees the given EQ struct.
+ *
+ * @param p_hwfn
+ * @param p_eq
+ */
+void qed_eq_free(struct qed_hwfn *p_hwfn,
+                struct qed_eq *p_eq);
+
+/**
+ * @brief qed_eq_prod_update - update the FW with default EQ producer
+ *
+ * @param p_hwfn
+ * @param prod
+ */
+void qed_eq_prod_update(struct qed_hwfn *p_hwfn,
+                       u16 prod);
+
+/**
+ * @brief qed_eq_completion - Completes currently pending EQ elements
+ *
+ * @param p_hwfn
+ * @param cookie
+ *
+ * @return int
+ */
+int qed_eq_completion(struct qed_hwfn *p_hwfn,
+                     void *cookie);
+
+/**
+ * @brief qed_spq_completion - Completes a single event
+ *
+ * @param p_hwfn
+ * @param echo - echo value from cookie (used for determining completion)
+ * @param fw_return_code - FW return code reported in the EQ element
+ * @param p_data - data from cookie (used in callback function if applicable)
+ *
+ * @return int
+ */
+int qed_spq_completion(struct qed_hwfn *p_hwfn,
+                      __le16 echo,
+                      u8 fw_return_code,
+                      union event_ring_data *p_data);
+
+/**
+ * @brief qed_spq_get_cid - Given p_hwfn, return cid for the hwfn's SPQ
+ *
+ * @param p_hwfn
+ *
+ * @return u32 - SPQ CID
+ */
+u32 qed_spq_get_cid(struct qed_hwfn *p_hwfn);
+
+/**
+ * @brief qed_consq_alloc - Allocates & initializes a ConsQ
+ *        struct
+ *
+ * @param p_hwfn
+ *
+ * @return struct qed_consq* - a newly allocated structure; NULL upon error.
+ */
+struct qed_consq *qed_consq_alloc(struct qed_hwfn *p_hwfn);
+
+/**
+ * @brief qed_consq_setup - Reset the ConsQ to its start
+ *        state.
+ *
+ * @param p_hwfn
+ * @param p_consq
+ */
+void qed_consq_setup(struct qed_hwfn *p_hwfn,
+                    struct qed_consq *p_consq);
+
+/**
+ * @brief qed_consq_free - Frees the given ConsQ struct.
+ *
+ * @param p_hwfn
+ * @param p_consq
+ */
+void qed_consq_free(struct qed_hwfn *p_hwfn,
+                   struct qed_consq *p_consq);
+
+/**
+ * @file
+ *
+ * @brief Slow-hwfn low-level commands (Ramrods) function definitions.
+ */
+
+#define QED_SP_EQ_COMPLETION  0x01
+#define QED_SP_CQE_COMPLETION 0x02
+
+struct qed_sp_init_data {
+       u32                     cid;
+       u16                     opaque_fid;
+
+       /* Information regarding operation upon sending & completion */
+       enum spq_mode           comp_mode;
+       struct qed_spq_comp_cb *p_comp_data;
+};
+
+int qed_sp_init_request(struct qed_hwfn *p_hwfn,
+                       struct qed_spq_entry **pp_ent,
+                       u8 cmd,
+                       u8 protocol,
+                       struct qed_sp_init_data *p_data);
+
+/**
+ * @brief qed_sp_pf_start - PF Function Start Ramrod
+ *
+ * This ramrod is sent to initialize a physical function (PF). It will
+ * configure the function-related parameters and write its completion to the
+ * event ring specified in the parameters.
+ *
+ * Ramrods complete on the common event ring for the PF. This ring is
+ * allocated by the driver on host memory and its parameters are written
+ * to the internal RAM of the UStorm by the Function Start Ramrod.
+ *
+ * @param p_hwfn
+ * @param p_tunn
+ * @param mode
+ * @param allow_npar_tx_switch
+ *
+ * @return int
+ */
+
+int qed_sp_pf_start(struct qed_hwfn *p_hwfn,
+                   struct qed_tunn_start_params *p_tunn,
+                   enum qed_mf_mode mode, bool allow_npar_tx_switch);
+
+/**
+ * @brief qed_sp_pf_update - PF Function Update Ramrod
+ *
+ * This ramrod updates function-related parameters. Every parameter can be
+ * updated independently, according to configuration flags.
+ *
+ * @param p_hwfn
+ *
+ * @return int
+ */
+
+int qed_sp_pf_update(struct qed_hwfn *p_hwfn);
+
+/**
+ * @brief qed_sp_pf_stop - PF Function Stop Ramrod
+ *
+ * This ramrod is sent to close a Physical Function (PF). It is the last ramrod
+ * sent and the last completion written to the PF's Event Ring. This ramrod also
+ * deletes the context for the Slow hwfn connection on this PF.
+ *
+ * @note Not required for first packet.
+ *
+ * @param p_hwfn
+ *
+ * @return int
+ */
+
+int qed_sp_pf_stop(struct qed_hwfn *p_hwfn);
+
+int qed_sp_pf_update_tunn_cfg(struct qed_hwfn *p_hwfn,
+                             struct qed_tunn_update_params *p_tunn,
+                             enum spq_mode comp_mode,
+                             struct qed_spq_comp_cb *p_comp_data);
+/**
+ * @brief qed_sp_heartbeat_ramrod - Send empty Ramrod
+ *
+ * @param p_hwfn
+ *
+ * @return int
+ */
+
+int qed_sp_heartbeat_ramrod(struct qed_hwfn *p_hwfn);
+
+#endif
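qed_sp.h above defines three SPQ completion modes; the command helpers that follow use QED_SPQ_MODE_EBLOCK (the driver blocks until the EQ reports completion) almost everywhere. With QED_SPQ_MODE_CB the caller supplies a qed_spq_comp_cb instead and the completion is delivered from EQ processing. A hedged sketch of that pattern, reusing the PF-update opcode purely for illustration (the two example functions are not part of this commit):

static void example_comp_cb(struct qed_hwfn *p_hwfn, void *cookie,
                            union event_ring_data *data, u8 fw_return_code)
{
        /* Invoked from qed_spq_completion() once the EQ element arrives */
        DP_VERBOSE(p_hwfn, QED_MSG_SPQ, "ramrod completed, fw rc %d\n",
                   fw_return_code);
}

static int example_post_cb_ramrod(struct qed_hwfn *p_hwfn)
{
        struct qed_spq_comp_cb cb = { .function = example_comp_cb };
        struct qed_sp_init_data init_data;
        struct qed_spq_entry *p_ent;
        int rc;

        memset(&init_data, 0, sizeof(init_data));
        init_data.cid = qed_spq_get_cid(p_hwfn);
        init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
        init_data.comp_mode = QED_SPQ_MODE_CB;
        init_data.p_comp_data = &cb;

        rc = qed_sp_init_request(p_hwfn, &p_ent, COMMON_RAMROD_PF_UPDATE,
                                 PROTOCOLID_COMMON, &init_data);
        if (rc)
                return rc;

        return qed_spq_post(p_hwfn, p_ent, NULL);
}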
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c
new file mode 100644 (file)
index 0000000..a52f3fc
--- /dev/null
@@ -0,0 +1,513 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015 QLogic Corporation
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#include <linux/types.h>
+#include <asm/byteorder.h>
+#include <linux/bitops.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include "qed.h"
+#include <linux/qed/qed_chain.h>
+#include "qed_cxt.h"
+#include "qed_dcbx.h"
+#include "qed_hsi.h"
+#include "qed_hw.h"
+#include "qed_int.h"
+#include "qed_reg_addr.h"
+#include "qed_sp.h"
+#include "qed_sriov.h"
+
+int qed_sp_init_request(struct qed_hwfn *p_hwfn,
+                       struct qed_spq_entry **pp_ent,
+                       u8 cmd,
+                       u8 protocol,
+                       struct qed_sp_init_data *p_data)
+{
+       u32 opaque_cid = p_data->opaque_fid << 16 | p_data->cid;
+       struct qed_spq_entry *p_ent = NULL;
+       int rc;
+
+       if (!pp_ent)
+               return -ENOMEM;
+
+       rc = qed_spq_get_entry(p_hwfn, pp_ent);
+
+       if (rc != 0)
+               return rc;
+
+       p_ent = *pp_ent;
+
+       p_ent->elem.hdr.cid             = cpu_to_le32(opaque_cid);
+       p_ent->elem.hdr.cmd_id          = cmd;
+       p_ent->elem.hdr.protocol_id     = protocol;
+
+       p_ent->priority         = QED_SPQ_PRIORITY_NORMAL;
+       p_ent->comp_mode        = p_data->comp_mode;
+       p_ent->comp_done.done   = 0;
+
+       switch (p_ent->comp_mode) {
+       case QED_SPQ_MODE_EBLOCK:
+               p_ent->comp_cb.cookie = &p_ent->comp_done;
+               break;
+
+       case QED_SPQ_MODE_BLOCK:
+               if (!p_data->p_comp_data)
+                       return -EINVAL;
+
+               p_ent->comp_cb.cookie = p_data->p_comp_data->cookie;
+               break;
+
+       case QED_SPQ_MODE_CB:
+               if (!p_data->p_comp_data)
+                       p_ent->comp_cb.function = NULL;
+               else
+                       p_ent->comp_cb = *p_data->p_comp_data;
+               break;
+
+       default:
+               DP_NOTICE(p_hwfn, "Unknown SPQE completion mode %d\n",
+                         p_ent->comp_mode);
+               return -EINVAL;
+       }
+
+       DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
+                  "Initialized: CID %08x cmd %02x protocol %02x data_addr %lu comp_mode [%s]\n",
+                  opaque_cid, cmd, protocol,
+                  (unsigned long)&p_ent->ramrod,
+                  D_TRINE(p_ent->comp_mode, QED_SPQ_MODE_EBLOCK,
+                          QED_SPQ_MODE_BLOCK, "MODE_EBLOCK", "MODE_BLOCK",
+                          "MODE_CB"));
+
+       memset(&p_ent->ramrod, 0, sizeof(p_ent->ramrod));
+
+       return 0;
+}
+
+static enum tunnel_clss qed_tunn_get_clss_type(u8 type)
+{
+       switch (type) {
+       case QED_TUNN_CLSS_MAC_VLAN:
+               return TUNNEL_CLSS_MAC_VLAN;
+       case QED_TUNN_CLSS_MAC_VNI:
+               return TUNNEL_CLSS_MAC_VNI;
+       case QED_TUNN_CLSS_INNER_MAC_VLAN:
+               return TUNNEL_CLSS_INNER_MAC_VLAN;
+       case QED_TUNN_CLSS_INNER_MAC_VNI:
+               return TUNNEL_CLSS_INNER_MAC_VNI;
+       default:
+               return TUNNEL_CLSS_MAC_VLAN;
+       }
+}
+
+static void
+qed_tunn_set_pf_fix_tunn_mode(struct qed_hwfn *p_hwfn,
+                             struct qed_tunn_update_params *p_src,
+                             struct pf_update_tunnel_config *p_tunn_cfg)
+{
+       unsigned long cached_tunn_mode = p_hwfn->cdev->tunn_mode;
+       unsigned long update_mask = p_src->tunn_mode_update_mask;
+       unsigned long tunn_mode = p_src->tunn_mode;
+       unsigned long new_tunn_mode = 0;
+
+       if (test_bit(QED_MODE_L2GRE_TUNN, &update_mask)) {
+               if (test_bit(QED_MODE_L2GRE_TUNN, &tunn_mode))
+                       __set_bit(QED_MODE_L2GRE_TUNN, &new_tunn_mode);
+       } else {
+               if (test_bit(QED_MODE_L2GRE_TUNN, &cached_tunn_mode))
+                       __set_bit(QED_MODE_L2GRE_TUNN, &new_tunn_mode);
+       }
+
+       if (test_bit(QED_MODE_IPGRE_TUNN, &update_mask)) {
+               if (test_bit(QED_MODE_IPGRE_TUNN, &tunn_mode))
+                       __set_bit(QED_MODE_IPGRE_TUNN, &new_tunn_mode);
+       } else {
+               if (test_bit(QED_MODE_IPGRE_TUNN, &cached_tunn_mode))
+                       __set_bit(QED_MODE_IPGRE_TUNN, &new_tunn_mode);
+       }
+
+       if (test_bit(QED_MODE_VXLAN_TUNN, &update_mask)) {
+               if (test_bit(QED_MODE_VXLAN_TUNN, &tunn_mode))
+                       __set_bit(QED_MODE_VXLAN_TUNN, &new_tunn_mode);
+       } else {
+               if (test_bit(QED_MODE_VXLAN_TUNN, &cached_tunn_mode))
+                       __set_bit(QED_MODE_VXLAN_TUNN, &new_tunn_mode);
+       }
+
+       if (p_src->update_geneve_udp_port) {
+               p_tunn_cfg->set_geneve_udp_port_flg = 1;
+               p_tunn_cfg->geneve_udp_port =
+                               cpu_to_le16(p_src->geneve_udp_port);
+       }
+
+       if (test_bit(QED_MODE_L2GENEVE_TUNN, &update_mask)) {
+               if (test_bit(QED_MODE_L2GENEVE_TUNN, &tunn_mode))
+                       __set_bit(QED_MODE_L2GENEVE_TUNN, &new_tunn_mode);
+       } else {
+               if (test_bit(QED_MODE_L2GENEVE_TUNN, &cached_tunn_mode))
+                       __set_bit(QED_MODE_L2GENEVE_TUNN, &new_tunn_mode);
+       }
+
+       if (test_bit(QED_MODE_IPGENEVE_TUNN, &update_mask)) {
+               if (test_bit(QED_MODE_IPGENEVE_TUNN, &tunn_mode))
+                       __set_bit(QED_MODE_IPGENEVE_TUNN, &new_tunn_mode);
+       } else {
+               if (test_bit(QED_MODE_IPGENEVE_TUNN, &cached_tunn_mode))
+                       __set_bit(QED_MODE_IPGENEVE_TUNN, &new_tunn_mode);
+       }
+
+       p_src->tunn_mode = new_tunn_mode;
+}
+
+static void
+qed_tunn_set_pf_update_params(struct qed_hwfn *p_hwfn,
+                             struct qed_tunn_update_params *p_src,
+                             struct pf_update_tunnel_config *p_tunn_cfg)
+{
+       unsigned long tunn_mode = p_src->tunn_mode;
+       enum tunnel_clss type;
+
+       qed_tunn_set_pf_fix_tunn_mode(p_hwfn, p_src, p_tunn_cfg);
+       p_tunn_cfg->update_rx_pf_clss = p_src->update_rx_pf_clss;
+       p_tunn_cfg->update_tx_pf_clss = p_src->update_tx_pf_clss;
+
+       type = qed_tunn_get_clss_type(p_src->tunn_clss_vxlan);
+       p_tunn_cfg->tunnel_clss_vxlan  = type;
+
+       type = qed_tunn_get_clss_type(p_src->tunn_clss_l2gre);
+       p_tunn_cfg->tunnel_clss_l2gre = type;
+
+       type = qed_tunn_get_clss_type(p_src->tunn_clss_ipgre);
+       p_tunn_cfg->tunnel_clss_ipgre = type;
+
+       if (p_src->update_vxlan_udp_port) {
+               p_tunn_cfg->set_vxlan_udp_port_flg = 1;
+               p_tunn_cfg->vxlan_udp_port = cpu_to_le16(p_src->vxlan_udp_port);
+       }
+
+       if (test_bit(QED_MODE_L2GRE_TUNN, &tunn_mode))
+               p_tunn_cfg->tx_enable_l2gre = 1;
+
+       if (test_bit(QED_MODE_IPGRE_TUNN, &tunn_mode))
+               p_tunn_cfg->tx_enable_ipgre = 1;
+
+       if (test_bit(QED_MODE_VXLAN_TUNN, &tunn_mode))
+               p_tunn_cfg->tx_enable_vxlan = 1;
+
+       if (p_src->update_geneve_udp_port) {
+               p_tunn_cfg->set_geneve_udp_port_flg = 1;
+               p_tunn_cfg->geneve_udp_port =
+                               cpu_to_le16(p_src->geneve_udp_port);
+       }
+
+       if (test_bit(QED_MODE_L2GENEVE_TUNN, &tunn_mode))
+               p_tunn_cfg->tx_enable_l2geneve = 1;
+
+       if (test_bit(QED_MODE_IPGENEVE_TUNN, &tunn_mode))
+               p_tunn_cfg->tx_enable_ipgeneve = 1;
+
+       type = qed_tunn_get_clss_type(p_src->tunn_clss_l2geneve);
+       p_tunn_cfg->tunnel_clss_l2geneve = type;
+
+       type = qed_tunn_get_clss_type(p_src->tunn_clss_ipgeneve);
+       p_tunn_cfg->tunnel_clss_ipgeneve = type;
+}
+
+static void qed_set_hw_tunn_mode(struct qed_hwfn *p_hwfn,
+                                struct qed_ptt *p_ptt,
+                                unsigned long tunn_mode)
+{
+       u8 l2gre_enable = 0, ipgre_enable = 0, vxlan_enable = 0;
+       u8 l2geneve_enable = 0, ipgeneve_enable = 0;
+
+       if (test_bit(QED_MODE_L2GRE_TUNN, &tunn_mode))
+               l2gre_enable = 1;
+
+       if (test_bit(QED_MODE_IPGRE_TUNN, &tunn_mode))
+               ipgre_enable = 1;
+
+       if (test_bit(QED_MODE_VXLAN_TUNN, &tunn_mode))
+               vxlan_enable = 1;
+
+       qed_set_gre_enable(p_hwfn, p_ptt, l2gre_enable, ipgre_enable);
+       qed_set_vxlan_enable(p_hwfn, p_ptt, vxlan_enable);
+
+       if (test_bit(QED_MODE_L2GENEVE_TUNN, &tunn_mode))
+               l2geneve_enable = 1;
+
+       if (test_bit(QED_MODE_IPGENEVE_TUNN, &tunn_mode))
+               ipgeneve_enable = 1;
+
+       qed_set_geneve_enable(p_hwfn, p_ptt, l2geneve_enable,
+                             ipgeneve_enable);
+}
+
+static void
+qed_tunn_set_pf_start_params(struct qed_hwfn *p_hwfn,
+                            struct qed_tunn_start_params *p_src,
+                            struct pf_start_tunnel_config *p_tunn_cfg)
+{
+       unsigned long tunn_mode;
+       enum tunnel_clss type;
+
+       if (!p_src)
+               return;
+
+       tunn_mode = p_src->tunn_mode;
+       type = qed_tunn_get_clss_type(p_src->tunn_clss_vxlan);
+       p_tunn_cfg->tunnel_clss_vxlan = type;
+       type = qed_tunn_get_clss_type(p_src->tunn_clss_l2gre);
+       p_tunn_cfg->tunnel_clss_l2gre = type;
+       type = qed_tunn_get_clss_type(p_src->tunn_clss_ipgre);
+       p_tunn_cfg->tunnel_clss_ipgre = type;
+
+       if (p_src->update_vxlan_udp_port) {
+               p_tunn_cfg->set_vxlan_udp_port_flg = 1;
+               p_tunn_cfg->vxlan_udp_port = cpu_to_le16(p_src->vxlan_udp_port);
+       }
+
+       if (test_bit(QED_MODE_L2GRE_TUNN, &tunn_mode))
+               p_tunn_cfg->tx_enable_l2gre = 1;
+
+       if (test_bit(QED_MODE_IPGRE_TUNN, &tunn_mode))
+               p_tunn_cfg->tx_enable_ipgre = 1;
+
+       if (test_bit(QED_MODE_VXLAN_TUNN, &tunn_mode))
+               p_tunn_cfg->tx_enable_vxlan = 1;
+
+       if (p_src->update_geneve_udp_port) {
+               p_tunn_cfg->set_geneve_udp_port_flg = 1;
+               p_tunn_cfg->geneve_udp_port =
+                               cpu_to_le16(p_src->geneve_udp_port);
+       }
+
+       if (test_bit(QED_MODE_L2GENEVE_TUNN, &tunn_mode))
+               p_tunn_cfg->tx_enable_l2geneve = 1;
+
+       if (test_bit(QED_MODE_IPGENEVE_TUNN, &tunn_mode))
+               p_tunn_cfg->tx_enable_ipgeneve = 1;
+
+       type = qed_tunn_get_clss_type(p_src->tunn_clss_l2geneve);
+       p_tunn_cfg->tunnel_clss_l2geneve = type;
+       type = qed_tunn_get_clss_type(p_src->tunn_clss_ipgeneve);
+       p_tunn_cfg->tunnel_clss_ipgeneve = type;
+}
+
+int qed_sp_pf_start(struct qed_hwfn *p_hwfn,
+                   struct qed_tunn_start_params *p_tunn,
+                   enum qed_mf_mode mode, bool allow_npar_tx_switch)
+{
+       struct pf_start_ramrod_data *p_ramrod = NULL;
+       u16 sb = qed_int_get_sp_sb_id(p_hwfn);
+       u8 sb_index = p_hwfn->p_eq->eq_sb_index;
+       struct qed_spq_entry *p_ent = NULL;
+       struct qed_sp_init_data init_data;
+       int rc = -EINVAL;
+       u8 page_cnt;
+
+       /* update initial eq producer */
+       qed_eq_prod_update(p_hwfn,
+                          qed_chain_get_prod_idx(&p_hwfn->p_eq->chain));
+
+       memset(&init_data, 0, sizeof(init_data));
+       init_data.cid = qed_spq_get_cid(p_hwfn);
+       init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+       init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
+
+       rc = qed_sp_init_request(p_hwfn, &p_ent,
+                                COMMON_RAMROD_PF_START,
+                                PROTOCOLID_COMMON,
+                                &init_data);
+       if (rc)
+               return rc;
+
+       p_ramrod = &p_ent->ramrod.pf_start;
+
+       p_ramrod->event_ring_sb_id      = cpu_to_le16(sb);
+       p_ramrod->event_ring_sb_index   = sb_index;
+       p_ramrod->path_id               = QED_PATH_ID(p_hwfn);
+       p_ramrod->dont_log_ramrods      = 0;
+       p_ramrod->log_type_mask         = cpu_to_le16(0xf);
+
+       switch (mode) {
+       case QED_MF_DEFAULT:
+       case QED_MF_NPAR:
+               p_ramrod->mf_mode = MF_NPAR;
+               break;
+       case QED_MF_OVLAN:
+               p_ramrod->mf_mode = MF_OVLAN;
+               break;
+       default:
+               DP_NOTICE(p_hwfn, "Unsupported MF mode, init as DEFAULT\n");
+               p_ramrod->mf_mode = MF_NPAR;
+       }
+       p_ramrod->outer_tag = p_hwfn->hw_info.ovlan;
+
+       /* Place EQ address in RAMROD */
+       DMA_REGPAIR_LE(p_ramrod->event_ring_pbl_addr,
+                      p_hwfn->p_eq->chain.pbl.p_phys_table);
+       page_cnt = (u8)qed_chain_get_page_cnt(&p_hwfn->p_eq->chain);
+       p_ramrod->event_ring_num_pages = page_cnt;
+       DMA_REGPAIR_LE(p_ramrod->consolid_q_pbl_addr,
+                      p_hwfn->p_consq->chain.pbl.p_phys_table);
+
+       qed_tunn_set_pf_start_params(p_hwfn, p_tunn,
+                                    &p_ramrod->tunnel_config);
+
+       if (IS_MF_SI(p_hwfn))
+               p_ramrod->allow_npar_tx_switching = allow_npar_tx_switch;
+
+       switch (p_hwfn->hw_info.personality) {
+       case QED_PCI_ETH:
+               p_ramrod->personality = PERSONALITY_ETH;
+               break;
+       case QED_PCI_ISCSI:
+               p_ramrod->personality = PERSONALITY_ISCSI;
+               break;
+       case QED_PCI_ETH_ROCE:
+               p_ramrod->personality = PERSONALITY_RDMA_AND_ETH;
+               break;
+       default:
+               DP_NOTICE(p_hwfn, "Unknown personality %d\n",
+                         p_hwfn->hw_info.personality);
+               p_ramrod->personality = PERSONALITY_ETH;
+       }
+
+       if (p_hwfn->cdev->p_iov_info) {
+               struct qed_hw_sriov_info *p_iov = p_hwfn->cdev->p_iov_info;
+
+               p_ramrod->base_vf_id = (u8) p_iov->first_vf_in_pf;
+               p_ramrod->num_vfs = (u8) p_iov->total_vfs;
+       }
+       p_ramrod->hsi_fp_ver.major_ver_arr[ETH_VER_KEY] = ETH_HSI_VER_MAJOR;
+       p_ramrod->hsi_fp_ver.minor_ver_arr[ETH_VER_KEY] = ETH_HSI_VER_MINOR;
+
+       DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
+                  "Setting event_ring_sb [id %04x index %02x], outer_tag [%d]\n",
+                  sb, sb_index,
+                  p_ramrod->outer_tag);
+
+       rc = qed_spq_post(p_hwfn, p_ent, NULL);
+
+       if (p_tunn) {
+               qed_set_hw_tunn_mode(p_hwfn, p_hwfn->p_main_ptt,
+                                    p_tunn->tunn_mode);
+               p_hwfn->cdev->tunn_mode = p_tunn->tunn_mode;
+       }
+
+       return rc;
+}
+
+int qed_sp_pf_update(struct qed_hwfn *p_hwfn)
+{
+       struct qed_spq_entry *p_ent = NULL;
+       struct qed_sp_init_data init_data;
+       int rc = -EINVAL;
+
+       /* Get SPQ entry */
+       memset(&init_data, 0, sizeof(init_data));
+       init_data.cid = qed_spq_get_cid(p_hwfn);
+       init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+       init_data.comp_mode = QED_SPQ_MODE_CB;
+
+       rc = qed_sp_init_request(p_hwfn, &p_ent,
+                                COMMON_RAMROD_PF_UPDATE, PROTOCOLID_COMMON,
+                                &init_data);
+       if (rc)
+               return rc;
+
+       qed_dcbx_set_pf_update_params(&p_hwfn->p_dcbx_info->results,
+                                     &p_ent->ramrod.pf_update);
+
+       return qed_spq_post(p_hwfn, p_ent, NULL);
+}
+
+/* Set pf update ramrod command params */
+int qed_sp_pf_update_tunn_cfg(struct qed_hwfn *p_hwfn,
+                             struct qed_tunn_update_params *p_tunn,
+                             enum spq_mode comp_mode,
+                             struct qed_spq_comp_cb *p_comp_data)
+{
+       struct qed_spq_entry *p_ent = NULL;
+       struct qed_sp_init_data init_data;
+       int rc = -EINVAL;
+
+       /* Get SPQ entry */
+       memset(&init_data, 0, sizeof(init_data));
+       init_data.cid = qed_spq_get_cid(p_hwfn);
+       init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+       init_data.comp_mode = comp_mode;
+       init_data.p_comp_data = p_comp_data;
+
+       rc = qed_sp_init_request(p_hwfn, &p_ent,
+                                COMMON_RAMROD_PF_UPDATE, PROTOCOLID_COMMON,
+                                &init_data);
+       if (rc)
+               return rc;
+
+       qed_tunn_set_pf_update_params(p_hwfn, p_tunn,
+                                     &p_ent->ramrod.pf_update.tunnel_config);
+
+       rc = qed_spq_post(p_hwfn, p_ent, NULL);
+       if (rc)
+               return rc;
+
+       if (p_tunn->update_vxlan_udp_port)
+               qed_set_vxlan_dest_port(p_hwfn, p_hwfn->p_main_ptt,
+                                       p_tunn->vxlan_udp_port);
+       if (p_tunn->update_geneve_udp_port)
+               qed_set_geneve_dest_port(p_hwfn, p_hwfn->p_main_ptt,
+                                        p_tunn->geneve_udp_port);
+
+       qed_set_hw_tunn_mode(p_hwfn, p_hwfn->p_main_ptt, p_tunn->tunn_mode);
+       p_hwfn->cdev->tunn_mode = p_tunn->tunn_mode;
+
+       return rc;
+}
+
+int qed_sp_pf_stop(struct qed_hwfn *p_hwfn)
+{
+       struct qed_spq_entry *p_ent = NULL;
+       struct qed_sp_init_data init_data;
+       int rc = -EINVAL;
+
+       /* Get SPQ entry */
+       memset(&init_data, 0, sizeof(init_data));
+       init_data.cid = qed_spq_get_cid(p_hwfn);
+       init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+       init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
+
+       rc = qed_sp_init_request(p_hwfn, &p_ent,
+                                COMMON_RAMROD_PF_STOP, PROTOCOLID_COMMON,
+                                &init_data);
+       if (rc)
+               return rc;
+
+       return qed_spq_post(p_hwfn, p_ent, NULL);
+}
+
+int qed_sp_heartbeat_ramrod(struct qed_hwfn *p_hwfn)
+{
+       struct qed_spq_entry *p_ent = NULL;
+       struct qed_sp_init_data init_data;
+       int rc;
+
+       /* Get SPQ entry */
+       memset(&init_data, 0, sizeof(init_data));
+       init_data.cid = qed_spq_get_cid(p_hwfn);
+       init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+       init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
+
+       rc = qed_sp_init_request(p_hwfn, &p_ent,
+                                COMMON_RAMROD_EMPTY, PROTOCOLID_COMMON,
+                                &init_data);
+       if (rc)
+               return rc;
+
+       return qed_spq_post(p_hwfn, p_ent, NULL);
+}
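qed_sp_pf_update_tunn_cfg() above is the run-time entry point for changing tunnel classification. A hedged sketch of enabling VXLAN and programming its UDP destination port through it (field values are illustrative and the helper is not part of this commit; it assumes the qed_tunn_update_params fields referenced by the code above):

static int example_enable_vxlan(struct qed_hwfn *p_hwfn, u16 udp_port)
{
        struct qed_tunn_update_params tunn_params;

        memset(&tunn_params, 0, sizeof(tunn_params));
        tunn_params.update_vxlan_udp_port = 1;
        tunn_params.vxlan_udp_port = udp_port;
        tunn_params.update_rx_pf_clss = 1;
        tunn_params.tunn_clss_vxlan = QED_TUNN_CLSS_MAC_VLAN;
        tunn_params.tunn_mode_update_mask = BIT(QED_MODE_VXLAN_TUNN);
        tunn_params.tunn_mode = BIT(QED_MODE_VXLAN_TUNN);

        return qed_sp_pf_update_tunn_cfg(p_hwfn, &tunn_params,
                                         QED_SPQ_MODE_EBLOCK, NULL);
}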
diff --git a/drivers/net/ethernet/qlogic/qed/qed_spq.c b/drivers/net/ethernet/qlogic/qed/qed_spq.c
new file mode 100644 (file)
index 0000000..d73456e
--- /dev/null
@@ -0,0 +1,917 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015 QLogic Corporation
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#include <linux/types.h>
+#include <asm/byteorder.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
+#include "qed.h"
+#include "qed_cxt.h"
+#include "qed_dev_api.h"
+#include "qed_hsi.h"
+#include "qed_hw.h"
+#include "qed_int.h"
+#include "qed_mcp.h"
+#include "qed_reg_addr.h"
+#include "qed_sp.h"
+#include "qed_sriov.h"
+
+/***************************************************************************
+* Structures & Definitions
+***************************************************************************/
+
+#define SPQ_HIGH_PRI_RESERVE_DEFAULT    (1)
+#define SPQ_BLOCK_SLEEP_LENGTH          (1000)
+
+/***************************************************************************
+* Blocking Imp. (BLOCK/EBLOCK mode)
+***************************************************************************/
+static void qed_spq_blocking_cb(struct qed_hwfn *p_hwfn,
+                               void *cookie,
+                               union event_ring_data *data,
+                               u8 fw_return_code)
+{
+       struct qed_spq_comp_done *comp_done;
+
+       comp_done = (struct qed_spq_comp_done *)cookie;
+
+       comp_done->done                 = 0x1;
+       comp_done->fw_return_code       = fw_return_code;
+
+       /* make update visible to waiting thread */
+       smp_wmb();
+}
+
+static int qed_spq_block(struct qed_hwfn *p_hwfn,
+                        struct qed_spq_entry *p_ent,
+                        u8 *p_fw_ret)
+{
+       int sleep_count = SPQ_BLOCK_SLEEP_LENGTH;
+       struct qed_spq_comp_done *comp_done;
+       int rc;
+
+       comp_done = (struct qed_spq_comp_done *)p_ent->comp_cb.cookie;
+       while (sleep_count) {
+               /* validate we receive completion update */
+               smp_rmb();
+               if (comp_done->done == 1) {
+                       if (p_fw_ret)
+                               *p_fw_ret = comp_done->fw_return_code;
+                       return 0;
+               }
+               usleep_range(5000, 10000);
+               sleep_count--;
+       }
+
+       DP_INFO(p_hwfn, "Ramrod is stuck, requesting MCP drain\n");
+       rc = qed_mcp_drain(p_hwfn, p_hwfn->p_main_ptt);
+       if (rc != 0)
+               DP_NOTICE(p_hwfn, "MCP drain failed\n");
+
+       /* Retry after drain */
+       sleep_count = SPQ_BLOCK_SLEEP_LENGTH;
+       while (sleep_count) {
+               /* validate we receive completion update */
+               smp_rmb();
+               if (comp_done->done == 1) {
+                       if (p_fw_ret)
+                               *p_fw_ret = comp_done->fw_return_code;
+                       return 0;
+               }
+               usleep_range(5000, 10000);
+               sleep_count--;
+       }
+
+       if (comp_done->done == 1) {
+               if (p_fw_ret)
+                       *p_fw_ret = comp_done->fw_return_code;
+               return 0;
+       }
+
+       DP_NOTICE(p_hwfn, "Ramrod is stuck, MCP drain failed\n");
+
+       return -EBUSY;
+}
+
+/***************************************************************************
+* SPQ entries inner API
+***************************************************************************/
+static int
+qed_spq_fill_entry(struct qed_hwfn *p_hwfn,
+                  struct qed_spq_entry *p_ent)
+{
+       p_ent->flags = 0;
+
+       switch (p_ent->comp_mode) {
+       case QED_SPQ_MODE_EBLOCK:
+       case QED_SPQ_MODE_BLOCK:
+               p_ent->comp_cb.function = qed_spq_blocking_cb;
+               break;
+       case QED_SPQ_MODE_CB:
+               break;
+       default:
+               DP_NOTICE(p_hwfn, "Unknown SPQE completion mode %d\n",
+                         p_ent->comp_mode);
+               return -EINVAL;
+       }
+
+       DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
+                  "Ramrod header: [CID 0x%08x CMD 0x%02x protocol 0x%02x] Data pointer: [%08x:%08x] Completion Mode: %s\n",
+                  p_ent->elem.hdr.cid,
+                  p_ent->elem.hdr.cmd_id,
+                  p_ent->elem.hdr.protocol_id,
+                  p_ent->elem.data_ptr.hi,
+                  p_ent->elem.data_ptr.lo,
+                  D_TRINE(p_ent->comp_mode, QED_SPQ_MODE_EBLOCK,
+                          QED_SPQ_MODE_BLOCK, "MODE_EBLOCK", "MODE_BLOCK",
+                          "MODE_CB"));
+
+       return 0;
+}
+
+/***************************************************************************
+* HSI access
+***************************************************************************/
+static void qed_spq_hw_initialize(struct qed_hwfn *p_hwfn,
+                                 struct qed_spq *p_spq)
+{
+       u16                             pq;
+       struct qed_cxt_info             cxt_info;
+       struct core_conn_context        *p_cxt;
+       union qed_qm_pq_params          pq_params;
+       int                             rc;
+
+       cxt_info.iid = p_spq->cid;
+
+       rc = qed_cxt_get_cid_info(p_hwfn, &cxt_info);
+
+       if (rc < 0) {
+               DP_NOTICE(p_hwfn, "Cannot find context info for cid=%d\n",
+                         p_spq->cid);
+               return;
+       }
+
+       p_cxt = cxt_info.p_cxt;
+
+       SET_FIELD(p_cxt->xstorm_ag_context.flags10,
+                 XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN, 1);
+       SET_FIELD(p_cxt->xstorm_ag_context.flags1,
+                 XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE, 1);
+       SET_FIELD(p_cxt->xstorm_ag_context.flags9,
+                 XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN, 1);
+
+       /* QM physical queue */
+       memset(&pq_params, 0, sizeof(pq_params));
+       pq_params.core.tc = LB_TC;
+       pq = qed_get_qm_pq(p_hwfn, PROTOCOLID_CORE, &pq_params);
+       p_cxt->xstorm_ag_context.physical_q0 = cpu_to_le16(pq);
+
+       p_cxt->xstorm_st_context.spq_base_lo =
+               DMA_LO_LE(p_spq->chain.p_phys_addr);
+       p_cxt->xstorm_st_context.spq_base_hi =
+               DMA_HI_LE(p_spq->chain.p_phys_addr);
+
+       DMA_REGPAIR_LE(p_cxt->xstorm_st_context.consolid_base_addr,
+                      p_hwfn->p_consq->chain.p_phys_addr);
+}
+
+static int qed_spq_hw_post(struct qed_hwfn *p_hwfn,
+                          struct qed_spq *p_spq,
+                          struct qed_spq_entry *p_ent)
+{
+       struct qed_chain *p_chain = &p_hwfn->p_spq->chain;
+       u16 echo = qed_chain_get_prod_idx(p_chain);
+       struct slow_path_element        *elem;
+       struct core_db_data             db;
+
+       p_ent->elem.hdr.echo    = cpu_to_le16(echo);
+       elem = qed_chain_produce(p_chain);
+       if (!elem) {
+               DP_NOTICE(p_hwfn, "Failed to produce from SPQ chain\n");
+               return -EINVAL;
+       }
+
+       *elem = p_ent->elem; /* struct assignment */
+
+       /* send a doorbell on the slow hwfn session */
+       memset(&db, 0, sizeof(db));
+       SET_FIELD(db.params, CORE_DB_DATA_DEST, DB_DEST_XCM);
+       SET_FIELD(db.params, CORE_DB_DATA_AGG_CMD, DB_AGG_CMD_SET);
+       SET_FIELD(db.params, CORE_DB_DATA_AGG_VAL_SEL,
+                 DQ_XCM_CORE_SPQ_PROD_CMD);
+       db.agg_flags = DQ_XCM_CORE_DQ_CF_CMD;
+       db.spq_prod = cpu_to_le16(qed_chain_get_prod_idx(p_chain));
+
+       /* make sure the SPQE is updated before the doorbell */
+       wmb();
+
+       DOORBELL(p_hwfn, qed_db_addr(p_spq->cid, DQ_DEMS_LEGACY), *(u32 *)&db);
+
+       /* make sure doorbell is rung */
+       wmb();
+
+       DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
+                  "Doorbelled [0x%08x, CID 0x%08x] with Flags: %02x agg_params: %02x, prod: %04x\n",
+                  qed_db_addr(p_spq->cid, DQ_DEMS_LEGACY),
+                  p_spq->cid, db.params, db.agg_flags,
+                  qed_chain_get_prod_idx(p_chain));
+
+       return 0;
+}
+
+/***************************************************************************
+* Asynchronous events
+***************************************************************************/
+static int
+qed_async_event_completion(struct qed_hwfn *p_hwfn,
+                          struct event_ring_entry *p_eqe)
+{
+       switch (p_eqe->protocol_id) {
+       case PROTOCOLID_COMMON:
+               return qed_sriov_eqe_event(p_hwfn,
+                                          p_eqe->opcode,
+                                          p_eqe->echo, &p_eqe->data);
+       default:
+               DP_NOTICE(p_hwfn,
+                         "Unknown Async completion for protocol: %d\n",
+                         p_eqe->protocol_id);
+               return -EINVAL;
+       }
+}
+
+/***************************************************************************
+* EQ API
+***************************************************************************/
+void qed_eq_prod_update(struct qed_hwfn *p_hwfn,
+                       u16 prod)
+{
+       u32 addr = GTT_BAR0_MAP_REG_USDM_RAM +
+                  USTORM_EQE_CONS_OFFSET(p_hwfn->rel_pf_id);
+
+       REG_WR16(p_hwfn, addr, prod);
+
+       /* keep prod updates ordered */
+       mmiowb();
+}
+
+int qed_eq_completion(struct qed_hwfn *p_hwfn,
+                     void *cookie)
+
+{
+       struct qed_eq *p_eq = cookie;
+       struct qed_chain *p_chain = &p_eq->chain;
+       int rc = 0;
+
+       /* take a snapshot of the FW consumer */
+       u16 fw_cons_idx = le16_to_cpu(*p_eq->p_fw_cons);
+
+       DP_VERBOSE(p_hwfn, QED_MSG_SPQ, "fw_cons_idx %x\n", fw_cons_idx);
+
+       /* Need to guarantee the fw_cons index we use points to a usable
+        * element (to comply with our chain), so our macros would comply
+        */
+       if ((fw_cons_idx & qed_chain_get_usable_per_page(p_chain)) ==
+           qed_chain_get_usable_per_page(p_chain))
+               fw_cons_idx += qed_chain_get_unusable_per_page(p_chain);
+
+       /* Complete current segment of eq entries */
+       while (fw_cons_idx != qed_chain_get_cons_idx(p_chain)) {
+               struct event_ring_entry *p_eqe = qed_chain_consume(p_chain);
+
+               if (!p_eqe) {
+                       rc = -EINVAL;
+                       break;
+               }
+
+               DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
+                          "op %x prot %x res0 %x echo %x fwret %x flags %x\n",
+                          p_eqe->opcode,
+                          p_eqe->protocol_id,
+                          p_eqe->reserved0,
+                          le16_to_cpu(p_eqe->echo),
+                          p_eqe->fw_return_code,
+                          p_eqe->flags);
+
+               if (GET_FIELD(p_eqe->flags, EVENT_RING_ENTRY_ASYNC)) {
+                       if (qed_async_event_completion(p_hwfn, p_eqe))
+                               rc = -EINVAL;
+               } else if (qed_spq_completion(p_hwfn,
+                                             p_eqe->echo,
+                                             p_eqe->fw_return_code,
+                                             &p_eqe->data)) {
+                       rc = -EINVAL;
+               }
+
+               qed_chain_recycle_consumed(p_chain);
+       }
+
+       qed_eq_prod_update(p_hwfn, qed_chain_get_prod_idx(p_chain));
+
+       return rc;
+}
+
+struct qed_eq *qed_eq_alloc(struct qed_hwfn *p_hwfn,
+                           u16 num_elem)
+{
+       struct qed_eq *p_eq;
+
+       /* Allocate EQ struct */
+       p_eq = kzalloc(sizeof(*p_eq), GFP_KERNEL);
+       if (!p_eq) {
+               DP_NOTICE(p_hwfn, "Failed to allocate `struct qed_eq'\n");
+               return NULL;
+       }
+
+       /* Allocate and initialize EQ chain*/
+       if (qed_chain_alloc(p_hwfn->cdev,
+                           QED_CHAIN_USE_TO_PRODUCE,
+                           QED_CHAIN_MODE_PBL,
+                           QED_CHAIN_CNT_TYPE_U16,
+                           num_elem,
+                           sizeof(union event_ring_element),
+                           &p_eq->chain)) {
+               DP_NOTICE(p_hwfn, "Failed to allocate eq chain\n");
+               goto eq_allocate_fail;
+       }
+
+       /* register EQ completion on the SP SB */
+       qed_int_register_cb(p_hwfn,
+                           qed_eq_completion,
+                           p_eq,
+                           &p_eq->eq_sb_index,
+                           &p_eq->p_fw_cons);
+
+       return p_eq;
+
+eq_allocate_fail:
+       qed_eq_free(p_hwfn, p_eq);
+       return NULL;
+}
+
+void qed_eq_setup(struct qed_hwfn *p_hwfn,
+                 struct qed_eq *p_eq)
+{
+       qed_chain_reset(&p_eq->chain);
+}
+
+void qed_eq_free(struct qed_hwfn *p_hwfn,
+                struct qed_eq *p_eq)
+{
+       if (!p_eq)
+               return;
+       qed_chain_free(p_hwfn->cdev, &p_eq->chain);
+       kfree(p_eq);
+}
+
+/***************************************************************************
+* CQE API - manipulate EQ functionality
+***************************************************************************/
+static int qed_cqe_completion(
+       struct qed_hwfn *p_hwfn,
+       struct eth_slow_path_rx_cqe *cqe,
+       enum protocol_type protocol)
+{
+       if (IS_VF(p_hwfn->cdev))
+               return 0;
+
+       /* @@@tmp - it's possible we'll eventually want to handle some
+        * actual commands that can arrive here, but for now this is only
+        * used to complete the ramrod using the echo value on the cqe
+        */
+       return qed_spq_completion(p_hwfn, cqe->echo, 0, NULL);
+}
+
+int qed_eth_cqe_completion(struct qed_hwfn *p_hwfn,
+                          struct eth_slow_path_rx_cqe *cqe)
+{
+       int rc;
+
+       rc = qed_cqe_completion(p_hwfn, cqe, PROTOCOLID_ETH);
+       if (rc)
+               DP_NOTICE(p_hwfn,
+                         "Failed to handle RXQ CQE [cmd 0x%02x]\n",
+                         cqe->ramrod_cmd_id);
+
+       return rc;
+}
+
+/***************************************************************************
+* Slow hwfn Queue (spq)
+***************************************************************************/
+void qed_spq_setup(struct qed_hwfn *p_hwfn)
+{
+       struct qed_spq *p_spq = p_hwfn->p_spq;
+       struct qed_spq_entry *p_virt = NULL;
+       dma_addr_t p_phys = 0;
+       u32 i, capacity;
+
+       INIT_LIST_HEAD(&p_spq->pending);
+       INIT_LIST_HEAD(&p_spq->completion_pending);
+       INIT_LIST_HEAD(&p_spq->free_pool);
+       INIT_LIST_HEAD(&p_spq->unlimited_pending);
+       spin_lock_init(&p_spq->lock);
+
+       /* SPQ empty pool */
+       p_phys  = p_spq->p_phys + offsetof(struct qed_spq_entry, ramrod);
+       p_virt  = p_spq->p_virt;
+
+       capacity = qed_chain_get_capacity(&p_spq->chain);
+       for (i = 0; i < capacity; i++) {
+               DMA_REGPAIR_LE(p_virt->elem.data_ptr, p_phys);
+
+               list_add_tail(&p_virt->list, &p_spq->free_pool);
+
+               p_virt++;
+               p_phys += sizeof(struct qed_spq_entry);
+       }
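+
+       /* Every free-pool entry's ring element now carries the DMA address
+        * of that entry's own ramrod buffer (set by DMA_REGPAIR_LE above).
+        */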
+
+       /* Statistics */
+       p_spq->normal_count             = 0;
+       p_spq->comp_count               = 0;
+       p_spq->comp_sent_count          = 0;
+       p_spq->unlimited_pending_count  = 0;
+
+       bitmap_zero(p_spq->p_comp_bitmap, SPQ_RING_SIZE);
+       p_spq->comp_bitmap_idx = 0;
+
+       /* SPQ cid, cannot fail */
+       qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_CORE, &p_spq->cid);
+       qed_spq_hw_initialize(p_hwfn, p_spq);
+
+       /* reset the chain itself */
+       qed_chain_reset(&p_spq->chain);
+}
+
+int qed_spq_alloc(struct qed_hwfn *p_hwfn)
+{
+       struct qed_spq_entry *p_virt = NULL;
+       struct qed_spq *p_spq = NULL;
+       dma_addr_t p_phys = 0;
+       u32 capacity;
+
+       /* SPQ struct */
+       p_spq = kzalloc(sizeof(struct qed_spq), GFP_KERNEL);
+       if (!p_spq) {
+               DP_NOTICE(p_hwfn, "Failed to allocate `struct qed_spq'\n");
+               return -ENOMEM;
+       }
+
+       /* SPQ ring  */
+       if (qed_chain_alloc(p_hwfn->cdev,
+                           QED_CHAIN_USE_TO_PRODUCE,
+                           QED_CHAIN_MODE_SINGLE,
+                           QED_CHAIN_CNT_TYPE_U16,
+                           0,   /* N/A when the mode is SINGLE */
+                           sizeof(struct slow_path_element),
+                           &p_spq->chain)) {
+               DP_NOTICE(p_hwfn, "Failed to allocate spq chain\n");
+               goto spq_allocate_fail;
+       }
+
+       /* allocate and fill the SPQ elements (incl. ramrod data list) */
+       capacity = qed_chain_get_capacity(&p_spq->chain);
+       p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
+                                   capacity *
+                                   sizeof(struct qed_spq_entry),
+                                   &p_phys, GFP_KERNEL);
+
+       if (!p_virt)
+               goto spq_allocate_fail;
+
+       p_spq->p_virt = p_virt;
+       p_spq->p_phys = p_phys;
+       p_hwfn->p_spq = p_spq;
+
+       return 0;
+
+spq_allocate_fail:
+       qed_chain_free(p_hwfn->cdev, &p_spq->chain);
+       kfree(p_spq);
+       return -ENOMEM;
+}
+
+void qed_spq_free(struct qed_hwfn *p_hwfn)
+{
+       struct qed_spq *p_spq = p_hwfn->p_spq;
+       u32 capacity;
+
+       if (!p_spq)
+               return;
+
+       if (p_spq->p_virt) {
+               capacity = qed_chain_get_capacity(&p_spq->chain);
+               dma_free_coherent(&p_hwfn->cdev->pdev->dev,
+                                 capacity *
+                                 sizeof(struct qed_spq_entry),
+                                 p_spq->p_virt, p_spq->p_phys);
+       }
+
+       qed_chain_free(p_hwfn->cdev, &p_spq->chain);
+       kfree(p_spq);
+}
+
+int
+qed_spq_get_entry(struct qed_hwfn *p_hwfn,
+                 struct qed_spq_entry **pp_ent)
+{
+       struct qed_spq *p_spq = p_hwfn->p_spq;
+       struct qed_spq_entry *p_ent = NULL;
+       int rc = 0;
+
+       spin_lock_bh(&p_spq->lock);
+
+       if (list_empty(&p_spq->free_pool)) {
+               p_ent = kzalloc(sizeof(*p_ent), GFP_ATOMIC);
+               if (!p_ent) {
+                       rc = -ENOMEM;
+                       goto out_unlock;
+               }
+               p_ent->queue = &p_spq->unlimited_pending;
+       } else {
+               p_ent = list_first_entry(&p_spq->free_pool,
+                                        struct qed_spq_entry,
+                                        list);
+               list_del(&p_ent->list);
+               p_ent->queue = &p_spq->pending;
+       }
+
+       *pp_ent = p_ent;
+
+out_unlock:
+       spin_unlock_bh(&p_spq->lock);
+       return rc;
+}
+
+/* Locked variant; Should be called while the SPQ lock is taken */
+static void __qed_spq_return_entry(struct qed_hwfn *p_hwfn,
+                                  struct qed_spq_entry *p_ent)
+{
+       list_add_tail(&p_ent->list, &p_hwfn->p_spq->free_pool);
+}
+
+void qed_spq_return_entry(struct qed_hwfn *p_hwfn,
+                         struct qed_spq_entry *p_ent)
+{
+       spin_lock_bh(&p_hwfn->p_spq->lock);
+       __qed_spq_return_entry(p_hwfn, p_ent);
+       spin_unlock_bh(&p_hwfn->p_spq->lock);
+}
+
+/**
+ * @brief qed_spq_add_entry - adds a new entry to the pending
+ *        list. Should be used while lock is being held.
+ *
+ * Adds an entry to the pending list if there is room (an empty
+ * element is available in the free_pool); otherwise it places the
+ * entry in the unlimited_pending pool.
+ *
+ * @param p_hwfn
+ * @param p_ent
+ * @param priority
+ *
+ * @return int
+ */
+static int
+qed_spq_add_entry(struct qed_hwfn *p_hwfn,
+                 struct qed_spq_entry *p_ent,
+                 enum spq_priority priority)
+{
+       struct qed_spq *p_spq = p_hwfn->p_spq;
+
+       if (p_ent->queue == &p_spq->unlimited_pending) {
+               if (list_empty(&p_spq->free_pool)) {
+                       list_add_tail(&p_ent->list, &p_spq->unlimited_pending);
+                       p_spq->unlimited_pending_count++;
+
+                       return 0;
+               } else {
+                       struct qed_spq_entry *p_en2;
+
+                       p_en2 = list_first_entry(&p_spq->free_pool,
+                                                struct qed_spq_entry,
+                                                list);
+                       list_del(&p_en2->list);
+
+                       /* Copy the ring element physical pointer to the new
+                        * entry, since we are about to override the entire ring
+                        * entry and don't want to lose the pointer.
+                        */
+                       p_ent->elem.data_ptr = p_en2->elem.data_ptr;
+
+                       *p_en2 = *p_ent;
+
+                       /* EBLOCK is responsible for freeing the allocated p_ent */
+                       if (p_ent->comp_mode != QED_SPQ_MODE_EBLOCK)
+                               kfree(p_ent);
+
+                       p_ent = p_en2;
+               }
+       }
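+
+       /* At this point p_ent refers to a pre-allocated free-pool entry,
+        * either the original one or the copy made above, and can be queued.
+        */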
+
+       /* entry is to be placed in 'pending' queue */
+       switch (priority) {
+       case QED_SPQ_PRIORITY_NORMAL:
+               list_add_tail(&p_ent->list, &p_spq->pending);
+               p_spq->normal_count++;
+               break;
+       case QED_SPQ_PRIORITY_HIGH:
+               list_add(&p_ent->list, &p_spq->pending);
+               p_spq->high_count++;
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+/***************************************************************************
+* Accessor
+***************************************************************************/
+u32 qed_spq_get_cid(struct qed_hwfn *p_hwfn)
+{
+       if (!p_hwfn->p_spq)
+               return 0xffffffff;      /* illegal */
+       return p_hwfn->p_spq->cid;
+}
+
+/***************************************************************************
+* Posting new Ramrods
+***************************************************************************/
+static int qed_spq_post_list(struct qed_hwfn *p_hwfn,
+                            struct list_head *head,
+                            u32 keep_reserve)
+{
+       struct qed_spq *p_spq = p_hwfn->p_spq;
+       int rc;
+
+       while (qed_chain_get_elem_left(&p_spq->chain) > keep_reserve &&
+              !list_empty(head)) {
+               struct qed_spq_entry *p_ent =
+                       list_first_entry(head, struct qed_spq_entry, list);
+               list_del(&p_ent->list);
+               list_add_tail(&p_ent->list, &p_spq->completion_pending);
+               p_spq->comp_sent_count++;
+
+               rc = qed_spq_hw_post(p_hwfn, p_spq, p_ent);
+               if (rc) {
+                       list_del(&p_ent->list);
+                       __qed_spq_return_entry(p_hwfn, p_ent);
+                       return rc;
+               }
+       }
+
+       return 0;
+}
+
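+/* Move entries from unlimited_pending onto the pending list while free-pool
+ * entries are available, then post the pending list, leaving
+ * SPQ_HIGH_PRI_RESERVE_DEFAULT ring elements unused as headroom.
+ */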
+static int qed_spq_pend_post(struct qed_hwfn *p_hwfn)
+{
+       struct qed_spq *p_spq = p_hwfn->p_spq;
+       struct qed_spq_entry *p_ent = NULL;
+
+       while (!list_empty(&p_spq->free_pool)) {
+               if (list_empty(&p_spq->unlimited_pending))
+                       break;
+
+               p_ent = list_first_entry(&p_spq->unlimited_pending,
+                                        struct qed_spq_entry,
+                                        list);
+               if (!p_ent)
+                       return -EINVAL;
+
+               list_del(&p_ent->list);
+
+               qed_spq_add_entry(p_hwfn, p_ent, p_ent->priority);
+       }
+
+       return qed_spq_post_list(p_hwfn, &p_spq->pending,
+                                SPQ_HIGH_PRI_RESERVE_DEFAULT);
+}
+
+int qed_spq_post(struct qed_hwfn *p_hwfn,
+                struct qed_spq_entry *p_ent,
+                u8 *fw_return_code)
+{
+       int rc = 0;
+       struct qed_spq *p_spq = p_hwfn ? p_hwfn->p_spq : NULL;
+       bool b_ret_ent = true;
+
+       if (!p_hwfn)
+               return -EINVAL;
+
+       if (!p_ent) {
+               DP_NOTICE(p_hwfn, "Got a NULL pointer\n");
+               return -EINVAL;
+       }
+
+       /* Complete the entry */
+       rc = qed_spq_fill_entry(p_hwfn, p_ent);
+
+       spin_lock_bh(&p_spq->lock);
+
+       /* Check return value after LOCK is taken for cleaner error flow */
+       if (rc)
+               goto spq_post_fail;
+
+       /* Add the request to the pending queue */
+       rc = qed_spq_add_entry(p_hwfn, p_ent, p_ent->priority);
+       if (rc)
+               goto spq_post_fail;
+
+       rc = qed_spq_pend_post(p_hwfn);
+       if (rc) {
+               /* Since it's possible that pending failed for a different
+                * entry [although unlikely], the failed entry was already
+                * dealt with; No need to return it here.
+                */
+               b_ret_ent = false;
+               goto spq_post_fail;
+       }
+
+       spin_unlock_bh(&p_spq->lock);
+
+       if (p_ent->comp_mode == QED_SPQ_MODE_EBLOCK) {
+               /* For entries in QED BLOCK mode, the completion code cannot
+                * perform the necessary cleanup - if it did, we couldn't
+                * access p_ent here to see whether it's successful or not.
+                * Thus, after gaining the answer perform the cleanup here.
+                */
+               rc = qed_spq_block(p_hwfn, p_ent, fw_return_code);
+
+               if (p_ent->queue == &p_spq->unlimited_pending) {
+                       /* This is an allocated p_ent which does not need to
+                        * return to pool.
+                        */
+                       kfree(p_ent);
+                       return rc;
+               }
+
+               if (rc)
+                       goto spq_post_fail2;
+
+               /* return to pool */
+               qed_spq_return_entry(p_hwfn, p_ent);
+       }
+       return rc;
+
+spq_post_fail2:
+       spin_lock_bh(&p_spq->lock);
+       list_del(&p_ent->list);
+       qed_chain_return_produced(&p_spq->chain);
+
+spq_post_fail:
+       /* return to the free pool */
+       if (b_ret_ent)
+               __qed_spq_return_entry(p_hwfn, p_ent);
+       spin_unlock_bh(&p_spq->lock);
+
+       return rc;
+}
+
+int qed_spq_completion(struct qed_hwfn *p_hwfn,
+                      __le16 echo,
+                      u8 fw_return_code,
+                      union event_ring_data *p_data)
+{
+       struct qed_spq          *p_spq;
+       struct qed_spq_entry    *p_ent = NULL;
+       struct qed_spq_entry    *tmp;
+       struct qed_spq_entry    *found = NULL;
+       int                     rc;
+
+       if (!p_hwfn)
+               return -EINVAL;
+
+       p_spq = p_hwfn->p_spq;
+       if (!p_spq)
+               return -EINVAL;
+
+       spin_lock_bh(&p_spq->lock);
+       list_for_each_entry_safe(p_ent, tmp, &p_spq->completion_pending,
+                                list) {
+               if (p_ent->elem.hdr.echo == echo) {
+                       u16 pos = le16_to_cpu(echo) % SPQ_RING_SIZE;
+
+                       list_del(&p_ent->list);
+
+                       /* Avoid overriding of SPQ entries when getting
+                        * out-of-order completions, by marking the completions
+                        * in a bitmap and increasing the chain consumer only
+                        * for the first successive completed entries.
+                        */
+                       __set_bit(pos, p_spq->p_comp_bitmap);
+
+                       while (test_bit(p_spq->comp_bitmap_idx,
+                                       p_spq->p_comp_bitmap)) {
+                               __clear_bit(p_spq->comp_bitmap_idx,
+                                           p_spq->p_comp_bitmap);
+                               p_spq->comp_bitmap_idx++;
+                               qed_chain_return_produced(&p_spq->chain);
+                       }
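+                       /* E.g. if echo 5 completes before echo 4, bit 5 is
+                        * set but comp_bitmap_idx still points at 4, so no
+                        * element is returned yet; once echo 4 completes,
+                        * bits 4 and 5 are cleared and the chain credit is
+                        * returned twice, in order.
+                        */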
+
+                       p_spq->comp_count++;
+                       found = p_ent;
+                       break;
+               }
+
+               /* This is relatively uncommon - depends on scenarios
+                * which have multiple per-PF sent ramrods.
+                */
+               DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
+                          "Got completion for echo %04x - doesn't match echo %04x in completion pending list\n",
+                          le16_to_cpu(echo),
+                          le16_to_cpu(p_ent->elem.hdr.echo));
+       }
+
+       /* Release lock before callback, as callback may post
+        * an additional ramrod.
+        */
+       spin_unlock_bh(&p_spq->lock);
+
+       if (!found) {
+               DP_NOTICE(p_hwfn,
+                         "Failed to find an entry this EQE completes\n");
+               return -EEXIST;
+       }
+
+       DP_VERBOSE(p_hwfn, QED_MSG_SPQ, "Complete: func %p cookie %p)\n",
+                  p_ent->comp_cb.function, p_ent->comp_cb.cookie);
+       if (found->comp_cb.function)
+               found->comp_cb.function(p_hwfn, found->comp_cb.cookie, p_data,
+                                       fw_return_code);
+
+       if ((found->comp_mode != QED_SPQ_MODE_EBLOCK) ||
+           (found->queue == &p_spq->unlimited_pending))
+               /* EBLOCK  is responsible for returning its own entry into the
+                * free list, unless it originally added the entry into the
+                * unlimited pending list.
+                */
+               qed_spq_return_entry(p_hwfn, found);
+
+       /* Attempt to post pending requests */
+       spin_lock_bh(&p_spq->lock);
+       rc = qed_spq_pend_post(p_hwfn);
+       spin_unlock_bh(&p_spq->lock);
+
+       return rc;
+}
+
+struct qed_consq *qed_consq_alloc(struct qed_hwfn *p_hwfn)
+{
+       struct qed_consq *p_consq;
+
+       /* Allocate ConsQ struct */
+       p_consq = kzalloc(sizeof(*p_consq), GFP_KERNEL);
+       if (!p_consq) {
+               DP_NOTICE(p_hwfn, "Failed to allocate `struct qed_consq'\n");
+               return NULL;
+       }
+
+       /* Allocate and initialize ConsQ chain */
+       if (qed_chain_alloc(p_hwfn->cdev,
+                           QED_CHAIN_USE_TO_PRODUCE,
+                           QED_CHAIN_MODE_PBL,
+                           QED_CHAIN_CNT_TYPE_U16,
+                           QED_CHAIN_PAGE_SIZE / 0x80,
+                           0x80, &p_consq->chain)) {
+               DP_NOTICE(p_hwfn, "Failed to allocate consq chain");
+               goto consq_allocate_fail;
+       }
+
+       return p_consq;
+
+consq_allocate_fail:
+       qed_consq_free(p_hwfn, p_consq);
+       return NULL;
+}
+
+void qed_consq_setup(struct qed_hwfn *p_hwfn,
+                    struct qed_consq *p_consq)
+{
+       qed_chain_reset(&p_consq->chain);
+}
+
+void qed_consq_free(struct qed_hwfn *p_hwfn,
+                   struct qed_consq *p_consq)
+{
+       if (!p_consq)
+               return;
+       qed_chain_free(p_hwfn->cdev, &p_consq->chain);
+       kfree(p_consq);
+}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.c b/drivers/net/ethernet/qlogic/qed/qed_sriov.c
new file mode 100644 (file)
index 0000000..15399da
--- /dev/null
@@ -0,0 +1,3859 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015 QLogic Corporation
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#include <linux/etherdevice.h>
+#include <linux/crc32.h>
+#include <linux/qed/qed_iov_if.h>
+#include "qed_cxt.h"
+#include "qed_hsi.h"
+#include "qed_hw.h"
+#include "qed_init_ops.h"
+#include "qed_int.h"
+#include "qed_mcp.h"
+#include "qed_reg_addr.h"
+#include "qed_sp.h"
+#include "qed_sriov.h"
+#include "qed_vf.h"
+
+/* IOV ramrods */
+static int qed_sp_vf_start(struct qed_hwfn *p_hwfn, struct qed_vf_info *p_vf)
+{
+       struct vf_start_ramrod_data *p_ramrod = NULL;
+       struct qed_spq_entry *p_ent = NULL;
+       struct qed_sp_init_data init_data;
+       int rc = -EINVAL;
+       u8 fp_minor;
+
+       /* Get SPQ entry */
+       memset(&init_data, 0, sizeof(init_data));
+       init_data.cid = qed_spq_get_cid(p_hwfn);
+       init_data.opaque_fid = p_vf->opaque_fid;
+       init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
+
+       rc = qed_sp_init_request(p_hwfn, &p_ent,
+                                COMMON_RAMROD_VF_START,
+                                PROTOCOLID_COMMON, &init_data);
+       if (rc)
+               return rc;
+
+       p_ramrod = &p_ent->ramrod.vf_start;
+
+       p_ramrod->vf_id = GET_FIELD(p_vf->concrete_fid, PXP_CONCRETE_FID_VFID);
+       p_ramrod->opaque_fid = cpu_to_le16(p_vf->opaque_fid);
+
+       switch (p_hwfn->hw_info.personality) {
+       case QED_PCI_ETH:
+               p_ramrod->personality = PERSONALITY_ETH;
+               break;
+       case QED_PCI_ETH_ROCE:
+               p_ramrod->personality = PERSONALITY_RDMA_AND_ETH;
+               break;
+       default:
+               DP_NOTICE(p_hwfn, "Unknown VF personality %d\n",
+                         p_hwfn->hw_info.personality);
+               return -EINVAL;
+       }
+
+       fp_minor = p_vf->acquire.vfdev_info.eth_fp_hsi_minor;
+       if (fp_minor > ETH_HSI_VER_MINOR) {
+               DP_VERBOSE(p_hwfn,
+                          QED_MSG_IOV,
+                          "VF [%d] - Requested fp hsi %02x.%02x which is slightly newer than PF's %02x.%02x; Configuring PFs version\n",
+                          p_vf->abs_vf_id,
+                          ETH_HSI_VER_MAJOR,
+                          fp_minor, ETH_HSI_VER_MAJOR, ETH_HSI_VER_MINOR);
+               fp_minor = ETH_HSI_VER_MINOR;
+       }
+
+       p_ramrod->hsi_fp_ver.major_ver_arr[ETH_VER_KEY] = ETH_HSI_VER_MAJOR;
+       p_ramrod->hsi_fp_ver.minor_ver_arr[ETH_VER_KEY] = fp_minor;
+
+       DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+                  "VF[%d] - Starting using HSI %02x.%02x\n",
+                  p_vf->abs_vf_id, ETH_HSI_VER_MAJOR, fp_minor);
+
+       return qed_spq_post(p_hwfn, p_ent, NULL);
+}
+
+static int qed_sp_vf_stop(struct qed_hwfn *p_hwfn,
+                         u32 concrete_vfid, u16 opaque_vfid)
+{
+       struct vf_stop_ramrod_data *p_ramrod = NULL;
+       struct qed_spq_entry *p_ent = NULL;
+       struct qed_sp_init_data init_data;
+       int rc = -EINVAL;
+
+       /* Get SPQ entry */
+       memset(&init_data, 0, sizeof(init_data));
+       init_data.cid = qed_spq_get_cid(p_hwfn);
+       init_data.opaque_fid = opaque_vfid;
+       init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
+
+       rc = qed_sp_init_request(p_hwfn, &p_ent,
+                                COMMON_RAMROD_VF_STOP,
+                                PROTOCOLID_COMMON, &init_data);
+       if (rc)
+               return rc;
+
+       p_ramrod = &p_ent->ramrod.vf_stop;
+
+       p_ramrod->vf_id = GET_FIELD(concrete_vfid, PXP_CONCRETE_FID_VFID);
+
+       return qed_spq_post(p_hwfn, p_ent, NULL);
+}
+
+bool qed_iov_is_valid_vfid(struct qed_hwfn *p_hwfn,
+                          int rel_vf_id, bool b_enabled_only)
+{
+       if (!p_hwfn->pf_iov_info) {
+               DP_NOTICE(p_hwfn->cdev, "No iov info\n");
+               return false;
+       }
+
+       if ((rel_vf_id >= p_hwfn->cdev->p_iov_info->total_vfs) ||
+           (rel_vf_id < 0))
+               return false;
+
+       if ((!p_hwfn->pf_iov_info->vfs_array[rel_vf_id].b_init) &&
+           b_enabled_only)
+               return false;
+
+       return true;
+}
+
+static struct qed_vf_info *qed_iov_get_vf_info(struct qed_hwfn *p_hwfn,
+                                              u16 relative_vf_id,
+                                              bool b_enabled_only)
+{
+       struct qed_vf_info *vf = NULL;
+
+       if (!p_hwfn->pf_iov_info) {
+               DP_NOTICE(p_hwfn->cdev, "No iov info\n");
+               return NULL;
+       }
+
+       if (qed_iov_is_valid_vfid(p_hwfn, relative_vf_id, b_enabled_only))
+               vf = &p_hwfn->pf_iov_info->vfs_array[relative_vf_id];
+       else
+               DP_ERR(p_hwfn, "qed_iov_get_vf_info: VF[%d] is not enabled\n",
+                      relative_vf_id);
+
+       return vf;
+}
+
+static bool qed_iov_validate_rxq(struct qed_hwfn *p_hwfn,
+                                struct qed_vf_info *p_vf, u16 rx_qid)
+{
+       if (rx_qid >= p_vf->num_rxqs)
+               DP_VERBOSE(p_hwfn,
+                          QED_MSG_IOV,
+                          "VF[0x%02x] - can't touch Rx queue[%04x]; Only 0x%04x are allocated\n",
+                          p_vf->abs_vf_id, rx_qid, p_vf->num_rxqs);
+       return rx_qid < p_vf->num_rxqs;
+}
+
+static bool qed_iov_validate_txq(struct qed_hwfn *p_hwfn,
+                                struct qed_vf_info *p_vf, u16 tx_qid)
+{
+       if (tx_qid >= p_vf->num_txqs)
+               DP_VERBOSE(p_hwfn,
+                          QED_MSG_IOV,
+                          "VF[0x%02x] - can't touch Tx queue[%04x]; Only 0x%04x are allocated\n",
+                          p_vf->abs_vf_id, tx_qid, p_vf->num_txqs);
+       return tx_qid < p_vf->num_txqs;
+}
+
+static bool qed_iov_validate_sb(struct qed_hwfn *p_hwfn,
+                               struct qed_vf_info *p_vf, u16 sb_idx)
+{
+       int i;
+
+       for (i = 0; i < p_vf->num_sbs; i++)
+               if (p_vf->igu_sbs[i] == sb_idx)
+                       return true;
+
+       DP_VERBOSE(p_hwfn,
+                  QED_MSG_IOV,
+                  "VF[0x%02x] - tried using sb_idx %04x which doesn't exist as one of its 0x%02x SBs\n",
+                  p_vf->abs_vf_id, sb_idx, p_vf->num_sbs);
+
+       return false;
+}
+
+int qed_iov_post_vf_bulletin(struct qed_hwfn *p_hwfn,
+                            int vfid, struct qed_ptt *p_ptt)
+{
+       struct qed_bulletin_content *p_bulletin;
+       int crc_size = sizeof(p_bulletin->crc);
+       struct qed_dmae_params params;
+       struct qed_vf_info *p_vf;
+
+       p_vf = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
+       if (!p_vf)
+               return -EINVAL;
+
+       if (!p_vf->vf_bulletin)
+               return -EINVAL;
+
+       p_bulletin = p_vf->bulletin.p_virt;
+
+       /* Increment bulletin board version and compute crc */
+       p_bulletin->version++;
+       p_bulletin->crc = crc32(0, (u8 *)p_bulletin + crc_size,
+                               p_vf->bulletin.size - crc_size);
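+       /* The CRC covers the entire bulletin except the crc field itself. */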
+
+       DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+                  "Posting Bulletin 0x%08x to VF[%d] (CRC 0x%08x)\n",
+                  p_bulletin->version, p_vf->relative_vf_id, p_bulletin->crc);
+
+       /* propagate bulletin board via dmae to vm memory */
+       memset(&params, 0, sizeof(params));
+       params.flags = QED_DMAE_FLAG_VF_DST;
+       params.dst_vfid = p_vf->abs_vf_id;
+       return qed_dmae_host2host(p_hwfn, p_ptt, p_vf->bulletin.phys,
+                                 p_vf->vf_bulletin, p_vf->bulletin.size / 4,
+                                 &params);
+}
+
+static int qed_iov_pci_cfg_info(struct qed_dev *cdev)
+{
+       struct qed_hw_sriov_info *iov = cdev->p_iov_info;
+       int pos = iov->pos;
+
+       DP_VERBOSE(cdev, QED_MSG_IOV, "sriov ext pos %d\n", pos);
+       pci_read_config_word(cdev->pdev, pos + PCI_SRIOV_CTRL, &iov->ctrl);
+
+       pci_read_config_word(cdev->pdev,
+                            pos + PCI_SRIOV_TOTAL_VF, &iov->total_vfs);
+       pci_read_config_word(cdev->pdev,
+                            pos + PCI_SRIOV_INITIAL_VF, &iov->initial_vfs);
+
+       pci_read_config_word(cdev->pdev, pos + PCI_SRIOV_NUM_VF, &iov->num_vfs);
+       if (iov->num_vfs) {
+               DP_VERBOSE(cdev,
+                          QED_MSG_IOV,
+                          "Number of VFs are already set to non-zero value. Ignoring PCI configuration value\n");
+               iov->num_vfs = 0;
+       }
+
+       pci_read_config_word(cdev->pdev,
+                            pos + PCI_SRIOV_VF_OFFSET, &iov->offset);
+
+       pci_read_config_word(cdev->pdev,
+                            pos + PCI_SRIOV_VF_STRIDE, &iov->stride);
+
+       pci_read_config_word(cdev->pdev,
+                            pos + PCI_SRIOV_VF_DID, &iov->vf_device_id);
+
+       pci_read_config_dword(cdev->pdev,
+                             pos + PCI_SRIOV_SUP_PGSIZE, &iov->pgsz);
+
+       pci_read_config_dword(cdev->pdev, pos + PCI_SRIOV_CAP, &iov->cap);
+
+       pci_read_config_byte(cdev->pdev, pos + PCI_SRIOV_FUNC_LINK, &iov->link);
+
+       DP_VERBOSE(cdev,
+                  QED_MSG_IOV,
+                  "IOV info: nres %d, cap 0x%x, ctrl 0x%x, total %d, initial %d, num vfs %d, offset %d, stride %d, page size 0x%x\n",
+                  iov->nres,
+                  iov->cap,
+                  iov->ctrl,
+                  iov->total_vfs,
+                  iov->initial_vfs,
+                  iov->nr_virtfn, iov->offset, iov->stride, iov->pgsz);
+
+       /* Some sanity checks */
+       if (iov->num_vfs > NUM_OF_VFS(cdev) ||
+           iov->total_vfs > NUM_OF_VFS(cdev)) {
+               /* This can happen only due to a bug. In this case we set
+                * num_vfs to zero to avoid memory corruption in the code that
+                * assumes max number of vfs
+                */
+               DP_NOTICE(cdev,
+                         "IOV: Unexpected number of vfs set: %d setting num_vf to zero\n",
+                         iov->num_vfs);
+
+               iov->num_vfs = 0;
+               iov->total_vfs = 0;
+       }
+
+       return 0;
+}
+
+static void qed_iov_clear_vf_igu_blocks(struct qed_hwfn *p_hwfn,
+                                       struct qed_ptt *p_ptt)
+{
+       struct qed_igu_block *p_sb;
+       u16 sb_id;
+       u32 val;
+
+       if (!p_hwfn->hw_info.p_igu_info) {
+               DP_ERR(p_hwfn,
+                      "qed_iov_clear_vf_igu_blocks IGU Info not initialized\n");
+               return;
+       }
+
+       for (sb_id = 0; sb_id < QED_MAPPING_MEMORY_SIZE(p_hwfn->cdev);
+            sb_id++) {
+               p_sb = &p_hwfn->hw_info.p_igu_info->igu_map.igu_blocks[sb_id];
+               if ((p_sb->status & QED_IGU_STATUS_FREE) &&
+                   !(p_sb->status & QED_IGU_STATUS_PF)) {
+                       val = qed_rd(p_hwfn, p_ptt,
+                                    IGU_REG_MAPPING_MEMORY + sb_id * 4);
+                       SET_FIELD(val, IGU_MAPPING_LINE_VALID, 0);
+                       qed_wr(p_hwfn, p_ptt,
+                              IGU_REG_MAPPING_MEMORY + 4 * sb_id, val);
+               }
+       }
+}
+
+static void qed_iov_setup_vfdb(struct qed_hwfn *p_hwfn)
+{
+       struct qed_hw_sriov_info *p_iov = p_hwfn->cdev->p_iov_info;
+       struct qed_pf_iov *p_iov_info = p_hwfn->pf_iov_info;
+       struct qed_bulletin_content *p_bulletin_virt;
+       dma_addr_t req_p, rply_p, bulletin_p;
+       union pfvf_tlvs *p_reply_virt_addr;
+       union vfpf_tlvs *p_req_virt_addr;
+       u8 idx = 0;
+
+       memset(p_iov_info->vfs_array, 0, sizeof(p_iov_info->vfs_array));
+
+       p_req_virt_addr = p_iov_info->mbx_msg_virt_addr;
+       req_p = p_iov_info->mbx_msg_phys_addr;
+       p_reply_virt_addr = p_iov_info->mbx_reply_virt_addr;
+       rply_p = p_iov_info->mbx_reply_phys_addr;
+       p_bulletin_virt = p_iov_info->p_bulletins;
+       bulletin_p = p_iov_info->bulletins_phys;
+       if (!p_req_virt_addr || !p_reply_virt_addr || !p_bulletin_virt) {
+               DP_ERR(p_hwfn,
+                      "qed_iov_setup_vfdb called without allocating mem first\n");
+               return;
+       }
+
+       for (idx = 0; idx < p_iov->total_vfs; idx++) {
+               struct qed_vf_info *vf = &p_iov_info->vfs_array[idx];
+               u32 concrete;
+
+               vf->vf_mbx.req_virt = p_req_virt_addr + idx;
+               vf->vf_mbx.req_phys = req_p + idx * sizeof(union vfpf_tlvs);
+               vf->vf_mbx.reply_virt = p_reply_virt_addr + idx;
+               vf->vf_mbx.reply_phys = rply_p + idx * sizeof(union pfvf_tlvs);
+
+               vf->state = VF_STOPPED;
+               vf->b_init = false;
+
+               vf->bulletin.phys = idx *
+                                   sizeof(struct qed_bulletin_content) +
+                                   bulletin_p;
+               vf->bulletin.p_virt = p_bulletin_virt + idx;
+               vf->bulletin.size = sizeof(struct qed_bulletin_content);
+
+               vf->relative_vf_id = idx;
+               vf->abs_vf_id = idx + p_iov->first_vf_in_pf;
+               concrete = qed_vfid_to_concrete(p_hwfn, vf->abs_vf_id);
+               vf->concrete_fid = concrete;
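+               /* opaque_fid: low byte inherited from the PF's opaque fid,
+                * high byte holds the VF's absolute id.
+                */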
+               vf->opaque_fid = (p_hwfn->hw_info.opaque_fid & 0xff) |
+                                (vf->abs_vf_id << 8);
+               vf->vport_id = idx + 1;
+
+               vf->num_mac_filters = QED_ETH_VF_NUM_MAC_FILTERS;
+               vf->num_vlan_filters = QED_ETH_VF_NUM_VLAN_FILTERS;
+       }
+}
+
+static int qed_iov_allocate_vfdb(struct qed_hwfn *p_hwfn)
+{
+       struct qed_pf_iov *p_iov_info = p_hwfn->pf_iov_info;
+       void **p_v_addr;
+       u16 num_vfs = 0;
+
+       num_vfs = p_hwfn->cdev->p_iov_info->total_vfs;
+
+       DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+                  "qed_iov_allocate_vfdb for %d VFs\n", num_vfs);
+
+       /* Allocate PF Mailbox buffer (per-VF) */
+       p_iov_info->mbx_msg_size = sizeof(union vfpf_tlvs) * num_vfs;
+       p_v_addr = &p_iov_info->mbx_msg_virt_addr;
+       *p_v_addr = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
+                                      p_iov_info->mbx_msg_size,
+                                      &p_iov_info->mbx_msg_phys_addr,
+                                      GFP_KERNEL);
+       if (!*p_v_addr)
+               return -ENOMEM;
+
+       /* Allocate PF Mailbox Reply buffer (per-VF) */
+       p_iov_info->mbx_reply_size = sizeof(union pfvf_tlvs) * num_vfs;
+       p_v_addr = &p_iov_info->mbx_reply_virt_addr;
+       *p_v_addr = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
+                                      p_iov_info->mbx_reply_size,
+                                      &p_iov_info->mbx_reply_phys_addr,
+                                      GFP_KERNEL);
+       if (!*p_v_addr)
+               return -ENOMEM;
+
+       p_iov_info->bulletins_size = sizeof(struct qed_bulletin_content) *
+                                    num_vfs;
+       p_v_addr = &p_iov_info->p_bulletins;
+       *p_v_addr = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
+                                      p_iov_info->bulletins_size,
+                                      &p_iov_info->bulletins_phys,
+                                      GFP_KERNEL);
+       if (!*p_v_addr)
+               return -ENOMEM;
+
+       DP_VERBOSE(p_hwfn,
+                  QED_MSG_IOV,
+                  "PF's Requests mailbox [%p virt 0x%llx phys],  Response mailbox [%p virt 0x%llx phys] Bulletins [%p virt 0x%llx phys]\n",
+                  p_iov_info->mbx_msg_virt_addr,
+                  (u64) p_iov_info->mbx_msg_phys_addr,
+                  p_iov_info->mbx_reply_virt_addr,
+                  (u64) p_iov_info->mbx_reply_phys_addr,
+                  p_iov_info->p_bulletins, (u64) p_iov_info->bulletins_phys);
+
+       return 0;
+}
+
+static void qed_iov_free_vfdb(struct qed_hwfn *p_hwfn)
+{
+       struct qed_pf_iov *p_iov_info = p_hwfn->pf_iov_info;
+
+       if (p_hwfn->pf_iov_info->mbx_msg_virt_addr)
+               dma_free_coherent(&p_hwfn->cdev->pdev->dev,
+                                 p_iov_info->mbx_msg_size,
+                                 p_iov_info->mbx_msg_virt_addr,
+                                 p_iov_info->mbx_msg_phys_addr);
+
+       if (p_hwfn->pf_iov_info->mbx_reply_virt_addr)
+               dma_free_coherent(&p_hwfn->cdev->pdev->dev,
+                                 p_iov_info->mbx_reply_size,
+                                 p_iov_info->mbx_reply_virt_addr,
+                                 p_iov_info->mbx_reply_phys_addr);
+
+       if (p_iov_info->p_bulletins)
+               dma_free_coherent(&p_hwfn->cdev->pdev->dev,
+                                 p_iov_info->bulletins_size,
+                                 p_iov_info->p_bulletins,
+                                 p_iov_info->bulletins_phys);
+}
+
+int qed_iov_alloc(struct qed_hwfn *p_hwfn)
+{
+       struct qed_pf_iov *p_sriov;
+
+       if (!IS_PF_SRIOV(p_hwfn)) {
+               DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+                          "No SR-IOV - no need for IOV db\n");
+               return 0;
+       }
+
+       p_sriov = kzalloc(sizeof(*p_sriov), GFP_KERNEL);
+       if (!p_sriov) {
+               DP_NOTICE(p_hwfn, "Failed to allocate `struct qed_sriov'\n");
+               return -ENOMEM;
+       }
+
+       p_hwfn->pf_iov_info = p_sriov;
+
+       return qed_iov_allocate_vfdb(p_hwfn);
+}
+
+void qed_iov_setup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+{
+       if (!IS_PF_SRIOV(p_hwfn) || !IS_PF_SRIOV_ALLOC(p_hwfn))
+               return;
+
+       qed_iov_setup_vfdb(p_hwfn);
+       qed_iov_clear_vf_igu_blocks(p_hwfn, p_ptt);
+}
+
+void qed_iov_free(struct qed_hwfn *p_hwfn)
+{
+       if (IS_PF_SRIOV_ALLOC(p_hwfn)) {
+               qed_iov_free_vfdb(p_hwfn);
+               kfree(p_hwfn->pf_iov_info);
+       }
+}
+
+void qed_iov_free_hw_info(struct qed_dev *cdev)
+{
+       kfree(cdev->p_iov_info);
+       cdev->p_iov_info = NULL;
+}
+
+int qed_iov_hw_info(struct qed_hwfn *p_hwfn)
+{
+       struct qed_dev *cdev = p_hwfn->cdev;
+       int pos;
+       int rc;
+
+       if (IS_VF(p_hwfn->cdev))
+               return 0;
+
+       /* Learn the PCI configuration */
+       pos = pci_find_ext_capability(p_hwfn->cdev->pdev,
+                                     PCI_EXT_CAP_ID_SRIOV);
+       if (!pos) {
+               DP_VERBOSE(p_hwfn, QED_MSG_IOV, "No PCIe IOV support\n");
+               return 0;
+       }
+
+       /* Allocate a new struct for IOV information */
+       cdev->p_iov_info = kzalloc(sizeof(*cdev->p_iov_info), GFP_KERNEL);
+       if (!cdev->p_iov_info) {
+               DP_NOTICE(p_hwfn, "Can't support IOV due to lack of memory\n");
+               return -ENOMEM;
+       }
+       cdev->p_iov_info->pos = pos;
+
+       rc = qed_iov_pci_cfg_info(cdev);
+       if (rc)
+               return rc;
+
+       /* We want PF IOV to be synonymous with the existence of p_iov_info;
+        * In case the capability is published but there are no VFs, simply
+        * de-allocate the struct.
+        */
+       if (!cdev->p_iov_info->total_vfs) {
+               DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+                          "IOV capabilities, but no VFs are published\n");
+               kfree(cdev->p_iov_info);
+               cdev->p_iov_info = NULL;
+               return 0;
+       }
+
+       /* Calculate the first VF index - this is a bit tricky; Basically,
+        * VFs start at offset 16 relative to PF0, and 2nd engine VFs begin
+        * after the first engine's VFs.
+        */
+       cdev->p_iov_info->first_vf_in_pf = p_hwfn->cdev->p_iov_info->offset +
+                                          p_hwfn->abs_pf_id - 16;
+       if (QED_PATH_ID(p_hwfn))
+               cdev->p_iov_info->first_vf_in_pf -= MAX_NUM_VFS_BB;
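+       /* E.g., with a (hypothetical) SR-IOV offset of 16 on PF 2 of the
+        * first engine: first_vf_in_pf = 16 + 2 - 16 = 2. On the second
+        * engine, MAX_NUM_VFS_BB is subtracted to skip engine-0 VFs.
+        */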
+
+       DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+                  "First VF in hwfn 0x%08x\n",
+                  cdev->p_iov_info->first_vf_in_pf);
+
+       return 0;
+}
+
+static bool qed_iov_pf_sanity_check(struct qed_hwfn *p_hwfn, int vfid)
+{
+       /* Check PF supports sriov */
+       if (IS_VF(p_hwfn->cdev) || !IS_QED_SRIOV(p_hwfn->cdev) ||
+           !IS_PF_SRIOV_ALLOC(p_hwfn))
+               return false;
+
+       /* Check VF validity */
+       if (!qed_iov_is_valid_vfid(p_hwfn, vfid, true))
+               return false;
+
+       return true;
+}
+
+static void qed_iov_set_vf_to_disable(struct qed_dev *cdev,
+                                     u16 rel_vf_id, u8 to_disable)
+{
+       struct qed_vf_info *vf;
+       int i;
+
+       for_each_hwfn(cdev, i) {
+               struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+
+               vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, false);
+               if (!vf)
+                       continue;
+
+               vf->to_disable = to_disable;
+       }
+}
+
+void qed_iov_set_vfs_to_disable(struct qed_dev *cdev, u8 to_disable)
+{
+       u16 i;
+
+       if (!IS_QED_SRIOV(cdev))
+               return;
+
+       for (i = 0; i < cdev->p_iov_info->total_vfs; i++)
+               qed_iov_set_vf_to_disable(cdev, i, to_disable);
+}
+
+static void qed_iov_vf_pglue_clear_err(struct qed_hwfn *p_hwfn,
+                                      struct qed_ptt *p_ptt, u8 abs_vfid)
+{
+       qed_wr(p_hwfn, p_ptt,
+              PGLUE_B_REG_WAS_ERROR_VF_31_0_CLR + (abs_vfid >> 5) * 4,
+              1 << (abs_vfid & 0x1f));
+}
+
+static void qed_iov_vf_igu_reset(struct qed_hwfn *p_hwfn,
+                                struct qed_ptt *p_ptt, struct qed_vf_info *vf)
+{
+       int i;
+
+       /* Set VF masks and configuration - pretend */
+       qed_fid_pretend(p_hwfn, p_ptt, (u16) vf->concrete_fid);
+
+       qed_wr(p_hwfn, p_ptt, IGU_REG_STATISTIC_NUM_VF_MSG_SENT, 0);
+
+       /* unpretend */
+       qed_fid_pretend(p_hwfn, p_ptt, (u16) p_hwfn->hw_info.concrete_fid);
+
+       /* iterate over all queues, clear sb consumer */
+       for (i = 0; i < vf->num_sbs; i++)
+               qed_int_igu_init_pure_rt_single(p_hwfn, p_ptt,
+                                               vf->igu_sbs[i],
+                                               vf->opaque_fid, true);
+}
+
+static void qed_iov_vf_igu_set_int(struct qed_hwfn *p_hwfn,
+                                  struct qed_ptt *p_ptt,
+                                  struct qed_vf_info *vf, bool enable)
+{
+       u32 igu_vf_conf;
+
+       qed_fid_pretend(p_hwfn, p_ptt, (u16) vf->concrete_fid);
+
+       igu_vf_conf = qed_rd(p_hwfn, p_ptt, IGU_REG_VF_CONFIGURATION);
+
+       if (enable)
+               igu_vf_conf |= IGU_VF_CONF_MSI_MSIX_EN;
+       else
+               igu_vf_conf &= ~IGU_VF_CONF_MSI_MSIX_EN;
+
+       qed_wr(p_hwfn, p_ptt, IGU_REG_VF_CONFIGURATION, igu_vf_conf);
+
+       /* unpretend */
+       qed_fid_pretend(p_hwfn, p_ptt, (u16) p_hwfn->hw_info.concrete_fid);
+}
+
+static int qed_iov_enable_vf_access(struct qed_hwfn *p_hwfn,
+                                   struct qed_ptt *p_ptt,
+                                   struct qed_vf_info *vf)
+{
+       u32 igu_vf_conf = IGU_VF_CONF_FUNC_EN;
+       int rc;
+
+       if (vf->to_disable)
+               return 0;
+
+       DP_VERBOSE(p_hwfn,
+                  QED_MSG_IOV,
+                  "Enable internal access for vf %x [abs %x]\n",
+                  vf->abs_vf_id, QED_VF_ABS_ID(p_hwfn, vf));
+
+       qed_iov_vf_pglue_clear_err(p_hwfn, p_ptt, QED_VF_ABS_ID(p_hwfn, vf));
+
+       qed_iov_vf_igu_reset(p_hwfn, p_ptt, vf);
+
+       rc = qed_mcp_config_vf_msix(p_hwfn, p_ptt, vf->abs_vf_id, vf->num_sbs);
+       if (rc)
+               return rc;
+
+       qed_fid_pretend(p_hwfn, p_ptt, (u16) vf->concrete_fid);
+
+       SET_FIELD(igu_vf_conf, IGU_VF_CONF_PARENT, p_hwfn->rel_pf_id);
+       STORE_RT_REG(p_hwfn, IGU_REG_VF_CONFIGURATION_RT_OFFSET, igu_vf_conf);
+
+       qed_init_run(p_hwfn, p_ptt, PHASE_VF, vf->abs_vf_id,
+                    p_hwfn->hw_info.hw_mode);
+
+       /* unpretend */
+       qed_fid_pretend(p_hwfn, p_ptt, (u16) p_hwfn->hw_info.concrete_fid);
+
+       vf->state = VF_FREE;
+
+       return rc;
+}
+
+/**
+ * @brief qed_iov_config_perm_table - configure the permission
+ *      zone table.
+ *      In E4, queue zone permission table size is 320x9. There
+ *      are 320 VF queues for single engine device (256 for dual
+ *      engine device), and each entry has the following format:
+ *      {Valid, VF[7:0]}
+ * @param p_hwfn
+ * @param p_ptt
+ * @param vf
+ * @param enable
+ */
+static void qed_iov_config_perm_table(struct qed_hwfn *p_hwfn,
+                                     struct qed_ptt *p_ptt,
+                                     struct qed_vf_info *vf, u8 enable)
+{
+       u32 reg_addr, val;
+       u16 qzone_id = 0;
+       int qid;
+
+       for (qid = 0; qid < vf->num_rxqs; qid++) {
+               qed_fw_l2_queue(p_hwfn, vf->vf_queues[qid].fw_rx_qid,
+                               &qzone_id);
+
+               reg_addr = PSWHST_REG_ZONE_PERMISSION_TABLE + qzone_id * 4;
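+               /* bit 8 is the Valid bit, bits [7:0] carry the VF id */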
+               val = enable ? (vf->abs_vf_id | (1 << 8)) : 0;
+               qed_wr(p_hwfn, p_ptt, reg_addr, val);
+       }
+}
+
+static void qed_iov_enable_vf_traffic(struct qed_hwfn *p_hwfn,
+                                     struct qed_ptt *p_ptt,
+                                     struct qed_vf_info *vf)
+{
+       /* Reset vf in IGU - interrupts are still disabled */
+       qed_iov_vf_igu_reset(p_hwfn, p_ptt, vf);
+
+       qed_iov_vf_igu_set_int(p_hwfn, p_ptt, vf, 1);
+
+       /* Permission Table */
+       qed_iov_config_perm_table(p_hwfn, p_ptt, vf, true);
+}
+
+static u8 qed_iov_alloc_vf_igu_sbs(struct qed_hwfn *p_hwfn,
+                                  struct qed_ptt *p_ptt,
+                                  struct qed_vf_info *vf, u16 num_rx_queues)
+{
+       struct qed_igu_block *igu_blocks;
+       int qid = 0, igu_id = 0;
+       u32 val = 0;
+
+       igu_blocks = p_hwfn->hw_info.p_igu_info->igu_map.igu_blocks;
+
+       if (num_rx_queues > p_hwfn->hw_info.p_igu_info->free_blks)
+               num_rx_queues = p_hwfn->hw_info.p_igu_info->free_blks;
+       p_hwfn->hw_info.p_igu_info->free_blks -= num_rx_queues;
+
+       SET_FIELD(val, IGU_MAPPING_LINE_FUNCTION_NUMBER, vf->abs_vf_id);
+       SET_FIELD(val, IGU_MAPPING_LINE_VALID, 1);
+       SET_FIELD(val, IGU_MAPPING_LINE_PF_VALID, 0);
+
+       while ((qid < num_rx_queues) &&
+              (igu_id < QED_MAPPING_MEMORY_SIZE(p_hwfn->cdev))) {
+               if (igu_blocks[igu_id].status & QED_IGU_STATUS_FREE) {
+                       struct cau_sb_entry sb_entry;
+
+                       vf->igu_sbs[qid] = (u16)igu_id;
+                       igu_blocks[igu_id].status &= ~QED_IGU_STATUS_FREE;
+
+                       SET_FIELD(val, IGU_MAPPING_LINE_VECTOR_NUMBER, qid);
+
+                       qed_wr(p_hwfn, p_ptt,
+                              IGU_REG_MAPPING_MEMORY + sizeof(u32) * igu_id,
+                              val);
+
+                       /* Configure the IGU SB in CAU that was marked valid */
+                       qed_init_cau_sb_entry(p_hwfn, &sb_entry,
+                                             p_hwfn->rel_pf_id,
+                                             vf->abs_vf_id, 1);
+                       qed_dmae_host2grc(p_hwfn, p_ptt,
+                                         (u64)(uintptr_t)&sb_entry,
+                                         CAU_REG_SB_VAR_MEMORY +
+                                         igu_id * sizeof(u64), 2, 0);
+                       qid++;
+               }
+               igu_id++;
+       }
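+       /* Each IGU block claimed above is now mapped to this VF (function
+        * number = abs_vf_id, vector = qid) and its CAU SB entry written
+        * through DMA to the CAU variable memory.
+        */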
+
+       vf->num_sbs = (u8) num_rx_queues;
+
+       return vf->num_sbs;
+}
+
+static void qed_iov_free_vf_igu_sbs(struct qed_hwfn *p_hwfn,
+                                   struct qed_ptt *p_ptt,
+                                   struct qed_vf_info *vf)
+{
+       struct qed_igu_info *p_info = p_hwfn->hw_info.p_igu_info;
+       int idx, igu_id;
+       u32 addr, val;
+
+       /* Invalidate igu CAM lines and mark them as free */
+       for (idx = 0; idx < vf->num_sbs; idx++) {
+               igu_id = vf->igu_sbs[idx];
+               addr = IGU_REG_MAPPING_MEMORY + sizeof(u32) * igu_id;
+
+               val = qed_rd(p_hwfn, p_ptt, addr);
+               SET_FIELD(val, IGU_MAPPING_LINE_VALID, 0);
+               qed_wr(p_hwfn, p_ptt, addr, val);
+
+               p_info->igu_map.igu_blocks[igu_id].status |=
+                   QED_IGU_STATUS_FREE;
+
+               p_hwfn->hw_info.p_igu_info->free_blks++;
+       }
+
+       vf->num_sbs = 0;
+}
+
+static int qed_iov_init_hw_for_vf(struct qed_hwfn *p_hwfn,
+                                 struct qed_ptt *p_ptt,
+                                 u16 rel_vf_id, u16 num_rx_queues)
+{
+       u8 num_of_vf_available_chains = 0;
+       struct qed_vf_info *vf = NULL;
+       int rc = 0;
+       u32 cids;
+       u8 i;
+
+       vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, false);
+       if (!vf) {
+               DP_ERR(p_hwfn, "qed_iov_init_hw_for_vf : vf is NULL\n");
+               return -EINVAL;
+       }
+
+       if (vf->b_init) {
+               DP_NOTICE(p_hwfn, "VF[%d] is already active.\n", rel_vf_id);
+               return -EINVAL;
+       }
+
+       /* Limit number of queues according to number of CIDs */
+       qed_cxt_get_proto_cid_count(p_hwfn, PROTOCOLID_ETH, &cids);
+       DP_VERBOSE(p_hwfn,
+                  QED_MSG_IOV,
+                  "VF[%d] - requesting to initialize for 0x%04x queues [0x%04x CIDs available]\n",
+                  vf->relative_vf_id, num_rx_queues, (u16) cids);
+       num_rx_queues = min_t(u16, num_rx_queues, ((u16) cids));
+
+       num_of_vf_available_chains = qed_iov_alloc_vf_igu_sbs(p_hwfn,
+                                                             p_ptt,
+                                                             vf,
+                                                             num_rx_queues);
+       if (!num_of_vf_available_chains) {
+               DP_ERR(p_hwfn, "no available igu sbs\n");
+               return -ENOMEM;
+       }
+
+       /* Choose queue number and index ranges */
+       vf->num_rxqs = num_of_vf_available_chains;
+       vf->num_txqs = num_of_vf_available_chains;
+
+       for (i = 0; i < vf->num_rxqs; i++) {
+               u16 queue_id = qed_int_queue_id_from_sb_id(p_hwfn,
+                                                          vf->igu_sbs[i]);
+
+               if (queue_id > RESC_NUM(p_hwfn, QED_L2_QUEUE)) {
+                       DP_NOTICE(p_hwfn,
+                                 "VF[%d] will require utilizing of out-of-bounds queues - %04x\n",
+                                 vf->relative_vf_id, queue_id);
+                       return -EINVAL;
+               }
+
+               /* CIDs are per-VF, so no problem having them 0-based. */
+               vf->vf_queues[i].fw_rx_qid = queue_id;
+               vf->vf_queues[i].fw_tx_qid = queue_id;
+               vf->vf_queues[i].fw_cid = i;
+
+               DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+                          "VF[%d] - [%d] SB %04x, Tx/Rx queue %04x CID %04x\n",
+                          vf->relative_vf_id, i, vf->igu_sbs[i], queue_id, i);
+       }
+       rc = qed_iov_enable_vf_access(p_hwfn, p_ptt, vf);
+       if (!rc) {
+               vf->b_init = true;
+
+               if (IS_LEAD_HWFN(p_hwfn))
+                       p_hwfn->cdev->p_iov_info->num_vfs++;
+       }
+
+       return rc;
+}
+
+static void qed_iov_set_link(struct qed_hwfn *p_hwfn,
+                            u16 vfid,
+                            struct qed_mcp_link_params *params,
+                            struct qed_mcp_link_state *link,
+                            struct qed_mcp_link_capabilities *p_caps)
+{
+       struct qed_vf_info *p_vf = qed_iov_get_vf_info(p_hwfn,
+                                                      vfid,
+                                                      false);
+       struct qed_bulletin_content *p_bulletin;
+
+       if (!p_vf)
+               return;
+
+       p_bulletin = p_vf->bulletin.p_virt;
+       p_bulletin->req_autoneg = params->speed.autoneg;
+       p_bulletin->req_adv_speed = params->speed.advertised_speeds;
+       p_bulletin->req_forced_speed = params->speed.forced_speed;
+       p_bulletin->req_autoneg_pause = params->pause.autoneg;
+       p_bulletin->req_forced_rx = params->pause.forced_rx;
+       p_bulletin->req_forced_tx = params->pause.forced_tx;
+       p_bulletin->req_loopback = params->loopback_mode;
+
+       p_bulletin->link_up = link->link_up;
+       p_bulletin->speed = link->speed;
+       p_bulletin->full_duplex = link->full_duplex;
+       p_bulletin->autoneg = link->an;
+       p_bulletin->autoneg_complete = link->an_complete;
+       p_bulletin->parallel_detection = link->parallel_detection;
+       p_bulletin->pfc_enabled = link->pfc_enabled;
+       p_bulletin->partner_adv_speed = link->partner_adv_speed;
+       p_bulletin->partner_tx_flow_ctrl_en = link->partner_tx_flow_ctrl_en;
+       p_bulletin->partner_rx_flow_ctrl_en = link->partner_rx_flow_ctrl_en;
+       p_bulletin->partner_adv_pause = link->partner_adv_pause;
+       p_bulletin->sfp_tx_fault = link->sfp_tx_fault;
+
+       p_bulletin->capability_speed = p_caps->speed_capabilities;
+}
+
+static int qed_iov_release_hw_for_vf(struct qed_hwfn *p_hwfn,
+                                    struct qed_ptt *p_ptt, u16 rel_vf_id)
+{
+       struct qed_mcp_link_capabilities caps;
+       struct qed_mcp_link_params params;
+       struct qed_mcp_link_state link;
+       struct qed_vf_info *vf = NULL;
+
+       vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, true);
+       if (!vf) {
+               DP_ERR(p_hwfn, "qed_iov_release_hw_for_vf : vf is NULL\n");
+               return -EINVAL;
+       }
+
+       if (vf->bulletin.p_virt)
+               memset(vf->bulletin.p_virt, 0, sizeof(*vf->bulletin.p_virt));
+
+       memset(&vf->p_vf_info, 0, sizeof(vf->p_vf_info));
+
+       /* Get the link configuration back in bulletin so
+        * that when VFs are re-enabled they get the actual
+        * link configuration.
+        */
+       memcpy(&params, qed_mcp_get_link_params(p_hwfn), sizeof(params));
+       memcpy(&link, qed_mcp_get_link_state(p_hwfn), sizeof(link));
+       memcpy(&caps, qed_mcp_get_link_capabilities(p_hwfn), sizeof(caps));
+       qed_iov_set_link(p_hwfn, rel_vf_id, &params, &link, &caps);
+
+       /* Forget the VF's acquisition message */
+       memset(&vf->acquire, 0, sizeof(vf->acquire));
+
+       /* Disabling interrupts and resetting the permission table were done
+        * during vf-close; however, we could get here without going through
+        * vf_close.
+        */
+       /* Disable Interrupts for VF */
+       qed_iov_vf_igu_set_int(p_hwfn, p_ptt, vf, 0);
+
+       /* Reset Permission table */
+       qed_iov_config_perm_table(p_hwfn, p_ptt, vf, 0);
+
+       vf->num_rxqs = 0;
+       vf->num_txqs = 0;
+       qed_iov_free_vf_igu_sbs(p_hwfn, p_ptt, vf);
+
+       if (vf->b_init) {
+               vf->b_init = false;
+
+               if (IS_LEAD_HWFN(p_hwfn))
+                       p_hwfn->cdev->p_iov_info->num_vfs--;
+       }
+
+       return 0;
+}
+
+static bool qed_iov_tlv_supported(u16 tlvtype)
+{
+       return CHANNEL_TLV_NONE < tlvtype && tlvtype < CHANNEL_TLV_MAX;
+}
+
+/* place a given tlv on the tlv buffer, continuing current tlv list */
+void *qed_add_tlv(struct qed_hwfn *p_hwfn, u8 **offset, u16 type, u16 length)
+{
+       struct channel_tlv *tl = (struct channel_tlv *)*offset;
+
+       tl->type = type;
+       tl->length = length;
+
+       /* Offset should keep pointing to next TLV (the end of the last) */
+       *offset += length;
+
+       /* Return a pointer to the start of the added tlv */
+       return *offset - length;
+}
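+
+/* A reply is typically built by pointing the offset at the start of the
+ * reply buffer, adding the response TLV(s) and then a terminating
+ * CHANNEL_TLV_LIST_END entry; see qed_iov_prepare_resp() below.
+ */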
+
+/* list the types and lengths of the tlvs on the buffer */
+void qed_dp_tlv_list(struct qed_hwfn *p_hwfn, void *tlvs_list)
+{
+       u16 i = 1, total_length = 0;
+       struct channel_tlv *tlv;
+
+       do {
+               tlv = (struct channel_tlv *)((u8 *)tlvs_list + total_length);
+
+               /* output tlv */
+               DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+                          "TLV number %d: type %d, length %d\n",
+                          i, tlv->type, tlv->length);
+
+               if (tlv->type == CHANNEL_TLV_LIST_END)
+                       return;
+
+               /* Validate entry - protect against malicious VFs */
+               if (!tlv->length) {
+                       DP_NOTICE(p_hwfn, "TLV of length 0 found\n");
+                       return;
+               }
+
+               total_length += tlv->length;
+
+               if (total_length >= sizeof(struct tlv_buffer_size)) {
+                       DP_NOTICE(p_hwfn, "TLV ==> Buffer overflow\n");
+                       return;
+               }
+
+               i++;
+       } while (1);
+}
+
+static void qed_iov_send_response(struct qed_hwfn *p_hwfn,
+                                 struct qed_ptt *p_ptt,
+                                 struct qed_vf_info *p_vf,
+                                 u16 length, u8 status)
+{
+       struct qed_iov_vf_mbx *mbx = &p_vf->vf_mbx;
+       struct qed_dmae_params params;
+       u8 eng_vf_id;
+
+       mbx->reply_virt->default_resp.hdr.status = status;
+
+       qed_dp_tlv_list(p_hwfn, mbx->reply_virt);
+
+       eng_vf_id = p_vf->abs_vf_id;
+
+       memset(&params, 0, sizeof(struct qed_dmae_params));
+       params.flags = QED_DMAE_FLAG_VF_DST;
+       params.dst_vfid = eng_vf_id;
+
+       qed_dmae_host2host(p_hwfn, p_ptt, mbx->reply_phys + sizeof(u64),
+                          mbx->req_virt->first_tlv.reply_address +
+                          sizeof(u64),
+                          (sizeof(union pfvf_tlvs) - sizeof(u64)) / 4,
+                          &params);
+
+       qed_dmae_host2host(p_hwfn, p_ptt, mbx->reply_phys,
+                          mbx->req_virt->first_tlv.reply_address,
+                          sizeof(u64) / 4, &params);
+
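+       /* Finally, raise this VF's channel-ready flag in USTORM RAM to
+        * signal that the reply DMAed above is complete.
+        */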
+       REG_WR(p_hwfn,
+              GTT_BAR0_MAP_REG_USDM_RAM +
+              USTORM_VF_PF_CHANNEL_READY_OFFSET(eng_vf_id), 1);
+}
+
+static u16 qed_iov_vport_to_tlv(struct qed_hwfn *p_hwfn,
+                               enum qed_iov_vport_update_flag flag)
+{
+       switch (flag) {
+       case QED_IOV_VP_UPDATE_ACTIVATE:
+               return CHANNEL_TLV_VPORT_UPDATE_ACTIVATE;
+       case QED_IOV_VP_UPDATE_VLAN_STRIP:
+               return CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP;
+       case QED_IOV_VP_UPDATE_TX_SWITCH:
+               return CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH;
+       case QED_IOV_VP_UPDATE_MCAST:
+               return CHANNEL_TLV_VPORT_UPDATE_MCAST;
+       case QED_IOV_VP_UPDATE_ACCEPT_PARAM:
+               return CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM;
+       case QED_IOV_VP_UPDATE_RSS:
+               return CHANNEL_TLV_VPORT_UPDATE_RSS;
+       case QED_IOV_VP_UPDATE_ACCEPT_ANY_VLAN:
+               return CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN;
+       case QED_IOV_VP_UPDATE_SGE_TPA:
+               return CHANNEL_TLV_VPORT_UPDATE_SGE_TPA;
+       default:
+               return 0;
+       }
+}
+
+static u16 qed_iov_prep_vp_update_resp_tlvs(struct qed_hwfn *p_hwfn,
+                                           struct qed_vf_info *p_vf,
+                                           struct qed_iov_vf_mbx *p_mbx,
+                                           u8 status,
+                                           u16 tlvs_mask, u16 tlvs_accepted)
+{
+       struct pfvf_def_resp_tlv *resp;
+       u16 size, total_len, i;
+
+       memset(p_mbx->reply_virt, 0, sizeof(union pfvf_tlvs));
+       p_mbx->offset = (u8 *)p_mbx->reply_virt;
+       size = sizeof(struct pfvf_def_resp_tlv);
+       total_len = size;
+
+       qed_add_tlv(p_hwfn, &p_mbx->offset, CHANNEL_TLV_VPORT_UPDATE, size);
+
+       /* Prepare response for all extended tlvs if they are found by PF */
+       for (i = 0; i < QED_IOV_VP_UPDATE_MAX; i++) {
+               if (!(tlvs_mask & (1 << i)))
+                       continue;
+
+               resp = qed_add_tlv(p_hwfn, &p_mbx->offset,
+                                  qed_iov_vport_to_tlv(p_hwfn, i), size);
+
+               if (tlvs_accepted & (1 << i))
+                       resp->hdr.status = status;
+               else
+                       resp->hdr.status = PFVF_STATUS_NOT_SUPPORTED;
+
+               DP_VERBOSE(p_hwfn,
+                          QED_MSG_IOV,
+                          "VF[%d] - vport_update response: TLV %d, status %02x\n",
+                          p_vf->relative_vf_id,
+                          qed_iov_vport_to_tlv(p_hwfn, i), resp->hdr.status);
+
+               total_len += size;
+       }
+
+       qed_add_tlv(p_hwfn, &p_mbx->offset, CHANNEL_TLV_LIST_END,
+                   sizeof(struct channel_list_end_tlv));
+
+       return total_len;
+}
+
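+/* build a minimal reply consisting of a single TLV of the given type plus a
+ * list-end TLV, and send it back to the VF
+ */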
+static void qed_iov_prepare_resp(struct qed_hwfn *p_hwfn,
+                                struct qed_ptt *p_ptt,
+                                struct qed_vf_info *vf_info,
+                                u16 type, u16 length, u8 status)
+{
+       struct qed_iov_vf_mbx *mbx = &vf_info->vf_mbx;
+
+       mbx->offset = (u8 *)mbx->reply_virt;
+
+       qed_add_tlv(p_hwfn, &mbx->offset, type, length);
+       qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END,
+                   sizeof(struct channel_list_end_tlv));
+
+       qed_iov_send_response(p_hwfn, p_ptt, vf_info, length, status);
+}
+
+struct qed_public_vf_info *qed_iov_get_public_vf_info(struct qed_hwfn *p_hwfn,
+                                                     u16 relative_vf_id,
+                                                     bool b_enabled_only)
+{
+       struct qed_vf_info *vf = NULL;
+
+       vf = qed_iov_get_vf_info(p_hwfn, relative_vf_id, b_enabled_only);
+       if (!vf)
+               return NULL;
+
+       return &vf->p_vf_info;
+}
+
+void qed_iov_clean_vf(struct qed_hwfn *p_hwfn, u8 vfid)
+{
+       struct qed_public_vf_info *vf_info;
+
+       vf_info = qed_iov_get_public_vf_info(p_hwfn, vfid, false);
+
+       if (!vf_info)
+               return;
+
+       /* Clear the VF mac */
+       memset(vf_info->mac, 0, ETH_ALEN);
+}
+
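+/* reset the VF's runtime state (queue counts, shadow filters and the stored
+ * acquire request) back to its defaults
+ */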
+static void qed_iov_vf_cleanup(struct qed_hwfn *p_hwfn,
+                              struct qed_vf_info *p_vf)
+{
+       u32 i;
+
+       p_vf->vf_bulletin = 0;
+       p_vf->vport_instance = 0;
+       p_vf->configured_features = 0;
+
+       /* If VF previously requested less resources, go back to default */
+       p_vf->num_rxqs = p_vf->num_sbs;
+       p_vf->num_txqs = p_vf->num_sbs;
+
+       p_vf->num_active_rxqs = 0;
+
+       for (i = 0; i < QED_MAX_VF_CHAINS_PER_PF; i++)
+               p_vf->vf_queues[i].rxq_active = 0;
+
+       memset(&p_vf->shadow_config, 0, sizeof(p_vf->shadow_config));
+       memset(&p_vf->acquire, 0, sizeof(p_vf->acquire));
+       qed_iov_clean_vf(p_hwfn, p_vf->relative_vf_id);
+}
+
+static u8 qed_iov_vf_mbx_acquire_resc(struct qed_hwfn *p_hwfn,
+                                     struct qed_ptt *p_ptt,
+                                     struct qed_vf_info *p_vf,
+                                     struct vf_pf_resc_request *p_req,
+                                     struct pf_vf_resc *p_resp)
+{
+       int i;
+
+       /* Queue related information */
+       p_resp->num_rxqs = p_vf->num_rxqs;
+       p_resp->num_txqs = p_vf->num_txqs;
+       p_resp->num_sbs = p_vf->num_sbs;
+
+       for (i = 0; i < p_resp->num_sbs; i++) {
+               p_resp->hw_sbs[i].hw_sb_id = p_vf->igu_sbs[i];
+               p_resp->hw_sbs[i].sb_qid = 0;
+       }
+
+       /* These fields are filled for backward compatibility.
+        * Unused by modern vfs.
+        */
+       for (i = 0; i < p_resp->num_rxqs; i++) {
+               qed_fw_l2_queue(p_hwfn, p_vf->vf_queues[i].fw_rx_qid,
+                               (u16 *)&p_resp->hw_qid[i]);
+               p_resp->cid[i] = p_vf->vf_queues[i].fw_cid;
+       }
+
+       /* Filter related information */
+       p_resp->num_mac_filters = min_t(u8, p_vf->num_mac_filters,
+                                       p_req->num_mac_filters);
+       p_resp->num_vlan_filters = min_t(u8, p_vf->num_vlan_filters,
+                                        p_req->num_vlan_filters);
+
+       /* This isn't really needed/enforced, but some legacy VFs might depend
+        * on the correct filling of this field.
+        */
+       p_resp->num_mc_filters = QED_MAX_MC_ADDRS;
+
+       /* Validate sufficient resources for VF */
+       if (p_resp->num_rxqs < p_req->num_rxqs ||
+           p_resp->num_txqs < p_req->num_txqs ||
+           p_resp->num_sbs < p_req->num_sbs ||
+           p_resp->num_mac_filters < p_req->num_mac_filters ||
+           p_resp->num_vlan_filters < p_req->num_vlan_filters ||
+           p_resp->num_mc_filters < p_req->num_mc_filters) {
+               DP_VERBOSE(p_hwfn,
+                          QED_MSG_IOV,
+                          "VF[%d] - Insufficient resources: rxq [%02x/%02x] txq [%02x/%02x] sbs [%02x/%02x] mac [%02x/%02x] vlan [%02x/%02x] mc [%02x/%02x]\n",
+                          p_vf->abs_vf_id,
+                          p_req->num_rxqs,
+                          p_resp->num_rxqs,
+                          p_req->num_txqs,
+                          p_resp->num_txqs,
+                          p_req->num_sbs,
+                          p_resp->num_sbs,
+                          p_req->num_mac_filters,
+                          p_resp->num_mac_filters,
+                          p_req->num_vlan_filters,
+                          p_resp->num_vlan_filters,
+                          p_req->num_mc_filters, p_resp->num_mc_filters);
+               return PFVF_STATUS_NO_RESOURCE;
+       }
+
+       return PFVF_STATUS_SUCCESS;
+}
+
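+/* fill in the per-storm statistics zone addresses and sizes the VF should
+ * use; Tstorm queue statistics are not exposed to the VF
+ */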
+static void qed_iov_vf_mbx_acquire_stats(struct qed_hwfn *p_hwfn,
+                                        struct pfvf_stats_info *p_stats)
+{
+       p_stats->mstats.address = PXP_VF_BAR0_START_MSDM_ZONE_B +
+                                 offsetof(struct mstorm_vf_zone,
+                                          non_trigger.eth_queue_stat);
+       p_stats->mstats.len = sizeof(struct eth_mstorm_per_queue_stat);
+       p_stats->ustats.address = PXP_VF_BAR0_START_USDM_ZONE_B +
+                                 offsetof(struct ustorm_vf_zone,
+                                          non_trigger.eth_queue_stat);
+       p_stats->ustats.len = sizeof(struct eth_ustorm_per_queue_stat);
+       p_stats->pstats.address = PXP_VF_BAR0_START_PSDM_ZONE_B +
+                                 offsetof(struct pstorm_vf_zone,
+                                          non_trigger.eth_queue_stat);
+       p_stats->pstats.len = sizeof(struct eth_pstorm_per_queue_stat);
+       p_stats->tstats.address = 0;
+       p_stats->tstats.len = 0;
+}
+
+static void qed_iov_vf_mbx_acquire(struct qed_hwfn *p_hwfn,
+                                  struct qed_ptt *p_ptt,
+                                  struct qed_vf_info *vf)
+{
+       struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
+       struct pfvf_acquire_resp_tlv *resp = &mbx->reply_virt->acquire_resp;
+       struct pf_vf_pfdev_info *pfdev_info = &resp->pfdev_info;
+       struct vfpf_acquire_tlv *req = &mbx->req_virt->acquire;
+       u8 vfpf_status = PFVF_STATUS_NOT_SUPPORTED;
+       struct pf_vf_resc *resc = &resp->resc;
+       int rc;
+
+       memset(resp, 0, sizeof(*resp));
+
+       /* Validate FW compatibility */
+       if (req->vfdev_info.eth_fp_hsi_major != ETH_HSI_VER_MAJOR) {
+               DP_INFO(p_hwfn,
+                       "VF[%d] needs fastpath HSI %02x.%02x, which is incompatible with loaded FW's fastpath HSI %02x.%02x\n",
+                       vf->abs_vf_id,
+                       req->vfdev_info.eth_fp_hsi_major,
+                       req->vfdev_info.eth_fp_hsi_minor,
+                       ETH_HSI_VER_MAJOR, ETH_HSI_VER_MINOR);
+
+               /* Write the PF version so the VF knows which version
+                * is supported.
+                */
+               pfdev_info->major_fp_hsi = ETH_HSI_VER_MAJOR;
+               pfdev_info->minor_fp_hsi = ETH_HSI_VER_MINOR;
+
+               goto out;
+       }
+
+       /* On 100g PFs, prevent old VFs from loading */
+       if ((p_hwfn->cdev->num_hwfns > 1) &&
+           !(req->vfdev_info.capabilities & VFPF_ACQUIRE_CAP_100G)) {
+               DP_INFO(p_hwfn,
+                       "VF[%d] is running an old driver that doesn't support 100g\n",
+                       vf->abs_vf_id);
+               goto out;
+       }
+
+       /* Store the acquire message */
+       memcpy(&vf->acquire, req, sizeof(vf->acquire));
+
+       vf->opaque_fid = req->vfdev_info.opaque_fid;
+
+       vf->vf_bulletin = req->bulletin_addr;
+       vf->bulletin.size = (vf->bulletin.size < req->bulletin_size) ?
+                           vf->bulletin.size : req->bulletin_size;
+
+       /* fill in pfdev info */
+       pfdev_info->chip_num = p_hwfn->cdev->chip_num;
+       pfdev_info->db_size = 0;
+       pfdev_info->indices_per_sb = PIS_PER_SB;
+
+       pfdev_info->capabilities = PFVF_ACQUIRE_CAP_DEFAULT_UNTAGGED |
+                                  PFVF_ACQUIRE_CAP_POST_FW_OVERRIDE;
+       if (p_hwfn->cdev->num_hwfns > 1)
+               pfdev_info->capabilities |= PFVF_ACQUIRE_CAP_100G;
+
+       qed_iov_vf_mbx_acquire_stats(p_hwfn, &pfdev_info->stats_info);
+
+       memcpy(pfdev_info->port_mac, p_hwfn->hw_info.hw_mac_addr, ETH_ALEN);
+
+       pfdev_info->fw_major = FW_MAJOR_VERSION;
+       pfdev_info->fw_minor = FW_MINOR_VERSION;
+       pfdev_info->fw_rev = FW_REVISION_VERSION;
+       pfdev_info->fw_eng = FW_ENGINEERING_VERSION;
+       pfdev_info->minor_fp_hsi = min_t(u8,
+                                        ETH_HSI_VER_MINOR,
+                                        req->vfdev_info.eth_fp_hsi_minor);
+       pfdev_info->os_type = VFPF_ACQUIRE_OS_LINUX;
+       qed_mcp_get_mfw_ver(p_hwfn, p_ptt, &pfdev_info->mfw_ver, NULL);
+
+       pfdev_info->dev_type = p_hwfn->cdev->type;
+       pfdev_info->chip_rev = p_hwfn->cdev->chip_rev;
+
+       /* Fill resources available to VF; Make sure there are enough to
+        * satisfy the VF's request.
+        */
+       vfpf_status = qed_iov_vf_mbx_acquire_resc(p_hwfn, p_ptt, vf,
+                                                 &req->resc_request, resc);
+       if (vfpf_status != PFVF_STATUS_SUCCESS)
+               goto out;
+
+       /* Start the VF in FW */
+       rc = qed_sp_vf_start(p_hwfn, vf);
+       if (rc) {
+               DP_NOTICE(p_hwfn, "Failed to start VF[%02x]\n", vf->abs_vf_id);
+               vfpf_status = PFVF_STATUS_FAILURE;
+               goto out;
+       }
+
+       /* Fill agreed size of bulletin board in response */
+       resp->bulletin_size = vf->bulletin.size;
+       qed_iov_post_vf_bulletin(p_hwfn, vf->relative_vf_id, p_ptt);
+
+       DP_VERBOSE(p_hwfn,
+                  QED_MSG_IOV,
+                  "VF[%d] ACQUIRE_RESPONSE: pfdev_info- chip_num=0x%x, db_size=%d, idx_per_sb=%d, pf_cap=0x%llx\n"
+                  "resources- n_rxq-%d, n_txq-%d, n_sbs-%d, n_macs-%d, n_vlans-%d\n",
+                  vf->abs_vf_id,
+                  resp->pfdev_info.chip_num,
+                  resp->pfdev_info.db_size,
+                  resp->pfdev_info.indices_per_sb,
+                  resp->pfdev_info.capabilities,
+                  resc->num_rxqs,
+                  resc->num_txqs,
+                  resc->num_sbs,
+                  resc->num_mac_filters,
+                  resc->num_vlan_filters);
+       vf->state = VF_ACQUIRED;
+
+       /* Prepare Response */
+out:
+       qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_ACQUIRE,
+                            sizeof(struct pfvf_acquire_resp_tlv), vfpf_status);
+}
+
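+/* configure anti-spoofing on the VF's vport via a vport-update ramrod */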
+static int __qed_iov_spoofchk_set(struct qed_hwfn *p_hwfn,
+                                 struct qed_vf_info *p_vf, bool val)
+{
+       struct qed_sp_vport_update_params params;
+       int rc;
+
+       if (val == p_vf->spoof_chk) {
+               DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+                          "Spoofchk value[%d] is already configured\n", val);
+               return 0;
+       }
+
+       memset(&params, 0, sizeof(struct qed_sp_vport_update_params));
+       params.opaque_fid = p_vf->opaque_fid;
+       params.vport_id = p_vf->vport_id;
+       params.update_anti_spoofing_en_flg = 1;
+       params.anti_spoofing_en = val;
+
+       rc = qed_sp_vport_update(p_hwfn, &params, QED_SPQ_MODE_EBLOCK, NULL);
+       if (!rc) {
+               p_vf->spoof_chk = val;
+               p_vf->req_spoofchk_val = p_vf->spoof_chk;
+               DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+                          "Spoofchk val[%d] configured\n", val);
+       } else {
+               DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+                          "Spoofchk configuration[val:%d] failed for VF[%d]\n",
+                          val, p_vf->relative_vf_id);
+       }
+
+       return rc;
+}
+
+static int qed_iov_reconfigure_unicast_vlan(struct qed_hwfn *p_hwfn,
+                                           struct qed_vf_info *p_vf)
+{
+       struct qed_filter_ucast filter;
+       int rc = 0;
+       int i;
+
+       memset(&filter, 0, sizeof(filter));
+       filter.is_rx_filter = 1;
+       filter.is_tx_filter = 1;
+       filter.vport_to_add_to = p_vf->vport_id;
+       filter.opcode = QED_FILTER_ADD;
+
+       /* Reconfigure vlans */
+       for (i = 0; i < QED_ETH_VF_NUM_VLAN_FILTERS + 1; i++) {
+               if (!p_vf->shadow_config.vlans[i].used)
+                       continue;
+
+               filter.type = QED_FILTER_VLAN;
+               filter.vlan = p_vf->shadow_config.vlans[i].vid;
+               DP_VERBOSE(p_hwfn,
+                          QED_MSG_IOV,
+                          "Reconfiguring VLAN [0x%04x] for VF [%04x]\n",
+                          filter.vlan, p_vf->relative_vf_id);
+               rc = qed_sp_eth_filter_ucast(p_hwfn,
+                                            p_vf->opaque_fid,
+                                            &filter,
+                                            QED_SPQ_MODE_CB, NULL);
+               if (rc) {
+                       DP_NOTICE(p_hwfn,
+                                 "Failed to configure VLAN [%04x] to VF [%04x]\n",
+                                 filter.vlan, p_vf->relative_vf_id);
+                       break;
+               }
+       }
+
+       return rc;
+}
+
+static int
+qed_iov_reconfigure_unicast_shadow(struct qed_hwfn *p_hwfn,
+                                  struct qed_vf_info *p_vf, u64 events)
+{
+       int rc = 0;
+
+       if ((events & (1 << VLAN_ADDR_FORCED)) &&
+           !(p_vf->configured_features & (1 << VLAN_ADDR_FORCED)))
+               rc = qed_iov_reconfigure_unicast_vlan(p_hwfn, p_vf);
+
+       return rc;
+}
+
+static int qed_iov_configure_vport_forced(struct qed_hwfn *p_hwfn,
+                                         struct qed_vf_info *p_vf, u64 events)
+{
+       int rc = 0;
+       struct qed_filter_ucast filter;
+
+       if (!p_vf->vport_instance)
+               return -EINVAL;
+
+       if (events & (1 << MAC_ADDR_FORCED)) {
+               /* Since there's no way [currently] of removing the MAC,
+                * we can always assume this means we need to force it.
+                */
+               memset(&filter, 0, sizeof(filter));
+               filter.type = QED_FILTER_MAC;
+               filter.opcode = QED_FILTER_REPLACE;
+               filter.is_rx_filter = 1;
+               filter.is_tx_filter = 1;
+               filter.vport_to_add_to = p_vf->vport_id;
+               ether_addr_copy(filter.mac, p_vf->bulletin.p_virt->mac);
+
+               rc = qed_sp_eth_filter_ucast(p_hwfn, p_vf->opaque_fid,
+                                            &filter, QED_SPQ_MODE_CB, NULL);
+               if (rc) {
+                       DP_NOTICE(p_hwfn,
+                                 "PF failed to configure MAC for VF\n");
+                       return rc;
+               }
+
+               p_vf->configured_features |= 1 << MAC_ADDR_FORCED;
+       }
+
+       if (events & (1 << VLAN_ADDR_FORCED)) {
+               struct qed_sp_vport_update_params vport_update;
+               u8 removal;
+               int i;
+
+               memset(&filter, 0, sizeof(filter));
+               filter.type = QED_FILTER_VLAN;
+               filter.is_rx_filter = 1;
+               filter.is_tx_filter = 1;
+               filter.vport_to_add_to = p_vf->vport_id;
+               filter.vlan = p_vf->bulletin.p_virt->pvid;
+               filter.opcode = filter.vlan ? QED_FILTER_REPLACE :
+                                             QED_FILTER_FLUSH;
+
+               /* Send the ramrod */
+               rc = qed_sp_eth_filter_ucast(p_hwfn, p_vf->opaque_fid,
+                                            &filter, QED_SPQ_MODE_CB, NULL);
+               if (rc) {
+                       DP_NOTICE(p_hwfn,
+                                 "PF failed to configure VLAN for VF\n");
+                       return rc;
+               }
+
+               /* Update the default-vlan & silent vlan stripping */
+               memset(&vport_update, 0, sizeof(vport_update));
+               vport_update.opaque_fid = p_vf->opaque_fid;
+               vport_update.vport_id = p_vf->vport_id;
+               vport_update.update_default_vlan_enable_flg = 1;
+               vport_update.default_vlan_enable_flg = filter.vlan ? 1 : 0;
+               vport_update.update_default_vlan_flg = 1;
+               vport_update.default_vlan = filter.vlan;
+
+               vport_update.update_inner_vlan_removal_flg = 1;
+               removal = filter.vlan ? 1
+                                     : p_vf->shadow_config.inner_vlan_removal;
+               vport_update.inner_vlan_removal_flg = removal;
+               vport_update.silent_vlan_removal_flg = filter.vlan ? 1 : 0;
+               rc = qed_sp_vport_update(p_hwfn,
+                                        &vport_update,
+                                        QED_SPQ_MODE_EBLOCK, NULL);
+               if (rc) {
+                       DP_NOTICE(p_hwfn,
+                                 "PF failed to configure VF vport for vlan\n");
+                       return rc;
+               }
+
+               /* Update all the Rx queues */
+               for (i = 0; i < QED_MAX_VF_CHAINS_PER_PF; i++) {
+                       u16 qid;
+
+                       if (!p_vf->vf_queues[i].rxq_active)
+                               continue;
+
+                       qid = p_vf->vf_queues[i].fw_rx_qid;
+
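+                       /* Single-queue update; completion is requested on
+                        * the event ring rather than by CQE.
+                        */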
+                       rc = qed_sp_eth_rx_queues_update(p_hwfn, qid,
+                                                        1, 0, 1,
+                                                        QED_SPQ_MODE_EBLOCK,
+                                                        NULL);
+                       if (rc) {
+                               DP_NOTICE(p_hwfn,
+                                         "Failed to send Rx update for queue[0x%04x]\n",
+                                         qid);
+                               return rc;
+                       }
+               }
+
+               if (filter.vlan)
+                       p_vf->configured_features |= 1 << VLAN_ADDR_FORCED;
+               else
+                       p_vf->configured_features &= ~(1 << VLAN_ADDR_FORCED);
+       }
+
+       /* If forced features are terminated, we need to configure the shadow
+        * configuration back again.
+        */
+       if (events)
+               qed_iov_reconfigure_unicast_shadow(p_hwfn, p_vf, events);
+
+       return rc;
+}
+
+static void qed_iov_vf_mbx_start_vport(struct qed_hwfn *p_hwfn,
+                                      struct qed_ptt *p_ptt,
+                                      struct qed_vf_info *vf)
+{
+       struct qed_sp_vport_start_params params = { 0 };
+       struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
+       struct vfpf_vport_start_tlv *start;
+       u8 status = PFVF_STATUS_SUCCESS;
+       struct qed_vf_info *vf_info;
+       u64 *p_bitmap;
+       int sb_id;
+       int rc;
+
+       vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vf->relative_vf_id, true);
+       if (!vf_info) {
+               DP_NOTICE(p_hwfn->cdev,
+                         "Failed to get VF info, invalid vfid [%d]\n",
+                         vf->relative_vf_id);
+               return;
+       }
+
+       vf->state = VF_ENABLED;
+       start = &mbx->req_virt->start_vport;
+
+       /* Initialize Status block in CAU */
+       for (sb_id = 0; sb_id < vf->num_sbs; sb_id++) {
+               if (!start->sb_addr[sb_id]) {
+                       DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+                                  "VF[%d] did not fill the address of SB %d\n",
+                                  vf->relative_vf_id, sb_id);
+                       break;
+               }
+
+               qed_int_cau_conf_sb(p_hwfn, p_ptt,
+                                   start->sb_addr[sb_id],
+                                   vf->igu_sbs[sb_id],
+                                   vf->abs_vf_id, 1);
+       }
+       qed_iov_enable_vf_traffic(p_hwfn, p_ptt, vf);
+
+       vf->mtu = start->mtu;
+       vf->shadow_config.inner_vlan_removal = start->inner_vlan_removal;
+
+       /* Take into consideration configuration forced by hypervisor;
+        * If none is configured, use the supplied VF values [for old
+        * vfs that would still be fine, since they passed '0' as padding].
+        */
+       p_bitmap = &vf_info->bulletin.p_virt->valid_bitmap;
+       if (!(*p_bitmap & (1 << VFPF_BULLETIN_UNTAGGED_DEFAULT_FORCED))) {
+               u8 vf_req = start->only_untagged;
+
+               vf_info->bulletin.p_virt->default_only_untagged = vf_req;
+               *p_bitmap |= 1 << VFPF_BULLETIN_UNTAGGED_DEFAULT;
+       }
+
+       params.tpa_mode = start->tpa_mode;
+       params.remove_inner_vlan = start->inner_vlan_removal;
+       params.tx_switching = true;
+
+       params.only_untagged = vf_info->bulletin.p_virt->default_only_untagged;
+       params.drop_ttl0 = false;
+       params.concrete_fid = vf->concrete_fid;
+       params.opaque_fid = vf->opaque_fid;
+       params.vport_id = vf->vport_id;
+       params.max_buffers_per_cqe = start->max_buffers_per_cqe;
+       params.mtu = vf->mtu;
+
+       rc = qed_sp_eth_vport_start(p_hwfn, &params);
+       if (rc != 0) {
+               DP_ERR(p_hwfn,
+                      "qed_iov_vf_mbx_start_vport returned error %d\n", rc);
+               status = PFVF_STATUS_FAILURE;
+       } else {
+               vf->vport_instance++;
+
+               /* Force configuration if needed on the newly opened vport */
+               qed_iov_configure_vport_forced(p_hwfn, vf, *p_bitmap);
+
+               __qed_iov_spoofchk_set(p_hwfn, vf, vf->req_spoofchk_val);
+       }
+       qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_VPORT_START,
+                            sizeof(struct pfvf_def_resp_tlv), status);
+}
+
+static void qed_iov_vf_mbx_stop_vport(struct qed_hwfn *p_hwfn,
+                                     struct qed_ptt *p_ptt,
+                                     struct qed_vf_info *vf)
+{
+       u8 status = PFVF_STATUS_SUCCESS;
+       int rc;
+
+       vf->vport_instance--;
+       vf->spoof_chk = false;
+
+       rc = qed_sp_vport_stop(p_hwfn, vf->opaque_fid, vf->vport_id);
+       if (rc != 0) {
+               DP_ERR(p_hwfn, "qed_iov_vf_mbx_stop_vport returned error %d\n",
+                      rc);
+               status = PFVF_STATUS_FAILURE;
+       }
+
+       /* Forget the configuration on the vport */
+       vf->configured_features = 0;
+       memset(&vf->shadow_config, 0, sizeof(vf->shadow_config));
+
+       qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_VPORT_TEARDOWN,
+                            sizeof(struct pfvf_def_resp_tlv), status);
+}
+
+static void qed_iov_vf_mbx_start_rxq_resp(struct qed_hwfn *p_hwfn,
+                                         struct qed_ptt *p_ptt,
+                                         struct qed_vf_info *vf, u8 status)
+{
+       struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
+       struct pfvf_start_queue_resp_tlv *p_tlv;
+       struct vfpf_start_rxq_tlv *req;
+
+       mbx->offset = (u8 *)mbx->reply_virt;
+
+       p_tlv = qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_START_RXQ,
+                           sizeof(*p_tlv));
+       qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END,
+                   sizeof(struct channel_list_end_tlv));
+
+       /* Update the TLV with the response */
+       if (status == PFVF_STATUS_SUCCESS) {
+               req = &mbx->req_virt->start_rxq;
+               p_tlv->offset = PXP_VF_BAR0_START_MSDM_ZONE_B +
+                               offsetof(struct mstorm_vf_zone,
+                                        non_trigger.eth_rx_queue_producers) +
+                               sizeof(struct eth_rx_prod_data) * req->rx_qid;
+       }
+
+       qed_iov_send_response(p_hwfn, p_ptt, vf, sizeof(*p_tlv), status);
+}
+
+static void qed_iov_vf_mbx_start_rxq(struct qed_hwfn *p_hwfn,
+                                    struct qed_ptt *p_ptt,
+                                    struct qed_vf_info *vf)
+{
+       struct qed_queue_start_common_params params;
+       struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
+       u8 status = PFVF_STATUS_NO_RESOURCE;
+       struct vfpf_start_rxq_tlv *req;
+       int rc;
+
+       memset(&params, 0, sizeof(params));
+       req = &mbx->req_virt->start_rxq;
+
+       if (!qed_iov_validate_rxq(p_hwfn, vf, req->rx_qid) ||
+           !qed_iov_validate_sb(p_hwfn, vf, req->hw_sb))
+               goto out;
+
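+       /* Translate the VF-relative queue id into the FW rx queue id */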
+       params.queue_id = vf->vf_queues[req->rx_qid].fw_rx_qid;
+       params.vf_qid = req->rx_qid;
+       params.vport_id = vf->vport_id;
+       params.sb = req->hw_sb;
+       params.sb_idx = req->sb_index;
+
+       rc = qed_sp_eth_rxq_start_ramrod(p_hwfn, vf->opaque_fid,
+                                        vf->vf_queues[req->rx_qid].fw_cid,
+                                        &params,
+                                        vf->abs_vf_id + 0x10,
+                                        req->bd_max_bytes,
+                                        req->rxq_addr,
+                                        req->cqe_pbl_addr, req->cqe_pbl_size);
+
+       if (rc) {
+               status = PFVF_STATUS_FAILURE;
+       } else {
+               status = PFVF_STATUS_SUCCESS;
+               vf->vf_queues[req->rx_qid].rxq_active = true;
+               vf->num_active_rxqs++;
+       }
+
+out:
+       qed_iov_vf_mbx_start_rxq_resp(p_hwfn, p_ptt, vf, status);
+}
+
+static void qed_iov_vf_mbx_start_txq_resp(struct qed_hwfn *p_hwfn,
+                                         struct qed_ptt *p_ptt,
+                                         struct qed_vf_info *p_vf, u8 status)
+{
+       struct qed_iov_vf_mbx *mbx = &p_vf->vf_mbx;
+       struct pfvf_start_queue_resp_tlv *p_tlv;
+
+       mbx->offset = (u8 *)mbx->reply_virt;
+
+       p_tlv = qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_START_TXQ,
+                           sizeof(*p_tlv));
+       qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END,
+                   sizeof(struct channel_list_end_tlv));
+
+       /* Update the TLV with the response */
+       if (status == PFVF_STATUS_SUCCESS) {
+               u16 qid = mbx->req_virt->start_txq.tx_qid;
+
+               p_tlv->offset = qed_db_addr(p_vf->vf_queues[qid].fw_cid,
+                                           DQ_DEMS_LEGACY);
+       }
+
+       qed_iov_send_response(p_hwfn, p_ptt, p_vf, sizeof(*p_tlv), status);
+}
+
+static void qed_iov_vf_mbx_start_txq(struct qed_hwfn *p_hwfn,
+                                    struct qed_ptt *p_ptt,
+                                    struct qed_vf_info *vf)
+{
+       struct qed_queue_start_common_params params;
+       struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
+       u8 status = PFVF_STATUS_NO_RESOURCE;
+       union qed_qm_pq_params pq_params;
+       struct vfpf_start_txq_tlv *req;
+       int rc;
+
+       /* Prepare the parameters which would choose the right PQ */
+       memset(&pq_params, 0, sizeof(pq_params));
+       pq_params.eth.is_vf = 1;
+       pq_params.eth.vf_id = vf->relative_vf_id;
+
+       memset(&params, 0, sizeof(params));
+       req = &mbx->req_virt->start_txq;
+
+       if (!qed_iov_validate_txq(p_hwfn, vf, req->tx_qid) ||
+           !qed_iov_validate_sb(p_hwfn, vf, req->hw_sb))
+               goto out;
+
+       params.queue_id = vf->vf_queues[req->tx_qid].fw_tx_qid;
+       params.vport_id = vf->vport_id;
+       params.sb = req->hw_sb;
+       params.sb_idx = req->sb_index;
+
+       rc = qed_sp_eth_txq_start_ramrod(p_hwfn,
+                                        vf->opaque_fid,
+                                        vf->vf_queues[req->tx_qid].fw_cid,
+                                        &params,
+                                        vf->abs_vf_id + 0x10,
+                                        req->pbl_addr,
+                                        req->pbl_size, &pq_params);
+
+       if (rc) {
+               status = PFVF_STATUS_FAILURE;
+       } else {
+               status = PFVF_STATUS_SUCCESS;
+               vf->vf_queues[req->tx_qid].txq_active = true;
+       }
+
+out:
+       qed_iov_vf_mbx_start_txq_resp(p_hwfn, p_ptt, vf, status);
+}
+
+static int qed_iov_vf_stop_rxqs(struct qed_hwfn *p_hwfn,
+                               struct qed_vf_info *vf,
+                               u16 rxq_id, u8 num_rxqs, bool cqe_completion)
+{
+       int rc = 0;
+       int qid;
+
+       if (rxq_id + num_rxqs > ARRAY_SIZE(vf->vf_queues))
+               return -EINVAL;
+
+       for (qid = rxq_id; qid < rxq_id + num_rxqs; qid++) {
+               if (vf->vf_queues[qid].rxq_active) {
+                       rc = qed_sp_eth_rx_queue_stop(p_hwfn,
+                                                     vf->vf_queues[qid].
+                                                     fw_rx_qid, false,
+                                                     cqe_completion);
+
+                       if (rc)
+                               return rc;
+               }
+               vf->vf_queues[qid].rxq_active = false;
+               vf->num_active_rxqs--;
+       }
+
+       return rc;
+}
+
+static int qed_iov_vf_stop_txqs(struct qed_hwfn *p_hwfn,
+                               struct qed_vf_info *vf, u16 txq_id, u8 num_txqs)
+{
+       int rc = 0;
+       int qid;
+
+       if (txq_id + num_txqs > ARRAY_SIZE(vf->vf_queues))
+               return -EINVAL;
+
+       for (qid = txq_id; qid < txq_id + num_txqs; qid++) {
+               if (vf->vf_queues[qid].txq_active) {
+                       rc = qed_sp_eth_tx_queue_stop(p_hwfn,
+                                                     vf->vf_queues[qid].
+                                                     fw_tx_qid);
+
+                       if (rc)
+                               return rc;
+               }
+               vf->vf_queues[qid].txq_active = false;
+       }
+       return rc;
+}
+
+static void qed_iov_vf_mbx_stop_rxqs(struct qed_hwfn *p_hwfn,
+                                    struct qed_ptt *p_ptt,
+                                    struct qed_vf_info *vf)
+{
+       u16 length = sizeof(struct pfvf_def_resp_tlv);
+       struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
+       u8 status = PFVF_STATUS_SUCCESS;
+       struct vfpf_stop_rxqs_tlv *req;
+       int rc;
+
+       /* Starting from a qid != 0 is allowed; in that case we need to make
+        * sure that qid + num_qs doesn't exceed the actual number of queues
+        * that exist.
+        */
+       req = &mbx->req_virt->stop_rxqs;
+       rc = qed_iov_vf_stop_rxqs(p_hwfn, vf, req->rx_qid,
+                                 req->num_rxqs, req->cqe_completion);
+       if (rc)
+               status = PFVF_STATUS_FAILURE;
+
+       qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_STOP_RXQS,
+                            length, status);
+}
+
+static void qed_iov_vf_mbx_stop_txqs(struct qed_hwfn *p_hwfn,
+                                    struct qed_ptt *p_ptt,
+                                    struct qed_vf_info *vf)
+{
+       u16 length = sizeof(struct pfvf_def_resp_tlv);
+       struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
+       u8 status = PFVF_STATUS_SUCCESS;
+       struct vfpf_stop_txqs_tlv *req;
+       int rc;
+
+       /* Starting from a qid != 0 is allowed; in that case we need to make
+        * sure that qid + num_qs doesn't exceed the actual number of queues
+        * that exist.
+        */
+       req = &mbx->req_virt->stop_txqs;
+       rc = qed_iov_vf_stop_txqs(p_hwfn, vf, req->tx_qid, req->num_txqs);
+       if (rc)
+               status = PFVF_STATUS_FAILURE;
+
+       qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_STOP_TXQS,
+                            length, status);
+}
+
+static void qed_iov_vf_mbx_update_rxqs(struct qed_hwfn *p_hwfn,
+                                      struct qed_ptt *p_ptt,
+                                      struct qed_vf_info *vf)
+{
+       u16 length = sizeof(struct pfvf_def_resp_tlv);
+       struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
+       struct vfpf_update_rxq_tlv *req;
+       u8 status = PFVF_STATUS_SUCCESS;
+       u8 complete_event_flg;
+       u8 complete_cqe_flg;
+       u16 qid;
+       int rc;
+       u8 i;
+
+       req = &mbx->req_virt->update_rxq;
+       complete_cqe_flg = !!(req->flags & VFPF_RXQ_UPD_COMPLETE_CQE_FLAG);
+       complete_event_flg = !!(req->flags & VFPF_RXQ_UPD_COMPLETE_EVENT_FLAG);
+
+       for (i = 0; i < req->num_rxqs; i++) {
+               qid = req->rx_qid + i;
+
+               if (!vf->vf_queues[qid].rxq_active) {
+                       DP_NOTICE(p_hwfn, "VF rx_qid = %d isn't active!\n",
+                                 qid);
+                       status = PFVF_STATUS_FAILURE;
+                       break;
+               }
+
+               rc = qed_sp_eth_rx_queues_update(p_hwfn,
+                                                vf->vf_queues[qid].fw_rx_qid,
+                                                1,
+                                                complete_cqe_flg,
+                                                complete_event_flg,
+                                                QED_SPQ_MODE_EBLOCK, NULL);
+
+               if (rc) {
+                       status = PFVF_STATUS_FAILURE;
+                       break;
+               }
+       }
+
+       qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_UPDATE_RXQ,
+                            length, status);
+}
+
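+/* search the TLV list for a TLV of the requested type; returns NULL if the
+ * list ends or is malformed before a match is found
+ */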
+void *qed_iov_search_list_tlvs(struct qed_hwfn *p_hwfn,
+                              void *p_tlvs_list, u16 req_type)
+{
+       struct channel_tlv *p_tlv = (struct channel_tlv *)p_tlvs_list;
+       int len = 0;
+
+       do {
+               if (!p_tlv->length) {
+                       DP_NOTICE(p_hwfn, "Zero length TLV found\n");
+                       return NULL;
+               }
+
+               if (p_tlv->type == req_type) {
+                       DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+                                  "Extended tlv type %d, length %d found\n",
+                                  p_tlv->type, p_tlv->length);
+                       return p_tlv;
+               }
+
+               len += p_tlv->length;
+               p_tlv = (struct channel_tlv *)((u8 *)p_tlv + p_tlv->length);
+
+               if ((len + p_tlv->length) > TLV_BUFFER_SIZE) {
+                       DP_NOTICE(p_hwfn, "TLVs have overrun the buffer size\n");
+                       return NULL;
+               }
+       } while (p_tlv->type != CHANNEL_TLV_LIST_END);
+
+       return NULL;
+}
+
+static void
+qed_iov_vp_update_act_param(struct qed_hwfn *p_hwfn,
+                           struct qed_sp_vport_update_params *p_data,
+                           struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
+{
+       struct vfpf_vport_update_activate_tlv *p_act_tlv;
+       u16 tlv = CHANNEL_TLV_VPORT_UPDATE_ACTIVATE;
+
+       p_act_tlv = (struct vfpf_vport_update_activate_tlv *)
+                   qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
+       if (!p_act_tlv)
+               return;
+
+       p_data->update_vport_active_rx_flg = p_act_tlv->update_rx;
+       p_data->vport_active_rx_flg = p_act_tlv->active_rx;
+       p_data->update_vport_active_tx_flg = p_act_tlv->update_tx;
+       p_data->vport_active_tx_flg = p_act_tlv->active_tx;
+       *tlvs_mask |= 1 << QED_IOV_VP_UPDATE_ACTIVATE;
+}
+
+static void
+qed_iov_vp_update_vlan_param(struct qed_hwfn *p_hwfn,
+                            struct qed_sp_vport_update_params *p_data,
+                            struct qed_vf_info *p_vf,
+                            struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
+{
+       struct vfpf_vport_update_vlan_strip_tlv *p_vlan_tlv;
+       u16 tlv = CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP;
+
+       p_vlan_tlv = (struct vfpf_vport_update_vlan_strip_tlv *)
+                    qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
+       if (!p_vlan_tlv)
+               return;
+
+       p_vf->shadow_config.inner_vlan_removal = p_vlan_tlv->remove_vlan;
+
+       /* Ignore the VF request if we're forcing a vlan */
+       if (!(p_vf->configured_features & (1 << VLAN_ADDR_FORCED))) {
+               p_data->update_inner_vlan_removal_flg = 1;
+               p_data->inner_vlan_removal_flg = p_vlan_tlv->remove_vlan;
+       }
+
+       *tlvs_mask |= 1 << QED_IOV_VP_UPDATE_VLAN_STRIP;
+}
+
+static void
+qed_iov_vp_update_tx_switch(struct qed_hwfn *p_hwfn,
+                           struct qed_sp_vport_update_params *p_data,
+                           struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
+{
+       struct vfpf_vport_update_tx_switch_tlv *p_tx_switch_tlv;
+       u16 tlv = CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH;
+
+       p_tx_switch_tlv = (struct vfpf_vport_update_tx_switch_tlv *)
+                         qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt,
+                                                  tlv);
+       if (!p_tx_switch_tlv)
+               return;
+
+       p_data->update_tx_switching_flg = 1;
+       p_data->tx_switching_flg = p_tx_switch_tlv->tx_switching;
+       *tlvs_mask |= 1 << QED_IOV_VP_UPDATE_TX_SWITCH;
+}
+
+static void
+qed_iov_vp_update_mcast_bin_param(struct qed_hwfn *p_hwfn,
+                                 struct qed_sp_vport_update_params *p_data,
+                                 struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
+{
+       struct vfpf_vport_update_mcast_bin_tlv *p_mcast_tlv;
+       u16 tlv = CHANNEL_TLV_VPORT_UPDATE_MCAST;
+
+       p_mcast_tlv = (struct vfpf_vport_update_mcast_bin_tlv *)
+           qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
+       if (!p_mcast_tlv)
+               return;
+
+       p_data->update_approx_mcast_flg = 1;
+       memcpy(p_data->bins, p_mcast_tlv->bins,
+              sizeof(unsigned long) * ETH_MULTICAST_MAC_BINS_IN_REGS);
+       *tlvs_mask |= 1 << QED_IOV_VP_UPDATE_MCAST;
+}
+
+static void
+qed_iov_vp_update_accept_flag(struct qed_hwfn *p_hwfn,
+                             struct qed_sp_vport_update_params *p_data,
+                             struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
+{
+       struct qed_filter_accept_flags *p_flags = &p_data->accept_flags;
+       struct vfpf_vport_update_accept_param_tlv *p_accept_tlv;
+       u16 tlv = CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM;
+
+       p_accept_tlv = (struct vfpf_vport_update_accept_param_tlv *)
+           qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
+       if (!p_accept_tlv)
+               return;
+
+       p_flags->update_rx_mode_config = p_accept_tlv->update_rx_mode;
+       p_flags->rx_accept_filter = p_accept_tlv->rx_accept_filter;
+       p_flags->update_tx_mode_config = p_accept_tlv->update_tx_mode;
+       p_flags->tx_accept_filter = p_accept_tlv->tx_accept_filter;
+       *tlvs_mask |= 1 << QED_IOV_VP_UPDATE_ACCEPT_PARAM;
+}
+
+static void
+qed_iov_vp_update_accept_any_vlan(struct qed_hwfn *p_hwfn,
+                                 struct qed_sp_vport_update_params *p_data,
+                                 struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
+{
+       struct vfpf_vport_update_accept_any_vlan_tlv *p_accept_any_vlan;
+       u16 tlv = CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN;
+
+       p_accept_any_vlan = (struct vfpf_vport_update_accept_any_vlan_tlv *)
+                           qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt,
+                                                    tlv);
+       if (!p_accept_any_vlan)
+               return;
+
+       p_data->accept_any_vlan = p_accept_any_vlan->accept_any_vlan;
+       p_data->update_accept_any_vlan_flg =
+                   p_accept_any_vlan->update_accept_any_vlan_flg;
+       *tlvs_mask |= 1 << QED_IOV_VP_UPDATE_ACCEPT_ANY_VLAN;
+}
+
+static void
+qed_iov_vp_update_rss_param(struct qed_hwfn *p_hwfn,
+                           struct qed_vf_info *vf,
+                           struct qed_sp_vport_update_params *p_data,
+                           struct qed_rss_params *p_rss,
+                           struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
+{
+       struct vfpf_vport_update_rss_tlv *p_rss_tlv;
+       u16 tlv = CHANNEL_TLV_VPORT_UPDATE_RSS;
+       u16 i, q_idx, max_q_idx;
+       u16 table_size;
+
+       p_rss_tlv = (struct vfpf_vport_update_rss_tlv *)
+                   qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
+       if (!p_rss_tlv) {
+               p_data->rss_params = NULL;
+               return;
+       }
+
+       memset(p_rss, 0, sizeof(struct qed_rss_params));
+
+       p_rss->update_rss_config = !!(p_rss_tlv->update_rss_flags &
+                                     VFPF_UPDATE_RSS_CONFIG_FLAG);
+       p_rss->update_rss_capabilities = !!(p_rss_tlv->update_rss_flags &
+                                           VFPF_UPDATE_RSS_CAPS_FLAG);
+       p_rss->update_rss_ind_table = !!(p_rss_tlv->update_rss_flags &
+                                        VFPF_UPDATE_RSS_IND_TABLE_FLAG);
+       p_rss->update_rss_key = !!(p_rss_tlv->update_rss_flags &
+                                  VFPF_UPDATE_RSS_KEY_FLAG);
+
+       p_rss->rss_enable = p_rss_tlv->rss_enable;
+       p_rss->rss_eng_id = vf->relative_vf_id + 1;
+       p_rss->rss_caps = p_rss_tlv->rss_caps;
+       p_rss->rss_table_size_log = p_rss_tlv->rss_table_size_log;
+       memcpy(p_rss->rss_ind_table, p_rss_tlv->rss_ind_table,
+              sizeof(p_rss->rss_ind_table));
+       memcpy(p_rss->rss_key, p_rss_tlv->rss_key, sizeof(p_rss->rss_key));
+
+       table_size = min_t(u16, ARRAY_SIZE(p_rss->rss_ind_table),
+                          (1 << p_rss_tlv->rss_table_size_log));
+
+       max_q_idx = ARRAY_SIZE(vf->vf_queues);
+
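+       /* Translate the VF-relative queue indices in the indirection table
+        * into FW rx queue ids; invalid entries fall back to queue 0.
+        */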
+       for (i = 0; i < table_size; i++) {
+               u16 index = vf->vf_queues[0].fw_rx_qid;
+
+               q_idx = p_rss->rss_ind_table[i];
+               if (q_idx >= max_q_idx)
+                       DP_NOTICE(p_hwfn,
+                                 "rss_ind_table[%d] = %d, rxq is out of range\n",
+                                 i, q_idx);
+               else if (!vf->vf_queues[q_idx].rxq_active)
+                       DP_NOTICE(p_hwfn,
+                                 "rss_ind_table[%d] = %d, rxq is not active\n",
+                                 i, q_idx);
+               else
+                       index = vf->vf_queues[q_idx].fw_rx_qid;
+               p_rss->rss_ind_table[i] = index;
+       }
+
+       p_data->rss_params = p_rss;
+       *tlvs_mask |= 1 << QED_IOV_VP_UPDATE_RSS;
+}
+
+static void
+qed_iov_vp_update_sge_tpa_param(struct qed_hwfn *p_hwfn,
+                               struct qed_vf_info *vf,
+                               struct qed_sp_vport_update_params *p_data,
+                               struct qed_sge_tpa_params *p_sge_tpa,
+                               struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
+{
+       struct vfpf_vport_update_sge_tpa_tlv *p_sge_tpa_tlv;
+       u16 tlv = CHANNEL_TLV_VPORT_UPDATE_SGE_TPA;
+
+       p_sge_tpa_tlv = (struct vfpf_vport_update_sge_tpa_tlv *)
+           qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
+
+       if (!p_sge_tpa_tlv) {
+               p_data->sge_tpa_params = NULL;
+               return;
+       }
+
+       memset(p_sge_tpa, 0, sizeof(struct qed_sge_tpa_params));
+
+       p_sge_tpa->update_tpa_en_flg =
+           !!(p_sge_tpa_tlv->update_sge_tpa_flags & VFPF_UPDATE_TPA_EN_FLAG);
+       p_sge_tpa->update_tpa_param_flg =
+           !!(p_sge_tpa_tlv->update_sge_tpa_flags &
+               VFPF_UPDATE_TPA_PARAM_FLAG);
+
+       p_sge_tpa->tpa_ipv4_en_flg =
+           !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_IPV4_EN_FLAG);
+       p_sge_tpa->tpa_ipv6_en_flg =
+           !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_IPV6_EN_FLAG);
+       p_sge_tpa->tpa_pkt_split_flg =
+           !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_PKT_SPLIT_FLAG);
+       p_sge_tpa->tpa_hdr_data_split_flg =
+           !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_HDR_DATA_SPLIT_FLAG);
+       p_sge_tpa->tpa_gro_consistent_flg =
+           !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_GRO_CONSIST_FLAG);
+
+       p_sge_tpa->tpa_max_aggs_num = p_sge_tpa_tlv->tpa_max_aggs_num;
+       p_sge_tpa->tpa_max_size = p_sge_tpa_tlv->tpa_max_size;
+       p_sge_tpa->tpa_min_size_to_start = p_sge_tpa_tlv->tpa_min_size_to_start;
+       p_sge_tpa->tpa_min_size_to_cont = p_sge_tpa_tlv->tpa_min_size_to_cont;
+       p_sge_tpa->max_buffers_per_cqe = p_sge_tpa_tlv->max_buffers_per_cqe;
+
+       p_data->sge_tpa_params = p_sge_tpa;
+
+       *tlvs_mask |= 1 << QED_IOV_VP_UPDATE_SGE_TPA;
+}
+
+static void qed_iov_vf_mbx_vport_update(struct qed_hwfn *p_hwfn,
+                                       struct qed_ptt *p_ptt,
+                                       struct qed_vf_info *vf)
+{
+       struct qed_sp_vport_update_params params;
+       struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
+       struct qed_sge_tpa_params sge_tpa_params;
+       struct qed_rss_params rss_params;
+       u8 status = PFVF_STATUS_SUCCESS;
+       u16 tlvs_mask = 0;
+       u16 length;
+       int rc;
+
+       /* Validate the VF can send such a request */
+       if (!vf->vport_instance) {
+               DP_VERBOSE(p_hwfn,
+                          QED_MSG_IOV,
+                          "No VPORT instance available for VF[%d], failing vport update\n",
+                          vf->abs_vf_id);
+               status = PFVF_STATUS_FAILURE;
+               goto out;
+       }
+
+       memset(&params, 0, sizeof(params));
+       params.opaque_fid = vf->opaque_fid;
+       params.vport_id = vf->vport_id;
+       params.rss_params = NULL;
+
+       /* Search for extended tlvs list and update values
+        * from VF in struct qed_sp_vport_update_params.
+        */
+       qed_iov_vp_update_act_param(p_hwfn, &params, mbx, &tlvs_mask);
+       qed_iov_vp_update_vlan_param(p_hwfn, &params, vf, mbx, &tlvs_mask);
+       qed_iov_vp_update_tx_switch(p_hwfn, &params, mbx, &tlvs_mask);
+       qed_iov_vp_update_mcast_bin_param(p_hwfn, &params, mbx, &tlvs_mask);
+       qed_iov_vp_update_accept_flag(p_hwfn, &params, mbx, &tlvs_mask);
+       qed_iov_vp_update_rss_param(p_hwfn, vf, &params, &rss_params,
+                                   mbx, &tlvs_mask);
+       qed_iov_vp_update_accept_any_vlan(p_hwfn, &params, mbx, &tlvs_mask);
+       qed_iov_vp_update_sge_tpa_param(p_hwfn, vf, &params,
+                                       &sge_tpa_params, mbx, &tlvs_mask);
+
+       /* Just log a message if no extended tlv was found in the buffer.
+        * Once all the vport update ramrod features are requested by the VF
+        * as extended TLVs in the buffer, an error can be returned in the
+        * response instead when no extended TLV is present.
+        */
+       if (!tlvs_mask) {
+               DP_NOTICE(p_hwfn,
+                         "No feature tlvs found for vport update\n");
+               status = PFVF_STATUS_NOT_SUPPORTED;
+               goto out;
+       }
+
+       rc = qed_sp_vport_update(p_hwfn, &params, QED_SPQ_MODE_EBLOCK, NULL);
+
+       if (rc)
+               status = PFVF_STATUS_FAILURE;
+
+out:
+       length = qed_iov_prep_vp_update_resp_tlvs(p_hwfn, vf, mbx, status,
+                                                 tlvs_mask, tlvs_mask);
+       qed_iov_send_response(p_hwfn, p_ptt, vf, length, status);
+}
+
+static int qed_iov_vf_update_vlan_shadow(struct qed_hwfn *p_hwfn,
+                                        struct qed_vf_info *p_vf,
+                                        struct qed_filter_ucast *p_params)
+{
+       int i;
+
+       /* First remove entries and then add new ones */
+       if (p_params->opcode == QED_FILTER_REMOVE) {
+               for (i = 0; i < QED_ETH_VF_NUM_VLAN_FILTERS + 1; i++)
+                       if (p_vf->shadow_config.vlans[i].used &&
+                           p_vf->shadow_config.vlans[i].vid ==
+                           p_params->vlan) {
+                               p_vf->shadow_config.vlans[i].used = false;
+                               break;
+                       }
+               if (i == QED_ETH_VF_NUM_VLAN_FILTERS + 1) {
+                       DP_VERBOSE(p_hwfn,
+                                  QED_MSG_IOV,
+                                  "VF [%d] - Tries to remove a non-existing vlan\n",
+                                  p_vf->relative_vf_id);
+                       return -EINVAL;
+               }
+       } else if (p_params->opcode == QED_FILTER_REPLACE ||
+                  p_params->opcode == QED_FILTER_FLUSH) {
+               for (i = 0; i < QED_ETH_VF_NUM_VLAN_FILTERS + 1; i++)
+                       p_vf->shadow_config.vlans[i].used = false;
+       }
+
+       /* In forced mode, we're willing to remove entries - but we don't add
+        * new ones.
+        */
+       if (p_vf->bulletin.p_virt->valid_bitmap & (1 << VLAN_ADDR_FORCED))
+               return 0;
+
+       if (p_params->opcode == QED_FILTER_ADD ||
+           p_params->opcode == QED_FILTER_REPLACE) {
+               for (i = 0; i < QED_ETH_VF_NUM_VLAN_FILTERS + 1; i++) {
+                       if (p_vf->shadow_config.vlans[i].used)
+                               continue;
+
+                       p_vf->shadow_config.vlans[i].used = true;
+                       p_vf->shadow_config.vlans[i].vid = p_params->vlan;
+                       break;
+               }
+
+               if (i == QED_ETH_VF_NUM_VLAN_FILTERS + 1) {
+                       DP_VERBOSE(p_hwfn,
+                                  QED_MSG_IOV,
+                                  "VF [%d] - Tries to configure more than %d vlan filters\n",
+                                  p_vf->relative_vf_id,
+                                  QED_ETH_VF_NUM_VLAN_FILTERS + 1);
+                       return -EINVAL;
+               }
+       }
+
+       return 0;
+}
+
+static int qed_iov_vf_update_mac_shadow(struct qed_hwfn *p_hwfn,
+                                       struct qed_vf_info *p_vf,
+                                       struct qed_filter_ucast *p_params)
+{
+       int i;
+
+       /* If we're in forced-mode, we don't allow any change */
+       if (p_vf->bulletin.p_virt->valid_bitmap & (1 << MAC_ADDR_FORCED))
+               return 0;
+
+       /* First remove entries and then add new ones */
+       if (p_params->opcode == QED_FILTER_REMOVE) {
+               for (i = 0; i < QED_ETH_VF_NUM_MAC_FILTERS; i++) {
+                       if (ether_addr_equal(p_vf->shadow_config.macs[i],
+                                            p_params->mac)) {
+                               memset(p_vf->shadow_config.macs[i], 0,
+                                      ETH_ALEN);
+                               break;
+                       }
+               }
+
+               if (i == QED_ETH_VF_NUM_MAC_FILTERS) {
+                       DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+                                  "MAC isn't configured\n");
+                       return -EINVAL;
+               }
+       } else if (p_params->opcode == QED_FILTER_REPLACE ||
+                  p_params->opcode == QED_FILTER_FLUSH) {
+               for (i = 0; i < QED_ETH_VF_NUM_MAC_FILTERS; i++)
+                       memset(p_vf->shadow_config.macs[i], 0, ETH_ALEN);
+       }
+
+       /* List the new MAC address */
+       if (p_params->opcode != QED_FILTER_ADD &&
+           p_params->opcode != QED_FILTER_REPLACE)
+               return 0;
+
+       for (i = 0; i < QED_ETH_VF_NUM_MAC_FILTERS; i++) {
+               if (is_zero_ether_addr(p_vf->shadow_config.macs[i])) {
+                       ether_addr_copy(p_vf->shadow_config.macs[i],
+                                       p_params->mac);
+                       DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+                                  "Added MAC at %d entry in shadow\n", i);
+                       break;
+               }
+       }
+
+       if (i == QED_ETH_VF_NUM_MAC_FILTERS) {
+               DP_VERBOSE(p_hwfn, QED_MSG_IOV, "No available place for MAC\n");
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+static int
+qed_iov_vf_update_unicast_shadow(struct qed_hwfn *p_hwfn,
+                                struct qed_vf_info *p_vf,
+                                struct qed_filter_ucast *p_params)
+{
+       int rc = 0;
+
+       if (p_params->type == QED_FILTER_MAC) {
+               rc = qed_iov_vf_update_mac_shadow(p_hwfn, p_vf, p_params);
+               if (rc)
+                       return rc;
+       }
+
+       if (p_params->type == QED_FILTER_VLAN)
+               rc = qed_iov_vf_update_vlan_shadow(p_hwfn, p_vf, p_params);
+
+       return rc;
+}
+
+int qed_iov_chk_ucast(struct qed_hwfn *hwfn,
+                     int vfid, struct qed_filter_ucast *params)
+{
+       struct qed_public_vf_info *vf;
+
+       vf = qed_iov_get_public_vf_info(hwfn, vfid, true);
+       if (!vf)
+               return -EINVAL;
+
+       /* No real decision to make; Store the configured MAC */
+       if (params->type == QED_FILTER_MAC ||
+           params->type == QED_FILTER_MAC_VLAN)
+               ether_addr_copy(vf->mac, params->mac);
+
+       return 0;
+}
+
+static void qed_iov_vf_mbx_ucast_filter(struct qed_hwfn *p_hwfn,
+                                       struct qed_ptt *p_ptt,
+                                       struct qed_vf_info *vf)
+{
+       struct qed_bulletin_content *p_bulletin = vf->bulletin.p_virt;
+       struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
+       struct vfpf_ucast_filter_tlv *req;
+       u8 status = PFVF_STATUS_SUCCESS;
+       struct qed_filter_ucast params;
+       int rc;
+
+       /* Prepare the unicast filter params */
+       memset(&params, 0, sizeof(struct qed_filter_ucast));
+       req = &mbx->req_virt->ucast_filter;
+       params.opcode = (enum qed_filter_opcode)req->opcode;
+       params.type = (enum qed_filter_ucast_type)req->type;
+
+       params.is_rx_filter = 1;
+       params.is_tx_filter = 1;
+       params.vport_to_remove_from = vf->vport_id;
+       params.vport_to_add_to = vf->vport_id;
+       memcpy(params.mac, req->mac, ETH_ALEN);
+       params.vlan = req->vlan;
+
+       DP_VERBOSE(p_hwfn,
+                  QED_MSG_IOV,
+                  "VF[%d]: opcode 0x%02x type 0x%02x [%s %s] [vport 0x%02x] MAC %02x:%02x:%02x:%02x:%02x:%02x, vlan 0x%04x\n",
+                  vf->abs_vf_id, params.opcode, params.type,
+                  params.is_rx_filter ? "RX" : "",
+                  params.is_tx_filter ? "TX" : "",
+                  params.vport_to_add_to,
+                  params.mac[0], params.mac[1],
+                  params.mac[2], params.mac[3],
+                  params.mac[4], params.mac[5], params.vlan);
+
+       if (!vf->vport_instance) {
+               DP_VERBOSE(p_hwfn,
+                          QED_MSG_IOV,
+                          "No VPORT instance available for VF[%d], failing ucast MAC configuration\n",
+                          vf->abs_vf_id);
+               status = PFVF_STATUS_FAILURE;
+               goto out;
+       }
+
+       /* Update shadow copy of the VF configuration */
+       if (qed_iov_vf_update_unicast_shadow(p_hwfn, vf, &params)) {
+               status = PFVF_STATUS_FAILURE;
+               goto out;
+       }
+
+       /* Determine if the unicast filtering is acceptable by the PF */
+       if ((p_bulletin->valid_bitmap & (1 << VLAN_ADDR_FORCED)) &&
+           (params.type == QED_FILTER_VLAN ||
+            params.type == QED_FILTER_MAC_VLAN)) {
+               /* Once VLAN is forced or PVID is set, do not allow
+                * to add/replace any further VLANs.
+                */
+               if (params.opcode == QED_FILTER_ADD ||
+                   params.opcode == QED_FILTER_REPLACE)
+                       status = PFVF_STATUS_FORCED;
+               goto out;
+       }
+
+       if ((p_bulletin->valid_bitmap & (1 << MAC_ADDR_FORCED)) &&
+           (params.type == QED_FILTER_MAC ||
+            params.type == QED_FILTER_MAC_VLAN)) {
+               if (!ether_addr_equal(p_bulletin->mac, params.mac) ||
+                   (params.opcode != QED_FILTER_ADD &&
+                    params.opcode != QED_FILTER_REPLACE))
+                       status = PFVF_STATUS_FORCED;
+               goto out;
+       }
+
+       rc = qed_iov_chk_ucast(p_hwfn, vf->relative_vf_id, &params);
+       if (rc) {
+               status = PFVF_STATUS_FAILURE;
+               goto out;
+       }
+
+       rc = qed_sp_eth_filter_ucast(p_hwfn, vf->opaque_fid, &params,
+                                    QED_SPQ_MODE_CB, NULL);
+       if (rc)
+               status = PFVF_STATUS_FAILURE;
+
+out:
+       qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_UCAST_FILTER,
+                            sizeof(struct pfvf_def_resp_tlv), status);
+}
+
+static void qed_iov_vf_mbx_int_cleanup(struct qed_hwfn *p_hwfn,
+                                      struct qed_ptt *p_ptt,
+                                      struct qed_vf_info *vf)
+{
+       int i;
+
+       /* Reset the SBs */
+       for (i = 0; i < vf->num_sbs; i++)
+               qed_int_igu_init_pure_rt_single(p_hwfn, p_ptt,
+                                               vf->igu_sbs[i],
+                                               vf->opaque_fid, false);
+
+       qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_INT_CLEANUP,
+                            sizeof(struct pfvf_def_resp_tlv),
+                            PFVF_STATUS_SUCCESS);
+}
+
+static void qed_iov_vf_mbx_close(struct qed_hwfn *p_hwfn,
+                                struct qed_ptt *p_ptt, struct qed_vf_info *vf)
+{
+       u16 length = sizeof(struct pfvf_def_resp_tlv);
+       u8 status = PFVF_STATUS_SUCCESS;
+
+       /* Disable Interrupts for VF */
+       qed_iov_vf_igu_set_int(p_hwfn, p_ptt, vf, 0);
+
+       /* Reset Permission table */
+       qed_iov_config_perm_table(p_hwfn, p_ptt, vf, 0);
+
+       qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_CLOSE,
+                            length, status);
+}
+
+static void qed_iov_vf_mbx_release(struct qed_hwfn *p_hwfn,
+                                  struct qed_ptt *p_ptt,
+                                  struct qed_vf_info *p_vf)
+{
+       u16 length = sizeof(struct pfvf_def_resp_tlv);
+       u8 status = PFVF_STATUS_SUCCESS;
+       int rc = 0;
+
+       qed_iov_vf_cleanup(p_hwfn, p_vf);
+
+       if (p_vf->state != VF_STOPPED && p_vf->state != VF_FREE) {
+               /* Stopping the VF */
+               rc = qed_sp_vf_stop(p_hwfn, p_vf->concrete_fid,
+                                   p_vf->opaque_fid);
+
+               if (rc) {
+                       DP_ERR(p_hwfn, "qed_sp_vf_stop returned error %d\n",
+                              rc);
+                       status = PFVF_STATUS_FAILURE;
+               }
+
+               p_vf->state = VF_STOPPED;
+       }
+
+       qed_iov_prepare_resp(p_hwfn, p_ptt, p_vf, CHANNEL_TLV_RELEASE,
+                            length, status);
+}
+
+static int
+qed_iov_vf_flr_poll_dorq(struct qed_hwfn *p_hwfn,
+                        struct qed_vf_info *p_vf, struct qed_ptt *p_ptt)
+{
+       int cnt;
+       u32 val;
+
+       qed_fid_pretend(p_hwfn, p_ptt, (u16) p_vf->concrete_fid);
+
+       for (cnt = 0; cnt < 50; cnt++) {
+               val = qed_rd(p_hwfn, p_ptt, DORQ_REG_VF_USAGE_CNT);
+               if (!val)
+                       break;
+               msleep(20);
+       }
+       qed_fid_pretend(p_hwfn, p_ptt, (u16) p_hwfn->hw_info.concrete_fid);
+
+       if (cnt == 50) {
+               DP_ERR(p_hwfn,
+                      "VF[%d] - dorq failed to cleanup [usage 0x%08x]\n",
+                      p_vf->abs_vf_id, val);
+               return -EBUSY;
+       }
+
+       return 0;
+}
+
+static int
+qed_iov_vf_flr_poll_pbf(struct qed_hwfn *p_hwfn,
+                       struct qed_vf_info *p_vf, struct qed_ptt *p_ptt)
+{
+       u32 cons[MAX_NUM_VOQS], distance[MAX_NUM_VOQS];
+       int i, cnt;
+
+       /* Read initial consumers & producers */
+       for (i = 0; i < MAX_NUM_VOQS; i++) {
+               u32 prod;
+
+               cons[i] = qed_rd(p_hwfn, p_ptt,
+                                PBF_REG_NUM_BLOCKS_ALLOCATED_CONS_VOQ0 +
+                                i * 0x40);
+               prod = qed_rd(p_hwfn, p_ptt,
+                             PBF_REG_NUM_BLOCKS_ALLOCATED_PROD_VOQ0 +
+                             i * 0x40);
+               distance[i] = prod - cons[i];
+       }
+
+       /* Wait for consumers to pass the producers */
+       i = 0;
+       for (cnt = 0; cnt < 50; cnt++) {
+               for (; i < MAX_NUM_VOQS; i++) {
+                       u32 tmp;
+
+                       tmp = qed_rd(p_hwfn, p_ptt,
+                                    PBF_REG_NUM_BLOCKS_ALLOCATED_CONS_VOQ0 +
+                                    i * 0x40);
+                       if (distance[i] > tmp - cons[i])
+                               break;
+               }
+
+               if (i == MAX_NUM_VOQS)
+                       break;
+
+               msleep(20);
+       }
+
+       if (cnt == 50) {
+               DP_ERR(p_hwfn, "VF[%d] - pbf polling failed on VOQ %d\n",
+                      p_vf->abs_vf_id, i);
+               return -EBUSY;
+       }
+
+       return 0;
+}
+
+static int qed_iov_vf_flr_poll(struct qed_hwfn *p_hwfn,
+                              struct qed_vf_info *p_vf, struct qed_ptt *p_ptt)
+{
+       int rc;
+
+       rc = qed_iov_vf_flr_poll_dorq(p_hwfn, p_vf, p_ptt);
+       if (rc)
+               return rc;
+
+       rc = qed_iov_vf_flr_poll_pbf(p_hwfn, p_vf, p_ptt);
+       if (rc)
+               return rc;
+
+       return 0;
+}
+
+static int
+qed_iov_execute_vf_flr_cleanup(struct qed_hwfn *p_hwfn,
+                              struct qed_ptt *p_ptt,
+                              u16 rel_vf_id, u32 *ack_vfs)
+{
+       struct qed_vf_info *p_vf;
+       int rc = 0;
+
+       p_vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, false);
+       if (!p_vf)
+               return 0;
+
+       if (p_hwfn->pf_iov_info->pending_flr[rel_vf_id / 64] &
+           (1ULL << (rel_vf_id % 64))) {
+               u16 vfid = p_vf->abs_vf_id;
+
+               DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+                          "VF[%d] - Handling FLR\n", vfid);
+
+               qed_iov_vf_cleanup(p_hwfn, p_vf);
+
+               /* If VF isn't active, no need for anything but SW */
+               if (!p_vf->b_init)
+                       goto cleanup;
+
+               rc = qed_iov_vf_flr_poll(p_hwfn, p_vf, p_ptt);
+               if (rc)
+                       goto cleanup;
+
+               rc = qed_final_cleanup(p_hwfn, p_ptt, vfid, true);
+               if (rc) {
+                       DP_ERR(p_hwfn, "Failed to handle FLR of VF[%d]\n", vfid);
+                       return rc;
+               }
+
+               /* VF_STOPPED has to be set only after final cleanup
+                * but prior to re-enabling the VF.
+                */
+               p_vf->state = VF_STOPPED;
+
+               rc = qed_iov_enable_vf_access(p_hwfn, p_ptt, p_vf);
+               if (rc) {
+                       DP_ERR(p_hwfn, "Failed to re-enable VF[%d] access\n",
+                              vfid);
+                       return rc;
+               }
+cleanup:
+               /* Mark VF for ack and clean pending state */
+               if (p_vf->state == VF_RESET)
+                       p_vf->state = VF_STOPPED;
+               ack_vfs[vfid / 32] |= (1 << (vfid % 32));
+               p_hwfn->pf_iov_info->pending_flr[rel_vf_id / 64] &=
+                   ~(1ULL << (rel_vf_id % 64));
+               p_hwfn->pf_iov_info->pending_events[rel_vf_id / 64] &=
+                   ~(1ULL << (rel_vf_id % 64));
+       }
+
+       return rc;
+}
+
+int qed_iov_vf_flr_cleanup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+{
+       u32 ack_vfs[VF_MAX_STATIC / 32];
+       int rc = 0;
+       u16 i;
+
+       memset(ack_vfs, 0, sizeof(u32) * (VF_MAX_STATIC / 32));
+
+       /* Since BRB <-> PRS interface can't be tested as part of the flr
+        * polling due to HW limitations, simply sleep a bit. And since
+        * there's no need to wait per-vf, do it before looping.
+        */
+       msleep(100);
+
+       for (i = 0; i < p_hwfn->cdev->p_iov_info->total_vfs; i++)
+               qed_iov_execute_vf_flr_cleanup(p_hwfn, p_ptt, i, ack_vfs);
+
+       rc = qed_mcp_ack_vf_flr(p_hwfn, p_ptt, ack_vfs);
+       return rc;
+}
+
+int qed_iov_mark_vf_flr(struct qed_hwfn *p_hwfn, u32 *p_disabled_vfs)
+{
+       u16 i, found = 0;
+
+       DP_VERBOSE(p_hwfn, QED_MSG_IOV, "Marking FLR-ed VFs\n");
+       for (i = 0; i < (VF_MAX_STATIC / 32); i++)
+               DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+                          "[%08x,...,%08x]: %08x\n",
+                          i * 32, (i + 1) * 32 - 1, p_disabled_vfs[i]);
+
+       if (!p_hwfn->cdev->p_iov_info) {
+               DP_NOTICE(p_hwfn, "VF flr but no IOV\n");
+               return 0;
+       }
+
+       /* Mark VFs */
+       for (i = 0; i < p_hwfn->cdev->p_iov_info->total_vfs; i++) {
+               struct qed_vf_info *p_vf;
+               u8 vfid;
+
+               p_vf = qed_iov_get_vf_info(p_hwfn, i, false);
+               if (!p_vf)
+                       continue;
+
+               vfid = p_vf->abs_vf_id;
+               if ((1 << (vfid % 32)) & p_disabled_vfs[vfid / 32]) {
+                       u64 *p_flr = p_hwfn->pf_iov_info->pending_flr;
+                       u16 rel_vf_id = p_vf->relative_vf_id;
+
+                       DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+                                  "VF[%d] [rel %d] got FLR-ed\n",
+                                  vfid, rel_vf_id);
+
+                       p_vf->state = VF_RESET;
+
+                       /* No need to lock here, since pending_flr should
+                        * only change here and before ACKing the MFW. Since
+                        * the MFW will not trigger an additional attention
+                        * for VF FLR until the previous one is ACKed, we're
+                        * safe.
+                        */
+                       p_flr[rel_vf_id / 64] |= 1ULL << (rel_vf_id % 64);
+                       found = 1;
+               }
+       }
+
+       return found;
+}
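+
+/* A minimal usage sketch for the FLR path, assuming disabled_vfs was read
+ * by the MFW attention handler:
+ *
+ *	if (qed_iov_mark_vf_flr(p_hwfn, disabled_vfs))
+ *		qed_schedule_iov(p_hwfn, QED_IOV_WQ_FLR_FLAG);
+ *
+ * The scheduled task then calls qed_iov_vf_flr_cleanup(), which polls the
+ * DORQ/PBF usage counters per VF and finally ACKs the MFW via
+ * qed_mcp_ack_vf_flr().
+ */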
+
+static void qed_iov_get_link(struct qed_hwfn *p_hwfn,
+                            u16 vfid,
+                            struct qed_mcp_link_params *p_params,
+                            struct qed_mcp_link_state *p_link,
+                            struct qed_mcp_link_capabilities *p_caps)
+{
+       struct qed_vf_info *p_vf = qed_iov_get_vf_info(p_hwfn,
+                                                      vfid,
+                                                      false);
+       struct qed_bulletin_content *p_bulletin;
+
+       if (!p_vf)
+               return;
+
+       p_bulletin = p_vf->bulletin.p_virt;
+
+       if (p_params)
+               __qed_vf_get_link_params(p_hwfn, p_params, p_bulletin);
+       if (p_link)
+               __qed_vf_get_link_state(p_hwfn, p_link, p_bulletin);
+       if (p_caps)
+               __qed_vf_get_link_caps(p_hwfn, p_caps, p_bulletin);
+}
+
+static void qed_iov_process_mbx_req(struct qed_hwfn *p_hwfn,
+                                   struct qed_ptt *p_ptt, int vfid)
+{
+       struct qed_iov_vf_mbx *mbx;
+       struct qed_vf_info *p_vf;
+
+       p_vf = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
+       if (!p_vf)
+               return;
+
+       mbx = &p_vf->vf_mbx;
+
+       /* qed_iov_process_mbx_request */
+       DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+                  "VF[%02x]: Processing mailbox message\n", p_vf->abs_vf_id);
+
+       mbx->first_tlv = mbx->req_virt->first_tlv;
+
+       /* check if tlv type is known */
+       if (qed_iov_tlv_supported(mbx->first_tlv.tl.type)) {
+               switch (mbx->first_tlv.tl.type) {
+               case CHANNEL_TLV_ACQUIRE:
+                       qed_iov_vf_mbx_acquire(p_hwfn, p_ptt, p_vf);
+                       break;
+               case CHANNEL_TLV_VPORT_START:
+                       qed_iov_vf_mbx_start_vport(p_hwfn, p_ptt, p_vf);
+                       break;
+               case CHANNEL_TLV_VPORT_TEARDOWN:
+                       qed_iov_vf_mbx_stop_vport(p_hwfn, p_ptt, p_vf);
+                       break;
+               case CHANNEL_TLV_START_RXQ:
+                       qed_iov_vf_mbx_start_rxq(p_hwfn, p_ptt, p_vf);
+                       break;
+               case CHANNEL_TLV_START_TXQ:
+                       qed_iov_vf_mbx_start_txq(p_hwfn, p_ptt, p_vf);
+                       break;
+               case CHANNEL_TLV_STOP_RXQS:
+                       qed_iov_vf_mbx_stop_rxqs(p_hwfn, p_ptt, p_vf);
+                       break;
+               case CHANNEL_TLV_STOP_TXQS:
+                       qed_iov_vf_mbx_stop_txqs(p_hwfn, p_ptt, p_vf);
+                       break;
+               case CHANNEL_TLV_UPDATE_RXQ:
+                       qed_iov_vf_mbx_update_rxqs(p_hwfn, p_ptt, p_vf);
+                       break;
+               case CHANNEL_TLV_VPORT_UPDATE:
+                       qed_iov_vf_mbx_vport_update(p_hwfn, p_ptt, p_vf);
+                       break;
+               case CHANNEL_TLV_UCAST_FILTER:
+                       qed_iov_vf_mbx_ucast_filter(p_hwfn, p_ptt, p_vf);
+                       break;
+               case CHANNEL_TLV_CLOSE:
+                       qed_iov_vf_mbx_close(p_hwfn, p_ptt, p_vf);
+                       break;
+               case CHANNEL_TLV_INT_CLEANUP:
+                       qed_iov_vf_mbx_int_cleanup(p_hwfn, p_ptt, p_vf);
+                       break;
+               case CHANNEL_TLV_RELEASE:
+                       qed_iov_vf_mbx_release(p_hwfn, p_ptt, p_vf);
+                       break;
+               }
+       } else {
+               /* unknown TLV - this may belong to a VF driver from the future
+                * - a version written after this PF driver was written, which
+                * supports features unknown as of yet. Too bad since we don't
+                * support them. Or this may be because someone wrote a crappy
+                * VF driver and is sending garbage over the channel.
+                */
+               DP_NOTICE(p_hwfn,
+                         "VF[%02x]: unknown TLV. type %04x length %04x padding %08x reply address %llu\n",
+                         p_vf->abs_vf_id,
+                         mbx->first_tlv.tl.type,
+                         mbx->first_tlv.tl.length,
+                         mbx->first_tlv.padding, mbx->first_tlv.reply_address);
+
+               /* Try replying in case reply address matches the acquisition's
+                * posted address.
+                */
+               if (p_vf->acquire.first_tlv.reply_address &&
+                   (mbx->first_tlv.reply_address ==
+                    p_vf->acquire.first_tlv.reply_address)) {
+                       qed_iov_prepare_resp(p_hwfn, p_ptt, p_vf,
+                                            mbx->first_tlv.tl.type,
+                                            sizeof(struct pfvf_def_resp_tlv),
+                                            PFVF_STATUS_NOT_SUPPORTED);
+               } else {
+                       DP_VERBOSE(p_hwfn,
+                                  QED_MSG_IOV,
+                                  "VF[%02x]: Can't respond to TLV - no valid reply address\n",
+                                  p_vf->abs_vf_id);
+               }
+       }
+}
+
+void qed_iov_pf_add_pending_events(struct qed_hwfn *p_hwfn, u8 vfid)
+{
+       u64 add_bit = 1ULL << (vfid % 64);
+
+       p_hwfn->pf_iov_info->pending_events[vfid / 64] |= add_bit;
+}
+
+static void qed_iov_pf_get_and_clear_pending_events(struct qed_hwfn *p_hwfn,
+                                                   u64 *events)
+{
+       u64 *p_pending_events = p_hwfn->pf_iov_info->pending_events;
+
+       memcpy(events, p_pending_events, sizeof(u64) * QED_VF_ARRAY_LENGTH);
+       memset(p_pending_events, 0, sizeof(u64) * QED_VF_ARRAY_LENGTH);
+}
+
+static int qed_sriov_vfpf_msg(struct qed_hwfn *p_hwfn,
+                             u16 abs_vfid, struct regpair *vf_msg)
+{
+       u8 min = (u8)p_hwfn->cdev->p_iov_info->first_vf_in_pf;
+       struct qed_vf_info *p_vf;
+
+       if (!qed_iov_pf_sanity_check(p_hwfn, (int)abs_vfid - min)) {
+               DP_VERBOSE(p_hwfn,
+                          QED_MSG_IOV,
+                          "Got a message from VF [abs 0x%08x] that cannot be handled by PF\n",
+                          abs_vfid);
+               return 0;
+       }
+       p_vf = &p_hwfn->pf_iov_info->vfs_array[(u8)abs_vfid - min];
+
+       /* Record the physical address of the request so that the handler
+        * can later copy the message from it.
+        */
+       p_vf->vf_mbx.pending_req = (((u64)vf_msg->hi) << 32) | vf_msg->lo;
+
+       /* Mark the event and schedule the workqueue */
+       qed_iov_pf_add_pending_events(p_hwfn, p_vf->relative_vf_id);
+       qed_schedule_iov(p_hwfn, QED_IOV_WQ_MSG_FLAG);
+
+       return 0;
+}
+
+int qed_sriov_eqe_event(struct qed_hwfn *p_hwfn,
+                       u8 opcode, __le16 echo, union event_ring_data *data)
+{
+       switch (opcode) {
+       case COMMON_EVENT_VF_PF_CHANNEL:
+               return qed_sriov_vfpf_msg(p_hwfn, le16_to_cpu(echo),
+                                         &data->vf_pf_channel.msg_addr);
+       default:
+               DP_INFO(p_hwfn->cdev, "Unknown sriov eqe event 0x%02x\n",
+                       opcode);
+               return -EINVAL;
+       }
+}
+
+u16 qed_iov_get_next_active_vf(struct qed_hwfn *p_hwfn, u16 rel_vf_id)
+{
+       struct qed_hw_sriov_info *p_iov = p_hwfn->cdev->p_iov_info;
+       u16 i;
+
+       if (!p_iov)
+               goto out;
+
+       for (i = rel_vf_id; i < p_iov->total_vfs; i++)
+               if (qed_iov_is_valid_vfid(p_hwfn, i, true))
+                       return i;
+
+out:
+       return MAX_NUM_VFS;
+}
+
+static int qed_iov_copy_vf_msg(struct qed_hwfn *p_hwfn, struct qed_ptt *ptt,
+                              int vfid)
+{
+       struct qed_dmae_params params;
+       struct qed_vf_info *vf_info;
+
+       vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
+       if (!vf_info)
+               return -EINVAL;
+
+       memset(&params, 0, sizeof(struct qed_dmae_params));
+       params.flags = QED_DMAE_FLAG_VF_SRC | QED_DMAE_FLAG_COMPLETION_DST;
+       params.src_vfid = vf_info->abs_vf_id;
+
+       if (qed_dmae_host2host(p_hwfn, ptt,
+                              vf_info->vf_mbx.pending_req,
+                              vf_info->vf_mbx.req_phys,
+                              sizeof(union vfpf_tlvs) / 4, &params)) {
+               DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+                          "Failed to copy message from VF 0x%02x\n", vfid);
+
+               return -EIO;
+       }
+
+       return 0;
+}
+
+static void qed_iov_bulletin_set_forced_mac(struct qed_hwfn *p_hwfn,
+                                           u8 *mac, int vfid)
+{
+       struct qed_vf_info *vf_info;
+       u64 feature;
+
+       vf_info = qed_iov_get_vf_info(p_hwfn, (u16)vfid, true);
+       if (!vf_info) {
+               DP_NOTICE(p_hwfn->cdev,
+                         "Can not set forced MAC, invalid vfid [%d]\n", vfid);
+               return;
+       }
+
+       feature = 1 << MAC_ADDR_FORCED;
+       memcpy(vf_info->bulletin.p_virt->mac, mac, ETH_ALEN);
+
+       vf_info->bulletin.p_virt->valid_bitmap |= feature;
+       /* Forced MAC will disable MAC_ADDR */
+       vf_info->bulletin.p_virt->valid_bitmap &=
+                               ~(1 << VFPF_BULLETIN_MAC_ADDR);
+
+       qed_iov_configure_vport_forced(p_hwfn, vf_info, feature);
+}
+
+void qed_iov_bulletin_set_forced_vlan(struct qed_hwfn *p_hwfn,
+                                     u16 pvid, int vfid)
+{
+       struct qed_vf_info *vf_info;
+       u64 feature;
+
+       vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
+       if (!vf_info) {
+               DP_NOTICE(p_hwfn->cdev,
+                         "Can not set forced VLAN, invalid vfid [%d]\n", vfid);
+               return;
+       }
+
+       feature = 1 << VLAN_ADDR_FORCED;
+       vf_info->bulletin.p_virt->pvid = pvid;
+       if (pvid)
+               vf_info->bulletin.p_virt->valid_bitmap |= feature;
+       else
+               vf_info->bulletin.p_virt->valid_bitmap &= ~feature;
+
+       qed_iov_configure_vport_forced(p_hwfn, vf_info, feature);
+}
+
+static bool qed_iov_vf_has_vport_instance(struct qed_hwfn *p_hwfn, int vfid)
+{
+       struct qed_vf_info *p_vf_info;
+
+       p_vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
+       if (!p_vf_info)
+               return false;
+
+       return !!p_vf_info->vport_instance;
+}
+
+bool qed_iov_is_vf_stopped(struct qed_hwfn *p_hwfn, int vfid)
+{
+       struct qed_vf_info *p_vf_info;
+
+       p_vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
+       if (!p_vf_info)
+               return true;
+
+       return p_vf_info->state == VF_STOPPED;
+}
+
+static bool qed_iov_spoofchk_get(struct qed_hwfn *p_hwfn, int vfid)
+{
+       struct qed_vf_info *vf_info;
+
+       vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
+       if (!vf_info)
+               return false;
+
+       return vf_info->spoof_chk;
+}
+
+int qed_iov_spoofchk_set(struct qed_hwfn *p_hwfn, int vfid, bool val)
+{
+       struct qed_vf_info *vf;
+       int rc = -EINVAL;
+
+       if (!qed_iov_pf_sanity_check(p_hwfn, vfid)) {
+               DP_NOTICE(p_hwfn,
+                         "SR-IOV sanity check failed, can't set spoofchk\n");
+               goto out;
+       }
+
+       vf = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
+       if (!vf)
+               goto out;
+
+       if (!qed_iov_vf_has_vport_instance(p_hwfn, vfid)) {
+               /* After VF VPORT start PF will configure spoof check */
+               vf->req_spoofchk_val = val;
+               rc = 0;
+               goto out;
+       }
+
+       rc = __qed_iov_spoofchk_set(p_hwfn, vf, val);
+
+out:
+       return rc;
+}
+
+static u8 *qed_iov_bulletin_get_forced_mac(struct qed_hwfn *p_hwfn,
+                                          u16 rel_vf_id)
+{
+       struct qed_vf_info *p_vf;
+
+       p_vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, true);
+       if (!p_vf || !p_vf->bulletin.p_virt)
+               return NULL;
+
+       if (!(p_vf->bulletin.p_virt->valid_bitmap & (1 << MAC_ADDR_FORCED)))
+               return NULL;
+
+       return p_vf->bulletin.p_virt->mac;
+}
+
+u16 qed_iov_bulletin_get_forced_vlan(struct qed_hwfn *p_hwfn, u16 rel_vf_id)
+{
+       struct qed_vf_info *p_vf;
+
+       p_vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, true);
+       if (!p_vf || !p_vf->bulletin.p_virt)
+               return 0;
+
+       if (!(p_vf->bulletin.p_virt->valid_bitmap & (1 << VLAN_ADDR_FORCED)))
+               return 0;
+
+       return p_vf->bulletin.p_virt->pvid;
+}
+
+static int qed_iov_configure_tx_rate(struct qed_hwfn *p_hwfn,
+                                    struct qed_ptt *p_ptt, int vfid, int val)
+{
+       struct qed_vf_info *vf;
+       u8 abs_vp_id = 0;
+       int rc;
+
+       vf = qed_iov_get_vf_info(p_hwfn, (u16)vfid, true);
+       if (!vf)
+               return -EINVAL;
+
+       rc = qed_fw_vport(p_hwfn, vf->vport_id, &abs_vp_id);
+       if (rc)
+               return rc;
+
+       return qed_init_vport_rl(p_hwfn, p_ptt, abs_vp_id, (u32)val);
+}
+
+int qed_iov_configure_min_tx_rate(struct qed_dev *cdev, int vfid, u32 rate)
+{
+       struct qed_vf_info *vf;
+       u8 vport_id;
+       int i;
+
+       for_each_hwfn(cdev, i) {
+               struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+
+               if (!qed_iov_pf_sanity_check(p_hwfn, vfid)) {
+                       DP_NOTICE(p_hwfn,
+                                 "SR-IOV sanity check failed, can't set min rate\n");
+                       return -EINVAL;
+               }
+       }
+
+       vf = qed_iov_get_vf_info(QED_LEADING_HWFN(cdev), (u16)vfid, true);
+       if (!vf)
+               return -EINVAL;
+
+       vport_id = vf->vport_id;
+
+       return qed_configure_vport_wfq(cdev, vport_id, rate);
+}
+
+static int qed_iov_get_vf_min_rate(struct qed_hwfn *p_hwfn, int vfid)
+{
+       struct qed_wfq_data *vf_vp_wfq;
+       struct qed_vf_info *vf_info;
+
+       vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
+       if (!vf_info)
+               return 0;
+
+       vf_vp_wfq = &p_hwfn->qm_info.wfq_data[vf_info->vport_id];
+
+       if (vf_vp_wfq->configured)
+               return vf_vp_wfq->min_speed;
+       else
+               return 0;
+}
+
+/**
+ * qed_schedule_iov - schedules IOV task for VF and PF
+ * @hwfn: hardware function pointer
+ * @flag: IOV flag for VF/PF
+ */
+void qed_schedule_iov(struct qed_hwfn *hwfn, enum qed_iov_wq_flag flag)
+{
+       smp_mb__before_atomic();
+       set_bit(flag, &hwfn->iov_task_flags);
+       smp_mb__after_atomic();
+       DP_VERBOSE(hwfn, QED_MSG_IOV, "Scheduling iov task [Flag: %d]\n", flag);
+       queue_delayed_work(hwfn->iov_wq, &hwfn->iov_task, 0);
+}
+
+void qed_vf_start_iov_wq(struct qed_dev *cdev)
+{
+       int i;
+
+       for_each_hwfn(cdev, i)
+           queue_delayed_work(cdev->hwfns[i].iov_wq,
+                              &cdev->hwfns[i].iov_task, 0);
+}
+
+int qed_sriov_disable(struct qed_dev *cdev, bool pci_enabled)
+{
+       int i, j;
+
+       for_each_hwfn(cdev, i)
+           if (cdev->hwfns[i].iov_wq)
+               flush_workqueue(cdev->hwfns[i].iov_wq);
+
+       /* Mark VFs for disablement */
+       qed_iov_set_vfs_to_disable(cdev, true);
+
+       if (cdev->p_iov_info && cdev->p_iov_info->num_vfs && pci_enabled)
+               pci_disable_sriov(cdev->pdev);
+
+       for_each_hwfn(cdev, i) {
+               struct qed_hwfn *hwfn = &cdev->hwfns[i];
+               struct qed_ptt *ptt = qed_ptt_acquire(hwfn);
+
+               /* Failure to acquire the ptt in 100G creates an odd error
+                * where the first engine has already released IOV.
+                */
+               if (!ptt) {
+                       DP_ERR(hwfn, "Failed to acquire ptt\n");
+                       return -EBUSY;
+               }
+
+               /* Clean WFQ db and configure equal weight for all vports */
+               qed_clean_wfq_db(hwfn, ptt);
+
+               qed_for_each_vf(hwfn, j) {
+                       int k;
+
+                       if (!qed_iov_is_valid_vfid(hwfn, j, true))
+                               continue;
+
+                       /* Wait until VF is disabled before releasing */
+                       for (k = 0; k < 100; k++) {
+                               if (!qed_iov_is_vf_stopped(hwfn, j))
+                                       msleep(20);
+                               else
+                                       break;
+                       }
+
+                       if (k < 100)
+                               qed_iov_release_hw_for_vf(&cdev->hwfns[i],
+                                                         ptt, j);
+                       else
+                               DP_ERR(hwfn,
+                                      "Timeout waiting for VF's FLR to end\n");
+               }
+
+               qed_ptt_release(hwfn, ptt);
+       }
+
+       qed_iov_set_vfs_to_disable(cdev, false);
+
+       return 0;
+}
+
+static int qed_sriov_enable(struct qed_dev *cdev, int num)
+{
+       struct qed_sb_cnt_info sb_cnt_info;
+       int i, j, rc;
+
+       if (num >= RESC_NUM(&cdev->hwfns[0], QED_VPORT)) {
+               DP_NOTICE(cdev, "Can start at most %d VFs\n",
+                         RESC_NUM(&cdev->hwfns[0], QED_VPORT) - 1);
+               return -EINVAL;
+       }
+
+       /* Initialize HW for VF access */
+       for_each_hwfn(cdev, j) {
+               struct qed_hwfn *hwfn = &cdev->hwfns[j];
+               struct qed_ptt *ptt = qed_ptt_acquire(hwfn);
+               int num_sbs = 0, limit = 16;
+
+               if (!ptt) {
+                       DP_ERR(hwfn, "Failed to acquire ptt\n");
+                       rc = -EBUSY;
+                       goto err;
+               }
+
+               if (IS_MF_DEFAULT(hwfn))
+                       limit = MAX_NUM_VFS_BB / hwfn->num_funcs_on_engine;
+
+               memset(&sb_cnt_info, 0, sizeof(sb_cnt_info));
+               qed_int_get_num_sbs(hwfn, &sb_cnt_info);
+               num_sbs = min_t(int, sb_cnt_info.sb_free_blk, limit);
+
+               for (i = 0; i < num; i++) {
+                       if (!qed_iov_is_valid_vfid(hwfn, i, false))
+                               continue;
+
+                       rc = qed_iov_init_hw_for_vf(hwfn,
+                                                   ptt, i, num_sbs / num);
+                       if (rc) {
+                               DP_ERR(cdev, "Failed to enable VF[%d]\n", i);
+                               qed_ptt_release(hwfn, ptt);
+                               goto err;
+                       }
+               }
+
+               qed_ptt_release(hwfn, ptt);
+       }
+
+       /* Enable SRIOV PCIe functions */
+       rc = pci_enable_sriov(cdev->pdev, num);
+       if (rc) {
+               DP_ERR(cdev, "Failed to enable sriov [%d]\n", rc);
+               goto err;
+       }
+
+       return num;
+
+err:
+       qed_sriov_disable(cdev, false);
+       return rc;
+}
+
+static int qed_sriov_configure(struct qed_dev *cdev, int num_vfs_param)
+{
+       if (!IS_QED_SRIOV(cdev)) {
+               DP_VERBOSE(cdev, QED_MSG_IOV, "SR-IOV is not supported\n");
+               return -EOPNOTSUPP;
+       }
+
+       if (num_vfs_param)
+               return qed_sriov_enable(cdev, num_vfs_param);
+       else
+               return qed_sriov_disable(cdev, true);
+}
+
+static int qed_sriov_pf_set_mac(struct qed_dev *cdev, u8 *mac, int vfid)
+{
+       int i;
+
+       if (!IS_QED_SRIOV(cdev) || !IS_PF_SRIOV_ALLOC(&cdev->hwfns[0])) {
+               DP_VERBOSE(cdev, QED_MSG_IOV,
+                          "Cannot set a VF MAC; Sriov is not enabled\n");
+               return -EINVAL;
+       }
+
+       if (!qed_iov_is_valid_vfid(&cdev->hwfns[0], vfid, true)) {
+               DP_VERBOSE(cdev, QED_MSG_IOV,
+                          "Cannot set VF[%d] MAC (VF is not active)\n", vfid);
+               return -EINVAL;
+       }
+
+       for_each_hwfn(cdev, i) {
+               struct qed_hwfn *hwfn = &cdev->hwfns[i];
+               struct qed_public_vf_info *vf_info;
+
+               vf_info = qed_iov_get_public_vf_info(hwfn, vfid, true);
+               if (!vf_info)
+                       continue;
+
+               /* Set the forced MAC, and schedule the IOV task */
+               ether_addr_copy(vf_info->forced_mac, mac);
+               qed_schedule_iov(hwfn, QED_IOV_WQ_SET_UNICAST_FILTER_FLAG);
+       }
+
+       return 0;
+}
+
+static int qed_sriov_pf_set_vlan(struct qed_dev *cdev, u16 vid, int vfid)
+{
+       int i;
+
+       if (!IS_QED_SRIOV(cdev) || !IS_PF_SRIOV_ALLOC(&cdev->hwfns[0])) {
+               DP_VERBOSE(cdev, QED_MSG_IOV,
+                          "Cannot set a VF VLAN; Sriov is not enabled\n");
+               return -EINVAL;
+       }
+
+       if (!qed_iov_is_valid_vfid(&cdev->hwfns[0], vfid, true)) {
+               DP_VERBOSE(cdev, QED_MSG_IOV,
+                          "Cannot set VF[%d] VLAN (VF is not active)\n", vfid);
+               return -EINVAL;
+       }
+
+       for_each_hwfn(cdev, i) {
+               struct qed_hwfn *hwfn = &cdev->hwfns[i];
+               struct qed_public_vf_info *vf_info;
+
+               vf_info = qed_iov_get_public_vf_info(hwfn, vfid, true);
+               if (!vf_info)
+                       continue;
+
+               /* Set the forced vlan, and schedule the IOV task */
+               vf_info->forced_vlan = vid;
+               qed_schedule_iov(hwfn, QED_IOV_WQ_SET_UNICAST_FILTER_FLAG);
+       }
+
+       return 0;
+}
+
+static int qed_get_vf_config(struct qed_dev *cdev,
+                            int vf_id, struct ifla_vf_info *ivi)
+{
+       struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+       struct qed_public_vf_info *vf_info;
+       struct qed_mcp_link_state link;
+       u32 tx_rate;
+
+       /* Sanitize request */
+       if (IS_VF(cdev))
+               return -EINVAL;
+
+       if (!qed_iov_is_valid_vfid(&cdev->hwfns[0], vf_id, true)) {
+               DP_VERBOSE(cdev, QED_MSG_IOV,
+                          "VF index [%d] isn't active\n", vf_id);
+               return -EINVAL;
+       }
+
+       vf_info = qed_iov_get_public_vf_info(hwfn, vf_id, true);
+
+       qed_iov_get_link(hwfn, vf_id, NULL, &link, NULL);
+
+       /* Fill information about VF */
+       ivi->vf = vf_id;
+
+       if (is_valid_ether_addr(vf_info->forced_mac))
+               ether_addr_copy(ivi->mac, vf_info->forced_mac);
+       else
+               ether_addr_copy(ivi->mac, vf_info->mac);
+
+       ivi->vlan = vf_info->forced_vlan;
+       ivi->spoofchk = qed_iov_spoofchk_get(hwfn, vf_id);
+       ivi->linkstate = vf_info->link_state;
+       tx_rate = vf_info->tx_rate;
+       ivi->max_tx_rate = tx_rate ? tx_rate : link.speed;
+       ivi->min_tx_rate = qed_iov_get_vf_min_rate(hwfn, vf_id);
+
+       return 0;
+}
+
+void qed_inform_vf_link_state(struct qed_hwfn *hwfn)
+{
+       struct qed_mcp_link_capabilities caps;
+       struct qed_mcp_link_params params;
+       struct qed_mcp_link_state link;
+       int i;
+
+       if (!hwfn->pf_iov_info)
+               return;
+
+       /* Update bulletin of all future possible VFs with link configuration */
+       for (i = 0; i < hwfn->cdev->p_iov_info->total_vfs; i++) {
+               struct qed_public_vf_info *vf_info;
+
+               vf_info = qed_iov_get_public_vf_info(hwfn, i, false);
+               if (!vf_info)
+                       continue;
+
+               memcpy(&params, qed_mcp_get_link_params(hwfn), sizeof(params));
+               memcpy(&link, qed_mcp_get_link_state(hwfn), sizeof(link));
+               memcpy(&caps, qed_mcp_get_link_capabilities(hwfn),
+                      sizeof(caps));
+
+               /* Modify link according to the VF's configured link state */
+               switch (vf_info->link_state) {
+               case IFLA_VF_LINK_STATE_DISABLE:
+                       link.link_up = false;
+                       break;
+               case IFLA_VF_LINK_STATE_ENABLE:
+                       link.link_up = true;
+                       /* Set speed according to the maximum supported by
+                        * HW - that is 40G for regular devices and 100G for
+                        * CMT mode devices.
+                        */
+                       link.speed = (hwfn->cdev->num_hwfns > 1) ?
+                                    100000 : 40000;
+               default:
+                       /* In auto mode pass PF link image to VF */
+                       break;
+               }
+
+               if (link.link_up && vf_info->tx_rate) {
+                       struct qed_ptt *ptt;
+                       int rate;
+
+                       rate = min_t(int, vf_info->tx_rate, link.speed);
+
+                       ptt = qed_ptt_acquire(hwfn);
+                       if (!ptt) {
+                               DP_NOTICE(hwfn, "Failed to acquire PTT\n");
+                               return;
+                       }
+
+                       if (!qed_iov_configure_tx_rate(hwfn, ptt, i, rate)) {
+                               vf_info->tx_rate = rate;
+                               link.speed = rate;
+                       }
+
+                       qed_ptt_release(hwfn, ptt);
+               }
+
+               qed_iov_set_link(hwfn, i, &params, &link, &caps);
+       }
+
+       qed_schedule_iov(hwfn, QED_IOV_WQ_BULLETIN_UPDATE_FLAG);
+}
+
+static int qed_set_vf_link_state(struct qed_dev *cdev,
+                                int vf_id, int link_state)
+{
+       int i;
+
+       /* Sanitize request */
+       if (IS_VF(cdev))
+               return -EINVAL;
+
+       if (!qed_iov_is_valid_vfid(&cdev->hwfns[0], vf_id, true)) {
+               DP_VERBOSE(cdev, QED_MSG_IOV,
+                          "VF index [%d] isn't active\n", vf_id);
+               return -EINVAL;
+       }
+
+       /* Handle configuration of link state */
+       for_each_hwfn(cdev, i) {
+               struct qed_hwfn *hwfn = &cdev->hwfns[i];
+               struct qed_public_vf_info *vf;
+
+               vf = qed_iov_get_public_vf_info(hwfn, vf_id, true);
+               if (!vf)
+                       continue;
+
+               if (vf->link_state == link_state)
+                       continue;
+
+               vf->link_state = link_state;
+               qed_inform_vf_link_state(&cdev->hwfns[i]);
+       }
+
+       return 0;
+}
+
+static int qed_spoof_configure(struct qed_dev *cdev, int vfid, bool val)
+{
+       int i, rc = -EINVAL;
+
+       for_each_hwfn(cdev, i) {
+               struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+
+               rc = qed_iov_spoofchk_set(p_hwfn, vfid, val);
+               if (rc)
+                       break;
+       }
+
+       return rc;
+}
+
+static int qed_configure_max_vf_rate(struct qed_dev *cdev, int vfid, int rate)
+{
+       int i;
+
+       for_each_hwfn(cdev, i) {
+               struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+               struct qed_public_vf_info *vf;
+
+               if (!qed_iov_pf_sanity_check(p_hwfn, vfid)) {
+                       DP_NOTICE(p_hwfn,
+                                 "SR-IOV sanity check failed, can't set tx rate\n");
+                       return -EINVAL;
+               }
+
+               vf = qed_iov_get_public_vf_info(p_hwfn, vfid, true);
+               if (!vf)
+                       return -EINVAL;
+
+               vf->tx_rate = rate;
+
+               qed_inform_vf_link_state(p_hwfn);
+       }
+
+       return 0;
+}
+
+static int qed_set_vf_rate(struct qed_dev *cdev,
+                          int vfid, u32 min_rate, u32 max_rate)
+{
+       int rc_min = 0, rc_max = 0;
+
+       if (max_rate)
+               rc_max = qed_configure_max_vf_rate(cdev, vfid, max_rate);
+
+       if (min_rate)
+               rc_min = qed_iov_configure_min_tx_rate(cdev, vfid, min_rate);
+
+       if (rc_max | rc_min)
+               return -EINVAL;
+
+       return 0;
+}
+
+static void qed_handle_vf_msg(struct qed_hwfn *hwfn)
+{
+       u64 events[QED_VF_ARRAY_LENGTH];
+       struct qed_ptt *ptt;
+       int i;
+
+       ptt = qed_ptt_acquire(hwfn);
+       if (!ptt) {
+               DP_VERBOSE(hwfn, QED_MSG_IOV,
+                          "Can't acquire PTT; re-scheduling\n");
+               qed_schedule_iov(hwfn, QED_IOV_WQ_MSG_FLAG);
+               return;
+       }
+
+       qed_iov_pf_get_and_clear_pending_events(hwfn, events);
+
+       DP_VERBOSE(hwfn, QED_MSG_IOV,
+                  "Event mask of VF events: 0x%llx 0x%llx 0x%llx\n",
+                  events[0], events[1], events[2]);
+
+       qed_for_each_vf(hwfn, i) {
+               /* Skip VFs with no pending messages */
+               if (!(events[i / 64] & (1ULL << (i % 64))))
+                       continue;
+
+               DP_VERBOSE(hwfn, QED_MSG_IOV,
+                          "Handling VF message from VF 0x%02x [Abs 0x%02x]\n",
+                          i, hwfn->cdev->p_iov_info->first_vf_in_pf + i);
+
+               /* Copy VF's message to PF's request buffer for that VF */
+               if (qed_iov_copy_vf_msg(hwfn, ptt, i))
+                       continue;
+
+               qed_iov_process_mbx_req(hwfn, ptt, i);
+       }
+
+       qed_ptt_release(hwfn, ptt);
+}
+
+static void qed_handle_pf_set_vf_unicast(struct qed_hwfn *hwfn)
+{
+       int i;
+
+       qed_for_each_vf(hwfn, i) {
+               struct qed_public_vf_info *info;
+               bool update = false;
+               u8 *mac;
+
+               info = qed_iov_get_public_vf_info(hwfn, i, true);
+               if (!info)
+                       continue;
+
+               /* Update data on bulletin board */
+               mac = qed_iov_bulletin_get_forced_mac(hwfn, i);
+               if (is_valid_ether_addr(info->forced_mac) &&
+                   (!mac || !ether_addr_equal(mac, info->forced_mac))) {
+                       DP_VERBOSE(hwfn,
+                                  QED_MSG_IOV,
+                                  "Handling PF setting of VF MAC to VF 0x%02x [Abs 0x%02x]\n",
+                                  i,
+                                  hwfn->cdev->p_iov_info->first_vf_in_pf + i);
+
+                       /* Update bulletin board with forced MAC */
+                       qed_iov_bulletin_set_forced_mac(hwfn,
+                                                       info->forced_mac, i);
+                       update = true;
+               }
+
+               if (qed_iov_bulletin_get_forced_vlan(hwfn, i) ^
+                   info->forced_vlan) {
+                       DP_VERBOSE(hwfn,
+                                  QED_MSG_IOV,
+                                  "Handling PF setting of pvid [0x%04x] to VF 0x%02x [Abs 0x%02x]\n",
+                                  info->forced_vlan,
+                                  i,
+                                  hwfn->cdev->p_iov_info->first_vf_in_pf + i);
+                       qed_iov_bulletin_set_forced_vlan(hwfn,
+                                                        info->forced_vlan, i);
+                       update = true;
+               }
+
+               if (update)
+                       qed_schedule_iov(hwfn, QED_IOV_WQ_BULLETIN_UPDATE_FLAG);
+       }
+}
+
+static void qed_handle_bulletin_post(struct qed_hwfn *hwfn)
+{
+       struct qed_ptt *ptt;
+       int i;
+
+       ptt = qed_ptt_acquire(hwfn);
+       if (!ptt) {
+               DP_NOTICE(hwfn, "Failed allocating a ptt entry\n");
+               qed_schedule_iov(hwfn, QED_IOV_WQ_BULLETIN_UPDATE_FLAG);
+               return;
+       }
+
+       qed_for_each_vf(hwfn, i)
+           qed_iov_post_vf_bulletin(hwfn, i, ptt);
+
+       qed_ptt_release(hwfn, ptt);
+}
+
+void qed_iov_pf_task(struct work_struct *work)
+{
+       struct qed_hwfn *hwfn = container_of(work, struct qed_hwfn,
+                                            iov_task.work);
+       int rc;
+
+       if (test_and_clear_bit(QED_IOV_WQ_STOP_WQ_FLAG, &hwfn->iov_task_flags))
+               return;
+
+       if (test_and_clear_bit(QED_IOV_WQ_FLR_FLAG, &hwfn->iov_task_flags)) {
+               struct qed_ptt *ptt = qed_ptt_acquire(hwfn);
+
+               if (!ptt) {
+                       qed_schedule_iov(hwfn, QED_IOV_WQ_FLR_FLAG);
+                       return;
+               }
+
+               rc = qed_iov_vf_flr_cleanup(hwfn, ptt);
+               if (rc)
+                       qed_schedule_iov(hwfn, QED_IOV_WQ_FLR_FLAG);
+
+               qed_ptt_release(hwfn, ptt);
+       }
+
+       if (test_and_clear_bit(QED_IOV_WQ_MSG_FLAG, &hwfn->iov_task_flags))
+               qed_handle_vf_msg(hwfn);
+
+       if (test_and_clear_bit(QED_IOV_WQ_SET_UNICAST_FILTER_FLAG,
+                              &hwfn->iov_task_flags))
+               qed_handle_pf_set_vf_unicast(hwfn);
+
+       if (test_and_clear_bit(QED_IOV_WQ_BULLETIN_UPDATE_FLAG,
+                              &hwfn->iov_task_flags))
+               qed_handle_bulletin_post(hwfn);
+}
+
+void qed_iov_wq_stop(struct qed_dev *cdev, bool schedule_first)
+{
+       int i;
+
+       for_each_hwfn(cdev, i) {
+               if (!cdev->hwfns[i].iov_wq)
+                       continue;
+
+               if (schedule_first) {
+                       qed_schedule_iov(&cdev->hwfns[i],
+                                        QED_IOV_WQ_STOP_WQ_FLAG);
+                       cancel_delayed_work_sync(&cdev->hwfns[i].iov_task);
+               }
+
+               flush_workqueue(cdev->hwfns[i].iov_wq);
+               destroy_workqueue(cdev->hwfns[i].iov_wq);
+       }
+}
+
+int qed_iov_wq_start(struct qed_dev *cdev)
+{
+       char name[NAME_SIZE];
+       int i;
+
+       for_each_hwfn(cdev, i) {
+               struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+
+               /* PFs need a dedicated workqueue only if they support IOV;
+                * VFs always require one.
+                */
+               if (IS_PF(p_hwfn->cdev) && !IS_PF_SRIOV(p_hwfn))
+                       continue;
+
+               snprintf(name, NAME_SIZE, "iov-%02x:%02x.%02x",
+                        cdev->pdev->bus->number,
+                        PCI_SLOT(cdev->pdev->devfn), p_hwfn->abs_pf_id);
+
+               p_hwfn->iov_wq = create_singlethread_workqueue(name);
+               if (!p_hwfn->iov_wq) {
+                       DP_NOTICE(p_hwfn, "Cannot create iov workqueue\n");
+                       return -ENOMEM;
+               }
+
+               if (IS_PF(cdev))
+                       INIT_DELAYED_WORK(&p_hwfn->iov_task, qed_iov_pf_task);
+               else
+                       INIT_DELAYED_WORK(&p_hwfn->iov_task, qed_iov_vf_task);
+       }
+
+       return 0;
+}
+
+const struct qed_iov_hv_ops qed_iov_ops_pass = {
+       .configure = &qed_sriov_configure,
+       .set_mac = &qed_sriov_pf_set_mac,
+       .set_vlan = &qed_sriov_pf_set_vlan,
+       .get_config = &qed_get_vf_config,
+       .set_link_state = &qed_set_vf_link_state,
+       .set_spoof = &qed_spoof_configure,
+       .set_rate = &qed_set_vf_rate,
+};
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.h b/drivers/net/ethernet/qlogic/qed/qed_sriov.h
new file mode 100644 (file)
index 0000000..0dd23e4
--- /dev/null
@@ -0,0 +1,395 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015 QLogic Corporation
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#ifndef _QED_SRIOV_H
+#define _QED_SRIOV_H
+#include <linux/types.h>
+#include "qed_vf.h"
+
+#define QED_ETH_VF_NUM_MAC_FILTERS 1
+#define QED_ETH_VF_NUM_VLAN_FILTERS 2
+#define QED_VF_ARRAY_LENGTH (3)
+
+#ifdef CONFIG_QED_SRIOV
+#define IS_VF(cdev)             ((cdev)->b_is_vf)
+#define IS_PF(cdev)             (!((cdev)->b_is_vf))
+#define IS_PF_SRIOV(p_hwfn)     (!!((p_hwfn)->cdev->p_iov_info))
+#else
+#define IS_VF(cdev)             (0)
+#define IS_PF(cdev)             (1)
+#define IS_PF_SRIOV(p_hwfn)     (0)
+#endif
+#define IS_PF_SRIOV_ALLOC(p_hwfn)       (!!((p_hwfn)->pf_iov_info))
+
+#define QED_MAX_VF_CHAINS_PER_PF 16
+
+#define QED_ETH_MAX_VF_NUM_VLAN_FILTERS        \
+       (MAX_NUM_VFS * QED_ETH_VF_NUM_VLAN_FILTERS)
+
+enum qed_iov_vport_update_flag {
+       QED_IOV_VP_UPDATE_ACTIVATE,
+       QED_IOV_VP_UPDATE_VLAN_STRIP,
+       QED_IOV_VP_UPDATE_TX_SWITCH,
+       QED_IOV_VP_UPDATE_MCAST,
+       QED_IOV_VP_UPDATE_ACCEPT_PARAM,
+       QED_IOV_VP_UPDATE_RSS,
+       QED_IOV_VP_UPDATE_ACCEPT_ANY_VLAN,
+       QED_IOV_VP_UPDATE_SGE_TPA,
+       QED_IOV_VP_UPDATE_MAX,
+};
+
+struct qed_public_vf_info {
+       /* These copies will later be reflected in the bulletin board,
+        * but this copy should be newer.
+        */
+       u8 forced_mac[ETH_ALEN];
+       u16 forced_vlan;
+       u8 mac[ETH_ALEN];
+
+       /* IFLA_VF_LINK_STATE_<X> */
+       int link_state;
+
+       /* Currently configured Tx rate in MB/sec. 0 if unconfigured */
+       int tx_rate;
+};
+
+/* This struct is part of qed_dev and contains data relevant to all hwfns;
+ * Initialized only if SR-IOV capability is exposed in PCIe config space.
+ */
+struct qed_hw_sriov_info {
+       int pos;                /* capability position */
+       int nres;               /* number of resources */
+       u32 cap;                /* SR-IOV Capabilities */
+       u16 ctrl;               /* SR-IOV Control */
+       u16 total_vfs;          /* total VFs associated with the PF */
+       u16 num_vfs;            /* number of vfs that have been started */
+       u16 initial_vfs;        /* initial VFs associated with the PF */
+       u16 nr_virtfn;          /* number of VFs available */
+       u16 offset;             /* first VF Routing ID offset */
+       u16 stride;             /* following VF stride */
+       u16 vf_device_id;       /* VF device id */
+       u32 pgsz;               /* page size for BAR alignment */
+       u8 link;                /* Function Dependency Link */
+
+       u32 first_vf_in_pf;
+};
+
+/* This mailbox is maintained per VF in its PF and contains all information
+ * required for sending / receiving a message.
+ */
+struct qed_iov_vf_mbx {
+       union vfpf_tlvs *req_virt;
+       dma_addr_t req_phys;
+       union pfvf_tlvs *reply_virt;
+       dma_addr_t reply_phys;
+
+       /* Address in VF where a pending message is located */
+       dma_addr_t pending_req;
+
+       u8 *offset;
+
+       /* saved VF request header */
+       struct vfpf_first_tlv first_tlv;
+};
+
+struct qed_vf_q_info {
+       u16 fw_rx_qid;
+       u16 fw_tx_qid;
+       u8 fw_cid;
+       u8 rxq_active;
+       u8 txq_active;
+};
+
+enum vf_state {
+       VF_FREE = 0,            /* VF ready to be acquired; holds no resources */
+       VF_ACQUIRED,            /* VF acquired, but not initialized */
+       VF_ENABLED,             /* VF, Enabled */
+       VF_RESET,               /* VF, FLR'd, pending cleanup */
+       VF_STOPPED              /* VF, Stopped */
+};
+
+struct qed_vf_vlan_shadow {
+       bool used;
+       u16 vid;
+};
+
+struct qed_vf_shadow_config {
+       /* Shadow copy of all guest vlans */
+       struct qed_vf_vlan_shadow vlans[QED_ETH_VF_NUM_VLAN_FILTERS + 1];
+
+       /* Shadow copy of all configured MACs; Empty if forcing MACs */
+       u8 macs[QED_ETH_VF_NUM_MAC_FILTERS][ETH_ALEN];
+       u8 inner_vlan_removal;
+};
+
+/* PFs maintain an array of this structure, per VF */
+struct qed_vf_info {
+       struct qed_iov_vf_mbx vf_mbx;
+       enum vf_state state;
+       bool b_init;
+       u8 to_disable;
+
+       struct qed_bulletin bulletin;
+       dma_addr_t vf_bulletin;
+
+       /* PF saves a copy of the last VF acquire message */
+       struct vfpf_acquire_tlv acquire;
+
+       u32 concrete_fid;
+       u16 opaque_fid;
+       u16 mtu;
+
+       u8 vport_id;
+       u8 relative_vf_id;
+       u8 abs_vf_id;
+#define QED_VF_ABS_ID(p_hwfn, p_vf)    (QED_PATH_ID(p_hwfn) ?                \
+                                        (p_vf)->abs_vf_id + MAX_NUM_VFS_BB : \
+                                        (p_vf)->abs_vf_id)
+
+       u8 vport_instance;
+       u8 num_rxqs;
+       u8 num_txqs;
+
+       u8 num_sbs;
+
+       u8 num_mac_filters;
+       u8 num_vlan_filters;
+       struct qed_vf_q_info vf_queues[QED_MAX_VF_CHAINS_PER_PF];
+       u16 igu_sbs[QED_MAX_VF_CHAINS_PER_PF];
+       u8 num_active_rxqs;
+       struct qed_public_vf_info p_vf_info;
+       bool spoof_chk;
+       bool req_spoofchk_val;
+
+       /* Stores the configuration requested by VF */
+       struct qed_vf_shadow_config shadow_config;
+
+       /* A bitfield using bulletin's valid-map bits, used to indicate
+        * which of the bulletin board features have been configured.
+        */
+       u64 configured_features;
+#define QED_IOV_CONFIGURED_FEATURES_MASK        ((1 << MAC_ADDR_FORCED) | \
+                                                (1 << VLAN_ADDR_FORCED))
+};
+
+/* This structure is part of qed_hwfn and used only for PFs that have sriov
+ * capability enabled.
+ */
+struct qed_pf_iov {
+       struct qed_vf_info vfs_array[MAX_NUM_VFS];
+       u64 pending_events[QED_VF_ARRAY_LENGTH];
+       u64 pending_flr[QED_VF_ARRAY_LENGTH];
+
+       /* Allocate message address continuously and split to each VF */
+       void *mbx_msg_virt_addr;
+       dma_addr_t mbx_msg_phys_addr;
+       u32 mbx_msg_size;
+       void *mbx_reply_virt_addr;
+       dma_addr_t mbx_reply_phys_addr;
+       u32 mbx_reply_size;
+       void *p_bulletins;
+       dma_addr_t bulletins_phys;
+       u32 bulletins_size;
+};
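+
+/* A rough sketch of how the shared mailbox area above is presumably split,
+ * assuming each VF gets one union-sized request and reply slot (the setup
+ * code itself lives elsewhere):
+ *
+ *	mbx_msg_size   = MAX_NUM_VFS * sizeof(union vfpf_tlvs);
+ *	mbx_reply_size = MAX_NUM_VFS * sizeof(union pfvf_tlvs);
+ *
+ * so that each VF's vf_mbx.req_virt/reply_virt can point at its own slot
+ * within the continuous allocation.
+ */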
+
+enum qed_iov_wq_flag {
+       QED_IOV_WQ_MSG_FLAG,
+       QED_IOV_WQ_SET_UNICAST_FILTER_FLAG,
+       QED_IOV_WQ_BULLETIN_UPDATE_FLAG,
+       QED_IOV_WQ_STOP_WQ_FLAG,
+       QED_IOV_WQ_FLR_FLAG,
+};
+
+#ifdef CONFIG_QED_SRIOV
+/**
+ * @brief - Given a VF index, return index of next [including that] active VF.
+ *
+ * @param p_hwfn
+ * @param rel_vf_id
+ *
+ * @return MAX_NUM_VFS in case there are no further active VFs, otherwise index.
+ */
+u16 qed_iov_get_next_active_vf(struct qed_hwfn *p_hwfn, u16 rel_vf_id);
+
+/**
+ * @brief Read sriov related information and allocate resources;
+ *  reads from configuration space, shmem, etc.
+ *
+ * @param p_hwfn
+ *
+ * @return int
+ */
+int qed_iov_hw_info(struct qed_hwfn *p_hwfn);
+
+/**
+ * @brief qed_add_tlv - place a given tlv on the tlv buffer at next offset
+ *
+ * @param p_hwfn
+ * @param offset
+ * @param type
+ * @param length
+ *
+ * @return pointer to the newly placed tlv
+ */
+void *qed_add_tlv(struct qed_hwfn *p_hwfn, u8 **offset, u16 type, u16 length);
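+
+/* Minimal sketch of chaining TLVs with qed_add_tlv(), assuming a VF-side
+ * p_iov with a vf2pf_request buffer is in scope; the full TLV layout of a
+ * real message is defined by the channel structures:
+ *
+ *	u8 *offset = (u8 *)p_iov->vf2pf_request;
+ *	struct vfpf_acquire_tlv *req;
+ *
+ *	req = qed_add_tlv(p_hwfn, &offset, CHANNEL_TLV_ACQUIRE, sizeof(*req));
+ *	// fill req->..., then place further TLVs at the advanced offset
+ */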
+
+/**
+ * @brief list the types and lengths of the tlvs on the buffer
+ *
+ * @param p_hwfn
+ * @param tlvs_list
+ */
+void qed_dp_tlv_list(struct qed_hwfn *p_hwfn, void *tlvs_list);
+
+/**
+ * @brief qed_iov_alloc - allocate sriov related resources
+ *
+ * @param p_hwfn
+ *
+ * @return int
+ */
+int qed_iov_alloc(struct qed_hwfn *p_hwfn);
+
+/**
+ * @brief qed_iov_setup - setup sriov related resources
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ */
+void qed_iov_setup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
+
+/**
+ * @brief qed_iov_free - free sriov related resources
+ *
+ * @param p_hwfn
+ */
+void qed_iov_free(struct qed_hwfn *p_hwfn);
+
+/**
+ * @brief free sriov related memory that was allocated during hw_prepare
+ *
+ * @param cdev
+ */
+void qed_iov_free_hw_info(struct qed_dev *cdev);
+
+/**
+ * @brief qed_sriov_eqe_event - handle async sriov event arrived on eqe.
+ *
+ * @param p_hwfn
+ * @param opcode
+ * @param echo
+ * @param data
+ */
+int qed_sriov_eqe_event(struct qed_hwfn *p_hwfn,
+                       u8 opcode, __le16 echo, union event_ring_data *data);
+
+/**
+ * @brief Mark structs of vfs that have been FLR-ed.
+ *
+ * @param p_hwfn
+ * @param disabled_vfs - bitmask of all VFs on path that were FLRed
+ *
+ * @return 1 iff one of the PF's vfs got FLRed. 0 otherwise.
+ */
+int qed_iov_mark_vf_flr(struct qed_hwfn *p_hwfn, u32 *disabled_vfs);
+
+/**
+ * @brief Search extended TLVs in request/reply buffer.
+ *
+ * @param p_hwfn
+ * @param p_tlvs_list - Pointer to tlvs list
+ * @param req_type - Type of TLV
+ *
+ * @return pointer to tlv type if found, otherwise returns NULL.
+ */
+void *qed_iov_search_list_tlvs(struct qed_hwfn *p_hwfn,
+                              void *p_tlvs_list, u16 req_type);
+
+void qed_iov_wq_stop(struct qed_dev *cdev, bool schedule_first);
+int qed_iov_wq_start(struct qed_dev *cdev);
+
+void qed_schedule_iov(struct qed_hwfn *hwfn, enum qed_iov_wq_flag flag);
+void qed_vf_start_iov_wq(struct qed_dev *cdev);
+int qed_sriov_disable(struct qed_dev *cdev, bool pci_enabled);
+void qed_inform_vf_link_state(struct qed_hwfn *hwfn);
+#else
+static inline u16 qed_iov_get_next_active_vf(struct qed_hwfn *p_hwfn,
+                                            u16 rel_vf_id)
+{
+       return MAX_NUM_VFS;
+}
+
+static inline int qed_iov_hw_info(struct qed_hwfn *p_hwfn)
+{
+       return 0;
+}
+
+static inline int qed_iov_alloc(struct qed_hwfn *p_hwfn)
+{
+       return 0;
+}
+
+static inline void qed_iov_setup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+{
+}
+
+static inline void qed_iov_free(struct qed_hwfn *p_hwfn)
+{
+}
+
+static inline void qed_iov_free_hw_info(struct qed_dev *cdev)
+{
+}
+
+static inline int qed_sriov_eqe_event(struct qed_hwfn *p_hwfn,
+                                     u8 opcode,
+                                     __le16 echo, union event_ring_data *data)
+{
+       return -EINVAL;
+}
+
+static inline int qed_iov_mark_vf_flr(struct qed_hwfn *p_hwfn,
+                                     u32 *disabled_vfs)
+{
+       return 0;
+}
+
+static inline void qed_iov_wq_stop(struct qed_dev *cdev, bool schedule_first)
+{
+}
+
+static inline int qed_iov_wq_start(struct qed_dev *cdev)
+{
+       return 0;
+}
+
+static inline void qed_schedule_iov(struct qed_hwfn *hwfn,
+                                   enum qed_iov_wq_flag flag)
+{
+}
+
+static inline void qed_vf_start_iov_wq(struct qed_dev *cdev)
+{
+}
+
+static inline int qed_sriov_disable(struct qed_dev *cdev, bool pci_enabled)
+{
+       return 0;
+}
+
+static inline void qed_inform_vf_link_state(struct qed_hwfn *hwfn)
+{
+}
+#endif
+
+#define qed_for_each_vf(_p_hwfn, _i)                     \
+       for (_i = qed_iov_get_next_active_vf(_p_hwfn, 0); \
+            _i < MAX_NUM_VFS;                            \
+            _i = qed_iov_get_next_active_vf(_p_hwfn, _i + 1))
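/* Illustrative use of the iterator (hypothetical helper, not part of this
 * patch): count the VFs currently reported active on a PF.
 */
static void example_count_active_vfs(struct qed_hwfn *p_hwfn)
{
        u16 active = 0, vf_id;

        /* Visits only indices for which qed_iov_get_next_active_vf() reports
         * an active VF.
         */
        qed_for_each_vf(p_hwfn, vf_id)
                active++;

        DP_VERBOSE(p_hwfn, QED_MSG_IOV, "%d active VFs\n", active);
}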
+
+#endif
diff --git a/drivers/net/ethernet/qlogic/qed/qed_vf.c b/drivers/net/ethernet/qlogic/qed/qed_vf.c
new file mode 100644 (file)
index 0000000..9b780b3
--- /dev/null
@@ -0,0 +1,1141 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015 QLogic Corporation
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#include <linux/crc32.h>
+#include <linux/etherdevice.h>
+#include "qed.h"
+#include "qed_sriov.h"
+#include "qed_vf.h"
+
+static void *qed_vf_pf_prep(struct qed_hwfn *p_hwfn, u16 type, u16 length)
+{
+       struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
+       void *p_tlv;
+
+       /* This lock is released when we receive PF's response
+        * in qed_send_msg2pf().
+        * So, qed_vf_pf_prep() and qed_send_msg2pf()
+        * must come in sequence.
+        */
+       mutex_lock(&(p_iov->mutex));
+
+       DP_VERBOSE(p_hwfn,
+                  QED_MSG_IOV,
+                  "preparing to send 0x%04x tlv over vf pf channel\n",
+                  type);
+
+       /* Reset Request offset */
+       p_iov->offset = (u8 *)p_iov->vf2pf_request;
+
+       /* Clear mailbox - both request and reply */
+       memset(p_iov->vf2pf_request, 0, sizeof(union vfpf_tlvs));
+       memset(p_iov->pf2vf_reply, 0, sizeof(union pfvf_tlvs));
+
+       /* Init type and length */
+       p_tlv = qed_add_tlv(p_hwfn, &p_iov->offset, type, length);
+
+       /* Init first tlv header */
+       ((struct vfpf_first_tlv *)p_tlv)->reply_address =
+           (u64)p_iov->pf2vf_reply_phys;
+
+       return p_tlv;
+}
+
+static int qed_send_msg2pf(struct qed_hwfn *p_hwfn, u8 *done, u32 resp_size)
+{
+       union vfpf_tlvs *p_req = p_hwfn->vf_iov_info->vf2pf_request;
+       struct ustorm_trigger_vf_zone trigger;
+       struct ustorm_vf_zone *zone_data;
+       int rc = 0, time = 100;
+
+       zone_data = (struct ustorm_vf_zone *)PXP_VF_BAR0_START_USDM_ZONE_B;
+
+       /* output tlvs list */
+       qed_dp_tlv_list(p_hwfn, p_req);
+
+       /* need to add the END TLV to the message size */
+       resp_size += sizeof(struct channel_list_end_tlv);
+
+       /* Send TLVs over HW channel */
+       memset(&trigger, 0, sizeof(struct ustorm_trigger_vf_zone));
+       trigger.vf_pf_msg_valid = 1;
+
+       DP_VERBOSE(p_hwfn,
+                  QED_MSG_IOV,
+                  "VF -> PF [%02x] message: [%08x, %08x] --> %p, %08x --> %p\n",
+                  GET_FIELD(p_hwfn->hw_info.concrete_fid,
+                            PXP_CONCRETE_FID_PFID),
+                  upper_32_bits(p_hwfn->vf_iov_info->vf2pf_request_phys),
+                  lower_32_bits(p_hwfn->vf_iov_info->vf2pf_request_phys),
+                  &zone_data->non_trigger.vf_pf_msg_addr,
+                  *((u32 *)&trigger), &zone_data->trigger);
+
+       REG_WR(p_hwfn,
+              (uintptr_t)&zone_data->non_trigger.vf_pf_msg_addr.lo,
+              lower_32_bits(p_hwfn->vf_iov_info->vf2pf_request_phys));
+
+       REG_WR(p_hwfn,
+              (uintptr_t)&zone_data->non_trigger.vf_pf_msg_addr.hi,
+              upper_32_bits(p_hwfn->vf_iov_info->vf2pf_request_phys));
+
+       /* The message data must be written before the trigger, so the PF
+        * never observes the trigger ahead of the data.
+        */
+       wmb();
+
+       REG_WR(p_hwfn, (uintptr_t)&zone_data->trigger, *((u32 *)&trigger));
+
+       /* When the PF is done with the response, it writes back to the
+        * `done' address. Poll until then.
+        */
+       while ((!*done) && time) {
+               msleep(25);
+               time--;
+       }
+
+       if (!*done) {
+               DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+                          "VF <-- PF Timeout [Type %d]\n",
+                          p_req->first_tlv.tl.type);
+               rc = -EBUSY;
+               goto exit;
+       } else {
+               DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+                          "PF response: %d [Type %d]\n",
+                          *done, p_req->first_tlv.tl.type);
+       }
+
+exit:
+       mutex_unlock(&(p_hwfn->vf_iov_info->mutex));
+
+       return rc;
+}
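/* Every VF request in this file follows the same prepare / fill / terminate /
 * send pattern built on the two helpers above. A minimal sketch under that
 * assumption, for a hypothetical request that needs only the first TLV and a
 * default response (compare qed_vf_pf_vport_stop() further down):
 */
static int example_send_simple_request(struct qed_hwfn *p_hwfn, u16 tlv_type)
{
        struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
        struct pfvf_def_resp_tlv *resp = &p_iov->pf2vf_reply->default_resp;
        int rc;

        /* Take the mailbox mutex, clear both buffers, place the first TLV */
        qed_vf_pf_prep(p_hwfn, tlv_type, sizeof(struct vfpf_first_tlv));

        /* Terminate the TLV list */
        qed_add_tlv(p_hwfn, &p_iov->offset,
                    CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));

        /* Ring the PF and poll for completion; this releases the mutex */
        rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
        if (rc)
                return rc;

        return (resp->hdr.status == PFVF_STATUS_SUCCESS) ? 0 : -EINVAL;
}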
+
+#define VF_ACQUIRE_THRESH 3
+static void qed_vf_pf_acquire_reduce_resc(struct qed_hwfn *p_hwfn,
+                                         struct vf_pf_resc_request *p_req,
+                                         struct pf_vf_resc *p_resp)
+{
+       DP_VERBOSE(p_hwfn,
+                  QED_MSG_IOV,
+                  "PF unwilling to fulfill resource request: rxq [%02x/%02x] txq [%02x/%02x] sbs [%02x/%02x] mac [%02x/%02x] vlan [%02x/%02x] mc [%02x/%02x]. Try PF recommended amount\n",
+                  p_req->num_rxqs,
+                  p_resp->num_rxqs,
+                  p_req->num_txqs,
+                  p_resp->num_txqs,
+                  p_req->num_sbs,
+                  p_resp->num_sbs,
+                  p_req->num_mac_filters,
+                  p_resp->num_mac_filters,
+                  p_req->num_vlan_filters,
+                  p_resp->num_vlan_filters,
+                  p_req->num_mc_filters, p_resp->num_mc_filters);
+
+       /* humble our request */
+       p_req->num_txqs = p_resp->num_txqs;
+       p_req->num_rxqs = p_resp->num_rxqs;
+       p_req->num_sbs = p_resp->num_sbs;
+       p_req->num_mac_filters = p_resp->num_mac_filters;
+       p_req->num_vlan_filters = p_resp->num_vlan_filters;
+       p_req->num_mc_filters = p_resp->num_mc_filters;
+}
+
+static int qed_vf_pf_acquire(struct qed_hwfn *p_hwfn)
+{
+       struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
+       struct pfvf_acquire_resp_tlv *resp = &p_iov->pf2vf_reply->acquire_resp;
+       struct pf_vf_pfdev_info *pfdev_info = &resp->pfdev_info;
+       struct vf_pf_resc_request *p_resc;
+       bool resources_acquired = false;
+       struct vfpf_acquire_tlv *req;
+       int rc = 0, attempts = 0;
+
+       /* clear mailbox and prep first tlv */
+       req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_ACQUIRE, sizeof(*req));
+       p_resc = &req->resc_request;
+
+       /* start filling the request */
+       req->vfdev_info.opaque_fid = p_hwfn->hw_info.opaque_fid;
+
+       p_resc->num_rxqs = QED_MAX_VF_CHAINS_PER_PF;
+       p_resc->num_txqs = QED_MAX_VF_CHAINS_PER_PF;
+       p_resc->num_sbs = QED_MAX_VF_CHAINS_PER_PF;
+       p_resc->num_mac_filters = QED_ETH_VF_NUM_MAC_FILTERS;
+       p_resc->num_vlan_filters = QED_ETH_VF_NUM_VLAN_FILTERS;
+
+       req->vfdev_info.os_type = VFPF_ACQUIRE_OS_LINUX;
+       req->vfdev_info.fw_major = FW_MAJOR_VERSION;
+       req->vfdev_info.fw_minor = FW_MINOR_VERSION;
+       req->vfdev_info.fw_revision = FW_REVISION_VERSION;
+       req->vfdev_info.fw_engineering = FW_ENGINEERING_VERSION;
+       req->vfdev_info.eth_fp_hsi_major = ETH_HSI_VER_MAJOR;
+       req->vfdev_info.eth_fp_hsi_minor = ETH_HSI_VER_MINOR;
+
+       /* Fill capability field with any non-deprecated config we support */
+       req->vfdev_info.capabilities |= VFPF_ACQUIRE_CAP_100G;
+
+       /* pf 2 vf bulletin board address */
+       req->bulletin_addr = p_iov->bulletin.phys;
+       req->bulletin_size = p_iov->bulletin.size;
+
+       /* add list termination tlv */
+       qed_add_tlv(p_hwfn, &p_iov->offset,
+                   CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));
+
+       while (!resources_acquired) {
+               DP_VERBOSE(p_hwfn,
+                          QED_MSG_IOV, "attempting to acquire resources\n");
+
+               /* send acquire request */
+               rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
+               if (rc)
+                       return rc;
+
+               /* copy acquire response from buffer to p_hwfn */
+               memcpy(&p_iov->acquire_resp, resp, sizeof(p_iov->acquire_resp));
+
+               attempts++;
+
+               if (resp->hdr.status == PFVF_STATUS_SUCCESS) {
+                       /* PF agrees to allocate our resources */
+                       if (!(resp->pfdev_info.capabilities &
+                             PFVF_ACQUIRE_CAP_POST_FW_OVERRIDE)) {
+                               DP_INFO(p_hwfn,
+                                       "PF is using old incompatible driver; Either downgrade driver or request provider to update hypervisor version\n");
+                               return -EINVAL;
+                       }
+                       DP_VERBOSE(p_hwfn, QED_MSG_IOV, "resources acquired\n");
+                       resources_acquired = true;
+               } else if (resp->hdr.status == PFVF_STATUS_NO_RESOURCE &&
+                          attempts < VF_ACQUIRE_THRESH) {
+                       qed_vf_pf_acquire_reduce_resc(p_hwfn, p_resc,
+                                                     &resp->resc);
+
+                       /* Clear response buffer */
+                       memset(p_iov->pf2vf_reply, 0, sizeof(union pfvf_tlvs));
+               } else if ((resp->hdr.status == PFVF_STATUS_NOT_SUPPORTED) &&
+                          pfdev_info->major_fp_hsi &&
+                          (pfdev_info->major_fp_hsi != ETH_HSI_VER_MAJOR)) {
+                       DP_NOTICE(p_hwfn,
+                                 "PF uses an incompatible fastpath HSI %02x.%02x [VF requires %02x.%02x]. Please change to a VF driver using %02x.xx.\n",
+                                 pfdev_info->major_fp_hsi,
+                                 pfdev_info->minor_fp_hsi,
+                                 ETH_HSI_VER_MAJOR,
+                                 ETH_HSI_VER_MINOR, pfdev_info->major_fp_hsi);
+                       return -EINVAL;
+               } else {
+                       DP_ERR(p_hwfn,
+                              "PF returned error %d to VF acquisition request\n",
+                              resp->hdr.status);
+                       return -EAGAIN;
+               }
+       }
+
+       /* Update bulletin board size with response from PF */
+       p_iov->bulletin.size = resp->bulletin_size;
+
+       /* get HW info */
+       p_hwfn->cdev->type = resp->pfdev_info.dev_type;
+       p_hwfn->cdev->chip_rev = resp->pfdev_info.chip_rev;
+
+       p_hwfn->cdev->chip_num = pfdev_info->chip_num & 0xffff;
+
+       /* Learn of the possibility of CMT */
+       /* Learn whether this is a CMT (two-engine, 100g) device */
+               if (resp->pfdev_info.capabilities & PFVF_ACQUIRE_CAP_100G) {
+                       DP_NOTICE(p_hwfn, "100g VF\n");
+                       p_hwfn->cdev->num_hwfns = 2;
+               }
+       }
+
+       if (ETH_HSI_VER_MINOR &&
+           (resp->pfdev_info.minor_fp_hsi < ETH_HSI_VER_MINOR)) {
+               DP_INFO(p_hwfn,
+                       "PF is using older fastpath HSI; %02x.%02x is configured\n",
+                       ETH_HSI_VER_MAJOR, resp->pfdev_info.minor_fp_hsi);
+       }
+
+       return 0;
+}
+
+int qed_vf_hw_prepare(struct qed_hwfn *p_hwfn)
+{
+       struct qed_vf_iov *p_iov;
+       u32 reg;
+
+       /* Set number of hwfns - might be overridden once the leading hwfn
+        * learns the actual configuration from the PF.
+        */
+       if (IS_LEAD_HWFN(p_hwfn))
+               p_hwfn->cdev->num_hwfns = 1;
+
+       /* Set the doorbell bar. Assumption: regview is set */
+       p_hwfn->doorbells = (u8 __iomem *)p_hwfn->regview +
+                                         PXP_VF_BAR0_START_DQ;
+
+       reg = PXP_VF_BAR0_ME_OPAQUE_ADDRESS;
+       p_hwfn->hw_info.opaque_fid = (u16)REG_RD(p_hwfn, reg);
+
+       reg = PXP_VF_BAR0_ME_CONCRETE_ADDRESS;
+       p_hwfn->hw_info.concrete_fid = REG_RD(p_hwfn, reg);
+
+       /* Allocate vf sriov info */
+       p_iov = kzalloc(sizeof(*p_iov), GFP_KERNEL);
+       if (!p_iov) {
+               DP_NOTICE(p_hwfn, "Failed to allocate `struct qed_sriov'\n");
+               return -ENOMEM;
+       }
+
+       /* Allocate vf2pf msg */
+       p_iov->vf2pf_request = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
+                                                 sizeof(union vfpf_tlvs),
+                                                 &p_iov->vf2pf_request_phys,
+                                                 GFP_KERNEL);
+       if (!p_iov->vf2pf_request) {
+               DP_NOTICE(p_hwfn,
+                         "Failed to allocate `vf2pf_request' DMA memory\n");
+               goto free_p_iov;
+       }
+
+       p_iov->pf2vf_reply = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
+                                               sizeof(union pfvf_tlvs),
+                                               &p_iov->pf2vf_reply_phys,
+                                               GFP_KERNEL);
+       if (!p_iov->pf2vf_reply) {
+               DP_NOTICE(p_hwfn,
+                         "Failed to allocate `pf2vf_reply' DMA memory\n");
+               goto free_vf2pf_request;
+       }
+
+       DP_VERBOSE(p_hwfn,
+                  QED_MSG_IOV,
+                  "VF's Request mailbox [%p virt 0x%llx phys], Response mailbox [%p virt 0x%llx phys]\n",
+                  p_iov->vf2pf_request,
+                  (u64) p_iov->vf2pf_request_phys,
+                  p_iov->pf2vf_reply, (u64)p_iov->pf2vf_reply_phys);
+
+       /* Allocate Bulletin board */
+       p_iov->bulletin.size = sizeof(struct qed_bulletin_content);
+       p_iov->bulletin.p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
+                                                   p_iov->bulletin.size,
+                                                   &p_iov->bulletin.phys,
+                                                   GFP_KERNEL);
+       DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+                  "VF's bulletin Board [%p virt 0x%llx phys 0x%08x bytes]\n",
+                  p_iov->bulletin.p_virt,
+                  (u64)p_iov->bulletin.phys, p_iov->bulletin.size);
+
+       mutex_init(&p_iov->mutex);
+
+       p_hwfn->vf_iov_info = p_iov;
+
+       p_hwfn->hw_info.personality = QED_PCI_ETH;
+
+       return qed_vf_pf_acquire(p_hwfn);
+
+free_vf2pf_request:
+       dma_free_coherent(&p_hwfn->cdev->pdev->dev,
+                         sizeof(union vfpf_tlvs),
+                         p_iov->vf2pf_request, p_iov->vf2pf_request_phys);
+free_p_iov:
+       kfree(p_iov);
+
+       return -ENOMEM;
+}
+
+int qed_vf_pf_rxq_start(struct qed_hwfn *p_hwfn,
+                       u8 rx_qid,
+                       u16 sb,
+                       u8 sb_index,
+                       u16 bd_max_bytes,
+                       dma_addr_t bd_chain_phys_addr,
+                       dma_addr_t cqe_pbl_addr,
+                       u16 cqe_pbl_size, void __iomem **pp_prod)
+{
+       struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
+       struct pfvf_start_queue_resp_tlv *resp;
+       struct vfpf_start_rxq_tlv *req;
+       int rc;
+
+       /* clear mailbox and prep first tlv */
+       req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_START_RXQ, sizeof(*req));
+
+       req->rx_qid = rx_qid;
+       req->cqe_pbl_addr = cqe_pbl_addr;
+       req->cqe_pbl_size = cqe_pbl_size;
+       req->rxq_addr = bd_chain_phys_addr;
+       req->hw_sb = sb;
+       req->sb_index = sb_index;
+       req->bd_max_bytes = bd_max_bytes;
+       req->stat_id = -1;
+
+       /* add list termination tlv */
+       qed_add_tlv(p_hwfn, &p_iov->offset,
+                   CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));
+
+       resp = &p_iov->pf2vf_reply->queue_start;
+       rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
+       if (rc)
+               return rc;
+
+       if (resp->hdr.status != PFVF_STATUS_SUCCESS)
+               return -EINVAL;
+
+       /* Learn the address of the producer from the response */
+       if (pp_prod) {
+               u32 init_prod_val = 0;
+
+               *pp_prod = (u8 __iomem *)p_hwfn->regview + resp->offset;
+               DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+                          "Rxq[0x%02x]: producer at %p [offset 0x%08x]\n",
+                          rx_qid, *pp_prod, resp->offset);
+
+               /* Init the rcq, rx bd and rx sge (if valid) producers to 0 */
+               __internal_ram_wr(p_hwfn, *pp_prod, sizeof(u32),
+                                 (u32 *)&init_prod_val);
+       }
+
+       return rc;
+}
+
+int qed_vf_pf_rxq_stop(struct qed_hwfn *p_hwfn, u16 rx_qid, bool cqe_completion)
+{
+       struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
+       struct vfpf_stop_rxqs_tlv *req;
+       struct pfvf_def_resp_tlv *resp;
+       int rc;
+
+       /* clear mailbox and prep first tlv */
+       req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_STOP_RXQS, sizeof(*req));
+
+       req->rx_qid = rx_qid;
+       req->num_rxqs = 1;
+       req->cqe_completion = cqe_completion;
+
+       /* add list termination tlv */
+       qed_add_tlv(p_hwfn, &p_iov->offset,
+                   CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));
+
+       resp = &p_iov->pf2vf_reply->default_resp;
+       rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
+       if (rc)
+               return rc;
+
+       if (resp->hdr.status != PFVF_STATUS_SUCCESS)
+               return -EINVAL;
+
+       return rc;
+}
+
+int qed_vf_pf_txq_start(struct qed_hwfn *p_hwfn,
+                       u16 tx_queue_id,
+                       u16 sb,
+                       u8 sb_index,
+                       dma_addr_t pbl_addr,
+                       u16 pbl_size, void __iomem **pp_doorbell)
+{
+       struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
+       struct pfvf_start_queue_resp_tlv *resp;
+       struct vfpf_start_txq_tlv *req;
+       int rc;
+
+       /* clear mailbox and prep first tlv */
+       req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_START_TXQ, sizeof(*req));
+
+       req->tx_qid = tx_queue_id;
+
+       /* Tx */
+       req->pbl_addr = pbl_addr;
+       req->pbl_size = pbl_size;
+       req->hw_sb = sb;
+       req->sb_index = sb_index;
+
+       /* add list termination tlv */
+       qed_add_tlv(p_hwfn, &p_iov->offset,
+                   CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));
+
+       resp = &p_iov->pf2vf_reply->queue_start;
+       rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
+       if (rc)
+               goto exit;
+
+       if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
+               rc = -EINVAL;
+               goto exit;
+       }
+
+       if (pp_doorbell) {
+               *pp_doorbell = (u8 __iomem *)p_hwfn->doorbells + resp->offset;
+
+               DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+                          "Txq[0x%02x]: doorbell at %p [offset 0x%08x]\n",
+                          tx_queue_id, *pp_doorbell, resp->offset);
+       }
+exit:
+
+       return rc;
+}
+
+int qed_vf_pf_txq_stop(struct qed_hwfn *p_hwfn, u16 tx_qid)
+{
+       struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
+       struct vfpf_stop_txqs_tlv *req;
+       struct pfvf_def_resp_tlv *resp;
+       int rc;
+
+       /* clear mailbox and prep first tlv */
+       req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_STOP_TXQS, sizeof(*req));
+
+       req->tx_qid = tx_qid;
+       req->num_txqs = 1;
+
+       /* add list termination tlv */
+       qed_add_tlv(p_hwfn, &p_iov->offset,
+                   CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));
+
+       resp = &p_iov->pf2vf_reply->default_resp;
+       rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
+       if (rc)
+               return rc;
+
+       if (resp->hdr.status != PFVF_STATUS_SUCCESS)
+               return -EINVAL;
+
+       return rc;
+}
+
+int qed_vf_pf_vport_start(struct qed_hwfn *p_hwfn,
+                         u8 vport_id,
+                         u16 mtu,
+                         u8 inner_vlan_removal,
+                         enum qed_tpa_mode tpa_mode,
+                         u8 max_buffers_per_cqe, u8 only_untagged)
+{
+       struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
+       struct vfpf_vport_start_tlv *req;
+       struct pfvf_def_resp_tlv *resp;
+       int rc, i;
+
+       /* clear mailbox and prep first tlv */
+       req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_VPORT_START, sizeof(*req));
+
+       req->mtu = mtu;
+       req->vport_id = vport_id;
+       req->inner_vlan_removal = inner_vlan_removal;
+       req->tpa_mode = tpa_mode;
+       req->max_buffers_per_cqe = max_buffers_per_cqe;
+       req->only_untagged = only_untagged;
+
+       /* status blocks */
+       for (i = 0; i < p_hwfn->vf_iov_info->acquire_resp.resc.num_sbs; i++)
+               if (p_hwfn->sbs_info[i])
+                       req->sb_addr[i] = p_hwfn->sbs_info[i]->sb_phys;
+
+       /* add list termination tlv */
+       qed_add_tlv(p_hwfn, &p_iov->offset,
+                   CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));
+
+       resp = &p_iov->pf2vf_reply->default_resp;
+       rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
+       if (rc)
+               return rc;
+
+       if (resp->hdr.status != PFVF_STATUS_SUCCESS)
+               return -EINVAL;
+
+       return rc;
+}
+
+int qed_vf_pf_vport_stop(struct qed_hwfn *p_hwfn)
+{
+       struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
+       struct pfvf_def_resp_tlv *resp = &p_iov->pf2vf_reply->default_resp;
+       int rc;
+
+       /* clear mailbox and prep first tlv */
+       qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_VPORT_TEARDOWN,
+                      sizeof(struct vfpf_first_tlv));
+
+       /* add list termination tlv */
+       qed_add_tlv(p_hwfn, &p_iov->offset,
+                   CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));
+
+       rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
+       if (rc)
+               return rc;
+
+       if (resp->hdr.status != PFVF_STATUS_SUCCESS)
+               return -EINVAL;
+
+       return rc;
+}
+
+static bool
+qed_vf_handle_vp_update_is_needed(struct qed_hwfn *p_hwfn,
+                                 struct qed_sp_vport_update_params *p_data,
+                                 u16 tlv)
+{
+       switch (tlv) {
+       case CHANNEL_TLV_VPORT_UPDATE_ACTIVATE:
+               return !!(p_data->update_vport_active_rx_flg ||
+                         p_data->update_vport_active_tx_flg);
+       case CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH:
+               return !!p_data->update_tx_switching_flg;
+       case CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP:
+               return !!p_data->update_inner_vlan_removal_flg;
+       case CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN:
+               return !!p_data->update_accept_any_vlan_flg;
+       case CHANNEL_TLV_VPORT_UPDATE_MCAST:
+               return !!p_data->update_approx_mcast_flg;
+       case CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM:
+               return !!(p_data->accept_flags.update_rx_mode_config ||
+                         p_data->accept_flags.update_tx_mode_config);
+       case CHANNEL_TLV_VPORT_UPDATE_RSS:
+               return !!p_data->rss_params;
+       case CHANNEL_TLV_VPORT_UPDATE_SGE_TPA:
+               return !!p_data->sge_tpa_params;
+       default:
+               DP_INFO(p_hwfn, "Unexpected vport-update TLV[%d]\n",
+                       tlv);
+               return false;
+       }
+}
+
+static void
+qed_vf_handle_vp_update_tlvs_resp(struct qed_hwfn *p_hwfn,
+                                 struct qed_sp_vport_update_params *p_data)
+{
+       struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
+       struct pfvf_def_resp_tlv *p_resp;
+       u16 tlv;
+
+       for (tlv = CHANNEL_TLV_VPORT_UPDATE_ACTIVATE;
+            tlv < CHANNEL_TLV_VPORT_UPDATE_MAX; tlv++) {
+               if (!qed_vf_handle_vp_update_is_needed(p_hwfn, p_data, tlv))
+                       continue;
+
+               p_resp = (struct pfvf_def_resp_tlv *)
+                        qed_iov_search_list_tlvs(p_hwfn, p_iov->pf2vf_reply,
+                                                 tlv);
+               if (p_resp && p_resp->hdr.status)
+                       DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+                                  "TLV[%d] Configuration %s\n",
+                                  tlv,
+                                  (p_resp->hdr.status == PFVF_STATUS_SUCCESS) ?
+                                  "succeeded" : "failed");
+       }
+}
+
+int qed_vf_pf_vport_update(struct qed_hwfn *p_hwfn,
+                          struct qed_sp_vport_update_params *p_params)
+{
+       struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
+       struct vfpf_vport_update_tlv *req;
+       struct pfvf_def_resp_tlv *resp;
+       u8 update_rx, update_tx;
+       u32 resp_size = 0;
+       u16 size, tlv;
+       int rc;
+
+       resp = &p_iov->pf2vf_reply->default_resp;
+       resp_size = sizeof(*resp);
+
+       update_rx = p_params->update_vport_active_rx_flg;
+       update_tx = p_params->update_vport_active_tx_flg;
+
+       /* clear mailbox and prep header tlv */
+       qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_VPORT_UPDATE, sizeof(*req));
+
+       /* Prepare extended tlvs */
+       if (update_rx || update_tx) {
+               struct vfpf_vport_update_activate_tlv *p_act_tlv;
+
+               size = sizeof(struct vfpf_vport_update_activate_tlv);
+               p_act_tlv = qed_add_tlv(p_hwfn, &p_iov->offset,
+                                       CHANNEL_TLV_VPORT_UPDATE_ACTIVATE,
+                                       size);
+               resp_size += sizeof(struct pfvf_def_resp_tlv);
+
+               if (update_rx) {
+                       p_act_tlv->update_rx = update_rx;
+                       p_act_tlv->active_rx = p_params->vport_active_rx_flg;
+               }
+
+               if (update_tx) {
+                       p_act_tlv->update_tx = update_tx;
+                       p_act_tlv->active_tx = p_params->vport_active_tx_flg;
+               }
+       }
+
+       if (p_params->update_tx_switching_flg) {
+               struct vfpf_vport_update_tx_switch_tlv *p_tx_switch_tlv;
+
+               size = sizeof(struct vfpf_vport_update_tx_switch_tlv);
+               tlv = CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH;
+               p_tx_switch_tlv = qed_add_tlv(p_hwfn, &p_iov->offset,
+                                             tlv, size);
+               resp_size += sizeof(struct pfvf_def_resp_tlv);
+
+               p_tx_switch_tlv->tx_switching = p_params->tx_switching_flg;
+       }
+
+       if (p_params->update_approx_mcast_flg) {
+               struct vfpf_vport_update_mcast_bin_tlv *p_mcast_tlv;
+
+               size = sizeof(struct vfpf_vport_update_mcast_bin_tlv);
+               p_mcast_tlv = qed_add_tlv(p_hwfn, &p_iov->offset,
+                                         CHANNEL_TLV_VPORT_UPDATE_MCAST, size);
+               resp_size += sizeof(struct pfvf_def_resp_tlv);
+
+               memcpy(p_mcast_tlv->bins, p_params->bins,
+                      sizeof(unsigned long) * ETH_MULTICAST_MAC_BINS_IN_REGS);
+       }
+
+       update_rx = p_params->accept_flags.update_rx_mode_config;
+       update_tx = p_params->accept_flags.update_tx_mode_config;
+
+       if (update_rx || update_tx) {
+               struct vfpf_vport_update_accept_param_tlv *p_accept_tlv;
+
+               tlv = CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM;
+               size = sizeof(struct vfpf_vport_update_accept_param_tlv);
+               p_accept_tlv = qed_add_tlv(p_hwfn, &p_iov->offset, tlv, size);
+               resp_size += sizeof(struct pfvf_def_resp_tlv);
+
+               if (update_rx) {
+                       p_accept_tlv->update_rx_mode = update_rx;
+                       p_accept_tlv->rx_accept_filter =
+                           p_params->accept_flags.rx_accept_filter;
+               }
+
+               if (update_tx) {
+                       p_accept_tlv->update_tx_mode = update_tx;
+                       p_accept_tlv->tx_accept_filter =
+                           p_params->accept_flags.tx_accept_filter;
+               }
+       }
+
+       if (p_params->rss_params) {
+               struct qed_rss_params *rss_params = p_params->rss_params;
+               struct vfpf_vport_update_rss_tlv *p_rss_tlv;
+
+               size = sizeof(struct vfpf_vport_update_rss_tlv);
+               p_rss_tlv = qed_add_tlv(p_hwfn,
+                                       &p_iov->offset,
+                                       CHANNEL_TLV_VPORT_UPDATE_RSS, size);
+               resp_size += sizeof(struct pfvf_def_resp_tlv);
+
+               if (rss_params->update_rss_config)
+                       p_rss_tlv->update_rss_flags |=
+                           VFPF_UPDATE_RSS_CONFIG_FLAG;
+               if (rss_params->update_rss_capabilities)
+                       p_rss_tlv->update_rss_flags |=
+                           VFPF_UPDATE_RSS_CAPS_FLAG;
+               if (rss_params->update_rss_ind_table)
+                       p_rss_tlv->update_rss_flags |=
+                           VFPF_UPDATE_RSS_IND_TABLE_FLAG;
+               if (rss_params->update_rss_key)
+                       p_rss_tlv->update_rss_flags |= VFPF_UPDATE_RSS_KEY_FLAG;
+
+               p_rss_tlv->rss_enable = rss_params->rss_enable;
+               p_rss_tlv->rss_caps = rss_params->rss_caps;
+               p_rss_tlv->rss_table_size_log = rss_params->rss_table_size_log;
+               memcpy(p_rss_tlv->rss_ind_table, rss_params->rss_ind_table,
+                      sizeof(rss_params->rss_ind_table));
+               memcpy(p_rss_tlv->rss_key, rss_params->rss_key,
+                      sizeof(rss_params->rss_key));
+       }
+
+       if (p_params->update_accept_any_vlan_flg) {
+               struct vfpf_vport_update_accept_any_vlan_tlv *p_any_vlan_tlv;
+
+               size = sizeof(struct vfpf_vport_update_accept_any_vlan_tlv);
+               tlv = CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN;
+               p_any_vlan_tlv = qed_add_tlv(p_hwfn, &p_iov->offset, tlv, size);
+
+               resp_size += sizeof(struct pfvf_def_resp_tlv);
+               p_any_vlan_tlv->accept_any_vlan = p_params->accept_any_vlan;
+               p_any_vlan_tlv->update_accept_any_vlan_flg =
+                   p_params->update_accept_any_vlan_flg;
+       }
+
+       /* add list termination tlv */
+       qed_add_tlv(p_hwfn, &p_iov->offset,
+                   CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));
+
+       rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, resp_size);
+       if (rc)
+               return rc;
+
+       if (resp->hdr.status != PFVF_STATUS_SUCCESS)
+               return -EINVAL;
+
+       qed_vf_handle_vp_update_tlvs_resp(p_hwfn, p_params);
+
+       return rc;
+}
+
+int qed_vf_pf_reset(struct qed_hwfn *p_hwfn)
+{
+       struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
+       struct pfvf_def_resp_tlv *resp;
+       struct vfpf_first_tlv *req;
+       int rc;
+
+       /* clear mailbox and prep first tlv */
+       req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_CLOSE, sizeof(*req));
+
+       /* add list termination tlv */
+       qed_add_tlv(p_hwfn, &p_iov->offset,
+                   CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));
+
+       resp = &p_iov->pf2vf_reply->default_resp;
+       rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
+       if (rc)
+               return rc;
+
+       if (resp->hdr.status != PFVF_STATUS_SUCCESS)
+               return -EAGAIN;
+
+       p_hwfn->b_int_enabled = 0;
+
+       return 0;
+}
+
+int qed_vf_pf_release(struct qed_hwfn *p_hwfn)
+{
+       struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
+       struct pfvf_def_resp_tlv *resp;
+       struct vfpf_first_tlv *req;
+       u32 size;
+       int rc;
+
+       /* clear mailbox and prep first tlv */
+       req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_RELEASE, sizeof(*req));
+
+       /* add list termination tlv */
+       qed_add_tlv(p_hwfn, &p_iov->offset,
+                   CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));
+
+       resp = &p_iov->pf2vf_reply->default_resp;
+       rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
+
+       if (!rc && resp->hdr.status != PFVF_STATUS_SUCCESS)
+               rc = -EAGAIN;
+
+       p_hwfn->b_int_enabled = 0;
+
+       if (p_iov->vf2pf_request)
+               dma_free_coherent(&p_hwfn->cdev->pdev->dev,
+                                 sizeof(union vfpf_tlvs),
+                                 p_iov->vf2pf_request,
+                                 p_iov->vf2pf_request_phys);
+       if (p_iov->pf2vf_reply)
+               dma_free_coherent(&p_hwfn->cdev->pdev->dev,
+                                 sizeof(union pfvf_tlvs),
+                                 p_iov->pf2vf_reply, p_iov->pf2vf_reply_phys);
+
+       if (p_iov->bulletin.p_virt) {
+               size = sizeof(struct qed_bulletin_content);
+               dma_free_coherent(&p_hwfn->cdev->pdev->dev,
+                                 size,
+                                 p_iov->bulletin.p_virt, p_iov->bulletin.phys);
+       }
+
+       kfree(p_hwfn->vf_iov_info);
+       p_hwfn->vf_iov_info = NULL;
+
+       return rc;
+}
+
+void qed_vf_pf_filter_mcast(struct qed_hwfn *p_hwfn,
+                           struct qed_filter_mcast *p_filter_cmd)
+{
+       struct qed_sp_vport_update_params sp_params;
+       int i;
+
+       memset(&sp_params, 0, sizeof(sp_params));
+       sp_params.update_approx_mcast_flg = 1;
+
+       if (p_filter_cmd->opcode == QED_FILTER_ADD) {
+               for (i = 0; i < p_filter_cmd->num_mc_addrs; i++) {
+                       u32 bit;
+
+                       bit = qed_mcast_bin_from_mac(p_filter_cmd->mac[i]);
+                       __set_bit(bit, sp_params.bins);
+               }
+       }
+
+       qed_vf_pf_vport_update(p_hwfn, &sp_params);
+}
+
+int qed_vf_pf_filter_ucast(struct qed_hwfn *p_hwfn,
+                          struct qed_filter_ucast *p_ucast)
+{
+       struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
+       struct vfpf_ucast_filter_tlv *req;
+       struct pfvf_def_resp_tlv *resp;
+       int rc;
+
+       /* clear mailbox and prep first tlv */
+       req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_UCAST_FILTER, sizeof(*req));
+       req->opcode = (u8) p_ucast->opcode;
+       req->type = (u8) p_ucast->type;
+       memcpy(req->mac, p_ucast->mac, ETH_ALEN);
+       req->vlan = p_ucast->vlan;
+
+       /* add list termination tlv */
+       qed_add_tlv(p_hwfn, &p_iov->offset,
+                   CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));
+
+       resp = &p_iov->pf2vf_reply->default_resp;
+       rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
+       if (rc)
+               return rc;
+
+       if (resp->hdr.status != PFVF_STATUS_SUCCESS)
+               return -EAGAIN;
+
+       return 0;
+}
+
+int qed_vf_pf_int_cleanup(struct qed_hwfn *p_hwfn)
+{
+       struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
+       struct pfvf_def_resp_tlv *resp = &p_iov->pf2vf_reply->default_resp;
+       int rc;
+
+       /* clear mailbox and prep first tlv */
+       qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_INT_CLEANUP,
+                      sizeof(struct vfpf_first_tlv));
+
+       /* add list termination tlv */
+       qed_add_tlv(p_hwfn, &p_iov->offset,
+                   CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));
+
+       rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
+       if (rc)
+               return rc;
+
+       if (resp->hdr.status != PFVF_STATUS_SUCCESS)
+               return -EINVAL;
+
+       return 0;
+}
+
+u16 qed_vf_get_igu_sb_id(struct qed_hwfn *p_hwfn, u16 sb_id)
+{
+       struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
+
+       if (!p_iov) {
+               DP_NOTICE(p_hwfn, "vf_sriov_info isn't initialized\n");
+               return 0;
+       }
+
+       return p_iov->acquire_resp.resc.hw_sbs[sb_id].hw_sb_id;
+}
+
+int qed_vf_read_bulletin(struct qed_hwfn *p_hwfn, u8 *p_change)
+{
+       struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
+       struct qed_bulletin_content shadow;
+       u32 crc, crc_size;
+
+       crc_size = sizeof(p_iov->bulletin.p_virt->crc);
+       *p_change = 0;
+
+       /* Need to guarantee PF is not in the middle of writing it */
+       memcpy(&shadow, p_iov->bulletin.p_virt, p_iov->bulletin.size);
+
+       /* If version did not update, no need to do anything */
+       if (shadow.version == p_iov->bulletin_shadow.version)
+               return 0;
+
+       /* Verify the bulletin we see is valid */
+       crc = crc32(0, (u8 *)&shadow + crc_size,
+                   p_iov->bulletin.size - crc_size);
+       if (crc != shadow.crc)
+               return -EAGAIN;
+
+       /* Set the shadow bulletin and process it */
+       memcpy(&p_iov->bulletin_shadow, &shadow, p_iov->bulletin.size);
+
+       DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+                  "Read a bulletin update %08x\n", shadow.version);
+
+       *p_change = 1;
+
+       return 0;
+}
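/* The CRC verified above is assumed to cover the whole bulletin except the
 * crc field itself. A hypothetical PF-side publish step consistent with that
 * check (the actual PF code lives in qed_sriov.c, not in this file):
 */
static void example_pf_publish_bulletin(struct qed_bulletin_content *p_bulletin,
                                        u32 bulletin_size)
{
        u32 crc_size = sizeof(p_bulletin->crc);

        /* Bump the version so VFs notice the change, then seal with a CRC */
        p_bulletin->version++;
        p_bulletin->crc = crc32(0, (u8 *)p_bulletin + crc_size,
                                bulletin_size - crc_size);
}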
+
+void __qed_vf_get_link_params(struct qed_hwfn *p_hwfn,
+                             struct qed_mcp_link_params *p_params,
+                             struct qed_bulletin_content *p_bulletin)
+{
+       memset(p_params, 0, sizeof(*p_params));
+
+       p_params->speed.autoneg = p_bulletin->req_autoneg;
+       p_params->speed.advertised_speeds = p_bulletin->req_adv_speed;
+       p_params->speed.forced_speed = p_bulletin->req_forced_speed;
+       p_params->pause.autoneg = p_bulletin->req_autoneg_pause;
+       p_params->pause.forced_rx = p_bulletin->req_forced_rx;
+       p_params->pause.forced_tx = p_bulletin->req_forced_tx;
+       p_params->loopback_mode = p_bulletin->req_loopback;
+}
+
+void qed_vf_get_link_params(struct qed_hwfn *p_hwfn,
+                           struct qed_mcp_link_params *params)
+{
+       __qed_vf_get_link_params(p_hwfn, params,
+                                &(p_hwfn->vf_iov_info->bulletin_shadow));
+}
+
+void __qed_vf_get_link_state(struct qed_hwfn *p_hwfn,
+                            struct qed_mcp_link_state *p_link,
+                            struct qed_bulletin_content *p_bulletin)
+{
+       memset(p_link, 0, sizeof(*p_link));
+
+       p_link->link_up = p_bulletin->link_up;
+       p_link->speed = p_bulletin->speed;
+       p_link->full_duplex = p_bulletin->full_duplex;
+       p_link->an = p_bulletin->autoneg;
+       p_link->an_complete = p_bulletin->autoneg_complete;
+       p_link->parallel_detection = p_bulletin->parallel_detection;
+       p_link->pfc_enabled = p_bulletin->pfc_enabled;
+       p_link->partner_adv_speed = p_bulletin->partner_adv_speed;
+       p_link->partner_tx_flow_ctrl_en = p_bulletin->partner_tx_flow_ctrl_en;
+       p_link->partner_rx_flow_ctrl_en = p_bulletin->partner_rx_flow_ctrl_en;
+       p_link->partner_adv_pause = p_bulletin->partner_adv_pause;
+       p_link->sfp_tx_fault = p_bulletin->sfp_tx_fault;
+}
+
+void qed_vf_get_link_state(struct qed_hwfn *p_hwfn,
+                          struct qed_mcp_link_state *link)
+{
+       __qed_vf_get_link_state(p_hwfn, link,
+                               &(p_hwfn->vf_iov_info->bulletin_shadow));
+}
+
+void __qed_vf_get_link_caps(struct qed_hwfn *p_hwfn,
+                           struct qed_mcp_link_capabilities *p_link_caps,
+                           struct qed_bulletin_content *p_bulletin)
+{
+       memset(p_link_caps, 0, sizeof(*p_link_caps));
+       p_link_caps->speed_capabilities = p_bulletin->capability_speed;
+}
+
+void qed_vf_get_link_caps(struct qed_hwfn *p_hwfn,
+                         struct qed_mcp_link_capabilities *p_link_caps)
+{
+       __qed_vf_get_link_caps(p_hwfn, p_link_caps,
+                              &(p_hwfn->vf_iov_info->bulletin_shadow));
+}
+
+void qed_vf_get_num_rxqs(struct qed_hwfn *p_hwfn, u8 *num_rxqs)
+{
+       *num_rxqs = p_hwfn->vf_iov_info->acquire_resp.resc.num_rxqs;
+}
+
+void qed_vf_get_port_mac(struct qed_hwfn *p_hwfn, u8 *port_mac)
+{
+       memcpy(port_mac,
+              p_hwfn->vf_iov_info->acquire_resp.pfdev_info.port_mac, ETH_ALEN);
+}
+
+void qed_vf_get_num_vlan_filters(struct qed_hwfn *p_hwfn, u8 *num_vlan_filters)
+{
+       struct qed_vf_iov *p_vf;
+
+       p_vf = p_hwfn->vf_iov_info;
+       *num_vlan_filters = p_vf->acquire_resp.resc.num_vlan_filters;
+}
+
+bool qed_vf_check_mac(struct qed_hwfn *p_hwfn, u8 *mac)
+{
+       struct qed_bulletin_content *bulletin;
+
+       bulletin = &p_hwfn->vf_iov_info->bulletin_shadow;
+       if (!(bulletin->valid_bitmap & (1 << MAC_ADDR_FORCED)))
+               return true;
+
+       /* Forbid VF from changing a MAC enforced by PF */
+       if (ether_addr_equal(bulletin->mac, mac))
+               return false;
+
+       return false;
+}
+
+bool qed_vf_bulletin_get_forced_mac(struct qed_hwfn *hwfn,
+                                   u8 *dst_mac, u8 *p_is_forced)
+{
+       struct qed_bulletin_content *bulletin;
+
+       bulletin = &hwfn->vf_iov_info->bulletin_shadow;
+
+       if (bulletin->valid_bitmap & (1 << MAC_ADDR_FORCED)) {
+               if (p_is_forced)
+                       *p_is_forced = 1;
+       } else if (bulletin->valid_bitmap & (1 << VFPF_BULLETIN_MAC_ADDR)) {
+               if (p_is_forced)
+                       *p_is_forced = 0;
+       } else {
+               return false;
+       }
+
+       ether_addr_copy(dst_mac, bulletin->mac);
+
+       return true;
+}
+
+void qed_vf_get_fw_version(struct qed_hwfn *p_hwfn,
+                          u16 *fw_major, u16 *fw_minor,
+                          u16 *fw_rev, u16 *fw_eng)
+{
+       struct pf_vf_pfdev_info *info;
+
+       info = &p_hwfn->vf_iov_info->acquire_resp.pfdev_info;
+
+       *fw_major = info->fw_major;
+       *fw_minor = info->fw_minor;
+       *fw_rev = info->fw_rev;
+       *fw_eng = info->fw_eng;
+}
+
+static void qed_handle_bulletin_change(struct qed_hwfn *hwfn)
+{
+       struct qed_eth_cb_ops *ops = hwfn->cdev->protocol_ops.eth;
+       u8 mac[ETH_ALEN], is_mac_exist, is_mac_forced;
+       void *cookie = hwfn->cdev->ops_cookie;
+
+       is_mac_exist = qed_vf_bulletin_get_forced_mac(hwfn, mac,
+                                                     &is_mac_forced);
+       if (is_mac_exist && is_mac_forced && cookie)
+               ops->force_mac(cookie, mac);
+
+       /* Always update link configuration according to bulletin */
+       qed_link_update(hwfn);
+}
+
+void qed_iov_vf_task(struct work_struct *work)
+{
+       struct qed_hwfn *hwfn = container_of(work, struct qed_hwfn,
+                                            iov_task.work);
+       u8 change = 0;
+
+       if (test_and_clear_bit(QED_IOV_WQ_STOP_WQ_FLAG, &hwfn->iov_task_flags))
+               return;
+
+       /* Handle bulletin board changes */
+       qed_vf_read_bulletin(hwfn, &change);
+       if (change)
+               qed_handle_bulletin_change(hwfn);
+
+       /* As VF is polling bulletin board, need to constantly re-schedule */
+       queue_delayed_work(hwfn->iov_wq, &hwfn->iov_task, HZ);
+}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_vf.h b/drivers/net/ethernet/qlogic/qed/qed_vf.h
new file mode 100644 (file)
index 0000000..b23ce58
--- /dev/null
@@ -0,0 +1,999 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015 QLogic Corporation
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#ifndef _QED_VF_H
+#define _QED_VF_H
+
+#include "qed_l2.h"
+#include "qed_mcp.h"
+
+#define T_ETH_INDIRECTION_TABLE_SIZE 128
+#define T_ETH_RSS_KEY_SIZE 10
+
+struct vf_pf_resc_request {
+       u8 num_rxqs;
+       u8 num_txqs;
+       u8 num_sbs;
+       u8 num_mac_filters;
+       u8 num_vlan_filters;
+       u8 num_mc_filters;
+       u16 padding;
+};
+
+struct hw_sb_info {
+       u16 hw_sb_id;
+       u8 sb_qid;
+       u8 padding[5];
+};
+
+#define TLV_BUFFER_SIZE                 1024
+
+enum {
+       PFVF_STATUS_WAITING,
+       PFVF_STATUS_SUCCESS,
+       PFVF_STATUS_FAILURE,
+       PFVF_STATUS_NOT_SUPPORTED,
+       PFVF_STATUS_NO_RESOURCE,
+       PFVF_STATUS_FORCED,
+};
+
+/* vf pf channel tlvs */
+/* general tlv header (used for both vf->pf request and pf->vf response) */
+struct channel_tlv {
+       u16 type;
+       u16 length;
+};
+
+/* header of first vf->pf tlv; carries the offset used to calculate the
+ * response buffer address
+ */
+struct vfpf_first_tlv {
+       struct channel_tlv tl;
+       u32 padding;
+       u64 reply_address;
+};
+
+/* header of pf->vf tlvs, carries the status of handling the request */
+struct pfvf_tlv {
+       struct channel_tlv tl;
+       u8 status;
+       u8 padding[3];
+};
+
+/* response tlv used for most tlvs */
+struct pfvf_def_resp_tlv {
+       struct pfvf_tlv hdr;
+};
+
+/* used to terminate and pad a tlv list */
+struct channel_list_end_tlv {
+       struct channel_tlv tl;
+       u8 padding[4];
+};
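/* Taken together, a vf->pf message is built in the vf2pf_request buffer as a
 * first TLV, zero or more extended TLVs, and a terminating list-end TLV.
 * Schematic layout (illustrative only, not part of the HSI definitions):
 *
 *   +----------------------+  offset 0
 *   | vfpf_first_tlv       |  type/length + reply_address
 *   +----------------------+
 *   | extended TLV(s)      |  e.g. vport-update activate / rss / accept
 *   +----------------------+
 *   | channel_list_end_tlv |  CHANNEL_TLV_LIST_END terminates the list
 *   +----------------------+
 */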
+
+#define VFPF_ACQUIRE_OS_LINUX (0)
+#define VFPF_ACQUIRE_OS_WINDOWS (1)
+#define VFPF_ACQUIRE_OS_ESX (2)
+#define VFPF_ACQUIRE_OS_SOLARIS (3)
+#define VFPF_ACQUIRE_OS_LINUX_USERSPACE (4)
+
+struct vfpf_acquire_tlv {
+       struct vfpf_first_tlv first_tlv;
+
+       struct vf_pf_vfdev_info {
+#define VFPF_ACQUIRE_CAP_OBSOLETE      (1 << 0)
+#define VFPF_ACQUIRE_CAP_100G          (1 << 1) /* VF can support 100g */
+               u64 capabilities;
+               u8 fw_major;
+               u8 fw_minor;
+               u8 fw_revision;
+               u8 fw_engineering;
+               u32 driver_version;
+               u16 opaque_fid; /* ME register value */
+               u8 os_type;     /* VFPF_ACQUIRE_OS_* value */
+               u8 eth_fp_hsi_major;
+               u8 eth_fp_hsi_minor;
+               u8 padding[3];
+       } vfdev_info;
+
+       struct vf_pf_resc_request resc_request;
+
+       u64 bulletin_addr;
+       u32 bulletin_size;
+       u32 padding;
+};
+
+/* receive side scaling tlv */
+struct vfpf_vport_update_rss_tlv {
+       struct channel_tlv tl;
+
+       u8 update_rss_flags;
+#define VFPF_UPDATE_RSS_CONFIG_FLAG       BIT(0)
+#define VFPF_UPDATE_RSS_CAPS_FLAG         BIT(1)
+#define VFPF_UPDATE_RSS_IND_TABLE_FLAG    BIT(2)
+#define VFPF_UPDATE_RSS_KEY_FLAG          BIT(3)
+
+       u8 rss_enable;
+       u8 rss_caps;
+       u8 rss_table_size_log;  /* The table size is 2 ^ rss_table_size_log */
+       u16 rss_ind_table[T_ETH_INDIRECTION_TABLE_SIZE];
+       u32 rss_key[T_ETH_RSS_KEY_SIZE];
+};
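/* Worked example of the size relation above (values assumed from this
 * header): rss_table_size_log = 7 gives an indirection table of 2^7 = 128
 * entries, i.e. the full T_ETH_INDIRECTION_TABLE_SIZE.
 */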
+
+struct pfvf_storm_stats {
+       u32 address;
+       u32 len;
+};
+
+struct pfvf_stats_info {
+       struct pfvf_storm_stats mstats;
+       struct pfvf_storm_stats pstats;
+       struct pfvf_storm_stats tstats;
+       struct pfvf_storm_stats ustats;
+};
+
+struct pfvf_acquire_resp_tlv {
+       struct pfvf_tlv hdr;
+
+       struct pf_vf_pfdev_info {
+               u32 chip_num;
+               u32 mfw_ver;
+
+               u16 fw_major;
+               u16 fw_minor;
+               u16 fw_rev;
+               u16 fw_eng;
+
+               u64 capabilities;
+#define PFVF_ACQUIRE_CAP_DEFAULT_UNTAGGED      BIT(0)
+#define PFVF_ACQUIRE_CAP_100G                  BIT(1)  /* If set, 100g PF */
+/* There are old PF versions where the PF might mistakenly override the
+ * [version-based] sanity mechanism and allow a VF that can't be supported
+ * to pass the acquisition phase.
+ * To overcome this, newer PFs indicate that they're past that point, and
+ * new VFs fail probe against older PFs that don't.
+ */
+#define PFVF_ACQUIRE_CAP_POST_FW_OVERRIDE      BIT(2)
+
+               u16 db_size;
+               u8 indices_per_sb;
+               u8 os_type;
+
+               /* These should match the PF's qed_dev values */
+               u16 chip_rev;
+               u8 dev_type;
+
+               u8 padding;
+
+               struct pfvf_stats_info stats_info;
+
+               u8 port_mac[ETH_ALEN];
+
+               /* It's possible PF had to configure an older fastpath HSI
+                * [in case VF is newer than PF]. This is communicated back
+                * to the VF. It can also be used in case of error due to
+                * non-matching versions to shed light in VF about failure.
+                */
+               u8 major_fp_hsi;
+               u8 minor_fp_hsi;
+       } pfdev_info;
+
+       struct pf_vf_resc {
+#define PFVF_MAX_QUEUES_PER_VF         16
+#define PFVF_MAX_SBS_PER_VF            16
+               struct hw_sb_info hw_sbs[PFVF_MAX_SBS_PER_VF];
+               u8 hw_qid[PFVF_MAX_QUEUES_PER_VF];
+               u8 cid[PFVF_MAX_QUEUES_PER_VF];
+
+               u8 num_rxqs;
+               u8 num_txqs;
+               u8 num_sbs;
+               u8 num_mac_filters;
+               u8 num_vlan_filters;
+               u8 num_mc_filters;
+               u8 padding[2];
+       } resc;
+
+       u32 bulletin_size;
+       u32 padding;
+};
+
+struct pfvf_start_queue_resp_tlv {
+       struct pfvf_tlv hdr;
+       u32 offset;             /* offset to consumer/producer of queue */
+       u8 padding[4];
+};
+
+/* Setup Queue */
+struct vfpf_start_rxq_tlv {
+       struct vfpf_first_tlv first_tlv;
+
+       /* physical addresses */
+       u64 rxq_addr;
+       u64 deprecated_sge_addr;
+       u64 cqe_pbl_addr;
+
+       u16 cqe_pbl_size;
+       u16 hw_sb;
+       u16 rx_qid;
+       u16 hc_rate;            /* desired interrupts per sec. */
+
+       u16 bd_max_bytes;
+       u16 stat_id;
+       u8 sb_index;
+       u8 padding[3];
+};
+
+struct vfpf_start_txq_tlv {
+       struct vfpf_first_tlv first_tlv;
+
+       /* physical addresses */
+       u64 pbl_addr;
+       u16 pbl_size;
+       u16 stat_id;
+       u16 tx_qid;
+       u16 hw_sb;
+
+       u32 flags;              /* VFPF_QUEUE_FLG_X flags */
+       u16 hc_rate;            /* desired interrupts per sec. */
+       u8 sb_index;
+       u8 padding[3];
+};
+
+/* Stop RX Queue */
+struct vfpf_stop_rxqs_tlv {
+       struct vfpf_first_tlv first_tlv;
+
+       u16 rx_qid;
+       u8 num_rxqs;
+       u8 cqe_completion;
+       u8 padding[4];
+};
+
+/* Stop TX Queues */
+struct vfpf_stop_txqs_tlv {
+       struct vfpf_first_tlv first_tlv;
+
+       u16 tx_qid;
+       u8 num_txqs;
+       u8 padding[5];
+};
+
+struct vfpf_update_rxq_tlv {
+       struct vfpf_first_tlv first_tlv;
+
+       u64 deprecated_sge_addr[PFVF_MAX_QUEUES_PER_VF];
+
+       u16 rx_qid;
+       u8 num_rxqs;
+       u8 flags;
+#define VFPF_RXQ_UPD_INIT_SGE_DEPRECATE_FLAG    BIT(0)
+#define VFPF_RXQ_UPD_COMPLETE_CQE_FLAG          BIT(1)
+#define VFPF_RXQ_UPD_COMPLETE_EVENT_FLAG        BIT(2)
+
+       u8 padding[4];
+};
+
+/* Set Queue Filters */
+struct vfpf_q_mac_vlan_filter {
+       u32 flags;
+#define VFPF_Q_FILTER_DEST_MAC_VALID    0x01
+#define VFPF_Q_FILTER_VLAN_TAG_VALID    0x02
+#define VFPF_Q_FILTER_SET_MAC           0x100  /* set/clear */
+
+       u8 mac[ETH_ALEN];
+       u16 vlan_tag;
+
+       u8 padding[4];
+};
+
+/* Start a vport */
+struct vfpf_vport_start_tlv {
+       struct vfpf_first_tlv first_tlv;
+
+       u64 sb_addr[PFVF_MAX_SBS_PER_VF];
+
+       u32 tpa_mode;
+       u16 dep1;
+       u16 mtu;
+
+       u8 vport_id;
+       u8 inner_vlan_removal;
+
+       u8 only_untagged;
+       u8 max_buffers_per_cqe;
+
+       u8 padding[4];
+};
+
+/* Extended tlvs - need to add rss, mcast, accept mode tlvs */
+struct vfpf_vport_update_activate_tlv {
+       struct channel_tlv tl;
+       u8 update_rx;
+       u8 update_tx;
+       u8 active_rx;
+       u8 active_tx;
+};
+
+struct vfpf_vport_update_tx_switch_tlv {
+       struct channel_tlv tl;
+       u8 tx_switching;
+       u8 padding[3];
+};
+
+struct vfpf_vport_update_vlan_strip_tlv {
+       struct channel_tlv tl;
+       u8 remove_vlan;
+       u8 padding[3];
+};
+
+struct vfpf_vport_update_mcast_bin_tlv {
+       struct channel_tlv tl;
+       u8 padding[4];
+
+       u64 bins[8];
+};
+
+struct vfpf_vport_update_accept_param_tlv {
+       struct channel_tlv tl;
+       u8 update_rx_mode;
+       u8 update_tx_mode;
+       u8 rx_accept_filter;
+       u8 tx_accept_filter;
+};
+
+struct vfpf_vport_update_accept_any_vlan_tlv {
+       struct channel_tlv tl;
+       u8 update_accept_any_vlan_flg;
+       u8 accept_any_vlan;
+
+       u8 padding[2];
+};
+
+struct vfpf_vport_update_sge_tpa_tlv {
+       struct channel_tlv tl;
+
+       u16 sge_tpa_flags;
+#define VFPF_TPA_IPV4_EN_FLAG          BIT(0)
+#define VFPF_TPA_IPV6_EN_FLAG          BIT(1)
+#define VFPF_TPA_PKT_SPLIT_FLAG                BIT(2)
+#define VFPF_TPA_HDR_DATA_SPLIT_FLAG   BIT(3)
+#define VFPF_TPA_GRO_CONSIST_FLAG      BIT(4)
+
+       u8 update_sge_tpa_flags;
+#define VFPF_UPDATE_SGE_DEPRECATED_FLAG        BIT(0)
+#define VFPF_UPDATE_TPA_EN_FLAG                BIT(1)
+#define VFPF_UPDATE_TPA_PARAM_FLAG     BIT(2)
+
+       u8 max_buffers_per_cqe;
+
+       u16 deprecated_sge_buff_size;
+       u16 tpa_max_size;
+       u16 tpa_min_size_to_start;
+       u16 tpa_min_size_to_cont;
+
+       u8 tpa_max_aggs_num;
+       u8 padding[7];
+};
+
+/* Primary tlv as a header for various extended tlvs for
+ * various functionalities in vport update ramrod.
+ */
+struct vfpf_vport_update_tlv {
+       struct vfpf_first_tlv first_tlv;
+};
+
+struct vfpf_ucast_filter_tlv {
+       struct vfpf_first_tlv first_tlv;
+
+       u8 opcode;
+       u8 type;
+
+       u8 mac[ETH_ALEN];
+
+       u16 vlan;
+       u16 padding[3];
+};
+
+struct tlv_buffer_size {
+       u8 tlv_buffer[TLV_BUFFER_SIZE];
+};
+
+union vfpf_tlvs {
+       struct vfpf_first_tlv first_tlv;
+       struct vfpf_acquire_tlv acquire;
+       struct vfpf_start_rxq_tlv start_rxq;
+       struct vfpf_start_txq_tlv start_txq;
+       struct vfpf_stop_rxqs_tlv stop_rxqs;
+       struct vfpf_stop_txqs_tlv stop_txqs;
+       struct vfpf_update_rxq_tlv update_rxq;
+       struct vfpf_vport_start_tlv start_vport;
+       struct vfpf_vport_update_tlv vport_update;
+       struct vfpf_ucast_filter_tlv ucast_filter;
+       struct channel_list_end_tlv list_end;
+       struct tlv_buffer_size tlv_buf_size;
+};
+
+union pfvf_tlvs {
+       struct pfvf_def_resp_tlv default_resp;
+       struct pfvf_acquire_resp_tlv acquire_resp;
+       struct tlv_buffer_size tlv_buf_size;
+       struct pfvf_start_queue_resp_tlv queue_start;
+};
+
+enum qed_bulletin_bit {
+       /* Alert the VF that a forced MAC was set by the PF */
+       MAC_ADDR_FORCED = 0,
+       /* Alert the VF that a forced VLAN was set by the PF */
+       VLAN_ADDR_FORCED = 2,
+
+       /* Indicate that `default_only_untagged' contains actual data */
+       VFPF_BULLETIN_UNTAGGED_DEFAULT = 3,
+       VFPF_BULLETIN_UNTAGGED_DEFAULT_FORCED = 4,
+
+       /* Alert the VF that a suggested MAC was sent by the PF.
+        * MAC_ADDR is disabled in case MAC_ADDR_FORCED is set.
+        */
+       VFPF_BULLETIN_MAC_ADDR = 5
+};
+
+struct qed_bulletin_content {
+       /* CRC of the structure to ensure it is not read mid-update */
+       u32 crc;
+
+       u32 version;
+
+       /* bitmap indicating which fields hold valid values */
+       u64 valid_bitmap;
+
+       /* used for MAC_ADDR or MAC_ADDR_FORCED */
+       u8 mac[ETH_ALEN];
+
+       /* If valid, 1 => only untagged Rx if no vlan is configured */
+       u8 default_only_untagged;
+       u8 padding;
+
+       /* The following is a 'copy' of qed_mcp_link_state,
+        * qed_mcp_link_params and qed_mcp_link_capabilities. Since those
+        * structs may grow in the future we cannot embed them here;
+        * instead we need to carry all of their fields.
+        */
+       u8 req_autoneg;
+       u8 req_autoneg_pause;
+       u8 req_forced_rx;
+       u8 req_forced_tx;
+       u8 padding2[4];
+
+       u32 req_adv_speed;
+       u32 req_forced_speed;
+       u32 req_loopback;
+       u32 padding3;
+
+       u8 link_up;
+       u8 full_duplex;
+       u8 autoneg;
+       u8 autoneg_complete;
+       u8 parallel_detection;
+       u8 pfc_enabled;
+       u8 partner_tx_flow_ctrl_en;
+       u8 partner_rx_flow_ctrl_en;
+       u8 partner_adv_pause;
+       u8 sfp_tx_fault;
+       u8 padding4[6];
+
+       u32 speed;
+       u32 partner_adv_speed;
+
+       u32 capability_speed;
+
+       /* Forced vlan */
+       u16 pvid;
+       u16 padding5;
+};
+
+struct qed_bulletin {
+       dma_addr_t phys;
+       struct qed_bulletin_content *p_virt;
+       u32 size;
+};
+
+enum {
+       CHANNEL_TLV_NONE,       /* ends tlv sequence */
+       CHANNEL_TLV_ACQUIRE,
+       CHANNEL_TLV_VPORT_START,
+       CHANNEL_TLV_VPORT_UPDATE,
+       CHANNEL_TLV_VPORT_TEARDOWN,
+       CHANNEL_TLV_START_RXQ,
+       CHANNEL_TLV_START_TXQ,
+       CHANNEL_TLV_STOP_RXQS,
+       CHANNEL_TLV_STOP_TXQS,
+       CHANNEL_TLV_UPDATE_RXQ,
+       CHANNEL_TLV_INT_CLEANUP,
+       CHANNEL_TLV_CLOSE,
+       CHANNEL_TLV_RELEASE,
+       CHANNEL_TLV_LIST_END,
+       CHANNEL_TLV_UCAST_FILTER,
+       CHANNEL_TLV_VPORT_UPDATE_ACTIVATE,
+       CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH,
+       CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP,
+       CHANNEL_TLV_VPORT_UPDATE_MCAST,
+       CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM,
+       CHANNEL_TLV_VPORT_UPDATE_RSS,
+       CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN,
+       CHANNEL_TLV_VPORT_UPDATE_SGE_TPA,
+       CHANNEL_TLV_MAX,
+
+       /* Required for iterating over vport-update tlvs.
+        * Will break if the vport-update tlvs are not sequential.
+        */
+       CHANNEL_TLV_VPORT_UPDATE_MAX = CHANNEL_TLV_VPORT_UPDATE_SGE_TPA + 1,
+};
+
+/* This data is held in the qed_hwfn structure for VFs only. */
+struct qed_vf_iov {
+       union vfpf_tlvs *vf2pf_request;
+       dma_addr_t vf2pf_request_phys;
+       union pfvf_tlvs *pf2vf_reply;
+       dma_addr_t pf2vf_reply_phys;
+
+       /* Should be taken whenever the mailbox buffers are accessed */
+       struct mutex mutex;
+       u8 *offset;
+
+       /* Bulletin Board */
+       struct qed_bulletin bulletin;
+       struct qed_bulletin_content bulletin_shadow;
+
+       /* we set aside a copy of the acquire response */
+       struct pfvf_acquire_resp_tlv acquire_resp;
+};
+
+#ifdef CONFIG_QED_SRIOV
+/**
+ * @brief Read the VF bulletin and act on it if needed
+ *
+ * @param p_hwfn
+ * @param p_change - qed sets it to 1 if the bulletin board has changed, 0 otherwise.
+ *
+ * @return enum _qed_status
+ */
+int qed_vf_read_bulletin(struct qed_hwfn *p_hwfn, u8 *p_change);
+
+/**
+ * @brief Get link parameters for VF from qed
+ *
+ * @param p_hwfn
+ * @param params - the link params structure to be filled for the VF
+ */
+void qed_vf_get_link_params(struct qed_hwfn *p_hwfn,
+                           struct qed_mcp_link_params *params);
+
+/**
+ * @brief Get link state for VF from qed
+ *
+ * @param p_hwfn
+ * @param link - the link state structure to be filled for the VF
+ */
+void qed_vf_get_link_state(struct qed_hwfn *p_hwfn,
+                          struct qed_mcp_link_state *link);
+
+/**
+ * @brief Get link capabilities for VF from qed
+ *
+ * @param p_hwfn
+ * @param p_link_caps - the link capabilities structure to be filled for the VF
+ */
+void qed_vf_get_link_caps(struct qed_hwfn *p_hwfn,
+                         struct qed_mcp_link_capabilities *p_link_caps);
+
+/**
+ * @brief Get number of Rx queues allocated for VF by qed
+ *
+ *  @param p_hwfn
+ *  @param num_rxqs - allocated RX queues
+ */
+void qed_vf_get_num_rxqs(struct qed_hwfn *p_hwfn, u8 *num_rxqs);
+
+/**
+ * @brief Get port mac address for VF
+ *
+ * @param p_hwfn
+ * @param port_mac - destination location for port mac
+ */
+void qed_vf_get_port_mac(struct qed_hwfn *p_hwfn, u8 *port_mac);
+
+/**
+ * @brief Get number of VLAN filters allocated for VF by qed
+ *
+ *  @param p_hwfn
+ *  @param num_vlan_filters - allocated VLAN filters
+ */
+void qed_vf_get_num_vlan_filters(struct qed_hwfn *p_hwfn,
+                                u8 *num_vlan_filters);
+
+/**
+ * @brief Check if VF can set a MAC address
+ *
+ * @param p_hwfn
+ * @param mac
+ *
+ * @return bool
+ */
+bool qed_vf_check_mac(struct qed_hwfn *p_hwfn, u8 *mac);
+
+/**
+ * @brief Set firmware version information in dev_info from the VF's acquire response tlv
+ *
+ * @param p_hwfn
+ * @param fw_major
+ * @param fw_minor
+ * @param fw_rev
+ * @param fw_eng
+ */
+void qed_vf_get_fw_version(struct qed_hwfn *p_hwfn,
+                          u16 *fw_major, u16 *fw_minor,
+                          u16 *fw_rev, u16 *fw_eng);
+
+/**
+ * @brief hw preparation for VF
+ *      sends ACQUIRE message
+ *
+ * @param p_hwfn
+ *
+ * @return int
+ */
+int qed_vf_hw_prepare(struct qed_hwfn *p_hwfn);
+
+/**
+ * @brief VF - start the RX Queue by sending a message to the PF
+ * @param p_hwfn
+ * @param cid                   - zero based within the VF
+ * @param rx_queue_id           - zero based within the VF
+ * @param sb                    - VF status block for this queue
+ * @param sb_index              - Index within the status block
+ * @param bd_max_bytes          - maximum number of bytes per bd
+ * @param bd_chain_phys_addr    - physical address of bd chain
+ * @param cqe_pbl_addr          - physical address of pbl
+ * @param cqe_pbl_size          - pbl size
+ * @param pp_prod               - pointer to the producer to be
+ *                               used in fastpath
+ *
+ * @return int
+ */
+int qed_vf_pf_rxq_start(struct qed_hwfn *p_hwfn,
+                       u8 rx_queue_id,
+                       u16 sb,
+                       u8 sb_index,
+                       u16 bd_max_bytes,
+                       dma_addr_t bd_chain_phys_addr,
+                       dma_addr_t cqe_pbl_addr,
+                       u16 cqe_pbl_size, void __iomem **pp_prod);
+
+/**
+ * @brief VF - start the TX queue by sending a message to the
+ *        PF.
+ *
+ * @param p_hwfn
+ * @param tx_queue_id           - zero based within the VF
+ * @param sb                    - status block for this queue
+ * @param sb_index              - index within the status block
+ * @param pbl_addr              - physical address of the Tx chain PBL
+ * @param pp_doorbell           - pointer to the address to which
+ *                      the doorbell should be written.
+ *
+ * @return int
+ */
+int qed_vf_pf_txq_start(struct qed_hwfn *p_hwfn,
+                       u16 tx_queue_id,
+                       u16 sb,
+                       u8 sb_index,
+                       dma_addr_t pbl_addr,
+                       u16 pbl_size, void __iomem **pp_doorbell);
+
+/**
+ * @brief VF - stop the RX queue by sending a message to the PF
+ *
+ * @param p_hwfn
+ * @param rx_qid
+ * @param cqe_completion
+ *
+ * @return int
+ */
+int qed_vf_pf_rxq_stop(struct qed_hwfn *p_hwfn,
+                      u16 rx_qid, bool cqe_completion);
+
+/**
+ * @brief VF - stop the TX queue by sending a message to the PF
+ *
+ * @param p_hwfn
+ * @param tx_qid
+ *
+ * @return int
+ */
+int qed_vf_pf_txq_stop(struct qed_hwfn *p_hwfn, u16 tx_qid);
+
+/**
+ * @brief VF - send a vport update command
+ *
+ * @param p_hwfn
+ * @param params
+ *
+ * @return int
+ */
+int qed_vf_pf_vport_update(struct qed_hwfn *p_hwfn,
+                          struct qed_sp_vport_update_params *p_params);
+
+/**
+ *
+ * @brief VF - send a close message to PF
+ *
+ * @param p_hwfn
+ *
+ * @return enum _qed_status
+ */
+int qed_vf_pf_reset(struct qed_hwfn *p_hwfn);
+
+/**
+ * @brief VF - free the VF's memory
+ *
+ * @param p_hwfn
+ *
+ * @return enum _qed_status
+ */
+int qed_vf_pf_release(struct qed_hwfn *p_hwfn);
+
+/**
+ * @brief qed_vf_get_igu_sb_id - Get the IGU SB ID for a given
+ *        sb_id. For VFs, IGU SBs don't have to be contiguous.
+ *
+ * @param p_hwfn
+ * @param sb_id
+ *
+ * @return INLINE u16
+ */
+u16 qed_vf_get_igu_sb_id(struct qed_hwfn *p_hwfn, u16 sb_id);
+
+/**
+ * @brief qed_vf_pf_vport_start - perform vport start for VF.
+ *
+ * @param p_hwfn
+ * @param vport_id
+ * @param mtu
+ * @param inner_vlan_removal
+ * @param tpa_mode
+ * @param max_buffers_per_cqe
+ * @param only_untagged - default behavior regarding vlan acceptance
+ *
+ * @return enum _qed_status
+ */
+int qed_vf_pf_vport_start(struct qed_hwfn *p_hwfn,
+                         u8 vport_id,
+                         u16 mtu,
+                         u8 inner_vlan_removal,
+                         enum qed_tpa_mode tpa_mode,
+                         u8 max_buffers_per_cqe, u8 only_untagged);
+
+/**
+ * @brief qed_vf_pf_vport_stop - stop the VF's vport
+ *
+ * @param p_hwfn
+ *
+ * @return enum _qed_status
+ */
+int qed_vf_pf_vport_stop(struct qed_hwfn *p_hwfn);
+
+int qed_vf_pf_filter_ucast(struct qed_hwfn *p_hwfn,
+                          struct qed_filter_ucast *p_param);
+
+void qed_vf_pf_filter_mcast(struct qed_hwfn *p_hwfn,
+                           struct qed_filter_mcast *p_filter_cmd);
+
+/**
+ * @brief qed_vf_pf_int_cleanup - clean the SB of the VF
+ *
+ * @param p_hwfn
+ *
+ * @return enum _qed_status
+ */
+int qed_vf_pf_int_cleanup(struct qed_hwfn *p_hwfn);
+
+/**
+ * @brief - return the link params in a given bulletin board
+ *
+ * @param p_hwfn
+ * @param p_params - pointer to a struct to fill with link params
+ * @param p_bulletin
+ */
+void __qed_vf_get_link_params(struct qed_hwfn *p_hwfn,
+                             struct qed_mcp_link_params *p_params,
+                             struct qed_bulletin_content *p_bulletin);
+
+/**
+ * @brief - return the link state in a given bulletin board
+ *
+ * @param p_hwfn
+ * @param p_link - pointer to a struct to fill with link state
+ * @param p_bulletin
+ */
+void __qed_vf_get_link_state(struct qed_hwfn *p_hwfn,
+                            struct qed_mcp_link_state *p_link,
+                            struct qed_bulletin_content *p_bulletin);
+
+/**
+ * @brief - return the link capabilities in a given bulletin board
+ *
+ * @param p_hwfn
+ * @param p_link - pointer to a struct to fill with link capabilities
+ * @param p_bulletin
+ */
+void __qed_vf_get_link_caps(struct qed_hwfn *p_hwfn,
+                           struct qed_mcp_link_capabilities *p_link_caps,
+                           struct qed_bulletin_content *p_bulletin);
+
+void qed_iov_vf_task(struct work_struct *work);
+#else
+static inline void qed_vf_get_link_params(struct qed_hwfn *p_hwfn,
+                                         struct qed_mcp_link_params *params)
+{
+}
+
+static inline void qed_vf_get_link_state(struct qed_hwfn *p_hwfn,
+                                        struct qed_mcp_link_state *link)
+{
+}
+
+static inline void
+qed_vf_get_link_caps(struct qed_hwfn *p_hwfn,
+                    struct qed_mcp_link_capabilities *p_link_caps)
+{
+}
+
+static inline void qed_vf_get_num_rxqs(struct qed_hwfn *p_hwfn, u8 *num_rxqs)
+{
+}
+
+static inline void qed_vf_get_port_mac(struct qed_hwfn *p_hwfn, u8 *port_mac)
+{
+}
+
+static inline void qed_vf_get_num_vlan_filters(struct qed_hwfn *p_hwfn,
+                                              u8 *num_vlan_filters)
+{
+}
+
+static inline bool qed_vf_check_mac(struct qed_hwfn *p_hwfn, u8 *mac)
+{
+       return false;
+}
+
+static inline void qed_vf_get_fw_version(struct qed_hwfn *p_hwfn,
+                                        u16 *fw_major, u16 *fw_minor,
+                                        u16 *fw_rev, u16 *fw_eng)
+{
+}
+
+static inline int qed_vf_hw_prepare(struct qed_hwfn *p_hwfn)
+{
+       return -EINVAL;
+}
+
+static inline int qed_vf_pf_rxq_start(struct qed_hwfn *p_hwfn,
+                                     u8 rx_queue_id,
+                                     u16 sb,
+                                     u8 sb_index,
+                                     u16 bd_max_bytes,
+                                     dma_addr_t bd_chain_phys_addr,
+                                     dma_addr_t cqe_pbl_addr,
+                                     u16 cqe_pbl_size, void __iomem **pp_prod)
+{
+       return -EINVAL;
+}
+
+static inline int qed_vf_pf_txq_start(struct qed_hwfn *p_hwfn,
+                                     u16 tx_queue_id,
+                                     u16 sb,
+                                     u8 sb_index,
+                                     dma_addr_t pbl_addr,
+                                     u16 pbl_size, void __iomem **pp_doorbell)
+{
+       return -EINVAL;
+}
+
+static inline int qed_vf_pf_rxq_stop(struct qed_hwfn *p_hwfn,
+                                    u16 rx_qid, bool cqe_completion)
+{
+       return -EINVAL;
+}
+
+static inline int qed_vf_pf_txq_stop(struct qed_hwfn *p_hwfn, u16 tx_qid)
+{
+       return -EINVAL;
+}
+
+static inline int
+qed_vf_pf_vport_update(struct qed_hwfn *p_hwfn,
+                      struct qed_sp_vport_update_params *p_params)
+{
+       return -EINVAL;
+}
+
+static inline int qed_vf_pf_reset(struct qed_hwfn *p_hwfn)
+{
+       return -EINVAL;
+}
+
+static inline int qed_vf_pf_release(struct qed_hwfn *p_hwfn)
+{
+       return -EINVAL;
+}
+
+static inline u16 qed_vf_get_igu_sb_id(struct qed_hwfn *p_hwfn, u16 sb_id)
+{
+       return 0;
+}
+
+static inline int qed_vf_pf_vport_start(struct qed_hwfn *p_hwfn,
+                                       u8 vport_id,
+                                       u16 mtu,
+                                       u8 inner_vlan_removal,
+                                       enum qed_tpa_mode tpa_mode,
+                                       u8 max_buffers_per_cqe,
+                                       u8 only_untagged)
+{
+       return -EINVAL;
+}
+
+static inline int qed_vf_pf_vport_stop(struct qed_hwfn *p_hwfn)
+{
+       return -EINVAL;
+}
+
+static inline int qed_vf_pf_filter_ucast(struct qed_hwfn *p_hwfn,
+                                        struct qed_filter_ucast *p_param)
+{
+       return -EINVAL;
+}
+
+static inline void qed_vf_pf_filter_mcast(struct qed_hwfn *p_hwfn,
+                                         struct qed_filter_mcast *p_filter_cmd)
+{
+}
+
+static inline int qed_vf_pf_int_cleanup(struct qed_hwfn *p_hwfn)
+{
+       return -EINVAL;
+}
+
+static inline void __qed_vf_get_link_params(struct qed_hwfn *p_hwfn,
+                                           struct qed_mcp_link_params
+                                           *p_params,
+                                           struct qed_bulletin_content
+                                           *p_bulletin)
+{
+}
+
+static inline void __qed_vf_get_link_state(struct qed_hwfn *p_hwfn,
+                                          struct qed_mcp_link_state *p_link,
+                                          struct qed_bulletin_content
+                                          *p_bulletin)
+{
+}
+
+static inline void
+__qed_vf_get_link_caps(struct qed_hwfn *p_hwfn,
+                      struct qed_mcp_link_capabilities *p_link_caps,
+                      struct qed_bulletin_content *p_bulletin)
+{
+}
+
+static inline void qed_iov_vf_task(struct work_struct *work)
+{
+}
+#endif
+
+#endif
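The VF interface above is consumed by qed when running in VF mode. For orientation only (this sketch is not part of the patch), the hypothetical helper below shows how a caller might poll the bulletin board using the prototypes declared in this header; the function name example_vf_poll_bulletin and its placement are illustrative assumptions, and it presumes an already-initialized struct qed_hwfn plus the qed_mcp_link_params definition from qed_mcp.h.

/* Illustrative example only - not part of the driver sources. */
static void example_vf_poll_bulletin(struct qed_hwfn *p_hwfn)
{
	struct qed_mcp_link_params params;
	u8 changed = 0;

	/* qed_vf_read_bulletin() sets 'changed' to 1 only when the
	 * bulletin board content has changed since the last read.
	 */
	if (qed_vf_read_bulletin(p_hwfn, &changed) || !changed)
		return;

	/* Refresh the link parameters the PF advertised in the bulletin */
	qed_vf_get_link_params(p_hwfn, &params);
}

When CONFIG_QED_SRIOV is disabled, most of these calls fall back to the inline stubs above (for example, qed_vf_get_link_params() becomes a no-op), so common code does not need its own ifdefs.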
diff --git a/drivers/net/ethernet/qlogic/qede/Makefile b/drivers/net/ethernet/qlogic/qede/Makefile
new file mode 100644 (file)
index 0000000..74a4985
--- /dev/null
@@ -0,0 +1,4 @@
+obj-$(CONFIG_QEDE) := qede.o
+
+qede-y := qede_main.o qede_ethtool.o
+qede-$(CONFIG_DCB) += qede_dcbnl.o
diff --git a/drivers/net/ethernet/qlogic/qede/qede.h b/drivers/net/ethernet/qlogic/qede/qede.h
new file mode 100644 (file)
index 0000000..02b06d4
--- /dev/null
@@ -0,0 +1,342 @@
+/* QLogic qede NIC Driver
+* Copyright (c) 2015 QLogic Corporation
+*
+* This software is available under the terms of the GNU General Public License
+* (GPL) Version 2, available from the file COPYING in the main directory of
+* this source tree.
+*/
+
+#ifndef _QEDE_H_
+#define _QEDE_H_
+#include <linux/compiler.h>
+#include <linux/version.h>
+#include <linux/workqueue.h>
+#include <linux/netdevice.h>
+#include <linux/interrupt.h>
+#include <linux/bitmap.h>
+#include <linux/kernel.h>
+#include <linux/mutex.h>
+#include <linux/io.h>
+#include <linux/qed/common_hsi.h>
+#include <linux/qed/eth_common.h>
+#include <linux/qed/qed_if.h>
+#include <linux/qed/qed_chain.h>
+#include <linux/qed/qed_eth_if.h>
+
+#define QEDE_MAJOR_VERSION             8
+#define QEDE_MINOR_VERSION             10
+#define QEDE_REVISION_VERSION          1
+#define QEDE_ENGINEERING_VERSION       20
+#define DRV_MODULE_VERSION __stringify(QEDE_MAJOR_VERSION) "." \
+               __stringify(QEDE_MINOR_VERSION) "."             \
+               __stringify(QEDE_REVISION_VERSION) "."          \
+               __stringify(QEDE_ENGINEERING_VERSION)
+
+#define DRV_MODULE_SYM         qede
+
+struct qede_stats {
+       u64 no_buff_discards;
+       u64 rx_ucast_bytes;
+       u64 rx_mcast_bytes;
+       u64 rx_bcast_bytes;
+       u64 rx_ucast_pkts;
+       u64 rx_mcast_pkts;
+       u64 rx_bcast_pkts;
+       u64 mftag_filter_discards;
+       u64 mac_filter_discards;
+       u64 tx_ucast_bytes;
+       u64 tx_mcast_bytes;
+       u64 tx_bcast_bytes;
+       u64 tx_ucast_pkts;
+       u64 tx_mcast_pkts;
+       u64 tx_bcast_pkts;
+       u64 tx_err_drop_pkts;
+       u64 coalesced_pkts;
+       u64 coalesced_events;
+       u64 coalesced_aborts_num;
+       u64 non_coalesced_pkts;
+       u64 coalesced_bytes;
+
+       /* port */
+       u64 rx_64_byte_packets;
+       u64 rx_65_to_127_byte_packets;
+       u64 rx_128_to_255_byte_packets;
+       u64 rx_256_to_511_byte_packets;
+       u64 rx_512_to_1023_byte_packets;
+       u64 rx_1024_to_1518_byte_packets;
+       u64 rx_1519_to_1522_byte_packets;
+       u64 rx_1519_to_2047_byte_packets;
+       u64 rx_2048_to_4095_byte_packets;
+       u64 rx_4096_to_9216_byte_packets;
+       u64 rx_9217_to_16383_byte_packets;
+       u64 rx_crc_errors;
+       u64 rx_mac_crtl_frames;
+       u64 rx_pause_frames;
+       u64 rx_pfc_frames;
+       u64 rx_align_errors;
+       u64 rx_carrier_errors;
+       u64 rx_oversize_packets;
+       u64 rx_jabbers;
+       u64 rx_undersize_packets;
+       u64 rx_fragments;
+       u64 tx_64_byte_packets;
+       u64 tx_65_to_127_byte_packets;
+       u64 tx_128_to_255_byte_packets;
+       u64 tx_256_to_511_byte_packets;
+       u64 tx_512_to_1023_byte_packets;
+       u64 tx_1024_to_1518_byte_packets;
+       u64 tx_1519_to_2047_byte_packets;
+       u64 tx_2048_to_4095_byte_packets;
+       u64 tx_4096_to_9216_byte_packets;
+       u64 tx_9217_to_16383_byte_packets;
+       u64 tx_pause_frames;
+       u64 tx_pfc_frames;
+       u64 tx_lpi_entry_count;
+       u64 tx_total_collisions;
+       u64 brb_truncates;
+       u64 brb_discards;
+       u64 tx_mac_ctrl_frames;
+};
+
+struct qede_vlan {
+       struct list_head list;
+       u16 vid;
+       bool configured;
+};
+
+struct qede_dev {
+       struct qed_dev                  *cdev;
+       struct net_device               *ndev;
+       struct pci_dev                  *pdev;
+
+       u32                             dp_module;
+       u8                              dp_level;
+
+       u32 flags;
+#define QEDE_FLAG_IS_VF        BIT(0)
+#define IS_VF(edev)    (!!((edev)->flags & QEDE_FLAG_IS_VF))
+
+       const struct qed_eth_ops        *ops;
+
+       struct qed_dev_eth_info dev_info;
+#define QEDE_MAX_RSS_CNT(edev) ((edev)->dev_info.num_queues)
+#define QEDE_MAX_TSS_CNT(edev) ((edev)->dev_info.num_queues * \
+                                (edev)->dev_info.num_tc)
+
+       struct qede_fastpath            *fp_array;
+       u16                             req_rss;
+       u16                             num_rss;
+       u8                              num_tc;
+#define QEDE_RSS_CNT(edev)             ((edev)->num_rss)
+#define QEDE_TSS_CNT(edev)             ((edev)->num_rss *      \
+                                        (edev)->num_tc)
+#define QEDE_TSS_IDX(edev, txqidx)     ((txqidx) % (edev)->num_rss)
+#define QEDE_TC_IDX(edev, txqidx)      ((txqidx) / (edev)->num_rss)
+#define QEDE_TX_QUEUE(edev, txqidx)    \
+       (&(edev)->fp_array[QEDE_TSS_IDX((edev), (txqidx))].txqs[QEDE_TC_IDX( \
+                                                       (edev), (txqidx))])
+
+       struct qed_int_info             int_info;
+       unsigned char                   primary_mac[ETH_ALEN];
+
+       /* Smaller private variant of the RTNL lock */
+       struct mutex                    qede_lock;
+       u32                             state; /* Protected by qede_lock */
+       u16                             rx_buf_size;
+       u32                             rx_copybreak;
+
+       /* L2 header size + 2*VLANs (8 bytes) + LLC SNAP (8 bytes) */
+#define ETH_OVERHEAD                   (ETH_HLEN + 8 + 8)
+       /* Max supported alignment is 256 (8 shift)
+        * minimal alignment shift 6 is optimal for 57xxx HW performance
+        */
+#define QEDE_RX_ALIGN_SHIFT            max(6, min(8, L1_CACHE_SHIFT))
+       /* We assume build_skb() uses sizeof(struct skb_shared_info) bytes
+        * at the end of skb->data, to avoid wasting a full cache line.
+        * This reduces memory use (skb->truesize).
+        */
+#define QEDE_FW_RX_ALIGN_END                                   \
+       max_t(u64, 1UL << QEDE_RX_ALIGN_SHIFT,                  \
+             SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
+
+       struct qede_stats               stats;
+#define QEDE_RSS_INDIR_INITED  BIT(0)
+#define QEDE_RSS_KEY_INITED    BIT(1)
+#define QEDE_RSS_CAPS_INITED   BIT(2)
+       u32 rss_params_inited; /* bit-field to track initialized rss params */
+       struct qed_update_vport_rss_params      rss_params;
+       u16                     q_num_rx_buffers; /* Must be a power of two */
+       u16                     q_num_tx_buffers; /* Must be a power of two */
+
+       bool gro_disable;
+       struct list_head vlan_list;
+       u16 configured_vlans;
+       u16 non_configured_vlans;
+       bool accept_any_vlan;
+       struct delayed_work             sp_task;
+       unsigned long                   sp_flags;
+       u16                             vxlan_dst_port;
+       u16                             geneve_dst_port;
+};
+
+enum QEDE_STATE {
+       QEDE_STATE_CLOSED,
+       QEDE_STATE_OPEN,
+};
+
+#define HILO_U64(hi, lo)               ((((u64)(hi)) << 32) + (lo))
+
+#define        MAX_NUM_TC      8
+#define        MAX_NUM_PRI     8
+
+/* The driver supports the new build_skb() API:
+ * RX ring buffer contains pointer to kmalloc() data only,
+ * skbs are built only after the frame has been DMA'ed.
+ */
+struct sw_rx_data {
+       struct page *data;
+       dma_addr_t mapping;
+       unsigned int page_offset;
+};
+
+enum qede_agg_state {
+       QEDE_AGG_STATE_NONE  = 0,
+       QEDE_AGG_STATE_START = 1,
+       QEDE_AGG_STATE_ERROR = 2
+};
+
+struct qede_agg_info {
+       struct sw_rx_data replace_buf;
+       dma_addr_t replace_buf_mapping;
+       struct sw_rx_data start_buf;
+       dma_addr_t start_buf_mapping;
+       struct eth_fast_path_rx_tpa_start_cqe start_cqe;
+       enum qede_agg_state agg_state;
+       struct sk_buff *skb;
+       int frag_id;
+       u16 vlan_tag;
+};
+
+struct qede_rx_queue {
+       __le16                  *hw_cons_ptr;
+       struct sw_rx_data       *sw_rx_ring;
+       u16                     sw_rx_cons;
+       u16                     sw_rx_prod;
+       struct qed_chain        rx_bd_ring;
+       struct qed_chain        rx_comp_ring;
+       void __iomem            *hw_rxq_prod_addr;
+
+       /* GRO */
+       struct qede_agg_info    tpa_info[ETH_TPA_MAX_AGGS_NUM];
+
+       int                     rx_buf_size;
+       unsigned int            rx_buf_seg_size;
+
+       u16                     num_rx_buffers;
+       u16                     rxq_id;
+
+       u64                     rx_hw_errors;
+       u64                     rx_alloc_errors;
+       u64                     rx_ip_frags;
+};
+
+union db_prod {
+       struct eth_db_data data;
+       u32             raw;
+};
+
+struct sw_tx_bd {
+       struct sk_buff *skb;
+       u8 flags;
+/* Set on the first BD descriptor when there is a split BD */
+#define QEDE_TSO_SPLIT_BD              BIT(0)
+};
+
+struct qede_tx_queue {
+       int                     index; /* Queue index */
+       __le16                  *hw_cons_ptr;
+       struct sw_tx_bd         *sw_tx_ring;
+       u16                     sw_tx_cons;
+       u16                     sw_tx_prod;
+       struct qed_chain        tx_pbl;
+       void __iomem            *doorbell_addr;
+       union db_prod           tx_db;
+
+       u16                     num_tx_buffers;
+};
+
+#define BD_UNMAP_ADDR(bd)              HILO_U64(le32_to_cpu((bd)->addr.hi), \
+                                                le32_to_cpu((bd)->addr.lo))
+#define BD_SET_UNMAP_ADDR_LEN(bd, maddr, len)                          \
+       do {                                                            \
+               (bd)->addr.hi = cpu_to_le32(upper_32_bits(maddr));      \
+               (bd)->addr.lo = cpu_to_le32(lower_32_bits(maddr));      \
+               (bd)->nbytes = cpu_to_le16(len);                        \
+       } while (0)
+#define BD_UNMAP_LEN(bd)               (le16_to_cpu((bd)->nbytes))
+
+struct qede_fastpath {
+       struct qede_dev *edev;
+       u8                      rss_id;
+       struct napi_struct      napi;
+       struct qed_sb_info      *sb_info;
+       struct qede_rx_queue    *rxq;
+       struct qede_tx_queue    *txqs;
+
+#define VEC_NAME_SIZE  (sizeof(((struct net_device *)0)->name) + 8)
+       char    name[VEC_NAME_SIZE];
+};
+
+/* Debug print definitions */
+#define DP_NAME(edev) ((edev)->ndev->name)
+
+#define XMIT_PLAIN             0
+#define XMIT_L4_CSUM           BIT(0)
+#define XMIT_LSO               BIT(1)
+#define XMIT_ENC               BIT(2)
+
+#define QEDE_CSUM_ERROR                        BIT(0)
+#define QEDE_CSUM_UNNECESSARY          BIT(1)
+#define QEDE_TUNN_CSUM_UNNECESSARY     BIT(2)
+
+#define QEDE_SP_RX_MODE                        1
+#define QEDE_SP_VXLAN_PORT_CONFIG      2
+#define QEDE_SP_GENEVE_PORT_CONFIG     3
+
+union qede_reload_args {
+       u16 mtu;
+};
+
+#ifdef CONFIG_DCB
+void qede_set_dcbnl_ops(struct net_device *ndev);
+#endif
+void qede_config_debug(uint debug, u32 *p_dp_module, u8 *p_dp_level);
+void qede_set_ethtool_ops(struct net_device *netdev);
+void qede_reload(struct qede_dev *edev,
+                void (*func)(struct qede_dev *edev,
+                             union qede_reload_args *args),
+                union qede_reload_args *args);
+int qede_change_mtu(struct net_device *dev, int new_mtu);
+void qede_fill_by_demand_stats(struct qede_dev *edev);
+bool qede_has_rx_work(struct qede_rx_queue *rxq);
+int qede_txq_has_work(struct qede_tx_queue *txq);
+void qede_recycle_rx_bd_ring(struct qede_rx_queue *rxq, struct qede_dev *edev,
+                            u8 count);
+
+#define RX_RING_SIZE_POW       13
+#define RX_RING_SIZE           ((u16)BIT(RX_RING_SIZE_POW))
+#define NUM_RX_BDS_MAX         (RX_RING_SIZE - 1)
+#define NUM_RX_BDS_MIN         128
+#define NUM_RX_BDS_DEF         NUM_RX_BDS_MAX
+
+#define TX_RING_SIZE_POW       13
+#define TX_RING_SIZE           ((u16)BIT(TX_RING_SIZE_POW))
+#define NUM_TX_BDS_MAX         (TX_RING_SIZE - 1)
+#define NUM_TX_BDS_MIN         128
+#define NUM_TX_BDS_DEF         NUM_TX_BDS_MAX
+
+#define QEDE_MIN_PKT_LEN       64
+#define QEDE_RX_HDR_SIZE       256
+#define        for_each_rss(i) for (i = 0; i < edev->num_rss; i++)
+
+#endif /* _QEDE_H_ */
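For illustration only (not part of the patch): the BD_UNMAP_ADDR / BD_SET_UNMAP_ADDR_LEN / BD_UNMAP_LEN macros above only require a descriptor with little-endian addr.hi/addr.lo/nbytes fields. The sketch below uses a hypothetical example_tx_bd layout and an example_map_and_unmap() helper to show the intended round trip, assuming linux/dma-mapping.h is available; the real Tx path uses the eth_tx_*_bd types from eth_common.h and defers the unmap to completion handling.

/* Hypothetical descriptor with only the fields the macros touch */
struct example_tx_bd {
	struct {
		__le32 hi;
		__le32 lo;
	} addr;
	__le16 nbytes;
};

static void example_map_and_unmap(struct qede_dev *edev, struct sk_buff *skb,
				  struct example_tx_bd *bd)
{
	dma_addr_t mapping;

	mapping = dma_map_single(&edev->pdev->dev, skb->data,
				 skb_headlen(skb), DMA_TO_DEVICE);
	if (dma_mapping_error(&edev->pdev->dev, mapping))
		return;

	/* Store the 64-bit DMA address and the length in the BD */
	BD_SET_UNMAP_ADDR_LEN(bd, mapping, skb_headlen(skb));

	/* Later, recover both values to undo the mapping */
	dma_unmap_single(&edev->pdev->dev, BD_UNMAP_ADDR(bd),
			 BD_UNMAP_LEN(bd), DMA_TO_DEVICE);
}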
diff --git a/drivers/net/ethernet/qlogic/qede/qede_dcbnl.c b/drivers/net/ethernet/qlogic/qede/qede_dcbnl.c
new file mode 100644 (file)
index 0000000..03e8c02
--- /dev/null
@@ -0,0 +1,348 @@
+/* QLogic qede NIC Driver
+* Copyright (c) 2015 QLogic Corporation
+*
+* This software is available under the terms of the GNU General Public License
+* (GPL) Version 2, available from the file COPYING in the main directory of
+* this source tree.
+*/
+
+#include <linux/types.h>
+#include <linux/netdevice.h>
+#include <linux/rtnetlink.h>
+#include <net/dcbnl.h>
+#include "qede.h"
+
+static u8 qede_dcbnl_getstate(struct net_device *netdev)
+{
+       struct qede_dev *edev = netdev_priv(netdev);
+
+       return edev->ops->dcb->getstate(edev->cdev);
+}
+
+static u8 qede_dcbnl_setstate(struct net_device *netdev, u8 state)
+{
+       struct qede_dev *edev = netdev_priv(netdev);
+
+       return edev->ops->dcb->setstate(edev->cdev, state);
+}
+
+static void qede_dcbnl_getpermhwaddr(struct net_device *netdev,
+                                    u8 *perm_addr)
+{
+       memcpy(perm_addr, netdev->dev_addr, netdev->addr_len);
+}
+
+static void qede_dcbnl_getpgtccfgtx(struct net_device *netdev, int prio,
+                                   u8 *prio_type, u8 *pgid, u8 *bw_pct,
+                                   u8 *up_map)
+{
+       struct qede_dev *edev = netdev_priv(netdev);
+
+       edev->ops->dcb->getpgtccfgtx(edev->cdev, prio, prio_type,
+                                    pgid, bw_pct, up_map);
+}
+
+static void qede_dcbnl_getpgbwgcfgtx(struct net_device *netdev,
+                                    int pgid, u8 *bw_pct)
+{
+       struct qede_dev *edev = netdev_priv(netdev);
+
+       edev->ops->dcb->getpgbwgcfgtx(edev->cdev, pgid, bw_pct);
+}
+
+static void qede_dcbnl_getpgtccfgrx(struct net_device *netdev, int prio,
+                                   u8 *prio_type, u8 *pgid, u8 *bw_pct,
+                                   u8 *up_map)
+{
+       struct qede_dev *edev = netdev_priv(netdev);
+
+       edev->ops->dcb->getpgtccfgrx(edev->cdev, prio, prio_type, pgid, bw_pct,
+                                    up_map);
+}
+
+static void qede_dcbnl_getpgbwgcfgrx(struct net_device *netdev,
+                                    int pgid, u8 *bw_pct)
+{
+       struct qede_dev *edev = netdev_priv(netdev);
+
+       edev->ops->dcb->getpgbwgcfgrx(edev->cdev, pgid, bw_pct);
+}
+
+static void qede_dcbnl_getpfccfg(struct net_device *netdev, int prio,
+                                u8 *setting)
+{
+       struct qede_dev *edev = netdev_priv(netdev);
+
+       edev->ops->dcb->getpfccfg(edev->cdev, prio, setting);
+}
+
+static void qede_dcbnl_setpfccfg(struct net_device *netdev, int prio,
+                                u8 setting)
+{
+       struct qede_dev *edev = netdev_priv(netdev);
+
+       edev->ops->dcb->setpfccfg(edev->cdev, prio, setting);
+}
+
+static u8 qede_dcbnl_getcap(struct net_device *netdev, int capid, u8 *cap)
+{
+       struct qede_dev *edev = netdev_priv(netdev);
+
+       return edev->ops->dcb->getcap(edev->cdev, capid, cap);
+}
+
+static int qede_dcbnl_getnumtcs(struct net_device *netdev, int tcid, u8 *num)
+{
+       struct qede_dev *edev = netdev_priv(netdev);
+
+       return edev->ops->dcb->getnumtcs(edev->cdev, tcid, num);
+}
+
+static u8 qede_dcbnl_getpfcstate(struct net_device *netdev)
+{
+       struct qede_dev *edev = netdev_priv(netdev);
+
+       return edev->ops->dcb->getpfcstate(edev->cdev);
+}
+
+static int qede_dcbnl_getapp(struct net_device *netdev, u8 idtype, u16 id)
+{
+       struct qede_dev *edev = netdev_priv(netdev);
+
+       return edev->ops->dcb->getapp(edev->cdev, idtype, id);
+}
+
+static u8 qede_dcbnl_getdcbx(struct net_device *netdev)
+{
+       struct qede_dev *edev = netdev_priv(netdev);
+
+       return edev->ops->dcb->getdcbx(edev->cdev);
+}
+
+static void qede_dcbnl_setpgtccfgtx(struct net_device *netdev, int prio,
+                                   u8 pri_type, u8 pgid, u8 bw_pct, u8 up_map)
+{
+       struct qede_dev *edev = netdev_priv(netdev);
+
+       return edev->ops->dcb->setpgtccfgtx(edev->cdev, prio, pri_type, pgid,
+                                           bw_pct, up_map);
+}
+
+static void qede_dcbnl_setpgtccfgrx(struct net_device *netdev, int prio,
+                                   u8 pri_type, u8 pgid, u8 bw_pct, u8 up_map)
+{
+       struct qede_dev *edev = netdev_priv(netdev);
+
+       return edev->ops->dcb->setpgtccfgrx(edev->cdev, prio, pri_type, pgid,
+                                           bw_pct, up_map);
+}
+
+static void qede_dcbnl_setpgbwgcfgtx(struct net_device *netdev, int pgid,
+                                    u8 bw_pct)
+{
+       struct qede_dev *edev = netdev_priv(netdev);
+
+       return edev->ops->dcb->setpgbwgcfgtx(edev->cdev, pgid, bw_pct);
+}
+
+static void qede_dcbnl_setpgbwgcfgrx(struct net_device *netdev, int pgid,
+                                    u8 bw_pct)
+{
+       struct qede_dev *edev = netdev_priv(netdev);
+
+       return edev->ops->dcb->setpgbwgcfgrx(edev->cdev, pgid, bw_pct);
+}
+
+static u8 qede_dcbnl_setall(struct net_device *netdev)
+{
+       struct qede_dev *edev = netdev_priv(netdev);
+
+       return edev->ops->dcb->setall(edev->cdev);
+}
+
+static int qede_dcbnl_setnumtcs(struct net_device *netdev, int tcid, u8 num)
+{
+       struct qede_dev *edev = netdev_priv(netdev);
+
+       return edev->ops->dcb->setnumtcs(edev->cdev, tcid, num);
+}
+
+static void qede_dcbnl_setpfcstate(struct net_device *netdev, u8 state)
+{
+       struct qede_dev *edev = netdev_priv(netdev);
+
+       return edev->ops->dcb->setpfcstate(edev->cdev, state);
+}
+
+static int qede_dcbnl_setapp(struct net_device *netdev, u8 idtype, u16 idval,
+                            u8 up)
+{
+       struct qede_dev *edev = netdev_priv(netdev);
+
+       return edev->ops->dcb->setapp(edev->cdev, idtype, idval, up);
+}
+
+static u8 qede_dcbnl_setdcbx(struct net_device *netdev, u8 state)
+{
+       struct qede_dev *edev = netdev_priv(netdev);
+
+       return edev->ops->dcb->setdcbx(edev->cdev, state);
+}
+
+static u8 qede_dcbnl_getfeatcfg(struct net_device *netdev, int featid,
+                               u8 *flags)
+{
+       struct qede_dev *edev = netdev_priv(netdev);
+
+       return edev->ops->dcb->getfeatcfg(edev->cdev, featid, flags);
+}
+
+static u8 qede_dcbnl_setfeatcfg(struct net_device *netdev, int featid, u8 flags)
+{
+       struct qede_dev *edev = netdev_priv(netdev);
+
+       return edev->ops->dcb->setfeatcfg(edev->cdev, featid, flags);
+}
+
+static int qede_dcbnl_peer_getappinfo(struct net_device *netdev,
+                                     struct dcb_peer_app_info *info,
+                                     u16 *count)
+{
+       struct qede_dev *edev = netdev_priv(netdev);
+
+       return edev->ops->dcb->peer_getappinfo(edev->cdev, info, count);
+}
+
+static int qede_dcbnl_peer_getapptable(struct net_device *netdev,
+                                      struct dcb_app *app)
+{
+       struct qede_dev *edev = netdev_priv(netdev);
+
+       return edev->ops->dcb->peer_getapptable(edev->cdev, app);
+}
+
+static int qede_dcbnl_cee_peer_getpfc(struct net_device *netdev,
+                                     struct cee_pfc *pfc)
+{
+       struct qede_dev *edev = netdev_priv(netdev);
+
+       return edev->ops->dcb->cee_peer_getpfc(edev->cdev, pfc);
+}
+
+static int qede_dcbnl_cee_peer_getpg(struct net_device *netdev,
+                                    struct cee_pg *pg)
+{
+       struct qede_dev *edev = netdev_priv(netdev);
+
+       return edev->ops->dcb->cee_peer_getpg(edev->cdev, pg);
+}
+
+static int qede_dcbnl_ieee_getpfc(struct net_device *netdev,
+                                 struct ieee_pfc *pfc)
+{
+       struct qede_dev *edev = netdev_priv(netdev);
+
+       return edev->ops->dcb->ieee_getpfc(edev->cdev, pfc);
+}
+
+static int qede_dcbnl_ieee_setpfc(struct net_device *netdev,
+                                 struct ieee_pfc *pfc)
+{
+       struct qede_dev *edev = netdev_priv(netdev);
+
+       return edev->ops->dcb->ieee_setpfc(edev->cdev, pfc);
+}
+
+static int qede_dcbnl_ieee_getets(struct net_device *netdev,
+                                 struct ieee_ets *ets)
+{
+       struct qede_dev *edev = netdev_priv(netdev);
+
+       return edev->ops->dcb->ieee_getets(edev->cdev, ets);
+}
+
+static int qede_dcbnl_ieee_setets(struct net_device *netdev,
+                                 struct ieee_ets *ets)
+{
+       struct qede_dev *edev = netdev_priv(netdev);
+
+       return edev->ops->dcb->ieee_setets(edev->cdev, ets);
+}
+
+static int qede_dcbnl_ieee_getapp(struct net_device *netdev,
+                                 struct dcb_app *app)
+{
+       struct qede_dev *edev = netdev_priv(netdev);
+
+       return edev->ops->dcb->ieee_getapp(edev->cdev, app);
+}
+
+static int qede_dcbnl_ieee_setapp(struct net_device *netdev,
+                                 struct dcb_app *app)
+{
+       struct qede_dev *edev = netdev_priv(netdev);
+
+       return edev->ops->dcb->ieee_setapp(edev->cdev, app);
+}
+
+static int qede_dcbnl_ieee_peer_getpfc(struct net_device *netdev,
+                                      struct ieee_pfc *pfc)
+{
+       struct qede_dev *edev = netdev_priv(netdev);
+
+       return edev->ops->dcb->ieee_peer_getpfc(edev->cdev, pfc);
+}
+
+static int qede_dcbnl_ieee_peer_getets(struct net_device *netdev,
+                                      struct ieee_ets *ets)
+{
+       struct qede_dev *edev = netdev_priv(netdev);
+
+       return edev->ops->dcb->ieee_peer_getets(edev->cdev, ets);
+}
+
+static const struct dcbnl_rtnl_ops qede_dcbnl_ops = {
+       .ieee_getpfc = qede_dcbnl_ieee_getpfc,
+       .ieee_setpfc = qede_dcbnl_ieee_setpfc,
+       .ieee_getets = qede_dcbnl_ieee_getets,
+       .ieee_setets = qede_dcbnl_ieee_setets,
+       .ieee_getapp = qede_dcbnl_ieee_getapp,
+       .ieee_setapp = qede_dcbnl_ieee_setapp,
+       .getdcbx = qede_dcbnl_getdcbx,
+       .ieee_peer_getpfc = qede_dcbnl_ieee_peer_getpfc,
+       .ieee_peer_getets = qede_dcbnl_ieee_peer_getets,
+       .getstate = qede_dcbnl_getstate,
+       .setstate = qede_dcbnl_setstate,
+       .getpermhwaddr = qede_dcbnl_getpermhwaddr,
+       .getpgtccfgtx = qede_dcbnl_getpgtccfgtx,
+       .getpgbwgcfgtx = qede_dcbnl_getpgbwgcfgtx,
+       .getpgtccfgrx = qede_dcbnl_getpgtccfgrx,
+       .getpgbwgcfgrx = qede_dcbnl_getpgbwgcfgrx,
+       .getpfccfg = qede_dcbnl_getpfccfg,
+       .setpfccfg = qede_dcbnl_setpfccfg,
+       .getcap = qede_dcbnl_getcap,
+       .getnumtcs = qede_dcbnl_getnumtcs,
+       .getpfcstate = qede_dcbnl_getpfcstate,
+       .getapp = qede_dcbnl_getapp,
+       .getdcbx = qede_dcbnl_getdcbx,
+       .setpgtccfgtx = qede_dcbnl_setpgtccfgtx,
+       .setpgtccfgrx = qede_dcbnl_setpgtccfgrx,
+       .setpgbwgcfgtx = qede_dcbnl_setpgbwgcfgtx,
+       .setpgbwgcfgrx = qede_dcbnl_setpgbwgcfgrx,
+       .setall = qede_dcbnl_setall,
+       .setnumtcs = qede_dcbnl_setnumtcs,
+       .setpfcstate = qede_dcbnl_setpfcstate,
+       .setapp = qede_dcbnl_setapp,
+       .setdcbx = qede_dcbnl_setdcbx,
+       .setfeatcfg = qede_dcbnl_setfeatcfg,
+       .getfeatcfg = qede_dcbnl_getfeatcfg,
+       .peer_getappinfo = qede_dcbnl_peer_getappinfo,
+       .peer_getapptable = qede_dcbnl_peer_getapptable,
+       .cee_peer_getpfc = qede_dcbnl_cee_peer_getpfc,
+       .cee_peer_getpg = qede_dcbnl_cee_peer_getpg,
+};
+
+void qede_set_dcbnl_ops(struct net_device *dev)
+{
+       dev->dcbnl_ops = &qede_dcbnl_ops;
+}
diff --git a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
new file mode 100644 (file)
index 0000000..f8492ca
--- /dev/null
@@ -0,0 +1,1294 @@
+/* QLogic qede NIC Driver
+* Copyright (c) 2015 QLogic Corporation
+*
+* This software is available under the terms of the GNU General Public License
+* (GPL) Version 2, available from the file COPYING in the main directory of
+* this source tree.
+*/
+
+#include <linux/version.h>
+#include <linux/types.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/string.h>
+#include <linux/pci.h>
+#include <linux/capability.h>
+#include "qede.h"
+
+#define QEDE_STAT_OFFSET(stat_name) (offsetof(struct qede_stats, stat_name))
+#define QEDE_STAT_STRING(stat_name) (#stat_name)
+#define _QEDE_STAT(stat_name, pf_only) \
+        {QEDE_STAT_OFFSET(stat_name), QEDE_STAT_STRING(stat_name), pf_only}
+#define QEDE_PF_STAT(stat_name)                _QEDE_STAT(stat_name, true)
+#define QEDE_STAT(stat_name)           _QEDE_STAT(stat_name, false)
+
+#define QEDE_RQSTAT_OFFSET(stat_name) \
+        (offsetof(struct qede_rx_queue, stat_name))
+#define QEDE_RQSTAT_STRING(stat_name) (#stat_name)
+#define QEDE_RQSTAT(stat_name) \
+        {QEDE_RQSTAT_OFFSET(stat_name), QEDE_RQSTAT_STRING(stat_name)}
+
+#define QEDE_SELFTEST_POLL_COUNT 100
+
+static const struct {
+       u64 offset;
+       char string[ETH_GSTRING_LEN];
+} qede_rqstats_arr[] = {
+       QEDE_RQSTAT(rx_hw_errors),
+       QEDE_RQSTAT(rx_alloc_errors),
+       QEDE_RQSTAT(rx_ip_frags),
+};
+
+#define QEDE_NUM_RQSTATS ARRAY_SIZE(qede_rqstats_arr)
+#define QEDE_RQSTATS_DATA(dev, sindex, rqindex) \
+       (*((u64 *)(((char *)(dev->fp_array[(rqindex)].rxq)) +\
+                   qede_rqstats_arr[(sindex)].offset)))
+static const struct {
+       u64 offset;
+       char string[ETH_GSTRING_LEN];
+       bool pf_only;
+} qede_stats_arr[] = {
+       QEDE_STAT(rx_ucast_bytes),
+       QEDE_STAT(rx_mcast_bytes),
+       QEDE_STAT(rx_bcast_bytes),
+       QEDE_STAT(rx_ucast_pkts),
+       QEDE_STAT(rx_mcast_pkts),
+       QEDE_STAT(rx_bcast_pkts),
+
+       QEDE_STAT(tx_ucast_bytes),
+       QEDE_STAT(tx_mcast_bytes),
+       QEDE_STAT(tx_bcast_bytes),
+       QEDE_STAT(tx_ucast_pkts),
+       QEDE_STAT(tx_mcast_pkts),
+       QEDE_STAT(tx_bcast_pkts),
+
+       QEDE_PF_STAT(rx_64_byte_packets),
+       QEDE_PF_STAT(rx_65_to_127_byte_packets),
+       QEDE_PF_STAT(rx_128_to_255_byte_packets),
+       QEDE_PF_STAT(rx_256_to_511_byte_packets),
+       QEDE_PF_STAT(rx_512_to_1023_byte_packets),
+       QEDE_PF_STAT(rx_1024_to_1518_byte_packets),
+       QEDE_PF_STAT(rx_1519_to_1522_byte_packets),
+       QEDE_PF_STAT(rx_1519_to_2047_byte_packets),
+       QEDE_PF_STAT(rx_2048_to_4095_byte_packets),
+       QEDE_PF_STAT(rx_4096_to_9216_byte_packets),
+       QEDE_PF_STAT(rx_9217_to_16383_byte_packets),
+       QEDE_PF_STAT(tx_64_byte_packets),
+       QEDE_PF_STAT(tx_65_to_127_byte_packets),
+       QEDE_PF_STAT(tx_128_to_255_byte_packets),
+       QEDE_PF_STAT(tx_256_to_511_byte_packets),
+       QEDE_PF_STAT(tx_512_to_1023_byte_packets),
+       QEDE_PF_STAT(tx_1024_to_1518_byte_packets),
+       QEDE_PF_STAT(tx_1519_to_2047_byte_packets),
+       QEDE_PF_STAT(tx_2048_to_4095_byte_packets),
+       QEDE_PF_STAT(tx_4096_to_9216_byte_packets),
+       QEDE_PF_STAT(tx_9217_to_16383_byte_packets),
+
+       QEDE_PF_STAT(rx_mac_crtl_frames),
+       QEDE_PF_STAT(tx_mac_ctrl_frames),
+       QEDE_PF_STAT(rx_pause_frames),
+       QEDE_PF_STAT(tx_pause_frames),
+       QEDE_PF_STAT(rx_pfc_frames),
+       QEDE_PF_STAT(tx_pfc_frames),
+
+       QEDE_PF_STAT(rx_crc_errors),
+       QEDE_PF_STAT(rx_align_errors),
+       QEDE_PF_STAT(rx_carrier_errors),
+       QEDE_PF_STAT(rx_oversize_packets),
+       QEDE_PF_STAT(rx_jabbers),
+       QEDE_PF_STAT(rx_undersize_packets),
+       QEDE_PF_STAT(rx_fragments),
+       QEDE_PF_STAT(tx_lpi_entry_count),
+       QEDE_PF_STAT(tx_total_collisions),
+       QEDE_PF_STAT(brb_truncates),
+       QEDE_PF_STAT(brb_discards),
+       QEDE_STAT(no_buff_discards),
+       QEDE_PF_STAT(mftag_filter_discards),
+       QEDE_PF_STAT(mac_filter_discards),
+       QEDE_STAT(tx_err_drop_pkts),
+
+       QEDE_STAT(coalesced_pkts),
+       QEDE_STAT(coalesced_events),
+       QEDE_STAT(coalesced_aborts_num),
+       QEDE_STAT(non_coalesced_pkts),
+       QEDE_STAT(coalesced_bytes),
+};
+
+#define QEDE_STATS_DATA(dev, index) \
+       (*((u64 *)(((char *)(dev)) + offsetof(struct qede_dev, stats) \
+                       + qede_stats_arr[(index)].offset)))
+
+#define QEDE_NUM_STATS ARRAY_SIZE(qede_stats_arr)
+
+enum {
+       QEDE_PRI_FLAG_CMT,
+       QEDE_PRI_FLAG_LEN,
+};
+
+static const char qede_private_arr[QEDE_PRI_FLAG_LEN][ETH_GSTRING_LEN] = {
+       "Coupled-Function",
+};
+
+enum qede_ethtool_tests {
+       QEDE_ETHTOOL_INT_LOOPBACK,
+       QEDE_ETHTOOL_INTERRUPT_TEST,
+       QEDE_ETHTOOL_MEMORY_TEST,
+       QEDE_ETHTOOL_REGISTER_TEST,
+       QEDE_ETHTOOL_CLOCK_TEST,
+       QEDE_ETHTOOL_TEST_MAX
+};
+
+static const char qede_tests_str_arr[QEDE_ETHTOOL_TEST_MAX][ETH_GSTRING_LEN] = {
+       "Internal loopback (offline)",
+       "Interrupt (online)\t",
+       "Memory (online)\t\t",
+       "Register (online)\t",
+       "Clock (online)\t\t",
+};
+
+static void qede_get_strings_stats(struct qede_dev *edev, u8 *buf)
+{
+       int i, j, k;
+
+       for (i = 0, j = 0; i < QEDE_NUM_STATS; i++) {
+               if (IS_VF(edev) && qede_stats_arr[i].pf_only)
+                       continue;
+               strcpy(buf + j * ETH_GSTRING_LEN,
+                      qede_stats_arr[i].string);
+               j++;
+       }
+
+       for (k = 0; k < QEDE_NUM_RQSTATS; k++, j++)
+               strcpy(buf + j * ETH_GSTRING_LEN,
+                      qede_rqstats_arr[k].string);
+}
+
+static void qede_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
+{
+       struct qede_dev *edev = netdev_priv(dev);
+
+       switch (stringset) {
+       case ETH_SS_STATS:
+               qede_get_strings_stats(edev, buf);
+               break;
+       case ETH_SS_PRIV_FLAGS:
+               memcpy(buf, qede_private_arr,
+                      ETH_GSTRING_LEN * QEDE_PRI_FLAG_LEN);
+               break;
+       case ETH_SS_TEST:
+               memcpy(buf, qede_tests_str_arr,
+                      ETH_GSTRING_LEN * QEDE_ETHTOOL_TEST_MAX);
+               break;
+       default:
+               DP_VERBOSE(edev, QED_MSG_DEBUG,
+                          "Unsupported stringset 0x%08x\n", stringset);
+       }
+}
+
+static void qede_get_ethtool_stats(struct net_device *dev,
+                                  struct ethtool_stats *stats, u64 *buf)
+{
+       struct qede_dev *edev = netdev_priv(dev);
+       int sidx, cnt = 0;
+       int qid;
+
+       qede_fill_by_demand_stats(edev);
+
+       mutex_lock(&edev->qede_lock);
+
+       for (sidx = 0; sidx < QEDE_NUM_STATS; sidx++) {
+               if (IS_VF(edev) && qede_stats_arr[sidx].pf_only)
+                       continue;
+               buf[cnt++] = QEDE_STATS_DATA(edev, sidx);
+       }
+
+       for (sidx = 0; sidx < QEDE_NUM_RQSTATS; sidx++) {
+               buf[cnt] = 0;
+               for (qid = 0; qid < edev->num_rss; qid++)
+                       buf[cnt] += QEDE_RQSTATS_DATA(edev, sidx, qid);
+               cnt++;
+       }
+
+       mutex_unlock(&edev->qede_lock);
+}
+
+static int qede_get_sset_count(struct net_device *dev, int stringset)
+{
+       struct qede_dev *edev = netdev_priv(dev);
+       int num_stats = QEDE_NUM_STATS;
+
+       switch (stringset) {
+       case ETH_SS_STATS:
+               if (IS_VF(edev)) {
+                       int i;
+
+                       for (i = 0; i < QEDE_NUM_STATS; i++)
+                               if (qede_stats_arr[i].pf_only)
+                                       num_stats--;
+               }
+               return num_stats + QEDE_NUM_RQSTATS;
+       case ETH_SS_PRIV_FLAGS:
+               return QEDE_PRI_FLAG_LEN;
+       case ETH_SS_TEST:
+               if (!IS_VF(edev))
+                       return QEDE_ETHTOOL_TEST_MAX;
+               else
+                       return 0;
+       default:
+               DP_VERBOSE(edev, QED_MSG_DEBUG,
+                          "Unsupported stringset 0x%08x\n", stringset);
+               return -EINVAL;
+       }
+}
+
+static u32 qede_get_priv_flags(struct net_device *dev)
+{
+       struct qede_dev *edev = netdev_priv(dev);
+
+       return (!!(edev->dev_info.common.num_hwfns > 1)) << QEDE_PRI_FLAG_CMT;
+}
+
+static int qede_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+       struct qede_dev *edev = netdev_priv(dev);
+       struct qed_link_output current_link;
+
+       memset(&current_link, 0, sizeof(current_link));
+       edev->ops->common->get_link(edev->cdev, &current_link);
+
+       cmd->supported = current_link.supported_caps;
+       cmd->advertising = current_link.advertised_caps;
+       if ((edev->state == QEDE_STATE_OPEN) && (current_link.link_up)) {
+               ethtool_cmd_speed_set(cmd, current_link.speed);
+               cmd->duplex = current_link.duplex;
+       } else {
+               cmd->duplex = DUPLEX_UNKNOWN;
+               ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
+       }
+       cmd->port = current_link.port;
+       cmd->autoneg = (current_link.autoneg) ? AUTONEG_ENABLE :
+                                               AUTONEG_DISABLE;
+       cmd->lp_advertising = current_link.lp_caps;
+
+       return 0;
+}
+
+static int qede_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+       struct qede_dev *edev = netdev_priv(dev);
+       struct qed_link_output current_link;
+       struct qed_link_params params;
+       u32 speed;
+
+       if (!edev->ops || !edev->ops->common->can_link_change(edev->cdev)) {
+               DP_INFO(edev,
+                       "Link settings are not allowed to be changed\n");
+               return -EOPNOTSUPP;
+       }
+
+       memset(&current_link, 0, sizeof(current_link));
+       memset(&params, 0, sizeof(params));
+       edev->ops->common->get_link(edev->cdev, &current_link);
+
+       speed = ethtool_cmd_speed(cmd);
+       params.override_flags |= QED_LINK_OVERRIDE_SPEED_ADV_SPEEDS;
+       params.override_flags |= QED_LINK_OVERRIDE_SPEED_AUTONEG;
+       if (cmd->autoneg == AUTONEG_ENABLE) {
+               params.autoneg = true;
+               params.forced_speed = 0;
+               params.adv_speeds = cmd->advertising;
+       } else { /* forced speed */
+               params.override_flags |= QED_LINK_OVERRIDE_SPEED_FORCED_SPEED;
+               params.autoneg = false;
+               params.forced_speed = speed;
+               switch (speed) {
+               case SPEED_10000:
+                       if (!(current_link.supported_caps &
+                           SUPPORTED_10000baseKR_Full)) {
+                               DP_INFO(edev, "10G speed not supported\n");
+                               return -EINVAL;
+                       }
+                       params.adv_speeds = SUPPORTED_10000baseKR_Full;
+                       break;
+               case SPEED_40000:
+                       if (!(current_link.supported_caps &
+                           SUPPORTED_40000baseLR4_Full)) {
+                               DP_INFO(edev, "40G speed not supported\n");
+                               return -EINVAL;
+                       }
+                       params.adv_speeds = SUPPORTED_40000baseLR4_Full;
+                       break;
+               default:
+                       DP_INFO(edev, "Unsupported speed %u\n", speed);
+                       return -EINVAL;
+               }
+       }
+
+       params.link_up = true;
+       edev->ops->common->set_link(edev->cdev, &params);
+
+       return 0;
+}
+
+static void qede_get_drvinfo(struct net_device *ndev,
+                            struct ethtool_drvinfo *info)
+{
+       char mfw[ETHTOOL_FWVERS_LEN], storm[ETHTOOL_FWVERS_LEN];
+       struct qede_dev *edev = netdev_priv(ndev);
+
+       strlcpy(info->driver, "qede", sizeof(info->driver));
+       strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
+
+       snprintf(storm, ETHTOOL_FWVERS_LEN, "%d.%d.%d.%d",
+                edev->dev_info.common.fw_major,
+                edev->dev_info.common.fw_minor,
+                edev->dev_info.common.fw_rev,
+                edev->dev_info.common.fw_eng);
+
+       snprintf(mfw, ETHTOOL_FWVERS_LEN, "%d.%d.%d.%d",
+                (edev->dev_info.common.mfw_rev >> 24) & 0xFF,
+                (edev->dev_info.common.mfw_rev >> 16) & 0xFF,
+                (edev->dev_info.common.mfw_rev >> 8) & 0xFF,
+                edev->dev_info.common.mfw_rev & 0xFF);
+
+       if ((strlen(storm) + strlen(mfw) + strlen("mfw storm  ")) <
+           sizeof(info->fw_version)) {
+               snprintf(info->fw_version, sizeof(info->fw_version),
+                        "mfw %s storm %s", mfw, storm);
+       } else {
+               snprintf(info->fw_version, sizeof(info->fw_version),
+                        "%s %s", mfw, storm);
+       }
+
+       strlcpy(info->bus_info, pci_name(edev->pdev), sizeof(info->bus_info));
+}
+
+static u32 qede_get_msglevel(struct net_device *ndev)
+{
+       struct qede_dev *edev = netdev_priv(ndev);
+
+       return ((u32)edev->dp_level << QED_LOG_LEVEL_SHIFT) |
+              edev->dp_module;
+}
+
+static void qede_set_msglevel(struct net_device *ndev, u32 level)
+{
+       struct qede_dev *edev = netdev_priv(ndev);
+       u32 dp_module = 0;
+       u8 dp_level = 0;
+
+       qede_config_debug(level, &dp_module, &dp_level);
+
+       edev->dp_level = dp_level;
+       edev->dp_module = dp_module;
+       edev->ops->common->update_msglvl(edev->cdev,
+                                        dp_module, dp_level);
+}
+
+static int qede_nway_reset(struct net_device *dev)
+{
+       struct qede_dev *edev = netdev_priv(dev);
+       struct qed_link_output current_link;
+       struct qed_link_params link_params;
+
+       if (!edev->ops || !edev->ops->common->can_link_change(edev->cdev)) {
+               DP_INFO(edev,
+                       "Link settings are not allowed to be changed\n");
+               return -EOPNOTSUPP;
+       }
+
+       if (!netif_running(dev))
+               return 0;
+
+       memset(&current_link, 0, sizeof(current_link));
+       edev->ops->common->get_link(edev->cdev, &current_link);
+       if (!current_link.link_up)
+               return 0;
+
+       /* Toggle the link */
+       memset(&link_params, 0, sizeof(link_params));
+       link_params.link_up = false;
+       edev->ops->common->set_link(edev->cdev, &link_params);
+       link_params.link_up = true;
+       edev->ops->common->set_link(edev->cdev, &link_params);
+
+       return 0;
+}
+
+static u32 qede_get_link(struct net_device *dev)
+{
+       struct qede_dev *edev = netdev_priv(dev);
+       struct qed_link_output current_link;
+
+       memset(&current_link, 0, sizeof(current_link));
+       edev->ops->common->get_link(edev->cdev, &current_link);
+
+       return current_link.link_up;
+}
+
+static int qede_get_coalesce(struct net_device *dev,
+                            struct ethtool_coalesce *coal)
+{
+       struct qede_dev *edev = netdev_priv(dev);
+       u16 rxc, txc;
+
+       memset(coal, 0, sizeof(struct ethtool_coalesce));
+       edev->ops->common->get_coalesce(edev->cdev, &rxc, &txc);
+
+       coal->rx_coalesce_usecs = rxc;
+       coal->tx_coalesce_usecs = txc;
+
+       return 0;
+}
+
+static int qede_set_coalesce(struct net_device *dev,
+                            struct ethtool_coalesce *coal)
+{
+       struct qede_dev *edev = netdev_priv(dev);
+       int i, rc = 0;
+       u16 rxc, txc;
+       u8 sb_id;
+
+       if (!netif_running(dev)) {
+               DP_INFO(edev, "Interface is down\n");
+               return -EINVAL;
+       }
+
+       if (coal->rx_coalesce_usecs > QED_COALESCE_MAX ||
+           coal->tx_coalesce_usecs > QED_COALESCE_MAX) {
+               DP_INFO(edev,
+                       "Can't support requested %s coalesce value [max supported value %d]\n",
+                       coal->rx_coalesce_usecs > QED_COALESCE_MAX ? "rx"
+                                                                  : "tx",
+                       QED_COALESCE_MAX);
+               return -EINVAL;
+       }
+
+       rxc = (u16)coal->rx_coalesce_usecs;
+       txc = (u16)coal->tx_coalesce_usecs;
+       for_each_rss(i) {
+               sb_id = edev->fp_array[i].sb_info->igu_sb_id;
+               rc = edev->ops->common->set_coalesce(edev->cdev, rxc, txc,
+                                                    (u8)i, sb_id);
+               if (rc) {
+                       DP_INFO(edev, "Set coalesce error, rc = %d\n", rc);
+                       return rc;
+               }
+       }
+
+       return rc;
+}
+
+static void qede_get_ringparam(struct net_device *dev,
+                              struct ethtool_ringparam *ering)
+{
+       struct qede_dev *edev = netdev_priv(dev);
+
+       ering->rx_max_pending = NUM_RX_BDS_MAX;
+       ering->rx_pending = edev->q_num_rx_buffers;
+       ering->tx_max_pending = NUM_TX_BDS_MAX;
+       ering->tx_pending = edev->q_num_tx_buffers;
+}
+
+static int qede_set_ringparam(struct net_device *dev,
+                             struct ethtool_ringparam *ering)
+{
+       struct qede_dev *edev = netdev_priv(dev);
+
+       DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN),
+                  "Set ring params command parameters: rx_pending = %d, tx_pending = %d\n",
+                  ering->rx_pending, ering->tx_pending);
+
+       /* Validate legality of configuration */
+       if (ering->rx_pending > NUM_RX_BDS_MAX ||
+           ering->rx_pending < NUM_RX_BDS_MIN ||
+           ering->tx_pending > NUM_TX_BDS_MAX ||
+           ering->tx_pending < NUM_TX_BDS_MIN) {
+               DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN),
+                          "Can only support Rx Buffer size [0x%08x,...,0x%08x] and Tx Buffer size [0x%08x,...,0x%08x]\n",
+                          NUM_RX_BDS_MIN, NUM_RX_BDS_MAX,
+                          NUM_TX_BDS_MIN, NUM_TX_BDS_MAX);
+               return -EINVAL;
+       }
+
+       /* Change ring size and re-load */
+       edev->q_num_rx_buffers = ering->rx_pending;
+       edev->q_num_tx_buffers = ering->tx_pending;
+
+       if (netif_running(edev->ndev))
+               qede_reload(edev, NULL, NULL);
+
+       return 0;
+}
+
+static void qede_get_pauseparam(struct net_device *dev,
+                               struct ethtool_pauseparam *epause)
+{
+       struct qede_dev *edev = netdev_priv(dev);
+       struct qed_link_output current_link;
+
+       memset(&current_link, 0, sizeof(current_link));
+       edev->ops->common->get_link(edev->cdev, &current_link);
+
+       if (current_link.pause_config & QED_LINK_PAUSE_AUTONEG_ENABLE)
+               epause->autoneg = true;
+       if (current_link.pause_config & QED_LINK_PAUSE_RX_ENABLE)
+               epause->rx_pause = true;
+       if (current_link.pause_config & QED_LINK_PAUSE_TX_ENABLE)
+               epause->tx_pause = true;
+
+       DP_VERBOSE(edev, QED_MSG_DEBUG,
+                  "ethtool_pauseparam: cmd %d  autoneg %d  rx_pause %d  tx_pause %d\n",
+                  epause->cmd, epause->autoneg, epause->rx_pause,
+                  epause->tx_pause);
+}
+
+static int qede_set_pauseparam(struct net_device *dev,
+                              struct ethtool_pauseparam *epause)
+{
+       struct qede_dev *edev = netdev_priv(dev);
+       struct qed_link_params params;
+       struct qed_link_output current_link;
+
+       if (!edev->ops || !edev->ops->common->can_link_change(edev->cdev)) {
+               DP_INFO(edev,
+                       "Pause settings are not allowed to be changed\n");
+               return -EOPNOTSUPP;
+       }
+
+       memset(&current_link, 0, sizeof(current_link));
+       edev->ops->common->get_link(edev->cdev, &current_link);
+
+       memset(&params, 0, sizeof(params));
+       params.override_flags |= QED_LINK_OVERRIDE_PAUSE_CONFIG;
+       if (epause->autoneg) {
+               if (!(current_link.supported_caps & SUPPORTED_Autoneg)) {
+                       DP_INFO(edev, "autoneg not supported\n");
+                       return -EINVAL;
+               }
+               params.pause_config |= QED_LINK_PAUSE_AUTONEG_ENABLE;
+       }
+       if (epause->rx_pause)
+               params.pause_config |= QED_LINK_PAUSE_RX_ENABLE;
+       if (epause->tx_pause)
+               params.pause_config |= QED_LINK_PAUSE_TX_ENABLE;
+
+       params.link_up = true;
+       edev->ops->common->set_link(edev->cdev, &params);
+
+       return 0;
+}
+
+static void qede_update_mtu(struct qede_dev *edev, union qede_reload_args *args)
+{
+       edev->ndev->mtu = args->mtu;
+}
+
+/* Netdevice NDOs */
+#define ETH_MAX_JUMBO_PACKET_SIZE      9600
+#define ETH_MIN_PACKET_SIZE            60
+int qede_change_mtu(struct net_device *ndev, int new_mtu)
+{
+       struct qede_dev *edev = netdev_priv(ndev);
+       union qede_reload_args args;
+
+       if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
+           ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE)) {
+               DP_ERR(edev, "Can't support requested MTU size\n");
+               return -EINVAL;
+       }
+
+       DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN),
+                  "Configuring MTU size of %d\n", new_mtu);
+
+       /* Set the mtu field and re-start the interface if needed */
+       args.mtu = new_mtu;
+
+       if (netif_running(edev->ndev))
+               qede_reload(edev, &qede_update_mtu, &args);
+
+       qede_update_mtu(edev, &args);
+
+       return 0;
+}
+
+static void qede_get_channels(struct net_device *dev,
+                             struct ethtool_channels *channels)
+{
+       struct qede_dev *edev = netdev_priv(dev);
+
+       channels->max_combined = QEDE_MAX_RSS_CNT(edev);
+       channels->combined_count = QEDE_RSS_CNT(edev);
+}
+
+static int qede_set_channels(struct net_device *dev,
+                            struct ethtool_channels *channels)
+{
+       struct qede_dev *edev = netdev_priv(dev);
+
+       DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN),
+                  "set-channels command parameters: rx = %d, tx = %d, other = %d, combined = %d\n",
+                  channels->rx_count, channels->tx_count,
+                  channels->other_count, channels->combined_count);
+
+       /* We don't support separate rx / tx, nor `other' channels. */
+       if (channels->rx_count || channels->tx_count ||
+           channels->other_count || (channels->combined_count == 0) ||
+           (channels->combined_count > QEDE_MAX_RSS_CNT(edev))) {
+               DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN),
+                          "command parameters not supported\n");
+               return -EINVAL;
+       }
+
+       /* Check if there was a change in the active parameters */
+       if (channels->combined_count == QEDE_RSS_CNT(edev)) {
+               DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN),
+                          "No change in active parameters\n");
+               return 0;
+       }
+
+       /* We need the number of queues to be divisible by the number of hwfns */
+       if (channels->combined_count % edev->dev_info.common.num_hwfns) {
+               DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN),
+                          "Number of channels must be divisible by %04x\n",
+                          edev->dev_info.common.num_hwfns);
+               return -EINVAL;
+       }
+
+       /* Set number of queues and reload if necessary */
+       edev->req_rss = channels->combined_count;
+       if (netif_running(dev))
+               qede_reload(edev, NULL, NULL);
+
+       return 0;
+}
+
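+/* ethtool 'identify' callback - blink the port LED once per second while the
+ * test is active and restore the default LED behavior when it ends.
+ */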
+static int qede_set_phys_id(struct net_device *dev,
+                           enum ethtool_phys_id_state state)
+{
+       struct qede_dev *edev = netdev_priv(dev);
+       u8 led_state = 0;
+
+       switch (state) {
+       case ETHTOOL_ID_ACTIVE:
+               return 1;       /* cycle on/off once per second */
+
+       case ETHTOOL_ID_ON:
+               led_state = QED_LED_MODE_ON;
+               break;
+
+       case ETHTOOL_ID_OFF:
+               led_state = QED_LED_MODE_OFF;
+               break;
+
+       case ETHTOOL_ID_INACTIVE:
+               led_state = QED_LED_MODE_RESTORE;
+               break;
+       }
+
+       edev->ops->common->set_led(edev->cdev, led_state);
+
+       return 0;
+}
+
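+/* Report which header fields feed the RSS hash for the given flow type -
+ * source/destination IP for all flows, plus L4 ports for TCP and, when
+ * enabled, UDP.
+ */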
+static int qede_get_rss_flags(struct qede_dev *edev, struct ethtool_rxnfc *info)
+{
+       info->data = RXH_IP_SRC | RXH_IP_DST;
+
+       switch (info->flow_type) {
+       case TCP_V4_FLOW:
+       case TCP_V6_FLOW:
+               info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+               break;
+       case UDP_V4_FLOW:
+               if (edev->rss_params.rss_caps & QED_RSS_IPV4_UDP)
+                       info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+               break;
+       case UDP_V6_FLOW:
+               if (edev->rss_params.rss_caps & QED_RSS_IPV6_UDP)
+                       info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+               break;
+       case IPV4_FLOW:
+       case IPV6_FLOW:
+               break;
+       default:
+               info->data = 0;
+               break;
+       }
+
+       return 0;
+}
+
+static int qede_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
+                         u32 *rules __always_unused)
+{
+       struct qede_dev *edev = netdev_priv(dev);
+
+       switch (info->cmd) {
+       case ETHTOOL_GRXRINGS:
+               info->data = edev->num_rss;
+               return 0;
+       case ETHTOOL_GRXFH:
+               return qede_get_rss_flags(edev, info);
+       default:
+               DP_ERR(edev, "Command parameters not supported\n");
+               return -EOPNOTSUPP;
+       }
+}
+
+static int qede_set_rss_flags(struct qede_dev *edev, struct ethtool_rxnfc *info)
+{
+       struct qed_update_vport_params vport_update_params;
+       u8 set_caps = 0, clr_caps = 0;
+
+       DP_VERBOSE(edev, QED_MSG_DEBUG,
+                  "Set rss flags command parameters: flow type = %d, data = %llu\n",
+                  info->flow_type, info->data);
+
+       switch (info->flow_type) {
+       case TCP_V4_FLOW:
+       case TCP_V6_FLOW:
+               /* For TCP only 4-tuple hash is supported */
+               if (info->data ^ (RXH_IP_SRC | RXH_IP_DST |
+                                 RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
+                       DP_INFO(edev, "Command parameters not supported\n");
+                       return -EINVAL;
+               }
+               return 0;
+       case UDP_V4_FLOW:
+               /* For UDP either 2-tuple hash or 4-tuple hash is supported */
+               if (info->data == (RXH_IP_SRC | RXH_IP_DST |
+                                  RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
+                       set_caps = QED_RSS_IPV4_UDP;
+                       DP_VERBOSE(edev, QED_MSG_DEBUG,
+                                  "UDP 4-tuple enabled\n");
+               } else if (info->data == (RXH_IP_SRC | RXH_IP_DST)) {
+                       clr_caps = QED_RSS_IPV4_UDP;
+                       DP_VERBOSE(edev, QED_MSG_DEBUG,
+                                  "UDP 4-tuple disabled\n");
+               } else {
+                       return -EINVAL;
+               }
+               break;
+       case UDP_V6_FLOW:
+               /* For UDP either 2-tuple hash or 4-tuple hash is supported */
+               if (info->data == (RXH_IP_SRC | RXH_IP_DST |
+                                  RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
+                       set_caps = QED_RSS_IPV6_UDP;
+                       DP_VERBOSE(edev, QED_MSG_DEBUG,
+                                  "UDP 4-tuple enabled\n");
+               } else if (info->data == (RXH_IP_SRC | RXH_IP_DST)) {
+                       clr_caps = QED_RSS_IPV6_UDP;
+                       DP_VERBOSE(edev, QED_MSG_DEBUG,
+                                  "UDP 4-tuple disabled\n");
+               } else {
+                       return -EINVAL;
+               }
+               break;
+       case IPV4_FLOW:
+       case IPV6_FLOW:
+               /* For IP only 2-tuple hash is supported */
+               if (info->data ^ (RXH_IP_SRC | RXH_IP_DST)) {
+                       DP_INFO(edev, "Command parameters not supported\n");
+                       return -EINVAL;
+               }
+               return 0;
+       case SCTP_V4_FLOW:
+       case AH_ESP_V4_FLOW:
+       case AH_V4_FLOW:
+       case ESP_V4_FLOW:
+       case SCTP_V6_FLOW:
+       case AH_ESP_V6_FLOW:
+       case AH_V6_FLOW:
+       case ESP_V6_FLOW:
+       case IP_USER_FLOW:
+       case ETHER_FLOW:
+               /* RSS is not supported for these protocols */
+               if (info->data) {
+                       DP_INFO(edev, "Command parameters not supported\n");
+                       return -EINVAL;
+               }
+               return 0;
+       default:
+               return -EINVAL;
+       }
+
+       /* No action is needed if there is no change in the rss capability */
+       if (edev->rss_params.rss_caps == ((edev->rss_params.rss_caps &
+                                          ~clr_caps) | set_caps))
+               return 0;
+
+       /* Update internal configuration */
+       edev->rss_params.rss_caps = (edev->rss_params.rss_caps & ~clr_caps) |
+                                   set_caps;
+       edev->rss_params_inited |= QEDE_RSS_CAPS_INITED;
+
+       /* Re-configure if possible */
+       if (netif_running(edev->ndev)) {
+               memset(&vport_update_params, 0, sizeof(vport_update_params));
+               vport_update_params.update_rss_flg = 1;
+               vport_update_params.vport_id = 0;
+               memcpy(&vport_update_params.rss_params, &edev->rss_params,
+                      sizeof(vport_update_params.rss_params));
+               return edev->ops->vport_update(edev->cdev,
+                                              &vport_update_params);
+       }
+
+       return 0;
+}
+
+static int qede_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info)
+{
+       struct qede_dev *edev = netdev_priv(dev);
+
+       switch (info->cmd) {
+       case ETHTOOL_SRXFH:
+               return qede_set_rss_flags(edev, info);
+       default:
+               DP_INFO(edev, "Command parameters not supported\n");
+               return -EOPNOTSUPP;
+       }
+}
+
+static u32 qede_get_rxfh_indir_size(struct net_device *dev)
+{
+       return QED_RSS_IND_TABLE_SIZE;
+}
+
+static u32 qede_get_rxfh_key_size(struct net_device *dev)
+{
+       struct qede_dev *edev = netdev_priv(dev);
+
+       return sizeof(edev->rss_params.rss_key);
+}
+
+static int qede_get_rxfh(struct net_device *dev, u32 *indir, u8 *key, u8 *hfunc)
+{
+       struct qede_dev *edev = netdev_priv(dev);
+       int i;
+
+       if (hfunc)
+               *hfunc = ETH_RSS_HASH_TOP;
+
+       if (!indir)
+               return 0;
+
+       for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i++)
+               indir[i] = edev->rss_params.rss_ind_table[i];
+
+       if (key)
+               memcpy(key, edev->rss_params.rss_key,
+                      qede_get_rxfh_key_size(dev));
+
+       return 0;
+}
+
+static int qede_set_rxfh(struct net_device *dev, const u32 *indir,
+                        const u8 *key, const u8 hfunc)
+{
+       struct qed_update_vport_params vport_update_params;
+       struct qede_dev *edev = netdev_priv(dev);
+       int i;
+
+       if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
+               return -EOPNOTSUPP;
+
+       if (!indir && !key)
+               return 0;
+
+       if (indir) {
+               for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i++)
+                       edev->rss_params.rss_ind_table[i] = indir[i];
+               edev->rss_params_inited |= QEDE_RSS_INDIR_INITED;
+       }
+
+       if (key) {
+               memcpy(&edev->rss_params.rss_key, key,
+                      qede_get_rxfh_key_size(dev));
+               edev->rss_params_inited |= QEDE_RSS_KEY_INITED;
+       }
+
+       if (netif_running(edev->ndev)) {
+               memset(&vport_update_params, 0, sizeof(vport_update_params));
+               vport_update_params.update_rss_flg = 1;
+               vport_update_params.vport_id = 0;
+               memcpy(&vport_update_params.rss_params, &edev->rss_params,
+                      sizeof(vport_update_params.rss_params));
+               return edev->ops->vport_update(edev->cdev,
+                                              &vport_update_params);
+       }
+
+       return 0;
+}
+
+/* This function enables the interrupt generation and the NAPI on the device */
+static void qede_netif_start(struct qede_dev *edev)
+{
+       int i;
+
+       if (!netif_running(edev->ndev))
+               return;
+
+       for_each_rss(i) {
+               /* Update and reenable interrupts */
+               qed_sb_ack(edev->fp_array[i].sb_info, IGU_INT_ENABLE, 1);
+               napi_enable(&edev->fp_array[i].napi);
+       }
+}
+
+/* This function disables the NAPI and the interrupt generation on the device */
+static void qede_netif_stop(struct qede_dev *edev)
+{
+       int i;
+
+       for_each_rss(i) {
+               napi_disable(&edev->fp_array[i].napi);
+               /* Disable interrupts */
+               qed_sb_ack(edev->fp_array[i].sb_info, IGU_INT_DISABLE, 0);
+       }
+}
+
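+/* Transmit the loopback test skb as a single BD on Tx queue 0, ring the
+ * doorbell and poll for the Tx completion.
+ */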
+static int qede_selftest_transmit_traffic(struct qede_dev *edev,
+                                         struct sk_buff *skb)
+{
+       struct qede_tx_queue *txq = &edev->fp_array[0].txqs[0];
+       struct eth_tx_1st_bd *first_bd;
+       dma_addr_t mapping;
+       int i, idx, val;
+
+       /* Fill the entry in the SW ring and the BDs in the FW ring */
+       idx = txq->sw_tx_prod & NUM_TX_BDS_MAX;
+       txq->sw_tx_ring[idx].skb = skb;
+       first_bd = qed_chain_produce(&txq->tx_pbl);
+       memset(first_bd, 0, sizeof(*first_bd));
+       val = 1 << ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT;
+       first_bd->data.bd_flags.bitfields = val;
+       val = skb->len & ETH_TX_DATA_1ST_BD_PKT_LEN_MASK;
+       first_bd->data.bitfields |= (val << ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT);
+
+       /* Map skb linear data for DMA and set in the first BD */
+       mapping = dma_map_single(&edev->pdev->dev, skb->data,
+                                skb_headlen(skb), DMA_TO_DEVICE);
+       if (unlikely(dma_mapping_error(&edev->pdev->dev, mapping))) {
+               DP_NOTICE(edev, "SKB mapping failed\n");
+               return -ENOMEM;
+       }
+       BD_SET_UNMAP_ADDR_LEN(first_bd, mapping, skb_headlen(skb));
+
+       /* update the first BD with the actual num BDs */
+       first_bd->data.nbds = 1;
+       txq->sw_tx_prod++;
+       /* 'next page' entries are counted in the producer value */
+       val = cpu_to_le16(qed_chain_get_prod_idx(&txq->tx_pbl));
+       txq->tx_db.data.bd_prod = val;
+
+       /* wmb makes sure that the BDs data is updated before updating the
+        * producer, otherwise FW may read old data from the BDs.
+        */
+       wmb();
+       barrier();
+       writel(txq->tx_db.raw, txq->doorbell_addr);
+
+       /* mmiowb is needed to synchronize doorbell writes from more than one
+        * processor. It guarantees that the write reaches the device before
+        * the queue lock is released and another start_xmit is called (possibly
+        * on another CPU). Without this barrier, the next doorbell can bypass
+        * this doorbell. This is applicable to IA64/Altix systems.
+        */
+       mmiowb();
+
+       for (i = 0; i < QEDE_SELFTEST_POLL_COUNT; i++) {
+               if (qede_txq_has_work(txq))
+                       break;
+               usleep_range(100, 200);
+       }
+
+       if (!qede_txq_has_work(txq)) {
+               DP_NOTICE(edev, "Tx completion didn't happen\n");
+               return -1;
+       }
+
+       first_bd = (struct eth_tx_1st_bd *)qed_chain_consume(&txq->tx_pbl);
+       dma_unmap_page(&edev->pdev->dev, BD_UNMAP_ADDR(first_bd),
+                      BD_UNMAP_LEN(first_bd), DMA_TO_DEVICE);
+       txq->sw_tx_cons++;
+       txq->sw_tx_ring[idx].skb = NULL;
+
+       return 0;
+}
+
+static int qede_selftest_receive_traffic(struct qede_dev *edev)
+{
+       struct qede_rx_queue *rxq = edev->fp_array[0].rxq;
+       u16 hw_comp_cons, sw_comp_cons, sw_rx_index, len;
+       struct eth_fast_path_rx_reg_cqe *fp_cqe;
+       struct sw_rx_data *sw_rx_data;
+       union eth_rx_cqe *cqe;
+       u8 *data_ptr;
+       int i;
+
+       /* The packet is expected to be received on rx-queue 0 even though RSS
+        * is enabled, because queue 0 is configured as the default queue and
+        * the loopback traffic is not IP.
+        */
+       for (i = 0; i < QEDE_SELFTEST_POLL_COUNT; i++) {
+               if (qede_has_rx_work(rxq))
+                       break;
+               usleep_range(100, 200);
+       }
+
+       if (!qede_has_rx_work(rxq)) {
+               DP_NOTICE(edev, "Failed to receive the traffic\n");
+               return -1;
+       }
+
+       hw_comp_cons = le16_to_cpu(*rxq->hw_cons_ptr);
+       sw_comp_cons = qed_chain_get_cons_idx(&rxq->rx_comp_ring);
+
+       /* Memory barrier to prevent the CPU from speculatively reading the
+        * CQE/BD before reading hw_comp_cons. If the CQE is read before FW has
+        * written it, and FW then writes the CQE and SB before the CPU reads
+        * hw_comp_cons, the CPU ends up using a stale CQE.
+        */
+       rmb();
+
+       /* Get the CQE from the completion ring */
+       cqe = (union eth_rx_cqe *)qed_chain_consume(&rxq->rx_comp_ring);
+
+       /* Get the data from the SW ring */
+       sw_rx_index = rxq->sw_rx_cons & NUM_RX_BDS_MAX;
+       sw_rx_data = &rxq->sw_rx_ring[sw_rx_index];
+       fp_cqe = &cqe->fast_path_regular;
+       len =  le16_to_cpu(fp_cqe->len_on_first_bd);
+       data_ptr = (u8 *)(page_address(sw_rx_data->data) +
+                    fp_cqe->placement_offset + sw_rx_data->page_offset);
+       for (i = ETH_HLEN; i < len; i++)
+               if (data_ptr[i] != (unsigned char)(i & 0xff)) {
+                       DP_NOTICE(edev, "Loopback test failed\n");
+                       qede_recycle_rx_bd_ring(rxq, edev, 1);
+                       return -1;
+               }
+
+       qede_recycle_rx_bd_ring(rxq, edev, 1);
+
+       return 0;
+}
+
+static int qede_selftest_run_loopback(struct qede_dev *edev, u32 loopback_mode)
+{
+       struct qed_link_params link_params;
+       struct sk_buff *skb = NULL;
+       int rc = 0, i;
+       u32 pkt_size;
+       u8 *packet;
+
+       if (!netif_running(edev->ndev)) {
+               DP_NOTICE(edev, "Interface is down\n");
+               return -EINVAL;
+       }
+
+       qede_netif_stop(edev);
+
+       /* Bring up the link in Loopback mode */
+       memset(&link_params, 0, sizeof(link_params));
+       link_params.link_up = true;
+       link_params.override_flags = QED_LINK_OVERRIDE_LOOPBACK_MODE;
+       link_params.loopback_mode = loopback_mode;
+       edev->ops->common->set_link(edev->cdev, &link_params);
+
+       /* Wait for loopback configuration to apply */
+       msleep_interruptible(500);
+
+       /* prepare the loopback packet */
+       pkt_size = edev->ndev->mtu + ETH_HLEN;
+
+       skb = netdev_alloc_skb(edev->ndev, pkt_size);
+       if (!skb) {
+               DP_INFO(edev, "Can't allocate skb\n");
+               rc = -ENOMEM;
+               goto test_loopback_exit;
+       }
+       packet = skb_put(skb, pkt_size);
+       ether_addr_copy(packet, edev->ndev->dev_addr);
+       ether_addr_copy(packet + ETH_ALEN, edev->ndev->dev_addr);
+       memset(packet + (2 * ETH_ALEN), 0x77, (ETH_HLEN - (2 * ETH_ALEN)));
+       for (i = ETH_HLEN; i < pkt_size; i++)
+               packet[i] = (unsigned char)(i & 0xff);
+
+       rc = qede_selftest_transmit_traffic(edev, skb);
+       if (rc)
+               goto test_loopback_exit;
+
+       rc = qede_selftest_receive_traffic(edev);
+       if (rc)
+               goto test_loopback_exit;
+
+       DP_VERBOSE(edev, NETIF_MSG_RX_STATUS, "Loopback test successful\n");
+
+test_loopback_exit:
+       dev_kfree_skb(skb);
+
+       /* Bring up the link in Normal mode */
+       memset(&link_params, 0, sizeof(link_params));
+       link_params.link_up = true;
+       link_params.override_flags = QED_LINK_OVERRIDE_LOOPBACK_MODE;
+       link_params.loopback_mode = QED_LINK_LOOPBACK_NONE;
+       edev->ops->common->set_link(edev->cdev, &link_params);
+
+       /* Wait for loopback configuration to apply */
+       msleep_interruptible(500);
+
+       qede_netif_start(edev);
+
+       return rc;
+}
+
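+/* ethtool self-test entry point: run an internal-PHY loopback when an offline
+ * test is requested, then the qed interrupt, memory, register and clock tests.
+ */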
+static void qede_self_test(struct net_device *dev,
+                          struct ethtool_test *etest, u64 *buf)
+{
+       struct qede_dev *edev = netdev_priv(dev);
+
+       DP_VERBOSE(edev, QED_MSG_DEBUG,
+                  "Self-test command parameters: offline = %d, external_lb = %d\n",
+                  (etest->flags & ETH_TEST_FL_OFFLINE),
+                  (etest->flags & ETH_TEST_FL_EXTERNAL_LB) >> 2);
+
+       memset(buf, 0, sizeof(u64) * QEDE_ETHTOOL_TEST_MAX);
+
+       if (etest->flags & ETH_TEST_FL_OFFLINE) {
+               if (qede_selftest_run_loopback(edev,
+                                              QED_LINK_LOOPBACK_INT_PHY)) {
+                       buf[QEDE_ETHTOOL_INT_LOOPBACK] = 1;
+                       etest->flags |= ETH_TEST_FL_FAILED;
+               }
+       }
+
+       if (edev->ops->common->selftest->selftest_interrupt(edev->cdev)) {
+               buf[QEDE_ETHTOOL_INTERRUPT_TEST] = 1;
+               etest->flags |= ETH_TEST_FL_FAILED;
+       }
+
+       if (edev->ops->common->selftest->selftest_memory(edev->cdev)) {
+               buf[QEDE_ETHTOOL_MEMORY_TEST] = 1;
+               etest->flags |= ETH_TEST_FL_FAILED;
+       }
+
+       if (edev->ops->common->selftest->selftest_register(edev->cdev)) {
+               buf[QEDE_ETHTOOL_REGISTER_TEST] = 1;
+               etest->flags |= ETH_TEST_FL_FAILED;
+       }
+
+       if (edev->ops->common->selftest->selftest_clock(edev->cdev)) {
+               buf[QEDE_ETHTOOL_CLOCK_TEST] = 1;
+               etest->flags |= ETH_TEST_FL_FAILED;
+       }
+}
+
+static int qede_set_tunable(struct net_device *dev,
+                           const struct ethtool_tunable *tuna,
+                           const void *data)
+{
+       struct qede_dev *edev = netdev_priv(dev);
+       u32 val;
+
+       switch (tuna->id) {
+       case ETHTOOL_RX_COPYBREAK:
+               val = *(u32 *)data;
+               if (val < QEDE_MIN_PKT_LEN || val > QEDE_RX_HDR_SIZE) {
+                       DP_VERBOSE(edev, QED_MSG_DEBUG,
+                                  "Invalid rx copy break value, range is [%u, %u]\n",
+                                  QEDE_MIN_PKT_LEN, QEDE_RX_HDR_SIZE);
+                       return -EINVAL;
+               }
+
+               edev->rx_copybreak = *(u32 *)data;
+               break;
+       default:
+               return -EOPNOTSUPP;
+       }
+
+       return 0;
+}
+
+static int qede_get_tunable(struct net_device *dev,
+                           const struct ethtool_tunable *tuna, void *data)
+{
+       struct qede_dev *edev = netdev_priv(dev);
+
+       switch (tuna->id) {
+       case ETHTOOL_RX_COPYBREAK:
+               *(u32 *)data = edev->rx_copybreak;
+               break;
+       default:
+               return -EOPNOTSUPP;
+       }
+
+       return 0;
+}
+
+static const struct ethtool_ops qede_ethtool_ops = {
+       .get_settings = qede_get_settings,
+       .set_settings = qede_set_settings,
+       .get_drvinfo = qede_get_drvinfo,
+       .get_msglevel = qede_get_msglevel,
+       .set_msglevel = qede_set_msglevel,
+       .nway_reset = qede_nway_reset,
+       .get_link = qede_get_link,
+       .get_coalesce = qede_get_coalesce,
+       .set_coalesce = qede_set_coalesce,
+       .get_ringparam = qede_get_ringparam,
+       .set_ringparam = qede_set_ringparam,
+       .get_pauseparam = qede_get_pauseparam,
+       .set_pauseparam = qede_set_pauseparam,
+       .get_strings = qede_get_strings,
+       .set_phys_id = qede_set_phys_id,
+       .get_ethtool_stats = qede_get_ethtool_stats,
+       .get_priv_flags = qede_get_priv_flags,
+       .get_sset_count = qede_get_sset_count,
+       .get_rxnfc = qede_get_rxnfc,
+       .set_rxnfc = qede_set_rxnfc,
+       .get_rxfh_indir_size = qede_get_rxfh_indir_size,
+       .get_rxfh_key_size = qede_get_rxfh_key_size,
+       .get_rxfh = qede_get_rxfh,
+       .set_rxfh = qede_set_rxfh,
+       .get_channels = qede_get_channels,
+       .set_channels = qede_set_channels,
+       .self_test = qede_self_test,
+       .get_tunable = qede_get_tunable,
+       .set_tunable = qede_set_tunable,
+};
+
+static const struct ethtool_ops qede_vf_ethtool_ops = {
+       .get_settings = qede_get_settings,
+       .get_drvinfo = qede_get_drvinfo,
+       .get_msglevel = qede_get_msglevel,
+       .set_msglevel = qede_set_msglevel,
+       .get_link = qede_get_link,
+       .get_ringparam = qede_get_ringparam,
+       .set_ringparam = qede_set_ringparam,
+       .get_strings = qede_get_strings,
+       .get_ethtool_stats = qede_get_ethtool_stats,
+       .get_priv_flags = qede_get_priv_flags,
+       .get_sset_count = qede_get_sset_count,
+       .get_rxnfc = qede_get_rxnfc,
+       .set_rxnfc = qede_set_rxnfc,
+       .get_rxfh_indir_size = qede_get_rxfh_indir_size,
+       .get_rxfh_key_size = qede_get_rxfh_key_size,
+       .get_rxfh = qede_get_rxfh,
+       .set_rxfh = qede_set_rxfh,
+       .get_channels = qede_get_channels,
+       .set_channels = qede_set_channels,
+       .get_tunable = qede_get_tunable,
+       .set_tunable = qede_set_tunable,
+};
+
+void qede_set_ethtool_ops(struct net_device *dev)
+{
+       struct qede_dev *edev = netdev_priv(dev);
+
+       if (IS_VF(edev))
+               dev->ethtool_ops = &qede_vf_ethtool_ops;
+       else
+               dev->ethtool_ops = &qede_ethtool_ops;
+}
diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c
new file mode 100644 (file)
index 0000000..9544e4c
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qede/qede_main.c
@@ -0,0 +1,3834 @@
+/* QLogic qede NIC Driver
+* Copyright (c) 2015 QLogic Corporation
+*
+* This software is available under the terms of the GNU General Public License
+* (GPL) Version 2, available from the file COPYING in the main directory of
+* this source tree.
+*/
+
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/version.h>
+#include <linux/device.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/errno.h>
+#include <linux/list.h>
+#include <linux/string.h>
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <asm/byteorder.h>
+#include <asm/param.h>
+#include <linux/io.h>
+#include <linux/netdev_features.h>
+#include <linux/udp.h>
+#include <linux/tcp.h>
+#include <net/udp_tunnel.h>
+#include <linux/ip.h>
+#include <net/ipv6.h>
+#include <net/tcp.h>
+#include <linux/if_ether.h>
+#include <linux/if_vlan.h>
+#include <linux/pkt_sched.h>
+#include <linux/ethtool.h>
+#include <linux/in.h>
+#include <linux/random.h>
+#include <net/ip6_checksum.h>
+#include <linux/bitops.h>
+
+#include "qede.h"
+
+static char version[] =
+       "QLogic FastLinQ 4xxxx Ethernet Driver qede " DRV_MODULE_VERSION "\n";
+
+MODULE_DESCRIPTION("QLogic FastLinQ 4xxxx Ethernet Driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_MODULE_VERSION);
+
+static uint debug;
+module_param(debug, uint, 0);
+MODULE_PARM_DESC(debug, " Default debug msglevel");
+
+static const struct qed_eth_ops *qed_ops;
+
+#define CHIP_NUM_57980S_40             0x1634
+#define CHIP_NUM_57980S_10             0x1666
+#define CHIP_NUM_57980S_MF             0x1636
+#define CHIP_NUM_57980S_100            0x1644
+#define CHIP_NUM_57980S_50             0x1654
+#define CHIP_NUM_57980S_25             0x1656
+#define CHIP_NUM_57980S_IOV            0x1664
+
+#ifndef PCI_DEVICE_ID_NX2_57980E
+#define PCI_DEVICE_ID_57980S_40                CHIP_NUM_57980S_40
+#define PCI_DEVICE_ID_57980S_10                CHIP_NUM_57980S_10
+#define PCI_DEVICE_ID_57980S_MF                CHIP_NUM_57980S_MF
+#define PCI_DEVICE_ID_57980S_100       CHIP_NUM_57980S_100
+#define PCI_DEVICE_ID_57980S_50                CHIP_NUM_57980S_50
+#define PCI_DEVICE_ID_57980S_25                CHIP_NUM_57980S_25
+#define PCI_DEVICE_ID_57980S_IOV       CHIP_NUM_57980S_IOV
+#endif
+
+enum qede_pci_private {
+       QEDE_PRIVATE_PF,
+       QEDE_PRIVATE_VF
+};
+
+static const struct pci_device_id qede_pci_tbl[] = {
+       {PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_40), QEDE_PRIVATE_PF},
+       {PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_10), QEDE_PRIVATE_PF},
+       {PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_MF), QEDE_PRIVATE_PF},
+       {PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_100), QEDE_PRIVATE_PF},
+       {PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_50), QEDE_PRIVATE_PF},
+       {PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_25), QEDE_PRIVATE_PF},
+#ifdef CONFIG_QED_SRIOV
+       {PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_IOV), QEDE_PRIVATE_VF},
+#endif
+       { 0 }
+};
+
+MODULE_DEVICE_TABLE(pci, qede_pci_tbl);
+
+static int qede_probe(struct pci_dev *pdev, const struct pci_device_id *id);
+
+#define TX_TIMEOUT             (5 * HZ)
+
+static void qede_remove(struct pci_dev *pdev);
+static int qede_alloc_rx_buffer(struct qede_dev *edev,
+                               struct qede_rx_queue *rxq);
+static void qede_link_update(void *dev, struct qed_link_output *link);
+
+#ifdef CONFIG_QED_SRIOV
+static int qede_set_vf_vlan(struct net_device *ndev, int vf, u16 vlan, u8 qos)
+{
+       struct qede_dev *edev = netdev_priv(ndev);
+
+       if (vlan > 4095) {
+               DP_NOTICE(edev, "Illegal vlan value %d\n", vlan);
+               return -EINVAL;
+       }
+
+       DP_VERBOSE(edev, QED_MSG_IOV, "Setting Vlan 0x%04x to VF [%d]\n",
+                  vlan, vf);
+
+       return edev->ops->iov->set_vlan(edev->cdev, vlan, vf);
+}
+
+static int qede_set_vf_mac(struct net_device *ndev, int vfidx, u8 *mac)
+{
+       struct qede_dev *edev = netdev_priv(ndev);
+
+       DP_VERBOSE(edev, QED_MSG_IOV,
+                  "Setting MAC %02x:%02x:%02x:%02x:%02x:%02x to VF [%d]\n",
+                  mac[0], mac[1], mac[2], mac[3], mac[4], mac[5], vfidx);
+
+       if (!is_valid_ether_addr(mac)) {
+               DP_VERBOSE(edev, QED_MSG_IOV, "MAC address isn't valid\n");
+               return -EINVAL;
+       }
+
+       return edev->ops->iov->set_mac(edev->cdev, mac, vfidx);
+}
+
+static int qede_sriov_configure(struct pci_dev *pdev, int num_vfs_param)
+{
+       struct qede_dev *edev = netdev_priv(pci_get_drvdata(pdev));
+       struct qed_dev_info *qed_info = &edev->dev_info.common;
+       int rc;
+
+       DP_VERBOSE(edev, QED_MSG_IOV, "Requested %d VFs\n", num_vfs_param);
+
+       rc = edev->ops->iov->configure(edev->cdev, num_vfs_param);
+
+       /* Enable/Disable Tx switching for PF */
+       if ((rc == num_vfs_param) && netif_running(edev->ndev) &&
+           qed_info->mf_mode != QED_MF_NPAR && qed_info->tx_switching) {
+               struct qed_update_vport_params params;
+
+               memset(&params, 0, sizeof(params));
+               params.vport_id = 0;
+               params.update_tx_switching_flg = 1;
+               params.tx_switching_flg = num_vfs_param ? 1 : 0;
+               edev->ops->vport_update(edev->cdev, &params);
+       }
+
+       return rc;
+}
+#endif
+
+static struct pci_driver qede_pci_driver = {
+       .name = "qede",
+       .id_table = qede_pci_tbl,
+       .probe = qede_probe,
+       .remove = qede_remove,
+#ifdef CONFIG_QED_SRIOV
+       .sriov_configure = qede_sriov_configure,
+#endif
+};
+
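+/* qed callback invoked when the qed core dictates a MAC address (e.g. a
+ * PF-forced MAC on a VF); adopt it as the netdev and primary MAC.
+ */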
+static void qede_force_mac(void *dev, u8 *mac)
+{
+       struct qede_dev *edev = dev;
+
+       ether_addr_copy(edev->ndev->dev_addr, mac);
+       ether_addr_copy(edev->primary_mac, mac);
+}
+
+static struct qed_eth_cb_ops qede_ll_ops = {
+       {
+               .link_update = qede_link_update,
+       },
+       .force_mac = qede_force_mac,
+};
+
+static int qede_netdev_event(struct notifier_block *this, unsigned long event,
+                            void *ptr)
+{
+       struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
+       struct ethtool_drvinfo drvinfo;
+       struct qede_dev *edev;
+
+       /* Currently only support name change */
+       if (event != NETDEV_CHANGENAME)
+               goto done;
+
+       /* Check whether this is a qede device */
+       if (!ndev || !ndev->ethtool_ops || !ndev->ethtool_ops->get_drvinfo)
+               goto done;
+
+       memset(&drvinfo, 0, sizeof(drvinfo));
+       ndev->ethtool_ops->get_drvinfo(ndev, &drvinfo);
+       if (strcmp(drvinfo.driver, "qede"))
+               goto done;
+       edev = netdev_priv(ndev);
+
+       /* Notify qed of the name change */
+       if (!edev->ops || !edev->ops->common)
+               goto done;
+       edev->ops->common->set_id(edev->cdev, edev->ndev->name,
+                                 "qede");
+
+done:
+       return NOTIFY_DONE;
+}
+
+static struct notifier_block qede_netdev_notifier = {
+       .notifier_call = qede_netdev_event,
+};
+
+static
+int __init qede_init(void)
+{
+       int ret;
+
+       pr_notice("qede_init: %s\n", version);
+
+       qed_ops = qed_get_eth_ops();
+       if (!qed_ops) {
+               pr_notice("Failed to get qed ethernet operations\n");
+               return -EINVAL;
+       }
+
+       /* Must register notifier before pci ops, since we might miss
+        * interface rename after pci probe and netdev registration.
+        */
+       ret = register_netdevice_notifier(&qede_netdev_notifier);
+       if (ret) {
+               pr_notice("Failed to register netdevice_notifier\n");
+               qed_put_eth_ops();
+               return -EINVAL;
+       }
+
+       ret = pci_register_driver(&qede_pci_driver);
+       if (ret) {
+               pr_notice("Failed to register driver\n");
+               unregister_netdevice_notifier(&qede_netdev_notifier);
+               qed_put_eth_ops();
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+static void __exit qede_cleanup(void)
+{
+       pr_notice("qede_cleanup called\n");
+
+       unregister_netdevice_notifier(&qede_netdev_notifier);
+       pci_unregister_driver(&qede_pci_driver);
+       qed_put_eth_ops();
+}
+
+module_init(qede_init);
+module_exit(qede_cleanup);
+
+/* -------------------------------------------------------------------------
+ * START OF FAST-PATH
+ * -------------------------------------------------------------------------
+ */
+
+/* Unmap the data and free skb */
+static int qede_free_tx_pkt(struct qede_dev *edev,
+                           struct qede_tx_queue *txq,
+                           int *len)
+{
+       u16 idx = txq->sw_tx_cons & NUM_TX_BDS_MAX;
+       struct sk_buff *skb = txq->sw_tx_ring[idx].skb;
+       struct eth_tx_1st_bd *first_bd;
+       struct eth_tx_bd *tx_data_bd;
+       int bds_consumed = 0;
+       int nbds;
+       bool data_split = txq->sw_tx_ring[idx].flags & QEDE_TSO_SPLIT_BD;
+       int i, split_bd_len = 0;
+
+       if (unlikely(!skb)) {
+               DP_ERR(edev,
+                      "skb is null for txq idx=%d txq->sw_tx_cons=%d txq->sw_tx_prod=%d\n",
+                      idx, txq->sw_tx_cons, txq->sw_tx_prod);
+               return -1;
+       }
+
+       *len = skb->len;
+
+       first_bd = (struct eth_tx_1st_bd *)qed_chain_consume(&txq->tx_pbl);
+
+       bds_consumed++;
+
+       nbds = first_bd->data.nbds;
+
+       if (data_split) {
+               struct eth_tx_bd *split = (struct eth_tx_bd *)
+                       qed_chain_consume(&txq->tx_pbl);
+               split_bd_len = BD_UNMAP_LEN(split);
+               bds_consumed++;
+       }
+       dma_unmap_page(&edev->pdev->dev, BD_UNMAP_ADDR(first_bd),
+                      BD_UNMAP_LEN(first_bd) + split_bd_len, DMA_TO_DEVICE);
+
+       /* Unmap the data of the skb frags */
+       for (i = 0; i < skb_shinfo(skb)->nr_frags; i++, bds_consumed++) {
+               tx_data_bd = (struct eth_tx_bd *)
+                       qed_chain_consume(&txq->tx_pbl);
+               dma_unmap_page(&edev->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
+                              BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
+       }
+
+       while (bds_consumed++ < nbds)
+               qed_chain_consume(&txq->tx_pbl);
+
+       /* Free skb */
+       dev_kfree_skb_any(skb);
+       txq->sw_tx_ring[idx].skb = NULL;
+       txq->sw_tx_ring[idx].flags = 0;
+
+       return 0;
+}
+
+/* Unmap the data and free skb when mapping failed during start_xmit */
+static void qede_free_failed_tx_pkt(struct qede_dev *edev,
+                                   struct qede_tx_queue *txq,
+                                   struct eth_tx_1st_bd *first_bd,
+                                   int nbd,
+                                   bool data_split)
+{
+       u16 idx = txq->sw_tx_prod & NUM_TX_BDS_MAX;
+       struct sk_buff *skb = txq->sw_tx_ring[idx].skb;
+       struct eth_tx_bd *tx_data_bd;
+       int i, split_bd_len = 0;
+
+       /* Return prod to its position before this skb was handled */
+       qed_chain_set_prod(&txq->tx_pbl,
+                          le16_to_cpu(txq->tx_db.data.bd_prod),
+                          first_bd);
+
+       first_bd = (struct eth_tx_1st_bd *)qed_chain_produce(&txq->tx_pbl);
+
+       if (data_split) {
+               struct eth_tx_bd *split = (struct eth_tx_bd *)
+                                         qed_chain_produce(&txq->tx_pbl);
+               split_bd_len = BD_UNMAP_LEN(split);
+               nbd--;
+       }
+
+       dma_unmap_page(&edev->pdev->dev, BD_UNMAP_ADDR(first_bd),
+                      BD_UNMAP_LEN(first_bd) + split_bd_len, DMA_TO_DEVICE);
+
+       /* Unmap the data of the skb frags */
+       for (i = 0; i < nbd; i++) {
+               tx_data_bd = (struct eth_tx_bd *)
+                       qed_chain_produce(&txq->tx_pbl);
+               if (tx_data_bd->nbytes)
+                       dma_unmap_page(&edev->pdev->dev,
+                                      BD_UNMAP_ADDR(tx_data_bd),
+                                      BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
+       }
+
+       /* Return again prod to its position before this skb was handled */
+       qed_chain_set_prod(&txq->tx_pbl,
+                          le16_to_cpu(txq->tx_db.data.bd_prod),
+                          first_bd);
+
+       /* Free skb */
+       dev_kfree_skb_any(skb);
+       txq->sw_tx_ring[idx].skb = NULL;
+       txq->sw_tx_ring[idx].flags = 0;
+}
+
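+/* Classify an outgoing skb: XMIT_PLAIN when no checksum offload is requested,
+ * otherwise XMIT_L4_CSUM plus LSO/encapsulation flags; *ipv6_ext is set for
+ * IPv6 packets carrying extension headers.
+ */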
+static u32 qede_xmit_type(struct qede_dev *edev,
+                         struct sk_buff *skb,
+                         int *ipv6_ext)
+{
+       u32 rc = XMIT_L4_CSUM;
+       __be16 l3_proto;
+
+       if (skb->ip_summed != CHECKSUM_PARTIAL)
+               return XMIT_PLAIN;
+
+       l3_proto = vlan_get_protocol(skb);
+       if (l3_proto == htons(ETH_P_IPV6) &&
+           (ipv6_hdr(skb)->nexthdr == NEXTHDR_IPV6))
+               *ipv6_ext = 1;
+
+       if (skb->encapsulation)
+               rc |= XMIT_ENC;
+
+       if (skb_is_gso(skb))
+               rc |= XMIT_LSO;
+
+       return rc;
+}
+
+static void qede_set_params_for_ipv6_ext(struct sk_buff *skb,
+                                        struct eth_tx_2nd_bd *second_bd,
+                                        struct eth_tx_3rd_bd *third_bd)
+{
+       u8 l4_proto;
+       u16 bd2_bits1 = 0, bd2_bits2 = 0;
+
+       bd2_bits1 |= (1 << ETH_TX_DATA_2ND_BD_IPV6_EXT_SHIFT);
+
+       bd2_bits2 |= ((((u8 *)skb_transport_header(skb) - skb->data) >> 1) &
+                    ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_MASK)
+                   << ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_SHIFT;
+
+       bd2_bits1 |= (ETH_L4_PSEUDO_CSUM_CORRECT_LENGTH <<
+                     ETH_TX_DATA_2ND_BD_L4_PSEUDO_CSUM_MODE_SHIFT);
+
+       if (vlan_get_protocol(skb) == htons(ETH_P_IPV6))
+               l4_proto = ipv6_hdr(skb)->nexthdr;
+       else
+               l4_proto = ip_hdr(skb)->protocol;
+
+       if (l4_proto == IPPROTO_UDP)
+               bd2_bits1 |= 1 << ETH_TX_DATA_2ND_BD_L4_UDP_SHIFT;
+
+       if (third_bd)
+               third_bd->data.bitfields |=
+                       cpu_to_le16(((tcp_hdrlen(skb) / 4) &
+                               ETH_TX_DATA_3RD_BD_TCP_HDR_LEN_DW_MASK) <<
+                               ETH_TX_DATA_3RD_BD_TCP_HDR_LEN_DW_SHIFT);
+
+       second_bd->data.bitfields1 = cpu_to_le16(bd2_bits1);
+       second_bd->data.bitfields2 = cpu_to_le16(bd2_bits2);
+}
+
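+/* DMA-map a single skb page fragment and record its bus address and length in
+ * the supplied Tx BD.
+ */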
+static int map_frag_to_bd(struct qede_dev *edev,
+                         skb_frag_t *frag,
+                         struct eth_tx_bd *bd)
+{
+       dma_addr_t mapping;
+
+       /* Map skb non-linear frag data for DMA */
+       mapping = skb_frag_dma_map(&edev->pdev->dev, frag, 0,
+                                  skb_frag_size(frag),
+                                  DMA_TO_DEVICE);
+       if (unlikely(dma_mapping_error(&edev->pdev->dev, mapping))) {
+               DP_NOTICE(edev, "Unable to map frag - dropping packet\n");
+               return -ENOMEM;
+       }
+
+       /* Setup the data pointer of the frag data */
+       BD_SET_UNMAP_ADDR_LEN(bd, mapping, skb_frag_size(frag));
+
+       return 0;
+}
+
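+/* Total header length (L2 up to and including the TCP header), using the
+ * inner headers for encapsulated packets.
+ */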
+static u16 qede_get_skb_hlen(struct sk_buff *skb, bool is_encap_pkt)
+{
+       if (is_encap_pkt)
+               return (skb_inner_transport_header(skb) +
+                       inner_tcp_hdrlen(skb) - skb->data);
+       else
+               return (skb_transport_header(skb) +
+                       tcp_hdrlen(skb) - skb->data);
+}
+
+/* +2 for 1st BD for headers and 2nd BD for headlen (if required) */
+#if ((MAX_SKB_FRAGS + 2) > ETH_TX_MAX_BDS_PER_NON_LSO_PACKET)
+static bool qede_pkt_req_lin(struct qede_dev *edev, struct sk_buff *skb,
+                            u8 xmit_type)
+{
+       int allowed_frags = ETH_TX_MAX_BDS_PER_NON_LSO_PACKET - 1;
+
+       if (xmit_type & XMIT_LSO) {
+               int hlen;
+
+               hlen = qede_get_skb_hlen(skb, xmit_type & XMIT_ENC);
+
+               /* linear payload would require its own BD */
+               if (skb_headlen(skb) > hlen)
+                       allowed_frags--;
+       }
+
+       return (skb_shinfo(skb)->nr_frags > allowed_frags);
+}
+#endif
+
+static inline void qede_update_tx_producer(struct qede_tx_queue *txq)
+{
+       /* wmb makes sure that the BDs data is updated before updating the
+        * producer, otherwise FW may read old data from the BDs.
+        */
+       wmb();
+       barrier();
+       writel(txq->tx_db.raw, txq->doorbell_addr);
+
+       /* mmiowb is needed to synchronize doorbell writes from more than one
+        * processor. It guarantees that the write reaches the device before
+        * the queue lock is released and another start_xmit is called (possibly
+        * on another CPU). Without this barrier, the next doorbell can bypass
+        * this doorbell. This is applicable to IA64/Altix systems.
+        */
+       mmiowb();
+}
+
+/* Main transmit function */
+static
+netdev_tx_t qede_start_xmit(struct sk_buff *skb,
+                           struct net_device *ndev)
+{
+       struct qede_dev *edev = netdev_priv(ndev);
+       struct netdev_queue *netdev_txq;
+       struct qede_tx_queue *txq;
+       struct eth_tx_1st_bd *first_bd;
+       struct eth_tx_2nd_bd *second_bd = NULL;
+       struct eth_tx_3rd_bd *third_bd = NULL;
+       struct eth_tx_bd *tx_data_bd = NULL;
+       u16 txq_index;
+       u8 nbd = 0;
+       dma_addr_t mapping;
+       int rc, frag_idx = 0, ipv6_ext = 0;
+       u8 xmit_type;
+       u16 idx;
+       u16 hlen;
+       bool data_split = false;
+
+       /* Get tx-queue context and netdev index */
+       txq_index = skb_get_queue_mapping(skb);
+       WARN_ON(txq_index >= QEDE_TSS_CNT(edev));
+       txq = QEDE_TX_QUEUE(edev, txq_index);
+       netdev_txq = netdev_get_tx_queue(ndev, txq_index);
+
+       WARN_ON(qed_chain_get_elem_left(&txq->tx_pbl) <
+                              (MAX_SKB_FRAGS + 1));
+
+       xmit_type = qede_xmit_type(edev, skb, &ipv6_ext);
+
+#if ((MAX_SKB_FRAGS + 2) > ETH_TX_MAX_BDS_PER_NON_LSO_PACKET)
+       if (qede_pkt_req_lin(edev, skb, xmit_type)) {
+               if (skb_linearize(skb)) {
+                       DP_NOTICE(edev,
+                                 "SKB linearization failed - silently dropping this SKB\n");
+                       dev_kfree_skb_any(skb);
+                       return NETDEV_TX_OK;
+               }
+       }
+#endif
+
+       /* Fill the entry in the SW ring and the BDs in the FW ring */
+       idx = txq->sw_tx_prod & NUM_TX_BDS_MAX;
+       txq->sw_tx_ring[idx].skb = skb;
+       first_bd = (struct eth_tx_1st_bd *)
+                  qed_chain_produce(&txq->tx_pbl);
+       memset(first_bd, 0, sizeof(*first_bd));
+       first_bd->data.bd_flags.bitfields =
+               1 << ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT;
+
+       /* Map skb linear data for DMA and set in the first BD */
+       mapping = dma_map_single(&edev->pdev->dev, skb->data,
+                                skb_headlen(skb), DMA_TO_DEVICE);
+       if (unlikely(dma_mapping_error(&edev->pdev->dev, mapping))) {
+               DP_NOTICE(edev, "SKB mapping failed\n");
+               qede_free_failed_tx_pkt(edev, txq, first_bd, 0, false);
+               qede_update_tx_producer(txq);
+               return NETDEV_TX_OK;
+       }
+       nbd++;
+       BD_SET_UNMAP_ADDR_LEN(first_bd, mapping, skb_headlen(skb));
+
+       /* In case there is IPv6 with extension headers or LSO we need 2nd and
+        * 3rd BDs.
+        */
+       if (unlikely((xmit_type & XMIT_LSO) | ipv6_ext)) {
+               second_bd = (struct eth_tx_2nd_bd *)
+                       qed_chain_produce(&txq->tx_pbl);
+               memset(second_bd, 0, sizeof(*second_bd));
+
+               nbd++;
+               third_bd = (struct eth_tx_3rd_bd *)
+                       qed_chain_produce(&txq->tx_pbl);
+               memset(third_bd, 0, sizeof(*third_bd));
+
+               nbd++;
+               /* We need to fill in additional data in second_bd... */
+               tx_data_bd = (struct eth_tx_bd *)second_bd;
+       }
+
+       if (skb_vlan_tag_present(skb)) {
+               first_bd->data.vlan = cpu_to_le16(skb_vlan_tag_get(skb));
+               first_bd->data.bd_flags.bitfields |=
+                       1 << ETH_TX_1ST_BD_FLAGS_VLAN_INSERTION_SHIFT;
+       }
+
+       /* Fill the parsing flags & params according to the requested offload */
+       if (xmit_type & XMIT_L4_CSUM) {
+               /* We don't re-calculate IP checksum as it is already done by
+                * the upper stack
+                */
+               first_bd->data.bd_flags.bitfields |=
+                       1 << ETH_TX_1ST_BD_FLAGS_L4_CSUM_SHIFT;
+
+               if (xmit_type & XMIT_ENC) {
+                       first_bd->data.bd_flags.bitfields |=
+                               1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT;
+                       first_bd->data.bitfields |=
+                           1 << ETH_TX_DATA_1ST_BD_TUNN_FLAG_SHIFT;
+               }
+
+               /* If the packet is IPv6 with extension headers, indicate that
+                * to FW and pass a few params, since the device cracker doesn't
+                * support parsing IPv6 with extension headers.
+                */
+               if (unlikely(ipv6_ext))
+                       qede_set_params_for_ipv6_ext(skb, second_bd, third_bd);
+       }
+
+       if (xmit_type & XMIT_LSO) {
+               first_bd->data.bd_flags.bitfields |=
+                       (1 << ETH_TX_1ST_BD_FLAGS_LSO_SHIFT);
+               third_bd->data.lso_mss =
+                       cpu_to_le16(skb_shinfo(skb)->gso_size);
+
+               if (unlikely(xmit_type & XMIT_ENC)) {
+                       first_bd->data.bd_flags.bitfields |=
+                               1 << ETH_TX_1ST_BD_FLAGS_TUNN_IP_CSUM_SHIFT;
+                       hlen = qede_get_skb_hlen(skb, true);
+               } else {
+                       first_bd->data.bd_flags.bitfields |=
+                               1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT;
+                       hlen = qede_get_skb_hlen(skb, false);
+               }
+
+               /* @@@TBD - if this is not removed, it still needs to be checked */
+               third_bd->data.bitfields |=
+                       cpu_to_le16((1 << ETH_TX_DATA_3RD_BD_HDR_NBD_SHIFT));
+
+               /* Make life easier for FW guys who can't deal with header and
+                * data on same BD. If we need to split, use the second bd...
+                */
+               if (unlikely(skb_headlen(skb) > hlen)) {
+                       DP_VERBOSE(edev, NETIF_MSG_TX_QUEUED,
+                                  "TSO split header size is %d (%x:%x)\n",
+                                  first_bd->nbytes, first_bd->addr.hi,
+                                  first_bd->addr.lo);
+
+                       mapping = HILO_U64(le32_to_cpu(first_bd->addr.hi),
+                                          le32_to_cpu(first_bd->addr.lo)) +
+                                          hlen;
+
+                       BD_SET_UNMAP_ADDR_LEN(tx_data_bd, mapping,
+                                             le16_to_cpu(first_bd->nbytes) -
+                                             hlen);
+
+                       /* this marks the BD as one that has no
+                        * individual mapping
+                        */
+                       txq->sw_tx_ring[idx].flags |= QEDE_TSO_SPLIT_BD;
+
+                       first_bd->nbytes = cpu_to_le16(hlen);
+
+                       tx_data_bd = (struct eth_tx_bd *)third_bd;
+                       data_split = true;
+               }
+       } else {
+               first_bd->data.bitfields |=
+                   (skb->len & ETH_TX_DATA_1ST_BD_PKT_LEN_MASK) <<
+                   ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT;
+       }
+
+       /* Handle fragmented skb */
+       /* Special handling for frags placed inside the 2nd and 3rd BDs */
+       while (tx_data_bd && frag_idx < skb_shinfo(skb)->nr_frags) {
+               rc = map_frag_to_bd(edev,
+                                   &skb_shinfo(skb)->frags[frag_idx],
+                                   tx_data_bd);
+               if (rc) {
+                       qede_free_failed_tx_pkt(edev, txq, first_bd, nbd,
+                                               data_split);
+                       qede_update_tx_producer(txq);
+                       return NETDEV_TX_OK;
+               }
+
+               if (tx_data_bd == (struct eth_tx_bd *)second_bd)
+                       tx_data_bd = (struct eth_tx_bd *)third_bd;
+               else
+                       tx_data_bd = NULL;
+
+               frag_idx++;
+       }
+
+       /* map last frags into 4th, 5th .... */
+       for (; frag_idx < skb_shinfo(skb)->nr_frags; frag_idx++, nbd++) {
+               tx_data_bd = (struct eth_tx_bd *)
+                            qed_chain_produce(&txq->tx_pbl);
+
+               memset(tx_data_bd, 0, sizeof(*tx_data_bd));
+
+               rc = map_frag_to_bd(edev,
+                                   &skb_shinfo(skb)->frags[frag_idx],
+                                   tx_data_bd);
+               if (rc) {
+                       qede_free_failed_tx_pkt(edev, txq, first_bd, nbd,
+                                               data_split);
+                       qede_update_tx_producer(txq);
+                       return NETDEV_TX_OK;
+               }
+       }
+
+       /* update the first BD with the actual num BDs */
+       first_bd->data.nbds = nbd;
+
+       netdev_tx_sent_queue(netdev_txq, skb->len);
+
+       skb_tx_timestamp(skb);
+
+       /* Advance the packet producer only now, once all mappings succeeded,
+        * since mapping of pages may fail.
+        */
+       txq->sw_tx_prod++;
+
+       /* 'next page' entries are counted in the producer value */
+       txq->tx_db.data.bd_prod =
+               cpu_to_le16(qed_chain_get_prod_idx(&txq->tx_pbl));
+
+       if (!skb->xmit_more || netif_xmit_stopped(netdev_txq))
+               qede_update_tx_producer(txq);
+
+       if (unlikely(qed_chain_get_elem_left(&txq->tx_pbl)
+                     < (MAX_SKB_FRAGS + 1))) {
+               if (skb->xmit_more)
+                       qede_update_tx_producer(txq);
+
+               netif_tx_stop_queue(netdev_txq);
+               DP_VERBOSE(edev, NETIF_MSG_TX_QUEUED,
+                          "Stop queue was called\n");
+               /* paired memory barrier is in qede_tx_int(), we have to keep
+                * ordering of set_bit() in netif_tx_stop_queue() and read of
+                * fp->bd_tx_cons
+                */
+               smp_mb();
+
+               if (qed_chain_get_elem_left(&txq->tx_pbl)
+                    >= (MAX_SKB_FRAGS + 1) &&
+                   (edev->state == QEDE_STATE_OPEN)) {
+                       netif_tx_wake_queue(netdev_txq);
+                       DP_VERBOSE(edev, NETIF_MSG_TX_QUEUED,
+                                  "Wake queue was called\n");
+               }
+       }
+
+       return NETDEV_TX_OK;
+}
+
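+/* True when firmware has reported Tx completions that the driver has not yet
+ * processed.
+ */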
+int qede_txq_has_work(struct qede_tx_queue *txq)
+{
+       u16 hw_bd_cons;
+
+       /* Tell compiler that consumer and producer can change */
+       barrier();
+       hw_bd_cons = le16_to_cpu(*txq->hw_cons_ptr);
+       if (qed_chain_get_cons_idx(&txq->tx_pbl) == hw_bd_cons + 1)
+               return 0;
+
+       return hw_bd_cons != qed_chain_get_cons_idx(&txq->tx_pbl);
+}
+
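+/* Process TX completions up to the HW consumer index, free the completed
+ * packets, and wake the netdev queue if it was stopped and enough BDs have
+ * been freed while the device is still open.
+ */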
+static int qede_tx_int(struct qede_dev *edev,
+                      struct qede_tx_queue *txq)
+{
+       struct netdev_queue *netdev_txq;
+       u16 hw_bd_cons;
+       unsigned int pkts_compl = 0, bytes_compl = 0;
+       int rc;
+
+       netdev_txq = netdev_get_tx_queue(edev->ndev, txq->index);
+
+       hw_bd_cons = le16_to_cpu(*txq->hw_cons_ptr);
+       barrier();
+
+       while (hw_bd_cons != qed_chain_get_cons_idx(&txq->tx_pbl)) {
+               int len = 0;
+
+               rc = qede_free_tx_pkt(edev, txq, &len);
+               if (rc) {
+                       DP_NOTICE(edev, "hw_bd_cons = %d, chain_cons=%d\n",
+                                 hw_bd_cons,
+                                 qed_chain_get_cons_idx(&txq->tx_pbl));
+                       break;
+               }
+
+               bytes_compl += len;
+               pkts_compl++;
+               txq->sw_tx_cons++;
+       }
+
+       netdev_tx_completed_queue(netdev_txq, pkts_compl, bytes_compl);
+
+       /* Need to make the tx_bd_cons update visible to start_xmit()
+        * before checking for netif_tx_queue_stopped().  Without the
+        * memory barrier, there is a small possibility that
+        * start_xmit() will miss it and cause the queue to be stopped
+        * forever.
+        * On the other hand we need an rmb() here to ensure the proper
+        * ordering of bit testing in the following
+        * netif_tx_queue_stopped(txq) call.
+        */
+       smp_mb();
+
+       if (unlikely(netif_tx_queue_stopped(netdev_txq))) {
+               /* Taking tx_lock is needed to prevent re-enabling the queue
+                * while it's empty. This could have happened if rx_action()
+                * gets suspended in qede_tx_int() after the condition before
+                * netif_tx_wake_queue(), while tx_action (qede_start_xmit()):
+                *
+                * stops the queue->sees fresh tx_bd_cons->releases the queue->
+                * sends some packets consuming the whole queue again->
+                * stops the queue
+                */
+
+               __netif_tx_lock(netdev_txq, smp_processor_id());
+
+               if ((netif_tx_queue_stopped(netdev_txq)) &&
+                   (edev->state == QEDE_STATE_OPEN) &&
+                   (qed_chain_get_elem_left(&txq->tx_pbl)
+                     >= (MAX_SKB_FRAGS + 1))) {
+                       netif_tx_wake_queue(netdev_txq);
+                       DP_VERBOSE(edev, NETIF_MSG_TX_DONE,
+                                  "Wake queue was called\n");
+               }
+
+               __netif_tx_unlock(netdev_txq);
+       }
+
+       return 0;
+}
+
+bool qede_has_rx_work(struct qede_rx_queue *rxq)
+{
+       u16 hw_comp_cons, sw_comp_cons;
+
+       /* Tell compiler that status block fields can change */
+       barrier();
+
+       hw_comp_cons = le16_to_cpu(*rxq->hw_cons_ptr);
+       sw_comp_cons = qed_chain_get_cons_idx(&rxq->rx_comp_ring);
+
+       return hw_comp_cons != sw_comp_cons;
+}
+
+static bool qede_has_tx_work(struct qede_fastpath *fp)
+{
+       u8 tc;
+
+       for (tc = 0; tc < fp->edev->num_tc; tc++)
+               if (qede_txq_has_work(&fp->txqs[tc]))
+                       return true;
+       return false;
+}
+
+static inline void qede_rx_bd_ring_consume(struct qede_rx_queue *rxq)
+{
+       qed_chain_consume(&rxq->rx_bd_ring);
+       rxq->sw_rx_cons++;
+}
+
+/* This function reuses the buffer(from an offset) from
+ * consumer index to producer index in the bd ring
+ */
+static inline void qede_reuse_page(struct qede_dev *edev,
+                                  struct qede_rx_queue *rxq,
+                                  struct sw_rx_data *curr_cons)
+{
+       struct eth_rx_bd *rx_bd_prod = qed_chain_produce(&rxq->rx_bd_ring);
+       struct sw_rx_data *curr_prod;
+       dma_addr_t new_mapping;
+
+       curr_prod = &rxq->sw_rx_ring[rxq->sw_rx_prod & NUM_RX_BDS_MAX];
+       *curr_prod = *curr_cons;
+
+       new_mapping = curr_prod->mapping + curr_prod->page_offset;
+
+       rx_bd_prod->addr.hi = cpu_to_le32(upper_32_bits(new_mapping));
+       rx_bd_prod->addr.lo = cpu_to_le32(lower_32_bits(new_mapping));
+
+       rxq->sw_rx_prod++;
+       curr_cons->data = NULL;
+}
+
+/* In case of allocation failures, reuse buffers from the consumer index
+ * to produce buffers for the firmware
+ */
+void qede_recycle_rx_bd_ring(struct qede_rx_queue *rxq,
+                            struct qede_dev *edev, u8 count)
+{
+       struct sw_rx_data *curr_cons;
+
+       for (; count > 0; count--) {
+               curr_cons = &rxq->sw_rx_ring[rxq->sw_rx_cons & NUM_RX_BDS_MAX];
+               qede_reuse_page(edev, rxq, curr_cons);
+               qede_rx_bd_ring_consume(rxq);
+       }
+}
+
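+/* Advance to the next segment within the current RX page. Once the whole page
+ * has been consumed, allocate a fresh buffer and unmap the old page; until
+ * then the same page is re-posted to the firmware with an extra reference.
+ */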
+static inline int qede_realloc_rx_buffer(struct qede_dev *edev,
+                                        struct qede_rx_queue *rxq,
+                                        struct sw_rx_data *curr_cons)
+{
+       /* Move to the next segment in the page */
+       curr_cons->page_offset += rxq->rx_buf_seg_size;
+
+       if (curr_cons->page_offset == PAGE_SIZE) {
+               if (unlikely(qede_alloc_rx_buffer(edev, rxq))) {
+                       /* Since we failed to allocate a new buffer, the
+                        * current buffer can be used again.
+                        */
+                       curr_cons->page_offset -= rxq->rx_buf_seg_size;
+
+                       return -ENOMEM;
+               }
+
+               dma_unmap_page(&edev->pdev->dev, curr_cons->mapping,
+                              PAGE_SIZE, DMA_FROM_DEVICE);
+       } else {
+               /* Increment the refcount of the page as we don't want the
+                * network stack to take ownership of the page, which can be
+                * recycled multiple times by the driver.
+                */
+               page_ref_inc(curr_cons->data);
+               qede_reuse_page(edev, rxq, curr_cons);
+       }
+
+       return 0;
+}
+
+static inline void qede_update_rx_prod(struct qede_dev *edev,
+                                      struct qede_rx_queue *rxq)
+{
+       u16 bd_prod = qed_chain_get_prod_idx(&rxq->rx_bd_ring);
+       u16 cqe_prod = qed_chain_get_prod_idx(&rxq->rx_comp_ring);
+       struct eth_rx_prod_data rx_prods = {0};
+
+       /* Update producers */
+       rx_prods.bd_prod = cpu_to_le16(bd_prod);
+       rx_prods.cqe_prod = cpu_to_le16(cqe_prod);
+
+       /* Make sure that the BD and SGE data is updated before updating the
+        * producers since FW might read the BD/SGE right after the producer
+        * is updated.
+        */
+       wmb();
+
+       internal_ram_wr(rxq->hw_rxq_prod_addr, sizeof(rx_prods),
+                       (u32 *)&rx_prods);
+
+       /* mmiowb is needed to synchronize doorbell writes from more than one
+        * processor. It guarantees that the write arrives to the device before
+        * the napi lock is released and another qede_poll is called (possibly
+        * on another CPU). Without this barrier, the next doorbell can bypass
+        * this doorbell. This is applicable to IA64/Altix systems.
+        */
+       mmiowb();
+}
+
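+/* Extract the RSS hash from the CQE. If the netdev has RXHASH enabled and the
+ * FW reported a hash type, return the hash and classify it as an L3 hash for
+ * plain IPv4/IPv6 or as an L4 hash otherwise; else report no hash.
+ */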
+static u32 qede_get_rxhash(struct qede_dev *edev,
+                          u8 bitfields,
+                          __le32 rss_hash,
+                          enum pkt_hash_types *rxhash_type)
+{
+       enum rss_hash_type htype;
+
+       htype = GET_FIELD(bitfields, ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE);
+
+       if ((edev->ndev->features & NETIF_F_RXHASH) && htype) {
+               *rxhash_type = ((htype == RSS_HASH_TYPE_IPV4) ||
+                               (htype == RSS_HASH_TYPE_IPV6)) ?
+                               PKT_HASH_TYPE_L3 : PKT_HASH_TYPE_L4;
+               return le32_to_cpu(rss_hash);
+       }
+       *rxhash_type = PKT_HASH_TYPE_NONE;
+       return 0;
+}
+
+static void qede_set_skb_csum(struct sk_buff *skb, u8 csum_flag)
+{
+       skb_checksum_none_assert(skb);
+
+       if (csum_flag & QEDE_CSUM_UNNECESSARY)
+               skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+       if (csum_flag & QEDE_TUNN_CSUM_UNNECESSARY)
+               skb->csum_level = 1;
+}
+
+static inline void qede_skb_receive(struct qede_dev *edev,
+                                   struct qede_fastpath *fp,
+                                   struct sk_buff *skb,
+                                   u16 vlan_tag)
+{
+       if (vlan_tag)
+               __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
+                                      vlan_tag);
+
+       napi_gro_receive(&fp->napi, skb);
+}
+
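+/* Fill the GSO metadata of an aggregated skb: an L3-type value of 2 in the
+ * parsing flags corresponds to IPv6 here, anything else is treated as IPv4,
+ * and the MSS is derived from the first BD length minus the header length.
+ */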
+static void qede_set_gro_params(struct qede_dev *edev,
+                               struct sk_buff *skb,
+                               struct eth_fast_path_rx_tpa_start_cqe *cqe)
+{
+       u16 parsing_flags = le16_to_cpu(cqe->pars_flags.flags);
+
+       if (((parsing_flags >> PARSING_AND_ERR_FLAGS_L3TYPE_SHIFT) &
+           PARSING_AND_ERR_FLAGS_L3TYPE_MASK) == 2)
+               skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
+       else
+               skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
+
+       skb_shinfo(skb)->gso_size = __le16_to_cpu(cqe->len_on_first_bd) -
+                                       cqe->header_len;
+}
+
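+/* Add the current RX buffer as a page fragment to the aggregation's skb and
+ * advance the BD ring. On failure the aggregation is marked as errored and
+ * the buffer is recycled back to the firmware.
+ */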
+static int qede_fill_frag_skb(struct qede_dev *edev,
+                             struct qede_rx_queue *rxq,
+                             u8 tpa_agg_index,
+                             u16 len_on_bd)
+{
+       struct sw_rx_data *current_bd = &rxq->sw_rx_ring[rxq->sw_rx_cons &
+                                                        NUM_RX_BDS_MAX];
+       struct qede_agg_info *tpa_info = &rxq->tpa_info[tpa_agg_index];
+       struct sk_buff *skb = tpa_info->skb;
+
+       if (unlikely(tpa_info->agg_state != QEDE_AGG_STATE_START))
+               goto out;
+
+       /* Add one frag and update the appropriate fields in the skb */
+       skb_fill_page_desc(skb, tpa_info->frag_id++,
+                          current_bd->data, current_bd->page_offset,
+                          len_on_bd);
+
+       if (unlikely(qede_realloc_rx_buffer(edev, rxq, current_bd))) {
+               /* Increment the page ref count to reuse it on allocation
+                * failure, so that it doesn't get freed while the SKB is
+                * being freed.
+                */
+               page_ref_inc(current_bd->data);
+               goto out;
+       }
+
+       qed_chain_consume(&rxq->rx_bd_ring);
+       rxq->sw_rx_cons++;
+
+       skb->data_len += len_on_bd;
+       skb->truesize += rxq->rx_buf_seg_size;
+       skb->len += len_on_bd;
+
+       return 0;
+
+out:
+       tpa_info->agg_state = QEDE_AGG_STATE_ERROR;
+       qede_recycle_rx_bd_ring(rxq, edev, 1);
+       return -ENOMEM;
+}
+
+static void qede_tpa_start(struct qede_dev *edev,
+                          struct qede_rx_queue *rxq,
+                          struct eth_fast_path_rx_tpa_start_cqe *cqe)
+{
+       struct qede_agg_info *tpa_info = &rxq->tpa_info[cqe->tpa_agg_index];
+       struct eth_rx_bd *rx_bd_cons = qed_chain_consume(&rxq->rx_bd_ring);
+       struct eth_rx_bd *rx_bd_prod = qed_chain_produce(&rxq->rx_bd_ring);
+       struct sw_rx_data *replace_buf = &tpa_info->replace_buf;
+       dma_addr_t mapping = tpa_info->replace_buf_mapping;
+       struct sw_rx_data *sw_rx_data_cons;
+       struct sw_rx_data *sw_rx_data_prod;
+       enum pkt_hash_types rxhash_type;
+       u32 rxhash;
+
+       sw_rx_data_cons = &rxq->sw_rx_ring[rxq->sw_rx_cons & NUM_RX_BDS_MAX];
+       sw_rx_data_prod = &rxq->sw_rx_ring[rxq->sw_rx_prod & NUM_RX_BDS_MAX];
+
+       /* Use the pre-allocated replacement buffer - we can't release the agg.
+        * start until it's over and we don't want to risk allocation failing
+        * here, so re-allocate when the aggregation is over.
+        */
+       sw_rx_data_prod->mapping = replace_buf->mapping;
+
+       sw_rx_data_prod->data = replace_buf->data;
+       rx_bd_prod->addr.hi = cpu_to_le32(upper_32_bits(mapping));
+       rx_bd_prod->addr.lo = cpu_to_le32(lower_32_bits(mapping));
+       sw_rx_data_prod->page_offset = replace_buf->page_offset;
+
+       rxq->sw_rx_prod++;
+
+       /* Move the partial skb from cons to the pool (don't unmap yet);
+        * save the mapping in case we drop the packet later on.
+        */
+       tpa_info->start_buf = *sw_rx_data_cons;
+       mapping = HILO_U64(le32_to_cpu(rx_bd_cons->addr.hi),
+                          le32_to_cpu(rx_bd_cons->addr.lo));
+
+       tpa_info->start_buf_mapping = mapping;
+       rxq->sw_rx_cons++;
+
+       /* Set the TPA state to START only if we are able to allocate an skb
+        * for this aggregation; otherwise mark it as an error and the
+        * aggregation will be dropped.
+        */
+       tpa_info->skb = netdev_alloc_skb(edev->ndev,
+                                        le16_to_cpu(cqe->len_on_first_bd));
+       if (unlikely(!tpa_info->skb)) {
+               DP_NOTICE(edev, "Failed to allocate SKB for gro\n");
+               tpa_info->agg_state = QEDE_AGG_STATE_ERROR;
+               goto cons_buf;
+       }
+
+       skb_put(tpa_info->skb, le16_to_cpu(cqe->len_on_first_bd));
+       memcpy(&tpa_info->start_cqe, cqe, sizeof(tpa_info->start_cqe));
+
+       /* Start filling in the aggregation info */
+       tpa_info->frag_id = 0;
+       tpa_info->agg_state = QEDE_AGG_STATE_START;
+
+       rxhash = qede_get_rxhash(edev, cqe->bitfields,
+                                cqe->rss_hash, &rxhash_type);
+       skb_set_hash(tpa_info->skb, rxhash, rxhash_type);
+       if ((le16_to_cpu(cqe->pars_flags.flags) >>
+            PARSING_AND_ERR_FLAGS_TAG8021QEXIST_SHIFT) &
+                   PARSING_AND_ERR_FLAGS_TAG8021QEXIST_MASK)
+               tpa_info->vlan_tag = le16_to_cpu(cqe->vlan_tag);
+       else
+               tpa_info->vlan_tag = 0;
+
+       /* This is needed in order to enable forwarding support */
+       qede_set_gro_params(edev, tpa_info->skb, cqe);
+
+cons_buf: /* We still need to handle bd_len_list to consume buffers */
+       if (likely(cqe->ext_bd_len_list[0]))
+               qede_fill_frag_skb(edev, rxq, cqe->tpa_agg_index,
+                                  le16_to_cpu(cqe->ext_bd_len_list[0]));
+
+       if (unlikely(cqe->ext_bd_len_list[1])) {
+               DP_ERR(edev,
+                      "Unlikely - got a TPA aggregation with more than one ext_bd_len_list entry in the TPA start\n");
+               tpa_info->agg_state = QEDE_AGG_STATE_ERROR;
+       }
+}
+
+#ifdef CONFIG_INET
+static void qede_gro_ip_csum(struct sk_buff *skb)
+{
+       const struct iphdr *iph = ip_hdr(skb);
+       struct tcphdr *th;
+
+       skb_set_transport_header(skb, sizeof(struct iphdr));
+       th = tcp_hdr(skb);
+
+       th->check = ~tcp_v4_check(skb->len - skb_transport_offset(skb),
+                                 iph->saddr, iph->daddr, 0);
+
+       tcp_gro_complete(skb);
+}
+
+static void qede_gro_ipv6_csum(struct sk_buff *skb)
+{
+       struct ipv6hdr *iph = ipv6_hdr(skb);
+       struct tcphdr *th;
+
+       skb_set_transport_header(skb, sizeof(struct ipv6hdr));
+       th = tcp_hdr(skb);
+
+       th->check = ~tcp_v6_check(skb->len - skb_transport_offset(skb),
+                                 &iph->saddr, &iph->daddr, 0);
+       tcp_gro_complete(skb);
+}
+#endif
+
+static void qede_gro_receive(struct qede_dev *edev,
+                            struct qede_fastpath *fp,
+                            struct sk_buff *skb,
+                            u16 vlan_tag)
+{
+       /* FW can send a single MTU-sized packet from the GRO flow due to
+        * aggregation timeout, last segment, etc., which is not expected to
+        * be a GRO packet. If an skb has zero frags then simply push it to
+        * the stack as a non-GSO skb.
+        */
+       if (unlikely(!skb->data_len)) {
+               skb_shinfo(skb)->gso_type = 0;
+               skb_shinfo(skb)->gso_size = 0;
+               goto send_skb;
+       }
+
+#ifdef CONFIG_INET
+       if (skb_shinfo(skb)->gso_size) {
+               skb_set_network_header(skb, 0);
+
+               switch (skb->protocol) {
+               case htons(ETH_P_IP):
+                       qede_gro_ip_csum(skb);
+                       break;
+               case htons(ETH_P_IPV6):
+                       qede_gro_ipv6_csum(skb);
+                       break;
+               default:
+                       DP_ERR(edev,
+                              "Error: FW GRO supports only IPv4/IPv6, not 0x%04x\n",
+                              ntohs(skb->protocol));
+               }
+       }
+#endif
+
+send_skb:
+       skb_record_rx_queue(skb, fp->rss_id);
+       qede_skb_receive(edev, fp, skb, vlan_tag);
+}
+
+static inline void qede_tpa_cont(struct qede_dev *edev,
+                                struct qede_rx_queue *rxq,
+                                struct eth_fast_path_rx_tpa_cont_cqe *cqe)
+{
+       int i;
+
+       for (i = 0; cqe->len_list[i]; i++)
+               qede_fill_frag_skb(edev, rxq, cqe->tpa_agg_index,
+                                  le16_to_cpu(cqe->len_list[i]));
+
+       if (unlikely(i > 1))
+               DP_ERR(edev,
+                      "Strange - TPA cont with more than a single len_list entry\n");
+}
+
+static void qede_tpa_end(struct qede_dev *edev,
+                        struct qede_fastpath *fp,
+                        struct eth_fast_path_rx_tpa_end_cqe *cqe)
+{
+       struct qede_rx_queue *rxq = fp->rxq;
+       struct qede_agg_info *tpa_info;
+       struct sk_buff *skb;
+       int i;
+
+       tpa_info = &rxq->tpa_info[cqe->tpa_agg_index];
+       skb = tpa_info->skb;
+
+       for (i = 0; cqe->len_list[i]; i++)
+               qede_fill_frag_skb(edev, rxq, cqe->tpa_agg_index,
+                                  le16_to_cpu(cqe->len_list[i]));
+       if (unlikely(i > 1))
+               DP_ERR(edev,
+                      "Strange - TPA end with more than a single len_list entry\n");
+
+       if (unlikely(tpa_info->agg_state != QEDE_AGG_STATE_START))
+               goto err;
+
+       /* Sanity */
+       if (unlikely(cqe->num_of_bds != tpa_info->frag_id + 1))
+               DP_ERR(edev,
+                      "Strange - TPA had %02x BDs, but SKB has only %d frags\n",
+                      cqe->num_of_bds, tpa_info->frag_id);
+       if (unlikely(skb->len != le16_to_cpu(cqe->total_packet_len)))
+               DP_ERR(edev,
+                      "Strange - total packet len [cqe] is %4x but SKB has len %04x\n",
+                      le16_to_cpu(cqe->total_packet_len), skb->len);
+
+       memcpy(skb->data,
+              page_address(tpa_info->start_buf.data) +
+               tpa_info->start_cqe.placement_offset +
+               tpa_info->start_buf.page_offset,
+              le16_to_cpu(tpa_info->start_cqe.len_on_first_bd));
+
+       /* Recycle [mapped] start buffer for the next replacement */
+       tpa_info->replace_buf = tpa_info->start_buf;
+       tpa_info->replace_buf_mapping = tpa_info->start_buf_mapping;
+
+       /* Finalize the SKB */
+       skb->protocol = eth_type_trans(skb, edev->ndev);
+       skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+       /* tcp_gro_complete() will copy NAPI_GRO_CB(skb)->count
+        * to skb_shinfo(skb)->gso_segs
+        */
+       NAPI_GRO_CB(skb)->count = le16_to_cpu(cqe->num_of_coalesced_segs);
+
+       qede_gro_receive(edev, fp, skb, tpa_info->vlan_tag);
+
+       tpa_info->agg_state = QEDE_AGG_STATE_NONE;
+
+       return;
+err:
+       /* The BD starting the aggregation is still mapped; Re-use it for
+        * future aggregations [as replacement buffer]
+        */
+       memcpy(&tpa_info->replace_buf, &tpa_info->start_buf,
+              sizeof(struct sw_rx_data));
+       tpa_info->replace_buf_mapping = tpa_info->start_buf_mapping;
+       tpa_info->start_buf.data = NULL;
+       tpa_info->agg_state = QEDE_AGG_STATE_NONE;
+       dev_kfree_skb_any(tpa_info->skb);
+       tpa_info->skb = NULL;
+}
+
+static bool qede_tunn_exist(u16 flag)
+{
+       return !!(flag & (PARSING_AND_ERR_FLAGS_TUNNELEXIST_MASK <<
+                         PARSING_AND_ERR_FLAGS_TUNNELEXIST_SHIFT));
+}
+
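+/* Build the set of error flags that are relevant for a tunnelled packet,
+ * given which checksums the FW actually calculated, and report
+ * QEDE_CSUM_ERROR if any of them is set in the parsing flags; otherwise
+ * report which checksums can be trusted.
+ */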
+static u8 qede_check_tunn_csum(u16 flag)
+{
+       u16 csum_flag = 0;
+       u8 tcsum = 0;
+
+       if (flag & (PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMWASCALCULATED_MASK <<
+                   PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMWASCALCULATED_SHIFT))
+               csum_flag |= PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_MASK <<
+                            PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_SHIFT;
+
+       if (flag & (PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_MASK <<
+                   PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_SHIFT)) {
+               csum_flag |= PARSING_AND_ERR_FLAGS_L4CHKSMERROR_MASK <<
+                            PARSING_AND_ERR_FLAGS_L4CHKSMERROR_SHIFT;
+               tcsum = QEDE_TUNN_CSUM_UNNECESSARY;
+       }
+
+       csum_flag |= PARSING_AND_ERR_FLAGS_TUNNELIPHDRERROR_MASK <<
+                    PARSING_AND_ERR_FLAGS_TUNNELIPHDRERROR_SHIFT |
+                    PARSING_AND_ERR_FLAGS_IPHDRERROR_MASK <<
+                    PARSING_AND_ERR_FLAGS_IPHDRERROR_SHIFT;
+
+       if (csum_flag & flag)
+               return QEDE_CSUM_ERROR;
+
+       return QEDE_CSUM_UNNECESSARY | tcsum;
+}
+
+static u8 qede_check_notunn_csum(u16 flag)
+{
+       u16 csum_flag = 0;
+       u8 csum = 0;
+
+       if (flag & (PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_MASK <<
+                   PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_SHIFT)) {
+               csum_flag |= PARSING_AND_ERR_FLAGS_L4CHKSMERROR_MASK <<
+                            PARSING_AND_ERR_FLAGS_L4CHKSMERROR_SHIFT;
+               csum = QEDE_CSUM_UNNECESSARY;
+       }
+
+       csum_flag |= PARSING_AND_ERR_FLAGS_IPHDRERROR_MASK <<
+                    PARSING_AND_ERR_FLAGS_IPHDRERROR_SHIFT;
+
+       if (csum_flag & flag)
+               return QEDE_CSUM_ERROR;
+
+       return csum;
+}
+
+static u8 qede_check_csum(u16 flag)
+{
+       if (!qede_tunn_exist(flag))
+               return qede_check_notunn_csum(flag);
+       else
+               return qede_check_tunn_csum(flag);
+}
+
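+/* Report whether the FW marked the packet (or its tunnel header) as an IPv4
+ * fragment; such packets may legitimately fail L4 checksum validation and are
+ * therefore not dropped by the RX error check.
+ */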
+static bool qede_pkt_is_ip_fragmented(struct eth_fast_path_rx_reg_cqe *cqe,
+                                     u16 flag)
+{
+       u8 tun_pars_flg = cqe->tunnel_pars_flags.flags;
+
+       if ((tun_pars_flg & (ETH_TUNNEL_PARSING_FLAGS_IPV4_FRAGMENT_MASK <<
+                            ETH_TUNNEL_PARSING_FLAGS_IPV4_FRAGMENT_SHIFT)) ||
+           (flag & (PARSING_AND_ERR_FLAGS_IPV4FRAG_MASK <<
+                    PARSING_AND_ERR_FLAGS_IPV4FRAG_SHIFT)))
+               return true;
+
+       return false;
+}
+
+static int qede_rx_int(struct qede_fastpath *fp, int budget)
+{
+       struct qede_dev *edev = fp->edev;
+       struct qede_rx_queue *rxq = fp->rxq;
+
+       u16 hw_comp_cons, sw_comp_cons, sw_rx_index, parse_flag;
+       int rx_pkt = 0;
+       u8 csum_flag;
+
+       hw_comp_cons = le16_to_cpu(*rxq->hw_cons_ptr);
+       sw_comp_cons = qed_chain_get_cons_idx(&rxq->rx_comp_ring);
+
+       /* Memory barrier to prevent the CPU from doing speculative reads of
+        * the CQE/BD in the while-loop before reading hw_comp_cons. If the CQE
+        * is read before it is written by FW, and FW then writes the CQE and
+        * the SB before the CPU reads hw_comp_cons, an old CQE would be used.
+        */
+       rmb();
+
+       /* Loop to complete all indicated BDs */
+       while (sw_comp_cons != hw_comp_cons) {
+               struct eth_fast_path_rx_reg_cqe *fp_cqe;
+               enum pkt_hash_types rxhash_type;
+               enum eth_rx_cqe_type cqe_type;
+               struct sw_rx_data *sw_rx_data;
+               union eth_rx_cqe *cqe;
+               struct sk_buff *skb;
+               struct page *data;
+               __le16 flags;
+               u16 len, pad;
+               u32 rx_hash;
+
+               /* Get the CQE from the completion ring */
+               cqe = (union eth_rx_cqe *)
+                       qed_chain_consume(&rxq->rx_comp_ring);
+               cqe_type = cqe->fast_path_regular.type;
+
+               if (unlikely(cqe_type == ETH_RX_CQE_TYPE_SLOW_PATH)) {
+                       edev->ops->eth_cqe_completion(
+                                       edev->cdev, fp->rss_id,
+                                       (struct eth_slow_path_rx_cqe *)cqe);
+                       goto next_cqe;
+               }
+
+               if (cqe_type != ETH_RX_CQE_TYPE_REGULAR) {
+                       switch (cqe_type) {
+                       case ETH_RX_CQE_TYPE_TPA_START:
+                               qede_tpa_start(edev, rxq,
+                                              &cqe->fast_path_tpa_start);
+                               goto next_cqe;
+                       case ETH_RX_CQE_TYPE_TPA_CONT:
+                               qede_tpa_cont(edev, rxq,
+                                             &cqe->fast_path_tpa_cont);
+                               goto next_cqe;
+                       case ETH_RX_CQE_TYPE_TPA_END:
+                               qede_tpa_end(edev, fp,
+                                            &cqe->fast_path_tpa_end);
+                               goto next_rx_only;
+                       default:
+                               break;
+                       }
+               }
+
+               /* Get the data from the SW ring */
+               sw_rx_index = rxq->sw_rx_cons & NUM_RX_BDS_MAX;
+               sw_rx_data = &rxq->sw_rx_ring[sw_rx_index];
+               data = sw_rx_data->data;
+
+               fp_cqe = &cqe->fast_path_regular;
+               len =  le16_to_cpu(fp_cqe->len_on_first_bd);
+               pad = fp_cqe->placement_offset;
+               flags = cqe->fast_path_regular.pars_flags.flags;
+
+               /* If this is an error packet then drop it */
+               parse_flag = le16_to_cpu(flags);
+
+               csum_flag = qede_check_csum(parse_flag);
+               if (unlikely(csum_flag == QEDE_CSUM_ERROR)) {
+                       if (qede_pkt_is_ip_fragmented(&cqe->fast_path_regular,
+                                                     parse_flag)) {
+                               rxq->rx_ip_frags++;
+                               goto alloc_skb;
+                       }
+
+                       DP_NOTICE(edev,
+                                 "CQE in CONS = %u has error, flags = %x, dropping incoming packet\n",
+                                 sw_comp_cons, parse_flag);
+                       rxq->rx_hw_errors++;
+                       qede_recycle_rx_bd_ring(rxq, edev, fp_cqe->bd_num);
+                       goto next_cqe;
+               }
+
+alloc_skb:
+               skb = netdev_alloc_skb(edev->ndev, QEDE_RX_HDR_SIZE);
+               if (unlikely(!skb)) {
+                       DP_NOTICE(edev,
+                                 "skb allocation failed, dropping incoming packet\n");
+                       qede_recycle_rx_bd_ring(rxq, edev, fp_cqe->bd_num);
+                       rxq->rx_alloc_errors++;
+                       goto next_cqe;
+               }
+
+               /* Copy data into SKB */
+               if (len + pad <= edev->rx_copybreak) {
+                       memcpy(skb_put(skb, len),
+                              page_address(data) + pad +
+                               sw_rx_data->page_offset, len);
+                       qede_reuse_page(edev, rxq, sw_rx_data);
+               } else {
+                       struct skb_frag_struct *frag;
+                       unsigned int pull_len;
+                       unsigned char *va;
+
+                       frag = &skb_shinfo(skb)->frags[0];
+
+                       skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, data,
+                                       pad + sw_rx_data->page_offset,
+                                       len, rxq->rx_buf_seg_size);
+
+                       va = skb_frag_address(frag);
+                       pull_len = eth_get_headlen(va, QEDE_RX_HDR_SIZE);
+
+                       /* Align the pull_len to optimize memcpy */
+                       memcpy(skb->data, va, ALIGN(pull_len, sizeof(long)));
+
+                       skb_frag_size_sub(frag, pull_len);
+                       frag->page_offset += pull_len;
+                       skb->data_len -= pull_len;
+                       skb->tail += pull_len;
+
+                       if (unlikely(qede_realloc_rx_buffer(edev, rxq,
+                                                           sw_rx_data))) {
+                               DP_ERR(edev, "Failed to allocate rx buffer\n");
+                               /* Increment the page ref count to reuse it on
+                                * allocation failure so that it doesn't get
+                                * freed while the SKB is being freed.
+                                */
+
+                               page_ref_inc(sw_rx_data->data);
+                               rxq->rx_alloc_errors++;
+                               qede_recycle_rx_bd_ring(rxq, edev,
+                                                       fp_cqe->bd_num);
+                               dev_kfree_skb_any(skb);
+                               goto next_cqe;
+                       }
+               }
+
+               qede_rx_bd_ring_consume(rxq);
+
+               if (fp_cqe->bd_num != 1) {
+                       u16 pkt_len = le16_to_cpu(fp_cqe->pkt_len);
+                       u8 num_frags;
+
+                       pkt_len -= len;
+
+                       for (num_frags = fp_cqe->bd_num - 1; num_frags > 0;
+                            num_frags--) {
+                               u16 cur_size = pkt_len > rxq->rx_buf_size ?
+                                               rxq->rx_buf_size : pkt_len;
+                               if (unlikely(!cur_size)) {
+                                       DP_ERR(edev,
+                                              "Still got %d BDs for mapping jumbo, but length became 0\n",
+                                              num_frags);
+                                       qede_recycle_rx_bd_ring(rxq, edev,
+                                                               num_frags);
+                                       dev_kfree_skb_any(skb);
+                                       goto next_cqe;
+                               }
+
+                               if (unlikely(qede_alloc_rx_buffer(edev, rxq))) {
+                                       qede_recycle_rx_bd_ring(rxq, edev,
+                                                               num_frags);
+                                       dev_kfree_skb_any(skb);
+                                       goto next_cqe;
+                               }
+
+                               sw_rx_index = rxq->sw_rx_cons & NUM_RX_BDS_MAX;
+                               sw_rx_data = &rxq->sw_rx_ring[sw_rx_index];
+                               qede_rx_bd_ring_consume(rxq);
+
+                               dma_unmap_page(&edev->pdev->dev,
+                                              sw_rx_data->mapping,
+                                              PAGE_SIZE, DMA_FROM_DEVICE);
+
+                               skb_fill_page_desc(skb,
+                                                  skb_shinfo(skb)->nr_frags++,
+                                                  sw_rx_data->data, 0,
+                                                  cur_size);
+
+                               skb->truesize += PAGE_SIZE;
+                               skb->data_len += cur_size;
+                               skb->len += cur_size;
+                               pkt_len -= cur_size;
+                       }
+
+                       if (unlikely(pkt_len))
+                               DP_ERR(edev,
+                                      "Mapped all BDs of jumbo, but still have %d bytes\n",
+                                      pkt_len);
+               }
+
+               skb->protocol = eth_type_trans(skb, edev->ndev);
+
+               rx_hash = qede_get_rxhash(edev, fp_cqe->bitfields,
+                                         fp_cqe->rss_hash,
+                                         &rxhash_type);
+
+               skb_set_hash(skb, rx_hash, rxhash_type);
+
+               qede_set_skb_csum(skb, csum_flag);
+
+               skb_record_rx_queue(skb, fp->rss_id);
+
+               qede_skb_receive(edev, fp, skb, le16_to_cpu(fp_cqe->vlan_tag));
+next_rx_only:
+               rx_pkt++;
+
+next_cqe: /* don't consume bd rx buffer */
+               qed_chain_recycle_consumed(&rxq->rx_comp_ring);
+               sw_comp_cons = qed_chain_get_cons_idx(&rxq->rx_comp_ring);
+               /* CR TPA - revisit how to handle budget in TPA perhaps
+                * increase on "end"
+                */
+               if (rx_pkt == budget)
+                       break;
+       } /* repeat while sw_comp_cons != hw_comp_cons... */
+
+       /* Update producers */
+       qede_update_rx_prod(edev, rxq);
+
+       return rx_pkt;
+}
+
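+/* NAPI poll handler: service TX completions on all traffic classes, then RX
+ * up to the budget; complete NAPI and re-enable the status block interrupt
+ * only when no further work is pending.
+ */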
+static int qede_poll(struct napi_struct *napi, int budget)
+{
+       struct qede_fastpath *fp = container_of(napi, struct qede_fastpath,
+                                               napi);
+       struct qede_dev *edev = fp->edev;
+       int rx_work_done = 0;
+       u8 tc;
+
+       for (tc = 0; tc < edev->num_tc; tc++)
+               if (qede_txq_has_work(&fp->txqs[tc]))
+                       qede_tx_int(edev, &fp->txqs[tc]);
+
+       rx_work_done = qede_has_rx_work(fp->rxq) ?
+                       qede_rx_int(fp, budget) : 0;
+       if (rx_work_done < budget) {
+               qed_sb_update_sb_idx(fp->sb_info);
+               /* *_has_*_work() reads the status block,
+                * thus we need to ensure that status block indices
+                * have been actually read (qed_sb_update_sb_idx)
+                * prior to this check (*_has_*_work) so that
+                * we won't write the "newer" value of the status block
+                * to HW (if there was a DMA right after
+                * qede_has_rx_work and if there is no rmb, the memory
+                * reading (qed_sb_update_sb_idx) may be postponed
+                * to right before *_ack_sb). In this case there
+                * will never be another interrupt until there is
+                * another update of the status block, while there
+                * is still unhandled work.
+                */
+               rmb();
+
+               /* Fall out from the NAPI loop if needed */
+               if (!(qede_has_rx_work(fp->rxq) ||
+                     qede_has_tx_work(fp))) {
+                       napi_complete(napi);
+
+                       /* Update and reenable interrupts */
+                       qed_sb_ack(fp->sb_info, IGU_INT_ENABLE,
+                                  1 /*update*/);
+               } else {
+                       rx_work_done = budget;
+               }
+       }
+
+       return rx_work_done;
+}
+
+static irqreturn_t qede_msix_fp_int(int irq, void *fp_cookie)
+{
+       struct qede_fastpath *fp = fp_cookie;
+
+       qed_sb_ack(fp->sb_info, IGU_INT_DISABLE, 0 /*do not update*/);
+
+       napi_schedule_irqoff(&fp->napi);
+       return IRQ_HANDLED;
+}
+
+/* -------------------------------------------------------------------------
+ * END OF FAST-PATH
+ * -------------------------------------------------------------------------
+ */
+
+static int qede_open(struct net_device *ndev);
+static int qede_close(struct net_device *ndev);
+static int qede_set_mac_addr(struct net_device *ndev, void *p);
+static void qede_set_rx_mode(struct net_device *ndev);
+static void qede_config_rx_mode(struct net_device *ndev);
+
+static int qede_set_ucast_rx_mac(struct qede_dev *edev,
+                                enum qed_filter_xcast_params_type opcode,
+                                unsigned char mac[ETH_ALEN])
+{
+       struct qed_filter_params filter_cmd;
+
+       memset(&filter_cmd, 0, sizeof(filter_cmd));
+       filter_cmd.type = QED_FILTER_TYPE_UCAST;
+       filter_cmd.filter.ucast.type = opcode;
+       filter_cmd.filter.ucast.mac_valid = 1;
+       ether_addr_copy(filter_cmd.filter.ucast.mac, mac);
+
+       return edev->ops->filter_config(edev->cdev, &filter_cmd);
+}
+
+static int qede_set_ucast_rx_vlan(struct qede_dev *edev,
+                                 enum qed_filter_xcast_params_type opcode,
+                                 u16 vid)
+{
+       struct qed_filter_params filter_cmd;
+
+       memset(&filter_cmd, 0, sizeof(filter_cmd));
+       filter_cmd.type = QED_FILTER_TYPE_UCAST;
+       filter_cmd.filter.ucast.type = opcode;
+       filter_cmd.filter.ucast.vlan_valid = 1;
+       filter_cmd.filter.ucast.vlan = vid;
+
+       return edev->ops->filter_config(edev->cdev, &filter_cmd);
+}
+
+void qede_fill_by_demand_stats(struct qede_dev *edev)
+{
+       struct qed_eth_stats stats;
+
+       edev->ops->get_vport_stats(edev->cdev, &stats);
+       edev->stats.no_buff_discards = stats.no_buff_discards;
+       edev->stats.rx_ucast_bytes = stats.rx_ucast_bytes;
+       edev->stats.rx_mcast_bytes = stats.rx_mcast_bytes;
+       edev->stats.rx_bcast_bytes = stats.rx_bcast_bytes;
+       edev->stats.rx_ucast_pkts = stats.rx_ucast_pkts;
+       edev->stats.rx_mcast_pkts = stats.rx_mcast_pkts;
+       edev->stats.rx_bcast_pkts = stats.rx_bcast_pkts;
+       edev->stats.mftag_filter_discards = stats.mftag_filter_discards;
+       edev->stats.mac_filter_discards = stats.mac_filter_discards;
+
+       edev->stats.tx_ucast_bytes = stats.tx_ucast_bytes;
+       edev->stats.tx_mcast_bytes = stats.tx_mcast_bytes;
+       edev->stats.tx_bcast_bytes = stats.tx_bcast_bytes;
+       edev->stats.tx_ucast_pkts = stats.tx_ucast_pkts;
+       edev->stats.tx_mcast_pkts = stats.tx_mcast_pkts;
+       edev->stats.tx_bcast_pkts = stats.tx_bcast_pkts;
+       edev->stats.tx_err_drop_pkts = stats.tx_err_drop_pkts;
+       edev->stats.coalesced_pkts = stats.tpa_coalesced_pkts;
+       edev->stats.coalesced_events = stats.tpa_coalesced_events;
+       edev->stats.coalesced_aborts_num = stats.tpa_aborts_num;
+       edev->stats.non_coalesced_pkts = stats.tpa_not_coalesced_pkts;
+       edev->stats.coalesced_bytes = stats.tpa_coalesced_bytes;
+
+       edev->stats.rx_64_byte_packets = stats.rx_64_byte_packets;
+       edev->stats.rx_65_to_127_byte_packets = stats.rx_65_to_127_byte_packets;
+       edev->stats.rx_128_to_255_byte_packets =
+                               stats.rx_128_to_255_byte_packets;
+       edev->stats.rx_256_to_511_byte_packets =
+                               stats.rx_256_to_511_byte_packets;
+       edev->stats.rx_512_to_1023_byte_packets =
+                               stats.rx_512_to_1023_byte_packets;
+       edev->stats.rx_1024_to_1518_byte_packets =
+                               stats.rx_1024_to_1518_byte_packets;
+       edev->stats.rx_1519_to_1522_byte_packets =
+                               stats.rx_1519_to_1522_byte_packets;
+       edev->stats.rx_1519_to_2047_byte_packets =
+                               stats.rx_1519_to_2047_byte_packets;
+       edev->stats.rx_2048_to_4095_byte_packets =
+                               stats.rx_2048_to_4095_byte_packets;
+       edev->stats.rx_4096_to_9216_byte_packets =
+                               stats.rx_4096_to_9216_byte_packets;
+       edev->stats.rx_9217_to_16383_byte_packets =
+                               stats.rx_9217_to_16383_byte_packets;
+       edev->stats.rx_crc_errors = stats.rx_crc_errors;
+       edev->stats.rx_mac_crtl_frames = stats.rx_mac_crtl_frames;
+       edev->stats.rx_pause_frames = stats.rx_pause_frames;
+       edev->stats.rx_pfc_frames = stats.rx_pfc_frames;
+       edev->stats.rx_align_errors = stats.rx_align_errors;
+       edev->stats.rx_carrier_errors = stats.rx_carrier_errors;
+       edev->stats.rx_oversize_packets = stats.rx_oversize_packets;
+       edev->stats.rx_jabbers = stats.rx_jabbers;
+       edev->stats.rx_undersize_packets = stats.rx_undersize_packets;
+       edev->stats.rx_fragments = stats.rx_fragments;
+       edev->stats.tx_64_byte_packets = stats.tx_64_byte_packets;
+       edev->stats.tx_65_to_127_byte_packets = stats.tx_65_to_127_byte_packets;
+       edev->stats.tx_128_to_255_byte_packets =
+                               stats.tx_128_to_255_byte_packets;
+       edev->stats.tx_256_to_511_byte_packets =
+                               stats.tx_256_to_511_byte_packets;
+       edev->stats.tx_512_to_1023_byte_packets =
+                               stats.tx_512_to_1023_byte_packets;
+       edev->stats.tx_1024_to_1518_byte_packets =
+                               stats.tx_1024_to_1518_byte_packets;
+       edev->stats.tx_1519_to_2047_byte_packets =
+                               stats.tx_1519_to_2047_byte_packets;
+       edev->stats.tx_2048_to_4095_byte_packets =
+                               stats.tx_2048_to_4095_byte_packets;
+       edev->stats.tx_4096_to_9216_byte_packets =
+                               stats.tx_4096_to_9216_byte_packets;
+       edev->stats.tx_9217_to_16383_byte_packets =
+                               stats.tx_9217_to_16383_byte_packets;
+       edev->stats.tx_pause_frames = stats.tx_pause_frames;
+       edev->stats.tx_pfc_frames = stats.tx_pfc_frames;
+       edev->stats.tx_lpi_entry_count = stats.tx_lpi_entry_count;
+       edev->stats.tx_total_collisions = stats.tx_total_collisions;
+       edev->stats.brb_truncates = stats.brb_truncates;
+       edev->stats.brb_discards = stats.brb_discards;
+       edev->stats.tx_mac_ctrl_frames = stats.tx_mac_ctrl_frames;
+}
+
+static struct rtnl_link_stats64 *qede_get_stats64(struct net_device *dev,
+                                                  struct rtnl_link_stats64 *stats)
+{
+       struct qede_dev *edev = netdev_priv(dev);
+
+       qede_fill_by_demand_stats(edev);
+
+       stats->rx_packets = edev->stats.rx_ucast_pkts +
+                           edev->stats.rx_mcast_pkts +
+                           edev->stats.rx_bcast_pkts;
+       stats->tx_packets = edev->stats.tx_ucast_pkts +
+                           edev->stats.tx_mcast_pkts +
+                           edev->stats.tx_bcast_pkts;
+
+       stats->rx_bytes = edev->stats.rx_ucast_bytes +
+                         edev->stats.rx_mcast_bytes +
+                         edev->stats.rx_bcast_bytes;
+
+       stats->tx_bytes = edev->stats.tx_ucast_bytes +
+                         edev->stats.tx_mcast_bytes +
+                         edev->stats.tx_bcast_bytes;
+
+       stats->tx_errors = edev->stats.tx_err_drop_pkts;
+       stats->multicast = edev->stats.rx_mcast_pkts +
+                          edev->stats.rx_bcast_pkts;
+
+       stats->rx_fifo_errors = edev->stats.no_buff_discards;
+
+       stats->collisions = edev->stats.tx_total_collisions;
+       stats->rx_crc_errors = edev->stats.rx_crc_errors;
+       stats->rx_frame_errors = edev->stats.rx_align_errors;
+
+       return stats;
+}
+
+#ifdef CONFIG_QED_SRIOV
+static int qede_get_vf_config(struct net_device *dev, int vfidx,
+                             struct ifla_vf_info *ivi)
+{
+       struct qede_dev *edev = netdev_priv(dev);
+
+       if (!edev->ops)
+               return -EINVAL;
+
+       return edev->ops->iov->get_config(edev->cdev, vfidx, ivi);
+}
+
+static int qede_set_vf_rate(struct net_device *dev, int vfidx,
+                           int min_tx_rate, int max_tx_rate)
+{
+       struct qede_dev *edev = netdev_priv(dev);
+
+       return edev->ops->iov->set_rate(edev->cdev, vfidx, min_tx_rate,
+                                       max_tx_rate);
+}
+
+static int qede_set_vf_spoofchk(struct net_device *dev, int vfidx, bool val)
+{
+       struct qede_dev *edev = netdev_priv(dev);
+
+       if (!edev->ops)
+               return -EINVAL;
+
+       return edev->ops->iov->set_spoof(edev->cdev, vfidx, val);
+}
+
+static int qede_set_vf_link_state(struct net_device *dev, int vfidx,
+                                 int link_state)
+{
+       struct qede_dev *edev = netdev_priv(dev);
+
+       if (!edev->ops)
+               return -EINVAL;
+
+       return edev->ops->iov->set_link_state(edev->cdev, vfidx, link_state);
+}
+#endif
+
+static void qede_config_accept_any_vlan(struct qede_dev *edev, bool action)
+{
+       struct qed_update_vport_params params;
+       int rc;
+
+       /* Proceed only if action actually needs to be performed */
+       if (edev->accept_any_vlan == action)
+               return;
+
+       memset(&params, 0, sizeof(params));
+
+       params.vport_id = 0;
+       params.accept_any_vlan = action;
+       params.update_accept_any_vlan_flg = 1;
+
+       rc = edev->ops->vport_update(edev->cdev, &params);
+       if (rc) {
+               DP_ERR(edev, "Failed to %s accept-any-vlan\n",
+                      action ? "enable" : "disable");
+       } else {
+               DP_INFO(edev, "%s accept-any-vlan\n",
+                       action ? "enabled" : "disabled");
+               edev->accept_any_vlan = action;
+       }
+}
+
+static int qede_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
+{
+       struct qede_dev *edev = netdev_priv(dev);
+       struct qede_vlan *vlan, *tmp;
+       int rc;
+
+       DP_VERBOSE(edev, NETIF_MSG_IFUP, "Adding vlan 0x%04x\n", vid);
+
+       vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
+       if (!vlan) {
+               DP_INFO(edev, "Failed to allocate struct for vlan\n");
+               return -ENOMEM;
+       }
+       INIT_LIST_HEAD(&vlan->list);
+       vlan->vid = vid;
+       vlan->configured = false;
+
+       /* Verify vlan isn't already configured */
+       list_for_each_entry(tmp, &edev->vlan_list, list) {
+               if (tmp->vid == vlan->vid) {
+                       DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN),
+                                  "vlan already configured\n");
+                       kfree(vlan);
+                       return -EEXIST;
+               }
+       }
+
+       /* If interface is down, cache this VLAN ID and return */
+       if (edev->state != QEDE_STATE_OPEN) {
+               DP_VERBOSE(edev, NETIF_MSG_IFDOWN,
+                          "Interface is down, VLAN %d will be configured when interface is up\n",
+                          vid);
+               if (vid != 0)
+                       edev->non_configured_vlans++;
+               list_add(&vlan->list, &edev->vlan_list);
+
+               return 0;
+       }
+
+       /* Check for the filter limit.
+        * Note - vlan0 has a reserved filter and can be added without
+        * worrying about quota
+        */
+       if ((edev->configured_vlans < edev->dev_info.num_vlan_filters) ||
+           (vlan->vid == 0)) {
+               rc = qede_set_ucast_rx_vlan(edev,
+                                           QED_FILTER_XCAST_TYPE_ADD,
+                                           vlan->vid);
+               if (rc) {
+                       DP_ERR(edev, "Failed to configure VLAN %d\n",
+                              vlan->vid);
+                       kfree(vlan);
+                       return -EINVAL;
+               }
+               vlan->configured = true;
+
+               /* vlan0 filter doesn't consume from our quota */
+               if (vlan->vid != 0)
+                       edev->configured_vlans++;
+       } else {
+               /* Out of quota; Activate accept-any-VLAN mode */
+               if (!edev->non_configured_vlans)
+                       qede_config_accept_any_vlan(edev, true);
+
+               edev->non_configured_vlans++;
+       }
+
+       list_add(&vlan->list, &edev->vlan_list);
+
+       return 0;
+}
+
+static void qede_del_vlan_from_list(struct qede_dev *edev,
+                                   struct qede_vlan *vlan)
+{
+       /* vlan0 filter doesn't consume from our quota */
+       if (vlan->vid != 0) {
+               if (vlan->configured)
+                       edev->configured_vlans--;
+               else
+                       edev->non_configured_vlans--;
+       }
+
+       list_del(&vlan->list);
+       kfree(vlan);
+}
+
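+/* Walk the cached VLAN list and try to configure any entries not yet
+ * programmed in HW, falling back to accept-any-VLAN mode when the device has
+ * run out of VLAN filter credits.
+ */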
+static int qede_configure_vlan_filters(struct qede_dev *edev)
+{
+       int rc = 0, real_rc = 0, accept_any_vlan = 0;
+       struct qed_dev_eth_info *dev_info;
+       struct qede_vlan *vlan = NULL;
+
+       if (list_empty(&edev->vlan_list))
+               return 0;
+
+       dev_info = &edev->dev_info;
+
+       /* Configure non-configured vlans */
+       list_for_each_entry(vlan, &edev->vlan_list, list) {
+               if (vlan->configured)
+                       continue;
+
+               /* We have used all our credits, now enable accept_any_vlan */
+               if ((vlan->vid != 0) &&
+                   (edev->configured_vlans == dev_info->num_vlan_filters)) {
+                       accept_any_vlan = 1;
+                       continue;
+               }
+
+               DP_VERBOSE(edev, NETIF_MSG_IFUP, "Adding vlan %d\n", vlan->vid);
+
+               rc = qede_set_ucast_rx_vlan(edev, QED_FILTER_XCAST_TYPE_ADD,
+                                           vlan->vid);
+               if (rc) {
+                       DP_ERR(edev, "Failed to configure VLAN %u\n",
+                              vlan->vid);
+                       real_rc = rc;
+                       continue;
+               }
+
+               vlan->configured = true;
+               /* vlan0 filter doesn't consume our VLAN filter's quota */
+               if (vlan->vid != 0) {
+                       edev->non_configured_vlans--;
+                       edev->configured_vlans++;
+               }
+       }
+
+       /* enable accept_any_vlan mode if we have more VLANs than credits,
+        * or remove accept_any_vlan mode if we've actually removed
+        * a non-configured vlan, and all remaining vlans are truly configured.
+        */
+
+       if (accept_any_vlan)
+               qede_config_accept_any_vlan(edev, true);
+       else if (!edev->non_configured_vlans)
+               qede_config_accept_any_vlan(edev, false);
+
+       return real_rc;
+}
+
+static int qede_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
+{
+       struct qede_dev *edev = netdev_priv(dev);
+       struct qede_vlan *vlan = NULL;
+       int rc;
+
+       DP_VERBOSE(edev, NETIF_MSG_IFDOWN, "Removing vlan 0x%04x\n", vid);
+
+       /* Find whether entry exists */
+       list_for_each_entry(vlan, &edev->vlan_list, list)
+               if (vlan->vid == vid)
+                       break;
+
+       if (!vlan || (vlan->vid != vid)) {
+               DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN),
+                          "Vlan isn't configured\n");
+               return 0;
+       }
+
+       if (edev->state != QEDE_STATE_OPEN) {
+               /* As the interface is already down, we don't have a VPORT
+                * instance to remove the vlan filter from, so just update
+                * the vlan list.
+                */
+               DP_VERBOSE(edev, NETIF_MSG_IFDOWN,
+                          "Interface is down, removing VLAN from list only\n");
+               qede_del_vlan_from_list(edev, vlan);
+               return 0;
+       }
+
+       /* Remove vlan */
+       if (vlan->configured) {
+               rc = qede_set_ucast_rx_vlan(edev, QED_FILTER_XCAST_TYPE_DEL,
+                                           vid);
+               if (rc) {
+                       DP_ERR(edev, "Failed to remove VLAN %d\n", vid);
+                       return -EINVAL;
+               }
+       }
+
+       qede_del_vlan_from_list(edev, vlan);
+
+       /* We have removed a VLAN - try to see if we can
+        * configure non-configured VLAN from the list.
+        */
+       rc = qede_configure_vlan_filters(edev);
+
+       return rc;
+}
+
+static void qede_vlan_mark_nonconfigured(struct qede_dev *edev)
+{
+       struct qede_vlan *vlan = NULL;
+
+       if (list_empty(&edev->vlan_list))
+               return;
+
+       list_for_each_entry(vlan, &edev->vlan_list, list) {
+               if (!vlan->configured)
+                       continue;
+
+               vlan->configured = false;
+
+               /* vlan0 filter doesn't consume from our quota */
+               if (vlan->vid != 0) {
+                       edev->non_configured_vlans++;
+                       edev->configured_vlans--;
+               }
+
+               DP_VERBOSE(edev, NETIF_MSG_IFDOWN,
+                          "marked vlan %d as non-configured\n",
+                          vlan->vid);
+       }
+
+       edev->accept_any_vlan = false;
+}
+
+int qede_set_features(struct net_device *dev, netdev_features_t features)
+{
+       struct qede_dev *edev = netdev_priv(dev);
+       netdev_features_t changes = features ^ dev->features;
+       bool need_reload = false;
+
+       /* No action needed if hardware GRO is disabled during driver load */
+       if (changes & NETIF_F_GRO) {
+               if (dev->features & NETIF_F_GRO)
+                       need_reload = !edev->gro_disable;
+               else
+                       need_reload = edev->gro_disable;
+       }
+
+       if (need_reload && netif_running(edev->ndev)) {
+               dev->features = features;
+               qede_reload(edev, NULL, NULL);
+               return 1;
+       }
+
+       return 0;
+}
+
+static void qede_udp_tunnel_add(struct net_device *dev,
+                               struct udp_tunnel_info *ti)
+{
+       struct qede_dev *edev = netdev_priv(dev);
+       u16 t_port = ntohs(ti->port);
+
+       switch (ti->type) {
+       case UDP_TUNNEL_TYPE_VXLAN:
+               if (edev->vxlan_dst_port)
+                       return;
+
+               edev->vxlan_dst_port = t_port;
+
+               DP_VERBOSE(edev, QED_MSG_DEBUG, "Added vxlan port=%d",
+                          t_port);
+
+               set_bit(QEDE_SP_VXLAN_PORT_CONFIG, &edev->sp_flags);
+               break;
+       case UDP_TUNNEL_TYPE_GENEVE:
+               if (edev->geneve_dst_port)
+                       return;
+
+               edev->geneve_dst_port = t_port;
+
+               DP_VERBOSE(edev, QED_MSG_DEBUG, "Added geneve port=%d",
+                          t_port);
+               set_bit(QEDE_SP_GENEVE_PORT_CONFIG, &edev->sp_flags);
+               break;
+       default:
+               return;
+       }
+
+       schedule_delayed_work(&edev->sp_task, 0);
+}
+
+static void qede_udp_tunnel_del(struct net_device *dev,
+                               struct udp_tunnel_info *ti)
+{
+       struct qede_dev *edev = netdev_priv(dev);
+       u16 t_port = ntohs(ti->port);
+
+       switch (ti->type) {
+       case UDP_TUNNEL_TYPE_VXLAN:
+               if (t_port != edev->vxlan_dst_port)
+                       return;
+
+               edev->vxlan_dst_port = 0;
+
+               DP_VERBOSE(edev, QED_MSG_DEBUG, "Deleted vxlan port=%d",
+                          t_port);
+
+               set_bit(QEDE_SP_VXLAN_PORT_CONFIG, &edev->sp_flags);
+               break;
+       case UDP_TUNNEL_TYPE_GENEVE:
+               if (t_port != edev->geneve_dst_port)
+                       return;
+
+               edev->geneve_dst_port = 0;
+
+               DP_VERBOSE(edev, QED_MSG_DEBUG, "Deleted geneve port=%d",
+                          t_port);
+               set_bit(QEDE_SP_GENEVE_PORT_CONFIG, &edev->sp_flags);
+               break;
+       default:
+               return;
+       }
+
+       schedule_delayed_work(&edev->sp_task, 0);
+}
+
+static const struct net_device_ops qede_netdev_ops = {
+       .ndo_open = qede_open,
+       .ndo_stop = qede_close,
+       .ndo_start_xmit = qede_start_xmit,
+       .ndo_set_rx_mode = qede_set_rx_mode,
+       .ndo_set_mac_address = qede_set_mac_addr,
+       .ndo_validate_addr = eth_validate_addr,
+       .ndo_change_mtu = qede_change_mtu,
+#ifdef CONFIG_QED_SRIOV
+       .ndo_set_vf_mac = qede_set_vf_mac,
+       .ndo_set_vf_vlan = qede_set_vf_vlan,
+#endif
+       .ndo_vlan_rx_add_vid = qede_vlan_rx_add_vid,
+       .ndo_vlan_rx_kill_vid = qede_vlan_rx_kill_vid,
+       .ndo_set_features = qede_set_features,
+       .ndo_get_stats64 = qede_get_stats64,
+#ifdef CONFIG_QED_SRIOV
+       .ndo_set_vf_link_state = qede_set_vf_link_state,
+       .ndo_set_vf_spoofchk = qede_set_vf_spoofchk,
+       .ndo_get_vf_config = qede_get_vf_config,
+       .ndo_set_vf_rate = qede_set_vf_rate,
+#endif
+       .ndo_udp_tunnel_add = qede_udp_tunnel_add,
+       .ndo_udp_tunnel_del = qede_udp_tunnel_del,
+};
+
+/* -------------------------------------------------------------------------
+ * START OF PROBE / REMOVE
+ * -------------------------------------------------------------------------
+ */
+
+static struct qede_dev *qede_alloc_etherdev(struct qed_dev *cdev,
+                                           struct pci_dev *pdev,
+                                           struct qed_dev_eth_info *info,
+                                           u32 dp_module,
+                                           u8 dp_level)
+{
+       struct net_device *ndev;
+       struct qede_dev *edev;
+
+       ndev = alloc_etherdev_mqs(sizeof(*edev),
+                                 info->num_queues,
+                                 info->num_queues);
+       if (!ndev) {
+               pr_err("etherdev allocation failed\n");
+               return NULL;
+       }
+
+       edev = netdev_priv(ndev);
+       edev->ndev = ndev;
+       edev->cdev = cdev;
+       edev->pdev = pdev;
+       edev->dp_module = dp_module;
+       edev->dp_level = dp_level;
+       edev->ops = qed_ops;
+       edev->q_num_rx_buffers = NUM_RX_BDS_DEF;
+       edev->q_num_tx_buffers = NUM_TX_BDS_DEF;
+
+       SET_NETDEV_DEV(ndev, &pdev->dev);
+
+       memset(&edev->stats, 0, sizeof(edev->stats));
+       memcpy(&edev->dev_info, info, sizeof(*info));
+
+       edev->num_tc = edev->dev_info.num_tc;
+
+       INIT_LIST_HEAD(&edev->vlan_list);
+
+       return edev;
+}
+
+static void qede_init_ndev(struct qede_dev *edev)
+{
+       struct net_device *ndev = edev->ndev;
+       struct pci_dev *pdev = edev->pdev;
+       u32 hw_features;
+
+       pci_set_drvdata(pdev, ndev);
+
+       ndev->mem_start = edev->dev_info.common.pci_mem_start;
+       ndev->base_addr = ndev->mem_start;
+       ndev->mem_end = edev->dev_info.common.pci_mem_end;
+       ndev->irq = edev->dev_info.common.pci_irq;
+
+       ndev->watchdog_timeo = TX_TIMEOUT;
+
+       ndev->netdev_ops = &qede_netdev_ops;
+
+       qede_set_ethtool_ops(ndev);
+
+       /* user-changeable features */
+       hw_features = NETIF_F_GRO | NETIF_F_SG |
+                     NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
+                     NETIF_F_TSO | NETIF_F_TSO6;
+
+       /* Encap features */
+       hw_features |= NETIF_F_GSO_GRE | NETIF_F_GSO_UDP_TUNNEL |
+                      NETIF_F_TSO_ECN;
+       ndev->hw_enc_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
+                               NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO_ECN |
+                               NETIF_F_TSO6 | NETIF_F_GSO_GRE |
+                               NETIF_F_GSO_UDP_TUNNEL | NETIF_F_RXCSUM;
+
+       ndev->vlan_features = hw_features | NETIF_F_RXHASH | NETIF_F_RXCSUM |
+                             NETIF_F_HIGHDMA;
+       ndev->features = hw_features | NETIF_F_RXHASH | NETIF_F_RXCSUM |
+                        NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HIGHDMA |
+                        NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_CTAG_TX;
+
+       ndev->hw_features = hw_features;
+
+       /* Set network device HW mac */
+       ether_addr_copy(edev->ndev->dev_addr, edev->dev_info.common.hw_mac);
+}
+
+/* This function converts the 32b debug param into two params: level and module.
+ * Input 32b decoding:
+ * b31 - enable all NOTICE prints. NOTICE prints are for deviation from the
+ * 'happy' flow, e.g. memory allocation failed.
+ * b30 - enable all INFO prints. INFO prints are for major steps in the flow
+ * and provide important parameters.
+ * b29-b0 - per-module bitmap, where each bit enables VERBOSE prints of that
+ * module. VERBOSE prints are for tracking a specific flow at low level.
+ *
+ * Note that the resulting level is the lowest (most verbose) one requested.
+ */
+void qede_config_debug(uint debug, u32 *p_dp_module, u8 *p_dp_level)
+{
+       *p_dp_level = QED_LEVEL_NOTICE;
+       *p_dp_module = 0;
+
+       if (debug & QED_LOG_VERBOSE_MASK) {
+               *p_dp_level = QED_LEVEL_VERBOSE;
+               *p_dp_module = (debug & 0x3FFFFFFF);
+       } else if (debug & QED_LOG_INFO_MASK) {
+               *p_dp_level = QED_LEVEL_INFO;
+       } else if (debug & QED_LOG_NOTICE_MASK) {
+               *p_dp_level = QED_LEVEL_NOTICE;
+       }
+}
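
For illustration only (not part of the patch), here are two hypothetical module-parameter values and how the routine above decodes them; the QED_LOG_*_MASK constants are assumed to follow the b31/b30/b29-b0 layout described in the comment:

	u32 dp_module;
	u8 dp_level;

	/* Any of bits b29-b0 set: VERBOSE level, per-module bitmap preserved */
	qede_config_debug(0x0000000a, &dp_module, &dp_level);
	/* dp_level == QED_LEVEL_VERBOSE, dp_module == 0x0000000a */

	/* Only b30 set: INFO level, no per-module verbosity */
	qede_config_debug(0x40000000, &dp_module, &dp_level);
	/* dp_level == QED_LEVEL_INFO, dp_module == 0 */
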
+
+static void qede_free_fp_array(struct qede_dev *edev)
+{
+       if (edev->fp_array) {
+               struct qede_fastpath *fp;
+               int i;
+
+               for_each_rss(i) {
+                       fp = &edev->fp_array[i];
+
+                       kfree(fp->sb_info);
+                       kfree(fp->rxq);
+                       kfree(fp->txqs);
+               }
+               kfree(edev->fp_array);
+       }
+       edev->num_rss = 0;
+}
+
+static int qede_alloc_fp_array(struct qede_dev *edev)
+{
+       struct qede_fastpath *fp;
+       int i;
+
+       edev->fp_array = kcalloc(QEDE_RSS_CNT(edev),
+                                sizeof(*edev->fp_array), GFP_KERNEL);
+       if (!edev->fp_array) {
+               DP_NOTICE(edev, "fp array allocation failed\n");
+               goto err;
+       }
+
+       for_each_rss(i) {
+               fp = &edev->fp_array[i];
+
+               fp->sb_info = kcalloc(1, sizeof(*fp->sb_info), GFP_KERNEL);
+               if (!fp->sb_info) {
+                       DP_NOTICE(edev, "sb info struct allocation failed\n");
+                       goto err;
+               }
+
+               fp->rxq = kcalloc(1, sizeof(*fp->rxq), GFP_KERNEL);
+               if (!fp->rxq) {
+                       DP_NOTICE(edev, "RXQ struct allocation failed\n");
+                       goto err;
+               }
+
+               fp->txqs = kcalloc(edev->num_tc, sizeof(*fp->txqs), GFP_KERNEL);
+               if (!fp->txqs) {
+                       DP_NOTICE(edev, "TXQ array allocation failed\n");
+                       goto err;
+               }
+       }
+
+       return 0;
+err:
+       qede_free_fp_array(edev);
+       return -ENOMEM;
+}
+
+static void qede_sp_task(struct work_struct *work)
+{
+       struct qede_dev *edev = container_of(work, struct qede_dev,
+                                            sp_task.work);
+       struct qed_dev *cdev = edev->cdev;
+
+       mutex_lock(&edev->qede_lock);
+
+       if (edev->state == QEDE_STATE_OPEN) {
+               if (test_and_clear_bit(QEDE_SP_RX_MODE, &edev->sp_flags))
+                       qede_config_rx_mode(edev->ndev);
+       }
+
+       if (test_and_clear_bit(QEDE_SP_VXLAN_PORT_CONFIG, &edev->sp_flags)) {
+               struct qed_tunn_params tunn_params;
+
+               memset(&tunn_params, 0, sizeof(tunn_params));
+               tunn_params.update_vxlan_port = 1;
+               tunn_params.vxlan_port = edev->vxlan_dst_port;
+               qed_ops->tunn_config(cdev, &tunn_params);
+       }
+
+       if (test_and_clear_bit(QEDE_SP_GENEVE_PORT_CONFIG, &edev->sp_flags)) {
+               struct qed_tunn_params tunn_params;
+
+               memset(&tunn_params, 0, sizeof(tunn_params));
+               tunn_params.update_geneve_port = 1;
+               tunn_params.geneve_port = edev->geneve_dst_port;
+               qed_ops->tunn_config(cdev, &tunn_params);
+       }
+
+       mutex_unlock(&edev->qede_lock);
+}
+
+static void qede_update_pf_params(struct qed_dev *cdev)
+{
+       struct qed_pf_params pf_params;
+
+       /* 64 rx + 64 tx */
+       memset(&pf_params, 0, sizeof(struct qed_pf_params));
+       pf_params.eth_pf_params.num_cons = 128;
+       qed_ops->common->update_pf_params(cdev, &pf_params);
+}
+
+enum qede_probe_mode {
+       QEDE_PROBE_NORMAL,
+};
+
+static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level,
+                       bool is_vf, enum qede_probe_mode mode)
+{
+       struct qed_probe_params probe_params;
+       struct qed_slowpath_params params;
+       struct qed_dev_eth_info dev_info;
+       struct qede_dev *edev;
+       struct qed_dev *cdev;
+       int rc;
+
+       if (unlikely(dp_level & QED_LEVEL_INFO))
+               pr_notice("Starting qede probe\n");
+
+       memset(&probe_params, 0, sizeof(probe_params));
+       probe_params.protocol = QED_PROTOCOL_ETH;
+       probe_params.dp_module = dp_module;
+       probe_params.dp_level = dp_level;
+       probe_params.is_vf = is_vf;
+       cdev = qed_ops->common->probe(pdev, &probe_params);
+       if (!cdev) {
+               rc = -ENODEV;
+               goto err0;
+       }
+
+       qede_update_pf_params(cdev);
+
+       /* Start the Slowpath-process */
+       memset(&params, 0, sizeof(struct qed_slowpath_params));
+       params.int_mode = QED_INT_MODE_MSIX;
+       params.drv_major = QEDE_MAJOR_VERSION;
+       params.drv_minor = QEDE_MINOR_VERSION;
+       params.drv_rev = QEDE_REVISION_VERSION;
+       params.drv_eng = QEDE_ENGINEERING_VERSION;
+       strlcpy(params.name, "qede LAN", QED_DRV_VER_STR_SIZE);
+       rc = qed_ops->common->slowpath_start(cdev, &params);
+       if (rc) {
+               pr_notice("Cannot start slowpath\n");
+               goto err1;
+       }
+
+       /* Learn information crucial for qede to progress */
+       rc = qed_ops->fill_dev_info(cdev, &dev_info);
+       if (rc)
+               goto err2;
+
+       edev = qede_alloc_etherdev(cdev, pdev, &dev_info, dp_module,
+                                  dp_level);
+       if (!edev) {
+               rc = -ENOMEM;
+               goto err2;
+       }
+
+       if (is_vf)
+               edev->flags |= QEDE_FLAG_IS_VF;
+
+       qede_init_ndev(edev);
+
+       rc = register_netdev(edev->ndev);
+       if (rc) {
+               DP_NOTICE(edev, "Cannot register net-device\n");
+               goto err3;
+       }
+
+       edev->ops->common->set_id(cdev, edev->ndev->name, DRV_MODULE_VERSION);
+
+       edev->ops->register_ops(cdev, &qede_ll_ops, edev);
+
+#ifdef CONFIG_DCB
+       if (!IS_VF(edev))
+               qede_set_dcbnl_ops(edev->ndev);
+#endif
+
+       INIT_DELAYED_WORK(&edev->sp_task, qede_sp_task);
+       mutex_init(&edev->qede_lock);
+       edev->rx_copybreak = QEDE_RX_HDR_SIZE;
+
+       DP_INFO(edev, "Ending successfully qede probe\n");
+
+       return 0;
+
+err3:
+       free_netdev(edev->ndev);
+err2:
+       qed_ops->common->slowpath_stop(cdev);
+err1:
+       qed_ops->common->remove(cdev);
+err0:
+       return rc;
+}
+
+static int qede_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+       bool is_vf = false;
+       u32 dp_module = 0;
+       u8 dp_level = 0;
+
+       switch ((enum qede_pci_private)id->driver_data) {
+       case QEDE_PRIVATE_VF:
+               if (debug & QED_LOG_VERBOSE_MASK)
+                       dev_err(&pdev->dev, "Probing a VF\n");
+               is_vf = true;
+               break;
+       default:
+               if (debug & QED_LOG_VERBOSE_MASK)
+                       dev_err(&pdev->dev, "Probing a PF\n");
+       }
+
+       qede_config_debug(debug, &dp_module, &dp_level);
+
+       return __qede_probe(pdev, dp_module, dp_level, is_vf,
+                           QEDE_PROBE_NORMAL);
+}
+
+enum qede_remove_mode {
+       QEDE_REMOVE_NORMAL,
+};
+
+static void __qede_remove(struct pci_dev *pdev, enum qede_remove_mode mode)
+{
+       struct net_device *ndev = pci_get_drvdata(pdev);
+       struct qede_dev *edev = netdev_priv(ndev);
+       struct qed_dev *cdev = edev->cdev;
+
+       DP_INFO(edev, "Starting qede_remove\n");
+
+       cancel_delayed_work_sync(&edev->sp_task);
+       unregister_netdev(ndev);
+
+       edev->ops->common->set_power_state(cdev, PCI_D0);
+
+       pci_set_drvdata(pdev, NULL);
+
+       free_netdev(ndev);
+
+       /* Use global ops since we've freed edev */
+       qed_ops->common->slowpath_stop(cdev);
+       qed_ops->common->remove(cdev);
+
+       pr_notice("Ending successfully qede_remove\n");
+}
+
+static void qede_remove(struct pci_dev *pdev)
+{
+       __qede_remove(pdev, QEDE_REMOVE_NORMAL);
+}
+
+/* -------------------------------------------------------------------------
+ * START OF LOAD / UNLOAD
+ * -------------------------------------------------------------------------
+ */
+
+static int qede_set_num_queues(struct qede_dev *edev)
+{
+       int rc;
+       u16 rss_num;
+
+       /* Setup queues according to possible resources */
+       if (edev->req_rss)
+               rss_num = edev->req_rss;
+       else
+               rss_num = netif_get_num_default_rss_queues() *
+                         edev->dev_info.common.num_hwfns;
+
+       rss_num = min_t(u16, QEDE_MAX_RSS_CNT(edev), rss_num);
+
+       rc = edev->ops->common->set_fp_int(edev->cdev, rss_num);
+       if (rc > 0) {
+               /* Managed to request interrupts for our queues */
+               edev->num_rss = rc;
+               DP_INFO(edev, "Managed %d [of %d] RSS queues\n",
+                       QEDE_RSS_CNT(edev), rss_num);
+               rc = 0;
+       }
+       return rc;
+}
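
A rough worked example of the sizing above (hypothetical numbers; netif_get_num_default_rss_queues() is assumed here to return min(8, number of online CPUs)):

	/* 16 online CPUs, dual-hwfn adapter:
	 *   rss_num = 8 * 2 = 16                        requested
	 *   rss_num = min(QEDE_MAX_RSS_CNT(edev), 16)   capped by device limit
	 *   set_fp_int() may still grant fewer MSI-X vectors,
	 *   e.g. rc = 12  ->  edev->num_rss = 12
	 */
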
+
+static void qede_free_mem_sb(struct qede_dev *edev,
+                            struct qed_sb_info *sb_info)
+{
+       if (sb_info->sb_virt)
+               dma_free_coherent(&edev->pdev->dev, sizeof(*sb_info->sb_virt),
+                                 (void *)sb_info->sb_virt, sb_info->sb_phys);
+}
+
+/* This function allocates fast-path status block memory */
+static int qede_alloc_mem_sb(struct qede_dev *edev,
+                            struct qed_sb_info *sb_info,
+                            u16 sb_id)
+{
+       struct status_block *sb_virt;
+       dma_addr_t sb_phys;
+       int rc;
+
+       sb_virt = dma_alloc_coherent(&edev->pdev->dev,
+                                    sizeof(*sb_virt),
+                                    &sb_phys, GFP_KERNEL);
+       if (!sb_virt) {
+               DP_ERR(edev, "Status block allocation failed\n");
+               return -ENOMEM;
+       }
+
+       rc = edev->ops->common->sb_init(edev->cdev, sb_info,
+                                       sb_virt, sb_phys, sb_id,
+                                       QED_SB_TYPE_L2_QUEUE);
+       if (rc) {
+               DP_ERR(edev, "Status block initialization failed\n");
+               dma_free_coherent(&edev->pdev->dev, sizeof(*sb_virt),
+                                 sb_virt, sb_phys);
+               return rc;
+       }
+
+       return 0;
+}
+
+static void qede_free_rx_buffers(struct qede_dev *edev,
+                                struct qede_rx_queue *rxq)
+{
+       u16 i;
+
+       for (i = rxq->sw_rx_cons; i != rxq->sw_rx_prod; i++) {
+               struct sw_rx_data *rx_buf;
+               struct page *data;
+
+               rx_buf = &rxq->sw_rx_ring[i & NUM_RX_BDS_MAX];
+               data = rx_buf->data;
+
+               dma_unmap_page(&edev->pdev->dev,
+                              rx_buf->mapping,
+                              PAGE_SIZE, DMA_FROM_DEVICE);
+
+               rx_buf->data = NULL;
+               __free_page(data);
+       }
+}
+
+static void qede_free_sge_mem(struct qede_dev *edev,
+                             struct qede_rx_queue *rxq) {
+       int i;
+
+       if (edev->gro_disable)
+               return;
+
+       for (i = 0; i < ETH_TPA_MAX_AGGS_NUM; i++) {
+               struct qede_agg_info *tpa_info = &rxq->tpa_info[i];
+               struct sw_rx_data *replace_buf = &tpa_info->replace_buf;
+
+               if (replace_buf->data) {
+                       dma_unmap_page(&edev->pdev->dev,
+                                      replace_buf->mapping,
+                                      PAGE_SIZE, DMA_FROM_DEVICE);
+                       __free_page(replace_buf->data);
+               }
+       }
+}
+
+static void qede_free_mem_rxq(struct qede_dev *edev,
+                             struct qede_rx_queue *rxq)
+{
+       qede_free_sge_mem(edev, rxq);
+
+       /* Free rx buffers */
+       qede_free_rx_buffers(edev, rxq);
+
+       /* Free the parallel SW ring */
+       kfree(rxq->sw_rx_ring);
+
+       /* Free the real RQ ring used by FW */
+       edev->ops->common->chain_free(edev->cdev, &rxq->rx_bd_ring);
+       edev->ops->common->chain_free(edev->cdev, &rxq->rx_comp_ring);
+}
+
+static int qede_alloc_rx_buffer(struct qede_dev *edev,
+                               struct qede_rx_queue *rxq)
+{
+       struct sw_rx_data *sw_rx_data;
+       struct eth_rx_bd *rx_bd;
+       dma_addr_t mapping;
+       struct page *data;
+       u16 rx_buf_size;
+
+       rx_buf_size = rxq->rx_buf_size;
+
+       data = alloc_pages(GFP_ATOMIC, 0);
+       if (unlikely(!data)) {
+               DP_NOTICE(edev, "Failed to allocate Rx data [page]\n");
+               return -ENOMEM;
+       }
+
+       /* Map the entire page, as it may later be split into
+        * multiple RX buffer segments.
+        */
+       mapping = dma_map_page(&edev->pdev->dev, data, 0,
+                              PAGE_SIZE, DMA_FROM_DEVICE);
+       if (unlikely(dma_mapping_error(&edev->pdev->dev, mapping))) {
+               __free_page(data);
+               DP_NOTICE(edev, "Failed to map Rx buffer\n");
+               return -ENOMEM;
+       }
+
+       sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_prod & NUM_RX_BDS_MAX];
+       sw_rx_data->page_offset = 0;
+       sw_rx_data->data = data;
+       sw_rx_data->mapping = mapping;
+
+       /* Advance PROD and get BD pointer */
+       rx_bd = (struct eth_rx_bd *)qed_chain_produce(&rxq->rx_bd_ring);
+       WARN_ON(!rx_bd);
+       rx_bd->addr.hi = cpu_to_le32(upper_32_bits(mapping));
+       rx_bd->addr.lo = cpu_to_le32(lower_32_bits(mapping));
+
+       rxq->sw_rx_prod++;
+
+       return 0;
+}
+
+static int qede_alloc_sge_mem(struct qede_dev *edev,
+                             struct qede_rx_queue *rxq)
+{
+       dma_addr_t mapping;
+       int i;
+
+       if (edev->gro_disable)
+               return 0;
+
+       if (edev->ndev->mtu > PAGE_SIZE) {
+               edev->gro_disable = 1;
+               return 0;
+       }
+
+       for (i = 0; i < ETH_TPA_MAX_AGGS_NUM; i++) {
+               struct qede_agg_info *tpa_info = &rxq->tpa_info[i];
+               struct sw_rx_data *replace_buf = &tpa_info->replace_buf;
+
+               replace_buf->data = alloc_pages(GFP_ATOMIC, 0);
+               if (unlikely(!replace_buf->data)) {
+                       DP_NOTICE(edev,
+                                 "Failed to allocate TPA skb pool [replacement buffer]\n");
+                       goto err;
+               }
+
+               mapping = dma_map_page(&edev->pdev->dev, replace_buf->data, 0,
+                                      rxq->rx_buf_size, DMA_FROM_DEVICE);
+               if (unlikely(dma_mapping_error(&edev->pdev->dev, mapping))) {
+                       DP_NOTICE(edev,
+                                 "Failed to map TPA replacement buffer\n");
+                       goto err;
+               }
+
+               replace_buf->mapping = mapping;
+               tpa_info->replace_buf.page_offset = 0;
+
+               tpa_info->replace_buf_mapping = mapping;
+               tpa_info->agg_state = QEDE_AGG_STATE_NONE;
+       }
+
+       return 0;
+err:
+       qede_free_sge_mem(edev, rxq);
+       edev->gro_disable = 1;
+       return -ENOMEM;
+}
+
+/* This function allocates all memory needed per Rx queue */
+static int qede_alloc_mem_rxq(struct qede_dev *edev,
+                             struct qede_rx_queue *rxq)
+{
+       int i, rc, size;
+
+       rxq->num_rx_buffers = edev->q_num_rx_buffers;
+
+       rxq->rx_buf_size = NET_IP_ALIGN + ETH_OVERHEAD +
+                          edev->ndev->mtu;
+       if (rxq->rx_buf_size > PAGE_SIZE)
+               rxq->rx_buf_size = PAGE_SIZE;
+
+       /* Segment size to split a page into multiple equal parts */
+       rxq->rx_buf_seg_size = roundup_pow_of_two(rxq->rx_buf_size);
+
+       /* Allocate the parallel driver ring for Rx buffers */
+       size = sizeof(*rxq->sw_rx_ring) * RX_RING_SIZE;
+       rxq->sw_rx_ring = kzalloc(size, GFP_KERNEL);
+       if (!rxq->sw_rx_ring) {
+               DP_ERR(edev, "Rx buffers ring allocation failed\n");
+               rc = -ENOMEM;
+               goto err;
+       }
+
+       /* Allocate FW Rx ring  */
+       rc = edev->ops->common->chain_alloc(edev->cdev,
+                                           QED_CHAIN_USE_TO_CONSUME_PRODUCE,
+                                           QED_CHAIN_MODE_NEXT_PTR,
+                                           QED_CHAIN_CNT_TYPE_U16,
+                                           RX_RING_SIZE,
+                                           sizeof(struct eth_rx_bd),
+                                           &rxq->rx_bd_ring);
+
+       if (rc)
+               goto err;
+
+       /* Allocate FW completion ring */
+       rc = edev->ops->common->chain_alloc(edev->cdev,
+                                           QED_CHAIN_USE_TO_CONSUME,
+                                           QED_CHAIN_MODE_PBL,
+                                           QED_CHAIN_CNT_TYPE_U16,
+                                           RX_RING_SIZE,
+                                           sizeof(union eth_rx_cqe),
+                                           &rxq->rx_comp_ring);
+       if (rc)
+               goto err;
+
+       /* Allocate buffers for the Rx ring */
+       for (i = 0; i < rxq->num_rx_buffers; i++) {
+               rc = qede_alloc_rx_buffer(edev, rxq);
+               if (rc) {
+                       DP_ERR(edev,
+                              "Rx buffers allocation failed at index %d\n", i);
+                       goto err;
+               }
+       }
+
+       rc = qede_alloc_sge_mem(edev, rxq);
+err:
+       return rc;
+}
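
A hypothetical buffer-sizing example for the computation above, assuming MTU 1500, a 4 KB page, and ETH_OVERHEAD (defined in qede.h) amounting to a few tens of bytes of header/VLAN/CRC overhead:

	/*   rx_buf_size     = NET_IP_ALIGN(2) + ETH_OVERHEAD + 1500  ~= 1.5 KB
	 *   rx_buf_seg_size = roundup_pow_of_two(rx_buf_size)         = 2048
	 *
	 * so each page mapped in qede_alloc_rx_buffer() can later be split
	 * into two 2 KB Rx segments.
	 */
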
+
+static void qede_free_mem_txq(struct qede_dev *edev,
+                             struct qede_tx_queue *txq)
+{
+       /* Free the parallel SW ring */
+       kfree(txq->sw_tx_ring);
+
+       /* Free the real Tx ring used by FW */
+       edev->ops->common->chain_free(edev->cdev, &txq->tx_pbl);
+}
+
+/* This function allocates all memory needed per Tx queue */
+static int qede_alloc_mem_txq(struct qede_dev *edev,
+                             struct qede_tx_queue *txq)
+{
+       int size, rc;
+       union eth_tx_bd_types *p_virt;
+
+       txq->num_tx_buffers = edev->q_num_tx_buffers;
+
+       /* Allocate the parallel driver ring for Tx buffers */
+       size = sizeof(*txq->sw_tx_ring) * NUM_TX_BDS_MAX;
+       txq->sw_tx_ring = kzalloc(size, GFP_KERNEL);
+       if (!txq->sw_tx_ring) {
+               DP_NOTICE(edev, "Tx buffers ring allocation failed\n");
+               goto err;
+       }
+
+       rc = edev->ops->common->chain_alloc(edev->cdev,
+                                           QED_CHAIN_USE_TO_CONSUME_PRODUCE,
+                                           QED_CHAIN_MODE_PBL,
+                                           QED_CHAIN_CNT_TYPE_U16,
+                                           NUM_TX_BDS_MAX,
+                                           sizeof(*p_virt), &txq->tx_pbl);
+       if (rc)
+               goto err;
+
+       return 0;
+
+err:
+       qede_free_mem_txq(edev, txq);
+       return -ENOMEM;
+}
+
+/* This function frees all memory of a single fp */
+static void qede_free_mem_fp(struct qede_dev *edev,
+                            struct qede_fastpath *fp)
+{
+       int tc;
+
+       qede_free_mem_sb(edev, fp->sb_info);
+
+       qede_free_mem_rxq(edev, fp->rxq);
+
+       for (tc = 0; tc < edev->num_tc; tc++)
+               qede_free_mem_txq(edev, &fp->txqs[tc]);
+}
+
+/* This function allocates all memory needed for a single fp (i.e. an entity
+ * which contains a status block, one rx queue and multiple per-TC tx queues).
+ */
+static int qede_alloc_mem_fp(struct qede_dev *edev,
+                            struct qede_fastpath *fp)
+{
+       int rc, tc;
+
+       rc = qede_alloc_mem_sb(edev, fp->sb_info, fp->rss_id);
+       if (rc)
+               goto err;
+
+       rc = qede_alloc_mem_rxq(edev, fp->rxq);
+       if (rc)
+               goto err;
+
+       for (tc = 0; tc < edev->num_tc; tc++) {
+               rc = qede_alloc_mem_txq(edev, &fp->txqs[tc]);
+               if (rc)
+                       goto err;
+       }
+
+       return 0;
+err:
+       return rc;
+}
+
+static void qede_free_mem_load(struct qede_dev *edev)
+{
+       int i;
+
+       for_each_rss(i) {
+               struct qede_fastpath *fp = &edev->fp_array[i];
+
+               qede_free_mem_fp(edev, fp);
+       }
+}
+
+/* This function allocates all qede memory at NIC load. */
+static int qede_alloc_mem_load(struct qede_dev *edev)
+{
+       int rc = 0, rss_id;
+
+       for (rss_id = 0; rss_id < QEDE_RSS_CNT(edev); rss_id++) {
+               struct qede_fastpath *fp = &edev->fp_array[rss_id];
+
+               rc = qede_alloc_mem_fp(edev, fp);
+               if (rc) {
+                       DP_ERR(edev,
+                              "Failed to allocate memory for fastpath - rss id = %d\n",
+                              rss_id);
+                       qede_free_mem_load(edev);
+                       return rc;
+               }
+       }
+
+       return 0;
+}
+
+/* This function inits fp content and resets the SB, RXQ and TXQ structures */
+static void qede_init_fp(struct qede_dev *edev)
+{
+       int rss_id, txq_index, tc;
+       struct qede_fastpath *fp;
+
+       for_each_rss(rss_id) {
+               fp = &edev->fp_array[rss_id];
+
+               fp->edev = edev;
+               fp->rss_id = rss_id;
+
+               memset((void *)&fp->napi, 0, sizeof(fp->napi));
+
+               memset((void *)fp->sb_info, 0, sizeof(*fp->sb_info));
+
+               memset((void *)fp->rxq, 0, sizeof(*fp->rxq));
+               fp->rxq->rxq_id = rss_id;
+
+               memset((void *)fp->txqs, 0, (edev->num_tc * sizeof(*fp->txqs)));
+               for (tc = 0; tc < edev->num_tc; tc++) {
+                       txq_index = tc * QEDE_RSS_CNT(edev) + rss_id;
+                       fp->txqs[tc].index = txq_index;
+               }
+
+               snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
+                        edev->ndev->name, rss_id);
+       }
+
+       edev->gro_disable = !(edev->ndev->features & NETIF_F_GRO);
+}
+
+static int qede_set_real_num_queues(struct qede_dev *edev)
+{
+       int rc = 0;
+
+       rc = netif_set_real_num_tx_queues(edev->ndev, QEDE_TSS_CNT(edev));
+       if (rc) {
+               DP_NOTICE(edev, "Failed to set real number of Tx queues\n");
+               return rc;
+       }
+       rc = netif_set_real_num_rx_queues(edev->ndev, QEDE_RSS_CNT(edev));
+       if (rc) {
+               DP_NOTICE(edev, "Failed to set real number of Rx queues\n");
+               return rc;
+       }
+
+       return 0;
+}
+
+static void qede_napi_disable_remove(struct qede_dev *edev)
+{
+       int i;
+
+       for_each_rss(i) {
+               napi_disable(&edev->fp_array[i].napi);
+
+               netif_napi_del(&edev->fp_array[i].napi);
+       }
+}
+
+static void qede_napi_add_enable(struct qede_dev *edev)
+{
+       int i;
+
+       /* Add NAPI objects */
+       for_each_rss(i) {
+               netif_napi_add(edev->ndev, &edev->fp_array[i].napi,
+                              qede_poll, NAPI_POLL_WEIGHT);
+               napi_enable(&edev->fp_array[i].napi);
+       }
+}
+
+static void qede_sync_free_irqs(struct qede_dev *edev)
+{
+       int i;
+
+       for (i = 0; i < edev->int_info.used_cnt; i++) {
+               if (edev->int_info.msix_cnt) {
+                       synchronize_irq(edev->int_info.msix[i].vector);
+                       free_irq(edev->int_info.msix[i].vector,
+                                &edev->fp_array[i]);
+               } else {
+                       edev->ops->common->simd_handler_clean(edev->cdev, i);
+               }
+       }
+
+       edev->int_info.used_cnt = 0;
+}
+
+static int qede_req_msix_irqs(struct qede_dev *edev)
+{
+       int i, rc;
+
+       /* Sanity-check: the number of MSI-X vectors must cover all prepared RSS queues */
+       if (QEDE_RSS_CNT(edev) > edev->int_info.msix_cnt) {
+               DP_ERR(edev,
+                      "Interrupt mismatch: %d RSS queues > %d MSI-x vectors\n",
+                      QEDE_RSS_CNT(edev), edev->int_info.msix_cnt);
+               return -EINVAL;
+       }
+
+       for (i = 0; i < QEDE_RSS_CNT(edev); i++) {
+               rc = request_irq(edev->int_info.msix[i].vector,
+                                qede_msix_fp_int, 0, edev->fp_array[i].name,
+                                &edev->fp_array[i]);
+               if (rc) {
+                       DP_ERR(edev, "Request fp %d irq failed\n", i);
+                       qede_sync_free_irqs(edev);
+                       return rc;
+               }
+               DP_VERBOSE(edev, NETIF_MSG_INTR,
+                          "Requested fp irq for %s [entry %d]. Cookie is at %p\n",
+                          edev->fp_array[i].name, i,
+                          &edev->fp_array[i]);
+               edev->int_info.used_cnt++;
+       }
+
+       return 0;
+}
+
+static void qede_simd_fp_handler(void *cookie)
+{
+       struct qede_fastpath *fp = (struct qede_fastpath *)cookie;
+
+       napi_schedule_irqoff(&fp->napi);
+}
+
+static int qede_setup_irqs(struct qede_dev *edev)
+{
+       int i, rc = 0;
+
+       /* Learn Interrupt configuration */
+       rc = edev->ops->common->get_fp_int(edev->cdev, &edev->int_info);
+       if (rc)
+               return rc;
+
+       if (edev->int_info.msix_cnt) {
+               rc = qede_req_msix_irqs(edev);
+               if (rc)
+                       return rc;
+               edev->ndev->irq = edev->int_info.msix[0].vector;
+       } else {
+               const struct qed_common_ops *ops;
+
+               /* Let qed learn the RSS ids and their callbacks */
+               ops = edev->ops->common;
+               for (i = 0; i < QEDE_RSS_CNT(edev); i++)
+                       ops->simd_handler_config(edev->cdev,
+                                                &edev->fp_array[i], i,
+                                                qede_simd_fp_handler);
+               edev->int_info.used_cnt = QEDE_RSS_CNT(edev);
+       }
+       return 0;
+}
+
+static int qede_drain_txq(struct qede_dev *edev,
+                         struct qede_tx_queue *txq,
+                         bool allow_drain)
+{
+       int rc, cnt = 1000;
+
+       while (txq->sw_tx_cons != txq->sw_tx_prod) {
+               if (!cnt) {
+                       if (allow_drain) {
+                               DP_NOTICE(edev,
+                                         "Tx queue[%d] is stuck, requesting MCP to drain\n",
+                                         txq->index);
+                               rc = edev->ops->common->drain(edev->cdev);
+                               if (rc)
+                                       return rc;
+                               return qede_drain_txq(edev, txq, false);
+                       }
+                       DP_NOTICE(edev,
+                                 "Timeout waiting for tx queue[%d]: PROD=%d, CONS=%d\n",
+                                 txq->index, txq->sw_tx_prod,
+                                 txq->sw_tx_cons);
+                       return -ENODEV;
+               }
+               cnt--;
+               usleep_range(1000, 2000);
+               barrier();
+       }
+
+       /* FW finished processing, wait for HW to transmit all tx packets */
+       usleep_range(1000, 2000);
+
+       return 0;
+}
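
For reference, the polling budget above works out to roughly cnt * usleep_range(1000, 2000) = 1000 * 1-2 ms, i.e. about 1-2 seconds per attempt: one attempt with allow_drain set (followed by an MCP drain request if the queue is still stuck), then one more attempt before -ENODEV is returned.
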
+
+static int qede_stop_queues(struct qede_dev *edev)
+{
+       struct qed_update_vport_params vport_update_params;
+       struct qed_dev *cdev = edev->cdev;
+       int rc, tc, i;
+
+       /* Disable the vport */
+       memset(&vport_update_params, 0, sizeof(vport_update_params));
+       vport_update_params.vport_id = 0;
+       vport_update_params.update_vport_active_flg = 1;
+       vport_update_params.vport_active_flg = 0;
+       vport_update_params.update_rss_flg = 0;
+
+       rc = edev->ops->vport_update(cdev, &vport_update_params);
+       if (rc) {
+               DP_ERR(edev, "Failed to update vport\n");
+               return rc;
+       }
+
+       /* Flush Tx queues. If needed, request drain from MCP */
+       for_each_rss(i) {
+               struct qede_fastpath *fp = &edev->fp_array[i];
+
+               for (tc = 0; tc < edev->num_tc; tc++) {
+                       struct qede_tx_queue *txq = &fp->txqs[tc];
+
+                       rc = qede_drain_txq(edev, txq, true);
+                       if (rc)
+                               return rc;
+               }
+       }
+
+       /* Stop all queues in reverse order */
+       for (i = QEDE_RSS_CNT(edev) - 1; i >= 0; i--) {
+               struct qed_stop_rxq_params rx_params;
+
+               /* Stop the Tx queue(s) */
+               for (tc = 0; tc < edev->num_tc; tc++) {
+                       struct qed_stop_txq_params tx_params;
+
+                       tx_params.rss_id = i;
+                       tx_params.tx_queue_id = tc * QEDE_RSS_CNT(edev) + i;
+                       rc = edev->ops->q_tx_stop(cdev, &tx_params);
+                       if (rc) {
+                               DP_ERR(edev, "Failed to stop TXQ #%d\n",
+                                      tx_params.tx_queue_id);
+                               return rc;
+                       }
+               }
+
+               /* Stop the Rx queue */
+               memset(&rx_params, 0, sizeof(rx_params));
+               rx_params.rss_id = i;
+               rx_params.rx_queue_id = i;
+
+               rc = edev->ops->q_rx_stop(cdev, &rx_params);
+               if (rc) {
+                       DP_ERR(edev, "Failed to stop RXQ #%d\n", i);
+                       return rc;
+               }
+       }
+
+       /* Stop the vport */
+       rc = edev->ops->vport_stop(cdev, 0);
+       if (rc)
+               DP_ERR(edev, "Failed to stop VPORT\n");
+
+       return rc;
+}
+
+static int qede_start_queues(struct qede_dev *edev, bool clear_stats)
+{
+       int rc, tc, i;
+       int vlan_removal_en = 1;
+       struct qed_dev *cdev = edev->cdev;
+       struct qed_update_vport_params vport_update_params;
+       struct qed_queue_start_common_params q_params;
+       struct qed_dev_info *qed_info = &edev->dev_info.common;
+       struct qed_start_vport_params start = {0};
+       bool reset_rss_indir = false;
+
+       if (!edev->num_rss) {
+               DP_ERR(edev,
+                      "Cannot update V-VPORT as active as there are no Rx queues\n");
+               return -EINVAL;
+       }
+
+       start.gro_enable = !edev->gro_disable;
+       start.mtu = edev->ndev->mtu;
+       start.vport_id = 0;
+       start.drop_ttl0 = true;
+       start.remove_inner_vlan = vlan_removal_en;
+       start.clear_stats = clear_stats;
+
+       rc = edev->ops->vport_start(cdev, &start);
+
+       if (rc) {
+               DP_ERR(edev, "Start V-PORT failed %d\n", rc);
+               return rc;
+       }
+
+       DP_VERBOSE(edev, NETIF_MSG_IFUP,
+                  "Start vport ramrod passed, vport_id = %d, MTU = %d, vlan_removal_en = %d\n",
+                  start.vport_id, edev->ndev->mtu + 0xe, vlan_removal_en);
+
+       for_each_rss(i) {
+               struct qede_fastpath *fp = &edev->fp_array[i];
+               dma_addr_t phys_table = fp->rxq->rx_comp_ring.pbl.p_phys_table;
+
+               memset(&q_params, 0, sizeof(q_params));
+               q_params.rss_id = i;
+               q_params.queue_id = i;
+               q_params.vport_id = 0;
+               q_params.sb = fp->sb_info->igu_sb_id;
+               q_params.sb_idx = RX_PI;
+
+               rc = edev->ops->q_rx_start(cdev, &q_params,
+                                          fp->rxq->rx_buf_size,
+                                          fp->rxq->rx_bd_ring.p_phys_addr,
+                                          phys_table,
+                                          fp->rxq->rx_comp_ring.page_cnt,
+                                          &fp->rxq->hw_rxq_prod_addr);
+               if (rc) {
+                       DP_ERR(edev, "Start RXQ #%d failed %d\n", i, rc);
+                       return rc;
+               }
+
+               fp->rxq->hw_cons_ptr = &fp->sb_info->sb_virt->pi_array[RX_PI];
+
+               qede_update_rx_prod(edev, fp->rxq);
+
+               for (tc = 0; tc < edev->num_tc; tc++) {
+                       struct qede_tx_queue *txq = &fp->txqs[tc];
+                       int txq_index = tc * QEDE_RSS_CNT(edev) + i;
+
+                       memset(&q_params, 0, sizeof(q_params));
+                       q_params.rss_id = i;
+                       q_params.queue_id = txq_index;
+                       q_params.vport_id = 0;
+                       q_params.sb = fp->sb_info->igu_sb_id;
+                       q_params.sb_idx = TX_PI(tc);
+
+                       rc = edev->ops->q_tx_start(cdev, &q_params,
+                                                  txq->tx_pbl.pbl.p_phys_table,
+                                                  txq->tx_pbl.page_cnt,
+                                                  &txq->doorbell_addr);
+                       if (rc) {
+                               DP_ERR(edev, "Start TXQ #%d failed %d\n",
+                                      txq_index, rc);
+                               return rc;
+                       }
+
+                       txq->hw_cons_ptr =
+                               &fp->sb_info->sb_virt->pi_array[TX_PI(tc)];
+                       SET_FIELD(txq->tx_db.data.params,
+                                 ETH_DB_DATA_DEST, DB_DEST_XCM);
+                       SET_FIELD(txq->tx_db.data.params, ETH_DB_DATA_AGG_CMD,
+                                 DB_AGG_CMD_SET);
+                       SET_FIELD(txq->tx_db.data.params,
+                                 ETH_DB_DATA_AGG_VAL_SEL,
+                                 DQ_XCM_ETH_TX_BD_PROD_CMD);
+
+                       txq->tx_db.data.agg_flags = DQ_XCM_ETH_DQ_CF_CMD;
+               }
+       }
+
+       /* Prepare and send the vport enable */
+       memset(&vport_update_params, 0, sizeof(vport_update_params));
+       vport_update_params.vport_id = start.vport_id;
+       vport_update_params.update_vport_active_flg = 1;
+       vport_update_params.vport_active_flg = 1;
+
+       if ((qed_info->mf_mode == QED_MF_NPAR || pci_num_vf(edev->pdev)) &&
+           qed_info->tx_switching) {
+               vport_update_params.update_tx_switching_flg = 1;
+               vport_update_params.tx_switching_flg = 1;
+       }
+
+       /* Fill struct with RSS params */
+       if (QEDE_RSS_CNT(edev) > 1) {
+               vport_update_params.update_rss_flg = 1;
+
+               /* Need to validate current RSS config uses valid entries */
+               for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i++) {
+                       if (edev->rss_params.rss_ind_table[i] >=
+                           edev->num_rss) {
+                               reset_rss_indir = true;
+                               break;
+                       }
+               }
+
+               if (!(edev->rss_params_inited & QEDE_RSS_INDIR_INITED) ||
+                   reset_rss_indir) {
+                       u16 val;
+
+                       for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i++) {
+                               u16 indir_val;
+
+                               val = QEDE_RSS_CNT(edev);
+                               indir_val = ethtool_rxfh_indir_default(i, val);
+                               edev->rss_params.rss_ind_table[i] = indir_val;
+                       }
+                       edev->rss_params_inited |= QEDE_RSS_INDIR_INITED;
+               }
+
+               if (!(edev->rss_params_inited & QEDE_RSS_KEY_INITED)) {
+                       netdev_rss_key_fill(edev->rss_params.rss_key,
+                                           sizeof(edev->rss_params.rss_key));
+                       edev->rss_params_inited |= QEDE_RSS_KEY_INITED;
+               }
+
+               if (!(edev->rss_params_inited & QEDE_RSS_CAPS_INITED)) {
+                       edev->rss_params.rss_caps = QED_RSS_IPV4 |
+                                                   QED_RSS_IPV6 |
+                                                   QED_RSS_IPV4_TCP |
+                                                   QED_RSS_IPV6_TCP;
+                       edev->rss_params_inited |= QEDE_RSS_CAPS_INITED;
+               }
+
+               memcpy(&vport_update_params.rss_params, &edev->rss_params,
+                      sizeof(vport_update_params.rss_params));
+       } else {
+               memset(&vport_update_params.rss_params, 0,
+                      sizeof(vport_update_params.rss_params));
+       }
+
+       rc = edev->ops->vport_update(cdev, &vport_update_params);
+       if (rc) {
+               DP_ERR(edev, "Update V-PORT failed %d\n", rc);
+               return rc;
+       }
+
+       return 0;
+}
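
The default indirection fill above relies on the kernel helper ethtool_rxfh_indir_default(), which (as assumed here) simply spreads table entries round-robin across the active queues:

	/*   ethtool_rxfh_indir_default(i, 4) == i % 4
	 *
	 * so with QEDE_RSS_CNT(edev) == 4 the QED_RSS_IND_TABLE_SIZE entries
	 * become 0, 1, 2, 3, 0, 1, 2, 3, ...
	 */
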
+
+static int qede_set_mcast_rx_mac(struct qede_dev *edev,
+                                enum qed_filter_xcast_params_type opcode,
+                                unsigned char *mac, int num_macs)
+{
+       struct qed_filter_params filter_cmd;
+       int i;
+
+       memset(&filter_cmd, 0, sizeof(filter_cmd));
+       filter_cmd.type = QED_FILTER_TYPE_MCAST;
+       filter_cmd.filter.mcast.type = opcode;
+       filter_cmd.filter.mcast.num = num_macs;
+
+       for (i = 0; i < num_macs; i++, mac += ETH_ALEN)
+               ether_addr_copy(filter_cmd.filter.mcast.mac[i], mac);
+
+       return edev->ops->filter_config(edev->cdev, &filter_cmd);
+}
+
+enum qede_unload_mode {
+       QEDE_UNLOAD_NORMAL,
+};
+
+static void qede_unload(struct qede_dev *edev, enum qede_unload_mode mode)
+{
+       struct qed_link_params link_params;
+       int rc;
+
+       DP_INFO(edev, "Starting qede unload\n");
+
+       mutex_lock(&edev->qede_lock);
+       edev->state = QEDE_STATE_CLOSED;
+
+       /* Close OS Tx */
+       netif_tx_disable(edev->ndev);
+       netif_carrier_off(edev->ndev);
+
+       /* Reset the link */
+       memset(&link_params, 0, sizeof(link_params));
+       link_params.link_up = false;
+       edev->ops->common->set_link(edev->cdev, &link_params);
+       rc = qede_stop_queues(edev);
+       if (rc) {
+               qede_sync_free_irqs(edev);
+               goto out;
+       }
+
+       DP_INFO(edev, "Stopped Queues\n");
+
+       qede_vlan_mark_nonconfigured(edev);
+       edev->ops->fastpath_stop(edev->cdev);
+
+       /* Release the interrupts */
+       qede_sync_free_irqs(edev);
+       edev->ops->common->set_fp_int(edev->cdev, 0);
+
+       qede_napi_disable_remove(edev);
+
+       qede_free_mem_load(edev);
+       qede_free_fp_array(edev);
+
+out:
+       mutex_unlock(&edev->qede_lock);
+       DP_INFO(edev, "Ending qede unload\n");
+}
+
+enum qede_load_mode {
+       QEDE_LOAD_NORMAL,
+       QEDE_LOAD_RELOAD,
+};
+
+static int qede_load(struct qede_dev *edev, enum qede_load_mode mode)
+{
+       struct qed_link_params link_params;
+       struct qed_link_output link_output;
+       int rc;
+
+       DP_INFO(edev, "Starting qede load\n");
+
+       rc = qede_set_num_queues(edev);
+       if (rc)
+               goto err0;
+
+       rc = qede_alloc_fp_array(edev);
+       if (rc)
+               goto err0;
+
+       qede_init_fp(edev);
+
+       rc = qede_alloc_mem_load(edev);
+       if (rc)
+               goto err1;
+       DP_INFO(edev, "Allocated %d RSS queues on %d TC/s\n",
+               QEDE_RSS_CNT(edev), edev->num_tc);
+
+       rc = qede_set_real_num_queues(edev);
+       if (rc)
+               goto err2;
+
+       qede_napi_add_enable(edev);
+       DP_INFO(edev, "Napi added and enabled\n");
+
+       rc = qede_setup_irqs(edev);
+       if (rc)
+               goto err3;
+       DP_INFO(edev, "Setup IRQs succeeded\n");
+
+       rc = qede_start_queues(edev, mode != QEDE_LOAD_RELOAD);
+       if (rc)
+               goto err4;
+       DP_INFO(edev, "Start VPORT, RXQ and TXQ succeeded\n");
+
+       /* Add primary mac and set Rx filters */
+       ether_addr_copy(edev->primary_mac, edev->ndev->dev_addr);
+
+       mutex_lock(&edev->qede_lock);
+       edev->state = QEDE_STATE_OPEN;
+       mutex_unlock(&edev->qede_lock);
+
+       /* Program un-configured VLANs */
+       qede_configure_vlan_filters(edev);
+
+       /* Ask for link-up using current configuration */
+       memset(&link_params, 0, sizeof(link_params));
+       link_params.link_up = true;
+       edev->ops->common->set_link(edev->cdev, &link_params);
+
+       /* Query whether link is already-up */
+       memset(&link_output, 0, sizeof(link_output));
+       edev->ops->common->get_link(edev->cdev, &link_output);
+       qede_link_update(edev, &link_output);
+
+       DP_INFO(edev, "Ending successfully qede load\n");
+
+       return 0;
+
+err4:
+       qede_sync_free_irqs(edev);
+       memset(&edev->int_info.msix_cnt, 0, sizeof(struct qed_int_info));
+err3:
+       qede_napi_disable_remove(edev);
+err2:
+       qede_free_mem_load(edev);
+err1:
+       edev->ops->common->set_fp_int(edev->cdev, 0);
+       qede_free_fp_array(edev);
+       edev->num_rss = 0;
+err0:
+       return rc;
+}
+
+void qede_reload(struct qede_dev *edev,
+                void (*func)(struct qede_dev *, union qede_reload_args *),
+                union qede_reload_args *args)
+{
+       qede_unload(edev, QEDE_UNLOAD_NORMAL);
+       /* Call function handler to update parameters
+        * needed for function load.
+        */
+       if (func)
+               func(edev, args);
+
+       qede_load(edev, QEDE_LOAD_RELOAD);
+
+       mutex_lock(&edev->qede_lock);
+       qede_config_rx_mode(edev->ndev);
+       mutex_unlock(&edev->qede_lock);
+}
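
A minimal sketch of a caller, assuming union qede_reload_args carries an mtu field (hypothetical here; the actual union is defined in qede.h). The callback runs between the unload and the load, so the new value is picked up when the Rx buffers and vport are re-created:

	static void qede_update_mtu_example(struct qede_dev *edev,
					    union qede_reload_args *args)
	{
		/* Applied while the device is torn down */
		edev->ndev->mtu = args->mtu;
	}

	/* ...
	 * args.mtu = new_mtu;
	 * qede_reload(edev, qede_update_mtu_example, &args);
	 */
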
+
+/* called with rtnl_lock */
+static int qede_open(struct net_device *ndev)
+{
+       struct qede_dev *edev = netdev_priv(ndev);
+       int rc;
+
+       netif_carrier_off(ndev);
+
+       edev->ops->common->set_power_state(edev->cdev, PCI_D0);
+
+       rc = qede_load(edev, QEDE_LOAD_NORMAL);
+
+       if (rc)
+               return rc;
+
+       udp_tunnel_get_rx_info(ndev);
+
+       return 0;
+}
+
+static int qede_close(struct net_device *ndev)
+{
+       struct qede_dev *edev = netdev_priv(ndev);
+
+       qede_unload(edev, QEDE_UNLOAD_NORMAL);
+
+       return 0;
+}
+
+static void qede_link_update(void *dev, struct qed_link_output *link)
+{
+       struct qede_dev *edev = dev;
+
+       if (!netif_running(edev->ndev)) {
+               DP_VERBOSE(edev, NETIF_MSG_LINK, "Interface is not running\n");
+               return;
+       }
+
+       if (link->link_up) {
+               if (!netif_carrier_ok(edev->ndev)) {
+                       DP_NOTICE(edev, "Link is up\n");
+                       netif_tx_start_all_queues(edev->ndev);
+                       netif_carrier_on(edev->ndev);
+               }
+       } else {
+               if (netif_carrier_ok(edev->ndev)) {
+                       DP_NOTICE(edev, "Link is down\n");
+                       netif_tx_disable(edev->ndev);
+                       netif_carrier_off(edev->ndev);
+               }
+       }
+}
+
+static int qede_set_mac_addr(struct net_device *ndev, void *p)
+{
+       struct qede_dev *edev = netdev_priv(ndev);
+       struct sockaddr *addr = p;
+       int rc;
+
+       ASSERT_RTNL(); /* @@@TBD To be removed */
+
+       DP_INFO(edev, "Set_mac_addr called\n");
+
+       if (!is_valid_ether_addr(addr->sa_data)) {
+               DP_NOTICE(edev, "The MAC address is not valid\n");
+               return -EFAULT;
+       }
+
+       if (!edev->ops->check_mac(edev->cdev, addr->sa_data)) {
+               DP_NOTICE(edev, "qed prevents setting MAC\n");
+               return -EINVAL;
+       }
+
+       ether_addr_copy(ndev->dev_addr, addr->sa_data);
+
+       if (!netif_running(ndev))  {
+               DP_NOTICE(edev, "The device is currently down\n");
+               return 0;
+       }
+
+       /* Remove the previous primary mac */
+       rc = qede_set_ucast_rx_mac(edev, QED_FILTER_XCAST_TYPE_DEL,
+                                  edev->primary_mac);
+       if (rc)
+               return rc;
+
+       /* Add MAC filter according to the new unicast HW MAC address */
+       ether_addr_copy(edev->primary_mac, ndev->dev_addr);
+       return qede_set_ucast_rx_mac(edev, QED_FILTER_XCAST_TYPE_ADD,
+                                     edev->primary_mac);
+}
+
+static int
+qede_configure_mcast_filtering(struct net_device *ndev,
+                              enum qed_filter_rx_mode_type *accept_flags)
+{
+       struct qede_dev *edev = netdev_priv(ndev);
+       unsigned char *mc_macs, *temp;
+       struct netdev_hw_addr *ha;
+       int rc = 0, mc_count;
+       size_t size;
+
+       size = 64 * ETH_ALEN;
+
+       mc_macs = kzalloc(size, GFP_KERNEL);
+       if (!mc_macs) {
+               DP_NOTICE(edev,
+                         "Failed to allocate memory for multicast MACs\n");
+               rc = -ENOMEM;
+               goto exit;
+       }
+
+       temp = mc_macs;
+
+       /* Remove all previously configured MAC filters */
+       rc = qede_set_mcast_rx_mac(edev, QED_FILTER_XCAST_TYPE_DEL,
+                                  mc_macs, 1);
+       if (rc)
+               goto exit;
+
+       netif_addr_lock_bh(ndev);
+
+       mc_count = netdev_mc_count(ndev);
+       if (mc_count < 64) {
+               netdev_for_each_mc_addr(ha, ndev) {
+                       ether_addr_copy(temp, ha->addr);
+                       temp += ETH_ALEN;
+               }
+       }
+
+       netif_addr_unlock_bh(ndev);
+
+       /* Check for all multicast @@@TBD resource allocation */
+       if ((ndev->flags & IFF_ALLMULTI) ||
+           (mc_count > 64)) {
+               if (*accept_flags == QED_FILTER_RX_MODE_TYPE_REGULAR)
+                       *accept_flags = QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC;
+       } else {
+               /* Add all multicast MAC filters */
+               rc = qede_set_mcast_rx_mac(edev, QED_FILTER_XCAST_TYPE_ADD,
+                                          mc_macs, mc_count);
+       }
+
+exit:
+       kfree(mc_macs);
+       return rc;
+}
+
+static void qede_set_rx_mode(struct net_device *ndev)
+{
+       struct qede_dev *edev = netdev_priv(ndev);
+
+       DP_INFO(edev, "qede_set_rx_mode called\n");
+
+       if (edev->state != QEDE_STATE_OPEN) {
+               DP_INFO(edev,
+                       "qede_set_rx_mode called while interface is down\n");
+       } else {
+               set_bit(QEDE_SP_RX_MODE, &edev->sp_flags);
+               schedule_delayed_work(&edev->sp_task, 0);
+       }
+}
+
+/* Must be called with qede_lock held */
+static void qede_config_rx_mode(struct net_device *ndev)
+{
+       enum qed_filter_rx_mode_type accept_flags = QED_FILTER_TYPE_UCAST;
+       struct qede_dev *edev = netdev_priv(ndev);
+       struct qed_filter_params rx_mode;
+       unsigned char *uc_macs, *temp;
+       struct netdev_hw_addr *ha;
+       int rc, uc_count;
+       size_t size;
+
+       netif_addr_lock_bh(ndev);
+
+       uc_count = netdev_uc_count(ndev);
+       size = uc_count * ETH_ALEN;
+
+       uc_macs = kzalloc(size, GFP_ATOMIC);
+       if (!uc_macs) {
+               DP_NOTICE(edev, "Failed to allocate memory for unicast MACs\n");
+               netif_addr_unlock_bh(ndev);
+               return;
+       }
+
+       temp = uc_macs;
+       netdev_for_each_uc_addr(ha, ndev) {
+               ether_addr_copy(temp, ha->addr);
+               temp += ETH_ALEN;
+       }
+
+       netif_addr_unlock_bh(ndev);
+
+       /* Configure the struct for the Rx mode */
+       memset(&rx_mode, 0, sizeof(struct qed_filter_params));
+       rx_mode.type = QED_FILTER_TYPE_RX_MODE;
+
+       /* Remove all previous unicast secondary macs and multicast macs
+        * (configure / leave the primary mac)
+        */
+       rc = qede_set_ucast_rx_mac(edev, QED_FILTER_XCAST_TYPE_REPLACE,
+                                  edev->primary_mac);
+       if (rc)
+               goto out;
+
+       /* Check for promiscuous */
+       if ((ndev->flags & IFF_PROMISC) ||
+           (uc_count > 15)) { /* @@@TBD resource allocation - 1 */
+               accept_flags = QED_FILTER_RX_MODE_TYPE_PROMISC;
+       } else {
+               /* Add MAC filters according to the unicast secondary macs */
+               int i;
+
+               temp = uc_macs;
+               for (i = 0; i < uc_count; i++) {
+                       rc = qede_set_ucast_rx_mac(edev,
+                                                  QED_FILTER_XCAST_TYPE_ADD,
+                                                  temp);
+                       if (rc)
+                               goto out;
+
+                       temp += ETH_ALEN;
+               }
+
+               rc = qede_configure_mcast_filtering(ndev, &accept_flags);
+               if (rc)
+                       goto out;
+       }
+
+       /* take care of VLAN mode */
+       if (ndev->flags & IFF_PROMISC) {
+               qede_config_accept_any_vlan(edev, true);
+       } else if (!edev->non_configured_vlans) {
+               /* It's possible that accept_any_vlan mode is set due to a
+                * previous setting of IFF_PROMISC. If vlan credits are
+                * sufficient, disable accept_any_vlan.
+                */
+               qede_config_accept_any_vlan(edev, false);
+       }
+
+       rx_mode.filter.accept_flags = accept_flags;
+       edev->ops->filter_config(edev->cdev, &rx_mode);
+out:
+       kfree(uc_macs);
+}
diff --git a/drivers/net/ethernet/qlogic/qla3xxx.c b/drivers/net/ethernet/qlogic/qla3xxx.c
new file mode 100644 (file)
index 0000000..b09a6b8
--- /dev/null
@@ -0,0 +1,3947 @@
+/*
+ * QLogic QLA3xxx NIC HBA Driver
+ * Copyright (c)  2003-2006 QLogic Corporation
+ *
+ * See LICENSE.qla3xxx for copyright and licensing details.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/list.h>
+#include <linux/pci.h>
+#include <linux/dma-mapping.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/dmapool.h>
+#include <linux/mempool.h>
+#include <linux/spinlock.h>
+#include <linux/kthread.h>
+#include <linux/interrupt.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/ip.h>
+#include <linux/in.h>
+#include <linux/if_arp.h>
+#include <linux/if_ether.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/skbuff.h>
+#include <linux/rtnetlink.h>
+#include <linux/if_vlan.h>
+#include <linux/delay.h>
+#include <linux/mm.h>
+#include <linux/prefetch.h>
+
+#include "qla3xxx.h"
+
+#define DRV_NAME       "qla3xxx"
+#define DRV_STRING     "QLogic ISP3XXX Network Driver"
+#define DRV_VERSION    "v2.03.00-k5"
+
+static const char ql3xxx_driver_name[] = DRV_NAME;
+static const char ql3xxx_driver_version[] = DRV_VERSION;
+
+#define TIMED_OUT_MSG                                                  \
+"Timed out waiting for management port to get free before issuing command\n"
+
+MODULE_AUTHOR("QLogic Corporation");
+MODULE_DESCRIPTION("QLogic ISP3XXX Network Driver " DRV_VERSION " ");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_VERSION);
+
+static const u32 default_msg
+    = NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK
+    | NETIF_MSG_IFUP | NETIF_MSG_IFDOWN;
+
+static int debug = -1;         /* defaults above */
+module_param(debug, int, 0);
+MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
+
+static int msi;
+module_param(msi, int, 0);
+MODULE_PARM_DESC(msi, "Turn on Message Signaled Interrupts.");
+
+static const struct pci_device_id ql3xxx_pci_tbl[] = {
+       {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QL3022_DEVICE_ID)},
+       {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QL3032_DEVICE_ID)},
+       /* required last entry */
+       {0,}
+};
+
+MODULE_DEVICE_TABLE(pci, ql3xxx_pci_tbl);
+
+/*
+ *  These are the known PHYs that are used
+ */
+enum PHY_DEVICE_TYPE {
+   PHY_TYPE_UNKNOWN   = 0,
+   PHY_VITESSE_VSC8211,
+   PHY_AGERE_ET1011C,
+   MAX_PHY_DEV_TYPES
+};
+
+struct PHY_DEVICE_INFO {
+       const enum PHY_DEVICE_TYPE      phyDevice;
+       const u32               phyIdOUI;
+       const u16               phyIdModel;
+       const char              *name;
+};
+
+static const struct PHY_DEVICE_INFO PHY_DEVICES[] = {
+       {PHY_TYPE_UNKNOWN,    0x000000, 0x0, "PHY_TYPE_UNKNOWN"},
+       {PHY_VITESSE_VSC8211, 0x0003f1, 0xb, "PHY_VITESSE_VSC8211"},
+       {PHY_AGERE_ET1011C,   0x00a0bc, 0x1, "PHY_AGERE_ET1011C"},
+};
+
+
+/*
+ * Caller must take hw_lock.
+ */
+static int ql_sem_spinlock(struct ql3_adapter *qdev,
+                           u32 sem_mask, u32 sem_bits)
+{
+       struct ql3xxx_port_registers __iomem *port_regs =
+               qdev->mem_map_registers;
+       u32 value;
+       unsigned int seconds = 3;
+
+       do {
+               writel((sem_mask | sem_bits),
+                      &port_regs->CommonRegs.semaphoreReg);
+               value = readl(&port_regs->CommonRegs.semaphoreReg);
+               if ((value & (sem_mask >> 16)) == sem_bits)
+                       return 0;
+               ssleep(1);
+       } while (--seconds);
+       return -1;
+}
+
+static void ql_sem_unlock(struct ql3_adapter *qdev, u32 sem_mask)
+{
+       struct ql3xxx_port_registers __iomem *port_regs =
+               qdev->mem_map_registers;
+       writel(sem_mask, &port_regs->CommonRegs.semaphoreReg);
+       readl(&port_regs->CommonRegs.semaphoreReg);
+}
+
+static int ql_sem_lock(struct ql3_adapter *qdev, u32 sem_mask, u32 sem_bits)
+{
+       struct ql3xxx_port_registers __iomem *port_regs =
+               qdev->mem_map_registers;
+       u32 value;
+
+       writel((sem_mask | sem_bits), &port_regs->CommonRegs.semaphoreReg);
+       value = readl(&port_regs->CommonRegs.semaphoreReg);
+       return ((value & (sem_mask >> 16)) == sem_bits);
+}
+
+/*
+ * Caller holds hw_lock.
+ */
+static int ql_wait_for_drvr_lock(struct ql3_adapter *qdev)
+{
+       int i = 0;
+
+       do {
+               if (ql_sem_lock(qdev,
+                               QL_DRVR_SEM_MASK,
+                               (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index)
+                                * 2) << 1)) {
+                       netdev_printk(KERN_DEBUG, qdev->ndev,
+                                     "driver lock acquired\n");
+                       return 1;
+               }
+               ssleep(1);
+       } while (++i < 10);
+
+       netdev_err(qdev->ndev, "Timed out waiting for driver lock...\n");
+       return 0;
+}
+
+static void ql_set_register_page(struct ql3_adapter *qdev, u32 page)
+{
+       struct ql3xxx_port_registers __iomem *port_regs =
+               qdev->mem_map_registers;
+
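+       /*
+        * The upper halfword carries the write-enable mask for the
+        * page-select field; the trailing read flushes the posted write.
+        */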
+       writel(((ISP_CONTROL_NP_MASK << 16) | page),
+                       &port_regs->CommonRegs.ispControlStatus);
+       readl(&port_regs->CommonRegs.ispControlStatus);
+       qdev->current_page = page;
+}
+
+static u32 ql_read_common_reg_l(struct ql3_adapter *qdev, u32 __iomem *reg)
+{
+       u32 value;
+       unsigned long hw_flags;
+
+       spin_lock_irqsave(&qdev->hw_lock, hw_flags);
+       value = readl(reg);
+       spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
+
+       return value;
+}
+
+static u32 ql_read_common_reg(struct ql3_adapter *qdev, u32 __iomem *reg)
+{
+       return readl(reg);
+}
+
+static u32 ql_read_page0_reg_l(struct ql3_adapter *qdev, u32 __iomem *reg)
+{
+       u32 value;
+       unsigned long hw_flags;
+
+       spin_lock_irqsave(&qdev->hw_lock, hw_flags);
+
+       if (qdev->current_page != 0)
+               ql_set_register_page(qdev, 0);
+       value = readl(reg);
+
+       spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
+       return value;
+}
+
+static u32 ql_read_page0_reg(struct ql3_adapter *qdev, u32 __iomem *reg)
+{
+       if (qdev->current_page != 0)
+               ql_set_register_page(qdev, 0);
+       return readl(reg);
+}
+
+static void ql_write_common_reg_l(struct ql3_adapter *qdev,
+                               u32 __iomem *reg, u32 value)
+{
+       unsigned long hw_flags;
+
+       spin_lock_irqsave(&qdev->hw_lock, hw_flags);
+       writel(value, reg);
+       readl(reg);
+       spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
+}
+
+static void ql_write_common_reg(struct ql3_adapter *qdev,
+                               u32 __iomem *reg, u32 value)
+{
+       writel(value, reg);
+       readl(reg);
+}
+
+static void ql_write_nvram_reg(struct ql3_adapter *qdev,
+                               u32 __iomem *reg, u32 value)
+{
+       writel(value, reg);
+       readl(reg);
+       udelay(1);
+}
+
+static void ql_write_page0_reg(struct ql3_adapter *qdev,
+                              u32 __iomem *reg, u32 value)
+{
+       if (qdev->current_page != 0)
+               ql_set_register_page(qdev, 0);
+       writel(value, reg);
+       readl(reg);
+}
+
+/*
+ * Caller holds hw_lock. Only called during init.
+ */
+static void ql_write_page1_reg(struct ql3_adapter *qdev,
+                              u32 __iomem *reg, u32 value)
+{
+       if (qdev->current_page != 1)
+               ql_set_register_page(qdev, 1);
+       writel(value, reg);
+       readl(reg);
+}
+
+/*
+ * Caller holds hw_lock. Only called during init.
+ */
+static void ql_write_page2_reg(struct ql3_adapter *qdev,
+                              u32 __iomem *reg, u32 value)
+{
+       if (qdev->current_page != 2)
+               ql_set_register_page(qdev, 2);
+       writel(value, reg);
+       readl(reg);
+}
+
+static void ql_disable_interrupts(struct ql3_adapter *qdev)
+{
+       struct ql3xxx_port_registers __iomem *port_regs =
+               qdev->mem_map_registers;
+
+       ql_write_common_reg_l(qdev, &port_regs->CommonRegs.ispInterruptMaskReg,
+                           (ISP_IMR_ENABLE_INT << 16));
+
+}
+
+static void ql_enable_interrupts(struct ql3_adapter *qdev)
+{
+       struct ql3xxx_port_registers __iomem *port_regs =
+               qdev->mem_map_registers;
+
+       ql_write_common_reg_l(qdev, &port_regs->CommonRegs.ispInterruptMaskReg,
+                           ((0xff << 16) | ISP_IMR_ENABLE_INT));
+
+}
+
+static void ql_release_to_lrg_buf_free_list(struct ql3_adapter *qdev,
+                                           struct ql_rcv_buf_cb *lrg_buf_cb)
+{
+       dma_addr_t map;
+       int err;
+       lrg_buf_cb->next = NULL;
+
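+       /*
+        * Append the control block to the free list.  If it has no skb
+        * (an earlier allocation failed), try to allocate and DMA-map a
+        * replacement now.
+        */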
+       if (qdev->lrg_buf_free_tail == NULL) {  /* The list is empty  */
+               qdev->lrg_buf_free_head = qdev->lrg_buf_free_tail = lrg_buf_cb;
+       } else {
+               qdev->lrg_buf_free_tail->next = lrg_buf_cb;
+               qdev->lrg_buf_free_tail = lrg_buf_cb;
+       }
+
+       if (!lrg_buf_cb->skb) {
+               lrg_buf_cb->skb = netdev_alloc_skb(qdev->ndev,
+                                                  qdev->lrg_buffer_len);
+               if (unlikely(!lrg_buf_cb->skb)) {
+                       qdev->lrg_buf_skb_check++;
+               } else {
+                       /*
+                        * We reserve some space to copy the ethhdr from the
+                        * first buffer
+                        */
+                       skb_reserve(lrg_buf_cb->skb, QL_HEADER_SPACE);
+                       map = pci_map_single(qdev->pdev,
+                                            lrg_buf_cb->skb->data,
+                                            qdev->lrg_buffer_len -
+                                            QL_HEADER_SPACE,
+                                            PCI_DMA_FROMDEVICE);
+                       err = pci_dma_mapping_error(qdev->pdev, map);
+                       if (err) {
+                               netdev_err(qdev->ndev,
+                                          "PCI mapping failed with error: %d\n",
+                                          err);
+                               dev_kfree_skb(lrg_buf_cb->skb);
+                               lrg_buf_cb->skb = NULL;
+
+                               qdev->lrg_buf_skb_check++;
+                               return;
+                       }
+
+                       lrg_buf_cb->buf_phy_addr_low =
+                           cpu_to_le32(LS_64BITS(map));
+                       lrg_buf_cb->buf_phy_addr_high =
+                           cpu_to_le32(MS_64BITS(map));
+                       dma_unmap_addr_set(lrg_buf_cb, mapaddr, map);
+                       dma_unmap_len_set(lrg_buf_cb, maplen,
+                                         qdev->lrg_buffer_len -
+                                         QL_HEADER_SPACE);
+               }
+       }
+
+       qdev->lrg_buf_free_count++;
+}
+
+static struct ql_rcv_buf_cb *ql_get_from_lrg_buf_free_list(struct ql3_adapter
+                                                          *qdev)
+{
+       struct ql_rcv_buf_cb *lrg_buf_cb = qdev->lrg_buf_free_head;
+
+       if (lrg_buf_cb != NULL) {
+               qdev->lrg_buf_free_head = lrg_buf_cb->next;
+               if (qdev->lrg_buf_free_head == NULL)
+                       qdev->lrg_buf_free_tail = NULL;
+               qdev->lrg_buf_free_count--;
+       }
+
+       return lrg_buf_cb;
+}
+
+static u32 addrBits = EEPROM_NO_ADDR_BITS;
+static u32 dataBits = EEPROM_NO_DATA_BITS;
+
+static void fm93c56a_deselect(struct ql3_adapter *qdev);
+static void eeprom_readword(struct ql3_adapter *qdev, u32 eepromAddr,
+                           unsigned short *value);
+
+/*
+ * Caller holds hw_lock.
+ */
+static void fm93c56a_select(struct ql3_adapter *qdev)
+{
+       struct ql3xxx_port_registers __iomem *port_regs =
+                       qdev->mem_map_registers;
+       __iomem u32 *spir = &port_regs->CommonRegs.serialPortInterfaceReg;
+
+       qdev->eeprom_cmd_data = AUBURN_EEPROM_CS_1;
+       ql_write_nvram_reg(qdev, spir, ISP_NVRAM_MASK | qdev->eeprom_cmd_data);
+       ql_write_nvram_reg(qdev, spir,
+                          ((ISP_NVRAM_MASK << 16) | qdev->eeprom_cmd_data));
+}
+
+/*
+ * Caller holds hw_lock.
+ */
+static void fm93c56a_cmd(struct ql3_adapter *qdev, u32 cmd, u32 eepromAddr)
+{
+       int i;
+       u32 mask;
+       u32 dataBit;
+       u32 previousBit;
+       struct ql3xxx_port_registers __iomem *port_regs =
+                       qdev->mem_map_registers;
+       __iomem u32 *spir = &port_regs->CommonRegs.serialPortInterfaceReg;
+
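+       /*
+        * Bit-bang the command and address bits to the 93C56A serial
+        * EEPROM: each bit is placed on the DO line and latched with an
+        * explicit clock rise/fall through the serial port register.
+        */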
+       /* Clock in a zero, then do the start bit */
+       ql_write_nvram_reg(qdev, spir,
+                          (ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
+                           AUBURN_EEPROM_DO_1));
+       ql_write_nvram_reg(qdev, spir,
+                          (ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
+                           AUBURN_EEPROM_DO_1 | AUBURN_EEPROM_CLK_RISE));
+       ql_write_nvram_reg(qdev, spir,
+                          (ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
+                           AUBURN_EEPROM_DO_1 | AUBURN_EEPROM_CLK_FALL));
+
+       mask = 1 << (FM93C56A_CMD_BITS - 1);
+       /* Force the previous data bit to be different */
+       previousBit = 0xffff;
+       for (i = 0; i < FM93C56A_CMD_BITS; i++) {
+               dataBit = (cmd & mask)
+                       ? AUBURN_EEPROM_DO_1
+                       : AUBURN_EEPROM_DO_0;
+               if (previousBit != dataBit) {
+                       /* If the bit changed, change the DO state to match */
+                       ql_write_nvram_reg(qdev, spir,
+                                          (ISP_NVRAM_MASK |
+                                           qdev->eeprom_cmd_data | dataBit));
+                       previousBit = dataBit;
+               }
+               ql_write_nvram_reg(qdev, spir,
+                                  (ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
+                                   dataBit | AUBURN_EEPROM_CLK_RISE));
+               ql_write_nvram_reg(qdev, spir,
+                                  (ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
+                                   dataBit | AUBURN_EEPROM_CLK_FALL));
+               cmd = cmd << 1;
+       }
+
+       mask = 1 << (addrBits - 1);
+       /* Force the previous data bit to be different */
+       previousBit = 0xffff;
+       for (i = 0; i < addrBits; i++) {
+               dataBit = (eepromAddr & mask) ? AUBURN_EEPROM_DO_1
+                       : AUBURN_EEPROM_DO_0;
+               if (previousBit != dataBit) {
+                       /*
+                        * If the bit changed, then change the DO state to
+                        * match
+                        */
+                       ql_write_nvram_reg(qdev, spir,
+                                          (ISP_NVRAM_MASK |
+                                           qdev->eeprom_cmd_data | dataBit));
+                       previousBit = dataBit;
+               }
+               ql_write_nvram_reg(qdev, spir,
+                                  (ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
+                                   dataBit | AUBURN_EEPROM_CLK_RISE));
+               ql_write_nvram_reg(qdev, spir,
+                                  (ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
+                                   dataBit | AUBURN_EEPROM_CLK_FALL));
+               eepromAddr = eepromAddr << 1;
+       }
+}
+
+/*
+ * Caller holds hw_lock.
+ */
+static void fm93c56a_deselect(struct ql3_adapter *qdev)
+{
+       struct ql3xxx_port_registers __iomem *port_regs =
+                       qdev->mem_map_registers;
+       __iomem u32 *spir = &port_regs->CommonRegs.serialPortInterfaceReg;
+
+       qdev->eeprom_cmd_data = AUBURN_EEPROM_CS_0;
+       ql_write_nvram_reg(qdev, spir, ISP_NVRAM_MASK | qdev->eeprom_cmd_data);
+}
+
+/*
+ * Caller holds hw_lock.
+ */
+static void fm93c56a_datain(struct ql3_adapter *qdev, unsigned short *value)
+{
+       int i;
+       u32 data = 0;
+       u32 dataBit;
+       struct ql3xxx_port_registers __iomem *port_regs =
+                       qdev->mem_map_registers;
+       __iomem u32 *spir = &port_regs->CommonRegs.serialPortInterfaceReg;
+
+       /* Read the data bits */
+       /* The first bit is a dummy.  Clock right over it. */
+       for (i = 0; i < dataBits; i++) {
+               ql_write_nvram_reg(qdev, spir,
+                                  ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
+                                  AUBURN_EEPROM_CLK_RISE);
+               ql_write_nvram_reg(qdev, spir,
+                                  ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
+                                  AUBURN_EEPROM_CLK_FALL);
+               dataBit = (ql_read_common_reg(qdev, spir) &
+                          AUBURN_EEPROM_DI_1) ? 1 : 0;
+               data = (data << 1) | dataBit;
+       }
+       *value = (u16)data;
+}
+
+/*
+ * Caller holds hw_lock.
+ */
+static void eeprom_readword(struct ql3_adapter *qdev,
+                           u32 eepromAddr, unsigned short *value)
+{
+       fm93c56a_select(qdev);
+       fm93c56a_cmd(qdev, (int)FM93C56A_READ, eepromAddr);
+       fm93c56a_datain(qdev, value);
+       fm93c56a_deselect(qdev);
+}
+
+static void ql_set_mac_addr(struct net_device *ndev, u16 *addr)
+{
+       __le16 *p = (__le16 *)ndev->dev_addr;
+       p[0] = cpu_to_le16(addr[0]);
+       p[1] = cpu_to_le16(addr[1]);
+       p[2] = cpu_to_le16(addr[2]);
+}
+
+static int ql_get_nvram_params(struct ql3_adapter *qdev)
+{
+       u16 *pEEPROMData;
+       u16 checksum = 0;
+       u32 index;
+       unsigned long hw_flags;
+
+       spin_lock_irqsave(&qdev->hw_lock, hw_flags);
+
+       pEEPROMData = (u16 *)&qdev->nvram_data;
+       qdev->eeprom_cmd_data = 0;
+       if (ql_sem_spinlock(qdev, QL_NVRAM_SEM_MASK,
+                       (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
+                        2) << 10)) {
+               pr_err("%s: Failed ql_sem_spinlock()\n", __func__);
+               spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
+               return -1;
+       }
+
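+       /* Read the entire EEPROM; a valid image checksums to zero */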
+       for (index = 0; index < EEPROM_SIZE; index++) {
+               eeprom_readword(qdev, index, pEEPROMData);
+               checksum += *pEEPROMData;
+               pEEPROMData++;
+       }
+       ql_sem_unlock(qdev, QL_NVRAM_SEM_MASK);
+
+       if (checksum != 0) {
+               netdev_err(qdev->ndev, "checksum should be zero, is %x!!\n",
+                          checksum);
+               spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
+               return -1;
+       }
+
+       spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
+       return checksum;
+}
+
+static const u32 PHYAddr[2] = {
+       PORT0_PHY_ADDRESS, PORT1_PHY_ADDRESS
+};
+
+static int ql_wait_for_mii_ready(struct ql3_adapter *qdev)
+{
+       struct ql3xxx_port_registers __iomem *port_regs =
+                       qdev->mem_map_registers;
+       u32 temp;
+       int count = 1000;
+
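+       /* Poll for up to ~10 ms (1000 x 10 us) for the busy bit to clear */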
+       while (count) {
+               temp = ql_read_page0_reg(qdev, &port_regs->macMIIStatusReg);
+               if (!(temp & MAC_MII_STATUS_BSY))
+                       return 0;
+               udelay(10);
+               count--;
+       }
+       return -1;
+}
+
+static void ql_mii_enable_scan_mode(struct ql3_adapter *qdev)
+{
+       struct ql3xxx_port_registers __iomem *port_regs =
+                       qdev->mem_map_registers;
+       u32 scanControl;
+
+       if (qdev->numPorts > 1) {
+               /* Auto scan will cycle through multiple ports */
+               scanControl = MAC_MII_CONTROL_AS | MAC_MII_CONTROL_SC;
+       } else {
+               scanControl = MAC_MII_CONTROL_SC;
+       }
+
+       /*
+        * Scan register 1 of the PHY/PETBI and set up to scan both
+        * devices.  The autoscan starts from the first register and
+        * completes the last one before rolling over to the first.
+        */
+       ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
+                          PHYAddr[0] | MII_SCAN_REGISTER);
+
+       ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
+                          (scanControl) |
+                          ((MAC_MII_CONTROL_SC | MAC_MII_CONTROL_AS) << 16));
+}
+
+static u8 ql_mii_disable_scan_mode(struct ql3_adapter *qdev)
+{
+       u8 ret;
+       struct ql3xxx_port_registers __iomem *port_regs =
+                                       qdev->mem_map_registers;
+
+       /* See if scan mode is enabled before we turn it off */
+       if (ql_read_page0_reg(qdev, &port_regs->macMIIMgmtControlReg) &
+           (MAC_MII_CONTROL_AS | MAC_MII_CONTROL_SC)) {
+               /* Scan is enabled */
+               ret = 1;
+       } else {
+               /* Scan is disabled */
+               ret = 0;
+       }
+
+       /*
+        * When disabling scan mode you must first change the MII register
+        * address
+        */
+       ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
+                          PHYAddr[0] | MII_SCAN_REGISTER);
+
+       ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
+                          ((MAC_MII_CONTROL_SC | MAC_MII_CONTROL_AS |
+                            MAC_MII_CONTROL_RC) << 16));
+
+       return ret;
+}
+
+static int ql_mii_write_reg_ex(struct ql3_adapter *qdev,
+                              u16 regAddr, u16 value, u32 phyAddr)
+{
+       struct ql3xxx_port_registers __iomem *port_regs =
+                       qdev->mem_map_registers;
+       u8 scanWasEnabled;
+
+       scanWasEnabled = ql_mii_disable_scan_mode(qdev);
+
+       if (ql_wait_for_mii_ready(qdev)) {
+               netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG);
+               return -1;
+       }
+
+       ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
+                          phyAddr | regAddr);
+
+       ql_write_page0_reg(qdev, &port_regs->macMIIMgmtDataReg, value);
+
+       /* Wait for write to complete 9/10/04 SJP */
+       if (ql_wait_for_mii_ready(qdev)) {
+               netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG);
+               return -1;
+       }
+
+       if (scanWasEnabled)
+               ql_mii_enable_scan_mode(qdev);
+
+       return 0;
+}
+
+static int ql_mii_read_reg_ex(struct ql3_adapter *qdev, u16 regAddr,
+                             u16 *value, u32 phyAddr)
+{
+       struct ql3xxx_port_registers __iomem *port_regs =
+                       qdev->mem_map_registers;
+       u8 scanWasEnabled;
+       u32 temp;
+
+       scanWasEnabled = ql_mii_disable_scan_mode(qdev);
+
+       if (ql_wait_for_mii_ready(qdev)) {
+               netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG);
+               return -1;
+       }
+
+       ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
+                          phyAddr | regAddr);
+
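+       /*
+        * Pulse the read-cycle bit: the upper halfword is the write-enable
+        * mask, so the first write clears RC and the second sets it to
+        * start the read cycle.
+        */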
+       ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
+                          (MAC_MII_CONTROL_RC << 16));
+
+       ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
+                          (MAC_MII_CONTROL_RC << 16) | MAC_MII_CONTROL_RC);
+
+       /* Wait for the read to complete */
+       if (ql_wait_for_mii_ready(qdev)) {
+               netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG);
+               return -1;
+       }
+
+       temp = ql_read_page0_reg(qdev, &port_regs->macMIIMgmtDataReg);
+       *value = (u16) temp;
+
+       if (scanWasEnabled)
+               ql_mii_enable_scan_mode(qdev);
+
+       return 0;
+}
+
+static int ql_mii_write_reg(struct ql3_adapter *qdev, u16 regAddr, u16 value)
+{
+       struct ql3xxx_port_registers __iomem *port_regs =
+                       qdev->mem_map_registers;
+
+       ql_mii_disable_scan_mode(qdev);
+
+       if (ql_wait_for_mii_ready(qdev)) {
+               netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG);
+               return -1;
+       }
+
+       ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
+                          qdev->PHYAddr | regAddr);
+
+       ql_write_page0_reg(qdev, &port_regs->macMIIMgmtDataReg, value);
+
+       /* Wait for write to complete. */
+       if (ql_wait_for_mii_ready(qdev)) {
+               netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG);
+               return -1;
+       }
+
+       ql_mii_enable_scan_mode(qdev);
+
+       return 0;
+}
+
+static int ql_mii_read_reg(struct ql3_adapter *qdev, u16 regAddr, u16 *value)
+{
+       u32 temp;
+       struct ql3xxx_port_registers __iomem *port_regs =
+                       qdev->mem_map_registers;
+
+       ql_mii_disable_scan_mode(qdev);
+
+       if (ql_wait_for_mii_ready(qdev)) {
+               netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG);
+               return -1;
+       }
+
+       ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
+                          qdev->PHYAddr | regAddr);
+
+       ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
+                          (MAC_MII_CONTROL_RC << 16));
+
+       ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
+                          (MAC_MII_CONTROL_RC << 16) | MAC_MII_CONTROL_RC);
+
+       /* Wait for the read to complete */
+       if (ql_wait_for_mii_ready(qdev)) {
+               netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG);
+               return -1;
+       }
+
+       temp = ql_read_page0_reg(qdev, &port_regs->macMIIMgmtDataReg);
+       *value = (u16) temp;
+
+       ql_mii_enable_scan_mode(qdev);
+
+       return 0;
+}
+
+static void ql_petbi_reset(struct ql3_adapter *qdev)
+{
+       ql_mii_write_reg(qdev, PETBI_CONTROL_REG, PETBI_CTRL_SOFT_RESET);
+}
+
+static void ql_petbi_start_neg(struct ql3_adapter *qdev)
+{
+       u16 reg;
+
+       /* Enable Auto-negotiation sense */
+       ql_mii_read_reg(qdev, PETBI_TBI_CTRL, &reg);
+       reg |= PETBI_TBI_AUTO_SENSE;
+       ql_mii_write_reg(qdev, PETBI_TBI_CTRL, reg);
+
+       ql_mii_write_reg(qdev, PETBI_NEG_ADVER,
+                        PETBI_NEG_PAUSE | PETBI_NEG_DUPLEX);
+
+       ql_mii_write_reg(qdev, PETBI_CONTROL_REG,
+                        PETBI_CTRL_AUTO_NEG | PETBI_CTRL_RESTART_NEG |
+                        PETBI_CTRL_FULL_DUPLEX | PETBI_CTRL_SPEED_1000);
+
+}
+
+static void ql_petbi_reset_ex(struct ql3_adapter *qdev)
+{
+       ql_mii_write_reg_ex(qdev, PETBI_CONTROL_REG, PETBI_CTRL_SOFT_RESET,
+                           PHYAddr[qdev->mac_index]);
+}
+
+static void ql_petbi_start_neg_ex(struct ql3_adapter *qdev)
+{
+       u16 reg;
+
+       /* Enable Auto-negotiation sense */
+       ql_mii_read_reg_ex(qdev, PETBI_TBI_CTRL, &reg,
+                          PHYAddr[qdev->mac_index]);
+       reg |= PETBI_TBI_AUTO_SENSE;
+       ql_mii_write_reg_ex(qdev, PETBI_TBI_CTRL, reg,
+                           PHYAddr[qdev->mac_index]);
+
+       ql_mii_write_reg_ex(qdev, PETBI_NEG_ADVER,
+                           PETBI_NEG_PAUSE | PETBI_NEG_DUPLEX,
+                           PHYAddr[qdev->mac_index]);
+
+       ql_mii_write_reg_ex(qdev, PETBI_CONTROL_REG,
+                           PETBI_CTRL_AUTO_NEG | PETBI_CTRL_RESTART_NEG |
+                           PETBI_CTRL_FULL_DUPLEX | PETBI_CTRL_SPEED_1000,
+                           PHYAddr[qdev->mac_index]);
+}
+
+static void ql_petbi_init(struct ql3_adapter *qdev)
+{
+       ql_petbi_reset(qdev);
+       ql_petbi_start_neg(qdev);
+}
+
+static void ql_petbi_init_ex(struct ql3_adapter *qdev)
+{
+       ql_petbi_reset_ex(qdev);
+       ql_petbi_start_neg_ex(qdev);
+}
+
+static int ql_is_petbi_neg_pause(struct ql3_adapter *qdev)
+{
+       u16 reg;
+
+       if (ql_mii_read_reg(qdev, PETBI_NEG_PARTNER, &reg) < 0)
+               return 0;
+
+       return (reg & PETBI_NEG_PAUSE_MASK) == PETBI_NEG_PAUSE;
+}
+
+static void phyAgereSpecificInit(struct ql3_adapter *qdev, u32 miiAddr)
+{
+       netdev_info(qdev->ndev, "enabling Agere specific PHY\n");
+       /* power down device bit 11 = 1 */
+       ql_mii_write_reg_ex(qdev, 0x00, 0x1940, miiAddr);
+       /* enable diagnostic mode bit 2 = 1 */
+       ql_mii_write_reg_ex(qdev, 0x12, 0x840e, miiAddr);
+       /* 1000MB amplitude adjust (see Agere errata) */
+       ql_mii_write_reg_ex(qdev, 0x10, 0x8805, miiAddr);
+       /* 1000MB amplitude adjust (see Agere errata) */
+       ql_mii_write_reg_ex(qdev, 0x11, 0xf03e, miiAddr);
+       /* 100MB amplitude adjust (see Agere errata) */
+       ql_mii_write_reg_ex(qdev, 0x10, 0x8806, miiAddr);
+       /* 100MB amplitude adjust (see Agere errata) */
+       ql_mii_write_reg_ex(qdev, 0x11, 0x003e, miiAddr);
+       /* 10MB amplitude adjust (see Agere errata) */
+       ql_mii_write_reg_ex(qdev, 0x10, 0x8807, miiAddr);
+       /* 10MB amplitude adjust (see Agere errata) */
+       ql_mii_write_reg_ex(qdev, 0x11, 0x1f00, miiAddr);
+       /* point to hidden reg 0x2806 */
+       ql_mii_write_reg_ex(qdev, 0x10, 0x2806, miiAddr);
+       /* Write new PHYAD w/bit 5 set */
+       ql_mii_write_reg_ex(qdev, 0x11,
+                           0x0020 | (PHYAddr[qdev->mac_index] >> 8), miiAddr);
+       /*
+        * Disable diagnostic mode bit 2 = 0
+        * Power up device bit 11 = 0
+        * Link up (on) and activity (blink)
+        */
+       ql_mii_write_reg(qdev, 0x12, 0x840a);
+       ql_mii_write_reg(qdev, 0x00, 0x1140);
+       ql_mii_write_reg(qdev, 0x1c, 0xfaf0);
+}
+
+static enum PHY_DEVICE_TYPE getPhyType(struct ql3_adapter *qdev,
+                                      u16 phyIdReg0, u16 phyIdReg1)
+{
+       enum PHY_DEVICE_TYPE result = PHY_TYPE_UNKNOWN;
+       u32   oui;
+       u16   model;
+       int i;
+
+       if (phyIdReg0 == 0xffff)
+               return result;
+
+       if (phyIdReg1 == 0xffff)
+               return result;
+
+       /* oui is split between two registers */
+       oui = (phyIdReg0 << 6) | ((phyIdReg1 & PHY_OUI_1_MASK) >> 10);
+
+       model = (phyIdReg1 & PHY_MODEL_MASK) >> 4;
+
+       /* Scan table for this PHY */
+       for (i = 0; i < MAX_PHY_DEV_TYPES; i++) {
+               if ((oui == PHY_DEVICES[i].phyIdOUI) &&
+                   (model == PHY_DEVICES[i].phyIdModel)) {
+                       netdev_info(qdev->ndev, "Phy: %s\n",
+                                   PHY_DEVICES[i].name);
+                       result = PHY_DEVICES[i].phyDevice;
+                       break;
+               }
+       }
+
+       return result;
+}
+
+static int ql_phy_get_speed(struct ql3_adapter *qdev)
+{
+       u16 reg;
+
+       switch (qdev->phyType) {
+       case PHY_AGERE_ET1011C: {
+               if (ql_mii_read_reg(qdev, 0x1A, &reg) < 0)
+                       return 0;
+
+               reg = (reg >> 8) & 3;
+               break;
+       }
+       default:
+               if (ql_mii_read_reg(qdev, AUX_CONTROL_STATUS, &reg) < 0)
+                       return 0;
+
+               reg = (((reg & 0x18) >> 3) & 3);
+       }
+
+       switch (reg) {
+       case 2:
+               return SPEED_1000;
+       case 1:
+               return SPEED_100;
+       case 0:
+               return SPEED_10;
+       default:
+               return -1;
+       }
+}
+
+static int ql_is_full_dup(struct ql3_adapter *qdev)
+{
+       u16 reg;
+
+       switch (qdev->phyType) {
+       case PHY_AGERE_ET1011C: {
+               if (ql_mii_read_reg(qdev, 0x1A, &reg))
+                       return 0;
+
+               return ((reg & 0x0080) && (reg & 0x1000)) != 0;
+       }
+       case PHY_VITESSE_VSC8211:
+       default: {
+               if (ql_mii_read_reg(qdev, AUX_CONTROL_STATUS, &reg) < 0)
+                       return 0;
+               return (reg & PHY_AUX_DUPLEX_STAT) != 0;
+       }
+       }
+}
+
+static int ql_is_phy_neg_pause(struct ql3_adapter *qdev)
+{
+       u16 reg;
+
+       if (ql_mii_read_reg(qdev, PHY_NEG_PARTNER, &reg) < 0)
+               return 0;
+
+       return (reg & PHY_NEG_PAUSE) != 0;
+}
+
+static int PHY_Setup(struct ql3_adapter *qdev)
+{
+       u16   reg1;
+       u16   reg2;
+       bool  agereAddrChangeNeeded = false;
+       u32 miiAddr = 0;
+       int err;
+
+       /*  Determine the PHY we are using by reading the IDs */
+       err = ql_mii_read_reg(qdev, PHY_ID_0_REG, &reg1);
+       if (err != 0) {
+               netdev_err(qdev->ndev, "Could not read from reg PHY_ID_0_REG\n");
+               return err;
+       }
+
+       err = ql_mii_read_reg(qdev, PHY_ID_1_REG, &reg2);
+       if (err != 0) {
+               netdev_err(qdev->ndev, "Could not read from reg PHY_ID_1_REG\n");
+               return err;
+       }
+
+       /*  Check if we have an Agere PHY */
+       if ((reg1 == 0xffff) || (reg2 == 0xffff)) {
+
+               /* Determine which MII address we should be using,
+                  based on the index of the card */
+               if (qdev->mac_index == 0)
+                       miiAddr = MII_AGERE_ADDR_1;
+               else
+                       miiAddr = MII_AGERE_ADDR_2;
+
+               err = ql_mii_read_reg_ex(qdev, PHY_ID_0_REG, &reg1, miiAddr);
+               if (err != 0) {
+                       netdev_err(qdev->ndev,
+                                  "Could not read from reg PHY_ID_0_REG after Agere detected\n");
+                       return err;
+               }
+
+               err = ql_mii_read_reg_ex(qdev, PHY_ID_1_REG, &reg2, miiAddr);
+               if (err != 0) {
+                       netdev_err(qdev->ndev, "Could not read from reg PHY_ID_1_REG after Agere detected\n");
+                       return err;
+               }
+
+               /*  We need to remember to initialize the Agere PHY */
+               agereAddrChangeNeeded = true;
+       }
+
+       /*  Determine the particular PHY we have on board to apply
+           PHY-specific initializations */
+       qdev->phyType = getPhyType(qdev, reg1, reg2);
+
+       if ((qdev->phyType == PHY_AGERE_ET1011C) && agereAddrChangeNeeded) {
+               /* need this here so address gets changed */
+               phyAgereSpecificInit(qdev, miiAddr);
+       } else if (qdev->phyType == PHY_TYPE_UNKNOWN) {
+               netdev_err(qdev->ndev, "PHY is unknown\n");
+               return -EIO;
+       }
+
+       return 0;
+}
+
+/*
+ * Caller holds hw_lock.
+ */
+static void ql_mac_enable(struct ql3_adapter *qdev, u32 enable)
+{
+       struct ql3xxx_port_registers __iomem *port_regs =
+                       qdev->mem_map_registers;
+       u32 value;
+
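+       /* The upper 16 bits form the write-enable mask for the bits below */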
+       if (enable)
+               value = (MAC_CONFIG_REG_PE | (MAC_CONFIG_REG_PE << 16));
+       else
+               value = (MAC_CONFIG_REG_PE << 16);
+
+       if (qdev->mac_index)
+               ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value);
+       else
+               ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value);
+}
+
+/*
+ * Caller holds hw_lock.
+ */
+static void ql_mac_cfg_soft_reset(struct ql3_adapter *qdev, u32 enable)
+{
+       struct ql3xxx_port_registers __iomem *port_regs =
+                       qdev->mem_map_registers;
+       u32 value;
+
+       if (enable)
+               value = (MAC_CONFIG_REG_SR | (MAC_CONFIG_REG_SR << 16));
+       else
+               value = (MAC_CONFIG_REG_SR << 16);
+
+       if (qdev->mac_index)
+               ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value);
+       else
+               ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value);
+}
+
+/*
+ * Caller holds hw_lock.
+ */
+static void ql_mac_cfg_gig(struct ql3_adapter *qdev, u32 enable)
+{
+       struct ql3xxx_port_registers __iomem *port_regs =
+                       qdev->mem_map_registers;
+       u32 value;
+
+       if (enable)
+               value = (MAC_CONFIG_REG_GM | (MAC_CONFIG_REG_GM << 16));
+       else
+               value = (MAC_CONFIG_REG_GM << 16);
+
+       if (qdev->mac_index)
+               ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value);
+       else
+               ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value);
+}
+
+/*
+ * Caller holds hw_lock.
+ */
+static void ql_mac_cfg_full_dup(struct ql3_adapter *qdev, u32 enable)
+{
+       struct ql3xxx_port_registers __iomem *port_regs =
+                       qdev->mem_map_registers;
+       u32 value;
+
+       if (enable)
+               value = (MAC_CONFIG_REG_FD | (MAC_CONFIG_REG_FD << 16));
+       else
+               value = (MAC_CONFIG_REG_FD << 16);
+
+       if (qdev->mac_index)
+               ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value);
+       else
+               ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value);
+}
+
+/*
+ * Caller holds hw_lock.
+ */
+static void ql_mac_cfg_pause(struct ql3_adapter *qdev, u32 enable)
+{
+       struct ql3xxx_port_registers __iomem *port_regs =
+                       qdev->mem_map_registers;
+       u32 value;
+
+       if (enable)
+               value =
+                   ((MAC_CONFIG_REG_TF | MAC_CONFIG_REG_RF) |
+                    ((MAC_CONFIG_REG_TF | MAC_CONFIG_REG_RF) << 16));
+       else
+               value = ((MAC_CONFIG_REG_TF | MAC_CONFIG_REG_RF) << 16);
+
+       if (qdev->mac_index)
+               ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value);
+       else
+               ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value);
+}
+
+/*
+ * Caller holds hw_lock.
+ */
+static int ql_is_fiber(struct ql3_adapter *qdev)
+{
+       struct ql3xxx_port_registers __iomem *port_regs =
+                       qdev->mem_map_registers;
+       u32 bitToCheck = 0;
+       u32 temp;
+
+       switch (qdev->mac_index) {
+       case 0:
+               bitToCheck = PORT_STATUS_SM0;
+               break;
+       case 1:
+               bitToCheck = PORT_STATUS_SM1;
+               break;
+       }
+
+       temp = ql_read_page0_reg(qdev, &port_regs->portStatus);
+       return (temp & bitToCheck) != 0;
+}
+
+static int ql_is_auto_cfg(struct ql3_adapter *qdev)
+{
+       u16 reg;
+       ql_mii_read_reg(qdev, 0x00, &reg);
+       return (reg & 0x1000) != 0;
+}
+
+/*
+ * Caller holds hw_lock.
+ */
+static int ql_is_auto_neg_complete(struct ql3_adapter *qdev)
+{
+       struct ql3xxx_port_registers __iomem *port_regs =
+                       qdev->mem_map_registers;
+       u32 bitToCheck = 0;
+       u32 temp;
+
+       switch (qdev->mac_index) {
+       case 0:
+               bitToCheck = PORT_STATUS_AC0;
+               break;
+       case 1:
+               bitToCheck = PORT_STATUS_AC1;
+               break;
+       }
+
+       temp = ql_read_page0_reg(qdev, &port_regs->portStatus);
+       if (temp & bitToCheck) {
+               netif_info(qdev, link, qdev->ndev, "Auto-Negotiate complete\n");
+               return 1;
+       }
+       netif_info(qdev, link, qdev->ndev, "Auto-Negotiate incomplete\n");
+       return 0;
+}
+
+/*
+ *  ql_is_neg_pause() returns 1 if pause was negotiated to be on
+ */
+static int ql_is_neg_pause(struct ql3_adapter *qdev)
+{
+       if (ql_is_fiber(qdev))
+               return ql_is_petbi_neg_pause(qdev);
+       else
+               return ql_is_phy_neg_pause(qdev);
+}
+
+static int ql_auto_neg_error(struct ql3_adapter *qdev)
+{
+       struct ql3xxx_port_registers __iomem *port_regs =
+                       qdev->mem_map_registers;
+       u32 bitToCheck = 0;
+       u32 temp;
+
+       switch (qdev->mac_index) {
+       case 0:
+               bitToCheck = PORT_STATUS_AE0;
+               break;
+       case 1:
+               bitToCheck = PORT_STATUS_AE1;
+               break;
+       }
+       temp = ql_read_page0_reg(qdev, &port_regs->portStatus);
+       return (temp & bitToCheck) != 0;
+}
+
+static u32 ql_get_link_speed(struct ql3_adapter *qdev)
+{
+       if (ql_is_fiber(qdev))
+               return SPEED_1000;
+       else
+               return ql_phy_get_speed(qdev);
+}
+
+static int ql_is_link_full_dup(struct ql3_adapter *qdev)
+{
+       if (ql_is_fiber(qdev))
+               return 1;
+       else
+               return ql_is_full_dup(qdev);
+}
+
+/*
+ * Caller holds hw_lock.
+ */
+static int ql_link_down_detect(struct ql3_adapter *qdev)
+{
+       struct ql3xxx_port_registers __iomem *port_regs =
+                       qdev->mem_map_registers;
+       u32 bitToCheck = 0;
+       u32 temp;
+
+       switch (qdev->mac_index) {
+       case 0:
+               bitToCheck = ISP_CONTROL_LINK_DN_0;
+               break;
+       case 1:
+               bitToCheck = ISP_CONTROL_LINK_DN_1;
+               break;
+       }
+
+       temp =
+           ql_read_common_reg(qdev, &port_regs->CommonRegs.ispControlStatus);
+       return (temp & bitToCheck) != 0;
+}
+
+/*
+ * Caller holds hw_lock.
+ */
+static int ql_link_down_detect_clear(struct ql3_adapter *qdev)
+{
+       struct ql3xxx_port_registers __iomem *port_regs =
+                       qdev->mem_map_registers;
+
+       switch (qdev->mac_index) {
+       case 0:
+               ql_write_common_reg(qdev,
+                                   &port_regs->CommonRegs.ispControlStatus,
+                                   (ISP_CONTROL_LINK_DN_0) |
+                                   (ISP_CONTROL_LINK_DN_0 << 16));
+               break;
+
+       case 1:
+               ql_write_common_reg(qdev,
+                                   &port_regs->CommonRegs.ispControlStatus,
+                                   (ISP_CONTROL_LINK_DN_1) |
+                                   (ISP_CONTROL_LINK_DN_1 << 16));
+               break;
+
+       default:
+               return 1;
+       }
+
+       return 0;
+}
+
+/*
+ * Caller holds hw_lock.
+ */
+static int ql_this_adapter_controls_port(struct ql3_adapter *qdev)
+{
+       struct ql3xxx_port_registers __iomem *port_regs =
+                       qdev->mem_map_registers;
+       u32 bitToCheck = 0;
+       u32 temp;
+
+       switch (qdev->mac_index) {
+       case 0:
+               bitToCheck = PORT_STATUS_F1_ENABLED;
+               break;
+       case 1:
+               bitToCheck = PORT_STATUS_F3_ENABLED;
+               break;
+       default:
+               break;
+       }
+
+       temp = ql_read_page0_reg(qdev, &port_regs->portStatus);
+       if (temp & bitToCheck) {
+               netif_printk(qdev, link, KERN_DEBUG, qdev->ndev,
+                            "not link master\n");
+               return 0;
+       }
+
+       netif_printk(qdev, link, KERN_DEBUG, qdev->ndev, "link master\n");
+       return 1;
+}
+
+static void ql_phy_reset_ex(struct ql3_adapter *qdev)
+{
+       ql_mii_write_reg_ex(qdev, CONTROL_REG, PHY_CTRL_SOFT_RESET,
+                           PHYAddr[qdev->mac_index]);
+}
+
+static void ql_phy_start_neg_ex(struct ql3_adapter *qdev)
+{
+       u16 reg;
+       u16 portConfiguration;
+
+       if (qdev->phyType == PHY_AGERE_ET1011C)
+               ql_mii_write_reg(qdev, 0x13, 0x0000);
+                                       /* turn off external loopback */
+
+       if (qdev->mac_index == 0)
+               portConfiguration =
+                       qdev->nvram_data.macCfg_port0.portConfiguration;
+       else
+               portConfiguration =
+                       qdev->nvram_data.macCfg_port1.portConfiguration;
+
+       /*  Some HBAs in the field are set to 0 and they need to
+           be reinterpreted with a default value */
+       if (portConfiguration == 0)
+               portConfiguration = PORT_CONFIG_DEFAULT;
+
+       /* Set the 1000 advertisements */
+       ql_mii_read_reg_ex(qdev, PHY_GIG_CONTROL, &reg,
+                          PHYAddr[qdev->mac_index]);
+       reg &= ~PHY_GIG_ALL_PARAMS;
+
+       if (portConfiguration & PORT_CONFIG_1000MB_SPEED) {
+               if (portConfiguration & PORT_CONFIG_FULL_DUPLEX_ENABLED)
+                       reg |= PHY_GIG_ADV_1000F;
+               else
+                       reg |= PHY_GIG_ADV_1000H;
+       }
+
+       ql_mii_write_reg_ex(qdev, PHY_GIG_CONTROL, reg,
+                           PHYAddr[qdev->mac_index]);
+
+       /* Set the 10/100 & pause negotiation advertisements */
+       ql_mii_read_reg_ex(qdev, PHY_NEG_ADVER, &reg,
+                          PHYAddr[qdev->mac_index]);
+       reg &= ~PHY_NEG_ALL_PARAMS;
+
+       if (portConfiguration & PORT_CONFIG_SYM_PAUSE_ENABLED)
+               reg |= PHY_NEG_ASY_PAUSE | PHY_NEG_SYM_PAUSE;
+
+       if (portConfiguration & PORT_CONFIG_FULL_DUPLEX_ENABLED) {
+               if (portConfiguration & PORT_CONFIG_100MB_SPEED)
+                       reg |= PHY_NEG_ADV_100F;
+
+               if (portConfiguration & PORT_CONFIG_10MB_SPEED)
+                       reg |= PHY_NEG_ADV_10F;
+       }
+
+       if (portConfiguration & PORT_CONFIG_HALF_DUPLEX_ENABLED) {
+               if (portConfiguration & PORT_CONFIG_100MB_SPEED)
+                       reg |= PHY_NEG_ADV_100H;
+
+               if (portConfiguration & PORT_CONFIG_10MB_SPEED)
+                       reg |= PHY_NEG_ADV_10H;
+       }
+
+       if (portConfiguration & PORT_CONFIG_1000MB_SPEED)
+               reg |= 1;
+
+       ql_mii_write_reg_ex(qdev, PHY_NEG_ADVER, reg,
+                           PHYAddr[qdev->mac_index]);
+
+       ql_mii_read_reg_ex(qdev, CONTROL_REG, &reg, PHYAddr[qdev->mac_index]);
+
+       ql_mii_write_reg_ex(qdev, CONTROL_REG,
+                           reg | PHY_CTRL_RESTART_NEG | PHY_CTRL_AUTO_NEG,
+                           PHYAddr[qdev->mac_index]);
+}
+
+static void ql_phy_init_ex(struct ql3_adapter *qdev)
+{
+       ql_phy_reset_ex(qdev);
+       PHY_Setup(qdev);
+       ql_phy_start_neg_ex(qdev);
+}
+
+/*
+ * Caller holds hw_lock.
+ */
+static u32 ql_get_link_state(struct ql3_adapter *qdev)
+{
+       struct ql3xxx_port_registers __iomem *port_regs =
+                       qdev->mem_map_registers;
+       u32 bitToCheck = 0;
+       u32 temp, linkState;
+
+       switch (qdev->mac_index) {
+       case 0:
+               bitToCheck = PORT_STATUS_UP0;
+               break;
+       case 1:
+               bitToCheck = PORT_STATUS_UP1;
+               break;
+       }
+
+       temp = ql_read_page0_reg(qdev, &port_regs->portStatus);
+       if (temp & bitToCheck)
+               linkState = LS_UP;
+       else
+               linkState = LS_DOWN;
+
+       return linkState;
+}
+
+static int ql_port_start(struct ql3_adapter *qdev)
+{
+       if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
+               (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
+                        2) << 7)) {
+               netdev_err(qdev->ndev, "Could not get hw lock for GIO\n");
+               return -1;
+       }
+
+       if (ql_is_fiber(qdev)) {
+               ql_petbi_init(qdev);
+       } else {
+               /* Copper port */
+               ql_phy_init_ex(qdev);
+       }
+
+       ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
+       return 0;
+}
+
+static int ql_finish_auto_neg(struct ql3_adapter *qdev)
+{
+
+       if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
+               (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
+                        2) << 7))
+               return -1;
+
+       if (!ql_auto_neg_error(qdev)) {
+               if (test_bit(QL_LINK_MASTER, &qdev->flags)) {
+                       /* configure the MAC */
+                       netif_printk(qdev, link, KERN_DEBUG, qdev->ndev,
+                                    "Configuring link\n");
+                       ql_mac_cfg_soft_reset(qdev, 1);
+                       ql_mac_cfg_gig(qdev,
+                                      ql_get_link_speed(qdev) == SPEED_1000);
+                       ql_mac_cfg_full_dup(qdev, ql_is_link_full_dup(qdev));
+                       ql_mac_cfg_pause(qdev, ql_is_neg_pause(qdev));
+                       ql_mac_cfg_soft_reset(qdev, 0);
+
+                       /* enable the MAC */
+                       netif_printk(qdev, link, KERN_DEBUG, qdev->ndev,
+                                    "Enabling mac\n");
+                       ql_mac_enable(qdev, 1);
+               }
+
+               qdev->port_link_state = LS_UP;
+               netif_start_queue(qdev->ndev);
+               netif_carrier_on(qdev->ndev);
+               netif_info(qdev, link, qdev->ndev,
+                          "Link is up at %d Mbps, %s duplex\n",
+                          ql_get_link_speed(qdev),
+                          ql_is_link_full_dup(qdev) ? "full" : "half");
+
+       } else {        /* Remote error detected */
+
+               if (test_bit(QL_LINK_MASTER, &qdev->flags)) {
+                       netif_printk(qdev, link, KERN_DEBUG, qdev->ndev,
+                                    "Remote error detected. Calling ql_port_start()\n");
+                       /*
+                        * ql_port_start() is shared code and needs
+                        * to lock the PHY on its own.
+                        */
+                       ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
+                       if (ql_port_start(qdev))        /* Restart port */
+                               return -1;
+                       return 0;
+               }
+       }
+       ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
+       return 0;
+}
+
+static void ql_link_state_machine_work(struct work_struct *work)
+{
+       struct ql3_adapter *qdev =
+               container_of(work, struct ql3_adapter, link_state_work.work);
+
+       u32 curr_link_state;
+       unsigned long hw_flags;
+
+       spin_lock_irqsave(&qdev->hw_lock, hw_flags);
+
+       curr_link_state = ql_get_link_state(qdev);
+
+       if (test_bit(QL_RESET_ACTIVE, &qdev->flags)) {
+               netif_info(qdev, link, qdev->ndev,
+                          "Reset in progress, skip processing link state\n");
+
+               spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
+
+               /* Restart timer on 1 second interval. */
+               mod_timer(&qdev->adapter_timer, jiffies + HZ * 1);
+
+               return;
+       }
+
+       switch (qdev->port_link_state) {
+       default:
+               if (test_bit(QL_LINK_MASTER, &qdev->flags))
+                       ql_port_start(qdev);
+               qdev->port_link_state = LS_DOWN;
+               /* Fall Through */
+
+       case LS_DOWN:
+               if (curr_link_state == LS_UP) {
+                       netif_info(qdev, link, qdev->ndev, "Link is up\n");
+                       if (ql_is_auto_neg_complete(qdev))
+                               ql_finish_auto_neg(qdev);
+
+                       if (qdev->port_link_state == LS_UP)
+                               ql_link_down_detect_clear(qdev);
+
+                       qdev->port_link_state = LS_UP;
+               }
+               break;
+
+       case LS_UP:
+               /*
+                * See if the link is currently down or went down and came
+                * back up
+                */
+               if (curr_link_state == LS_DOWN) {
+                       netif_info(qdev, link, qdev->ndev, "Link is down\n");
+                       qdev->port_link_state = LS_DOWN;
+               }
+               if (ql_link_down_detect(qdev))
+                       qdev->port_link_state = LS_DOWN;
+               break;
+       }
+       spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
+
+       /* Restart timer on 1 second interval. */
+       mod_timer(&qdev->adapter_timer, jiffies + HZ * 1);
+}
+
+/*
+ * Caller must take hw_lock and QL_PHY_GIO_SEM.
+ */
+static void ql_get_phy_owner(struct ql3_adapter *qdev)
+{
+       if (ql_this_adapter_controls_port(qdev))
+               set_bit(QL_LINK_MASTER, &qdev->flags);
+       else
+               clear_bit(QL_LINK_MASTER, &qdev->flags);
+}
+
+/*
+ * Caller must take hw_lock and QL_PHY_GIO_SEM.
+ */
+static void ql_init_scan_mode(struct ql3_adapter *qdev)
+{
+       ql_mii_enable_scan_mode(qdev);
+
+       if (test_bit(QL_LINK_OPTICAL, &qdev->flags)) {
+               if (ql_this_adapter_controls_port(qdev))
+                       ql_petbi_init_ex(qdev);
+       } else {
+               if (ql_this_adapter_controls_port(qdev))
+                       ql_phy_init_ex(qdev);
+       }
+}
+
+/*
+ * ql_mii_setup() needs to be called before taking the PHY out of reset
+ * so that the management interface clock speed can be set properly.
+ * It would be better if we had a way to disable MDC until after the
+ * PHY is out of reset, but we don't have that capability.
+ */
+static int ql_mii_setup(struct ql3_adapter *qdev)
+{
+       u32 reg;
+       struct ql3xxx_port_registers __iomem *port_regs =
+                       qdev->mem_map_registers;
+
+       if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
+                       (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
+                        2) << 7))
+               return -1;
+
+       if (qdev->device_id == QL3032_DEVICE_ID)
+               ql_write_page0_reg(qdev,
+                       &port_regs->macMIIMgmtControlReg, 0x0f00000);
+
+       /* Divide 125MHz clock by 28 to meet PHY timing requirements */
+       reg = MAC_MII_CONTROL_CLK_SEL_DIV28;
+
+       ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
+                          reg | ((MAC_MII_CONTROL_CLK_SEL_MASK) << 16));
+
+       ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
+       return 0;
+}
+
+#define SUPPORTED_OPTICAL_MODES        (SUPPORTED_1000baseT_Full |     \
+                                SUPPORTED_FIBRE |              \
+                                SUPPORTED_Autoneg)
+#define SUPPORTED_TP_MODES     (SUPPORTED_10baseT_Half |       \
+                                SUPPORTED_10baseT_Full |       \
+                                SUPPORTED_100baseT_Half |      \
+                                SUPPORTED_100baseT_Full |      \
+                                SUPPORTED_1000baseT_Half |     \
+                                SUPPORTED_1000baseT_Full |     \
+                                SUPPORTED_Autoneg |            \
+                                SUPPORTED_TP)
+
+static u32 ql_supported_modes(struct ql3_adapter *qdev)
+{
+       if (test_bit(QL_LINK_OPTICAL, &qdev->flags))
+               return SUPPORTED_OPTICAL_MODES;
+
+       return SUPPORTED_TP_MODES;
+}
+
+static int ql_get_auto_cfg_status(struct ql3_adapter *qdev)
+{
+       int status;
+       unsigned long hw_flags;
+       spin_lock_irqsave(&qdev->hw_lock, hw_flags);
+       if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
+                           (QL_RESOURCE_BITS_BASE_CODE |
+                            (qdev->mac_index) * 2) << 7)) {
+               spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
+               return 0;
+       }
+       status = ql_is_auto_cfg(qdev);
+       ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
+       spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
+       return status;
+}
+
+static u32 ql_get_speed(struct ql3_adapter *qdev)
+{
+       u32 status;
+       unsigned long hw_flags;
+       spin_lock_irqsave(&qdev->hw_lock, hw_flags);
+       if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
+                           (QL_RESOURCE_BITS_BASE_CODE |
+                            (qdev->mac_index) * 2) << 7)) {
+               spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
+               return 0;
+       }
+       status = ql_get_link_speed(qdev);
+       ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
+       spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
+       return status;
+}
+
+static int ql_get_full_dup(struct ql3_adapter *qdev)
+{
+       int status;
+       unsigned long hw_flags;
+       spin_lock_irqsave(&qdev->hw_lock, hw_flags);
+       if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
+                           (QL_RESOURCE_BITS_BASE_CODE |
+                            (qdev->mac_index) * 2) << 7)) {
+               spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
+               return 0;
+       }
+       status = ql_is_link_full_dup(qdev);
+       ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
+       spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
+       return status;
+}
+
+static int ql_get_settings(struct net_device *ndev, struct ethtool_cmd *ecmd)
+{
+       struct ql3_adapter *qdev = netdev_priv(ndev);
+
+       ecmd->transceiver = XCVR_INTERNAL;
+       ecmd->supported = ql_supported_modes(qdev);
+
+       if (test_bit(QL_LINK_OPTICAL, &qdev->flags)) {
+               ecmd->port = PORT_FIBRE;
+       } else {
+               ecmd->port = PORT_TP;
+               ecmd->phy_address = qdev->PHYAddr;
+       }
+       ecmd->advertising = ql_supported_modes(qdev);
+       ecmd->autoneg = ql_get_auto_cfg_status(qdev);
+       ethtool_cmd_speed_set(ecmd, ql_get_speed(qdev));
+       ecmd->duplex = ql_get_full_dup(qdev);
+       return 0;
+}
+
+static void ql_get_drvinfo(struct net_device *ndev,
+                          struct ethtool_drvinfo *drvinfo)
+{
+       struct ql3_adapter *qdev = netdev_priv(ndev);
+       strlcpy(drvinfo->driver, ql3xxx_driver_name, sizeof(drvinfo->driver));
+       strlcpy(drvinfo->version, ql3xxx_driver_version,
+               sizeof(drvinfo->version));
+       strlcpy(drvinfo->bus_info, pci_name(qdev->pdev),
+               sizeof(drvinfo->bus_info));
+}
+
+static u32 ql_get_msglevel(struct net_device *ndev)
+{
+       struct ql3_adapter *qdev = netdev_priv(ndev);
+       return qdev->msg_enable;
+}
+
+static void ql_set_msglevel(struct net_device *ndev, u32 value)
+{
+       struct ql3_adapter *qdev = netdev_priv(ndev);
+       qdev->msg_enable = value;
+}
+
+static void ql_get_pauseparam(struct net_device *ndev,
+                             struct ethtool_pauseparam *pause)
+{
+       struct ql3_adapter *qdev = netdev_priv(ndev);
+       struct ql3xxx_port_registers __iomem *port_regs =
+               qdev->mem_map_registers;
+
+       u32 reg;
+       if (qdev->mac_index == 0)
+               reg = ql_read_page0_reg(qdev, &port_regs->mac0ConfigReg);
+       else
+               reg = ql_read_page0_reg(qdev, &port_regs->mac1ConfigReg);
+
+       pause->autoneg  = ql_get_auto_cfg_status(qdev);
+       pause->rx_pause = (reg & MAC_CONFIG_REG_RF) >> 2;
+       pause->tx_pause = (reg & MAC_CONFIG_REG_TF) >> 1;
+}
+
+static const struct ethtool_ops ql3xxx_ethtool_ops = {
+       .get_settings = ql_get_settings,
+       .get_drvinfo = ql_get_drvinfo,
+       .get_link = ethtool_op_get_link,
+       .get_msglevel = ql_get_msglevel,
+       .set_msglevel = ql_set_msglevel,
+       .get_pauseparam = ql_get_pauseparam,
+};
+
+static int ql_populate_free_queue(struct ql3_adapter *qdev)
+{
+       struct ql_rcv_buf_cb *lrg_buf_cb = qdev->lrg_buf_free_head;
+       dma_addr_t map;
+       int err;
+
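+       /*
+        * Walk the free list and allocate/map an skb for any control block
+        * still missing one; return 1 once every deferred allocation has
+        * been satisfied, 0 otherwise.
+        */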
+       while (lrg_buf_cb) {
+               if (!lrg_buf_cb->skb) {
+                       lrg_buf_cb->skb =
+                               netdev_alloc_skb(qdev->ndev,
+                                                qdev->lrg_buffer_len);
+                       if (unlikely(!lrg_buf_cb->skb)) {
+                               netdev_printk(KERN_DEBUG, qdev->ndev,
+                                             "Failed netdev_alloc_skb()\n");
+                               break;
+                       } else {
+                               /*
+                                * We reserve some space to copy the ethhdr
+                                * from the first buffer
+                                */
+                               skb_reserve(lrg_buf_cb->skb, QL_HEADER_SPACE);
+                               map = pci_map_single(qdev->pdev,
+                                                    lrg_buf_cb->skb->data,
+                                                    qdev->lrg_buffer_len -
+                                                    QL_HEADER_SPACE,
+                                                    PCI_DMA_FROMDEVICE);
+
+                               err = pci_dma_mapping_error(qdev->pdev, map);
+                               if (err) {
+                                       netdev_err(qdev->ndev,
+                                                  "PCI mapping failed with error: %d\n",
+                                                  err);
+                                       dev_kfree_skb(lrg_buf_cb->skb);
+                                       lrg_buf_cb->skb = NULL;
+                                       break;
+                               }
+
+                               lrg_buf_cb->buf_phy_addr_low =
+                                       cpu_to_le32(LS_64BITS(map));
+                               lrg_buf_cb->buf_phy_addr_high =
+                                       cpu_to_le32(MS_64BITS(map));
+                               dma_unmap_addr_set(lrg_buf_cb, mapaddr, map);
+                               dma_unmap_len_set(lrg_buf_cb, maplen,
+                                                 qdev->lrg_buffer_len -
+                                                 QL_HEADER_SPACE);
+                               --qdev->lrg_buf_skb_check;
+                               if (!qdev->lrg_buf_skb_check)
+                                       return 1;
+                       }
+               }
+               lrg_buf_cb = lrg_buf_cb->next;
+       }
+       return 0;
+}
+
+/*
+ * Caller holds hw_lock.
+ */
+static void ql_update_small_bufq_prod_index(struct ql3_adapter *qdev)
+{
+       struct ql3xxx_port_registers __iomem *port_regs =
+               qdev->mem_map_registers;
+
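+       /*
+        * Each producer-index increment below re-posts eight small buffers;
+        * only push the index once the chip has released at least sixteen.
+        */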
+       if (qdev->small_buf_release_cnt >= 16) {
+               while (qdev->small_buf_release_cnt >= 16) {
+                       qdev->small_buf_q_producer_index++;
+
+                       if (qdev->small_buf_q_producer_index ==
+                           NUM_SBUFQ_ENTRIES)
+                               qdev->small_buf_q_producer_index = 0;
+                       qdev->small_buf_release_cnt -= 8;
+               }
+               wmb();
+               writel(qdev->small_buf_q_producer_index,
+                       &port_regs->CommonRegs.rxSmallQProducerIndex);
+       }
+}
+
+/*
+ * Caller holds hw_lock.
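+ *
+ * Each producer-index increment posts eight buffer addresses (one queue
+ * entry); the index is only written back once at least eight buffers sit
+ * on the free list and sixteen have been released by the chip.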
+ */
+static void ql_update_lrg_bufq_prod_index(struct ql3_adapter *qdev)
+{
+       struct bufq_addr_element *lrg_buf_q_ele;
+       int i;
+       struct ql_rcv_buf_cb *lrg_buf_cb;
+       struct ql3xxx_port_registers __iomem *port_regs =
+               qdev->mem_map_registers;
+
+       if ((qdev->lrg_buf_free_count >= 8) &&
+           (qdev->lrg_buf_release_cnt >= 16)) {
+
+               if (qdev->lrg_buf_skb_check)
+                       if (!ql_populate_free_queue(qdev))
+                               return;
+
+               lrg_buf_q_ele = qdev->lrg_buf_next_free;
+
+               while ((qdev->lrg_buf_release_cnt >= 16) &&
+                      (qdev->lrg_buf_free_count >= 8)) {
+
+                       for (i = 0; i < 8; i++) {
+                               lrg_buf_cb =
+                                   ql_get_from_lrg_buf_free_list(qdev);
+                               lrg_buf_q_ele->addr_high =
+                                   lrg_buf_cb->buf_phy_addr_high;
+                               lrg_buf_q_ele->addr_low =
+                                   lrg_buf_cb->buf_phy_addr_low;
+                               lrg_buf_q_ele++;
+
+                               qdev->lrg_buf_release_cnt--;
+                       }
+
+                       qdev->lrg_buf_q_producer_index++;
+
+                       if (qdev->lrg_buf_q_producer_index ==
+                           qdev->num_lbufq_entries)
+                               qdev->lrg_buf_q_producer_index = 0;
+
+                       if (qdev->lrg_buf_q_producer_index ==
+                           (qdev->num_lbufq_entries - 1)) {
+                               lrg_buf_q_ele = qdev->lrg_buf_q_virt_addr;
+                       }
+               }
+               wmb();
+               qdev->lrg_buf_next_free = lrg_buf_q_ele;
+               writel(qdev->lrg_buf_q_producer_index,
+                       &port_regs->CommonRegs.rxLargeQProducerIndex);
+       }
+}
+
+static void ql_process_mac_tx_intr(struct ql3_adapter *qdev,
+                                  struct ob_mac_iocb_rsp *mac_rsp)
+{
+       struct ql_tx_buf_cb *tx_cb;
+       int i;
+
+       if (mac_rsp->flags & OB_MAC_IOCB_RSP_S) {
+               netdev_warn(qdev->ndev,
+                           "Frame too short but it was padded and sent\n");
+       }
+
+       tx_cb = &qdev->tx_buf[mac_rsp->transaction_id];
+
+       /*  Check the transmit response flags for any errors */
+       if (mac_rsp->flags & OB_MAC_IOCB_RSP_S) {
+               netdev_err(qdev->ndev,
+                          "Frame too short to be legal, frame not sent\n");
+
+               qdev->ndev->stats.tx_errors++;
+               goto frame_not_sent;
+       }
+
+       if (tx_cb->seg_count == 0) {
+               netdev_err(qdev->ndev, "tx_cb->seg_count == 0: %d\n",
+                          mac_rsp->transaction_id);
+
+               qdev->ndev->stats.tx_errors++;
+               goto invalid_seg_count;
+       }
+
+       pci_unmap_single(qdev->pdev,
+                        dma_unmap_addr(&tx_cb->map[0], mapaddr),
+                        dma_unmap_len(&tx_cb->map[0], maplen),
+                        PCI_DMA_TODEVICE);
+       tx_cb->seg_count--;
+       if (tx_cb->seg_count) {
+               for (i = 1; i < tx_cb->seg_count; i++) {
+                       pci_unmap_page(qdev->pdev,
+                                      dma_unmap_addr(&tx_cb->map[i],
+                                                     mapaddr),
+                                      dma_unmap_len(&tx_cb->map[i], maplen),
+                                      PCI_DMA_TODEVICE);
+               }
+       }
+       qdev->ndev->stats.tx_packets++;
+       qdev->ndev->stats.tx_bytes += tx_cb->skb->len;
+
+frame_not_sent:
+       dev_kfree_skb_irq(tx_cb->skb);
+       tx_cb->skb = NULL;
+
+invalid_seg_count:
+       atomic_inc(&qdev->tx_count);
+}
+
+static void ql_get_sbuf(struct ql3_adapter *qdev)
+{
+       if (++qdev->small_buf_index == NUM_SMALL_BUFFERS)
+               qdev->small_buf_index = 0;
+       qdev->small_buf_release_cnt++;
+}
+
+static struct ql_rcv_buf_cb *ql_get_lbuf(struct ql3_adapter *qdev)
+{
+       struct ql_rcv_buf_cb *lrg_buf_cb = NULL;
+       lrg_buf_cb = &qdev->lrg_buf[qdev->lrg_buf_index];
+       qdev->lrg_buf_release_cnt++;
+       if (++qdev->lrg_buf_index == qdev->num_large_buffers)
+               qdev->lrg_buf_index = 0;
+       return lrg_buf_cb;
+}
+
+/*
+ * The difference between 3022 and 3032 for inbound completions:
+ * 3022 uses two buffers per completion.  The first buffer contains
+ * (some) header info, the second the remainder of the headers plus
+ * the data.  For this chip we reserve some space at the top of the
+ * receive buffer so that the header info in buffer one can be
+ * prepended to buffer two.  Buffer two is then sent up while
+ * buffer one is returned to the hardware to be reused.
+ * 3032 receives all of its data and headers in one buffer for a
+ * simpler process.  3032 also supports checksum verification as
+ * can be seen in ql_process_macip_rx_intr().
+ */
+static void ql_process_mac_rx_intr(struct ql3_adapter *qdev,
+                                  struct ib_mac_iocb_rsp *ib_mac_rsp_ptr)
+{
+       struct ql_rcv_buf_cb *lrg_buf_cb1 = NULL;
+       struct ql_rcv_buf_cb *lrg_buf_cb2 = NULL;
+       struct sk_buff *skb;
+       u16 length = le16_to_cpu(ib_mac_rsp_ptr->length);
+
+       /*
+        * Get the inbound address list (small buffer).
+        */
+       ql_get_sbuf(qdev);
+
+       if (qdev->device_id == QL3022_DEVICE_ID)
+               lrg_buf_cb1 = ql_get_lbuf(qdev);
+
+       /* start of second buffer */
+       lrg_buf_cb2 = ql_get_lbuf(qdev);
+       skb = lrg_buf_cb2->skb;
+
+       qdev->ndev->stats.rx_packets++;
+       qdev->ndev->stats.rx_bytes += length;
+
+       skb_put(skb, length);
+       pci_unmap_single(qdev->pdev,
+                        dma_unmap_addr(lrg_buf_cb2, mapaddr),
+                        dma_unmap_len(lrg_buf_cb2, maplen),
+                        PCI_DMA_FROMDEVICE);
+       prefetch(skb->data);
+       skb_checksum_none_assert(skb);
+       skb->protocol = eth_type_trans(skb, qdev->ndev);
+
+       netif_receive_skb(skb);
+       lrg_buf_cb2->skb = NULL;
+
+       if (qdev->device_id == QL3022_DEVICE_ID)
+               ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb1);
+       ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb2);
+}
+
+static void ql_process_macip_rx_intr(struct ql3_adapter *qdev,
+                                    struct ib_ip_iocb_rsp *ib_ip_rsp_ptr)
+{
+       struct ql_rcv_buf_cb *lrg_buf_cb1 = NULL;
+       struct ql_rcv_buf_cb *lrg_buf_cb2 = NULL;
+       struct sk_buff *skb1 = NULL, *skb2;
+       struct net_device *ndev = qdev->ndev;
+       u16 length = le16_to_cpu(ib_ip_rsp_ptr->length);
+       u16 size = 0;
+
+       /*
+        * Get the inbound address list (small buffer).
+        */
+
+       ql_get_sbuf(qdev);
+
+       if (qdev->device_id == QL3022_DEVICE_ID) {
+               /* start of first buffer on 3022 */
+               lrg_buf_cb1 = ql_get_lbuf(qdev);
+               skb1 = lrg_buf_cb1->skb;
+               size = ETH_HLEN;
+               if (*((u16 *) skb1->data) != 0xFFFF)
+                       size += VLAN_ETH_HLEN - ETH_HLEN;
+       }
+
+       /* start of second buffer */
+       lrg_buf_cb2 = ql_get_lbuf(qdev);
+       skb2 = lrg_buf_cb2->skb;
+
+       skb_put(skb2, length);  /* Just the second buffer length here. */
+       pci_unmap_single(qdev->pdev,
+                        dma_unmap_addr(lrg_buf_cb2, mapaddr),
+                        dma_unmap_len(lrg_buf_cb2, maplen),
+                        PCI_DMA_FROMDEVICE);
+       prefetch(skb2->data);
+
+       skb_checksum_none_assert(skb2);
+       if (qdev->device_id == QL3022_DEVICE_ID) {
+               /*
+                * Copy the ethhdr from first buffer to second. This
+                * is necessary for 3022 IP completions.
+                */
+               skb_copy_from_linear_data_offset(skb1, VLAN_ID_LEN,
+                                                skb_push(skb2, size), size);
+       } else {
+               u16 checksum = le16_to_cpu(ib_ip_rsp_ptr->checksum);
+               if (checksum &
+                       (IB_IP_IOCB_RSP_3032_ICE |
+                        IB_IP_IOCB_RSP_3032_CE)) {
+                       netdev_err(ndev,
+                                  "%s: Bad checksum for this %s packet, checksum = %x\n",
+                                  __func__,
+                                  ((checksum & IB_IP_IOCB_RSP_3032_TCP) ?
+                                   "TCP" : "UDP"), checksum);
+               } else if ((checksum & IB_IP_IOCB_RSP_3032_TCP) ||
+                               (checksum & IB_IP_IOCB_RSP_3032_UDP &&
+                               !(checksum & IB_IP_IOCB_RSP_3032_NUC))) {
+                       skb2->ip_summed = CHECKSUM_UNNECESSARY;
+               }
+       }
+       skb2->protocol = eth_type_trans(skb2, qdev->ndev);
+
+       netif_receive_skb(skb2);
+       ndev->stats.rx_packets++;
+       ndev->stats.rx_bytes += length;
+       lrg_buf_cb2->skb = NULL;
+
+       if (qdev->device_id == QL3022_DEVICE_ID)
+               ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb1);
+       ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb2);
+}
+
+static int ql_tx_rx_clean(struct ql3_adapter *qdev,
+                         int *tx_cleaned, int *rx_cleaned, int work_to_do)
+{
+       struct net_rsp_iocb *net_rsp;
+       struct net_device *ndev = qdev->ndev;
+       int work_done = 0;
+
+       /* While there are entries in the completion queue. */
+       while ((le32_to_cpu(*(qdev->prsp_producer_index)) !=
+               qdev->rsp_consumer_index) && (work_done < work_to_do)) {
+
+               net_rsp = qdev->rsp_current;
+               rmb();
+               /*
+                * Fix the 3032 chip's undocumented "feature" where bit-8 is set
+                * if the inbound completion is for a VLAN.
+                */
+               if (qdev->device_id == QL3032_DEVICE_ID)
+                       net_rsp->opcode &= 0x7f;
+               switch (net_rsp->opcode) {
+
+               case OPCODE_OB_MAC_IOCB_FN0:
+               case OPCODE_OB_MAC_IOCB_FN2:
+                       ql_process_mac_tx_intr(qdev, (struct ob_mac_iocb_rsp *)
+                                              net_rsp);
+                       (*tx_cleaned)++;
+                       break;
+
+               case OPCODE_IB_MAC_IOCB:
+               case OPCODE_IB_3032_MAC_IOCB:
+                       ql_process_mac_rx_intr(qdev, (struct ib_mac_iocb_rsp *)
+                                              net_rsp);
+                       (*rx_cleaned)++;
+                       break;
+
+               case OPCODE_IB_IP_IOCB:
+               case OPCODE_IB_3032_IP_IOCB:
+                       ql_process_macip_rx_intr(qdev, (struct ib_ip_iocb_rsp *)
+                                                net_rsp);
+                       (*rx_cleaned)++;
+                       break;
+               default: {
+                       u32 *tmp = (u32 *)net_rsp;
+                       netdev_err(ndev,
+                                  "Hit default case, not handled!\n"
+                                  "    dropping the packet, opcode = %x\n"
+                                  "0x%08lx 0x%08lx 0x%08lx 0x%08lx\n",
+                                  net_rsp->opcode,
+                                  (unsigned long int)tmp[0],
+                                  (unsigned long int)tmp[1],
+                                  (unsigned long int)tmp[2],
+                                  (unsigned long int)tmp[3]);
+               }
+               }
+
+               qdev->rsp_consumer_index++;
+
+               if (qdev->rsp_consumer_index == NUM_RSP_Q_ENTRIES) {
+                       qdev->rsp_consumer_index = 0;
+                       qdev->rsp_current = qdev->rsp_q_virt_addr;
+               } else {
+                       qdev->rsp_current++;
+               }
+
+               work_done = *tx_cleaned + *rx_cleaned;
+       }
+
+       return work_done;
+}
+
+static int ql_poll(struct napi_struct *napi, int budget)
+{
+       struct ql3_adapter *qdev = container_of(napi, struct ql3_adapter, napi);
+       int rx_cleaned = 0, tx_cleaned = 0;
+       unsigned long hw_flags;
+       struct ql3xxx_port_registers __iomem *port_regs =
+               qdev->mem_map_registers;
+
+       ql_tx_rx_clean(qdev, &tx_cleaned, &rx_cleaned, budget);
+
+       if (tx_cleaned + rx_cleaned != budget) {
+               spin_lock_irqsave(&qdev->hw_lock, hw_flags);
+               __napi_complete(napi);
+               ql_update_small_bufq_prod_index(qdev);
+               ql_update_lrg_bufq_prod_index(qdev);
+               writel(qdev->rsp_consumer_index,
+                           &port_regs->CommonRegs.rspQConsumerIndex);
+               spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
+
+               ql_enable_interrupts(qdev);
+       }
+       return tx_cleaned + rx_cleaned;
+}
+
+static irqreturn_t ql3xxx_isr(int irq, void *dev_id)
+{
+
+       struct net_device *ndev = dev_id;
+       struct ql3_adapter *qdev = netdev_priv(ndev);
+       struct ql3xxx_port_registers __iomem *port_regs =
+               qdev->mem_map_registers;
+       u32 value;
+       int handled = 1;
+       u32 var;
+
+       value = ql_read_common_reg_l(qdev,
+                                    &port_regs->CommonRegs.ispControlStatus);
+
+       if (value & (ISP_CONTROL_FE | ISP_CONTROL_RI)) {
+               spin_lock(&qdev->adapter_lock);
+               netif_stop_queue(qdev->ndev);
+               netif_carrier_off(qdev->ndev);
+               ql_disable_interrupts(qdev);
+               qdev->port_link_state = LS_DOWN;
+               set_bit(QL_RESET_ACTIVE, &qdev->flags);
+
+               if (value & ISP_CONTROL_FE) {
+                       /*
+                        * Chip Fatal Error.
+                        */
+                       var =
+                           ql_read_page0_reg_l(qdev,
+                                             &port_regs->PortFatalErrStatus);
+                       netdev_warn(ndev,
+                                   "Resetting chip. PortFatalErrStatus register = 0x%x\n",
+                                   var);
+                       set_bit(QL_RESET_START, &qdev->flags);
+               } else {
+                       /*
+                        * Soft Reset Requested.
+                        */
+                       set_bit(QL_RESET_PER_SCSI, &qdev->flags);
+                       netdev_err(ndev,
+                                  "Another function issued a reset to the chip. ISR value = %x\n",
+                                  value);
+               }
+               queue_delayed_work(qdev->workqueue, &qdev->reset_work, 0);
+               spin_unlock(&qdev->adapter_lock);
+       } else if (value & ISP_IMR_DISABLE_CMPL_INT) {
+               ql_disable_interrupts(qdev);
+               if (likely(napi_schedule_prep(&qdev->napi)))
+                       __napi_schedule(&qdev->napi);
+       } else
+               return IRQ_NONE;
+
+       return IRQ_RETVAL(handled);
+}
+
+/*
+ * Get the total number of segments needed for the given number of fragments.
+ * This is necessary because outbound address lists (OAL) will be used when
+ * more than two frags are given.  Each address list has 5 addr/len pairs.
+ * The 5th pair in each OAL is used to point to the next OAL if more frags
+ * are coming.  That is why the frags:segment count ratio is not linear.
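+ * For example, on a 3032 an skb with eight fragments needs 8 + 3 = 11
+ * entries: nine data entries (the linear head plus eight frags) and two
+ * continuation entries (IOCB to the first OAL, first OAL to the second).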
+ */
+static int ql_get_seg_count(struct ql3_adapter *qdev, unsigned short frags)
+{
+       if (qdev->device_id == QL3022_DEVICE_ID)
+               return 1;
+
+       if (frags <= 2)
+               return frags + 1;
+       else if (frags <= 6)
+               return frags + 2;
+       else if (frags <= 10)
+               return frags + 3;
+       else if (frags <= 14)
+               return frags + 4;
+       else if (frags <= 18)
+               return frags + 5;
+       return -1;
+}
+
+static void ql_hw_csum_setup(const struct sk_buff *skb,
+                            struct ob_mac_iocb_req *mac_iocb_ptr)
+{
+       const struct iphdr *ip = ip_hdr(skb);
+
+       mac_iocb_ptr->ip_hdr_off = skb_network_offset(skb);
+       mac_iocb_ptr->ip_hdr_len = ip->ihl;
+
+       if (ip->protocol == IPPROTO_TCP) {
+               mac_iocb_ptr->flags1 |= OB_3032MAC_IOCB_REQ_TC |
+                       OB_3032MAC_IOCB_REQ_IC;
+       } else {
+               mac_iocb_ptr->flags1 |= OB_3032MAC_IOCB_REQ_UC |
+                       OB_3032MAC_IOCB_REQ_IC;
+       }
+
+}
+
+/*
+ * Map the buffers for this transmit.
+ * This will return NETDEV_TX_BUSY or NETDEV_TX_OK based on success.
+ */
+static int ql_send_map(struct ql3_adapter *qdev,
+                               struct ob_mac_iocb_req *mac_iocb_ptr,
+                               struct ql_tx_buf_cb *tx_cb,
+                               struct sk_buff *skb)
+{
+       struct oal *oal;
+       struct oal_entry *oal_entry;
+       int len = skb_headlen(skb);
+       dma_addr_t map;
+       int err;
+       int completed_segs, i;
+       int seg_cnt, seg = 0;
+       int frag_cnt = (int)skb_shinfo(skb)->nr_frags;
+
+       seg_cnt = tx_cb->seg_count;
+       /*
+        * Map the skb buffer first.
+        */
+       map = pci_map_single(qdev->pdev, skb->data, len, PCI_DMA_TODEVICE);
+
+       err = pci_dma_mapping_error(qdev->pdev, map);
+       if (err) {
+               netdev_err(qdev->ndev, "PCI mapping failed with error: %d\n",
+                          err);
+
+               return NETDEV_TX_BUSY;
+       }
+
+       oal_entry = (struct oal_entry *)&mac_iocb_ptr->buf_addr0_low;
+       oal_entry->dma_lo = cpu_to_le32(LS_64BITS(map));
+       oal_entry->dma_hi = cpu_to_le32(MS_64BITS(map));
+       oal_entry->len = cpu_to_le32(len);
+       dma_unmap_addr_set(&tx_cb->map[seg], mapaddr, map);
+       dma_unmap_len_set(&tx_cb->map[seg], maplen, len);
+       seg++;
+
+       if (seg_cnt == 1) {
+               /* Terminate the last segment. */
+               oal_entry->len |= cpu_to_le32(OAL_LAST_ENTRY);
+               return NETDEV_TX_OK;
+       }
+       oal = tx_cb->oal;
+       for (completed_segs = 0;
+            completed_segs < frag_cnt;
+            completed_segs++, seg++) {
+               skb_frag_t *frag = &skb_shinfo(skb)->frags[completed_segs];
+               oal_entry++;
+               /*
+                * Check for continuation requirements.
+                * It's strange but necessary.
+                * Continuation entry points to outbound address list.
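+                        * seg 2 is the last (3rd) ALP of the IOCB and segs 7,
+                        * 12 and 17 are the last (5th) entry of each OAL; when
+                        * more segments remain (seg_cnt > 3, 8, 13 or 18) that
+                        * entry becomes a pointer to the next OAL instead of
+                        * carrying data.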
+                */
+               if ((seg == 2 && seg_cnt > 3) ||
+                   (seg == 7 && seg_cnt > 8) ||
+                   (seg == 12 && seg_cnt > 13) ||
+                   (seg == 17 && seg_cnt > 18)) {
+                       map = pci_map_single(qdev->pdev, oal,
+                                            sizeof(struct oal),
+                                            PCI_DMA_TODEVICE);
+
+                       err = pci_dma_mapping_error(qdev->pdev, map);
+                       if (err) {
+                               netdev_err(qdev->ndev,
+                                          "PCI mapping outbound address list with error: %d\n",
+                                          err);
+                               goto map_error;
+                       }
+
+                       oal_entry->dma_lo = cpu_to_le32(LS_64BITS(map));
+                       oal_entry->dma_hi = cpu_to_le32(MS_64BITS(map));
+                       oal_entry->len = cpu_to_le32(sizeof(struct oal) |
+                                                    OAL_CONT_ENTRY);
+                       dma_unmap_addr_set(&tx_cb->map[seg], mapaddr, map);
+                       dma_unmap_len_set(&tx_cb->map[seg], maplen,
+                                         sizeof(struct oal));
+                       oal_entry = (struct oal_entry *)oal;
+                       oal++;
+                       seg++;
+               }
+
+               map = skb_frag_dma_map(&qdev->pdev->dev, frag, 0, skb_frag_size(frag),
+                                      DMA_TO_DEVICE);
+
+               err = dma_mapping_error(&qdev->pdev->dev, map);
+               if (err) {
+                       netdev_err(qdev->ndev,
+                                  "PCI mapping frags failed with error: %d\n",
+                                  err);
+                       goto map_error;
+               }
+
+               oal_entry->dma_lo = cpu_to_le32(LS_64BITS(map));
+               oal_entry->dma_hi = cpu_to_le32(MS_64BITS(map));
+               oal_entry->len = cpu_to_le32(skb_frag_size(frag));
+               dma_unmap_addr_set(&tx_cb->map[seg], mapaddr, map);
+               dma_unmap_len_set(&tx_cb->map[seg], maplen, skb_frag_size(frag));
+       }
+       /* Terminate the last segment. */
+       oal_entry->len |= cpu_to_le32(OAL_LAST_ENTRY);
+       return NETDEV_TX_OK;
+
+map_error:
+       /* A PCI mapping failed, so we need to back out:
+        * traverse the OALs and associated pages that have already been
+        * mapped and unmap them to clean up properly.
+        */
+
+       seg = 1;
+       oal_entry = (struct oal_entry *)&mac_iocb_ptr->buf_addr0_low;
+       oal = tx_cb->oal;
+       for (i = 0; i < completed_segs; i++, seg++) {
+               oal_entry++;
+
+               /*
+                * Check for continuation requirements.
+                * It's strange but necessary.
+                */
+
+               if ((seg == 2 && seg_cnt > 3) ||
+                   (seg == 7 && seg_cnt > 8) ||
+                   (seg == 12 && seg_cnt > 13) ||
+                   (seg == 17 && seg_cnt > 18)) {
+                       pci_unmap_single(qdev->pdev,
+                               dma_unmap_addr(&tx_cb->map[seg], mapaddr),
+                               dma_unmap_len(&tx_cb->map[seg], maplen),
+                                PCI_DMA_TODEVICE);
+                       oal++;
+                       seg++;
+               }
+
+               pci_unmap_page(qdev->pdev,
+                              dma_unmap_addr(&tx_cb->map[seg], mapaddr),
+                              dma_unmap_len(&tx_cb->map[seg], maplen),
+                              PCI_DMA_TODEVICE);
+       }
+
+       pci_unmap_single(qdev->pdev,
+                        dma_unmap_addr(&tx_cb->map[0], mapaddr),
+                        dma_unmap_len(&tx_cb->map[0], maplen),
+                        PCI_DMA_TODEVICE);
+
+       return NETDEV_TX_BUSY;
+
+}
+
+/*
+ * The difference between 3022 and 3032 sends:
+ * 3022 only supports a simple single segment transmission.
+ * 3032 supports checksumming and scatter/gather lists (fragments).
+ * The 3032 supports sglists by using the 3 addr/len pairs (ALP)
+ * in the IOCB plus a chain of outbound address lists (OAL) that
+ * each contain 5 ALPs.  The last ALP of the IOCB (3rd) or OAL (5th)
+ * will be used to point to an OAL when more ALP entries are required.
+ * The IOCB is always the top of the chain followed by one or more
+ * OALs (when necessary).
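+ * For example, a 3032 send with four fragments (seg_count = 6) places the
+ * head and frag 0 in the first two IOCB ALPs, turns the third ALP into a
+ * pointer to an OAL, and maps frags 1-3 into that OAL.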
+ */
+static netdev_tx_t ql3xxx_send(struct sk_buff *skb,
+                              struct net_device *ndev)
+{
+       struct ql3_adapter *qdev = netdev_priv(ndev);
+       struct ql3xxx_port_registers __iomem *port_regs =
+                       qdev->mem_map_registers;
+       struct ql_tx_buf_cb *tx_cb;
+       u32 tot_len = skb->len;
+       struct ob_mac_iocb_req *mac_iocb_ptr;
+
+       if (unlikely(atomic_read(&qdev->tx_count) < 2))
+               return NETDEV_TX_BUSY;
+
+       tx_cb = &qdev->tx_buf[qdev->req_producer_index];
+       tx_cb->seg_count = ql_get_seg_count(qdev,
+                                            skb_shinfo(skb)->nr_frags);
+       if (tx_cb->seg_count == -1) {
+               netdev_err(ndev, "%s: invalid segment count!\n", __func__);
+               return NETDEV_TX_OK;
+       }
+
+       mac_iocb_ptr = tx_cb->queue_entry;
+       memset((void *)mac_iocb_ptr, 0, sizeof(struct ob_mac_iocb_req));
+       mac_iocb_ptr->opcode = qdev->mac_ob_opcode;
+       mac_iocb_ptr->flags = OB_MAC_IOCB_REQ_X;
+       mac_iocb_ptr->flags |= qdev->mb_bit_mask;
+       mac_iocb_ptr->transaction_id = qdev->req_producer_index;
+       mac_iocb_ptr->data_len = cpu_to_le16((u16) tot_len);
+       tx_cb->skb = skb;
+       if (qdev->device_id == QL3032_DEVICE_ID &&
+           skb->ip_summed == CHECKSUM_PARTIAL)
+               ql_hw_csum_setup(skb, mac_iocb_ptr);
+
+       if (ql_send_map(qdev, mac_iocb_ptr, tx_cb, skb) != NETDEV_TX_OK) {
+               netdev_err(ndev, "%s: Could not map the segments!\n", __func__);
+               return NETDEV_TX_BUSY;
+       }
+
+       wmb();
+       qdev->req_producer_index++;
+       if (qdev->req_producer_index == NUM_REQ_Q_ENTRIES)
+               qdev->req_producer_index = 0;
+       wmb();
+       ql_write_common_reg_l(qdev,
+                           &port_regs->CommonRegs.reqQProducerIndex,
+                           qdev->req_producer_index);
+
+       netif_printk(qdev, tx_queued, KERN_DEBUG, ndev,
+                    "tx queued, slot %d, len %d\n",
+                    qdev->req_producer_index, skb->len);
+
+       atomic_dec(&qdev->tx_count);
+       return NETDEV_TX_OK;
+}
+
+static int ql_alloc_net_req_rsp_queues(struct ql3_adapter *qdev)
+{
+       qdev->req_q_size =
+           (u32) (NUM_REQ_Q_ENTRIES * sizeof(struct ob_mac_iocb_req));
+
+       qdev->rsp_q_size = NUM_RSP_Q_ENTRIES * sizeof(struct net_rsp_iocb);
+
+       /* The barrier is required to ensure ordering of the request and
+        * response queue address writes to the registers.
+        */
+       wmb();
+
+       qdev->req_q_virt_addr =
+           pci_alloc_consistent(qdev->pdev,
+                                (size_t) qdev->req_q_size,
+                                &qdev->req_q_phy_addr);
+
+       if ((qdev->req_q_virt_addr == NULL) ||
+           LS_64BITS(qdev->req_q_phy_addr) & (qdev->req_q_size - 1)) {
+               netdev_err(qdev->ndev, "reqQ failed\n");
+               return -ENOMEM;
+       }
+
+       qdev->rsp_q_virt_addr =
+           pci_alloc_consistent(qdev->pdev,
+                                (size_t) qdev->rsp_q_size,
+                                &qdev->rsp_q_phy_addr);
+
+       if ((qdev->rsp_q_virt_addr == NULL) ||
+           LS_64BITS(qdev->rsp_q_phy_addr) & (qdev->rsp_q_size - 1)) {
+               netdev_err(qdev->ndev, "rspQ allocation failed\n");
+               pci_free_consistent(qdev->pdev, (size_t) qdev->req_q_size,
+                                   qdev->req_q_virt_addr,
+                                   qdev->req_q_phy_addr);
+               return -ENOMEM;
+       }
+
+       set_bit(QL_ALLOC_REQ_RSP_Q_DONE, &qdev->flags);
+
+       return 0;
+}
+
+static void ql_free_net_req_rsp_queues(struct ql3_adapter *qdev)
+{
+       if (!test_bit(QL_ALLOC_REQ_RSP_Q_DONE, &qdev->flags)) {
+               netdev_info(qdev->ndev, "Already done\n");
+               return;
+       }
+
+       pci_free_consistent(qdev->pdev,
+                           qdev->req_q_size,
+                           qdev->req_q_virt_addr, qdev->req_q_phy_addr);
+
+       qdev->req_q_virt_addr = NULL;
+
+       pci_free_consistent(qdev->pdev,
+                           qdev->rsp_q_size,
+                           qdev->rsp_q_virt_addr, qdev->rsp_q_phy_addr);
+
+       qdev->rsp_q_virt_addr = NULL;
+
+       clear_bit(QL_ALLOC_REQ_RSP_Q_DONE, &qdev->flags);
+}
+
+static int ql_alloc_buffer_queues(struct ql3_adapter *qdev)
+{
+       /* Create Large Buffer Queue */
+       qdev->lrg_buf_q_size =
+               qdev->num_lbufq_entries * sizeof(struct lrg_buf_q_entry);
+       if (qdev->lrg_buf_q_size < PAGE_SIZE)
+               qdev->lrg_buf_q_alloc_size = PAGE_SIZE;
+       else
+               qdev->lrg_buf_q_alloc_size = qdev->lrg_buf_q_size * 2;
+
+       qdev->lrg_buf = kmalloc_array(qdev->num_large_buffers,
+                                     sizeof(struct ql_rcv_buf_cb),
+                                     GFP_KERNEL);
+       if (qdev->lrg_buf == NULL)
+               return -ENOMEM;
+
+       qdev->lrg_buf_q_alloc_virt_addr =
+               pci_alloc_consistent(qdev->pdev,
+                                    qdev->lrg_buf_q_alloc_size,
+                                    &qdev->lrg_buf_q_alloc_phy_addr);
+
+       if (qdev->lrg_buf_q_alloc_virt_addr == NULL) {
+               netdev_err(qdev->ndev, "lBufQ failed\n");
+               return -ENOMEM;
+       }
+       qdev->lrg_buf_q_virt_addr = qdev->lrg_buf_q_alloc_virt_addr;
+       qdev->lrg_buf_q_phy_addr = qdev->lrg_buf_q_alloc_phy_addr;
+
+       /* Create Small Buffer Queue */
+       qdev->small_buf_q_size =
+               NUM_SBUFQ_ENTRIES * sizeof(struct lrg_buf_q_entry);
+       if (qdev->small_buf_q_size < PAGE_SIZE)
+               qdev->small_buf_q_alloc_size = PAGE_SIZE;
+       else
+               qdev->small_buf_q_alloc_size = qdev->small_buf_q_size * 2;
+
+       qdev->small_buf_q_alloc_virt_addr =
+               pci_alloc_consistent(qdev->pdev,
+                                    qdev->small_buf_q_alloc_size,
+                                    &qdev->small_buf_q_alloc_phy_addr);
+
+       if (qdev->small_buf_q_alloc_virt_addr == NULL) {
+               netdev_err(qdev->ndev, "Small Buffer Queue allocation failed\n");
+               pci_free_consistent(qdev->pdev, qdev->lrg_buf_q_alloc_size,
+                                   qdev->lrg_buf_q_alloc_virt_addr,
+                                   qdev->lrg_buf_q_alloc_phy_addr);
+               return -ENOMEM;
+       }
+
+       qdev->small_buf_q_virt_addr = qdev->small_buf_q_alloc_virt_addr;
+       qdev->small_buf_q_phy_addr = qdev->small_buf_q_alloc_phy_addr;
+       set_bit(QL_ALLOC_BUFQS_DONE, &qdev->flags);
+       return 0;
+}
+
+static void ql_free_buffer_queues(struct ql3_adapter *qdev)
+{
+       if (!test_bit(QL_ALLOC_BUFQS_DONE, &qdev->flags)) {
+               netdev_info(qdev->ndev, "Already done\n");
+               return;
+       }
+       kfree(qdev->lrg_buf);
+       pci_free_consistent(qdev->pdev,
+                           qdev->lrg_buf_q_alloc_size,
+                           qdev->lrg_buf_q_alloc_virt_addr,
+                           qdev->lrg_buf_q_alloc_phy_addr);
+
+       qdev->lrg_buf_q_virt_addr = NULL;
+
+       pci_free_consistent(qdev->pdev,
+                           qdev->small_buf_q_alloc_size,
+                           qdev->small_buf_q_alloc_virt_addr,
+                           qdev->small_buf_q_alloc_phy_addr);
+
+       qdev->small_buf_q_virt_addr = NULL;
+
+       clear_bit(QL_ALLOC_BUFQS_DONE, &qdev->flags);
+}
+
+static int ql_alloc_small_buffers(struct ql3_adapter *qdev)
+{
+       int i;
+       struct bufq_addr_element *small_buf_q_entry;
+
+       /* Currently we allocate one chunk of memory and use it for small buffers */
+       qdev->small_buf_total_size =
+               (QL_ADDR_ELE_PER_BUFQ_ENTRY * NUM_SBUFQ_ENTRIES *
+                QL_SMALL_BUFFER_SIZE);
+
+       qdev->small_buf_virt_addr =
+               pci_alloc_consistent(qdev->pdev,
+                                    qdev->small_buf_total_size,
+                                    &qdev->small_buf_phy_addr);
+
+       if (qdev->small_buf_virt_addr == NULL) {
+               netdev_err(qdev->ndev, "Failed to get small buffer memory\n");
+               return -ENOMEM;
+       }
+
+       qdev->small_buf_phy_addr_low = LS_64BITS(qdev->small_buf_phy_addr);
+       qdev->small_buf_phy_addr_high = MS_64BITS(qdev->small_buf_phy_addr);
+
+       small_buf_q_entry = qdev->small_buf_q_virt_addr;
+
+       /* Initialize the small buffer queue. */
+       for (i = 0; i < (QL_ADDR_ELE_PER_BUFQ_ENTRY * NUM_SBUFQ_ENTRIES); i++) {
+               small_buf_q_entry->addr_high =
+                   cpu_to_le32(qdev->small_buf_phy_addr_high);
+               small_buf_q_entry->addr_low =
+                   cpu_to_le32(qdev->small_buf_phy_addr_low +
+                               (i * QL_SMALL_BUFFER_SIZE));
+               small_buf_q_entry++;
+       }
+       qdev->small_buf_index = 0;
+       set_bit(QL_ALLOC_SMALL_BUF_DONE, &qdev->flags);
+       return 0;
+}
+
+static void ql_free_small_buffers(struct ql3_adapter *qdev)
+{
+       if (!test_bit(QL_ALLOC_SMALL_BUF_DONE, &qdev->flags)) {
+               netdev_info(qdev->ndev, "Already done\n");
+               return;
+       }
+       if (qdev->small_buf_virt_addr != NULL) {
+               pci_free_consistent(qdev->pdev,
+                                   qdev->small_buf_total_size,
+                                   qdev->small_buf_virt_addr,
+                                   qdev->small_buf_phy_addr);
+
+               qdev->small_buf_virt_addr = NULL;
+       }
+}
+
+static void ql_free_large_buffers(struct ql3_adapter *qdev)
+{
+       int i = 0;
+       struct ql_rcv_buf_cb *lrg_buf_cb;
+
+       for (i = 0; i < qdev->num_large_buffers; i++) {
+               lrg_buf_cb = &qdev->lrg_buf[i];
+               if (lrg_buf_cb->skb) {
+                       dev_kfree_skb(lrg_buf_cb->skb);
+                       pci_unmap_single(qdev->pdev,
+                                        dma_unmap_addr(lrg_buf_cb, mapaddr),
+                                        dma_unmap_len(lrg_buf_cb, maplen),
+                                        PCI_DMA_FROMDEVICE);
+                       memset(lrg_buf_cb, 0, sizeof(struct ql_rcv_buf_cb));
+               } else {
+                       break;
+               }
+       }
+}
+
+static void ql_init_large_buffers(struct ql3_adapter *qdev)
+{
+       int i;
+       struct ql_rcv_buf_cb *lrg_buf_cb;
+       struct bufq_addr_element *buf_addr_ele = qdev->lrg_buf_q_virt_addr;
+
+       for (i = 0; i < qdev->num_large_buffers; i++) {
+               lrg_buf_cb = &qdev->lrg_buf[i];
+               buf_addr_ele->addr_high = lrg_buf_cb->buf_phy_addr_high;
+               buf_addr_ele->addr_low = lrg_buf_cb->buf_phy_addr_low;
+               buf_addr_ele++;
+       }
+       qdev->lrg_buf_index = 0;
+       qdev->lrg_buf_skb_check = 0;
+}
+
+static int ql_alloc_large_buffers(struct ql3_adapter *qdev)
+{
+       int i;
+       struct ql_rcv_buf_cb *lrg_buf_cb;
+       struct sk_buff *skb;
+       dma_addr_t map;
+       int err;
+
+       for (i = 0; i < qdev->num_large_buffers; i++) {
+               skb = netdev_alloc_skb(qdev->ndev,
+                                      qdev->lrg_buffer_len);
+               if (unlikely(!skb)) {
+                       /* Better luck next round */
+                       netdev_err(qdev->ndev,
+                                  "large buff alloc failed for %d bytes at index %d\n",
+                                  qdev->lrg_buffer_len * 2, i);
+                       ql_free_large_buffers(qdev);
+                       return -ENOMEM;
+               } else {
+
+                       lrg_buf_cb = &qdev->lrg_buf[i];
+                       memset(lrg_buf_cb, 0, sizeof(struct ql_rcv_buf_cb));
+                       lrg_buf_cb->index = i;
+                       lrg_buf_cb->skb = skb;
+                       /*
+                        * We save some space to copy the ethhdr from the
+                        * first buffer.
+                        */
+                       skb_reserve(skb, QL_HEADER_SPACE);
+                       map = pci_map_single(qdev->pdev,
+                                            skb->data,
+                                            qdev->lrg_buffer_len -
+                                            QL_HEADER_SPACE,
+                                            PCI_DMA_FROMDEVICE);
+
+                       err = pci_dma_mapping_error(qdev->pdev, map);
+                       if (err) {
+                               netdev_err(qdev->ndev,
+                                          "PCI mapping failed with error: %d\n",
+                                          err);
+                               ql_free_large_buffers(qdev);
+                               return -ENOMEM;
+                       }
+
+                       dma_unmap_addr_set(lrg_buf_cb, mapaddr, map);
+                       dma_unmap_len_set(lrg_buf_cb, maplen,
+                                         qdev->lrg_buffer_len -
+                                         QL_HEADER_SPACE);
+                       lrg_buf_cb->buf_phy_addr_low =
+                           cpu_to_le32(LS_64BITS(map));
+                       lrg_buf_cb->buf_phy_addr_high =
+                           cpu_to_le32(MS_64BITS(map));
+               }
+       }
+       return 0;
+}
+
+static void ql_free_send_free_list(struct ql3_adapter *qdev)
+{
+       struct ql_tx_buf_cb *tx_cb;
+       int i;
+
+       tx_cb = &qdev->tx_buf[0];
+       for (i = 0; i < NUM_REQ_Q_ENTRIES; i++) {
+               kfree(tx_cb->oal);
+               tx_cb->oal = NULL;
+               tx_cb++;
+       }
+}
+
+static int ql_create_send_free_list(struct ql3_adapter *qdev)
+{
+       struct ql_tx_buf_cb *tx_cb;
+       int i;
+       struct ob_mac_iocb_req *req_q_curr = qdev->req_q_virt_addr;
+
+       /* Create free list of transmit buffers */
+       for (i = 0; i < NUM_REQ_Q_ENTRIES; i++) {
+
+               tx_cb = &qdev->tx_buf[i];
+               tx_cb->skb = NULL;
+               tx_cb->queue_entry = req_q_curr;
+               req_q_curr++;
+               tx_cb->oal = kmalloc(512, GFP_KERNEL);
+               if (tx_cb->oal == NULL)
+                       return -ENOMEM;
+       }
+       return 0;
+}
+
+static int ql_alloc_mem_resources(struct ql3_adapter *qdev)
+{
+       if (qdev->ndev->mtu == NORMAL_MTU_SIZE) {
+               qdev->num_lbufq_entries = NUM_LBUFQ_ENTRIES;
+               qdev->lrg_buffer_len = NORMAL_MTU_SIZE;
+       } else if (qdev->ndev->mtu == JUMBO_MTU_SIZE) {
+               /*
+                * Bigger buffers, so fewer of them.
+                */
+               qdev->num_lbufq_entries = JUMBO_NUM_LBUFQ_ENTRIES;
+               qdev->lrg_buffer_len = JUMBO_MTU_SIZE;
+       } else {
+               netdev_err(qdev->ndev, "Invalid mtu size: %d.  Only %d and %d are accepted.\n",
+                          qdev->ndev->mtu, NORMAL_MTU_SIZE, JUMBO_MTU_SIZE);
+               return -ENOMEM;
+       }
+       qdev->num_large_buffers =
+               qdev->num_lbufq_entries * QL_ADDR_ELE_PER_BUFQ_ENTRY;
+       qdev->lrg_buffer_len += VLAN_ETH_HLEN + VLAN_ID_LEN + QL_HEADER_SPACE;
+       qdev->max_frame_size =
+               (qdev->lrg_buffer_len - QL_HEADER_SPACE) + ETHERNET_CRC_SIZE;
+
+       /*
+        * First allocate a page of shared memory and use it for shadow
+        * locations of Network Request Queue Consumer Address Register and
+        * Network Completion Queue Producer Index Register
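+        * (the request consumer shadow lives at offset 0 of the page and the
+        * response producer shadow at offset 8, as set up below)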
+        */
+       qdev->shadow_reg_virt_addr =
+               pci_alloc_consistent(qdev->pdev,
+                                    PAGE_SIZE, &qdev->shadow_reg_phy_addr);
+
+       if (qdev->shadow_reg_virt_addr != NULL) {
+               qdev->preq_consumer_index = qdev->shadow_reg_virt_addr;
+               qdev->req_consumer_index_phy_addr_high =
+                       MS_64BITS(qdev->shadow_reg_phy_addr);
+               qdev->req_consumer_index_phy_addr_low =
+                       LS_64BITS(qdev->shadow_reg_phy_addr);
+
+               qdev->prsp_producer_index =
+                       (__le32 *) (((u8 *) qdev->preq_consumer_index) + 8);
+               qdev->rsp_producer_index_phy_addr_high =
+                       qdev->req_consumer_index_phy_addr_high;
+               qdev->rsp_producer_index_phy_addr_low =
+                       qdev->req_consumer_index_phy_addr_low + 8;
+       } else {
+               netdev_err(qdev->ndev, "shadowReg Alloc failed\n");
+               return -ENOMEM;
+       }
+
+       if (ql_alloc_net_req_rsp_queues(qdev) != 0) {
+               netdev_err(qdev->ndev, "ql_alloc_net_req_rsp_queues failed\n");
+               goto err_req_rsp;
+       }
+
+       if (ql_alloc_buffer_queues(qdev) != 0) {
+               netdev_err(qdev->ndev, "ql_alloc_buffer_queues failed\n");
+               goto err_buffer_queues;
+       }
+
+       if (ql_alloc_small_buffers(qdev) != 0) {
+               netdev_err(qdev->ndev, "ql_alloc_small_buffers failed\n");
+               goto err_small_buffers;
+       }
+
+       if (ql_alloc_large_buffers(qdev) != 0) {
+               netdev_err(qdev->ndev, "ql_alloc_large_buffers failed\n");
+               goto err_small_buffers;
+       }
+
+       /* Initialize the large buffer queue. */
+       ql_init_large_buffers(qdev);
+       if (ql_create_send_free_list(qdev))
+               goto err_free_list;
+
+       qdev->rsp_current = qdev->rsp_q_virt_addr;
+
+       return 0;
+err_free_list:
+       ql_free_send_free_list(qdev);
+err_small_buffers:
+       ql_free_buffer_queues(qdev);
+err_buffer_queues:
+       ql_free_net_req_rsp_queues(qdev);
+err_req_rsp:
+       pci_free_consistent(qdev->pdev,
+                           PAGE_SIZE,
+                           qdev->shadow_reg_virt_addr,
+                           qdev->shadow_reg_phy_addr);
+
+       return -ENOMEM;
+}
+
+static void ql_free_mem_resources(struct ql3_adapter *qdev)
+{
+       ql_free_send_free_list(qdev);
+       ql_free_large_buffers(qdev);
+       ql_free_small_buffers(qdev);
+       ql_free_buffer_queues(qdev);
+       ql_free_net_req_rsp_queues(qdev);
+       if (qdev->shadow_reg_virt_addr != NULL) {
+               pci_free_consistent(qdev->pdev,
+                                   PAGE_SIZE,
+                                   qdev->shadow_reg_virt_addr,
+                                   qdev->shadow_reg_phy_addr);
+               qdev->shadow_reg_virt_addr = NULL;
+       }
+}
+
+static int ql_init_misc_registers(struct ql3_adapter *qdev)
+{
+       struct ql3xxx_local_ram_registers __iomem *local_ram =
+           (void __iomem *)qdev->mem_map_registers;
+
+       if (ql_sem_spinlock(qdev, QL_DDR_RAM_SEM_MASK,
+                       (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
+                        2) << 4))
+               return -1;
+
+       ql_write_page2_reg(qdev,
+                          &local_ram->bufletSize, qdev->nvram_data.bufletSize);
+
+       ql_write_page2_reg(qdev,
+                          &local_ram->maxBufletCount,
+                          qdev->nvram_data.bufletCount);
+
+       ql_write_page2_reg(qdev,
+                          &local_ram->freeBufletThresholdLow,
+                          (qdev->nvram_data.tcpWindowThreshold25 << 16) |
+                          (qdev->nvram_data.tcpWindowThreshold0));
+
+       ql_write_page2_reg(qdev,
+                          &local_ram->freeBufletThresholdHigh,
+                          qdev->nvram_data.tcpWindowThreshold50);
+
+       ql_write_page2_reg(qdev,
+                          &local_ram->ipHashTableBase,
+                          (qdev->nvram_data.ipHashTableBaseHi << 16) |
+                          qdev->nvram_data.ipHashTableBaseLo);
+       ql_write_page2_reg(qdev,
+                          &local_ram->ipHashTableCount,
+                          qdev->nvram_data.ipHashTableSize);
+       ql_write_page2_reg(qdev,
+                          &local_ram->tcpHashTableBase,
+                          (qdev->nvram_data.tcpHashTableBaseHi << 16) |
+                          qdev->nvram_data.tcpHashTableBaseLo);
+       ql_write_page2_reg(qdev,
+                          &local_ram->tcpHashTableCount,
+                          qdev->nvram_data.tcpHashTableSize);
+       ql_write_page2_reg(qdev,
+                          &local_ram->ncbBase,
+                          (qdev->nvram_data.ncbTableBaseHi << 16) |
+                          qdev->nvram_data.ncbTableBaseLo);
+       ql_write_page2_reg(qdev,
+                          &local_ram->maxNcbCount,
+                          qdev->nvram_data.ncbTableSize);
+       ql_write_page2_reg(qdev,
+                          &local_ram->drbBase,
+                          (qdev->nvram_data.drbTableBaseHi << 16) |
+                          qdev->nvram_data.drbTableBaseLo);
+       ql_write_page2_reg(qdev,
+                          &local_ram->maxDrbCount,
+                          qdev->nvram_data.drbTableSize);
+       ql_sem_unlock(qdev, QL_DDR_RAM_SEM_MASK);
+       return 0;
+}
+
+static int ql_adapter_initialize(struct ql3_adapter *qdev)
+{
+       u32 value;
+       struct ql3xxx_port_registers __iomem *port_regs =
+               qdev->mem_map_registers;
+       __iomem u32 *spir = &port_regs->CommonRegs.serialPortInterfaceReg;
+       struct ql3xxx_host_memory_registers __iomem *hmem_regs =
+               (void __iomem *)port_regs;
+       u32 delay = 10;
+       int status = 0;
+
+       if (ql_mii_setup(qdev))
+               return -1;
+
+       /* Bring the PHY out of reset */
+       ql_write_common_reg(qdev, spir,
+                           (ISP_SERIAL_PORT_IF_WE |
+                            (ISP_SERIAL_PORT_IF_WE << 16)));
+       /* Give the PHY time to come out of reset. */
+       mdelay(100);
+       qdev->port_link_state = LS_DOWN;
+       netif_carrier_off(qdev->ndev);
+
+       /* V2 chip fix for ARS-39168. */
+       ql_write_common_reg(qdev, spir,
+                           (ISP_SERIAL_PORT_IF_SDE |
+                            (ISP_SERIAL_PORT_IF_SDE << 16)));
+
+       /* Request Queue Registers */
+       *((u32 *)(qdev->preq_consumer_index)) = 0;
+       atomic_set(&qdev->tx_count, NUM_REQ_Q_ENTRIES);
+       qdev->req_producer_index = 0;
+
+       ql_write_page1_reg(qdev,
+                          &hmem_regs->reqConsumerIndexAddrHigh,
+                          qdev->req_consumer_index_phy_addr_high);
+       ql_write_page1_reg(qdev,
+                          &hmem_regs->reqConsumerIndexAddrLow,
+                          qdev->req_consumer_index_phy_addr_low);
+
+       ql_write_page1_reg(qdev,
+                          &hmem_regs->reqBaseAddrHigh,
+                          MS_64BITS(qdev->req_q_phy_addr));
+       ql_write_page1_reg(qdev,
+                          &hmem_regs->reqBaseAddrLow,
+                          LS_64BITS(qdev->req_q_phy_addr));
+       ql_write_page1_reg(qdev, &hmem_regs->reqLength, NUM_REQ_Q_ENTRIES);
+
+       /* Response Queue Registers */
+       *((__le16 *) (qdev->prsp_producer_index)) = 0;
+       qdev->rsp_consumer_index = 0;
+       qdev->rsp_current = qdev->rsp_q_virt_addr;
+
+       ql_write_page1_reg(qdev,
+                          &hmem_regs->rspProducerIndexAddrHigh,
+                          qdev->rsp_producer_index_phy_addr_high);
+
+       ql_write_page1_reg(qdev,
+                          &hmem_regs->rspProducerIndexAddrLow,
+                          qdev->rsp_producer_index_phy_addr_low);
+
+       ql_write_page1_reg(qdev,
+                          &hmem_regs->rspBaseAddrHigh,
+                          MS_64BITS(qdev->rsp_q_phy_addr));
+
+       ql_write_page1_reg(qdev,
+                          &hmem_regs->rspBaseAddrLow,
+                          LS_64BITS(qdev->rsp_q_phy_addr));
+
+       ql_write_page1_reg(qdev, &hmem_regs->rspLength, NUM_RSP_Q_ENTRIES);
+
+       /* Large Buffer Queue */
+       ql_write_page1_reg(qdev,
+                          &hmem_regs->rxLargeQBaseAddrHigh,
+                          MS_64BITS(qdev->lrg_buf_q_phy_addr));
+
+       ql_write_page1_reg(qdev,
+                          &hmem_regs->rxLargeQBaseAddrLow,
+                          LS_64BITS(qdev->lrg_buf_q_phy_addr));
+
+       ql_write_page1_reg(qdev,
+                          &hmem_regs->rxLargeQLength,
+                          qdev->num_lbufq_entries);
+
+       ql_write_page1_reg(qdev,
+                          &hmem_regs->rxLargeBufferLength,
+                          qdev->lrg_buffer_len);
+
+       /* Small Buffer Queue */
+       ql_write_page1_reg(qdev,
+                          &hmem_regs->rxSmallQBaseAddrHigh,
+                          MS_64BITS(qdev->small_buf_q_phy_addr));
+
+       ql_write_page1_reg(qdev,
+                          &hmem_regs->rxSmallQBaseAddrLow,
+                          LS_64BITS(qdev->small_buf_q_phy_addr));
+
+       ql_write_page1_reg(qdev, &hmem_regs->rxSmallQLength, NUM_SBUFQ_ENTRIES);
+       ql_write_page1_reg(qdev,
+                          &hmem_regs->rxSmallBufferLength,
+                          QL_SMALL_BUFFER_SIZE);
+
+       qdev->small_buf_q_producer_index = NUM_SBUFQ_ENTRIES - 1;
+       qdev->small_buf_release_cnt = 8;
+       qdev->lrg_buf_q_producer_index = qdev->num_lbufq_entries - 1;
+       qdev->lrg_buf_release_cnt = 8;
+       qdev->lrg_buf_next_free = qdev->lrg_buf_q_virt_addr;
+       qdev->small_buf_index = 0;
+       qdev->lrg_buf_index = 0;
+       qdev->lrg_buf_free_count = 0;
+       qdev->lrg_buf_free_head = NULL;
+       qdev->lrg_buf_free_tail = NULL;
+
+       ql_write_common_reg(qdev,
+                           &port_regs->CommonRegs.
+                           rxSmallQProducerIndex,
+                           qdev->small_buf_q_producer_index);
+       ql_write_common_reg(qdev,
+                           &port_regs->CommonRegs.
+                           rxLargeQProducerIndex,
+                           qdev->lrg_buf_q_producer_index);
+
+       /*
+        * Find out if the chip has already been initialized.  If it has, then
+        * we skip some of the initialization.
+        */
+       clear_bit(QL_LINK_MASTER, &qdev->flags);
+       value = ql_read_page0_reg(qdev, &port_regs->portStatus);
+       if ((value & PORT_STATUS_IC) == 0) {
+
+               /* Chip has not been configured yet, so let it rip. */
+               if (ql_init_misc_registers(qdev)) {
+                       status = -1;
+                       goto out;
+               }
+
+               value = qdev->nvram_data.tcpMaxWindowSize;
+               ql_write_page0_reg(qdev, &port_regs->tcpMaxWindow, value);
+
+               value = (0xFFFF << 16) | qdev->nvram_data.extHwConfig;
+
+               if (ql_sem_spinlock(qdev, QL_FLASH_SEM_MASK,
+                               (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index)
+                                * 2) << 13)) {
+                       status = -1;
+                       goto out;
+               }
+               ql_write_page0_reg(qdev, &port_regs->ExternalHWConfig, value);
+               ql_write_page0_reg(qdev, &port_regs->InternalChipConfig,
+                                  (((INTERNAL_CHIP_SD | INTERNAL_CHIP_WE) <<
+                                    16) | (INTERNAL_CHIP_SD |
+                                           INTERNAL_CHIP_WE)));
+               ql_sem_unlock(qdev, QL_FLASH_SEM_MASK);
+       }
+
+       if (qdev->mac_index)
+               ql_write_page0_reg(qdev,
+                                  &port_regs->mac1MaxFrameLengthReg,
+                                  qdev->max_frame_size);
+       else
+               ql_write_page0_reg(qdev,
+                                  &port_regs->mac0MaxFrameLengthReg,
+                                  qdev->max_frame_size);
+
+       if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
+                       (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
+                        2) << 7)) {
+               status = -1;
+               goto out;
+       }
+
+       PHY_Setup(qdev);
+       ql_init_scan_mode(qdev);
+       ql_get_phy_owner(qdev);
+
+       /* Load the MAC Configuration */
+
+       /* Program lower 32 bits of the MAC address */
+       ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg,
+                          (MAC_ADDR_INDIRECT_PTR_REG_RP_MASK << 16));
+       ql_write_page0_reg(qdev, &port_regs->macAddrDataReg,
+                          ((qdev->ndev->dev_addr[2] << 24)
+                           | (qdev->ndev->dev_addr[3] << 16)
+                           | (qdev->ndev->dev_addr[4] << 8)
+                           | qdev->ndev->dev_addr[5]));
+
+       /* Program top 16 bits of the MAC address */
+       ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg,
+                          ((MAC_ADDR_INDIRECT_PTR_REG_RP_MASK << 16) | 1));
+       ql_write_page0_reg(qdev, &port_regs->macAddrDataReg,
+                          ((qdev->ndev->dev_addr[0] << 8)
+                           | qdev->ndev->dev_addr[1]));
+
+       /* Enable Primary MAC */
+       ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg,
+                          ((MAC_ADDR_INDIRECT_PTR_REG_PE << 16) |
+                           MAC_ADDR_INDIRECT_PTR_REG_PE));
+
+       /* Clear Primary and Secondary IP addresses */
+       ql_write_page0_reg(qdev, &port_regs->ipAddrIndexReg,
+                          ((IP_ADDR_INDEX_REG_MASK << 16) |
+                           (qdev->mac_index << 2)));
+       ql_write_page0_reg(qdev, &port_regs->ipAddrDataReg, 0);
+
+       ql_write_page0_reg(qdev, &port_regs->ipAddrIndexReg,
+                          ((IP_ADDR_INDEX_REG_MASK << 16) |
+                           ((qdev->mac_index << 2) + 1)));
+       ql_write_page0_reg(qdev, &port_regs->ipAddrDataReg, 0);
+
+       ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
+
+       /* Indicate Configuration Complete */
+       ql_write_page0_reg(qdev,
+                          &port_regs->portControl,
+                          ((PORT_CONTROL_CC << 16) | PORT_CONTROL_CC));
+
+       do {
+               value = ql_read_page0_reg(qdev, &port_regs->portStatus);
+               if (value & PORT_STATUS_IC)
+                       break;
+               spin_unlock_irq(&qdev->hw_lock);
+               msleep(500);
+               spin_lock_irq(&qdev->hw_lock);
+       } while (--delay);
+
+       if (delay == 0) {
+               netdev_err(qdev->ndev, "Hw Initialization timeout\n");
+               status = -1;
+               goto out;
+       }
+
+       /* Enable Ethernet Function */
+       if (qdev->device_id == QL3032_DEVICE_ID) {
+               value =
+                   (QL3032_PORT_CONTROL_EF | QL3032_PORT_CONTROL_KIE |
+                    QL3032_PORT_CONTROL_EIv6 | QL3032_PORT_CONTROL_EIv4 |
+                       QL3032_PORT_CONTROL_ET);
+               ql_write_page0_reg(qdev, &port_regs->functionControl,
+                                  ((value << 16) | value));
+       } else {
+               value =
+                   (PORT_CONTROL_EF | PORT_CONTROL_ET | PORT_CONTROL_EI |
+                    PORT_CONTROL_HH);
+               ql_write_page0_reg(qdev, &port_regs->portControl,
+                                  ((value << 16) | value));
+       }
+
+out:
+       return status;
+}
+
+/*
+ * Caller holds hw_lock.
+ */
+static int ql_adapter_reset(struct ql3_adapter *qdev)
+{
+       struct ql3xxx_port_registers __iomem *port_regs =
+               qdev->mem_map_registers;
+       int status = 0;
+       u16 value;
+       int max_wait_time;
+
+       set_bit(QL_RESET_ACTIVE, &qdev->flags);
+       clear_bit(QL_RESET_DONE, &qdev->flags);
+
+       /*
+        * Issue soft reset to chip.
+        */
+       netdev_printk(KERN_DEBUG, qdev->ndev, "Issue soft reset to chip\n");
+       ql_write_common_reg(qdev,
+                           &port_regs->CommonRegs.ispControlStatus,
+                           ((ISP_CONTROL_SR << 16) | ISP_CONTROL_SR));
+
+       /* Wait up to 5 seconds for the reset to complete. */
+       netdev_printk(KERN_DEBUG, qdev->ndev,
+                     "Waiting up to 5 seconds for reset to complete\n");
+
+       /* Wait until the firmware tells us the Soft Reset is done */
+       max_wait_time = 5;
+       do {
+               value =
+                   ql_read_common_reg(qdev,
+                                      &port_regs->CommonRegs.ispControlStatus);
+               if ((value & ISP_CONTROL_SR) == 0)
+                       break;
+
+               ssleep(1);
+       } while ((--max_wait_time));
+
+       /*
+        * Also, make sure that the Network Reset Interrupt bit has been
+        * cleared after the soft reset has taken place.
+        */
+       value =
+           ql_read_common_reg(qdev, &port_regs->CommonRegs.ispControlStatus);
+       if (value & ISP_CONTROL_RI) {
+               netdev_printk(KERN_DEBUG, qdev->ndev,
+                             "clearing RI after reset\n");
+               ql_write_common_reg(qdev,
+                                   &port_regs->CommonRegs.
+                                   ispControlStatus,
+                                   ((ISP_CONTROL_RI << 16) | ISP_CONTROL_RI));
+       }
+
+       if (max_wait_time == 0) {
+               /* Issue Force Soft Reset */
+               ql_write_common_reg(qdev,
+                                   &port_regs->CommonRegs.
+                                   ispControlStatus,
+                                   ((ISP_CONTROL_FSR << 16) |
+                                    ISP_CONTROL_FSR));
+               /*
+                * Wait until the firmware tells us the Force Soft Reset is
+                * done
+                */
+               max_wait_time = 5;
+               do {
+                       value = ql_read_common_reg(qdev,
+                                                  &port_regs->CommonRegs.
+                                                  ispControlStatus);
+                       if ((value & ISP_CONTROL_FSR) == 0)
+                               break;
+                       ssleep(1);
+               } while ((--max_wait_time));
+       }
+       if (max_wait_time == 0)
+               status = 1;
+
+       clear_bit(QL_RESET_ACTIVE, &qdev->flags);
+       set_bit(QL_RESET_DONE, &qdev->flags);
+       return status;
+}
+
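+/*
+ * Work out from ispControlStatus which network function this PCI function
+ * is (port 0 or port 1) and record the matching outbound MAC IOCB opcode,
+ * mailbox bit mask and PHY address.  The SM0/SM1 bits in portStatus tell
+ * us whether the port's media is optical or copper.
+ */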
+static void ql_set_mac_info(struct ql3_adapter *qdev)
+{
+       struct ql3xxx_port_registers __iomem *port_regs =
+               qdev->mem_map_registers;
+       u32 value, port_status;
+       u8 func_number;
+
+       /* Get the function number */
+       value =
+           ql_read_common_reg_l(qdev, &port_regs->CommonRegs.ispControlStatus);
+       func_number = (u8) ((value >> 4) & OPCODE_FUNC_ID_MASK);
+       port_status = ql_read_page0_reg(qdev, &port_regs->portStatus);
+       switch (value & ISP_CONTROL_FN_MASK) {
+       case ISP_CONTROL_FN0_NET:
+               qdev->mac_index = 0;
+               qdev->mac_ob_opcode = OUTBOUND_MAC_IOCB | func_number;
+               qdev->mb_bit_mask = FN0_MA_BITS_MASK;
+               qdev->PHYAddr = PORT0_PHY_ADDRESS;
+               if (port_status & PORT_STATUS_SM0)
+                       set_bit(QL_LINK_OPTICAL, &qdev->flags);
+               else
+                       clear_bit(QL_LINK_OPTICAL, &qdev->flags);
+               break;
+
+       case ISP_CONTROL_FN1_NET:
+               qdev->mac_index = 1;
+               qdev->mac_ob_opcode = OUTBOUND_MAC_IOCB | func_number;
+               qdev->mb_bit_mask = FN1_MA_BITS_MASK;
+               qdev->PHYAddr = PORT1_PHY_ADDRESS;
+               if (port_status & PORT_STATUS_SM1)
+                       set_bit(QL_LINK_OPTICAL, &qdev->flags);
+               else
+                       clear_bit(QL_LINK_OPTICAL, &qdev->flags);
+               break;
+
+       case ISP_CONTROL_FN0_SCSI:
+       case ISP_CONTROL_FN1_SCSI:
+       default:
+               netdev_printk(KERN_DEBUG, qdev->ndev,
+                             "Invalid function number, ispControlStatus = 0x%x\n",
+                             value);
+               break;
+       }
+       qdev->numPorts = qdev->nvram_data.version_and_numPorts >> 8;
+}
+
+static void ql_display_dev_info(struct net_device *ndev)
+{
+       struct ql3_adapter *qdev = netdev_priv(ndev);
+       struct pci_dev *pdev = qdev->pdev;
+
+       netdev_info(ndev,
+                   "%s Adapter %d RevisionID %d found %s on PCI slot %d\n",
+                   DRV_NAME, qdev->index, qdev->chip_rev_id,
+                   qdev->device_id == QL3032_DEVICE_ID ? "QLA3032" : "QLA3022",
+                   qdev->pci_slot);
+       netdev_info(ndev, "%s Interface\n",
+               test_bit(QL_LINK_OPTICAL, &qdev->flags) ? "OPTICAL" : "COPPER");
+
+       /*
+        * Print PCI bus width/type.
+        */
+       netdev_info(ndev, "Bus interface is %s %s\n",
+                   ((qdev->pci_width == 64) ? "64-bit" : "32-bit"),
+                   ((qdev->pci_x) ? "PCI-X" : "PCI"));
+
+       netdev_info(ndev, "mem  IO base address adjusted = 0x%p\n",
+                   qdev->mem_map_registers);
+       netdev_info(ndev, "Interrupt number = %d\n", pdev->irq);
+
+       netif_info(qdev, probe, ndev, "MAC address %pM\n", ndev->dev_addr);
+}
+
+static int ql_adapter_down(struct ql3_adapter *qdev, int do_reset)
+{
+       struct net_device *ndev = qdev->ndev;
+       int retval = 0;
+
+       netif_stop_queue(ndev);
+       netif_carrier_off(ndev);
+
+       clear_bit(QL_ADAPTER_UP, &qdev->flags);
+       clear_bit(QL_LINK_MASTER, &qdev->flags);
+
+       ql_disable_interrupts(qdev);
+
+       free_irq(qdev->pdev->irq, ndev);
+
+       if (qdev->msi && test_bit(QL_MSI_ENABLED, &qdev->flags)) {
+               netdev_info(qdev->ndev, "calling pci_disable_msi()\n");
+               clear_bit(QL_MSI_ENABLED, &qdev->flags);
+               pci_disable_msi(qdev->pdev);
+       }
+
+       del_timer_sync(&qdev->adapter_timer);
+
+       napi_disable(&qdev->napi);
+
+       if (do_reset) {
+               int soft_reset;
+               unsigned long hw_flags;
+
+               spin_lock_irqsave(&qdev->hw_lock, hw_flags);
+               if (ql_wait_for_drvr_lock(qdev)) {
+                       soft_reset = ql_adapter_reset(qdev);
+                       if (soft_reset) {
+                               netdev_err(ndev, "ql_adapter_reset(%d) FAILED!\n",
+                                          qdev->index);
+                       }
+                       netdev_err(ndev,
+                                  "Releasing driver lock via chip reset\n");
+               } else {
+                       netdev_err(ndev,
+                                  "Could not acquire driver lock to do reset!\n");
+                       retval = -1;
+               }
+               spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
+       }
+       ql_free_mem_resources(qdev);
+       return retval;
+}
+
+static int ql_adapter_up(struct ql3_adapter *qdev)
+{
+       struct net_device *ndev = qdev->ndev;
+       int err;
+       unsigned long irq_flags = IRQF_SHARED;
+       unsigned long hw_flags;
+
+       if (ql_alloc_mem_resources(qdev)) {
+               netdev_err(ndev, "Unable to  allocate buffers\n");
+               return -ENOMEM;
+       }
+
+       if (qdev->msi) {
+               if (pci_enable_msi(qdev->pdev)) {
+                       netdev_err(ndev,
+                                  "User requested MSI, but MSI failed to initialize.  Continuing without MSI.\n");
+                       qdev->msi = 0;
+               } else {
+                       netdev_info(ndev, "MSI Enabled...\n");
+                       set_bit(QL_MSI_ENABLED, &qdev->flags);
+                       irq_flags &= ~IRQF_SHARED;
+               }
+       }
+
+       err = request_irq(qdev->pdev->irq, ql3xxx_isr,
+                         irq_flags, ndev->name, ndev);
+       if (err) {
+               netdev_err(ndev,
+                          "Failed to reserve interrupt %d - already in use\n",
+                          qdev->pdev->irq);
+               goto err_irq;
+       }
+
+       spin_lock_irqsave(&qdev->hw_lock, hw_flags);
+
+       err = ql_wait_for_drvr_lock(qdev);
+       if (err) {
+               err = ql_adapter_initialize(qdev);
+               if (err) {
+                       netdev_err(ndev, "Unable to initialize adapter\n");
+                       goto err_init;
+               }
+               netdev_err(ndev, "Releasing driver lock\n");
+               ql_sem_unlock(qdev, QL_DRVR_SEM_MASK);
+       } else {
+               netdev_err(ndev, "Could not acquire driver lock\n");
+               goto err_lock;
+       }
+
+       spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
+
+       set_bit(QL_ADAPTER_UP, &qdev->flags);
+
+       mod_timer(&qdev->adapter_timer, jiffies + HZ * 1);
+
+       napi_enable(&qdev->napi);
+       ql_enable_interrupts(qdev);
+       return 0;
+
+err_init:
+       ql_sem_unlock(qdev, QL_DRVR_SEM_MASK);
+err_lock:
+       spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
+       free_irq(qdev->pdev->irq, ndev);
+err_irq:
+       if (qdev->msi && test_bit(QL_MSI_ENABLED, &qdev->flags)) {
+               netdev_info(ndev, "calling pci_disable_msi()\n");
+               clear_bit(QL_MSI_ENABLED, &qdev->flags);
+               pci_disable_msi(qdev->pdev);
+       }
+       return err;
+}
+
+static int ql_cycle_adapter(struct ql3_adapter *qdev, int reset)
+{
+       if (ql_adapter_down(qdev, reset) || ql_adapter_up(qdev)) {
+               netdev_err(qdev->ndev,
+                          "Driver up/down cycle failed, closing device\n");
+               rtnl_lock();
+               dev_close(qdev->ndev);
+               rtnl_unlock();
+               return -1;
+       }
+       return 0;
+}
+
+static int ql3xxx_close(struct net_device *ndev)
+{
+       struct ql3_adapter *qdev = netdev_priv(ndev);
+
+       /*
+        * Wait for device to recover from a reset.
+        * (Rarely happens, but possible.)
+        */
+       while (!test_bit(QL_ADAPTER_UP, &qdev->flags))
+               msleep(50);
+
+       ql_adapter_down(qdev, QL_DO_RESET);
+       return 0;
+}
+
+static int ql3xxx_open(struct net_device *ndev)
+{
+       struct ql3_adapter *qdev = netdev_priv(ndev);
+       return ql_adapter_up(qdev);
+}
+
+static int ql3xxx_set_mac_address(struct net_device *ndev, void *p)
+{
+       struct ql3_adapter *qdev = netdev_priv(ndev);
+       struct ql3xxx_port_registers __iomem *port_regs =
+                       qdev->mem_map_registers;
+       struct sockaddr *addr = p;
+       unsigned long hw_flags;
+
+       if (netif_running(ndev))
+               return -EBUSY;
+
+       if (!is_valid_ether_addr(addr->sa_data))
+               return -EADDRNOTAVAIL;
+
+       memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);
+
+       spin_lock_irqsave(&qdev->hw_lock, hw_flags);
+       /* Program lower 32 bits of the MAC address */
+       ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg,
+                          (MAC_ADDR_INDIRECT_PTR_REG_RP_MASK << 16));
+       ql_write_page0_reg(qdev, &port_regs->macAddrDataReg,
+                          ((ndev->dev_addr[2] << 24) |
+                           (ndev->dev_addr[3] << 16) |
+                           (ndev->dev_addr[4] << 8) | ndev->dev_addr[5]));
+
+       /* Program top 16 bits of the MAC address */
+       ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg,
+                          ((MAC_ADDR_INDIRECT_PTR_REG_RP_MASK << 16) | 1));
+       ql_write_page0_reg(qdev, &port_regs->macAddrDataReg,
+                          ((ndev->dev_addr[0] << 8) | ndev->dev_addr[1]));
+       spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
+
+       return 0;
+}
+
+static void ql3xxx_tx_timeout(struct net_device *ndev)
+{
+       struct ql3_adapter *qdev = netdev_priv(ndev);
+
+       netdev_err(ndev, "Resetting...\n");
+       /*
+        * Stop the queues, we've got a problem.
+        */
+       netif_stop_queue(ndev);
+
+       /*
+        * Wake up the worker to process this event.
+        */
+       queue_delayed_work(qdev->workqueue, &qdev->tx_timeout_work, 0);
+}
+
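+/*
+ * Deferred reset handler.  When the chip has asked for a reset
+ * (QL_RESET_START / QL_RESET_PER_SCSI), unmap and free any in-flight TX
+ * buffers, clear the reset-request interrupt, wait for the soft reset to
+ * finish and then cycle the adapter down and back up.
+ */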
+static void ql_reset_work(struct work_struct *work)
+{
+       struct ql3_adapter *qdev =
+               container_of(work, struct ql3_adapter, reset_work.work);
+       struct net_device *ndev = qdev->ndev;
+       u32 value;
+       struct ql_tx_buf_cb *tx_cb;
+       int max_wait_time, i;
+       struct ql3xxx_port_registers __iomem *port_regs =
+               qdev->mem_map_registers;
+       unsigned long hw_flags;
+
+       if (test_bit((QL_RESET_PER_SCSI | QL_RESET_START), &qdev->flags)) {
+               clear_bit(QL_LINK_MASTER, &qdev->flags);
+
+               /*
+                * Loop through the active list and return the skb.
+                */
+               for (i = 0; i < NUM_REQ_Q_ENTRIES; i++) {
+                       int j;
+                       tx_cb = &qdev->tx_buf[i];
+                       if (tx_cb->skb) {
+                               netdev_printk(KERN_DEBUG, ndev,
+                                             "Freeing lost SKB\n");
+                               pci_unmap_single(qdev->pdev,
+                                        dma_unmap_addr(&tx_cb->map[0],
+                                                       mapaddr),
+                                        dma_unmap_len(&tx_cb->map[0], maplen),
+                                        PCI_DMA_TODEVICE);
+                               for (j = 1; j < tx_cb->seg_count; j++) {
+                                       pci_unmap_page(qdev->pdev,
+                                              dma_unmap_addr(&tx_cb->map[j],
+                                                             mapaddr),
+                                              dma_unmap_len(&tx_cb->map[j],
+                                                            maplen),
+                                              PCI_DMA_TODEVICE);
+                               }
+                               dev_kfree_skb(tx_cb->skb);
+                               tx_cb->skb = NULL;
+                       }
+               }
+
+               netdev_err(ndev, "Clearing NRI after reset\n");
+               spin_lock_irqsave(&qdev->hw_lock, hw_flags);
+               ql_write_common_reg(qdev,
+                                   &port_regs->CommonRegs.
+                                   ispControlStatus,
+                                   ((ISP_CONTROL_RI << 16) | ISP_CONTROL_RI));
+               /*
+                * Wait for the Soft Reset to complete.
+                */
+               max_wait_time = 10;
+               do {
+                       value = ql_read_common_reg(qdev,
+                                                  &port_regs->CommonRegs.
+                                                  ispControlStatus);
+                       if ((value & ISP_CONTROL_SR) == 0) {
+                               netdev_printk(KERN_DEBUG, ndev,
+                                             "reset completed\n");
+                               break;
+                       }
+
+                       if (value & ISP_CONTROL_RI) {
+                               netdev_printk(KERN_DEBUG, ndev,
+                                             "clearing NRI after reset\n");
+                               ql_write_common_reg(qdev,
+                                                   &port_regs->
+                                                   CommonRegs.
+                                                   ispControlStatus,
+                                                   ((ISP_CONTROL_RI <<
+                                                     16) | ISP_CONTROL_RI));
+                       }
+
+                       spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
+                       ssleep(1);
+                       spin_lock_irqsave(&qdev->hw_lock, hw_flags);
+               } while (--max_wait_time);
+               spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
+
+               if (value & ISP_CONTROL_SR) {
+
+                       /*
+                        * Set the reset flags and clear the board again.
+                        * Nothing else to do...
+                        */
+                       netdev_err(ndev,
+                                  "Timed out waiting for reset to complete\n");
+                       netdev_err(ndev, "Do a reset\n");
+                       clear_bit(QL_RESET_PER_SCSI, &qdev->flags);
+                       clear_bit(QL_RESET_START, &qdev->flags);
+                       ql_cycle_adapter(qdev, QL_DO_RESET);
+                       return;
+               }
+
+               clear_bit(QL_RESET_ACTIVE, &qdev->flags);
+               clear_bit(QL_RESET_PER_SCSI, &qdev->flags);
+               clear_bit(QL_RESET_START, &qdev->flags);
+               ql_cycle_adapter(qdev, QL_NO_RESET);
+       }
+}
+
+static void ql_tx_timeout_work(struct work_struct *work)
+{
+       struct ql3_adapter *qdev =
+               container_of(work, struct ql3_adapter, tx_timeout_work.work);
+
+       ql_cycle_adapter(qdev, QL_DO_RESET);
+}
+
+static void ql_get_board_info(struct ql3_adapter *qdev)
+{
+       struct ql3xxx_port_registers __iomem *port_regs =
+               qdev->mem_map_registers;
+       u32 value;
+
+       value = ql_read_page0_reg_l(qdev, &port_regs->portStatus);
+
+       qdev->chip_rev_id = ((value & PORT_STATUS_REV_ID_MASK) >> 12);
+       if (value & PORT_STATUS_64)
+               qdev->pci_width = 64;
+       else
+               qdev->pci_width = 32;
+       if (value & PORT_STATUS_X)
+               qdev->pci_x = 1;
+       else
+               qdev->pci_x = 0;
+       qdev->pci_slot = (u8) PCI_SLOT(qdev->pdev->devfn);
+}
+
+static void ql3xxx_timer(unsigned long ptr)
+{
+       struct ql3_adapter *qdev = (struct ql3_adapter *)ptr;
+       queue_delayed_work(qdev->workqueue, &qdev->link_state_work, 0);
+}
+
+static const struct net_device_ops ql3xxx_netdev_ops = {
+       .ndo_open               = ql3xxx_open,
+       .ndo_start_xmit         = ql3xxx_send,
+       .ndo_stop               = ql3xxx_close,
+       .ndo_change_mtu         = eth_change_mtu,
+       .ndo_validate_addr      = eth_validate_addr,
+       .ndo_set_mac_address    = ql3xxx_set_mac_address,
+       .ndo_tx_timeout         = ql3xxx_tx_timeout,
+};
+
+static int ql3xxx_probe(struct pci_dev *pdev,
+                       const struct pci_device_id *pci_entry)
+{
+       struct net_device *ndev = NULL;
+       struct ql3_adapter *qdev = NULL;
+       static int cards_found;
+       int uninitialized_var(pci_using_dac), err;
+
+       err = pci_enable_device(pdev);
+       if (err) {
+               pr_err("%s cannot enable PCI device\n", pci_name(pdev));
+               goto err_out;
+       }
+
+       err = pci_request_regions(pdev, DRV_NAME);
+       if (err) {
+               pr_err("%s cannot obtain PCI resources\n", pci_name(pdev));
+               goto err_out_disable_pdev;
+       }
+
+       pci_set_master(pdev);
+
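+       /*
+        * Prefer 64-bit DMA addressing and fall back to a 32-bit mask;
+        * pci_using_dac records the outcome so NETIF_F_HIGHDMA can be set
+        * on the net_device below.
+        */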
+       if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
+               pci_using_dac = 1;
+               err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
+       } else if (!(err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))) {
+               pci_using_dac = 0;
+               err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+       }
+
+       if (err) {
+               pr_err("%s no usable DMA configuration\n", pci_name(pdev));
+               goto err_out_free_regions;
+       }
+
+       ndev = alloc_etherdev(sizeof(struct ql3_adapter));
+       if (!ndev) {
+               err = -ENOMEM;
+               goto err_out_free_regions;
+       }
+
+       SET_NETDEV_DEV(ndev, &pdev->dev);
+
+       pci_set_drvdata(pdev, ndev);
+
+       qdev = netdev_priv(ndev);
+       qdev->index = cards_found;
+       qdev->ndev = ndev;
+       qdev->pdev = pdev;
+       qdev->device_id = pci_entry->device;
+       qdev->port_link_state = LS_DOWN;
+       if (msi)
+               qdev->msi = 1;
+
+       qdev->msg_enable = netif_msg_init(debug, default_msg);
+
+       if (pci_using_dac)
+               ndev->features |= NETIF_F_HIGHDMA;
+       if (qdev->device_id == QL3032_DEVICE_ID)
+               ndev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
+
+       qdev->mem_map_registers = pci_ioremap_bar(pdev, 1);
+       if (!qdev->mem_map_registers) {
+               pr_err("%s: cannot map device registers\n", pci_name(pdev));
+               err = -EIO;
+               goto err_out_free_ndev;
+       }
+
+       spin_lock_init(&qdev->adapter_lock);
+       spin_lock_init(&qdev->hw_lock);
+
+       /* Set driver entry points */
+       ndev->netdev_ops = &ql3xxx_netdev_ops;
+       ndev->ethtool_ops = &ql3xxx_ethtool_ops;
+       ndev->watchdog_timeo = 5 * HZ;
+
+       netif_napi_add(ndev, &qdev->napi, ql_poll, 64);
+
+       ndev->irq = pdev->irq;
+
+       /* make sure the EEPROM is good */
+       if (ql_get_nvram_params(qdev)) {
+               pr_alert("%s: Adapter #%d, Invalid NVRAM parameters\n",
+                        __func__, qdev->index);
+               err = -EIO;
+               goto err_out_iounmap;
+       }
+
+       ql_set_mac_info(qdev);
+
+       /* Validate and set parameters */
+       if (qdev->mac_index) {
+               ndev->mtu = qdev->nvram_data.macCfg_port1.etherMtu_mac;
+               ql_set_mac_addr(ndev, qdev->nvram_data.funcCfg_fn2.macAddress);
+       } else {
+               ndev->mtu = qdev->nvram_data.macCfg_port0.etherMtu_mac;
+               ql_set_mac_addr(ndev, qdev->nvram_data.funcCfg_fn0.macAddress);
+       }
+
+       ndev->tx_queue_len = NUM_REQ_Q_ENTRIES;
+
+       /* Record PCI bus information. */
+       ql_get_board_info(qdev);
+
+       /*
+        * Set the Maximum Memory Read Byte Count value. We do this to handle
+        * jumbo frames.
+        */
+       if (qdev->pci_x)
+               pci_write_config_word(pdev, (int)0x4e, (u16) 0x0036);
+
+       err = register_netdev(ndev);
+       if (err) {
+               pr_err("%s: cannot register net device\n", pci_name(pdev));
+               goto err_out_iounmap;
+       }
+
+       /* we're going to reset, so assume we have no link for now */
+
+       netif_carrier_off(ndev);
+       netif_stop_queue(ndev);
+
+       qdev->workqueue = create_singlethread_workqueue(ndev->name);
+       INIT_DELAYED_WORK(&qdev->reset_work, ql_reset_work);
+       INIT_DELAYED_WORK(&qdev->tx_timeout_work, ql_tx_timeout_work);
+       INIT_DELAYED_WORK(&qdev->link_state_work, ql_link_state_machine_work);
+
+       init_timer(&qdev->adapter_timer);
+       qdev->adapter_timer.function = ql3xxx_timer;
+       qdev->adapter_timer.expires = jiffies + HZ * 2; /* two second delay */
+       qdev->adapter_timer.data = (unsigned long)qdev;
+
+       if (!cards_found) {
+               pr_alert("%s\n", DRV_STRING);
+               pr_alert("Driver name: %s, Version: %s\n",
+                        DRV_NAME, DRV_VERSION);
+       }
+       ql_display_dev_info(ndev);
+
+       cards_found++;
+       return 0;
+
+err_out_iounmap:
+       iounmap(qdev->mem_map_registers);
+err_out_free_ndev:
+       free_netdev(ndev);
+err_out_free_regions:
+       pci_release_regions(pdev);
+err_out_disable_pdev:
+       pci_disable_device(pdev);
+err_out:
+       return err;
+}
+
+static void ql3xxx_remove(struct pci_dev *pdev)
+{
+       struct net_device *ndev = pci_get_drvdata(pdev);
+       struct ql3_adapter *qdev = netdev_priv(ndev);
+
+       unregister_netdev(ndev);
+
+       ql_disable_interrupts(qdev);
+
+       if (qdev->workqueue) {
+               cancel_delayed_work(&qdev->reset_work);
+               cancel_delayed_work(&qdev->tx_timeout_work);
+               destroy_workqueue(qdev->workqueue);
+               qdev->workqueue = NULL;
+       }
+
+       iounmap(qdev->mem_map_registers);
+       pci_release_regions(pdev);
+       free_netdev(ndev);
+}
+
+static struct pci_driver ql3xxx_driver = {
+       .name = DRV_NAME,
+       .id_table = ql3xxx_pci_tbl,
+       .probe = ql3xxx_probe,
+       .remove = ql3xxx_remove,
+};
+
+module_pci_driver(ql3xxx_driver);
diff --git a/drivers/net/ethernet/qlogic/qla3xxx.h b/drivers/net/ethernet/qlogic/qla3xxx.h
new file mode 100644 (file)
index 0000000..73e2343
--- /dev/null
@@ -0,0 +1,1189 @@
+/*
+ * QLogic QLA3xxx NIC HBA Driver
+ * Copyright (c)  2003-2006 QLogic Corporation
+ *
+ * See LICENSE.qla3xxx for copyright and licensing details.
+ */
+#ifndef _QLA3XXX_H_
+#define _QLA3XXX_H_
+
+/*
+ * IOCB Definitions...
+ */
+#pragma pack(1)
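+/*
+ * These IOCB structures are passed to and from the adapter by DMA, so
+ * they are byte-packed to keep their layout exactly as the hardware
+ * expects.
+ */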
+
+#define OPCODE_OB_MAC_IOCB_FN0          0x01
+#define OPCODE_OB_MAC_IOCB_FN2          0x21
+
+#define OPCODE_IB_MAC_IOCB          0xF9
+#define OPCODE_IB_3032_MAC_IOCB     0x09
+#define OPCODE_IB_IP_IOCB           0xFA
+#define OPCODE_IB_3032_IP_IOCB      0x0A
+
+#define OPCODE_FUNC_ID_MASK                 0x30
+#define OUTBOUND_MAC_IOCB                   0x01       /* plus function bits */
+
+#define FN0_MA_BITS_MASK    0x00
+#define FN1_MA_BITS_MASK    0x80
+
+struct ob_mac_iocb_req {
+       u8 opcode;
+       u8 flags;
+#define OB_MAC_IOCB_REQ_MA  0xe0
+#define OB_MAC_IOCB_REQ_F   0x10
+#define OB_MAC_IOCB_REQ_X   0x08
+#define OB_MAC_IOCB_REQ_D   0x02
+#define OB_MAC_IOCB_REQ_I   0x01
+       u8 flags1;
+#define OB_3032MAC_IOCB_REQ_IC 0x04
+#define OB_3032MAC_IOCB_REQ_TC 0x02
+#define OB_3032MAC_IOCB_REQ_UC 0x01
+       u8 reserved0;
+
+       u32 transaction_id;     /* opaque for hardware */
+       __le16 data_len;
+       u8 ip_hdr_off;
+       u8 ip_hdr_len;
+       __le32 reserved1;
+       __le32 reserved2;
+       __le32 buf_addr0_low;
+       __le32 buf_addr0_high;
+       __le32 buf_0_len;
+       __le32 buf_addr1_low;
+       __le32 buf_addr1_high;
+       __le32 buf_1_len;
+       __le32 buf_addr2_low;
+       __le32 buf_addr2_high;
+       __le32 buf_2_len;
+       __le32 reserved3;
+       __le32 reserved4;
+};
+/*
+ * The following constants define control bits for buffer
+ * length fields for all IOCB's.
+ */
+#define OB_MAC_IOCB_REQ_E   0x80000000 /* Last valid buffer in list. */
+#define OB_MAC_IOCB_REQ_C   0x40000000 /* points to an OAL. (continuation) */
+#define OB_MAC_IOCB_REQ_L   0x20000000 /* Auburn local address pointer. */
+#define OB_MAC_IOCB_REQ_R   0x10000000 /* 32-bit address pointer. */
+
+struct ob_mac_iocb_rsp {
+       u8 opcode;
+       u8 flags;
+#define OB_MAC_IOCB_RSP_P   0x08
+#define OB_MAC_IOCB_RSP_L   0x04
+#define OB_MAC_IOCB_RSP_S   0x02
+#define OB_MAC_IOCB_RSP_I   0x01
+
+       __le16 reserved0;
+       u32 transaction_id;     /* opaque for hardware */
+       __le32 reserved1;
+       __le32 reserved2;
+};
+
+struct ib_mac_iocb_rsp {
+       u8 opcode;
+#define IB_MAC_IOCB_RSP_V   0x80
+       u8 flags;
+#define IB_MAC_IOCB_RSP_S   0x80
+#define IB_MAC_IOCB_RSP_H1  0x40
+#define IB_MAC_IOCB_RSP_H0  0x20
+#define IB_MAC_IOCB_RSP_B   0x10
+#define IB_MAC_IOCB_RSP_M   0x08
+#define IB_MAC_IOCB_RSP_MA  0x07
+
+       __le16 length;
+       __le32 reserved;
+       __le32 ial_low;
+       __le32 ial_high;
+
+};
+
+struct ob_ip_iocb_req {
+       u8 opcode;
+       __le16 flags;
+#define OB_IP_IOCB_REQ_O        0x100
+#define OB_IP_IOCB_REQ_H        0x008
+#define OB_IP_IOCB_REQ_U        0x004
+#define OB_IP_IOCB_REQ_D        0x002
+#define OB_IP_IOCB_REQ_I        0x001
+
+       u8 reserved0;
+
+       __le32 transaction_id;
+       __le16 data_len;
+       __le16 reserved1;
+       __le32 hncb_ptr_low;
+       __le32 hncb_ptr_high;
+       __le32 buf_addr0_low;
+       __le32 buf_addr0_high;
+       __le32 buf_0_len;
+       __le32 buf_addr1_low;
+       __le32 buf_addr1_high;
+       __le32 buf_1_len;
+       __le32 buf_addr2_low;
+       __le32 buf_addr2_high;
+       __le32 buf_2_len;
+       __le32 reserved2;
+       __le32 reserved3;
+};
+
+/* defines for BufferLength fields above */
+#define OB_IP_IOCB_REQ_E    0x80000000
+#define OB_IP_IOCB_REQ_C    0x40000000
+#define OB_IP_IOCB_REQ_L    0x20000000
+#define OB_IP_IOCB_REQ_R    0x10000000
+
+struct ob_ip_iocb_rsp {
+       u8 opcode;
+       u8 flags;
+#define OB_MAC_IOCB_RSP_H       0x10
+#define OB_MAC_IOCB_RSP_E       0x08
+#define OB_MAC_IOCB_RSP_L       0x04
+#define OB_MAC_IOCB_RSP_S       0x02
+#define OB_MAC_IOCB_RSP_I       0x01
+
+       __le16 reserved0;
+       __le32 transaction_id;
+       __le32 reserved1;
+       __le32 reserved2;
+};
+
+struct ib_ip_iocb_rsp {
+       u8 opcode;
+#define IB_IP_IOCB_RSP_3032_V   0x80
+#define IB_IP_IOCB_RSP_3032_O   0x40
+#define IB_IP_IOCB_RSP_3032_I   0x20
+#define IB_IP_IOCB_RSP_3032_R   0x10
+       u8 flags;
+#define IB_IP_IOCB_RSP_S        0x80
+#define IB_IP_IOCB_RSP_H1       0x40
+#define IB_IP_IOCB_RSP_H0       0x20
+#define IB_IP_IOCB_RSP_B        0x10
+#define IB_IP_IOCB_RSP_M        0x08
+#define IB_IP_IOCB_RSP_MA       0x07
+
+       __le16 length;
+       __le16 checksum;
+#define IB_IP_IOCB_RSP_3032_ICE                0x01
+#define IB_IP_IOCB_RSP_3032_CE         0x02
+#define IB_IP_IOCB_RSP_3032_NUC                0x04
+#define IB_IP_IOCB_RSP_3032_UDP                0x08
+#define IB_IP_IOCB_RSP_3032_TCP                0x10
+#define IB_IP_IOCB_RSP_3032_IPE                0x20
+       __le16 reserved;
+#define IB_IP_IOCB_RSP_R        0x01
+       __le32 ial_low;
+       __le32 ial_high;
+};
+
+struct net_rsp_iocb {
+       u8 opcode;
+       u8 flags;
+       __le16 reserved0;
+       __le32 reserved[3];
+};
+#pragma pack()
+
+/*
+ * Register Definitions...
+ */
+#define PORT0_PHY_ADDRESS   0x1e00
+#define PORT1_PHY_ADDRESS   0x1f00
+
+#define ETHERNET_CRC_SIZE   4
+
+#define MII_SCAN_REGISTER 0x00000001
+
+#define PHY_ID_0_REG    2
+#define PHY_ID_1_REG    3
+
+#define PHY_OUI_1_MASK       0xfc00
+#define PHY_MODEL_MASK       0x03f0
+
+/*  Address for the Agere Phy */
+#define MII_AGERE_ADDR_1  0x00001000
+#define MII_AGERE_ADDR_2  0x00001100
+
+/* 32-bit ispControlStatus */
+enum {
+       ISP_CONTROL_NP_MASK = 0x0003,
+       ISP_CONTROL_NP_PCSR = 0x0000,
+       ISP_CONTROL_NP_HMCR = 0x0001,
+       ISP_CONTROL_NP_LRAMCR = 0x0002,
+       ISP_CONTROL_NP_PSR = 0x0003,
+       ISP_CONTROL_RI = 0x0008,
+       ISP_CONTROL_CI = 0x0010,
+       ISP_CONTROL_PI = 0x0020,
+       ISP_CONTROL_IN = 0x0040,
+       ISP_CONTROL_BE = 0x0080,
+       ISP_CONTROL_FN_MASK = 0x0700,
+       ISP_CONTROL_FN0_NET = 0x0400,
+       ISP_CONTROL_FN0_SCSI = 0x0500,
+       ISP_CONTROL_FN1_NET = 0x0600,
+       ISP_CONTROL_FN1_SCSI = 0x0700,
+       ISP_CONTROL_LINK_DN_0 = 0x0800,
+       ISP_CONTROL_LINK_DN_1 = 0x1000,
+       ISP_CONTROL_FSR = 0x2000,
+       ISP_CONTROL_FE = 0x4000,
+       ISP_CONTROL_SR = 0x8000,
+};
+
+/* 32-bit ispInterruptMaskReg */
+enum {
+       ISP_IMR_ENABLE_INT = 0x0004,
+       ISP_IMR_DISABLE_RESET_INT = 0x0008,
+       ISP_IMR_DISABLE_CMPL_INT = 0x0010,
+       ISP_IMR_DISABLE_PROC_INT = 0x0020,
+};
+
+/* 32-bit serialPortInterfaceReg */
+enum {
+       ISP_SERIAL_PORT_IF_CLK = 0x0001,
+       ISP_SERIAL_PORT_IF_CS = 0x0002,
+       ISP_SERIAL_PORT_IF_D0 = 0x0004,
+       ISP_SERIAL_PORT_IF_DI = 0x0008,
+       ISP_NVRAM_MASK = (0x000F << 16),
+       ISP_SERIAL_PORT_IF_WE = 0x0010,
+       ISP_SERIAL_PORT_IF_NVR_MASK = 0x001F,
+       ISP_SERIAL_PORT_IF_SCI = 0x0400,
+       ISP_SERIAL_PORT_IF_SC0 = 0x0800,
+       ISP_SERIAL_PORT_IF_SCE = 0x1000,
+       ISP_SERIAL_PORT_IF_SDI = 0x2000,
+       ISP_SERIAL_PORT_IF_SDO = 0x4000,
+       ISP_SERIAL_PORT_IF_SDE = 0x8000,
+       ISP_SERIAL_PORT_IF_I2C_MASK = 0xFC00,
+};
+
+/* semaphoreReg */
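+/*
+ * Each hardware resource owns a 3-bit field in semaphoreReg.  The *_BITS
+ * values are written to claim a resource and the *_MASK values place the
+ * same field in the upper 16 bits, which appear to act as the write-enable
+ * mask (the usual ((value << 16) | value) convention seen on the other
+ * control registers).
+ */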
+enum {
+       QL_RESOURCE_MASK_BASE_CODE = 0x7,
+       QL_RESOURCE_BITS_BASE_CODE = 0x4,
+       QL_DRVR_SEM_BITS = (QL_RESOURCE_BITS_BASE_CODE << 1),
+       QL_DDR_RAM_SEM_BITS = (QL_RESOURCE_BITS_BASE_CODE << 4),
+       QL_PHY_GIO_SEM_BITS = (QL_RESOURCE_BITS_BASE_CODE << 7),
+       QL_NVRAM_SEM_BITS = (QL_RESOURCE_BITS_BASE_CODE << 10),
+       QL_FLASH_SEM_BITS = (QL_RESOURCE_BITS_BASE_CODE << 13),
+       QL_DRVR_SEM_MASK = (QL_RESOURCE_MASK_BASE_CODE << (1 + 16)),
+       QL_DDR_RAM_SEM_MASK = (QL_RESOURCE_MASK_BASE_CODE << (4 + 16)),
+       QL_PHY_GIO_SEM_MASK = (QL_RESOURCE_MASK_BASE_CODE << (7 + 16)),
+       QL_NVRAM_SEM_MASK = (QL_RESOURCE_MASK_BASE_CODE << (10 + 16)),
+       QL_FLASH_SEM_MASK = (QL_RESOURCE_MASK_BASE_CODE << (13 + 16)),
+};
+
+ /*
+  * QL3XXX memory-mapped registers
+  * QL3XXX has 4 "pages" of registers, each page occupying
+  * 256 bytes.  Each page has a "common" area at the start and then
+  * page-specific registers after that.
+  */
+struct ql3xxx_common_registers {
+       u32 MB0;                /* Offset 0x00 */
+       u32 MB1;                /* Offset 0x04 */
+       u32 MB2;                /* Offset 0x08 */
+       u32 MB3;                /* Offset 0x0c */
+       u32 MB4;                /* Offset 0x10 */
+       u32 MB5;                /* Offset 0x14 */
+       u32 MB6;                /* Offset 0x18 */
+       u32 MB7;                /* Offset 0x1c */
+       u32 flashBiosAddr;
+       u32 flashBiosData;
+       u32 ispControlStatus;
+       u32 ispInterruptMaskReg;
+       u32 serialPortInterfaceReg;
+       u32 semaphoreReg;
+       u32 reqQProducerIndex;
+       u32 rspQConsumerIndex;
+
+       u32 rxLargeQProducerIndex;
+       u32 rxSmallQProducerIndex;
+       u32 arcMadiCommand;
+       u32 arcMadiData;
+};
+
+enum {
+       EXT_HW_CONFIG_SP_MASK = 0x0006,
+       EXT_HW_CONFIG_SP_NONE = 0x0000,
+       EXT_HW_CONFIG_SP_BYTE_PARITY = 0x0002,
+       EXT_HW_CONFIG_SP_ECC = 0x0004,
+       EXT_HW_CONFIG_SP_ECCx = 0x0006,
+       EXT_HW_CONFIG_SIZE_MASK = 0x0060,
+       EXT_HW_CONFIG_SIZE_128M = 0x0000,
+       EXT_HW_CONFIG_SIZE_256M = 0x0020,
+       EXT_HW_CONFIG_SIZE_512M = 0x0040,
+       EXT_HW_CONFIG_SIZE_INVALID = 0x0060,
+       EXT_HW_CONFIG_PD = 0x0080,
+       EXT_HW_CONFIG_FW = 0x0200,
+       EXT_HW_CONFIG_US = 0x0400,
+       EXT_HW_CONFIG_DCS_MASK = 0x1800,
+       EXT_HW_CONFIG_DCS_9MA = 0x0000,
+       EXT_HW_CONFIG_DCS_15MA = 0x0800,
+       EXT_HW_CONFIG_DCS_18MA = 0x1000,
+       EXT_HW_CONFIG_DCS_24MA = 0x1800,
+       EXT_HW_CONFIG_DDS_MASK = 0x6000,
+       EXT_HW_CONFIG_DDS_9MA = 0x0000,
+       EXT_HW_CONFIG_DDS_15MA = 0x2000,
+       EXT_HW_CONFIG_DDS_18MA = 0x4000,
+       EXT_HW_CONFIG_DDS_24MA = 0x6000,
+};
+
+/* InternalChipConfig */
+enum {
+       INTERNAL_CHIP_DM = 0x0001,
+       INTERNAL_CHIP_SD = 0x0002,
+       INTERNAL_CHIP_RAP_MASK = 0x000C,
+       INTERNAL_CHIP_RAP_RR = 0x0000,
+       INTERNAL_CHIP_RAP_NRM = 0x0004,
+       INTERNAL_CHIP_RAP_ERM = 0x0008,
+       INTERNAL_CHIP_RAP_ERMx = 0x000C,
+       INTERNAL_CHIP_WE = 0x0010,
+       INTERNAL_CHIP_EF = 0x0020,
+       INTERNAL_CHIP_FR = 0x0040,
+       INTERNAL_CHIP_FW = 0x0080,
+       INTERNAL_CHIP_FI = 0x0100,
+       INTERNAL_CHIP_FT = 0x0200,
+};
+
+/* portControl */
+enum {
+       PORT_CONTROL_DS = 0x0001,
+       PORT_CONTROL_HH = 0x0002,
+       PORT_CONTROL_EI = 0x0004,
+       PORT_CONTROL_ET = 0x0008,
+       PORT_CONTROL_EF = 0x0010,
+       PORT_CONTROL_DRM = 0x0020,
+       PORT_CONTROL_RLB = 0x0040,
+       PORT_CONTROL_RCB = 0x0080,
+       PORT_CONTROL_MAC = 0x0100,
+       PORT_CONTROL_IPV = 0x0200,
+       PORT_CONTROL_IFP = 0x0400,
+       PORT_CONTROL_ITP = 0x0800,
+       PORT_CONTROL_FI = 0x1000,
+       PORT_CONTROL_DFP = 0x2000,
+       PORT_CONTROL_OI = 0x4000,
+       PORT_CONTROL_CC = 0x8000,
+};
+
+/* portStatus */
+enum {
+       PORT_STATUS_SM0 = 0x0001,
+       PORT_STATUS_SM1 = 0x0002,
+       PORT_STATUS_X = 0x0008,
+       PORT_STATUS_DL = 0x0080,
+       PORT_STATUS_IC = 0x0200,
+       PORT_STATUS_MRC = 0x0400,
+       PORT_STATUS_NL = 0x0800,
+       PORT_STATUS_REV_ID_MASK = 0x7000,
+       PORT_STATUS_REV_ID_1 = 0x1000,
+       PORT_STATUS_REV_ID_2 = 0x2000,
+       PORT_STATUS_REV_ID_3 = 0x3000,
+       PORT_STATUS_64 = 0x8000,
+       PORT_STATUS_UP0 = 0x10000,
+       PORT_STATUS_AC0 = 0x20000,
+       PORT_STATUS_AE0 = 0x40000,
+       PORT_STATUS_UP1 = 0x100000,
+       PORT_STATUS_AC1 = 0x200000,
+       PORT_STATUS_AE1 = 0x400000,
+       PORT_STATUS_F0_ENABLED = 0x1000000,
+       PORT_STATUS_F1_ENABLED = 0x2000000,
+       PORT_STATUS_F2_ENABLED = 0x4000000,
+       PORT_STATUS_F3_ENABLED = 0x8000000,
+};
+
+/* macAddrIndirectPtrReg */
+enum {
+       MAC_ADDR_INDIRECT_PTR_REG_RP_MASK = 0x0003,
+       MAC_ADDR_INDIRECT_PTR_REG_RP_PRI_LWR = 0x0000,
+       MAC_ADDR_INDIRECT_PTR_REG_RP_PRI_UPR = 0x0001,
+       MAC_ADDR_INDIRECT_PTR_REG_RP_SEC_LWR = 0x0002,
+       MAC_ADDR_INDIRECT_PTR_REG_RP_SEC_UPR = 0x0003,
+       MAC_ADDR_INDIRECT_PTR_REG_PR = 0x0008,
+       MAC_ADDR_INDIRECT_PTR_REG_SS = 0x0010,
+       MAC_ADDR_INDIRECT_PTR_REG_SE = 0x0020,
+       MAC_ADDR_INDIRECT_PTR_REG_SP = 0x0040,
+       MAC_ADDR_INDIRECT_PTR_REG_PE = 0x0080,
+};
+
+/* macMIIMgmtControlReg */
+enum {
+       MAC_MII_CONTROL_RC = 0x0001,
+       MAC_MII_CONTROL_SC = 0x0002,
+       MAC_MII_CONTROL_AS = 0x0004,
+       MAC_MII_CONTROL_NP = 0x0008,
+       MAC_MII_CONTROL_CLK_SEL_MASK = 0x0070,
+       MAC_MII_CONTROL_CLK_SEL_DIV2 = 0x0000,
+       MAC_MII_CONTROL_CLK_SEL_DIV4 = 0x0010,
+       MAC_MII_CONTROL_CLK_SEL_DIV6 = 0x0020,
+       MAC_MII_CONTROL_CLK_SEL_DIV8 = 0x0030,
+       MAC_MII_CONTROL_CLK_SEL_DIV10 = 0x0040,
+       MAC_MII_CONTROL_CLK_SEL_DIV14 = 0x0050,
+       MAC_MII_CONTROL_CLK_SEL_DIV20 = 0x0060,
+       MAC_MII_CONTROL_CLK_SEL_DIV28 = 0x0070,
+       MAC_MII_CONTROL_RM = 0x8000,
+};
+
+/* macMIIStatusReg */
+enum {
+       MAC_MII_STATUS_BSY = 0x0001,
+       MAC_MII_STATUS_SC = 0x0002,
+       MAC_MII_STATUS_NV = 0x0004,
+};
+
+enum {
+       MAC_CONFIG_REG_PE = 0x0001,
+       MAC_CONFIG_REG_TF = 0x0002,
+       MAC_CONFIG_REG_RF = 0x0004,
+       MAC_CONFIG_REG_FD = 0x0008,
+       MAC_CONFIG_REG_GM = 0x0010,
+       MAC_CONFIG_REG_LB = 0x0020,
+       MAC_CONFIG_REG_SR = 0x8000,
+};
+
+enum {
+       MAC_HALF_DUPLEX_REG_ED = 0x10000,
+       MAC_HALF_DUPLEX_REG_NB = 0x20000,
+       MAC_HALF_DUPLEX_REG_BNB = 0x40000,
+       MAC_HALF_DUPLEX_REG_ALT = 0x80000,
+};
+
+enum {
+       IP_ADDR_INDEX_REG_MASK = 0x000f,
+       IP_ADDR_INDEX_REG_FUNC_0_PRI = 0x0000,
+       IP_ADDR_INDEX_REG_FUNC_0_SEC = 0x0001,
+       IP_ADDR_INDEX_REG_FUNC_1_PRI = 0x0002,
+       IP_ADDR_INDEX_REG_FUNC_1_SEC = 0x0003,
+       IP_ADDR_INDEX_REG_FUNC_2_PRI = 0x0004,
+       IP_ADDR_INDEX_REG_FUNC_2_SEC = 0x0005,
+       IP_ADDR_INDEX_REG_FUNC_3_PRI = 0x0006,
+       IP_ADDR_INDEX_REG_FUNC_3_SEC = 0x0007,
+       IP_ADDR_INDEX_REG_6 = 0x0008,
+       IP_ADDR_INDEX_REG_OFFSET_MASK = 0x0030,
+       IP_ADDR_INDEX_REG_E = 0x0040,
+};
+enum {
+       QL3032_PORT_CONTROL_DS = 0x0001,
+       QL3032_PORT_CONTROL_HH = 0x0002,
+       QL3032_PORT_CONTROL_EIv6 = 0x0004,
+       QL3032_PORT_CONTROL_EIv4 = 0x0008,
+       QL3032_PORT_CONTROL_ET = 0x0010,
+       QL3032_PORT_CONTROL_EF = 0x0020,
+       QL3032_PORT_CONTROL_DRM = 0x0040,
+       QL3032_PORT_CONTROL_RLB = 0x0080,
+       QL3032_PORT_CONTROL_RCB = 0x0100,
+       QL3032_PORT_CONTROL_KIE = 0x0200,
+};
+
+enum {
+       PROBE_MUX_ADDR_REG_MUX_SEL_MASK = 0x003f,
+       PROBE_MUX_ADDR_REG_SYSCLK = 0x0000,
+       PROBE_MUX_ADDR_REG_PCICLK = 0x0040,
+       PROBE_MUX_ADDR_REG_NRXCLK = 0x0080,
+       PROBE_MUX_ADDR_REG_CPUCLK = 0x00C0,
+       PROBE_MUX_ADDR_REG_MODULE_SEL_MASK = 0x3f00,
+       PROBE_MUX_ADDR_REG_UP = 0x4000,
+       PROBE_MUX_ADDR_REG_RE = 0x8000,
+};
+
+enum {
+       STATISTICS_INDEX_REG_MASK = 0x01ff,
+       STATISTICS_INDEX_REG_MAC0_TX_FRAME = 0x0000,
+       STATISTICS_INDEX_REG_MAC0_TX_BYTES = 0x0001,
+       STATISTICS_INDEX_REG_MAC0_TX_STAT1 = 0x0002,
+       STATISTICS_INDEX_REG_MAC0_TX_STAT2 = 0x0003,
+       STATISTICS_INDEX_REG_MAC0_TX_STAT3 = 0x0004,
+       STATISTICS_INDEX_REG_MAC0_TX_STAT4 = 0x0005,
+       STATISTICS_INDEX_REG_MAC0_TX_STAT5 = 0x0006,
+       STATISTICS_INDEX_REG_MAC0_RX_FRAME = 0x0007,
+       STATISTICS_INDEX_REG_MAC0_RX_BYTES = 0x0008,
+       STATISTICS_INDEX_REG_MAC0_RX_STAT1 = 0x0009,
+       STATISTICS_INDEX_REG_MAC0_RX_STAT2 = 0x000a,
+       STATISTICS_INDEX_REG_MAC0_RX_STAT3 = 0x000b,
+       STATISTICS_INDEX_REG_MAC0_RX_ERR_CRC = 0x000c,
+       STATISTICS_INDEX_REG_MAC0_RX_ERR_ENC = 0x000d,
+       STATISTICS_INDEX_REG_MAC0_RX_ERR_LEN = 0x000e,
+       STATISTICS_INDEX_REG_MAC0_RX_STAT4 = 0x000f,
+       STATISTICS_INDEX_REG_MAC1_TX_FRAME = 0x0010,
+       STATISTICS_INDEX_REG_MAC1_TX_BYTES = 0x0011,
+       STATISTICS_INDEX_REG_MAC1_TX_STAT1 = 0x0012,
+       STATISTICS_INDEX_REG_MAC1_TX_STAT2 = 0x0013,
+       STATISTICS_INDEX_REG_MAC1_TX_STAT3 = 0x0014,
+       STATISTICS_INDEX_REG_MAC1_TX_STAT4 = 0x0015,
+       STATISTICS_INDEX_REG_MAC1_TX_STAT5 = 0x0016,
+       STATISTICS_INDEX_REG_MAC1_RX_FRAME = 0x0017,
+       STATISTICS_INDEX_REG_MAC1_RX_BYTES = 0x0018,
+       STATISTICS_INDEX_REG_MAC1_RX_STAT1 = 0x0019,
+       STATISTICS_INDEX_REG_MAC1_RX_STAT2 = 0x001a,
+       STATISTICS_INDEX_REG_MAC1_RX_STAT3 = 0x001b,
+       STATISTICS_INDEX_REG_MAC1_RX_ERR_CRC = 0x001c,
+       STATISTICS_INDEX_REG_MAC1_RX_ERR_ENC = 0x001d,
+       STATISTICS_INDEX_REG_MAC1_RX_ERR_LEN = 0x001e,
+       STATISTICS_INDEX_REG_MAC1_RX_STAT4 = 0x001f,
+       STATISTICS_INDEX_REG_IP_TX_PKTS = 0x0020,
+       STATISTICS_INDEX_REG_IP_TX_BYTES = 0x0021,
+       STATISTICS_INDEX_REG_IP_TX_FRAG = 0x0022,
+       STATISTICS_INDEX_REG_IP_RX_PKTS = 0x0023,
+       STATISTICS_INDEX_REG_IP_RX_BYTES = 0x0024,
+       STATISTICS_INDEX_REG_IP_RX_FRAG = 0x0025,
+       STATISTICS_INDEX_REG_IP_DGRM_REASSEMBLY = 0x0026,
+       STATISTICS_INDEX_REG_IP_V6_RX_PKTS = 0x0027,
+       STATISTICS_INDEX_REG_IP_RX_PKTERR = 0x0028,
+       STATISTICS_INDEX_REG_IP_REASSEMBLY_ERR = 0x0029,
+       STATISTICS_INDEX_REG_TCP_TX_SEG = 0x0030,
+       STATISTICS_INDEX_REG_TCP_TX_BYTES = 0x0031,
+       STATISTICS_INDEX_REG_TCP_RX_SEG = 0x0032,
+       STATISTICS_INDEX_REG_TCP_RX_BYTES = 0x0033,
+       STATISTICS_INDEX_REG_TCP_TIMER_EXP = 0x0034,
+       STATISTICS_INDEX_REG_TCP_RX_ACK = 0x0035,
+       STATISTICS_INDEX_REG_TCP_TX_ACK = 0x0036,
+       STATISTICS_INDEX_REG_TCP_RX_ERR = 0x0037,
+       STATISTICS_INDEX_REG_TCP_RX_WIN_PROBE = 0x0038,
+       STATISTICS_INDEX_REG_TCP_ECC_ERR_CORR = 0x003f,
+};
+
+enum {
+       PORT_FATAL_ERROR_STATUS_OFB_RE_MAC0 = 0x00000001,
+       PORT_FATAL_ERROR_STATUS_OFB_RE_MAC1 = 0x00000002,
+       PORT_FATAL_ERROR_STATUS_OFB_WE = 0x00000004,
+       PORT_FATAL_ERROR_STATUS_IFB_RE = 0x00000008,
+       PORT_FATAL_ERROR_STATUS_IFB_WE_MAC0 = 0x00000010,
+       PORT_FATAL_ERROR_STATUS_IFB_WE_MAC1 = 0x00000020,
+       PORT_FATAL_ERROR_STATUS_ODE_RE = 0x00000040,
+       PORT_FATAL_ERROR_STATUS_ODE_WE = 0x00000080,
+       PORT_FATAL_ERROR_STATUS_IDE_RE = 0x00000100,
+       PORT_FATAL_ERROR_STATUS_IDE_WE = 0x00000200,
+       PORT_FATAL_ERROR_STATUS_SDE_RE = 0x00000400,
+       PORT_FATAL_ERROR_STATUS_SDE_WE = 0x00000800,
+       PORT_FATAL_ERROR_STATUS_BLE = 0x00001000,
+       PORT_FATAL_ERROR_STATUS_SPE = 0x00002000,
+       PORT_FATAL_ERROR_STATUS_EP0 = 0x00004000,
+       PORT_FATAL_ERROR_STATUS_EP1 = 0x00008000,
+       PORT_FATAL_ERROR_STATUS_ICE = 0x00010000,
+       PORT_FATAL_ERROR_STATUS_ILE = 0x00020000,
+       PORT_FATAL_ERROR_STATUS_OPE = 0x00040000,
+       PORT_FATAL_ERROR_STATUS_TA = 0x00080000,
+       PORT_FATAL_ERROR_STATUS_MA = 0x00100000,
+       PORT_FATAL_ERROR_STATUS_SCE = 0x00200000,
+       PORT_FATAL_ERROR_STATUS_RPE = 0x00400000,
+       PORT_FATAL_ERROR_STATUS_MPE = 0x00800000,
+       PORT_FATAL_ERROR_STATUS_OCE = 0x01000000,
+};
+
+/*
+ *  port control and status page - page 0
+ */
+
+struct ql3xxx_port_registers {
+       struct ql3xxx_common_registers CommonRegs;
+
+       u32 ExternalHWConfig;
+       u32 InternalChipConfig;
+       u32 portControl;
+       u32 portStatus;
+       u32 macAddrIndirectPtrReg;
+       u32 macAddrDataReg;
+       u32 macMIIMgmtControlReg;
+       u32 macMIIMgmtAddrReg;
+       u32 macMIIMgmtDataReg;
+       u32 macMIIStatusReg;
+       u32 mac0ConfigReg;
+       u32 mac0IpgIfgReg;
+       u32 mac0HalfDuplexReg;
+       u32 mac0MaxFrameLengthReg;
+       u32 mac0PauseThresholdReg;
+       u32 mac1ConfigReg;
+       u32 mac1IpgIfgReg;
+       u32 mac1HalfDuplexReg;
+       u32 mac1MaxFrameLengthReg;
+       u32 mac1PauseThresholdReg;
+       u32 ipAddrIndexReg;
+       u32 ipAddrDataReg;
+       u32 ipReassemblyTimeout;
+       u32 tcpMaxWindow;
+       u32 currentTcpTimestamp[2];
+       u32 internalRamRWAddrReg;
+       u32 internalRamWDataReg;
+       u32 reclaimedBufferAddrRegLow;
+       u32 reclaimedBufferAddrRegHigh;
+       u32 tcpConfiguration;
+       u32 functionControl;
+       u32 fpgaRevID;
+       u32 localRamAddr;
+       u32 localRamDataAutoIncr;
+       u32 localRamDataNonIncr;
+       u32 gpOutput;
+       u32 gpInput;
+       u32 probeMuxAddr;
+       u32 probeMuxData;
+       u32 statisticsIndexReg;
+       u32 statisticsReadDataRegAutoIncr;
+       u32 statisticsReadDataRegNoIncr;
+       u32 PortFatalErrStatus;
+};
+
+/*
+ * port host memory config page - page 1
+ */
+struct ql3xxx_host_memory_registers {
+       struct ql3xxx_common_registers CommonRegs;
+
+       u32 reserved[12];
+
+       /* Network Request Queue */
+       u32 reqConsumerIndex;
+       u32 reqConsumerIndexAddrLow;
+       u32 reqConsumerIndexAddrHigh;
+       u32 reqBaseAddrLow;
+       u32 reqBaseAddrHigh;
+       u32 reqLength;
+
+       /* Network Completion Queue */
+       u32 rspProducerIndex;
+       u32 rspProducerIndexAddrLow;
+       u32 rspProducerIndexAddrHigh;
+       u32 rspBaseAddrLow;
+       u32 rspBaseAddrHigh;
+       u32 rspLength;
+
+       /* RX Large Buffer Queue */
+       u32 rxLargeQConsumerIndex;
+       u32 rxLargeQBaseAddrLow;
+       u32 rxLargeQBaseAddrHigh;
+       u32 rxLargeQLength;
+       u32 rxLargeBufferLength;
+
+       /* RX Small Buffer Queue */
+       u32 rxSmallQConsumerIndex;
+       u32 rxSmallQBaseAddrLow;
+       u32 rxSmallQBaseAddrHigh;
+       u32 rxSmallQLength;
+       u32 rxSmallBufferLength;
+
+};
+
+/*
+ *  port local RAM page - page 2
+ */
+struct ql3xxx_local_ram_registers {
+       struct ql3xxx_common_registers CommonRegs;
+       u32 bufletSize;
+       u32 maxBufletCount;
+       u32 currentBufletCount;
+       u32 reserved;
+       u32 freeBufletThresholdLow;
+       u32 freeBufletThresholdHigh;
+       u32 ipHashTableBase;
+       u32 ipHashTableCount;
+       u32 tcpHashTableBase;
+       u32 tcpHashTableCount;
+       u32 ncbBase;
+       u32 maxNcbCount;
+       u32 currentNcbCount;
+       u32 drbBase;
+       u32 maxDrbCount;
+       u32 currentDrbCount;
+};
+
+/*
+ * Helpers for splitting a 64-bit DMA address into its 32-bit halves.
+ */
+
+#define LS_64BITS(x)    (u32)(0xffffffff & ((u64)(x)))
+#define MS_64BITS(x)    (u32)(0xffffffff & (((u64)(x)) >> 16 >> 16))
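+/*
+ * MS_64BITS shifts by 16 twice rather than by 32, presumably so the
+ * expression stays well-defined when it is handed a 32-bit dma_addr_t
+ * (shifting a 32-bit value right by 32 is undefined).
+ */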
+
+/*
+ * I/O register
+ */
+
+enum {
+       CONTROL_REG = 0,
+       STATUS_REG = 1,
+       PHY_STAT_LINK_UP = 0x0004,
+       PHY_CTRL_LOOPBACK = 0x4000,
+
+       PETBI_CONTROL_REG = 0x00,
+       PETBI_CTRL_ALL_PARAMS = 0x7140,
+       PETBI_CTRL_SOFT_RESET = 0x8000,
+       PETBI_CTRL_AUTO_NEG = 0x1000,
+       PETBI_CTRL_RESTART_NEG = 0x0200,
+       PETBI_CTRL_FULL_DUPLEX = 0x0100,
+       PETBI_CTRL_SPEED_1000 = 0x0040,
+
+       PETBI_STATUS_REG = 0x01,
+       PETBI_STAT_NEG_DONE = 0x0020,
+       PETBI_STAT_LINK_UP = 0x0004,
+
+       PETBI_NEG_ADVER = 0x04,
+       PETBI_NEG_PAUSE = 0x0080,
+       PETBI_NEG_PAUSE_MASK = 0x0180,
+       PETBI_NEG_DUPLEX = 0x0020,
+       PETBI_NEG_DUPLEX_MASK = 0x0060,
+
+       PETBI_NEG_PARTNER = 0x05,
+       PETBI_NEG_ERROR_MASK = 0x3000,
+
+       PETBI_EXPANSION_REG = 0x06,
+       PETBI_EXP_PAGE_RX = 0x0002,
+
+       PHY_GIG_CONTROL = 9,
+       PHY_GIG_ENABLE_MAN = 0x1000,  /* Enable Master/Slave Manual Config*/
+       PHY_GIG_SET_MASTER = 0x0800,  /* Set Master (slave if clear)*/
+       PHY_GIG_ALL_PARAMS = 0x0300,
+       PHY_GIG_ADV_1000F = 0x0200,
+       PHY_GIG_ADV_1000H = 0x0100,
+
+       PHY_NEG_ADVER = 4,
+       PHY_NEG_ALL_PARAMS = 0x0fe0,
+       PHY_NEG_ASY_PAUSE =  0x0800,
+       PHY_NEG_SYM_PAUSE =  0x0400,
+       PHY_NEG_ADV_SPEED =  0x01e0,
+       PHY_NEG_ADV_100F =   0x0100,
+       PHY_NEG_ADV_100H =   0x0080,
+       PHY_NEG_ADV_10F =    0x0040,
+       PHY_NEG_ADV_10H =    0x0020,
+
+       PETBI_TBI_CTRL = 0x11,
+       PETBI_TBI_RESET = 0x8000,
+       PETBI_TBI_AUTO_SENSE = 0x0100,
+       PETBI_TBI_SERDES_MODE = 0x0010,
+       PETBI_TBI_SERDES_WRAP = 0x0002,
+
+       AUX_CONTROL_STATUS = 0x1c,
+       PHY_AUX_NEG_DONE = 0x8000,
+       PHY_NEG_PARTNER = 5,
+       PHY_AUX_DUPLEX_STAT = 0x0020,
+       PHY_AUX_SPEED_STAT = 0x0018,
+       PHY_AUX_NO_HW_STRAP = 0x0004,
+       PHY_AUX_RESET_STICK = 0x0002,
+       PHY_NEG_PAUSE = 0x0400,
+       PHY_CTRL_SOFT_RESET = 0x8000,
+       PHY_CTRL_AUTO_NEG = 0x1000,
+       PHY_CTRL_RESTART_NEG = 0x0200,
+};
+enum {
+/* FM93C56A serial EEPROM definitions */
+       FM93C56A_START = 0x1,
+/* Commands */
+       FM93C56A_READ = 0x2,
+       FM93C56A_WEN = 0x0,
+       FM93C56A_WRITE = 0x1,
+       FM93C56A_WRITE_ALL = 0x0,
+       FM93C56A_WDS = 0x0,
+       FM93C56A_ERASE = 0x3,
+       FM93C56A_ERASE_ALL = 0x0,
+/* Command Extensions */
+       FM93C56A_WEN_EXT = 0x3,
+       FM93C56A_WRITE_ALL_EXT = 0x1,
+       FM93C56A_WDS_EXT = 0x0,
+       FM93C56A_ERASE_ALL_EXT = 0x2,
+/* Special Bits */
+       FM93C56A_READ_DUMMY_BITS = 1,
+       FM93C56A_READY = 0,
+       FM93C56A_BUSY = 1,
+       FM93C56A_CMD_BITS = 2,
+/* FM93C56A/66A/86A EEPROM size definitions */
+       FM93C56A_SIZE_8 = 0x100,
+       FM93C56A_SIZE_16 = 0x80,
+       FM93C66A_SIZE_8 = 0x200,
+       FM93C66A_SIZE_16 = 0x100,
+       FM93C86A_SIZE_16 = 0x400,
+/* Address Bits */
+       FM93C56A_NO_ADDR_BITS_16 = 8,
+       FM93C56A_NO_ADDR_BITS_8 = 9,
+       FM93C86A_NO_ADDR_BITS_16 = 10,
+/* Data Bits */
+       FM93C56A_DATA_BITS_16 = 16,
+       FM93C56A_DATA_BITS_8 = 8,
+};
+enum {
+/* Auburn Bits */
+       AUBURN_EEPROM_DI = 0x8,
+       AUBURN_EEPROM_DI_0 = 0x0,
+       AUBURN_EEPROM_DI_1 = 0x8,
+       AUBURN_EEPROM_DO = 0x4,
+       AUBURN_EEPROM_DO_0 = 0x0,
+       AUBURN_EEPROM_DO_1 = 0x4,
+       AUBURN_EEPROM_CS = 0x2,
+       AUBURN_EEPROM_CS_0 = 0x0,
+       AUBURN_EEPROM_CS_1 = 0x2,
+       AUBURN_EEPROM_CLK_RISE = 0x1,
+       AUBURN_EEPROM_CLK_FALL = 0x0,
+};
+enum {
+       EEPROM_SIZE = FM93C86A_SIZE_16,
+       EEPROM_NO_ADDR_BITS = FM93C86A_NO_ADDR_BITS_16,
+       EEPROM_NO_DATA_BITS = FM93C56A_DATA_BITS_16,
+};
+
+/*
+ *  MAC Config data structure
+ */
+struct eeprom_port_cfg {
+       u16 etherMtu_mac;
+       u16 pauseThreshold_mac;
+       u16 resumeThreshold_mac;
+       u16 portConfiguration;
+#define PORT_CONFIG_DEFAULT                 0xf700
+#define PORT_CONFIG_AUTO_NEG_ENABLED        0x8000
+#define PORT_CONFIG_SYM_PAUSE_ENABLED       0x4000
+#define PORT_CONFIG_FULL_DUPLEX_ENABLED     0x2000
+#define PORT_CONFIG_HALF_DUPLEX_ENABLED     0x1000
+#define PORT_CONFIG_1000MB_SPEED            0x0400
+#define PORT_CONFIG_100MB_SPEED             0x0200
+#define PORT_CONFIG_10MB_SPEED              0x0100
+#define PORT_CONFIG_LINK_SPEED_MASK         0x0F00
+       u16 reserved[12];
+
+};
+
+/*
+ * BIOS data structure
+ */
+struct eeprom_bios_cfg {
+       u16 SpinDlyEn:1, disBios:1, EnMemMap:1, EnSelectBoot:1, Reserved:12;
+
+       u8 bootID0:7, boodID0Valid:1;
+       u8 bootLun0[8];
+
+       u8 bootID1:7, boodID1Valid:1;
+       u8 bootLun1[8];
+
+       u16 MaxLunsTrgt;
+       u8 reserved[10];
+};
+
+/*
+ *  Function Specific Data structure
+ */
+struct eeprom_function_cfg {
+       u8 reserved[30];
+       u16 macAddress[3];
+       u16 macAddressSecondary[3];
+
+       u16 subsysVendorId;
+       u16 subsysDeviceId;
+};
+
+/*
+ *  EEPROM format
+ */
+struct eeprom_data {
+       u8 asicId[4];
+       u16 version_and_numPorts; /* kept together to avoid endianness issues */
+       u16 boardId;
+
+#define EEPROM_BOARDID_STR_SIZE   16
+#define EEPROM_SERIAL_NUM_SIZE    16
+
+       u8 boardIdStr[16];
+       u8 serialNumber[16];
+       u16 extHwConfig;
+       struct eeprom_port_cfg macCfg_port0;
+       struct eeprom_port_cfg macCfg_port1;
+       u16 bufletSize;
+       u16 bufletCount;
+       u16 tcpWindowThreshold50;
+       u16 tcpWindowThreshold25;
+       u16 tcpWindowThreshold0;
+       u16 ipHashTableBaseHi;
+       u16 ipHashTableBaseLo;
+       u16 ipHashTableSize;
+       u16 tcpHashTableBaseHi;
+       u16 tcpHashTableBaseLo;
+       u16 tcpHashTableSize;
+       u16 ncbTableBaseHi;
+       u16 ncbTableBaseLo;
+       u16 ncbTableSize;
+       u16 drbTableBaseHi;
+       u16 drbTableBaseLo;
+       u16 drbTableSize;
+       u16 reserved_142[4];
+       u16 ipReassemblyTimeout;
+       u16 tcpMaxWindowSize;
+       u16 ipSecurity;
+#define IPSEC_CONFIG_PRESENT 0x0001
+       u8 reserved_156[294];
+       u16 qDebug[8];
+       struct eeprom_function_cfg funcCfg_fn0;
+       u16 reserved_510;
+       u8 oemSpace[432];
+       struct eeprom_bios_cfg biosCfg_fn1;
+       struct eeprom_function_cfg funcCfg_fn1;
+       u16 reserved_1022;
+       u8 reserved_1024[464];
+       struct eeprom_function_cfg funcCfg_fn2;
+       u16 reserved_1534;
+       u8 reserved_1536[432];
+       struct eeprom_bios_cfg biosCfg_fn3;
+       struct eeprom_function_cfg funcCfg_fn3;
+       u16 checksum;
+};
+
+/*
+ * General definitions...
+ */
+
+/*
+ * Below are a number of compiler switches for controlling driver behavior.
+ * Some are not supported under certain conditions and are notated as such.
+ */
+
+#define QL3XXX_VENDOR_ID    0x1077
+#define QL3022_DEVICE_ID    0x3022
+#define QL3032_DEVICE_ID    0x3032
+
+/* MTU & Frame Size stuff */
+#define NORMAL_MTU_SIZE                ETH_DATA_LEN
+#define JUMBO_MTU_SIZE                         9000
+#define VLAN_ID_LEN                        2
+
+/* Request Queue Related Definitions */
+#define NUM_REQ_Q_ENTRIES   256        /* 256 * 64-byte IOCBs = 16 KiB */
+
+/* Response Queue Related Definitions */
+#define NUM_RSP_Q_ENTRIES   256        /* so that 256 * 16  = 4096 (1 page) */
+
+/* Transmit and Receive Buffers */
+#define NUM_LBUFQ_ENTRIES      128
+#define JUMBO_NUM_LBUFQ_ENTRIES 32
+#define NUM_SBUFQ_ENTRIES      64
+#define QL_SMALL_BUFFER_SIZE    32
+#define QL_ADDR_ELE_PER_BUFQ_ENTRY \
+(sizeof(struct lrg_buf_q_entry) / sizeof(struct bufq_addr_element))
+    /* Each send has at least one control block.  This is how many we keep. */
+#define NUM_SMALL_BUFFERS      (NUM_SBUFQ_ENTRIES * QL_ADDR_ELE_PER_BUFQ_ENTRY)
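+/*
+ * struct lrg_buf_q_entry below holds 8 address pairs (16 32-bit words)
+ * and struct bufq_addr_element holds one, so QL_ADDR_ELE_PER_BUFQ_ENTRY
+ * works out to 8 and NUM_SMALL_BUFFERS to 64 * 8 = 512 small buffers.
+ */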
+
+#define QL_HEADER_SPACE 32     /* make header space at top of skb. */
+/*
+ * Large & Small Buffers for Receives
+ */
+struct lrg_buf_q_entry {
+
+       __le32 addr0_lower;
+#define IAL_LAST_ENTRY 0x00000001
+#define IAL_CONT_ENTRY 0x00000002
+#define IAL_FLAG_MASK  0x00000003
+       __le32 addr0_upper;
+       __le32 addr1_lower;
+       __le32 addr1_upper;
+       __le32 addr2_lower;
+       __le32 addr2_upper;
+       __le32 addr3_lower;
+       __le32 addr3_upper;
+       __le32 addr4_lower;
+       __le32 addr4_upper;
+       __le32 addr5_lower;
+       __le32 addr5_upper;
+       __le32 addr6_lower;
+       __le32 addr6_upper;
+       __le32 addr7_lower;
+       __le32 addr7_upper;
+
+};
+
+struct bufq_addr_element {
+       __le32 addr_low;
+       __le32 addr_high;
+};
+
+#define QL_NO_RESET                    0
+#define QL_DO_RESET                    1
+
+enum link_state_t {
+       LS_UNKNOWN = 0,
+       LS_DOWN,
+       LS_DEGRADE,
+       LS_RECOVER,
+       LS_UP,
+};
+
+struct ql_rcv_buf_cb {
+       struct ql_rcv_buf_cb *next;
+       struct sk_buff *skb;
+       DEFINE_DMA_UNMAP_ADDR(mapaddr);
+       DEFINE_DMA_UNMAP_LEN(maplen);
+       __le32 buf_phy_addr_low;
+       __le32 buf_phy_addr_high;
+       int index;
+};
+
+/*
+ * Original IOCB has 3 sg entries:
+ * first points to skb-data area
+ * second points to first frag
+ * third points to next oal.
+ * OAL has 5 entries:
+ * 1 thru 4 point to frags
+ * fifth points to next oal.
+ */
+#define MAX_OAL_CNT ((MAX_SKB_FRAGS-1)/4 + 1)
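+/*
+ * Example: with the usual MAX_SKB_FRAGS of 17 (4 KiB pages) this gives
+ * (17 - 1) / 4 + 1 = 5 OALs per send.
+ */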
+
+struct oal_entry {
+       __le32 dma_lo;
+       __le32 dma_hi;
+       __le32 len;
+#define OAL_LAST_ENTRY   0x80000000    /* Last valid buffer in list. */
+#define OAL_CONT_ENTRY   0x40000000    /* points to an OAL. (continuation) */
+};
+
+struct oal {
+       struct oal_entry oal_entry[5];
+};
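+
+/*
+ * Editorial illustration (not part of the original qla3xxx.h): the first
+ * fragment of a send rides in the IOCB itself and each OAL then describes up
+ * to four more fragments before chaining to the next OAL, which is the count
+ * MAX_OAL_CNT above reserves. With MAX_SKB_FRAGS == 17 (a typical value on
+ * 4 KB pages) that works out to (17 - 1) / 4 + 1 = 5 OALs. A hypothetical
+ * helper mirroring the same arithmetic for an arbitrary fragment limit:
+ */
+static inline int ql_example_max_oals(int max_frags)
+{
+       /* Same expression as MAX_OAL_CNT, parameterised on the frag limit. */
+       return (max_frags - 1) / 4 + 1;
+}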
+
+struct map_list {
+       DEFINE_DMA_UNMAP_ADDR(mapaddr);
+       DEFINE_DMA_UNMAP_LEN(maplen);
+};
+
+struct ql_tx_buf_cb {
+       struct sk_buff *skb;
+       struct ob_mac_iocb_req *queue_entry;
+       int seg_count;
+       struct oal *oal;
+       struct map_list map[MAX_SKB_FRAGS+1];
+};
+
+/* definitions for type field */
+#define QL_BUF_TYPE_MACIOCB 0x01
+#define QL_BUF_TYPE_IPIOCB  0x02
+#define QL_BUF_TYPE_TCPIOCB 0x03
+
+/* qdev->flags definitions. */
+enum { QL_RESET_DONE = 1,      /* Reset finished. */
+       QL_RESET_ACTIVE = 2,    /* Waiting for reset to finish. */
+       QL_RESET_START = 3,     /* Please reset the chip. */
+       QL_RESET_PER_SCSI = 4,  /* SCSI driver requests reset. */
+       QL_TX_TIMEOUT = 5,      /* Timeout in progress. */
+       QL_LINK_MASTER = 6,     /* This driver controls the link. */
+       QL_ADAPTER_UP = 7,      /* Adapter has been brought up. */
+       QL_THREAD_UP = 8,       /* This flag is available. */
+       QL_LINK_UP = 9, /* Link Status. */
+       QL_ALLOC_REQ_RSP_Q_DONE = 10,
+       QL_ALLOC_BUFQS_DONE = 11,
+       QL_ALLOC_SMALL_BUF_DONE = 12,
+       QL_LINK_OPTICAL = 13,
+       QL_MSI_ENABLED = 14,
+};
+
+/*
+ * ql3_adapter - The main Adapter structure definition.
+ * This structure has all fields relevant to the hardware.
+ */
+
+struct ql3_adapter {
+       u32 reserved_00;
+       unsigned long flags;
+
+       /* PCI Configuration information for this device */
+       struct pci_dev *pdev;
+       struct net_device *ndev;        /* Parent NET device */
+
+       struct napi_struct napi;
+
+       /* Hardware information */
+       u8 chip_rev_id;
+       u8 pci_slot;
+       u8 pci_width;
+       u8 pci_x;
+       u32 msi;
+       int index;
+       struct timer_list adapter_timer;        /* timer used for various functions */
+
+       spinlock_t adapter_lock;
+       spinlock_t hw_lock;
+
+       /* PCI Bus Relative Register Addresses */
+       u8 __iomem *mmap_virt_base;     /* stores return value from ioremap() */
+       struct ql3xxx_port_registers __iomem *mem_map_registers;
+       u32 current_page;       /* tracks current register page */
+
+       u32 msg_enable;
+       u8 reserved_01[2];
+       u8 reserved_02[2];
+
+       /* Page for Shadow Registers */
+       void *shadow_reg_virt_addr;
+       dma_addr_t shadow_reg_phy_addr;
+
+       /* Net Request Queue */
+       u32 req_q_size;
+       u32 reserved_03;
+       struct ob_mac_iocb_req *req_q_virt_addr;
+       dma_addr_t req_q_phy_addr;
+       u16 req_producer_index;
+       u16 reserved_04;
+       u16 *preq_consumer_index;
+       u32 req_consumer_index_phy_addr_high;
+       u32 req_consumer_index_phy_addr_low;
+       atomic_t tx_count;
+       struct ql_tx_buf_cb tx_buf[NUM_REQ_Q_ENTRIES];
+
+       /* Net Response Queue */
+       u32 rsp_q_size;
+       u32 eeprom_cmd_data;
+       struct net_rsp_iocb *rsp_q_virt_addr;
+       dma_addr_t rsp_q_phy_addr;
+       struct net_rsp_iocb *rsp_current;
+       u16 rsp_consumer_index;
+       u16 reserved_06;
+       volatile __le32 *prsp_producer_index;
+       u32 rsp_producer_index_phy_addr_high;
+       u32 rsp_producer_index_phy_addr_low;
+
+       /* Large Buffer Queue */
+       u32 lrg_buf_q_alloc_size;
+       u32 lrg_buf_q_size;
+       void *lrg_buf_q_alloc_virt_addr;
+       void *lrg_buf_q_virt_addr;
+       dma_addr_t lrg_buf_q_alloc_phy_addr;
+       dma_addr_t lrg_buf_q_phy_addr;
+       u32 lrg_buf_q_producer_index;
+       u32 lrg_buf_release_cnt;
+       struct bufq_addr_element *lrg_buf_next_free;
+       u32 num_large_buffers;
+       u32 num_lbufq_entries;
+
+       /* Large (Receive) Buffers */
+       struct ql_rcv_buf_cb *lrg_buf;
+       struct ql_rcv_buf_cb *lrg_buf_free_head;
+       struct ql_rcv_buf_cb *lrg_buf_free_tail;
+       u32 lrg_buf_free_count;
+       u32 lrg_buffer_len;
+       u32 lrg_buf_index;
+       u32 lrg_buf_skb_check;
+
+       /* Small Buffer Queue */
+       u32 small_buf_q_alloc_size;
+       u32 small_buf_q_size;
+       u32 small_buf_q_producer_index;
+       void *small_buf_q_alloc_virt_addr;
+       void *small_buf_q_virt_addr;
+       dma_addr_t small_buf_q_alloc_phy_addr;
+       dma_addr_t small_buf_q_phy_addr;
+       u32 small_buf_index;
+
+       /* Small (Receive) Buffers */
+       void *small_buf_virt_addr;
+       dma_addr_t small_buf_phy_addr;
+       u32 small_buf_phy_addr_low;
+       u32 small_buf_phy_addr_high;
+       u32 small_buf_release_cnt;
+       u32 small_buf_total_size;
+
+       struct eeprom_data nvram_data;
+       u32 port_link_state;
+
+       /* 4022 specific */
+       u32 mac_index;          /* Driver's MAC number can be 0 or 1 for first and second networking functions respectively */
+       u32 PHYAddr;            /* Address of PHY 0x1e00 Port 0 and 0x1f00 Port 1 */
+       u32 mac_ob_opcode;      /* Opcode to use on mac transmission */
+       u32 mb_bit_mask;        /* MA Bits mask to use on transmission */
+       u32 numPorts;
+       struct workqueue_struct *workqueue;
+       struct delayed_work reset_work;
+       struct delayed_work tx_timeout_work;
+       struct delayed_work link_state_work;
+       u32 max_frame_size;
+       u32 device_id;
+       u16 phyType;
+};
+
+#endif                         /* _QLA3XXX_H_ */
diff --git a/drivers/net/ethernet/qlogic/qlcnic/Makefile b/drivers/net/ethernet/qlogic/qlcnic/Makefile
new file mode 100644 (file)
index 0000000..3c2c2c7
--- /dev/null
@@ -0,0 +1,15 @@
+#
+# Makefile for Qlogic 1G/10G Ethernet Driver for CNA devices
+#
+
+obj-$(CONFIG_QLCNIC) := qlcnic.o
+
+qlcnic-y := qlcnic_hw.o qlcnic_main.o qlcnic_init.o \
+       qlcnic_ethtool.o qlcnic_ctx.o qlcnic_io.o \
+       qlcnic_sysfs.o qlcnic_minidump.o qlcnic_83xx_hw.o \
+       qlcnic_83xx_init.o qlcnic_83xx_vnic.o \
+       qlcnic_sriov_common.o
+
+qlcnic-$(CONFIG_QLCNIC_SRIOV) += qlcnic_sriov_pf.o
+
+qlcnic-$(CONFIG_QLCNIC_DCB) += qlcnic_dcb.o
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
new file mode 100644 (file)
index 0000000..49bad00
--- /dev/null
@@ -0,0 +1,2404 @@
+/*
+ * QLogic qlcnic NIC Driver
+ * Copyright (c) 2009-2013 QLogic Corporation
+ *
+ * See LICENSE.qlcnic for copyright and licensing details.
+ */
+
+#ifndef _QLCNIC_H_
+#define _QLCNIC_H_
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/ioport.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/ip.h>
+#include <linux/in.h>
+#include <linux/tcp.h>
+#include <linux/skbuff.h>
+#include <linux/firmware.h>
+#include <linux/ethtool.h>
+#include <linux/mii.h>
+#include <linux/timer.h>
+#include <linux/irq.h>
+#include <linux/vmalloc.h>
+#include <linux/io.h>
+#include <asm/byteorder.h>
+#include <linux/bitops.h>
+#include <linux/if_vlan.h>
+
+#include "qlcnic_hdr.h"
+#include "qlcnic_hw.h"
+#include "qlcnic_83xx_hw.h"
+#include "qlcnic_dcb.h"
+
+#define _QLCNIC_LINUX_MAJOR 5
+#define _QLCNIC_LINUX_MINOR 3
+#define _QLCNIC_LINUX_SUBVERSION 65
+#define QLCNIC_LINUX_VERSIONID  "5.3.65"
+#define QLCNIC_DRV_IDC_VER  0x01
+#define QLCNIC_DRIVER_VERSION  ((_QLCNIC_LINUX_MAJOR << 16) |\
+                (_QLCNIC_LINUX_MINOR << 8) | (_QLCNIC_LINUX_SUBVERSION))
+
+#define QLCNIC_VERSION_CODE(a, b, c)   (((a) << 24) + ((b) << 16) + (c))
+#define _major(v)      (((v) >> 24) & 0xff)
+#define _minor(v)      (((v) >> 16) & 0xff)
+#define _build(v)      ((v) & 0xffff)
+
+/* version in image has weird encoding:
+ *  7:0  - major
+ * 15:8  - minor
+ * 31:16 - build (little endian)
+ */
+#define QLCNIC_DECODE_VERSION(v) \
+       QLCNIC_VERSION_CODE(((v) & 0xff), (((v) >> 8) & 0xff), ((v) >> 16))
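+
+/*
+ * Editorial illustration (not part of the original qlcnic.h): a hypothetical
+ * helper showing the decode above in action. An image word of 0x00020404
+ * (build 2, minor 4, major 4 in the 31:16/15:8/7:0 layout) decodes to
+ * QLCNIC_VERSION_CODE(4, 4, 2) == 0x04040002, from which _major(), _minor()
+ * and _build() recover 4, 4 and 2 respectively.
+ */
+static inline u32 qlcnic_example_decode_image_version(u32 image_ver)
+{
+       return QLCNIC_DECODE_VERSION(image_ver);
+}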
+
+#define QLCNIC_MIN_FW_VERSION     QLCNIC_VERSION_CODE(4, 4, 2)
+#define QLCNIC_NUM_FLASH_SECTORS (64)
+#define QLCNIC_FLASH_SECTOR_SIZE (64 * 1024)
+#define QLCNIC_FLASH_TOTAL_SIZE  (QLCNIC_NUM_FLASH_SECTORS \
+                                       * QLCNIC_FLASH_SECTOR_SIZE)
+
+#define RCV_DESC_RINGSIZE(rds_ring)    \
+       (sizeof(struct rcv_desc) * (rds_ring)->num_desc)
+#define RCV_BUFF_RINGSIZE(rds_ring)    \
+       (sizeof(struct qlcnic_rx_buffer) * rds_ring->num_desc)
+#define STATUS_DESC_RINGSIZE(sds_ring) \
+       (sizeof(struct status_desc) * (sds_ring)->num_desc)
+#define TX_BUFF_RINGSIZE(tx_ring)      \
+       (sizeof(struct qlcnic_cmd_buffer) * tx_ring->num_desc)
+#define TX_DESC_RINGSIZE(tx_ring)      \
+       (sizeof(struct cmd_desc_type0) * tx_ring->num_desc)
+
+#define QLCNIC_P3P_A0          0x50
+#define QLCNIC_P3P_C0          0x58
+
+#define QLCNIC_IS_REVISION_P3P(REVISION)     (REVISION >= QLCNIC_P3P_A0)
+
+#define FIRST_PAGE_GROUP_START 0
+#define FIRST_PAGE_GROUP_END   0x100000
+
+#define P3P_MAX_MTU                     (9600)
+#define P3P_MIN_MTU                     (68)
+#define QLCNIC_MAX_ETHERHDR                32 /* This contains some padding */
+
+#define QLCNIC_P3P_RX_BUF_MAX_LEN         (QLCNIC_MAX_ETHERHDR + ETH_DATA_LEN)
+#define QLCNIC_P3P_RX_JUMBO_BUF_MAX_LEN   (QLCNIC_MAX_ETHERHDR + P3P_MAX_MTU)
+#define QLCNIC_CT_DEFAULT_RX_BUF_LEN   2048
+#define QLCNIC_LRO_BUFFER_EXTRA                2048
+
+/* Tx defines */
+#define QLCNIC_MAX_FRAGS_PER_TX        14
+#define MAX_TSO_HEADER_DESC    2
+#define MGMT_CMD_DESC_RESV     4
+#define TX_STOP_THRESH         ((MAX_SKB_FRAGS >> 2) + MAX_TSO_HEADER_DESC \
+                                                       + MGMT_CMD_DESC_RESV)
+#define QLCNIC_MAX_TX_TIMEOUTS 2
+
+/* Driver will use 1 Tx ring in INT-x/MSI/SRIOV mode. */
+#define QLCNIC_SINGLE_RING             1
+#define QLCNIC_DEF_SDS_RINGS           4
+#define QLCNIC_DEF_TX_RINGS            4
+#define QLCNIC_MAX_VNIC_TX_RINGS       4
+#define QLCNIC_MAX_VNIC_SDS_RINGS      4
+#define QLCNIC_83XX_MINIMUM_VECTOR     3
+#define QLCNIC_82XX_MINIMUM_VECTOR     2
+
+enum qlcnic_queue_type {
+       QLCNIC_TX_QUEUE = 1,
+       QLCNIC_RX_QUEUE,
+};
+
+/* Operational mode for driver */
+#define QLCNIC_VNIC_MODE       0xFF
+#define QLCNIC_DEFAULT_MODE    0x0
+
+/* Virtual NIC function count */
+#define QLC_DEFAULT_VNIC_COUNT 8
+#define QLC_84XX_VNIC_COUNT    16
+
+/*
+ * The following are the states of the Phantom. The Phantom will set them and
+ * the host will read them to check whether the fields are correct.
+ */
+#define PHAN_INITIALIZE_FAILED         0xffff
+#define PHAN_INITIALIZE_COMPLETE       0xff01
+
+/* Host writes the following to notify that it has done the init-handshake */
+#define PHAN_INITIALIZE_ACK            0xf00f
+#define PHAN_PEG_RCV_INITIALIZED       0xff01
+
+#define NUM_RCV_DESC_RINGS     3
+
+#define RCV_RING_NORMAL 0
+#define RCV_RING_JUMBO 1
+
+#define MIN_CMD_DESCRIPTORS            64
+#define MIN_RCV_DESCRIPTORS            64
+#define MIN_JUMBO_DESCRIPTORS          32
+
+#define MAX_CMD_DESCRIPTORS            1024
+#define MAX_RCV_DESCRIPTORS_1G         4096
+#define MAX_RCV_DESCRIPTORS_10G        8192
+#define MAX_RCV_DESCRIPTORS_VF         2048
+#define MAX_JUMBO_RCV_DESCRIPTORS_1G   512
+#define MAX_JUMBO_RCV_DESCRIPTORS_10G  1024
+
+#define DEFAULT_RCV_DESCRIPTORS_1G     2048
+#define DEFAULT_RCV_DESCRIPTORS_10G    4096
+#define DEFAULT_RCV_DESCRIPTORS_VF     1024
+#define MAX_RDS_RINGS                   2
+
+#define get_next_index(index, length)  \
+       (((index) + 1) & ((length) - 1))
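+
+/*
+ * Editorial illustration (not part of the original qlcnic.h): get_next_index()
+ * assumes the ring length is a power of two, so masking with (length - 1)
+ * wraps the index back to zero at the end of the ring. A hypothetical sketch
+ * of a producer stepping through a MAX_CMD_DESCRIPTORS-sized ring:
+ */
+static inline u32 qlcnic_example_advance_producer(u32 producer)
+{
+       /* e.g. 5 -> 6, and 1023 -> 0 for a 1024-entry ring. */
+       return get_next_index(producer, MAX_CMD_DESCRIPTORS);
+}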
+
+/*
+ * The following data structures describe the descriptors that will be used.
+ * The tcpHdrSize and ipHdrSize fields were added; the driver needs to fill
+ * them in only when doing LSO (packets larger than the 1500-byte size).
+ */
+struct cmd_desc_type0 {
+       u8 tcp_hdr_offset;      /* For LSO only */
+       u8 ip_hdr_offset;       /* For LSO only */
+       __le16 flags_opcode;    /* 15:13 unused, 12:7 opcode, 6:0 flags */
+       __le32 nfrags__length;  /* 31:8 total len, 7:0 frag count */
+
+       __le64 addr_buffer2;
+
+       __le16 encap_descr;     /* 15:10 offset of outer L3 header,
+                                * 9:6 number of 32bit words in outer L3 header,
+                                * 5 offload outer L4 checksum,
+                                * 4 offload outer L3 checksum,
+                                * 3 Inner L4 type, TCP=0, UDP=1,
+                                * 2 Inner L3 type, IPv4=0, IPv6=1,
+                                * 1 Outer L3 type,IPv4=0, IPv6=1,
+                                * 0 type of encapsulation, GRE=0, VXLAN=1
+                                */
+       __le16 mss;
+       u8 port_ctxid;          /* 7:4 ctxid 3:0 port */
+       u8 hdr_length;          /* LSO only : MAC+IP+TCP Hdr size */
+       u8 outer_hdr_length;    /* Encapsulation only */
+       u8 rsvd1;
+
+       __le64 addr_buffer3;
+       __le64 addr_buffer1;
+
+       __le16 buffer_length[4];
+
+       __le64 addr_buffer4;
+
+       u8 eth_addr[ETH_ALEN];
+       __le16 vlan_TCI;        /* In case of  encapsulation,
+                                * this is for outer VLAN
+                                */
+
+} __attribute__ ((aligned(64)));
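+
+/*
+ * Editorial sketch (not part of the original qlcnic.h): going purely by the
+ * field comments above, flags_opcode packs the opcode into bits 12:7 and the
+ * flags into bits 6:0, while nfrags__length carries the total length in bits
+ * 31:8 and the fragment count in bits 7:0. A hypothetical helper showing that
+ * packing:
+ */
+static inline void qlcnic_example_fill_desc(struct cmd_desc_type0 *desc,
+                                           u32 opcode, u32 flags,
+                                           u32 total_len, u32 nr_frags)
+{
+       desc->flags_opcode = cpu_to_le16((opcode << 7) | (flags & 0x7f));
+       desc->nfrags__length = cpu_to_le32((total_len << 8) | (nr_frags & 0xff));
+}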
+
+/* Note: sizeof(rcv_desc) should always be a multiple of 2 */
+struct rcv_desc {
+       __le16 reference_handle;
+       __le16 reserved;
+       __le32 buffer_length;   /* allocated buffer length (usually 2K) */
+       __le64 addr_buffer;
+} __packed;
+
+struct status_desc {
+       __le64 status_desc_data[2];
+} __attribute__ ((aligned(16)));
+
+/* UNIFIED ROMIMAGE */
+#define QLCNIC_UNI_FW_MIN_SIZE         0xc8000
+#define QLCNIC_UNI_DIR_SECT_PRODUCT_TBL        0x0
+#define QLCNIC_UNI_DIR_SECT_BOOTLD     0x6
+#define QLCNIC_UNI_DIR_SECT_FW         0x7
+
+/*Offsets */
+#define QLCNIC_UNI_CHIP_REV_OFF                10
+#define QLCNIC_UNI_FLAGS_OFF           11
+#define QLCNIC_UNI_BIOS_VERSION_OFF    12
+#define QLCNIC_UNI_BOOTLD_IDX_OFF      27
+#define QLCNIC_UNI_FIRMWARE_IDX_OFF    29
+
+struct uni_table_desc{
+       __le32  findex;
+       __le32  num_entries;
+       __le32  entry_size;
+       __le32  reserved[5];
+};
+
+struct uni_data_desc{
+       __le32  findex;
+       __le32  size;
+       __le32  reserved[5];
+};
+
+/* Flash Defines and Structures */
+#define QLCNIC_FLT_LOCATION    0x3F1000
+#define QLCNIC_FDT_LOCATION     0x3F0000
+#define QLCNIC_B0_FW_IMAGE_REGION 0x74
+#define QLCNIC_C0_FW_IMAGE_REGION 0x97
+#define QLCNIC_BOOTLD_REGION    0X72
+struct qlcnic_flt_header {
+       u16 version;
+       u16 len;
+       u16 checksum;
+       u16 reserved;
+};
+
+struct qlcnic_flt_entry {
+       u8 region;
+       u8 reserved0;
+       u8 attrib;
+       u8 reserved1;
+       u32 size;
+       u32 start_addr;
+       u32 end_addr;
+};
+
+/* Flash Descriptor Table */
+struct qlcnic_fdt {
+       u32     valid;
+       u16     ver;
+       u16     len;
+       u16     cksum;
+       u16     unused;
+       u8      model[16];
+       u8      mfg_id;
+       u16     id;
+       u8      flag;
+       u8      erase_cmd;
+       u8      alt_erase_cmd;
+       u8      write_enable_cmd;
+       u8      write_enable_bits;
+       u8      write_statusreg_cmd;
+       u8      unprotected_sec_cmd;
+       u8      read_manuf_cmd;
+       u32     block_size;
+       u32     alt_block_size;
+       u32     flash_size;
+       u32     write_enable_data;
+       u8      readid_addr_len;
+       u8      write_disable_bits;
+       u8      read_dev_id_len;
+       u8      chip_erase_cmd;
+       u16     read_timeo;
+       u8      protected_sec_cmd;
+       u8      resvd[65];
+};
+/* Magic number to let user know flash is programmed */
+#define        QLCNIC_BDINFO_MAGIC 0x12345678
+
+#define QLCNIC_BRDTYPE_P3P_REF_QG      0x0021
+#define QLCNIC_BRDTYPE_P3P_HMEZ                0x0022
+#define QLCNIC_BRDTYPE_P3P_10G_CX4_LP  0x0023
+#define QLCNIC_BRDTYPE_P3P_4_GB                0x0024
+#define QLCNIC_BRDTYPE_P3P_IMEZ                0x0025
+#define QLCNIC_BRDTYPE_P3P_10G_SFP_PLUS        0x0026
+#define QLCNIC_BRDTYPE_P3P_10000_BASE_T        0x0027
+#define QLCNIC_BRDTYPE_P3P_XG_LOM      0x0028
+#define QLCNIC_BRDTYPE_P3P_4_GB_MM     0x0029
+#define QLCNIC_BRDTYPE_P3P_10G_SFP_CT  0x002a
+#define QLCNIC_BRDTYPE_P3P_10G_SFP_QT  0x002b
+#define QLCNIC_BRDTYPE_P3P_10G_CX4     0x0031
+#define QLCNIC_BRDTYPE_P3P_10G_XFP     0x0032
+#define QLCNIC_BRDTYPE_P3P_10G_TP      0x0080
+
+#define QLCNIC_MSIX_TABLE_OFFSET       0x44
+
+/* Flash memory map */
+#define QLCNIC_BRDCFG_START    0x4000          /* board config */
+#define QLCNIC_BOOTLD_START    0x10000         /* bootld */
+#define QLCNIC_IMAGE_START     0x43000         /* compressed image */
+#define QLCNIC_USER_START      0x3E8000        /* Firmware info */
+
+#define QLCNIC_FW_VERSION_OFFSET       (QLCNIC_USER_START+0x408)
+#define QLCNIC_FW_SIZE_OFFSET          (QLCNIC_USER_START+0x40c)
+#define QLCNIC_FW_SERIAL_NUM_OFFSET    (QLCNIC_USER_START+0x81c)
+#define QLCNIC_BIOS_VERSION_OFFSET     (QLCNIC_USER_START+0x83c)
+
+#define QLCNIC_BRDTYPE_OFFSET          (QLCNIC_BRDCFG_START+0x8)
+#define QLCNIC_FW_MAGIC_OFFSET         (QLCNIC_BRDCFG_START+0x128)
+
+#define QLCNIC_FW_MIN_SIZE             (0x3fffff)
+#define QLCNIC_UNIFIED_ROMIMAGE        0
+#define QLCNIC_FLASH_ROMIMAGE          1
+#define QLCNIC_UNKNOWN_ROMIMAGE                0xff
+
+#define QLCNIC_UNIFIED_ROMIMAGE_NAME   "phanfw.bin"
+#define QLCNIC_FLASH_ROMIMAGE_NAME     "flash"
+
+extern char qlcnic_driver_name[];
+
+extern int qlcnic_use_msi;
+extern int qlcnic_use_msi_x;
+extern int qlcnic_auto_fw_reset;
+extern int qlcnic_load_fw_file;
+
+/* Number of status descriptors to handle per interrupt */
+#define MAX_STATUS_HANDLE      (64)
+
+/*
+ * qlcnic_skb_frag{} contains the mapping info for each SG list entry. This
+ * has to be freed when the DMA is complete. It is part of qlcnic_cmd_buffer{}.
+ */
+struct qlcnic_skb_frag {
+       u64 dma;
+       u64 length;
+};
+
+/*    Following defines are for the state of the buffers    */
+#define        QLCNIC_BUFFER_FREE      0
+#define        QLCNIC_BUFFER_BUSY      1
+
+/*
+ * There will be one qlcnic_cmd_buffer per skb packet.  These will be
+ * used to save the DMA info needed for pci_unmap_page().
+ */
+struct qlcnic_cmd_buffer {
+       struct sk_buff *skb;
+       struct qlcnic_skb_frag frag_array[MAX_SKB_FRAGS + 1];
+       u32 frag_count;
+};
+
+/* In rx_buffer, we do not need multiple fragments since it is a single buffer */
+struct qlcnic_rx_buffer {
+       u16 ref_handle;
+       struct sk_buff *skb;
+       struct list_head list;
+       u64 dma;
+};
+
+/* Board types */
+#define        QLCNIC_GBE      0x01
+#define        QLCNIC_XGBE     0x02
+
+/*
+ * Interrupt coalescing defaults. The defaults are for a 1500-byte MTU and are
+ * adjusted based on the configured MTU.
+ */
+#define QLCNIC_INTR_COAL_TYPE_RX               1
+#define QLCNIC_INTR_COAL_TYPE_TX               2
+#define QLCNIC_INTR_COAL_TYPE_RX_TX            3
+
+#define QLCNIC_DEF_INTR_COALESCE_RX_TIME_US    3
+#define QLCNIC_DEF_INTR_COALESCE_RX_PACKETS    256
+
+#define QLCNIC_DEF_INTR_COALESCE_TX_TIME_US    64
+#define QLCNIC_DEF_INTR_COALESCE_TX_PACKETS    64
+
+#define QLCNIC_INTR_DEFAULT                    0x04
+#define QLCNIC_CONFIG_INTR_COALESCE            3
+#define QLCNIC_DEV_INFO_SIZE                   2
+
+struct qlcnic_nic_intr_coalesce {
+       u8      type;
+       u8      sts_ring_mask;
+       u16     rx_packets;
+       u16     rx_time_us;
+       u16     tx_packets;
+       u16     tx_time_us;
+       u16     flag;
+       u32     timer_out;
+};
+
+struct qlcnic_83xx_dump_template_hdr {
+       u32     type;
+       u32     offset;
+       u32     size;
+       u32     cap_mask;
+       u32     num_entries;
+       u32     version;
+       u32     timestamp;
+       u32     checksum;
+       u32     drv_cap_mask;
+       u32     sys_info[3];
+       u32     saved_state[16];
+       u32     cap_sizes[8];
+       u32     ocm_wnd_reg[16];
+       u32     rsvd[0];
+};
+
+struct qlcnic_82xx_dump_template_hdr {
+       u32     type;
+       u32     offset;
+       u32     size;
+       u32     cap_mask;
+       u32     num_entries;
+       u32     version;
+       u32     timestamp;
+       u32     checksum;
+       u32     drv_cap_mask;
+       u32     sys_info[3];
+       u32     saved_state[16];
+       u32     cap_sizes[8];
+       u32     rsvd[7];
+       u32     capabilities;
+       u32     rsvd1[0];
+};
+
+#define QLC_PEX_DMA_READ_SIZE  (PAGE_SIZE * 16)
+
+struct qlcnic_fw_dump {
+       u8      clr;    /* flag to indicate if dump is cleared */
+       bool    enable; /* enable/disable dump */
+       u32     size;   /* total size of the dump */
+       u32     cap_mask; /* Current capture mask */
+       void    *data;  /* dump data area */
+       void    *tmpl_hdr;
+       dma_addr_t phys_addr;
+       void    *dma_buffer;
+       bool    use_pex_dma;
+       /* Read-only elements which are common between the 82xx and 83xx
+        * template headers. Update these values immediately after reading the
+        * template header from firmware.
+        */
+       u32     tmpl_hdr_size;
+       u32     version;
+       u32     num_entries;
+       u32     offset;
+};
+
+/*
+ * One hardware_context{} per adapter;
+ * it contains interrupt info as well as shared hardware info.
+ */
+struct qlcnic_hardware_context {
+       void __iomem *pci_base0;
+       void __iomem *ocm_win_crb;
+
+       unsigned long pci_len0;
+
+       rwlock_t crb_lock;
+       struct mutex mem_lock;
+
+       u8 revision_id;
+       u8 pci_func;
+       u8 linkup;
+       u8 loopback_state;
+       u8 beacon_state;
+       u8 has_link_events;
+       u8 fw_type;
+       u8 physical_port;
+       u8 reset_context;
+       u8 msix_supported;
+       u8 max_mac_filters;
+       u8 mc_enabled;
+       u8 max_mc_count;
+       u8 diag_test;
+       u8 num_msix;
+       u8 nic_mode;
+       int diag_cnt;
+
+       u16 max_uc_count;
+       u16 port_type;
+       u16 board_type;
+       u16 supported_type;
+
+       u16 link_speed;
+       u16 link_duplex;
+       u16 link_autoneg;
+       u16 module_type;
+
+       u16 op_mode;
+       u16 switch_mode;
+       u16 max_tx_ques;
+       u16 max_rx_ques;
+       u16 max_mtu;
+       u32 msg_enable;
+       u16 total_nic_func;
+       u16 max_pci_func;
+       u32 max_vnic_func;
+       u32 total_pci_func;
+
+       u32 capabilities;
+       u32 extra_capability[3];
+       u32 temp;
+       u32 int_vec_bit;
+       u32 fw_hal_version;
+       u32 port_config;
+       struct qlcnic_hardware_ops *hw_ops;
+       struct qlcnic_nic_intr_coalesce coal;
+       struct qlcnic_fw_dump fw_dump;
+       struct qlcnic_fdt fdt;
+       struct qlc_83xx_reset reset;
+       struct qlc_83xx_idc idc;
+       struct qlc_83xx_fw_info *fw_info;
+       struct qlcnic_intrpt_config *intr_tbl;
+       struct qlcnic_sriov *sriov;
+       u32 *reg_tbl;
+       u32 *ext_reg_tbl;
+       u32 mbox_aen[QLC_83XX_MBX_AEN_CNT];
+       u32 mbox_reg[4];
+       struct qlcnic_mailbox *mailbox;
+       u8 extend_lb_time;
+       u8 phys_port_id[ETH_ALEN];
+       u8 lb_mode;
+       u8 vxlan_port_count;
+       u16 vxlan_port;
+       struct device *hwmon_dev;
+       u32 post_mode;
+       bool run_post;
+};
+
+struct qlcnic_adapter_stats {
+       u64  xmitcalled;
+       u64  xmitfinished;
+       u64  rxdropped;
+       u64  txdropped;
+       u64  csummed;
+       u64  rx_pkts;
+       u64  lro_pkts;
+       u64  rxbytes;
+       u64  txbytes;
+       u64  lrobytes;
+       u64  lso_frames;
+       u64  encap_lso_frames;
+       u64  encap_tx_csummed;
+       u64  encap_rx_csummed;
+       u64  xmit_on;
+       u64  xmit_off;
+       u64  skb_alloc_failure;
+       u64  null_rxbuf;
+       u64  rx_dma_map_error;
+       u64  tx_dma_map_error;
+       u64  spurious_intr;
+       u64  mac_filter_limit_overrun;
+       u64  mbx_spurious_intr;
+};
+
+/*
+ * Rcv Descriptor Context. One such structure per Rcv Descriptor ring. There
+ * may be one ring for normal packets, one for jumbo packets and possibly others.
+ */
+struct qlcnic_host_rds_ring {
+       void __iomem *crb_rcv_producer;
+       struct rcv_desc *desc_head;
+       struct qlcnic_rx_buffer *rx_buf_arr;
+       u32 num_desc;
+       u32 producer;
+       u32 dma_size;
+       u32 skb_size;
+       u32 flags;
+       struct list_head free_list;
+       spinlock_t lock;
+       dma_addr_t phys_addr;
+} ____cacheline_internodealigned_in_smp;
+
+struct qlcnic_host_sds_ring {
+       u32 consumer;
+       u32 num_desc;
+       void __iomem *crb_sts_consumer;
+
+       struct qlcnic_host_tx_ring *tx_ring;
+       struct status_desc *desc_head;
+       struct qlcnic_adapter *adapter;
+       struct napi_struct napi;
+       struct list_head free_list[NUM_RCV_DESC_RINGS];
+
+       void __iomem *crb_intr_mask;
+       int irq;
+
+       dma_addr_t phys_addr;
+       char name[IFNAMSIZ + 12];
+} ____cacheline_internodealigned_in_smp;
+
+struct qlcnic_tx_queue_stats {
+       u64 xmit_on;
+       u64 xmit_off;
+       u64 xmit_called;
+       u64 xmit_finished;
+       u64 tx_bytes;
+};
+
+struct qlcnic_host_tx_ring {
+       int irq;
+       void __iomem *crb_intr_mask;
+       char name[IFNAMSIZ + 12];
+       u16 ctx_id;
+
+       u32 state;
+       u32 producer;
+       u32 sw_consumer;
+       u32 num_desc;
+
+       struct qlcnic_tx_queue_stats tx_stats;
+
+       void __iomem *crb_cmd_producer;
+       struct cmd_desc_type0 *desc_head;
+       struct qlcnic_adapter *adapter;
+       struct napi_struct napi;
+       struct qlcnic_cmd_buffer *cmd_buf_arr;
+       __le32 *hw_consumer;
+
+       dma_addr_t phys_addr;
+       dma_addr_t hw_cons_phys_addr;
+       struct netdev_queue *txq;
+       /* Lock to protect Tx descriptors cleanup */
+       spinlock_t tx_clean_lock;
+} ____cacheline_internodealigned_in_smp;
+
+/*
+ * Receive context. There is one such structure per instance of the
+ * receive processing. Any state information that is relevant to
+ * the receive, and is must be in this structure. The global data may be
+ * present elsewhere.
+ */
+struct qlcnic_recv_context {
+       struct qlcnic_host_rds_ring *rds_rings;
+       struct qlcnic_host_sds_ring *sds_rings;
+       u32 state;
+       u16 context_id;
+       u16 virt_port;
+};
+
+/* HW context creation */
+
+#define QLCNIC_OS_CRB_RETRY_COUNT      4000
+
+#define QLCNIC_CDRP_CMD_BIT            0x80000000
+
+/*
+ * All responses must have the QLCNIC_CDRP_CMD_BIT cleared
+ * in the crb QLCNIC_CDRP_CRB_OFFSET.
+ */
+#define QLCNIC_CDRP_FORM_RSP(rsp)      (rsp)
+#define QLCNIC_CDRP_IS_RSP(rsp)        (((rsp) & QLCNIC_CDRP_CMD_BIT) == 0)
+
+#define QLCNIC_CDRP_RSP_OK             0x00000001
+#define QLCNIC_CDRP_RSP_FAIL           0x00000002
+#define QLCNIC_CDRP_RSP_TIMEOUT        0x00000003
+
+/*
+ * All commands must have the QLCNIC_CDRP_CMD_BIT set in
+ * the crb QLCNIC_CDRP_CRB_OFFSET.
+ */
+#define QLCNIC_CDRP_FORM_CMD(cmd)      (QLCNIC_CDRP_CMD_BIT | (cmd))
+
+#define QLCNIC_RCODE_SUCCESS           0
+#define QLCNIC_RCODE_INVALID_ARGS      6
+#define QLCNIC_RCODE_NOT_SUPPORTED     9
+#define QLCNIC_RCODE_NOT_PERMITTED     10
+#define QLCNIC_RCODE_NOT_IMPL          15
+#define QLCNIC_RCODE_INVALID           16
+#define QLCNIC_RCODE_TIMEOUT           17
+#define QLCNIC_DESTROY_CTX_RESET       0
+
+/*
+ * Capabilities Announced
+ */
+#define QLCNIC_CAP0_LEGACY_CONTEXT     (1)
+#define QLCNIC_CAP0_LEGACY_MN          (1 << 2)
+#define QLCNIC_CAP0_LSO                (1 << 6)
+#define QLCNIC_CAP0_JUMBO_CONTIGUOUS   (1 << 7)
+#define QLCNIC_CAP0_LRO_CONTIGUOUS     (1 << 8)
+#define QLCNIC_CAP0_VALIDOFF           (1 << 11)
+#define QLCNIC_CAP0_LRO_MSS            (1 << 21)
+#define QLCNIC_CAP0_TX_MULTI           (1 << 22)
+
+/*
+ * Context state
+ */
+#define QLCNIC_HOST_CTX_STATE_FREED    0
+#define QLCNIC_HOST_CTX_STATE_ACTIVE   2
+
+/*
+ * Rx context
+ */
+
+struct qlcnic_hostrq_sds_ring {
+       __le64 host_phys_addr;  /* Ring base addr */
+       __le32 ring_size;               /* Ring entries */
+       __le16 msi_index;
+       __le16 rsvd;            /* Padding */
+} __packed;
+
+struct qlcnic_hostrq_rds_ring {
+       __le64 host_phys_addr;  /* Ring base addr */
+       __le64 buff_size;               /* Packet buffer size */
+       __le32 ring_size;               /* Ring entries */
+       __le32 ring_kind;               /* Class of ring */
+} __packed;
+
+struct qlcnic_hostrq_rx_ctx {
+       __le64 host_rsp_dma_addr;       /* Response dma'd here */
+       __le32 capabilities[4];         /* Flag bit vector */
+       __le32 host_int_crb_mode;       /* Interrupt crb usage */
+       __le32 host_rds_crb_mode;       /* RDS crb usage */
+       /* These ring offsets are relative to data[0] below */
+       __le32 rds_ring_offset; /* Offset to RDS config */
+       __le32 sds_ring_offset; /* Offset to SDS config */
+       __le16 num_rds_rings;   /* Count of RDS rings */
+       __le16 num_sds_rings;   /* Count of SDS rings */
+       __le16 valid_field_offset;
+       u8  txrx_sds_binding;
+       u8  msix_handler;
+       u8  reserved[128];      /* reserve space for future expansion*/
+       /* MUST BE 64-bit aligned.
+          The following is packed:
+          - N hostrq_rds_rings
+          - N hostrq_sds_rings */
+       char data[0];
+} __packed;
+
+struct qlcnic_cardrsp_rds_ring{
+       __le32 host_producer_crb;       /* Crb to use */
+       __le32 rsvd1;           /* Padding */
+} __packed;
+
+struct qlcnic_cardrsp_sds_ring {
+       __le32 host_consumer_crb;       /* Crb to use */
+       __le32 interrupt_crb;   /* Crb to use */
+} __packed;
+
+struct qlcnic_cardrsp_rx_ctx {
+       /* These ring offsets are relative to data[0] below */
+       __le32 rds_ring_offset; /* Offset to RDS config */
+       __le32 sds_ring_offset; /* Offset to SDS config */
+       __le32 host_ctx_state;  /* Starting State */
+       __le32 num_fn_per_port; /* How many PCI fn share the port */
+       __le16 num_rds_rings;   /* Count of RDS rings */
+       __le16 num_sds_rings;   /* Count of SDS rings */
+       __le16 context_id;              /* Handle for context */
+       u8  phys_port;          /* Physical id of port */
+       u8  virt_port;          /* Virtual/Logical id of port */
+       u8  reserved[128];      /* save space for future expansion */
+       /*  MUST BE 64-bit aligned.
+          The following is packed:
+          - N cardrsp_rds_rings
+          - N cardrs_sds_rings */
+       char data[0];
+} __packed;
+
+#define SIZEOF_HOSTRQ_RX(HOSTRQ_RX, rds_rings, sds_rings)      \
+       (sizeof(HOSTRQ_RX) +                                    \
+       (rds_rings)*(sizeof(struct qlcnic_hostrq_rds_ring)) +           \
+       (sds_rings)*(sizeof(struct qlcnic_hostrq_sds_ring)))
+
+#define SIZEOF_CARDRSP_RX(CARDRSP_RX, rds_rings, sds_rings)    \
+       (sizeof(CARDRSP_RX) +                                   \
+       (rds_rings)*(sizeof(struct qlcnic_cardrsp_rds_ring)) +          \
+       (sds_rings)*(sizeof(struct qlcnic_cardrsp_sds_ring)))
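+
+/*
+ * Editorial illustration (not part of the original qlcnic.h): the host request
+ * and card response for an Rx context are variable length because the ring
+ * descriptions are packed after data[0], so the allocation is the fixed
+ * header plus one ring structure per requested ring. A hypothetical sizing
+ * helper using the macro above:
+ */
+static inline size_t qlcnic_example_hostrq_rx_size(u16 rds_rings, u16 sds_rings)
+{
+       /* e.g. 2 RDS rings and 4 SDS rings for a default configuration. */
+       return SIZEOF_HOSTRQ_RX(struct qlcnic_hostrq_rx_ctx, rds_rings, sds_rings);
+}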
+
+/*
+ * Tx context
+ */
+
+struct qlcnic_hostrq_cds_ring {
+       __le64 host_phys_addr;  /* Ring base addr */
+       __le32 ring_size;               /* Ring entries */
+       __le32 rsvd;            /* Padding */
+} __packed;
+
+struct qlcnic_hostrq_tx_ctx {
+       __le64 host_rsp_dma_addr;       /* Response dma'd here */
+       __le64 cmd_cons_dma_addr;       /*  */
+       __le64 dummy_dma_addr;  /*  */
+       __le32 capabilities[4]; /* Flag bit vector */
+       __le32 host_int_crb_mode;       /* Interrupt crb usage */
+       __le32 rsvd1;           /* Padding */
+       __le16 rsvd2;           /* Padding */
+       __le16 interrupt_ctl;
+       __le16 msi_index;
+       __le16 rsvd3;           /* Padding */
+       struct qlcnic_hostrq_cds_ring cds_ring; /* Desc of cds ring */
+       u8  reserved[128];      /* future expansion */
+} __packed;
+
+struct qlcnic_cardrsp_cds_ring {
+       __le32 host_producer_crb;       /* Crb to use */
+       __le32 interrupt_crb;   /* Crb to use */
+} __packed;
+
+struct qlcnic_cardrsp_tx_ctx {
+       __le32 host_ctx_state;  /* Starting state */
+       __le16 context_id;              /* Handle for context */
+       u8  phys_port;          /* Physical id of port */
+       u8  virt_port;          /* Virtual/Logical id of port */
+       struct qlcnic_cardrsp_cds_ring cds_ring;        /* Card cds settings */
+       u8  reserved[128];      /* future expansion */
+} __packed;
+
+#define SIZEOF_HOSTRQ_TX(HOSTRQ_TX)    (sizeof(HOSTRQ_TX))
+#define SIZEOF_CARDRSP_TX(CARDRSP_TX)  (sizeof(CARDRSP_TX))
+
+/* CRB */
+
+#define QLCNIC_HOST_RDS_CRB_MODE_UNIQUE        0
+#define QLCNIC_HOST_RDS_CRB_MODE_SHARED        1
+#define QLCNIC_HOST_RDS_CRB_MODE_CUSTOM        2
+#define QLCNIC_HOST_RDS_CRB_MODE_MAX   3
+
+#define QLCNIC_HOST_INT_CRB_MODE_UNIQUE        0
+#define QLCNIC_HOST_INT_CRB_MODE_SHARED        1
+#define QLCNIC_HOST_INT_CRB_MODE_NORX  2
+#define QLCNIC_HOST_INT_CRB_MODE_NOTX  3
+#define QLCNIC_HOST_INT_CRB_MODE_NORXTX        4
+
+
+/* MAC */
+
+#define MC_COUNT_P3P   38
+
+#define QLCNIC_MAC_NOOP        0
+#define QLCNIC_MAC_ADD 1
+#define QLCNIC_MAC_DEL 2
+#define QLCNIC_MAC_VLAN_ADD    3
+#define QLCNIC_MAC_VLAN_DEL    4
+
+enum qlcnic_mac_type {
+       QLCNIC_UNICAST_MAC,
+       QLCNIC_MULTICAST_MAC,
+       QLCNIC_BROADCAST_MAC,
+};
+
+struct qlcnic_mac_vlan_list {
+       struct list_head list;
+       uint8_t mac_addr[ETH_ALEN+2];
+       u16 vlan_id;
+       enum qlcnic_mac_type mac_type;
+};
+
+/* MAC Learn */
+#define NO_MAC_LEARN           0
+#define DRV_MAC_LEARN          1
+#define FDB_MAC_LEARN          2
+
+#define QLCNIC_HOST_REQUEST    0x13
+#define QLCNIC_REQUEST         0x14
+
+#define QLCNIC_MAC_EVENT       0x1
+
+#define QLCNIC_IP_UP           2
+#define QLCNIC_IP_DOWN         3
+
+#define QLCNIC_ILB_MODE                0x1
+#define QLCNIC_ELB_MODE                0x2
+#define QLCNIC_LB_MODE_MASK    0x3
+
+#define QLCNIC_LINKEVENT       0x1
+#define QLCNIC_LB_RESPONSE     0x2
+#define QLCNIC_IS_LB_CONFIGURED(VAL)   \
+               (VAL == (QLCNIC_LINKEVENT | QLCNIC_LB_RESPONSE))
+
+/*
+ * Driver --> Firmware
+ */
+#define QLCNIC_H2C_OPCODE_CONFIG_RSS                   0x1
+#define QLCNIC_H2C_OPCODE_CONFIG_INTR_COALESCE         0x3
+#define QLCNIC_H2C_OPCODE_CONFIG_LED                   0x4
+#define QLCNIC_H2C_OPCODE_LRO_REQUEST                  0x7
+#define QLCNIC_H2C_OPCODE_SET_MAC_RECEIVE_MODE         0xc
+#define QLCNIC_H2C_OPCODE_CONFIG_IPADDR                0x12
+
+#define QLCNIC_H2C_OPCODE_GET_LINKEVENT                0x15
+#define QLCNIC_H2C_OPCODE_CONFIG_BRIDGING              0x17
+#define QLCNIC_H2C_OPCODE_CONFIG_HW_LRO                0x18
+#define QLCNIC_H2C_OPCODE_CONFIG_LOOPBACK              0x13
+
+/*
+ * Firmware --> Driver
+ */
+
+#define QLCNIC_C2H_OPCODE_CONFIG_LOOPBACK              0x8f
+#define QLCNIC_C2H_OPCODE_GET_LINKEVENT_RESPONSE       0x8D
+#define QLCNIC_C2H_OPCODE_GET_DCB_AEN                  0x90
+
+#define VPORT_MISS_MODE_DROP           0 /* drop all unmatched */
+#define VPORT_MISS_MODE_ACCEPT_ALL     1 /* accept all packets */
+#define VPORT_MISS_MODE_ACCEPT_MULTI   2 /* accept unmatched multicast */
+
+#define QLCNIC_LRO_REQUEST_CLEANUP     4
+
+/* Capabilities received */
+#define QLCNIC_FW_CAPABILITY_TSO               BIT_1
+#define QLCNIC_FW_CAPABILITY_BDG               BIT_8
+#define QLCNIC_FW_CAPABILITY_FVLANTX           BIT_9
+#define QLCNIC_FW_CAPABILITY_HW_LRO            BIT_10
+#define QLCNIC_FW_CAPABILITY_2_MULTI_TX                BIT_4
+#define QLCNIC_FW_CAPABILITY_MULTI_LOOPBACK    BIT_27
+#define QLCNIC_FW_CAPABILITY_MORE_CAPS         BIT_31
+
+#define QLCNIC_FW_CAPABILITY_2_LRO_MAX_TCP_SEG BIT_2
+#define QLCNIC_FW_CAP2_HW_LRO_IPV6             BIT_3
+#define QLCNIC_FW_CAPABILITY_SET_DRV_VER       BIT_5
+#define QLCNIC_FW_CAPABILITY_2_BEACON          BIT_7
+#define QLCNIC_FW_CAPABILITY_2_PER_PORT_ESWITCH_CFG    BIT_9
+#define QLCNIC_FW_CAPABILITY_2_EXT_ISCSI_DUMP  BIT_13
+
+#define QLCNIC_83XX_FW_CAPAB_ENCAP_RX_OFFLOAD  BIT_0
+#define QLCNIC_83XX_FW_CAPAB_ENCAP_TX_OFFLOAD  BIT_1
+#define QLCNIC_83XX_FW_CAPAB_ENCAP_CKO_OFFLOAD BIT_4
+
+/* module types */
+#define LINKEVENT_MODULE_NOT_PRESENT                   1
+#define LINKEVENT_MODULE_OPTICAL_UNKNOWN               2
+#define LINKEVENT_MODULE_OPTICAL_SRLR                  3
+#define LINKEVENT_MODULE_OPTICAL_LRM                   4
+#define LINKEVENT_MODULE_OPTICAL_SFP_1G                5
+#define LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLE      6
+#define LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLELEN   7
+#define LINKEVENT_MODULE_TWINAX                        8
+
+#define LINKSPEED_10GBPS       10000
+#define LINKSPEED_1GBPS        1000
+#define LINKSPEED_100MBPS      100
+#define LINKSPEED_10MBPS       10
+
+#define LINKSPEED_ENCODED_10MBPS       0
+#define LINKSPEED_ENCODED_100MBPS      1
+#define LINKSPEED_ENCODED_1GBPS        2
+
+#define LINKEVENT_AUTONEG_DISABLED     0
+#define LINKEVENT_AUTONEG_ENABLED      1
+
+#define LINKEVENT_HALF_DUPLEX          0
+#define LINKEVENT_FULL_DUPLEX          1
+
+#define LINKEVENT_LINKSPEED_MBPS       0
+#define LINKEVENT_LINKSPEED_ENCODED    1
+
+/* firmware response header:
+ *     63:58 - message type
+ *     57:56 - owner
+ *     55:53 - desc count
+ *     52:48 - reserved
+ *     47:40 - completion id
+ *     39:32 - opcode
+ *     31:16 - error code
+ *     15:00 - reserved
+ */
+#define qlcnic_get_nic_msg_opcode(msg_hdr)     \
+       ((msg_hdr >> 32) & 0xFF)
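+
+/*
+ * Editorial illustration (not part of the original qlcnic.h): per the bit
+ * layout documented above, qlcnic_get_nic_msg_opcode() simply extracts bits
+ * 39:32 of the response header. A hypothetical helper pulling out the
+ * neighbouring completion id (bits 47:40) in the same way:
+ */
+static inline u8 qlcnic_example_msg_completion_id(u64 msg_hdr)
+{
+       return (msg_hdr >> 40) & 0xff;
+}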
+
+struct qlcnic_fw_msg {
+       union {
+               struct {
+                       u64 hdr;
+                       u64 body[7];
+               };
+               u64 words[8];
+       };
+};
+
+struct qlcnic_nic_req {
+       __le64 qhdr;
+       __le64 req_hdr;
+       __le64 words[6];
+} __packed;
+
+struct qlcnic_mac_req {
+       u8 op;
+       u8 tag;
+       u8 mac_addr[6];
+};
+
+struct qlcnic_vlan_req {
+       __le16 vlan_id;
+       __le16 rsvd[3];
+} __packed;
+
+struct qlcnic_ipaddr {
+       __be32 ipv4;
+       __be32 ipv6[4];
+};
+
+#define QLCNIC_MSI_ENABLED             0x02
+#define QLCNIC_MSIX_ENABLED            0x04
+#define QLCNIC_LRO_ENABLED             0x01
+#define QLCNIC_LRO_DISABLED            0x00
+#define QLCNIC_BRIDGE_ENABLED          0X10
+#define QLCNIC_DIAG_ENABLED            0x20
+#define QLCNIC_ESWITCH_ENABLED         0x40
+#define QLCNIC_ADAPTER_INITIALIZED     0x80
+#define QLCNIC_TAGGING_ENABLED         0x100
+#define QLCNIC_MACSPOOF                        0x200
+#define QLCNIC_MAC_OVERRIDE_DISABLED   0x400
+#define QLCNIC_PROMISC_DISABLED                0x800
+#define QLCNIC_NEED_FLR                        0x1000
+#define QLCNIC_FW_RESET_OWNER          0x2000
+#define QLCNIC_FW_HANG                 0x4000
+#define QLCNIC_FW_LRO_MSS_CAP          0x8000
+#define QLCNIC_TX_INTR_SHARED          0x10000
+#define QLCNIC_APP_CHANGED_FLAGS       0x20000
+#define QLCNIC_HAS_PHYS_PORT_ID                0x40000
+#define QLCNIC_TSS_RSS                 0x80000
+
+#define QLCNIC_ADD_VXLAN_PORT          0x100000
+#define QLCNIC_DEL_VXLAN_PORT          0x200000
+
+#define QLCNIC_VLAN_FILTERING          0x800000
+
+#define QLCNIC_IS_MSI_FAMILY(adapter) \
+       ((adapter)->flags & (QLCNIC_MSI_ENABLED | QLCNIC_MSIX_ENABLED))
+#define QLCNIC_IS_TSO_CAPABLE(adapter)  \
+       ((adapter)->ahw->capabilities & QLCNIC_FW_CAPABILITY_TSO)
+
+#define QLCNIC_BEACON_EANBLE           0xC
+#define QLCNIC_BEACON_DISABLE          0xD
+
+#define QLCNIC_BEACON_ON               2
+#define QLCNIC_BEACON_OFF              0
+
+#define QLCNIC_MSIX_TBL_SPACE          8192
+#define QLCNIC_PCI_REG_MSIX_TBL        0x44
+#define QLCNIC_MSIX_TBL_PGSIZE         4096
+
+#define QLCNIC_ADAPTER_UP_MAGIC 777
+
+#define __QLCNIC_FW_ATTACHED           0
+#define __QLCNIC_DEV_UP                1
+#define __QLCNIC_RESETTING             2
+#define __QLCNIC_START_FW              4
+#define __QLCNIC_AER                   5
+#define __QLCNIC_DIAG_RES_ALLOC                6
+#define __QLCNIC_LED_ENABLE            7
+#define __QLCNIC_ELB_INPROGRESS                8
+#define __QLCNIC_MULTI_TX_UNIQUE       9
+#define __QLCNIC_SRIOV_ENABLE          10
+#define __QLCNIC_SRIOV_CAPABLE         11
+#define __QLCNIC_MBX_POLL_ENABLE       12
+#define __QLCNIC_DIAG_MODE             13
+#define __QLCNIC_MAINTENANCE_MODE      16
+
+#define QLCNIC_INTERRUPT_TEST          1
+#define QLCNIC_LOOPBACK_TEST           2
+#define QLCNIC_LED_TEST                3
+
+#define QLCNIC_FILTER_AGE      80
+#define QLCNIC_READD_AGE       20
+#define QLCNIC_LB_MAX_FILTERS  64
+#define QLCNIC_LB_BUCKET_SIZE  32
+#define QLCNIC_ILB_MAX_RCV_LOOP        10
+
+struct qlcnic_filter {
+       struct hlist_node fnode;
+       u8 faddr[ETH_ALEN];
+       u16 vlan_id;
+       unsigned long ftime;
+};
+
+struct qlcnic_filter_hash {
+       struct hlist_head *fhead;
+       u8 fnum;
+       u16 fmax;
+       u16 fbucket_size;
+};
+
+/* Mailbox specific data structures */
+struct qlcnic_mailbox {
+       struct workqueue_struct *work_q;
+       struct qlcnic_adapter   *adapter;
+       const struct qlcnic_mbx_ops *ops;
+       struct work_struct      work;
+       struct completion       completion;
+       struct list_head        cmd_q;
+       unsigned long           status;
+       spinlock_t              queue_lock;     /* Mailbox queue lock */
+       spinlock_t              aen_lock;       /* Mailbox response/AEN lock */
+       u32                     rsp_status;
+       u32                     num_cmds;
+};
+
+struct qlcnic_adapter {
+       struct qlcnic_hardware_context *ahw;
+       struct qlcnic_recv_context *recv_ctx;
+       struct qlcnic_host_tx_ring *tx_ring;
+       struct net_device *netdev;
+       struct pci_dev *pdev;
+
+       unsigned long state;
+       u32 flags;
+
+       u16 num_txd;
+       u16 num_rxd;
+       u16 num_jumbo_rxd;
+       u16 max_rxd;
+       u16 max_jumbo_rxd;
+
+       u8 max_rds_rings;
+
+       u8 max_sds_rings; /* max sds rings supported by adapter */
+       u8 max_tx_rings;  /* max tx rings supported by adapter */
+
+       u8 drv_tx_rings;  /* max tx rings supported by driver */
+       u8 drv_sds_rings; /* max sds rings supported by driver */
+
+       u8 drv_tss_rings; /* tss ring input */
+       u8 drv_rss_rings; /* rss ring input */
+
+       u8 rx_csum;
+       u8 portnum;
+
+       u8 fw_wait_cnt;
+       u8 fw_fail_cnt;
+       u8 tx_timeo_cnt;
+       u8 need_fw_reset;
+       u8 reset_ctx_cnt;
+
+       u16 is_up;
+       u16 rx_pvid;
+       u16 tx_pvid;
+
+       u32 irq;
+       u32 heartbeat;
+
+       u8 dev_state;
+       u8 reset_ack_timeo;
+       u8 dev_init_timeo;
+
+       u8 mac_addr[ETH_ALEN];
+
+       u64 dev_rst_time;
+       bool drv_mac_learn;
+       bool fdb_mac_learn;
+       bool rx_mac_learn;
+       unsigned long vlans[BITS_TO_LONGS(VLAN_N_VID)];
+       u8 flash_mfg_id;
+       struct qlcnic_npar_info *npars;
+       struct qlcnic_eswitch *eswitch;
+       struct qlcnic_nic_template *nic_ops;
+
+       struct qlcnic_adapter_stats stats;
+       struct list_head mac_list;
+
+       void __iomem    *tgt_mask_reg;
+       void __iomem    *tgt_status_reg;
+       void __iomem    *crb_int_state_reg;
+       void __iomem    *isr_int_vec;
+
+       struct msix_entry *msix_entries;
+       struct workqueue_struct *qlcnic_wq;
+       struct delayed_work fw_work;
+       struct delayed_work idc_aen_work;
+       struct delayed_work mbx_poll_work;
+       struct qlcnic_dcb *dcb;
+
+       struct qlcnic_filter_hash fhash;
+       struct qlcnic_filter_hash rx_fhash;
+       struct list_head vf_mc_list;
+
+       spinlock_t mac_learn_lock;
+       /* spinlock for catching rcv filters for eswitch traffic */
+       spinlock_t rx_mac_learn_lock;
+       u32 file_prd_off;       /*File fw product offset*/
+       u32 fw_version;
+       u32 offload_flags;
+       const struct firmware *fw;
+};
+
+struct qlcnic_info_le {
+       __le16  pci_func;
+       __le16  op_mode;        /* 1 = Priv, 2 = NP, 3 = NP passthru */
+       __le16  phys_port;
+       __le16  switch_mode;    /* 0 = disabled, 1 = int, 2 = ext */
+
+       __le32  capabilities;
+       u8      max_mac_filters;
+       u8      reserved1;
+       __le16  max_mtu;
+
+       __le16  max_tx_ques;
+       __le16  max_rx_ques;
+       __le16  min_tx_bw;
+       __le16  max_tx_bw;
+       __le32  op_type;
+       __le16  max_bw_reg_offset;
+       __le16  max_linkspeed_reg_offset;
+       __le32  capability1;
+       __le32  capability2;
+       __le32  capability3;
+       __le16  max_tx_mac_filters;
+       __le16  max_rx_mcast_mac_filters;
+       __le16  max_rx_ucast_mac_filters;
+       __le16  max_rx_ip_addr;
+       __le16  max_rx_lro_flow;
+       __le16  max_rx_status_rings;
+       __le16  max_rx_buf_rings;
+       __le16  max_tx_vlan_keys;
+       u8      total_pf;
+       u8      total_rss_engines;
+       __le16  max_vports;
+       __le16  linkstate_reg_offset;
+       __le16  bit_offsets;
+       __le16  max_local_ipv6_addrs;
+       __le16  max_remote_ipv6_addrs;
+       u8      reserved2[56];
+} __packed;
+
+struct qlcnic_info {
+       u16     pci_func;
+       u16     op_mode;
+       u16     phys_port;
+       u16     switch_mode;
+       u32     capabilities;
+       u8      max_mac_filters;
+       u16     max_mtu;
+       u16     max_tx_ques;
+       u16     max_rx_ques;
+       u16     min_tx_bw;
+       u16     max_tx_bw;
+       u32     op_type;
+       u16     max_bw_reg_offset;
+       u16     max_linkspeed_reg_offset;
+       u32     capability1;
+       u32     capability2;
+       u32     capability3;
+       u16     max_tx_mac_filters;
+       u16     max_rx_mcast_mac_filters;
+       u16     max_rx_ucast_mac_filters;
+       u16     max_rx_ip_addr;
+       u16     max_rx_lro_flow;
+       u16     max_rx_status_rings;
+       u16     max_rx_buf_rings;
+       u16     max_tx_vlan_keys;
+       u8      total_pf;
+       u8      total_rss_engines;
+       u16     max_vports;
+       u16     linkstate_reg_offset;
+       u16     bit_offsets;
+       u16     max_local_ipv6_addrs;
+       u16     max_remote_ipv6_addrs;
+};
+
+struct qlcnic_pci_info_le {
+       __le16  id;             /* pci function id */
+       __le16  active;         /* 1 = Enabled */
+       __le16  type;           /* 1 = NIC, 2 = FCoE, 3 = iSCSI */
+       __le16  default_port;   /* default port number */
+
+       __le16  tx_min_bw;      /* Multiple of 100 Mbps */
+       __le16  tx_max_bw;
+       __le16  reserved1[2];
+
+       u8      mac[ETH_ALEN];
+       __le16  func_count;
+       u8      reserved2[104];
+
+} __packed;
+
+struct qlcnic_pci_info {
+       u16     id;
+       u16     active;
+       u16     type;
+       u16     default_port;
+       u16     tx_min_bw;
+       u16     tx_max_bw;
+       u8      mac[ETH_ALEN];
+       u16  func_count;
+};
+
+struct qlcnic_npar_info {
+       bool    eswitch_status;
+       u16     pvid;
+       u16     min_bw;
+       u16     max_bw;
+       u8      phy_port;
+       u8      type;
+       u8      active;
+       u8      enable_pm;
+       u8      dest_npar;
+       u8      discard_tagged;
+       u8      mac_override;
+       u8      mac_anti_spoof;
+       u8      promisc_mode;
+       u8      offload_flags;
+       u8      pci_func;
+       u8      mac[ETH_ALEN];
+};
+
+struct qlcnic_eswitch {
+       u8      port;
+       u8      active_vports;
+       u8      active_vlans;
+       u8      active_ucast_filters;
+       u8      max_ucast_filters;
+       u8      max_active_vlans;
+
+       u32     flags;
+#define QLCNIC_SWITCH_ENABLE           BIT_1
+#define QLCNIC_SWITCH_VLAN_FILTERING   BIT_2
+#define QLCNIC_SWITCH_PROMISC_MODE     BIT_3
+#define QLCNIC_SWITCH_PORT_MIRRORING   BIT_4
+};
+
+
+#define MAX_BW                 100     /* % of link speed */
+#define MIN_BW                 1       /* % of link speed */
+#define MAX_VLAN_ID            4095
+#define MIN_VLAN_ID            2
+#define DEFAULT_MAC_LEARN      1
+
+#define IS_VALID_VLAN(vlan)    (vlan >= MIN_VLAN_ID && vlan < MAX_VLAN_ID)
+#define IS_VALID_BW(bw)                (bw <= MAX_BW)
+
+struct qlcnic_pci_func_cfg {
+       u16     func_type;
+       u16     min_bw;
+       u16     max_bw;
+       u16     port_num;
+       u8      pci_func;
+       u8      func_state;
+       u8      def_mac_addr[ETH_ALEN];
+};
+
+struct qlcnic_npar_func_cfg {
+       u32     fw_capab;
+       u16     port_num;
+       u16     min_bw;
+       u16     max_bw;
+       u16     max_tx_queues;
+       u16     max_rx_queues;
+       u8      pci_func;
+       u8      op_mode;
+};
+
+struct qlcnic_pm_func_cfg {
+       u8      pci_func;
+       u8      action;
+       u8      dest_npar;
+       u8      reserved[5];
+};
+
+struct qlcnic_esw_func_cfg {
+       u16     vlan_id;
+       u8      op_mode;
+       u8      op_type;
+       u8      pci_func;
+       u8      host_vlan_tag;
+       u8      promisc_mode;
+       u8      discard_tagged;
+       u8      mac_override;
+       u8      mac_anti_spoof;
+       u8      offload_flags;
+       u8      reserved[5];
+};
+
+#define QLCNIC_STATS_VERSION           1
+#define QLCNIC_STATS_PORT              1
+#define QLCNIC_STATS_ESWITCH           2
+#define QLCNIC_QUERY_RX_COUNTER                0
+#define QLCNIC_QUERY_TX_COUNTER                1
+#define QLCNIC_STATS_NOT_AVAIL 0xffffffffffffffffULL
+#define QLCNIC_FILL_STATS(VAL1) \
+       (((VAL1) == QLCNIC_STATS_NOT_AVAIL) ? 0 : VAL1)
+#define QLCNIC_MAC_STATS 1
+#define QLCNIC_ESW_STATS 2
+
+#define QLCNIC_ADD_ESW_STATS(VAL1, VAL2)\
+do {   \
+       if (((VAL1) == QLCNIC_STATS_NOT_AVAIL) && \
+           ((VAL2) != QLCNIC_STATS_NOT_AVAIL)) \
+               (VAL1) = (VAL2); \
+       else if (((VAL1) != QLCNIC_STATS_NOT_AVAIL) && \
+                ((VAL2) != QLCNIC_STATS_NOT_AVAIL)) \
+                       (VAL1) += (VAL2); \
+} while (0)
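+
+/*
+ * Editorial illustration (not part of the original qlcnic.h):
+ * QLCNIC_STATS_NOT_AVAIL is a sentinel meaning "counter not reported", so
+ * QLCNIC_ADD_ESW_STATS() only accumulates when the incoming value is valid,
+ * seeding the destination if it was previously unavailable, while
+ * QLCNIC_FILL_STATS() sanitizes a single counter to zero for reporting.
+ * A hypothetical use when folding counters into a running total:
+ */
+static inline u64 qlcnic_example_fold_counter(u64 total, u64 sample)
+{
+       /* Leaves total untouched when sample is NOT_AVAIL, seeds or adds otherwise. */
+       QLCNIC_ADD_ESW_STATS(total, sample);
+       return total;
+}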
+
+struct qlcnic_mac_statistics_le {
+       __le64  mac_tx_frames;
+       __le64  mac_tx_bytes;
+       __le64  mac_tx_mcast_pkts;
+       __le64  mac_tx_bcast_pkts;
+       __le64  mac_tx_pause_cnt;
+       __le64  mac_tx_ctrl_pkt;
+       __le64  mac_tx_lt_64b_pkts;
+       __le64  mac_tx_lt_127b_pkts;
+       __le64  mac_tx_lt_255b_pkts;
+       __le64  mac_tx_lt_511b_pkts;
+       __le64  mac_tx_lt_1023b_pkts;
+       __le64  mac_tx_lt_1518b_pkts;
+       __le64  mac_tx_gt_1518b_pkts;
+       __le64  rsvd1[3];
+
+       __le64  mac_rx_frames;
+       __le64  mac_rx_bytes;
+       __le64  mac_rx_mcast_pkts;
+       __le64  mac_rx_bcast_pkts;
+       __le64  mac_rx_pause_cnt;
+       __le64  mac_rx_ctrl_pkt;
+       __le64  mac_rx_lt_64b_pkts;
+       __le64  mac_rx_lt_127b_pkts;
+       __le64  mac_rx_lt_255b_pkts;
+       __le64  mac_rx_lt_511b_pkts;
+       __le64  mac_rx_lt_1023b_pkts;
+       __le64  mac_rx_lt_1518b_pkts;
+       __le64  mac_rx_gt_1518b_pkts;
+       __le64  rsvd2[3];
+
+       __le64  mac_rx_length_error;
+       __le64  mac_rx_length_small;
+       __le64  mac_rx_length_large;
+       __le64  mac_rx_jabber;
+       __le64  mac_rx_dropped;
+       __le64  mac_rx_crc_error;
+       __le64  mac_align_error;
+} __packed;
+
+struct qlcnic_mac_statistics {
+       u64     mac_tx_frames;
+       u64     mac_tx_bytes;
+       u64     mac_tx_mcast_pkts;
+       u64     mac_tx_bcast_pkts;
+       u64     mac_tx_pause_cnt;
+       u64     mac_tx_ctrl_pkt;
+       u64     mac_tx_lt_64b_pkts;
+       u64     mac_tx_lt_127b_pkts;
+       u64     mac_tx_lt_255b_pkts;
+       u64     mac_tx_lt_511b_pkts;
+       u64     mac_tx_lt_1023b_pkts;
+       u64     mac_tx_lt_1518b_pkts;
+       u64     mac_tx_gt_1518b_pkts;
+       u64     rsvd1[3];
+       u64     mac_rx_frames;
+       u64     mac_rx_bytes;
+       u64     mac_rx_mcast_pkts;
+       u64     mac_rx_bcast_pkts;
+       u64     mac_rx_pause_cnt;
+       u64     mac_rx_ctrl_pkt;
+       u64     mac_rx_lt_64b_pkts;
+       u64     mac_rx_lt_127b_pkts;
+       u64     mac_rx_lt_255b_pkts;
+       u64     mac_rx_lt_511b_pkts;
+       u64     mac_rx_lt_1023b_pkts;
+       u64     mac_rx_lt_1518b_pkts;
+       u64     mac_rx_gt_1518b_pkts;
+       u64     rsvd2[3];
+       u64     mac_rx_length_error;
+       u64     mac_rx_length_small;
+       u64     mac_rx_length_large;
+       u64     mac_rx_jabber;
+       u64     mac_rx_dropped;
+       u64     mac_rx_crc_error;
+       u64     mac_align_error;
+};
+
+struct qlcnic_esw_stats_le {
+       __le16 context_id;
+       __le16 version;
+       __le16 size;
+       __le16 unused;
+       __le64 unicast_frames;
+       __le64 multicast_frames;
+       __le64 broadcast_frames;
+       __le64 dropped_frames;
+       __le64 errors;
+       __le64 local_frames;
+       __le64 numbytes;
+       __le64 rsvd[3];
+} __packed;
+
+struct __qlcnic_esw_statistics {
+       u16     context_id;
+       u16     version;
+       u16     size;
+       u16     unused;
+       u64     unicast_frames;
+       u64     multicast_frames;
+       u64     broadcast_frames;
+       u64     dropped_frames;
+       u64     errors;
+       u64     local_frames;
+       u64     numbytes;
+       u64     rsvd[3];
+};
+
+struct qlcnic_esw_statistics {
+       struct __qlcnic_esw_statistics rx;
+       struct __qlcnic_esw_statistics tx;
+};
+
+#define QLCNIC_FORCE_FW_DUMP_KEY       0xdeadfeed
+#define QLCNIC_ENABLE_FW_DUMP          0xaddfeed
+#define QLCNIC_DISABLE_FW_DUMP         0xbadfeed
+#define QLCNIC_FORCE_FW_RESET          0xdeaddead
+#define QLCNIC_SET_QUIESCENT           0xadd00010
+#define QLCNIC_RESET_QUIESCENT         0xadd00020
+
+struct _cdrp_cmd {
+       u32 num;
+       u32 *arg;
+};
+
+struct qlcnic_cmd_args {
+       struct completion       completion;
+       struct list_head        list;
+       struct _cdrp_cmd        req;
+       struct _cdrp_cmd        rsp;
+       atomic_t                rsp_status;
+       int                     pay_size;
+       u32                     rsp_opcode;
+       u32                     total_cmds;
+       u32                     op_type;
+       u32                     type;
+       u32                     cmd_op;
+       u32                     *hdr;   /* Back channel message header */
+       u32                     *pay;   /* Back channel message payload */
+       u8                      func_num;
+};
+
+int qlcnic_fw_cmd_get_minidump_temp(struct qlcnic_adapter *adapter);
+int qlcnic_fw_cmd_set_port(struct qlcnic_adapter *adapter, u32 config);
+int qlcnic_pci_mem_write_2M(struct qlcnic_adapter *, u64 off, u64 data);
+int qlcnic_pci_mem_read_2M(struct qlcnic_adapter *, u64 off, u64 *data);
+
+#define ADDR_IN_RANGE(addr, low, high) \
+       (((addr) < (high)) && ((addr) >= (low)))
+
+#define QLCRD32(adapter, off, err) \
+       (adapter->ahw->hw_ops->read_reg)(adapter, off, err)
+
+#define QLCWR32(adapter, off, val) \
+       adapter->ahw->hw_ops->write_reg(adapter, off, val)
+
+int qlcnic_pcie_sem_lock(struct qlcnic_adapter *, int, u32);
+void qlcnic_pcie_sem_unlock(struct qlcnic_adapter *, int);
+
+#define qlcnic_rom_lock(a)     \
+       qlcnic_pcie_sem_lock((a), 2, QLCNIC_ROM_LOCK_ID)
+#define qlcnic_rom_unlock(a)   \
+       qlcnic_pcie_sem_unlock((a), 2)
+#define qlcnic_phy_lock(a)     \
+       qlcnic_pcie_sem_lock((a), 3, QLCNIC_PHY_LOCK_ID)
+#define qlcnic_phy_unlock(a)   \
+       qlcnic_pcie_sem_unlock((a), 3)
+#define qlcnic_sw_lock(a)      \
+       qlcnic_pcie_sem_lock((a), 6, 0)
+#define qlcnic_sw_unlock(a)    \
+       qlcnic_pcie_sem_unlock((a), 6)
+#define crb_win_lock(a)        \
+       qlcnic_pcie_sem_lock((a), 7, QLCNIC_CRB_WIN_LOCK_ID)
+#define crb_win_unlock(a)      \
+       qlcnic_pcie_sem_unlock((a), 7)
+
+#define __QLCNIC_MAX_LED_RATE  0xf
+#define __QLCNIC_MAX_LED_STATE 0x2
+
+#define MAX_CTL_CHECK 1000
+
+void qlcnic_prune_lb_filters(struct qlcnic_adapter *adapter);
+void qlcnic_delete_lb_filters(struct qlcnic_adapter *adapter);
+int qlcnic_dump_fw(struct qlcnic_adapter *);
+int qlcnic_enable_fw_dump_state(struct qlcnic_adapter *);
+bool qlcnic_check_fw_dump_state(struct qlcnic_adapter *);
+
+/* Functions from qlcnic_init.c */
+void qlcnic_schedule_work(struct qlcnic_adapter *, work_func_t, int);
+int qlcnic_load_firmware(struct qlcnic_adapter *adapter);
+int qlcnic_need_fw_reset(struct qlcnic_adapter *adapter);
+void qlcnic_request_firmware(struct qlcnic_adapter *adapter);
+void qlcnic_release_firmware(struct qlcnic_adapter *adapter);
+int qlcnic_pinit_from_rom(struct qlcnic_adapter *adapter);
+int qlcnic_setup_idc_param(struct qlcnic_adapter *adapter);
+int qlcnic_check_flash_fw_ver(struct qlcnic_adapter *adapter);
+
+int qlcnic_rom_fast_read(struct qlcnic_adapter *adapter, u32 addr, u32 *valp);
+int qlcnic_rom_fast_read_words(struct qlcnic_adapter *adapter, int addr,
+                               u8 *bytes, size_t size);
+int qlcnic_alloc_sw_resources(struct qlcnic_adapter *adapter);
+void qlcnic_free_sw_resources(struct qlcnic_adapter *adapter);
+
+void __iomem *qlcnic_get_ioaddr(struct qlcnic_hardware_context *, u32);
+
+int qlcnic_alloc_hw_resources(struct qlcnic_adapter *adapter);
+void qlcnic_free_hw_resources(struct qlcnic_adapter *adapter);
+
+int qlcnic_fw_create_ctx(struct qlcnic_adapter *adapter);
+void qlcnic_fw_destroy_ctx(struct qlcnic_adapter *adapter);
+
+void qlcnic_reset_rx_buffers_list(struct qlcnic_adapter *adapter);
+void qlcnic_release_rx_buffers(struct qlcnic_adapter *adapter);
+void qlcnic_release_tx_buffers(struct qlcnic_adapter *,
+                              struct qlcnic_host_tx_ring *);
+
+int qlcnic_check_fw_status(struct qlcnic_adapter *adapter);
+void qlcnic_watchdog_task(struct work_struct *work);
+void qlcnic_post_rx_buffers(struct qlcnic_adapter *adapter,
+               struct qlcnic_host_rds_ring *rds_ring, u8 ring_id);
+void qlcnic_set_multi(struct net_device *netdev);
+void qlcnic_flush_mcast_mac(struct qlcnic_adapter *);
+int qlcnic_nic_add_mac(struct qlcnic_adapter *, const u8 *, u16,
+                      enum qlcnic_mac_type);
+int qlcnic_nic_del_mac(struct qlcnic_adapter *, const u8 *);
+void qlcnic_82xx_free_mac_list(struct qlcnic_adapter *adapter);
+int qlcnic_82xx_read_phys_port_id(struct qlcnic_adapter *);
+
+int qlcnic_fw_cmd_set_mtu(struct qlcnic_adapter *adapter, int mtu);
+int qlcnic_fw_cmd_set_drv_version(struct qlcnic_adapter *, u32);
+int qlcnic_change_mtu(struct net_device *netdev, int new_mtu);
+netdev_features_t qlcnic_fix_features(struct net_device *netdev,
+       netdev_features_t features);
+int qlcnic_set_features(struct net_device *netdev, netdev_features_t features);
+int qlcnic_config_bridged_mode(struct qlcnic_adapter *adapter, u32 enable);
+void qlcnic_update_cmd_producer(struct qlcnic_host_tx_ring *);
+
+/* Functions from qlcnic_ethtool.c */
+int qlcnic_check_loopback_buff(unsigned char *, u8 []);
+int qlcnic_do_lb_test(struct qlcnic_adapter *, u8);
+
+/* Functions from qlcnic_main.c */
+int qlcnic_reset_context(struct qlcnic_adapter *);
+void qlcnic_diag_free_res(struct net_device *netdev, int);
+int qlcnic_diag_alloc_res(struct net_device *netdev, int);
+netdev_tx_t qlcnic_xmit_frame(struct sk_buff *, struct net_device *);
+void qlcnic_set_tx_ring_count(struct qlcnic_adapter *, u8);
+void qlcnic_set_sds_ring_count(struct qlcnic_adapter *, u8);
+int qlcnic_setup_rings(struct qlcnic_adapter *);
+int qlcnic_validate_rings(struct qlcnic_adapter *, __u32, int);
+void qlcnic_alloc_lb_filters_mem(struct qlcnic_adapter *adapter);
+int qlcnic_enable_msix(struct qlcnic_adapter *, u32);
+void qlcnic_set_drv_version(struct qlcnic_adapter *);
+
+/*  eSwitch management functions */
+int qlcnic_config_switch_port(struct qlcnic_adapter *,
+                               struct qlcnic_esw_func_cfg *);
+
+int qlcnic_get_eswitch_port_config(struct qlcnic_adapter *,
+                               struct qlcnic_esw_func_cfg *);
+int qlcnic_config_port_mirroring(struct qlcnic_adapter *, u8, u8, u8);
+int qlcnic_get_port_stats(struct qlcnic_adapter *, const u8, const u8,
+                                       struct __qlcnic_esw_statistics *);
+int qlcnic_get_eswitch_stats(struct qlcnic_adapter *, const u8, u8,
+                                       struct __qlcnic_esw_statistics *);
+int qlcnic_clear_esw_stats(struct qlcnic_adapter *adapter, u8, u8, u8);
+int qlcnic_get_mac_stats(struct qlcnic_adapter *, struct qlcnic_mac_statistics *);
+
+void qlcnic_free_mbx_args(struct qlcnic_cmd_args *cmd);
+
+int qlcnic_alloc_sds_rings(struct qlcnic_recv_context *, int);
+void qlcnic_free_sds_rings(struct qlcnic_recv_context *);
+void qlcnic_advert_link_change(struct qlcnic_adapter *, int);
+void qlcnic_free_tx_rings(struct qlcnic_adapter *);
+int qlcnic_alloc_tx_rings(struct qlcnic_adapter *, struct net_device *);
+void qlcnic_dump_mbx(struct qlcnic_adapter *, struct qlcnic_cmd_args *);
+
+void qlcnic_create_sysfs_entries(struct qlcnic_adapter *adapter);
+void qlcnic_remove_sysfs_entries(struct qlcnic_adapter *adapter);
+void qlcnic_82xx_add_sysfs(struct qlcnic_adapter *adapter);
+void qlcnic_82xx_remove_sysfs(struct qlcnic_adapter *adapter);
+
+int qlcnicvf_config_bridged_mode(struct qlcnic_adapter *, u32);
+int qlcnicvf_config_led(struct qlcnic_adapter *, u32, u32);
+void qlcnic_set_vlan_config(struct qlcnic_adapter *,
+                           struct qlcnic_esw_func_cfg *);
+void qlcnic_set_eswitch_port_features(struct qlcnic_adapter *,
+                                     struct qlcnic_esw_func_cfg *);
+int qlcnic_setup_tss_rss_intr(struct qlcnic_adapter *);
+void qlcnic_down(struct qlcnic_adapter *, struct net_device *);
+int qlcnic_up(struct qlcnic_adapter *, struct net_device *);
+void __qlcnic_down(struct qlcnic_adapter *, struct net_device *);
+void qlcnic_detach(struct qlcnic_adapter *);
+void qlcnic_teardown_intr(struct qlcnic_adapter *);
+int qlcnic_attach(struct qlcnic_adapter *);
+int __qlcnic_up(struct qlcnic_adapter *, struct net_device *);
+void qlcnic_restore_indev_addr(struct net_device *, unsigned long);
+
+int qlcnic_check_temp(struct qlcnic_adapter *);
+int qlcnic_init_pci_info(struct qlcnic_adapter *);
+int qlcnic_set_default_offload_settings(struct qlcnic_adapter *);
+int qlcnic_reset_npar_config(struct qlcnic_adapter *);
+int qlcnic_set_eswitch_port_config(struct qlcnic_adapter *);
+int qlcnic_83xx_configure_opmode(struct qlcnic_adapter *adapter);
+int qlcnic_read_mac_addr(struct qlcnic_adapter *);
+int qlcnic_setup_netdev(struct qlcnic_adapter *, struct net_device *, int);
+void qlcnic_set_netdev_features(struct qlcnic_adapter *,
+                               struct qlcnic_esw_func_cfg *);
+void qlcnic_sriov_vf_set_multi(struct net_device *);
+int qlcnic_is_valid_nic_func(struct qlcnic_adapter *, u8);
+int qlcnic_get_pci_func_type(struct qlcnic_adapter *, u16, u16 *, u16 *,
+                            u16 *);
+
+/*
+ * QLOGIC Board information
+ */
+
+#define QLCNIC_MAX_BOARD_NAME_LEN 100
+struct qlcnic_board_info {
+       unsigned short  vendor;
+       unsigned short  device;
+       unsigned short  sub_vendor;
+       unsigned short  sub_device;
+       char short_name[QLCNIC_MAX_BOARD_NAME_LEN];
+};
+
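+/* Free descriptor count on the Tx ring, accounting for producer/consumer
+ * wrap-around on the circular buffer.
+ */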
+static inline u32 qlcnic_tx_avail(struct qlcnic_host_tx_ring *tx_ring)
+{
+       if (likely(tx_ring->producer < tx_ring->sw_consumer))
+               return tx_ring->sw_consumer - tx_ring->producer;
+       else
+               return tx_ring->sw_consumer + tx_ring->num_desc -
+                               tx_ring->producer;
+}
+
+struct qlcnic_nic_template {
+       int (*config_bridged_mode) (struct qlcnic_adapter *, u32);
+       int (*config_led) (struct qlcnic_adapter *, u32, u32);
+       int (*start_firmware) (struct qlcnic_adapter *);
+       int (*init_driver) (struct qlcnic_adapter *);
+       void (*request_reset) (struct qlcnic_adapter *, u32);
+       void (*cancel_idc_work) (struct qlcnic_adapter *);
+       int (*napi_add)(struct qlcnic_adapter *, struct net_device *);
+       void (*napi_del)(struct qlcnic_adapter *);
+       void (*config_ipaddr)(struct qlcnic_adapter *, __be32, int);
+       irqreturn_t (*clear_legacy_intr)(struct qlcnic_adapter *);
+       int (*shutdown)(struct pci_dev *);
+       int (*resume)(struct qlcnic_adapter *);
+};
+
+struct qlcnic_mbx_ops {
+       int (*enqueue_cmd) (struct qlcnic_adapter *,
+                           struct qlcnic_cmd_args *, unsigned long *);
+       void (*dequeue_cmd) (struct qlcnic_adapter *, struct qlcnic_cmd_args *);
+       void (*decode_resp) (struct qlcnic_adapter *, struct qlcnic_cmd_args *);
+       void (*encode_cmd) (struct qlcnic_adapter *, struct qlcnic_cmd_args *);
+       void (*nofity_fw) (struct qlcnic_adapter *, u8);
+};
+
+int qlcnic_83xx_init_mailbox_work(struct qlcnic_adapter *);
+void qlcnic_83xx_detach_mailbox_work(struct qlcnic_adapter *);
+void qlcnic_83xx_reinit_mbx_work(struct qlcnic_mailbox *mbx);
+void qlcnic_83xx_free_mailbox(struct qlcnic_mailbox *mbx);
+void qlcnic_update_stats(struct qlcnic_adapter *);
+
+/* Adapter hardware abstraction */
+struct qlcnic_hardware_ops {
+       void (*read_crb) (struct qlcnic_adapter *, char *, loff_t, size_t);
+       void (*write_crb) (struct qlcnic_adapter *, char *, loff_t, size_t);
+       int (*read_reg) (struct qlcnic_adapter *, ulong, int *);
+       int (*write_reg) (struct qlcnic_adapter *, ulong, u32);
+       void (*get_ocm_win) (struct qlcnic_hardware_context *);
+       int (*get_mac_address) (struct qlcnic_adapter *, u8 *, u8);
+       int (*setup_intr) (struct qlcnic_adapter *);
+       int (*alloc_mbx_args)(struct qlcnic_cmd_args *,
+                             struct qlcnic_adapter *, u32);
+       int (*mbx_cmd) (struct qlcnic_adapter *, struct qlcnic_cmd_args *);
+       void (*get_func_no) (struct qlcnic_adapter *);
+       int (*api_lock) (struct qlcnic_adapter *);
+       void (*api_unlock) (struct qlcnic_adapter *);
+       void (*add_sysfs) (struct qlcnic_adapter *);
+       void (*remove_sysfs) (struct qlcnic_adapter *);
+       void (*process_lb_rcv_ring_diag) (struct qlcnic_host_sds_ring *);
+       int (*create_rx_ctx) (struct qlcnic_adapter *);
+       int (*create_tx_ctx) (struct qlcnic_adapter *,
+                             struct qlcnic_host_tx_ring *, int);
+       void (*del_rx_ctx) (struct qlcnic_adapter *);
+       void (*del_tx_ctx) (struct qlcnic_adapter *,
+                           struct qlcnic_host_tx_ring *);
+       int (*setup_link_event) (struct qlcnic_adapter *, int);
+       int (*get_nic_info) (struct qlcnic_adapter *, struct qlcnic_info *, u8);
+       int (*get_pci_info) (struct qlcnic_adapter *, struct qlcnic_pci_info *);
+       int (*set_nic_info) (struct qlcnic_adapter *, struct qlcnic_info *);
+       int (*change_macvlan) (struct qlcnic_adapter *, u8*, u16, u8);
+       void (*napi_enable) (struct qlcnic_adapter *);
+       void (*napi_disable) (struct qlcnic_adapter *);
+       int (*config_intr_coal) (struct qlcnic_adapter *,
+                                struct ethtool_coalesce *);
+       int (*config_rss) (struct qlcnic_adapter *, int);
+       int (*config_hw_lro) (struct qlcnic_adapter *, int);
+       int (*config_loopback) (struct qlcnic_adapter *, u8);
+       int (*clear_loopback) (struct qlcnic_adapter *, u8);
+       int (*config_promisc_mode) (struct qlcnic_adapter *, u32);
+       void (*change_l2_filter) (struct qlcnic_adapter *, u64 *, u16);
+       int (*get_board_info) (struct qlcnic_adapter *);
+       void (*set_mac_filter_count) (struct qlcnic_adapter *);
+       void (*free_mac_list) (struct qlcnic_adapter *);
+       int (*read_phys_port_id) (struct qlcnic_adapter *);
+       pci_ers_result_t (*io_error_detected) (struct pci_dev *,
+                                              pci_channel_state_t);
+       pci_ers_result_t (*io_slot_reset) (struct pci_dev *);
+       void (*io_resume) (struct pci_dev *);
+       void (*get_beacon_state)(struct qlcnic_adapter *);
+       void (*enable_sds_intr) (struct qlcnic_adapter *,
+                                struct qlcnic_host_sds_ring *);
+       void (*disable_sds_intr) (struct qlcnic_adapter *,
+                                 struct qlcnic_host_sds_ring *);
+       void (*enable_tx_intr) (struct qlcnic_adapter *,
+                               struct qlcnic_host_tx_ring *);
+       void (*disable_tx_intr) (struct qlcnic_adapter *,
+                                struct qlcnic_host_tx_ring *);
+       u32 (*get_saved_state)(void *, u32);
+       void (*set_saved_state)(void *, u32, u32);
+       void (*cache_tmpl_hdr_values)(struct qlcnic_fw_dump *);
+       u32 (*get_cap_size)(void *, int);
+       void (*set_sys_info)(void *, int, u32);
+       void (*store_cap_mask)(void *, u32);
+};
+
+extern struct qlcnic_nic_template qlcnic_vf_ops;
+
+static inline bool qlcnic_encap_tx_offload(struct qlcnic_adapter *adapter)
+{
+       return adapter->ahw->extra_capability[0] &
+              QLCNIC_83XX_FW_CAPAB_ENCAP_TX_OFFLOAD;
+}
+
+static inline bool qlcnic_encap_rx_offload(struct qlcnic_adapter *adapter)
+{
+       return adapter->ahw->extra_capability[0] &
+              QLCNIC_83XX_FW_CAPAB_ENCAP_RX_OFFLOAD;
+}
+
+static inline int qlcnic_start_firmware(struct qlcnic_adapter *adapter)
+{
+       return adapter->nic_ops->start_firmware(adapter);
+}
+
+static inline void qlcnic_read_crb(struct qlcnic_adapter *adapter, char *buf,
+                                  loff_t offset, size_t size)
+{
+       adapter->ahw->hw_ops->read_crb(adapter, buf, offset, size);
+}
+
+static inline void qlcnic_write_crb(struct qlcnic_adapter *adapter, char *buf,
+                                   loff_t offset, size_t size)
+{
+       adapter->ahw->hw_ops->write_crb(adapter, buf, offset, size);
+}
+
+static inline int qlcnic_hw_write_wx_2M(struct qlcnic_adapter *adapter,
+                                       ulong off, u32 data)
+{
+       return adapter->ahw->hw_ops->write_reg(adapter, off, data);
+}
+
+static inline int qlcnic_get_mac_address(struct qlcnic_adapter *adapter,
+                                        u8 *mac, u8 function)
+{
+       return adapter->ahw->hw_ops->get_mac_address(adapter, mac, function);
+}
+
+static inline int qlcnic_setup_intr(struct qlcnic_adapter *adapter)
+{
+       return adapter->ahw->hw_ops->setup_intr(adapter);
+}
+
+static inline int qlcnic_alloc_mbx_args(struct qlcnic_cmd_args *mbx,
+                                       struct qlcnic_adapter *adapter, u32 arg)
+{
+       return adapter->ahw->hw_ops->alloc_mbx_args(mbx, adapter, arg);
+}
+
+static inline int qlcnic_issue_cmd(struct qlcnic_adapter *adapter,
+                                  struct qlcnic_cmd_args *cmd)
+{
+       if (adapter->ahw->hw_ops->mbx_cmd)
+               return adapter->ahw->hw_ops->mbx_cmd(adapter, cmd);
+
+       return -EIO;
+}
+
+static inline void qlcnic_get_func_no(struct qlcnic_adapter *adapter)
+{
+       adapter->ahw->hw_ops->get_func_no(adapter);
+}
+
+static inline int qlcnic_api_lock(struct qlcnic_adapter *adapter)
+{
+       return adapter->ahw->hw_ops->api_lock(adapter);
+}
+
+static inline void qlcnic_api_unlock(struct qlcnic_adapter *adapter)
+{
+       adapter->ahw->hw_ops->api_unlock(adapter);
+}
+
+static inline void qlcnic_add_sysfs(struct qlcnic_adapter *adapter)
+{
+       if (adapter->ahw->hw_ops->add_sysfs)
+               adapter->ahw->hw_ops->add_sysfs(adapter);
+}
+
+static inline void qlcnic_remove_sysfs(struct qlcnic_adapter *adapter)
+{
+       if (adapter->ahw->hw_ops->remove_sysfs)
+               adapter->ahw->hw_ops->remove_sysfs(adapter);
+}
+
+static inline void
+qlcnic_process_rcv_ring_diag(struct qlcnic_host_sds_ring *sds_ring)
+{
+       sds_ring->adapter->ahw->hw_ops->process_lb_rcv_ring_diag(sds_ring);
+}
+
+static inline int qlcnic_fw_cmd_create_rx_ctx(struct qlcnic_adapter *adapter)
+{
+       return adapter->ahw->hw_ops->create_rx_ctx(adapter);
+}
+
+static inline int qlcnic_fw_cmd_create_tx_ctx(struct qlcnic_adapter *adapter,
+                                             struct qlcnic_host_tx_ring *ptr,
+                                             int ring)
+{
+       return adapter->ahw->hw_ops->create_tx_ctx(adapter, ptr, ring);
+}
+
+static inline void qlcnic_fw_cmd_del_rx_ctx(struct qlcnic_adapter *adapter)
+{
+       return adapter->ahw->hw_ops->del_rx_ctx(adapter);
+}
+
+static inline void qlcnic_fw_cmd_del_tx_ctx(struct qlcnic_adapter *adapter,
+                                           struct qlcnic_host_tx_ring *ptr)
+{
+       return adapter->ahw->hw_ops->del_tx_ctx(adapter, ptr);
+}
+
+static inline int qlcnic_linkevent_request(struct qlcnic_adapter *adapter,
+                                          int enable)
+{
+       return adapter->ahw->hw_ops->setup_link_event(adapter, enable);
+}
+
+static inline int qlcnic_get_nic_info(struct qlcnic_adapter *adapter,
+                                     struct qlcnic_info *info, u8 id)
+{
+       return adapter->ahw->hw_ops->get_nic_info(adapter, info, id);
+}
+
+static inline int qlcnic_get_pci_info(struct qlcnic_adapter *adapter,
+                                     struct qlcnic_pci_info *info)
+{
+       return adapter->ahw->hw_ops->get_pci_info(adapter, info);
+}
+
+static inline int qlcnic_set_nic_info(struct qlcnic_adapter *adapter,
+                                     struct qlcnic_info *info)
+{
+       return adapter->ahw->hw_ops->set_nic_info(adapter, info);
+}
+
+static inline int qlcnic_sre_macaddr_change(struct qlcnic_adapter *adapter,
+                                           u8 *addr, u16 id, u8 cmd)
+{
+       return adapter->ahw->hw_ops->change_macvlan(adapter, addr, id, cmd);
+}
+
+static inline int qlcnic_napi_add(struct qlcnic_adapter *adapter,
+                                 struct net_device *netdev)
+{
+       return adapter->nic_ops->napi_add(adapter, netdev);
+}
+
+static inline void qlcnic_napi_del(struct qlcnic_adapter *adapter)
+{
+       adapter->nic_ops->napi_del(adapter);
+}
+
+static inline void qlcnic_napi_enable(struct qlcnic_adapter *adapter)
+{
+       adapter->ahw->hw_ops->napi_enable(adapter);
+}
+
+static inline int __qlcnic_shutdown(struct pci_dev *pdev)
+{
+       struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
+
+       return adapter->nic_ops->shutdown(pdev);
+}
+
+static inline int __qlcnic_resume(struct qlcnic_adapter *adapter)
+{
+       return adapter->nic_ops->resume(adapter);
+}
+
+static inline void qlcnic_napi_disable(struct qlcnic_adapter *adapter)
+{
+       adapter->ahw->hw_ops->napi_disable(adapter);
+}
+
+static inline int qlcnic_config_intr_coalesce(struct qlcnic_adapter *adapter,
+                                             struct ethtool_coalesce *ethcoal)
+{
+       return adapter->ahw->hw_ops->config_intr_coal(adapter, ethcoal);
+}
+
+static inline int qlcnic_config_rss(struct qlcnic_adapter *adapter, int enable)
+{
+       return adapter->ahw->hw_ops->config_rss(adapter, enable);
+}
+
+static inline int qlcnic_config_hw_lro(struct qlcnic_adapter *adapter,
+                                      int enable)
+{
+       return adapter->ahw->hw_ops->config_hw_lro(adapter, enable);
+}
+
+static inline int qlcnic_set_lb_mode(struct qlcnic_adapter *adapter, u8 mode)
+{
+       return adapter->ahw->hw_ops->config_loopback(adapter, mode);
+}
+
+static inline int qlcnic_clear_lb_mode(struct qlcnic_adapter *adapter, u8 mode)
+{
+       return adapter->ahw->hw_ops->clear_loopback(adapter, mode);
+}
+
+static inline int qlcnic_nic_set_promisc(struct qlcnic_adapter *adapter,
+                                        u32 mode)
+{
+       return adapter->ahw->hw_ops->config_promisc_mode(adapter, mode);
+}
+
+static inline void qlcnic_change_filter(struct qlcnic_adapter *adapter,
+                                       u64 *addr, u16 id)
+{
+       adapter->ahw->hw_ops->change_l2_filter(adapter, addr, id);
+}
+
+static inline int qlcnic_get_board_info(struct qlcnic_adapter *adapter)
+{
+       return adapter->ahw->hw_ops->get_board_info(adapter);
+}
+
+static inline void qlcnic_free_mac_list(struct qlcnic_adapter *adapter)
+{
+       return adapter->ahw->hw_ops->free_mac_list(adapter);
+}
+
+static inline void qlcnic_set_mac_filter_count(struct qlcnic_adapter *adapter)
+{
+       if (adapter->ahw->hw_ops->set_mac_filter_count)
+               adapter->ahw->hw_ops->set_mac_filter_count(adapter);
+}
+
+static inline void qlcnic_get_beacon_state(struct qlcnic_adapter *adapter)
+{
+       adapter->ahw->hw_ops->get_beacon_state(adapter);
+}
+
+static inline void qlcnic_read_phys_port_id(struct qlcnic_adapter *adapter)
+{
+       if (adapter->ahw->hw_ops->read_phys_port_id)
+               adapter->ahw->hw_ops->read_phys_port_id(adapter);
+}
+
+static inline u32 qlcnic_get_saved_state(struct qlcnic_adapter *adapter,
+                                        void *t_hdr, u32 index)
+{
+       return adapter->ahw->hw_ops->get_saved_state(t_hdr, index);
+}
+
+static inline void qlcnic_set_saved_state(struct qlcnic_adapter *adapter,
+                                         void *t_hdr, u32 index, u32 value)
+{
+       adapter->ahw->hw_ops->set_saved_state(t_hdr, index, value);
+}
+
+static inline void qlcnic_cache_tmpl_hdr_values(struct qlcnic_adapter *adapter,
+                                               struct qlcnic_fw_dump *fw_dump)
+{
+       adapter->ahw->hw_ops->cache_tmpl_hdr_values(fw_dump);
+}
+
+static inline u32 qlcnic_get_cap_size(struct qlcnic_adapter *adapter,
+                                     void *tmpl_hdr, int index)
+{
+       return adapter->ahw->hw_ops->get_cap_size(tmpl_hdr, index);
+}
+
+static inline void qlcnic_set_sys_info(struct qlcnic_adapter *adapter,
+                                      void *tmpl_hdr, int idx, u32 value)
+{
+       adapter->ahw->hw_ops->set_sys_info(tmpl_hdr, idx, value);
+}
+
+static inline void qlcnic_store_cap_mask(struct qlcnic_adapter *adapter,
+                                        void *tmpl_hdr, u32 mask)
+{
+       adapter->ahw->hw_ops->store_cap_mask(tmpl_hdr, mask);
+}
+
+static inline void qlcnic_dev_request_reset(struct qlcnic_adapter *adapter,
+                                           u32 key)
+{
+       if (adapter->nic_ops->request_reset)
+               adapter->nic_ops->request_reset(adapter, key);
+}
+
+static inline void qlcnic_cancel_idc_work(struct qlcnic_adapter *adapter)
+{
+       if (adapter->nic_ops->cancel_idc_work)
+               adapter->nic_ops->cancel_idc_work(adapter);
+}
+
+static inline irqreturn_t
+qlcnic_clear_legacy_intr(struct qlcnic_adapter *adapter)
+{
+       return adapter->nic_ops->clear_legacy_intr(adapter);
+}
+
+static inline int qlcnic_config_led(struct qlcnic_adapter *adapter, u32 state,
+                                   u32 rate)
+{
+       return adapter->nic_ops->config_led(adapter, state, rate);
+}
+
+static inline void qlcnic_config_ipaddr(struct qlcnic_adapter *adapter,
+                                       __be32 ip, int cmd)
+{
+       adapter->nic_ops->config_ipaddr(adapter, ip, cmd);
+}
+
+static inline bool qlcnic_check_multi_tx(struct qlcnic_adapter *adapter)
+{
+       return test_bit(__QLCNIC_MULTI_TX_UNIQUE, &adapter->state);
+}
+
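+/* For the per-ring Tx interrupt mask CRB used below, writing 0 unmasks
+ * (enables) the interrupt and writing 1 masks (disables) it.
+ */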
+static inline void
+qlcnic_82xx_enable_tx_intr(struct qlcnic_adapter *adapter,
+                          struct qlcnic_host_tx_ring *tx_ring)
+{
+       if (qlcnic_check_multi_tx(adapter) &&
+           !adapter->ahw->diag_test)
+               writel(0x0, tx_ring->crb_intr_mask);
+}
+
+static inline void
+qlcnic_82xx_disable_tx_intr(struct qlcnic_adapter *adapter,
+                           struct qlcnic_host_tx_ring *tx_ring)
+{
+       if (qlcnic_check_multi_tx(adapter) &&
+           !adapter->ahw->diag_test)
+               writel(1, tx_ring->crb_intr_mask);
+}
+
+static inline void
+qlcnic_83xx_enable_tx_intr(struct qlcnic_adapter *adapter,
+                          struct qlcnic_host_tx_ring *tx_ring)
+{
+       writel(0, tx_ring->crb_intr_mask);
+}
+
+static inline void
+qlcnic_83xx_disable_tx_intr(struct qlcnic_adapter *adapter,
+                           struct qlcnic_host_tx_ring *tx_ring)
+{
+       writel(1, tx_ring->crb_intr_mask);
+}
+
+/* Enable MSI-X and INTx interrupts */
+static inline void
+qlcnic_83xx_enable_sds_intr(struct qlcnic_adapter *adapter,
+                           struct qlcnic_host_sds_ring *sds_ring)
+{
+       writel(0, sds_ring->crb_intr_mask);
+}
+
+/* Disable MSI-X and INTx interrupts */
+static inline void
+qlcnic_83xx_disable_sds_intr(struct qlcnic_adapter *adapter,
+                            struct qlcnic_host_sds_ring *sds_ring)
+{
+       writel(1, sds_ring->crb_intr_mask);
+}
+
+static inline void qlcnic_disable_multi_tx(struct qlcnic_adapter *adapter)
+{
+       test_and_clear_bit(__QLCNIC_MULTI_TX_UNIQUE, &adapter->state);
+       adapter->drv_tx_rings = QLCNIC_SINGLE_RING;
+}
+
+/* When operating in multi Tx mode, the driver needs to write 0x1
+ * to the source register (instead of 0x0) to disable receiving interrupts.
+ */
+static inline void
+qlcnic_82xx_disable_sds_intr(struct qlcnic_adapter *adapter,
+                            struct qlcnic_host_sds_ring *sds_ring)
+{
+       if (qlcnic_check_multi_tx(adapter) &&
+           !adapter->ahw->diag_test &&
+           (adapter->flags & QLCNIC_MSIX_ENABLED))
+               writel(0x1, sds_ring->crb_intr_mask);
+       else
+               writel(0, sds_ring->crb_intr_mask);
+}
+
+static inline void qlcnic_enable_sds_intr(struct qlcnic_adapter *adapter,
+                                         struct qlcnic_host_sds_ring *sds_ring)
+{
+       if (adapter->ahw->hw_ops->enable_sds_intr)
+               adapter->ahw->hw_ops->enable_sds_intr(adapter, sds_ring);
+}
+
+static inline void
+qlcnic_disable_sds_intr(struct qlcnic_adapter *adapter,
+                       struct qlcnic_host_sds_ring *sds_ring)
+{
+       if (adapter->ahw->hw_ops->disable_sds_intr)
+               adapter->ahw->hw_ops->disable_sds_intr(adapter, sds_ring);
+}
+
+static inline void qlcnic_enable_tx_intr(struct qlcnic_adapter *adapter,
+                                        struct qlcnic_host_tx_ring *tx_ring)
+{
+       if (adapter->ahw->hw_ops->enable_tx_intr)
+               adapter->ahw->hw_ops->enable_tx_intr(adapter, tx_ring);
+}
+
+static inline void qlcnic_disable_tx_intr(struct qlcnic_adapter *adapter,
+                                         struct qlcnic_host_tx_ring *tx_ring)
+{
+       if (adapter->ahw->hw_ops->disable_tx_intr)
+               adapter->ahw->hw_ops->disable_tx_intr(adapter, tx_ring);
+}
+
+/* When operating in multi Tx mode, the driver needs to write 0x0
+ * to the source register (instead of 0x1) to enable receiving interrupts.
+ */
+static inline void
+qlcnic_82xx_enable_sds_intr(struct qlcnic_adapter *adapter,
+                           struct qlcnic_host_sds_ring *sds_ring)
+{
+       if (qlcnic_check_multi_tx(adapter) &&
+           !adapter->ahw->diag_test &&
+           (adapter->flags & QLCNIC_MSIX_ENABLED))
+               writel(0, sds_ring->crb_intr_mask);
+       else
+               writel(0x1, sds_ring->crb_intr_mask);
+
+       if (!QLCNIC_IS_MSI_FAMILY(adapter))
+               writel(0xfbff, adapter->tgt_mask_reg);
+}
+
+static inline int qlcnic_get_diag_lock(struct qlcnic_adapter *adapter)
+{
+       return test_and_set_bit(__QLCNIC_DIAG_MODE, &adapter->state);
+}
+
+static inline void qlcnic_release_diag_lock(struct qlcnic_adapter *adapter)
+{
+       clear_bit(__QLCNIC_DIAG_MODE, &adapter->state);
+}
+
+static inline int qlcnic_check_diag_status(struct qlcnic_adapter *adapter)
+{
+       return test_bit(__QLCNIC_DIAG_MODE, &adapter->state);
+}
+
+extern const struct ethtool_ops qlcnic_sriov_vf_ethtool_ops;
+extern const struct ethtool_ops qlcnic_ethtool_ops;
+extern const struct ethtool_ops qlcnic_ethtool_failed_ops;
+
+#define QLCDB(adapter, lvl, _fmt, _args...) do {       \
+       if (NETIF_MSG_##lvl & adapter->ahw->msg_enable) \
+               printk(KERN_INFO "%s: %s: " _fmt,       \
+                        dev_name(&adapter->pdev->dev), \
+                       __func__, ##_args);             \
+       } while (0)
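+
+/* Illustrative use of QLCDB; the level name expands to NETIF_MSG_<lvl>,
+ * and the message and arguments here are placeholders:
+ *
+ *     QLCDB(adapter, DRV, "resetting context for ring %d\n", ring);
+ */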
+
+#define PCI_DEVICE_ID_QLOGIC_QLE824X           0x8020
+#define PCI_DEVICE_ID_QLOGIC_QLE834X           0x8030
+#define PCI_DEVICE_ID_QLOGIC_VF_QLE834X        0x8430
+#define PCI_DEVICE_ID_QLOGIC_QLE8830           0x8830
+#define PCI_DEVICE_ID_QLOGIC_VF_QLE8C30                0x8C30
+#define PCI_DEVICE_ID_QLOGIC_QLE844X           0x8040
+#define PCI_DEVICE_ID_QLOGIC_VF_QLE844X        0x8440
+
+static inline bool qlcnic_82xx_check(struct qlcnic_adapter *adapter)
+{
+       unsigned short device = adapter->pdev->device;
+       return (device == PCI_DEVICE_ID_QLOGIC_QLE824X) ? true : false;
+}
+
+static inline bool qlcnic_84xx_check(struct qlcnic_adapter *adapter)
+{
+       unsigned short device = adapter->pdev->device;
+
+       return ((device == PCI_DEVICE_ID_QLOGIC_QLE844X) ||
+               (device == PCI_DEVICE_ID_QLOGIC_VF_QLE844X)) ? true : false;
+}
+
+static inline bool qlcnic_83xx_check(struct qlcnic_adapter *adapter)
+{
+       unsigned short device = adapter->pdev->device;
+       bool status;
+
+       status = ((device == PCI_DEVICE_ID_QLOGIC_QLE834X) ||
+                 (device == PCI_DEVICE_ID_QLOGIC_QLE8830) ||
+                 (device == PCI_DEVICE_ID_QLOGIC_QLE844X) ||
+                 (device == PCI_DEVICE_ID_QLOGIC_VF_QLE844X) ||
+                 (device == PCI_DEVICE_ID_QLOGIC_VF_QLE834X) ||
+                 (device == PCI_DEVICE_ID_QLOGIC_VF_QLE8C30)) ? true : false;
+
+       return status;
+}
+
+static inline bool qlcnic_sriov_pf_check(struct qlcnic_adapter *adapter)
+{
+       return (adapter->ahw->op_mode == QLCNIC_SRIOV_PF_FUNC) ? true : false;
+}
+
+static inline bool qlcnic_sriov_vf_check(struct qlcnic_adapter *adapter)
+{
+       unsigned short device = adapter->pdev->device;
+       bool status;
+
+       status = ((device == PCI_DEVICE_ID_QLOGIC_VF_QLE834X) ||
+                 (device == PCI_DEVICE_ID_QLOGIC_VF_QLE844X) ||
+                 (device == PCI_DEVICE_ID_QLOGIC_VF_QLE8C30)) ? true : false;
+
+       return status;
+}
+
+static inline bool qlcnic_83xx_pf_check(struct qlcnic_adapter *adapter)
+{
+       unsigned short device = adapter->pdev->device;
+
+       return (device == PCI_DEVICE_ID_QLOGIC_QLE834X) ? true : false;
+}
+
+static inline bool qlcnic_83xx_vf_check(struct qlcnic_adapter *adapter)
+{
+       unsigned short device = adapter->pdev->device;
+
+       return ((device == PCI_DEVICE_ID_QLOGIC_VF_QLE834X) ||
+               (device == PCI_DEVICE_ID_QLOGIC_VF_QLE8C30)) ? true : false;
+}
+
+static inline bool qlcnic_sriov_check(struct qlcnic_adapter *adapter)
+{
+       bool status;
+
+       status = (qlcnic_sriov_pf_check(adapter) ||
+                 qlcnic_sriov_vf_check(adapter)) ? true : false;
+
+       return status;
+}
+
+static inline u32 qlcnic_get_vnic_func_count(struct qlcnic_adapter *adapter)
+{
+       if (qlcnic_84xx_check(adapter))
+               return QLC_84XX_VNIC_COUNT;
+       else
+               return QLC_DEFAULT_VNIC_COUNT;
+}
+
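+/* Byte-swap a buffer of 32-bit words in place; compiles to a no-op on
+ * little-endian hosts.
+ */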
+static inline void qlcnic_swap32_buffer(u32 *buffer, int count)
+{
+#if defined(__BIG_ENDIAN)
+       u32 *tmp = buffer;
+       int i;
+
+       for (i = 0; i < count; i++) {
+               *tmp = swab32(*tmp);
+               tmp++;
+       }
+#endif
+}
+
+#ifdef CONFIG_QLCNIC_HWMON
+void qlcnic_register_hwmon_dev(struct qlcnic_adapter *);
+void qlcnic_unregister_hwmon_dev(struct qlcnic_adapter *);
+#else
+static inline void qlcnic_register_hwmon_dev(struct qlcnic_adapter *adapter)
+{
+       return;
+}
+static inline void qlcnic_unregister_hwmon_dev(struct qlcnic_adapter *adapter)
+{
+       return;
+}
+#endif
+#endif                         /* __QLCNIC_H_ */
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
new file mode 100644 (file)
index 0000000..bdbcd2b
--- /dev/null
@@ -0,0 +1,4195 @@
+/*
+ * QLogic qlcnic NIC Driver
+ * Copyright (c) 2009-2013 QLogic Corporation
+ *
+ * See LICENSE.qlcnic for copyright and licensing details.
+ */
+
+#include <linux/if_vlan.h>
+#include <linux/ipv6.h>
+#include <linux/ethtool.h>
+#include <linux/interrupt.h>
+#include <linux/aer.h>
+
+#include "qlcnic.h"
+#include "qlcnic_sriov.h"
+
+static void __qlcnic_83xx_process_aen(struct qlcnic_adapter *);
+static int qlcnic_83xx_clear_lb_mode(struct qlcnic_adapter *, u8);
+static void qlcnic_83xx_configure_mac(struct qlcnic_adapter *, u8 *, u8,
+                                     struct qlcnic_cmd_args *);
+static int qlcnic_83xx_get_port_config(struct qlcnic_adapter *);
+static irqreturn_t qlcnic_83xx_handle_aen(int, void *);
+static pci_ers_result_t qlcnic_83xx_io_error_detected(struct pci_dev *,
+                                                     pci_channel_state_t);
+static int qlcnic_83xx_set_port_config(struct qlcnic_adapter *);
+static pci_ers_result_t qlcnic_83xx_io_slot_reset(struct pci_dev *);
+static void qlcnic_83xx_io_resume(struct pci_dev *);
+static int qlcnic_83xx_set_lb_mode(struct qlcnic_adapter *, u8);
+static void qlcnic_83xx_set_mac_filter_count(struct qlcnic_adapter *);
+static int qlcnic_83xx_resume(struct qlcnic_adapter *);
+static int qlcnic_83xx_shutdown(struct pci_dev *);
+static void qlcnic_83xx_get_beacon_state(struct qlcnic_adapter *);
+
+#define RSS_HASHTYPE_IP_TCP            0x3
+#define QLC_83XX_FW_MBX_CMD            0
+#define QLC_SKIP_INACTIVE_PCI_REGS     7
+#define QLC_MAX_LEGACY_FUNC_SUPP       8
+
+/* 83xx Module type */
+#define QLC_83XX_MODULE_FIBRE_10GBASE_LRM      0x1 /* 10GBase-LRM */
+#define QLC_83XX_MODULE_FIBRE_10GBASE_LR       0x2 /* 10GBase-LR */
+#define QLC_83XX_MODULE_FIBRE_10GBASE_SR       0x3 /* 10GBase-SR */
+#define QLC_83XX_MODULE_DA_10GE_PASSIVE_CP     0x4 /* 10GE passive
+                                                    * copper (compliant)
+                                                    */
+#define QLC_83XX_MODULE_DA_10GE_ACTIVE_CP      0x5 /* 10GE active limiting
+                                                    * copper (compliant)
+                                                    */
+#define QLC_83XX_MODULE_DA_10GE_LEGACY_CP      0x6 /* 10GE passive copper
+                                                    * (legacy, best effort)
+                                                    */
+#define QLC_83XX_MODULE_FIBRE_1000BASE_SX      0x7 /* 1000Base-SX */
+#define QLC_83XX_MODULE_FIBRE_1000BASE_LX      0x8 /* 1000Base-LX */
+#define QLC_83XX_MODULE_FIBRE_1000BASE_CX      0x9 /* 1000Base-CX */
+#define QLC_83XX_MODULE_TP_1000BASE_T          0xa /* 1000Base-T */
+#define QLC_83XX_MODULE_DA_1GE_PASSIVE_CP      0xb /* 1GE passive copper
+                                                    * (legacy, best effort)
+                                                    */
+#define QLC_83XX_MODULE_UNKNOWN                        0xf /* Unknown module type */
+
+/* Port types */
+#define QLC_83XX_10_CAPABLE     BIT_8
+#define QLC_83XX_100_CAPABLE    BIT_9
+#define QLC_83XX_1G_CAPABLE     BIT_10
+#define QLC_83XX_10G_CAPABLE    BIT_11
+#define QLC_83XX_AUTONEG_ENABLE         BIT_15
+
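+/* Mailbox command descriptors: each entry lists the command opcode followed
+ * by the request and response argument word counts used to size the mailbox
+ * buffers for that command.
+ */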
+static const struct qlcnic_mailbox_metadata qlcnic_83xx_mbx_tbl[] = {
+       {QLCNIC_CMD_CONFIGURE_IP_ADDR, 6, 1},
+       {QLCNIC_CMD_CONFIG_INTRPT, 18, 34},
+       {QLCNIC_CMD_CREATE_RX_CTX, 136, 27},
+       {QLCNIC_CMD_DESTROY_RX_CTX, 2, 1},
+       {QLCNIC_CMD_CREATE_TX_CTX, 54, 18},
+       {QLCNIC_CMD_DESTROY_TX_CTX, 2, 1},
+       {QLCNIC_CMD_CONFIGURE_MAC_LEARNING, 2, 1},
+       {QLCNIC_CMD_INTRPT_TEST, 22, 12},
+       {QLCNIC_CMD_SET_MTU, 3, 1},
+       {QLCNIC_CMD_READ_PHY, 4, 2},
+       {QLCNIC_CMD_WRITE_PHY, 5, 1},
+       {QLCNIC_CMD_READ_HW_REG, 4, 1},
+       {QLCNIC_CMD_GET_FLOW_CTL, 4, 2},
+       {QLCNIC_CMD_SET_FLOW_CTL, 4, 1},
+       {QLCNIC_CMD_READ_MAX_MTU, 4, 2},
+       {QLCNIC_CMD_READ_MAX_LRO, 4, 2},
+       {QLCNIC_CMD_MAC_ADDRESS, 4, 3},
+       {QLCNIC_CMD_GET_PCI_INFO, 1, 129},
+       {QLCNIC_CMD_GET_NIC_INFO, 2, 19},
+       {QLCNIC_CMD_SET_NIC_INFO, 32, 1},
+       {QLCNIC_CMD_GET_ESWITCH_CAPABILITY, 4, 3},
+       {QLCNIC_CMD_TOGGLE_ESWITCH, 4, 1},
+       {QLCNIC_CMD_GET_ESWITCH_STATUS, 4, 3},
+       {QLCNIC_CMD_SET_PORTMIRRORING, 4, 1},
+       {QLCNIC_CMD_CONFIGURE_ESWITCH, 4, 1},
+       {QLCNIC_CMD_GET_ESWITCH_PORT_CONFIG, 4, 3},
+       {QLCNIC_CMD_GET_ESWITCH_STATS, 5, 1},
+       {QLCNIC_CMD_CONFIG_PORT, 4, 1},
+       {QLCNIC_CMD_TEMP_SIZE, 1, 4},
+       {QLCNIC_CMD_GET_TEMP_HDR, 5, 5},
+       {QLCNIC_CMD_GET_LINK_EVENT, 2, 1},
+       {QLCNIC_CMD_CONFIG_MAC_VLAN, 4, 3},
+       {QLCNIC_CMD_CONFIG_INTR_COAL, 6, 1},
+       {QLCNIC_CMD_CONFIGURE_RSS, 14, 1},
+       {QLCNIC_CMD_CONFIGURE_LED, 2, 1},
+       {QLCNIC_CMD_CONFIGURE_MAC_RX_MODE, 2, 1},
+       {QLCNIC_CMD_CONFIGURE_HW_LRO, 2, 1},
+       {QLCNIC_CMD_GET_STATISTICS, 2, 80},
+       {QLCNIC_CMD_SET_PORT_CONFIG, 2, 1},
+       {QLCNIC_CMD_GET_PORT_CONFIG, 2, 2},
+       {QLCNIC_CMD_GET_LINK_STATUS, 2, 4},
+       {QLCNIC_CMD_IDC_ACK, 5, 1},
+       {QLCNIC_CMD_INIT_NIC_FUNC, 3, 1},
+       {QLCNIC_CMD_STOP_NIC_FUNC, 2, 1},
+       {QLCNIC_CMD_SET_LED_CONFIG, 5, 1},
+       {QLCNIC_CMD_GET_LED_CONFIG, 1, 5},
+       {QLCNIC_CMD_83XX_SET_DRV_VER, 4, 1},
+       {QLCNIC_CMD_ADD_RCV_RINGS, 130, 26},
+       {QLCNIC_CMD_CONFIG_VPORT, 4, 4},
+       {QLCNIC_CMD_BC_EVENT_SETUP, 2, 1},
+       {QLCNIC_CMD_DCB_QUERY_CAP, 1, 2},
+       {QLCNIC_CMD_DCB_QUERY_PARAM, 1, 50},
+       {QLCNIC_CMD_SET_INGRESS_ENCAP, 2, 1},
+       {QLCNIC_CMD_83XX_EXTEND_ISCSI_DUMP_CAP, 4, 1},
+};
+
+const u32 qlcnic_83xx_ext_reg_tbl[] = {
+       0x38CC,         /* Global Reset */
+       0x38F0,         /* Wildcard */
+       0x38FC,         /* Informant */
+       0x3038,         /* Host MBX ctrl */
+       0x303C,         /* FW MBX ctrl */
+       0x355C,         /* BOOT LOADER ADDRESS REG */
+       0x3560,         /* BOOT LOADER SIZE REG */
+       0x3564,         /* FW IMAGE ADDR REG */
+       0x1000,         /* MBX intr enable */
+       0x1200,         /* Default Intr mask */
+       0x1204,         /* Default Interrupt ID */
+       0x3780,         /* QLC_83XX_IDC_MAJ_VERSION */
+       0x3784,         /* QLC_83XX_IDC_DEV_STATE */
+       0x3788,         /* QLC_83XX_IDC_DRV_PRESENCE */
+       0x378C,         /* QLC_83XX_IDC_DRV_ACK */
+       0x3790,         /* QLC_83XX_IDC_CTRL */
+       0x3794,         /* QLC_83XX_IDC_DRV_AUDIT */
+       0x3798,         /* QLC_83XX_IDC_MIN_VERSION */
+       0x379C,         /* QLC_83XX_RECOVER_DRV_LOCK */
+       0x37A0,         /* QLC_83XX_IDC_PF_0 */
+       0x37A4,         /* QLC_83XX_IDC_PF_1 */
+       0x37A8,         /* QLC_83XX_IDC_PF_2 */
+       0x37AC,         /* QLC_83XX_IDC_PF_3 */
+       0x37B0,         /* QLC_83XX_IDC_PF_4 */
+       0x37B4,         /* QLC_83XX_IDC_PF_5 */
+       0x37B8,         /* QLC_83XX_IDC_PF_6 */
+       0x37BC,         /* QLC_83XX_IDC_PF_7 */
+       0x37C0,         /* QLC_83XX_IDC_PF_8 */
+       0x37C4,         /* QLC_83XX_IDC_PF_9 */
+       0x37C8,         /* QLC_83XX_IDC_PF_10 */
+       0x37CC,         /* QLC_83XX_IDC_PF_11 */
+       0x37D0,         /* QLC_83XX_IDC_PF_12 */
+       0x37D4,         /* QLC_83XX_IDC_PF_13 */
+       0x37D8,         /* QLC_83XX_IDC_PF_14 */
+       0x37DC,         /* QLC_83XX_IDC_PF_15 */
+       0x37E0,         /* QLC_83XX_IDC_DEV_PARTITION_INFO_1 */
+       0x37E4,         /* QLC_83XX_IDC_DEV_PARTITION_INFO_2 */
+       0x37F0,         /* QLC_83XX_DRV_OP_MODE */
+       0x37F4,         /* QLC_83XX_VNIC_STATE */
+       0x3868,         /* QLC_83XX_DRV_LOCK */
+       0x386C,         /* QLC_83XX_DRV_UNLOCK */
+       0x3504,         /* QLC_83XX_DRV_LOCK_ID */
+       0x34A4,         /* QLC_83XX_ASIC_TEMP */
+};
+
+const u32 qlcnic_83xx_reg_tbl[] = {
+       0x34A8,         /* PEG_HALT_STAT1 */
+       0x34AC,         /* PEG_HALT_STAT2 */
+       0x34B0,         /* FW_HEARTBEAT */
+       0x3500,         /* FLASH LOCK_ID */
+       0x3528,         /* FW_CAPABILITIES */
+       0x3538,         /* Driver active, DRV_REG0 */
+       0x3540,         /* Device state, DRV_REG1 */
+       0x3544,         /* Driver state, DRV_REG2 */
+       0x3548,         /* Driver scratch, DRV_REG3 */
+       0x354C,         /* Device partition info, DRV_REG4 */
+       0x3524,         /* Driver IDC ver, DRV_REG5 */
+       0x3550,         /* FW_VER_MAJOR */
+       0x3554,         /* FW_VER_MINOR */
+       0x3558,         /* FW_VER_SUB */
+       0x359C,         /* NPAR STATE */
+       0x35FC,         /* FW_IMG_VALID */
+       0x3650,         /* CMD_PEG_STATE */
+       0x373C,         /* RCV_PEG_STATE */
+       0x37B4,         /* ASIC TEMP */
+       0x356C,         /* FW API */
+       0x3570,         /* DRV OP MODE */
+       0x3850,         /* FLASH LOCK */
+       0x3854,         /* FLASH UNLOCK */
+};
+
+static struct qlcnic_hardware_ops qlcnic_83xx_hw_ops = {
+       .read_crb                       = qlcnic_83xx_read_crb,
+       .write_crb                      = qlcnic_83xx_write_crb,
+       .read_reg                       = qlcnic_83xx_rd_reg_indirect,
+       .write_reg                      = qlcnic_83xx_wrt_reg_indirect,
+       .get_mac_address                = qlcnic_83xx_get_mac_address,
+       .setup_intr                     = qlcnic_83xx_setup_intr,
+       .alloc_mbx_args                 = qlcnic_83xx_alloc_mbx_args,
+       .mbx_cmd                        = qlcnic_83xx_issue_cmd,
+       .get_func_no                    = qlcnic_83xx_get_func_no,
+       .api_lock                       = qlcnic_83xx_cam_lock,
+       .api_unlock                     = qlcnic_83xx_cam_unlock,
+       .add_sysfs                      = qlcnic_83xx_add_sysfs,
+       .remove_sysfs                   = qlcnic_83xx_remove_sysfs,
+       .process_lb_rcv_ring_diag       = qlcnic_83xx_process_rcv_ring_diag,
+       .create_rx_ctx                  = qlcnic_83xx_create_rx_ctx,
+       .create_tx_ctx                  = qlcnic_83xx_create_tx_ctx,
+       .del_rx_ctx                     = qlcnic_83xx_del_rx_ctx,
+       .del_tx_ctx                     = qlcnic_83xx_del_tx_ctx,
+       .setup_link_event               = qlcnic_83xx_setup_link_event,
+       .get_nic_info                   = qlcnic_83xx_get_nic_info,
+       .get_pci_info                   = qlcnic_83xx_get_pci_info,
+       .set_nic_info                   = qlcnic_83xx_set_nic_info,
+       .change_macvlan                 = qlcnic_83xx_sre_macaddr_change,
+       .napi_enable                    = qlcnic_83xx_napi_enable,
+       .napi_disable                   = qlcnic_83xx_napi_disable,
+       .config_intr_coal               = qlcnic_83xx_config_intr_coal,
+       .config_rss                     = qlcnic_83xx_config_rss,
+       .config_hw_lro                  = qlcnic_83xx_config_hw_lro,
+       .config_promisc_mode            = qlcnic_83xx_nic_set_promisc,
+       .change_l2_filter               = qlcnic_83xx_change_l2_filter,
+       .get_board_info                 = qlcnic_83xx_get_port_info,
+       .set_mac_filter_count           = qlcnic_83xx_set_mac_filter_count,
+       .free_mac_list                  = qlcnic_82xx_free_mac_list,
+       .io_error_detected              = qlcnic_83xx_io_error_detected,
+       .io_slot_reset                  = qlcnic_83xx_io_slot_reset,
+       .io_resume                      = qlcnic_83xx_io_resume,
+       .get_beacon_state               = qlcnic_83xx_get_beacon_state,
+       .enable_sds_intr                = qlcnic_83xx_enable_sds_intr,
+       .disable_sds_intr               = qlcnic_83xx_disable_sds_intr,
+       .enable_tx_intr                 = qlcnic_83xx_enable_tx_intr,
+       .disable_tx_intr                = qlcnic_83xx_disable_tx_intr,
+       .get_saved_state                = qlcnic_83xx_get_saved_state,
+       .set_saved_state                = qlcnic_83xx_set_saved_state,
+       .cache_tmpl_hdr_values          = qlcnic_83xx_cache_tmpl_hdr_values,
+       .get_cap_size                   = qlcnic_83xx_get_cap_size,
+       .set_sys_info                   = qlcnic_83xx_set_sys_info,
+       .store_cap_mask                 = qlcnic_83xx_store_cap_mask,
+};
+
+static struct qlcnic_nic_template qlcnic_83xx_ops = {
+       .config_bridged_mode    = qlcnic_config_bridged_mode,
+       .config_led             = qlcnic_config_led,
+       .request_reset          = qlcnic_83xx_idc_request_reset,
+       .cancel_idc_work        = qlcnic_83xx_idc_exit,
+       .napi_add               = qlcnic_83xx_napi_add,
+       .napi_del               = qlcnic_83xx_napi_del,
+       .config_ipaddr          = qlcnic_83xx_config_ipaddr,
+       .clear_legacy_intr      = qlcnic_83xx_clear_legacy_intr,
+       .shutdown               = qlcnic_83xx_shutdown,
+       .resume                 = qlcnic_83xx_resume,
+};
+
+void qlcnic_83xx_register_map(struct qlcnic_hardware_context *ahw)
+{
+       ahw->hw_ops             = &qlcnic_83xx_hw_ops;
+       ahw->reg_tbl            = (u32 *)qlcnic_83xx_reg_tbl;
+       ahw->ext_reg_tbl        = (u32 *)qlcnic_83xx_ext_reg_tbl;
+}
+
+int qlcnic_83xx_get_fw_version(struct qlcnic_adapter *adapter)
+{
+       u32 fw_major, fw_minor, fw_build;
+       struct pci_dev *pdev = adapter->pdev;
+
+       fw_major = QLC_SHARED_REG_RD32(adapter, QLCNIC_FW_VERSION_MAJOR);
+       fw_minor = QLC_SHARED_REG_RD32(adapter, QLCNIC_FW_VERSION_MINOR);
+       fw_build = QLC_SHARED_REG_RD32(adapter, QLCNIC_FW_VERSION_SUB);
+       adapter->fw_version = QLCNIC_VERSION_CODE(fw_major, fw_minor, fw_build);
+
+       dev_info(&pdev->dev, "Driver v%s, firmware version %d.%d.%d\n",
+                QLCNIC_LINUX_VERSIONID, fw_major, fw_minor, fw_build);
+
+       return adapter->fw_version;
+}
+
+static int __qlcnic_set_win_base(struct qlcnic_adapter *adapter, u32 addr)
+{
+       void __iomem *base;
+       u32 val;
+
+       base = adapter->ahw->pci_base0 +
+              QLC_83XX_CRB_WIN_FUNC(adapter->ahw->pci_func);
+       writel(addr, base);
+       val = readl(base);
+       if (val != addr)
+               return -EIO;
+
+       return 0;
+}
+
+int qlcnic_83xx_rd_reg_indirect(struct qlcnic_adapter *adapter, ulong addr,
+                               int *err)
+{
+       struct qlcnic_hardware_context *ahw = adapter->ahw;
+
+       *err = __qlcnic_set_win_base(adapter, (u32) addr);
+       if (!*err) {
+               return QLCRDX(ahw, QLCNIC_WILDCARD);
+       } else {
+               dev_err(&adapter->pdev->dev,
+                       "%s failed, addr = 0x%lx\n", __func__, addr);
+               return -EIO;
+       }
+}
+
+int qlcnic_83xx_wrt_reg_indirect(struct qlcnic_adapter *adapter, ulong addr,
+                                u32 data)
+{
+       int err;
+       struct qlcnic_hardware_context *ahw = adapter->ahw;
+
+       err = __qlcnic_set_win_base(adapter, (u32) addr);
+       if (!err) {
+               QLCWRX(ahw, QLCNIC_WILDCARD, data);
+               return 0;
+       } else {
+               dev_err(&adapter->pdev->dev,
+                       "%s failed, addr = 0x%x data = 0x%x\n",
+                       __func__, (int)addr, data);
+               return err;
+       }
+}
+
+static void qlcnic_83xx_enable_legacy(struct qlcnic_adapter *adapter)
+{
+       struct qlcnic_hardware_context *ahw = adapter->ahw;
+
+       /* MSI-X enablement failed, use legacy interrupt */
+       adapter->tgt_status_reg = ahw->pci_base0 + QLC_83XX_INTX_PTR;
+       adapter->tgt_mask_reg = ahw->pci_base0 + QLC_83XX_INTX_MASK;
+       adapter->isr_int_vec = ahw->pci_base0 + QLC_83XX_INTX_TRGR;
+       adapter->msix_entries[0].vector = adapter->pdev->irq;
+       dev_info(&adapter->pdev->dev, "using legacy interrupt\n");
+}
+
+static int qlcnic_83xx_calculate_msix_vector(struct qlcnic_adapter *adapter)
+{
+       int num_msix;
+
+       num_msix = adapter->drv_sds_rings;
+
+       /* account for AEN interrupt MSI-X based interrupts */
+       num_msix += 1;
+
+       if (!(adapter->flags & QLCNIC_TX_INTR_SHARED))
+               num_msix += adapter->drv_tx_rings;
+
+       return num_msix;
+}
+
+int qlcnic_83xx_setup_intr(struct qlcnic_adapter *adapter)
+{
+       struct qlcnic_hardware_context *ahw = adapter->ahw;
+       int err, i, num_msix;
+
+       if (adapter->flags & QLCNIC_TSS_RSS) {
+               err = qlcnic_setup_tss_rss_intr(adapter);
+               if (err < 0)
+                       return err;
+               num_msix = ahw->num_msix;
+       } else {
+               num_msix = qlcnic_83xx_calculate_msix_vector(adapter);
+
+               err = qlcnic_enable_msix(adapter, num_msix);
+               if (err == -ENOMEM)
+                       return err;
+
+               if (adapter->flags & QLCNIC_MSIX_ENABLED) {
+                       num_msix = ahw->num_msix;
+               } else {
+                       if (qlcnic_sriov_vf_check(adapter))
+                               return -EINVAL;
+                       num_msix = 1;
+                       adapter->drv_sds_rings = QLCNIC_SINGLE_RING;
+                       adapter->drv_tx_rings = QLCNIC_SINGLE_RING;
+               }
+       }
+
+       /* setup interrupt mapping table for fw */
+       ahw->intr_tbl = vzalloc(num_msix *
+                               sizeof(struct qlcnic_intrpt_config));
+       if (!ahw->intr_tbl)
+               return -ENOMEM;
+
+       if (!(adapter->flags & QLCNIC_MSIX_ENABLED)) {
+               if (adapter->ahw->pci_func >= QLC_MAX_LEGACY_FUNC_SUPP) {
+                       dev_err(&adapter->pdev->dev, "PCI function number 8 and higher are not supported with legacy interrupt, func 0x%x\n",
+                               ahw->pci_func);
+                       return -EOPNOTSUPP;
+               }
+
+               qlcnic_83xx_enable_legacy(adapter);
+       }
+
+       for (i = 0; i < num_msix; i++) {
+               if (adapter->flags & QLCNIC_MSIX_ENABLED)
+                       ahw->intr_tbl[i].type = QLCNIC_INTRPT_MSIX;
+               else
+                       ahw->intr_tbl[i].type = QLCNIC_INTRPT_INTX;
+               ahw->intr_tbl[i].id = i;
+               ahw->intr_tbl[i].src = 0;
+       }
+
+       return 0;
+}
+
+static inline void qlcnic_83xx_clear_legacy_intr_mask(struct qlcnic_adapter *adapter)
+{
+       writel(0, adapter->tgt_mask_reg);
+}
+
+static inline void qlcnic_83xx_set_legacy_intr_mask(struct qlcnic_adapter *adapter)
+{
+       if (adapter->tgt_mask_reg)
+               writel(1, adapter->tgt_mask_reg);
+}
+
+static inline void qlcnic_83xx_enable_legacy_msix_mbx_intr(struct qlcnic_adapter
+                                                   *adapter)
+{
+       u32 mask;
+
+       /* The mailbox interrupt in MSI-X mode and the legacy interrupt share
+        * the same source register. We may get here before the contexts are
+        * created and sds_ring->crb_intr_mask is initialized, so calculate the
+        * BAR offset of the interrupt source register directly.
+        */
+       mask = QLCRDX(adapter->ahw, QLCNIC_DEF_INT_MASK);
+       writel(0, adapter->ahw->pci_base0 + mask);
+}
+
+void qlcnic_83xx_disable_mbx_intr(struct qlcnic_adapter *adapter)
+{
+       u32 mask;
+
+       mask = QLCRDX(adapter->ahw, QLCNIC_DEF_INT_MASK);
+       writel(1, adapter->ahw->pci_base0 + mask);
+       QLCWRX(adapter->ahw, QLCNIC_MBX_INTR_ENBL, 0);
+}
+
+static inline void qlcnic_83xx_get_mbx_data(struct qlcnic_adapter *adapter,
+                                    struct qlcnic_cmd_args *cmd)
+{
+       int i;
+
+       if (cmd->op_type == QLC_83XX_MBX_POST_BC_OP)
+               return;
+
+       for (i = 0; i < cmd->rsp.num; i++)
+               cmd->rsp.arg[i] = readl(QLCNIC_MBX_FW(adapter->ahw, i));
+}
+
+irqreturn_t qlcnic_83xx_clear_legacy_intr(struct qlcnic_adapter *adapter)
+{
+       u32 intr_val;
+       struct qlcnic_hardware_context *ahw = adapter->ahw;
+       int retries = 0;
+
+       intr_val = readl(adapter->tgt_status_reg);
+
+       if (!QLC_83XX_VALID_INTX_BIT31(intr_val))
+               return IRQ_NONE;
+
+       if (QLC_83XX_INTX_FUNC(intr_val) != adapter->ahw->pci_func) {
+               adapter->stats.spurious_intr++;
+               return IRQ_NONE;
+       }
+       /* The write barrier is required to ensure the register writes below are ordered */
+       wmb();
+
+       /* clear the interrupt trigger control register */
+       writel(0, adapter->isr_int_vec);
+       intr_val = readl(adapter->isr_int_vec);
+       do {
+               intr_val = readl(adapter->tgt_status_reg);
+               if (QLC_83XX_INTX_FUNC(intr_val) != ahw->pci_func)
+                       break;
+               retries++;
+       } while (QLC_83XX_VALID_INTX_BIT30(intr_val) &&
+                (retries < QLC_83XX_LEGACY_INTX_MAX_RETRY));
+
+       return IRQ_HANDLED;
+}
+
+static inline void qlcnic_83xx_notify_mbx_response(struct qlcnic_mailbox *mbx)
+{
+       mbx->rsp_status = QLC_83XX_MBX_RESPONSE_ARRIVED;
+       complete(&mbx->completion);
+}
+
+static void qlcnic_83xx_poll_process_aen(struct qlcnic_adapter *adapter)
+{
+       u32 resp, event, rsp_status = QLC_83XX_MBX_RESPONSE_ARRIVED;
+       struct qlcnic_mailbox *mbx = adapter->ahw->mailbox;
+       unsigned long flags;
+
+       spin_lock_irqsave(&mbx->aen_lock, flags);
+       resp = QLCRDX(adapter->ahw, QLCNIC_FW_MBX_CTRL);
+       if (!(resp & QLCNIC_SET_OWNER))
+               goto out;
+
+       event = readl(QLCNIC_MBX_FW(adapter->ahw, 0));
+       if (event & QLCNIC_MBX_ASYNC_EVENT) {
+               __qlcnic_83xx_process_aen(adapter);
+       } else {
+               if (mbx->rsp_status != rsp_status)
+                       qlcnic_83xx_notify_mbx_response(mbx);
+       }
+out:
+       qlcnic_83xx_enable_legacy_msix_mbx_intr(adapter);
+       spin_unlock_irqrestore(&mbx->aen_lock, flags);
+}
+
+irqreturn_t qlcnic_83xx_intr(int irq, void *data)
+{
+       struct qlcnic_adapter *adapter = data;
+       struct qlcnic_host_sds_ring *sds_ring;
+       struct qlcnic_hardware_context *ahw = adapter->ahw;
+
+       if (qlcnic_83xx_clear_legacy_intr(adapter) == IRQ_NONE)
+               return IRQ_NONE;
+
+       qlcnic_83xx_poll_process_aen(adapter);
+
+       if (ahw->diag_test) {
+               if (ahw->diag_test == QLCNIC_INTERRUPT_TEST)
+                       ahw->diag_cnt++;
+               qlcnic_83xx_enable_legacy_msix_mbx_intr(adapter);
+               return IRQ_HANDLED;
+       }
+
+       if (!test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
+               qlcnic_83xx_enable_legacy_msix_mbx_intr(adapter);
+       } else {
+               sds_ring = &adapter->recv_ctx->sds_rings[0];
+               napi_schedule(&sds_ring->napi);
+       }
+
+       return IRQ_HANDLED;
+}
+
+irqreturn_t qlcnic_83xx_tmp_intr(int irq, void *data)
+{
+       struct qlcnic_host_sds_ring *sds_ring = data;
+       struct qlcnic_adapter *adapter = sds_ring->adapter;
+
+       if (adapter->flags & QLCNIC_MSIX_ENABLED)
+               goto done;
+
+       if (adapter->nic_ops->clear_legacy_intr(adapter) == IRQ_NONE)
+               return IRQ_NONE;
+
+done:
+       adapter->ahw->diag_cnt++;
+       qlcnic_enable_sds_intr(adapter, sds_ring);
+
+       return IRQ_HANDLED;
+}
+
+void qlcnic_83xx_free_mbx_intr(struct qlcnic_adapter *adapter)
+{
+       u32 num_msix;
+
+       if (!(adapter->flags & QLCNIC_MSIX_ENABLED))
+               qlcnic_83xx_set_legacy_intr_mask(adapter);
+
+       qlcnic_83xx_disable_mbx_intr(adapter);
+
+       if (adapter->flags & QLCNIC_MSIX_ENABLED)
+               num_msix = adapter->ahw->num_msix - 1;
+       else
+               num_msix = 0;
+
+       msleep(20);
+
+       if (adapter->msix_entries) {
+               synchronize_irq(adapter->msix_entries[num_msix].vector);
+               free_irq(adapter->msix_entries[num_msix].vector, adapter);
+       }
+}
+
+int qlcnic_83xx_setup_mbx_intr(struct qlcnic_adapter *adapter)
+{
+       irq_handler_t handler;
+       u32 val;
+       int err = 0;
+       unsigned long flags = 0;
+
+       if (!(adapter->flags & QLCNIC_MSI_ENABLED) &&
+           !(adapter->flags & QLCNIC_MSIX_ENABLED))
+               flags |= IRQF_SHARED;
+
+       if (adapter->flags & QLCNIC_MSIX_ENABLED) {
+               handler = qlcnic_83xx_handle_aen;
+               val = adapter->msix_entries[adapter->ahw->num_msix - 1].vector;
+               err = request_irq(val, handler, flags, "qlcnic-MB", adapter);
+               if (err) {
+                       dev_err(&adapter->pdev->dev,
+                               "failed to register MBX interrupt\n");
+                       return err;
+               }
+       } else {
+               handler = qlcnic_83xx_intr;
+               val = adapter->msix_entries[0].vector;
+               err = request_irq(val, handler, flags, "qlcnic", adapter);
+               if (err) {
+                       dev_err(&adapter->pdev->dev,
+                               "failed to register INTx interrupt\n");
+                       return err;
+               }
+               qlcnic_83xx_clear_legacy_intr_mask(adapter);
+       }
+
+       /* Enable mailbox interrupt */
+       qlcnic_83xx_enable_mbx_interrupt(adapter);
+
+       return err;
+}
+
+void qlcnic_83xx_get_func_no(struct qlcnic_adapter *adapter)
+{
+       u32 val = QLCRDX(adapter->ahw, QLCNIC_INFORMANT);
+       adapter->ahw->pci_func = (val >> 24) & 0xff;
+}
+
+int qlcnic_83xx_cam_lock(struct qlcnic_adapter *adapter)
+{
+       void __iomem *addr;
+       u32 val, limit = 0;
+       struct qlcnic_hardware_context *ahw = adapter->ahw;
+
+       addr = ahw->pci_base0 + QLC_83XX_SEM_LOCK_FUNC(ahw->pci_func);
+       do {
+               val = readl(addr);
+               if (val) {
+                       /* write the function number to register */
+                       QLC_SHARED_REG_WR32(adapter, QLCNIC_FLASH_LOCK_OWNER,
+                                           ahw->pci_func);
+                       return 0;
+               }
+               usleep_range(1000, 2000);
+       } while (++limit <= QLCNIC_PCIE_SEM_TIMEOUT);
+
+       return -EIO;
+}
+
+void qlcnic_83xx_cam_unlock(struct qlcnic_adapter *adapter)
+{
+       void __iomem *addr;
+       u32 val;
+       struct qlcnic_hardware_context *ahw = adapter->ahw;
+
+       addr = ahw->pci_base0 + QLC_83XX_SEM_UNLOCK_FUNC(ahw->pci_func);
+       val = readl(addr);
+}
+
+void qlcnic_83xx_read_crb(struct qlcnic_adapter *adapter, char *buf,
+                         loff_t offset, size_t size)
+{
+       int ret = 0;
+       u32 data;
+
+       if (qlcnic_api_lock(adapter)) {
+               dev_err(&adapter->pdev->dev,
+                       "%s: failed to acquire lock. addr offset 0x%x\n",
+                       __func__, (u32)offset);
+               return;
+       }
+
+       data = QLCRD32(adapter, (u32) offset, &ret);
+       qlcnic_api_unlock(adapter);
+
+       if (ret == -EIO) {
+               dev_err(&adapter->pdev->dev,
+                       "%s: failed. addr offset 0x%x\n",
+                       __func__, (u32)offset);
+               return;
+       }
+       memcpy(buf, &data, size);
+}
+
+void qlcnic_83xx_write_crb(struct qlcnic_adapter *adapter, char *buf,
+                          loff_t offset, size_t size)
+{
+       u32 data;
+
+       memcpy(&data, buf, size);
+       qlcnic_83xx_wrt_reg_indirect(adapter, (u32) offset, data);
+}
+
+int qlcnic_83xx_get_port_info(struct qlcnic_adapter *adapter)
+{
+       struct qlcnic_hardware_context *ahw = adapter->ahw;
+       int status;
+
+       status = qlcnic_83xx_get_port_config(adapter);
+       if (status) {
+               dev_err(&adapter->pdev->dev,
+                       "Get Port Info failed\n");
+       } else {
+               if (ahw->port_config & QLC_83XX_10G_CAPABLE) {
+                       ahw->port_type = QLCNIC_XGBE;
+               } else if (ahw->port_config & QLC_83XX_10_CAPABLE ||
+                          ahw->port_config & QLC_83XX_100_CAPABLE ||
+                          ahw->port_config & QLC_83XX_1G_CAPABLE) {
+                       ahw->port_type = QLCNIC_GBE;
+               } else {
+                       ahw->port_type = QLCNIC_XGBE;
+               }
+
+               if (QLC_83XX_AUTONEG(ahw->port_config))
+                       ahw->link_autoneg = AUTONEG_ENABLE;
+       }
+       return status;
+}
+
+static void qlcnic_83xx_set_mac_filter_count(struct qlcnic_adapter *adapter)
+{
+       struct qlcnic_hardware_context *ahw = adapter->ahw;
+       u16 act_pci_fn = ahw->total_nic_func;
+       u16 count;
+
+       ahw->max_mc_count = QLC_83XX_MAX_MC_COUNT;
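+       /* Split the remaining filter pool (total minus the reserved multicast
+        * count) evenly across the active PCI functions to derive the
+        * per-function unicast limit.
+        */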
+       if (act_pci_fn <= 2)
+               count = (QLC_83XX_MAX_UC_COUNT - QLC_83XX_MAX_MC_COUNT) /
+                        act_pci_fn;
+       else
+               count = (QLC_83XX_LB_MAX_FILTERS - QLC_83XX_MAX_MC_COUNT) /
+                        act_pci_fn;
+       ahw->max_uc_count = count;
+}
+
+void qlcnic_83xx_enable_mbx_interrupt(struct qlcnic_adapter *adapter)
+{
+       u32 val;
+
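+       /* BIT_2 enables the mailbox interrupt; in MSI-X mode the index of the
+        * last vector (num_msix - 1), which services the mailbox, is encoded
+        * starting at bit 8.
+        */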
+       if (adapter->flags & QLCNIC_MSIX_ENABLED)
+               val = BIT_2 | ((adapter->ahw->num_msix - 1) << 8);
+       else
+               val = BIT_2;
+
+       QLCWRX(adapter->ahw, QLCNIC_MBX_INTR_ENBL, val);
+       qlcnic_83xx_enable_legacy_msix_mbx_intr(adapter);
+}
+
+void qlcnic_83xx_check_vf(struct qlcnic_adapter *adapter,
+                         const struct pci_device_id *ent)
+{
+       u32 op_mode, priv_level;
+       struct qlcnic_hardware_context *ahw = adapter->ahw;
+
+       ahw->fw_hal_version = 2;
+       qlcnic_get_func_no(adapter);
+
+       if (qlcnic_sriov_vf_check(adapter)) {
+               qlcnic_sriov_vf_set_ops(adapter);
+               return;
+       }
+
+       /* Determine function privilege level */
+       op_mode = QLCRDX(adapter->ahw, QLC_83XX_DRV_OP_MODE);
+       if (op_mode == QLC_83XX_DEFAULT_OPMODE)
+               priv_level = QLCNIC_MGMT_FUNC;
+       else
+               priv_level = QLC_83XX_GET_FUNC_PRIVILEGE(op_mode,
+                                                        ahw->pci_func);
+
+       if (priv_level == QLCNIC_NON_PRIV_FUNC) {
+               ahw->op_mode = QLCNIC_NON_PRIV_FUNC;
+               dev_info(&adapter->pdev->dev,
+                        "HAL Version: %d Non Privileged function\n",
+                        ahw->fw_hal_version);
+               adapter->nic_ops = &qlcnic_vf_ops;
+       } else {
+               if (pci_find_ext_capability(adapter->pdev,
+                                           PCI_EXT_CAP_ID_SRIOV))
+                       set_bit(__QLCNIC_SRIOV_CAPABLE, &adapter->state);
+               adapter->nic_ops = &qlcnic_83xx_ops;
+       }
+}
+
+static void qlcnic_83xx_handle_link_aen(struct qlcnic_adapter *adapter,
+                                       u32 data[]);
+static void qlcnic_83xx_handle_idc_comp_aen(struct qlcnic_adapter *adapter,
+                                           u32 data[]);
+
+void qlcnic_dump_mbx(struct qlcnic_adapter *adapter,
+                    struct qlcnic_cmd_args *cmd)
+{
+       int i;
+
+       if (cmd->op_type == QLC_83XX_MBX_POST_BC_OP)
+               return;
+
+       dev_info(&adapter->pdev->dev,
+                "Host MBX regs(%d)\n", cmd->req.num);
+       for (i = 0; i < cmd->req.num; i++) {
+               if (i && !(i % 8))
+                       pr_info("\n");
+               pr_info("%08x ", cmd->req.arg[i]);
+       }
+       pr_info("\n");
+       dev_info(&adapter->pdev->dev,
+                "FW MBX regs(%d)\n", cmd->rsp.num);
+       for (i = 0; i < cmd->rsp.num; i++) {
+               if (i && !(i % 8))
+                       pr_info("\n");
+               pr_info("%08x ", cmd->rsp.arg[i]);
+       }
+       pr_info("\n");
+}
+
+static void qlcnic_83xx_poll_for_mbx_completion(struct qlcnic_adapter *adapter,
+                                               struct qlcnic_cmd_args *cmd)
+{
+       struct qlcnic_hardware_context *ahw = adapter->ahw;
+       int opcode = LSW(cmd->req.arg[0]);
+       unsigned long max_loops;
+
+       max_loops = cmd->total_cmds * QLC_83XX_MBX_CMD_LOOP;
+
+       for (; max_loops; max_loops--) {
+               if (atomic_read(&cmd->rsp_status) ==
+                   QLC_83XX_MBX_RESPONSE_ARRIVED)
+                       return;
+
+               udelay(1);
+       }
+
+       dev_err(&adapter->pdev->dev,
+               "%s: Mailbox command timed out, cmd_op=0x%x, cmd_type=0x%x, pci_func=0x%x, op_mode=0x%x\n",
+               __func__, opcode, cmd->type, ahw->pci_func, ahw->op_mode);
+       flush_workqueue(ahw->mailbox->work_q);
+       return;
+}
+
+int qlcnic_83xx_issue_cmd(struct qlcnic_adapter *adapter,
+                         struct qlcnic_cmd_args *cmd)
+{
+       struct qlcnic_mailbox *mbx = adapter->ahw->mailbox;
+       struct qlcnic_hardware_context *ahw = adapter->ahw;
+       int cmd_type, err, opcode;
+       unsigned long timeout;
+
+       if (!mbx)
+               return -EIO;
+
+       opcode = LSW(cmd->req.arg[0]);
+       cmd_type = cmd->type;
+       err = mbx->ops->enqueue_cmd(adapter, cmd, &timeout);
+       if (err) {
+               dev_err(&adapter->pdev->dev,
+                       "%s: Mailbox not available, cmd_op=0x%x, cmd_context=0x%x, pci_func=0x%x, op_mode=0x%x\n",
+                       __func__, opcode, cmd->type, ahw->pci_func,
+                       ahw->op_mode);
+               return err;
+       }
+
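+       /* Dispatch on command type: WAIT blocks on the completion with a
+        * timeout, NO_WAIT returns right after the enqueue, BUSY_WAIT polls
+        * for the response in place.
+        */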
+       switch (cmd_type) {
+       case QLC_83XX_MBX_CMD_WAIT:
+               if (!wait_for_completion_timeout(&cmd->completion, timeout)) {
+                       dev_err(&adapter->pdev->dev,
+                               "%s: Mailbox command timed out, cmd_op=0x%x, cmd_type=0x%x, pci_func=0x%x, op_mode=0x%x\n",
+                               __func__, opcode, cmd_type, ahw->pci_func,
+                               ahw->op_mode);
+                       flush_workqueue(mbx->work_q);
+               }
+               break;
+       case QLC_83XX_MBX_CMD_NO_WAIT:
+               return 0;
+       case QLC_83XX_MBX_CMD_BUSY_WAIT:
+               qlcnic_83xx_poll_for_mbx_completion(adapter, cmd);
+               break;
+       default:
+               dev_err(&adapter->pdev->dev,
+                       "%s: Invalid mailbox command, cmd_op=0x%x, cmd_type=0x%x, pci_func=0x%x, op_mode=0x%x\n",
+                       __func__, opcode, cmd_type, ahw->pci_func,
+                       ahw->op_mode);
+               qlcnic_83xx_detach_mailbox_work(adapter);
+       }
+
+       return cmd->rsp_opcode;
+}
+
+int qlcnic_83xx_alloc_mbx_args(struct qlcnic_cmd_args *mbx,
+                              struct qlcnic_adapter *adapter, u32 type)
+{
+       int i, size;
+       u32 temp;
+       const struct qlcnic_mailbox_metadata *mbx_tbl;
+
+       memset(mbx, 0, sizeof(struct qlcnic_cmd_args));
+       mbx_tbl = qlcnic_83xx_mbx_tbl;
+       size = ARRAY_SIZE(qlcnic_83xx_mbx_tbl);
+       for (i = 0; i < size; i++) {
+               if (type == mbx_tbl[i].cmd) {
+                       mbx->op_type = QLC_83XX_FW_MBX_CMD;
+                       mbx->req.num = mbx_tbl[i].in_args;
+                       mbx->rsp.num = mbx_tbl[i].out_args;
+                       mbx->req.arg = kcalloc(mbx->req.num, sizeof(u32),
+                                              GFP_ATOMIC);
+                       if (!mbx->req.arg)
+                               return -ENOMEM;
+                       mbx->rsp.arg = kcalloc(mbx->rsp.num, sizeof(u32),
+                                              GFP_ATOMIC);
+                       if (!mbx->rsp.arg) {
+                               kfree(mbx->req.arg);
+                               mbx->req.arg = NULL;
+                               return -ENOMEM;
+                       }
+                       temp = adapter->ahw->fw_hal_version << 29;
+                       mbx->req.arg[0] = (type | (mbx->req.num << 16) | temp);
+                       mbx->cmd_op = type;
+                       return 0;
+               }
+       }
+
+       dev_err(&adapter->pdev->dev, "%s: Invalid mailbox command opcode 0x%x\n",
+               __func__, type);
+       return -EINVAL;
+}
+
+void qlcnic_83xx_idc_aen_work(struct work_struct *work)
+{
+       struct qlcnic_adapter *adapter;
+       struct qlcnic_cmd_args cmd;
+       int i, err = 0;
+
+       adapter = container_of(work, struct qlcnic_adapter, idc_aen_work.work);
+       err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_IDC_ACK);
+       if (err)
+               return;
+
+       for (i = 1; i < QLC_83XX_MBX_AEN_CNT; i++)
+               cmd.req.arg[i] = adapter->ahw->mbox_aen[i];
+
+       err = qlcnic_issue_cmd(adapter, &cmd);
+       if (err)
+               dev_info(&adapter->pdev->dev,
+                        "%s: Mailbox IDC ACK failed.\n", __func__);
+       qlcnic_free_mbx_args(&cmd);
+}
+
+static void qlcnic_83xx_handle_idc_comp_aen(struct qlcnic_adapter *adapter,
+                                           u32 data[])
+{
+       dev_dbg(&adapter->pdev->dev, "Completion AEN:0x%x.\n",
+               QLCNIC_MBX_RSP(data[0]));
+       clear_bit(QLC_83XX_IDC_COMP_AEN, &adapter->ahw->idc.status);
+       return;
+}
+
+static void __qlcnic_83xx_process_aen(struct qlcnic_adapter *adapter)
+{
+       struct qlcnic_hardware_context *ahw = adapter->ahw;
+       u32 event[QLC_83XX_MBX_AEN_CNT];
+       int i;
+
+       for (i = 0; i < QLC_83XX_MBX_AEN_CNT; i++)
+               event[i] = readl(QLCNIC_MBX_FW(ahw, i));
+
+       switch (QLCNIC_MBX_RSP(event[0])) {
+
+       case QLCNIC_MBX_LINK_EVENT:
+               qlcnic_83xx_handle_link_aen(adapter, event);
+               break;
+       case QLCNIC_MBX_COMP_EVENT:
+               qlcnic_83xx_handle_idc_comp_aen(adapter, event);
+               break;
+       case QLCNIC_MBX_REQUEST_EVENT:
+               for (i = 0; i < QLC_83XX_MBX_AEN_CNT; i++)
+                       adapter->ahw->mbox_aen[i] = QLCNIC_MBX_RSP(event[i]);
+               queue_delayed_work(adapter->qlcnic_wq,
+                                  &adapter->idc_aen_work, 0);
+               break;
+       case QLCNIC_MBX_TIME_EXTEND_EVENT:
+               ahw->extend_lb_time = event[1] >> 8 & 0xf;
+               break;
+       case QLCNIC_MBX_BC_EVENT:
+               qlcnic_sriov_handle_bc_event(adapter, event[1]);
+               break;
+       case QLCNIC_MBX_SFP_INSERT_EVENT:
+               dev_info(&adapter->pdev->dev, "SFP+ Insert AEN:0x%x.\n",
+                        QLCNIC_MBX_RSP(event[0]));
+               break;
+       case QLCNIC_MBX_SFP_REMOVE_EVENT:
+               dev_info(&adapter->pdev->dev, "SFP Removed AEN:0x%x.\n",
+                        QLCNIC_MBX_RSP(event[0]));
+               break;
+       case QLCNIC_MBX_DCBX_CONFIG_CHANGE_EVENT:
+               qlcnic_dcb_aen_handler(adapter->dcb, (void *)&event[1]);
+               break;
+       default:
+               dev_dbg(&adapter->pdev->dev, "Unsupported AEN:0x%x.\n",
+                       QLCNIC_MBX_RSP(event[0]));
+               break;
+       }
+
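+       /* Acknowledge the event by clearing mailbox ownership so firmware can
+        * post the next one.
+        */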
+       QLCWRX(ahw, QLCNIC_FW_MBX_CTRL, QLCNIC_CLR_OWNER);
+}
+
+static void qlcnic_83xx_process_aen(struct qlcnic_adapter *adapter)
+{
+       u32 resp, event, rsp_status = QLC_83XX_MBX_RESPONSE_ARRIVED;
+       struct qlcnic_hardware_context *ahw = adapter->ahw;
+       struct qlcnic_mailbox *mbx = ahw->mailbox;
+       unsigned long flags;
+
+       spin_lock_irqsave(&mbx->aen_lock, flags);
+       resp = QLCRDX(ahw, QLCNIC_FW_MBX_CTRL);
+       if (resp & QLCNIC_SET_OWNER) {
+               event = readl(QLCNIC_MBX_FW(ahw, 0));
+               if (event &  QLCNIC_MBX_ASYNC_EVENT) {
+                       __qlcnic_83xx_process_aen(adapter);
+               } else {
+                       if (mbx->rsp_status != rsp_status)
+                               qlcnic_83xx_notify_mbx_response(mbx);
+               }
+       }
+       spin_unlock_irqrestore(&mbx->aen_lock, flags);
+}
+
+static void qlcnic_83xx_mbx_poll_work(struct work_struct *work)
+{
+       struct qlcnic_adapter *adapter;
+
+       adapter = container_of(work, struct qlcnic_adapter, mbx_poll_work.work);
+
+       if (!test_bit(__QLCNIC_MBX_POLL_ENABLE, &adapter->state))
+               return;
+
+       qlcnic_83xx_process_aen(adapter);
+       queue_delayed_work(adapter->qlcnic_wq, &adapter->mbx_poll_work,
+                          (HZ / 10));
+}
+
+void qlcnic_83xx_enable_mbx_poll(struct qlcnic_adapter *adapter)
+{
+       if (test_and_set_bit(__QLCNIC_MBX_POLL_ENABLE, &adapter->state))
+               return;
+
+       INIT_DELAYED_WORK(&adapter->mbx_poll_work, qlcnic_83xx_mbx_poll_work);
+       queue_delayed_work(adapter->qlcnic_wq, &adapter->mbx_poll_work, 0);
+}
+
+void qlcnic_83xx_disable_mbx_poll(struct qlcnic_adapter *adapter)
+{
+       if (!test_and_clear_bit(__QLCNIC_MBX_POLL_ENABLE, &adapter->state))
+               return;
+       cancel_delayed_work_sync(&adapter->mbx_poll_work);
+}
+
+static int qlcnic_83xx_add_rings(struct qlcnic_adapter *adapter)
+{
+       int index, i, err, sds_mbx_size;
+       u32 *buf, intrpt_id, intr_mask;
+       u16 context_id;
+       u8 num_sds;
+       struct qlcnic_cmd_args cmd;
+       struct qlcnic_host_sds_ring *sds;
+       struct qlcnic_sds_mbx sds_mbx;
+       struct qlcnic_add_rings_mbx_out *mbx_out;
+       struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
+       struct qlcnic_hardware_context *ahw = adapter->ahw;
+
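+       /* Register the SDS rings beyond the first QLCNIC_MAX_SDS_RINGS that
+        * were created with the initial Rx context.
+        */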
+       sds_mbx_size = sizeof(struct qlcnic_sds_mbx);
+       context_id = recv_ctx->context_id;
+       num_sds = adapter->drv_sds_rings - QLCNIC_MAX_SDS_RINGS;
+       ahw->hw_ops->alloc_mbx_args(&cmd, adapter,
+                                   QLCNIC_CMD_ADD_RCV_RINGS);
+       cmd.req.arg[1] = 0 | (num_sds << 8) | (context_id << 16);
+
+       /* set up status rings, mbx 2-81 */
+       index = 2;
+       for (i = 8; i < adapter->drv_sds_rings; i++) {
+               memset(&sds_mbx, 0, sds_mbx_size);
+               sds = &recv_ctx->sds_rings[i];
+               sds->consumer = 0;
+               memset(sds->desc_head, 0, STATUS_DESC_RINGSIZE(sds));
+               sds_mbx.phy_addr_low = LSD(sds->phys_addr);
+               sds_mbx.phy_addr_high = MSD(sds->phys_addr);
+               sds_mbx.sds_ring_size = sds->num_desc;
+
+               if (adapter->flags & QLCNIC_MSIX_ENABLED)
+                       intrpt_id = ahw->intr_tbl[i].id;
+               else
+                       intrpt_id = QLCRDX(ahw, QLCNIC_DEF_INT_ID);
+
+               if (adapter->ahw->diag_test != QLCNIC_LOOPBACK_TEST)
+                       sds_mbx.intrpt_id = intrpt_id;
+               else
+                       sds_mbx.intrpt_id = 0xffff;
+               sds_mbx.intrpt_val = 0;
+               buf = &cmd.req.arg[index];
+               memcpy(buf, &sds_mbx, sds_mbx_size);
+               index += sds_mbx_size / sizeof(u32);
+       }
+
+       /* send the mailbox command */
+       err = ahw->hw_ops->mbx_cmd(adapter, &cmd);
+       if (err) {
+               dev_err(&adapter->pdev->dev,
+                       "Failed to add rings %d\n", err);
+               goto out;
+       }
+
+       mbx_out = (struct qlcnic_add_rings_mbx_out *)&cmd.rsp.arg[1];
+       index = 0;
+       /* status descriptor ring */
+       for (i = 8; i < adapter->drv_sds_rings; i++) {
+               sds = &recv_ctx->sds_rings[i];
+               sds->crb_sts_consumer = ahw->pci_base0 +
+                                       mbx_out->host_csmr[index];
+               if (adapter->flags & QLCNIC_MSIX_ENABLED)
+                       intr_mask = ahw->intr_tbl[i].src;
+               else
+                       intr_mask = QLCRDX(ahw, QLCNIC_DEF_INT_MASK);
+
+               sds->crb_intr_mask = ahw->pci_base0 + intr_mask;
+               index++;
+       }
+out:
+       qlcnic_free_mbx_args(&cmd);
+       return err;
+}
+
+void qlcnic_83xx_del_rx_ctx(struct qlcnic_adapter *adapter)
+{
+       int err;
+       u32 temp = 0;
+       struct qlcnic_cmd_args cmd;
+       struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
+
+       if (qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_DESTROY_RX_CTX))
+               return;
+
+       if (qlcnic_sriov_pf_check(adapter) || qlcnic_sriov_vf_check(adapter))
+               cmd.req.arg[0] |= (0x3 << 29);
+
+       if (qlcnic_sriov_pf_check(adapter))
+               qlcnic_pf_set_interface_id_del_rx_ctx(adapter, &temp);
+
+       cmd.req.arg[1] = recv_ctx->context_id | temp;
+       err = qlcnic_issue_cmd(adapter, &cmd);
+       if (err)
+               dev_err(&adapter->pdev->dev,
+                       "Failed to destroy rx ctx in firmware\n");
+
+       recv_ctx->state = QLCNIC_HOST_CTX_STATE_FREED;
+       qlcnic_free_mbx_args(&cmd);
+}
+
+int qlcnic_83xx_create_rx_ctx(struct qlcnic_adapter *adapter)
+{
+       int i, err, index, sds_mbx_size, rds_mbx_size;
+       u8 num_sds, num_rds;
+       u32 *buf, intrpt_id, intr_mask, cap = 0;
+       struct qlcnic_host_sds_ring *sds;
+       struct qlcnic_host_rds_ring *rds;
+       struct qlcnic_sds_mbx sds_mbx;
+       struct qlcnic_rds_mbx rds_mbx;
+       struct qlcnic_cmd_args cmd;
+       struct qlcnic_rcv_mbx_out *mbx_out;
+       struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
+       struct qlcnic_hardware_context *ahw = adapter->ahw;
+       num_rds = adapter->max_rds_rings;
+
+       if (adapter->drv_sds_rings <= QLCNIC_MAX_SDS_RINGS)
+               num_sds = adapter->drv_sds_rings;
+       else
+               num_sds = QLCNIC_MAX_SDS_RINGS;
+
+       sds_mbx_size = sizeof(struct qlcnic_sds_mbx);
+       rds_mbx_size = sizeof(struct qlcnic_rds_mbx);
+       cap = QLCNIC_CAP0_LEGACY_CONTEXT;
+
+       if (adapter->flags & QLCNIC_FW_LRO_MSS_CAP)
+               cap |= QLC_83XX_FW_CAP_LRO_MSS;
+
+       /* set mailbox hdr and capabilities */
+       err = qlcnic_alloc_mbx_args(&cmd, adapter,
+                                   QLCNIC_CMD_CREATE_RX_CTX);
+       if (err)
+               return err;
+
+       if (qlcnic_sriov_pf_check(adapter) || qlcnic_sriov_vf_check(adapter))
+               cmd.req.arg[0] |= (0x3 << 29);
+
+       cmd.req.arg[1] = cap;
+       cmd.req.arg[5] = 1 | (num_rds << 5) | (num_sds << 8) |
+                        (QLC_83XX_HOST_RDS_MODE_UNIQUE << 16);
+
+       if (qlcnic_sriov_pf_check(adapter))
+               qlcnic_pf_set_interface_id_create_rx_ctx(adapter,
+                                                        &cmd.req.arg[6]);
+       /* set up status rings, mbx 8-57/87 */
+       index = QLC_83XX_HOST_SDS_MBX_IDX;
+       for (i = 0; i < num_sds; i++) {
+               memset(&sds_mbx, 0, sds_mbx_size);
+               sds = &recv_ctx->sds_rings[i];
+               sds->consumer = 0;
+               memset(sds->desc_head, 0, STATUS_DESC_RINGSIZE(sds));
+               sds_mbx.phy_addr_low = LSD(sds->phys_addr);
+               sds_mbx.phy_addr_high = MSD(sds->phys_addr);
+               sds_mbx.sds_ring_size = sds->num_desc;
+               if (adapter->flags & QLCNIC_MSIX_ENABLED)
+                       intrpt_id = ahw->intr_tbl[i].id;
+               else
+                       intrpt_id = QLCRDX(ahw, QLCNIC_DEF_INT_ID);
+               if (adapter->ahw->diag_test != QLCNIC_LOOPBACK_TEST)
+                       sds_mbx.intrpt_id = intrpt_id;
+               else
+                       sds_mbx.intrpt_id = 0xffff;
+               sds_mbx.intrpt_val = 0;
+               buf = &cmd.req.arg[index];
+               memcpy(buf, &sds_mbx, sds_mbx_size);
+               index += sds_mbx_size / sizeof(u32);
+       }
+       /* set up receive rings, mbx 88-111/135 */
+       index = QLCNIC_HOST_RDS_MBX_IDX;
+       rds = &recv_ctx->rds_rings[0];
+       rds->producer = 0;
+       memset(&rds_mbx, 0, rds_mbx_size);
+       rds_mbx.phy_addr_reg_low = LSD(rds->phys_addr);
+       rds_mbx.phy_addr_reg_high = MSD(rds->phys_addr);
+       rds_mbx.reg_ring_sz = rds->dma_size;
+       rds_mbx.reg_ring_len = rds->num_desc;
+       /* Jumbo ring */
+       rds = &recv_ctx->rds_rings[1];
+       rds->producer = 0;
+       rds_mbx.phy_addr_jmb_low = LSD(rds->phys_addr);
+       rds_mbx.phy_addr_jmb_high = MSD(rds->phys_addr);
+       rds_mbx.jmb_ring_sz = rds->dma_size;
+       rds_mbx.jmb_ring_len = rds->num_desc;
+       buf = &cmd.req.arg[index];
+       memcpy(buf, &rds_mbx, rds_mbx_size);
+
+       /* send the mailbox command */
+       err = ahw->hw_ops->mbx_cmd(adapter, &cmd);
+       if (err) {
+               dev_err(&adapter->pdev->dev,
+                       "Failed to create Rx ctx in firmware %d\n", err);
+               goto out;
+       }
+       mbx_out = (struct qlcnic_rcv_mbx_out *)&cmd.rsp.arg[1];
+       recv_ctx->context_id = mbx_out->ctx_id;
+       recv_ctx->state = mbx_out->state;
+       recv_ctx->virt_port = mbx_out->vport_id;
+       dev_info(&adapter->pdev->dev, "Rx Context[%d] Created, state:0x%x\n",
+                recv_ctx->context_id, recv_ctx->state);
+       /* Receive descriptor ring */
+       /* Standard ring */
+       rds = &recv_ctx->rds_rings[0];
+       rds->crb_rcv_producer = ahw->pci_base0 +
+                               mbx_out->host_prod[0].reg_buf;
+       /* Jumbo ring */
+       rds = &recv_ctx->rds_rings[1];
+       rds->crb_rcv_producer = ahw->pci_base0 +
+                               mbx_out->host_prod[0].jmb_buf;
+       /* status descriptor ring */
+       for (i = 0; i < num_sds; i++) {
+               sds = &recv_ctx->sds_rings[i];
+               sds->crb_sts_consumer = ahw->pci_base0 +
+                                       mbx_out->host_csmr[i];
+               if (adapter->flags & QLCNIC_MSIX_ENABLED)
+                       intr_mask = ahw->intr_tbl[i].src;
+               else
+                       intr_mask = QLCRDX(ahw, QLCNIC_DEF_INT_MASK);
+               sds->crb_intr_mask = ahw->pci_base0 + intr_mask;
+       }
+
+       if (adapter->drv_sds_rings > QLCNIC_MAX_SDS_RINGS)
+               err = qlcnic_83xx_add_rings(adapter);
+out:
+       qlcnic_free_mbx_args(&cmd);
+       return err;
+}
+
+void qlcnic_83xx_del_tx_ctx(struct qlcnic_adapter *adapter,
+                           struct qlcnic_host_tx_ring *tx_ring)
+{
+       struct qlcnic_cmd_args cmd;
+       u32 temp = 0;
+
+       if (qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_DESTROY_TX_CTX))
+               return;
+
+       if (qlcnic_sriov_pf_check(adapter) || qlcnic_sriov_vf_check(adapter))
+               cmd.req.arg[0] |= (0x3 << 29);
+
+       if (qlcnic_sriov_pf_check(adapter))
+               qlcnic_pf_set_interface_id_del_tx_ctx(adapter, &temp);
+
+       cmd.req.arg[1] = tx_ring->ctx_id | temp;
+       if (qlcnic_issue_cmd(adapter, &cmd))
+               dev_err(&adapter->pdev->dev,
+                       "Failed to destroy tx ctx in firmware\n");
+       qlcnic_free_mbx_args(&cmd);
+}
+
+int qlcnic_83xx_create_tx_ctx(struct qlcnic_adapter *adapter,
+                             struct qlcnic_host_tx_ring *tx, int ring)
+{
+       int err;
+       u16 msix_id;
+       u32 *buf, intr_mask, temp = 0;
+       struct qlcnic_cmd_args cmd;
+       struct qlcnic_tx_mbx mbx;
+       struct qlcnic_tx_mbx_out *mbx_out;
+       struct qlcnic_hardware_context *ahw = adapter->ahw;
+       u32 msix_vector;
+
+       /* Reset host resources */
+       tx->producer = 0;
+       tx->sw_consumer = 0;
+       *(tx->hw_consumer) = 0;
+
+       memset(&mbx, 0, sizeof(struct qlcnic_tx_mbx));
+
+       /* setup mailbox inbox registers */
+       mbx.phys_addr_low = LSD(tx->phys_addr);
+       mbx.phys_addr_high = MSD(tx->phys_addr);
+       mbx.cnsmr_index_low = LSD(tx->hw_cons_phys_addr);
+       mbx.cnsmr_index_high = MSD(tx->hw_cons_phys_addr);
+       mbx.size = tx->num_desc;
+       if (adapter->flags & QLCNIC_MSIX_ENABLED) {
+               if (!(adapter->flags & QLCNIC_TX_INTR_SHARED))
+                       msix_vector = adapter->drv_sds_rings + ring;
+               else
+                       msix_vector = adapter->drv_sds_rings - 1;
+               msix_id = ahw->intr_tbl[msix_vector].id;
+       } else {
+               msix_id = QLCRDX(ahw, QLCNIC_DEF_INT_ID);
+       }
+
+       if (adapter->ahw->diag_test != QLCNIC_LOOPBACK_TEST)
+               mbx.intr_id = msix_id;
+       else
+               mbx.intr_id = 0xffff;
+       mbx.src = 0;
+
+       err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CREATE_TX_CTX);
+       if (err)
+               return err;
+
+       if (qlcnic_sriov_pf_check(adapter) || qlcnic_sriov_vf_check(adapter))
+               cmd.req.arg[0] |= (0x3 << 29);
+
+       if (qlcnic_sriov_pf_check(adapter))
+               qlcnic_pf_set_interface_id_create_tx_ctx(adapter, &temp);
+
+       cmd.req.arg[1] = QLCNIC_CAP0_LEGACY_CONTEXT;
+       cmd.req.arg[5] = QLCNIC_SINGLE_RING | temp;
+
+       buf = &cmd.req.arg[6];
+       memcpy(buf, &mbx, sizeof(struct qlcnic_tx_mbx));
+       /* send the mailbox command*/
+       err = qlcnic_issue_cmd(adapter, &cmd);
+       if (err) {
+               netdev_err(adapter->netdev,
+                          "Failed to create Tx ctx in firmware 0x%x\n", err);
+               goto out;
+       }
+       mbx_out = (struct qlcnic_tx_mbx_out *)&cmd.rsp.arg[2];
+       tx->crb_cmd_producer = ahw->pci_base0 + mbx_out->host_prod;
+       tx->ctx_id = mbx_out->ctx_id;
+       if ((adapter->flags & QLCNIC_MSIX_ENABLED) &&
+           !(adapter->flags & QLCNIC_TX_INTR_SHARED)) {
+               intr_mask = ahw->intr_tbl[adapter->drv_sds_rings + ring].src;
+               tx->crb_intr_mask = ahw->pci_base0 + intr_mask;
+       }
+       netdev_info(adapter->netdev,
+                   "Tx Context[0x%x] Created, state:0x%x\n",
+                   tx->ctx_id, mbx_out->state);
+out:
+       qlcnic_free_mbx_args(&cmd);
+       return err;
+}
+
+static int qlcnic_83xx_diag_alloc_res(struct net_device *netdev, int test,
+                                     u8 num_sds_ring)
+{
+       struct qlcnic_adapter *adapter = netdev_priv(netdev);
+       struct qlcnic_host_sds_ring *sds_ring;
+       struct qlcnic_host_rds_ring *rds_ring;
+       u16 adapter_state = adapter->is_up;
+       u8 ring;
+       int ret;
+
+       netif_device_detach(netdev);
+
+       if (netif_running(netdev))
+               __qlcnic_down(adapter, netdev);
+
+       qlcnic_detach(adapter);
+
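+       /* Re-attach with a single SDS ring and the requested diagnostic test
+        * mode before creating the firmware context.
+        */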
+       adapter->drv_sds_rings = QLCNIC_SINGLE_RING;
+       adapter->ahw->diag_test = test;
+       adapter->ahw->linkup = 0;
+
+       ret = qlcnic_attach(adapter);
+       if (ret) {
+               netif_device_attach(netdev);
+               return ret;
+       }
+
+       ret = qlcnic_fw_create_ctx(adapter);
+       if (ret) {
+               qlcnic_detach(adapter);
+               if (adapter_state == QLCNIC_ADAPTER_UP_MAGIC) {
+                       adapter->drv_sds_rings = num_sds_ring;
+                       qlcnic_attach(adapter);
+               }
+               netif_device_attach(netdev);
+               return ret;
+       }
+
+       for (ring = 0; ring < adapter->max_rds_rings; ring++) {
+               rds_ring = &adapter->recv_ctx->rds_rings[ring];
+               qlcnic_post_rx_buffers(adapter, rds_ring, ring);
+       }
+
+       if (adapter->ahw->diag_test == QLCNIC_INTERRUPT_TEST) {
+               for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
+                       sds_ring = &adapter->recv_ctx->sds_rings[ring];
+                       qlcnic_enable_sds_intr(adapter, sds_ring);
+               }
+       }
+
+       if (adapter->ahw->diag_test == QLCNIC_LOOPBACK_TEST) {
+               adapter->ahw->loopback_state = 0;
+               adapter->ahw->hw_ops->setup_link_event(adapter, 1);
+       }
+
+       set_bit(__QLCNIC_DEV_UP, &adapter->state);
+       return 0;
+}
+
+static void qlcnic_83xx_diag_free_res(struct net_device *netdev,
+                                     u8 drv_sds_rings)
+{
+       struct qlcnic_adapter *adapter = netdev_priv(netdev);
+       struct qlcnic_host_sds_ring *sds_ring;
+       int ring;
+
+       clear_bit(__QLCNIC_DEV_UP, &adapter->state);
+       if (adapter->ahw->diag_test == QLCNIC_INTERRUPT_TEST) {
+               for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
+                       sds_ring = &adapter->recv_ctx->sds_rings[ring];
+                       if (adapter->flags & QLCNIC_MSIX_ENABLED)
+                               qlcnic_disable_sds_intr(adapter, sds_ring);
+               }
+       }
+
+       qlcnic_fw_destroy_ctx(adapter);
+       qlcnic_detach(adapter);
+
+       adapter->ahw->diag_test = 0;
+       adapter->drv_sds_rings = drv_sds_rings;
+
+       if (qlcnic_attach(adapter))
+               goto out;
+
+       if (netif_running(netdev))
+               __qlcnic_up(adapter, netdev);
+
+out:
+       netif_device_attach(netdev);
+}
+
+static void qlcnic_83xx_get_beacon_state(struct qlcnic_adapter *adapter)
+{
+       struct qlcnic_hardware_context *ahw = adapter->ahw;
+       struct qlcnic_cmd_args cmd;
+       u8 beacon_state;
+       int err = 0;
+
+       err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_LED_CONFIG);
+       if (!err) {
+               err = qlcnic_issue_cmd(adapter, &cmd);
+               if (!err) {
+                       beacon_state = cmd.rsp.arg[4];
+                       if (beacon_state == QLCNIC_BEACON_DISABLE)
+                               ahw->beacon_state = QLC_83XX_BEACON_OFF;
+                       else if (beacon_state == QLC_83XX_ENABLE_BEACON)
+                               ahw->beacon_state = QLC_83XX_BEACON_ON;
+               }
+       } else {
+               netdev_err(adapter->netdev, "Get beacon state failed, err=%d\n",
+                          err);
+       }
+
+       qlcnic_free_mbx_args(&cmd);
+
+       return;
+}
+
+int qlcnic_83xx_config_led(struct qlcnic_adapter *adapter, u32 state,
+                          u32 beacon)
+{
+       struct qlcnic_cmd_args cmd;
+       u32 mbx_in;
+       int i, status = 0;
+
+       if (state) {
+               /* Get LED configuration */
+               status = qlcnic_alloc_mbx_args(&cmd, adapter,
+                                              QLCNIC_CMD_GET_LED_CONFIG);
+               if (status)
+                       return status;
+
+               status = qlcnic_issue_cmd(adapter, &cmd);
+               if (status) {
+                       dev_err(&adapter->pdev->dev,
+                               "Get led config failed.\n");
+                       goto mbx_err;
+               } else {
+                       for (i = 0; i < 4; i++)
+                               adapter->ahw->mbox_reg[i] = cmd.rsp.arg[i+1];
+               }
+               qlcnic_free_mbx_args(&cmd);
+               /* Set LED Configuration */
+               mbx_in = (LSW(QLC_83XX_LED_CONFIG) << 16) |
+                         LSW(QLC_83XX_LED_CONFIG);
+               status = qlcnic_alloc_mbx_args(&cmd, adapter,
+                                              QLCNIC_CMD_SET_LED_CONFIG);
+               if (status)
+                       return status;
+
+               cmd.req.arg[1] = mbx_in;
+               cmd.req.arg[2] = mbx_in;
+               cmd.req.arg[3] = mbx_in;
+               if (beacon)
+                       cmd.req.arg[4] = QLC_83XX_ENABLE_BEACON;
+               status = qlcnic_issue_cmd(adapter, &cmd);
+               if (status) {
+                       dev_err(&adapter->pdev->dev,
+                               "Set led config failed.\n");
+               }
+mbx_err:
+               qlcnic_free_mbx_args(&cmd);
+               return status;
+
+       } else {
+               /* Restoring default LED configuration */
+               status = qlcnic_alloc_mbx_args(&cmd, adapter,
+                                              QLCNIC_CMD_SET_LED_CONFIG);
+               if (status)
+                       return status;
+
+               cmd.req.arg[1] = adapter->ahw->mbox_reg[0];
+               cmd.req.arg[2] = adapter->ahw->mbox_reg[1];
+               cmd.req.arg[3] = adapter->ahw->mbox_reg[2];
+               if (beacon)
+                       cmd.req.arg[4] = adapter->ahw->mbox_reg[3];
+               status = qlcnic_issue_cmd(adapter, &cmd);
+               if (status)
+                       dev_err(&adapter->pdev->dev,
+                               "Restoring led config failed.\n");
+               qlcnic_free_mbx_args(&cmd);
+               return status;
+       }
+}
+
+int  qlcnic_83xx_set_led(struct net_device *netdev,
+                        enum ethtool_phys_id_state state)
+{
+       struct qlcnic_adapter *adapter = netdev_priv(netdev);
+       int err = -EIO, active = 1;
+
+       if (adapter->ahw->op_mode == QLCNIC_NON_PRIV_FUNC) {
+               netdev_warn(netdev,
+                           "LED test is not supported in non-privileged mode\n");
+               return -EOPNOTSUPP;
+       }
+
+       switch (state) {
+       case ETHTOOL_ID_ACTIVE:
+               if (test_and_set_bit(__QLCNIC_LED_ENABLE, &adapter->state))
+                       return -EBUSY;
+
+               if (test_bit(__QLCNIC_RESETTING, &adapter->state))
+                       break;
+
+               err = qlcnic_83xx_config_led(adapter, active, 0);
+               if (err)
+                       netdev_err(netdev, "Failed to set LED blink state\n");
+               break;
+       case ETHTOOL_ID_INACTIVE:
+               active = 0;
+
+               if (test_bit(__QLCNIC_RESETTING, &adapter->state))
+                       break;
+
+               err = qlcnic_83xx_config_led(adapter, active, 0);
+               if (err)
+                       netdev_err(netdev, "Failed to reset LED blink state\n");
+               break;
+
+       default:
+               return -EINVAL;
+       }
+
+       if (!active || err)
+               clear_bit(__QLCNIC_LED_ENABLE, &adapter->state);
+
+       return err;
+}
+
+void qlcnic_83xx_initialize_nic(struct qlcnic_adapter *adapter, int enable)
+{
+       struct qlcnic_cmd_args cmd;
+       int status;
+
+       if (qlcnic_sriov_vf_check(adapter))
+               return;
+
+       if (enable)
+               status = qlcnic_alloc_mbx_args(&cmd, adapter,
+                                              QLCNIC_CMD_INIT_NIC_FUNC);
+       else
+               status = qlcnic_alloc_mbx_args(&cmd, adapter,
+                                              QLCNIC_CMD_STOP_NIC_FUNC);
+
+       if (status)
+               return;
+
+       cmd.req.arg[1] = QLC_REGISTER_LB_IDC | QLC_INIT_FW_RESOURCES;
+
+       if (adapter->dcb)
+               cmd.req.arg[1] |= QLC_REGISTER_DCB_AEN;
+
+       status = qlcnic_issue_cmd(adapter, &cmd);
+       if (status)
+               dev_err(&adapter->pdev->dev,
+                       "Failed to %s in NIC IDC function event.\n",
+                       (enable ? "register" : "unregister"));
+
+       qlcnic_free_mbx_args(&cmd);
+}
+
+static int qlcnic_83xx_set_port_config(struct qlcnic_adapter *adapter)
+{
+       struct qlcnic_cmd_args cmd;
+       int err;
+
+       err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_SET_PORT_CONFIG);
+       if (err)
+               return err;
+
+       cmd.req.arg[1] = adapter->ahw->port_config;
+       err = qlcnic_issue_cmd(adapter, &cmd);
+       if (err)
+               dev_info(&adapter->pdev->dev, "Set Port Config failed.\n");
+       qlcnic_free_mbx_args(&cmd);
+       return err;
+}
+
+static int qlcnic_83xx_get_port_config(struct qlcnic_adapter *adapter)
+{
+       struct qlcnic_cmd_args cmd;
+       int err;
+
+       err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_PORT_CONFIG);
+       if (err)
+               return err;
+
+       err = qlcnic_issue_cmd(adapter, &cmd);
+       if (err)
+               dev_info(&adapter->pdev->dev, "Get Port config failed\n");
+       else
+               adapter->ahw->port_config = cmd.rsp.arg[1];
+       qlcnic_free_mbx_args(&cmd);
+       return err;
+}
+
+int qlcnic_83xx_setup_link_event(struct qlcnic_adapter *adapter, int enable)
+{
+       int err;
+       u32 temp;
+       struct qlcnic_cmd_args cmd;
+
+       err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_LINK_EVENT);
+       if (err)
+               return err;
+
+       temp = adapter->recv_ctx->context_id << 16;
+       cmd.req.arg[1] = (enable ? 1 : 0) | BIT_8 | temp;
+       err = qlcnic_issue_cmd(adapter, &cmd);
+       if (err)
+               dev_info(&adapter->pdev->dev,
+                        "Setup linkevent mailbox failed\n");
+       qlcnic_free_mbx_args(&cmd);
+       return err;
+}
+
+static void qlcnic_83xx_set_interface_id_promisc(struct qlcnic_adapter *adapter,
+                                                u32 *interface_id)
+{
+       if (qlcnic_sriov_pf_check(adapter)) {
+               qlcnic_alloc_lb_filters_mem(adapter);
+               qlcnic_pf_set_interface_id_promisc(adapter, interface_id);
+               adapter->rx_mac_learn = true;
+       } else {
+               if (!qlcnic_sriov_vf_check(adapter))
+                       *interface_id = adapter->recv_ctx->context_id << 16;
+       }
+}
+
+int qlcnic_83xx_nic_set_promisc(struct qlcnic_adapter *adapter, u32 mode)
+{
+       struct qlcnic_cmd_args *cmd = NULL;
+       u32 temp = 0;
+       int err;
+
+       if (adapter->recv_ctx->state == QLCNIC_HOST_CTX_STATE_FREED)
+               return -EIO;
+
+       cmd = kzalloc(sizeof(*cmd), GFP_ATOMIC);
+       if (!cmd)
+               return -ENOMEM;
+
+       err = qlcnic_alloc_mbx_args(cmd, adapter,
+                                   QLCNIC_CMD_CONFIGURE_MAC_RX_MODE);
+       if (err)
+               goto out;
+
+       cmd->type = QLC_83XX_MBX_CMD_NO_WAIT;
+       qlcnic_83xx_set_interface_id_promisc(adapter, &temp);
+
+       if (qlcnic_84xx_check(adapter) && qlcnic_sriov_pf_check(adapter))
+               mode = VPORT_MISS_MODE_ACCEPT_ALL;
+
+       cmd->req.arg[1] = mode | temp;
+       err = qlcnic_issue_cmd(adapter, cmd);
+       if (!err)
+               return err;
+
+       qlcnic_free_mbx_args(cmd);
+
+out:
+       kfree(cmd);
+       return err;
+}
+
+int qlcnic_83xx_loopback_test(struct net_device *netdev, u8 mode)
+{
+       struct qlcnic_adapter *adapter = netdev_priv(netdev);
+       struct qlcnic_hardware_context *ahw = adapter->ahw;
+       u8 drv_sds_rings = adapter->drv_sds_rings;
+       u8 drv_tx_rings = adapter->drv_tx_rings;
+       int ret = 0, loop = 0;
+
+       if (ahw->op_mode == QLCNIC_NON_PRIV_FUNC) {
+               netdev_warn(netdev,
+                           "Loopback test not supported in non privileged mode\n");
+               return -ENOTSUPP;
+       }
+
+       if (test_bit(__QLCNIC_RESETTING, &adapter->state)) {
+               netdev_info(netdev, "Device is resetting\n");
+               return -EBUSY;
+       }
+
+       if (qlcnic_get_diag_lock(adapter)) {
+               netdev_info(netdev, "Device is in diagnostics mode\n");
+               return -EBUSY;
+       }
+
+       netdev_info(netdev, "%s loopback test in progress\n",
+                   mode == QLCNIC_ILB_MODE ? "internal" : "external");
+
+       ret = qlcnic_83xx_diag_alloc_res(netdev, QLCNIC_LOOPBACK_TEST,
+                                        drv_sds_rings);
+       if (ret)
+               goto fail_diag_alloc;
+
+       ret = qlcnic_83xx_set_lb_mode(adapter, mode);
+       if (ret)
+               goto free_diag_res;
+
+       /* Poll for link up event before running traffic */
+       do {
+               msleep(QLC_83XX_LB_MSLEEP_COUNT);
+
+               if (test_bit(__QLCNIC_RESETTING, &adapter->state)) {
+                       netdev_info(netdev,
+                                   "Device is resetting, free LB test resources\n");
+                       ret = -EBUSY;
+                       goto free_diag_res;
+               }
+               if (loop++ > QLC_83XX_LB_WAIT_COUNT) {
+                       netdev_info(netdev,
+                                   "Firmware didn't send link up event for loopback request\n");
+                       ret = -ETIMEDOUT;
+                       qlcnic_83xx_clear_lb_mode(adapter, mode);
+                       goto free_diag_res;
+               }
+       } while ((adapter->ahw->linkup && ahw->has_link_events) != 1);
+
+       ret = qlcnic_do_lb_test(adapter, mode);
+
+       qlcnic_83xx_clear_lb_mode(adapter, mode);
+
+free_diag_res:
+       qlcnic_83xx_diag_free_res(netdev, drv_sds_rings);
+
+fail_diag_alloc:
+       adapter->drv_sds_rings = drv_sds_rings;
+       adapter->drv_tx_rings = drv_tx_rings;
+       qlcnic_release_diag_lock(adapter);
+       return ret;
+}
+
+static void qlcnic_extend_lb_idc_cmpltn_wait(struct qlcnic_adapter *adapter,
+                                            u32 *max_wait_count)
+{
+       struct qlcnic_hardware_context *ahw = adapter->ahw;
+       int temp;
+
+       netdev_info(adapter->netdev, "Received loopback IDC time extend event for 0x%x seconds\n",
+                   ahw->extend_lb_time);
+       temp = ahw->extend_lb_time * 1000;
+       *max_wait_count += temp / QLC_83XX_LB_MSLEEP_COUNT;
+       ahw->extend_lb_time = 0;
+}
+
+static int qlcnic_83xx_set_lb_mode(struct qlcnic_adapter *adapter, u8 mode)
+{
+       struct qlcnic_hardware_context *ahw = adapter->ahw;
+       struct net_device *netdev = adapter->netdev;
+       u32 config, max_wait_count;
+       int status = 0, loop = 0;
+
+       ahw->extend_lb_time = 0;
+       max_wait_count = QLC_83XX_LB_WAIT_COUNT;
+       status = qlcnic_83xx_get_port_config(adapter);
+       if (status)
+               return status;
+
+       config = ahw->port_config;
+
+       /* Check if port is already in loopback mode */
+       if ((config & QLC_83XX_CFG_LOOPBACK_HSS) ||
+           (config & QLC_83XX_CFG_LOOPBACK_EXT)) {
+               netdev_err(netdev,
+                          "Port already in Loopback mode.\n");
+               return -EINPROGRESS;
+       }
+
+       set_bit(QLC_83XX_IDC_COMP_AEN, &ahw->idc.status);
+
+       if (mode == QLCNIC_ILB_MODE)
+               ahw->port_config |= QLC_83XX_CFG_LOOPBACK_HSS;
+       if (mode == QLCNIC_ELB_MODE)
+               ahw->port_config |= QLC_83XX_CFG_LOOPBACK_EXT;
+
+       status = qlcnic_83xx_set_port_config(adapter);
+       if (status) {
+               netdev_err(netdev,
+                          "Failed to Set Loopback Mode = 0x%x.\n",
+                          ahw->port_config);
+               ahw->port_config = config;
+               clear_bit(QLC_83XX_IDC_COMP_AEN, &ahw->idc.status);
+               return status;
+       }
+
+       /* Wait for Link and IDC Completion AEN */
+       do {
+               msleep(QLC_83XX_LB_MSLEEP_COUNT);
+
+               if (test_bit(__QLCNIC_RESETTING, &adapter->state)) {
+                       netdev_info(netdev,
+                                   "Device is resetting, free LB test resources\n");
+                       clear_bit(QLC_83XX_IDC_COMP_AEN, &ahw->idc.status);
+                       return -EBUSY;
+               }
+
+               if (ahw->extend_lb_time)
+                       qlcnic_extend_lb_idc_cmpltn_wait(adapter,
+                                                        &max_wait_count);
+
+               if (loop++ > max_wait_count) {
+                       netdev_err(netdev, "%s: Did not receive loopback IDC completion AEN\n",
+                                  __func__);
+                       clear_bit(QLC_83XX_IDC_COMP_AEN, &ahw->idc.status);
+                       qlcnic_83xx_clear_lb_mode(adapter, mode);
+                       return -ETIMEDOUT;
+               }
+       } while (test_bit(QLC_83XX_IDC_COMP_AEN, &ahw->idc.status));
+
+       qlcnic_sre_macaddr_change(adapter, adapter->mac_addr, 0,
+                                 QLCNIC_MAC_ADD);
+       return status;
+}
+
+static int qlcnic_83xx_clear_lb_mode(struct qlcnic_adapter *adapter, u8 mode)
+{
+       struct qlcnic_hardware_context *ahw = adapter->ahw;
+       u32 config = ahw->port_config, max_wait_count;
+       struct net_device *netdev = adapter->netdev;
+       int status = 0, loop = 0;
+
+       ahw->extend_lb_time = 0;
+       max_wait_count = QLC_83XX_LB_WAIT_COUNT;
+       set_bit(QLC_83XX_IDC_COMP_AEN, &ahw->idc.status);
+       if (mode == QLCNIC_ILB_MODE)
+               ahw->port_config &= ~QLC_83XX_CFG_LOOPBACK_HSS;
+       if (mode == QLCNIC_ELB_MODE)
+               ahw->port_config &= ~QLC_83XX_CFG_LOOPBACK_EXT;
+
+       status = qlcnic_83xx_set_port_config(adapter);
+       if (status) {
+               netdev_err(netdev,
+                          "Failed to Clear Loopback Mode = 0x%x.\n",
+                          ahw->port_config);
+               ahw->port_config = config;
+               clear_bit(QLC_83XX_IDC_COMP_AEN, &ahw->idc.status);
+               return status;
+       }
+
+       /* Wait for Link and IDC Completion AEN */
+       do {
+               msleep(QLC_83XX_LB_MSLEEP_COUNT);
+
+               if (test_bit(__QLCNIC_RESETTING, &adapter->state)) {
+                       netdev_info(netdev,
+                                   "Device is resetting, free LB test resources\n");
+                       clear_bit(QLC_83XX_IDC_COMP_AEN, &ahw->idc.status);
+                       return -EBUSY;
+               }
+
+               if (ahw->extend_lb_time)
+                       qlcnic_extend_lb_idc_cmpltn_wait(adapter,
+                                                        &max_wait_count);
+
+               if (loop++ > max_wait_count) {
+                       netdev_err(netdev, "%s: Did not receive loopback IDC completion AEN\n",
+                                  __func__);
+                       clear_bit(QLC_83XX_IDC_COMP_AEN, &ahw->idc.status);
+                       return -ETIMEDOUT;
+               }
+       } while (test_bit(QLC_83XX_IDC_COMP_AEN, &ahw->idc.status));
+
+       qlcnic_sre_macaddr_change(adapter, adapter->mac_addr, 0,
+                                 QLCNIC_MAC_DEL);
+       return status;
+}
+
+static void qlcnic_83xx_set_interface_id_ipaddr(struct qlcnic_adapter *adapter,
+                                               u32 *interface_id)
+{
+       if (qlcnic_sriov_pf_check(adapter)) {
+               qlcnic_pf_set_interface_id_ipaddr(adapter, interface_id);
+       } else {
+               if (!qlcnic_sriov_vf_check(adapter))
+                       *interface_id = adapter->recv_ctx->context_id << 16;
+       }
+}
+
+void qlcnic_83xx_config_ipaddr(struct qlcnic_adapter *adapter, __be32 ip,
+                              int mode)
+{
+       int err;
+       u32 temp = 0, temp_ip;
+       struct qlcnic_cmd_args cmd;
+
+       err = qlcnic_alloc_mbx_args(&cmd, adapter,
+                                   QLCNIC_CMD_CONFIGURE_IP_ADDR);
+       if (err)
+               return;
+
+       qlcnic_83xx_set_interface_id_ipaddr(adapter, &temp);
+
+       if (mode == QLCNIC_IP_UP)
+               cmd.req.arg[1] = 1 | temp;
+       else
+               cmd.req.arg[1] = 2 | temp;
+
+       /*
+        * Adapter needs IP address in network byte order.
+        * But hardware mailbox registers go through writel(), hence IP address
+        * gets swapped on big endian architecture.
+        * To negate swapping of writel() on big endian architecture
+        * use swab32(value).
+        */
+
+       temp_ip = swab32(ntohl(ip));
+       memcpy(&cmd.req.arg[2], &temp_ip, sizeof(u32));
+       err = qlcnic_issue_cmd(adapter, &cmd);
+       if (err != QLCNIC_RCODE_SUCCESS)
+               dev_err(&adapter->netdev->dev,
+                       "could not notify %s IP 0x%x request\n",
+                       (mode == QLCNIC_IP_UP) ? "Add" : "Remove", ip);
+
+       qlcnic_free_mbx_args(&cmd);
+}
+
+int qlcnic_83xx_config_hw_lro(struct qlcnic_adapter *adapter, int mode)
+{
+       int err;
+       u32 temp, arg1;
+       struct qlcnic_cmd_args cmd;
+       int lro_bit_mask;
+
+       lro_bit_mask = (mode ? (BIT_0 | BIT_1 | BIT_2 | BIT_3) : 0);
+
+       if (adapter->recv_ctx->state == QLCNIC_HOST_CTX_STATE_FREED)
+               return 0;
+
+       err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CONFIGURE_HW_LRO);
+       if (err)
+               return err;
+
+       temp = adapter->recv_ctx->context_id << 16;
+       arg1 = lro_bit_mask | temp;
+       cmd.req.arg[1] = arg1;
+
+       err = qlcnic_issue_cmd(adapter, &cmd);
+       if (err)
+               dev_info(&adapter->pdev->dev, "LRO config failed\n");
+       qlcnic_free_mbx_args(&cmd);
+
+       return err;
+}
+
+int qlcnic_83xx_config_rss(struct qlcnic_adapter *adapter, int enable)
+{
+       int err;
+       u32 word;
+       struct qlcnic_cmd_args cmd;
+       const u64 key[] = { 0xbeac01fa6a42b73bULL, 0x8030f20c77cb2da3ULL,
+                           0xae7b30b4d0ca2bcbULL, 0x43a38fb04167253dULL,
+                           0x255b0ec26d5a56daULL };
+
+       err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CONFIGURE_RSS);
+       if (err)
+               return err;
+       /*
+        * RSS request:
+        * bits 3-0: Rsvd
+        *      5-4: hash_type_ipv4
+        *      7-6: hash_type_ipv6
+        *        8: enable
+        *        9: use indirection table
+        *    16-31: indirection table mask
+        */
+       word =  ((u32)(RSS_HASHTYPE_IP_TCP & 0x3) << 4) |
+               ((u32)(RSS_HASHTYPE_IP_TCP & 0x3) << 6) |
+               ((u32)(enable & 0x1) << 8) |
+               ((0x7ULL) << 16);
+       cmd.req.arg[1] = (adapter->recv_ctx->context_id);
+       cmd.req.arg[2] = word;
+       memcpy(&cmd.req.arg[4], key, sizeof(key));
+
+       err = qlcnic_issue_cmd(adapter, &cmd);
+
+       if (err)
+               dev_info(&adapter->pdev->dev, "RSS config failed\n");
+       qlcnic_free_mbx_args(&cmd);
+
+       return err;
+}
+
+static void qlcnic_83xx_set_interface_id_macaddr(struct qlcnic_adapter *adapter,
+                                                u32 *interface_id)
+{
+       if (qlcnic_sriov_pf_check(adapter)) {
+               qlcnic_pf_set_interface_id_macaddr(adapter, interface_id);
+       } else {
+               if (!qlcnic_sriov_vf_check(adapter))
+                       *interface_id = adapter->recv_ctx->context_id << 16;
+       }
+}
+
+int qlcnic_83xx_sre_macaddr_change(struct qlcnic_adapter *adapter, u8 *addr,
+                                  u16 vlan_id, u8 op)
+{
+       struct qlcnic_cmd_args *cmd = NULL;
+       struct qlcnic_macvlan_mbx mv;
+       u32 *buf, temp = 0;
+       int err;
+
+       if (adapter->recv_ctx->state == QLCNIC_HOST_CTX_STATE_FREED)
+               return -EIO;
+
+       cmd = kzalloc(sizeof(*cmd), GFP_ATOMIC);
+       if (!cmd)
+               return -ENOMEM;
+
+       err = qlcnic_alloc_mbx_args(cmd, adapter, QLCNIC_CMD_CONFIG_MAC_VLAN);
+       if (err)
+               goto out;
+
+       cmd->type = QLC_83XX_MBX_CMD_NO_WAIT;
+
+       if (vlan_id)
+               op = (op == QLCNIC_MAC_ADD || op == QLCNIC_MAC_VLAN_ADD) ?
+                    QLCNIC_MAC_VLAN_ADD : QLCNIC_MAC_VLAN_DEL;
+
+       cmd->req.arg[1] = op | (1 << 8);
+       qlcnic_83xx_set_interface_id_macaddr(adapter, &temp);
+       cmd->req.arg[1] |= temp;
+       mv.vlan = vlan_id;
+       mv.mac_addr0 = addr[0];
+       mv.mac_addr1 = addr[1];
+       mv.mac_addr2 = addr[2];
+       mv.mac_addr3 = addr[3];
+       mv.mac_addr4 = addr[4];
+       mv.mac_addr5 = addr[5];
+       buf = &cmd->req.arg[2];
+       memcpy(buf, &mv, sizeof(struct qlcnic_macvlan_mbx));
+       err = qlcnic_issue_cmd(adapter, cmd);
+       if (!err)
+               return err;
+
+       qlcnic_free_mbx_args(cmd);
+out:
+       kfree(cmd);
+       return err;
+}
+
+void qlcnic_83xx_change_l2_filter(struct qlcnic_adapter *adapter, u64 *addr,
+                                 u16 vlan_id)
+{
+       u8 mac[ETH_ALEN];
+       memcpy(&mac, addr, ETH_ALEN);
+       qlcnic_83xx_sre_macaddr_change(adapter, mac, vlan_id, QLCNIC_MAC_ADD);
+}
+
+static void qlcnic_83xx_configure_mac(struct qlcnic_adapter *adapter, u8 *mac,
+                                     u8 type, struct qlcnic_cmd_args *cmd)
+{
+       switch (type) {
+       case QLCNIC_SET_STATION_MAC:
+       case QLCNIC_SET_FAC_DEF_MAC:
+               memcpy(&cmd->req.arg[2], mac, sizeof(u32));
+               memcpy(&cmd->req.arg[3], &mac[4], sizeof(u16));
+               break;
+       }
+       cmd->req.arg[1] = type;
+}
+
+int qlcnic_83xx_get_mac_address(struct qlcnic_adapter *adapter, u8 *mac,
+                               u8 function)
+{
+       int err, i;
+       struct qlcnic_cmd_args cmd;
+       u32 mac_low, mac_high;
+
+       err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_MAC_ADDRESS);
+       if (err)
+               return err;
+
+       qlcnic_83xx_configure_mac(adapter, mac, QLCNIC_GET_CURRENT_MAC, &cmd);
+       err = qlcnic_issue_cmd(adapter, &cmd);
+
+       if (err == QLCNIC_RCODE_SUCCESS) {
+               mac_low = cmd.rsp.arg[1];
+               mac_high = cmd.rsp.arg[2];
+
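+               /* mac_high carries the two most significant bytes of the
+                * address, mac_low the remaining four, most significant first.
+                */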
+               for (i = 0; i < 2; i++)
+                       mac[i] = (u8) (mac_high >> ((1 - i) * 8));
+               for (i = 2; i < 6; i++)
+                       mac[i] = (u8) (mac_low >> ((5 - i) * 8));
+       } else {
+               dev_err(&adapter->pdev->dev, "Failed to get mac address %d\n",
+                       err);
+               err = -EIO;
+       }
+       qlcnic_free_mbx_args(&cmd);
+       return err;
+}
+
+static int qlcnic_83xx_set_rx_intr_coal(struct qlcnic_adapter *adapter)
+{
+       struct qlcnic_nic_intr_coalesce *coal = &adapter->ahw->coal;
+       struct qlcnic_cmd_args cmd;
+       u16 temp;
+       int err;
+
+       err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CONFIG_INTR_COAL);
+       if (err)
+               return err;
+
+       temp = adapter->recv_ctx->context_id;
+       cmd.req.arg[1] = QLCNIC_INTR_COAL_TYPE_RX | temp << 16;
+       temp = coal->rx_time_us;
+       cmd.req.arg[2] = coal->rx_packets | temp << 16;
+       cmd.req.arg[3] = coal->flag;
+
+       err = qlcnic_issue_cmd(adapter, &cmd);
+       if (err != QLCNIC_RCODE_SUCCESS)
+               netdev_err(adapter->netdev,
+                          "failed to set interrupt coalescing parameters\n");
+
+       qlcnic_free_mbx_args(&cmd);
+
+       return err;
+}
+
+static int qlcnic_83xx_set_tx_intr_coal(struct qlcnic_adapter *adapter)
+{
+       struct qlcnic_nic_intr_coalesce *coal = &adapter->ahw->coal;
+       struct qlcnic_cmd_args cmd;
+       u16 temp;
+       int err;
+
+       err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CONFIG_INTR_COAL);
+       if (err)
+               return err;
+
+       temp = adapter->tx_ring->ctx_id;
+       cmd.req.arg[1] = QLCNIC_INTR_COAL_TYPE_TX | temp << 16;
+       temp = coal->tx_time_us;
+       cmd.req.arg[2] = coal->tx_packets | temp << 16;
+       cmd.req.arg[3] = coal->flag;
+
+       err = qlcnic_issue_cmd(adapter, &cmd);
+       if (err != QLCNIC_RCODE_SUCCESS)
+               netdev_err(adapter->netdev,
+                          "failed to set interrupt coalescing parameters\n");
+
+       qlcnic_free_mbx_args(&cmd);
+
+       return err;
+}
+
+int qlcnic_83xx_set_rx_tx_intr_coal(struct qlcnic_adapter *adapter)
+{
+       int err = 0;
+
+       err = qlcnic_83xx_set_rx_intr_coal(adapter);
+       if (err)
+               netdev_err(adapter->netdev,
+                          "failed to set Rx coalescing parameters\n");
+
+       err = qlcnic_83xx_set_tx_intr_coal(adapter);
+       if (err)
+               netdev_err(adapter->netdev,
+                          "failed to set Tx coalescing parameters\n");
+
+       return err;
+}
+
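+/* ethtool coalescing handler: work out whether the Rx, Tx or both
+ * settings changed and push only the affected parameters to firmware.
+ */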
+int qlcnic_83xx_config_intr_coal(struct qlcnic_adapter *adapter,
+                                struct ethtool_coalesce *ethcoal)
+{
+       struct qlcnic_nic_intr_coalesce *coal = &adapter->ahw->coal;
+       u32 rx_coalesce_usecs, rx_max_frames;
+       u32 tx_coalesce_usecs, tx_max_frames;
+       int err;
+
+       if (adapter->recv_ctx->state == QLCNIC_HOST_CTX_STATE_FREED)
+               return -EIO;
+
+       tx_coalesce_usecs = ethcoal->tx_coalesce_usecs;
+       tx_max_frames = ethcoal->tx_max_coalesced_frames;
+       rx_coalesce_usecs = ethcoal->rx_coalesce_usecs;
+       rx_max_frames = ethcoal->rx_max_coalesced_frames;
+       coal->flag = QLCNIC_INTR_DEFAULT;
+
+       if ((coal->rx_time_us == rx_coalesce_usecs) &&
+           (coal->rx_packets == rx_max_frames)) {
+               coal->type = QLCNIC_INTR_COAL_TYPE_TX;
+               coal->tx_time_us = tx_coalesce_usecs;
+               coal->tx_packets = tx_max_frames;
+       } else if ((coal->tx_time_us == tx_coalesce_usecs) &&
+                  (coal->tx_packets == tx_max_frames)) {
+               coal->type = QLCNIC_INTR_COAL_TYPE_RX;
+               coal->rx_time_us = rx_coalesce_usecs;
+               coal->rx_packets = rx_max_frames;
+       } else {
+               coal->type = QLCNIC_INTR_COAL_TYPE_RX_TX;
+               coal->rx_time_us = rx_coalesce_usecs;
+               coal->rx_packets = rx_max_frames;
+               coal->tx_time_us = tx_coalesce_usecs;
+               coal->tx_packets = tx_max_frames;
+       }
+
+       switch (coal->type) {
+       case QLCNIC_INTR_COAL_TYPE_RX:
+               err = qlcnic_83xx_set_rx_intr_coal(adapter);
+               break;
+       case QLCNIC_INTR_COAL_TYPE_TX:
+               err = qlcnic_83xx_set_tx_intr_coal(adapter);
+               break;
+       case QLCNIC_INTR_COAL_TYPE_RX_TX:
+               err = qlcnic_83xx_set_rx_tx_intr_coal(adapter);
+               break;
+       default:
+               err = -EINVAL;
+               netdev_err(adapter->netdev,
+                          "Invalid Interrupt coalescing type\n");
+               break;
+       }
+
+       return err;
+}
+
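+/* Decode a link change AEN: extract link state, speed, duplex,
+ * autoneg, module type and loopback mode, then notify the stack.
+ */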
+static void qlcnic_83xx_handle_link_aen(struct qlcnic_adapter *adapter,
+                                       u32 data[])
+{
+       struct qlcnic_hardware_context *ahw = adapter->ahw;
+       u8 link_status, duplex;
+
+       /* link status, speed and duplex are packed into the AEN payload */
+       link_status = LSB(data[3]) & 1;
+       if (link_status) {
+               ahw->link_speed = MSW(data[2]);
+               duplex = LSB(MSW(data[3]));
+               if (duplex)
+                       ahw->link_duplex = DUPLEX_FULL;
+               else
+                       ahw->link_duplex = DUPLEX_HALF;
+       } else {
+               ahw->link_speed = SPEED_UNKNOWN;
+               ahw->link_duplex = DUPLEX_UNKNOWN;
+       }
+
+       ahw->link_autoneg = MSB(MSW(data[3]));
+       ahw->module_type = MSB(LSW(data[3]));
+       ahw->has_link_events = 1;
+       ahw->lb_mode = data[4] & QLCNIC_LB_MODE_MASK;
+       qlcnic_advert_link_change(adapter, link_status);
+}
+
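+/* Mailbox interrupt handler: under the AEN lock, either process an
+ * asynchronous event or wake the mailbox response waiter, then unmask
+ * the interrupt before returning.
+ */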
+static irqreturn_t qlcnic_83xx_handle_aen(int irq, void *data)
+{
+       u32 mask, resp, event, rsp_status = QLC_83XX_MBX_RESPONSE_ARRIVED;
+       struct qlcnic_adapter *adapter = data;
+       struct qlcnic_mailbox *mbx;
+       unsigned long flags;
+
+       mbx = adapter->ahw->mailbox;
+       spin_lock_irqsave(&mbx->aen_lock, flags);
+       resp = QLCRDX(adapter->ahw, QLCNIC_FW_MBX_CTRL);
+       if (!(resp & QLCNIC_SET_OWNER))
+               goto out;
+
+       event = readl(QLCNIC_MBX_FW(adapter->ahw, 0));
+       if (event & QLCNIC_MBX_ASYNC_EVENT) {
+               __qlcnic_83xx_process_aen(adapter);
+       } else {
+               if (mbx->rsp_status != rsp_status)
+                       qlcnic_83xx_notify_mbx_response(mbx);
+               else
+                       adapter->stats.mbx_spurious_intr++;
+       }
+
+out:
+       mask = QLCRDX(adapter->ahw, QLCNIC_DEF_INT_MASK);
+       writel(0, adapter->ahw->pci_base0 + mask);
+       spin_unlock_irqrestore(&mbx->aen_lock, flags);
+       return IRQ_HANDLED;
+}
+
+int qlcnic_83xx_set_nic_info(struct qlcnic_adapter *adapter,
+                            struct qlcnic_info *nic)
+{
+       int i, err = -EIO;
+       struct qlcnic_cmd_args cmd;
+
+       if (adapter->ahw->op_mode != QLCNIC_MGMT_FUNC) {
+               dev_err(&adapter->pdev->dev,
+                       "%s: Error, invoked by non management func\n",
+                       __func__);
+               return err;
+       }
+
+       err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_SET_NIC_INFO);
+       if (err)
+               return err;
+
+       cmd.req.arg[1] = (nic->pci_func << 16);
+       cmd.req.arg[2] = 0x1 << 16;
+       cmd.req.arg[3] = nic->phys_port | (nic->switch_mode << 16);
+       cmd.req.arg[4] = nic->capabilities;
+       cmd.req.arg[5] = (nic->max_mac_filters & 0xFF) | ((nic->max_mtu) << 16);
+       cmd.req.arg[6] = (nic->max_tx_ques) | ((nic->max_rx_ques) << 16);
+       cmd.req.arg[7] = (nic->min_tx_bw) | ((nic->max_tx_bw) << 16);
+       for (i = 8; i < 32; i++)
+               cmd.req.arg[i] = 0;
+
+       err = qlcnic_issue_cmd(adapter, &cmd);
+
+       if (err != QLCNIC_RCODE_SUCCESS) {
+               dev_err(&adapter->pdev->dev, "Failed to set nic info %d\n",
+                       err);
+               err = -EIO;
+       }
+
+       qlcnic_free_mbx_args(&cmd);
+
+       return err;
+}
+
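+/* Fetch partition info (port, capabilities, MTU, queue and bandwidth
+ * limits) for the requested function via the GET_NIC_INFO command.
+ */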
+int qlcnic_83xx_get_nic_info(struct qlcnic_adapter *adapter,
+                            struct qlcnic_info *npar_info, u8 func_id)
+{
+       int err;
+       u32 temp;
+       u8 op = 0;
+       struct qlcnic_cmd_args cmd;
+       struct qlcnic_hardware_context *ahw = adapter->ahw;
+
+       err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_NIC_INFO);
+       if (err)
+               return err;
+
+       if (func_id != ahw->pci_func) {
+               temp = func_id << 16;
+               cmd.req.arg[1] = op | BIT_31 | temp;
+       } else {
+               cmd.req.arg[1] = ahw->pci_func << 16;
+       }
+       err = qlcnic_issue_cmd(adapter, &cmd);
+       if (err) {
+               dev_info(&adapter->pdev->dev,
+                        "Failed to get nic info %d\n", err);
+               goto out;
+       }
+
+       npar_info->op_type = cmd.rsp.arg[1];
+       npar_info->pci_func = cmd.rsp.arg[2] & 0xFFFF;
+       npar_info->op_mode = (cmd.rsp.arg[2] & 0xFFFF0000) >> 16;
+       npar_info->phys_port = cmd.rsp.arg[3] & 0xFFFF;
+       npar_info->switch_mode = (cmd.rsp.arg[3] & 0xFFFF0000) >> 16;
+       npar_info->capabilities = cmd.rsp.arg[4];
+       npar_info->max_mac_filters = cmd.rsp.arg[5] & 0xFF;
+       npar_info->max_mtu = (cmd.rsp.arg[5] & 0xFFFF0000) >> 16;
+       npar_info->max_tx_ques = cmd.rsp.arg[6] & 0xFFFF;
+       npar_info->max_rx_ques = (cmd.rsp.arg[6] & 0xFFFF0000) >> 16;
+       npar_info->min_tx_bw = cmd.rsp.arg[7] & 0xFFFF;
+       npar_info->max_tx_bw = (cmd.rsp.arg[7] & 0xFFFF0000) >> 16;
+       if (cmd.rsp.arg[8] & 0x1)
+               npar_info->max_bw_reg_offset = (cmd.rsp.arg[8] & 0x7FFE) >> 1;
+       if (cmd.rsp.arg[8] & 0x10000) {
+               temp = (cmd.rsp.arg[8] & 0x7FFE0000) >> 17;
+               npar_info->max_linkspeed_reg_offset = temp;
+       }
+
+       memcpy(ahw->extra_capability, &cmd.rsp.arg[16],
+              sizeof(ahw->extra_capability));
+
+out:
+       qlcnic_free_mbx_args(&cmd);
+       return err;
+}
+
+int qlcnic_get_pci_func_type(struct qlcnic_adapter *adapter, u16 type,
+                            u16 *nic, u16 *fcoe, u16 *iscsi)
+{
+       struct device *dev = &adapter->pdev->dev;
+       int err = 0;
+
+       switch (type) {
+       case QLCNIC_TYPE_NIC:
+               (*nic)++;
+               break;
+       case QLCNIC_TYPE_FCOE:
+               (*fcoe)++;
+               break;
+       case QLCNIC_TYPE_ISCSI:
+               (*iscsi)++;
+               break;
+       default:
+               dev_err(dev, "%s: Unknown PCI type[%x]\n",
+                       __func__, type);
+               err = -EIO;
+       }
+
+       return err;
+}
+
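+/* Parse the GET_PCI_INFO response into per-function type, port,
+ * bandwidth and MAC entries, counting NIC/FCoE/iSCSI functions along
+ * the way.
+ */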
+int qlcnic_83xx_get_pci_info(struct qlcnic_adapter *adapter,
+                            struct qlcnic_pci_info *pci_info)
+{
+       struct qlcnic_hardware_context *ahw = adapter->ahw;
+       struct device *dev = &adapter->pdev->dev;
+       u16 nic = 0, fcoe = 0, iscsi = 0;
+       struct qlcnic_cmd_args cmd;
+       int i, err = 0, j = 0;
+       u32 temp;
+
+       err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_PCI_INFO);
+       if (err)
+               return err;
+
+       err = qlcnic_issue_cmd(adapter, &cmd);
+
+       ahw->total_nic_func = 0;
+       if (err == QLCNIC_RCODE_SUCCESS) {
+               ahw->max_pci_func = cmd.rsp.arg[1] & 0xFF;
+               for (i = 2, j = 0; j < ahw->max_vnic_func; j++, pci_info++) {
+                       pci_info->id = cmd.rsp.arg[i] & 0xFFFF;
+                       pci_info->active = (cmd.rsp.arg[i] & 0xFFFF0000) >> 16;
+                       i++;
+                       if (!pci_info->active) {
+                               i += QLC_SKIP_INACTIVE_PCI_REGS;
+                               continue;
+                       }
+                       pci_info->type = cmd.rsp.arg[i] & 0xFFFF;
+                       err = qlcnic_get_pci_func_type(adapter, pci_info->type,
+                                                      &nic, &fcoe, &iscsi);
+                       temp = (cmd.rsp.arg[i] & 0xFFFF0000) >> 16;
+                       pci_info->default_port = temp;
+                       i++;
+                       pci_info->tx_min_bw = cmd.rsp.arg[i] & 0xFFFF;
+                       temp = (cmd.rsp.arg[i] & 0xFFFF0000) >> 16;
+                       pci_info->tx_max_bw = temp;
+                       i = i + 2;
+                       memcpy(pci_info->mac, &cmd.rsp.arg[i], ETH_ALEN - 2);
+                       i++;
+                       memcpy(pci_info->mac + sizeof(u32), &cmd.rsp.arg[i], 2);
+                       i = i + 3;
+               }
+       } else {
+               dev_err(dev, "Failed to get PCI Info, error = %d\n", err);
+               err = -EIO;
+       }
+
+       ahw->total_nic_func = nic;
+       ahw->total_pci_func = nic + fcoe + iscsi;
+       if (ahw->total_nic_func == 0 || ahw->total_pci_func == 0) {
+               dev_err(dev, "%s: Invalid function count: total nic func[%x], total pci func[%x]\n",
+                       __func__, ahw->total_nic_func, ahw->total_pci_func);
+               err = -EIO;
+       }
+       qlcnic_free_mbx_args(&cmd);
+
+       return err;
+}
+
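+/* Register (op_type set) or unregister the interrupt sources with
+ * firmware and refresh the local interrupt table from the response.
+ */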
+int qlcnic_83xx_config_intrpt(struct qlcnic_adapter *adapter, bool op_type)
+{
+       int i, index, err;
+       u8 max_ints;
+       u32 val, temp, type;
+       struct qlcnic_cmd_args cmd;
+
+       max_ints = adapter->ahw->num_msix - 1;
+       err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CONFIG_INTRPT);
+       if (err)
+               return err;
+
+       cmd.req.arg[1] = max_ints;
+
+       if (qlcnic_sriov_vf_check(adapter))
+               cmd.req.arg[1] |= (adapter->ahw->pci_func << 8) | BIT_16;
+
+       for (i = 0, index = 2; i < max_ints; i++) {
+               type = op_type ? QLCNIC_INTRPT_ADD : QLCNIC_INTRPT_DEL;
+               val = type | (adapter->ahw->intr_tbl[i].type << 4);
+               if (adapter->ahw->intr_tbl[i].type == QLCNIC_INTRPT_MSIX)
+                       val |= (adapter->ahw->intr_tbl[i].id << 16);
+               cmd.req.arg[index++] = val;
+       }
+       err = qlcnic_issue_cmd(adapter, &cmd);
+       if (err) {
+               dev_err(&adapter->pdev->dev,
+                       "Failed to configure interrupts 0x%x\n", err);
+               goto out;
+       }
+
+       max_ints = cmd.rsp.arg[1];
+       for (i = 0, index = 2; i < max_ints; i++, index += 2) {
+               val = cmd.rsp.arg[index];
+               if (LSB(val)) {
+                       dev_info(&adapter->pdev->dev,
+                                "Can't configure interrupt %d\n",
+                                adapter->ahw->intr_tbl[i].id);
+                       continue;
+               }
+               if (op_type) {
+                       adapter->ahw->intr_tbl[i].id = MSW(val);
+                       adapter->ahw->intr_tbl[i].enabled = 1;
+                       temp = cmd.rsp.arg[index + 1];
+                       adapter->ahw->intr_tbl[i].src = temp;
+               } else {
+                       adapter->ahw->intr_tbl[i].id = i;
+                       adapter->ahw->intr_tbl[i].enabled = 0;
+                       adapter->ahw->intr_tbl[i].src = 0;
+               }
+       }
+out:
+       qlcnic_free_mbx_args(&cmd);
+       return err;
+}
+
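+/* Acquire the flash hardware lock, polling up to
+ * QLC_83XX_FLASH_LOCK_TIMEOUT times and reporting the current owner
+ * on failure.
+ */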
+int qlcnic_83xx_lock_flash(struct qlcnic_adapter *adapter)
+{
+       int id, timeout = 0;
+       u32 status = 0;
+
+       while (status == 0) {
+               status = QLC_SHARED_REG_RD32(adapter, QLCNIC_FLASH_LOCK);
+               if (status)
+                       break;
+
+               if (++timeout >= QLC_83XX_FLASH_LOCK_TIMEOUT) {
+                       id = QLC_SHARED_REG_RD32(adapter,
+                                                QLCNIC_FLASH_LOCK_OWNER);
+                       dev_err(&adapter->pdev->dev,
+                               "%s: failed, lock held by %d\n", __func__, id);
+                       return -EIO;
+               }
+               usleep_range(1000, 2000);
+       }
+
+       QLC_SHARED_REG_WR32(adapter, QLCNIC_FLASH_LOCK_OWNER, adapter->portnum);
+       return 0;
+}
+
+void qlcnic_83xx_unlock_flash(struct qlcnic_adapter *adapter)
+{
+       QLC_SHARED_REG_RD32(adapter, QLCNIC_FLASH_UNLOCK);
+       QLC_SHARED_REG_WR32(adapter, QLCNIC_FLASH_LOCK_OWNER, 0xFF);
+}
+
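+/* Read a run of dwords from flash without taking the flash lock,
+ * moving the direct access window whenever the read crosses a sector
+ * boundary.
+ */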
+int qlcnic_83xx_lockless_flash_read32(struct qlcnic_adapter *adapter,
+                                     u32 flash_addr, u8 *p_data,
+                                     int count)
+{
+       u32 word, range, flash_offset, addr = flash_addr, ret;
+       ulong indirect_add, direct_window;
+       int i, err = 0;
+
+       flash_offset = addr & (QLCNIC_FLASH_SECTOR_SIZE - 1);
+       if (addr & 0x3) {
+               dev_err(&adapter->pdev->dev, "Illegal addr = 0x%x\n", addr);
+               return -EIO;
+       }
+
+       qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_DIRECT_WINDOW,
+                                    (addr & 0xFFFF0000));
+
+       range = flash_offset + (count * sizeof(u32));
+       /* Check if data is spread across multiple sectors */
+       if (range > (QLCNIC_FLASH_SECTOR_SIZE - 1)) {
+               /* Multi sector read */
+               for (i = 0; i < count; i++) {
+                       indirect_add = QLC_83XX_FLASH_DIRECT_DATA(addr);
+                       ret = QLCRD32(adapter, indirect_add, &err);
+                       if (err == -EIO)
+                               return err;
+
+                       word = ret;
+                       *(u32 *)p_data = word;
+                       p_data = p_data + 4;
+                       addr = addr + 4;
+                       flash_offset = flash_offset + 4;
+
+                       if (flash_offset > (QLCNIC_FLASH_SECTOR_SIZE - 1)) {
+                               direct_window = QLC_83XX_FLASH_DIRECT_WINDOW;
+                               /* This write is needed once for each sector */
+                               qlcnic_83xx_wrt_reg_indirect(adapter,
+                                                            direct_window,
+                                                            (addr));
+                               flash_offset = 0;
+                       }
+               }
+       } else {
+               /* Single sector read */
+               for (i = 0; i < count; i++) {
+                       indirect_add = QLC_83XX_FLASH_DIRECT_DATA(addr);
+                       ret = QLCRD32(adapter, indirect_add, &err);
+                       if (err == -EIO)
+                               return err;
+
+                       word = ret;
+                       *(u32 *)p_data = word;
+                       p_data = p_data + 4;
+                       addr = addr + 4;
+               }
+       }
+
+       return 0;
+}
+
+static int qlcnic_83xx_poll_flash_status_reg(struct qlcnic_adapter *adapter)
+{
+       u32 status;
+       int retries = QLC_83XX_FLASH_READ_RETRY_COUNT;
+       int err = 0;
+
+       do {
+               status = QLCRD32(adapter, QLC_83XX_FLASH_STATUS, &err);
+               if (err == -EIO)
+                       return err;
+
+               if ((status & QLC_83XX_FLASH_STATUS_READY) ==
+                   QLC_83XX_FLASH_STATUS_READY)
+                       break;
+
+               usleep_range(1000, 1100);
+       } while (--retries);
+
+       if (!retries)
+               return -EIO;
+
+       return 0;
+}
+
+int qlcnic_83xx_enable_flash_write(struct qlcnic_adapter *adapter)
+{
+       int ret;
+       u32 cmd;
+
+       cmd = adapter->ahw->fdt.write_statusreg_cmd;
+       qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_ADDR,
+                                    (QLC_83XX_FLASH_FDT_WRITE_DEF_SIG | cmd));
+       qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_WRDATA,
+                                    adapter->ahw->fdt.write_enable_bits);
+       qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_CONTROL,
+                                    QLC_83XX_FLASH_SECOND_ERASE_MS_VAL);
+       ret = qlcnic_83xx_poll_flash_status_reg(adapter);
+       if (ret)
+               return -EIO;
+
+       return 0;
+}
+
+int qlcnic_83xx_disable_flash_write(struct qlcnic_adapter *adapter)
+{
+       int ret;
+
+       qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_ADDR,
+                                    (QLC_83XX_FLASH_FDT_WRITE_DEF_SIG |
+                                    adapter->ahw->fdt.write_statusreg_cmd));
+       qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_WRDATA,
+                                    adapter->ahw->fdt.write_disable_bits);
+       qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_CONTROL,
+                                    QLC_83XX_FLASH_SECOND_ERASE_MS_VAL);
+       ret = qlcnic_83xx_poll_flash_status_reg(adapter);
+       if (ret)
+               return -EIO;
+
+       return 0;
+}
+
+int qlcnic_83xx_read_flash_mfg_id(struct qlcnic_adapter *adapter)
+{
+       int ret, err = 0;
+       u32 mfg_id;
+
+       if (qlcnic_83xx_lock_flash(adapter))
+               return -EIO;
+
+       qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_ADDR,
+                                    QLC_83XX_FLASH_FDT_READ_MFG_ID_VAL);
+       qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_CONTROL,
+                                    QLC_83XX_FLASH_READ_CTRL);
+       ret = qlcnic_83xx_poll_flash_status_reg(adapter);
+       if (ret) {
+               qlcnic_83xx_unlock_flash(adapter);
+               return -EIO;
+       }
+
+       mfg_id = QLCRD32(adapter, QLC_83XX_FLASH_RDDATA, &err);
+       if (err == -EIO) {
+               qlcnic_83xx_unlock_flash(adapter);
+               return err;
+       }
+
+       adapter->flash_mfg_id = (mfg_id & 0xFF);
+       qlcnic_83xx_unlock_flash(adapter);
+
+       return 0;
+}
+
+int qlcnic_83xx_read_flash_descriptor_table(struct qlcnic_adapter *adapter)
+{
+       int count, fdt_size, ret = 0;
+
+       fdt_size = sizeof(struct qlcnic_fdt);
+       count = fdt_size / sizeof(u32);
+
+       if (qlcnic_83xx_lock_flash(adapter))
+               return -EIO;
+
+       memset(&adapter->ahw->fdt, 0, fdt_size);
+       ret = qlcnic_83xx_lockless_flash_read32(adapter, QLCNIC_FDT_LOCATION,
+                                               (u8 *)&adapter->ahw->fdt,
+                                               count);
+       qlcnic_swap32_buffer((u32 *)&adapter->ahw->fdt, count);
+       qlcnic_83xx_unlock_flash(adapter);
+       return ret;
+}
+
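+/* Erase one flash sector: enable writes when the FDT manufacturer ID
+ * matches, program the byte-swapped sector address, issue the erase
+ * and poll for completion before disabling writes again.
+ */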
+int qlcnic_83xx_erase_flash_sector(struct qlcnic_adapter *adapter,
+                                  u32 sector_start_addr)
+{
+       u32 reversed_addr, addr1, addr2, cmd;
+       int ret = -EIO;
+
+       if (qlcnic_83xx_lock_flash(adapter) != 0)
+               return -EIO;
+
+       if (adapter->ahw->fdt.mfg_id == adapter->flash_mfg_id) {
+               ret = qlcnic_83xx_enable_flash_write(adapter);
+               if (ret) {
+                       qlcnic_83xx_unlock_flash(adapter);
+                       dev_err(&adapter->pdev->dev,
+                               "%s failed at %d\n",
+                               __func__, __LINE__);
+                       return ret;
+               }
+       }
+
+       ret = qlcnic_83xx_poll_flash_status_reg(adapter);
+       if (ret) {
+               qlcnic_83xx_unlock_flash(adapter);
+               dev_err(&adapter->pdev->dev,
+                       "%s: failed at %d\n", __func__, __LINE__);
+               return -EIO;
+       }
+
+       addr1 = (sector_start_addr & 0xFF) << 16;
+       addr2 = (sector_start_addr & 0xFF0000) >> 16;
+       reversed_addr = addr1 | addr2 | (sector_start_addr & 0xFF00);
+
+       qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_WRDATA,
+                                    reversed_addr);
+       cmd = QLC_83XX_FLASH_FDT_ERASE_DEF_SIG | adapter->ahw->fdt.erase_cmd;
+       if (adapter->ahw->fdt.mfg_id == adapter->flash_mfg_id)
+               qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_ADDR, cmd);
+       else
+               qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_ADDR,
+                                            QLC_83XX_FLASH_OEM_ERASE_SIG);
+       qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_CONTROL,
+                                    QLC_83XX_FLASH_LAST_ERASE_MS_VAL);
+
+       ret = qlcnic_83xx_poll_flash_status_reg(adapter);
+       if (ret) {
+               qlcnic_83xx_unlock_flash(adapter);
+               dev_err(&adapter->pdev->dev,
+                       "%s: failed at %d\n", __func__, __LINE__);
+               return -EIO;
+       }
+
+       if (adapter->ahw->fdt.mfg_id == adapter->flash_mfg_id) {
+               ret = qlcnic_83xx_disable_flash_write(adapter);
+               if (ret) {
+                       qlcnic_83xx_unlock_flash(adapter);
+                       dev_err(&adapter->pdev->dev,
+                               "%s: failed at %d\n", __func__, __LINE__);
+                       return ret;
+               }
+       }
+
+       qlcnic_83xx_unlock_flash(adapter);
+
+       return 0;
+}
+
+int qlcnic_83xx_flash_write32(struct qlcnic_adapter *adapter, u32 addr,
+                             u32 *p_data)
+{
+       int ret = -EIO;
+       u32 addr1 = 0x00800000 | (addr >> 2);
+
+       qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_ADDR, addr1);
+       qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_WRDATA, *p_data);
+       qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_CONTROL,
+                                    QLC_83XX_FLASH_LAST_ERASE_MS_VAL);
+       ret = qlcnic_83xx_poll_flash_status_reg(adapter);
+       if (ret) {
+               dev_err(&adapter->pdev->dev,
+                       "%s: failed at %d\n", __func__, __LINE__);
+               return -EIO;
+       }
+
+       return 0;
+}
+
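+/* Burst write of QLC_83XX_FLASH_WRITE_MIN..MAX dwords using the
+ * first/second/last multi-stage write patterns, with a final SPI
+ * status check for errors.
+ */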
+int qlcnic_83xx_flash_bulk_write(struct qlcnic_adapter *adapter, u32 addr,
+                                u32 *p_data, int count)
+{
+       u32 temp;
+       int ret = -EIO, err = 0;
+
+       if ((count < QLC_83XX_FLASH_WRITE_MIN) ||
+           (count > QLC_83XX_FLASH_WRITE_MAX)) {
+               dev_err(&adapter->pdev->dev,
+                       "%s: Invalid word count\n", __func__);
+               return -EIO;
+       }
+
+       temp = QLCRD32(adapter, QLC_83XX_FLASH_SPI_CONTROL, &err);
+       if (err == -EIO)
+               return err;
+
+       qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_SPI_CONTROL,
+                                    (temp | QLC_83XX_FLASH_SPI_CTRL));
+       qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_ADDR,
+                                    QLC_83XX_FLASH_ADDR_TEMP_VAL);
+
+       /* First DWORD write */
+       qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_WRDATA, *p_data++);
+       qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_CONTROL,
+                                    QLC_83XX_FLASH_FIRST_MS_PATTERN);
+       ret = qlcnic_83xx_poll_flash_status_reg(adapter);
+       if (ret) {
+               dev_err(&adapter->pdev->dev,
+                       "%s: failed at %d\n", __func__, __LINE__);
+               return -EIO;
+       }
+
+       count--;
+       qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_ADDR,
+                                    QLC_83XX_FLASH_ADDR_SECOND_TEMP_VAL);
+       /* Second to N-1 DWORD writes */
+       while (count != 1) {
+               qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_WRDATA,
+                                            *p_data++);
+               qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_CONTROL,
+                                            QLC_83XX_FLASH_SECOND_MS_PATTERN);
+               ret = qlcnic_83xx_poll_flash_status_reg(adapter);
+               if (ret) {
+                       dev_err(&adapter->pdev->dev,
+                               "%s: failed at %d\n", __func__, __LINE__);
+                       return -EIO;
+               }
+               count--;
+       }
+
+       qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_ADDR,
+                                    QLC_83XX_FLASH_ADDR_TEMP_VAL |
+                                    (addr >> 2));
+       /* Last DWORD write */
+       qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_WRDATA, *p_data++);
+       qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_CONTROL,
+                                    QLC_83XX_FLASH_LAST_MS_PATTERN);
+       ret = qlcnic_83xx_poll_flash_status_reg(adapter);
+       if (ret) {
+               dev_err(&adapter->pdev->dev,
+                       "%s: failed at %d\n", __func__, __LINE__);
+               return -EIO;
+       }
+
+       ret = QLCRD32(adapter, QLC_83XX_FLASH_SPI_STATUS, &err);
+       if (err == -EIO)
+               return err;
+
+       if ((ret & QLC_83XX_FLASH_SPI_CTRL) == QLC_83XX_FLASH_SPI_CTRL) {
+               dev_err(&adapter->pdev->dev, "%s: failed at %d\n",
+                       __func__, __LINE__);
+               /* Operation failed, clear error bit */
+               temp = QLCRD32(adapter, QLC_83XX_FLASH_SPI_CONTROL, &err);
+               if (err == -EIO)
+                       return err;
+
+               qlcnic_83xx_wrt_reg_indirect(adapter,
+                                            QLC_83XX_FLASH_SPI_CONTROL,
+                                            (temp | QLC_83XX_FLASH_SPI_CTRL));
+       }
+
+       return 0;
+}
+
+static void qlcnic_83xx_recover_driver_lock(struct qlcnic_adapter *adapter)
+{
+       u32 val, id;
+
+       val = QLCRDX(adapter->ahw, QLC_83XX_RECOVER_DRV_LOCK);
+
+       /* Check if recovery needs to be performed by the calling function */
+       if ((val & QLC_83XX_DRV_LOCK_RECOVERY_STATUS_MASK) == 0) {
+               val = val & ~0x3F;
+               val = val | ((adapter->portnum << 2) |
+                            QLC_83XX_NEED_DRV_LOCK_RECOVERY);
+               QLCWRX(adapter->ahw, QLC_83XX_RECOVER_DRV_LOCK, val);
+               dev_info(&adapter->pdev->dev,
+                        "%s: lock recovery initiated\n", __func__);
+               msleep(QLC_83XX_DRV_LOCK_RECOVERY_DELAY);
+               val = QLCRDX(adapter->ahw, QLC_83XX_RECOVER_DRV_LOCK);
+               id = ((val >> 2) & 0xF);
+               if (id == adapter->portnum) {
+                       val = val & ~QLC_83XX_DRV_LOCK_RECOVERY_STATUS_MASK;
+                       val = val | QLC_83XX_DRV_LOCK_RECOVERY_IN_PROGRESS;
+                       QLCWRX(adapter->ahw, QLC_83XX_RECOVER_DRV_LOCK, val);
+                       /* Force release the lock */
+                       QLCRDX(adapter->ahw, QLC_83XX_DRV_UNLOCK);
+                       /* Clear recovery bits */
+                       val = val & ~0x3F;
+                       QLCWRX(adapter->ahw, QLC_83XX_RECOVER_DRV_LOCK, val);
+                       dev_info(&adapter->pdev->dev,
+                                "%s: lock recovery completed\n", __func__);
+               } else {
+                       dev_info(&adapter->pdev->dev,
+                                "%s: func %d to resume lock recovery process\n",
+                                __func__, id);
+               }
+       } else {
+               dev_info(&adapter->pdev->dev,
+                        "%s: lock recovery initiated by other functions\n",
+                        __func__);
+       }
+}
+
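+/* Acquire the inter-driver lock; if the same owner is seen for too
+ * long, kick off lock recovery, and bump the lock alive counter once
+ * the lock is taken.
+ */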
+int qlcnic_83xx_lock_driver(struct qlcnic_adapter *adapter)
+{
+       u32 lock_alive_counter, val, id, i = 0, status = 0, temp = 0;
+       int max_attempt = 0;
+
+       while (status == 0) {
+               status = QLCRDX(adapter->ahw, QLC_83XX_DRV_LOCK);
+               if (status)
+                       break;
+
+               msleep(QLC_83XX_DRV_LOCK_WAIT_DELAY);
+               i++;
+
+               if (i == 1)
+                       temp = QLCRDX(adapter->ahw, QLC_83XX_DRV_LOCK_ID);
+
+               if (i == QLC_83XX_DRV_LOCK_WAIT_COUNTER) {
+                       val = QLCRDX(adapter->ahw, QLC_83XX_DRV_LOCK_ID);
+                       if (val == temp) {
+                               id = val & 0xFF;
+                               dev_info(&adapter->pdev->dev,
+                                        "%s: lock to be recovered from %d\n",
+                                        __func__, id);
+                               qlcnic_83xx_recover_driver_lock(adapter);
+                               i = 0;
+                               max_attempt++;
+                       } else {
+                               dev_err(&adapter->pdev->dev,
+                                       "%s: failed to get lock\n", __func__);
+                               return -EIO;
+                       }
+               }
+
+               /* Force exit from the while loop after a few attempts */
+               if (max_attempt == QLC_83XX_MAX_DRV_LOCK_RECOVERY_ATTEMPT) {
+                       dev_err(&adapter->pdev->dev,
+                               "%s: failed to get lock\n", __func__);
+                       return -EIO;
+               }
+       }
+
+       val = QLCRDX(adapter->ahw, QLC_83XX_DRV_LOCK_ID);
+       lock_alive_counter = val >> 8;
+       lock_alive_counter++;
+       val = lock_alive_counter << 8 | adapter->portnum;
+       QLCWRX(adapter->ahw, QLC_83XX_DRV_LOCK_ID, val);
+
+       return 0;
+}
+
+void qlcnic_83xx_unlock_driver(struct qlcnic_adapter *adapter)
+{
+       u32 val, lock_alive_counter, id;
+
+       val = QLCRDX(adapter->ahw, QLC_83XX_DRV_LOCK_ID);
+       id = val & 0xFF;
+       lock_alive_counter = val >> 8;
+
+       if (id != adapter->portnum)
+               dev_err(&adapter->pdev->dev,
+                       "%s: Warning: func %d is unlocking lock owned by %d\n",
+                       __func__, adapter->portnum, id);
+
+       val = (lock_alive_counter << 8) | 0xFF;
+       QLCWRX(adapter->ahw, QLC_83XX_DRV_LOCK_ID, val);
+       QLCRDX(adapter->ahw, QLC_83XX_DRV_UNLOCK);
+}
+
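+/* Write 128-bit chunks to adapter memory through the indirect
+ * interface; the destination must be 16-byte aligned and lie within
+ * the QDR or DDR address ranges.
+ */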
+int qlcnic_ms_mem_write128(struct qlcnic_adapter *adapter, u64 addr,
+                               u32 *data, u32 count)
+{
+       int i, j, ret = 0;
+       u32 temp;
+
+       /* Check alignment */
+       if (addr & 0xF)
+               return -EIO;
+
+       mutex_lock(&adapter->ahw->mem_lock);
+       qlcnic_ind_wr(adapter, QLCNIC_MS_ADDR_HI, 0);
+
+       for (i = 0; i < count; i++, addr += 16) {
+               if (!((ADDR_IN_RANGE(addr, QLCNIC_ADDR_QDR_NET,
+                                    QLCNIC_ADDR_QDR_NET_MAX)) ||
+                     (ADDR_IN_RANGE(addr, QLCNIC_ADDR_DDR_NET,
+                                    QLCNIC_ADDR_DDR_NET_MAX)))) {
+                       mutex_unlock(&adapter->ahw->mem_lock);
+                       return -EIO;
+               }
+
+               qlcnic_ind_wr(adapter, QLCNIC_MS_ADDR_LO, addr);
+               qlcnic_ind_wr(adapter, QLCNIC_MS_WRTDATA_LO, *data++);
+               qlcnic_ind_wr(adapter, QLCNIC_MS_WRTDATA_HI, *data++);
+               qlcnic_ind_wr(adapter, QLCNIC_MS_WRTDATA_ULO, *data++);
+               qlcnic_ind_wr(adapter, QLCNIC_MS_WRTDATA_UHI, *data++);
+               qlcnic_ind_wr(adapter, QLCNIC_MS_CTRL, QLCNIC_TA_WRITE_ENABLE);
+               qlcnic_ind_wr(adapter, QLCNIC_MS_CTRL, QLCNIC_TA_WRITE_START);
+
+               for (j = 0; j < MAX_CTL_CHECK; j++) {
+                       temp = qlcnic_ind_rd(adapter, QLCNIC_MS_CTRL);
+
+                       if ((temp & TA_CTL_BUSY) == 0)
+                               break;
+               }
+
+               /* Status check failure */
+               if (j >= MAX_CTL_CHECK) {
+                       printk_ratelimited(KERN_WARNING
+                                          "MS memory write failed\n");
+                       mutex_unlock(&adapter->ahw->mem_lock);
+                       return -EIO;
+               }
+       }
+
+       mutex_unlock(&adapter->ahw->mem_lock);
+
+       return ret;
+}
+
+int qlcnic_83xx_flash_read32(struct qlcnic_adapter *adapter, u32 flash_addr,
+                            u8 *p_data, int count)
+{
+       u32 word, addr = flash_addr, ret;
+       ulong indirect_addr;
+       int i, err = 0;
+
+       if (qlcnic_83xx_lock_flash(adapter) != 0)
+               return -EIO;
+
+       if (addr & 0x3) {
+               dev_err(&adapter->pdev->dev, "Illegal addr = 0x%x\n", addr);
+               qlcnic_83xx_unlock_flash(adapter);
+               return -EIO;
+       }
+
+       for (i = 0; i < count; i++) {
+               if (qlcnic_83xx_wrt_reg_indirect(adapter,
+                                                QLC_83XX_FLASH_DIRECT_WINDOW,
+                                                (addr))) {
+                       qlcnic_83xx_unlock_flash(adapter);
+                       return -EIO;
+               }
+
+               indirect_addr = QLC_83XX_FLASH_DIRECT_DATA(addr);
+               ret = QLCRD32(adapter, indirect_addr, &err);
+               if (err == -EIO)
+                       return err;
+
+               word = ret;
+               *(u32 *)p_data = word;
+               p_data = p_data + 4;
+               addr = addr + 4;
+       }
+
+       qlcnic_83xx_unlock_flash(adapter);
+
+       return 0;
+}
+
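+/* Check the per-function link state register and, when the link is
+ * up, query firmware for the current speed and SFP module type;
+ * returns the raw link configuration word.
+ */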
+int qlcnic_83xx_test_link(struct qlcnic_adapter *adapter)
+{
+       u8 pci_func;
+       int err;
+       u32 config = 0, state;
+       struct qlcnic_cmd_args cmd;
+       struct qlcnic_hardware_context *ahw = adapter->ahw;
+
+       if (qlcnic_sriov_vf_check(adapter))
+               pci_func = adapter->portnum;
+       else
+               pci_func = ahw->pci_func;
+
+       state = readl(ahw->pci_base0 + QLC_83XX_LINK_STATE(pci_func));
+       if (!QLC_83xx_FUNC_VAL(state, pci_func)) {
+               dev_info(&adapter->pdev->dev, "link state down\n");
+               return config;
+       }
+
+       err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_LINK_STATUS);
+       if (err)
+               return err;
+
+       err = qlcnic_issue_cmd(adapter, &cmd);
+       if (err) {
+               dev_info(&adapter->pdev->dev,
+                        "Get Link Status Command failed: 0x%x\n", err);
+               goto out;
+       } else {
+               config = cmd.rsp.arg[1];
+               switch (QLC_83XX_CURRENT_LINK_SPEED(config)) {
+               case QLC_83XX_10M_LINK:
+                       ahw->link_speed = SPEED_10;
+                       break;
+               case QLC_83XX_100M_LINK:
+                       ahw->link_speed = SPEED_100;
+                       break;
+               case QLC_83XX_1G_LINK:
+                       ahw->link_speed = SPEED_1000;
+                       break;
+               case QLC_83XX_10G_LINK:
+                       ahw->link_speed = SPEED_10000;
+                       break;
+               default:
+                       ahw->link_speed = 0;
+                       break;
+               }
+               config = cmd.rsp.arg[3];
+               switch (QLC_83XX_SFP_MODULE_TYPE(config)) {
+               case QLC_83XX_MODULE_FIBRE_10GBASE_LRM:
+               case QLC_83XX_MODULE_FIBRE_10GBASE_LR:
+               case QLC_83XX_MODULE_FIBRE_10GBASE_SR:
+                       ahw->supported_type = PORT_FIBRE;
+                       ahw->port_type = QLCNIC_XGBE;
+                       break;
+               case QLC_83XX_MODULE_FIBRE_1000BASE_SX:
+               case QLC_83XX_MODULE_FIBRE_1000BASE_LX:
+               case QLC_83XX_MODULE_FIBRE_1000BASE_CX:
+                       ahw->supported_type = PORT_FIBRE;
+                       ahw->port_type = QLCNIC_GBE;
+                       break;
+               case QLC_83XX_MODULE_TP_1000BASE_T:
+                       ahw->supported_type = PORT_TP;
+                       ahw->port_type = QLCNIC_GBE;
+                       break;
+               case QLC_83XX_MODULE_DA_10GE_PASSIVE_CP:
+               case QLC_83XX_MODULE_DA_10GE_ACTIVE_CP:
+               case QLC_83XX_MODULE_DA_10GE_LEGACY_CP:
+               case QLC_83XX_MODULE_DA_1GE_PASSIVE_CP:
+                       ahw->supported_type = PORT_DA;
+                       ahw->port_type = QLCNIC_XGBE;
+                       break;
+               default:
+                       ahw->supported_type = PORT_OTHER;
+                       ahw->port_type = QLCNIC_XGBE;
+               }
+               if (config & 1)
+                       err = 1;
+       }
+out:
+       qlcnic_free_mbx_args(&cmd);
+       return config;
+}
+
+int qlcnic_83xx_get_settings(struct qlcnic_adapter *adapter,
+                            struct ethtool_cmd *ecmd)
+{
+       struct qlcnic_hardware_context *ahw = adapter->ahw;
+       u32 config = 0;
+       int status = 0;
+
+       if (!test_bit(__QLCNIC_MAINTENANCE_MODE, &adapter->state)) {
+               /* Get port configuration info */
+               status = qlcnic_83xx_get_port_info(adapter);
+               /* Get Link Status related info */
+               config = qlcnic_83xx_test_link(adapter);
+               ahw->module_type = QLC_83XX_SFP_MODULE_TYPE(config);
+       }
+
+       /* hard code until there is a way to get it from flash */
+       ahw->board_type = QLCNIC_BRDTYPE_83XX_10G;
+
+       if (netif_running(adapter->netdev) && ahw->has_link_events) {
+               ethtool_cmd_speed_set(ecmd, ahw->link_speed);
+               ecmd->duplex = ahw->link_duplex;
+               ecmd->autoneg = ahw->link_autoneg;
+       } else {
+               ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN);
+               ecmd->duplex = DUPLEX_UNKNOWN;
+               ecmd->autoneg = AUTONEG_DISABLE;
+       }
+
+       ecmd->supported = (SUPPORTED_10baseT_Full |
+                          SUPPORTED_100baseT_Full |
+                          SUPPORTED_1000baseT_Full |
+                          SUPPORTED_10000baseT_Full |
+                          SUPPORTED_Autoneg);
+
+       if (ecmd->autoneg == AUTONEG_ENABLE) {
+               if (ahw->port_config & QLC_83XX_10_CAPABLE)
+                       ecmd->advertising |= SUPPORTED_10baseT_Full;
+               if (ahw->port_config & QLC_83XX_100_CAPABLE)
+                       ecmd->advertising |= SUPPORTED_100baseT_Full;
+               if (ahw->port_config & QLC_83XX_1G_CAPABLE)
+                       ecmd->advertising |= SUPPORTED_1000baseT_Full;
+               if (ahw->port_config & QLC_83XX_10G_CAPABLE)
+                       ecmd->advertising |= SUPPORTED_10000baseT_Full;
+               if (ahw->port_config & QLC_83XX_AUTONEG_ENABLE)
+                       ecmd->advertising |= ADVERTISED_Autoneg;
+       } else {
+               switch (ahw->link_speed) {
+               case SPEED_10:
+                       ecmd->advertising = SUPPORTED_10baseT_Full;
+                       break;
+               case SPEED_100:
+                       ecmd->advertising = SUPPORTED_100baseT_Full;
+                       break;
+               case SPEED_1000:
+                       ecmd->advertising = SUPPORTED_1000baseT_Full;
+                       break;
+               case SPEED_10000:
+                       ecmd->advertising = SUPPORTED_10000baseT_Full;
+                       break;
+               default:
+                       break;
+               }
+       }
+
+       switch (ahw->supported_type) {
+       case PORT_FIBRE:
+               ecmd->supported |= SUPPORTED_FIBRE;
+               ecmd->advertising |= ADVERTISED_FIBRE;
+               ecmd->port = PORT_FIBRE;
+               ecmd->transceiver = XCVR_EXTERNAL;
+               break;
+       case PORT_TP:
+               ecmd->supported |= SUPPORTED_TP;
+               ecmd->advertising |= ADVERTISED_TP;
+               ecmd->port = PORT_TP;
+               ecmd->transceiver = XCVR_INTERNAL;
+               break;
+       case PORT_DA:
+               ecmd->supported |= SUPPORTED_FIBRE;
+               ecmd->advertising |= ADVERTISED_FIBRE;
+               ecmd->port = PORT_DA;
+               ecmd->transceiver = XCVR_EXTERNAL;
+               break;
+       default:
+               ecmd->supported |= SUPPORTED_FIBRE;
+               ecmd->advertising |= ADVERTISED_FIBRE;
+               ecmd->port = PORT_OTHER;
+               ecmd->transceiver = XCVR_EXTERNAL;
+               break;
+       }
+       ecmd->phy_address = ahw->physical_port;
+       return status;
+}
+
+int qlcnic_83xx_set_settings(struct qlcnic_adapter *adapter,
+                            struct ethtool_cmd *ecmd)
+{
+       struct qlcnic_hardware_context *ahw = adapter->ahw;
+       u32 config = adapter->ahw->port_config;
+       int status = 0;
+
+       /* 83xx devices do not support Half duplex */
+       if (ecmd->duplex == DUPLEX_HALF) {
+               netdev_info(adapter->netdev,
+                           "Half duplex mode not supported\n");
+               return -EINVAL;
+       }
+
+       if (ecmd->autoneg) {
+               ahw->port_config |= QLC_83XX_AUTONEG_ENABLE;
+               ahw->port_config |= (QLC_83XX_100_CAPABLE |
+                                    QLC_83XX_1G_CAPABLE |
+                                    QLC_83XX_10G_CAPABLE);
+       } else { /* force speed */
+               ahw->port_config &= ~QLC_83XX_AUTONEG_ENABLE;
+               switch (ethtool_cmd_speed(ecmd)) {
+               case SPEED_10:
+                       ahw->port_config &= ~(QLC_83XX_100_CAPABLE |
+                                             QLC_83XX_1G_CAPABLE |
+                                             QLC_83XX_10G_CAPABLE);
+                       ahw->port_config |= QLC_83XX_10_CAPABLE;
+                       break;
+               case SPEED_100:
+                       ahw->port_config &= ~(QLC_83XX_10_CAPABLE |
+                                             QLC_83XX_1G_CAPABLE |
+                                             QLC_83XX_10G_CAPABLE);
+                       ahw->port_config |= QLC_83XX_100_CAPABLE;
+                       break;
+               case SPEED_1000:
+                       ahw->port_config &= ~(QLC_83XX_10_CAPABLE |
+                                             QLC_83XX_100_CAPABLE |
+                                             QLC_83XX_10G_CAPABLE);
+                       ahw->port_config |= QLC_83XX_1G_CAPABLE;
+                       break;
+               case SPEED_10000:
+                       ahw->port_config &= ~(QLC_83XX_10_CAPABLE |
+                                             QLC_83XX_100_CAPABLE |
+                                             QLC_83XX_1G_CAPABLE);
+                       ahw->port_config |= QLC_83XX_10G_CAPABLE;
+                       break;
+               default:
+                       return -EINVAL;
+               }
+       }
+       status = qlcnic_83xx_set_port_config(adapter);
+       if (status) {
+               netdev_info(adapter->netdev,
+                           "Failed to set link speed and autoneg.\n");
+               ahw->port_config = config;
+       }
+
+       return status;
+}
+
+static inline u64 *qlcnic_83xx_copy_stats(struct qlcnic_cmd_args *cmd,
+                                         u64 *data, int index)
+{
+       u32 low, hi;
+       u64 val;
+
+       low = cmd->rsp.arg[index];
+       hi = cmd->rsp.arg[index + 1];
+       val = (((u64) low) | (((u64) hi) << 32));
+       *data++ = val;
+       return data;
+}
+
+static u64 *qlcnic_83xx_fill_stats(struct qlcnic_adapter *adapter,
+                                  struct qlcnic_cmd_args *cmd, u64 *data,
+                                  int type, int *ret)
+{
+       int err, k, total_regs;
+
+       *ret = 0;
+       err = qlcnic_issue_cmd(adapter, cmd);
+       if (err != QLCNIC_RCODE_SUCCESS) {
+               dev_info(&adapter->pdev->dev,
+                        "Error in get statistics mailbox command\n");
+               *ret = -EIO;
+               return data;
+       }
+       total_regs = cmd->rsp.num;
+       switch (type) {
+       case QLC_83XX_STAT_MAC:
+               /* fill in MAC tx counters */
+               for (k = 2; k < 28; k += 2)
+                       data = qlcnic_83xx_copy_stats(cmd, data, k);
+               /* skip 24 bytes of reserved area */
+               /* fill in MAC rx counters */
+               for (k += 6; k < 60; k += 2)
+                       data = qlcnic_83xx_copy_stats(cmd, data, k);
+               /* skip 24 bytes of reserved area */
+               /* fill in MAC rx frame stats */
+               for (k += 6; k < 80; k += 2)
+                       data = qlcnic_83xx_copy_stats(cmd, data, k);
+               /* fill in eSwitch stats */
+               for (; k < total_regs; k += 2)
+                       data = qlcnic_83xx_copy_stats(cmd, data, k);
+               break;
+       case QLC_83XX_STAT_RX:
+               for (k = 2; k < 8; k += 2)
+                       data = qlcnic_83xx_copy_stats(cmd, data, k);
+               /* skip 8 bytes of reserved data */
+               for (k += 2; k < 24; k += 2)
+                       data = qlcnic_83xx_copy_stats(cmd, data, k);
+               /* skip 8 bytes containing RE1FBQ error data */
+               for (k += 2; k < total_regs; k += 2)
+                       data = qlcnic_83xx_copy_stats(cmd, data, k);
+               break;
+       case QLC_83XX_STAT_TX:
+               for (k = 2; k < 10; k += 2)
+                       data = qlcnic_83xx_copy_stats(cmd, data, k);
+               /* skip 8 bytes of reserved data */
+               for (k += 2; k < total_regs; k += 2)
+                       data = qlcnic_83xx_copy_stats(cmd, data, k);
+               break;
+       default:
+               dev_warn(&adapter->pdev->dev, "Unknown get statistics mode\n");
+               *ret = -EIO;
+       }
+       return data;
+}
+
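+/* Collect ethtool statistics: issue GET_STATISTICS for Tx, MAC and
+ * Rx counters and copy each set into the caller's buffer.
+ */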
+void qlcnic_83xx_get_stats(struct qlcnic_adapter *adapter, u64 *data)
+{
+       struct qlcnic_cmd_args cmd;
+       struct net_device *netdev = adapter->netdev;
+       int ret = 0;
+
+       ret = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_STATISTICS);
+       if (ret)
+               return;
+       /* Get Tx stats */
+       cmd.req.arg[1] = BIT_1 | (adapter->tx_ring->ctx_id << 16);
+       cmd.rsp.num = QLC_83XX_TX_STAT_REGS;
+       data = qlcnic_83xx_fill_stats(adapter, &cmd, data,
+                                     QLC_83XX_STAT_TX, &ret);
+       if (ret) {
+               netdev_err(netdev, "Error getting Tx stats\n");
+               goto out;
+       }
+       /* Get MAC stats */
+       cmd.req.arg[1] = BIT_2 | (adapter->portnum << 16);
+       cmd.rsp.num = QLC_83XX_MAC_STAT_REGS;
+       memset(cmd.rsp.arg, 0, sizeof(u32) * cmd.rsp.num);
+       data = qlcnic_83xx_fill_stats(adapter, &cmd, data,
+                                     QLC_83XX_STAT_MAC, &ret);
+       if (ret) {
+               netdev_err(netdev, "Error getting MAC stats\n");
+               goto out;
+       }
+       /* Get Rx stats */
+       cmd.req.arg[1] = adapter->recv_ctx->context_id << 16;
+       cmd.rsp.num = QLC_83XX_RX_STAT_REGS;
+       memset(cmd.rsp.arg, 0, sizeof(u32) * cmd.rsp.num);
+       data = qlcnic_83xx_fill_stats(adapter, &cmd, data,
+                                     QLC_83XX_STAT_RX, &ret);
+       if (ret)
+               netdev_err(netdev, "Error getting Rx stats\n");
+out:
+       qlcnic_free_mbx_args(&cmd);
+}
+
+#define QLCNIC_83XX_ADD_PORT0          BIT_0
+#define QLCNIC_83XX_ADD_PORT1          BIT_1
+#define QLCNIC_83XX_EXTENDED_MEM_SIZE  13 /* In MB */
+int qlcnic_83xx_extend_md_capab(struct qlcnic_adapter *adapter)
+{
+       struct qlcnic_cmd_args cmd;
+       int err;
+
+       err = qlcnic_alloc_mbx_args(&cmd, adapter,
+                                   QLCNIC_CMD_83XX_EXTEND_ISCSI_DUMP_CAP);
+       if (err)
+               return err;
+
+       cmd.req.arg[1] = (QLCNIC_83XX_ADD_PORT0 | QLCNIC_83XX_ADD_PORT1);
+       cmd.req.arg[2] = QLCNIC_83XX_EXTENDED_MEM_SIZE;
+       cmd.req.arg[3] = QLCNIC_83XX_EXTENDED_MEM_SIZE;
+
+       err = qlcnic_issue_cmd(adapter, &cmd);
+       if (err)
+               dev_err(&adapter->pdev->dev,
+                       "failed to issue extend iSCSI minidump capability\n");
+
+       return err;
+}
+
+int qlcnic_83xx_reg_test(struct qlcnic_adapter *adapter)
+{
+       u32 major, minor, sub;
+
+       major = QLC_SHARED_REG_RD32(adapter, QLCNIC_FW_VERSION_MAJOR);
+       minor = QLC_SHARED_REG_RD32(adapter, QLCNIC_FW_VERSION_MINOR);
+       sub = QLC_SHARED_REG_RD32(adapter, QLCNIC_FW_VERSION_SUB);
+
+       if (adapter->fw_version != QLCNIC_VERSION_CODE(major, minor, sub)) {
+               dev_info(&adapter->pdev->dev, "%s: Reg test failed\n",
+                        __func__);
+               return 1;
+       }
+       return 0;
+}
+
+inline int qlcnic_83xx_get_regs_len(struct qlcnic_adapter *adapter)
+{
+       return (ARRAY_SIZE(qlcnic_83xx_ext_reg_tbl) *
+               sizeof(*adapter->ahw->ext_reg_tbl)) +
+               (ARRAY_SIZE(qlcnic_83xx_reg_tbl) *
+               sizeof(*adapter->ahw->reg_tbl));
+}
+
+int qlcnic_83xx_get_registers(struct qlcnic_adapter *adapter, u32 *regs_buff)
+{
+       int i, j = 0;
+
+       for (i = QLCNIC_DEV_INFO_SIZE + 1;
+            j < ARRAY_SIZE(qlcnic_83xx_reg_tbl); i++, j++)
+               regs_buff[i] = QLC_SHARED_REG_RD32(adapter, j);
+
+       for (j = 0; j < ARRAY_SIZE(qlcnic_83xx_ext_reg_tbl); j++)
+               regs_buff[i++] = QLCRDX(adapter->ahw, j);
+       return i;
+}
+
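+/* ethtool interrupt self-test: switch to diagnostic resources, ask
+ * firmware to fire the requested interrupt and check the diagnostic
+ * counter to confirm delivery.
+ */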
+int qlcnic_83xx_interrupt_test(struct net_device *netdev)
+{
+       struct qlcnic_adapter *adapter = netdev_priv(netdev);
+       struct qlcnic_hardware_context *ahw = adapter->ahw;
+       struct qlcnic_cmd_args cmd;
+       u8 val, drv_sds_rings = adapter->drv_sds_rings;
+       u8 drv_tx_rings = adapter->drv_tx_rings;
+       u32 data;
+       u16 intrpt_id, id;
+       int ret;
+
+       if (test_bit(__QLCNIC_RESETTING, &adapter->state)) {
+               netdev_info(netdev, "Device is resetting\n");
+               return -EBUSY;
+       }
+
+       if (qlcnic_get_diag_lock(adapter)) {
+               netdev_info(netdev, "Device in diagnostics mode\n");
+               return -EBUSY;
+       }
+
+       ret = qlcnic_83xx_diag_alloc_res(netdev, QLCNIC_INTERRUPT_TEST,
+                                        drv_sds_rings);
+       if (ret)
+               goto fail_diag_irq;
+
+       ahw->diag_cnt = 0;
+       ret = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_INTRPT_TEST);
+       if (ret)
+               goto fail_diag_irq;
+
+       if (adapter->flags & QLCNIC_MSIX_ENABLED)
+               intrpt_id = ahw->intr_tbl[0].id;
+       else
+               intrpt_id = QLCRDX(ahw, QLCNIC_DEF_INT_ID);
+
+       cmd.req.arg[1] = 1;
+       cmd.req.arg[2] = intrpt_id;
+       cmd.req.arg[3] = BIT_0;
+
+       ret = qlcnic_issue_cmd(adapter, &cmd);
+       data = cmd.rsp.arg[2];
+       id = LSW(data);
+       val = LSB(MSW(data));
+       if (id != intrpt_id)
+               dev_info(&adapter->pdev->dev,
+                        "Interrupt generated: 0x%x, requested: 0x%x\n",
+                        id, intrpt_id);
+       if (val)
+               dev_err(&adapter->pdev->dev,
+                        "Interrupt test error: 0x%x\n", val);
+       if (ret)
+               goto done;
+
+       msleep(20);
+       ret = !ahw->diag_cnt;
+
+done:
+       qlcnic_free_mbx_args(&cmd);
+       qlcnic_83xx_diag_free_res(netdev, drv_sds_rings);
+
+fail_diag_irq:
+       adapter->drv_sds_rings = drv_sds_rings;
+       adapter->drv_tx_rings = drv_tx_rings;
+       qlcnic_release_diag_lock(adapter);
+       return ret;
+}
+
+void qlcnic_83xx_get_pauseparam(struct qlcnic_adapter *adapter,
+                               struct ethtool_pauseparam *pause)
+{
+       struct qlcnic_hardware_context *ahw = adapter->ahw;
+       int status = 0;
+       u32 config;
+
+       status = qlcnic_83xx_get_port_config(adapter);
+       if (status) {
+               dev_err(&adapter->pdev->dev,
+                       "%s: Get Pause Config failed\n", __func__);
+               return;
+       }
+       config = ahw->port_config;
+       if (config & QLC_83XX_CFG_STD_PAUSE) {
+               switch (MSW(config)) {
+               case QLC_83XX_TX_PAUSE:
+                       pause->tx_pause = 1;
+                       break;
+               case QLC_83XX_RX_PAUSE:
+                       pause->rx_pause = 1;
+                       break;
+               case QLC_83XX_TX_RX_PAUSE:
+               default:
+                       /* Backward compatibility for existing
+                        * flash definitions
+                        */
+                       pause->tx_pause = 1;
+                       pause->rx_pause = 1;
+               }
+       }
+
+       if (QLC_83XX_AUTONEG(config))
+               pause->autoneg = 1;
+}
+
+int qlcnic_83xx_set_pauseparam(struct qlcnic_adapter *adapter,
+                              struct ethtool_pauseparam *pause)
+{
+       struct qlcnic_hardware_context *ahw = adapter->ahw;
+       int status = 0;
+       u32 config;
+
+       status = qlcnic_83xx_get_port_config(adapter);
+       if (status) {
+               dev_err(&adapter->pdev->dev,
+                       "%s: Get Pause Config failed.\n", __func__);
+               return status;
+       }
+       config = ahw->port_config;
+
+       if (ahw->port_type == QLCNIC_GBE) {
+               if (pause->autoneg)
+                       ahw->port_config |= QLC_83XX_ENABLE_AUTONEG;
+               if (!pause->autoneg)
+                       ahw->port_config &= ~QLC_83XX_ENABLE_AUTONEG;
+       } else if ((ahw->port_type == QLCNIC_XGBE) && (pause->autoneg)) {
+               return -EOPNOTSUPP;
+       }
+
+       if (!(config & QLC_83XX_CFG_STD_PAUSE))
+               ahw->port_config |= QLC_83XX_CFG_STD_PAUSE;
+
+       if (pause->rx_pause && pause->tx_pause) {
+               ahw->port_config |= QLC_83XX_CFG_STD_TX_RX_PAUSE;
+       } else if (pause->rx_pause && !pause->tx_pause) {
+               ahw->port_config &= ~QLC_83XX_CFG_STD_TX_PAUSE;
+               ahw->port_config |= QLC_83XX_CFG_STD_RX_PAUSE;
+       } else if (pause->tx_pause && !pause->rx_pause) {
+               ahw->port_config &= ~QLC_83XX_CFG_STD_RX_PAUSE;
+               ahw->port_config |= QLC_83XX_CFG_STD_TX_PAUSE;
+       } else if (!pause->rx_pause && !pause->tx_pause) {
+               ahw->port_config &= ~(QLC_83XX_CFG_STD_TX_RX_PAUSE |
+                                     QLC_83XX_CFG_STD_PAUSE);
+       }
+       status = qlcnic_83xx_set_port_config(adapter);
+       if (status) {
+               dev_err(&adapter->pdev->dev,
+                       "%s: Set Pause Config failed.\n", __func__);
+               ahw->port_config = config;
+       }
+       return status;
+}
+
+static int qlcnic_83xx_read_flash_status_reg(struct qlcnic_adapter *adapter)
+{
+       int ret, err = 0;
+       u32 temp;
+
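+       /* Issue the OEM read-status sequence and poll for completion */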
+       qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_ADDR,
+                                    QLC_83XX_FLASH_OEM_READ_SIG);
+       qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_CONTROL,
+                                    QLC_83XX_FLASH_READ_CTRL);
+       ret = qlcnic_83xx_poll_flash_status_reg(adapter);
+       if (ret)
+               return -EIO;
+
+       temp = QLCRD32(adapter, QLC_83XX_FLASH_RDDATA, &err);
+       if (err == -EIO)
+               return err;
+
+       return temp & 0xFF;
+}
+
+int qlcnic_83xx_flash_test(struct qlcnic_adapter *adapter)
+{
+       int status;
+
+       status = qlcnic_83xx_read_flash_status_reg(adapter);
+       if (status == -EIO) {
+               dev_info(&adapter->pdev->dev, "%s: EEPROM test failed.\n",
+                        __func__);
+               return 1;
+       }
+       return 0;
+}
+
+static int qlcnic_83xx_shutdown(struct pci_dev *pdev)
+{
+       struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
+       struct net_device *netdev = adapter->netdev;
+       int retval;
+
+       netif_device_detach(netdev);
+       qlcnic_cancel_idc_work(adapter);
+
+       if (netif_running(netdev))
+               qlcnic_down(adapter, netdev);
+
+       qlcnic_83xx_disable_mbx_intr(adapter);
+       cancel_delayed_work_sync(&adapter->idc_aen_work);
+
+       retval = pci_save_state(pdev);
+       if (retval)
+               return retval;
+
+       return 0;
+}
+
+static int qlcnic_83xx_resume(struct qlcnic_adapter *adapter)
+{
+       struct qlcnic_hardware_context *ahw = adapter->ahw;
+       struct qlc_83xx_idc *idc = &ahw->idc;
+       int err = 0;
+
+       err = qlcnic_83xx_idc_init(adapter);
+       if (err)
+               return err;
+
+       if (ahw->nic_mode == QLCNIC_VNIC_MODE) {
+               if (ahw->op_mode == QLCNIC_MGMT_FUNC) {
+                       qlcnic_83xx_set_vnic_opmode(adapter);
+               } else {
+                       err = qlcnic_83xx_check_vnic_state(adapter);
+                       if (err)
+                               return err;
+               }
+       }
+
+       err = qlcnic_83xx_idc_reattach_driver(adapter);
+       if (err)
+               return err;
+
+       qlcnic_schedule_work(adapter, qlcnic_83xx_idc_poll_dev_state,
+                            idc->delay);
+       return err;
+}
+
+void qlcnic_83xx_reinit_mbx_work(struct qlcnic_mailbox *mbx)
+{
+       reinit_completion(&mbx->completion);
+       set_bit(QLC_83XX_MBX_READY, &mbx->status);
+}
+
+void qlcnic_83xx_free_mailbox(struct qlcnic_mailbox *mbx)
+{
+       if (!mbx)
+               return;
+
+       destroy_workqueue(mbx->work_q);
+       kfree(mbx);
+}
+
+static inline void
+qlcnic_83xx_notify_cmd_completion(struct qlcnic_adapter *adapter,
+                                 struct qlcnic_cmd_args *cmd)
+{
+       atomic_set(&cmd->rsp_status, QLC_83XX_MBX_RESPONSE_ARRIVED);
+
+       if (cmd->type == QLC_83XX_MBX_CMD_NO_WAIT) {
+               qlcnic_free_mbx_args(cmd);
+               kfree(cmd);
+               return;
+       }
+       complete(&cmd->completion);
+}
+
+static void qlcnic_83xx_flush_mbx_queue(struct qlcnic_adapter *adapter)
+{
+       struct qlcnic_mailbox *mbx = adapter->ahw->mailbox;
+       struct list_head *head = &mbx->cmd_q;
+       struct qlcnic_cmd_args *cmd = NULL;
+
+       spin_lock(&mbx->queue_lock);
+
+       while (!list_empty(head)) {
+               cmd = list_entry(head->next, struct qlcnic_cmd_args, list);
+               dev_info(&adapter->pdev->dev, "%s: Mailbox command 0x%x\n",
+                        __func__, cmd->cmd_op);
+               list_del(&cmd->list);
+               mbx->num_cmds--;
+               qlcnic_83xx_notify_cmd_completion(adapter, cmd);
+       }
+
+       spin_unlock(&mbx->queue_lock);
+}
+
+static int qlcnic_83xx_check_mbx_status(struct qlcnic_adapter *adapter)
+{
+       struct qlcnic_hardware_context *ahw = adapter->ahw;
+       struct qlcnic_mailbox *mbx = ahw->mailbox;
+       u32 host_mbx_ctrl;
+
+       if (!test_bit(QLC_83XX_MBX_READY, &mbx->status))
+               return -EBUSY;
+
+       host_mbx_ctrl = QLCRDX(ahw, QLCNIC_HOST_MBX_CTRL);
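+       /* Non-zero host control: firmware never consumed the last request */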
+       if (host_mbx_ctrl) {
+               clear_bit(QLC_83XX_MBX_READY, &mbx->status);
+               ahw->idc.collect_dump = 1;
+               return -EIO;
+       }
+
+       return 0;
+}
+
+static inline void qlcnic_83xx_signal_mbx_cmd(struct qlcnic_adapter *adapter,
+                                             u8 issue_cmd)
+{
+       if (issue_cmd)
+               QLCWRX(adapter->ahw, QLCNIC_HOST_MBX_CTRL, QLCNIC_SET_OWNER);
+       else
+               QLCWRX(adapter->ahw, QLCNIC_FW_MBX_CTRL, QLCNIC_CLR_OWNER);
+}
+
+static void qlcnic_83xx_dequeue_mbx_cmd(struct qlcnic_adapter *adapter,
+                                       struct qlcnic_cmd_args *cmd)
+{
+       struct qlcnic_mailbox *mbx = adapter->ahw->mailbox;
+
+       spin_lock(&mbx->queue_lock);
+
+       list_del(&cmd->list);
+       mbx->num_cmds--;
+
+       spin_unlock(&mbx->queue_lock);
+
+       qlcnic_83xx_notify_cmd_completion(adapter, cmd);
+}
+
+static void qlcnic_83xx_encode_mbx_cmd(struct qlcnic_adapter *adapter,
+                                      struct qlcnic_cmd_args *cmd)
+{
+       u32 mbx_cmd, fw_hal_version, hdr_size, total_size, tmp;
+       struct qlcnic_hardware_context *ahw = adapter->ahw;
+       int i, j;
+
+       if (cmd->op_type != QLC_83XX_MBX_POST_BC_OP) {
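+               /* Copy request arguments into the host mailbox registers */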
+               mbx_cmd = cmd->req.arg[0];
+               writel(mbx_cmd, QLCNIC_MBX_HOST(ahw, 0));
+               for (i = 1; i < cmd->req.num; i++)
+                       writel(cmd->req.arg[i], QLCNIC_MBX_HOST(ahw, i));
+       } else {
+               fw_hal_version = ahw->fw_hal_version;
+               hdr_size = sizeof(struct qlcnic_bc_hdr) / sizeof(u32);
+               total_size = cmd->pay_size + hdr_size;
+               tmp = QLCNIC_CMD_BC_EVENT_SETUP | total_size << 16;
+               mbx_cmd = tmp | fw_hal_version << 29;
+               writel(mbx_cmd, QLCNIC_MBX_HOST(ahw, 0));
+
+               /* Back channel specific operations bits */
+               mbx_cmd = 0x1 | 1 << 4;
+
+               if (qlcnic_sriov_pf_check(adapter))
+                       mbx_cmd |= cmd->func_num << 5;
+
+               writel(mbx_cmd, QLCNIC_MBX_HOST(ahw, 1));
+
+               for (i = 2, j = 0; j < hdr_size; i++, j++)
+                       writel(*(cmd->hdr++), QLCNIC_MBX_HOST(ahw, i));
+               for (j = 0; j < cmd->pay_size; j++, i++)
+                       writel(*(cmd->pay++), QLCNIC_MBX_HOST(ahw, i));
+       }
+}
+
+void qlcnic_83xx_detach_mailbox_work(struct qlcnic_adapter *adapter)
+{
+       struct qlcnic_mailbox *mbx = adapter->ahw->mailbox;
+
+       if (!mbx)
+               return;
+
+       clear_bit(QLC_83XX_MBX_READY, &mbx->status);
+       complete(&mbx->completion);
+       cancel_work_sync(&mbx->work);
+       flush_workqueue(mbx->work_q);
+       qlcnic_83xx_flush_mbx_queue(adapter);
+}
+
+static int qlcnic_83xx_enqueue_mbx_cmd(struct qlcnic_adapter *adapter,
+                                      struct qlcnic_cmd_args *cmd,
+                                      unsigned long *timeout)
+{
+       struct qlcnic_mailbox *mbx = adapter->ahw->mailbox;
+
+       if (test_bit(QLC_83XX_MBX_READY, &mbx->status)) {
+               atomic_set(&cmd->rsp_status, QLC_83XX_MBX_RESPONSE_WAIT);
+               init_completion(&cmd->completion);
+               cmd->rsp_opcode = QLC_83XX_MBX_RESPONSE_UNKNOWN;
+
+               spin_lock(&mbx->queue_lock);
+
+               list_add_tail(&cmd->list, &mbx->cmd_q);
+               mbx->num_cmds++;
+               cmd->total_cmds = mbx->num_cmds;
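+               /* Scale the wait timeout with the queued command count */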
+               *timeout = cmd->total_cmds * QLC_83XX_MBX_TIMEOUT;
+               queue_work(mbx->work_q, &mbx->work);
+
+               spin_unlock(&mbx->queue_lock);
+
+               return 0;
+       }
+
+       return -EBUSY;
+}
+
+static int qlcnic_83xx_check_mac_rcode(struct qlcnic_adapter *adapter,
+                                      struct qlcnic_cmd_args *cmd)
+{
+       u8 mac_cmd_rcode;
+       u32 fw_data;
+
+       if (cmd->cmd_op == QLCNIC_CMD_CONFIG_MAC_VLAN) {
+               fw_data = readl(QLCNIC_MBX_FW(adapter->ahw, 2));
+               mac_cmd_rcode = (u8)fw_data;
+               if (mac_cmd_rcode == QLC_83XX_NO_NIC_RESOURCE ||
+                   mac_cmd_rcode == QLC_83XX_MAC_PRESENT ||
+                   mac_cmd_rcode == QLC_83XX_MAC_ABSENT) {
+                       cmd->rsp_opcode = QLCNIC_RCODE_SUCCESS;
+                       return QLCNIC_RCODE_SUCCESS;
+               }
+       }
+
+       return -EINVAL;
+}
+
+static void qlcnic_83xx_decode_mbx_rsp(struct qlcnic_adapter *adapter,
+                                      struct qlcnic_cmd_args *cmd)
+{
+       struct qlcnic_hardware_context *ahw = adapter->ahw;
+       struct device *dev = &adapter->pdev->dev;
+       u8 mbx_err_code;
+       u32 fw_data;
+
+       fw_data = readl(QLCNIC_MBX_FW(ahw, 0));
+       mbx_err_code = QLCNIC_MBX_STATUS(fw_data);
+       qlcnic_83xx_get_mbx_data(adapter, cmd);
+
+       switch (mbx_err_code) {
+       case QLCNIC_MBX_RSP_OK:
+       case QLCNIC_MBX_PORT_RSP_OK:
+               cmd->rsp_opcode = QLCNIC_RCODE_SUCCESS;
+               break;
+       default:
+               if (!qlcnic_83xx_check_mac_rcode(adapter, cmd))
+                       break;
+
+               dev_err(dev, "%s: Mailbox command failed, opcode=0x%x, cmd_type=0x%x, func=0x%x, op_mode=0x%x, error=0x%x\n",
+                       __func__, cmd->cmd_op, cmd->type, ahw->pci_func,
+                       ahw->op_mode, mbx_err_code);
+               cmd->rsp_opcode = QLC_83XX_MBX_RESPONSE_FAILED;
+               qlcnic_dump_mbx(adapter, cmd);
+       }
+
+       return;
+}
+
+static inline void qlcnic_dump_mailbox_registers(struct qlcnic_adapter *adapter)
+{
+       struct qlcnic_hardware_context *ahw = adapter->ahw;
+       u32 offset;
+
+       offset = QLCRDX(ahw, QLCNIC_DEF_INT_MASK);
+       dev_info(&adapter->pdev->dev, "Mbx interrupt mask=0x%x, Mbx interrupt enable=0x%x, Host mbx control=0x%x, Fw mbx control=0x%x",
+                readl(ahw->pci_base0 + offset),
+                QLCRDX(ahw, QLCNIC_MBX_INTR_ENBL),
+                QLCRDX(ahw, QLCNIC_HOST_MBX_CTRL),
+                QLCRDX(ahw, QLCNIC_FW_MBX_CTRL));
+}
+
+static void qlcnic_83xx_mailbox_worker(struct work_struct *work)
+{
+       struct qlcnic_mailbox *mbx = container_of(work, struct qlcnic_mailbox,
+                                                 work);
+       struct qlcnic_adapter *adapter = mbx->adapter;
+       const struct qlcnic_mbx_ops *mbx_ops = mbx->ops;
+       struct device *dev = &adapter->pdev->dev;
+       struct list_head *head = &mbx->cmd_q;
+       struct qlcnic_hardware_context *ahw;
+       struct qlcnic_cmd_args *cmd = NULL;
+       unsigned long flags;
+
+       ahw = adapter->ahw;
+
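+       /* Drain the queue: encode, signal firmware, wait for each response */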
+       while (true) {
+               if (qlcnic_83xx_check_mbx_status(adapter)) {
+                       qlcnic_83xx_flush_mbx_queue(adapter);
+                       return;
+               }
+
+               spin_lock_irqsave(&mbx->aen_lock, flags);
+               mbx->rsp_status = QLC_83XX_MBX_RESPONSE_WAIT;
+               spin_unlock_irqrestore(&mbx->aen_lock, flags);
+
+               spin_lock(&mbx->queue_lock);
+
+               if (list_empty(head)) {
+                       spin_unlock(&mbx->queue_lock);
+                       return;
+               }
+               cmd = list_entry(head->next, struct qlcnic_cmd_args, list);
+
+               spin_unlock(&mbx->queue_lock);
+
+               mbx_ops->encode_cmd(adapter, cmd);
+               mbx_ops->nofity_fw(adapter, QLC_83XX_MBX_REQUEST);
+
+               if (wait_for_completion_timeout(&mbx->completion,
+                                               QLC_83XX_MBX_TIMEOUT)) {
+                       mbx_ops->decode_resp(adapter, cmd);
+                       mbx_ops->nofity_fw(adapter, QLC_83XX_MBX_COMPLETION);
+               } else {
+                       dev_err(dev, "%s: Mailbox command timeout, opcode=0x%x, cmd_type=0x%x, func=0x%x, op_mode=0x%x\n",
+                               __func__, cmd->cmd_op, cmd->type, ahw->pci_func,
+                               ahw->op_mode);
+                       clear_bit(QLC_83XX_MBX_READY, &mbx->status);
+                       qlcnic_dump_mailbox_registers(adapter);
+                       qlcnic_83xx_get_mbx_data(adapter, cmd);
+                       qlcnic_dump_mbx(adapter, cmd);
+                       qlcnic_83xx_idc_request_reset(adapter,
+                                                     QLCNIC_FORCE_FW_DUMP_KEY);
+                       cmd->rsp_opcode = QLCNIC_RCODE_TIMEOUT;
+               }
+               mbx_ops->dequeue_cmd(adapter, cmd);
+       }
+}
+
+static const struct qlcnic_mbx_ops qlcnic_83xx_mbx_ops = {
+       .enqueue_cmd    = qlcnic_83xx_enqueue_mbx_cmd,
+       .dequeue_cmd    = qlcnic_83xx_dequeue_mbx_cmd,
+       .decode_resp    = qlcnic_83xx_decode_mbx_rsp,
+       .encode_cmd     = qlcnic_83xx_encode_mbx_cmd,
+       .nofity_fw      = qlcnic_83xx_signal_mbx_cmd,
+};
+
+int qlcnic_83xx_init_mailbox_work(struct qlcnic_adapter *adapter)
+{
+       struct qlcnic_hardware_context *ahw = adapter->ahw;
+       struct qlcnic_mailbox *mbx;
+
+       ahw->mailbox = kzalloc(sizeof(*mbx), GFP_KERNEL);
+       if (!ahw->mailbox)
+               return -ENOMEM;
+
+       mbx = ahw->mailbox;
+       mbx->ops = &qlcnic_83xx_mbx_ops;
+       mbx->adapter = adapter;
+
+       spin_lock_init(&mbx->queue_lock);
+       spin_lock_init(&mbx->aen_lock);
+       INIT_LIST_HEAD(&mbx->cmd_q);
+       init_completion(&mbx->completion);
+
+       mbx->work_q = create_singlethread_workqueue("qlcnic_mailbox");
+       if (mbx->work_q == NULL) {
+               kfree(mbx);
+               return -ENOMEM;
+       }
+
+       INIT_WORK(&mbx->work, qlcnic_83xx_mailbox_worker);
+       set_bit(QLC_83XX_MBX_READY, &mbx->status);
+       return 0;
+}
+
+static pci_ers_result_t qlcnic_83xx_io_error_detected(struct pci_dev *pdev,
+                                                     pci_channel_state_t state)
+{
+       struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
+
+       if (state == pci_channel_io_perm_failure)
+               return PCI_ERS_RESULT_DISCONNECT;
+
+       if (state == pci_channel_io_normal)
+               return PCI_ERS_RESULT_RECOVERED;
+
+       set_bit(__QLCNIC_AER, &adapter->state);
+       set_bit(__QLCNIC_RESETTING, &adapter->state);
+
+       qlcnic_83xx_aer_stop_poll_work(adapter);
+
+       pci_save_state(pdev);
+       pci_disable_device(pdev);
+
+       return PCI_ERS_RESULT_NEED_RESET;
+}
+
+static pci_ers_result_t qlcnic_83xx_io_slot_reset(struct pci_dev *pdev)
+{
+       struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
+       int err = 0;
+
+       pdev->error_state = pci_channel_io_normal;
+       err = pci_enable_device(pdev);
+       if (err)
+               goto disconnect;
+
+       pci_set_power_state(pdev, PCI_D0);
+       pci_set_master(pdev);
+       pci_restore_state(pdev);
+
+       err = qlcnic_83xx_aer_reset(adapter);
+       if (err == 0)
+               return PCI_ERS_RESULT_RECOVERED;
+disconnect:
+       clear_bit(__QLCNIC_AER, &adapter->state);
+       clear_bit(__QLCNIC_RESETTING, &adapter->state);
+       return PCI_ERS_RESULT_DISCONNECT;
+}
+
+static void qlcnic_83xx_io_resume(struct pci_dev *pdev)
+{
+       struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
+
+       pci_cleanup_aer_uncorrect_error_status(pdev);
+       if (test_and_clear_bit(__QLCNIC_AER, &adapter->state))
+               qlcnic_83xx_aer_start_poll_work(adapter);
+}
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h
new file mode 100644 (file)
index 0000000..331ae2c
--- /dev/null
@@ -0,0 +1,663 @@
+/*
+ * QLogic qlcnic NIC Driver
+ * Copyright (c) 2009-2013 QLogic Corporation
+ *
+ * See LICENSE.qlcnic for copyright and licensing details.
+ */
+
+#ifndef __QLCNIC_83XX_HW_H
+#define __QLCNIC_83XX_HW_H
+
+#include <linux/types.h>
+#include <linux/etherdevice.h>
+
+#include "qlcnic_hw.h"
+
+#define QLCNIC_83XX_BAR0_LENGTH 0x4000
+
+/* Directly mapped registers */
+#define QLC_83XX_CRB_WIN_BASE          0x3800
+#define QLC_83XX_CRB_WIN_FUNC(f)       (QLC_83XX_CRB_WIN_BASE+((f)*4))
+#define QLC_83XX_SEM_LOCK_BASE         0x3840
+#define QLC_83XX_SEM_UNLOCK_BASE       0x3844
+#define QLC_83XX_SEM_LOCK_FUNC(f)      (QLC_83XX_SEM_LOCK_BASE+((f)*8))
+#define QLC_83XX_SEM_UNLOCK_FUNC(f)    (QLC_83XX_SEM_UNLOCK_BASE+((f)*8))
+#define QLC_83XX_LINK_STATE(f)         (0x3698+((f) > 7 ? 4 : 0))
+#define QLC_83XX_LINK_SPEED(f)         (0x36E0+(((f) >> 2) * 4))
+#define QLC_83XX_LINK_SPEED_FACTOR     10
+#define QLC_83xx_FUNC_VAL(v, f)        ((v) & (1 << (f * 4)))
+#define QLC_83XX_INTX_PTR              0x38C0
+#define QLC_83XX_INTX_TRGR             0x38C4
+#define QLC_83XX_INTX_MASK             0x38C8
+
+#define QLC_83XX_DRV_LOCK_WAIT_COUNTER                 100
+#define QLC_83XX_DRV_LOCK_WAIT_DELAY                   20
+#define QLC_83XX_NEED_DRV_LOCK_RECOVERY                1
+#define QLC_83XX_DRV_LOCK_RECOVERY_IN_PROGRESS         2
+#define QLC_83XX_MAX_DRV_LOCK_RECOVERY_ATTEMPT         3
+#define QLC_83XX_DRV_LOCK_RECOVERY_DELAY               200
+#define QLC_83XX_DRV_LOCK_RECOVERY_STATUS_MASK         0x3
+#define QLC_83XX_LB_WAIT_COUNT                         250
+#define QLC_83XX_LB_MSLEEP_COUNT                       20
+#define QLC_83XX_NO_NIC_RESOURCE       0x5
+#define QLC_83XX_MAC_PRESENT           0xC
+#define QLC_83XX_MAC_ABSENT            0xD
+
+
+#define QLC_83XX_FLASH_SECTOR_SIZE             (64 * 1024)
+
+/* PEG status definitions */
+#define QLC_83XX_CMDPEG_COMPLETE               0xff01
+#define QLC_83XX_VALID_INTX_BIT30(val)         ((val) & BIT_30)
+#define QLC_83XX_VALID_INTX_BIT31(val)         ((val) & BIT_31)
+#define QLC_83XX_INTX_FUNC(val)                ((val) & 0xFF)
+#define QLC_83XX_LEGACY_INTX_MAX_RETRY         100
+#define QLC_83XX_LEGACY_INTX_DELAY             4
+#define QLC_83XX_REG_DESC                      1
+#define QLC_83XX_LRO_DESC                      2
+#define QLC_83XX_CTRL_DESC                     3
+#define QLC_83XX_FW_CAPABILITY_TSO             BIT_6
+#define QLC_83XX_FW_CAP_LRO_MSS                BIT_17
+#define QLC_83XX_HOST_RDS_MODE_UNIQUE          0
+#define QLC_83XX_HOST_SDS_MBX_IDX              8
+
+#define QLCNIC_HOST_RDS_MBX_IDX                        88
+
+/* Pause control registers */
+#define QLC_83XX_SRE_SHIM_REG          0x0D200284
+#define QLC_83XX_PORT0_THRESHOLD       0x0B2003A4
+#define QLC_83XX_PORT1_THRESHOLD       0x0B2013A4
+#define QLC_83XX_PORT0_TC_MC_REG       0x0B200388
+#define QLC_83XX_PORT1_TC_MC_REG       0x0B201388
+#define QLC_83XX_PORT0_TC_STATS                0x0B20039C
+#define QLC_83XX_PORT1_TC_STATS                0x0B20139C
+#define QLC_83XX_PORT2_IFB_THRESHOLD   0x0B200704
+#define QLC_83XX_PORT3_IFB_THRESHOLD   0x0B201704
+
+/* Peg PC status registers */
+#define QLC_83XX_CRB_PEG_NET_0         0x3400003c
+#define QLC_83XX_CRB_PEG_NET_1         0x3410003c
+#define QLC_83XX_CRB_PEG_NET_2         0x3420003c
+#define QLC_83XX_CRB_PEG_NET_3         0x3430003c
+#define QLC_83XX_CRB_PEG_NET_4         0x34b0003c
+
+/* Firmware image definitions */
+#define QLC_83XX_BOOTLOADER_FLASH_ADDR 0x10000
+#define QLC_83XX_FW_FILE_NAME          "83xx_fw.bin"
+#define QLC_83XX_POST_FW_FILE_NAME     "83xx_post_fw.bin"
+#define QLC_84XX_FW_FILE_NAME          "84xx_fw.bin"
+#define QLC_83XX_BOOT_FROM_FLASH       0
+#define QLC_83XX_BOOT_FROM_FILE                0x12345678
+
+#define QLC_FW_FILE_NAME_LEN           20
+#define QLC_83XX_MAX_RESET_SEQ_ENTRIES 16
+
+#define QLC_83XX_MBX_POST_BC_OP                0x1
+#define QLC_83XX_MBX_COMPLETION                0x0
+#define QLC_83XX_MBX_REQUEST           0x1
+
+#define QLC_83XX_MBX_TIMEOUT           (5 * HZ)
+#define QLC_83XX_MBX_CMD_LOOP          5000000
+
+/* status descriptor mailbox data
+ * @phy_addr_{low|high}: physical address of buffer
+ * @sds_ring_size: buffer size
+ * @intrpt_id: interrupt id
+ * @intrpt_val: source of interrupt
+ */
+struct qlcnic_sds_mbx {
+       u32     phy_addr_low;
+       u32     phy_addr_high;
+       u32     rsvd1[4];
+#if defined(__LITTLE_ENDIAN)
+       u16     sds_ring_size;
+       u16     rsvd2;
+       u16     rsvd3[2];
+       u16     intrpt_id;
+       u8      intrpt_val;
+       u8      rsvd4;
+#elif defined(__BIG_ENDIAN)
+       u16     rsvd2;
+       u16     sds_ring_size;
+       u16     rsvd3[2];
+       u8      rsvd4;
+       u8      intrpt_val;
+       u16     intrpt_id;
+#endif
+       u32     rsvd5;
+} __packed;
+
+/* receive descriptor buffer data
+ * phy_addr_reg_{low|high}: physical address of regular buffer
+ * phy_addr_jmb_{low|high}: physical address of jumbo buffer
+ * reg_ring_sz: size of regular buffer
+ * reg_ring_len: no. of entries in regular buffer
+ * jmb_ring_len: no. of entries in jumbo buffer
+ * jmb_ring_sz: size of jumbo buffer
+ */
+struct qlcnic_rds_mbx {
+       u32     phy_addr_reg_low;
+       u32     phy_addr_reg_high;
+       u32     phy_addr_jmb_low;
+       u32     phy_addr_jmb_high;
+#if defined(__LITTLE_ENDIAN)
+       u16     reg_ring_sz;
+       u16     reg_ring_len;
+       u16     jmb_ring_sz;
+       u16     jmb_ring_len;
+#elif defined(__BIG_ENDIAN)
+       u16     reg_ring_len;
+       u16     reg_ring_sz;
+       u16     jmb_ring_len;
+       u16     jmb_ring_sz;
+#endif
+} __packed;
+
+/* host producers for regular and jumbo rings */
+struct __host_producer_mbx {
+       u32     reg_buf;
+       u32     jmb_buf;
+} __packed;
+
+/* Receive context mailbox data outbox registers
+ * @state: state of the context
+ * @vport_id: virtual port id
+ * @context_id: receive context id
+ * @num_pci_func: number of pci functions of the port
+ * @phy_port: physical port id
+ */
+struct qlcnic_rcv_mbx_out {
+#if defined(__LITTLE_ENDIAN)
+       u8      rcv_num;
+       u8      sts_num;
+       u16     ctx_id;
+       u8      state;
+       u8      num_pci_func;
+       u8      phy_port;
+       u8      vport_id;
+#elif defined(__BIG_ENDIAN)
+       u16     ctx_id;
+       u8      sts_num;
+       u8      rcv_num;
+       u8      vport_id;
+       u8      phy_port;
+       u8      num_pci_func;
+       u8      state;
+#endif
+       u32     host_csmr[QLCNIC_MAX_SDS_RINGS];
+       struct __host_producer_mbx host_prod[QLCNIC_MAX_SDS_RINGS];
+} __packed;
+
+struct qlcnic_add_rings_mbx_out {
+#if defined(__LITTLE_ENDIAN)
+       u8      rcv_num;
+       u8      sts_num;
+       u16     ctx_id;
+#elif defined(__BIG_ENDIAN)
+       u16     ctx_id;
+       u8      sts_num;
+       u8      rcv_num;
+#endif
+       u32  host_csmr[QLCNIC_MAX_SDS_RINGS];
+       struct __host_producer_mbx host_prod[QLCNIC_MAX_SDS_RINGS];
+} __packed;
+
+/* Transmit context mailbox inbox registers
+ * @phys_addr_{low|high}: DMA address of the transmit buffer
+ * @cnsmr_index_{low|high}: host consumer index
+ * @size: length of the transmit buffer ring
+ * @intr_id: interrupt id
+ * @src: source of interrupt
+ */
+struct qlcnic_tx_mbx {
+       u32     phys_addr_low;
+       u32     phys_addr_high;
+       u32     cnsmr_index_low;
+       u32     cnsmr_index_high;
+#if defined(__LITTLE_ENDIAN)
+       u16     size;
+       u16     intr_id;
+       u8      src;
+       u8      rsvd[3];
+#elif defined(__BIG_ENDIAN)
+       u16     intr_id;
+       u16     size;
+       u8      rsvd[3];
+       u8      src;
+#endif
+} __packed;
+
+/* Transmit context mailbox outbox registers
+ * @host_prod: host producer index
+ * @ctx_id: transmit context id
+ * @state: state of the transmit context
+ */
+
+struct qlcnic_tx_mbx_out {
+       u32     host_prod;
+#if defined(__LITTLE_ENDIAN)
+       u16     ctx_id;
+       u8      state;
+       u8      rsvd;
+#elif defined(__BIG_ENDIAN)
+       u8      rsvd;
+       u8      state;
+       u16     ctx_id;
+#endif
+} __packed;
+
+struct qlcnic_intrpt_config {
+       u8      type;
+       u8      enabled;
+       u16     id;
+       u32     src;
+};
+
+struct qlcnic_macvlan_mbx {
+#if defined(__LITTLE_ENDIAN)
+       u8      mac_addr0;
+       u8      mac_addr1;
+       u8      mac_addr2;
+       u8      mac_addr3;
+       u8      mac_addr4;
+       u8      mac_addr5;
+       u16     vlan;
+#elif defined(__BIG_ENDIAN)
+       u8      mac_addr3;
+       u8      mac_addr2;
+       u8      mac_addr1;
+       u8      mac_addr0;
+       u16     vlan;
+       u8      mac_addr5;
+       u8      mac_addr4;
+#endif
+};
+
+struct qlc_83xx_fw_info {
+       const struct firmware   *fw;
+       char    fw_file_name[QLC_FW_FILE_NAME_LEN];
+};
+
+struct qlc_83xx_reset {
+       struct qlc_83xx_reset_hdr *hdr;
+       int     seq_index;
+       int     seq_error;
+       int     array_index;
+       u32     array[QLC_83XX_MAX_RESET_SEQ_ENTRIES];
+       u8      *buff;
+       u8      *stop_offset;
+       u8      *start_offset;
+       u8      *init_offset;
+       u8      seq_end;
+       u8      template_end;
+};
+
+#define QLC_83XX_IDC_DISABLE_FW_RESET_RECOVERY         0x1
+#define QLC_83XX_IDC_GRACEFULL_RESET                   0x2
+#define QLC_83XX_IDC_DISABLE_FW_DUMP                   0x4
+#define QLC_83XX_IDC_TIMESTAMP                         0
+#define QLC_83XX_IDC_DURATION                          1
+#define QLC_83XX_IDC_INIT_TIMEOUT_SECS                 30
+#define QLC_83XX_IDC_RESET_ACK_TIMEOUT_SECS            10
+#define QLC_83XX_IDC_RESET_TIMEOUT_SECS                10
+#define QLC_83XX_IDC_QUIESCE_ACK_TIMEOUT_SECS          20
+#define QLC_83XX_IDC_FW_POLL_DELAY                     (1 * HZ)
+#define QLC_83XX_IDC_FW_FAIL_THRESH                    2
+#define QLC_83XX_IDC_MAX_FUNC_PER_PARTITION_INFO       8
+#define QLC_83XX_IDC_MAX_CNA_FUNCTIONS                 16
+#define QLC_83XX_IDC_MAJOR_VERSION                     1
+#define QLC_83XX_IDC_MINOR_VERSION                     0
+#define QLC_83XX_IDC_FLASH_PARAM_ADDR                  0x3e8020
+
+struct qlcnic_adapter;
+struct qlcnic_fw_dump;
+
+struct qlc_83xx_idc {
+       int (*state_entry) (struct qlcnic_adapter *);
+       u64             sec_counter;
+       u64             delay;
+       unsigned long   status;
+       int             err_code;
+       int             collect_dump;
+       u8              curr_state;
+       u8              prev_state;
+       u8              vnic_state;
+       u8              vnic_wait_limit;
+       u8              quiesce_req;
+       u8              delay_reset;
+       char            **name;
+};
+
+enum qlcnic_vlan_operations {
+       QLC_VLAN_ADD = 0,
+       QLC_VLAN_DELETE
+};
+
+/* Device States */
+enum qlcnic_83xx_states {
+       QLC_83XX_IDC_DEV_UNKNOWN,
+       QLC_83XX_IDC_DEV_COLD,
+       QLC_83XX_IDC_DEV_INIT,
+       QLC_83XX_IDC_DEV_READY,
+       QLC_83XX_IDC_DEV_NEED_RESET,
+       QLC_83XX_IDC_DEV_NEED_QUISCENT,
+       QLC_83XX_IDC_DEV_FAILED,
+       QLC_83XX_IDC_DEV_QUISCENT
+};
+
+#define QLCNIC_MBX_RSP(reg)            LSW(reg)
+#define QLCNIC_MBX_NUM_REGS(reg)       (MSW(reg) & 0x1FF)
+#define QLCNIC_MBX_STATUS(reg)         (((reg) >> 25) & 0x7F)
+#define QLCNIC_MBX_HOST(ahw, i)        ((ahw)->pci_base0 + ((i) * 4))
+#define QLCNIC_MBX_FW(ahw, i)          ((ahw)->pci_base0 + 0x800 + ((i) * 4))
+
+/* Mailbox process AEN count */
+#define QLC_83XX_IDC_COMP_AEN                  3
+#define QLC_83XX_MBX_AEN_CNT                   5
+#define QLC_83XX_MODULE_LOADED                 1
+#define QLC_83XX_MBX_READY                     2
+#define QLC_83XX_MBX_AEN_ACK                   3
+#define QLC_83XX_SFP_PRESENT(data)             ((data) & 3)
+#define QLC_83XX_SFP_ERR(data)                 (((data) >> 2) & 3)
+#define QLC_83XX_SFP_MODULE_TYPE(data)         (((data) >> 4) & 0x1F)
+#define QLC_83XX_SFP_CU_LENGTH(data)           (LSB((data) >> 16))
+#define QLC_83XX_SFP_TX_FAULT(data)            ((data) & BIT_10)
+#define QLC_83XX_LINK_STATS(data)              ((data) & BIT_0)
+#define QLC_83XX_CURRENT_LINK_SPEED(data)      (((data) >> 3) & 7)
+#define QLC_83XX_LINK_PAUSE(data)              (((data) >> 6) & 3)
+#define QLC_83XX_LINK_LB(data)                 (((data) >> 8) & 7)
+#define QLC_83XX_LINK_FEC(data)                ((data) & BIT_12)
+#define QLC_83XX_LINK_EEE(data)                ((data) & BIT_13)
+#define QLC_83XX_DCBX(data)                    (((data) >> 28) & 7)
+#define QLC_83XX_AUTONEG(data)                 ((data) & BIT_15)
+#define QLC_83XX_TX_PAUSE                      0x10
+#define QLC_83XX_RX_PAUSE                      0x20
+#define QLC_83XX_TX_RX_PAUSE                   0x30
+#define QLC_83XX_CFG_STD_PAUSE                 (1 << 5)
+#define QLC_83XX_CFG_STD_TX_PAUSE              (1 << 20)
+#define QLC_83XX_CFG_STD_RX_PAUSE              (2 << 20)
+#define QLC_83XX_CFG_STD_TX_RX_PAUSE           (3 << 20)
+#define QLC_83XX_ENABLE_AUTONEG                (1 << 15)
+#define QLC_83XX_CFG_LOOPBACK_HSS              (2 << 1)
+#define QLC_83XX_CFG_LOOPBACK_PHY              (3 << 1)
+#define QLC_83XX_CFG_LOOPBACK_EXT              (4 << 1)
+
+/* LED configuration settings */
+#define QLC_83XX_ENABLE_BEACON         0xe
+#define QLC_83XX_BEACON_ON             1
+#define QLC_83XX_BEACON_OFF            0
+#define QLC_83XX_LED_RATE              0xff
+#define QLC_83XX_LED_ACT               (1 << 10)
+#define QLC_83XX_LED_MOD               (0 << 13)
+#define QLC_83XX_LED_CONFIG    (QLC_83XX_LED_RATE | QLC_83XX_LED_ACT | \
+                                QLC_83XX_LED_MOD)
+
+#define QLC_83XX_10M_LINK      1
+#define QLC_83XX_100M_LINK     2
+#define QLC_83XX_1G_LINK       3
+#define QLC_83XX_10G_LINK      4
+#define QLC_83XX_STAT_TX       3
+#define QLC_83XX_STAT_RX       2
+#define QLC_83XX_STAT_MAC      1
+#define QLC_83XX_TX_STAT_REGS  14
+#define QLC_83XX_RX_STAT_REGS  40
+#define QLC_83XX_MAC_STAT_REGS 94
+
+#define QLC_83XX_GET_FUNC_PRIVILEGE(VAL, FN)   (0x3 & ((VAL) >> (FN * 2)))
+#define QLC_83XX_SET_FUNC_OPMODE(VAL, FN)      ((VAL) << (FN * 2))
+#define QLC_83XX_DEFAULT_OPMODE                        0x55555555
+#define QLC_83XX_PRIVLEGED_FUNC                        0x1
+#define QLC_83XX_VIRTUAL_FUNC                          0x2
+
+#define QLC_83XX_LB_MAX_FILTERS                        2048
+#define QLC_83XX_LB_BUCKET_SIZE                        256
+#define QLC_83XX_MINIMUM_VECTOR                        3
+#define QLC_83XX_MAX_MC_COUNT                  38
+#define QLC_83XX_MAX_UC_COUNT                  4096
+
+#define QLC_83XX_PVID_STRIP_CAPABILITY         BIT_22
+#define QLC_83XX_GET_FUNC_MODE_FROM_NPAR_INFO(val)     (val & 0x80000000)
+#define QLC_83XX_GET_LRO_CAPABILITY(val)               (val & 0x20)
+#define QLC_83XX_GET_LSO_CAPABILITY(val)               (val & 0x40)
+#define QLC_83XX_GET_HW_LRO_CAPABILITY(val)            (val & 0x400)
+#define QLC_83XX_GET_VLAN_ALIGN_CAPABILITY(val)        (val & 0x4000)
+#define QLC_83XX_GET_FW_LRO_MSS_CAPABILITY(val)        (val & 0x20000)
+#define QLC_83XX_ESWITCH_CAPABILITY                    BIT_23
+#define QLC_83XX_SRIOV_MODE                            0x1
+#define QLCNIC_BRDTYPE_83XX_10G                        0x0083
+
+#define QLC_83XX_FLASH_SPI_STATUS              0x2808E010
+#define QLC_83XX_FLASH_SPI_CONTROL             0x2808E014
+#define QLC_83XX_FLASH_STATUS                  0x42100004
+#define QLC_83XX_FLASH_CONTROL                 0x42110004
+#define QLC_83XX_FLASH_ADDR                    0x42110008
+#define QLC_83XX_FLASH_WRDATA                  0x4211000C
+#define QLC_83XX_FLASH_RDDATA                  0x42110018
+#define QLC_83XX_FLASH_DIRECT_WINDOW           0x42110030
+#define QLC_83XX_FLASH_DIRECT_DATA(DATA)       (0x42150000 | (0x0000FFFF&DATA))
+#define QLC_83XX_FLASH_SECTOR_ERASE_CMD        0xdeadbeef
+#define QLC_83XX_FLASH_WRITE_CMD               0xdacdacda
+#define QLC_83XX_FLASH_BULK_WRITE_CMD          0xcadcadca
+#define QLC_83XX_FLASH_READ_RETRY_COUNT        5000
+#define QLC_83XX_FLASH_STATUS_READY            0x6
+#define QLC_83XX_FLASH_WRITE_MIN               2
+#define QLC_83XX_FLASH_WRITE_MAX               64
+#define QLC_83XX_FLASH_STATUS_REG_POLL_DELAY   1
+#define QLC_83XX_ERASE_MODE                    1
+#define QLC_83XX_WRITE_MODE                    2
+#define QLC_83XX_BULK_WRITE_MODE               3
+#define QLC_83XX_FLASH_FDT_WRITE_DEF_SIG       0xFD0100
+#define QLC_83XX_FLASH_FDT_ERASE_DEF_SIG       0xFD0300
+#define QLC_83XX_FLASH_FDT_READ_MFG_ID_VAL     0xFD009F
+#define QLC_83XX_FLASH_OEM_ERASE_SIG           0xFD03D8
+#define QLC_83XX_FLASH_OEM_WRITE_SIG           0xFD0101
+#define QLC_83XX_FLASH_OEM_READ_SIG            0xFD0005
+#define QLC_83XX_FLASH_ADDR_TEMP_VAL           0x00800000
+#define QLC_83XX_FLASH_ADDR_SECOND_TEMP_VAL    0x00800001
+#define QLC_83XX_FLASH_WRDATA_DEF              0x0
+#define QLC_83XX_FLASH_READ_CTRL               0x3F
+#define QLC_83XX_FLASH_SPI_CTRL                0x4
+#define QLC_83XX_FLASH_FIRST_ERASE_MS_VAL      0x2
+#define QLC_83XX_FLASH_SECOND_ERASE_MS_VAL     0x5
+#define QLC_83XX_FLASH_LAST_ERASE_MS_VAL       0x3D
+#define QLC_83XX_FLASH_FIRST_MS_PATTERN        0x43
+#define QLC_83XX_FLASH_SECOND_MS_PATTERN       0x7F
+#define QLC_83XX_FLASH_LAST_MS_PATTERN         0x7D
+#define QLC_83xx_FLASH_MAX_WAIT_USEC           100
+#define QLC_83XX_FLASH_LOCK_TIMEOUT            10000
+
+enum qlc_83xx_mbx_cmd_type {
+       QLC_83XX_MBX_CMD_WAIT = 0,
+       QLC_83XX_MBX_CMD_NO_WAIT,
+       QLC_83XX_MBX_CMD_BUSY_WAIT,
+};
+
+enum qlc_83xx_mbx_response_states {
+       QLC_83XX_MBX_RESPONSE_WAIT = 0,
+       QLC_83XX_MBX_RESPONSE_ARRIVED,
+};
+
+#define QLC_83XX_MBX_RESPONSE_FAILED   0x2
+#define QLC_83XX_MBX_RESPONSE_UNKNOWN  0x3
+
+/* Additional registers in 83xx */
+enum qlc_83xx_ext_regs {
+       QLCNIC_GLOBAL_RESET = 0,
+       QLCNIC_WILDCARD,
+       QLCNIC_INFORMANT,
+       QLCNIC_HOST_MBX_CTRL,
+       QLCNIC_FW_MBX_CTRL,
+       QLCNIC_BOOTLOADER_ADDR,
+       QLCNIC_BOOTLOADER_SIZE,
+       QLCNIC_FW_IMAGE_ADDR,
+       QLCNIC_MBX_INTR_ENBL,
+       QLCNIC_DEF_INT_MASK,
+       QLCNIC_DEF_INT_ID,
+       QLC_83XX_IDC_MAJ_VERSION,
+       QLC_83XX_IDC_DEV_STATE,
+       QLC_83XX_IDC_DRV_PRESENCE,
+       QLC_83XX_IDC_DRV_ACK,
+       QLC_83XX_IDC_CTRL,
+       QLC_83XX_IDC_DRV_AUDIT,
+       QLC_83XX_IDC_MIN_VERSION,
+       QLC_83XX_RECOVER_DRV_LOCK,
+       QLC_83XX_IDC_PF_0,
+       QLC_83XX_IDC_PF_1,
+       QLC_83XX_IDC_PF_2,
+       QLC_83XX_IDC_PF_3,
+       QLC_83XX_IDC_PF_4,
+       QLC_83XX_IDC_PF_5,
+       QLC_83XX_IDC_PF_6,
+       QLC_83XX_IDC_PF_7,
+       QLC_83XX_IDC_PF_8,
+       QLC_83XX_IDC_PF_9,
+       QLC_83XX_IDC_PF_10,
+       QLC_83XX_IDC_PF_11,
+       QLC_83XX_IDC_PF_12,
+       QLC_83XX_IDC_PF_13,
+       QLC_83XX_IDC_PF_14,
+       QLC_83XX_IDC_PF_15,
+       QLC_83XX_IDC_DEV_PARTITION_INFO_1,
+       QLC_83XX_IDC_DEV_PARTITION_INFO_2,
+       QLC_83XX_DRV_OP_MODE,
+       QLC_83XX_VNIC_STATE,
+       QLC_83XX_DRV_LOCK,
+       QLC_83XX_DRV_UNLOCK,
+       QLC_83XX_DRV_LOCK_ID,
+       QLC_83XX_ASIC_TEMP,
+};
+
+/* Initialize/Stop NIC command bit definitions */
+#define QLC_REGISTER_LB_IDC            BIT_0
+#define QLC_REGISTER_DCB_AEN           BIT_1
+#define QLC_83XX_MULTI_TENANCY_INFO    BIT_29
+#define QLC_INIT_FW_RESOURCES          BIT_31
+
+/* 83xx functions */
+int qlcnic_83xx_get_fw_version(struct qlcnic_adapter *);
+int qlcnic_83xx_issue_cmd(struct qlcnic_adapter *, struct qlcnic_cmd_args *);
+int qlcnic_83xx_setup_intr(struct qlcnic_adapter *);
+void qlcnic_83xx_get_func_no(struct qlcnic_adapter *);
+int qlcnic_83xx_cam_lock(struct qlcnic_adapter *);
+void qlcnic_83xx_cam_unlock(struct qlcnic_adapter *);
+int qlcnic_send_ctrl_op(struct qlcnic_adapter *, struct qlcnic_cmd_args *, u32);
+void qlcnic_83xx_add_sysfs(struct qlcnic_adapter *);
+void qlcnic_83xx_remove_sysfs(struct qlcnic_adapter *);
+void qlcnic_83xx_write_crb(struct qlcnic_adapter *, char *, loff_t, size_t);
+void qlcnic_83xx_read_crb(struct qlcnic_adapter *, char *, loff_t, size_t);
+int qlcnic_83xx_rd_reg_indirect(struct qlcnic_adapter *, ulong, int *);
+int qlcnic_83xx_wrt_reg_indirect(struct qlcnic_adapter *, ulong, u32);
+int qlcnic_83xx_nic_set_promisc(struct qlcnic_adapter *, u32);
+int qlcnic_83xx_config_hw_lro(struct qlcnic_adapter *, int);
+int qlcnic_83xx_config_rss(struct qlcnic_adapter *, int);
+void qlcnic_83xx_change_l2_filter(struct qlcnic_adapter *, u64 *, u16);
+int qlcnic_83xx_get_pci_info(struct qlcnic_adapter *, struct qlcnic_pci_info *);
+int qlcnic_83xx_set_nic_info(struct qlcnic_adapter *, struct qlcnic_info *);
+void qlcnic_83xx_initialize_nic(struct qlcnic_adapter *, int);
+
+int qlcnic_83xx_napi_add(struct qlcnic_adapter *, struct net_device *);
+void qlcnic_83xx_napi_del(struct qlcnic_adapter *);
+void qlcnic_83xx_napi_enable(struct qlcnic_adapter *);
+void qlcnic_83xx_napi_disable(struct qlcnic_adapter *);
+int qlcnic_83xx_config_led(struct qlcnic_adapter *, u32, u32);
+int qlcnic_ind_wr(struct qlcnic_adapter *, u32, u32);
+int qlcnic_ind_rd(struct qlcnic_adapter *, u32);
+int qlcnic_83xx_create_rx_ctx(struct qlcnic_adapter *);
+int qlcnic_83xx_create_tx_ctx(struct qlcnic_adapter *,
+                             struct qlcnic_host_tx_ring *, int);
+void qlcnic_83xx_del_rx_ctx(struct qlcnic_adapter *);
+void qlcnic_83xx_del_tx_ctx(struct qlcnic_adapter *,
+                           struct qlcnic_host_tx_ring *);
+int qlcnic_83xx_get_nic_info(struct qlcnic_adapter *, struct qlcnic_info *, u8);
+int qlcnic_83xx_setup_link_event(struct qlcnic_adapter *, int);
+void qlcnic_83xx_process_rcv_ring_diag(struct qlcnic_host_sds_ring *);
+int qlcnic_83xx_config_intrpt(struct qlcnic_adapter *, bool);
+int qlcnic_83xx_sre_macaddr_change(struct qlcnic_adapter *, u8 *, u16, u8);
+int qlcnic_83xx_get_mac_address(struct qlcnic_adapter *, u8 *, u8);
+int qlcnic_83xx_alloc_mbx_args(struct qlcnic_cmd_args *,
+                              struct qlcnic_adapter *, u32);
+void qlcnic_free_mbx_args(struct qlcnic_cmd_args *);
+void qlcnic_set_npar_data(struct qlcnic_adapter *, const struct qlcnic_info *,
+                         struct qlcnic_info *);
+int qlcnic_83xx_config_intr_coal(struct qlcnic_adapter *,
+                                struct ethtool_coalesce *);
+int qlcnic_83xx_set_rx_tx_intr_coal(struct qlcnic_adapter *);
+int qlcnic_83xx_get_port_info(struct qlcnic_adapter *);
+void qlcnic_83xx_enable_mbx_interrupt(struct qlcnic_adapter *);
+void qlcnic_83xx_disable_mbx_intr(struct qlcnic_adapter *);
+irqreturn_t qlcnic_83xx_clear_legacy_intr(struct qlcnic_adapter *);
+irqreturn_t qlcnic_83xx_intr(int, void *);
+irqreturn_t qlcnic_83xx_tmp_intr(int, void *);
+void qlcnic_83xx_check_vf(struct qlcnic_adapter *,
+                         const struct pci_device_id *);
+int qlcnic_83xx_config_default_opmode(struct qlcnic_adapter *);
+int qlcnic_83xx_setup_mbx_intr(struct qlcnic_adapter *);
+void qlcnic_83xx_free_mbx_intr(struct qlcnic_adapter *);
+void qlcnic_83xx_register_map(struct qlcnic_hardware_context *);
+void qlcnic_83xx_idc_aen_work(struct work_struct *);
+void qlcnic_83xx_config_ipaddr(struct qlcnic_adapter *, __be32, int);
+
+int qlcnic_83xx_erase_flash_sector(struct qlcnic_adapter *, u32);
+int qlcnic_83xx_flash_bulk_write(struct qlcnic_adapter *, u32, u32 *, int);
+int qlcnic_83xx_flash_write32(struct qlcnic_adapter *, u32, u32 *);
+int qlcnic_83xx_lock_flash(struct qlcnic_adapter *);
+void qlcnic_83xx_unlock_flash(struct qlcnic_adapter *);
+int qlcnic_83xx_save_flash_status(struct qlcnic_adapter *);
+int qlcnic_83xx_restore_flash_status(struct qlcnic_adapter *, int);
+int qlcnic_83xx_read_flash_mfg_id(struct qlcnic_adapter *);
+int qlcnic_83xx_read_flash_descriptor_table(struct qlcnic_adapter *);
+int qlcnic_83xx_flash_read32(struct qlcnic_adapter *, u32, u8 *, int);
+int qlcnic_83xx_lockless_flash_read32(struct qlcnic_adapter *,
+                                     u32, u8 *, int);
+int qlcnic_83xx_init(struct qlcnic_adapter *, int);
+int qlcnic_83xx_idc_ready_state_entry(struct qlcnic_adapter *);
+void qlcnic_83xx_idc_poll_dev_state(struct work_struct *);
+void qlcnic_83xx_idc_exit(struct qlcnic_adapter *);
+void qlcnic_83xx_idc_request_reset(struct qlcnic_adapter *, u32);
+int qlcnic_83xx_lock_driver(struct qlcnic_adapter *);
+void qlcnic_83xx_unlock_driver(struct qlcnic_adapter *);
+int qlcnic_83xx_set_default_offload_settings(struct qlcnic_adapter *);
+int qlcnic_83xx_idc_vnic_pf_entry(struct qlcnic_adapter *);
+int qlcnic_83xx_disable_vnic_mode(struct qlcnic_adapter *, int);
+int qlcnic_83xx_config_vnic_opmode(struct qlcnic_adapter *);
+int qlcnic_83xx_get_vnic_vport_info(struct qlcnic_adapter *,
+                                   struct qlcnic_info *, u8);
+int qlcnic_83xx_get_vnic_pf_info(struct qlcnic_adapter *, struct qlcnic_info *);
+int qlcnic_83xx_set_port_eswitch_status(struct qlcnic_adapter *, int, int *);
+
+void qlcnic_83xx_get_minidump_template(struct qlcnic_adapter *);
+void qlcnic_83xx_get_stats(struct qlcnic_adapter *adapter, u64 *data);
+int qlcnic_83xx_extend_md_capab(struct qlcnic_adapter *);
+int qlcnic_83xx_get_settings(struct qlcnic_adapter *, struct ethtool_cmd *);
+int qlcnic_83xx_set_settings(struct qlcnic_adapter *, struct ethtool_cmd *);
+void qlcnic_83xx_get_pauseparam(struct qlcnic_adapter *,
+                               struct ethtool_pauseparam *);
+int qlcnic_83xx_set_pauseparam(struct qlcnic_adapter *,
+                              struct ethtool_pauseparam *);
+int qlcnic_83xx_test_link(struct qlcnic_adapter *);
+int qlcnic_83xx_reg_test(struct qlcnic_adapter *);
+int qlcnic_83xx_get_regs_len(struct qlcnic_adapter *);
+int qlcnic_83xx_get_registers(struct qlcnic_adapter *, u32 *);
+int qlcnic_83xx_loopback_test(struct net_device *, u8);
+int qlcnic_83xx_interrupt_test(struct net_device *);
+int qlcnic_83xx_set_led(struct net_device *, enum ethtool_phys_id_state);
+int qlcnic_83xx_flash_test(struct qlcnic_adapter *);
+int qlcnic_83xx_enable_flash_write(struct qlcnic_adapter *);
+int qlcnic_83xx_disable_flash_write(struct qlcnic_adapter *);
+void qlcnic_83xx_enable_mbx_poll(struct qlcnic_adapter *);
+void qlcnic_83xx_disable_mbx_poll(struct qlcnic_adapter *);
+int qlcnic_83xx_idc_init(struct qlcnic_adapter *);
+int qlcnic_83xx_idc_reattach_driver(struct qlcnic_adapter *);
+int qlcnic_83xx_set_vnic_opmode(struct qlcnic_adapter *);
+int qlcnic_83xx_check_vnic_state(struct qlcnic_adapter *);
+void qlcnic_83xx_aer_stop_poll_work(struct qlcnic_adapter *);
+int qlcnic_83xx_aer_reset(struct qlcnic_adapter *);
+void qlcnic_83xx_aer_start_poll_work(struct qlcnic_adapter *);
+u32 qlcnic_83xx_get_saved_state(void *, u32);
+void qlcnic_83xx_set_saved_state(void *, u32, u32);
+void qlcnic_83xx_cache_tmpl_hdr_values(struct qlcnic_fw_dump *);
+u32 qlcnic_83xx_get_cap_size(void *, int);
+void qlcnic_83xx_set_sys_info(void *, int, u32);
+void qlcnic_83xx_store_cap_mask(void *, u32);
+int qlcnic_ms_mem_write128(struct qlcnic_adapter *, u64, u32 *, u32);
+#endif
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
new file mode 100644 (file)
index 0000000..a496390
--- /dev/null
@@ -0,0 +1,2612 @@
+/*
+ * QLogic qlcnic NIC Driver
+ * Copyright (c) 2009-2013 QLogic Corporation
+ *
+ * See LICENSE.qlcnic for copyright and licensing details.
+ */
+
+#include "qlcnic_sriov.h"
+#include "qlcnic.h"
+#include "qlcnic_hw.h"
+
+/* Reset template definitions */
+#define QLC_83XX_RESTART_TEMPLATE_SIZE         0x2000
+#define QLC_83XX_RESET_TEMPLATE_ADDR           0x4F0000
+#define QLC_83XX_RESET_SEQ_VERSION             0x0101
+
+#define QLC_83XX_OPCODE_NOP                    0x0000
+#define QLC_83XX_OPCODE_WRITE_LIST             0x0001
+#define QLC_83XX_OPCODE_READ_WRITE_LIST                0x0002
+#define QLC_83XX_OPCODE_POLL_LIST              0x0004
+#define QLC_83XX_OPCODE_POLL_WRITE_LIST                0x0008
+#define QLC_83XX_OPCODE_READ_MODIFY_WRITE      0x0010
+#define QLC_83XX_OPCODE_SEQ_PAUSE              0x0020
+#define QLC_83XX_OPCODE_SEQ_END                        0x0040
+#define QLC_83XX_OPCODE_TMPL_END               0x0080
+#define QLC_83XX_OPCODE_POLL_READ_LIST         0x0100
+
+/* EPORT control registers */
+#define QLC_83XX_RESET_CONTROL                 0x28084E50
+#define QLC_83XX_RESET_REG                     0x28084E60
+#define QLC_83XX_RESET_PORT0                   0x28084E70
+#define QLC_83XX_RESET_PORT1                   0x28084E80
+#define QLC_83XX_RESET_PORT2                   0x28084E90
+#define QLC_83XX_RESET_PORT3                   0x28084EA0
+#define QLC_83XX_RESET_SRESHIM                 0x28084EB0
+#define QLC_83XX_RESET_EPGSHIM                 0x28084EC0
+#define QLC_83XX_RESET_ETHERPCS                        0x28084ED0
+
+static int qlcnic_83xx_init_default_driver(struct qlcnic_adapter *adapter);
+static int qlcnic_83xx_check_heartbeat(struct qlcnic_adapter *p_dev);
+static int qlcnic_83xx_restart_hw(struct qlcnic_adapter *adapter);
+static int qlcnic_83xx_check_hw_status(struct qlcnic_adapter *p_dev);
+static int qlcnic_83xx_get_reset_instruction_template(struct qlcnic_adapter *);
+static void qlcnic_83xx_stop_hw(struct qlcnic_adapter *);
+
+/* Template header */
+struct qlc_83xx_reset_hdr {
+#if defined(__LITTLE_ENDIAN)
+       u16     version;
+       u16     signature;
+       u16     size;
+       u16     entries;
+       u16     hdr_size;
+       u16     checksum;
+       u16     init_offset;
+       u16     start_offset;
+#elif defined(__BIG_ENDIAN)
+       u16     signature;
+       u16     version;
+       u16     entries;
+       u16     size;
+       u16     checksum;
+       u16     hdr_size;
+       u16     start_offset;
+       u16     init_offset;
+#endif
+} __packed;
+
+/* Command entry header. */
+struct qlc_83xx_entry_hdr {
+#if defined(__LITTLE_ENDIAN)
+       u16     cmd;
+       u16     size;
+       u16     count;
+       u16     delay;
+#elif defined(__BIG_ENDIAN)
+       u16     size;
+       u16     cmd;
+       u16     delay;
+       u16     count;
+#endif
+} __packed;
+
+/* Generic poll command */
+struct qlc_83xx_poll {
+       u32     mask;
+       u32     status;
+} __packed;
+
+/* Read modify write command */
+struct qlc_83xx_rmw {
+       u32     mask;
+       u32     xor_value;
+       u32     or_value;
+#if defined(__LITTLE_ENDIAN)
+       u8      shl;
+       u8      shr;
+       u8      index_a;
+       u8      rsvd;
+#elif defined(__BIG_ENDIAN)
+       u8      rsvd;
+       u8      index_a;
+       u8      shr;
+       u8      shl;
+#endif
+} __packed;
+
+/* Generic command with 2 DWORD */
+struct qlc_83xx_entry {
+       u32 arg1;
+       u32 arg2;
+} __packed;
+
+/* Generic command with 4 DWORD */
+struct qlc_83xx_quad_entry {
+       u32 dr_addr;
+       u32 dr_value;
+       u32 ar_addr;
+       u32 ar_value;
+} __packed;
+
+static const char *const qlc_83xx_idc_states[] = {
+       "Unknown",
+       "Cold",
+       "Init",
+       "Ready",
+       "Need Reset",
+       "Need Quiesce",
+       "Failed",
+       "Quiesce"
+};
+
+static int
+qlcnic_83xx_idc_check_driver_presence_reg(struct qlcnic_adapter *adapter)
+{
+       u32 val;
+
+       val = QLCRDX(adapter->ahw, QLC_83XX_IDC_DRV_PRESENCE);
+       if ((val & 0xFFFF))
+               return 1;
+       else
+               return 0;
+}
+
+static void qlcnic_83xx_idc_log_state_history(struct qlcnic_adapter *adapter)
+{
+       u32 cur, prev;
+
+       cur = adapter->ahw->idc.curr_state;
+       prev = adapter->ahw->idc.prev_state;
+
+       dev_info(&adapter->pdev->dev,
+                "current state  = %s,  prev state = %s\n",
+                adapter->ahw->idc.name[cur],
+                adapter->ahw->idc.name[prev]);
+}
+
+static int qlcnic_83xx_idc_update_audit_reg(struct qlcnic_adapter *adapter,
+                                           u8 mode, int lock)
+{
+       u32 val;
+       int seconds;
+
+       if (lock) {
+               if (qlcnic_83xx_lock_driver(adapter))
+                       return -EBUSY;
+       }
+
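+       /* Port number in bits 0-3, mode in bit 7, seconds from bit 8 */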
+       val = QLCRDX(adapter->ahw, QLC_83XX_IDC_DRV_AUDIT);
+       val |= (adapter->portnum & 0xf);
+       val |= mode << 7;
+       if (mode)
+               seconds = jiffies / HZ - adapter->ahw->idc.sec_counter;
+       else
+               seconds = jiffies / HZ;
+
+       val |= seconds << 8;
+       QLCWRX(adapter->ahw, QLC_83XX_IDC_DRV_AUDIT, val);
+       adapter->ahw->idc.sec_counter = jiffies / HZ;
+
+       if (lock)
+               qlcnic_83xx_unlock_driver(adapter);
+
+       return 0;
+}
+
+static void qlcnic_83xx_idc_update_minor_version(struct qlcnic_adapter *adapter)
+{
+       u32 val;
+
+       val = QLCRDX(adapter->ahw, QLC_83XX_IDC_MIN_VERSION);
+       val = val & ~(0x3 << (adapter->portnum * 2));
+       val = val | (QLC_83XX_IDC_MINOR_VERSION << (adapter->portnum * 2));
+       QLCWRX(adapter->ahw, QLC_83XX_IDC_MIN_VERSION, val);
+}
+
+static int qlcnic_83xx_idc_update_major_version(struct qlcnic_adapter *adapter,
+                                               int lock)
+{
+       u32 val;
+
+       if (lock) {
+               if (qlcnic_83xx_lock_driver(adapter))
+                       return -EBUSY;
+       }
+
+       val = QLCRDX(adapter->ahw, QLC_83XX_IDC_MAJ_VERSION);
+       val = val & ~0xFF;
+       val = val | QLC_83XX_IDC_MAJOR_VERSION;
+       QLCWRX(adapter->ahw, QLC_83XX_IDC_MAJ_VERSION, val);
+
+       if (lock)
+               qlcnic_83xx_unlock_driver(adapter);
+
+       return 0;
+}
+
+static int
+qlcnic_83xx_idc_update_drv_presence_reg(struct qlcnic_adapter *adapter,
+                                       int status, int lock)
+{
+       u32 val;
+
+       if (lock) {
+               if (qlcnic_83xx_lock_driver(adapter))
+                       return -EBUSY;
+       }
+
+       val = QLCRDX(adapter->ahw, QLC_83XX_IDC_DRV_PRESENCE);
+
+       if (status)
+               val = val | (1 << adapter->portnum);
+       else
+               val = val & ~(1 << adapter->portnum);
+
+       QLCWRX(adapter->ahw, QLC_83XX_IDC_DRV_PRESENCE, val);
+       qlcnic_83xx_idc_update_minor_version(adapter);
+
+       if (lock)
+               qlcnic_83xx_unlock_driver(adapter);
+
+       return 0;
+}
+
+static int qlcnic_83xx_idc_check_major_version(struct qlcnic_adapter *adapter)
+{
+       u32 val;
+       u8 version;
+
+       val = QLCRDX(adapter->ahw, QLC_83XX_IDC_MAJ_VERSION);
+       version = val & 0xFF;
+
+       if (version != QLC_83XX_IDC_MAJOR_VERSION) {
+               dev_info(&adapter->pdev->dev,
+                        "%s: mismatch. version 0x%x, expected version 0x%x\n",
+                        __func__, version, QLC_83XX_IDC_MAJOR_VERSION);
+               return -EIO;
+       }
+
+       return 0;
+}
+
+static int qlcnic_83xx_idc_clear_registers(struct qlcnic_adapter *adapter,
+                                          int lock)
+{
+       u32 val;
+
+       if (lock) {
+               if (qlcnic_83xx_lock_driver(adapter))
+                       return -EBUSY;
+       }
+
+       QLCWRX(adapter->ahw, QLC_83XX_IDC_DRV_ACK, 0);
+       /* Clear graceful reset bit */
+       val = QLCRDX(adapter->ahw, QLC_83XX_IDC_CTRL);
+       val &= ~QLC_83XX_IDC_GRACEFULL_RESET;
+       QLCWRX(adapter->ahw, QLC_83XX_IDC_CTRL, val);
+
+       if (lock)
+               qlcnic_83xx_unlock_driver(adapter);
+
+       return 0;
+}
+
+static int qlcnic_83xx_idc_update_drv_ack_reg(struct qlcnic_adapter *adapter,
+                                             int flag, int lock)
+{
+       u32 val;
+
+       if (lock) {
+               if (qlcnic_83xx_lock_driver(adapter))
+                       return -EBUSY;
+       }
+
+       val = QLCRDX(adapter->ahw, QLC_83XX_IDC_DRV_ACK);
+       if (flag)
+               val = val | (1 << adapter->portnum);
+       else
+               val = val & ~(1 << adapter->portnum);
+       QLCWRX(adapter->ahw, QLC_83XX_IDC_DRV_ACK, val);
+
+       if (lock)
+               qlcnic_83xx_unlock_driver(adapter);
+
+       return 0;
+}
+
+static int qlcnic_83xx_idc_check_timeout(struct qlcnic_adapter *adapter,
+                                        int time_limit)
+{
+       u64 seconds;
+
+       seconds = jiffies / HZ - adapter->ahw->idc.sec_counter;
+       if (seconds <= time_limit)
+               return 0;
+       else
+               return -EBUSY;
+}
+
+/**
+ * qlcnic_83xx_idc_check_reset_ack_reg
+ *
+ * @adapter: adapter structure
+ *
+ * Check ACK wait limit and clear the functions which failed to ACK
+ *
+ * Return 0 if all functions have acknowledged the reset request.
+ **/
+static int qlcnic_83xx_idc_check_reset_ack_reg(struct qlcnic_adapter *adapter)
+{
+       int timeout;
+       u32 ack, presence, val;
+
+       timeout = QLC_83XX_IDC_RESET_TIMEOUT_SECS;
+       ack = QLCRDX(adapter->ahw, QLC_83XX_IDC_DRV_ACK);
+       presence = QLCRDX(adapter->ahw, QLC_83XX_IDC_DRV_PRESENCE);
+       dev_info(&adapter->pdev->dev,
+                "%s: ack = 0x%x, presence = 0x%x\n", __func__, ack, presence);
+       if (!((ack & presence) == presence)) {
+               if (qlcnic_83xx_idc_check_timeout(adapter, timeout)) {
+                       /* Clear functions which failed to ACK */
+                       dev_info(&adapter->pdev->dev,
+                                "%s: ACK wait exceeds time limit\n", __func__);
+                       val = QLCRDX(adapter->ahw, QLC_83XX_IDC_DRV_PRESENCE);
+                       val = val & ~(ack ^ presence);
+                       if (qlcnic_83xx_lock_driver(adapter))
+                               return -EBUSY;
+                       QLCWRX(adapter->ahw, QLC_83XX_IDC_DRV_PRESENCE, val);
+                       dev_info(&adapter->pdev->dev,
+                                "%s: updated drv presence reg = 0x%x\n",
+                                __func__, val);
+                       qlcnic_83xx_unlock_driver(adapter);
+                       return 0;
+
+               } else {
+                       return 1;
+               }
+       } else {
+               dev_info(&adapter->pdev->dev,
+                        "%s: Reset ACK received from all functions\n",
+                        __func__);
+               return 0;
+       }
+}
+
+/**
+ * qlcnic_83xx_idc_tx_soft_reset
+ *
+ * @adapter: adapter structure
+ *
+ * Handle context deletion and recreation request from transmit routine
+ *
+ * Returns: -EBUSY on failure or 0 on success
+ *
+ **/
+static int qlcnic_83xx_idc_tx_soft_reset(struct qlcnic_adapter *adapter)
+{
+       struct net_device *netdev = adapter->netdev;
+
+       if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
+               return -EBUSY;
+
+       netif_device_detach(netdev);
+       qlcnic_down(adapter, netdev);
+       qlcnic_up(adapter, netdev);
+       netif_device_attach(netdev);
+       clear_bit(__QLCNIC_RESETTING, &adapter->state);
+       netdev_info(adapter->netdev, "%s: soft reset complete.\n", __func__);
+
+       return 0;
+}
+
+/**
+ * qlcnic_83xx_idc_detach_driver
+ *
+ * @adapter: adapter structure
+ * Detach the net interface, stop TX, and clean up resources before the HW reset.
+ * Returns: None
+ *
+ **/
+static void qlcnic_83xx_idc_detach_driver(struct qlcnic_adapter *adapter)
+{
+       int i;
+       struct net_device *netdev = adapter->netdev;
+
+       netif_device_detach(netdev);
+       qlcnic_83xx_detach_mailbox_work(adapter);
+
+       /* Disable mailbox interrupt */
+       qlcnic_83xx_disable_mbx_intr(adapter);
+       qlcnic_down(adapter, netdev);
+       for (i = 0; i < adapter->ahw->num_msix; i++) {
+               adapter->ahw->intr_tbl[i].id = i;
+               adapter->ahw->intr_tbl[i].enabled = 0;
+               adapter->ahw->intr_tbl[i].src = 0;
+       }
+
+       if (qlcnic_sriov_pf_check(adapter))
+               qlcnic_sriov_pf_reset(adapter);
+}
+
+/**
+ * qlcnic_83xx_idc_attach_driver
+ *
+ * @adapter: adapter structure
+ *
+ * Re-attach and re-enable net interface
+ * Returns: None
+ *
+ **/
+static void qlcnic_83xx_idc_attach_driver(struct qlcnic_adapter *adapter)
+{
+       struct net_device *netdev = adapter->netdev;
+
+       if (netif_running(netdev)) {
+               if (qlcnic_up(adapter, netdev))
+                       goto done;
+               qlcnic_restore_indev_addr(netdev, NETDEV_UP);
+       }
+done:
+       netif_device_attach(netdev);
+}
+
+static int qlcnic_83xx_idc_enter_failed_state(struct qlcnic_adapter *adapter,
+                                             int lock)
+{
+       if (lock) {
+               if (qlcnic_83xx_lock_driver(adapter))
+                       return -EBUSY;
+       }
+
+       qlcnic_83xx_idc_clear_registers(adapter, 0);
+       QLCWRX(adapter->ahw, QLC_83XX_IDC_DEV_STATE, QLC_83XX_IDC_DEV_FAILED);
+       if (lock)
+               qlcnic_83xx_unlock_driver(adapter);
+
+       qlcnic_83xx_idc_log_state_history(adapter);
+       dev_info(&adapter->pdev->dev, "Device will enter failed state\n");
+
+       return 0;
+}
+
+static int qlcnic_83xx_idc_enter_init_state(struct qlcnic_adapter *adapter,
+                                           int lock)
+{
+       if (lock) {
+               if (qlcnic_83xx_lock_driver(adapter))
+                       return -EBUSY;
+       }
+
+       QLCWRX(adapter->ahw, QLC_83XX_IDC_DEV_STATE, QLC_83XX_IDC_DEV_INIT);
+
+       if (lock)
+               qlcnic_83xx_unlock_driver(adapter);
+
+       return 0;
+}
+
+static int qlcnic_83xx_idc_enter_need_quiesce(struct qlcnic_adapter *adapter,
+                                             int lock)
+{
+       if (lock) {
+               if (qlcnic_83xx_lock_driver(adapter))
+                       return -EBUSY;
+       }
+
+       QLCWRX(adapter->ahw, QLC_83XX_IDC_DEV_STATE,
+              QLC_83XX_IDC_DEV_NEED_QUISCENT);
+
+       if (lock)
+               qlcnic_83xx_unlock_driver(adapter);
+
+       return 0;
+}
+
+static int
+qlcnic_83xx_idc_enter_need_reset_state(struct qlcnic_adapter *adapter, int lock)
+{
+       if (lock) {
+               if (qlcnic_83xx_lock_driver(adapter))
+                       return -EBUSY;
+       }
+
+       QLCWRX(adapter->ahw, QLC_83XX_IDC_DEV_STATE,
+              QLC_83XX_IDC_DEV_NEED_RESET);
+
+       if (lock)
+               qlcnic_83xx_unlock_driver(adapter);
+
+       return 0;
+}
+
+static int qlcnic_83xx_idc_enter_ready_state(struct qlcnic_adapter *adapter,
+                                            int lock)
+{
+       if (lock) {
+               if (qlcnic_83xx_lock_driver(adapter))
+                       return -EBUSY;
+       }
+
+       QLCWRX(adapter->ahw, QLC_83XX_IDC_DEV_STATE, QLC_83XX_IDC_DEV_READY);
+       if (lock)
+               qlcnic_83xx_unlock_driver(adapter);
+
+       return 0;
+}
+
+/**
+ * qlcnic_83xx_idc_find_reset_owner_id
+ *
+ * @adapter: adapter structure
+ *
+ * NIC gets precedence over ISCSI and ISCSI has precedence over FCOE.
+ * Within the same class, the function with the lowest PCI ID assumes ownership.
+ *
+ * Returns: reset owner id or failure indication (-EIO)
+ *
+ **/
+static int qlcnic_83xx_idc_find_reset_owner_id(struct qlcnic_adapter *adapter)
+{
+       u32 reg, reg1, reg2, i, j, owner, class;
+
+       reg1 = QLCRDX(adapter->ahw, QLC_83XX_IDC_DEV_PARTITION_INFO_1);
+       reg2 = QLCRDX(adapter->ahw, QLC_83XX_IDC_DEV_PARTITION_INFO_2);
+       owner = QLCNIC_TYPE_NIC;
+       i = 0;
+       j = 0;
+       reg = reg1;
+
+       do {
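+               /* Each function's type is a 4-bit field; low two bits give its class */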
+               class = (((reg & (0xF << j * 4)) >> j * 4) & 0x3);
+               if (class == owner)
+                       break;
+               if (i == (QLC_83XX_IDC_MAX_FUNC_PER_PARTITION_INFO - 1)) {
+                       reg = reg2;
+                       j = 0;
+               } else {
+                       j++;
+               }
+
+               if (i == (QLC_83XX_IDC_MAX_CNA_FUNCTIONS - 1)) {
+                       if (owner == QLCNIC_TYPE_NIC)
+                               owner = QLCNIC_TYPE_ISCSI;
+                       else if (owner == QLCNIC_TYPE_ISCSI)
+                               owner = QLCNIC_TYPE_FCOE;
+                       else if (owner == QLCNIC_TYPE_FCOE)
+                               return -EIO;
+                       reg = reg1;
+                       j = 0;
+                       i = 0;
+               }
+       } while (i++ < QLC_83XX_IDC_MAX_CNA_FUNCTIONS);
+
+       return i;
+}
+
+static int qlcnic_83xx_idc_restart_hw(struct qlcnic_adapter *adapter, int lock)
+{
+       int ret = 0;
+
+       ret = qlcnic_83xx_restart_hw(adapter);
+
+       if (ret) {
+               qlcnic_83xx_idc_enter_failed_state(adapter, lock);
+       } else {
+               qlcnic_83xx_idc_clear_registers(adapter, lock);
+               ret = qlcnic_83xx_idc_enter_ready_state(adapter, lock);
+       }
+
+       return ret;
+}
+
+static int qlcnic_83xx_idc_check_fan_failure(struct qlcnic_adapter *adapter)
+{
+       u32 status;
+
+       status = QLC_SHARED_REG_RD32(adapter, QLCNIC_PEG_HALT_STATUS1);
+
+       if (status & QLCNIC_RCODE_FATAL_ERROR) {
+               dev_err(&adapter->pdev->dev,
+                       "peg halt status1=0x%x\n", status);
+               if (QLCNIC_FWERROR_CODE(status) == QLCNIC_FWERROR_FAN_FAILURE) {
+                       dev_err(&adapter->pdev->dev,
+                               "On board active cooling fan failed. "
+                               "Device has been halted.\n");
+                       dev_err(&adapter->pdev->dev,
+                               "Replace the adapter.\n");
+                       return -EIO;
+               }
+       }
+
+       return 0;
+}
+
+int qlcnic_83xx_idc_reattach_driver(struct qlcnic_adapter *adapter)
+{
+       int err;
+
+       qlcnic_83xx_reinit_mbx_work(adapter->ahw->mailbox);
+       qlcnic_83xx_enable_mbx_interrupt(adapter);
+
+       qlcnic_83xx_initialize_nic(adapter, 1);
+
+       err = qlcnic_sriov_pf_reinit(adapter);
+       if (err)
+               return err;
+
+       qlcnic_83xx_enable_mbx_interrupt(adapter);
+
+       if (qlcnic_83xx_configure_opmode(adapter)) {
+               qlcnic_83xx_idc_enter_failed_state(adapter, 1);
+               return -EIO;
+       }
+
+       if (adapter->nic_ops->init_driver(adapter)) {
+               qlcnic_83xx_idc_enter_failed_state(adapter, 1);
+               return -EIO;
+       }
+
+       if (adapter->portnum == 0)
+               qlcnic_set_drv_version(adapter);
+
+       qlcnic_dcb_get_info(adapter->dcb);
+       qlcnic_83xx_idc_attach_driver(adapter);
+
+       return 0;
+}
+
+static void qlcnic_83xx_idc_update_idc_params(struct qlcnic_adapter *adapter)
+{
+       struct qlcnic_hardware_context *ahw = adapter->ahw;
+
+       qlcnic_83xx_idc_update_drv_presence_reg(adapter, 1, 1);
+       qlcnic_83xx_idc_update_audit_reg(adapter, 0, 1);
+       set_bit(QLC_83XX_MODULE_LOADED, &adapter->ahw->idc.status);
+
+       ahw->idc.quiesce_req = 0;
+       ahw->idc.delay = QLC_83XX_IDC_FW_POLL_DELAY;
+       ahw->idc.err_code = 0;
+       ahw->idc.collect_dump = 0;
+       ahw->reset_context = 0;
+       adapter->tx_timeo_cnt = 0;
+       ahw->idc.delay_reset = 0;
+
+       clear_bit(__QLCNIC_RESETTING, &adapter->state);
+}
+
+/**
+ * qlcnic_83xx_idc_ready_state_entry
+ *
+ * @adapter: adapter structure
+ *
+ * Perform ready state initialization; this routine is invoked only once
+ * from the READY state.
+ *
+ * Returns: Error code or Success(0)
+ *
+ **/
+int qlcnic_83xx_idc_ready_state_entry(struct qlcnic_adapter *adapter)
+{
+       struct qlcnic_hardware_context *ahw = adapter->ahw;
+
+       if (ahw->idc.prev_state != QLC_83XX_IDC_DEV_READY) {
+               qlcnic_83xx_idc_update_idc_params(adapter);
+               /* Re-attach the device if required */
+               if ((ahw->idc.prev_state == QLC_83XX_IDC_DEV_NEED_RESET) ||
+                   (ahw->idc.prev_state == QLC_83XX_IDC_DEV_INIT)) {
+                       if (qlcnic_83xx_idc_reattach_driver(adapter))
+                               return -EIO;
+               }
+       }
+
+       return 0;
+}
+
+/**
+ * qlcnic_83xx_idc_vnic_pf_entry
+ *
+ * @adapter: adapter structure
+ *
+ * Ensure vNIC mode privileged function starts only after vNIC mode is
+ * enabled by management function.
+ * If vNIC mode is ready, start initialization.
+ *
+ * Returns: -EIO or 0
+ *
+ **/
+int qlcnic_83xx_idc_vnic_pf_entry(struct qlcnic_adapter *adapter)
+{
+       u32 state;
+       struct qlcnic_hardware_context *ahw = adapter->ahw;
+
+       /* Privileged function waits till mgmt function enables VNIC mode */
+       state = QLCRDX(adapter->ahw, QLC_83XX_VNIC_STATE);
+       if (state != QLCNIC_DEV_NPAR_OPER) {
+               if (!ahw->idc.vnic_wait_limit--) {
+                       qlcnic_83xx_idc_enter_failed_state(adapter, 1);
+                       return -EIO;
+               }
+               dev_info(&adapter->pdev->dev, "vNIC mode disabled\n");
+               return -EIO;
+
+       } else {
+               /* Perform one time initialization from ready state */
+               if (ahw->idc.vnic_state != QLCNIC_DEV_NPAR_OPER) {
+                       qlcnic_83xx_idc_update_idc_params(adapter);
+
+                       /* If the previous state is UNKNOWN, device will be
+                        * already attached properly by Init routine
+                        */
+                       if (ahw->idc.prev_state != QLC_83XX_IDC_DEV_UNKNOWN) {
+                               if (qlcnic_83xx_idc_reattach_driver(adapter))
+                                       return -EIO;
+                       }
+                       adapter->ahw->idc.vnic_state =  QLCNIC_DEV_NPAR_OPER;
+                       dev_info(&adapter->pdev->dev, "vNIC mode enabled\n");
+               }
+       }
+
+       return 0;
+}
+
+static int qlcnic_83xx_idc_unknown_state(struct qlcnic_adapter *adapter)
+{
+       adapter->ahw->idc.err_code = -EIO;
+       dev_err(&adapter->pdev->dev,
+               "%s: Device in unknown state\n", __func__);
+       clear_bit(__QLCNIC_RESETTING, &adapter->state);
+       return 0;
+}
+
+/**
+ * qlcnic_83xx_idc_cold_state
+ *
+ * @adapter: adapter structure
+ *
+ * If HW is up and running, the device will enter READY state.
+ * If a firmware image needs to be loaded from the host, the device is
+ * forced to start with the file firmware image.
+ *
+ * Returns: Error code or Success(0)
+ *
+ **/
+static int qlcnic_83xx_idc_cold_state_handler(struct qlcnic_adapter *adapter)
+{
+       qlcnic_83xx_idc_update_drv_presence_reg(adapter, 1, 0);
+       qlcnic_83xx_idc_update_audit_reg(adapter, 1, 0);
+
+       if (qlcnic_load_fw_file) {
+               qlcnic_83xx_idc_restart_hw(adapter, 0);
+       } else {
+               if (qlcnic_83xx_check_hw_status(adapter)) {
+                       qlcnic_83xx_idc_enter_failed_state(adapter, 0);
+                       return -EIO;
+               } else {
+                       qlcnic_83xx_idc_enter_ready_state(adapter, 0);
+               }
+       }
+       return 0;
+}
+
+/**
+ * qlcnic_83xx_idc_init_state
+ *
+ * @adapter: adapter structure
+ *
+ * Reset owner will restart the device from this state.
+ * Device will enter failed state if it remains
+ * in this state for more than DEV_INIT time limit.
+ *
+ * Returns: Error code or Success(0)
+ *
+ **/
+static int qlcnic_83xx_idc_init_state(struct qlcnic_adapter *adapter)
+{
+       int timeout, ret = 0;
+       u32 owner;
+
+       timeout = QLC_83XX_IDC_INIT_TIMEOUT_SECS;
+       if (adapter->ahw->idc.prev_state == QLC_83XX_IDC_DEV_NEED_RESET) {
+               owner = qlcnic_83xx_idc_find_reset_owner_id(adapter);
+               if (adapter->ahw->pci_func == owner)
+                       ret = qlcnic_83xx_idc_restart_hw(adapter, 1);
+       } else {
+               ret = qlcnic_83xx_idc_check_timeout(adapter, timeout);
+       }
+
+       return ret;
+}
+
+/**
+ * qlcnic_83xx_idc_ready_state
+ *
+ * @adapter: adapter structure
+ *
+ * Perform IDC protocol specified actions after monitoring device state and
+ * events.
+ *
+ * Returns: Error code or Success(0)
+ *
+ **/
+static int qlcnic_83xx_idc_ready_state(struct qlcnic_adapter *adapter)
+{
+       struct qlcnic_hardware_context *ahw = adapter->ahw;
+       struct qlcnic_mailbox *mbx = ahw->mailbox;
+       int ret = 0;
+       u32 val;
+
+       /* Perform NIC configuration based ready state entry actions */
+       if (ahw->idc.state_entry(adapter))
+               return -EIO;
+
+       if (qlcnic_check_temp(adapter)) {
+               if (ahw->temp == QLCNIC_TEMP_PANIC) {
+                       qlcnic_83xx_idc_check_fan_failure(adapter);
+                       dev_err(&adapter->pdev->dev,
+                               "Error: device temperature %d above limits\n",
+                               adapter->ahw->temp);
+                       clear_bit(QLC_83XX_MBX_READY, &mbx->status);
+                       set_bit(__QLCNIC_RESETTING, &adapter->state);
+                       qlcnic_83xx_idc_detach_driver(adapter);
+                       qlcnic_83xx_idc_enter_failed_state(adapter, 1);
+                       return -EIO;
+               }
+       }
+
+       val = QLCRDX(adapter->ahw, QLC_83XX_IDC_CTRL);
+       ret = qlcnic_83xx_check_heartbeat(adapter);
+       if (ret) {
+               adapter->flags |= QLCNIC_FW_HANG;
+               if (!(val & QLC_83XX_IDC_DISABLE_FW_RESET_RECOVERY)) {
+                       clear_bit(QLC_83XX_MBX_READY, &mbx->status);
+                       set_bit(__QLCNIC_RESETTING, &adapter->state);
+                       qlcnic_83xx_idc_enter_need_reset_state(adapter, 1);
+               } else {
+                       netdev_info(adapter->netdev, "%s: Auto firmware recovery is disabled\n",
+                                   __func__);
+                       qlcnic_83xx_idc_enter_failed_state(adapter, 1);
+               }
+               return -EIO;
+       }
+
+       if ((val & QLC_83XX_IDC_GRACEFULL_RESET) || ahw->idc.collect_dump) {
+               clear_bit(QLC_83XX_MBX_READY, &mbx->status);
+
+               /* Move to need reset state and prepare for reset */
+               qlcnic_83xx_idc_enter_need_reset_state(adapter, 1);
+               return ret;
+       }
+
+       /* Check for soft reset request */
+       if (ahw->reset_context &&
+           !(val & QLC_83XX_IDC_DISABLE_FW_RESET_RECOVERY)) {
+               adapter->ahw->reset_context = 0;
+               qlcnic_83xx_idc_tx_soft_reset(adapter);
+               return ret;
+       }
+
+       /* Move to need quiesce state if requested */
+       if (adapter->ahw->idc.quiesce_req) {
+               qlcnic_83xx_idc_enter_need_quiesce(adapter, 1);
+               qlcnic_83xx_idc_update_audit_reg(adapter, 0, 1);
+               return ret;
+       }
+
+       return ret;
+}
+
+/**
+ * qlcnic_83xx_idc_need_reset_state
+ *
+ * @adapter: adapter structure
+ *
+ * Device will remain in this state until:
+ *     Reset request ACKs are received from all the functions, or
+ *     Wait time exceeds the max time limit
+ *
+ * Returns: Error code or Success(0)
+ *
+ **/
+static int qlcnic_83xx_idc_need_reset_state(struct qlcnic_adapter *adapter)
+{
+       struct qlcnic_mailbox *mbx = adapter->ahw->mailbox;
+       int ret = 0;
+
+       if (adapter->ahw->idc.prev_state != QLC_83XX_IDC_DEV_NEED_RESET) {
+               qlcnic_83xx_idc_update_audit_reg(adapter, 0, 1);
+               set_bit(__QLCNIC_RESETTING, &adapter->state);
+               clear_bit(QLC_83XX_MBX_READY, &mbx->status);
+               if (adapter->ahw->nic_mode == QLCNIC_VNIC_MODE)
+                       qlcnic_83xx_disable_vnic_mode(adapter, 1);
+
+               if (qlcnic_check_diag_status(adapter)) {
+                       dev_info(&adapter->pdev->dev,
+                                "%s: Wait for diag completion\n", __func__);
+                       adapter->ahw->idc.delay_reset = 1;
+                       return 0;
+               } else {
+                       qlcnic_83xx_idc_update_drv_ack_reg(adapter, 1, 1);
+                       qlcnic_83xx_idc_detach_driver(adapter);
+               }
+       }
+
+       if (qlcnic_check_diag_status(adapter)) {
+               dev_info(&adapter->pdev->dev,
+                        "%s: Wait for diag completion\n", __func__);
+               return -1;
+       } else {
+               if (adapter->ahw->idc.delay_reset) {
+                       qlcnic_83xx_idc_update_drv_ack_reg(adapter, 1, 1);
+                       qlcnic_83xx_idc_detach_driver(adapter);
+                       adapter->ahw->idc.delay_reset = 0;
+               }
+
+               /* Check for ACK from other functions */
+               ret = qlcnic_83xx_idc_check_reset_ack_reg(adapter);
+               if (ret) {
+                       dev_info(&adapter->pdev->dev,
+                                "%s: Waiting for reset ACK\n", __func__);
+                       return -1;
+               }
+       }
+
+       /* Transit to INIT state and restart the HW */
+       qlcnic_83xx_idc_enter_init_state(adapter, 1);
+
+       return ret;
+}
+
+static int qlcnic_83xx_idc_need_quiesce_state(struct qlcnic_adapter *adapter)
+{
+       dev_err(&adapter->pdev->dev, "%s: TBD\n", __func__);
+       return 0;
+}
+
+static void qlcnic_83xx_idc_failed_state(struct qlcnic_adapter *adapter)
+{
+       struct qlcnic_hardware_context *ahw = adapter->ahw;
+       u32 val, owner;
+
+       val = QLCRDX(adapter->ahw, QLC_83XX_IDC_CTRL);
+       if (val & QLC_83XX_IDC_DISABLE_FW_RESET_RECOVERY) {
+               owner = qlcnic_83xx_idc_find_reset_owner_id(adapter);
+               if (ahw->pci_func == owner) {
+                       qlcnic_83xx_stop_hw(adapter);
+                       qlcnic_dump_fw(adapter);
+               }
+       }
+
+       netdev_warn(adapter->netdev, "%s: Reboot will be required to recover the adapter!!\n",
+                   __func__);
+       clear_bit(__QLCNIC_RESETTING, &adapter->state);
+       ahw->idc.err_code = -EIO;
+
+       return;
+}
+
+static int qlcnic_83xx_idc_quiesce_state(struct qlcnic_adapter *adapter)
+{
+       dev_info(&adapter->pdev->dev, "%s: TBD\n", __func__);
+       return 0;
+}
+
+static int qlcnic_83xx_idc_check_state_validity(struct qlcnic_adapter *adapter,
+                                               u32 state)
+{
+       u32 cur, prev, next;
+
+       cur = adapter->ahw->idc.curr_state;
+       prev = adapter->ahw->idc.prev_state;
+       next = state;
+
+       if ((next < QLC_83XX_IDC_DEV_COLD) ||
+           (next > QLC_83XX_IDC_DEV_QUISCENT)) {
+               dev_err(&adapter->pdev->dev,
+                       "%s: curr %d, prev %d, next state %d is invalid\n",
+                       __func__, cur, prev, state);
+               return 1;
+       }
+
+       if ((cur == QLC_83XX_IDC_DEV_UNKNOWN) &&
+           (prev == QLC_83XX_IDC_DEV_UNKNOWN)) {
+               if ((next != QLC_83XX_IDC_DEV_COLD) &&
+                   (next != QLC_83XX_IDC_DEV_READY)) {
+                       dev_err(&adapter->pdev->dev,
+                               "%s: failed, cur %d prev %d next %d\n",
+                               __func__, cur, prev, next);
+                       return 1;
+               }
+       }
+
+       if (next == QLC_83XX_IDC_DEV_INIT) {
+               if ((prev != QLC_83XX_IDC_DEV_INIT) &&
+                   (prev != QLC_83XX_IDC_DEV_COLD) &&
+                   (prev != QLC_83XX_IDC_DEV_NEED_RESET)) {
+                       dev_err(&adapter->pdev->dev,
+                               "%s: failed, cur %d prev %d next %d\n",
+                               __func__, cur, prev, next);
+                       return 1;
+               }
+       }
+
+       return 0;
+}
+
+#define QLC_83XX_ENCAP_TYPE_VXLAN      BIT_1
+#define QLC_83XX_MATCH_ENCAP_ID                BIT_2
+#define QLC_83XX_SET_VXLAN_UDP_DPORT   BIT_3
+#define QLC_83XX_VXLAN_UDP_DPORT(PORT) ((PORT & 0xffff) << 16)
+
+#define QLCNIC_ENABLE_INGRESS_ENCAP_PARSING 1
+#define QLCNIC_DISABLE_INGRESS_ENCAP_PARSING 0
+
+static int qlcnic_set_vxlan_port(struct qlcnic_adapter *adapter)
+{
+       u16 port = adapter->ahw->vxlan_port;
+       struct qlcnic_cmd_args cmd;
+       int ret = 0;
+
+       memset(&cmd, 0, sizeof(cmd));
+
+       ret = qlcnic_alloc_mbx_args(&cmd, adapter,
+                                   QLCNIC_CMD_INIT_NIC_FUNC);
+       if (ret)
+               return ret;
+
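+       /* arg[2] packs the encap flags with the VXLAN UDP port in bits 31:16 */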
+       cmd.req.arg[1] = QLC_83XX_MULTI_TENANCY_INFO;
+       cmd.req.arg[2] = QLC_83XX_ENCAP_TYPE_VXLAN |
+                        QLC_83XX_SET_VXLAN_UDP_DPORT |
+                        QLC_83XX_VXLAN_UDP_DPORT(port);
+
+       ret = qlcnic_issue_cmd(adapter, &cmd);
+       if (ret)
+               netdev_err(adapter->netdev,
+                          "Failed to set VXLAN port %d in adapter\n",
+                          port);
+
+       qlcnic_free_mbx_args(&cmd);
+
+       return ret;
+}
+
+static int qlcnic_set_vxlan_parsing(struct qlcnic_adapter *adapter,
+                                   bool state)
+{
+       u16 vxlan_port = adapter->ahw->vxlan_port;
+       struct qlcnic_cmd_args cmd;
+       int ret = 0;
+
+       memset(&cmd, 0, sizeof(cmd));
+
+       ret = qlcnic_alloc_mbx_args(&cmd, adapter,
+                                   QLCNIC_CMD_SET_INGRESS_ENCAP);
+       if (ret)
+               return ret;
+
+       cmd.req.arg[1] = state ? QLCNIC_ENABLE_INGRESS_ENCAP_PARSING :
+                                QLCNIC_DISABLE_INGRESS_ENCAP_PARSING;
+
+       ret = qlcnic_issue_cmd(adapter, &cmd);
+       if (ret)
+               netdev_err(adapter->netdev,
+                          "Failed to %s VXLAN parsing for port %d\n",
+                          state ? "enable" : "disable", vxlan_port);
+       else
+               netdev_info(adapter->netdev,
+                           "%s VXLAN parsing for port %d\n",
+                           state ? "Enabled" : "Disabled", vxlan_port);
+
+       qlcnic_free_mbx_args(&cmd);
+
+       return ret;
+}
+
+static void qlcnic_83xx_periodic_tasks(struct qlcnic_adapter *adapter)
+{
+       if (adapter->fhash.fnum)
+               qlcnic_prune_lb_filters(adapter);
+
+       if (adapter->flags & QLCNIC_ADD_VXLAN_PORT) {
+               if (qlcnic_set_vxlan_port(adapter))
+                       return;
+
+               if (qlcnic_set_vxlan_parsing(adapter, true))
+                       return;
+
+               adapter->flags &= ~QLCNIC_ADD_VXLAN_PORT;
+       } else if (adapter->flags & QLCNIC_DEL_VXLAN_PORT) {
+               if (qlcnic_set_vxlan_parsing(adapter, false))
+                       return;
+
+               adapter->ahw->vxlan_port = 0;
+               adapter->flags &= ~QLCNIC_DEL_VXLAN_PORT;
+       }
+}
+
+/**
+ * qlcnic_83xx_idc_poll_dev_state
+ *
+ * @work: kernel work queue structure used to schedule the function
+ *
+ * Poll device state periodically and perform state-specific
+ * actions defined by Inter Driver Communication (IDC) protocol.
+ *
+ * Returns: None
+ *
+ **/
+void qlcnic_83xx_idc_poll_dev_state(struct work_struct *work)
+{
+       struct qlcnic_adapter *adapter;
+       u32 state;
+
+       adapter = container_of(work, struct qlcnic_adapter, fw_work.work);
+       state = QLCRDX(adapter->ahw, QLC_83XX_IDC_DEV_STATE);
+
+       if (qlcnic_83xx_idc_check_state_validity(adapter, state)) {
+               qlcnic_83xx_idc_log_state_history(adapter);
+               adapter->ahw->idc.curr_state = QLC_83XX_IDC_DEV_UNKNOWN;
+       } else {
+               adapter->ahw->idc.curr_state = state;
+       }
+
+       switch (adapter->ahw->idc.curr_state) {
+       case QLC_83XX_IDC_DEV_READY:
+               qlcnic_83xx_idc_ready_state(adapter);
+               break;
+       case QLC_83XX_IDC_DEV_NEED_RESET:
+               qlcnic_83xx_idc_need_reset_state(adapter);
+               break;
+       case QLC_83XX_IDC_DEV_NEED_QUISCENT:
+               qlcnic_83xx_idc_need_quiesce_state(adapter);
+               break;
+       case QLC_83XX_IDC_DEV_FAILED:
+               qlcnic_83xx_idc_failed_state(adapter);
+               return;
+       case QLC_83XX_IDC_DEV_INIT:
+               qlcnic_83xx_idc_init_state(adapter);
+               break;
+       case QLC_83XX_IDC_DEV_QUISCENT:
+               qlcnic_83xx_idc_quiesce_state(adapter);
+               break;
+       default:
+               qlcnic_83xx_idc_unknown_state(adapter);
+               return;
+       }
+       adapter->ahw->idc.prev_state = adapter->ahw->idc.curr_state;
+       qlcnic_83xx_periodic_tasks(adapter);
+
+       /* Re-schedule the function */
+       if (test_bit(QLC_83XX_MODULE_LOADED, &adapter->ahw->idc.status))
+               qlcnic_schedule_work(adapter, qlcnic_83xx_idc_poll_dev_state,
+                                    adapter->ahw->idc.delay);
+}
+
+static void qlcnic_83xx_setup_idc_parameters(struct qlcnic_adapter *adapter)
+{
+       u32 idc_params, val;
+
+       if (qlcnic_83xx_flash_read32(adapter, QLC_83XX_IDC_FLASH_PARAM_ADDR,
+                                    (u8 *)&idc_params, 1)) {
+               dev_info(&adapter->pdev->dev,
+                        "%s: failed to get IDC params from flash\n", __func__);
+               adapter->dev_init_timeo = QLC_83XX_IDC_INIT_TIMEOUT_SECS;
+               adapter->reset_ack_timeo = QLC_83XX_IDC_RESET_TIMEOUT_SECS;
+       } else {
+               adapter->dev_init_timeo = idc_params & 0xFFFF;
+               adapter->reset_ack_timeo = ((idc_params >> 16) & 0xFFFF);
+       }
+
+       adapter->ahw->idc.curr_state = QLC_83XX_IDC_DEV_UNKNOWN;
+       adapter->ahw->idc.prev_state = QLC_83XX_IDC_DEV_UNKNOWN;
+       adapter->ahw->idc.delay = QLC_83XX_IDC_FW_POLL_DELAY;
+       adapter->ahw->idc.err_code = 0;
+       adapter->ahw->idc.collect_dump = 0;
+       adapter->ahw->idc.name = (char **)qlc_83xx_idc_states;
+
+       clear_bit(__QLCNIC_RESETTING, &adapter->state);
+       set_bit(QLC_83XX_MODULE_LOADED, &adapter->ahw->idc.status);
+
+       /* Check if reset recovery is disabled */
+       if (!qlcnic_auto_fw_reset) {
+               /* Propagate do not reset request to other functions */
+               val = QLCRDX(adapter->ahw, QLC_83XX_IDC_CTRL);
+               val = val | QLC_83XX_IDC_DISABLE_FW_RESET_RECOVERY;
+               QLCWRX(adapter->ahw, QLC_83XX_IDC_CTRL, val);
+       }
+}
+
+static int
+qlcnic_83xx_idc_first_to_load_function_handler(struct qlcnic_adapter *adapter)
+{
+       u32 state, val;
+
+       if (qlcnic_83xx_lock_driver(adapter))
+               return -EIO;
+
+       /* Clear driver lock register */
+       QLCWRX(adapter->ahw, QLC_83XX_RECOVER_DRV_LOCK, 0);
+       if (qlcnic_83xx_idc_update_major_version(adapter, 0)) {
+               qlcnic_83xx_unlock_driver(adapter);
+               return -EIO;
+       }
+
+       state = QLCRDX(adapter->ahw, QLC_83XX_IDC_DEV_STATE);
+       if (qlcnic_83xx_idc_check_state_validity(adapter, state)) {
+               qlcnic_83xx_unlock_driver(adapter);
+               return -EIO;
+       }
+
+       if (state != QLC_83XX_IDC_DEV_COLD && qlcnic_load_fw_file) {
+               QLCWRX(adapter->ahw, QLC_83XX_IDC_DEV_STATE,
+                      QLC_83XX_IDC_DEV_COLD);
+               state = QLC_83XX_IDC_DEV_COLD;
+       }
+
+       adapter->ahw->idc.curr_state = state;
+       /* First to load function should cold boot the device */
+       if (state == QLC_83XX_IDC_DEV_COLD)
+               qlcnic_83xx_idc_cold_state_handler(adapter);
+
+       /* Check if reset recovery is enabled */
+       if (qlcnic_auto_fw_reset) {
+               val = QLCRDX(adapter->ahw, QLC_83XX_IDC_CTRL);
+               val = val & ~QLC_83XX_IDC_DISABLE_FW_RESET_RECOVERY;
+               QLCWRX(adapter->ahw, QLC_83XX_IDC_CTRL, val);
+       }
+
+       qlcnic_83xx_unlock_driver(adapter);
+
+       return 0;
+}
+
+int qlcnic_83xx_idc_init(struct qlcnic_adapter *adapter)
+{
+       int ret = -EIO;
+
+       qlcnic_83xx_setup_idc_parameters(adapter);
+
+       if (qlcnic_83xx_get_reset_instruction_template(adapter))
+               return ret;
+
+       if (!qlcnic_83xx_idc_check_driver_presence_reg(adapter)) {
+               if (qlcnic_83xx_idc_first_to_load_function_handler(adapter))
+                       return -EIO;
+       } else {
+               if (qlcnic_83xx_idc_check_major_version(adapter))
+                       return -EIO;
+       }
+
+       qlcnic_83xx_idc_update_audit_reg(adapter, 0, 1);
+
+       return 0;
+}
+
+void qlcnic_83xx_idc_exit(struct qlcnic_adapter *adapter)
+{
+       int id;
+       u32 val;
+
+       while (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
+               usleep_range(10000, 11000);
+
+       id = QLCRDX(adapter->ahw, QLC_83XX_DRV_LOCK_ID);
+       id = id & 0xFF;
+
+       if (id == adapter->portnum) {
+               dev_err(&adapter->pdev->dev,
+                       "%s: waiting for lock recovery... %d\n", __func__, id);
+               msleep(20);
+               id = QLCRDX(adapter->ahw, QLC_83XX_DRV_LOCK_ID);
+               id = id & 0xFF;
+       }
+
+       /* Clear driver presence bit */
+       val = QLCRDX(adapter->ahw, QLC_83XX_IDC_DRV_PRESENCE);
+       val = val & ~(1 << adapter->portnum);
+       QLCWRX(adapter->ahw, QLC_83XX_IDC_DRV_PRESENCE, val);
+       clear_bit(QLC_83XX_MODULE_LOADED, &adapter->ahw->idc.status);
+       clear_bit(__QLCNIC_RESETTING, &adapter->state);
+
+       cancel_delayed_work_sync(&adapter->fw_work);
+}
+
+void qlcnic_83xx_idc_request_reset(struct qlcnic_adapter *adapter, u32 key)
+{
+       u32 val;
+
+       if (qlcnic_sriov_vf_check(adapter))
+               return;
+
+       if (qlcnic_83xx_lock_driver(adapter)) {
+               dev_err(&adapter->pdev->dev,
+                       "%s: failed, please retry\n", __func__);
+               return;
+       }
+
+       val = QLCRDX(adapter->ahw, QLC_83XX_IDC_CTRL);
+       if (val & QLC_83XX_IDC_DISABLE_FW_RESET_RECOVERY) {
+               netdev_info(adapter->netdev, "%s: Auto firmware recovery is disabled\n",
+                           __func__);
+               qlcnic_83xx_idc_enter_failed_state(adapter, 0);
+               qlcnic_83xx_unlock_driver(adapter);
+               return;
+       }
+
+       if (key == QLCNIC_FORCE_FW_RESET) {
+               val = QLCRDX(adapter->ahw, QLC_83XX_IDC_CTRL);
+               val = val | QLC_83XX_IDC_GRACEFULL_RESET;
+               QLCWRX(adapter->ahw, QLC_83XX_IDC_CTRL, val);
+       } else if (key == QLCNIC_FORCE_FW_DUMP_KEY) {
+               adapter->ahw->idc.collect_dump = 1;
+       }
+
+       qlcnic_83xx_unlock_driver(adapter);
+       return;
+}
+
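+/* Copy the bootloader image from flash into MS memory */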
+static int qlcnic_83xx_copy_bootloader(struct qlcnic_adapter *adapter)
+{
+       u8 *p_cache;
+       u32 src, size;
+       u64 dest;
+       int ret = -EIO;
+
+       src = QLC_83XX_BOOTLOADER_FLASH_ADDR;
+       dest = QLCRDX(adapter->ahw, QLCNIC_BOOTLOADER_ADDR);
+       size = QLCRDX(adapter->ahw, QLCNIC_BOOTLOADER_SIZE);
+
+       /* Round the size up to a 16-byte boundary for MS memory writes */
+       if (size & 0xF)
+               size = (size + 16) & ~0xF;
+
+       p_cache = vzalloc(size);
+       if (p_cache == NULL)
+               return -ENOMEM;
+
+       ret = qlcnic_83xx_lockless_flash_read32(adapter, src, p_cache,
+                                               size / sizeof(u32));
+       if (ret) {
+               vfree(p_cache);
+               return ret;
+       }
+       /* 16 byte write to MS memory */
+       ret = qlcnic_ms_mem_write128(adapter, dest, (u32 *)p_cache,
+                                    size / 16);
+       if (ret) {
+               vfree(p_cache);
+               return ret;
+       }
+       vfree(p_cache);
+
+       return ret;
+}
+
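+/* Copy the firmware image obtained via request_firmware() into MS memory */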
+static int qlcnic_83xx_copy_fw_file(struct qlcnic_adapter *adapter)
+{
+       struct qlc_83xx_fw_info *fw_info = adapter->ahw->fw_info;
+       const struct firmware *fw = fw_info->fw;
+       u32 dest, *p_cache, *temp;
+       int i, ret = -EIO;
+       __le32 *temp_le;
+       u8 data[16];
+       size_t size;
+       u64 addr;
+
+       temp = vzalloc(fw->size);
+       if (!temp) {
+               release_firmware(fw);
+               fw_info->fw = NULL;
+               return -ENOMEM;
+       }
+
+       temp_le = (__le32 *)fw->data;
+
+       /* FW image in file is in little endian, swap the data to nullify
+        * the effect of writel() operation on big endian platform.
+        */
+       for (i = 0; i < fw->size / sizeof(u32); i++)
+               temp[i] = __le32_to_cpu(temp_le[i]);
+
+       dest = QLCRDX(adapter->ahw, QLCNIC_FW_IMAGE_ADDR);
+       size = (fw->size & ~0xF);
+       p_cache = temp;
+       addr = (u64)dest;
+
+       ret = qlcnic_ms_mem_write128(adapter, addr,
+                                    p_cache, size / 16);
+       if (ret) {
+               dev_err(&adapter->pdev->dev, "MS memory write failed\n");
+               goto exit;
+       }
+
+       /* Write any unaligned tail bytes, zero-padded to 16 bytes */
+       if (fw->size & 0xF) {
+               addr = dest + size;
+               for (i = 0; i < (fw->size & 0xF); i++)
+                       data[i] = ((u8 *)temp)[size + i];
+               for (; i < 16; i++)
+                       data[i] = 0;
+               ret = qlcnic_ms_mem_write128(adapter, addr,
+                                            (u32 *)data, 1);
+               if (ret) {
+                       dev_err(&adapter->pdev->dev,
+                               "MS memory write failed\n");
+                       goto exit;
+               }
+       }
+
+exit:
+       release_firmware(fw);
+       fw_info->fw = NULL;
+       vfree(temp);
+
+       return ret;
+}
+
+static void qlcnic_83xx_dump_pause_control_regs(struct qlcnic_adapter *adapter)
+{
+       int i, j;
+       u32 val = 0, val1 = 0, reg = 0;
+       int err = 0;
+
+       val = QLCRD32(adapter, QLC_83XX_SRE_SHIM_REG, &err);
+       if (err == -EIO)
+               return;
+       dev_info(&adapter->pdev->dev, "SRE-Shim Ctrl:0x%x\n", val);
+
+       for (j = 0; j < 2; j++) {
+               if (j == 0) {
+                       dev_info(&adapter->pdev->dev,
+                                "Port 0 RxB Pause Threshold Regs[TC7..TC0]:");
+                       reg = QLC_83XX_PORT0_THRESHOLD;
+               } else if (j == 1) {
+                       dev_info(&adapter->pdev->dev,
+                                "Port 1 RxB Pause Threshold Regs[TC7..TC0]:");
+                       reg = QLC_83XX_PORT1_THRESHOLD;
+               }
+               for (i = 0; i < 8; i++) {
+                       val = QLCRD32(adapter, reg + (i * 0x4), &err);
+                       if (err == -EIO)
+                               return;
+                       dev_info(&adapter->pdev->dev, "0x%x  ", val);
+               }
+               dev_info(&adapter->pdev->dev, "\n");
+       }
+
+       for (j = 0; j < 2; j++) {
+               if (j == 0) {
+                       dev_info(&adapter->pdev->dev,
+                                "Port 0 RxB TC Max Cell Registers[4..1]:");
+                       reg = QLC_83XX_PORT0_TC_MC_REG;
+               } else if (j == 1) {
+                       dev_info(&adapter->pdev->dev,
+                                "Port 1 RxB TC Max Cell Registers[4..1]:");
+                       reg = QLC_83XX_PORT1_TC_MC_REG;
+               }
+               for (i = 0; i < 4; i++) {
+                       val = QLCRD32(adapter, reg + (i * 0x4), &err);
+                       if (err == -EIO)
+                               return;
+                       dev_info(&adapter->pdev->dev, "0x%x  ", val);
+               }
+               dev_info(&adapter->pdev->dev, "\n");
+       }
+
+       for (j = 0; j < 2; j++) {
+               if (j == 0) {
+                       dev_info(&adapter->pdev->dev,
+                                "Port 0 RxB Rx TC Stats[TC7..TC0]:");
+                       reg = QLC_83XX_PORT0_TC_STATS;
+               } else if (j == 1) {
+                       dev_info(&adapter->pdev->dev,
+                                "Port 1 RxB Rx TC Stats[TC7..TC0]:");
+                       reg = QLC_83XX_PORT1_TC_STATS;
+               }
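+               /* Select each TC via bits 31:29 and read back its stats */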
+               for (i = 7; i >= 0; i--) {
+                       val = QLCRD32(adapter, reg, &err);
+                       if (err == -EIO)
+                               return;
+                       val &= ~(0x7 << 29);    /* Reset bits 29 to 31 */
+                       QLCWR32(adapter, reg, (val | (i << 29)));
+                       val = QLCRD32(adapter, reg, &err);
+                       if (err == -EIO)
+                               return;
+                       dev_info(&adapter->pdev->dev, "0x%x  ", val);
+               }
+               dev_info(&adapter->pdev->dev, "\n");
+       }
+
+       val = QLCRD32(adapter, QLC_83XX_PORT2_IFB_THRESHOLD, &err);
+       if (err == -EIO)
+               return;
+       val1 = QLCRD32(adapter, QLC_83XX_PORT3_IFB_THRESHOLD, &err);
+       if (err == -EIO)
+               return;
+       dev_info(&adapter->pdev->dev,
+                "IFB-Pause Thresholds: Port 2:0x%x, Port 3:0x%x\n",
+                val, val1);
+}
+
+
+static void qlcnic_83xx_disable_pause_frames(struct qlcnic_adapter *adapter)
+{
+       u32 reg = 0, i, j;
+
+       if (qlcnic_83xx_lock_driver(adapter)) {
+               dev_err(&adapter->pdev->dev,
+                       "%s: failed to acquire driver lock\n", __func__);
+               return;
+       }
+
+       qlcnic_83xx_dump_pause_control_regs(adapter);
+       QLCWR32(adapter, QLC_83XX_SRE_SHIM_REG, 0x0);
+
+       for (j = 0; j < 2; j++) {
+               if (j == 0)
+                       reg = QLC_83XX_PORT0_THRESHOLD;
+               else if (j == 1)
+                       reg = QLC_83XX_PORT1_THRESHOLD;
+
+               for (i = 0; i < 8; i++)
+                       QLCWR32(adapter, reg + (i * 0x4), 0x0);
+       }
+
+       for (j = 0; j < 2; j++) {
+               if (j == 0)
+                       reg = QLC_83XX_PORT0_TC_MC_REG;
+               else if (j == 1)
+                       reg = QLC_83XX_PORT1_TC_MC_REG;
+
+               for (i = 0; i < 4; i++)
+                       QLCWR32(adapter, reg + (i * 0x4), 0x03FF03FF);
+       }
+
+       QLCWR32(adapter, QLC_83XX_PORT2_IFB_THRESHOLD, 0);
+       QLCWR32(adapter, QLC_83XX_PORT3_IFB_THRESHOLD, 0);
+       dev_info(&adapter->pdev->dev,
+                "Disabled pause frames successfully on all ports\n");
+       qlcnic_83xx_unlock_driver(adapter);
+}
+
+static void qlcnic_83xx_take_eport_out_of_reset(struct qlcnic_adapter *adapter)
+{
+       QLCWR32(adapter, QLC_83XX_RESET_REG, 0);
+       QLCWR32(adapter, QLC_83XX_RESET_PORT0, 0);
+       QLCWR32(adapter, QLC_83XX_RESET_PORT1, 0);
+       QLCWR32(adapter, QLC_83XX_RESET_PORT2, 0);
+       QLCWR32(adapter, QLC_83XX_RESET_PORT3, 0);
+       QLCWR32(adapter, QLC_83XX_RESET_SRESHIM, 0);
+       QLCWR32(adapter, QLC_83XX_RESET_EPGSHIM, 0);
+       QLCWR32(adapter, QLC_83XX_RESET_ETHERPCS, 0);
+       QLCWR32(adapter, QLC_83XX_RESET_CONTROL, 1);
+}
+
+static int qlcnic_83xx_check_heartbeat(struct qlcnic_adapter *p_dev)
+{
+       u32 heartbeat, peg_status;
+       int retries, ret = -EIO, err = 0;
+
+       retries = QLCNIC_HEARTBEAT_CHECK_RETRY_COUNT;
+       p_dev->heartbeat = QLC_SHARED_REG_RD32(p_dev,
+                                              QLCNIC_PEG_ALIVE_COUNTER);
+
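+       /* Firmware is alive if the PEG alive counter changes between reads */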
+       do {
+               msleep(QLCNIC_HEARTBEAT_PERIOD_MSECS);
+               heartbeat = QLC_SHARED_REG_RD32(p_dev,
+                                               QLCNIC_PEG_ALIVE_COUNTER);
+               if (heartbeat != p_dev->heartbeat) {
+                       ret = QLCNIC_RCODE_SUCCESS;
+                       break;
+               }
+       } while (--retries);
+
+       if (ret) {
+               dev_err(&p_dev->pdev->dev, "firmware hang detected\n");
+               qlcnic_83xx_take_eport_out_of_reset(p_dev);
+               qlcnic_83xx_disable_pause_frames(p_dev);
+               peg_status = QLC_SHARED_REG_RD32(p_dev,
+                                                QLCNIC_PEG_HALT_STATUS1);
+               dev_info(&p_dev->pdev->dev, "Dumping HW/FW registers\n"
+                        "PEG_HALT_STATUS1: 0x%x, PEG_HALT_STATUS2: 0x%x,\n"
+                        "PEG_NET_0_PC: 0x%x, PEG_NET_1_PC: 0x%x,\n"
+                        "PEG_NET_2_PC: 0x%x, PEG_NET_3_PC: 0x%x,\n"
+                        "PEG_NET_4_PC: 0x%x\n", peg_status,
+                        QLC_SHARED_REG_RD32(p_dev, QLCNIC_PEG_HALT_STATUS2),
+                        QLCRD32(p_dev, QLC_83XX_CRB_PEG_NET_0, &err),
+                        QLCRD32(p_dev, QLC_83XX_CRB_PEG_NET_1, &err),
+                        QLCRD32(p_dev, QLC_83XX_CRB_PEG_NET_2, &err),
+                        QLCRD32(p_dev, QLC_83XX_CRB_PEG_NET_3, &err),
+                        QLCRD32(p_dev, QLC_83XX_CRB_PEG_NET_4, &err));
+
+               if (QLCNIC_FWERROR_CODE(peg_status) == 0x67)
+                       dev_err(&p_dev->pdev->dev,
+                               "Device is being reset err code 0x00006700.\n");
+       }
+
+       return ret;
+}
+
+static int qlcnic_83xx_check_cmd_peg_status(struct qlcnic_adapter *p_dev)
+{
+       int retries = QLCNIC_CMDPEG_CHECK_RETRY_COUNT;
+       u32 val;
+
+       do {
+               val = QLC_SHARED_REG_RD32(p_dev, QLCNIC_CMDPEG_STATE);
+               if (val == QLC_83XX_CMDPEG_COMPLETE)
+                       return 0;
+               msleep(QLCNIC_CMDPEG_CHECK_DELAY);
+       } while (--retries);
+
+       dev_err(&p_dev->pdev->dev, "%s: failed, state = 0x%x\n", __func__, val);
+       return -EIO;
+}
+
+static int qlcnic_83xx_check_hw_status(struct qlcnic_adapter *p_dev)
+{
+       int err;
+
+       err = qlcnic_83xx_check_cmd_peg_status(p_dev);
+       if (err)
+               return err;
+
+       err = qlcnic_83xx_check_heartbeat(p_dev);
+       if (err)
+               return err;
+
+       return err;
+}
+
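+/* Poll a register until (value & mask) == status or the duration (in ms) expires */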
+static int qlcnic_83xx_poll_reg(struct qlcnic_adapter *p_dev, u32 addr,
+                               int duration, u32 mask, u32 status)
+{
+       int timeout_error, err = 0;
+       u32 value;
+       u8 retries;
+
+       value = QLCRD32(p_dev, addr, &err);
+       if (err == -EIO)
+               return err;
+       retries = duration / 10;
+
+       do {
+               if ((value & mask) != status) {
+                       timeout_error = 1;
+                       msleep(duration / 10);
+                       value = QLCRD32(p_dev, addr, &err);
+                       if (err == -EIO)
+                               return err;
+               } else {
+                       timeout_error = 0;
+                       break;
+               }
+       } while (retries--);
+
+       if (timeout_error) {
+               p_dev->ahw->reset.seq_error++;
+               dev_err(&p_dev->pdev->dev,
+                       "%s: Timeout Err, entry_num = %d\n",
+                       __func__, p_dev->ahw->reset.seq_index);
+               dev_err(&p_dev->pdev->dev,
+                       "0x%08x 0x%08x 0x%08x\n",
+                       value, mask, status);
+       }
+
+       return timeout_error;
+}
+
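+/* Validate the reset template using a 16-bit one's-complement style checksum */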
+static int qlcnic_83xx_reset_template_checksum(struct qlcnic_adapter *p_dev)
+{
+       u32 sum = 0;
+       u16 *buff = (u16 *)p_dev->ahw->reset.buff;
+       int count = p_dev->ahw->reset.hdr->size / sizeof(u16);
+
+       while (count-- > 0)
+               sum += *buff++;
+
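+       /* Fold the 32-bit sum into 16 bits */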
+       while (sum >> 16)
+               sum = (sum & 0xFFFF) + (sum >> 16);
+
+       if (~sum) {
+               return 0;
+       } else {
+               dev_err(&p_dev->pdev->dev, "%s: failed\n", __func__);
+               return -1;
+       }
+}
+
+static int qlcnic_83xx_get_reset_instruction_template(struct qlcnic_adapter *p_dev)
+{
+       struct qlcnic_hardware_context *ahw = p_dev->ahw;
+       u32 addr, count, prev_ver, curr_ver;
+       u8 *p_buff;
+
+       if (ahw->reset.buff != NULL) {
+               prev_ver = p_dev->fw_version;
+               curr_ver = qlcnic_83xx_get_fw_version(p_dev);
+               if (curr_ver > prev_ver)
+                       kfree(ahw->reset.buff);
+               else
+                       return 0;
+       }
+
+       ahw->reset.seq_error = 0;
+       ahw->reset.buff = kzalloc(QLC_83XX_RESTART_TEMPLATE_SIZE, GFP_KERNEL);
+       if (p_dev->ahw->reset.buff == NULL)
+               return -ENOMEM;
+
+       p_buff = p_dev->ahw->reset.buff;
+       addr = QLC_83XX_RESET_TEMPLATE_ADDR;
+       count = sizeof(struct qlc_83xx_reset_hdr) / sizeof(u32);
+
+       /* Copy template header from flash */
+       if (qlcnic_83xx_flash_read32(p_dev, addr, p_buff, count)) {
+               dev_err(&p_dev->pdev->dev, "%s: flash read failed\n", __func__);
+               return -EIO;
+       }
+       ahw->reset.hdr = (struct qlc_83xx_reset_hdr *)ahw->reset.buff;
+       addr = QLC_83XX_RESET_TEMPLATE_ADDR + ahw->reset.hdr->hdr_size;
+       p_buff = ahw->reset.buff + ahw->reset.hdr->hdr_size;
+       count = (ahw->reset.hdr->size - ahw->reset.hdr->hdr_size) / sizeof(u32);
+
+       /* Copy rest of the template */
+       if (qlcnic_83xx_flash_read32(p_dev, addr, p_buff, count)) {
+               dev_err(&p_dev->pdev->dev, "%s: flash read failed\n", __func__);
+               return -EIO;
+       }
+
+       if (qlcnic_83xx_reset_template_checksum(p_dev))
+               return -EIO;
+       /* Get Stop, Start and Init command offsets */
+       ahw->reset.init_offset = ahw->reset.buff + ahw->reset.hdr->init_offset;
+       ahw->reset.start_offset = ahw->reset.buff +
+                                 ahw->reset.hdr->start_offset;
+       ahw->reset.stop_offset = ahw->reset.buff + ahw->reset.hdr->hdr_size;
+       return 0;
+}
+
+/* Read Write HW register command */
+static void qlcnic_83xx_read_write_crb_reg(struct qlcnic_adapter *p_dev,
+                                          u32 raddr, u32 waddr)
+{
+       int err = 0;
+       u32 value;
+
+       value = QLCRD32(p_dev, raddr, &err);
+       if (err == -EIO)
+               return;
+       qlcnic_83xx_wrt_reg_indirect(p_dev, waddr, value);
+}
+
+/* Read Modify Write HW register command */
+static void qlcnic_83xx_rmw_crb_reg(struct qlcnic_adapter *p_dev,
+                                   u32 raddr, u32 waddr,
+                                   struct qlc_83xx_rmw *p_rmw_hdr)
+{
+       int err = 0;
+       u32 value;
+
+       if (p_rmw_hdr->index_a) {
+               value = p_dev->ahw->reset.array[p_rmw_hdr->index_a];
+       } else {
+               value = QLCRD32(p_dev, raddr, &err);
+               if (err == -EIO)
+                       return;
+       }
+
+       value &= p_rmw_hdr->mask;
+       value <<= p_rmw_hdr->shl;
+       value >>= p_rmw_hdr->shr;
+       value |= p_rmw_hdr->or_value;
+       value ^= p_rmw_hdr->xor_value;
+       qlcnic_83xx_wrt_reg_indirect(p_dev, waddr, value);
+}
+
+/* Write HW register command */
+static void qlcnic_83xx_write_list(struct qlcnic_adapter *p_dev,
+                                  struct qlc_83xx_entry_hdr *p_hdr)
+{
+       int i;
+       struct qlc_83xx_entry *entry;
+
+       entry = (struct qlc_83xx_entry *)((char *)p_hdr +
+                                         sizeof(struct qlc_83xx_entry_hdr));
+
+       for (i = 0; i < p_hdr->count; i++, entry++) {
+               qlcnic_83xx_wrt_reg_indirect(p_dev, entry->arg1,
+                                            entry->arg2);
+               if (p_hdr->delay)
+                       udelay((u32)(p_hdr->delay));
+       }
+}
+
+/* Read and Write instruction */
+static void qlcnic_83xx_read_write_list(struct qlcnic_adapter *p_dev,
+                                       struct qlc_83xx_entry_hdr *p_hdr)
+{
+       int i;
+       struct qlc_83xx_entry *entry;
+
+       entry = (struct qlc_83xx_entry *)((char *)p_hdr +
+                                         sizeof(struct qlc_83xx_entry_hdr));
+
+       for (i = 0; i < p_hdr->count; i++, entry++) {
+               qlcnic_83xx_read_write_crb_reg(p_dev, entry->arg1,
+                                              entry->arg2);
+               if (p_hdr->delay)
+                       udelay((u32)(p_hdr->delay));
+       }
+}
+
+/* Poll HW register command */
+static void qlcnic_83xx_poll_list(struct qlcnic_adapter *p_dev,
+                                 struct qlc_83xx_entry_hdr *p_hdr)
+{
+       long delay;
+       struct qlc_83xx_entry *entry;
+       struct qlc_83xx_poll *poll;
+       int i, err = 0;
+       unsigned long arg1, arg2;
+
+       poll = (struct qlc_83xx_poll *)((char *)p_hdr +
+                                       sizeof(struct qlc_83xx_entry_hdr));
+
+       entry = (struct qlc_83xx_entry *)((char *)poll +
+                                         sizeof(struct qlc_83xx_poll));
+       delay = (long)p_hdr->delay;
+
+       if (!delay) {
+               for (i = 0; i < p_hdr->count; i++, entry++)
+                       qlcnic_83xx_poll_reg(p_dev, entry->arg1,
+                                            delay, poll->mask,
+                                            poll->status);
+       } else {
+               for (i = 0; i < p_hdr->count; i++, entry++) {
+                       arg1 = entry->arg1;
+                       arg2 = entry->arg2;
+                       if (delay) {
+                               if (qlcnic_83xx_poll_reg(p_dev,
+                                                        arg1, delay,
+                                                        poll->mask,
+                                                        poll->status)){
+                                       QLCRD32(p_dev, arg1, &err);
+                                       if (err == -EIO)
+                                               return;
+                                       QLCRD32(p_dev, arg2, &err);
+                                       if (err == -EIO)
+                                               return;
+                               }
+                       }
+               }
+       }
+}
+
+/* Poll and write HW register command */
+static void qlcnic_83xx_poll_write_list(struct qlcnic_adapter *p_dev,
+                                       struct qlc_83xx_entry_hdr *p_hdr)
+{
+       int i;
+       long delay;
+       struct qlc_83xx_quad_entry *entry;
+       struct qlc_83xx_poll *poll;
+
+       poll = (struct qlc_83xx_poll *)((char *)p_hdr +
+                                       sizeof(struct qlc_83xx_entry_hdr));
+       entry = (struct qlc_83xx_quad_entry *)((char *)poll +
+                                              sizeof(struct qlc_83xx_poll));
+       delay = (long)p_hdr->delay;
+
+       for (i = 0; i < p_hdr->count; i++, entry++) {
+               qlcnic_83xx_wrt_reg_indirect(p_dev, entry->dr_addr,
+                                            entry->dr_value);
+               qlcnic_83xx_wrt_reg_indirect(p_dev, entry->ar_addr,
+                                            entry->ar_value);
+               if (delay)
+                       qlcnic_83xx_poll_reg(p_dev, entry->ar_addr, delay,
+                                            poll->mask, poll->status);
+       }
+}
+
+/* Read Modify Write register command */
+static void qlcnic_83xx_read_modify_write(struct qlcnic_adapter *p_dev,
+                                         struct qlc_83xx_entry_hdr *p_hdr)
+{
+       int i;
+       struct qlc_83xx_entry *entry;
+       struct qlc_83xx_rmw *rmw_hdr;
+
+       rmw_hdr = (struct qlc_83xx_rmw *)((char *)p_hdr +
+                                         sizeof(struct qlc_83xx_entry_hdr));
+
+       entry = (struct qlc_83xx_entry *)((char *)rmw_hdr +
+                                         sizeof(struct qlc_83xx_rmw));
+
+       for (i = 0; i < p_hdr->count; i++, entry++) {
+               qlcnic_83xx_rmw_crb_reg(p_dev, entry->arg1,
+                                       entry->arg2, rmw_hdr);
+               if (p_hdr->delay)
+                       udelay((u32)(p_hdr->delay));
+       }
+}
+
+static void qlcnic_83xx_pause(struct qlc_83xx_entry_hdr *p_hdr)
+{
+       if (p_hdr->delay)
+               mdelay((u32)((long)p_hdr->delay));
+}
+
+/* Read and poll register command */
+static void qlcnic_83xx_poll_read_list(struct qlcnic_adapter *p_dev,
+                                      struct qlc_83xx_entry_hdr *p_hdr)
+{
+       long delay;
+       int index, i, j, err;
+       struct qlc_83xx_quad_entry *entry;
+       struct qlc_83xx_poll *poll;
+       unsigned long addr;
+
+       poll = (struct qlc_83xx_poll *)((char *)p_hdr +
+                                       sizeof(struct qlc_83xx_entry_hdr));
+
+       entry = (struct qlc_83xx_quad_entry *)((char *)poll +
+                                              sizeof(struct qlc_83xx_poll));
+       delay = (long)p_hdr->delay;
+
+       for (i = 0; i < p_hdr->count; i++, entry++) {
+               qlcnic_83xx_wrt_reg_indirect(p_dev, entry->ar_addr,
+                                            entry->ar_value);
+               if (delay) {
+                       if (!qlcnic_83xx_poll_reg(p_dev, entry->ar_addr, delay,
+                                                 poll->mask, poll->status)){
+                               index = p_dev->ahw->reset.array_index;
+                               addr = entry->dr_addr;
+                               j = QLCRD32(p_dev, addr, &err);
+                               if (err == -EIO)
+                                       return;
+
+                               p_dev->ahw->reset.array[index++] = j;
+
+                               if (index == QLC_83XX_MAX_RESET_SEQ_ENTRIES)
+                                       p_dev->ahw->reset.array_index = 1;
+                       }
+               }
+       }
+}
+
+static inline void qlcnic_83xx_seq_end(struct qlcnic_adapter *p_dev)
+{
+       p_dev->ahw->reset.seq_end = 1;
+}
+
+static void qlcnic_83xx_template_end(struct qlcnic_adapter *p_dev)
+{
+       p_dev->ahw->reset.template_end = 1;
+       if (p_dev->ahw->reset.seq_error == 0)
+               dev_err(&p_dev->pdev->dev,
+                       "HW restart process completed successfully.\n");
+       else
+               dev_err(&p_dev->pdev->dev,
+                       "HW restart completed with timeout errors.\n");
+}
+
+/**
+ * qlcnic_83xx_exec_template_cmd
+ *
+ * @p_dev: adapter structure
+ * @p_buff: Pointer to instruction template
+ *
+ * Template provides instructions to stop, restart and initialize firmware.
+ * These instructions are abstracted as a series of read, write and
+ * poll operations on hardware registers. Register information and operation
+ * specifics are not exposed to the driver. Driver reads the template from
+ * flash and executes the instructions located at pre-defined offsets.
+ *
+ * Returns: None
+ **/
+static void qlcnic_83xx_exec_template_cmd(struct qlcnic_adapter *p_dev,
+                                         char *p_buff)
+{
+       int index, entries;
+       struct qlc_83xx_entry_hdr *p_hdr;
+       char *entry = p_buff;
+
+       p_dev->ahw->reset.seq_end = 0;
+       p_dev->ahw->reset.template_end = 0;
+       entries = p_dev->ahw->reset.hdr->entries;
+       index = p_dev->ahw->reset.seq_index;
+
+       for (; (!p_dev->ahw->reset.seq_end) && (index < entries); index++) {
+               p_hdr = (struct qlc_83xx_entry_hdr *)entry;
+
+               switch (p_hdr->cmd) {
+               case QLC_83XX_OPCODE_NOP:
+                       break;
+               case QLC_83XX_OPCODE_WRITE_LIST:
+                       qlcnic_83xx_write_list(p_dev, p_hdr);
+                       break;
+               case QLC_83XX_OPCODE_READ_WRITE_LIST:
+                       qlcnic_83xx_read_write_list(p_dev, p_hdr);
+                       break;
+               case QLC_83XX_OPCODE_POLL_LIST:
+                       qlcnic_83xx_poll_list(p_dev, p_hdr);
+                       break;
+               case QLC_83XX_OPCODE_POLL_WRITE_LIST:
+                       qlcnic_83xx_poll_write_list(p_dev, p_hdr);
+                       break;
+               case QLC_83XX_OPCODE_READ_MODIFY_WRITE:
+                       qlcnic_83xx_read_modify_write(p_dev, p_hdr);
+                       break;
+               case QLC_83XX_OPCODE_SEQ_PAUSE:
+                       qlcnic_83xx_pause(p_hdr);
+                       break;
+               case QLC_83XX_OPCODE_SEQ_END:
+                       qlcnic_83xx_seq_end(p_dev);
+                       break;
+               case QLC_83XX_OPCODE_TMPL_END:
+                       qlcnic_83xx_template_end(p_dev);
+                       break;
+               case QLC_83XX_OPCODE_POLL_READ_LIST:
+                       qlcnic_83xx_poll_read_list(p_dev, p_hdr);
+                       break;
+               default:
+                       dev_err(&p_dev->pdev->dev,
+                               "%s: Unknown opcode 0x%04x in template %d\n",
+                               __func__, p_hdr->cmd, index);
+                       break;
+               }
+               entry += p_hdr->size;
+       }
+       p_dev->ahw->reset.seq_index = index;
+}
+
+static void qlcnic_83xx_stop_hw(struct qlcnic_adapter *p_dev)
+{
+       p_dev->ahw->reset.seq_index = 0;
+
+       qlcnic_83xx_exec_template_cmd(p_dev, p_dev->ahw->reset.stop_offset);
+       if (p_dev->ahw->reset.seq_end != 1)
+               dev_err(&p_dev->pdev->dev, "%s: failed\n", __func__);
+}
+
+static void qlcnic_83xx_start_hw(struct qlcnic_adapter *p_dev)
+{
+       qlcnic_83xx_exec_template_cmd(p_dev, p_dev->ahw->reset.start_offset);
+       if (p_dev->ahw->reset.template_end != 1)
+               dev_err(&p_dev->pdev->dev, "%s: failed\n", __func__);
+}
+
+static void qlcnic_83xx_init_hw(struct qlcnic_adapter *p_dev)
+{
+       qlcnic_83xx_exec_template_cmd(p_dev, p_dev->ahw->reset.init_offset);
+       if (p_dev->ahw->reset.seq_end != 1)
+               dev_err(&p_dev->pdev->dev, "%s: failed\n", __func__);
+}
+
+/* POST FW related definitions */
+#define QLC_83XX_POST_SIGNATURE_REG    0x41602014
+#define QLC_83XX_POST_MODE_REG         0x41602018
+#define QLC_83XX_POST_FAST_MODE                0
+#define QLC_83XX_POST_MEDIUM_MODE      1
+#define QLC_83XX_POST_SLOW_MODE                2
+
+/* POST Timeout values in milliseconds */
+#define QLC_83XX_POST_FAST_MODE_TIMEOUT        690
+#define QLC_83XX_POST_MED_MODE_TIMEOUT 2930
+#define QLC_83XX_POST_SLOW_MODE_TIMEOUT        7500
+
+/* POST result values */
+#define QLC_83XX_POST_PASS                     0xfffffff0
+#define QLC_83XX_POST_ASIC_STRESS_TEST_FAIL    0xffffffff
+#define QLC_83XX_POST_DDR_TEST_FAIL            0xfffffffe
+#define QLC_83XX_POST_ASIC_MEMORY_TEST_FAIL    0xfffffffc
+#define QLC_83XX_POST_FLASH_TEST_FAIL          0xfffffff8
+
+static int qlcnic_83xx_run_post(struct qlcnic_adapter *adapter)
+{
+       struct qlc_83xx_fw_info *fw_info = adapter->ahw->fw_info;
+       struct device *dev = &adapter->pdev->dev;
+       int timeout, count, ret = 0;
+       u32 signature;
+
+       /* Set timeout values with extra 2 seconds of buffer */
+       switch (adapter->ahw->post_mode) {
+       case QLC_83XX_POST_FAST_MODE:
+               timeout = QLC_83XX_POST_FAST_MODE_TIMEOUT + 2000;
+               break;
+       case QLC_83XX_POST_MEDIUM_MODE:
+               timeout = QLC_83XX_POST_MED_MODE_TIMEOUT + 2000;
+               break;
+       case QLC_83XX_POST_SLOW_MODE:
+               timeout = QLC_83XX_POST_SLOW_MODE_TIMEOUT + 2000;
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       strncpy(fw_info->fw_file_name, QLC_83XX_POST_FW_FILE_NAME,
+               QLC_FW_FILE_NAME_LEN);
+
+       ret = request_firmware(&fw_info->fw, fw_info->fw_file_name, dev);
+       if (ret) {
+               dev_err(dev, "POST firmware can not be loaded, skipping POST\n");
+               return 0;
+       }
+
+       ret = qlcnic_83xx_copy_fw_file(adapter);
+       if (ret)
+               return ret;
+
+       /* clear QLC_83XX_POST_SIGNATURE_REG register */
+       qlcnic_ind_wr(adapter, QLC_83XX_POST_SIGNATURE_REG, 0);
+
+       /* Set POST mode */
+       qlcnic_ind_wr(adapter, QLC_83XX_POST_MODE_REG,
+                     adapter->ahw->post_mode);
+
+       QLC_SHARED_REG_WR32(adapter, QLCNIC_FW_IMG_VALID,
+                           QLC_83XX_BOOT_FROM_FILE);
+
+       qlcnic_83xx_start_hw(adapter);
+
+       count = 0;
+       do {
+               msleep(100);
+               count += 100;
+
+               signature = qlcnic_ind_rd(adapter, QLC_83XX_POST_SIGNATURE_REG);
+               if (signature == QLC_83XX_POST_PASS)
+                       break;
+       } while (timeout > count);
+
+       if (timeout <= count) {
+               dev_err(dev, "POST timed out, signature = 0x%08x\n", signature);
+               return -EIO;
+       }
+
+       switch (signature) {
+       case QLC_83XX_POST_PASS:
+               dev_info(dev, "POST passed, Signature = 0x%08x\n", signature);
+               break;
+       case QLC_83XX_POST_ASIC_STRESS_TEST_FAIL:
+               dev_err(dev, "POST failed, Test case : ASIC STRESS TEST, Signature = 0x%08x\n",
+                       signature);
+               ret = -EIO;
+               break;
+       case QLC_83XX_POST_DDR_TEST_FAIL:
+               dev_err(dev, "POST failed, Test case : DDR TEST, Signature = 0x%08x\n",
+                       signature);
+               ret = -EIO;
+               break;
+       case QLC_83XX_POST_ASIC_MEMORY_TEST_FAIL:
+               dev_err(dev, "POST failed, Test case : ASIC MEMORY TEST, Signature = 0x%08x\n",
+                       signature);
+               ret = -EIO;
+               break;
+       case QLC_83XX_POST_FLASH_TEST_FAIL:
+               dev_err(dev, "POST failed, Test case : FLASH TEST, Signature = 0x%08x\n",
+                       signature);
+               ret = -EIO;
+               break;
+       default:
+               dev_err(dev, "POST failed, Test case : INVALID, Signature = 0x%08x\n",
+                       signature);
+               ret = -EIO;
+               break;
+       }
+
+       return ret;
+}
+
+static int qlcnic_83xx_load_fw_image_from_host(struct qlcnic_adapter *adapter)
+{
+       struct qlc_83xx_fw_info *fw_info = adapter->ahw->fw_info;
+       int err = -EIO;
+
+       if (request_firmware(&fw_info->fw, fw_info->fw_file_name,
+                            &(adapter->pdev->dev))) {
+               dev_err(&adapter->pdev->dev,
+                       "No file FW image, loading flash FW image.\n");
+               QLC_SHARED_REG_WR32(adapter, QLCNIC_FW_IMG_VALID,
+                                   QLC_83XX_BOOT_FROM_FLASH);
+       } else {
+               if (qlcnic_83xx_copy_fw_file(adapter))
+                       return err;
+               QLC_SHARED_REG_WR32(adapter, QLCNIC_FW_IMG_VALID,
+                                   QLC_83XX_BOOT_FROM_FILE);
+       }
+
+       return 0;
+}
+
+static int qlcnic_83xx_restart_hw(struct qlcnic_adapter *adapter)
+{
+       u32 val;
+       int err = -EIO;
+
+       qlcnic_83xx_stop_hw(adapter);
+
+       /* Collect FW register dump if required */
+       val = QLCRDX(adapter->ahw, QLC_83XX_IDC_CTRL);
+       if (!(val & QLC_83XX_IDC_GRACEFULL_RESET))
+               qlcnic_dump_fw(adapter);
+
+       if (val & QLC_83XX_IDC_DISABLE_FW_RESET_RECOVERY) {
+               netdev_info(adapter->netdev, "%s: Auto firmware recovery is disabled\n",
+                           __func__);
+               qlcnic_83xx_idc_enter_failed_state(adapter, 1);
+               return err;
+       }
+
+       qlcnic_83xx_init_hw(adapter);
+
+       if (qlcnic_83xx_copy_bootloader(adapter))
+               return err;
+
+       /* Check if POST needs to be run */
+       if (adapter->ahw->run_post) {
+               err = qlcnic_83xx_run_post(adapter);
+               if (err)
+                       return err;
+
+               /* No need to run POST in next reset sequence */
+               adapter->ahw->run_post = false;
+
+               /* Reset the adapter again to load the regular firmware */
+               qlcnic_83xx_stop_hw(adapter);
+               qlcnic_83xx_init_hw(adapter);
+
+               err = qlcnic_83xx_copy_bootloader(adapter);
+               if (err)
+                       return err;
+       }
+
+       /* Boot either flash image or firmware image from host file system */
+       if (qlcnic_load_fw_file == 1) {
+               if (qlcnic_83xx_load_fw_image_from_host(adapter))
+                       return err;
+       } else {
+               QLC_SHARED_REG_WR32(adapter, QLCNIC_FW_IMG_VALID,
+                                   QLC_83XX_BOOT_FROM_FLASH);
+       }
+
+       qlcnic_83xx_start_hw(adapter);
+       if (qlcnic_83xx_check_hw_status(adapter))
+               return -EIO;
+
+       return 0;
+}
+
+static int qlcnic_83xx_get_nic_configuration(struct qlcnic_adapter *adapter)
+{
+       int err;
+       struct qlcnic_info nic_info;
+       struct qlcnic_hardware_context *ahw = adapter->ahw;
+
+       memset(&nic_info, 0, sizeof(struct qlcnic_info));
+       err = qlcnic_get_nic_info(adapter, &nic_info, ahw->pci_func);
+       if (err)
+               return -EIO;
+
+       ahw->physical_port = (u8) nic_info.phys_port;
+       ahw->switch_mode = nic_info.switch_mode;
+       ahw->max_tx_ques = nic_info.max_tx_ques;
+       ahw->max_rx_ques = nic_info.max_rx_ques;
+       ahw->capabilities = nic_info.capabilities;
+       ahw->max_mac_filters = nic_info.max_mac_filters;
+       ahw->max_mtu = nic_info.max_mtu;
+
+       /* eSwitch capability indicates vNIC mode.
+        * vNIC and SRIOV are mutually exclusive operational modes.
+        * If SR-IOV capability is detected, SR-IOV physical function
+        * will get initialized in default mode.
+        * SR-IOV virtual function initialization follows a
+        * different code path and opmode.
+        * SRIOV mode has precedence over vNIC mode.
+        */
+       if (test_bit(__QLCNIC_SRIOV_CAPABLE, &adapter->state))
+               return QLC_83XX_DEFAULT_OPMODE;
+
+       if (ahw->capabilities & QLC_83XX_ESWITCH_CAPABILITY)
+               return QLCNIC_VNIC_MODE;
+
+       return QLC_83XX_DEFAULT_OPMODE;
+}
+
+int qlcnic_83xx_configure_opmode(struct qlcnic_adapter *adapter)
+{
+       struct qlcnic_hardware_context *ahw = adapter->ahw;
+       u16 max_sds_rings, max_tx_rings;
+       int ret;
+
+       ret = qlcnic_83xx_get_nic_configuration(adapter);
+       if (ret == -EIO)
+               return -EIO;
+
+       if (ret == QLCNIC_VNIC_MODE) {
+               ahw->nic_mode = QLCNIC_VNIC_MODE;
+
+               if (qlcnic_83xx_config_vnic_opmode(adapter))
+                       return -EIO;
+
+               max_sds_rings = QLCNIC_MAX_VNIC_SDS_RINGS;
+               max_tx_rings = QLCNIC_MAX_VNIC_TX_RINGS;
+       } else if (ret == QLC_83XX_DEFAULT_OPMODE) {
+               ahw->nic_mode = QLCNIC_DEFAULT_MODE;
+               adapter->nic_ops->init_driver = qlcnic_83xx_init_default_driver;
+               ahw->idc.state_entry = qlcnic_83xx_idc_ready_state_entry;
+               max_sds_rings = QLCNIC_MAX_SDS_RINGS;
+               max_tx_rings = QLCNIC_MAX_TX_RINGS;
+       } else {
+               dev_err(&adapter->pdev->dev, "%s: Invalid opmode %d\n",
+                       __func__, ret);
+               return -EIO;
+       }
+
+       adapter->max_sds_rings = min(ahw->max_rx_ques, max_sds_rings);
+       adapter->max_tx_rings = min(ahw->max_tx_ques, max_tx_rings);
+
+       return 0;
+}
+
+static void qlcnic_83xx_config_buff_descriptors(struct qlcnic_adapter *adapter)
+{
+       struct qlcnic_hardware_context *ahw = adapter->ahw;
+
+       if (ahw->port_type == QLCNIC_XGBE) {
+               adapter->num_rxd = DEFAULT_RCV_DESCRIPTORS_10G;
+               adapter->max_rxd = MAX_RCV_DESCRIPTORS_10G;
+               adapter->num_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_10G;
+               adapter->max_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_10G;
+
+       } else if (ahw->port_type == QLCNIC_GBE) {
+               adapter->num_rxd = DEFAULT_RCV_DESCRIPTORS_1G;
+               adapter->num_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_1G;
+               adapter->max_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_1G;
+               adapter->max_rxd = MAX_RCV_DESCRIPTORS_1G;
+       }
+       adapter->num_txd = MAX_CMD_DESCRIPTORS;
+       adapter->max_rds_rings = MAX_RDS_RINGS;
+}
+
+static int qlcnic_83xx_init_default_driver(struct qlcnic_adapter *adapter)
+{
+       int err = -EIO;
+
+       qlcnic_83xx_get_minidump_template(adapter);
+       if (qlcnic_83xx_get_port_info(adapter))
+               return err;
+
+       qlcnic_83xx_config_buff_descriptors(adapter);
+       adapter->ahw->msix_supported = !!qlcnic_use_msi_x;
+       adapter->flags |= QLCNIC_ADAPTER_INITIALIZED;
+
+       dev_info(&adapter->pdev->dev, "HAL Version: %d\n",
+                adapter->ahw->fw_hal_version);
+
+       return 0;
+}
+
+#define IS_QLC_83XX_USED(a, b, c) (((1 << a->portnum) & b) || ((c >> 6) & 0x1))
+static void qlcnic_83xx_clear_function_resources(struct qlcnic_adapter *adapter)
+{
+       struct qlcnic_cmd_args cmd;
+       u32 presence_mask, audit_mask;
+       int status;
+
+       presence_mask = QLCRDX(adapter->ahw, QLC_83XX_IDC_DRV_PRESENCE);
+       audit_mask = QLCRDX(adapter->ahw, QLC_83XX_IDC_DRV_AUDIT);
+
+       if (IS_QLC_83XX_USED(adapter, presence_mask, audit_mask)) {
+               status = qlcnic_alloc_mbx_args(&cmd, adapter,
+                                              QLCNIC_CMD_STOP_NIC_FUNC);
+               if (status)
+                       return;
+
+               cmd.req.arg[1] = BIT_31;
+               status = qlcnic_issue_cmd(adapter, &cmd);
+               if (status)
+                       dev_err(&adapter->pdev->dev,
+                               "Failed to clean up the function resources\n");
+               qlcnic_free_mbx_args(&cmd);
+       }
+}
+
+static int qlcnic_83xx_get_fw_info(struct qlcnic_adapter *adapter)
+{
+       struct qlcnic_hardware_context *ahw = adapter->ahw;
+       struct pci_dev *pdev = adapter->pdev;
+       struct qlc_83xx_fw_info *fw_info;
+       int err = 0;
+
+       ahw->fw_info = kzalloc(sizeof(*fw_info), GFP_KERNEL);
+       if (!ahw->fw_info) {
+               err = -ENOMEM;
+       } else {
+               fw_info = ahw->fw_info;
+               switch (pdev->device) {
+               case PCI_DEVICE_ID_QLOGIC_QLE834X:
+               case PCI_DEVICE_ID_QLOGIC_QLE8830:
+                       strncpy(fw_info->fw_file_name, QLC_83XX_FW_FILE_NAME,
+                               QLC_FW_FILE_NAME_LEN);
+                       break;
+               case PCI_DEVICE_ID_QLOGIC_QLE844X:
+                       strncpy(fw_info->fw_file_name, QLC_84XX_FW_FILE_NAME,
+                               QLC_FW_FILE_NAME_LEN);
+                       break;
+               default:
+                       dev_err(&pdev->dev, "%s: Invalid device id\n",
+                               __func__);
+                       err = -EINVAL;
+                       break;
+               }
+       }
+
+       return err;
+}
+
+static void qlcnic_83xx_init_rings(struct qlcnic_adapter *adapter)
+{
+       u8 rx_cnt = QLCNIC_DEF_SDS_RINGS;
+       u8 tx_cnt = QLCNIC_DEF_TX_RINGS;
+
+       adapter->max_tx_rings = QLCNIC_MAX_TX_RINGS;
+       adapter->max_sds_rings = QLCNIC_MAX_SDS_RINGS;
+
+       if (!adapter->ahw->msix_supported) {
+               rx_cnt = QLCNIC_SINGLE_RING;
+               tx_cnt = QLCNIC_SINGLE_RING;
+       }
+
+       /* compute and set drv sds rings */
+       qlcnic_set_tx_ring_count(adapter, tx_cnt);
+       qlcnic_set_sds_ring_count(adapter, rx_cnt);
+}
+
+int qlcnic_83xx_init(struct qlcnic_adapter *adapter, int pci_using_dac)
+{
+       struct qlcnic_hardware_context *ahw = adapter->ahw;
+       int err = 0;
+
+       adapter->rx_mac_learn = false;
+       ahw->msix_supported = !!qlcnic_use_msi_x;
+
+       /* Check if POST needs to be run */
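+       /* Values 2, 3 and 4 of qlcnic_load_fw_file (assumed to be exposed as
+        * the driver's load_fw_file module parameter) select fast, medium and
+        * slow POST respectively; any other value skips POST (0 boots the
+        * flash image, 1 boots a host file image, see
+        * qlcnic_83xx_restart_hw()).
+        */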
+       switch (qlcnic_load_fw_file) {
+       case 2:
+               ahw->post_mode = QLC_83XX_POST_FAST_MODE;
+               ahw->run_post = true;
+               break;
+       case 3:
+               ahw->post_mode = QLC_83XX_POST_MEDIUM_MODE;
+               ahw->run_post = true;
+               break;
+       case 4:
+               ahw->post_mode = QLC_83XX_POST_SLOW_MODE;
+               ahw->run_post = true;
+               break;
+       default:
+               ahw->run_post = false;
+               break;
+       }
+
+       qlcnic_83xx_init_rings(adapter);
+
+       err = qlcnic_83xx_init_mailbox_work(adapter);
+       if (err)
+               goto exit;
+
+       if (qlcnic_sriov_vf_check(adapter)) {
+               err = qlcnic_sriov_vf_init(adapter, pci_using_dac);
+               if (err)
+                       goto detach_mbx;
+               else
+                       return err;
+       }
+
+       if (qlcnic_83xx_read_flash_descriptor_table(adapter) ||
+           qlcnic_83xx_read_flash_mfg_id(adapter)) {
+               dev_err(&adapter->pdev->dev, "Failed reading flash mfg id\n");
+               err = -ENOTRECOVERABLE;
+               goto detach_mbx;
+       }
+
+       err = qlcnic_83xx_check_hw_status(adapter);
+       if (err)
+               goto detach_mbx;
+
+       err = qlcnic_83xx_get_fw_info(adapter);
+       if (err)
+               goto detach_mbx;
+
+       err = qlcnic_83xx_idc_init(adapter);
+       if (err)
+               goto detach_mbx;
+
+       err = qlcnic_setup_intr(adapter);
+       if (err) {
+               dev_err(&adapter->pdev->dev, "Failed to setup interrupt\n");
+               goto disable_intr;
+       }
+
+       INIT_DELAYED_WORK(&adapter->idc_aen_work, qlcnic_83xx_idc_aen_work);
+
+       err = qlcnic_83xx_setup_mbx_intr(adapter);
+       if (err)
+               goto disable_mbx_intr;
+
+       qlcnic_83xx_clear_function_resources(adapter);
+       qlcnic_dcb_enable(adapter->dcb);
+       qlcnic_83xx_initialize_nic(adapter, 1);
+       qlcnic_dcb_get_info(adapter->dcb);
+
+       /* Configure default, SR-IOV or Virtual NIC mode of operation */
+       err = qlcnic_83xx_configure_opmode(adapter);
+       if (err)
+               goto disable_mbx_intr;
+
+
+       /* Perform operating mode specific initialization */
+       err = adapter->nic_ops->init_driver(adapter);
+       if (err)
+               goto disable_mbx_intr;
+
+       /* Periodically monitor device status */
+       qlcnic_83xx_idc_poll_dev_state(&adapter->fw_work.work);
+       return 0;
+
+disable_mbx_intr:
+       qlcnic_83xx_free_mbx_intr(adapter);
+
+disable_intr:
+       qlcnic_teardown_intr(adapter);
+
+detach_mbx:
+       qlcnic_83xx_detach_mailbox_work(adapter);
+       qlcnic_83xx_free_mailbox(ahw->mailbox);
+       ahw->mailbox = NULL;
+exit:
+       return err;
+}
+
+void qlcnic_83xx_aer_stop_poll_work(struct qlcnic_adapter *adapter)
+{
+       struct qlcnic_hardware_context *ahw = adapter->ahw;
+       struct qlc_83xx_idc *idc = &ahw->idc;
+
+       clear_bit(QLC_83XX_MBX_READY, &idc->status);
+       cancel_delayed_work_sync(&adapter->fw_work);
+
+       if (ahw->nic_mode == QLCNIC_VNIC_MODE)
+               qlcnic_83xx_disable_vnic_mode(adapter, 1);
+
+       qlcnic_83xx_idc_detach_driver(adapter);
+       qlcnic_83xx_initialize_nic(adapter, 0);
+
+       cancel_delayed_work_sync(&adapter->idc_aen_work);
+}
+
+int qlcnic_83xx_aer_reset(struct qlcnic_adapter *adapter)
+{
+       struct qlcnic_hardware_context *ahw = adapter->ahw;
+       struct qlc_83xx_idc *idc = &ahw->idc;
+       int ret = 0;
+       u32 owner;
+
+       /* Mark the previous IDC state as NEED_RESET so
+        * that state_entry() will perform the reattachment
+        * and bringup the device
+        */
+       idc->prev_state = QLC_83XX_IDC_DEV_NEED_RESET;
+       owner = qlcnic_83xx_idc_find_reset_owner_id(adapter);
+       if (ahw->pci_func == owner) {
+               ret = qlcnic_83xx_restart_hw(adapter);
+               if (ret < 0)
+                       return ret;
+               qlcnic_83xx_idc_clear_registers(adapter, 0);
+       }
+
+       ret = idc->state_entry(adapter);
+       return ret;
+}
+
+void qlcnic_83xx_aer_start_poll_work(struct qlcnic_adapter *adapter)
+{
+       struct qlcnic_hardware_context *ahw = adapter->ahw;
+       struct qlc_83xx_idc *idc = &ahw->idc;
+       u32 owner;
+
+       idc->prev_state = QLC_83XX_IDC_DEV_READY;
+       owner = qlcnic_83xx_idc_find_reset_owner_id(adapter);
+       if (ahw->pci_func == owner)
+               qlcnic_83xx_idc_enter_ready_state(adapter, 0);
+
+       qlcnic_schedule_work(adapter, qlcnic_83xx_idc_poll_dev_state, 0);
+}
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c
new file mode 100644 (file)
index 0000000..3490675
--- /dev/null
@@ -0,0 +1,285 @@
+/*
+ * QLogic qlcnic NIC Driver
+ * Copyright (c) 2009-2013 QLogic Corporation
+ *
+ * See LICENSE.qlcnic for copyright and licensing details.
+ */
+
+#include "qlcnic.h"
+#include "qlcnic_hw.h"
+
+static int qlcnic_83xx_enable_vnic_mode(struct qlcnic_adapter *adapter, int lock)
+{
+       if (lock) {
+               if (qlcnic_83xx_lock_driver(adapter))
+                       return -EBUSY;
+       }
+       QLCWRX(adapter->ahw, QLC_83XX_VNIC_STATE, QLCNIC_DEV_NPAR_OPER);
+       if (lock)
+               qlcnic_83xx_unlock_driver(adapter);
+
+       return 0;
+}
+
+int qlcnic_83xx_disable_vnic_mode(struct qlcnic_adapter *adapter, int lock)
+{
+       struct qlcnic_hardware_context *ahw = adapter->ahw;
+
+       if (lock) {
+               if (qlcnic_83xx_lock_driver(adapter))
+                       return -EBUSY;
+       }
+
+       QLCWRX(adapter->ahw, QLC_83XX_VNIC_STATE, QLCNIC_DEV_NPAR_NON_OPER);
+       ahw->idc.vnic_state = QLCNIC_DEV_NPAR_NON_OPER;
+
+       if (lock)
+               qlcnic_83xx_unlock_driver(adapter);
+
+       return 0;
+}
+
+int qlcnic_83xx_set_vnic_opmode(struct qlcnic_adapter *adapter)
+{
+       u8 id;
+       int ret = -EBUSY;
+       u32 data = QLCNIC_MGMT_FUNC;
+       struct qlcnic_hardware_context *ahw = adapter->ahw;
+
+       if (qlcnic_83xx_lock_driver(adapter))
+               return ret;
+
+       id = ahw->pci_func;
+       data = QLCRDX(adapter->ahw, QLC_83XX_DRV_OP_MODE);
+       data = (data & ~QLC_83XX_SET_FUNC_OPMODE(0x3, id)) |
+              QLC_83XX_SET_FUNC_OPMODE(QLCNIC_MGMT_FUNC, id);
+
+       QLCWRX(adapter->ahw, QLC_83XX_DRV_OP_MODE, data);
+
+       qlcnic_83xx_unlock_driver(adapter);
+
+       return 0;
+}
+
+static void
+qlcnic_83xx_config_vnic_buff_descriptors(struct qlcnic_adapter *adapter)
+{
+       struct qlcnic_hardware_context *ahw = adapter->ahw;
+
+       if (ahw->port_type == QLCNIC_XGBE) {
+               adapter->num_rxd = DEFAULT_RCV_DESCRIPTORS_VF;
+               adapter->max_rxd = MAX_RCV_DESCRIPTORS_VF;
+               adapter->num_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_10G;
+               adapter->max_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_10G;
+
+       } else if (ahw->port_type == QLCNIC_GBE) {
+               adapter->num_rxd = DEFAULT_RCV_DESCRIPTORS_1G;
+               adapter->num_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_1G;
+               adapter->max_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_1G;
+               adapter->max_rxd = MAX_RCV_DESCRIPTORS_1G;
+       }
+       adapter->num_txd = MAX_CMD_DESCRIPTORS;
+       adapter->max_rds_rings = MAX_RDS_RINGS;
+}
+
+
+/**
+ * qlcnic_83xx_init_mgmt_vnic
+ *
+ * @adapter: adapter structure
+ * Management virtual NIC sets the operational mode of other vNICs and
+ * configures the embedded switch (ESWITCH).
+ * Returns: Success(0) or error code.
+ *
+ **/
+static int qlcnic_83xx_init_mgmt_vnic(struct qlcnic_adapter *adapter)
+{
+       struct qlcnic_hardware_context *ahw = adapter->ahw;
+       struct device *dev = &adapter->pdev->dev;
+       struct qlcnic_npar_info *npar;
+       int i, err = -EIO;
+
+       qlcnic_83xx_get_minidump_template(adapter);
+
+       if (!(adapter->flags & QLCNIC_ADAPTER_INITIALIZED)) {
+               if (qlcnic_init_pci_info(adapter))
+                       return err;
+
+               npar = adapter->npars;
+
+               for (i = 0; i < ahw->total_nic_func; i++, npar++) {
+                       dev_info(dev, "id:%d active:%d type:%d port:%d min_bw:%d max_bw:%d mac_addr:%pM\n",
+                                npar->pci_func, npar->active, npar->type,
+                                npar->phy_port, npar->min_bw, npar->max_bw,
+                                npar->mac);
+               }
+
+               dev_info(dev, "Max functions = %d, active functions = %d\n",
+                        ahw->max_pci_func, ahw->total_nic_func);
+
+               if (qlcnic_83xx_set_vnic_opmode(adapter))
+                       return err;
+
+               if (qlcnic_set_default_offload_settings(adapter))
+                       return err;
+       } else {
+               if (qlcnic_reset_npar_config(adapter))
+                       return err;
+       }
+
+       if (qlcnic_83xx_get_port_info(adapter))
+               return err;
+
+       qlcnic_83xx_config_vnic_buff_descriptors(adapter);
+       ahw->msix_supported = qlcnic_use_msi_x ? 1 : 0;
+       adapter->flags |= QLCNIC_ADAPTER_INITIALIZED;
+       qlcnic_83xx_enable_vnic_mode(adapter, 1);
+
+       dev_info(dev, "HAL Version: %d, Management function\n",
+                ahw->fw_hal_version);
+
+       return 0;
+}
+
+static int qlcnic_83xx_init_privileged_vnic(struct qlcnic_adapter *adapter)
+{
+       int err = -EIO;
+
+       qlcnic_83xx_get_minidump_template(adapter);
+       if (qlcnic_83xx_get_port_info(adapter))
+               return err;
+
+       qlcnic_83xx_config_vnic_buff_descriptors(adapter);
+       adapter->ahw->msix_supported = !!qlcnic_use_msi_x;
+       adapter->flags |= QLCNIC_ADAPTER_INITIALIZED;
+
+       dev_info(&adapter->pdev->dev,
+                "HAL Version: %d, Privileged function\n",
+                adapter->ahw->fw_hal_version);
+       return 0;
+}
+
+static int qlcnic_83xx_init_non_privileged_vnic(struct qlcnic_adapter *adapter)
+{
+       int err = -EIO;
+
+       qlcnic_83xx_get_fw_version(adapter);
+       if (qlcnic_set_eswitch_port_config(adapter))
+               return err;
+
+       if (qlcnic_83xx_get_port_info(adapter))
+               return err;
+
+       qlcnic_83xx_config_vnic_buff_descriptors(adapter);
+       adapter->ahw->msix_supported = !!qlcnic_use_msi_x;
+       adapter->flags |= QLCNIC_ADAPTER_INITIALIZED;
+
+       dev_info(&adapter->pdev->dev, "HAL Version: %d, Virtual function\n",
+                adapter->ahw->fw_hal_version);
+
+       return 0;
+}
+
+/**
+ * qlcnic_83xx_config_vnic_opmode
+ *
+ * @adapter: adapter structure
+ * Identify virtual NIC operational modes.
+ *
+ * Returns: Success(0) or error code.
+ *
+ **/
+int qlcnic_83xx_config_vnic_opmode(struct qlcnic_adapter *adapter)
+{
+       u32 op_mode, priv_level;
+       struct qlcnic_hardware_context *ahw = adapter->ahw;
+       struct qlcnic_nic_template *nic_ops = adapter->nic_ops;
+
+       qlcnic_get_func_no(adapter);
+       op_mode = QLCRDX(adapter->ahw, QLC_83XX_DRV_OP_MODE);
+
+       if (op_mode == QLC_83XX_DEFAULT_OPMODE)
+               priv_level = QLCNIC_MGMT_FUNC;
+       else
+               priv_level = QLC_83XX_GET_FUNC_PRIVILEGE(op_mode,
+                                                        ahw->pci_func);
+       switch (priv_level) {
+       case QLCNIC_NON_PRIV_FUNC:
+               ahw->op_mode = QLCNIC_NON_PRIV_FUNC;
+               ahw->idc.state_entry = qlcnic_83xx_idc_ready_state_entry;
+               nic_ops->init_driver = qlcnic_83xx_init_non_privileged_vnic;
+               break;
+       case QLCNIC_PRIV_FUNC:
+               ahw->op_mode = QLCNIC_PRIV_FUNC;
+               ahw->idc.state_entry = qlcnic_83xx_idc_vnic_pf_entry;
+               nic_ops->init_driver = qlcnic_83xx_init_privileged_vnic;
+               break;
+       case QLCNIC_MGMT_FUNC:
+               ahw->op_mode = QLCNIC_MGMT_FUNC;
+               ahw->idc.state_entry = qlcnic_83xx_idc_ready_state_entry;
+               nic_ops->init_driver = qlcnic_83xx_init_mgmt_vnic;
+               break;
+       default:
+               dev_err(&adapter->pdev->dev, "Invalid Virtual NIC opmode\n");
+               return -EIO;
+       }
+
+       if (ahw->capabilities & QLC_83XX_ESWITCH_CAPABILITY) {
+               adapter->flags |= QLCNIC_ESWITCH_ENABLED;
+               if (adapter->drv_mac_learn)
+                       adapter->rx_mac_learn = true;
+       } else {
+               adapter->flags &= ~QLCNIC_ESWITCH_ENABLED;
+               adapter->rx_mac_learn = false;
+       }
+
+       ahw->idc.vnic_state = QLCNIC_DEV_NPAR_NON_OPER;
+       ahw->idc.vnic_wait_limit = QLCNIC_DEV_NPAR_OPER_TIMEO;
+
+       return 0;
+}
+
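+/* A minimal sketch (assumed encoding, matching the QLC_83XX_SET_FUNC_OPMODE()
+ * and QLC_83XX_GET_FUNC_PRIVILEGE() usage above): QLC_83XX_DRV_OP_MODE packs
+ * a 2-bit privilege level per PCI function, so the lookup amounts to
+ *
+ *     priv_level = (op_mode >> (pci_func * 2)) & 0x3;
+ *
+ * yielding QLCNIC_MGMT_FUNC, QLCNIC_PRIV_FUNC or QLCNIC_NON_PRIV_FUNC.
+ */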
+int qlcnic_83xx_check_vnic_state(struct qlcnic_adapter *adapter)
+{
+       struct qlcnic_hardware_context *ahw = adapter->ahw;
+       struct qlc_83xx_idc *idc = &ahw->idc;
+       u32 state;
+
+       state = QLCRDX(ahw, QLC_83XX_VNIC_STATE);
+       while (state != QLCNIC_DEV_NPAR_OPER && idc->vnic_wait_limit) {
+               idc->vnic_wait_limit--;
+               msleep(1000);
+               state = QLCRDX(ahw, QLC_83XX_VNIC_STATE);
+       }
+
+       if (state != QLCNIC_DEV_NPAR_OPER) {
+               dev_err(&adapter->pdev->dev,
+                       "vNIC mode not operational, state check timed out.\n");
+               return -EIO;
+       }
+
+       return 0;
+}
+
+int qlcnic_83xx_set_port_eswitch_status(struct qlcnic_adapter *adapter,
+                                       int func, int *port_id)
+{
+       struct qlcnic_info nic_info;
+       int err = 0;
+
+       memset(&nic_info, 0, sizeof(struct qlcnic_info));
+
+       err = qlcnic_get_nic_info(adapter, &nic_info, func);
+       if (err)
+               return err;
+
+       if (nic_info.capabilities & QLC_83XX_ESWITCH_CAPABILITY)
+               *port_id = nic_info.phys_port;
+       else
+               err = -EIO;
+
+       if (!err)
+               adapter->eswitch[*port_id].flags |= QLCNIC_SWITCH_ENABLE;
+
+       return err;
+}
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
new file mode 100644 (file)
index 0000000..daf0515
--- /dev/null
@@ -0,0 +1,1428 @@
+/*
+ * QLogic qlcnic NIC Driver
+ * Copyright (c) 2009-2013 QLogic Corporation
+ *
+ * See LICENSE.qlcnic for copyright and licensing details.
+ */
+
+#include "qlcnic.h"
+
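+/* Mailbox command descriptors: each entry is {command code, request argument
+ * count, response argument count} and is looked up by
+ * qlcnic_82xx_alloc_mbx_args() below to size the req/rsp argument arrays.
+ */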
+static const struct qlcnic_mailbox_metadata qlcnic_mbx_tbl[] = {
+       {QLCNIC_CMD_CREATE_RX_CTX, 4, 1},
+       {QLCNIC_CMD_DESTROY_RX_CTX, 2, 1},
+       {QLCNIC_CMD_CREATE_TX_CTX, 4, 1},
+       {QLCNIC_CMD_DESTROY_TX_CTX, 3, 1},
+       {QLCNIC_CMD_INTRPT_TEST, 4, 1},
+       {QLCNIC_CMD_SET_MTU, 4, 1},
+       {QLCNIC_CMD_READ_PHY, 4, 2},
+       {QLCNIC_CMD_WRITE_PHY, 5, 1},
+       {QLCNIC_CMD_READ_HW_REG, 4, 1},
+       {QLCNIC_CMD_GET_FLOW_CTL, 4, 2},
+       {QLCNIC_CMD_SET_FLOW_CTL, 4, 1},
+       {QLCNIC_CMD_READ_MAX_MTU, 4, 2},
+       {QLCNIC_CMD_READ_MAX_LRO, 4, 2},
+       {QLCNIC_CMD_MAC_ADDRESS, 4, 3},
+       {QLCNIC_CMD_GET_PCI_INFO, 4, 1},
+       {QLCNIC_CMD_GET_NIC_INFO, 4, 1},
+       {QLCNIC_CMD_SET_NIC_INFO, 4, 1},
+       {QLCNIC_CMD_GET_ESWITCH_CAPABILITY, 4, 3},
+       {QLCNIC_CMD_TOGGLE_ESWITCH, 4, 1},
+       {QLCNIC_CMD_GET_ESWITCH_STATUS, 4, 3},
+       {QLCNIC_CMD_SET_PORTMIRRORING, 4, 1},
+       {QLCNIC_CMD_CONFIGURE_ESWITCH, 4, 1},
+       {QLCNIC_CMD_GET_MAC_STATS, 4, 1},
+       {QLCNIC_CMD_GET_ESWITCH_PORT_CONFIG, 4, 3},
+       {QLCNIC_CMD_GET_ESWITCH_STATS, 4, 1},
+       {QLCNIC_CMD_CONFIG_PORT, 4, 1},
+       {QLCNIC_CMD_TEMP_SIZE, 4, 4},
+       {QLCNIC_CMD_GET_TEMP_HDR, 4, 1},
+       {QLCNIC_CMD_82XX_SET_DRV_VER, 4, 1},
+       {QLCNIC_CMD_GET_LED_STATUS, 4, 2},
+       {QLCNIC_CMD_MQ_TX_CONFIG_INTR, 2, 3},
+       {QLCNIC_CMD_DCB_QUERY_CAP, 1, 2},
+       {QLCNIC_CMD_DCB_QUERY_PARAM, 4, 1},
+};
+
+static inline u32 qlcnic_get_cmd_signature(struct qlcnic_hardware_context *ahw)
+{
+       return (ahw->pci_func & 0xff) | ((ahw->fw_hal_version & 0xff) << 8) |
+              (0xcafe << 16);
+}
+
+/* Allocate mailbox registers */
+int qlcnic_82xx_alloc_mbx_args(struct qlcnic_cmd_args *mbx,
+                              struct qlcnic_adapter *adapter, u32 type)
+{
+       int i, size;
+       const struct qlcnic_mailbox_metadata *mbx_tbl;
+
+       mbx_tbl = qlcnic_mbx_tbl;
+       size = ARRAY_SIZE(qlcnic_mbx_tbl);
+       for (i = 0; i < size; i++) {
+               if (type == mbx_tbl[i].cmd) {
+                       mbx->req.num = mbx_tbl[i].in_args;
+                       mbx->rsp.num = mbx_tbl[i].out_args;
+                       mbx->req.arg = kcalloc(mbx->req.num,
+                                              sizeof(u32), GFP_ATOMIC);
+                       if (!mbx->req.arg)
+                               return -ENOMEM;
+                       mbx->rsp.arg = kcalloc(mbx->rsp.num,
+                                              sizeof(u32), GFP_ATOMIC);
+                       if (!mbx->rsp.arg) {
+                               kfree(mbx->req.arg);
+                               mbx->req.arg = NULL;
+                               return -ENOMEM;
+                       }
+                       mbx->req.arg[0] = type;
+                       break;
+               }
+       }
+       return 0;
+}
+
+/* Free up mailbox registers */
+void qlcnic_free_mbx_args(struct qlcnic_cmd_args *cmd)
+{
+       kfree(cmd->req.arg);
+       cmd->req.arg = NULL;
+       kfree(cmd->rsp.arg);
+       cmd->rsp.arg = NULL;
+}
+
+static u32
+qlcnic_poll_rsp(struct qlcnic_adapter *adapter)
+{
+       u32 rsp;
+       int timeout = 0, err = 0;
+
+       do {
+               /* give at least 1 ms for firmware to respond */
+               mdelay(1);
+
+               if (++timeout > QLCNIC_OS_CRB_RETRY_COUNT)
+                       return QLCNIC_CDRP_RSP_TIMEOUT;
+
+               rsp = QLCRD32(adapter, QLCNIC_CDRP_CRB_OFFSET, &err);
+       } while (!QLCNIC_CDRP_IS_RSP(rsp));
+
+       return rsp;
+}
+
+int qlcnic_82xx_issue_cmd(struct qlcnic_adapter *adapter,
+                         struct qlcnic_cmd_args *cmd)
+{
+       int i, err = 0;
+       u32 rsp;
+       u32 signature;
+       struct pci_dev *pdev = adapter->pdev;
+       struct qlcnic_hardware_context *ahw = adapter->ahw;
+       const char *fmt;
+
+       signature = qlcnic_get_cmd_signature(ahw);
+
+       /* Acquire semaphore before accessing CRB */
+       if (qlcnic_api_lock(adapter)) {
+               cmd->rsp.arg[0] = QLCNIC_RCODE_TIMEOUT;
+               return cmd->rsp.arg[0];
+       }
+
+       QLCWR32(adapter, QLCNIC_SIGN_CRB_OFFSET, signature);
+       for (i = 1; i < cmd->req.num; i++)
+               QLCWR32(adapter, QLCNIC_CDRP_ARG(i), cmd->req.arg[i]);
+       QLCWR32(adapter, QLCNIC_CDRP_CRB_OFFSET,
+               QLCNIC_CDRP_FORM_CMD(cmd->req.arg[0]));
+       rsp = qlcnic_poll_rsp(adapter);
+
+       if (rsp == QLCNIC_CDRP_RSP_TIMEOUT) {
+               dev_err(&pdev->dev, "command timeout, response = 0x%x\n", rsp);
+               cmd->rsp.arg[0] = QLCNIC_RCODE_TIMEOUT;
+       } else if (rsp == QLCNIC_CDRP_RSP_FAIL) {
+               cmd->rsp.arg[0] = QLCRD32(adapter, QLCNIC_CDRP_ARG(1), &err);
+               switch (cmd->rsp.arg[0]) {
+               case QLCNIC_RCODE_INVALID_ARGS:
+                       fmt = "CDRP invalid args: [%d]\n";
+                       break;
+               case QLCNIC_RCODE_NOT_SUPPORTED:
+               case QLCNIC_RCODE_NOT_IMPL:
+                       fmt = "CDRP command not supported: [%d]\n";
+                       break;
+               case QLCNIC_RCODE_NOT_PERMITTED:
+                       fmt = "CDRP requested action not permitted: [%d]\n";
+                       break;
+               case QLCNIC_RCODE_INVALID:
+                       fmt = "CDRP invalid or unknown cmd received: [%d]\n";
+                       break;
+               case QLCNIC_RCODE_TIMEOUT:
+                       fmt = "CDRP command timeout: [%d]\n";
+                       break;
+               default:
+                       fmt = "CDRP command failed: [%d]\n";
+                       break;
+               }
+               dev_err(&pdev->dev, fmt, cmd->rsp.arg[0]);
+               qlcnic_dump_mbx(adapter, cmd);
+       } else if (rsp == QLCNIC_CDRP_RSP_OK)
+               cmd->rsp.arg[0] = QLCNIC_RCODE_SUCCESS;
+
+       for (i = 1; i < cmd->rsp.num; i++)
+               cmd->rsp.arg[i] = QLCRD32(adapter, QLCNIC_CDRP_ARG(i), &err);
+
+       /* Release semaphore */
+       qlcnic_api_unlock(adapter);
+       return cmd->rsp.arg[0];
+}
+
+int qlcnic_fw_cmd_set_drv_version(struct qlcnic_adapter *adapter, u32 fw_cmd)
+{
+       struct qlcnic_cmd_args cmd;
+       u32 arg1, arg2, arg3;
+       char drv_string[12];
+       int err = 0;
+
+       memset(drv_string, 0, sizeof(drv_string));
+       snprintf(drv_string, sizeof(drv_string), "%d.%d.%d",
+                _QLCNIC_LINUX_MAJOR, _QLCNIC_LINUX_MINOR,
+                _QLCNIC_LINUX_SUBVERSION);
+
+       err = qlcnic_alloc_mbx_args(&cmd, adapter, fw_cmd);
+       if (err)
+               return err;
+
+       memcpy(&arg1, drv_string, sizeof(u32));
+       memcpy(&arg2, drv_string + 4, sizeof(u32));
+       memcpy(&arg3, drv_string + 8, sizeof(u32));
+
+       cmd.req.arg[1] = arg1;
+       cmd.req.arg[2] = arg2;
+       cmd.req.arg[3] = arg3;
+
+       err = qlcnic_issue_cmd(adapter, &cmd);
+       if (err) {
+               dev_info(&adapter->pdev->dev,
+                        "Failed to set driver version in firmware\n");
+               err = -EIO;
+       }
+       qlcnic_free_mbx_args(&cmd);
+       return err;
+}
+
+int
+qlcnic_fw_cmd_set_mtu(struct qlcnic_adapter *adapter, int mtu)
+{
+       int err = 0;
+       struct qlcnic_cmd_args cmd;
+       struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
+
+       if (recv_ctx->state != QLCNIC_HOST_CTX_STATE_ACTIVE)
+               return err;
+       err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_SET_MTU);
+       if (err)
+               return err;
+
+       cmd.req.arg[1] = recv_ctx->context_id;
+       cmd.req.arg[2] = mtu;
+
+       err = qlcnic_issue_cmd(adapter, &cmd);
+       if (err) {
+               dev_err(&adapter->pdev->dev, "Failed to set mtu\n");
+               err = -EIO;
+       }
+       qlcnic_free_mbx_args(&cmd);
+       return err;
+}
+
+int qlcnic_82xx_fw_cmd_create_rx_ctx(struct qlcnic_adapter *adapter)
+{
+       struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
+       struct qlcnic_hardware_context *ahw = adapter->ahw;
+       dma_addr_t hostrq_phys_addr, cardrsp_phys_addr;
+       struct net_device *netdev = adapter->netdev;
+       u32 temp_intr_crb_mode, temp_rds_crb_mode;
+       struct qlcnic_cardrsp_rds_ring *prsp_rds;
+       struct qlcnic_cardrsp_sds_ring *prsp_sds;
+       struct qlcnic_hostrq_rds_ring *prq_rds;
+       struct qlcnic_hostrq_sds_ring *prq_sds;
+       struct qlcnic_host_rds_ring *rds_ring;
+       struct qlcnic_host_sds_ring *sds_ring;
+       struct qlcnic_cardrsp_rx_ctx *prsp;
+       struct qlcnic_hostrq_rx_ctx *prq;
+       u8 i, nrds_rings, nsds_rings;
+       struct qlcnic_cmd_args cmd;
+       size_t rq_size, rsp_size;
+       u32 cap, reg, val, reg2;
+       u64 phys_addr;
+       u16 temp_u16;
+       void *addr;
+       int err;
+
+       nrds_rings = adapter->max_rds_rings;
+       nsds_rings = adapter->drv_sds_rings;
+
+       rq_size = SIZEOF_HOSTRQ_RX(struct qlcnic_hostrq_rx_ctx, nrds_rings,
+                                  nsds_rings);
+       rsp_size = SIZEOF_CARDRSP_RX(struct qlcnic_cardrsp_rx_ctx, nrds_rings,
+                                    nsds_rings);
+
+       addr = dma_alloc_coherent(&adapter->pdev->dev, rq_size,
+                                 &hostrq_phys_addr, GFP_KERNEL);
+       if (addr == NULL)
+               return -ENOMEM;
+       prq = addr;
+
+       addr = dma_alloc_coherent(&adapter->pdev->dev, rsp_size,
+                       &cardrsp_phys_addr, GFP_KERNEL);
+       if (addr == NULL) {
+               err = -ENOMEM;
+               goto out_free_rq;
+       }
+       prsp = addr;
+
+       prq->host_rsp_dma_addr = cpu_to_le64(cardrsp_phys_addr);
+
+       cap = (QLCNIC_CAP0_LEGACY_CONTEXT | QLCNIC_CAP0_LEGACY_MN
+                                               | QLCNIC_CAP0_VALIDOFF);
+       cap |= (QLCNIC_CAP0_JUMBO_CONTIGUOUS | QLCNIC_CAP0_LRO_CONTIGUOUS);
+
+       if (qlcnic_check_multi_tx(adapter) &&
+           !adapter->ahw->diag_test) {
+               cap |= QLCNIC_CAP0_TX_MULTI;
+       } else {
+               temp_u16 = offsetof(struct qlcnic_hostrq_rx_ctx, msix_handler);
+               prq->valid_field_offset = cpu_to_le16(temp_u16);
+               prq->txrx_sds_binding = nsds_rings - 1;
+               temp_intr_crb_mode = QLCNIC_HOST_INT_CRB_MODE_SHARED;
+               prq->host_int_crb_mode = cpu_to_le32(temp_intr_crb_mode);
+               temp_rds_crb_mode = QLCNIC_HOST_RDS_CRB_MODE_UNIQUE;
+               prq->host_rds_crb_mode = cpu_to_le32(temp_rds_crb_mode);
+       }
+
+       prq->capabilities[0] = cpu_to_le32(cap);
+
+       prq->num_rds_rings = cpu_to_le16(nrds_rings);
+       prq->num_sds_rings = cpu_to_le16(nsds_rings);
+       prq->rds_ring_offset = 0;
+
+       val = le32_to_cpu(prq->rds_ring_offset) +
+               (sizeof(struct qlcnic_hostrq_rds_ring) * nrds_rings);
+       prq->sds_ring_offset = cpu_to_le32(val);
+
+       prq_rds = (struct qlcnic_hostrq_rds_ring *)(prq->data +
+                       le32_to_cpu(prq->rds_ring_offset));
+
+       for (i = 0; i < nrds_rings; i++) {
+               rds_ring = &recv_ctx->rds_rings[i];
+               rds_ring->producer = 0;
+               prq_rds[i].host_phys_addr = cpu_to_le64(rds_ring->phys_addr);
+               prq_rds[i].ring_size = cpu_to_le32(rds_ring->num_desc);
+               prq_rds[i].ring_kind = cpu_to_le32(i);
+               prq_rds[i].buff_size = cpu_to_le64(rds_ring->dma_size);
+       }
+
+       prq_sds = (struct qlcnic_hostrq_sds_ring *)(prq->data +
+                       le32_to_cpu(prq->sds_ring_offset));
+
+       for (i = 0; i < nsds_rings; i++) {
+               sds_ring = &recv_ctx->sds_rings[i];
+               sds_ring->consumer = 0;
+               memset(sds_ring->desc_head, 0, STATUS_DESC_RINGSIZE(sds_ring));
+               prq_sds[i].host_phys_addr = cpu_to_le64(sds_ring->phys_addr);
+               prq_sds[i].ring_size = cpu_to_le32(sds_ring->num_desc);
+               if (qlcnic_check_multi_tx(adapter) &&
+                   !adapter->ahw->diag_test)
+                       prq_sds[i].msi_index = cpu_to_le16(ahw->intr_tbl[i].id);
+               else
+                       prq_sds[i].msi_index = cpu_to_le16(i);
+       }
+
+       phys_addr = hostrq_phys_addr;
+       err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CREATE_RX_CTX);
+       if (err)
+               goto out_free_rsp;
+
+       cmd.req.arg[1] = MSD(phys_addr);
+       cmd.req.arg[2] = LSD(phys_addr);
+       cmd.req.arg[3] = rq_size;
+       err = qlcnic_issue_cmd(adapter, &cmd);
+       if (err) {
+               dev_err(&adapter->pdev->dev,
+                       "Failed to create rx ctx in firmware %d\n", err);
+               goto out_free_rsp;
+       }
+
+       prsp_rds = ((struct qlcnic_cardrsp_rds_ring *)
+                        &prsp->data[le32_to_cpu(prsp->rds_ring_offset)]);
+
+       for (i = 0; i < le16_to_cpu(prsp->num_rds_rings); i++) {
+               rds_ring = &recv_ctx->rds_rings[i];
+               reg = le32_to_cpu(prsp_rds[i].host_producer_crb);
+               rds_ring->crb_rcv_producer = ahw->pci_base0 + reg;
+       }
+
+       prsp_sds = ((struct qlcnic_cardrsp_sds_ring *)
+                       &prsp->data[le32_to_cpu(prsp->sds_ring_offset)]);
+
+       for (i = 0; i < le16_to_cpu(prsp->num_sds_rings); i++) {
+               sds_ring = &recv_ctx->sds_rings[i];
+               reg = le32_to_cpu(prsp_sds[i].host_consumer_crb);
+               if (qlcnic_check_multi_tx(adapter) && !adapter->ahw->diag_test)
+                       reg2 = ahw->intr_tbl[i].src;
+               else
+                       reg2 = le32_to_cpu(prsp_sds[i].interrupt_crb);
+
+               sds_ring->crb_intr_mask = ahw->pci_base0 + reg2;
+               sds_ring->crb_sts_consumer = ahw->pci_base0 + reg;
+       }
+
+       recv_ctx->state = le32_to_cpu(prsp->host_ctx_state);
+       recv_ctx->context_id = le16_to_cpu(prsp->context_id);
+       recv_ctx->virt_port = prsp->virt_port;
+
+       netdev_info(netdev, "Rx Context[%d] Created, state 0x%x\n",
+                   recv_ctx->context_id, recv_ctx->state);
+       qlcnic_free_mbx_args(&cmd);
+
+out_free_rsp:
+       dma_free_coherent(&adapter->pdev->dev, rsp_size, prsp,
+                         cardrsp_phys_addr);
+out_free_rq:
+       dma_free_coherent(&adapter->pdev->dev, rq_size, prq, hostrq_phys_addr);
+
+       return err;
+}
+
+void qlcnic_82xx_fw_cmd_del_rx_ctx(struct qlcnic_adapter *adapter)
+{
+       int err;
+       struct qlcnic_cmd_args cmd;
+       struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
+
+       err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_DESTROY_RX_CTX);
+       if (err)
+               return;
+
+       cmd.req.arg[1] = recv_ctx->context_id;
+       err = qlcnic_issue_cmd(adapter, &cmd);
+       if (err)
+               dev_err(&adapter->pdev->dev,
+                       "Failed to destroy rx ctx in firmware\n");
+
+       recv_ctx->state = QLCNIC_HOST_CTX_STATE_FREED;
+       qlcnic_free_mbx_args(&cmd);
+}
+
+int qlcnic_82xx_fw_cmd_create_tx_ctx(struct qlcnic_adapter *adapter,
+                                    struct qlcnic_host_tx_ring *tx_ring,
+                                    int ring)
+{
+       struct qlcnic_hardware_context *ahw = adapter->ahw;
+       struct net_device *netdev = adapter->netdev;
+       struct qlcnic_hostrq_tx_ctx     *prq;
+       struct qlcnic_hostrq_cds_ring   *prq_cds;
+       struct qlcnic_cardrsp_tx_ctx    *prsp;
+       struct qlcnic_cmd_args cmd;
+       u32 temp, intr_mask, temp_int_crb_mode;
+       dma_addr_t rq_phys_addr, rsp_phys_addr;
+       int temp_nsds_rings, index, err;
+       void *rq_addr, *rsp_addr;
+       size_t rq_size, rsp_size;
+       u64 phys_addr;
+       u16 msix_id;
+
+       /* reset host resources */
+       tx_ring->producer = 0;
+       tx_ring->sw_consumer = 0;
+       *(tx_ring->hw_consumer) = 0;
+
+       rq_size = SIZEOF_HOSTRQ_TX(struct qlcnic_hostrq_tx_ctx);
+       rq_addr = dma_zalloc_coherent(&adapter->pdev->dev, rq_size,
+                                     &rq_phys_addr, GFP_KERNEL);
+       if (!rq_addr)
+               return -ENOMEM;
+
+       rsp_size = SIZEOF_CARDRSP_TX(struct qlcnic_cardrsp_tx_ctx);
+       rsp_addr = dma_zalloc_coherent(&adapter->pdev->dev, rsp_size,
+                                      &rsp_phys_addr, GFP_KERNEL);
+       if (!rsp_addr) {
+               err = -ENOMEM;
+               goto out_free_rq;
+       }
+
+       prq = rq_addr;
+       prsp = rsp_addr;
+
+       prq->host_rsp_dma_addr = cpu_to_le64(rsp_phys_addr);
+
+       temp = (QLCNIC_CAP0_LEGACY_CONTEXT | QLCNIC_CAP0_LEGACY_MN |
+               QLCNIC_CAP0_LSO);
+       if (qlcnic_check_multi_tx(adapter) && !adapter->ahw->diag_test)
+               temp |= QLCNIC_CAP0_TX_MULTI;
+
+       prq->capabilities[0] = cpu_to_le32(temp);
+
+       if (qlcnic_check_multi_tx(adapter) &&
+           !adapter->ahw->diag_test) {
+               temp_nsds_rings = adapter->drv_sds_rings;
+               index = temp_nsds_rings + ring;
+               msix_id = ahw->intr_tbl[index].id;
+               prq->msi_index = cpu_to_le16(msix_id);
+       } else {
+               temp_int_crb_mode = QLCNIC_HOST_INT_CRB_MODE_SHARED;
+               prq->host_int_crb_mode = cpu_to_le32(temp_int_crb_mode);
+               prq->msi_index = 0;
+       }
+
+       prq->interrupt_ctl = 0;
+       prq->cmd_cons_dma_addr = cpu_to_le64(tx_ring->hw_cons_phys_addr);
+
+       prq_cds = &prq->cds_ring;
+
+       prq_cds->host_phys_addr = cpu_to_le64(tx_ring->phys_addr);
+       prq_cds->ring_size = cpu_to_le32(tx_ring->num_desc);
+
+       phys_addr = rq_phys_addr;
+
+       err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CREATE_TX_CTX);
+       if (err)
+               goto out_free_rsp;
+
+       cmd.req.arg[1] = MSD(phys_addr);
+       cmd.req.arg[2] = LSD(phys_addr);
+       cmd.req.arg[3] = rq_size;
+       err = qlcnic_issue_cmd(adapter, &cmd);
+
+       if (err == QLCNIC_RCODE_SUCCESS) {
+               tx_ring->state = le32_to_cpu(prsp->host_ctx_state);
+               temp = le32_to_cpu(prsp->cds_ring.host_producer_crb);
+               tx_ring->crb_cmd_producer = adapter->ahw->pci_base0 + temp;
+               tx_ring->ctx_id = le16_to_cpu(prsp->context_id);
+               if (qlcnic_check_multi_tx(adapter) &&
+                   !adapter->ahw->diag_test &&
+                   (adapter->flags & QLCNIC_MSIX_ENABLED)) {
+                       index = adapter->drv_sds_rings + ring;
+                       intr_mask = ahw->intr_tbl[index].src;
+                       tx_ring->crb_intr_mask = ahw->pci_base0 + intr_mask;
+               }
+
+               netdev_info(netdev, "Tx Context[0x%x] Created, state 0x%x\n",
+                           tx_ring->ctx_id, tx_ring->state);
+       } else {
+               netdev_err(netdev, "Failed to create tx ctx in firmware %d\n",
+                          err);
+               err = -EIO;
+       }
+       qlcnic_free_mbx_args(&cmd);
+
+out_free_rsp:
+       dma_free_coherent(&adapter->pdev->dev, rsp_size, rsp_addr,
+                         rsp_phys_addr);
+out_free_rq:
+       dma_free_coherent(&adapter->pdev->dev, rq_size, rq_addr, rq_phys_addr);
+
+       return err;
+}
+
+void qlcnic_82xx_fw_cmd_del_tx_ctx(struct qlcnic_adapter *adapter,
+                                  struct qlcnic_host_tx_ring *tx_ring)
+{
+       struct qlcnic_cmd_args cmd;
+       int ret;
+
+       ret = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_DESTROY_TX_CTX);
+       if (ret)
+               return;
+
+       cmd.req.arg[1] = tx_ring->ctx_id;
+       if (qlcnic_issue_cmd(adapter, &cmd))
+               dev_err(&adapter->pdev->dev,
+                       "Failed to destroy tx ctx in firmware\n");
+       qlcnic_free_mbx_args(&cmd);
+}
+
+int
+qlcnic_fw_cmd_set_port(struct qlcnic_adapter *adapter, u32 config)
+{
+       int err;
+       struct qlcnic_cmd_args cmd;
+
+       err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CONFIG_PORT);
+       if (err)
+               return err;
+
+       cmd.req.arg[1] = config;
+       err = qlcnic_issue_cmd(adapter, &cmd);
+       qlcnic_free_mbx_args(&cmd);
+       return err;
+}
+
+int qlcnic_alloc_hw_resources(struct qlcnic_adapter *adapter)
+{
+       void *addr;
+       int err, ring;
+       struct qlcnic_recv_context *recv_ctx;
+       struct qlcnic_host_rds_ring *rds_ring;
+       struct qlcnic_host_sds_ring *sds_ring;
+       struct qlcnic_host_tx_ring *tx_ring;
+       __le32 *ptr;
+
+       struct pci_dev *pdev = adapter->pdev;
+
+       recv_ctx = adapter->recv_ctx;
+
+       for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
+               tx_ring = &adapter->tx_ring[ring];
+               ptr = (__le32 *)dma_alloc_coherent(&pdev->dev, sizeof(u32),
+                                                  &tx_ring->hw_cons_phys_addr,
+                                                  GFP_KERNEL);
+               if (ptr == NULL)
+                       return -ENOMEM;
+
+               tx_ring->hw_consumer = ptr;
+               /* cmd desc ring */
+               addr = dma_alloc_coherent(&pdev->dev, TX_DESC_RINGSIZE(tx_ring),
+                                         &tx_ring->phys_addr,
+                                         GFP_KERNEL);
+               if (addr == NULL) {
+                       err = -ENOMEM;
+                       goto err_out_free;
+               }
+
+               tx_ring->desc_head = addr;
+       }
+
+       for (ring = 0; ring < adapter->max_rds_rings; ring++) {
+               rds_ring = &recv_ctx->rds_rings[ring];
+               addr = dma_alloc_coherent(&adapter->pdev->dev,
+                                         RCV_DESC_RINGSIZE(rds_ring),
+                                         &rds_ring->phys_addr, GFP_KERNEL);
+               if (addr == NULL) {
+                       err = -ENOMEM;
+                       goto err_out_free;
+               }
+               rds_ring->desc_head = addr;
+
+       }
+
+       for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
+               sds_ring = &recv_ctx->sds_rings[ring];
+
+               addr = dma_alloc_coherent(&adapter->pdev->dev,
+                                         STATUS_DESC_RINGSIZE(sds_ring),
+                                         &sds_ring->phys_addr, GFP_KERNEL);
+               if (addr == NULL) {
+                       err = -ENOMEM;
+                       goto err_out_free;
+               }
+               sds_ring->desc_head = addr;
+       }
+
+       return 0;
+
+err_out_free:
+       qlcnic_free_hw_resources(adapter);
+       return err;
+}
+
+int qlcnic_fw_create_ctx(struct qlcnic_adapter *dev)
+{
+       int i, err, ring;
+
+       if (dev->flags & QLCNIC_NEED_FLR) {
+               pci_reset_function(dev->pdev);
+               dev->flags &= ~QLCNIC_NEED_FLR;
+       }
+
+       if (qlcnic_83xx_check(dev) && (dev->flags & QLCNIC_MSIX_ENABLED)) {
+               if (dev->ahw->diag_test != QLCNIC_LOOPBACK_TEST) {
+                       err = qlcnic_83xx_config_intrpt(dev, 1);
+                       if (err)
+                               return err;
+               }
+       }
+
+       if (qlcnic_82xx_check(dev) && (dev->flags & QLCNIC_MSIX_ENABLED) &&
+           qlcnic_check_multi_tx(dev) && !dev->ahw->diag_test) {
+               err = qlcnic_82xx_mq_intrpt(dev, 1);
+               if (err)
+                       return err;
+       }
+
+       err = qlcnic_fw_cmd_create_rx_ctx(dev);
+       if (err)
+               goto err_out;
+
+       for (ring = 0; ring < dev->drv_tx_rings; ring++) {
+               err = qlcnic_fw_cmd_create_tx_ctx(dev,
+                                                 &dev->tx_ring[ring],
+                                                 ring);
+               if (err) {
+                       qlcnic_fw_cmd_del_rx_ctx(dev);
+                       if (ring == 0)
+                               goto err_out;
+
+                       for (i = 0; i < ring; i++)
+                               qlcnic_fw_cmd_del_tx_ctx(dev, &dev->tx_ring[i]);
+
+                       goto err_out;
+               }
+       }
+
+       set_bit(__QLCNIC_FW_ATTACHED, &dev->state);
+
+       return 0;
+
+err_out:
+       if (qlcnic_82xx_check(dev) && (dev->flags & QLCNIC_MSIX_ENABLED) &&
+           qlcnic_check_multi_tx(dev) && !dev->ahw->diag_test)
+                       qlcnic_82xx_config_intrpt(dev, 0);
+
+       if (qlcnic_83xx_check(dev) && (dev->flags & QLCNIC_MSIX_ENABLED)) {
+               if (dev->ahw->diag_test != QLCNIC_LOOPBACK_TEST)
+                       qlcnic_83xx_config_intrpt(dev, 0);
+       }
+
+       return err;
+}
+
+void qlcnic_fw_destroy_ctx(struct qlcnic_adapter *adapter)
+{
+       int ring;
+
+       if (test_and_clear_bit(__QLCNIC_FW_ATTACHED, &adapter->state)) {
+               qlcnic_fw_cmd_del_rx_ctx(adapter);
+               for (ring = 0; ring < adapter->drv_tx_rings; ring++)
+                       qlcnic_fw_cmd_del_tx_ctx(adapter,
+                                                &adapter->tx_ring[ring]);
+
+               if (qlcnic_82xx_check(adapter) &&
+                   (adapter->flags & QLCNIC_MSIX_ENABLED) &&
+                   qlcnic_check_multi_tx(adapter) &&
+                   !adapter->ahw->diag_test)
+                               qlcnic_82xx_config_intrpt(adapter, 0);
+
+               if (qlcnic_83xx_check(adapter) &&
+                   (adapter->flags & QLCNIC_MSIX_ENABLED)) {
+                       if (adapter->ahw->diag_test != QLCNIC_LOOPBACK_TEST)
+                               qlcnic_83xx_config_intrpt(adapter, 0);
+               }
+               /* Allow dma queues to drain after context reset */
+               mdelay(20);
+       }
+}
+
+void qlcnic_free_hw_resources(struct qlcnic_adapter *adapter)
+{
+       struct qlcnic_recv_context *recv_ctx;
+       struct qlcnic_host_rds_ring *rds_ring;
+       struct qlcnic_host_sds_ring *sds_ring;
+       struct qlcnic_host_tx_ring *tx_ring;
+       int ring;
+
+       recv_ctx = adapter->recv_ctx;
+
+       for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
+               tx_ring = &adapter->tx_ring[ring];
+               if (tx_ring->hw_consumer != NULL) {
+                       dma_free_coherent(&adapter->pdev->dev, sizeof(u32),
+                                         tx_ring->hw_consumer,
+                                         tx_ring->hw_cons_phys_addr);
+
+                       tx_ring->hw_consumer = NULL;
+               }
+
+               if (tx_ring->desc_head != NULL) {
+                       dma_free_coherent(&adapter->pdev->dev,
+                                         TX_DESC_RINGSIZE(tx_ring),
+                                         tx_ring->desc_head,
+                                         tx_ring->phys_addr);
+                       tx_ring->desc_head = NULL;
+               }
+       }
+
+       for (ring = 0; ring < adapter->max_rds_rings; ring++) {
+               rds_ring = &recv_ctx->rds_rings[ring];
+
+               if (rds_ring->desc_head != NULL) {
+                       dma_free_coherent(&adapter->pdev->dev,
+                                       RCV_DESC_RINGSIZE(rds_ring),
+                                       rds_ring->desc_head,
+                                       rds_ring->phys_addr);
+                       rds_ring->desc_head = NULL;
+               }
+       }
+
+       for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
+               sds_ring = &recv_ctx->sds_rings[ring];
+
+               if (sds_ring->desc_head != NULL) {
+                       dma_free_coherent(&adapter->pdev->dev,
+                               STATUS_DESC_RINGSIZE(sds_ring),
+                               sds_ring->desc_head,
+                               sds_ring->phys_addr);
+                       sds_ring->desc_head = NULL;
+               }
+       }
+}
+
+int qlcnic_82xx_config_intrpt(struct qlcnic_adapter *adapter, u8 op_type)
+{
+       struct qlcnic_hardware_context *ahw = adapter->ahw;
+       struct net_device *netdev = adapter->netdev;
+       struct qlcnic_cmd_args cmd;
+       u32 type, val;
+       int i, err = 0;
+
+       for (i = 0; i < ahw->num_msix; i++) {
+               err = qlcnic_alloc_mbx_args(&cmd, adapter,
+                                           QLCNIC_CMD_MQ_TX_CONFIG_INTR);
+               if (err)
+                       return err;
+               type = op_type ? QLCNIC_INTRPT_ADD : QLCNIC_INTRPT_DEL;
+               val = type | (ahw->intr_tbl[i].type << 4);
+               if (ahw->intr_tbl[i].type == QLCNIC_INTRPT_MSIX)
+                       val |= (ahw->intr_tbl[i].id << 16);
+               cmd.req.arg[1] = val;
+               err = qlcnic_issue_cmd(adapter, &cmd);
+               if (err) {
+                       netdev_err(netdev, "Failed to %s interrupts %d\n",
+                                  op_type == QLCNIC_INTRPT_ADD ? "Add" :
+                                  "Delete", err);
+                       qlcnic_free_mbx_args(&cmd);
+                       return err;
+               }
+               val = cmd.rsp.arg[1];
+               if (LSB(val)) {
+                       netdev_info(netdev,
+                                   "failed to configure interrupt for %d\n",
+                                   ahw->intr_tbl[i].id);
+                       continue;
+               }
+               if (op_type) {
+                       ahw->intr_tbl[i].id = MSW(val);
+                       ahw->intr_tbl[i].enabled = 1;
+                       ahw->intr_tbl[i].src = cmd.rsp.arg[2];
+               } else {
+                       ahw->intr_tbl[i].id = i;
+                       ahw->intr_tbl[i].enabled = 0;
+                       ahw->intr_tbl[i].src = 0;
+               }
+               qlcnic_free_mbx_args(&cmd);
+       }
+
+       return err;
+}
+
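+/* Read the MAC address of a PCI function through QLCNIC_CMD_MAC_ADDRESS.
+ * Request arg1 holds the function number with BIT_8 set; the first two bytes
+ * of the address come back in response arg2 and the remaining four in arg1,
+ * and are unpacked most-significant byte first.
+ */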
+int qlcnic_82xx_get_mac_address(struct qlcnic_adapter *adapter, u8 *mac,
+                               u8 function)
+{
+       int err, i;
+       struct qlcnic_cmd_args cmd;
+       u32 mac_low, mac_high;
+
+       err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_MAC_ADDRESS);
+       if (err)
+               return err;
+
+       cmd.req.arg[1] = function | BIT_8;
+       err = qlcnic_issue_cmd(adapter, &cmd);
+
+       if (err == QLCNIC_RCODE_SUCCESS) {
+               mac_low = cmd.rsp.arg[1];
+               mac_high = cmd.rsp.arg[2];
+
+               for (i = 0; i < 2; i++)
+                       mac[i] = (u8) (mac_high >> ((1 - i) * 8));
+               for (i = 2; i < 6; i++)
+                       mac[i] = (u8) (mac_low >> ((5 - i) * 8));
+       } else {
+               dev_err(&adapter->pdev->dev,
+                       "Failed to get mac address %d\n", err);
+               err = -EIO;
+       }
+       qlcnic_free_mbx_args(&cmd);
+       return err;
+}
+
+/* Get info of a NIC partition */
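+/* The reply is a struct qlcnic_info_le written by the firmware into a
+ * DMA-coherent buffer whose address is split across mailbox args 1/2
+ * (MSD/LSD); arg3 packs the target function id into the upper 16 bits and
+ * the buffer size into the lower 16.  The little-endian fields are converted
+ * before being copied to npar_info.
+ */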
+int qlcnic_82xx_get_nic_info(struct qlcnic_adapter *adapter,
+                            struct qlcnic_info *npar_info, u8 func_id)
+{
+       int     err;
+       dma_addr_t nic_dma_t;
+       const struct qlcnic_info_le *nic_info;
+       void *nic_info_addr;
+       struct qlcnic_cmd_args cmd;
+       size_t  nic_size = sizeof(struct qlcnic_info_le);
+
+       nic_info_addr = dma_zalloc_coherent(&adapter->pdev->dev, nic_size,
+                                           &nic_dma_t, GFP_KERNEL);
+       if (!nic_info_addr)
+               return -ENOMEM;
+
+       nic_info = nic_info_addr;
+
+       err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_NIC_INFO);
+       if (err)
+               goto out_free_dma;
+
+       cmd.req.arg[1] = MSD(nic_dma_t);
+       cmd.req.arg[2] = LSD(nic_dma_t);
+       cmd.req.arg[3] = (func_id << 16 | nic_size);
+       err = qlcnic_issue_cmd(adapter, &cmd);
+       if (err != QLCNIC_RCODE_SUCCESS) {
+               dev_err(&adapter->pdev->dev,
+                       "Failed to get nic info %d\n", err);
+               err = -EIO;
+       } else {
+               npar_info->pci_func = le16_to_cpu(nic_info->pci_func);
+               npar_info->op_mode = le16_to_cpu(nic_info->op_mode);
+               npar_info->min_tx_bw = le16_to_cpu(nic_info->min_tx_bw);
+               npar_info->max_tx_bw = le16_to_cpu(nic_info->max_tx_bw);
+               npar_info->phys_port = le16_to_cpu(nic_info->phys_port);
+               npar_info->switch_mode = le16_to_cpu(nic_info->switch_mode);
+               npar_info->max_tx_ques = le16_to_cpu(nic_info->max_tx_ques);
+               npar_info->max_rx_ques = le16_to_cpu(nic_info->max_rx_ques);
+               npar_info->capabilities = le32_to_cpu(nic_info->capabilities);
+               npar_info->max_mtu = le16_to_cpu(nic_info->max_mtu);
+       }
+
+       qlcnic_free_mbx_args(&cmd);
+out_free_dma:
+       dma_free_coherent(&adapter->pdev->dev, nic_size, nic_info_addr,
+                         nic_dma_t);
+
+       return err;
+}
+
+/* Configure a NIC partition */
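+/* Only a management function may change partition settings.  The request
+ * uses the same DMA buffer and (pci_func << 16 | size) arg3 layout as the
+ * "get" path above, with the qlcnic_info_le fields converted to little
+ * endian before the command is issued.
+ */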
+int qlcnic_82xx_set_nic_info(struct qlcnic_adapter *adapter,
+                            struct qlcnic_info *nic)
+{
+       int err = -EIO;
+       dma_addr_t nic_dma_t;
+       void *nic_info_addr;
+       struct qlcnic_cmd_args cmd;
+       struct qlcnic_info_le *nic_info;
+       size_t nic_size = sizeof(struct qlcnic_info_le);
+
+       if (adapter->ahw->op_mode != QLCNIC_MGMT_FUNC)
+               return err;
+
+       nic_info_addr = dma_zalloc_coherent(&adapter->pdev->dev, nic_size,
+                                           &nic_dma_t, GFP_KERNEL);
+       if (!nic_info_addr)
+               return -ENOMEM;
+
+       nic_info = nic_info_addr;
+
+       nic_info->pci_func = cpu_to_le16(nic->pci_func);
+       nic_info->op_mode = cpu_to_le16(nic->op_mode);
+       nic_info->phys_port = cpu_to_le16(nic->phys_port);
+       nic_info->switch_mode = cpu_to_le16(nic->switch_mode);
+       nic_info->capabilities = cpu_to_le32(nic->capabilities);
+       nic_info->max_mac_filters = nic->max_mac_filters;
+       nic_info->max_tx_ques = cpu_to_le16(nic->max_tx_ques);
+       nic_info->max_rx_ques = cpu_to_le16(nic->max_rx_ques);
+       nic_info->min_tx_bw = cpu_to_le16(nic->min_tx_bw);
+       nic_info->max_tx_bw = cpu_to_le16(nic->max_tx_bw);
+
+       err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_SET_NIC_INFO);
+       if (err)
+               goto out_free_dma;
+
+       cmd.req.arg[1] = MSD(nic_dma_t);
+       cmd.req.arg[2] = LSD(nic_dma_t);
+       cmd.req.arg[3] = ((nic->pci_func << 16) | nic_size);
+       err = qlcnic_issue_cmd(adapter, &cmd);
+
+       if (err != QLCNIC_RCODE_SUCCESS) {
+               dev_err(&adapter->pdev->dev,
+                       "Failed to set nic info %d\n", err);
+               err = -EIO;
+       }
+
+       qlcnic_free_mbx_args(&cmd);
+out_free_dma:
+       dma_free_coherent(&adapter->pdev->dev, nic_size, nic_info_addr,
+                         nic_dma_t);
+
+       return err;
+}
+
+/* Get PCI Info of a partition */
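+/* A single QLCNIC_CMD_GET_PCI_INFO exchange returns one qlcnic_pci_info_le
+ * entry per possible function (ahw->max_vnic_func) in a shared DMA buffer.
+ * Inactive entries are skipped; the NIC/FCoE/iSCSI tallies gathered by
+ * qlcnic_get_pci_func_type() yield total_nic_func and total_pci_func, and a
+ * zero count in either is treated as an error.
+ */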
+int qlcnic_82xx_get_pci_info(struct qlcnic_adapter *adapter,
+                            struct qlcnic_pci_info *pci_info)
+{
+       struct qlcnic_hardware_context *ahw = adapter->ahw;
+       size_t npar_size = sizeof(struct qlcnic_pci_info_le);
+       size_t pci_size = npar_size * ahw->max_vnic_func;
+       u16 nic = 0, fcoe = 0, iscsi = 0;
+       struct qlcnic_pci_info_le *npar;
+       struct qlcnic_cmd_args cmd;
+       dma_addr_t pci_info_dma_t;
+       void *pci_info_addr;
+       int err = 0, i;
+
+       pci_info_addr = dma_zalloc_coherent(&adapter->pdev->dev, pci_size,
+                                           &pci_info_dma_t, GFP_KERNEL);
+       if (!pci_info_addr)
+               return -ENOMEM;
+
+       npar = pci_info_addr;
+       err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_PCI_INFO);
+       if (err)
+               goto out_free_dma;
+
+       cmd.req.arg[1] = MSD(pci_info_dma_t);
+       cmd.req.arg[2] = LSD(pci_info_dma_t);
+       cmd.req.arg[3] = pci_size;
+       err = qlcnic_issue_cmd(adapter, &cmd);
+
+       ahw->total_nic_func = 0;
+       if (err == QLCNIC_RCODE_SUCCESS) {
+               for (i = 0; i < ahw->max_vnic_func; i++, npar++, pci_info++) {
+                       pci_info->id = le16_to_cpu(npar->id);
+                       pci_info->active = le16_to_cpu(npar->active);
+                       if (!pci_info->active)
+                               continue;
+                       pci_info->type = le16_to_cpu(npar->type);
+                       err = qlcnic_get_pci_func_type(adapter, pci_info->type,
+                                                      &nic, &fcoe, &iscsi);
+                       pci_info->default_port =
+                               le16_to_cpu(npar->default_port);
+                       pci_info->tx_min_bw =
+                               le16_to_cpu(npar->tx_min_bw);
+                       pci_info->tx_max_bw =
+                               le16_to_cpu(npar->tx_max_bw);
+                       memcpy(pci_info->mac, npar->mac, ETH_ALEN);
+               }
+       } else {
+               dev_err(&adapter->pdev->dev,
+                       "Failed to get PCI Info %d\n", err);
+               err = -EIO;
+       }
+
+       ahw->total_nic_func = nic;
+       ahw->total_pci_func = nic + fcoe + iscsi;
+       if (ahw->total_nic_func == 0 || ahw->total_pci_func == 0) {
+               dev_err(&adapter->pdev->dev,
+                       "%s: Invalid function count: total nic func[%x], total pci func[%x]\n",
+                       __func__, ahw->total_nic_func, ahw->total_pci_func);
+               err = -EIO;
+       }
+       qlcnic_free_mbx_args(&cmd);
+out_free_dma:
+       dma_free_coherent(&adapter->pdev->dev, pci_size, pci_info_addr,
+               pci_info_dma_t);
+
+       return err;
+}
+
+/* Configure eSwitch for port mirroring */
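+/* Request arg1 encodes the eSwitch id in the low bits, BIT_4 to enable (or
+ * clear to disable) mirroring and the monitored PCI function in bits 8-15.
+ * Only a management function with the target eSwitch enabled may issue
+ * QLCNIC_CMD_SET_PORTMIRRORING.
+ */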
+int qlcnic_config_port_mirroring(struct qlcnic_adapter *adapter, u8 id,
+                                u8 enable_mirroring, u8 pci_func)
+{
+       struct device *dev = &adapter->pdev->dev;
+       struct qlcnic_cmd_args cmd;
+       int err = -EIO;
+       u32 arg1;
+
+       if (adapter->ahw->op_mode != QLCNIC_MGMT_FUNC ||
+           !(adapter->eswitch[id].flags & QLCNIC_SWITCH_ENABLE)) {
+               dev_err(&adapter->pdev->dev, "%s: Not a management function\n",
+                       __func__);
+               return err;
+       }
+
+       arg1 = id | (enable_mirroring ? BIT_4 : 0);
+       arg1 |= pci_func << 8;
+
+       err = qlcnic_alloc_mbx_args(&cmd, adapter,
+                                   QLCNIC_CMD_SET_PORTMIRRORING);
+       if (err)
+               return err;
+
+       cmd.req.arg[1] = arg1;
+       err = qlcnic_issue_cmd(adapter, &cmd);
+
+       if (err != QLCNIC_RCODE_SUCCESS)
+               dev_err(dev, "Failed to configure port mirroring for vNIC function %d on eSwitch %d\n",
+                       pci_func, id);
+       else
+               dev_info(dev, "Configured port mirroring for vNIC function %d on eSwitch %d\n",
+                        pci_func, id);
+       qlcnic_free_mbx_args(&cmd);
+
+       return err;
+}
+
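+/* Fetch per-function eSwitch statistics.  arg1 packs the function id, the
+ * statistics version at bit 8, the QLCNIC_STATS_PORT selector at bit 12, the
+ * Rx/Tx selector at bit 15 and the reply size in the upper 16 bits; the
+ * counters are returned little endian in a DMA buffer and copied into
+ * esw_stats.  Non-management functions may only query their own function.
+ */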
+int qlcnic_get_port_stats(struct qlcnic_adapter *adapter, const u8 func,
+               const u8 rx_tx, struct __qlcnic_esw_statistics *esw_stats)
+{
+       size_t stats_size = sizeof(struct qlcnic_esw_stats_le);
+       struct qlcnic_esw_stats_le *stats;
+       dma_addr_t stats_dma_t;
+       void *stats_addr;
+       u32 arg1;
+       struct qlcnic_cmd_args cmd;
+       int err;
+
+       if (esw_stats == NULL)
+               return -ENOMEM;
+
+       if ((adapter->ahw->op_mode != QLCNIC_MGMT_FUNC) &&
+           (func != adapter->ahw->pci_func)) {
+               dev_err(&adapter->pdev->dev,
+                       "No privilege to query stats for func=%d", func);
+               return -EIO;
+       }
+
+       stats_addr = dma_zalloc_coherent(&adapter->pdev->dev, stats_size,
+                                        &stats_dma_t, GFP_KERNEL);
+       if (!stats_addr)
+               return -ENOMEM;
+
+       arg1 = func | QLCNIC_STATS_VERSION << 8 | QLCNIC_STATS_PORT << 12;
+       arg1 |= rx_tx << 15 | stats_size << 16;
+
+       err = qlcnic_alloc_mbx_args(&cmd, adapter,
+                                   QLCNIC_CMD_GET_ESWITCH_STATS);
+       if (err)
+               goto out_free_dma;
+
+       cmd.req.arg[1] = arg1;
+       cmd.req.arg[2] = MSD(stats_dma_t);
+       cmd.req.arg[3] = LSD(stats_dma_t);
+       err = qlcnic_issue_cmd(adapter, &cmd);
+
+       if (!err) {
+               stats = stats_addr;
+               esw_stats->context_id = le16_to_cpu(stats->context_id);
+               esw_stats->version = le16_to_cpu(stats->version);
+               esw_stats->size = le16_to_cpu(stats->size);
+               esw_stats->multicast_frames =
+                               le64_to_cpu(stats->multicast_frames);
+               esw_stats->broadcast_frames =
+                               le64_to_cpu(stats->broadcast_frames);
+               esw_stats->unicast_frames = le64_to_cpu(stats->unicast_frames);
+               esw_stats->dropped_frames = le64_to_cpu(stats->dropped_frames);
+               esw_stats->local_frames = le64_to_cpu(stats->local_frames);
+               esw_stats->errors = le64_to_cpu(stats->errors);
+               esw_stats->numbytes = le64_to_cpu(stats->numbytes);
+       }
+
+       qlcnic_free_mbx_args(&cmd);
+out_free_dma:
+       dma_free_coherent(&adapter->pdev->dev, stats_size, stats_addr,
+                         stats_dma_t);
+
+       return err;
+}
+
+/* This routine will retrieve the MAC statistics from firmware */
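+/* arg1 carries the size of the qlcnic_mac_statistics_le reply in its upper
+ * 16 bits, while args 2/3 hold the DMA address (MSD/LSD) the firmware fills
+ * with the little-endian counters.
+ */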
+int qlcnic_get_mac_stats(struct qlcnic_adapter *adapter,
+               struct qlcnic_mac_statistics *mac_stats)
+{
+       struct qlcnic_mac_statistics_le *stats;
+       struct qlcnic_cmd_args cmd;
+       size_t stats_size = sizeof(struct qlcnic_mac_statistics_le);
+       dma_addr_t stats_dma_t;
+       void *stats_addr;
+       int err;
+
+       if (mac_stats == NULL)
+               return -ENOMEM;
+
+       stats_addr = dma_zalloc_coherent(&adapter->pdev->dev, stats_size,
+                                        &stats_dma_t, GFP_KERNEL);
+       if (!stats_addr)
+               return -ENOMEM;
+
+       err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_MAC_STATS);
+       if (err)
+               goto out_free_dma;
+
+       cmd.req.arg[1] = stats_size << 16;
+       cmd.req.arg[2] = MSD(stats_dma_t);
+       cmd.req.arg[3] = LSD(stats_dma_t);
+       err = qlcnic_issue_cmd(adapter, &cmd);
+       if (!err) {
+               stats = stats_addr;
+               mac_stats->mac_tx_frames = le64_to_cpu(stats->mac_tx_frames);
+               mac_stats->mac_tx_bytes = le64_to_cpu(stats->mac_tx_bytes);
+               mac_stats->mac_tx_mcast_pkts =
+                                       le64_to_cpu(stats->mac_tx_mcast_pkts);
+               mac_stats->mac_tx_bcast_pkts =
+                                       le64_to_cpu(stats->mac_tx_bcast_pkts);
+               mac_stats->mac_rx_frames = le64_to_cpu(stats->mac_rx_frames);
+               mac_stats->mac_rx_bytes = le64_to_cpu(stats->mac_rx_bytes);
+               mac_stats->mac_rx_mcast_pkts =
+                                       le64_to_cpu(stats->mac_rx_mcast_pkts);
+               mac_stats->mac_rx_length_error =
+                               le64_to_cpu(stats->mac_rx_length_error);
+               mac_stats->mac_rx_length_small =
+                               le64_to_cpu(stats->mac_rx_length_small);
+               mac_stats->mac_rx_length_large =
+                               le64_to_cpu(stats->mac_rx_length_large);
+               mac_stats->mac_rx_jabber = le64_to_cpu(stats->mac_rx_jabber);
+               mac_stats->mac_rx_dropped = le64_to_cpu(stats->mac_rx_dropped);
+               mac_stats->mac_rx_crc_error = le64_to_cpu(stats->mac_rx_crc_error);
+       } else {
+               dev_err(&adapter->pdev->dev,
+                       "%s: Get mac stats failed, err=%d.\n", __func__, err);
+       }
+
+       qlcnic_free_mbx_args(&cmd);
+
+out_free_dma:
+       dma_free_coherent(&adapter->pdev->dev, stats_size, stats_addr,
+                         stats_dma_t);
+
+       return err;
+}
+
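+/* Aggregate eSwitch statistics by summing, via QLCNIC_ADD_ESW_STATS, the
+ * per-port counters of every NIC partition attached to the requested
+ * eSwitch.  Counters start out as QLCNIC_STATS_NOT_AVAIL and are only
+ * accumulated for functions whose port query succeeds; management
+ * function only.
+ */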
+int qlcnic_get_eswitch_stats(struct qlcnic_adapter *adapter, const u8 eswitch,
+               const u8 rx_tx, struct __qlcnic_esw_statistics *esw_stats)
+{
+       struct __qlcnic_esw_statistics port_stats;
+       u8 i;
+       int ret = -EIO;
+
+       if (esw_stats == NULL)
+               return -ENOMEM;
+       if (adapter->ahw->op_mode != QLCNIC_MGMT_FUNC)
+               return -EIO;
+       if (adapter->npars == NULL)
+               return -EIO;
+
+       memset(esw_stats, 0, sizeof(u64));
+       esw_stats->unicast_frames = QLCNIC_STATS_NOT_AVAIL;
+       esw_stats->multicast_frames = QLCNIC_STATS_NOT_AVAIL;
+       esw_stats->broadcast_frames = QLCNIC_STATS_NOT_AVAIL;
+       esw_stats->dropped_frames = QLCNIC_STATS_NOT_AVAIL;
+       esw_stats->errors = QLCNIC_STATS_NOT_AVAIL;
+       esw_stats->local_frames = QLCNIC_STATS_NOT_AVAIL;
+       esw_stats->numbytes = QLCNIC_STATS_NOT_AVAIL;
+       esw_stats->context_id = eswitch;
+
+       for (i = 0; i < adapter->ahw->total_nic_func; i++) {
+               if (adapter->npars[i].phy_port != eswitch)
+                       continue;
+
+               memset(&port_stats, 0, sizeof(struct __qlcnic_esw_statistics));
+               if (qlcnic_get_port_stats(adapter, adapter->npars[i].pci_func,
+                                         rx_tx, &port_stats))
+                       continue;
+
+               esw_stats->size = port_stats.size;
+               esw_stats->version = port_stats.version;
+               QLCNIC_ADD_ESW_STATS(esw_stats->unicast_frames,
+                                               port_stats.unicast_frames);
+               QLCNIC_ADD_ESW_STATS(esw_stats->multicast_frames,
+                                               port_stats.multicast_frames);
+               QLCNIC_ADD_ESW_STATS(esw_stats->broadcast_frames,
+                                               port_stats.broadcast_frames);
+               QLCNIC_ADD_ESW_STATS(esw_stats->dropped_frames,
+                                               port_stats.dropped_frames);
+               QLCNIC_ADD_ESW_STATS(esw_stats->errors,
+                                               port_stats.errors);
+               QLCNIC_ADD_ESW_STATS(esw_stats->local_frames,
+                                               port_stats.local_frames);
+               QLCNIC_ADD_ESW_STATS(esw_stats->numbytes,
+                                               port_stats.numbytes);
+               ret = 0;
+       }
+       return ret;
+}
+
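+/* Clear port or eSwitch counters.  The QLCNIC_CMD_GET_ESWITCH_STATS command
+ * is reused with BIT_14 set in arg1 to request a reset instead of a query;
+ * the port index and Rx/Tx selector are validated against the function count
+ * or QLCNIC_NIU_MAX_XG_PORTS first.
+ */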
+int qlcnic_clear_esw_stats(struct qlcnic_adapter *adapter, const u8 func_esw,
+               const u8 port, const u8 rx_tx)
+{
+       struct qlcnic_hardware_context *ahw = adapter->ahw;
+       struct qlcnic_cmd_args cmd;
+       int err;
+       u32 arg1;
+
+       if (ahw->op_mode != QLCNIC_MGMT_FUNC)
+               return -EIO;
+
+       if (func_esw == QLCNIC_STATS_PORT) {
+               if (port >= ahw->max_vnic_func)
+                       goto err_ret;
+       } else if (func_esw == QLCNIC_STATS_ESWITCH) {
+               if (port >= QLCNIC_NIU_MAX_XG_PORTS)
+                       goto err_ret;
+       } else {
+               goto err_ret;
+       }
+
+       if (rx_tx > QLCNIC_QUERY_TX_COUNTER)
+               goto err_ret;
+
+       arg1 = port | QLCNIC_STATS_VERSION << 8 | func_esw << 12;
+       arg1 |= BIT_14 | rx_tx << 15;
+
+       err = qlcnic_alloc_mbx_args(&cmd, adapter,
+                                   QLCNIC_CMD_GET_ESWITCH_STATS);
+       if (err)
+               return err;
+
+       cmd.req.arg[1] = arg1;
+       err = qlcnic_issue_cmd(adapter, &cmd);
+       qlcnic_free_mbx_args(&cmd);
+       return err;
+
+err_ret:
+       dev_err(&adapter->pdev->dev,
+               "Invalid args func_esw %d port %d rx_tx %d\n",
+               func_esw, port, rx_tx);
+       return -EIO;
+}
+
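+/* Fetch the current eSwitch port configuration word pair.  On return arg1
+ * carries the per-port behaviour bits decoded by the callers below (BIT_4
+ * discard tagged, BIT_5 host VLAN tag, BIT_6 promiscuous mode, BIT_7 MAC
+ * override, VLAN id in bits 16-31) and arg2 the MAC anti-spoof and offload
+ * bits.
+ */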
+static int __qlcnic_get_eswitch_port_config(struct qlcnic_adapter *adapter,
+                                           u32 *arg1, u32 *arg2)
+{
+       struct device *dev = &adapter->pdev->dev;
+       struct qlcnic_cmd_args cmd;
+       u8 pci_func = *arg1 >> 8;
+       int err;
+
+       err = qlcnic_alloc_mbx_args(&cmd, adapter,
+                                   QLCNIC_CMD_GET_ESWITCH_PORT_CONFIG);
+       if (err)
+               return err;
+
+       cmd.req.arg[1] = *arg1;
+       err = qlcnic_issue_cmd(adapter, &cmd);
+       *arg1 = cmd.rsp.arg[1];
+       *arg2 = cmd.rsp.arg[2];
+       qlcnic_free_mbx_args(&cmd);
+
+       if (err == QLCNIC_RCODE_SUCCESS)
+               dev_info(dev, "Get eSwitch port config for vNIC function %d\n",
+                        pci_func);
+       else
+               dev_err(dev, "Failed to get eswitch port config for vNIC function %d\n",
+                       pci_func);
+       return err;
+}
+
+/* Configure eSwitch port
+ * op_mode = 0 for setting default port behavior
+ * op_mode = 1 for setting vlan id
+ * op_mode = 2 for deleting vlan id
+ * op_type = 0 for vlan_id
+ * op_type = 1 for port vlan_id
+ */
+int qlcnic_config_switch_port(struct qlcnic_adapter *adapter,
+               struct qlcnic_esw_func_cfg *esw_cfg)
+{
+       struct device *dev = &adapter->pdev->dev;
+       struct qlcnic_cmd_args cmd;
+       int err = -EIO, index;
+       u32 arg1, arg2 = 0;
+       u8 pci_func;
+
+       if (adapter->ahw->op_mode != QLCNIC_MGMT_FUNC) {
+               dev_err(&adapter->pdev->dev, "%s: Not a management function\n",
+                       __func__);
+               return err;
+       }
+
+       pci_func = esw_cfg->pci_func;
+       index = qlcnic_is_valid_nic_func(adapter, pci_func);
+       if (index < 0)
+               return err;
+       arg1 = (adapter->npars[index].phy_port & BIT_0);
+       arg1 |= (pci_func << 8);
+
+       if (__qlcnic_get_eswitch_port_config(adapter, &arg1, &arg2))
+               return err;
+       arg1 &= ~(0x0ff << 8);
+       arg1 |= (pci_func << 8);
+       arg1 &= ~(BIT_2 | BIT_3);
+       switch (esw_cfg->op_mode) {
+       case QLCNIC_PORT_DEFAULTS:
+               arg1 |= (BIT_4 | BIT_6 | BIT_7);
+               arg2 |= (BIT_0 | BIT_1);
+               if (adapter->ahw->capabilities & QLCNIC_FW_CAPABILITY_TSO)
+                       arg2 |= (BIT_2 | BIT_3);
+               if (!(esw_cfg->discard_tagged))
+                       arg1 &= ~BIT_4;
+               if (!(esw_cfg->promisc_mode))
+                       arg1 &= ~BIT_6;
+               if (!(esw_cfg->mac_override))
+                       arg1 &= ~BIT_7;
+               if (!(esw_cfg->mac_anti_spoof))
+                       arg2 &= ~BIT_0;
+               if (!(esw_cfg->offload_flags & BIT_0))
+                       arg2 &= ~(BIT_1 | BIT_2 | BIT_3);
+               if (!(esw_cfg->offload_flags & BIT_1))
+                       arg2 &= ~BIT_2;
+               if (!(esw_cfg->offload_flags & BIT_2))
+                       arg2 &= ~BIT_3;
+               break;
+       case QLCNIC_ADD_VLAN:
+               arg1 &= ~(0x0ffff << 16);
+               arg1 |= (BIT_2 | BIT_5);
+               arg1 |= (esw_cfg->vlan_id << 16);
+               break;
+       case QLCNIC_DEL_VLAN:
+               arg1 |= (BIT_3 | BIT_5);
+               arg1 &= ~(0x0ffff << 16);
+               break;
+       default:
+               dev_err(&adapter->pdev->dev, "%s: Invalid opmode 0x%x\n",
+                       __func__, esw_cfg->op_mode);
+               return err;
+       }
+
+       err = qlcnic_alloc_mbx_args(&cmd, adapter,
+                                   QLCNIC_CMD_CONFIGURE_ESWITCH);
+       if (err)
+               return err;
+
+       cmd.req.arg[1] = arg1;
+       cmd.req.arg[2] = arg2;
+       err = qlcnic_issue_cmd(adapter, &cmd);
+       qlcnic_free_mbx_args(&cmd);
+
+       if (err != QLCNIC_RCODE_SUCCESS)
+               dev_err(dev, "Failed to configure eswitch for vNIC function %d\n",
+                       pci_func);
+       else
+               dev_info(dev, "Configured eSwitch for vNIC function %d\n",
+                        pci_func);
+
+       return err;
+}
+
+int
+qlcnic_get_eswitch_port_config(struct qlcnic_adapter *adapter,
+                       struct qlcnic_esw_func_cfg *esw_cfg)
+{
+       u32 arg1, arg2;
+       int index;
+       u8 phy_port;
+
+       if (adapter->ahw->op_mode == QLCNIC_MGMT_FUNC) {
+               index = qlcnic_is_valid_nic_func(adapter, esw_cfg->pci_func);
+               if (index < 0)
+                       return -EIO;
+               phy_port = adapter->npars[index].phy_port;
+       } else {
+               phy_port = adapter->ahw->physical_port;
+       }
+       arg1 = phy_port;
+       arg1 |= (esw_cfg->pci_func << 8);
+       if (__qlcnic_get_eswitch_port_config(adapter, &arg1, &arg2))
+               return -EIO;
+
+       esw_cfg->discard_tagged = !!(arg1 & BIT_4);
+       esw_cfg->host_vlan_tag = !!(arg1 & BIT_5);
+       esw_cfg->promisc_mode = !!(arg1 & BIT_6);
+       esw_cfg->mac_override = !!(arg1 & BIT_7);
+       esw_cfg->vlan_id = LSW(arg1 >> 16);
+       esw_cfg->mac_anti_spoof = (arg2 & 0x1);
+       esw_cfg->offload_flags = ((arg2 >> 1) & 0x7);
+
+       return 0;
+}
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c
new file mode 100644 (file)
index 0000000..4b76c69
--- /dev/null
@@ -0,0 +1,1147 @@
+/*
+ * QLogic qlcnic NIC Driver
+ * Copyright (c)  2009-2013 QLogic Corporation
+ *
+ * See LICENSE.qlcnic for copyright and licensing details.
+ */
+
+#include <linux/types.h>
+#include "qlcnic.h"
+
+#define QLC_DCB_NUM_PARAM              3
+#define QLC_DCB_LOCAL_IDX              0
+#define QLC_DCB_OPER_IDX               1
+#define QLC_DCB_PEER_IDX               2
+
+#define QLC_DCB_GET_MAP(V)             (1 << V)
+
+#define QLC_DCB_FW_VER                 0x2
+#define QLC_DCB_MAX_TC                 0x8
+#define QLC_DCB_MAX_APP                        0x8
+#define QLC_DCB_MAX_PRIO               QLC_DCB_MAX_TC
+#define QLC_DCB_MAX_PG                 QLC_DCB_MAX_TC
+
+#define QLC_DCB_TSA_SUPPORT(V)         (V & 0x1)
+#define QLC_DCB_ETS_SUPPORT(V)         ((V >> 1) & 0x1)
+#define QLC_DCB_VERSION_SUPPORT(V)     ((V >> 2) & 0xf)
+#define QLC_DCB_MAX_NUM_TC(V)          ((V >> 20) & 0xf)
+#define QLC_DCB_MAX_NUM_ETS_TC(V)      ((V >> 24) & 0xf)
+#define QLC_DCB_MAX_NUM_PFC_TC(V)      ((V >> 28) & 0xf)
+#define QLC_DCB_GET_TC_PRIO(X, P)      ((X >> (P * 3)) & 0x7)
+#define QLC_DCB_GET_PGID_PRIO(X, P)    ((X >> (P * 8)) & 0xff)
+#define QLC_DCB_GET_BWPER_PG(X, P)     ((X >> (P * 8)) & 0xff)
+#define QLC_DCB_GET_TSA_PG(X, P)       ((X >> (P * 8)) & 0xff)
+#define QLC_DCB_GET_PFC_PRIO(X, P)     (((X >> 24) >> P) & 0x1)
+#define QLC_DCB_GET_PROTO_ID_APP(X)    ((X >> 8) & 0xffff)
+#define QLC_DCB_GET_SELECTOR_APP(X)    (X & 0xff)
+
+#define QLC_DCB_LOCAL_PARAM_FWID       0x3
+#define QLC_DCB_OPER_PARAM_FWID                0x1
+#define QLC_DCB_PEER_PARAM_FWID                0x2
+
+#define QLC_83XX_DCB_GET_NUMAPP(X)     ((X >> 2) & 0xf)
+#define QLC_83XX_DCB_TSA_VALID(X)      (X & 0x1)
+#define QLC_83XX_DCB_PFC_VALID(X)      ((X >> 1) & 0x1)
+#define QLC_83XX_DCB_GET_PRIOMAP_APP(X)        (X >> 24)
+
+#define QLC_82XX_DCB_GET_NUMAPP(X)     ((X >> 12) & 0xf)
+#define QLC_82XX_DCB_TSA_VALID(X)      ((X >> 4) & 0x1)
+#define QLC_82XX_DCB_PFC_VALID(X)      ((X >> 5) & 0x1)
+#define QLC_82XX_DCB_GET_PRIOVAL_APP(X)        ((X >> 24) & 0x7)
+#define QLC_82XX_DCB_GET_PRIOMAP_APP(X)        (1 << X)
+#define QLC_82XX_DCB_PRIO_TC_MAP       (0x76543210)
+
+static const struct dcbnl_rtnl_ops qlcnic_dcbnl_ops;
+
+static void qlcnic_dcb_aen_work(struct work_struct *);
+static void qlcnic_dcb_data_cee_param_map(struct qlcnic_adapter *);
+
+static inline void __qlcnic_init_dcbnl_ops(struct qlcnic_dcb *);
+static void __qlcnic_dcb_free(struct qlcnic_dcb *);
+static int __qlcnic_dcb_attach(struct qlcnic_dcb *);
+static int __qlcnic_dcb_query_hw_capability(struct qlcnic_dcb *, char *);
+static void __qlcnic_dcb_get_info(struct qlcnic_dcb *);
+
+static int qlcnic_82xx_dcb_get_hw_capability(struct qlcnic_dcb *);
+static int qlcnic_82xx_dcb_query_cee_param(struct qlcnic_dcb *, char *, u8);
+static int qlcnic_82xx_dcb_get_cee_cfg(struct qlcnic_dcb *);
+static void qlcnic_82xx_dcb_aen_handler(struct qlcnic_dcb *, void *);
+
+static int qlcnic_83xx_dcb_get_hw_capability(struct qlcnic_dcb *);
+static int qlcnic_83xx_dcb_query_cee_param(struct qlcnic_dcb *, char *, u8);
+static int qlcnic_83xx_dcb_get_cee_cfg(struct qlcnic_dcb *);
+static void qlcnic_83xx_dcb_aen_handler(struct qlcnic_dcb *, void *);
+
+struct qlcnic_dcb_capability {
+       bool    tsa_capability;
+       bool    ets_capability;
+       u8      max_num_tc;
+       u8      max_ets_tc;
+       u8      max_pfc_tc;
+       u8      dcb_capability;
+};
+
+struct qlcnic_dcb_param {
+       u32 hdr_prio_pfc_map[2];
+       u32 prio_pg_map[2];
+       u32 pg_bw_map[2];
+       u32 pg_tsa_map[2];
+       u32 app[QLC_DCB_MAX_APP];
+};
+
+struct qlcnic_dcb_mbx_params {
+       /* 1st local, 2nd operational, 3rd remote */
+       struct qlcnic_dcb_param type[3];
+       u32 prio_tc_map;
+};
+
+struct qlcnic_82xx_dcb_param_mbx_le {
+       __le32 hdr_prio_pfc_map[2];
+       __le32 prio_pg_map[2];
+       __le32 pg_bw_map[2];
+       __le32 pg_tsa_map[2];
+       __le32 app[QLC_DCB_MAX_APP];
+};
+
+enum qlcnic_dcb_selector {
+       QLC_SELECTOR_DEF = 0x0,
+       QLC_SELECTOR_ETHER,
+       QLC_SELECTOR_TCP,
+       QLC_SELECTOR_UDP,
+};
+
+enum qlcnic_dcb_prio_type {
+       QLC_PRIO_NONE = 0,
+       QLC_PRIO_GROUP,
+       QLC_PRIO_LINK,
+};
+
+enum qlcnic_dcb_pfc_type {
+       QLC_PFC_DISABLED = 0,
+       QLC_PFC_FULL,
+       QLC_PFC_TX,
+       QLC_PFC_RX
+};
+
+struct qlcnic_dcb_prio_cfg {
+       bool valid;
+       enum qlcnic_dcb_pfc_type pfc_type;
+};
+
+struct qlcnic_dcb_pg_cfg {
+       bool valid;
+       u8 total_bw_percent;            /* % of link/port BW */
+       u8 prio_count;
+       u8 tsa_type;
+};
+
+struct qlcnic_dcb_tc_cfg {
+       bool valid;
+       struct qlcnic_dcb_prio_cfg prio_cfg[QLC_DCB_MAX_PRIO];
+       enum qlcnic_dcb_prio_type prio_type;    /* always prio_link */
+       u8 link_percent;                        /* % of link bandwidth */
+       u8 bwg_percent;                         /* % of BWG's bandwidth */
+       u8 up_tc_map;
+       u8 pgid;
+};
+
+struct qlcnic_dcb_app {
+       bool valid;
+       enum qlcnic_dcb_selector selector;
+       u16 protocol;
+       u8 priority;
+};
+
+struct qlcnic_dcb_cee {
+       struct qlcnic_dcb_tc_cfg tc_cfg[QLC_DCB_MAX_TC];
+       struct qlcnic_dcb_pg_cfg pg_cfg[QLC_DCB_MAX_PG];
+       struct qlcnic_dcb_app app[QLC_DCB_MAX_APP];
+       bool tc_param_valid;
+       bool pfc_mode_enable;
+};
+
+struct qlcnic_dcb_cfg {
+       /* 0 - local, 1 - operational, 2 - remote */
+       struct qlcnic_dcb_cee type[QLC_DCB_NUM_PARAM];
+       struct qlcnic_dcb_capability capability;
+       u32 version;
+};
+
+static const struct qlcnic_dcb_ops qlcnic_83xx_dcb_ops = {
+       .init_dcbnl_ops         = __qlcnic_init_dcbnl_ops,
+       .free                   = __qlcnic_dcb_free,
+       .attach                 = __qlcnic_dcb_attach,
+       .query_hw_capability    = __qlcnic_dcb_query_hw_capability,
+       .get_info               = __qlcnic_dcb_get_info,
+
+       .get_hw_capability      = qlcnic_83xx_dcb_get_hw_capability,
+       .query_cee_param        = qlcnic_83xx_dcb_query_cee_param,
+       .get_cee_cfg            = qlcnic_83xx_dcb_get_cee_cfg,
+       .aen_handler            = qlcnic_83xx_dcb_aen_handler,
+};
+
+static const struct qlcnic_dcb_ops qlcnic_82xx_dcb_ops = {
+       .init_dcbnl_ops         = __qlcnic_init_dcbnl_ops,
+       .free                   = __qlcnic_dcb_free,
+       .attach                 = __qlcnic_dcb_attach,
+       .query_hw_capability    = __qlcnic_dcb_query_hw_capability,
+       .get_info               = __qlcnic_dcb_get_info,
+
+       .get_hw_capability      = qlcnic_82xx_dcb_get_hw_capability,
+       .query_cee_param        = qlcnic_82xx_dcb_query_cee_param,
+       .get_cee_cfg            = qlcnic_82xx_dcb_get_cee_cfg,
+       .aen_handler            = qlcnic_82xx_dcb_aen_handler,
+};
+
+static u8 qlcnic_dcb_get_num_app(struct qlcnic_adapter *adapter, u32 val)
+{
+       if (qlcnic_82xx_check(adapter))
+               return QLC_82XX_DCB_GET_NUMAPP(val);
+       else
+               return QLC_83XX_DCB_GET_NUMAPP(val);
+}
+
+static inline u8 qlcnic_dcb_pfc_hdr_valid(struct qlcnic_adapter *adapter,
+                                         u32 val)
+{
+       if (qlcnic_82xx_check(adapter))
+               return QLC_82XX_DCB_PFC_VALID(val);
+       else
+               return QLC_83XX_DCB_PFC_VALID(val);
+}
+
+static inline u8 qlcnic_dcb_tsa_hdr_valid(struct qlcnic_adapter *adapter,
+                                         u32 val)
+{
+       if (qlcnic_82xx_check(adapter))
+               return QLC_82XX_DCB_TSA_VALID(val);
+       else
+               return QLC_83XX_DCB_TSA_VALID(val);
+}
+
+static inline u8 qlcnic_dcb_get_prio_map_app(struct qlcnic_adapter *adapter,
+                                            u32 val)
+{
+       if (qlcnic_82xx_check(adapter))
+               return QLC_82XX_DCB_GET_PRIOMAP_APP(val);
+       else
+               return QLC_83XX_DCB_GET_PRIOMAP_APP(val);
+}
+
+static int qlcnic_dcb_prio_count(u8 up_tc_map)
+{
+       int j;
+
+       for (j = 0; j < QLC_DCB_MAX_TC; j++)
+               if (up_tc_map & QLC_DCB_GET_MAP(j))
+                       break;
+
+       return j;
+}
+
+static inline void __qlcnic_init_dcbnl_ops(struct qlcnic_dcb *dcb)
+{
+       if (test_bit(QLCNIC_DCB_STATE, &dcb->state))
+               dcb->adapter->netdev->dcbnl_ops = &qlcnic_dcbnl_ops;
+}
+
+static void qlcnic_set_dcb_ops(struct qlcnic_adapter *adapter)
+{
+       if (qlcnic_82xx_check(adapter))
+               adapter->dcb->ops = &qlcnic_82xx_dcb_ops;
+       else if (qlcnic_83xx_check(adapter))
+               adapter->dcb->ops = &qlcnic_83xx_dcb_ops;
+}
+
+int qlcnic_register_dcb(struct qlcnic_adapter *adapter)
+{
+       struct qlcnic_dcb *dcb;
+
+       if (qlcnic_sriov_vf_check(adapter))
+               return 0;
+
+       dcb = kzalloc(sizeof(struct qlcnic_dcb), GFP_ATOMIC);
+       if (!dcb)
+               return -ENOMEM;
+
+       adapter->dcb = dcb;
+       dcb->adapter = adapter;
+       qlcnic_set_dcb_ops(adapter);
+       dcb->state = 0;
+
+       return 0;
+}
+
+static void __qlcnic_dcb_free(struct qlcnic_dcb *dcb)
+{
+       struct qlcnic_adapter *adapter;
+
+       if (!dcb)
+               return;
+
+       adapter = dcb->adapter;
+
+       while (test_bit(QLCNIC_DCB_AEN_MODE, &dcb->state))
+               usleep_range(10000, 11000);
+
+       cancel_delayed_work_sync(&dcb->aen_work);
+
+       if (dcb->wq) {
+               destroy_workqueue(dcb->wq);
+               dcb->wq = NULL;
+       }
+
+       kfree(dcb->cfg);
+       dcb->cfg = NULL;
+       kfree(dcb->param);
+       dcb->param = NULL;
+       kfree(dcb);
+       adapter->dcb = NULL;
+}
+
+static void __qlcnic_dcb_get_info(struct qlcnic_dcb *dcb)
+{
+       qlcnic_dcb_get_hw_capability(dcb);
+       qlcnic_dcb_get_cee_cfg(dcb);
+}
+
+static int __qlcnic_dcb_attach(struct qlcnic_dcb *dcb)
+{
+       int err = 0;
+
+       INIT_DELAYED_WORK(&dcb->aen_work, qlcnic_dcb_aen_work);
+
+       dcb->wq = create_singlethread_workqueue("qlcnic-dcb");
+       if (!dcb->wq) {
+               dev_err(&dcb->adapter->pdev->dev,
+                       "DCB workqueue allocation failed. DCB will be disabled\n");
+               return -1;
+       }
+
+       dcb->cfg = kzalloc(sizeof(struct qlcnic_dcb_cfg), GFP_ATOMIC);
+       if (!dcb->cfg) {
+               err = -ENOMEM;
+               goto out_free_wq;
+       }
+
+       dcb->param = kzalloc(sizeof(struct qlcnic_dcb_mbx_params), GFP_ATOMIC);
+       if (!dcb->param) {
+               err = -ENOMEM;
+               goto out_free_cfg;
+       }
+
+       return 0;
+out_free_cfg:
+       kfree(dcb->cfg);
+       dcb->cfg = NULL;
+
+out_free_wq:
+       destroy_workqueue(dcb->wq);
+       dcb->wq = NULL;
+
+       return err;
+}
+
+static int __qlcnic_dcb_query_hw_capability(struct qlcnic_dcb *dcb, char *buf)
+{
+       struct qlcnic_adapter *adapter = dcb->adapter;
+       struct qlcnic_cmd_args cmd;
+       u32 mbx_out;
+       int err;
+
+       err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_DCB_QUERY_CAP);
+       if (err)
+               return err;
+
+       err = qlcnic_issue_cmd(adapter, &cmd);
+       if (err) {
+               dev_err(&adapter->pdev->dev,
+                       "Failed to query DCBX capability, err %d\n", err);
+       } else {
+               mbx_out = cmd.rsp.arg[1];
+               if (buf)
+                       memcpy(buf, &mbx_out, sizeof(u32));
+       }
+
+       qlcnic_free_mbx_args(&cmd);
+
+       return err;
+}
+
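+/* Decode the DCBX capability word returned by QLCNIC_CMD_DCB_QUERY_CAP:
+ * bit 0 reports TSA support, bit 1 ETS support, and bits 20-23/24-27/28-31
+ * the maximum number of traffic classes, ETS classes and PFC classes.  A TC
+ * count above QLC_DCB_MAX_TC, or ETS/PFC counts above the TC count, is
+ * rejected as invalid.
+ */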
+static int __qlcnic_dcb_get_capability(struct qlcnic_dcb *dcb, u32 *val)
+{
+       struct qlcnic_dcb_capability *cap = &dcb->cfg->capability;
+       u32 mbx_out;
+       int err;
+
+       memset(cap, 0, sizeof(struct qlcnic_dcb_capability));
+
+       err = qlcnic_dcb_query_hw_capability(dcb, (char *)val);
+       if (err)
+               return err;
+
+       mbx_out = *val;
+       if (QLC_DCB_TSA_SUPPORT(mbx_out))
+               cap->tsa_capability = true;
+
+       if (QLC_DCB_ETS_SUPPORT(mbx_out))
+               cap->ets_capability = true;
+
+       cap->max_num_tc = QLC_DCB_MAX_NUM_TC(mbx_out);
+       cap->max_ets_tc = QLC_DCB_MAX_NUM_ETS_TC(mbx_out);
+       cap->max_pfc_tc = QLC_DCB_MAX_NUM_PFC_TC(mbx_out);
+
+       if (cap->max_num_tc > QLC_DCB_MAX_TC ||
+           cap->max_ets_tc > cap->max_num_tc ||
+           cap->max_pfc_tc > cap->max_num_tc) {
+               dev_err(&dcb->adapter->pdev->dev, "Invalid DCB configuration\n");
+               return -EINVAL;
+       }
+
+       return err;
+}
+
+static int qlcnic_82xx_dcb_get_hw_capability(struct qlcnic_dcb *dcb)
+{
+       struct qlcnic_dcb_cfg *cfg = dcb->cfg;
+       struct qlcnic_dcb_capability *cap;
+       u32 mbx_out;
+       int err;
+
+       err = __qlcnic_dcb_get_capability(dcb, &mbx_out);
+       if (err)
+               return err;
+
+       cap = &cfg->capability;
+       cap->dcb_capability = DCB_CAP_DCBX_VER_CEE | DCB_CAP_DCBX_LLD_MANAGED;
+
+       if (cap->dcb_capability && cap->tsa_capability && cap->ets_capability)
+               set_bit(QLCNIC_DCB_STATE, &dcb->state);
+
+       return err;
+}
+
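+/* 82xx CEE parameter query: the firmware writes one
+ * qlcnic_82xx_dcb_param_mbx_le block, selected by the local/operational/peer
+ * FWID in the upper half of arg1, into a DMA buffer that is then converted
+ * field by field from little endian into the caller's qlcnic_dcb_param.
+ */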
+static int qlcnic_82xx_dcb_query_cee_param(struct qlcnic_dcb *dcb,
+                                          char *buf, u8 type)
+{
+       u16 size = sizeof(struct qlcnic_82xx_dcb_param_mbx_le);
+       struct qlcnic_adapter *adapter = dcb->adapter;
+       struct qlcnic_82xx_dcb_param_mbx_le *prsp_le;
+       struct device *dev = &adapter->pdev->dev;
+       dma_addr_t cardrsp_phys_addr;
+       struct qlcnic_dcb_param rsp;
+       struct qlcnic_cmd_args cmd;
+       u64 phys_addr;
+       void *addr;
+       int err, i;
+
+       switch (type) {
+       case QLC_DCB_LOCAL_PARAM_FWID:
+       case QLC_DCB_OPER_PARAM_FWID:
+       case QLC_DCB_PEER_PARAM_FWID:
+               break;
+       default:
+               dev_err(dev, "Invalid parameter type %d\n", type);
+               return -EINVAL;
+       }
+
+       addr = dma_alloc_coherent(dev, size, &cardrsp_phys_addr, GFP_KERNEL);
+       if (addr == NULL)
+               return -ENOMEM;
+
+       prsp_le = addr;
+
+       err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_DCB_QUERY_PARAM);
+       if (err)
+               goto out_free_rsp;
+
+       phys_addr = cardrsp_phys_addr;
+       cmd.req.arg[1] = size | (type << 16);
+       cmd.req.arg[2] = MSD(phys_addr);
+       cmd.req.arg[3] = LSD(phys_addr);
+
+       err = qlcnic_issue_cmd(adapter, &cmd);
+       if (err) {
+               dev_err(dev, "Failed to query DCBX parameter, err %d\n", err);
+               goto out;
+       }
+
+       memset(&rsp, 0, sizeof(struct qlcnic_dcb_param));
+       rsp.hdr_prio_pfc_map[0] = le32_to_cpu(prsp_le->hdr_prio_pfc_map[0]);
+       rsp.hdr_prio_pfc_map[1] = le32_to_cpu(prsp_le->hdr_prio_pfc_map[1]);
+       rsp.prio_pg_map[0] = le32_to_cpu(prsp_le->prio_pg_map[0]);
+       rsp.prio_pg_map[1] = le32_to_cpu(prsp_le->prio_pg_map[1]);
+       rsp.pg_bw_map[0] = le32_to_cpu(prsp_le->pg_bw_map[0]);
+       rsp.pg_bw_map[1] = le32_to_cpu(prsp_le->pg_bw_map[1]);
+       rsp.pg_tsa_map[0] = le32_to_cpu(prsp_le->pg_tsa_map[0]);
+       rsp.pg_tsa_map[1] = le32_to_cpu(prsp_le->pg_tsa_map[1]);
+
+       for (i = 0; i < QLC_DCB_MAX_APP; i++)
+               rsp.app[i] = le32_to_cpu(prsp_le->app[i]);
+
+       if (buf)
+               memcpy(buf, &rsp, size);
+out:
+       qlcnic_free_mbx_args(&cmd);
+
+out_free_rsp:
+       dma_free_coherent(dev, size, addr, cardrsp_phys_addr);
+
+       return err;
+}
+
+static int qlcnic_82xx_dcb_get_cee_cfg(struct qlcnic_dcb *dcb)
+{
+       struct qlcnic_dcb_mbx_params *mbx;
+       int err;
+
+       mbx = dcb->param;
+       if (!mbx)
+               return 0;
+
+       err = qlcnic_dcb_query_cee_param(dcb, (char *)&mbx->type[0],
+                                        QLC_DCB_LOCAL_PARAM_FWID);
+       if (err)
+               return err;
+
+       err = qlcnic_dcb_query_cee_param(dcb, (char *)&mbx->type[1],
+                                        QLC_DCB_OPER_PARAM_FWID);
+       if (err)
+               return err;
+
+       err = qlcnic_dcb_query_cee_param(dcb, (char *)&mbx->type[2],
+                                        QLC_DCB_PEER_PARAM_FWID);
+       if (err)
+               return err;
+
+       mbx->prio_tc_map = QLC_82XX_DCB_PRIO_TC_MAP;
+
+       qlcnic_dcb_data_cee_param_map(dcb->adapter);
+
+       return err;
+}
+
+static void qlcnic_dcb_aen_work(struct work_struct *work)
+{
+       struct qlcnic_dcb *dcb;
+
+       dcb = container_of(work, struct qlcnic_dcb, aen_work.work);
+
+       qlcnic_dcb_get_cee_cfg(dcb);
+       clear_bit(QLCNIC_DCB_AEN_MODE, &dcb->state);
+}
+
+static void qlcnic_82xx_dcb_aen_handler(struct qlcnic_dcb *dcb, void *data)
+{
+       if (test_and_set_bit(QLCNIC_DCB_AEN_MODE, &dcb->state))
+               return;
+
+       queue_delayed_work(dcb->wq, &dcb->aen_work, 0);
+}
+
+static int qlcnic_83xx_dcb_get_hw_capability(struct qlcnic_dcb *dcb)
+{
+       struct qlcnic_dcb_capability *cap = &dcb->cfg->capability;
+       u32 mbx_out;
+       int err;
+
+       err = __qlcnic_dcb_get_capability(dcb, &mbx_out);
+       if (err)
+               return err;
+
+       if (mbx_out & BIT_2)
+               cap->dcb_capability = DCB_CAP_DCBX_VER_CEE;
+       if (mbx_out & BIT_3)
+               cap->dcb_capability |= DCB_CAP_DCBX_VER_IEEE;
+       if (cap->dcb_capability)
+               cap->dcb_capability |= DCB_CAP_DCBX_LLD_MANAGED;
+
+       if (cap->dcb_capability && cap->tsa_capability && cap->ets_capability)
+               set_bit(QLCNIC_DCB_STATE, &dcb->state);
+
+       return err;
+}
+
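+/* 83xx CEE parameter query: the whole result is carried in the mailbox
+ * response itself.  arg1 holds the priority-to-TC map, and the local,
+ * operational and peer parameter blocks follow as 16 dwords each, starting
+ * at response indices 2, 18 and 34; everything is packed into the caller's
+ * qlcnic_dcb_mbx_params-sized buffer.
+ */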
+static int qlcnic_83xx_dcb_query_cee_param(struct qlcnic_dcb *dcb,
+                                          char *buf, u8 idx)
+{
+       struct qlcnic_adapter *adapter = dcb->adapter;
+       struct qlcnic_dcb_mbx_params mbx_out;
+       int err, i, j, k, max_app, size;
+       struct qlcnic_dcb_param *each;
+       struct qlcnic_cmd_args cmd;
+       u32 val;
+       char *p;
+
+       size = 0;
+       memset(&mbx_out, 0, sizeof(struct qlcnic_dcb_mbx_params));
+       memset(buf, 0, sizeof(struct qlcnic_dcb_mbx_params));
+
+       err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_DCB_QUERY_PARAM);
+       if (err)
+               return err;
+
+       cmd.req.arg[0] |= QLC_DCB_FW_VER << 29;
+       err = qlcnic_issue_cmd(adapter, &cmd);
+       if (err) {
+               dev_err(&adapter->pdev->dev,
+                       "Failed to query DCBX param, err %d\n", err);
+               goto out;
+       }
+
+       mbx_out.prio_tc_map = cmd.rsp.arg[1];
+       p = memcpy(buf, &mbx_out, sizeof(u32));
+       k = 2;
+       p += sizeof(u32);
+
+       for (j = 0; j < QLC_DCB_NUM_PARAM; j++) {
+               each = &mbx_out.type[j];
+
+               each->hdr_prio_pfc_map[0] = cmd.rsp.arg[k++];
+               each->hdr_prio_pfc_map[1] = cmd.rsp.arg[k++];
+               each->prio_pg_map[0] = cmd.rsp.arg[k++];
+               each->prio_pg_map[1] = cmd.rsp.arg[k++];
+               each->pg_bw_map[0] = cmd.rsp.arg[k++];
+               each->pg_bw_map[1] = cmd.rsp.arg[k++];
+               each->pg_tsa_map[0] = cmd.rsp.arg[k++];
+               each->pg_tsa_map[1] = cmd.rsp.arg[k++];
+               val = each->hdr_prio_pfc_map[0];
+
+               max_app = qlcnic_dcb_get_num_app(adapter, val);
+               for (i = 0; i < max_app; i++)
+                       each->app[i] = cmd.rsp.arg[i + k];
+
+               size = 16 * sizeof(u32);
+               memcpy(p, &each->hdr_prio_pfc_map[0], size);
+               p += size;
+               if (j == 0)
+                       k = 18;
+               else
+                       k = 34;
+       }
+out:
+       qlcnic_free_mbx_args(&cmd);
+
+       return err;
+}
+
+static int qlcnic_83xx_dcb_get_cee_cfg(struct qlcnic_dcb *dcb)
+{
+       int err;
+
+       err = qlcnic_dcb_query_cee_param(dcb, (char *)dcb->param, 0);
+       if (err)
+               return err;
+
+       qlcnic_dcb_data_cee_param_map(dcb->adapter);
+
+       return err;
+}
+
+static void qlcnic_83xx_dcb_aen_handler(struct qlcnic_dcb *dcb, void *data)
+{
+       u32 *val = data;
+
+       if (test_and_set_bit(QLCNIC_DCB_AEN_MODE, &dcb->state))
+               return;
+
+       if (*val & BIT_8)
+               set_bit(QLCNIC_DCB_STATE, &dcb->state);
+       else
+               clear_bit(QLCNIC_DCB_STATE, &dcb->state);
+
+       queue_delayed_work(dcb->wq, &dcb->aen_work, 0);
+}
+
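+/* Translate the raw mailbox maps into the CEE tc_cfg table: each priority
+ * owns a 3-bit traffic-class field in prio_tc_map and an 8-bit PGID field in
+ * prio_pg_map[0]/[1], while the per-priority PFC enable bits sit in the top
+ * byte of hdr_prio_pfc_map[1].
+ */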
+static void qlcnic_dcb_fill_cee_tc_params(struct qlcnic_dcb_mbx_params *mbx,
+                                         struct qlcnic_dcb_param *each,
+                                         struct qlcnic_dcb_cee *type)
+{
+       struct qlcnic_dcb_tc_cfg *tc_cfg;
+       u8 i, tc, pgid;
+
+       for (i = 0; i < QLC_DCB_MAX_PRIO; i++) {
+               tc = QLC_DCB_GET_TC_PRIO(mbx->prio_tc_map, i);
+               tc_cfg = &type->tc_cfg[tc];
+               tc_cfg->valid = true;
+               tc_cfg->up_tc_map |= QLC_DCB_GET_MAP(i);
+
+               if (QLC_DCB_GET_PFC_PRIO(each->hdr_prio_pfc_map[1], i) &&
+                   type->pfc_mode_enable) {
+                       tc_cfg->prio_cfg[i].valid = true;
+                       tc_cfg->prio_cfg[i].pfc_type = QLC_PFC_FULL;
+               }
+
+               if (i < 4)
+                       pgid = QLC_DCB_GET_PGID_PRIO(each->prio_pg_map[0], i);
+               else
+                       pgid = QLC_DCB_GET_PGID_PRIO(each->prio_pg_map[1], i);
+
+               tc_cfg->pgid = pgid;
+
+               tc_cfg->prio_type = QLC_PRIO_LINK;
+               type->pg_cfg[tc_cfg->pgid].prio_count++;
+       }
+}
+
+static void qlcnic_dcb_fill_cee_pg_params(struct qlcnic_dcb_param *each,
+                                         struct qlcnic_dcb_cee *type)
+{
+       struct qlcnic_dcb_pg_cfg *pg_cfg;
+       u8 i, tsa, bw_per;
+
+       for (i = 0; i < QLC_DCB_MAX_PG; i++) {
+               pg_cfg = &type->pg_cfg[i];
+               pg_cfg->valid = true;
+
+               if (i < 4) {
+                       bw_per = QLC_DCB_GET_BWPER_PG(each->pg_bw_map[0], i);
+                       tsa = QLC_DCB_GET_TSA_PG(each->pg_tsa_map[0], i);
+               } else {
+                       bw_per = QLC_DCB_GET_BWPER_PG(each->pg_bw_map[1], i);
+                       tsa = QLC_DCB_GET_TSA_PG(each->pg_tsa_map[1], i);
+               }
+
+               pg_cfg->total_bw_percent = bw_per;
+               pg_cfg->tsa_type = tsa;
+       }
+}
+
+static void
+qlcnic_dcb_fill_cee_app_params(struct qlcnic_adapter *adapter, u8 idx,
+                              struct qlcnic_dcb_param *each,
+                              struct qlcnic_dcb_cee *type)
+{
+       struct qlcnic_dcb_app *app;
+       u8 i, num_app, map, cnt;
+       struct dcb_app new_app;
+
+       num_app = qlcnic_dcb_get_num_app(adapter, each->hdr_prio_pfc_map[0]);
+       for (i = 0; i < num_app; i++) {
+               app = &type->app[i];
+               app->valid = true;
+
+               /* Only for CEE (-1) */
+               app->selector = QLC_DCB_GET_SELECTOR_APP(each->app[i]) - 1;
+               new_app.selector = app->selector;
+               app->protocol = QLC_DCB_GET_PROTO_ID_APP(each->app[i]);
+               new_app.protocol = app->protocol;
+               map = qlcnic_dcb_get_prio_map_app(adapter, each->app[i]);
+               cnt = qlcnic_dcb_prio_count(map);
+
+               if (cnt >= QLC_DCB_MAX_TC)
+                       cnt = 0;
+
+               app->priority = cnt;
+               new_app.priority = cnt;
+
+               if (idx == QLC_DCB_OPER_IDX && adapter->netdev->dcbnl_ops)
+                       dcb_setapp(adapter->netdev, &new_app);
+       }
+}
+
+static void qlcnic_dcb_map_cee_params(struct qlcnic_adapter *adapter, u8 idx)
+{
+       struct qlcnic_dcb_mbx_params *mbx = adapter->dcb->param;
+       struct qlcnic_dcb_param *each = &mbx->type[idx];
+       struct qlcnic_dcb_cfg *cfg = adapter->dcb->cfg;
+       struct qlcnic_dcb_cee *type = &cfg->type[idx];
+
+       type->tc_param_valid = false;
+       type->pfc_mode_enable = false;
+       memset(type->tc_cfg, 0,
+              sizeof(struct qlcnic_dcb_tc_cfg) * QLC_DCB_MAX_TC);
+       memset(type->pg_cfg, 0,
+              sizeof(struct qlcnic_dcb_pg_cfg) * QLC_DCB_MAX_TC);
+
+       if (qlcnic_dcb_pfc_hdr_valid(adapter, each->hdr_prio_pfc_map[0]) &&
+           cfg->capability.max_pfc_tc)
+               type->pfc_mode_enable = true;
+
+       if (qlcnic_dcb_tsa_hdr_valid(adapter, each->hdr_prio_pfc_map[0]) &&
+           cfg->capability.max_ets_tc)
+               type->tc_param_valid = true;
+
+       qlcnic_dcb_fill_cee_tc_params(mbx, each, type);
+       qlcnic_dcb_fill_cee_pg_params(each, type);
+       qlcnic_dcb_fill_cee_app_params(adapter, idx, each, type);
+}
+
+static void qlcnic_dcb_data_cee_param_map(struct qlcnic_adapter *adapter)
+{
+       int i;
+
+       for (i = 0; i < QLC_DCB_NUM_PARAM; i++)
+               qlcnic_dcb_map_cee_params(adapter, i);
+
+       dcbnl_cee_notify(adapter->netdev, RTM_GETDCB, DCB_CMD_CEE_GET, 0, 0);
+}
+
+static u8 qlcnic_dcb_get_state(struct net_device *netdev)
+{
+       struct qlcnic_adapter *adapter = netdev_priv(netdev);
+
+       return test_bit(QLCNIC_DCB_STATE, &adapter->dcb->state);
+}
+
+static void qlcnic_dcb_get_perm_hw_addr(struct net_device *netdev, u8 *addr)
+{
+       memcpy(addr, netdev->perm_addr, netdev->addr_len);
+}
+
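+/* Report the operational CEE traffic-class configuration to dcbnl.  The
+ * bandwidth percentage returned per TC is an even split (100 / count) among
+ * the traffic classes that share the same PGID.
+ */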
+static void
+qlcnic_dcb_get_pg_tc_cfg_tx(struct net_device *netdev, int tc, u8 *prio,
+                           u8 *pgid, u8 *bw_per, u8 *up_tc_map)
+{
+       struct qlcnic_adapter *adapter = netdev_priv(netdev);
+       struct qlcnic_dcb_tc_cfg *tc_cfg, *temp;
+       struct qlcnic_dcb_cee *type;
+       u8 i, cnt, pg;
+
+       type = &adapter->dcb->cfg->type[QLC_DCB_OPER_IDX];
+       *prio = *pgid = *bw_per = *up_tc_map = 0;
+
+       if (!test_bit(QLCNIC_DCB_STATE, &adapter->dcb->state) ||
+           !type->tc_param_valid)
+               return;
+
+       if (tc < 0 || (tc >= QLC_DCB_MAX_TC))
+               return;
+
+       tc_cfg = &type->tc_cfg[tc];
+       if (!tc_cfg->valid)
+               return;
+
+       *pgid = tc_cfg->pgid;
+       *prio = tc_cfg->prio_type;
+       *up_tc_map = tc_cfg->up_tc_map;
+       pg = *pgid;
+
+       for (i = 0, cnt = 0; i < QLC_DCB_MAX_TC; i++) {
+               temp = &type->tc_cfg[i];
+               if (temp->valid && (pg == temp->pgid))
+                       cnt++;
+       }
+
+       tc_cfg->bwg_percent = (100 / cnt);
+       *bw_per = tc_cfg->bwg_percent;
+}
+
+static void qlcnic_dcb_get_pg_bwg_cfg_tx(struct net_device *netdev, int pgid,
+                                        u8 *bw_pct)
+{
+       struct qlcnic_adapter *adapter = netdev_priv(netdev);
+       struct qlcnic_dcb_pg_cfg *pgcfg;
+       struct qlcnic_dcb_cee *type;
+
+       *bw_pct = 0;
+       type = &adapter->dcb->cfg->type[QLC_DCB_OPER_IDX];
+
+       if (!test_bit(QLCNIC_DCB_STATE, &adapter->dcb->state) ||
+           !type->tc_param_valid)
+               return;
+
+       if (pgid < 0 || pgid >= QLC_DCB_MAX_PG)
+               return;
+
+       pgcfg = &type->pg_cfg[pgid];
+       if (!pgcfg->valid)
+               return;
+
+       *bw_pct = pgcfg->total_bw_percent;
+}
+
+static void qlcnic_dcb_get_pfc_cfg(struct net_device *netdev, int prio,
+                                  u8 *setting)
+{
+       struct qlcnic_adapter *adapter = netdev_priv(netdev);
+       struct qlcnic_dcb_tc_cfg *tc_cfg;
+       u8 val = QLC_DCB_GET_MAP(prio);
+       struct qlcnic_dcb_cee *type;
+       u8 i;
+
+       *setting = 0;
+       type = &adapter->dcb->cfg->type[QLC_DCB_OPER_IDX];
+
+       if (!test_bit(QLCNIC_DCB_STATE, &adapter->dcb->state) ||
+           !type->pfc_mode_enable)
+               return;
+
+       for (i = 0; i < QLC_DCB_MAX_TC; i++) {
+               tc_cfg = &type->tc_cfg[i];
+               if (!tc_cfg->valid)
+                       continue;
+
+               if ((val & tc_cfg->up_tc_map) && (tc_cfg->prio_cfg[prio].valid))
+                       *setting = tc_cfg->prio_cfg[prio].pfc_type;
+       }
+}
+
+static u8 qlcnic_dcb_get_capability(struct net_device *netdev, int capid,
+                                   u8 *cap)
+{
+       struct qlcnic_adapter *adapter = netdev_priv(netdev);
+
+       if (!test_bit(QLCNIC_DCB_STATE, &adapter->dcb->state))
+               return 0;
+
+       switch (capid) {
+       case DCB_CAP_ATTR_PG:
+       case DCB_CAP_ATTR_UP2TC:
+       case DCB_CAP_ATTR_PFC:
+       case DCB_CAP_ATTR_GSP:
+               *cap = true;
+               break;
+       case DCB_CAP_ATTR_PG_TCS:
+       case DCB_CAP_ATTR_PFC_TCS:
+               *cap = 0x80;    /* 8 priorities for PGs */
+               break;
+       case DCB_CAP_ATTR_DCBX:
+               *cap = adapter->dcb->cfg->capability.dcb_capability;
+               break;
+       default:
+               *cap = false;
+       }
+
+       return 0;
+}
+
+static int qlcnic_dcb_get_num_tcs(struct net_device *netdev, int attr, u8 *num)
+{
+       struct qlcnic_adapter *adapter = netdev_priv(netdev);
+       struct qlcnic_dcb_cfg *cfg = adapter->dcb->cfg;
+
+       if (!test_bit(QLCNIC_DCB_STATE, &adapter->dcb->state))
+               return -EINVAL;
+
+       switch (attr) {
+       case DCB_NUMTCS_ATTR_PG:
+               *num = cfg->capability.max_ets_tc;
+               return 0;
+       case DCB_NUMTCS_ATTR_PFC:
+               *num = cfg->capability.max_pfc_tc;
+               return 0;
+       default:
+               return -EINVAL;
+       }
+}
+
+static int qlcnic_dcb_get_app(struct net_device *netdev, u8 idtype, u16 id)
+{
+       struct qlcnic_adapter *adapter = netdev_priv(netdev);
+       struct dcb_app app = {
+                               .selector = idtype,
+                               .protocol = id,
+                            };
+
+       if (!test_bit(QLCNIC_DCB_STATE, &adapter->dcb->state))
+               return -EINVAL;
+
+       return dcb_getapp(netdev, &app);
+}
+
+static u8 qlcnic_dcb_get_pfc_state(struct net_device *netdev)
+{
+       struct qlcnic_adapter *adapter = netdev_priv(netdev);
+       struct qlcnic_dcb *dcb = adapter->dcb;
+
+       if (!test_bit(QLCNIC_DCB_STATE, &dcb->state))
+               return 0;
+
+       return dcb->cfg->type[QLC_DCB_OPER_IDX].pfc_mode_enable;
+}
+
+static u8 qlcnic_dcb_get_dcbx(struct net_device *netdev)
+{
+       struct qlcnic_adapter *adapter = netdev_priv(netdev);
+       struct qlcnic_dcb_cfg *cfg = adapter->dcb->cfg;
+
+       if (!test_bit(QLCNIC_DCB_STATE, &adapter->dcb->state))
+               return 0;
+
+       return cfg->capability.dcb_capability;
+}
+
+static u8 qlcnic_dcb_get_feat_cfg(struct net_device *netdev, int fid, u8 *flag)
+{
+       struct qlcnic_adapter *adapter = netdev_priv(netdev);
+       struct qlcnic_dcb_cee *type;
+
+       if (!test_bit(QLCNIC_DCB_STATE, &adapter->dcb->state))
+               return 1;
+
+       type = &adapter->dcb->cfg->type[QLC_DCB_OPER_IDX];
+       *flag = 0;
+
+       switch (fid) {
+       case DCB_FEATCFG_ATTR_PG:
+               if (type->tc_param_valid)
+                       *flag |= DCB_FEATCFG_ENABLE;
+               else
+                       *flag |= DCB_FEATCFG_ERROR;
+               break;
+       case DCB_FEATCFG_ATTR_PFC:
+               if (type->pfc_mode_enable) {
+                       if (type->tc_cfg[0].prio_cfg[0].pfc_type)
+                               *flag |= DCB_FEATCFG_ENABLE;
+               } else {
+                       *flag |= DCB_FEATCFG_ERROR;
+               }
+               break;
+       case DCB_FEATCFG_ATTR_APP:
+               *flag |= DCB_FEATCFG_ENABLE;
+               break;
+       default:
+               netdev_err(netdev, "Invalid Feature ID %d\n", fid);
+               return 1;
+       }
+
+       return 0;
+}
+
+static inline void
+qlcnic_dcb_get_pg_tc_cfg_rx(struct net_device *netdev, int prio, u8 *prio_type,
+                           u8 *pgid, u8 *bw_pct, u8 *up_map)
+{
+       *prio_type = *pgid = *bw_pct = *up_map = 0;
+}
+
+static inline void
+qlcnic_dcb_get_pg_bwg_cfg_rx(struct net_device *netdev, int pgid, u8 *bw_pct)
+{
+       *bw_pct = 0;
+}
+
+static int qlcnic_dcb_peer_app_info(struct net_device *netdev,
+                                   struct dcb_peer_app_info *info,
+                                   u16 *app_count)
+{
+       struct qlcnic_adapter *adapter = netdev_priv(netdev);
+       struct qlcnic_dcb_cee *peer;
+       int i;
+
+       memset(info, 0, sizeof(*info));
+       *app_count = 0;
+
+       if (!test_bit(QLCNIC_DCB_STATE, &adapter->dcb->state))
+               return 0;
+
+       peer = &adapter->dcb->cfg->type[QLC_DCB_PEER_IDX];
+
+       for (i = 0; i < QLC_DCB_MAX_APP; i++) {
+               if (peer->app[i].valid)
+                       (*app_count)++;
+       }
+
+       return 0;
+}
+
+static int qlcnic_dcb_peer_app_table(struct net_device *netdev,
+                                    struct dcb_app *table)
+{
+       struct qlcnic_adapter *adapter = netdev_priv(netdev);
+       struct qlcnic_dcb_cee *peer;
+       struct qlcnic_dcb_app *app;
+       int i, j;
+
+       if (!test_bit(QLCNIC_DCB_STATE, &adapter->dcb->state))
+               return 0;
+
+       peer = &adapter->dcb->cfg->type[QLC_DCB_PEER_IDX];
+
+       for (i = 0, j = 0; i < QLC_DCB_MAX_APP; i++) {
+               app = &peer->app[i];
+               if (!app->valid)
+                       continue;
+
+               table[j].selector = app->selector;
+               table[j].priority = app->priority;
+               table[j++].protocol = app->protocol;
+       }
+
+       return 0;
+}
+
+static int qlcnic_dcb_cee_peer_get_pg(struct net_device *netdev,
+                                     struct cee_pg *pg)
+{
+       struct qlcnic_adapter *adapter = netdev_priv(netdev);
+       struct qlcnic_dcb_cee *peer;
+       u8 i, j, k, map;
+
+       if (!test_bit(QLCNIC_DCB_STATE, &adapter->dcb->state))
+               return 0;
+
+       peer = &adapter->dcb->cfg->type[QLC_DCB_PEER_IDX];
+
+       for (i = 0, j = 0; i < QLC_DCB_MAX_PG; i++) {
+               if (!peer->pg_cfg[i].valid)
+                       continue;
+
+               pg->pg_bw[j] = peer->pg_cfg[i].total_bw_percent;
+
+               for (k = 0; k < QLC_DCB_MAX_TC; k++) {
+                       if (peer->tc_cfg[i].valid &&
+                           (peer->tc_cfg[i].pgid == i)) {
+                               map = peer->tc_cfg[i].up_tc_map;
+                               pg->prio_pg[j++] = map;
+                               break;
+                       }
+               }
+       }
+
+       return 0;
+}
+
+static int qlcnic_dcb_cee_peer_get_pfc(struct net_device *netdev,
+                                      struct cee_pfc *pfc)
+{
+       struct qlcnic_adapter *adapter = netdev_priv(netdev);
+       struct qlcnic_dcb_cfg *cfg = adapter->dcb->cfg;
+       struct qlcnic_dcb_tc_cfg *tc;
+       struct qlcnic_dcb_cee *peer;
+       u8 i, setting, prio;
+
+       pfc->pfc_en = 0;
+
+       if (!test_bit(QLCNIC_DCB_STATE, &adapter->dcb->state))
+               return 0;
+
+       peer = &cfg->type[QLC_DCB_PEER_IDX];
+
+       for (i = 0; i < QLC_DCB_MAX_TC; i++) {
+               tc = &peer->tc_cfg[i];
+               prio = qlcnic_dcb_prio_count(tc->up_tc_map);
+
+               setting = 0;
+               qlcnic_dcb_get_pfc_cfg(netdev, prio, &setting);
+               if (setting)
+                       pfc->pfc_en |= QLC_DCB_GET_MAP(i);
+       }
+
+       pfc->tcs_supported = cfg->capability.max_pfc_tc;
+
+       return 0;
+}
+
+static const struct dcbnl_rtnl_ops qlcnic_dcbnl_ops = {
+       .getstate               = qlcnic_dcb_get_state,
+       .getpermhwaddr          = qlcnic_dcb_get_perm_hw_addr,
+       .getpgtccfgtx           = qlcnic_dcb_get_pg_tc_cfg_tx,
+       .getpgbwgcfgtx          = qlcnic_dcb_get_pg_bwg_cfg_tx,
+       .getpfccfg              = qlcnic_dcb_get_pfc_cfg,
+       .getcap                 = qlcnic_dcb_get_capability,
+       .getnumtcs              = qlcnic_dcb_get_num_tcs,
+       .getapp                 = qlcnic_dcb_get_app,
+       .getpfcstate            = qlcnic_dcb_get_pfc_state,
+       .getdcbx                = qlcnic_dcb_get_dcbx,
+       .getfeatcfg             = qlcnic_dcb_get_feat_cfg,
+
+       .getpgtccfgrx           = qlcnic_dcb_get_pg_tc_cfg_rx,
+       .getpgbwgcfgrx          = qlcnic_dcb_get_pg_bwg_cfg_rx,
+
+       .peer_getappinfo        = qlcnic_dcb_peer_app_info,
+       .peer_getapptable       = qlcnic_dcb_peer_app_table,
+       .cee_peer_getpg         = qlcnic_dcb_cee_peer_get_pg,
+       .cee_peer_getpfc        = qlcnic_dcb_cee_peer_get_pfc,
+};
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.h
new file mode 100644 (file)
index 0000000..f4aa633
--- /dev/null
@@ -0,0 +1,121 @@
+/*
+ * QLogic qlcnic NIC Driver
+ * Copyright (c)  2009-2013 QLogic Corporation
+ *
+ * See LICENSE.qlcnic for copyright and licensing details.
+ */
+
+#ifndef __QLCNIC_DCBX_H
+#define __QLCNIC_DCBX_H
+
+#define QLCNIC_DCB_STATE       0
+#define QLCNIC_DCB_AEN_MODE    1
+
+#ifdef CONFIG_QLCNIC_DCB
+int qlcnic_register_dcb(struct qlcnic_adapter *);
+#else
+static inline int qlcnic_register_dcb(struct qlcnic_adapter *adapter)
+{ return 0; }
+#endif
+
+struct qlcnic_dcb;
+
+struct qlcnic_dcb_ops {
+       int (*query_hw_capability) (struct qlcnic_dcb *, char *);
+       int (*get_hw_capability) (struct qlcnic_dcb *);
+       int (*query_cee_param) (struct qlcnic_dcb *, char *, u8);
+       void (*init_dcbnl_ops) (struct qlcnic_dcb *);
+       void (*aen_handler) (struct qlcnic_dcb *, void *);
+       int (*get_cee_cfg) (struct qlcnic_dcb *);
+       void (*get_info) (struct qlcnic_dcb *);
+       int (*attach) (struct qlcnic_dcb *);
+       void (*free) (struct qlcnic_dcb *);
+};
+
+struct qlcnic_dcb {
+       struct qlcnic_dcb_mbx_params    *param;
+       struct qlcnic_adapter           *adapter;
+       struct delayed_work             aen_work;
+       struct workqueue_struct         *wq;
+       const struct qlcnic_dcb_ops     *ops;
+       struct qlcnic_dcb_cfg           *cfg;
+       unsigned long                   state;
+};
+
+static inline void qlcnic_clear_dcb_ops(struct qlcnic_dcb *dcb)
+{
+       kfree(dcb);
+}
+
+static inline int qlcnic_dcb_get_hw_capability(struct qlcnic_dcb *dcb)
+{
+       if (dcb && dcb->ops->get_hw_capability)
+               return dcb->ops->get_hw_capability(dcb);
+
+       return 0;
+}
+
+static inline void qlcnic_dcb_free(struct qlcnic_dcb *dcb)
+{
+       if (dcb && dcb->ops->free)
+               dcb->ops->free(dcb);
+}
+
+static inline int qlcnic_dcb_attach(struct qlcnic_dcb *dcb)
+{
+       if (dcb && dcb->ops->attach)
+               return dcb->ops->attach(dcb);
+
+       return 0;
+}
+
+static inline int
+qlcnic_dcb_query_hw_capability(struct qlcnic_dcb *dcb, char *buf)
+{
+       if (dcb && dcb->ops->query_hw_capability)
+               return dcb->ops->query_hw_capability(dcb, buf);
+
+       return 0;
+}
+
+static inline void qlcnic_dcb_get_info(struct qlcnic_dcb *dcb)
+{
+       if (dcb && dcb->ops->get_info)
+               dcb->ops->get_info(dcb);
+}
+
+static inline int
+qlcnic_dcb_query_cee_param(struct qlcnic_dcb *dcb, char *buf, u8 type)
+{
+       if (dcb && dcb->ops->query_cee_param)
+               return dcb->ops->query_cee_param(dcb, buf, type);
+
+       return 0;
+}
+
+static inline int qlcnic_dcb_get_cee_cfg(struct qlcnic_dcb *dcb)
+{
+       if (dcb && dcb->ops->get_cee_cfg)
+               return dcb->ops->get_cee_cfg(dcb);
+
+       return 0;
+}
+
+static inline void qlcnic_dcb_aen_handler(struct qlcnic_dcb *dcb, void *msg)
+{
+       if (dcb && dcb->ops->aen_handler)
+               dcb->ops->aen_handler(dcb, msg);
+}
+
+static inline void qlcnic_dcb_init_dcbnl_ops(struct qlcnic_dcb *dcb)
+{
+       if (dcb && dcb->ops->init_dcbnl_ops)
+               dcb->ops->init_dcbnl_ops(dcb);
+}
+
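+/* Attach DCB support; if the attach callback fails, the DCB context is freed */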
+static inline void qlcnic_dcb_enable(struct qlcnic_dcb *dcb)
+{
+       if (dcb && qlcnic_dcb_attach(dcb))
+               qlcnic_clear_dcb_ops(dcb);
+}
+#endif
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
new file mode 100644 (file)
index 0000000..0a2318c
--- /dev/null
@@ -0,0 +1,1883 @@
+/*
+ * QLogic qlcnic NIC Driver
+ * Copyright (c) 2009-2013 QLogic Corporation
+ *
+ * See LICENSE.qlcnic for copyright and licensing details.
+ */
+
+#include <linux/types.h>
+#include <linux/delay.h>
+#include <linux/pci.h>
+#include <linux/io.h>
+#include <linux/netdevice.h>
+#include <linux/ethtool.h>
+
+#include "qlcnic.h"
+
+struct qlcnic_stats {
+       char stat_string[ETH_GSTRING_LEN];
+       int sizeof_stat;
+       int stat_offset;
+};
+
+#define QLC_SIZEOF(m) FIELD_SIZEOF(struct qlcnic_adapter, m)
+#define QLC_OFF(m) offsetof(struct qlcnic_adapter, m)
+static const u32 qlcnic_fw_dump_level[] = {
+       0x3, 0x7, 0xf, 0x1f, 0x3f, 0x7f, 0xff
+};
+
+static const struct qlcnic_stats qlcnic_gstrings_stats[] = {
+       {"xmit_on", QLC_SIZEOF(stats.xmit_on), QLC_OFF(stats.xmit_on)},
+       {"xmit_off", QLC_SIZEOF(stats.xmit_off), QLC_OFF(stats.xmit_off)},
+       {"xmit_called", QLC_SIZEOF(stats.xmitcalled),
+        QLC_OFF(stats.xmitcalled)},
+       {"xmit_finished", QLC_SIZEOF(stats.xmitfinished),
+        QLC_OFF(stats.xmitfinished)},
+       {"tx dma map error", QLC_SIZEOF(stats.tx_dma_map_error),
+        QLC_OFF(stats.tx_dma_map_error)},
+       {"tx_bytes", QLC_SIZEOF(stats.txbytes), QLC_OFF(stats.txbytes)},
+       {"tx_dropped", QLC_SIZEOF(stats.txdropped), QLC_OFF(stats.txdropped)},
+       {"rx dma map error", QLC_SIZEOF(stats.rx_dma_map_error),
+        QLC_OFF(stats.rx_dma_map_error)},
+       {"rx_pkts", QLC_SIZEOF(stats.rx_pkts), QLC_OFF(stats.rx_pkts)},
+       {"rx_bytes", QLC_SIZEOF(stats.rxbytes), QLC_OFF(stats.rxbytes)},
+       {"rx_dropped", QLC_SIZEOF(stats.rxdropped), QLC_OFF(stats.rxdropped)},
+       {"null rxbuf", QLC_SIZEOF(stats.null_rxbuf), QLC_OFF(stats.null_rxbuf)},
+       {"csummed", QLC_SIZEOF(stats.csummed), QLC_OFF(stats.csummed)},
+       {"lro_pkts", QLC_SIZEOF(stats.lro_pkts), QLC_OFF(stats.lro_pkts)},
+       {"lrobytes", QLC_SIZEOF(stats.lrobytes), QLC_OFF(stats.lrobytes)},
+       {"lso_frames", QLC_SIZEOF(stats.lso_frames), QLC_OFF(stats.lso_frames)},
+       {"encap_lso_frames", QLC_SIZEOF(stats.encap_lso_frames),
+        QLC_OFF(stats.encap_lso_frames)},
+       {"encap_tx_csummed", QLC_SIZEOF(stats.encap_tx_csummed),
+        QLC_OFF(stats.encap_tx_csummed)},
+       {"encap_rx_csummed", QLC_SIZEOF(stats.encap_rx_csummed),
+        QLC_OFF(stats.encap_rx_csummed)},
+       {"skb_alloc_failure", QLC_SIZEOF(stats.skb_alloc_failure),
+        QLC_OFF(stats.skb_alloc_failure)},
+       {"mac_filter_limit_overrun", QLC_SIZEOF(stats.mac_filter_limit_overrun),
+        QLC_OFF(stats.mac_filter_limit_overrun)},
+       {"spurious intr", QLC_SIZEOF(stats.spurious_intr),
+        QLC_OFF(stats.spurious_intr)},
+       {"mbx spurious intr", QLC_SIZEOF(stats.mbx_spurious_intr),
+        QLC_OFF(stats.mbx_spurious_intr)},
+};
+
+static const char qlcnic_device_gstrings_stats[][ETH_GSTRING_LEN] = {
+       "tx unicast frames",
+       "tx multicast frames",
+       "tx broadcast frames",
+       "tx dropped frames",
+       "tx errors",
+       "tx local frames",
+       "tx numbytes",
+       "rx unicast frames",
+       "rx multicast frames",
+       "rx broadcast frames",
+       "rx dropped frames",
+       "rx errors",
+       "rx local frames",
+       "rx numbytes",
+};
+
+static const char qlcnic_83xx_tx_stats_strings[][ETH_GSTRING_LEN] = {
+       "ctx_tx_bytes",
+       "ctx_tx_pkts",
+       "ctx_tx_errors",
+       "ctx_tx_dropped_pkts",
+       "ctx_tx_num_buffers",
+};
+
+static const char qlcnic_83xx_mac_stats_strings[][ETH_GSTRING_LEN] = {
+       "mac_tx_frames",
+       "mac_tx_bytes",
+       "mac_tx_mcast_pkts",
+       "mac_tx_bcast_pkts",
+       "mac_tx_pause_cnt",
+       "mac_tx_ctrl_pkt",
+       "mac_tx_lt_64b_pkts",
+       "mac_tx_lt_127b_pkts",
+       "mac_tx_lt_255b_pkts",
+       "mac_tx_lt_511b_pkts",
+       "mac_tx_lt_1023b_pkts",
+       "mac_tx_lt_1518b_pkts",
+       "mac_tx_gt_1518b_pkts",
+       "mac_rx_frames",
+       "mac_rx_bytes",
+       "mac_rx_mcast_pkts",
+       "mac_rx_bcast_pkts",
+       "mac_rx_pause_cnt",
+       "mac_rx_ctrl_pkt",
+       "mac_rx_lt_64b_pkts",
+       "mac_rx_lt_127b_pkts",
+       "mac_rx_lt_255b_pkts",
+       "mac_rx_lt_511b_pkts",
+       "mac_rx_lt_1023b_pkts",
+       "mac_rx_lt_1518b_pkts",
+       "mac_rx_gt_1518b_pkts",
+       "mac_rx_length_error",
+       "mac_rx_length_small",
+       "mac_rx_length_large",
+       "mac_rx_jabber",
+       "mac_rx_dropped",
+       "mac_crc_error",
+       "mac_align_error",
+       "eswitch_frames",
+       "eswitch_bytes",
+       "eswitch_multicast_frames",
+       "eswitch_broadcast_frames",
+       "eswitch_unicast_frames",
+       "eswitch_error_free_frames",
+       "eswitch_error_free_bytes",
+};
+
+#define QLCNIC_STATS_LEN       ARRAY_SIZE(qlcnic_gstrings_stats)
+
+static const char qlcnic_tx_queue_stats_strings[][ETH_GSTRING_LEN] = {
+       "xmit_on",
+       "xmit_off",
+       "xmit_called",
+       "xmit_finished",
+       "tx_bytes",
+};
+
+#define QLCNIC_TX_STATS_LEN    ARRAY_SIZE(qlcnic_tx_queue_stats_strings)
+
+static const char qlcnic_83xx_rx_stats_strings[][ETH_GSTRING_LEN] = {
+       "ctx_rx_bytes",
+       "ctx_rx_pkts",
+       "ctx_lro_pkt_cnt",
+       "ctx_ip_csum_error",
+       "ctx_rx_pkts_wo_ctx",
+       "ctx_rx_pkts_drop_wo_sds_on_card",
+       "ctx_rx_pkts_drop_wo_sds_on_host",
+       "ctx_rx_osized_pkts",
+       "ctx_rx_pkts_dropped_wo_rds",
+       "ctx_rx_unexpected_mcast_pkts",
+       "ctx_invalid_mac_address",
+       "ctx_rx_rds_ring_prim_attempted",
+       "ctx_rx_rds_ring_prim_success",
+       "ctx_num_lro_flows_added",
+       "ctx_num_lro_flows_removed",
+       "ctx_num_lro_flows_active",
+       "ctx_pkts_dropped_unknown",
+};
+
+static const char qlcnic_gstrings_test[][ETH_GSTRING_LEN] = {
+       "Register_Test_on_offline",
+       "Link_Test_on_offline",
+       "Interrupt_Test_offline",
+       "Internal_Loopback_offline",
+       "External_Loopback_offline",
+       "EEPROM_Test_offline"
+};
+
+#define QLCNIC_TEST_LEN        ARRAY_SIZE(qlcnic_gstrings_test)
+
+static inline int qlcnic_82xx_statistics(struct qlcnic_adapter *adapter)
+{
+       return ARRAY_SIZE(qlcnic_gstrings_stats) +
+              ARRAY_SIZE(qlcnic_83xx_mac_stats_strings) +
+              QLCNIC_TX_STATS_LEN * adapter->drv_tx_rings;
+}
+
+static inline int qlcnic_83xx_statistics(struct qlcnic_adapter *adapter)
+{
+       return ARRAY_SIZE(qlcnic_gstrings_stats) +
+              ARRAY_SIZE(qlcnic_83xx_tx_stats_strings) +
+              ARRAY_SIZE(qlcnic_83xx_mac_stats_strings) +
+              ARRAY_SIZE(qlcnic_83xx_rx_stats_strings) +
+              QLCNIC_TX_STATS_LEN * adapter->drv_tx_rings;
+}
+
+static int qlcnic_dev_statistics_len(struct qlcnic_adapter *adapter)
+{
+       int len = -1;
+
+       if (qlcnic_82xx_check(adapter)) {
+               len = qlcnic_82xx_statistics(adapter);
+               if (adapter->flags & QLCNIC_ESWITCH_ENABLED)
+                       len += ARRAY_SIZE(qlcnic_device_gstrings_stats);
+       } else if (qlcnic_83xx_check(adapter)) {
+               len = qlcnic_83xx_statistics(adapter);
+       }
+
+       return len;
+}
+
+#define        QLCNIC_TX_INTR_NOT_CONFIGURED   0x78563412
+
+#define QLCNIC_MAX_EEPROM_LEN   1024
+
+static const u32 diag_registers[] = {
+       QLCNIC_CMDPEG_STATE,
+       QLCNIC_RCVPEG_STATE,
+       QLCNIC_FW_CAPABILITIES,
+       QLCNIC_CRB_DRV_ACTIVE,
+       QLCNIC_CRB_DEV_STATE,
+       QLCNIC_CRB_DRV_STATE,
+       QLCNIC_CRB_DRV_SCRATCH,
+       QLCNIC_CRB_DEV_PARTITION_INFO,
+       QLCNIC_CRB_DRV_IDC_VER,
+       QLCNIC_PEG_ALIVE_COUNTER,
+       QLCNIC_PEG_HALT_STATUS1,
+       QLCNIC_PEG_HALT_STATUS2,
+       -1
+};
+
+static const u32 ext_diag_registers[] = {
+       CRB_XG_STATE_P3P,
+       ISR_INT_STATE_REG,
+       QLCNIC_CRB_PEG_NET_0+0x3c,
+       QLCNIC_CRB_PEG_NET_1+0x3c,
+       QLCNIC_CRB_PEG_NET_2+0x3c,
+       QLCNIC_CRB_PEG_NET_4+0x3c,
+       -1
+};
+
+#define QLCNIC_MGMT_API_VERSION        3
+#define QLCNIC_ETHTOOL_REGS_VER        4
+
+static inline int qlcnic_get_ring_regs_len(struct qlcnic_adapter *adapter)
+{
+       int ring_regs_cnt = (adapter->drv_tx_rings * 5) +
+                           (adapter->max_rds_rings * 2) +
+                           (adapter->drv_sds_rings * 3) + 5;
+       return ring_regs_cnt * sizeof(u32);
+}
+
+static int qlcnic_get_regs_len(struct net_device *dev)
+{
+       struct qlcnic_adapter *adapter = netdev_priv(dev);
+       u32 len;
+
+       if (qlcnic_83xx_check(adapter))
+               len = qlcnic_83xx_get_regs_len(adapter);
+       else
+               len = sizeof(ext_diag_registers) + sizeof(diag_registers);
+
+       len += ((QLCNIC_DEV_INFO_SIZE + 2) * sizeof(u32));
+       len += qlcnic_get_ring_regs_len(adapter);
+       return len;
+}
+
+static int qlcnic_get_eeprom_len(struct net_device *dev)
+{
+       return QLCNIC_FLASH_TOTAL_SIZE;
+}
+
+static void
+qlcnic_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo)
+{
+       struct qlcnic_adapter *adapter = netdev_priv(dev);
+       u32 fw_major, fw_minor, fw_build;
+
+       fw_major = QLC_SHARED_REG_RD32(adapter, QLCNIC_FW_VERSION_MAJOR);
+       fw_minor = QLC_SHARED_REG_RD32(adapter, QLCNIC_FW_VERSION_MINOR);
+       fw_build = QLC_SHARED_REG_RD32(adapter, QLCNIC_FW_VERSION_SUB);
+       snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
+               "%d.%d.%d", fw_major, fw_minor, fw_build);
+
+       strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
+               sizeof(drvinfo->bus_info));
+       strlcpy(drvinfo->driver, qlcnic_driver_name, sizeof(drvinfo->driver));
+       strlcpy(drvinfo->version, QLCNIC_LINUX_VERSIONID,
+               sizeof(drvinfo->version));
+}
+
+static int qlcnic_82xx_get_settings(struct qlcnic_adapter *adapter,
+                                   struct ethtool_cmd *ecmd)
+{
+       struct qlcnic_hardware_context *ahw = adapter->ahw;
+       u32 speed, reg;
+       int check_sfp_module = 0, err = 0;
+       u16 pcifn = ahw->pci_func;
+
+       /* read which mode */
+       if (adapter->ahw->port_type == QLCNIC_GBE) {
+               ecmd->supported = (SUPPORTED_10baseT_Half |
+                                  SUPPORTED_10baseT_Full |
+                                  SUPPORTED_100baseT_Half |
+                                  SUPPORTED_100baseT_Full |
+                                  SUPPORTED_1000baseT_Half |
+                                  SUPPORTED_1000baseT_Full);
+
+               ecmd->advertising = (ADVERTISED_100baseT_Half |
+                                    ADVERTISED_100baseT_Full |
+                                    ADVERTISED_1000baseT_Half |
+                                    ADVERTISED_1000baseT_Full);
+
+               ethtool_cmd_speed_set(ecmd, adapter->ahw->link_speed);
+               ecmd->duplex = adapter->ahw->link_duplex;
+               ecmd->autoneg = adapter->ahw->link_autoneg;
+
+       } else if (adapter->ahw->port_type == QLCNIC_XGBE) {
+               u32 val = 0;
+               val = QLCRD32(adapter, QLCNIC_PORT_MODE_ADDR, &err);
+
+               if (val == QLCNIC_PORT_MODE_802_3_AP) {
+                       ecmd->supported = SUPPORTED_1000baseT_Full;
+                       ecmd->advertising = ADVERTISED_1000baseT_Full;
+               } else {
+                       ecmd->supported = SUPPORTED_10000baseT_Full;
+                       ecmd->advertising = ADVERTISED_10000baseT_Full;
+               }
+
+               if (netif_running(adapter->netdev) && ahw->has_link_events) {
+                       if (ahw->linkup) {
+                               reg = QLCRD32(adapter,
+                                             P3P_LINK_SPEED_REG(pcifn), &err);
+                               speed = P3P_LINK_SPEED_VAL(pcifn, reg);
+                               ahw->link_speed = speed * P3P_LINK_SPEED_MHZ;
+                       }
+
+                       ethtool_cmd_speed_set(ecmd, ahw->link_speed);
+                       ecmd->autoneg = ahw->link_autoneg;
+                       ecmd->duplex = ahw->link_duplex;
+                       goto skip;
+               }
+
+               ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN);
+               ecmd->duplex = DUPLEX_UNKNOWN;
+               ecmd->autoneg = AUTONEG_DISABLE;
+       } else
+               return -EIO;
+
+skip:
+       ecmd->phy_address = adapter->ahw->physical_port;
+       ecmd->transceiver = XCVR_EXTERNAL;
+
+       switch (adapter->ahw->board_type) {
+       case QLCNIC_BRDTYPE_P3P_REF_QG:
+       case QLCNIC_BRDTYPE_P3P_4_GB:
+       case QLCNIC_BRDTYPE_P3P_4_GB_MM:
+
+               ecmd->supported |= SUPPORTED_Autoneg;
+               ecmd->advertising |= ADVERTISED_Autoneg;
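+               /* fall through - these boards also take the TP settings below */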
+       case QLCNIC_BRDTYPE_P3P_10G_CX4:
+       case QLCNIC_BRDTYPE_P3P_10G_CX4_LP:
+       case QLCNIC_BRDTYPE_P3P_10000_BASE_T:
+               ecmd->supported |= SUPPORTED_TP;
+               ecmd->advertising |= ADVERTISED_TP;
+               ecmd->port = PORT_TP;
+               ecmd->autoneg =  adapter->ahw->link_autoneg;
+               break;
+       case QLCNIC_BRDTYPE_P3P_IMEZ:
+       case QLCNIC_BRDTYPE_P3P_XG_LOM:
+       case QLCNIC_BRDTYPE_P3P_HMEZ:
+               ecmd->supported |= SUPPORTED_MII;
+               ecmd->advertising |= ADVERTISED_MII;
+               ecmd->port = PORT_MII;
+               ecmd->autoneg = AUTONEG_DISABLE;
+               break;
+       case QLCNIC_BRDTYPE_P3P_10G_SFP_PLUS:
+       case QLCNIC_BRDTYPE_P3P_10G_SFP_CT:
+       case QLCNIC_BRDTYPE_P3P_10G_SFP_QT:
+               ecmd->advertising |= ADVERTISED_TP;
+               ecmd->supported |= SUPPORTED_TP;
+               check_sfp_module = netif_running(adapter->netdev) &&
+                                  ahw->has_link_events;
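+               /* fall through - SFP+ boards also report the fibre settings below */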
+       case QLCNIC_BRDTYPE_P3P_10G_XFP:
+               ecmd->supported |= SUPPORTED_FIBRE;
+               ecmd->advertising |= ADVERTISED_FIBRE;
+               ecmd->port = PORT_FIBRE;
+               ecmd->autoneg = AUTONEG_DISABLE;
+               break;
+       case QLCNIC_BRDTYPE_P3P_10G_TP:
+               if (adapter->ahw->port_type == QLCNIC_XGBE) {
+                       ecmd->autoneg = AUTONEG_DISABLE;
+                       ecmd->supported |= (SUPPORTED_FIBRE | SUPPORTED_TP);
+                       ecmd->advertising |=
+                               (ADVERTISED_FIBRE | ADVERTISED_TP);
+                       ecmd->port = PORT_FIBRE;
+                       check_sfp_module = netif_running(adapter->netdev) &&
+                                          ahw->has_link_events;
+               } else {
+                       ecmd->autoneg = AUTONEG_ENABLE;
+                       ecmd->supported |= (SUPPORTED_TP | SUPPORTED_Autoneg);
+                       ecmd->advertising |=
+                               (ADVERTISED_TP | ADVERTISED_Autoneg);
+                       ecmd->port = PORT_TP;
+               }
+               break;
+       default:
+               dev_err(&adapter->pdev->dev, "Unsupported board model %d\n",
+                       adapter->ahw->board_type);
+               return -EIO;
+       }
+
+       if (check_sfp_module) {
+               switch (adapter->ahw->module_type) {
+               case LINKEVENT_MODULE_OPTICAL_UNKNOWN:
+               case LINKEVENT_MODULE_OPTICAL_SRLR:
+               case LINKEVENT_MODULE_OPTICAL_LRM:
+               case LINKEVENT_MODULE_OPTICAL_SFP_1G:
+                       ecmd->port = PORT_FIBRE;
+                       break;
+               case LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLE:
+               case LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLELEN:
+               case LINKEVENT_MODULE_TWINAX:
+                       ecmd->port = PORT_TP;
+                       break;
+               default:
+                       ecmd->port = PORT_OTHER;
+               }
+       }
+
+       return 0;
+}
+
+static int qlcnic_get_settings(struct net_device *dev,
+                              struct ethtool_cmd *ecmd)
+{
+       struct qlcnic_adapter *adapter = netdev_priv(dev);
+
+       if (qlcnic_82xx_check(adapter))
+               return qlcnic_82xx_get_settings(adapter, ecmd);
+       else if (qlcnic_83xx_check(adapter))
+               return qlcnic_83xx_get_settings(adapter, ecmd);
+
+       return -EIO;
+}
+
+static int qlcnic_set_port_config(struct qlcnic_adapter *adapter,
+                                 struct ethtool_cmd *ecmd)
+{
+       u32 ret = 0, config = 0;
+       /* read which mode */
+       if (ecmd->duplex)
+               config |= 0x1;
+
+       if (ecmd->autoneg)
+               config |= 0x2;
+
+       switch (ethtool_cmd_speed(ecmd)) {
+       case SPEED_10:
+               config |= (0 << 8);
+               break;
+       case SPEED_100:
+               config |= (1 << 8);
+               break;
+       case SPEED_1000:
+               config |= (10 << 8);
+               break;
+       default:
+               return -EIO;
+       }
+
+       ret = qlcnic_fw_cmd_set_port(adapter, config);
+
+       if (ret == QLCNIC_RCODE_NOT_SUPPORTED)
+               return -EOPNOTSUPP;
+       else if (ret)
+               return -EIO;
+       return ret;
+}
+
+static int qlcnic_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
+{
+       u32 ret = 0;
+       struct qlcnic_adapter *adapter = netdev_priv(dev);
+
+       if (adapter->ahw->port_type != QLCNIC_GBE)
+               return -EOPNOTSUPP;
+
+       if (qlcnic_83xx_check(adapter))
+               ret = qlcnic_83xx_set_settings(adapter, ecmd);
+       else
+               ret = qlcnic_set_port_config(adapter, ecmd);
+
+       if (!ret)
+               return ret;
+
+       adapter->ahw->link_speed = ethtool_cmd_speed(ecmd);
+       adapter->ahw->link_duplex = ecmd->duplex;
+       adapter->ahw->link_autoneg = ecmd->autoneg;
+
+       if (!netif_running(dev))
+               return 0;
+
+       dev->netdev_ops->ndo_stop(dev);
+       return dev->netdev_ops->ndo_open(dev);
+}
+
+static int qlcnic_82xx_get_registers(struct qlcnic_adapter *adapter,
+                                    u32 *regs_buff)
+{
+       int i, j = 0, err = 0;
+
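+       /* Dump the shared diagnostic registers first, then the extended CRB registers */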
+       for (i = QLCNIC_DEV_INFO_SIZE + 1; diag_registers[j] != -1; j++, i++)
+               regs_buff[i] = QLC_SHARED_REG_RD32(adapter, diag_registers[j]);
+       j = 0;
+       while (ext_diag_registers[j] != -1)
+               regs_buff[i++] = QLCRD32(adapter, ext_diag_registers[j++],
+                                        &err);
+       return i;
+}
+
+static void
+qlcnic_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *p)
+{
+       struct qlcnic_adapter *adapter = netdev_priv(dev);
+       struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
+       struct qlcnic_host_sds_ring *sds_ring;
+       struct qlcnic_host_rds_ring *rds_rings;
+       struct qlcnic_host_tx_ring *tx_ring;
+       u32 *regs_buff = p;
+       int ring, i = 0;
+
+       memset(p, 0, qlcnic_get_regs_len(dev));
+
+       regs->version = (QLCNIC_ETHTOOL_REGS_VER << 24) |
+               (adapter->ahw->revision_id << 16) | (adapter->pdev)->device;
+
+       regs_buff[0] = (0xcafe0000 | (QLCNIC_DEV_INFO_SIZE & 0xffff));
+       regs_buff[1] = QLCNIC_MGMT_API_VERSION;
+
+       if (adapter->ahw->capabilities & QLC_83XX_ESWITCH_CAPABILITY)
+               regs_buff[2] = adapter->ahw->max_vnic_func;
+
+       if (qlcnic_82xx_check(adapter))
+               i = qlcnic_82xx_get_registers(adapter, regs_buff);
+       else
+               i = qlcnic_83xx_get_registers(adapter, regs_buff);
+
+       if (!test_bit(__QLCNIC_DEV_UP, &adapter->state))
+               return;
+
+       /* Marker btw regs and TX ring count */
+       regs_buff[i++] = 0xFFEFCDAB;
+
+       regs_buff[i++] = adapter->drv_tx_rings; /* No. of TX ring */
+       for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
+               tx_ring = &adapter->tx_ring[ring];
+               regs_buff[i++] = le32_to_cpu(*(tx_ring->hw_consumer));
+               regs_buff[i++] = tx_ring->sw_consumer;
+               regs_buff[i++] = readl(tx_ring->crb_cmd_producer);
+               regs_buff[i++] = tx_ring->producer;
+               if (tx_ring->crb_intr_mask)
+                       regs_buff[i++] = readl(tx_ring->crb_intr_mask);
+               else
+                       regs_buff[i++] = QLCNIC_TX_INTR_NOT_CONFIGURED;
+       }
+
+       regs_buff[i++] = adapter->max_rds_rings; /* No. of RX ring */
+       for (ring = 0; ring < adapter->max_rds_rings; ring++) {
+               rds_rings = &recv_ctx->rds_rings[ring];
+               regs_buff[i++] = readl(rds_rings->crb_rcv_producer);
+               regs_buff[i++] = rds_rings->producer;
+       }
+
+       regs_buff[i++] = adapter->drv_sds_rings; /* No. of SDS ring */
+       for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
+               sds_ring = &(recv_ctx->sds_rings[ring]);
+               regs_buff[i++] = readl(sds_ring->crb_sts_consumer);
+               regs_buff[i++] = sds_ring->consumer;
+               regs_buff[i++] = readl(sds_ring->crb_intr_mask);
+       }
+}
+
+static u32 qlcnic_test_link(struct net_device *dev)
+{
+       struct qlcnic_adapter *adapter = netdev_priv(dev);
+       int err = 0;
+       u32 val;
+
+       if (qlcnic_83xx_check(adapter)) {
+               val = qlcnic_83xx_test_link(adapter);
+               return (val & 1) ? 0 : 1;
+       }
+       val = QLCRD32(adapter, CRB_XG_STATE_P3P, &err);
+       if (err == -EIO)
+               return err;
+       val = XG_LINK_STATE_P3P(adapter->ahw->pci_func, val);
+       return (val == XG_LINK_UP_P3P) ? 0 : 1;
+}
+
+static int
+qlcnic_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
+                     u8 *bytes)
+{
+       struct qlcnic_adapter *adapter = netdev_priv(dev);
+       int offset;
+       int ret = -1;
+
+       if (qlcnic_83xx_check(adapter))
+               return 0;
+       if (eeprom->len == 0)
+               return -EINVAL;
+
+       eeprom->magic = (adapter->pdev)->vendor |
+                       ((adapter->pdev)->device << 16);
+       offset = eeprom->offset;
+
+       if (qlcnic_82xx_check(adapter))
+               ret = qlcnic_rom_fast_read_words(adapter, offset, bytes,
+                                                eeprom->len);
+       if (ret < 0)
+               return ret;
+
+       return 0;
+}
+
+static void
+qlcnic_get_ringparam(struct net_device *dev,
+               struct ethtool_ringparam *ring)
+{
+       struct qlcnic_adapter *adapter = netdev_priv(dev);
+
+       ring->rx_pending = adapter->num_rxd;
+       ring->rx_jumbo_pending = adapter->num_jumbo_rxd;
+       ring->tx_pending = adapter->num_txd;
+
+       ring->rx_max_pending = adapter->max_rxd;
+       ring->rx_jumbo_max_pending = adapter->max_jumbo_rxd;
+       ring->tx_max_pending = MAX_CMD_DESCRIPTORS;
+}
+
+static u32
+qlcnic_validate_ringparam(u32 val, u32 min, u32 max, char *r_name)
+{
+       u32 num_desc;
+       num_desc = max(val, min);
+       num_desc = min(num_desc, max);
+       num_desc = roundup_pow_of_two(num_desc);
+
+       if (val != num_desc) {
+               printk(KERN_INFO "%s: setting %s ring size %d instead of %d\n",
+                      qlcnic_driver_name, r_name, num_desc, val);
+       }
+
+       return num_desc;
+}
+
+static int
+qlcnic_set_ringparam(struct net_device *dev,
+               struct ethtool_ringparam *ring)
+{
+       struct qlcnic_adapter *adapter = netdev_priv(dev);
+       u16 num_rxd, num_jumbo_rxd, num_txd;
+
+       if (ring->rx_mini_pending)
+               return -EOPNOTSUPP;
+
+       num_rxd = qlcnic_validate_ringparam(ring->rx_pending,
+                       MIN_RCV_DESCRIPTORS, adapter->max_rxd, "rx");
+
+       num_jumbo_rxd = qlcnic_validate_ringparam(ring->rx_jumbo_pending,
+                       MIN_JUMBO_DESCRIPTORS, adapter->max_jumbo_rxd,
+                                               "rx jumbo");
+
+       num_txd = qlcnic_validate_ringparam(ring->tx_pending,
+                       MIN_CMD_DESCRIPTORS, MAX_CMD_DESCRIPTORS, "tx");
+
+       if (num_rxd == adapter->num_rxd && num_txd == adapter->num_txd &&
+                       num_jumbo_rxd == adapter->num_jumbo_rxd)
+               return 0;
+
+       adapter->num_rxd = num_rxd;
+       adapter->num_jumbo_rxd = num_jumbo_rxd;
+       adapter->num_txd = num_txd;
+
+       return qlcnic_reset_context(adapter);
+}
+
+static int qlcnic_validate_ring_count(struct qlcnic_adapter *adapter,
+                                     u8 rx_ring, u8 tx_ring)
+{
+       if (rx_ring == 0 || tx_ring == 0)
+               return -EINVAL;
+
+       if (rx_ring != 0) {
+               if (rx_ring > adapter->max_sds_rings) {
+                       netdev_err(adapter->netdev,
+                                  "Invalid ring count, SDS ring count %d should not be greater than max %d driver sds rings.\n",
+                                  rx_ring, adapter->max_sds_rings);
+                       return -EINVAL;
+               }
+       }
+
+       if (tx_ring != 0) {
+               if (tx_ring > adapter->max_tx_rings) {
+                       netdev_err(adapter->netdev,
+                                  "Invalid ring count, Tx ring count %d should not be greater than max %d driver Tx rings.\n",
+                                  tx_ring, adapter->max_tx_rings);
+                       return -EINVAL;
+               }
+       }
+
+       return 0;
+}
+
+static void qlcnic_get_channels(struct net_device *dev,
+               struct ethtool_channels *channel)
+{
+       struct qlcnic_adapter *adapter = netdev_priv(dev);
+
+       channel->max_rx = adapter->max_sds_rings;
+       channel->max_tx = adapter->max_tx_rings;
+       channel->rx_count = adapter->drv_sds_rings;
+       channel->tx_count = adapter->drv_tx_rings;
+}
+
+static int qlcnic_set_channels(struct net_device *dev,
+                              struct ethtool_channels *channel)
+{
+       struct qlcnic_adapter *adapter = netdev_priv(dev);
+       int err;
+
+       if (!(adapter->flags & QLCNIC_MSIX_ENABLED)) {
+               netdev_err(dev, "No RSS/TSS support in non MSI-X mode\n");
+               return -EINVAL;
+       }
+
+       if (channel->other_count || channel->combined_count)
+               return -EINVAL;
+
+       err = qlcnic_validate_ring_count(adapter, channel->rx_count,
+                                        channel->tx_count);
+       if (err)
+               return err;
+
+       if (adapter->drv_sds_rings != channel->rx_count) {
+               err = qlcnic_validate_rings(adapter, channel->rx_count,
+                                           QLCNIC_RX_QUEUE);
+               if (err) {
+                       netdev_err(dev, "Unable to configure %u SDS rings\n",
+                                  channel->rx_count);
+                       return err;
+               }
+               adapter->drv_rss_rings = channel->rx_count;
+       }
+
+       if (adapter->drv_tx_rings != channel->tx_count) {
+               err = qlcnic_validate_rings(adapter, channel->tx_count,
+                                           QLCNIC_TX_QUEUE);
+               if (err) {
+                       netdev_err(dev, "Unable to configure %u Tx rings\n",
+                                  channel->tx_count);
+                       return err;
+               }
+               adapter->drv_tss_rings = channel->tx_count;
+       }
+
+       adapter->flags |= QLCNIC_TSS_RSS;
+
+       err = qlcnic_setup_rings(adapter);
+       netdev_info(dev, "Allocated %d SDS rings and %d Tx rings\n",
+                   adapter->drv_sds_rings, adapter->drv_tx_rings);
+
+       return err;
+}
+
+static void
+qlcnic_get_pauseparam(struct net_device *netdev,
+                         struct ethtool_pauseparam *pause)
+{
+       struct qlcnic_adapter *adapter = netdev_priv(netdev);
+       int port = adapter->ahw->physical_port;
+       int err = 0;
+       __u32 val;
+
+       if (qlcnic_83xx_check(adapter)) {
+               qlcnic_83xx_get_pauseparam(adapter, pause);
+               return;
+       }
+       if (adapter->ahw->port_type == QLCNIC_GBE) {
+               if ((port < 0) || (port > QLCNIC_NIU_MAX_GBE_PORTS))
+                       return;
+               /* get flow control settings */
+               val = QLCRD32(adapter, QLCNIC_NIU_GB_MAC_CONFIG_0(port), &err);
+               if (err == -EIO)
+                       return;
+               pause->rx_pause = qlcnic_gb_get_rx_flowctl(val);
+               val = QLCRD32(adapter, QLCNIC_NIU_GB_PAUSE_CTL, &err);
+               if (err == -EIO)
+                       return;
+               switch (port) {
+               case 0:
+                       pause->tx_pause = !(qlcnic_gb_get_gb0_mask(val));
+                       break;
+               case 1:
+                       pause->tx_pause = !(qlcnic_gb_get_gb1_mask(val));
+                       break;
+               case 2:
+                       pause->tx_pause = !(qlcnic_gb_get_gb2_mask(val));
+                       break;
+               case 3:
+               default:
+                       pause->tx_pause = !(qlcnic_gb_get_gb3_mask(val));
+                       break;
+               }
+       } else if (adapter->ahw->port_type == QLCNIC_XGBE) {
+               if ((port < 0) || (port > QLCNIC_NIU_MAX_XG_PORTS))
+                       return;
+               pause->rx_pause = 1;
+               val = QLCRD32(adapter, QLCNIC_NIU_XG_PAUSE_CTL, &err);
+               if (err == -EIO)
+                       return;
+               if (port == 0)
+                       pause->tx_pause = !(qlcnic_xg_get_xg0_mask(val));
+               else
+                       pause->tx_pause = !(qlcnic_xg_get_xg1_mask(val));
+       } else {
+               dev_err(&netdev->dev, "Unknown board type: %x\n",
+                                       adapter->ahw->port_type);
+       }
+}
+
+static int
+qlcnic_set_pauseparam(struct net_device *netdev,
+                         struct ethtool_pauseparam *pause)
+{
+       struct qlcnic_adapter *adapter = netdev_priv(netdev);
+       int port = adapter->ahw->physical_port;
+       int err = 0;
+       __u32 val;
+
+       if (qlcnic_83xx_check(adapter))
+               return qlcnic_83xx_set_pauseparam(adapter, pause);
+
+       /* read mode */
+       if (adapter->ahw->port_type == QLCNIC_GBE) {
+               if ((port < 0) || (port > QLCNIC_NIU_MAX_GBE_PORTS))
+                       return -EIO;
+               /* set flow control */
+               val = QLCRD32(adapter, QLCNIC_NIU_GB_MAC_CONFIG_0(port), &err);
+               if (err == -EIO)
+                       return err;
+
+               if (pause->rx_pause)
+                       qlcnic_gb_rx_flowctl(val);
+               else
+                       qlcnic_gb_unset_rx_flowctl(val);
+
+               QLCWR32(adapter, QLCNIC_NIU_GB_MAC_CONFIG_0(port), val);
+               /* set autoneg */
+               val = QLCRD32(adapter, QLCNIC_NIU_GB_PAUSE_CTL, &err);
+               if (err == -EIO)
+                       return err;
+               switch (port) {
+               case 0:
+                       if (pause->tx_pause)
+                               qlcnic_gb_unset_gb0_mask(val);
+                       else
+                               qlcnic_gb_set_gb0_mask(val);
+                       break;
+               case 1:
+                       if (pause->tx_pause)
+                               qlcnic_gb_unset_gb1_mask(val);
+                       else
+                               qlcnic_gb_set_gb1_mask(val);
+                       break;
+               case 2:
+                       if (pause->tx_pause)
+                               qlcnic_gb_unset_gb2_mask(val);
+                       else
+                               qlcnic_gb_set_gb2_mask(val);
+                       break;
+               case 3:
+               default:
+                       if (pause->tx_pause)
+                               qlcnic_gb_unset_gb3_mask(val);
+                       else
+                               qlcnic_gb_set_gb3_mask(val);
+                       break;
+               }
+               QLCWR32(adapter, QLCNIC_NIU_GB_PAUSE_CTL, val);
+       } else if (adapter->ahw->port_type == QLCNIC_XGBE) {
+               if (!pause->rx_pause || pause->autoneg)
+                       return -EOPNOTSUPP;
+
+               if ((port < 0) || (port > QLCNIC_NIU_MAX_XG_PORTS))
+                       return -EIO;
+
+               val = QLCRD32(adapter, QLCNIC_NIU_XG_PAUSE_CTL, &err);
+               if (err == -EIO)
+                       return err;
+               if (port == 0) {
+                       if (pause->tx_pause)
+                               qlcnic_xg_unset_xg0_mask(val);
+                       else
+                               qlcnic_xg_set_xg0_mask(val);
+               } else {
+                       if (pause->tx_pause)
+                               qlcnic_xg_unset_xg1_mask(val);
+                       else
+                               qlcnic_xg_set_xg1_mask(val);
+               }
+               QLCWR32(adapter, QLCNIC_NIU_XG_PAUSE_CTL, val);
+       } else {
+               dev_err(&netdev->dev, "Unknown board type: %x\n",
+                               adapter->ahw->port_type);
+       }
+       return 0;
+}
+
+static int qlcnic_reg_test(struct net_device *dev)
+{
+       struct qlcnic_adapter *adapter = netdev_priv(dev);
+       u32 data_read;
+       int err = 0;
+
+       if (qlcnic_83xx_check(adapter))
+               return qlcnic_83xx_reg_test(adapter);
+
+       data_read = QLCRD32(adapter, QLCNIC_PCIX_PH_REG(0), &err);
+       if (err == -EIO)
+               return err;
+       if ((data_read & 0xffff) != adapter->pdev->vendor)
+               return 1;
+
+       return 0;
+}
+
+static int qlcnic_eeprom_test(struct net_device *dev)
+{
+       struct qlcnic_adapter *adapter = netdev_priv(dev);
+
+       if (qlcnic_82xx_check(adapter))
+               return 0;
+
+       return qlcnic_83xx_flash_test(adapter);
+}
+
+static int qlcnic_get_sset_count(struct net_device *dev, int sset)
+{
+       struct qlcnic_adapter *adapter = netdev_priv(dev);
+
+       switch (sset) {
+       case ETH_SS_TEST:
+               return QLCNIC_TEST_LEN;
+       case ETH_SS_STATS:
+               return qlcnic_dev_statistics_len(adapter);
+       default:
+               return -EOPNOTSUPP;
+       }
+}
+
+static int qlcnic_irq_test(struct net_device *netdev)
+{
+       struct qlcnic_adapter *adapter = netdev_priv(netdev);
+       struct qlcnic_hardware_context *ahw = adapter->ahw;
+       struct qlcnic_cmd_args cmd;
+       int ret, drv_sds_rings = adapter->drv_sds_rings;
+       int drv_tx_rings = adapter->drv_tx_rings;
+
+       if (qlcnic_83xx_check(adapter))
+               return qlcnic_83xx_interrupt_test(netdev);
+
+       if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
+               return -EIO;
+
+       ret = qlcnic_diag_alloc_res(netdev, QLCNIC_INTERRUPT_TEST);
+       if (ret)
+               goto clear_diag_irq;
+
+       ahw->diag_cnt = 0;
+       ret = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_INTRPT_TEST);
+       if (ret)
+               goto free_diag_res;
+
+       cmd.req.arg[1] = ahw->pci_func;
+       ret = qlcnic_issue_cmd(adapter, &cmd);
+       if (ret)
+               goto done;
+
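+       /* Give the requested test interrupt time to arrive; diag_cnt stays zero if it never does */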
+       usleep_range(1000, 12000);
+       ret = !ahw->diag_cnt;
+
+done:
+       qlcnic_free_mbx_args(&cmd);
+
+free_diag_res:
+       qlcnic_diag_free_res(netdev, drv_sds_rings);
+
+clear_diag_irq:
+       adapter->drv_sds_rings = drv_sds_rings;
+       adapter->drv_tx_rings = drv_tx_rings;
+       clear_bit(__QLCNIC_RESETTING, &adapter->state);
+
+       return ret;
+}
+
+#define QLCNIC_ILB_PKT_SIZE            64
+#define QLCNIC_NUM_ILB_PKT             16
+#define QLCNIC_ILB_MAX_RCV_LOOP                10
+#define QLCNIC_LB_PKT_POLL_DELAY_MSEC  1
+#define QLCNIC_LB_PKT_POLL_COUNT       20
+
+static void qlcnic_create_loopback_buff(unsigned char *data, u8 mac[])
+{
+       unsigned char random_data[] = {0xa8, 0x06, 0x45, 0x00};
+
+       memset(data, 0x4e, QLCNIC_ILB_PKT_SIZE);
+
+       memcpy(data, mac, ETH_ALEN);
+       memcpy(data + ETH_ALEN, mac, ETH_ALEN);
+
+       memcpy(data + 2 * ETH_ALEN, random_data, sizeof(random_data));
+}
+
+int qlcnic_check_loopback_buff(unsigned char *data, u8 mac[])
+{
+       unsigned char buff[QLCNIC_ILB_PKT_SIZE];
+       qlcnic_create_loopback_buff(buff, mac);
+       return memcmp(data, buff, QLCNIC_ILB_PKT_SIZE);
+}
+
+int qlcnic_do_lb_test(struct qlcnic_adapter *adapter, u8 mode)
+{
+       struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
+       struct qlcnic_host_sds_ring *sds_ring = &recv_ctx->sds_rings[0];
+       struct sk_buff *skb;
+       int i, loop, cnt = 0;
+
+       for (i = 0; i < QLCNIC_NUM_ILB_PKT; i++) {
+               skb = netdev_alloc_skb(adapter->netdev, QLCNIC_ILB_PKT_SIZE);
+               qlcnic_create_loopback_buff(skb->data, adapter->mac_addr);
+               skb_put(skb, QLCNIC_ILB_PKT_SIZE);
+               adapter->ahw->diag_cnt = 0;
+               qlcnic_xmit_frame(skb, adapter->netdev);
+               loop = 0;
+
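+               /* Poll the first SDS ring until the loopback frame is received back or the poll count expires */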
+               do {
+                       msleep(QLCNIC_LB_PKT_POLL_DELAY_MSEC);
+                       qlcnic_process_rcv_ring_diag(sds_ring);
+                       if (loop++ > QLCNIC_LB_PKT_POLL_COUNT)
+                               break;
+               } while (!adapter->ahw->diag_cnt);
+
+               dev_kfree_skb_any(skb);
+
+               if (!adapter->ahw->diag_cnt)
+                       dev_warn(&adapter->pdev->dev,
+                                "LB Test: packet #%d was not received\n",
+                                i + 1);
+               else
+                       cnt++;
+       }
+       if (cnt != i) {
+               dev_err(&adapter->pdev->dev,
+                       "LB Test: failed, TX[%d], RX[%d]\n", i, cnt);
+               if (mode != QLCNIC_ILB_MODE)
+                       dev_warn(&adapter->pdev->dev,
+                                "WARNING: Please check loopback cable\n");
+               return -1;
+       }
+       return 0;
+}
+
+static int qlcnic_loopback_test(struct net_device *netdev, u8 mode)
+{
+       struct qlcnic_adapter *adapter = netdev_priv(netdev);
+       int drv_tx_rings = adapter->drv_tx_rings;
+       int drv_sds_rings = adapter->drv_sds_rings;
+       struct qlcnic_host_sds_ring *sds_ring;
+       struct qlcnic_hardware_context *ahw = adapter->ahw;
+       int loop = 0;
+       int ret;
+
+       if (qlcnic_83xx_check(adapter))
+               return qlcnic_83xx_loopback_test(netdev, mode);
+
+       if (!(ahw->capabilities & QLCNIC_FW_CAPABILITY_MULTI_LOOPBACK)) {
+               dev_info(&adapter->pdev->dev,
+                        "Firmware does not support loopback test\n");
+               return -EOPNOTSUPP;
+       }
+
+       dev_warn(&adapter->pdev->dev, "%s loopback test in progress\n",
+                mode == QLCNIC_ILB_MODE ? "internal" : "external");
+       if (ahw->op_mode == QLCNIC_NON_PRIV_FUNC) {
+               dev_warn(&adapter->pdev->dev,
+                        "Loopback test not supported in nonprivileged mode\n");
+               return 0;
+       }
+
+       if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
+               return -EBUSY;
+
+       ret = qlcnic_diag_alloc_res(netdev, QLCNIC_LOOPBACK_TEST);
+       if (ret)
+               goto clear_it;
+
+       sds_ring = &adapter->recv_ctx->sds_rings[0];
+       ret = qlcnic_set_lb_mode(adapter, mode);
+       if (ret)
+               goto free_res;
+
+       ahw->diag_cnt = 0;
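+       /* Wait for the firmware link event confirming that loopback mode is configured */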
+       do {
+               msleep(500);
+               qlcnic_process_rcv_ring_diag(sds_ring);
+               if (loop++ > QLCNIC_ILB_MAX_RCV_LOOP) {
+                       netdev_info(netdev,
+                                   "Firmware didn't send link up event for loopback request\n");
+                       ret = -ETIMEDOUT;
+                       goto free_res;
+               } else if (adapter->ahw->diag_cnt) {
+                       ret = adapter->ahw->diag_cnt;
+                       goto free_res;
+               }
+       } while (!QLCNIC_IS_LB_CONFIGURED(ahw->loopback_state));
+
+       ret = qlcnic_do_lb_test(adapter, mode);
+
+       qlcnic_clear_lb_mode(adapter, mode);
+
+ free_res:
+       qlcnic_diag_free_res(netdev, drv_sds_rings);
+
+ clear_it:
+       adapter->drv_sds_rings = drv_sds_rings;
+       adapter->drv_tx_rings = drv_tx_rings;
+       clear_bit(__QLCNIC_RESETTING, &adapter->state);
+       return ret;
+}
+
+static void
+qlcnic_diag_test(struct net_device *dev, struct ethtool_test *eth_test,
+                    u64 *data)
+{
+       memset(data, 0, sizeof(u64) * QLCNIC_TEST_LEN);
+
+       data[0] = qlcnic_reg_test(dev);
+       if (data[0])
+               eth_test->flags |= ETH_TEST_FL_FAILED;
+
+       data[1] = (u64) qlcnic_test_link(dev);
+       if (data[1])
+               eth_test->flags |= ETH_TEST_FL_FAILED;
+
+       if (eth_test->flags & ETH_TEST_FL_OFFLINE) {
+               data[2] = qlcnic_irq_test(dev);
+               if (data[2])
+                       eth_test->flags |= ETH_TEST_FL_FAILED;
+
+               data[3] = qlcnic_loopback_test(dev, QLCNIC_ILB_MODE);
+               if (data[3])
+                       eth_test->flags |= ETH_TEST_FL_FAILED;
+
+               if (eth_test->flags & ETH_TEST_FL_EXTERNAL_LB) {
+                       data[4] = qlcnic_loopback_test(dev, QLCNIC_ELB_MODE);
+                       if (data[4])
+                               eth_test->flags |= ETH_TEST_FL_FAILED;
+                       eth_test->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;
+               }
+
+               data[5] = qlcnic_eeprom_test(dev);
+               if (data[5])
+                       eth_test->flags |= ETH_TEST_FL_FAILED;
+       }
+}
+
+static void
+qlcnic_get_strings(struct net_device *dev, u32 stringset, u8 *data)
+{
+       struct qlcnic_adapter *adapter = netdev_priv(dev);
+       int index, i, num_stats;
+
+       switch (stringset) {
+       case ETH_SS_TEST:
+               memcpy(data, *qlcnic_gstrings_test,
+                      QLCNIC_TEST_LEN * ETH_GSTRING_LEN);
+               break;
+       case ETH_SS_STATS:
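+               /* String order: per-Tx-queue stats, common stats, then chip-specific and eswitch stats */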
+               num_stats = ARRAY_SIZE(qlcnic_tx_queue_stats_strings);
+               for (i = 0; i < adapter->drv_tx_rings; i++) {
+                       for (index = 0; index < num_stats; index++) {
+                               sprintf(data, "tx_queue_%d %s", i,
+                                       qlcnic_tx_queue_stats_strings[index]);
+                               data += ETH_GSTRING_LEN;
+                       }
+               }
+
+               for (index = 0; index < QLCNIC_STATS_LEN; index++) {
+                       memcpy(data + index * ETH_GSTRING_LEN,
+                              qlcnic_gstrings_stats[index].stat_string,
+                              ETH_GSTRING_LEN);
+               }
+
+               if (qlcnic_83xx_check(adapter)) {
+                       num_stats = ARRAY_SIZE(qlcnic_83xx_tx_stats_strings);
+                       for (i = 0; i < num_stats; i++, index++)
+                               memcpy(data + index * ETH_GSTRING_LEN,
+                                      qlcnic_83xx_tx_stats_strings[i],
+                                      ETH_GSTRING_LEN);
+                       num_stats = ARRAY_SIZE(qlcnic_83xx_mac_stats_strings);
+                       for (i = 0; i < num_stats; i++, index++)
+                               memcpy(data + index * ETH_GSTRING_LEN,
+                                      qlcnic_83xx_mac_stats_strings[i],
+                                      ETH_GSTRING_LEN);
+                       num_stats = ARRAY_SIZE(qlcnic_83xx_rx_stats_strings);
+                       for (i = 0; i < num_stats; i++, index++)
+                               memcpy(data + index * ETH_GSTRING_LEN,
+                                      qlcnic_83xx_rx_stats_strings[i],
+                                      ETH_GSTRING_LEN);
+                       return;
+               } else {
+                       num_stats = ARRAY_SIZE(qlcnic_83xx_mac_stats_strings);
+                       for (i = 0; i < num_stats; i++, index++)
+                               memcpy(data + index * ETH_GSTRING_LEN,
+                                      qlcnic_83xx_mac_stats_strings[i],
+                                      ETH_GSTRING_LEN);
+               }
+               if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED))
+                       return;
+               num_stats = ARRAY_SIZE(qlcnic_device_gstrings_stats);
+               for (i = 0; i < num_stats; index++, i++) {
+                       memcpy(data + index * ETH_GSTRING_LEN,
+                              qlcnic_device_gstrings_stats[i],
+                              ETH_GSTRING_LEN);
+               }
+       }
+}
+
+static u64 *qlcnic_fill_stats(u64 *data, void *stats, int type)
+{
+       if (type == QLCNIC_MAC_STATS) {
+               struct qlcnic_mac_statistics *mac_stats =
+                                       (struct qlcnic_mac_statistics *)stats;
+               *data++ = QLCNIC_FILL_STATS(mac_stats->mac_tx_frames);
+               *data++ = QLCNIC_FILL_STATS(mac_stats->mac_tx_bytes);
+               *data++ = QLCNIC_FILL_STATS(mac_stats->mac_tx_mcast_pkts);
+               *data++ = QLCNIC_FILL_STATS(mac_stats->mac_tx_bcast_pkts);
+               *data++ = QLCNIC_FILL_STATS(mac_stats->mac_tx_pause_cnt);
+               *data++ = QLCNIC_FILL_STATS(mac_stats->mac_tx_ctrl_pkt);
+               *data++ = QLCNIC_FILL_STATS(mac_stats->mac_tx_lt_64b_pkts);
+               *data++ = QLCNIC_FILL_STATS(mac_stats->mac_tx_lt_127b_pkts);
+               *data++ = QLCNIC_FILL_STATS(mac_stats->mac_tx_lt_255b_pkts);
+               *data++ = QLCNIC_FILL_STATS(mac_stats->mac_tx_lt_511b_pkts);
+               *data++ = QLCNIC_FILL_STATS(mac_stats->mac_tx_lt_1023b_pkts);
+               *data++ = QLCNIC_FILL_STATS(mac_stats->mac_tx_lt_1518b_pkts);
+               *data++ = QLCNIC_FILL_STATS(mac_stats->mac_tx_gt_1518b_pkts);
+               *data++ = QLCNIC_FILL_STATS(mac_stats->mac_rx_frames);
+               *data++ = QLCNIC_FILL_STATS(mac_stats->mac_rx_bytes);
+               *data++ = QLCNIC_FILL_STATS(mac_stats->mac_rx_mcast_pkts);
+               *data++ = QLCNIC_FILL_STATS(mac_stats->mac_rx_bcast_pkts);
+               *data++ = QLCNIC_FILL_STATS(mac_stats->mac_rx_pause_cnt);
+               *data++ = QLCNIC_FILL_STATS(mac_stats->mac_rx_ctrl_pkt);
+               *data++ = QLCNIC_FILL_STATS(mac_stats->mac_rx_lt_64b_pkts);
+               *data++ = QLCNIC_FILL_STATS(mac_stats->mac_rx_lt_127b_pkts);
+               *data++ = QLCNIC_FILL_STATS(mac_stats->mac_rx_lt_255b_pkts);
+               *data++ = QLCNIC_FILL_STATS(mac_stats->mac_rx_lt_511b_pkts);
+               *data++ = QLCNIC_FILL_STATS(mac_stats->mac_rx_lt_1023b_pkts);
+               *data++ = QLCNIC_FILL_STATS(mac_stats->mac_rx_lt_1518b_pkts);
+               *data++ = QLCNIC_FILL_STATS(mac_stats->mac_rx_gt_1518b_pkts);
+               *data++ = QLCNIC_FILL_STATS(mac_stats->mac_rx_length_error);
+               *data++ = QLCNIC_FILL_STATS(mac_stats->mac_rx_length_small);
+               *data++ = QLCNIC_FILL_STATS(mac_stats->mac_rx_length_large);
+               *data++ = QLCNIC_FILL_STATS(mac_stats->mac_rx_jabber);
+               *data++ = QLCNIC_FILL_STATS(mac_stats->mac_rx_dropped);
+               *data++ = QLCNIC_FILL_STATS(mac_stats->mac_rx_crc_error);
+               *data++ = QLCNIC_FILL_STATS(mac_stats->mac_align_error);
+       } else if (type == QLCNIC_ESW_STATS) {
+               struct __qlcnic_esw_statistics *esw_stats =
+                               (struct __qlcnic_esw_statistics *)stats;
+               *data++ = QLCNIC_FILL_STATS(esw_stats->unicast_frames);
+               *data++ = QLCNIC_FILL_STATS(esw_stats->multicast_frames);
+               *data++ = QLCNIC_FILL_STATS(esw_stats->broadcast_frames);
+               *data++ = QLCNIC_FILL_STATS(esw_stats->dropped_frames);
+               *data++ = QLCNIC_FILL_STATS(esw_stats->errors);
+               *data++ = QLCNIC_FILL_STATS(esw_stats->local_frames);
+               *data++ = QLCNIC_FILL_STATS(esw_stats->numbytes);
+       }
+       return data;
+}
+
+void qlcnic_update_stats(struct qlcnic_adapter *adapter)
+{
+       struct qlcnic_tx_queue_stats tx_stats;
+       struct qlcnic_host_tx_ring *tx_ring;
+       int ring;
+
+       memset(&tx_stats, 0, sizeof(tx_stats));
+       for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
+               tx_ring = &adapter->tx_ring[ring];
+               tx_stats.xmit_on += tx_ring->tx_stats.xmit_on;
+               tx_stats.xmit_off += tx_ring->tx_stats.xmit_off;
+               tx_stats.xmit_called += tx_ring->tx_stats.xmit_called;
+               tx_stats.xmit_finished += tx_ring->tx_stats.xmit_finished;
+               tx_stats.tx_bytes += tx_ring->tx_stats.tx_bytes;
+       }
+
+       adapter->stats.xmit_on = tx_stats.xmit_on;
+       adapter->stats.xmit_off = tx_stats.xmit_off;
+       adapter->stats.xmitcalled = tx_stats.xmit_called;
+       adapter->stats.xmitfinished = tx_stats.xmit_finished;
+       adapter->stats.txbytes = tx_stats.tx_bytes;
+}
+
+static u64 *qlcnic_fill_tx_queue_stats(u64 *data, void *stats)
+{
+       struct qlcnic_host_tx_ring *tx_ring;
+
+       tx_ring = (struct qlcnic_host_tx_ring *)stats;
+
+       *data++ = QLCNIC_FILL_STATS(tx_ring->tx_stats.xmit_on);
+       *data++ = QLCNIC_FILL_STATS(tx_ring->tx_stats.xmit_off);
+       *data++ = QLCNIC_FILL_STATS(tx_ring->tx_stats.xmit_called);
+       *data++ = QLCNIC_FILL_STATS(tx_ring->tx_stats.xmit_finished);
+       *data++ = QLCNIC_FILL_STATS(tx_ring->tx_stats.tx_bytes);
+
+       return data;
+}
+
+static void qlcnic_get_ethtool_stats(struct net_device *dev,
+                                    struct ethtool_stats *stats, u64 *data)
+{
+       struct qlcnic_adapter *adapter = netdev_priv(dev);
+       struct qlcnic_host_tx_ring *tx_ring;
+       struct qlcnic_esw_statistics port_stats;
+       struct qlcnic_mac_statistics mac_stats;
+       int index, ret, length, size, ring;
+       char *p;
+
+       memset(data, 0, stats->n_stats * sizeof(u64));
+
+       for (ring = 0, index = 0; ring < adapter->drv_tx_rings; ring++) {
+               if (adapter->is_up == QLCNIC_ADAPTER_UP_MAGIC) {
+                       tx_ring = &adapter->tx_ring[ring];
+                       data = qlcnic_fill_tx_queue_stats(data, tx_ring);
+                       qlcnic_update_stats(adapter);
+               } else {
+                       data += QLCNIC_TX_STATS_LEN;
+               }
+       }
+
+       length = QLCNIC_STATS_LEN;
+       for (index = 0; index < length; index++) {
+               p = (char *)adapter + qlcnic_gstrings_stats[index].stat_offset;
+               size = qlcnic_gstrings_stats[index].sizeof_stat;
+               *data++ = (size == sizeof(u64)) ? (*(u64 *)p) : ((*(u32 *)p));
+       }
+
+       if (qlcnic_83xx_check(adapter)) {
+               if (adapter->ahw->linkup)
+                       qlcnic_83xx_get_stats(adapter, data);
+               return;
+       } else {
+               /* Retrieve MAC statistics from firmware */
+               memset(&mac_stats, 0, sizeof(struct qlcnic_mac_statistics));
+               qlcnic_get_mac_stats(adapter, &mac_stats);
+               data = qlcnic_fill_stats(data, &mac_stats, QLCNIC_MAC_STATS);
+       }
+
+       if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED))
+               return;
+
+       memset(&port_stats, 0, sizeof(struct qlcnic_esw_statistics));
+       ret = qlcnic_get_port_stats(adapter, adapter->ahw->pci_func,
+                       QLCNIC_QUERY_RX_COUNTER, &port_stats.rx);
+       if (ret)
+               return;
+
+       data = qlcnic_fill_stats(data, &port_stats.rx, QLCNIC_ESW_STATS);
+       ret = qlcnic_get_port_stats(adapter, adapter->ahw->pci_func,
+                       QLCNIC_QUERY_TX_COUNTER, &port_stats.tx);
+       if (ret)
+               return;
+
+       qlcnic_fill_stats(data, &port_stats.tx, QLCNIC_ESW_STATS);
+}
+
+static int qlcnic_set_led(struct net_device *dev,
+                         enum ethtool_phys_id_state state)
+{
+       struct qlcnic_adapter *adapter = netdev_priv(dev);
+       int drv_sds_rings = adapter->drv_sds_rings;
+       int err = -EIO, active = 1;
+
+       if (qlcnic_83xx_check(adapter))
+               return qlcnic_83xx_set_led(dev, state);
+
+       if (adapter->ahw->op_mode == QLCNIC_NON_PRIV_FUNC) {
+               netdev_warn(dev,
+                           "LED test not supported for non-privileged function\n");
+               return -EOPNOTSUPP;
+       }
+
+       switch (state) {
+       case ETHTOOL_ID_ACTIVE:
+               if (test_and_set_bit(__QLCNIC_LED_ENABLE, &adapter->state))
+                       return -EBUSY;
+
+               if (test_bit(__QLCNIC_RESETTING, &adapter->state))
+                       break;
+
+               if (!test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
+                       if (qlcnic_diag_alloc_res(dev, QLCNIC_LED_TEST))
+                               break;
+                       set_bit(__QLCNIC_DIAG_RES_ALLOC, &adapter->state);
+               }
+
+               if (adapter->nic_ops->config_led(adapter, 1, 0xf) == 0) {
+                       err = 0;
+                       break;
+               }
+
+               dev_err(&adapter->pdev->dev,
+                       "Failed to set LED blink state.\n");
+               break;
+
+       case ETHTOOL_ID_INACTIVE:
+               active = 0;
+
+               if (test_bit(__QLCNIC_RESETTING, &adapter->state))
+                       break;
+
+               if (!test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
+                       if (qlcnic_diag_alloc_res(dev, QLCNIC_LED_TEST))
+                               break;
+                       set_bit(__QLCNIC_DIAG_RES_ALLOC, &adapter->state);
+               }
+
+               if (adapter->nic_ops->config_led(adapter, 0, 0xf))
+                       dev_err(&adapter->pdev->dev,
+                               "Failed to reset LED blink state.\n");
+
+               break;
+
+       default:
+               return -EINVAL;
+       }
+
+       if (test_and_clear_bit(__QLCNIC_DIAG_RES_ALLOC, &adapter->state))
+               qlcnic_diag_free_res(dev, drv_sds_rings);
+
+       if (!active || err)
+               clear_bit(__QLCNIC_LED_ENABLE, &adapter->state);
+
+       return err;
+}
+
+static void
+qlcnic_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
+{
+       struct qlcnic_adapter *adapter = netdev_priv(dev);
+       u32 wol_cfg;
+       int err = 0;
+
+       if (qlcnic_83xx_check(adapter))
+               return;
+       wol->supported = 0;
+       wol->wolopts = 0;
+
+       wol_cfg = QLCRD32(adapter, QLCNIC_WOL_CONFIG_NV, &err);
+       if (err == -EIO)
+               return;
+       if (wol_cfg & (1UL << adapter->portnum))
+               wol->supported |= WAKE_MAGIC;
+
+       wol_cfg = QLCRD32(adapter, QLCNIC_WOL_CONFIG, &err);
+       if (wol_cfg & (1UL << adapter->portnum))
+               wol->wolopts |= WAKE_MAGIC;
+}
+
+static int
+qlcnic_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
+{
+       struct qlcnic_adapter *adapter = netdev_priv(dev);
+       u32 wol_cfg;
+       int err = 0;
+
+       if (qlcnic_83xx_check(adapter))
+               return -EOPNOTSUPP;
+       if (wol->wolopts & ~WAKE_MAGIC)
+               return -EINVAL;
+
+       wol_cfg = QLCRD32(adapter, QLCNIC_WOL_CONFIG_NV, &err);
+       if (err == -EIO)
+               return err;
+       if (!(wol_cfg & (1 << adapter->portnum)))
+               return -EOPNOTSUPP;
+
+       wol_cfg = QLCRD32(adapter, QLCNIC_WOL_CONFIG, &err);
+       if (err == -EIO)
+               return err;
+       if (wol->wolopts & WAKE_MAGIC)
+               wol_cfg |= 1UL << adapter->portnum;
+       else
+               wol_cfg &= ~(1UL << adapter->portnum);
+
+       QLCWR32(adapter, QLCNIC_WOL_CONFIG, wol_cfg);
+
+       return 0;
+}
+
+/*
+ * Set the coalescing parameters. Currently only normal is supported.
+ * If rx_coalesce_usecs == 0 or rx_max_coalesced_frames == 0 then set the
+ * firmware coalescing to default.
+ */
+static int qlcnic_set_intr_coalesce(struct net_device *netdev,
+                       struct ethtool_coalesce *ethcoal)
+{
+       struct qlcnic_adapter *adapter = netdev_priv(netdev);
+       int err;
+
+       if (!test_bit(__QLCNIC_DEV_UP, &adapter->state))
+               return -EINVAL;
+
+       /*
+        * Return an error if unsupported values or
+        * unsupported parameters are set.
+        */
+       if (ethcoal->rx_coalesce_usecs > 0xffff ||
+           ethcoal->rx_max_coalesced_frames > 0xffff ||
+           ethcoal->tx_coalesce_usecs > 0xffff ||
+           ethcoal->tx_max_coalesced_frames > 0xffff ||
+           ethcoal->rx_coalesce_usecs_irq ||
+           ethcoal->rx_max_coalesced_frames_irq ||
+           ethcoal->tx_coalesce_usecs_irq ||
+           ethcoal->tx_max_coalesced_frames_irq ||
+           ethcoal->stats_block_coalesce_usecs ||
+           ethcoal->use_adaptive_rx_coalesce ||
+           ethcoal->use_adaptive_tx_coalesce ||
+           ethcoal->pkt_rate_low ||
+           ethcoal->rx_coalesce_usecs_low ||
+           ethcoal->rx_max_coalesced_frames_low ||
+           ethcoal->tx_coalesce_usecs_low ||
+           ethcoal->tx_max_coalesced_frames_low ||
+           ethcoal->pkt_rate_high ||
+           ethcoal->rx_coalesce_usecs_high ||
+           ethcoal->rx_max_coalesced_frames_high ||
+           ethcoal->tx_coalesce_usecs_high ||
+           ethcoal->tx_max_coalesced_frames_high)
+               return -EINVAL;
+
+       err = qlcnic_config_intr_coalesce(adapter, ethcoal);
+
+       return err;
+}
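For reference, a minimal sketch (illustrative values, not driver defaults) of the only fields qlcnic_set_intr_coalesce() accepts: the four rx/tx microsecond and frame counters below, each capped at 0xffff. Every other member of struct ethtool_coalesce must stay zero, otherwise the function returns -EINVAL.

        struct ethtool_coalesce coal = {
                .rx_coalesce_usecs       = 3,    /* <= 0xffff */
                .rx_max_coalesced_frames = 256,  /* <= 0xffff */
                .tx_coalesce_usecs       = 64,   /* <= 0xffff */
                .tx_max_coalesced_frames = 64,   /* <= 0xffff */
        };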
+
+static int qlcnic_get_intr_coalesce(struct net_device *netdev,
+                       struct ethtool_coalesce *ethcoal)
+{
+       struct qlcnic_adapter *adapter = netdev_priv(netdev);
+
+       if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
+               return -EINVAL;
+
+       ethcoal->rx_coalesce_usecs = adapter->ahw->coal.rx_time_us;
+       ethcoal->rx_max_coalesced_frames = adapter->ahw->coal.rx_packets;
+       ethcoal->tx_coalesce_usecs = adapter->ahw->coal.tx_time_us;
+       ethcoal->tx_max_coalesced_frames = adapter->ahw->coal.tx_packets;
+
+       return 0;
+}
+
+static u32 qlcnic_get_msglevel(struct net_device *netdev)
+{
+       struct qlcnic_adapter *adapter = netdev_priv(netdev);
+
+       return adapter->ahw->msg_enable;
+}
+
+static void qlcnic_set_msglevel(struct net_device *netdev, u32 msglvl)
+{
+       struct qlcnic_adapter *adapter = netdev_priv(netdev);
+
+       adapter->ahw->msg_enable = msglvl;
+}
+
+int qlcnic_enable_fw_dump_state(struct qlcnic_adapter *adapter)
+{
+       struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump;
+       u32 val;
+
+       if (qlcnic_84xx_check(adapter)) {
+               if (qlcnic_83xx_lock_driver(adapter))
+                       return -EBUSY;
+
+               val = QLCRDX(adapter->ahw, QLC_83XX_IDC_CTRL);
+               val &= ~QLC_83XX_IDC_DISABLE_FW_DUMP;
+               QLCWRX(adapter->ahw, QLC_83XX_IDC_CTRL, val);
+
+               qlcnic_83xx_unlock_driver(adapter);
+       } else {
+               fw_dump->enable = true;
+       }
+
+       dev_info(&adapter->pdev->dev, "FW dump enabled\n");
+
+       return 0;
+}
+
+static int qlcnic_disable_fw_dump_state(struct qlcnic_adapter *adapter)
+{
+       struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump;
+       u32 val;
+
+       if (qlcnic_84xx_check(adapter)) {
+               if (qlcnic_83xx_lock_driver(adapter))
+                       return -EBUSY;
+
+               val = QLCRDX(adapter->ahw, QLC_83XX_IDC_CTRL);
+               val |= QLC_83XX_IDC_DISABLE_FW_DUMP;
+               QLCWRX(adapter->ahw, QLC_83XX_IDC_CTRL, val);
+
+               qlcnic_83xx_unlock_driver(adapter);
+       } else {
+               fw_dump->enable = false;
+       }
+
+       dev_info(&adapter->pdev->dev, "FW dump disabled\n");
+
+       return 0;
+}
+
+bool qlcnic_check_fw_dump_state(struct qlcnic_adapter *adapter)
+{
+       struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump;
+       bool state;
+       u32 val;
+
+       if (qlcnic_84xx_check(adapter)) {
+               val = QLCRDX(adapter->ahw, QLC_83XX_IDC_CTRL);
+               state = !(val & QLC_83XX_IDC_DISABLE_FW_DUMP);
+       } else {
+               state = fw_dump->enable;
+       }
+
+       return state;
+}
+
+static int
+qlcnic_get_dump_flag(struct net_device *netdev, struct ethtool_dump *dump)
+{
+       struct qlcnic_adapter *adapter = netdev_priv(netdev);
+       struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump;
+
+       if (!fw_dump->tmpl_hdr) {
+               netdev_err(adapter->netdev, "FW Dump not supported\n");
+               return -ENOTSUPP;
+       }
+
+       if (fw_dump->clr)
+               dump->len = fw_dump->tmpl_hdr_size + fw_dump->size;
+       else
+               dump->len = 0;
+
+       if (!qlcnic_check_fw_dump_state(adapter))
+               dump->flag = ETH_FW_DUMP_DISABLE;
+       else
+               dump->flag = fw_dump->cap_mask;
+
+       dump->version = adapter->fw_version;
+       return 0;
+}
+
+static int
+qlcnic_get_dump_data(struct net_device *netdev, struct ethtool_dump *dump,
+                       void *buffer)
+{
+       int i, copy_sz;
+       u32 *hdr_ptr;
+       __le32 *data;
+       struct qlcnic_adapter *adapter = netdev_priv(netdev);
+       struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump;
+
+       if (!fw_dump->tmpl_hdr) {
+               netdev_err(netdev, "FW Dump not supported\n");
+               return -ENOTSUPP;
+       }
+
+       if (!fw_dump->clr) {
+               netdev_info(netdev, "Dump not available\n");
+               return -EINVAL;
+       }
+
+       /* Copy template header first */
+       copy_sz = fw_dump->tmpl_hdr_size;
+       hdr_ptr = (u32 *)fw_dump->tmpl_hdr;
+       data = buffer;
+       for (i = 0; i < copy_sz/sizeof(u32); i++)
+               *data++ = cpu_to_le32(*hdr_ptr++);
+
+       /* Copy captured dump data */
+       memcpy(buffer + copy_sz, fw_dump->data, fw_dump->size);
+       dump->len = copy_sz + fw_dump->size;
+       dump->flag = fw_dump->cap_mask;
+
+       /* Free dump area once data has been captured */
+       vfree(fw_dump->data);
+       fw_dump->data = NULL;
+       fw_dump->clr = 0;
+       netdev_info(netdev, "Extracted the FW dump successfully\n");
+       return 0;
+}
+
+static int qlcnic_set_dump_mask(struct qlcnic_adapter *adapter, u32 mask)
+{
+       struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump;
+       struct net_device *netdev = adapter->netdev;
+
+       if (!qlcnic_check_fw_dump_state(adapter)) {
+               netdev_info(netdev,
+                           "Cannot change driver mask to 0x%x: FW dump not enabled\n",
+                           mask);
+               return -EOPNOTSUPP;
+       }
+
+       fw_dump->cap_mask = mask;
+
+       /* Store new capture mask in template header as well*/
+       qlcnic_store_cap_mask(adapter, fw_dump->tmpl_hdr, mask);
+
+       netdev_info(netdev, "Driver mask changed to: 0x%x\n", mask);
+       return 0;
+}
+
+static int
+qlcnic_set_dump(struct net_device *netdev, struct ethtool_dump *val)
+{
+       struct qlcnic_adapter *adapter = netdev_priv(netdev);
+       struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump;
+       bool valid_mask = false;
+       int i, ret = 0;
+
+       switch (val->flag) {
+       case QLCNIC_FORCE_FW_DUMP_KEY:
+               if (!fw_dump->tmpl_hdr) {
+                       netdev_err(netdev, "FW dump not supported\n");
+                       ret = -EOPNOTSUPP;
+                       break;
+               }
+
+               if (!qlcnic_check_fw_dump_state(adapter)) {
+                       netdev_info(netdev, "FW dump not enabled\n");
+                       ret = -EOPNOTSUPP;
+                       break;
+               }
+
+               if (fw_dump->clr) {
+                       netdev_info(netdev,
+                                   "Previous dump not cleared, not forcing dump\n");
+                       break;
+               }
+
+               netdev_info(netdev, "Forcing a FW dump\n");
+               qlcnic_dev_request_reset(adapter, val->flag);
+               break;
+       case QLCNIC_DISABLE_FW_DUMP:
+               if (!fw_dump->tmpl_hdr) {
+                       netdev_err(netdev, "FW dump not supported\n");
+                       ret = -EOPNOTSUPP;
+                       break;
+               }
+
+               ret = qlcnic_disable_fw_dump_state(adapter);
+               break;
+
+       case QLCNIC_ENABLE_FW_DUMP:
+               if (!fw_dump->tmpl_hdr) {
+                       netdev_err(netdev, "FW dump not supported\n");
+                       ret = -EOPNOTSUPP;
+                       break;
+               }
+
+               ret = qlcnic_enable_fw_dump_state(adapter);
+               break;
+
+       case QLCNIC_FORCE_FW_RESET:
+               netdev_info(netdev, "Forcing a FW reset\n");
+               qlcnic_dev_request_reset(adapter, val->flag);
+               adapter->flags &= ~QLCNIC_FW_RESET_OWNER;
+               break;
+
+       case QLCNIC_SET_QUIESCENT:
+       case QLCNIC_RESET_QUIESCENT:
+               if (test_bit(__QLCNIC_MAINTENANCE_MODE, &adapter->state))
+                       netdev_info(netdev, "Device is in non-operational state\n");
+               break;
+
+       default:
+               if (!fw_dump->tmpl_hdr) {
+                       netdev_err(netdev, "FW dump not supported\n");
+                       ret = -EOPNOTSUPP;
+                       break;
+               }
+
+               for (i = 0; i < ARRAY_SIZE(qlcnic_fw_dump_level); i++) {
+                       if (val->flag == qlcnic_fw_dump_level[i]) {
+                               valid_mask = true;
+                               break;
+                       }
+               }
+
+               if (valid_mask) {
+                       ret = qlcnic_set_dump_mask(adapter, val->flag);
+               } else {
+                       netdev_info(netdev, "Invalid dump level: 0x%x\n",
+                                   val->flag);
+                       ret = -EINVAL;
+               }
+       }
+       return ret;
+}
+
+const struct ethtool_ops qlcnic_ethtool_ops = {
+       .get_settings = qlcnic_get_settings,
+       .set_settings = qlcnic_set_settings,
+       .get_drvinfo = qlcnic_get_drvinfo,
+       .get_regs_len = qlcnic_get_regs_len,
+       .get_regs = qlcnic_get_regs,
+       .get_link = ethtool_op_get_link,
+       .get_eeprom_len = qlcnic_get_eeprom_len,
+       .get_eeprom = qlcnic_get_eeprom,
+       .get_ringparam = qlcnic_get_ringparam,
+       .set_ringparam = qlcnic_set_ringparam,
+       .get_channels = qlcnic_get_channels,
+       .set_channels = qlcnic_set_channels,
+       .get_pauseparam = qlcnic_get_pauseparam,
+       .set_pauseparam = qlcnic_set_pauseparam,
+       .get_wol = qlcnic_get_wol,
+       .set_wol = qlcnic_set_wol,
+       .self_test = qlcnic_diag_test,
+       .get_strings = qlcnic_get_strings,
+       .get_ethtool_stats = qlcnic_get_ethtool_stats,
+       .get_sset_count = qlcnic_get_sset_count,
+       .get_coalesce = qlcnic_get_intr_coalesce,
+       .set_coalesce = qlcnic_set_intr_coalesce,
+       .set_phys_id = qlcnic_set_led,
+       .set_msglevel = qlcnic_set_msglevel,
+       .get_msglevel = qlcnic_get_msglevel,
+       .get_dump_flag = qlcnic_get_dump_flag,
+       .get_dump_data = qlcnic_get_dump_data,
+       .set_dump = qlcnic_set_dump,
+};
+
+const struct ethtool_ops qlcnic_sriov_vf_ethtool_ops = {
+       .get_settings           = qlcnic_get_settings,
+       .get_drvinfo            = qlcnic_get_drvinfo,
+       .get_regs_len           = qlcnic_get_regs_len,
+       .get_regs               = qlcnic_get_regs,
+       .get_link               = ethtool_op_get_link,
+       .get_eeprom_len         = qlcnic_get_eeprom_len,
+       .get_eeprom             = qlcnic_get_eeprom,
+       .get_ringparam          = qlcnic_get_ringparam,
+       .set_ringparam          = qlcnic_set_ringparam,
+       .get_channels           = qlcnic_get_channels,
+       .get_pauseparam         = qlcnic_get_pauseparam,
+       .get_wol                = qlcnic_get_wol,
+       .get_strings            = qlcnic_get_strings,
+       .get_ethtool_stats      = qlcnic_get_ethtool_stats,
+       .get_sset_count         = qlcnic_get_sset_count,
+       .get_coalesce           = qlcnic_get_intr_coalesce,
+       .set_coalesce           = qlcnic_set_intr_coalesce,
+       .set_msglevel           = qlcnic_set_msglevel,
+       .get_msglevel           = qlcnic_get_msglevel,
+};
+
+const struct ethtool_ops qlcnic_ethtool_failed_ops = {
+       .get_settings           = qlcnic_get_settings,
+       .get_drvinfo            = qlcnic_get_drvinfo,
+       .set_msglevel           = qlcnic_set_msglevel,
+       .get_msglevel           = qlcnic_get_msglevel,
+       .set_dump               = qlcnic_set_dump,
+};
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hdr.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hdr.h
new file mode 100644 (file)
index 0000000..34e467b
--- /dev/null
@@ -0,0 +1,948 @@
+/*
+ * QLogic qlcnic NIC Driver
+ * Copyright (c) 2009-2013 QLogic Corporation
+ *
+ * See LICENSE.qlcnic for copyright and licensing details.
+ */
+
+#ifndef __QLCNIC_HDR_H_
+#define __QLCNIC_HDR_H_
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+
+#include "qlcnic_hw.h"
+
+/*
+ * The basic unit of access when reading/writing control registers.
+ */
+
+enum {
+       QLCNIC_HW_H0_CH_HUB_ADR = 0x05,
+       QLCNIC_HW_H1_CH_HUB_ADR = 0x0E,
+       QLCNIC_HW_H2_CH_HUB_ADR = 0x03,
+       QLCNIC_HW_H3_CH_HUB_ADR = 0x01,
+       QLCNIC_HW_H4_CH_HUB_ADR = 0x06,
+       QLCNIC_HW_H5_CH_HUB_ADR = 0x07,
+       QLCNIC_HW_H6_CH_HUB_ADR = 0x08
+};
+
+/*  Hub 0 */
+enum {
+       QLCNIC_HW_MN_CRB_AGT_ADR = 0x15,
+       QLCNIC_HW_MS_CRB_AGT_ADR = 0x25
+};
+
+/*  Hub 1 */
+enum {
+       QLCNIC_HW_PS_CRB_AGT_ADR = 0x73,
+       QLCNIC_HW_SS_CRB_AGT_ADR = 0x20,
+       QLCNIC_HW_RPMX3_CRB_AGT_ADR = 0x0b,
+       QLCNIC_HW_QMS_CRB_AGT_ADR = 0x00,
+       QLCNIC_HW_SQGS0_CRB_AGT_ADR = 0x01,
+       QLCNIC_HW_SQGS1_CRB_AGT_ADR = 0x02,
+       QLCNIC_HW_SQGS2_CRB_AGT_ADR = 0x03,
+       QLCNIC_HW_SQGS3_CRB_AGT_ADR = 0x04,
+       QLCNIC_HW_C2C0_CRB_AGT_ADR = 0x58,
+       QLCNIC_HW_C2C1_CRB_AGT_ADR = 0x59,
+       QLCNIC_HW_C2C2_CRB_AGT_ADR = 0x5a,
+       QLCNIC_HW_RPMX2_CRB_AGT_ADR = 0x0a,
+       QLCNIC_HW_RPMX4_CRB_AGT_ADR = 0x0c,
+       QLCNIC_HW_RPMX7_CRB_AGT_ADR = 0x0f,
+       QLCNIC_HW_RPMX9_CRB_AGT_ADR = 0x12,
+       QLCNIC_HW_SMB_CRB_AGT_ADR = 0x18
+};
+
+/*  Hub 2 */
+enum {
+       QLCNIC_HW_NIU_CRB_AGT_ADR = 0x31,
+       QLCNIC_HW_I2C0_CRB_AGT_ADR = 0x19,
+       QLCNIC_HW_I2C1_CRB_AGT_ADR = 0x29,
+
+       QLCNIC_HW_SN_CRB_AGT_ADR = 0x10,
+       QLCNIC_HW_I2Q_CRB_AGT_ADR = 0x20,
+       QLCNIC_HW_LPC_CRB_AGT_ADR = 0x22,
+       QLCNIC_HW_ROMUSB_CRB_AGT_ADR = 0x21,
+       QLCNIC_HW_QM_CRB_AGT_ADR = 0x66,
+       QLCNIC_HW_SQG0_CRB_AGT_ADR = 0x60,
+       QLCNIC_HW_SQG1_CRB_AGT_ADR = 0x61,
+       QLCNIC_HW_SQG2_CRB_AGT_ADR = 0x62,
+       QLCNIC_HW_SQG3_CRB_AGT_ADR = 0x63,
+       QLCNIC_HW_RPMX1_CRB_AGT_ADR = 0x09,
+       QLCNIC_HW_RPMX5_CRB_AGT_ADR = 0x0d,
+       QLCNIC_HW_RPMX6_CRB_AGT_ADR = 0x0e,
+       QLCNIC_HW_RPMX8_CRB_AGT_ADR = 0x11
+};
+
+/*  Hub 3 */
+enum {
+       QLCNIC_HW_PH_CRB_AGT_ADR = 0x1A,
+       QLCNIC_HW_SRE_CRB_AGT_ADR = 0x50,
+       QLCNIC_HW_EG_CRB_AGT_ADR = 0x51,
+       QLCNIC_HW_RPMX0_CRB_AGT_ADR = 0x08
+};
+
+/*  Hub 4 */
+enum {
+       QLCNIC_HW_PEGN0_CRB_AGT_ADR = 0x40,
+       QLCNIC_HW_PEGN1_CRB_AGT_ADR,
+       QLCNIC_HW_PEGN2_CRB_AGT_ADR,
+       QLCNIC_HW_PEGN3_CRB_AGT_ADR,
+       QLCNIC_HW_PEGNI_CRB_AGT_ADR,
+       QLCNIC_HW_PEGND_CRB_AGT_ADR,
+       QLCNIC_HW_PEGNC_CRB_AGT_ADR,
+       QLCNIC_HW_PEGR0_CRB_AGT_ADR,
+       QLCNIC_HW_PEGR1_CRB_AGT_ADR,
+       QLCNIC_HW_PEGR2_CRB_AGT_ADR,
+       QLCNIC_HW_PEGR3_CRB_AGT_ADR,
+       QLCNIC_HW_PEGN4_CRB_AGT_ADR
+};
+
+/*  Hub 5 */
+enum {
+       QLCNIC_HW_PEGS0_CRB_AGT_ADR = 0x40,
+       QLCNIC_HW_PEGS1_CRB_AGT_ADR,
+       QLCNIC_HW_PEGS2_CRB_AGT_ADR,
+       QLCNIC_HW_PEGS3_CRB_AGT_ADR,
+       QLCNIC_HW_PEGSI_CRB_AGT_ADR,
+       QLCNIC_HW_PEGSD_CRB_AGT_ADR,
+       QLCNIC_HW_PEGSC_CRB_AGT_ADR
+};
+
+/*  Hub 6 */
+enum {
+       QLCNIC_HW_CAS0_CRB_AGT_ADR = 0x46,
+       QLCNIC_HW_CAS1_CRB_AGT_ADR = 0x47,
+       QLCNIC_HW_CAS2_CRB_AGT_ADR = 0x48,
+       QLCNIC_HW_CAS3_CRB_AGT_ADR = 0x49,
+       QLCNIC_HW_NCM_CRB_AGT_ADR = 0x16,
+       QLCNIC_HW_TMR_CRB_AGT_ADR = 0x17,
+       QLCNIC_HW_XDMA_CRB_AGT_ADR = 0x05,
+       QLCNIC_HW_OCM0_CRB_AGT_ADR = 0x06,
+       QLCNIC_HW_OCM1_CRB_AGT_ADR = 0x07
+};
+
+/*  Floaters - non-existent modules */
+#define QLCNIC_HW_EFC_RPMX0_CRB_AGT_ADR        0x67
+
+/*  This field defines PCI/X adr [25:20] of agents on the CRB */
+enum {
+       QLCNIC_HW_PX_MAP_CRB_PH = 0,
+       QLCNIC_HW_PX_MAP_CRB_PS,
+       QLCNIC_HW_PX_MAP_CRB_MN,
+       QLCNIC_HW_PX_MAP_CRB_MS,
+       QLCNIC_HW_PX_MAP_CRB_PGR1,
+       QLCNIC_HW_PX_MAP_CRB_SRE,
+       QLCNIC_HW_PX_MAP_CRB_NIU,
+       QLCNIC_HW_PX_MAP_CRB_QMN,
+       QLCNIC_HW_PX_MAP_CRB_SQN0,
+       QLCNIC_HW_PX_MAP_CRB_SQN1,
+       QLCNIC_HW_PX_MAP_CRB_SQN2,
+       QLCNIC_HW_PX_MAP_CRB_SQN3,
+       QLCNIC_HW_PX_MAP_CRB_QMS,
+       QLCNIC_HW_PX_MAP_CRB_SQS0,
+       QLCNIC_HW_PX_MAP_CRB_SQS1,
+       QLCNIC_HW_PX_MAP_CRB_SQS2,
+       QLCNIC_HW_PX_MAP_CRB_SQS3,
+       QLCNIC_HW_PX_MAP_CRB_PGN0,
+       QLCNIC_HW_PX_MAP_CRB_PGN1,
+       QLCNIC_HW_PX_MAP_CRB_PGN2,
+       QLCNIC_HW_PX_MAP_CRB_PGN3,
+       QLCNIC_HW_PX_MAP_CRB_PGND,
+       QLCNIC_HW_PX_MAP_CRB_PGNI,
+       QLCNIC_HW_PX_MAP_CRB_PGS0,
+       QLCNIC_HW_PX_MAP_CRB_PGS1,
+       QLCNIC_HW_PX_MAP_CRB_PGS2,
+       QLCNIC_HW_PX_MAP_CRB_PGS3,
+       QLCNIC_HW_PX_MAP_CRB_PGSD,
+       QLCNIC_HW_PX_MAP_CRB_PGSI,
+       QLCNIC_HW_PX_MAP_CRB_SN,
+       QLCNIC_HW_PX_MAP_CRB_PGR2,
+       QLCNIC_HW_PX_MAP_CRB_EG,
+       QLCNIC_HW_PX_MAP_CRB_PH2,
+       QLCNIC_HW_PX_MAP_CRB_PS2,
+       QLCNIC_HW_PX_MAP_CRB_CAM,
+       QLCNIC_HW_PX_MAP_CRB_CAS0,
+       QLCNIC_HW_PX_MAP_CRB_CAS1,
+       QLCNIC_HW_PX_MAP_CRB_CAS2,
+       QLCNIC_HW_PX_MAP_CRB_C2C0,
+       QLCNIC_HW_PX_MAP_CRB_C2C1,
+       QLCNIC_HW_PX_MAP_CRB_TIMR,
+       QLCNIC_HW_PX_MAP_CRB_PGR3,
+       QLCNIC_HW_PX_MAP_CRB_RPMX1,
+       QLCNIC_HW_PX_MAP_CRB_RPMX2,
+       QLCNIC_HW_PX_MAP_CRB_RPMX3,
+       QLCNIC_HW_PX_MAP_CRB_RPMX4,
+       QLCNIC_HW_PX_MAP_CRB_RPMX5,
+       QLCNIC_HW_PX_MAP_CRB_RPMX6,
+       QLCNIC_HW_PX_MAP_CRB_RPMX7,
+       QLCNIC_HW_PX_MAP_CRB_XDMA,
+       QLCNIC_HW_PX_MAP_CRB_I2Q,
+       QLCNIC_HW_PX_MAP_CRB_ROMUSB,
+       QLCNIC_HW_PX_MAP_CRB_CAS3,
+       QLCNIC_HW_PX_MAP_CRB_RPMX0,
+       QLCNIC_HW_PX_MAP_CRB_RPMX8,
+       QLCNIC_HW_PX_MAP_CRB_RPMX9,
+       QLCNIC_HW_PX_MAP_CRB_OCM0,
+       QLCNIC_HW_PX_MAP_CRB_OCM1,
+       QLCNIC_HW_PX_MAP_CRB_SMB,
+       QLCNIC_HW_PX_MAP_CRB_I2C0,
+       QLCNIC_HW_PX_MAP_CRB_I2C1,
+       QLCNIC_HW_PX_MAP_CRB_LPC,
+       QLCNIC_HW_PX_MAP_CRB_PGNC,
+       QLCNIC_HW_PX_MAP_CRB_PGR0
+};
+
+#define        BIT_0   0x1
+#define        BIT_1   0x2
+#define        BIT_2   0x4
+#define        BIT_3   0x8
+#define        BIT_4   0x10
+#define        BIT_5   0x20
+#define        BIT_6   0x40
+#define        BIT_7   0x80
+#define        BIT_8   0x100
+#define        BIT_9   0x200
+#define        BIT_10  0x400
+#define        BIT_11  0x800
+#define        BIT_12  0x1000
+#define        BIT_13  0x2000
+#define        BIT_14  0x4000
+#define        BIT_15  0x8000
+#define        BIT_16  0x10000
+#define        BIT_17  0x20000
+#define        BIT_18  0x40000
+#define        BIT_19  0x80000
+#define        BIT_20  0x100000
+#define        BIT_21  0x200000
+#define        BIT_22  0x400000
+#define        BIT_23  0x800000
+#define        BIT_24  0x1000000
+#define        BIT_25  0x2000000
+#define        BIT_26  0x4000000
+#define        BIT_27  0x8000000
+#define        BIT_28  0x10000000
+#define        BIT_29  0x20000000
+#define        BIT_30  0x40000000
+#define        BIT_31  0x80000000
+
+/*  This field defines CRB adr [31:20] of the agents */
+
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_MN   \
+       ((QLCNIC_HW_H0_CH_HUB_ADR << 7) | QLCNIC_HW_MN_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_PH   \
+       ((QLCNIC_HW_H0_CH_HUB_ADR << 7) | QLCNIC_HW_PH_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_MS   \
+       ((QLCNIC_HW_H0_CH_HUB_ADR << 7) | QLCNIC_HW_MS_CRB_AGT_ADR)
+
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_PS   \
+       ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_PS_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_SS   \
+       ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_SS_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX3        \
+       ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_RPMX3_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_QMS  \
+       ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_QMS_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_SQS0 \
+       ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_SQGS0_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_SQS1 \
+       ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_SQGS1_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_SQS2 \
+       ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_SQGS2_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_SQS3 \
+       ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_SQGS3_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_C2C0 \
+       ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_C2C0_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_C2C1 \
+       ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_C2C1_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX2        \
+       ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_RPMX2_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX4        \
+       ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_RPMX4_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX7        \
+       ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_RPMX7_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX9        \
+       ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_RPMX9_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_SMB  \
+       ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_SMB_CRB_AGT_ADR)
+
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_NIU  \
+       ((QLCNIC_HW_H2_CH_HUB_ADR << 7) | QLCNIC_HW_NIU_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_I2C0 \
+       ((QLCNIC_HW_H2_CH_HUB_ADR << 7) | QLCNIC_HW_I2C0_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_I2C1 \
+       ((QLCNIC_HW_H2_CH_HUB_ADR << 7) | QLCNIC_HW_I2C1_CRB_AGT_ADR)
+
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_SRE  \
+       ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_SRE_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_EG   \
+       ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_EG_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX0        \
+       ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_RPMX0_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_QMN  \
+       ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_QM_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_SQN0 \
+       ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_SQG0_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_SQN1 \
+       ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_SQG1_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_SQN2 \
+       ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_SQG2_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_SQN3 \
+       ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_SQG3_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX1        \
+       ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_RPMX1_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX5        \
+       ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_RPMX5_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX6        \
+       ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_RPMX6_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX8        \
+       ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_RPMX8_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_CAS0 \
+       ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_CAS0_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_CAS1 \
+       ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_CAS1_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_CAS2 \
+       ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_CAS2_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_CAS3 \
+       ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_CAS3_CRB_AGT_ADR)
+
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGNI \
+       ((QLCNIC_HW_H4_CH_HUB_ADR << 7) | QLCNIC_HW_PEGNI_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGND \
+       ((QLCNIC_HW_H4_CH_HUB_ADR << 7) | QLCNIC_HW_PEGND_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGN0 \
+       ((QLCNIC_HW_H4_CH_HUB_ADR << 7) | QLCNIC_HW_PEGN0_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGN1 \
+       ((QLCNIC_HW_H4_CH_HUB_ADR << 7) | QLCNIC_HW_PEGN1_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGN2 \
+       ((QLCNIC_HW_H4_CH_HUB_ADR << 7) | QLCNIC_HW_PEGN2_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGN3 \
+       ((QLCNIC_HW_H4_CH_HUB_ADR << 7) | QLCNIC_HW_PEGN3_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGN4 \
+       ((QLCNIC_HW_H4_CH_HUB_ADR << 7) | QLCNIC_HW_PEGN4_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGNC \
+       ((QLCNIC_HW_H4_CH_HUB_ADR << 7) | QLCNIC_HW_PEGNC_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGR0 \
+       ((QLCNIC_HW_H4_CH_HUB_ADR << 7) | QLCNIC_HW_PEGR0_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGR1 \
+       ((QLCNIC_HW_H4_CH_HUB_ADR << 7) | QLCNIC_HW_PEGR1_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGR2 \
+       ((QLCNIC_HW_H4_CH_HUB_ADR << 7) | QLCNIC_HW_PEGR2_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGR3 \
+       ((QLCNIC_HW_H4_CH_HUB_ADR << 7) | QLCNIC_HW_PEGR3_CRB_AGT_ADR)
+
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGSI \
+       ((QLCNIC_HW_H5_CH_HUB_ADR << 7) | QLCNIC_HW_PEGSI_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGSD \
+       ((QLCNIC_HW_H5_CH_HUB_ADR << 7) | QLCNIC_HW_PEGSD_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGS0 \
+       ((QLCNIC_HW_H5_CH_HUB_ADR << 7) | QLCNIC_HW_PEGS0_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGS1 \
+       ((QLCNIC_HW_H5_CH_HUB_ADR << 7) | QLCNIC_HW_PEGS1_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGS2 \
+       ((QLCNIC_HW_H5_CH_HUB_ADR << 7) | QLCNIC_HW_PEGS2_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGS3 \
+       ((QLCNIC_HW_H5_CH_HUB_ADR << 7) | QLCNIC_HW_PEGS3_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGSC \
+       ((QLCNIC_HW_H5_CH_HUB_ADR << 7) | QLCNIC_HW_PEGSC_CRB_AGT_ADR)
+
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_CAM  \
+       ((QLCNIC_HW_H6_CH_HUB_ADR << 7) | QLCNIC_HW_NCM_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_TIMR \
+       ((QLCNIC_HW_H6_CH_HUB_ADR << 7) | QLCNIC_HW_TMR_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_XDMA \
+       ((QLCNIC_HW_H6_CH_HUB_ADR << 7) | QLCNIC_HW_XDMA_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_SN   \
+       ((QLCNIC_HW_H6_CH_HUB_ADR << 7) | QLCNIC_HW_SN_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_I2Q  \
+       ((QLCNIC_HW_H6_CH_HUB_ADR << 7) | QLCNIC_HW_I2Q_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_ROMUSB       \
+       ((QLCNIC_HW_H6_CH_HUB_ADR << 7) | QLCNIC_HW_ROMUSB_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_OCM0 \
+       ((QLCNIC_HW_H6_CH_HUB_ADR << 7) | QLCNIC_HW_OCM0_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_OCM1 \
+       ((QLCNIC_HW_H6_CH_HUB_ADR << 7) | QLCNIC_HW_OCM1_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_LPC  \
+       ((QLCNIC_HW_H6_CH_HUB_ADR << 7) | QLCNIC_HW_LPC_CRB_AGT_ADR)
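As a worked check of the hub/agent composition above (hub number shifted left by 7, OR'd with the agent address), the NIU agent sits on hub 2:

        /* QLCNIC_HW_CRB_HUB_AGT_ADR_NIU
         *   = (QLCNIC_HW_H2_CH_HUB_ADR << 7) | QLCNIC_HW_NIU_CRB_AGT_ADR
         *   = (0x03 << 7) | 0x31 = 0x1b1
         */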
+
+#define QLCNIC_SRE_MISC                (QLCNIC_CRB_SRE + 0x0002c)
+
+#define QLCNIC_I2Q_CLR_PCI_HI  (QLCNIC_CRB_I2Q + 0x00034)
+
+#define ROMUSB_GLB             (QLCNIC_CRB_ROMUSB + 0x00000)
+#define ROMUSB_ROM             (QLCNIC_CRB_ROMUSB + 0x10000)
+
+#define QLCNIC_ROMUSB_GLB_STATUS       (ROMUSB_GLB + 0x0004)
+#define QLCNIC_ROMUSB_GLB_SW_RESET     (ROMUSB_GLB + 0x0008)
+#define QLCNIC_ROMUSB_GLB_PAD_GPIO_I   (ROMUSB_GLB + 0x000c)
+#define QLCNIC_ROMUSB_GLB_CAS_RST      (ROMUSB_GLB + 0x0038)
+#define QLCNIC_ROMUSB_GLB_TEST_MUX_SEL (ROMUSB_GLB + 0x0044)
+#define QLCNIC_ROMUSB_GLB_PEGTUNE_DONE (ROMUSB_GLB + 0x005c)
+#define QLCNIC_ROMUSB_GLB_CHIP_CLK_CTRL        (ROMUSB_GLB + 0x00A8)
+
+#define QLCNIC_ROMUSB_GPIO(n)          (ROMUSB_GLB + 0x60 + (4 * (n)))
+
+#define QLCNIC_ROMUSB_ROM_INSTR_OPCODE (ROMUSB_ROM + 0x0004)
+#define QLCNIC_ROMUSB_ROM_ADDRESS      (ROMUSB_ROM + 0x0008)
+#define QLCNIC_ROMUSB_ROM_WDATA                (ROMUSB_ROM + 0x000c)
+#define QLCNIC_ROMUSB_ROM_ABYTE_CNT    (ROMUSB_ROM + 0x0010)
+#define QLCNIC_ROMUSB_ROM_DUMMY_BYTE_CNT (ROMUSB_ROM + 0x0014)
+#define QLCNIC_ROMUSB_ROM_RDATA                (ROMUSB_ROM + 0x0018)
+
+/******************************************************************************
+*
+*    Definitions specific to M25P flash
+*
+*******************************************************************************
+*/
+
+/* all are 1MB windows */
+
+#define QLCNIC_PCI_CRB_WINDOWSIZE      0x00100000
+#define QLCNIC_PCI_CRB_WINDOW(A)       \
+       (QLCNIC_PCI_CRBSPACE + (A)*QLCNIC_PCI_CRB_WINDOWSIZE)
+
+#define QLCNIC_CRB_NIU         QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_NIU)
+#define QLCNIC_CRB_SRE         QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_SRE)
+#define QLCNIC_CRB_ROMUSB      \
+       QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_ROMUSB)
+#define QLCNIC_CRB_EPG         QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_EG)
+#define QLCNIC_CRB_I2Q         QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_I2Q)
+#define QLCNIC_CRB_TIMER       QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_TIMR)
+#define QLCNIC_CRB_I2C0        QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_I2C0)
+#define QLCNIC_CRB_SMB         QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_SMB)
+#define QLCNIC_CRB_MAX         QLCNIC_PCI_CRB_WINDOW(64)
+
+#define QLCNIC_CRB_PCIX_HOST   QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_PH)
+#define QLCNIC_CRB_PCIX_HOST2  QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_PH2)
+#define QLCNIC_CRB_PEG_NET_0   QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_PGN0)
+#define QLCNIC_CRB_PEG_NET_1   QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_PGN1)
+#define QLCNIC_CRB_PEG_NET_2   QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_PGN2)
+#define QLCNIC_CRB_PEG_NET_3   QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_PGN3)
+#define QLCNIC_CRB_PEG_NET_4   QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_SQS2)
+#define QLCNIC_CRB_PEG_NET_D   QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_PGND)
+#define QLCNIC_CRB_PEG_NET_I   QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_PGNI)
+#define QLCNIC_CRB_DDR_NET     QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_MN)
+#define QLCNIC_CRB_QDR_NET     QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_SN)
+
+#define QLCNIC_CRB_PCIX_MD     QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_PS)
+#define QLCNIC_CRB_PCIE        QLCNIC_CRB_PCIX_MD
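A worked example of the 1 MB window arithmetic above: the NIU block is CRB window 6, so

        /* QLCNIC_CRB_NIU = QLCNIC_PCI_CRBSPACE + 6 * QLCNIC_PCI_CRB_WINDOWSIZE
         *                = 0x06000000 + 6 * 0x00100000 = 0x06600000
         */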
+
+#define ISR_INT_VECTOR         (QLCNIC_PCIX_PS_REG(PCIX_INT_VECTOR))
+#define ISR_INT_MASK           (QLCNIC_PCIX_PS_REG(PCIX_INT_MASK))
+#define ISR_INT_MASK_SLOW      (QLCNIC_PCIX_PS_REG(PCIX_INT_MASK))
+#define ISR_INT_TARGET_STATUS  (QLCNIC_PCIX_PS_REG(PCIX_TARGET_STATUS))
+#define ISR_INT_TARGET_MASK    (QLCNIC_PCIX_PS_REG(PCIX_TARGET_MASK))
+#define ISR_INT_TARGET_STATUS_F1   (QLCNIC_PCIX_PS_REG(PCIX_TARGET_STATUS_F1))
+#define ISR_INT_TARGET_MASK_F1     (QLCNIC_PCIX_PS_REG(PCIX_TARGET_MASK_F1))
+#define ISR_INT_TARGET_STATUS_F2   (QLCNIC_PCIX_PS_REG(PCIX_TARGET_STATUS_F2))
+#define ISR_INT_TARGET_MASK_F2     (QLCNIC_PCIX_PS_REG(PCIX_TARGET_MASK_F2))
+#define ISR_INT_TARGET_STATUS_F3   (QLCNIC_PCIX_PS_REG(PCIX_TARGET_STATUS_F3))
+#define ISR_INT_TARGET_MASK_F3     (QLCNIC_PCIX_PS_REG(PCIX_TARGET_MASK_F3))
+#define ISR_INT_TARGET_STATUS_F4   (QLCNIC_PCIX_PS_REG(PCIX_TARGET_STATUS_F4))
+#define ISR_INT_TARGET_MASK_F4     (QLCNIC_PCIX_PS_REG(PCIX_TARGET_MASK_F4))
+#define ISR_INT_TARGET_STATUS_F5   (QLCNIC_PCIX_PS_REG(PCIX_TARGET_STATUS_F5))
+#define ISR_INT_TARGET_MASK_F5     (QLCNIC_PCIX_PS_REG(PCIX_TARGET_MASK_F5))
+#define ISR_INT_TARGET_STATUS_F6   (QLCNIC_PCIX_PS_REG(PCIX_TARGET_STATUS_F6))
+#define ISR_INT_TARGET_MASK_F6     (QLCNIC_PCIX_PS_REG(PCIX_TARGET_MASK_F6))
+#define ISR_INT_TARGET_STATUS_F7   (QLCNIC_PCIX_PS_REG(PCIX_TARGET_STATUS_F7))
+#define ISR_INT_TARGET_MASK_F7     (QLCNIC_PCIX_PS_REG(PCIX_TARGET_MASK_F7))
+
+#define QLCNIC_PCI_OCM0_2M     (0x000c0000UL)
+#define QLCNIC_PCI_CRBSPACE    (0x06000000UL)
+#define QLCNIC_PCI_CAMQM       (0x04800000UL)
+#define QLCNIC_PCI_CAMQM_END   (0x04800800UL)
+#define QLCNIC_PCI_CAMQM_2M_BASE       (0x000ff800UL)
+
+#define QLCNIC_CRB_CAM QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_CAM)
+
+#define QLCNIC_ADDR_DDR_NET    (0x0000000000000000ULL)
+#define QLCNIC_ADDR_DDR_NET_MAX (0x000000000fffffffULL)
+#define QLCNIC_ADDR_OCM0       (0x0000000200000000ULL)
+#define QLCNIC_ADDR_OCM0_MAX   (0x00000002000fffffULL)
+#define QLCNIC_ADDR_OCM1       (0x0000000200400000ULL)
+#define QLCNIC_ADDR_OCM1_MAX   (0x00000002004fffffULL)
+#define QLCNIC_ADDR_QDR_NET    (0x0000000300000000ULL)
+#define QLCNIC_ADDR_QDR_NET_MAX (0x0000000307ffffffULL)
+
+/*
+ *   Register offsets for MN
+ */
+#define QLCNIC_MIU_CONTROL     (0x000)
+#define QLCNIC_MIU_MN_CONTROL  (QLCNIC_CRB_DDR_NET+QLCNIC_MIU_CONTROL)
+
+/* 200 ms (200000 us) delay in each loop */
+#define QLCNIC_NIU_PHY_WAITLEN         200000
+/* 50 loops * 200 ms = 10 seconds before we give up */
+#define QLCNIC_NIU_PHY_WAITMAX         50
+#define QLCNIC_NIU_MAX_GBE_PORTS       4
+#define QLCNIC_NIU_MAX_XG_PORTS                2
+
+#define QLCNIC_NIU_MODE                        (QLCNIC_CRB_NIU + 0x00000)
+#define QLCNIC_NIU_GB_PAUSE_CTL                (QLCNIC_CRB_NIU + 0x0030c)
+#define QLCNIC_NIU_XG_PAUSE_CTL                (QLCNIC_CRB_NIU + 0x00098)
+
+#define QLCNIC_NIU_GB_MAC_CONFIG_0(I)          \
+               (QLCNIC_CRB_NIU + 0x30000 + (I)*0x10000)
+#define QLCNIC_NIU_GB_MAC_CONFIG_1(I)          \
+               (QLCNIC_CRB_NIU + 0x30004 + (I)*0x10000)
+
+#define MAX_CTL_CHECK  1000
+#define TEST_AGT_CTRL  (0x00)
+
+#define TA_CTL_START   BIT_0
+#define TA_CTL_ENABLE  BIT_1
+#define TA_CTL_WRITE   BIT_2
+#define TA_CTL_BUSY    BIT_3
+
+/* XG Link status */
+#define XG_LINK_UP     0x10
+#define XG_LINK_DOWN   0x20
+
+#define XG_LINK_UP_P3P 0x01
+#define XG_LINK_DOWN_P3P       0x02
+#define XG_LINK_STATE_P3P_MASK 0xf
+#define XG_LINK_STATE_P3P(pcifn, val) \
+       (((val) >> ((pcifn) * 4)) & XG_LINK_STATE_P3P_MASK)
+
+#define P3P_LINK_SPEED_MHZ     100
+#define P3P_LINK_SPEED_MASK    0xff
+#define P3P_LINK_SPEED_REG(pcifn)      \
+       (CRB_PF_LINK_SPEED_1 + (((pcifn) / 4) * 4))
+#define P3P_LINK_SPEED_VAL(pcifn, reg) \
+       (((reg) >> (8 * ((pcifn) & 0x3))) & P3P_LINK_SPEED_MASK)
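A minimal sketch of how the link-state and link-speed helpers above decode the shared register words for one PCI function; the register contents here are made up for illustration:

        u32 state_reg = 0x00000120;  /* hypothetical link-state word */
        u32 speed_reg = 0x000a0000;  /* hypothetical link-speed word */
        int link_up = (XG_LINK_STATE_P3P(2, state_reg) == XG_LINK_UP_P3P);   /* 1 */
        u32 speed   = P3P_LINK_SPEED_VAL(2, speed_reg) * P3P_LINK_SPEED_MHZ; /* 10 * 100 */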
+
+#define QLCNIC_CAM_RAM_BASE    (QLCNIC_CRB_CAM + 0x02000)
+#define QLCNIC_CAM_RAM(reg)    (QLCNIC_CAM_RAM_BASE + (reg))
+#define QLCNIC_ROM_LOCK_ID     (QLCNIC_CAM_RAM(0x100))
+#define QLCNIC_PHY_LOCK_ID     (QLCNIC_CAM_RAM(0x120))
+#define QLCNIC_CRB_WIN_LOCK_ID (QLCNIC_CAM_RAM(0x124))
+
+#define NIC_CRB_BASE           (QLCNIC_CAM_RAM(0x200))
+#define NIC_CRB_BASE_2         (QLCNIC_CAM_RAM(0x700))
+#define QLCNIC_REG(X)          (NIC_CRB_BASE+(X))
+#define QLCNIC_REG_2(X)        (NIC_CRB_BASE_2+(X))
+
+#define QLCNIC_CDRP_MAX_ARGS   4
+#define QLCNIC_CDRP_ARG(i)     (QLCNIC_REG(0x18 + ((i) * 4)))
+
+#define QLCNIC_CDRP_CRB_OFFSET         (QLCNIC_REG(0x18))
+#define QLCNIC_SIGN_CRB_OFFSET         (QLCNIC_REG(0x28))
+
+#define CRB_XG_STATE_P3P               (QLCNIC_REG(0x98))
+#define CRB_PF_LINK_SPEED_1            (QLCNIC_REG(0xe8))
+#define CRB_DRIVER_VERSION             (QLCNIC_REG(0x2a0))
+
+#define CRB_FW_CAPABILITIES_2          (QLCNIC_CAM_RAM(0x12c))
+
+/*
+ * CrbPortPhanCntrHi/Lo is used to pass the address of HostPhantomIndex address
+ * which can be read by the Phantom host to get producer/consumer indexes from
+ * Phantom/Casper. If it is not HOST_SHARED_MEMORY, then the following
+ * registers will be used for the addresses of the ring's shared memory
+ * on the Phantom.
+ */
+
+#define qlcnic_get_temp_val(x)         ((x) >> 16)
+#define qlcnic_get_temp_state(x)       ((x) & 0xffff)
+#define qlcnic_encode_temp(val, state) (((val) << 16) | (state))
+
+/*
+ * Temperature control.
+ */
+enum {
+       QLCNIC_TEMP_NORMAL = 0x1,       /* Normal operating range */
+       QLCNIC_TEMP_WARN,       /* Sound alert, temperature getting high */
+       QLCNIC_TEMP_PANIC       /* Fatal error, hardware has shut down. */
+};
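A small sketch of the temperature encode/decode helpers defined a few lines up, using made-up values:

        u32 temp  = qlcnic_encode_temp(55, QLCNIC_TEMP_NORMAL); /* (55 << 16) | 0x1 */
        u32 val   = qlcnic_get_temp_val(temp);    /* 55 degrees */
        u32 state = qlcnic_get_temp_state(temp);  /* QLCNIC_TEMP_NORMAL */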
+
+
+/* Lock IDs for PHY lock */
+#define PHY_LOCK_DRIVER                0x44524956
+
+#define PCIX_INT_VECTOR        (0x10100)
+#define PCIX_INT_MASK          (0x10104)
+
+#define PCIX_OCM_WINDOW                (0x10800)
+#define PCIX_OCM_WINDOW_REG(func)      (PCIX_OCM_WINDOW + 0x4 * (func))
+
+#define PCIX_TARGET_STATUS     (0x10118)
+#define PCIX_TARGET_STATUS_F1  (0x10160)
+#define PCIX_TARGET_STATUS_F2  (0x10164)
+#define PCIX_TARGET_STATUS_F3  (0x10168)
+#define PCIX_TARGET_STATUS_F4  (0x10360)
+#define PCIX_TARGET_STATUS_F5  (0x10364)
+#define PCIX_TARGET_STATUS_F6  (0x10368)
+#define PCIX_TARGET_STATUS_F7  (0x1036c)
+
+#define PCIX_TARGET_MASK       (0x10128)
+#define PCIX_TARGET_MASK_F1    (0x10170)
+#define PCIX_TARGET_MASK_F2    (0x10174)
+#define PCIX_TARGET_MASK_F3    (0x10178)
+#define PCIX_TARGET_MASK_F4    (0x10370)
+#define PCIX_TARGET_MASK_F5    (0x10374)
+#define PCIX_TARGET_MASK_F6    (0x10378)
+#define PCIX_TARGET_MASK_F7    (0x1037c)
+
+#define PCIX_MSI_F(i)          (0x13000+((i)*4))
+
+#define QLCNIC_PCIX_PH_REG(reg)        (QLCNIC_CRB_PCIE + (reg))
+#define QLCNIC_PCIX_PS_REG(reg)        (QLCNIC_CRB_PCIX_MD + (reg))
+#define QLCNIC_PCIE_REG(reg)   (QLCNIC_CRB_PCIE + (reg))
+
+#define PCIE_SEM0_LOCK         (0x1c000)
+#define PCIE_SEM0_UNLOCK       (0x1c004)
+#define PCIE_SEM_LOCK(N)       (PCIE_SEM0_LOCK + 8*(N))
+#define PCIE_SEM_UNLOCK(N)     (PCIE_SEM0_UNLOCK + 8*(N))
+
+#define PCIE_SETUP_FUNCTION    (0x12040)
+#define PCIE_SETUP_FUNCTION2   (0x12048)
+#define PCIE_MISCCFG_RC         (0x1206c)
+#define PCIE_TGT_SPLIT_CHICKEN (0x12080)
+#define PCIE_CHICKEN3          (0x120c8)
+
+#define ISR_INT_STATE_REG       (QLCNIC_PCIX_PS_REG(PCIE_MISCCFG_RC))
+#define PCIE_MAX_MASTER_SPLIT  (0x14048)
+
+#define QLCNIC_PORT_MODE_NONE          0
+#define QLCNIC_PORT_MODE_XG            1
+#define QLCNIC_PORT_MODE_GB            2
+#define QLCNIC_PORT_MODE_802_3_AP      3
+#define QLCNIC_PORT_MODE_AUTO_NEG      4
+#define QLCNIC_PORT_MODE_AUTO_NEG_1G   5
+#define QLCNIC_PORT_MODE_AUTO_NEG_XG   6
+#define QLCNIC_PORT_MODE_ADDR          (QLCNIC_CAM_RAM(0x24))
+#define QLCNIC_WOL_PORT_MODE           (QLCNIC_CAM_RAM(0x198))
+
+#define QLCNIC_WOL_CONFIG_NV           (QLCNIC_CAM_RAM(0x184))
+#define QLCNIC_WOL_CONFIG              (QLCNIC_CAM_RAM(0x188))
+
+#define QLCNIC_PEG_TUNE_MN_PRESENT     0x1
+#define QLCNIC_PEG_TUNE_CAPABILITY     (QLCNIC_CAM_RAM(0x02c))
+
+#define QLCNIC_DMA_WATCHDOG_CTRL       (QLCNIC_CAM_RAM(0x14))
+#define QLCNIC_ROM_DEV_INIT_TIMEOUT    (0x3e885c)
+#define QLCNIC_ROM_DRV_RESET_TIMEOUT   (0x3e8860)
+
+/* Device State */
+#define QLCNIC_DEV_COLD                        0x1
+#define QLCNIC_DEV_INITIALIZING                0x2
+#define QLCNIC_DEV_READY               0x3
+#define QLCNIC_DEV_NEED_RESET          0x4
+#define QLCNIC_DEV_NEED_QUISCENT       0x5
+#define QLCNIC_DEV_FAILED              0x6
+#define QLCNIC_DEV_QUISCENT            0x7
+
+#define QLCNIC_DEV_BADBAD              0xbad0bad0
+
+#define QLCNIC_DEV_NPAR_NON_OPER       0 /* NON Operational */
+#define QLCNIC_DEV_NPAR_OPER           1 /* NPAR Operational */
+#define QLCNIC_DEV_NPAR_OPER_TIMEO     30 /* Operational time out */
+
+#define QLC_DEV_SET_REF_CNT(VAL, FN)           ((VAL) |= (1 << (FN * 4)))
+#define QLC_DEV_CLR_REF_CNT(VAL, FN)           ((VAL) &= ~(1 << (FN * 4)))
+#define QLC_DEV_SET_RST_RDY(VAL, FN)           ((VAL) |= (1 << (FN * 4)))
+#define QLC_DEV_SET_QSCNT_RDY(VAL, FN)         ((VAL) |= (2 << (FN * 4)))
+#define QLC_DEV_CLR_RST_QSCNT(VAL, FN)         ((VAL) &= ~(3 << (FN * 4)))
+
+#define QLC_DEV_GET_DRV(VAL, FN)               (0xf & ((VAL) >> (FN * 4)))
+#define QLC_DEV_SET_DRV(VAL, FN)               ((VAL) << (FN * 4))
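Each PCI function owns a 4-bit nibble in the shared driver-state word that these macros manipulate; a hedged sketch with a hypothetical word for function 3:

        u32 drv_state = 0;
        QLC_DEV_SET_RST_RDY(drv_state, 3);        /* drv_state = 0x00001000 */
        u32 fn3 = QLC_DEV_GET_DRV(drv_state, 3);  /* 0x1: reset-ready */
        QLC_DEV_CLR_RST_QSCNT(drv_state, 3);      /* clears both state bits of that nibble */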
+
+#define QLCNIC_TYPE_NIC                1
+#define QLCNIC_TYPE_FCOE               2
+#define QLCNIC_TYPE_ISCSI              3
+
+#define QLCNIC_RCODE_DRIVER_INFO               0x20000000
+#define QLCNIC_RCODE_DRIVER_CAN_RELOAD         BIT_30
+#define QLCNIC_RCODE_FATAL_ERROR               BIT_31
+#define QLCNIC_FWERROR_PEGNUM(code)            ((code) & 0xff)
+#define QLCNIC_FWERROR_CODE(code)              ((code >> 8) & 0x1fffff)
+#define QLCNIC_FWERROR_FAN_FAILURE             0x16
+
+#define FW_POLL_DELAY          (1 * HZ)
+#define FW_FAIL_THRESH         2
+
+#define QLCNIC_RESET_TIMEOUT_SECS      10
+#define QLCNIC_INIT_TIMEOUT_SECS       30
+#define QLCNIC_RCVPEG_CHECK_RETRY_COUNT        2000
+#define QLCNIC_RCVPEG_CHECK_DELAY      10
+#define QLCNIC_CMDPEG_CHECK_RETRY_COUNT        60
+#define QLCNIC_CMDPEG_CHECK_DELAY      500
+#define QLCNIC_HEARTBEAT_PERIOD_MSECS  200
+#define QLCNIC_HEARTBEAT_CHECK_RETRY_COUNT     10
+
+#define QLCNIC_MAX_MC_COUNT            38
+#define QLCNIC_MAX_UC_COUNT            512
+#define QLCNIC_WATCHDOG_TIMEOUTVALUE   5
+
+#define        ISR_MSI_INT_TRIGGER(FUNC) (QLCNIC_PCIX_PS_REG(PCIX_MSI_F(FUNC)))
+#define ISR_LEGACY_INT_TRIGGERED(VAL)  (((VAL) & 0x300) == 0x200)
+
+/*
+ * PCI Interrupt Vector Values.
+ */
+#define        PCIX_INT_VECTOR_BIT_F0  0x0080
+#define        PCIX_INT_VECTOR_BIT_F1  0x0100
+#define        PCIX_INT_VECTOR_BIT_F2  0x0200
+#define        PCIX_INT_VECTOR_BIT_F3  0x0400
+#define        PCIX_INT_VECTOR_BIT_F4  0x0800
+#define        PCIX_INT_VECTOR_BIT_F5  0x1000
+#define        PCIX_INT_VECTOR_BIT_F6  0x2000
+#define        PCIX_INT_VECTOR_BIT_F7  0x4000
+
+struct qlcnic_legacy_intr_set {
+       u32     int_vec_bit;
+       u32     tgt_status_reg;
+       u32     tgt_mask_reg;
+       u32     pci_int_reg;
+};
+
+#define QLCNIC_MSIX_BASE       0x132110
+#define QLCNIC_MAX_VLAN_FILTERS        64
+
+#define FLASH_ROM_WINDOW       0x42110030
+#define FLASH_ROM_DATA         0x42150000
+
+#define QLCNIC_FW_DUMP_REG1    0x00130060
+#define QLCNIC_FW_DUMP_REG2    0x001e0000
+#define QLCNIC_FLASH_SEM2_LK   0x0013C010
+#define QLCNIC_FLASH_SEM2_ULK  0x0013C014
+#define QLCNIC_FLASH_LOCK_ID   0x001B2100
+
+/* PCI function operational mode */
+enum {
+       QLCNIC_MGMT_FUNC        = 0,
+       QLCNIC_PRIV_FUNC        = 1,
+       QLCNIC_NON_PRIV_FUNC    = 2,
+       QLCNIC_SRIOV_PF_FUNC    = 3,
+       QLCNIC_SRIOV_VF_FUNC    = 4,
+       QLCNIC_UNKNOWN_FUNC_MODE = 5
+};
+
+enum {
+       QLCNIC_PORT_DEFAULTS    = 0,
+       QLCNIC_ADD_VLAN = 1,
+       QLCNIC_DEL_VLAN = 2
+};
+
+#define QLC_DEV_DRV_DEFAULT 0x11111111
+
+#define LSB(x) ((uint8_t)(x))
+#define MSB(x) ((uint8_t)((uint16_t)(x) >> 8))
+
+#define LSW(x)  ((uint16_t)((uint32_t)(x)))
+#define MSW(x)  ((uint16_t)((uint32_t)(x) >> 16))
+
+#define LSD(x)  ((uint32_t)((uint64_t)(x)))
+#define MSD(x)  ((uint32_t)((((uint64_t)(x)) >> 16) >> 16))
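Illustrative use of the word-splitting macros above, e.g. breaking a 64-bit DMA address into the two 32-bit halves a register pair expects:

        uint64_t addr = 0x123456789abcdef0ULL;   /* hypothetical address */
        uint32_t lo = LSD(addr);                 /* 0x9abcdef0 */
        uint32_t hi = MSD(addr);                 /* 0x12345678 */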
+
+#define QLCNIC_MS_CTRL                 0x41000090
+#define QLCNIC_MS_ADDR_LO              0x41000094
+#define QLCNIC_MS_ADDR_HI              0x41000098
+#define QLCNIC_MS_WRTDATA_LO           0x410000A0
+#define QLCNIC_MS_WRTDATA_HI           0x410000A4
+#define QLCNIC_MS_WRTDATA_ULO          0x410000B0
+#define QLCNIC_MS_WRTDATA_UHI          0x410000B4
+#define QLCNIC_MS_RDDATA_LO            0x410000A8
+#define QLCNIC_MS_RDDATA_HI            0x410000AC
+#define QLCNIC_MS_RDDATA_ULO           0x410000B8
+#define QLCNIC_MS_RDDATA_UHI           0x410000BC
+
+#define QLCNIC_TA_WRITE_ENABLE (TA_CTL_ENABLE | TA_CTL_WRITE)
+#define QLCNIC_TA_WRITE_START  (TA_CTL_START | TA_CTL_ENABLE | TA_CTL_WRITE)
+#define QLCNIC_TA_START_ENABLE (TA_CTL_START | TA_CTL_ENABLE)
+
+#define        QLCNIC_LEGACY_INTR_CONFIG                                       \
+{                                                                      \
+       {                                                               \
+               .int_vec_bit    =       PCIX_INT_VECTOR_BIT_F0,         \
+               .tgt_status_reg =       ISR_INT_TARGET_STATUS,          \
+               .tgt_mask_reg   =       ISR_INT_TARGET_MASK, },         \
+                                                                       \
+       {                                                               \
+               .int_vec_bit    =       PCIX_INT_VECTOR_BIT_F1,         \
+               .tgt_status_reg =       ISR_INT_TARGET_STATUS_F1,       \
+               .tgt_mask_reg   =       ISR_INT_TARGET_MASK_F1, },      \
+                                                                       \
+       {                                                               \
+               .int_vec_bit    =       PCIX_INT_VECTOR_BIT_F2,         \
+               .tgt_status_reg =       ISR_INT_TARGET_STATUS_F2,       \
+               .tgt_mask_reg   =       ISR_INT_TARGET_MASK_F2, },      \
+                                                                       \
+       {                                                               \
+               .int_vec_bit    =       PCIX_INT_VECTOR_BIT_F3,         \
+               .tgt_status_reg =       ISR_INT_TARGET_STATUS_F3,       \
+               .tgt_mask_reg   =       ISR_INT_TARGET_MASK_F3, },      \
+                                                                       \
+       {                                                               \
+               .int_vec_bit    =       PCIX_INT_VECTOR_BIT_F4,         \
+               .tgt_status_reg =       ISR_INT_TARGET_STATUS_F4,       \
+               .tgt_mask_reg   =       ISR_INT_TARGET_MASK_F4, },      \
+                                                                       \
+       {                                                               \
+               .int_vec_bit    =       PCIX_INT_VECTOR_BIT_F5,         \
+               .tgt_status_reg =       ISR_INT_TARGET_STATUS_F5,       \
+               .tgt_mask_reg   =       ISR_INT_TARGET_MASK_F5, },      \
+                                                                       \
+       {                                                               \
+               .int_vec_bit    =       PCIX_INT_VECTOR_BIT_F6,         \
+               .tgt_status_reg =       ISR_INT_TARGET_STATUS_F6,       \
+               .tgt_mask_reg   =       ISR_INT_TARGET_MASK_F6, },      \
+                                                                       \
+       {                                                               \
+               .int_vec_bit    =       PCIX_INT_VECTOR_BIT_F7,         \
+               .tgt_status_reg =       ISR_INT_TARGET_STATUS_F7,       \
+               .tgt_mask_reg   =       ISR_INT_TARGET_MASK_F7, },      \
+}
+
+/* NIU REGS */
+
+#define _qlcnic_crb_get_bit(var, bit)  ((var >> bit) & 0x1)
+
+/*
+ * NIU GB MAC Config Register 0 (applies to GB0, GB1, GB2, GB3)
+ *
+ *     Bit 0 : enable_tx => 1:enable frame xmit, 0:disable
+ *     Bit 1 : tx_synced => R/O: xmit enable synched to xmit stream
+ *     Bit 2 : enable_rx => 1:enable frame recv, 0:disable
+ *     Bit 3 : rx_synced => R/O: recv enable synched to recv stream
+ *     Bit 4 : tx_flowctl => 1:enable pause frame generation, 0:disable
+ *     Bit 5 : rx_flowctl => 1:act on recv'd pause frames, 0:ignore
+ *     Bit 8 : loopback => 1:loop MAC xmits to MAC recvs, 0:normal
+ *     Bit 16: tx_reset_pb => 1:reset frame xmit protocol blk, 0:no-op
+ *     Bit 17: rx_reset_pb => 1:reset frame recv protocol blk, 0:no-op
+ *     Bit 18: tx_reset_mac => 1:reset data/ctl multiplexer blk, 0:no-op
+ *     Bit 19: rx_reset_mac => 1:reset ctl frames & timers blk, 0:no-op
+ *     Bit 31: soft_reset => 1:reset the MAC and the SERDES, 0:no-op
+ */
+#define qlcnic_gb_rx_flowctl(config_word)      \
+       ((config_word) |= 1 << 5)
+#define qlcnic_gb_get_rx_flowctl(config_word)  \
+       _qlcnic_crb_get_bit((config_word), 5)
+#define qlcnic_gb_unset_rx_flowctl(config_word)        \
+       ((config_word) &= ~(1 << 5))
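For readers skimming this header, a standalone sketch of the set/query/clear pattern these helpers implement (the macro names and starting value below are invented for illustration; they simply mirror the bit-5 rx_flowctl helpers above):

    #include <stdio.h>
    #include <stdint.h>

    /* Illustrative copies of the bit-5 (rx_flowctl) helpers defined above. */
    #define set_rx_flowctl(w)   ((w) |= 1u << 5)
    #define get_rx_flowctl(w)   (((w) >> 5) & 0x1u)
    #define unset_rx_flowctl(w) ((w) &= ~(1u << 5))

    int main(void)
    {
        uint32_t cfg = 0;        /* pretend this was read from the GB MAC config register */

        set_rx_flowctl(cfg);     /* honour received pause frames */
        printf("rx_flowctl=%u cfg=0x%08x\n", get_rx_flowctl(cfg), cfg);

        unset_rx_flowctl(cfg);   /* ignore received pause frames */
        printf("rx_flowctl=%u cfg=0x%08x\n", get_rx_flowctl(cfg), cfg);
        return 0;
    }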
+
+/*
+ * NIU GB Pause Ctl Register
+ */
+
+#define qlcnic_gb_set_gb0_mask(config_word)    \
+       ((config_word) |= 1 << 0)
+#define qlcnic_gb_set_gb1_mask(config_word)    \
+       ((config_word) |= 1 << 2)
+#define qlcnic_gb_set_gb2_mask(config_word)    \
+       ((config_word) |= 1 << 4)
+#define qlcnic_gb_set_gb3_mask(config_word)    \
+       ((config_word) |= 1 << 6)
+
+#define qlcnic_gb_get_gb0_mask(config_word)    \
+       _qlcnic_crb_get_bit((config_word), 0)
+#define qlcnic_gb_get_gb1_mask(config_word)    \
+       _qlcnic_crb_get_bit((config_word), 2)
+#define qlcnic_gb_get_gb2_mask(config_word)    \
+       _qlcnic_crb_get_bit((config_word), 4)
+#define qlcnic_gb_get_gb3_mask(config_word)    \
+       _qlcnic_crb_get_bit((config_word), 6)
+
+#define qlcnic_gb_unset_gb0_mask(config_word)  \
+       ((config_word) &= ~(1 << 0))
+#define qlcnic_gb_unset_gb1_mask(config_word)  \
+       ((config_word) &= ~(1 << 2))
+#define qlcnic_gb_unset_gb2_mask(config_word)  \
+       ((config_word) &= ~(1 << 4))
+#define qlcnic_gb_unset_gb3_mask(config_word)  \
+       ((config_word) &= ~(1 << 6))
+
+/*
+ * NIU XG Pause Ctl Register
+ *
+ *      Bit 0       : xg0_mask => 1:disable tx pause frames
+ *      Bit 1       : xg0_request => 1:request single pause frame
+ *      Bit 2       : xg0_on_off => 1:request is pause on, 0:off
+ *      Bit 3       : xg1_mask => 1:disable tx pause frames
+ *      Bit 4       : xg1_request => 1:request single pause frame
+ *      Bit 5       : xg1_on_off => 1:request is pause on, 0:off
+ */
+
+#define qlcnic_xg_set_xg0_mask(config_word)    \
+       ((config_word) |= 1 << 0)
+#define qlcnic_xg_set_xg1_mask(config_word)    \
+       ((config_word) |= 1 << 3)
+
+#define qlcnic_xg_get_xg0_mask(config_word)    \
+       _qlcnic_crb_get_bit((config_word), 0)
+#define qlcnic_xg_get_xg1_mask(config_word)    \
+       _qlcnic_crb_get_bit((config_word), 3)
+
+#define qlcnic_xg_unset_xg0_mask(config_word)  \
+       ((config_word) &= ~(1 << 0))
+#define qlcnic_xg_unset_xg1_mask(config_word)  \
+       ((config_word) &= ~(1 << 3))
+
+/*
+ * PHY-Specific MII control/status registers.
+ */
+#define QLCNIC_NIU_GB_MII_MGMT_ADDR_AUTONEG            4
+#define QLCNIC_NIU_GB_MII_MGMT_ADDR_PHY_STATUS         17
+
+/*
+ * PHY-Specific Status Register (reg 17).
+ *
+ * Bit 0      : jabber => 1:jabber detected, 0:not
+ * Bit 1      : polarity => 1:polarity reversed, 0:normal
+ * Bit 2      : recvpause => 1:receive pause enabled, 0:disabled
+ * Bit 3      : xmitpause => 1:transmit pause enabled, 0:disabled
+ * Bit 4      : energydetect => 1:sleep, 0:active
+ * Bit 5      : downshift => 1:downshift, 0:no downshift
+ * Bit 6      : crossover => 1:MDIX (crossover), 0:MDI (no crossover)
+ * Bits 7-9   : cablelen => not valid in 10Mb/s mode
+ *                     0:<50m, 1:50-80m, 2:80-110m, 3:110-140m, 4:>140m
+ * Bit 10     : link => 1:link up, 0:link down
+ * Bit 11     : resolved => 1:speed and duplex resolved, 0:not yet
+ * Bit 12     : pagercvd => 1:page received, 0:page not received
+ * Bit 13     : duplex => 1:full duplex, 0:half duplex
+ * Bits 14-15 : speed => 0:10Mb/s, 1:100Mb/s, 2:1000Mb/s, 3:rsvd
+ */
+
+#define qlcnic_get_phy_speed(config_word) (((config_word) >> 14) & 0x03)
+
+#define qlcnic_set_phy_speed(config_word, val) \
+               ((config_word) |= ((val & 0x03) << 14))
+#define qlcnic_set_phy_duplex(config_word)     \
+               ((config_word) |= 1 << 13)
+#define qlcnic_clear_phy_duplex(config_word)   \
+               ((config_word) &= ~(1 << 13))
+
+#define qlcnic_get_phy_link(config_word)       \
+               _qlcnic_crb_get_bit(config_word, 10)
+#define qlcnic_get_phy_duplex(config_word)     \
+               _qlcnic_crb_get_bit(config_word, 13)
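A minimal userspace sketch of decoding this status word; the sample value 0xAC00 is invented (per the bit layout above it means link up, full duplex, 1000Mb/s):

    #include <stdio.h>
    #include <stdint.h>

    static unsigned int get_bit(uint32_t v, unsigned int b)
    {
        return (v >> b) & 0x1u;
    }

    int main(void)
    {
        uint32_t status = 0xAC00;   /* invented example readout of reg 17 */
        static const char * const speeds[] = { "10Mb/s", "100Mb/s", "1000Mb/s", "rsvd" };

        printf("link=%u duplex=%s speed=%s\n",
               get_bit(status, 10),
               get_bit(status, 13) ? "full" : "half",
               speeds[(status >> 14) & 0x3u]);
        return 0;
    }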
+
+#define QLCNIC_NIU_NON_PROMISC_MODE    0
+#define QLCNIC_NIU_PROMISC_MODE                1
+#define QLCNIC_NIU_ALLMULTI_MODE       2
+
+#define QLCNIC_PCIE_SEM_TIMEOUT        10000
+
+struct crb_128M_2M_sub_block_map {
+       unsigned valid;
+       unsigned start_128M;
+       unsigned end_128M;
+       unsigned start_2M;
+};
+
+struct crb_128M_2M_block_map{
+       struct crb_128M_2M_sub_block_map sub_block[16];
+};
+#endif                         /* __QLCNIC_HDR_H_ */
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
new file mode 100644 (file)
index 0000000..509b596
--- /dev/null
@@ -0,0 +1,1710 @@
+/*
+ * QLogic qlcnic NIC Driver
+ * Copyright (c) 2009-2013 QLogic Corporation
+ *
+ * See LICENSE.qlcnic for copyright and licensing details.
+ */
+
+#include <linux/slab.h>
+#include <net/ip.h>
+#include <linux/bitops.h>
+
+#include "qlcnic.h"
+#include "qlcnic_hdr.h"
+
+#define MASK(n) ((1ULL<<(n))-1)
+#define OCM_WIN_P3P(addr) (addr & 0xffc0000)
+
+#define GET_MEM_OFFS_2M(addr) (addr & MASK(18))
+
+#define CRB_BLK(off)   ((off >> 20) & 0x3f)
+#define CRB_SUBBLK(off)        ((off >> 16) & 0xf)
+#define CRB_WINDOW_2M  (0x130060)
+#define CRB_HI(off)    ((crb_hub_agt[CRB_BLK(off)] << 20) | ((off) & 0xf0000))
+#define CRB_INDIRECT_2M        (0x1e0000UL)
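To make the indirect-window arithmetic easier to follow, a standalone sketch (the example offset is chosen arbitrarily) of how a 128M-map CRB offset is split into block, sub-block and low 16 bits; the crb_hub_agt[] lookup that CRB_HI() performs (the table appears further down) is omitted:

    #include <stdio.h>
    #include <stdint.h>

    #define MASK(n)         ((1ULL << (n)) - 1)
    #define CRB_BLK(off)    (((off) >> 20) & 0x3f)
    #define CRB_SUBBLK(off) (((off) >> 16) & 0xf)

    int main(void)
    {
        uint64_t off = 0x0110000;   /* arbitrary example: block 1 (PCIE), sub-block 1 */

        printf("block=%llu sub-block=%llu low16=0x%04llx\n",
               (unsigned long long)CRB_BLK(off),
               (unsigned long long)CRB_SUBBLK(off),
               (unsigned long long)(off & MASK(16)));
        return 0;
    }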
+
+struct qlcnic_ms_reg_ctrl {
+       u32 ocm_window;
+       u32 control;
+       u32 hi;
+       u32 low;
+       u32 rd[4];
+       u32 wd[4];
+       u64 off;
+};
+
+#ifndef readq
+static inline u64 readq(void __iomem *addr)
+{
+       return readl(addr) | (((u64) readl(addr + 4)) << 32LL);
+}
+#endif
+
+#ifndef writeq
+static inline void writeq(u64 val, void __iomem *addr)
+{
+       writel(((u32) (val)), (addr));
+       writel(((u32) (val >> 32)), (addr + 4));
+}
+#endif
+
+static struct crb_128M_2M_block_map
+crb_128M_2M_map[64] __cacheline_aligned_in_smp = {
+    {{{0, 0,         0,         0} } },                /* 0: PCI */
+    {{{1, 0x0100000, 0x0102000, 0x120000},     /* 1: PCIE */
+         {1, 0x0110000, 0x0120000, 0x130000},
+         {1, 0x0120000, 0x0122000, 0x124000},
+         {1, 0x0130000, 0x0132000, 0x126000},
+         {1, 0x0140000, 0x0142000, 0x128000},
+         {1, 0x0150000, 0x0152000, 0x12a000},
+         {1, 0x0160000, 0x0170000, 0x110000},
+         {1, 0x0170000, 0x0172000, 0x12e000},
+         {0, 0x0000000, 0x0000000, 0x000000},
+         {0, 0x0000000, 0x0000000, 0x000000},
+         {0, 0x0000000, 0x0000000, 0x000000},
+         {0, 0x0000000, 0x0000000, 0x000000},
+         {0, 0x0000000, 0x0000000, 0x000000},
+         {0, 0x0000000, 0x0000000, 0x000000},
+         {1, 0x01e0000, 0x01e0800, 0x122000},
+         {0, 0x0000000, 0x0000000, 0x000000} } },
+       {{{1, 0x0200000, 0x0210000, 0x180000} } },/* 2: MN */
+    {{{0, 0,         0,         0} } },            /* 3: */
+    {{{1, 0x0400000, 0x0401000, 0x169000} } },/* 4: P2NR1 */
+    {{{1, 0x0500000, 0x0510000, 0x140000} } },/* 5: SRE   */
+    {{{1, 0x0600000, 0x0610000, 0x1c0000} } },/* 6: NIU   */
+    {{{1, 0x0700000, 0x0704000, 0x1b8000} } },/* 7: QM    */
+    {{{1, 0x0800000, 0x0802000, 0x170000},  /* 8: SQM0  */
+      {0, 0x0000000, 0x0000000, 0x000000},
+      {0, 0x0000000, 0x0000000, 0x000000},
+      {0, 0x0000000, 0x0000000, 0x000000},
+      {0, 0x0000000, 0x0000000, 0x000000},
+      {0, 0x0000000, 0x0000000, 0x000000},
+      {0, 0x0000000, 0x0000000, 0x000000},
+      {0, 0x0000000, 0x0000000, 0x000000},
+      {0, 0x0000000, 0x0000000, 0x000000},
+      {0, 0x0000000, 0x0000000, 0x000000},
+      {0, 0x0000000, 0x0000000, 0x000000},
+      {0, 0x0000000, 0x0000000, 0x000000},
+      {0, 0x0000000, 0x0000000, 0x000000},
+      {0, 0x0000000, 0x0000000, 0x000000},
+      {0, 0x0000000, 0x0000000, 0x000000},
+      {1, 0x08f0000, 0x08f2000, 0x172000} } },
+    {{{1, 0x0900000, 0x0902000, 0x174000},     /* 9: SQM1*/
+      {0, 0x0000000, 0x0000000, 0x000000},
+      {0, 0x0000000, 0x0000000, 0x000000},
+      {0, 0x0000000, 0x0000000, 0x000000},
+      {0, 0x0000000, 0x0000000, 0x000000},
+      {0, 0x0000000, 0x0000000, 0x000000},
+      {0, 0x0000000, 0x0000000, 0x000000},
+      {0, 0x0000000, 0x0000000, 0x000000},
+      {0, 0x0000000, 0x0000000, 0x000000},
+      {0, 0x0000000, 0x0000000, 0x000000},
+      {0, 0x0000000, 0x0000000, 0x000000},
+      {0, 0x0000000, 0x0000000, 0x000000},
+      {0, 0x0000000, 0x0000000, 0x000000},
+      {0, 0x0000000, 0x0000000, 0x000000},
+      {0, 0x0000000, 0x0000000, 0x000000},
+      {1, 0x09f0000, 0x09f2000, 0x176000} } },
+    {{{0, 0x0a00000, 0x0a02000, 0x178000},     /* 10: SQM2*/
+      {0, 0x0000000, 0x0000000, 0x000000},
+      {0, 0x0000000, 0x0000000, 0x000000},
+      {0, 0x0000000, 0x0000000, 0x000000},
+      {0, 0x0000000, 0x0000000, 0x000000},
+      {0, 0x0000000, 0x0000000, 0x000000},
+      {0, 0x0000000, 0x0000000, 0x000000},
+      {0, 0x0000000, 0x0000000, 0x000000},
+      {0, 0x0000000, 0x0000000, 0x000000},
+      {0, 0x0000000, 0x0000000, 0x000000},
+      {0, 0x0000000, 0x0000000, 0x000000},
+      {0, 0x0000000, 0x0000000, 0x000000},
+      {0, 0x0000000, 0x0000000, 0x000000},
+      {0, 0x0000000, 0x0000000, 0x000000},
+      {0, 0x0000000, 0x0000000, 0x000000},
+      {1, 0x0af0000, 0x0af2000, 0x17a000} } },
+    {{{0, 0x0b00000, 0x0b02000, 0x17c000},     /* 11: SQM3*/
+      {0, 0x0000000, 0x0000000, 0x000000},
+      {0, 0x0000000, 0x0000000, 0x000000},
+      {0, 0x0000000, 0x0000000, 0x000000},
+      {0, 0x0000000, 0x0000000, 0x000000},
+      {0, 0x0000000, 0x0000000, 0x000000},
+      {0, 0x0000000, 0x0000000, 0x000000},
+      {0, 0x0000000, 0x0000000, 0x000000},
+      {0, 0x0000000, 0x0000000, 0x000000},
+      {0, 0x0000000, 0x0000000, 0x000000},
+      {0, 0x0000000, 0x0000000, 0x000000},
+      {0, 0x0000000, 0x0000000, 0x000000},
+      {0, 0x0000000, 0x0000000, 0x000000},
+      {0, 0x0000000, 0x0000000, 0x000000},
+      {0, 0x0000000, 0x0000000, 0x000000},
+      {1, 0x0bf0000, 0x0bf2000, 0x17e000} } },
+       {{{1, 0x0c00000, 0x0c04000, 0x1d4000} } },/* 12: I2Q */
+       {{{1, 0x0d00000, 0x0d04000, 0x1a4000} } },/* 13: TMR */
+       {{{1, 0x0e00000, 0x0e04000, 0x1a0000} } },/* 14: ROMUSB */
+       {{{1, 0x0f00000, 0x0f01000, 0x164000} } },/* 15: PEG4 */
+       {{{0, 0x1000000, 0x1004000, 0x1a8000} } },/* 16: XDMA */
+       {{{1, 0x1100000, 0x1101000, 0x160000} } },/* 17: PEG0 */
+       {{{1, 0x1200000, 0x1201000, 0x161000} } },/* 18: PEG1 */
+       {{{1, 0x1300000, 0x1301000, 0x162000} } },/* 19: PEG2 */
+       {{{1, 0x1400000, 0x1401000, 0x163000} } },/* 20: PEG3 */
+       {{{1, 0x1500000, 0x1501000, 0x165000} } },/* 21: P2ND */
+       {{{1, 0x1600000, 0x1601000, 0x166000} } },/* 22: P2NI */
+       {{{0, 0,         0,         0} } },     /* 23: */
+       {{{0, 0,         0,         0} } },     /* 24: */
+       {{{0, 0,         0,         0} } },     /* 25: */
+       {{{0, 0,         0,         0} } },     /* 26: */
+       {{{0, 0,         0,         0} } },     /* 27: */
+       {{{0, 0,         0,         0} } },     /* 28: */
+       {{{1, 0x1d00000, 0x1d10000, 0x190000} } },/* 29: MS */
+    {{{1, 0x1e00000, 0x1e01000, 0x16a000} } },/* 30: P2NR2 */
+    {{{1, 0x1f00000, 0x1f10000, 0x150000} } },/* 31: EPG */
+       {{{0} } },                              /* 32: PCI */
+       {{{1, 0x2100000, 0x2102000, 0x120000},  /* 33: PCIE */
+         {1, 0x2110000, 0x2120000, 0x130000},
+         {1, 0x2120000, 0x2122000, 0x124000},
+         {1, 0x2130000, 0x2132000, 0x126000},
+         {1, 0x2140000, 0x2142000, 0x128000},
+         {1, 0x2150000, 0x2152000, 0x12a000},
+         {1, 0x2160000, 0x2170000, 0x110000},
+         {1, 0x2170000, 0x2172000, 0x12e000},
+         {0, 0x0000000, 0x0000000, 0x000000},
+         {0, 0x0000000, 0x0000000, 0x000000},
+         {0, 0x0000000, 0x0000000, 0x000000},
+         {0, 0x0000000, 0x0000000, 0x000000},
+         {0, 0x0000000, 0x0000000, 0x000000},
+         {0, 0x0000000, 0x0000000, 0x000000},
+         {0, 0x0000000, 0x0000000, 0x000000},
+         {0, 0x0000000, 0x0000000, 0x000000} } },
+       {{{1, 0x2200000, 0x2204000, 0x1b0000} } },/* 34: CAM */
+       {{{0} } },                              /* 35: */
+       {{{0} } },                              /* 36: */
+       {{{0} } },                              /* 37: */
+       {{{0} } },                              /* 38: */
+       {{{0} } },                              /* 39: */
+       {{{1, 0x2800000, 0x2804000, 0x1a4000} } },/* 40: TMR */
+       {{{1, 0x2900000, 0x2901000, 0x16b000} } },/* 41: P2NR3 */
+       {{{1, 0x2a00000, 0x2a00400, 0x1ac400} } },/* 42: RPMX1 */
+       {{{1, 0x2b00000, 0x2b00400, 0x1ac800} } },/* 43: RPMX2 */
+       {{{1, 0x2c00000, 0x2c00400, 0x1acc00} } },/* 44: RPMX3 */
+       {{{1, 0x2d00000, 0x2d00400, 0x1ad000} } },/* 45: RPMX4 */
+       {{{1, 0x2e00000, 0x2e00400, 0x1ad400} } },/* 46: RPMX5 */
+       {{{1, 0x2f00000, 0x2f00400, 0x1ad800} } },/* 47: RPMX6 */
+       {{{1, 0x3000000, 0x3000400, 0x1adc00} } },/* 48: RPMX7 */
+       {{{0, 0x3100000, 0x3104000, 0x1a8000} } },/* 49: XDMA */
+       {{{1, 0x3200000, 0x3204000, 0x1d4000} } },/* 50: I2Q */
+       {{{1, 0x3300000, 0x3304000, 0x1a0000} } },/* 51: ROMUSB */
+       {{{0} } },                              /* 52: */
+       {{{1, 0x3500000, 0x3500400, 0x1ac000} } },/* 53: RPMX0 */
+       {{{1, 0x3600000, 0x3600400, 0x1ae000} } },/* 54: RPMX8 */
+       {{{1, 0x3700000, 0x3700400, 0x1ae400} } },/* 55: RPMX9 */
+       {{{1, 0x3800000, 0x3804000, 0x1d0000} } },/* 56: OCM0 */
+       {{{1, 0x3900000, 0x3904000, 0x1b4000} } },/* 57: CRYPTO */
+       {{{1, 0x3a00000, 0x3a04000, 0x1d8000} } },/* 58: SMB */
+       {{{0} } },                              /* 59: I2C0 */
+       {{{0} } },                              /* 60: I2C1 */
+       {{{1, 0x3d00000, 0x3d04000, 0x1d8000} } },/* 61: LPC */
+       {{{1, 0x3e00000, 0x3e01000, 0x167000} } },/* 62: P2NC */
+       {{{1, 0x3f00000, 0x3f01000, 0x168000} } }       /* 63: P2NR0 */
+};
+
+/*
+ * top 12 bits of crb internal address (hub, agent)
+ */
+static const unsigned crb_hub_agt[64] = {
+       0,
+       QLCNIC_HW_CRB_HUB_AGT_ADR_PS,
+       QLCNIC_HW_CRB_HUB_AGT_ADR_MN,
+       QLCNIC_HW_CRB_HUB_AGT_ADR_MS,
+       0,
+       QLCNIC_HW_CRB_HUB_AGT_ADR_SRE,
+       QLCNIC_HW_CRB_HUB_AGT_ADR_NIU,
+       QLCNIC_HW_CRB_HUB_AGT_ADR_QMN,
+       QLCNIC_HW_CRB_HUB_AGT_ADR_SQN0,
+       QLCNIC_HW_CRB_HUB_AGT_ADR_SQN1,
+       QLCNIC_HW_CRB_HUB_AGT_ADR_SQN2,
+       QLCNIC_HW_CRB_HUB_AGT_ADR_SQN3,
+       QLCNIC_HW_CRB_HUB_AGT_ADR_I2Q,
+       QLCNIC_HW_CRB_HUB_AGT_ADR_TIMR,
+       QLCNIC_HW_CRB_HUB_AGT_ADR_ROMUSB,
+       QLCNIC_HW_CRB_HUB_AGT_ADR_PGN4,
+       QLCNIC_HW_CRB_HUB_AGT_ADR_XDMA,
+       QLCNIC_HW_CRB_HUB_AGT_ADR_PGN0,
+       QLCNIC_HW_CRB_HUB_AGT_ADR_PGN1,
+       QLCNIC_HW_CRB_HUB_AGT_ADR_PGN2,
+       QLCNIC_HW_CRB_HUB_AGT_ADR_PGN3,
+       QLCNIC_HW_CRB_HUB_AGT_ADR_PGND,
+       QLCNIC_HW_CRB_HUB_AGT_ADR_PGNI,
+       QLCNIC_HW_CRB_HUB_AGT_ADR_PGS0,
+       QLCNIC_HW_CRB_HUB_AGT_ADR_PGS1,
+       QLCNIC_HW_CRB_HUB_AGT_ADR_PGS2,
+       QLCNIC_HW_CRB_HUB_AGT_ADR_PGS3,
+       0,
+       QLCNIC_HW_CRB_HUB_AGT_ADR_PGSI,
+       QLCNIC_HW_CRB_HUB_AGT_ADR_SN,
+       0,
+       QLCNIC_HW_CRB_HUB_AGT_ADR_EG,
+       0,
+       QLCNIC_HW_CRB_HUB_AGT_ADR_PS,
+       QLCNIC_HW_CRB_HUB_AGT_ADR_CAM,
+       0,
+       0,
+       0,
+       0,
+       0,
+       QLCNIC_HW_CRB_HUB_AGT_ADR_TIMR,
+       0,
+       QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX1,
+       QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX2,
+       QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX3,
+       QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX4,
+       QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX5,
+       QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX6,
+       QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX7,
+       QLCNIC_HW_CRB_HUB_AGT_ADR_XDMA,
+       QLCNIC_HW_CRB_HUB_AGT_ADR_I2Q,
+       QLCNIC_HW_CRB_HUB_AGT_ADR_ROMUSB,
+       0,
+       QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX0,
+       QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX8,
+       QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX9,
+       QLCNIC_HW_CRB_HUB_AGT_ADR_OCM0,
+       0,
+       QLCNIC_HW_CRB_HUB_AGT_ADR_SMB,
+       QLCNIC_HW_CRB_HUB_AGT_ADR_I2C0,
+       QLCNIC_HW_CRB_HUB_AGT_ADR_I2C1,
+       0,
+       QLCNIC_HW_CRB_HUB_AGT_ADR_PGNC,
+       0,
+};
+
+static const u32 msi_tgt_status[8] = {
+       ISR_INT_TARGET_STATUS, ISR_INT_TARGET_STATUS_F1,
+       ISR_INT_TARGET_STATUS_F2, ISR_INT_TARGET_STATUS_F3,
+       ISR_INT_TARGET_STATUS_F4, ISR_INT_TARGET_STATUS_F5,
+       ISR_INT_TARGET_STATUS_F6, ISR_INT_TARGET_STATUS_F7
+};
+
+/*  PCI Windowing for DDR regions.  */
+
+#define QLCNIC_PCIE_SEM_TIMEOUT        10000
+
+static void qlcnic_read_window_reg(u32 addr, void __iomem *bar0, u32 *data)
+{
+       u32 dest;
+       void __iomem *val;
+
+       dest = addr & 0xFFFF0000;
+       val = bar0 + QLCNIC_FW_DUMP_REG1;
+       writel(dest, val);
+       readl(val);
+       val = bar0 + QLCNIC_FW_DUMP_REG2 + LSW(addr);
+       *data = readl(val);
+}
+
+static void qlcnic_write_window_reg(u32 addr, void __iomem *bar0, u32 data)
+{
+       u32 dest;
+       void __iomem *val;
+
+       dest = addr & 0xFFFF0000;
+       val = bar0 + QLCNIC_FW_DUMP_REG1;
+       writel(dest, val);
+       readl(val);
+       val = bar0 + QLCNIC_FW_DUMP_REG2 + LSW(addr);
+       writel(data, val);
+       readl(val);
+}
+
+int
+qlcnic_pcie_sem_lock(struct qlcnic_adapter *adapter, int sem, u32 id_reg)
+{
+       int timeout = 0, err = 0, done = 0;
+
+       while (!done) {
+               done = QLCRD32(adapter, QLCNIC_PCIE_REG(PCIE_SEM_LOCK(sem)),
+                              &err);
+               if (done == 1)
+                       break;
+               if (++timeout >= QLCNIC_PCIE_SEM_TIMEOUT) {
+                       if (id_reg) {
+                               done = QLCRD32(adapter, id_reg, &err);
+                               if (done != -1)
+                                       dev_err(&adapter->pdev->dev,
+                                               "Failed to acquire sem=%d lock held by=%d\n",
+                                               sem, done);
+                               else
+                                       dev_err(&adapter->pdev->dev,
+                                               "Failed to acquire sem=%d lock",
+                                               sem);
+                       } else {
+                               dev_err(&adapter->pdev->dev,
+                                       "Failed to acquire sem=%d lock", sem);
+                       }
+                       return -EIO;
+               }
+               usleep_range(1000, 1500);
+       }
+
+       if (id_reg)
+               QLCWR32(adapter, id_reg, adapter->portnum);
+
+       return 0;
+}
+
+void
+qlcnic_pcie_sem_unlock(struct qlcnic_adapter *adapter, int sem)
+{
+       int err = 0;
+
+       QLCRD32(adapter, QLCNIC_PCIE_REG(PCIE_SEM_UNLOCK(sem)), &err);
+}
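A hedged usage sketch, not taken from this patch: callers bracket the protected hardware access with this lock/unlock pair. Passing 0 as id_reg skips the owner-ID bookkeeping, which qlcnic_pcie_sem_lock() explicitly allows; the semaphore index 2 is chosen purely for illustration.

    int err;

    err = qlcnic_pcie_sem_lock(adapter, 2, 0);   /* semaphore index chosen for illustration */
    if (err)
            return err;

    /* ... access the hardware resource guarded by semaphore 2 ... */

    qlcnic_pcie_sem_unlock(adapter, 2);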
+
+int qlcnic_ind_rd(struct qlcnic_adapter *adapter, u32 addr)
+{
+       int err = 0;
+       u32 data;
+
+       if (qlcnic_82xx_check(adapter))
+               qlcnic_read_window_reg(addr, adapter->ahw->pci_base0, &data);
+       else {
+               data = QLCRD32(adapter, addr, &err);
+               if (err == -EIO)
+                       return err;
+       }
+       return data;
+}
+
+int qlcnic_ind_wr(struct qlcnic_adapter *adapter, u32 addr, u32 data)
+{
+       int ret = 0;
+
+       if (qlcnic_82xx_check(adapter))
+               qlcnic_write_window_reg(addr, adapter->ahw->pci_base0, data);
+       else
+               ret = qlcnic_83xx_wrt_reg_indirect(adapter, addr, data);
+
+       return ret;
+}
+
+static int
+qlcnic_send_cmd_descs(struct qlcnic_adapter *adapter,
+               struct cmd_desc_type0 *cmd_desc_arr, int nr_desc)
+{
+       u32 i, producer;
+       struct qlcnic_cmd_buffer *pbuf;
+       struct cmd_desc_type0 *cmd_desc;
+       struct qlcnic_host_tx_ring *tx_ring;
+
+       i = 0;
+
+       if (!test_bit(__QLCNIC_FW_ATTACHED, &adapter->state))
+               return -EIO;
+
+       tx_ring = &adapter->tx_ring[0];
+       __netif_tx_lock_bh(tx_ring->txq);
+
+       producer = tx_ring->producer;
+
+       if (nr_desc >= qlcnic_tx_avail(tx_ring)) {
+               netif_tx_stop_queue(tx_ring->txq);
+               smp_mb();
+               if (qlcnic_tx_avail(tx_ring) > nr_desc) {
+                       if (qlcnic_tx_avail(tx_ring) > TX_STOP_THRESH)
+                               netif_tx_wake_queue(tx_ring->txq);
+               } else {
+                       adapter->stats.xmit_off++;
+                       __netif_tx_unlock_bh(tx_ring->txq);
+                       return -EBUSY;
+               }
+       }
+
+       do {
+               cmd_desc = &cmd_desc_arr[i];
+
+               pbuf = &tx_ring->cmd_buf_arr[producer];
+               pbuf->skb = NULL;
+               pbuf->frag_count = 0;
+
+               memcpy(&tx_ring->desc_head[producer],
+                      cmd_desc, sizeof(struct cmd_desc_type0));
+
+               producer = get_next_index(producer, tx_ring->num_desc);
+               i++;
+
+       } while (i != nr_desc);
+
+       tx_ring->producer = producer;
+
+       qlcnic_update_cmd_producer(tx_ring);
+
+       __netif_tx_unlock_bh(tx_ring->txq);
+
+       return 0;
+}
+
+int qlcnic_82xx_sre_macaddr_change(struct qlcnic_adapter *adapter, u8 *addr,
+                                  u16 vlan_id, u8 op)
+{
+       struct qlcnic_nic_req req;
+       struct qlcnic_mac_req *mac_req;
+       struct qlcnic_vlan_req *vlan_req;
+       u64 word;
+
+       memset(&req, 0, sizeof(struct qlcnic_nic_req));
+       req.qhdr = cpu_to_le64(QLCNIC_REQUEST << 23);
+
+       word = QLCNIC_MAC_EVENT | ((u64)adapter->portnum << 16);
+       req.req_hdr = cpu_to_le64(word);
+
+       mac_req = (struct qlcnic_mac_req *)&req.words[0];
+       mac_req->op = op;
+       memcpy(mac_req->mac_addr, addr, ETH_ALEN);
+
+       vlan_req = (struct qlcnic_vlan_req *)&req.words[1];
+       vlan_req->vlan_id = cpu_to_le16(vlan_id);
+
+       return qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
+}
+
+int qlcnic_nic_del_mac(struct qlcnic_adapter *adapter, const u8 *addr)
+{
+       struct qlcnic_mac_vlan_list *cur;
+       struct list_head *head;
+       int err = -EINVAL;
+
+       /* Delete MAC from the existing list */
+       list_for_each(head, &adapter->mac_list) {
+               cur = list_entry(head, struct qlcnic_mac_vlan_list, list);
+               if (ether_addr_equal(addr, cur->mac_addr)) {
+                       err = qlcnic_sre_macaddr_change(adapter, cur->mac_addr,
+                                                       0, QLCNIC_MAC_DEL);
+                       if (err)
+                               return err;
+                       list_del(&cur->list);
+                       kfree(cur);
+                       return err;
+               }
+       }
+       return err;
+}
+
+int qlcnic_nic_add_mac(struct qlcnic_adapter *adapter, const u8 *addr, u16 vlan,
+                      enum qlcnic_mac_type mac_type)
+{
+       struct qlcnic_mac_vlan_list *cur;
+       struct list_head *head;
+
+       /* Check whether this MAC/VLAN pair is already in the list */
+       list_for_each(head, &adapter->mac_list) {
+               cur = list_entry(head, struct qlcnic_mac_vlan_list, list);
+               if (ether_addr_equal(addr, cur->mac_addr) &&
+                   cur->vlan_id == vlan)
+                       return 0;
+       }
+
+       cur = kzalloc(sizeof(*cur), GFP_ATOMIC);
+       if (cur == NULL)
+               return -ENOMEM;
+
+       memcpy(cur->mac_addr, addr, ETH_ALEN);
+
+       if (qlcnic_sre_macaddr_change(adapter,
+                               cur->mac_addr, vlan, QLCNIC_MAC_ADD)) {
+               kfree(cur);
+               return -EIO;
+       }
+
+       cur->vlan_id = vlan;
+       cur->mac_type = mac_type;
+
+       list_add_tail(&cur->list, &adapter->mac_list);
+       return 0;
+}
+
+void qlcnic_flush_mcast_mac(struct qlcnic_adapter *adapter)
+{
+       struct qlcnic_mac_vlan_list *cur;
+       struct list_head *head, *tmp;
+
+       list_for_each_safe(head, tmp, &adapter->mac_list) {
+               cur = list_entry(head, struct qlcnic_mac_vlan_list, list);
+               if (cur->mac_type != QLCNIC_MULTICAST_MAC)
+                       continue;
+
+               qlcnic_sre_macaddr_change(adapter, cur->mac_addr,
+                                         cur->vlan_id, QLCNIC_MAC_DEL);
+               list_del(&cur->list);
+               kfree(cur);
+       }
+}
+
+static void __qlcnic_set_multi(struct net_device *netdev, u16 vlan)
+{
+       struct qlcnic_adapter *adapter = netdev_priv(netdev);
+       struct qlcnic_hardware_context *ahw = adapter->ahw;
+       struct netdev_hw_addr *ha;
+       static const u8 bcast_addr[ETH_ALEN] = {
+               0xff, 0xff, 0xff, 0xff, 0xff, 0xff
+       };
+       u32 mode = VPORT_MISS_MODE_DROP;
+
+       if (!test_bit(__QLCNIC_FW_ATTACHED, &adapter->state))
+               return;
+
+       qlcnic_nic_add_mac(adapter, adapter->mac_addr, vlan,
+                          QLCNIC_UNICAST_MAC);
+       qlcnic_nic_add_mac(adapter, bcast_addr, vlan, QLCNIC_BROADCAST_MAC);
+
+       if (netdev->flags & IFF_PROMISC) {
+               if (!(adapter->flags & QLCNIC_PROMISC_DISABLED))
+                       mode = VPORT_MISS_MODE_ACCEPT_ALL;
+       } else if ((netdev->flags & IFF_ALLMULTI) ||
+                  (netdev_mc_count(netdev) > ahw->max_mc_count)) {
+               mode = VPORT_MISS_MODE_ACCEPT_MULTI;
+       } else if (!netdev_mc_empty(netdev)) {
+               qlcnic_flush_mcast_mac(adapter);
+               netdev_for_each_mc_addr(ha, netdev)
+                       qlcnic_nic_add_mac(adapter, ha->addr, vlan,
+                                          QLCNIC_MULTICAST_MAC);
+       }
+
+       /* Configure unicast MAC addresses; if there is not enough space
+        * to store all of them, enable promiscuous mode instead.
+        */
+       if (netdev_uc_count(netdev) > ahw->max_uc_count) {
+               mode = VPORT_MISS_MODE_ACCEPT_ALL;
+       } else if (!netdev_uc_empty(netdev)) {
+               netdev_for_each_uc_addr(ha, netdev)
+                       qlcnic_nic_add_mac(adapter, ha->addr, vlan,
+                                          QLCNIC_UNICAST_MAC);
+       }
+
+       if (mode == VPORT_MISS_MODE_ACCEPT_ALL &&
+           !adapter->fdb_mac_learn) {
+               qlcnic_alloc_lb_filters_mem(adapter);
+               adapter->drv_mac_learn = 1;
+               if (adapter->flags & QLCNIC_ESWITCH_ENABLED)
+                       adapter->rx_mac_learn = true;
+       } else {
+               adapter->drv_mac_learn = 0;
+               adapter->rx_mac_learn = false;
+       }
+
+       qlcnic_nic_set_promisc(adapter, mode);
+}
+
+void qlcnic_set_multi(struct net_device *netdev)
+{
+       struct qlcnic_adapter *adapter = netdev_priv(netdev);
+
+       if (!test_bit(__QLCNIC_FW_ATTACHED, &adapter->state))
+               return;
+
+       if (qlcnic_sriov_vf_check(adapter))
+               qlcnic_sriov_vf_set_multi(netdev);
+       else
+               __qlcnic_set_multi(netdev, 0);
+}
+
+int qlcnic_82xx_nic_set_promisc(struct qlcnic_adapter *adapter, u32 mode)
+{
+       struct qlcnic_nic_req req;
+       u64 word;
+
+       memset(&req, 0, sizeof(struct qlcnic_nic_req));
+
+       req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);
+
+       word = QLCNIC_H2C_OPCODE_SET_MAC_RECEIVE_MODE |
+                       ((u64)adapter->portnum << 16);
+       req.req_hdr = cpu_to_le64(word);
+
+       req.words[0] = cpu_to_le64(mode);
+
+       return qlcnic_send_cmd_descs(adapter,
+                               (struct cmd_desc_type0 *)&req, 1);
+}
+
+void qlcnic_82xx_free_mac_list(struct qlcnic_adapter *adapter)
+{
+       struct list_head *head = &adapter->mac_list;
+       struct qlcnic_mac_vlan_list *cur;
+
+       while (!list_empty(head)) {
+               cur = list_entry(head->next, struct qlcnic_mac_vlan_list, list);
+               qlcnic_sre_macaddr_change(adapter,
+                               cur->mac_addr, 0, QLCNIC_MAC_DEL);
+               list_del(&cur->list);
+               kfree(cur);
+       }
+}
+
+void qlcnic_prune_lb_filters(struct qlcnic_adapter *adapter)
+{
+       struct qlcnic_filter *tmp_fil;
+       struct hlist_node *n;
+       struct hlist_head *head;
+       int i;
+       unsigned long expires;
+       u8 cmd;
+
+       for (i = 0; i < adapter->fhash.fbucket_size; i++) {
+               head = &(adapter->fhash.fhead[i]);
+               hlist_for_each_entry_safe(tmp_fil, n, head, fnode) {
+                       cmd =  tmp_fil->vlan_id ? QLCNIC_MAC_VLAN_DEL :
+                                                 QLCNIC_MAC_DEL;
+                       expires = tmp_fil->ftime + QLCNIC_FILTER_AGE * HZ;
+                       if (time_before(expires, jiffies)) {
+                               qlcnic_sre_macaddr_change(adapter,
+                                                         tmp_fil->faddr,
+                                                         tmp_fil->vlan_id,
+                                                         cmd);
+                               spin_lock_bh(&adapter->mac_learn_lock);
+                               adapter->fhash.fnum--;
+                               hlist_del(&tmp_fil->fnode);
+                               spin_unlock_bh(&adapter->mac_learn_lock);
+                               kfree(tmp_fil);
+                       }
+               }
+       }
+       for (i = 0; i < adapter->rx_fhash.fbucket_size; i++) {
+               head = &(adapter->rx_fhash.fhead[i]);
+
+               hlist_for_each_entry_safe(tmp_fil, n, head, fnode) {
+                       expires = tmp_fil->ftime + QLCNIC_FILTER_AGE * HZ;
+                       if (time_before(expires, jiffies)) {
+                               spin_lock_bh(&adapter->rx_mac_learn_lock);
+                               adapter->rx_fhash.fnum--;
+                               hlist_del(&tmp_fil->fnode);
+                               spin_unlock_bh(&adapter->rx_mac_learn_lock);
+                               kfree(tmp_fil);
+                       }
+               }
+       }
+}
+
+void qlcnic_delete_lb_filters(struct qlcnic_adapter *adapter)
+{
+       struct qlcnic_filter *tmp_fil;
+       struct hlist_node *n;
+       struct hlist_head *head;
+       int i;
+       u8 cmd;
+
+       for (i = 0; i < adapter->fhash.fbucket_size; i++) {
+               head = &(adapter->fhash.fhead[i]);
+               hlist_for_each_entry_safe(tmp_fil, n, head, fnode) {
+                       cmd =  tmp_fil->vlan_id ? QLCNIC_MAC_VLAN_DEL :
+                                                 QLCNIC_MAC_DEL;
+                       qlcnic_sre_macaddr_change(adapter,
+                                                 tmp_fil->faddr,
+                                                 tmp_fil->vlan_id,
+                                                 cmd);
+                       spin_lock_bh(&adapter->mac_learn_lock);
+                       adapter->fhash.fnum--;
+                       hlist_del(&tmp_fil->fnode);
+                       spin_unlock_bh(&adapter->mac_learn_lock);
+                       kfree(tmp_fil);
+               }
+       }
+}
+
+static int qlcnic_set_fw_loopback(struct qlcnic_adapter *adapter, u8 flag)
+{
+       struct qlcnic_nic_req req;
+       int rv;
+
+       memset(&req, 0, sizeof(struct qlcnic_nic_req));
+
+       req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);
+       req.req_hdr = cpu_to_le64(QLCNIC_H2C_OPCODE_CONFIG_LOOPBACK |
+               ((u64) adapter->portnum << 16) | ((u64) 0x1 << 32));
+
+       req.words[0] = cpu_to_le64(flag);
+
+       rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
+       if (rv != 0)
+               dev_err(&adapter->pdev->dev, "%sting loopback mode failed\n",
+                               flag ? "Set" : "Reset");
+       return rv;
+}
+
+int qlcnic_82xx_set_lb_mode(struct qlcnic_adapter *adapter, u8 mode)
+{
+       if (qlcnic_set_fw_loopback(adapter, mode))
+               return -EIO;
+
+       if (qlcnic_nic_set_promisc(adapter,
+                                  VPORT_MISS_MODE_ACCEPT_ALL)) {
+               qlcnic_set_fw_loopback(adapter, 0);
+               return -EIO;
+       }
+
+       msleep(1000);
+       return 0;
+}
+
+int qlcnic_82xx_clear_lb_mode(struct qlcnic_adapter *adapter, u8 mode)
+{
+       struct net_device *netdev = adapter->netdev;
+
+       mode = VPORT_MISS_MODE_DROP;
+       qlcnic_set_fw_loopback(adapter, 0);
+
+       if (netdev->flags & IFF_PROMISC)
+               mode = VPORT_MISS_MODE_ACCEPT_ALL;
+       else if (netdev->flags & IFF_ALLMULTI)
+               mode = VPORT_MISS_MODE_ACCEPT_MULTI;
+
+       qlcnic_nic_set_promisc(adapter, mode);
+       msleep(1000);
+       return 0;
+}
+
+int qlcnic_82xx_read_phys_port_id(struct qlcnic_adapter *adapter)
+{
+       u8 mac[ETH_ALEN];
+       int ret;
+
+       ret = qlcnic_get_mac_address(adapter, mac,
+                                    adapter->ahw->physical_port);
+       if (ret)
+               return ret;
+
+       memcpy(adapter->ahw->phys_port_id, mac, ETH_ALEN);
+       adapter->flags |= QLCNIC_HAS_PHYS_PORT_ID;
+
+       return 0;
+}
+
+int qlcnic_82xx_set_rx_coalesce(struct qlcnic_adapter *adapter)
+{
+       struct qlcnic_nic_req req;
+       int rv;
+
+       memset(&req, 0, sizeof(struct qlcnic_nic_req));
+
+       req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);
+
+       req.req_hdr = cpu_to_le64(QLCNIC_CONFIG_INTR_COALESCE |
+               ((u64) adapter->portnum << 16));
+
+       req.words[0] = cpu_to_le64(((u64) adapter->ahw->coal.flag) << 32);
+       req.words[2] = cpu_to_le64(adapter->ahw->coal.rx_packets |
+                       ((u64) adapter->ahw->coal.rx_time_us) << 16);
+       req.words[5] = cpu_to_le64(adapter->ahw->coal.timer_out |
+                       ((u64) adapter->ahw->coal.type) << 32 |
+                       ((u64) adapter->ahw->coal.sts_ring_mask) << 40);
+       rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
+       if (rv != 0)
+               dev_err(&adapter->netdev->dev,
+                       "Could not send interrupt coalescing parameters\n");
+
+       return rv;
+}
+
+/* Send the interrupt coalescing parameter set by ethtool to the card. */
+int qlcnic_82xx_config_intr_coalesce(struct qlcnic_adapter *adapter,
+                                    struct ethtool_coalesce *ethcoal)
+{
+       struct qlcnic_nic_intr_coalesce *coal = &adapter->ahw->coal;
+       int rv;
+
+       coal->flag = QLCNIC_INTR_DEFAULT;
+       coal->rx_time_us = ethcoal->rx_coalesce_usecs;
+       coal->rx_packets = ethcoal->rx_max_coalesced_frames;
+
+       rv = qlcnic_82xx_set_rx_coalesce(adapter);
+
+       if (rv)
+               netdev_err(adapter->netdev,
+                          "Failed to set Rx coalescing parameters\n");
+
+       return rv;
+}
+
+#define QLCNIC_ENABLE_IPV4_LRO         BIT_0
+#define QLCNIC_ENABLE_IPV6_LRO         (BIT_1 | BIT_9)
+
+int qlcnic_82xx_config_hw_lro(struct qlcnic_adapter *adapter, int enable)
+{
+       struct qlcnic_nic_req req;
+       u64 word;
+       int rv;
+
+       if (!test_bit(__QLCNIC_FW_ATTACHED, &adapter->state))
+               return 0;
+
+       memset(&req, 0, sizeof(struct qlcnic_nic_req));
+
+       req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);
+
+       word = QLCNIC_H2C_OPCODE_CONFIG_HW_LRO | ((u64)adapter->portnum << 16);
+       req.req_hdr = cpu_to_le64(word);
+
+       word = 0;
+       if (enable) {
+               word = QLCNIC_ENABLE_IPV4_LRO;
+               if (adapter->ahw->extra_capability[0] &
+                   QLCNIC_FW_CAP2_HW_LRO_IPV6)
+                       word |= QLCNIC_ENABLE_IPV6_LRO;
+       }
+
+       req.words[0] = cpu_to_le64(word);
+
+       rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
+       if (rv != 0)
+               dev_err(&adapter->netdev->dev,
+                       "Could not send configure hw lro request\n");
+
+       return rv;
+}
+
+int qlcnic_config_bridged_mode(struct qlcnic_adapter *adapter, u32 enable)
+{
+       struct qlcnic_nic_req req;
+       u64 word;
+       int rv;
+
+       if (!!(adapter->flags & QLCNIC_BRIDGE_ENABLED) == enable)
+               return 0;
+
+       memset(&req, 0, sizeof(struct qlcnic_nic_req));
+
+       req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);
+
+       word = QLCNIC_H2C_OPCODE_CONFIG_BRIDGING |
+               ((u64)adapter->portnum << 16);
+       req.req_hdr = cpu_to_le64(word);
+
+       req.words[0] = cpu_to_le64(enable);
+
+       rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
+       if (rv != 0)
+               dev_err(&adapter->netdev->dev,
+                       "Could not send configure bridge mode request\n");
+
+       adapter->flags ^= QLCNIC_BRIDGE_ENABLED;
+
+       return rv;
+}
+
+
+#define QLCNIC_RSS_HASHTYPE_IP_TCP     0x3
+#define QLCNIC_ENABLE_TYPE_C_RSS       BIT_10
+#define QLCNIC_RSS_FEATURE_FLAG        (1ULL << 63)
+#define QLCNIC_RSS_IND_TABLE_MASK      0x7ULL
+
+int qlcnic_82xx_config_rss(struct qlcnic_adapter *adapter, int enable)
+{
+       struct qlcnic_nic_req req;
+       u64 word;
+       int i, rv;
+
+       static const u64 key[] = {
+               0xbeac01fa6a42b73bULL, 0x8030f20c77cb2da3ULL,
+               0xae7b30b4d0ca2bcbULL, 0x43a38fb04167253dULL,
+               0x255b0ec26d5a56daULL
+       };
+
+       memset(&req, 0, sizeof(struct qlcnic_nic_req));
+       req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);
+
+       word = QLCNIC_H2C_OPCODE_CONFIG_RSS | ((u64)adapter->portnum << 16);
+       req.req_hdr = cpu_to_le64(word);
+
+       /*
+        * RSS request:
+        * bits 3-0: hash_method
+        *      5-4: hash_type_ipv4
+        *      7-6: hash_type_ipv6
+        *        8: enable
+        *        9: use indirection table
+        *       10: type-c rss
+        *       11: udp rss
+        *    47-12: reserved
+        *    62-48: indirection table mask
+        *       63: feature flag
+        */
+       word =  ((u64)(QLCNIC_RSS_HASHTYPE_IP_TCP & 0x3) << 4) |
+               ((u64)(QLCNIC_RSS_HASHTYPE_IP_TCP & 0x3) << 6) |
+               ((u64)(enable & 0x1) << 8) |
+               ((u64)QLCNIC_RSS_IND_TABLE_MASK << 48) |
+               (u64)QLCNIC_ENABLE_TYPE_C_RSS |
+               (u64)QLCNIC_RSS_FEATURE_FLAG;
+
+       req.words[0] = cpu_to_le64(word);
+       for (i = 0; i < 5; i++)
+               req.words[i+1] = cpu_to_le64(key[i]);
+
+       rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
+       if (rv != 0)
+               dev_err(&adapter->netdev->dev, "could not configure RSS\n");
+
+       return rv;
+}
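For reference, a standalone sketch that only recomputes and prints the request word built above for enable = 1 (constants copied from the #defines; nothing is sent to hardware):

    #include <stdio.h>
    #include <stdint.h>

    #define RSS_HASHTYPE_IP_TCP 0x3ULL
    #define ENABLE_TYPE_C_RSS   (1ULL << 10)
    #define RSS_FEATURE_FLAG    (1ULL << 63)
    #define RSS_IND_TABLE_MASK  0x7ULL

    int main(void)
    {
        int enable = 1;
        uint64_t word = ((RSS_HASHTYPE_IP_TCP & 0x3) << 4) |   /* hash_type_ipv4 (bits 5-4)    */
                        ((RSS_HASHTYPE_IP_TCP & 0x3) << 6) |   /* hash_type_ipv6 (bits 7-6)    */
                        ((uint64_t)(enable & 0x1) << 8)    |   /* enable (bit 8)               */
                        (RSS_IND_TABLE_MASK << 48)         |   /* indirection mask (bits 62-48) */
                        ENABLE_TYPE_C_RSS                  |   /* type-C RSS (bit 10)          */
                        RSS_FEATURE_FLAG;                      /* feature flag (bit 63)        */

        printf("rss request word = 0x%016llx\n", (unsigned long long)word);
        return 0;
    }

With these values the word comes out as 0x80070000000005f0: hash type 0x3 for IPv4 and IPv6, enable set, type-C RSS, a full 3-bit indirection-table mask, and the feature flag.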
+
+void qlcnic_82xx_config_ipaddr(struct qlcnic_adapter *adapter,
+                              __be32 ip, int cmd)
+{
+       struct qlcnic_nic_req req;
+       struct qlcnic_ipaddr *ipa;
+       u64 word;
+       int rv;
+
+       memset(&req, 0, sizeof(struct qlcnic_nic_req));
+       req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);
+
+       word = QLCNIC_H2C_OPCODE_CONFIG_IPADDR | ((u64)adapter->portnum << 16);
+       req.req_hdr = cpu_to_le64(word);
+
+       req.words[0] = cpu_to_le64(cmd);
+       ipa = (struct qlcnic_ipaddr *)&req.words[1];
+       ipa->ipv4 = ip;
+
+       rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
+       if (rv != 0)
+               dev_err(&adapter->netdev->dev,
+                               "could not notify %s IP 0x%x request\n",
+                               (cmd == QLCNIC_IP_UP) ? "Add" : "Remove", ip);
+}
+
+int qlcnic_82xx_linkevent_request(struct qlcnic_adapter *adapter, int enable)
+{
+       struct qlcnic_nic_req req;
+       u64 word;
+       int rv;
+       memset(&req, 0, sizeof(struct qlcnic_nic_req));
+       req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);
+
+       word = QLCNIC_H2C_OPCODE_GET_LINKEVENT | ((u64)adapter->portnum << 16);
+       req.req_hdr = cpu_to_le64(word);
+       req.words[0] = cpu_to_le64(enable | (enable << 8));
+       rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
+       if (rv != 0)
+               dev_err(&adapter->netdev->dev,
+                               "could not configure link notification\n");
+
+       return rv;
+}
+
+static int qlcnic_send_lro_cleanup(struct qlcnic_adapter *adapter)
+{
+       struct qlcnic_nic_req req;
+       u64 word;
+       int rv;
+
+       if (!test_bit(__QLCNIC_FW_ATTACHED, &adapter->state))
+               return 0;
+
+       memset(&req, 0, sizeof(struct qlcnic_nic_req));
+       req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);
+
+       word = QLCNIC_H2C_OPCODE_LRO_REQUEST |
+               ((u64)adapter->portnum << 16) |
+               ((u64)QLCNIC_LRO_REQUEST_CLEANUP << 56);
+
+       req.req_hdr = cpu_to_le64(word);
+
+       rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
+       if (rv != 0)
+               dev_err(&adapter->netdev->dev,
+                                "could not cleanup lro flows\n");
+
+       return rv;
+}
+
+/*
+ * qlcnic_change_mtu - Change the Maximum Transmission Unit
+ * @returns 0 on success, negative on failure
+ */
+
+int qlcnic_change_mtu(struct net_device *netdev, int mtu)
+{
+       struct qlcnic_adapter *adapter = netdev_priv(netdev);
+       int rc = 0;
+
+       if (mtu < P3P_MIN_MTU || mtu > P3P_MAX_MTU) {
+               dev_err(&adapter->netdev->dev, "%d bytes < mtu < %d bytes"
+                       " not supported\n", P3P_MIN_MTU, P3P_MAX_MTU);
+               return -EINVAL;
+       }
+
+       rc = qlcnic_fw_cmd_set_mtu(adapter, mtu);
+
+       if (!rc)
+               netdev->mtu = mtu;
+
+       return rc;
+}
+
+static netdev_features_t qlcnic_process_flags(struct qlcnic_adapter *adapter,
+                                             netdev_features_t features)
+{
+       u32 offload_flags = adapter->offload_flags;
+
+       if (offload_flags & BIT_0) {
+               features |= NETIF_F_RXCSUM | NETIF_F_IP_CSUM |
+                           NETIF_F_IPV6_CSUM;
+               adapter->rx_csum = 1;
+               if (QLCNIC_IS_TSO_CAPABLE(adapter)) {
+                       if (!(offload_flags & BIT_1))
+                               features &= ~NETIF_F_TSO;
+                       else
+                               features |= NETIF_F_TSO;
+
+                       if (!(offload_flags & BIT_2))
+                               features &= ~NETIF_F_TSO6;
+                       else
+                               features |= NETIF_F_TSO6;
+               }
+       } else {
+               features &= ~(NETIF_F_RXCSUM |
+                             NETIF_F_IP_CSUM |
+                             NETIF_F_IPV6_CSUM);
+
+               if (QLCNIC_IS_TSO_CAPABLE(adapter))
+                       features &= ~(NETIF_F_TSO | NETIF_F_TSO6);
+               adapter->rx_csum = 0;
+       }
+
+       return features;
+}
+
+netdev_features_t qlcnic_fix_features(struct net_device *netdev,
+       netdev_features_t features)
+{
+       struct qlcnic_adapter *adapter = netdev_priv(netdev);
+       netdev_features_t changed;
+
+       if (qlcnic_82xx_check(adapter) &&
+           (adapter->flags & QLCNIC_ESWITCH_ENABLED)) {
+               if (adapter->flags & QLCNIC_APP_CHANGED_FLAGS) {
+                       features = qlcnic_process_flags(adapter, features);
+               } else {
+                       changed = features ^ netdev->features;
+                       features ^= changed & (NETIF_F_RXCSUM |
+                                              NETIF_F_IP_CSUM |
+                                              NETIF_F_IPV6_CSUM |
+                                              NETIF_F_TSO |
+                                              NETIF_F_TSO6);
+               }
+       }
+
+       if (!(features & NETIF_F_RXCSUM))
+               features &= ~NETIF_F_LRO;
+
+       return features;
+}
+
+
+int qlcnic_set_features(struct net_device *netdev, netdev_features_t features)
+{
+       struct qlcnic_adapter *adapter = netdev_priv(netdev);
+       netdev_features_t changed = netdev->features ^ features;
+       int hw_lro = (features & NETIF_F_LRO) ? QLCNIC_LRO_ENABLED : 0;
+
+       if (!(changed & NETIF_F_LRO))
+               return 0;
+
+       netdev->features ^= NETIF_F_LRO;
+
+       if (qlcnic_config_hw_lro(adapter, hw_lro))
+               return -EIO;
+
+       if (!hw_lro && qlcnic_82xx_check(adapter)) {
+               if (qlcnic_send_lro_cleanup(adapter))
+                       return -EIO;
+       }
+
+       return 0;
+}
+
+/*
+ * qlcnic_pci_get_crb_addr_2M - translate a 128M-map CRB offset for the 2M BAR
+ *
+ * In: 'off' is an offset from the base of the 128M pci map.
+ * Returns < 0 if 'off' is not valid,
+ *      1 if CRB window access is needed ('addr' is set to the indirect
+ *        window address for 'off'),
+ *      0 if no window access is needed ('addr' is set to the direct
+ *        2M-map address).
+ */
+static int qlcnic_pci_get_crb_addr_2M(struct qlcnic_hardware_context *ahw,
+                                     ulong off, void __iomem **addr)
+{
+       const struct crb_128M_2M_sub_block_map *m;
+
+       if ((off >= QLCNIC_CRB_MAX) || (off < QLCNIC_PCI_CRBSPACE))
+               return -EINVAL;
+
+       off -= QLCNIC_PCI_CRBSPACE;
+
+       /*
+        * Try direct map
+        */
+       m = &crb_128M_2M_map[CRB_BLK(off)].sub_block[CRB_SUBBLK(off)];
+
+       if (m->valid && (m->start_128M <= off) && (m->end_128M > off)) {
+               *addr = ahw->pci_base0 + m->start_2M +
+                       (off - m->start_128M);
+               return 0;
+       }
+
+       /*
+        * Not in direct map, use crb window
+        */
+       *addr = ahw->pci_base0 + CRB_INDIRECT_2M + (off & MASK(16));
+       return 1;
+}
+
+/*
+ * In: 'off' is offset from CRB space in 128M pci map
+ * Out: 'off' is 2M pci map addr
+ * side effect: lock crb window
+ */
+static int
+qlcnic_pci_set_crbwindow_2M(struct qlcnic_adapter *adapter, ulong off)
+{
+       u32 window;
+       void __iomem *addr = adapter->ahw->pci_base0 + CRB_WINDOW_2M;
+
+       off -= QLCNIC_PCI_CRBSPACE;
+
+       window = CRB_HI(off);
+       if (window == 0) {
+               dev_err(&adapter->pdev->dev, "Invalid offset 0x%lx\n", off);
+               return -EIO;
+       }
+
+       writel(window, addr);
+       if (readl(addr) != window) {
+               if (printk_ratelimit())
+                       dev_warn(&adapter->pdev->dev,
+                               "failed to set CRB window to %d off 0x%lx\n",
+                               window, off);
+               return -EIO;
+       }
+       return 0;
+}
+
+int qlcnic_82xx_hw_write_wx_2M(struct qlcnic_adapter *adapter, ulong off,
+                              u32 data)
+{
+       unsigned long flags;
+       int rv;
+       void __iomem *addr = NULL;
+
+       rv = qlcnic_pci_get_crb_addr_2M(adapter->ahw, off, &addr);
+
+       if (rv == 0) {
+               writel(data, addr);
+               return 0;
+       }
+
+       if (rv > 0) {
+               /* indirect access */
+               write_lock_irqsave(&adapter->ahw->crb_lock, flags);
+               crb_win_lock(adapter);
+               rv = qlcnic_pci_set_crbwindow_2M(adapter, off);
+               if (!rv)
+                       writel(data, addr);
+               crb_win_unlock(adapter);
+               write_unlock_irqrestore(&adapter->ahw->crb_lock, flags);
+               return rv;
+       }
+
+       dev_err(&adapter->pdev->dev,
+                       "%s: invalid offset: 0x%016lx\n", __func__, off);
+       dump_stack();
+       return -EIO;
+}
+
+int qlcnic_82xx_hw_read_wx_2M(struct qlcnic_adapter *adapter, ulong off,
+                             int *err)
+{
+       unsigned long flags;
+       int rv;
+       u32 data = -1;
+       void __iomem *addr = NULL;
+
+       rv = qlcnic_pci_get_crb_addr_2M(adapter->ahw, off, &addr);
+
+       if (rv == 0)
+               return readl(addr);
+
+       if (rv > 0) {
+               /* indirect access */
+               write_lock_irqsave(&adapter->ahw->crb_lock, flags);
+               crb_win_lock(adapter);
+               if (!qlcnic_pci_set_crbwindow_2M(adapter, off))
+                       data = readl(addr);
+               crb_win_unlock(adapter);
+               write_unlock_irqrestore(&adapter->ahw->crb_lock, flags);
+               return data;
+       }
+
+       dev_err(&adapter->pdev->dev,
+                       "%s: invalid offset: 0x%016lx\n", __func__, off);
+       dump_stack();
+       return -1;
+}
+
+void __iomem *qlcnic_get_ioaddr(struct qlcnic_hardware_context *ahw,
+                               u32 offset)
+{
+       void __iomem *addr = NULL;
+
+       WARN_ON(qlcnic_pci_get_crb_addr_2M(ahw, offset, &addr));
+
+       return addr;
+}
+
+static int qlcnic_pci_mem_access_direct(struct qlcnic_adapter *adapter,
+                                       u32 window, u64 off, u64 *data, int op)
+{
+       void __iomem *addr;
+       u32 start;
+
+       mutex_lock(&adapter->ahw->mem_lock);
+
+       writel(window, adapter->ahw->ocm_win_crb);
+       /* read back to flush */
+       readl(adapter->ahw->ocm_win_crb);
+       start = QLCNIC_PCI_OCM0_2M + off;
+
+       addr = adapter->ahw->pci_base0 + start;
+
+       if (op == 0)    /* read */
+               *data = readq(addr);
+       else            /* write */
+               writeq(*data, addr);
+
+       /* Set window to 0 */
+       writel(0, adapter->ahw->ocm_win_crb);
+       readl(adapter->ahw->ocm_win_crb);
+
+       mutex_unlock(&adapter->ahw->mem_lock);
+       return 0;
+}
+
+static void
+qlcnic_pci_camqm_read_2M(struct qlcnic_adapter *adapter, u64 off, u64 *data)
+{
+       void __iomem *addr = adapter->ahw->pci_base0 +
+               QLCNIC_PCI_CAMQM_2M_BASE + (off - QLCNIC_PCI_CAMQM);
+
+       mutex_lock(&adapter->ahw->mem_lock);
+       *data = readq(addr);
+       mutex_unlock(&adapter->ahw->mem_lock);
+}
+
+static void
+qlcnic_pci_camqm_write_2M(struct qlcnic_adapter *adapter, u64 off, u64 data)
+{
+       void __iomem *addr = adapter->ahw->pci_base0 +
+               QLCNIC_PCI_CAMQM_2M_BASE + (off - QLCNIC_PCI_CAMQM);
+
+       mutex_lock(&adapter->ahw->mem_lock);
+       writeq(data, addr);
+       mutex_unlock(&adapter->ahw->mem_lock);
+}
+
+
+
+/* Set MS memory control data for different adapters */
+static void qlcnic_set_ms_controls(struct qlcnic_adapter *adapter, u64 off,
+                                  struct qlcnic_ms_reg_ctrl *ms)
+{
+       ms->control = QLCNIC_MS_CTRL;
+       ms->low = QLCNIC_MS_ADDR_LO;
+       ms->hi = QLCNIC_MS_ADDR_HI;
+       if (off & 0xf) {
+               ms->wd[0] = QLCNIC_MS_WRTDATA_LO;
+               ms->rd[0] = QLCNIC_MS_RDDATA_LO;
+               ms->wd[1] = QLCNIC_MS_WRTDATA_HI;
+               ms->rd[1] = QLCNIC_MS_RDDATA_HI;
+               ms->wd[2] = QLCNIC_MS_WRTDATA_ULO;
+               ms->wd[3] = QLCNIC_MS_WRTDATA_UHI;
+               ms->rd[2] = QLCNIC_MS_RDDATA_ULO;
+               ms->rd[3] = QLCNIC_MS_RDDATA_UHI;
+       } else {
+               ms->wd[0] = QLCNIC_MS_WRTDATA_ULO;
+               ms->rd[0] = QLCNIC_MS_RDDATA_ULO;
+               ms->wd[1] = QLCNIC_MS_WRTDATA_UHI;
+               ms->rd[1] = QLCNIC_MS_RDDATA_UHI;
+               ms->wd[2] = QLCNIC_MS_WRTDATA_LO;
+               ms->wd[3] = QLCNIC_MS_WRTDATA_HI;
+               ms->rd[2] = QLCNIC_MS_RDDATA_LO;
+               ms->rd[3] = QLCNIC_MS_RDDATA_HI;
+       }
+
+       ms->ocm_window = OCM_WIN_P3P(off);
+       ms->off = GET_MEM_OFFS_2M(off);
+}
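The window/offset split done here can be tried in isolation (the address is an invented example; the masks are the OCM_WIN_P3P()/GET_MEM_OFFS_2M() definitions from the top of this file):

    #include <stdio.h>
    #include <stdint.h>

    #define MASK(n)               ((1ULL << (n)) - 1)
    #define OCM_WIN_P3P(addr)     ((addr) & 0xffc0000)
    #define GET_MEM_OFFS_2M(addr) ((addr) & MASK(18))

    int main(void)
    {
        uint64_t addr = 0x0214008;  /* invented example address */

        printf("ocm window = 0x%07llx, offset within window = 0x%05llx\n",
               (unsigned long long)OCM_WIN_P3P(addr),
               (unsigned long long)GET_MEM_OFFS_2M(addr));
        return 0;
    }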
+
+int qlcnic_pci_mem_write_2M(struct qlcnic_adapter *adapter, u64 off, u64 data)
+{
+       int j, ret = 0;
+       u32 temp, off8;
+       struct qlcnic_ms_reg_ctrl ms;
+
+       /* Only 64-bit aligned access */
+       if (off & 7)
+               return -EIO;
+
+       memset(&ms, 0, sizeof(struct qlcnic_ms_reg_ctrl));
+       if (!(ADDR_IN_RANGE(off, QLCNIC_ADDR_QDR_NET,
+                           QLCNIC_ADDR_QDR_NET_MAX) ||
+             ADDR_IN_RANGE(off, QLCNIC_ADDR_DDR_NET,
+                           QLCNIC_ADDR_DDR_NET_MAX)))
+               return -EIO;
+
+       qlcnic_set_ms_controls(adapter, off, &ms);
+
+       if (ADDR_IN_RANGE(off, QLCNIC_ADDR_OCM0, QLCNIC_ADDR_OCM0_MAX))
+               return qlcnic_pci_mem_access_direct(adapter, ms.ocm_window,
+                                                   ms.off, &data, 1);
+
+       off8 = off & ~0xf;
+
+       mutex_lock(&adapter->ahw->mem_lock);
+
+       qlcnic_ind_wr(adapter, ms.low, off8);
+       qlcnic_ind_wr(adapter, ms.hi, 0);
+
+       qlcnic_ind_wr(adapter, ms.control, TA_CTL_ENABLE);
+       qlcnic_ind_wr(adapter, ms.control, QLCNIC_TA_START_ENABLE);
+
+       for (j = 0; j < MAX_CTL_CHECK; j++) {
+               temp = qlcnic_ind_rd(adapter, ms.control);
+               if ((temp & TA_CTL_BUSY) == 0)
+                       break;
+       }
+
+       if (j >= MAX_CTL_CHECK) {
+               ret = -EIO;
+               goto done;
+       }
+
+       /* This is the modify part of read-modify-write */
+       qlcnic_ind_wr(adapter, ms.wd[0], qlcnic_ind_rd(adapter, ms.rd[0]));
+       qlcnic_ind_wr(adapter, ms.wd[1], qlcnic_ind_rd(adapter, ms.rd[1]));
+       /* This is the write part of read-modify-write */
+       qlcnic_ind_wr(adapter, ms.wd[2], data & 0xffffffff);
+       qlcnic_ind_wr(adapter, ms.wd[3], (data >> 32) & 0xffffffff);
+
+       qlcnic_ind_wr(adapter, ms.control, QLCNIC_TA_WRITE_ENABLE);
+       qlcnic_ind_wr(adapter, ms.control, QLCNIC_TA_WRITE_START);
+
+       for (j = 0; j < MAX_CTL_CHECK; j++) {
+               temp = qlcnic_ind_rd(adapter, ms.control);
+               if ((temp & TA_CTL_BUSY) == 0)
+                       break;
+       }
+
+       if (j >= MAX_CTL_CHECK) {
+               if (printk_ratelimit())
+                       dev_err(&adapter->pdev->dev,
+                                       "failed to write through agent\n");
+               ret = -EIO;
+       } else {
+               ret = 0;
+       }
+
+done:
+       mutex_unlock(&adapter->ahw->mem_lock);
+
+       return ret;
+}
+
+int qlcnic_pci_mem_read_2M(struct qlcnic_adapter *adapter, u64 off, u64 *data)
+{
+       int j, ret;
+       u32 temp, off8;
+       u64 val;
+       struct qlcnic_ms_reg_ctrl ms;
+
+       /* Only 64-bit aligned access */
+       if (off & 7)
+               return -EIO;
+       if (!(ADDR_IN_RANGE(off, QLCNIC_ADDR_QDR_NET,
+                           QLCNIC_ADDR_QDR_NET_MAX) ||
+             ADDR_IN_RANGE(off, QLCNIC_ADDR_DDR_NET,
+                           QLCNIC_ADDR_DDR_NET_MAX)))
+               return -EIO;
+
+       memset(&ms, 0, sizeof(struct qlcnic_ms_reg_ctrl));
+       qlcnic_set_ms_controls(adapter, off, &ms);
+
+       if (ADDR_IN_RANGE(off, QLCNIC_ADDR_OCM0, QLCNIC_ADDR_OCM0_MAX))
+               return qlcnic_pci_mem_access_direct(adapter, ms.ocm_window,
+                                                   ms.off, data, 0);
+
+       mutex_lock(&adapter->ahw->mem_lock);
+
+       off8 = off & ~0xf;
+
+       qlcnic_ind_wr(adapter, ms.low, off8);
+       qlcnic_ind_wr(adapter, ms.hi, 0);
+
+       qlcnic_ind_wr(adapter, ms.control, TA_CTL_ENABLE);
+       qlcnic_ind_wr(adapter, ms.control, QLCNIC_TA_START_ENABLE);
+
+       for (j = 0; j < MAX_CTL_CHECK; j++) {
+               temp = qlcnic_ind_rd(adapter, ms.control);
+               if ((temp & TA_CTL_BUSY) == 0)
+                       break;
+       }
+
+       if (j >= MAX_CTL_CHECK) {
+               if (printk_ratelimit())
+                       dev_err(&adapter->pdev->dev,
+                                       "failed to read through agent\n");
+               ret = -EIO;
+       } else {
+               temp = qlcnic_ind_rd(adapter, ms.rd[3]);
+               val = (u64)temp << 32;
+               val |= qlcnic_ind_rd(adapter, ms.rd[2]);
+               *data = val;
+               ret = 0;
+       }
+
+       mutex_unlock(&adapter->ahw->mem_lock);
+
+       return ret;
+}
+
+int qlcnic_82xx_get_board_info(struct qlcnic_adapter *adapter)
+{
+       int offset, board_type, magic, err = 0;
+       struct pci_dev *pdev = adapter->pdev;
+
+       offset = QLCNIC_FW_MAGIC_OFFSET;
+       if (qlcnic_rom_fast_read(adapter, offset, &magic))
+               return -EIO;
+
+       if (magic != QLCNIC_BDINFO_MAGIC) {
+               dev_err(&pdev->dev, "invalid board config, magic=%08x\n",
+                       magic);
+               return -EIO;
+       }
+
+       offset = QLCNIC_BRDTYPE_OFFSET;
+       if (qlcnic_rom_fast_read(adapter, offset, &board_type))
+               return -EIO;
+
+       adapter->ahw->board_type = board_type;
+
+       if (board_type == QLCNIC_BRDTYPE_P3P_4_GB_MM) {
+               u32 gpio = QLCRD32(adapter, QLCNIC_ROMUSB_GLB_PAD_GPIO_I, &err);
+               if (err == -EIO)
+                       return err;
+               if ((gpio & 0x8000) == 0)
+                       board_type = QLCNIC_BRDTYPE_P3P_10G_TP;
+       }
+
+       switch (board_type) {
+       case QLCNIC_BRDTYPE_P3P_HMEZ:
+       case QLCNIC_BRDTYPE_P3P_XG_LOM:
+       case QLCNIC_BRDTYPE_P3P_10G_CX4:
+       case QLCNIC_BRDTYPE_P3P_10G_CX4_LP:
+       case QLCNIC_BRDTYPE_P3P_IMEZ:
+       case QLCNIC_BRDTYPE_P3P_10G_SFP_PLUS:
+       case QLCNIC_BRDTYPE_P3P_10G_SFP_CT:
+       case QLCNIC_BRDTYPE_P3P_10G_SFP_QT:
+       case QLCNIC_BRDTYPE_P3P_10G_XFP:
+       case QLCNIC_BRDTYPE_P3P_10000_BASE_T:
+               adapter->ahw->port_type = QLCNIC_XGBE;
+               break;
+       case QLCNIC_BRDTYPE_P3P_REF_QG:
+       case QLCNIC_BRDTYPE_P3P_4_GB:
+       case QLCNIC_BRDTYPE_P3P_4_GB_MM:
+               adapter->ahw->port_type = QLCNIC_GBE;
+               break;
+       case QLCNIC_BRDTYPE_P3P_10G_TP:
+               adapter->ahw->port_type = (adapter->portnum < 2) ?
+                       QLCNIC_XGBE : QLCNIC_GBE;
+               break;
+       default:
+               dev_err(&pdev->dev, "unknown board type %x\n", board_type);
+               adapter->ahw->port_type = QLCNIC_XGBE;
+               break;
+       }
+
+       return 0;
+}
+
+static int
+qlcnic_wol_supported(struct qlcnic_adapter *adapter)
+{
+       u32 wol_cfg;
+       int err = 0;
+
+       wol_cfg = QLCRD32(adapter, QLCNIC_WOL_CONFIG_NV, &err);
+       if (wol_cfg & (1UL << adapter->portnum)) {
+               wol_cfg = QLCRD32(adapter, QLCNIC_WOL_CONFIG, &err);
+               if (err == -EIO)
+                       return err;
+               if (wol_cfg & (1 << adapter->portnum))
+                       return 1;
+       }
+
+       return 0;
+}
+
+int qlcnic_82xx_config_led(struct qlcnic_adapter *adapter, u32 state, u32 rate)
+{
+       struct qlcnic_nic_req   req;
+       int rv;
+       u64 word;
+
+       memset(&req, 0, sizeof(struct qlcnic_nic_req));
+       req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);
+
+       word = QLCNIC_H2C_OPCODE_CONFIG_LED | ((u64)adapter->portnum << 16);
+       req.req_hdr = cpu_to_le64(word);
+
+       req.words[0] = cpu_to_le64(((u64)rate << 32) | adapter->portnum);
+       req.words[1] = cpu_to_le64(state);
+
+       rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
+       if (rv)
+               dev_err(&adapter->pdev->dev, "LED configuration failed.\n");
+
+       return rv;
+}
+
+void qlcnic_82xx_get_beacon_state(struct qlcnic_adapter *adapter)
+{
+       struct qlcnic_hardware_context *ahw = adapter->ahw;
+       struct qlcnic_cmd_args cmd;
+       u8 beacon_state;
+       int err = 0;
+
+       if (ahw->extra_capability[0] & QLCNIC_FW_CAPABILITY_2_BEACON) {
+               err = qlcnic_alloc_mbx_args(&cmd, adapter,
+                                           QLCNIC_CMD_GET_LED_STATUS);
+               if (!err) {
+                       err = qlcnic_issue_cmd(adapter, &cmd);
+                       if (err) {
+                               netdev_err(adapter->netdev,
+                                          "Failed to get current beacon state, err=%d\n",
+                                          err);
+                       } else {
+                               beacon_state = cmd.rsp.arg[1];
+                               if (beacon_state == QLCNIC_BEACON_DISABLE)
+                                       ahw->beacon_state = QLCNIC_BEACON_OFF;
+                               else if (beacon_state == QLCNIC_BEACON_EANBLE)
+                                       ahw->beacon_state = QLCNIC_BEACON_ON;
+                       }
+               }
+               qlcnic_free_mbx_args(&cmd);
+       }
+
+       return;
+}
+
+void qlcnic_82xx_get_func_no(struct qlcnic_adapter *adapter)
+{
+       void __iomem *msix_base_addr;
+       u32 func;
+       u32 msix_base;
+
+       pci_read_config_dword(adapter->pdev, QLCNIC_MSIX_TABLE_OFFSET, &func);
+       msix_base_addr = adapter->ahw->pci_base0 + QLCNIC_MSIX_BASE;
+       msix_base = readl(msix_base_addr);
+       func = (func - msix_base) / QLCNIC_MSIX_TBL_PGSIZE;
+       adapter->ahw->pci_func = func;
+}
+
+void qlcnic_82xx_read_crb(struct qlcnic_adapter *adapter, char *buf,
+                         loff_t offset, size_t size)
+{
+       int err = 0;
+       u32 data;
+       u64 qmdata;
+
+       if (ADDR_IN_RANGE(offset, QLCNIC_PCI_CAMQM, QLCNIC_PCI_CAMQM_END)) {
+               qlcnic_pci_camqm_read_2M(adapter, offset, &qmdata);
+               memcpy(buf, &qmdata, size);
+       } else {
+               data = QLCRD32(adapter, offset, &err);
+               memcpy(buf, &data, size);
+       }
+}
+
+void qlcnic_82xx_write_crb(struct qlcnic_adapter *adapter, char *buf,
+                          loff_t offset, size_t size)
+{
+       u32 data;
+       u64 qmdata;
+
+       if (ADDR_IN_RANGE(offset, QLCNIC_PCI_CAMQM, QLCNIC_PCI_CAMQM_END)) {
+               memcpy(&qmdata, buf, size);
+               qlcnic_pci_camqm_write_2M(adapter, offset, qmdata);
+       } else {
+               memcpy(&data, buf, size);
+               QLCWR32(adapter, offset, data);
+       }
+}
+
+int qlcnic_82xx_api_lock(struct qlcnic_adapter *adapter)
+{
+       return qlcnic_pcie_sem_lock(adapter, 5, 0);
+}
+
+void qlcnic_82xx_api_unlock(struct qlcnic_adapter *adapter)
+{
+       qlcnic_pcie_sem_unlock(adapter, 5);
+}
+
+int qlcnic_82xx_shutdown(struct pci_dev *pdev)
+{
+       struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
+       struct net_device *netdev = adapter->netdev;
+       int retval;
+
+       netif_device_detach(netdev);
+
+       qlcnic_cancel_idc_work(adapter);
+
+       if (netif_running(netdev))
+               qlcnic_down(adapter, netdev);
+
+       qlcnic_clr_all_drv_state(adapter, 0);
+
+       clear_bit(__QLCNIC_RESETTING, &adapter->state);
+
+       retval = pci_save_state(pdev);
+       if (retval)
+               return retval;
+
+       if (qlcnic_wol_supported(adapter)) {
+               pci_enable_wake(pdev, PCI_D3cold, 1);
+               pci_enable_wake(pdev, PCI_D3hot, 1);
+       }
+
+       return 0;
+}
+
+int qlcnic_82xx_resume(struct qlcnic_adapter *adapter)
+{
+       struct net_device *netdev = adapter->netdev;
+       int err;
+
+       err = qlcnic_start_firmware(adapter);
+       if (err) {
+               dev_err(&adapter->pdev->dev, "failed to start firmware\n");
+               return err;
+       }
+
+       if (netif_running(netdev)) {
+               err = qlcnic_up(adapter, netdev);
+               if (!err)
+                       qlcnic_restore_indev_addr(netdev, NETDEV_UP);
+       }
+
+       netif_device_attach(netdev);
+       qlcnic_schedule_work(adapter, qlcnic_fw_poll_work, FW_POLL_DELAY);
+       return err;
+}
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h
new file mode 100644 (file)
index 0000000..4bb33af
--- /dev/null
@@ -0,0 +1,226 @@
+/*
+ * QLogic qlcnic NIC Driver
+ * Copyright (c) 2009-2013 QLogic Corporation
+ *
+ * See LICENSE.qlcnic for copyright and licensing details.
+ */
+
+#ifndef __QLCNIC_HW_H
+#define __QLCNIC_HW_H
+
+/* Common registers in 83xx and 82xx */
+enum qlcnic_regs {
+       QLCNIC_PEG_HALT_STATUS1 = 0,
+       QLCNIC_PEG_HALT_STATUS2,
+       QLCNIC_PEG_ALIVE_COUNTER,
+       QLCNIC_FLASH_LOCK_OWNER,
+       QLCNIC_FW_CAPABILITIES,
+       QLCNIC_CRB_DRV_ACTIVE,
+       QLCNIC_CRB_DEV_STATE,
+       QLCNIC_CRB_DRV_STATE,
+       QLCNIC_CRB_DRV_SCRATCH,
+       QLCNIC_CRB_DEV_PARTITION_INFO,
+       QLCNIC_CRB_DRV_IDC_VER,
+       QLCNIC_FW_VERSION_MAJOR,
+       QLCNIC_FW_VERSION_MINOR,
+       QLCNIC_FW_VERSION_SUB,
+       QLCNIC_CRB_DEV_NPAR_STATE,
+       QLCNIC_FW_IMG_VALID,
+       QLCNIC_CMDPEG_STATE,
+       QLCNIC_RCVPEG_STATE,
+       QLCNIC_ASIC_TEMP,
+       QLCNIC_FW_API,
+       QLCNIC_DRV_OP_MODE,
+       QLCNIC_FLASH_LOCK,
+       QLCNIC_FLASH_UNLOCK,
+};
+
+/* Read from an address offset from BAR0, existing registers */
+#define QLC_SHARED_REG_RD32(a, addr)                   \
+       readl(((a)->ahw->pci_base0) + ((a)->ahw->reg_tbl[addr]))
+
+/* Write to an address offset from BAR0, existing registers */
+#define QLC_SHARED_REG_WR32(a, addr, value)            \
+       writel(value, ((a)->ahw->pci_base0) + ((a)->ahw->reg_tbl[addr]))
+
+/* Read from a direct address offset from BAR0, additional registers */
+#define QLCRDX(ahw, addr)      \
+       readl(((ahw)->pci_base0) + ((ahw)->ext_reg_tbl[addr]))
+
+/* Write to a direct address offset from BAR0, additional registers */
+#define QLCWRX(ahw, addr, value)       \
+       writel(value, (((ahw)->pci_base0) + ((ahw)->ext_reg_tbl[addr])))
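+
+/*
+ * Usage sketch (illustrative only): the addr argument to these macros is
+ * an index into the per-ASIC tables (ahw->reg_tbl / ahw->ext_reg_tbl),
+ * not a raw BAR0 offset, e.g.
+ *
+ *     u32 state = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DEV_STATE);
+ *     QLC_SHARED_REG_WR32(adapter, QLCNIC_CRB_DEV_STATE, state);
+ *
+ * where QLCNIC_CRB_DEV_STATE is one of the qlcnic_regs indices above.
+ */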
+
+#define QLCNIC_CMD_CONFIGURE_IP_ADDR           0x1
+#define QLCNIC_CMD_CONFIG_INTRPT               0x2
+#define QLCNIC_CMD_CREATE_RX_CTX               0x7
+#define QLCNIC_CMD_DESTROY_RX_CTX              0x8
+#define QLCNIC_CMD_CREATE_TX_CTX               0x9
+#define QLCNIC_CMD_DESTROY_TX_CTX              0xa
+#define QLCNIC_CMD_CONFIGURE_LRO               0xC
+#define QLCNIC_CMD_CONFIGURE_MAC_LEARNING      0xD
+#define QLCNIC_CMD_GET_STATISTICS              0xF
+#define QLCNIC_CMD_INTRPT_TEST                 0x11
+#define QLCNIC_CMD_SET_MTU                     0x12
+#define QLCNIC_CMD_READ_PHY                    0x13
+#define QLCNIC_CMD_WRITE_PHY                   0x14
+#define QLCNIC_CMD_READ_HW_REG                 0x15
+#define QLCNIC_CMD_GET_FLOW_CTL                        0x16
+#define QLCNIC_CMD_SET_FLOW_CTL                        0x17
+#define QLCNIC_CMD_READ_MAX_MTU                        0x18
+#define QLCNIC_CMD_READ_MAX_LRO                        0x19
+#define QLCNIC_CMD_MAC_ADDRESS                 0x1f
+#define QLCNIC_CMD_GET_PCI_INFO                        0x20
+#define QLCNIC_CMD_GET_NIC_INFO                        0x21
+#define QLCNIC_CMD_SET_NIC_INFO                        0x22
+#define QLCNIC_CMD_GET_ESWITCH_CAPABILITY      0x24
+#define QLCNIC_CMD_TOGGLE_ESWITCH              0x25
+#define QLCNIC_CMD_GET_ESWITCH_STATUS          0x26
+#define QLCNIC_CMD_SET_PORTMIRRORING           0x27
+#define QLCNIC_CMD_CONFIGURE_ESWITCH           0x28
+#define QLCNIC_CMD_GET_ESWITCH_PORT_CONFIG     0x29
+#define QLCNIC_CMD_GET_ESWITCH_STATS           0x2a
+#define QLCNIC_CMD_CONFIG_PORT                 0x2e
+#define QLCNIC_CMD_TEMP_SIZE                   0x2f
+#define QLCNIC_CMD_GET_TEMP_HDR                        0x30
+#define QLCNIC_CMD_BC_EVENT_SETUP              0x31
+#define        QLCNIC_CMD_CONFIG_VPORT                 0x32
+#define        QLCNIC_CMD_DCB_QUERY_CAP                0x34
+#define        QLCNIC_CMD_DCB_QUERY_PARAM              0x35
+#define QLCNIC_CMD_GET_MAC_STATS               0x37
+#define QLCNIC_CMD_82XX_SET_DRV_VER            0x38
+#define QLCNIC_CMD_MQ_TX_CONFIG_INTR           0x39
+#define QLCNIC_CMD_GET_LED_STATUS              0x3C
+#define QLCNIC_CMD_CONFIGURE_RSS               0x41
+#define QLCNIC_CMD_CONFIG_INTR_COAL            0x43
+#define QLCNIC_CMD_CONFIGURE_LED               0x44
+#define QLCNIC_CMD_CONFIG_MAC_VLAN             0x45
+#define QLCNIC_CMD_GET_LINK_EVENT              0x48
+#define QLCNIC_CMD_CONFIGURE_MAC_RX_MODE       0x49
+#define QLCNIC_CMD_CONFIGURE_HW_LRO            0x4A
+#define QLCNIC_CMD_SET_INGRESS_ENCAP           0x4E
+#define QLCNIC_CMD_INIT_NIC_FUNC               0x60
+#define QLCNIC_CMD_STOP_NIC_FUNC               0x61
+#define QLCNIC_CMD_IDC_ACK                     0x63
+#define QLCNIC_CMD_SET_PORT_CONFIG             0x66
+#define QLCNIC_CMD_GET_PORT_CONFIG             0x67
+#define QLCNIC_CMD_GET_LINK_STATUS             0x68
+#define QLCNIC_CMD_SET_LED_CONFIG              0x69
+#define QLCNIC_CMD_GET_LED_CONFIG              0x6A
+#define QLCNIC_CMD_83XX_SET_DRV_VER            0x6F
+#define QLCNIC_CMD_ADD_RCV_RINGS               0x0B
+#define QLCNIC_CMD_83XX_EXTEND_ISCSI_DUMP_CAP  0x37
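+
+/*
+ * Sketch of how these opcodes are consumed (illustrative only; it mirrors
+ * the pattern in qlcnic_82xx_get_beacon_state(), and the response word
+ * picked below is just an example; the layout is command-specific):
+ *
+ *     struct qlcnic_cmd_args cmd;
+ *     int err;
+ *
+ *     err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_LINK_STATUS);
+ *     if (!err) {
+ *             err = qlcnic_issue_cmd(adapter, &cmd);
+ *             if (!err)
+ *                     link_state = cmd.rsp.arg[1];
+ *             qlcnic_free_mbx_args(&cmd);
+ *     }
+ */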
+
+#define QLCNIC_INTRPT_INTX                     1
+#define QLCNIC_INTRPT_MSIX                     3
+#define QLCNIC_INTRPT_ADD                      1
+#define QLCNIC_INTRPT_DEL                      2
+
+#define QLCNIC_GET_CURRENT_MAC                 1
+#define QLCNIC_SET_STATION_MAC                 2
+#define QLCNIC_GET_DEFAULT_MAC                 3
+#define QLCNIC_GET_FAC_DEF_MAC                 4
+#define QLCNIC_SET_FAC_DEF_MAC                 5
+
+#define QLCNIC_MBX_LINK_EVENT          0x8001
+#define QLCNIC_MBX_BC_EVENT            0x8002
+#define QLCNIC_MBX_COMP_EVENT          0x8100
+#define QLCNIC_MBX_REQUEST_EVENT       0x8101
+#define QLCNIC_MBX_TIME_EXTEND_EVENT   0x8102
+#define QLCNIC_MBX_DCBX_CONFIG_CHANGE_EVENT    0x8110
+#define QLCNIC_MBX_SFP_INSERT_EVENT    0x8130
+#define QLCNIC_MBX_SFP_REMOVE_EVENT    0x8131
+
+struct qlcnic_mailbox_metadata {
+       u32 cmd;
+       u32 in_args;
+       u32 out_args;
+};
+
+/* Mailbox ownership */
+#define QLCNIC_GET_OWNER(val)  ((val) & (BIT_0 | BIT_1))
+
+#define QLCNIC_SET_OWNER        1
+#define QLCNIC_CLR_OWNER        0
+#define QLCNIC_MBX_TIMEOUT      5000
+
+#define QLCNIC_MBX_RSP_OK      1
+#define QLCNIC_MBX_PORT_RSP_OK 0x1a
+#define QLCNIC_MBX_ASYNC_EVENT BIT_15
+
+/* Set HW Tx ring limit for 82xx adapter. */
+#define QLCNIC_MAX_HW_TX_RINGS         8
+#define QLCNIC_MAX_HW_VNIC_TX_RINGS    4
+#define QLCNIC_MAX_TX_RINGS            8
+#define QLCNIC_MAX_SDS_RINGS           8
+
+struct qlcnic_pci_info;
+struct qlcnic_info;
+struct qlcnic_cmd_args;
+struct ethtool_stats;
+struct pci_device_id;
+struct qlcnic_host_sds_ring;
+struct qlcnic_host_tx_ring;
+struct qlcnic_hardware_context;
+struct qlcnic_adapter;
+struct qlcnic_fw_dump;
+
+int qlcnic_82xx_hw_read_wx_2M(struct qlcnic_adapter *adapter, ulong, int *);
+int qlcnic_82xx_hw_write_wx_2M(struct qlcnic_adapter *, ulong, u32);
+int qlcnic_82xx_config_hw_lro(struct qlcnic_adapter *adapter, int);
+int qlcnic_82xx_nic_set_promisc(struct qlcnic_adapter *adapter, u32);
+int qlcnic_82xx_napi_add(struct qlcnic_adapter *adapter,
+                        struct net_device *netdev);
+void qlcnic_82xx_get_beacon_state(struct qlcnic_adapter *);
+void qlcnic_82xx_change_filter(struct qlcnic_adapter *adapter,
+                              u64 *uaddr, u16 vlan_id);
+int qlcnic_82xx_config_intr_coalesce(struct qlcnic_adapter *,
+                                    struct ethtool_coalesce *);
+int qlcnic_82xx_set_rx_coalesce(struct qlcnic_adapter *);
+int qlcnic_82xx_config_rss(struct qlcnic_adapter *adapter, int);
+void qlcnic_82xx_config_ipaddr(struct qlcnic_adapter *adapter,
+                              __be32, int);
+int qlcnic_82xx_linkevent_request(struct qlcnic_adapter *adapter, int);
+void qlcnic_82xx_process_rcv_ring_diag(struct qlcnic_host_sds_ring *sds_ring);
+int qlcnic_82xx_clear_lb_mode(struct qlcnic_adapter *adapter, u8);
+int qlcnic_82xx_set_lb_mode(struct qlcnic_adapter *, u8);
+void qlcnic_82xx_write_crb(struct qlcnic_adapter *, char *, loff_t, size_t);
+void qlcnic_82xx_read_crb(struct qlcnic_adapter *, char *, loff_t, size_t);
+int qlcnic_82xx_issue_cmd(struct qlcnic_adapter *adapter,
+                         struct qlcnic_cmd_args *);
+int qlcnic_82xx_mq_intrpt(struct qlcnic_adapter *, int);
+int qlcnic_82xx_config_intrpt(struct qlcnic_adapter *, u8);
+int qlcnic_82xx_fw_cmd_create_rx_ctx(struct qlcnic_adapter *);
+int qlcnic_82xx_fw_cmd_create_tx_ctx(struct qlcnic_adapter *,
+                                    struct qlcnic_host_tx_ring *tx_ring, int);
+void qlcnic_82xx_fw_cmd_del_rx_ctx(struct qlcnic_adapter *);
+void qlcnic_82xx_fw_cmd_del_tx_ctx(struct qlcnic_adapter *,
+                                  struct qlcnic_host_tx_ring *);
+int qlcnic_82xx_sre_macaddr_change(struct qlcnic_adapter *, u8 *, u16, u8);
+int qlcnic_82xx_get_mac_address(struct qlcnic_adapter *, u8*, u8);
+int qlcnic_82xx_get_nic_info(struct qlcnic_adapter *, struct qlcnic_info *, u8);
+int qlcnic_82xx_set_nic_info(struct qlcnic_adapter *, struct qlcnic_info *);
+int qlcnic_82xx_get_pci_info(struct qlcnic_adapter *, struct qlcnic_pci_info*);
+int qlcnic_82xx_alloc_mbx_args(struct qlcnic_cmd_args *,
+                              struct qlcnic_adapter *, u32);
+int qlcnic_82xx_hw_write_wx_2M(struct qlcnic_adapter *, ulong, u32);
+int qlcnic_82xx_get_board_info(struct qlcnic_adapter *);
+int qlcnic_82xx_config_led(struct qlcnic_adapter *, u32, u32);
+void qlcnic_82xx_get_func_no(struct qlcnic_adapter *);
+int qlcnic_82xx_api_lock(struct qlcnic_adapter *);
+void qlcnic_82xx_api_unlock(struct qlcnic_adapter *);
+void qlcnic_82xx_napi_enable(struct qlcnic_adapter *);
+void qlcnic_82xx_napi_disable(struct qlcnic_adapter *);
+void qlcnic_82xx_napi_del(struct qlcnic_adapter *);
+int qlcnic_82xx_shutdown(struct pci_dev *);
+int qlcnic_82xx_resume(struct qlcnic_adapter *);
+void qlcnic_clr_all_drv_state(struct qlcnic_adapter *adapter, u8 failed);
+void qlcnic_fw_poll_work(struct work_struct *work);
+
+u32 qlcnic_82xx_get_saved_state(void *, u32);
+void qlcnic_82xx_set_saved_state(void *, u32, u32);
+void qlcnic_82xx_cache_tmpl_hdr_values(struct qlcnic_fw_dump *);
+u32 qlcnic_82xx_get_cap_size(void *, int);
+void qlcnic_82xx_set_sys_info(void *, int, u32);
+void qlcnic_82xx_store_cap_mask(void *, u32);
+#endif                         /* __QLCNIC_HW_H */
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c
new file mode 100644 (file)
index 0000000..be41e4c
--- /dev/null
@@ -0,0 +1,1310 @@
+/*
+ * QLogic qlcnic NIC Driver
+ * Copyright (c) 2009-2013 QLogic Corporation
+ *
+ * See LICENSE.qlcnic for copyright and licensing details.
+ */
+
+#include "qlcnic.h"
+#include "qlcnic_hw.h"
+
+struct crb_addr_pair {
+       u32 addr;
+       u32 data;
+};
+
+#define QLCNIC_MAX_CRB_XFORM 60
+static unsigned int crb_addr_xform[QLCNIC_MAX_CRB_XFORM];
+
+#define crb_addr_transform(name) \
+       (crb_addr_xform[QLCNIC_HW_PX_MAP_CRB_##name] = \
+       QLCNIC_HW_CRB_HUB_AGT_ADR_##name << 20)
+
+#define QLCNIC_ADDR_ERROR (0xffffffff)
+
+static int
+qlcnic_check_fw_hearbeat(struct qlcnic_adapter *adapter);
+
+static void crb_addr_transform_setup(void)
+{
+       crb_addr_transform(XDMA);
+       crb_addr_transform(TIMR);
+       crb_addr_transform(SRE);
+       crb_addr_transform(SQN3);
+       crb_addr_transform(SQN2);
+       crb_addr_transform(SQN1);
+       crb_addr_transform(SQN0);
+       crb_addr_transform(SQS3);
+       crb_addr_transform(SQS2);
+       crb_addr_transform(SQS1);
+       crb_addr_transform(SQS0);
+       crb_addr_transform(RPMX7);
+       crb_addr_transform(RPMX6);
+       crb_addr_transform(RPMX5);
+       crb_addr_transform(RPMX4);
+       crb_addr_transform(RPMX3);
+       crb_addr_transform(RPMX2);
+       crb_addr_transform(RPMX1);
+       crb_addr_transform(RPMX0);
+       crb_addr_transform(ROMUSB);
+       crb_addr_transform(SN);
+       crb_addr_transform(QMN);
+       crb_addr_transform(QMS);
+       crb_addr_transform(PGNI);
+       crb_addr_transform(PGND);
+       crb_addr_transform(PGN3);
+       crb_addr_transform(PGN2);
+       crb_addr_transform(PGN1);
+       crb_addr_transform(PGN0);
+       crb_addr_transform(PGSI);
+       crb_addr_transform(PGSD);
+       crb_addr_transform(PGS3);
+       crb_addr_transform(PGS2);
+       crb_addr_transform(PGS1);
+       crb_addr_transform(PGS0);
+       crb_addr_transform(PS);
+       crb_addr_transform(PH);
+       crb_addr_transform(NIU);
+       crb_addr_transform(I2Q);
+       crb_addr_transform(EG);
+       crb_addr_transform(MN);
+       crb_addr_transform(MS);
+       crb_addr_transform(CAS2);
+       crb_addr_transform(CAS1);
+       crb_addr_transform(CAS0);
+       crb_addr_transform(CAM);
+       crb_addr_transform(C2C1);
+       crb_addr_transform(C2C0);
+       crb_addr_transform(SMB);
+       crb_addr_transform(OCM0);
+       crb_addr_transform(I2C0);
+}
+
+void qlcnic_release_rx_buffers(struct qlcnic_adapter *adapter)
+{
+       struct qlcnic_recv_context *recv_ctx;
+       struct qlcnic_host_rds_ring *rds_ring;
+       struct qlcnic_rx_buffer *rx_buf;
+       int i, ring;
+
+       recv_ctx = adapter->recv_ctx;
+       for (ring = 0; ring < adapter->max_rds_rings; ring++) {
+               rds_ring = &recv_ctx->rds_rings[ring];
+               for (i = 0; i < rds_ring->num_desc; ++i) {
+                       rx_buf = &(rds_ring->rx_buf_arr[i]);
+                       if (rx_buf->skb == NULL)
+                               continue;
+
+                       pci_unmap_single(adapter->pdev,
+                                       rx_buf->dma,
+                                       rds_ring->dma_size,
+                                       PCI_DMA_FROMDEVICE);
+
+                       dev_kfree_skb_any(rx_buf->skb);
+               }
+       }
+}
+
+void qlcnic_reset_rx_buffers_list(struct qlcnic_adapter *adapter)
+{
+       struct qlcnic_recv_context *recv_ctx;
+       struct qlcnic_host_rds_ring *rds_ring;
+       struct qlcnic_rx_buffer *rx_buf;
+       int i, ring;
+
+       recv_ctx = adapter->recv_ctx;
+       for (ring = 0; ring < adapter->max_rds_rings; ring++) {
+               rds_ring = &recv_ctx->rds_rings[ring];
+
+               INIT_LIST_HEAD(&rds_ring->free_list);
+
+               rx_buf = rds_ring->rx_buf_arr;
+               for (i = 0; i < rds_ring->num_desc; i++) {
+                       list_add_tail(&rx_buf->list,
+                                       &rds_ring->free_list);
+                       rx_buf++;
+               }
+       }
+}
+
+void qlcnic_release_tx_buffers(struct qlcnic_adapter *adapter,
+                              struct qlcnic_host_tx_ring *tx_ring)
+{
+       struct qlcnic_cmd_buffer *cmd_buf;
+       struct qlcnic_skb_frag *buffrag;
+       int i, j;
+
+       spin_lock(&tx_ring->tx_clean_lock);
+
+       cmd_buf = tx_ring->cmd_buf_arr;
+       for (i = 0; i < tx_ring->num_desc; i++) {
+               buffrag = cmd_buf->frag_array;
+               if (buffrag->dma) {
+                       pci_unmap_single(adapter->pdev, buffrag->dma,
+                                        buffrag->length, PCI_DMA_TODEVICE);
+                       buffrag->dma = 0ULL;
+               }
+               for (j = 1; j < cmd_buf->frag_count; j++) {
+                       buffrag++;
+                       if (buffrag->dma) {
+                               pci_unmap_page(adapter->pdev, buffrag->dma,
+                                              buffrag->length,
+                                              PCI_DMA_TODEVICE);
+                               buffrag->dma = 0ULL;
+                       }
+               }
+               if (cmd_buf->skb) {
+                       dev_kfree_skb_any(cmd_buf->skb);
+                       cmd_buf->skb = NULL;
+               }
+               cmd_buf++;
+       }
+
+       spin_unlock(&tx_ring->tx_clean_lock);
+}
+
+void qlcnic_free_sw_resources(struct qlcnic_adapter *adapter)
+{
+       struct qlcnic_recv_context *recv_ctx;
+       struct qlcnic_host_rds_ring *rds_ring;
+       int ring;
+
+       recv_ctx = adapter->recv_ctx;
+
+       if (recv_ctx->rds_rings == NULL)
+               return;
+
+       for (ring = 0; ring < adapter->max_rds_rings; ring++) {
+               rds_ring = &recv_ctx->rds_rings[ring];
+               vfree(rds_ring->rx_buf_arr);
+               rds_ring->rx_buf_arr = NULL;
+       }
+       kfree(recv_ctx->rds_rings);
+}
+
+int qlcnic_alloc_sw_resources(struct qlcnic_adapter *adapter)
+{
+       struct qlcnic_recv_context *recv_ctx;
+       struct qlcnic_host_rds_ring *rds_ring;
+       struct qlcnic_host_sds_ring *sds_ring;
+       struct qlcnic_rx_buffer *rx_buf;
+       int ring, i;
+
+       recv_ctx = adapter->recv_ctx;
+
+       rds_ring = kcalloc(adapter->max_rds_rings,
+                          sizeof(struct qlcnic_host_rds_ring), GFP_KERNEL);
+       if (rds_ring == NULL)
+               goto err_out;
+
+       recv_ctx->rds_rings = rds_ring;
+
+       for (ring = 0; ring < adapter->max_rds_rings; ring++) {
+               rds_ring = &recv_ctx->rds_rings[ring];
+               switch (ring) {
+               case RCV_RING_NORMAL:
+                       rds_ring->num_desc = adapter->num_rxd;
+                       rds_ring->dma_size = QLCNIC_P3P_RX_BUF_MAX_LEN;
+                       rds_ring->skb_size = rds_ring->dma_size + NET_IP_ALIGN;
+                       break;
+
+               case RCV_RING_JUMBO:
+                       rds_ring->num_desc = adapter->num_jumbo_rxd;
+                       rds_ring->dma_size =
+                               QLCNIC_P3P_RX_JUMBO_BUF_MAX_LEN;
+
+                       if (adapter->ahw->capabilities &
+                           QLCNIC_FW_CAPABILITY_HW_LRO)
+                               rds_ring->dma_size += QLCNIC_LRO_BUFFER_EXTRA;
+
+                       rds_ring->skb_size =
+                               rds_ring->dma_size + NET_IP_ALIGN;
+                       break;
+               }
+               rds_ring->rx_buf_arr = vzalloc(RCV_BUFF_RINGSIZE(rds_ring));
+               if (rds_ring->rx_buf_arr == NULL)
+                       goto err_out;
+
+               INIT_LIST_HEAD(&rds_ring->free_list);
+               /*
+                * Now go through all of them, set reference handles
+                * and put them in the queues.
+                */
+               rx_buf = rds_ring->rx_buf_arr;
+               for (i = 0; i < rds_ring->num_desc; i++) {
+                       list_add_tail(&rx_buf->list,
+                                       &rds_ring->free_list);
+                       rx_buf->ref_handle = i;
+                       rx_buf++;
+               }
+               spin_lock_init(&rds_ring->lock);
+       }
+
+       for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
+               sds_ring = &recv_ctx->sds_rings[ring];
+               sds_ring->irq = adapter->msix_entries[ring].vector;
+               sds_ring->adapter = adapter;
+               sds_ring->num_desc = adapter->num_rxd;
+               if (qlcnic_82xx_check(adapter)) {
+                       if (qlcnic_check_multi_tx(adapter) &&
+                           !adapter->ahw->diag_test)
+                               sds_ring->tx_ring = &adapter->tx_ring[ring];
+                       else
+                               sds_ring->tx_ring = &adapter->tx_ring[0];
+               }
+               for (i = 0; i < NUM_RCV_DESC_RINGS; i++)
+                       INIT_LIST_HEAD(&sds_ring->free_list[i]);
+       }
+
+       return 0;
+
+err_out:
+       qlcnic_free_sw_resources(adapter);
+       return -ENOMEM;
+}
+
+/*
+ * Utility to translate from internal Phantom CRB address
+ * to external PCI CRB address.
+ */
+static u32 qlcnic_decode_crb_addr(u32 addr)
+{
+       int i;
+       u32 base_addr, offset, pci_base;
+
+       crb_addr_transform_setup();
+
+       pci_base = QLCNIC_ADDR_ERROR;
+       base_addr = addr & 0xfff00000;
+       offset = addr & 0x000fffff;
+
+       for (i = 0; i < QLCNIC_MAX_CRB_XFORM; i++) {
+               if (crb_addr_xform[i] == base_addr) {
+                       pci_base = i << 20;
+                       break;
+               }
+       }
+       if (pci_base == QLCNIC_ADDR_ERROR)
+               return pci_base;
+       else
+               return pci_base + offset;
+}
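+
+/*
+ * Example (descriptive only): crb_addr_transform_setup() stores each hub
+ * agent address in the top 12 bits of crb_addr_xform[], so an address
+ * whose upper bits match agent i decodes to (i << 20) | (addr & 0xfffff),
+ * a PCI CRB-space offset.  Addresses that match no agent come back as
+ * QLCNIC_ADDR_ERROR and are skipped by qlcnic_pinit_from_rom().
+ */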
+
+#define QLCNIC_MAX_ROM_WAIT_USEC       100
+
+static int qlcnic_wait_rom_done(struct qlcnic_adapter *adapter)
+{
+       long timeout = 0;
+       long done = 0;
+       int err = 0;
+
+       cond_resched();
+       while (done == 0) {
+               done = QLCRD32(adapter, QLCNIC_ROMUSB_GLB_STATUS, &err);
+               done &= 2;
+               if (++timeout >= QLCNIC_MAX_ROM_WAIT_USEC) {
+                       dev_err(&adapter->pdev->dev,
+                               "Timeout reached waiting for rom done\n");
+                       return -EIO;
+               }
+               udelay(1);
+       }
+       return 0;
+}
+
+static int do_rom_fast_read(struct qlcnic_adapter *adapter,
+                           u32 addr, u32 *valp)
+{
+       int err = 0;
+
+       QLCWR32(adapter, QLCNIC_ROMUSB_ROM_ADDRESS, addr);
+       QLCWR32(adapter, QLCNIC_ROMUSB_ROM_DUMMY_BYTE_CNT, 0);
+       QLCWR32(adapter, QLCNIC_ROMUSB_ROM_ABYTE_CNT, 3);
+       QLCWR32(adapter, QLCNIC_ROMUSB_ROM_INSTR_OPCODE, 0xb);
+       if (qlcnic_wait_rom_done(adapter)) {
+               dev_err(&adapter->pdev->dev, "Error waiting for rom done\n");
+               return -EIO;
+       }
+       /* reset abyte_cnt and dummy_byte_cnt */
+       QLCWR32(adapter, QLCNIC_ROMUSB_ROM_ABYTE_CNT, 0);
+       udelay(10);
+       QLCWR32(adapter, QLCNIC_ROMUSB_ROM_DUMMY_BYTE_CNT, 0);
+
+       *valp = QLCRD32(adapter, QLCNIC_ROMUSB_ROM_RDATA, &err);
+       if (err == -EIO)
+               return err;
+       return 0;
+}
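+
+/*
+ * Flow of the fast read above (descriptive only): the address and byte
+ * counts are programmed, instruction opcode 0xb is issued (presumably the
+ * serial flash FAST_READ opcode), completion is polled on bit 1 of
+ * QLCNIC_ROMUSB_GLB_STATUS in qlcnic_wait_rom_done(), and the 32-bit
+ * result is read back from QLCNIC_ROMUSB_ROM_RDATA.
+ */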
+
+static int do_rom_fast_read_words(struct qlcnic_adapter *adapter, int addr,
+                                 u8 *bytes, size_t size)
+{
+       int addridx;
+       int ret = 0;
+
+       for (addridx = addr; addridx < (addr + size); addridx += 4) {
+               int v;
+               ret = do_rom_fast_read(adapter, addridx, &v);
+               if (ret != 0)
+                       break;
+               *(__le32 *)bytes = cpu_to_le32(v);
+               bytes += 4;
+       }
+
+       return ret;
+}
+
+int
+qlcnic_rom_fast_read_words(struct qlcnic_adapter *adapter, int addr,
+                               u8 *bytes, size_t size)
+{
+       int ret;
+
+       ret = qlcnic_rom_lock(adapter);
+       if (ret < 0)
+               return ret;
+
+       ret = do_rom_fast_read_words(adapter, addr, bytes, size);
+
+       qlcnic_rom_unlock(adapter);
+       return ret;
+}
+
+int qlcnic_rom_fast_read(struct qlcnic_adapter *adapter, u32 addr, u32 *valp)
+{
+       int ret;
+
+       if (qlcnic_rom_lock(adapter) != 0)
+               return -EIO;
+
+       ret = do_rom_fast_read(adapter, addr, valp);
+       qlcnic_rom_unlock(adapter);
+       return ret;
+}
+
+int qlcnic_pinit_from_rom(struct qlcnic_adapter *adapter)
+{
+       int addr, err = 0;
+       int i, n, init_delay;
+       struct crb_addr_pair *buf;
+       unsigned offset;
+       u32 off, val;
+       struct pci_dev *pdev = adapter->pdev;
+
+       QLC_SHARED_REG_WR32(adapter, QLCNIC_CMDPEG_STATE, 0);
+       QLC_SHARED_REG_WR32(adapter, QLCNIC_RCVPEG_STATE, 0);
+
+       /* Halt all the individual PEGs and other blocks */
+       /* disable all I2Q */
+       QLCWR32(adapter, QLCNIC_CRB_I2Q + 0x10, 0x0);
+       QLCWR32(adapter, QLCNIC_CRB_I2Q + 0x14, 0x0);
+       QLCWR32(adapter, QLCNIC_CRB_I2Q + 0x18, 0x0);
+       QLCWR32(adapter, QLCNIC_CRB_I2Q + 0x1c, 0x0);
+       QLCWR32(adapter, QLCNIC_CRB_I2Q + 0x20, 0x0);
+       QLCWR32(adapter, QLCNIC_CRB_I2Q + 0x24, 0x0);
+
+       /* disable all niu interrupts */
+       QLCWR32(adapter, QLCNIC_CRB_NIU + 0x40, 0xff);
+       /* disable xge rx/tx */
+       QLCWR32(adapter, QLCNIC_CRB_NIU + 0x70000, 0x00);
+       /* disable xg1 rx/tx */
+       QLCWR32(adapter, QLCNIC_CRB_NIU + 0x80000, 0x00);
+       /* disable sideband mac */
+       QLCWR32(adapter, QLCNIC_CRB_NIU + 0x90000, 0x00);
+       /* disable ap0 mac */
+       QLCWR32(adapter, QLCNIC_CRB_NIU + 0xa0000, 0x00);
+       /* disable ap1 mac */
+       QLCWR32(adapter, QLCNIC_CRB_NIU + 0xb0000, 0x00);
+
+       /* halt sre */
+       val = QLCRD32(adapter, QLCNIC_CRB_SRE + 0x1000, &err);
+       if (err == -EIO)
+               return err;
+       QLCWR32(adapter, QLCNIC_CRB_SRE + 0x1000, val & (~(0x1)));
+
+       /* halt epg */
+       QLCWR32(adapter, QLCNIC_CRB_EPG + 0x1300, 0x1);
+
+       /* halt timers */
+       QLCWR32(adapter, QLCNIC_CRB_TIMER + 0x0, 0x0);
+       QLCWR32(adapter, QLCNIC_CRB_TIMER + 0x8, 0x0);
+       QLCWR32(adapter, QLCNIC_CRB_TIMER + 0x10, 0x0);
+       QLCWR32(adapter, QLCNIC_CRB_TIMER + 0x18, 0x0);
+       QLCWR32(adapter, QLCNIC_CRB_TIMER + 0x100, 0x0);
+       QLCWR32(adapter, QLCNIC_CRB_TIMER + 0x200, 0x0);
+       /* halt pegs */
+       QLCWR32(adapter, QLCNIC_CRB_PEG_NET_0 + 0x3c, 1);
+       QLCWR32(adapter, QLCNIC_CRB_PEG_NET_1 + 0x3c, 1);
+       QLCWR32(adapter, QLCNIC_CRB_PEG_NET_2 + 0x3c, 1);
+       QLCWR32(adapter, QLCNIC_CRB_PEG_NET_3 + 0x3c, 1);
+       QLCWR32(adapter, QLCNIC_CRB_PEG_NET_4 + 0x3c, 1);
+       msleep(20);
+
+       qlcnic_rom_unlock(adapter);
+       /* big hammer reset; don't reset the CAM block */
+       QLCWR32(adapter, QLCNIC_ROMUSB_GLB_SW_RESET, 0xfeffffff);
+
+       /* Init HW CRB block */
+       if (qlcnic_rom_fast_read(adapter, 0, &n) != 0 || (n != 0xcafecafe) ||
+                       qlcnic_rom_fast_read(adapter, 4, &n) != 0) {
+               dev_err(&pdev->dev, "ERROR Reading crb_init area: val:%x\n", n);
+               return -EIO;
+       }
+       offset = n & 0xffffU;
+       n = (n >> 16) & 0xffffU;
+
+       if (n >= 1024) {
+               dev_err(&pdev->dev, "QLOGIC card flash not initialized.\n");
+               return -EIO;
+       }
+
+       buf = kcalloc(n, sizeof(struct crb_addr_pair), GFP_KERNEL);
+       if (buf == NULL)
+               return -ENOMEM;
+
+       for (i = 0; i < n; i++) {
+               if (qlcnic_rom_fast_read(adapter, 8*i + 4*offset, &val) != 0 ||
+               qlcnic_rom_fast_read(adapter, 8*i + 4*offset + 4, &addr) != 0) {
+                       kfree(buf);
+                       return -EIO;
+               }
+
+               buf[i].addr = addr;
+               buf[i].data = val;
+       }
+
+       for (i = 0; i < n; i++) {
+
+               off = qlcnic_decode_crb_addr(buf[i].addr);
+               if (off == QLCNIC_ADDR_ERROR) {
+                       dev_err(&pdev->dev, "CRB init value out of range %x\n",
+                                       buf[i].addr);
+                       continue;
+               }
+               off += QLCNIC_PCI_CRBSPACE;
+
+               if (off & 1)
+                       continue;
+
+               /* skipping cold reboot MAGIC */
+               if (off == QLCNIC_CAM_RAM(0x1fc))
+                       continue;
+               if (off == (QLCNIC_CRB_I2C0 + 0x1c))
+                       continue;
+               if (off == (ROMUSB_GLB + 0xbc)) /* do not reset PCI */
+                       continue;
+               if (off == (ROMUSB_GLB + 0xa8))
+                       continue;
+               if (off == (ROMUSB_GLB + 0xc8)) /* core clock */
+                       continue;
+               if (off == (ROMUSB_GLB + 0x24)) /* MN clock */
+                       continue;
+               if (off == (ROMUSB_GLB + 0x1c)) /* MS clock */
+                       continue;
+               if ((off & 0x0ff00000) == QLCNIC_CRB_DDR_NET)
+                       continue;
+               /* skip the function enable register */
+               if (off == QLCNIC_PCIE_REG(PCIE_SETUP_FUNCTION))
+                       continue;
+               if (off == QLCNIC_PCIE_REG(PCIE_SETUP_FUNCTION2))
+                       continue;
+               if ((off & 0x0ff00000) == QLCNIC_CRB_SMB)
+                       continue;
+
+               init_delay = 1;
+               /* After writing this register, HW needs time for CRB */
+               /* to quiet down (else crb_window returns 0xffffffff) */
+               if (off == QLCNIC_ROMUSB_GLB_SW_RESET)
+                       init_delay = 1000;
+
+               QLCWR32(adapter, off, buf[i].data);
+
+               msleep(init_delay);
+       }
+       kfree(buf);
+
+       /* Initialize protocol process engine */
+       QLCWR32(adapter, QLCNIC_CRB_PEG_NET_D + 0xec, 0x1e);
+       QLCWR32(adapter, QLCNIC_CRB_PEG_NET_D + 0x4c, 8);
+       QLCWR32(adapter, QLCNIC_CRB_PEG_NET_I + 0x4c, 8);
+       QLCWR32(adapter, QLCNIC_CRB_PEG_NET_0 + 0x8, 0);
+       QLCWR32(adapter, QLCNIC_CRB_PEG_NET_0 + 0xc, 0);
+       QLCWR32(adapter, QLCNIC_CRB_PEG_NET_1 + 0x8, 0);
+       QLCWR32(adapter, QLCNIC_CRB_PEG_NET_1 + 0xc, 0);
+       QLCWR32(adapter, QLCNIC_CRB_PEG_NET_2 + 0x8, 0);
+       QLCWR32(adapter, QLCNIC_CRB_PEG_NET_2 + 0xc, 0);
+       QLCWR32(adapter, QLCNIC_CRB_PEG_NET_3 + 0x8, 0);
+       QLCWR32(adapter, QLCNIC_CRB_PEG_NET_3 + 0xc, 0);
+       QLCWR32(adapter, QLCNIC_CRB_PEG_NET_4 + 0x8, 0);
+       QLCWR32(adapter, QLCNIC_CRB_PEG_NET_4 + 0xc, 0);
+       usleep_range(1000, 1500);
+
+       QLC_SHARED_REG_WR32(adapter, QLCNIC_PEG_HALT_STATUS1, 0);
+       QLC_SHARED_REG_WR32(adapter, QLCNIC_PEG_HALT_STATUS2, 0);
+
+       return 0;
+}
+
+static int qlcnic_cmd_peg_ready(struct qlcnic_adapter *adapter)
+{
+       u32 val;
+       int retries = QLCNIC_CMDPEG_CHECK_RETRY_COUNT;
+
+       do {
+               val = QLC_SHARED_REG_RD32(adapter, QLCNIC_CMDPEG_STATE);
+
+               switch (val) {
+               case PHAN_INITIALIZE_COMPLETE:
+               case PHAN_INITIALIZE_ACK:
+                       return 0;
+               case PHAN_INITIALIZE_FAILED:
+                       goto out_err;
+               default:
+                       break;
+               }
+
+               msleep(QLCNIC_CMDPEG_CHECK_DELAY);
+
+       } while (--retries);
+
+       QLC_SHARED_REG_WR32(adapter, QLCNIC_CMDPEG_STATE,
+                           PHAN_INITIALIZE_FAILED);
+
+out_err:
+       dev_err(&adapter->pdev->dev, "Command Peg initialization not "
+                     "complete, state: 0x%x.\n", val);
+       return -EIO;
+}
+
+static int
+qlcnic_receive_peg_ready(struct qlcnic_adapter *adapter)
+{
+       u32 val;
+       int retries = QLCNIC_RCVPEG_CHECK_RETRY_COUNT;
+
+       do {
+               val = QLC_SHARED_REG_RD32(adapter, QLCNIC_RCVPEG_STATE);
+
+               if (val == PHAN_PEG_RCV_INITIALIZED)
+                       return 0;
+
+               msleep(QLCNIC_RCVPEG_CHECK_DELAY);
+
+       } while (--retries);
+
+       if (!retries) {
+               dev_err(&adapter->pdev->dev, "Receive Peg initialization not "
+                             "complete, state: 0x%x.\n", val);
+               return -EIO;
+       }
+
+       return 0;
+}
+
+int
+qlcnic_check_fw_status(struct qlcnic_adapter *adapter)
+{
+       int err;
+
+       err = qlcnic_cmd_peg_ready(adapter);
+       if (err)
+               return err;
+
+       err = qlcnic_receive_peg_ready(adapter);
+       if (err)
+               return err;
+
+       QLC_SHARED_REG_WR32(adapter, QLCNIC_CMDPEG_STATE, PHAN_INITIALIZE_ACK);
+
+       return err;
+}
+
+int
+qlcnic_setup_idc_param(struct qlcnic_adapter *adapter)
+{
+       int timeo;
+       u32 val;
+
+       val = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DEV_PARTITION_INFO);
+       val = QLC_DEV_GET_DRV(val, adapter->portnum);
+       if ((val & 0x3) != QLCNIC_TYPE_NIC) {
+               dev_err(&adapter->pdev->dev,
+                       "Not an Ethernet NIC func=%u\n", val);
+               return -EIO;
+       }
+       adapter->ahw->physical_port = (val >> 2);
+       if (qlcnic_rom_fast_read(adapter, QLCNIC_ROM_DEV_INIT_TIMEOUT, &timeo))
+               timeo = QLCNIC_INIT_TIMEOUT_SECS;
+
+       adapter->dev_init_timeo = timeo;
+
+       if (qlcnic_rom_fast_read(adapter, QLCNIC_ROM_DRV_RESET_TIMEOUT, &timeo))
+               timeo = QLCNIC_RESET_TIMEOUT_SECS;
+
+       adapter->reset_ack_timeo = timeo;
+
+       return 0;
+}
+
+static int qlcnic_get_flt_entry(struct qlcnic_adapter *adapter, u8 region,
+                               struct qlcnic_flt_entry *region_entry)
+{
+       struct qlcnic_flt_header flt_hdr;
+       struct qlcnic_flt_entry *flt_entry;
+       int i = 0, ret;
+       u32 entry_size;
+
+       memset(region_entry, 0, sizeof(struct qlcnic_flt_entry));
+       ret = qlcnic_rom_fast_read_words(adapter, QLCNIC_FLT_LOCATION,
+                                        (u8 *)&flt_hdr,
+                                        sizeof(struct qlcnic_flt_header));
+       if (ret) {
+               dev_warn(&adapter->pdev->dev,
+                        "error reading flash layout header\n");
+               return -EIO;
+       }
+
+       entry_size = flt_hdr.len - sizeof(struct qlcnic_flt_header);
+       flt_entry = vzalloc(entry_size);
+       if (flt_entry == NULL)
+               return -EIO;
+
+       ret = qlcnic_rom_fast_read_words(adapter, QLCNIC_FLT_LOCATION +
+                                        sizeof(struct qlcnic_flt_header),
+                                        (u8 *)flt_entry, entry_size);
+       if (ret) {
+               dev_warn(&adapter->pdev->dev,
+                        "error reading flash layout entries\n");
+               goto err_out;
+       }
+
+       while (i < (entry_size/sizeof(struct qlcnic_flt_entry))) {
+               if (flt_entry[i].region == region)
+                       break;
+               i++;
+       }
+       if (i >= (entry_size/sizeof(struct qlcnic_flt_entry))) {
+               dev_warn(&adapter->pdev->dev,
+                        "region=%x not found in %d regions\n", region, i);
+               ret = -EIO;
+               goto err_out;
+       }
+       memcpy(region_entry, &flt_entry[i], sizeof(struct qlcnic_flt_entry));
+
+err_out:
+       vfree(flt_entry);
+       return ret;
+}
+
+int
+qlcnic_check_flash_fw_ver(struct qlcnic_adapter *adapter)
+{
+       struct qlcnic_flt_entry fw_entry;
+       u32 ver = -1, min_ver;
+       int ret;
+
+       if (adapter->ahw->revision_id == QLCNIC_P3P_C0)
+               ret = qlcnic_get_flt_entry(adapter, QLCNIC_C0_FW_IMAGE_REGION,
+                                                &fw_entry);
+       else
+               ret = qlcnic_get_flt_entry(adapter, QLCNIC_B0_FW_IMAGE_REGION,
+                                                &fw_entry);
+
+       if (!ret)
+               /* bytes 0-3: signature, bytes 4-7: fw version */
+               qlcnic_rom_fast_read(adapter, fw_entry.start_addr + 4,
+                                    (int *)&ver);
+       else
+               qlcnic_rom_fast_read(adapter, QLCNIC_FW_VERSION_OFFSET,
+                                    (int *)&ver);
+
+       ver = QLCNIC_DECODE_VERSION(ver);
+       min_ver = QLCNIC_MIN_FW_VERSION;
+
+       if (ver < min_ver) {
+               dev_err(&adapter->pdev->dev,
+                       "firmware version %d.%d.%d unsupported. "
+                       "Min supported version %d.%d.%d\n",
+                       _major(ver), _minor(ver), _build(ver),
+                       _major(min_ver), _minor(min_ver), _build(min_ver));
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+static int
+qlcnic_has_mn(struct qlcnic_adapter *adapter)
+{
+       u32 capability = 0;
+       int err = 0;
+
+       capability = QLCRD32(adapter, QLCNIC_PEG_TUNE_CAPABILITY, &err);
+       if (err == -EIO)
+               return err;
+       if (capability & QLCNIC_PEG_TUNE_MN_PRESENT)
+               return 1;
+
+       return 0;
+}
+
+static
+struct uni_table_desc *qlcnic_get_table_desc(const u8 *unirom, int section)
+{
+       u32 i, entries;
+       struct uni_table_desc *directory = (struct uni_table_desc *) &unirom[0];
+       entries = le32_to_cpu(directory->num_entries);
+
+       for (i = 0; i < entries; i++) {
+
+               u32 offs = le32_to_cpu(directory->findex) +
+                          i * le32_to_cpu(directory->entry_size);
+               u32 tab_type = le32_to_cpu(*((__le32 *)&unirom[offs] + 8));
+
+               if (tab_type == section)
+                       return (struct uni_table_desc *) &unirom[offs];
+       }
+
+       return NULL;
+}
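+
+/*
+ * Layout note (descriptive only): a unified ROM image begins with a
+ * directory of uni_table_desc entries; entry i lives at
+ * findex + i * entry_size and carries its section type in its ninth
+ * 32-bit word, which is what the lookup above matches against.
+ */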
+
+#define FILEHEADER_SIZE (14 * 4)
+
+static int
+qlcnic_validate_header(struct qlcnic_adapter *adapter)
+{
+       const u8 *unirom = adapter->fw->data;
+       struct uni_table_desc *directory = (struct uni_table_desc *) &unirom[0];
+       u32 entries, entry_size, tab_size, fw_file_size;
+
+       fw_file_size = adapter->fw->size;
+
+       if (fw_file_size < FILEHEADER_SIZE)
+               return -EINVAL;
+
+       entries = le32_to_cpu(directory->num_entries);
+       entry_size = le32_to_cpu(directory->entry_size);
+       tab_size = le32_to_cpu(directory->findex) + (entries * entry_size);
+
+       if (fw_file_size < tab_size)
+               return -EINVAL;
+
+       return 0;
+}
+
+static int
+qlcnic_validate_bootld(struct qlcnic_adapter *adapter)
+{
+       struct uni_table_desc *tab_desc;
+       struct uni_data_desc *descr;
+       u32 offs, tab_size, data_size, idx;
+       const u8 *unirom = adapter->fw->data;
+       __le32 temp;
+
+       temp = *((__le32 *)&unirom[adapter->file_prd_off] +
+                QLCNIC_UNI_BOOTLD_IDX_OFF);
+       idx = le32_to_cpu(temp);
+       tab_desc = qlcnic_get_table_desc(unirom, QLCNIC_UNI_DIR_SECT_BOOTLD);
+
+       if (!tab_desc)
+               return -EINVAL;
+
+       tab_size = le32_to_cpu(tab_desc->findex) +
+                  le32_to_cpu(tab_desc->entry_size) * (idx + 1);
+
+       if (adapter->fw->size < tab_size)
+               return -EINVAL;
+
+       offs = le32_to_cpu(tab_desc->findex) +
+              le32_to_cpu(tab_desc->entry_size) * idx;
+       descr = (struct uni_data_desc *)&unirom[offs];
+
+       data_size = le32_to_cpu(descr->findex) + le32_to_cpu(descr->size);
+
+       if (adapter->fw->size < data_size)
+               return -EINVAL;
+
+       return 0;
+}
+
+static int
+qlcnic_validate_fw(struct qlcnic_adapter *adapter)
+{
+       struct uni_table_desc *tab_desc;
+       struct uni_data_desc *descr;
+       const u8 *unirom = adapter->fw->data;
+       u32 offs, tab_size, data_size, idx;
+       __le32 temp;
+
+       temp = *((__le32 *)&unirom[adapter->file_prd_off] +
+                QLCNIC_UNI_FIRMWARE_IDX_OFF);
+       idx = le32_to_cpu(temp);
+       tab_desc = qlcnic_get_table_desc(unirom, QLCNIC_UNI_DIR_SECT_FW);
+
+       if (!tab_desc)
+               return -EINVAL;
+
+       tab_size = le32_to_cpu(tab_desc->findex) +
+                  le32_to_cpu(tab_desc->entry_size) * (idx + 1);
+
+       if (adapter->fw->size < tab_size)
+               return -EINVAL;
+
+       offs = le32_to_cpu(tab_desc->findex) +
+              le32_to_cpu(tab_desc->entry_size) * idx;
+       descr = (struct uni_data_desc *)&unirom[offs];
+       data_size = le32_to_cpu(descr->findex) + le32_to_cpu(descr->size);
+
+       if (adapter->fw->size < data_size)
+               return -EINVAL;
+
+       return 0;
+}
+
+static int
+qlcnic_validate_product_offs(struct qlcnic_adapter *adapter)
+{
+       struct uni_table_desc *ptab_descr;
+       const u8 *unirom = adapter->fw->data;
+       int mn_present = qlcnic_has_mn(adapter);
+       u32 entries, entry_size, tab_size, i;
+       __le32 temp;
+
+       ptab_descr = qlcnic_get_table_desc(unirom,
+                               QLCNIC_UNI_DIR_SECT_PRODUCT_TBL);
+       if (!ptab_descr)
+               return -EINVAL;
+
+       entries = le32_to_cpu(ptab_descr->num_entries);
+       entry_size = le32_to_cpu(ptab_descr->entry_size);
+       tab_size = le32_to_cpu(ptab_descr->findex) + (entries * entry_size);
+
+       if (adapter->fw->size < tab_size)
+               return -EINVAL;
+
+nomn:
+       for (i = 0; i < entries; i++) {
+
+               u32 flags, file_chiprev, offs;
+               u8 chiprev = adapter->ahw->revision_id;
+               u32 flagbit;
+
+               offs = le32_to_cpu(ptab_descr->findex) +
+                      i * le32_to_cpu(ptab_descr->entry_size);
+               temp = *((__le32 *)&unirom[offs] + QLCNIC_UNI_FLAGS_OFF);
+               flags = le32_to_cpu(temp);
+               temp = *((__le32 *)&unirom[offs] + QLCNIC_UNI_CHIP_REV_OFF);
+               file_chiprev = le32_to_cpu(temp);
+
+               flagbit = mn_present ? 1 : 2;
+
+               if ((chiprev == file_chiprev) &&
+                                       ((1ULL << flagbit) & flags)) {
+                       adapter->file_prd_off = offs;
+                       return 0;
+               }
+       }
+       if (mn_present) {
+               mn_present = 0;
+               goto nomn;
+       }
+       return -EINVAL;
+}
+
+static int
+qlcnic_validate_unified_romimage(struct qlcnic_adapter *adapter)
+{
+       if (qlcnic_validate_header(adapter)) {
+               dev_err(&adapter->pdev->dev,
+                               "unified image: header validation failed\n");
+               return -EINVAL;
+       }
+
+       if (qlcnic_validate_product_offs(adapter)) {
+               dev_err(&adapter->pdev->dev,
+                               "unified image: product validation failed\n");
+               return -EINVAL;
+       }
+
+       if (qlcnic_validate_bootld(adapter)) {
+               dev_err(&adapter->pdev->dev,
+                               "unified image: bootld validation failed\n");
+               return -EINVAL;
+       }
+
+       if (qlcnic_validate_fw(adapter)) {
+               dev_err(&adapter->pdev->dev,
+                               "unified image: firmware validation failed\n");
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+static
+struct uni_data_desc *qlcnic_get_data_desc(struct qlcnic_adapter *adapter,
+                       u32 section, u32 idx_offset)
+{
+       const u8 *unirom = adapter->fw->data;
+       struct uni_table_desc *tab_desc;
+       u32 offs, idx;
+       __le32 temp;
+
+       temp = *((__le32 *)&unirom[adapter->file_prd_off] + idx_offset);
+       idx = le32_to_cpu(temp);
+
+       tab_desc = qlcnic_get_table_desc(unirom, section);
+
+       if (tab_desc == NULL)
+               return NULL;
+
+       offs = le32_to_cpu(tab_desc->findex) +
+              le32_to_cpu(tab_desc->entry_size) * idx;
+
+       return (struct uni_data_desc *)&unirom[offs];
+}
+
+static u8 *
+qlcnic_get_bootld_offs(struct qlcnic_adapter *adapter)
+{
+       u32 offs = QLCNIC_BOOTLD_START;
+       struct uni_data_desc *data_desc;
+
+       data_desc = qlcnic_get_data_desc(adapter, QLCNIC_UNI_DIR_SECT_BOOTLD,
+                                        QLCNIC_UNI_BOOTLD_IDX_OFF);
+
+       if (adapter->ahw->fw_type == QLCNIC_UNIFIED_ROMIMAGE)
+               offs = le32_to_cpu(data_desc->findex);
+
+       return (u8 *)&adapter->fw->data[offs];
+}
+
+static u8 *
+qlcnic_get_fw_offs(struct qlcnic_adapter *adapter)
+{
+       u32 offs = QLCNIC_IMAGE_START;
+       struct uni_data_desc *data_desc;
+
+       data_desc = qlcnic_get_data_desc(adapter, QLCNIC_UNI_DIR_SECT_FW,
+                                        QLCNIC_UNI_FIRMWARE_IDX_OFF);
+       if (adapter->ahw->fw_type == QLCNIC_UNIFIED_ROMIMAGE)
+               offs = le32_to_cpu(data_desc->findex);
+
+       return (u8 *)&adapter->fw->data[offs];
+}
+
+static u32 qlcnic_get_fw_size(struct qlcnic_adapter *adapter)
+{
+       struct uni_data_desc *data_desc;
+       const u8 *unirom = adapter->fw->data;
+
+       data_desc = qlcnic_get_data_desc(adapter, QLCNIC_UNI_DIR_SECT_FW,
+                                        QLCNIC_UNI_FIRMWARE_IDX_OFF);
+
+       if (adapter->ahw->fw_type == QLCNIC_UNIFIED_ROMIMAGE)
+               return le32_to_cpu(data_desc->size);
+       else
+               return le32_to_cpu(*(__le32 *)&unirom[QLCNIC_FW_SIZE_OFFSET]);
+}
+
+static u32 qlcnic_get_fw_version(struct qlcnic_adapter *adapter)
+{
+       struct uni_data_desc *fw_data_desc;
+       const struct firmware *fw = adapter->fw;
+       u32 major, minor, sub;
+       __le32 version_offset;
+       const u8 *ver_str;
+       int i, ret;
+
+       if (adapter->ahw->fw_type != QLCNIC_UNIFIED_ROMIMAGE) {
+               version_offset = *(__le32 *)&fw->data[QLCNIC_FW_VERSION_OFFSET];
+               return le32_to_cpu(version_offset);
+       }
+
+       fw_data_desc = qlcnic_get_data_desc(adapter, QLCNIC_UNI_DIR_SECT_FW,
+                       QLCNIC_UNI_FIRMWARE_IDX_OFF);
+       ver_str = fw->data + le32_to_cpu(fw_data_desc->findex) +
+                 le32_to_cpu(fw_data_desc->size) - 17;
+
+       for (i = 0; i < 12; i++) {
+               if (!strncmp(&ver_str[i], "REV=", 4)) {
+                       ret = sscanf(&ver_str[i+4], "%u.%u.%u ",
+                                       &major, &minor, &sub);
+                       if (ret != 3)
+                               return 0;
+                       else
+                               return major + (minor << 8) + (sub << 16);
+               }
+       }
+
+       return 0;
+}
+
+static u32 qlcnic_get_bios_version(struct qlcnic_adapter *adapter)
+{
+       const struct firmware *fw = adapter->fw;
+       u32 bios_ver, prd_off = adapter->file_prd_off;
+       u8 *version_offset;
+       __le32 temp;
+
+       if (adapter->ahw->fw_type != QLCNIC_UNIFIED_ROMIMAGE) {
+               version_offset = (u8 *)&fw->data[QLCNIC_BIOS_VERSION_OFFSET];
+               return le32_to_cpu(*(__le32 *)version_offset);
+       }
+
+       temp = *((__le32 *)(&fw->data[prd_off]) + QLCNIC_UNI_BIOS_VERSION_OFF);
+       bios_ver = le32_to_cpu(temp);
+
+       return (bios_ver << 16) + ((bios_ver >> 8) & 0xff00) + (bios_ver >> 24);
+}
+
+static void qlcnic_rom_lock_recovery(struct qlcnic_adapter *adapter)
+{
+       if (qlcnic_pcie_sem_lock(adapter, 2, QLCNIC_ROM_LOCK_ID))
+               dev_info(&adapter->pdev->dev, "Resetting rom_lock\n");
+
+       qlcnic_pcie_sem_unlock(adapter, 2);
+}
+
+static int
+qlcnic_check_fw_hearbeat(struct qlcnic_adapter *adapter)
+{
+       u32 heartbeat, ret = -EIO;
+       int retries = QLCNIC_HEARTBEAT_CHECK_RETRY_COUNT;
+
+       adapter->heartbeat = QLC_SHARED_REG_RD32(adapter,
+                                                QLCNIC_PEG_ALIVE_COUNTER);
+
+       do {
+               msleep(QLCNIC_HEARTBEAT_PERIOD_MSECS);
+               heartbeat = QLC_SHARED_REG_RD32(adapter,
+                                               QLCNIC_PEG_ALIVE_COUNTER);
+               if (heartbeat != adapter->heartbeat) {
+                       ret = QLCNIC_RCODE_SUCCESS;
+                       break;
+               }
+       } while (--retries);
+
+       return ret;
+}
+
+int
+qlcnic_need_fw_reset(struct qlcnic_adapter *adapter)
+{
+       if ((adapter->flags & QLCNIC_FW_HANG) ||
+                       qlcnic_check_fw_hearbeat(adapter)) {
+               qlcnic_rom_lock_recovery(adapter);
+               return 1;
+       }
+
+       if (adapter->need_fw_reset)
+               return 1;
+
+       if (adapter->fw)
+               return 1;
+
+       return 0;
+}
+
+static const char *fw_name[] = {
+       QLCNIC_UNIFIED_ROMIMAGE_NAME,
+       QLCNIC_FLASH_ROMIMAGE_NAME,
+};
+
+int
+qlcnic_load_firmware(struct qlcnic_adapter *adapter)
+{
+       __le64 *ptr64;
+       u32 i, flashaddr, size;
+       const struct firmware *fw = adapter->fw;
+       struct pci_dev *pdev = adapter->pdev;
+
+       dev_info(&pdev->dev, "loading firmware from %s\n",
+                fw_name[adapter->ahw->fw_type]);
+
+       if (fw) {
+               u64 data;
+
+               size = (QLCNIC_IMAGE_START - QLCNIC_BOOTLD_START) / 8;
+
+               ptr64 = (__le64 *)qlcnic_get_bootld_offs(adapter);
+               flashaddr = QLCNIC_BOOTLD_START;
+
+               for (i = 0; i < size; i++) {
+                       data = le64_to_cpu(ptr64[i]);
+
+                       if (qlcnic_pci_mem_write_2M(adapter, flashaddr, data))
+                               return -EIO;
+
+                       flashaddr += 8;
+               }
+
+               size = qlcnic_get_fw_size(adapter) / 8;
+
+               ptr64 = (__le64 *)qlcnic_get_fw_offs(adapter);
+               flashaddr = QLCNIC_IMAGE_START;
+
+               for (i = 0; i < size; i++) {
+                       data = le64_to_cpu(ptr64[i]);
+
+                       if (qlcnic_pci_mem_write_2M(adapter,
+                                               flashaddr, data))
+                               return -EIO;
+
+                       flashaddr += 8;
+               }
+
+               size = qlcnic_get_fw_size(adapter) % 8;
+               if (size) {
+                       data = le64_to_cpu(ptr64[i]);
+
+                       if (qlcnic_pci_mem_write_2M(adapter,
+                                               flashaddr, data))
+                               return -EIO;
+               }
+
+       } else {
+               u64 data;
+               u32 hi, lo;
+               int ret;
+               struct qlcnic_flt_entry bootld_entry;
+
+               ret = qlcnic_get_flt_entry(adapter, QLCNIC_BOOTLD_REGION,
+                                       &bootld_entry);
+               if (!ret) {
+                       size = bootld_entry.size / 8;
+                       flashaddr = bootld_entry.start_addr;
+               } else {
+                       size = (QLCNIC_IMAGE_START - QLCNIC_BOOTLD_START) / 8;
+                       flashaddr = QLCNIC_BOOTLD_START;
+                       dev_info(&pdev->dev,
+                               "using legacy method to get flash fw region");
+               }
+
+               for (i = 0; i < size; i++) {
+                       if (qlcnic_rom_fast_read(adapter,
+                                       flashaddr, (int *)&lo) != 0)
+                               return -EIO;
+                       if (qlcnic_rom_fast_read(adapter,
+                                       flashaddr + 4, (int *)&hi) != 0)
+                               return -EIO;
+
+                       data = (((u64)hi << 32) | lo);
+
+                       if (qlcnic_pci_mem_write_2M(adapter,
+                                               flashaddr, data))
+                               return -EIO;
+
+                       flashaddr += 8;
+               }
+       }
+       usleep_range(1000, 1500);
+
+       QLCWR32(adapter, QLCNIC_CRB_PEG_NET_0 + 0x18, 0x1020);
+       QLCWR32(adapter, QLCNIC_ROMUSB_GLB_SW_RESET, 0x80001e);
+       return 0;
+}
+
+static int
+qlcnic_validate_firmware(struct qlcnic_adapter *adapter)
+{
+       u32 val;
+       u32 ver, bios, min_size;
+       struct pci_dev *pdev = adapter->pdev;
+       const struct firmware *fw = adapter->fw;
+       u8 fw_type = adapter->ahw->fw_type;
+
+       if (fw_type == QLCNIC_UNIFIED_ROMIMAGE) {
+               if (qlcnic_validate_unified_romimage(adapter))
+                       return -EINVAL;
+
+               min_size = QLCNIC_UNI_FW_MIN_SIZE;
+       } else {
+               val = le32_to_cpu(*(__le32 *)&fw->data[QLCNIC_FW_MAGIC_OFFSET]);
+               if (val != QLCNIC_BDINFO_MAGIC)
+                       return -EINVAL;
+
+               min_size = QLCNIC_FW_MIN_SIZE;
+       }
+
+       if (fw->size < min_size)
+               return -EINVAL;
+
+       val = qlcnic_get_fw_version(adapter);
+       ver = QLCNIC_DECODE_VERSION(val);
+
+       if (ver < QLCNIC_MIN_FW_VERSION) {
+               dev_err(&pdev->dev,
+                       "%s: firmware version %d.%d.%d unsupported\n",
+                       fw_name[fw_type], _major(ver), _minor(ver), _build(ver));
+               return -EINVAL;
+       }
+
+       val = qlcnic_get_bios_version(adapter);
+       qlcnic_rom_fast_read(adapter, QLCNIC_BIOS_VERSION_OFFSET, (int *)&bios);
+       if (val != bios) {
+               dev_err(&pdev->dev, "%s: firmware bios is incompatible\n",
+                               fw_name[fw_type]);
+               return -EINVAL;
+       }
+
+       QLC_SHARED_REG_WR32(adapter, QLCNIC_FW_IMG_VALID, QLCNIC_BDINFO_MAGIC);
+       return 0;
+}
+
+static void
+qlcnic_get_next_fwtype(struct qlcnic_adapter *adapter)
+{
+       u8 fw_type;
+
+       switch (adapter->ahw->fw_type) {
+       case QLCNIC_UNKNOWN_ROMIMAGE:
+               fw_type = QLCNIC_UNIFIED_ROMIMAGE;
+               break;
+
+       case QLCNIC_UNIFIED_ROMIMAGE:
+       default:
+               fw_type = QLCNIC_FLASH_ROMIMAGE;
+               break;
+       }
+
+       adapter->ahw->fw_type = fw_type;
+}
+
+void qlcnic_request_firmware(struct qlcnic_adapter *adapter)
+{
+       struct pci_dev *pdev = adapter->pdev;
+       int rc;
+
+       adapter->ahw->fw_type = QLCNIC_UNKNOWN_ROMIMAGE;
+
+next:
+       qlcnic_get_next_fwtype(adapter);
+
+       if (adapter->ahw->fw_type == QLCNIC_FLASH_ROMIMAGE) {
+               adapter->fw = NULL;
+       } else {
+               rc = request_firmware(&adapter->fw,
+                                     fw_name[adapter->ahw->fw_type],
+                                     &pdev->dev);
+               if (rc != 0)
+                       goto next;
+
+               rc = qlcnic_validate_firmware(adapter);
+               if (rc != 0) {
+                       release_firmware(adapter->fw);
+                       usleep_range(1000, 1500);
+                       goto next;
+               }
+       }
+}
+
+void
+qlcnic_release_firmware(struct qlcnic_adapter *adapter)
+{
+       release_firmware(adapter->fw);
+       adapter->fw = NULL;
+}
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
new file mode 100644
index 0000000..fedd736
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
@@ -0,0 +1,2230 @@
+/*
+ * QLogic qlcnic NIC Driver
+ * Copyright (c) 2009-2013 QLogic Corporation
+ *
+ * See LICENSE.qlcnic for copyright and licensing details.
+ */
+
+#include <linux/netdevice.h>
+#include <linux/if_vlan.h>
+#include <net/ip.h>
+#include <linux/ipv6.h>
+#include <net/checksum.h>
+#include <linux/printk.h>
+
+#include "qlcnic.h"
+
+#define QLCNIC_TX_ETHER_PKT            0x01
+#define QLCNIC_TX_TCP_PKT              0x02
+#define QLCNIC_TX_UDP_PKT              0x03
+#define QLCNIC_TX_IP_PKT               0x04
+#define QLCNIC_TX_TCP_LSO              0x05
+#define QLCNIC_TX_TCP_LSO6             0x06
+#define QLCNIC_TX_ENCAP_PKT            0x07
+#define QLCNIC_TX_ENCAP_LSO            0x08
+#define QLCNIC_TX_TCPV6_PKT            0x0b
+#define QLCNIC_TX_UDPV6_PKT            0x0c
+
+#define QLCNIC_FLAGS_VLAN_TAGGED       0x10
+#define QLCNIC_FLAGS_VLAN_OOB          0x40
+
+#define qlcnic_set_tx_vlan_tci(cmd_desc, v)    \
+       (cmd_desc)->vlan_TCI = cpu_to_le16(v);
+#define qlcnic_set_cmd_desc_port(cmd_desc, var)        \
+       ((cmd_desc)->port_ctxid |= ((var) & 0x0F))
+#define qlcnic_set_cmd_desc_ctxid(cmd_desc, var)       \
+       ((cmd_desc)->port_ctxid |= ((var) << 4 & 0xF0))
+
+#define qlcnic_set_tx_port(_desc, _port) \
+       ((_desc)->port_ctxid = ((_port) & 0xf) | (((_port) << 4) & 0xf0))
+
+#define qlcnic_set_tx_flags_opcode(_desc, _flags, _opcode) \
+       ((_desc)->flags_opcode |= \
+       cpu_to_le16(((_flags) & 0x7f) | (((_opcode) & 0x3f) << 7)))
+
+#define qlcnic_set_tx_frags_len(_desc, _frags, _len) \
+       ((_desc)->nfrags__length = \
+       cpu_to_le32(((_frags) & 0xff) | (((_len) & 0xffffff) << 8)))
+
+/* owner bits of status_desc */
+#define STATUS_OWNER_HOST      (0x1ULL << 56)
+#define STATUS_OWNER_PHANTOM   (0x2ULL << 56)
+
+/* Status descriptor:
+   0-3 port, 4-7 status, 8-11 type, 12-27 total_length
+   28-43 reference_handle, 44-47 protocol, 48-52 pkt_offset
+   53-55 desc_cnt, 56-57 owner, 58-63 opcode
+ */
+#define qlcnic_get_sts_port(sts_data)  \
+       ((sts_data) & 0x0F)
+#define qlcnic_get_sts_status(sts_data)        \
+       (((sts_data) >> 4) & 0x0F)
+#define qlcnic_get_sts_type(sts_data)  \
+       (((sts_data) >> 8) & 0x0F)
+#define qlcnic_get_sts_totallength(sts_data)   \
+       (((sts_data) >> 12) & 0xFFFF)
+#define qlcnic_get_sts_refhandle(sts_data)     \
+       (((sts_data) >> 28) & 0xFFFF)
+#define qlcnic_get_sts_prot(sts_data)  \
+       (((sts_data) >> 44) & 0x0F)
+#define qlcnic_get_sts_pkt_offset(sts_data)    \
+       (((sts_data) >> 48) & 0x1F)
+#define qlcnic_get_sts_desc_cnt(sts_data)      \
+       (((sts_data) >> 53) & 0x7)
+#define qlcnic_get_sts_opcode(sts_data)        \
+       (((sts_data) >> 58) & 0x03F)
+
+#define qlcnic_get_lro_sts_refhandle(sts_data)         \
+       ((sts_data) & 0x07FFF)
+#define qlcnic_get_lro_sts_length(sts_data)    \
+       (((sts_data) >> 16) & 0x0FFFF)
+#define qlcnic_get_lro_sts_l2_hdr_offset(sts_data)     \
+       (((sts_data) >> 32) & 0x0FF)
+#define qlcnic_get_lro_sts_l4_hdr_offset(sts_data)     \
+       (((sts_data) >> 40) & 0x0FF)
+#define qlcnic_get_lro_sts_timestamp(sts_data) \
+       (((sts_data) >> 48) & 0x1)
+#define qlcnic_get_lro_sts_type(sts_data)      \
+       (((sts_data) >> 49) & 0x7)
+#define qlcnic_get_lro_sts_push_flag(sts_data)         \
+       (((sts_data) >> 52) & 0x1)
+#define qlcnic_get_lro_sts_seq_number(sts_data)                \
+       ((sts_data) & 0x0FFFFFFFF)
+#define qlcnic_get_lro_sts_mss(sts_data1)              \
+       ((sts_data1 >> 32) & 0x0FFFF)
+
+#define qlcnic_83xx_get_lro_sts_mss(sts) ((sts) & 0xffff)
+
+/* opcode field in status_desc */
+#define QLCNIC_SYN_OFFLOAD     0x03
+#define QLCNIC_RXPKT_DESC      0x04
+#define QLCNIC_OLD_RXPKT_DESC  0x3f
+#define QLCNIC_RESPONSE_DESC   0x05
+#define QLCNIC_LRO_DESC        0x12
+
+#define QLCNIC_TCP_HDR_SIZE            20
+#define QLCNIC_TCP_TS_OPTION_SIZE      12
+#define QLCNIC_FETCH_RING_ID(handle)   ((handle) >> 63)
+#define QLCNIC_DESC_OWNER_FW           cpu_to_le64(STATUS_OWNER_PHANTOM)
+
+#define QLCNIC_TCP_TS_HDR_SIZE (QLCNIC_TCP_HDR_SIZE + QLCNIC_TCP_TS_OPTION_SIZE)
+
+/* for status field in status_desc */
+#define STATUS_CKSUM_LOOP      0
+#define STATUS_CKSUM_OK                2
+
+#define qlcnic_83xx_pktln(sts)         ((sts >> 32) & 0x3FFF)
+#define qlcnic_83xx_hndl(sts)          ((sts >> 48) & 0x7FFF)
+#define qlcnic_83xx_csum_status(sts)   ((sts >> 39) & 7)
+#define qlcnic_83xx_opcode(sts)        ((sts >> 42) & 0xF)
+#define qlcnic_83xx_vlan_tag(sts)      (((sts) >> 48) & 0xFFFF)
+#define qlcnic_83xx_lro_pktln(sts)     (((sts) >> 32) & 0x3FFF)
+#define qlcnic_83xx_l2_hdr_off(sts)    (((sts) >> 16) & 0xFF)
+#define qlcnic_83xx_l4_hdr_off(sts)    (((sts) >> 24) & 0xFF)
+#define qlcnic_83xx_pkt_cnt(sts)       (((sts) >> 16) & 0x7)
+#define qlcnic_83xx_is_tstamp(sts)     (((sts) >> 40) & 1)
+#define qlcnic_83xx_is_psh_bit(sts)    (((sts) >> 41) & 1)
+#define qlcnic_83xx_is_ip_align(sts)   (((sts) >> 46) & 1)
+#define qlcnic_83xx_has_vlan_tag(sts)  (((sts) >> 47) & 1)
+
+static int qlcnic_process_rcv_ring(struct qlcnic_host_sds_ring *sds_ring,
+                                  int max);
+
+static struct sk_buff *qlcnic_process_rxbuf(struct qlcnic_adapter *,
+                                           struct qlcnic_host_rds_ring *,
+                                           u16, u16);
+
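+/* Fold the low and high bytes of the MAC address together with the VLAN id
+ * into a single byte used to pick a bucket in the MAC filter hash tables.
+ */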
+static inline u8 qlcnic_mac_hash(u64 mac, u16 vlan)
+{
+       return (u8)((mac & 0xff) ^ ((mac >> 40) & 0xff) ^ (vlan & 0xff));
+}
+
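+/* 83xx adapters OR the RDS ring id (shifted up by 15 bits) into the buffer
+ * reference handle; 82xx adapters use the handle unchanged.
+ */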
+static inline u32 qlcnic_get_ref_handle(struct qlcnic_adapter *adapter,
+                                       u16 handle, u8 ring_id)
+{
+       if (qlcnic_83xx_check(adapter))
+               return handle | (ring_id << 15);
+       else
+               return handle;
+}
+
+static inline int qlcnic_82xx_is_lb_pkt(u64 sts_data)
+{
+       return (qlcnic_get_sts_status(sts_data) == STATUS_CKSUM_LOOP) ? 1 : 0;
+}
+
+static void qlcnic_delete_rx_list_mac(struct qlcnic_adapter *adapter,
+                                     struct qlcnic_filter *fil,
+                                     void *addr, u16 vlan_id)
+{
+       int ret;
+       u8 op;
+
+       op = vlan_id ? QLCNIC_MAC_VLAN_ADD : QLCNIC_MAC_ADD;
+       ret = qlcnic_sre_macaddr_change(adapter, addr, vlan_id, op);
+       if (ret)
+               return;
+
+       op = vlan_id ? QLCNIC_MAC_VLAN_DEL : QLCNIC_MAC_DEL;
+       ret = qlcnic_sre_macaddr_change(adapter, addr, vlan_id, op);
+       if (!ret) {
+               hlist_del(&fil->fnode);
+               adapter->rx_fhash.fnum--;
+       }
+}
+
+static struct qlcnic_filter *qlcnic_find_mac_filter(struct hlist_head *head,
+                                                   void *addr, u16 vlan_id)
+{
+       struct qlcnic_filter *tmp_fil = NULL;
+       struct hlist_node *n;
+
+       hlist_for_each_entry_safe(tmp_fil, n, head, fnode) {
+               if (ether_addr_equal(tmp_fil->faddr, addr) &&
+                   tmp_fil->vlan_id == vlan_id)
+                       return tmp_fil;
+       }
+
+       return NULL;
+}
+
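+/* MAC learning on the receive path: the source MAC of a loopback frame is
+ * remembered in rx_fhash, while the same address seen on a regular frame
+ * causes any previously learned entry to be dropped from fhash/rx_fhash.
+ */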
+static void qlcnic_add_lb_filter(struct qlcnic_adapter *adapter,
+                                struct sk_buff *skb, int loopback_pkt, u16 vlan_id)
+{
+       struct ethhdr *phdr = (struct ethhdr *)(skb->data);
+       struct qlcnic_filter *fil, *tmp_fil;
+       struct hlist_head *head;
+       unsigned long time;
+       u64 src_addr = 0;
+       u8 hindex, op;
+       int ret;
+
+       if (!qlcnic_sriov_pf_check(adapter) || (vlan_id == 0xffff))
+               vlan_id = 0;
+
+       memcpy(&src_addr, phdr->h_source, ETH_ALEN);
+       hindex = qlcnic_mac_hash(src_addr, vlan_id) &
+                (adapter->fhash.fbucket_size - 1);
+
+       if (loopback_pkt) {
+               if (adapter->rx_fhash.fnum >= adapter->rx_fhash.fmax)
+                       return;
+
+               head = &(adapter->rx_fhash.fhead[hindex]);
+
+               tmp_fil = qlcnic_find_mac_filter(head, &src_addr, vlan_id);
+               if (tmp_fil) {
+                       time = tmp_fil->ftime;
+                       if (time_after(jiffies, QLCNIC_READD_AGE * HZ + time))
+                               tmp_fil->ftime = jiffies;
+                       return;
+               }
+
+               fil = kzalloc(sizeof(struct qlcnic_filter), GFP_ATOMIC);
+               if (!fil)
+                       return;
+
+               fil->ftime = jiffies;
+               memcpy(fil->faddr, &src_addr, ETH_ALEN);
+               fil->vlan_id = vlan_id;
+               spin_lock(&adapter->rx_mac_learn_lock);
+               hlist_add_head(&(fil->fnode), head);
+               adapter->rx_fhash.fnum++;
+               spin_unlock(&adapter->rx_mac_learn_lock);
+       } else {
+               head = &adapter->fhash.fhead[hindex];
+
+               spin_lock(&adapter->mac_learn_lock);
+
+               tmp_fil = qlcnic_find_mac_filter(head, &src_addr, vlan_id);
+               if (tmp_fil) {
+                       op = vlan_id ? QLCNIC_MAC_VLAN_DEL : QLCNIC_MAC_DEL;
+                       ret = qlcnic_sre_macaddr_change(adapter,
+                                                       (u8 *)&src_addr,
+                                                       vlan_id, op);
+                       if (!ret) {
+                               hlist_del(&tmp_fil->fnode);
+                               adapter->fhash.fnum--;
+                       }
+
+                       spin_unlock(&adapter->mac_learn_lock);
+
+                       return;
+               }
+
+               spin_unlock(&adapter->mac_learn_lock);
+
+               head = &adapter->rx_fhash.fhead[hindex];
+
+               spin_lock(&adapter->rx_mac_learn_lock);
+
+               tmp_fil = qlcnic_find_mac_filter(head, &src_addr, vlan_id);
+               if (tmp_fil)
+                       qlcnic_delete_rx_list_mac(adapter, tmp_fil, &src_addr,
+                                                 vlan_id);
+
+               spin_unlock(&adapter->rx_mac_learn_lock);
+       }
+}
+
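+/* Post a QLCNIC_MAC_EVENT request descriptor on the Tx ring asking the
+ * firmware to add the given MAC (and VLAN id, when non-zero) to its
+ * filter table.
+ */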
+void qlcnic_82xx_change_filter(struct qlcnic_adapter *adapter, u64 *uaddr,
+                              u16 vlan_id)
+{
+       struct cmd_desc_type0 *hwdesc;
+       struct qlcnic_nic_req *req;
+       struct qlcnic_mac_req *mac_req;
+       struct qlcnic_vlan_req *vlan_req;
+       struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring;
+       u32 producer;
+       u64 word;
+
+       producer = tx_ring->producer;
+       hwdesc = &tx_ring->desc_head[tx_ring->producer];
+
+       req = (struct qlcnic_nic_req *)hwdesc;
+       memset(req, 0, sizeof(struct qlcnic_nic_req));
+       req->qhdr = cpu_to_le64(QLCNIC_REQUEST << 23);
+
+       word = QLCNIC_MAC_EVENT | ((u64)(adapter->portnum) << 16);
+       req->req_hdr = cpu_to_le64(word);
+
+       mac_req = (struct qlcnic_mac_req *)&(req->words[0]);
+       mac_req->op = vlan_id ? QLCNIC_MAC_VLAN_ADD : QLCNIC_MAC_ADD;
+       memcpy(mac_req->mac_addr, uaddr, ETH_ALEN);
+
+       vlan_req = (struct qlcnic_vlan_req *)&req->words[1];
+       vlan_req->vlan_id = cpu_to_le16(vlan_id);
+
+       tx_ring->producer = get_next_index(producer, tx_ring->num_desc);
+       smp_mb();
+}
+
+static void qlcnic_send_filter(struct qlcnic_adapter *adapter,
+                              struct cmd_desc_type0 *first_desc,
+                              struct sk_buff *skb)
+{
+       struct vlan_ethhdr *vh = (struct vlan_ethhdr *)(skb->data);
+       struct ethhdr *phdr = (struct ethhdr *)(skb->data);
+       u16 protocol = ntohs(skb->protocol);
+       struct qlcnic_filter *fil, *tmp_fil;
+       struct hlist_head *head;
+       struct hlist_node *n;
+       u64 src_addr = 0;
+       u16 vlan_id = 0;
+       u8 hindex, hval;
+
+       if (ether_addr_equal(phdr->h_source, adapter->mac_addr))
+               return;
+
+       if (adapter->flags & QLCNIC_VLAN_FILTERING) {
+               if (protocol == ETH_P_8021Q) {
+                       vh = (struct vlan_ethhdr *)skb->data;
+                       vlan_id = ntohs(vh->h_vlan_TCI);
+               } else if (skb_vlan_tag_present(skb)) {
+                       vlan_id = skb_vlan_tag_get(skb);
+               }
+       }
+
+       memcpy(&src_addr, phdr->h_source, ETH_ALEN);
+       hval = qlcnic_mac_hash(src_addr, vlan_id);
+       hindex = hval & (adapter->fhash.fbucket_size - 1);
+       head = &(adapter->fhash.fhead[hindex]);
+
+       hlist_for_each_entry_safe(tmp_fil, n, head, fnode) {
+               if (ether_addr_equal(tmp_fil->faddr, (u8 *)&src_addr) &&
+                   tmp_fil->vlan_id == vlan_id) {
+                       if (time_after(jiffies, QLCNIC_READD_AGE * HZ + tmp_fil->ftime))
+                               qlcnic_change_filter(adapter, &src_addr,
+                                                    vlan_id);
+                       tmp_fil->ftime = jiffies;
+                       return;
+               }
+       }
+
+       if (unlikely(adapter->fhash.fnum >= adapter->fhash.fmax)) {
+               adapter->stats.mac_filter_limit_overrun++;
+               return;
+       }
+
+       fil = kzalloc(sizeof(struct qlcnic_filter), GFP_ATOMIC);
+       if (!fil)
+               return;
+
+       qlcnic_change_filter(adapter, &src_addr, vlan_id);
+       fil->ftime = jiffies;
+       fil->vlan_id = vlan_id;
+       memcpy(fil->faddr, &src_addr, ETH_ALEN);
+       spin_lock(&adapter->mac_learn_lock);
+       hlist_add_head(&(fil->fnode), head);
+       adapter->fhash.fnum++;
+       spin_unlock(&adapter->mac_learn_lock);
+}
+
+#define QLCNIC_ENCAP_VXLAN_PKT         BIT_0
+#define QLCNIC_ENCAP_OUTER_L3_IP6      BIT_1
+#define QLCNIC_ENCAP_INNER_L3_IP6      BIT_2
+#define QLCNIC_ENCAP_INNER_L4_UDP      BIT_3
+#define QLCNIC_ENCAP_DO_L3_CSUM                BIT_4
+#define QLCNIC_ENCAP_DO_L4_CSUM                BIT_5
+
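+/* Build the first Tx descriptor for a VXLAN-encapsulated frame: for GSO
+ * the inner and outer headers are copied into the descriptor ring and the
+ * encapsulated-LSO opcode is used; for checksum offload the encapsulated
+ * packet opcode is used instead. The outer header layout is packed into
+ * the encap_descr field.
+ */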
+static int qlcnic_tx_encap_pkt(struct qlcnic_adapter *adapter,
+                              struct cmd_desc_type0 *first_desc,
+                              struct sk_buff *skb,
+                              struct qlcnic_host_tx_ring *tx_ring)
+{
+       u8 opcode = 0, inner_hdr_len = 0, outer_hdr_len = 0, total_hdr_len = 0;
+       int copied, copy_len, descr_size;
+       u32 producer = tx_ring->producer;
+       struct cmd_desc_type0 *hwdesc;
+       u16 flags = 0, encap_descr = 0;
+
+       opcode = QLCNIC_TX_ETHER_PKT;
+       encap_descr = QLCNIC_ENCAP_VXLAN_PKT;
+
+       if (skb_is_gso(skb)) {
+               inner_hdr_len = skb_inner_transport_header(skb) +
+                               inner_tcp_hdrlen(skb) -
+                               skb_inner_mac_header(skb);
+
+               /* VXLAN header size = 8 */
+               outer_hdr_len = skb_transport_offset(skb) + 8 +
+                               sizeof(struct udphdr);
+               first_desc->outer_hdr_length = outer_hdr_len;
+               total_hdr_len = inner_hdr_len + outer_hdr_len;
+               encap_descr |= QLCNIC_ENCAP_DO_L3_CSUM |
+                              QLCNIC_ENCAP_DO_L4_CSUM;
+               first_desc->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
+               first_desc->hdr_length = inner_hdr_len;
+
+               /* Copy inner and outer headers in Tx descriptor(s)
+                * If total_hdr_len > cmd_desc_type0, use multiple
+                * descriptors
+                */
+               copied = 0;
+               descr_size = (int)sizeof(struct cmd_desc_type0);
+               while (copied < total_hdr_len) {
+                       copy_len = min(descr_size, (total_hdr_len - copied));
+                       hwdesc = &tx_ring->desc_head[producer];
+                       tx_ring->cmd_buf_arr[producer].skb = NULL;
+                       skb_copy_from_linear_data_offset(skb, copied,
+                                                        (char *)hwdesc,
+                                                        copy_len);
+                       copied += copy_len;
+                       producer = get_next_index(producer, tx_ring->num_desc);
+               }
+
+               tx_ring->producer = producer;
+
+               /* Make sure updated tx_ring->producer is visible
+                * for qlcnic_tx_avail()
+                */
+               smp_mb();
+               adapter->stats.encap_lso_frames++;
+
+               opcode = QLCNIC_TX_ENCAP_LSO;
+       } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
+               if (inner_ip_hdr(skb)->version == 6) {
+                       if (inner_ipv6_hdr(skb)->nexthdr == IPPROTO_UDP)
+                               encap_descr |= QLCNIC_ENCAP_INNER_L4_UDP;
+               } else {
+                       if (inner_ip_hdr(skb)->protocol == IPPROTO_UDP)
+                               encap_descr |= QLCNIC_ENCAP_INNER_L4_UDP;
+               }
+
+               adapter->stats.encap_tx_csummed++;
+               opcode = QLCNIC_TX_ENCAP_PKT;
+       }
+
+       /* Prepare first 16 bits of byte offset 16 of Tx descriptor */
+       if (ip_hdr(skb)->version == 6)
+               encap_descr |= QLCNIC_ENCAP_OUTER_L3_IP6;
+
+       /* outer IP header's size in 32-bit words */
+       encap_descr |= (skb_network_header_len(skb) >> 2) << 6;
+
+       /* outer IP header offset */
+       encap_descr |= skb_network_offset(skb) << 10;
+       first_desc->encap_descr = cpu_to_le16(encap_descr);
+
+       first_desc->tcp_hdr_offset = skb_inner_transport_header(skb) -
+                                    skb->data;
+       first_desc->ip_hdr_offset = skb_inner_network_offset(skb);
+
+       qlcnic_set_tx_flags_opcode(first_desc, flags, opcode);
+
+       return 0;
+}
+
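+/* Build the first Tx descriptor for a regular (non-encapsulated) frame:
+ * resolve VLAN tagging (including the port PVID), copy the headers into
+ * the ring for LSO, or select the TCP/UDP checksum-offload opcode.
+ */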
+static int qlcnic_tx_pkt(struct qlcnic_adapter *adapter,
+                        struct cmd_desc_type0 *first_desc, struct sk_buff *skb,
+                        struct qlcnic_host_tx_ring *tx_ring)
+{
+       u8 l4proto, opcode = 0, hdr_len = 0;
+       u16 flags = 0, vlan_tci = 0;
+       int copied, offset, copy_len, size;
+       struct cmd_desc_type0 *hwdesc;
+       struct vlan_ethhdr *vh;
+       u16 protocol = ntohs(skb->protocol);
+       u32 producer = tx_ring->producer;
+
+       if (protocol == ETH_P_8021Q) {
+               vh = (struct vlan_ethhdr *)skb->data;
+               flags = QLCNIC_FLAGS_VLAN_TAGGED;
+               vlan_tci = ntohs(vh->h_vlan_TCI);
+               protocol = ntohs(vh->h_vlan_encapsulated_proto);
+       } else if (skb_vlan_tag_present(skb)) {
+               flags = QLCNIC_FLAGS_VLAN_OOB;
+               vlan_tci = skb_vlan_tag_get(skb);
+       }
+       if (unlikely(adapter->tx_pvid)) {
+               if (vlan_tci && !(adapter->flags & QLCNIC_TAGGING_ENABLED))
+                       return -EIO;
+               if (vlan_tci && (adapter->flags & QLCNIC_TAGGING_ENABLED))
+                       goto set_flags;
+
+               flags = QLCNIC_FLAGS_VLAN_OOB;
+               vlan_tci = adapter->tx_pvid;
+       }
+set_flags:
+       qlcnic_set_tx_vlan_tci(first_desc, vlan_tci);
+       qlcnic_set_tx_flags_opcode(first_desc, flags, opcode);
+
+       if (*(skb->data) & BIT_0) {
+               flags |= BIT_0;
+               memcpy(&first_desc->eth_addr, skb->data, ETH_ALEN);
+       }
+       opcode = QLCNIC_TX_ETHER_PKT;
+       if (skb_is_gso(skb)) {
+               hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+               first_desc->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
+               first_desc->hdr_length = hdr_len;
+               opcode = (protocol == ETH_P_IPV6) ? QLCNIC_TX_TCP_LSO6 :
+                                                   QLCNIC_TX_TCP_LSO;
+
+               /* For LSO, we need to copy the MAC/IP/TCP headers into
+                * the descriptor ring */
+               copied = 0;
+               offset = 2;
+
+               if (flags & QLCNIC_FLAGS_VLAN_OOB) {
+                       first_desc->hdr_length += VLAN_HLEN;
+                       first_desc->tcp_hdr_offset = VLAN_HLEN;
+                       first_desc->ip_hdr_offset = VLAN_HLEN;
+
+                       /* Only in case of TSO on vlan device */
+                       flags |= QLCNIC_FLAGS_VLAN_TAGGED;
+
+                       /* Create a TSO vlan header template for firmware */
+                       hwdesc = &tx_ring->desc_head[producer];
+                       tx_ring->cmd_buf_arr[producer].skb = NULL;
+
+                       copy_len = min((int)sizeof(struct cmd_desc_type0) -
+                                      offset, hdr_len + VLAN_HLEN);
+
+                       vh = (struct vlan_ethhdr *)((char *) hwdesc + 2);
+                       skb_copy_from_linear_data(skb, vh, 12);
+                       vh->h_vlan_proto = htons(ETH_P_8021Q);
+                       vh->h_vlan_TCI = htons(vlan_tci);
+
+                       skb_copy_from_linear_data_offset(skb, 12,
+                                                        (char *)vh + 16,
+                                                        copy_len - 16);
+                       copied = copy_len - VLAN_HLEN;
+                       offset = 0;
+                       producer = get_next_index(producer, tx_ring->num_desc);
+               }
+
+               while (copied < hdr_len) {
+                       size = (int)sizeof(struct cmd_desc_type0) - offset;
+                       copy_len = min(size, (hdr_len - copied));
+                       hwdesc = &tx_ring->desc_head[producer];
+                       tx_ring->cmd_buf_arr[producer].skb = NULL;
+                       skb_copy_from_linear_data_offset(skb, copied,
+                                                        (char *)hwdesc +
+                                                        offset, copy_len);
+                       copied += copy_len;
+                       offset = 0;
+                       producer = get_next_index(producer, tx_ring->num_desc);
+               }
+
+               tx_ring->producer = producer;
+               smp_mb();
+               adapter->stats.lso_frames++;
+
+       } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
+               if (protocol == ETH_P_IP) {
+                       l4proto = ip_hdr(skb)->protocol;
+
+                       if (l4proto == IPPROTO_TCP)
+                               opcode = QLCNIC_TX_TCP_PKT;
+                       else if (l4proto == IPPROTO_UDP)
+                               opcode = QLCNIC_TX_UDP_PKT;
+               } else if (protocol == ETH_P_IPV6) {
+                       l4proto = ipv6_hdr(skb)->nexthdr;
+
+                       if (l4proto == IPPROTO_TCP)
+                               opcode = QLCNIC_TX_TCPV6_PKT;
+                       else if (l4proto == IPPROTO_UDP)
+                               opcode = QLCNIC_TX_UDPV6_PKT;
+               }
+       }
+       first_desc->tcp_hdr_offset += skb_transport_offset(skb);
+       first_desc->ip_hdr_offset += skb_network_offset(skb);
+       qlcnic_set_tx_flags_opcode(first_desc, flags, opcode);
+
+       return 0;
+}
+
+static int qlcnic_map_tx_skb(struct pci_dev *pdev, struct sk_buff *skb,
+                            struct qlcnic_cmd_buffer *pbuf)
+{
+       struct qlcnic_skb_frag *nf;
+       struct skb_frag_struct *frag;
+       int i, nr_frags;
+       dma_addr_t map;
+
+       nr_frags = skb_shinfo(skb)->nr_frags;
+       nf = &pbuf->frag_array[0];
+
+       map = pci_map_single(pdev, skb->data, skb_headlen(skb),
+                            PCI_DMA_TODEVICE);
+       if (pci_dma_mapping_error(pdev, map))
+               goto out_err;
+
+       nf->dma = map;
+       nf->length = skb_headlen(skb);
+
+       for (i = 0; i < nr_frags; i++) {
+               frag = &skb_shinfo(skb)->frags[i];
+               nf = &pbuf->frag_array[i+1];
+               map = skb_frag_dma_map(&pdev->dev, frag, 0, skb_frag_size(frag),
+                                      DMA_TO_DEVICE);
+               if (dma_mapping_error(&pdev->dev, map))
+                       goto unwind;
+
+               nf->dma = map;
+               nf->length = skb_frag_size(frag);
+       }
+
+       return 0;
+
+unwind:
+       while (--i >= 0) {
+               nf = &pbuf->frag_array[i+1];
+               pci_unmap_page(pdev, nf->dma, nf->length, PCI_DMA_TODEVICE);
+       }
+
+       nf = &pbuf->frag_array[0];
+       pci_unmap_single(pdev, nf->dma, skb_headlen(skb), PCI_DMA_TODEVICE);
+
+out_err:
+       return -ENOMEM;
+}
+
+static void qlcnic_unmap_buffers(struct pci_dev *pdev, struct sk_buff *skb,
+                                struct qlcnic_cmd_buffer *pbuf)
+{
+       struct qlcnic_skb_frag *nf = &pbuf->frag_array[0];
+       int i, nr_frags = skb_shinfo(skb)->nr_frags;
+
+       for (i = 0; i < nr_frags; i++) {
+               nf = &pbuf->frag_array[i+1];
+               pci_unmap_page(pdev, nf->dma, nf->length, PCI_DMA_TODEVICE);
+       }
+
+       nf = &pbuf->frag_array[0];
+       pci_unmap_single(pdev, nf->dma, skb_headlen(skb), PCI_DMA_TODEVICE);
+       pbuf->skb = NULL;
+}
+
+static inline void qlcnic_clear_cmddesc(u64 *desc)
+{
+       desc[0] = 0ULL;
+       desc[2] = 0ULL;
+       desc[7] = 0ULL;
+}
+
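+/* ndo_start_xmit handler: pull in excess fragments, DMA-map the skb, fill
+ * the buffer descriptors and hand the frame to the encapsulated or regular
+ * descriptor builder above before updating the command producer.
+ */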
+netdev_tx_t qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
+{
+       struct qlcnic_adapter *adapter = netdev_priv(netdev);
+       struct qlcnic_host_tx_ring *tx_ring;
+       struct qlcnic_cmd_buffer *pbuf;
+       struct qlcnic_skb_frag *buffrag;
+       struct cmd_desc_type0 *hwdesc, *first_desc;
+       struct pci_dev *pdev;
+       struct ethhdr *phdr;
+       int i, k, frag_count, delta = 0;
+       u32 producer, num_txd;
+       u16 protocol;
+       bool l4_is_udp = false;
+
+       if (!test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
+               netif_tx_stop_all_queues(netdev);
+               return NETDEV_TX_BUSY;
+       }
+
+       if (adapter->flags & QLCNIC_MACSPOOF) {
+               phdr = (struct ethhdr *)skb->data;
+               if (!ether_addr_equal(phdr->h_source, adapter->mac_addr))
+                       goto drop_packet;
+       }
+
+       tx_ring = &adapter->tx_ring[skb_get_queue_mapping(skb)];
+       num_txd = tx_ring->num_desc;
+
+       frag_count = skb_shinfo(skb)->nr_frags + 1;
+
+       /* 14 frags supported for normal packet and
+        * 32 frags supported for TSO packet
+        */
+       if (!skb_is_gso(skb) && frag_count > QLCNIC_MAX_FRAGS_PER_TX) {
+               for (i = 0; i < (frag_count - QLCNIC_MAX_FRAGS_PER_TX); i++)
+                       delta += skb_frag_size(&skb_shinfo(skb)->frags[i]);
+
+               if (!__pskb_pull_tail(skb, delta))
+                       goto drop_packet;
+
+               frag_count = 1 + skb_shinfo(skb)->nr_frags;
+       }
+
+       if (unlikely(qlcnic_tx_avail(tx_ring) <= TX_STOP_THRESH)) {
+               netif_tx_stop_queue(tx_ring->txq);
+               if (qlcnic_tx_avail(tx_ring) > TX_STOP_THRESH) {
+                       netif_tx_start_queue(tx_ring->txq);
+               } else {
+                       tx_ring->tx_stats.xmit_off++;
+                       return NETDEV_TX_BUSY;
+               }
+       }
+
+       producer = tx_ring->producer;
+       pbuf = &tx_ring->cmd_buf_arr[producer];
+       pdev = adapter->pdev;
+       first_desc = &tx_ring->desc_head[producer];
+       hwdesc = &tx_ring->desc_head[producer];
+       qlcnic_clear_cmddesc((u64 *)hwdesc);
+
+       if (qlcnic_map_tx_skb(pdev, skb, pbuf)) {
+               adapter->stats.tx_dma_map_error++;
+               goto drop_packet;
+       }
+
+       pbuf->skb = skb;
+       pbuf->frag_count = frag_count;
+
+       qlcnic_set_tx_frags_len(first_desc, frag_count, skb->len);
+       qlcnic_set_tx_port(first_desc, adapter->portnum);
+
+       for (i = 0; i < frag_count; i++) {
+               k = i % 4;
+
+               if ((k == 0) && (i > 0)) {
+                       /* move to next desc.*/
+                       producer = get_next_index(producer, num_txd);
+                       hwdesc = &tx_ring->desc_head[producer];
+                       qlcnic_clear_cmddesc((u64 *)hwdesc);
+                       tx_ring->cmd_buf_arr[producer].skb = NULL;
+               }
+
+               buffrag = &pbuf->frag_array[i];
+               hwdesc->buffer_length[k] = cpu_to_le16(buffrag->length);
+               switch (k) {
+               case 0:
+                       hwdesc->addr_buffer1 = cpu_to_le64(buffrag->dma);
+                       break;
+               case 1:
+                       hwdesc->addr_buffer2 = cpu_to_le64(buffrag->dma);
+                       break;
+               case 2:
+                       hwdesc->addr_buffer3 = cpu_to_le64(buffrag->dma);
+                       break;
+               case 3:
+                       hwdesc->addr_buffer4 = cpu_to_le64(buffrag->dma);
+                       break;
+               }
+       }
+
+       tx_ring->producer = get_next_index(producer, num_txd);
+       smp_mb();
+
+       protocol = ntohs(skb->protocol);
+       if (protocol == ETH_P_IP)
+               l4_is_udp = ip_hdr(skb)->protocol == IPPROTO_UDP;
+       else if (protocol == ETH_P_IPV6)
+               l4_is_udp = ipv6_hdr(skb)->nexthdr == IPPROTO_UDP;
+
+       /* Check if it is a VXLAN packet */
+       if (!skb->encapsulation || !l4_is_udp ||
+           !qlcnic_encap_tx_offload(adapter)) {
+               if (unlikely(qlcnic_tx_pkt(adapter, first_desc, skb,
+                                          tx_ring)))
+                       goto unwind_buff;
+       } else {
+               if (unlikely(qlcnic_tx_encap_pkt(adapter, first_desc,
+                                                skb, tx_ring)))
+                       goto unwind_buff;
+       }
+
+       if (adapter->drv_mac_learn)
+               qlcnic_send_filter(adapter, first_desc, skb);
+
+       tx_ring->tx_stats.tx_bytes += skb->len;
+       tx_ring->tx_stats.xmit_called++;
+
+       /* Ensure writes are complete before HW fetches Tx descriptors */
+       wmb();
+       qlcnic_update_cmd_producer(tx_ring);
+
+       return NETDEV_TX_OK;
+
+unwind_buff:
+       qlcnic_unmap_buffers(pdev, skb, pbuf);
+drop_packet:
+       adapter->stats.txdropped++;
+       dev_kfree_skb_any(skb);
+       return NETDEV_TX_OK;
+}
+
+void qlcnic_advert_link_change(struct qlcnic_adapter *adapter, int linkup)
+{
+       struct net_device *netdev = adapter->netdev;
+
+       if (adapter->ahw->linkup && !linkup) {
+               netdev_info(netdev, "NIC Link is down\n");
+               adapter->ahw->linkup = 0;
+               netif_carrier_off(netdev);
+       } else if (!adapter->ahw->linkup && linkup) {
+               adapter->ahw->linkup = 1;
+
+               /* Do not advertise Link up to the stack if device
+                * is in loopback mode
+                */
+               if (qlcnic_83xx_check(adapter) && adapter->ahw->lb_mode) {
+                       netdev_info(netdev, "NIC Link is up for loopback test\n");
+                       return;
+               }
+
+               netdev_info(netdev, "NIC Link is up\n");
+               netif_carrier_on(netdev);
+       }
+}
+
+static int qlcnic_alloc_rx_skb(struct qlcnic_adapter *adapter,
+                              struct qlcnic_host_rds_ring *rds_ring,
+                              struct qlcnic_rx_buffer *buffer)
+{
+       struct sk_buff *skb;
+       dma_addr_t dma;
+       struct pci_dev *pdev = adapter->pdev;
+
+       skb = netdev_alloc_skb(adapter->netdev, rds_ring->skb_size);
+       if (!skb) {
+               adapter->stats.skb_alloc_failure++;
+               return -ENOMEM;
+       }
+
+       skb_reserve(skb, NET_IP_ALIGN);
+       dma = pci_map_single(pdev, skb->data,
+                            rds_ring->dma_size, PCI_DMA_FROMDEVICE);
+
+       if (pci_dma_mapping_error(pdev, dma)) {
+               adapter->stats.rx_dma_map_error++;
+               dev_kfree_skb_any(skb);
+               return -ENOMEM;
+       }
+
+       buffer->skb = skb;
+       buffer->dma = dma;
+
+       return 0;
+}
+
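+/* Refill a receive ring from softirq context; the ring lock is only
+ * trylock'ed, so the refill is skipped if another context holds it.
+ */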
+static void qlcnic_post_rx_buffers_nodb(struct qlcnic_adapter *adapter,
+                                       struct qlcnic_host_rds_ring *rds_ring,
+                                       u8 ring_id)
+{
+       struct rcv_desc *pdesc;
+       struct qlcnic_rx_buffer *buffer;
+       int  count = 0;
+       uint32_t producer, handle;
+       struct list_head *head;
+
+       if (!spin_trylock(&rds_ring->lock))
+               return;
+
+       producer = rds_ring->producer;
+       head = &rds_ring->free_list;
+       while (!list_empty(head)) {
+               buffer = list_entry(head->next, struct qlcnic_rx_buffer, list);
+
+               if (!buffer->skb) {
+                       if (qlcnic_alloc_rx_skb(adapter, rds_ring, buffer))
+                               break;
+               }
+               count++;
+               list_del(&buffer->list);
+
+               /* make a rcv descriptor  */
+               pdesc = &rds_ring->desc_head[producer];
+               handle = qlcnic_get_ref_handle(adapter,
+                                              buffer->ref_handle, ring_id);
+               pdesc->reference_handle = cpu_to_le16(handle);
+               pdesc->buffer_length = cpu_to_le32(rds_ring->dma_size);
+               pdesc->addr_buffer = cpu_to_le64(buffer->dma);
+               producer = get_next_index(producer, rds_ring->num_desc);
+       }
+       if (count) {
+               rds_ring->producer = producer;
+               writel((producer - 1) & (rds_ring->num_desc - 1),
+                      rds_ring->crb_rcv_producer);
+       }
+       spin_unlock(&rds_ring->lock);
+}
+
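+/* Reclaim completed Tx descriptors up to the hardware consumer index,
+ * unmap and free the transmitted skbs, and wake the Tx queue once enough
+ * descriptors are available again.
+ */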
+static int qlcnic_process_cmd_ring(struct qlcnic_adapter *adapter,
+                                  struct qlcnic_host_tx_ring *tx_ring,
+                                  int budget)
+{
+       u32 sw_consumer, hw_consumer;
+       int i, done, count = 0;
+       struct qlcnic_cmd_buffer *buffer;
+       struct pci_dev *pdev = adapter->pdev;
+       struct net_device *netdev = adapter->netdev;
+       struct qlcnic_skb_frag *frag;
+
+       if (!spin_trylock(&tx_ring->tx_clean_lock))
+               return 1;
+
+       sw_consumer = tx_ring->sw_consumer;
+       hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer));
+
+       while (sw_consumer != hw_consumer) {
+               buffer = &tx_ring->cmd_buf_arr[sw_consumer];
+               if (buffer->skb) {
+                       frag = &buffer->frag_array[0];
+                       pci_unmap_single(pdev, frag->dma, frag->length,
+                                        PCI_DMA_TODEVICE);
+                       frag->dma = 0ULL;
+                       for (i = 1; i < buffer->frag_count; i++) {
+                               frag++;
+                               pci_unmap_page(pdev, frag->dma, frag->length,
+                                              PCI_DMA_TODEVICE);
+                               frag->dma = 0ULL;
+                       }
+                       tx_ring->tx_stats.xmit_finished++;
+                       dev_kfree_skb_any(buffer->skb);
+                       buffer->skb = NULL;
+               }
+
+               sw_consumer = get_next_index(sw_consumer, tx_ring->num_desc);
+               if (++count >= budget)
+                       break;
+       }
+
+       tx_ring->sw_consumer = sw_consumer;
+
+       if (count && netif_running(netdev)) {
+               smp_mb();
+               if (netif_tx_queue_stopped(tx_ring->txq) &&
+                   netif_carrier_ok(netdev)) {
+                       if (qlcnic_tx_avail(tx_ring) > TX_STOP_THRESH) {
+                               netif_tx_wake_queue(tx_ring->txq);
+                               tx_ring->tx_stats.xmit_on++;
+                       }
+               }
+               adapter->tx_timeo_cnt = 0;
+       }
+       /*
+        * If everything is freed up to the consumer then check if the ring
+        * is full. If the ring is full then check if more needs to be freed
+        * and schedule the callback again.
+        *
+        * This happens when there are 2 CPUs: one could be freeing and the
+        * other filling it. If the ring is full when we get out of here and
+        * the card has already interrupted the host then the host can miss
+        * the interrupt.
+        *
+        * There is still a possible race condition and the host could miss
+        * an interrupt. The card has to take care of this.
+        */
+       hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer));
+       done = (sw_consumer == hw_consumer);
+
+       spin_unlock(&tx_ring->tx_clean_lock);
+
+       return done;
+}
+
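+/* Combined Tx/Rx NAPI poll: clean the paired Tx ring and process the SDS
+ * ring; if the Tx ring could not be fully cleaned, report the full budget
+ * so that NAPI polls again.
+ */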
+static int qlcnic_poll(struct napi_struct *napi, int budget)
+{
+       int tx_complete, work_done;
+       struct qlcnic_host_sds_ring *sds_ring;
+       struct qlcnic_adapter *adapter;
+       struct qlcnic_host_tx_ring *tx_ring;
+
+       sds_ring = container_of(napi, struct qlcnic_host_sds_ring, napi);
+       adapter = sds_ring->adapter;
+       tx_ring = sds_ring->tx_ring;
+
+       tx_complete = qlcnic_process_cmd_ring(adapter, tx_ring,
+                                             budget);
+       work_done = qlcnic_process_rcv_ring(sds_ring, budget);
+
+       /* Check if we need a repoll */
+       if (!tx_complete)
+               work_done = budget;
+
+       if (work_done < budget) {
+               napi_complete(&sds_ring->napi);
+               if (test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
+                       qlcnic_enable_sds_intr(adapter, sds_ring);
+                       qlcnic_enable_tx_intr(adapter, tx_ring);
+               }
+       }
+
+       return work_done;
+}
+
+static int qlcnic_tx_poll(struct napi_struct *napi, int budget)
+{
+       struct qlcnic_host_tx_ring *tx_ring;
+       struct qlcnic_adapter *adapter;
+       int work_done;
+
+       tx_ring = container_of(napi, struct qlcnic_host_tx_ring, napi);
+       adapter = tx_ring->adapter;
+
+       work_done = qlcnic_process_cmd_ring(adapter, tx_ring, budget);
+       if (work_done) {
+               napi_complete(&tx_ring->napi);
+               if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
+                       qlcnic_enable_tx_intr(adapter, tx_ring);
+       } else {
+               /* As qlcnic_process_cmd_ring() returned 0, we need a repoll */
+               work_done = budget;
+       }
+
+       return work_done;
+}
+
+static int qlcnic_rx_poll(struct napi_struct *napi, int budget)
+{
+       struct qlcnic_host_sds_ring *sds_ring;
+       struct qlcnic_adapter *adapter;
+       int work_done;
+
+       sds_ring = container_of(napi, struct qlcnic_host_sds_ring, napi);
+       adapter = sds_ring->adapter;
+
+       work_done = qlcnic_process_rcv_ring(sds_ring, budget);
+
+       if (work_done < budget) {
+               napi_complete(&sds_ring->napi);
+               if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
+                       qlcnic_enable_sds_intr(adapter, sds_ring);
+       }
+
+       return work_done;
+}
+
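+/* Decode an asynchronous link event from firmware and update the cached
+ * link speed, duplex, autoneg and module type before advertising the
+ * carrier change to the stack.
+ */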
+static void qlcnic_handle_linkevent(struct qlcnic_adapter *adapter,
+                                   struct qlcnic_fw_msg *msg)
+{
+       u32 cable_OUI;
+       u16 cable_len, link_speed;
+       u8  link_status, module, duplex, autoneg, lb_status = 0;
+       struct net_device *netdev = adapter->netdev;
+
+       adapter->ahw->has_link_events = 1;
+
+       cable_OUI = msg->body[1] & 0xffffffff;
+       cable_len = (msg->body[1] >> 32) & 0xffff;
+       link_speed = (msg->body[1] >> 48) & 0xffff;
+
+       link_status = msg->body[2] & 0xff;
+       duplex = (msg->body[2] >> 16) & 0xff;
+       autoneg = (msg->body[2] >> 24) & 0xff;
+       lb_status = (msg->body[2] >> 32) & 0x3;
+
+       module = (msg->body[2] >> 8) & 0xff;
+       if (module == LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLE)
+               dev_info(&netdev->dev,
+                        "unsupported cable: OUI 0x%x, length %d\n",
+                        cable_OUI, cable_len);
+       else if (module == LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLELEN)
+               dev_info(&netdev->dev, "unsupported cable length %d\n",
+                        cable_len);
+
+       if (!link_status && (lb_status == QLCNIC_ILB_MODE ||
+           lb_status == QLCNIC_ELB_MODE))
+               adapter->ahw->loopback_state |= QLCNIC_LINKEVENT;
+
+       qlcnic_advert_link_change(adapter, link_status);
+
+       if (duplex == LINKEVENT_FULL_DUPLEX)
+               adapter->ahw->link_duplex = DUPLEX_FULL;
+       else
+               adapter->ahw->link_duplex = DUPLEX_HALF;
+
+       adapter->ahw->module_type = module;
+       adapter->ahw->link_autoneg = autoneg;
+
+       if (link_status) {
+               adapter->ahw->link_speed = link_speed;
+       } else {
+               adapter->ahw->link_speed = SPEED_UNKNOWN;
+               adapter->ahw->link_duplex = DUPLEX_UNKNOWN;
+       }
+}
+
+static void qlcnic_handle_fw_message(int desc_cnt, int index,
+                                    struct qlcnic_host_sds_ring *sds_ring)
+{
+       struct qlcnic_fw_msg msg;
+       struct status_desc *desc;
+       struct qlcnic_adapter *adapter;
+       struct device *dev;
+       int i = 0, opcode, ret;
+
+       while (desc_cnt > 0 && i < 8) {
+               desc = &sds_ring->desc_head[index];
+               msg.words[i++] = le64_to_cpu(desc->status_desc_data[0]);
+               msg.words[i++] = le64_to_cpu(desc->status_desc_data[1]);
+
+               index = get_next_index(index, sds_ring->num_desc);
+               desc_cnt--;
+       }
+
+       adapter = sds_ring->adapter;
+       dev = &adapter->pdev->dev;
+       opcode = qlcnic_get_nic_msg_opcode(msg.body[0]);
+
+       switch (opcode) {
+       case QLCNIC_C2H_OPCODE_GET_LINKEVENT_RESPONSE:
+               qlcnic_handle_linkevent(adapter, &msg);
+               break;
+       case QLCNIC_C2H_OPCODE_CONFIG_LOOPBACK:
+               ret = (u32)(msg.body[1]);
+               switch (ret) {
+               case 0:
+                       adapter->ahw->loopback_state |= QLCNIC_LB_RESPONSE;
+                       break;
+               case 1:
+                       dev_info(dev, "loopback already in progress\n");
+                       adapter->ahw->diag_cnt = -EINPROGRESS;
+                       break;
+               case 2:
+                       dev_info(dev, "loopback cable is not connected\n");
+                       adapter->ahw->diag_cnt = -ENODEV;
+                       break;
+               default:
+                       dev_info(dev,
+                                "loopback configure request failed, err %x\n",
+                                ret);
+                       adapter->ahw->diag_cnt = -EIO;
+                       break;
+               }
+               break;
+       case QLCNIC_C2H_OPCODE_GET_DCB_AEN:
+               qlcnic_dcb_aen_handler(adapter->dcb, (void *)&msg);
+               break;
+       default:
+               break;
+       }
+}
+
+static struct sk_buff *qlcnic_process_rxbuf(struct qlcnic_adapter *adapter,
+                                           struct qlcnic_host_rds_ring *ring,
+                                           u16 index, u16 cksum)
+{
+       struct qlcnic_rx_buffer *buffer;
+       struct sk_buff *skb;
+
+       buffer = &ring->rx_buf_arr[index];
+       if (unlikely(buffer->skb == NULL)) {
+               WARN_ON(1);
+               return NULL;
+       }
+
+       pci_unmap_single(adapter->pdev, buffer->dma, ring->dma_size,
+                        PCI_DMA_FROMDEVICE);
+
+       skb = buffer->skb;
+       if (likely((adapter->netdev->features & NETIF_F_RXCSUM) &&
+                  (cksum == STATUS_CKSUM_OK || cksum == STATUS_CKSUM_LOOP))) {
+               adapter->stats.csummed++;
+               skb->ip_summed = CHECKSUM_UNNECESSARY;
+       } else {
+               skb_checksum_none_assert(skb);
+       }
+
+       buffer->skb = NULL;
+
+       return skb;
+}
+
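+/* Strip the 802.1Q header from a received frame and decide, based on the
+ * configured Rx PVID, whether the tag is passed up, cleared, or the packet
+ * should be dropped.
+ */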
+static inline int qlcnic_check_rx_tagging(struct qlcnic_adapter *adapter,
+                                         struct sk_buff *skb, u16 *vlan_tag)
+{
+       struct ethhdr *eth_hdr;
+
+       if (!__vlan_get_tag(skb, vlan_tag)) {
+               eth_hdr = (struct ethhdr *)skb->data;
+               memmove(skb->data + VLAN_HLEN, eth_hdr, ETH_ALEN * 2);
+               skb_pull(skb, VLAN_HLEN);
+       }
+       if (!adapter->rx_pvid)
+               return 0;
+
+       if (*vlan_tag == adapter->rx_pvid) {
+               /* Outer vlan tag. Packet should follow non-vlan path */
+               *vlan_tag = 0xffff;
+               return 0;
+       }
+       if (adapter->flags & QLCNIC_TAGGING_ENABLED)
+               return 0;
+
+       return -EINVAL;
+}
+
+static struct qlcnic_rx_buffer *
+qlcnic_process_rcv(struct qlcnic_adapter *adapter,
+                  struct qlcnic_host_sds_ring *sds_ring, int ring,
+                  u64 sts_data0)
+{
+       struct net_device *netdev = adapter->netdev;
+       struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
+       struct qlcnic_rx_buffer *buffer;
+       struct sk_buff *skb;
+       struct qlcnic_host_rds_ring *rds_ring;
+       int index, length, cksum, pkt_offset, is_lb_pkt;
+       u16 vid = 0xffff, t_vid;
+
+       if (unlikely(ring >= adapter->max_rds_rings))
+               return NULL;
+
+       rds_ring = &recv_ctx->rds_rings[ring];
+
+       index = qlcnic_get_sts_refhandle(sts_data0);
+       if (unlikely(index >= rds_ring->num_desc))
+               return NULL;
+
+       buffer = &rds_ring->rx_buf_arr[index];
+       length = qlcnic_get_sts_totallength(sts_data0);
+       cksum  = qlcnic_get_sts_status(sts_data0);
+       pkt_offset = qlcnic_get_sts_pkt_offset(sts_data0);
+
+       skb = qlcnic_process_rxbuf(adapter, rds_ring, index, cksum);
+       if (!skb)
+               return buffer;
+
+       if (adapter->rx_mac_learn) {
+               t_vid = 0;
+               is_lb_pkt = qlcnic_82xx_is_lb_pkt(sts_data0);
+               qlcnic_add_lb_filter(adapter, skb, is_lb_pkt, t_vid);
+       }
+
+       if (length > rds_ring->skb_size)
+               skb_put(skb, rds_ring->skb_size);
+       else
+               skb_put(skb, length);
+
+       if (pkt_offset)
+               skb_pull(skb, pkt_offset);
+
+       if (unlikely(qlcnic_check_rx_tagging(adapter, skb, &vid))) {
+               adapter->stats.rxdropped++;
+               dev_kfree_skb(skb);
+               return buffer;
+       }
+
+       skb->protocol = eth_type_trans(skb, netdev);
+
+       if (vid != 0xffff)
+               __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
+
+       napi_gro_receive(&sds_ring->napi, skb);
+
+       adapter->stats.rx_pkts++;
+       adapter->stats.rxbytes += length;
+
+       return buffer;
+}
+
+#define QLC_TCP_HDR_SIZE            20
+#define QLC_TCP_TS_OPTION_SIZE      12
+#define QLC_TCP_TS_HDR_SIZE         (QLC_TCP_HDR_SIZE + QLC_TCP_TS_OPTION_SIZE)
+
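+/* Hand an LRO-aggregated buffer to the stack: rebuild the IP total length
+ * (or IPv6 payload length), TCP sequence number and PSH flag from the
+ * status descriptors before passing the super-frame up.
+ */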
+static struct qlcnic_rx_buffer *
+qlcnic_process_lro(struct qlcnic_adapter *adapter,
+                  int ring, u64 sts_data0, u64 sts_data1)
+{
+       struct net_device *netdev = adapter->netdev;
+       struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
+       struct qlcnic_rx_buffer *buffer;
+       struct sk_buff *skb;
+       struct qlcnic_host_rds_ring *rds_ring;
+       struct iphdr *iph;
+       struct ipv6hdr *ipv6h;
+       struct tcphdr *th;
+       bool push, timestamp;
+       int index, l2_hdr_offset, l4_hdr_offset, is_lb_pkt;
+       u16 lro_length, length, data_offset, t_vid, vid = 0xffff;
+       u32 seq_number;
+
+       if (unlikely(ring >= adapter->max_rds_rings))
+               return NULL;
+
+       rds_ring = &recv_ctx->rds_rings[ring];
+
+       index = qlcnic_get_lro_sts_refhandle(sts_data0);
+       if (unlikely(index >= rds_ring->num_desc))
+               return NULL;
+
+       buffer = &rds_ring->rx_buf_arr[index];
+
+       timestamp = qlcnic_get_lro_sts_timestamp(sts_data0);
+       lro_length = qlcnic_get_lro_sts_length(sts_data0);
+       l2_hdr_offset = qlcnic_get_lro_sts_l2_hdr_offset(sts_data0);
+       l4_hdr_offset = qlcnic_get_lro_sts_l4_hdr_offset(sts_data0);
+       push = qlcnic_get_lro_sts_push_flag(sts_data0);
+       seq_number = qlcnic_get_lro_sts_seq_number(sts_data1);
+
+       skb = qlcnic_process_rxbuf(adapter, rds_ring, index, STATUS_CKSUM_OK);
+       if (!skb)
+               return buffer;
+
+       if (adapter->rx_mac_learn) {
+               t_vid = 0;
+               is_lb_pkt = qlcnic_82xx_is_lb_pkt(sts_data0);
+               qlcnic_add_lb_filter(adapter, skb, is_lb_pkt, t_vid);
+       }
+
+       if (timestamp)
+               data_offset = l4_hdr_offset + QLC_TCP_TS_HDR_SIZE;
+       else
+               data_offset = l4_hdr_offset + QLC_TCP_HDR_SIZE;
+
+       skb_put(skb, lro_length + data_offset);
+       skb_pull(skb, l2_hdr_offset);
+
+       if (unlikely(qlcnic_check_rx_tagging(adapter, skb, &vid))) {
+               adapter->stats.rxdropped++;
+               dev_kfree_skb(skb);
+               return buffer;
+       }
+
+       skb->protocol = eth_type_trans(skb, netdev);
+
+       if (ntohs(skb->protocol) == ETH_P_IPV6) {
+               ipv6h = (struct ipv6hdr *)skb->data;
+               th = (struct tcphdr *)(skb->data + sizeof(struct ipv6hdr));
+               length = (th->doff << 2) + lro_length;
+               ipv6h->payload_len = htons(length);
+       } else {
+               iph = (struct iphdr *)skb->data;
+               th = (struct tcphdr *)(skb->data + (iph->ihl << 2));
+               length = (iph->ihl << 2) + (th->doff << 2) + lro_length;
+               csum_replace2(&iph->check, iph->tot_len, htons(length));
+               iph->tot_len = htons(length);
+       }
+
+       th->psh = push;
+       th->seq = htonl(seq_number);
+       length = skb->len;
+
+       if (adapter->flags & QLCNIC_FW_LRO_MSS_CAP) {
+               skb_shinfo(skb)->gso_size = qlcnic_get_lro_sts_mss(sts_data1);
+               if (skb->protocol == htons(ETH_P_IPV6))
+                       skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
+               else
+                       skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
+       }
+
+       if (vid != 0xffff)
+               __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
+       netif_receive_skb(skb);
+
+       adapter->stats.lro_pkts++;
+       adapter->stats.lrobytes += length;
+
+       return buffer;
+}
+
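+/* NAPI Rx work loop: consume status descriptors until ownership returns to
+ * the firmware or the budget is exhausted, dispatch each descriptor by
+ * opcode, then refill and repost the receive rings.
+ */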
+static int qlcnic_process_rcv_ring(struct qlcnic_host_sds_ring *sds_ring, int max)
+{
+       struct qlcnic_host_rds_ring *rds_ring;
+       struct qlcnic_adapter *adapter = sds_ring->adapter;
+       struct list_head *cur;
+       struct status_desc *desc;
+       struct qlcnic_rx_buffer *rxbuf;
+       int opcode, desc_cnt, count = 0;
+       u64 sts_data0, sts_data1;
+       u8 ring;
+       u32 consumer = sds_ring->consumer;
+
+       while (count < max) {
+               desc = &sds_ring->desc_head[consumer];
+               sts_data0 = le64_to_cpu(desc->status_desc_data[0]);
+
+               if (!(sts_data0 & STATUS_OWNER_HOST))
+                       break;
+
+               desc_cnt = qlcnic_get_sts_desc_cnt(sts_data0);
+               opcode = qlcnic_get_sts_opcode(sts_data0);
+               switch (opcode) {
+               case QLCNIC_RXPKT_DESC:
+               case QLCNIC_OLD_RXPKT_DESC:
+               case QLCNIC_SYN_OFFLOAD:
+                       ring = qlcnic_get_sts_type(sts_data0);
+                       rxbuf = qlcnic_process_rcv(adapter, sds_ring, ring,
+                                                  sts_data0);
+                       break;
+               case QLCNIC_LRO_DESC:
+                       ring = qlcnic_get_lro_sts_type(sts_data0);
+                       sts_data1 = le64_to_cpu(desc->status_desc_data[1]);
+                       rxbuf = qlcnic_process_lro(adapter, ring, sts_data0,
+                                                  sts_data1);
+                       break;
+               case QLCNIC_RESPONSE_DESC:
+                       qlcnic_handle_fw_message(desc_cnt, consumer, sds_ring);
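+                       /* fall through */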
+               default:
+                       goto skip;
+               }
+               WARN_ON(desc_cnt > 1);
+
+               if (likely(rxbuf))
+                       list_add_tail(&rxbuf->list, &sds_ring->free_list[ring]);
+               else
+                       adapter->stats.null_rxbuf++;
+skip:
+               for (; desc_cnt > 0; desc_cnt--) {
+                       desc = &sds_ring->desc_head[consumer];
+                       desc->status_desc_data[0] = QLCNIC_DESC_OWNER_FW;
+                       consumer = get_next_index(consumer, sds_ring->num_desc);
+               }
+               count++;
+       }
+
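+       /* Refill the consumed buffers with fresh skbs and return them to the RDS rings */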
+       for (ring = 0; ring < adapter->max_rds_rings; ring++) {
+               rds_ring = &adapter->recv_ctx->rds_rings[ring];
+               if (!list_empty(&sds_ring->free_list[ring])) {
+                       list_for_each(cur, &sds_ring->free_list[ring]) {
+                               rxbuf = list_entry(cur, struct qlcnic_rx_buffer,
+                                                  list);
+                               qlcnic_alloc_rx_skb(adapter, rds_ring, rxbuf);
+                       }
+                       spin_lock(&rds_ring->lock);
+                       list_splice_tail_init(&sds_ring->free_list[ring],
+                                             &rds_ring->free_list);
+                       spin_unlock(&rds_ring->lock);
+               }
+
+               qlcnic_post_rx_buffers_nodb(adapter, rds_ring, ring);
+       }
+
+       if (count) {
+               sds_ring->consumer = consumer;
+               writel(consumer, sds_ring->crb_sts_consumer);
+       }
+
+       return count;
+}
+
+void qlcnic_post_rx_buffers(struct qlcnic_adapter *adapter,
+                           struct qlcnic_host_rds_ring *rds_ring, u8 ring_id)
+{
+       struct rcv_desc *pdesc;
+       struct qlcnic_rx_buffer *buffer;
+       int count = 0;
+       u32 producer, handle;
+       struct list_head *head;
+
+       producer = rds_ring->producer;
+       head = &rds_ring->free_list;
+
+       while (!list_empty(head)) {
+
+               buffer = list_entry(head->next, struct qlcnic_rx_buffer, list);
+
+               if (!buffer->skb) {
+                       if (qlcnic_alloc_rx_skb(adapter, rds_ring, buffer))
+                               break;
+               }
+
+               count++;
+               list_del(&buffer->list);
+
+               /* make a rcv descriptor  */
+               pdesc = &rds_ring->desc_head[producer];
+               pdesc->addr_buffer = cpu_to_le64(buffer->dma);
+               handle = qlcnic_get_ref_handle(adapter, buffer->ref_handle,
+                                              ring_id);
+               pdesc->reference_handle = cpu_to_le16(handle);
+               pdesc->buffer_length = cpu_to_le32(rds_ring->dma_size);
+               producer = get_next_index(producer, rds_ring->num_desc);
+       }
+
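+       /* Publish the new producer index so the adapter can use the posted buffers */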
+       if (count) {
+               rds_ring->producer = producer;
+               writel((producer-1) & (rds_ring->num_desc-1),
+                      rds_ring->crb_rcv_producer);
+       }
+}
+
+static void dump_skb(struct sk_buff *skb, struct qlcnic_adapter *adapter)
+{
+       if (adapter->ahw->msg_enable & NETIF_MSG_DRV) {
+               char prefix[30];
+
+               scnprintf(prefix, sizeof(prefix), "%s: %s: ",
+                         dev_name(&adapter->pdev->dev), __func__);
+
+               print_hex_dump_debug(prefix, DUMP_PREFIX_NONE, 16, 1,
+                                    skb->data, skb->len, true);
+       }
+}
+
+static void qlcnic_process_rcv_diag(struct qlcnic_adapter *adapter, int ring,
+                                   u64 sts_data0)
+{
+       struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
+       struct sk_buff *skb;
+       struct qlcnic_host_rds_ring *rds_ring;
+       int index, length, cksum, pkt_offset;
+
+       if (unlikely(ring >= adapter->max_rds_rings))
+               return;
+
+       rds_ring = &recv_ctx->rds_rings[ring];
+
+       index = qlcnic_get_sts_refhandle(sts_data0);
+       length = qlcnic_get_sts_totallength(sts_data0);
+       if (unlikely(index >= rds_ring->num_desc))
+               return;
+
+       cksum  = qlcnic_get_sts_status(sts_data0);
+       pkt_offset = qlcnic_get_sts_pkt_offset(sts_data0);
+
+       skb = qlcnic_process_rxbuf(adapter, rds_ring, index, cksum);
+       if (!skb)
+               return;
+
+       if (length > rds_ring->skb_size)
+               skb_put(skb, rds_ring->skb_size);
+       else
+               skb_put(skb, length);
+
+       if (pkt_offset)
+               skb_pull(skb, pkt_offset);
+
+       if (!qlcnic_check_loopback_buff(skb->data, adapter->mac_addr))
+               adapter->ahw->diag_cnt++;
+       else
+               dump_skb(skb, adapter);
+
+       dev_kfree_skb_any(skb);
+       adapter->stats.rx_pkts++;
+       adapter->stats.rxbytes += length;
+
+       return;
+}
+
+void qlcnic_82xx_process_rcv_ring_diag(struct qlcnic_host_sds_ring *sds_ring)
+{
+       struct qlcnic_adapter *adapter = sds_ring->adapter;
+       struct status_desc *desc;
+       u64 sts_data0;
+       int ring, opcode, desc_cnt;
+
+       u32 consumer = sds_ring->consumer;
+
+       desc = &sds_ring->desc_head[consumer];
+       sts_data0 = le64_to_cpu(desc->status_desc_data[0]);
+
+       if (!(sts_data0 & STATUS_OWNER_HOST))
+               return;
+
+       desc_cnt = qlcnic_get_sts_desc_cnt(sts_data0);
+       opcode = qlcnic_get_sts_opcode(sts_data0);
+       switch (opcode) {
+       case QLCNIC_RESPONSE_DESC:
+               qlcnic_handle_fw_message(desc_cnt, consumer, sds_ring);
+               break;
+       default:
+               ring = qlcnic_get_sts_type(sts_data0);
+               qlcnic_process_rcv_diag(adapter, ring, sts_data0);
+               break;
+       }
+
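+       /* Return the processed status descriptors to the firmware */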
+       for (; desc_cnt > 0; desc_cnt--) {
+               desc = &sds_ring->desc_head[consumer];
+               desc->status_desc_data[0] = cpu_to_le64(STATUS_OWNER_PHANTOM);
+               consumer = get_next_index(consumer, sds_ring->num_desc);
+       }
+
+       sds_ring->consumer = consumer;
+       writel(consumer, sds_ring->crb_sts_consumer);
+}
+
+int qlcnic_82xx_napi_add(struct qlcnic_adapter *adapter,
+                        struct net_device *netdev)
+{
+       int ring;
+       struct qlcnic_host_sds_ring *sds_ring;
+       struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
+       struct qlcnic_host_tx_ring *tx_ring;
+
+       if (qlcnic_alloc_sds_rings(recv_ctx, adapter->drv_sds_rings))
+               return -ENOMEM;
+
+       for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
+               sds_ring = &recv_ctx->sds_rings[ring];
+               if (qlcnic_check_multi_tx(adapter) &&
+                   !adapter->ahw->diag_test) {
+                       netif_napi_add(netdev, &sds_ring->napi, qlcnic_rx_poll,
+                                      NAPI_POLL_WEIGHT);
+               } else {
+                       if (ring == (adapter->drv_sds_rings - 1))
+                               netif_napi_add(netdev, &sds_ring->napi,
+                                              qlcnic_poll,
+                                              NAPI_POLL_WEIGHT);
+                       else
+                               netif_napi_add(netdev, &sds_ring->napi,
+                                              qlcnic_rx_poll,
+                                              NAPI_POLL_WEIGHT);
+               }
+       }
+
+       if (qlcnic_alloc_tx_rings(adapter, netdev)) {
+               qlcnic_free_sds_rings(recv_ctx);
+               return -ENOMEM;
+       }
+
+       if (qlcnic_check_multi_tx(adapter) && !adapter->ahw->diag_test) {
+               for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
+                       tx_ring = &adapter->tx_ring[ring];
+                       netif_tx_napi_add(netdev, &tx_ring->napi, qlcnic_tx_poll,
+                                      NAPI_POLL_WEIGHT);
+               }
+       }
+
+       return 0;
+}
+
+void qlcnic_82xx_napi_del(struct qlcnic_adapter *adapter)
+{
+       int ring;
+       struct qlcnic_host_sds_ring *sds_ring;
+       struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
+       struct qlcnic_host_tx_ring *tx_ring;
+
+       for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
+               sds_ring = &recv_ctx->sds_rings[ring];
+               netif_napi_del(&sds_ring->napi);
+       }
+
+       qlcnic_free_sds_rings(adapter->recv_ctx);
+
+       if (qlcnic_check_multi_tx(adapter) && !adapter->ahw->diag_test) {
+               for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
+                       tx_ring = &adapter->tx_ring[ring];
+                       netif_napi_del(&tx_ring->napi);
+               }
+       }
+
+       qlcnic_free_tx_rings(adapter);
+}
+
+void qlcnic_82xx_napi_enable(struct qlcnic_adapter *adapter)
+{
+       int ring;
+       struct qlcnic_host_sds_ring *sds_ring;
+       struct qlcnic_host_tx_ring *tx_ring;
+       struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
+
+       if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
+               return;
+
+       for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
+               sds_ring = &recv_ctx->sds_rings[ring];
+               napi_enable(&sds_ring->napi);
+               qlcnic_enable_sds_intr(adapter, sds_ring);
+       }
+
+       if (qlcnic_check_multi_tx(adapter) &&
+           (adapter->flags & QLCNIC_MSIX_ENABLED) &&
+           !adapter->ahw->diag_test) {
+               for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
+                       tx_ring = &adapter->tx_ring[ring];
+                       napi_enable(&tx_ring->napi);
+                       qlcnic_enable_tx_intr(adapter, tx_ring);
+               }
+       }
+}
+
+void qlcnic_82xx_napi_disable(struct qlcnic_adapter *adapter)
+{
+       int ring;
+       struct qlcnic_host_sds_ring *sds_ring;
+       struct qlcnic_host_tx_ring *tx_ring;
+       struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
+
+       if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
+               return;
+
+       for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
+               sds_ring = &recv_ctx->sds_rings[ring];
+               qlcnic_disable_sds_intr(adapter, sds_ring);
+               napi_synchronize(&sds_ring->napi);
+               napi_disable(&sds_ring->napi);
+       }
+
+       if ((adapter->flags & QLCNIC_MSIX_ENABLED) &&
+           !adapter->ahw->diag_test &&
+           qlcnic_check_multi_tx(adapter)) {
+               for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
+                       tx_ring = &adapter->tx_ring[ring];
+                       qlcnic_disable_tx_intr(adapter, tx_ring);
+                       napi_synchronize(&tx_ring->napi);
+                       napi_disable(&tx_ring->napi);
+               }
+       }
+}
+
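+/* Loopback-packet indication bits in the 83xx status descriptor */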
+#define QLC_83XX_NORMAL_LB_PKT (1ULL << 36)
+#define QLC_83XX_LRO_LB_PKT    (1ULL << 46)
+
+static inline int qlcnic_83xx_is_lb_pkt(u64 sts_data, int lro_pkt)
+{
+       if (lro_pkt)
+               return (sts_data & QLC_83XX_LRO_LB_PKT) ? 1 : 0;
+       else
+               return (sts_data & QLC_83XX_NORMAL_LB_PKT) ? 1 : 0;
+}
+
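+/* Non-zero encapsulation length in the status descriptor marks a tunneled (e.g. VXLAN) packet */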
+#define QLCNIC_ENCAP_LENGTH_MASK       0x7f
+
+static inline u8 qlcnic_encap_length(u64 sts_data)
+{
+       return sts_data & QLCNIC_ENCAP_LENGTH_MASK;
+}
+
+static struct qlcnic_rx_buffer *
+qlcnic_83xx_process_rcv(struct qlcnic_adapter *adapter,
+                       struct qlcnic_host_sds_ring *sds_ring,
+                       u8 ring, u64 sts_data[])
+{
+       struct net_device *netdev = adapter->netdev;
+       struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
+       struct qlcnic_rx_buffer *buffer;
+       struct sk_buff *skb;
+       struct qlcnic_host_rds_ring *rds_ring;
+       int index, length, cksum, is_lb_pkt;
+       u16 vid = 0xffff;
+       int err;
+
+       if (unlikely(ring >= adapter->max_rds_rings))
+               return NULL;
+
+       rds_ring = &recv_ctx->rds_rings[ring];
+
+       index = qlcnic_83xx_hndl(sts_data[0]);
+       if (unlikely(index >= rds_ring->num_desc))
+               return NULL;
+
+       buffer = &rds_ring->rx_buf_arr[index];
+       length = qlcnic_83xx_pktln(sts_data[0]);
+       cksum  = qlcnic_83xx_csum_status(sts_data[1]);
+       skb = qlcnic_process_rxbuf(adapter, rds_ring, index, cksum);
+       if (!skb)
+               return buffer;
+
+       if (length > rds_ring->skb_size)
+               skb_put(skb, rds_ring->skb_size);
+       else
+               skb_put(skb, length);
+
+       err = qlcnic_check_rx_tagging(adapter, skb, &vid);
+
+       if (adapter->rx_mac_learn) {
+               is_lb_pkt = qlcnic_83xx_is_lb_pkt(sts_data[1], 0);
+               qlcnic_add_lb_filter(adapter, skb, is_lb_pkt, vid);
+       }
+
+       if (unlikely(err)) {
+               adapter->stats.rxdropped++;
+               dev_kfree_skb(skb);
+               return buffer;
+       }
+
+       skb->protocol = eth_type_trans(skb, netdev);
+
+       if (qlcnic_encap_length(sts_data[1]) &&
+           skb->ip_summed == CHECKSUM_UNNECESSARY) {
+               skb->csum_level = 1;
+               adapter->stats.encap_rx_csummed++;
+       }
+
+       if (vid != 0xffff)
+               __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
+
+       napi_gro_receive(&sds_ring->napi, skb);
+
+       adapter->stats.rx_pkts++;
+       adapter->stats.rxbytes += length;
+
+       return buffer;
+}
+
+static struct qlcnic_rx_buffer *
+qlcnic_83xx_process_lro(struct qlcnic_adapter *adapter,
+                       u8 ring, u64 sts_data[])
+{
+       struct net_device *netdev = adapter->netdev;
+       struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
+       struct qlcnic_rx_buffer *buffer;
+       struct sk_buff *skb;
+       struct qlcnic_host_rds_ring *rds_ring;
+       struct iphdr *iph;
+       struct ipv6hdr *ipv6h;
+       struct tcphdr *th;
+       bool push;
+       int l2_hdr_offset, l4_hdr_offset;
+       int index, is_lb_pkt;
+       u16 lro_length, length, data_offset, gso_size;
+       u16 vid = 0xffff;
+       int err;
+
+       if (unlikely(ring >= adapter->max_rds_rings))
+               return NULL;
+
+       rds_ring = &recv_ctx->rds_rings[ring];
+
+       index = qlcnic_83xx_hndl(sts_data[0]);
+       if (unlikely(index >= rds_ring->num_desc))
+               return NULL;
+
+       buffer = &rds_ring->rx_buf_arr[index];
+
+       lro_length = qlcnic_83xx_lro_pktln(sts_data[0]);
+       l2_hdr_offset = qlcnic_83xx_l2_hdr_off(sts_data[1]);
+       l4_hdr_offset = qlcnic_83xx_l4_hdr_off(sts_data[1]);
+       push = qlcnic_83xx_is_psh_bit(sts_data[1]);
+
+       skb = qlcnic_process_rxbuf(adapter, rds_ring, index, STATUS_CKSUM_OK);
+       if (!skb)
+               return buffer;
+
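+       /* Account for the TCP header (and timestamp option, if present) when sizing the coalesced skb */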
+       if (qlcnic_83xx_is_tstamp(sts_data[1]))
+               data_offset = l4_hdr_offset + QLCNIC_TCP_TS_HDR_SIZE;
+       else
+               data_offset = l4_hdr_offset + QLCNIC_TCP_HDR_SIZE;
+
+       skb_put(skb, lro_length + data_offset);
+       skb_pull(skb, l2_hdr_offset);
+
+       err = qlcnic_check_rx_tagging(adapter, skb, &vid);
+
+       if (adapter->rx_mac_learn) {
+               is_lb_pkt = qlcnic_83xx_is_lb_pkt(sts_data[1], 1);
+               qlcnic_add_lb_filter(adapter, skb, is_lb_pkt, vid);
+       }
+
+       if (unlikely(err)) {
+               adapter->stats.rxdropped++;
+               dev_kfree_skb(skb);
+               return buffer;
+       }
+
+       skb->protocol = eth_type_trans(skb, netdev);
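+       /* Rewrite the IP length fields so the header matches the coalesced LRO payload */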
+       if (ntohs(skb->protocol) == ETH_P_IPV6) {
+               ipv6h = (struct ipv6hdr *)skb->data;
+               th = (struct tcphdr *)(skb->data + sizeof(struct ipv6hdr));
+
+               length = (th->doff << 2) + lro_length;
+               ipv6h->payload_len = htons(length);
+       } else {
+               iph = (struct iphdr *)skb->data;
+               th = (struct tcphdr *)(skb->data + (iph->ihl << 2));
+               length = (iph->ihl << 2) + (th->doff << 2) + lro_length;
+               csum_replace2(&iph->check, iph->tot_len, htons(length));
+               iph->tot_len = htons(length);
+       }
+
+       th->psh = push;
+       length = skb->len;
+
+       if (adapter->flags & QLCNIC_FW_LRO_MSS_CAP) {
+               gso_size = qlcnic_83xx_get_lro_sts_mss(sts_data[0]);
+               skb_shinfo(skb)->gso_size = gso_size;
+               if (skb->protocol == htons(ETH_P_IPV6))
+                       skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
+               else
+                       skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
+       }
+
+       if (vid != 0xffff)
+               __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
+
+       netif_receive_skb(skb);
+
+       adapter->stats.lro_pkts++;
+       adapter->stats.lrobytes += length;
+       return buffer;
+}
+
+static int qlcnic_83xx_process_rcv_ring(struct qlcnic_host_sds_ring *sds_ring,
+                                       int max)
+{
+       struct qlcnic_host_rds_ring *rds_ring;
+       struct qlcnic_adapter *adapter = sds_ring->adapter;
+       struct list_head *cur;
+       struct status_desc *desc;
+       struct qlcnic_rx_buffer *rxbuf = NULL;
+       u8 ring;
+       u64 sts_data[2];
+       int count = 0, opcode;
+       u32 consumer = sds_ring->consumer;
+
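+       /* Consume completed status descriptors; a zero opcode means the adapter has not written the entry yet */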
+       while (count < max) {
+               desc = &sds_ring->desc_head[consumer];
+               sts_data[1] = le64_to_cpu(desc->status_desc_data[1]);
+               opcode = qlcnic_83xx_opcode(sts_data[1]);
+               if (!opcode)
+                       break;
+               sts_data[0] = le64_to_cpu(desc->status_desc_data[0]);
+               ring = QLCNIC_FETCH_RING_ID(sts_data[0]);
+
+               switch (opcode) {
+               case QLC_83XX_REG_DESC:
+                       rxbuf = qlcnic_83xx_process_rcv(adapter, sds_ring,
+                                                       ring, sts_data);
+                       break;
+               case QLC_83XX_LRO_DESC:
+                       rxbuf = qlcnic_83xx_process_lro(adapter, ring,
+                                                       sts_data);
+                       break;
+               default:
+                       dev_info(&adapter->pdev->dev,
+                                "Unknown opcode: 0x%x\n", opcode);
+                       goto skip;
+               }
+
+               if (likely(rxbuf))
+                       list_add_tail(&rxbuf->list, &sds_ring->free_list[ring]);
+               else
+                       adapter->stats.null_rxbuf++;
+skip:
+               desc = &sds_ring->desc_head[consumer];
+               /* Reset the descriptor */
+               desc->status_desc_data[1] = 0;
+               consumer = get_next_index(consumer, sds_ring->num_desc);
+               count++;
+       }
+       for (ring = 0; ring < adapter->max_rds_rings; ring++) {
+               rds_ring = &adapter->recv_ctx->rds_rings[ring];
+               if (!list_empty(&sds_ring->free_list[ring])) {
+                       list_for_each(cur, &sds_ring->free_list[ring]) {
+                               rxbuf = list_entry(cur, struct qlcnic_rx_buffer,
+                                                  list);
+                               qlcnic_alloc_rx_skb(adapter, rds_ring, rxbuf);
+                       }
+                       spin_lock(&rds_ring->lock);
+                       list_splice_tail_init(&sds_ring->free_list[ring],
+                                             &rds_ring->free_list);
+                       spin_unlock(&rds_ring->lock);
+               }
+               qlcnic_post_rx_buffers_nodb(adapter, rds_ring, ring);
+       }
+       if (count) {
+               sds_ring->consumer = consumer;
+               writel(consumer, sds_ring->crb_sts_consumer);
+       }
+       return count;
+}
+
+static int qlcnic_83xx_msix_sriov_vf_poll(struct napi_struct *napi, int budget)
+{
+       int tx_complete;
+       int work_done;
+       struct qlcnic_host_sds_ring *sds_ring;
+       struct qlcnic_adapter *adapter;
+       struct qlcnic_host_tx_ring *tx_ring;
+
+       sds_ring = container_of(napi, struct qlcnic_host_sds_ring, napi);
+       adapter = sds_ring->adapter;
+       /* tx ring count = 1 */
+       tx_ring = adapter->tx_ring;
+
+       tx_complete = qlcnic_process_cmd_ring(adapter, tx_ring, budget);
+       work_done = qlcnic_83xx_process_rcv_ring(sds_ring, budget);
+
+       /* Check if we need a repoll */
+       if (!tx_complete)
+               work_done = budget;
+
+       if (work_done < budget) {
+               napi_complete(&sds_ring->napi);
+               qlcnic_enable_sds_intr(adapter, sds_ring);
+       }
+
+       return work_done;
+}
+
+static int qlcnic_83xx_poll(struct napi_struct *napi, int budget)
+{
+       int tx_complete;
+       int work_done;
+       struct qlcnic_host_sds_ring *sds_ring;
+       struct qlcnic_adapter *adapter;
+       struct qlcnic_host_tx_ring *tx_ring;
+
+       sds_ring = container_of(napi, struct qlcnic_host_sds_ring, napi);
+       adapter = sds_ring->adapter;
+       /* tx ring count = 1 */
+       tx_ring = adapter->tx_ring;
+
+       tx_complete = qlcnic_process_cmd_ring(adapter, tx_ring, budget);
+       work_done = qlcnic_83xx_process_rcv_ring(sds_ring, budget);
+
+       /* Check if we need a repoll */
+       if (!tx_complete)
+               work_done = budget;
+
+       if (work_done < budget) {
+               napi_complete(&sds_ring->napi);
+               qlcnic_enable_sds_intr(adapter, sds_ring);
+       }
+
+       return work_done;
+}
+
+static int qlcnic_83xx_msix_tx_poll(struct napi_struct *napi, int budget)
+{
+       int work_done;
+       struct qlcnic_host_tx_ring *tx_ring;
+       struct qlcnic_adapter *adapter;
+
+       tx_ring = container_of(napi, struct qlcnic_host_tx_ring, napi);
+       adapter = tx_ring->adapter;
+       work_done = qlcnic_process_cmd_ring(adapter, tx_ring, budget);
+       if (work_done) {
+               napi_complete(&tx_ring->napi);
+               if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
+                       qlcnic_enable_tx_intr(adapter, tx_ring);
+       } else {
+               /* need a repoll */
+               work_done = budget;
+       }
+
+       return work_done;
+}
+
+static int qlcnic_83xx_rx_poll(struct napi_struct *napi, int budget)
+{
+       int work_done;
+       struct qlcnic_host_sds_ring *sds_ring;
+       struct qlcnic_adapter *adapter;
+
+       sds_ring = container_of(napi, struct qlcnic_host_sds_ring, napi);
+       adapter = sds_ring->adapter;
+       work_done = qlcnic_83xx_process_rcv_ring(sds_ring, budget);
+       if (work_done < budget) {
+               napi_complete(&sds_ring->napi);
+               if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
+                       qlcnic_enable_sds_intr(adapter, sds_ring);
+       }
+
+       return work_done;
+}
+
+void qlcnic_83xx_napi_enable(struct qlcnic_adapter *adapter)
+{
+       int ring;
+       struct qlcnic_host_sds_ring *sds_ring;
+       struct qlcnic_host_tx_ring *tx_ring;
+       struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
+
+       if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
+               return;
+
+       for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
+               sds_ring = &recv_ctx->sds_rings[ring];
+               napi_enable(&sds_ring->napi);
+               if (adapter->flags & QLCNIC_MSIX_ENABLED)
+                       qlcnic_enable_sds_intr(adapter, sds_ring);
+       }
+
+       if ((adapter->flags & QLCNIC_MSIX_ENABLED) &&
+           !(adapter->flags & QLCNIC_TX_INTR_SHARED)) {
+               for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
+                       tx_ring = &adapter->tx_ring[ring];
+                       napi_enable(&tx_ring->napi);
+                       qlcnic_enable_tx_intr(adapter, tx_ring);
+               }
+       }
+}
+
+void qlcnic_83xx_napi_disable(struct qlcnic_adapter *adapter)
+{
+       int ring;
+       struct qlcnic_host_sds_ring *sds_ring;
+       struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
+       struct qlcnic_host_tx_ring *tx_ring;
+
+       if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
+               return;
+
+       for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
+               sds_ring = &recv_ctx->sds_rings[ring];
+               if (adapter->flags & QLCNIC_MSIX_ENABLED)
+                       qlcnic_disable_sds_intr(adapter, sds_ring);
+               napi_synchronize(&sds_ring->napi);
+               napi_disable(&sds_ring->napi);
+       }
+
+       if ((adapter->flags & QLCNIC_MSIX_ENABLED) &&
+           !(adapter->flags & QLCNIC_TX_INTR_SHARED)) {
+               for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
+                       tx_ring = &adapter->tx_ring[ring];
+                       qlcnic_disable_tx_intr(adapter, tx_ring);
+                       napi_synchronize(&tx_ring->napi);
+                       napi_disable(&tx_ring->napi);
+               }
+       }
+}
+
+int qlcnic_83xx_napi_add(struct qlcnic_adapter *adapter,
+                        struct net_device *netdev)
+{
+       int ring;
+       struct qlcnic_host_sds_ring *sds_ring;
+       struct qlcnic_host_tx_ring *tx_ring;
+       struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
+
+       if (qlcnic_alloc_sds_rings(recv_ctx, adapter->drv_sds_rings))
+               return -ENOMEM;
+
+       for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
+               sds_ring = &recv_ctx->sds_rings[ring];
+               if (adapter->flags & QLCNIC_MSIX_ENABLED) {
+                       if (!(adapter->flags & QLCNIC_TX_INTR_SHARED))
+                               netif_napi_add(netdev, &sds_ring->napi,
+                                              qlcnic_83xx_rx_poll,
+                                              NAPI_POLL_WEIGHT);
+                       else
+                               netif_napi_add(netdev, &sds_ring->napi,
+                                              qlcnic_83xx_msix_sriov_vf_poll,
+                                              NAPI_POLL_WEIGHT);
+
+               } else {
+                       netif_napi_add(netdev, &sds_ring->napi,
+                                      qlcnic_83xx_poll,
+                                      NAPI_POLL_WEIGHT);
+               }
+       }
+
+       if (qlcnic_alloc_tx_rings(adapter, netdev)) {
+               qlcnic_free_sds_rings(recv_ctx);
+               return -ENOMEM;
+       }
+
+       if ((adapter->flags & QLCNIC_MSIX_ENABLED) &&
+           !(adapter->flags & QLCNIC_TX_INTR_SHARED)) {
+               for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
+                       tx_ring = &adapter->tx_ring[ring];
+                       netif_tx_napi_add(netdev, &tx_ring->napi,
+                                      qlcnic_83xx_msix_tx_poll,
+                                      NAPI_POLL_WEIGHT);
+               }
+       }
+
+       return 0;
+}
+
+void qlcnic_83xx_napi_del(struct qlcnic_adapter *adapter)
+{
+       int ring;
+       struct qlcnic_host_sds_ring *sds_ring;
+       struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
+       struct qlcnic_host_tx_ring *tx_ring;
+
+       for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
+               sds_ring = &recv_ctx->sds_rings[ring];
+               netif_napi_del(&sds_ring->napi);
+       }
+
+       qlcnic_free_sds_rings(adapter->recv_ctx);
+
+       if ((adapter->flags & QLCNIC_MSIX_ENABLED) &&
+           !(adapter->flags & QLCNIC_TX_INTR_SHARED)) {
+               for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
+                       tx_ring = &adapter->tx_ring[ring];
+                       netif_napi_del(&tx_ring->napi);
+               }
+       }
+
+       qlcnic_free_tx_rings(adapter);
+}
+
+static void qlcnic_83xx_process_rcv_diag(struct qlcnic_adapter *adapter,
+                                        int ring, u64 sts_data[])
+{
+       struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
+       struct sk_buff *skb;
+       struct qlcnic_host_rds_ring *rds_ring;
+       int index, length;
+
+       if (unlikely(ring >= adapter->max_rds_rings))
+               return;
+
+       rds_ring = &recv_ctx->rds_rings[ring];
+       index = qlcnic_83xx_hndl(sts_data[0]);
+       if (unlikely(index >= rds_ring->num_desc))
+               return;
+
+       length = qlcnic_83xx_pktln(sts_data[0]);
+
+       skb = qlcnic_process_rxbuf(adapter, rds_ring, index, STATUS_CKSUM_OK);
+       if (!skb)
+               return;
+
+       if (length > rds_ring->skb_size)
+               skb_put(skb, rds_ring->skb_size);
+       else
+               skb_put(skb, length);
+
+       if (!qlcnic_check_loopback_buff(skb->data, adapter->mac_addr))
+               adapter->ahw->diag_cnt++;
+       else
+               dump_skb(skb, adapter);
+
+       dev_kfree_skb_any(skb);
+       return;
+}
+
+void qlcnic_83xx_process_rcv_ring_diag(struct qlcnic_host_sds_ring *sds_ring)
+{
+       struct qlcnic_adapter *adapter = sds_ring->adapter;
+       struct status_desc *desc;
+       u64 sts_data[2];
+       int ring, opcode;
+       u32 consumer = sds_ring->consumer;
+
+       desc = &sds_ring->desc_head[consumer];
+       sts_data[0] = le64_to_cpu(desc->status_desc_data[0]);
+       sts_data[1] = le64_to_cpu(desc->status_desc_data[1]);
+       opcode = qlcnic_83xx_opcode(sts_data[1]);
+       if (!opcode)
+               return;
+
+       ring = QLCNIC_FETCH_RING_ID(sts_data[0]);
+       qlcnic_83xx_process_rcv_diag(adapter, ring, sts_data);
+       desc = &sds_ring->desc_head[consumer];
+       desc->status_desc_data[0] = cpu_to_le64(STATUS_OWNER_PHANTOM);
+       consumer = get_next_index(consumer, sds_ring->num_desc);
+       sds_ring->consumer = consumer;
+       writel(consumer, sds_ring->crb_sts_consumer);
+}
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
new file mode 100644 (file)
index 0000000..3ebef27
--- /dev/null
@@ -0,0 +1,4355 @@
+/*
+ * QLogic qlcnic NIC Driver
+ * Copyright (c) 2009-2013 QLogic Corporation
+ *
+ * See LICENSE.qlcnic for copyright and licensing details.
+ */
+
+#include <linux/vmalloc.h>
+#include <linux/interrupt.h>
+#include <linux/swab.h>
+#include <linux/dma-mapping.h>
+#include <linux/if_vlan.h>
+#include <net/ip.h>
+#include <linux/ipv6.h>
+#include <linux/inetdevice.h>
+#include <linux/aer.h>
+#include <linux/log2.h>
+#include <linux/pci.h>
+#include <net/vxlan.h>
+
+#include "qlcnic.h"
+#include "qlcnic_sriov.h"
+#include "qlcnic_hw.h"
+
+MODULE_DESCRIPTION("QLogic 1/10 GbE Converged/Intelligent Ethernet Driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(QLCNIC_LINUX_VERSIONID);
+MODULE_FIRMWARE(QLCNIC_UNIFIED_ROMIMAGE_NAME);
+
+char qlcnic_driver_name[] = "qlcnic";
+static const char qlcnic_driver_string[] = "QLogic 1/10 GbE "
+       "Converged/Intelligent Ethernet Driver v" QLCNIC_LINUX_VERSIONID;
+
+static int qlcnic_mac_learn;
+module_param(qlcnic_mac_learn, int, 0444);
+MODULE_PARM_DESC(qlcnic_mac_learn,
+                "Mac Filter (0=learning is disabled, 1=Driver learning is enabled, 2=FDB learning is enabled)");
+
+int qlcnic_use_msi = 1;
+MODULE_PARM_DESC(use_msi, "MSI interrupt (0=disabled, 1=enabled)");
+module_param_named(use_msi, qlcnic_use_msi, int, 0444);
+
+int qlcnic_use_msi_x = 1;
+MODULE_PARM_DESC(use_msi_x, "MSI-X interrupt (0=disabled, 1=enabled)");
+module_param_named(use_msi_x, qlcnic_use_msi_x, int, 0444);
+
+int qlcnic_auto_fw_reset = 1;
+MODULE_PARM_DESC(auto_fw_reset, "Auto firmware reset (0=disabled, 1=enabled)");
+module_param_named(auto_fw_reset, qlcnic_auto_fw_reset, int, 0644);
+
+int qlcnic_load_fw_file;
+MODULE_PARM_DESC(load_fw_file, "Load firmware from (0=flash, 1=file, 2=POST in fast mode, 3=POST in medium mode, 4=POST in slow mode)");
+module_param_named(load_fw_file, qlcnic_load_fw_file, int, 0444);
+
+static int qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
+static void qlcnic_remove(struct pci_dev *pdev);
+static int qlcnic_open(struct net_device *netdev);
+static int qlcnic_close(struct net_device *netdev);
+static void qlcnic_tx_timeout(struct net_device *netdev);
+static void qlcnic_attach_work(struct work_struct *work);
+static void qlcnic_fwinit_work(struct work_struct *work);
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void qlcnic_poll_controller(struct net_device *netdev);
+#endif
+
+static void qlcnic_idc_debug_info(struct qlcnic_adapter *adapter, u8 encoding);
+static int qlcnic_can_start_firmware(struct qlcnic_adapter *adapter);
+
+static irqreturn_t qlcnic_tmp_intr(int irq, void *data);
+static irqreturn_t qlcnic_intr(int irq, void *data);
+static irqreturn_t qlcnic_msi_intr(int irq, void *data);
+static irqreturn_t qlcnic_msix_intr(int irq, void *data);
+static irqreturn_t qlcnic_msix_tx_intr(int irq, void *data);
+
+static struct net_device_stats *qlcnic_get_stats(struct net_device *netdev);
+static int qlcnic_start_firmware(struct qlcnic_adapter *);
+
+static void qlcnic_free_lb_filters_mem(struct qlcnic_adapter *adapter);
+static void qlcnic_dev_set_npar_ready(struct qlcnic_adapter *);
+static int qlcnicvf_start_firmware(struct qlcnic_adapter *);
+static int qlcnic_vlan_rx_add(struct net_device *, __be16, u16);
+static int qlcnic_vlan_rx_del(struct net_device *, __be16, u16);
+
+static int qlcnic_82xx_setup_intr(struct qlcnic_adapter *);
+static void qlcnic_82xx_dev_request_reset(struct qlcnic_adapter *, u32);
+static irqreturn_t qlcnic_82xx_clear_legacy_intr(struct qlcnic_adapter *);
+static pci_ers_result_t qlcnic_82xx_io_slot_reset(struct pci_dev *);
+static int qlcnic_82xx_start_firmware(struct qlcnic_adapter *);
+static void qlcnic_82xx_io_resume(struct pci_dev *);
+static void qlcnic_82xx_set_mac_filter_count(struct qlcnic_adapter *);
+static pci_ers_result_t qlcnic_82xx_io_error_detected(struct pci_dev *,
+                                                     pci_channel_state_t);
+static u32 qlcnic_vlan_tx_check(struct qlcnic_adapter *adapter)
+{
+       struct qlcnic_hardware_context *ahw = adapter->ahw;
+
+       if (adapter->pdev->device == PCI_DEVICE_ID_QLOGIC_QLE824X)
+               return ahw->capabilities & QLCNIC_FW_CAPABILITY_FVLANTX;
+       else
+               return 1;
+}
+
+/*  PCI Device ID Table  */
+#define ENTRY(device) \
+       {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, (device)), \
+       .class = PCI_CLASS_NETWORK_ETHERNET << 8, .class_mask = ~0}
+
+static const struct pci_device_id qlcnic_pci_tbl[] = {
+       ENTRY(PCI_DEVICE_ID_QLOGIC_QLE824X),
+       ENTRY(PCI_DEVICE_ID_QLOGIC_QLE834X),
+       ENTRY(PCI_DEVICE_ID_QLOGIC_VF_QLE834X),
+       ENTRY(PCI_DEVICE_ID_QLOGIC_QLE8830),
+       ENTRY(PCI_DEVICE_ID_QLOGIC_VF_QLE8C30),
+       ENTRY(PCI_DEVICE_ID_QLOGIC_QLE844X),
+       ENTRY(PCI_DEVICE_ID_QLOGIC_VF_QLE844X),
+       {0,}
+};
+
+MODULE_DEVICE_TABLE(pci, qlcnic_pci_tbl);
+
+
+inline void qlcnic_update_cmd_producer(struct qlcnic_host_tx_ring *tx_ring)
+{
+       writel(tx_ring->producer, tx_ring->crb_cmd_producer);
+}
+
+static const u32 msi_tgt_status[8] = {
+       ISR_INT_TARGET_STATUS, ISR_INT_TARGET_STATUS_F1,
+       ISR_INT_TARGET_STATUS_F2, ISR_INT_TARGET_STATUS_F3,
+       ISR_INT_TARGET_STATUS_F4, ISR_INT_TARGET_STATUS_F5,
+       ISR_INT_TARGET_STATUS_F6, ISR_INT_TARGET_STATUS_F7
+};
+
+static const u32 qlcnic_reg_tbl[] = {
+       0x1B20A8,       /* PEG_HALT_STAT1 */
+       0x1B20AC,       /* PEG_HALT_STAT2 */
+       0x1B20B0,       /* FW_HEARTBEAT */
+       0x1B2100,       /* LOCK ID */
+       0x1B2128,       /* FW_CAPABILITIES */
+       0x1B2138,       /* drv active */
+       0x1B2140,       /* dev state */
+       0x1B2144,       /* drv state */
+       0x1B2148,       /* drv scratch */
+       0x1B214C,       /* dev partition info */
+       0x1B2174,       /* drv idc ver */
+       0x1B2150,       /* fw version major */
+       0x1B2154,       /* fw version minor */
+       0x1B2158,       /* fw version sub */
+       0x1B219C,       /* npar state */
+       0x1B21FC,       /* FW_IMG_VALID */
+       0x1B2250,       /* CMD_PEG_STATE */
+       0x1B233C,       /* RCV_PEG_STATE */
+       0x1B23B4,       /* ASIC TEMP */
+       0x1B216C,       /* FW api */
+       0x1B2170,       /* drv op mode */
+       0x13C010,       /* flash lock */
+       0x13C014,       /* flash unlock */
+};
+
+static const struct qlcnic_board_info qlcnic_boards[] = {
+       { PCI_VENDOR_ID_QLOGIC,
+         PCI_DEVICE_ID_QLOGIC_QLE844X,
+         0x0,
+         0x0,
+         "8400 series 10GbE Converged Network Adapter (TCP/IP Networking)" },
+       { PCI_VENDOR_ID_QLOGIC,
+         PCI_DEVICE_ID_QLOGIC_QLE834X,
+         PCI_VENDOR_ID_QLOGIC,
+         0x24e,
+         "8300 Series Dual Port 10GbE Converged Network Adapter "
+         "(TCP/IP Networking)" },
+       { PCI_VENDOR_ID_QLOGIC,
+         PCI_DEVICE_ID_QLOGIC_QLE834X,
+         PCI_VENDOR_ID_QLOGIC,
+         0x243,
+         "8300 Series Single Port 10GbE Converged Network Adapter "
+         "(TCP/IP Networking)" },
+       { PCI_VENDOR_ID_QLOGIC,
+         PCI_DEVICE_ID_QLOGIC_QLE834X,
+         PCI_VENDOR_ID_QLOGIC,
+         0x24a,
+         "8300 Series Dual Port 10GbE Converged Network Adapter "
+         "(TCP/IP Networking)" },
+       { PCI_VENDOR_ID_QLOGIC,
+         PCI_DEVICE_ID_QLOGIC_QLE834X,
+         PCI_VENDOR_ID_QLOGIC,
+         0x246,
+         "8300 Series Dual Port 10GbE Converged Network Adapter "
+         "(TCP/IP Networking)" },
+       { PCI_VENDOR_ID_QLOGIC,
+         PCI_DEVICE_ID_QLOGIC_QLE834X,
+         PCI_VENDOR_ID_QLOGIC,
+         0x252,
+         "8300 Series Dual Port 10GbE Converged Network Adapter "
+         "(TCP/IP Networking)" },
+       { PCI_VENDOR_ID_QLOGIC,
+         PCI_DEVICE_ID_QLOGIC_QLE834X,
+         PCI_VENDOR_ID_QLOGIC,
+         0x26e,
+         "8300 Series Dual Port 10GbE Converged Network Adapter "
+         "(TCP/IP Networking)" },
+       { PCI_VENDOR_ID_QLOGIC,
+         PCI_DEVICE_ID_QLOGIC_QLE834X,
+         PCI_VENDOR_ID_QLOGIC,
+         0x260,
+         "8300 Series Dual Port 10GbE Converged Network Adapter "
+         "(TCP/IP Networking)" },
+       { PCI_VENDOR_ID_QLOGIC,
+         PCI_DEVICE_ID_QLOGIC_QLE834X,
+         PCI_VENDOR_ID_QLOGIC,
+         0x266,
+         "8300 Series Single Port 10GbE Converged Network Adapter "
+         "(TCP/IP Networking)" },
+       { PCI_VENDOR_ID_QLOGIC,
+         PCI_DEVICE_ID_QLOGIC_QLE834X,
+         PCI_VENDOR_ID_QLOGIC,
+         0x269,
+         "8300 Series Dual Port 10GbE Converged Network Adapter "
+         "(TCP/IP Networking)" },
+       { PCI_VENDOR_ID_QLOGIC,
+         PCI_DEVICE_ID_QLOGIC_QLE834X,
+         PCI_VENDOR_ID_QLOGIC,
+         0x271,
+         "8300 Series Dual Port 10GbE Converged Network Adapter "
+         "(TCP/IP Networking)" },
+       { PCI_VENDOR_ID_QLOGIC,
+         PCI_DEVICE_ID_QLOGIC_QLE834X,
+         0x0, 0x0, "8300 Series 1/10GbE Controller" },
+       { PCI_VENDOR_ID_QLOGIC,
+         PCI_DEVICE_ID_QLOGIC_QLE8830,
+         0x0,
+         0x0,
+         "8830 Series 1/10GbE Controller" },
+       { PCI_VENDOR_ID_QLOGIC,
+         PCI_DEVICE_ID_QLOGIC_QLE824X,
+         PCI_VENDOR_ID_QLOGIC,
+         0x203,
+         "8200 Series Single Port 10GbE Converged Network Adapter"
+         "(TCP/IP Networking)" },
+       { PCI_VENDOR_ID_QLOGIC,
+         PCI_DEVICE_ID_QLOGIC_QLE824X,
+         PCI_VENDOR_ID_QLOGIC,
+         0x207,
+         "8200 Series Dual Port 10GbE Converged Network Adapter"
+         "(TCP/IP Networking)" },
+       { PCI_VENDOR_ID_QLOGIC,
+         PCI_DEVICE_ID_QLOGIC_QLE824X,
+         PCI_VENDOR_ID_QLOGIC,
+         0x20b,
+         "3200 Series Dual Port 10Gb Intelligent Ethernet Adapter" },
+       { PCI_VENDOR_ID_QLOGIC,
+         PCI_DEVICE_ID_QLOGIC_QLE824X,
+         PCI_VENDOR_ID_QLOGIC,
+         0x20c,
+         "3200 Series Quad Port 1Gb Intelligent Ethernet Adapter" },
+       { PCI_VENDOR_ID_QLOGIC,
+         PCI_DEVICE_ID_QLOGIC_QLE824X,
+         PCI_VENDOR_ID_QLOGIC,
+         0x20f,
+         "3200 Series Single Port 10Gb Intelligent Ethernet Adapter" },
+       { PCI_VENDOR_ID_QLOGIC,
+         PCI_DEVICE_ID_QLOGIC_QLE824X,
+         0x103c, 0x3733,
+         "NC523SFP 10Gb 2-port Server Adapter" },
+       { PCI_VENDOR_ID_QLOGIC,
+         PCI_DEVICE_ID_QLOGIC_QLE824X,
+         0x103c, 0x3346,
+         "CN1000Q Dual Port Converged Network Adapter" },
+       { PCI_VENDOR_ID_QLOGIC,
+         PCI_DEVICE_ID_QLOGIC_QLE824X,
+         PCI_VENDOR_ID_QLOGIC,
+         0x210,
+         "QME8242-k 10GbE Dual Port Mezzanine Card" },
+       { PCI_VENDOR_ID_QLOGIC,
+         PCI_DEVICE_ID_QLOGIC_QLE824X,
+         0x0, 0x0, "cLOM8214 1/10GbE Controller" },
+};
+
+#define NUM_SUPPORTED_BOARDS ARRAY_SIZE(qlcnic_boards)
+
+static const
+struct qlcnic_legacy_intr_set legacy_intr[] = QLCNIC_LEGACY_INTR_CONFIG;
+
+int qlcnic_alloc_sds_rings(struct qlcnic_recv_context *recv_ctx, int count)
+{
+       int size = sizeof(struct qlcnic_host_sds_ring) * count;
+
+       recv_ctx->sds_rings = kzalloc(size, GFP_KERNEL);
+
+       return recv_ctx->sds_rings == NULL;
+}
+
+void qlcnic_free_sds_rings(struct qlcnic_recv_context *recv_ctx)
+{
+       kfree(recv_ctx->sds_rings);
+       recv_ctx->sds_rings = NULL;
+}
+
+int qlcnic_read_mac_addr(struct qlcnic_adapter *adapter)
+{
+       struct net_device *netdev = adapter->netdev;
+       struct pci_dev *pdev = adapter->pdev;
+       u8 mac_addr[ETH_ALEN];
+       int ret;
+
+       ret = qlcnic_get_mac_address(adapter, mac_addr,
+                                    adapter->ahw->pci_func);
+       if (ret)
+               return ret;
+
+       memcpy(netdev->dev_addr, mac_addr, ETH_ALEN);
+       memcpy(adapter->mac_addr, netdev->dev_addr, netdev->addr_len);
+
+       /* set station address */
+
+       if (!is_valid_ether_addr(netdev->dev_addr))
+               dev_warn(&pdev->dev, "Bad MAC address %pM.\n",
+                                       netdev->dev_addr);
+
+       return 0;
+}
+
+static void qlcnic_delete_adapter_mac(struct qlcnic_adapter *adapter)
+{
+       struct qlcnic_mac_vlan_list *cur;
+       struct list_head *head;
+
+       list_for_each(head, &adapter->mac_list) {
+               cur = list_entry(head, struct qlcnic_mac_vlan_list, list);
+               if (ether_addr_equal_unaligned(adapter->mac_addr, cur->mac_addr)) {
+                       qlcnic_sre_macaddr_change(adapter, cur->mac_addr,
+                                                 0, QLCNIC_MAC_DEL);
+                       list_del(&cur->list);
+                       kfree(cur);
+                       return;
+               }
+       }
+}
+
+static int qlcnic_set_mac(struct net_device *netdev, void *p)
+{
+       struct qlcnic_adapter *adapter = netdev_priv(netdev);
+       struct sockaddr *addr = p;
+
+       if (qlcnic_sriov_vf_check(adapter))
+               return -EINVAL;
+
+       if ((adapter->flags & QLCNIC_MAC_OVERRIDE_DISABLED))
+               return -EOPNOTSUPP;
+
+       if (!is_valid_ether_addr(addr->sa_data))
+               return -EINVAL;
+
+       if (ether_addr_equal_unaligned(adapter->mac_addr, addr->sa_data) &&
+           ether_addr_equal_unaligned(netdev->dev_addr, addr->sa_data))
+               return 0;
+
+       if (test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
+               netif_device_detach(netdev);
+               qlcnic_napi_disable(adapter);
+       }
+
+       qlcnic_delete_adapter_mac(adapter);
+       memcpy(adapter->mac_addr, addr->sa_data, netdev->addr_len);
+       memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
+       qlcnic_set_multi(adapter->netdev);
+
+       if (test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
+               netif_device_attach(netdev);
+               qlcnic_napi_enable(adapter);
+       }
+       return 0;
+}
+
+static int qlcnic_fdb_del(struct ndmsg *ndm, struct nlattr *tb[],
+                       struct net_device *netdev,
+                       const unsigned char *addr, u16 vid)
+{
+       struct qlcnic_adapter *adapter = netdev_priv(netdev);
+       int err = -EOPNOTSUPP;
+
+       if (!adapter->fdb_mac_learn)
+               return ndo_dflt_fdb_del(ndm, tb, netdev, addr, vid);
+
+       if ((adapter->flags & QLCNIC_ESWITCH_ENABLED) ||
+           qlcnic_sriov_check(adapter)) {
+               if (is_unicast_ether_addr(addr)) {
+                       err = dev_uc_del(netdev, addr);
+                       if (!err)
+                               err = qlcnic_nic_del_mac(adapter, addr);
+               } else if (is_multicast_ether_addr(addr)) {
+                       err = dev_mc_del(netdev, addr);
+               } else {
+                       err = -EINVAL;
+               }
+       }
+       return err;
+}
+
+static int qlcnic_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
+                       struct net_device *netdev,
+                       const unsigned char *addr, u16 vid, u16 flags)
+{
+       struct qlcnic_adapter *adapter = netdev_priv(netdev);
+       int err = 0;
+
+       if (!adapter->fdb_mac_learn)
+               return ndo_dflt_fdb_add(ndm, tb, netdev, addr, vid, flags);
+
+       if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED) &&
+           !qlcnic_sriov_check(adapter)) {
+               pr_info("%s: FDB e-switch is not enabled\n", __func__);
+               return -EOPNOTSUPP;
+       }
+
+       if (ether_addr_equal(addr, adapter->mac_addr))
+               return err;
+
+       if (is_unicast_ether_addr(addr)) {
+               if (netdev_uc_count(netdev) < adapter->ahw->max_uc_count)
+                       err = dev_uc_add_excl(netdev, addr);
+               else
+                       err = -ENOMEM;
+       } else if (is_multicast_ether_addr(addr)) {
+               err = dev_mc_add_excl(netdev, addr);
+       } else {
+               err = -EINVAL;
+       }
+
+       return err;
+}
+
+static int qlcnic_fdb_dump(struct sk_buff *skb, struct netlink_callback *ncb,
+                       struct net_device *netdev,
+                       struct net_device *filter_dev, int idx)
+{
+       struct qlcnic_adapter *adapter = netdev_priv(netdev);
+
+       if (!adapter->fdb_mac_learn)
+               return ndo_dflt_fdb_dump(skb, ncb, netdev, filter_dev, idx);
+
+       if ((adapter->flags & QLCNIC_ESWITCH_ENABLED) ||
+           qlcnic_sriov_check(adapter))
+               idx = ndo_dflt_fdb_dump(skb, ncb, netdev, filter_dev, idx);
+
+       return idx;
+}
+
+static void qlcnic_82xx_cancel_idc_work(struct qlcnic_adapter *adapter)
+{
+       while (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
+               usleep_range(10000, 11000);
+
+       if (!adapter->fw_work.work.func)
+               return;
+
+       cancel_delayed_work_sync(&adapter->fw_work);
+}
+
+static int qlcnic_get_phys_port_id(struct net_device *netdev,
+                                  struct netdev_phys_item_id *ppid)
+{
+       struct qlcnic_adapter *adapter = netdev_priv(netdev);
+       struct qlcnic_hardware_context *ahw = adapter->ahw;
+
+       if (!(adapter->flags & QLCNIC_HAS_PHYS_PORT_ID))
+               return -EOPNOTSUPP;
+
+       ppid->id_len = sizeof(ahw->phys_port_id);
+       memcpy(ppid->id, ahw->phys_port_id, ppid->id_len);
+
+       return 0;
+}
+
+static void qlcnic_add_vxlan_port(struct net_device *netdev,
+                                 struct udp_tunnel_info *ti)
+{
+       struct qlcnic_adapter *adapter = netdev_priv(netdev);
+       struct qlcnic_hardware_context *ahw = adapter->ahw;
+
+       if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
+               return;
+
+       /* Adapter supports only one VXLAN port. Use very first port
+        * for enabling offload
+        */
+       if (!qlcnic_encap_rx_offload(adapter))
+               return;
+       if (!ahw->vxlan_port_count) {
+               ahw->vxlan_port_count = 1;
+               ahw->vxlan_port = ntohs(ti->port);
+               adapter->flags |= QLCNIC_ADD_VXLAN_PORT;
+               return;
+       }
+       if (ahw->vxlan_port == ntohs(ti->port))
+               ahw->vxlan_port_count++;
+
+}
+
+static void qlcnic_del_vxlan_port(struct net_device *netdev,
+                                 struct udp_tunnel_info *ti)
+{
+       struct qlcnic_adapter *adapter = netdev_priv(netdev);
+       struct qlcnic_hardware_context *ahw = adapter->ahw;
+
+       if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
+               return;
+
+       if (!qlcnic_encap_rx_offload(adapter) || !ahw->vxlan_port_count ||
+           (ahw->vxlan_port != ntohs(ti->port)))
+               return;
+
+       ahw->vxlan_port_count--;
+       if (!ahw->vxlan_port_count)
+               adapter->flags |= QLCNIC_DEL_VXLAN_PORT;
+}
+
+static netdev_features_t qlcnic_features_check(struct sk_buff *skb,
+                                              struct net_device *dev,
+                                              netdev_features_t features)
+{
+       features = vlan_features_check(skb, features);
+       return vxlan_features_check(skb, features);
+}
+
+static const struct net_device_ops qlcnic_netdev_ops = {
+       .ndo_open          = qlcnic_open,
+       .ndo_stop          = qlcnic_close,
+       .ndo_start_xmit    = qlcnic_xmit_frame,
+       .ndo_get_stats     = qlcnic_get_stats,
+       .ndo_validate_addr = eth_validate_addr,
+       .ndo_set_rx_mode   = qlcnic_set_multi,
+       .ndo_set_mac_address    = qlcnic_set_mac,
+       .ndo_change_mtu    = qlcnic_change_mtu,
+       .ndo_fix_features  = qlcnic_fix_features,
+       .ndo_set_features  = qlcnic_set_features,
+       .ndo_tx_timeout    = qlcnic_tx_timeout,
+       .ndo_vlan_rx_add_vid    = qlcnic_vlan_rx_add,
+       .ndo_vlan_rx_kill_vid   = qlcnic_vlan_rx_del,
+       .ndo_fdb_add            = qlcnic_fdb_add,
+       .ndo_fdb_del            = qlcnic_fdb_del,
+       .ndo_fdb_dump           = qlcnic_fdb_dump,
+       .ndo_get_phys_port_id   = qlcnic_get_phys_port_id,
+       .ndo_udp_tunnel_add     = qlcnic_add_vxlan_port,
+       .ndo_udp_tunnel_del     = qlcnic_del_vxlan_port,
+       .ndo_features_check     = qlcnic_features_check,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+       .ndo_poll_controller = qlcnic_poll_controller,
+#endif
+#ifdef CONFIG_QLCNIC_SRIOV
+       .ndo_set_vf_mac         = qlcnic_sriov_set_vf_mac,
+       .ndo_set_vf_rate        = qlcnic_sriov_set_vf_tx_rate,
+       .ndo_get_vf_config      = qlcnic_sriov_get_vf_config,
+       .ndo_set_vf_vlan        = qlcnic_sriov_set_vf_vlan,
+       .ndo_set_vf_spoofchk    = qlcnic_sriov_set_vf_spoofchk,
+#endif
+};
+
+static const struct net_device_ops qlcnic_netdev_failed_ops = {
+       .ndo_open          = qlcnic_open,
+};
+
+static struct qlcnic_nic_template qlcnic_ops = {
+       .config_bridged_mode    = qlcnic_config_bridged_mode,
+       .config_led             = qlcnic_82xx_config_led,
+       .start_firmware         = qlcnic_82xx_start_firmware,
+       .request_reset          = qlcnic_82xx_dev_request_reset,
+       .cancel_idc_work        = qlcnic_82xx_cancel_idc_work,
+       .napi_add               = qlcnic_82xx_napi_add,
+       .napi_del               = qlcnic_82xx_napi_del,
+       .config_ipaddr          = qlcnic_82xx_config_ipaddr,
+       .shutdown               = qlcnic_82xx_shutdown,
+       .resume                 = qlcnic_82xx_resume,
+       .clear_legacy_intr      = qlcnic_82xx_clear_legacy_intr,
+};
+
+struct qlcnic_nic_template qlcnic_vf_ops = {
+       .config_bridged_mode    = qlcnicvf_config_bridged_mode,
+       .config_led             = qlcnicvf_config_led,
+       .start_firmware         = qlcnicvf_start_firmware
+};
+
+static struct qlcnic_hardware_ops qlcnic_hw_ops = {
+       .read_crb                       = qlcnic_82xx_read_crb,
+       .write_crb                      = qlcnic_82xx_write_crb,
+       .read_reg                       = qlcnic_82xx_hw_read_wx_2M,
+       .write_reg                      = qlcnic_82xx_hw_write_wx_2M,
+       .get_mac_address                = qlcnic_82xx_get_mac_address,
+       .setup_intr                     = qlcnic_82xx_setup_intr,
+       .alloc_mbx_args                 = qlcnic_82xx_alloc_mbx_args,
+       .mbx_cmd                        = qlcnic_82xx_issue_cmd,
+       .get_func_no                    = qlcnic_82xx_get_func_no,
+       .api_lock                       = qlcnic_82xx_api_lock,
+       .api_unlock                     = qlcnic_82xx_api_unlock,
+       .add_sysfs                      = qlcnic_82xx_add_sysfs,
+       .remove_sysfs                   = qlcnic_82xx_remove_sysfs,
+       .process_lb_rcv_ring_diag       = qlcnic_82xx_process_rcv_ring_diag,
+       .create_rx_ctx                  = qlcnic_82xx_fw_cmd_create_rx_ctx,
+       .create_tx_ctx                  = qlcnic_82xx_fw_cmd_create_tx_ctx,
+       .del_rx_ctx                     = qlcnic_82xx_fw_cmd_del_rx_ctx,
+       .del_tx_ctx                     = qlcnic_82xx_fw_cmd_del_tx_ctx,
+       .setup_link_event               = qlcnic_82xx_linkevent_request,
+       .get_nic_info                   = qlcnic_82xx_get_nic_info,
+       .get_pci_info                   = qlcnic_82xx_get_pci_info,
+       .set_nic_info                   = qlcnic_82xx_set_nic_info,
+       .change_macvlan                 = qlcnic_82xx_sre_macaddr_change,
+       .napi_enable                    = qlcnic_82xx_napi_enable,
+       .napi_disable                   = qlcnic_82xx_napi_disable,
+       .config_intr_coal               = qlcnic_82xx_config_intr_coalesce,
+       .config_rss                     = qlcnic_82xx_config_rss,
+       .config_hw_lro                  = qlcnic_82xx_config_hw_lro,
+       .config_loopback                = qlcnic_82xx_set_lb_mode,
+       .clear_loopback                 = qlcnic_82xx_clear_lb_mode,
+       .config_promisc_mode            = qlcnic_82xx_nic_set_promisc,
+       .change_l2_filter               = qlcnic_82xx_change_filter,
+       .get_board_info                 = qlcnic_82xx_get_board_info,
+       .set_mac_filter_count           = qlcnic_82xx_set_mac_filter_count,
+       .free_mac_list                  = qlcnic_82xx_free_mac_list,
+       .read_phys_port_id              = qlcnic_82xx_read_phys_port_id,
+       .io_error_detected              = qlcnic_82xx_io_error_detected,
+       .io_slot_reset                  = qlcnic_82xx_io_slot_reset,
+       .io_resume                      = qlcnic_82xx_io_resume,
+       .get_beacon_state               = qlcnic_82xx_get_beacon_state,
+       .enable_sds_intr                = qlcnic_82xx_enable_sds_intr,
+       .disable_sds_intr               = qlcnic_82xx_disable_sds_intr,
+       .enable_tx_intr                 = qlcnic_82xx_enable_tx_intr,
+       .disable_tx_intr                = qlcnic_82xx_disable_tx_intr,
+       .get_saved_state                = qlcnic_82xx_get_saved_state,
+       .set_saved_state                = qlcnic_82xx_set_saved_state,
+       .cache_tmpl_hdr_values          = qlcnic_82xx_cache_tmpl_hdr_values,
+       .get_cap_size                   = qlcnic_82xx_get_cap_size,
+       .set_sys_info                   = qlcnic_82xx_set_sys_info,
+       .store_cap_mask                 = qlcnic_82xx_store_cap_mask,
+};
+
+static int qlcnic_check_multi_tx_capability(struct qlcnic_adapter *adapter)
+{
+       struct qlcnic_hardware_context *ahw = adapter->ahw;
+
+       if (qlcnic_82xx_check(adapter) &&
+           (ahw->extra_capability[0] & QLCNIC_FW_CAPABILITY_2_MULTI_TX)) {
+               test_and_set_bit(__QLCNIC_MULTI_TX_UNIQUE, &adapter->state);
+               return 0;
+       } else {
+               return 1;
+       }
+}
+
+static int qlcnic_max_rings(struct qlcnic_adapter *adapter, u8 ring_cnt,
+                           int queue_type)
+{
+       int num_rings, max_rings = QLCNIC_MAX_SDS_RINGS;
+
+       if (queue_type == QLCNIC_RX_QUEUE)
+               max_rings = adapter->max_sds_rings;
+       else if (queue_type == QLCNIC_TX_QUEUE)
+               max_rings = adapter->max_tx_rings;
+
+       num_rings = rounddown_pow_of_two(min_t(int, num_online_cpus(),
+                                             max_rings));
+
+       if (ring_cnt > num_rings)
+               return num_rings;
+       else
+               return ring_cnt;
+}
+
+void qlcnic_set_tx_ring_count(struct qlcnic_adapter *adapter, u8 tx_cnt)
+{
+       /* 83xx adapter does not have max_tx_rings initialized in probe */
+       if (adapter->max_tx_rings)
+               adapter->drv_tx_rings = qlcnic_max_rings(adapter, tx_cnt,
+                                                        QLCNIC_TX_QUEUE);
+       else
+               adapter->drv_tx_rings = tx_cnt;
+}
+
+void qlcnic_set_sds_ring_count(struct qlcnic_adapter *adapter, u8 rx_cnt)
+{
+       /* 83xx adapter does not have max_sds_rings initialized in probe */
+       if (adapter->max_sds_rings)
+               adapter->drv_sds_rings = qlcnic_max_rings(adapter, rx_cnt,
+                                                         QLCNIC_RX_QUEUE);
+       else
+               adapter->drv_sds_rings = rx_cnt;
+}
+
+int qlcnic_setup_tss_rss_intr(struct qlcnic_adapter *adapter)
+{
+       struct pci_dev *pdev = adapter->pdev;
+       int num_msix = 0, err = 0, vector;
+
+       adapter->flags &= ~QLCNIC_TSS_RSS;
+
+       if (adapter->drv_tss_rings > 0)
+               num_msix += adapter->drv_tss_rings;
+       else
+               num_msix += adapter->drv_tx_rings;
+
+       if (adapter->drv_rss_rings > 0)
+               num_msix += adapter->drv_rss_rings;
+       else
+               num_msix += adapter->drv_sds_rings;
+
+       if (qlcnic_83xx_check(adapter))
+               num_msix += 1;
+
+       if (!adapter->msix_entries) {
+               adapter->msix_entries = kcalloc(num_msix,
+                                               sizeof(struct msix_entry),
+                                               GFP_KERNEL);
+               if (!adapter->msix_entries)
+                       return -ENOMEM;
+       }
+
+       for (vector = 0; vector < num_msix; vector++)
+               adapter->msix_entries[vector].entry = vector;
+
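+       /* Request the exact vector count; on -ENOSPC drop the extra TSS/RSS
+        * rings and retry with the base Tx/SDS ring counts.
+        */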
+restore:
+       err = pci_enable_msix_exact(pdev, adapter->msix_entries, num_msix);
+       if (err == -ENOSPC) {
+               if (!adapter->drv_tss_rings && !adapter->drv_rss_rings)
+                       return err;
+
+               netdev_info(adapter->netdev,
+                           "Unable to allocate %d MSI-X vectors, Available vectors %d\n",
+                           num_msix, err);
+
+               num_msix = adapter->drv_tx_rings + adapter->drv_sds_rings;
+
+               /* Set rings to 0 so we can restore original TSS/RSS count */
+               adapter->drv_tss_rings = 0;
+               adapter->drv_rss_rings = 0;
+
+               if (qlcnic_83xx_check(adapter))
+                       num_msix += 1;
+
+               netdev_info(adapter->netdev,
+                           "Restoring %d Tx, %d SDS rings for total %d vectors.\n",
+                           adapter->drv_tx_rings, adapter->drv_sds_rings,
+                           num_msix);
+
+               goto restore;
+       } else if (err < 0) {
+               return err;
+       }
+
+       adapter->ahw->num_msix = num_msix;
+       if (adapter->drv_tss_rings > 0)
+               adapter->drv_tx_rings = adapter->drv_tss_rings;
+
+       if (adapter->drv_rss_rings > 0)
+               adapter->drv_sds_rings = adapter->drv_rss_rings;
+
+       return 0;
+}
+
+int qlcnic_enable_msix(struct qlcnic_adapter *adapter, u32 num_msix)
+{
+       struct pci_dev *pdev = adapter->pdev;
+       int err, vector;
+
+       if (!adapter->msix_entries) {
+               adapter->msix_entries = kcalloc(num_msix,
+                                               sizeof(struct msix_entry),
+                                               GFP_KERNEL);
+               if (!adapter->msix_entries)
+                       return -ENOMEM;
+       }
+
+       adapter->flags &= ~(QLCNIC_MSI_ENABLED | QLCNIC_MSIX_ENABLED);
+
+       if (adapter->ahw->msix_supported) {
+enable_msix:
+               for (vector = 0; vector < num_msix; vector++)
+                       adapter->msix_entries[vector].entry = vector;
+
+               err = pci_enable_msix_range(pdev,
+                                           adapter->msix_entries, 1, num_msix);
+
+               if (err == num_msix) {
+                       adapter->flags |= QLCNIC_MSIX_ENABLED;
+                       adapter->ahw->num_msix = num_msix;
+                       dev_info(&pdev->dev, "using msi-x interrupts\n");
+                       return 0;
+               } else if (err > 0) {
+                       pci_disable_msix(pdev);
+
+                       dev_info(&pdev->dev,
+                                "Unable to allocate %d MSI-X vectors, Available vectors %d\n",
+                                num_msix, err);
+
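+                       /* Retry with fewer vectors: a power of two for 82xx,
+                        * or a power of two plus one reserved vector for 83xx;
+                        * give up below the family's minimum vector count.
+                        */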
+                       if (qlcnic_82xx_check(adapter)) {
+                               num_msix = rounddown_pow_of_two(err);
+                               if (err < QLCNIC_82XX_MINIMUM_VECTOR)
+                                       return -ENOSPC;
+                       } else {
+                               num_msix = rounddown_pow_of_two(err - 1);
+                               num_msix += 1;
+                               if (err < QLCNIC_83XX_MINIMUM_VECTOR)
+                                       return -ENOSPC;
+                       }
+
+                       if (qlcnic_82xx_check(adapter) &&
+                           !qlcnic_check_multi_tx(adapter)) {
+                               adapter->drv_sds_rings = num_msix;
+                               adapter->drv_tx_rings = QLCNIC_SINGLE_RING;
+                       } else {
+                               /* Distribute vectors equally */
+                               adapter->drv_tx_rings = num_msix / 2;
+                               adapter->drv_sds_rings = adapter->drv_tx_rings;
+                       }
+
+                       if (num_msix) {
+                               dev_info(&pdev->dev,
+                                        "Trying to allocate %d MSI-X interrupt vectors\n",
+                                        num_msix);
+                               goto enable_msix;
+                       }
+               } else {
+                       dev_info(&pdev->dev,
+                                "Unable to allocate %d MSI-X vectors, err=%d\n",
+                                num_msix, err);
+                       return err;
+               }
+       }
+
+       return -EIO;
+}
+
+static int qlcnic_82xx_calculate_msix_vector(struct qlcnic_adapter *adapter)
+{
+       int num_msix;
+
+       num_msix = adapter->drv_sds_rings;
+
+       if (qlcnic_check_multi_tx(adapter))
+               num_msix += adapter->drv_tx_rings;
+       else
+               num_msix += QLCNIC_SINGLE_RING;
+
+       return num_msix;
+}
+
+static int qlcnic_enable_msi_legacy(struct qlcnic_adapter *adapter)
+{
+       int err = 0;
+       u32 offset, mask_reg;
+       const struct qlcnic_legacy_intr_set *legacy_intrp;
+       struct qlcnic_hardware_context *ahw = adapter->ahw;
+       struct pci_dev *pdev = adapter->pdev;
+
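+       /* Try MSI first when allowed by the module parameter; if MSI or MSI-X
+        * was explicitly requested but cannot be enabled, fail instead of
+        * silently dropping to legacy INTx.
+        */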
+       if (qlcnic_use_msi && !pci_enable_msi(pdev)) {
+               adapter->flags |= QLCNIC_MSI_ENABLED;
+               offset = msi_tgt_status[adapter->ahw->pci_func];
+               adapter->tgt_status_reg = qlcnic_get_ioaddr(adapter->ahw,
+                                                           offset);
+               dev_info(&pdev->dev, "using msi interrupts\n");
+               adapter->msix_entries[0].vector = pdev->irq;
+               return err;
+       }
+
+       if (qlcnic_use_msi || qlcnic_use_msi_x)
+               return -EOPNOTSUPP;
+
+       legacy_intrp = &legacy_intr[adapter->ahw->pci_func];
+       adapter->ahw->int_vec_bit = legacy_intrp->int_vec_bit;
+       offset = legacy_intrp->tgt_status_reg;
+       adapter->tgt_status_reg = qlcnic_get_ioaddr(ahw, offset);
+       mask_reg = legacy_intrp->tgt_mask_reg;
+       adapter->tgt_mask_reg = qlcnic_get_ioaddr(ahw, mask_reg);
+       adapter->isr_int_vec = qlcnic_get_ioaddr(ahw, ISR_INT_VECTOR);
+       adapter->crb_int_state_reg = qlcnic_get_ioaddr(ahw, ISR_INT_STATE_REG);
+       dev_info(&pdev->dev, "using legacy interrupts\n");
+       adapter->msix_entries[0].vector = pdev->irq;
+       return err;
+}
+
+static int qlcnic_82xx_setup_intr(struct qlcnic_adapter *adapter)
+{
+       int num_msix, err = 0;
+
+       if (adapter->flags & QLCNIC_TSS_RSS) {
+               err = qlcnic_setup_tss_rss_intr(adapter);
+               if (err < 0)
+                       return err;
+               num_msix = adapter->ahw->num_msix;
+       } else {
+               num_msix = qlcnic_82xx_calculate_msix_vector(adapter);
+
+               err = qlcnic_enable_msix(adapter, num_msix);
+               if (err == -ENOMEM)
+                       return err;
+
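+               /* MSI-X is unavailable: disable multi Tx, fall back to a
+                * single SDS ring and try MSI, then legacy interrupts.
+                */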
+               if (!(adapter->flags & QLCNIC_MSIX_ENABLED)) {
+                       qlcnic_disable_multi_tx(adapter);
+                       adapter->drv_sds_rings = QLCNIC_SINGLE_RING;
+
+                       err = qlcnic_enable_msi_legacy(adapter);
+                       if (err)
+                               return err;
+               }
+       }
+
+       return 0;
+}
+
+int qlcnic_82xx_mq_intrpt(struct qlcnic_adapter *adapter, int op_type)
+{
+       struct qlcnic_hardware_context *ahw = adapter->ahw;
+       int err, i;
+
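+       /* A per-vector interrupt table is only needed when multi Tx is in use
+        * with MSI-X outside of diagnostic mode.
+        */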
+       if (qlcnic_check_multi_tx(adapter) &&
+           !ahw->diag_test &&
+           (adapter->flags & QLCNIC_MSIX_ENABLED)) {
+               ahw->intr_tbl = vzalloc(ahw->num_msix *
+                                       sizeof(struct qlcnic_intrpt_config));
+               if (!ahw->intr_tbl)
+                       return -ENOMEM;
+
+               for (i = 0; i < ahw->num_msix; i++) {
+                       ahw->intr_tbl[i].type = QLCNIC_INTRPT_MSIX;
+                       ahw->intr_tbl[i].id = i;
+                       ahw->intr_tbl[i].src = 0;
+               }
+
+               err = qlcnic_82xx_config_intrpt(adapter, 1);
+               if (err)
+                       dev_err(&adapter->pdev->dev,
+                               "Failed to configure Interrupt for %d vector\n",
+                               ahw->num_msix);
+               return err;
+       }
+
+       return 0;
+}
+
+void qlcnic_teardown_intr(struct qlcnic_adapter *adapter)
+{
+       if (adapter->flags & QLCNIC_MSIX_ENABLED)
+               pci_disable_msix(adapter->pdev);
+       if (adapter->flags & QLCNIC_MSI_ENABLED)
+               pci_disable_msi(adapter->pdev);
+
+       kfree(adapter->msix_entries);
+       adapter->msix_entries = NULL;
+
+       if (adapter->ahw->intr_tbl) {
+               vfree(adapter->ahw->intr_tbl);
+               adapter->ahw->intr_tbl = NULL;
+       }
+}
+
+static void qlcnic_cleanup_pci_map(struct qlcnic_hardware_context *ahw)
+{
+       if (ahw->pci_base0 != NULL)
+               iounmap(ahw->pci_base0);
+}
+
+static int qlcnic_get_act_pci_func(struct qlcnic_adapter *adapter)
+{
+       struct qlcnic_hardware_context *ahw = adapter->ahw;
+       struct qlcnic_pci_info *pci_info;
+       int ret;
+
+       if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED)) {
+               switch (ahw->port_type) {
+               case QLCNIC_GBE:
+                       ahw->total_nic_func = QLCNIC_NIU_MAX_GBE_PORTS;
+                       break;
+               case QLCNIC_XGBE:
+                       ahw->total_nic_func = QLCNIC_NIU_MAX_XG_PORTS;
+                       break;
+               }
+               return 0;
+       }
+
+       if (ahw->op_mode == QLCNIC_MGMT_FUNC)
+               return 0;
+
+       pci_info = kcalloc(ahw->max_vnic_func, sizeof(*pci_info), GFP_KERNEL);
+       if (!pci_info)
+               return -ENOMEM;
+
+       ret = qlcnic_get_pci_info(adapter, pci_info);
+       kfree(pci_info);
+       return ret;
+}
+
+static bool qlcnic_port_eswitch_cfg_capability(struct qlcnic_adapter *adapter)
+{
+       bool ret = false;
+
+       if (qlcnic_84xx_check(adapter)) {
+               ret = true;
+       } else if (qlcnic_83xx_check(adapter)) {
+               if (adapter->ahw->extra_capability[0] &
+                   QLCNIC_FW_CAPABILITY_2_PER_PORT_ESWITCH_CFG)
+                       ret = true;
+               else
+                       ret = false;
+       }
+
+       return ret;
+}
+
+int qlcnic_init_pci_info(struct qlcnic_adapter *adapter)
+{
+       struct qlcnic_hardware_context *ahw = adapter->ahw;
+       struct qlcnic_pci_info *pci_info;
+       int i, id = 0, ret = 0, j = 0;
+       u16 act_pci_func;
+       u8 pfn;
+
+       pci_info = kcalloc(ahw->max_vnic_func, sizeof(*pci_info), GFP_KERNEL);
+       if (!pci_info)
+               return -ENOMEM;
+
+       ret = qlcnic_get_pci_info(adapter, pci_info);
+       if (ret)
+               goto err_pci_info;
+
+       act_pci_func = ahw->total_nic_func;
+
+       adapter->npars = kcalloc(act_pci_func,
+                                sizeof(struct qlcnic_npar_info), GFP_KERNEL);
+       if (!adapter->npars) {
+               ret = -ENOMEM;
+               goto err_pci_info;
+       }
+
+       adapter->eswitch = kcalloc(QLCNIC_NIU_MAX_XG_PORTS,
+                                  sizeof(struct qlcnic_eswitch), GFP_KERNEL);
+       if (!adapter->eswitch) {
+               ret = -ENOMEM;
+               goto err_npars;
+       }
+
+       for (i = 0; i < ahw->max_vnic_func; i++) {
+               pfn = pci_info[i].id;
+
+               if (pfn >= ahw->max_vnic_func) {
+                       ret = -EINVAL;
+                       dev_err(&adapter->pdev->dev, "%s: Invalid function 0x%x, max 0x%x\n",
+                               __func__, pfn, ahw->max_vnic_func);
+                       goto err_eswitch;
+               }
+
+               if (!pci_info[i].active ||
+                   (pci_info[i].type != QLCNIC_TYPE_NIC))
+                       continue;
+
+               if (qlcnic_port_eswitch_cfg_capability(adapter)) {
+                       if (!qlcnic_83xx_set_port_eswitch_status(adapter, pfn,
+                                                                &id))
+                               adapter->npars[j].eswitch_status = true;
+                       else
+                               continue;
+               } else {
+                       adapter->npars[j].eswitch_status = true;
+               }
+
+               adapter->npars[j].pci_func = pfn;
+               adapter->npars[j].active = (u8)pci_info[i].active;
+               adapter->npars[j].type = (u8)pci_info[i].type;
+               adapter->npars[j].phy_port = (u8)pci_info[i].default_port;
+               adapter->npars[j].min_bw = pci_info[i].tx_min_bw;
+               adapter->npars[j].max_bw = pci_info[i].tx_max_bw;
+
+               memcpy(&adapter->npars[j].mac, &pci_info[i].mac, ETH_ALEN);
+               j++;
+       }
+
+       /* Update eSwitch status for adapters without per port eSwitch
+        * configuration capability
+        */
+       if (!qlcnic_port_eswitch_cfg_capability(adapter)) {
+               for (i = 0; i < QLCNIC_NIU_MAX_XG_PORTS; i++)
+                       adapter->eswitch[i].flags |= QLCNIC_SWITCH_ENABLE;
+       }
+
+       kfree(pci_info);
+       return 0;
+
+err_eswitch:
+       kfree(adapter->eswitch);
+       adapter->eswitch = NULL;
+err_npars:
+       kfree(adapter->npars);
+       adapter->npars = NULL;
+err_pci_info:
+       kfree(pci_info);
+
+       return ret;
+}
+
+static int
+qlcnic_set_function_modes(struct qlcnic_adapter *adapter)
+{
+       u8 id;
+       int ret;
+       u32 data = QLCNIC_MGMT_FUNC;
+       struct qlcnic_hardware_context *ahw = adapter->ahw;
+
+       ret = qlcnic_api_lock(adapter);
+       if (ret)
+               goto err_lock;
+
+       id = ahw->pci_func;
+       data = QLC_SHARED_REG_RD32(adapter, QLCNIC_DRV_OP_MODE);
+       data = (data & ~QLC_DEV_SET_DRV(0xf, id)) |
+              QLC_DEV_SET_DRV(QLCNIC_MGMT_FUNC, id);
+       QLC_SHARED_REG_WR32(adapter, QLCNIC_DRV_OP_MODE, data);
+       qlcnic_api_unlock(adapter);
+err_lock:
+       return ret;
+}
+
+static void qlcnic_check_vf(struct qlcnic_adapter *adapter,
+                           const struct pci_device_id *ent)
+{
+       u32 op_mode, priv_level;
+
+       /* Determine FW API version */
+       adapter->ahw->fw_hal_version = QLC_SHARED_REG_RD32(adapter,
+                                                          QLCNIC_FW_API);
+
+       /* Find PCI function number */
+       qlcnic_get_func_no(adapter);
+
+       /* Determine function privilege level */
+       op_mode = QLC_SHARED_REG_RD32(adapter, QLCNIC_DRV_OP_MODE);
+       if (op_mode == QLC_DEV_DRV_DEFAULT)
+               priv_level = QLCNIC_MGMT_FUNC;
+       else
+               priv_level = QLC_DEV_GET_DRV(op_mode, adapter->ahw->pci_func);
+
+       if (priv_level == QLCNIC_NON_PRIV_FUNC) {
+               adapter->ahw->op_mode = QLCNIC_NON_PRIV_FUNC;
+               dev_info(&adapter->pdev->dev,
+                       "HAL Version: %d, Non Privileged function\n",
+                        adapter->ahw->fw_hal_version);
+               adapter->nic_ops = &qlcnic_vf_ops;
+       } else {
+               adapter->nic_ops = &qlcnic_ops;
+       }
+}
+
+#define QLCNIC_82XX_BAR0_LENGTH 0x00200000UL
+#define QLCNIC_83XX_BAR0_LENGTH 0x4000
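+/* Minimum BAR0 size expected for each adapter family; used to validate the
+ * PCI memory resource before it is mapped.
+ */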
+static void qlcnic_get_bar_length(u32 dev_id, ulong *bar)
+{
+       switch (dev_id) {
+       case PCI_DEVICE_ID_QLOGIC_QLE824X:
+               *bar = QLCNIC_82XX_BAR0_LENGTH;
+               break;
+       case PCI_DEVICE_ID_QLOGIC_QLE834X:
+       case PCI_DEVICE_ID_QLOGIC_QLE8830:
+       case PCI_DEVICE_ID_QLOGIC_QLE844X:
+       case PCI_DEVICE_ID_QLOGIC_VF_QLE834X:
+       case PCI_DEVICE_ID_QLOGIC_VF_QLE844X:
+       case PCI_DEVICE_ID_QLOGIC_VF_QLE8C30:
+               *bar = QLCNIC_83XX_BAR0_LENGTH;
+               break;
+       default:
+               *bar = 0;
+       }
+}
+
+static int qlcnic_setup_pci_map(struct pci_dev *pdev,
+                               struct qlcnic_hardware_context *ahw)
+{
+       u32 offset;
+       void __iomem *mem_ptr0 = NULL;
+       unsigned long mem_len, pci_len0 = 0, bar0_len;
+
+       /* remap phys address */
+       mem_len = pci_resource_len(pdev, 0);
+
+       qlcnic_get_bar_length(pdev->device, &bar0_len);
+       if (mem_len >= bar0_len) {
+               mem_ptr0 = pci_ioremap_bar(pdev, 0);
+               if (mem_ptr0 == NULL) {
+                       dev_err(&pdev->dev, "failed to map PCI bar 0\n");
+                       return -EIO;
+               }
+               pci_len0 = mem_len;
+       } else {
+               return -EIO;
+       }
+
+       dev_info(&pdev->dev, "%dKB memory map\n", (int)(mem_len >> 10));
+
+       ahw->pci_base0 = mem_ptr0;
+       ahw->pci_len0 = pci_len0;
+       offset = QLCNIC_PCIX_PS_REG(PCIX_OCM_WINDOW_REG(ahw->pci_func));
+       qlcnic_get_ioaddr(ahw, offset);
+
+       return 0;
+}
+
+static bool qlcnic_validate_subsystem_id(struct qlcnic_adapter *adapter,
+                                        int index)
+{
+       struct pci_dev *pdev = adapter->pdev;
+       unsigned short subsystem_vendor;
+       bool ret = true;
+
+       subsystem_vendor = pdev->subsystem_vendor;
+
+       if (pdev->device == PCI_DEVICE_ID_QLOGIC_QLE824X ||
+           pdev->device == PCI_DEVICE_ID_QLOGIC_QLE834X) {
+               if (qlcnic_boards[index].sub_vendor == subsystem_vendor &&
+                   qlcnic_boards[index].sub_device == pdev->subsystem_device)
+                       ret = true;
+               else
+                       ret = false;
+       }
+
+       return ret;
+}
+
+static void qlcnic_get_board_name(struct qlcnic_adapter *adapter, char *name)
+{
+       struct pci_dev *pdev = adapter->pdev;
+       int i, found = 0;
+
+       for (i = 0; i < NUM_SUPPORTED_BOARDS; ++i) {
+               if (qlcnic_boards[i].vendor == pdev->vendor &&
+                   qlcnic_boards[i].device == pdev->device &&
+                   qlcnic_validate_subsystem_id(adapter, i)) {
+                       found = 1;
+                       break;
+               }
+       }
+
+       if (!found)
+               sprintf(name, "%pM Gigabit Ethernet", adapter->mac_addr);
+       else
+               sprintf(name, "%pM: %s", adapter->mac_addr,
+                       qlcnic_boards[i].short_name);
+}
+
+static void
+qlcnic_check_options(struct qlcnic_adapter *adapter)
+{
+       int err;
+       u32 fw_major, fw_minor, fw_build, prev_fw_version;
+       struct pci_dev *pdev = adapter->pdev;
+       struct qlcnic_hardware_context *ahw = adapter->ahw;
+       struct qlcnic_fw_dump *fw_dump = &ahw->fw_dump;
+
+       prev_fw_version = adapter->fw_version;
+
+       fw_major = QLC_SHARED_REG_RD32(adapter, QLCNIC_FW_VERSION_MAJOR);
+       fw_minor = QLC_SHARED_REG_RD32(adapter, QLCNIC_FW_VERSION_MINOR);
+       fw_build = QLC_SHARED_REG_RD32(adapter, QLCNIC_FW_VERSION_SUB);
+
+       adapter->fw_version = QLCNIC_VERSION_CODE(fw_major, fw_minor, fw_build);
+
+       err = qlcnic_get_board_info(adapter);
+       if (err) {
+               dev_err(&pdev->dev, "Error getting board config info.\n");
+               return;
+       }
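+       /* For privileged functions, refresh the minidump template on first
+        * load or when a newer firmware version is detected.
+        */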
+       if (ahw->op_mode != QLCNIC_NON_PRIV_FUNC) {
+               if (fw_dump->tmpl_hdr == NULL ||
+                   adapter->fw_version > prev_fw_version) {
+                       vfree(fw_dump->tmpl_hdr);
+                       if (!qlcnic_fw_cmd_get_minidump_temp(adapter))
+                               dev_info(&pdev->dev,
+                                       "Supports FW dump capability\n");
+               }
+       }
+
+       dev_info(&pdev->dev, "Driver v%s, firmware v%d.%d.%d\n",
+                QLCNIC_LINUX_VERSIONID, fw_major, fw_minor, fw_build);
+
+       if (adapter->ahw->port_type == QLCNIC_XGBE) {
+               if (adapter->flags & QLCNIC_ESWITCH_ENABLED) {
+                       adapter->num_rxd = DEFAULT_RCV_DESCRIPTORS_VF;
+                       adapter->max_rxd = MAX_RCV_DESCRIPTORS_VF;
+               } else {
+                       adapter->num_rxd = DEFAULT_RCV_DESCRIPTORS_10G;
+                       adapter->max_rxd = MAX_RCV_DESCRIPTORS_10G;
+               }
+
+               adapter->num_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_10G;
+               adapter->max_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_10G;
+       } else if (adapter->ahw->port_type == QLCNIC_GBE) {
+               adapter->num_rxd = DEFAULT_RCV_DESCRIPTORS_1G;
+               adapter->num_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_1G;
+               adapter->max_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_1G;
+               adapter->max_rxd = MAX_RCV_DESCRIPTORS_1G;
+       }
+
+       adapter->ahw->msix_supported = !!qlcnic_use_msi_x;
+
+       adapter->num_txd = MAX_CMD_DESCRIPTORS;
+
+       adapter->max_rds_rings = MAX_RDS_RINGS;
+}
+
+static int
+qlcnic_initialize_nic(struct qlcnic_adapter *adapter)
+{
+       struct qlcnic_info nic_info;
+       int err = 0;
+
+       memset(&nic_info, 0, sizeof(struct qlcnic_info));
+       err = qlcnic_get_nic_info(adapter, &nic_info, adapter->ahw->pci_func);
+       if (err)
+               return err;
+
+       adapter->ahw->physical_port = (u8)nic_info.phys_port;
+       adapter->ahw->switch_mode = nic_info.switch_mode;
+       adapter->ahw->max_tx_ques = nic_info.max_tx_ques;
+       adapter->ahw->max_rx_ques = nic_info.max_rx_ques;
+       adapter->ahw->capabilities = nic_info.capabilities;
+
+       if (adapter->ahw->capabilities & QLCNIC_FW_CAPABILITY_MORE_CAPS) {
+               u32 temp;
+               temp = QLCRD32(adapter, CRB_FW_CAPABILITIES_2, &err);
+               if (err == -EIO)
+                       return err;
+               adapter->ahw->extra_capability[0] = temp;
+       } else {
+               adapter->ahw->extra_capability[0] = 0;
+       }
+
+       adapter->ahw->max_mac_filters = nic_info.max_mac_filters;
+       adapter->ahw->max_mtu = nic_info.max_mtu;
+
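+       /* BIT_6 of the capability word indicates the adapter is partitioned
+        * behind an embedded switch (vNIC mode) and selects the vNIC Tx/SDS
+        * ring limits.
+        */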
+       if (adapter->ahw->capabilities & BIT_6) {
+               adapter->flags |= QLCNIC_ESWITCH_ENABLED;
+               adapter->ahw->nic_mode = QLCNIC_VNIC_MODE;
+               adapter->max_tx_rings = QLCNIC_MAX_HW_VNIC_TX_RINGS;
+               adapter->max_sds_rings = QLCNIC_MAX_VNIC_SDS_RINGS;
+
+               dev_info(&adapter->pdev->dev, "vNIC mode enabled.\n");
+       } else {
+               adapter->ahw->nic_mode = QLCNIC_DEFAULT_MODE;
+               adapter->max_tx_rings = QLCNIC_MAX_HW_TX_RINGS;
+               adapter->max_sds_rings = QLCNIC_MAX_SDS_RINGS;
+               adapter->flags &= ~QLCNIC_ESWITCH_ENABLED;
+       }
+
+       return err;
+}
+
+void qlcnic_set_vlan_config(struct qlcnic_adapter *adapter,
+                           struct qlcnic_esw_func_cfg *esw_cfg)
+{
+       if (esw_cfg->discard_tagged)
+               adapter->flags &= ~QLCNIC_TAGGING_ENABLED;
+       else
+               adapter->flags |= QLCNIC_TAGGING_ENABLED;
+
+       if (esw_cfg->vlan_id) {
+               adapter->rx_pvid = esw_cfg->vlan_id;
+               adapter->tx_pvid = esw_cfg->vlan_id;
+       } else {
+               adapter->rx_pvid = 0;
+               adapter->tx_pvid = 0;
+       }
+}
+
+static int
+qlcnic_vlan_rx_add(struct net_device *netdev, __be16 proto, u16 vid)
+{
+       struct qlcnic_adapter *adapter = netdev_priv(netdev);
+       int err;
+
+       if (qlcnic_sriov_vf_check(adapter)) {
+               err = qlcnic_sriov_cfg_vf_guest_vlan(adapter, vid, 1);
+               if (err) {
+                       netdev_err(netdev,
+                                  "Cannot add VLAN filter for VLAN id %d, err=%d\n",
+                                  vid, err);
+                       return err;
+               }
+       }
+
+       set_bit(vid, adapter->vlans);
+       return 0;
+}
+
+static int
+qlcnic_vlan_rx_del(struct net_device *netdev, __be16 proto, u16 vid)
+{
+       struct qlcnic_adapter *adapter = netdev_priv(netdev);
+       int err;
+
+       if (qlcnic_sriov_vf_check(adapter)) {
+               err = qlcnic_sriov_cfg_vf_guest_vlan(adapter, vid, 0);
+               if (err) {
+                       netdev_err(netdev,
+                                  "Cannot delete VLAN filter for VLAN id %d, err=%d\n",
+                                  vid, err);
+                       return err;
+               }
+       }
+
+       qlcnic_restore_indev_addr(netdev, NETDEV_DOWN);
+       clear_bit(vid, adapter->vlans);
+       return 0;
+}
+
+void qlcnic_set_eswitch_port_features(struct qlcnic_adapter *adapter,
+                                     struct qlcnic_esw_func_cfg *esw_cfg)
+{
+       adapter->flags &= ~(QLCNIC_MACSPOOF | QLCNIC_MAC_OVERRIDE_DISABLED |
+                               QLCNIC_PROMISC_DISABLED);
+
+       if (esw_cfg->mac_anti_spoof)
+               adapter->flags |= QLCNIC_MACSPOOF;
+
+       if (!esw_cfg->mac_override)
+               adapter->flags |= QLCNIC_MAC_OVERRIDE_DISABLED;
+
+       if (!esw_cfg->promisc_mode)
+               adapter->flags |= QLCNIC_PROMISC_DISABLED;
+}
+
+int qlcnic_set_eswitch_port_config(struct qlcnic_adapter *adapter)
+{
+       struct qlcnic_esw_func_cfg esw_cfg;
+
+       if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED))
+               return 0;
+
+       esw_cfg.pci_func = adapter->ahw->pci_func;
+       if (qlcnic_get_eswitch_port_config(adapter, &esw_cfg))
+               return -EIO;
+       qlcnic_set_vlan_config(adapter, &esw_cfg);
+       qlcnic_set_eswitch_port_features(adapter, &esw_cfg);
+       qlcnic_set_netdev_features(adapter, &esw_cfg);
+
+       return 0;
+}
+
+void qlcnic_set_netdev_features(struct qlcnic_adapter *adapter,
+                               struct qlcnic_esw_func_cfg *esw_cfg)
+{
+       struct net_device *netdev = adapter->netdev;
+
+       if (qlcnic_83xx_check(adapter))
+               return;
+
+       adapter->offload_flags = esw_cfg->offload_flags;
+       adapter->flags |= QLCNIC_APP_CHANGED_FLAGS;
+       netdev_update_features(netdev);
+       adapter->flags &= ~QLCNIC_APP_CHANGED_FLAGS;
+}
+
+static int
+qlcnic_check_eswitch_mode(struct qlcnic_adapter *adapter)
+{
+       u32 op_mode, priv_level;
+       int err = 0;
+
+       err = qlcnic_initialize_nic(adapter);
+       if (err)
+               return err;
+
+       if (adapter->flags & QLCNIC_ADAPTER_INITIALIZED)
+               return 0;
+
+       op_mode = QLC_SHARED_REG_RD32(adapter, QLCNIC_DRV_OP_MODE);
+       priv_level = QLC_DEV_GET_DRV(op_mode, adapter->ahw->pci_func);
+
+       if (op_mode == QLC_DEV_DRV_DEFAULT)
+               priv_level = QLCNIC_MGMT_FUNC;
+       else
+               priv_level = QLC_DEV_GET_DRV(op_mode, adapter->ahw->pci_func);
+
+       if (adapter->flags & QLCNIC_ESWITCH_ENABLED) {
+               if (priv_level == QLCNIC_MGMT_FUNC) {
+                       adapter->ahw->op_mode = QLCNIC_MGMT_FUNC;
+                       err = qlcnic_init_pci_info(adapter);
+                       if (err)
+                               return err;
+                       /* Set privilege level for other functions */
+                       qlcnic_set_function_modes(adapter);
+                       dev_info(&adapter->pdev->dev,
+                               "HAL Version: %d, Management function\n",
+                                adapter->ahw->fw_hal_version);
+               } else if (priv_level == QLCNIC_PRIV_FUNC) {
+                       adapter->ahw->op_mode = QLCNIC_PRIV_FUNC;
+                       dev_info(&adapter->pdev->dev,
+                               "HAL Version: %d, Privileged function\n",
+                                adapter->ahw->fw_hal_version);
+               }
+       } else {
+               adapter->ahw->nic_mode = QLCNIC_DEFAULT_MODE;
+       }
+
+       adapter->flags |= QLCNIC_ADAPTER_INITIALIZED;
+
+       return err;
+}
+
+int qlcnic_set_default_offload_settings(struct qlcnic_adapter *adapter)
+{
+       struct qlcnic_esw_func_cfg esw_cfg;
+       struct qlcnic_npar_info *npar;
+       u8 i;
+
+       if (adapter->need_fw_reset)
+               return 0;
+
+       for (i = 0; i < adapter->ahw->total_nic_func; i++) {
+               if (!adapter->npars[i].eswitch_status)
+                       continue;
+
+               memset(&esw_cfg, 0, sizeof(struct qlcnic_esw_func_cfg));
+               esw_cfg.pci_func = adapter->npars[i].pci_func;
+               esw_cfg.mac_override = BIT_0;
+               esw_cfg.promisc_mode = BIT_0;
+               if (qlcnic_82xx_check(adapter)) {
+                       esw_cfg.offload_flags = BIT_0;
+                       if (QLCNIC_IS_TSO_CAPABLE(adapter))
+                               esw_cfg.offload_flags |= (BIT_1 | BIT_2);
+               }
+               if (qlcnic_config_switch_port(adapter, &esw_cfg))
+                       return -EIO;
+               npar = &adapter->npars[i];
+               npar->pvid = esw_cfg.vlan_id;
+               npar->mac_override = esw_cfg.mac_override;
+               npar->mac_anti_spoof = esw_cfg.mac_anti_spoof;
+               npar->discard_tagged = esw_cfg.discard_tagged;
+               npar->promisc_mode = esw_cfg.promisc_mode;
+               npar->offload_flags = esw_cfg.offload_flags;
+       }
+
+       return 0;
+}
+
+static int
+qlcnic_reset_eswitch_config(struct qlcnic_adapter *adapter,
+                       struct qlcnic_npar_info *npar, int pci_func)
+{
+       struct qlcnic_esw_func_cfg esw_cfg;
+       esw_cfg.op_mode = QLCNIC_PORT_DEFAULTS;
+       esw_cfg.pci_func = pci_func;
+       esw_cfg.vlan_id = npar->pvid;
+       esw_cfg.mac_override = npar->mac_override;
+       esw_cfg.discard_tagged = npar->discard_tagged;
+       esw_cfg.mac_anti_spoof = npar->mac_anti_spoof;
+       esw_cfg.offload_flags = npar->offload_flags;
+       esw_cfg.promisc_mode = npar->promisc_mode;
+       if (qlcnic_config_switch_port(adapter, &esw_cfg))
+               return -EIO;
+
+       esw_cfg.op_mode = QLCNIC_ADD_VLAN;
+       if (qlcnic_config_switch_port(adapter, &esw_cfg))
+               return -EIO;
+
+       return 0;
+}
+
+int qlcnic_reset_npar_config(struct qlcnic_adapter *adapter)
+{
+       int i, err;
+       struct qlcnic_npar_info *npar;
+       struct qlcnic_info nic_info;
+       u8 pci_func;
+
+       if (qlcnic_82xx_check(adapter))
+               if (!adapter->need_fw_reset)
+                       return 0;
+
+       /* Set the NPAR config data after FW reset */
+       for (i = 0; i < adapter->ahw->total_nic_func; i++) {
+               npar = &adapter->npars[i];
+               pci_func = npar->pci_func;
+               if (!adapter->npars[i].eswitch_status)
+                       continue;
+
+               memset(&nic_info, 0, sizeof(struct qlcnic_info));
+               err = qlcnic_get_nic_info(adapter, &nic_info, pci_func);
+               if (err)
+                       return err;
+               nic_info.min_tx_bw = npar->min_bw;
+               nic_info.max_tx_bw = npar->max_bw;
+               err = qlcnic_set_nic_info(adapter, &nic_info);
+               if (err)
+                       return err;
+
+               if (npar->enable_pm) {
+                       err = qlcnic_config_port_mirroring(adapter,
+                                                          npar->dest_npar, 1,
+                                                          pci_func);
+                       if (err)
+                               return err;
+               }
+               err = qlcnic_reset_eswitch_config(adapter, npar, pci_func);
+               if (err)
+                       return err;
+       }
+       return 0;
+}
+
+static int qlcnic_check_npar_opertional(struct qlcnic_adapter *adapter)
+{
+       u8 npar_opt_timeo = QLCNIC_DEV_NPAR_OPER_TIMEO;
+       u32 npar_state;
+
+       if (adapter->ahw->op_mode == QLCNIC_MGMT_FUNC)
+               return 0;
+
+       npar_state = QLC_SHARED_REG_RD32(adapter,
+                                        QLCNIC_CRB_DEV_NPAR_STATE);
+       while (npar_state != QLCNIC_DEV_NPAR_OPER && --npar_opt_timeo) {
+               msleep(1000);
+               npar_state = QLC_SHARED_REG_RD32(adapter,
+                                                QLCNIC_CRB_DEV_NPAR_STATE);
+       }
+       if (!npar_opt_timeo) {
+               dev_err(&adapter->pdev->dev,
+                       "Timed out waiting for NPAR state to become operational\n");
+               return -EIO;
+       }
+       return 0;
+}
+
+static int
+qlcnic_set_mgmt_operations(struct qlcnic_adapter *adapter)
+{
+       int err;
+
+       if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED) ||
+           adapter->ahw->op_mode != QLCNIC_MGMT_FUNC)
+               return 0;
+
+       err = qlcnic_set_default_offload_settings(adapter);
+       if (err)
+               return err;
+
+       err = qlcnic_reset_npar_config(adapter);
+       if (err)
+               return err;
+
+       qlcnic_dev_set_npar_ready(adapter);
+
+       return err;
+}
+
+static int qlcnic_82xx_start_firmware(struct qlcnic_adapter *adapter)
+{
+       int err;
+
+       err = qlcnic_can_start_firmware(adapter);
+       if (err < 0)
+               return err;
+       else if (!err)
+               goto check_fw_status;
+
+       if (qlcnic_load_fw_file) {
+               qlcnic_request_firmware(adapter);
+       } else {
+               err = qlcnic_check_flash_fw_ver(adapter);
+               if (err)
+                       goto err_out;
+
+               adapter->ahw->fw_type = QLCNIC_FLASH_ROMIMAGE;
+       }
+
+       err = qlcnic_need_fw_reset(adapter);
+       if (err == 0)
+               goto check_fw_status;
+
+       err = qlcnic_pinit_from_rom(adapter);
+       if (err)
+               goto err_out;
+
+       err = qlcnic_load_firmware(adapter);
+       if (err)
+               goto err_out;
+
+       qlcnic_release_firmware(adapter);
+       QLCWR32(adapter, CRB_DRIVER_VERSION, QLCNIC_DRIVER_VERSION);
+
+check_fw_status:
+       err = qlcnic_check_fw_status(adapter);
+       if (err)
+               goto err_out;
+
+       QLC_SHARED_REG_WR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_READY);
+       qlcnic_idc_debug_info(adapter, 1);
+       err = qlcnic_check_eswitch_mode(adapter);
+       if (err) {
+               dev_err(&adapter->pdev->dev,
+                       "Memory allocation failed for eswitch\n");
+               goto err_out;
+       }
+       err = qlcnic_set_mgmt_operations(adapter);
+       if (err)
+               goto err_out;
+
+       qlcnic_check_options(adapter);
+       adapter->need_fw_reset = 0;
+
+       qlcnic_release_firmware(adapter);
+       return 0;
+
+err_out:
+       QLC_SHARED_REG_WR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_FAILED);
+       dev_err(&adapter->pdev->dev, "Device state set to failed\n");
+
+       qlcnic_release_firmware(adapter);
+       return err;
+}
+
+static int
+qlcnic_request_irq(struct qlcnic_adapter *adapter)
+{
+       irq_handler_t handler;
+       struct qlcnic_host_sds_ring *sds_ring;
+       struct qlcnic_host_tx_ring *tx_ring;
+       int err, ring, num_sds_rings;
+
+       unsigned long flags = 0;
+       struct net_device *netdev = adapter->netdev;
+       struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
+
+       if (adapter->ahw->diag_test == QLCNIC_INTERRUPT_TEST) {
+               if (qlcnic_82xx_check(adapter))
+                       handler = qlcnic_tmp_intr;
+               else
+                       handler = qlcnic_83xx_tmp_intr;
+               if (!QLCNIC_IS_MSI_FAMILY(adapter))
+                       flags |= IRQF_SHARED;
+
+       } else {
+               if (adapter->flags & QLCNIC_MSIX_ENABLED)
+                       handler = qlcnic_msix_intr;
+               else if (adapter->flags & QLCNIC_MSI_ENABLED)
+                       handler = qlcnic_msi_intr;
+               else {
+                       flags |= IRQF_SHARED;
+                       if (qlcnic_82xx_check(adapter))
+                               handler = qlcnic_intr;
+                       else
+                               handler = qlcnic_83xx_intr;
+               }
+       }
+       adapter->irq = netdev->irq;
+
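+       /* Outside of loopback tests, request one interrupt per SDS ring and,
+        * where Tx rings own dedicated vectors, one per Tx ring.
+        */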
+       if (adapter->ahw->diag_test != QLCNIC_LOOPBACK_TEST) {
+               if (qlcnic_82xx_check(adapter) ||
+                   (qlcnic_83xx_check(adapter) &&
+                    (adapter->flags & QLCNIC_MSIX_ENABLED))) {
+                       num_sds_rings = adapter->drv_sds_rings;
+                       for (ring = 0; ring < num_sds_rings; ring++) {
+                               sds_ring = &recv_ctx->sds_rings[ring];
+                               if (qlcnic_82xx_check(adapter) &&
+                                   !qlcnic_check_multi_tx(adapter) &&
+                                   (ring == (num_sds_rings - 1))) {
+                                       if (!(adapter->flags &
+                                             QLCNIC_MSIX_ENABLED))
+                                               snprintf(sds_ring->name,
+                                                        sizeof(sds_ring->name),
+                                                        "qlcnic");
+                                       else
+                                               snprintf(sds_ring->name,
+                                                        sizeof(sds_ring->name),
+                                                        "%s-tx-0-rx-%d",
+                                                        netdev->name, ring);
+                               } else {
+                                       snprintf(sds_ring->name,
+                                                sizeof(sds_ring->name),
+                                                "%s-rx-%d",
+                                                netdev->name, ring);
+                               }
+                               err = request_irq(sds_ring->irq, handler, flags,
+                                                 sds_ring->name, sds_ring);
+                               if (err)
+                                       return err;
+                       }
+               }
+               if ((qlcnic_82xx_check(adapter) &&
+                    qlcnic_check_multi_tx(adapter)) ||
+                   (qlcnic_83xx_check(adapter) &&
+                    (adapter->flags & QLCNIC_MSIX_ENABLED) &&
+                    !(adapter->flags & QLCNIC_TX_INTR_SHARED))) {
+                       handler = qlcnic_msix_tx_intr;
+                       for (ring = 0; ring < adapter->drv_tx_rings;
+                            ring++) {
+                               tx_ring = &adapter->tx_ring[ring];
+                               snprintf(tx_ring->name, sizeof(tx_ring->name),
+                                        "%s-tx-%d", netdev->name, ring);
+                               err = request_irq(tx_ring->irq, handler, flags,
+                                                 tx_ring->name, tx_ring);
+                               if (err)
+                                       return err;
+                       }
+               }
+       }
+       return 0;
+}
+
+static void
+qlcnic_free_irq(struct qlcnic_adapter *adapter)
+{
+       int ring;
+       struct qlcnic_host_sds_ring *sds_ring;
+       struct qlcnic_host_tx_ring *tx_ring;
+
+       struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
+
+       if (adapter->ahw->diag_test != QLCNIC_LOOPBACK_TEST) {
+               if (qlcnic_82xx_check(adapter) ||
+                   (qlcnic_83xx_check(adapter) &&
+                    (adapter->flags & QLCNIC_MSIX_ENABLED))) {
+                       for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
+                               sds_ring = &recv_ctx->sds_rings[ring];
+                               free_irq(sds_ring->irq, sds_ring);
+                       }
+               }
+               if ((qlcnic_83xx_check(adapter) &&
+                    !(adapter->flags & QLCNIC_TX_INTR_SHARED)) ||
+                   (qlcnic_82xx_check(adapter) &&
+                    qlcnic_check_multi_tx(adapter))) {
+                       for (ring = 0; ring < adapter->drv_tx_rings;
+                            ring++) {
+                               tx_ring = &adapter->tx_ring[ring];
+                               if (tx_ring->irq)
+                                       free_irq(tx_ring->irq, tx_ring);
+                       }
+               }
+       }
+}
+
+static void qlcnic_get_lro_mss_capability(struct qlcnic_adapter *adapter)
+{
+       u32 capab = 0;
+
+       if (qlcnic_82xx_check(adapter)) {
+               if (adapter->ahw->extra_capability[0] &
+                   QLCNIC_FW_CAPABILITY_2_LRO_MAX_TCP_SEG)
+                       adapter->flags |= QLCNIC_FW_LRO_MSS_CAP;
+       } else {
+               capab = adapter->ahw->capabilities;
+               if (QLC_83XX_GET_FW_LRO_MSS_CAPABILITY(capab))
+                       adapter->flags |= QLCNIC_FW_LRO_MSS_CAP;
+       }
+}
+
+static int qlcnic_config_def_intr_coalesce(struct qlcnic_adapter *adapter)
+{
+       struct qlcnic_hardware_context *ahw = adapter->ahw;
+       int err;
+
+       /* Initialize interrupt coalesce parameters */
+       ahw->coal.flag = QLCNIC_INTR_DEFAULT;
+
+       if (qlcnic_83xx_check(adapter)) {
+               ahw->coal.type = QLCNIC_INTR_COAL_TYPE_RX_TX;
+               ahw->coal.tx_time_us = QLCNIC_DEF_INTR_COALESCE_TX_TIME_US;
+               ahw->coal.tx_packets = QLCNIC_DEF_INTR_COALESCE_TX_PACKETS;
+               ahw->coal.rx_time_us = QLCNIC_DEF_INTR_COALESCE_RX_TIME_US;
+               ahw->coal.rx_packets = QLCNIC_DEF_INTR_COALESCE_RX_PACKETS;
+
+               err = qlcnic_83xx_set_rx_tx_intr_coal(adapter);
+       } else {
+               ahw->coal.type = QLCNIC_INTR_COAL_TYPE_RX;
+               ahw->coal.rx_time_us = QLCNIC_DEF_INTR_COALESCE_RX_TIME_US;
+               ahw->coal.rx_packets = QLCNIC_DEF_INTR_COALESCE_RX_PACKETS;
+
+               err = qlcnic_82xx_set_rx_coalesce(adapter);
+       }
+
+       return err;
+}
+
+int __qlcnic_up(struct qlcnic_adapter *adapter, struct net_device *netdev)
+{
+       int ring;
+       struct qlcnic_host_rds_ring *rds_ring;
+
+       if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
+               return -EIO;
+
+       if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
+               return 0;
+
+       if (qlcnic_set_eswitch_port_config(adapter))
+               return -EIO;
+
+       qlcnic_get_lro_mss_capability(adapter);
+
+       if (qlcnic_fw_create_ctx(adapter))
+               return -EIO;
+
+       for (ring = 0; ring < adapter->max_rds_rings; ring++) {
+               rds_ring = &adapter->recv_ctx->rds_rings[ring];
+               qlcnic_post_rx_buffers(adapter, rds_ring, ring);
+       }
+
+       qlcnic_set_multi(netdev);
+       qlcnic_fw_cmd_set_mtu(adapter, netdev->mtu);
+
+       adapter->ahw->linkup = 0;
+
+       if (adapter->drv_sds_rings > 1)
+               qlcnic_config_rss(adapter, 1);
+
+       qlcnic_config_def_intr_coalesce(adapter);
+
+       if (netdev->features & NETIF_F_LRO)
+               qlcnic_config_hw_lro(adapter, QLCNIC_LRO_ENABLED);
+
+       set_bit(__QLCNIC_DEV_UP, &adapter->state);
+       qlcnic_napi_enable(adapter);
+
+       qlcnic_linkevent_request(adapter, 1);
+
+       adapter->ahw->reset_context = 0;
+       netif_tx_start_all_queues(netdev);
+       return 0;
+}
+
+int qlcnic_up(struct qlcnic_adapter *adapter, struct net_device *netdev)
+{
+       int err = 0;
+
+       rtnl_lock();
+       if (netif_running(netdev))
+               err = __qlcnic_up(adapter, netdev);
+       rtnl_unlock();
+
+       return err;
+}
+
+void __qlcnic_down(struct qlcnic_adapter *adapter, struct net_device *netdev)
+{
+       int ring;
+
+       if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
+               return;
+
+       if (!test_and_clear_bit(__QLCNIC_DEV_UP, &adapter->state))
+               return;
+
+       smp_mb();
+       netif_carrier_off(netdev);
+       adapter->ahw->linkup = 0;
+       netif_tx_disable(netdev);
+
+       qlcnic_free_mac_list(adapter);
+
+       if (adapter->fhash.fnum)
+               qlcnic_delete_lb_filters(adapter);
+
+       qlcnic_nic_set_promisc(adapter, QLCNIC_NIU_NON_PROMISC_MODE);
+       if (qlcnic_sriov_vf_check(adapter))
+               qlcnic_sriov_cleanup_async_list(&adapter->ahw->sriov->bc);
+
+       qlcnic_napi_disable(adapter);
+
+       qlcnic_fw_destroy_ctx(adapter);
+       adapter->flags &= ~QLCNIC_FW_LRO_MSS_CAP;
+
+       qlcnic_reset_rx_buffers_list(adapter);
+
+       for (ring = 0; ring < adapter->drv_tx_rings; ring++)
+               qlcnic_release_tx_buffers(adapter, &adapter->tx_ring[ring]);
+}
+
+/* Used during suspend and during firmware recovery */
+
+void qlcnic_down(struct qlcnic_adapter *adapter, struct net_device *netdev)
+{
+       rtnl_lock();
+       if (netif_running(netdev))
+               __qlcnic_down(adapter, netdev);
+       rtnl_unlock();
+}
+
+int
+qlcnic_attach(struct qlcnic_adapter *adapter)
+{
+       struct net_device *netdev = adapter->netdev;
+       struct pci_dev *pdev = adapter->pdev;
+       int err;
+
+       if (adapter->is_up == QLCNIC_ADAPTER_UP_MAGIC)
+               return 0;
+
+       err = qlcnic_napi_add(adapter, netdev);
+       if (err)
+               return err;
+
+       err = qlcnic_alloc_sw_resources(adapter);
+       if (err) {
+               dev_err(&pdev->dev, "Error in setting sw resources\n");
+               goto err_out_napi_del;
+       }
+
+       err = qlcnic_alloc_hw_resources(adapter);
+       if (err) {
+               dev_err(&pdev->dev, "Error in setting hw resources\n");
+               goto err_out_free_sw;
+       }
+
+       err = qlcnic_request_irq(adapter);
+       if (err) {
+               dev_err(&pdev->dev, "failed to setup interrupt\n");
+               goto err_out_free_hw;
+       }
+
+       qlcnic_create_sysfs_entries(adapter);
+
+       if (qlcnic_encap_rx_offload(adapter))
+               udp_tunnel_get_rx_info(netdev);
+
+       adapter->is_up = QLCNIC_ADAPTER_UP_MAGIC;
+       return 0;
+
+err_out_free_hw:
+       qlcnic_free_hw_resources(adapter);
+err_out_free_sw:
+       qlcnic_free_sw_resources(adapter);
+err_out_napi_del:
+       qlcnic_napi_del(adapter);
+       return err;
+}
+
+void qlcnic_detach(struct qlcnic_adapter *adapter)
+{
+       if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
+               return;
+
+       qlcnic_remove_sysfs_entries(adapter);
+
+       qlcnic_free_hw_resources(adapter);
+       qlcnic_release_rx_buffers(adapter);
+       qlcnic_free_irq(adapter);
+       qlcnic_napi_del(adapter);
+       qlcnic_free_sw_resources(adapter);
+
+       adapter->is_up = 0;
+}
+
+void qlcnic_diag_free_res(struct net_device *netdev, int drv_sds_rings)
+{
+       struct qlcnic_adapter *adapter = netdev_priv(netdev);
+       struct qlcnic_host_sds_ring *sds_ring;
+       int drv_tx_rings = adapter->drv_tx_rings;
+       int ring;
+
+       clear_bit(__QLCNIC_DEV_UP, &adapter->state);
+       if (adapter->ahw->diag_test == QLCNIC_INTERRUPT_TEST) {
+               for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
+                       sds_ring = &adapter->recv_ctx->sds_rings[ring];
+                       qlcnic_disable_sds_intr(adapter, sds_ring);
+               }
+       }
+
+       qlcnic_fw_destroy_ctx(adapter);
+
+       qlcnic_detach(adapter);
+
+       adapter->ahw->diag_test = 0;
+       adapter->drv_sds_rings = drv_sds_rings;
+       adapter->drv_tx_rings = drv_tx_rings;
+
+       if (qlcnic_attach(adapter))
+               goto out;
+
+       if (netif_running(netdev))
+               __qlcnic_up(adapter, netdev);
+out:
+       netif_device_attach(netdev);
+}
+
+static int qlcnic_alloc_adapter_resources(struct qlcnic_adapter *adapter)
+{
+       struct qlcnic_hardware_context *ahw = adapter->ahw;
+       int err = 0;
+
+       adapter->recv_ctx = kzalloc(sizeof(struct qlcnic_recv_context),
+                               GFP_KERNEL);
+       if (!adapter->recv_ctx) {
+               err = -ENOMEM;
+               goto err_out;
+       }
+
+       if (qlcnic_83xx_check(adapter)) {
+               ahw->coal.type = QLCNIC_INTR_COAL_TYPE_RX_TX;
+               ahw->coal.tx_time_us = QLCNIC_DEF_INTR_COALESCE_TX_TIME_US;
+               ahw->coal.tx_packets = QLCNIC_DEF_INTR_COALESCE_TX_PACKETS;
+               ahw->coal.rx_time_us = QLCNIC_DEF_INTR_COALESCE_RX_TIME_US;
+               ahw->coal.rx_packets = QLCNIC_DEF_INTR_COALESCE_RX_PACKETS;
+       } else {
+               ahw->coal.type = QLCNIC_INTR_COAL_TYPE_RX;
+               ahw->coal.rx_time_us = QLCNIC_DEF_INTR_COALESCE_RX_TIME_US;
+               ahw->coal.rx_packets = QLCNIC_DEF_INTR_COALESCE_RX_PACKETS;
+       }
+
+       /* clear stats */
+       memset(&adapter->stats, 0, sizeof(adapter->stats));
+err_out:
+       return err;
+}
+
+static void qlcnic_free_adapter_resources(struct qlcnic_adapter *adapter)
+{
+       struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump;
+
+       kfree(adapter->recv_ctx);
+       adapter->recv_ctx = NULL;
+
+       if (fw_dump->tmpl_hdr) {
+               vfree(fw_dump->tmpl_hdr);
+               fw_dump->tmpl_hdr = NULL;
+       }
+
+       if (fw_dump->dma_buffer) {
+               dma_free_coherent(&adapter->pdev->dev, QLC_PEX_DMA_READ_SIZE,
+                                 fw_dump->dma_buffer, fw_dump->phys_addr);
+               fw_dump->dma_buffer = NULL;
+       }
+
+       kfree(adapter->ahw->reset.buff);
+       adapter->ahw->fw_dump.tmpl_hdr = NULL;
+}
+
+int qlcnic_diag_alloc_res(struct net_device *netdev, int test)
+{
+       struct qlcnic_adapter *adapter = netdev_priv(netdev);
+       struct qlcnic_host_sds_ring *sds_ring;
+       struct qlcnic_host_rds_ring *rds_ring;
+       int ring;
+       int ret;
+
+       netif_device_detach(netdev);
+
+       if (netif_running(netdev))
+               __qlcnic_down(adapter, netdev);
+
+       qlcnic_detach(adapter);
+
+       adapter->drv_sds_rings = QLCNIC_SINGLE_RING;
+       adapter->ahw->diag_test = test;
+       adapter->ahw->linkup = 0;
+
+       ret = qlcnic_attach(adapter);
+       if (ret) {
+               netif_device_attach(netdev);
+               return ret;
+       }
+
+       ret = qlcnic_fw_create_ctx(adapter);
+       if (ret) {
+               qlcnic_detach(adapter);
+               netif_device_attach(netdev);
+               return ret;
+       }
+
+       for (ring = 0; ring < adapter->max_rds_rings; ring++) {
+               rds_ring = &adapter->recv_ctx->rds_rings[ring];
+               qlcnic_post_rx_buffers(adapter, rds_ring, ring);
+       }
+
+       if (adapter->ahw->diag_test == QLCNIC_INTERRUPT_TEST) {
+               for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
+                       sds_ring = &adapter->recv_ctx->sds_rings[ring];
+                       qlcnic_enable_sds_intr(adapter, sds_ring);
+               }
+       }
+
+       if (adapter->ahw->diag_test == QLCNIC_LOOPBACK_TEST) {
+               adapter->ahw->loopback_state = 0;
+               qlcnic_linkevent_request(adapter, 1);
+       }
+
+       set_bit(__QLCNIC_DEV_UP, &adapter->state);
+
+       return 0;
+}
+
+/* Reset context in hardware only */
+static int
+qlcnic_reset_hw_context(struct qlcnic_adapter *adapter)
+{
+       struct net_device *netdev = adapter->netdev;
+
+       if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
+               return -EBUSY;
+
+       netif_device_detach(netdev);
+
+       qlcnic_down(adapter, netdev);
+
+       qlcnic_up(adapter, netdev);
+
+       netif_device_attach(netdev);
+
+       clear_bit(__QLCNIC_RESETTING, &adapter->state);
+       netdev_info(adapter->netdev, "%s: soft reset complete\n", __func__);
+       return 0;
+}
+
+int
+qlcnic_reset_context(struct qlcnic_adapter *adapter)
+{
+       int err = 0;
+       struct net_device *netdev = adapter->netdev;
+
+       if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
+               return -EBUSY;
+
+       if (adapter->is_up == QLCNIC_ADAPTER_UP_MAGIC) {
+               netif_device_detach(netdev);
+
+               if (netif_running(netdev))
+                       __qlcnic_down(adapter, netdev);
+
+               qlcnic_detach(adapter);
+
+               if (netif_running(netdev)) {
+                       err = qlcnic_attach(adapter);
+                       if (!err) {
+                               __qlcnic_up(adapter, netdev);
+                               qlcnic_restore_indev_addr(netdev, NETDEV_UP);
+                       }
+               }
+
+               netif_device_attach(netdev);
+       }
+
+       clear_bit(__QLCNIC_RESETTING, &adapter->state);
+       return err;
+}
+
+static void qlcnic_82xx_set_mac_filter_count(struct qlcnic_adapter *adapter)
+{
+       struct qlcnic_hardware_context *ahw = adapter->ahw;
+       u16 act_pci_fn = ahw->total_nic_func;
+       u16 count;
+
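+       /* Divide the filter space left after the multicast reservation evenly
+        * among the active PCI functions.
+        */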
+       ahw->max_mc_count = QLCNIC_MAX_MC_COUNT;
+       if (act_pci_fn <= 2)
+               count = (QLCNIC_MAX_UC_COUNT - QLCNIC_MAX_MC_COUNT) /
+                        act_pci_fn;
+       else
+               count = (QLCNIC_LB_MAX_FILTERS - QLCNIC_MAX_MC_COUNT) /
+                        act_pci_fn;
+       ahw->max_uc_count = count;
+}
+
+static int qlcnic_set_real_num_queues(struct qlcnic_adapter *adapter,
+                                     u8 tx_queues, u8 rx_queues)
+{
+       struct net_device *netdev = adapter->netdev;
+       int err = 0;
+
+       if (tx_queues) {
+               err = netif_set_real_num_tx_queues(netdev, tx_queues);
+               if (err) {
+                       netdev_err(netdev, "failed to set %d Tx queues\n",
+                                  tx_queues);
+                       return err;
+               }
+       }
+
+       if (rx_queues) {
+               err = netif_set_real_num_rx_queues(netdev, rx_queues);
+               if (err)
+                       netdev_err(netdev, "failed to set %d Rx queues\n",
+                                  rx_queues);
+       }
+
+       return err;
+}
+
+int
+qlcnic_setup_netdev(struct qlcnic_adapter *adapter, struct net_device *netdev,
+                   int pci_using_dac)
+{
+       int err;
+       struct pci_dev *pdev = adapter->pdev;
+
+       adapter->rx_csum = 1;
+       adapter->ahw->mc_enabled = 0;
+       qlcnic_set_mac_filter_count(adapter);
+
+       netdev->netdev_ops         = &qlcnic_netdev_ops;
+       netdev->watchdog_timeo     = QLCNIC_WATCHDOG_TIMEOUTVALUE * HZ;
+
+       qlcnic_change_mtu(netdev, netdev->mtu);
+
+       netdev->ethtool_ops = (qlcnic_sriov_vf_check(adapter)) ?
+               &qlcnic_sriov_vf_ethtool_ops : &qlcnic_ethtool_ops;
+
+       netdev->features |= (NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
+                            NETIF_F_IPV6_CSUM | NETIF_F_GRO |
+                            NETIF_F_HW_VLAN_CTAG_RX);
+       netdev->vlan_features |= (NETIF_F_SG | NETIF_F_IP_CSUM |
+                                 NETIF_F_IPV6_CSUM);
+
+       if (QLCNIC_IS_TSO_CAPABLE(adapter)) {
+               netdev->features |= (NETIF_F_TSO | NETIF_F_TSO6);
+               netdev->vlan_features |= (NETIF_F_TSO | NETIF_F_TSO6);
+       }
+
+       if (pci_using_dac) {
+               netdev->features |= NETIF_F_HIGHDMA;
+               netdev->vlan_features |= NETIF_F_HIGHDMA;
+       }
+
+       if (qlcnic_vlan_tx_check(adapter))
+               netdev->features |= (NETIF_F_HW_VLAN_CTAG_TX);
+
+       if (qlcnic_sriov_vf_check(adapter))
+               netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
+
+       if (adapter->ahw->capabilities & QLCNIC_FW_CAPABILITY_HW_LRO)
+               netdev->features |= NETIF_F_LRO;
+
+       if (qlcnic_encap_tx_offload(adapter)) {
+               netdev->features |= NETIF_F_GSO_UDP_TUNNEL;
+
+               /* encapsulation Tx offload supported by Adapter */
+               netdev->hw_enc_features = NETIF_F_IP_CSUM        |
+                                         NETIF_F_GSO_UDP_TUNNEL |
+                                         NETIF_F_TSO            |
+                                         NETIF_F_TSO6;
+       }
+
+       if (qlcnic_encap_rx_offload(adapter))
+               netdev->hw_enc_features |= NETIF_F_RXCSUM;
+
+       netdev->hw_features = netdev->features;
+       netdev->priv_flags |= IFF_UNICAST_FLT;
+       netdev->irq = adapter->msix_entries[0].vector;
+
+       err = qlcnic_set_real_num_queues(adapter, adapter->drv_tx_rings,
+                                        adapter->drv_sds_rings);
+       if (err)
+               return err;
+
+       qlcnic_dcb_init_dcbnl_ops(adapter->dcb);
+
+       err = register_netdev(netdev);
+       if (err) {
+               dev_err(&pdev->dev, "failed to register net device\n");
+               return err;
+       }
+
+       return 0;
+}
+
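+/* Prefer a 64-bit DMA mask and fall back to 32-bit if the platform cannot
+ * support it; *pci_using_dac reports which mask was selected.
+ */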
+static int qlcnic_set_dma_mask(struct pci_dev *pdev, int *pci_using_dac)
+{
+       if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) &&
+                       !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)))
+               *pci_using_dac = 1;
+       else if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) &&
+                       !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)))
+               *pci_using_dac = 0;
+       else {
+               dev_err(&pdev->dev, "Unable to set DMA mask, aborting\n");
+               return -EIO;
+       }
+
+       return 0;
+}
+
+void qlcnic_free_tx_rings(struct qlcnic_adapter *adapter)
+{
+       int ring;
+       struct qlcnic_host_tx_ring *tx_ring;
+
+       for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
+               tx_ring = &adapter->tx_ring[ring];
+               if (tx_ring) {
+                       vfree(tx_ring->cmd_buf_arr);
+                       tx_ring->cmd_buf_arr = NULL;
+               }
+       }
+       kfree(adapter->tx_ring);
+}
+
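+/* Allocate the Tx ring array and a command-buffer array per ring, and
+ * record the MSI-X vector assigned to each Tx ring where applicable.
+ */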
+int qlcnic_alloc_tx_rings(struct qlcnic_adapter *adapter,
+                         struct net_device *netdev)
+{
+       int ring, vector, index;
+       struct qlcnic_host_tx_ring *tx_ring;
+       struct qlcnic_cmd_buffer *cmd_buf_arr;
+
+       tx_ring = kcalloc(adapter->drv_tx_rings,
+                         sizeof(struct qlcnic_host_tx_ring), GFP_KERNEL);
+       if (tx_ring == NULL)
+               return -ENOMEM;
+
+       adapter->tx_ring = tx_ring;
+
+       for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
+               tx_ring = &adapter->tx_ring[ring];
+               tx_ring->num_desc = adapter->num_txd;
+               tx_ring->txq = netdev_get_tx_queue(netdev, ring);
+               cmd_buf_arr = vzalloc(TX_BUFF_RINGSIZE(tx_ring));
+               if (cmd_buf_arr == NULL) {
+                       qlcnic_free_tx_rings(adapter);
+                       return -ENOMEM;
+               }
+               tx_ring->cmd_buf_arr = cmd_buf_arr;
+               spin_lock_init(&tx_ring->tx_clean_lock);
+       }
+
+       if (qlcnic_83xx_check(adapter) ||
+           (qlcnic_82xx_check(adapter) && qlcnic_check_multi_tx(adapter))) {
+               for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
+                       tx_ring = &adapter->tx_ring[ring];
+                       tx_ring->adapter = adapter;
+                       if (adapter->flags & QLCNIC_MSIX_ENABLED) {
+                               index = adapter->drv_sds_rings + ring;
+                               vector = adapter->msix_entries[index].vector;
+                               tx_ring->irq = vector;
+                       }
+               }
+       }
+
+       return 0;
+}
+
+void qlcnic_set_drv_version(struct qlcnic_adapter *adapter)
+{
+       struct qlcnic_hardware_context *ahw = adapter->ahw;
+       u32 fw_cmd = 0;
+
+       if (qlcnic_82xx_check(adapter))
+               fw_cmd = QLCNIC_CMD_82XX_SET_DRV_VER;
+       else if (qlcnic_83xx_check(adapter))
+               fw_cmd = QLCNIC_CMD_83XX_SET_DRV_VER;
+
+       if (ahw->extra_capability[0] & QLCNIC_FW_CAPABILITY_SET_DRV_VER)
+               qlcnic_fw_cmd_set_drv_version(adapter, fw_cmd);
+}
+
+/* Reset firmware API lock */
+static void qlcnic_reset_api_lock(struct qlcnic_adapter *adapter)
+{
+       qlcnic_api_lock(adapter);
+       qlcnic_api_unlock(adapter);
+}
+
+static int
+qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+       struct net_device *netdev = NULL;
+       struct qlcnic_adapter *adapter = NULL;
+       struct qlcnic_hardware_context *ahw;
+       int err, pci_using_dac = -1;
+       char board_name[QLCNIC_MAX_BOARD_NAME_LEN + 19]; /* MAC + ": " + name */
+
+       err = pci_enable_device(pdev);
+       if (err)
+               return err;
+
+       if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
+               err = -ENODEV;
+               goto err_out_disable_pdev;
+       }
+
+       err = qlcnic_set_dma_mask(pdev, &pci_using_dac);
+       if (err)
+               goto err_out_disable_pdev;
+
+       err = pci_request_regions(pdev, qlcnic_driver_name);
+       if (err)
+               goto err_out_disable_pdev;
+
+       pci_set_master(pdev);
+       pci_enable_pcie_error_reporting(pdev);
+
+       ahw = kzalloc(sizeof(struct qlcnic_hardware_context), GFP_KERNEL);
+       if (!ahw) {
+               err = -ENOMEM;
+               goto err_out_free_res;
+       }
+
+       switch (ent->device) {
+       case PCI_DEVICE_ID_QLOGIC_QLE824X:
+               ahw->hw_ops = &qlcnic_hw_ops;
+               ahw->reg_tbl = (u32 *) qlcnic_reg_tbl;
+               break;
+       case PCI_DEVICE_ID_QLOGIC_QLE834X:
+       case PCI_DEVICE_ID_QLOGIC_QLE8830:
+       case PCI_DEVICE_ID_QLOGIC_QLE844X:
+               qlcnic_83xx_register_map(ahw);
+               break;
+       case PCI_DEVICE_ID_QLOGIC_VF_QLE834X:
+       case PCI_DEVICE_ID_QLOGIC_VF_QLE8C30:
+       case PCI_DEVICE_ID_QLOGIC_VF_QLE844X:
+               qlcnic_sriov_vf_register_map(ahw);
+               break;
+       default:
+               goto err_out_free_hw_res;
+       }
+
+       err = qlcnic_setup_pci_map(pdev, ahw);
+       if (err)
+               goto err_out_free_hw_res;
+
+       netdev = alloc_etherdev_mq(sizeof(struct qlcnic_adapter),
+                                  QLCNIC_MAX_TX_RINGS);
+       if (!netdev) {
+               err = -ENOMEM;
+               goto err_out_iounmap;
+       }
+
+       SET_NETDEV_DEV(netdev, &pdev->dev);
+
+       adapter = netdev_priv(netdev);
+       adapter->netdev  = netdev;
+       adapter->pdev    = pdev;
+       adapter->ahw = ahw;
+
+       adapter->qlcnic_wq = create_singlethread_workqueue("qlcnic");
+       if (adapter->qlcnic_wq == NULL) {
+               err = -ENOMEM;
+               dev_err(&pdev->dev, "Failed to create workqueue\n");
+               goto err_out_free_netdev;
+       }
+
+       err = qlcnic_alloc_adapter_resources(adapter);
+       if (err)
+               goto err_out_free_wq;
+
+       adapter->dev_rst_time = jiffies;
+       ahw->revision_id = pdev->revision;
+       ahw->max_vnic_func = qlcnic_get_vnic_func_count(adapter);
+       if (qlcnic_mac_learn == FDB_MAC_LEARN)
+               adapter->fdb_mac_learn = true;
+       else if (qlcnic_mac_learn == DRV_MAC_LEARN)
+               adapter->drv_mac_learn = true;
+
+       rwlock_init(&adapter->ahw->crb_lock);
+       mutex_init(&adapter->ahw->mem_lock);
+
+       INIT_LIST_HEAD(&adapter->mac_list);
+
+       qlcnic_register_dcb(adapter);
+
+       if (qlcnic_82xx_check(adapter)) {
+               qlcnic_check_vf(adapter, ent);
+               adapter->portnum = adapter->ahw->pci_func;
+               qlcnic_reset_api_lock(adapter);
+               err = qlcnic_start_firmware(adapter);
+               if (err) {
+                       dev_err(&pdev->dev, "Loading fw failed. Please reboot\n"
+                               "\t\tIf reboot doesn't help, try flashing the card\n");
+                       goto err_out_maintenance_mode;
+               }
+
+               /* compute and set default and max tx/sds rings */
+               if (adapter->ahw->msix_supported) {
+                       if (qlcnic_check_multi_tx_capability(adapter) == 1)
+                               qlcnic_set_tx_ring_count(adapter,
+                                                        QLCNIC_SINGLE_RING);
+                       else
+                               qlcnic_set_tx_ring_count(adapter,
+                                                        QLCNIC_DEF_TX_RINGS);
+                       qlcnic_set_sds_ring_count(adapter,
+                                                 QLCNIC_DEF_SDS_RINGS);
+               } else {
+                       qlcnic_set_tx_ring_count(adapter, QLCNIC_SINGLE_RING);
+                       qlcnic_set_sds_ring_count(adapter, QLCNIC_SINGLE_RING);
+               }
+
+               err = qlcnic_setup_idc_param(adapter);
+               if (err)
+                       goto err_out_free_hw;
+
+               adapter->flags |= QLCNIC_NEED_FLR;
+
+       } else if (qlcnic_83xx_check(adapter)) {
+               qlcnic_83xx_check_vf(adapter, ent);
+               adapter->portnum = adapter->ahw->pci_func;
+               err = qlcnic_83xx_init(adapter, pci_using_dac);
+               if (err) {
+                       switch (err) {
+                       case -ENOTRECOVERABLE:
+                               dev_err(&pdev->dev, "Adapter initialization failed due to faulty hardware\n");
+                               dev_err(&pdev->dev, "Please replace the adapter with a new one and return the faulty adapter for repair\n");
+                               goto err_out_free_hw;
+                       case -ENOMEM:
+                               dev_err(&pdev->dev, "Adapter initialization failed. Please reboot\n");
+                               goto err_out_free_hw;
+                       case -EOPNOTSUPP:
+                               dev_err(&pdev->dev, "Adapter initialization failed\n");
+                               goto err_out_free_hw;
+                       default:
+                               dev_err(&pdev->dev, "Adapter initialization failed. Driver will load in maintenance mode to recover the adapter using the application\n");
+                               goto err_out_maintenance_mode;
+                       }
+               }
+
+               if (qlcnic_sriov_vf_check(adapter))
+                       return 0;
+       } else {
+               dev_err(&pdev->dev,
+                       "%s: failed. Please Reboot\n", __func__);
+               err = -ENODEV;
+               goto err_out_free_hw;
+       }
+
+       if (qlcnic_read_mac_addr(adapter))
+               dev_warn(&pdev->dev, "failed to read mac addr\n");
+
+       qlcnic_read_phys_port_id(adapter);
+
+       if (adapter->portnum == 0) {
+               qlcnic_get_board_name(adapter, board_name);
+
+               pr_info("%s: %s Board Chip rev 0x%x\n",
+                       module_name(THIS_MODULE),
+                       board_name, adapter->ahw->revision_id);
+       }
+
+       if (qlcnic_83xx_check(adapter) && !qlcnic_use_msi_x &&
+           !!qlcnic_use_msi)
+               dev_warn(&pdev->dev,
+                        "Device does not support MSI interrupts\n");
+
+       if (qlcnic_82xx_check(adapter)) {
+               qlcnic_dcb_enable(adapter->dcb);
+               qlcnic_dcb_get_info(adapter->dcb);
+               err = qlcnic_setup_intr(adapter);
+
+               if (err) {
+                       dev_err(&pdev->dev, "Failed to setup interrupt\n");
+                       goto err_out_disable_msi;
+               }
+       }
+
+       err = qlcnic_get_act_pci_func(adapter);
+       if (err)
+               goto err_out_disable_mbx_intr;
+
+       if (adapter->portnum == 0)
+               qlcnic_set_drv_version(adapter);
+
+       err = qlcnic_setup_netdev(adapter, netdev, pci_using_dac);
+       if (err)
+               goto err_out_disable_mbx_intr;
+
+       pci_set_drvdata(pdev, adapter);
+
+       if (qlcnic_82xx_check(adapter))
+               qlcnic_schedule_work(adapter, qlcnic_fw_poll_work,
+                                    FW_POLL_DELAY);
+
+       switch (adapter->ahw->port_type) {
+       case QLCNIC_GBE:
+               dev_info(&adapter->pdev->dev, "%s: GbE port initialized\n",
+                               adapter->netdev->name);
+               break;
+       case QLCNIC_XGBE:
+               dev_info(&adapter->pdev->dev, "%s: XGbE port initialized\n",
+                               adapter->netdev->name);
+               break;
+       }
+
+       if (adapter->drv_mac_learn)
+               qlcnic_alloc_lb_filters_mem(adapter);
+
+       qlcnic_add_sysfs(adapter);
+       qlcnic_register_hwmon_dev(adapter);
+       return 0;
+
+err_out_disable_mbx_intr:
+       if (qlcnic_83xx_check(adapter))
+               qlcnic_83xx_free_mbx_intr(adapter);
+
+err_out_disable_msi:
+       qlcnic_teardown_intr(adapter);
+       qlcnic_cancel_idc_work(adapter);
+       qlcnic_clr_all_drv_state(adapter, 0);
+
+err_out_free_hw:
+       qlcnic_free_adapter_resources(adapter);
+
+err_out_free_wq:
+       destroy_workqueue(adapter->qlcnic_wq);
+
+err_out_free_netdev:
+       free_netdev(netdev);
+
+err_out_iounmap:
+       qlcnic_cleanup_pci_map(ahw);
+
+err_out_free_hw_res:
+       kfree(ahw);
+
+err_out_free_res:
+       pci_release_regions(pdev);
+
+err_out_disable_pdev:
+       pci_disable_device(pdev);
+       return err;
+
+err_out_maintenance_mode:
+       set_bit(__QLCNIC_MAINTENANCE_MODE, &adapter->state);
+       netdev->netdev_ops = &qlcnic_netdev_failed_ops;
+       netdev->ethtool_ops = &qlcnic_ethtool_failed_ops;
+       ahw->port_type = QLCNIC_XGBE;
+
+       if (qlcnic_83xx_check(adapter))
+               adapter->tgt_status_reg = NULL;
+       else
+               ahw->board_type = QLCNIC_BRDTYPE_P3P_10G_SFP_PLUS;
+
+       err = register_netdev(netdev);
+
+       if (err) {
+               dev_err(&pdev->dev, "Failed to register net device\n");
+               qlcnic_clr_all_drv_state(adapter, 0);
+               goto err_out_free_hw;
+       }
+
+       pci_set_drvdata(pdev, adapter);
+       qlcnic_add_sysfs(adapter);
+
+       return 0;
+}
+
+static void qlcnic_remove(struct pci_dev *pdev)
+{
+       struct qlcnic_adapter *adapter;
+       struct net_device *netdev;
+       struct qlcnic_hardware_context *ahw;
+
+       adapter = pci_get_drvdata(pdev);
+       if (adapter == NULL)
+               return;
+
+       netdev = adapter->netdev;
+
+       qlcnic_cancel_idc_work(adapter);
+       qlcnic_sriov_pf_disable(adapter);
+       ahw = adapter->ahw;
+
+       unregister_netdev(netdev);
+       qlcnic_sriov_cleanup(adapter);
+
+       if (qlcnic_83xx_check(adapter)) {
+               qlcnic_83xx_initialize_nic(adapter, 0);
+               cancel_delayed_work_sync(&adapter->idc_aen_work);
+               qlcnic_83xx_free_mbx_intr(adapter);
+               qlcnic_83xx_detach_mailbox_work(adapter);
+               qlcnic_83xx_free_mailbox(ahw->mailbox);
+               kfree(ahw->fw_info);
+       }
+
+       qlcnic_dcb_free(adapter->dcb);
+       qlcnic_detach(adapter);
+       kfree(adapter->npars);
+       kfree(adapter->eswitch);
+
+       if (qlcnic_82xx_check(adapter))
+               qlcnic_clr_all_drv_state(adapter, 0);
+
+       clear_bit(__QLCNIC_RESETTING, &adapter->state);
+
+       qlcnic_free_lb_filters_mem(adapter);
+
+       qlcnic_teardown_intr(adapter);
+
+       qlcnic_remove_sysfs(adapter);
+
+       qlcnic_unregister_hwmon_dev(adapter);
+
+       qlcnic_cleanup_pci_map(adapter->ahw);
+
+       qlcnic_release_firmware(adapter);
+
+       pci_disable_pcie_error_reporting(pdev);
+       pci_release_regions(pdev);
+       pci_disable_device(pdev);
+
+       if (adapter->qlcnic_wq) {
+               destroy_workqueue(adapter->qlcnic_wq);
+               adapter->qlcnic_wq = NULL;
+       }
+
+       qlcnic_free_adapter_resources(adapter);
+       kfree(ahw);
+       free_netdev(netdev);
+}
+
+static void qlcnic_shutdown(struct pci_dev *pdev)
+{
+       if (__qlcnic_shutdown(pdev))
+               return;
+
+       pci_disable_device(pdev);
+}
+
+#ifdef CONFIG_PM
+static int qlcnic_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+       int retval;
+
+       retval = __qlcnic_shutdown(pdev);
+       if (retval)
+               return retval;
+
+       pci_set_power_state(pdev, pci_choose_state(pdev, state));
+       return 0;
+}
+
+static int qlcnic_resume(struct pci_dev *pdev)
+{
+       struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
+       int err;
+
+       err = pci_enable_device(pdev);
+       if (err)
+               return err;
+
+       pci_set_power_state(pdev, PCI_D0);
+       pci_set_master(pdev);
+       pci_restore_state(pdev);
+
+       return  __qlcnic_resume(adapter);
+}
+#endif
+
+static int qlcnic_open(struct net_device *netdev)
+{
+       struct qlcnic_adapter *adapter = netdev_priv(netdev);
+       int err;
+
+       if (test_bit(__QLCNIC_MAINTENANCE_MODE, &adapter->state)) {
+               netdev_err(netdev, "%s: Device is in non-operational state\n",
+                          __func__);
+
+               return -EIO;
+       }
+
+       netif_carrier_off(netdev);
+
+       err = qlcnic_attach(adapter);
+       if (err)
+               return err;
+
+       err = __qlcnic_up(adapter, netdev);
+       if (err)
+               qlcnic_detach(adapter);
+
+       return err;
+}
+
+/*
+ * qlcnic_close - Disables a network interface entry point
+ */
+static int qlcnic_close(struct net_device *netdev)
+{
+       struct qlcnic_adapter *adapter = netdev_priv(netdev);
+
+       __qlcnic_down(adapter, netdev);
+
+       return 0;
+}
+
+#define QLCNIC_VF_LB_BUCKET_SIZE 1
+
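+/* Allocate the Tx and Rx loopback MAC-filter hash tables; each active NIC
+ * function gets an equal share of the hardware filter pool.
+ */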
+void qlcnic_alloc_lb_filters_mem(struct qlcnic_adapter *adapter)
+{
+       void *head;
+       int i;
+       struct net_device *netdev = adapter->netdev;
+       u32 filter_size = 0;
+       u16 act_pci_func = 0;
+
+       if (adapter->fhash.fmax && adapter->fhash.fhead)
+               return;
+
+       act_pci_func = adapter->ahw->total_nic_func;
+       spin_lock_init(&adapter->mac_learn_lock);
+       spin_lock_init(&adapter->rx_mac_learn_lock);
+
+       if (qlcnic_sriov_vf_check(adapter)) {
+               filter_size = QLCNIC_83XX_SRIOV_VF_MAX_MAC - 1;
+               adapter->fhash.fbucket_size = QLCNIC_VF_LB_BUCKET_SIZE;
+       } else if (qlcnic_82xx_check(adapter)) {
+               filter_size = QLCNIC_LB_MAX_FILTERS;
+               adapter->fhash.fbucket_size = QLCNIC_LB_BUCKET_SIZE;
+       } else {
+               filter_size = QLC_83XX_LB_MAX_FILTERS;
+               adapter->fhash.fbucket_size = QLC_83XX_LB_BUCKET_SIZE;
+       }
+
+       head = kcalloc(adapter->fhash.fbucket_size,
+                      sizeof(struct hlist_head), GFP_ATOMIC);
+
+       if (!head)
+               return;
+
+       adapter->fhash.fmax = (filter_size / act_pci_func);
+       adapter->fhash.fhead = head;
+
+       netdev_info(netdev, "active nic func = %d, mac filter size=%d\n",
+                   act_pci_func, adapter->fhash.fmax);
+
+       for (i = 0; i < adapter->fhash.fbucket_size; i++)
+               INIT_HLIST_HEAD(&adapter->fhash.fhead[i]);
+
+       adapter->rx_fhash.fbucket_size = adapter->fhash.fbucket_size;
+
+       head = kcalloc(adapter->rx_fhash.fbucket_size,
+                      sizeof(struct hlist_head), GFP_ATOMIC);
+
+       if (!head)
+               return;
+
+       adapter->rx_fhash.fmax = (filter_size / act_pci_func);
+       adapter->rx_fhash.fhead = head;
+
+       for (i = 0; i < adapter->rx_fhash.fbucket_size; i++)
+               INIT_HLIST_HEAD(&adapter->rx_fhash.fhead[i]);
+}
+
+static void qlcnic_free_lb_filters_mem(struct qlcnic_adapter *adapter)
+{
+       if (adapter->fhash.fmax)
+               kfree(adapter->fhash.fhead);
+
+       adapter->fhash.fhead = NULL;
+       adapter->fhash.fmax = 0;
+
+       if (adapter->rx_fhash.fmax)
+               kfree(adapter->rx_fhash.fhead);
+
+       adapter->rx_fhash.fmax = 0;
+       adapter->rx_fhash.fhead = NULL;
+}
+
+int qlcnic_check_temp(struct qlcnic_adapter *adapter)
+{
+       struct net_device *netdev = adapter->netdev;
+       u32 temp_state, temp_val, temp = 0;
+       int rv = 0;
+
+       if (qlcnic_83xx_check(adapter))
+               temp = QLCRDX(adapter->ahw, QLC_83XX_ASIC_TEMP);
+
+       if (qlcnic_82xx_check(adapter))
+               temp = QLC_SHARED_REG_RD32(adapter, QLCNIC_ASIC_TEMP);
+
+       temp_state = qlcnic_get_temp_state(temp);
+       temp_val = qlcnic_get_temp_val(temp);
+
+       if (temp_state == QLCNIC_TEMP_PANIC) {
+               dev_err(&netdev->dev,
+                       "Device temperature %d degrees C exceeds maximum allowed. Hardware has been shut down.\n",
+                       temp_val);
+               rv = 1;
+       } else if (temp_state == QLCNIC_TEMP_WARN) {
+               if (adapter->ahw->temp == QLCNIC_TEMP_NORMAL) {
+                       dev_err(&netdev->dev,
+                               "Device temperature %d degrees C exceeds operating range. Immediate action needed.\n",
+                               temp_val);
+               }
+       } else {
+               if (adapter->ahw->temp == QLCNIC_TEMP_WARN) {
+                       dev_info(&netdev->dev,
+                               "Device temperature is now %d degrees C in normal range.\n",
+                               temp_val);
+               }
+       }
+       adapter->ahw->temp = temp_state;
+       return rv;
+}
+
+static inline void dump_tx_ring_desc(struct qlcnic_host_tx_ring *tx_ring)
+{
+       int i;
+       struct cmd_desc_type0 *tx_desc_info;
+
+       for (i = 0; i < tx_ring->num_desc; i++) {
+               tx_desc_info = &tx_ring->desc_head[i];
+               pr_info("TX Desc: %d\n", i);
+               print_hex_dump(KERN_INFO, "TX: ", DUMP_PREFIX_OFFSET, 16, 1,
+                              &tx_ring->desc_head[i],
+                              sizeof(struct cmd_desc_type0), true);
+       }
+}
+
+static void qlcnic_dump_rings(struct qlcnic_adapter *adapter)
+{
+       struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
+       struct net_device *netdev = adapter->netdev;
+       struct qlcnic_host_rds_ring *rds_ring;
+       struct qlcnic_host_sds_ring *sds_ring;
+       struct qlcnic_host_tx_ring *tx_ring;
+       int ring;
+
+       if (!netdev || !netif_running(netdev))
+               return;
+
+       for (ring = 0; ring < adapter->max_rds_rings; ring++) {
+               rds_ring = &recv_ctx->rds_rings[ring];
+               if (!rds_ring)
+                       continue;
+               netdev_info(netdev,
+                           "rds_ring=%d crb_rcv_producer=%d producer=%u num_desc=%u\n",
+                            ring, readl(rds_ring->crb_rcv_producer),
+                            rds_ring->producer, rds_ring->num_desc);
+       }
+
+       for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
+               sds_ring = &(recv_ctx->sds_rings[ring]);
+               if (!sds_ring)
+                       continue;
+               netdev_info(netdev,
+                           "sds_ring=%d crb_sts_consumer=%d consumer=%u crb_intr_mask=%d num_desc=%u\n",
+                           ring, readl(sds_ring->crb_sts_consumer),
+                           sds_ring->consumer, readl(sds_ring->crb_intr_mask),
+                           sds_ring->num_desc);
+       }
+
+       for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
+               tx_ring = &adapter->tx_ring[ring];
+               if (!tx_ring)
+                       continue;
+               netdev_info(netdev, "Tx ring=%d Context Id=0x%x\n",
+                           ring, tx_ring->ctx_id);
+               netdev_info(netdev,
+                           "xmit_finished=%llu, xmit_called=%llu, xmit_on=%llu, xmit_off=%llu\n",
+                           tx_ring->tx_stats.xmit_finished,
+                           tx_ring->tx_stats.xmit_called,
+                           tx_ring->tx_stats.xmit_on,
+                           tx_ring->tx_stats.xmit_off);
+
+               if (tx_ring->crb_intr_mask)
+                       netdev_info(netdev, "crb_intr_mask=%d\n",
+                                   readl(tx_ring->crb_intr_mask));
+
+               netdev_info(netdev,
+                           "hw_producer=%d, sw_producer=%d sw_consumer=%d, hw_consumer=%d\n",
+                           readl(tx_ring->crb_cmd_producer),
+                           tx_ring->producer, tx_ring->sw_consumer,
+                           le32_to_cpu(*(tx_ring->hw_consumer)));
+
+               netdev_info(netdev, "Total desc=%d, Available desc=%d\n",
+                           tx_ring->num_desc, qlcnic_tx_avail(tx_ring));
+
+               if (netif_msg_tx_err(adapter->ahw))
+                       dump_tx_ring_desc(tx_ring);
+       }
+}
+
+static void qlcnic_tx_timeout(struct net_device *netdev)
+{
+       struct qlcnic_adapter *adapter = netdev_priv(netdev);
+
+       if (test_bit(__QLCNIC_RESETTING, &adapter->state))
+               return;
+
+       qlcnic_dump_rings(adapter);
+
+       if (++adapter->tx_timeo_cnt >= QLCNIC_MAX_TX_TIMEOUTS ||
+           netif_msg_tx_err(adapter->ahw)) {
+               netdev_err(netdev, "Tx timeout, reset the adapter.\n");
+               if (qlcnic_82xx_check(adapter))
+                       adapter->need_fw_reset = 1;
+               else if (qlcnic_83xx_check(adapter))
+                       qlcnic_83xx_idc_request_reset(adapter,
+                                                     QLCNIC_FORCE_FW_DUMP_KEY);
+       } else {
+               netdev_err(netdev, "Tx timeout, reset adapter context.\n");
+               adapter->ahw->reset_context = 1;
+       }
+}
+
+static struct net_device_stats *qlcnic_get_stats(struct net_device *netdev)
+{
+       struct qlcnic_adapter *adapter = netdev_priv(netdev);
+       struct net_device_stats *stats = &netdev->stats;
+
+       if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
+               qlcnic_update_stats(adapter);
+
+       stats->rx_packets = adapter->stats.rx_pkts + adapter->stats.lro_pkts;
+       stats->tx_packets = adapter->stats.xmitfinished;
+       stats->rx_bytes = adapter->stats.rxbytes + adapter->stats.lrobytes;
+       stats->tx_bytes = adapter->stats.txbytes;
+       stats->rx_dropped = adapter->stats.rxdropped;
+       stats->tx_dropped = adapter->stats.txdropped;
+
+       return stats;
+}
+
+static irqreturn_t qlcnic_82xx_clear_legacy_intr(struct qlcnic_adapter *adapter)
+{
+       u32 status;
+
+       status = readl(adapter->isr_int_vec);
+
+       if (!(status & adapter->ahw->int_vec_bit))
+               return IRQ_NONE;
+
+       /* check interrupt state machine, to be sure */
+       status = readl(adapter->crb_int_state_reg);
+       if (!ISR_LEGACY_INT_TRIGGERED(status))
+               return IRQ_NONE;
+
+       writel(0xffffffff, adapter->tgt_status_reg);
+       /* read twice to ensure write is flushed */
+       readl(adapter->isr_int_vec);
+       readl(adapter->isr_int_vec);
+
+       return IRQ_HANDLED;
+}
+
+static irqreturn_t qlcnic_tmp_intr(int irq, void *data)
+{
+       struct qlcnic_host_sds_ring *sds_ring = data;
+       struct qlcnic_adapter *adapter = sds_ring->adapter;
+
+       if (adapter->flags & QLCNIC_MSIX_ENABLED)
+               goto done;
+       else if (adapter->flags & QLCNIC_MSI_ENABLED) {
+               writel(0xffffffff, adapter->tgt_status_reg);
+               goto done;
+       }
+
+       if (qlcnic_clear_legacy_intr(adapter) == IRQ_NONE)
+               return IRQ_NONE;
+
+done:
+       adapter->ahw->diag_cnt++;
+       qlcnic_enable_sds_intr(adapter, sds_ring);
+       return IRQ_HANDLED;
+}
+
+static irqreturn_t qlcnic_intr(int irq, void *data)
+{
+       struct qlcnic_host_sds_ring *sds_ring = data;
+       struct qlcnic_adapter *adapter = sds_ring->adapter;
+
+       if (qlcnic_clear_legacy_intr(adapter) == IRQ_NONE)
+               return IRQ_NONE;
+
+       napi_schedule(&sds_ring->napi);
+
+       return IRQ_HANDLED;
+}
+
+static irqreturn_t qlcnic_msi_intr(int irq, void *data)
+{
+       struct qlcnic_host_sds_ring *sds_ring = data;
+       struct qlcnic_adapter *adapter = sds_ring->adapter;
+
+       /* clear interrupt */
+       writel(0xffffffff, adapter->tgt_status_reg);
+
+       napi_schedule(&sds_ring->napi);
+       return IRQ_HANDLED;
+}
+
+static irqreturn_t qlcnic_msix_intr(int irq, void *data)
+{
+       struct qlcnic_host_sds_ring *sds_ring = data;
+
+       napi_schedule(&sds_ring->napi);
+       return IRQ_HANDLED;
+}
+
+static irqreturn_t qlcnic_msix_tx_intr(int irq, void *data)
+{
+       struct qlcnic_host_tx_ring *tx_ring = data;
+
+       napi_schedule(&tx_ring->napi);
+       return IRQ_HANDLED;
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void qlcnic_poll_controller(struct net_device *netdev)
+{
+       struct qlcnic_adapter *adapter = netdev_priv(netdev);
+       struct qlcnic_host_sds_ring *sds_ring;
+       struct qlcnic_recv_context *recv_ctx;
+       struct qlcnic_host_tx_ring *tx_ring;
+       int ring;
+
+       if (!test_bit(__QLCNIC_DEV_UP, &adapter->state))
+               return;
+
+       recv_ctx = adapter->recv_ctx;
+
+       for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
+               sds_ring = &recv_ctx->sds_rings[ring];
+               qlcnic_disable_sds_intr(adapter, sds_ring);
+               napi_schedule(&sds_ring->napi);
+       }
+
+       if (adapter->flags & QLCNIC_MSIX_ENABLED) {
+               /* Only Multi-Tx queue capable devices need to
+                * schedule NAPI for TX rings
+                */
+               if ((qlcnic_83xx_check(adapter) &&
+                    (adapter->flags & QLCNIC_TX_INTR_SHARED)) ||
+                   (qlcnic_82xx_check(adapter) &&
+                    !qlcnic_check_multi_tx(adapter)))
+                       return;
+
+               for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
+                       tx_ring = &adapter->tx_ring[ring];
+                       qlcnic_disable_tx_intr(adapter, tx_ring);
+                       napi_schedule(&tx_ring->napi);
+               }
+       }
+}
+#endif
+
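+/* Record the port number, an event encoding and the time since the last
+ * reset in the driver scratch register for IDC debugging.
+ */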
+static void
+qlcnic_idc_debug_info(struct qlcnic_adapter *adapter, u8 encoding)
+{
+       u32 val;
+
+       val = adapter->portnum & 0xf;
+       val |= encoding << 7;
+       val |= (jiffies - adapter->dev_rst_time) << 8;
+
+       QLC_SHARED_REG_WR32(adapter, QLCNIC_CRB_DRV_SCRATCH, val);
+       adapter->dev_rst_time = jiffies;
+}
+
+static int
+qlcnic_set_drv_state(struct qlcnic_adapter *adapter, u8 state)
+{
+       u32  val;
+
+       WARN_ON(state != QLCNIC_DEV_NEED_RESET &&
+                       state != QLCNIC_DEV_NEED_QUISCENT);
+
+       if (qlcnic_api_lock(adapter))
+               return -EIO;
+
+       val = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DRV_STATE);
+
+       if (state == QLCNIC_DEV_NEED_RESET)
+               QLC_DEV_SET_RST_RDY(val, adapter->portnum);
+       else if (state == QLCNIC_DEV_NEED_QUISCENT)
+               QLC_DEV_SET_QSCNT_RDY(val, adapter->portnum);
+
+       QLC_SHARED_REG_WR32(adapter, QLCNIC_CRB_DRV_STATE, val);
+
+       qlcnic_api_unlock(adapter);
+
+       return 0;
+}
+
+static int
+qlcnic_clr_drv_state(struct qlcnic_adapter *adapter)
+{
+       u32  val;
+
+       if (qlcnic_api_lock(adapter))
+               return -EBUSY;
+
+       val = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DRV_STATE);
+       QLC_DEV_CLR_RST_QSCNT(val, adapter->portnum);
+       QLC_SHARED_REG_WR32(adapter, QLCNIC_CRB_DRV_STATE, val);
+
+       qlcnic_api_unlock(adapter);
+
+       return 0;
+}
+
+void qlcnic_clr_all_drv_state(struct qlcnic_adapter *adapter, u8 failed)
+{
+       u32  val;
+
+       if (qlcnic_api_lock(adapter))
+               goto err;
+
+       val = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DRV_ACTIVE);
+       QLC_DEV_CLR_REF_CNT(val, adapter->portnum);
+       QLC_SHARED_REG_WR32(adapter, QLCNIC_CRB_DRV_ACTIVE, val);
+
+       if (failed) {
+               QLC_SHARED_REG_WR32(adapter, QLCNIC_CRB_DEV_STATE,
+                                   QLCNIC_DEV_FAILED);
+               dev_info(&adapter->pdev->dev,
+                               "Device state set to Failed. Please Reboot\n");
+       } else if (!(val & 0x11111111))
+               QLC_SHARED_REG_WR32(adapter, QLCNIC_CRB_DEV_STATE,
+                                   QLCNIC_DEV_COLD);
+
+       val = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DRV_STATE);
+       QLC_DEV_CLR_RST_QSCNT(val, adapter->portnum);
+       QLC_SHARED_REG_WR32(adapter, QLCNIC_CRB_DRV_STATE, val);
+
+       qlcnic_api_unlock(adapter);
+err:
+       adapter->fw_fail_cnt = 0;
+       adapter->flags &= ~QLCNIC_FW_HANG;
+       clear_bit(__QLCNIC_START_FW, &adapter->state);
+       clear_bit(__QLCNIC_RESETTING, &adapter->state);
+}
+
+/* Callers must grab the API lock before checking the driver state */
+static int
+qlcnic_check_drv_state(struct qlcnic_adapter *adapter)
+{
+       int act, state, active_mask;
+       struct qlcnic_hardware_context *ahw = adapter->ahw;
+
+       state = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DRV_STATE);
+       act = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DRV_ACTIVE);
+
+       if (adapter->flags & QLCNIC_FW_RESET_OWNER) {
+               active_mask = (~(1 << (ahw->pci_func * 4)));
+               act = act & active_mask;
+       }
+
+       if (((state & 0x11111111) == (act & 0x11111111)) ||
+                       ((act & 0x11111111) == ((state >> 1) & 0x11111111)))
+               return 0;
+       else
+               return 1;
+}
+
+static int qlcnic_check_idc_ver(struct qlcnic_adapter *adapter)
+{
+       u32 val = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DRV_IDC_VER);
+
+       if (val != QLCNIC_DRV_IDC_VER) {
+               dev_warn(&adapter->pdev->dev, "IDC Version mismatch, driver's"
+                       " idc ver = %x; reqd = %x\n", QLCNIC_DRV_IDC_VER, val);
+       }
+
+       return 0;
+}
+
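+/* Decide, from the shared IDC device state, whether this function should
+ * load the firmware itself: returns 1 to load it, 0 when another function
+ * has already brought the device to READY, and a negative value on failure.
+ */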
+static int
+qlcnic_can_start_firmware(struct qlcnic_adapter *adapter)
+{
+       u32 val, prev_state;
+       u8 dev_init_timeo = adapter->dev_init_timeo;
+       u8 portnum = adapter->portnum;
+       u8 ret;
+
+       if (test_and_clear_bit(__QLCNIC_START_FW, &adapter->state))
+               return 1;
+
+       if (qlcnic_api_lock(adapter))
+               return -1;
+
+       val = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DRV_ACTIVE);
+       if (!(val & (1 << (portnum * 4)))) {
+               QLC_DEV_SET_REF_CNT(val, portnum);
+               QLC_SHARED_REG_WR32(adapter, QLCNIC_CRB_DRV_ACTIVE, val);
+       }
+
+       prev_state = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DEV_STATE);
+       QLCDB(adapter, HW, "Device state = %u\n", prev_state);
+
+       switch (prev_state) {
+       case QLCNIC_DEV_COLD:
+               QLC_SHARED_REG_WR32(adapter, QLCNIC_CRB_DEV_STATE,
+                                   QLCNIC_DEV_INITIALIZING);
+               QLC_SHARED_REG_WR32(adapter, QLCNIC_CRB_DRV_IDC_VER,
+                                   QLCNIC_DRV_IDC_VER);
+               qlcnic_idc_debug_info(adapter, 0);
+               qlcnic_api_unlock(adapter);
+               return 1;
+
+       case QLCNIC_DEV_READY:
+               ret = qlcnic_check_idc_ver(adapter);
+               qlcnic_api_unlock(adapter);
+               return ret;
+
+       case QLCNIC_DEV_NEED_RESET:
+               val = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DRV_STATE);
+               QLC_DEV_SET_RST_RDY(val, portnum);
+               QLC_SHARED_REG_WR32(adapter, QLCNIC_CRB_DRV_STATE, val);
+               break;
+
+       case QLCNIC_DEV_NEED_QUISCENT:
+               val = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DRV_STATE);
+               QLC_DEV_SET_QSCNT_RDY(val, portnum);
+               QLC_SHARED_REG_WR32(adapter, QLCNIC_CRB_DRV_STATE, val);
+               break;
+
+       case QLCNIC_DEV_FAILED:
+               dev_err(&adapter->pdev->dev, "Device in failed state.\n");
+               qlcnic_api_unlock(adapter);
+               return -1;
+
+       case QLCNIC_DEV_INITIALIZING:
+       case QLCNIC_DEV_QUISCENT:
+               break;
+       }
+
+       qlcnic_api_unlock(adapter);
+
+       do {
+               msleep(1000);
+               prev_state = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DEV_STATE);
+
+               if (prev_state == QLCNIC_DEV_QUISCENT)
+                       continue;
+       } while ((prev_state != QLCNIC_DEV_READY) && --dev_init_timeo);
+
+       if (!dev_init_timeo) {
+               dev_err(&adapter->pdev->dev,
+                       "Timed out waiting for the device to initialize\n");
+               return -1;
+       }
+
+       if (qlcnic_api_lock(adapter))
+               return -1;
+
+       val = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DRV_STATE);
+       QLC_DEV_CLR_RST_QSCNT(val, portnum);
+       QLC_SHARED_REG_WR32(adapter, QLCNIC_CRB_DRV_STATE, val);
+
+       ret = qlcnic_check_idc_ver(adapter);
+       qlcnic_api_unlock(adapter);
+
+       return ret;
+}
+
+static void
+qlcnic_fwinit_work(struct work_struct *work)
+{
+       struct qlcnic_adapter *adapter = container_of(work,
+                       struct qlcnic_adapter, fw_work.work);
+       u32 dev_state = 0xf;
+       u32 val;
+
+       if (qlcnic_api_lock(adapter))
+               goto err_ret;
+
+       dev_state = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DEV_STATE);
+       if (dev_state == QLCNIC_DEV_QUISCENT ||
+           dev_state == QLCNIC_DEV_NEED_QUISCENT) {
+               qlcnic_api_unlock(adapter);
+               qlcnic_schedule_work(adapter, qlcnic_fwinit_work,
+                                               FW_POLL_DELAY * 2);
+               return;
+       }
+
+       if (adapter->ahw->op_mode == QLCNIC_NON_PRIV_FUNC) {
+               qlcnic_api_unlock(adapter);
+               goto wait_npar;
+       }
+
+       if (dev_state == QLCNIC_DEV_INITIALIZING ||
+           dev_state == QLCNIC_DEV_READY) {
+               dev_info(&adapter->pdev->dev, "Detected state change from "
+                               "DEV_NEED_RESET, skipping ack check\n");
+               goto skip_ack_check;
+       }
+
+       if (adapter->fw_wait_cnt++ > adapter->reset_ack_timeo) {
+               dev_info(&adapter->pdev->dev, "Reset: failed to get ack in %d sec\n",
+                                       adapter->reset_ack_timeo);
+               goto skip_ack_check;
+       }
+
+       if (!qlcnic_check_drv_state(adapter)) {
+skip_ack_check:
+               dev_state = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DEV_STATE);
+
+               if (dev_state == QLCNIC_DEV_NEED_RESET) {
+                       QLC_SHARED_REG_WR32(adapter, QLCNIC_CRB_DEV_STATE,
+                                           QLCNIC_DEV_INITIALIZING);
+                       set_bit(__QLCNIC_START_FW, &adapter->state);
+                       QLCDB(adapter, DRV, "Restarting fw\n");
+                       qlcnic_idc_debug_info(adapter, 0);
+                       val = QLC_SHARED_REG_RD32(adapter,
+                                                 QLCNIC_CRB_DRV_STATE);
+                       QLC_DEV_SET_RST_RDY(val, adapter->portnum);
+                       QLC_SHARED_REG_WR32(adapter,
+                                           QLCNIC_CRB_DRV_STATE, val);
+               }
+
+               qlcnic_api_unlock(adapter);
+
+               rtnl_lock();
+               if (qlcnic_check_fw_dump_state(adapter) &&
+                   (adapter->flags & QLCNIC_FW_RESET_OWNER)) {
+                       QLCDB(adapter, DRV, "Take FW dump\n");
+                       qlcnic_dump_fw(adapter);
+                       adapter->flags |= QLCNIC_FW_HANG;
+               }
+               rtnl_unlock();
+
+               adapter->flags &= ~QLCNIC_FW_RESET_OWNER;
+               if (!adapter->nic_ops->start_firmware(adapter)) {
+                       qlcnic_schedule_work(adapter, qlcnic_attach_work, 0);
+                       adapter->fw_wait_cnt = 0;
+                       return;
+               }
+               goto err_ret;
+       }
+
+       qlcnic_api_unlock(adapter);
+
+wait_npar:
+       dev_state = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DEV_STATE);
+       QLCDB(adapter, HW, "Func waiting: Device state=%u\n", dev_state);
+
+       switch (dev_state) {
+       case QLCNIC_DEV_READY:
+               if (!qlcnic_start_firmware(adapter)) {
+                       qlcnic_schedule_work(adapter, qlcnic_attach_work, 0);
+                       adapter->fw_wait_cnt = 0;
+                       return;
+               }
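+               /* fall through */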
+       case QLCNIC_DEV_FAILED:
+               break;
+       default:
+               qlcnic_schedule_work(adapter,
+                       qlcnic_fwinit_work, FW_POLL_DELAY);
+               return;
+       }
+
+err_ret:
+       dev_err(&adapter->pdev->dev, "Fwinit work failed state=%u "
+               "fw_wait_cnt=%u\n", dev_state, adapter->fw_wait_cnt);
+       netif_device_attach(adapter->netdev);
+       qlcnic_clr_all_drv_state(adapter, 0);
+}
+
+static void
+qlcnic_detach_work(struct work_struct *work)
+{
+       struct qlcnic_adapter *adapter = container_of(work,
+                       struct qlcnic_adapter, fw_work.work);
+       struct net_device *netdev = adapter->netdev;
+       u32 status;
+
+       netif_device_detach(netdev);
+
+       /* Don't grab the rtnl lock during quiescent mode */
+       if (adapter->dev_state == QLCNIC_DEV_NEED_QUISCENT) {
+               if (netif_running(netdev))
+                       __qlcnic_down(adapter, netdev);
+       } else
+               qlcnic_down(adapter, netdev);
+
+       status = QLC_SHARED_REG_RD32(adapter, QLCNIC_PEG_HALT_STATUS1);
+
+       if (status & QLCNIC_RCODE_FATAL_ERROR) {
+               dev_err(&adapter->pdev->dev,
+                       "Detaching the device: peg halt status1=0x%x\n",
+                                       status);
+
+               if (QLCNIC_FWERROR_CODE(status) == QLCNIC_FWERROR_FAN_FAILURE) {
+                       dev_err(&adapter->pdev->dev,
+                               "On-board active cooling fan failed. Device has been halted.\n");
+                       dev_err(&adapter->pdev->dev,
+                               "Replace the adapter.\n");
+               }
+
+               goto err_ret;
+       }
+
+       if (adapter->ahw->temp == QLCNIC_TEMP_PANIC) {
+               dev_err(&adapter->pdev->dev, "Detaching the device: temp=%d\n",
+                       adapter->ahw->temp);
+               goto err_ret;
+       }
+
+       /* Don't ack if this instance is the reset owner */
+       if (!(adapter->flags & QLCNIC_FW_RESET_OWNER)) {
+               if (qlcnic_set_drv_state(adapter, adapter->dev_state)) {
+                       dev_err(&adapter->pdev->dev,
+                               "Failed to set driver state, detaching the device.\n");
+                       goto err_ret;
+               }
+       }
+
+       adapter->fw_wait_cnt = 0;
+
+       qlcnic_schedule_work(adapter, qlcnic_fwinit_work, FW_POLL_DELAY);
+
+       return;
+
+err_ret:
+       netif_device_attach(netdev);
+       qlcnic_clr_all_drv_state(adapter, 1);
+}
+
+/* Transition the NPAR state to non-operational */
+static void
+qlcnic_set_npar_non_operational(struct qlcnic_adapter *adapter)
+{
+       u32 state;
+
+       state = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DEV_NPAR_STATE);
+       if (state == QLCNIC_DEV_NPAR_NON_OPER)
+               return;
+
+       if (qlcnic_api_lock(adapter))
+               return;
+       QLC_SHARED_REG_WR32(adapter, QLCNIC_CRB_DEV_NPAR_STATE,
+                           QLCNIC_DEV_NPAR_NON_OPER);
+       qlcnic_api_unlock(adapter);
+}
+
+static void qlcnic_82xx_dev_request_reset(struct qlcnic_adapter *adapter,
+                                         u32 key)
+{
+       u32 state, xg_val = 0, gb_val = 0;
+
+       qlcnic_xg_set_xg0_mask(xg_val);
+       qlcnic_xg_set_xg1_mask(xg_val);
+       QLCWR32(adapter, QLCNIC_NIU_XG_PAUSE_CTL, xg_val);
+       qlcnic_gb_set_gb0_mask(gb_val);
+       qlcnic_gb_set_gb1_mask(gb_val);
+       qlcnic_gb_set_gb2_mask(gb_val);
+       qlcnic_gb_set_gb3_mask(gb_val);
+       QLCWR32(adapter, QLCNIC_NIU_GB_PAUSE_CTL, gb_val);
+       dev_info(&adapter->pdev->dev, "Pause control frames disabled"
+                               " on all ports\n");
+       adapter->need_fw_reset = 1;
+
+       if (qlcnic_api_lock(adapter))
+               return;
+
+       state = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DEV_STATE);
+
+       if (test_bit(__QLCNIC_MAINTENANCE_MODE, &adapter->state)) {
+               netdev_err(adapter->netdev, "%s: Device is in non-operational state\n",
+                          __func__);
+               qlcnic_api_unlock(adapter);
+
+               return;
+       }
+
+       if (state == QLCNIC_DEV_READY) {
+               QLC_SHARED_REG_WR32(adapter, QLCNIC_CRB_DEV_STATE,
+                                   QLCNIC_DEV_NEED_RESET);
+               adapter->flags |= QLCNIC_FW_RESET_OWNER;
+               QLCDB(adapter, DRV, "NEED_RESET state set\n");
+               qlcnic_idc_debug_info(adapter, 0);
+       }
+
+       QLC_SHARED_REG_WR32(adapter, QLCNIC_CRB_DEV_NPAR_STATE,
+                           QLCNIC_DEV_NPAR_NON_OPER);
+       qlcnic_api_unlock(adapter);
+}
+
+/* Transition from the NPAR NOT READY state to NPAR READY */
+static void
+qlcnic_dev_set_npar_ready(struct qlcnic_adapter *adapter)
+{
+       if (qlcnic_api_lock(adapter))
+               return;
+
+       QLC_SHARED_REG_WR32(adapter, QLCNIC_CRB_DEV_NPAR_STATE,
+                           QLCNIC_DEV_NPAR_OPER);
+       QLCDB(adapter, DRV, "NPAR operational state set\n");
+
+       qlcnic_api_unlock(adapter);
+}
+
+void qlcnic_schedule_work(struct qlcnic_adapter *adapter,
+                         work_func_t func, int delay)
+{
+       if (test_bit(__QLCNIC_AER, &adapter->state))
+               return;
+
+       INIT_DELAYED_WORK(&adapter->fw_work, func);
+       queue_delayed_work(adapter->qlcnic_wq, &adapter->fw_work,
+                          round_jiffies_relative(delay));
+}
+
+static void
+qlcnic_attach_work(struct work_struct *work)
+{
+       struct qlcnic_adapter *adapter = container_of(work,
+                               struct qlcnic_adapter, fw_work.work);
+       struct net_device *netdev = adapter->netdev;
+       u32 npar_state;
+
+       if (adapter->ahw->op_mode != QLCNIC_MGMT_FUNC) {
+               npar_state = QLC_SHARED_REG_RD32(adapter,
+                                                QLCNIC_CRB_DEV_NPAR_STATE);
+               if (adapter->fw_wait_cnt++ > QLCNIC_DEV_NPAR_OPER_TIMEO)
+                       qlcnic_clr_all_drv_state(adapter, 0);
+               else if (npar_state != QLCNIC_DEV_NPAR_OPER)
+                       qlcnic_schedule_work(adapter, qlcnic_attach_work,
+                                                       FW_POLL_DELAY);
+               else
+                       goto attach;
+               QLCDB(adapter, DRV, "Waiting for NPAR state to become operational\n");
+               return;
+       }
+attach:
+       qlcnic_dcb_get_info(adapter->dcb);
+
+       if (netif_running(netdev)) {
+               if (qlcnic_up(adapter, netdev))
+                       goto done;
+
+               qlcnic_restore_indev_addr(netdev, NETDEV_UP);
+       }
+
+done:
+       netif_device_attach(netdev);
+       adapter->fw_fail_cnt = 0;
+       adapter->flags &= ~QLCNIC_FW_HANG;
+       clear_bit(__QLCNIC_RESETTING, &adapter->state);
+       if (adapter->portnum == 0)
+               qlcnic_set_drv_version(adapter);
+
+       if (!qlcnic_clr_drv_state(adapter))
+               qlcnic_schedule_work(adapter, qlcnic_fw_poll_work,
+                                                       FW_POLL_DELAY);
+}
+
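+/* Periodic health check: watch the firmware heartbeat and IDC state, flag a
+ * firmware hang after FW_FAIL_THRESH missed beats, and schedule the detach/
+ * recovery path when a reset is needed. Returns 1 when recovery is started.
+ */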
+static int
+qlcnic_check_health(struct qlcnic_adapter *adapter)
+{
+       struct qlcnic_hardware_context *ahw = adapter->ahw;
+       struct qlcnic_fw_dump *fw_dump = &ahw->fw_dump;
+       u32 state = 0, heartbeat;
+       u32 peg_status;
+       int err = 0;
+
+       if (qlcnic_check_temp(adapter))
+               goto detach;
+
+       if (adapter->need_fw_reset)
+               qlcnic_dev_request_reset(adapter, 0);
+
+       state = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DEV_STATE);
+       if (state == QLCNIC_DEV_NEED_RESET) {
+               qlcnic_set_npar_non_operational(adapter);
+               adapter->need_fw_reset = 1;
+       } else if (state == QLCNIC_DEV_NEED_QUISCENT)
+               goto detach;
+
+       heartbeat = QLC_SHARED_REG_RD32(adapter, QLCNIC_PEG_ALIVE_COUNTER);
+       if (heartbeat != adapter->heartbeat) {
+               adapter->heartbeat = heartbeat;
+               adapter->fw_fail_cnt = 0;
+               if (adapter->need_fw_reset)
+                       goto detach;
+
+               if (ahw->reset_context && qlcnic_auto_fw_reset)
+                       qlcnic_reset_hw_context(adapter);
+
+               return 0;
+       }
+
+       if (++adapter->fw_fail_cnt < FW_FAIL_THRESH)
+               return 0;
+
+       adapter->flags |= QLCNIC_FW_HANG;
+
+       qlcnic_dev_request_reset(adapter, 0);
+
+       if (qlcnic_auto_fw_reset)
+               clear_bit(__QLCNIC_FW_ATTACHED, &adapter->state);
+
+       dev_err(&adapter->pdev->dev, "firmware hang detected\n");
+       peg_status = QLC_SHARED_REG_RD32(adapter, QLCNIC_PEG_HALT_STATUS1);
+       dev_err(&adapter->pdev->dev, "Dumping hw/fw registers\n"
+                       "PEG_HALT_STATUS1: 0x%x, PEG_HALT_STATUS2: 0x%x,\n"
+                       "PEG_NET_0_PC: 0x%x, PEG_NET_1_PC: 0x%x,\n"
+                       "PEG_NET_2_PC: 0x%x, PEG_NET_3_PC: 0x%x,\n"
+                       "PEG_NET_4_PC: 0x%x\n",
+                       peg_status,
+                       QLC_SHARED_REG_RD32(adapter, QLCNIC_PEG_HALT_STATUS2),
+                       QLCRD32(adapter, QLCNIC_CRB_PEG_NET_0 + 0x3c, &err),
+                       QLCRD32(adapter, QLCNIC_CRB_PEG_NET_1 + 0x3c, &err),
+                       QLCRD32(adapter, QLCNIC_CRB_PEG_NET_2 + 0x3c, &err),
+                       QLCRD32(adapter, QLCNIC_CRB_PEG_NET_3 + 0x3c, &err),
+                       QLCRD32(adapter, QLCNIC_CRB_PEG_NET_4 + 0x3c, &err));
+       if (QLCNIC_FWERROR_CODE(peg_status) == 0x67)
+               dev_err(&adapter->pdev->dev,
+                       "Firmware aborted with error code 0x00006700. "
+                               "Device is being reset.\n");
+detach:
+       adapter->dev_state = (state == QLCNIC_DEV_NEED_QUISCENT) ? state :
+               QLCNIC_DEV_NEED_RESET;
+
+       if (qlcnic_auto_fw_reset && !test_and_set_bit(__QLCNIC_RESETTING,
+                                                     &adapter->state)) {
+
+               qlcnic_schedule_work(adapter, qlcnic_detach_work, 0);
+               QLCDB(adapter, DRV, "fw recovery scheduled.\n");
+       } else if (!qlcnic_auto_fw_reset && fw_dump->enable &&
+                  adapter->flags & QLCNIC_FW_RESET_OWNER) {
+               qlcnic_dump_fw(adapter);
+       }
+
+       return 1;
+}
+
+void qlcnic_fw_poll_work(struct work_struct *work)
+{
+       struct qlcnic_adapter *adapter = container_of(work,
+                               struct qlcnic_adapter, fw_work.work);
+
+       if (test_bit(__QLCNIC_RESETTING, &adapter->state))
+               goto reschedule;
+
+       if (qlcnic_check_health(adapter))
+               return;
+
+       if (adapter->fhash.fnum)
+               qlcnic_prune_lb_filters(adapter);
+
+reschedule:
+       qlcnic_schedule_work(adapter, qlcnic_fw_poll_work, FW_POLL_DELAY);
+}
+
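+/* Return 1 when every lower-numbered PCI function in this slot is powered
+ * down (D3cold), i.e. this is the first function to come back up and should
+ * restart the firmware during recovery.
+ */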
+static int qlcnic_is_first_func(struct pci_dev *pdev)
+{
+       struct pci_dev *oth_pdev;
+       int val = pdev->devfn;
+
+       while (val-- > 0) {
+               oth_pdev = pci_get_domain_bus_and_slot(pci_domain_nr
+                       (pdev->bus), pdev->bus->number,
+                       PCI_DEVFN(PCI_SLOT(pdev->devfn), val));
+               if (!oth_pdev)
+                       continue;
+
+               if (oth_pdev->current_state != PCI_D3cold) {
+                       pci_dev_put(oth_pdev);
+                       return 0;
+               }
+               pci_dev_put(oth_pdev);
+       }
+       return 1;
+}
+
+static int qlcnic_attach_func(struct pci_dev *pdev)
+{
+       int err, first_func;
+       struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
+       struct net_device *netdev = adapter->netdev;
+
+       pdev->error_state = pci_channel_io_normal;
+
+       err = pci_enable_device(pdev);
+       if (err)
+               return err;
+
+       pci_set_master(pdev);
+       pci_restore_state(pdev);
+
+       first_func = qlcnic_is_first_func(pdev);
+
+       if (qlcnic_api_lock(adapter))
+               return -EINVAL;
+
+       if (adapter->ahw->op_mode != QLCNIC_NON_PRIV_FUNC && first_func) {
+               adapter->need_fw_reset = 1;
+               set_bit(__QLCNIC_START_FW, &adapter->state);
+               QLC_SHARED_REG_WR32(adapter, QLCNIC_CRB_DEV_STATE,
+                                   QLCNIC_DEV_INITIALIZING);
+               QLCDB(adapter, DRV, "Restarting fw\n");
+       }
+       qlcnic_api_unlock(adapter);
+
+       err = qlcnic_start_firmware(adapter);
+       if (err)
+               return err;
+
+       qlcnic_clr_drv_state(adapter);
+       kfree(adapter->msix_entries);
+       adapter->msix_entries = NULL;
+       err = qlcnic_setup_intr(adapter);
+
+       if (err) {
+               kfree(adapter->msix_entries);
+               netdev_err(netdev, "failed to setup interrupt\n");
+               return err;
+       }
+
+       if (netif_running(netdev)) {
+               err = qlcnic_attach(adapter);
+               if (err) {
+                       qlcnic_clr_all_drv_state(adapter, 1);
+                       clear_bit(__QLCNIC_AER, &adapter->state);
+                       netif_device_attach(netdev);
+                       return err;
+               }
+
+               err = qlcnic_up(adapter, netdev);
+               if (err)
+                       goto done;
+
+               qlcnic_restore_indev_addr(netdev, NETDEV_UP);
+       }
+ done:
+       netif_device_attach(netdev);
+       return err;
+}
+
+static pci_ers_result_t qlcnic_82xx_io_error_detected(struct pci_dev *pdev,
+                                                     pci_channel_state_t state)
+{
+       struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
+       struct net_device *netdev = adapter->netdev;
+
+       if (state == pci_channel_io_perm_failure)
+               return PCI_ERS_RESULT_DISCONNECT;
+
+       if (state == pci_channel_io_normal)
+               return PCI_ERS_RESULT_RECOVERED;
+
+       set_bit(__QLCNIC_AER, &adapter->state);
+       netif_device_detach(netdev);
+
+       cancel_delayed_work_sync(&adapter->fw_work);
+
+       if (netif_running(netdev))
+               qlcnic_down(adapter, netdev);
+
+       qlcnic_detach(adapter);
+       qlcnic_teardown_intr(adapter);
+
+       clear_bit(__QLCNIC_RESETTING, &adapter->state);
+
+       pci_save_state(pdev);
+       pci_disable_device(pdev);
+
+       return PCI_ERS_RESULT_NEED_RESET;
+}
+
+static pci_ers_result_t qlcnic_82xx_io_slot_reset(struct pci_dev *pdev)
+{
+       pci_ers_result_t res;
+
+       rtnl_lock();
+       res = qlcnic_attach_func(pdev) ? PCI_ERS_RESULT_DISCONNECT :
+                                        PCI_ERS_RESULT_RECOVERED;
+       rtnl_unlock();
+
+       return res;
+}
+
+static void qlcnic_82xx_io_resume(struct pci_dev *pdev)
+{
+       u32 state;
+       struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
+
+       pci_cleanup_aer_uncorrect_error_status(pdev);
+       state = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DEV_STATE);
+       if (state == QLCNIC_DEV_READY && test_and_clear_bit(__QLCNIC_AER,
+                                                           &adapter->state))
+               qlcnic_schedule_work(adapter, qlcnic_fw_poll_work,
+                                    FW_POLL_DELAY);
+}
+
+static pci_ers_result_t qlcnic_io_error_detected(struct pci_dev *pdev,
+                                                pci_channel_state_t state)
+{
+       struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
+       struct qlcnic_hardware_ops *hw_ops = adapter->ahw->hw_ops;
+
+       if (hw_ops->io_error_detected) {
+               return hw_ops->io_error_detected(pdev, state);
+       } else {
+               dev_err(&pdev->dev, "AER error_detected handler not registered.\n");
+               return PCI_ERS_RESULT_DISCONNECT;
+       }
+}
+
+static pci_ers_result_t qlcnic_io_slot_reset(struct pci_dev *pdev)
+{
+       struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
+       struct qlcnic_hardware_ops *hw_ops = adapter->ahw->hw_ops;
+
+       if (hw_ops->io_slot_reset) {
+               return hw_ops->io_slot_reset(pdev);
+       } else {
+               dev_err(&pdev->dev, "AER slot_reset handler not registered.\n");
+               return PCI_ERS_RESULT_DISCONNECT;
+       }
+}
+
+static void qlcnic_io_resume(struct pci_dev *pdev)
+{
+       struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
+       struct qlcnic_hardware_ops *hw_ops = adapter->ahw->hw_ops;
+
+       if (hw_ops->io_resume)
+               hw_ops->io_resume(pdev);
+       else
+               dev_err(&pdev->dev, "AER resume handler not registered.\n");
+}
+
+static int
+qlcnicvf_start_firmware(struct qlcnic_adapter *adapter)
+{
+       int err;
+
+       err = qlcnic_can_start_firmware(adapter);
+       if (err)
+               return err;
+
+       err = qlcnic_check_npar_opertional(adapter);
+       if (err)
+               return err;
+
+       err = qlcnic_initialize_nic(adapter);
+       if (err)
+               return err;
+
+       qlcnic_check_options(adapter);
+
+       err = qlcnic_set_eswitch_port_config(adapter);
+       if (err)
+               return err;
+
+       adapter->need_fw_reset = 0;
+
+       return err;
+}
+
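+/* Validate a requested SDS (Rx) or Tx ring count: it must be a power of two
+ * and must not exceed the number of online CPUs; changing Tx rings on 82xx
+ * additionally requires multi Tx queue support.
+ */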
+int qlcnic_validate_rings(struct qlcnic_adapter *adapter, __u32 ring_cnt,
+                         int queue_type)
+{
+       struct net_device *netdev = adapter->netdev;
+       u8 max_hw_rings = 0;
+       char buf[8];
+       int cur_rings;
+
+       if (queue_type == QLCNIC_RX_QUEUE) {
+               max_hw_rings = adapter->max_sds_rings;
+               cur_rings = adapter->drv_sds_rings;
+               strcpy(buf, "SDS");
+       } else if (queue_type == QLCNIC_TX_QUEUE) {
+               max_hw_rings = adapter->max_tx_rings;
+               cur_rings = adapter->drv_tx_rings;
+               strcpy(buf, "Tx");
+       }
+
+       if (!is_power_of_2(ring_cnt)) {
+               netdev_err(netdev, "%s rings value should be a power of 2\n",
+                          buf);
+               return -EINVAL;
+       }
+
+       if (qlcnic_82xx_check(adapter) && (queue_type == QLCNIC_TX_QUEUE) &&
+           !qlcnic_check_multi_tx(adapter)) {
+               netdev_err(netdev, "No Multi Tx queue support\n");
+               return -EINVAL;
+       }
+
+       if (ring_cnt > num_online_cpus()) {
+               netdev_err(netdev,
+                          "%s rings should not exceed the number of online CPUs (%u)\n",
+                          buf, num_online_cpus());
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
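+/* Re-apply the driver ring configuration: detach the netdev, tear down and
+ * re-allocate interrupts, and refresh the real Tx/Rx queue counts in case
+ * qlcnic_setup_intr() trimmed the requested ring counts; on 83xx the mailbox
+ * interrupt is recreated around the change.
+ */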
+int qlcnic_setup_rings(struct qlcnic_adapter *adapter)
+{
+       struct net_device *netdev = adapter->netdev;
+       u8 tx_rings, rx_rings;
+       int err;
+
+       if (test_bit(__QLCNIC_RESETTING, &adapter->state))
+               return -EBUSY;
+
+       tx_rings = adapter->drv_tss_rings;
+       rx_rings = adapter->drv_rss_rings;
+
+       netif_device_detach(netdev);
+
+       err = qlcnic_set_real_num_queues(adapter, tx_rings, rx_rings);
+       if (err)
+               goto done;
+
+       if (netif_running(netdev))
+               __qlcnic_down(adapter, netdev);
+
+       qlcnic_detach(adapter);
+
+       if (qlcnic_83xx_check(adapter)) {
+               qlcnic_83xx_free_mbx_intr(adapter);
+               qlcnic_83xx_enable_mbx_poll(adapter);
+       }
+
+       qlcnic_teardown_intr(adapter);
+
+       err = qlcnic_setup_intr(adapter);
+       if (err) {
+               kfree(adapter->msix_entries);
+               netdev_err(netdev, "failed to setup interrupt\n");
+               return err;
+       }
+
+       /* Check if we need to update real_num_{tx|rx}_queues because
+        * qlcnic_setup_intr() may change the number of Tx/Rx rings
+        */
+       if ((tx_rings != adapter->drv_tx_rings) ||
+           (rx_rings != adapter->drv_sds_rings)) {
+               err = qlcnic_set_real_num_queues(adapter,
+                                                adapter->drv_tx_rings,
+                                                adapter->drv_sds_rings);
+               if (err)
+                       goto done;
+       }
+
+       if (qlcnic_83xx_check(adapter)) {
+               qlcnic_83xx_initialize_nic(adapter, 1);
+               err = qlcnic_83xx_setup_mbx_intr(adapter);
+               qlcnic_83xx_disable_mbx_poll(adapter);
+               if (err) {
+                       dev_err(&adapter->pdev->dev,
+                               "failed to setup mbx interrupt\n");
+                       goto done;
+               }
+       }
+
+       if (netif_running(netdev)) {
+               err = qlcnic_attach(adapter);
+               if (err)
+                       goto done;
+               err = __qlcnic_up(adapter, netdev);
+               if (err)
+                       goto done;
+               qlcnic_restore_indev_addr(netdev, NETDEV_UP);
+       }
+done:
+       netif_device_attach(netdev);
+       clear_bit(__QLCNIC_RESETTING, &adapter->state);
+       return err;
+}
+
+#ifdef CONFIG_INET
+
+#define is_qlcnic_netdev(dev) (dev->netdev_ops == &qlcnic_netdev_ops)
+
+static void
+qlcnic_config_indev_addr(struct qlcnic_adapter *adapter,
+                       struct net_device *dev, unsigned long event)
+{
+       struct in_device *indev;
+
+       indev = in_dev_get(dev);
+       if (!indev)
+               return;
+
+       for_ifa(indev) {
+               switch (event) {
+               case NETDEV_UP:
+                       qlcnic_config_ipaddr(adapter,
+                                       ifa->ifa_address, QLCNIC_IP_UP);
+                       break;
+               case NETDEV_DOWN:
+                       qlcnic_config_ipaddr(adapter,
+                                       ifa->ifa_address, QLCNIC_IP_DOWN);
+                       break;
+               default:
+                       break;
+               }
+       } endfor_ifa(indev);
+
+       in_dev_put(indev);
+}
+
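+/* Re-program the IPv4 addresses of the base netdev and of every VLAN device
+ * stacked on top of it; called with NETDEV_UP from the attach and ring
+ * reconfiguration paths so the device's IP address configuration is restored.
+ */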
+void qlcnic_restore_indev_addr(struct net_device *netdev, unsigned long event)
+{
+       struct qlcnic_adapter *adapter = netdev_priv(netdev);
+       struct net_device *dev;
+       u16 vid;
+
+       qlcnic_config_indev_addr(adapter, netdev, event);
+
+       rcu_read_lock();
+       for_each_set_bit(vid, adapter->vlans, VLAN_N_VID) {
+               dev = __vlan_find_dev_deep_rcu(netdev, htons(ETH_P_8021Q), vid);
+               if (!dev)
+                       continue;
+               qlcnic_config_indev_addr(adapter, dev, event);
+       }
+       rcu_read_unlock();
+}
+
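+/* Notifier callbacks: resolve 802.1Q VLAN devices to the underlying qlcnic
+ * netdev and, while the device is up, add or remove the affected IPv4
+ * addresses in response to NETDEV_UP / NETDEV_DOWN events.
+ */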
+static int qlcnic_netdev_event(struct notifier_block *this,
+                                unsigned long event, void *ptr)
+{
+       struct qlcnic_adapter *adapter;
+       struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+
+recheck:
+       if (dev == NULL)
+               goto done;
+
+       if (dev->priv_flags & IFF_802_1Q_VLAN) {
+               dev = vlan_dev_real_dev(dev);
+               goto recheck;
+       }
+
+       if (!is_qlcnic_netdev(dev))
+               goto done;
+
+       adapter = netdev_priv(dev);
+
+       if (!adapter)
+               goto done;
+
+       if (!test_bit(__QLCNIC_DEV_UP, &adapter->state))
+               goto done;
+
+       qlcnic_config_indev_addr(adapter, dev, event);
+done:
+       return NOTIFY_DONE;
+}
+
+static int
+qlcnic_inetaddr_event(struct notifier_block *this,
+               unsigned long event, void *ptr)
+{
+       struct qlcnic_adapter *adapter;
+       struct net_device *dev;
+
+       struct in_ifaddr *ifa = (struct in_ifaddr *)ptr;
+
+       dev = ifa->ifa_dev ? ifa->ifa_dev->dev : NULL;
+
+recheck:
+       if (dev == NULL)
+               goto done;
+
+       if (dev->priv_flags & IFF_802_1Q_VLAN) {
+               dev = vlan_dev_real_dev(dev);
+               goto recheck;
+       }
+
+       if (!is_qlcnic_netdev(dev))
+               goto done;
+
+       adapter = netdev_priv(dev);
+
+       if (!adapter)
+               goto done;
+
+       if (!test_bit(__QLCNIC_DEV_UP, &adapter->state))
+               goto done;
+
+       switch (event) {
+       case NETDEV_UP:
+               qlcnic_config_ipaddr(adapter, ifa->ifa_address, QLCNIC_IP_UP);
+
+               break;
+       case NETDEV_DOWN:
+               qlcnic_config_ipaddr(adapter, ifa->ifa_address, QLCNIC_IP_DOWN);
+
+               break;
+       default:
+               break;
+       }
+
+done:
+       return NOTIFY_DONE;
+}
+
+static struct notifier_block   qlcnic_netdev_cb = {
+       .notifier_call = qlcnic_netdev_event,
+};
+
+static struct notifier_block qlcnic_inetaddr_cb = {
+       .notifier_call = qlcnic_inetaddr_event,
+};
+#else
+void qlcnic_restore_indev_addr(struct net_device *dev, unsigned long event)
+{ }
+#endif
+static const struct pci_error_handlers qlcnic_err_handler = {
+       .error_detected = qlcnic_io_error_detected,
+       .slot_reset = qlcnic_io_slot_reset,
+       .resume = qlcnic_io_resume,
+};
+
+static struct pci_driver qlcnic_driver = {
+       .name = qlcnic_driver_name,
+       .id_table = qlcnic_pci_tbl,
+       .probe = qlcnic_probe,
+       .remove = qlcnic_remove,
+#ifdef CONFIG_PM
+       .suspend = qlcnic_suspend,
+       .resume = qlcnic_resume,
+#endif
+       .shutdown = qlcnic_shutdown,
+       .err_handler = &qlcnic_err_handler,
+#ifdef CONFIG_QLCNIC_SRIOV
+       .sriov_configure = qlcnic_pci_sriov_configure,
+#endif
+};
+
+static int __init qlcnic_init_module(void)
+{
+       int ret;
+
+       printk(KERN_INFO "%s\n", qlcnic_driver_string);
+
+#ifdef CONFIG_INET
+       register_netdevice_notifier(&qlcnic_netdev_cb);
+       register_inetaddr_notifier(&qlcnic_inetaddr_cb);
+#endif
+
+       ret = pci_register_driver(&qlcnic_driver);
+       if (ret) {
+#ifdef CONFIG_INET
+               unregister_inetaddr_notifier(&qlcnic_inetaddr_cb);
+               unregister_netdevice_notifier(&qlcnic_netdev_cb);
+#endif
+       }
+
+       return ret;
+}
+
+module_init(qlcnic_init_module);
+
+static void __exit qlcnic_exit_module(void)
+{
+       pci_unregister_driver(&qlcnic_driver);
+
+#ifdef CONFIG_INET
+       unregister_inetaddr_notifier(&qlcnic_inetaddr_cb);
+       unregister_netdevice_notifier(&qlcnic_netdev_cb);
+#endif
+}
+
+module_exit(qlcnic_exit_module);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
new file mode 100644 (file)
index 0000000..0844b7c
--- /dev/null
@@ -0,0 +1,1451 @@
+/*
+ * QLogic qlcnic NIC Driver
+ * Copyright (c) 2009-2013 QLogic Corporation
+ *
+ * See LICENSE.qlcnic for copyright and licensing details.
+ */
+
+#include <net/ip.h>
+
+#include "qlcnic.h"
+#include "qlcnic_hdr.h"
+#include "qlcnic_83xx_hw.h"
+#include "qlcnic_hw.h"
+
+#define QLC_83XX_MINIDUMP_FLASH                0x520000
+#define QLC_83XX_OCM_INDEX                     3
+#define QLC_83XX_PCI_INDEX                     0
+#define QLC_83XX_DMA_ENGINE_INDEX              8
+
+static const u32 qlcnic_ms_read_data[] = {
+       0x410000A8, 0x410000AC, 0x410000B8, 0x410000BC
+};
+
+#define QLCNIC_DUMP_WCRB       BIT_0
+#define QLCNIC_DUMP_RWCRB      BIT_1
+#define QLCNIC_DUMP_ANDCRB     BIT_2
+#define QLCNIC_DUMP_ORCRB      BIT_3
+#define QLCNIC_DUMP_POLLCRB    BIT_4
+#define QLCNIC_DUMP_RD_SAVE    BIT_5
+#define QLCNIC_DUMP_WRT_SAVED  BIT_6
+#define QLCNIC_DUMP_MOD_SAVE_ST        BIT_7
+#define QLCNIC_DUMP_SKIP       BIT_7
+
+#define QLCNIC_DUMP_MASK_MAX   0xff
+
+struct qlcnic_pex_dma_descriptor {
+       u32     read_data_size;
+       u32     dma_desc_cmd;
+       u32     src_addr_low;
+       u32     src_addr_high;
+       u32     dma_bus_addr_low;
+       u32     dma_bus_addr_high;
+       u32     rsvd[6];
+} __packed;
+
+struct qlcnic_common_entry_hdr {
+       u32     type;
+       u32     offset;
+       u32     cap_size;
+#if defined(__LITTLE_ENDIAN)
+       u8      mask;
+       u8      rsvd[2];
+       u8      flags;
+#else
+       u8      flags;
+       u8      rsvd[2];
+       u8      mask;
+#endif
+} __packed;
+
+struct __crb {
+       u32     addr;
+#if defined(__LITTLE_ENDIAN)
+       u8      stride;
+       u8      rsvd1[3];
+#else
+       u8      rsvd1[3];
+       u8      stride;
+#endif
+       u32     data_size;
+       u32     no_ops;
+       u32     rsvd2[4];
+} __packed;
+
+struct __ctrl {
+       u32     addr;
+#if defined(__LITTLE_ENDIAN)
+       u8      stride;
+       u8      index_a;
+       u16     timeout;
+#else
+       u16     timeout;
+       u8      index_a;
+       u8      stride;
+#endif
+       u32     data_size;
+       u32     no_ops;
+#if defined(__LITTLE_ENDIAN)
+       u8      opcode;
+       u8      index_v;
+       u8      shl_val;
+       u8      shr_val;
+#else
+       u8      shr_val;
+       u8      shl_val;
+       u8      index_v;
+       u8      opcode;
+#endif
+       u32     val1;
+       u32     val2;
+       u32     val3;
+} __packed;
+
+struct __cache {
+       u32     addr;
+#if defined(__LITTLE_ENDIAN)
+       u16     stride;
+       u16     init_tag_val;
+#else
+       u16     init_tag_val;
+       u16     stride;
+#endif
+       u32     size;
+       u32     no_ops;
+       u32     ctrl_addr;
+       u32     ctrl_val;
+       u32     read_addr;
+#if defined(__LITTLE_ENDIAN)
+       u8      read_addr_stride;
+       u8      read_addr_num;
+       u8      rsvd1[2];
+#else
+       u8      rsvd1[2];
+       u8      read_addr_num;
+       u8      read_addr_stride;
+#endif
+} __packed;
+
+struct __ocm {
+       u8      rsvd[8];
+       u32     size;
+       u32     no_ops;
+       u8      rsvd1[8];
+       u32     read_addr;
+       u32     read_addr_stride;
+} __packed;
+
+struct __mem {
+       u32     desc_card_addr;
+       u32     dma_desc_cmd;
+       u32     start_dma_cmd;
+       u32     rsvd[3];
+       u32     addr;
+       u32     size;
+} __packed;
+
+struct __mux {
+       u32     addr;
+       u8      rsvd[4];
+       u32     size;
+       u32     no_ops;
+       u32     val;
+       u32     val_stride;
+       u32     read_addr;
+       u8      rsvd2[4];
+} __packed;
+
+struct __queue {
+       u32     sel_addr;
+#if defined(__LITTLE_ENDIAN)
+       u16     stride;
+       u8      rsvd[2];
+#else
+       u8      rsvd[2];
+       u16     stride;
+#endif
+       u32     size;
+       u32     no_ops;
+       u8      rsvd2[8];
+       u32     read_addr;
+#if defined(__LITTLE_ENDIAN)
+       u8      read_addr_stride;
+       u8      read_addr_cnt;
+       u8      rsvd3[2];
+#else
+       u8      rsvd3[2];
+       u8      read_addr_cnt;
+       u8      read_addr_stride;
+#endif
+} __packed;
+
+struct __pollrd {
+       u32     sel_addr;
+       u32     read_addr;
+       u32     sel_val;
+#if defined(__LITTLE_ENDIAN)
+       u16     sel_val_stride;
+       u16     no_ops;
+#else
+       u16     no_ops;
+       u16     sel_val_stride;
+#endif
+       u32     poll_wait;
+       u32     poll_mask;
+       u32     data_size;
+       u8      rsvd[4];
+} __packed;
+
+struct __mux2 {
+       u32     sel_addr1;
+       u32     sel_addr2;
+       u32     sel_val1;
+       u32     sel_val2;
+       u32     no_ops;
+       u32     sel_val_mask;
+       u32     read_addr;
+#if defined(__LITTLE_ENDIAN)
+       u8      sel_val_stride;
+       u8      data_size;
+       u8      rsvd[2];
+#else
+       u8      rsvd[2];
+       u8      data_size;
+       u8      sel_val_stride;
+#endif
+} __packed;
+
+struct __pollrdmwr {
+       u32     addr1;
+       u32     addr2;
+       u32     val1;
+       u32     val2;
+       u32     poll_wait;
+       u32     poll_mask;
+       u32     mod_mask;
+       u32     data_size;
+} __packed;
+
+struct qlcnic_dump_entry {
+       struct qlcnic_common_entry_hdr hdr;
+       union {
+               struct __crb            crb;
+               struct __cache          cache;
+               struct __ocm            ocm;
+               struct __mem            mem;
+               struct __mux            mux;
+               struct __queue          que;
+               struct __ctrl           ctrl;
+               struct __pollrdmwr      pollrdmwr;
+               struct __mux2           mux2;
+               struct __pollrd         pollrd;
+       } region;
+} __packed;
+
+enum qlcnic_minidump_opcode {
+       QLCNIC_DUMP_NOP         = 0,
+       QLCNIC_DUMP_READ_CRB    = 1,
+       QLCNIC_DUMP_READ_MUX    = 2,
+       QLCNIC_DUMP_QUEUE       = 3,
+       QLCNIC_DUMP_BRD_CONFIG  = 4,
+       QLCNIC_DUMP_READ_OCM    = 6,
+       QLCNIC_DUMP_PEG_REG     = 7,
+       QLCNIC_DUMP_L1_DTAG     = 8,
+       QLCNIC_DUMP_L1_ITAG     = 9,
+       QLCNIC_DUMP_L1_DATA     = 11,
+       QLCNIC_DUMP_L1_INST     = 12,
+       QLCNIC_DUMP_L2_DTAG     = 21,
+       QLCNIC_DUMP_L2_ITAG     = 22,
+       QLCNIC_DUMP_L2_DATA     = 23,
+       QLCNIC_DUMP_L2_INST     = 24,
+       QLCNIC_DUMP_POLL_RD     = 35,
+       QLCNIC_READ_MUX2        = 36,
+       QLCNIC_READ_POLLRDMWR   = 37,
+       QLCNIC_DUMP_READ_ROM    = 71,
+       QLCNIC_DUMP_READ_MEM    = 72,
+       QLCNIC_DUMP_READ_CTRL   = 98,
+       QLCNIC_DUMP_TLHDR       = 99,
+       QLCNIC_DUMP_RDEND       = 255
+};
+
+inline u32 qlcnic_82xx_get_saved_state(void *t_hdr, u32 index)
+{
+       struct qlcnic_82xx_dump_template_hdr *hdr = t_hdr;
+
+       return hdr->saved_state[index];
+}
+
+inline void qlcnic_82xx_set_saved_state(void *t_hdr, u32 index,
+                                       u32 value)
+{
+       struct qlcnic_82xx_dump_template_hdr *hdr = t_hdr;
+
+       hdr->saved_state[index] = value;
+}
+
+void qlcnic_82xx_cache_tmpl_hdr_values(struct qlcnic_fw_dump *fw_dump)
+{
+       struct qlcnic_82xx_dump_template_hdr *hdr;
+
+       hdr = fw_dump->tmpl_hdr;
+       fw_dump->tmpl_hdr_size = hdr->size;
+       fw_dump->version = hdr->version;
+       fw_dump->num_entries = hdr->num_entries;
+       fw_dump->offset = hdr->offset;
+
+       hdr->drv_cap_mask = hdr->cap_mask;
+       fw_dump->cap_mask = hdr->cap_mask;
+
+       fw_dump->use_pex_dma = (hdr->capabilities & BIT_0) ? true : false;
+}
+
+inline u32 qlcnic_82xx_get_cap_size(void *t_hdr, int index)
+{
+       struct qlcnic_82xx_dump_template_hdr *hdr = t_hdr;
+
+       return hdr->cap_sizes[index];
+}
+
+void qlcnic_82xx_set_sys_info(void *t_hdr, int idx, u32 value)
+{
+       struct qlcnic_82xx_dump_template_hdr *hdr = t_hdr;
+
+       hdr->sys_info[idx] = value;
+}
+
+void qlcnic_82xx_store_cap_mask(void *tmpl_hdr, u32 mask)
+{
+       struct qlcnic_82xx_dump_template_hdr *hdr = tmpl_hdr;
+
+       hdr->drv_cap_mask = mask;
+}
+
+inline u32 qlcnic_83xx_get_saved_state(void *t_hdr, u32 index)
+{
+       struct qlcnic_83xx_dump_template_hdr *hdr = t_hdr;
+
+       return hdr->saved_state[index];
+}
+
+inline void qlcnic_83xx_set_saved_state(void *t_hdr, u32 index,
+                                       u32 value)
+{
+       struct qlcnic_83xx_dump_template_hdr *hdr = t_hdr;
+
+       hdr->saved_state[index] = value;
+}
+
+#define QLCNIC_TEMPLATE_VERSION (0x20001)
+
+void qlcnic_83xx_cache_tmpl_hdr_values(struct qlcnic_fw_dump *fw_dump)
+{
+       struct qlcnic_83xx_dump_template_hdr *hdr;
+
+       hdr = fw_dump->tmpl_hdr;
+       fw_dump->tmpl_hdr_size = hdr->size;
+       fw_dump->version = hdr->version;
+       fw_dump->num_entries = hdr->num_entries;
+       fw_dump->offset = hdr->offset;
+
+       hdr->drv_cap_mask = hdr->cap_mask;
+       fw_dump->cap_mask = hdr->cap_mask;
+
+       fw_dump->use_pex_dma = (fw_dump->version & 0xfffff) >=
+                              QLCNIC_TEMPLATE_VERSION;
+}
+
+inline u32 qlcnic_83xx_get_cap_size(void *t_hdr, int index)
+{
+       struct qlcnic_83xx_dump_template_hdr *hdr = t_hdr;
+
+       return hdr->cap_sizes[index];
+}
+
+void qlcnic_83xx_set_sys_info(void *t_hdr, int idx, u32 value)
+{
+       struct qlcnic_83xx_dump_template_hdr *hdr = t_hdr;
+
+       hdr->sys_info[idx] = value;
+}
+
+void qlcnic_83xx_store_cap_mask(void *tmpl_hdr, u32 mask)
+{
+       struct qlcnic_83xx_dump_template_hdr *hdr;
+
+       hdr = tmpl_hdr;
+       hdr->drv_cap_mask = mask;
+}
+
+struct qlcnic_dump_operations {
+       enum qlcnic_minidump_opcode opcode;
+       u32 (*handler)(struct qlcnic_adapter *, struct qlcnic_dump_entry *,
+                      __le32 *);
+};
+
+static u32 qlcnic_dump_crb(struct qlcnic_adapter *adapter,
+                          struct qlcnic_dump_entry *entry, __le32 *buffer)
+{
+       int i;
+       u32 addr, data;
+       struct __crb *crb = &entry->region.crb;
+
+       addr = crb->addr;
+
+       for (i = 0; i < crb->no_ops; i++) {
+               data = qlcnic_ind_rd(adapter, addr);
+               *buffer++ = cpu_to_le32(addr);
+               *buffer++ = cpu_to_le32(data);
+               addr += crb->stride;
+       }
+       return crb->no_ops * 2 * sizeof(u32);
+}
+
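+/* CTRL entries drive register sequences instead of capturing data: each set
+ * opcode bit selects a write, read-modify-write, poll or save/restore step
+ * using the saved_state[] area of the template header, so the handler
+ * contributes no bytes to the dump buffer.
+ */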
+static u32 qlcnic_dump_ctrl(struct qlcnic_adapter *adapter,
+                           struct qlcnic_dump_entry *entry, __le32 *buffer)
+{
+       void *hdr = adapter->ahw->fw_dump.tmpl_hdr;
+       struct __ctrl *ctr = &entry->region.ctrl;
+       int i, k, timeout = 0;
+       u32 addr, data, temp;
+       u8 no_ops;
+
+       addr = ctr->addr;
+       no_ops = ctr->no_ops;
+
+       for (i = 0; i < no_ops; i++) {
+               for (k = 0; k < 8; k++) {
+                       if (!(ctr->opcode & (1 << k)))
+                               continue;
+                       switch (1 << k) {
+                       case QLCNIC_DUMP_WCRB:
+                               qlcnic_ind_wr(adapter, addr, ctr->val1);
+                               break;
+                       case QLCNIC_DUMP_RWCRB:
+                               data = qlcnic_ind_rd(adapter, addr);
+                               qlcnic_ind_wr(adapter, addr, data);
+                               break;
+                       case QLCNIC_DUMP_ANDCRB:
+                               data = qlcnic_ind_rd(adapter, addr);
+                               qlcnic_ind_wr(adapter, addr,
+                                             (data & ctr->val2));
+                               break;
+                       case QLCNIC_DUMP_ORCRB:
+                               data = qlcnic_ind_rd(adapter, addr);
+                               qlcnic_ind_wr(adapter, addr,
+                                             (data | ctr->val3));
+                               break;
+                       case QLCNIC_DUMP_POLLCRB:
+                               while (timeout <= ctr->timeout) {
+                                       data = qlcnic_ind_rd(adapter, addr);
+                                       if ((data & ctr->val2) == ctr->val1)
+                                               break;
+                                       usleep_range(1000, 2000);
+                                       timeout++;
+                               }
+                               if (timeout > ctr->timeout) {
+                                       dev_info(&adapter->pdev->dev,
+                                       "Timed out, aborting poll CRB\n");
+                                       return -EINVAL;
+                               }
+                               break;
+                       case QLCNIC_DUMP_RD_SAVE:
+                               temp = ctr->index_a;
+                               if (temp)
+                                       addr = qlcnic_get_saved_state(adapter,
+                                                                     hdr,
+                                                                     temp);
+                               data = qlcnic_ind_rd(adapter, addr);
+                               qlcnic_set_saved_state(adapter, hdr,
+                                                      ctr->index_v, data);
+                               break;
+                       case QLCNIC_DUMP_WRT_SAVED:
+                               temp = ctr->index_v;
+                               if (temp)
+                                       data = qlcnic_get_saved_state(adapter,
+                                                                     hdr,
+                                                                     temp);
+                               else
+                                       data = ctr->val1;
+
+                               temp = ctr->index_a;
+                               if (temp)
+                                       addr = qlcnic_get_saved_state(adapter,
+                                                                     hdr,
+                                                                     temp);
+                               qlcnic_ind_wr(adapter, addr, data);
+                               break;
+                       case QLCNIC_DUMP_MOD_SAVE_ST:
+                               data = qlcnic_get_saved_state(adapter, hdr,
+                                                             ctr->index_v);
+                               data <<= ctr->shl_val;
+                               data >>= ctr->shr_val;
+                               if (ctr->val2)
+                                       data &= ctr->val2;
+                               data |= ctr->val3;
+                               data += ctr->val1;
+                               qlcnic_set_saved_state(adapter, hdr,
+                                                      ctr->index_v, data);
+                               break;
+                       default:
+                               dev_info(&adapter->pdev->dev,
+                                        "Unknown opcode\n");
+                               break;
+                       }
+               }
+               addr += ctr->stride;
+       }
+       return 0;
+}
+
+static u32 qlcnic_dump_mux(struct qlcnic_adapter *adapter,
+                          struct qlcnic_dump_entry *entry, __le32 *buffer)
+{
+       int loop;
+       u32 val, data = 0;
+       struct __mux *mux = &entry->region.mux;
+
+       val = mux->val;
+       for (loop = 0; loop < mux->no_ops; loop++) {
+               qlcnic_ind_wr(adapter, mux->addr, val);
+               data = qlcnic_ind_rd(adapter, mux->read_addr);
+               *buffer++ = cpu_to_le32(val);
+               *buffer++ = cpu_to_le32(data);
+               val += mux->val_stride;
+       }
+       return 2 * mux->no_ops * sizeof(u32);
+}
+
+static u32 qlcnic_dump_que(struct qlcnic_adapter *adapter,
+                          struct qlcnic_dump_entry *entry, __le32 *buffer)
+{
+       int i, loop;
+       u32 cnt, addr, data, que_id = 0;
+       struct __queue *que = &entry->region.que;
+
+       addr = que->read_addr;
+       cnt = que->read_addr_cnt;
+
+       for (loop = 0; loop < que->no_ops; loop++) {
+               qlcnic_ind_wr(adapter, que->sel_addr, que_id);
+               addr = que->read_addr;
+               for (i = 0; i < cnt; i++) {
+                       data = qlcnic_ind_rd(adapter, addr);
+                       *buffer++ = cpu_to_le32(data);
+                       addr += que->read_addr_stride;
+               }
+               que_id += que->stride;
+       }
+       return que->no_ops * cnt * sizeof(u32);
+}
+
+static u32 qlcnic_dump_ocm(struct qlcnic_adapter *adapter,
+                          struct qlcnic_dump_entry *entry, __le32 *buffer)
+{
+       int i;
+       u32 data;
+       void __iomem *addr;
+       struct __ocm *ocm = &entry->region.ocm;
+
+       addr = adapter->ahw->pci_base0 + ocm->read_addr;
+       for (i = 0; i < ocm->no_ops; i++) {
+               data = readl(addr);
+               *buffer++ = cpu_to_le32(data);
+               addr += ocm->read_addr_stride;
+       }
+       return ocm->no_ops * sizeof(u32);
+}
+
+static u32 qlcnic_read_rom(struct qlcnic_adapter *adapter,
+                          struct qlcnic_dump_entry *entry, __le32 *buffer)
+{
+       int i, count = 0;
+       u32 fl_addr, size, val, lck_val, addr;
+       struct __mem *rom = &entry->region.mem;
+
+       fl_addr = rom->addr;
+       size = rom->size / 4;
+lock_try:
+       lck_val = QLC_SHARED_REG_RD32(adapter, QLCNIC_FLASH_LOCK);
+       if (!lck_val && count < MAX_CTL_CHECK) {
+               usleep_range(10000, 11000);
+               count++;
+               goto lock_try;
+       }
+       QLC_SHARED_REG_WR32(adapter, QLCNIC_FLASH_LOCK_OWNER,
+                           adapter->ahw->pci_func);
+       for (i = 0; i < size; i++) {
+               addr = fl_addr & 0xFFFF0000;
+               qlcnic_ind_wr(adapter, FLASH_ROM_WINDOW, addr);
+               addr = LSW(fl_addr) + FLASH_ROM_DATA;
+               val = qlcnic_ind_rd(adapter, addr);
+               fl_addr += 4;
+               *buffer++ = cpu_to_le32(val);
+       }
+       QLC_SHARED_REG_RD32(adapter, QLCNIC_FLASH_UNLOCK);
+       return rom->size;
+}
+
+static u32 qlcnic_dump_l1_cache(struct qlcnic_adapter *adapter,
+                               struct qlcnic_dump_entry *entry, __le32 *buffer)
+{
+       int i;
+       u32 cnt, val, data, addr;
+       struct __cache *l1 = &entry->region.cache;
+
+       val = l1->init_tag_val;
+
+       for (i = 0; i < l1->no_ops; i++) {
+               qlcnic_ind_wr(adapter, l1->addr, val);
+               qlcnic_ind_wr(adapter, l1->ctrl_addr, LSW(l1->ctrl_val));
+               addr = l1->read_addr;
+               cnt = l1->read_addr_num;
+               while (cnt) {
+                       data = qlcnic_ind_rd(adapter, addr);
+                       *buffer++ = cpu_to_le32(data);
+                       addr += l1->read_addr_stride;
+                       cnt--;
+               }
+               val += l1->stride;
+       }
+       return l1->no_ops * l1->read_addr_num * sizeof(u32);
+}
+
+static u32 qlcnic_dump_l2_cache(struct qlcnic_adapter *adapter,
+                               struct qlcnic_dump_entry *entry, __le32 *buffer)
+{
+       int i;
+       u32 cnt, val, data, addr;
+       u8 poll_mask, poll_to, time_out = 0;
+       struct __cache *l2 = &entry->region.cache;
+
+       val = l2->init_tag_val;
+       poll_mask = LSB(MSW(l2->ctrl_val));
+       poll_to = MSB(MSW(l2->ctrl_val));
+
+       for (i = 0; i < l2->no_ops; i++) {
+               qlcnic_ind_wr(adapter, l2->addr, val);
+               if (LSW(l2->ctrl_val))
+                       qlcnic_ind_wr(adapter, l2->ctrl_addr,
+                                     LSW(l2->ctrl_val));
+               if (!poll_mask)
+                       goto skip_poll;
+               do {
+                       data = qlcnic_ind_rd(adapter, l2->ctrl_addr);
+                       if (!(data & poll_mask))
+                               break;
+                       usleep_range(1000, 2000);
+                       time_out++;
+               } while (time_out <= poll_to);
+
+               if (time_out > poll_to) {
+                       dev_err(&adapter->pdev->dev,
+                               "Timeout exceeded in %s, aborting dump\n",
+                               __func__);
+                       return -EINVAL;
+               }
+skip_poll:
+               addr = l2->read_addr;
+               cnt = l2->read_addr_num;
+               while (cnt) {
+                       data = qlcnic_ind_rd(adapter, addr);
+                       *buffer++ = cpu_to_le32(data);
+                       addr += l2->read_addr_stride;
+                       cnt--;
+               }
+               val += l2->stride;
+       }
+       return l2->no_ops * l2->read_addr_num * sizeof(u32);
+}
+
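+/* Memory capture through the memory-side test agent: each operation programs
+ * the address, starts the agent and copies 16 bytes from the four read-data
+ * registers, so the address must be 16-byte aligned and the size a multiple
+ * of 16.
+ */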
+static u32 qlcnic_read_memory_test_agent(struct qlcnic_adapter *adapter,
+                                        struct __mem *mem, __le32 *buffer,
+                                        int *ret)
+{
+       u32 addr, data, test;
+       int i, reg_read;
+
+       reg_read = mem->size;
+       addr = mem->addr;
+       /* the address must be 16-byte aligned and the size a multiple of 16 */
+       if ((addr & 0xf) || (reg_read%16)) {
+               dev_info(&adapter->pdev->dev,
+                        "Unaligned memory addr:0x%x size:0x%x\n",
+                        addr, reg_read);
+               *ret = -EINVAL;
+               return 0;
+       }
+
+       mutex_lock(&adapter->ahw->mem_lock);
+
+       while (reg_read != 0) {
+               qlcnic_ind_wr(adapter, QLCNIC_MS_ADDR_LO, addr);
+               qlcnic_ind_wr(adapter, QLCNIC_MS_ADDR_HI, 0);
+               qlcnic_ind_wr(adapter, QLCNIC_MS_CTRL, QLCNIC_TA_START_ENABLE);
+
+               for (i = 0; i < MAX_CTL_CHECK; i++) {
+                       test = qlcnic_ind_rd(adapter, QLCNIC_MS_CTRL);
+                       if (!(test & TA_CTL_BUSY))
+                               break;
+               }
+               if (i == MAX_CTL_CHECK) {
+                       if (printk_ratelimit())
+                               dev_err(&adapter->pdev->dev,
+                                       "failed to read through agent\n");
+                       *ret = -EIO;
+                       goto out;
+               }
+               for (i = 0; i < 4; i++) {
+                       data = qlcnic_ind_rd(adapter, qlcnic_ms_read_data[i]);
+                       *buffer++ = cpu_to_le32(data);
+               }
+               addr += 16;
+               reg_read -= 16;
+               ret += 16;
+       }
+out:
+       mutex_unlock(&adapter->ahw->mem_lock);
+       return mem->size;
+}
+
+/* DMA register base address */
+#define QLC_DMA_REG_BASE_ADDR(dma_no)  (0x77320000 + (dma_no * 0x10000))
+
+/* DMA register offsets w.r.t base address */
+#define QLC_DMA_CMD_BUFF_ADDR_LOW      0
+#define QLC_DMA_CMD_BUFF_ADDR_HI       4
+#define QLC_DMA_CMD_STATUS_CTRL                8
+
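+/* Start one PEX DMA operation: write the descriptor address and the start
+ * command to the selected DMA engine, then poll the status register (sleeping
+ * while the busy bit BIT_1 is set) for up to 400 iterations before giving up.
+ */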
+static int qlcnic_start_pex_dma(struct qlcnic_adapter *adapter,
+                               struct __mem *mem)
+{
+       struct device *dev = &adapter->pdev->dev;
+       u32 dma_no, dma_base_addr, temp_addr;
+       int i, ret, dma_sts;
+       void *tmpl_hdr;
+
+       tmpl_hdr = adapter->ahw->fw_dump.tmpl_hdr;
+       dma_no = qlcnic_get_saved_state(adapter, tmpl_hdr,
+                                       QLC_83XX_DMA_ENGINE_INDEX);
+       dma_base_addr = QLC_DMA_REG_BASE_ADDR(dma_no);
+
+       temp_addr = dma_base_addr + QLC_DMA_CMD_BUFF_ADDR_LOW;
+       ret = qlcnic_ind_wr(adapter, temp_addr, mem->desc_card_addr);
+       if (ret)
+               return ret;
+
+       temp_addr = dma_base_addr + QLC_DMA_CMD_BUFF_ADDR_HI;
+       ret = qlcnic_ind_wr(adapter, temp_addr, 0);
+       if (ret)
+               return ret;
+
+       temp_addr = dma_base_addr + QLC_DMA_CMD_STATUS_CTRL;
+       ret = qlcnic_ind_wr(adapter, temp_addr, mem->start_dma_cmd);
+       if (ret)
+               return ret;
+
+       /* Wait for DMA to complete */
+       temp_addr = dma_base_addr + QLC_DMA_CMD_STATUS_CTRL;
+       for (i = 0; i < 400; i++) {
+               dma_sts = qlcnic_ind_rd(adapter, temp_addr);
+
+               if (dma_sts & BIT_1)
+                       usleep_range(250, 500);
+               else
+                       break;
+       }
+
+       if (i >= 400) {
+               dev_info(dev, "PEX DMA operation timed out");
+               ret = -EIO;
+       }
+
+       return ret;
+}
+
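+/* DMA-based memory capture: a descriptor is written into MS memory at
+ * desc_card_addr and the engine copies up to QLC_PEX_DMA_READ_SIZE bytes per
+ * operation into the pre-allocated coherent dma_buffer, which is then copied
+ * into the dump buffer; large regions are split across multiple operations.
+ */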
+static u32 qlcnic_read_memory_pexdma(struct qlcnic_adapter *adapter,
+                                    struct __mem *mem,
+                                    __le32 *buffer, int *ret)
+{
+       struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump;
+       u32 temp, dma_base_addr, size = 0, read_size = 0;
+       struct qlcnic_pex_dma_descriptor *dma_descr;
+       struct device *dev = &adapter->pdev->dev;
+       dma_addr_t dma_phys_addr;
+       void *dma_buffer;
+       void *tmpl_hdr;
+
+       tmpl_hdr = fw_dump->tmpl_hdr;
+
+       /* Check if DMA engine is available */
+       temp = qlcnic_get_saved_state(adapter, tmpl_hdr,
+                                     QLC_83XX_DMA_ENGINE_INDEX);
+       dma_base_addr = QLC_DMA_REG_BASE_ADDR(temp);
+       temp = qlcnic_ind_rd(adapter,
+                            dma_base_addr + QLC_DMA_CMD_STATUS_CTRL);
+
+       if (!(temp & BIT_31)) {
+               dev_info(dev, "%s: DMA engine is not available\n", __func__);
+               *ret = -EIO;
+               return 0;
+       }
+
+       /* Create DMA descriptor */
+       dma_descr = kzalloc(sizeof(struct qlcnic_pex_dma_descriptor),
+                           GFP_KERNEL);
+       if (!dma_descr) {
+               *ret = -ENOMEM;
+               return 0;
+       }
+
+       /* dma_desc_cmd  0:15  = 0
+        * dma_desc_cmd 16:19  = mem->dma_desc_cmd 0:3
+        * dma_desc_cmd 20:23  = pci function number
+        * dma_desc_cmd 24:31  = mem->dma_desc_cmd 8:15
+        */
+       dma_phys_addr = fw_dump->phys_addr;
+       dma_buffer = fw_dump->dma_buffer;
+       temp = 0;
+       temp = mem->dma_desc_cmd & 0xff0f;
+       temp |= (adapter->ahw->pci_func & 0xf) << 4;
+       dma_descr->dma_desc_cmd = (temp << 16) & 0xffff0000;
+       dma_descr->dma_bus_addr_low = LSD(dma_phys_addr);
+       dma_descr->dma_bus_addr_high = MSD(dma_phys_addr);
+       dma_descr->src_addr_high = 0;
+
+       /* Collect memory dump using multiple DMA operations if required */
+       while (read_size < mem->size) {
+               if (mem->size - read_size >= QLC_PEX_DMA_READ_SIZE)
+                       size = QLC_PEX_DMA_READ_SIZE;
+               else
+                       size = mem->size - read_size;
+
+               dma_descr->src_addr_low = mem->addr + read_size;
+               dma_descr->read_data_size = size;
+
+               /* Write DMA descriptor to MS memory */
+               temp = sizeof(struct qlcnic_pex_dma_descriptor) / 16;
+               *ret = qlcnic_ms_mem_write128(adapter, mem->desc_card_addr,
+                                             (u32 *)dma_descr, temp);
+               if (*ret) {
+                       dev_info(dev, "Failed to write DMA descriptor to MS memory at address 0x%x\n",
+                                mem->desc_card_addr);
+                       goto free_dma_descr;
+               }
+
+               *ret = qlcnic_start_pex_dma(adapter, mem);
+               if (*ret) {
+                       dev_info(dev, "Failed to start PEX DMA operation\n");
+                       goto free_dma_descr;
+               }
+
+               memcpy(buffer, dma_buffer, size);
+               buffer += size / 4;
+               read_size += size;
+       }
+
+free_dma_descr:
+       kfree(dma_descr);
+
+       return read_size;
+}
+
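+/* READ_MEM entries prefer the PEX DMA path when the template advertises it
+ * and a DMA buffer was allocated; on any DMA failure the slower test-agent
+ * method is used as a fallback.
+ */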
+static u32 qlcnic_read_memory(struct qlcnic_adapter *adapter,
+                             struct qlcnic_dump_entry *entry, __le32 *buffer)
+{
+       struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump;
+       struct device *dev = &adapter->pdev->dev;
+       struct __mem *mem = &entry->region.mem;
+       u32 data_size;
+       int ret = 0;
+
+       if (fw_dump->use_pex_dma) {
+               data_size = qlcnic_read_memory_pexdma(adapter, mem, buffer,
+                                                     &ret);
+               if (ret)
+                       dev_info(dev,
+                                "Failed to read memory dump using PEX DMA: mask[0x%x]\n",
+                                entry->hdr.mask);
+               else
+                       return data_size;
+       }
+
+       data_size = qlcnic_read_memory_test_agent(adapter, mem, buffer, &ret);
+       if (ret) {
+               dev_info(dev,
+                        "Failed to read memory dump using test agent method: mask[0x%x]\n",
+                        entry->hdr.mask);
+               return 0;
+       } else {
+               return data_size;
+       }
+}
+
+static u32 qlcnic_dump_nop(struct qlcnic_adapter *adapter,
+                          struct qlcnic_dump_entry *entry, __le32 *buffer)
+{
+       entry->hdr.flags |= QLCNIC_DUMP_SKIP;
+       return 0;
+}
+
+static int qlcnic_valid_dump_entry(struct device *dev,
+                                  struct qlcnic_dump_entry *entry, u32 size)
+{
+       int ret = 1;
+       if (size != entry->hdr.cap_size) {
+               dev_err(dev,
+                       "Invalid entry, Type:%d\tMask:%d\tSize:%d\tCap_size:%d\n",
+                       entry->hdr.type, entry->hdr.mask, size,
+                       entry->hdr.cap_size);
+               ret = 0;
+       }
+       return ret;
+}
+
+static u32 qlcnic_read_pollrdmwr(struct qlcnic_adapter *adapter,
+                                struct qlcnic_dump_entry *entry,
+                                __le32 *buffer)
+{
+       struct __pollrdmwr *poll = &entry->region.pollrdmwr;
+       u32 data, wait_count, poll_wait, temp;
+
+       poll_wait = poll->poll_wait;
+
+       qlcnic_ind_wr(adapter, poll->addr1, poll->val1);
+       wait_count = 0;
+
+       while (wait_count < poll_wait) {
+               data = qlcnic_ind_rd(adapter, poll->addr1);
+               if ((data & poll->poll_mask) != 0)
+                       break;
+               wait_count++;
+       }
+
+       if (wait_count == poll_wait) {
+               dev_err(&adapter->pdev->dev,
+                       "Timeout exceeded in %s, aborting dump\n",
+                       __func__);
+               return 0;
+       }
+
+       data = qlcnic_ind_rd(adapter, poll->addr2) & poll->mod_mask;
+       qlcnic_ind_wr(adapter, poll->addr2, data);
+       qlcnic_ind_wr(adapter, poll->addr1, poll->val2);
+       wait_count = 0;
+
+       while (wait_count < poll_wait) {
+               temp = qlcnic_ind_rd(adapter, poll->addr1);
+               if ((temp & poll->poll_mask) != 0)
+                       break;
+               wait_count++;
+       }
+
+       *buffer++ = cpu_to_le32(poll->addr2);
+       *buffer++ = cpu_to_le32(data);
+
+       return 2 * sizeof(u32);
+}
+
+static u32 qlcnic_read_pollrd(struct qlcnic_adapter *adapter,
+                             struct qlcnic_dump_entry *entry, __le32 *buffer)
+{
+       struct __pollrd *pollrd = &entry->region.pollrd;
+       u32 data, wait_count, poll_wait, sel_val;
+       int i;
+
+       poll_wait = pollrd->poll_wait;
+       sel_val = pollrd->sel_val;
+
+       for (i = 0; i < pollrd->no_ops; i++) {
+               qlcnic_ind_wr(adapter, pollrd->sel_addr, sel_val);
+               wait_count = 0;
+               while (wait_count < poll_wait) {
+                       data = qlcnic_ind_rd(adapter, pollrd->sel_addr);
+                       if ((data & pollrd->poll_mask) != 0)
+                               break;
+                       wait_count++;
+               }
+
+               if (wait_count == poll_wait) {
+                       dev_err(&adapter->pdev->dev,
+                               "Timeout exceeded in %s, aborting dump\n",
+                               __func__);
+                       return 0;
+               }
+
+               data = qlcnic_ind_rd(adapter, pollrd->read_addr);
+               *buffer++ = cpu_to_le32(sel_val);
+               *buffer++ = cpu_to_le32(data);
+               sel_val += pollrd->sel_val_stride;
+       }
+       return pollrd->no_ops * (2 * sizeof(u32));
+}
+
+static u32 qlcnic_read_mux2(struct qlcnic_adapter *adapter,
+                           struct qlcnic_dump_entry *entry, __le32 *buffer)
+{
+       struct __mux2 *mux2 = &entry->region.mux2;
+       u32 data;
+       u32 t_sel_val, sel_val1, sel_val2;
+       int i;
+
+       sel_val1 = mux2->sel_val1;
+       sel_val2 = mux2->sel_val2;
+
+       for (i = 0; i < mux2->no_ops; i++) {
+               qlcnic_ind_wr(adapter, mux2->sel_addr1, sel_val1);
+               t_sel_val = sel_val1 & mux2->sel_val_mask;
+               qlcnic_ind_wr(adapter, mux2->sel_addr2, t_sel_val);
+               data = qlcnic_ind_rd(adapter, mux2->read_addr);
+               *buffer++ = cpu_to_le32(t_sel_val);
+               *buffer++ = cpu_to_le32(data);
+               qlcnic_ind_wr(adapter, mux2->sel_addr1, sel_val2);
+               t_sel_val = sel_val2 & mux2->sel_val_mask;
+               qlcnic_ind_wr(adapter, mux2->sel_addr2, t_sel_val);
+               data = qlcnic_ind_rd(adapter, mux2->read_addr);
+               *buffer++ = cpu_to_le32(t_sel_val);
+               *buffer++ = cpu_to_le32(data);
+               sel_val1 += mux2->sel_val_stride;
+               sel_val2 += mux2->sel_val_stride;
+       }
+
+       return mux2->no_ops * (4 * sizeof(u32));
+}
+
+static u32 qlcnic_83xx_dump_rom(struct qlcnic_adapter *adapter,
+                               struct qlcnic_dump_entry *entry, __le32 *buffer)
+{
+       u32 fl_addr, size;
+       struct __mem *rom = &entry->region.mem;
+
+       fl_addr = rom->addr;
+       size = rom->size / 4;
+
+       if (!qlcnic_83xx_lockless_flash_read32(adapter, fl_addr,
+                                              (u8 *)buffer, size))
+               return rom->size;
+
+       return 0;
+}
+
+static const struct qlcnic_dump_operations qlcnic_fw_dump_ops[] = {
+       {QLCNIC_DUMP_NOP, qlcnic_dump_nop},
+       {QLCNIC_DUMP_READ_CRB, qlcnic_dump_crb},
+       {QLCNIC_DUMP_READ_MUX, qlcnic_dump_mux},
+       {QLCNIC_DUMP_QUEUE, qlcnic_dump_que},
+       {QLCNIC_DUMP_BRD_CONFIG, qlcnic_read_rom},
+       {QLCNIC_DUMP_READ_OCM, qlcnic_dump_ocm},
+       {QLCNIC_DUMP_PEG_REG, qlcnic_dump_ctrl},
+       {QLCNIC_DUMP_L1_DTAG, qlcnic_dump_l1_cache},
+       {QLCNIC_DUMP_L1_ITAG, qlcnic_dump_l1_cache},
+       {QLCNIC_DUMP_L1_DATA, qlcnic_dump_l1_cache},
+       {QLCNIC_DUMP_L1_INST, qlcnic_dump_l1_cache},
+       {QLCNIC_DUMP_L2_DTAG, qlcnic_dump_l2_cache},
+       {QLCNIC_DUMP_L2_ITAG, qlcnic_dump_l2_cache},
+       {QLCNIC_DUMP_L2_DATA, qlcnic_dump_l2_cache},
+       {QLCNIC_DUMP_L2_INST, qlcnic_dump_l2_cache},
+       {QLCNIC_DUMP_READ_ROM, qlcnic_read_rom},
+       {QLCNIC_DUMP_READ_MEM, qlcnic_read_memory},
+       {QLCNIC_DUMP_READ_CTRL, qlcnic_dump_ctrl},
+       {QLCNIC_DUMP_TLHDR, qlcnic_dump_nop},
+       {QLCNIC_DUMP_RDEND, qlcnic_dump_nop},
+};
+
+static const struct qlcnic_dump_operations qlcnic_83xx_fw_dump_ops[] = {
+       {QLCNIC_DUMP_NOP, qlcnic_dump_nop},
+       {QLCNIC_DUMP_READ_CRB, qlcnic_dump_crb},
+       {QLCNIC_DUMP_READ_MUX, qlcnic_dump_mux},
+       {QLCNIC_DUMP_QUEUE, qlcnic_dump_que},
+       {QLCNIC_DUMP_BRD_CONFIG, qlcnic_83xx_dump_rom},
+       {QLCNIC_DUMP_READ_OCM, qlcnic_dump_ocm},
+       {QLCNIC_DUMP_PEG_REG, qlcnic_dump_ctrl},
+       {QLCNIC_DUMP_L1_DTAG, qlcnic_dump_l1_cache},
+       {QLCNIC_DUMP_L1_ITAG, qlcnic_dump_l1_cache},
+       {QLCNIC_DUMP_L1_DATA, qlcnic_dump_l1_cache},
+       {QLCNIC_DUMP_L1_INST, qlcnic_dump_l1_cache},
+       {QLCNIC_DUMP_L2_DTAG, qlcnic_dump_l2_cache},
+       {QLCNIC_DUMP_L2_ITAG, qlcnic_dump_l2_cache},
+       {QLCNIC_DUMP_L2_DATA, qlcnic_dump_l2_cache},
+       {QLCNIC_DUMP_L2_INST, qlcnic_dump_l2_cache},
+       {QLCNIC_DUMP_POLL_RD, qlcnic_read_pollrd},
+       {QLCNIC_READ_MUX2, qlcnic_read_mux2},
+       {QLCNIC_READ_POLLRDMWR, qlcnic_read_pollrdmwr},
+       {QLCNIC_DUMP_READ_ROM, qlcnic_83xx_dump_rom},
+       {QLCNIC_DUMP_READ_MEM, qlcnic_read_memory},
+       {QLCNIC_DUMP_READ_CTRL, qlcnic_dump_ctrl},
+       {QLCNIC_DUMP_TLHDR, qlcnic_dump_nop},
+       {QLCNIC_DUMP_RDEND, qlcnic_dump_nop},
+};
+
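+/* Ones'-complement style checksum over the whole template: 32-bit words are
+ * summed into 64 bits, the carries are folded back in and the complement is
+ * returned, so a valid template checksums to zero.
+ */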
+static uint32_t qlcnic_temp_checksum(uint32_t *temp_buffer, u32 temp_size)
+{
+       uint64_t sum = 0;
+       int count = temp_size / sizeof(uint32_t);
+       while (count-- > 0)
+               sum += *temp_buffer++;
+       while (sum >> 32)
+               sum = (sum & 0xFFFFFFFF) + (sum >> 32);
+       return ~sum;
+}
+
+static int qlcnic_fw_flash_get_minidump_temp(struct qlcnic_adapter *adapter,
+                                            u8 *buffer, u32 size)
+{
+       int ret = 0;
+
+       if (qlcnic_82xx_check(adapter))
+               return -EIO;
+
+       if (qlcnic_83xx_lock_flash(adapter))
+               return -EIO;
+
+       ret = qlcnic_83xx_lockless_flash_read32(adapter,
+                                               QLC_83XX_MINIDUMP_FLASH,
+                                               buffer, size / sizeof(u32));
+
+       qlcnic_83xx_unlock_flash(adapter);
+
+       return ret;
+}
+
+static int
+qlcnic_fw_flash_get_minidump_temp_size(struct qlcnic_adapter *adapter,
+                                      struct qlcnic_cmd_args *cmd)
+{
+       struct qlcnic_83xx_dump_template_hdr tmp_hdr;
+       u32 size = sizeof(tmp_hdr) / sizeof(u32);
+       int ret = 0;
+
+       if (qlcnic_82xx_check(adapter))
+               return -EIO;
+
+       if (qlcnic_83xx_lock_flash(adapter))
+               return -EIO;
+
+       ret = qlcnic_83xx_lockless_flash_read32(adapter,
+                                               QLC_83XX_MINIDUMP_FLASH,
+                                               (u8 *)&tmp_hdr, size);
+
+       qlcnic_83xx_unlock_flash(adapter);
+
+       cmd->rsp.arg[2] = tmp_hdr.size;
+       cmd->rsp.arg[3] = tmp_hdr.version;
+
+       return ret;
+}
+
+static int qlcnic_fw_get_minidump_temp_size(struct qlcnic_adapter *adapter,
+                                           u32 *version, u32 *temp_size,
+                                           u8 *use_flash_temp)
+{
+       int err = 0;
+       struct qlcnic_cmd_args cmd;
+
+       if (qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_TEMP_SIZE))
+               return -ENOMEM;
+
+       err = qlcnic_issue_cmd(adapter, &cmd);
+       if (err != QLCNIC_RCODE_SUCCESS) {
+               if (qlcnic_fw_flash_get_minidump_temp_size(adapter, &cmd)) {
+                       qlcnic_free_mbx_args(&cmd);
+                       return -EIO;
+               }
+               *use_flash_temp = 1;
+       }
+
+       *temp_size = cmd.rsp.arg[2];
+       *version = cmd.rsp.arg[3];
+       qlcnic_free_mbx_args(&cmd);
+
+       if (!(*temp_size))
+               return -EIO;
+
+       return 0;
+}
+
+static int __qlcnic_fw_cmd_get_minidump_temp(struct qlcnic_adapter *adapter,
+                                            u32 *buffer, u32 temp_size)
+{
+       int err = 0, i;
+       void *tmp_addr;
+       __le32 *tmp_buf;
+       struct qlcnic_cmd_args cmd;
+       dma_addr_t tmp_addr_t = 0;
+
+       tmp_addr = dma_alloc_coherent(&adapter->pdev->dev, temp_size,
+                                     &tmp_addr_t, GFP_KERNEL);
+       if (!tmp_addr)
+               return -ENOMEM;
+
+       if (qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_TEMP_HDR)) {
+               err = -ENOMEM;
+               goto free_mem;
+       }
+
+       cmd.req.arg[1] = LSD(tmp_addr_t);
+       cmd.req.arg[2] = MSD(tmp_addr_t);
+       cmd.req.arg[3] = temp_size;
+       err = qlcnic_issue_cmd(adapter, &cmd);
+
+       tmp_buf = tmp_addr;
+       if (err == QLCNIC_RCODE_SUCCESS) {
+               for (i = 0; i < temp_size / sizeof(u32); i++)
+                       *buffer++ = __le32_to_cpu(*tmp_buf++);
+       }
+
+       qlcnic_free_mbx_args(&cmd);
+
+free_mem:
+       dma_free_coherent(&adapter->pdev->dev, temp_size, tmp_addr, tmp_addr_t);
+
+       return err;
+}
+
+int qlcnic_fw_cmd_get_minidump_temp(struct qlcnic_adapter *adapter)
+{
+       struct qlcnic_hardware_context *ahw;
+       struct qlcnic_fw_dump *fw_dump;
+       u32 version, csum, *tmp_buf;
+       u8 use_flash_temp = 0;
+       u32 temp_size = 0;
+       void *temp_buffer;
+       int err;
+
+       ahw = adapter->ahw;
+       fw_dump = &ahw->fw_dump;
+       err = qlcnic_fw_get_minidump_temp_size(adapter, &version, &temp_size,
+                                              &use_flash_temp);
+       if (err) {
+               dev_err(&adapter->pdev->dev,
+                       "Can't get template size %d\n", err);
+               return -EIO;
+       }
+
+       fw_dump->tmpl_hdr = vzalloc(temp_size);
+       if (!fw_dump->tmpl_hdr)
+               return -ENOMEM;
+
+       tmp_buf = (u32 *)fw_dump->tmpl_hdr;
+       if (use_flash_temp)
+               goto flash_temp;
+
+       err = __qlcnic_fw_cmd_get_minidump_temp(adapter, tmp_buf, temp_size);
+
+       if (err) {
+flash_temp:
+               err = qlcnic_fw_flash_get_minidump_temp(adapter, (u8 *)tmp_buf,
+                                                       temp_size);
+
+               if (err) {
+                       dev_err(&adapter->pdev->dev,
+                               "Failed to get minidump template header %d\n",
+                               err);
+                       vfree(fw_dump->tmpl_hdr);
+                       fw_dump->tmpl_hdr = NULL;
+                       return -EIO;
+               }
+       }
+
+       csum = qlcnic_temp_checksum((uint32_t *)tmp_buf, temp_size);
+
+       if (csum) {
+               dev_err(&adapter->pdev->dev,
+                       "Template header checksum validation failed\n");
+               vfree(fw_dump->tmpl_hdr);
+               fw_dump->tmpl_hdr = NULL;
+               return -EIO;
+       }
+
+       qlcnic_cache_tmpl_hdr_values(adapter, fw_dump);
+
+       if (fw_dump->use_pex_dma) {
+               fw_dump->dma_buffer = NULL;
+               temp_buffer = dma_alloc_coherent(&adapter->pdev->dev,
+                                                QLC_PEX_DMA_READ_SIZE,
+                                                &fw_dump->phys_addr,
+                                                GFP_KERNEL);
+               if (!temp_buffer)
+                       fw_dump->use_pex_dma = false;
+               else
+                       fw_dump->dma_buffer = temp_buffer;
+       }
+
+       dev_info(&adapter->pdev->dev,
+                "Default minidump capture mask 0x%x\n",
+                fw_dump->cap_mask);
+
+       qlcnic_enable_fw_dump_state(adapter);
+
+       return 0;
+}
+
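+/* Capture a firmware minidump: size the data area from the per-capability
+ * sizes selected by cap_mask, record driver and firmware versions in the
+ * template sys_info area, then walk the template entries and dispatch each
+ * one to its opcode handler; unknown opcodes and entries whose captured size
+ * does not match cap_size are marked QLCNIC_DUMP_SKIP. A udev KOBJ_CHANGE
+ * event announces the completed dump.
+ */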
+int qlcnic_dump_fw(struct qlcnic_adapter *adapter)
+{
+       struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump;
+       static const struct qlcnic_dump_operations *fw_dump_ops;
+       struct qlcnic_83xx_dump_template_hdr *hdr_83xx;
+       u32 entry_offset, dump, no_entries, buf_offset = 0;
+       int i, k, ops_cnt, ops_index, dump_size = 0;
+       struct device *dev = &adapter->pdev->dev;
+       struct qlcnic_hardware_context *ahw;
+       struct qlcnic_dump_entry *entry;
+       void *tmpl_hdr;
+       u32 ocm_window;
+       __le32 *buffer;
+       char mesg[64];
+       char *msg[] = {mesg, NULL};
+
+       ahw = adapter->ahw;
+       tmpl_hdr = fw_dump->tmpl_hdr;
+
+       /* Return if we don't have firmware dump template header */
+       if (!tmpl_hdr)
+               return -EIO;
+
+       if (!qlcnic_check_fw_dump_state(adapter)) {
+               dev_info(&adapter->pdev->dev, "Dump not enabled\n");
+               return -EIO;
+       }
+
+       if (fw_dump->clr) {
+               dev_info(&adapter->pdev->dev,
+                        "Previous dump not cleared, not capturing dump\n");
+               return -EIO;
+       }
+
+       netif_info(adapter->ahw, drv, adapter->netdev, "Take FW dump\n");
+       /* Calculate the size for dump data area only */
+       for (i = 2, k = 1; (i & QLCNIC_DUMP_MASK_MAX); i <<= 1, k++)
+               if (i & fw_dump->cap_mask)
+                       dump_size += qlcnic_get_cap_size(adapter, tmpl_hdr, k);
+
+       if (!dump_size)
+               return -EIO;
+
+       fw_dump->data = vzalloc(dump_size);
+       if (!fw_dump->data)
+               return -ENOMEM;
+
+       buffer = fw_dump->data;
+       fw_dump->size = dump_size;
+       no_entries = fw_dump->num_entries;
+       entry_offset = fw_dump->offset;
+       qlcnic_set_sys_info(adapter, tmpl_hdr, 0, QLCNIC_DRIVER_VERSION);
+       qlcnic_set_sys_info(adapter, tmpl_hdr, 1, adapter->fw_version);
+
+       if (qlcnic_82xx_check(adapter)) {
+               ops_cnt = ARRAY_SIZE(qlcnic_fw_dump_ops);
+               fw_dump_ops = qlcnic_fw_dump_ops;
+       } else {
+               hdr_83xx = tmpl_hdr;
+               ops_cnt = ARRAY_SIZE(qlcnic_83xx_fw_dump_ops);
+               fw_dump_ops = qlcnic_83xx_fw_dump_ops;
+               ocm_window = hdr_83xx->ocm_wnd_reg[ahw->pci_func];
+               hdr_83xx->saved_state[QLC_83XX_OCM_INDEX] = ocm_window;
+               hdr_83xx->saved_state[QLC_83XX_PCI_INDEX] = ahw->pci_func;
+       }
+
+       for (i = 0; i < no_entries; i++) {
+               entry = tmpl_hdr + entry_offset;
+               if (!(entry->hdr.mask & fw_dump->cap_mask)) {
+                       entry->hdr.flags |= QLCNIC_DUMP_SKIP;
+                       entry_offset += entry->hdr.offset;
+                       continue;
+               }
+
+               /* Find the handler for this entry */
+               ops_index = 0;
+               while (ops_index < ops_cnt) {
+                       if (entry->hdr.type == fw_dump_ops[ops_index].opcode)
+                               break;
+                       ops_index++;
+               }
+
+               if (ops_index == ops_cnt) {
+                       dev_info(dev, "Skipping unknown entry opcode %d\n",
+                                entry->hdr.type);
+                       entry->hdr.flags |= QLCNIC_DUMP_SKIP;
+                       entry_offset += entry->hdr.offset;
+                       continue;
+               }
+
+               /* Collect dump for this entry */
+               dump = fw_dump_ops[ops_index].handler(adapter, entry, buffer);
+               if (!qlcnic_valid_dump_entry(dev, entry, dump)) {
+                       entry->hdr.flags |= QLCNIC_DUMP_SKIP;
+                       entry_offset += entry->hdr.offset;
+                       continue;
+               }
+
+               buf_offset += entry->hdr.cap_size;
+               entry_offset += entry->hdr.offset;
+               buffer = fw_dump->data + buf_offset;
+       }
+
+       fw_dump->clr = 1;
+       snprintf(mesg, sizeof(mesg), "FW_DUMP=%s", adapter->netdev->name);
+       netdev_info(adapter->netdev,
+                   "Dump data %d bytes captured, dump data address = %p, template header size %d bytes, template address = %p\n",
+                   fw_dump->size, fw_dump->data, fw_dump->tmpl_hdr_size,
+                   fw_dump->tmpl_hdr);
+       /* Send a udev event to notify availability of FW dump */
+       kobject_uevent_env(&dev->kobj, KOBJ_CHANGE, msg);
+
+       return 0;
+}
+
+static inline bool
+qlcnic_83xx_md_check_extended_dump_capability(struct qlcnic_adapter *adapter)
+{
+       /* For special adapters (with the 0x8830 device ID), where the iSCSI
+        * firmware dump needs to be captured as part of the regular firmware
+        * dump collection process, the firmware exports its capability through
+        * the capability registers
+        */
+       return ((adapter->pdev->device == PCI_DEVICE_ID_QLOGIC_QLE8830) &&
+               (adapter->ahw->extra_capability[0] &
+                QLCNIC_FW_CAPABILITY_2_EXT_ISCSI_DUMP));
+}
+
+void qlcnic_83xx_get_minidump_template(struct qlcnic_adapter *adapter)
+{
+       u32 prev_version, current_version;
+       struct qlcnic_hardware_context *ahw = adapter->ahw;
+       struct qlcnic_fw_dump *fw_dump = &ahw->fw_dump;
+       struct pci_dev *pdev = adapter->pdev;
+       bool extended = false;
+       int ret;
+
+       prev_version = adapter->fw_version;
+       current_version = qlcnic_83xx_get_fw_version(adapter);
+
+       if (fw_dump->tmpl_hdr == NULL || current_version > prev_version) {
+               vfree(fw_dump->tmpl_hdr);
+
+               if (qlcnic_83xx_md_check_extended_dump_capability(adapter))
+                       extended = !qlcnic_83xx_extend_md_capab(adapter);
+
+               ret = qlcnic_fw_cmd_get_minidump_temp(adapter);
+               if (ret)
+                       return;
+
+               dev_info(&pdev->dev, "Supports FW dump capability\n");
+
+               /* Once we have a minidump template with the extended iSCSI
+                * dump capability, update the minidump capture mask to 0x1f
+                * as per the FW requirement
+                */
+               if (extended) {
+                       struct qlcnic_83xx_dump_template_hdr *hdr;
+
+                       hdr = fw_dump->tmpl_hdr;
+                       hdr->drv_cap_mask = 0x1f;
+                       fw_dump->cap_mask = 0x1f;
+                       dev_info(&pdev->dev,
+                                "Extended iSCSI dump capability and updated capture mask to 0x1f\n");
+               }
+       }
+}
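Editor's note: qlcnic_dump_fw() above is template driven. Each entry in the firmware-supplied template carries an opcode and a capability mask; entries whose mask is not set in cap_mask, or whose opcode has no handler in the per-chip ops table, are flagged QLCNIC_DUMP_SKIP, and every remaining entry's handler writes its data into the vzalloc'ed dump buffer. The following is a minimal, self-contained user-space sketch of that dispatch pattern only; every name in it is illustrative and none of it is the driver's own code.

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

struct demo_entry {
        uint32_t type;          /* opcode identifying the capture handler */
        uint32_t mask;          /* capture class this entry belongs to */
        uint32_t size;          /* bytes the entry would contribute */
};

struct demo_ops {
        uint32_t opcode;
        uint32_t (*handler)(const struct demo_entry *entry);
};

static uint32_t demo_dump_regs(const struct demo_entry *entry) { return entry->size; }
static uint32_t demo_dump_mem(const struct demo_entry *entry)  { return entry->size; }

static const struct demo_ops demo_ops_tbl[] = {
        { .opcode = 1, .handler = demo_dump_regs },
        { .opcode = 2, .handler = demo_dump_mem },
};

int main(void)
{
        const struct demo_entry entries[] = {
                { .type = 1, .mask = 0x01, .size = 64 },
                { .type = 9, .mask = 0x01, .size = 16 },  /* unknown opcode: skipped */
                { .type = 2, .mask = 0x10, .size = 128 }, /* mask not enabled: skipped */
        };
        uint32_t cap_mask = 0x0f, captured = 0;
        size_t i, k;

        for (i = 0; i < sizeof(entries) / sizeof(entries[0]); i++) {
                if (!(entries[i].mask & cap_mask))
                        continue;               /* capture class not requested */
                for (k = 0; k < sizeof(demo_ops_tbl) / sizeof(demo_ops_tbl[0]); k++)
                        if (demo_ops_tbl[k].opcode == entries[i].type)
                                break;
                if (k == sizeof(demo_ops_tbl) / sizeof(demo_ops_tbl[0]))
                        continue;               /* no handler for this opcode */
                captured += demo_ops_tbl[k].handler(&entries[i]);
        }
        printf("captured %u bytes\n", (unsigned int)captured);
        return 0;
}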
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h
new file mode 100644 (file)
index 0000000..24061b9
--- /dev/null
@@ -0,0 +1,278 @@
+/*
+ * QLogic qlcnic NIC Driver
+ * Copyright (c) 2009-2013 QLogic Corporation
+ *
+ * See LICENSE.qlcnic for copyright and licensing details.
+ */
+
+#ifndef _QLCNIC_83XX_SRIOV_H_
+#define _QLCNIC_83XX_SRIOV_H_
+
+#include <linux/types.h>
+#include <linux/pci.h>
+
+#include "qlcnic.h"
+
+extern const u32 qlcnic_83xx_reg_tbl[];
+extern const u32 qlcnic_83xx_ext_reg_tbl[];
+
+struct qlcnic_bc_payload {
+       u64 payload[126];
+};
+
+struct qlcnic_bc_hdr {
+#if defined(__LITTLE_ENDIAN)
+       u8      version;
+       u8      msg_type:4;
+       u8      rsvd1:3;
+       u8      op_type:1;
+       u8      num_cmds;
+       u8      num_frags;
+       u8      frag_num;
+       u8      cmd_op;
+       u16     seq_id;
+       u64     rsvd3;
+#elif defined(__BIG_ENDIAN)
+       u8      num_frags;
+       u8      num_cmds;
+       u8      op_type:1;
+       u8      rsvd1:3;
+       u8      msg_type:4;
+       u8      version;
+       u16     seq_id;
+       u8      cmd_op;
+       u8      frag_num;
+       u64     rsvd3;
+#endif
+};
+
+enum qlcnic_bc_commands {
+       QLCNIC_BC_CMD_CHANNEL_INIT = 0x0,
+       QLCNIC_BC_CMD_CHANNEL_TERM = 0x1,
+       QLCNIC_BC_CMD_GET_ACL = 0x2,
+       QLCNIC_BC_CMD_CFG_GUEST_VLAN = 0x3,
+};
+
+#define QLCNIC_83XX_SRIOV_VF_MAX_MAC 2
+#define QLC_BC_CMD 1
+
+struct qlcnic_trans_list {
+       /* Lock for manipulating list */
+       spinlock_t              lock;
+       struct list_head        wait_list;
+       int                     count;
+};
+
+enum qlcnic_trans_state {
+       QLC_INIT = 0,
+       QLC_WAIT_FOR_CHANNEL_FREE,
+       QLC_WAIT_FOR_RESP,
+       QLC_ABORT,
+       QLC_END,
+};
+
+struct qlcnic_bc_trans {
+       u8                              func_id;
+       u8                              active;
+       u8                              curr_rsp_frag;
+       u8                              curr_req_frag;
+       u16                             cmd_id;
+       u16                             req_pay_size;
+       u16                             rsp_pay_size;
+       u32                             trans_id;
+       enum qlcnic_trans_state         trans_state;
+       struct list_head                list;
+       struct qlcnic_bc_hdr            *req_hdr;
+       struct qlcnic_bc_hdr            *rsp_hdr;
+       struct qlcnic_bc_payload        *req_pay;
+       struct qlcnic_bc_payload        *rsp_pay;
+       struct completion               resp_cmpl;
+       struct qlcnic_vf_info           *vf;
+};
+
+enum qlcnic_vf_state {
+       QLC_BC_VF_SEND = 0,
+       QLC_BC_VF_RECV,
+       QLC_BC_VF_CHANNEL,
+       QLC_BC_VF_STATE,
+       QLC_BC_VF_FLR,
+       QLC_BC_VF_SOFT_FLR,
+};
+
+enum qlcnic_vlan_mode {
+       QLC_NO_VLAN_MODE = 0,
+       QLC_PVID_MODE,
+       QLC_GUEST_VLAN_MODE,
+};
+
+struct qlcnic_resources {
+       u16 num_tx_mac_filters;
+       u16 num_rx_ucast_mac_filters;
+       u16 num_rx_mcast_mac_filters;
+
+       u16 num_txvlan_keys;
+
+       u16 num_rx_queues;
+       u16 num_tx_queues;
+
+       u16 num_rx_buf_rings;
+       u16 num_rx_status_rings;
+
+       u16 num_destip;
+       u32 num_lro_flows_supported;
+       u16 max_local_ipv6_addrs;
+       u16 max_remote_ipv6_addrs;
+};
+
+struct qlcnic_vport {
+       u16                     handle;
+       u16                     max_tx_bw;
+       u16                     min_tx_bw;
+       u16                     pvid;
+       u8                      vlan_mode;
+       u8                      qos;
+       bool                    spoofchk;
+       u8                      mac[6];
+};
+
+struct qlcnic_vf_info {
+       u8                              pci_func;
+       u16                             rx_ctx_id;
+       u16                             tx_ctx_id;
+       u16                             *sriov_vlans;
+       int                             num_vlan;
+       unsigned long                   state;
+       struct completion               ch_free_cmpl;
+       struct work_struct              trans_work;
+       struct work_struct              flr_work;
+       /* Serializes commands sent from the VF */
+       struct mutex                    send_cmd_lock;
+       struct qlcnic_bc_trans          *send_cmd;
+       struct qlcnic_bc_trans          *flr_trans;
+       struct qlcnic_trans_list        rcv_act;
+       struct qlcnic_trans_list        rcv_pend;
+       struct qlcnic_adapter           *adapter;
+       struct qlcnic_vport             *vp;
+       spinlock_t                      vlan_list_lock; /* Lock for VLAN list */
+};
+
+struct qlcnic_async_cmd {
+       struct list_head        list;
+       struct qlcnic_cmd_args  *cmd;
+};
+
+struct qlcnic_back_channel {
+       u16                     trans_counter;
+       struct workqueue_struct *bc_trans_wq;
+       struct workqueue_struct *bc_async_wq;
+       struct workqueue_struct *bc_flr_wq;
+       struct qlcnic_adapter   *adapter;
+       struct list_head        async_cmd_list;
+       struct work_struct      vf_async_work;
+       spinlock_t              queue_lock; /* async_cmd_list queue lock */
+};
+
+struct qlcnic_sriov {
+       u16                             vp_handle;
+       u8                              num_vfs;
+       u8                              any_vlan;
+       u8                              vlan_mode;
+       u16                             num_allowed_vlans;
+       u16                             *allowed_vlans;
+       u16                             vlan;
+       struct qlcnic_resources         ff_max;
+       struct qlcnic_back_channel      bc;
+       struct qlcnic_vf_info           *vf_info;
+};
+
+int qlcnic_sriov_init(struct qlcnic_adapter *, int);
+void qlcnic_sriov_cleanup(struct qlcnic_adapter *);
+void __qlcnic_sriov_cleanup(struct qlcnic_adapter *);
+void qlcnic_sriov_vf_register_map(struct qlcnic_hardware_context *);
+int qlcnic_sriov_vf_init(struct qlcnic_adapter *, int);
+void qlcnic_sriov_vf_set_ops(struct qlcnic_adapter *);
+int qlcnic_sriov_func_to_index(struct qlcnic_adapter *, u8);
+void qlcnic_sriov_handle_bc_event(struct qlcnic_adapter *, u32);
+int qlcnic_sriov_cfg_bc_intr(struct qlcnic_adapter *, u8);
+void qlcnic_sriov_cleanup_async_list(struct qlcnic_back_channel *);
+void qlcnic_sriov_cleanup_list(struct qlcnic_trans_list *);
+int __qlcnic_sriov_add_act_list(struct qlcnic_sriov *, struct qlcnic_vf_info *,
+                               struct qlcnic_bc_trans *);
+int qlcnic_sriov_get_vf_vport_info(struct qlcnic_adapter *,
+                                  struct qlcnic_info *, u16);
+int qlcnic_sriov_cfg_vf_guest_vlan(struct qlcnic_adapter *, u16, u8);
+void qlcnic_sriov_free_vlans(struct qlcnic_adapter *);
+void qlcnic_sriov_alloc_vlans(struct qlcnic_adapter *);
+bool qlcnic_sriov_check_any_vlan(struct qlcnic_vf_info *);
+void qlcnic_sriov_del_vlan_id(struct qlcnic_sriov *,
+                             struct qlcnic_vf_info *, u16);
+void qlcnic_sriov_add_vlan_id(struct qlcnic_sriov *,
+                             struct qlcnic_vf_info *, u16);
+
+static inline bool qlcnic_sriov_enable_check(struct qlcnic_adapter *adapter)
+{
+       return test_bit(__QLCNIC_SRIOV_ENABLE, &adapter->state) ? true : false;
+}
+
+#ifdef CONFIG_QLCNIC_SRIOV
+void qlcnic_sriov_pf_process_bc_cmd(struct qlcnic_adapter *,
+                                   struct qlcnic_bc_trans *,
+                                   struct qlcnic_cmd_args *);
+void qlcnic_sriov_pf_disable(struct qlcnic_adapter *);
+void qlcnic_sriov_pf_cleanup(struct qlcnic_adapter *);
+int qlcnic_pci_sriov_configure(struct pci_dev *, int);
+void qlcnic_pf_set_interface_id_create_rx_ctx(struct qlcnic_adapter *, u32 *);
+void qlcnic_pf_set_interface_id_create_tx_ctx(struct qlcnic_adapter *, u32 *);
+void qlcnic_pf_set_interface_id_del_rx_ctx(struct qlcnic_adapter *, u32 *);
+void qlcnic_pf_set_interface_id_del_tx_ctx(struct qlcnic_adapter *, u32 *);
+void qlcnic_pf_set_interface_id_promisc(struct qlcnic_adapter *, u32 *);
+void qlcnic_pf_set_interface_id_ipaddr(struct qlcnic_adapter *, u32 *);
+void qlcnic_pf_set_interface_id_macaddr(struct qlcnic_adapter *, u32 *);
+void qlcnic_sriov_pf_handle_flr(struct qlcnic_sriov *, struct qlcnic_vf_info *);
+bool qlcnic_sriov_soft_flr_check(struct qlcnic_adapter *,
+                                struct qlcnic_bc_trans *,
+                                struct qlcnic_vf_info *);
+void qlcnic_sriov_pf_reset(struct qlcnic_adapter *);
+int qlcnic_sriov_pf_reinit(struct qlcnic_adapter *);
+int qlcnic_sriov_set_vf_mac(struct net_device *, int, u8 *);
+int qlcnic_sriov_set_vf_tx_rate(struct net_device *, int, int, int);
+int qlcnic_sriov_get_vf_config(struct net_device *, int,
+                              struct ifla_vf_info *);
+int qlcnic_sriov_set_vf_vlan(struct net_device *, int, u16, u8);
+int qlcnic_sriov_set_vf_spoofchk(struct net_device *, int, bool);
+#else
+static inline void qlcnic_sriov_pf_disable(struct qlcnic_adapter *adapter) {}
+static inline void qlcnic_sriov_pf_cleanup(struct qlcnic_adapter *adapter) {}
+static inline void
+qlcnic_pf_set_interface_id_create_rx_ctx(struct qlcnic_adapter *adapter,
+                                        u32 *int_id) {}
+static inline void
+qlcnic_pf_set_interface_id_create_tx_ctx(struct qlcnic_adapter *adapter,
+                                        u32 *int_id) {}
+static inline void
+qlcnic_pf_set_interface_id_del_rx_ctx(struct qlcnic_adapter *adapter,
+                                     u32 *int_id) {}
+static inline void
+qlcnic_pf_set_interface_id_del_tx_ctx(struct qlcnic_adapter *adapter,
+                                     u32 *int_id) {}
+static inline void
+qlcnic_pf_set_interface_id_ipaddr(struct qlcnic_adapter *adapter, u32 *int_id)
+{}
+static inline void
+qlcnic_pf_set_interface_id_macaddr(struct qlcnic_adapter *adapter, u32 *int_id)
+{}
+static inline void
+qlcnic_pf_set_interface_id_promisc(struct qlcnic_adapter *adapter, u32 *int_id)
+{}
+static inline void qlcnic_sriov_pf_handle_flr(struct qlcnic_sriov *sriov,
+                                             struct qlcnic_vf_info *vf) {}
+static inline bool qlcnic_sriov_soft_flr_check(struct qlcnic_adapter *adapter,
+                                              struct qlcnic_bc_trans *trans,
+                                              struct qlcnic_vf_info *vf)
+{ return false; }
+static inline void qlcnic_sriov_pf_reset(struct qlcnic_adapter *adapter) {}
+static inline int qlcnic_sriov_pf_reinit(struct qlcnic_adapter *adapter)
+{ return 0; }
+#endif
+
+#endif
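Editor's note: the block of empty static inlines at the end of this header is the usual Kconfig stub pattern: when CONFIG_QLCNIC_SRIOV is not set, the PF-side helpers compile down to no-ops so call sites never need #ifdef guards. A tiny standalone sketch of the same pattern, using a made-up CONFIG_DEMO_FEATURE symbol and demo_feature_reset() helper:

#include <stdio.h>

struct demo_adapter { int id; };

#ifdef CONFIG_DEMO_FEATURE
void demo_feature_reset(struct demo_adapter *adapter);  /* real implementation elsewhere */
#else
static inline void demo_feature_reset(struct demo_adapter *adapter) { (void)adapter; }
#endif

int main(void)
{
        struct demo_adapter adapter = { .id = 0 };

        /* Builds and runs whether or not CONFIG_DEMO_FEATURE is defined; with
         * the feature off, the call collapses to an empty inline stub.
         */
        demo_feature_reset(&adapter);
        printf("reset requested for adapter %d\n", adapter.id);
        return 0;
}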
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
new file mode 100644 (file)
index 0000000..d710705
--- /dev/null
@@ -0,0 +1,2228 @@
+/*
+ * QLogic qlcnic NIC Driver
+ * Copyright (c) 2009-2013 QLogic Corporation
+ *
+ * See LICENSE.qlcnic for copyright and licensing details.
+ */
+
+#include <linux/types.h>
+
+#include "qlcnic_sriov.h"
+#include "qlcnic.h"
+#include "qlcnic_83xx_hw.h"
+
+#define QLC_BC_COMMAND 0
+#define QLC_BC_RESPONSE        1
+
+#define QLC_MBOX_RESP_TIMEOUT          (10 * HZ)
+#define QLC_MBOX_CH_FREE_TIMEOUT       (10 * HZ)
+
+#define QLC_BC_MSG             0
+#define QLC_BC_CFREE           1
+#define QLC_BC_FLR             2
+#define QLC_BC_HDR_SZ          16
+#define QLC_BC_PAYLOAD_SZ      (1024 - QLC_BC_HDR_SZ)
+
+#define QLC_DEFAULT_RCV_DESCRIPTORS_SRIOV_VF           2048
+#define QLC_DEFAULT_JUMBO_RCV_DESCRIPTORS_SRIOV_VF     512
+
+#define QLC_83XX_VF_RESET_FAIL_THRESH  8
+#define QLC_BC_CMD_MAX_RETRY_CNT       5
+
+static void qlcnic_sriov_handle_async_issue_cmd(struct work_struct *work);
+static void qlcnic_sriov_vf_free_mac_list(struct qlcnic_adapter *);
+static int qlcnic_sriov_alloc_bc_mbx_args(struct qlcnic_cmd_args *, u32);
+static void qlcnic_sriov_vf_poll_dev_state(struct work_struct *);
+static void qlcnic_sriov_vf_cancel_fw_work(struct qlcnic_adapter *);
+static void qlcnic_sriov_cleanup_transaction(struct qlcnic_bc_trans *);
+static int qlcnic_sriov_issue_cmd(struct qlcnic_adapter *,
+                                 struct qlcnic_cmd_args *);
+static int qlcnic_sriov_channel_cfg_cmd(struct qlcnic_adapter *, u8);
+static void qlcnic_sriov_process_bc_cmd(struct work_struct *);
+static int qlcnic_sriov_vf_shutdown(struct pci_dev *);
+static int qlcnic_sriov_vf_resume(struct qlcnic_adapter *);
+static int qlcnic_sriov_async_issue_cmd(struct qlcnic_adapter *,
+                                       struct qlcnic_cmd_args *);
+
+static struct qlcnic_hardware_ops qlcnic_sriov_vf_hw_ops = {
+       .read_crb                       = qlcnic_83xx_read_crb,
+       .write_crb                      = qlcnic_83xx_write_crb,
+       .read_reg                       = qlcnic_83xx_rd_reg_indirect,
+       .write_reg                      = qlcnic_83xx_wrt_reg_indirect,
+       .get_mac_address                = qlcnic_83xx_get_mac_address,
+       .setup_intr                     = qlcnic_83xx_setup_intr,
+       .alloc_mbx_args                 = qlcnic_83xx_alloc_mbx_args,
+       .mbx_cmd                        = qlcnic_sriov_issue_cmd,
+       .get_func_no                    = qlcnic_83xx_get_func_no,
+       .api_lock                       = qlcnic_83xx_cam_lock,
+       .api_unlock                     = qlcnic_83xx_cam_unlock,
+       .process_lb_rcv_ring_diag       = qlcnic_83xx_process_rcv_ring_diag,
+       .create_rx_ctx                  = qlcnic_83xx_create_rx_ctx,
+       .create_tx_ctx                  = qlcnic_83xx_create_tx_ctx,
+       .del_rx_ctx                     = qlcnic_83xx_del_rx_ctx,
+       .del_tx_ctx                     = qlcnic_83xx_del_tx_ctx,
+       .setup_link_event               = qlcnic_83xx_setup_link_event,
+       .get_nic_info                   = qlcnic_83xx_get_nic_info,
+       .get_pci_info                   = qlcnic_83xx_get_pci_info,
+       .set_nic_info                   = qlcnic_83xx_set_nic_info,
+       .change_macvlan                 = qlcnic_83xx_sre_macaddr_change,
+       .napi_enable                    = qlcnic_83xx_napi_enable,
+       .napi_disable                   = qlcnic_83xx_napi_disable,
+       .config_intr_coal               = qlcnic_83xx_config_intr_coal,
+       .config_rss                     = qlcnic_83xx_config_rss,
+       .config_hw_lro                  = qlcnic_83xx_config_hw_lro,
+       .config_promisc_mode            = qlcnic_83xx_nic_set_promisc,
+       .change_l2_filter               = qlcnic_83xx_change_l2_filter,
+       .get_board_info                 = qlcnic_83xx_get_port_info,
+       .free_mac_list                  = qlcnic_sriov_vf_free_mac_list,
+       .enable_sds_intr                = qlcnic_83xx_enable_sds_intr,
+       .disable_sds_intr               = qlcnic_83xx_disable_sds_intr,
+};
+
+static struct qlcnic_nic_template qlcnic_sriov_vf_ops = {
+       .config_bridged_mode    = qlcnic_config_bridged_mode,
+       .config_led             = qlcnic_config_led,
+       .cancel_idc_work        = qlcnic_sriov_vf_cancel_fw_work,
+       .napi_add               = qlcnic_83xx_napi_add,
+       .napi_del               = qlcnic_83xx_napi_del,
+       .shutdown               = qlcnic_sriov_vf_shutdown,
+       .resume                 = qlcnic_sriov_vf_resume,
+       .config_ipaddr          = qlcnic_83xx_config_ipaddr,
+       .clear_legacy_intr      = qlcnic_83xx_clear_legacy_intr,
+};
+
+static const struct qlcnic_mailbox_metadata qlcnic_sriov_bc_mbx_tbl[] = {
+       {QLCNIC_BC_CMD_CHANNEL_INIT, 2, 2},
+       {QLCNIC_BC_CMD_CHANNEL_TERM, 2, 2},
+       {QLCNIC_BC_CMD_GET_ACL, 3, 14},
+       {QLCNIC_BC_CMD_CFG_GUEST_VLAN, 2, 2},
+};
+
+static inline bool qlcnic_sriov_bc_msg_check(u32 val)
+{
+       return (val & (1 << QLC_BC_MSG)) ? true : false;
+}
+
+static inline bool qlcnic_sriov_channel_free_check(u32 val)
+{
+       return (val & (1 << QLC_BC_CFREE)) ? true : false;
+}
+
+static inline bool qlcnic_sriov_flr_check(u32 val)
+{
+       return (val & (1 << QLC_BC_FLR)) ? true : false;
+}
+
+static inline u8 qlcnic_sriov_target_func_id(u32 val)
+{
+       return (val >> 4) & 0xff;
+}
+
+static int qlcnic_sriov_virtid_fn(struct qlcnic_adapter *adapter, int vf_id)
+{
+       struct pci_dev *dev = adapter->pdev;
+       int pos;
+       u16 stride, offset;
+
+       if (qlcnic_sriov_vf_check(adapter))
+               return 0;
+
+       pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV);
+       pci_read_config_word(dev, pos + PCI_SRIOV_VF_OFFSET, &offset);
+       pci_read_config_word(dev, pos + PCI_SRIOV_VF_STRIDE, &stride);
+
+       return (dev->devfn + offset + stride * vf_id) & 0xff;
+}
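Editor's note: qlcnic_sriov_virtid_fn() above applies the standard PCIe SR-IOV routing-ID formula: VF N sits at the PF's devfn plus the VF offset plus N times the VF stride, both read from the PF's SR-IOV capability. A standalone sketch of that arithmetic; the offset and stride values below are made up for illustration, real values come from config space.

#include <stdio.h>

int main(void)
{
        unsigned int pf_devfn = 0x00;   /* PF at device 0, function 0 */
        unsigned int vf_offset = 128;   /* hypothetical PCI_SRIOV_VF_OFFSET value */
        unsigned int vf_stride = 2;     /* hypothetical PCI_SRIOV_VF_STRIDE value */
        int vf_id;

        for (vf_id = 0; vf_id < 4; vf_id++) {
                unsigned int devfn = (pf_devfn + vf_offset + vf_stride * vf_id) & 0xff;

                printf("VF %d -> devfn 0x%02x (device %u, function %u)\n",
                       vf_id, devfn, devfn >> 3, devfn & 0x7);
        }
        return 0;
}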
+
+int qlcnic_sriov_init(struct qlcnic_adapter *adapter, int num_vfs)
+{
+       struct qlcnic_sriov *sriov;
+       struct qlcnic_back_channel *bc;
+       struct workqueue_struct *wq;
+       struct qlcnic_vport *vp;
+       struct qlcnic_vf_info *vf;
+       int err, i;
+
+       if (!qlcnic_sriov_enable_check(adapter))
+               return -EIO;
+
+       sriov = kzalloc(sizeof(struct qlcnic_sriov), GFP_KERNEL);
+       if (!sriov)
+               return -ENOMEM;
+
+       adapter->ahw->sriov = sriov;
+       sriov->num_vfs = num_vfs;
+       bc = &sriov->bc;
+       sriov->vf_info = kzalloc(sizeof(struct qlcnic_vf_info) *
+                                num_vfs, GFP_KERNEL);
+       if (!sriov->vf_info) {
+               err = -ENOMEM;
+               goto qlcnic_free_sriov;
+       }
+
+       wq = create_singlethread_workqueue("bc-trans");
+       if (wq == NULL) {
+               err = -ENOMEM;
+               dev_err(&adapter->pdev->dev,
+                       "Cannot create bc-trans workqueue\n");
+               goto qlcnic_free_vf_info;
+       }
+
+       bc->bc_trans_wq = wq;
+
+       wq = create_singlethread_workqueue("async");
+       if (wq == NULL) {
+               err = -ENOMEM;
+               dev_err(&adapter->pdev->dev, "Cannot create async workqueue\n");
+               goto qlcnic_destroy_trans_wq;
+       }
+
+       bc->bc_async_wq = wq;
+       INIT_LIST_HEAD(&bc->async_cmd_list);
+       INIT_WORK(&bc->vf_async_work, qlcnic_sriov_handle_async_issue_cmd);
+       spin_lock_init(&bc->queue_lock);
+       bc->adapter = adapter;
+
+       for (i = 0; i < num_vfs; i++) {
+               vf = &sriov->vf_info[i];
+               vf->adapter = adapter;
+               vf->pci_func = qlcnic_sriov_virtid_fn(adapter, i);
+               mutex_init(&vf->send_cmd_lock);
+               spin_lock_init(&vf->vlan_list_lock);
+               INIT_LIST_HEAD(&vf->rcv_act.wait_list);
+               INIT_LIST_HEAD(&vf->rcv_pend.wait_list);
+               spin_lock_init(&vf->rcv_act.lock);
+               spin_lock_init(&vf->rcv_pend.lock);
+               init_completion(&vf->ch_free_cmpl);
+
+               INIT_WORK(&vf->trans_work, qlcnic_sriov_process_bc_cmd);
+
+               if (qlcnic_sriov_pf_check(adapter)) {
+                       vp = kzalloc(sizeof(struct qlcnic_vport), GFP_KERNEL);
+                       if (!vp) {
+                               err = -ENOMEM;
+                               goto qlcnic_destroy_async_wq;
+                       }
+                       sriov->vf_info[i].vp = vp;
+                       vp->vlan_mode = QLC_GUEST_VLAN_MODE;
+                       vp->max_tx_bw = MAX_BW;
+                       vp->min_tx_bw = MIN_BW;
+                       vp->spoofchk = false;
+                       random_ether_addr(vp->mac);
+                       dev_info(&adapter->pdev->dev,
+                                "MAC Address %pM is configured for VF %d\n",
+                                vp->mac, i);
+               }
+       }
+
+       return 0;
+
+qlcnic_destroy_async_wq:
+       destroy_workqueue(bc->bc_async_wq);
+
+qlcnic_destroy_trans_wq:
+       destroy_workqueue(bc->bc_trans_wq);
+
+qlcnic_free_vf_info:
+       kfree(sriov->vf_info);
+
+qlcnic_free_sriov:
+       kfree(adapter->ahw->sriov);
+       return err;
+}
+
+void qlcnic_sriov_cleanup_list(struct qlcnic_trans_list *t_list)
+{
+       struct qlcnic_bc_trans *trans;
+       struct qlcnic_cmd_args cmd;
+       unsigned long flags;
+
+       spin_lock_irqsave(&t_list->lock, flags);
+
+       while (!list_empty(&t_list->wait_list)) {
+               trans = list_first_entry(&t_list->wait_list,
+                                        struct qlcnic_bc_trans, list);
+               list_del(&trans->list);
+               t_list->count--;
+               cmd.req.arg = (u32 *)trans->req_pay;
+               cmd.rsp.arg = (u32 *)trans->rsp_pay;
+               qlcnic_free_mbx_args(&cmd);
+               qlcnic_sriov_cleanup_transaction(trans);
+       }
+
+       spin_unlock_irqrestore(&t_list->lock, flags);
+}
+
+void __qlcnic_sriov_cleanup(struct qlcnic_adapter *adapter)
+{
+       struct qlcnic_sriov *sriov = adapter->ahw->sriov;
+       struct qlcnic_back_channel *bc = &sriov->bc;
+       struct qlcnic_vf_info *vf;
+       int i;
+
+       if (!qlcnic_sriov_enable_check(adapter))
+               return;
+
+       qlcnic_sriov_cleanup_async_list(bc);
+       destroy_workqueue(bc->bc_async_wq);
+
+       for (i = 0; i < sriov->num_vfs; i++) {
+               vf = &sriov->vf_info[i];
+               qlcnic_sriov_cleanup_list(&vf->rcv_pend);
+               cancel_work_sync(&vf->trans_work);
+               qlcnic_sriov_cleanup_list(&vf->rcv_act);
+       }
+
+       destroy_workqueue(bc->bc_trans_wq);
+
+       for (i = 0; i < sriov->num_vfs; i++)
+               kfree(sriov->vf_info[i].vp);
+
+       kfree(sriov->vf_info);
+       kfree(adapter->ahw->sriov);
+}
+
+static void qlcnic_sriov_vf_cleanup(struct qlcnic_adapter *adapter)
+{
+       qlcnic_sriov_channel_cfg_cmd(adapter, QLCNIC_BC_CMD_CHANNEL_TERM);
+       qlcnic_sriov_cfg_bc_intr(adapter, 0);
+       __qlcnic_sriov_cleanup(adapter);
+}
+
+void qlcnic_sriov_cleanup(struct qlcnic_adapter *adapter)
+{
+       if (!test_bit(__QLCNIC_SRIOV_ENABLE, &adapter->state))
+               return;
+
+       qlcnic_sriov_free_vlans(adapter);
+
+       if (qlcnic_sriov_pf_check(adapter))
+               qlcnic_sriov_pf_cleanup(adapter);
+
+       if (qlcnic_sriov_vf_check(adapter))
+               qlcnic_sriov_vf_cleanup(adapter);
+}
+
+static int qlcnic_sriov_post_bc_msg(struct qlcnic_adapter *adapter, u32 *hdr,
+                                   u32 *pay, u8 pci_func, u8 size)
+{
+       struct qlcnic_hardware_context *ahw = adapter->ahw;
+       struct qlcnic_mailbox *mbx = ahw->mailbox;
+       struct qlcnic_cmd_args cmd;
+       unsigned long timeout;
+       int err;
+
+       memset(&cmd, 0, sizeof(struct qlcnic_cmd_args));
+       cmd.hdr = hdr;
+       cmd.pay = pay;
+       cmd.pay_size = size;
+       cmd.func_num = pci_func;
+       cmd.op_type = QLC_83XX_MBX_POST_BC_OP;
+       cmd.cmd_op = ((struct qlcnic_bc_hdr *)hdr)->cmd_op;
+
+       err = mbx->ops->enqueue_cmd(adapter, &cmd, &timeout);
+       if (err) {
+               dev_err(&adapter->pdev->dev,
+                       "%s: Mailbox not available, cmd_op=0x%x, cmd_type=0x%x, pci_func=0x%x, op_mode=0x%x\n",
+                       __func__, cmd.cmd_op, cmd.type, ahw->pci_func,
+                       ahw->op_mode);
+               return err;
+       }
+
+       if (!wait_for_completion_timeout(&cmd.completion, timeout)) {
+               dev_err(&adapter->pdev->dev,
+                       "%s: Mailbox command timed out, cmd_op=0x%x, cmd_type=0x%x, pci_func=0x%x, op_mode=0x%x\n",
+                       __func__, cmd.cmd_op, cmd.type, ahw->pci_func,
+                       ahw->op_mode);
+               flush_workqueue(mbx->work_q);
+       }
+
+       return cmd.rsp_opcode;
+}
+
+static void qlcnic_sriov_vf_cfg_buff_desc(struct qlcnic_adapter *adapter)
+{
+       adapter->num_rxd = QLC_DEFAULT_RCV_DESCRIPTORS_SRIOV_VF;
+       adapter->max_rxd = MAX_RCV_DESCRIPTORS_10G;
+       adapter->num_jumbo_rxd = QLC_DEFAULT_JUMBO_RCV_DESCRIPTORS_SRIOV_VF;
+       adapter->max_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_10G;
+       adapter->num_txd = MAX_CMD_DESCRIPTORS;
+       adapter->max_rds_rings = MAX_RDS_RINGS;
+}
+
+int qlcnic_sriov_get_vf_vport_info(struct qlcnic_adapter *adapter,
+                                  struct qlcnic_info *npar_info, u16 vport_id)
+{
+       struct device *dev = &adapter->pdev->dev;
+       struct qlcnic_cmd_args cmd;
+       int err;
+       u32 status;
+
+       err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_NIC_INFO);
+       if (err)
+               return err;
+
+       cmd.req.arg[1] = vport_id << 16 | 0x1;
+       err = qlcnic_issue_cmd(adapter, &cmd);
+       if (err) {
+               dev_err(&adapter->pdev->dev,
+                       "Failed to get vport info, err=%d\n", err);
+               qlcnic_free_mbx_args(&cmd);
+               return err;
+       }
+
+       status = cmd.rsp.arg[2] & 0xffff;
+       if (status & BIT_0)
+               npar_info->min_tx_bw = MSW(cmd.rsp.arg[2]);
+       if (status & BIT_1)
+               npar_info->max_tx_bw = LSW(cmd.rsp.arg[3]);
+       if (status & BIT_2)
+               npar_info->max_tx_ques = MSW(cmd.rsp.arg[3]);
+       if (status & BIT_3)
+               npar_info->max_tx_mac_filters = LSW(cmd.rsp.arg[4]);
+       if (status & BIT_4)
+               npar_info->max_rx_mcast_mac_filters = MSW(cmd.rsp.arg[4]);
+       if (status & BIT_5)
+               npar_info->max_rx_ucast_mac_filters = LSW(cmd.rsp.arg[5]);
+       if (status & BIT_6)
+               npar_info->max_rx_ip_addr = MSW(cmd.rsp.arg[5]);
+       if (status & BIT_7)
+               npar_info->max_rx_lro_flow = LSW(cmd.rsp.arg[6]);
+       if (status & BIT_8)
+               npar_info->max_rx_status_rings = MSW(cmd.rsp.arg[6]);
+       if (status & BIT_9)
+               npar_info->max_rx_buf_rings = LSW(cmd.rsp.arg[7]);
+
+       npar_info->max_rx_ques = MSW(cmd.rsp.arg[7]);
+       npar_info->max_tx_vlan_keys = LSW(cmd.rsp.arg[8]);
+       npar_info->max_local_ipv6_addrs = MSW(cmd.rsp.arg[8]);
+       npar_info->max_remote_ipv6_addrs = LSW(cmd.rsp.arg[9]);
+
+       dev_info(dev, "\n\tmin_tx_bw: %d, max_tx_bw: %d max_tx_ques: %d,\n"
+                "\tmax_tx_mac_filters: %d max_rx_mcast_mac_filters: %d,\n"
+                "\tmax_rx_ucast_mac_filters: 0x%x, max_rx_ip_addr: %d,\n"
+                "\tmax_rx_lro_flow: %d max_rx_status_rings: %d,\n"
+                "\tmax_rx_buf_rings: %d, max_rx_ques: %d, max_tx_vlan_keys %d\n"
+                "\tlocal_ipv6_addr: %d, remote_ipv6_addr: %d\n",
+                npar_info->min_tx_bw, npar_info->max_tx_bw,
+                npar_info->max_tx_ques, npar_info->max_tx_mac_filters,
+                npar_info->max_rx_mcast_mac_filters,
+                npar_info->max_rx_ucast_mac_filters, npar_info->max_rx_ip_addr,
+                npar_info->max_rx_lro_flow, npar_info->max_rx_status_rings,
+                npar_info->max_rx_buf_rings, npar_info->max_rx_ques,
+                npar_info->max_tx_vlan_keys, npar_info->max_local_ipv6_addrs,
+                npar_info->max_remote_ipv6_addrs);
+
+       qlcnic_free_mbx_args(&cmd);
+       return err;
+}
+
+static int qlcnic_sriov_set_pvid_mode(struct qlcnic_adapter *adapter,
+                                     struct qlcnic_cmd_args *cmd)
+{
+       adapter->rx_pvid = MSW(cmd->rsp.arg[1]) & 0xffff;
+       adapter->flags &= ~QLCNIC_TAGGING_ENABLED;
+       return 0;
+}
+
+static int qlcnic_sriov_set_guest_vlan_mode(struct qlcnic_adapter *adapter,
+                                           struct qlcnic_cmd_args *cmd)
+{
+       struct qlcnic_sriov *sriov = adapter->ahw->sriov;
+       int i, num_vlans;
+       u16 *vlans;
+
+       if (sriov->allowed_vlans)
+               return 0;
+
+       sriov->any_vlan = cmd->rsp.arg[2] & 0xf;
+       sriov->num_allowed_vlans = cmd->rsp.arg[2] >> 16;
+       dev_info(&adapter->pdev->dev, "Number of allowed Guest VLANs = %d\n",
+                sriov->num_allowed_vlans);
+
+       qlcnic_sriov_alloc_vlans(adapter);
+
+       if (!sriov->any_vlan)
+               return 0;
+
+       num_vlans = sriov->num_allowed_vlans;
+       sriov->allowed_vlans = kzalloc(sizeof(u16) * num_vlans, GFP_KERNEL);
+       if (!sriov->allowed_vlans)
+               return -ENOMEM;
+
+       vlans = (u16 *)&cmd->rsp.arg[3];
+       for (i = 0; i < num_vlans; i++)
+               sriov->allowed_vlans[i] = vlans[i];
+
+       return 0;
+}
+
+static int qlcnic_sriov_get_vf_acl(struct qlcnic_adapter *adapter)
+{
+       struct qlcnic_sriov *sriov = adapter->ahw->sriov;
+       struct qlcnic_cmd_args cmd;
+       int ret = 0;
+
+       memset(&cmd, 0, sizeof(cmd));
+       ret = qlcnic_sriov_alloc_bc_mbx_args(&cmd, QLCNIC_BC_CMD_GET_ACL);
+       if (ret)
+               return ret;
+
+       ret = qlcnic_issue_cmd(adapter, &cmd);
+       if (ret) {
+               dev_err(&adapter->pdev->dev, "Failed to get ACL, err=%d\n",
+                       ret);
+       } else {
+               sriov->vlan_mode = cmd.rsp.arg[1] & 0x3;
+               switch (sriov->vlan_mode) {
+               case QLC_GUEST_VLAN_MODE:
+                       ret = qlcnic_sriov_set_guest_vlan_mode(adapter, &cmd);
+                       break;
+               case QLC_PVID_MODE:
+                       ret = qlcnic_sriov_set_pvid_mode(adapter, &cmd);
+                       break;
+               }
+       }
+
+       qlcnic_free_mbx_args(&cmd);
+       return ret;
+}
+
+static int qlcnic_sriov_vf_init_driver(struct qlcnic_adapter *adapter)
+{
+       struct qlcnic_hardware_context *ahw = adapter->ahw;
+       struct qlcnic_info nic_info;
+       int err;
+
+       err = qlcnic_sriov_get_vf_vport_info(adapter, &nic_info, 0);
+       if (err)
+               return err;
+
+       ahw->max_mc_count = nic_info.max_rx_mcast_mac_filters;
+
+       err = qlcnic_get_nic_info(adapter, &nic_info, ahw->pci_func);
+       if (err)
+               return -EIO;
+
+       if (qlcnic_83xx_get_port_info(adapter))
+               return -EIO;
+
+       qlcnic_sriov_vf_cfg_buff_desc(adapter);
+       adapter->flags |= QLCNIC_ADAPTER_INITIALIZED;
+       dev_info(&adapter->pdev->dev, "HAL Version: %d\n",
+                adapter->ahw->fw_hal_version);
+
+       ahw->physical_port = (u8) nic_info.phys_port;
+       ahw->switch_mode = nic_info.switch_mode;
+       ahw->max_mtu = nic_info.max_mtu;
+       ahw->op_mode = nic_info.op_mode;
+       ahw->capabilities = nic_info.capabilities;
+       return 0;
+}
+
+static int qlcnic_sriov_setup_vf(struct qlcnic_adapter *adapter,
+                                int pci_using_dac)
+{
+       int err;
+
+       adapter->flags |= QLCNIC_VLAN_FILTERING;
+       adapter->ahw->total_nic_func = 1;
+       INIT_LIST_HEAD(&adapter->vf_mc_list);
+       if (!qlcnic_use_msi_x && !!qlcnic_use_msi)
+               dev_warn(&adapter->pdev->dev,
+                        "Device does not support MSI interrupts\n");
+
+       /* compute and set default and max tx/sds rings */
+       qlcnic_set_tx_ring_count(adapter, QLCNIC_SINGLE_RING);
+       qlcnic_set_sds_ring_count(adapter, QLCNIC_SINGLE_RING);
+
+       err = qlcnic_setup_intr(adapter);
+       if (err) {
+               dev_err(&adapter->pdev->dev, "Failed to setup interrupt\n");
+               goto err_out_disable_msi;
+       }
+
+       err = qlcnic_83xx_setup_mbx_intr(adapter);
+       if (err)
+               goto err_out_disable_msi;
+
+       err = qlcnic_sriov_init(adapter, 1);
+       if (err)
+               goto err_out_disable_mbx_intr;
+
+       err = qlcnic_sriov_cfg_bc_intr(adapter, 1);
+       if (err)
+               goto err_out_cleanup_sriov;
+
+       err = qlcnic_sriov_channel_cfg_cmd(adapter, QLCNIC_BC_CMD_CHANNEL_INIT);
+       if (err)
+               goto err_out_disable_bc_intr;
+
+       err = qlcnic_sriov_vf_init_driver(adapter);
+       if (err)
+               goto err_out_send_channel_term;
+
+       err = qlcnic_sriov_get_vf_acl(adapter);
+       if (err)
+               goto err_out_send_channel_term;
+
+       err = qlcnic_setup_netdev(adapter, adapter->netdev, pci_using_dac);
+       if (err)
+               goto err_out_send_channel_term;
+
+       pci_set_drvdata(adapter->pdev, adapter);
+       dev_info(&adapter->pdev->dev, "%s: XGbE port initialized\n",
+                adapter->netdev->name);
+
+       qlcnic_schedule_work(adapter, qlcnic_sriov_vf_poll_dev_state,
+                            adapter->ahw->idc.delay);
+       return 0;
+
+err_out_send_channel_term:
+       qlcnic_sriov_channel_cfg_cmd(adapter, QLCNIC_BC_CMD_CHANNEL_TERM);
+
+err_out_disable_bc_intr:
+       qlcnic_sriov_cfg_bc_intr(adapter, 0);
+
+err_out_cleanup_sriov:
+       __qlcnic_sriov_cleanup(adapter);
+
+err_out_disable_mbx_intr:
+       qlcnic_83xx_free_mbx_intr(adapter);
+
+err_out_disable_msi:
+       qlcnic_teardown_intr(adapter);
+       return err;
+}
+
+static int qlcnic_sriov_check_dev_ready(struct qlcnic_adapter *adapter)
+{
+       u32 state;
+
+       do {
+               msleep(20);
+               if (++adapter->fw_fail_cnt > QLC_BC_CMD_MAX_RETRY_CNT)
+                       return -EIO;
+               state = QLCRDX(adapter->ahw, QLC_83XX_IDC_DEV_STATE);
+       } while (state != QLC_83XX_IDC_DEV_READY);
+
+       return 0;
+}
+
+int qlcnic_sriov_vf_init(struct qlcnic_adapter *adapter, int pci_using_dac)
+{
+       struct qlcnic_hardware_context *ahw = adapter->ahw;
+       int err;
+
+       set_bit(QLC_83XX_MODULE_LOADED, &ahw->idc.status);
+       ahw->idc.delay = QLC_83XX_IDC_FW_POLL_DELAY;
+       ahw->reset_context = 0;
+       adapter->fw_fail_cnt = 0;
+       ahw->msix_supported = 1;
+       adapter->need_fw_reset = 0;
+       adapter->flags |= QLCNIC_TX_INTR_SHARED;
+
+       err = qlcnic_sriov_check_dev_ready(adapter);
+       if (err)
+               return err;
+
+       err = qlcnic_sriov_setup_vf(adapter, pci_using_dac);
+       if (err)
+               return err;
+
+       if (qlcnic_read_mac_addr(adapter))
+               dev_warn(&adapter->pdev->dev, "failed to read mac addr\n");
+
+       INIT_DELAYED_WORK(&adapter->idc_aen_work, qlcnic_83xx_idc_aen_work);
+
+       clear_bit(__QLCNIC_RESETTING, &adapter->state);
+       return 0;
+}
+
+void qlcnic_sriov_vf_set_ops(struct qlcnic_adapter *adapter)
+{
+       struct qlcnic_hardware_context *ahw = adapter->ahw;
+
+       ahw->op_mode = QLCNIC_SRIOV_VF_FUNC;
+       dev_info(&adapter->pdev->dev,
+                "HAL Version: %d Non Privileged SRIOV function\n",
+                ahw->fw_hal_version);
+       adapter->nic_ops = &qlcnic_sriov_vf_ops;
+       set_bit(__QLCNIC_SRIOV_ENABLE, &adapter->state);
+       return;
+}
+
+void qlcnic_sriov_vf_register_map(struct qlcnic_hardware_context *ahw)
+{
+       ahw->hw_ops             = &qlcnic_sriov_vf_hw_ops;
+       ahw->reg_tbl            = (u32 *)qlcnic_83xx_reg_tbl;
+       ahw->ext_reg_tbl        = (u32 *)qlcnic_83xx_ext_reg_tbl;
+}
+
+static u32 qlcnic_sriov_get_bc_paysize(u32 real_pay_size, u8 curr_frag)
+{
+       u32 pay_size;
+
+       pay_size = real_pay_size / ((curr_frag + 1) * QLC_BC_PAYLOAD_SZ);
+
+       if (pay_size)
+               pay_size = QLC_BC_PAYLOAD_SZ;
+       else
+               pay_size = real_pay_size % QLC_BC_PAYLOAD_SZ;
+
+       return pay_size;
+}
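Editor's note: qlcnic_sriov_get_bc_paysize() above sizes one fragment of a back-channel message: every fragment except the last carries a full QLC_BC_PAYLOAD_SZ (1024 bytes minus the 16-byte header), and the last carries the remainder. A minimal user-space sketch of that split, using an arbitrary 2500-byte payload; the DEMO_* names mirror the driver's constants but are local to the sketch.

#include <stdio.h>

#define DEMO_BC_HDR_SZ          16
#define DEMO_BC_PAYLOAD_SZ      (1024 - DEMO_BC_HDR_SZ)

static unsigned int demo_get_bc_paysize(unsigned int real_pay_size,
                                        unsigned char curr_frag)
{
        unsigned int pay_size;

        pay_size = real_pay_size / ((curr_frag + 1) * DEMO_BC_PAYLOAD_SZ);
        if (pay_size)
                pay_size = DEMO_BC_PAYLOAD_SZ;
        else
                pay_size = real_pay_size % DEMO_BC_PAYLOAD_SZ;

        return pay_size;
}

int main(void)
{
        unsigned int total = 2500;      /* example message payload in bytes */
        unsigned int frags = (total + DEMO_BC_PAYLOAD_SZ - 1) / DEMO_BC_PAYLOAD_SZ;
        unsigned int i;

        /* Prints 1008 + 1008 + 484 = 2500 for this example payload. */
        for (i = 0; i < frags; i++)
                printf("frag %u carries %u bytes\n", i, demo_get_bc_paysize(total, i));
        return 0;
}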
+
+int qlcnic_sriov_func_to_index(struct qlcnic_adapter *adapter, u8 pci_func)
+{
+       struct qlcnic_vf_info *vf_info = adapter->ahw->sriov->vf_info;
+       u8 i;
+
+       if (qlcnic_sriov_vf_check(adapter))
+               return 0;
+
+       for (i = 0; i < adapter->ahw->sriov->num_vfs; i++) {
+               if (vf_info[i].pci_func == pci_func)
+                       return i;
+       }
+
+       return -EINVAL;
+}
+
+static inline int qlcnic_sriov_alloc_bc_trans(struct qlcnic_bc_trans **trans)
+{
+       *trans = kzalloc(sizeof(struct qlcnic_bc_trans), GFP_ATOMIC);
+       if (!*trans)
+               return -ENOMEM;
+
+       init_completion(&(*trans)->resp_cmpl);
+       return 0;
+}
+
+static inline int qlcnic_sriov_alloc_bc_msg(struct qlcnic_bc_hdr **hdr,
+                                           u32 size)
+{
+       *hdr = kzalloc(sizeof(struct qlcnic_bc_hdr) * size, GFP_ATOMIC);
+       if (!*hdr)
+               return -ENOMEM;
+
+       return 0;
+}
+
+static int qlcnic_sriov_alloc_bc_mbx_args(struct qlcnic_cmd_args *mbx, u32 type)
+{
+       const struct qlcnic_mailbox_metadata *mbx_tbl;
+       int i, size;
+
+       mbx_tbl = qlcnic_sriov_bc_mbx_tbl;
+       size = ARRAY_SIZE(qlcnic_sriov_bc_mbx_tbl);
+
+       for (i = 0; i < size; i++) {
+               if (type == mbx_tbl[i].cmd) {
+                       mbx->op_type = QLC_BC_CMD;
+                       mbx->req.num = mbx_tbl[i].in_args;
+                       mbx->rsp.num = mbx_tbl[i].out_args;
+                       mbx->req.arg = kcalloc(mbx->req.num, sizeof(u32),
+                                              GFP_ATOMIC);
+                       if (!mbx->req.arg)
+                               return -ENOMEM;
+                       mbx->rsp.arg = kcalloc(mbx->rsp.num, sizeof(u32),
+                                              GFP_ATOMIC);
+                       if (!mbx->rsp.arg) {
+                               kfree(mbx->req.arg);
+                               mbx->req.arg = NULL;
+                               return -ENOMEM;
+                       }
+                       mbx->req.arg[0] = (type | (mbx->req.num << 16) |
+                                          (3 << 29));
+                       mbx->rsp.arg[0] = (type & 0xffff) | mbx->rsp.num << 16;
+                       return 0;
+               }
+       }
+       return -EINVAL;
+}
+
+static int qlcnic_sriov_prepare_bc_hdr(struct qlcnic_bc_trans *trans,
+                                      struct qlcnic_cmd_args *cmd,
+                                      u16 seq, u8 msg_type)
+{
+       struct qlcnic_bc_hdr *hdr;
+       int i;
+       u32 num_regs, bc_pay_sz;
+       u16 remainder;
+       u8 cmd_op, num_frags, t_num_frags;
+
+       bc_pay_sz = QLC_BC_PAYLOAD_SZ;
+       if (msg_type == QLC_BC_COMMAND) {
+               trans->req_pay = (struct qlcnic_bc_payload *)cmd->req.arg;
+               trans->rsp_pay = (struct qlcnic_bc_payload *)cmd->rsp.arg;
+               num_regs = cmd->req.num;
+               trans->req_pay_size = (num_regs * 4);
+               num_regs = cmd->rsp.num;
+               trans->rsp_pay_size = (num_regs * 4);
+               cmd_op = cmd->req.arg[0] & 0xff;
+               remainder = (trans->req_pay_size) % (bc_pay_sz);
+               num_frags = (trans->req_pay_size) / (bc_pay_sz);
+               if (remainder)
+                       num_frags++;
+               t_num_frags = num_frags;
+               if (qlcnic_sriov_alloc_bc_msg(&trans->req_hdr, num_frags))
+                       return -ENOMEM;
+               remainder = (trans->rsp_pay_size) % (bc_pay_sz);
+               num_frags = (trans->rsp_pay_size) / (bc_pay_sz);
+               if (remainder)
+                       num_frags++;
+               if (qlcnic_sriov_alloc_bc_msg(&trans->rsp_hdr, num_frags))
+                       return -ENOMEM;
+               num_frags  = t_num_frags;
+               hdr = trans->req_hdr;
+       } else {
+               cmd->req.arg = (u32 *)trans->req_pay;
+               cmd->rsp.arg = (u32 *)trans->rsp_pay;
+               cmd_op = cmd->req.arg[0] & 0xff;
+               cmd->cmd_op = cmd_op;
+               remainder = (trans->rsp_pay_size) % (bc_pay_sz);
+               num_frags = (trans->rsp_pay_size) / (bc_pay_sz);
+               if (remainder)
+                       num_frags++;
+               cmd->req.num = trans->req_pay_size / 4;
+               cmd->rsp.num = trans->rsp_pay_size / 4;
+               hdr = trans->rsp_hdr;
+               cmd->op_type = trans->req_hdr->op_type;
+       }
+
+       trans->trans_id = seq;
+       trans->cmd_id = cmd_op;
+       for (i = 0; i < num_frags; i++) {
+               hdr[i].version = 2;
+               hdr[i].msg_type = msg_type;
+               hdr[i].op_type = cmd->op_type;
+               hdr[i].num_cmds = 1;
+               hdr[i].num_frags = num_frags;
+               hdr[i].frag_num = i + 1;
+               hdr[i].cmd_op = cmd_op;
+               hdr[i].seq_id = seq;
+       }
+       return 0;
+}
+
+static void qlcnic_sriov_cleanup_transaction(struct qlcnic_bc_trans *trans)
+{
+       if (!trans)
+               return;
+       kfree(trans->req_hdr);
+       kfree(trans->rsp_hdr);
+       kfree(trans);
+}
+
+static int qlcnic_sriov_clear_trans(struct qlcnic_vf_info *vf,
+                                   struct qlcnic_bc_trans *trans, u8 type)
+{
+       struct qlcnic_trans_list *t_list;
+       unsigned long flags;
+       int ret = 0;
+
+       if (type == QLC_BC_RESPONSE) {
+               t_list = &vf->rcv_act;
+               spin_lock_irqsave(&t_list->lock, flags);
+               t_list->count--;
+               list_del(&trans->list);
+               if (t_list->count > 0)
+                       ret = 1;
+               spin_unlock_irqrestore(&t_list->lock, flags);
+       }
+       if (type == QLC_BC_COMMAND) {
+               while (test_and_set_bit(QLC_BC_VF_SEND, &vf->state))
+                       msleep(100);
+               vf->send_cmd = NULL;
+               clear_bit(QLC_BC_VF_SEND, &vf->state);
+       }
+       return ret;
+}
+
+static void qlcnic_sriov_schedule_bc_cmd(struct qlcnic_sriov *sriov,
+                                        struct qlcnic_vf_info *vf,
+                                        work_func_t func)
+{
+       if (test_bit(QLC_BC_VF_FLR, &vf->state) ||
+           vf->adapter->need_fw_reset)
+               return;
+
+       queue_work(sriov->bc.bc_trans_wq, &vf->trans_work);
+}
+
+static inline void qlcnic_sriov_wait_for_resp(struct qlcnic_bc_trans *trans)
+{
+       struct completion *cmpl = &trans->resp_cmpl;
+
+       if (wait_for_completion_timeout(cmpl, QLC_MBOX_RESP_TIMEOUT))
+               trans->trans_state = QLC_END;
+       else
+               trans->trans_state = QLC_ABORT;
+
+       return;
+}
+
+static void qlcnic_sriov_handle_multi_frags(struct qlcnic_bc_trans *trans,
+                                           u8 type)
+{
+       if (type == QLC_BC_RESPONSE) {
+               trans->curr_rsp_frag++;
+               if (trans->curr_rsp_frag < trans->rsp_hdr->num_frags)
+                       trans->trans_state = QLC_INIT;
+               else
+                       trans->trans_state = QLC_END;
+       } else {
+               trans->curr_req_frag++;
+               if (trans->curr_req_frag < trans->req_hdr->num_frags)
+                       trans->trans_state = QLC_INIT;
+               else
+                       trans->trans_state = QLC_WAIT_FOR_RESP;
+       }
+}
+
+static void qlcnic_sriov_wait_for_channel_free(struct qlcnic_bc_trans *trans,
+                                              u8 type)
+{
+       struct qlcnic_vf_info *vf = trans->vf;
+       struct completion *cmpl = &vf->ch_free_cmpl;
+
+       if (!wait_for_completion_timeout(cmpl, QLC_MBOX_CH_FREE_TIMEOUT)) {
+               trans->trans_state = QLC_ABORT;
+               return;
+       }
+
+       clear_bit(QLC_BC_VF_CHANNEL, &vf->state);
+       qlcnic_sriov_handle_multi_frags(trans, type);
+}
+
+static void qlcnic_sriov_pull_bc_msg(struct qlcnic_adapter *adapter,
+                                    u32 *hdr, u32 *pay, u32 size)
+{
+       struct qlcnic_hardware_context *ahw = adapter->ahw;
+       u32 fw_mbx;
+       u8 i, max = 2, hdr_size, j;
+
+       hdr_size = (sizeof(struct qlcnic_bc_hdr) / sizeof(u32));
+       max = (size / sizeof(u32)) + hdr_size;
+
+       fw_mbx = readl(QLCNIC_MBX_FW(ahw, 0));
+       for (i = 2, j = 0; j < hdr_size; i++, j++)
+               *(hdr++) = readl(QLCNIC_MBX_FW(ahw, i));
+       for (; j < max; i++, j++)
+               *(pay++) = readl(QLCNIC_MBX_FW(ahw, i));
+}
+
+static int __qlcnic_sriov_issue_bc_post(struct qlcnic_vf_info *vf)
+{
+       int ret = -EBUSY;
+       u32 timeout = 10000;
+
+       do {
+               if (!test_and_set_bit(QLC_BC_VF_CHANNEL, &vf->state)) {
+                       ret = 0;
+                       break;
+               }
+               mdelay(1);
+       } while (--timeout);
+
+       return ret;
+}
+
+static int qlcnic_sriov_issue_bc_post(struct qlcnic_bc_trans *trans, u8 type)
+{
+       struct qlcnic_vf_info *vf = trans->vf;
+       u32 pay_size, hdr_size;
+       u32 *hdr, *pay;
+       int ret;
+       u8 pci_func = trans->func_id;
+
+       if (__qlcnic_sriov_issue_bc_post(vf))
+               return -EBUSY;
+
+       if (type == QLC_BC_COMMAND) {
+               hdr = (u32 *)(trans->req_hdr + trans->curr_req_frag);
+               pay = (u32 *)(trans->req_pay + trans->curr_req_frag);
+               hdr_size = (sizeof(struct qlcnic_bc_hdr) / sizeof(u32));
+               pay_size = qlcnic_sriov_get_bc_paysize(trans->req_pay_size,
+                                                      trans->curr_req_frag);
+               pay_size = (pay_size / sizeof(u32));
+       } else {
+               hdr = (u32 *)(trans->rsp_hdr + trans->curr_rsp_frag);
+               pay = (u32 *)(trans->rsp_pay + trans->curr_rsp_frag);
+               hdr_size = (sizeof(struct qlcnic_bc_hdr) / sizeof(u32));
+               pay_size = qlcnic_sriov_get_bc_paysize(trans->rsp_pay_size,
+                                                      trans->curr_rsp_frag);
+               pay_size = (pay_size / sizeof(u32));
+       }
+
+       ret = qlcnic_sriov_post_bc_msg(vf->adapter, hdr, pay,
+                                      pci_func, pay_size);
+       return ret;
+}
+
+static int __qlcnic_sriov_send_bc_msg(struct qlcnic_bc_trans *trans,
+                                     struct qlcnic_vf_info *vf, u8 type)
+{
+       bool flag = true;
+       int err = -EIO;
+
+       while (flag) {
+               if (test_bit(QLC_BC_VF_FLR, &vf->state) ||
+                   vf->adapter->need_fw_reset)
+                       trans->trans_state = QLC_ABORT;
+
+               switch (trans->trans_state) {
+               case QLC_INIT:
+                       trans->trans_state = QLC_WAIT_FOR_CHANNEL_FREE;
+                       if (qlcnic_sriov_issue_bc_post(trans, type))
+                               trans->trans_state = QLC_ABORT;
+                       break;
+               case QLC_WAIT_FOR_CHANNEL_FREE:
+                       qlcnic_sriov_wait_for_channel_free(trans, type);
+                       break;
+               case QLC_WAIT_FOR_RESP:
+                       qlcnic_sriov_wait_for_resp(trans);
+                       break;
+               case QLC_END:
+                       err = 0;
+                       flag = false;
+                       break;
+               case QLC_ABORT:
+                       err = -EIO;
+                       flag = false;
+                       clear_bit(QLC_BC_VF_CHANNEL, &vf->state);
+                       break;
+               default:
+                       err = -EIO;
+                       flag = false;
+               }
+       }
+       return err;
+}
+
+static int qlcnic_sriov_send_bc_cmd(struct qlcnic_adapter *adapter,
+                                   struct qlcnic_bc_trans *trans, int pci_func)
+{
+       struct qlcnic_vf_info *vf;
+       int err, index = qlcnic_sriov_func_to_index(adapter, pci_func);
+
+       if (index < 0)
+               return -EIO;
+
+       vf = &adapter->ahw->sriov->vf_info[index];
+       trans->vf = vf;
+       trans->func_id = pci_func;
+
+       if (!test_bit(QLC_BC_VF_STATE, &vf->state)) {
+               if (qlcnic_sriov_pf_check(adapter))
+                       return -EIO;
+               if (qlcnic_sriov_vf_check(adapter) &&
+                   trans->cmd_id != QLCNIC_BC_CMD_CHANNEL_INIT)
+                       return -EIO;
+       }
+
+       mutex_lock(&vf->send_cmd_lock);
+       vf->send_cmd = trans;
+       err = __qlcnic_sriov_send_bc_msg(trans, vf, QLC_BC_COMMAND);
+       qlcnic_sriov_clear_trans(vf, trans, QLC_BC_COMMAND);
+       mutex_unlock(&vf->send_cmd_lock);
+       return err;
+}
+
+static void __qlcnic_sriov_process_bc_cmd(struct qlcnic_adapter *adapter,
+                                         struct qlcnic_bc_trans *trans,
+                                         struct qlcnic_cmd_args *cmd)
+{
+#ifdef CONFIG_QLCNIC_SRIOV
+       if (qlcnic_sriov_pf_check(adapter)) {
+               qlcnic_sriov_pf_process_bc_cmd(adapter, trans, cmd);
+               return;
+       }
+#endif
+       cmd->rsp.arg[0] |= (0x9 << 25);
+       return;
+}
+
+static void qlcnic_sriov_process_bc_cmd(struct work_struct *work)
+{
+       struct qlcnic_vf_info *vf = container_of(work, struct qlcnic_vf_info,
+                                                trans_work);
+       struct qlcnic_bc_trans *trans = NULL;
+       struct qlcnic_adapter *adapter = vf->adapter;
+       struct qlcnic_cmd_args cmd;
+       u8 req;
+
+       if (adapter->need_fw_reset)
+               return;
+
+       if (test_bit(QLC_BC_VF_FLR, &vf->state))
+               return;
+
+       memset(&cmd, 0, sizeof(struct qlcnic_cmd_args));
+       trans = list_first_entry(&vf->rcv_act.wait_list,
+                                struct qlcnic_bc_trans, list);
+       adapter = vf->adapter;
+
+       if (qlcnic_sriov_prepare_bc_hdr(trans, &cmd, trans->req_hdr->seq_id,
+                                       QLC_BC_RESPONSE))
+               goto cleanup_trans;
+
+       __qlcnic_sriov_process_bc_cmd(adapter, trans, &cmd);
+       trans->trans_state = QLC_INIT;
+       __qlcnic_sriov_send_bc_msg(trans, vf, QLC_BC_RESPONSE);
+
+cleanup_trans:
+       qlcnic_free_mbx_args(&cmd);
+       req = qlcnic_sriov_clear_trans(vf, trans, QLC_BC_RESPONSE);
+       qlcnic_sriov_cleanup_transaction(trans);
+       if (req)
+               qlcnic_sriov_schedule_bc_cmd(adapter->ahw->sriov, vf,
+                                            qlcnic_sriov_process_bc_cmd);
+}
+
+static void qlcnic_sriov_handle_bc_resp(struct qlcnic_bc_hdr *hdr,
+                                       struct qlcnic_vf_info *vf)
+{
+       struct qlcnic_bc_trans *trans;
+       u32 pay_size;
+
+       if (test_and_set_bit(QLC_BC_VF_SEND, &vf->state))
+               return;
+
+       trans = vf->send_cmd;
+
+       if (trans == NULL)
+               goto clear_send;
+
+       if (trans->trans_id != hdr->seq_id)
+               goto clear_send;
+
+       pay_size = qlcnic_sriov_get_bc_paysize(trans->rsp_pay_size,
+                                              trans->curr_rsp_frag);
+       qlcnic_sriov_pull_bc_msg(vf->adapter,
+                                (u32 *)(trans->rsp_hdr + trans->curr_rsp_frag),
+                                (u32 *)(trans->rsp_pay + trans->curr_rsp_frag),
+                                pay_size);
+       if (++trans->curr_rsp_frag < trans->rsp_hdr->num_frags)
+               goto clear_send;
+
+       complete(&trans->resp_cmpl);
+
+clear_send:
+       clear_bit(QLC_BC_VF_SEND, &vf->state);
+}
+
+int __qlcnic_sriov_add_act_list(struct qlcnic_sriov *sriov,
+                               struct qlcnic_vf_info *vf,
+                               struct qlcnic_bc_trans *trans)
+{
+       struct qlcnic_trans_list *t_list = &vf->rcv_act;
+
+       t_list->count++;
+       list_add_tail(&trans->list, &t_list->wait_list);
+       if (t_list->count == 1)
+               qlcnic_sriov_schedule_bc_cmd(sriov, vf,
+                                            qlcnic_sriov_process_bc_cmd);
+       return 0;
+}
+
+static int qlcnic_sriov_add_act_list(struct qlcnic_sriov *sriov,
+                                    struct qlcnic_vf_info *vf,
+                                    struct qlcnic_bc_trans *trans)
+{
+       struct qlcnic_trans_list *t_list = &vf->rcv_act;
+
+       spin_lock(&t_list->lock);
+
+       __qlcnic_sriov_add_act_list(sriov, vf, trans);
+
+       spin_unlock(&t_list->lock);
+       return 0;
+}
+
+static void qlcnic_sriov_handle_pending_trans(struct qlcnic_sriov *sriov,
+                                             struct qlcnic_vf_info *vf,
+                                             struct qlcnic_bc_hdr *hdr)
+{
+       struct qlcnic_bc_trans *trans = NULL;
+       struct list_head *node;
+       u32 pay_size, curr_frag;
+       u8 found = 0, active = 0;
+
+       spin_lock(&vf->rcv_pend.lock);
+       if (vf->rcv_pend.count > 0) {
+               list_for_each(node, &vf->rcv_pend.wait_list) {
+                       trans = list_entry(node, struct qlcnic_bc_trans, list);
+                       if (trans->trans_id == hdr->seq_id) {
+                               found = 1;
+                               break;
+                       }
+               }
+       }
+
+       if (found) {
+               curr_frag = trans->curr_req_frag;
+               pay_size = qlcnic_sriov_get_bc_paysize(trans->req_pay_size,
+                                                      curr_frag);
+               qlcnic_sriov_pull_bc_msg(vf->adapter,
+                                        (u32 *)(trans->req_hdr + curr_frag),
+                                        (u32 *)(trans->req_pay + curr_frag),
+                                        pay_size);
+               trans->curr_req_frag++;
+               if (trans->curr_req_frag >= hdr->num_frags) {
+                       vf->rcv_pend.count--;
+                       list_del(&trans->list);
+                       active = 1;
+               }
+       }
+       spin_unlock(&vf->rcv_pend.lock);
+
+       if (active)
+               if (qlcnic_sriov_add_act_list(sriov, vf, trans))
+                       qlcnic_sriov_cleanup_transaction(trans);
+
+       return;
+}
+
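+/* Handle an incoming back-channel command message: continuation fragments
+ * are routed to the pending-transaction handler, while a new request gets a
+ * transaction allocated and its first fragment pulled from the mailbox,
+ * after which it is queued on the active list (single-fragment request) or
+ * parked on the pending list until the remaining fragments arrive.
+ */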
+static void qlcnic_sriov_handle_bc_cmd(struct qlcnic_sriov *sriov,
+                                      struct qlcnic_bc_hdr *hdr,
+                                      struct qlcnic_vf_info *vf)
+{
+       struct qlcnic_bc_trans *trans;
+       struct qlcnic_adapter *adapter = vf->adapter;
+       struct qlcnic_cmd_args cmd;
+       u32 pay_size;
+       int err;
+       u8 cmd_op;
+
+       if (adapter->need_fw_reset)
+               return;
+
+       if (!test_bit(QLC_BC_VF_STATE, &vf->state) &&
+           hdr->op_type != QLC_BC_CMD &&
+           hdr->cmd_op != QLCNIC_BC_CMD_CHANNEL_INIT)
+               return;
+
+       if (hdr->frag_num > 1) {
+               qlcnic_sriov_handle_pending_trans(sriov, vf, hdr);
+               return;
+       }
+
+       memset(&cmd, 0, sizeof(struct qlcnic_cmd_args));
+       cmd_op = hdr->cmd_op;
+       if (qlcnic_sriov_alloc_bc_trans(&trans))
+               return;
+
+       if (hdr->op_type == QLC_BC_CMD)
+               err = qlcnic_sriov_alloc_bc_mbx_args(&cmd, cmd_op);
+       else
+               err = qlcnic_alloc_mbx_args(&cmd, adapter, cmd_op);
+
+       if (err) {
+               qlcnic_sriov_cleanup_transaction(trans);
+               return;
+       }
+
+       cmd.op_type = hdr->op_type;
+       if (qlcnic_sriov_prepare_bc_hdr(trans, &cmd, hdr->seq_id,
+                                       QLC_BC_COMMAND)) {
+               qlcnic_free_mbx_args(&cmd);
+               qlcnic_sriov_cleanup_transaction(trans);
+               return;
+       }
+
+       pay_size = qlcnic_sriov_get_bc_paysize(trans->req_pay_size,
+                                              trans->curr_req_frag);
+       qlcnic_sriov_pull_bc_msg(vf->adapter,
+                                (u32 *)(trans->req_hdr + trans->curr_req_frag),
+                                (u32 *)(trans->req_pay + trans->curr_req_frag),
+                                pay_size);
+       trans->func_id = vf->pci_func;
+       trans->vf = vf;
+       trans->trans_id = hdr->seq_id;
+       trans->curr_req_frag++;
+
+       if (qlcnic_sriov_soft_flr_check(adapter, trans, vf))
+               return;
+
+       if (trans->curr_req_frag == trans->req_hdr->num_frags) {
+               if (qlcnic_sriov_add_act_list(sriov, vf, trans)) {
+                       qlcnic_free_mbx_args(&cmd);
+                       qlcnic_sriov_cleanup_transaction(trans);
+               }
+       } else {
+               spin_lock(&vf->rcv_pend.lock);
+               list_add_tail(&trans->list, &vf->rcv_pend.wait_list);
+               vf->rcv_pend.count++;
+               spin_unlock(&vf->rcv_pend.lock);
+       }
+}
+
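+/* Read the back-channel header out of the mailbox FW registers and dispatch
+ * the message to the command or response handler.
+ */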
+static void qlcnic_sriov_handle_msg_event(struct qlcnic_sriov *sriov,
+                                         struct qlcnic_vf_info *vf)
+{
+       struct qlcnic_bc_hdr hdr;
+       u32 *ptr = (u32 *)&hdr;
+       u8 msg_type, i;
+
+       for (i = 2; i < 6; i++)
+               ptr[i - 2] = readl(QLCNIC_MBX_FW(vf->adapter->ahw, i));
+       msg_type = hdr.msg_type;
+
+       switch (msg_type) {
+       case QLC_BC_COMMAND:
+               qlcnic_sriov_handle_bc_cmd(sriov, &hdr, vf);
+               break;
+       case QLC_BC_RESPONSE:
+               qlcnic_sriov_handle_bc_resp(&hdr, vf);
+               break;
+       }
+}
+
+static void qlcnic_sriov_handle_flr_event(struct qlcnic_sriov *sriov,
+                                         struct qlcnic_vf_info *vf)
+{
+       struct qlcnic_adapter *adapter = vf->adapter;
+
+       if (qlcnic_sriov_pf_check(adapter))
+               qlcnic_sriov_pf_handle_flr(sriov, vf);
+       else
+               dev_err(&adapter->pdev->dev,
+                       "Invalid event to VF. VF should not get FLR event\n");
+}
+
+void qlcnic_sriov_handle_bc_event(struct qlcnic_adapter *adapter, u32 event)
+{
+       struct qlcnic_vf_info *vf;
+       struct qlcnic_sriov *sriov;
+       int index;
+       u8 pci_func;
+
+       sriov = adapter->ahw->sriov;
+       pci_func = qlcnic_sriov_target_func_id(event);
+       index = qlcnic_sriov_func_to_index(adapter, pci_func);
+
+       if (index < 0)
+               return;
+
+       vf = &sriov->vf_info[index];
+       vf->pci_func = pci_func;
+
+       if (qlcnic_sriov_channel_free_check(event))
+               complete(&vf->ch_free_cmpl);
+
+       if (qlcnic_sriov_flr_check(event)) {
+               qlcnic_sriov_handle_flr_event(sriov, vf);
+               return;
+       }
+
+       if (qlcnic_sriov_bc_msg_check(event))
+               qlcnic_sriov_handle_msg_event(sriov, vf);
+}
+
+int qlcnic_sriov_cfg_bc_intr(struct qlcnic_adapter *adapter, u8 enable)
+{
+       struct qlcnic_cmd_args cmd;
+       int err;
+
+       if (!test_bit(__QLCNIC_SRIOV_ENABLE, &adapter->state))
+               return 0;
+
+       if (qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_BC_EVENT_SETUP))
+               return -ENOMEM;
+
+       if (enable)
+               cmd.req.arg[1] = (1 << 4) | (1 << 5) | (1 << 6) | (1 << 7);
+
+       err = qlcnic_83xx_issue_cmd(adapter, &cmd);
+
+       if (err != QLCNIC_RCODE_SUCCESS) {
+               dev_err(&adapter->pdev->dev,
+                       "Failed to %s bc events, err=%d\n",
+                       (enable ? "enable" : "disable"), err);
+       }
+
+       qlcnic_free_mbx_args(&cmd);
+       return err;
+}
+
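+/* Decide whether a timed-out back-channel request should be retried: allow
+ * a retry only while the device is in the READY IDC state and the retry
+ * count has not been exhausted.
+ */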
+static int qlcnic_sriov_retry_bc_cmd(struct qlcnic_adapter *adapter,
+                                    struct qlcnic_bc_trans *trans)
+{
+       u8 max = QLC_BC_CMD_MAX_RETRY_CNT;
+       u32 state;
+
+       state = QLCRDX(adapter->ahw, QLC_83XX_IDC_DEV_STATE);
+       if (state == QLC_83XX_IDC_DEV_READY) {
+               msleep(20);
+               clear_bit(QLC_BC_VF_CHANNEL, &trans->vf->state);
+               trans->trans_state = QLC_INIT;
+               if (++adapter->fw_fail_cnt > max)
+                       return -EIO;
+               else
+                       return 0;
+       }
+
+       return -EIO;
+}
+
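+/* Issue a command from the VF to the PF over the back channel and wait for
+ * the response. A timed-out CHANNEL_INIT request is retried while the
+ * device stays READY; a timeout otherwise flags the adapter for a firmware
+ * reset.
+ */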
+static int __qlcnic_sriov_issue_cmd(struct qlcnic_adapter *adapter,
+                                 struct qlcnic_cmd_args *cmd)
+{
+       struct qlcnic_hardware_context *ahw = adapter->ahw;
+       struct qlcnic_mailbox *mbx = ahw->mailbox;
+       struct device *dev = &adapter->pdev->dev;
+       struct qlcnic_bc_trans *trans;
+       int err;
+       u32 rsp_data, opcode, mbx_err_code, rsp;
+       u16 seq = ++adapter->ahw->sriov->bc.trans_counter;
+       u8 func = ahw->pci_func;
+
+       rsp = qlcnic_sriov_alloc_bc_trans(&trans);
+       if (rsp)
+               goto free_cmd;
+
+       rsp = qlcnic_sriov_prepare_bc_hdr(trans, cmd, seq, QLC_BC_COMMAND);
+       if (rsp)
+               goto cleanup_transaction;
+
+retry:
+       if (!test_bit(QLC_83XX_MBX_READY, &mbx->status)) {
+               rsp = -EIO;
+               QLCDB(adapter, DRV, "MBX not Ready!(cmd 0x%x) for VF 0x%x\n",
+                     QLCNIC_MBX_RSP(cmd->req.arg[0]), func);
+               goto err_out;
+       }
+
+       err = qlcnic_sriov_send_bc_cmd(adapter, trans, func);
+       if (err) {
+               dev_err(dev, "MBX command 0x%x timed out for VF %d\n",
+                       (cmd->req.arg[0] & 0xffff), func);
+               rsp = QLCNIC_RCODE_TIMEOUT;
+
+               /* After an adapter reset the PF driver may take some time to
+                * respond to the VF's request. Retry the request up to the
+                * maximum retry count.
+                */
+               if ((trans->req_hdr->cmd_op == QLCNIC_BC_CMD_CHANNEL_INIT) &&
+                   !qlcnic_sriov_retry_bc_cmd(adapter, trans))
+                       goto retry;
+
+               goto err_out;
+       }
+
+       rsp_data = cmd->rsp.arg[0];
+       mbx_err_code = QLCNIC_MBX_STATUS(rsp_data);
+       opcode = QLCNIC_MBX_RSP(cmd->req.arg[0]);
+
+       if ((mbx_err_code == QLCNIC_MBX_RSP_OK) ||
+           (mbx_err_code == QLCNIC_MBX_PORT_RSP_OK)) {
+               rsp = QLCNIC_RCODE_SUCCESS;
+       } else {
+               if (cmd->type == QLC_83XX_MBX_CMD_NO_WAIT) {
+                       rsp = QLCNIC_RCODE_SUCCESS;
+               } else {
+                       rsp = mbx_err_code;
+                       if (!rsp)
+                               rsp = 1;
+
+                       dev_err(dev,
+                               "MBX command 0x%x failed with err:0x%x for VF %d\n",
+                               opcode, mbx_err_code, func);
+               }
+       }
+
+err_out:
+       if (rsp == QLCNIC_RCODE_TIMEOUT) {
+               ahw->reset_context = 1;
+               adapter->need_fw_reset = 1;
+               clear_bit(QLC_83XX_MBX_READY, &mbx->status);
+       }
+
+cleanup_transaction:
+       qlcnic_sriov_cleanup_transaction(trans);
+
+free_cmd:
+       if (cmd->type == QLC_83XX_MBX_CMD_NO_WAIT) {
+               qlcnic_free_mbx_args(cmd);
+               kfree(cmd);
+       }
+
+       return rsp;
+}
+
+
+static int qlcnic_sriov_issue_cmd(struct qlcnic_adapter *adapter,
+                                 struct qlcnic_cmd_args *cmd)
+{
+       if (cmd->type == QLC_83XX_MBX_CMD_NO_WAIT)
+               return qlcnic_sriov_async_issue_cmd(adapter, cmd);
+       else
+               return __qlcnic_sriov_issue_cmd(adapter, cmd);
+}
+
+static int qlcnic_sriov_channel_cfg_cmd(struct qlcnic_adapter *adapter, u8 cmd_op)
+{
+       struct qlcnic_cmd_args cmd;
+       struct qlcnic_vf_info *vf = &adapter->ahw->sriov->vf_info[0];
+       int ret;
+
+       memset(&cmd, 0, sizeof(cmd));
+       if (qlcnic_sriov_alloc_bc_mbx_args(&cmd, cmd_op))
+               return -ENOMEM;
+
+       ret = qlcnic_issue_cmd(adapter, &cmd);
+       if (ret) {
+               dev_err(&adapter->pdev->dev,
+                       "Failed bc channel %s, err=%d\n", cmd_op ? "term" : "init",
+                       ret);
+               goto out;
+       }
+
+       cmd_op = (cmd.rsp.arg[0] & 0xff);
+       if (cmd.rsp.arg[0] >> 25 == 2) {
+               ret = 2;
+               goto out;
+       }
+       if (cmd_op == QLCNIC_BC_CMD_CHANNEL_INIT)
+               set_bit(QLC_BC_VF_STATE, &vf->state);
+       else
+               clear_bit(QLC_BC_VF_STATE, &vf->state);
+
+out:
+       qlcnic_free_mbx_args(&cmd);
+       return ret;
+}
+
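+/* Program a MAC filter for the VF: when the VF has VLANs configured the
+ * address is added for each active VLAN, otherwise (and additionally on
+ * 84xx adapters) it is added with VLAN id 0.
+ */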
+static void qlcnic_vf_add_mc_list(struct net_device *netdev, const u8 *mac,
+                                 enum qlcnic_mac_type mac_type)
+{
+       struct qlcnic_adapter *adapter = netdev_priv(netdev);
+       struct qlcnic_sriov *sriov = adapter->ahw->sriov;
+       struct qlcnic_vf_info *vf;
+       u16 vlan_id;
+       int i;
+
+       vf = &adapter->ahw->sriov->vf_info[0];
+
+       if (!qlcnic_sriov_check_any_vlan(vf)) {
+               qlcnic_nic_add_mac(adapter, mac, 0, mac_type);
+       } else {
+               spin_lock(&vf->vlan_list_lock);
+               for (i = 0; i < sriov->num_allowed_vlans; i++) {
+                       vlan_id = vf->sriov_vlans[i];
+                       if (vlan_id)
+                               qlcnic_nic_add_mac(adapter, mac, vlan_id,
+                                                  mac_type);
+               }
+               spin_unlock(&vf->vlan_list_lock);
+               if (qlcnic_84xx_check(adapter))
+                       qlcnic_nic_add_mac(adapter, mac, 0, mac_type);
+       }
+}
+
+void qlcnic_sriov_cleanup_async_list(struct qlcnic_back_channel *bc)
+{
+       struct list_head *head = &bc->async_cmd_list;
+       struct qlcnic_async_cmd *entry;
+
+       flush_workqueue(bc->bc_async_wq);
+       cancel_work_sync(&bc->vf_async_work);
+
+       spin_lock(&bc->queue_lock);
+       while (!list_empty(head)) {
+               entry = list_entry(head->next, struct qlcnic_async_cmd,
+                                  list);
+               list_del(&entry->list);
+               kfree(entry->cmd);
+               kfree(entry);
+       }
+       spin_unlock(&bc->queue_lock);
+}
+
+void qlcnic_sriov_vf_set_multi(struct net_device *netdev)
+{
+       struct qlcnic_adapter *adapter = netdev_priv(netdev);
+       struct qlcnic_hardware_context *ahw = adapter->ahw;
+       static const u8 bcast_addr[ETH_ALEN] = {
+               0xff, 0xff, 0xff, 0xff, 0xff, 0xff
+       };
+       struct netdev_hw_addr *ha;
+       u32 mode = VPORT_MISS_MODE_DROP;
+
+       if (!test_bit(__QLCNIC_FW_ATTACHED, &adapter->state))
+               return;
+
+       if (netdev->flags & IFF_PROMISC) {
+               if (!(adapter->flags & QLCNIC_PROMISC_DISABLED))
+                       mode = VPORT_MISS_MODE_ACCEPT_ALL;
+       } else if ((netdev->flags & IFF_ALLMULTI) ||
+                  (netdev_mc_count(netdev) > ahw->max_mc_count)) {
+               mode = VPORT_MISS_MODE_ACCEPT_MULTI;
+       } else {
+               qlcnic_vf_add_mc_list(netdev, bcast_addr, QLCNIC_BROADCAST_MAC);
+               if (!netdev_mc_empty(netdev)) {
+                       qlcnic_flush_mcast_mac(adapter);
+                       netdev_for_each_mc_addr(ha, netdev)
+                               qlcnic_vf_add_mc_list(netdev, ha->addr,
+                                                     QLCNIC_MULTICAST_MAC);
+               }
+       }
+
+       /* Configure unicast MAC addresses; if there is not enough space to
+        * store all of them, enable promiscuous mode.
+        */
+       if (netdev_uc_count(netdev) > ahw->max_uc_count) {
+               mode = VPORT_MISS_MODE_ACCEPT_ALL;
+       } else if (!netdev_uc_empty(netdev)) {
+               netdev_for_each_uc_addr(ha, netdev)
+                       qlcnic_vf_add_mc_list(netdev, ha->addr,
+                                             QLCNIC_UNICAST_MAC);
+       }
+
+       if (adapter->pdev->is_virtfn) {
+               if (mode == VPORT_MISS_MODE_ACCEPT_ALL &&
+                   !adapter->fdb_mac_learn) {
+                       qlcnic_alloc_lb_filters_mem(adapter);
+                       adapter->drv_mac_learn = 1;
+                       adapter->rx_mac_learn = true;
+               } else {
+                       adapter->drv_mac_learn = 0;
+                       adapter->rx_mac_learn = false;
+               }
+       }
+
+       qlcnic_nic_set_promisc(adapter, mode);
+}
+
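+/* Worker for asynchronously issued commands: splice the queued entries off
+ * the async list and issue each one synchronously over the back channel,
+ * rescheduling the work if new entries were queued in the meantime.
+ */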
+static void qlcnic_sriov_handle_async_issue_cmd(struct work_struct *work)
+{
+       struct qlcnic_async_cmd *entry, *tmp;
+       struct qlcnic_back_channel *bc;
+       struct qlcnic_cmd_args *cmd;
+       struct list_head *head;
+       LIST_HEAD(del_list);
+
+       bc = container_of(work, struct qlcnic_back_channel, vf_async_work);
+       head = &bc->async_cmd_list;
+
+       spin_lock(&bc->queue_lock);
+       list_splice_init(head, &del_list);
+       spin_unlock(&bc->queue_lock);
+
+       list_for_each_entry_safe(entry, tmp, &del_list, list) {
+               list_del(&entry->list);
+               cmd = entry->cmd;
+               __qlcnic_sriov_issue_cmd(bc->adapter, cmd);
+               kfree(entry);
+       }
+
+       if (!list_empty(head))
+               queue_work(bc->bc_async_wq, &bc->vf_async_work);
+
+       return;
+}
+
+static struct qlcnic_async_cmd *
+qlcnic_sriov_alloc_async_cmd(struct qlcnic_back_channel *bc,
+                            struct qlcnic_cmd_args *cmd)
+{
+       struct qlcnic_async_cmd *entry = NULL;
+
+       entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
+       if (!entry)
+               return NULL;
+
+       entry->cmd = cmd;
+
+       spin_lock(&bc->queue_lock);
+       list_add_tail(&entry->list, &bc->async_cmd_list);
+       spin_unlock(&bc->queue_lock);
+
+       return entry;
+}
+
+static void qlcnic_sriov_schedule_async_cmd(struct qlcnic_back_channel *bc,
+                                           struct qlcnic_cmd_args *cmd)
+{
+       struct qlcnic_async_cmd *entry = NULL;
+
+       entry = qlcnic_sriov_alloc_async_cmd(bc, cmd);
+       if (!entry) {
+               qlcnic_free_mbx_args(cmd);
+               kfree(cmd);
+               return;
+       }
+
+       queue_work(bc->bc_async_wq, &bc->vf_async_work);
+}
+
+static int qlcnic_sriov_async_issue_cmd(struct qlcnic_adapter *adapter,
+                                       struct qlcnic_cmd_args *cmd)
+{
+       struct qlcnic_back_channel *bc = &adapter->ahw->sriov->bc;
+
+       if (adapter->need_fw_reset)
+               return -EIO;
+
+       qlcnic_sriov_schedule_async_cmd(bc, cmd);
+
+       return 0;
+}
+
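+/* Re-initialize the VF after a firmware reset: restart mailbox processing,
+ * re-enable back-channel interrupts and re-establish the channel with the
+ * PF before re-running the VF init sequence.
+ */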
+static int qlcnic_sriov_vf_reinit_driver(struct qlcnic_adapter *adapter)
+{
+       int err;
+
+       adapter->need_fw_reset = 0;
+       qlcnic_83xx_reinit_mbx_work(adapter->ahw->mailbox);
+       qlcnic_83xx_enable_mbx_interrupt(adapter);
+
+       err = qlcnic_sriov_cfg_bc_intr(adapter, 1);
+       if (err)
+               return err;
+
+       err = qlcnic_sriov_channel_cfg_cmd(adapter, QLCNIC_BC_CMD_CHANNEL_INIT);
+       if (err)
+               goto err_out_cleanup_bc_intr;
+
+       err = qlcnic_sriov_vf_init_driver(adapter);
+       if (err)
+               goto err_out_term_channel;
+
+       return 0;
+
+err_out_term_channel:
+       qlcnic_sriov_channel_cfg_cmd(adapter, QLCNIC_BC_CMD_CHANNEL_TERM);
+
+err_out_cleanup_bc_intr:
+       qlcnic_sriov_cfg_bc_intr(adapter, 0);
+       return err;
+}
+
+static void qlcnic_sriov_vf_attach(struct qlcnic_adapter *adapter)
+{
+       struct net_device *netdev = adapter->netdev;
+
+       if (netif_running(netdev)) {
+               if (!qlcnic_up(adapter, netdev))
+                       qlcnic_restore_indev_addr(netdev, NETDEV_UP);
+       }
+
+       netif_device_attach(netdev);
+}
+
+static void qlcnic_sriov_vf_detach(struct qlcnic_adapter *adapter)
+{
+       struct qlcnic_hardware_context *ahw = adapter->ahw;
+       struct qlcnic_intrpt_config *intr_tbl = ahw->intr_tbl;
+       struct net_device *netdev = adapter->netdev;
+       u8 i, max_ints = ahw->num_msix - 1;
+
+       netif_device_detach(netdev);
+       qlcnic_83xx_detach_mailbox_work(adapter);
+       qlcnic_83xx_disable_mbx_intr(adapter);
+
+       if (netif_running(netdev))
+               qlcnic_down(adapter, netdev);
+
+       for (i = 0; i < max_ints; i++) {
+               intr_tbl[i].id = i;
+               intr_tbl[i].enabled = 0;
+               intr_tbl[i].src = 0;
+       }
+       ahw->reset_context = 0;
+}
+
+static int qlcnic_sriov_vf_handle_dev_ready(struct qlcnic_adapter *adapter)
+{
+       struct qlcnic_hardware_context *ahw = adapter->ahw;
+       struct device *dev = &adapter->pdev->dev;
+       struct qlc_83xx_idc *idc = &ahw->idc;
+       u8 func = ahw->pci_func;
+       u32 state;
+
+       if ((idc->prev_state == QLC_83XX_IDC_DEV_NEED_RESET) ||
+           (idc->prev_state == QLC_83XX_IDC_DEV_INIT)) {
+               if (!qlcnic_sriov_vf_reinit_driver(adapter)) {
+                       qlcnic_sriov_vf_attach(adapter);
+                       adapter->fw_fail_cnt = 0;
+                       dev_info(dev,
+                                "%s: Reinitialization of VF 0x%x done after FW reset\n",
+                                __func__, func);
+               } else {
+                       dev_err(dev,
+                               "%s: Reinitialization of VF 0x%x failed after FW reset\n",
+                               __func__, func);
+                       state = QLCRDX(ahw, QLC_83XX_IDC_DEV_STATE);
+                       dev_info(dev, "Current state 0x%x after FW reset\n",
+                                state);
+               }
+       }
+
+       return 0;
+}
+
+static int qlcnic_sriov_vf_handle_context_reset(struct qlcnic_adapter *adapter)
+{
+       struct qlcnic_hardware_context *ahw = adapter->ahw;
+       struct qlcnic_mailbox *mbx = ahw->mailbox;
+       struct device *dev = &adapter->pdev->dev;
+       struct qlc_83xx_idc *idc = &ahw->idc;
+       u8 func = ahw->pci_func;
+       u32 state;
+
+       adapter->reset_ctx_cnt++;
+
+       /* Skip the context reset and check if FW is hung */
+       if (adapter->reset_ctx_cnt < 3) {
+               adapter->need_fw_reset = 1;
+               clear_bit(QLC_83XX_MBX_READY, &mbx->status);
+               dev_info(dev,
+                        "Resetting context, wait here to check if FW is in failed state\n");
+               return 0;
+       }
+
+       /* Check whether the number of context resets exceeds the threshold;
+        * if it does, fail the VF.
+        */
+       if (adapter->reset_ctx_cnt > QLC_83XX_VF_RESET_FAIL_THRESH) {
+               clear_bit(QLC_83XX_MODULE_LOADED, &idc->status);
+               adapter->tx_timeo_cnt = 0;
+               adapter->fw_fail_cnt = 0;
+               adapter->reset_ctx_cnt = 0;
+               qlcnic_sriov_vf_detach(adapter);
+               dev_err(dev,
+                       "Device context resets have exceeded the threshold, device interface will be shut down\n");
+               return -EIO;
+       }
+
+       dev_info(dev, "Resetting context of VF 0x%x\n", func);
+       dev_info(dev, "%s: Context reset count %d for VF 0x%x\n",
+                __func__, adapter->reset_ctx_cnt, func);
+       set_bit(__QLCNIC_RESETTING, &adapter->state);
+       adapter->need_fw_reset = 1;
+       clear_bit(QLC_83XX_MBX_READY, &mbx->status);
+       qlcnic_sriov_vf_detach(adapter);
+       adapter->need_fw_reset = 0;
+
+       if (!qlcnic_sriov_vf_reinit_driver(adapter)) {
+               qlcnic_sriov_vf_attach(adapter);
+               adapter->tx_timeo_cnt = 0;
+               adapter->reset_ctx_cnt = 0;
+               adapter->fw_fail_cnt = 0;
+               dev_info(dev, "Done resetting context for VF 0x%x\n", func);
+       } else {
+               dev_err(dev, "%s: Reinitialization of VF 0x%x failed\n",
+                       __func__, func);
+               state = QLCRDX(ahw, QLC_83XX_IDC_DEV_STATE);
+               dev_info(dev, "%s: Current state 0x%x\n", __func__, state);
+       }
+
+       return 0;
+}
+
+static int qlcnic_sriov_vf_idc_ready_state(struct qlcnic_adapter *adapter)
+{
+       struct qlcnic_hardware_context *ahw = adapter->ahw;
+       int ret = 0;
+
+       if (ahw->idc.prev_state != QLC_83XX_IDC_DEV_READY)
+               ret = qlcnic_sriov_vf_handle_dev_ready(adapter);
+       else if (ahw->reset_context)
+               ret = qlcnic_sriov_vf_handle_context_reset(adapter);
+
+       clear_bit(__QLCNIC_RESETTING, &adapter->state);
+       return ret;
+}
+
+static int qlcnic_sriov_vf_idc_failed_state(struct qlcnic_adapter *adapter)
+{
+       struct qlc_83xx_idc *idc = &adapter->ahw->idc;
+
+       dev_err(&adapter->pdev->dev, "Device is in failed state\n");
+       if (idc->prev_state == QLC_83XX_IDC_DEV_READY)
+               qlcnic_sriov_vf_detach(adapter);
+
+       clear_bit(QLC_83XX_MODULE_LOADED, &idc->status);
+       clear_bit(__QLCNIC_RESETTING, &adapter->state);
+       return -EIO;
+}
+
+static int
+qlcnic_sriov_vf_idc_need_quiescent_state(struct qlcnic_adapter *adapter)
+{
+       struct qlcnic_mailbox *mbx = adapter->ahw->mailbox;
+       struct qlc_83xx_idc *idc = &adapter->ahw->idc;
+
+       dev_info(&adapter->pdev->dev, "Device is in quiescent state\n");
+       if (idc->prev_state == QLC_83XX_IDC_DEV_READY) {
+               set_bit(__QLCNIC_RESETTING, &adapter->state);
+               adapter->tx_timeo_cnt = 0;
+               adapter->reset_ctx_cnt = 0;
+               clear_bit(QLC_83XX_MBX_READY, &mbx->status);
+               qlcnic_sriov_vf_detach(adapter);
+       }
+
+       return 0;
+}
+
+static int qlcnic_sriov_vf_idc_init_reset_state(struct qlcnic_adapter *adapter)
+{
+       struct qlcnic_mailbox *mbx = adapter->ahw->mailbox;
+       struct qlc_83xx_idc *idc = &adapter->ahw->idc;
+       u8 func = adapter->ahw->pci_func;
+
+       if (idc->prev_state == QLC_83XX_IDC_DEV_READY) {
+               dev_err(&adapter->pdev->dev,
+                       "Firmware hang detected by VF 0x%x\n", func);
+               set_bit(__QLCNIC_RESETTING, &adapter->state);
+               adapter->tx_timeo_cnt = 0;
+               adapter->reset_ctx_cnt = 0;
+               clear_bit(QLC_83XX_MBX_READY, &mbx->status);
+               qlcnic_sriov_vf_detach(adapter);
+       }
+       return 0;
+}
+
+static int qlcnic_sriov_vf_idc_unknown_state(struct qlcnic_adapter *adapter)
+{
+       dev_err(&adapter->pdev->dev, "%s: Device in unknown state\n", __func__);
+       return 0;
+}
+
+static void qlcnic_sriov_vf_periodic_tasks(struct qlcnic_adapter *adapter)
+{
+       if (adapter->fhash.fnum)
+               qlcnic_prune_lb_filters(adapter);
+}
+
+static void qlcnic_sriov_vf_poll_dev_state(struct work_struct *work)
+{
+       struct qlcnic_adapter *adapter;
+       struct qlc_83xx_idc *idc;
+       int ret = 0;
+
+       adapter = container_of(work, struct qlcnic_adapter, fw_work.work);
+       idc = &adapter->ahw->idc;
+       idc->curr_state = QLCRDX(adapter->ahw, QLC_83XX_IDC_DEV_STATE);
+
+       switch (idc->curr_state) {
+       case QLC_83XX_IDC_DEV_READY:
+               ret = qlcnic_sriov_vf_idc_ready_state(adapter);
+               break;
+       case QLC_83XX_IDC_DEV_NEED_RESET:
+       case QLC_83XX_IDC_DEV_INIT:
+               ret = qlcnic_sriov_vf_idc_init_reset_state(adapter);
+               break;
+       case QLC_83XX_IDC_DEV_NEED_QUISCENT:
+               ret = qlcnic_sriov_vf_idc_need_quiescent_state(adapter);
+               break;
+       case QLC_83XX_IDC_DEV_FAILED:
+               ret = qlcnic_sriov_vf_idc_failed_state(adapter);
+               break;
+       case QLC_83XX_IDC_DEV_QUISCENT:
+               break;
+       default:
+               ret = qlcnic_sriov_vf_idc_unknown_state(adapter);
+       }
+
+       idc->prev_state = idc->curr_state;
+       qlcnic_sriov_vf_periodic_tasks(adapter);
+
+       if (!ret && test_bit(QLC_83XX_MODULE_LOADED, &idc->status))
+               qlcnic_schedule_work(adapter, qlcnic_sriov_vf_poll_dev_state,
+                                    idc->delay);
+}
+
+static void qlcnic_sriov_vf_cancel_fw_work(struct qlcnic_adapter *adapter)
+{
+       while (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
+               msleep(20);
+
+       clear_bit(QLC_83XX_MODULE_LOADED, &adapter->ahw->idc.status);
+       clear_bit(__QLCNIC_RESETTING, &adapter->state);
+       cancel_delayed_work_sync(&adapter->fw_work);
+}
+
+static int qlcnic_sriov_check_vlan_id(struct qlcnic_sriov *sriov,
+                                     struct qlcnic_vf_info *vf, u16 vlan_id)
+{
+       int i, err = -EINVAL;
+
+       if (!vf->sriov_vlans)
+               return err;
+
+       spin_lock_bh(&vf->vlan_list_lock);
+
+       for (i = 0; i < sriov->num_allowed_vlans; i++) {
+               if (vf->sriov_vlans[i] == vlan_id) {
+                       err = 0;
+                       break;
+               }
+       }
+
+       spin_unlock_bh(&vf->vlan_list_lock);
+       return err;
+}
+
+static int qlcnic_sriov_validate_num_vlans(struct qlcnic_sriov *sriov,
+                                          struct qlcnic_vf_info *vf)
+{
+       int err = 0;
+
+       spin_lock_bh(&vf->vlan_list_lock);
+
+       if (vf->num_vlan >= sriov->num_allowed_vlans)
+               err = -EINVAL;
+
+       spin_unlock_bh(&vf->vlan_list_lock);
+       return err;
+}
+
+static int qlcnic_sriov_validate_vlan_cfg(struct qlcnic_adapter *adapter,
+                                         u16 vid, u8 enable)
+{
+       struct qlcnic_sriov *sriov = adapter->ahw->sriov;
+       struct qlcnic_vf_info *vf;
+       bool vlan_exist;
+       u8 allowed = 0;
+       int i;
+
+       vf = &adapter->ahw->sriov->vf_info[0];
+       vlan_exist = qlcnic_sriov_check_any_vlan(vf);
+       if (sriov->vlan_mode != QLC_GUEST_VLAN_MODE)
+               return -EINVAL;
+
+       if (enable) {
+               if (qlcnic_83xx_vf_check(adapter) && vlan_exist)
+                       return -EINVAL;
+
+               if (qlcnic_sriov_validate_num_vlans(sriov, vf))
+                       return -EINVAL;
+
+               if (sriov->any_vlan) {
+                       for (i = 0; i < sriov->num_allowed_vlans; i++) {
+                               if (sriov->allowed_vlans[i] == vid)
+                                       allowed = 1;
+                       }
+
+                       if (!allowed)
+                               return -EINVAL;
+               }
+       } else {
+               if (!vlan_exist || qlcnic_sriov_check_vlan_id(sriov, vf, vid))
+                       return -EINVAL;
+       }
+
+       return 0;
+}
+
+static void qlcnic_sriov_vlan_operation(struct qlcnic_vf_info *vf, u16 vlan_id,
+                                       enum qlcnic_vlan_operations opcode)
+{
+       struct qlcnic_adapter *adapter = vf->adapter;
+       struct qlcnic_sriov *sriov;
+
+       sriov = adapter->ahw->sriov;
+
+       if (!vf->sriov_vlans)
+               return;
+
+       spin_lock_bh(&vf->vlan_list_lock);
+
+       switch (opcode) {
+       case QLC_VLAN_ADD:
+               qlcnic_sriov_add_vlan_id(sriov, vf, vlan_id);
+               break;
+       case QLC_VLAN_DELETE:
+               qlcnic_sriov_del_vlan_id(sriov, vf, vlan_id);
+               break;
+       default:
+               netdev_err(adapter->netdev, "Invalid VLAN operation\n");
+       }
+
+       spin_unlock_bh(&vf->vlan_list_lock);
+       return;
+}
+
+int qlcnic_sriov_cfg_vf_guest_vlan(struct qlcnic_adapter *adapter,
+                                  u16 vid, u8 enable)
+{
+       struct qlcnic_sriov *sriov = adapter->ahw->sriov;
+       struct net_device *netdev = adapter->netdev;
+       struct qlcnic_vf_info *vf;
+       struct qlcnic_cmd_args cmd;
+       int ret;
+
+       memset(&cmd, 0, sizeof(cmd));
+       if (vid == 0)
+               return 0;
+
+       vf = &adapter->ahw->sriov->vf_info[0];
+       ret = qlcnic_sriov_validate_vlan_cfg(adapter, vid, enable);
+       if (ret)
+               return ret;
+
+       ret = qlcnic_sriov_alloc_bc_mbx_args(&cmd,
+                                            QLCNIC_BC_CMD_CFG_GUEST_VLAN);
+       if (ret)
+               return ret;
+
+       cmd.req.arg[1] = (enable & 1) | vid << 16;
+
+       qlcnic_sriov_cleanup_async_list(&sriov->bc);
+       ret = qlcnic_issue_cmd(adapter, &cmd);
+       if (ret) {
+               dev_err(&adapter->pdev->dev,
+                       "Failed to configure guest VLAN, err=%d\n", ret);
+       } else {
+               netif_addr_lock_bh(netdev);
+               qlcnic_free_mac_list(adapter);
+               netif_addr_unlock_bh(netdev);
+
+               if (enable)
+                       qlcnic_sriov_vlan_operation(vf, vid, QLC_VLAN_ADD);
+               else
+                       qlcnic_sriov_vlan_operation(vf, vid, QLC_VLAN_DELETE);
+
+               netif_addr_lock_bh(netdev);
+               qlcnic_set_multi(netdev);
+               netif_addr_unlock_bh(netdev);
+       }
+
+       qlcnic_free_mbx_args(&cmd);
+       return ret;
+}
+
+static void qlcnic_sriov_vf_free_mac_list(struct qlcnic_adapter *adapter)
+{
+       struct list_head *head = &adapter->mac_list;
+       struct qlcnic_mac_vlan_list *cur;
+
+       while (!list_empty(head)) {
+               cur = list_entry(head->next, struct qlcnic_mac_vlan_list, list);
+               qlcnic_sre_macaddr_change(adapter, cur->mac_addr, cur->vlan_id,
+                                         QLCNIC_MAC_DEL);
+               list_del(&cur->list);
+               kfree(cur);
+       }
+}
+
+
+static int qlcnic_sriov_vf_shutdown(struct pci_dev *pdev)
+{
+       struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
+       struct net_device *netdev = adapter->netdev;
+       int retval;
+
+       netif_device_detach(netdev);
+       qlcnic_cancel_idc_work(adapter);
+
+       if (netif_running(netdev))
+               qlcnic_down(adapter, netdev);
+
+       qlcnic_sriov_channel_cfg_cmd(adapter, QLCNIC_BC_CMD_CHANNEL_TERM);
+       qlcnic_sriov_cfg_bc_intr(adapter, 0);
+       qlcnic_83xx_disable_mbx_intr(adapter);
+       cancel_delayed_work_sync(&adapter->idc_aen_work);
+
+       retval = pci_save_state(pdev);
+       if (retval)
+               return retval;
+
+       return 0;
+}
+
+static int qlcnic_sriov_vf_resume(struct qlcnic_adapter *adapter)
+{
+       struct qlc_83xx_idc *idc = &adapter->ahw->idc;
+       struct net_device *netdev = adapter->netdev;
+       int err;
+
+       set_bit(QLC_83XX_MODULE_LOADED, &idc->status);
+       qlcnic_83xx_enable_mbx_interrupt(adapter);
+       err = qlcnic_sriov_cfg_bc_intr(adapter, 1);
+       if (err)
+               return err;
+
+       err = qlcnic_sriov_channel_cfg_cmd(adapter, QLCNIC_BC_CMD_CHANNEL_INIT);
+       if (!err) {
+               if (netif_running(netdev)) {
+                       err = qlcnic_up(adapter, netdev);
+                       if (!err)
+                               qlcnic_restore_indev_addr(netdev, NETDEV_UP);
+               }
+       }
+
+       netif_device_attach(netdev);
+       qlcnic_schedule_work(adapter, qlcnic_sriov_vf_poll_dev_state,
+                            idc->delay);
+       return err;
+}
+
+void qlcnic_sriov_alloc_vlans(struct qlcnic_adapter *adapter)
+{
+       struct qlcnic_sriov *sriov = adapter->ahw->sriov;
+       struct qlcnic_vf_info *vf;
+       int i;
+
+       for (i = 0; i < sriov->num_vfs; i++) {
+               vf = &sriov->vf_info[i];
+               vf->sriov_vlans = kcalloc(sriov->num_allowed_vlans,
+                                         sizeof(*vf->sriov_vlans), GFP_KERNEL);
+       }
+}
+
+void qlcnic_sriov_free_vlans(struct qlcnic_adapter *adapter)
+{
+       struct qlcnic_sriov *sriov = adapter->ahw->sriov;
+       struct qlcnic_vf_info *vf;
+       int i;
+
+       for (i = 0; i < sriov->num_vfs; i++) {
+               vf = &sriov->vf_info[i];
+               kfree(vf->sriov_vlans);
+               vf->sriov_vlans = NULL;
+       }
+}
+
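+/* Record a guest VLAN id in the first free slot of the VF's VLAN table;
+ * callers are expected to hold vlan_list_lock.
+ */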
+void qlcnic_sriov_add_vlan_id(struct qlcnic_sriov *sriov,
+                             struct qlcnic_vf_info *vf, u16 vlan_id)
+{
+       int i;
+
+       for (i = 0; i < sriov->num_allowed_vlans; i++) {
+               if (!vf->sriov_vlans[i]) {
+                       vf->sriov_vlans[i] = vlan_id;
+                       vf->num_vlan++;
+                       return;
+               }
+       }
+}
+
+void qlcnic_sriov_del_vlan_id(struct qlcnic_sriov *sriov,
+                             struct qlcnic_vf_info *vf, u16 vlan_id)
+{
+       int i;
+
+       for (i = 0; i < sriov->num_allowed_vlans; i++) {
+               if (vf->sriov_vlans[i] == vlan_id) {
+                       vf->sriov_vlans[i] = 0;
+                       vf->num_vlan--;
+                       return;
+               }
+       }
+}
+
+bool qlcnic_sriov_check_any_vlan(struct qlcnic_vf_info *vf)
+{
+       bool err = false;
+
+       spin_lock_bh(&vf->vlan_list_lock);
+
+       if (vf->num_vlan)
+               err = true;
+
+       spin_unlock_bh(&vf->vlan_list_lock);
+       return err;
+}
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c
new file mode 100644 (file)
index 0000000..afd687e
--- /dev/null
@@ -0,0 +1,2048 @@
+/*
+ * QLogic qlcnic NIC Driver
+ * Copyright (c) 2009-2013 QLogic Corporation
+ *
+ * See LICENSE.qlcnic for copyright and licensing details.
+ */
+
+#include <linux/types.h>
+
+#include "qlcnic_sriov.h"
+#include "qlcnic.h"
+
+#define QLCNIC_SRIOV_VF_MAX_MAC 7
+#define QLC_VF_MIN_TX_RATE     100
+#define QLC_VF_MAX_TX_RATE     9999
+#define QLC_MAC_OPCODE_MASK    0x7
+#define QLC_VF_FLOOD_BIT       BIT_16
+#define QLC_FLOOD_MODE         0x5
+#define QLC_SRIOV_ALLOW_VLAN0  BIT_19
+#define QLC_INTR_COAL_TYPE_MASK        0x7
+
+static int qlcnic_sriov_pf_get_vport_handle(struct qlcnic_adapter *, u8);
+
+struct qlcnic_sriov_cmd_handler {
+       int (*fn) (struct qlcnic_bc_trans *, struct qlcnic_cmd_args *);
+};
+
+struct qlcnic_sriov_fw_cmd_handler {
+       u32 cmd;
+       int (*fn) (struct qlcnic_bc_trans *, struct qlcnic_cmd_args *);
+};
+
+static int qlcnic_sriov_pf_set_vport_info(struct qlcnic_adapter *adapter,
+                                         struct qlcnic_info *npar_info,
+                                         u16 vport_id)
+{
+       struct qlcnic_cmd_args cmd;
+       int err;
+
+       if (qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_SET_NIC_INFO))
+               return -ENOMEM;
+
+       cmd.req.arg[1] = (vport_id << 16) | 0x1;
+       cmd.req.arg[2] = npar_info->bit_offsets;
+       cmd.req.arg[2] |= npar_info->min_tx_bw << 16;
+       cmd.req.arg[3] = npar_info->max_tx_bw | (npar_info->max_tx_ques << 16);
+       cmd.req.arg[4] = npar_info->max_tx_mac_filters;
+       cmd.req.arg[4] |= npar_info->max_rx_mcast_mac_filters << 16;
+       cmd.req.arg[5] = npar_info->max_rx_ucast_mac_filters |
+                        (npar_info->max_rx_ip_addr << 16);
+       cmd.req.arg[6] = npar_info->max_rx_lro_flow |
+                        (npar_info->max_rx_status_rings << 16);
+       cmd.req.arg[7] = npar_info->max_rx_buf_rings |
+                        (npar_info->max_rx_ques << 16);
+       cmd.req.arg[8] = npar_info->max_tx_vlan_keys;
+       cmd.req.arg[8] |= npar_info->max_local_ipv6_addrs << 16;
+       cmd.req.arg[9] = npar_info->max_remote_ipv6_addrs;
+
+       err = qlcnic_issue_cmd(adapter, &cmd);
+       if (err)
+               dev_err(&adapter->pdev->dev,
+                       "Failed to set vport info, err=%d\n", err);
+
+       qlcnic_free_mbx_args(&cmd);
+       return err;
+}
+
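+/* Divide the maximum resources reported by the firmware between the PF and
+ * its VFs for the given function and push the resulting limits to the
+ * firmware through a SET_NIC_INFO vport command.
+ */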
+static int qlcnic_sriov_pf_cal_res_limit(struct qlcnic_adapter *adapter,
+                                        struct qlcnic_info *info, u16 func)
+{
+       struct qlcnic_sriov *sriov = adapter->ahw->sriov;
+       struct qlcnic_resources *res = &sriov->ff_max;
+       u16 num_macs = sriov->num_allowed_vlans + 1;
+       int ret = -EIO, vpid, id;
+       struct qlcnic_vport *vp;
+       u32 num_vfs, max, temp;
+
+       vpid = qlcnic_sriov_pf_get_vport_handle(adapter, func);
+       if (vpid < 0)
+               return -EINVAL;
+
+       num_vfs = sriov->num_vfs;
+       max = num_vfs + 1;
+       info->bit_offsets = 0xffff;
+       info->max_tx_ques = res->num_tx_queues / max;
+
+       if (qlcnic_83xx_pf_check(adapter))
+               num_macs = QLCNIC_83XX_SRIOV_VF_MAX_MAC;
+
+       info->max_rx_mcast_mac_filters = res->num_rx_mcast_mac_filters;
+
+       if (adapter->ahw->pci_func == func) {
+               info->min_tx_bw = 0;
+               info->max_tx_bw = MAX_BW;
+
+               temp = res->num_rx_ucast_mac_filters - num_macs * num_vfs;
+               info->max_rx_ucast_mac_filters = temp;
+               temp = res->num_tx_mac_filters - num_macs * num_vfs;
+               info->max_tx_mac_filters = temp;
+               temp = num_macs * num_vfs * QLCNIC_SRIOV_VF_MAX_MAC;
+               temp = res->num_rx_mcast_mac_filters - temp;
+               info->max_rx_mcast_mac_filters = temp;
+
+               info->max_tx_ques = res->num_tx_queues - sriov->num_vfs;
+       } else {
+               id = qlcnic_sriov_func_to_index(adapter, func);
+               if (id < 0)
+                       return id;
+               vp = sriov->vf_info[id].vp;
+               info->min_tx_bw = vp->min_tx_bw;
+               info->max_tx_bw = vp->max_tx_bw;
+
+               info->max_rx_ucast_mac_filters = num_macs;
+               info->max_tx_mac_filters = num_macs;
+               temp = num_macs * QLCNIC_SRIOV_VF_MAX_MAC;
+               info->max_rx_mcast_mac_filters = temp;
+
+               info->max_tx_ques = QLCNIC_SINGLE_RING;
+       }
+
+       info->max_rx_ip_addr = res->num_destip / max;
+       info->max_rx_status_rings = res->num_rx_status_rings / max;
+       info->max_rx_buf_rings = res->num_rx_buf_rings / max;
+       info->max_rx_ques = res->num_rx_queues / max;
+       info->max_rx_lro_flow = res->num_lro_flows_supported / max;
+       info->max_tx_vlan_keys = res->num_txvlan_keys;
+       info->max_local_ipv6_addrs = res->max_local_ipv6_addrs;
+       info->max_remote_ipv6_addrs = res->max_remote_ipv6_addrs;
+
+       ret = qlcnic_sriov_pf_set_vport_info(adapter, info, vpid);
+       if (ret)
+               return ret;
+
+       return 0;
+}
+
+static void qlcnic_sriov_pf_set_ff_max_res(struct qlcnic_adapter *adapter,
+                                          struct qlcnic_info *info)
+{
+       struct qlcnic_resources *ff_max = &adapter->ahw->sriov->ff_max;
+
+       ff_max->num_tx_mac_filters = info->max_tx_mac_filters;
+       ff_max->num_rx_ucast_mac_filters = info->max_rx_ucast_mac_filters;
+       ff_max->num_rx_mcast_mac_filters = info->max_rx_mcast_mac_filters;
+       ff_max->num_txvlan_keys = info->max_tx_vlan_keys;
+       ff_max->num_rx_queues = info->max_rx_ques;
+       ff_max->num_tx_queues = info->max_tx_ques;
+       ff_max->num_lro_flows_supported = info->max_rx_lro_flow;
+       ff_max->num_destip = info->max_rx_ip_addr;
+       ff_max->num_rx_buf_rings = info->max_rx_buf_rings;
+       ff_max->num_rx_status_rings = info->max_rx_status_rings;
+       ff_max->max_remote_ipv6_addrs = info->max_remote_ipv6_addrs;
+       ff_max->max_local_ipv6_addrs = info->max_local_ipv6_addrs;
+}
+
+static void qlcnic_sriov_set_vf_max_vlan(struct qlcnic_adapter *adapter,
+                                        struct qlcnic_info *npar_info)
+{
+       struct qlcnic_sriov *sriov = adapter->ahw->sriov;
+       int temp, total_fn;
+
+       temp = npar_info->max_rx_mcast_mac_filters;
+       total_fn = sriov->num_vfs + 1;
+
+       temp = temp / (QLCNIC_SRIOV_VF_MAX_MAC * total_fn);
+       sriov->num_allowed_vlans = temp - 1;
+
+       if (qlcnic_83xx_pf_check(adapter))
+               sriov->num_allowed_vlans = 1;
+
+       netdev_info(adapter->netdev, "Max Guest VLANs supported per VF = %d\n",
+                   sriov->num_allowed_vlans);
+}
+
+static int qlcnic_sriov_get_pf_info(struct qlcnic_adapter *adapter,
+                                   struct qlcnic_info *npar_info)
+{
+       int err;
+       struct qlcnic_cmd_args cmd;
+
+       if (qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_NIC_INFO))
+               return -ENOMEM;
+
+       cmd.req.arg[1] = 0x2;
+       err = qlcnic_issue_cmd(adapter, &cmd);
+       if (err) {
+               dev_err(&adapter->pdev->dev,
+                       "Failed to get PF info, err=%d\n", err);
+               goto out;
+       }
+
+       npar_info->total_pf = cmd.rsp.arg[2] & 0xff;
+       npar_info->total_rss_engines = (cmd.rsp.arg[2] >> 8) & 0xff;
+       npar_info->max_vports = MSW(cmd.rsp.arg[2]);
+       npar_info->max_tx_ques = LSW(cmd.rsp.arg[3]);
+       npar_info->max_tx_mac_filters = MSW(cmd.rsp.arg[3]);
+       npar_info->max_rx_mcast_mac_filters = LSW(cmd.rsp.arg[4]);
+       npar_info->max_rx_ucast_mac_filters = MSW(cmd.rsp.arg[4]);
+       npar_info->max_rx_ip_addr = LSW(cmd.rsp.arg[5]);
+       npar_info->max_rx_lro_flow = MSW(cmd.rsp.arg[5]);
+       npar_info->max_rx_status_rings = LSW(cmd.rsp.arg[6]);
+       npar_info->max_rx_buf_rings = MSW(cmd.rsp.arg[6]);
+       npar_info->max_rx_ques = LSW(cmd.rsp.arg[7]);
+       npar_info->max_tx_vlan_keys = MSW(cmd.rsp.arg[7]);
+       npar_info->max_local_ipv6_addrs = LSW(cmd.rsp.arg[8]);
+       npar_info->max_remote_ipv6_addrs = MSW(cmd.rsp.arg[8]);
+
+       qlcnic_sriov_set_vf_max_vlan(adapter, npar_info);
+       qlcnic_sriov_pf_set_ff_max_res(adapter, npar_info);
+       dev_info(&adapter->pdev->dev,
+                "\n\ttotal_pf: %d,\n"
+                "\n\ttotal_rss_engines: %d max_vports: %d max_tx_ques %d,\n"
+                "\tmax_tx_mac_filters: %d max_rx_mcast_mac_filters: %d,\n"
+                "\tmax_rx_ucast_mac_filters: 0x%x, max_rx_ip_addr: %d,\n"
+                "\tmax_rx_lro_flow: %d max_rx_status_rings: %d,\n"
+                "\tmax_rx_buf_rings: %d, max_rx_ques: %d, max_tx_vlan_keys %d\n"
+                "\tmax_local_ipv6_addrs: %d, max_remote_ipv6_addrs: %d\n",
+                npar_info->total_pf, npar_info->total_rss_engines,
+                npar_info->max_vports, npar_info->max_tx_ques,
+                npar_info->max_tx_mac_filters,
+                npar_info->max_rx_mcast_mac_filters,
+                npar_info->max_rx_ucast_mac_filters, npar_info->max_rx_ip_addr,
+                npar_info->max_rx_lro_flow, npar_info->max_rx_status_rings,
+                npar_info->max_rx_buf_rings, npar_info->max_rx_ques,
+                npar_info->max_tx_vlan_keys, npar_info->max_local_ipv6_addrs,
+                npar_info->max_remote_ipv6_addrs);
+
+out:
+       qlcnic_free_mbx_args(&cmd);
+       return err;
+}
+
+static void qlcnic_sriov_pf_reset_vport_handle(struct qlcnic_adapter *adapter,
+                                              u8 func)
+{
+       struct qlcnic_sriov  *sriov = adapter->ahw->sriov;
+       struct qlcnic_vport *vp;
+       int index;
+
+       if (adapter->ahw->pci_func == func) {
+               sriov->vp_handle = 0;
+       } else {
+               index = qlcnic_sriov_func_to_index(adapter, func);
+               if (index < 0)
+                       return;
+               vp = sriov->vf_info[index].vp;
+               vp->handle = 0;
+       }
+}
+
+static void qlcnic_sriov_pf_set_vport_handle(struct qlcnic_adapter *adapter,
+                                            u16 vport_handle, u8 func)
+{
+       struct qlcnic_sriov  *sriov = adapter->ahw->sriov;
+       struct qlcnic_vport *vp;
+       int index;
+
+       if (adapter->ahw->pci_func == func) {
+               sriov->vp_handle = vport_handle;
+       } else {
+               index = qlcnic_sriov_func_to_index(adapter, func);
+               if (index < 0)
+                       return;
+               vp = sriov->vf_info[index].vp;
+               vp->handle = vport_handle;
+       }
+}
+
+static int qlcnic_sriov_pf_get_vport_handle(struct qlcnic_adapter *adapter,
+                                           u8 func)
+{
+       struct qlcnic_sriov  *sriov = adapter->ahw->sriov;
+       struct qlcnic_vf_info *vf_info;
+       int index;
+
+       if (adapter->ahw->pci_func == func) {
+               return sriov->vp_handle;
+       } else {
+               index = qlcnic_sriov_func_to_index(adapter, func);
+               if (index >= 0) {
+                       vf_info = &sriov->vf_info[index];
+                       return vf_info->vp->handle;
+               }
+       }
+
+       return -EINVAL;
+}
+
+static int qlcnic_sriov_pf_config_vport(struct qlcnic_adapter *adapter,
+                                       u8 flag, u16 func)
+{
+       struct qlcnic_cmd_args cmd;
+       int ret;
+       int vpid;
+
+       if (qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CONFIG_VPORT))
+               return -ENOMEM;
+
+       if (flag) {
+               cmd.req.arg[3] = func << 8;
+       } else {
+               vpid = qlcnic_sriov_pf_get_vport_handle(adapter, func);
+               if (vpid < 0) {
+                       ret = -EINVAL;
+                       goto out;
+               }
+               cmd.req.arg[3] = ((vpid & 0xffff) << 8) | 1;
+       }
+
+       ret = qlcnic_issue_cmd(adapter, &cmd);
+       if (ret) {
+               dev_err(&adapter->pdev->dev,
+                       "Failed %s vport, err %d for func 0x%x\n",
+                       (flag ? "enable" : "disable"), ret, func);
+               goto out;
+       }
+
+       if (flag) {
+               vpid = cmd.rsp.arg[2] & 0xffff;
+               qlcnic_sriov_pf_set_vport_handle(adapter, vpid, func);
+       } else {
+               qlcnic_sriov_pf_reset_vport_handle(adapter, func);
+       }
+
+out:
+       qlcnic_free_mbx_args(&cmd);
+       return ret;
+}
+
+static int qlcnic_sriov_pf_cfg_vlan_filtering(struct qlcnic_adapter *adapter,
+                                             u8 enable)
+{
+       struct qlcnic_cmd_args cmd;
+       int err;
+
+       err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_SET_NIC_INFO);
+       if (err)
+               return err;
+
+       cmd.req.arg[1] = 0x4;
+       if (enable) {
+               adapter->flags |= QLCNIC_VLAN_FILTERING;
+               cmd.req.arg[1] |= BIT_16;
+               if (qlcnic_84xx_check(adapter))
+                       cmd.req.arg[1] |= QLC_SRIOV_ALLOW_VLAN0;
+       } else {
+               adapter->flags &= ~QLCNIC_VLAN_FILTERING;
+       }
+
+       err = qlcnic_issue_cmd(adapter, &cmd);
+       if (err)
+               dev_err(&adapter->pdev->dev,
+                       "Failed to configure VLAN filtering, err=%d\n", err);
+
+       qlcnic_free_mbx_args(&cmd);
+       return err;
+}
+
+/* When the VF flood bit is configured, the PF driver will receive traffic from all VFs */
+static int qlcnic_sriov_pf_cfg_flood(struct qlcnic_adapter *adapter)
+{
+       struct qlcnic_cmd_args cmd;
+       int err;
+
+       err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_SET_NIC_INFO);
+       if (err)
+               return err;
+
+       cmd.req.arg[1] = QLC_FLOOD_MODE | QLC_VF_FLOOD_BIT;
+
+       err = qlcnic_issue_cmd(adapter, &cmd);
+       if (err)
+               dev_err(&adapter->pdev->dev,
+                       "Failed to configure VF Flood bit on PF, err=%d\n",
+                       err);
+
+       qlcnic_free_mbx_args(&cmd);
+       return err;
+}
+
+static int qlcnic_sriov_pf_cfg_eswitch(struct qlcnic_adapter *adapter,
+                                      u8 func, u8 enable)
+{
+       struct qlcnic_cmd_args cmd;
+       int err = -EIO;
+
+       if (qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_TOGGLE_ESWITCH))
+               return -ENOMEM;
+
+       cmd.req.arg[0] |= (3 << 29);
+       cmd.req.arg[1] = ((func & 0xf) << 2) | BIT_6 | BIT_1;
+       if (enable)
+               cmd.req.arg[1] |= BIT_0;
+
+       err = qlcnic_issue_cmd(adapter, &cmd);
+
+       if (err != QLCNIC_RCODE_SUCCESS) {
+               dev_err(&adapter->pdev->dev,
+                       "Failed to configure SR-IOV eswitch, err=%d\n", err);
+               err = -EIO;
+       }
+
+       qlcnic_free_mbx_args(&cmd);
+       return err;
+}
+
+static void qlcnic_sriov_pf_del_flr_queue(struct qlcnic_adapter *adapter)
+{
+       struct qlcnic_sriov *sriov = adapter->ahw->sriov;
+       struct qlcnic_back_channel *bc = &sriov->bc;
+       int i;
+
+       for (i = 0; i < sriov->num_vfs; i++)
+               cancel_work_sync(&sriov->vf_info[i].flr_work);
+
+       destroy_workqueue(bc->bc_flr_wq);
+}
+
+static int qlcnic_sriov_pf_create_flr_queue(struct qlcnic_adapter *adapter)
+{
+       struct qlcnic_back_channel *bc = &adapter->ahw->sriov->bc;
+       struct workqueue_struct *wq;
+
+       wq = create_singlethread_workqueue("qlcnic-flr");
+       if (wq == NULL) {
+               dev_err(&adapter->pdev->dev, "Cannot create FLR workqueue\n");
+               return -ENOMEM;
+       }
+
+       bc->bc_flr_wq = wq;
+       return 0;
+}
+
+void qlcnic_sriov_pf_cleanup(struct qlcnic_adapter *adapter)
+{
+       u8 func = adapter->ahw->pci_func;
+
+       if (!qlcnic_sriov_enable_check(adapter))
+               return;
+
+       qlcnic_sriov_pf_del_flr_queue(adapter);
+       qlcnic_sriov_cfg_bc_intr(adapter, 0);
+       qlcnic_sriov_pf_config_vport(adapter, 0, func);
+       qlcnic_sriov_pf_cfg_eswitch(adapter, func, 0);
+       qlcnic_sriov_pf_cfg_vlan_filtering(adapter, 0);
+       __qlcnic_sriov_cleanup(adapter);
+       adapter->ahw->op_mode = QLCNIC_MGMT_FUNC;
+       clear_bit(__QLCNIC_SRIOV_ENABLE, &adapter->state);
+}
+
+void qlcnic_sriov_pf_disable(struct qlcnic_adapter *adapter)
+{
+       if (!qlcnic_sriov_pf_check(adapter))
+               return;
+
+       if (!qlcnic_sriov_enable_check(adapter))
+               return;
+
+       pci_disable_sriov(adapter->pdev);
+       netdev_info(adapter->netdev,
+                   "SR-IOV is disabled successfully on port %d\n",
+                   adapter->portnum);
+}
+
+static int qlcnic_pci_sriov_disable(struct qlcnic_adapter *adapter)
+{
+       struct net_device *netdev = adapter->netdev;
+
+       if (pci_vfs_assigned(adapter->pdev)) {
+               netdev_err(adapter->netdev,
+                          "SR-IOV VFs belonging to port %d are assigned to VMs. SR-IOV cannot be disabled on this port\n",
+                          adapter->portnum);
+               netdev_info(adapter->netdev,
+                           "Please detach SR-IOV VFs belonging to port %d from VMs, and then try to disable SR-IOV on this port\n",
+                           adapter->portnum);
+               return -EPERM;
+       }
+
+       qlcnic_sriov_pf_disable(adapter);
+
+       rtnl_lock();
+       if (netif_running(netdev))
+               __qlcnic_down(adapter, netdev);
+
+       qlcnic_sriov_free_vlans(adapter);
+
+       qlcnic_sriov_pf_cleanup(adapter);
+
+       /* After disabling SR-IOV, re-initialize the driver in default mode
+        * and configure the operating mode based on the function's op_mode.
+        */
+       if (qlcnic_83xx_configure_opmode(adapter)) {
+               rtnl_unlock();
+               return -EIO;
+       }
+
+       if (netif_running(netdev))
+               __qlcnic_up(adapter, netdev);
+
+       rtnl_unlock();
+       return 0;
+}
+
+static int qlcnic_sriov_pf_init(struct qlcnic_adapter *adapter)
+{
+       struct qlcnic_hardware_context *ahw = adapter->ahw;
+       struct qlcnic_info nic_info, pf_info, vp_info;
+       int err;
+       u8 func = ahw->pci_func;
+
+       if (!qlcnic_sriov_enable_check(adapter))
+               return 0;
+
+       err = qlcnic_sriov_pf_cfg_vlan_filtering(adapter, 1);
+       if (err)
+               return err;
+
+       if (qlcnic_84xx_check(adapter)) {
+               err = qlcnic_sriov_pf_cfg_flood(adapter);
+               if (err)
+                       goto disable_vlan_filtering;
+       }
+
+       err = qlcnic_sriov_pf_cfg_eswitch(adapter, func, 1);
+       if (err)
+               goto disable_vlan_filtering;
+
+       err = qlcnic_sriov_pf_config_vport(adapter, 1, func);
+       if (err)
+               goto disable_eswitch;
+
+       err = qlcnic_sriov_get_pf_info(adapter, &pf_info);
+       if (err)
+               goto delete_vport;
+
+       err = qlcnic_get_nic_info(adapter, &nic_info, func);
+       if (err)
+               goto delete_vport;
+
+       err = qlcnic_sriov_pf_cal_res_limit(adapter, &vp_info, func);
+       if (err)
+               goto delete_vport;
+
+       err = qlcnic_sriov_cfg_bc_intr(adapter, 1);
+       if (err)
+               goto delete_vport;
+
+       ahw->physical_port = (u8) nic_info.phys_port;
+       ahw->switch_mode = nic_info.switch_mode;
+       ahw->max_mtu = nic_info.max_mtu;
+       ahw->capabilities = nic_info.capabilities;
+       ahw->nic_mode = QLC_83XX_SRIOV_MODE;
+       return err;
+
+delete_vport:
+       qlcnic_sriov_pf_config_vport(adapter, 0, func);
+
+disable_eswitch:
+       qlcnic_sriov_pf_cfg_eswitch(adapter, func, 0);
+
+disable_vlan_filtering:
+       qlcnic_sriov_pf_cfg_vlan_filtering(adapter, 0);
+
+       return err;
+}
+
+static int qlcnic_sriov_pf_enable(struct qlcnic_adapter *adapter, int num_vfs)
+{
+       int err;
+
+       if (!qlcnic_sriov_enable_check(adapter))
+               return 0;
+
+       err = pci_enable_sriov(adapter->pdev, num_vfs);
+       if (err)
+               qlcnic_sriov_pf_cleanup(adapter);
+
+       return err;
+}
+
+static int __qlcnic_pci_sriov_enable(struct qlcnic_adapter *adapter,
+                                    int num_vfs)
+{
+       int err = 0;
+
+       set_bit(__QLCNIC_SRIOV_ENABLE, &adapter->state);
+       adapter->ahw->op_mode = QLCNIC_SRIOV_PF_FUNC;
+
+       err = qlcnic_sriov_init(adapter, num_vfs);
+       if (err)
+               goto clear_op_mode;
+
+       err = qlcnic_sriov_pf_create_flr_queue(adapter);
+       if (err)
+               goto sriov_cleanup;
+
+       err = qlcnic_sriov_pf_init(adapter);
+       if (err)
+               goto del_flr_queue;
+
+       qlcnic_sriov_alloc_vlans(adapter);
+
+       return err;
+
+del_flr_queue:
+       qlcnic_sriov_pf_del_flr_queue(adapter);
+
+sriov_cleanup:
+       __qlcnic_sriov_cleanup(adapter);
+
+clear_op_mode:
+       clear_bit(__QLCNIC_SRIOV_ENABLE, &adapter->state);
+       adapter->ahw->op_mode = QLCNIC_MGMT_FUNC;
+       return err;
+}
+
+static int qlcnic_pci_sriov_enable(struct qlcnic_adapter *adapter, int num_vfs)
+{
+       struct net_device *netdev = adapter->netdev;
+       int err;
+
+       if (!(adapter->flags & QLCNIC_MSIX_ENABLED)) {
+               netdev_err(netdev,
+                          "SR-IOV cannot be enabled when legacy interrupts are enabled\n");
+               return -EIO;
+       }
+
+       rtnl_lock();
+       if (netif_running(netdev))
+               __qlcnic_down(adapter, netdev);
+
+       err = __qlcnic_pci_sriov_enable(adapter, num_vfs);
+       if (err)
+               goto error;
+
+       if (netif_running(netdev))
+               __qlcnic_up(adapter, netdev);
+
+       rtnl_unlock();
+       err = qlcnic_sriov_pf_enable(adapter, num_vfs);
+       if (!err) {
+               netdev_info(netdev,
+                           "SR-IOV is enabled successfully on port %d\n",
+                           adapter->portnum);
+               /* Return number of vfs enabled */
+               return num_vfs;
+       }
+
+       rtnl_lock();
+       if (netif_running(netdev))
+               __qlcnic_down(adapter, netdev);
+
+error:
+       if (!qlcnic_83xx_configure_opmode(adapter)) {
+               if (netif_running(netdev))
+                       __qlcnic_up(adapter, netdev);
+       }
+
+       rtnl_unlock();
+       netdev_info(netdev, "Failed to enable SR-IOV on port %d\n",
+                   adapter->portnum);
+
+       return err;
+}
+
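+/*
+ * Driver hook for pci sriov_configure (typically reached by writing the
+ * device's sriov_numvfs sysfs attribute): num_vfs == 0 disables SR-IOV,
+ * anything else enables it.
+ */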
+int qlcnic_pci_sriov_configure(struct pci_dev *dev, int num_vfs)
+{
+       struct qlcnic_adapter *adapter = pci_get_drvdata(dev);
+       int err;
+
+       if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
+               return -EBUSY;
+
+       if (num_vfs == 0)
+               err = qlcnic_pci_sriov_disable(adapter);
+       else
+               err = qlcnic_pci_sriov_enable(adapter, num_vfs);
+
+       clear_bit(__QLCNIC_RESETTING, &adapter->state);
+       return err;
+}
+
+static int qlcnic_sriov_set_vf_acl(struct qlcnic_adapter *adapter, u8 func)
+{
+       struct qlcnic_cmd_args cmd;
+       struct qlcnic_vport *vp;
+       int err, id;
+       u8 *mac;
+
+       id = qlcnic_sriov_func_to_index(adapter, func);
+       if (id < 0)
+               return id;
+
+       vp = adapter->ahw->sriov->vf_info[id].vp;
+       err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_SET_NIC_INFO);
+       if (err)
+               return err;
+
+       cmd.req.arg[1] = 0x3 | func << 16;
+       if (vp->spoofchk) {
+               mac = vp->mac;
+               cmd.req.arg[2] |= BIT_1 | BIT_3 | BIT_8;
+               cmd.req.arg[4] = mac[5] | mac[4] << 8 | mac[3] << 16 |
+                                mac[2] << 24;
+               cmd.req.arg[5] = mac[1] | mac[0] << 8;
+       }
+
+       if (vp->vlan_mode == QLC_PVID_MODE) {
+               cmd.req.arg[2] |= BIT_6;
+               cmd.req.arg[3] |= vp->pvid << 8;
+       }
+
+       err = qlcnic_issue_cmd(adapter, &cmd);
+       if (err)
+               dev_err(&adapter->pdev->dev, "Failed to set ACL, err=%d\n",
+                       err);
+
+       qlcnic_free_mbx_args(&cmd);
+       return err;
+}
+
+static int qlcnic_sriov_set_vf_vport_info(struct qlcnic_adapter *adapter,
+                                         u16 func)
+{
+       struct qlcnic_info defvp_info;
+       int err;
+
+       err = qlcnic_sriov_pf_cal_res_limit(adapter, &defvp_info, func);
+       if (err)
+               return -EIO;
+
+       err = qlcnic_sriov_set_vf_acl(adapter, func);
+       if (err)
+               return err;
+
+       return 0;
+}
+
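+/*
+ * Handle a VF's CHANNEL_INIT/CHANNEL_TERM request: create or tear down the
+ * firmware vport for that function and track the VF state accordingly.
+ */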
+static int qlcnic_sriov_pf_channel_cfg_cmd(struct qlcnic_bc_trans *trans,
+                                          struct qlcnic_cmd_args *cmd)
+{
+       struct qlcnic_vf_info *vf = trans->vf;
+       struct qlcnic_vport *vp = vf->vp;
+       struct qlcnic_adapter *adapter;
+       struct qlcnic_sriov *sriov;
+       u16 func = vf->pci_func;
+       size_t size;
+       int err;
+
+       adapter = vf->adapter;
+       sriov = adapter->ahw->sriov;
+
+       if (trans->req_hdr->cmd_op == QLCNIC_BC_CMD_CHANNEL_INIT) {
+               err = qlcnic_sriov_pf_config_vport(adapter, 1, func);
+               if (!err) {
+                       err = qlcnic_sriov_set_vf_vport_info(adapter, func);
+                       if (err)
+                               qlcnic_sriov_pf_config_vport(adapter, 0, func);
+               }
+       } else {
+               if (vp->vlan_mode == QLC_GUEST_VLAN_MODE) {
+                       size = sizeof(*vf->sriov_vlans);
+                       size = size * sriov->num_allowed_vlans;
+                       memset(vf->sriov_vlans, 0, size);
+               }
+
+               err = qlcnic_sriov_pf_config_vport(adapter, 0, func);
+       }
+
+       if (err)
+               goto err_out;
+
+       cmd->rsp.arg[0] |= (1 << 25);
+
+       if (trans->req_hdr->cmd_op == QLCNIC_BC_CMD_CHANNEL_INIT)
+               set_bit(QLC_BC_VF_STATE, &vf->state);
+       else
+               clear_bit(QLC_BC_VF_STATE, &vf->state);
+
+       return err;
+
+err_out:
+       cmd->rsp.arg[0] |= (2 << 25);
+       return err;
+}
+
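+/*
+ * Add or delete the VF's default MAC filter, optionally qualified by a
+ * VLAN, via the CONFIG_MAC_VLAN mailbox command on the VF's vport.
+ */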
+static int qlcnic_sriov_cfg_vf_def_mac(struct qlcnic_adapter *adapter,
+                                      struct qlcnic_vf_info *vf,
+                                      u16 vlan, u8 op)
+{
+       struct qlcnic_cmd_args *cmd;
+       struct qlcnic_macvlan_mbx mv;
+       struct qlcnic_vport *vp;
+       u8 *addr;
+       int err;
+       u32 *buf;
+       int vpid;
+
+       vp = vf->vp;
+
+       cmd = kzalloc(sizeof(*cmd), GFP_ATOMIC);
+       if (!cmd)
+               return -ENOMEM;
+
+       err = qlcnic_alloc_mbx_args(cmd, adapter, QLCNIC_CMD_CONFIG_MAC_VLAN);
+       if (err)
+               goto free_cmd;
+
+       cmd->type = QLC_83XX_MBX_CMD_NO_WAIT;
+       vpid = qlcnic_sriov_pf_get_vport_handle(adapter, vf->pci_func);
+       if (vpid < 0) {
+               err = -EINVAL;
+               goto free_args;
+       }
+
+       if (vlan)
+               op = ((op == QLCNIC_MAC_ADD || op == QLCNIC_MAC_VLAN_ADD) ?
+                     QLCNIC_MAC_VLAN_ADD : QLCNIC_MAC_VLAN_DEL);
+
+       cmd->req.arg[1] = op | (1 << 8) | (3 << 6);
+       cmd->req.arg[1] |= ((vpid & 0xffff) << 16) | BIT_31;
+
+       addr = vp->mac;
+       mv.vlan = vlan;
+       mv.mac_addr0 = addr[0];
+       mv.mac_addr1 = addr[1];
+       mv.mac_addr2 = addr[2];
+       mv.mac_addr3 = addr[3];
+       mv.mac_addr4 = addr[4];
+       mv.mac_addr5 = addr[5];
+       buf = &cmd->req.arg[2];
+       memcpy(buf, &mv, sizeof(struct qlcnic_macvlan_mbx));
+
+       err = qlcnic_issue_cmd(adapter, cmd);
+
+       if (!err)
+               return err;
+
+free_args:
+       qlcnic_free_mbx_args(cmd);
+free_cmd:
+       kfree(cmd);
+       return err;
+}
+
+static int qlcnic_sriov_validate_create_rx_ctx(struct qlcnic_cmd_args *cmd)
+{
+       if ((cmd->req.arg[0] >> 29) != 0x3)
+               return -EINVAL;
+
+       return 0;
+}
+
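+/*
+ * Walk the VF's active VLANs and add or delete its default MAC filter on
+ * each of them, plus the untagged filter where applicable.
+ */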
+static void qlcnic_83xx_cfg_default_mac_vlan(struct qlcnic_adapter *adapter,
+                                            struct qlcnic_vf_info *vf,
+                                            int opcode)
+{
+       struct qlcnic_sriov *sriov;
+       u16 vlan;
+       int i;
+
+       sriov = adapter->ahw->sriov;
+
+       spin_lock_bh(&vf->vlan_list_lock);
+       if (vf->num_vlan) {
+               for (i = 0; i < sriov->num_allowed_vlans; i++) {
+                       vlan = vf->sriov_vlans[i];
+                       if (vlan)
+                               qlcnic_sriov_cfg_vf_def_mac(adapter, vf, vlan,
+                                                           opcode);
+               }
+       }
+       spin_unlock_bh(&vf->vlan_list_lock);
+
+       if (vf->vp->vlan_mode != QLC_PVID_MODE) {
+               if (qlcnic_83xx_pf_check(adapter) &&
+                   qlcnic_sriov_check_any_vlan(vf))
+                       return;
+               qlcnic_sriov_cfg_vf_def_mac(adapter, vf, 0, opcode);
+       }
+}
+
+static int qlcnic_sriov_pf_create_rx_ctx_cmd(struct qlcnic_bc_trans *tran,
+                                            struct qlcnic_cmd_args *cmd)
+{
+       struct qlcnic_vf_info *vf = tran->vf;
+       struct qlcnic_adapter *adapter = vf->adapter;
+       struct qlcnic_rcv_mbx_out *mbx_out;
+       int err;
+
+       err = qlcnic_sriov_validate_create_rx_ctx(cmd);
+       if (err) {
+               cmd->rsp.arg[0] |= (0x6 << 25);
+               return err;
+       }
+
+       cmd->req.arg[6] = vf->vp->handle;
+       err = qlcnic_issue_cmd(adapter, cmd);
+
+       if (!err) {
+               mbx_out = (struct qlcnic_rcv_mbx_out *)&cmd->rsp.arg[1];
+               vf->rx_ctx_id = mbx_out->ctx_id;
+               qlcnic_83xx_cfg_default_mac_vlan(adapter, vf, QLCNIC_MAC_ADD);
+       } else {
+               vf->rx_ctx_id = 0;
+       }
+
+       return err;
+}
+
+static int qlcnic_sriov_pf_mac_address_cmd(struct qlcnic_bc_trans *trans,
+                                          struct qlcnic_cmd_args *cmd)
+{
+       struct qlcnic_vf_info *vf = trans->vf;
+       u8 type, *mac;
+
+       type = cmd->req.arg[1];
+       switch (type) {
+       case QLCNIC_SET_STATION_MAC:
+       case QLCNIC_SET_FAC_DEF_MAC:
+               cmd->rsp.arg[0] = (2 << 25);
+               break;
+       case QLCNIC_GET_CURRENT_MAC:
+               cmd->rsp.arg[0] = (1 << 25);
+               mac = vf->vp->mac;
+               cmd->rsp.arg[2] = mac[1] | ((mac[0] << 8) & 0xff00);
+               cmd->rsp.arg[1] = mac[5] | ((mac[4] << 8) & 0xff00) |
+                                 ((mac[3]) << 16 & 0xff0000) |
+                                 ((mac[2]) << 24 & 0xff000000);
+       }
+
+       return 0;
+}
+
+static int qlcnic_sriov_validate_create_tx_ctx(struct qlcnic_cmd_args *cmd)
+{
+       if ((cmd->req.arg[0] >> 29) != 0x3)
+               return -EINVAL;
+
+       return 0;
+}
+
+static int qlcnic_sriov_pf_create_tx_ctx_cmd(struct qlcnic_bc_trans *trans,
+                                            struct qlcnic_cmd_args *cmd)
+{
+       struct qlcnic_vf_info *vf = trans->vf;
+       struct qlcnic_adapter *adapter = vf->adapter;
+       struct qlcnic_tx_mbx_out *mbx_out;
+       int err;
+
+       err = qlcnic_sriov_validate_create_tx_ctx(cmd);
+       if (err) {
+               cmd->rsp.arg[0] |= (0x6 << 25);
+               return err;
+       }
+
+       cmd->req.arg[5] |= vf->vp->handle << 16;
+       err = qlcnic_issue_cmd(adapter, cmd);
+       if (!err) {
+               mbx_out = (struct qlcnic_tx_mbx_out *)&cmd->rsp.arg[2];
+               vf->tx_ctx_id = mbx_out->ctx_id;
+       } else {
+               vf->tx_ctx_id = 0;
+       }
+
+       return err;
+}
+
+static int qlcnic_sriov_validate_del_rx_ctx(struct qlcnic_vf_info *vf,
+                                           struct qlcnic_cmd_args *cmd)
+{
+       if ((cmd->req.arg[0] >> 29) != 0x3)
+               return -EINVAL;
+
+       if ((cmd->req.arg[1] & 0xffff) != vf->rx_ctx_id)
+               return -EINVAL;
+
+       return 0;
+}
+
+static int qlcnic_sriov_pf_del_rx_ctx_cmd(struct qlcnic_bc_trans *trans,
+                                         struct qlcnic_cmd_args *cmd)
+{
+       struct qlcnic_vf_info *vf = trans->vf;
+       struct qlcnic_adapter *adapter = vf->adapter;
+       int err;
+
+       err = qlcnic_sriov_validate_del_rx_ctx(vf, cmd);
+       if (err) {
+               cmd->rsp.arg[0] |= (0x6 << 25);
+               return err;
+       }
+
+       qlcnic_83xx_cfg_default_mac_vlan(adapter, vf, QLCNIC_MAC_DEL);
+       cmd->req.arg[1] |= vf->vp->handle << 16;
+       err = qlcnic_issue_cmd(adapter, cmd);
+
+       if (!err)
+               vf->rx_ctx_id = 0;
+
+       return err;
+}
+
+static int qlcnic_sriov_validate_del_tx_ctx(struct qlcnic_vf_info *vf,
+                                           struct qlcnic_cmd_args *cmd)
+{
+       if ((cmd->req.arg[0] >> 29) != 0x3)
+               return -EINVAL;
+
+       if ((cmd->req.arg[1] & 0xffff) != vf->tx_ctx_id)
+               return -EINVAL;
+
+       return 0;
+}
+
+static int qlcnic_sriov_pf_del_tx_ctx_cmd(struct qlcnic_bc_trans *trans,
+                                         struct qlcnic_cmd_args *cmd)
+{
+       struct qlcnic_vf_info *vf = trans->vf;
+       struct qlcnic_adapter *adapter = vf->adapter;
+       int err;
+
+       err = qlcnic_sriov_validate_del_tx_ctx(vf, cmd);
+       if (err) {
+               cmd->rsp.arg[0] |= (0x6 << 25);
+               return err;
+       }
+
+       cmd->req.arg[1] |= vf->vp->handle << 16;
+       err = qlcnic_issue_cmd(adapter, cmd);
+
+       if (!err)
+               vf->tx_ctx_id = 0;
+
+       return err;
+}
+
+static int qlcnic_sriov_validate_cfg_lro(struct qlcnic_vf_info *vf,
+                                        struct qlcnic_cmd_args *cmd)
+{
+       if ((cmd->req.arg[1] >> 16) != vf->rx_ctx_id)
+               return -EINVAL;
+
+       return 0;
+}
+
+static int qlcnic_sriov_pf_cfg_lro_cmd(struct qlcnic_bc_trans *trans,
+                                      struct qlcnic_cmd_args *cmd)
+{
+       struct qlcnic_vf_info *vf = trans->vf;
+       struct qlcnic_adapter *adapter = vf->adapter;
+       int err;
+
+       err = qlcnic_sriov_validate_cfg_lro(vf, cmd);
+       if (err) {
+               cmd->rsp.arg[0] |= (0x6 << 25);
+               return err;
+       }
+
+       err = qlcnic_issue_cmd(adapter, cmd);
+       return err;
+}
+
+static int qlcnic_sriov_pf_cfg_ip_cmd(struct qlcnic_bc_trans *trans,
+                                     struct qlcnic_cmd_args *cmd)
+{
+       struct qlcnic_vf_info *vf = trans->vf;
+       struct qlcnic_adapter *adapter = vf->adapter;
+       int err;
+
+       cmd->req.arg[1] |= vf->vp->handle << 16;
+       cmd->req.arg[1] |= BIT_31;
+
+       err = qlcnic_issue_cmd(adapter, cmd);
+       return err;
+}
+
+static int qlcnic_sriov_validate_cfg_intrpt(struct qlcnic_vf_info *vf,
+                                           struct qlcnic_cmd_args *cmd)
+{
+       if (((cmd->req.arg[1] >> 8) & 0xff) != vf->pci_func)
+               return -EINVAL;
+
+       if (!(cmd->req.arg[1] & BIT_16))
+               return -EINVAL;
+
+       if ((cmd->req.arg[1] & 0xff) != 0x1)
+               return -EINVAL;
+
+       return 0;
+}
+
+static int qlcnic_sriov_pf_cfg_intrpt_cmd(struct qlcnic_bc_trans *trans,
+                                         struct qlcnic_cmd_args *cmd)
+{
+       struct qlcnic_vf_info *vf = trans->vf;
+       struct qlcnic_adapter *adapter = vf->adapter;
+       int err;
+
+       err = qlcnic_sriov_validate_cfg_intrpt(vf, cmd);
+       if (err)
+               cmd->rsp.arg[0] |= (0x6 << 25);
+       else
+               err = qlcnic_issue_cmd(adapter, cmd);
+
+       return err;
+}
+
+static int qlcnic_sriov_validate_mtu(struct qlcnic_adapter *adapter,
+                                    struct qlcnic_vf_info *vf,
+                                    struct qlcnic_cmd_args *cmd)
+{
+       if (cmd->req.arg[1] != vf->rx_ctx_id)
+               return -EINVAL;
+
+       if (cmd->req.arg[2] > adapter->ahw->max_mtu)
+               return -EINVAL;
+
+       return 0;
+}
+
+static int qlcnic_sriov_pf_set_mtu_cmd(struct qlcnic_bc_trans *trans,
+                                      struct qlcnic_cmd_args *cmd)
+{
+       struct qlcnic_vf_info *vf = trans->vf;
+       struct qlcnic_adapter *adapter = vf->adapter;
+       int err;
+
+       err = qlcnic_sriov_validate_mtu(adapter, vf, cmd);
+       if (err)
+               cmd->rsp.arg[0] |= (0x6 << 25);
+       else
+               err = qlcnic_issue_cmd(adapter, cmd);
+
+       return err;
+}
+
+static int qlcnic_sriov_validate_get_nic_info(struct qlcnic_vf_info *vf,
+                                             struct qlcnic_cmd_args *cmd)
+{
+       if (cmd->req.arg[1] & BIT_31) {
+               if (((cmd->req.arg[1] >> 16) & 0x7fff) != vf->pci_func)
+                       return -EINVAL;
+       } else {
+               cmd->req.arg[1] |= vf->vp->handle << 16;
+       }
+
+       return 0;
+}
+
+static int qlcnic_sriov_pf_get_nic_info_cmd(struct qlcnic_bc_trans *trans,
+                                           struct qlcnic_cmd_args *cmd)
+{
+       struct qlcnic_vf_info *vf = trans->vf;
+       struct qlcnic_adapter *adapter = vf->adapter;
+       int err;
+
+       err = qlcnic_sriov_validate_get_nic_info(vf, cmd);
+       if (err) {
+               cmd->rsp.arg[0] |= (0x6 << 25);
+               return err;
+       }
+
+       err = qlcnic_issue_cmd(adapter, cmd);
+       return err;
+}
+
+static int qlcnic_sriov_validate_cfg_rss(struct qlcnic_vf_info *vf,
+                                        struct qlcnic_cmd_args *cmd)
+{
+       if (cmd->req.arg[1] != vf->rx_ctx_id)
+               return -EINVAL;
+
+       return 0;
+}
+
+static int qlcnic_sriov_pf_cfg_rss_cmd(struct qlcnic_bc_trans *trans,
+                                      struct qlcnic_cmd_args *cmd)
+{
+       struct qlcnic_vf_info *vf = trans->vf;
+       struct qlcnic_adapter *adapter = vf->adapter;
+       int err;
+
+       err = qlcnic_sriov_validate_cfg_rss(vf, cmd);
+       if (err)
+               cmd->rsp.arg[0] |= (0x6 << 25);
+       else
+               err = qlcnic_issue_cmd(adapter, cmd);
+
+       return err;
+}
+
+static int qlcnic_sriov_validate_cfg_intrcoal(struct qlcnic_adapter *adapter,
+                                             struct qlcnic_vf_info *vf,
+                                             struct qlcnic_cmd_args *cmd)
+{
+       struct qlcnic_nic_intr_coalesce *coal = &adapter->ahw->coal;
+       u16 ctx_id, pkts, time;
+       int err = -EINVAL;
+       u8 type;
+
+       type = cmd->req.arg[1] & QLC_INTR_COAL_TYPE_MASK;
+       ctx_id = cmd->req.arg[1] >> 16;
+       pkts = cmd->req.arg[2] & 0xffff;
+       time = cmd->req.arg[2] >> 16;
+
+       switch (type) {
+       case QLCNIC_INTR_COAL_TYPE_RX:
+               if (ctx_id != vf->rx_ctx_id || pkts > coal->rx_packets ||
+                   time < coal->rx_time_us)
+                       goto err_label;
+               break;
+       case QLCNIC_INTR_COAL_TYPE_TX:
+               if (ctx_id != vf->tx_ctx_id || pkts > coal->tx_packets ||
+                   time < coal->tx_time_us)
+                       goto err_label;
+               break;
+       default:
+               netdev_err(adapter->netdev, "Invalid coalescing type 0x%x received\n",
+                          type);
+               return err;
+       }
+
+       return 0;
+
+err_label:
+       netdev_err(adapter->netdev, "Expected: rx_ctx_id 0x%x rx_packets 0x%x rx_time_us 0x%x tx_ctx_id 0x%x tx_packets 0x%x tx_time_us 0x%x\n",
+                  vf->rx_ctx_id, coal->rx_packets, coal->rx_time_us,
+                  vf->tx_ctx_id, coal->tx_packets, coal->tx_time_us);
+       netdev_err(adapter->netdev, "Received: ctx_id 0x%x packets 0x%x time_us 0x%x type 0x%x\n",
+                  ctx_id, pkts, time, type);
+
+       return err;
+}
+
+static int qlcnic_sriov_pf_cfg_intrcoal_cmd(struct qlcnic_bc_trans *tran,
+                                           struct qlcnic_cmd_args *cmd)
+{
+       struct qlcnic_vf_info *vf = tran->vf;
+       struct qlcnic_adapter *adapter = vf->adapter;
+       int err;
+
+       err = qlcnic_sriov_validate_cfg_intrcoal(adapter, vf, cmd);
+       if (err) {
+               cmd->rsp.arg[0] |= (0x6 << 25);
+               return err;
+       }
+
+       err = qlcnic_issue_cmd(adapter, cmd);
+       return err;
+}
+
+static int qlcnic_sriov_validate_cfg_macvlan(struct qlcnic_adapter *adapter,
+                                            struct qlcnic_vf_info *vf,
+                                            struct qlcnic_cmd_args *cmd)
+{
+       struct qlcnic_vport *vp = vf->vp;
+       u8 op, new_op;
+
+       if (!(cmd->req.arg[1] & BIT_8))
+               return -EINVAL;
+
+       cmd->req.arg[1] |= (vf->vp->handle << 16);
+       cmd->req.arg[1] |= BIT_31;
+
+       if (vp->vlan_mode == QLC_PVID_MODE) {
+               op = cmd->req.arg[1] & 0x7;
+               cmd->req.arg[1] &= ~0x7;
+               new_op = (op == QLCNIC_MAC_ADD || op == QLCNIC_MAC_VLAN_ADD) ?
+                        QLCNIC_MAC_VLAN_ADD : QLCNIC_MAC_VLAN_DEL;
+               cmd->req.arg[3] |= vp->pvid << 16;
+               cmd->req.arg[1] |= new_op;
+       }
+
+       return 0;
+}
+
+static int qlcnic_sriov_pf_cfg_macvlan_cmd(struct qlcnic_bc_trans *trans,
+                                          struct qlcnic_cmd_args *cmd)
+{
+       struct qlcnic_vf_info *vf = trans->vf;
+       struct qlcnic_adapter *adapter = vf->adapter;
+       int err;
+
+       err = qlcnic_sriov_validate_cfg_macvlan(adapter, vf, cmd);
+       if (err) {
+               cmd->rsp.arg[0] |= (0x6 << 25);
+               return err;
+       }
+
+       err = qlcnic_issue_cmd(adapter, cmd);
+       return err;
+}
+
+static int qlcnic_sriov_validate_linkevent(struct qlcnic_vf_info *vf,
+                                          struct qlcnic_cmd_args *cmd)
+{
+       if ((cmd->req.arg[1] >> 16) != vf->rx_ctx_id)
+               return -EINVAL;
+
+       return 0;
+}
+
+static int qlcnic_sriov_pf_linkevent_cmd(struct qlcnic_bc_trans *trans,
+                                        struct qlcnic_cmd_args *cmd)
+{
+       struct qlcnic_vf_info *vf = trans->vf;
+       struct qlcnic_adapter *adapter = vf->adapter;
+       int err;
+
+       err = qlcnic_sriov_validate_linkevent(vf, cmd);
+       if (err) {
+               cmd->rsp.arg[0] |= (0x6 << 25);
+               return err;
+       }
+
+       err = qlcnic_issue_cmd(adapter, cmd);
+       return err;
+}
+
+static int qlcnic_sriov_pf_cfg_promisc_cmd(struct qlcnic_bc_trans *trans,
+                                          struct qlcnic_cmd_args *cmd)
+{
+       struct qlcnic_vf_info *vf = trans->vf;
+       struct qlcnic_adapter *adapter = vf->adapter;
+       int err;
+
+       cmd->req.arg[1] |= vf->vp->handle << 16;
+       cmd->req.arg[1] |= BIT_31;
+       err = qlcnic_issue_cmd(adapter, cmd);
+       return err;
+}
+
+static int qlcnic_sriov_pf_get_acl_cmd(struct qlcnic_bc_trans *trans,
+                                      struct qlcnic_cmd_args *cmd)
+{
+       struct qlcnic_vf_info *vf = trans->vf;
+       struct qlcnic_vport *vp = vf->vp;
+       u8 cmd_op, mode = vp->vlan_mode;
+       struct qlcnic_adapter *adapter;
+       struct qlcnic_sriov *sriov;
+
+       adapter = vf->adapter;
+       sriov = adapter->ahw->sriov;
+
+       cmd_op = trans->req_hdr->cmd_op;
+       cmd->rsp.arg[0] |= 1 << 25;
+
+       /* For 84xx adapters in PVID mode, the PF driver should report the
+        * VLAN mode to the VF driver as QLC_NO_VLAN_MODE, which is zero in
+        * the mailbox response.
+        */
+       if (qlcnic_84xx_check(adapter) && mode == QLC_PVID_MODE)
+               return 0;
+
+       switch (mode) {
+       case QLC_GUEST_VLAN_MODE:
+               cmd->rsp.arg[1] = mode | 1 << 8;
+               cmd->rsp.arg[2] = sriov->num_allowed_vlans << 16;
+               break;
+       case QLC_PVID_MODE:
+               cmd->rsp.arg[1] = mode | 1 << 8 | vp->pvid << 16;
+               break;
+       }
+
+       return 0;
+}
+
+static int qlcnic_sriov_pf_del_guest_vlan(struct qlcnic_adapter *adapter,
+                                         struct qlcnic_vf_info *vf,
+                                         struct qlcnic_cmd_args *cmd)
+{
+       struct qlcnic_sriov *sriov = adapter->ahw->sriov;
+       u16 vlan;
+
+       if (!qlcnic_sriov_check_any_vlan(vf))
+               return -EINVAL;
+
+       vlan = cmd->req.arg[1] >> 16;
+       if (!vf->rx_ctx_id) {
+               qlcnic_sriov_del_vlan_id(sriov, vf, vlan);
+               return 0;
+       }
+
+       qlcnic_sriov_cfg_vf_def_mac(adapter, vf, vlan, QLCNIC_MAC_DEL);
+       qlcnic_sriov_del_vlan_id(sriov, vf, vlan);
+
+       if (qlcnic_83xx_pf_check(adapter))
+               qlcnic_sriov_cfg_vf_def_mac(adapter, vf,
+                                           0, QLCNIC_MAC_ADD);
+       return 0;
+}
+
+static int qlcnic_sriov_pf_add_guest_vlan(struct qlcnic_adapter *adapter,
+                                         struct qlcnic_vf_info *vf,
+                                         struct qlcnic_cmd_args *cmd)
+{
+       struct qlcnic_sriov *sriov = adapter->ahw->sriov;
+       int err = -EIO;
+       u16 vlan;
+
+       if (qlcnic_83xx_pf_check(adapter) && qlcnic_sriov_check_any_vlan(vf))
+               return err;
+
+       vlan = cmd->req.arg[1] >> 16;
+
+       if (!vf->rx_ctx_id) {
+               qlcnic_sriov_add_vlan_id(sriov, vf, vlan);
+               return 0;
+       }
+
+       if (qlcnic_83xx_pf_check(adapter)) {
+               err = qlcnic_sriov_cfg_vf_def_mac(adapter, vf, 0,
+                                                 QLCNIC_MAC_DEL);
+               if (err)
+                       return err;
+       }
+
+       err = qlcnic_sriov_cfg_vf_def_mac(adapter, vf, vlan, QLCNIC_MAC_ADD);
+
+       if (err) {
+               if (qlcnic_83xx_pf_check(adapter))
+                       qlcnic_sriov_cfg_vf_def_mac(adapter, vf, 0,
+                                                   QLCNIC_MAC_ADD);
+               return err;
+       }
+
+       qlcnic_sriov_add_vlan_id(sriov, vf, vlan);
+       return err;
+}
+
+static int qlcnic_sriov_pf_cfg_guest_vlan_cmd(struct qlcnic_bc_trans *tran,
+                                             struct qlcnic_cmd_args *cmd)
+{
+       struct qlcnic_vf_info *vf = tran->vf;
+       struct qlcnic_adapter *adapter = vf->adapter;
+       struct qlcnic_vport *vp = vf->vp;
+       int err = -EIO;
+       u8 op;
+
+       if (vp->vlan_mode != QLC_GUEST_VLAN_MODE) {
+               cmd->rsp.arg[0] |= 2 << 25;
+               return err;
+       }
+
+       op = cmd->req.arg[1] & 0xf;
+
+       if (op)
+               err = qlcnic_sriov_pf_add_guest_vlan(adapter, vf, cmd);
+       else
+               err = qlcnic_sriov_pf_del_guest_vlan(adapter, vf, cmd);
+
+       cmd->rsp.arg[0] |= err ? 2 << 25 : 1 << 25;
+       return err;
+}
+
+static const int qlcnic_pf_passthru_supp_cmds[] = {
+       QLCNIC_CMD_GET_STATISTICS,
+       QLCNIC_CMD_GET_PORT_CONFIG,
+       QLCNIC_CMD_GET_LINK_STATUS,
+       QLCNIC_CMD_INIT_NIC_FUNC,
+       QLCNIC_CMD_STOP_NIC_FUNC,
+};
+
+static const struct qlcnic_sriov_cmd_handler qlcnic_pf_bc_cmd_hdlr[] = {
+       [QLCNIC_BC_CMD_CHANNEL_INIT] = {&qlcnic_sriov_pf_channel_cfg_cmd},
+       [QLCNIC_BC_CMD_CHANNEL_TERM] = {&qlcnic_sriov_pf_channel_cfg_cmd},
+       [QLCNIC_BC_CMD_GET_ACL] = {&qlcnic_sriov_pf_get_acl_cmd},
+       [QLCNIC_BC_CMD_CFG_GUEST_VLAN]  = {&qlcnic_sriov_pf_cfg_guest_vlan_cmd},
+};
+
+static const struct qlcnic_sriov_fw_cmd_handler qlcnic_pf_fw_cmd_hdlr[] = {
+       {QLCNIC_CMD_CREATE_RX_CTX, qlcnic_sriov_pf_create_rx_ctx_cmd},
+       {QLCNIC_CMD_CREATE_TX_CTX, qlcnic_sriov_pf_create_tx_ctx_cmd},
+       {QLCNIC_CMD_MAC_ADDRESS, qlcnic_sriov_pf_mac_address_cmd},
+       {QLCNIC_CMD_DESTROY_RX_CTX, qlcnic_sriov_pf_del_rx_ctx_cmd},
+       {QLCNIC_CMD_DESTROY_TX_CTX, qlcnic_sriov_pf_del_tx_ctx_cmd},
+       {QLCNIC_CMD_CONFIGURE_HW_LRO, qlcnic_sriov_pf_cfg_lro_cmd},
+       {QLCNIC_CMD_CONFIGURE_IP_ADDR, qlcnic_sriov_pf_cfg_ip_cmd},
+       {QLCNIC_CMD_CONFIG_INTRPT, qlcnic_sriov_pf_cfg_intrpt_cmd},
+       {QLCNIC_CMD_SET_MTU, qlcnic_sriov_pf_set_mtu_cmd},
+       {QLCNIC_CMD_GET_NIC_INFO, qlcnic_sriov_pf_get_nic_info_cmd},
+       {QLCNIC_CMD_CONFIGURE_RSS, qlcnic_sriov_pf_cfg_rss_cmd},
+       {QLCNIC_CMD_CONFIG_INTR_COAL, qlcnic_sriov_pf_cfg_intrcoal_cmd},
+       {QLCNIC_CMD_CONFIG_MAC_VLAN, qlcnic_sriov_pf_cfg_macvlan_cmd},
+       {QLCNIC_CMD_GET_LINK_EVENT, qlcnic_sriov_pf_linkevent_cmd},
+       {QLCNIC_CMD_CONFIGURE_MAC_RX_MODE, qlcnic_sriov_pf_cfg_promisc_cmd},
+};
+
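+/*
+ * Dispatch a mailbox request received from a VF: broadcast-channel commands
+ * go through qlcnic_pf_bc_cmd_hdlr, recognized firmware commands through
+ * qlcnic_pf_fw_cmd_hdlr, a small set is passed straight to firmware, and
+ * anything else is rejected in the response status word.
+ */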
+void qlcnic_sriov_pf_process_bc_cmd(struct qlcnic_adapter *adapter,
+                                   struct qlcnic_bc_trans *trans,
+                                   struct qlcnic_cmd_args *cmd)
+{
+       u8 size, cmd_op;
+
+       cmd_op = trans->req_hdr->cmd_op;
+
+       if (trans->req_hdr->op_type == QLC_BC_CMD) {
+               size = ARRAY_SIZE(qlcnic_pf_bc_cmd_hdlr);
+               if (cmd_op < size) {
+                       qlcnic_pf_bc_cmd_hdlr[cmd_op].fn(trans, cmd);
+                       return;
+               }
+       } else {
+               int i;
+               size = ARRAY_SIZE(qlcnic_pf_fw_cmd_hdlr);
+               for (i = 0; i < size; i++) {
+                       if (cmd_op == qlcnic_pf_fw_cmd_hdlr[i].cmd) {
+                               qlcnic_pf_fw_cmd_hdlr[i].fn(trans, cmd);
+                               return;
+                       }
+               }
+
+               size = ARRAY_SIZE(qlcnic_pf_passthru_supp_cmds);
+               for (i = 0; i < size; i++) {
+                       if (cmd_op == qlcnic_pf_passthru_supp_cmds[i]) {
+                               qlcnic_issue_cmd(adapter, cmd);
+                               return;
+                       }
+               }
+       }
+
+       cmd->rsp.arg[0] |= (0x9 << 25);
+}
+
+void qlcnic_pf_set_interface_id_create_rx_ctx(struct qlcnic_adapter *adapter,
+                                            u32 *int_id)
+{
+       u16 vpid;
+
+       vpid = qlcnic_sriov_pf_get_vport_handle(adapter,
+                                               adapter->ahw->pci_func);
+       *int_id |= vpid;
+}
+
+void qlcnic_pf_set_interface_id_del_rx_ctx(struct qlcnic_adapter *adapter,
+                                          u32 *int_id)
+{
+       u16 vpid;
+
+       vpid = qlcnic_sriov_pf_get_vport_handle(adapter,
+                                               adapter->ahw->pci_func);
+       *int_id |= vpid << 16;
+}
+
+void qlcnic_pf_set_interface_id_create_tx_ctx(struct qlcnic_adapter *adapter,
+                                             u32 *int_id)
+{
+       int vpid;
+
+       vpid = qlcnic_sriov_pf_get_vport_handle(adapter,
+                                               adapter->ahw->pci_func);
+       *int_id |= vpid << 16;
+}
+
+void qlcnic_pf_set_interface_id_del_tx_ctx(struct qlcnic_adapter *adapter,
+                                          u32 *int_id)
+{
+       u16 vpid;
+
+       vpid = qlcnic_sriov_pf_get_vport_handle(adapter,
+                                               adapter->ahw->pci_func);
+       *int_id |= vpid << 16;
+}
+
+void qlcnic_pf_set_interface_id_promisc(struct qlcnic_adapter *adapter,
+                                       u32 *int_id)
+{
+       u16 vpid;
+
+       vpid = qlcnic_sriov_pf_get_vport_handle(adapter,
+                                               adapter->ahw->pci_func);
+       *int_id |= (vpid << 16) | BIT_31;
+}
+
+void qlcnic_pf_set_interface_id_ipaddr(struct qlcnic_adapter *adapter,
+                                      u32 *int_id)
+{
+       u16 vpid;
+
+       vpid = qlcnic_sriov_pf_get_vport_handle(adapter,
+                                               adapter->ahw->pci_func);
+       *int_id |= (vpid << 16) | BIT_31;
+}
+
+void qlcnic_pf_set_interface_id_macaddr(struct qlcnic_adapter *adapter,
+                                       u32 *int_id)
+{
+       u16 vpid;
+
+       vpid = qlcnic_sriov_pf_get_vport_handle(adapter,
+                                               adapter->ahw->pci_func);
+       *int_id |= (vpid << 16) | BIT_31;
+}
+
+static void qlcnic_sriov_del_rx_ctx(struct qlcnic_adapter *adapter,
+                                   struct qlcnic_vf_info *vf)
+{
+       struct qlcnic_cmd_args cmd;
+       int vpid;
+
+       if (!vf->rx_ctx_id)
+               return;
+
+       if (qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_DESTROY_RX_CTX))
+               return;
+
+       vpid = qlcnic_sriov_pf_get_vport_handle(adapter, vf->pci_func);
+       if (vpid >= 0) {
+               cmd.req.arg[1] = vf->rx_ctx_id | (vpid & 0xffff) << 16;
+               if (qlcnic_issue_cmd(adapter, &cmd))
+                       dev_err(&adapter->pdev->dev,
+                               "Failed to delete Rx ctx in firmware for func 0x%x\n",
+                               vf->pci_func);
+               else
+                       vf->rx_ctx_id = 0;
+       }
+
+       qlcnic_free_mbx_args(&cmd);
+}
+
+static void qlcnic_sriov_del_tx_ctx(struct qlcnic_adapter *adapter,
+                                   struct qlcnic_vf_info *vf)
+{
+       struct qlcnic_cmd_args cmd;
+       int vpid;
+
+       if (!vf->tx_ctx_id)
+               return;
+
+       if (qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_DESTROY_TX_CTX))
+               return;
+
+       vpid = qlcnic_sriov_pf_get_vport_handle(adapter, vf->pci_func);
+       if (vpid >= 0) {
+               cmd.req.arg[1] |= vf->tx_ctx_id | (vpid & 0xffff) << 16;
+               if (qlcnic_issue_cmd(adapter, &cmd))
+                       dev_err(&adapter->pdev->dev,
+                               "Failed to delete Tx ctx in firmware for func 0x%x\n",
+                               vf->pci_func);
+               else
+                       vf->tx_ctx_id = 0;
+       }
+
+       qlcnic_free_mbx_args(&cmd);
+}
+
+static int qlcnic_sriov_add_act_list_irqsave(struct qlcnic_sriov *sriov,
+                                            struct qlcnic_vf_info *vf,
+                                            struct qlcnic_bc_trans *trans)
+{
+       struct qlcnic_trans_list *t_list = &vf->rcv_act;
+       unsigned long flag;
+
+       spin_lock_irqsave(&t_list->lock, flag);
+
+       __qlcnic_sriov_add_act_list(sriov, vf, trans);
+
+       spin_unlock_irqrestore(&t_list->lock, flag);
+       return 0;
+}
+
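+/*
+ * Tear down a VF after an FLR: flush its pending transactions, drop the
+ * Tx/Rx contexts on a soft FLR and delete its vport.
+ */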
+static void __qlcnic_sriov_process_flr(struct qlcnic_vf_info *vf)
+{
+       struct qlcnic_adapter *adapter = vf->adapter;
+
+       qlcnic_sriov_cleanup_list(&vf->rcv_pend);
+       cancel_work_sync(&vf->trans_work);
+       qlcnic_sriov_cleanup_list(&vf->rcv_act);
+
+       if (test_bit(QLC_BC_VF_SOFT_FLR, &vf->state)) {
+               qlcnic_sriov_del_tx_ctx(adapter, vf);
+               qlcnic_sriov_del_rx_ctx(adapter, vf);
+       }
+
+       qlcnic_sriov_pf_config_vport(adapter, 0, vf->pci_func);
+
+       clear_bit(QLC_BC_VF_FLR, &vf->state);
+       if (test_bit(QLC_BC_VF_SOFT_FLR, &vf->state)) {
+               qlcnic_sriov_add_act_list_irqsave(adapter->ahw->sriov, vf,
+                                                 vf->flr_trans);
+               clear_bit(QLC_BC_VF_SOFT_FLR, &vf->state);
+               vf->flr_trans = NULL;
+       }
+}
+
+static void qlcnic_sriov_pf_process_flr(struct work_struct *work)
+{
+       struct qlcnic_vf_info *vf;
+
+       vf = container_of(work, struct qlcnic_vf_info, flr_work);
+       __qlcnic_sriov_process_flr(vf);
+}
+
+static void qlcnic_sriov_schedule_flr(struct qlcnic_sriov *sriov,
+                                     struct qlcnic_vf_info *vf,
+                                     work_func_t func)
+{
+       if (test_bit(__QLCNIC_RESETTING, &vf->adapter->state))
+               return;
+
+       INIT_WORK(&vf->flr_work, func);
+       queue_work(sriov->bc.bc_flr_wq, &vf->flr_work);
+}
+
+static void qlcnic_sriov_handle_soft_flr(struct qlcnic_adapter *adapter,
+                                        struct qlcnic_bc_trans *trans,
+                                        struct qlcnic_vf_info *vf)
+{
+       struct qlcnic_sriov *sriov = adapter->ahw->sriov;
+
+       set_bit(QLC_BC_VF_FLR, &vf->state);
+       clear_bit(QLC_BC_VF_STATE, &vf->state);
+       set_bit(QLC_BC_VF_SOFT_FLR, &vf->state);
+       vf->flr_trans = trans;
+       qlcnic_sriov_schedule_flr(sriov, vf, qlcnic_sriov_pf_process_flr);
+       netdev_info(adapter->netdev, "Software FLR for PCI func %d\n",
+                   vf->pci_func);
+}
+
+bool qlcnic_sriov_soft_flr_check(struct qlcnic_adapter *adapter,
+                                struct qlcnic_bc_trans *trans,
+                                struct qlcnic_vf_info *vf)
+{
+       struct qlcnic_bc_hdr *hdr = trans->req_hdr;
+
+       if ((hdr->cmd_op == QLCNIC_BC_CMD_CHANNEL_INIT) &&
+           (hdr->op_type == QLC_BC_CMD) &&
+            test_bit(QLC_BC_VF_STATE, &vf->state)) {
+               qlcnic_sriov_handle_soft_flr(adapter, trans, vf);
+               return true;
+       }
+
+       return false;
+}
+
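+/*
+ * Handle an FLR notification for a VF: clear its guest VLAN list (if any)
+ * and queue the FLR worker unless an FLR is already in progress.
+ */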
+void qlcnic_sriov_pf_handle_flr(struct qlcnic_sriov *sriov,
+                               struct qlcnic_vf_info *vf)
+{
+       struct net_device *dev = vf->adapter->netdev;
+       struct qlcnic_vport *vp = vf->vp;
+
+       if (!test_and_clear_bit(QLC_BC_VF_STATE, &vf->state)) {
+               clear_bit(QLC_BC_VF_FLR, &vf->state);
+               return;
+       }
+
+       if (test_and_set_bit(QLC_BC_VF_FLR, &vf->state)) {
+               netdev_info(dev, "FLR for PCI func %d in progress\n",
+                           vf->pci_func);
+               return;
+       }
+
+       if (vp->vlan_mode == QLC_GUEST_VLAN_MODE)
+               memset(vf->sriov_vlans, 0,
+                      sizeof(*vf->sriov_vlans) * sriov->num_allowed_vlans);
+
+       qlcnic_sriov_schedule_flr(sriov, vf, qlcnic_sriov_pf_process_flr);
+       netdev_info(dev, "FLR received for PCI func %d\n", vf->pci_func);
+}
+
+void qlcnic_sriov_pf_reset(struct qlcnic_adapter *adapter)
+{
+       struct qlcnic_hardware_context *ahw = adapter->ahw;
+       struct qlcnic_sriov *sriov = ahw->sriov;
+       struct qlcnic_vf_info *vf;
+       u16 num_vfs = sriov->num_vfs;
+       int i;
+
+       for (i = 0; i < num_vfs; i++) {
+               vf = &sriov->vf_info[i];
+               vf->rx_ctx_id = 0;
+               vf->tx_ctx_id = 0;
+               cancel_work_sync(&vf->flr_work);
+               __qlcnic_sriov_process_flr(vf);
+               clear_bit(QLC_BC_VF_STATE, &vf->state);
+       }
+
+       qlcnic_sriov_pf_reset_vport_handle(adapter, ahw->pci_func);
+       QLCWRX(ahw, QLCNIC_MBX_INTR_ENBL, (ahw->num_msix - 1) << 8);
+}
+
+int qlcnic_sriov_pf_reinit(struct qlcnic_adapter *adapter)
+{
+       struct qlcnic_hardware_context *ahw = adapter->ahw;
+       int err;
+
+       if (!qlcnic_sriov_enable_check(adapter))
+               return 0;
+
+       ahw->op_mode = QLCNIC_SRIOV_PF_FUNC;
+
+       err = qlcnic_sriov_pf_init(adapter);
+       if (err)
+               return err;
+
+       dev_info(&adapter->pdev->dev, "%s: op_mode %d\n",
+                __func__, ahw->op_mode);
+       return err;
+}
+
+int qlcnic_sriov_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
+{
+       struct qlcnic_adapter *adapter = netdev_priv(netdev);
+       struct qlcnic_sriov *sriov = adapter->ahw->sriov;
+       int i, num_vfs;
+       struct qlcnic_vf_info *vf_info;
+       u8 *curr_mac;
+
+       if (!qlcnic_sriov_pf_check(adapter))
+               return -EOPNOTSUPP;
+
+       num_vfs = sriov->num_vfs;
+
+       if (!is_valid_ether_addr(mac) || vf >= num_vfs)
+               return -EINVAL;
+
+       if (ether_addr_equal(adapter->mac_addr, mac)) {
+               netdev_err(netdev, "MAC address is already in use by the PF\n");
+               return -EINVAL;
+       }
+
+       for (i = 0; i < num_vfs; i++) {
+               vf_info = &sriov->vf_info[i];
+               if (ether_addr_equal(vf_info->vp->mac, mac)) {
+                       netdev_err(netdev,
+                                  "MAC address is already in use by VF %d\n",
+                                  i);
+                       return -EINVAL;
+               }
+       }
+
+       vf_info = &sriov->vf_info[vf];
+       curr_mac = vf_info->vp->mac;
+
+       if (test_bit(QLC_BC_VF_STATE, &vf_info->state)) {
+               netdev_err(netdev,
+                          "MAC address change failed for VF %d, as VF driver is loaded. Please unload VF driver and retry the operation\n",
+                          vf);
+               return -EOPNOTSUPP;
+       }
+
+       memcpy(curr_mac, mac, netdev->addr_len);
+       netdev_info(netdev, "MAC Address %pM is configured for VF %d\n",
+                   mac, vf);
+       return 0;
+}
+
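+/*
+ * Set a VF's minimum and maximum Tx rate. Rates are given in Mbps and are
+ * stored as a percentage of the 10 Gbps link (rate / 100).
+ */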
+int qlcnic_sriov_set_vf_tx_rate(struct net_device *netdev, int vf,
+                               int min_tx_rate, int max_tx_rate)
+{
+       struct qlcnic_adapter *adapter = netdev_priv(netdev);
+       struct qlcnic_sriov *sriov = adapter->ahw->sriov;
+       struct qlcnic_vf_info *vf_info;
+       struct qlcnic_info nic_info;
+       struct qlcnic_vport *vp;
+       u16 vpid;
+
+       if (!qlcnic_sriov_pf_check(adapter))
+               return -EOPNOTSUPP;
+
+       if (vf >= sriov->num_vfs)
+               return -EINVAL;
+
+       vf_info = &sriov->vf_info[vf];
+       vp = vf_info->vp;
+       vpid = vp->handle;
+
+       if (!min_tx_rate)
+               min_tx_rate = QLC_VF_MIN_TX_RATE;
+
+       if (max_tx_rate &&
+           (max_tx_rate >= 10000 || max_tx_rate < min_tx_rate)) {
+               netdev_err(netdev,
+                          "Invalid max Tx rate, allowed range is [%d - %d]",
+                          min_tx_rate, QLC_VF_MAX_TX_RATE);
+               return -EINVAL;
+       }
+
+       if (!max_tx_rate)
+               max_tx_rate = 10000;
+
+       if (min_tx_rate &&
+           (min_tx_rate > max_tx_rate || min_tx_rate < QLC_VF_MIN_TX_RATE)) {
+               netdev_err(netdev,
+                          "Invalid min Tx rate, allowed range is [%d - %d]",
+                          QLC_VF_MIN_TX_RATE, max_tx_rate);
+               return -EINVAL;
+       }
+
+       if (test_bit(QLC_BC_VF_STATE, &vf_info->state)) {
+               if (qlcnic_sriov_get_vf_vport_info(adapter, &nic_info, vpid))
+                       return -EIO;
+
+               nic_info.max_tx_bw = max_tx_rate / 100;
+               nic_info.min_tx_bw = min_tx_rate / 100;
+               nic_info.bit_offsets = BIT_0;
+
+               if (qlcnic_sriov_pf_set_vport_info(adapter, &nic_info, vpid))
+                       return -EIO;
+       }
+
+       vp->max_tx_bw = max_tx_rate / 100;
+       netdev_info(netdev,
+                   "Setting Max Tx rate %d (Mbps), %d %% of PF bandwidth, for VF %d\n",
+                   max_tx_rate, vp->max_tx_bw, vf);
+       vp->min_tx_bw = min_tx_rate / 100;
+       netdev_info(netdev,
+                   "Setting Min Tx rate %d (Mbps), %d %% of PF bandwidth, for VF %d\n",
+                   min_tx_rate, vp->min_tx_bw, vf);
+       return 0;
+}
+
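+/*
+ * Set a VF's VLAN: 4095 selects guest VLAN mode, 0 clears any VLAN, and any
+ * other value becomes the port VLAN (PVID) with the given QoS priority.
+ */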
+int qlcnic_sriov_set_vf_vlan(struct net_device *netdev, int vf,
+                            u16 vlan, u8 qos)
+{
+       struct qlcnic_adapter *adapter = netdev_priv(netdev);
+       struct qlcnic_sriov *sriov = adapter->ahw->sriov;
+       struct qlcnic_vf_info *vf_info;
+       struct qlcnic_vport *vp;
+
+       if (!qlcnic_sriov_pf_check(adapter))
+               return -EOPNOTSUPP;
+
+       if (vf >= sriov->num_vfs || qos > 7)
+               return -EINVAL;
+
+       if (vlan > MAX_VLAN_ID) {
+               netdev_err(netdev,
+                          "Invalid VLAN ID, allowed range is [0 - %d]\n",
+                          MAX_VLAN_ID);
+               return -EINVAL;
+       }
+
+       vf_info = &sriov->vf_info[vf];
+       vp = vf_info->vp;
+       if (test_bit(QLC_BC_VF_STATE, &vf_info->state)) {
+               netdev_err(netdev,
+                          "VLAN change failed for VF %d, as VF driver is loaded. Please unload VF driver and retry the operation\n",
+                          vf);
+               return -EOPNOTSUPP;
+       }
+
+       memset(vf_info->sriov_vlans, 0,
+              sizeof(*vf_info->sriov_vlans) * sriov->num_allowed_vlans);
+
+       switch (vlan) {
+       case 4095:
+               vp->vlan_mode = QLC_GUEST_VLAN_MODE;
+               break;
+       case 0:
+               vp->vlan_mode = QLC_NO_VLAN_MODE;
+               vp->qos = 0;
+               break;
+       default:
+               vp->vlan_mode = QLC_PVID_MODE;
+               qlcnic_sriov_add_vlan_id(sriov, vf_info, vlan);
+               vp->qos = qos;
+               vp->pvid = vlan;
+       }
+
+       netdev_info(netdev, "Setting VLAN %d, QoS %d, for VF %d\n",
+                   vlan, qos, vf);
+       return 0;
+}
+
+static __u32 qlcnic_sriov_get_vf_vlan(struct qlcnic_adapter *adapter,
+                                     struct qlcnic_vport *vp, int vf)
+{
+       __u32 vlan = 0;
+
+       switch (vp->vlan_mode) {
+       case QLC_PVID_MODE:
+               vlan = vp->pvid;
+               break;
+       case QLC_GUEST_VLAN_MODE:
+               vlan = MAX_VLAN_ID;
+               break;
+       case QLC_NO_VLAN_MODE:
+               vlan = 0;
+               break;
+       default:
+               netdev_info(adapter->netdev, "Invalid VLAN mode = %d for VF %d\n",
+                           vp->vlan_mode, vf);
+       }
+
+       return vlan;
+}
+
+int qlcnic_sriov_get_vf_config(struct net_device *netdev,
+                              int vf, struct ifla_vf_info *ivi)
+{
+       struct qlcnic_adapter *adapter = netdev_priv(netdev);
+       struct qlcnic_sriov *sriov = adapter->ahw->sriov;
+       struct qlcnic_vport *vp;
+
+       if (!qlcnic_sriov_pf_check(adapter))
+               return -EOPNOTSUPP;
+
+       if (vf >= sriov->num_vfs)
+               return -EINVAL;
+
+       vp = sriov->vf_info[vf].vp;
+       memcpy(&ivi->mac, vp->mac, ETH_ALEN);
+       ivi->vlan = qlcnic_sriov_get_vf_vlan(adapter, vp, vf);
+       ivi->qos = vp->qos;
+       ivi->spoofchk = vp->spoofchk;
+       if (vp->max_tx_bw == MAX_BW)
+               ivi->max_tx_rate = 0;
+       else
+               ivi->max_tx_rate = vp->max_tx_bw * 100;
+       if (vp->min_tx_bw == MIN_BW)
+               ivi->min_tx_rate = 0;
+       else
+               ivi->min_tx_rate = vp->min_tx_bw * 100;
+
+       ivi->vf = vf;
+       return 0;
+}
+
+int qlcnic_sriov_set_vf_spoofchk(struct net_device *netdev, int vf, bool chk)
+{
+       struct qlcnic_adapter *adapter = netdev_priv(netdev);
+       struct qlcnic_sriov *sriov = adapter->ahw->sriov;
+       struct qlcnic_vf_info *vf_info;
+       struct qlcnic_vport *vp;
+
+       if (!qlcnic_sriov_pf_check(adapter))
+               return -EOPNOTSUPP;
+
+       if (vf >= sriov->num_vfs)
+               return -EINVAL;
+
+       vf_info = &sriov->vf_info[vf];
+       vp = vf_info->vp;
+       if (test_bit(QLC_BC_VF_STATE, &vf_info->state)) {
+               netdev_err(netdev,
+                          "Spoof check change failed for VF %d, as VF driver is loaded. Please unload VF driver and retry the operation\n",
+                          vf);
+               return -EOPNOTSUPP;
+       }
+
+       vp->spoofchk = chk;
+       return 0;
+}
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
new file mode 100644 (file)
index 0000000..ccbb045
--- /dev/null
@@ -0,0 +1,1430 @@
+/*
+ * QLogic qlcnic NIC Driver
+ * Copyright (c) 2009-2013 QLogic Corporation
+ *
+ * See LICENSE.qlcnic for copyright and licensing details.
+ */
+
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/swab.h>
+#include <linux/dma-mapping.h>
+#include <net/ip.h>
+#include <linux/ipv6.h>
+#include <linux/inetdevice.h>
+#include <linux/sysfs.h>
+#include <linux/aer.h>
+#include <linux/log2.h>
+#ifdef CONFIG_QLCNIC_HWMON
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
+#endif
+
+#include "qlcnic.h"
+#include "qlcnic_hw.h"
+
+int qlcnicvf_config_bridged_mode(struct qlcnic_adapter *adapter, u32 enable)
+{
+       return -EOPNOTSUPP;
+}
+
+int qlcnicvf_config_led(struct qlcnic_adapter *adapter, u32 state, u32 rate)
+{
+       return -EOPNOTSUPP;
+}
+
+static ssize_t qlcnic_store_bridged_mode(struct device *dev,
+                                        struct device_attribute *attr,
+                                        const char *buf, size_t len)
+{
+       struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
+       unsigned long new;
+       int ret = -EINVAL;
+
+       if (!(adapter->ahw->capabilities & QLCNIC_FW_CAPABILITY_BDG))
+               goto err_out;
+
+       if (!test_bit(__QLCNIC_DEV_UP, &adapter->state))
+               goto err_out;
+
+       if (kstrtoul(buf, 2, &new))
+               goto err_out;
+
+       if (!qlcnic_config_bridged_mode(adapter, !!new))
+               ret = len;
+
+err_out:
+       return ret;
+}
+
+static ssize_t qlcnic_show_bridged_mode(struct device *dev,
+                                       struct device_attribute *attr,
+                                       char *buf)
+{
+       struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
+       int bridged_mode = 0;
+
+       if (adapter->ahw->capabilities & QLCNIC_FW_CAPABILITY_BDG)
+               bridged_mode = !!(adapter->flags & QLCNIC_BRIDGE_ENABLED);
+
+       return sprintf(buf, "%d\n", bridged_mode);
+}
+
+static ssize_t qlcnic_store_diag_mode(struct device *dev,
+                                     struct device_attribute *attr,
+                                     const char *buf, size_t len)
+{
+       struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
+       unsigned long new;
+
+       if (kstrtoul(buf, 2, &new))
+               return -EINVAL;
+
+       if (!!new != !!(adapter->flags & QLCNIC_DIAG_ENABLED))
+               adapter->flags ^= QLCNIC_DIAG_ENABLED;
+
+       return len;
+}
+
+static ssize_t qlcnic_show_diag_mode(struct device *dev,
+                                    struct device_attribute *attr, char *buf)
+{
+       struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
+       return sprintf(buf, "%d\n", !!(adapter->flags & QLCNIC_DIAG_ENABLED));
+}
+
+static int qlcnic_validate_beacon(struct qlcnic_adapter *adapter, u16 beacon,
+                                 u8 *state, u8 *rate)
+{
+       *rate = LSB(beacon);
+       *state = MSB(beacon);
+
+       QLCDB(adapter, DRV, "rate %x state %x\n", *rate, *state);
+
+       if (!*state) {
+               *rate = __QLCNIC_MAX_LED_RATE;
+               return 0;
+       } else if (*state > __QLCNIC_MAX_LED_STATE) {
+               return -EINVAL;
+       }
+
+       if ((!*rate) || (*rate > __QLCNIC_MAX_LED_RATE))
+               return -EINVAL;
+
+       return 0;
+}
+
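+/*
+ * Beacon store helper for 83xx adapters: the value is a simple on/off flag
+ * applied through qlcnic_83xx_config_led().
+ */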
+static int qlcnic_83xx_store_beacon(struct qlcnic_adapter *adapter,
+                                   const char *buf, size_t len)
+{
+       struct qlcnic_hardware_context *ahw = adapter->ahw;
+       unsigned long h_beacon;
+       int err;
+
+       if (test_bit(__QLCNIC_RESETTING, &adapter->state))
+               return -EIO;
+
+       if (kstrtoul(buf, 2, &h_beacon))
+               return -EINVAL;
+
+       qlcnic_get_beacon_state(adapter);
+
+       if (ahw->beacon_state == h_beacon)
+               return len;
+
+       rtnl_lock();
+       if (!ahw->beacon_state) {
+               if (test_and_set_bit(__QLCNIC_LED_ENABLE, &adapter->state)) {
+                       rtnl_unlock();
+                       return -EBUSY;
+               }
+       }
+
+       if (h_beacon)
+               err = qlcnic_83xx_config_led(adapter, 1, h_beacon);
+       else
+               err = qlcnic_83xx_config_led(adapter, 0, !h_beacon);
+       if (!err)
+               ahw->beacon_state = h_beacon;
+
+       if (!ahw->beacon_state)
+               clear_bit(__QLCNIC_LED_ENABLE, &adapter->state);
+
+       rtnl_unlock();
+       return len;
+}
+
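+/*
+ * Beacon store helper for 82xx adapters: expects a raw 16-bit value whose
+ * MSB is the LED state and LSB the blink rate, validated above.
+ */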
+static int qlcnic_82xx_store_beacon(struct qlcnic_adapter *adapter,
+                                   const char *buf, size_t len)
+{
+       struct qlcnic_hardware_context *ahw = adapter->ahw;
+       int err, drv_sds_rings = adapter->drv_sds_rings;
+       u16 beacon;
+       u8 b_state, b_rate;
+
+       if (len != sizeof(u16))
+               return -EINVAL;
+
+       memcpy(&beacon, buf, sizeof(u16));
+       err = qlcnic_validate_beacon(adapter, beacon, &b_state, &b_rate);
+       if (err)
+               return err;
+
+       qlcnic_get_beacon_state(adapter);
+
+       if (ahw->beacon_state == b_state)
+               return len;
+
+       rtnl_lock();
+       if (!ahw->beacon_state) {
+               if (test_and_set_bit(__QLCNIC_LED_ENABLE, &adapter->state)) {
+                       rtnl_unlock();
+                       return -EBUSY;
+               }
+       }
+
+       if (test_bit(__QLCNIC_RESETTING, &adapter->state)) {
+               err = -EIO;
+               goto out;
+       }
+
+       if (!test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
+               err = qlcnic_diag_alloc_res(adapter->netdev, QLCNIC_LED_TEST);
+               if (err)
+                       goto out;
+               set_bit(__QLCNIC_DIAG_RES_ALLOC, &adapter->state);
+       }
+
+       err = qlcnic_config_led(adapter, b_state, b_rate);
+       if (!err) {
+               err = len;
+               ahw->beacon_state = b_state;
+       }
+
+       if (test_and_clear_bit(__QLCNIC_DIAG_RES_ALLOC, &adapter->state))
+               qlcnic_diag_free_res(adapter->netdev, drv_sds_rings);
+
+out:
+       if (!ahw->beacon_state)
+               clear_bit(__QLCNIC_LED_ENABLE, &adapter->state);
+       rtnl_unlock();
+
+       return err;
+}
+
+static ssize_t qlcnic_store_beacon(struct device *dev,
+                                  struct device_attribute *attr,
+                                  const char *buf, size_t len)
+{
+       struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
+       int err = 0;
+
+       if (adapter->ahw->op_mode == QLCNIC_NON_PRIV_FUNC) {
+               dev_warn(dev,
+                        "LED test is not supported in non-privileged mode\n");
+               return -EOPNOTSUPP;
+       }
+
+       if (qlcnic_82xx_check(adapter))
+               err = qlcnic_82xx_store_beacon(adapter, buf, len);
+       else if (qlcnic_83xx_check(adapter))
+               err = qlcnic_83xx_store_beacon(adapter, buf, len);
+       else
+               return -EIO;
+
+       return err;
+}
+
+static ssize_t qlcnic_show_beacon(struct device *dev,
+                                 struct device_attribute *attr, char *buf)
+{
+       struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
+
+       return sprintf(buf, "%d\n", adapter->ahw->beacon_state);
+}
+
+static int qlcnic_sysfs_validate_crb(struct qlcnic_adapter *adapter,
+                                    loff_t offset, size_t size)
+{
+       size_t crb_size = 4;
+
+       if (!(adapter->flags & QLCNIC_DIAG_ENABLED))
+               return -EIO;
+
+       if (offset < QLCNIC_PCI_CRBSPACE) {
+               if (ADDR_IN_RANGE(offset, QLCNIC_PCI_CAMQM,
+                                 QLCNIC_PCI_CAMQM_END))
+                       crb_size = 8;
+               else
+                       return -EINVAL;
+       }
+
+       if ((size != crb_size) || (offset & (crb_size-1)))
+               return  -EINVAL;
+
+       return 0;
+}
+
+static ssize_t qlcnic_sysfs_read_crb(struct file *filp, struct kobject *kobj,
+                                    struct bin_attribute *attr, char *buf,
+                                    loff_t offset, size_t size)
+{
+       struct device *dev = container_of(kobj, struct device, kobj);
+       struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
+       int ret;
+
+       ret = qlcnic_sysfs_validate_crb(adapter, offset, size);
+       if (ret != 0)
+               return ret;
+       qlcnic_read_crb(adapter, buf, offset, size);
+       qlcnic_swap32_buffer((u32 *)buf, size / sizeof(u32));
+
+       return size;
+}
+
+static ssize_t qlcnic_sysfs_write_crb(struct file *filp, struct kobject *kobj,
+                                     struct bin_attribute *attr, char *buf,
+                                     loff_t offset, size_t size)
+{
+       struct device *dev = container_of(kobj, struct device, kobj);
+       struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
+       int ret;
+
+       ret = qlcnic_sysfs_validate_crb(adapter, offset, size);
+       if (ret != 0)
+               return ret;
+
+       qlcnic_swap32_buffer((u32 *)buf, size / sizeof(u32));
+       qlcnic_write_crb(adapter, buf, offset, size);
+       return size;
+}
+
+static int qlcnic_sysfs_validate_mem(struct qlcnic_adapter *adapter,
+                                    loff_t offset, size_t size)
+{
+       if (!(adapter->flags & QLCNIC_DIAG_ENABLED))
+               return -EIO;
+
+       if ((size != 8) || (offset & 0x7))
+               return  -EIO;
+
+       return 0;
+}
+
+static ssize_t qlcnic_sysfs_read_mem(struct file *filp, struct kobject *kobj,
+                                    struct bin_attribute *attr, char *buf,
+                                    loff_t offset, size_t size)
+{
+       struct device *dev = container_of(kobj, struct device, kobj);
+       struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
+       u64 data;
+       int ret;
+
+       ret = qlcnic_sysfs_validate_mem(adapter, offset, size);
+       if (ret != 0)
+               return ret;
+
+       if (qlcnic_pci_mem_read_2M(adapter, offset, &data))
+               return -EIO;
+
+       memcpy(buf, &data, size);
+       qlcnic_swap32_buffer((u32 *)buf, size / sizeof(u32));
+
+       return size;
+}
+
+static ssize_t qlcnic_sysfs_write_mem(struct file *filp, struct kobject *kobj,
+                                     struct bin_attribute *attr, char *buf,
+                                     loff_t offset, size_t size)
+{
+       struct device *dev = container_of(kobj, struct device, kobj);
+       struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
+       u64 data;
+       int ret;
+
+       ret = qlcnic_sysfs_validate_mem(adapter, offset, size);
+       if (ret != 0)
+               return ret;
+
+       qlcnic_swap32_buffer((u32 *)buf, size / sizeof(u32));
+       memcpy(&data, buf, size);
+
+       if (qlcnic_pci_mem_write_2M(adapter, offset, data))
+               return -EIO;
+
+       return size;
+}
+
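+/* Map a PCI function number to its index in the adapter's NPAR table;
+ * returns -EINVAL if the function is not a NIC function.
+ */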
+int qlcnic_is_valid_nic_func(struct qlcnic_adapter *adapter, u8 pci_func)
+{
+       int i;
+
+       for (i = 0; i < adapter->ahw->total_nic_func; i++) {
+               if (adapter->npars[i].pci_func == pci_func)
+                       return i;
+       }
+
+       dev_err(&adapter->pdev->dev, "%s: Invalid nic function\n", __func__);
+       return -EINVAL;
+}
+
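+/* A port-mirroring entry is valid only when both the source and destination
+ * functions are NIC functions attached to the same eswitch (physical port).
+ */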
+static int validate_pm_config(struct qlcnic_adapter *adapter,
+                             struct qlcnic_pm_func_cfg *pm_cfg, int count)
+{
+       u8 src_pci_func, s_esw_id, d_esw_id;
+       u8 dest_pci_func;
+       int i, src_index, dest_index;
+
+       for (i = 0; i < count; i++) {
+               src_pci_func = pm_cfg[i].pci_func;
+               dest_pci_func = pm_cfg[i].dest_npar;
+               src_index = qlcnic_is_valid_nic_func(adapter, src_pci_func);
+               if (src_index < 0)
+                       return -EINVAL;
+
+               dest_index = qlcnic_is_valid_nic_func(adapter, dest_pci_func);
+               if (dest_index < 0)
+                       return -EINVAL;
+
+               s_esw_id = adapter->npars[src_index].phy_port;
+               d_esw_id = adapter->npars[dest_index].phy_port;
+
+               if (s_esw_id != d_esw_id)
+                       return -EINVAL;
+       }
+
+       return 0;
+}
+
+static ssize_t qlcnic_sysfs_write_pm_config(struct file *filp,
+                                           struct kobject *kobj,
+                                           struct bin_attribute *attr,
+                                           char *buf, loff_t offset,
+                                           size_t size)
+{
+       struct device *dev = container_of(kobj, struct device, kobj);
+       struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
+       struct qlcnic_pm_func_cfg *pm_cfg;
+       u32 id, action, pci_func;
+       int count, rem, i, ret, index;
+
+       count   = size / sizeof(struct qlcnic_pm_func_cfg);
+       rem     = size % sizeof(struct qlcnic_pm_func_cfg);
+       if (rem)
+               return -EINVAL;
+
+       qlcnic_swap32_buffer((u32 *)buf, size / sizeof(u32));
+       pm_cfg = (struct qlcnic_pm_func_cfg *)buf;
+       ret = validate_pm_config(adapter, pm_cfg, count);
+
+       if (ret)
+               return ret;
+       for (i = 0; i < count; i++) {
+               pci_func = pm_cfg[i].pci_func;
+               action = !!pm_cfg[i].action;
+               index = qlcnic_is_valid_nic_func(adapter, pci_func);
+               if (index < 0)
+                       return -EINVAL;
+
+               id = adapter->npars[index].phy_port;
+               ret = qlcnic_config_port_mirroring(adapter, id,
+                                                  action, pci_func);
+               if (ret)
+                       return ret;
+       }
+
+       for (i = 0; i < count; i++) {
+               pci_func = pm_cfg[i].pci_func;
+               index = qlcnic_is_valid_nic_func(adapter, pci_func);
+               if (index < 0)
+                       return -EINVAL;
+               id = adapter->npars[index].phy_port;
+               adapter->npars[index].enable_pm = !!pm_cfg[i].action;
+               adapter->npars[index].dest_npar = id;
+       }
+
+       return size;
+}
+
+static ssize_t qlcnic_sysfs_read_pm_config(struct file *filp,
+                                          struct kobject *kobj,
+                                          struct bin_attribute *attr,
+                                          char *buf, loff_t offset,
+                                          size_t size)
+{
+       struct device *dev = container_of(kobj, struct device, kobj);
+       struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
+       struct qlcnic_pm_func_cfg *pm_cfg;
+       u8 pci_func;
+       u32 count;
+       int i;
+
+       memset(buf, 0, size);
+       pm_cfg = (struct qlcnic_pm_func_cfg *)buf;
+       count = size / sizeof(struct qlcnic_pm_func_cfg);
+       for (i = 0; i < adapter->ahw->total_nic_func; i++) {
+               pci_func = adapter->npars[i].pci_func;
+               if (pci_func >= count) {
+                       dev_dbg(dev, "%s: Total nic functions[%d], App sent function count[%d]\n",
+                               __func__, adapter->ahw->total_nic_func, count);
+                       continue;
+               }
+               if (!adapter->npars[i].eswitch_status)
+                       continue;
+
+               pm_cfg[pci_func].action = adapter->npars[i].enable_pm;
+               pm_cfg[pci_func].dest_npar = 0;
+               pm_cfg[pci_func].pci_func = i;
+       }
+       qlcnic_swap32_buffer((u32 *)buf, size / sizeof(u32));
+       return size;
+}
+
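+/* Validate eswitch configuration entries: the target must be a valid vNIC
+ * function, MAC anti-spoofing/override and promiscuous mode may only be
+ * changed for non-privileged functions, and QLCNIC_ADD_VLAN requests must
+ * carry a valid VLAN id.
+ */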
+static int validate_esw_config(struct qlcnic_adapter *adapter,
+                              struct qlcnic_esw_func_cfg *esw_cfg, int count)
+{
+       struct qlcnic_hardware_context *ahw = adapter->ahw;
+       int i, ret;
+       u32 op_mode;
+       u8 pci_func;
+
+       if (qlcnic_82xx_check(adapter))
+               op_mode = readl(ahw->pci_base0 + QLCNIC_DRV_OP_MODE);
+       else
+               op_mode = QLCRDX(ahw, QLC_83XX_DRV_OP_MODE);
+
+       for (i = 0; i < count; i++) {
+               pci_func = esw_cfg[i].pci_func;
+               if (pci_func >= ahw->max_vnic_func)
+                       return -EINVAL;
+
+               if (adapter->ahw->op_mode == QLCNIC_MGMT_FUNC)
+                       if (qlcnic_is_valid_nic_func(adapter, pci_func) < 0)
+                               return -EINVAL;
+
+               switch (esw_cfg[i].op_mode) {
+               case QLCNIC_PORT_DEFAULTS:
+                       if (qlcnic_82xx_check(adapter)) {
+                               ret = QLC_DEV_GET_DRV(op_mode, pci_func);
+                       } else {
+                               ret = QLC_83XX_GET_FUNC_PRIVILEGE(op_mode,
+                                                                 pci_func);
+                               esw_cfg[i].offload_flags = 0;
+                       }
+
+                       if (ret != QLCNIC_NON_PRIV_FUNC) {
+                               if (esw_cfg[i].mac_anti_spoof != 0)
+                                       return -EINVAL;
+                               if (esw_cfg[i].mac_override != 1)
+                                       return -EINVAL;
+                               if (esw_cfg[i].promisc_mode != 1)
+                                       return -EINVAL;
+                       }
+                       break;
+               case QLCNIC_ADD_VLAN:
+                       if (!IS_VALID_VLAN(esw_cfg[i].vlan_id))
+                               return -EINVAL;
+                       if (!esw_cfg[i].op_type)
+                               return -EINVAL;
+                       break;
+               case QLCNIC_DEL_VLAN:
+                       if (!esw_cfg[i].op_type)
+                               return -EINVAL;
+                       break;
+               default:
+                       return -EINVAL;
+               }
+       }
+
+       return 0;
+}
+
+static ssize_t qlcnic_sysfs_write_esw_config(struct file *file,
+                                            struct kobject *kobj,
+                                            struct bin_attribute *attr,
+                                            char *buf, loff_t offset,
+                                            size_t size)
+{
+       struct device *dev = container_of(kobj, struct device, kobj);
+       struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
+       struct qlcnic_esw_func_cfg *esw_cfg;
+       struct qlcnic_npar_info *npar;
+       int count, rem, i, ret;
+       int index;
+       u8 op_mode = 0, pci_func;
+
+       count   = size / sizeof(struct qlcnic_esw_func_cfg);
+       rem     = size % sizeof(struct qlcnic_esw_func_cfg);
+       if (rem)
+               return -EINVAL;
+
+       qlcnic_swap32_buffer((u32 *)buf, size / sizeof(u32));
+       esw_cfg = (struct qlcnic_esw_func_cfg *)buf;
+       ret = validate_esw_config(adapter, esw_cfg, count);
+       if (ret)
+               return ret;
+
+       for (i = 0; i < count; i++) {
+               if (adapter->ahw->op_mode == QLCNIC_MGMT_FUNC)
+                       if (qlcnic_config_switch_port(adapter, &esw_cfg[i]))
+                               return -EINVAL;
+
+               if (adapter->ahw->pci_func != esw_cfg[i].pci_func)
+                       continue;
+
+               op_mode = esw_cfg[i].op_mode;
+               qlcnic_get_eswitch_port_config(adapter, &esw_cfg[i]);
+               esw_cfg[i].op_mode = op_mode;
+               esw_cfg[i].pci_func = adapter->ahw->pci_func;
+
+               switch (esw_cfg[i].op_mode) {
+               case QLCNIC_PORT_DEFAULTS:
+                       qlcnic_set_eswitch_port_features(adapter, &esw_cfg[i]);
+                       rtnl_lock();
+                       qlcnic_set_netdev_features(adapter, &esw_cfg[i]);
+                       rtnl_unlock();
+                       break;
+               case QLCNIC_ADD_VLAN:
+                       qlcnic_set_vlan_config(adapter, &esw_cfg[i]);
+                       break;
+               case QLCNIC_DEL_VLAN:
+                       esw_cfg[i].vlan_id = 0;
+                       qlcnic_set_vlan_config(adapter, &esw_cfg[i]);
+                       break;
+               }
+       }
+
+       if (adapter->ahw->op_mode != QLCNIC_MGMT_FUNC)
+               goto out;
+
+       for (i = 0; i < count; i++) {
+               pci_func = esw_cfg[i].pci_func;
+               index = qlcnic_is_valid_nic_func(adapter, pci_func);
+               if (index < 0)
+                       return -EINVAL;
+               npar = &adapter->npars[index];
+               switch (esw_cfg[i].op_mode) {
+               case QLCNIC_PORT_DEFAULTS:
+                       npar->promisc_mode = esw_cfg[i].promisc_mode;
+                       npar->mac_override = esw_cfg[i].mac_override;
+                       npar->offload_flags = esw_cfg[i].offload_flags;
+                       npar->mac_anti_spoof = esw_cfg[i].mac_anti_spoof;
+                       npar->discard_tagged = esw_cfg[i].discard_tagged;
+                       break;
+               case QLCNIC_ADD_VLAN:
+                       npar->pvid = esw_cfg[i].vlan_id;
+                       break;
+               case QLCNIC_DEL_VLAN:
+                       npar->pvid = 0;
+                       break;
+               }
+       }
+out:
+       return size;
+}
+
+static ssize_t qlcnic_sysfs_read_esw_config(struct file *file,
+                                           struct kobject *kobj,
+                                           struct bin_attribute *attr,
+                                           char *buf, loff_t offset,
+                                           size_t size)
+{
+       struct device *dev = container_of(kobj, struct device, kobj);
+       struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
+       struct qlcnic_esw_func_cfg *esw_cfg;
+       u8 pci_func;
+       u32 count;
+       int i;
+
+       memset(buf, 0, size);
+       esw_cfg = (struct qlcnic_esw_func_cfg *)buf;
+       count = size / sizeof(struct qlcnic_esw_func_cfg);
+       for (i = 0; i < adapter->ahw->total_nic_func; i++) {
+               pci_func = adapter->npars[i].pci_func;
+               if (pci_func >= count) {
+                       dev_dbg(dev, "%s: Total nic functions[%d], App sent function count[%d]\n",
+                               __func__, adapter->ahw->total_nic_func, count);
+                       continue;
+               }
+               if (!adapter->npars[i].eswitch_status)
+                       continue;
+
+               esw_cfg[pci_func].pci_func = pci_func;
+               if (qlcnic_get_eswitch_port_config(adapter, &esw_cfg[pci_func]))
+                       return -EINVAL;
+       }
+       qlcnic_swap32_buffer((u32 *)buf, size / sizeof(u32));
+       return size;
+}
+
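+/* NPAR bandwidth settings are accepted only for valid NIC functions and only
+ * when both the minimum and maximum bandwidth values are within range.
+ */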
+static int validate_npar_config(struct qlcnic_adapter *adapter,
+                               struct qlcnic_npar_func_cfg *np_cfg,
+                               int count)
+{
+       u8 pci_func, i;
+
+       for (i = 0; i < count; i++) {
+               pci_func = np_cfg[i].pci_func;
+               if (qlcnic_is_valid_nic_func(adapter, pci_func) < 0)
+                       return -EINVAL;
+
+               if (!IS_VALID_BW(np_cfg[i].min_bw) ||
+                   !IS_VALID_BW(np_cfg[i].max_bw))
+                       return -EINVAL;
+       }
+       return 0;
+}
+
+static ssize_t qlcnic_sysfs_write_npar_config(struct file *file,
+                                             struct kobject *kobj,
+                                             struct bin_attribute *attr,
+                                             char *buf, loff_t offset,
+                                             size_t size)
+{
+       struct device *dev = container_of(kobj, struct device, kobj);
+       struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
+       struct qlcnic_info nic_info;
+       struct qlcnic_npar_func_cfg *np_cfg;
+       int i, count, rem, ret, index;
+       u8 pci_func;
+
+       count   = size / sizeof(struct qlcnic_npar_func_cfg);
+       rem     = size % sizeof(struct qlcnic_npar_func_cfg);
+       if (rem)
+               return -EINVAL;
+
+       qlcnic_swap32_buffer((u32 *)buf, size / sizeof(u32));
+       np_cfg = (struct qlcnic_npar_func_cfg *)buf;
+       ret = validate_npar_config(adapter, np_cfg, count);
+       if (ret)
+               return ret;
+
+       for (i = 0; i < count; i++) {
+               pci_func = np_cfg[i].pci_func;
+
+               memset(&nic_info, 0, sizeof(struct qlcnic_info));
+               ret = qlcnic_get_nic_info(adapter, &nic_info, pci_func);
+               if (ret)
+                       return ret;
+               nic_info.pci_func = pci_func;
+               nic_info.min_tx_bw = np_cfg[i].min_bw;
+               nic_info.max_tx_bw = np_cfg[i].max_bw;
+               ret = qlcnic_set_nic_info(adapter, &nic_info);
+               if (ret)
+                       return ret;
+               index = qlcnic_is_valid_nic_func(adapter, pci_func);
+               if (index < 0)
+                       return -EINVAL;
+               adapter->npars[index].min_bw = nic_info.min_tx_bw;
+               adapter->npars[index].max_bw = nic_info.max_tx_bw;
+       }
+
+       return size;
+}
+
+static ssize_t qlcnic_sysfs_read_npar_config(struct file *file,
+                                            struct kobject *kobj,
+                                            struct bin_attribute *attr,
+                                            char *buf, loff_t offset,
+                                            size_t size)
+{
+       struct device *dev = container_of(kobj, struct device, kobj);
+       struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
+       struct qlcnic_npar_func_cfg *np_cfg;
+       struct qlcnic_info nic_info;
+       u8 pci_func;
+       int i, ret;
+       u32 count;
+
+       memset(&nic_info, 0, sizeof(struct qlcnic_info));
+       memset(buf, 0, size);
+       np_cfg = (struct qlcnic_npar_func_cfg *)buf;
+
+       count = size / sizeof(struct qlcnic_npar_func_cfg);
+       for (i = 0; i < adapter->ahw->total_nic_func; i++) {
+               if (adapter->npars[i].pci_func >= count) {
+                       dev_dbg(dev, "%s: Total nic functions[%d], App sent function count[%d]\n",
+                               __func__, adapter->ahw->total_nic_func, count);
+                       continue;
+               }
+               if (!adapter->npars[i].eswitch_status)
+                       continue;
+               pci_func = adapter->npars[i].pci_func;
+               if (qlcnic_is_valid_nic_func(adapter, pci_func) < 0)
+                       continue;
+               ret = qlcnic_get_nic_info(adapter, &nic_info, pci_func);
+               if (ret)
+                       return ret;
+
+               np_cfg[pci_func].pci_func = pci_func;
+               np_cfg[pci_func].op_mode = (u8)nic_info.op_mode;
+               np_cfg[pci_func].port_num = nic_info.phys_port;
+               np_cfg[pci_func].fw_capab = nic_info.capabilities;
+               np_cfg[pci_func].min_bw = nic_info.min_tx_bw;
+               np_cfg[pci_func].max_bw = nic_info.max_tx_bw;
+               np_cfg[pci_func].max_tx_queues = nic_info.max_tx_ques;
+               np_cfg[pci_func].max_rx_queues = nic_info.max_rx_ques;
+       }
+       qlcnic_swap32_buffer((u32 *)buf, size / sizeof(u32));
+       return size;
+}
+
+static ssize_t qlcnic_sysfs_get_port_stats(struct file *file,
+                                          struct kobject *kobj,
+                                          struct bin_attribute *attr,
+                                          char *buf, loff_t offset,
+                                          size_t size)
+{
+       struct device *dev = container_of(kobj, struct device, kobj);
+       struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
+       struct qlcnic_esw_statistics port_stats;
+       int ret;
+
+       if (qlcnic_83xx_check(adapter))
+               return -EOPNOTSUPP;
+
+       if (size != sizeof(struct qlcnic_esw_statistics))
+               return -EINVAL;
+
+       if (offset >= adapter->ahw->max_vnic_func)
+               return -EINVAL;
+
+       memset(&port_stats, 0, size);
+       ret = qlcnic_get_port_stats(adapter, offset, QLCNIC_QUERY_RX_COUNTER,
+                                   &port_stats.rx);
+       if (ret)
+               return ret;
+
+       ret = qlcnic_get_port_stats(adapter, offset, QLCNIC_QUERY_TX_COUNTER,
+                                   &port_stats.tx);
+       if (ret)
+               return ret;
+
+       memcpy(buf, &port_stats, size);
+       return size;
+}
+
+static ssize_t qlcnic_sysfs_get_esw_stats(struct file *file,
+                                         struct kobject *kobj,
+                                         struct bin_attribute *attr,
+                                         char *buf, loff_t offset,
+                                         size_t size)
+{
+       struct device *dev = container_of(kobj, struct device, kobj);
+       struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
+       struct qlcnic_esw_statistics esw_stats;
+       int ret;
+
+       if (qlcnic_83xx_check(adapter))
+               return -EOPNOTSUPP;
+
+       if (size != sizeof(struct qlcnic_esw_statistics))
+               return -EINVAL;
+
+       if (offset >= QLCNIC_NIU_MAX_XG_PORTS)
+               return -EINVAL;
+
+       memset(&esw_stats, 0, size);
+       ret = qlcnic_get_eswitch_stats(adapter, offset, QLCNIC_QUERY_RX_COUNTER,
+                                      &esw_stats.rx);
+       if (ret)
+               return ret;
+
+       ret = qlcnic_get_eswitch_stats(adapter, offset, QLCNIC_QUERY_TX_COUNTER,
+                                      &esw_stats.tx);
+       if (ret)
+               return ret;
+
+       memcpy(buf, &esw_stats, size);
+       return size;
+}
+
+static ssize_t qlcnic_sysfs_clear_esw_stats(struct file *file,
+                                           struct kobject *kobj,
+                                           struct bin_attribute *attr,
+                                           char *buf, loff_t offset,
+                                           size_t size)
+{
+       struct device *dev = container_of(kobj, struct device, kobj);
+       struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
+       int ret;
+
+       if (qlcnic_83xx_check(adapter))
+               return -EOPNOTSUPP;
+
+       if (offset >= QLCNIC_NIU_MAX_XG_PORTS)
+               return -EINVAL;
+
+       ret = qlcnic_clear_esw_stats(adapter, QLCNIC_STATS_ESWITCH, offset,
+                                    QLCNIC_QUERY_RX_COUNTER);
+       if (ret)
+               return ret;
+
+       ret = qlcnic_clear_esw_stats(adapter, QLCNIC_STATS_ESWITCH, offset,
+                                    QLCNIC_QUERY_TX_COUNTER);
+       if (ret)
+               return ret;
+
+       return size;
+}
+
+static ssize_t qlcnic_sysfs_clear_port_stats(struct file *file,
+                                            struct kobject *kobj,
+                                            struct bin_attribute *attr,
+                                            char *buf, loff_t offset,
+                                            size_t size)
+{
+       struct device *dev = container_of(kobj, struct device, kobj);
+       struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
+       int ret;
+
+       if (qlcnic_83xx_check(adapter))
+               return -EOPNOTSUPP;
+
+       if (offset >= adapter->ahw->max_vnic_func)
+               return -EINVAL;
+
+       ret = qlcnic_clear_esw_stats(adapter, QLCNIC_STATS_PORT, offset,
+                                    QLCNIC_QUERY_RX_COUNTER);
+       if (ret)
+               return ret;
+
+       ret = qlcnic_clear_esw_stats(adapter, QLCNIC_STATS_PORT, offset,
+                                    QLCNIC_QUERY_TX_COUNTER);
+       if (ret)
+               return ret;
+
+       return size;
+}
+
+static ssize_t qlcnic_sysfs_read_pci_config(struct file *file,
+                                           struct kobject *kobj,
+                                           struct bin_attribute *attr,
+                                           char *buf, loff_t offset,
+                                           size_t size)
+{
+       struct device *dev = container_of(kobj, struct device, kobj);
+       struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
+       struct qlcnic_pci_func_cfg *pci_cfg;
+       struct qlcnic_pci_info *pci_info;
+       int i, ret;
+       u32 count;
+
+       pci_info = kcalloc(size, sizeof(*pci_info), GFP_KERNEL);
+       if (!pci_info)
+               return -ENOMEM;
+
+       ret = qlcnic_get_pci_info(adapter, pci_info);
+       if (ret) {
+               kfree(pci_info);
+               return ret;
+       }
+
+       pci_cfg = (struct qlcnic_pci_func_cfg *)buf;
+       count = size / sizeof(struct qlcnic_pci_func_cfg);
+       qlcnic_swap32_buffer((u32 *)pci_info, size / sizeof(u32));
+       for (i = 0; i < count; i++) {
+               pci_cfg[i].pci_func = pci_info[i].id;
+               pci_cfg[i].func_type = pci_info[i].type;
+               pci_cfg[i].func_state = 0;
+               pci_cfg[i].port_num = pci_info[i].default_port;
+               pci_cfg[i].min_bw = pci_info[i].tx_min_bw;
+               pci_cfg[i].max_bw = pci_info[i].tx_max_bw;
+               memcpy(&pci_cfg[i].def_mac_addr, &pci_info[i].mac, ETH_ALEN);
+       }
+
+       kfree(pci_info);
+       return size;
+}
+
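+/* Read from 83xx flash through sysfs: take the flash lock, read the requested
+ * number of dwords and byte-swap the result for the application.
+ */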
+static ssize_t qlcnic_83xx_sysfs_flash_read_handler(struct file *filp,
+                                                   struct kobject *kobj,
+                                                   struct bin_attribute *attr,
+                                                   char *buf, loff_t offset,
+                                                   size_t size)
+{
+       unsigned char *p_read_buf;
+       int  ret, count;
+       struct device *dev = container_of(kobj, struct device, kobj);
+       struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
+
+       if (!size)
+               return -EINVAL;
+
+       count = size / sizeof(u32);
+
+       if (size % sizeof(u32))
+               count++;
+
+       p_read_buf = kcalloc(size, sizeof(unsigned char), GFP_KERNEL);
+       if (!p_read_buf)
+               return -ENOMEM;
+       if (qlcnic_83xx_lock_flash(adapter) != 0) {
+               kfree(p_read_buf);
+               return -EIO;
+       }
+
+       ret = qlcnic_83xx_lockless_flash_read32(adapter, offset, p_read_buf,
+                                               count);
+
+       if (ret) {
+               qlcnic_83xx_unlock_flash(adapter);
+               kfree(p_read_buf);
+               return ret;
+       }
+
+       qlcnic_83xx_unlock_flash(adapter);
+       qlcnic_swap32_buffer((u32 *)p_read_buf, count);
+       memcpy(buf, p_read_buf, size);
+       kfree(p_read_buf);
+
+       return size;
+}
+
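+/* Program the flash in bursts of QLC_83XX_FLASH_WRITE_MAX dwords, toggling
+ * flash write access when the part matches the FDT manufacturer id.
+ */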
+static int qlcnic_83xx_sysfs_flash_bulk_write(struct qlcnic_adapter *adapter,
+                                             char *buf, loff_t offset,
+                                             size_t size)
+{
+       int  i, ret, count;
+       unsigned char *p_cache, *p_src;
+
+       p_cache = kcalloc(size, sizeof(unsigned char), GFP_KERNEL);
+       if (!p_cache)
+               return -ENOMEM;
+
+       count = size / sizeof(u32);
+       qlcnic_swap32_buffer((u32 *)buf, count);
+       memcpy(p_cache, buf, size);
+       p_src = p_cache;
+
+       if (qlcnic_83xx_lock_flash(adapter) != 0) {
+               kfree(p_cache);
+               return -EIO;
+       }
+
+       if (adapter->ahw->fdt.mfg_id == adapter->flash_mfg_id) {
+               ret = qlcnic_83xx_enable_flash_write(adapter);
+               if (ret) {
+                       kfree(p_cache);
+                       qlcnic_83xx_unlock_flash(adapter);
+                       return -EIO;
+               }
+       }
+
+       for (i = 0; i < count / QLC_83XX_FLASH_WRITE_MAX; i++) {
+               ret = qlcnic_83xx_flash_bulk_write(adapter, offset,
+                                                  (u32 *)p_src,
+                                                  QLC_83XX_FLASH_WRITE_MAX);
+
+               if (ret) {
+                       if (adapter->ahw->fdt.mfg_id == adapter->flash_mfg_id) {
+                               ret = qlcnic_83xx_disable_flash_write(adapter);
+                               if (ret) {
+                                       kfree(p_cache);
+                                       qlcnic_83xx_unlock_flash(adapter);
+                                       return -EIO;
+                               }
+                       }
+
+                       kfree(p_cache);
+                       qlcnic_83xx_unlock_flash(adapter);
+                       return -EIO;
+               }
+
+               p_src = p_src + sizeof(u32)*QLC_83XX_FLASH_WRITE_MAX;
+               offset = offset + sizeof(u32)*QLC_83XX_FLASH_WRITE_MAX;
+       }
+
+       if (adapter->ahw->fdt.mfg_id == adapter->flash_mfg_id) {
+               ret = qlcnic_83xx_disable_flash_write(adapter);
+               if (ret) {
+                       kfree(p_cache);
+                       qlcnic_83xx_unlock_flash(adapter);
+                       return -EIO;
+               }
+       }
+
+       kfree(p_cache);
+       qlcnic_83xx_unlock_flash(adapter);
+
+       return 0;
+}
+
+static int qlcnic_83xx_sysfs_flash_write(struct qlcnic_adapter *adapter,
+                                        char *buf, loff_t offset, size_t size)
+{
+       int  i, ret, count;
+       unsigned char *p_cache, *p_src;
+
+       p_cache = kcalloc(size, sizeof(unsigned char), GFP_KERNEL);
+       if (!p_cache)
+               return -ENOMEM;
+
+       qlcnic_swap32_buffer((u32 *)buf, size / sizeof(u32));
+       memcpy(p_cache, buf, size);
+       p_src = p_cache;
+       count = size / sizeof(u32);
+
+       if (qlcnic_83xx_lock_flash(adapter) != 0) {
+               kfree(p_cache);
+               return -EIO;
+       }
+
+       if (adapter->ahw->fdt.mfg_id == adapter->flash_mfg_id) {
+               ret = qlcnic_83xx_enable_flash_write(adapter);
+               if (ret) {
+                       kfree(p_cache);
+                       qlcnic_83xx_unlock_flash(adapter);
+                       return -EIO;
+               }
+       }
+
+       for (i = 0; i < count; i++) {
+               ret = qlcnic_83xx_flash_write32(adapter, offset, (u32 *)p_src);
+               if (ret) {
+                       if (adapter->ahw->fdt.mfg_id == adapter->flash_mfg_id) {
+                               ret = qlcnic_83xx_disable_flash_write(adapter);
+                               if (ret) {
+                                       kfree(p_cache);
+                                       qlcnic_83xx_unlock_flash(adapter);
+                                       return -EIO;
+                               }
+                       }
+                       kfree(p_cache);
+                       qlcnic_83xx_unlock_flash(adapter);
+                       return -EIO;
+               }
+
+               p_src = p_src + sizeof(u32);
+               offset = offset + sizeof(u32);
+       }
+
+       if (adapter->ahw->fdt.mfg_id == adapter->flash_mfg_id) {
+               ret = qlcnic_83xx_disable_flash_write(adapter);
+               if (ret) {
+                       kfree(p_cache);
+                       qlcnic_83xx_unlock_flash(adapter);
+                       return -EIO;
+               }
+       }
+
+       kfree(p_cache);
+       qlcnic_83xx_unlock_flash(adapter);
+
+       return 0;
+}
+
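+/* The flash write handler is command driven: a sector-erase command is
+ * executed immediately, while the bulk-write and write commands select the
+ * mode applied to the data written by subsequent requests.
+ */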
+static ssize_t qlcnic_83xx_sysfs_flash_write_handler(struct file *filp,
+                                                    struct kobject *kobj,
+                                                    struct bin_attribute *attr,
+                                                    char *buf, loff_t offset,
+                                                    size_t size)
+{
+       int  ret;
+       static int flash_mode;
+       unsigned long data;
+       struct device *dev = container_of(kobj, struct device, kobj);
+       struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
+
+       ret = kstrtoul(buf, 16, &data);
+
+       switch (data) {
+       case QLC_83XX_FLASH_SECTOR_ERASE_CMD:
+               flash_mode = QLC_83XX_ERASE_MODE;
+               ret = qlcnic_83xx_erase_flash_sector(adapter, offset);
+               if (ret) {
+                       dev_err(&adapter->pdev->dev,
+                               "%s failed at %d\n", __func__, __LINE__);
+                       return -EIO;
+               }
+               break;
+
+       case QLC_83XX_FLASH_BULK_WRITE_CMD:
+               flash_mode = QLC_83XX_BULK_WRITE_MODE;
+               break;
+
+       case QLC_83XX_FLASH_WRITE_CMD:
+               flash_mode = QLC_83XX_WRITE_MODE;
+               break;
+       default:
+               if (flash_mode == QLC_83XX_BULK_WRITE_MODE) {
+                       ret = qlcnic_83xx_sysfs_flash_bulk_write(adapter, buf,
+                                                                offset, size);
+                       if (ret) {
+                               dev_err(&adapter->pdev->dev,
+                                       "%s failed at %d\n",
+                                       __func__, __LINE__);
+                               return -EIO;
+                       }
+               }
+
+               if (flash_mode == QLC_83XX_WRITE_MODE) {
+                       ret = qlcnic_83xx_sysfs_flash_write(adapter, buf,
+                                                           offset, size);
+                       if (ret) {
+                               dev_err(&adapter->pdev->dev,
+                                       "%s failed at %d\n", __func__,
+                                       __LINE__);
+                               return -EIO;
+                       }
+               }
+       }
+
+       return size;
+}
+
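+/* sysfs attribute descriptors exported under the adapter's PCI device. */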
+static struct device_attribute dev_attr_bridged_mode = {
+       .attr = {.name = "bridged_mode", .mode = (S_IRUGO | S_IWUSR)},
+       .show = qlcnic_show_bridged_mode,
+       .store = qlcnic_store_bridged_mode,
+};
+
+static struct device_attribute dev_attr_diag_mode = {
+       .attr = {.name = "diag_mode", .mode = (S_IRUGO | S_IWUSR)},
+       .show = qlcnic_show_diag_mode,
+       .store = qlcnic_store_diag_mode,
+};
+
+static struct device_attribute dev_attr_beacon = {
+       .attr = {.name = "beacon", .mode = (S_IRUGO | S_IWUSR)},
+       .show = qlcnic_show_beacon,
+       .store = qlcnic_store_beacon,
+};
+
+static struct bin_attribute bin_attr_crb = {
+       .attr = {.name = "crb", .mode = (S_IRUGO | S_IWUSR)},
+       .size = 0,
+       .read = qlcnic_sysfs_read_crb,
+       .write = qlcnic_sysfs_write_crb,
+};
+
+static struct bin_attribute bin_attr_mem = {
+       .attr = {.name = "mem", .mode = (S_IRUGO | S_IWUSR)},
+       .size = 0,
+       .read = qlcnic_sysfs_read_mem,
+       .write = qlcnic_sysfs_write_mem,
+};
+
+static struct bin_attribute bin_attr_npar_config = {
+       .attr = {.name = "npar_config", .mode = (S_IRUGO | S_IWUSR)},
+       .size = 0,
+       .read = qlcnic_sysfs_read_npar_config,
+       .write = qlcnic_sysfs_write_npar_config,
+};
+
+static struct bin_attribute bin_attr_pci_config = {
+       .attr = {.name = "pci_config", .mode = (S_IRUGO | S_IWUSR)},
+       .size = 0,
+       .read = qlcnic_sysfs_read_pci_config,
+       .write = NULL,
+};
+
+static struct bin_attribute bin_attr_port_stats = {
+       .attr = {.name = "port_stats", .mode = (S_IRUGO | S_IWUSR)},
+       .size = 0,
+       .read = qlcnic_sysfs_get_port_stats,
+       .write = qlcnic_sysfs_clear_port_stats,
+};
+
+static struct bin_attribute bin_attr_esw_stats = {
+       .attr = {.name = "esw_stats", .mode = (S_IRUGO | S_IWUSR)},
+       .size = 0,
+       .read = qlcnic_sysfs_get_esw_stats,
+       .write = qlcnic_sysfs_clear_esw_stats,
+};
+
+static struct bin_attribute bin_attr_esw_config = {
+       .attr = {.name = "esw_config", .mode = (S_IRUGO | S_IWUSR)},
+       .size = 0,
+       .read = qlcnic_sysfs_read_esw_config,
+       .write = qlcnic_sysfs_write_esw_config,
+};
+
+static struct bin_attribute bin_attr_pm_config = {
+       .attr = {.name = "pm_config", .mode = (S_IRUGO | S_IWUSR)},
+       .size = 0,
+       .read = qlcnic_sysfs_read_pm_config,
+       .write = qlcnic_sysfs_write_pm_config,
+};
+
+static struct bin_attribute bin_attr_flash = {
+       .attr = {.name = "flash", .mode = (S_IRUGO | S_IWUSR)},
+       .size = 0,
+       .read = qlcnic_83xx_sysfs_flash_read_handler,
+       .write = qlcnic_83xx_sysfs_flash_write_handler,
+};
+
+#ifdef CONFIG_QLCNIC_HWMON
+
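+/* Report the ASIC temperature sensor through hwmon in millidegrees Celsius. */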
+static ssize_t qlcnic_hwmon_show_temp(struct device *dev,
+                                     struct device_attribute *dev_attr,
+                                     char *buf)
+{
+       struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
+       unsigned int temperature = 0, value = 0;
+
+       if (qlcnic_83xx_check(adapter))
+               value = QLCRDX(adapter->ahw, QLC_83XX_ASIC_TEMP);
+       else if (qlcnic_82xx_check(adapter))
+               value = QLC_SHARED_REG_RD32(adapter, QLCNIC_ASIC_TEMP);
+
+       temperature = qlcnic_get_temp_val(value);
+       /* display millidegrees Celsius */
+       temperature *= 1000;
+       return sprintf(buf, "%u\n", temperature);
+}
+
+/* hwmon-sysfs attributes */
+static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO,
+                         qlcnic_hwmon_show_temp, NULL, 1);
+
+static struct attribute *qlcnic_hwmon_attrs[] = {
+       &sensor_dev_attr_temp1_input.dev_attr.attr,
+       NULL
+};
+
+ATTRIBUTE_GROUPS(qlcnic_hwmon);
+
+void qlcnic_register_hwmon_dev(struct qlcnic_adapter *adapter)
+{
+       struct device *dev = &adapter->pdev->dev;
+       struct device *hwmon_dev;
+
+       /* Skip hwmon registration for a VF device */
+       if (qlcnic_sriov_vf_check(adapter)) {
+               adapter->ahw->hwmon_dev = NULL;
+               return;
+       }
+       hwmon_dev = hwmon_device_register_with_groups(dev, qlcnic_driver_name,
+                                                     adapter,
+                                                     qlcnic_hwmon_groups);
+       if (IS_ERR(hwmon_dev)) {
+               dev_err(dev, "Cannot register with hwmon, err=%ld\n",
+                       PTR_ERR(hwmon_dev));
+               hwmon_dev = NULL;
+       }
+       adapter->ahw->hwmon_dev = hwmon_dev;
+}
+
+void qlcnic_unregister_hwmon_dev(struct qlcnic_adapter *adapter)
+{
+       struct device *hwmon_dev = adapter->ahw->hwmon_dev;
+       if (hwmon_dev) {
+               hwmon_device_unregister(hwmon_dev);
+               adapter->ahw->hwmon_dev = NULL;
+       }
+}
+#endif
+
+void qlcnic_create_sysfs_entries(struct qlcnic_adapter *adapter)
+{
+       struct device *dev = &adapter->pdev->dev;
+
+       if (adapter->ahw->capabilities & QLCNIC_FW_CAPABILITY_BDG)
+               if (device_create_file(dev, &dev_attr_bridged_mode))
+                       dev_warn(dev,
+                                "failed to create bridged_mode sysfs entry\n");
+}
+
+void qlcnic_remove_sysfs_entries(struct qlcnic_adapter *adapter)
+{
+       struct device *dev = &adapter->pdev->dev;
+
+       if (adapter->ahw->capabilities & QLCNIC_FW_CAPABILITY_BDG)
+               device_remove_file(dev, &dev_attr_bridged_mode);
+}
+
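+/* Create the diagnostic sysfs nodes the function is entitled to: port stats
+ * for every function, diag/crb/mem for privileged functions, eswitch config
+ * when eswitching is enabled, and the NPAR, PM and eswitch statistics nodes
+ * only for the management function.
+ */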
+static void qlcnic_create_diag_entries(struct qlcnic_adapter *adapter)
+{
+       struct device *dev = &adapter->pdev->dev;
+
+       if (device_create_bin_file(dev, &bin_attr_port_stats))
+               dev_info(dev, "failed to create port stats sysfs entry");
+
+       if (adapter->ahw->op_mode == QLCNIC_NON_PRIV_FUNC)
+               return;
+       if (device_create_file(dev, &dev_attr_diag_mode))
+               dev_info(dev, "failed to create diag_mode sysfs entry\n");
+       if (device_create_bin_file(dev, &bin_attr_crb))
+               dev_info(dev, "failed to create crb sysfs entry\n");
+       if (device_create_bin_file(dev, &bin_attr_mem))
+               dev_info(dev, "failed to create mem sysfs entry\n");
+
+       if (test_bit(__QLCNIC_MAINTENANCE_MODE, &adapter->state))
+               return;
+
+       if (device_create_bin_file(dev, &bin_attr_pci_config))
+               dev_info(dev, "failed to create pci config sysfs entry");
+
+       if (device_create_file(dev, &dev_attr_beacon))
+               dev_info(dev, "failed to create beacon sysfs entry");
+
+       if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED))
+               return;
+       if (device_create_bin_file(dev, &bin_attr_esw_config))
+               dev_info(dev, "failed to create esw config sysfs entry");
+       if (adapter->ahw->op_mode != QLCNIC_MGMT_FUNC)
+               return;
+       if (device_create_bin_file(dev, &bin_attr_npar_config))
+               dev_info(dev, "failed to create npar config sysfs entry");
+       if (device_create_bin_file(dev, &bin_attr_pm_config))
+               dev_info(dev, "failed to create pm config sysfs entry");
+       if (device_create_bin_file(dev, &bin_attr_esw_stats))
+               dev_info(dev, "failed to create eswitch stats sysfs entry");
+}
+
+static void qlcnic_remove_diag_entries(struct qlcnic_adapter *adapter)
+{
+       struct device *dev = &adapter->pdev->dev;
+
+       device_remove_bin_file(dev, &bin_attr_port_stats);
+
+       if (adapter->ahw->op_mode == QLCNIC_NON_PRIV_FUNC)
+               return;
+       device_remove_file(dev, &dev_attr_diag_mode);
+       device_remove_bin_file(dev, &bin_attr_crb);
+       device_remove_bin_file(dev, &bin_attr_mem);
+
+       if (test_bit(__QLCNIC_MAINTENANCE_MODE, &adapter->state))
+               return;
+
+       device_remove_bin_file(dev, &bin_attr_pci_config);
+       device_remove_file(dev, &dev_attr_beacon);
+       if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED))
+               return;
+       device_remove_bin_file(dev, &bin_attr_esw_config);
+       if (adapter->ahw->op_mode != QLCNIC_MGMT_FUNC)
+               return;
+       device_remove_bin_file(dev, &bin_attr_npar_config);
+       device_remove_bin_file(dev, &bin_attr_pm_config);
+       device_remove_bin_file(dev, &bin_attr_esw_stats);
+}
+
+void qlcnic_82xx_add_sysfs(struct qlcnic_adapter *adapter)
+{
+       qlcnic_create_diag_entries(adapter);
+}
+
+void qlcnic_82xx_remove_sysfs(struct qlcnic_adapter *adapter)
+{
+       qlcnic_remove_diag_entries(adapter);
+}
+
+void qlcnic_83xx_add_sysfs(struct qlcnic_adapter *adapter)
+{
+       struct device *dev = &adapter->pdev->dev;
+
+       qlcnic_create_diag_entries(adapter);
+
+       if (sysfs_create_bin_file(&dev->kobj, &bin_attr_flash))
+               dev_info(dev, "failed to create flash sysfs entry\n");
+}
+
+void qlcnic_83xx_remove_sysfs(struct qlcnic_adapter *adapter)
+{
+       struct device *dev = &adapter->pdev->dev;
+
+       qlcnic_remove_diag_entries(adapter);
+       sysfs_remove_bin_file(&dev->kobj, &bin_attr_flash);
+}
diff --git a/drivers/net/ethernet/qlogic/qlge/Makefile b/drivers/net/ethernet/qlogic/qlge/Makefile
new file mode 100644 (file)
index 0000000..8a19765
--- /dev/null
@@ -0,0 +1,7 @@
+#
+# Makefile for the QLogic 10GbE PCI Express Ethernet driver
+#
+
+obj-$(CONFIG_QLGE) += qlge.o
+
+qlge-objs := qlge_main.o qlge_dbg.o qlge_mpi.o qlge_ethtool.o
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge.h b/drivers/net/ethernet/qlogic/qlge/qlge.h
new file mode 100644 (file)
index 0000000..6d31f92
--- /dev/null
@@ -0,0 +1,2338 @@
+/*
+ * QLogic QLA41xx NIC HBA Driver
+ * Copyright (c)  2003-2006 QLogic Corporation
+ *
+ * See LICENSE.qlge for copyright and licensing details.
+ */
+#ifndef _QLGE_H_
+#define _QLGE_H_
+
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/rtnetlink.h>
+#include <linux/if_vlan.h>
+
+/*
+ * General definitions...
+ */
+#define DRV_NAME       "qlge"
+#define DRV_STRING     "QLogic 10 Gigabit PCI-E Ethernet Driver "
+#define DRV_VERSION    "1.00.00.35"
+
+#define WQ_ADDR_ALIGN  0x3     /* 4 byte alignment */
+
+#define QLGE_VENDOR_ID    0x1077
+#define QLGE_DEVICE_ID_8012    0x8012
+#define QLGE_DEVICE_ID_8000    0x8000
+#define QLGE_MEZZ_SSYS_ID_068  0x0068
+#define QLGE_MEZZ_SSYS_ID_180  0x0180
+#define MAX_CPUS 8
+#define MAX_TX_RINGS MAX_CPUS
+#define MAX_RX_RINGS ((MAX_CPUS * 2) + 1)
+
+#define NUM_TX_RING_ENTRIES    256
+#define NUM_RX_RING_ENTRIES    256
+
+#define NUM_SMALL_BUFFERS   512
+#define NUM_LARGE_BUFFERS   512
+#define DB_PAGE_SIZE 4096
+
+/* Calculate the number of (4k) pages required to
+ * contain a buffer queue of the given length.
+ */
+#define MAX_DB_PAGES_PER_BQ(x) \
+               (((x * sizeof(u64)) / DB_PAGE_SIZE) + \
+               (((x * sizeof(u64)) % DB_PAGE_SIZE) ? 1 : 0))
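+/* For example, a 512-entry buffer queue needs (512 * 8) / 4096 = 1 page. */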
+
+#define RX_RING_SHADOW_SPACE   (sizeof(u64) + \
+               MAX_DB_PAGES_PER_BQ(NUM_SMALL_BUFFERS) * sizeof(u64) + \
+               MAX_DB_PAGES_PER_BQ(NUM_LARGE_BUFFERS) * sizeof(u64))
+#define LARGE_BUFFER_MAX_SIZE 8192
+#define LARGE_BUFFER_MIN_SIZE 2048
+
+#define MAX_CQ 128
+#define DFLT_COALESCE_WAIT 100 /* 100 usec wait for coalescing */
+#define MAX_INTER_FRAME_WAIT 10        /* 10 usec max interframe-wait for coalescing */
+#define DFLT_INTER_FRAME_WAIT (MAX_INTER_FRAME_WAIT/2)
+#define UDELAY_COUNT 3
+#define UDELAY_DELAY 100
+
+
+#define TX_DESC_PER_IOCB 8
+
+#if ((MAX_SKB_FRAGS - TX_DESC_PER_IOCB) + 2) > 0
+#define TX_DESC_PER_OAL ((MAX_SKB_FRAGS - TX_DESC_PER_IOCB) + 2)
+#else /* all other page sizes */
+#define TX_DESC_PER_OAL 0
+#endif
+
+/* Word shifting for converting 64-bit
+ * address to a series of 16-bit words.
+ * This is used for some MPI firmware
+ * mailbox commands.
+ */
+#define LSW(x)  ((u16)(x))
+#define MSW(x)  ((u16)((u32)(x) >> 16))
+#define LSD(x)  ((u32)((u64)(x)))
+#define MSD(x)  ((u32)((((u64)(x)) >> 32)))
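+/* Example: for x = 0x12345678, MSW(x) is 0x1234 and LSW(x) is 0x5678. */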
+
+/* MPI test register definitions. This register
+ * is used for determining alternate NIC function's
+ * PCI->func number.
+ */
+enum {
+       MPI_TEST_FUNC_PORT_CFG = 0x1002,
+       MPI_TEST_FUNC_PRB_CTL = 0x100e,
+               MPI_TEST_FUNC_PRB_EN = 0x18a20000,
+       MPI_TEST_FUNC_RST_STS = 0x100a,
+               MPI_TEST_FUNC_RST_FRC = 0x00000003,
+       MPI_TEST_NIC_FUNC_MASK = 0x00000007,
+       MPI_TEST_NIC1_FUNCTION_ENABLE = (1 << 0),
+       MPI_TEST_NIC1_FUNCTION_MASK = 0x0000000e,
+       MPI_TEST_NIC1_FUNC_SHIFT = 1,
+       MPI_TEST_NIC2_FUNCTION_ENABLE = (1 << 4),
+       MPI_TEST_NIC2_FUNCTION_MASK = 0x000000e0,
+       MPI_TEST_NIC2_FUNC_SHIFT = 5,
+       MPI_TEST_FC1_FUNCTION_ENABLE = (1 << 8),
+       MPI_TEST_FC1_FUNCTION_MASK      = 0x00000e00,
+       MPI_TEST_FC1_FUNCTION_SHIFT = 9,
+       MPI_TEST_FC2_FUNCTION_ENABLE = (1 << 12),
+       MPI_TEST_FC2_FUNCTION_MASK = 0x0000e000,
+       MPI_TEST_FC2_FUNCTION_SHIFT = 13,
+
+       MPI_NIC_READ = 0x00000000,
+       MPI_NIC_REG_BLOCK = 0x00020000,
+       MPI_NIC_FUNCTION_SHIFT = 6,
+};
+
+/*
+ * Processor Address Register (PROC_ADDR) bit definitions.
+ */
+enum {
+
+       /* Misc. stuff */
+       MAILBOX_COUNT = 16,
+       MAILBOX_TIMEOUT = 5,
+
+       PROC_ADDR_RDY = (1 << 31),
+       PROC_ADDR_R = (1 << 30),
+       PROC_ADDR_ERR = (1 << 29),
+       PROC_ADDR_DA = (1 << 28),
+       PROC_ADDR_FUNC0_MBI = 0x00001180,
+       PROC_ADDR_FUNC0_MBO = (PROC_ADDR_FUNC0_MBI + MAILBOX_COUNT),
+       PROC_ADDR_FUNC0_CTL = 0x000011a1,
+       PROC_ADDR_FUNC2_MBI = 0x00001280,
+       PROC_ADDR_FUNC2_MBO = (PROC_ADDR_FUNC2_MBI + MAILBOX_COUNT),
+       PROC_ADDR_FUNC2_CTL = 0x000012a1,
+       PROC_ADDR_MPI_RISC = 0x00000000,
+       PROC_ADDR_MDE = 0x00010000,
+       PROC_ADDR_REGBLOCK = 0x00020000,
+       PROC_ADDR_RISC_REG = 0x00030000,
+};
+
+/*
+ * System Register (SYS) bit definitions.
+ */
+enum {
+       SYS_EFE = (1 << 0),
+       SYS_FAE = (1 << 1),
+       SYS_MDC = (1 << 2),
+       SYS_DST = (1 << 3),
+       SYS_DWC = (1 << 4),
+       SYS_EVW = (1 << 5),
+       SYS_OMP_DLY_MASK = 0x3f000000,
+       /*
+        * There are no values defined as of edit #15.
+        */
+       SYS_ODI = (1 << 14),
+};
+
+/*
+ *  Reset/Failover Register (RST_FO) bit definitions.
+ */
+enum {
+       RST_FO_TFO = (1 << 0),
+       RST_FO_RR_MASK = 0x00060000,
+       RST_FO_RR_CQ_CAM = 0x00000000,
+       RST_FO_RR_DROP = 0x00000002,
+       RST_FO_RR_DQ = 0x00000004,
+       RST_FO_RR_RCV_FUNC_CQ = 0x00000006,
+       RST_FO_FRB = (1 << 12),
+       RST_FO_MOP = (1 << 13),
+       RST_FO_REG = (1 << 14),
+       RST_FO_FR = (1 << 15),
+};
+
+/*
+ * Function Specific Control Register (FSC) bit definitions.
+ */
+enum {
+       FSC_DBRST_MASK = 0x00070000,
+       FSC_DBRST_256 = 0x00000000,
+       FSC_DBRST_512 = 0x00000001,
+       FSC_DBRST_768 = 0x00000002,
+       FSC_DBRST_1024 = 0x00000003,
+       FSC_DBL_MASK = 0x00180000,
+       FSC_DBL_DBRST = 0x00000000,
+       FSC_DBL_MAX_PLD = 0x00000008,
+       FSC_DBL_MAX_BRST = 0x00000010,
+       FSC_DBL_128_BYTES = 0x00000018,
+       FSC_EC = (1 << 5),
+       FSC_EPC_MASK = 0x00c00000,
+       FSC_EPC_INBOUND = (1 << 6),
+       FSC_EPC_OUTBOUND = (1 << 7),
+       FSC_VM_PAGESIZE_MASK = 0x07000000,
+       FSC_VM_PAGE_2K = 0x00000100,
+       FSC_VM_PAGE_4K = 0x00000200,
+       FSC_VM_PAGE_8K = 0x00000300,
+       FSC_VM_PAGE_64K = 0x00000600,
+       FSC_SH = (1 << 11),
+       FSC_DSB = (1 << 12),
+       FSC_STE = (1 << 13),
+       FSC_FE = (1 << 15),
+};
+
+/*
+ *  Host Command Status Register (CSR) bit definitions.
+ */
+enum {
+       CSR_ERR_STS_MASK = 0x0000003f,
+       /*
+        * There are no values defined as of edit #15.
+        */
+       CSR_RR = (1 << 8),
+       CSR_HRI = (1 << 9),
+       CSR_RP = (1 << 10),
+       CSR_CMD_PARM_SHIFT = 22,
+       CSR_CMD_NOP = 0x00000000,
+       CSR_CMD_SET_RST = 0x10000000,
+       CSR_CMD_CLR_RST = 0x20000000,
+       CSR_CMD_SET_PAUSE = 0x30000000,
+       CSR_CMD_CLR_PAUSE = 0x40000000,
+       CSR_CMD_SET_H2R_INT = 0x50000000,
+       CSR_CMD_CLR_H2R_INT = 0x60000000,
+       CSR_CMD_PAR_EN = 0x70000000,
+       CSR_CMD_SET_BAD_PAR = 0x80000000,
+       CSR_CMD_CLR_BAD_PAR = 0x90000000,
+       CSR_CMD_CLR_R2PCI_INT = 0xa0000000,
+};
+
+/*
+ *  Configuration Register (CFG) bit definitions.
+ */
+enum {
+       CFG_LRQ = (1 << 0),
+       CFG_DRQ = (1 << 1),
+       CFG_LR = (1 << 2),
+       CFG_DR = (1 << 3),
+       CFG_LE = (1 << 5),
+       CFG_LCQ = (1 << 6),
+       CFG_DCQ = (1 << 7),
+       CFG_Q_SHIFT = 8,
+       CFG_Q_MASK = 0x7f000000,
+};
+
+/*
+ *  Status Register (STS) bit definitions.
+ */
+enum {
+       STS_FE = (1 << 0),
+       STS_PI = (1 << 1),
+       STS_PL0 = (1 << 2),
+       STS_PL1 = (1 << 3),
+       STS_PI0 = (1 << 4),
+       STS_PI1 = (1 << 5),
+       STS_FUNC_ID_MASK = 0x000000c0,
+       STS_FUNC_ID_SHIFT = 6,
+       STS_F0E = (1 << 8),
+       STS_F1E = (1 << 9),
+       STS_F2E = (1 << 10),
+       STS_F3E = (1 << 11),
+       STS_NFE = (1 << 12),
+};
+
+/*
+ * Interrupt Enable Register (INTR_EN) bit definitions.
+ */
+enum {
+       INTR_EN_INTR_MASK = 0x007f0000,
+       INTR_EN_TYPE_MASK = 0x03000000,
+       INTR_EN_TYPE_ENABLE = 0x00000100,
+       INTR_EN_TYPE_DISABLE = 0x00000200,
+       INTR_EN_TYPE_READ = 0x00000300,
+       INTR_EN_IHD = (1 << 13),
+       INTR_EN_IHD_MASK = (INTR_EN_IHD << 16),
+       INTR_EN_EI = (1 << 14),
+       INTR_EN_EN = (1 << 15),
+};
+
+/*
+ * Interrupt Mask Register (INTR_MASK) bit definitions.
+ */
+enum {
+       INTR_MASK_PI = (1 << 0),
+       INTR_MASK_HL0 = (1 << 1),
+       INTR_MASK_LH0 = (1 << 2),
+       INTR_MASK_HL1 = (1 << 3),
+       INTR_MASK_LH1 = (1 << 4),
+       INTR_MASK_SE = (1 << 5),
+       INTR_MASK_LSC = (1 << 6),
+       INTR_MASK_MC = (1 << 7),
+       INTR_MASK_LINK_IRQS = INTR_MASK_LSC | INTR_MASK_SE | INTR_MASK_MC,
+};
+
+/*
+ *  Register (REV_ID) bit definitions.
+ */
+enum {
+       REV_ID_MASK = 0x0000000f,
+       REV_ID_NICROLL_SHIFT = 0,
+       REV_ID_NICREV_SHIFT = 4,
+       REV_ID_XGROLL_SHIFT = 8,
+       REV_ID_XGREV_SHIFT = 12,
+       REV_ID_CHIPREV_SHIFT = 28,
+};
+
+/*
+ *  Force ECC Error Register (FRC_ECC_ERR) bit definitions.
+ */
+enum {
+       FRC_ECC_ERR_VW = (1 << 12),
+       FRC_ECC_ERR_VB = (1 << 13),
+       FRC_ECC_ERR_NI = (1 << 14),
+       FRC_ECC_ERR_NO = (1 << 15),
+       FRC_ECC_PFE_SHIFT = 16,
+       FRC_ECC_ERR_DO = (1 << 18),
+       FRC_ECC_P14 = (1 << 19),
+};
+
+/*
+ *  Error Status Register (ERR_STS) bit definitions.
+ */
+enum {
+       ERR_STS_NOF = (1 << 0),
+       ERR_STS_NIF = (1 << 1),
+       ERR_STS_DRP = (1 << 2),
+       ERR_STS_XGP = (1 << 3),
+       ERR_STS_FOU = (1 << 4),
+       ERR_STS_FOC = (1 << 5),
+       ERR_STS_FOF = (1 << 6),
+       ERR_STS_FIU = (1 << 7),
+       ERR_STS_FIC = (1 << 8),
+       ERR_STS_FIF = (1 << 9),
+       ERR_STS_MOF = (1 << 10),
+       ERR_STS_TA = (1 << 11),
+       ERR_STS_MA = (1 << 12),
+       ERR_STS_MPE = (1 << 13),
+       ERR_STS_SCE = (1 << 14),
+       ERR_STS_STE = (1 << 15),
+       ERR_STS_FOW = (1 << 16),
+       ERR_STS_UE = (1 << 17),
+       ERR_STS_MCH = (1 << 26),
+       ERR_STS_LOC_SHIFT = 27,
+};
+
+/*
+ *  RAM Debug Address Register (RAM_DBG_ADDR) bit definitions.
+ */
+enum {
+       RAM_DBG_ADDR_FW = (1 << 30),
+       RAM_DBG_ADDR_FR = (1 << 31),
+};
+
+/*
+ * Semaphore Register (SEM) bit definitions.
+ */
+enum {
+       /*
+        * Example:
+        * reg = SEM_XGMAC0_MASK | (SEM_SET << SEM_XGMAC0_SHIFT)
+        */
+       SEM_CLEAR = 0,
+       SEM_SET = 1,
+       SEM_FORCE = 3,
+       SEM_XGMAC0_SHIFT = 0,
+       SEM_XGMAC1_SHIFT = 2,
+       SEM_ICB_SHIFT = 4,
+       SEM_MAC_ADDR_SHIFT = 6,
+       SEM_FLASH_SHIFT = 8,
+       SEM_PROBE_SHIFT = 10,
+       SEM_RT_IDX_SHIFT = 12,
+       SEM_PROC_REG_SHIFT = 14,
+       SEM_XGMAC0_MASK = 0x00030000,
+       SEM_XGMAC1_MASK = 0x000c0000,
+       SEM_ICB_MASK = 0x00300000,
+       SEM_MAC_ADDR_MASK = 0x00c00000,
+       SEM_FLASH_MASK = 0x03000000,
+       SEM_PROBE_MASK = 0x0c000000,
+       SEM_RT_IDX_MASK = 0x30000000,
+       SEM_PROC_REG_MASK = 0xc0000000,
+};
+
+/*
+ *  10G MAC Address  Register (XGMAC_ADDR) bit definitions.
+ */
+enum {
+       XGMAC_ADDR_RDY = (1 << 31),
+       XGMAC_ADDR_R = (1 << 30),
+       XGMAC_ADDR_XME = (1 << 29),
+
+       /* XGMAC control registers */
+       PAUSE_SRC_LO = 0x00000100,
+       PAUSE_SRC_HI = 0x00000104,
+       GLOBAL_CFG = 0x00000108,
+       GLOBAL_CFG_RESET = (1 << 0),
+       GLOBAL_CFG_JUMBO = (1 << 6),
+       GLOBAL_CFG_TX_STAT_EN = (1 << 10),
+       GLOBAL_CFG_RX_STAT_EN = (1 << 11),
+       TX_CFG = 0x0000010c,
+       TX_CFG_RESET = (1 << 0),
+       TX_CFG_EN = (1 << 1),
+       TX_CFG_PREAM = (1 << 2),
+       RX_CFG = 0x00000110,
+       RX_CFG_RESET = (1 << 0),
+       RX_CFG_EN = (1 << 1),
+       RX_CFG_PREAM = (1 << 2),
+       FLOW_CTL = 0x0000011c,
+       PAUSE_OPCODE = 0x00000120,
+       PAUSE_TIMER = 0x00000124,
+       PAUSE_FRM_DEST_LO = 0x00000128,
+       PAUSE_FRM_DEST_HI = 0x0000012c,
+       MAC_TX_PARAMS = 0x00000134,
+       MAC_TX_PARAMS_JUMBO = (1 << 31),
+       MAC_TX_PARAMS_SIZE_SHIFT = 16,
+       MAC_RX_PARAMS = 0x00000138,
+       MAC_SYS_INT = 0x00000144,
+       MAC_SYS_INT_MASK = 0x00000148,
+       MAC_MGMT_INT = 0x0000014c,
+       MAC_MGMT_IN_MASK = 0x00000150,
+       EXT_ARB_MODE = 0x000001fc,
+
+       /* XGMAC TX statistics  registers */
+       TX_PKTS = 0x00000200,
+       TX_BYTES = 0x00000208,
+       TX_MCAST_PKTS = 0x00000210,
+       TX_BCAST_PKTS = 0x00000218,
+       TX_UCAST_PKTS = 0x00000220,
+       TX_CTL_PKTS = 0x00000228,
+       TX_PAUSE_PKTS = 0x00000230,
+       TX_64_PKT = 0x00000238,
+       TX_65_TO_127_PKT = 0x00000240,
+       TX_128_TO_255_PKT = 0x00000248,
+       TX_256_511_PKT = 0x00000250,
+       TX_512_TO_1023_PKT = 0x00000258,
+       TX_1024_TO_1518_PKT = 0x00000260,
+       TX_1519_TO_MAX_PKT = 0x00000268,
+       TX_UNDERSIZE_PKT = 0x00000270,
+       TX_OVERSIZE_PKT = 0x00000278,
+
+       /* XGMAC statistics control registers */
+       RX_HALF_FULL_DET = 0x000002a0,
+       TX_HALF_FULL_DET = 0x000002a4,
+       RX_OVERFLOW_DET = 0x000002a8,
+       TX_OVERFLOW_DET = 0x000002ac,
+       RX_HALF_FULL_MASK = 0x000002b0,
+       TX_HALF_FULL_MASK = 0x000002b4,
+       RX_OVERFLOW_MASK = 0x000002b8,
+       TX_OVERFLOW_MASK = 0x000002bc,
+       STAT_CNT_CTL = 0x000002c0,
+       STAT_CNT_CTL_CLEAR_TX = (1 << 0),
+       STAT_CNT_CTL_CLEAR_RX = (1 << 1),
+       AUX_RX_HALF_FULL_DET = 0x000002d0,
+       AUX_TX_HALF_FULL_DET = 0x000002d4,
+       AUX_RX_OVERFLOW_DET = 0x000002d8,
+       AUX_TX_OVERFLOW_DET = 0x000002dc,
+       AUX_RX_HALF_FULL_MASK = 0x000002f0,
+       AUX_TX_HALF_FULL_MASK = 0x000002f4,
+       AUX_RX_OVERFLOW_MASK = 0x000002f8,
+       AUX_TX_OVERFLOW_MASK = 0x000002fc,
+
+       /* XGMAC RX statistics  registers */
+       RX_BYTES = 0x00000300,
+       RX_BYTES_OK = 0x00000308,
+       RX_PKTS = 0x00000310,
+       RX_PKTS_OK = 0x00000318,
+       RX_BCAST_PKTS = 0x00000320,
+       RX_MCAST_PKTS = 0x00000328,
+       RX_UCAST_PKTS = 0x00000330,
+       RX_UNDERSIZE_PKTS = 0x00000338,
+       RX_OVERSIZE_PKTS = 0x00000340,
+       RX_JABBER_PKTS = 0x00000348,
+       RX_UNDERSIZE_FCERR_PKTS = 0x00000350,
+       RX_DROP_EVENTS = 0x00000358,
+       RX_FCERR_PKTS = 0x00000360,
+       RX_ALIGN_ERR = 0x00000368,
+       RX_SYMBOL_ERR = 0x00000370,
+       RX_MAC_ERR = 0x00000378,
+       RX_CTL_PKTS = 0x00000380,
+       RX_PAUSE_PKTS = 0x00000388,
+       RX_64_PKTS = 0x00000390,
+       RX_65_TO_127_PKTS = 0x00000398,
+       RX_128_255_PKTS = 0x000003a0,
+       RX_256_511_PKTS = 0x000003a8,
+       RX_512_TO_1023_PKTS = 0x000003b0,
+       RX_1024_TO_1518_PKTS = 0x000003b8,
+       RX_1519_TO_MAX_PKTS = 0x000003c0,
+       RX_LEN_ERR_PKTS = 0x000003c8,
+
+       /* XGMAC MDIO control registers */
+       MDIO_TX_DATA = 0x00000400,
+       MDIO_RX_DATA = 0x00000410,
+       MDIO_CMD = 0x00000420,
+       MDIO_PHY_ADDR = 0x00000430,
+       MDIO_PORT = 0x00000440,
+       MDIO_STATUS = 0x00000450,
+
+       XGMAC_REGISTER_END = 0x00000740,
+};
+
+/*
+ *  Enhanced Transmission Schedule Registers (NIC_ETS,CNA_ETS) bit definitions.
+ */
+enum {
+       ETS_QUEUE_SHIFT = 29,
+       ETS_REF = (1 << 26),
+       ETS_RS = (1 << 27),
+       ETS_P = (1 << 28),
+       ETS_FC_COS_SHIFT = 23,
+};
+
+/*
+ *  Flash Address Register (FLASH_ADDR) bit definitions.
+ */
+enum {
+       FLASH_ADDR_RDY = (1 << 31),
+       FLASH_ADDR_R = (1 << 30),
+       FLASH_ADDR_ERR = (1 << 29),
+};
+
+/*
+ *  Stop CQ Processing Register (CQ_STOP) bit definitions.
+ */
+enum {
+       CQ_STOP_QUEUE_MASK = (0x007f0000),
+       CQ_STOP_TYPE_MASK = (0x03000000),
+       CQ_STOP_TYPE_START = 0x00000100,
+       CQ_STOP_TYPE_STOP = 0x00000200,
+       CQ_STOP_TYPE_READ = 0x00000300,
+       CQ_STOP_EN = (1 << 15),
+};
+
+/*
+ *  MAC Protocol Address Index Register (MAC_ADDR_IDX) bit definitions.
+ */
+enum {
+       MAC_ADDR_IDX_SHIFT = 4,
+       MAC_ADDR_TYPE_SHIFT = 16,
+       MAC_ADDR_TYPE_COUNT = 10,
+       MAC_ADDR_TYPE_MASK = 0x000f0000,
+       MAC_ADDR_TYPE_CAM_MAC = 0x00000000,
+       MAC_ADDR_TYPE_MULTI_MAC = 0x00010000,
+       MAC_ADDR_TYPE_VLAN = 0x00020000,
+       MAC_ADDR_TYPE_MULTI_FLTR = 0x00030000,
+       MAC_ADDR_TYPE_FC_MAC = 0x00040000,
+       MAC_ADDR_TYPE_MGMT_MAC = 0x00050000,
+       MAC_ADDR_TYPE_MGMT_VLAN = 0x00060000,
+       MAC_ADDR_TYPE_MGMT_V4 = 0x00070000,
+       MAC_ADDR_TYPE_MGMT_V6 = 0x00080000,
+       MAC_ADDR_TYPE_MGMT_TU_DP = 0x00090000,
+       MAC_ADDR_ADR = (1 << 25),
+       MAC_ADDR_RS = (1 << 26),
+       MAC_ADDR_E = (1 << 27),
+       MAC_ADDR_MR = (1 << 30),
+       MAC_ADDR_MW = (1 << 31),
+       MAX_MULTICAST_ENTRIES = 32,
+
+       /* Entry count and words per entry
+        * for each address type in the filter.
+        */
+       MAC_ADDR_MAX_CAM_ENTRIES = 512,
+       MAC_ADDR_MAX_CAM_WCOUNT = 3,
+       MAC_ADDR_MAX_MULTICAST_ENTRIES = 32,
+       MAC_ADDR_MAX_MULTICAST_WCOUNT = 2,
+       MAC_ADDR_MAX_VLAN_ENTRIES = 4096,
+       MAC_ADDR_MAX_VLAN_WCOUNT = 1,
+       MAC_ADDR_MAX_MCAST_FLTR_ENTRIES = 4096,
+       MAC_ADDR_MAX_MCAST_FLTR_WCOUNT = 1,
+       MAC_ADDR_MAX_FC_MAC_ENTRIES = 4,
+       MAC_ADDR_MAX_FC_MAC_WCOUNT = 2,
+       MAC_ADDR_MAX_MGMT_MAC_ENTRIES = 8,
+       MAC_ADDR_MAX_MGMT_MAC_WCOUNT = 2,
+       MAC_ADDR_MAX_MGMT_VLAN_ENTRIES = 16,
+       MAC_ADDR_MAX_MGMT_VLAN_WCOUNT = 1,
+       MAC_ADDR_MAX_MGMT_V4_ENTRIES = 4,
+       MAC_ADDR_MAX_MGMT_V4_WCOUNT = 1,
+       MAC_ADDR_MAX_MGMT_V6_ENTRIES = 4,
+       MAC_ADDR_MAX_MGMT_V6_WCOUNT = 4,
+       MAC_ADDR_MAX_MGMT_TU_DP_ENTRIES = 4,
+       MAC_ADDR_MAX_MGMT_TU_DP_WCOUNT = 1,
+};
+
+/*
+ *  MAC Protocol Address Index Register (SPLT_HDR) bit definitions.
+ */
+enum {
+       SPLT_HDR_EP = (1 << 31),
+};
+
+/*
+ *  FCoE Receive Configuration Register (FC_RCV_CFG) bit definitions.
+ */
+enum {
+       FC_RCV_CFG_ECT = (1 << 15),
+       FC_RCV_CFG_DFH = (1 << 20),
+       FC_RCV_CFG_DVF = (1 << 21),
+       FC_RCV_CFG_RCE = (1 << 27),
+       FC_RCV_CFG_RFE = (1 << 28),
+       FC_RCV_CFG_TEE = (1 << 29),
+       FC_RCV_CFG_TCE = (1 << 30),
+       FC_RCV_CFG_TFE = (1 << 31),
+};
+
+/*
+ *  NIC Receive Configuration Register (NIC_RCV_CFG) bit definitions.
+ */
+enum {
+       NIC_RCV_CFG_PPE = (1 << 0),
+       NIC_RCV_CFG_VLAN_MASK = 0x00060000,
+       NIC_RCV_CFG_VLAN_ALL = 0x00000000,
+       NIC_RCV_CFG_VLAN_MATCH_ONLY = 0x00000002,
+       NIC_RCV_CFG_VLAN_MATCH_AND_NON = 0x00000004,
+       NIC_RCV_CFG_VLAN_NONE_AND_NON = 0x00000006,
+       NIC_RCV_CFG_RV = (1 << 3),
+       NIC_RCV_CFG_DFQ_MASK = (0x7f000000),
+       NIC_RCV_CFG_DFQ_SHIFT = 8,
+       NIC_RCV_CFG_DFQ = 0,    /* HARDCODE default queue to 0. */
+};
+
+/*
+ *   Mgmt Receive Configuration Register (MGMT_RCV_CFG) bit definitions.
+ */
+enum {
+       MGMT_RCV_CFG_ARP = (1 << 0),
+       MGMT_RCV_CFG_DHC = (1 << 1),
+       MGMT_RCV_CFG_DHS = (1 << 2),
+       MGMT_RCV_CFG_NP = (1 << 3),
+       MGMT_RCV_CFG_I6N = (1 << 4),
+       MGMT_RCV_CFG_I6R = (1 << 5),
+       MGMT_RCV_CFG_DH6 = (1 << 6),
+       MGMT_RCV_CFG_UD1 = (1 << 7),
+       MGMT_RCV_CFG_UD0 = (1 << 8),
+       MGMT_RCV_CFG_BCT = (1 << 9),
+       MGMT_RCV_CFG_MCT = (1 << 10),
+       MGMT_RCV_CFG_DM = (1 << 11),
+       MGMT_RCV_CFG_RM = (1 << 12),
+       MGMT_RCV_CFG_STL = (1 << 13),
+       MGMT_RCV_CFG_VLAN_MASK = 0xc0000000,
+       MGMT_RCV_CFG_VLAN_ALL = 0x00000000,
+       MGMT_RCV_CFG_VLAN_MATCH_ONLY = 0x00004000,
+       MGMT_RCV_CFG_VLAN_MATCH_AND_NON = 0x00008000,
+       MGMT_RCV_CFG_VLAN_NONE_AND_NON = 0x0000c000,
+};
+
+/*
+ *  Routing Index Register (RT_IDX) bit definitions.
+ */
+enum {
+       RT_IDX_IDX_SHIFT = 8,
+       RT_IDX_TYPE_MASK = 0x000f0000,
+       RT_IDX_TYPE_SHIFT = 16,
+       RT_IDX_TYPE_RT = 0x00000000,
+       RT_IDX_TYPE_RT_INV = 0x00010000,
+       RT_IDX_TYPE_NICQ = 0x00020000,
+       RT_IDX_TYPE_NICQ_INV = 0x00030000,
+       RT_IDX_DST_MASK = 0x00700000,
+       RT_IDX_DST_RSS = 0x00000000,
+       RT_IDX_DST_CAM_Q = 0x00100000,
+       RT_IDX_DST_COS_Q = 0x00200000,
+       RT_IDX_DST_DFLT_Q = 0x00300000,
+       RT_IDX_DST_DEST_Q = 0x00400000,
+       RT_IDX_RS = (1 << 26),
+       RT_IDX_E = (1 << 27),
+       RT_IDX_MR = (1 << 30),
+       RT_IDX_MW = (1 << 31),
+
+       /* Nic Queue format - type 2 bits */
+       RT_IDX_BCAST = (1 << 0),
+       RT_IDX_MCAST = (1 << 1),
+       RT_IDX_MCAST_MATCH = (1 << 2),
+       RT_IDX_MCAST_REG_MATCH = (1 << 3),
+       RT_IDX_MCAST_HASH_MATCH = (1 << 4),
+       RT_IDX_FC_MACH = (1 << 5),
+       RT_IDX_ETH_FCOE = (1 << 6),
+       RT_IDX_CAM_HIT = (1 << 7),
+       RT_IDX_CAM_BIT0 = (1 << 8),
+       RT_IDX_CAM_BIT1 = (1 << 9),
+       RT_IDX_VLAN_TAG = (1 << 10),
+       RT_IDX_VLAN_MATCH = (1 << 11),
+       RT_IDX_VLAN_FILTER = (1 << 12),
+       RT_IDX_ETH_SKIP1 = (1 << 13),
+       RT_IDX_ETH_SKIP2 = (1 << 14),
+       RT_IDX_BCAST_MCAST_MATCH = (1 << 15),
+       RT_IDX_802_3 = (1 << 16),
+       RT_IDX_LLDP = (1 << 17),
+       RT_IDX_UNUSED018 = (1 << 18),
+       RT_IDX_UNUSED019 = (1 << 19),
+       RT_IDX_UNUSED20 = (1 << 20),
+       RT_IDX_UNUSED21 = (1 << 21),
+       RT_IDX_ERR = (1 << 22),
+       RT_IDX_VALID = (1 << 23),
+       RT_IDX_TU_CSUM_ERR = (1 << 24),
+       RT_IDX_IP_CSUM_ERR = (1 << 25),
+       RT_IDX_MAC_ERR = (1 << 26),
+       RT_IDX_RSS_TCP6 = (1 << 27),
+       RT_IDX_RSS_TCP4 = (1 << 28),
+       RT_IDX_RSS_IPV6 = (1 << 29),
+       RT_IDX_RSS_IPV4 = (1 << 30),
+       RT_IDX_RSS_MATCH = (1 << 31),
+
+       /* Hierarchy for the NIC Queue Mask */
+       RT_IDX_ALL_ERR_SLOT = 0,
+       RT_IDX_MAC_ERR_SLOT = 0,
+       RT_IDX_IP_CSUM_ERR_SLOT = 1,
+       RT_IDX_TCP_UDP_CSUM_ERR_SLOT = 2,
+       RT_IDX_BCAST_SLOT = 3,
+       RT_IDX_MCAST_MATCH_SLOT = 4,
+       RT_IDX_ALLMULTI_SLOT = 5,
+       RT_IDX_UNUSED6_SLOT = 6,
+       RT_IDX_UNUSED7_SLOT = 7,
+       RT_IDX_RSS_MATCH_SLOT = 8,
+       RT_IDX_RSS_IPV4_SLOT = 8,
+       RT_IDX_RSS_IPV6_SLOT = 9,
+       RT_IDX_RSS_TCP4_SLOT = 10,
+       RT_IDX_RSS_TCP6_SLOT = 11,
+       RT_IDX_CAM_HIT_SLOT = 12,
+       RT_IDX_UNUSED013 = 13,
+       RT_IDX_UNUSED014 = 14,
+       RT_IDX_PROMISCUOUS_SLOT = 15,
+       RT_IDX_MAX_RT_SLOTS = 8,
+       RT_IDX_MAX_NIC_SLOTS = 16,
+};
+
+/*
+ * Serdes Address Register (XG_SERDES_ADDR) bit definitions.
+ */
+enum {
+       XG_SERDES_ADDR_RDY = (1 << 31),
+       XG_SERDES_ADDR_R = (1 << 30),
+
+       XG_SERDES_ADDR_STS = 0x00001E06,
+       XG_SERDES_ADDR_XFI1_PWR_UP = 0x00000005,
+       XG_SERDES_ADDR_XFI2_PWR_UP = 0x0000000a,
+       XG_SERDES_ADDR_XAUI_PWR_DOWN = 0x00000001,
+
+       /* Serdes coredump definitions. */
+       XG_SERDES_XAUI_AN_START = 0x00000000,
+       XG_SERDES_XAUI_AN_END = 0x00000034,
+       XG_SERDES_XAUI_HSS_PCS_START = 0x00000800,
+       XG_SERDES_XAUI_HSS_PCS_END = 0x0000880,
+       XG_SERDES_XFI_AN_START = 0x00001000,
+       XG_SERDES_XFI_AN_END = 0x00001034,
+       XG_SERDES_XFI_TRAIN_START = 0x10001050,
+       XG_SERDES_XFI_TRAIN_END = 0x1000107C,
+       XG_SERDES_XFI_HSS_PCS_START = 0x00001800,
+       XG_SERDES_XFI_HSS_PCS_END = 0x00001838,
+       XG_SERDES_XFI_HSS_TX_START = 0x00001c00,
+       XG_SERDES_XFI_HSS_TX_END = 0x00001c1f,
+       XG_SERDES_XFI_HSS_RX_START = 0x00001c40,
+       XG_SERDES_XFI_HSS_RX_END = 0x00001c5f,
+       XG_SERDES_XFI_HSS_PLL_START = 0x00001e00,
+       XG_SERDES_XFI_HSS_PLL_END = 0x00001e1f,
+};
+
+/*
+ *  NIC Probe Mux Address Register (PRB_MX_ADDR) bit definitions.
+ */
+enum {
+       PRB_MX_ADDR_ARE = (1 << 16),
+       PRB_MX_ADDR_UP = (1 << 15),
+       PRB_MX_ADDR_SWP = (1 << 14),
+
+       /* Module select values. */
+       PRB_MX_ADDR_MAX_MODS = 21,
+       PRB_MX_ADDR_MOD_SEL_SHIFT = 9,
+       PRB_MX_ADDR_MOD_SEL_TBD = 0,
+       PRB_MX_ADDR_MOD_SEL_IDE1 = 1,
+       PRB_MX_ADDR_MOD_SEL_IDE2 = 2,
+       PRB_MX_ADDR_MOD_SEL_FRB = 3,
+       PRB_MX_ADDR_MOD_SEL_ODE1 = 4,
+       PRB_MX_ADDR_MOD_SEL_ODE2 = 5,
+       PRB_MX_ADDR_MOD_SEL_DA1 = 6,
+       PRB_MX_ADDR_MOD_SEL_DA2 = 7,
+       PRB_MX_ADDR_MOD_SEL_IMP1 = 8,
+       PRB_MX_ADDR_MOD_SEL_IMP2 = 9,
+       PRB_MX_ADDR_MOD_SEL_OMP1 = 10,
+       PRB_MX_ADDR_MOD_SEL_OMP2 = 11,
+       PRB_MX_ADDR_MOD_SEL_ORS1 = 12,
+       PRB_MX_ADDR_MOD_SEL_ORS2 = 13,
+       PRB_MX_ADDR_MOD_SEL_REG = 14,
+       PRB_MX_ADDR_MOD_SEL_MAC1 = 16,
+       PRB_MX_ADDR_MOD_SEL_MAC2 = 17,
+       PRB_MX_ADDR_MOD_SEL_VQM1 = 18,
+       PRB_MX_ADDR_MOD_SEL_VQM2 = 19,
+       PRB_MX_ADDR_MOD_SEL_MOP = 20,
+       /* Bit fields indicating which modules
+        * are valid for each clock domain.
+        */
+       PRB_MX_ADDR_VALID_SYS_MOD = 0x000f7ff7,
+       PRB_MX_ADDR_VALID_PCI_MOD = 0x000040c1,
+       PRB_MX_ADDR_VALID_XGM_MOD = 0x00037309,
+       PRB_MX_ADDR_VALID_FC_MOD = 0x00003001,
+       PRB_MX_ADDR_VALID_TOTAL = 34,
+
+       /* Clock domain values. */
+       PRB_MX_ADDR_CLOCK_SHIFT = 6,
+       PRB_MX_ADDR_SYS_CLOCK = 0,
+       PRB_MX_ADDR_PCI_CLOCK = 2,
+       PRB_MX_ADDR_FC_CLOCK = 5,
+       PRB_MX_ADDR_XGM_CLOCK = 6,
+
+       PRB_MX_ADDR_MAX_MUX = 64,
+};
+
+/*
+ * Control Register Set Map
+ */
+enum {
+       PROC_ADDR = 0,          /* Use semaphore */
+       PROC_DATA = 0x04,       /* Use semaphore */
+       SYS = 0x08,
+       RST_FO = 0x0c,
+       FSC = 0x10,
+       CSR = 0x14,
+       LED = 0x18,
+       ICB_RID = 0x1c,         /* Use semaphore */
+       ICB_L = 0x20,           /* Use semaphore */
+       ICB_H = 0x24,           /* Use semaphore */
+       CFG = 0x28,
+       BIOS_ADDR = 0x2c,
+       STS = 0x30,
+       INTR_EN = 0x34,
+       INTR_MASK = 0x38,
+       ISR1 = 0x3c,
+       ISR2 = 0x40,
+       ISR3 = 0x44,
+       ISR4 = 0x48,
+       REV_ID = 0x4c,
+       FRC_ECC_ERR = 0x50,
+       ERR_STS = 0x54,
+       RAM_DBG_ADDR = 0x58,
+       RAM_DBG_DATA = 0x5c,
+       ECC_ERR_CNT = 0x60,
+       SEM = 0x64,
+       GPIO_1 = 0x68,          /* Use semaphore */
+       GPIO_2 = 0x6c,          /* Use semaphore */
+       GPIO_3 = 0x70,          /* Use semaphore */
+       RSVD2 = 0x74,
+       XGMAC_ADDR = 0x78,      /* Use semaphore */
+       XGMAC_DATA = 0x7c,      /* Use semaphore */
+       NIC_ETS = 0x80,
+       CNA_ETS = 0x84,
+       FLASH_ADDR = 0x88,      /* Use semaphore */
+       FLASH_DATA = 0x8c,      /* Use semaphore */
+       CQ_STOP = 0x90,
+       PAGE_TBL_RID = 0x94,
+       WQ_PAGE_TBL_LO = 0x98,
+       WQ_PAGE_TBL_HI = 0x9c,
+       CQ_PAGE_TBL_LO = 0xa0,
+       CQ_PAGE_TBL_HI = 0xa4,
+       MAC_ADDR_IDX = 0xa8,    /* Use semaphore */
+       MAC_ADDR_DATA = 0xac,   /* Use semaphore */
+       COS_DFLT_CQ1 = 0xb0,
+       COS_DFLT_CQ2 = 0xb4,
+       ETYPE_SKIP1 = 0xb8,
+       ETYPE_SKIP2 = 0xbc,
+       SPLT_HDR = 0xc0,
+       FC_PAUSE_THRES = 0xc4,
+       NIC_PAUSE_THRES = 0xc8,
+       FC_ETHERTYPE = 0xcc,
+       FC_RCV_CFG = 0xd0,
+       NIC_RCV_CFG = 0xd4,
+       FC_COS_TAGS = 0xd8,
+       NIC_COS_TAGS = 0xdc,
+       MGMT_RCV_CFG = 0xe0,
+       RT_IDX = 0xe4,
+       RT_DATA = 0xe8,
+       RSVD7 = 0xec,
+       XG_SERDES_ADDR = 0xf0,
+       XG_SERDES_DATA = 0xf4,
+       PRB_MX_ADDR = 0xf8,     /* Use semaphore */
+       PRB_MX_DATA = 0xfc,     /* Use semaphore */
+};
+
+#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
+#define SMALL_BUFFER_SIZE 256
+#define SMALL_BUF_MAP_SIZE SMALL_BUFFER_SIZE
+#define SPLT_SETTING  FSC_DBRST_1024
+#define SPLT_LEN 0
+#define QLGE_SB_PAD 0
+#else
+#define SMALL_BUFFER_SIZE 512
+#define SMALL_BUF_MAP_SIZE (SMALL_BUFFER_SIZE / 2)
+#define SPLT_SETTING  FSC_SH
+#define SPLT_LEN (SPLT_HDR_EP | \
+       min(SMALL_BUF_MAP_SIZE, 1023))
+#define QLGE_SB_PAD 32
+#endif
+
+/*
+ * CAM output format.
+ */
+enum {
+       CAM_OUT_ROUTE_FC = 0,
+       CAM_OUT_ROUTE_NIC = 1,
+       CAM_OUT_FUNC_SHIFT = 2,
+       CAM_OUT_RV = (1 << 4),
+       CAM_OUT_SH = (1 << 15),
+       CAM_OUT_CQ_ID_SHIFT = 5,
+};
+
+/*
+ * Mailbox definitions
+ */
+enum {
+       /* Asynchronous Event Notifications */
+       AEN_SYS_ERR = 0x00008002,
+       AEN_LINK_UP = 0x00008011,
+       AEN_LINK_DOWN = 0x00008012,
+       AEN_IDC_CMPLT = 0x00008100,
+       AEN_IDC_REQ = 0x00008101,
+       AEN_IDC_EXT = 0x00008102,
+       AEN_DCBX_CHG = 0x00008110,
+       AEN_AEN_LOST = 0x00008120,
+       AEN_AEN_SFP_IN = 0x00008130,
+       AEN_AEN_SFP_OUT = 0x00008131,
+       AEN_FW_INIT_DONE = 0x00008400,
+       AEN_FW_INIT_FAIL = 0x00008401,
+
+       /* Mailbox Command Opcodes. */
+       MB_CMD_NOP = 0x00000000,
+       MB_CMD_EX_FW = 0x00000002,
+       MB_CMD_MB_TEST = 0x00000006,
+       MB_CMD_CSUM_TEST = 0x00000007,  /* Verify Checksum */
+       MB_CMD_ABOUT_FW = 0x00000008,
+       MB_CMD_COPY_RISC_RAM = 0x0000000a,
+       MB_CMD_LOAD_RISC_RAM = 0x0000000b,
+       MB_CMD_DUMP_RISC_RAM = 0x0000000c,
+       MB_CMD_WRITE_RAM = 0x0000000d,
+       MB_CMD_INIT_RISC_RAM = 0x0000000e,
+       MB_CMD_READ_RAM = 0x0000000f,
+       MB_CMD_STOP_FW = 0x00000014,
+       MB_CMD_MAKE_SYS_ERR = 0x0000002a,
+       MB_CMD_WRITE_SFP = 0x00000030,
+       MB_CMD_READ_SFP = 0x00000031,
+       MB_CMD_INIT_FW = 0x00000060,
+       MB_CMD_GET_IFCB = 0x00000061,
+       MB_CMD_GET_FW_STATE = 0x00000069,
+       MB_CMD_IDC_REQ = 0x00000100,    /* Inter-Driver Communication */
+       MB_CMD_IDC_ACK = 0x00000101,    /* Inter-Driver Communication */
+       MB_CMD_SET_WOL_MODE = 0x00000110,       /* Wake On Lan */
+       MB_WOL_DISABLE = 0,
+       MB_WOL_MAGIC_PKT = (1 << 1),
+       MB_WOL_FLTR = (1 << 2),
+       MB_WOL_UCAST = (1 << 3),
+       MB_WOL_MCAST = (1 << 4),
+       MB_WOL_BCAST = (1 << 5),
+       MB_WOL_LINK_UP = (1 << 6),
+       MB_WOL_LINK_DOWN = (1 << 7),
+       MB_WOL_MODE_ON = (1 << 16),             /* Wake on Lan Mode on */
+       MB_CMD_SET_WOL_FLTR = 0x00000111,       /* Wake On Lan Filter */
+       MB_CMD_CLEAR_WOL_FLTR = 0x00000112, /* Wake On Lan Filter */
+       MB_CMD_SET_WOL_MAGIC = 0x00000113,      /* Wake On Lan Magic Packet */
+       MB_CMD_CLEAR_WOL_MAGIC = 0x00000114,/* Wake On Lan Magic Packet */
+       MB_CMD_SET_WOL_IMMED = 0x00000115,
+       MB_CMD_PORT_RESET = 0x00000120,
+       MB_CMD_SET_PORT_CFG = 0x00000122,
+       MB_CMD_GET_PORT_CFG = 0x00000123,
+       MB_CMD_GET_LINK_STS = 0x00000124,
+       MB_CMD_SET_LED_CFG = 0x00000125, /* Set LED Configuration Register */
+               QL_LED_BLINK = 0x03e803e8,
+       MB_CMD_GET_LED_CFG = 0x00000126, /* Get LED Configuration Register */
+       MB_CMD_SET_MGMNT_TFK_CTL = 0x00000160, /* Set Mgmnt Traffic Control */
+       MB_SET_MPI_TFK_STOP = (1 << 0),
+       MB_SET_MPI_TFK_RESUME = (1 << 1),
+       MB_CMD_GET_MGMNT_TFK_CTL = 0x00000161, /* Get Mgmnt Traffic Control */
+       MB_GET_MPI_TFK_STOPPED = (1 << 0),
+       MB_GET_MPI_TFK_FIFO_EMPTY = (1 << 1),
+       /* Sub-commands for IDC request.
+        * This describes the reason for the
+        * IDC request.
+        */
+       MB_CMD_IOP_NONE = 0x0000,
+       MB_CMD_IOP_PREP_UPDATE_MPI      = 0x0001,
+       MB_CMD_IOP_COMP_UPDATE_MPI      = 0x0002,
+       MB_CMD_IOP_PREP_LINK_DOWN       = 0x0010,
+       MB_CMD_IOP_DVR_START     = 0x0100,
+       MB_CMD_IOP_FLASH_ACC     = 0x0101,
+       MB_CMD_IOP_RESTART_MPI  = 0x0102,
+       MB_CMD_IOP_CORE_DUMP_MPI        = 0x0103,
+
+       /* Mailbox Command Status. */
+       MB_CMD_STS_GOOD = 0x00004000,   /* Success. */
+       MB_CMD_STS_INTRMDT = 0x00001000,        /* Intermediate Complete. */
+       MB_CMD_STS_INVLD_CMD = 0x00004001,      /* Invalid. */
+       MB_CMD_STS_XFC_ERR = 0x00004002,        /* Interface Error. */
+       MB_CMD_STS_CSUM_ERR = 0x00004003,       /* Csum Error. */
+       MB_CMD_STS_ERR = 0x00004005,    /* System Error. */
+       MB_CMD_STS_PARAM_ERR = 0x00004006,      /* Parameter Error. */
+};
+
+struct mbox_params {
+       u32 mbox_in[MAILBOX_COUNT];
+       u32 mbox_out[MAILBOX_COUNT];
+       int in_count;
+       int out_count;
+};
+
+struct flash_params_8012 {
+       u8 dev_id_str[4];
+       __le16 size;
+       __le16 csum;
+       __le16 ver;
+       __le16 sub_dev_id;
+       u8 mac_addr[6];
+       __le16 res;
+};
+
+/* The 8000 device's flash uses a different structure
+ * at a different offset in flash.
+ */
+#define FUNC0_FLASH_OFFSET 0x140200
+#define FUNC1_FLASH_OFFSET 0x140600
+
+/* Flash related data structures. */
+struct flash_params_8000 {
+       u8 dev_id_str[4];       /* "8000" */
+       __le16 ver;
+       __le16 size;
+       __le16 csum;
+       __le16 reserved0;
+       __le16 total_size;
+       __le16 entry_count;
+       u8 data_type0;
+       u8 data_size0;
+       u8 mac_addr[6];
+       u8 data_type1;
+       u8 data_size1;
+       u8 mac_addr1[6];
+       u8 data_type2;
+       u8 data_size2;
+       __le16 vlan_id;
+       u8 data_type3;
+       u8 data_size3;
+       __le16 last;
+       u8 reserved1[464];
+       __le16  subsys_ven_id;
+       __le16  subsys_dev_id;
+       u8 reserved2[4];
+};
+
+union flash_params {
+       struct flash_params_8012 flash_params_8012;
+       struct flash_params_8000 flash_params_8000;
+};
+
+/*
+ * doorbell space for the rx ring context
+ */
+struct rx_doorbell_context {
+       u32 cnsmr_idx;          /* 0x00 */
+       u32 valid;              /* 0x04 */
+       u32 reserved[4];        /* 0x08-0x14 */
+       u32 lbq_prod_idx;       /* 0x18 */
+       u32 sbq_prod_idx;       /* 0x1c */
+};
+
+/*
+ * doorbell space for the tx ring context
+ */
+struct tx_doorbell_context {
+       u32 prod_idx;           /* 0x00 */
+       u32 valid;              /* 0x04 */
+       u32 reserved[4];        /* 0x08-0x14 */
+       u32 lbq_prod_idx;       /* 0x18 */
+       u32 sbq_prod_idx;       /* 0x1c */
+};
+
+/* DATA STRUCTURES SHARED WITH HARDWARE. */
+struct tx_buf_desc {
+       __le64 addr;
+       __le32 len;
+#define TX_DESC_LEN_MASK       0x000fffff
+#define TX_DESC_C      0x40000000
+#define TX_DESC_E      0x80000000
+} __packed;
+
+/*
+ * IOCB Definitions...
+ */
+
+#define OPCODE_OB_MAC_IOCB                     0x01
+#define OPCODE_OB_MAC_TSO_IOCB         0x02
+#define OPCODE_IB_MAC_IOCB                     0x20
+#define OPCODE_IB_MPI_IOCB                     0x21
+#define OPCODE_IB_AE_IOCB                      0x3f
+
+struct ob_mac_iocb_req {
+       u8 opcode;
+       u8 flags1;
+#define OB_MAC_IOCB_REQ_OI     0x01
+#define OB_MAC_IOCB_REQ_I      0x02
+#define OB_MAC_IOCB_REQ_D      0x08
+#define OB_MAC_IOCB_REQ_F      0x10
+       u8 flags2;
+       u8 flags3;
+#define OB_MAC_IOCB_DFP        0x02
+#define OB_MAC_IOCB_V  0x04
+       __le32 reserved1[2];
+       __le16 frame_len;
+#define OB_MAC_IOCB_LEN_MASK 0x3ffff
+       __le16 reserved2;
+       u32 tid;
+       u32 txq_idx;
+       __le32 reserved3;
+       __le16 vlan_tci;
+       __le16 reserved4;
+       struct tx_buf_desc tbd[TX_DESC_PER_IOCB];
+} __packed;
+
+struct ob_mac_iocb_rsp {
+       u8 opcode;              /* */
+       u8 flags1;              /* */
+#define OB_MAC_IOCB_RSP_OI     0x01    /* */
+#define OB_MAC_IOCB_RSP_I      0x02    /* */
+#define OB_MAC_IOCB_RSP_E      0x08    /* */
+#define OB_MAC_IOCB_RSP_S      0x10    /* too Short */
+#define OB_MAC_IOCB_RSP_L      0x20    /* too Large */
+#define OB_MAC_IOCB_RSP_P      0x40    /* Padded */
+       u8 flags2;              /* */
+       u8 flags3;              /* */
+#define OB_MAC_IOCB_RSP_B      0x80    /* */
+       u32 tid;
+       u32 txq_idx;
+       __le32 reserved[13];
+} __packed;
+
+struct ob_mac_tso_iocb_req {
+       u8 opcode;
+       u8 flags1;
+#define OB_MAC_TSO_IOCB_OI     0x01
+#define OB_MAC_TSO_IOCB_I      0x02
+#define OB_MAC_TSO_IOCB_D      0x08
+#define OB_MAC_TSO_IOCB_IP4    0x40
+#define OB_MAC_TSO_IOCB_IP6    0x80
+       u8 flags2;
+#define OB_MAC_TSO_IOCB_LSO    0x20
+#define OB_MAC_TSO_IOCB_UC     0x40
+#define OB_MAC_TSO_IOCB_TC     0x80
+       u8 flags3;
+#define OB_MAC_TSO_IOCB_IC     0x01
+#define OB_MAC_TSO_IOCB_DFP    0x02
+#define OB_MAC_TSO_IOCB_V      0x04
+       __le32 reserved1[2];
+       __le32 frame_len;
+       u32 tid;
+       u32 txq_idx;
+       __le16 total_hdrs_len;
+       __le16 net_trans_offset;
+#define OB_MAC_TRANSPORT_HDR_SHIFT 6
+       __le16 vlan_tci;
+       __le16 mss;
+       struct tx_buf_desc tbd[TX_DESC_PER_IOCB];
+} __packed;
+
+struct ob_mac_tso_iocb_rsp {
+       u8 opcode;
+       u8 flags1;
+#define OB_MAC_TSO_IOCB_RSP_OI 0x01
+#define OB_MAC_TSO_IOCB_RSP_I  0x02
+#define OB_MAC_TSO_IOCB_RSP_E  0x08
+#define OB_MAC_TSO_IOCB_RSP_S  0x10
+#define OB_MAC_TSO_IOCB_RSP_L  0x20
+#define OB_MAC_TSO_IOCB_RSP_P  0x40
+       u8 flags2;              /* */
+       u8 flags3;              /* */
+#define OB_MAC_TSO_IOCB_RSP_B  0x8000
+       u32 tid;
+       u32 txq_idx;
+       __le32 reserved2[13];
+} __packed;
+
+struct ib_mac_iocb_rsp {
+       u8 opcode;              /* 0x20 */
+       u8 flags1;
+#define IB_MAC_IOCB_RSP_OI     0x01    /* Override intr delay */
+#define IB_MAC_IOCB_RSP_I      0x02    /* Disable Intr Generation */
+#define IB_MAC_CSUM_ERR_MASK 0x1c      /* A mask to use for csum errs */
+#define IB_MAC_IOCB_RSP_TE     0x04    /* Checksum error */
+#define IB_MAC_IOCB_RSP_NU     0x08    /* No checksum rcvd */
+#define IB_MAC_IOCB_RSP_IE     0x10    /* IPv4 checksum error */
+#define IB_MAC_IOCB_RSP_M_MASK 0x60    /* Multicast info */
+#define IB_MAC_IOCB_RSP_M_NONE 0x00    /* Not mcast frame */
+#define IB_MAC_IOCB_RSP_M_HASH 0x20    /* HASH mcast frame */
+#define IB_MAC_IOCB_RSP_M_REG  0x40    /* Registered mcast frame */
+#define IB_MAC_IOCB_RSP_M_PROM         0x60    /* Promiscuous mcast frame */
+#define IB_MAC_IOCB_RSP_B      0x80    /* Broadcast frame */
+       u8 flags2;
+#define IB_MAC_IOCB_RSP_P      0x01    /* Promiscuous frame */
+#define IB_MAC_IOCB_RSP_V      0x02    /* Vlan tag present */
+#define IB_MAC_IOCB_RSP_ERR_MASK       0x1c    /*  */
+#define IB_MAC_IOCB_RSP_ERR_CODE_ERR   0x04
+#define IB_MAC_IOCB_RSP_ERR_OVERSIZE   0x08
+#define IB_MAC_IOCB_RSP_ERR_UNDERSIZE  0x10
+#define IB_MAC_IOCB_RSP_ERR_PREAMBLE   0x14
+#define IB_MAC_IOCB_RSP_ERR_FRAME_LEN  0x18
+#define IB_MAC_IOCB_RSP_ERR_CRC                0x1c
+#define IB_MAC_IOCB_RSP_U      0x20    /* UDP packet */
+#define IB_MAC_IOCB_RSP_T      0x40    /* TCP packet */
+#define IB_MAC_IOCB_RSP_FO     0x80    /* Failover port */
+       u8 flags3;
+#define IB_MAC_IOCB_RSP_RSS_MASK       0x07    /* RSS mask */
+#define IB_MAC_IOCB_RSP_M_NONE 0x00    /* No RSS match */
+#define IB_MAC_IOCB_RSP_M_IPV4 0x04    /* IPv4 RSS match */
+#define IB_MAC_IOCB_RSP_M_IPV6 0x02    /* IPv6 RSS match */
+#define IB_MAC_IOCB_RSP_M_TCP_V4       0x05    /* TCP with IPv4 */
+#define IB_MAC_IOCB_RSP_M_TCP_V6       0x03    /* TCP with IPv6 */
+#define IB_MAC_IOCB_RSP_V4     0x08    /* IPV4 */
+#define IB_MAC_IOCB_RSP_V6     0x10    /* IPV6 */
+#define IB_MAC_IOCB_RSP_IH     0x20    /* Split after IP header */
+#define IB_MAC_IOCB_RSP_DS     0x40    /* data is in small buffer */
+#define IB_MAC_IOCB_RSP_DL     0x80    /* data is in large buffer */
+       __le32 data_len;        /* */
+       __le64 data_addr;       /* */
+       __le32 rss;             /* */
+       __le16 vlan_id;         /* 12 bits */
+#define IB_MAC_IOCB_RSP_C      0x1000  /* VLAN CFI bit */
+#define IB_MAC_IOCB_RSP_COS_SHIFT      12      /* class of service value */
+#define IB_MAC_IOCB_RSP_VLAN_MASK      0x0ffff
+
+       __le16 reserved1;
+       __le32 reserved2[6];
+       u8 reserved3[3];
+       u8 flags4;
+#define IB_MAC_IOCB_RSP_HV     0x20
+#define IB_MAC_IOCB_RSP_HS     0x40
+#define IB_MAC_IOCB_RSP_HL     0x80
+       __le32 hdr_len;         /* */
+       __le64 hdr_addr;        /* */
+} __packed;
+
+struct ib_ae_iocb_rsp {
+       u8 opcode;
+       u8 flags1;
+#define IB_AE_IOCB_RSP_OI              0x01
+#define IB_AE_IOCB_RSP_I               0x02
+       u8 event;
+#define LINK_UP_EVENT              0x00
+#define LINK_DOWN_EVENT            0x01
+#define CAM_LOOKUP_ERR_EVENT       0x06
+#define SOFT_ECC_ERROR_EVENT       0x07
+#define MGMT_ERR_EVENT             0x08
+#define TEN_GIG_MAC_EVENT          0x09
+#define GPI0_H2L_EVENT         0x10
+#define GPI0_L2H_EVENT         0x20
+#define GPI1_H2L_EVENT         0x11
+#define GPI1_L2H_EVENT         0x21
+#define PCI_ERR_ANON_BUF_RD        0x40
+       u8 q_id;
+       __le32 reserved[15];
+} __packed;
+
+/*
+ * These three structures are for generic
+ * handling of ib and ob iocbs.
+ */
+struct ql_net_rsp_iocb {
+       u8 opcode;
+       u8 flags0;
+       __le16 length;
+       __le32 tid;
+       __le32 reserved[14];
+} __packed;
+
+struct net_req_iocb {
+       u8 opcode;
+       u8 flags0;
+       __le16 flags1;
+       __le32 tid;
+       __le32 reserved1[30];
+} __packed;
+
+/*
+ * tx ring initialization control block for chip.
+ * It is defined as:
+ * "Work Queue Initialization Control Block"
+ */
+struct wqicb {
+       __le16 len;
+#define Q_LEN_V                (1 << 4)
+#define Q_LEN_CPP_CONT 0x0000
+#define Q_LEN_CPP_16   0x0001
+#define Q_LEN_CPP_32   0x0002
+#define Q_LEN_CPP_64   0x0003
+#define Q_LEN_CPP_512  0x0006
+       __le16 flags;
+#define Q_PRI_SHIFT    1
+#define Q_FLAGS_LC     0x1000
+#define Q_FLAGS_LB     0x2000
+#define Q_FLAGS_LI     0x4000
+#define Q_FLAGS_LO     0x8000
+       __le16 cq_id_rss;
+#define Q_CQ_ID_RSS_RV 0x8000
+       __le16 rid;
+       __le64 addr;
+       __le64 cnsmr_idx_addr;
+} __packed;
+
+/*
+ * rx ring initialization control block for chip.
+ * It is defined as:
+ * "Completion Queue Initialization Control Block"
+ */
+struct cqicb {
+       u8 msix_vect;
+       u8 reserved1;
+       u8 reserved2;
+       u8 flags;
+#define FLAGS_LV       0x08
+#define FLAGS_LS       0x10
+#define FLAGS_LL       0x20
+#define FLAGS_LI       0x40
+#define FLAGS_LC       0x80
+       __le16 len;
+#define LEN_V          (1 << 4)
+#define LEN_CPP_CONT   0x0000
+#define LEN_CPP_32     0x0001
+#define LEN_CPP_64     0x0002
+#define LEN_CPP_128    0x0003
+       __le16 rid;
+       __le64 addr;
+       __le64 prod_idx_addr;
+       __le16 pkt_delay;
+       __le16 irq_delay;
+       __le64 lbq_addr;
+       __le16 lbq_buf_size;
+       __le16 lbq_len;         /* entry count */
+       __le64 sbq_addr;
+       __le16 sbq_buf_size;
+       __le16 sbq_len;         /* entry count */
+} __packed;
+
+struct ricb {
+       u8 base_cq;
+#define RSS_L4K 0x80
+       u8 flags;
+#define RSS_L6K 0x01
+#define RSS_LI  0x02
+#define RSS_LB  0x04
+#define RSS_LM  0x08
+#define RSS_RI4 0x10
+#define RSS_RT4 0x20
+#define RSS_RI6 0x40
+#define RSS_RT6 0x80
+       __le16 mask;
+       u8 hash_cq_id[1024];
+       __le32 ipv6_hash_key[10];
+       __le32 ipv4_hash_key[4];
+} __packed;
+
+/* SOFTWARE/DRIVER DATA STRUCTURES. */
+
+struct oal {
+       struct tx_buf_desc oal[TX_DESC_PER_OAL];
+};
+
+struct map_list {
+       DEFINE_DMA_UNMAP_ADDR(mapaddr);
+       DEFINE_DMA_UNMAP_LEN(maplen);
+};
+
+struct tx_ring_desc {
+       struct sk_buff *skb;
+       struct ob_mac_iocb_req *queue_entry;
+       u32 index;
+       struct oal oal;
+       struct map_list map[MAX_SKB_FRAGS + 2];
+       int map_cnt;
+       struct tx_ring_desc *next;
+};
+
+struct page_chunk {
+       struct page *page;      /* master page */
+       char *va;               /* virt addr for this chunk */
+       u64 map;                /* mapping for master */
+       unsigned int offset;    /* offset for this chunk */
+       unsigned int last_flag; /* flag set for last chunk in page */
+};
+
+struct bq_desc {
+       union {
+               struct page_chunk pg_chunk;
+               struct sk_buff *skb;
+       } p;
+       __le64 *addr;
+       u32 index;
+       DEFINE_DMA_UNMAP_ADDR(mapaddr);
+       DEFINE_DMA_UNMAP_LEN(maplen);
+};
+
+#define QL_TXQ_IDX(qdev, skb) (smp_processor_id()%(qdev->tx_ring_count))
+
+struct tx_ring {
+       /*
+        * queue info.
+        */
+       struct wqicb wqicb;     /* structure used to inform chip of new queue */
+       void *wq_base;          /* pci_alloc:virtual addr for tx */
+       dma_addr_t wq_base_dma; /* pci_alloc:dma addr for tx */
+       __le32 *cnsmr_idx_sh_reg;       /* shadow copy of consumer idx */
+       dma_addr_t cnsmr_idx_sh_reg_dma;        /* dma-shadow copy of consumer */
+       u32 wq_size;            /* size in bytes of queue area */
+       u32 wq_len;             /* number of entries in queue */
+       void __iomem *prod_idx_db_reg;  /* doorbell area index reg at offset 0x00 */
+       void __iomem *valid_db_reg;     /* doorbell area valid reg at offset 0x04 */
+       u16 prod_idx;           /* current value for prod idx */
+       u16 cq_id;              /* completion (rx) queue for tx completions */
+       u8 wq_id;               /* queue id for this entry */
+       u8 reserved1[3];
+       struct tx_ring_desc *q; /* descriptor list for the queue */
+       spinlock_t lock;
+       atomic_t tx_count;      /* counts down for every outstanding IO */
+       struct delayed_work tx_work;
+       struct ql_adapter *qdev;
+       u64 tx_packets;
+       u64 tx_bytes;
+       u64 tx_errors;
+};
+
+/*
+ * Type of inbound queue.
+ */
+enum {
+       DEFAULT_Q = 2,          /* Handles slow queue and chip/MPI events. */
+       TX_Q = 3,               /* Handles outbound completions. */
+       RX_Q = 4,               /* Handles inbound completions. */
+};
+
+struct rx_ring {
+       struct cqicb cqicb;     /* The chip's completion queue init control block. */
+
+       /* Completion queue elements. */
+       void *cq_base;
+       dma_addr_t cq_base_dma;
+       u32 cq_size;
+       u32 cq_len;
+       u16 cq_id;
+       __le32 *prod_idx_sh_reg;        /* Shadowed producer register. */
+       dma_addr_t prod_idx_sh_reg_dma;
+       void __iomem *cnsmr_idx_db_reg; /* PCI doorbell mem area + 0 */
+       u32 cnsmr_idx;          /* current sw idx */
+       struct ql_net_rsp_iocb *curr_entry;     /* next entry on queue */
+       void __iomem *valid_db_reg;     /* PCI doorbell mem area + 0x04 */
+
+       /* Large buffer queue elements. */
+       u32 lbq_len;            /* entry count */
+       u32 lbq_size;           /* size in bytes of queue */
+       u32 lbq_buf_size;
+       void *lbq_base;
+       dma_addr_t lbq_base_dma;
+       void *lbq_base_indirect;
+       dma_addr_t lbq_base_indirect_dma;
+       struct page_chunk pg_chunk; /* current page for chunks */
+       struct bq_desc *lbq;    /* array of control blocks */
+       void __iomem *lbq_prod_idx_db_reg;      /* PCI doorbell mem area + 0x18 */
+       u32 lbq_prod_idx;       /* current sw prod idx */
+       u32 lbq_curr_idx;       /* next entry we expect */
+       u32 lbq_clean_idx;      /* beginning of new descs */
+       u32 lbq_free_cnt;       /* free buffer desc cnt */
+
+       /* Small buffer queue elements. */
+       u32 sbq_len;            /* entry count */
+       u32 sbq_size;           /* size in bytes of queue */
+       u32 sbq_buf_size;
+       void *sbq_base;
+       dma_addr_t sbq_base_dma;
+       void *sbq_base_indirect;
+       dma_addr_t sbq_base_indirect_dma;
+       struct bq_desc *sbq;    /* array of control blocks */
+       void __iomem *sbq_prod_idx_db_reg; /* PCI doorbell mem area + 0x1c */
+       u32 sbq_prod_idx;       /* current sw prod idx */
+       u32 sbq_curr_idx;       /* next entry we expect */
+       u32 sbq_clean_idx;      /* beginning of new descs */
+       u32 sbq_free_cnt;       /* free buffer desc cnt */
+
+       /* Misc. handler elements. */
+       u32 type;               /* Type of queue, tx, rx. */
+       u32 irq;                /* Which vector this ring is assigned. */
+       u32 cpu;                /* Which CPU this should run on. */
+       char name[IFNAMSIZ + 5];
+       struct napi_struct napi;
+       u8 reserved;
+       struct ql_adapter *qdev;
+       u64 rx_packets;
+       u64 rx_multicast;
+       u64 rx_bytes;
+       u64 rx_dropped;
+       u64 rx_errors;
+};
+
+/*
+ * RSS Initialization Control Block
+ */
+struct hash_id {
+       u8 value[4];
+};
+
+struct nic_stats {
+       /*
+        * These stats come from offset 200h to 278h
+        * in the XGMAC register.
+        */
+       u64 tx_pkts;
+       u64 tx_bytes;
+       u64 tx_mcast_pkts;
+       u64 tx_bcast_pkts;
+       u64 tx_ucast_pkts;
+       u64 tx_ctl_pkts;
+       u64 tx_pause_pkts;
+       u64 tx_64_pkt;
+       u64 tx_65_to_127_pkt;
+       u64 tx_128_to_255_pkt;
+       u64 tx_256_511_pkt;
+       u64 tx_512_to_1023_pkt;
+       u64 tx_1024_to_1518_pkt;
+       u64 tx_1519_to_max_pkt;
+       u64 tx_undersize_pkt;
+       u64 tx_oversize_pkt;
+
+       /*
+        * These stats come from offset 300h to 3C8h
+        * in the XGMAC register.
+        */
+       u64 rx_bytes;
+       u64 rx_bytes_ok;
+       u64 rx_pkts;
+       u64 rx_pkts_ok;
+       u64 rx_bcast_pkts;
+       u64 rx_mcast_pkts;
+       u64 rx_ucast_pkts;
+       u64 rx_undersize_pkts;
+       u64 rx_oversize_pkts;
+       u64 rx_jabber_pkts;
+       u64 rx_undersize_fcerr_pkts;
+       u64 rx_drop_events;
+       u64 rx_fcerr_pkts;
+       u64 rx_align_err;
+       u64 rx_symbol_err;
+       u64 rx_mac_err;
+       u64 rx_ctl_pkts;
+       u64 rx_pause_pkts;
+       u64 rx_64_pkts;
+       u64 rx_65_to_127_pkts;
+       u64 rx_128_255_pkts;
+       u64 rx_256_511_pkts;
+       u64 rx_512_to_1023_pkts;
+       u64 rx_1024_to_1518_pkts;
+       u64 rx_1519_to_max_pkts;
+       u64 rx_len_err_pkts;
+       /* Receive Mac Err stats */
+       u64 rx_code_err;
+       u64 rx_oversize_err;
+       u64 rx_undersize_err;
+       u64 rx_preamble_err;
+       u64 rx_frame_len_err;
+       u64 rx_crc_err;
+       u64 rx_err_count;
+       /*
+        * These stats come from offset 500h to 5C8h
+        * in the XGMAC register.
+        */
+       u64 tx_cbfc_pause_frames0;
+       u64 tx_cbfc_pause_frames1;
+       u64 tx_cbfc_pause_frames2;
+       u64 tx_cbfc_pause_frames3;
+       u64 tx_cbfc_pause_frames4;
+       u64 tx_cbfc_pause_frames5;
+       u64 tx_cbfc_pause_frames6;
+       u64 tx_cbfc_pause_frames7;
+       u64 rx_cbfc_pause_frames0;
+       u64 rx_cbfc_pause_frames1;
+       u64 rx_cbfc_pause_frames2;
+       u64 rx_cbfc_pause_frames3;
+       u64 rx_cbfc_pause_frames4;
+       u64 rx_cbfc_pause_frames5;
+       u64 rx_cbfc_pause_frames6;
+       u64 rx_cbfc_pause_frames7;
+       u64 rx_nic_fifo_drop;
+};
+
+/* Firmware coredump internal register address/length pairs. */
+enum {
+       MPI_CORE_REGS_ADDR = 0x00030000,
+       MPI_CORE_REGS_CNT = 127,
+       MPI_CORE_SH_REGS_CNT = 16,
+       TEST_REGS_ADDR = 0x00001000,
+       TEST_REGS_CNT = 23,
+       RMII_REGS_ADDR = 0x00001040,
+       RMII_REGS_CNT = 64,
+       FCMAC1_REGS_ADDR = 0x00001080,
+       FCMAC2_REGS_ADDR = 0x000010c0,
+       FCMAC_REGS_CNT = 64,
+       FC1_MBX_REGS_ADDR = 0x00001100,
+       FC2_MBX_REGS_ADDR = 0x00001240,
+       FC_MBX_REGS_CNT = 64,
+       IDE_REGS_ADDR = 0x00001140,
+       IDE_REGS_CNT = 64,
+       NIC1_MBX_REGS_ADDR = 0x00001180,
+       NIC2_MBX_REGS_ADDR = 0x00001280,
+       NIC_MBX_REGS_CNT = 64,
+       SMBUS_REGS_ADDR = 0x00001200,
+       SMBUS_REGS_CNT = 64,
+       I2C_REGS_ADDR = 0x00001fc0,
+       I2C_REGS_CNT = 64,
+       MEMC_REGS_ADDR = 0x00003000,
+       MEMC_REGS_CNT = 256,
+       PBUS_REGS_ADDR = 0x00007c00,
+       PBUS_REGS_CNT = 256,
+       MDE_REGS_ADDR = 0x00010000,
+       MDE_REGS_CNT = 6,
+       CODE_RAM_ADDR = 0x00020000,
+       CODE_RAM_CNT = 0x2000,
+       MEMC_RAM_ADDR = 0x00100000,
+       MEMC_RAM_CNT = 0x2000,
+};
+
+#define MPI_COREDUMP_COOKIE 0x5555aaaa
+struct mpi_coredump_global_header {
+       u32     cookie;
+       u8      idString[16];
+       u32     timeLo;
+       u32     timeHi;
+       u32     imageSize;
+       u32     headerSize;
+       u8      info[220];
+};
+
+struct mpi_coredump_segment_header {
+       u32     cookie;
+       u32     segNum;
+       u32     segSize;
+       u32     extra;
+       u8      description[16];
+};
+
+/* Firmware coredump header segment numbers. */
+enum {
+       CORE_SEG_NUM = 1,
+       TEST_LOGIC_SEG_NUM = 2,
+       RMII_SEG_NUM = 3,
+       FCMAC1_SEG_NUM = 4,
+       FCMAC2_SEG_NUM = 5,
+       FC1_MBOX_SEG_NUM = 6,
+       IDE_SEG_NUM = 7,
+       NIC1_MBOX_SEG_NUM = 8,
+       SMBUS_SEG_NUM = 9,
+       FC2_MBOX_SEG_NUM = 10,
+       NIC2_MBOX_SEG_NUM = 11,
+       I2C_SEG_NUM = 12,
+       MEMC_SEG_NUM = 13,
+       PBUS_SEG_NUM = 14,
+       MDE_SEG_NUM = 15,
+       NIC1_CONTROL_SEG_NUM = 16,
+       NIC2_CONTROL_SEG_NUM = 17,
+       NIC1_XGMAC_SEG_NUM = 18,
+       NIC2_XGMAC_SEG_NUM = 19,
+       WCS_RAM_SEG_NUM = 20,
+       MEMC_RAM_SEG_NUM = 21,
+       XAUI_AN_SEG_NUM = 22,
+       XAUI_HSS_PCS_SEG_NUM = 23,
+       XFI_AN_SEG_NUM = 24,
+       XFI_TRAIN_SEG_NUM = 25,
+       XFI_HSS_PCS_SEG_NUM = 26,
+       XFI_HSS_TX_SEG_NUM = 27,
+       XFI_HSS_RX_SEG_NUM = 28,
+       XFI_HSS_PLL_SEG_NUM = 29,
+       MISC_NIC_INFO_SEG_NUM = 30,
+       INTR_STATES_SEG_NUM = 31,
+       CAM_ENTRIES_SEG_NUM = 32,
+       ROUTING_WORDS_SEG_NUM = 33,
+       ETS_SEG_NUM = 34,
+       PROBE_DUMP_SEG_NUM = 35,
+       ROUTING_INDEX_SEG_NUM = 36,
+       MAC_PROTOCOL_SEG_NUM = 37,
+       XAUI2_AN_SEG_NUM = 38,
+       XAUI2_HSS_PCS_SEG_NUM = 39,
+       XFI2_AN_SEG_NUM = 40,
+       XFI2_TRAIN_SEG_NUM = 41,
+       XFI2_HSS_PCS_SEG_NUM = 42,
+       XFI2_HSS_TX_SEG_NUM = 43,
+       XFI2_HSS_RX_SEG_NUM = 44,
+       XFI2_HSS_PLL_SEG_NUM = 45,
+       SEM_REGS_SEG_NUM = 50
+
+};
+
+/* There are 64 generic NIC registers. */
+#define NIC_REGS_DUMP_WORD_COUNT               64
+/* XGMAC word count. */
+#define XGMAC_DUMP_WORD_COUNT          (XGMAC_REGISTER_END / 4)
+/* Word counts for the SERDES blocks. */
+#define XG_SERDES_XAUI_AN_COUNT                14
+#define XG_SERDES_XAUI_HSS_PCS_COUNT   33
+#define XG_SERDES_XFI_AN_COUNT         14
+#define XG_SERDES_XFI_TRAIN_COUNT              12
+#define XG_SERDES_XFI_HSS_PCS_COUNT    15
+#define XG_SERDES_XFI_HSS_TX_COUNT             32
+#define XG_SERDES_XFI_HSS_RX_COUNT             32
+#define XG_SERDES_XFI_HSS_PLL_COUNT    32
+
+/* There are 2 CNA ETS and 8 NIC ETS registers. */
+#define ETS_REGS_DUMP_WORD_COUNT               10
+
+/* Each probe mux entry stores the probe type plus 64 entries
+ * that are each 64 bits in length. There are a total of
+ * 34 (PRB_MX_ADDR_VALID_TOTAL) valid probes.
+ */
+#define PRB_MX_ADDR_PRB_WORD_COUNT             (1 + (PRB_MX_ADDR_MAX_MUX * 2))
+#define PRB_MX_DUMP_TOT_COUNT          (PRB_MX_ADDR_PRB_WORD_COUNT * \
+                                                       PRB_MX_ADDR_VALID_TOTAL)
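+/* For reference, from the defines above: each probe entry is
+ * 1 + (64 * 2) = 129 words, so the probe dump segment holds
+ * 129 * 34 = 4386 32-bit words in total.
+ */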
+/* Each routing entry consists of 4 32-bit words.
+ * They are route type, index, index word, and result.
+ * There are 2 route blocks with 8 entries each and
+ *  2 NIC blocks with 16 entries each.
+ * The total is 48 entries with 4 words each.
+ */
+#define RT_IDX_DUMP_ENTRIES                    48
+#define RT_IDX_DUMP_WORDS_PER_ENTRY    4
+#define RT_IDX_DUMP_TOT_WORDS          (RT_IDX_DUMP_ENTRIES * \
+                                               RT_IDX_DUMP_WORDS_PER_ENTRY)
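+/* For reference: (2 * 8) route entries + (2 * 16) NIC entries = 48, and
+ * 48 entries * 4 words each = 192 routing dump words in total.
+ */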
+/* There are 10 address blocks in filter, each with
+ * different entry counts and different word-count-per-entry.
+ */
+#define MAC_ADDR_DUMP_ENTRIES \
+       ((MAC_ADDR_MAX_CAM_ENTRIES * MAC_ADDR_MAX_CAM_WCOUNT) + \
+       (MAC_ADDR_MAX_MULTICAST_ENTRIES * MAC_ADDR_MAX_MULTICAST_WCOUNT) + \
+       (MAC_ADDR_MAX_VLAN_ENTRIES * MAC_ADDR_MAX_VLAN_WCOUNT) + \
+       (MAC_ADDR_MAX_MCAST_FLTR_ENTRIES * MAC_ADDR_MAX_MCAST_FLTR_WCOUNT) + \
+       (MAC_ADDR_MAX_FC_MAC_ENTRIES * MAC_ADDR_MAX_FC_MAC_WCOUNT) + \
+       (MAC_ADDR_MAX_MGMT_MAC_ENTRIES * MAC_ADDR_MAX_MGMT_MAC_WCOUNT) + \
+       (MAC_ADDR_MAX_MGMT_VLAN_ENTRIES * MAC_ADDR_MAX_MGMT_VLAN_WCOUNT) + \
+       (MAC_ADDR_MAX_MGMT_V4_ENTRIES * MAC_ADDR_MAX_MGMT_V4_WCOUNT) + \
+       (MAC_ADDR_MAX_MGMT_V6_ENTRIES * MAC_ADDR_MAX_MGMT_V6_WCOUNT) + \
+       (MAC_ADDR_MAX_MGMT_TU_DP_ENTRIES * MAC_ADDR_MAX_MGMT_TU_DP_WCOUNT))
+#define MAC_ADDR_DUMP_WORDS_PER_ENTRY  2
+#define MAC_ADDR_DUMP_TOT_WORDS                (MAC_ADDR_DUMP_ENTRIES * \
+                                               MAC_ADDR_DUMP_WORDS_PER_ENTRY)
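+/* For reference: summing entries * word count over the ten address types
+ * above gives MAC_ADDR_DUMP_ENTRIES = 9856, so MAC_ADDR_DUMP_TOT_WORDS
+ * evaluates to 19712.
+ */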
+/* Maximum of 4 functions whose semaphore registers are
+ * in the coredump.
+ */
+#define MAX_SEMAPHORE_FUNCTIONS                4
+/* Defines for accessing the MPI shadow registers. */
+#define RISC_124               0x0003007c
+#define RISC_127               0x0003007f
+#define SHADOW_OFFSET  0xb0000000
+#define SHADOW_REG_SHIFT       20
+
+struct ql_nic_misc {
+       u32 rx_ring_count;
+       u32 tx_ring_count;
+       u32 intr_count;
+       u32 function;
+};
+
+struct ql_reg_dump {
+
+       /* segment 0 */
+       struct mpi_coredump_global_header mpi_global_header;
+
+       /* segment 16 */
+       struct mpi_coredump_segment_header nic_regs_seg_hdr;
+       u32 nic_regs[64];
+
+       /* segment 30 */
+       struct mpi_coredump_segment_header misc_nic_seg_hdr;
+       struct ql_nic_misc misc_nic_info;
+
+       /* segment 31 */
+       /* one interrupt state for each CQ */
+       struct mpi_coredump_segment_header intr_states_seg_hdr;
+       u32 intr_states[MAX_CPUS];
+
+       /* segment 32 */
+       /* 3 cam words each for 16 unicast,
+        * 2 cam words for each of 32 multicast.
+        */
+       struct mpi_coredump_segment_header cam_entries_seg_hdr;
+       u32 cam_entries[(16 * 3) + (32 * 3)];
+
+       /* segment 33 */
+       struct mpi_coredump_segment_header nic_routing_words_seg_hdr;
+       u32 nic_routing_words[16];
+
+       /* segment 34 */
+       struct mpi_coredump_segment_header ets_seg_hdr;
+       u32 ets[8+2];
+};
+
+struct ql_mpi_coredump {
+       /* segment 0 */
+       struct mpi_coredump_global_header mpi_global_header;
+
+       /* segment 1 */
+       struct mpi_coredump_segment_header core_regs_seg_hdr;
+       u32 mpi_core_regs[MPI_CORE_REGS_CNT];
+       u32 mpi_core_sh_regs[MPI_CORE_SH_REGS_CNT];
+
+       /* segment 2 */
+       struct mpi_coredump_segment_header test_logic_regs_seg_hdr;
+       u32 test_logic_regs[TEST_REGS_CNT];
+
+       /* segment 3 */
+       struct mpi_coredump_segment_header rmii_regs_seg_hdr;
+       u32 rmii_regs[RMII_REGS_CNT];
+
+       /* segment 4 */
+       struct mpi_coredump_segment_header fcmac1_regs_seg_hdr;
+       u32 fcmac1_regs[FCMAC_REGS_CNT];
+
+       /* segment 5 */
+       struct mpi_coredump_segment_header fcmac2_regs_seg_hdr;
+       u32 fcmac2_regs[FCMAC_REGS_CNT];
+
+       /* segment 6 */
+       struct mpi_coredump_segment_header fc1_mbx_regs_seg_hdr;
+       u32 fc1_mbx_regs[FC_MBX_REGS_CNT];
+
+       /* segment 7 */
+       struct mpi_coredump_segment_header ide_regs_seg_hdr;
+       u32 ide_regs[IDE_REGS_CNT];
+
+       /* segment 8 */
+       struct mpi_coredump_segment_header nic1_mbx_regs_seg_hdr;
+       u32 nic1_mbx_regs[NIC_MBX_REGS_CNT];
+
+       /* segment 9 */
+       struct mpi_coredump_segment_header smbus_regs_seg_hdr;
+       u32 smbus_regs[SMBUS_REGS_CNT];
+
+       /* segment 10 */
+       struct mpi_coredump_segment_header fc2_mbx_regs_seg_hdr;
+       u32 fc2_mbx_regs[FC_MBX_REGS_CNT];
+
+       /* segment 11 */
+       struct mpi_coredump_segment_header nic2_mbx_regs_seg_hdr;
+       u32 nic2_mbx_regs[NIC_MBX_REGS_CNT];
+
+       /* segment 12 */
+       struct mpi_coredump_segment_header i2c_regs_seg_hdr;
+       u32 i2c_regs[I2C_REGS_CNT];
+       /* segment 13 */
+       struct mpi_coredump_segment_header memc_regs_seg_hdr;
+       u32 memc_regs[MEMC_REGS_CNT];
+
+       /* segment 14 */
+       struct mpi_coredump_segment_header pbus_regs_seg_hdr;
+       u32 pbus_regs[PBUS_REGS_CNT];
+
+       /* segment 15 */
+       struct mpi_coredump_segment_header mde_regs_seg_hdr;
+       u32 mde_regs[MDE_REGS_CNT];
+
+       /* segment 16 */
+       struct mpi_coredump_segment_header nic_regs_seg_hdr;
+       u32 nic_regs[NIC_REGS_DUMP_WORD_COUNT];
+
+       /* segment 17 */
+       struct mpi_coredump_segment_header nic2_regs_seg_hdr;
+       u32 nic2_regs[NIC_REGS_DUMP_WORD_COUNT];
+
+       /* segment 18 */
+       struct mpi_coredump_segment_header xgmac1_seg_hdr;
+       u32 xgmac1[XGMAC_DUMP_WORD_COUNT];
+
+       /* segment 19 */
+       struct mpi_coredump_segment_header xgmac2_seg_hdr;
+       u32 xgmac2[XGMAC_DUMP_WORD_COUNT];
+
+       /* segment 20 */
+       struct mpi_coredump_segment_header code_ram_seg_hdr;
+       u32 code_ram[CODE_RAM_CNT];
+
+       /* segment 21 */
+       struct mpi_coredump_segment_header memc_ram_seg_hdr;
+       u32 memc_ram[MEMC_RAM_CNT];
+
+       /* segment 22 */
+       struct mpi_coredump_segment_header xaui_an_hdr;
+       u32 serdes_xaui_an[XG_SERDES_XAUI_AN_COUNT];
+
+       /* segment 23 */
+       struct mpi_coredump_segment_header xaui_hss_pcs_hdr;
+       u32 serdes_xaui_hss_pcs[XG_SERDES_XAUI_HSS_PCS_COUNT];
+
+       /* segment 24 */
+       struct mpi_coredump_segment_header xfi_an_hdr;
+       u32 serdes_xfi_an[XG_SERDES_XFI_AN_COUNT];
+
+       /* segment 25 */
+       struct mpi_coredump_segment_header xfi_train_hdr;
+       u32 serdes_xfi_train[XG_SERDES_XFI_TRAIN_COUNT];
+
+       /* segment 26 */
+       struct mpi_coredump_segment_header xfi_hss_pcs_hdr;
+       u32 serdes_xfi_hss_pcs[XG_SERDES_XFI_HSS_PCS_COUNT];
+
+       /* segment 27 */
+       struct mpi_coredump_segment_header xfi_hss_tx_hdr;
+       u32 serdes_xfi_hss_tx[XG_SERDES_XFI_HSS_TX_COUNT];
+
+       /* segment 28 */
+       struct mpi_coredump_segment_header xfi_hss_rx_hdr;
+       u32 serdes_xfi_hss_rx[XG_SERDES_XFI_HSS_RX_COUNT];
+
+       /* segment 29 */
+       struct mpi_coredump_segment_header xfi_hss_pll_hdr;
+       u32 serdes_xfi_hss_pll[XG_SERDES_XFI_HSS_PLL_COUNT];
+
+       /* segment 30 */
+       struct mpi_coredump_segment_header misc_nic_seg_hdr;
+       struct ql_nic_misc misc_nic_info;
+
+       /* segment 31 */
+       /* one interrupt state for each CQ */
+       struct mpi_coredump_segment_header intr_states_seg_hdr;
+       u32 intr_states[MAX_RX_RINGS];
+
+       /* segment 32 */
+       /* 3 cam words each for 16 unicast,
+        * 2 cam words for each of 32 multicast.
+        */
+       struct mpi_coredump_segment_header cam_entries_seg_hdr;
+       u32 cam_entries[(16 * 3) + (32 * 3)];
+
+       /* segment 33 */
+       struct mpi_coredump_segment_header nic_routing_words_seg_hdr;
+       u32 nic_routing_words[16];
+       /* segment 34 */
+       struct mpi_coredump_segment_header ets_seg_hdr;
+       u32 ets[ETS_REGS_DUMP_WORD_COUNT];
+
+       /* segment 35 */
+       struct mpi_coredump_segment_header probe_dump_seg_hdr;
+       u32 probe_dump[PRB_MX_DUMP_TOT_COUNT];
+
+       /* segment 36 */
+       struct mpi_coredump_segment_header routing_reg_seg_hdr;
+       u32 routing_regs[RT_IDX_DUMP_TOT_WORDS];
+
+       /* segment 37 */
+       struct mpi_coredump_segment_header mac_prot_reg_seg_hdr;
+       u32 mac_prot_regs[MAC_ADDR_DUMP_TOT_WORDS];
+
+       /* segment 38 */
+       struct mpi_coredump_segment_header xaui2_an_hdr;
+       u32 serdes2_xaui_an[XG_SERDES_XAUI_AN_COUNT];
+
+       /* segment 39 */
+       struct mpi_coredump_segment_header xaui2_hss_pcs_hdr;
+       u32 serdes2_xaui_hss_pcs[XG_SERDES_XAUI_HSS_PCS_COUNT];
+
+       /* segment 40 */
+       struct mpi_coredump_segment_header xfi2_an_hdr;
+       u32 serdes2_xfi_an[XG_SERDES_XFI_AN_COUNT];
+
+       /* segment 41 */
+       struct mpi_coredump_segment_header xfi2_train_hdr;
+       u32 serdes2_xfi_train[XG_SERDES_XFI_TRAIN_COUNT];
+
+       /* segment 42 */
+       struct mpi_coredump_segment_header xfi2_hss_pcs_hdr;
+       u32 serdes2_xfi_hss_pcs[XG_SERDES_XFI_HSS_PCS_COUNT];
+
+       /* segment 43 */
+       struct mpi_coredump_segment_header xfi2_hss_tx_hdr;
+       u32 serdes2_xfi_hss_tx[XG_SERDES_XFI_HSS_TX_COUNT];
+
+       /* segment 44 */
+       struct mpi_coredump_segment_header xfi2_hss_rx_hdr;
+       u32 serdes2_xfi_hss_rx[XG_SERDES_XFI_HSS_RX_COUNT];
+
+       /* segment 45 */
+       struct mpi_coredump_segment_header xfi2_hss_pll_hdr;
+       u32 serdes2_xfi_hss_pll[XG_SERDES_XFI_HSS_PLL_COUNT];
+
+       /* segment 50 */
+       /* semaphore register for all 5 functions */
+       struct mpi_coredump_segment_header sem_regs_seg_hdr;
+       u32 sem_regs[MAX_SEMAPHORE_FUNCTIONS];
+};
+
+/*
+ * intr_context structure is used during initialization
+ * to hook the interrupts.  It is also used in a single
+ * irq environment as a context to the ISR.
+ */
+struct intr_context {
+       struct ql_adapter *qdev;
+       u32 intr;
+       u32 irq_mask;           /* Mask of which rings the vector services. */
+       u32 hooked;
+       u32 intr_en_mask;       /* value/mask used to enable this intr */
+       u32 intr_dis_mask;      /* value/mask used to disable this intr */
+       u32 intr_read_mask;     /* value/mask used to read this intr */
+       char name[IFNAMSIZ * 2];
+       atomic_t irq_cnt;       /* irq_cnt is used in single vector
+                                * environment.  It's incremented for each
+                                * irq handler that is scheduled.  When each
+                                * handler finishes it decrements irq_cnt and
+                                * enables interrupts if it's zero. */
+       irq_handler_t handler;
+};
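+
+/*
+ * Illustrative sketch (hypothetical helpers, added only to make the
+ * irq_cnt accounting above concrete): each scheduled handler bumps
+ * irq_cnt on entry, and the handler that drops it back to zero is the
+ * one expected to re-enable the interrupt (see
+ * ql_enable_completion_interrupt() declared further below).
+ */
+static inline void ql_example_isr_enter(struct intr_context *ctx)
+{
+       atomic_inc(&ctx->irq_cnt);
+}
+
+static inline bool ql_example_isr_is_last(struct intr_context *ctx)
+{
+       return atomic_dec_and_test(&ctx->irq_cnt);
+}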
+
+/* adapter flags definitions. */
+enum {
+       QL_ADAPTER_UP = 0,      /* Adapter has been brought up. */
+       QL_LEGACY_ENABLED = 1,
+       QL_MSI_ENABLED = 2,
+       QL_MSIX_ENABLED = 3,
+       QL_DMA64 = 4,
+       QL_PROMISCUOUS = 5,
+       QL_ALLMULTI = 6,
+       QL_PORT_CFG = 7,
+       QL_CAM_RT_SET = 8,
+       QL_SELFTEST = 9,
+       QL_LB_LINK_UP = 10,
+       QL_FRC_COREDUMP = 11,
+       QL_EEH_FATAL = 12,
+       QL_ASIC_RECOVERY = 14, /* We are in ASIC recovery. */
+};
+
+/* link_status bit definitions */
+enum {
+       STS_LOOPBACK_MASK = 0x00000700,
+       STS_LOOPBACK_PCS = 0x00000100,
+       STS_LOOPBACK_HSS = 0x00000200,
+       STS_LOOPBACK_EXT = 0x00000300,
+       STS_PAUSE_MASK = 0x000000c0,
+       STS_PAUSE_STD = 0x00000040,
+       STS_PAUSE_PRI = 0x00000080,
+       STS_SPEED_MASK = 0x00000038,
+       STS_SPEED_100Mb = 0x00000000,
+       STS_SPEED_1Gb = 0x00000008,
+       STS_SPEED_10Gb = 0x00000010,
+       STS_LINK_TYPE_MASK = 0x00000007,
+       STS_LINK_TYPE_XFI = 0x00000001,
+       STS_LINK_TYPE_XAUI = 0x00000002,
+       STS_LINK_TYPE_XFI_BP = 0x00000003,
+       STS_LINK_TYPE_XAUI_BP = 0x00000004,
+       STS_LINK_TYPE_10GBASET = 0x00000005,
+};
+
+/* link_config bit definitions */
+enum {
+       CFG_JUMBO_FRAME_SIZE = 0x00010000,
+       CFG_PAUSE_MASK = 0x00000060,
+       CFG_PAUSE_STD = 0x00000020,
+       CFG_PAUSE_PRI = 0x00000040,
+       CFG_DCBX = 0x00000010,
+       CFG_LOOPBACK_MASK = 0x00000007,
+       CFG_LOOPBACK_PCS = 0x00000002,
+       CFG_LOOPBACK_HSS = 0x00000004,
+       CFG_LOOPBACK_EXT = 0x00000006,
+       CFG_DEFAULT_MAX_FRAME_SIZE = 0x00002580,
+};
+
+struct nic_operations {
+
+       int (*get_flash) (struct ql_adapter *);
+       int (*port_initialize) (struct ql_adapter *);
+};
+
+/*
+ * The main Adapter structure definition.
+ * This structure has all fields relevant to the hardware.
+ */
+struct ql_adapter {
+       struct ricb ricb;
+       unsigned long flags;
+       u32 wol;
+
+       struct nic_stats nic_stats;
+
+       unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
+
+       /* PCI Configuration information for this device */
+       struct pci_dev *pdev;
+       struct net_device *ndev;        /* Parent NET device */
+
+       /* Hardware information */
+       u32 chip_rev_id;
+       u32 fw_rev_id;
+       u32 func;               /* PCI function for this adapter */
+       u32 alt_func;           /* PCI function for alternate adapter */
+       u32 port;               /* Port number for this adapter */
+
+       spinlock_t adapter_lock;
+       spinlock_t hw_lock;
+       spinlock_t stats_lock;
+
+       /* PCI Bus Relative Register Addresses */
+       void __iomem *reg_base;
+       void __iomem *doorbell_area;
+       u32 doorbell_area_size;
+
+       u32 msg_enable;
+
+       /* Page for Shadow Registers */
+       void *rx_ring_shadow_reg_area;
+       dma_addr_t rx_ring_shadow_reg_dma;
+       void *tx_ring_shadow_reg_area;
+       dma_addr_t tx_ring_shadow_reg_dma;
+
+       u32 mailbox_in;
+       u32 mailbox_out;
+       struct mbox_params idc_mbc;
+       struct mutex    mpi_mutex;
+
+       int tx_ring_size;
+       int rx_ring_size;
+       u32 intr_count;
+       struct msix_entry *msi_x_entry;
+       struct intr_context intr_context[MAX_RX_RINGS];
+
+       int tx_ring_count;      /* One per online CPU. */
+       u32 rss_ring_count;     /* One per irq vector.  */
+       /*
+        * rx_ring_count =
+        *  (CPU count * outbound completion rx_ring) +
+        *  (irq_vector_cnt * inbound (RSS) completion rx_ring)
+        */
+       int rx_ring_count;
+       int ring_mem_size;
+       void *ring_mem;
+
+       struct rx_ring rx_ring[MAX_RX_RINGS];
+       struct tx_ring tx_ring[MAX_TX_RINGS];
+       unsigned int lbq_buf_order;
+
+       int rx_csum;
+       u32 default_rx_queue;
+
+       u16 rx_coalesce_usecs;  /* cqicb->int_delay */
+       u16 rx_max_coalesced_frames;    /* cqicb->pkt_int_delay */
+       u16 tx_coalesce_usecs;  /* cqicb->int_delay */
+       u16 tx_max_coalesced_frames;    /* cqicb->pkt_int_delay */
+
+       u32 xg_sem_mask;
+       u32 port_link_up;
+       u32 port_init;
+       u32 link_status;
+       struct ql_mpi_coredump *mpi_coredump;
+       u32 core_is_dumped;
+       u32 link_config;
+       u32 led_config;
+       u32 max_frame_size;
+
+       union flash_params flash;
+
+       struct workqueue_struct *workqueue;
+       struct delayed_work asic_reset_work;
+       struct delayed_work mpi_reset_work;
+       struct delayed_work mpi_work;
+       struct delayed_work mpi_port_cfg_work;
+       struct delayed_work mpi_idc_work;
+       struct delayed_work mpi_core_to_log;
+       struct completion ide_completion;
+       const struct nic_operations *nic_ops;
+       u16 device_id;
+       struct timer_list timer;
+       atomic_t lb_count;
+       /* Keep local copy of current mac address. */
+       char current_mac_addr[ETH_ALEN];
+};
+
+/*
+ * Typical Register accessor for memory mapped device.
+ */
+static inline u32 ql_read32(const struct ql_adapter *qdev, int reg)
+{
+       return readl(qdev->reg_base + reg);
+}
+
+/*
+ * Typical Register accessor for memory mapped device.
+ */
+static inline void ql_write32(const struct ql_adapter *qdev, int reg, u32 val)
+{
+       writel(val, qdev->reg_base + reg);
+}
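+
+/*
+ * Illustrative sketch (hypothetical helper, not part of the driver API):
+ * the control register set map above is used together with these
+ * accessors, e.g. to read the chip revision and status registers.
+ */
+static inline void ql_example_read_ids(const struct ql_adapter *qdev,
+                                       u32 *rev, u32 *status)
+{
+       *rev = ql_read32(qdev, REV_ID); /* REV_ID at offset 0x4c */
+       *status = ql_read32(qdev, STS); /* STS at offset 0x30 */
+}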
+
+/*
+ * Doorbell Registers:
+ * Doorbell registers are virtual registers in the PCI memory space.
+ * The space is allocated by the chip during PCI initialization.  The
+ * device driver finds the doorbell address in BAR 3 in PCI config space.
+ * The registers are used to control outbound and inbound queues. For
+ * example, the producer index for an outbound queue.  Each queue uses
+ * 1 4k chunk of memory.  The lower half of the space is for outbound
+ * queues. The upper half is for inbound queues.
+ */
+static inline void ql_write_db_reg(u32 val, void __iomem *addr)
+{
+       writel(val, addr);
+       mmiowb();
+}
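+
+/*
+ * Illustrative sketch (hypothetical helper): after new tx descriptors have
+ * been filled in, the producer index doorbell of that tx_ring would be
+ * rung through its mapped doorbell register.
+ */
+static inline void ql_example_ring_tx_db(struct tx_ring *tx_ring)
+{
+       ql_write_db_reg(tx_ring->prod_idx, tx_ring->prod_idx_db_reg);
+}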
+
+/*
+ * Shadow Registers:
+ * Outbound queues have a consumer index that is maintained by the chip.
+ * Inbound queues have a producer index that is maintained by the chip.
+ * For lower overhead, these registers are "shadowed" to host memory
+ * which allows the device driver to track the queue progress without
+ * PCI reads. When an entry is placed on an inbound queue, the chip will
+ * update the relevant index register and then copy the value to the
+ * shadow register in host memory.
+ */
+static inline u32 ql_read_sh_reg(__le32  *addr)
+{
+       u32 reg;
+       reg =  le32_to_cpu(*addr);
+       rmb();
+       return reg;
+}
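+
+/*
+ * Illustrative sketch (hypothetical helper): reading the chip-maintained
+ * consumer index of an outbound (tx) queue from its shadow location in
+ * host memory, avoiding a PCI read.
+ */
+static inline u32 ql_example_tx_cnsmr_idx(struct tx_ring *tx_ring)
+{
+       return ql_read_sh_reg(tx_ring->cnsmr_idx_sh_reg);
+}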
+
+extern char qlge_driver_name[];
+extern const char qlge_driver_version[];
+extern const struct ethtool_ops qlge_ethtool_ops;
+
+int ql_sem_spinlock(struct ql_adapter *qdev, u32 sem_mask);
+void ql_sem_unlock(struct ql_adapter *qdev, u32 sem_mask);
+int ql_read_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 *data);
+int ql_get_mac_addr_reg(struct ql_adapter *qdev, u32 type, u16 index,
+                       u32 *value);
+int ql_get_routing_reg(struct ql_adapter *qdev, u32 index, u32 *value);
+int ql_write_cfg(struct ql_adapter *qdev, void *ptr, int size, u32 bit,
+                u16 q_id);
+void ql_queue_fw_error(struct ql_adapter *qdev);
+void ql_mpi_work(struct work_struct *work);
+void ql_mpi_reset_work(struct work_struct *work);
+void ql_mpi_core_to_log(struct work_struct *work);
+int ql_wait_reg_rdy(struct ql_adapter *qdev, u32 reg, u32 bit, u32 ebit);
+void ql_queue_asic_error(struct ql_adapter *qdev);
+u32 ql_enable_completion_interrupt(struct ql_adapter *qdev, u32 intr);
+void ql_set_ethtool_ops(struct net_device *ndev);
+int ql_read_xgmac_reg64(struct ql_adapter *qdev, u32 reg, u64 *data);
+void ql_mpi_idc_work(struct work_struct *work);
+void ql_mpi_port_cfg_work(struct work_struct *work);
+int ql_mb_get_fw_state(struct ql_adapter *qdev);
+int ql_cam_route_initialize(struct ql_adapter *qdev);
+int ql_read_mpi_reg(struct ql_adapter *qdev, u32 reg, u32 *data);
+int ql_write_mpi_reg(struct ql_adapter *qdev, u32 reg, u32 data);
+int ql_unpause_mpi_risc(struct ql_adapter *qdev);
+int ql_pause_mpi_risc(struct ql_adapter *qdev);
+int ql_hard_reset_mpi_risc(struct ql_adapter *qdev);
+int ql_soft_reset_mpi_risc(struct ql_adapter *qdev);
+int ql_dump_risc_ram_area(struct ql_adapter *qdev, void *buf, u32 ram_addr,
+                         int word_count);
+int ql_core_dump(struct ql_adapter *qdev, struct ql_mpi_coredump *mpi_coredump);
+int ql_mb_about_fw(struct ql_adapter *qdev);
+int ql_mb_wol_set_magic(struct ql_adapter *qdev, u32 enable_wol);
+int ql_mb_wol_mode(struct ql_adapter *qdev, u32 wol);
+int ql_mb_set_led_cfg(struct ql_adapter *qdev, u32 led_config);
+int ql_mb_get_led_cfg(struct ql_adapter *qdev);
+void ql_link_on(struct ql_adapter *qdev);
+void ql_link_off(struct ql_adapter *qdev);
+int ql_mb_set_mgmnt_traffic_ctl(struct ql_adapter *qdev, u32 control);
+int ql_mb_get_port_cfg(struct ql_adapter *qdev);
+int ql_mb_set_port_cfg(struct ql_adapter *qdev);
+int ql_wait_fifo_empty(struct ql_adapter *qdev);
+void ql_get_dump(struct ql_adapter *qdev, void *buff);
+netdev_tx_t ql_lb_send(struct sk_buff *skb, struct net_device *ndev);
+void ql_check_lb_frame(struct ql_adapter *, struct sk_buff *);
+int ql_own_firmware(struct ql_adapter *qdev);
+int ql_clean_lb_rx_ring(struct rx_ring *rx_ring, int budget);
+
+/* #define QL_ALL_DUMP */
+/* #define QL_REG_DUMP */
+/* #define QL_DEV_DUMP */
+/* #define QL_CB_DUMP */
+/* #define QL_IB_DUMP */
+/* #define QL_OB_DUMP */
+
+#ifdef QL_REG_DUMP
+void ql_dump_xgmac_control_regs(struct ql_adapter *qdev);
+void ql_dump_routing_entries(struct ql_adapter *qdev);
+void ql_dump_regs(struct ql_adapter *qdev);
+#define QL_DUMP_REGS(qdev) ql_dump_regs(qdev)
+#define QL_DUMP_ROUTE(qdev) ql_dump_routing_entries(qdev)
+#define QL_DUMP_XGMAC_CONTROL_REGS(qdev) ql_dump_xgmac_control_regs(qdev)
+#else
+#define QL_DUMP_REGS(qdev)
+#define QL_DUMP_ROUTE(qdev)
+#define QL_DUMP_XGMAC_CONTROL_REGS(qdev)
+#endif
+
+#ifdef QL_STAT_DUMP
+void ql_dump_stat(struct ql_adapter *qdev);
+#define QL_DUMP_STAT(qdev) ql_dump_stat(qdev)
+#else
+#define QL_DUMP_STAT(qdev)
+#endif
+
+#ifdef QL_DEV_DUMP
+void ql_dump_qdev(struct ql_adapter *qdev);
+#define QL_DUMP_QDEV(qdev) ql_dump_qdev(qdev)
+#else
+#define QL_DUMP_QDEV(qdev)
+#endif
+
+#ifdef QL_CB_DUMP
+void ql_dump_wqicb(struct wqicb *wqicb);
+void ql_dump_tx_ring(struct tx_ring *tx_ring);
+void ql_dump_ricb(struct ricb *ricb);
+void ql_dump_cqicb(struct cqicb *cqicb);
+void ql_dump_rx_ring(struct rx_ring *rx_ring);
+void ql_dump_hw_cb(struct ql_adapter *qdev, int size, u32 bit, u16 q_id);
+#define QL_DUMP_RICB(ricb) ql_dump_ricb(ricb)
+#define QL_DUMP_WQICB(wqicb) ql_dump_wqicb(wqicb)
+#define QL_DUMP_TX_RING(tx_ring) ql_dump_tx_ring(tx_ring)
+#define QL_DUMP_CQICB(cqicb) ql_dump_cqicb(cqicb)
+#define QL_DUMP_RX_RING(rx_ring) ql_dump_rx_ring(rx_ring)
+#define QL_DUMP_HW_CB(qdev, size, bit, q_id) \
+               ql_dump_hw_cb(qdev, size, bit, q_id)
+#else
+#define QL_DUMP_RICB(ricb)
+#define QL_DUMP_WQICB(wqicb)
+#define QL_DUMP_TX_RING(tx_ring)
+#define QL_DUMP_CQICB(cqicb)
+#define QL_DUMP_RX_RING(rx_ring)
+#define QL_DUMP_HW_CB(qdev, size, bit, q_id)
+#endif
+
+#ifdef QL_OB_DUMP
+void ql_dump_tx_desc(struct tx_buf_desc *tbd);
+void ql_dump_ob_mac_iocb(struct ob_mac_iocb_req *ob_mac_iocb);
+void ql_dump_ob_mac_rsp(struct ob_mac_iocb_rsp *ob_mac_rsp);
+#define QL_DUMP_OB_MAC_IOCB(ob_mac_iocb) ql_dump_ob_mac_iocb(ob_mac_iocb)
+#define QL_DUMP_OB_MAC_RSP(ob_mac_rsp) ql_dump_ob_mac_rsp(ob_mac_rsp)
+#else
+#define QL_DUMP_OB_MAC_IOCB(ob_mac_iocb)
+#define QL_DUMP_OB_MAC_RSP(ob_mac_rsp)
+#endif
+
+#ifdef QL_IB_DUMP
+void ql_dump_ib_mac_rsp(struct ib_mac_iocb_rsp *ib_mac_rsp);
+#define QL_DUMP_IB_MAC_RSP(ib_mac_rsp) ql_dump_ib_mac_rsp(ib_mac_rsp)
+#else
+#define QL_DUMP_IB_MAC_RSP(ib_mac_rsp)
+#endif
+
+#ifdef QL_ALL_DUMP
+void ql_dump_all(struct ql_adapter *qdev);
+#define QL_DUMP_ALL(qdev) ql_dump_all(qdev)
+#else
+#define QL_DUMP_ALL(qdev)
+#endif
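None of the QL_DUMP_* hooks above emit any code unless the matching macro is defined, so enabling them is a compile-time decision. A sketch, assuming the dump is wanted from an error path:

        #define QL_ALL_DUMP     /* or -DQL_ALL_DUMP from the Makefile, or uncomment above */

        /* ... later, in an error path ... */
        QL_DUMP_ALL(qdev);      /* expands to ql_dump_all(qdev); compiles away when disabled */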
+
+#endif /* _QLGE_H_ */
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_dbg.c b/drivers/net/ethernet/qlogic/qlge/qlge_dbg.c
new file mode 100644 (file)
index 0000000..829be21
--- /dev/null
@@ -0,0 +1,2042 @@
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/slab.h>
+
+#include "qlge.h"
+
+/* Read a NIC register from the alternate function. */
+static u32 ql_read_other_func_reg(struct ql_adapter *qdev,
+                                               u32 reg)
+{
+       u32 register_to_read;
+       u32 reg_val;
+       unsigned int status = 0;
+
+       register_to_read = MPI_NIC_REG_BLOCK
+                               | MPI_NIC_READ
+                               | (qdev->alt_func << MPI_NIC_FUNCTION_SHIFT)
+                               | reg;
+       status = ql_read_mpi_reg(qdev, register_to_read, &reg_val);
+       if (status != 0)
+               return 0xffffffff;
+
+       return reg_val;
+}
+
+/* Write a NIC register from the alternate function. */
+static int ql_write_other_func_reg(struct ql_adapter *qdev,
+                                       u32 reg, u32 reg_val)
+{
+       u32 register_to_read;
+       int status = 0;
+
+       register_to_read = MPI_NIC_REG_BLOCK
+                               | MPI_NIC_READ
+                               | (qdev->alt_func << MPI_NIC_FUNCTION_SHIFT)
+                               | reg;
+       status = ql_write_mpi_reg(qdev, register_to_read, reg_val);
+
+       return status;
+}
+
+static int ql_wait_other_func_reg_rdy(struct ql_adapter *qdev, u32 reg,
+                                       u32 bit, u32 err_bit)
+{
+       u32 temp;
+       int count = 10;
+
+       while (count) {
+               temp = ql_read_other_func_reg(qdev, reg);
+
+               /* check for errors */
+               if (temp & err_bit)
+                       return -1;
+               else if (temp & bit)
+                       return 0;
+               mdelay(10);
+               count--;
+       }
+       return -1;
+}
+
+static int ql_read_other_func_serdes_reg(struct ql_adapter *qdev, u32 reg,
+                                                       u32 *data)
+{
+       int status;
+
+       /* wait for reg to come ready */
+       status = ql_wait_other_func_reg_rdy(qdev, XG_SERDES_ADDR / 4,
+                                               XG_SERDES_ADDR_RDY, 0);
+       if (status)
+               goto exit;
+
+       /* set up for reg read */
+       ql_write_other_func_reg(qdev, XG_SERDES_ADDR/4, reg | PROC_ADDR_R);
+
+       /* wait for reg to come ready */
+       status = ql_wait_other_func_reg_rdy(qdev, XG_SERDES_ADDR / 4,
+                                               XG_SERDES_ADDR_RDY, 0);
+       if (status)
+               goto exit;
+
+       /* get the data */
+       *data = ql_read_other_func_reg(qdev, (XG_SERDES_DATA / 4));
+exit:
+       return status;
+}
+
+/* Read out the SERDES registers */
+static int ql_read_serdes_reg(struct ql_adapter *qdev, u32 reg, u32 *data)
+{
+       int status;
+
+       /* wait for reg to come ready */
+       status = ql_wait_reg_rdy(qdev, XG_SERDES_ADDR, XG_SERDES_ADDR_RDY, 0);
+       if (status)
+               goto exit;
+
+       /* set up for reg read */
+       ql_write32(qdev, XG_SERDES_ADDR, reg | PROC_ADDR_R);
+
+       /* wait for reg to come ready */
+       status = ql_wait_reg_rdy(qdev, XG_SERDES_ADDR, XG_SERDES_ADDR_RDY, 0);
+       if (status)
+               goto exit;
+
+       /* get the data */
+       *data = ql_read32(qdev, XG_SERDES_DATA);
+exit:
+       return status;
+}
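As a usage sketch of the read sequence above (the SERDES address and power-down bit are the constants used later in this file):

        u32 val;

        if (!ql_read_serdes_reg(qdev, XG_SERDES_XAUI_HSS_PCS_START, &val) &&
            (val & XG_SERDES_ADDR_XAUI_PWR_DOWN))
                netif_info(qdev, drv, qdev->ndev, "XAUI lanes are powered down\n");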
+
+static void ql_get_both_serdes(struct ql_adapter *qdev, u32 addr,
+                       u32 *direct_ptr, u32 *indirect_ptr,
+                       unsigned int direct_valid, unsigned int indirect_valid)
+{
+       unsigned int status;
+
+       status = 1;
+       if (direct_valid)
+               status = ql_read_serdes_reg(qdev, addr, direct_ptr);
+       /* Dead fill any failures or invalids. */
+       if (status)
+               *direct_ptr = 0xDEADBEEF;
+
+       status = 1;
+       if (indirect_valid)
+               status = ql_read_other_func_serdes_reg(
+                                               qdev, addr, indirect_ptr);
+       /* Dead fill any failures or invalids. */
+       if (status)
+               *indirect_ptr = 0xDEADBEEF;
+}
+
+static int ql_get_serdes_regs(struct ql_adapter *qdev,
+                               struct ql_mpi_coredump *mpi_coredump)
+{
+       int status;
+       unsigned int xfi_direct_valid, xfi_indirect_valid, xaui_direct_valid;
+       unsigned int xaui_indirect_valid, i;
+       u32 *direct_ptr, temp;
+       u32 *indirect_ptr;
+
+       xfi_direct_valid = xfi_indirect_valid = 0;
+       xaui_direct_valid = xaui_indirect_valid = 1;
+
+       /* The XAUI needs to be read out per port */
+       if (qdev->func & 1) {
+               /* We are NIC 2 */
+               status = ql_read_other_func_serdes_reg(qdev,
+                               XG_SERDES_XAUI_HSS_PCS_START, &temp);
+               if (status)
+                       temp = XG_SERDES_ADDR_XAUI_PWR_DOWN;
+               if ((temp & XG_SERDES_ADDR_XAUI_PWR_DOWN) ==
+                                       XG_SERDES_ADDR_XAUI_PWR_DOWN)
+                       xaui_indirect_valid = 0;
+
+               status = ql_read_serdes_reg(qdev,
+                               XG_SERDES_XAUI_HSS_PCS_START, &temp);
+               if (status)
+                       temp = XG_SERDES_ADDR_XAUI_PWR_DOWN;
+
+               if ((temp & XG_SERDES_ADDR_XAUI_PWR_DOWN) ==
+                                       XG_SERDES_ADDR_XAUI_PWR_DOWN)
+                       xaui_direct_valid = 0;
+       } else {
+               /* We are NIC 1 */
+               status = ql_read_other_func_serdes_reg(qdev,
+                               XG_SERDES_XAUI_HSS_PCS_START, &temp);
+               if (status)
+                       temp = XG_SERDES_ADDR_XAUI_PWR_DOWN;
+               if ((temp & XG_SERDES_ADDR_XAUI_PWR_DOWN) ==
+                                       XG_SERDES_ADDR_XAUI_PWR_DOWN)
+                       xaui_indirect_valid = 0;
+
+               status = ql_read_serdes_reg(qdev,
+                               XG_SERDES_XAUI_HSS_PCS_START, &temp);
+               if (status)
+                       temp = XG_SERDES_ADDR_XAUI_PWR_DOWN;
+               if ((temp & XG_SERDES_ADDR_XAUI_PWR_DOWN) ==
+                                       XG_SERDES_ADDR_XAUI_PWR_DOWN)
+                       xaui_direct_valid = 0;
+       }
+
+       /*
+        * The XFI register is shared, so we only need to read it from
+        * one function and then check the bits.
+        */
+       status = ql_read_serdes_reg(qdev, XG_SERDES_ADDR_STS, &temp);
+       if (status)
+               temp = 0;
+
+       if ((temp & XG_SERDES_ADDR_XFI1_PWR_UP) ==
+                                       XG_SERDES_ADDR_XFI1_PWR_UP) {
+               /* now see if i'm NIC 1 or NIC 2 */
+               if (qdev->func & 1)
+                       /* I'm NIC 2, so the indirect (NIC1) xfi is up. */
+                       xfi_indirect_valid = 1;
+               else
+                       xfi_direct_valid = 1;
+       }
+       if ((temp & XG_SERDES_ADDR_XFI2_PWR_UP) ==
+                                       XG_SERDES_ADDR_XFI2_PWR_UP) {
+               /* now see if i'm NIC 1 or NIC 2 */
+               if (qdev->func & 1)
+                       /* I'm NIC 2, so the indirect (NIC1) xfi is up. */
+                       xfi_direct_valid = 1;
+               else
+                       xfi_indirect_valid = 1;
+       }
+
+       /* Get XAUI_AN register block. */
+       if (qdev->func & 1) {
+               /* Function 2 is direct */
+               direct_ptr = mpi_coredump->serdes2_xaui_an;
+               indirect_ptr = mpi_coredump->serdes_xaui_an;
+       } else {
+               /* Function 1 is direct */
+               direct_ptr = mpi_coredump->serdes_xaui_an;
+               indirect_ptr = mpi_coredump->serdes2_xaui_an;
+       }
+
+       for (i = 0; i <= 0x000000034; i += 4, direct_ptr++, indirect_ptr++)
+               ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr,
+                                       xaui_direct_valid, xaui_indirect_valid);
+
+       /* Get XAUI_HSS_PCS register block. */
+       if (qdev->func & 1) {
+               direct_ptr =
+                       mpi_coredump->serdes2_xaui_hss_pcs;
+               indirect_ptr =
+                       mpi_coredump->serdes_xaui_hss_pcs;
+       } else {
+               direct_ptr =
+                       mpi_coredump->serdes_xaui_hss_pcs;
+               indirect_ptr =
+                       mpi_coredump->serdes2_xaui_hss_pcs;
+       }
+
+       for (i = 0x800; i <= 0x880; i += 4, direct_ptr++, indirect_ptr++)
+               ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr,
+                                       xaui_direct_valid, xaui_indirect_valid);
+
+       /* Get XAUI_XFI_AN register block. */
+       if (qdev->func & 1) {
+               direct_ptr = mpi_coredump->serdes2_xfi_an;
+               indirect_ptr = mpi_coredump->serdes_xfi_an;
+       } else {
+               direct_ptr = mpi_coredump->serdes_xfi_an;
+               indirect_ptr = mpi_coredump->serdes2_xfi_an;
+       }
+
+       for (i = 0x1000; i <= 0x1034; i += 4, direct_ptr++, indirect_ptr++)
+               ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr,
+                                       xfi_direct_valid, xfi_indirect_valid);
+
+       /* Get XAUI_XFI_TRAIN register block. */
+       if (qdev->func & 1) {
+               direct_ptr = mpi_coredump->serdes2_xfi_train;
+               indirect_ptr =
+                       mpi_coredump->serdes_xfi_train;
+       } else {
+               direct_ptr = mpi_coredump->serdes_xfi_train;
+               indirect_ptr =
+                       mpi_coredump->serdes2_xfi_train;
+       }
+
+       for (i = 0x1050; i <= 0x107c; i += 4, direct_ptr++, indirect_ptr++)
+               ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr,
+                                       xfi_direct_valid, xfi_indirect_valid);
+
+       /* Get XAUI_XFI_HSS_PCS register block. */
+       if (qdev->func & 1) {
+               direct_ptr =
+                       mpi_coredump->serdes2_xfi_hss_pcs;
+               indirect_ptr =
+                       mpi_coredump->serdes_xfi_hss_pcs;
+       } else {
+               direct_ptr =
+                       mpi_coredump->serdes_xfi_hss_pcs;
+               indirect_ptr =
+                       mpi_coredump->serdes2_xfi_hss_pcs;
+       }
+
+       for (i = 0x1800; i <= 0x1838; i += 4, direct_ptr++, indirect_ptr++)
+               ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr,
+                                       xfi_direct_valid, xfi_indirect_valid);
+
+       /* Get XAUI_XFI_HSS_TX register block. */
+       if (qdev->func & 1) {
+               direct_ptr =
+                       mpi_coredump->serdes2_xfi_hss_tx;
+               indirect_ptr =
+                       mpi_coredump->serdes_xfi_hss_tx;
+       } else {
+               direct_ptr = mpi_coredump->serdes_xfi_hss_tx;
+               indirect_ptr =
+                       mpi_coredump->serdes2_xfi_hss_tx;
+       }
+       for (i = 0x1c00; i <= 0x1c1f; i++, direct_ptr++, indirect_ptr++)
+               ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr,
+                                       xfi_direct_valid, xfi_indirect_valid);
+
+       /* Get XAUI_XFI_HSS_RX register block. */
+       if (qdev->func & 1) {
+               direct_ptr =
+                       mpi_coredump->serdes2_xfi_hss_rx;
+               indirect_ptr =
+                       mpi_coredump->serdes_xfi_hss_rx;
+       } else {
+               direct_ptr = mpi_coredump->serdes_xfi_hss_rx;
+               indirect_ptr =
+                       mpi_coredump->serdes2_xfi_hss_rx;
+       }
+
+       for (i = 0x1c40; i <= 0x1c5f; i++, direct_ptr++, indirect_ptr++)
+               ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr,
+                                       xfi_direct_valid, xfi_indirect_valid);
+
+       /* Get XAUI_XFI_HSS_PLL register block. */
+       if (qdev->func & 1) {
+               direct_ptr =
+                       mpi_coredump->serdes2_xfi_hss_pll;
+               indirect_ptr =
+                       mpi_coredump->serdes_xfi_hss_pll;
+       } else {
+               direct_ptr =
+                       mpi_coredump->serdes_xfi_hss_pll;
+               indirect_ptr =
+                       mpi_coredump->serdes2_xfi_hss_pll;
+       }
+       for (i = 0x1e00; i <= 0x1e1f; i++, direct_ptr++, indirect_ptr++)
+               ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr,
+                                       xfi_direct_valid, xfi_indirect_valid);
+       return 0;
+}
+
+static int ql_read_other_func_xgmac_reg(struct ql_adapter *qdev, u32 reg,
+                                                       u32 *data)
+{
+       int status = 0;
+
+       /* wait for reg to come ready */
+       status = ql_wait_other_func_reg_rdy(qdev, XGMAC_ADDR / 4,
+                                               XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
+       if (status)
+               goto exit;
+
+       /* set up for reg read */
+       ql_write_other_func_reg(qdev, XGMAC_ADDR / 4, reg | XGMAC_ADDR_R);
+
+       /* wait for reg to come ready */
+       status = ql_wait_other_func_reg_rdy(qdev, XGMAC_ADDR / 4,
+                                               XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
+       if (status)
+               goto exit;
+
+       /* get the data */
+       *data = ql_read_other_func_reg(qdev, XGMAC_DATA / 4);
+exit:
+       return status;
+}
+
+/* Read the 400 xgmac control/statistics registers
+ * skipping unused locations.
+ */
+static int ql_get_xgmac_regs(struct ql_adapter *qdev, u32 *buf,
+                                       unsigned int other_function)
+{
+       int status = 0;
+       int i;
+
+       for (i = PAUSE_SRC_LO; i < XGMAC_REGISTER_END; i += 4, buf++) {
+               /* We're reading 400 xgmac registers, but we filter out
+                * several locations that are non-responsive to reads.
+                */
+               if ((i == 0x00000114) ||
+                       (i == 0x00000118) ||
+                       (i == 0x0000013c) ||
+                       (i == 0x00000140) ||
+                       (i > 0x00000150 && i < 0x000001fc) ||
+                       (i > 0x00000278 && i < 0x000002a0) ||
+                       (i > 0x000002c0 && i < 0x000002cf) ||
+                       (i > 0x000002dc && i < 0x000002f0) ||
+                       (i > 0x000003c8 && i < 0x00000400) ||
+                       (i > 0x00000400 && i < 0x00000410) ||
+                       (i > 0x00000410 && i < 0x00000420) ||
+                       (i > 0x00000420 && i < 0x00000430) ||
+                       (i > 0x00000430 && i < 0x00000440) ||
+                       (i > 0x00000440 && i < 0x00000450) ||
+                       (i > 0x00000450 && i < 0x00000500) ||
+                       (i > 0x0000054c && i < 0x00000568) ||
+                       (i > 0x000005c8 && i < 0x00000600))
+                       *buf = 0xdeadbeef;
+               else if (other_function)
+                       status = ql_read_other_func_xgmac_reg(qdev, i, buf);
+               else
+                       status = ql_read_xgmac_reg(qdev, i, buf);
+
+               if (status)
+                       break;
+       }
+       return status;
+}
+
+static int ql_get_ets_regs(struct ql_adapter *qdev, u32 *buf)
+{
+       int status = 0;
+       int i;
+
+       for (i = 0; i < 8; i++, buf++) {
+               ql_write32(qdev, NIC_ETS, i << 29 | 0x08000000);
+               *buf = ql_read32(qdev, NIC_ETS);
+       }
+
+       for (i = 0; i < 2; i++, buf++) {
+               ql_write32(qdev, CNA_ETS, i << 29 | 0x08000000);
+               *buf = ql_read32(qdev, CNA_ETS);
+       }
+
+       return status;
+}
+
+static void ql_get_intr_states(struct ql_adapter *qdev, u32 *buf)
+{
+       int i;
+
+       for (i = 0; i < qdev->rx_ring_count; i++, buf++) {
+               ql_write32(qdev, INTR_EN,
+                               qdev->intr_context[i].intr_read_mask);
+               *buf = ql_read32(qdev, INTR_EN);
+       }
+}
+
+static int ql_get_cam_entries(struct ql_adapter *qdev, u32 *buf)
+{
+       int i, status;
+       u32 value[3];
+
+       status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
+       if (status)
+               return status;
+
+       for (i = 0; i < 16; i++) {
+               status = ql_get_mac_addr_reg(qdev,
+                                       MAC_ADDR_TYPE_CAM_MAC, i, value);
+               if (status) {
+                       netif_err(qdev, drv, qdev->ndev,
+                                 "Failed read of mac index register\n");
+                       goto err;
+               }
+               *buf++ = value[0];      /* lower MAC address */
+               *buf++ = value[1];      /* upper MAC address */
+               *buf++ = value[2];      /* output */
+       }
+       for (i = 0; i < 32; i++) {
+               status = ql_get_mac_addr_reg(qdev,
+                                       MAC_ADDR_TYPE_MULTI_MAC, i, value);
+               if (status) {
+                       netif_err(qdev, drv, qdev->ndev,
+                                 "Failed read of mac index register\n");
+                       goto err;
+               }
+               *buf++ = value[0];      /* lower Mcast address */
+               *buf++ = value[1];      /* upper Mcast address */
+       }
+err:
+       ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
+       return status;
+}
+
+static int ql_get_routing_entries(struct ql_adapter *qdev, u32 *buf)
+{
+       int status;
+       u32 value, i;
+
+       status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
+       if (status)
+               return status;
+
+       for (i = 0; i < 16; i++) {
+               status = ql_get_routing_reg(qdev, i, &value);
+               if (status) {
+                       netif_err(qdev, drv, qdev->ndev,
+                                 "Failed read of routing index register\n");
+                       goto err;
+               } else {
+                       *buf++ = value;
+               }
+       }
+err:
+       ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
+       return status;
+}
+
+/* Read the MPI Processor shadow registers */
+static int ql_get_mpi_shadow_regs(struct ql_adapter *qdev, u32 *buf)
+{
+       u32 i;
+       int status;
+
+       for (i = 0; i < MPI_CORE_SH_REGS_CNT; i++, buf++) {
+               status = ql_write_mpi_reg(qdev, RISC_124,
+                               (SHADOW_OFFSET | i << SHADOW_REG_SHIFT));
+               if (status)
+                       goto end;
+               status = ql_read_mpi_reg(qdev, RISC_127, buf);
+               if (status)
+                       goto end;
+       }
+end:
+       return status;
+}
+
+/* Read the MPI Processor core registers */
+static int ql_get_mpi_regs(struct ql_adapter *qdev, u32 *buf,
+                               u32 offset, u32 count)
+{
+       int i, status = 0;
+       for (i = 0; i < count; i++, buf++) {
+               status = ql_read_mpi_reg(qdev, offset + i, buf);
+               if (status)
+                       return status;
+       }
+       return status;
+}
+
+/* Read the ASIC probe dump */
+static unsigned int *ql_get_probe(struct ql_adapter *qdev, u32 clock,
+                                       u32 valid, u32 *buf)
+{
+       u32 module, mux_sel, probe, lo_val, hi_val;
+
+       for (module = 0; module < PRB_MX_ADDR_MAX_MODS; module++) {
+               if (!((valid >> module) & 1))
+                       continue;
+               for (mux_sel = 0; mux_sel < PRB_MX_ADDR_MAX_MUX; mux_sel++) {
+                       probe = clock
+                               | PRB_MX_ADDR_ARE
+                               | mux_sel
+                               | (module << PRB_MX_ADDR_MOD_SEL_SHIFT);
+                       ql_write32(qdev, PRB_MX_ADDR, probe);
+                       lo_val = ql_read32(qdev, PRB_MX_DATA);
+                       if (mux_sel == 0) {
+                               *buf = probe;
+                               buf++;
+                       }
+                       probe |= PRB_MX_ADDR_UP;
+                       ql_write32(qdev, PRB_MX_ADDR, probe);
+                       hi_val = ql_read32(qdev, PRB_MX_DATA);
+                       *buf = lo_val;
+                       buf++;
+                       *buf = hi_val;
+                       buf++;
+               }
+       }
+       return buf;
+}
+
+static int ql_get_probe_dump(struct ql_adapter *qdev, unsigned int *buf)
+{
+       /* First we have to enable the probe mux */
+       ql_write_mpi_reg(qdev, MPI_TEST_FUNC_PRB_CTL, MPI_TEST_FUNC_PRB_EN);
+       buf = ql_get_probe(qdev, PRB_MX_ADDR_SYS_CLOCK,
+                       PRB_MX_ADDR_VALID_SYS_MOD, buf);
+       buf = ql_get_probe(qdev, PRB_MX_ADDR_PCI_CLOCK,
+                       PRB_MX_ADDR_VALID_PCI_MOD, buf);
+       buf = ql_get_probe(qdev, PRB_MX_ADDR_XGM_CLOCK,
+                       PRB_MX_ADDR_VALID_XGM_MOD, buf);
+       buf = ql_get_probe(qdev, PRB_MX_ADDR_FC_CLOCK,
+                       PRB_MX_ADDR_VALID_FC_MOD, buf);
+       return 0;
+}
+
+/* Read out the routing index registers */
+static int ql_get_routing_index_registers(struct ql_adapter *qdev, u32 *buf)
+{
+       int status;
+       u32 type, index, index_max;
+       u32 result_index;
+       u32 result_data;
+       u32 val;
+
+       status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
+       if (status)
+               return status;
+
+       for (type = 0; type < 4; type++) {
+               if (type < 2)
+                       index_max = 8;
+               else
+                       index_max = 16;
+               for (index = 0; index < index_max; index++) {
+                       val = RT_IDX_RS
+                               | (type << RT_IDX_TYPE_SHIFT)
+                               | (index << RT_IDX_IDX_SHIFT);
+                       ql_write32(qdev, RT_IDX, val);
+                       result_index = 0;
+                       while ((result_index & RT_IDX_MR) == 0)
+                               result_index = ql_read32(qdev, RT_IDX);
+                       result_data = ql_read32(qdev, RT_DATA);
+                       *buf = type;
+                       buf++;
+                       *buf = index;
+                       buf++;
+                       *buf = result_index;
+                       buf++;
+                       *buf = result_data;
+                       buf++;
+               }
+       }
+       ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
+       return status;
+}
+
+/* Read out the MAC protocol registers */
+static void ql_get_mac_protocol_registers(struct ql_adapter *qdev, u32 *buf)
+{
+       u32 result_index, result_data;
+       u32 type;
+       u32 index;
+       u32 offset;
+       u32 val;
+       u32 initial_val = MAC_ADDR_RS;
+       u32 max_index;
+       u32 max_offset;
+
+       for (type = 0; type < MAC_ADDR_TYPE_COUNT; type++) {
+               switch (type) {
+
+               case 0: /* CAM */
+                       initial_val |= MAC_ADDR_ADR;
+                       max_index = MAC_ADDR_MAX_CAM_ENTRIES;
+                       max_offset = MAC_ADDR_MAX_CAM_WCOUNT;
+                       break;
+               case 1: /* Multicast MAC Address */
+                       max_index = MAC_ADDR_MAX_CAM_WCOUNT;
+                       max_offset = MAC_ADDR_MAX_CAM_WCOUNT;
+                       break;
+               case 2: /* VLAN filter mask */
+               case 3: /* MC filter mask */
+                       max_index = MAC_ADDR_MAX_CAM_WCOUNT;
+                       max_offset = MAC_ADDR_MAX_CAM_WCOUNT;
+                       break;
+               case 4: /* FC MAC addresses */
+                       max_index = MAC_ADDR_MAX_FC_MAC_ENTRIES;
+                       max_offset = MAC_ADDR_MAX_FC_MAC_WCOUNT;
+                       break;
+               case 5: /* Mgmt MAC addresses */
+                       max_index = MAC_ADDR_MAX_MGMT_MAC_ENTRIES;
+                       max_offset = MAC_ADDR_MAX_MGMT_MAC_WCOUNT;
+                       break;
+               case 6: /* Mgmt VLAN addresses */
+                       max_index = MAC_ADDR_MAX_MGMT_VLAN_ENTRIES;
+                       max_offset = MAC_ADDR_MAX_MGMT_VLAN_WCOUNT;
+                       break;
+               case 7: /* Mgmt IPv4 address */
+                       max_index = MAC_ADDR_MAX_MGMT_V4_ENTRIES;
+                       max_offset = MAC_ADDR_MAX_MGMT_V4_WCOUNT;
+                       break;
+               case 8: /* Mgmt IPv6 address */
+                       max_index = MAC_ADDR_MAX_MGMT_V6_ENTRIES;
+                       max_offset = MAC_ADDR_MAX_MGMT_V6_WCOUNT;
+                       break;
+               case 9: /* Mgmt TCP/UDP Dest port */
+                       max_index = MAC_ADDR_MAX_MGMT_TU_DP_ENTRIES;
+                       max_offset = MAC_ADDR_MAX_MGMT_TU_DP_WCOUNT;
+                       break;
+               default:
+                       pr_err("Bad type!!! 0x%08x\n", type);
+                       max_index = 0;
+                       max_offset = 0;
+                       break;
+               }
+               for (index = 0; index < max_index; index++) {
+                       for (offset = 0; offset < max_offset; offset++) {
+                               val = initial_val
+                                       | (type << MAC_ADDR_TYPE_SHIFT)
+                                       | (index << MAC_ADDR_IDX_SHIFT)
+                                       | (offset);
+                               ql_write32(qdev, MAC_ADDR_IDX, val);
+                               result_index = 0;
+                               while ((result_index & MAC_ADDR_MR) == 0) {
+                                       result_index = ql_read32(qdev,
+                                                               MAC_ADDR_IDX);
+                               }
+                               result_data = ql_read32(qdev, MAC_ADDR_DATA);
+                               *buf = result_index;
+                               buf++;
+                               *buf = result_data;
+                               buf++;
+                       }
+               }
+       }
+}
+
+static void ql_get_sem_registers(struct ql_adapter *qdev, u32 *buf)
+{
+       u32 func_num, reg, reg_val;
+       int status;
+
+       for (func_num = 0; func_num < MAX_SEMAPHORE_FUNCTIONS ; func_num++) {
+               reg = MPI_NIC_REG_BLOCK
+                       | (func_num << MPI_NIC_FUNCTION_SHIFT)
+                       | (SEM / 4);
+               status = ql_read_mpi_reg(qdev, reg, &reg_val);
+               *buf = reg_val;
+               /* if the read failed then dead fill the element. */
+               if (status)
+                       *buf = 0xdeadbeef;
+               buf++;
+       }
+}
+
+/* Create a coredump segment header */
+static void ql_build_coredump_seg_header(
+               struct mpi_coredump_segment_header *seg_hdr,
+               u32 seg_number, u32 seg_size, u8 *desc)
+{
+       memset(seg_hdr, 0, sizeof(struct mpi_coredump_segment_header));
+       seg_hdr->cookie = MPI_COREDUMP_COOKIE;
+       seg_hdr->segNum = seg_number;
+       seg_hdr->segSize = seg_size;
+       memcpy(seg_hdr->description, desc, (sizeof(seg_hdr->description)) - 1);
+}
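The dump image is a global header followed by a sequence of such segments. A minimal sketch, assuming only the cookie and layout built above, of how a post-processing tool could walk the segments of a saved image of image_len bytes starting at image:

        u8 *p = image + sizeof(struct mpi_coredump_global_header);

        while (p + sizeof(struct mpi_coredump_segment_header) <= image + image_len) {
                struct mpi_coredump_segment_header *seg = (void *)p;

                if (seg->cookie != MPI_COREDUMP_COOKIE)
                        break;                  /* end of data or corrupt image */
                pr_info("segment %u: %s (%u bytes)\n",
                        seg->segNum, seg->description, seg->segSize);
                p += seg->segSize;              /* segSize includes this header */
        }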
+
+/*
+ * This function should be called when a coredump / probedump
+ * is to be extracted from the HBA. It is assumed there is a
+ * qdev structure that contains the base address of the register
+ * space for this function as well as a coredump structure that
+ * will contain the dump.
+ */
+int ql_core_dump(struct ql_adapter *qdev, struct ql_mpi_coredump *mpi_coredump)
+{
+       int status;
+       int i;
+
+       if (!mpi_coredump) {
+               netif_err(qdev, drv, qdev->ndev, "No memory allocated\n");
+               return -EINVAL;
+       }
+
+       /* Try to get the spinlock, but don't worry if
+        * it isn't available.  If the firmware died it
+        * might be holding the sem.
+        */
+       ql_sem_spinlock(qdev, SEM_PROC_REG_MASK);
+
+       status = ql_pause_mpi_risc(qdev);
+       if (status) {
+               netif_err(qdev, drv, qdev->ndev,
+                         "Failed RISC pause. Status = 0x%.08x\n", status);
+               goto err;
+       }
+
+       /* Insert the global header */
+       memset(&(mpi_coredump->mpi_global_header), 0,
+               sizeof(struct mpi_coredump_global_header));
+       mpi_coredump->mpi_global_header.cookie = MPI_COREDUMP_COOKIE;
+       mpi_coredump->mpi_global_header.headerSize =
+               sizeof(struct mpi_coredump_global_header);
+       mpi_coredump->mpi_global_header.imageSize =
+               sizeof(struct ql_mpi_coredump);
+       memcpy(mpi_coredump->mpi_global_header.idString, "MPI Coredump",
+               sizeof(mpi_coredump->mpi_global_header.idString));
+
+       /* Get generic NIC reg dump */
+       ql_build_coredump_seg_header(&mpi_coredump->nic_regs_seg_hdr,
+                       NIC1_CONTROL_SEG_NUM,
+                       sizeof(struct mpi_coredump_segment_header) +
+                       sizeof(mpi_coredump->nic_regs), "NIC1 Registers");
+
+       ql_build_coredump_seg_header(&mpi_coredump->nic2_regs_seg_hdr,
+                       NIC2_CONTROL_SEG_NUM,
+                       sizeof(struct mpi_coredump_segment_header) +
+                       sizeof(mpi_coredump->nic2_regs), "NIC2 Registers");
+
+       /* Get XGMac registers. (Segment 18, Rev C. step 21) */
+       ql_build_coredump_seg_header(&mpi_coredump->xgmac1_seg_hdr,
+                       NIC1_XGMAC_SEG_NUM,
+                       sizeof(struct mpi_coredump_segment_header) +
+                       sizeof(mpi_coredump->xgmac1), "NIC1 XGMac Registers");
+
+       ql_build_coredump_seg_header(&mpi_coredump->xgmac2_seg_hdr,
+                       NIC2_XGMAC_SEG_NUM,
+                       sizeof(struct mpi_coredump_segment_header) +
+                       sizeof(mpi_coredump->xgmac2), "NIC2 XGMac Registers");
+
+       if (qdev->func & 1) {
+               /* Odd means our function is NIC 2 */
+               for (i = 0; i < NIC_REGS_DUMP_WORD_COUNT; i++)
+                       mpi_coredump->nic2_regs[i] =
+                                        ql_read32(qdev, i * sizeof(u32));
+
+               for (i = 0; i < NIC_REGS_DUMP_WORD_COUNT; i++)
+                       mpi_coredump->nic_regs[i] =
+                       ql_read_other_func_reg(qdev, (i * sizeof(u32)) / 4);
+
+               ql_get_xgmac_regs(qdev, &mpi_coredump->xgmac2[0], 0);
+               ql_get_xgmac_regs(qdev, &mpi_coredump->xgmac1[0], 1);
+       } else {
+               /* Even means our function is NIC 1 */
+               for (i = 0; i < NIC_REGS_DUMP_WORD_COUNT; i++)
+                       mpi_coredump->nic_regs[i] =
+                                       ql_read32(qdev, i * sizeof(u32));
+               for (i = 0; i < NIC_REGS_DUMP_WORD_COUNT; i++)
+                       mpi_coredump->nic2_regs[i] =
+                       ql_read_other_func_reg(qdev, (i * sizeof(u32)) / 4);
+
+               ql_get_xgmac_regs(qdev, &mpi_coredump->xgmac1[0], 0);
+               ql_get_xgmac_regs(qdev, &mpi_coredump->xgmac2[0], 1);
+       }
+
+       /* Rev C. Step 20a */
+       ql_build_coredump_seg_header(&mpi_coredump->xaui_an_hdr,
+                       XAUI_AN_SEG_NUM,
+                       sizeof(struct mpi_coredump_segment_header) +
+                       sizeof(mpi_coredump->serdes_xaui_an),
+                       "XAUI AN Registers");
+
+       /* Rev C. Step 20b */
+       ql_build_coredump_seg_header(&mpi_coredump->xaui_hss_pcs_hdr,
+                       XAUI_HSS_PCS_SEG_NUM,
+                       sizeof(struct mpi_coredump_segment_header) +
+                       sizeof(mpi_coredump->serdes_xaui_hss_pcs),
+                       "XAUI HSS PCS Registers");
+
+       ql_build_coredump_seg_header(&mpi_coredump->xfi_an_hdr, XFI_AN_SEG_NUM,
+                       sizeof(struct mpi_coredump_segment_header) +
+                       sizeof(mpi_coredump->serdes_xfi_an),
+                       "XFI AN Registers");
+
+       ql_build_coredump_seg_header(&mpi_coredump->xfi_train_hdr,
+                       XFI_TRAIN_SEG_NUM,
+                       sizeof(struct mpi_coredump_segment_header) +
+                       sizeof(mpi_coredump->serdes_xfi_train),
+                       "XFI TRAIN Registers");
+
+       ql_build_coredump_seg_header(&mpi_coredump->xfi_hss_pcs_hdr,
+                       XFI_HSS_PCS_SEG_NUM,
+                       sizeof(struct mpi_coredump_segment_header) +
+                       sizeof(mpi_coredump->serdes_xfi_hss_pcs),
+                       "XFI HSS PCS Registers");
+
+       ql_build_coredump_seg_header(&mpi_coredump->xfi_hss_tx_hdr,
+                       XFI_HSS_TX_SEG_NUM,
+                       sizeof(struct mpi_coredump_segment_header) +
+                       sizeof(mpi_coredump->serdes_xfi_hss_tx),
+                       "XFI HSS TX Registers");
+
+       ql_build_coredump_seg_header(&mpi_coredump->xfi_hss_rx_hdr,
+                       XFI_HSS_RX_SEG_NUM,
+                       sizeof(struct mpi_coredump_segment_header) +
+                       sizeof(mpi_coredump->serdes_xfi_hss_rx),
+                       "XFI HSS RX Registers");
+
+       ql_build_coredump_seg_header(&mpi_coredump->xfi_hss_pll_hdr,
+                       XFI_HSS_PLL_SEG_NUM,
+                       sizeof(struct mpi_coredump_segment_header) +
+                       sizeof(mpi_coredump->serdes_xfi_hss_pll),
+                       "XFI HSS PLL Registers");
+
+       ql_build_coredump_seg_header(&mpi_coredump->xaui2_an_hdr,
+                       XAUI2_AN_SEG_NUM,
+                       sizeof(struct mpi_coredump_segment_header) +
+                       sizeof(mpi_coredump->serdes2_xaui_an),
+                       "XAUI2 AN Registers");
+
+       ql_build_coredump_seg_header(&mpi_coredump->xaui2_hss_pcs_hdr,
+                       XAUI2_HSS_PCS_SEG_NUM,
+                       sizeof(struct mpi_coredump_segment_header) +
+                       sizeof(mpi_coredump->serdes2_xaui_hss_pcs),
+                       "XAUI2 HSS PCS Registers");
+
+       ql_build_coredump_seg_header(&mpi_coredump->xfi2_an_hdr,
+                       XFI2_AN_SEG_NUM,
+                       sizeof(struct mpi_coredump_segment_header) +
+                       sizeof(mpi_coredump->serdes2_xfi_an),
+                       "XFI2 AN Registers");
+
+       ql_build_coredump_seg_header(&mpi_coredump->xfi2_train_hdr,
+                       XFI2_TRAIN_SEG_NUM,
+                       sizeof(struct mpi_coredump_segment_header) +
+                       sizeof(mpi_coredump->serdes2_xfi_train),
+                       "XFI2 TRAIN Registers");
+
+       ql_build_coredump_seg_header(&mpi_coredump->xfi2_hss_pcs_hdr,
+                       XFI2_HSS_PCS_SEG_NUM,
+                       sizeof(struct mpi_coredump_segment_header) +
+                       sizeof(mpi_coredump->serdes2_xfi_hss_pcs),
+                       "XFI2 HSS PCS Registers");
+
+       ql_build_coredump_seg_header(&mpi_coredump->xfi2_hss_tx_hdr,
+                       XFI2_HSS_TX_SEG_NUM,
+                       sizeof(struct mpi_coredump_segment_header) +
+                       sizeof(mpi_coredump->serdes2_xfi_hss_tx),
+                       "XFI2 HSS TX Registers");
+
+       ql_build_coredump_seg_header(&mpi_coredump->xfi2_hss_rx_hdr,
+                       XFI2_HSS_RX_SEG_NUM,
+                       sizeof(struct mpi_coredump_segment_header) +
+                       sizeof(mpi_coredump->serdes2_xfi_hss_rx),
+                       "XFI2 HSS RX Registers");
+
+       ql_build_coredump_seg_header(&mpi_coredump->xfi2_hss_pll_hdr,
+                       XFI2_HSS_PLL_SEG_NUM,
+                       sizeof(struct mpi_coredump_segment_header) +
+                       sizeof(mpi_coredump->serdes2_xfi_hss_pll),
+                       "XFI2 HSS PLL Registers");
+
+       status = ql_get_serdes_regs(qdev, mpi_coredump);
+       if (status) {
+               netif_err(qdev, drv, qdev->ndev,
+                         "Failed Dump of Serdes Registers. Status = 0x%.08x\n",
+                         status);
+               goto err;
+       }
+
+       ql_build_coredump_seg_header(&mpi_coredump->core_regs_seg_hdr,
+                               CORE_SEG_NUM,
+                               sizeof(mpi_coredump->core_regs_seg_hdr) +
+                               sizeof(mpi_coredump->mpi_core_regs) +
+                               sizeof(mpi_coredump->mpi_core_sh_regs),
+                               "Core Registers");
+
+       /* Get the MPI Core Registers */
+       status = ql_get_mpi_regs(qdev, &mpi_coredump->mpi_core_regs[0],
+                                MPI_CORE_REGS_ADDR, MPI_CORE_REGS_CNT);
+       if (status)
+               goto err;
+       /* Get the 16 MPI shadow registers */
+       status = ql_get_mpi_shadow_regs(qdev,
+                                       &mpi_coredump->mpi_core_sh_regs[0]);
+       if (status)
+               goto err;
+
+       /* Get the Test Logic Registers */
+       ql_build_coredump_seg_header(&mpi_coredump->test_logic_regs_seg_hdr,
+                               TEST_LOGIC_SEG_NUM,
+                               sizeof(struct mpi_coredump_segment_header)
+                               + sizeof(mpi_coredump->test_logic_regs),
+                               "Test Logic Regs");
+       status = ql_get_mpi_regs(qdev, &mpi_coredump->test_logic_regs[0],
+                                TEST_REGS_ADDR, TEST_REGS_CNT);
+       if (status)
+               goto err;
+
+       /* Get the RMII Registers */
+       ql_build_coredump_seg_header(&mpi_coredump->rmii_regs_seg_hdr,
+                               RMII_SEG_NUM,
+                               sizeof(struct mpi_coredump_segment_header)
+                               + sizeof(mpi_coredump->rmii_regs),
+                               "RMII Registers");
+       status = ql_get_mpi_regs(qdev, &mpi_coredump->rmii_regs[0],
+                                RMII_REGS_ADDR, RMII_REGS_CNT);
+       if (status)
+               goto err;
+
+       /* Get the FCMAC1 Registers */
+       ql_build_coredump_seg_header(&mpi_coredump->fcmac1_regs_seg_hdr,
+                               FCMAC1_SEG_NUM,
+                               sizeof(struct mpi_coredump_segment_header)
+                               + sizeof(mpi_coredump->fcmac1_regs),
+                               "FCMAC1 Registers");
+       status = ql_get_mpi_regs(qdev, &mpi_coredump->fcmac1_regs[0],
+                                FCMAC1_REGS_ADDR, FCMAC_REGS_CNT);
+       if (status)
+               goto err;
+
+       /* Get the FCMAC2 Registers */
+
+       ql_build_coredump_seg_header(&mpi_coredump->fcmac2_regs_seg_hdr,
+                               FCMAC2_SEG_NUM,
+                               sizeof(struct mpi_coredump_segment_header)
+                               + sizeof(mpi_coredump->fcmac2_regs),
+                               "FCMAC2 Registers");
+
+       status = ql_get_mpi_regs(qdev, &mpi_coredump->fcmac2_regs[0],
+                                FCMAC2_REGS_ADDR, FCMAC_REGS_CNT);
+       if (status)
+               goto err;
+
+       /* Get the FC1 MBX Registers */
+       ql_build_coredump_seg_header(&mpi_coredump->fc1_mbx_regs_seg_hdr,
+                               FC1_MBOX_SEG_NUM,
+                               sizeof(struct mpi_coredump_segment_header)
+                               + sizeof(mpi_coredump->fc1_mbx_regs),
+                               "FC1 MBox Regs");
+       status = ql_get_mpi_regs(qdev, &mpi_coredump->fc1_mbx_regs[0],
+                                FC1_MBX_REGS_ADDR, FC_MBX_REGS_CNT);
+       if (status)
+               goto err;
+
+       /* Get the IDE Registers */
+       ql_build_coredump_seg_header(&mpi_coredump->ide_regs_seg_hdr,
+                               IDE_SEG_NUM,
+                               sizeof(struct mpi_coredump_segment_header)
+                               + sizeof(mpi_coredump->ide_regs),
+                               "IDE Registers");
+       status = ql_get_mpi_regs(qdev, &mpi_coredump->ide_regs[0],
+                                IDE_REGS_ADDR, IDE_REGS_CNT);
+       if (status)
+               goto err;
+
+       /* Get the NIC1 MBX Registers */
+       ql_build_coredump_seg_header(&mpi_coredump->nic1_mbx_regs_seg_hdr,
+                               NIC1_MBOX_SEG_NUM,
+                               sizeof(struct mpi_coredump_segment_header)
+                               + sizeof(mpi_coredump->nic1_mbx_regs),
+                               "NIC1 MBox Regs");
+       status = ql_get_mpi_regs(qdev, &mpi_coredump->nic1_mbx_regs[0],
+                                NIC1_MBX_REGS_ADDR, NIC_MBX_REGS_CNT);
+       if (status)
+               goto err;
+
+       /* Get the SMBus Registers */
+       ql_build_coredump_seg_header(&mpi_coredump->smbus_regs_seg_hdr,
+                               SMBUS_SEG_NUM,
+                               sizeof(struct mpi_coredump_segment_header)
+                               + sizeof(mpi_coredump->smbus_regs),
+                               "SMBus Registers");
+       status = ql_get_mpi_regs(qdev, &mpi_coredump->smbus_regs[0],
+                                SMBUS_REGS_ADDR, SMBUS_REGS_CNT);
+       if (status)
+               goto err;
+
+       /* Get the FC2 MBX Registers */
+       ql_build_coredump_seg_header(&mpi_coredump->fc2_mbx_regs_seg_hdr,
+                               FC2_MBOX_SEG_NUM,
+                               sizeof(struct mpi_coredump_segment_header)
+                               + sizeof(mpi_coredump->fc2_mbx_regs),
+                               "FC2 MBox Regs");
+       status = ql_get_mpi_regs(qdev, &mpi_coredump->fc2_mbx_regs[0],
+                                FC2_MBX_REGS_ADDR, FC_MBX_REGS_CNT);
+       if (status)
+               goto err;
+
+       /* Get the NIC2 MBX Registers */
+       ql_build_coredump_seg_header(&mpi_coredump->nic2_mbx_regs_seg_hdr,
+                               NIC2_MBOX_SEG_NUM,
+                               sizeof(struct mpi_coredump_segment_header)
+                               + sizeof(mpi_coredump->nic2_mbx_regs),
+                               "NIC2 MBox Regs");
+       status = ql_get_mpi_regs(qdev, &mpi_coredump->nic2_mbx_regs[0],
+                                NIC2_MBX_REGS_ADDR, NIC_MBX_REGS_CNT);
+       if (status)
+               goto err;
+
+       /* Get the I2C Registers */
+       ql_build_coredump_seg_header(&mpi_coredump->i2c_regs_seg_hdr,
+                               I2C_SEG_NUM,
+                               sizeof(struct mpi_coredump_segment_header)
+                               + sizeof(mpi_coredump->i2c_regs),
+                               "I2C Registers");
+       status = ql_get_mpi_regs(qdev, &mpi_coredump->i2c_regs[0],
+                                I2C_REGS_ADDR, I2C_REGS_CNT);
+       if (status)
+               goto err;
+
+       /* Get the MEMC Registers */
+       ql_build_coredump_seg_header(&mpi_coredump->memc_regs_seg_hdr,
+                               MEMC_SEG_NUM,
+                               sizeof(struct mpi_coredump_segment_header)
+                               + sizeof(mpi_coredump->memc_regs),
+                               "MEMC Registers");
+       status = ql_get_mpi_regs(qdev, &mpi_coredump->memc_regs[0],
+                                MEMC_REGS_ADDR, MEMC_REGS_CNT);
+       if (status)
+               goto err;
+
+       /* Get the PBus Registers */
+       ql_build_coredump_seg_header(&mpi_coredump->pbus_regs_seg_hdr,
+                               PBUS_SEG_NUM,
+                               sizeof(struct mpi_coredump_segment_header)
+                               + sizeof(mpi_coredump->pbus_regs),
+                               "PBUS Registers");
+       status = ql_get_mpi_regs(qdev, &mpi_coredump->pbus_regs[0],
+                                PBUS_REGS_ADDR, PBUS_REGS_CNT);
+       if (status)
+               goto err;
+
+       /* Get the MDE Registers */
+       ql_build_coredump_seg_header(&mpi_coredump->mde_regs_seg_hdr,
+                               MDE_SEG_NUM,
+                               sizeof(struct mpi_coredump_segment_header)
+                               + sizeof(mpi_coredump->mde_regs),
+                               "MDE Registers");
+       status = ql_get_mpi_regs(qdev, &mpi_coredump->mde_regs[0],
+                                MDE_REGS_ADDR, MDE_REGS_CNT);
+       if (status)
+               goto err;
+
+       ql_build_coredump_seg_header(&mpi_coredump->misc_nic_seg_hdr,
+                               MISC_NIC_INFO_SEG_NUM,
+                               sizeof(struct mpi_coredump_segment_header)
+                               + sizeof(mpi_coredump->misc_nic_info),
+                               "MISC NIC INFO");
+       mpi_coredump->misc_nic_info.rx_ring_count = qdev->rx_ring_count;
+       mpi_coredump->misc_nic_info.tx_ring_count = qdev->tx_ring_count;
+       mpi_coredump->misc_nic_info.intr_count = qdev->intr_count;
+       mpi_coredump->misc_nic_info.function = qdev->func;
+
+       /* Segment 31 */
+       /* Get indexed register values. */
+       ql_build_coredump_seg_header(&mpi_coredump->intr_states_seg_hdr,
+                               INTR_STATES_SEG_NUM,
+                               sizeof(struct mpi_coredump_segment_header)
+                               + sizeof(mpi_coredump->intr_states),
+                               "INTR States");
+       ql_get_intr_states(qdev, &mpi_coredump->intr_states[0]);
+
+       ql_build_coredump_seg_header(&mpi_coredump->cam_entries_seg_hdr,
+                               CAM_ENTRIES_SEG_NUM,
+                               sizeof(struct mpi_coredump_segment_header)
+                               + sizeof(mpi_coredump->cam_entries),
+                               "CAM Entries");
+       status = ql_get_cam_entries(qdev, &mpi_coredump->cam_entries[0]);
+       if (status)
+               goto err;
+
+       ql_build_coredump_seg_header(&mpi_coredump->nic_routing_words_seg_hdr,
+                               ROUTING_WORDS_SEG_NUM,
+                               sizeof(struct mpi_coredump_segment_header)
+                               + sizeof(mpi_coredump->nic_routing_words),
+                               "Routing Words");
+       status = ql_get_routing_entries(qdev,
+                        &mpi_coredump->nic_routing_words[0]);
+       if (status)
+               goto err;
+
+       /* Segment 34 (Rev C. step 23) */
+       ql_build_coredump_seg_header(&mpi_coredump->ets_seg_hdr,
+                               ETS_SEG_NUM,
+                               sizeof(struct mpi_coredump_segment_header)
+                               + sizeof(mpi_coredump->ets),
+                               "ETS Registers");
+       status = ql_get_ets_regs(qdev, &mpi_coredump->ets[0]);
+       if (status)
+               goto err;
+
+       ql_build_coredump_seg_header(&mpi_coredump->probe_dump_seg_hdr,
+                               PROBE_DUMP_SEG_NUM,
+                               sizeof(struct mpi_coredump_segment_header)
+                               + sizeof(mpi_coredump->probe_dump),
+                               "Probe Dump");
+       ql_get_probe_dump(qdev, &mpi_coredump->probe_dump[0]);
+
+       ql_build_coredump_seg_header(&mpi_coredump->routing_reg_seg_hdr,
+                               ROUTING_INDEX_SEG_NUM,
+                               sizeof(struct mpi_coredump_segment_header)
+                               + sizeof(mpi_coredump->routing_regs),
+                               "Routing Regs");
+       status = ql_get_routing_index_registers(qdev,
+                                       &mpi_coredump->routing_regs[0]);
+       if (status)
+               goto err;
+
+       ql_build_coredump_seg_header(&mpi_coredump->mac_prot_reg_seg_hdr,
+                               MAC_PROTOCOL_SEG_NUM,
+                               sizeof(struct mpi_coredump_segment_header)
+                               + sizeof(mpi_coredump->mac_prot_regs),
+                               "MAC Prot Regs");
+       ql_get_mac_protocol_registers(qdev, &mpi_coredump->mac_prot_regs[0]);
+
+       /* Get the semaphore registers for all 5 functions */
+       ql_build_coredump_seg_header(&mpi_coredump->sem_regs_seg_hdr,
+                       SEM_REGS_SEG_NUM,
+                       sizeof(struct mpi_coredump_segment_header) +
+                       sizeof(mpi_coredump->sem_regs), "Sem Registers");
+
+       ql_get_sem_registers(qdev, &mpi_coredump->sem_regs[0]);
+
+       /* Prevent the MPI from restarting while we dump the memory. */
+       ql_write_mpi_reg(qdev, MPI_TEST_FUNC_RST_STS, MPI_TEST_FUNC_RST_FRC);
+
+       /* clear the pause */
+       status = ql_unpause_mpi_risc(qdev);
+       if (status) {
+               netif_err(qdev, drv, qdev->ndev,
+                         "Failed RISC unpause. Status = 0x%.08x\n", status);
+               goto err;
+       }
+
+       /* Reset the RISC so we can dump RAM */
+       status = ql_hard_reset_mpi_risc(qdev);
+       if (status) {
+               netif_err(qdev, drv, qdev->ndev,
+                         "Failed RISC reset. Status = 0x%.08x\n", status);
+               goto err;
+       }
+
+       ql_build_coredump_seg_header(&mpi_coredump->code_ram_seg_hdr,
+                               WCS_RAM_SEG_NUM,
+                               sizeof(struct mpi_coredump_segment_header)
+                               + sizeof(mpi_coredump->code_ram),
+                               "WCS RAM");
+       status = ql_dump_risc_ram_area(qdev, &mpi_coredump->code_ram[0],
+                                       CODE_RAM_ADDR, CODE_RAM_CNT);
+       if (status) {
+               netif_err(qdev, drv, qdev->ndev,
+                         "Failed Dump of CODE RAM. Status = 0x%.08x\n",
+                         status);
+               goto err;
+       }
+
+       /* Insert the segment header */
+       ql_build_coredump_seg_header(&mpi_coredump->memc_ram_seg_hdr,
+                               MEMC_RAM_SEG_NUM,
+                               sizeof(struct mpi_coredump_segment_header)
+                               + sizeof(mpi_coredump->memc_ram),
+                               "MEMC RAM");
+       status = ql_dump_risc_ram_area(qdev, &mpi_coredump->memc_ram[0],
+                                       MEMC_RAM_ADDR, MEMC_RAM_CNT);
+       if (status) {
+               netif_err(qdev, drv, qdev->ndev,
+                         "Failed Dump of MEMC RAM. Status = 0x%.08x\n",
+                         status);
+               goto err;
+       }
+err:
+       ql_sem_unlock(qdev, SEM_PROC_REG_MASK); /* does flush too */
+       return status;
+}
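A sketch of how a caller could capture a dump on demand with this routine; buffer handling is illustrative, and vmalloc() (from <linux/vmalloc.h>) is assumed because the coredump structure is large:

        struct ql_mpi_coredump *dump = vmalloc(sizeof(*dump));

        if (dump) {
                if (!ql_core_dump(qdev, dump))
                        ql_soft_reset_mpi_risc(qdev);   /* RISC was hard-reset to dump its RAM */
                vfree(dump);
        }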
+
+static void ql_get_core_dump(struct ql_adapter *qdev)
+{
+       if (!ql_own_firmware(qdev)) {
+               netif_err(qdev, drv, qdev->ndev, "Don't own firmware!\n");
+               return;
+       }
+
+       if (!netif_running(qdev->ndev)) {
+               netif_err(qdev, ifup, qdev->ndev,
+                         "Force Coredump can only be done from interface that is up\n");
+               return;
+       }
+       ql_queue_fw_error(qdev);
+}
+
+static void ql_gen_reg_dump(struct ql_adapter *qdev,
+                           struct ql_reg_dump *mpi_coredump)
+{
+       int i, status;
+
+       memset(&(mpi_coredump->mpi_global_header), 0,
+               sizeof(struct mpi_coredump_global_header));
+       mpi_coredump->mpi_global_header.cookie = MPI_COREDUMP_COOKIE;
+       mpi_coredump->mpi_global_header.headerSize =
+               sizeof(struct mpi_coredump_global_header);
+       mpi_coredump->mpi_global_header.imageSize =
+               sizeof(struct ql_reg_dump);
+       memcpy(mpi_coredump->mpi_global_header.idString, "MPI Coredump",
+               sizeof(mpi_coredump->mpi_global_header.idString));
+
+       /* segment 16 */
+       ql_build_coredump_seg_header(&mpi_coredump->misc_nic_seg_hdr,
+                               MISC_NIC_INFO_SEG_NUM,
+                               sizeof(struct mpi_coredump_segment_header)
+                               + sizeof(mpi_coredump->misc_nic_info),
+                               "MISC NIC INFO");
+       mpi_coredump->misc_nic_info.rx_ring_count = qdev->rx_ring_count;
+       mpi_coredump->misc_nic_info.tx_ring_count = qdev->tx_ring_count;
+       mpi_coredump->misc_nic_info.intr_count = qdev->intr_count;
+       mpi_coredump->misc_nic_info.function = qdev->func;
+
+       /* Segment 16, Rev C. Step 18 */
+       ql_build_coredump_seg_header(&mpi_coredump->nic_regs_seg_hdr,
+                               NIC1_CONTROL_SEG_NUM,
+                               sizeof(struct mpi_coredump_segment_header)
+                               + sizeof(mpi_coredump->nic_regs),
+                               "NIC Registers");
+       /* Get generic reg dump */
+       for (i = 0; i < 64; i++)
+               mpi_coredump->nic_regs[i] = ql_read32(qdev, i * sizeof(u32));
+
+       /* Segment 31 */
+       /* Get indexed register values. */
+       ql_build_coredump_seg_header(&mpi_coredump->intr_states_seg_hdr,
+                               INTR_STATES_SEG_NUM,
+                               sizeof(struct mpi_coredump_segment_header)
+                               + sizeof(mpi_coredump->intr_states),
+                               "INTR States");
+       ql_get_intr_states(qdev, &mpi_coredump->intr_states[0]);
+
+       ql_build_coredump_seg_header(&mpi_coredump->cam_entries_seg_hdr,
+                               CAM_ENTRIES_SEG_NUM,
+                               sizeof(struct mpi_coredump_segment_header)
+                               + sizeof(mpi_coredump->cam_entries),
+                               "CAM Entries");
+       status = ql_get_cam_entries(qdev, &mpi_coredump->cam_entries[0]);
+       if (status)
+               return;
+
+       ql_build_coredump_seg_header(&mpi_coredump->nic_routing_words_seg_hdr,
+                               ROUTING_WORDS_SEG_NUM,
+                               sizeof(struct mpi_coredump_segment_header)
+                               + sizeof(mpi_coredump->nic_routing_words),
+                               "Routing Words");
+       status = ql_get_routing_entries(qdev,
+                        &mpi_coredump->nic_routing_words[0]);
+       if (status)
+               return;
+
+       /* Segment 34 (Rev C. step 23) */
+       ql_build_coredump_seg_header(&mpi_coredump->ets_seg_hdr,
+                               ETS_SEG_NUM,
+                               sizeof(struct mpi_coredump_segment_header)
+                               + sizeof(mpi_coredump->ets),
+                               "ETS Registers");
+       status = ql_get_ets_regs(qdev, &mpi_coredump->ets[0]);
+       if (status)
+               return;
+}
+
+void ql_get_dump(struct ql_adapter *qdev, void *buff)
+{
+       /*
+        * If forced coredump (QL_FRC_COREDUMP) is not set, take a complete
+        * core dump straight into the user's buffer and then soft-reset the
+        * MPI RISC.  If it is set, copy only a snapshot of the general
+        * registers to the user's buffer and trigger a firmware core dump,
+        * which is spooled to the message log by ql_mpi_core_to_log().
+        */
+
+       if (!test_bit(QL_FRC_COREDUMP, &qdev->flags)) {
+               if (!ql_core_dump(qdev, buff))
+                       ql_soft_reset_mpi_risc(qdev);
+               else
+                       netif_err(qdev, drv, qdev->ndev, "coredump failed!\n");
+       } else {
+               ql_gen_reg_dump(qdev, buff);
+               ql_get_core_dump(qdev);
+       }
+}
+
+/* Coredump to messages log file using separate worker thread */
+void ql_mpi_core_to_log(struct work_struct *work)
+{
+       struct ql_adapter *qdev =
+               container_of(work, struct ql_adapter, mpi_core_to_log.work);
+       u32 *tmp, count;
+       int i;
+
+       count = sizeof(struct ql_mpi_coredump) / sizeof(u32);
+       tmp = (u32 *)qdev->mpi_coredump;
+       netif_printk(qdev, drv, KERN_DEBUG, qdev->ndev,
+                    "Core is dumping to log file!\n");
+
+       for (i = 0; i < count; i += 8) {
+               pr_err("%.08x: %.08x %.08x %.08x %.08x %.08x "
+                       "%.08x %.08x %.08x\n", i,
+                       tmp[i + 0],
+                       tmp[i + 1],
+                       tmp[i + 2],
+                       tmp[i + 3],
+                       tmp[i + 4],
+                       tmp[i + 5],
+                       tmp[i + 6],
+                       tmp[i + 7]);
+               msleep(5);
+       }
+}
+
+#ifdef QL_REG_DUMP
+static void ql_dump_intr_states(struct ql_adapter *qdev)
+{
+       int i;
+       u32 value;
+       for (i = 0; i < qdev->intr_count; i++) {
+               ql_write32(qdev, INTR_EN, qdev->intr_context[i].intr_read_mask);
+               value = ql_read32(qdev, INTR_EN);
+               pr_err("%s: Interrupt %d is %s\n",
+                      qdev->ndev->name, i,
+                      (value & INTR_EN_EN ? "enabled" : "disabled"));
+       }
+}
+
+#define DUMP_XGMAC(qdev, reg)                                  \
+do {                                                           \
+       u32 data;                                               \
+       ql_read_xgmac_reg(qdev, reg, &data);                    \
+       pr_err("%s: %s = 0x%.08x\n", qdev->ndev->name, #reg, data); \
+} while (0)
+
+void ql_dump_xgmac_control_regs(struct ql_adapter *qdev)
+{
+       if (ql_sem_spinlock(qdev, qdev->xg_sem_mask)) {
+               pr_err("%s: Couldn't get xgmac sem\n", __func__);
+               return;
+       }
+       DUMP_XGMAC(qdev, PAUSE_SRC_LO);
+       DUMP_XGMAC(qdev, PAUSE_SRC_HI);
+       DUMP_XGMAC(qdev, GLOBAL_CFG);
+       DUMP_XGMAC(qdev, TX_CFG);
+       DUMP_XGMAC(qdev, RX_CFG);
+       DUMP_XGMAC(qdev, FLOW_CTL);
+       DUMP_XGMAC(qdev, PAUSE_OPCODE);
+       DUMP_XGMAC(qdev, PAUSE_TIMER);
+       DUMP_XGMAC(qdev, PAUSE_FRM_DEST_LO);
+       DUMP_XGMAC(qdev, PAUSE_FRM_DEST_HI);
+       DUMP_XGMAC(qdev, MAC_TX_PARAMS);
+       DUMP_XGMAC(qdev, MAC_RX_PARAMS);
+       DUMP_XGMAC(qdev, MAC_SYS_INT);
+       DUMP_XGMAC(qdev, MAC_SYS_INT_MASK);
+       DUMP_XGMAC(qdev, MAC_MGMT_INT);
+       DUMP_XGMAC(qdev, MAC_MGMT_IN_MASK);
+       DUMP_XGMAC(qdev, EXT_ARB_MODE);
+       ql_sem_unlock(qdev, qdev->xg_sem_mask);
+}
+
+static void ql_dump_ets_regs(struct ql_adapter *qdev)
+{
+}
+
+static void ql_dump_cam_entries(struct ql_adapter *qdev)
+{
+       int i;
+       u32 value[3];
+
+       i = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
+       if (i)
+               return;
+       for (i = 0; i < 4; i++) {
+               if (ql_get_mac_addr_reg(qdev, MAC_ADDR_TYPE_CAM_MAC, i, value)) {
+                       pr_err("%s: Failed read of mac index register\n",
+                              __func__);
+                       return;
+               } else {
+                       if (value[0])
+                               pr_err("%s: CAM index %d CAM Lookup Lower = 0x%.08x:%.08x, Output = 0x%.08x\n",
+                                      qdev->ndev->name, i, value[1], value[0],
+                                      value[2]);
+               }
+       }
+       for (i = 0; i < 32; i++) {
+               if (ql_get_mac_addr_reg
+                   (qdev, MAC_ADDR_TYPE_MULTI_MAC, i, value)) {
+                       pr_err("%s: Failed read of mac index register\n",
+                              __func__);
+                       return;
+               } else {
+                       if (value[0])
+                               pr_err("%s: MCAST index %d CAM Lookup Lower = 0x%.08x:%.08x\n",
+                                      qdev->ndev->name, i, value[1], value[0]);
+               }
+       }
+       ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
+}
+
+void ql_dump_routing_entries(struct ql_adapter *qdev)
+{
+       int i;
+       u32 value;
+       i = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
+       if (i)
+               return;
+       for (i = 0; i < 16; i++) {
+               value = 0;
+               if (ql_get_routing_reg(qdev, i, &value)) {
+                       pr_err("%s: Failed read of routing index register\n",
+                              __func__);
+                       return;
+               } else {
+                       if (value)
+                               pr_err("%s: Routing Mask %d = 0x%.08x\n",
+                                      qdev->ndev->name, i, value);
+               }
+       }
+       ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
+}
+
+#define DUMP_REG(qdev, reg)                    \
+       pr_err("%-32s= 0x%x\n", #reg, ql_read32(qdev, reg))
+
+void ql_dump_regs(struct ql_adapter *qdev)
+{
+       pr_err("reg dump for function #%d\n", qdev->func);
+       DUMP_REG(qdev, SYS);
+       DUMP_REG(qdev, RST_FO);
+       DUMP_REG(qdev, FSC);
+       DUMP_REG(qdev, CSR);
+       DUMP_REG(qdev, ICB_RID);
+       DUMP_REG(qdev, ICB_L);
+       DUMP_REG(qdev, ICB_H);
+       DUMP_REG(qdev, CFG);
+       DUMP_REG(qdev, BIOS_ADDR);
+       DUMP_REG(qdev, STS);
+       DUMP_REG(qdev, INTR_EN);
+       DUMP_REG(qdev, INTR_MASK);
+       DUMP_REG(qdev, ISR1);
+       DUMP_REG(qdev, ISR2);
+       DUMP_REG(qdev, ISR3);
+       DUMP_REG(qdev, ISR4);
+       DUMP_REG(qdev, REV_ID);
+       DUMP_REG(qdev, FRC_ECC_ERR);
+       DUMP_REG(qdev, ERR_STS);
+       DUMP_REG(qdev, RAM_DBG_ADDR);
+       DUMP_REG(qdev, RAM_DBG_DATA);
+       DUMP_REG(qdev, ECC_ERR_CNT);
+       DUMP_REG(qdev, SEM);
+       DUMP_REG(qdev, GPIO_1);
+       DUMP_REG(qdev, GPIO_2);
+       DUMP_REG(qdev, GPIO_3);
+       DUMP_REG(qdev, XGMAC_ADDR);
+       DUMP_REG(qdev, XGMAC_DATA);
+       DUMP_REG(qdev, NIC_ETS);
+       DUMP_REG(qdev, CNA_ETS);
+       DUMP_REG(qdev, FLASH_ADDR);
+       DUMP_REG(qdev, FLASH_DATA);
+       DUMP_REG(qdev, CQ_STOP);
+       DUMP_REG(qdev, PAGE_TBL_RID);
+       DUMP_REG(qdev, WQ_PAGE_TBL_LO);
+       DUMP_REG(qdev, WQ_PAGE_TBL_HI);
+       DUMP_REG(qdev, CQ_PAGE_TBL_LO);
+       DUMP_REG(qdev, CQ_PAGE_TBL_HI);
+       DUMP_REG(qdev, COS_DFLT_CQ1);
+       DUMP_REG(qdev, COS_DFLT_CQ2);
+       DUMP_REG(qdev, SPLT_HDR);
+       DUMP_REG(qdev, FC_PAUSE_THRES);
+       DUMP_REG(qdev, NIC_PAUSE_THRES);
+       DUMP_REG(qdev, FC_ETHERTYPE);
+       DUMP_REG(qdev, FC_RCV_CFG);
+       DUMP_REG(qdev, NIC_RCV_CFG);
+       DUMP_REG(qdev, FC_COS_TAGS);
+       DUMP_REG(qdev, NIC_COS_TAGS);
+       DUMP_REG(qdev, MGMT_RCV_CFG);
+       DUMP_REG(qdev, XG_SERDES_ADDR);
+       DUMP_REG(qdev, XG_SERDES_DATA);
+       DUMP_REG(qdev, PRB_MX_ADDR);
+       DUMP_REG(qdev, PRB_MX_DATA);
+       ql_dump_intr_states(qdev);
+       ql_dump_xgmac_control_regs(qdev);
+       ql_dump_ets_regs(qdev);
+       ql_dump_cam_entries(qdev);
+       ql_dump_routing_entries(qdev);
+}
+#endif
+
+#ifdef QL_STAT_DUMP
+
+#define DUMP_STAT(qdev, stat)  \
+       pr_err("%s = %ld\n", #stat, (unsigned long)qdev->nic_stats.stat)
+
+void ql_dump_stat(struct ql_adapter *qdev)
+{
+       pr_err("%s: Enter\n", __func__);
+       DUMP_STAT(qdev, tx_pkts);
+       DUMP_STAT(qdev, tx_bytes);
+       DUMP_STAT(qdev, tx_mcast_pkts);
+       DUMP_STAT(qdev, tx_bcast_pkts);
+       DUMP_STAT(qdev, tx_ucast_pkts);
+       DUMP_STAT(qdev, tx_ctl_pkts);
+       DUMP_STAT(qdev, tx_pause_pkts);
+       DUMP_STAT(qdev, tx_64_pkt);
+       DUMP_STAT(qdev, tx_65_to_127_pkt);
+       DUMP_STAT(qdev, tx_128_to_255_pkt);
+       DUMP_STAT(qdev, tx_256_511_pkt);
+       DUMP_STAT(qdev, tx_512_to_1023_pkt);
+       DUMP_STAT(qdev, tx_1024_to_1518_pkt);
+       DUMP_STAT(qdev, tx_1519_to_max_pkt);
+       DUMP_STAT(qdev, tx_undersize_pkt);
+       DUMP_STAT(qdev, tx_oversize_pkt);
+       DUMP_STAT(qdev, rx_bytes);
+       DUMP_STAT(qdev, rx_bytes_ok);
+       DUMP_STAT(qdev, rx_pkts);
+       DUMP_STAT(qdev, rx_pkts_ok);
+       DUMP_STAT(qdev, rx_bcast_pkts);
+       DUMP_STAT(qdev, rx_mcast_pkts);
+       DUMP_STAT(qdev, rx_ucast_pkts);
+       DUMP_STAT(qdev, rx_undersize_pkts);
+       DUMP_STAT(qdev, rx_oversize_pkts);
+       DUMP_STAT(qdev, rx_jabber_pkts);
+       DUMP_STAT(qdev, rx_undersize_fcerr_pkts);
+       DUMP_STAT(qdev, rx_drop_events);
+       DUMP_STAT(qdev, rx_fcerr_pkts);
+       DUMP_STAT(qdev, rx_align_err);
+       DUMP_STAT(qdev, rx_symbol_err);
+       DUMP_STAT(qdev, rx_mac_err);
+       DUMP_STAT(qdev, rx_ctl_pkts);
+       DUMP_STAT(qdev, rx_pause_pkts);
+       DUMP_STAT(qdev, rx_64_pkts);
+       DUMP_STAT(qdev, rx_65_to_127_pkts);
+       DUMP_STAT(qdev, rx_128_255_pkts);
+       DUMP_STAT(qdev, rx_256_511_pkts);
+       DUMP_STAT(qdev, rx_512_to_1023_pkts);
+       DUMP_STAT(qdev, rx_1024_to_1518_pkts);
+       DUMP_STAT(qdev, rx_1519_to_max_pkts);
+       DUMP_STAT(qdev, rx_len_err_pkts);
+}
+#endif
+
+#ifdef QL_DEV_DUMP
+
+#define DUMP_QDEV_FIELD(qdev, type, field)             \
+       pr_err("qdev->%-24s = " type "\n", #field, qdev->field)
+#define DUMP_QDEV_DMA_FIELD(qdev, field)               \
+       pr_err("qdev->%-24s = %llx\n", #field, (unsigned long long)qdev->field)
+#define DUMP_QDEV_ARRAY(qdev, type, array, index, field) \
+       pr_err("%s[%d].%s = " type "\n",                 \
+              #array, index, #field, qdev->array[index].field);
+void ql_dump_qdev(struct ql_adapter *qdev)
+{
+       int i;
+       DUMP_QDEV_FIELD(qdev, "%lx", flags);
+       DUMP_QDEV_FIELD(qdev, "%p", vlgrp);
+       DUMP_QDEV_FIELD(qdev, "%p", pdev);
+       DUMP_QDEV_FIELD(qdev, "%p", ndev);
+       DUMP_QDEV_FIELD(qdev, "%d", chip_rev_id);
+       DUMP_QDEV_FIELD(qdev, "%p", reg_base);
+       DUMP_QDEV_FIELD(qdev, "%p", doorbell_area);
+       DUMP_QDEV_FIELD(qdev, "%d", doorbell_area_size);
+       DUMP_QDEV_FIELD(qdev, "%x", msg_enable);
+       DUMP_QDEV_FIELD(qdev, "%p", rx_ring_shadow_reg_area);
+       DUMP_QDEV_DMA_FIELD(qdev, rx_ring_shadow_reg_dma);
+       DUMP_QDEV_FIELD(qdev, "%p", tx_ring_shadow_reg_area);
+       DUMP_QDEV_DMA_FIELD(qdev, tx_ring_shadow_reg_dma);
+       DUMP_QDEV_FIELD(qdev, "%d", intr_count);
+       if (qdev->msi_x_entry)
+               for (i = 0; i < qdev->intr_count; i++) {
+                       DUMP_QDEV_ARRAY(qdev, "%d", msi_x_entry, i, vector);
+                       DUMP_QDEV_ARRAY(qdev, "%d", msi_x_entry, i, entry);
+               }
+       for (i = 0; i < qdev->intr_count; i++) {
+               DUMP_QDEV_ARRAY(qdev, "%p", intr_context, i, qdev);
+               DUMP_QDEV_ARRAY(qdev, "%d", intr_context, i, intr);
+               DUMP_QDEV_ARRAY(qdev, "%d", intr_context, i, hooked);
+               DUMP_QDEV_ARRAY(qdev, "0x%08x", intr_context, i, intr_en_mask);
+               DUMP_QDEV_ARRAY(qdev, "0x%08x", intr_context, i, intr_dis_mask);
+               DUMP_QDEV_ARRAY(qdev, "0x%08x", intr_context, i, intr_read_mask);
+       }
+       DUMP_QDEV_FIELD(qdev, "%d", tx_ring_count);
+       DUMP_QDEV_FIELD(qdev, "%d", rx_ring_count);
+       DUMP_QDEV_FIELD(qdev, "%d", ring_mem_size);
+       DUMP_QDEV_FIELD(qdev, "%p", ring_mem);
+       DUMP_QDEV_FIELD(qdev, "%d", intr_count);
+       DUMP_QDEV_FIELD(qdev, "%p", tx_ring);
+       DUMP_QDEV_FIELD(qdev, "%d", rss_ring_count);
+       DUMP_QDEV_FIELD(qdev, "%p", rx_ring);
+       DUMP_QDEV_FIELD(qdev, "%d", default_rx_queue);
+       DUMP_QDEV_FIELD(qdev, "0x%08x", xg_sem_mask);
+       DUMP_QDEV_FIELD(qdev, "0x%08x", port_link_up);
+       DUMP_QDEV_FIELD(qdev, "0x%08x", port_init);
+}
+#endif
+
+#ifdef QL_CB_DUMP
+void ql_dump_wqicb(struct wqicb *wqicb)
+{
+       pr_err("Dumping wqicb stuff...\n");
+       pr_err("wqicb->len = 0x%x\n", le16_to_cpu(wqicb->len));
+       pr_err("wqicb->flags = %x\n", le16_to_cpu(wqicb->flags));
+       pr_err("wqicb->cq_id_rss = %d\n",
+              le16_to_cpu(wqicb->cq_id_rss));
+       pr_err("wqicb->rid = 0x%x\n", le16_to_cpu(wqicb->rid));
+       pr_err("wqicb->wq_addr = 0x%llx\n",
+              (unsigned long long) le64_to_cpu(wqicb->addr));
+       pr_err("wqicb->wq_cnsmr_idx_addr = 0x%llx\n",
+              (unsigned long long) le64_to_cpu(wqicb->cnsmr_idx_addr));
+}
+
+void ql_dump_tx_ring(struct tx_ring *tx_ring)
+{
+       if (tx_ring == NULL)
+               return;
+       pr_err("===================== Dumping tx_ring %d ===============\n",
+              tx_ring->wq_id);
+       pr_err("tx_ring->base = %p\n", tx_ring->wq_base);
+       pr_err("tx_ring->base_dma = 0x%llx\n",
+              (unsigned long long) tx_ring->wq_base_dma);
+       pr_err("tx_ring->cnsmr_idx_sh_reg, addr = 0x%p, value = %d\n",
+              tx_ring->cnsmr_idx_sh_reg,
+              tx_ring->cnsmr_idx_sh_reg
+                       ? ql_read_sh_reg(tx_ring->cnsmr_idx_sh_reg) : 0);
+       pr_err("tx_ring->size = %d\n", tx_ring->wq_size);
+       pr_err("tx_ring->len = %d\n", tx_ring->wq_len);
+       pr_err("tx_ring->prod_idx_db_reg = %p\n", tx_ring->prod_idx_db_reg);
+       pr_err("tx_ring->valid_db_reg = %p\n", tx_ring->valid_db_reg);
+       pr_err("tx_ring->prod_idx = %d\n", tx_ring->prod_idx);
+       pr_err("tx_ring->cq_id = %d\n", tx_ring->cq_id);
+       pr_err("tx_ring->wq_id = %d\n", tx_ring->wq_id);
+       pr_err("tx_ring->q = %p\n", tx_ring->q);
+       pr_err("tx_ring->tx_count = %d\n", atomic_read(&tx_ring->tx_count));
+}
+
+void ql_dump_ricb(struct ricb *ricb)
+{
+       int i;
+       pr_err("===================== Dumping ricb ===============\n");
+       pr_err("Dumping ricb stuff...\n");
+
+       pr_err("ricb->base_cq = %d\n", ricb->base_cq & 0x1f);
+       pr_err("ricb->flags = %s%s%s%s%s%s%s%s%s\n",
+              ricb->base_cq & RSS_L4K ? "RSS_L4K " : "",
+              ricb->flags & RSS_L6K ? "RSS_L6K " : "",
+              ricb->flags & RSS_LI ? "RSS_LI " : "",
+              ricb->flags & RSS_LB ? "RSS_LB " : "",
+              ricb->flags & RSS_LM ? "RSS_LM " : "",
+              ricb->flags & RSS_RI4 ? "RSS_RI4 " : "",
+              ricb->flags & RSS_RT4 ? "RSS_RT4 " : "",
+              ricb->flags & RSS_RI6 ? "RSS_RI6 " : "",
+              ricb->flags & RSS_RT6 ? "RSS_RT6 " : "");
+       pr_err("ricb->mask = 0x%.04x\n", le16_to_cpu(ricb->mask));
+       for (i = 0; i < 16; i++)
+               pr_err("ricb->hash_cq_id[%d] = 0x%.08x\n", i,
+                      le32_to_cpu(ricb->hash_cq_id[i]));
+       for (i = 0; i < 10; i++)
+               pr_err("ricb->ipv6_hash_key[%d] = 0x%.08x\n", i,
+                      le32_to_cpu(ricb->ipv6_hash_key[i]));
+       for (i = 0; i < 4; i++)
+               pr_err("ricb->ipv4_hash_key[%d] = 0x%.08x\n", i,
+                      le32_to_cpu(ricb->ipv4_hash_key[i]));
+}
+
+void ql_dump_cqicb(struct cqicb *cqicb)
+{
+       pr_err("Dumping cqicb stuff...\n");
+
+       pr_err("cqicb->msix_vect = %d\n", cqicb->msix_vect);
+       pr_err("cqicb->flags = %x\n", cqicb->flags);
+       pr_err("cqicb->len = %d\n", le16_to_cpu(cqicb->len));
+       pr_err("cqicb->addr = 0x%llx\n",
+              (unsigned long long) le64_to_cpu(cqicb->addr));
+       pr_err("cqicb->prod_idx_addr = 0x%llx\n",
+              (unsigned long long) le64_to_cpu(cqicb->prod_idx_addr));
+       pr_err("cqicb->pkt_delay = 0x%.04x\n",
+              le16_to_cpu(cqicb->pkt_delay));
+       pr_err("cqicb->irq_delay = 0x%.04x\n",
+              le16_to_cpu(cqicb->irq_delay));
+       pr_err("cqicb->lbq_addr = 0x%llx\n",
+              (unsigned long long) le64_to_cpu(cqicb->lbq_addr));
+       pr_err("cqicb->lbq_buf_size = 0x%.04x\n",
+              le16_to_cpu(cqicb->lbq_buf_size));
+       pr_err("cqicb->lbq_len = 0x%.04x\n",
+              le16_to_cpu(cqicb->lbq_len));
+       pr_err("cqicb->sbq_addr = 0x%llx\n",
+              (unsigned long long) le64_to_cpu(cqicb->sbq_addr));
+       pr_err("cqicb->sbq_buf_size = 0x%.04x\n",
+              le16_to_cpu(cqicb->sbq_buf_size));
+       pr_err("cqicb->sbq_len = 0x%.04x\n",
+              le16_to_cpu(cqicb->sbq_len));
+}
+
+void ql_dump_rx_ring(struct rx_ring *rx_ring)
+{
+       if (rx_ring == NULL)
+               return;
+       pr_err("===================== Dumping rx_ring %d ===============\n",
+              rx_ring->cq_id);
+       pr_err("Dumping rx_ring %d, type = %s%s%s\n",
+              rx_ring->cq_id, rx_ring->type == DEFAULT_Q ? "DEFAULT" : "",
+              rx_ring->type == TX_Q ? "OUTBOUND COMPLETIONS" : "",
+              rx_ring->type == RX_Q ? "INBOUND_COMPLETIONS" : "");
+       pr_err("rx_ring->cqicb = %p\n", &rx_ring->cqicb);
+       pr_err("rx_ring->cq_base = %p\n", rx_ring->cq_base);
+       pr_err("rx_ring->cq_base_dma = %llx\n",
+              (unsigned long long) rx_ring->cq_base_dma);
+       pr_err("rx_ring->cq_size = %d\n", rx_ring->cq_size);
+       pr_err("rx_ring->cq_len = %d\n", rx_ring->cq_len);
+       pr_err("rx_ring->prod_idx_sh_reg, addr = 0x%p, value = %d\n",
+              rx_ring->prod_idx_sh_reg,
+              rx_ring->prod_idx_sh_reg
+                       ? ql_read_sh_reg(rx_ring->prod_idx_sh_reg) : 0);
+       pr_err("rx_ring->prod_idx_sh_reg_dma = %llx\n",
+              (unsigned long long) rx_ring->prod_idx_sh_reg_dma);
+       pr_err("rx_ring->cnsmr_idx_db_reg = %p\n",
+              rx_ring->cnsmr_idx_db_reg);
+       pr_err("rx_ring->cnsmr_idx = %d\n", rx_ring->cnsmr_idx);
+       pr_err("rx_ring->curr_entry = %p\n", rx_ring->curr_entry);
+       pr_err("rx_ring->valid_db_reg = %p\n", rx_ring->valid_db_reg);
+
+       pr_err("rx_ring->lbq_base = %p\n", rx_ring->lbq_base);
+       pr_err("rx_ring->lbq_base_dma = %llx\n",
+              (unsigned long long) rx_ring->lbq_base_dma);
+       pr_err("rx_ring->lbq_base_indirect = %p\n",
+              rx_ring->lbq_base_indirect);
+       pr_err("rx_ring->lbq_base_indirect_dma = %llx\n",
+              (unsigned long long) rx_ring->lbq_base_indirect_dma);
+       pr_err("rx_ring->lbq = %p\n", rx_ring->lbq);
+       pr_err("rx_ring->lbq_len = %d\n", rx_ring->lbq_len);
+       pr_err("rx_ring->lbq_size = %d\n", rx_ring->lbq_size);
+       pr_err("rx_ring->lbq_prod_idx_db_reg = %p\n",
+              rx_ring->lbq_prod_idx_db_reg);
+       pr_err("rx_ring->lbq_prod_idx = %d\n", rx_ring->lbq_prod_idx);
+       pr_err("rx_ring->lbq_curr_idx = %d\n", rx_ring->lbq_curr_idx);
+       pr_err("rx_ring->lbq_clean_idx = %d\n", rx_ring->lbq_clean_idx);
+       pr_err("rx_ring->lbq_free_cnt = %d\n", rx_ring->lbq_free_cnt);
+       pr_err("rx_ring->lbq_buf_size = %d\n", rx_ring->lbq_buf_size);
+
+       pr_err("rx_ring->sbq_base = %p\n", rx_ring->sbq_base);
+       pr_err("rx_ring->sbq_base_dma = %llx\n",
+              (unsigned long long) rx_ring->sbq_base_dma);
+       pr_err("rx_ring->sbq_base_indirect = %p\n",
+              rx_ring->sbq_base_indirect);
+       pr_err("rx_ring->sbq_base_indirect_dma = %llx\n",
+              (unsigned long long) rx_ring->sbq_base_indirect_dma);
+       pr_err("rx_ring->sbq = %p\n", rx_ring->sbq);
+       pr_err("rx_ring->sbq_len = %d\n", rx_ring->sbq_len);
+       pr_err("rx_ring->sbq_size = %d\n", rx_ring->sbq_size);
+       pr_err("rx_ring->sbq_prod_idx_db_reg addr = %p\n",
+              rx_ring->sbq_prod_idx_db_reg);
+       pr_err("rx_ring->sbq_prod_idx = %d\n", rx_ring->sbq_prod_idx);
+       pr_err("rx_ring->sbq_curr_idx = %d\n", rx_ring->sbq_curr_idx);
+       pr_err("rx_ring->sbq_clean_idx = %d\n", rx_ring->sbq_clean_idx);
+       pr_err("rx_ring->sbq_free_cnt = %d\n", rx_ring->sbq_free_cnt);
+       pr_err("rx_ring->sbq_buf_size = %d\n", rx_ring->sbq_buf_size);
+       pr_err("rx_ring->cq_id = %d\n", rx_ring->cq_id);
+       pr_err("rx_ring->irq = %d\n", rx_ring->irq);
+       pr_err("rx_ring->cpu = %d\n", rx_ring->cpu);
+       pr_err("rx_ring->qdev = %p\n", rx_ring->qdev);
+}
+
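+/* Retrieve a hardware control block from the chip (ql_write_cfg() with a
+ * dump-request bit) and decode it with the matching dump routine.
+ */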
+void ql_dump_hw_cb(struct ql_adapter *qdev, int size, u32 bit, u16 q_id)
+{
+       void *ptr;
+
+       pr_err("%s: Enter\n", __func__);
+
+       ptr = kmalloc(size, GFP_ATOMIC);
+       if (ptr == NULL)
+               return;
+
+       if (ql_write_cfg(qdev, ptr, size, bit, q_id)) {
+               pr_err("%s: Failed to upload control block!\n", __func__);
+               goto fail_it;
+       }
+       switch (bit) {
+       case CFG_DRQ:
+               ql_dump_wqicb((struct wqicb *)ptr);
+               break;
+       case CFG_DCQ:
+               ql_dump_cqicb((struct cqicb *)ptr);
+               break;
+       case CFG_DR:
+               ql_dump_ricb((struct ricb *)ptr);
+               break;
+       default:
+               pr_err("%s: Invalid bit value = %x\n", __func__, bit);
+               break;
+       }
+fail_it:
+       kfree(ptr);
+}
+#endif
+
+#ifdef QL_OB_DUMP
+void ql_dump_tx_desc(struct tx_buf_desc *tbd)
+{
+       int i;
+
+       /* Dump the first three buffer descriptors of the IOCB. */
+       for (i = 0; i < 3; i++, tbd++) {
+               pr_err("tbd->addr  = 0x%llx\n",
+                      le64_to_cpu((u64) tbd->addr));
+               pr_err("tbd->len   = %d\n",
+                      le32_to_cpu(tbd->len & TX_DESC_LEN_MASK));
+               pr_err("tbd->flags = %s %s\n",
+                      tbd->len & TX_DESC_C ? "C" : ".",
+                      tbd->len & TX_DESC_E ? "E" : ".");
+       }
+}
+
+void ql_dump_ob_mac_iocb(struct ob_mac_iocb_req *ob_mac_iocb)
+{
+       struct ob_mac_tso_iocb_req *ob_mac_tso_iocb =
+           (struct ob_mac_tso_iocb_req *)ob_mac_iocb;
+       struct tx_buf_desc *tbd;
+       u16 frame_len;
+
+       pr_err("%s\n", __func__);
+       pr_err("opcode         = %s\n",
+              (ob_mac_iocb->opcode == OPCODE_OB_MAC_IOCB) ? "MAC" : "TSO");
+       pr_err("flags1          = %s %s %s %s %s\n",
+              ob_mac_tso_iocb->flags1 & OB_MAC_TSO_IOCB_OI ? "OI" : "",
+              ob_mac_tso_iocb->flags1 & OB_MAC_TSO_IOCB_I ? "I" : "",
+              ob_mac_tso_iocb->flags1 & OB_MAC_TSO_IOCB_D ? "D" : "",
+              ob_mac_tso_iocb->flags1 & OB_MAC_TSO_IOCB_IP4 ? "IP4" : "",
+              ob_mac_tso_iocb->flags1 & OB_MAC_TSO_IOCB_IP6 ? "IP6" : "");
+       pr_err("flags2          = %s %s %s\n",
+              ob_mac_tso_iocb->flags2 & OB_MAC_TSO_IOCB_LSO ? "LSO" : "",
+              ob_mac_tso_iocb->flags2 & OB_MAC_TSO_IOCB_UC ? "UC" : "",
+              ob_mac_tso_iocb->flags2 & OB_MAC_TSO_IOCB_TC ? "TC" : "");
+       pr_err("flags3          = %s %s %s\n",
+              ob_mac_tso_iocb->flags3 & OB_MAC_TSO_IOCB_IC ? "IC" : "",
+              ob_mac_tso_iocb->flags3 & OB_MAC_TSO_IOCB_DFP ? "DFP" : "",
+              ob_mac_tso_iocb->flags3 & OB_MAC_TSO_IOCB_V ? "V" : "");
+       pr_err("tid = %x\n", ob_mac_iocb->tid);
+       pr_err("txq_idx = %d\n", ob_mac_iocb->txq_idx);
+       pr_err("vlan_tci      = %x\n", ob_mac_tso_iocb->vlan_tci);
+       if (ob_mac_iocb->opcode == OPCODE_OB_MAC_TSO_IOCB) {
+               pr_err("frame_len      = %d\n",
+                      le32_to_cpu(ob_mac_tso_iocb->frame_len));
+               pr_err("mss      = %d\n",
+                      le16_to_cpu(ob_mac_tso_iocb->mss));
+               pr_err("prot_hdr_len   = %d\n",
+                      le16_to_cpu(ob_mac_tso_iocb->total_hdrs_len));
+               pr_err("hdr_offset     = 0x%.04x\n",
+                      le16_to_cpu(ob_mac_tso_iocb->net_trans_offset));
+               frame_len = le32_to_cpu(ob_mac_tso_iocb->frame_len);
+       } else {
+               pr_err("frame_len      = %d\n",
+                      le16_to_cpu(ob_mac_iocb->frame_len));
+               frame_len = le16_to_cpu(ob_mac_iocb->frame_len);
+       }
+       tbd = &ob_mac_iocb->tbd[0];
+       ql_dump_tx_desc(tbd);
+}
+
+void ql_dump_ob_mac_rsp(struct ob_mac_iocb_rsp *ob_mac_rsp)
+{
+       pr_err("%s\n", __func__);
+       pr_err("opcode         = %d\n", ob_mac_rsp->opcode);
+       pr_err("flags          = %s %s %s %s %s %s %s\n",
+              ob_mac_rsp->flags1 & OB_MAC_IOCB_RSP_OI ? "OI" : ".",
+              ob_mac_rsp->flags1 & OB_MAC_IOCB_RSP_I ? "I" : ".",
+              ob_mac_rsp->flags1 & OB_MAC_IOCB_RSP_E ? "E" : ".",
+              ob_mac_rsp->flags1 & OB_MAC_IOCB_RSP_S ? "S" : ".",
+              ob_mac_rsp->flags1 & OB_MAC_IOCB_RSP_L ? "L" : ".",
+              ob_mac_rsp->flags1 & OB_MAC_IOCB_RSP_P ? "P" : ".",
+              ob_mac_rsp->flags2 & OB_MAC_IOCB_RSP_B ? "B" : ".");
+       pr_err("tid = %x\n", ob_mac_rsp->tid);
+}
+#endif
+
+#ifdef QL_IB_DUMP
+void ql_dump_ib_mac_rsp(struct ib_mac_iocb_rsp *ib_mac_rsp)
+{
+       pr_err("%s\n", __func__);
+       pr_err("opcode         = 0x%x\n", ib_mac_rsp->opcode);
+       pr_err("flags1 = %s%s%s%s%s%s\n",
+              ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_OI ? "OI " : "",
+              ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_I ? "I " : "",
+              ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_TE ? "TE " : "",
+              ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_NU ? "NU " : "",
+              ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_IE ? "IE " : "",
+              ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_B ? "B " : "");
+
+       if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK)
+               pr_err("%s%s%s Multicast\n",
+                      (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
+                      IB_MAC_IOCB_RSP_M_HASH ? "Hash" : "",
+                      (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
+                      IB_MAC_IOCB_RSP_M_REG ? "Registered" : "",
+                      (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
+                      IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
+
+       pr_err("flags2 = %s%s%s%s%s\n",
+              (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P) ? "P " : "",
+              (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ? "V " : "",
+              (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) ? "U " : "",
+              (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) ? "T " : "",
+              (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_FO) ? "FO " : "");
+
+       if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK)
+               pr_err("%s%s%s%s%s error\n",
+                      (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) ==
+                      IB_MAC_IOCB_RSP_ERR_OVERSIZE ? "oversize" : "",
+                      (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) ==
+                      IB_MAC_IOCB_RSP_ERR_UNDERSIZE ? "undersize" : "",
+                      (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) ==
+                      IB_MAC_IOCB_RSP_ERR_PREAMBLE ? "preamble" : "",
+                      (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) ==
+                      IB_MAC_IOCB_RSP_ERR_FRAME_LEN ? "frame length" : "",
+                      (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) ==
+                      IB_MAC_IOCB_RSP_ERR_CRC ? "CRC" : "");
+
+       pr_err("flags3 = %s%s\n",
+              ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS ? "DS " : "",
+              ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL ? "DL " : "");
+
+       if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_RSS_MASK)
+               pr_err("RSS flags = %s%s%s%s\n",
+                      ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_RSS_MASK) ==
+                       IB_MAC_IOCB_RSP_M_IPV4) ? "IPv4 RSS" : "",
+                      ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_RSS_MASK) ==
+                       IB_MAC_IOCB_RSP_M_IPV6) ? "IPv6 RSS " : "",
+                      ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_RSS_MASK) ==
+                       IB_MAC_IOCB_RSP_M_TCP_V4) ? "TCP/IPv4 RSS" : "",
+                      ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_RSS_MASK) ==
+                       IB_MAC_IOCB_RSP_M_TCP_V6) ? "TCP/IPv6 RSS" : "");
+
+       pr_err("data_len        = %d\n",
+              le32_to_cpu(ib_mac_rsp->data_len));
+       pr_err("data_addr    = 0x%llx\n",
+              (unsigned long long) le64_to_cpu(ib_mac_rsp->data_addr));
+       if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_RSS_MASK)
+               pr_err("rss    = %x\n",
+                      le32_to_cpu(ib_mac_rsp->rss));
+       if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V)
+               pr_err("vlan_id    = %x\n",
+                      le16_to_cpu(ib_mac_rsp->vlan_id));
+
+       pr_err("flags4 = %s%s%s\n",
+               ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV ? "HV " : "",
+               ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS ? "HS " : "",
+               ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HL ? "HL " : "");
+
+       if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV) {
+               pr_err("hdr length      = %d\n",
+                      le32_to_cpu(ib_mac_rsp->hdr_len));
+               pr_err("hdr addr    = 0x%llx\n",
+                      (unsigned long long) le64_to_cpu(ib_mac_rsp->hdr_addr));
+       }
+}
+#endif
+
+#ifdef QL_ALL_DUMP
+void ql_dump_all(struct ql_adapter *qdev)
+{
+       int i;
+
+       QL_DUMP_REGS(qdev);
+       QL_DUMP_QDEV(qdev);
+       for (i = 0; i < qdev->tx_ring_count; i++) {
+               QL_DUMP_TX_RING(&qdev->tx_ring[i]);
+               QL_DUMP_WQICB((struct wqicb *)&qdev->tx_ring[i]);
+       }
+       for (i = 0; i < qdev->rx_ring_count; i++) {
+               QL_DUMP_RX_RING(&qdev->rx_ring[i]);
+               QL_DUMP_CQICB((struct cqicb *)&qdev->rx_ring[i]);
+       }
+}
+#endif
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c b/drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c
new file mode 100644 (file)
index 0000000..5dade1f
--- /dev/null
@@ -0,0 +1,728 @@
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/list.h>
+#include <linux/pci.h>
+#include <linux/dma-mapping.h>
+#include <linux/pagemap.h>
+#include <linux/sched.h>
+#include <linux/dmapool.h>
+#include <linux/mempool.h>
+#include <linux/spinlock.h>
+#include <linux/kthread.h>
+#include <linux/interrupt.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/in.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <net/ipv6.h>
+#include <linux/tcp.h>
+#include <linux/udp.h>
+#include <linux/if_arp.h>
+#include <linux/if_ether.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/skbuff.h>
+#include <linux/rtnetlink.h>
+#include <linux/if_vlan.h>
+#include <linux/delay.h>
+#include <linux/mm.h>
+#include <linux/vmalloc.h>
+
+
+#include "qlge.h"
+
+struct ql_stats {
+       char stat_string[ETH_GSTRING_LEN];
+       int sizeof_stat;
+       int stat_offset;
+};
+
+#define QL_SIZEOF(m) FIELD_SIZEOF(struct ql_adapter, m)
+#define QL_OFF(m) offsetof(struct ql_adapter, m)
+
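+/* Each entry maps an ethtool stat name to its size and byte offset within
+ * struct ql_adapter so ql_get_ethtool_stats() can copy the values
+ * generically.
+ */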
+static const struct ql_stats ql_gstrings_stats[] = {
+       {"tx_pkts", QL_SIZEOF(nic_stats.tx_pkts), QL_OFF(nic_stats.tx_pkts)},
+       {"tx_bytes", QL_SIZEOF(nic_stats.tx_bytes), QL_OFF(nic_stats.tx_bytes)},
+       {"tx_mcast_pkts", QL_SIZEOF(nic_stats.tx_mcast_pkts),
+                                       QL_OFF(nic_stats.tx_mcast_pkts)},
+       {"tx_bcast_pkts", QL_SIZEOF(nic_stats.tx_bcast_pkts),
+                                       QL_OFF(nic_stats.tx_bcast_pkts)},
+       {"tx_ucast_pkts", QL_SIZEOF(nic_stats.tx_ucast_pkts),
+                                       QL_OFF(nic_stats.tx_ucast_pkts)},
+       {"tx_ctl_pkts", QL_SIZEOF(nic_stats.tx_ctl_pkts),
+                                       QL_OFF(nic_stats.tx_ctl_pkts)},
+       {"tx_pause_pkts", QL_SIZEOF(nic_stats.tx_pause_pkts),
+                                       QL_OFF(nic_stats.tx_pause_pkts)},
+       {"tx_64_pkts", QL_SIZEOF(nic_stats.tx_64_pkt),
+                                       QL_OFF(nic_stats.tx_64_pkt)},
+       {"tx_65_to_127_pkts", QL_SIZEOF(nic_stats.tx_65_to_127_pkt),
+                                       QL_OFF(nic_stats.tx_65_to_127_pkt)},
+       {"tx_128_to_255_pkts", QL_SIZEOF(nic_stats.tx_128_to_255_pkt),
+                                       QL_OFF(nic_stats.tx_128_to_255_pkt)},
+       {"tx_256_511_pkts", QL_SIZEOF(nic_stats.tx_256_511_pkt),
+                                       QL_OFF(nic_stats.tx_256_511_pkt)},
+       {"tx_512_to_1023_pkts", QL_SIZEOF(nic_stats.tx_512_to_1023_pkt),
+                                       QL_OFF(nic_stats.tx_512_to_1023_pkt)},
+       {"tx_1024_to_1518_pkts", QL_SIZEOF(nic_stats.tx_1024_to_1518_pkt),
+                                       QL_OFF(nic_stats.tx_1024_to_1518_pkt)},
+       {"tx_1519_to_max_pkts", QL_SIZEOF(nic_stats.tx_1519_to_max_pkt),
+                                       QL_OFF(nic_stats.tx_1519_to_max_pkt)},
+       {"tx_undersize_pkts", QL_SIZEOF(nic_stats.tx_undersize_pkt),
+                                       QL_OFF(nic_stats.tx_undersize_pkt)},
+       {"tx_oversize_pkts", QL_SIZEOF(nic_stats.tx_oversize_pkt),
+                                       QL_OFF(nic_stats.tx_oversize_pkt)},
+       {"rx_bytes", QL_SIZEOF(nic_stats.rx_bytes), QL_OFF(nic_stats.rx_bytes)},
+       {"rx_bytes_ok", QL_SIZEOF(nic_stats.rx_bytes_ok),
+                                       QL_OFF(nic_stats.rx_bytes_ok)},
+       {"rx_pkts", QL_SIZEOF(nic_stats.rx_pkts), QL_OFF(nic_stats.rx_pkts)},
+       {"rx_pkts_ok", QL_SIZEOF(nic_stats.rx_pkts_ok),
+                                       QL_OFF(nic_stats.rx_pkts_ok)},
+       {"rx_bcast_pkts", QL_SIZEOF(nic_stats.rx_bcast_pkts),
+                                       QL_OFF(nic_stats.rx_bcast_pkts)},
+       {"rx_mcast_pkts", QL_SIZEOF(nic_stats.rx_mcast_pkts),
+                                       QL_OFF(nic_stats.rx_mcast_pkts)},
+       {"rx_ucast_pkts", QL_SIZEOF(nic_stats.rx_ucast_pkts),
+                                       QL_OFF(nic_stats.rx_ucast_pkts)},
+       {"rx_undersize_pkts", QL_SIZEOF(nic_stats.rx_undersize_pkts),
+                                       QL_OFF(nic_stats.rx_undersize_pkts)},
+       {"rx_oversize_pkts", QL_SIZEOF(nic_stats.rx_oversize_pkts),
+                                       QL_OFF(nic_stats.rx_oversize_pkts)},
+       {"rx_jabber_pkts", QL_SIZEOF(nic_stats.rx_jabber_pkts),
+                                       QL_OFF(nic_stats.rx_jabber_pkts)},
+       {"rx_undersize_fcerr_pkts",
+               QL_SIZEOF(nic_stats.rx_undersize_fcerr_pkts),
+                               QL_OFF(nic_stats.rx_undersize_fcerr_pkts)},
+       {"rx_drop_events", QL_SIZEOF(nic_stats.rx_drop_events),
+                                       QL_OFF(nic_stats.rx_drop_events)},
+       {"rx_fcerr_pkts", QL_SIZEOF(nic_stats.rx_fcerr_pkts),
+                                       QL_OFF(nic_stats.rx_fcerr_pkts)},
+       {"rx_align_err", QL_SIZEOF(nic_stats.rx_align_err),
+                                       QL_OFF(nic_stats.rx_align_err)},
+       {"rx_symbol_err", QL_SIZEOF(nic_stats.rx_symbol_err),
+                                       QL_OFF(nic_stats.rx_symbol_err)},
+       {"rx_mac_err", QL_SIZEOF(nic_stats.rx_mac_err),
+                                       QL_OFF(nic_stats.rx_mac_err)},
+       {"rx_ctl_pkts", QL_SIZEOF(nic_stats.rx_ctl_pkts),
+                                       QL_OFF(nic_stats.rx_ctl_pkts)},
+       {"rx_pause_pkts", QL_SIZEOF(nic_stats.rx_pause_pkts),
+                                       QL_OFF(nic_stats.rx_pause_pkts)},
+       {"rx_64_pkts", QL_SIZEOF(nic_stats.rx_64_pkts),
+                                       QL_OFF(nic_stats.rx_64_pkts)},
+       {"rx_65_to_127_pkts", QL_SIZEOF(nic_stats.rx_65_to_127_pkts),
+                                       QL_OFF(nic_stats.rx_65_to_127_pkts)},
+       {"rx_128_255_pkts", QL_SIZEOF(nic_stats.rx_128_255_pkts),
+                                       QL_OFF(nic_stats.rx_128_255_pkts)},
+       {"rx_256_511_pkts", QL_SIZEOF(nic_stats.rx_256_511_pkts),
+                                       QL_OFF(nic_stats.rx_256_511_pkts)},
+       {"rx_512_to_1023_pkts", QL_SIZEOF(nic_stats.rx_512_to_1023_pkts),
+                                       QL_OFF(nic_stats.rx_512_to_1023_pkts)},
+       {"rx_1024_to_1518_pkts", QL_SIZEOF(nic_stats.rx_1024_to_1518_pkts),
+                                       QL_OFF(nic_stats.rx_1024_to_1518_pkts)},
+       {"rx_1519_to_max_pkts", QL_SIZEOF(nic_stats.rx_1519_to_max_pkts),
+                                       QL_OFF(nic_stats.rx_1519_to_max_pkts)},
+       {"rx_len_err_pkts", QL_SIZEOF(nic_stats.rx_len_err_pkts),
+                                       QL_OFF(nic_stats.rx_len_err_pkts)},
+       {"rx_code_err", QL_SIZEOF(nic_stats.rx_code_err),
+                                       QL_OFF(nic_stats.rx_code_err)},
+       {"rx_oversize_err", QL_SIZEOF(nic_stats.rx_oversize_err),
+                                       QL_OFF(nic_stats.rx_oversize_err)},
+       {"rx_undersize_err", QL_SIZEOF(nic_stats.rx_undersize_err),
+                                       QL_OFF(nic_stats.rx_undersize_err)},
+       {"rx_preamble_err", QL_SIZEOF(nic_stats.rx_preamble_err),
+                                       QL_OFF(nic_stats.rx_preamble_err)},
+       {"rx_frame_len_err", QL_SIZEOF(nic_stats.rx_frame_len_err),
+                                       QL_OFF(nic_stats.rx_frame_len_err)},
+       {"rx_crc_err", QL_SIZEOF(nic_stats.rx_crc_err),
+                                       QL_OFF(nic_stats.rx_crc_err)},
+       {"rx_err_count", QL_SIZEOF(nic_stats.rx_err_count),
+                                       QL_OFF(nic_stats.rx_err_count)},
+       {"tx_cbfc_pause_frames0", QL_SIZEOF(nic_stats.tx_cbfc_pause_frames0),
+                               QL_OFF(nic_stats.tx_cbfc_pause_frames0)},
+       {"tx_cbfc_pause_frames1", QL_SIZEOF(nic_stats.tx_cbfc_pause_frames1),
+                               QL_OFF(nic_stats.tx_cbfc_pause_frames1)},
+       {"tx_cbfc_pause_frames2", QL_SIZEOF(nic_stats.tx_cbfc_pause_frames2),
+                               QL_OFF(nic_stats.tx_cbfc_pause_frames2)},
+       {"tx_cbfc_pause_frames3", QL_SIZEOF(nic_stats.tx_cbfc_pause_frames3),
+                               QL_OFF(nic_stats.tx_cbfc_pause_frames3)},
+       {"tx_cbfc_pause_frames4", QL_SIZEOF(nic_stats.tx_cbfc_pause_frames4),
+                               QL_OFF(nic_stats.tx_cbfc_pause_frames4)},
+       {"tx_cbfc_pause_frames5", QL_SIZEOF(nic_stats.tx_cbfc_pause_frames5),
+                               QL_OFF(nic_stats.tx_cbfc_pause_frames5)},
+       {"tx_cbfc_pause_frames6", QL_SIZEOF(nic_stats.tx_cbfc_pause_frames6),
+                               QL_OFF(nic_stats.tx_cbfc_pause_frames6)},
+       {"tx_cbfc_pause_frames7", QL_SIZEOF(nic_stats.tx_cbfc_pause_frames7),
+                               QL_OFF(nic_stats.tx_cbfc_pause_frames7)},
+       {"rx_cbfc_pause_frames0", QL_SIZEOF(nic_stats.rx_cbfc_pause_frames0),
+                               QL_OFF(nic_stats.rx_cbfc_pause_frames0)},
+       {"rx_cbfc_pause_frames1", QL_SIZEOF(nic_stats.rx_cbfc_pause_frames1),
+                               QL_OFF(nic_stats.rx_cbfc_pause_frames1)},
+       {"rx_cbfc_pause_frames2", QL_SIZEOF(nic_stats.rx_cbfc_pause_frames2),
+                               QL_OFF(nic_stats.rx_cbfc_pause_frames2)},
+       {"rx_cbfc_pause_frames3", QL_SIZEOF(nic_stats.rx_cbfc_pause_frames3),
+                               QL_OFF(nic_stats.rx_cbfc_pause_frames3)},
+       {"rx_cbfc_pause_frames4", QL_SIZEOF(nic_stats.rx_cbfc_pause_frames4),
+                               QL_OFF(nic_stats.rx_cbfc_pause_frames4)},
+       {"rx_cbfc_pause_frames5", QL_SIZEOF(nic_stats.rx_cbfc_pause_frames5),
+                               QL_OFF(nic_stats.rx_cbfc_pause_frames5)},
+       {"rx_cbfc_pause_frames6", QL_SIZEOF(nic_stats.rx_cbfc_pause_frames6),
+                               QL_OFF(nic_stats.rx_cbfc_pause_frames6)},
+       {"rx_cbfc_pause_frames7", QL_SIZEOF(nic_stats.rx_cbfc_pause_frames7),
+                               QL_OFF(nic_stats.rx_cbfc_pause_frames7)},
+       {"rx_nic_fifo_drop", QL_SIZEOF(nic_stats.rx_nic_fifo_drop),
+                                       QL_OFF(nic_stats.rx_nic_fifo_drop)},
+};
+
+static const char ql_gstrings_test[][ETH_GSTRING_LEN] = {
+       "Loopback test  (offline)"
+};
+#define QLGE_TEST_LEN (sizeof(ql_gstrings_test) / ETH_GSTRING_LEN)
+#define QLGE_STATS_LEN ARRAY_SIZE(ql_gstrings_stats)
+#define QLGE_RCV_MAC_ERR_STATS 7
+
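+/* Reload the completion queue ICBs with the current coalescing parameters.
+ * The outbound (TX completion) queues and the inbound (RSS) queues are
+ * handled separately and only rewritten if their values actually changed.
+ */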
+static int ql_update_ring_coalescing(struct ql_adapter *qdev)
+{
+       int i, status = 0;
+       struct rx_ring *rx_ring;
+       struct cqicb *cqicb;
+
+       if (!netif_running(qdev->ndev))
+               return status;
+
+       /* Skip the default queue, and update the outbound handler
+        * queues if they changed.
+        */
+       cqicb = (struct cqicb *)&qdev->rx_ring[qdev->rss_ring_count];
+       if (le16_to_cpu(cqicb->irq_delay) != qdev->tx_coalesce_usecs ||
+               le16_to_cpu(cqicb->pkt_delay) !=
+                               qdev->tx_max_coalesced_frames) {
+               for (i = qdev->rss_ring_count; i < qdev->rx_ring_count; i++) {
+                       rx_ring = &qdev->rx_ring[i];
+                       cqicb = (struct cqicb *)rx_ring;
+                       cqicb->irq_delay = cpu_to_le16(qdev->tx_coalesce_usecs);
+                       cqicb->pkt_delay =
+                           cpu_to_le16(qdev->tx_max_coalesced_frames);
+                       cqicb->flags = FLAGS_LI;
+                       status = ql_write_cfg(qdev, cqicb, sizeof(*cqicb),
+                                               CFG_LCQ, rx_ring->cq_id);
+                       if (status) {
+                               netif_err(qdev, ifup, qdev->ndev,
+                                         "Failed to load CQICB.\n");
+                               goto exit;
+                       }
+               }
+       }
+
+       /* Update the inbound (RSS) handler queues if they changed. */
+       cqicb = (struct cqicb *)&qdev->rx_ring[0];
+       if (le16_to_cpu(cqicb->irq_delay) != qdev->rx_coalesce_usecs ||
+               le16_to_cpu(cqicb->pkt_delay) !=
+                                       qdev->rx_max_coalesced_frames) {
+               for (i = 0; i < qdev->rss_ring_count; i++) {
+                       rx_ring = &qdev->rx_ring[i];
+                       cqicb = (struct cqicb *)rx_ring;
+                       cqicb->irq_delay = cpu_to_le16(qdev->rx_coalesce_usecs);
+                       cqicb->pkt_delay =
+                           cpu_to_le16(qdev->rx_max_coalesced_frames);
+                       cqicb->flags = FLAGS_LI;
+                       status = ql_write_cfg(qdev, cqicb, sizeof(*cqicb),
+                                               CFG_LCQ, rx_ring->cq_id);
+                       if (status) {
+                               netif_err(qdev, ifup, qdev->ndev,
+                                         "Failed to load CQICB.\n");
+                               goto exit;
+                       }
+               }
+       }
+exit:
+       return status;
+}
+
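+/* Read the XGMAC statistics registers into qdev->nic_stats.  The iter
+ * pointer walks the u64 fields of nic_stats in declaration order, which
+ * must match the register ranges read below.
+ */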
+static void ql_update_stats(struct ql_adapter *qdev)
+{
+       u32 i;
+       u64 data;
+       u64 *iter = &qdev->nic_stats.tx_pkts;
+
+       spin_lock(&qdev->stats_lock);
+       if (ql_sem_spinlock(qdev, qdev->xg_sem_mask)) {
+               netif_err(qdev, drv, qdev->ndev,
+                         "Couldn't get xgmac sem.\n");
+               goto quit;
+       }
+       /*
+        * Get TX statistics.
+        */
+       for (i = 0x200; i < 0x280; i += 8) {
+               if (ql_read_xgmac_reg64(qdev, i, &data)) {
+                       netif_err(qdev, drv, qdev->ndev,
+                                 "Error reading status register 0x%.04x.\n",
+                                 i);
+                       goto end;
+               } else
+                       *iter = data;
+               iter++;
+       }
+
+       /*
+        * Get RX statistics.
+        */
+       for (i = 0x300; i < 0x3d0; i += 8) {
+               if (ql_read_xgmac_reg64(qdev, i, &data)) {
+                       netif_err(qdev, drv, qdev->ndev,
+                                 "Error reading status register 0x%.04x.\n",
+                                 i);
+                       goto end;
+               } else
+                       *iter = data;
+               iter++;
+       }
+
+       /* Skip over the receive MAC error statistics in nic_stats. */
+       iter += QLGE_RCV_MAC_ERR_STATS;
+
+       /*
+        * Get Per-priority TX pause frame counter statistics.
+        */
+       for (i = 0x500; i < 0x540; i += 8) {
+               if (ql_read_xgmac_reg64(qdev, i, &data)) {
+                       netif_err(qdev, drv, qdev->ndev,
+                                 "Error reading status register 0x%.04x.\n",
+                                 i);
+                       goto end;
+               } else
+                       *iter = data;
+               iter++;
+       }
+
+       /*
+        * Get Per-priority RX pause frame counter statistics.
+        */
+       for (i = 0x568; i < 0x5a8; i += 8) {
+               if (ql_read_xgmac_reg64(qdev, i, &data)) {
+                       netif_err(qdev, drv, qdev->ndev,
+                                 "Error reading status register 0x%.04x.\n",
+                                 i);
+                       goto end;
+               } else
+                       *iter = data;
+               iter++;
+       }
+
+       /*
+        * Get RX NIC FIFO DROP statistics.
+        */
+       if (ql_read_xgmac_reg64(qdev, 0x5b8, &data)) {
+               netif_err(qdev, drv, qdev->ndev,
+                         "Error reading status register 0x5b8.\n");
+               goto end;
+       } else
+               *iter = data;
+end:
+       ql_sem_unlock(qdev, qdev->xg_sem_mask);
+quit:
+       spin_unlock(&qdev->stats_lock);
+
+       QL_DUMP_STAT(qdev);
+}
+
+static void ql_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
+{
+       int index;
+       switch (stringset) {
+       case ETH_SS_TEST:
+               memcpy(buf, *ql_gstrings_test, QLGE_TEST_LEN * ETH_GSTRING_LEN);
+               break;
+       case ETH_SS_STATS:
+               for (index = 0; index < QLGE_STATS_LEN; index++) {
+                       memcpy(buf + index * ETH_GSTRING_LEN,
+                               ql_gstrings_stats[index].stat_string,
+                               ETH_GSTRING_LEN);
+               }
+               break;
+       }
+}
+
+static int ql_get_sset_count(struct net_device *dev, int sset)
+{
+       switch (sset) {
+       case ETH_SS_TEST:
+               return QLGE_TEST_LEN;
+       case ETH_SS_STATS:
+               return QLGE_STATS_LEN;
+       default:
+               return -EOPNOTSUPP;
+       }
+}
+
+static void
+ql_get_ethtool_stats(struct net_device *ndev,
+                    struct ethtool_stats *stats, u64 *data)
+{
+       struct ql_adapter *qdev = netdev_priv(ndev);
+       int index, length;
+
+       length = QLGE_STATS_LEN;
+       ql_update_stats(qdev);
+
+       for (index = 0; index < length; index++) {
+               char *p = (char *)qdev +
+                       ql_gstrings_stats[index].stat_offset;
+               *data++ = (ql_gstrings_stats[index].sizeof_stat ==
+                       sizeof(u64)) ? *(u64 *)p : (*(u32 *)p);
+       }
+}
+
+static int ql_get_settings(struct net_device *ndev,
+                             struct ethtool_cmd *ecmd)
+{
+       struct ql_adapter *qdev = netdev_priv(ndev);
+
+       ecmd->supported = SUPPORTED_10000baseT_Full;
+       ecmd->advertising = ADVERTISED_10000baseT_Full;
+       ecmd->transceiver = XCVR_EXTERNAL;
+       if ((qdev->link_status & STS_LINK_TYPE_MASK) ==
+                               STS_LINK_TYPE_10GBASET) {
+               ecmd->supported |= (SUPPORTED_TP | SUPPORTED_Autoneg);
+               ecmd->advertising |= (ADVERTISED_TP | ADVERTISED_Autoneg);
+               ecmd->port = PORT_TP;
+               ecmd->autoneg = AUTONEG_ENABLE;
+       } else {
+               ecmd->supported |= SUPPORTED_FIBRE;
+               ecmd->advertising |= ADVERTISED_FIBRE;
+               ecmd->port = PORT_FIBRE;
+       }
+
+       ethtool_cmd_speed_set(ecmd, SPEED_10000);
+       ecmd->duplex = DUPLEX_FULL;
+
+       return 0;
+}
+
+static void ql_get_drvinfo(struct net_device *ndev,
+                          struct ethtool_drvinfo *drvinfo)
+{
+       struct ql_adapter *qdev = netdev_priv(ndev);
+       strlcpy(drvinfo->driver, qlge_driver_name, sizeof(drvinfo->driver));
+       strlcpy(drvinfo->version, qlge_driver_version,
+               sizeof(drvinfo->version));
+       snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
+                "v%d.%d.%d",
+                (qdev->fw_rev_id & 0x00ff0000) >> 16,
+                (qdev->fw_rev_id & 0x0000ff00) >> 8,
+                (qdev->fw_rev_id & 0x000000ff));
+       strlcpy(drvinfo->bus_info, pci_name(qdev->pdev),
+               sizeof(drvinfo->bus_info));
+}
+
+static void ql_get_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
+{
+       struct ql_adapter *qdev = netdev_priv(ndev);
+       unsigned short ssys_dev = qdev->pdev->subsystem_device;
+
+       /* WOL is only supported for mezz cards. */
+       if (ssys_dev == QLGE_MEZZ_SSYS_ID_068 ||
+                       ssys_dev == QLGE_MEZZ_SSYS_ID_180) {
+               wol->supported = WAKE_MAGIC;
+               wol->wolopts = qdev->wol;
+       }
+}
+
+static int ql_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
+{
+       struct ql_adapter *qdev = netdev_priv(ndev);
+       unsigned short ssys_dev = qdev->pdev->subsystem_device;
+
+       /* WOL is only supported for mezz cards. */
+       if (ssys_dev != QLGE_MEZZ_SSYS_ID_068 &&
+                       ssys_dev != QLGE_MEZZ_SSYS_ID_180) {
+               netif_info(qdev, drv, qdev->ndev,
+                               "WOL is only supported for mezz cards\n");
+               return -EOPNOTSUPP;
+       }
+       if (wol->wolopts & ~WAKE_MAGIC)
+               return -EINVAL;
+       qdev->wol = wol->wolopts;
+
+       netif_info(qdev, drv, qdev->ndev, "Set wol option 0x%x\n", qdev->wol);
+       return 0;
+}
+
+static int ql_set_phys_id(struct net_device *ndev,
+                         enum ethtool_phys_id_state state)
+
+{
+       struct ql_adapter *qdev = netdev_priv(ndev);
+
+       switch (state) {
+       case ETHTOOL_ID_ACTIVE:
+               /* Save the current LED settings */
+               if (ql_mb_get_led_cfg(qdev))
+                       return -EIO;
+
+               /* Start blinking */
+               ql_mb_set_led_cfg(qdev, QL_LED_BLINK);
+               return 0;
+
+       case ETHTOOL_ID_INACTIVE:
+               /* Restore LED settings */
+               if (ql_mb_set_led_cfg(qdev, qdev->led_config))
+                       return -EIO;
+               return 0;
+
+       default:
+               return -EINVAL;
+       }
+}
+
+static int ql_start_loopback(struct ql_adapter *qdev)
+{
+       if (netif_carrier_ok(qdev->ndev)) {
+               set_bit(QL_LB_LINK_UP, &qdev->flags);
+               netif_carrier_off(qdev->ndev);
+       } else
+               clear_bit(QL_LB_LINK_UP, &qdev->flags);
+       qdev->link_config |= CFG_LOOPBACK_PCS;
+       return ql_mb_set_port_cfg(qdev);
+}
+
+static void ql_stop_loopback(struct ql_adapter *qdev)
+{
+       qdev->link_config &= ~CFG_LOOPBACK_PCS;
+       ql_mb_set_port_cfg(qdev);
+       if (test_bit(QL_LB_LINK_UP, &qdev->flags)) {
+               netif_carrier_on(qdev->ndev);
+               clear_bit(QL_LB_LINK_UP, &qdev->flags);
+       }
+}
+
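+/* Loopback test helpers: ql_create_lb_frame() fills a frame with 0xFF and
+ * plants 0xBE/0xAF marker bytes past the middle; ql_check_lb_frame()
+ * looks for the same pattern on receive and decrements the outstanding
+ * loopback frame count on a match.
+ */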
+static void ql_create_lb_frame(struct sk_buff *skb,
+                                       unsigned int frame_size)
+{
+       memset(skb->data, 0xFF, frame_size);
+       frame_size &= ~1;
+       memset(&skb->data[frame_size / 2], 0xAA, frame_size / 2 - 1);
+       memset(&skb->data[frame_size / 2 + 10], 0xBE, 1);
+       memset(&skb->data[frame_size / 2 + 12], 0xAF, 1);
+}
+
+void ql_check_lb_frame(struct ql_adapter *qdev,
+                                       struct sk_buff *skb)
+{
+       unsigned int frame_size = skb->len;
+
+       if ((*(skb->data + 3) == 0xFF) &&
+           (*(skb->data + frame_size / 2 + 10) == 0xBE) &&
+           (*(skb->data + frame_size / 2 + 12) == 0xAF)) {
+               atomic_dec(&qdev->lb_count);
+               return;
+       }
+}
+
+static int ql_run_loopback_test(struct ql_adapter *qdev)
+{
+       int i;
+       netdev_tx_t rc;
+       struct sk_buff *skb;
+       unsigned int size = SMALL_BUF_MAP_SIZE;
+
+       for (i = 0; i < 64; i++) {
+               skb = netdev_alloc_skb(qdev->ndev, size);
+               if (!skb)
+                       return -ENOMEM;
+
+               skb->queue_mapping = 0;
+               skb_put(skb, size);
+               ql_create_lb_frame(skb, size);
+               rc = ql_lb_send(skb, qdev->ndev);
+               if (rc != NETDEV_TX_OK)
+                       return -EPIPE;
+               atomic_inc(&qdev->lb_count);
+       }
+       /* Give queue time to settle before testing results. */
+       msleep(2);
+       ql_clean_lb_rx_ring(&qdev->rx_ring[0], 128);
+       return atomic_read(&qdev->lb_count) ? -EIO : 0;
+}
+
+static int ql_loopback_test(struct ql_adapter *qdev, u64 *data)
+{
+       *data = ql_start_loopback(qdev);
+       if (*data)
+               goto out;
+       *data = ql_run_loopback_test(qdev);
+out:
+       ql_stop_loopback(qdev);
+       return *data;
+}
+
+static void ql_self_test(struct net_device *ndev,
+                               struct ethtool_test *eth_test, u64 *data)
+{
+       struct ql_adapter *qdev = netdev_priv(ndev);
+
+       memset(data, 0, sizeof(u64) * QLGE_TEST_LEN);
+
+       if (netif_running(ndev)) {
+               set_bit(QL_SELFTEST, &qdev->flags);
+               if (eth_test->flags == ETH_TEST_FL_OFFLINE) {
+                       /* Offline tests */
+                       if (ql_loopback_test(qdev, &data[0]))
+                               eth_test->flags |= ETH_TEST_FL_FAILED;
+
+               } else {
+                       /* Online tests */
+                       data[0] = 0;
+               }
+               clear_bit(QL_SELFTEST, &qdev->flags);
+               /* Give link time to come up after
+                * port configuration changes.
+                */
+               msleep_interruptible(4 * 1000);
+       } else {
+               netif_err(qdev, drv, qdev->ndev,
+                         "is down; loopback test will fail.\n");
+               eth_test->flags |= ETH_TEST_FL_FAILED;
+       }
+}
+
+static int ql_get_regs_len(struct net_device *ndev)
+{
+       struct ql_adapter *qdev = netdev_priv(ndev);
+
+       if (!test_bit(QL_FRC_COREDUMP, &qdev->flags))
+               return sizeof(struct ql_mpi_coredump);
+       else
+               return sizeof(struct ql_reg_dump);
+}
+
+static void ql_get_regs(struct net_device *ndev,
+                       struct ethtool_regs *regs, void *p)
+{
+       struct ql_adapter *qdev = netdev_priv(ndev);
+
+       ql_get_dump(qdev, p);
+       qdev->core_is_dumped = 0;
+       if (!test_bit(QL_FRC_COREDUMP, &qdev->flags))
+               regs->len = sizeof(struct ql_mpi_coredump);
+       else
+               regs->len = sizeof(struct ql_reg_dump);
+}
+
+static int ql_get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
+{
+       struct ql_adapter *qdev = netdev_priv(dev);
+
+       c->rx_coalesce_usecs = qdev->rx_coalesce_usecs;
+       c->tx_coalesce_usecs = qdev->tx_coalesce_usecs;
+
+       /* This chip coalesces as follows:
+        * If a packet arrives, hold off interrupts until
+        * cqicb->int_delay expires, but if no other packets arrive don't
+        * wait longer than cqicb->pkt_int_delay. But ethtool doesn't use a
+        * timer to coalesce on a frame basis.  So, we have to take ethtool's
+        * max_coalesced_frames value and convert it to a delay in microseconds.
+        * We do this by using a basic throughput of 1,000,000 frames per
+        * second @ (1024 bytes).  This means one frame per usec. So it's a
+        * simple one to one ratio.
+        */
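+       /* Illustration of the one-to-one mapping described above (not part
+        * of the driver logic): a user request of rx_max_coalesced_frames = 8
+        * is effectively treated as a hold-off of about 8 usec before the
+        * completion interrupt fires.
+        */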
+       c->rx_max_coalesced_frames = qdev->rx_max_coalesced_frames;
+       c->tx_max_coalesced_frames = qdev->tx_max_coalesced_frames;
+
+       return 0;
+}
+
+static int ql_set_coalesce(struct net_device *ndev, struct ethtool_coalesce *c)
+{
+       struct ql_adapter *qdev = netdev_priv(ndev);
+
+       /* Validate user parameters. */
+       if (c->rx_coalesce_usecs > qdev->rx_ring_size / 2)
+               return -EINVAL;
+       /* Don't wait more than 10 usec. */
+       if (c->rx_max_coalesced_frames > MAX_INTER_FRAME_WAIT)
+               return -EINVAL;
+       if (c->tx_coalesce_usecs > qdev->tx_ring_size / 2)
+               return -EINVAL;
+       if (c->tx_max_coalesced_frames > MAX_INTER_FRAME_WAIT)
+               return -EINVAL;
+
+       /* Verify a change took place before updating the hardware. */
+       if (qdev->rx_coalesce_usecs == c->rx_coalesce_usecs &&
+           qdev->tx_coalesce_usecs == c->tx_coalesce_usecs &&
+           qdev->rx_max_coalesced_frames == c->rx_max_coalesced_frames &&
+           qdev->tx_max_coalesced_frames == c->tx_max_coalesced_frames)
+               return 0;
+
+       qdev->rx_coalesce_usecs = c->rx_coalesce_usecs;
+       qdev->tx_coalesce_usecs = c->tx_coalesce_usecs;
+       qdev->rx_max_coalesced_frames = c->rx_max_coalesced_frames;
+       qdev->tx_max_coalesced_frames = c->tx_max_coalesced_frames;
+
+       return ql_update_ring_coalescing(qdev);
+}
+
+static void ql_get_pauseparam(struct net_device *netdev,
+                       struct ethtool_pauseparam *pause)
+{
+       struct ql_adapter *qdev = netdev_priv(netdev);
+
+       ql_mb_get_port_cfg(qdev);
+       if (qdev->link_config & CFG_PAUSE_STD) {
+               pause->rx_pause = 1;
+               pause->tx_pause = 1;
+       }
+}
+
+static int ql_set_pauseparam(struct net_device *netdev,
+                       struct ethtool_pauseparam *pause)
+{
+       struct ql_adapter *qdev = netdev_priv(netdev);
+       int status = 0;
+
+       if ((pause->rx_pause) && (pause->tx_pause))
+               qdev->link_config |= CFG_PAUSE_STD;
+       else if (!pause->rx_pause && !pause->tx_pause)
+               qdev->link_config &= ~CFG_PAUSE_STD;
+       else
+               return -EINVAL;
+
+       status = ql_mb_set_port_cfg(qdev);
+       return status;
+}
+
+static u32 ql_get_msglevel(struct net_device *ndev)
+{
+       struct ql_adapter *qdev = netdev_priv(ndev);
+       return qdev->msg_enable;
+}
+
+static void ql_set_msglevel(struct net_device *ndev, u32 value)
+{
+       struct ql_adapter *qdev = netdev_priv(ndev);
+       qdev->msg_enable = value;
+}
+
+const struct ethtool_ops qlge_ethtool_ops = {
+       .get_settings = ql_get_settings,
+       .get_drvinfo = ql_get_drvinfo,
+       .get_wol = ql_get_wol,
+       .set_wol = ql_set_wol,
+       .get_regs_len   = ql_get_regs_len,
+       .get_regs = ql_get_regs,
+       .get_msglevel = ql_get_msglevel,
+       .set_msglevel = ql_set_msglevel,
+       .get_link = ethtool_op_get_link,
+       .set_phys_id             = ql_set_phys_id,
+       .self_test               = ql_self_test,
+       .get_pauseparam          = ql_get_pauseparam,
+       .set_pauseparam          = ql_set_pauseparam,
+       .get_coalesce = ql_get_coalesce,
+       .set_coalesce = ql_set_coalesce,
+       .get_sset_count = ql_get_sset_count,
+       .get_strings = ql_get_strings,
+       .get_ethtool_stats = ql_get_ethtool_stats,
+};
+
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_main.c b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
new file mode 100644 (file)
index 0000000..fd4a8e4
--- /dev/null
@@ -0,0 +1,5026 @@
+/*
+ * QLogic qlge NIC HBA Driver
+ * Copyright (c)  2003-2008 QLogic Corporation
+ * See LICENSE.qlge for copyright and licensing details.
+ * Author:     Linux qlge network device driver by
+ *                      Ron Mercer <ron.mercer@qlogic.com>
+ */
+#include <linux/kernel.h>
+#include <linux/bitops.h>
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/list.h>
+#include <linux/pci.h>
+#include <linux/dma-mapping.h>
+#include <linux/pagemap.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/dmapool.h>
+#include <linux/mempool.h>
+#include <linux/spinlock.h>
+#include <linux/kthread.h>
+#include <linux/interrupt.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/in.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <net/ipv6.h>
+#include <linux/tcp.h>
+#include <linux/udp.h>
+#include <linux/if_arp.h>
+#include <linux/if_ether.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/if_vlan.h>
+#include <linux/skbuff.h>
+#include <linux/delay.h>
+#include <linux/mm.h>
+#include <linux/vmalloc.h>
+#include <linux/prefetch.h>
+#include <net/ip6_checksum.h>
+
+#include "qlge.h"
+
+char qlge_driver_name[] = DRV_NAME;
+const char qlge_driver_version[] = DRV_VERSION;
+
+MODULE_AUTHOR("Ron Mercer <ron.mercer@qlogic.com>");
+MODULE_DESCRIPTION(DRV_STRING " ");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_VERSION);
+
+static const u32 default_msg =
+    NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK |
+/* NETIF_MSG_TIMER |   */
+    NETIF_MSG_IFDOWN |
+    NETIF_MSG_IFUP |
+    NETIF_MSG_RX_ERR |
+    NETIF_MSG_TX_ERR |
+/*  NETIF_MSG_TX_QUEUED | */
+/*  NETIF_MSG_INTR | NETIF_MSG_TX_DONE | NETIF_MSG_RX_STATUS | */
+/* NETIF_MSG_PKTDATA | */
+    NETIF_MSG_HW | NETIF_MSG_WOL | 0;
+
+static int debug = -1; /* defaults above */
+module_param(debug, int, 0664);
+MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
+
+#define MSIX_IRQ 0
+#define MSI_IRQ 1
+#define LEG_IRQ 2
+static int qlge_irq_type = MSIX_IRQ;
+module_param(qlge_irq_type, int, 0664);
+MODULE_PARM_DESC(qlge_irq_type, "0 = MSI-X, 1 = MSI, 2 = Legacy.");
+
+static int qlge_mpi_coredump;
+module_param(qlge_mpi_coredump, int, 0);
+MODULE_PARM_DESC(qlge_mpi_coredump,
+               "Option to enable MPI firmware dump. "
+               "Default is OFF - do not allocate memory.");
+
+static int qlge_force_coredump;
+module_param(qlge_force_coredump, int, 0);
+MODULE_PARM_DESC(qlge_force_coredump,
+               "Option to allow forcing a firmware core dump. "
+               "Default is OFF - do not allow.");
+
+static const struct pci_device_id qlge_pci_tbl[] = {
+       {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8012)},
+       {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8000)},
+       /* required last entry */
+       {0,}
+};
+
+MODULE_DEVICE_TABLE(pci, qlge_pci_tbl);
+
+static int ql_wol(struct ql_adapter *);
+static void qlge_set_multicast_list(struct net_device *);
+static int ql_adapter_down(struct ql_adapter *);
+static int ql_adapter_up(struct ql_adapter *);
+
+/* This hardware semaphore causes exclusive access to
+ * resources shared between the NIC driver, MPI firmware,
+ * FCOE firmware and the FC driver.
+ */
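+/* Illustrative usage of the semaphore helpers below (a sketch, not extra
+ * driver logic): callers bracket access to a shared resource, e.g.
+ *
+ *     if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
+ *             return -ETIMEDOUT;
+ *     ...access the flash...
+ *     ql_sem_unlock(qdev, SEM_FLASH_MASK);
+ *
+ * as ql_get_8000_flash_params() does further down in this file.
+ */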
+static int ql_sem_trylock(struct ql_adapter *qdev, u32 sem_mask)
+{
+       u32 sem_bits = 0;
+
+       switch (sem_mask) {
+       case SEM_XGMAC0_MASK:
+               sem_bits = SEM_SET << SEM_XGMAC0_SHIFT;
+               break;
+       case SEM_XGMAC1_MASK:
+               sem_bits = SEM_SET << SEM_XGMAC1_SHIFT;
+               break;
+       case SEM_ICB_MASK:
+               sem_bits = SEM_SET << SEM_ICB_SHIFT;
+               break;
+       case SEM_MAC_ADDR_MASK:
+               sem_bits = SEM_SET << SEM_MAC_ADDR_SHIFT;
+               break;
+       case SEM_FLASH_MASK:
+               sem_bits = SEM_SET << SEM_FLASH_SHIFT;
+               break;
+       case SEM_PROBE_MASK:
+               sem_bits = SEM_SET << SEM_PROBE_SHIFT;
+               break;
+       case SEM_RT_IDX_MASK:
+               sem_bits = SEM_SET << SEM_RT_IDX_SHIFT;
+               break;
+       case SEM_PROC_REG_MASK:
+               sem_bits = SEM_SET << SEM_PROC_REG_SHIFT;
+               break;
+       default:
+               netif_alert(qdev, probe, qdev->ndev, "Bad semaphore mask!\n");
+               return -EINVAL;
+       }
+
+       ql_write32(qdev, SEM, sem_bits | sem_mask);
+       return !(ql_read32(qdev, SEM) & sem_bits);
+}
+
+int ql_sem_spinlock(struct ql_adapter *qdev, u32 sem_mask)
+{
+       unsigned int wait_count = 30;
+       do {
+               if (!ql_sem_trylock(qdev, sem_mask))
+                       return 0;
+               udelay(100);
+       } while (--wait_count);
+       return -ETIMEDOUT;
+}
+
+void ql_sem_unlock(struct ql_adapter *qdev, u32 sem_mask)
+{
+       ql_write32(qdev, SEM, sem_mask);
+       ql_read32(qdev, SEM);   /* flush */
+}
+
+/* This function waits for a specific bit to come ready
+ * in a given register.  It is used mostly during the initialization
+ * process, but is also used in kernel thread APIs such as
+ * netdev->set_multi, netdev->set_mac_address, netdev->vlan_rx_add_vid.
+ */
+int ql_wait_reg_rdy(struct ql_adapter *qdev, u32 reg, u32 bit, u32 err_bit)
+{
+       u32 temp;
+       int count = UDELAY_COUNT;
+
+       while (count) {
+               temp = ql_read32(qdev, reg);
+
+               /* check for errors */
+               if (temp & err_bit) {
+                       netif_alert(qdev, probe, qdev->ndev,
+                                   "register 0x%.08x access error, value = 0x%.08x!\n",
+                                   reg, temp);
+                       return -EIO;
+               } else if (temp & bit)
+                       return 0;
+               udelay(UDELAY_DELAY);
+               count--;
+       }
+       netif_alert(qdev, probe, qdev->ndev,
+                   "Timed out waiting for reg %x to come ready.\n", reg);
+       return -ETIMEDOUT;
+}
+
+/* The CFG register is used to download TX and RX control blocks
+ * to the chip. This function waits for an operation to complete.
+ */
+static int ql_wait_cfg(struct ql_adapter *qdev, u32 bit)
+{
+       int count = UDELAY_COUNT;
+       u32 temp;
+
+       while (count) {
+               temp = ql_read32(qdev, CFG);
+               if (temp & CFG_LE)
+                       return -EIO;
+               if (!(temp & bit))
+                       return 0;
+               udelay(UDELAY_DELAY);
+               count--;
+       }
+       return -ETIMEDOUT;
+}
+
+/* Used to issue init control blocks to hw. Maps control block,
+ * sets address, triggers download, waits for completion.
+ */
+int ql_write_cfg(struct ql_adapter *qdev, void *ptr, int size, u32 bit,
+                u16 q_id)
+{
+       u64 map;
+       int status = 0;
+       int direction;
+       u32 mask;
+       u32 value;
+
+       direction =
+           (bit & (CFG_LRQ | CFG_LR | CFG_LCQ)) ? PCI_DMA_TODEVICE :
+           PCI_DMA_FROMDEVICE;
+
+       map = pci_map_single(qdev->pdev, ptr, size, direction);
+       if (pci_dma_mapping_error(qdev->pdev, map)) {
+               netif_err(qdev, ifup, qdev->ndev, "Couldn't map DMA area.\n");
+               return -ENOMEM;
+       }
+
+       status = ql_sem_spinlock(qdev, SEM_ICB_MASK);
+       if (status)
+               return status;
+
+       status = ql_wait_cfg(qdev, bit);
+       if (status) {
+               netif_err(qdev, ifup, qdev->ndev,
+                         "Timed out waiting for CFG to come ready.\n");
+               goto exit;
+       }
+
+       ql_write32(qdev, ICB_L, (u32) map);
+       ql_write32(qdev, ICB_H, (u32) (map >> 32));
+
+       mask = CFG_Q_MASK | (bit << 16);
+       value = bit | (q_id << CFG_Q_SHIFT);
+       ql_write32(qdev, CFG, (mask | value));
+
+       /*
+        * Wait for the bit to clear after signaling hw.
+        */
+       status = ql_wait_cfg(qdev, bit);
+exit:
+       ql_sem_unlock(qdev, SEM_ICB_MASK);      /* does flush too */
+       pci_unmap_single(qdev->pdev, map, size, direction);
+       return status;
+}
+
+/* Get a specific MAC address from the CAM.  Used for debug and reg dump. */
+int ql_get_mac_addr_reg(struct ql_adapter *qdev, u32 type, u16 index,
+                       u32 *value)
+{
+       u32 offset = 0;
+       int status;
+
+       switch (type) {
+       case MAC_ADDR_TYPE_MULTI_MAC:
+       case MAC_ADDR_TYPE_CAM_MAC:
+               {
+                       status =
+                           ql_wait_reg_rdy(qdev,
+                               MAC_ADDR_IDX, MAC_ADDR_MW, 0);
+                       if (status)
+                               goto exit;
+                       ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
+                                  (index << MAC_ADDR_IDX_SHIFT) | /* index */
+                                  MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
+                       status =
+                           ql_wait_reg_rdy(qdev,
+                               MAC_ADDR_IDX, MAC_ADDR_MR, 0);
+                       if (status)
+                               goto exit;
+                       *value++ = ql_read32(qdev, MAC_ADDR_DATA);
+                       status =
+                           ql_wait_reg_rdy(qdev,
+                               MAC_ADDR_IDX, MAC_ADDR_MW, 0);
+                       if (status)
+                               goto exit;
+                       ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
+                                  (index << MAC_ADDR_IDX_SHIFT) | /* index */
+                                  MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
+                       status =
+                           ql_wait_reg_rdy(qdev,
+                               MAC_ADDR_IDX, MAC_ADDR_MR, 0);
+                       if (status)
+                               goto exit;
+                       *value++ = ql_read32(qdev, MAC_ADDR_DATA);
+                       if (type == MAC_ADDR_TYPE_CAM_MAC) {
+                               status =
+                                   ql_wait_reg_rdy(qdev,
+                                       MAC_ADDR_IDX, MAC_ADDR_MW, 0);
+                               if (status)
+                                       goto exit;
+                               ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
+                                          (index << MAC_ADDR_IDX_SHIFT) | /* index */
+                                          MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
+                               status =
+                                   ql_wait_reg_rdy(qdev, MAC_ADDR_IDX,
+                                                   MAC_ADDR_MR, 0);
+                               if (status)
+                                       goto exit;
+                               *value++ = ql_read32(qdev, MAC_ADDR_DATA);
+                       }
+                       break;
+               }
+       case MAC_ADDR_TYPE_VLAN:
+       case MAC_ADDR_TYPE_MULTI_FLTR:
+       default:
+               netif_crit(qdev, ifup, qdev->ndev,
+                          "Address type %d not yet supported.\n", type);
+               status = -EPERM;
+       }
+exit:
+       return status;
+}
+
+/* Set up a MAC, multicast or VLAN address for the
+ * inbound frame matching.
+ */
+static int ql_set_mac_addr_reg(struct ql_adapter *qdev, u8 *addr, u32 type,
+                              u16 index)
+{
+       u32 offset = 0;
+       int status = 0;
+
+       switch (type) {
+       case MAC_ADDR_TYPE_MULTI_MAC:
+               {
+                       u32 upper = (addr[0] << 8) | addr[1];
+                       u32 lower = (addr[2] << 24) | (addr[3] << 16) |
+                                       (addr[4] << 8) | (addr[5]);
+
+                       status =
+                               ql_wait_reg_rdy(qdev,
+                               MAC_ADDR_IDX, MAC_ADDR_MW, 0);
+                       if (status)
+                               goto exit;
+                       ql_write32(qdev, MAC_ADDR_IDX, (offset++) |
+                               (index << MAC_ADDR_IDX_SHIFT) |
+                               type | MAC_ADDR_E);
+                       ql_write32(qdev, MAC_ADDR_DATA, lower);
+                       status =
+                               ql_wait_reg_rdy(qdev,
+                               MAC_ADDR_IDX, MAC_ADDR_MW, 0);
+                       if (status)
+                               goto exit;
+                       ql_write32(qdev, MAC_ADDR_IDX, (offset++) |
+                               (index << MAC_ADDR_IDX_SHIFT) |
+                               type | MAC_ADDR_E);
+
+                       ql_write32(qdev, MAC_ADDR_DATA, upper);
+                       status =
+                               ql_wait_reg_rdy(qdev,
+                               MAC_ADDR_IDX, MAC_ADDR_MW, 0);
+                       if (status)
+                               goto exit;
+                       break;
+               }
+       case MAC_ADDR_TYPE_CAM_MAC:
+               {
+                       u32 cam_output;
+                       u32 upper = (addr[0] << 8) | addr[1];
+                       u32 lower =
+                           (addr[2] << 24) | (addr[3] << 16) | (addr[4] << 8) |
+                           (addr[5]);
+                       status =
+                           ql_wait_reg_rdy(qdev,
+                               MAC_ADDR_IDX, MAC_ADDR_MW, 0);
+                       if (status)
+                               goto exit;
+                       ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
+                                  (index << MAC_ADDR_IDX_SHIFT) | /* index */
+                                  type);       /* type */
+                       ql_write32(qdev, MAC_ADDR_DATA, lower);
+                       status =
+                           ql_wait_reg_rdy(qdev,
+                               MAC_ADDR_IDX, MAC_ADDR_MW, 0);
+                       if (status)
+                               goto exit;
+                       ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
+                                  (index << MAC_ADDR_IDX_SHIFT) | /* index */
+                                  type);       /* type */
+                       ql_write32(qdev, MAC_ADDR_DATA, upper);
+                       status =
+                           ql_wait_reg_rdy(qdev,
+                               MAC_ADDR_IDX, MAC_ADDR_MW, 0);
+                       if (status)
+                               goto exit;
+                       ql_write32(qdev, MAC_ADDR_IDX, (offset) |       /* offset */
+                                  (index << MAC_ADDR_IDX_SHIFT) |      /* index */
+                                  type);       /* type */
+                       /* This field should also include the queue id
+                        * and possibly the function id.  Right now we
+                        * hardcode the route field to NIC core.
+                        */
+                       cam_output = (CAM_OUT_ROUTE_NIC |
+                                     (qdev->
+                                      func << CAM_OUT_FUNC_SHIFT) |
+                                       (0 << CAM_OUT_CQ_ID_SHIFT));
+                       if (qdev->ndev->features & NETIF_F_HW_VLAN_CTAG_RX)
+                               cam_output |= CAM_OUT_RV;
+                       /* route to NIC core */
+                       ql_write32(qdev, MAC_ADDR_DATA, cam_output);
+                       break;
+               }
+       case MAC_ADDR_TYPE_VLAN:
+               {
+                       u32 enable_bit = *((u32 *) &addr[0]);
+                       /* For VLAN, the addr actually holds a bit that
+                        * either enables or disables the vlan id we are
+                        * addressing. It's either MAC_ADDR_E on or off.
+                        * That's bit-27 we're talking about.
+                        */
+                       status =
+                           ql_wait_reg_rdy(qdev,
+                               MAC_ADDR_IDX, MAC_ADDR_MW, 0);
+                       if (status)
+                               goto exit;
+                       ql_write32(qdev, MAC_ADDR_IDX, offset | /* offset */
+                                  (index << MAC_ADDR_IDX_SHIFT) |      /* index */
+                                  type |       /* type */
+                                  enable_bit); /* enable/disable */
+                       break;
+               }
+       case MAC_ADDR_TYPE_MULTI_FLTR:
+       default:
+               netif_crit(qdev, ifup, qdev->ndev,
+                          "Address type %d not yet supported.\n", type);
+               status = -EPERM;
+       }
+exit:
+       return status;
+}
+
+/* Set or clear MAC address in hardware. We sometimes
+ * have to clear it to prevent wrong frame routing
+ * especially in a bonding environment.
+ */
+static int ql_set_mac_addr(struct ql_adapter *qdev, int set)
+{
+       int status;
+       char zero_mac_addr[ETH_ALEN];
+       char *addr;
+
+       if (set) {
+               addr = &qdev->current_mac_addr[0];
+               netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
+                            "Set Mac addr %pM\n", addr);
+       } else {
+               eth_zero_addr(zero_mac_addr);
+               addr = &zero_mac_addr[0];
+               netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
+                            "Clearing MAC address\n");
+       }
+       status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
+       if (status)
+               return status;
+       status = ql_set_mac_addr_reg(qdev, (u8 *) addr,
+                       MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ);
+       ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
+       if (status)
+               netif_err(qdev, ifup, qdev->ndev,
+                         "Failed to init mac address.\n");
+       return status;
+}
+
+void ql_link_on(struct ql_adapter *qdev)
+{
+       netif_err(qdev, link, qdev->ndev, "Link is up.\n");
+       netif_carrier_on(qdev->ndev);
+       ql_set_mac_addr(qdev, 1);
+}
+
+void ql_link_off(struct ql_adapter *qdev)
+{
+       netif_err(qdev, link, qdev->ndev, "Link is down.\n");
+       netif_carrier_off(qdev->ndev);
+       ql_set_mac_addr(qdev, 0);
+}
+
+/* Get a specific frame routing value from the CAM.
+ * Used for debug and reg dump.
+ */
+int ql_get_routing_reg(struct ql_adapter *qdev, u32 index, u32 *value)
+{
+       int status = 0;
+
+       status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
+       if (status)
+               goto exit;
+
+       ql_write32(qdev, RT_IDX,
+                  RT_IDX_TYPE_NICQ | RT_IDX_RS | (index << RT_IDX_IDX_SHIFT));
+       status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MR, 0);
+       if (status)
+               goto exit;
+       *value = ql_read32(qdev, RT_DATA);
+exit:
+       return status;
+}
+
+/* The NIC function for this chip has 16 routing indexes.  Each one can be used
+ * to route different frame types to various inbound queues.  We send broadcast/
+ * multicast/error frames to the default queue for slow handling,
+ * and CAM hit/RSS frames to the fast handling queues.
+ */
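+/* For example (illustration only), enabling the broadcast slot with
+ *
+ *     ql_set_routing_reg(qdev, RT_IDX_BCAST_SLOT, RT_IDX_BCAST, 1);
+ *
+ * routes broadcast frames to the default (slow-path) queue, per the
+ * RT_IDX_BCAST case handled below.
+ */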
+static int ql_set_routing_reg(struct ql_adapter *qdev, u32 index, u32 mask,
+                             int enable)
+{
+       int status = -EINVAL; /* Return error if no mask match. */
+       u32 value = 0;
+
+       switch (mask) {
+       case RT_IDX_CAM_HIT:
+               {
+                       value = RT_IDX_DST_CAM_Q |      /* dest */
+                           RT_IDX_TYPE_NICQ |  /* type */
+                           (RT_IDX_CAM_HIT_SLOT << RT_IDX_IDX_SHIFT);/* index */
+                       break;
+               }
+       case RT_IDX_VALID:      /* Promiscuous Mode frames. */
+               {
+                       value = RT_IDX_DST_DFLT_Q |     /* dest */
+                           RT_IDX_TYPE_NICQ |  /* type */
+                           (RT_IDX_PROMISCUOUS_SLOT << RT_IDX_IDX_SHIFT);/* index */
+                       break;
+               }
+       case RT_IDX_ERR:        /* Pass up MAC,IP,TCP/UDP error frames. */
+               {
+                       value = RT_IDX_DST_DFLT_Q |     /* dest */
+                           RT_IDX_TYPE_NICQ |  /* type */
+                           (RT_IDX_ALL_ERR_SLOT << RT_IDX_IDX_SHIFT);/* index */
+                       break;
+               }
+       case RT_IDX_IP_CSUM_ERR: /* Pass up IP CSUM error frames. */
+               {
+                       value = RT_IDX_DST_DFLT_Q | /* dest */
+                               RT_IDX_TYPE_NICQ | /* type */
+                               (RT_IDX_IP_CSUM_ERR_SLOT <<
+                               RT_IDX_IDX_SHIFT); /* index */
+                       break;
+               }
+       case RT_IDX_TU_CSUM_ERR: /* Pass up TCP/UDP CSUM error frames. */
+               {
+                       value = RT_IDX_DST_DFLT_Q | /* dest */
+                               RT_IDX_TYPE_NICQ | /* type */
+                               (RT_IDX_TCP_UDP_CSUM_ERR_SLOT <<
+                               RT_IDX_IDX_SHIFT); /* index */
+                       break;
+               }
+       case RT_IDX_BCAST:      /* Pass up Broadcast frames to default Q. */
+               {
+                       value = RT_IDX_DST_DFLT_Q |     /* dest */
+                           RT_IDX_TYPE_NICQ |  /* type */
+                           (RT_IDX_BCAST_SLOT << RT_IDX_IDX_SHIFT);/* index */
+                       break;
+               }
+       case RT_IDX_MCAST:      /* Pass up All Multicast frames. */
+               {
+                       value = RT_IDX_DST_DFLT_Q |     /* dest */
+                           RT_IDX_TYPE_NICQ |  /* type */
+                           (RT_IDX_ALLMULTI_SLOT << RT_IDX_IDX_SHIFT);/* index */
+                       break;
+               }
+       case RT_IDX_MCAST_MATCH:        /* Pass up matched Multicast frames. */
+               {
+                       value = RT_IDX_DST_DFLT_Q |     /* dest */
+                           RT_IDX_TYPE_NICQ |  /* type */
+                           (RT_IDX_MCAST_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */
+                       break;
+               }
+       case RT_IDX_RSS_MATCH:  /* Pass up matched RSS frames. */
+               {
+                       value = RT_IDX_DST_RSS |        /* dest */
+                           RT_IDX_TYPE_NICQ |  /* type */
+                           (RT_IDX_RSS_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */
+                       break;
+               }
+       case 0:         /* Clear the E-bit on an entry. */
+               {
+                       value = RT_IDX_DST_DFLT_Q |     /* dest */
+                           RT_IDX_TYPE_NICQ |  /* type */
+                           (index << RT_IDX_IDX_SHIFT);/* index */
+                       break;
+               }
+       default:
+               netif_err(qdev, ifup, qdev->ndev,
+                         "Mask type %d not yet supported.\n", mask);
+               status = -EPERM;
+               goto exit;
+       }
+
+       if (value) {
+               status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
+               if (status)
+                       goto exit;
+               value |= (enable ? RT_IDX_E : 0);
+               ql_write32(qdev, RT_IDX, value);
+               ql_write32(qdev, RT_DATA, enable ? mask : 0);
+       }
+exit:
+       return status;
+}
+
+static void ql_enable_interrupts(struct ql_adapter *qdev)
+{
+       ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16) | INTR_EN_EI);
+}
+
+static void ql_disable_interrupts(struct ql_adapter *qdev)
+{
+       ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16));
+}
+
+/* If we're running with multiple MSI-X vectors then we enable on the fly.
+ * Otherwise, we may have multiple outstanding workers and don't want to
+ * enable until the last one finishes. In this case, the irq_cnt gets
+ * incremented every time we queue a worker and decremented every time
+ * a worker finishes.  Once it hits zero we enable the interrupt.
+ */
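+/* Illustration of the counting scheme above (no additional logic): for the
+ * default (zeroeth) or non-MSI-X vector, ql_disable_completion_interrupt()
+ * bumps irq_cnt once per outstanding worker, and each call to
+ * ql_enable_completion_interrupt() decrements it; only the call that drops
+ * irq_cnt to zero writes INTR_EN to re-enable the interrupt.
+ */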
+u32 ql_enable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
+{
+       u32 var = 0;
+       unsigned long hw_flags = 0;
+       struct intr_context *ctx = qdev->intr_context + intr;
+
+       if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr)) {
+               /* Always enable if we're MSIX multi interrupts and
+                * it's not the default (zeroeth) interrupt.
+                */
+               ql_write32(qdev, INTR_EN,
+                          ctx->intr_en_mask);
+               var = ql_read32(qdev, STS);
+               return var;
+       }
+
+       spin_lock_irqsave(&qdev->hw_lock, hw_flags);
+       if (atomic_dec_and_test(&ctx->irq_cnt)) {
+               ql_write32(qdev, INTR_EN,
+                          ctx->intr_en_mask);
+               var = ql_read32(qdev, STS);
+       }
+       spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
+       return var;
+}
+
+static u32 ql_disable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
+{
+       u32 var = 0;
+       struct intr_context *ctx;
+
+       /* HW disables for us if we're MSIX multi interrupts and
+        * it's not the default (zeroeth) interrupt.
+        */
+       if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr))
+               return 0;
+
+       ctx = qdev->intr_context + intr;
+       spin_lock(&qdev->hw_lock);
+       if (!atomic_read(&ctx->irq_cnt)) {
+               ql_write32(qdev, INTR_EN,
+               ctx->intr_dis_mask);
+               var = ql_read32(qdev, STS);
+       }
+       atomic_inc(&ctx->irq_cnt);
+       spin_unlock(&qdev->hw_lock);
+       return var;
+}
+
+static void ql_enable_all_completion_interrupts(struct ql_adapter *qdev)
+{
+       int i;
+       for (i = 0; i < qdev->intr_count; i++) {
+               /* The enable call does an atomic_dec_and_test
+                * and enables only if the result is zero.
+                * So we precharge it here.
+                */
+               if (unlikely(!test_bit(QL_MSIX_ENABLED, &qdev->flags) ||
+                       i == 0))
+                       atomic_set(&qdev->intr_context[i].irq_cnt, 1);
+               ql_enable_completion_interrupt(qdev, i);
+       }
+}
+
+static int ql_validate_flash(struct ql_adapter *qdev, u32 size, const char *str)
+{
+       int status, i;
+       u16 csum = 0;
+       __le16 *flash = (__le16 *)&qdev->flash;
+
+       status = strncmp((char *)&qdev->flash, str, 4);
+       if (status) {
+               netif_err(qdev, ifup, qdev->ndev, "Invalid flash signature.\n");
+               return  status;
+       }
+
+       for (i = 0; i < size; i++)
+               csum += le16_to_cpu(*flash++);
+
+       if (csum)
+               netif_err(qdev, ifup, qdev->ndev,
+                         "Invalid flash checksum, csum = 0x%.04x.\n", csum);
+
+       return csum;
+}
+
+static int ql_read_flash_word(struct ql_adapter *qdev, int offset, __le32 *data)
+{
+       int status = 0;
+       /* wait for reg to come ready */
+       status = ql_wait_reg_rdy(qdev,
+                       FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
+       if (status)
+               goto exit;
+       /* set up for reg read */
+       ql_write32(qdev, FLASH_ADDR, FLASH_ADDR_R | offset);
+       /* wait for reg to come ready */
+       status = ql_wait_reg_rdy(qdev,
+                       FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
+       if (status)
+               goto exit;
+       /* This data is stored on flash as an array of
+        * __le32.  Since ql_read32() returns cpu endian
+        * we need to swap it back.
+        */
+       *data = cpu_to_le32(ql_read32(qdev, FLASH_DATA));
+exit:
+       return status;
+}
+
+static int ql_get_8000_flash_params(struct ql_adapter *qdev)
+{
+       u32 i, size;
+       int status;
+       __le32 *p = (__le32 *)&qdev->flash;
+       u32 offset;
+       u8 mac_addr[6];
+
+       /* Get flash offset for function and adjust
+        * for dword access.
+        */
+       if (!qdev->port)
+               offset = FUNC0_FLASH_OFFSET / sizeof(u32);
+       else
+               offset = FUNC1_FLASH_OFFSET / sizeof(u32);
+
+       if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
+               return -ETIMEDOUT;
+
+       size = sizeof(struct flash_params_8000) / sizeof(u32);
+       for (i = 0; i < size; i++, p++) {
+               status = ql_read_flash_word(qdev, i+offset, p);
+               if (status) {
+                       netif_err(qdev, ifup, qdev->ndev,
+                                 "Error reading flash.\n");
+                       goto exit;
+               }
+       }
+
+       status = ql_validate_flash(qdev,
+                       sizeof(struct flash_params_8000) / sizeof(u16),
+                       "8000");
+       if (status) {
+               netif_err(qdev, ifup, qdev->ndev, "Invalid flash.\n");
+               status = -EINVAL;
+               goto exit;
+       }
+
+       /* Extract either manufacturer or BOFM modified
+        * MAC address.
+        */
+       if (qdev->flash.flash_params_8000.data_type1 == 2)
+               memcpy(mac_addr,
+                       qdev->flash.flash_params_8000.mac_addr1,
+                       qdev->ndev->addr_len);
+       else
+               memcpy(mac_addr,
+                       qdev->flash.flash_params_8000.mac_addr,
+                       qdev->ndev->addr_len);
+
+       if (!is_valid_ether_addr(mac_addr)) {
+               netif_err(qdev, ifup, qdev->ndev, "Invalid MAC address.\n");
+               status = -EINVAL;
+               goto exit;
+       }
+
+       memcpy(qdev->ndev->dev_addr,
+               mac_addr,
+               qdev->ndev->addr_len);
+
+exit:
+       ql_sem_unlock(qdev, SEM_FLASH_MASK);
+       return status;
+}
+
+static int ql_get_8012_flash_params(struct ql_adapter *qdev)
+{
+       int i;
+       int status;
+       __le32 *p = (__le32 *)&qdev->flash;
+       u32 offset = 0;
+       u32 size = sizeof(struct flash_params_8012) / sizeof(u32);
+
+       /* Second function's parameters follow the first
+        * function's.
+        */
+       if (qdev->port)
+               offset = size;
+
+       if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
+               return -ETIMEDOUT;
+
+       for (i = 0; i < size; i++, p++) {
+               status = ql_read_flash_word(qdev, i+offset, p);
+               if (status) {
+                       netif_err(qdev, ifup, qdev->ndev,
+                                 "Error reading flash.\n");
+                       goto exit;
+               }
+
+       }
+
+       status = ql_validate_flash(qdev,
+                       sizeof(struct flash_params_8012) / sizeof(u16),
+                       "8012");
+       if (status) {
+               netif_err(qdev, ifup, qdev->ndev, "Invalid flash.\n");
+               status = -EINVAL;
+               goto exit;
+       }
+
+       if (!is_valid_ether_addr(qdev->flash.flash_params_8012.mac_addr)) {
+               status = -EINVAL;
+               goto exit;
+       }
+
+       memcpy(qdev->ndev->dev_addr,
+               qdev->flash.flash_params_8012.mac_addr,
+               qdev->ndev->addr_len);
+
+exit:
+       ql_sem_unlock(qdev, SEM_FLASH_MASK);
+       return status;
+}
+
+/* xgmac registers are located behind the xgmac_addr and xgmac_data
+ * register pair.  Each read/write requires us to wait for the ready
+ * bit before reading/writing the data.
+ */
+static int ql_write_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 data)
+{
+       int status;
+       /* wait for reg to come ready */
+       status = ql_wait_reg_rdy(qdev,
+                       XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
+       if (status)
+               return status;
+       /* write the data to the data reg */
+       ql_write32(qdev, XGMAC_DATA, data);
+       /* trigger the write */
+       ql_write32(qdev, XGMAC_ADDR, reg);
+       return status;
+}
+
+/* xgmac registers are located behind the xgmac_addr and xgmac_data
+ * register pair.  Each read/write requires us to wait for the ready
+ * bit before reading/writing the data.
+ */
+int ql_read_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 *data)
+{
+       int status = 0;
+       /* wait for reg to come ready */
+       status = ql_wait_reg_rdy(qdev,
+                       XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
+       if (status)
+               goto exit;
+       /* set up for reg read */
+       ql_write32(qdev, XGMAC_ADDR, reg | XGMAC_ADDR_R);
+       /* wait for reg to come ready */
+       status = ql_wait_reg_rdy(qdev,
+                       XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
+       if (status)
+               goto exit;
+       /* get the data */
+       *data = ql_read32(qdev, XGMAC_DATA);
+exit:
+       return status;
+}
+
+/* This is used for reading the 64-bit statistics regs. */
+int ql_read_xgmac_reg64(struct ql_adapter *qdev, u32 reg, u64 *data)
+{
+       int status = 0;
+       u32 hi = 0;
+       u32 lo = 0;
+
+       status = ql_read_xgmac_reg(qdev, reg, &lo);
+       if (status)
+               goto exit;
+
+       status = ql_read_xgmac_reg(qdev, reg + 4, &hi);
+       if (status)
+               goto exit;
+
+       *data = (u64) lo | ((u64) hi << 32);
+
+exit:
+       return status;
+}
+
+static int ql_8000_port_initialize(struct ql_adapter *qdev)
+{
+       int status;
+       /*
+        * Get MPI firmware version for driver banner
+        * and ethtool info.
+        */
+       status = ql_mb_about_fw(qdev);
+       if (status)
+               goto exit;
+       status = ql_mb_get_fw_state(qdev);
+       if (status)
+               goto exit;
+       /* Wake up a worker to get/set the TX/RX frame sizes. */
+       queue_delayed_work(qdev->workqueue, &qdev->mpi_port_cfg_work, 0);
+exit:
+       return status;
+}
+
+/* Take the MAC Core out of reset.
+ * Enable statistics counting.
+ * Take the transmitter/receiver out of reset.
+ * This functionality may be done in the MPI firmware at a
+ * later date.
+ */
+static int ql_8012_port_initialize(struct ql_adapter *qdev)
+{
+       int status = 0;
+       u32 data;
+
+       if (ql_sem_trylock(qdev, qdev->xg_sem_mask)) {
+               /* Another function has the semaphore, so
+                * wait for the port init bit to come ready.
+                */
+               netif_info(qdev, link, qdev->ndev,
+                          "Another function has the semaphore, so wait for the port init bit to come ready.\n");
+               status = ql_wait_reg_rdy(qdev, STS, qdev->port_init, 0);
+               if (status) {
+                       netif_crit(qdev, link, qdev->ndev,
+                                  "Port initialize timed out.\n");
+               }
+               return status;
+       }
+
+       netif_info(qdev, link, qdev->ndev, "Got xgmac semaphore!\n");
+       /* Set the core reset. */
+       status = ql_read_xgmac_reg(qdev, GLOBAL_CFG, &data);
+       if (status)
+               goto end;
+       data |= GLOBAL_CFG_RESET;
+       status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);
+       if (status)
+               goto end;
+
+       /* Clear the core reset and turn on jumbo for receiver. */
+       data &= ~GLOBAL_CFG_RESET;      /* Clear core reset. */
+       data |= GLOBAL_CFG_JUMBO;       /* Turn on jumbo. */
+       data |= GLOBAL_CFG_TX_STAT_EN;
+       data |= GLOBAL_CFG_RX_STAT_EN;
+       status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);
+       if (status)
+               goto end;
+
+       /* Enable transmitter, and clear its reset. */
+       status = ql_read_xgmac_reg(qdev, TX_CFG, &data);
+       if (status)
+               goto end;
+       data &= ~TX_CFG_RESET;  /* Clear the TX MAC reset. */
+       data |= TX_CFG_EN;      /* Enable the transmitter. */
+       status = ql_write_xgmac_reg(qdev, TX_CFG, data);
+       if (status)
+               goto end;
+
+       /* Enable receiver and clear its reset. */
+       status = ql_read_xgmac_reg(qdev, RX_CFG, &data);
+       if (status)
+               goto end;
+       data &= ~RX_CFG_RESET;  /* Clear the RX MAC reset. */
+       data |= RX_CFG_EN;      /* Enable the receiver. */
+       status = ql_write_xgmac_reg(qdev, RX_CFG, data);
+       if (status)
+               goto end;
+
+       /* Turn on jumbo. */
+       status =
+           ql_write_xgmac_reg(qdev, MAC_TX_PARAMS, MAC_TX_PARAMS_JUMBO | (0x2580 << 16));
+       if (status)
+               goto end;
+       status =
+           ql_write_xgmac_reg(qdev, MAC_RX_PARAMS, 0x2580);
+       if (status)
+               goto end;
+
+       /* Signal to the world that the port is enabled. */
+       ql_write32(qdev, STS, ((qdev->port_init << 16) | qdev->port_init));
+end:
+       ql_sem_unlock(qdev, qdev->xg_sem_mask);
+       return status;
+}
+
+static inline unsigned int ql_lbq_block_size(struct ql_adapter *qdev)
+{
+       return PAGE_SIZE << qdev->lbq_buf_order;
+}
+
+/* Get the next large buffer. */
+static struct bq_desc *ql_get_curr_lbuf(struct rx_ring *rx_ring)
+{
+       struct bq_desc *lbq_desc = &rx_ring->lbq[rx_ring->lbq_curr_idx];
+       rx_ring->lbq_curr_idx++;
+       if (rx_ring->lbq_curr_idx == rx_ring->lbq_len)
+               rx_ring->lbq_curr_idx = 0;
+       rx_ring->lbq_free_cnt++;
+       return lbq_desc;
+}
+
+static struct bq_desc *ql_get_curr_lchunk(struct ql_adapter *qdev,
+               struct rx_ring *rx_ring)
+{
+       struct bq_desc *lbq_desc = ql_get_curr_lbuf(rx_ring);
+
+       pci_dma_sync_single_for_cpu(qdev->pdev,
+                                       dma_unmap_addr(lbq_desc, mapaddr),
+                                   rx_ring->lbq_buf_size,
+                                       PCI_DMA_FROMDEVICE);
+
+       /* If it's the last chunk of our master page then
+        * we unmap it.
+        */
+       if ((lbq_desc->p.pg_chunk.offset + rx_ring->lbq_buf_size)
+                                       == ql_lbq_block_size(qdev))
+               pci_unmap_page(qdev->pdev,
+                               lbq_desc->p.pg_chunk.map,
+                               ql_lbq_block_size(qdev),
+                               PCI_DMA_FROMDEVICE);
+       return lbq_desc;
+}
+
+/* Get the next small buffer. */
+static struct bq_desc *ql_get_curr_sbuf(struct rx_ring *rx_ring)
+{
+       struct bq_desc *sbq_desc = &rx_ring->sbq[rx_ring->sbq_curr_idx];
+       rx_ring->sbq_curr_idx++;
+       if (rx_ring->sbq_curr_idx == rx_ring->sbq_len)
+               rx_ring->sbq_curr_idx = 0;
+       rx_ring->sbq_free_cnt++;
+       return sbq_desc;
+}
+
+/* Update an rx ring index. */
+static void ql_update_cq(struct rx_ring *rx_ring)
+{
+       rx_ring->cnsmr_idx++;
+       rx_ring->curr_entry++;
+       if (unlikely(rx_ring->cnsmr_idx == rx_ring->cq_len)) {
+               rx_ring->cnsmr_idx = 0;
+               rx_ring->curr_entry = rx_ring->cq_base;
+       }
+}
+
+static void ql_write_cq_idx(struct rx_ring *rx_ring)
+{
+       ql_write_db_reg(rx_ring->cnsmr_idx, rx_ring->cnsmr_idx_db_reg);
+}
+
+static int ql_get_next_chunk(struct ql_adapter *qdev, struct rx_ring *rx_ring,
+                                               struct bq_desc *lbq_desc)
+{
+       if (!rx_ring->pg_chunk.page) {
+               u64 map;
+               rx_ring->pg_chunk.page = alloc_pages(__GFP_COLD | __GFP_COMP |
+                                               GFP_ATOMIC,
+                                               qdev->lbq_buf_order);
+               if (unlikely(!rx_ring->pg_chunk.page)) {
+                       netif_err(qdev, drv, qdev->ndev,
+                                 "page allocation failed.\n");
+                       return -ENOMEM;
+               }
+               rx_ring->pg_chunk.offset = 0;
+               map = pci_map_page(qdev->pdev, rx_ring->pg_chunk.page,
+                                       0, ql_lbq_block_size(qdev),
+                                       PCI_DMA_FROMDEVICE);
+               if (pci_dma_mapping_error(qdev->pdev, map)) {
+                       __free_pages(rx_ring->pg_chunk.page,
+                                       qdev->lbq_buf_order);
+                       rx_ring->pg_chunk.page = NULL;
+                       netif_err(qdev, drv, qdev->ndev,
+                                 "PCI mapping failed.\n");
+                       return -ENOMEM;
+               }
+               rx_ring->pg_chunk.map = map;
+               rx_ring->pg_chunk.va = page_address(rx_ring->pg_chunk.page);
+       }
+
+       /* Copy the current master pg_chunk info
+        * to the current descriptor.
+        */
+       lbq_desc->p.pg_chunk = rx_ring->pg_chunk;
+
+       /* Adjust the master page chunk for next
+        * buffer get.
+        */
+       rx_ring->pg_chunk.offset += rx_ring->lbq_buf_size;
+       if (rx_ring->pg_chunk.offset == ql_lbq_block_size(qdev)) {
+               rx_ring->pg_chunk.page = NULL;
+               lbq_desc->p.pg_chunk.last_flag = 1;
+       } else {
+               rx_ring->pg_chunk.va += rx_ring->lbq_buf_size;
+               get_page(rx_ring->pg_chunk.page);
+               lbq_desc->p.pg_chunk.last_flag = 0;
+       }
+       return 0;
+}
+/* Process (refill) a large buffer queue. */
+static void ql_update_lbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
+{
+       u32 clean_idx = rx_ring->lbq_clean_idx;
+       u32 start_idx = clean_idx;
+       struct bq_desc *lbq_desc;
+       u64 map;
+       int i;
+
+       while (rx_ring->lbq_free_cnt > 32) {
+               for (i = (rx_ring->lbq_clean_idx % 16); i < 16; i++) {
+                       netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
+                                    "lbq: try cleaning clean_idx = %d.\n",
+                                    clean_idx);
+                       lbq_desc = &rx_ring->lbq[clean_idx];
+                       if (ql_get_next_chunk(qdev, rx_ring, lbq_desc)) {
+                               rx_ring->lbq_clean_idx = clean_idx;
+                               netif_err(qdev, ifup, qdev->ndev,
+                                               "Could not get a page chunk, i=%d, clean_idx =%d .\n",
+                                               i, clean_idx);
+                               return;
+                       }
+
+                       map = lbq_desc->p.pg_chunk.map +
+                               lbq_desc->p.pg_chunk.offset;
+                       dma_unmap_addr_set(lbq_desc, mapaddr, map);
+                       dma_unmap_len_set(lbq_desc, maplen,
+                                       rx_ring->lbq_buf_size);
+                       *lbq_desc->addr = cpu_to_le64(map);
+
+                       pci_dma_sync_single_for_device(qdev->pdev, map,
+                                               rx_ring->lbq_buf_size,
+                                               PCI_DMA_FROMDEVICE);
+                       clean_idx++;
+                       if (clean_idx == rx_ring->lbq_len)
+                               clean_idx = 0;
+               }
+
+               rx_ring->lbq_clean_idx = clean_idx;
+               rx_ring->lbq_prod_idx += 16;
+               if (rx_ring->lbq_prod_idx == rx_ring->lbq_len)
+                       rx_ring->lbq_prod_idx = 0;
+               rx_ring->lbq_free_cnt -= 16;
+       }
+
+       if (start_idx != clean_idx) {
+               netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
+                            "lbq: updating prod idx = %d.\n",
+                            rx_ring->lbq_prod_idx);
+               ql_write_db_reg(rx_ring->lbq_prod_idx,
+                               rx_ring->lbq_prod_idx_db_reg);
+       }
+}
+
+/* Process (refill) a small buffer queue. */
+static void ql_update_sbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
+{
+       u32 clean_idx = rx_ring->sbq_clean_idx;
+       u32 start_idx = clean_idx;
+       struct bq_desc *sbq_desc;
+       u64 map;
+       int i;
+
+       while (rx_ring->sbq_free_cnt > 16) {
+               for (i = (rx_ring->sbq_clean_idx % 16); i < 16; i++) {
+                       sbq_desc = &rx_ring->sbq[clean_idx];
+                       netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
+                                    "sbq: try cleaning clean_idx = %d.\n",
+                                    clean_idx);
+                       if (sbq_desc->p.skb == NULL) {
+                               netif_printk(qdev, rx_status, KERN_DEBUG,
+                                            qdev->ndev,
+                                            "sbq: getting new skb for index %d.\n",
+                                            sbq_desc->index);
+                               sbq_desc->p.skb =
+                                   netdev_alloc_skb(qdev->ndev,
+                                                    SMALL_BUFFER_SIZE);
+                               if (sbq_desc->p.skb == NULL) {
+                                       rx_ring->sbq_clean_idx = clean_idx;
+                                       return;
+                               }
+                               skb_reserve(sbq_desc->p.skb, QLGE_SB_PAD);
+                               map = pci_map_single(qdev->pdev,
+                                                    sbq_desc->p.skb->data,
+                                                    rx_ring->sbq_buf_size,
+                                                    PCI_DMA_FROMDEVICE);
+                               if (pci_dma_mapping_error(qdev->pdev, map)) {
+                                       netif_err(qdev, ifup, qdev->ndev,
+                                                 "PCI mapping failed.\n");
+                                       rx_ring->sbq_clean_idx = clean_idx;
+                                       dev_kfree_skb_any(sbq_desc->p.skb);
+                                       sbq_desc->p.skb = NULL;
+                                       return;
+                               }
+                               dma_unmap_addr_set(sbq_desc, mapaddr, map);
+                               dma_unmap_len_set(sbq_desc, maplen,
+                                                 rx_ring->sbq_buf_size);
+                               *sbq_desc->addr = cpu_to_le64(map);
+                       }
+
+                       clean_idx++;
+                       if (clean_idx == rx_ring->sbq_len)
+                               clean_idx = 0;
+               }
+               rx_ring->sbq_clean_idx = clean_idx;
+               rx_ring->sbq_prod_idx += 16;
+               if (rx_ring->sbq_prod_idx == rx_ring->sbq_len)
+                       rx_ring->sbq_prod_idx = 0;
+               rx_ring->sbq_free_cnt -= 16;
+       }
+
+       if (start_idx != clean_idx) {
+               netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
+                            "sbq: updating prod idx = %d.\n",
+                            rx_ring->sbq_prod_idx);
+               ql_write_db_reg(rx_ring->sbq_prod_idx,
+                               rx_ring->sbq_prod_idx_db_reg);
+       }
+}
+
+static void ql_update_buffer_queues(struct ql_adapter *qdev,
+                                   struct rx_ring *rx_ring)
+{
+       ql_update_sbq(qdev, rx_ring);
+       ql_update_lbq(qdev, rx_ring);
+}
+
+/* Unmaps tx buffers.  Can be called from send() if a pci mapping
+ * fails at some stage, or from the interrupt when a tx completes.
+ */
+static void ql_unmap_send(struct ql_adapter *qdev,
+                         struct tx_ring_desc *tx_ring_desc, int mapped)
+{
+       int i;
+       for (i = 0; i < mapped; i++) {
+               if (i == 0 || (i == 7 && mapped > 7)) {
+                       /*
+                        * Unmap the skb->data area, or the
+                        * external sglist (AKA the Outbound
+                        * Address List (OAL)).
+                        * If it's the zeroeth element, then it's
+                        * the skb->data area.  If it's the 7th
+                        * element and there are more than 6 frags,
+                        * then it's an OAL.
+                        */
+                       if (i == 7) {
+                               netif_printk(qdev, tx_done, KERN_DEBUG,
+                                            qdev->ndev,
+                                            "unmapping OAL area.\n");
+                       }
+                       pci_unmap_single(qdev->pdev,
+                                        dma_unmap_addr(&tx_ring_desc->map[i],
+                                                       mapaddr),
+                                        dma_unmap_len(&tx_ring_desc->map[i],
+                                                      maplen),
+                                        PCI_DMA_TODEVICE);
+               } else {
+                       netif_printk(qdev, tx_done, KERN_DEBUG, qdev->ndev,
+                                    "unmapping frag %d.\n", i);
+                       pci_unmap_page(qdev->pdev,
+                                      dma_unmap_addr(&tx_ring_desc->map[i],
+                                                     mapaddr),
+                                      dma_unmap_len(&tx_ring_desc->map[i],
+                                                    maplen), PCI_DMA_TODEVICE);
+               }
+       }
+}
+
+/* Map the buffers for this transmit.  This will return
+ * NETDEV_TX_BUSY or NETDEV_TX_OK based on success.
+ */
+static int ql_map_send(struct ql_adapter *qdev,
+                      struct ob_mac_iocb_req *mac_iocb_ptr,
+                      struct sk_buff *skb, struct tx_ring_desc *tx_ring_desc)
+{
+       int len = skb_headlen(skb);
+       dma_addr_t map;
+       int frag_idx, err, map_idx = 0;
+       struct tx_buf_desc *tbd = mac_iocb_ptr->tbd;
+       int frag_cnt = skb_shinfo(skb)->nr_frags;
+
+       if (frag_cnt) {
+               netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
+                            "frag_cnt = %d.\n", frag_cnt);
+       }
+       /*
+        * Map the skb buffer first.
+        */
+       map = pci_map_single(qdev->pdev, skb->data, len, PCI_DMA_TODEVICE);
+
+       err = pci_dma_mapping_error(qdev->pdev, map);
+       if (err) {
+               netif_err(qdev, tx_queued, qdev->ndev,
+                         "PCI mapping failed with error: %d\n", err);
+
+               return NETDEV_TX_BUSY;
+       }
+
+       tbd->len = cpu_to_le32(len);
+       tbd->addr = cpu_to_le64(map);
+       dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
+       dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen, len);
+       map_idx++;
+
+       /*
+        * This loop fills the remainder of the 8 address descriptors
+        * in the IOCB.  If there are more than 7 fragments, then the
+        * eighth address desc will point to an external list (OAL).
+        * When this happens, the remainder of the frags will be stored
+        * in this list.
+        */
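+       /* map_idx indexes tx_ring_desc->map[] (skb->data in slot 0, then
+        * the frags and, when needed, the OAL mapping itself), while
+        * frag_idx walks the skb fragments.
+        */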
+       for (frag_idx = 0; frag_idx < frag_cnt; frag_idx++, map_idx++) {
+               skb_frag_t *frag = &skb_shinfo(skb)->frags[frag_idx];
+               tbd++;
+               if (frag_idx == 6 && frag_cnt > 7) {
+                       /* Let's tack on an sglist.
+                        * Our control block will now
+                        * look like this:
+                        * iocb->seg[0] = skb->data
+                        * iocb->seg[1] = frag[0]
+                        * iocb->seg[2] = frag[1]
+                        * iocb->seg[3] = frag[2]
+                        * iocb->seg[4] = frag[3]
+                        * iocb->seg[5] = frag[4]
+                        * iocb->seg[6] = frag[5]
+                        * iocb->seg[7] = ptr to OAL (external sglist)
+                        * oal->seg[0] = frag[6]
+                        * oal->seg[1] = frag[7]
+                        * oal->seg[2] = frag[8]
+                        * oal->seg[3] = frag[9]
+                        * oal->seg[4] = frag[10]
+                        *      etc...
+                        */
+                       /* Tack on the OAL in the eighth segment of IOCB. */
+                       map = pci_map_single(qdev->pdev, &tx_ring_desc->oal,
+                                            sizeof(struct oal),
+                                            PCI_DMA_TODEVICE);
+                       err = pci_dma_mapping_error(qdev->pdev, map);
+                       if (err) {
+                               netif_err(qdev, tx_queued, qdev->ndev,
+                                         "PCI mapping outbound address list with error: %d\n",
+                                         err);
+                               goto map_error;
+                       }
+
+                       tbd->addr = cpu_to_le64(map);
+                       /*
+                        * The length is the number of fragments
+                        * that remain to be mapped times the size
+                        * of a single descriptor (tx_buf_desc).
+                        */
+                       tbd->len =
+                           cpu_to_le32((sizeof(struct tx_buf_desc) *
+                                        (frag_cnt - frag_idx)) | TX_DESC_C);
+                       dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr,
+                                          map);
+                       dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
+                                         sizeof(struct oal));
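+                       /* Redirect tbd into the OAL so the remaining frags
+                        * are written to the external list; the OAL mapping
+                        * itself occupies one map[] slot (hence the extra
+                        * map_idx++ below).
+                        */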
+                       tbd = (struct tx_buf_desc *)&tx_ring_desc->oal;
+                       map_idx++;
+               }
+
+               map = skb_frag_dma_map(&qdev->pdev->dev, frag, 0, skb_frag_size(frag),
+                                      DMA_TO_DEVICE);
+
+               err = dma_mapping_error(&qdev->pdev->dev, map);
+               if (err) {
+                       netif_err(qdev, tx_queued, qdev->ndev,
+                                 "PCI mapping frags failed with error: %d.\n",
+                                 err);
+                       goto map_error;
+               }
+
+               tbd->addr = cpu_to_le64(map);
+               tbd->len = cpu_to_le32(skb_frag_size(frag));
+               dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
+               dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
+                                 skb_frag_size(frag));
+
+       }
+       /* Save the number of segments we've mapped. */
+       tx_ring_desc->map_cnt = map_idx;
+       /* Terminate the last segment. */
+       tbd->len = cpu_to_le32(le32_to_cpu(tbd->len) | TX_DESC_E);
+       return NETDEV_TX_OK;
+
+map_error:
+       /*
+        * If the first frag mapping failed, then map_idx is 1 and only
+        * the skb->data area gets unmapped.  Otherwise we pass in the
+        * number of segments that mapped successfully so they can be
+        * unmapped.
+        */
+       ql_unmap_send(qdev, tx_ring_desc, map_idx);
+       return NETDEV_TX_BUSY;
+}
+
+/* Categorizing receive firmware frame errors */
+static void ql_categorize_rx_err(struct ql_adapter *qdev, u8 rx_err,
+                                struct rx_ring *rx_ring)
+{
+       struct nic_stats *stats = &qdev->nic_stats;
+
+       stats->rx_err_count++;
+       rx_ring->rx_errors++;
+
+       switch (rx_err & IB_MAC_IOCB_RSP_ERR_MASK) {
+       case IB_MAC_IOCB_RSP_ERR_CODE_ERR:
+               stats->rx_code_err++;
+               break;
+       case IB_MAC_IOCB_RSP_ERR_OVERSIZE:
+               stats->rx_oversize_err++;
+               break;
+       case IB_MAC_IOCB_RSP_ERR_UNDERSIZE:
+               stats->rx_undersize_err++;
+               break;
+       case IB_MAC_IOCB_RSP_ERR_PREAMBLE:
+               stats->rx_preamble_err++;
+               break;
+       case IB_MAC_IOCB_RSP_ERR_FRAME_LEN:
+               stats->rx_frame_len_err++;
+               break;
+       case IB_MAC_IOCB_RSP_ERR_CRC:
+               stats->rx_crc_err++;
+               break;
+       default:
+               break;
+       }
+}
+
+/**
+ * ql_update_mac_hdr_len - helper routine to update the mac header length
+ * based on vlan tags if present
+ * @qdev: adapter context
+ * @ib_mac_rsp: inbound completion descriptor for this frame
+ * @page: start of the received frame data
+ * @len: header length to adjust for any VLAN tag(s) found
+ */
+static void ql_update_mac_hdr_len(struct ql_adapter *qdev,
+                                 struct ib_mac_iocb_rsp *ib_mac_rsp,
+                                 void *page, size_t *len)
+{
+       u16 *tags;
+
+       if (qdev->ndev->features & NETIF_F_HW_VLAN_CTAG_RX)
+               return;
+       if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) {
+               tags = (u16 *)page;
+               /* Look for stacked vlan tags in ethertype field */
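+               /* tags[6] and tags[8] are the 16-bit words at byte offsets
+                * 12 and 16, i.e. the outer and inner TPID/ethertype fields.
+                */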
+               if (tags[6] == ETH_P_8021Q &&
+                   tags[8] == ETH_P_8021Q)
+                       *len += 2 * VLAN_HLEN;
+               else
+                       *len += VLAN_HLEN;
+       }
+}
+
+/* Process an inbound completion from an rx ring. */
+static void ql_process_mac_rx_gro_page(struct ql_adapter *qdev,
+                                       struct rx_ring *rx_ring,
+                                       struct ib_mac_iocb_rsp *ib_mac_rsp,
+                                       u32 length,
+                                       u16 vlan_id)
+{
+       struct sk_buff *skb;
+       struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
+       struct napi_struct *napi = &rx_ring->napi;
+
+       /* Frame error, so drop the packet. */
+       if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
+               ql_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring);
+               put_page(lbq_desc->p.pg_chunk.page);
+               return;
+       }
+       napi->dev = qdev->ndev;
+
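+       /* Attach the page chunk directly as a frag of the per-NAPI GRO skb;
+        * the payload is never copied.
+        */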
+       skb = napi_get_frags(napi);
+       if (!skb) {
+               netif_err(qdev, drv, qdev->ndev,
+                         "Couldn't get an skb, exiting.\n");
+               rx_ring->rx_dropped++;
+               put_page(lbq_desc->p.pg_chunk.page);
+               return;
+       }
+       prefetch(lbq_desc->p.pg_chunk.va);
+       __skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
+                            lbq_desc->p.pg_chunk.page,
+                            lbq_desc->p.pg_chunk.offset,
+                            length);
+
+       skb->len += length;
+       skb->data_len += length;
+       skb->truesize += length;
+       skb_shinfo(skb)->nr_frags++;
+
+       rx_ring->rx_packets++;
+       rx_ring->rx_bytes += length;
+       skb->ip_summed = CHECKSUM_UNNECESSARY;
+       skb_record_rx_queue(skb, rx_ring->cq_id);
+       if (vlan_id != 0xffff)
+               __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_id);
+       napi_gro_frags(napi);
+}
+
+/* Process an inbound completion from an rx ring. */
+static void ql_process_mac_rx_page(struct ql_adapter *qdev,
+                                       struct rx_ring *rx_ring,
+                                       struct ib_mac_iocb_rsp *ib_mac_rsp,
+                                       u32 length,
+                                       u16 vlan_id)
+{
+       struct net_device *ndev = qdev->ndev;
+       struct sk_buff *skb = NULL;
+       void *addr;
+       struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
+       struct napi_struct *napi = &rx_ring->napi;
+       size_t hlen = ETH_HLEN;
+
+       skb = netdev_alloc_skb(ndev, length);
+       if (!skb) {
+               rx_ring->rx_dropped++;
+               put_page(lbq_desc->p.pg_chunk.page);
+               return;
+       }
+
+       addr = lbq_desc->p.pg_chunk.va;
+       prefetch(addr);
+
+       /* Frame error, so drop the packet. */
+       if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
+               ql_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring);
+               goto err_out;
+       }
+
+       /* Update the MAC header length*/
+       ql_update_mac_hdr_len(qdev, ib_mac_rsp, addr, &hlen);
+
+       /* The max framesize filter on this chip is set higher than
+        * MTU since FCoE uses 2k frames.
+        */
+       if (skb->len > ndev->mtu + hlen) {
+               netif_err(qdev, drv, qdev->ndev,
+                         "Frame too long, dropping.\n");
+               rx_ring->rx_dropped++;
+               goto err_out;
+       }
+       memcpy(skb_put(skb, hlen), addr, hlen);
+       netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
+                    "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n",
+                    length);
+       skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
+                               lbq_desc->p.pg_chunk.offset + hlen,
+                               length - hlen);
+       skb->len += length - hlen;
+       skb->data_len += length - hlen;
+       skb->truesize += length - hlen;
+
+       rx_ring->rx_packets++;
+       rx_ring->rx_bytes += skb->len;
+       skb->protocol = eth_type_trans(skb, ndev);
+       skb_checksum_none_assert(skb);
+
+       if ((ndev->features & NETIF_F_RXCSUM) &&
+               !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
+               /* TCP frame. */
+               if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
+                       netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
+                                    "TCP checksum done!\n");
+                       skb->ip_summed = CHECKSUM_UNNECESSARY;
+               } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
+                               (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
+                       /* Unfragmented ipv4 UDP frame. */
+                       struct iphdr *iph =
+                               (struct iphdr *)((u8 *)addr + hlen);
+                       if (!(iph->frag_off &
+                               htons(IP_MF|IP_OFFSET))) {
+                               skb->ip_summed = CHECKSUM_UNNECESSARY;
+                               netif_printk(qdev, rx_status, KERN_DEBUG,
+                                            qdev->ndev,
+                                            "UDP checksum done!\n");
+                       }
+               }
+       }
+
+       skb_record_rx_queue(skb, rx_ring->cq_id);
+       if (vlan_id != 0xffff)
+               __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_id);
+       if (skb->ip_summed == CHECKSUM_UNNECESSARY)
+               napi_gro_receive(napi, skb);
+       else
+               netif_receive_skb(skb);
+       return;
+err_out:
+       dev_kfree_skb_any(skb);
+       put_page(lbq_desc->p.pg_chunk.page);
+}
+
+/* Process an inbound completion from an rx ring. */
+static void ql_process_mac_rx_skb(struct ql_adapter *qdev,
+                                       struct rx_ring *rx_ring,
+                                       struct ib_mac_iocb_rsp *ib_mac_rsp,
+                                       u32 length,
+                                       u16 vlan_id)
+{
+       struct net_device *ndev = qdev->ndev;
+       struct sk_buff *skb = NULL;
+       struct sk_buff *new_skb = NULL;
+       struct bq_desc *sbq_desc = ql_get_curr_sbuf(rx_ring);
+
+       skb = sbq_desc->p.skb;
+       /* Allocate new_skb and copy */
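+       /* The frame is copied into a fresh skb so the original small-buffer
+        * skb stays mapped in the ring for reuse.
+        */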
+       new_skb = netdev_alloc_skb(qdev->ndev, length + NET_IP_ALIGN);
+       if (new_skb == NULL) {
+               rx_ring->rx_dropped++;
+               return;
+       }
+       skb_reserve(new_skb, NET_IP_ALIGN);
+
+       pci_dma_sync_single_for_cpu(qdev->pdev,
+                                   dma_unmap_addr(sbq_desc, mapaddr),
+                                   dma_unmap_len(sbq_desc, maplen),
+                                   PCI_DMA_FROMDEVICE);
+
+       memcpy(skb_put(new_skb, length), skb->data, length);
+
+       pci_dma_sync_single_for_device(qdev->pdev,
+                                      dma_unmap_addr(sbq_desc, mapaddr),
+                                      dma_unmap_len(sbq_desc, maplen),
+                                      PCI_DMA_FROMDEVICE);
+       skb = new_skb;
+
+       /* Frame error, so drop the packet. */
+       if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
+               ql_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring);
+               dev_kfree_skb_any(skb);
+               return;
+       }
+
+       /* loopback self test for ethtool */
+       if (test_bit(QL_SELFTEST, &qdev->flags)) {
+               ql_check_lb_frame(qdev, skb);
+               dev_kfree_skb_any(skb);
+               return;
+       }
+
+       /* The max framesize filter on this chip is set higher than
+        * MTU since FCoE uses 2k frames.
+        */
+       if (skb->len > ndev->mtu + ETH_HLEN) {
+               dev_kfree_skb_any(skb);
+               rx_ring->rx_dropped++;
+               return;
+       }
+
+       prefetch(skb->data);
+       if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
+               netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
+                            "%s Multicast.\n",
+                            (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
+                            IB_MAC_IOCB_RSP_M_HASH ? "Hash" :
+                            (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
+                            IB_MAC_IOCB_RSP_M_REG ? "Registered" :
+                            (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
+                            IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
+       }
+       if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P)
+               netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
+                            "Promiscuous Packet.\n");
+
+       rx_ring->rx_packets++;
+       rx_ring->rx_bytes += skb->len;
+       skb->protocol = eth_type_trans(skb, ndev);
+       skb_checksum_none_assert(skb);
+
+       /* If rx checksum is on, and there are no
+        * csum or frame errors.
+        */
+       if ((ndev->features & NETIF_F_RXCSUM) &&
+               !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
+               /* TCP frame. */
+               if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
+                       netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
+                                    "TCP checksum done!\n");
+                       skb->ip_summed = CHECKSUM_UNNECESSARY;
+               } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
+                               (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
+                       /* Unfragmented ipv4 UDP frame. */
+                       struct iphdr *iph = (struct iphdr *) skb->data;
+                       if (!(iph->frag_off &
+                               htons(IP_MF|IP_OFFSET))) {
+                               skb->ip_summed = CHECKSUM_UNNECESSARY;
+                               netif_printk(qdev, rx_status, KERN_DEBUG,
+                                            qdev->ndev,
+                                            "UDP checksum done!\n");
+                       }
+               }
+       }
+
+       skb_record_rx_queue(skb, rx_ring->cq_id);
+       if (vlan_id != 0xffff)
+               __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_id);
+       if (skb->ip_summed == CHECKSUM_UNNECESSARY)
+               napi_gro_receive(&rx_ring->napi, skb);
+       else
+               netif_receive_skb(skb);
+}
+
+static void ql_realign_skb(struct sk_buff *skb, int len)
+{
+       void *temp_addr = skb->data;
+
+       /* Undo the skb_reserve(skb,32) we did before
+        * giving to hardware, and realign data on
+        * a 2-byte boundary.
+        */
+       skb->data -= QLGE_SB_PAD - NET_IP_ALIGN;
+       skb->tail -= QLGE_SB_PAD - NET_IP_ALIGN;
+       skb_copy_to_linear_data(skb, temp_addr,
+               (unsigned int)len);
+}
+
+/*
+ * This function builds an skb for the given inbound
+ * completion.  It will be rewritten for readability in the near
+ * future, but for now it works well.
+ */
+static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
+                                      struct rx_ring *rx_ring,
+                                      struct ib_mac_iocb_rsp *ib_mac_rsp)
+{
+       struct bq_desc *lbq_desc;
+       struct bq_desc *sbq_desc;
+       struct sk_buff *skb = NULL;
+       u32 length = le32_to_cpu(ib_mac_rsp->data_len);
+       u32 hdr_len = le32_to_cpu(ib_mac_rsp->hdr_len);
+       size_t hlen = ETH_HLEN;
+
+       /*
+        * Handle the header buffer if present.
+        */
+       if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV &&
+           ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
+               netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
+                            "Header of %d bytes in small buffer.\n", hdr_len);
+               /*
+                * Headers fit nicely into a small buffer.
+                */
+               sbq_desc = ql_get_curr_sbuf(rx_ring);
+               pci_unmap_single(qdev->pdev,
+                               dma_unmap_addr(sbq_desc, mapaddr),
+                               dma_unmap_len(sbq_desc, maplen),
+                               PCI_DMA_FROMDEVICE);
+               skb = sbq_desc->p.skb;
+               ql_realign_skb(skb, hdr_len);
+               skb_put(skb, hdr_len);
+               sbq_desc->p.skb = NULL;
+       }
+
+       /*
+        * Handle the data buffer(s).
+        */
+       if (unlikely(!length)) {        /* Is there data too? */
+               netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
+                            "No Data buffer in this packet.\n");
+               return skb;
+       }
+
+       if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
+               if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
+                       netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
+                                    "Headers in small, data of %d bytes in small, combine them.\n",
+                                    length);
+                       /*
+                        * Data is less than small buffer size so it's
+                        * stuffed in a small buffer.
+                        * For this case we append the data
+                        * from the "data" small buffer to the "header" small
+                        * buffer.
+                        */
+                       sbq_desc = ql_get_curr_sbuf(rx_ring);
+                       pci_dma_sync_single_for_cpu(qdev->pdev,
+                                                   dma_unmap_addr
+                                                   (sbq_desc, mapaddr),
+                                                   dma_unmap_len
+                                                   (sbq_desc, maplen),
+                                                   PCI_DMA_FROMDEVICE);
+                       memcpy(skb_put(skb, length),
+                              sbq_desc->p.skb->data, length);
+                       pci_dma_sync_single_for_device(qdev->pdev,
+                                                      dma_unmap_addr
+                                                      (sbq_desc,
+                                                       mapaddr),
+                                                      dma_unmap_len
+                                                      (sbq_desc,
+                                                       maplen),
+                                                      PCI_DMA_FROMDEVICE);
+               } else {
+                       netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
+                                    "%d bytes in a single small buffer.\n",
+                                    length);
+                       sbq_desc = ql_get_curr_sbuf(rx_ring);
+                       skb = sbq_desc->p.skb;
+                       ql_realign_skb(skb, length);
+                       skb_put(skb, length);
+                       pci_unmap_single(qdev->pdev,
+                                        dma_unmap_addr(sbq_desc,
+                                                       mapaddr),
+                                        dma_unmap_len(sbq_desc,
+                                                      maplen),
+                                        PCI_DMA_FROMDEVICE);
+                       sbq_desc->p.skb = NULL;
+               }
+       } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
+               if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
+                       netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
+                                    "Header in small, %d bytes in large. Chain large to small!\n",
+                                    length);
+                       /*
+                        * The data is in a single large buffer.  We
+                        * chain it to the header buffer's skb and let
+                        * it rip.
+                        */
+                       lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
+                       netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
+                                    "Chaining page at offset = %d, for %d bytes  to skb.\n",
+                                    lbq_desc->p.pg_chunk.offset, length);
+                       skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
+                                               lbq_desc->p.pg_chunk.offset,
+                                               length);
+                       skb->len += length;
+                       skb->data_len += length;
+                       skb->truesize += length;
+               } else {
+                       /*
+                        * The headers and data are in a single large buffer.
+                        * We allocate a new skb, chain the page to it and pull
+                        * the headers into the linear area.  This can happen
+                        * with jumbo mtu on a non-TCP/UDP frame.
+                        */
+                       lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
+                       skb = netdev_alloc_skb(qdev->ndev, length);
+                       if (skb == NULL) {
+                               netif_printk(qdev, probe, KERN_DEBUG, qdev->ndev,
+                                            "No skb available, drop the packet.\n");
+                               return NULL;
+                       }
+                       pci_unmap_page(qdev->pdev,
+                                      dma_unmap_addr(lbq_desc,
+                                                     mapaddr),
+                                      dma_unmap_len(lbq_desc, maplen),
+                                      PCI_DMA_FROMDEVICE);
+                       skb_reserve(skb, NET_IP_ALIGN);
+                       netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
+                                    "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n",
+                                    length);
+                       skb_fill_page_desc(skb, 0,
+                                               lbq_desc->p.pg_chunk.page,
+                                               lbq_desc->p.pg_chunk.offset,
+                                               length);
+                       skb->len += length;
+                       skb->data_len += length;
+                       skb->truesize += length;
+                       ql_update_mac_hdr_len(qdev, ib_mac_rsp,
+                                             lbq_desc->p.pg_chunk.va,
+                                             &hlen);
+                       __pskb_pull_tail(skb, hlen);
+               }
+       } else {
+               /*
+                * The data is in a chain of large buffers
+                * pointed to by a small buffer.  We loop
+                * through and chain them to our small header
+                * buffer's skb.
+                * frags:  There are 18 frags max and our small
+                *         buffer will hold 32 of them.  The thing is,
+                *         we'll use 3 at most for our 9000-byte jumbo
+                *         frames.  If the MTU goes up we could
+                *         eventually be in trouble.
+                */
+               int size, i = 0;
+               sbq_desc = ql_get_curr_sbuf(rx_ring);
+               pci_unmap_single(qdev->pdev,
+                                dma_unmap_addr(sbq_desc, mapaddr),
+                                dma_unmap_len(sbq_desc, maplen),
+                                PCI_DMA_FROMDEVICE);
+               if (!(ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS)) {
+                       /*
+                        * This is a non-TCP/UDP IP frame, so
+                        * the headers aren't split into a small
+                        * buffer.  We have to use the small buffer
+                        * that contains our sg list as our skb to
+                        * send upstairs. Copy the sg list here to
+                        * a local buffer and use it to find the
+                        * pages to chain.
+                        */
+                       netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
+                                    "%d bytes of headers & data in chain of large.\n",
+                                    length);
+                       skb = sbq_desc->p.skb;
+                       sbq_desc->p.skb = NULL;
+                       skb_reserve(skb, NET_IP_ALIGN);
+               }
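+               /* Walk the chain of large buffers, adding each page chunk as
+                * an skb frag until all 'length' bytes are accounted for.
+                */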
+               do {
+                       lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
+                       size = (length < rx_ring->lbq_buf_size) ? length :
+                               rx_ring->lbq_buf_size;
+
+                       netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
+                                    "Adding page %d to skb for %d bytes.\n",
+                                    i, size);
+                       skb_fill_page_desc(skb, i,
+                                               lbq_desc->p.pg_chunk.page,
+                                               lbq_desc->p.pg_chunk.offset,
+                                               size);
+                       skb->len += size;
+                       skb->data_len += size;
+                       skb->truesize += size;
+                       length -= size;
+                       i++;
+               } while (length > 0);
+               ql_update_mac_hdr_len(qdev, ib_mac_rsp, lbq_desc->p.pg_chunk.va,
+                                     &hlen);
+               __pskb_pull_tail(skb, hlen);
+       }
+       return skb;
+}
+
+/* Process an inbound completion from an rx ring. */
+static void ql_process_mac_split_rx_intr(struct ql_adapter *qdev,
+                                  struct rx_ring *rx_ring,
+                                  struct ib_mac_iocb_rsp *ib_mac_rsp,
+                                  u16 vlan_id)
+{
+       struct net_device *ndev = qdev->ndev;
+       struct sk_buff *skb = NULL;
+
+       QL_DUMP_IB_MAC_RSP(ib_mac_rsp);
+
+       skb = ql_build_rx_skb(qdev, rx_ring, ib_mac_rsp);
+       if (unlikely(!skb)) {
+               netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
+                            "No skb available, drop packet.\n");
+               rx_ring->rx_dropped++;
+               return;
+       }
+
+       /* Frame error, so drop the packet. */
+       if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
+               ql_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring);
+               dev_kfree_skb_any(skb);
+               return;
+       }
+
+       /* The max framesize filter on this chip is set higher than
+        * MTU since FCoE uses 2k frames.
+        */
+       if (skb->len > ndev->mtu + ETH_HLEN) {
+               dev_kfree_skb_any(skb);
+               rx_ring->rx_dropped++;
+               return;
+       }
+
+       /* loopback self test for ethtool */
+       if (test_bit(QL_SELFTEST, &qdev->flags)) {
+               ql_check_lb_frame(qdev, skb);
+               dev_kfree_skb_any(skb);
+               return;
+       }
+
+       prefetch(skb->data);
+       if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
+               netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, "%s Multicast.\n",
+                            (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
+                            IB_MAC_IOCB_RSP_M_HASH ? "Hash" :
+                            (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
+                            IB_MAC_IOCB_RSP_M_REG ? "Registered" :
+                            (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
+                            IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
+               rx_ring->rx_multicast++;
+       }
+       if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P) {
+               netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
+                            "Promiscuous Packet.\n");
+       }
+
+       skb->protocol = eth_type_trans(skb, ndev);
+       skb_checksum_none_assert(skb);
+
+       /* If rx checksum is on, and there are no
+        * csum or frame errors.
+        */
+       if ((ndev->features & NETIF_F_RXCSUM) &&
+               !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
+               /* TCP frame. */
+               if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
+                       netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
+                                    "TCP checksum done!\n");
+                       skb->ip_summed = CHECKSUM_UNNECESSARY;
+               } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
+                               (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
+               /* Unfragmented ipv4 UDP frame. */
+                       struct iphdr *iph = (struct iphdr *) skb->data;
+                       if (!(iph->frag_off &
+                               htons(IP_MF|IP_OFFSET))) {
+                               skb->ip_summed = CHECKSUM_UNNECESSARY;
+                               netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
+                                            "UDP checksum done!\n");
+                       }
+               }
+       }
+
+       rx_ring->rx_packets++;
+       rx_ring->rx_bytes += skb->len;
+       skb_record_rx_queue(skb, rx_ring->cq_id);
+       if (vlan_id != 0xffff)
+               __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_id);
+       if (skb->ip_summed == CHECKSUM_UNNECESSARY)
+               napi_gro_receive(&rx_ring->napi, skb);
+       else
+               netif_receive_skb(skb);
+}
+
+/* Process an inbound completion from an rx ring. */
+static unsigned long ql_process_mac_rx_intr(struct ql_adapter *qdev,
+                                       struct rx_ring *rx_ring,
+                                       struct ib_mac_iocb_rsp *ib_mac_rsp)
+{
+       u32 length = le32_to_cpu(ib_mac_rsp->data_len);
+       u16 vlan_id = ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) &&
+                       (qdev->ndev->features & NETIF_F_HW_VLAN_CTAG_RX)) ?
+                       ((le16_to_cpu(ib_mac_rsp->vlan_id) &
+                       IB_MAC_IOCB_RSP_VLAN_MASK)) : 0xffff;
+
+       QL_DUMP_IB_MAC_RSP(ib_mac_rsp);
+
+       if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV) {
+               /* The data and headers are split into
+                * separate buffers.
+                */
+               ql_process_mac_split_rx_intr(qdev, rx_ring, ib_mac_rsp,
+                                               vlan_id);
+       } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
+               /* The data fit in a single small buffer.
+                * Allocate a new skb, copy the data and
+                * return the buffer to the free pool.
+                */
+               ql_process_mac_rx_skb(qdev, rx_ring, ib_mac_rsp,
+                                               length, vlan_id);
+       } else if ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) &&
+               !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK) &&
+               (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T)) {
+               /* TCP packet in a page chunk that's been checksummed.
+                * Tack it on to our GRO skb and let it go.
+                */
+               ql_process_mac_rx_gro_page(qdev, rx_ring, ib_mac_rsp,
+                                               length, vlan_id);
+       } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
+               /* Non-TCP packet in a page chunk. Allocate an
+                * skb, tack it on frags, and send it up.
+                */
+               ql_process_mac_rx_page(qdev, rx_ring, ib_mac_rsp,
+                                               length, vlan_id);
+       } else {
+               /* Non-TCP/UDP large frames that span multiple buffers
+                * can be processed correctly by the split frame logic.
+                */
+               ql_process_mac_split_rx_intr(qdev, rx_ring, ib_mac_rsp,
+                                               vlan_id);
+       }
+
+       return (unsigned long)length;
+}
+
+/* Process an outbound completion from an rx ring. */
+static void ql_process_mac_tx_intr(struct ql_adapter *qdev,
+                                  struct ob_mac_iocb_rsp *mac_rsp)
+{
+       struct tx_ring *tx_ring;
+       struct tx_ring_desc *tx_ring_desc;
+
+       QL_DUMP_OB_MAC_RSP(mac_rsp);
+       tx_ring = &qdev->tx_ring[mac_rsp->txq_idx];
+       tx_ring_desc = &tx_ring->q[mac_rsp->tid];
+       ql_unmap_send(qdev, tx_ring_desc, tx_ring_desc->map_cnt);
+       tx_ring->tx_bytes += (tx_ring_desc->skb)->len;
+       tx_ring->tx_packets++;
+       dev_kfree_skb(tx_ring_desc->skb);
+       tx_ring_desc->skb = NULL;
+
+       if (unlikely(mac_rsp->flags1 & (OB_MAC_IOCB_RSP_E |
+                                       OB_MAC_IOCB_RSP_S |
+                                       OB_MAC_IOCB_RSP_L |
+                                       OB_MAC_IOCB_RSP_P | OB_MAC_IOCB_RSP_B))) {
+               if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_E) {
+                       netif_warn(qdev, tx_done, qdev->ndev,
+                                  "Total descriptor length did not match transfer length.\n");
+               }
+               if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_S) {
+                       netif_warn(qdev, tx_done, qdev->ndev,
+                                  "Frame too short to be valid, not sent.\n");
+               }
+               if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_L) {
+                       netif_warn(qdev, tx_done, qdev->ndev,
+                                  "Frame too long, but sent anyway.\n");
+               }
+               if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_B) {
+                       netif_warn(qdev, tx_done, qdev->ndev,
+                                  "PCI backplane error. Frame not sent.\n");
+               }
+       }
+       atomic_inc(&tx_ring->tx_count);
+}
+
+/* Fire up a handler to reset the MPI processor. */
+void ql_queue_fw_error(struct ql_adapter *qdev)
+{
+       ql_link_off(qdev);
+       queue_delayed_work(qdev->workqueue, &qdev->mpi_reset_work, 0);
+}
+
+void ql_queue_asic_error(struct ql_adapter *qdev)
+{
+       ql_link_off(qdev);
+       ql_disable_interrupts(qdev);
+       /* Clear adapter up bit to signal the recovery
+        * process that it shouldn't kill the reset worker
+        * thread
+        */
+       clear_bit(QL_ADAPTER_UP, &qdev->flags);
+       /* Set the asic recovery bit to tell the reset process that we
+        * are in fatal error recovery rather than a normal close.
+        */
+       set_bit(QL_ASIC_RECOVERY, &qdev->flags);
+       queue_delayed_work(qdev->workqueue, &qdev->asic_reset_work, 0);
+}
+
+static void ql_process_chip_ae_intr(struct ql_adapter *qdev,
+                                   struct ib_ae_iocb_rsp *ib_ae_rsp)
+{
+       switch (ib_ae_rsp->event) {
+       case MGMT_ERR_EVENT:
+               netif_err(qdev, rx_err, qdev->ndev,
+                         "Management Processor Fatal Error.\n");
+               ql_queue_fw_error(qdev);
+               return;
+
+       case CAM_LOOKUP_ERR_EVENT:
+               netdev_err(qdev->ndev, "Multiple CAM hits lookup occurred.\n");
+               netdev_err(qdev->ndev, "This event shouldn't occur.\n");
+               ql_queue_asic_error(qdev);
+               return;
+
+       case SOFT_ECC_ERROR_EVENT:
+               netdev_err(qdev->ndev, "Soft ECC error detected.\n");
+               ql_queue_asic_error(qdev);
+               break;
+
+       case PCI_ERR_ANON_BUF_RD:
+               netdev_err(qdev->ndev, "PCI error occurred when reading "
+                                       "anonymous buffers from rx_ring %d.\n",
+                                       ib_ae_rsp->q_id);
+               ql_queue_asic_error(qdev);
+               break;
+
+       default:
+               netif_err(qdev, drv, qdev->ndev, "Unexpected event %d.\n",
+                         ib_ae_rsp->event);
+               ql_queue_asic_error(qdev);
+               break;
+       }
+}
+
+static int ql_clean_outbound_rx_ring(struct rx_ring *rx_ring)
+{
+       struct ql_adapter *qdev = rx_ring->qdev;
+       u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
+       struct ob_mac_iocb_rsp *net_rsp = NULL;
+       int count = 0;
+
+       struct tx_ring *tx_ring;
+       /* While there are entries in the completion queue. */
+       while (prod != rx_ring->cnsmr_idx) {
+
+               netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
+                            "cq_id = %d, prod = %d, cnsmr = %d.\n",
+                            rx_ring->cq_id, prod, rx_ring->cnsmr_idx);
+
+               net_rsp = (struct ob_mac_iocb_rsp *)rx_ring->curr_entry;
+               rmb();
+               switch (net_rsp->opcode) {
+
+               case OPCODE_OB_MAC_TSO_IOCB:
+               case OPCODE_OB_MAC_IOCB:
+                       ql_process_mac_tx_intr(qdev, net_rsp);
+                       break;
+               default:
+                       netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
+                                    "Hit default case, not handled! dropping the packet, opcode = %x.\n",
+                                    net_rsp->opcode);
+               }
+               count++;
+               ql_update_cq(rx_ring);
+               prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
+       }
+       if (!net_rsp)
+               return 0;
+       ql_write_cq_idx(rx_ring);
+       tx_ring = &qdev->tx_ring[net_rsp->txq_idx];
+       if (__netif_subqueue_stopped(qdev->ndev, tx_ring->wq_id)) {
+               if ((atomic_read(&tx_ring->tx_count) > (tx_ring->wq_len / 4)))
+                       /*
+                        * The queue got stopped because the tx_ring was full.
+                        * Wake it up, because it's now at least 25% empty.
+                        */
+                       netif_wake_subqueue(qdev->ndev, tx_ring->wq_id);
+       }
+
+       return count;
+}
+
+static int ql_clean_inbound_rx_ring(struct rx_ring *rx_ring, int budget)
+{
+       struct ql_adapter *qdev = rx_ring->qdev;
+       u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
+       struct ql_net_rsp_iocb *net_rsp;
+       int count = 0;
+
+       /* While there are entries in the completion queue. */
+       while (prod != rx_ring->cnsmr_idx) {
+
+               netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
+                            "cq_id = %d, prod = %d, cnsmr = %d.\n",
+                            rx_ring->cq_id, prod, rx_ring->cnsmr_idx);
+
+               net_rsp = rx_ring->curr_entry;
+               rmb();
+               switch (net_rsp->opcode) {
+               case OPCODE_IB_MAC_IOCB:
+                       ql_process_mac_rx_intr(qdev, rx_ring,
+                                              (struct ib_mac_iocb_rsp *)
+                                              net_rsp);
+                       break;
+
+               case OPCODE_IB_AE_IOCB:
+                       ql_process_chip_ae_intr(qdev, (struct ib_ae_iocb_rsp *)
+                                               net_rsp);
+                       break;
+               default:
+                       netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
+                                    "Hit default case, not handled! dropping the packet, opcode = %x.\n",
+                                    net_rsp->opcode);
+                       break;
+               }
+               count++;
+               ql_update_cq(rx_ring);
+               prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
+               if (count == budget)
+                       break;
+       }
+       ql_update_buffer_queues(qdev, rx_ring);
+       ql_write_cq_idx(rx_ring);
+       return count;
+}
+
+static int ql_napi_poll_msix(struct napi_struct *napi, int budget)
+{
+       struct rx_ring *rx_ring = container_of(napi, struct rx_ring, napi);
+       struct ql_adapter *qdev = rx_ring->qdev;
+       struct rx_ring *trx_ring;
+       int i, work_done = 0;
+       struct intr_context *ctx = &qdev->intr_context[rx_ring->cq_id];
+
+       netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
+                    "Enter, NAPI POLL cq_id = %d.\n", rx_ring->cq_id);
+
+       /* Service the TX rings first.  They start
+        * right after the RSS rings. */
+       for (i = qdev->rss_ring_count; i < qdev->rx_ring_count; i++) {
+               trx_ring = &qdev->rx_ring[i];
+               /* If this TX completion ring belongs to this vector and
+                * it's not empty then service it.
+                */
+               if ((ctx->irq_mask & (1 << trx_ring->cq_id)) &&
+                       (ql_read_sh_reg(trx_ring->prod_idx_sh_reg) !=
+                                       trx_ring->cnsmr_idx)) {
+                       netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
+                                    "%s: Servicing TX completion ring %d.\n",
+                                    __func__, trx_ring->cq_id);
+                       ql_clean_outbound_rx_ring(trx_ring);
+               }
+       }
+
+       /*
+        * Now service the RSS ring if it's active.
+        */
+       if (ql_read_sh_reg(rx_ring->prod_idx_sh_reg) !=
+                                       rx_ring->cnsmr_idx) {
+               netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
+                            "%s: Servicing RX completion ring %d.\n",
+                            __func__, rx_ring->cq_id);
+               work_done = ql_clean_inbound_rx_ring(rx_ring, budget);
+       }
+
+       if (work_done < budget) {
+               napi_complete(napi);
+               ql_enable_completion_interrupt(qdev, rx_ring->irq);
+       }
+       return work_done;
+}
+
+static void qlge_vlan_mode(struct net_device *ndev, netdev_features_t features)
+{
+       struct ql_adapter *qdev = netdev_priv(ndev);
+
+       if (features & NETIF_F_HW_VLAN_CTAG_RX) {
+               ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK |
+                                NIC_RCV_CFG_VLAN_MATCH_AND_NON);
+       } else {
+               ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK);
+       }
+}
+
+/**
+ * qlge_update_hw_vlan_features - helper routine to reinitialize the adapter
+ * based on the features to enable/disable hardware vlan accel
+ * @ndev: net device being reconfigured
+ * @features: the feature set being applied
+ */
+static int qlge_update_hw_vlan_features(struct net_device *ndev,
+                                       netdev_features_t features)
+{
+       struct ql_adapter *qdev = netdev_priv(ndev);
+       int status = 0;
+       bool need_restart = netif_running(ndev);
+
+       if (need_restart) {
+               status = ql_adapter_down(qdev);
+               if (status) {
+                       netif_err(qdev, link, qdev->ndev,
+                                 "Failed to bring down the adapter\n");
+                       return status;
+               }
+       }
+
+       /* update the features with the recent change */
+       ndev->features = features;
+
+       if (need_restart) {
+               status = ql_adapter_up(qdev);
+               if (status) {
+                       netif_err(qdev, link, qdev->ndev,
+                                 "Failed to bring up the adapter\n");
+                       return status;
+               }
+       }
+
+       return status;
+}
+
+static netdev_features_t qlge_fix_features(struct net_device *ndev,
+       netdev_features_t features)
+{
+       int err;
+
+       /* Update the behavior of vlan accel in the adapter */
+       err = qlge_update_hw_vlan_features(ndev, features);
+       if (err)
+               return err;
+
+       return features;
+}
+
+static int qlge_set_features(struct net_device *ndev,
+       netdev_features_t features)
+{
+       netdev_features_t changed = ndev->features ^ features;
+
+       if (changed & NETIF_F_HW_VLAN_CTAG_RX)
+               qlge_vlan_mode(ndev, features);
+
+       return 0;
+}
+
+static int __qlge_vlan_rx_add_vid(struct ql_adapter *qdev, u16 vid)
+{
+       u32 enable_bit = MAC_ADDR_E;
+       int err;
+
+       err = ql_set_mac_addr_reg(qdev, (u8 *) &enable_bit,
+                                 MAC_ADDR_TYPE_VLAN, vid);
+       if (err)
+               netif_err(qdev, ifup, qdev->ndev,
+                         "Failed to init vlan address.\n");
+       return err;
+}
+
+static int qlge_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid)
+{
+       struct ql_adapter *qdev = netdev_priv(ndev);
+       int status;
+       int err;
+
+       status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
+       if (status)
+               return status;
+
+       err = __qlge_vlan_rx_add_vid(qdev, vid);
+       set_bit(vid, qdev->active_vlans);
+
+       ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
+
+       return err;
+}
+
+static int __qlge_vlan_rx_kill_vid(struct ql_adapter *qdev, u16 vid)
+{
+       u32 enable_bit = 0;
+       int err;
+
+       err = ql_set_mac_addr_reg(qdev, (u8 *) &enable_bit,
+                                 MAC_ADDR_TYPE_VLAN, vid);
+       if (err)
+               netif_err(qdev, ifup, qdev->ndev,
+                         "Failed to clear vlan address.\n");
+       return err;
+}
+
+static int qlge_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid)
+{
+       struct ql_adapter *qdev = netdev_priv(ndev);
+       int status;
+       int err;
+
+       status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
+       if (status)
+               return status;
+
+       err = __qlge_vlan_rx_kill_vid(qdev, vid);
+       clear_bit(vid, qdev->active_vlans);
+
+       ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
+
+       return err;
+}
+
+static void qlge_restore_vlan(struct ql_adapter *qdev)
+{
+       int status;
+       u16 vid;
+
+       status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
+       if (status)
+               return;
+
+       for_each_set_bit(vid, qdev->active_vlans, VLAN_N_VID)
+               __qlge_vlan_rx_add_vid(qdev, vid);
+
+       ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
+}
+
+/* MSI-X Multiple Vector Interrupt Handler for inbound completions. */
+static irqreturn_t qlge_msix_rx_isr(int irq, void *dev_id)
+{
+       struct rx_ring *rx_ring = dev_id;
+       napi_schedule(&rx_ring->napi);
+       return IRQ_HANDLED;
+}
+
+/* This handles a fatal error, MPI activity, and the default
+ * rx_ring in an MSI-X multiple vector environment.
+ * In an MSI/Legacy environment it also processes the rest of
+ * the rx_rings.
+ */
+static irqreturn_t qlge_isr(int irq, void *dev_id)
+{
+       struct rx_ring *rx_ring = dev_id;
+       struct ql_adapter *qdev = rx_ring->qdev;
+       struct intr_context *intr_context = &qdev->intr_context[0];
+       u32 var;
+       int work_done = 0;
+
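+       /* A nonzero irq_cnt means our completion interrupt is currently
+        * disabled, so an interrupt arriving on a shared line cannot be ours.
+        */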
+       spin_lock(&qdev->hw_lock);
+       if (atomic_read(&qdev->intr_context[0].irq_cnt)) {
+               netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
+                            "Shared Interrupt, Not ours!\n");
+               spin_unlock(&qdev->hw_lock);
+               return IRQ_NONE;
+       }
+       spin_unlock(&qdev->hw_lock);
+
+       var = ql_disable_completion_interrupt(qdev, intr_context->intr);
+
+       /*
+        * Check for fatal error.
+        */
+       if (var & STS_FE) {
+               ql_queue_asic_error(qdev);
+               netdev_err(qdev->ndev, "Got fatal error, STS = %x.\n", var);
+               var = ql_read32(qdev, ERR_STS);
+               netdev_err(qdev->ndev, "Resetting chip. "
+                                       "Error Status Register = 0x%x\n", var);
+               return IRQ_HANDLED;
+       }
+
+       /*
+        * Check MPI processor activity.
+        */
+       if ((var & STS_PI) &&
+               (ql_read32(qdev, INTR_MASK) & INTR_MASK_PI)) {
+               /*
+                * We've got an async event or mailbox completion.
+                * Handle it and clear the source of the interrupt.
+                */
+               netif_err(qdev, intr, qdev->ndev,
+                         "Got MPI processor interrupt.\n");
+               ql_disable_completion_interrupt(qdev, intr_context->intr);
+               ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16));
+               queue_delayed_work_on(smp_processor_id(),
+                               qdev->workqueue, &qdev->mpi_work, 0);
+               work_done++;
+       }
+
+       /*
+        * Get the bit-mask that shows the active queues for this
+        * pass.  Compare it to the queues that this irq services
+        * and call napi if there's a match.
+        */
+       var = ql_read32(qdev, ISR1);
+       if (var & intr_context->irq_mask) {
+               netif_info(qdev, intr, qdev->ndev,
+                          "Waking handler for rx_ring[0].\n");
+               ql_disable_completion_interrupt(qdev, intr_context->intr);
+               napi_schedule(&rx_ring->napi);
+               work_done++;
+       }
+       ql_enable_completion_interrupt(qdev, intr_context->intr);
+       return work_done ? IRQ_HANDLED : IRQ_NONE;
+}
+
+static int ql_tso(struct sk_buff *skb, struct ob_mac_tso_iocb_req *mac_iocb_ptr)
+{
+       if (skb_is_gso(skb)) {
+               int err;
+               __be16 l3_proto = vlan_get_protocol(skb);
+
+               err = skb_cow_head(skb, 0);
+               if (err < 0)
+                       return err;
+
+               mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
+               mac_iocb_ptr->flags3 |= OB_MAC_TSO_IOCB_IC;
+               mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len);
+               mac_iocb_ptr->total_hdrs_len =
+                   cpu_to_le16(skb_transport_offset(skb) + tcp_hdrlen(skb));
+               mac_iocb_ptr->net_trans_offset =
+                   cpu_to_le16(skb_network_offset(skb) |
+                               skb_transport_offset(skb)
+                               << OB_MAC_TRANSPORT_HDR_SHIFT);
+               mac_iocb_ptr->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
+               mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_LSO;
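+               /* Seed the TCP checksum field with the pseudo-header sum
+                * (length 0); the hardware fills in the per-segment
+                * checksums during LSO.
+                */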
+               if (likely(l3_proto == htons(ETH_P_IP))) {
+                       struct iphdr *iph = ip_hdr(skb);
+                       iph->check = 0;
+                       mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
+                       tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
+                                                                iph->daddr, 0,
+                                                                IPPROTO_TCP,
+                                                                0);
+               } else if (l3_proto == htons(ETH_P_IPV6)) {
+                       mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP6;
+                       tcp_hdr(skb)->check =
+                           ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
+                                            &ipv6_hdr(skb)->daddr,
+                                            0, IPPROTO_TCP, 0);
+               }
+               return 1;
+       }
+       return 0;
+}
+
+static void ql_hw_csum_setup(struct sk_buff *skb,
+                            struct ob_mac_tso_iocb_req *mac_iocb_ptr)
+{
+       int len;
+       struct iphdr *iph = ip_hdr(skb);
+       __sum16 *check;
+       mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
+       mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len);
+       mac_iocb_ptr->net_trans_offset =
+               cpu_to_le16(skb_network_offset(skb) |
+               skb_transport_offset(skb) << OB_MAC_TRANSPORT_HDR_SHIFT);
+
+       mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
+       len = (ntohs(iph->tot_len) - (iph->ihl << 2));
+       if (likely(iph->protocol == IPPROTO_TCP)) {
+               check = &(tcp_hdr(skb)->check);
+               mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_TC;
+               mac_iocb_ptr->total_hdrs_len =
+                   cpu_to_le16(skb_transport_offset(skb) +
+                               (tcp_hdr(skb)->doff << 2));
+       } else {
+               check = &(udp_hdr(skb)->check);
+               mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_UC;
+               mac_iocb_ptr->total_hdrs_len =
+                   cpu_to_le16(skb_transport_offset(skb) +
+                               sizeof(struct udphdr));
+       }
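+       /* Seed the TCP/UDP checksum field with the pseudo-header sum over
+        * the L4 length; the hardware (TC/UC flags above) completes the
+        * checksum over the payload.
+        */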
+       *check = ~csum_tcpudp_magic(iph->saddr,
+                                   iph->daddr, len, iph->protocol, 0);
+}
+
+static netdev_tx_t qlge_send(struct sk_buff *skb, struct net_device *ndev)
+{
+       struct tx_ring_desc *tx_ring_desc;
+       struct ob_mac_iocb_req *mac_iocb_ptr;
+       struct ql_adapter *qdev = netdev_priv(ndev);
+       int tso;
+       struct tx_ring *tx_ring;
+       u32 tx_ring_idx = (u32) skb->queue_mapping;
+
+       tx_ring = &qdev->tx_ring[tx_ring_idx];
+
+       if (skb_padto(skb, ETH_ZLEN))
+               return NETDEV_TX_OK;
+
+       if (unlikely(atomic_read(&tx_ring->tx_count) < 2)) {
+               netif_info(qdev, tx_queued, qdev->ndev,
+                          "%s: BUG! shutting down tx queue %d due to lack of resources.\n",
+                          __func__, tx_ring_idx);
+               netif_stop_subqueue(ndev, tx_ring->wq_id);
+               tx_ring->tx_errors++;
+               return NETDEV_TX_BUSY;
+       }
+       tx_ring_desc = &tx_ring->q[tx_ring->prod_idx];
+       mac_iocb_ptr = tx_ring_desc->queue_entry;
+       memset((void *)mac_iocb_ptr, 0, sizeof(*mac_iocb_ptr));
+
+       mac_iocb_ptr->opcode = OPCODE_OB_MAC_IOCB;
+       mac_iocb_ptr->tid = tx_ring_desc->index;
+       /* We use the upper 32-bits to store the tx queue for this IO.
+        * When we get the completion we can use it to establish the context.
+        */
+       mac_iocb_ptr->txq_idx = tx_ring_idx;
+       tx_ring_desc->skb = skb;
+
+       mac_iocb_ptr->frame_len = cpu_to_le16((u16) skb->len);
+
+       if (skb_vlan_tag_present(skb)) {
+               netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
+                            "Adding a vlan tag %d.\n", skb_vlan_tag_get(skb));
+               mac_iocb_ptr->flags3 |= OB_MAC_IOCB_V;
+               mac_iocb_ptr->vlan_tci = cpu_to_le16(skb_vlan_tag_get(skb));
+       }
+       tso = ql_tso(skb, (struct ob_mac_tso_iocb_req *)mac_iocb_ptr);
+       if (tso < 0) {
+               dev_kfree_skb_any(skb);
+               return NETDEV_TX_OK;
+       } else if (unlikely(!tso) && (skb->ip_summed == CHECKSUM_PARTIAL)) {
+               ql_hw_csum_setup(skb,
+                                (struct ob_mac_tso_iocb_req *)mac_iocb_ptr);
+       }
+       if (ql_map_send(qdev, mac_iocb_ptr, skb, tx_ring_desc) !=
+                       NETDEV_TX_OK) {
+               netif_err(qdev, tx_queued, qdev->ndev,
+                         "Could not map the segments.\n");
+               tx_ring->tx_errors++;
+               return NETDEV_TX_BUSY;
+       }
+       QL_DUMP_OB_MAC_IOCB(mac_iocb_ptr);
+       tx_ring->prod_idx++;
+       if (tx_ring->prod_idx == tx_ring->wq_len)
+               tx_ring->prod_idx = 0;
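+       /* Make sure the IOCB is fully written before ringing the doorbell. */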
+       wmb();
+
+       ql_write_db_reg(tx_ring->prod_idx, tx_ring->prod_idx_db_reg);
+       netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
+                    "tx queued, slot %d, len %d\n",
+                    tx_ring->prod_idx, skb->len);
+
+       atomic_dec(&tx_ring->tx_count);
+
+       if (unlikely(atomic_read(&tx_ring->tx_count) < 2)) {
+               netif_stop_subqueue(ndev, tx_ring->wq_id);
+               if ((atomic_read(&tx_ring->tx_count) > (tx_ring->wq_len / 4)))
+                       /*
+                        * The queue got stopped because the tx_ring was full.
+                        * Wake it up, because it's now at least 25% empty.
+                        */
+                       netif_wake_subqueue(qdev->ndev, tx_ring->wq_id);
+       }
+       return NETDEV_TX_OK;
+}
+
+
+static void ql_free_shadow_space(struct ql_adapter *qdev)
+{
+       if (qdev->rx_ring_shadow_reg_area) {
+               pci_free_consistent(qdev->pdev,
+                                   PAGE_SIZE,
+                                   qdev->rx_ring_shadow_reg_area,
+                                   qdev->rx_ring_shadow_reg_dma);
+               qdev->rx_ring_shadow_reg_area = NULL;
+       }
+       if (qdev->tx_ring_shadow_reg_area) {
+               pci_free_consistent(qdev->pdev,
+                                   PAGE_SIZE,
+                                   qdev->tx_ring_shadow_reg_area,
+                                   qdev->tx_ring_shadow_reg_dma);
+               qdev->tx_ring_shadow_reg_area = NULL;
+       }
+}
+
+static int ql_alloc_shadow_space(struct ql_adapter *qdev)
+{
+       qdev->rx_ring_shadow_reg_area =
+               pci_zalloc_consistent(qdev->pdev, PAGE_SIZE,
+                                     &qdev->rx_ring_shadow_reg_dma);
+       if (qdev->rx_ring_shadow_reg_area == NULL) {
+               netif_err(qdev, ifup, qdev->ndev,
+                         "Allocation of RX shadow space failed.\n");
+               return -ENOMEM;
+       }
+
+       qdev->tx_ring_shadow_reg_area =
+               pci_zalloc_consistent(qdev->pdev, PAGE_SIZE,
+                                     &qdev->tx_ring_shadow_reg_dma);
+       if (qdev->tx_ring_shadow_reg_area == NULL) {
+               netif_err(qdev, ifup, qdev->ndev,
+                         "Allocation of TX shadow space failed.\n");
+               goto err_wqp_sh_area;
+       }
+       return 0;
+
+err_wqp_sh_area:
+       pci_free_consistent(qdev->pdev,
+                           PAGE_SIZE,
+                           qdev->rx_ring_shadow_reg_area,
+                           qdev->rx_ring_shadow_reg_dma);
+       return -ENOMEM;
+}
+
+static void ql_init_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
+{
+       struct tx_ring_desc *tx_ring_desc;
+       int i;
+       struct ob_mac_iocb_req *mac_iocb_ptr;
+
+       mac_iocb_ptr = tx_ring->wq_base;
+       tx_ring_desc = tx_ring->q;
+       for (i = 0; i < tx_ring->wq_len; i++) {
+               tx_ring_desc->index = i;
+               tx_ring_desc->skb = NULL;
+               tx_ring_desc->queue_entry = mac_iocb_ptr;
+               mac_iocb_ptr++;
+               tx_ring_desc++;
+       }
+       atomic_set(&tx_ring->tx_count, tx_ring->wq_len);
+}
+
+static void ql_free_tx_resources(struct ql_adapter *qdev,
+                                struct tx_ring *tx_ring)
+{
+       if (tx_ring->wq_base) {
+               pci_free_consistent(qdev->pdev, tx_ring->wq_size,
+                                   tx_ring->wq_base, tx_ring->wq_base_dma);
+               tx_ring->wq_base = NULL;
+       }
+       kfree(tx_ring->q);
+       tx_ring->q = NULL;
+}
+
+static int ql_alloc_tx_resources(struct ql_adapter *qdev,
+                                struct tx_ring *tx_ring)
+{
+       tx_ring->wq_base =
+           pci_alloc_consistent(qdev->pdev, tx_ring->wq_size,
+                                &tx_ring->wq_base_dma);
+
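+       /* Reject the allocation if the DMA address is not aligned
+        * per WQ_ADDR_ALIGN.
+        */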
+       if ((tx_ring->wq_base == NULL) ||
+           tx_ring->wq_base_dma & WQ_ADDR_ALIGN)
+               goto pci_alloc_err;
+
+       tx_ring->q =
+           kmalloc(tx_ring->wq_len * sizeof(struct tx_ring_desc), GFP_KERNEL);
+       if (tx_ring->q == NULL)
+               goto err;
+
+       return 0;
+err:
+       pci_free_consistent(qdev->pdev, tx_ring->wq_size,
+                           tx_ring->wq_base, tx_ring->wq_base_dma);
+       tx_ring->wq_base = NULL;
+pci_alloc_err:
+       netif_err(qdev, ifup, qdev->ndev, "tx_ring alloc failed.\n");
+       return -ENOMEM;
+}
+
+static void ql_free_lbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
+{
+       struct bq_desc *lbq_desc;
+       uint32_t curr_idx, clean_idx;
+
+       curr_idx = rx_ring->lbq_curr_idx;
+       clean_idx = rx_ring->lbq_clean_idx;
+       while (curr_idx != clean_idx) {
+               lbq_desc = &rx_ring->lbq[curr_idx];
+
+               if (lbq_desc->p.pg_chunk.last_flag) {
+                       pci_unmap_page(qdev->pdev,
+                                      lbq_desc->p.pg_chunk.map,
+                                      ql_lbq_block_size(qdev),
+                                      PCI_DMA_FROMDEVICE);
+                       lbq_desc->p.pg_chunk.last_flag = 0;
+               }
+
+               put_page(lbq_desc->p.pg_chunk.page);
+               lbq_desc->p.pg_chunk.page = NULL;
+
+               if (++curr_idx == rx_ring->lbq_len)
+                       curr_idx = 0;
+
+       }
+       if (rx_ring->pg_chunk.page) {
+               pci_unmap_page(qdev->pdev, rx_ring->pg_chunk.map,
+                       ql_lbq_block_size(qdev), PCI_DMA_FROMDEVICE);
+               put_page(rx_ring->pg_chunk.page);
+               rx_ring->pg_chunk.page = NULL;
+       }
+}
+
+static void ql_free_sbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
+{
+       int i;
+       struct bq_desc *sbq_desc;
+
+       for (i = 0; i < rx_ring->sbq_len; i++) {
+               sbq_desc = &rx_ring->sbq[i];
+               if (sbq_desc == NULL) {
+                       netif_err(qdev, ifup, qdev->ndev,
+                                 "sbq_desc %d is NULL.\n", i);
+                       return;
+               }
+               if (sbq_desc->p.skb) {
+                       pci_unmap_single(qdev->pdev,
+                                        dma_unmap_addr(sbq_desc, mapaddr),
+                                        dma_unmap_len(sbq_desc, maplen),
+                                        PCI_DMA_FROMDEVICE);
+                       dev_kfree_skb(sbq_desc->p.skb);
+                       sbq_desc->p.skb = NULL;
+               }
+       }
+}
+
+/* Free all large and small rx buffers associated
+ * with the completion queues for this device.
+ */
+static void ql_free_rx_buffers(struct ql_adapter *qdev)
+{
+       int i;
+       struct rx_ring *rx_ring;
+
+       for (i = 0; i < qdev->rx_ring_count; i++) {
+               rx_ring = &qdev->rx_ring[i];
+               if (rx_ring->lbq)
+                       ql_free_lbq_buffers(qdev, rx_ring);
+               if (rx_ring->sbq)
+                       ql_free_sbq_buffers(qdev, rx_ring);
+       }
+}
+
+static void ql_alloc_rx_buffers(struct ql_adapter *qdev)
+{
+       struct rx_ring *rx_ring;
+       int i;
+
+       for (i = 0; i < qdev->rx_ring_count; i++) {
+               rx_ring = &qdev->rx_ring[i];
+               if (rx_ring->type != TX_Q)
+                       ql_update_buffer_queues(qdev, rx_ring);
+       }
+}
+
+static void ql_init_lbq_ring(struct ql_adapter *qdev,
+                               struct rx_ring *rx_ring)
+{
+       int i;
+       struct bq_desc *lbq_desc;
+       __le64 *bq = rx_ring->lbq_base;
+
+       memset(rx_ring->lbq, 0, rx_ring->lbq_len * sizeof(struct bq_desc));
+       for (i = 0; i < rx_ring->lbq_len; i++) {
+               lbq_desc = &rx_ring->lbq[i];
+               memset(lbq_desc, 0, sizeof(*lbq_desc));
+               lbq_desc->index = i;
+               lbq_desc->addr = bq;
+               bq++;
+       }
+}
+
+static void ql_init_sbq_ring(struct ql_adapter *qdev,
+                               struct rx_ring *rx_ring)
+{
+       int i;
+       struct bq_desc *sbq_desc;
+       __le64 *bq = rx_ring->sbq_base;
+
+       memset(rx_ring->sbq, 0, rx_ring->sbq_len * sizeof(struct bq_desc));
+       for (i = 0; i < rx_ring->sbq_len; i++) {
+               sbq_desc = &rx_ring->sbq[i];
+               memset(sbq_desc, 0, sizeof(*sbq_desc));
+               sbq_desc->index = i;
+               sbq_desc->addr = bq;
+               bq++;
+       }
+}
+
+static void ql_free_rx_resources(struct ql_adapter *qdev,
+                                struct rx_ring *rx_ring)
+{
+       /* Free the small buffer queue. */
+       if (rx_ring->sbq_base) {
+               pci_free_consistent(qdev->pdev,
+                                   rx_ring->sbq_size,
+                                   rx_ring->sbq_base, rx_ring->sbq_base_dma);
+               rx_ring->sbq_base = NULL;
+       }
+
+       /* Free the small buffer queue control blocks. */
+       kfree(rx_ring->sbq);
+       rx_ring->sbq = NULL;
+
+       /* Free the large buffer queue. */
+       if (rx_ring->lbq_base) {
+               pci_free_consistent(qdev->pdev,
+                                   rx_ring->lbq_size,
+                                   rx_ring->lbq_base, rx_ring->lbq_base_dma);
+               rx_ring->lbq_base = NULL;
+       }
+
+       /* Free the large buffer queue control blocks. */
+       kfree(rx_ring->lbq);
+       rx_ring->lbq = NULL;
+
+       /* Free the rx queue. */
+       if (rx_ring->cq_base) {
+               pci_free_consistent(qdev->pdev,
+                                   rx_ring->cq_size,
+                                   rx_ring->cq_base, rx_ring->cq_base_dma);
+               rx_ring->cq_base = NULL;
+       }
+}
+
+/* Allocate queues and buffers for this completion queue based
+ * on the values in the parameter structure. */
+static int ql_alloc_rx_resources(struct ql_adapter *qdev,
+                                struct rx_ring *rx_ring)
+{
+
+       /*
+        * Allocate the completion queue for this rx_ring.
+        */
+       rx_ring->cq_base =
+           pci_alloc_consistent(qdev->pdev, rx_ring->cq_size,
+                                &rx_ring->cq_base_dma);
+
+       if (rx_ring->cq_base == NULL) {
+               netif_err(qdev, ifup, qdev->ndev, "rx_ring alloc failed.\n");
+               return -ENOMEM;
+       }
+
+       if (rx_ring->sbq_len) {
+               /*
+                * Allocate small buffer queue.
+                */
+               rx_ring->sbq_base =
+                   pci_alloc_consistent(qdev->pdev, rx_ring->sbq_size,
+                                        &rx_ring->sbq_base_dma);
+
+               if (rx_ring->sbq_base == NULL) {
+                       netif_err(qdev, ifup, qdev->ndev,
+                                 "Small buffer queue allocation failed.\n");
+                       goto err_mem;
+               }
+
+               /*
+                * Allocate small buffer queue control blocks.
+                */
+               rx_ring->sbq = kmalloc_array(rx_ring->sbq_len,
+                                            sizeof(struct bq_desc),
+                                            GFP_KERNEL);
+               if (rx_ring->sbq == NULL)
+                       goto err_mem;
+
+               ql_init_sbq_ring(qdev, rx_ring);
+       }
+
+       if (rx_ring->lbq_len) {
+               /*
+                * Allocate large buffer queue.
+                */
+               rx_ring->lbq_base =
+                   pci_alloc_consistent(qdev->pdev, rx_ring->lbq_size,
+                                        &rx_ring->lbq_base_dma);
+
+               if (rx_ring->lbq_base == NULL) {
+                       netif_err(qdev, ifup, qdev->ndev,
+                                 "Large buffer queue allocation failed.\n");
+                       goto err_mem;
+               }
+               /*
+                * Allocate large buffer queue control blocks.
+                */
+               rx_ring->lbq = kmalloc_array(rx_ring->lbq_len,
+                                            sizeof(struct bq_desc),
+                                            GFP_KERNEL);
+               if (rx_ring->lbq == NULL)
+                       goto err_mem;
+
+               ql_init_lbq_ring(qdev, rx_ring);
+       }
+
+       return 0;
+
+err_mem:
+       ql_free_rx_resources(qdev, rx_ring);
+       return -ENOMEM;
+}
+
+static void ql_tx_ring_clean(struct ql_adapter *qdev)
+{
+       struct tx_ring *tx_ring;
+       struct tx_ring_desc *tx_ring_desc;
+       int i, j;
+
+       /*
+        * Loop through all queues and free
+        * any resources.
+        */
+       for (j = 0; j < qdev->tx_ring_count; j++) {
+               tx_ring = &qdev->tx_ring[j];
+               for (i = 0; i < tx_ring->wq_len; i++) {
+                       tx_ring_desc = &tx_ring->q[i];
+                       if (tx_ring_desc && tx_ring_desc->skb) {
+                               netif_err(qdev, ifdown, qdev->ndev,
+                                         "Freeing lost SKB %p, from queue %d, index %d.\n",
+                                         tx_ring_desc->skb, j,
+                                         tx_ring_desc->index);
+                               ql_unmap_send(qdev, tx_ring_desc,
+                                             tx_ring_desc->map_cnt);
+                               dev_kfree_skb(tx_ring_desc->skb);
+                               tx_ring_desc->skb = NULL;
+                       }
+               }
+       }
+}
+
+static void ql_free_mem_resources(struct ql_adapter *qdev)
+{
+       int i;
+
+       for (i = 0; i < qdev->tx_ring_count; i++)
+               ql_free_tx_resources(qdev, &qdev->tx_ring[i]);
+       for (i = 0; i < qdev->rx_ring_count; i++)
+               ql_free_rx_resources(qdev, &qdev->rx_ring[i]);
+       ql_free_shadow_space(qdev);
+}
+
+static int ql_alloc_mem_resources(struct ql_adapter *qdev)
+{
+       int i;
+
+       /* Allocate space for our shadow registers and such. */
+       if (ql_alloc_shadow_space(qdev))
+               return -ENOMEM;
+
+       for (i = 0; i < qdev->rx_ring_count; i++) {
+               if (ql_alloc_rx_resources(qdev, &qdev->rx_ring[i]) != 0) {
+                       netif_err(qdev, ifup, qdev->ndev,
+                                 "RX resource allocation failed.\n");
+                       goto err_mem;
+               }
+       }
+       /* Allocate tx queue resources */
+       for (i = 0; i < qdev->tx_ring_count; i++) {
+               if (ql_alloc_tx_resources(qdev, &qdev->tx_ring[i]) != 0) {
+                       netif_err(qdev, ifup, qdev->ndev,
+                                 "TX resource allocation failed.\n");
+                       goto err_mem;
+               }
+       }
+       return 0;
+
+err_mem:
+       ql_free_mem_resources(qdev);
+       return -ENOMEM;
+}
+
+/* Set up the rx ring control block and pass it to the chip.
+ * The control block is defined as
+ * "Completion Queue Initialization Control Block", or cqicb.
+ */
+static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring)
+{
+       struct cqicb *cqicb = &rx_ring->cqicb;
+       void *shadow_reg = qdev->rx_ring_shadow_reg_area +
+               (rx_ring->cq_id * RX_RING_SHADOW_SPACE);
+       u64 shadow_reg_dma = qdev->rx_ring_shadow_reg_dma +
+               (rx_ring->cq_id * RX_RING_SHADOW_SPACE);
+       void __iomem *doorbell_area =
+           qdev->doorbell_area + (DB_PAGE_SIZE * (128 + rx_ring->cq_id));
+       int err = 0;
+       u16 bq_len;
+       u64 tmp;
+       __le64 *base_indirect_ptr;
+       int page_entries;
+
+       /* Set up the shadow registers for this ring. */
+       rx_ring->prod_idx_sh_reg = shadow_reg;
+       rx_ring->prod_idx_sh_reg_dma = shadow_reg_dma;
+       *rx_ring->prod_idx_sh_reg = 0;
+       shadow_reg += sizeof(u64);
+       shadow_reg_dma += sizeof(u64);
+       rx_ring->lbq_base_indirect = shadow_reg;
+       rx_ring->lbq_base_indirect_dma = shadow_reg_dma;
+       shadow_reg += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
+       shadow_reg_dma += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
+       rx_ring->sbq_base_indirect = shadow_reg;
+       rx_ring->sbq_base_indirect_dma = shadow_reg_dma;
+
+       /* PCI doorbell mem area + 0x00 for consumer index register */
+       rx_ring->cnsmr_idx_db_reg = (u32 __iomem *) doorbell_area;
+       rx_ring->cnsmr_idx = 0;
+       rx_ring->curr_entry = rx_ring->cq_base;
+
+       /* PCI doorbell mem area + 0x04 for valid register */
+       rx_ring->valid_db_reg = doorbell_area + 0x04;
+
+       /* PCI doorbell mem area + 0x18 for large buffer consumer */
+       rx_ring->lbq_prod_idx_db_reg = (u32 __iomem *) (doorbell_area + 0x18);
+
+       /* PCI doorbell mem area + 0x1c */
+       rx_ring->sbq_prod_idx_db_reg = (u32 __iomem *) (doorbell_area + 0x1c);
+
+       memset((void *)cqicb, 0, sizeof(struct cqicb));
+       cqicb->msix_vect = rx_ring->irq;
+
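+       /* A completion queue length of 65536 does not fit in the 16-bit
+        * length field, so it is written as 0.
+        */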
+       bq_len = (rx_ring->cq_len == 65536) ? 0 : (u16) rx_ring->cq_len;
+       cqicb->len = cpu_to_le16(bq_len | LEN_V | LEN_CPP_CONT);
+
+       cqicb->addr = cpu_to_le64(rx_ring->cq_base_dma);
+
+       cqicb->prod_idx_addr = cpu_to_le64(rx_ring->prod_idx_sh_reg_dma);
+
+       /*
+        * Set up the control block load flags.
+        */
+       cqicb->flags = FLAGS_LC |       /* Load queue base address */
+           FLAGS_LV |          /* Load MSI-X vector */
+           FLAGS_LI;           /* Load irq delay values */
+       if (rx_ring->lbq_len) {
+               cqicb->flags |= FLAGS_LL;       /* Load lbq values */
+               tmp = (u64)rx_ring->lbq_base_dma;
+               base_indirect_ptr = rx_ring->lbq_base_indirect;
+               page_entries = 0;
+               do {
+                       *base_indirect_ptr = cpu_to_le64(tmp);
+                       tmp += DB_PAGE_SIZE;
+                       base_indirect_ptr++;
+                       page_entries++;
+               } while (page_entries < MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
+               cqicb->lbq_addr =
+                   cpu_to_le64(rx_ring->lbq_base_indirect_dma);
+               bq_len = (rx_ring->lbq_buf_size == 65536) ? 0 :
+                       (u16) rx_ring->lbq_buf_size;
+               cqicb->lbq_buf_size = cpu_to_le16(bq_len);
+               bq_len = (rx_ring->lbq_len == 65536) ? 0 :
+                       (u16) rx_ring->lbq_len;
+               cqicb->lbq_len = cpu_to_le16(bq_len);
+               rx_ring->lbq_prod_idx = 0;
+               rx_ring->lbq_curr_idx = 0;
+               rx_ring->lbq_clean_idx = 0;
+               rx_ring->lbq_free_cnt = rx_ring->lbq_len;
+       }
+       if (rx_ring->sbq_len) {
+               cqicb->flags |= FLAGS_LS;       /* Load sbq values */
+               tmp = (u64)rx_ring->sbq_base_dma;
+               base_indirect_ptr = rx_ring->sbq_base_indirect;
+               page_entries = 0;
+               do {
+                       *base_indirect_ptr = cpu_to_le64(tmp);
+                       tmp += DB_PAGE_SIZE;
+                       base_indirect_ptr++;
+                       page_entries++;
+               } while (page_entries < MAX_DB_PAGES_PER_BQ(rx_ring->sbq_len));
+               cqicb->sbq_addr =
+                   cpu_to_le64(rx_ring->sbq_base_indirect_dma);
+               cqicb->sbq_buf_size =
+                   cpu_to_le16((u16)(rx_ring->sbq_buf_size));
+               bq_len = (rx_ring->sbq_len == 65536) ? 0 :
+                       (u16) rx_ring->sbq_len;
+               cqicb->sbq_len = cpu_to_le16(bq_len);
+               rx_ring->sbq_prod_idx = 0;
+               rx_ring->sbq_curr_idx = 0;
+               rx_ring->sbq_clean_idx = 0;
+               rx_ring->sbq_free_cnt = rx_ring->sbq_len;
+       }
+       switch (rx_ring->type) {
+       case TX_Q:
+               cqicb->irq_delay = cpu_to_le16(qdev->tx_coalesce_usecs);
+               cqicb->pkt_delay = cpu_to_le16(qdev->tx_max_coalesced_frames);
+               break;
+       case RX_Q:
+               /* Inbound completion handling rx_rings run in
+                * separate NAPI contexts.
+                */
+               netif_napi_add(qdev->ndev, &rx_ring->napi, ql_napi_poll_msix,
+                              64);
+               cqicb->irq_delay = cpu_to_le16(qdev->rx_coalesce_usecs);
+               cqicb->pkt_delay = cpu_to_le16(qdev->rx_max_coalesced_frames);
+               break;
+       default:
+               netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
+                            "Invalid rx_ring->type = %d.\n", rx_ring->type);
+       }
+       err = ql_write_cfg(qdev, cqicb, sizeof(struct cqicb),
+                          CFG_LCQ, rx_ring->cq_id);
+       if (err) {
+               netif_err(qdev, ifup, qdev->ndev, "Failed to load CQICB.\n");
+               return err;
+       }
+       return err;
+}
+
+static int ql_start_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
+{
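+       /* The wqicb is the first member of struct tx_ring, so the ring
+        * pointer doubles as the control block pointer.
+        */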
+       struct wqicb *wqicb = (struct wqicb *)tx_ring;
+       void __iomem *doorbell_area =
+           qdev->doorbell_area + (DB_PAGE_SIZE * tx_ring->wq_id);
+       void *shadow_reg = qdev->tx_ring_shadow_reg_area +
+           (tx_ring->wq_id * sizeof(u64));
+       u64 shadow_reg_dma = qdev->tx_ring_shadow_reg_dma +
+           (tx_ring->wq_id * sizeof(u64));
+       int err = 0;
+
+       /*
+        * Assign doorbell registers for this tx_ring.
+        */
+       /* TX PCI doorbell mem area for tx producer index */
+       tx_ring->prod_idx_db_reg = (u32 __iomem *) doorbell_area;
+       tx_ring->prod_idx = 0;
+       /* TX PCI doorbell mem area + 0x04 */
+       tx_ring->valid_db_reg = doorbell_area + 0x04;
+
+       /*
+        * Assign shadow registers for this tx_ring.
+        */
+       tx_ring->cnsmr_idx_sh_reg = shadow_reg;
+       tx_ring->cnsmr_idx_sh_reg_dma = shadow_reg_dma;
+
+       wqicb->len = cpu_to_le16(tx_ring->wq_len | Q_LEN_V | Q_LEN_CPP_CONT);
+       wqicb->flags = cpu_to_le16(Q_FLAGS_LC |
+                                  Q_FLAGS_LB | Q_FLAGS_LI | Q_FLAGS_LO);
+       wqicb->cq_id_rss = cpu_to_le16(tx_ring->cq_id);
+       wqicb->rid = 0;
+       wqicb->addr = cpu_to_le64(tx_ring->wq_base_dma);
+
+       wqicb->cnsmr_idx_addr = cpu_to_le64(tx_ring->cnsmr_idx_sh_reg_dma);
+
+       ql_init_tx_ring(qdev, tx_ring);
+
+       err = ql_write_cfg(qdev, wqicb, sizeof(*wqicb), CFG_LRQ,
+                          (u16) tx_ring->wq_id);
+       if (err) {
+               netif_err(qdev, ifup, qdev->ndev, "Failed to load tx_ring.\n");
+               return err;
+       }
+       return err;
+}
+
+static void ql_disable_msix(struct ql_adapter *qdev)
+{
+       if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
+               pci_disable_msix(qdev->pdev);
+               clear_bit(QL_MSIX_ENABLED, &qdev->flags);
+               kfree(qdev->msi_x_entry);
+               qdev->msi_x_entry = NULL;
+       } else if (test_bit(QL_MSI_ENABLED, &qdev->flags)) {
+               pci_disable_msi(qdev->pdev);
+               clear_bit(QL_MSI_ENABLED, &qdev->flags);
+       }
+}
+
+/* We start by requesting the number of vectors
+ * stored in qdev->intr_count. If we can't get that
+ * many, we fall back to however many the kernel
+ * provides, or to MSI/legacy interrupts.
+ */
+static void ql_enable_msix(struct ql_adapter *qdev)
+{
+       int i, err;
+
+       /* Get the MSIX vectors. */
+       if (qlge_irq_type == MSIX_IRQ) {
+               /* Try to alloc space for the msix struct,
+                * if it fails then go to MSI/legacy.
+                */
+               qdev->msi_x_entry = kcalloc(qdev->intr_count,
+                                           sizeof(struct msix_entry),
+                                           GFP_KERNEL);
+               if (!qdev->msi_x_entry) {
+                       qlge_irq_type = MSI_IRQ;
+                       goto msi;
+               }
+
+               for (i = 0; i < qdev->intr_count; i++)
+                       qdev->msi_x_entry[i].entry = i;
+
+               err = pci_enable_msix_range(qdev->pdev, qdev->msi_x_entry,
+                                           1, qdev->intr_count);
+               if (err < 0) {
+                       kfree(qdev->msi_x_entry);
+                       qdev->msi_x_entry = NULL;
+                       netif_warn(qdev, ifup, qdev->ndev,
+                                  "MSI-X Enable failed, trying MSI.\n");
+                       qlge_irq_type = MSI_IRQ;
+               } else {
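+                       /* pci_enable_msix_range() returns the number of
+                        * vectors it actually allocated; adopt that count.
+                        */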
+                       qdev->intr_count = err;
+                       set_bit(QL_MSIX_ENABLED, &qdev->flags);
+                       netif_info(qdev, ifup, qdev->ndev,
+                                  "MSI-X Enabled, got %d vectors.\n",
+                                  qdev->intr_count);
+                       return;
+               }
+       }
+msi:
+       qdev->intr_count = 1;
+       if (qlge_irq_type == MSI_IRQ) {
+               if (!pci_enable_msi(qdev->pdev)) {
+                       set_bit(QL_MSI_ENABLED, &qdev->flags);
+                       netif_info(qdev, ifup, qdev->ndev,
+                                  "Running with MSI interrupts.\n");
+                       return;
+               }
+       }
+       qlge_irq_type = LEG_IRQ;
+       netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
+                    "Running with legacy interrupts.\n");
+}
+
+/* Each vector services 1 RSS ring and 1 or more
+ * TX completion rings.  This function loops through
+ * the TX completion rings and assigns the vector that
+ * will service it.  An example would be if there are
+ * 2 vectors (so 2 RSS rings) and 8 TX completion rings.
+ * This would mean that vector 0 would service RSS ring 0
+ * and TX completion rings 0,1,2 and 3.  Vector 1 would
+ * service RSS ring 1 and TX completion rings 4,5,6 and 7.
+ */
+static void ql_set_tx_vect(struct ql_adapter *qdev)
+{
+       int i, j, vect;
+       u32 tx_rings_per_vector = qdev->tx_ring_count / qdev->intr_count;
+
+       if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
+               /* Assign irq vectors to TX rx_rings.*/
+               for (vect = 0, j = 0, i = qdev->rss_ring_count;
+                                        i < qdev->rx_ring_count; i++) {
+                       if (j == tx_rings_per_vector) {
+                               vect++;
+                               j = 0;
+                       }
+                       qdev->rx_ring[i].irq = vect;
+                       j++;
+               }
+       } else {
+               /* For single vector all rings have an irq
+                * of zero.
+                */
+               for (i = 0; i < qdev->rx_ring_count; i++)
+                       qdev->rx_ring[i].irq = 0;
+       }
+}
+
+/* Set the interrupt mask for this vector.  Each vector
+ * will service 1 RSS ring and 1 or more TX completion
+ * rings.  This function sets up a bit mask per vector
+ * that indicates which rings it services.
+ */
+static void ql_set_irq_mask(struct ql_adapter *qdev, struct intr_context *ctx)
+{
+       int j, vect = ctx->intr;
+       u32 tx_rings_per_vector = qdev->tx_ring_count / qdev->intr_count;
+
+       if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
+               /* Add the RSS ring serviced by this vector
+                * to the mask.
+                */
+               ctx->irq_mask = (1 << qdev->rx_ring[vect].cq_id);
+               /* Add the TX ring(s) serviced by this vector
+                * to the mask. */
+               for (j = 0; j < tx_rings_per_vector; j++) {
+                       ctx->irq_mask |=
+                       (1 << qdev->rx_ring[qdev->rss_ring_count +
+                       (vect * tx_rings_per_vector) + j].cq_id);
+               }
+       } else {
+               /* For single vector we just shift each queue's
+                * ID into the mask.
+                */
+               for (j = 0; j < qdev->rx_ring_count; j++)
+                       ctx->irq_mask |= (1 << qdev->rx_ring[j].cq_id);
+       }
+}
+
+/*
+ * Here we build the intr_context structures based on
+ * our rx_ring count and intr vector count.
+ * The intr_context structure is used to hook each vector
+ * to possibly different handlers.
+ */
+static void ql_resolve_queues_to_irqs(struct ql_adapter *qdev)
+{
+       int i = 0;
+       struct intr_context *intr_context = &qdev->intr_context[0];
+
+       if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
+               /* Each rx_ring has its
+                * own intr_context since we have separate
+                * vectors for each queue.
+                */
+               for (i = 0; i < qdev->intr_count; i++, intr_context++) {
+                       qdev->rx_ring[i].irq = i;
+                       intr_context->intr = i;
+                       intr_context->qdev = qdev;
+                       /* Set up this vector's bit-mask that indicates
+                        * which queues it services.
+                        */
+                       ql_set_irq_mask(qdev, intr_context);
+                       /*
+                        * We set up each vector's enable/disable/read bits so
+                        * there are no bit/mask calculations in the critical path.
+                        */
+                       intr_context->intr_en_mask =
+                           INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
+                           INTR_EN_TYPE_ENABLE | INTR_EN_IHD_MASK | INTR_EN_IHD
+                           | i;
+                       intr_context->intr_dis_mask =
+                           INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
+                           INTR_EN_TYPE_DISABLE | INTR_EN_IHD_MASK |
+                           INTR_EN_IHD | i;
+                       intr_context->intr_read_mask =
+                           INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
+                           INTR_EN_TYPE_READ | INTR_EN_IHD_MASK | INTR_EN_IHD |
+                           i;
+                       if (i == 0) {
+                               /* The first vector/queue handles
+                                * broadcast/multicast, fatal errors,
+                                * and firmware events.  This is in addition
+                                * to normal inbound NAPI processing.
+                                */
+                               intr_context->handler = qlge_isr;
+                               sprintf(intr_context->name, "%s-rx-%d",
+                                       qdev->ndev->name, i);
+                       } else {
+                               /*
+                                * Inbound queues handle unicast frames only.
+                                */
+                               intr_context->handler = qlge_msix_rx_isr;
+                               sprintf(intr_context->name, "%s-rx-%d",
+                                       qdev->ndev->name, i);
+                       }
+               }
+       } else {
+               /*
+                * All rx_rings use the same intr_context since
+                * there is only one vector.
+                */
+               intr_context->intr = 0;
+               intr_context->qdev = qdev;
+               /*
+                * We set up each vector's enable/disable/read bits so
+                * there are no bit/mask calculations in the critical path.
+                */
+               intr_context->intr_en_mask =
+                   INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_ENABLE;
+               intr_context->intr_dis_mask =
+                   INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
+                   INTR_EN_TYPE_DISABLE;
+               intr_context->intr_read_mask =
+                   INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_READ;
+               /*
+                * Single interrupt means one handler for all rings.
+                */
+               intr_context->handler = qlge_isr;
+               sprintf(intr_context->name, "%s-single_irq", qdev->ndev->name);
+               /* Set up this vector's bit-mask that indicates
+                * which queues it services. In this case there is
+                * a single vector so it will service all RSS and
+                * TX completion rings.
+                */
+               ql_set_irq_mask(qdev, intr_context);
+       }
+       /* Tell the TX completion rings which MSIx vector
+        * they will be using.
+        */
+       ql_set_tx_vect(qdev);
+}
+
+static void ql_free_irq(struct ql_adapter *qdev)
+{
+       int i;
+       struct intr_context *intr_context = &qdev->intr_context[0];
+
+       for (i = 0; i < qdev->intr_count; i++, intr_context++) {
+               if (intr_context->hooked) {
+                       if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
+                               free_irq(qdev->msi_x_entry[i].vector,
+                                        &qdev->rx_ring[i]);
+                       } else {
+                               free_irq(qdev->pdev->irq, &qdev->rx_ring[0]);
+                       }
+               }
+       }
+       ql_disable_msix(qdev);
+}
+
+static int ql_request_irq(struct ql_adapter *qdev)
+{
+       int i;
+       int status = 0;
+       struct pci_dev *pdev = qdev->pdev;
+       struct intr_context *intr_context = &qdev->intr_context[0];
+
+       ql_resolve_queues_to_irqs(qdev);
+
+       for (i = 0; i < qdev->intr_count; i++, intr_context++) {
+               atomic_set(&intr_context->irq_cnt, 0);
+               if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
+                       status = request_irq(qdev->msi_x_entry[i].vector,
+                                            intr_context->handler,
+                                            0,
+                                            intr_context->name,
+                                            &qdev->rx_ring[i]);
+                       if (status) {
+                               netif_err(qdev, ifup, qdev->ndev,
+                                         "Failed request for MSIX interrupt %d.\n",
+                                         i);
+                               goto err_irq;
+                       }
+               } else {
+                       netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
+                                    "trying msi or legacy interrupts.\n");
+                       netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
+                                    "%s: irq = %d.\n", __func__, pdev->irq);
+                       netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
+                                    "%s: context->name = %s.\n", __func__,
+                                    intr_context->name);
+                       netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
+                                    "%s: dev_id = 0x%p.\n", __func__,
+                                    &qdev->rx_ring[0]);
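+                       /* MSI interrupts are never shared, so IRQF_SHARED
+                        * is only needed for legacy INTx.
+                        */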
+                       status =
+                           request_irq(pdev->irq, qlge_isr,
+                                       test_bit(QL_MSI_ENABLED,
+                                                &qdev->
+                                                flags) ? 0 : IRQF_SHARED,
+                                       intr_context->name, &qdev->rx_ring[0]);
+                       if (status)
+                               goto err_irq;
+
+                       netif_err(qdev, ifup, qdev->ndev,
+                                 "Hooked intr %d, queue type %s, with name %s.\n",
+                                 i,
+                                 qdev->rx_ring[0].type == DEFAULT_Q ?
+                                 "DEFAULT_Q" :
+                                 qdev->rx_ring[0].type == TX_Q ? "TX_Q" :
+                                 qdev->rx_ring[0].type == RX_Q ? "RX_Q" : "",
+                                 intr_context->name);
+               }
+               intr_context->hooked = 1;
+       }
+       return status;
+err_irq:
+       netif_err(qdev, ifup, qdev->ndev, "Failed to get the interrupts!!!\n");
+       ql_free_irq(qdev);
+       return status;
+}
+
+static int ql_start_rss(struct ql_adapter *qdev)
+{
+       static const u8 init_hash_seed[] = {
+               0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
+               0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0,
+               0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4,
+               0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c,
+               0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa
+       };
+       struct ricb *ricb = &qdev->ricb;
+       int status = 0;
+       int i;
+       u8 *hash_id = (u8 *) ricb->hash_cq_id;
+
+       memset((void *)ricb, 0, sizeof(*ricb));
+
+       ricb->base_cq = RSS_L4K;
+       ricb->flags =
+               (RSS_L6K | RSS_LI | RSS_LB | RSS_LM | RSS_RT4 | RSS_RT6);
+       ricb->mask = cpu_to_le16((u16)(0x3ff));
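+       /* The 0x3ff mask covers the 1024-entry indirection table
+        * filled in below.
+        */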
+
+       /*
+        * Fill out the Indirection Table.
+        */
+       for (i = 0; i < 1024; i++)
+               hash_id[i] = (i & (qdev->rss_ring_count - 1));
+
+       memcpy((void *)&ricb->ipv6_hash_key[0], init_hash_seed, 40);
+       memcpy((void *)&ricb->ipv4_hash_key[0], init_hash_seed, 16);
+
+       status = ql_write_cfg(qdev, ricb, sizeof(*ricb), CFG_LR, 0);
+       if (status) {
+               netif_err(qdev, ifup, qdev->ndev, "Failed to load RICB.\n");
+               return status;
+       }
+       return status;
+}
+
+static int ql_clear_routing_entries(struct ql_adapter *qdev)
+{
+       int i, status = 0;
+
+       status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
+       if (status)
+               return status;
+       /* Clear all the entries in the routing table. */
+       for (i = 0; i < 16; i++) {
+               status = ql_set_routing_reg(qdev, i, 0, 0);
+               if (status) {
+                       netif_err(qdev, ifup, qdev->ndev,
+                                 "Failed to init routing register for CAM packets.\n");
+                       break;
+               }
+       }
+       ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
+       return status;
+}
+
+/* Initialize the frame-to-queue routing. */
+static int ql_route_initialize(struct ql_adapter *qdev)
+{
+       int status = 0;
+
+       /* Clear all the entries in the routing table. */
+       status = ql_clear_routing_entries(qdev);
+       if (status)
+               return status;
+
+       status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
+       if (status)
+               return status;
+
+       status = ql_set_routing_reg(qdev, RT_IDX_IP_CSUM_ERR_SLOT,
+                                               RT_IDX_IP_CSUM_ERR, 1);
+       if (status) {
+               netif_err(qdev, ifup, qdev->ndev,
+                         "Failed to init routing register for IP CSUM error packets.\n");
+               goto exit;
+       }
+       status = ql_set_routing_reg(qdev, RT_IDX_TCP_UDP_CSUM_ERR_SLOT,
+                                               RT_IDX_TU_CSUM_ERR, 1);
+       if (status) {
+               netif_err(qdev, ifup, qdev->ndev,
+                         "Failed to init routing register for TCP/UDP CSUM error packets.\n");
+               goto exit;
+       }
+       status = ql_set_routing_reg(qdev, RT_IDX_BCAST_SLOT, RT_IDX_BCAST, 1);
+       if (status) {
+               netif_err(qdev, ifup, qdev->ndev,
+                         "Failed to init routing register for broadcast packets.\n");
+               goto exit;
+       }
+       /* If we have more than one inbound queue, then turn on RSS in the
+        * routing block.
+        */
+       if (qdev->rss_ring_count > 1) {
+               status = ql_set_routing_reg(qdev, RT_IDX_RSS_MATCH_SLOT,
+                                       RT_IDX_RSS_MATCH, 1);
+               if (status) {
+                       netif_err(qdev, ifup, qdev->ndev,
+                                 "Failed to init routing register for MATCH RSS packets.\n");
+                       goto exit;
+               }
+       }
+
+       status = ql_set_routing_reg(qdev, RT_IDX_CAM_HIT_SLOT,
+                                   RT_IDX_CAM_HIT, 1);
+       if (status)
+               netif_err(qdev, ifup, qdev->ndev,
+                         "Failed to init routing register for CAM packets.\n");
+exit:
+       ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
+       return status;
+}
+
+int ql_cam_route_initialize(struct ql_adapter *qdev)
+{
+       int status, set;
+
+       /* Check if the link is up and use that to
+        * determine whether we are setting or clearing
+        * the MAC address in the CAM.
+        */
+       set = ql_read32(qdev, STS);
+       set &= qdev->port_link_up;
+       status = ql_set_mac_addr(qdev, set);
+       if (status) {
+               netif_err(qdev, ifup, qdev->ndev, "Failed to init mac address.\n");
+               return status;
+       }
+
+       status = ql_route_initialize(qdev);
+       if (status)
+               netif_err(qdev, ifup, qdev->ndev, "Failed to init routing table.\n");
+
+       return status;
+}
+
+static int ql_adapter_initialize(struct ql_adapter *qdev)
+{
+       u32 value, mask;
+       int i;
+       int status = 0;
+
+       /*
+        * Set up the System register to halt on errors.
+        */
+       value = SYS_EFE | SYS_FAE;
+       mask = value << 16;
+       ql_write32(qdev, SYS, mask | value);
+
+       /* Set the default queue, and VLAN behavior. */
+       value = NIC_RCV_CFG_DFQ;
+       mask = NIC_RCV_CFG_DFQ_MASK;
+       if (qdev->ndev->features & NETIF_F_HW_VLAN_CTAG_RX) {
+               value |= NIC_RCV_CFG_RV;
+               mask |= (NIC_RCV_CFG_RV << 16);
+       }
+       ql_write32(qdev, NIC_RCV_CFG, (mask | value));
+
+       /* Set the MPI interrupt to enabled. */
+       ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16) | INTR_MASK_PI);
+
+       /* Enable the function, set pagesize, enable error checking. */
+       value = FSC_FE | FSC_EPC_INBOUND | FSC_EPC_OUTBOUND |
+           FSC_EC | FSC_VM_PAGE_4K;
+       value |= SPLT_SETTING;
+
+       /* Set/clear header splitting. */
+       mask = FSC_VM_PAGESIZE_MASK |
+           FSC_DBL_MASK | FSC_DBRST_MASK | (value << 16);
+       ql_write32(qdev, FSC, mask | value);
+
+       ql_write32(qdev, SPLT_HDR, SPLT_LEN);
+
+       /* Set RX packet routing to use the port/pci function on which the
+        * packet arrived, in addition to the usual frame routing.
+        * This is helpful on bonding where both interfaces can have
+        * the same MAC address.
+        */
+       ql_write32(qdev, RST_FO, RST_FO_RR_MASK | RST_FO_RR_RCV_FUNC_CQ);
+       /* Reroute all packets to our Interface.
+        * They may have been routed to MPI firmware
+        * due to WOL.
+        */
+       value = ql_read32(qdev, MGMT_RCV_CFG);
+       value &= ~MGMT_RCV_CFG_RM;
+       mask = 0xffff0000;
+
+       /* Sticky reg needs clearing due to WOL. */
+       ql_write32(qdev, MGMT_RCV_CFG, mask);
+       ql_write32(qdev, MGMT_RCV_CFG, mask | value);
+
+       /* Default WOL is enabled on Mezz cards */
+       if (qdev->pdev->subsystem_device == 0x0068 ||
+                       qdev->pdev->subsystem_device == 0x0180)
+               qdev->wol = WAKE_MAGIC;
+
+       /* Start up the rx queues. */
+       for (i = 0; i < qdev->rx_ring_count; i++) {
+               status = ql_start_rx_ring(qdev, &qdev->rx_ring[i]);
+               if (status) {
+                       netif_err(qdev, ifup, qdev->ndev,
+                                 "Failed to start rx ring[%d].\n", i);
+                       return status;
+               }
+       }
+
+       /* If there is more than one inbound completion queue
+        * then download a RICB to configure RSS.
+        */
+       if (qdev->rss_ring_count > 1) {
+               status = ql_start_rss(qdev);
+               if (status) {
+                       netif_err(qdev, ifup, qdev->ndev, "Failed to start RSS.\n");
+                       return status;
+               }
+       }
+
+       /* Start up the tx queues. */
+       for (i = 0; i < qdev->tx_ring_count; i++) {
+               status = ql_start_tx_ring(qdev, &qdev->tx_ring[i]);
+               if (status) {
+                       netif_err(qdev, ifup, qdev->ndev,
+                                 "Failed to start tx ring[%d].\n", i);
+                       return status;
+               }
+       }
+
+       /* Initialize the port and set the max framesize. */
+       status = qdev->nic_ops->port_initialize(qdev);
+       if (status)
+               netif_err(qdev, ifup, qdev->ndev, "Failed to start port.\n");
+
+       /* Set up the MAC address and frame routing filter. */
+       status = ql_cam_route_initialize(qdev);
+       if (status) {
+               netif_err(qdev, ifup, qdev->ndev,
+                         "Failed to init CAM/Routing tables.\n");
+               return status;
+       }
+
+       /* Start NAPI for the RSS queues. */
+       for (i = 0; i < qdev->rss_ring_count; i++)
+               napi_enable(&qdev->rx_ring[i].napi);
+
+       return status;
+}
+
+/* Issue soft reset to chip. */
+static int ql_adapter_reset(struct ql_adapter *qdev)
+{
+       u32 value;
+       int status = 0;
+       unsigned long end_jiffies;
+
+       /* Clear all the entries in the routing table. */
+       status = ql_clear_routing_entries(qdev);
+       if (status) {
+               netif_err(qdev, ifup, qdev->ndev, "Failed to clear routing bits.\n");
+               return status;
+       }
+
+       /* If the recovery bit is set, skip the mailbox command and
+        * clear the bit; otherwise we are in the normal reset process.
+        */
+       if (!test_bit(QL_ASIC_RECOVERY, &qdev->flags)) {
+               /* Stop management traffic. */
+               ql_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_STOP);
+
+               /* Wait for the NIC and MGMNT FIFOs to empty. */
+               ql_wait_fifo_empty(qdev);
+       } else
+               clear_bit(QL_ASIC_RECOVERY, &qdev->flags);
+
+       ql_write32(qdev, RST_FO, (RST_FO_FR << 16) | RST_FO_FR);
+
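+       /* Poll briefly for the function reset bit to self-clear. */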
+       end_jiffies = jiffies + usecs_to_jiffies(30);
+       do {
+               value = ql_read32(qdev, RST_FO);
+               if ((value & RST_FO_FR) == 0)
+                       break;
+               cpu_relax();
+       } while (time_before(jiffies, end_jiffies));
+
+       if (value & RST_FO_FR) {
+               netif_err(qdev, ifdown, qdev->ndev,
+                         "ETIMEDOUT!!! errored out of resetting the chip!\n");
+               status = -ETIMEDOUT;
+       }
+
+       /* Resume management traffic. */
+       ql_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_RESUME);
+       return status;
+}
+
+static void ql_display_dev_info(struct net_device *ndev)
+{
+       struct ql_adapter *qdev = netdev_priv(ndev);
+
+       netif_info(qdev, probe, qdev->ndev,
+                  "Function #%d, Port %d, NIC Roll %d, NIC Rev = %d, "
+                  "XG Roll = %d, XG Rev = %d.\n",
+                  qdev->func,
+                  qdev->port,
+                  qdev->chip_rev_id & 0x0000000f,
+                  qdev->chip_rev_id >> 4 & 0x0000000f,
+                  qdev->chip_rev_id >> 8 & 0x0000000f,
+                  qdev->chip_rev_id >> 12 & 0x0000000f);
+       netif_info(qdev, probe, qdev->ndev,
+                  "MAC address %pM\n", ndev->dev_addr);
+}
+
+static int ql_wol(struct ql_adapter *qdev)
+{
+       int status = 0;
+       u32 wol = MB_WOL_DISABLE;
+
+       /* The CAM is still intact after a reset, but if we
+        * are doing WOL, then we may need to program the
+        * routing regs. We would also need to issue the mailbox
+        * commands to instruct the MPI what to do per the ethtool
+        * settings.
+        */
+
+       if (qdev->wol & (WAKE_ARP | WAKE_MAGICSECURE | WAKE_PHY | WAKE_UCAST |
+                       WAKE_MCAST | WAKE_BCAST)) {
+               netif_err(qdev, ifdown, qdev->ndev,
+                         "Unsupported WOL parameter. qdev->wol = 0x%x.\n",
+                         qdev->wol);
+               return -EINVAL;
+       }
+
+       if (qdev->wol & WAKE_MAGIC) {
+               status = ql_mb_wol_set_magic(qdev, 1);
+               if (status) {
+                       netif_err(qdev, ifdown, qdev->ndev,
+                                 "Failed to set magic packet on %s.\n",
+                                 qdev->ndev->name);
+                       return status;
+               } else
+                       netif_info(qdev, drv, qdev->ndev,
+                                  "Enabled magic packet successfully on %s.\n",
+                                  qdev->ndev->name);
+
+               wol |= MB_WOL_MAGIC_PKT;
+       }
+
+       if (qdev->wol) {
+               wol |= MB_WOL_MODE_ON;
+               status = ql_mb_wol_mode(qdev, wol);
+               netif_err(qdev, drv, qdev->ndev,
+                         "WOL %s (wol code 0x%x) on %s\n",
+                         (status == 0) ? "Successfully set" : "Failed",
+                         wol, qdev->ndev->name);
+       }
+
+       return status;
+}
+
+static void ql_cancel_all_work_sync(struct ql_adapter *qdev)
+{
+
+       /* Don't kill the reset worker thread if we
+        * are in the process of recovery.
+        */
+       if (test_bit(QL_ADAPTER_UP, &qdev->flags))
+               cancel_delayed_work_sync(&qdev->asic_reset_work);
+       cancel_delayed_work_sync(&qdev->mpi_reset_work);
+       cancel_delayed_work_sync(&qdev->mpi_work);
+       cancel_delayed_work_sync(&qdev->mpi_idc_work);
+       cancel_delayed_work_sync(&qdev->mpi_core_to_log);
+       cancel_delayed_work_sync(&qdev->mpi_port_cfg_work);
+}
+
+static int ql_adapter_down(struct ql_adapter *qdev)
+{
+       int i, status = 0;
+
+       ql_link_off(qdev);
+
+       ql_cancel_all_work_sync(qdev);
+
+       for (i = 0; i < qdev->rss_ring_count; i++)
+               napi_disable(&qdev->rx_ring[i].napi);
+
+       clear_bit(QL_ADAPTER_UP, &qdev->flags);
+
+       ql_disable_interrupts(qdev);
+
+       ql_tx_ring_clean(qdev);
+
+       /* Call netif_napi_del() from a common point.
+        */
+       for (i = 0; i < qdev->rss_ring_count; i++)
+               netif_napi_del(&qdev->rx_ring[i].napi);
+
+       status = ql_adapter_reset(qdev);
+       if (status)
+               netif_err(qdev, ifdown, qdev->ndev, "reset(func #%d) FAILED!\n",
+                         qdev->func);
+       ql_free_rx_buffers(qdev);
+
+       return status;
+}
+
+static int ql_adapter_up(struct ql_adapter *qdev)
+{
+       int err = 0;
+
+       err = ql_adapter_initialize(qdev);
+       if (err) {
+               netif_info(qdev, ifup, qdev->ndev, "Unable to initialize adapter.\n");
+               goto err_init;
+       }
+       set_bit(QL_ADAPTER_UP, &qdev->flags);
+       ql_alloc_rx_buffers(qdev);
+       /* If the port is initialized and the
+        * link is up then turn on the carrier.
+        */
+       if ((ql_read32(qdev, STS) & qdev->port_init) &&
+                       (ql_read32(qdev, STS) & qdev->port_link_up))
+               ql_link_on(qdev);
+       /* Restore rx mode. */
+       clear_bit(QL_ALLMULTI, &qdev->flags);
+       clear_bit(QL_PROMISCUOUS, &qdev->flags);
+       qlge_set_multicast_list(qdev->ndev);
+
+       /* Restore vlan setting. */
+       qlge_restore_vlan(qdev);
+
+       ql_enable_interrupts(qdev);
+       ql_enable_all_completion_interrupts(qdev);
+       netif_tx_start_all_queues(qdev->ndev);
+
+       return 0;
+err_init:
+       ql_adapter_reset(qdev);
+       return err;
+}
+
+static void ql_release_adapter_resources(struct ql_adapter *qdev)
+{
+       ql_free_mem_resources(qdev);
+       ql_free_irq(qdev);
+}
+
+static int ql_get_adapter_resources(struct ql_adapter *qdev)
+{
+       int status = 0;
+
+       if (ql_alloc_mem_resources(qdev)) {
+               netif_err(qdev, ifup, qdev->ndev, "Unable to  allocate memory.\n");
+               return -ENOMEM;
+       }
+       status = ql_request_irq(qdev);
+       return status;
+}
+
+static int qlge_close(struct net_device *ndev)
+{
+       struct ql_adapter *qdev = netdev_priv(ndev);
+
+       /* If we hit the pci_channel_io_perm_failure condition,
+        * then we already brought the adapter down.
+        */
+       if (test_bit(QL_EEH_FATAL, &qdev->flags)) {
+               netif_err(qdev, drv, qdev->ndev, "EEH fatal did unload.\n");
+               clear_bit(QL_EEH_FATAL, &qdev->flags);
+               return 0;
+       }
+
+       /*
+        * Wait for device to recover from a reset.
+        * (Rarely happens, but possible.)
+        */
+       while (!test_bit(QL_ADAPTER_UP, &qdev->flags))
+               msleep(1);
+       ql_adapter_down(qdev);
+       ql_release_adapter_resources(qdev);
+       return 0;
+}
+
+static int ql_configure_rings(struct ql_adapter *qdev)
+{
+       int i;
+       struct rx_ring *rx_ring;
+       struct tx_ring *tx_ring;
+       int cpu_cnt = min(MAX_CPUS, (int)num_online_cpus());
+       unsigned int lbq_buf_len = (qdev->ndev->mtu > 1500) ?
+               LARGE_BUFFER_MAX_SIZE : LARGE_BUFFER_MIN_SIZE;
+
+       qdev->lbq_buf_order = get_order(lbq_buf_len);
+
+       /* In a perfect world we have one RSS ring for each CPU
+        * and each has its own vector.  To do that we ask for
+        * cpu_cnt vectors.  ql_enable_msix() will adjust the
+        * vector count to what we actually get.  We then
+        * allocate an RSS ring for each.
+        * Essentially, we are doing min(cpu_count, msix_vector_count).
+        */
+       qdev->intr_count = cpu_cnt;
+       ql_enable_msix(qdev);
+       /* Adjust the RSS ring count to the actual vector count. */
+       qdev->rss_ring_count = qdev->intr_count;
+       qdev->tx_ring_count = cpu_cnt;
+       qdev->rx_ring_count = qdev->tx_ring_count + qdev->rss_ring_count;
+
+       for (i = 0; i < qdev->tx_ring_count; i++) {
+               tx_ring = &qdev->tx_ring[i];
+               memset((void *)tx_ring, 0, sizeof(*tx_ring));
+               tx_ring->qdev = qdev;
+               tx_ring->wq_id = i;
+               tx_ring->wq_len = qdev->tx_ring_size;
+               tx_ring->wq_size =
+                   tx_ring->wq_len * sizeof(struct ob_mac_iocb_req);
+
+               /*
+                * The completion queue IDs for the tx rings start
+                * immediately after the rss rings.
+                */
+               tx_ring->cq_id = qdev->rss_ring_count + i;
+       }
+
+       for (i = 0; i < qdev->rx_ring_count; i++) {
+               rx_ring = &qdev->rx_ring[i];
+               memset((void *)rx_ring, 0, sizeof(*rx_ring));
+               rx_ring->qdev = qdev;
+               rx_ring->cq_id = i;
+               rx_ring->cpu = i % cpu_cnt;     /* CPU to run handler on. */
+               if (i < qdev->rss_ring_count) {
+                       /*
+                        * Inbound (RSS) queues.
+                        */
+                       rx_ring->cq_len = qdev->rx_ring_size;
+                       rx_ring->cq_size =
+                           rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
+                       rx_ring->lbq_len = NUM_LARGE_BUFFERS;
+                       rx_ring->lbq_size =
+                           rx_ring->lbq_len * sizeof(__le64);
+                       rx_ring->lbq_buf_size = (u16)lbq_buf_len;
+                       rx_ring->sbq_len = NUM_SMALL_BUFFERS;
+                       rx_ring->sbq_size =
+                           rx_ring->sbq_len * sizeof(__le64);
+                       rx_ring->sbq_buf_size = SMALL_BUF_MAP_SIZE;
+                       rx_ring->type = RX_Q;
+               } else {
+                       /*
+                        * Outbound queue handles outbound completions only.
+                        */
+                       /* outbound cq is same size as tx_ring it services. */
+                       rx_ring->cq_len = qdev->tx_ring_size;
+                       rx_ring->cq_size =
+                           rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
+                       rx_ring->lbq_len = 0;
+                       rx_ring->lbq_size = 0;
+                       rx_ring->lbq_buf_size = 0;
+                       rx_ring->sbq_len = 0;
+                       rx_ring->sbq_size = 0;
+                       rx_ring->sbq_buf_size = 0;
+                       rx_ring->type = TX_Q;
+               }
+       }
+       return 0;
+}
+
+static int qlge_open(struct net_device *ndev)
+{
+       int err = 0;
+       struct ql_adapter *qdev = netdev_priv(ndev);
+
+       err = ql_adapter_reset(qdev);
+       if (err)
+               return err;
+
+       err = ql_configure_rings(qdev);
+       if (err)
+               return err;
+
+       err = ql_get_adapter_resources(qdev);
+       if (err)
+               goto error_up;
+
+       err = ql_adapter_up(qdev);
+       if (err)
+               goto error_up;
+
+       return err;
+
+error_up:
+       ql_release_adapter_resources(qdev);
+       return err;
+}
+
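+/* Bring the adapter down, switch every RSS ring to the large receive
+ * buffer size implied by the new MTU, then bring it back up.
+ */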
+static int ql_change_rx_buffers(struct ql_adapter *qdev)
+{
+       struct rx_ring *rx_ring;
+       int i, status;
+       u32 lbq_buf_len;
+
+       /* Wait for an outstanding reset to complete. */
+       if (!test_bit(QL_ADAPTER_UP, &qdev->flags)) {
+               int i = 4;
+
+               while (--i && !test_bit(QL_ADAPTER_UP, &qdev->flags)) {
+                       netif_err(qdev, ifup, qdev->ndev,
+                                 "Waiting for adapter UP...\n");
+                       ssleep(1);
+               }
+
+               if (!i) {
+                       netif_err(qdev, ifup, qdev->ndev,
+                                 "Timed out waiting for adapter UP\n");
+                       return -ETIMEDOUT;
+               }
+       }
+
+       status = ql_adapter_down(qdev);
+       if (status)
+               goto error;
+
+       /* Get the new rx buffer size. */
+       lbq_buf_len = (qdev->ndev->mtu > 1500) ?
+               LARGE_BUFFER_MAX_SIZE : LARGE_BUFFER_MIN_SIZE;
+       qdev->lbq_buf_order = get_order(lbq_buf_len);
+
+       for (i = 0; i < qdev->rss_ring_count; i++) {
+               rx_ring = &qdev->rx_ring[i];
+               /* Set the new size. */
+               rx_ring->lbq_buf_size = lbq_buf_len;
+       }
+
+       status = ql_adapter_up(qdev);
+       if (status)
+               goto error;
+
+       return status;
+error:
+       netif_alert(qdev, ifup, qdev->ndev,
+                   "Driver up/down cycle failed, closing device.\n");
+       set_bit(QL_ADAPTER_UP, &qdev->flags);
+       dev_close(qdev->ndev);
+       return status;
+}
+
+static int qlge_change_mtu(struct net_device *ndev, int new_mtu)
+{
+       struct ql_adapter *qdev = netdev_priv(ndev);
+       int status;
+
+       if (ndev->mtu == 1500 && new_mtu == 9000) {
+               netif_err(qdev, ifup, qdev->ndev, "Changing to jumbo MTU.\n");
+       } else if (ndev->mtu == 9000 && new_mtu == 1500) {
+               netif_err(qdev, ifup, qdev->ndev, "Changing to normal MTU.\n");
+       } else
+               return -EINVAL;
+
+       queue_delayed_work(qdev->workqueue,
+                       &qdev->mpi_port_cfg_work, 3*HZ);
+
+       ndev->mtu = new_mtu;
+
+       if (!netif_running(qdev->ndev)) {
+               return 0;
+       }
+
+       status = ql_change_rx_buffers(qdev);
+       if (status) {
+               netif_err(qdev, ifup, qdev->ndev,
+                         "Changing MTU failed.\n");
+       }
+
+       return status;
+}
+
+static struct net_device_stats *qlge_get_stats(struct net_device
+                                              *ndev)
+{
+       struct ql_adapter *qdev = netdev_priv(ndev);
+       struct rx_ring *rx_ring = &qdev->rx_ring[0];
+       struct tx_ring *tx_ring = &qdev->tx_ring[0];
+       unsigned long pkts, mcast, dropped, errors, bytes;
+       int i;
+
+       /* Get RX stats. */
+       pkts = mcast = dropped = errors = bytes = 0;
+       for (i = 0; i < qdev->rss_ring_count; i++, rx_ring++) {
+               pkts += rx_ring->rx_packets;
+               bytes += rx_ring->rx_bytes;
+               dropped += rx_ring->rx_dropped;
+               errors += rx_ring->rx_errors;
+               mcast += rx_ring->rx_multicast;
+       }
+       ndev->stats.rx_packets = pkts;
+       ndev->stats.rx_bytes = bytes;
+       ndev->stats.rx_dropped = dropped;
+       ndev->stats.rx_errors = errors;
+       ndev->stats.multicast = mcast;
+
+       /* Get TX stats. */
+       pkts = errors = bytes = 0;
+       for (i = 0; i < qdev->tx_ring_count; i++, tx_ring++) {
+               pkts += tx_ring->tx_packets;
+               bytes += tx_ring->tx_bytes;
+               errors += tx_ring->tx_errors;
+       }
+       ndev->stats.tx_packets = pkts;
+       ndev->stats.tx_bytes = bytes;
+       ndev->stats.tx_errors = errors;
+       return &ndev->stats;
+}
+
+static void qlge_set_multicast_list(struct net_device *ndev)
+{
+       struct ql_adapter *qdev = netdev_priv(ndev);
+       struct netdev_hw_addr *ha;
+       int i, status;
+
+       status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
+       if (status)
+               return;
+       /*
+        * Set or clear promiscuous mode if a
+        * transition is taking place.
+        */
+       if (ndev->flags & IFF_PROMISC) {
+               if (!test_bit(QL_PROMISCUOUS, &qdev->flags)) {
+                       if (ql_set_routing_reg
+                           (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 1)) {
+                               netif_err(qdev, hw, qdev->ndev,
+                                         "Failed to set promiscuous mode.\n");
+                       } else {
+                               set_bit(QL_PROMISCUOUS, &qdev->flags);
+                       }
+               }
+       } else {
+               if (test_bit(QL_PROMISCUOUS, &qdev->flags)) {
+                       if (ql_set_routing_reg
+                           (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 0)) {
+                               netif_err(qdev, hw, qdev->ndev,
+                                         "Failed to clear promiscuous mode.\n");
+                       } else {
+                               clear_bit(QL_PROMISCUOUS, &qdev->flags);
+                       }
+               }
+       }
+
+       /*
+        * Set or clear all multicast mode if a
+        * transition is taking place.
+        */
+       if ((ndev->flags & IFF_ALLMULTI) ||
+           (netdev_mc_count(ndev) > MAX_MULTICAST_ENTRIES)) {
+               if (!test_bit(QL_ALLMULTI, &qdev->flags)) {
+                       if (ql_set_routing_reg
+                           (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 1)) {
+                               netif_err(qdev, hw, qdev->ndev,
+                                         "Failed to set all-multi mode.\n");
+                       } else {
+                               set_bit(QL_ALLMULTI, &qdev->flags);
+                       }
+               }
+       } else {
+               if (test_bit(QL_ALLMULTI, &qdev->flags)) {
+                       if (ql_set_routing_reg
+                           (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 0)) {
+                               netif_err(qdev, hw, qdev->ndev,
+                                         "Failed to clear all-multi mode.\n");
+                       } else {
+                               clear_bit(QL_ALLMULTI, &qdev->flags);
+                       }
+               }
+       }
+
+       if (!netdev_mc_empty(ndev)) {
+               status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
+               if (status)
+                       goto exit;
+               i = 0;
+               netdev_for_each_mc_addr(ha, ndev) {
+                       if (ql_set_mac_addr_reg(qdev, (u8 *) ha->addr,
+                                               MAC_ADDR_TYPE_MULTI_MAC, i)) {
+                               netif_err(qdev, hw, qdev->ndev,
+                                         "Failed to load multicast address.\n");
+                               ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
+                               goto exit;
+                       }
+                       i++;
+               }
+               ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
+               if (ql_set_routing_reg
+                   (qdev, RT_IDX_MCAST_MATCH_SLOT, RT_IDX_MCAST_MATCH, 1)) {
+                       netif_err(qdev, hw, qdev->ndev,
+                                 "Failed to set multicast match mode.\n");
+               } else {
+                       set_bit(QL_ALLMULTI, &qdev->flags);
+               }
+       }
+exit:
+       ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
+}
+
+static int qlge_set_mac_address(struct net_device *ndev, void *p)
+{
+       struct ql_adapter *qdev = netdev_priv(ndev);
+       struct sockaddr *addr = p;
+       int status;
+
+       if (!is_valid_ether_addr(addr->sa_data))
+               return -EADDRNOTAVAIL;
+       memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);
+       /* Update local copy of current mac address. */
+       memcpy(qdev->current_mac_addr, ndev->dev_addr, ndev->addr_len);
+
+       status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
+       if (status)
+               return status;
+       status = ql_set_mac_addr_reg(qdev, (u8 *) ndev->dev_addr,
+                       MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ);
+       if (status)
+               netif_err(qdev, hw, qdev->ndev, "Failed to load MAC address.\n");
+       ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
+       return status;
+}
+
+static void qlge_tx_timeout(struct net_device *ndev)
+{
+       struct ql_adapter *qdev = netdev_priv(ndev);
+       ql_queue_asic_error(qdev);
+}
+
+static void ql_asic_reset_work(struct work_struct *work)
+{
+       struct ql_adapter *qdev =
+           container_of(work, struct ql_adapter, asic_reset_work.work);
+       int status;
+       rtnl_lock();
+       status = ql_adapter_down(qdev);
+       if (status)
+               goto error;
+
+       status = ql_adapter_up(qdev);
+       if (status)
+               goto error;
+
+       /* Restore rx mode. */
+       clear_bit(QL_ALLMULTI, &qdev->flags);
+       clear_bit(QL_PROMISCUOUS, &qdev->flags);
+       qlge_set_multicast_list(qdev->ndev);
+
+       rtnl_unlock();
+       return;
+error:
+       netif_alert(qdev, ifup, qdev->ndev,
+                   "Driver up/down cycle failed, closing device\n");
+
+       set_bit(QL_ADAPTER_UP, &qdev->flags);
+       dev_close(qdev->ndev);
+       rtnl_unlock();
+}
+
+static const struct nic_operations qla8012_nic_ops = {
+       .get_flash              = ql_get_8012_flash_params,
+       .port_initialize        = ql_8012_port_initialize,
+};
+
+static const struct nic_operations qla8000_nic_ops = {
+       .get_flash              = ql_get_8000_flash_params,
+       .port_initialize        = ql_8000_port_initialize,
+};
+
+/* Find the pcie function number for the other NIC
+ * on this chip.  Since both NIC functions share a
+ * common firmware we have the lowest enabled function
+ * do any common work.  Examples would be resetting
+ * after a fatal firmware error, or doing a firmware
+ * coredump.
+ */
+static int ql_get_alt_pcie_func(struct ql_adapter *qdev)
+{
+       int status = 0;
+       u32 temp;
+       u32 nic_func1, nic_func2;
+
+       status = ql_read_mpi_reg(qdev, MPI_TEST_FUNC_PORT_CFG,
+                       &temp);
+       if (status)
+               return status;
+
+       nic_func1 = ((temp >> MPI_TEST_NIC1_FUNC_SHIFT) &
+                       MPI_TEST_NIC_FUNC_MASK);
+       nic_func2 = ((temp >> MPI_TEST_NIC2_FUNC_SHIFT) &
+                       MPI_TEST_NIC_FUNC_MASK);
+
+       if (qdev->func == nic_func1)
+               qdev->alt_func = nic_func2;
+       else if (qdev->func == nic_func2)
+               qdev->alt_func = nic_func1;
+       else
+               status = -EIO;
+
+       return status;
+}
+
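+/* Work out which PCIe function and port this instance is and select
+ * the matching semaphore mask, mailbox addresses and chip-specific
+ * nic_ops.
+ */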
+static int ql_get_board_info(struct ql_adapter *qdev)
+{
+       int status;
+       qdev->func =
+           (ql_read32(qdev, STS) & STS_FUNC_ID_MASK) >> STS_FUNC_ID_SHIFT;
+       if (qdev->func > 3)
+               return -EIO;
+
+       status = ql_get_alt_pcie_func(qdev);
+       if (status)
+               return status;
+
+       qdev->port = (qdev->func < qdev->alt_func) ? 0 : 1;
+       if (qdev->port) {
+               qdev->xg_sem_mask = SEM_XGMAC1_MASK;
+               qdev->port_link_up = STS_PL1;
+               qdev->port_init = STS_PI1;
+               qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBI;
+               qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBO;
+       } else {
+               qdev->xg_sem_mask = SEM_XGMAC0_MASK;
+               qdev->port_link_up = STS_PL0;
+               qdev->port_init = STS_PI0;
+               qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBI;
+               qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBO;
+       }
+       qdev->chip_rev_id = ql_read32(qdev, REV_ID);
+       qdev->device_id = qdev->pdev->device;
+       if (qdev->device_id == QLGE_DEVICE_ID_8012)
+               qdev->nic_ops = &qla8012_nic_ops;
+       else if (qdev->device_id == QLGE_DEVICE_ID_8000)
+               qdev->nic_ops = &qla8000_nic_ops;
+       return status;
+}
+
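+/* Undo ql_init_device(): destroy the workqueue, unmap the register and
+ * doorbell BARs, free the coredump buffer and release the PCI regions.
+ */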
+static void ql_release_all(struct pci_dev *pdev)
+{
+       struct net_device *ndev = pci_get_drvdata(pdev);
+       struct ql_adapter *qdev = netdev_priv(ndev);
+
+       if (qdev->workqueue) {
+               destroy_workqueue(qdev->workqueue);
+               qdev->workqueue = NULL;
+       }
+
+       if (qdev->reg_base)
+               iounmap(qdev->reg_base);
+       if (qdev->doorbell_area)
+               iounmap(qdev->doorbell_area);
+       vfree(qdev->mpi_coredump);
+       pci_release_regions(pdev);
+}
+
+static int ql_init_device(struct pci_dev *pdev, struct net_device *ndev,
+                         int cards_found)
+{
+       struct ql_adapter *qdev = netdev_priv(ndev);
+       int err = 0;
+
+       memset((void *)qdev, 0, sizeof(*qdev));
+       err = pci_enable_device(pdev);
+       if (err) {
+               dev_err(&pdev->dev, "PCI device enable failed.\n");
+               return err;
+       }
+
+       qdev->ndev = ndev;
+       qdev->pdev = pdev;
+       pci_set_drvdata(pdev, ndev);
+
+       /* Set PCIe read request size */
+       err = pcie_set_readrq(pdev, 4096);
+       if (err) {
+               dev_err(&pdev->dev, "Set readrq failed.\n");
+               goto err_out1;
+       }
+
+       err = pci_request_regions(pdev, DRV_NAME);
+       if (err) {
+               dev_err(&pdev->dev, "PCI region request failed.\n");
+               return err;
+       }
+
+       pci_set_master(pdev);
+       if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
+               set_bit(QL_DMA64, &qdev->flags);
+               err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
+       } else {
+               err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+               if (!err)
+                      err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+       }
+
+       if (err) {
+               dev_err(&pdev->dev, "No usable DMA configuration.\n");
+               goto err_out2;
+       }
+
+       /* Set PCIe reset type for EEH to fundamental. */
+       pdev->needs_freset = 1;
+       pci_save_state(pdev);
+       qdev->reg_base =
+           ioremap_nocache(pci_resource_start(pdev, 1),
+                           pci_resource_len(pdev, 1));
+       if (!qdev->reg_base) {
+               dev_err(&pdev->dev, "Register mapping failed.\n");
+               err = -ENOMEM;
+               goto err_out2;
+       }
+
+       qdev->doorbell_area_size = pci_resource_len(pdev, 3);
+       qdev->doorbell_area =
+           ioremap_nocache(pci_resource_start(pdev, 3),
+                           pci_resource_len(pdev, 3));
+       if (!qdev->doorbell_area) {
+               dev_err(&pdev->dev, "Doorbell register mapping failed.\n");
+               err = -ENOMEM;
+               goto err_out2;
+       }
+
+       err = ql_get_board_info(qdev);
+       if (err) {
+               dev_err(&pdev->dev, "Register access failed.\n");
+               err = -EIO;
+               goto err_out2;
+       }
+       qdev->msg_enable = netif_msg_init(debug, default_msg);
+       spin_lock_init(&qdev->hw_lock);
+       spin_lock_init(&qdev->stats_lock);
+
+       if (qlge_mpi_coredump) {
+               qdev->mpi_coredump =
+                       vmalloc(sizeof(struct ql_mpi_coredump));
+               if (qdev->mpi_coredump == NULL) {
+                       err = -ENOMEM;
+                       goto err_out2;
+               }
+               if (qlge_force_coredump)
+                       set_bit(QL_FRC_COREDUMP, &qdev->flags);
+       }
+       /* make sure the EEPROM is good */
+       err = qdev->nic_ops->get_flash(qdev);
+       if (err) {
+               dev_err(&pdev->dev, "Invalid FLASH.\n");
+               goto err_out2;
+       }
+
+       /* Keep local copy of current mac address. */
+       memcpy(qdev->current_mac_addr, ndev->dev_addr, ndev->addr_len);
+
+       /* Set up the default ring sizes. */
+       qdev->tx_ring_size = NUM_TX_RING_ENTRIES;
+       qdev->rx_ring_size = NUM_RX_RING_ENTRIES;
+
+       /* Set up the coalescing parameters. */
+       qdev->rx_coalesce_usecs = DFLT_COALESCE_WAIT;
+       qdev->tx_coalesce_usecs = DFLT_COALESCE_WAIT;
+       qdev->rx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT;
+       qdev->tx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT;
+
+       /*
+        * Set up the operating parameters.
+        */
+       qdev->workqueue = alloc_ordered_workqueue(ndev->name, WQ_MEM_RECLAIM);
+       INIT_DELAYED_WORK(&qdev->asic_reset_work, ql_asic_reset_work);
+       INIT_DELAYED_WORK(&qdev->mpi_reset_work, ql_mpi_reset_work);
+       INIT_DELAYED_WORK(&qdev->mpi_work, ql_mpi_work);
+       INIT_DELAYED_WORK(&qdev->mpi_port_cfg_work, ql_mpi_port_cfg_work);
+       INIT_DELAYED_WORK(&qdev->mpi_idc_work, ql_mpi_idc_work);
+       INIT_DELAYED_WORK(&qdev->mpi_core_to_log, ql_mpi_core_to_log);
+       init_completion(&qdev->ide_completion);
+       mutex_init(&qdev->mpi_mutex);
+
+       if (!cards_found) {
+               dev_info(&pdev->dev, "%s\n", DRV_STRING);
+               dev_info(&pdev->dev, "Driver name: %s, Version: %s.\n",
+                        DRV_NAME, DRV_VERSION);
+       }
+       return 0;
+err_out2:
+       ql_release_all(pdev);
+err_out1:
+       pci_disable_device(pdev);
+       return err;
+}
+
+static const struct net_device_ops qlge_netdev_ops = {
+       .ndo_open               = qlge_open,
+       .ndo_stop               = qlge_close,
+       .ndo_start_xmit         = qlge_send,
+       .ndo_change_mtu         = qlge_change_mtu,
+       .ndo_get_stats          = qlge_get_stats,
+       .ndo_set_rx_mode        = qlge_set_multicast_list,
+       .ndo_set_mac_address    = qlge_set_mac_address,
+       .ndo_validate_addr      = eth_validate_addr,
+       .ndo_tx_timeout         = qlge_tx_timeout,
+       .ndo_fix_features       = qlge_fix_features,
+       .ndo_set_features       = qlge_set_features,
+       .ndo_vlan_rx_add_vid    = qlge_vlan_rx_add_vid,
+       .ndo_vlan_rx_kill_vid   = qlge_vlan_rx_kill_vid,
+};
+
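+/* Periodic register read so a dead PCI bus is noticed by EEH.  Re-arms
+ * itself every 5 seconds unless the channel has already gone offline.
+ */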
+static void ql_timer(unsigned long data)
+{
+       struct ql_adapter *qdev = (struct ql_adapter *)data;
+       u32 var = 0;
+
+       var = ql_read32(qdev, STS);
+       if (pci_channel_offline(qdev->pdev)) {
+               netif_err(qdev, ifup, qdev->ndev, "EEH STS = 0x%.08x.\n", var);
+               return;
+       }
+
+       mod_timer(&qdev->timer, jiffies + (5*HZ));
+}
+
+static int qlge_probe(struct pci_dev *pdev,
+                     const struct pci_device_id *pci_entry)
+{
+       struct net_device *ndev = NULL;
+       struct ql_adapter *qdev = NULL;
+       static int cards_found = 0;
+       int err = 0;
+
+       ndev = alloc_etherdev_mq(sizeof(struct ql_adapter),
+                       min(MAX_CPUS, netif_get_num_default_rss_queues()));
+       if (!ndev)
+               return -ENOMEM;
+
+       err = ql_init_device(pdev, ndev, cards_found);
+       if (err < 0) {
+               free_netdev(ndev);
+               return err;
+       }
+
+       qdev = netdev_priv(ndev);
+       SET_NETDEV_DEV(ndev, &pdev->dev);
+       ndev->hw_features = NETIF_F_SG |
+                           NETIF_F_IP_CSUM |
+                           NETIF_F_TSO |
+                           NETIF_F_TSO_ECN |
+                           NETIF_F_HW_VLAN_CTAG_TX |
+                           NETIF_F_HW_VLAN_CTAG_RX |
+                           NETIF_F_HW_VLAN_CTAG_FILTER |
+                           NETIF_F_RXCSUM;
+       ndev->features = ndev->hw_features;
+       ndev->vlan_features = ndev->hw_features;
+       /* vlan gets same features (except vlan filter) */
+       ndev->vlan_features &= ~(NETIF_F_HW_VLAN_CTAG_FILTER |
+                                NETIF_F_HW_VLAN_CTAG_TX |
+                                NETIF_F_HW_VLAN_CTAG_RX);
+
+       if (test_bit(QL_DMA64, &qdev->flags))
+               ndev->features |= NETIF_F_HIGHDMA;
+
+       /*
+        * Set up net_device structure.
+        */
+       ndev->tx_queue_len = qdev->tx_ring_size;
+       ndev->irq = pdev->irq;
+
+       ndev->netdev_ops = &qlge_netdev_ops;
+       ndev->ethtool_ops = &qlge_ethtool_ops;
+       ndev->watchdog_timeo = 10 * HZ;
+
+       err = register_netdev(ndev);
+       if (err) {
+               dev_err(&pdev->dev, "net device registration failed.\n");
+               ql_release_all(pdev);
+               pci_disable_device(pdev);
+               free_netdev(ndev);
+               return err;
+       }
+       /* Start up the timer to trigger EEH if
+        * the bus goes dead
+        */
+       init_timer_deferrable(&qdev->timer);
+       qdev->timer.data = (unsigned long)qdev;
+       qdev->timer.function = ql_timer;
+       qdev->timer.expires = jiffies + (5*HZ);
+       add_timer(&qdev->timer);
+       ql_link_off(qdev);
+       ql_display_dev_info(ndev);
+       atomic_set(&qdev->lb_count, 0);
+       cards_found++;
+       return 0;
+}
+
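+/* Loopback-test hooks: thin wrappers around the normal transmit and
+ * inbound completion-cleaning paths.
+ */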
+netdev_tx_t ql_lb_send(struct sk_buff *skb, struct net_device *ndev)
+{
+       return qlge_send(skb, ndev);
+}
+
+int ql_clean_lb_rx_ring(struct rx_ring *rx_ring, int budget)
+{
+       return ql_clean_inbound_rx_ring(rx_ring, budget);
+}
+
+static void qlge_remove(struct pci_dev *pdev)
+{
+       struct net_device *ndev = pci_get_drvdata(pdev);
+       struct ql_adapter *qdev = netdev_priv(ndev);
+       del_timer_sync(&qdev->timer);
+       ql_cancel_all_work_sync(qdev);
+       unregister_netdev(ndev);
+       ql_release_all(pdev);
+       pci_disable_device(pdev);
+       free_netdev(ndev);
+}
+
+/* Clean up resources without touching hardware. */
+static void ql_eeh_close(struct net_device *ndev)
+{
+       int i;
+       struct ql_adapter *qdev = netdev_priv(ndev);
+
+       if (netif_carrier_ok(ndev)) {
+               netif_carrier_off(ndev);
+               netif_stop_queue(ndev);
+       }
+
+       /* Disabling the timer */
+       ql_cancel_all_work_sync(qdev);
+
+       for (i = 0; i < qdev->rss_ring_count; i++)
+               netif_napi_del(&qdev->rx_ring[i].napi);
+
+       clear_bit(QL_ADAPTER_UP, &qdev->flags);
+       ql_tx_ring_clean(qdev);
+       ql_free_rx_buffers(qdev);
+       ql_release_adapter_resources(qdev);
+}
+
+/*
+ * This callback is called by the PCI subsystem whenever
+ * a PCI bus error is detected.
+ */
+static pci_ers_result_t qlge_io_error_detected(struct pci_dev *pdev,
+                                              enum pci_channel_state state)
+{
+       struct net_device *ndev = pci_get_drvdata(pdev);
+       struct ql_adapter *qdev = netdev_priv(ndev);
+
+       switch (state) {
+       case pci_channel_io_normal:
+               return PCI_ERS_RESULT_CAN_RECOVER;
+       case pci_channel_io_frozen:
+               netif_device_detach(ndev);
+               del_timer_sync(&qdev->timer);
+               if (netif_running(ndev))
+                       ql_eeh_close(ndev);
+               pci_disable_device(pdev);
+               return PCI_ERS_RESULT_NEED_RESET;
+       case pci_channel_io_perm_failure:
+               dev_err(&pdev->dev,
+                       "%s: pci_channel_io_perm_failure.\n", __func__);
+               del_timer_sync(&qdev->timer);
+               ql_eeh_close(ndev);
+               set_bit(QL_EEH_FATAL, &qdev->flags);
+               return PCI_ERS_RESULT_DISCONNECT;
+       }
+
+       /* Request a slot reset. */
+       return PCI_ERS_RESULT_NEED_RESET;
+}
+
+/*
+ * This callback is called after the PCI bus has been reset.
+ * Basically, this tries to restart the card from scratch.
+ * This is a shortened version of the device probe/discovery code,
+ * it resembles the first-half of the () routine.
+ */
+static pci_ers_result_t qlge_io_slot_reset(struct pci_dev *pdev)
+{
+       struct net_device *ndev = pci_get_drvdata(pdev);
+       struct ql_adapter *qdev = netdev_priv(ndev);
+
+       pdev->error_state = pci_channel_io_normal;
+
+       pci_restore_state(pdev);
+       if (pci_enable_device(pdev)) {
+               netif_err(qdev, ifup, qdev->ndev,
+                         "Cannot re-enable PCI device after reset.\n");
+               return PCI_ERS_RESULT_DISCONNECT;
+       }
+       pci_set_master(pdev);
+
+       if (ql_adapter_reset(qdev)) {
+               netif_err(qdev, drv, qdev->ndev, "reset FAILED!\n");
+               set_bit(QL_EEH_FATAL, &qdev->flags);
+               return PCI_ERS_RESULT_DISCONNECT;
+       }
+
+       return PCI_ERS_RESULT_RECOVERED;
+}
+
+static void qlge_io_resume(struct pci_dev *pdev)
+{
+       struct net_device *ndev = pci_get_drvdata(pdev);
+       struct ql_adapter *qdev = netdev_priv(ndev);
+       int err = 0;
+
+       if (netif_running(ndev)) {
+               err = qlge_open(ndev);
+               if (err) {
+                       netif_err(qdev, ifup, qdev->ndev,
+                                 "Device initialization failed after reset.\n");
+                       return;
+               }
+       } else {
+               netif_err(qdev, ifup, qdev->ndev,
+                         "Device was not running prior to EEH.\n");
+       }
+       mod_timer(&qdev->timer, jiffies + (5*HZ));
+       netif_device_attach(ndev);
+}
+
+static const struct pci_error_handlers qlge_err_handler = {
+       .error_detected = qlge_io_error_detected,
+       .slot_reset = qlge_io_slot_reset,
+       .resume = qlge_io_resume,
+};
+
+static int qlge_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+       struct net_device *ndev = pci_get_drvdata(pdev);
+       struct ql_adapter *qdev = netdev_priv(ndev);
+       int err;
+
+       netif_device_detach(ndev);
+       del_timer_sync(&qdev->timer);
+
+       if (netif_running(ndev)) {
+               err = ql_adapter_down(qdev);
+               if (!err)
+                       return err;
+       }
+
+       ql_wol(qdev);
+       err = pci_save_state(pdev);
+       if (err)
+               return err;
+
+       pci_disable_device(pdev);
+
+       pci_set_power_state(pdev, pci_choose_state(pdev, state));
+
+       return 0;
+}
+
+#ifdef CONFIG_PM
+static int qlge_resume(struct pci_dev *pdev)
+{
+       struct net_device *ndev = pci_get_drvdata(pdev);
+       struct ql_adapter *qdev = netdev_priv(ndev);
+       int err;
+
+       pci_set_power_state(pdev, PCI_D0);
+       pci_restore_state(pdev);
+       err = pci_enable_device(pdev);
+       if (err) {
+               netif_err(qdev, ifup, qdev->ndev, "Cannot enable PCI device from suspend\n");
+               return err;
+       }
+       pci_set_master(pdev);
+
+       pci_enable_wake(pdev, PCI_D3hot, 0);
+       pci_enable_wake(pdev, PCI_D3cold, 0);
+
+       if (netif_running(ndev)) {
+               err = ql_adapter_up(qdev);
+               if (err)
+                       return err;
+       }
+
+       mod_timer(&qdev->timer, jiffies + (5*HZ));
+       netif_device_attach(ndev);
+
+       return 0;
+}
+#endif /* CONFIG_PM */
+
+static void qlge_shutdown(struct pci_dev *pdev)
+{
+       qlge_suspend(pdev, PMSG_SUSPEND);
+}
+
+static struct pci_driver qlge_driver = {
+       .name = DRV_NAME,
+       .id_table = qlge_pci_tbl,
+       .probe = qlge_probe,
+       .remove = qlge_remove,
+#ifdef CONFIG_PM
+       .suspend = qlge_suspend,
+       .resume = qlge_resume,
+#endif
+       .shutdown = qlge_shutdown,
+       .err_handler = &qlge_err_handler
+};
+
+module_pci_driver(qlge_driver);
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_mpi.c b/drivers/net/ethernet/qlogic/qlge/qlge_mpi.c
new file mode 100644 (file)
index 0000000..7ad1460
--- /dev/null
@@ -0,0 +1,1284 @@
+#include "qlge.h"
+
+int ql_unpause_mpi_risc(struct ql_adapter *qdev)
+{
+       u32 tmp;
+
+       /* Un-pause the RISC */
+       tmp = ql_read32(qdev, CSR);
+       if (!(tmp & CSR_RP))
+               return -EIO;
+
+       ql_write32(qdev, CSR, CSR_CMD_CLR_PAUSE);
+       return 0;
+}
+
+int ql_pause_mpi_risc(struct ql_adapter *qdev)
+{
+       u32 tmp;
+       int count = UDELAY_COUNT;
+
+       /* Pause the RISC */
+       ql_write32(qdev, CSR, CSR_CMD_SET_PAUSE);
+       do {
+               tmp = ql_read32(qdev, CSR);
+               if (tmp & CSR_RP)
+                       break;
+               mdelay(UDELAY_DELAY);
+               count--;
+       } while (count);
+       return (count == 0) ? -ETIMEDOUT : 0;
+}
+
+int ql_hard_reset_mpi_risc(struct ql_adapter *qdev)
+{
+       u32 tmp;
+       int count = UDELAY_COUNT;
+
+       /* Reset the RISC */
+       ql_write32(qdev, CSR, CSR_CMD_SET_RST);
+       do {
+               tmp = ql_read32(qdev, CSR);
+               if (tmp & CSR_RR) {
+                       ql_write32(qdev, CSR, CSR_CMD_CLR_RST);
+                       break;
+               }
+               mdelay(UDELAY_DELAY);
+               count--;
+       } while (count);
+       return (count == 0) ? -ETIMEDOUT : 0;
+}
+
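+/* Read an MPI processor register indirectly through the
+ * PROC_ADDR/PROC_DATA window.
+ */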
+int ql_read_mpi_reg(struct ql_adapter *qdev, u32 reg, u32 *data)
+{
+       int status;
+       /* wait for reg to come ready */
+       status = ql_wait_reg_rdy(qdev, PROC_ADDR, PROC_ADDR_RDY, PROC_ADDR_ERR);
+       if (status)
+               goto exit;
+       /* set up for reg read */
+       ql_write32(qdev, PROC_ADDR, reg | PROC_ADDR_R);
+       /* wait for reg to come ready */
+       status = ql_wait_reg_rdy(qdev, PROC_ADDR, PROC_ADDR_RDY, PROC_ADDR_ERR);
+       if (status)
+               goto exit;
+       /* get the data */
+       *data = ql_read32(qdev, PROC_DATA);
+exit:
+       return status;
+}
+
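+/* Write an MPI processor register indirectly through the
+ * PROC_ADDR/PROC_DATA window.
+ */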
+int ql_write_mpi_reg(struct ql_adapter *qdev, u32 reg, u32 data)
+{
+       int status = 0;
+       /* wait for reg to come ready */
+       status = ql_wait_reg_rdy(qdev, PROC_ADDR, PROC_ADDR_RDY, PROC_ADDR_ERR);
+       if (status)
+               goto exit;
+       /* write the data to the data reg */
+       ql_write32(qdev, PROC_DATA, data);
+       /* trigger the write */
+       ql_write32(qdev, PROC_ADDR, reg);
+       /* wait for reg to come ready */
+       status = ql_wait_reg_rdy(qdev, PROC_ADDR, PROC_ADDR_RDY, PROC_ADDR_ERR);
+       if (status)
+               goto exit;
+exit:
+       return status;
+}
+
+int ql_soft_reset_mpi_risc(struct ql_adapter *qdev)
+{
+       int status;
+       status = ql_write_mpi_reg(qdev, 0x00001010, 1);
+       return status;
+}
+
+/* Determine if we are in charge of the firmware. This is
+ * true if we are the lower of the 2 NIC PCIe functions, or if
+ * we are the higher function and the lower function
+ * is not enabled.
+ */
+int ql_own_firmware(struct ql_adapter *qdev)
+{
+       u32 temp;
+
+       /* If we are the lower of the 2 NIC functions
+        * on the chip then we are responsible for
+        * core dump and firmware reset after an error.
+        */
+       if (qdev->func < qdev->alt_func)
+               return 1;
+
+       /* If we are the higher of the 2 NIC functions
+        * on the chip and the lower function is not
+        * enabled, then we are responsible for
+        * core dump and firmware reset after an error.
+        */
+       temp =  ql_read32(qdev, STS);
+       if (!(temp & (1 << (8 + qdev->alt_func))))
+               return 1;
+
+       return 0;
+
+}
+
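+/* Copy the firmware's outbound mailbox registers into mbcp->mbox_out[].
+ * Takes and releases the processor-register semaphore around the reads.
+ */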
+static int ql_get_mb_sts(struct ql_adapter *qdev, struct mbox_params *mbcp)
+{
+       int i, status;
+
+       status = ql_sem_spinlock(qdev, SEM_PROC_REG_MASK);
+       if (status)
+               return -EBUSY;
+       for (i = 0; i < mbcp->out_count; i++) {
+               status =
+                   ql_read_mpi_reg(qdev, qdev->mailbox_out + i,
+                                    &mbcp->mbox_out[i]);
+               if (status) {
+                       netif_err(qdev, drv, qdev->ndev, "Failed mailbox read.\n");
+                       break;
+               }
+       }
+       ql_sem_unlock(qdev, SEM_PROC_REG_MASK); /* does flush too */
+       return status;
+}
+
+/* Wait for a single mailbox command to complete.
+ * Returns zero on success.
+ */
+static int ql_wait_mbx_cmd_cmplt(struct ql_adapter *qdev)
+{
+       int count = 100;
+       u32 value;
+
+       do {
+               value = ql_read32(qdev, STS);
+               if (value & STS_PI)
+                       return 0;
+               mdelay(UDELAY_DELAY); /* 100ms */
+       } while (--count);
+       return -ETIMEDOUT;
+}
+
+/* Execute a single mailbox command.
+ * Caller must hold PROC_ADDR semaphore.
+ */
+static int ql_exec_mb_cmd(struct ql_adapter *qdev, struct mbox_params *mbcp)
+{
+       int i, status;
+
+       /*
+        * Make sure there's nothing pending.
+        * This shouldn't happen.
+        */
+       if (ql_read32(qdev, CSR) & CSR_HRI)
+               return -EIO;
+
+       status = ql_sem_spinlock(qdev, SEM_PROC_REG_MASK);
+       if (status)
+               return status;
+
+       /*
+        * Fill the outbound mailboxes.
+        */
+       for (i = 0; i < mbcp->in_count; i++) {
+               status = ql_write_mpi_reg(qdev, qdev->mailbox_in + i,
+                                               mbcp->mbox_in[i]);
+               if (status)
+                       goto end;
+       }
+       /*
+        * Wake up the MPI firmware.
+        */
+       ql_write32(qdev, CSR, CSR_CMD_SET_H2R_INT);
+end:
+       ql_sem_unlock(qdev, SEM_PROC_REG_MASK);
+       return status;
+}
+
+/* We are being asked by firmware to accept
+ * a change to the port.  This is only
+ * a change to max frame sizes (Tx/Rx), pause
+ * parameters, or loopback mode. We wake up a worker
+ * to handler processing this since a mailbox command
+ * will need to be sent to ACK the request.
+ */
+static int ql_idc_req_aen(struct ql_adapter *qdev)
+{
+       int status;
+       struct mbox_params *mbcp = &qdev->idc_mbc;
+
+       netif_err(qdev, drv, qdev->ndev, "Enter!\n");
+       /* Get the status data and start up a thread to
+        * handle the request.
+        */
+       mbcp = &qdev->idc_mbc;
+       mbcp->out_count = 4;
+       status = ql_get_mb_sts(qdev, mbcp);
+       if (status) {
+               netif_err(qdev, drv, qdev->ndev,
+                         "Could not read MPI, resetting ASIC!\n");
+               ql_queue_asic_error(qdev);
+       } else  {
+               /* Begin polled mode early so
+                * we don't get another interrupt
+                * when we leave mpi_worker.
+                */
+               ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16));
+               queue_delayed_work(qdev->workqueue, &qdev->mpi_idc_work, 0);
+       }
+       return status;
+}
+
+/* Process an inter-device event completion.
+ * If good, signal the caller's completion.
+ */
+static int ql_idc_cmplt_aen(struct ql_adapter *qdev)
+{
+       int status;
+       struct mbox_params *mbcp = &qdev->idc_mbc;
+       mbcp->out_count = 4;
+       status = ql_get_mb_sts(qdev, mbcp);
+       if (status) {
+               netif_err(qdev, drv, qdev->ndev,
+                         "Could not read MPI, resetting RISC!\n");
+               ql_queue_fw_error(qdev);
+       } else
+               /* Wake up the sleeping mpi_idc_work thread that is
+                * waiting for this event.
+                */
+               complete(&qdev->ide_completion);
+
+       return status;
+}
+
+static void ql_link_up(struct ql_adapter *qdev, struct mbox_params *mbcp)
+{
+       int status;
+       mbcp->out_count = 2;
+
+       status = ql_get_mb_sts(qdev, mbcp);
+       if (status) {
+               netif_err(qdev, drv, qdev->ndev,
+                         "%s: Could not get mailbox status.\n", __func__);
+               return;
+       }
+
+       qdev->link_status = mbcp->mbox_out[1];
+       netif_err(qdev, drv, qdev->ndev, "Link Up.\n");
+
+       /* If we're coming back from an IDC event
+        * then set up the CAM and frame routing.
+        */
+       if (test_bit(QL_CAM_RT_SET, &qdev->flags)) {
+               status = ql_cam_route_initialize(qdev);
+               if (status) {
+                       netif_err(qdev, ifup, qdev->ndev,
+                                 "Failed to init CAM/Routing tables.\n");
+                       return;
+               } else
+                       clear_bit(QL_CAM_RT_SET, &qdev->flags);
+       }
+
+       /* Queue up a worker to check the frame
+        * size information, and fix it if it's not
+        * to our liking.
+        */
+       if (!test_bit(QL_PORT_CFG, &qdev->flags)) {
+               netif_err(qdev, drv, qdev->ndev, "Queue Port Config Worker!\n");
+               set_bit(QL_PORT_CFG, &qdev->flags);
+               /* Begin polled mode early so
+                * we don't get another interrupt
+                * when we leave mpi_worker dpc.
+                */
+               ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16));
+               queue_delayed_work(qdev->workqueue,
+                               &qdev->mpi_port_cfg_work, 0);
+       }
+
+       ql_link_on(qdev);
+}
+
+static void ql_link_down(struct ql_adapter *qdev, struct mbox_params *mbcp)
+{
+       int status;
+
+       mbcp->out_count = 3;
+
+       status = ql_get_mb_sts(qdev, mbcp);
+       if (status)
+               netif_err(qdev, drv, qdev->ndev, "Link down AEN broken!\n");
+
+       ql_link_off(qdev);
+}
+
+static int ql_sfp_in(struct ql_adapter *qdev, struct mbox_params *mbcp)
+{
+       int status;
+
+       mbcp->out_count = 5;
+
+       status = ql_get_mb_sts(qdev, mbcp);
+       if (status)
+               netif_err(qdev, drv, qdev->ndev, "SFP in AEN broken!\n");
+       else
+               netif_err(qdev, drv, qdev->ndev, "SFP insertion detected.\n");
+
+       return status;
+}
+
+static int ql_sfp_out(struct ql_adapter *qdev, struct mbox_params *mbcp)
+{
+       int status;
+
+       mbcp->out_count = 1;
+
+       status = ql_get_mb_sts(qdev, mbcp);
+       if (status)
+               netif_err(qdev, drv, qdev->ndev, "SFP out AEN broken!\n");
+       else
+               netif_err(qdev, drv, qdev->ndev, "SFP removal detected.\n");
+
+       return status;
+}
+
+static int ql_aen_lost(struct ql_adapter *qdev, struct mbox_params *mbcp)
+{
+       int status;
+
+       mbcp->out_count = 6;
+
+       status = ql_get_mb_sts(qdev, mbcp);
+       if (status)
+               netif_err(qdev, drv, qdev->ndev, "Lost AEN broken!\n");
+       else {
+               int i;
+               netif_err(qdev, drv, qdev->ndev, "Lost AEN detected.\n");
+               for (i = 0; i < mbcp->out_count; i++)
+                       netif_err(qdev, drv, qdev->ndev, "mbox_out[%d] = 0x%.08x.\n",
+                                 i, mbcp->mbox_out[i]);
+
+       }
+
+       return status;
+}
+
+static void ql_init_fw_done(struct ql_adapter *qdev, struct mbox_params *mbcp)
+{
+       int status;
+
+       mbcp->out_count = 2;
+
+       status = ql_get_mb_sts(qdev, mbcp);
+       if (status) {
+               netif_err(qdev, drv, qdev->ndev, "Firmware did not initialize!\n");
+       } else {
+               netif_err(qdev, drv, qdev->ndev, "Firmware Revision  = 0x%.08x.\n",
+                         mbcp->mbox_out[1]);
+               qdev->fw_rev_id = mbcp->mbox_out[1];
+               status = ql_cam_route_initialize(qdev);
+               if (status)
+                       netif_err(qdev, ifup, qdev->ndev,
+                                 "Failed to init CAM/Routing tables.\n");
+       }
+}
+
+/* Process an async event and clear it unless it's an
+ * error condition.
+ *  This can get called iteratively from the mpi_work thread
+ *  when events arrive via an interrupt.
+ *  It also gets called when a mailbox command is polling for
+ *  its completion. */
+static int ql_mpi_handler(struct ql_adapter *qdev, struct mbox_params *mbcp)
+{
+       int status;
+       int orig_count = mbcp->out_count;
+
+       /* Just get mailbox zero for now. */
+       mbcp->out_count = 1;
+       status = ql_get_mb_sts(qdev, mbcp);
+       if (status) {
+               netif_err(qdev, drv, qdev->ndev,
+                         "Could not read MPI, resetting ASIC!\n");
+               ql_queue_asic_error(qdev);
+               goto end;
+       }
+
+       switch (mbcp->mbox_out[0]) {
+
+       /* This case is only active when we arrive here
+        * as a result of issuing a mailbox command to
+        * the firmware.
+        */
+       case MB_CMD_STS_INTRMDT:
+       case MB_CMD_STS_GOOD:
+       case MB_CMD_STS_INVLD_CMD:
+       case MB_CMD_STS_XFC_ERR:
+       case MB_CMD_STS_CSUM_ERR:
+       case MB_CMD_STS_ERR:
+       case MB_CMD_STS_PARAM_ERR:
+               /* We can only get mailbox status if we're polling from an
+                * unfinished command.  Get the rest of the status data and
+                * return back to the caller.
+                * We only end up here when we're polling for a mailbox
+                * command completion.
+                */
+               mbcp->out_count = orig_count;
+               status = ql_get_mb_sts(qdev, mbcp);
+               return status;
+
+       /* We are being asked by firmware to accept
+        * a change to the port.  This is only
+        * a change to max frame sizes (Tx/Rx), pause
+        * parameters, or loopback mode.
+        */
+       case AEN_IDC_REQ:
+               status = ql_idc_req_aen(qdev);
+               break;
+
+       /* Process an inbound IDC event.
+        * This will happen when we're trying to
+        * change tx/rx max frame size, change pause
+        * parameters or loopback mode.
+        */
+       case AEN_IDC_CMPLT:
+       case AEN_IDC_EXT:
+               status = ql_idc_cmplt_aen(qdev);
+               break;
+
+       case AEN_LINK_UP:
+               ql_link_up(qdev, mbcp);
+               break;
+
+       case AEN_LINK_DOWN:
+               ql_link_down(qdev, mbcp);
+               break;
+
+       case AEN_FW_INIT_DONE:
+               /* If we're in the process of executing the firmware,
+                * then convert the status to normal mailbox status.
+                */
+               if (mbcp->mbox_in[0] == MB_CMD_EX_FW) {
+                       mbcp->out_count = orig_count;
+                       status = ql_get_mb_sts(qdev, mbcp);
+                       mbcp->mbox_out[0] = MB_CMD_STS_GOOD;
+                       return status;
+               }
+               ql_init_fw_done(qdev, mbcp);
+               break;
+
+       case AEN_AEN_SFP_IN:
+               ql_sfp_in(qdev, mbcp);
+               break;
+
+       case AEN_AEN_SFP_OUT:
+               ql_sfp_out(qdev, mbcp);
+               break;
+
+       /* This event can arrive at boot time or after an
+        * MPI reset if the firmware failed to initialize.
+        */
+       case AEN_FW_INIT_FAIL:
+               /* If we're in the process of executing the firmware,
+                * then convert the status to normal mailbox status.
+                */
+               if (mbcp->mbox_in[0] == MB_CMD_EX_FW) {
+                       mbcp->out_count = orig_count;
+                       status = ql_get_mb_sts(qdev, mbcp);
+                       mbcp->mbox_out[0] = MB_CMD_STS_ERR;
+                       return status;
+               }
+               netif_err(qdev, drv, qdev->ndev,
+                         "Firmware initialization failed.\n");
+               status = -EIO;
+               ql_queue_fw_error(qdev);
+               break;
+
+       case AEN_SYS_ERR:
+               netif_err(qdev, drv, qdev->ndev, "System Error.\n");
+               ql_queue_fw_error(qdev);
+               status = -EIO;
+               break;
+
+       case AEN_AEN_LOST:
+               ql_aen_lost(qdev, mbcp);
+               break;
+
+       case AEN_DCBX_CHG:
+               /* Need to support AEN 8110 */
+               break;
+       default:
+               netif_err(qdev, drv, qdev->ndev,
+                         "Unsupported AE %.08x.\n", mbcp->mbox_out[0]);
+               /* Clear the MPI firmware status. */
+       }
+end:
+       ql_write32(qdev, CSR, CSR_CMD_CLR_R2PCI_INT);
+       /* Restore the original mailbox count to
+        * what the caller asked for.  This can get
+        * changed when a mailbox command is waiting
+        * for a response and an AEN arrives and
+        * is handled.
+        */
+       mbcp->out_count = orig_count;
+       return status;
+}
+
+/* Execute a single mailbox command.
+ * mbcp is a pointer to an array of u32.  Each
+ * element in the array contains the value for its
+ * respective mailbox register.
+ */
+static int ql_mailbox_command(struct ql_adapter *qdev, struct mbox_params *mbcp)
+{
+       int status;
+       unsigned long count;
+
+       mutex_lock(&qdev->mpi_mutex);
+
+       /* Begin polled mode for MPI */
+       ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16));
+
+       /* Load the mailbox registers and wake up MPI RISC. */
+       status = ql_exec_mb_cmd(qdev, mbcp);
+       if (status)
+               goto end;
+
+
+       /* If we're generating a system error, then there's nothing
+        * to wait for.
+        */
+       if (mbcp->mbox_in[0] == MB_CMD_MAKE_SYS_ERR)
+               goto end;
+
+       /* Wait for the command to complete. We loop
+        * here because some AEN might arrive while
+        * we're waiting for the mailbox command to
+        * complete. If more than 5 seconds expire we can
+        * assume something is wrong. */
+       count = jiffies + HZ * MAILBOX_TIMEOUT;
+       do {
+               /* Wait for the interrupt to come in. */
+               status = ql_wait_mbx_cmd_cmplt(qdev);
+               if (status)
+                       continue;
+
+               /* Process the event.  If it's an AEN, it
+                * will be handled in-line or a worker
+                * will be spawned. If it's our completion
+                * we will catch it below.
+                */
+               status = ql_mpi_handler(qdev, mbcp);
+               if (status)
+                       goto end;
+
+               /* It's either the completion for our mailbox
+                * command complete or an AEN.  If it's our
+                * completion then get out.
+                */
+               if (((mbcp->mbox_out[0] & 0x0000f000) ==
+                                       MB_CMD_STS_GOOD) ||
+                       ((mbcp->mbox_out[0] & 0x0000f000) ==
+                                       MB_CMD_STS_INTRMDT))
+                       goto done;
+       } while (time_before(jiffies, count));
+
+       netif_err(qdev, drv, qdev->ndev,
+                 "Timed out waiting for mailbox complete.\n");
+       status = -ETIMEDOUT;
+       goto end;
+
+done:
+
+       /* Now we can clear the interrupt condition
+        * and look at our status.
+        */
+       ql_write32(qdev, CSR, CSR_CMD_CLR_R2PCI_INT);
+
+       if (((mbcp->mbox_out[0] & 0x0000f000) !=
+                                       MB_CMD_STS_GOOD) &&
+               ((mbcp->mbox_out[0] & 0x0000f000) !=
+                                       MB_CMD_STS_INTRMDT)) {
+               status = -EIO;
+       }
+end:
+       /* End polled mode for MPI */
+       ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16) | INTR_MASK_PI);
+       mutex_unlock(&qdev->mpi_mutex);
+       return status;
+}
+
+/* Get MPI firmware version. This will be used for
+ * driver banner and for ethtool info.
+ * Returns zero on success.
+ */
+int ql_mb_about_fw(struct ql_adapter *qdev)
+{
+       struct mbox_params mbc;
+       struct mbox_params *mbcp = &mbc;
+       int status = 0;
+
+       memset(mbcp, 0, sizeof(struct mbox_params));
+
+       mbcp->in_count = 1;
+       mbcp->out_count = 3;
+
+       mbcp->mbox_in[0] = MB_CMD_ABOUT_FW;
+
+       status = ql_mailbox_command(qdev, mbcp);
+       if (status)
+               return status;
+
+       if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) {
+               netif_err(qdev, drv, qdev->ndev,
+                         "Failed about firmware command\n");
+               status = -EIO;
+       }
+
+       /* Store the firmware version */
+       qdev->fw_rev_id = mbcp->mbox_out[1];
+
+       return status;
+}
+
+/* Get functional state for MPI firmware.
+ * Returns zero on success.
+ */
+int ql_mb_get_fw_state(struct ql_adapter *qdev)
+{
+       struct mbox_params mbc;
+       struct mbox_params *mbcp = &mbc;
+       int status = 0;
+
+       memset(mbcp, 0, sizeof(struct mbox_params));
+
+       mbcp->in_count = 1;
+       mbcp->out_count = 2;
+
+       mbcp->mbox_in[0] = MB_CMD_GET_FW_STATE;
+
+       status = ql_mailbox_command(qdev, mbcp);
+       if (status)
+               return status;
+
+       if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) {
+               netif_err(qdev, drv, qdev->ndev,
+                         "Failed Get Firmware State.\n");
+               status = -EIO;
+       }
+
+       /* If bit zero is set in mbx 1 then the firmware is
+        * running, but not initialized.  This should never
+        * happen.
+        */
+       if (mbcp->mbox_out[1] & 1) {
+               netif_err(qdev, drv, qdev->ndev,
+                         "Firmware waiting for initialization.\n");
+               status = -EIO;
+       }
+
+       return status;
+}
+
+/* Send an ACK mailbox command to the firmware to
+ * let it continue with the change.
+ */
+static int ql_mb_idc_ack(struct ql_adapter *qdev)
+{
+       struct mbox_params mbc;
+       struct mbox_params *mbcp = &mbc;
+       int status = 0;
+
+       memset(mbcp, 0, sizeof(struct mbox_params));
+
+       mbcp->in_count = 5;
+       mbcp->out_count = 1;
+
+       mbcp->mbox_in[0] = MB_CMD_IDC_ACK;
+       mbcp->mbox_in[1] = qdev->idc_mbc.mbox_out[1];
+       mbcp->mbox_in[2] = qdev->idc_mbc.mbox_out[2];
+       mbcp->mbox_in[3] = qdev->idc_mbc.mbox_out[3];
+       mbcp->mbox_in[4] = qdev->idc_mbc.mbox_out[4];
+
+       status = ql_mailbox_command(qdev, mbcp);
+       if (status)
+               return status;
+
+       if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) {
+               netif_err(qdev, drv, qdev->ndev, "Failed IDC ACK send.\n");
+               status = -EIO;
+       }
+       return status;
+}
+
+/* Set link settings and maximum frame size settings
+ * for the current port.
+ * Most likely will block.
+ */
+int ql_mb_set_port_cfg(struct ql_adapter *qdev)
+{
+       struct mbox_params mbc;
+       struct mbox_params *mbcp = &mbc;
+       int status = 0;
+
+       memset(mbcp, 0, sizeof(struct mbox_params));
+
+       mbcp->in_count = 3;
+       mbcp->out_count = 1;
+
+       mbcp->mbox_in[0] = MB_CMD_SET_PORT_CFG;
+       mbcp->mbox_in[1] = qdev->link_config;
+       mbcp->mbox_in[2] = qdev->max_frame_size;
+
+
+       status = ql_mailbox_command(qdev, mbcp);
+       if (status)
+               return status;
+
+       if (mbcp->mbox_out[0] == MB_CMD_STS_INTRMDT) {
+               netif_err(qdev, drv, qdev->ndev,
+                         "Port Config sent, wait for IDC.\n");
+       } else  if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) {
+               netif_err(qdev, drv, qdev->ndev,
+                         "Failed Set Port Configuration.\n");
+               status = -EIO;
+       }
+       return status;
+}
+
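+/* Ask the firmware to DMA 'size' words of RISC RAM, starting at 'addr',
+ * into the host buffer at req_dma.
+ */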
+static int ql_mb_dump_ram(struct ql_adapter *qdev, u64 req_dma, u32 addr,
+       u32 size)
+{
+       int status = 0;
+       struct mbox_params mbc;
+       struct mbox_params *mbcp = &mbc;
+
+       memset(mbcp, 0, sizeof(struct mbox_params));
+
+       mbcp->in_count = 9;
+       mbcp->out_count = 1;
+
+       mbcp->mbox_in[0] = MB_CMD_DUMP_RISC_RAM;
+       mbcp->mbox_in[1] = LSW(addr);
+       mbcp->mbox_in[2] = MSW(req_dma);
+       mbcp->mbox_in[3] = LSW(req_dma);
+       mbcp->mbox_in[4] = MSW(size);
+       mbcp->mbox_in[5] = LSW(size);
+       mbcp->mbox_in[6] = MSW(MSD(req_dma));
+       mbcp->mbox_in[7] = LSW(MSD(req_dma));
+       mbcp->mbox_in[8] = MSW(addr);
+
+
+       status = ql_mailbox_command(qdev, mbcp);
+       if (status)
+               return status;
+
+       if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) {
+               netif_err(qdev, drv, qdev->ndev, "Failed to dump risc RAM.\n");
+               status = -EIO;
+       }
+       return status;
+}
+
+/* Issue a mailbox command to dump RISC RAM. */
+int ql_dump_risc_ram_area(struct ql_adapter *qdev, void *buf,
+               u32 ram_addr, int word_count)
+{
+       int status;
+       char *my_buf;
+       dma_addr_t buf_dma;
+
+       my_buf = pci_alloc_consistent(qdev->pdev, word_count * sizeof(u32),
+                                       &buf_dma);
+       if (!my_buf)
+               return -EIO;
+
+       status = ql_mb_dump_ram(qdev, buf_dma, ram_addr, word_count);
+       if (!status)
+               memcpy(buf, my_buf, word_count * sizeof(u32));
+
+       pci_free_consistent(qdev->pdev, word_count * sizeof(u32), my_buf,
+                               buf_dma);
+       return status;
+}
+
+/* Get link settings and maximum frame size settings
+ * for the current port.
+ * Most likely will block.
+ */
+int ql_mb_get_port_cfg(struct ql_adapter *qdev)
+{
+       struct mbox_params mbc;
+       struct mbox_params *mbcp = &mbc;
+       int status = 0;
+
+       memset(mbcp, 0, sizeof(struct mbox_params));
+
+       mbcp->in_count = 1;
+       mbcp->out_count = 3;
+
+       mbcp->mbox_in[0] = MB_CMD_GET_PORT_CFG;
+
+       status = ql_mailbox_command(qdev, mbcp);
+       if (status)
+               return status;
+
+       if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) {
+               netif_err(qdev, drv, qdev->ndev,
+                         "Failed Get Port Configuration.\n");
+               status = -EIO;
+       } else  {
+               netif_printk(qdev, drv, KERN_DEBUG, qdev->ndev,
+                            "Passed Get Port Configuration.\n");
+               qdev->link_config = mbcp->mbox_out[1];
+               qdev->max_frame_size = mbcp->mbox_out[2];
+       }
+       return status;
+}
+
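+/* Tell the firmware which wake-on-LAN modes to arm. */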
+int ql_mb_wol_mode(struct ql_adapter *qdev, u32 wol)
+{
+       struct mbox_params mbc;
+       struct mbox_params *mbcp = &mbc;
+       int status;
+
+       memset(mbcp, 0, sizeof(struct mbox_params));
+
+       mbcp->in_count = 2;
+       mbcp->out_count = 1;
+
+       mbcp->mbox_in[0] = MB_CMD_SET_WOL_MODE;
+       mbcp->mbox_in[1] = wol;
+
+
+       status = ql_mailbox_command(qdev, mbcp);
+       if (status)
+               return status;
+
+       if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) {
+               netif_err(qdev, drv, qdev->ndev, "Failed to set WOL mode.\n");
+               status = -EIO;
+       }
+       return status;
+}
+
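+/* Program the magic-packet wake-up filter with our MAC address, or
+ * clear it when enable_wol is zero.
+ */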
+int ql_mb_wol_set_magic(struct ql_adapter *qdev, u32 enable_wol)
+{
+       struct mbox_params mbc;
+       struct mbox_params *mbcp = &mbc;
+       int status;
+       u8 *addr = qdev->ndev->dev_addr;
+
+       memset(mbcp, 0, sizeof(struct mbox_params));
+
+       mbcp->in_count = 8;
+       mbcp->out_count = 1;
+
+       mbcp->mbox_in[0] = MB_CMD_SET_WOL_MAGIC;
+       if (enable_wol) {
+               mbcp->mbox_in[1] = (u32)addr[0];
+               mbcp->mbox_in[2] = (u32)addr[1];
+               mbcp->mbox_in[3] = (u32)addr[2];
+               mbcp->mbox_in[4] = (u32)addr[3];
+               mbcp->mbox_in[5] = (u32)addr[4];
+               mbcp->mbox_in[6] = (u32)addr[5];
+               mbcp->mbox_in[7] = 0;
+       } else {
+               mbcp->mbox_in[1] = 0;
+               mbcp->mbox_in[2] = 1;
+               mbcp->mbox_in[3] = 1;
+               mbcp->mbox_in[4] = 1;
+               mbcp->mbox_in[5] = 1;
+               mbcp->mbox_in[6] = 1;
+               mbcp->mbox_in[7] = 0;
+       }
+
+       status = ql_mailbox_command(qdev, mbcp);
+       if (status)
+               return status;
+
+       if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) {
+               netif_err(qdev, drv, qdev->ndev, "Failed to set WOL mode.\n");
+               status = -EIO;
+       }
+       return status;
+}
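+
+/* Wake-on-LAN is enabled in two steps: program the magic-packet filter with
+ * the station address, then turn the WOL mode on.  Illustrative sketch only
+ * (the driver's real enable path lives elsewhere); MB_WOL_MAGIC_PKT and
+ * MB_WOL_MODE_ON are assumed to be the mode bits from qlge.h:
+ *
+ *	status = ql_mb_wol_set_magic(qdev, 1);
+ *	if (!status)
+ *		status = ql_mb_wol_mode(qdev, MB_WOL_MAGIC_PKT | MB_WOL_MODE_ON);
+ */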
+
+/* IDC - Inter Device Communication...
+ * Some firmware commands require the consent of the adjacent FCoE
+ * function.  This function waits for the OK, or a
+ * counter-request for a little more time.
+ * The firmware will complete the request if the other
+ * function doesn't respond.
+ */
+static int ql_idc_wait(struct ql_adapter *qdev)
+{
+       int status = -ETIMEDOUT;
+       long wait_time = 1 * HZ;
+       struct mbox_params *mbcp = &qdev->idc_mbc;
+
+       do {
+               /* Wait here for the command to complete
+                * via the IDC process.
+                */
+               wait_time =
+                       wait_for_completion_timeout(&qdev->ide_completion,
+                                                       wait_time);
+               if (!wait_time) {
+                       netif_err(qdev, drv, qdev->ndev, "IDC Timeout.\n");
+                       break;
+               }
+               /* Now examine the response from the IDC process.
+                * We might have a good completion or a request for
+                * more wait time.
+                */
+               if (mbcp->mbox_out[0] == AEN_IDC_EXT) {
+                       netif_err(qdev, drv, qdev->ndev,
+                                 "IDC Time Extension from function.\n");
+                       wait_time += (mbcp->mbox_out[1] >> 8) & 0x0000000f;
+               } else if (mbcp->mbox_out[0] == AEN_IDC_CMPLT) {
+                       netif_err(qdev, drv, qdev->ndev, "IDC Success.\n");
+                       status = 0;
+                       break;
+               } else {
+                       netif_err(qdev, drv, qdev->ndev,
+                                 "IDC: Invalid State 0x%.04x.\n",
+                                 mbcp->mbox_out[0]);
+                       status = -EIO;
+                       break;
+               }
+       } while (wait_time);
+
+       return status;
+}
+
+int ql_mb_set_led_cfg(struct ql_adapter *qdev, u32 led_config)
+{
+       struct mbox_params mbc;
+       struct mbox_params *mbcp = &mbc;
+       int status;
+
+       memset(mbcp, 0, sizeof(struct mbox_params));
+
+       mbcp->in_count = 2;
+       mbcp->out_count = 1;
+
+       mbcp->mbox_in[0] = MB_CMD_SET_LED_CFG;
+       mbcp->mbox_in[1] = led_config;
+
+       status = ql_mailbox_command(qdev, mbcp);
+       if (status)
+               return status;
+
+       if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) {
+               netif_err(qdev, drv, qdev->ndev,
+                         "Failed to set LED Configuration.\n");
+               status = -EIO;
+       }
+
+       return status;
+}
+
+int ql_mb_get_led_cfg(struct ql_adapter *qdev)
+{
+       struct mbox_params mbc;
+       struct mbox_params *mbcp = &mbc;
+       int status;
+
+       memset(mbcp, 0, sizeof(struct mbox_params));
+
+       mbcp->in_count = 1;
+       mbcp->out_count = 2;
+
+       mbcp->mbox_in[0] = MB_CMD_GET_LED_CFG;
+
+       status = ql_mailbox_command(qdev, mbcp);
+       if (status)
+               return status;
+
+       if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) {
+               netif_err(qdev, drv, qdev->ndev,
+                         "Failed to get LED Configuration.\n");
+               status = -EIO;
+       } else {
+               qdev->led_config = mbcp->mbox_out[1];
+       }
+
+       return status;
+}
+
+int ql_mb_set_mgmnt_traffic_ctl(struct ql_adapter *qdev, u32 control)
+{
+       struct mbox_params mbc;
+       struct mbox_params *mbcp = &mbc;
+       int status;
+
+       memset(mbcp, 0, sizeof(struct mbox_params));
+
+       mbcp->in_count = 1;
+       mbcp->out_count = 2;
+
+       mbcp->mbox_in[0] = MB_CMD_SET_MGMNT_TFK_CTL;
+       mbcp->mbox_in[1] = control;
+
+       status = ql_mailbox_command(qdev, mbcp);
+       if (status)
+               return status;
+
+       if (mbcp->mbox_out[0] == MB_CMD_STS_GOOD)
+               return status;
+
+       if (mbcp->mbox_out[0] == MB_CMD_STS_INVLD_CMD) {
+               netif_err(qdev, drv, qdev->ndev,
+                         "Command not supported by firmware.\n");
+               status = -EINVAL;
+       } else if (mbcp->mbox_out[0] == MB_CMD_STS_ERR) {
+               /* This indicates that the firmware is
+                * already in the state we are trying to
+                * change it to.
+                */
+               netif_err(qdev, drv, qdev->ndev,
+                         "Command parameters make no change.\n");
+       }
+       return status;
+}
+
+/* Returns a negative error code or the mailbox command status. */
+static int ql_mb_get_mgmnt_traffic_ctl(struct ql_adapter *qdev, u32 *control)
+{
+       struct mbox_params mbc;
+       struct mbox_params *mbcp = &mbc;
+       int status;
+
+       memset(mbcp, 0, sizeof(struct mbox_params));
+       *control = 0;
+
+       mbcp->in_count = 1;
+       mbcp->out_count = 1;
+
+       mbcp->mbox_in[0] = MB_CMD_GET_MGMNT_TFK_CTL;
+
+       status = ql_mailbox_command(qdev, mbcp);
+       if (status)
+               return status;
+
+       if (mbcp->mbox_out[0] == MB_CMD_STS_GOOD) {
+               *control = mbcp->mbox_in[1];
+               return status;
+       }
+
+       if (mbcp->mbox_out[0] == MB_CMD_STS_INVLD_CMD) {
+               netif_err(qdev, drv, qdev->ndev,
+                         "Command not supported by firmware.\n");
+               status = -EINVAL;
+       } else if (mbcp->mbox_out[0] == MB_CMD_STS_ERR) {
+               netif_err(qdev, drv, qdev->ndev,
+                         "Failed to get MPI traffic control.\n");
+               status = -EIO;
+       }
+       return status;
+}
+
+int ql_wait_fifo_empty(struct ql_adapter *qdev)
+{
+       int count = 5;
+       u32 mgmnt_fifo_empty;
+       u32 nic_fifo_empty;
+
+       do {
+               nic_fifo_empty = ql_read32(qdev, STS) & STS_NFE;
+               ql_mb_get_mgmnt_traffic_ctl(qdev, &mgmnt_fifo_empty);
+               mgmnt_fifo_empty &= MB_GET_MPI_TFK_FIFO_EMPTY;
+               if (nic_fifo_empty && mgmnt_fifo_empty)
+                       return 0;
+               msleep(100);
+       } while (count-- > 0);
+       return -ETIMEDOUT;
+}
+
+/* API called in work thread context to set new TX/RX
+ * maximum frame size values to match MTU.
+ */
+static int ql_set_port_cfg(struct ql_adapter *qdev)
+{
+       int status;
+
+       status = ql_mb_set_port_cfg(qdev);
+       if (status)
+               return status;
+       status = ql_idc_wait(qdev);
+       return status;
+}
+
+/* The following routines are worker threads that process
+ * events that may sleep waiting for completion.
+ */
+
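+/* These handlers run off qdev->workqueue; the MPI event path schedules them
+ * roughly as below (illustrative sketch, not the exact in-tree call site):
+ *
+ *	queue_delayed_work(qdev->workqueue, &qdev->mpi_port_cfg_work, 0);
+ */
+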
+/* This thread gets the maximum TX and RX frame size values
+ * from the firmware and, if necessary, changes them to match
+ * the MTU setting.
+ */
+void ql_mpi_port_cfg_work(struct work_struct *work)
+{
+       struct ql_adapter *qdev =
+           container_of(work, struct ql_adapter, mpi_port_cfg_work.work);
+       int status;
+
+       status = ql_mb_get_port_cfg(qdev);
+       if (status) {
+               netif_err(qdev, drv, qdev->ndev,
+                         "Bug: Failed to get port config data.\n");
+               goto err;
+       }
+
+       if (qdev->link_config & CFG_JUMBO_FRAME_SIZE &&
+                       qdev->max_frame_size ==
+                       CFG_DEFAULT_MAX_FRAME_SIZE)
+               goto end;
+
+       qdev->link_config |= CFG_JUMBO_FRAME_SIZE;
+       qdev->max_frame_size = CFG_DEFAULT_MAX_FRAME_SIZE;
+       status = ql_set_port_cfg(qdev);
+       if (status) {
+               netif_err(qdev, drv, qdev->ndev,
+                         "Bug: Failed to set port config data.\n");
+               goto err;
+       }
+end:
+       clear_bit(QL_PORT_CFG, &qdev->flags);
+       return;
+err:
+       ql_queue_fw_error(qdev);
+       goto end;
+}
+
+/* Process an inter-device request.  This is issued by
+ * the firmware in response to another function requesting
+ * a change to the port. We set a flag to indicate a change
+ * has been made and then send a mailbox command ACKing
+ * the change request.
+ */
+void ql_mpi_idc_work(struct work_struct *work)
+{
+       struct ql_adapter *qdev =
+           container_of(work, struct ql_adapter, mpi_idc_work.work);
+       int status;
+       struct mbox_params *mbcp = &qdev->idc_mbc;
+       u32 aen;
+       int timeout;
+
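+       /* mbox_out[1] packs the IDC event: the originating AEN opcode is in
+        * bits 31:16 and the requested ACK timeout count in bits 11:8; a
+        * non-zero timeout means the peer expects an explicit ACK.
+        */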
+       aen = mbcp->mbox_out[1] >> 16;
+       timeout = (mbcp->mbox_out[1] >> 8) & 0xf;
+
+       switch (aen) {
+       default:
+               netif_err(qdev, drv, qdev->ndev,
+                         "Bug: Unhandled IDC action.\n");
+               break;
+       case MB_CMD_PORT_RESET:
+       case MB_CMD_STOP_FW:
+               ql_link_off(qdev);
+               /* Fall through. */
+       case MB_CMD_SET_PORT_CFG:
+               /* Signal the resulting link up AEN
+                * that the frame routing and MAC address
+                * need to be set.
+                */
+               set_bit(QL_CAM_RT_SET, &qdev->flags);
+               /* Do ACK if required */
+               if (timeout) {
+                       status = ql_mb_idc_ack(qdev);
+                       if (status)
+                               netif_err(qdev, drv, qdev->ndev,
+                                         "Bug: No pending IDC!\n");
+               } else {
+                       netif_printk(qdev, drv, KERN_DEBUG, qdev->ndev,
+                                    "IDC ACK not required\n");
+                       status = 0; /* success */
+               }
+               break;
+
+       /* These sub-commands, issued by another (FCoE)
+        * function, request an operation on the shared
+        * resource (MPI environment).  We currently
+        * don't issue these ourselves, so we just
+        * ACK the request.
+        */
+       case MB_CMD_IOP_RESTART_MPI:
+       case MB_CMD_IOP_PREP_LINK_DOWN:
+               /* Drop the link, reload the routing
+                * table when link comes up.
+                */
+               ql_link_off(qdev);
+               set_bit(QL_CAM_RT_SET, &qdev->flags);
+               /* Fall through. */
+       case MB_CMD_IOP_DVR_START:
+       case MB_CMD_IOP_FLASH_ACC:
+       case MB_CMD_IOP_CORE_DUMP_MPI:
+       case MB_CMD_IOP_PREP_UPDATE_MPI:
+       case MB_CMD_IOP_COMP_UPDATE_MPI:
+       case MB_CMD_IOP_NONE:   /*  an IDC without params */
+               /* Do ACK if required */
+               if (timeout) {
+                       status = ql_mb_idc_ack(qdev);
+                       if (status)
+                               netif_err(qdev, drv, qdev->ndev,
+                                         "Bug: No pending IDC!\n");
+               } else {
+                       netif_printk(qdev, drv, KERN_DEBUG, qdev->ndev,
+                                    "IDC ACK not required\n");
+                       status = 0; /* success */
+               }
+               break;
+       }
+}
+
+void ql_mpi_work(struct work_struct *work)
+{
+       struct ql_adapter *qdev =
+           container_of(work, struct ql_adapter, mpi_work.work);
+       struct mbox_params mbc;
+       struct mbox_params *mbcp = &mbc;
+       int err = 0;
+
+       mutex_lock(&qdev->mpi_mutex);
+       /* Begin polled mode for MPI */
+       ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16));
+
+       while (ql_read32(qdev, STS) & STS_PI) {
+               memset(mbcp, 0, sizeof(struct mbox_params));
+               mbcp->out_count = 1;
+               /* Don't continue if an async event
+                * did not complete properly.
+                */
+               err = ql_mpi_handler(qdev, mbcp);
+               if (err)
+                       break;
+       }
+
+       /* End polled mode for MPI */
+       ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16) | INTR_MASK_PI);
+       mutex_unlock(&qdev->mpi_mutex);
+       ql_enable_completion_interrupt(qdev, 0);
+}
+
+void ql_mpi_reset_work(struct work_struct *work)
+{
+       struct ql_adapter *qdev =
+           container_of(work, struct ql_adapter, mpi_reset_work.work);
+       cancel_delayed_work_sync(&qdev->mpi_work);
+       cancel_delayed_work_sync(&qdev->mpi_port_cfg_work);
+       cancel_delayed_work_sync(&qdev->mpi_idc_work);
+       /* If we're not the dominant NIC function,
+        * then there is nothing to do.
+        */
+       if (!ql_own_firmware(qdev)) {
+               netif_err(qdev, drv, qdev->ndev, "Don't own firmware!\n");
+               return;
+       }
+
+       if (qdev->mpi_coredump && !ql_core_dump(qdev, qdev->mpi_coredump)) {
+               netif_err(qdev, drv, qdev->ndev, "Core is dumped!\n");
+               qdev->core_is_dumped = 1;
+               queue_delayed_work(qdev->workqueue,
+                       &qdev->mpi_core_to_log, 5 * HZ);
+       }
+       ql_soft_reset_mpi_risc(qdev);
+}
diff --git a/include/linux/qed/common_hsi.h b/include/linux/qed/common_hsi.h
new file mode 100644 (file)
index 0000000..40c0ada
--- /dev/null
@@ -0,0 +1,980 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015 QLogic Corporation
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#ifndef __COMMON_HSI__
+#define __COMMON_HSI__
+
+#define CORE_SPQE_PAGE_SIZE_BYTES                       4096
+
+#define X_FINAL_CLEANUP_AGG_INT 1
+#define NUM_OF_GLOBAL_QUEUES                            128
+
+/* Queue Zone sizes in bytes */
+#define TSTORM_QZONE_SIZE 8
+#define MSTORM_QZONE_SIZE 0
+#define USTORM_QZONE_SIZE 8
+#define XSTORM_QZONE_SIZE 8
+#define YSTORM_QZONE_SIZE 0
+#define PSTORM_QZONE_SIZE 0
+
+#define ETH_MAX_NUM_RX_QUEUES_PER_VF 16
+
+#define FW_MAJOR_VERSION       8
+#define FW_MINOR_VERSION       10
+#define FW_REVISION_VERSION    5
+#define FW_ENGINEERING_VERSION 0
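+
+/* A human-readable firmware version string can be composed from the four
+ * components above, e.g. (illustrative sketch; buf/len are a caller-supplied
+ * buffer and its size):
+ *
+ *	snprintf(buf, len, "%d.%d.%d.%d", FW_MAJOR_VERSION, FW_MINOR_VERSION,
+ *		 FW_REVISION_VERSION, FW_ENGINEERING_VERSION);
+ */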
+
+/***********************/
+/* COMMON HW CONSTANTS */
+/***********************/
+
+/* PCI functions */
+#define MAX_NUM_PORTS_K2       (4)
+#define MAX_NUM_PORTS_BB       (2)
+#define MAX_NUM_PORTS          (MAX_NUM_PORTS_K2)
+
+#define MAX_NUM_PFS_K2 (16)
+#define MAX_NUM_PFS_BB (8)
+#define MAX_NUM_PFS    (MAX_NUM_PFS_K2)
+#define MAX_NUM_OF_PFS_IN_CHIP (16) /* On both engines */
+
+#define MAX_NUM_VFS_K2 (192)
+#define MAX_NUM_VFS_BB (120)
+#define MAX_NUM_VFS    (MAX_NUM_VFS_K2)
+
+#define MAX_NUM_FUNCTIONS_BB   (MAX_NUM_PFS_BB + MAX_NUM_VFS_BB)
+#define MAX_NUM_FUNCTIONS      (MAX_NUM_PFS + MAX_NUM_VFS)
+
+#define MAX_FUNCTION_NUMBER_BB (MAX_NUM_PFS + MAX_NUM_VFS_BB)
+#define MAX_FUNCTION_NUMBER    (MAX_NUM_PFS + MAX_NUM_VFS)
+
+#define MAX_NUM_VPORTS_K2      (208)
+#define MAX_NUM_VPORTS_BB      (160)
+#define MAX_NUM_VPORTS         (MAX_NUM_VPORTS_K2)
+
+#define MAX_NUM_L2_QUEUES_K2   (320)
+#define MAX_NUM_L2_QUEUES_BB   (256)
+#define MAX_NUM_L2_QUEUES      (MAX_NUM_L2_QUEUES_K2)
+
+/* Traffic classes in network-facing blocks (PBF, BTB, NIG, BRB, PRS and QM) */
+#define NUM_PHYS_TCS_4PORT_K2  (4)
+#define NUM_OF_PHYS_TCS                (8)
+
+#define NUM_TCS_4PORT_K2       (NUM_PHYS_TCS_4PORT_K2 + 1)
+#define NUM_OF_TCS             (NUM_OF_PHYS_TCS + 1)
+
+#define LB_TC                  (NUM_OF_PHYS_TCS)
+
+/* Num of possible traffic priority values */
+#define NUM_OF_PRIO            (8)
+
+#define MAX_NUM_VOQS_K2                (NUM_TCS_4PORT_K2 * MAX_NUM_PORTS_K2)
+#define MAX_NUM_VOQS_BB                (NUM_OF_TCS * MAX_NUM_PORTS_BB)
+#define MAX_NUM_VOQS           (MAX_NUM_VOQS_K2)
+#define MAX_PHYS_VOQS          (NUM_OF_PHYS_TCS * MAX_NUM_PORTS_BB)
+
+/* CIDs */
+#define NUM_OF_CONNECTION_TYPES        (8)
+#define NUM_OF_LCIDS           (320)
+#define NUM_OF_LTIDS           (320)
+
+/*****************/
+/* CDU CONSTANTS */
+/*****************/
+
+#define CDU_SEG_TYPE_OFFSET_REG_TYPE_SHIFT              (17)
+#define CDU_SEG_TYPE_OFFSET_REG_OFFSET_MASK             (0x1ffff)
+
+/*****************/
+/* DQ CONSTANTS  */
+/*****************/
+
+/* DEMS */
+#define DQ_DEMS_LEGACY                 0
+
+/* XCM agg val selection */
+#define DQ_XCM_AGG_VAL_SEL_WORD2  0
+#define DQ_XCM_AGG_VAL_SEL_WORD3  1
+#define DQ_XCM_AGG_VAL_SEL_WORD4  2
+#define DQ_XCM_AGG_VAL_SEL_WORD5  3
+#define DQ_XCM_AGG_VAL_SEL_REG3   4
+#define DQ_XCM_AGG_VAL_SEL_REG4   5
+#define DQ_XCM_AGG_VAL_SEL_REG5   6
+#define DQ_XCM_AGG_VAL_SEL_REG6   7
+
+/* XCM agg val selection */
+#define        DQ_XCM_CORE_TX_BD_CONS_CMD      DQ_XCM_AGG_VAL_SEL_WORD3
+#define        DQ_XCM_CORE_TX_BD_PROD_CMD      DQ_XCM_AGG_VAL_SEL_WORD4
+#define        DQ_XCM_CORE_SPQ_PROD_CMD        DQ_XCM_AGG_VAL_SEL_WORD4
+#define        DQ_XCM_ETH_EDPM_NUM_BDS_CMD     DQ_XCM_AGG_VAL_SEL_WORD2
+#define        DQ_XCM_ETH_TX_BD_CONS_CMD       DQ_XCM_AGG_VAL_SEL_WORD3
+#define        DQ_XCM_ETH_TX_BD_PROD_CMD       DQ_XCM_AGG_VAL_SEL_WORD4
+#define        DQ_XCM_ETH_GO_TO_BD_CONS_CMD    DQ_XCM_AGG_VAL_SEL_WORD5
+
+/* UCM agg val selection (HW) */
+#define        DQ_UCM_AGG_VAL_SEL_WORD0        0
+#define        DQ_UCM_AGG_VAL_SEL_WORD1        1
+#define        DQ_UCM_AGG_VAL_SEL_WORD2        2
+#define        DQ_UCM_AGG_VAL_SEL_WORD3        3
+#define        DQ_UCM_AGG_VAL_SEL_REG0 4
+#define        DQ_UCM_AGG_VAL_SEL_REG1 5
+#define        DQ_UCM_AGG_VAL_SEL_REG2 6
+#define        DQ_UCM_AGG_VAL_SEL_REG3 7
+
+/* UCM agg val selection (FW) */
+#define DQ_UCM_ETH_PMD_TX_CONS_CMD     DQ_UCM_AGG_VAL_SEL_WORD2
+#define DQ_UCM_ETH_PMD_RX_CONS_CMD     DQ_UCM_AGG_VAL_SEL_WORD3
+#define DQ_UCM_ROCE_CQ_CONS_CMD                DQ_UCM_AGG_VAL_SEL_REG0
+#define DQ_UCM_ROCE_CQ_PROD_CMD                DQ_UCM_AGG_VAL_SEL_REG2
+
+/* TCM agg val selection (HW) */
+#define        DQ_TCM_AGG_VAL_SEL_WORD0        0
+#define        DQ_TCM_AGG_VAL_SEL_WORD1        1
+#define        DQ_TCM_AGG_VAL_SEL_WORD2        2
+#define        DQ_TCM_AGG_VAL_SEL_WORD3        3
+#define        DQ_TCM_AGG_VAL_SEL_REG1         4
+#define        DQ_TCM_AGG_VAL_SEL_REG2         5
+#define        DQ_TCM_AGG_VAL_SEL_REG6         6
+#define        DQ_TCM_AGG_VAL_SEL_REG9         7
+
+/* TCM agg val selection (FW) */
+#define DQ_TCM_L2B_BD_PROD_CMD \
+       DQ_TCM_AGG_VAL_SEL_WORD1
+#define DQ_TCM_ROCE_RQ_PROD_CMD        \
+       DQ_TCM_AGG_VAL_SEL_WORD0
+
+/* XCM agg counter flag selection */
+#define        DQ_XCM_AGG_FLG_SHIFT_BIT14      0
+#define        DQ_XCM_AGG_FLG_SHIFT_BIT15      1
+#define        DQ_XCM_AGG_FLG_SHIFT_CF12       2
+#define        DQ_XCM_AGG_FLG_SHIFT_CF13       3
+#define        DQ_XCM_AGG_FLG_SHIFT_CF18       4
+#define        DQ_XCM_AGG_FLG_SHIFT_CF19       5
+#define        DQ_XCM_AGG_FLG_SHIFT_CF22       6
+#define        DQ_XCM_AGG_FLG_SHIFT_CF23       7
+
+/* XCM agg counter flag selection */
+#define DQ_XCM_CORE_DQ_CF_CMD          (1 << DQ_XCM_AGG_FLG_SHIFT_CF18)
+#define DQ_XCM_CORE_TERMINATE_CMD      (1 << DQ_XCM_AGG_FLG_SHIFT_CF19)
+#define DQ_XCM_CORE_SLOW_PATH_CMD      (1 << DQ_XCM_AGG_FLG_SHIFT_CF22)
+#define DQ_XCM_ETH_DQ_CF_CMD           (1 << DQ_XCM_AGG_FLG_SHIFT_CF18)
+#define DQ_XCM_ETH_TERMINATE_CMD       (1 << DQ_XCM_AGG_FLG_SHIFT_CF19)
+#define DQ_XCM_ETH_SLOW_PATH_CMD       (1 << DQ_XCM_AGG_FLG_SHIFT_CF22)
+#define DQ_XCM_ETH_TPH_EN_CMD          (1 << DQ_XCM_AGG_FLG_SHIFT_CF23)
+
+/* UCM agg counter flag selection (HW) */
+#define        DQ_UCM_AGG_FLG_SHIFT_CF0        0
+#define        DQ_UCM_AGG_FLG_SHIFT_CF1        1
+#define        DQ_UCM_AGG_FLG_SHIFT_CF3        2
+#define        DQ_UCM_AGG_FLG_SHIFT_CF4        3
+#define        DQ_UCM_AGG_FLG_SHIFT_CF5        4
+#define        DQ_UCM_AGG_FLG_SHIFT_CF6        5
+#define        DQ_UCM_AGG_FLG_SHIFT_RULE0EN    6
+#define        DQ_UCM_AGG_FLG_SHIFT_RULE1EN    7
+
+/* UCM agg counter flag selection (FW) */
+#define DQ_UCM_ETH_PMD_TX_ARM_CMD      (1 << DQ_UCM_AGG_FLG_SHIFT_CF4)
+#define DQ_UCM_ETH_PMD_RX_ARM_CMD      (1 << DQ_UCM_AGG_FLG_SHIFT_CF5)
+
+#define        DQ_REGION_SHIFT (12)
+
+/* DPM */
+#define        DQ_DPM_WQE_BUFF_SIZE    (320)
+
+/* Conn type ranges */
+#define        DQ_CONN_TYPE_RANGE_SHIFT        (4)
+
+/*****************/
+/* QM CONSTANTS  */
+/*****************/
+
+/* number of TX queues in the QM */
+#define MAX_QM_TX_QUEUES_K2    512
+#define MAX_QM_TX_QUEUES_BB    448
+#define MAX_QM_TX_QUEUES       MAX_QM_TX_QUEUES_K2
+
+/* number of Other queues in the QM */
+#define MAX_QM_OTHER_QUEUES_BB 64
+#define MAX_QM_OTHER_QUEUES_K2 128
+#define MAX_QM_OTHER_QUEUES    MAX_QM_OTHER_QUEUES_K2
+
+/* number of queues in a PF queue group */
+#define QM_PF_QUEUE_GROUP_SIZE 8
+
+/* the size of a single queue element in bytes */
+#define QM_PQ_ELEMENT_SIZE                      4
+
+/* base number of Tx PQs in the CM PQ representation.
+ * should be used when storing PQ IDs in CM PQ registers and context
+ */
+#define CM_TX_PQ_BASE  0x200
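+
+/* e.g. (sketch) the value written into CM context for Tx PQ "pq_id" is
+ * CM_TX_PQ_BASE + pq_id.
+ */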
+
+/* QM registers data */
+#define QM_LINE_CRD_REG_WIDTH          16
+#define QM_LINE_CRD_REG_SIGN_BIT       (1 << (QM_LINE_CRD_REG_WIDTH - 1))
+#define QM_BYTE_CRD_REG_WIDTH          24
+#define QM_BYTE_CRD_REG_SIGN_BIT       (1 << (QM_BYTE_CRD_REG_WIDTH - 1))
+#define QM_WFQ_CRD_REG_WIDTH           32
+#define QM_WFQ_CRD_REG_SIGN_BIT                (1 << (QM_WFQ_CRD_REG_WIDTH - 1))
+#define QM_RL_CRD_REG_WIDTH            32
+#define QM_RL_CRD_REG_SIGN_BIT         (1 << (QM_RL_CRD_REG_WIDTH - 1))
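+
+/* Credit registers hold signed values narrower than a full dword; the sign
+ * bits above allow sign-extending a raw read, e.g. for the 24-bit byte
+ * credit (illustrative sketch, crd being a 32-bit value):
+ *
+ *	if (crd & QM_BYTE_CRD_REG_SIGN_BIT)
+ *		crd |= ~((1 << QM_BYTE_CRD_REG_WIDTH) - 1);
+ */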
+
+/*****************/
+/* CAU CONSTANTS */
+/*****************/
+
+#define CAU_FSM_ETH_RX  0
+#define CAU_FSM_ETH_TX  1
+
+/* Number of Protocol Indices per Status Block */
+#define PIS_PER_SB    12
+
+#define CAU_HC_STOPPED_STATE   3
+#define CAU_HC_DISABLE_STATE   4
+#define CAU_HC_ENABLE_STATE    0
+
+/*****************/
+/* IGU CONSTANTS */
+/*****************/
+
+#define MAX_SB_PER_PATH_K2     (368)
+#define MAX_SB_PER_PATH_BB     (288)
+#define MAX_TOT_SB_PER_PATH \
+       MAX_SB_PER_PATH_K2
+
+#define MAX_SB_PER_PF_MIMD     129
+#define MAX_SB_PER_PF_SIMD     64
+#define MAX_SB_PER_VF          64
+
+/* Memory addresses on the BAR for the IGU Sub Block */
+#define IGU_MEM_BASE                   0x0000
+
+#define IGU_MEM_MSIX_BASE              0x0000
+#define IGU_MEM_MSIX_UPPER             0x0101
+#define IGU_MEM_MSIX_RESERVED_UPPER    0x01ff
+
+#define IGU_MEM_PBA_MSIX_BASE          0x0200
+#define IGU_MEM_PBA_MSIX_UPPER         0x0202
+#define IGU_MEM_PBA_MSIX_RESERVED_UPPER        0x03ff
+
+#define IGU_CMD_INT_ACK_BASE           0x0400
+#define IGU_CMD_INT_ACK_UPPER          (IGU_CMD_INT_ACK_BASE + \
+                                        MAX_TOT_SB_PER_PATH -  \
+                                        1)
+#define IGU_CMD_INT_ACK_RESERVED_UPPER 0x05ff
+
+#define IGU_CMD_ATTN_BIT_UPD_UPPER     0x05f0
+#define IGU_CMD_ATTN_BIT_SET_UPPER     0x05f1
+#define IGU_CMD_ATTN_BIT_CLR_UPPER     0x05f2
+
+#define IGU_REG_SISR_MDPC_WMASK_UPPER          0x05f3
+#define IGU_REG_SISR_MDPC_WMASK_LSB_UPPER      0x05f4
+#define IGU_REG_SISR_MDPC_WMASK_MSB_UPPER      0x05f5
+#define IGU_REG_SISR_MDPC_WOMASK_UPPER         0x05f6
+
+#define IGU_CMD_PROD_UPD_BASE                  0x0600
+#define IGU_CMD_PROD_UPD_UPPER                 (IGU_CMD_PROD_UPD_BASE +\
+                                                MAX_TOT_SB_PER_PATH - \
+                                                1)
+#define IGU_CMD_PROD_UPD_RESERVED_UPPER                0x07ff
+
+/*****************/
+/* PXP CONSTANTS */
+/*****************/
+
+/* PTT and GTT */
+#define PXP_NUM_PF_WINDOWS             12
+#define PXP_PER_PF_ENTRY_SIZE          8
+#define PXP_NUM_GLOBAL_WINDOWS         243
+#define PXP_GLOBAL_ENTRY_SIZE          4
+#define PXP_ADMIN_WINDOW_ALLOWED_LENGTH        4
+#define PXP_PF_WINDOW_ADMIN_START      0
+#define PXP_PF_WINDOW_ADMIN_LENGTH     0x1000
+#define PXP_PF_WINDOW_ADMIN_END                (PXP_PF_WINDOW_ADMIN_START + \
+                                        PXP_PF_WINDOW_ADMIN_LENGTH - 1)
+#define PXP_PF_WINDOW_ADMIN_PER_PF_START       0
+#define PXP_PF_WINDOW_ADMIN_PER_PF_LENGTH      (PXP_NUM_PF_WINDOWS * \
+                                                PXP_PER_PF_ENTRY_SIZE)
+#define PXP_PF_WINDOW_ADMIN_PER_PF_END (PXP_PF_WINDOW_ADMIN_PER_PF_START + \
+                                        PXP_PF_WINDOW_ADMIN_PER_PF_LENGTH - 1)
+#define PXP_PF_WINDOW_ADMIN_GLOBAL_START       0x200
+#define PXP_PF_WINDOW_ADMIN_GLOBAL_LENGTH      (PXP_NUM_GLOBAL_WINDOWS * \
+                                                PXP_GLOBAL_ENTRY_SIZE)
+#define PXP_PF_WINDOW_ADMIN_GLOBAL_END \
+               (PXP_PF_WINDOW_ADMIN_GLOBAL_START + \
+                PXP_PF_WINDOW_ADMIN_GLOBAL_LENGTH - 1)
+#define PXP_PF_GLOBAL_PRETEND_ADDR     0x1f0
+#define PXP_PF_ME_OPAQUE_MASK_ADDR     0xf4
+#define PXP_PF_ME_OPAQUE_ADDR          0x1f8
+#define PXP_PF_ME_CONCRETE_ADDR                0x1fc
+
+#define PXP_EXTERNAL_BAR_PF_WINDOW_START       0x1000
+#define PXP_EXTERNAL_BAR_PF_WINDOW_NUM         PXP_NUM_PF_WINDOWS
+#define PXP_EXTERNAL_BAR_PF_WINDOW_SINGLE_SIZE 0x1000
+#define PXP_EXTERNAL_BAR_PF_WINDOW_LENGTH \
+       (PXP_EXTERNAL_BAR_PF_WINDOW_NUM * \
+        PXP_EXTERNAL_BAR_PF_WINDOW_SINGLE_SIZE)
+#define PXP_EXTERNAL_BAR_PF_WINDOW_END \
+       (PXP_EXTERNAL_BAR_PF_WINDOW_START + \
+        PXP_EXTERNAL_BAR_PF_WINDOW_LENGTH - 1)
+
+#define PXP_EXTERNAL_BAR_GLOBAL_WINDOW_START \
+       (PXP_EXTERNAL_BAR_PF_WINDOW_END + 1)
+#define PXP_EXTERNAL_BAR_GLOBAL_WINDOW_NUM             PXP_NUM_GLOBAL_WINDOWS
+#define PXP_EXTERNAL_BAR_GLOBAL_WINDOW_SINGLE_SIZE     0x1000
+#define PXP_EXTERNAL_BAR_GLOBAL_WINDOW_LENGTH \
+       (PXP_EXTERNAL_BAR_GLOBAL_WINDOW_NUM * \
+        PXP_EXTERNAL_BAR_GLOBAL_WINDOW_SINGLE_SIZE)
+#define PXP_EXTERNAL_BAR_GLOBAL_WINDOW_END \
+       (PXP_EXTERNAL_BAR_GLOBAL_WINDOW_START + \
+        PXP_EXTERNAL_BAR_GLOBAL_WINDOW_LENGTH - 1)
+
+#define PXP_VF_BAR0_START_IGU                   0
+#define PXP_VF_BAR0_IGU_LENGTH                  0x3000
+#define PXP_VF_BAR0_END_IGU                     (PXP_VF_BAR0_START_IGU + \
+                                                PXP_VF_BAR0_IGU_LENGTH - 1)
+
+#define PXP_VF_BAR0_START_DQ                    0x3000
+#define PXP_VF_BAR0_DQ_LENGTH                   0x200
+#define PXP_VF_BAR0_DQ_OPAQUE_OFFSET            0
+#define PXP_VF_BAR0_ME_OPAQUE_ADDRESS           (PXP_VF_BAR0_START_DQ +        \
+                                                PXP_VF_BAR0_DQ_OPAQUE_OFFSET)
+#define PXP_VF_BAR0_ME_CONCRETE_ADDRESS         (PXP_VF_BAR0_ME_OPAQUE_ADDRESS \
+                                                + 4)
+#define PXP_VF_BAR0_END_DQ                      (PXP_VF_BAR0_START_DQ +        \
+                                                PXP_VF_BAR0_DQ_LENGTH - 1)
+
+#define PXP_VF_BAR0_START_TSDM_ZONE_B           0x3200
+#define PXP_VF_BAR0_SDM_LENGTH_ZONE_B           0x200
+#define PXP_VF_BAR0_END_TSDM_ZONE_B             (PXP_VF_BAR0_START_TSDM_ZONE_B \
+                                                +                             \
+                                                PXP_VF_BAR0_SDM_LENGTH_ZONE_B \
+                                                - 1)
+
+#define PXP_VF_BAR0_START_MSDM_ZONE_B           0x3400
+#define PXP_VF_BAR0_END_MSDM_ZONE_B             (PXP_VF_BAR0_START_MSDM_ZONE_B \
+                                                +                             \
+                                                PXP_VF_BAR0_SDM_LENGTH_ZONE_B \
+                                                - 1)
+
+#define PXP_VF_BAR0_START_USDM_ZONE_B           0x3600
+#define PXP_VF_BAR0_END_USDM_ZONE_B             (PXP_VF_BAR0_START_USDM_ZONE_B \
+                                                +                             \
+                                                PXP_VF_BAR0_SDM_LENGTH_ZONE_B \
+                                                - 1)
+
+#define PXP_VF_BAR0_START_XSDM_ZONE_B           0x3800
+#define PXP_VF_BAR0_END_XSDM_ZONE_B             (PXP_VF_BAR0_START_XSDM_ZONE_B \
+                                                +                             \
+                                                PXP_VF_BAR0_SDM_LENGTH_ZONE_B \
+                                                - 1)
+
+#define PXP_VF_BAR0_START_YSDM_ZONE_B           0x3a00
+#define PXP_VF_BAR0_END_YSDM_ZONE_B             (PXP_VF_BAR0_START_YSDM_ZONE_B \
+                                                +                             \
+                                                PXP_VF_BAR0_SDM_LENGTH_ZONE_B \
+                                                - 1)
+
+#define PXP_VF_BAR0_START_PSDM_ZONE_B           0x3c00
+#define PXP_VF_BAR0_END_PSDM_ZONE_B             (PXP_VF_BAR0_START_PSDM_ZONE_B \
+                                                +                             \
+                                                PXP_VF_BAR0_SDM_LENGTH_ZONE_B \
+                                                - 1)
+
+#define PXP_VF_BAR0_START_SDM_ZONE_A            0x4000
+#define PXP_VF_BAR0_END_SDM_ZONE_A              0x10000
+
+#define PXP_VF_BAR0_GRC_WINDOW_LENGTH           32
+
+#define PXP_ILT_PAGE_SIZE_NUM_BITS_MIN         12
+#define PXP_ILT_BLOCK_FACTOR_MULTIPLIER                1024
+
+/* ILT Records */
+#define PXP_NUM_ILT_RECORDS_BB 7600
+#define PXP_NUM_ILT_RECORDS_K2 11000
+#define MAX_NUM_ILT_RECORDS MAX(PXP_NUM_ILT_RECORDS_BB, PXP_NUM_ILT_RECORDS_K2)
+
+#define SDM_COMP_TYPE_NONE              0
+#define SDM_COMP_TYPE_WAKE_THREAD       1
+#define SDM_COMP_TYPE_AGG_INT           2
+#define SDM_COMP_TYPE_CM                3
+#define SDM_COMP_TYPE_LOADER            4
+#define SDM_COMP_TYPE_PXP               5
+#define SDM_COMP_TYPE_INDICATE_ERROR    6
+#define SDM_COMP_TYPE_RELEASE_THREAD    7
+#define SDM_COMP_TYPE_RAM               8
+
+/******************/
+/* PBF CONSTANTS  */
+/******************/
+
+/* Number of PBF command queue lines. Each line is 32B. */
+#define PBF_MAX_CMD_LINES 3328
+
+/* Number of BTB blocks. Each block is 256B. */
+#define BTB_MAX_BLOCKS 1440
+
+/*****************/
+/* PRS CONSTANTS */
+/*****************/
+
+/* Async data KCQ CQE */
+struct async_data {
+       __le32  cid;
+       __le16  itid;
+       u8      error_code;
+       u8      fw_debug_param;
+};
+
+struct coalescing_timeset {
+       u8 value;
+#define        COALESCING_TIMESET_TIMESET_MASK         0x7F
+#define        COALESCING_TIMESET_TIMESET_SHIFT        0
+#define        COALESCING_TIMESET_VALID_MASK           0x1
+#define        COALESCING_TIMESET_VALID_SHIFT          7
+};
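+
+/* Throughout this file, packed fields are described by MASK/SHIFT pairs (the
+ * mask is given pre-shift).  A field is read by shifting first and masking
+ * after, e.g. (illustrative sketch; the qed driver provides GET_FIELD/
+ * SET_FIELD helpers for this pattern):
+ *
+ *	u8 timeset = (ts.value >> COALESCING_TIMESET_TIMESET_SHIFT) &
+ *		     COALESCING_TIMESET_TIMESET_MASK;
+ */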
+
+struct common_prs_pf_msg_info {
+       __le32 value;
+#define        COMMON_PRS_PF_MSG_INFO_NPAR_DEFAULT_PF_MASK     0x1
+#define        COMMON_PRS_PF_MSG_INFO_NPAR_DEFAULT_PF_SHIFT    0
+#define        COMMON_PRS_PF_MSG_INFO_FW_DEBUG_1_MASK          0x1
+#define        COMMON_PRS_PF_MSG_INFO_FW_DEBUG_1_SHIFT         1
+#define        COMMON_PRS_PF_MSG_INFO_FW_DEBUG_2_MASK          0x1
+#define        COMMON_PRS_PF_MSG_INFO_FW_DEBUG_2_SHIFT         2
+#define        COMMON_PRS_PF_MSG_INFO_FW_DEBUG_3_MASK          0x1
+#define        COMMON_PRS_PF_MSG_INFO_FW_DEBUG_3_SHIFT         3
+#define        COMMON_PRS_PF_MSG_INFO_RESERVED_MASK            0xFFFFFFF
+#define        COMMON_PRS_PF_MSG_INFO_RESERVED_SHIFT           4
+};
+
+struct common_queue_zone {
+       __le16 ring_drv_data_consumer;
+       __le16 reserved;
+};
+
+struct eth_rx_prod_data {
+       __le16 bd_prod;
+       __le16 cqe_prod;
+};
+
+struct regpair {
+       __le32  lo;
+       __le32  hi;
+};
+
+struct vf_pf_channel_eqe_data {
+       struct regpair msg_addr;
+};
+
+struct malicious_vf_eqe_data {
+       u8 vf_id;
+       u8 err_id;
+       __le16 reserved[3];
+};
+
+struct initial_cleanup_eqe_data {
+       u8 vf_id;
+       u8 reserved[7];
+};
+
+/* Event Data Union */
+union event_ring_data {
+       u8 bytes[8];
+       struct vf_pf_channel_eqe_data vf_pf_channel;
+       struct malicious_vf_eqe_data malicious_vf;
+       struct initial_cleanup_eqe_data vf_init_cleanup;
+};
+
+/* Event Ring Entry */
+struct event_ring_entry {
+       u8                      protocol_id;
+       u8                      opcode;
+       __le16                  reserved0;
+       __le16                  echo;
+       u8                      fw_return_code;
+       u8                      flags;
+#define EVENT_RING_ENTRY_ASYNC_MASK      0x1
+#define EVENT_RING_ENTRY_ASYNC_SHIFT     0
+#define EVENT_RING_ENTRY_RESERVED1_MASK  0x7F
+#define EVENT_RING_ENTRY_RESERVED1_SHIFT 1
+       union event_ring_data   data;
+};
+
+/* Multi function mode */
+enum mf_mode {
+       ERROR_MODE /* Unsupported mode */,
+       MF_OVLAN,
+       MF_NPAR,
+       MAX_MF_MODE
+};
+
+/* Per-protocol connection types */
+enum protocol_type {
+       PROTOCOLID_ISCSI,
+       PROTOCOLID_RESERVED2,
+       PROTOCOLID_ROCE,
+       PROTOCOLID_CORE,
+       PROTOCOLID_ETH,
+       PROTOCOLID_RESERVED4,
+       PROTOCOLID_RESERVED5,
+       PROTOCOLID_PREROCE,
+       PROTOCOLID_COMMON,
+       PROTOCOLID_RESERVED6,
+       MAX_PROTOCOL_TYPE
+};
+
+struct ustorm_eth_queue_zone {
+       struct coalescing_timeset int_coalescing_timeset;
+       u8 reserved[3];
+};
+
+struct ustorm_queue_zone {
+       struct ustorm_eth_queue_zone eth;
+       struct common_queue_zone common;
+};
+
+/* CAU producer index entry */
+struct cau_pi_entry {
+       u32 prod;
+#define CAU_PI_ENTRY_PROD_VAL_MASK    0xFFFF
+#define CAU_PI_ENTRY_PROD_VAL_SHIFT   0
+#define CAU_PI_ENTRY_PI_TIMESET_MASK  0x7F
+#define CAU_PI_ENTRY_PI_TIMESET_SHIFT 16
+#define CAU_PI_ENTRY_FSM_SEL_MASK     0x1
+#define CAU_PI_ENTRY_FSM_SEL_SHIFT    23
+#define CAU_PI_ENTRY_RESERVED_MASK    0xFF
+#define CAU_PI_ENTRY_RESERVED_SHIFT   24
+};
+
+/* CAU status block entry */
+struct cau_sb_entry {
+       u32 data;
+#define CAU_SB_ENTRY_SB_PROD_MASK      0xFFFFFF
+#define CAU_SB_ENTRY_SB_PROD_SHIFT     0
+#define CAU_SB_ENTRY_STATE0_MASK       0xF
+#define CAU_SB_ENTRY_STATE0_SHIFT      24
+#define CAU_SB_ENTRY_STATE1_MASK       0xF
+#define CAU_SB_ENTRY_STATE1_SHIFT      28
+       u32 params;
+#define CAU_SB_ENTRY_SB_TIMESET0_MASK  0x7F
+#define CAU_SB_ENTRY_SB_TIMESET0_SHIFT 0
+#define CAU_SB_ENTRY_SB_TIMESET1_MASK  0x7F
+#define CAU_SB_ENTRY_SB_TIMESET1_SHIFT 7
+#define CAU_SB_ENTRY_TIMER_RES0_MASK   0x3
+#define CAU_SB_ENTRY_TIMER_RES0_SHIFT  14
+#define CAU_SB_ENTRY_TIMER_RES1_MASK   0x3
+#define CAU_SB_ENTRY_TIMER_RES1_SHIFT  16
+#define CAU_SB_ENTRY_VF_NUMBER_MASK    0xFF
+#define CAU_SB_ENTRY_VF_NUMBER_SHIFT   18
+#define CAU_SB_ENTRY_VF_VALID_MASK     0x1
+#define CAU_SB_ENTRY_VF_VALID_SHIFT    26
+#define CAU_SB_ENTRY_PF_NUMBER_MASK    0xF
+#define CAU_SB_ENTRY_PF_NUMBER_SHIFT   27
+#define CAU_SB_ENTRY_TPH_MASK          0x1
+#define CAU_SB_ENTRY_TPH_SHIFT         31
+};
+
+/* core doorbell data */
+struct core_db_data {
+       u8 params;
+#define CORE_DB_DATA_DEST_MASK         0x3
+#define CORE_DB_DATA_DEST_SHIFT        0
+#define CORE_DB_DATA_AGG_CMD_MASK      0x3
+#define CORE_DB_DATA_AGG_CMD_SHIFT     2
+#define CORE_DB_DATA_BYPASS_EN_MASK    0x1
+#define CORE_DB_DATA_BYPASS_EN_SHIFT   4
+#define CORE_DB_DATA_RESERVED_MASK     0x1
+#define CORE_DB_DATA_RESERVED_SHIFT    5
+#define CORE_DB_DATA_AGG_VAL_SEL_MASK  0x3
+#define CORE_DB_DATA_AGG_VAL_SEL_SHIFT 6
+       u8      agg_flags;
+       __le16  spq_prod;
+};
+
+/* Enum of doorbell aggregative command selection */
+enum db_agg_cmd_sel {
+       DB_AGG_CMD_NOP,
+       DB_AGG_CMD_SET,
+       DB_AGG_CMD_ADD,
+       DB_AGG_CMD_MAX,
+       MAX_DB_AGG_CMD_SEL
+};
+
+/* Enum of doorbell destination */
+enum db_dest {
+       DB_DEST_XCM,
+       DB_DEST_UCM,
+       DB_DEST_TCM,
+       DB_NUM_DESTINATIONS,
+       MAX_DB_DEST
+};
+
+/* Structure for doorbell address, in legacy mode */
+struct db_legacy_addr {
+       __le32 addr;
+#define DB_LEGACY_ADDR_RESERVED0_MASK  0x3
+#define DB_LEGACY_ADDR_RESERVED0_SHIFT 0
+#define DB_LEGACY_ADDR_DEMS_MASK       0x7
+#define DB_LEGACY_ADDR_DEMS_SHIFT      2
+#define DB_LEGACY_ADDR_ICID_MASK       0x7FFFFFF
+#define DB_LEGACY_ADDR_ICID_SHIFT      5
+};
+
+/* Igu interrupt command */
+enum igu_int_cmd {
+       IGU_INT_ENABLE  = 0,
+       IGU_INT_DISABLE = 1,
+       IGU_INT_NOP     = 2,
+       IGU_INT_NOP2    = 3,
+       MAX_IGU_INT_CMD
+};
+
+/* IGU producer or consumer update command */
+struct igu_prod_cons_update {
+       u32 sb_id_and_flags;
+#define IGU_PROD_CONS_UPDATE_SB_INDEX_MASK        0xFFFFFF
+#define IGU_PROD_CONS_UPDATE_SB_INDEX_SHIFT       0
+#define IGU_PROD_CONS_UPDATE_UPDATE_FLAG_MASK     0x1
+#define IGU_PROD_CONS_UPDATE_UPDATE_FLAG_SHIFT    24
+#define IGU_PROD_CONS_UPDATE_ENABLE_INT_MASK      0x3
+#define IGU_PROD_CONS_UPDATE_ENABLE_INT_SHIFT     25
+#define IGU_PROD_CONS_UPDATE_SEGMENT_ACCESS_MASK  0x1
+#define IGU_PROD_CONS_UPDATE_SEGMENT_ACCESS_SHIFT 27
+#define IGU_PROD_CONS_UPDATE_TIMER_MASK_MASK      0x1
+#define IGU_PROD_CONS_UPDATE_TIMER_MASK_SHIFT     28
+#define IGU_PROD_CONS_UPDATE_RESERVED0_MASK       0x3
+#define IGU_PROD_CONS_UPDATE_RESERVED0_SHIFT      29
+#define IGU_PROD_CONS_UPDATE_COMMAND_TYPE_MASK    0x1
+#define IGU_PROD_CONS_UPDATE_COMMAND_TYPE_SHIFT   31
+       u32 reserved1;
+};
+
+/* Igu segments access for default status block only */
+enum igu_seg_access {
+       IGU_SEG_ACCESS_REG      = 0,
+       IGU_SEG_ACCESS_ATTN     = 1,
+       MAX_IGU_SEG_ACCESS
+};
+
+struct parsing_and_err_flags {
+       __le16 flags;
+#define PARSING_AND_ERR_FLAGS_L3TYPE_MASK                      0x3
+#define PARSING_AND_ERR_FLAGS_L3TYPE_SHIFT                     0
+#define PARSING_AND_ERR_FLAGS_L4PROTOCOL_MASK                  0x3
+#define PARSING_AND_ERR_FLAGS_L4PROTOCOL_SHIFT                 2
+#define PARSING_AND_ERR_FLAGS_IPV4FRAG_MASK                    0x1
+#define PARSING_AND_ERR_FLAGS_IPV4FRAG_SHIFT                   4
+#define PARSING_AND_ERR_FLAGS_TAG8021QEXIST_MASK               0x1
+#define PARSING_AND_ERR_FLAGS_TAG8021QEXIST_SHIFT              5
+#define PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_MASK        0x1
+#define PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_SHIFT       6
+#define PARSING_AND_ERR_FLAGS_TIMESYNCPKT_MASK                 0x1
+#define PARSING_AND_ERR_FLAGS_TIMESYNCPKT_SHIFT                7
+#define PARSING_AND_ERR_FLAGS_TIMESTAMPRECORDED_MASK           0x1
+#define PARSING_AND_ERR_FLAGS_TIMESTAMPRECORDED_SHIFT          8
+#define PARSING_AND_ERR_FLAGS_IPHDRERROR_MASK                  0x1
+#define PARSING_AND_ERR_FLAGS_IPHDRERROR_SHIFT                 9
+#define PARSING_AND_ERR_FLAGS_L4CHKSMERROR_MASK                0x1
+#define PARSING_AND_ERR_FLAGS_L4CHKSMERROR_SHIFT               10
+#define PARSING_AND_ERR_FLAGS_TUNNELEXIST_MASK                 0x1
+#define PARSING_AND_ERR_FLAGS_TUNNELEXIST_SHIFT                11
+#define PARSING_AND_ERR_FLAGS_TUNNEL8021QTAGEXIST_MASK         0x1
+#define PARSING_AND_ERR_FLAGS_TUNNEL8021QTAGEXIST_SHIFT        12
+#define PARSING_AND_ERR_FLAGS_TUNNELIPHDRERROR_MASK            0x1
+#define PARSING_AND_ERR_FLAGS_TUNNELIPHDRERROR_SHIFT           13
+#define PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMWASCALCULATED_MASK  0x1
+#define PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMWASCALCULATED_SHIFT 14
+#define PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_MASK          0x1
+#define PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_SHIFT         15
+};
+
+struct pb_context {
+       __le32 crc[4];
+};
+
+struct pxp_concrete_fid {
+       __le16 fid;
+#define PXP_CONCRETE_FID_PFID_MASK     0xF
+#define PXP_CONCRETE_FID_PFID_SHIFT    0
+#define PXP_CONCRETE_FID_PORT_MASK     0x3
+#define PXP_CONCRETE_FID_PORT_SHIFT    4
+#define PXP_CONCRETE_FID_PATH_MASK     0x1
+#define PXP_CONCRETE_FID_PATH_SHIFT    6
+#define PXP_CONCRETE_FID_VFVALID_MASK  0x1
+#define PXP_CONCRETE_FID_VFVALID_SHIFT 7
+#define PXP_CONCRETE_FID_VFID_MASK     0xFF
+#define PXP_CONCRETE_FID_VFID_SHIFT    8
+};
+
+struct pxp_pretend_concrete_fid {
+       __le16 fid;
+#define PXP_PRETEND_CONCRETE_FID_PFID_MASK      0xF
+#define PXP_PRETEND_CONCRETE_FID_PFID_SHIFT     0
+#define PXP_PRETEND_CONCRETE_FID_RESERVED_MASK  0x7
+#define PXP_PRETEND_CONCRETE_FID_RESERVED_SHIFT 4
+#define PXP_PRETEND_CONCRETE_FID_VFVALID_MASK   0x1
+#define PXP_PRETEND_CONCRETE_FID_VFVALID_SHIFT  7
+#define PXP_PRETEND_CONCRETE_FID_VFID_MASK      0xFF
+#define PXP_PRETEND_CONCRETE_FID_VFID_SHIFT     8
+};
+
+union pxp_pretend_fid {
+       struct pxp_pretend_concrete_fid concrete_fid;
+       __le16                          opaque_fid;
+};
+
+/* Pxp Pretend Command Register. */
+struct pxp_pretend_cmd {
+       union pxp_pretend_fid   fid;
+       __le16                  control;
+#define PXP_PRETEND_CMD_PATH_MASK              0x1
+#define PXP_PRETEND_CMD_PATH_SHIFT             0
+#define PXP_PRETEND_CMD_USE_PORT_MASK          0x1
+#define PXP_PRETEND_CMD_USE_PORT_SHIFT         1
+#define PXP_PRETEND_CMD_PORT_MASK              0x3
+#define PXP_PRETEND_CMD_PORT_SHIFT             2
+#define PXP_PRETEND_CMD_RESERVED0_MASK         0xF
+#define PXP_PRETEND_CMD_RESERVED0_SHIFT        4
+#define PXP_PRETEND_CMD_RESERVED1_MASK         0xF
+#define PXP_PRETEND_CMD_RESERVED1_SHIFT        8
+#define PXP_PRETEND_CMD_PRETEND_PATH_MASK      0x1
+#define PXP_PRETEND_CMD_PRETEND_PATH_SHIFT     12
+#define PXP_PRETEND_CMD_PRETEND_PORT_MASK      0x1
+#define PXP_PRETEND_CMD_PRETEND_PORT_SHIFT     13
+#define PXP_PRETEND_CMD_PRETEND_FUNCTION_MASK  0x1
+#define PXP_PRETEND_CMD_PRETEND_FUNCTION_SHIFT 14
+#define PXP_PRETEND_CMD_IS_CONCRETE_MASK       0x1
+#define PXP_PRETEND_CMD_IS_CONCRETE_SHIFT      15
+};
+
+/* PTT Record in PXP Admin Window. */
+struct pxp_ptt_entry {
+       __le32                  offset;
+#define PXP_PTT_ENTRY_OFFSET_MASK     0x7FFFFF
+#define PXP_PTT_ENTRY_OFFSET_SHIFT    0
+#define PXP_PTT_ENTRY_RESERVED0_MASK  0x1FF
+#define PXP_PTT_ENTRY_RESERVED0_SHIFT 23
+       struct pxp_pretend_cmd  pretend;
+};
+
+/* RDIF task context */
+struct rdif_task_context {
+       __le32 initial_ref_tag;
+       __le16 app_tag_value;
+       __le16 app_tag_mask;
+       u8 flags0;
+#define RDIF_TASK_CONTEXT_IGNOREAPPTAG_MASK            0x1
+#define RDIF_TASK_CONTEXT_IGNOREAPPTAG_SHIFT           0
+#define RDIF_TASK_CONTEXT_INITIALREFTAGVALID_MASK      0x1
+#define RDIF_TASK_CONTEXT_INITIALREFTAGVALID_SHIFT     1
+#define RDIF_TASK_CONTEXT_HOSTGUARDTYPE_MASK           0x1
+#define RDIF_TASK_CONTEXT_HOSTGUARDTYPE_SHIFT          2
+#define RDIF_TASK_CONTEXT_SETERRORWITHEOP_MASK         0x1
+#define RDIF_TASK_CONTEXT_SETERRORWITHEOP_SHIFT        3
+#define RDIF_TASK_CONTEXT_PROTECTIONTYPE_MASK          0x3
+#define RDIF_TASK_CONTEXT_PROTECTIONTYPE_SHIFT         4
+#define RDIF_TASK_CONTEXT_CRC_SEED_MASK                0x1
+#define RDIF_TASK_CONTEXT_CRC_SEED_SHIFT               6
+#define RDIF_TASK_CONTEXT_KEEPREFTAGCONST_MASK         0x1
+#define RDIF_TASK_CONTEXT_KEEPREFTAGCONST_SHIFT        7
+       u8 partial_dif_data[7];
+       __le16 partial_crc_value;
+       __le16 partial_checksum_value;
+       __le32 offset_in_io;
+       __le16 flags1;
+#define RDIF_TASK_CONTEXT_VALIDATEGUARD_MASK           0x1
+#define RDIF_TASK_CONTEXT_VALIDATEGUARD_SHIFT          0
+#define RDIF_TASK_CONTEXT_VALIDATEAPPTAG_MASK          0x1
+#define RDIF_TASK_CONTEXT_VALIDATEAPPTAG_SHIFT         1
+#define RDIF_TASK_CONTEXT_VALIDATEREFTAG_MASK          0x1
+#define RDIF_TASK_CONTEXT_VALIDATEREFTAG_SHIFT         2
+#define RDIF_TASK_CONTEXT_FORWARDGUARD_MASK            0x1
+#define RDIF_TASK_CONTEXT_FORWARDGUARD_SHIFT           3
+#define RDIF_TASK_CONTEXT_FORWARDAPPTAG_MASK           0x1
+#define RDIF_TASK_CONTEXT_FORWARDAPPTAG_SHIFT          4
+#define RDIF_TASK_CONTEXT_FORWARDREFTAG_MASK           0x1
+#define RDIF_TASK_CONTEXT_FORWARDREFTAG_SHIFT          5
+#define RDIF_TASK_CONTEXT_INTERVALSIZE_MASK            0x7
+#define RDIF_TASK_CONTEXT_INTERVALSIZE_SHIFT           6
+#define RDIF_TASK_CONTEXT_HOSTINTERFACE_MASK           0x3
+#define RDIF_TASK_CONTEXT_HOSTINTERFACE_SHIFT          9
+#define RDIF_TASK_CONTEXT_DIFBEFOREDATA_MASK           0x1
+#define RDIF_TASK_CONTEXT_DIFBEFOREDATA_SHIFT          11
+#define RDIF_TASK_CONTEXT_RESERVED0_MASK               0x1
+#define RDIF_TASK_CONTEXT_RESERVED0_SHIFT              12
+#define RDIF_TASK_CONTEXT_NETWORKINTERFACE_MASK        0x1
+#define RDIF_TASK_CONTEXT_NETWORKINTERFACE_SHIFT       13
+#define RDIF_TASK_CONTEXT_FORWARDAPPTAGWITHMASK_MASK   0x1
+#define RDIF_TASK_CONTEXT_FORWARDAPPTAGWITHMASK_SHIFT  14
+#define RDIF_TASK_CONTEXT_FORWARDREFTAGWITHMASK_MASK   0x1
+#define RDIF_TASK_CONTEXT_FORWARDREFTAGWITHMASK_SHIFT  15
+       __le16 state;
+#define RDIF_TASK_CONTEXT_RECEIVEDDIFBYTESLEFT_MASK    0xF
+#define RDIF_TASK_CONTEXT_RECEIVEDDIFBYTESLEFT_SHIFT   0
+#define RDIF_TASK_CONTEXT_TRANSMITEDDIFBYTESLEFT_MASK  0xF
+#define RDIF_TASK_CONTEXT_TRANSMITEDDIFBYTESLEFT_SHIFT 4
+#define RDIF_TASK_CONTEXT_ERRORINIO_MASK               0x1
+#define RDIF_TASK_CONTEXT_ERRORINIO_SHIFT              8
+#define RDIF_TASK_CONTEXT_CHECKSUMOVERFLOW_MASK        0x1
+#define RDIF_TASK_CONTEXT_CHECKSUMOVERFLOW_SHIFT       9
+#define RDIF_TASK_CONTEXT_REFTAGMASK_MASK              0xF
+#define RDIF_TASK_CONTEXT_REFTAGMASK_SHIFT             10
+#define RDIF_TASK_CONTEXT_RESERVED1_MASK               0x3
+#define RDIF_TASK_CONTEXT_RESERVED1_SHIFT              14
+       __le32 reserved2;
+};
+
+enum rss_hash_type {
+       RSS_HASH_TYPE_DEFAULT   = 0,
+       RSS_HASH_TYPE_IPV4      = 1,
+       RSS_HASH_TYPE_TCP_IPV4  = 2,
+       RSS_HASH_TYPE_IPV6      = 3,
+       RSS_HASH_TYPE_TCP_IPV6  = 4,
+       RSS_HASH_TYPE_UDP_IPV4  = 5,
+       RSS_HASH_TYPE_UDP_IPV6  = 6,
+       MAX_RSS_HASH_TYPE
+};
+
+/* status block structure */
+struct status_block {
+       __le16  pi_array[PIS_PER_SB];
+       __le32  sb_num;
+#define STATUS_BLOCK_SB_NUM_MASK      0x1FF
+#define STATUS_BLOCK_SB_NUM_SHIFT     0
+#define STATUS_BLOCK_ZERO_PAD_MASK    0x7F
+#define STATUS_BLOCK_ZERO_PAD_SHIFT   9
+#define STATUS_BLOCK_ZERO_PAD2_MASK   0xFFFF
+#define STATUS_BLOCK_ZERO_PAD2_SHIFT  16
+       __le32 prod_index;
+#define STATUS_BLOCK_PROD_INDEX_MASK  0xFFFFFF
+#define STATUS_BLOCK_PROD_INDEX_SHIFT 0
+#define STATUS_BLOCK_ZERO_PAD3_MASK   0xFF
+#define STATUS_BLOCK_ZERO_PAD3_SHIFT  24
+};
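+
+/* The producer index is consumed by masking the little-endian word, e.g.
+ * (illustrative sketch):
+ *
+ *	u32 prod = le32_to_cpu(sb->prod_index) & STATUS_BLOCK_PROD_INDEX_MASK;
+ */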
+
+struct tdif_task_context {
+       __le32 initial_ref_tag;
+       __le16 app_tag_value;
+       __le16 app_tag_mask;
+       __le16 partial_crc_valueB;
+       __le16 partial_checksum_valueB;
+       __le16 stateB;
+#define TDIF_TASK_CONTEXT_RECEIVEDDIFBYTESLEFTB_MASK    0xF
+#define TDIF_TASK_CONTEXT_RECEIVEDDIFBYTESLEFTB_SHIFT   0
+#define TDIF_TASK_CONTEXT_TRANSMITEDDIFBYTESLEFTB_MASK  0xF
+#define TDIF_TASK_CONTEXT_TRANSMITEDDIFBYTESLEFTB_SHIFT 4
+#define TDIF_TASK_CONTEXT_ERRORINIOB_MASK               0x1
+#define TDIF_TASK_CONTEXT_ERRORINIOB_SHIFT              8
+#define TDIF_TASK_CONTEXT_CHECKSUMOVERFLOW_MASK         0x1
+#define TDIF_TASK_CONTEXT_CHECKSUMOVERFLOW_SHIFT        9
+#define TDIF_TASK_CONTEXT_RESERVED0_MASK                0x3F
+#define TDIF_TASK_CONTEXT_RESERVED0_SHIFT               10
+       u8 reserved1;
+       u8 flags0;
+#define TDIF_TASK_CONTEXT_IGNOREAPPTAG_MASK             0x1
+#define TDIF_TASK_CONTEXT_IGNOREAPPTAG_SHIFT            0
+#define TDIF_TASK_CONTEXT_INITIALREFTAGVALID_MASK       0x1
+#define TDIF_TASK_CONTEXT_INITIALREFTAGVALID_SHIFT      1
+#define TDIF_TASK_CONTEXT_HOSTGUARDTYPE_MASK            0x1
+#define TDIF_TASK_CONTEXT_HOSTGUARDTYPE_SHIFT           2
+#define TDIF_TASK_CONTEXT_SETERRORWITHEOP_MASK          0x1
+#define TDIF_TASK_CONTEXT_SETERRORWITHEOP_SHIFT         3
+#define TDIF_TASK_CONTEXT_PROTECTIONTYPE_MASK           0x3
+#define TDIF_TASK_CONTEXT_PROTECTIONTYPE_SHIFT          4
+#define TDIF_TASK_CONTEXT_CRC_SEED_MASK                 0x1
+#define TDIF_TASK_CONTEXT_CRC_SEED_SHIFT                6
+#define TDIF_TASK_CONTEXT_RESERVED2_MASK                0x1
+#define TDIF_TASK_CONTEXT_RESERVED2_SHIFT               7
+       __le32 flags1;
+#define TDIF_TASK_CONTEXT_VALIDATEGUARD_MASK            0x1
+#define TDIF_TASK_CONTEXT_VALIDATEGUARD_SHIFT           0
+#define TDIF_TASK_CONTEXT_VALIDATEAPPTAG_MASK           0x1
+#define TDIF_TASK_CONTEXT_VALIDATEAPPTAG_SHIFT          1
+#define TDIF_TASK_CONTEXT_VALIDATEREFTAG_MASK           0x1
+#define TDIF_TASK_CONTEXT_VALIDATEREFTAG_SHIFT          2
+#define TDIF_TASK_CONTEXT_FORWARDGUARD_MASK             0x1
+#define TDIF_TASK_CONTEXT_FORWARDGUARD_SHIFT            3
+#define TDIF_TASK_CONTEXT_FORWARDAPPTAG_MASK            0x1
+#define TDIF_TASK_CONTEXT_FORWARDAPPTAG_SHIFT           4
+#define TDIF_TASK_CONTEXT_FORWARDREFTAG_MASK            0x1
+#define TDIF_TASK_CONTEXT_FORWARDREFTAG_SHIFT           5
+#define TDIF_TASK_CONTEXT_INTERVALSIZE_MASK             0x7
+#define TDIF_TASK_CONTEXT_INTERVALSIZE_SHIFT            6
+#define TDIF_TASK_CONTEXT_HOSTINTERFACE_MASK            0x3
+#define TDIF_TASK_CONTEXT_HOSTINTERFACE_SHIFT           9
+#define TDIF_TASK_CONTEXT_DIFBEFOREDATA_MASK            0x1
+#define TDIF_TASK_CONTEXT_DIFBEFOREDATA_SHIFT           11
+#define TDIF_TASK_CONTEXT_RESERVED3_MASK                0x1
+#define TDIF_TASK_CONTEXT_RESERVED3_SHIFT               12
+#define TDIF_TASK_CONTEXT_NETWORKINTERFACE_MASK         0x1
+#define TDIF_TASK_CONTEXT_NETWORKINTERFACE_SHIFT        13
+#define TDIF_TASK_CONTEXT_RECEIVEDDIFBYTESLEFTA_MASK    0xF
+#define TDIF_TASK_CONTEXT_RECEIVEDDIFBYTESLEFTA_SHIFT   14
+#define TDIF_TASK_CONTEXT_TRANSMITEDDIFBYTESLEFTA_MASK  0xF
+#define TDIF_TASK_CONTEXT_TRANSMITEDDIFBYTESLEFTA_SHIFT 18
+#define TDIF_TASK_CONTEXT_ERRORINIOA_MASK               0x1
+#define TDIF_TASK_CONTEXT_ERRORINIOA_SHIFT              22
+#define TDIF_TASK_CONTEXT_CHECKSUMOVERFLOWA_MASK        0x1
+#define TDIF_TASK_CONTEXT_CHECKSUMOVERFLOWA_SHIFT       23
+#define TDIF_TASK_CONTEXT_REFTAGMASK_MASK               0xF
+#define TDIF_TASK_CONTEXT_REFTAGMASK_SHIFT              24
+#define TDIF_TASK_CONTEXT_FORWARDAPPTAGWITHMASK_MASK    0x1
+#define TDIF_TASK_CONTEXT_FORWARDAPPTAGWITHMASK_SHIFT   28
+#define TDIF_TASK_CONTEXT_FORWARDREFTAGWITHMASK_MASK    0x1
+#define TDIF_TASK_CONTEXT_FORWARDREFTAGWITHMASK_SHIFT   29
+#define TDIF_TASK_CONTEXT_KEEPREFTAGCONST_MASK          0x1
+#define TDIF_TASK_CONTEXT_KEEPREFTAGCONST_SHIFT         30
+#define TDIF_TASK_CONTEXT_RESERVED4_MASK                0x1
+#define TDIF_TASK_CONTEXT_RESERVED4_SHIFT               31
+       __le32 offset_in_iob;
+       __le16 partial_crc_value_a;
+       __le16 partial_checksum_valuea_;
+       __le32 offset_in_ioa;
+       u8 partial_dif_data_a[8];
+       u8 partial_dif_data_b[8];
+};
+
+struct timers_context {
+       __le32 logical_client0;
+#define TIMERS_CONTEXT_EXPIRATIONTIMELC0_MASK     0xFFFFFFF
+#define TIMERS_CONTEXT_EXPIRATIONTIMELC0_SHIFT    0
+#define TIMERS_CONTEXT_VALIDLC0_MASK              0x1
+#define TIMERS_CONTEXT_VALIDLC0_SHIFT             28
+#define TIMERS_CONTEXT_ACTIVELC0_MASK             0x1
+#define TIMERS_CONTEXT_ACTIVELC0_SHIFT            29
+#define TIMERS_CONTEXT_RESERVED0_MASK             0x3
+#define TIMERS_CONTEXT_RESERVED0_SHIFT            30
+       __le32 logical_client1;
+#define TIMERS_CONTEXT_EXPIRATIONTIMELC1_MASK     0xFFFFFFF
+#define TIMERS_CONTEXT_EXPIRATIONTIMELC1_SHIFT    0
+#define TIMERS_CONTEXT_VALIDLC1_MASK              0x1
+#define TIMERS_CONTEXT_VALIDLC1_SHIFT             28
+#define TIMERS_CONTEXT_ACTIVELC1_MASK             0x1
+#define TIMERS_CONTEXT_ACTIVELC1_SHIFT            29
+#define TIMERS_CONTEXT_RESERVED1_MASK             0x3
+#define TIMERS_CONTEXT_RESERVED1_SHIFT            30
+       __le32 logical_client2;
+#define TIMERS_CONTEXT_EXPIRATIONTIMELC2_MASK     0xFFFFFFF
+#define TIMERS_CONTEXT_EXPIRATIONTIMELC2_SHIFT    0
+#define TIMERS_CONTEXT_VALIDLC2_MASK              0x1
+#define TIMERS_CONTEXT_VALIDLC2_SHIFT             28
+#define TIMERS_CONTEXT_ACTIVELC2_MASK             0x1
+#define TIMERS_CONTEXT_ACTIVELC2_SHIFT            29
+#define TIMERS_CONTEXT_RESERVED2_MASK             0x3
+#define TIMERS_CONTEXT_RESERVED2_SHIFT            30
+       __le32 host_expiration_fields;
+#define TIMERS_CONTEXT_HOSTEXPRIRATIONVALUE_MASK  0xFFFFFFF
+#define TIMERS_CONTEXT_HOSTEXPRIRATIONVALUE_SHIFT 0
+#define TIMERS_CONTEXT_HOSTEXPRIRATIONVALID_MASK  0x1
+#define TIMERS_CONTEXT_HOSTEXPRIRATIONVALID_SHIFT 28
+#define TIMERS_CONTEXT_RESERVED3_MASK             0x7
+#define TIMERS_CONTEXT_RESERVED3_SHIFT            29
+};
+#endif /* __COMMON_HSI__ */
diff --git a/include/linux/qed/eth_common.h b/include/linux/qed/eth_common.h
new file mode 100644 (file)
index 0000000..b5ebc69
--- /dev/null
@@ -0,0 +1,396 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015 QLogic Corporation
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#ifndef __ETH_COMMON__
+#define __ETH_COMMON__
+
+/********************/
+/* ETH FW CONSTANTS */
+/********************/
+#define ETH_HSI_VER_MAJOR                   3
+#define ETH_HSI_VER_MINOR                   0
+#define ETH_CACHE_LINE_SIZE                 64
+
+#define ETH_MAX_RAMROD_PER_CON                          8
+#define ETH_TX_BD_PAGE_SIZE_BYTES                       4096
+#define ETH_RX_BD_PAGE_SIZE_BYTES                       4096
+#define ETH_RX_CQE_PAGE_SIZE_BYTES                      4096
+#define ETH_RX_NUM_NEXT_PAGE_BDS                        2
+
+#define ETH_TX_MIN_BDS_PER_NON_LSO_PKT                          1
+#define ETH_TX_MAX_BDS_PER_NON_LSO_PACKET                       18
+#define ETH_TX_MAX_LSO_HDR_NBD                                          4
+#define ETH_TX_MIN_BDS_PER_LSO_PKT                                      3
+#define ETH_TX_MIN_BDS_PER_TUNN_IPV6_WITH_EXT_PKT       3
+#define ETH_TX_MIN_BDS_PER_IPV6_WITH_EXT_PKT            2
+#define ETH_TX_MIN_BDS_PER_PKT_W_LOOPBACK_MODE          2
+#define ETH_TX_MAX_NON_LSO_PKT_LEN                  (9700 - (4 + 12 + 8))
+#define ETH_TX_MAX_LSO_HDR_BYTES                    510
+
+#define ETH_NUM_STATISTIC_COUNTERS                      MAX_NUM_VPORTS
+
+/* Maximum number of buffers, used for RX packet placement */
+#define ETH_RX_MAX_BUFF_PER_PKT             5
+
+/* num of MAC/VLAN filters */
+#define ETH_NUM_MAC_FILTERS                                     512
+#define ETH_NUM_VLAN_FILTERS                            512
+
+/* approx. multicast constants */
+#define ETH_MULTICAST_BIN_FROM_MAC_SEED     0
+#define ETH_MULTICAST_MAC_BINS                          256
+#define ETH_MULTICAST_MAC_BINS_IN_REGS          (ETH_MULTICAST_MAC_BINS / 32)
+
+/* Ethernet vport update constants */
+#define ETH_FILTER_RULES_COUNT                          10
+#define ETH_RSS_IND_TABLE_ENTRIES_NUM           128
+#define ETH_RSS_KEY_SIZE_REGS                       10
+#define ETH_RSS_ENGINE_NUM_K2               207
+#define ETH_RSS_ENGINE_NUM_BB               127
+
+/* TPA constants */
+#define ETH_TPA_MAX_AGGS_NUM              64
+#define ETH_TPA_CQE_START_LEN_LIST_SIZE   ETH_RX_MAX_BUFF_PER_PKT
+#define ETH_TPA_CQE_CONT_LEN_LIST_SIZE    6
+#define ETH_TPA_CQE_END_LEN_LIST_SIZE     4
+
+struct eth_tx_1st_bd_flags {
+       u8 bitfields;
+#define ETH_TX_1ST_BD_FLAGS_START_BD_MASK         0x1
+#define ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT        0
+#define ETH_TX_1ST_BD_FLAGS_FORCE_VLAN_MODE_MASK  0x1
+#define ETH_TX_1ST_BD_FLAGS_FORCE_VLAN_MODE_SHIFT 1
+#define ETH_TX_1ST_BD_FLAGS_IP_CSUM_MASK          0x1
+#define ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT         2
+#define ETH_TX_1ST_BD_FLAGS_L4_CSUM_MASK          0x1
+#define ETH_TX_1ST_BD_FLAGS_L4_CSUM_SHIFT         3
+#define ETH_TX_1ST_BD_FLAGS_VLAN_INSERTION_MASK   0x1
+#define ETH_TX_1ST_BD_FLAGS_VLAN_INSERTION_SHIFT  4
+#define ETH_TX_1ST_BD_FLAGS_LSO_MASK              0x1
+#define ETH_TX_1ST_BD_FLAGS_LSO_SHIFT             5
+#define ETH_TX_1ST_BD_FLAGS_TUNN_IP_CSUM_MASK     0x1
+#define ETH_TX_1ST_BD_FLAGS_TUNN_IP_CSUM_SHIFT    6
+#define ETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_MASK     0x1
+#define ETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_SHIFT    7
+};
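+
+/* Tx setup code ORs the relevant flag bits into "bitfields", e.g. marking a
+ * packet as LSO (illustrative sketch of the pattern used by the qede Tx
+ * path):
+ *
+ *	bd_flags.bitfields |= 1 << ETH_TX_1ST_BD_FLAGS_LSO_SHIFT;
+ */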
+
+/* The parsing information data for the first tx bd of a given packet. */
+struct eth_tx_data_1st_bd {
+       __le16                          vlan;
+       u8                              nbds;
+       struct eth_tx_1st_bd_flags      bd_flags;
+       __le16                          bitfields;
+#define ETH_TX_DATA_1ST_BD_TUNN_FLAG_MASK  0x1
+#define ETH_TX_DATA_1ST_BD_TUNN_FLAG_SHIFT 0
+#define ETH_TX_DATA_1ST_BD_RESERVED0_MASK          0x1
+#define ETH_TX_DATA_1ST_BD_RESERVED0_SHIFT         1
+#define ETH_TX_DATA_1ST_BD_PKT_LEN_MASK    0x3FFF
+#define ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT   2
+};
+
+/* The parsing information data for the second tx bd of a given packet. */
+struct eth_tx_data_2nd_bd {
+       __le16  tunn_ip_size;
+       __le16  bitfields1;
+#define ETH_TX_DATA_2ND_BD_TUNN_INNER_L2_HDR_SIZE_W_MASK  0xF
+#define ETH_TX_DATA_2ND_BD_TUNN_INNER_L2_HDR_SIZE_W_SHIFT 0
+#define ETH_TX_DATA_2ND_BD_TUNN_INNER_ETH_TYPE_MASK       0x3
+#define ETH_TX_DATA_2ND_BD_TUNN_INNER_ETH_TYPE_SHIFT      4
+#define ETH_TX_DATA_2ND_BD_DEST_PORT_MODE_MASK            0x3
+#define ETH_TX_DATA_2ND_BD_DEST_PORT_MODE_SHIFT           6
+#define ETH_TX_DATA_2ND_BD_START_BD_MASK                  0x1
+#define ETH_TX_DATA_2ND_BD_START_BD_SHIFT                 8
+#define ETH_TX_DATA_2ND_BD_TUNN_TYPE_MASK                 0x3
+#define ETH_TX_DATA_2ND_BD_TUNN_TYPE_SHIFT                9
+#define ETH_TX_DATA_2ND_BD_TUNN_INNER_IPV6_MASK           0x1
+#define ETH_TX_DATA_2ND_BD_TUNN_INNER_IPV6_SHIFT          11
+#define ETH_TX_DATA_2ND_BD_IPV6_EXT_MASK                  0x1
+#define ETH_TX_DATA_2ND_BD_IPV6_EXT_SHIFT                 12
+#define ETH_TX_DATA_2ND_BD_TUNN_IPV6_EXT_MASK             0x1
+#define ETH_TX_DATA_2ND_BD_TUNN_IPV6_EXT_SHIFT            13
+#define ETH_TX_DATA_2ND_BD_L4_UDP_MASK                    0x1
+#define ETH_TX_DATA_2ND_BD_L4_UDP_SHIFT                   14
+#define ETH_TX_DATA_2ND_BD_L4_PSEUDO_CSUM_MODE_MASK       0x1
+#define ETH_TX_DATA_2ND_BD_L4_PSEUDO_CSUM_MODE_SHIFT      15
+       __le16 bitfields2;
+#define ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_MASK     0x1FFF
+#define ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_SHIFT    0
+#define ETH_TX_DATA_2ND_BD_RESERVED0_MASK                 0x7
+#define ETH_TX_DATA_2ND_BD_RESERVED0_SHIFT                13
+};
+
+struct eth_fast_path_cqe_fw_debug {
+       u8 reserved0;
+       u8 reserved1;
+       __le16 reserved2;
+};
+
+/*  tunneling parsing flags */
+struct eth_tunnel_parsing_flags {
+       u8 flags;
+#define        ETH_TUNNEL_PARSING_FLAGS_TYPE_MASK              0x3
+#define        ETH_TUNNEL_PARSING_FLAGS_TYPE_SHIFT             0
+#define        ETH_TUNNEL_PARSING_FLAGS_TENNANT_ID_EXIST_MASK  0x1
+#define        ETH_TUNNEL_PARSING_FLAGS_TENNANT_ID_EXIST_SHIFT 2
+#define        ETH_TUNNEL_PARSING_FLAGS_NEXT_PROTOCOL_MASK     0x3
+#define        ETH_TUNNEL_PARSING_FLAGS_NEXT_PROTOCOL_SHIFT    3
+#define        ETH_TUNNEL_PARSING_FLAGS_FIRSTHDRIPMATCH_MASK   0x1
+#define        ETH_TUNNEL_PARSING_FLAGS_FIRSTHDRIPMATCH_SHIFT  5
+#define        ETH_TUNNEL_PARSING_FLAGS_IPV4_FRAGMENT_MASK     0x1
+#define        ETH_TUNNEL_PARSING_FLAGS_IPV4_FRAGMENT_SHIFT    6
+#define        ETH_TUNNEL_PARSING_FLAGS_IPV4_OPTIONS_MASK      0x1
+#define        ETH_TUNNEL_PARSING_FLAGS_IPV4_OPTIONS_SHIFT     7
+};
+
+/* Regular ETH Rx FP CQE. */
+struct eth_fast_path_rx_reg_cqe {
+       u8 type;
+       u8 bitfields;
+#define ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE_MASK  0x7
+#define ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE_SHIFT 0
+#define ETH_FAST_PATH_RX_REG_CQE_TC_MASK             0xF
+#define ETH_FAST_PATH_RX_REG_CQE_TC_SHIFT            3
+#define ETH_FAST_PATH_RX_REG_CQE_RESERVED0_MASK      0x1
+#define ETH_FAST_PATH_RX_REG_CQE_RESERVED0_SHIFT     7
+       __le16 pkt_len;
+       struct parsing_and_err_flags pars_flags;
+       __le16 vlan_tag;
+       __le32 rss_hash;
+       __le16 len_on_first_bd;
+       u8 placement_offset;
+       struct eth_tunnel_parsing_flags tunnel_pars_flags;
+       u8 bd_num;
+       u8 reserved[7];
+       struct eth_fast_path_cqe_fw_debug fw_debug;
+       u8 reserved1[3];
+       u8 flags;
+#define ETH_FAST_PATH_RX_REG_CQE_VALID_MASK          0x1
+#define ETH_FAST_PATH_RX_REG_CQE_VALID_SHIFT         0
+#define ETH_FAST_PATH_RX_REG_CQE_VALID_TOGGLE_MASK   0x1
+#define ETH_FAST_PATH_RX_REG_CQE_VALID_TOGGLE_SHIFT  1
+#define ETH_FAST_PATH_RX_REG_CQE_RESERVED2_MASK      0x3F
+#define ETH_FAST_PATH_RX_REG_CQE_RESERVED2_SHIFT     2
+};
+
+/* TPA-continue ETH Rx FP CQE. */
+struct eth_fast_path_rx_tpa_cont_cqe {
+       u8      type;
+       u8      tpa_agg_index;
+       __le16  len_list[ETH_TPA_CQE_CONT_LEN_LIST_SIZE];
+       u8      reserved[5];
+       u8      reserved1;
+       __le16  reserved2[ETH_TPA_CQE_CONT_LEN_LIST_SIZE];
+};
+
+/* TPA-end ETH Rx FP CQE. */
+struct eth_fast_path_rx_tpa_end_cqe {
+       u8      type;
+       u8      tpa_agg_index;
+       __le16  total_packet_len;
+       u8      num_of_bds;
+       u8      end_reason;
+       __le16  num_of_coalesced_segs;
+       __le32  ts_delta;
+       __le16  len_list[ETH_TPA_CQE_END_LEN_LIST_SIZE];
+       u8      reserved1[3];
+       u8      reserved2;
+       __le16  reserved3[ETH_TPA_CQE_END_LEN_LIST_SIZE];
+};
+
+/* TPA-start ETH Rx FP CQE. */
+struct eth_fast_path_rx_tpa_start_cqe {
+       u8      type;
+       u8      bitfields;
+#define ETH_FAST_PATH_RX_TPA_START_CQE_RSS_HASH_TYPE_MASK  0x7
+#define ETH_FAST_PATH_RX_TPA_START_CQE_RSS_HASH_TYPE_SHIFT 0
+#define ETH_FAST_PATH_RX_TPA_START_CQE_TC_MASK             0xF
+#define ETH_FAST_PATH_RX_TPA_START_CQE_TC_SHIFT            3
+#define ETH_FAST_PATH_RX_TPA_START_CQE_RESERVED0_MASK      0x1
+#define ETH_FAST_PATH_RX_TPA_START_CQE_RESERVED0_SHIFT     7
+       __le16  seg_len;
+       struct parsing_and_err_flags pars_flags;
+       __le16  vlan_tag;
+       __le32  rss_hash;
+       __le16  len_on_first_bd;
+       u8      placement_offset;
+       struct eth_tunnel_parsing_flags tunnel_pars_flags;
+       u8      tpa_agg_index;
+       u8      header_len;
+       __le16  ext_bd_len_list[ETH_TPA_CQE_START_LEN_LIST_SIZE];
+       struct eth_fast_path_cqe_fw_debug fw_debug;
+};
+
+/* The L4 pseudo checksum mode for Ethernet */
+enum eth_l4_pseudo_checksum_mode {
+       ETH_L4_PSEUDO_CSUM_CORRECT_LENGTH,
+       ETH_L4_PSEUDO_CSUM_ZERO_LENGTH,
+       MAX_ETH_L4_PSEUDO_CHECKSUM_MODE
+};
+
+struct eth_rx_bd {
+       struct regpair addr;
+};
+
+/* regular ETH Rx SP CQE */
+struct eth_slow_path_rx_cqe {
+       u8      type;
+       u8      ramrod_cmd_id;
+       u8      error_flag;
+       u8      reserved[25];
+       __le16  echo;
+       u8      reserved1;
+       u8      flags;
+/* for PMD mode - valid indication */
+#define ETH_SLOW_PATH_RX_CQE_VALID_MASK         0x1
+#define ETH_SLOW_PATH_RX_CQE_VALID_SHIFT        0
+/* for PMD mode - valid toggle indication */
+#define ETH_SLOW_PATH_RX_CQE_VALID_TOGGLE_MASK  0x1
+#define ETH_SLOW_PATH_RX_CQE_VALID_TOGGLE_SHIFT 1
+#define ETH_SLOW_PATH_RX_CQE_RESERVED2_MASK     0x3F
+#define ETH_SLOW_PATH_RX_CQE_RESERVED2_SHIFT    2
+};
+
+/* union for all ETH Rx CQE types */
+union eth_rx_cqe {
+       struct eth_fast_path_rx_reg_cqe         fast_path_regular;
+       struct eth_fast_path_rx_tpa_start_cqe   fast_path_tpa_start;
+       struct eth_fast_path_rx_tpa_cont_cqe    fast_path_tpa_cont;
+       struct eth_fast_path_rx_tpa_end_cqe     fast_path_tpa_end;
+       struct eth_slow_path_rx_cqe             slow_path;
+};
+
+/* ETH Rx CQE type */
+enum eth_rx_cqe_type {
+       ETH_RX_CQE_TYPE_UNUSED,
+       ETH_RX_CQE_TYPE_REGULAR,
+       ETH_RX_CQE_TYPE_SLOW_PATH,
+       ETH_RX_CQE_TYPE_TPA_START,
+       ETH_RX_CQE_TYPE_TPA_CONT,
+       ETH_RX_CQE_TYPE_TPA_END,
+       MAX_ETH_RX_CQE_TYPE
+};
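
Every Rx CQE layout in the union starts with the same 'type' byte, so a receive path can dispatch on it before touching any per-type fields. A minimal sketch under that assumption follows; the function name is illustrative and the block is not part of the patch.

	static void example_handle_rx_cqe(union eth_rx_cqe *cqe)
	{
		switch (cqe->fast_path_regular.type) {
		case ETH_RX_CQE_TYPE_REGULAR:
			/* single-packet fast-path completion */
			break;
		case ETH_RX_CQE_TYPE_TPA_START:
		case ETH_RX_CQE_TYPE_TPA_CONT:
		case ETH_RX_CQE_TYPE_TPA_END:
			/* aggregation (TPA) completions */
			break;
		case ETH_RX_CQE_TYPE_SLOW_PATH:
			/* ramrod completion delivered on the Rx ring */
			break;
		default:
			break;
		}
	}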
+
+enum eth_rx_tunn_type {
+       ETH_RX_NO_TUNN,
+       ETH_RX_TUNN_GENEVE,
+       ETH_RX_TUNN_GRE,
+       ETH_RX_TUNN_VXLAN,
+       MAX_ETH_RX_TUNN_TYPE
+};
+
+/*  Aggregation end reason. */
+enum eth_tpa_end_reason {
+       ETH_AGG_END_UNUSED,
+       ETH_AGG_END_SP_UPDATE,
+       ETH_AGG_END_MAX_LEN,
+       ETH_AGG_END_LAST_SEG,
+       ETH_AGG_END_TIMEOUT,
+       ETH_AGG_END_NOT_CONSISTENT,
+       ETH_AGG_END_OUT_OF_ORDER,
+       ETH_AGG_END_NON_TPA_SEG,
+       MAX_ETH_TPA_END_REASON
+};
+
+/* The first tx bd of a given packet */
+struct eth_tx_1st_bd {
+       struct regpair                  addr;
+       __le16                          nbytes;
+       struct eth_tx_data_1st_bd       data;
+};
+
+/* The second tx bd of a given packet */
+struct eth_tx_2nd_bd {
+       struct regpair                  addr;
+       __le16                          nbytes;
+       struct eth_tx_data_2nd_bd       data;
+};
+
+/* The parsing information data for the third tx bd of a given packet. */
+struct eth_tx_data_3rd_bd {
+       __le16  lso_mss;
+       __le16  bitfields;
+#define ETH_TX_DATA_3RD_BD_TCP_HDR_LEN_DW_MASK  0xF
+#define ETH_TX_DATA_3RD_BD_TCP_HDR_LEN_DW_SHIFT 0
+#define ETH_TX_DATA_3RD_BD_HDR_NBD_MASK         0xF
+#define ETH_TX_DATA_3RD_BD_HDR_NBD_SHIFT        4
+#define ETH_TX_DATA_3RD_BD_START_BD_MASK        0x1
+#define ETH_TX_DATA_3RD_BD_START_BD_SHIFT       8
+#define ETH_TX_DATA_3RD_BD_RESERVED0_MASK       0x7F
+#define ETH_TX_DATA_3RD_BD_RESERVED0_SHIFT      9
+       u8      tunn_l4_hdr_start_offset_w;
+       u8      tunn_hdr_size_w;
+};
+
+/* The third tx bd of a given packet */
+struct eth_tx_3rd_bd {
+       struct regpair                  addr;
+       __le16                          nbytes;
+       struct eth_tx_data_3rd_bd       data;
+};
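
For LSO packets the third BD carries the MSS plus the TCP header length (in dwords) and the header-BD count packed into 'bitfields'. A packing sketch, again not part of the patch; the helper name and parameter choices are illustrative.

	static void example_fill_3rd_bd_lso(struct eth_tx_data_3rd_bd *d,
					    u16 mss, u8 tcp_hdr_len_dw,
					    u8 hdr_nbd)
	{
		u16 bits = 0;

		bits |= (tcp_hdr_len_dw & ETH_TX_DATA_3RD_BD_TCP_HDR_LEN_DW_MASK) <<
			ETH_TX_DATA_3RD_BD_TCP_HDR_LEN_DW_SHIFT;
		bits |= (hdr_nbd & ETH_TX_DATA_3RD_BD_HDR_NBD_MASK) <<
			ETH_TX_DATA_3RD_BD_HDR_NBD_SHIFT;
		d->lso_mss = cpu_to_le16(mss);
		d->bitfields = cpu_to_le16(bits);
	}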
+
+/* Complementary information for the regular tx bd of a given packet. */
+struct eth_tx_data_bd {
+       __le16  reserved0;
+       __le16  bitfields;
+#define ETH_TX_DATA_BD_RESERVED1_MASK  0xFF
+#define ETH_TX_DATA_BD_RESERVED1_SHIFT 0
+#define ETH_TX_DATA_BD_START_BD_MASK   0x1
+#define ETH_TX_DATA_BD_START_BD_SHIFT  8
+#define ETH_TX_DATA_BD_RESERVED2_MASK  0x7F
+#define ETH_TX_DATA_BD_RESERVED2_SHIFT 9
+       __le16 reserved3;
+};
+
+/* The common non-special TX BD ring element */
+struct eth_tx_bd {
+       struct regpair  addr;
+       __le16          nbytes;
+       struct eth_tx_data_bd   data;
+};
+
+union eth_tx_bd_types {
+       struct eth_tx_1st_bd    first_bd;
+       struct eth_tx_2nd_bd    second_bd;
+       struct eth_tx_3rd_bd    third_bd;
+       struct eth_tx_bd        reg_bd;
+};
+
+/* Ethernet TX tunneling type */
+enum eth_tx_tunn_type {
+       ETH_TX_TUNN_GENEVE,
+       ETH_TX_TUNN_TTAG,
+       ETH_TX_TUNN_GRE,
+       ETH_TX_TUNN_VXLAN,
+       MAX_ETH_TX_TUNN_TYPE
+};
+
+/* Xstorm Queue Zone */
+struct xstorm_eth_queue_zone {
+       struct coalescing_timeset int_coalescing_timeset;
+       u8 reserved[7];
+};
+
+/* ETH doorbell data */
+struct eth_db_data {
+       u8 params;
+#define ETH_DB_DATA_DEST_MASK         0x3
+#define ETH_DB_DATA_DEST_SHIFT        0
+#define ETH_DB_DATA_AGG_CMD_MASK      0x3
+#define ETH_DB_DATA_AGG_CMD_SHIFT     2
+#define ETH_DB_DATA_BYPASS_EN_MASK    0x1
+#define ETH_DB_DATA_BYPASS_EN_SHIFT   4
+#define ETH_DB_DATA_RESERVED_MASK     0x1
+#define ETH_DB_DATA_RESERVED_SHIFT    5
+#define ETH_DB_DATA_AGG_VAL_SEL_MASK  0x3
+#define ETH_DB_DATA_AGG_VAL_SEL_SHIFT 6
+       u8      agg_flags;
+       __le16  bd_prod;
+};
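
The doorbell 'params' byte is composed from the destination, aggregation-command and value-select sub-fields above, while bd_prod carries the new BD producer. A minimal packing sketch (not part of the patch; the function name is illustrative, and meaningful dest/agg values would come from the db_dest/db_agg_cmd_sel enums assumed to live in common_hsi.h):

	static u8 example_eth_db_params(u8 dest, u8 agg_cmd, u8 agg_val_sel)
	{
		u8 params = 0;

		params |= (dest & ETH_DB_DATA_DEST_MASK) <<
			  ETH_DB_DATA_DEST_SHIFT;
		params |= (agg_cmd & ETH_DB_DATA_AGG_CMD_MASK) <<
			  ETH_DB_DATA_AGG_CMD_SHIFT;
		params |= (agg_val_sel & ETH_DB_DATA_AGG_VAL_SEL_MASK) <<
			  ETH_DB_DATA_AGG_VAL_SEL_SHIFT;
		return params;
	}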
+
+#endif /* __ETH_COMMON__ */
diff --git a/include/linux/qed/iscsi_common.h b/include/linux/qed/iscsi_common.h
new file mode 100644 (file)
index 0000000..b3c0feb
--- /dev/null
@@ -0,0 +1,1439 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015 QLogic Corporation
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#ifndef __ISCSI_COMMON__
+#define __ISCSI_COMMON__
+/**********************/
+/* ISCSI FW CONSTANTS */
+/**********************/
+
+/* iSCSI HSI constants */
+#define ISCSI_DEFAULT_MTU       (1500)
+
+/* Current iSCSI HSI version number composed of two fields (16 bit) */
+#define ISCSI_HSI_MAJOR_VERSION (0)
+#define ISCSI_HSI_MINOR_VERSION (0)
+
+/* KWQ (kernel work queue) layer codes */
+#define ISCSI_SLOW_PATH_LAYER_CODE   (6)
+
+/* EQE completion status */
+#define ISCSI_EQE_COMPLETION_SUCCESS (0x0)
+#define ISCSI_EQE_RST_CONN_RCVD (0x1)
+
+/* iSCSI parameter defaults */
+#define ISCSI_DEFAULT_HEADER_DIGEST         (0)
+#define ISCSI_DEFAULT_DATA_DIGEST           (0)
+#define ISCSI_DEFAULT_INITIAL_R2T           (1)
+#define ISCSI_DEFAULT_IMMEDIATE_DATA        (1)
+#define ISCSI_DEFAULT_MAX_PDU_LENGTH        (0x2000)
+#define ISCSI_DEFAULT_FIRST_BURST_LENGTH    (0x10000)
+#define ISCSI_DEFAULT_MAX_BURST_LENGTH      (0x40000)
+#define ISCSI_DEFAULT_MAX_OUTSTANDING_R2T   (1)
+
+/* iSCSI parameter limits */
+#define ISCSI_MIN_VAL_MAX_PDU_LENGTH        (0x200)
+#define ISCSI_MAX_VAL_MAX_PDU_LENGTH        (0xffffff)
+#define ISCSI_MIN_VAL_BURST_LENGTH          (0x200)
+#define ISCSI_MAX_VAL_BURST_LENGTH          (0xffffff)
+#define ISCSI_MIN_VAL_MAX_OUTSTANDING_R2T   (1)
+#define ISCSI_MAX_VAL_MAX_OUTSTANDING_R2T   (0xff)
+
+/* iSCSI reserved params */
+#define ISCSI_ITT_ALL_ONES     (0xffffffff)
+#define ISCSI_TTT_ALL_ONES     (0xffffffff)
+
+#define ISCSI_OPTION_1_OFF_CHIP_TCP 1
+#define ISCSI_OPTION_2_ON_CHIP_TCP 2
+
+#define ISCSI_INITIATOR_MODE 0
+#define ISCSI_TARGET_MODE 1
+
+/* iSCSI request op codes */
+#define ISCSI_OPCODE_NOP_OUT_NO_IMM                     (0)
+#define ISCSI_OPCODE_NOP_OUT                            ( \
+               ISCSI_OPCODE_NOP_OUT_NO_IMM | 0x40)
+#define ISCSI_OPCODE_SCSI_CMD_NO_IMM            (1)
+#define ISCSI_OPCODE_SCSI_CMD                           ( \
+               ISCSI_OPCODE_SCSI_CMD_NO_IMM | 0x40)
+#define ISCSI_OPCODE_TMF_REQUEST_NO_IMM         (2)
+#define ISCSI_OPCODE_TMF_REQUEST                        ( \
+               ISCSI_OPCODE_TMF_REQUEST_NO_IMM | 0x40)
+#define ISCSI_OPCODE_LOGIN_REQUEST_NO_IMM       (3)
+#define ISCSI_OPCODE_LOGIN_REQUEST                      ( \
+               ISCSI_OPCODE_LOGIN_REQUEST_NO_IMM | 0x40)
+#define ISCSI_OPCODE_TEXT_REQUEST_NO_IMM        (4)
+#define ISCSI_OPCODE_TEXT_REQUEST                       ( \
+               ISCSI_OPCODE_TEXT_REQUEST_NO_IMM | 0x40)
+#define ISCSI_OPCODE_DATA_OUT                           (5)
+#define ISCSI_OPCODE_LOGOUT_REQUEST_NO_IMM      (6)
+#define ISCSI_OPCODE_LOGOUT_REQUEST                     ( \
+               ISCSI_OPCODE_LOGOUT_REQUEST_NO_IMM | 0x40)
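
Each *_NO_IMM value is the 6-bit iSCSI opcode; OR-ing in 0x40 sets the standard immediate-delivery (I) bit of the BHS first byte, so e.g. ISCSI_OPCODE_LOGIN_REQUEST works out to 0x43 while its non-immediate form is 0x03. An illustrative check, not part of the patch:

	/* Non-zero when the opcode byte has the immediate (I) bit set. */
	#define EXAMPLE_ISCSI_OPCODE_IS_IMM(op)	(!!((op) & 0x40))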
+
+/* iSCSI response/messages op codes */
+#define ISCSI_OPCODE_NOP_IN             (0x20)
+#define ISCSI_OPCODE_SCSI_RESPONSE      (0x21)
+#define ISCSI_OPCODE_TMF_RESPONSE       (0x22)
+#define ISCSI_OPCODE_LOGIN_RESPONSE     (0x23)
+#define ISCSI_OPCODE_TEXT_RESPONSE      (0x24)
+#define ISCSI_OPCODE_DATA_IN            (0x25)
+#define ISCSI_OPCODE_LOGOUT_RESPONSE    (0x26)
+#define ISCSI_OPCODE_R2T                (0x31)
+#define ISCSI_OPCODE_ASYNC_MSG          (0x32)
+#define ISCSI_OPCODE_REJECT             (0x3f)
+
+/* iSCSI stages */
+#define ISCSI_STAGE_SECURITY_NEGOTIATION            (0)
+#define ISCSI_STAGE_LOGIN_OPERATIONAL_NEGOTIATION   (1)
+#define ISCSI_STAGE_FULL_FEATURE_PHASE              (3)
+
+/* iSCSI CQE errors */
+#define CQE_ERROR_BITMAP_DATA_DIGEST          (0x08)
+#define CQE_ERROR_BITMAP_RCV_ON_INVALID_CONN  (0x10)
+#define CQE_ERROR_BITMAP_DATA_TRUNCATED       (0x20)
+
+struct cqe_error_bitmap {
+       u8 cqe_error_status_bits;
+#define CQE_ERROR_BITMAP_DIF_ERR_BITS_MASK         0x7
+#define CQE_ERROR_BITMAP_DIF_ERR_BITS_SHIFT        0
+#define CQE_ERROR_BITMAP_DATA_DIGEST_ERR_MASK      0x1
+#define CQE_ERROR_BITMAP_DATA_DIGEST_ERR_SHIFT     3
+#define CQE_ERROR_BITMAP_RCV_ON_INVALID_CONN_MASK  0x1
+#define CQE_ERROR_BITMAP_RCV_ON_INVALID_CONN_SHIFT 4
+#define CQE_ERROR_BITMAP_DATA_TRUNCATED_ERR_MASK   0x1
+#define CQE_ERROR_BITMAP_DATA_TRUNCATED_ERR_SHIFT  5
+#define CQE_ERROR_BITMAP_UNDER_RUN_ERR_MASK        0x1
+#define CQE_ERROR_BITMAP_UNDER_RUN_ERR_SHIFT       6
+#define CQE_ERROR_BITMAP_RESERVED2_MASK            0x1
+#define CQE_ERROR_BITMAP_RESERVED2_SHIFT           7
+};
+
+union cqe_error_status {
+       u8 error_status;
+       struct cqe_error_bitmap error_bits;
+};
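
The union lets a completion carry either the raw status byte or its decomposed bitmap; individual conditions are then recovered with the mask/shift pairs above. A small sketch with an illustrative function name (bool being the usual kernel type), not part of the patch:

	static bool example_cqe_data_digest_err(union cqe_error_status status)
	{
		return (status.error_status >>
			CQE_ERROR_BITMAP_DATA_DIGEST_ERR_SHIFT) &
		       CQE_ERROR_BITMAP_DATA_DIGEST_ERR_MASK;
	}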
+
+struct data_hdr {
+       __le32 data[12];
+};
+
+struct iscsi_async_msg_hdr {
+       __le16 reserved0;
+       u8 flags_attr;
+#define ISCSI_ASYNC_MSG_HDR_RSRV_MASK           0x7F
+#define ISCSI_ASYNC_MSG_HDR_RSRV_SHIFT          0
+#define ISCSI_ASYNC_MSG_HDR_CONST1_MASK         0x1
+#define ISCSI_ASYNC_MSG_HDR_CONST1_SHIFT        7
+       u8 opcode;
+       __le32 hdr_second_dword;
+#define ISCSI_ASYNC_MSG_HDR_DATA_SEG_LEN_MASK   0xFFFFFF
+#define ISCSI_ASYNC_MSG_HDR_DATA_SEG_LEN_SHIFT  0
+#define ISCSI_ASYNC_MSG_HDR_TOTAL_AHS_LEN_MASK  0xFF
+#define ISCSI_ASYNC_MSG_HDR_TOTAL_AHS_LEN_SHIFT 24
+       struct regpair lun;
+       __le32 all_ones;
+       __le32 reserved1;
+       __le32 stat_sn;
+       __le32 exp_cmd_sn;
+       __le32 max_cmd_sn;
+       __le16 param1_rsrv;
+       u8 async_vcode;
+       u8 async_event;
+       __le16 param3_rsrv;
+       __le16 param2_rsrv;
+       __le32 reserved7;
+};
+
+struct iscsi_sge {
+       struct regpair sge_addr;
+       __le16 sge_len;
+       __le16 reserved0;
+       __le32 reserved1;
+};
+
+struct iscsi_cached_sge_ctx {
+       struct iscsi_sge sge;
+       struct regpair reserved;
+       __le32 dsgl_curr_offset[2];
+};
+
+struct iscsi_cmd_hdr {
+       __le16 reserved1;
+       u8 flags_attr;
+#define ISCSI_CMD_HDR_ATTR_MASK           0x7
+#define ISCSI_CMD_HDR_ATTR_SHIFT          0
+#define ISCSI_CMD_HDR_RSRV_MASK           0x3
+#define ISCSI_CMD_HDR_RSRV_SHIFT          3
+#define ISCSI_CMD_HDR_WRITE_MASK          0x1
+#define ISCSI_CMD_HDR_WRITE_SHIFT         5
+#define ISCSI_CMD_HDR_READ_MASK           0x1
+#define ISCSI_CMD_HDR_READ_SHIFT          6
+#define ISCSI_CMD_HDR_FINAL_MASK          0x1
+#define ISCSI_CMD_HDR_FINAL_SHIFT         7
+       u8 opcode;
+       __le32 hdr_second_dword;
+#define ISCSI_CMD_HDR_DATA_SEG_LEN_MASK   0xFFFFFF
+#define ISCSI_CMD_HDR_DATA_SEG_LEN_SHIFT  0
+#define ISCSI_CMD_HDR_TOTAL_AHS_LEN_MASK  0xFF
+#define ISCSI_CMD_HDR_TOTAL_AHS_LEN_SHIFT 24
+       struct regpair lun;
+       __le32 itt;
+       __le32 expected_transfer_length;
+       __le32 cmd_sn;
+       __le32 exp_stat_sn;
+       __le32 cdb[4];
+};
+
+struct iscsi_common_hdr {
+       u8 hdr_status;
+       u8 hdr_response;
+       u8 hdr_flags;
+       u8 hdr_first_byte;
+#define ISCSI_COMMON_HDR_OPCODE_MASK         0x3F
+#define ISCSI_COMMON_HDR_OPCODE_SHIFT        0
+#define ISCSI_COMMON_HDR_IMM_MASK            0x1
+#define ISCSI_COMMON_HDR_IMM_SHIFT           6
+#define ISCSI_COMMON_HDR_RSRV_MASK           0x1
+#define ISCSI_COMMON_HDR_RSRV_SHIFT          7
+       __le32 hdr_second_dword;
+#define ISCSI_COMMON_HDR_DATA_SEG_LEN_MASK   0xFFFFFF
+#define ISCSI_COMMON_HDR_DATA_SEG_LEN_SHIFT  0
+#define ISCSI_COMMON_HDR_TOTAL_AHS_LEN_MASK  0xFF
+#define ISCSI_COMMON_HDR_TOTAL_AHS_LEN_SHIFT 24
+       __le32 lun_reserved[4];
+       __le32 data[6];
+};
+
+struct iscsi_conn_offload_params {
+       struct regpair sq_pbl_addr;
+       struct regpair r2tq_pbl_addr;
+       struct regpair xhq_pbl_addr;
+       struct regpair uhq_pbl_addr;
+       __le32 initial_ack;
+       __le16 physical_q0;
+       __le16 physical_q1;
+       u8 flags;
+#define ISCSI_CONN_OFFLOAD_PARAMS_TCP_ON_CHIP_1B_MASK  0x1
+#define ISCSI_CONN_OFFLOAD_PARAMS_TCP_ON_CHIP_1B_SHIFT 0
+#define ISCSI_CONN_OFFLOAD_PARAMS_TARGET_MODE_MASK     0x1
+#define ISCSI_CONN_OFFLOAD_PARAMS_TARGET_MODE_SHIFT    1
+#define ISCSI_CONN_OFFLOAD_PARAMS_RESERVED1_MASK       0x3F
+#define ISCSI_CONN_OFFLOAD_PARAMS_RESERVED1_SHIFT      2
+       u8 pbl_page_size_log;
+       u8 pbe_page_size_log;
+       u8 default_cq;
+       __le32 stat_sn;
+};
+
+struct iscsi_slow_path_hdr {
+       u8 op_code;
+       u8 flags;
+#define ISCSI_SLOW_PATH_HDR_RESERVED0_MASK   0xF
+#define ISCSI_SLOW_PATH_HDR_RESERVED0_SHIFT  0
+#define ISCSI_SLOW_PATH_HDR_LAYER_CODE_MASK  0x7
+#define ISCSI_SLOW_PATH_HDR_LAYER_CODE_SHIFT 4
+#define ISCSI_SLOW_PATH_HDR_RESERVED1_MASK   0x1
+#define ISCSI_SLOW_PATH_HDR_RESERVED1_SHIFT  7
+};
+
+struct iscsi_conn_update_ramrod_params {
+       struct iscsi_slow_path_hdr hdr;
+       __le16 conn_id;
+       __le32 fw_cid;
+       u8 flags;
+#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_HD_EN_MASK           0x1
+#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_HD_EN_SHIFT          0
+#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_DD_EN_MASK           0x1
+#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_DD_EN_SHIFT          1
+#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_INITIAL_R2T_MASK     0x1
+#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_INITIAL_R2T_SHIFT    2
+#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_IMMEDIATE_DATA_MASK  0x1
+#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_IMMEDIATE_DATA_SHIFT 3
+#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_RESERVED1_MASK       0xF
+#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_RESERVED1_SHIFT      4
+       u8 reserved0[3];
+       __le32 max_seq_size;
+       __le32 max_send_pdu_length;
+       __le32 max_recv_pdu_length;
+       __le32 first_seq_length;
+       __le32 exp_stat_sn;
+};
+
+struct iscsi_ext_cdb_cmd_hdr {
+       __le16 reserved1;
+       u8 flags_attr;
+#define ISCSI_EXT_CDB_CMD_HDR_ATTR_MASK          0x7
+#define ISCSI_EXT_CDB_CMD_HDR_ATTR_SHIFT         0
+#define ISCSI_EXT_CDB_CMD_HDR_RSRV_MASK          0x3
+#define ISCSI_EXT_CDB_CMD_HDR_RSRV_SHIFT         3
+#define ISCSI_EXT_CDB_CMD_HDR_WRITE_MASK         0x1
+#define ISCSI_EXT_CDB_CMD_HDR_WRITE_SHIFT        5
+#define ISCSI_EXT_CDB_CMD_HDR_READ_MASK          0x1
+#define ISCSI_EXT_CDB_CMD_HDR_READ_SHIFT         6
+#define ISCSI_EXT_CDB_CMD_HDR_FINAL_MASK         0x1
+#define ISCSI_EXT_CDB_CMD_HDR_FINAL_SHIFT        7
+       u8 opcode;
+       __le32 hdr_second_dword;
+#define ISCSI_EXT_CDB_CMD_HDR_DATA_SEG_LEN_MASK  0xFFFFFF
+#define ISCSI_EXT_CDB_CMD_HDR_DATA_SEG_LEN_SHIFT 0
+#define ISCSI_EXT_CDB_CMD_HDR_CDB_SIZE_MASK      0xFF
+#define ISCSI_EXT_CDB_CMD_HDR_CDB_SIZE_SHIFT     24
+       struct regpair lun;
+       __le32 itt;
+       __le32 expected_transfer_length;
+       __le32 cmd_sn;
+       __le32 exp_stat_sn;
+       struct iscsi_sge cdb_sge;
+};
+
+struct iscsi_login_req_hdr {
+       u8 version_min;
+       u8 version_max;
+       u8 flags_attr;
+#define ISCSI_LOGIN_REQ_HDR_NSG_MASK            0x3
+#define ISCSI_LOGIN_REQ_HDR_NSG_SHIFT           0
+#define ISCSI_LOGIN_REQ_HDR_CSG_MASK            0x3
+#define ISCSI_LOGIN_REQ_HDR_CSG_SHIFT           2
+#define ISCSI_LOGIN_REQ_HDR_RSRV_MASK           0x3
+#define ISCSI_LOGIN_REQ_HDR_RSRV_SHIFT          4
+#define ISCSI_LOGIN_REQ_HDR_C_MASK              0x1
+#define ISCSI_LOGIN_REQ_HDR_C_SHIFT             6
+#define ISCSI_LOGIN_REQ_HDR_T_MASK              0x1
+#define ISCSI_LOGIN_REQ_HDR_T_SHIFT             7
+       u8 opcode;
+       __le32 hdr_second_dword;
+#define ISCSI_LOGIN_REQ_HDR_DATA_SEG_LEN_MASK   0xFFFFFF
+#define ISCSI_LOGIN_REQ_HDR_DATA_SEG_LEN_SHIFT  0
+#define ISCSI_LOGIN_REQ_HDR_TOTAL_AHS_LEN_MASK  0xFF
+#define ISCSI_LOGIN_REQ_HDR_TOTAL_AHS_LEN_SHIFT 24
+       __le32 isid_TABC;
+       __le16 tsih;
+       __le16 isid_d;
+       __le32 itt;
+       __le16 reserved1;
+       __le16 cid;
+       __le32 cmd_sn;
+       __le32 exp_stat_sn;
+       __le32 reserved2[4];
+};
+
+struct iscsi_logout_req_hdr {
+       __le16 reserved0;
+       u8 reason_code;
+       u8 opcode;
+       __le32 reserved1;
+       __le32 reserved2[2];
+       __le32 itt;
+       __le16 reserved3;
+       __le16 cid;
+       __le32 cmd_sn;
+       __le32 exp_stat_sn;
+       __le32 reserved4[4];
+};
+
+struct iscsi_data_out_hdr {
+       __le16 reserved1;
+       u8 flags_attr;
+#define ISCSI_DATA_OUT_HDR_RSRV_MASK   0x7F
+#define ISCSI_DATA_OUT_HDR_RSRV_SHIFT  0
+#define ISCSI_DATA_OUT_HDR_FINAL_MASK  0x1
+#define ISCSI_DATA_OUT_HDR_FINAL_SHIFT 7
+       u8 opcode;
+       __le32 reserved2;
+       struct regpair lun;
+       __le32 itt;
+       __le32 ttt;
+       __le32 reserved3;
+       __le32 exp_stat_sn;
+       __le32 reserved4;
+       __le32 data_sn;
+       __le32 buffer_offset;
+       __le32 reserved5;
+};
+
+struct iscsi_data_in_hdr {
+       u8 status_rsvd;
+       u8 reserved1;
+       u8 flags;
+#define ISCSI_DATA_IN_HDR_STATUS_MASK     0x1
+#define ISCSI_DATA_IN_HDR_STATUS_SHIFT    0
+#define ISCSI_DATA_IN_HDR_UNDERFLOW_MASK  0x1
+#define ISCSI_DATA_IN_HDR_UNDERFLOW_SHIFT 1
+#define ISCSI_DATA_IN_HDR_OVERFLOW_MASK   0x1
+#define ISCSI_DATA_IN_HDR_OVERFLOW_SHIFT  2
+#define ISCSI_DATA_IN_HDR_RSRV_MASK       0x7
+#define ISCSI_DATA_IN_HDR_RSRV_SHIFT      3
+#define ISCSI_DATA_IN_HDR_ACK_MASK        0x1
+#define ISCSI_DATA_IN_HDR_ACK_SHIFT       6
+#define ISCSI_DATA_IN_HDR_FINAL_MASK      0x1
+#define ISCSI_DATA_IN_HDR_FINAL_SHIFT     7
+       u8 opcode;
+       __le32 reserved2;
+       struct regpair lun;
+       __le32 itt;
+       __le32 ttt;
+       __le32 stat_sn;
+       __le32 exp_cmd_sn;
+       __le32 max_cmd_sn;
+       __le32 data_sn;
+       __le32 buffer_offset;
+       __le32 residual_count;
+};
+
+struct iscsi_r2t_hdr {
+       u8 reserved0[3];
+       u8 opcode;
+       __le32 reserved2;
+       struct regpair lun;
+       __le32 itt;
+       __le32 ttt;
+       __le32 stat_sn;
+       __le32 exp_cmd_sn;
+       __le32 max_cmd_sn;
+       __le32 r2t_sn;
+       __le32 buffer_offset;
+       __le32 desired_data_trns_len;
+};
+
+struct iscsi_nop_out_hdr {
+       __le16 reserved1;
+       u8 flags_attr;
+#define ISCSI_NOP_OUT_HDR_RSRV_MASK    0x7F
+#define ISCSI_NOP_OUT_HDR_RSRV_SHIFT   0
+#define ISCSI_NOP_OUT_HDR_CONST1_MASK  0x1
+#define ISCSI_NOP_OUT_HDR_CONST1_SHIFT 7
+       u8 opcode;
+       __le32 reserved2;
+       struct regpair lun;
+       __le32 itt;
+       __le32 ttt;
+       __le32 cmd_sn;
+       __le32 exp_stat_sn;
+       __le32 reserved3;
+       __le32 reserved4;
+       __le32 reserved5;
+       __le32 reserved6;
+};
+
+struct iscsi_nop_in_hdr {
+       __le16 reserved0;
+       u8 flags_attr;
+#define ISCSI_NOP_IN_HDR_RSRV_MASK           0x7F
+#define ISCSI_NOP_IN_HDR_RSRV_SHIFT          0
+#define ISCSI_NOP_IN_HDR_CONST1_MASK         0x1
+#define ISCSI_NOP_IN_HDR_CONST1_SHIFT        7
+       u8 opcode;
+       __le32 hdr_second_dword;
+#define ISCSI_NOP_IN_HDR_DATA_SEG_LEN_MASK   0xFFFFFF
+#define ISCSI_NOP_IN_HDR_DATA_SEG_LEN_SHIFT  0
+#define ISCSI_NOP_IN_HDR_TOTAL_AHS_LEN_MASK  0xFF
+#define ISCSI_NOP_IN_HDR_TOTAL_AHS_LEN_SHIFT 24
+       struct regpair lun;
+       __le32 itt;
+       __le32 ttt;
+       __le32 stat_sn;
+       __le32 exp_cmd_sn;
+       __le32 max_cmd_sn;
+       __le32 reserved5;
+       __le32 reserved6;
+       __le32 reserved7;
+};
+
+struct iscsi_login_response_hdr {
+       u8 version_active;
+       u8 version_max;
+       u8 flags_attr;
+#define ISCSI_LOGIN_RESPONSE_HDR_NSG_MASK            0x3
+#define ISCSI_LOGIN_RESPONSE_HDR_NSG_SHIFT           0
+#define ISCSI_LOGIN_RESPONSE_HDR_CSG_MASK            0x3
+#define ISCSI_LOGIN_RESPONSE_HDR_CSG_SHIFT           2
+#define ISCSI_LOGIN_RESPONSE_HDR_RSRV_MASK           0x3
+#define ISCSI_LOGIN_RESPONSE_HDR_RSRV_SHIFT          4
+#define ISCSI_LOGIN_RESPONSE_HDR_C_MASK              0x1
+#define ISCSI_LOGIN_RESPONSE_HDR_C_SHIFT             6
+#define ISCSI_LOGIN_RESPONSE_HDR_T_MASK              0x1
+#define ISCSI_LOGIN_RESPONSE_HDR_T_SHIFT             7
+       u8 opcode;
+       __le32 hdr_second_dword;
+#define ISCSI_LOGIN_RESPONSE_HDR_DATA_SEG_LEN_MASK   0xFFFFFF
+#define ISCSI_LOGIN_RESPONSE_HDR_DATA_SEG_LEN_SHIFT  0
+#define ISCSI_LOGIN_RESPONSE_HDR_TOTAL_AHS_LEN_MASK  0xFF
+#define ISCSI_LOGIN_RESPONSE_HDR_TOTAL_AHS_LEN_SHIFT 24
+       __le32 isid_TABC;
+       __le16 tsih;
+       __le16 isid_d;
+       __le32 itt;
+       __le32 reserved1;
+       __le32 stat_sn;
+       __le32 exp_cmd_sn;
+       __le32 max_cmd_sn;
+       __le16 reserved2;
+       u8 status_detail;
+       u8 status_class;
+       __le32 reserved4[2];
+};
+
+struct iscsi_logout_response_hdr {
+       u8 reserved1;
+       u8 response;
+       u8 flags;
+       u8 opcode;
+       __le32 hdr_second_dword;
+#define ISCSI_LOGOUT_RESPONSE_HDR_DATA_SEG_LEN_MASK   0xFFFFFF
+#define ISCSI_LOGOUT_RESPONSE_HDR_DATA_SEG_LEN_SHIFT  0
+#define ISCSI_LOGOUT_RESPONSE_HDR_TOTAL_AHS_LEN_MASK  0xFF
+#define ISCSI_LOGOUT_RESPONSE_HDR_TOTAL_AHS_LEN_SHIFT 24
+       __le32 reserved2[2];
+       __le32 itt;
+       __le32 reserved3;
+       __le32 stat_sn;
+       __le32 exp_cmd_sn;
+       __le32 max_cmd_sn;
+       __le32 reserved4;
+       __le16 time2retain;
+       __le16 time2wait;
+       __le32 reserved5[1];
+};
+
+struct iscsi_text_request_hdr {
+       __le16 reserved0;
+       u8 flags_attr;
+#define ISCSI_TEXT_REQUEST_HDR_RSRV_MASK           0x3F
+#define ISCSI_TEXT_REQUEST_HDR_RSRV_SHIFT          0
+#define ISCSI_TEXT_REQUEST_HDR_C_MASK              0x1
+#define ISCSI_TEXT_REQUEST_HDR_C_SHIFT             6
+#define ISCSI_TEXT_REQUEST_HDR_F_MASK              0x1
+#define ISCSI_TEXT_REQUEST_HDR_F_SHIFT             7
+       u8 opcode;
+       __le32 hdr_second_dword;
+#define ISCSI_TEXT_REQUEST_HDR_DATA_SEG_LEN_MASK   0xFFFFFF
+#define ISCSI_TEXT_REQUEST_HDR_DATA_SEG_LEN_SHIFT  0
+#define ISCSI_TEXT_REQUEST_HDR_TOTAL_AHS_LEN_MASK  0xFF
+#define ISCSI_TEXT_REQUEST_HDR_TOTAL_AHS_LEN_SHIFT 24
+       struct regpair lun;
+       __le32 itt;
+       __le32 ttt;
+       __le32 cmd_sn;
+       __le32 exp_stat_sn;
+       __le32 reserved4[4];
+};
+
+struct iscsi_text_response_hdr {
+       __le16 reserved1;
+       u8 flags;
+#define ISCSI_TEXT_RESPONSE_HDR_RSRV_MASK           0x3F
+#define ISCSI_TEXT_RESPONSE_HDR_RSRV_SHIFT          0
+#define ISCSI_TEXT_RESPONSE_HDR_C_MASK              0x1
+#define ISCSI_TEXT_RESPONSE_HDR_C_SHIFT             6
+#define ISCSI_TEXT_RESPONSE_HDR_F_MASK              0x1
+#define ISCSI_TEXT_RESPONSE_HDR_F_SHIFT             7
+       u8 opcode;
+       __le32 hdr_second_dword;
+#define ISCSI_TEXT_RESPONSE_HDR_DATA_SEG_LEN_MASK   0xFFFFFF
+#define ISCSI_TEXT_RESPONSE_HDR_DATA_SEG_LEN_SHIFT  0
+#define ISCSI_TEXT_RESPONSE_HDR_TOTAL_AHS_LEN_MASK  0xFF
+#define ISCSI_TEXT_RESPONSE_HDR_TOTAL_AHS_LEN_SHIFT 24
+       struct regpair lun;
+       __le32 itt;
+       __le32 ttt;
+       __le32 stat_sn;
+       __le32 exp_cmd_sn;
+       __le32 max_cmd_sn;
+       __le32 reserved4[3];
+};
+
+struct iscsi_tmf_request_hdr {
+       __le16 reserved0;
+       u8 function;
+       u8 opcode;
+       __le32 hdr_second_dword;
+#define ISCSI_TMF_REQUEST_HDR_DATA_SEG_LEN_MASK   0xFFFFFF
+#define ISCSI_TMF_REQUEST_HDR_DATA_SEG_LEN_SHIFT  0
+#define ISCSI_TMF_REQUEST_HDR_TOTAL_AHS_LEN_MASK  0xFF
+#define ISCSI_TMF_REQUEST_HDR_TOTAL_AHS_LEN_SHIFT 24
+       struct regpair lun;
+       __le32 itt;
+       __le32 rtt;
+       __le32 cmd_sn;
+       __le32 exp_stat_sn;
+       __le32 ref_cmd_sn;
+       __le32 exp_data_sn;
+       __le32 reserved4[2];
+};
+
+struct iscsi_tmf_response_hdr {
+       u8 reserved2;
+       u8 hdr_response;
+       u8 hdr_flags;
+       u8 opcode;
+       __le32 hdr_second_dword;
+#define ISCSI_TMF_RESPONSE_HDR_DATA_SEG_LEN_MASK   0xFFFFFF
+#define ISCSI_TMF_RESPONSE_HDR_DATA_SEG_LEN_SHIFT  0
+#define ISCSI_TMF_RESPONSE_HDR_TOTAL_AHS_LEN_MASK  0xFF
+#define ISCSI_TMF_RESPONSE_HDR_TOTAL_AHS_LEN_SHIFT 24
+       struct regpair reserved0;
+       __le32 itt;
+       __le32 rtt;
+       __le32 stat_sn;
+       __le32 exp_cmd_sn;
+       __le32 max_cmd_sn;
+       __le32 reserved4[3];
+};
+
+struct iscsi_response_hdr {
+       u8 hdr_status;
+       u8 hdr_response;
+       u8 hdr_flags;
+       u8 opcode;
+       __le32 hdr_second_dword;
+#define ISCSI_RESPONSE_HDR_DATA_SEG_LEN_MASK   0xFFFFFF
+#define ISCSI_RESPONSE_HDR_DATA_SEG_LEN_SHIFT  0
+#define ISCSI_RESPONSE_HDR_TOTAL_AHS_LEN_MASK  0xFF
+#define ISCSI_RESPONSE_HDR_TOTAL_AHS_LEN_SHIFT 24
+       struct regpair lun;
+       __le32 itt;
+       __le32 snack_tag;
+       __le32 stat_sn;
+       __le32 exp_cmd_sn;
+       __le32 max_cmd_sn;
+       __le32 exp_data_sn;
+       __le32 bi_residual_count;
+       __le32 residual_count;
+};
+
+struct iscsi_reject_hdr {
+       u8 reserved4;
+       u8 hdr_reason;
+       u8 hdr_flags;
+       u8 opcode;
+       __le32 hdr_second_dword;
+#define ISCSI_REJECT_HDR_DATA_SEG_LEN_MASK   0xFFFFFF
+#define ISCSI_REJECT_HDR_DATA_SEG_LEN_SHIFT  0
+#define ISCSI_REJECT_HDR_TOTAL_AHS_LEN_MASK  0xFF
+#define ISCSI_REJECT_HDR_TOTAL_AHS_LEN_SHIFT 24
+       struct regpair reserved0;
+       __le32 reserved1;
+       __le32 reserved2;
+       __le32 stat_sn;
+       __le32 exp_cmd_sn;
+       __le32 max_cmd_sn;
+       __le32 data_sn;
+       __le32 reserved3[2];
+};
+
+union iscsi_task_hdr {
+       struct iscsi_common_hdr common;
+       struct data_hdr data;
+       struct iscsi_cmd_hdr cmd;
+       struct iscsi_ext_cdb_cmd_hdr ext_cdb_cmd;
+       struct iscsi_login_req_hdr login_req;
+       struct iscsi_logout_req_hdr logout_req;
+       struct iscsi_data_out_hdr data_out;
+       struct iscsi_data_in_hdr data_in;
+       struct iscsi_r2t_hdr r2t;
+       struct iscsi_nop_out_hdr nop_out;
+       struct iscsi_nop_in_hdr nop_in;
+       struct iscsi_login_response_hdr login_response;
+       struct iscsi_logout_response_hdr logout_response;
+       struct iscsi_text_request_hdr text_request;
+       struct iscsi_text_response_hdr text_response;
+       struct iscsi_tmf_request_hdr tmf_request;
+       struct iscsi_tmf_response_hdr tmf_response;
+       struct iscsi_response_hdr response;
+       struct iscsi_reject_hdr reject;
+       struct iscsi_async_msg_hdr async_msg;
+};
+
+struct iscsi_cqe_common {
+       __le16 conn_id;
+       u8 cqe_type;
+       union cqe_error_status error_bitmap;
+       __le32 reserved[3];
+       union iscsi_task_hdr iscsi_hdr;
+};
+
+struct iscsi_cqe_solicited {
+       __le16 conn_id;
+       u8 cqe_type;
+       union cqe_error_status error_bitmap;
+       __le16 itid;
+       u8 task_type;
+       u8 fw_dbg_field;
+       __le32 reserved1[2];
+       union iscsi_task_hdr iscsi_hdr;
+};
+
+struct iscsi_cqe_unsolicited {
+       __le16 conn_id;
+       u8 cqe_type;
+       union cqe_error_status error_bitmap;
+       __le16 reserved0;
+       u8 reserved1;
+       u8 unsol_cqe_type;
+       struct regpair rqe_opaque;
+       union iscsi_task_hdr iscsi_hdr;
+};
+
+union iscsi_cqe {
+       struct iscsi_cqe_common cqe_common;
+       struct iscsi_cqe_solicited cqe_solicited;
+       struct iscsi_cqe_unsolicited cqe_unsolicited;
+};
+
+enum iscsi_cqes_type {
+       ISCSI_CQE_TYPE_SOLICITED = 1,
+       ISCSI_CQE_TYPE_UNSOLICITED,
+       ISCSI_CQE_TYPE_SOLICITED_WITH_SENSE,
+       ISCSI_CQE_TYPE_TASK_CLEANUP,
+       ISCSI_CQE_TYPE_DUMMY,
+       MAX_ISCSI_CQES_TYPE
+};
+
+enum iscsi_cqe_unsolicited_type {
+       ISCSI_CQE_UNSOLICITED_NONE,
+       ISCSI_CQE_UNSOLICITED_SINGLE,
+       ISCSI_CQE_UNSOLICITED_FIRST,
+       ISCSI_CQE_UNSOLICITED_MIDDLE,
+       ISCSI_CQE_UNSOLICITED_LAST,
+       MAX_ISCSI_CQE_UNSOLICITED_TYPE
+};
+
+struct iscsi_virt_sgl_ctx {
+       struct regpair sgl_base;
+       struct regpair dsgl_base;
+       __le32 sgl_initial_offset;
+       __le32 dsgl_initial_offset;
+       __le32 dsgl_curr_offset[2];
+};
+
+struct iscsi_sgl_var_params {
+       u8 sgl_ptr;
+       u8 dsgl_ptr;
+       __le16 sge_offset;
+       __le16 dsge_offset;
+};
+
+struct iscsi_phys_sgl_ctx {
+       struct regpair sgl_base;
+       struct regpair dsgl_base;
+       u8 sgl_size;
+       u8 dsgl_size;
+       __le16 reserved;
+       struct iscsi_sgl_var_params var_params[2];
+};
+
+union iscsi_data_desc_ctx {
+       struct iscsi_virt_sgl_ctx virt_sgl;
+       struct iscsi_phys_sgl_ctx phys_sgl;
+       struct iscsi_cached_sge_ctx cached_sge;
+};
+
+struct iscsi_debug_modes {
+       u8 flags;
+#define ISCSI_DEBUG_MODES_ASSERT_IF_RX_CONN_ERROR_MASK         0x1
+#define ISCSI_DEBUG_MODES_ASSERT_IF_RX_CONN_ERROR_SHIFT        0
+#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_RESET_MASK            0x1
+#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_RESET_SHIFT           1
+#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_FIN_MASK              0x1
+#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_FIN_SHIFT             2
+#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_CLEANUP_MASK          0x1
+#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_CLEANUP_SHIFT         3
+#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_REJECT_OR_ASYNC_MASK  0x1
+#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_REJECT_OR_ASYNC_SHIFT 4
+#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_NOP_MASK              0x1
+#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_NOP_SHIFT             5
+#define ISCSI_DEBUG_MODES_RESERVED0_MASK                       0x3
+#define ISCSI_DEBUG_MODES_RESERVED0_SHIFT                      6
+};
+
+struct iscsi_dif_flags {
+       u8 flags;
+#define ISCSI_DIF_FLAGS_PROT_INTERVAL_SIZE_LOG_MASK  0xF
+#define ISCSI_DIF_FLAGS_PROT_INTERVAL_SIZE_LOG_SHIFT 0
+#define ISCSI_DIF_FLAGS_DIF_TO_PEER_MASK             0x1
+#define ISCSI_DIF_FLAGS_DIF_TO_PEER_SHIFT            4
+#define ISCSI_DIF_FLAGS_HOST_INTERFACE_MASK          0x7
+#define ISCSI_DIF_FLAGS_HOST_INTERFACE_SHIFT         5
+};
+
+enum iscsi_eqe_opcode {
+       ISCSI_EVENT_TYPE_INIT_FUNC = 0,
+       ISCSI_EVENT_TYPE_DESTROY_FUNC,
+       ISCSI_EVENT_TYPE_OFFLOAD_CONN,
+       ISCSI_EVENT_TYPE_UPDATE_CONN,
+       ISCSI_EVENT_TYPE_CLEAR_SQ,
+       ISCSI_EVENT_TYPE_TERMINATE_CONN,
+       ISCSI_EVENT_TYPE_ASYN_CONNECT_COMPLETE,
+       ISCSI_EVENT_TYPE_ASYN_TERMINATE_DONE,
+       RESERVED8,
+       RESERVED9,
+       ISCSI_EVENT_TYPE_START_OF_ERROR_TYPES = 10,
+       ISCSI_EVENT_TYPE_ASYN_ABORT_RCVD,
+       ISCSI_EVENT_TYPE_ASYN_CLOSE_RCVD,
+       ISCSI_EVENT_TYPE_ASYN_SYN_RCVD,
+       ISCSI_EVENT_TYPE_ASYN_MAX_RT_TIME,
+       ISCSI_EVENT_TYPE_ASYN_MAX_RT_CNT,
+       ISCSI_EVENT_TYPE_ASYN_MAX_KA_PROBES_CNT,
+       ISCSI_EVENT_TYPE_ASYN_FIN_WAIT2,
+       ISCSI_EVENT_TYPE_ISCSI_CONN_ERROR,
+       ISCSI_EVENT_TYPE_TCP_CONN_ERROR,
+       ISCSI_EVENT_TYPE_ASYN_DELETE_OOO_ISLES,
+       MAX_ISCSI_EQE_OPCODE
+};
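
Per the enum layout above, opcodes from ISCSI_EVENT_TYPE_START_OF_ERROR_TYPES (10) upward report asynchronous error conditions. A hedged classifier sketch built only on that ordering; the name is illustrative and the block is not part of the patch:

	static bool example_iscsi_eqe_is_error(enum iscsi_eqe_opcode op)
	{
		return op >= ISCSI_EVENT_TYPE_START_OF_ERROR_TYPES &&
		       op < MAX_ISCSI_EQE_OPCODE;
	}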
+
+enum iscsi_error_types {
+       ISCSI_STATUS_NONE = 0,
+       ISCSI_CQE_ERROR_UNSOLICITED_RCV_ON_INVALID_CONN = 1,
+       ISCSI_CONN_ERROR_TASK_CID_MISMATCH,
+       ISCSI_CONN_ERROR_TASK_NOT_VALID,
+       ISCSI_CONN_ERROR_RQ_RING_IS_FULL,
+       ISCSI_CONN_ERROR_CMDQ_RING_IS_FULL,
+       ISCSI_CONN_ERROR_HQE_CACHING_FAILED,
+       ISCSI_CONN_ERROR_HEADER_DIGEST_ERROR,
+       ISCSI_CONN_ERROR_LOCAL_COMPLETION_ERROR,
+       ISCSI_CONN_ERROR_DATA_OVERRUN,
+       ISCSI_CONN_ERROR_OUT_OF_SGES_ERROR,
+       ISCSI_CONN_ERROR_TCP_SEG_PROC_URG_ERROR,
+       ISCSI_CONN_ERROR_TCP_SEG_PROC_IP_OPTIONS_ERROR,
+       ISCSI_CONN_ERROR_TCP_SEG_PROC_CONNECT_INVALID_WS_OPTION,
+       ISCSI_CONN_ERROR_TCP_IP_FRAGMENT_ERROR,
+       ISCSI_CONN_ERROR_PROTOCOL_ERR_AHS_LEN,
+       ISCSI_CONN_ERROR_PROTOCOL_ERR_AHS_TYPE,
+       ISCSI_CONN_ERROR_PROTOCOL_ERR_ITT_OUT_OF_RANGE,
+       ISCSI_CONN_ERROR_PROTOCOL_ERR_TTT_OUT_OF_RANGE,
+       ISCSI_CONN_ERROR_PROTOCOL_ERR_DATA_SEG_LEN_EXCEEDS_PDU_SIZE,
+       ISCSI_CONN_ERROR_PROTOCOL_ERR_INVALID_OPCODE,
+       ISCSI_CONN_ERROR_PROTOCOL_ERR_INVALID_OPCODE_BEFORE_UPDATE,
+       ISCSI_CONN_ERROR_UNVALID_NOPIN_DSL,
+       ISCSI_CONN_ERROR_PROTOCOL_ERR_R2T_CARRIES_NO_DATA,
+       ISCSI_CONN_ERROR_PROTOCOL_ERR_DATA_SN,
+       ISCSI_CONN_ERROR_PROTOCOL_ERR_DATA_IN_TTT,
+       ISCSI_CONN_ERROR_PROTOCOL_ERR_DATA_OUT_ITT,
+       ISCSI_CONN_ERROR_PROTOCOL_ERR_R2T_TTT,
+       ISCSI_CONN_ERROR_PROTOCOL_ERR_R2T_BUFFER_OFFSET,
+       ISCSI_CONN_ERROR_PROTOCOL_ERR_BUFFER_OFFSET_OOO,
+       ISCSI_CONN_ERROR_PROTOCOL_ERR_R2T_SN,
+       ISCSI_CONN_ERROR_PROTOCOL_ERR_DESIRED_DATA_TRNS_LEN_0,
+       ISCSI_CONN_ERROR_PROTOCOL_ERR_DESIRED_DATA_TRNS_LEN_1,
+       ISCSI_CONN_ERROR_PROTOCOL_ERR_DESIRED_DATA_TRNS_LEN_2,
+       ISCSI_CONN_ERROR_PROTOCOL_ERR_LUN,
+       ISCSI_CONN_ERROR_PROTOCOL_ERR_F_BIT_ZERO,
+       ISCSI_CONN_ERROR_PROTOCOL_ERR_F_BIT_ZERO_S_BIT_ONE,
+       ISCSI_CONN_ERROR_PROTOCOL_ERR_EXP_STAT_SN,
+       ISCSI_CONN_ERROR_PROTOCOL_ERR_DSL_NOT_ZERO,
+       ISCSI_CONN_ERROR_PROTOCOL_ERR_INVALID_DSL,
+       ISCSI_CONN_ERROR_PROTOCOL_ERR_DATA_SEG_LEN_TOO_BIG,
+       ISCSI_CONN_ERROR_PROTOCOL_ERR_OUTSTANDING_R2T_COUNT,
+       ISCSI_CONN_ERROR_PROTOCOL_ERR_DIF_TX,
+       ISCSI_CONN_ERROR_SENSE_DATA_LENGTH,
+       ISCSI_CONN_ERROR_DATA_PLACEMENT_ERROR,
+       ISCSI_ERROR_UNKNOWN,
+       MAX_ISCSI_ERROR_TYPES
+};
+
+struct iscsi_mflags {
+       u8 mflags;
+#define ISCSI_MFLAGS_SLOW_IO_MASK     0x1
+#define ISCSI_MFLAGS_SLOW_IO_SHIFT    0
+#define ISCSI_MFLAGS_SINGLE_SGE_MASK  0x1
+#define ISCSI_MFLAGS_SINGLE_SGE_SHIFT 1
+#define ISCSI_MFLAGS_RESERVED_MASK    0x3F
+#define ISCSI_MFLAGS_RESERVED_SHIFT   2
+};
+
+struct iscsi_sgl {
+       struct regpair sgl_addr;
+       __le16 updated_sge_size;
+       __le16 updated_sge_offset;
+       __le32 byte_offset;
+};
+
+union iscsi_mstorm_sgl {
+       struct iscsi_sgl sgl_struct;
+       struct iscsi_sge single_sge;
+};
+
+enum iscsi_ramrod_cmd_id {
+       ISCSI_RAMROD_CMD_ID_UNUSED = 0,
+       ISCSI_RAMROD_CMD_ID_INIT_FUNC = 1,
+       ISCSI_RAMROD_CMD_ID_DESTROY_FUNC = 2,
+       ISCSI_RAMROD_CMD_ID_OFFLOAD_CONN = 3,
+       ISCSI_RAMROD_CMD_ID_UPDATE_CONN = 4,
+       ISCSI_RAMROD_CMD_ID_TERMINATION_CONN = 5,
+       ISCSI_RAMROD_CMD_ID_CLEAR_SQ = 6,
+       MAX_ISCSI_RAMROD_CMD_ID
+};
+
+struct iscsi_reg1 {
+       __le32 reg1_map;
+#define ISCSI_REG1_NUM_FAST_SGES_MASK  0x7
+#define ISCSI_REG1_NUM_FAST_SGES_SHIFT 0
+#define ISCSI_REG1_RESERVED1_MASK      0x1FFFFFFF
+#define ISCSI_REG1_RESERVED1_SHIFT     3
+};
+
+union iscsi_seq_num {
+       __le16 data_sn;
+       __le16 r2t_sn;
+};
+
+struct iscsi_spe_conn_offload {
+       struct iscsi_slow_path_hdr hdr;
+       __le16 conn_id;
+       __le32 fw_cid;
+       struct iscsi_conn_offload_params iscsi;
+       struct tcp_offload_params tcp;
+};
+
+struct iscsi_spe_conn_offload_option2 {
+       struct iscsi_slow_path_hdr hdr;
+       __le16 conn_id;
+       __le32 fw_cid;
+       struct iscsi_conn_offload_params iscsi;
+       struct tcp_offload_params_opt2 tcp;
+};
+
+struct iscsi_spe_conn_termination {
+       struct iscsi_slow_path_hdr hdr;
+       __le16 conn_id;
+       __le32 fw_cid;
+       u8 abortive;
+       u8 reserved0[7];
+       struct regpair queue_cnts_addr;
+       struct regpair query_params_addr;
+};
+
+struct iscsi_spe_func_dstry {
+       struct iscsi_slow_path_hdr hdr;
+       __le16 reserved0;
+       __le32 reserved1;
+};
+
+struct iscsi_spe_func_init {
+       struct iscsi_slow_path_hdr hdr;
+       __le16 half_way_close_timeout;
+       u8 num_sq_pages_in_ring;
+       u8 num_r2tq_pages_in_ring;
+       u8 num_uhq_pages_in_ring;
+       u8 ll2_rx_queue_id;
+       u8 ooo_enable;
+       struct iscsi_debug_modes debug_mode;
+       __le16 reserved1;
+       __le32 reserved2;
+       __le32 reserved3;
+       __le32 reserved4;
+       struct scsi_init_func_params func_params;
+       struct scsi_init_func_queues q_params;
+};
+
+struct ystorm_iscsi_task_state {
+       union iscsi_data_desc_ctx sgl_ctx_union;
+       __le32 buffer_offset[2];
+       __le16 bytes_nxt_dif;
+       __le16 rxmit_bytes_nxt_dif;
+       union iscsi_seq_num seq_num_union;
+       u8 dif_bytes_leftover;
+       u8 rxmit_dif_bytes_leftover;
+       __le16 reuse_count;
+       struct iscsi_dif_flags dif_flags;
+       u8 local_comp;
+       __le32 exp_r2t_sn;
+       __le32 sgl_offset[2];
+};
+
+struct ystorm_iscsi_task_st_ctx {
+       struct ystorm_iscsi_task_state state;
+       union iscsi_task_hdr pdu_hdr;
+};
+
+struct ystorm_iscsi_task_ag_ctx {
+       u8 reserved;
+       u8 byte1;
+       __le16 word0;
+       u8 flags0;
+#define YSTORM_ISCSI_TASK_AG_CTX_NIBBLE0_MASK     0xF
+#define YSTORM_ISCSI_TASK_AG_CTX_NIBBLE0_SHIFT    0
+#define YSTORM_ISCSI_TASK_AG_CTX_BIT0_MASK        0x1
+#define YSTORM_ISCSI_TASK_AG_CTX_BIT0_SHIFT       4
+#define YSTORM_ISCSI_TASK_AG_CTX_BIT1_MASK        0x1
+#define YSTORM_ISCSI_TASK_AG_CTX_BIT1_SHIFT       5
+#define YSTORM_ISCSI_TASK_AG_CTX_VALID_MASK       0x1
+#define YSTORM_ISCSI_TASK_AG_CTX_VALID_SHIFT      6
+#define YSTORM_ISCSI_TASK_AG_CTX_BIT3_MASK        0x1
+#define YSTORM_ISCSI_TASK_AG_CTX_BIT3_SHIFT       7
+       u8 flags1;
+#define YSTORM_ISCSI_TASK_AG_CTX_CF0_MASK         0x3
+#define YSTORM_ISCSI_TASK_AG_CTX_CF0_SHIFT        0
+#define YSTORM_ISCSI_TASK_AG_CTX_CF1_MASK         0x3
+#define YSTORM_ISCSI_TASK_AG_CTX_CF1_SHIFT        2
+#define YSTORM_ISCSI_TASK_AG_CTX_CF2SPECIAL_MASK  0x3
+#define YSTORM_ISCSI_TASK_AG_CTX_CF2SPECIAL_SHIFT 4
+#define YSTORM_ISCSI_TASK_AG_CTX_CF0EN_MASK       0x1
+#define YSTORM_ISCSI_TASK_AG_CTX_CF0EN_SHIFT      6
+#define YSTORM_ISCSI_TASK_AG_CTX_CF1EN_MASK       0x1
+#define YSTORM_ISCSI_TASK_AG_CTX_CF1EN_SHIFT      7
+       u8 flags2;
+#define YSTORM_ISCSI_TASK_AG_CTX_BIT4_MASK        0x1
+#define YSTORM_ISCSI_TASK_AG_CTX_BIT4_SHIFT       0
+#define YSTORM_ISCSI_TASK_AG_CTX_RULE0EN_MASK     0x1
+#define YSTORM_ISCSI_TASK_AG_CTX_RULE0EN_SHIFT    1
+#define YSTORM_ISCSI_TASK_AG_CTX_RULE1EN_MASK     0x1
+#define YSTORM_ISCSI_TASK_AG_CTX_RULE1EN_SHIFT    2
+#define YSTORM_ISCSI_TASK_AG_CTX_RULE2EN_MASK     0x1
+#define YSTORM_ISCSI_TASK_AG_CTX_RULE2EN_SHIFT    3
+#define YSTORM_ISCSI_TASK_AG_CTX_RULE3EN_MASK     0x1
+#define YSTORM_ISCSI_TASK_AG_CTX_RULE3EN_SHIFT    4
+#define YSTORM_ISCSI_TASK_AG_CTX_RULE4EN_MASK     0x1
+#define YSTORM_ISCSI_TASK_AG_CTX_RULE4EN_SHIFT    5
+#define YSTORM_ISCSI_TASK_AG_CTX_RULE5EN_MASK     0x1
+#define YSTORM_ISCSI_TASK_AG_CTX_RULE5EN_SHIFT    6
+#define YSTORM_ISCSI_TASK_AG_CTX_RULE6EN_MASK     0x1
+#define YSTORM_ISCSI_TASK_AG_CTX_RULE6EN_SHIFT    7
+       u8 byte2;
+       __le32 TTT;
+       u8 byte3;
+       u8 byte4;
+       __le16 word1;
+};
+
+struct mstorm_iscsi_task_ag_ctx {
+       u8 cdu_validation;
+       u8 byte1;
+       __le16 task_cid;
+       u8 flags0;
+#define MSTORM_ISCSI_TASK_AG_CTX_CONNECTION_TYPE_MASK     0xF
+#define MSTORM_ISCSI_TASK_AG_CTX_CONNECTION_TYPE_SHIFT    0
+#define MSTORM_ISCSI_TASK_AG_CTX_EXIST_IN_QM0_MASK        0x1
+#define MSTORM_ISCSI_TASK_AG_CTX_EXIST_IN_QM0_SHIFT       4
+#define MSTORM_ISCSI_TASK_AG_CTX_BIT1_MASK                0x1
+#define MSTORM_ISCSI_TASK_AG_CTX_BIT1_SHIFT               5
+#define MSTORM_ISCSI_TASK_AG_CTX_VALID_MASK               0x1
+#define MSTORM_ISCSI_TASK_AG_CTX_VALID_SHIFT              6
+#define MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_FLAG_MASK   0x1
+#define MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_FLAG_SHIFT  7
+       u8 flags1;
+#define MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_CF_MASK     0x3
+#define MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_CF_SHIFT    0
+#define MSTORM_ISCSI_TASK_AG_CTX_CF1_MASK                 0x3
+#define MSTORM_ISCSI_TASK_AG_CTX_CF1_SHIFT                2
+#define MSTORM_ISCSI_TASK_AG_CTX_CF2_MASK                 0x3
+#define MSTORM_ISCSI_TASK_AG_CTX_CF2_SHIFT                4
+#define MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_CF_EN_MASK  0x1
+#define MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_CF_EN_SHIFT 6
+#define MSTORM_ISCSI_TASK_AG_CTX_CF1EN_MASK               0x1
+#define MSTORM_ISCSI_TASK_AG_CTX_CF1EN_SHIFT              7
+       u8 flags2;
+#define MSTORM_ISCSI_TASK_AG_CTX_CF2EN_MASK               0x1
+#define MSTORM_ISCSI_TASK_AG_CTX_CF2EN_SHIFT              0
+#define MSTORM_ISCSI_TASK_AG_CTX_RULE0EN_MASK             0x1
+#define MSTORM_ISCSI_TASK_AG_CTX_RULE0EN_SHIFT            1
+#define MSTORM_ISCSI_TASK_AG_CTX_RULE1EN_MASK             0x1
+#define MSTORM_ISCSI_TASK_AG_CTX_RULE1EN_SHIFT            2
+#define MSTORM_ISCSI_TASK_AG_CTX_RULE2EN_MASK             0x1
+#define MSTORM_ISCSI_TASK_AG_CTX_RULE2EN_SHIFT            3
+#define MSTORM_ISCSI_TASK_AG_CTX_RULE3EN_MASK             0x1
+#define MSTORM_ISCSI_TASK_AG_CTX_RULE3EN_SHIFT            4
+#define MSTORM_ISCSI_TASK_AG_CTX_RULE4EN_MASK             0x1
+#define MSTORM_ISCSI_TASK_AG_CTX_RULE4EN_SHIFT            5
+#define MSTORM_ISCSI_TASK_AG_CTX_RULE5EN_MASK             0x1
+#define MSTORM_ISCSI_TASK_AG_CTX_RULE5EN_SHIFT            6
+#define MSTORM_ISCSI_TASK_AG_CTX_RULE6EN_MASK             0x1
+#define MSTORM_ISCSI_TASK_AG_CTX_RULE6EN_SHIFT            7
+       u8 byte2;
+       __le32 reg0;
+       u8 byte3;
+       u8 byte4;
+       __le16 word1;
+};
+
+struct ustorm_iscsi_task_ag_ctx {
+       u8 reserved;
+       u8 state;
+       __le16 icid;
+       u8 flags0;
+#define USTORM_ISCSI_TASK_AG_CTX_CONNECTION_TYPE_MASK        0xF
+#define USTORM_ISCSI_TASK_AG_CTX_CONNECTION_TYPE_SHIFT       0
+#define USTORM_ISCSI_TASK_AG_CTX_EXIST_IN_QM0_MASK           0x1
+#define USTORM_ISCSI_TASK_AG_CTX_EXIST_IN_QM0_SHIFT          4
+#define USTORM_ISCSI_TASK_AG_CTX_BIT1_MASK                   0x1
+#define USTORM_ISCSI_TASK_AG_CTX_BIT1_SHIFT                  5
+#define USTORM_ISCSI_TASK_AG_CTX_HQ_SCANNED_CF_MASK          0x3
+#define USTORM_ISCSI_TASK_AG_CTX_HQ_SCANNED_CF_SHIFT         6
+       u8 flags1;
+#define USTORM_ISCSI_TASK_AG_CTX_RESERVED1_MASK              0x3
+#define USTORM_ISCSI_TASK_AG_CTX_RESERVED1_SHIFT             0
+#define USTORM_ISCSI_TASK_AG_CTX_R2T2RECV_MASK               0x3
+#define USTORM_ISCSI_TASK_AG_CTX_R2T2RECV_SHIFT              2
+#define USTORM_ISCSI_TASK_AG_CTX_CF3_MASK                    0x3
+#define USTORM_ISCSI_TASK_AG_CTX_CF3_SHIFT                   4
+#define USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_CF_MASK           0x3
+#define USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_CF_SHIFT          6
+       u8 flags2;
+#define USTORM_ISCSI_TASK_AG_CTX_HQ_SCANNED_CF_EN_MASK       0x1
+#define USTORM_ISCSI_TASK_AG_CTX_HQ_SCANNED_CF_EN_SHIFT      0
+#define USTORM_ISCSI_TASK_AG_CTX_DISABLE_DATA_ACKED_MASK     0x1
+#define USTORM_ISCSI_TASK_AG_CTX_DISABLE_DATA_ACKED_SHIFT    1
+#define USTORM_ISCSI_TASK_AG_CTX_R2T2RECV_EN_MASK            0x1
+#define USTORM_ISCSI_TASK_AG_CTX_R2T2RECV_EN_SHIFT           2
+#define USTORM_ISCSI_TASK_AG_CTX_CF3EN_MASK                  0x1
+#define USTORM_ISCSI_TASK_AG_CTX_CF3EN_SHIFT                 3
+#define USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_CF_EN_MASK        0x1
+#define USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_CF_EN_SHIFT       4
+#define USTORM_ISCSI_TASK_AG_CTX_CMP_DATA_TOTAL_EXP_EN_MASK  0x1
+#define USTORM_ISCSI_TASK_AG_CTX_CMP_DATA_TOTAL_EXP_EN_SHIFT 5
+#define USTORM_ISCSI_TASK_AG_CTX_RULE1EN_MASK                0x1
+#define USTORM_ISCSI_TASK_AG_CTX_RULE1EN_SHIFT               6
+#define USTORM_ISCSI_TASK_AG_CTX_CMP_CONT_RCV_EXP_EN_MASK    0x1
+#define USTORM_ISCSI_TASK_AG_CTX_CMP_CONT_RCV_EXP_EN_SHIFT   7
+       u8 flags3;
+#define USTORM_ISCSI_TASK_AG_CTX_RULE3EN_MASK                0x1
+#define USTORM_ISCSI_TASK_AG_CTX_RULE3EN_SHIFT               0
+#define USTORM_ISCSI_TASK_AG_CTX_RULE4EN_MASK                0x1
+#define USTORM_ISCSI_TASK_AG_CTX_RULE4EN_SHIFT               1
+#define USTORM_ISCSI_TASK_AG_CTX_RULE5EN_MASK                0x1
+#define USTORM_ISCSI_TASK_AG_CTX_RULE5EN_SHIFT               2
+#define USTORM_ISCSI_TASK_AG_CTX_RULE6EN_MASK                0x1
+#define USTORM_ISCSI_TASK_AG_CTX_RULE6EN_SHIFT               3
+#define USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_TYPE_MASK         0xF
+#define USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_TYPE_SHIFT        4
+       __le32 dif_err_intervals;
+       __le32 dif_error_1st_interval;
+       __le32 rcv_cont_len;
+       __le32 exp_cont_len;
+       __le32 total_data_acked;
+       __le32 exp_data_acked;
+       u8 next_tid_valid;
+       u8 byte3;
+       __le16 word1;
+       __le16 next_tid;
+       __le16 word3;
+       __le32 hdr_residual_count;
+       __le32 exp_r2t_sn;
+};
+
+struct mstorm_iscsi_task_st_ctx {
+       union iscsi_mstorm_sgl sgl_union;
+       struct iscsi_dif_flags dif_flags;
+       struct iscsi_mflags flags;
+       u8 sgl_size;
+       u8 host_sge_index;
+       __le16 dix_cur_sge_offset;
+       __le16 dix_cur_sge_size;
+       __le32 data_offset_rtid;
+       u8 dif_offset;
+       u8 dix_sgl_size;
+       u8 dix_sge_index;
+       u8 task_type;
+       struct regpair sense_db;
+       struct regpair dix_sgl_cur_sge;
+       __le32 rem_task_size;
+       __le16 reuse_count;
+       __le16 dif_data_residue;
+       u8 reserved0[4];
+       __le32 reserved1[1];
+};
+
+struct ustorm_iscsi_task_st_ctx {
+       __le32 rem_rcv_len;
+       __le32 exp_data_transfer_len;
+       __le32 exp_data_sn;
+       struct regpair lun;
+       struct iscsi_reg1 reg1;
+       u8 flags2;
+#define USTORM_ISCSI_TASK_ST_CTX_AHS_EXIST_MASK             0x1
+#define USTORM_ISCSI_TASK_ST_CTX_AHS_EXIST_SHIFT            0
+#define USTORM_ISCSI_TASK_ST_CTX_RESERVED1_MASK             0x7F
+#define USTORM_ISCSI_TASK_ST_CTX_RESERVED1_SHIFT            1
+       u8 reserved2;
+       __le16 reserved3;
+       __le32 reserved4;
+       __le32 reserved5;
+       __le32 reserved6;
+       __le32 reserved7;
+       u8 task_type;
+       u8 error_flags;
+#define USTORM_ISCSI_TASK_ST_CTX_DATA_DIGEST_ERROR_MASK     0x1
+#define USTORM_ISCSI_TASK_ST_CTX_DATA_DIGEST_ERROR_SHIFT    0
+#define USTORM_ISCSI_TASK_ST_CTX_DATA_TRUNCATED_ERROR_MASK  0x1
+#define USTORM_ISCSI_TASK_ST_CTX_DATA_TRUNCATED_ERROR_SHIFT 1
+#define USTORM_ISCSI_TASK_ST_CTX_UNDER_RUN_ERROR_MASK       0x1
+#define USTORM_ISCSI_TASK_ST_CTX_UNDER_RUN_ERROR_SHIFT      2
+#define USTORM_ISCSI_TASK_ST_CTX_RESERVED8_MASK             0x1F
+#define USTORM_ISCSI_TASK_ST_CTX_RESERVED8_SHIFT            3
+       u8 flags;
+#define USTORM_ISCSI_TASK_ST_CTX_CQE_WRITE_MASK             0x3
+#define USTORM_ISCSI_TASK_ST_CTX_CQE_WRITE_SHIFT            0
+#define USTORM_ISCSI_TASK_ST_CTX_LOCAL_COMP_MASK            0x1
+#define USTORM_ISCSI_TASK_ST_CTX_LOCAL_COMP_SHIFT           2
+#define USTORM_ISCSI_TASK_ST_CTX_Q0_R2TQE_WRITE_MASK        0x1
+#define USTORM_ISCSI_TASK_ST_CTX_Q0_R2TQE_WRITE_SHIFT       3
+#define USTORM_ISCSI_TASK_ST_CTX_TOTALDATAACKED_DONE_MASK   0x1
+#define USTORM_ISCSI_TASK_ST_CTX_TOTALDATAACKED_DONE_SHIFT  4
+#define USTORM_ISCSI_TASK_ST_CTX_HQSCANNED_DONE_MASK        0x1
+#define USTORM_ISCSI_TASK_ST_CTX_HQSCANNED_DONE_SHIFT       5
+#define USTORM_ISCSI_TASK_ST_CTX_R2T2RECV_DONE_MASK         0x1
+#define USTORM_ISCSI_TASK_ST_CTX_R2T2RECV_DONE_SHIFT        6
+#define USTORM_ISCSI_TASK_ST_CTX_RESERVED0_MASK             0x1
+#define USTORM_ISCSI_TASK_ST_CTX_RESERVED0_SHIFT            7
+       u8 cq_rss_number;
+};
+
+struct iscsi_task_context {
+       struct ystorm_iscsi_task_st_ctx ystorm_st_context;
+       struct regpair ystorm_st_padding[2];
+       struct ystorm_iscsi_task_ag_ctx ystorm_ag_context;
+       struct regpair ystorm_ag_padding[2];
+       struct tdif_task_context tdif_context;
+       struct mstorm_iscsi_task_ag_ctx mstorm_ag_context;
+       struct regpair mstorm_ag_padding[2];
+       struct ustorm_iscsi_task_ag_ctx ustorm_ag_context;
+       struct mstorm_iscsi_task_st_ctx mstorm_st_context;
+       struct ustorm_iscsi_task_st_ctx ustorm_st_context;
+       struct rdif_task_context rdif_context;
+};
+
+enum iscsi_task_type {
+       ISCSI_TASK_TYPE_INITIATOR_WRITE,
+       ISCSI_TASK_TYPE_INITIATOR_READ,
+       ISCSI_TASK_TYPE_MIDPATH,
+       ISCSI_TASK_TYPE_UNSOLIC,
+       ISCSI_TASK_TYPE_EXCHCLEANUP,
+       ISCSI_TASK_TYPE_IRRELEVANT,
+       ISCSI_TASK_TYPE_TARGET_WRITE,
+       ISCSI_TASK_TYPE_TARGET_READ,
+       ISCSI_TASK_TYPE_TARGET_RESPONSE,
+       ISCSI_TASK_TYPE_LOGIN_RESPONSE,
+       MAX_ISCSI_TASK_TYPE
+};
+
+union iscsi_ttt_txlen_union {
+       __le32 desired_tx_len;
+       __le32 ttt;
+};
+
+struct iscsi_uhqe {
+       __le32 reg1;
+#define ISCSI_UHQE_PDU_PAYLOAD_LEN_MASK     0xFFFFF
+#define ISCSI_UHQE_PDU_PAYLOAD_LEN_SHIFT    0
+#define ISCSI_UHQE_LOCAL_COMP_MASK          0x1
+#define ISCSI_UHQE_LOCAL_COMP_SHIFT         20
+#define ISCSI_UHQE_TOGGLE_BIT_MASK          0x1
+#define ISCSI_UHQE_TOGGLE_BIT_SHIFT         21
+#define ISCSI_UHQE_PURE_PAYLOAD_MASK        0x1
+#define ISCSI_UHQE_PURE_PAYLOAD_SHIFT       22
+#define ISCSI_UHQE_LOGIN_RESPONSE_PDU_MASK  0x1
+#define ISCSI_UHQE_LOGIN_RESPONSE_PDU_SHIFT 23
+#define ISCSI_UHQE_TASK_ID_HI_MASK          0xFF
+#define ISCSI_UHQE_TASK_ID_HI_SHIFT         24
+       __le32 reg2;
+#define ISCSI_UHQE_BUFFER_OFFSET_MASK       0xFFFFFF
+#define ISCSI_UHQE_BUFFER_OFFSET_SHIFT      0
+#define ISCSI_UHQE_TASK_ID_LO_MASK          0xFF
+#define ISCSI_UHQE_TASK_ID_LO_SHIFT         24
+};
+
+struct iscsi_wqe_field {
+       __le32 contlen_cdbsize_field;
+#define ISCSI_WQE_FIELD_CONT_LEN_MASK  0xFFFFFF
+#define ISCSI_WQE_FIELD_CONT_LEN_SHIFT 0
+#define ISCSI_WQE_FIELD_CDB_SIZE_MASK  0xFF
+#define ISCSI_WQE_FIELD_CDB_SIZE_SHIFT 24
+};
+
+union iscsi_wqe_field_union {
+       struct iscsi_wqe_field cont_field;
+       __le32 prev_tid;
+};
+
+struct iscsi_wqe {
+       __le16 task_id;
+       u8 flags;
+#define ISCSI_WQE_WQE_TYPE_MASK        0x7
+#define ISCSI_WQE_WQE_TYPE_SHIFT       0
+#define ISCSI_WQE_NUM_FAST_SGES_MASK   0x7
+#define ISCSI_WQE_NUM_FAST_SGES_SHIFT  3
+#define ISCSI_WQE_PTU_INVALIDATE_MASK  0x1
+#define ISCSI_WQE_PTU_INVALIDATE_SHIFT 6
+#define ISCSI_WQE_RESPONSE_MASK        0x1
+#define ISCSI_WQE_RESPONSE_SHIFT       7
+       struct iscsi_dif_flags prot_flags;
+       union iscsi_wqe_field_union cont_prevtid_union;
+};
+
+enum iscsi_wqe_type {
+       ISCSI_WQE_TYPE_NORMAL,
+       ISCSI_WQE_TYPE_TASK_CLEANUP,
+       ISCSI_WQE_TYPE_MIDDLE_PATH,
+       ISCSI_WQE_TYPE_LOGIN,
+       ISCSI_WQE_TYPE_FIRST_R2T_CONT,
+       ISCSI_WQE_TYPE_NONFIRST_R2T_CONT,
+       ISCSI_WQE_TYPE_RESPONSE,
+       MAX_ISCSI_WQE_TYPE
+};
+
+struct iscsi_xhqe {
+       union iscsi_ttt_txlen_union ttt_or_txlen;
+       __le32 exp_stat_sn;
+       struct iscsi_dif_flags prot_flags;
+       u8 total_ahs_length;
+       u8 opcode;
+       u8 flags;
+#define ISCSI_XHQE_NUM_FAST_SGES_MASK  0x7
+#define ISCSI_XHQE_NUM_FAST_SGES_SHIFT 0
+#define ISCSI_XHQE_FINAL_MASK          0x1
+#define ISCSI_XHQE_FINAL_SHIFT         3
+#define ISCSI_XHQE_SUPER_IO_MASK       0x1
+#define ISCSI_XHQE_SUPER_IO_SHIFT      4
+#define ISCSI_XHQE_STATUS_BIT_MASK     0x1
+#define ISCSI_XHQE_STATUS_BIT_SHIFT    5
+#define ISCSI_XHQE_RESERVED_MASK       0x3
+#define ISCSI_XHQE_RESERVED_SHIFT      6
+       union iscsi_seq_num seq_num_union;
+       __le16 reserved1;
+};
+
+struct mstorm_iscsi_stats_drv {
+       struct regpair iscsi_rx_dropped_pdus_task_not_valid;
+};
+
+struct ooo_opaque {
+       __le32 cid;
+       u8 drop_isle;
+       u8 drop_size;
+       u8 ooo_opcode;
+       u8 ooo_isle;
+};
+
+struct pstorm_iscsi_stats_drv {
+       struct regpair iscsi_tx_bytes_cnt;
+       struct regpair iscsi_tx_packet_cnt;
+};
+
+struct tstorm_iscsi_stats_drv {
+       struct regpair iscsi_rx_bytes_cnt;
+       struct regpair iscsi_rx_packet_cnt;
+       struct regpair iscsi_rx_new_ooo_isle_events_cnt;
+       __le32 iscsi_cmdq_threshold_cnt;
+       __le32 iscsi_rq_threshold_cnt;
+       __le32 iscsi_immq_threshold_cnt;
+};
+
+struct ustorm_iscsi_stats_drv {
+       struct regpair iscsi_rx_data_pdu_cnt;
+       struct regpair iscsi_rx_r2t_pdu_cnt;
+       struct regpair iscsi_rx_total_pdu_cnt;
+};
+
+struct xstorm_iscsi_stats_drv {
+       struct regpair iscsi_tx_go_to_slow_start_event_cnt;
+       struct regpair iscsi_tx_fast_retransmit_event_cnt;
+};
+
+struct ystorm_iscsi_stats_drv {
+       struct regpair iscsi_tx_data_pdu_cnt;
+       struct regpair iscsi_tx_r2t_pdu_cnt;
+       struct regpair iscsi_tx_total_pdu_cnt;
+};
+
+struct iscsi_db_data {
+       u8 params;
+#define ISCSI_DB_DATA_DEST_MASK         0x3
+#define ISCSI_DB_DATA_DEST_SHIFT        0
+#define ISCSI_DB_DATA_AGG_CMD_MASK      0x3
+#define ISCSI_DB_DATA_AGG_CMD_SHIFT     2
+#define ISCSI_DB_DATA_BYPASS_EN_MASK    0x1
+#define ISCSI_DB_DATA_BYPASS_EN_SHIFT   4
+#define ISCSI_DB_DATA_RESERVED_MASK     0x1
+#define ISCSI_DB_DATA_RESERVED_SHIFT    5
+#define ISCSI_DB_DATA_AGG_VAL_SEL_MASK  0x3
+#define ISCSI_DB_DATA_AGG_VAL_SEL_SHIFT 6
+       u8 agg_flags;
+       __le16 sq_prod;
+};
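The mask/shift pairs above follow the HSI convention of packing several fields into one byte. Below is a hedged sketch of filling this doorbell structure; SET_FIELD is the helper defined in include/linux/qed/qed_if.h later in this commit, and the chosen field values and the helper name example_fill_iscsi_db are hypothetical, not part of the commit.

        /* Hypothetical sketch only: packing iscsi_db_data before ringing a doorbell. */
        static inline void example_fill_iscsi_db(struct iscsi_db_data *db, u16 sq_prod)
        {
                db->params = 0;
                SET_FIELD(db->params, ISCSI_DB_DATA_DEST, 0x1);     /* hypothetical destination */
                SET_FIELD(db->params, ISCSI_DB_DATA_AGG_CMD, 0x0);  /* hypothetical agg command */
                db->agg_flags = 0;
                db->sq_prod = cpu_to_le16(sq_prod);                 /* SQ producer index */
        }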
+
+struct tstorm_iscsi_task_ag_ctx {
+       u8 byte0;
+       u8 byte1;
+       __le16 word0;
+       u8 flags0;
+#define TSTORM_ISCSI_TASK_AG_CTX_NIBBLE0_MASK  0xF
+#define TSTORM_ISCSI_TASK_AG_CTX_NIBBLE0_SHIFT 0
+#define TSTORM_ISCSI_TASK_AG_CTX_BIT0_MASK     0x1
+#define TSTORM_ISCSI_TASK_AG_CTX_BIT0_SHIFT    4
+#define TSTORM_ISCSI_TASK_AG_CTX_BIT1_MASK     0x1
+#define TSTORM_ISCSI_TASK_AG_CTX_BIT1_SHIFT    5
+#define TSTORM_ISCSI_TASK_AG_CTX_BIT2_MASK     0x1
+#define TSTORM_ISCSI_TASK_AG_CTX_BIT2_SHIFT    6
+#define TSTORM_ISCSI_TASK_AG_CTX_BIT3_MASK     0x1
+#define TSTORM_ISCSI_TASK_AG_CTX_BIT3_SHIFT    7
+       u8 flags1;
+#define TSTORM_ISCSI_TASK_AG_CTX_BIT4_MASK     0x1
+#define TSTORM_ISCSI_TASK_AG_CTX_BIT4_SHIFT    0
+#define TSTORM_ISCSI_TASK_AG_CTX_BIT5_MASK     0x1
+#define TSTORM_ISCSI_TASK_AG_CTX_BIT5_SHIFT    1
+#define TSTORM_ISCSI_TASK_AG_CTX_CF0_MASK      0x3
+#define TSTORM_ISCSI_TASK_AG_CTX_CF0_SHIFT     2
+#define TSTORM_ISCSI_TASK_AG_CTX_CF1_MASK      0x3
+#define TSTORM_ISCSI_TASK_AG_CTX_CF1_SHIFT     4
+#define TSTORM_ISCSI_TASK_AG_CTX_CF2_MASK      0x3
+#define TSTORM_ISCSI_TASK_AG_CTX_CF2_SHIFT     6
+       u8 flags2;
+#define TSTORM_ISCSI_TASK_AG_CTX_CF3_MASK      0x3
+#define TSTORM_ISCSI_TASK_AG_CTX_CF3_SHIFT     0
+#define TSTORM_ISCSI_TASK_AG_CTX_CF4_MASK      0x3
+#define TSTORM_ISCSI_TASK_AG_CTX_CF4_SHIFT     2
+#define TSTORM_ISCSI_TASK_AG_CTX_CF5_MASK      0x3
+#define TSTORM_ISCSI_TASK_AG_CTX_CF5_SHIFT     4
+#define TSTORM_ISCSI_TASK_AG_CTX_CF6_MASK      0x3
+#define TSTORM_ISCSI_TASK_AG_CTX_CF6_SHIFT     6
+       u8 flags3;
+#define TSTORM_ISCSI_TASK_AG_CTX_CF7_MASK      0x3
+#define TSTORM_ISCSI_TASK_AG_CTX_CF7_SHIFT     0
+#define TSTORM_ISCSI_TASK_AG_CTX_CF0EN_MASK    0x1
+#define TSTORM_ISCSI_TASK_AG_CTX_CF0EN_SHIFT   2
+#define TSTORM_ISCSI_TASK_AG_CTX_CF1EN_MASK    0x1
+#define TSTORM_ISCSI_TASK_AG_CTX_CF1EN_SHIFT   3
+#define TSTORM_ISCSI_TASK_AG_CTX_CF2EN_MASK    0x1
+#define TSTORM_ISCSI_TASK_AG_CTX_CF2EN_SHIFT   4
+#define TSTORM_ISCSI_TASK_AG_CTX_CF3EN_MASK    0x1
+#define TSTORM_ISCSI_TASK_AG_CTX_CF3EN_SHIFT   5
+#define TSTORM_ISCSI_TASK_AG_CTX_CF4EN_MASK    0x1
+#define TSTORM_ISCSI_TASK_AG_CTX_CF4EN_SHIFT   6
+#define TSTORM_ISCSI_TASK_AG_CTX_CF5EN_MASK    0x1
+#define TSTORM_ISCSI_TASK_AG_CTX_CF5EN_SHIFT   7
+       u8 flags4;
+#define TSTORM_ISCSI_TASK_AG_CTX_CF6EN_MASK    0x1
+#define TSTORM_ISCSI_TASK_AG_CTX_CF6EN_SHIFT   0
+#define TSTORM_ISCSI_TASK_AG_CTX_CF7EN_MASK    0x1
+#define TSTORM_ISCSI_TASK_AG_CTX_CF7EN_SHIFT   1
+#define TSTORM_ISCSI_TASK_AG_CTX_RULE0EN_MASK  0x1
+#define TSTORM_ISCSI_TASK_AG_CTX_RULE0EN_SHIFT 2
+#define TSTORM_ISCSI_TASK_AG_CTX_RULE1EN_MASK  0x1
+#define TSTORM_ISCSI_TASK_AG_CTX_RULE1EN_SHIFT 3
+#define TSTORM_ISCSI_TASK_AG_CTX_RULE2EN_MASK  0x1
+#define TSTORM_ISCSI_TASK_AG_CTX_RULE2EN_SHIFT 4
+#define TSTORM_ISCSI_TASK_AG_CTX_RULE3EN_MASK  0x1
+#define TSTORM_ISCSI_TASK_AG_CTX_RULE3EN_SHIFT 5
+#define TSTORM_ISCSI_TASK_AG_CTX_RULE4EN_MASK  0x1
+#define TSTORM_ISCSI_TASK_AG_CTX_RULE4EN_SHIFT 6
+#define TSTORM_ISCSI_TASK_AG_CTX_RULE5EN_MASK  0x1
+#define TSTORM_ISCSI_TASK_AG_CTX_RULE5EN_SHIFT 7
+       u8 byte2;
+       __le16 word1;
+       __le32 reg0;
+       u8 byte3;
+       u8 byte4;
+       __le16 word2;
+       __le16 word3;
+       __le16 word4;
+       __le32 reg1;
+       __le32 reg2;
+};
+
+#endif /* __ISCSI_COMMON__ */
diff --git a/include/linux/qed/qed_chain.h b/include/linux/qed/qed_chain.h
new file mode 100644 (file)
index 0000000..7e441bd
--- /dev/null
@@ -0,0 +1,658 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015 QLogic Corporation
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#ifndef _QED_CHAIN_H
+#define _QED_CHAIN_H
+
+#include <linux/types.h>
+#include <asm/byteorder.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <linux/qed/common_hsi.h>
+
+/* dma_addr_t manip */
+#define DMA_LO_LE(x)            cpu_to_le32(lower_32_bits(x))
+#define DMA_HI_LE(x)            cpu_to_le32(upper_32_bits(x))
+#define DMA_REGPAIR_LE(x, val)  do { \
+                                       (x).hi = DMA_HI_LE((val)); \
+                                       (x).lo = DMA_LO_LE((val)); \
+                               } while (0)
+
+#define HILO_GEN(hi, lo, type)  ((((type)(hi)) << 32) + (lo))
+#define HILO_64(hi, lo) HILO_GEN((le32_to_cpu(hi)), (le32_to_cpu(lo)), u64)
+#define HILO_64_REGPAIR(regpair)        (HILO_64(regpair.hi, regpair.lo))
+#define HILO_DMA_REGPAIR(regpair)      ((dma_addr_t)HILO_64_REGPAIR(regpair))
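These macros pack a dma_addr_t into the little-endian hi/lo halves of a regpair and rebuild it again. A minimal sketch of both directions follows; the helper name is hypothetical.

        /* Hypothetical sketch: round-tripping a DMA address through a regpair. */
        static inline dma_addr_t example_regpair_roundtrip(dma_addr_t phys)
        {
                struct regpair pair;

                DMA_REGPAIR_LE(pair, phys);     /* CPU value -> little-endian hi/lo */
                return HILO_DMA_REGPAIR(pair);  /* little-endian hi/lo -> CPU value */
        }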
+
+enum qed_chain_mode {
+       /* Each Page contains a next pointer at its end */
+       QED_CHAIN_MODE_NEXT_PTR,
+
+       /* Chain is a single page; a next pointer is not required */
+       QED_CHAIN_MODE_SINGLE,
+
+       /* Page pointers are located in a side list */
+       QED_CHAIN_MODE_PBL,
+};
+
+enum qed_chain_use_mode {
+       QED_CHAIN_USE_TO_PRODUCE,               /* Chain starts empty */
+       QED_CHAIN_USE_TO_CONSUME,               /* Chain starts full */
+       QED_CHAIN_USE_TO_CONSUME_PRODUCE,       /* Chain starts empty */
+};
+
+enum qed_chain_cnt_type {
+       /* The chain's size/prod/cons are kept in 16-bit variables */
+       QED_CHAIN_CNT_TYPE_U16,
+
+       /* The chain's size/prod/cons are kept in 32-bit variables  */
+       QED_CHAIN_CNT_TYPE_U32,
+};
+
+struct qed_chain_next {
+       struct regpair  next_phys;
+       void            *next_virt;
+};
+
+struct qed_chain_pbl_u16 {
+       u16 prod_page_idx;
+       u16 cons_page_idx;
+};
+
+struct qed_chain_pbl_u32 {
+       u32 prod_page_idx;
+       u32 cons_page_idx;
+};
+
+struct qed_chain_pbl {
+       /* Base address of a pre-allocated buffer for pbl */
+       dma_addr_t      p_phys_table;
+       void            *p_virt_table;
+
+       /* Table for keeping the virtual addresses of the chain pages,
+        * corresponding to the physical addresses in the pbl table.
+        */
+       void **pp_virt_addr_tbl;
+
+       /* Index to current used page by producer/consumer */
+       union {
+               struct qed_chain_pbl_u16 pbl16;
+               struct qed_chain_pbl_u32 pbl32;
+       } u;
+};
+
+struct qed_chain_u16 {
+       /* Cyclic index of next element to produce/consume */
+       u16 prod_idx;
+       u16 cons_idx;
+};
+
+struct qed_chain_u32 {
+       /* Cyclic index of next element to produce/consume */
+       u32 prod_idx;
+       u32 cons_idx;
+};
+
+struct qed_chain {
+       void                    *p_virt_addr;
+       dma_addr_t              p_phys_addr;
+       void                    *p_prod_elem;
+       void                    *p_cons_elem;
+
+       enum qed_chain_mode     mode;
+       enum qed_chain_use_mode intended_use; /* used to produce/consume */
+       enum qed_chain_cnt_type cnt_type;
+
+       union {
+               struct qed_chain_u16 chain16;
+               struct qed_chain_u32 chain32;
+       } u;
+
+       u32 page_cnt;
+
+       /* Number of elements - capacity is for usable elements only,
+        * while size will contain total number of elements [for entire chain].
+        */
+       u32 capacity;
+       u32 size;
+
+       /* Elements information for fast calculations */
+       u16                     elem_per_page;
+       u16                     elem_per_page_mask;
+       u16                     elem_unusable;
+       u16                     usable_per_page;
+       u16                     elem_size;
+       u16                     next_page_mask;
+       struct qed_chain_pbl    pbl;
+};
+
+#define QED_CHAIN_PBL_ENTRY_SIZE        (8)
+#define QED_CHAIN_PAGE_SIZE             (0x1000)
+#define ELEMS_PER_PAGE(elem_size)       (QED_CHAIN_PAGE_SIZE / (elem_size))
+
+#define UNUSABLE_ELEMS_PER_PAGE(elem_size, mode)     \
+       ((mode == QED_CHAIN_MODE_NEXT_PTR) ?         \
+        (1 + ((sizeof(struct qed_chain_next) - 1) / \
+              (elem_size))) : 0)
+
+#define USABLE_ELEMS_PER_PAGE(elem_size, mode) \
+       ((u32)(ELEMS_PER_PAGE(elem_size) -     \
+              UNUSABLE_ELEMS_PER_PAGE(elem_size, mode)))
+
+#define QED_CHAIN_PAGE_CNT(elem_cnt, elem_size, mode) \
+       DIV_ROUND_UP(elem_cnt, USABLE_ELEMS_PER_PAGE(elem_size, mode))
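A worked example of the sizing macros above, assuming a 64-bit build where sizeof(struct qed_chain_next) is 16 bytes; the values are illustrative only.

        /*
         * elem_size = 8, mode = QED_CHAIN_MODE_NEXT_PTR:
         *   ELEMS_PER_PAGE(8)                  = 0x1000 / 8       = 512
         *   UNUSABLE_ELEMS_PER_PAGE(8, mode)   = 1 + (16 - 1) / 8 = 2
         *   USABLE_ELEMS_PER_PAGE(8, mode)     = 512 - 2          = 510
         *   QED_CHAIN_PAGE_CNT(1024, 8, mode)  = DIV_ROUND_UP(1024, 510) = 3 pages
         */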
+
+#define is_chain_u16(p) ((p)->cnt_type == QED_CHAIN_CNT_TYPE_U16)
+#define is_chain_u32(p) ((p)->cnt_type == QED_CHAIN_CNT_TYPE_U32)
+
+/* Accessors */
+static inline u16 qed_chain_get_prod_idx(struct qed_chain *p_chain)
+{
+       return p_chain->u.chain16.prod_idx;
+}
+
+static inline u16 qed_chain_get_cons_idx(struct qed_chain *p_chain)
+{
+       return p_chain->u.chain16.cons_idx;
+}
+
+static inline u32 qed_chain_get_cons_idx_u32(struct qed_chain *p_chain)
+{
+       return p_chain->u.chain32.cons_idx;
+}
+
+static inline u16 qed_chain_get_elem_left(struct qed_chain *p_chain)
+{
+       u16 used;
+
+       used = (u16) (((u32)0x10000 +
+                      (u32)p_chain->u.chain16.prod_idx) -
+                     (u32)p_chain->u.chain16.cons_idx);
+       if (p_chain->mode == QED_CHAIN_MODE_NEXT_PTR)
+               used -= p_chain->u.chain16.prod_idx / p_chain->elem_per_page -
+                   p_chain->u.chain16.cons_idx / p_chain->elem_per_page;
+
+       return (u16)(p_chain->capacity - used);
+}
+
+static inline u32 qed_chain_get_elem_left_u32(struct qed_chain *p_chain)
+{
+       u32 used;
+
+       used = (u32) (((u64)0x100000000ULL +
+                      (u64)p_chain->u.chain32.prod_idx) -
+                     (u64)p_chain->u.chain32.cons_idx);
+       if (p_chain->mode == QED_CHAIN_MODE_NEXT_PTR)
+               used -= p_chain->u.chain32.prod_idx / p_chain->elem_per_page -
+                   p_chain->u.chain32.cons_idx / p_chain->elem_per_page;
+
+       return p_chain->capacity - used;
+}
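A short numeric example of the wraparound arithmetic used above; the indices are hypothetical.

        /*
         * 16-bit chain with prod_idx = 0x0005 and cons_idx = 0xFFFB:
         *   used = (0x10000 + 0x0005) - 0xFFFB = 10
         * so qed_chain_get_elem_left() returns capacity - 10, i.e. ten elements
         * are still outstanding (before the per-page adjustment made in
         * NEXT_PTR mode).
         */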
+
+static inline u16 qed_chain_get_usable_per_page(struct qed_chain *p_chain)
+{
+       return p_chain->usable_per_page;
+}
+
+static inline u16 qed_chain_get_unusable_per_page(struct qed_chain *p_chain)
+{
+       return p_chain->elem_unusable;
+}
+
+static inline u32 qed_chain_get_page_cnt(struct qed_chain *p_chain)
+{
+       return p_chain->page_cnt;
+}
+
+static inline dma_addr_t qed_chain_get_pbl_phys(struct qed_chain *p_chain)
+{
+       return p_chain->pbl.p_phys_table;
+}
+
+/**
+ * @brief qed_chain_advance_page -
+ *
+ * Advance the next element across pages for a linked chain
+ *
+ * @param p_chain
+ * @param p_next_elem
+ * @param idx_to_inc
+ * @param page_to_inc
+ */
+static inline void
+qed_chain_advance_page(struct qed_chain *p_chain,
+                      void **p_next_elem, void *idx_to_inc, void *page_to_inc)
+{
+       struct qed_chain_next *p_next = NULL;
+       u32 page_index = 0;
+       switch (p_chain->mode) {
+       case QED_CHAIN_MODE_NEXT_PTR:
+               p_next = *p_next_elem;
+               *p_next_elem = p_next->next_virt;
+               if (is_chain_u16(p_chain))
+                       *(u16 *)idx_to_inc += p_chain->elem_unusable;
+               else
+                       *(u32 *)idx_to_inc += p_chain->elem_unusable;
+               break;
+       case QED_CHAIN_MODE_SINGLE:
+               *p_next_elem = p_chain->p_virt_addr;
+               break;
+
+       case QED_CHAIN_MODE_PBL:
+               if (is_chain_u16(p_chain)) {
+                       if (++(*(u16 *)page_to_inc) == p_chain->page_cnt)
+                               *(u16 *)page_to_inc = 0;
+                       page_index = *(u16 *)page_to_inc;
+               } else {
+                       if (++(*(u32 *)page_to_inc) == p_chain->page_cnt)
+                               *(u32 *)page_to_inc = 0;
+                       page_index = *(u32 *)page_to_inc;
+               }
+               *p_next_elem = p_chain->pbl.pp_virt_addr_tbl[page_index];
+       }
+}
+
+#define is_unusable_idx(p, idx)        \
+       (((p)->u.chain16.idx & (p)->elem_per_page_mask) == (p)->usable_per_page)
+
+#define is_unusable_idx_u32(p, idx) \
+       (((p)->u.chain32.idx & (p)->elem_per_page_mask) == (p)->usable_per_page)
+#define is_unusable_next_idx(p, idx)                            \
+       ((((p)->u.chain16.idx + 1) & (p)->elem_per_page_mask) == \
+        (p)->usable_per_page)
+
+#define is_unusable_next_idx_u32(p, idx)                        \
+       ((((p)->u.chain32.idx + 1) & (p)->elem_per_page_mask) == \
+        (p)->usable_per_page)
+
+#define test_and_skip(p, idx)                                             \
+       do {                                            \
+               if (is_chain_u16(p)) {                                     \
+                       if (is_unusable_idx(p, idx))                       \
+                               (p)->u.chain16.idx += (p)->elem_unusable;  \
+               } else {                                                   \
+                       if (is_unusable_idx_u32(p, idx))                   \
+                               (p)->u.chain32.idx += (p)->elem_unusable;  \
+               }                                       \
+       } while (0)
+
+/**
+ * @brief qed_chain_return_produced -
+ *
+ * A chain in which the driver "Produces" elements should use this API
+ * to indicate previous produced elements are now consumed.
+ *
+ * @param p_chain
+ */
+static inline void qed_chain_return_produced(struct qed_chain *p_chain)
+{
+       if (is_chain_u16(p_chain))
+               p_chain->u.chain16.cons_idx++;
+       else
+               p_chain->u.chain32.cons_idx++;
+       test_and_skip(p_chain, cons_idx);
+}
+
+/**
+ * @brief qed_chain_produce -
+ *
+ * A chain in which the driver "Produces" elements should use this to get
+ * a pointer to the next element which can be "Produced". It is the driver's
+ * responsibility to validate that the chain has room for a new element.
+ *
+ * @param p_chain
+ *
+ * @return void*, a pointer to next element
+ */
+static inline void *qed_chain_produce(struct qed_chain *p_chain)
+{
+       void *p_ret = NULL, *p_prod_idx, *p_prod_page_idx;
+
+       if (is_chain_u16(p_chain)) {
+               if ((p_chain->u.chain16.prod_idx &
+                    p_chain->elem_per_page_mask) == p_chain->next_page_mask) {
+                       p_prod_idx = &p_chain->u.chain16.prod_idx;
+                       p_prod_page_idx = &p_chain->pbl.u.pbl16.prod_page_idx;
+                       qed_chain_advance_page(p_chain, &p_chain->p_prod_elem,
+                                              p_prod_idx, p_prod_page_idx);
+               }
+               p_chain->u.chain16.prod_idx++;
+       } else {
+               if ((p_chain->u.chain32.prod_idx &
+                    p_chain->elem_per_page_mask) == p_chain->next_page_mask) {
+                       p_prod_idx = &p_chain->u.chain32.prod_idx;
+                       p_prod_page_idx = &p_chain->pbl.u.pbl32.prod_page_idx;
+                       qed_chain_advance_page(p_chain, &p_chain->p_prod_elem,
+                                              p_prod_idx, p_prod_page_idx);
+               }
+               p_chain->u.chain32.prod_idx++;
+       }
+
+       p_ret = p_chain->p_prod_elem;
+       p_chain->p_prod_elem = (void *)(((u8 *)p_chain->p_prod_elem) +
+                                       p_chain->elem_size);
+
+       return p_ret;
+}
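A hedged usage sketch of the produce path: the caller is responsible for checking that room exists before calling qed_chain_produce(). The helper name and element handling are hypothetical.

        /* Hypothetical sketch: reserve one element of a u16-counted chain for filling. */
        static inline void *example_produce_one(struct qed_chain *p_chain)
        {
                if (!qed_chain_get_elem_left(p_chain))
                        return NULL;                    /* no room - caller must back off */

                return qed_chain_produce(p_chain);      /* pointer to the element to fill */
        }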
+
+/**
+ * @brief qed_chain_get_capacity -
+ *
+ * Get the maximum number of usable BDs in the chain
+ *
+ * @param p_chain
+ *
+ * @return u32, the chain capacity (number of usable elements)
+ */
+static inline u32 qed_chain_get_capacity(struct qed_chain *p_chain)
+{
+       return p_chain->capacity;
+}
+
+/**
+ * @brief qed_chain_recycle_consumed -
+ *
+ * Returns an element which was previously consumed;
+ * Increments producers so they could be written to FW.
+ *
+ * @param p_chain
+ */
+static inline void qed_chain_recycle_consumed(struct qed_chain *p_chain)
+{
+       test_and_skip(p_chain, prod_idx);
+       if (is_chain_u16(p_chain))
+               p_chain->u.chain16.prod_idx++;
+       else
+               p_chain->u.chain32.prod_idx++;
+}
+
+/**
+ * @brief qed_chain_consume -
+ *
+ * A Chain in which the driver utilizes data written by a different source
+ * (i.e., FW) should use this to access passed buffers.
+ *
+ * @param p_chain
+ *
+ * @return void*, a pointer to the next buffer written
+ */
+static inline void *qed_chain_consume(struct qed_chain *p_chain)
+{
+       void *p_ret = NULL, *p_cons_idx, *p_cons_page_idx;
+
+       if (is_chain_u16(p_chain)) {
+               if ((p_chain->u.chain16.cons_idx &
+                    p_chain->elem_per_page_mask) == p_chain->next_page_mask) {
+                       p_cons_idx = &p_chain->u.chain16.cons_idx;
+                       p_cons_page_idx = &p_chain->pbl.u.pbl16.cons_page_idx;
+                       qed_chain_advance_page(p_chain, &p_chain->p_cons_elem,
+                                              p_cons_idx, p_cons_page_idx);
+               }
+               p_chain->u.chain16.cons_idx++;
+       } else {
+               if ((p_chain->u.chain32.cons_idx &
+                    p_chain->elem_per_page_mask) == p_chain->next_page_mask) {
+                       p_cons_idx = &p_chain->u.chain32.cons_idx;
+                       p_cons_page_idx = &p_chain->pbl.u.pbl32.cons_page_idx;
+                       qed_chain_advance_page(p_chain, &p_chain->p_cons_elem,
+                                              p_cons_idx, p_cons_page_idx);
+               }
+               p_chain->u.chain32.cons_idx++;
+       }
+
+       p_ret = p_chain->p_cons_elem;
+       p_chain->p_cons_elem = (void *)(((u8 *)p_chain->p_cons_elem) +
+                                       p_chain->elem_size);
+
+       return p_ret;
+}
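A hedged sketch of the consume path for a chain receiving FW-written buffers; the completion count and handler are hypothetical, and qed_chain_recycle_consumed() hands each slot back to the producer side as described above.

        /* Hypothetical sketch: drain 'count' FW-written elements from the chain. */
        static inline void example_consume(struct qed_chain *p_chain, u16 count,
                                           void (*handle)(void *elem))
        {
                while (count--) {
                        handle(qed_chain_consume(p_chain));     /* next FW-written element */
                        qed_chain_recycle_consumed(p_chain);    /* return the slot to FW   */
                }
        }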
+
+/**
+ * @brief qed_chain_reset - Resets the chain to its start state
+ *
+ * @param p_chain pointer to a previously allocated chain
+ */
+static inline void qed_chain_reset(struct qed_chain *p_chain)
+{
+       u32 i;
+
+       if (is_chain_u16(p_chain)) {
+               p_chain->u.chain16.prod_idx = 0;
+               p_chain->u.chain16.cons_idx = 0;
+       } else {
+               p_chain->u.chain32.prod_idx = 0;
+               p_chain->u.chain32.cons_idx = 0;
+       }
+       p_chain->p_cons_elem = p_chain->p_virt_addr;
+       p_chain->p_prod_elem = p_chain->p_virt_addr;
+
+       if (p_chain->mode == QED_CHAIN_MODE_PBL) {
+               /* Use (page_cnt - 1) as a reset value for the prod/cons page's
+                * indices, to avoid unnecessary page advancing on the first
+                * call to qed_chain_produce/consume. Instead, the indices
+                * will be advanced to page_cnt and then will be wrapped to 0.
+                */
+               u32 reset_val = p_chain->page_cnt - 1;
+
+               if (is_chain_u16(p_chain)) {
+                       p_chain->pbl.u.pbl16.prod_page_idx = (u16)reset_val;
+                       p_chain->pbl.u.pbl16.cons_page_idx = (u16)reset_val;
+               } else {
+                       p_chain->pbl.u.pbl32.prod_page_idx = reset_val;
+                       p_chain->pbl.u.pbl32.cons_page_idx = reset_val;
+               }
+       }
+
+       switch (p_chain->intended_use) {
+       case QED_CHAIN_USE_TO_CONSUME_PRODUCE:
+       case QED_CHAIN_USE_TO_PRODUCE:
+               /* Do nothing */
+               break;
+
+       case QED_CHAIN_USE_TO_CONSUME:
+               /* produce empty elements */
+               for (i = 0; i < p_chain->capacity; i++)
+                       qed_chain_recycle_consumed(p_chain);
+               break;
+       }
+}
+
+/**
+ * @brief qed_chain_init_params - Initializes the fixed parameters of a chain
+ *
+ * @param p_chain
+ * @param page_cnt     number of pages in the allocated buffer
+ * @param elem_size    size of each element in the chain
+ * @param intended_use
+ * @param mode
+ * @param cnt_type
+ */
+static inline void qed_chain_init_params(struct qed_chain *p_chain,
+                                        u32 page_cnt,
+                                        u8 elem_size,
+                                        enum qed_chain_use_mode intended_use,
+                                        enum qed_chain_mode mode,
+                                        enum qed_chain_cnt_type cnt_type)
+{
+       /* chain fixed parameters */
+       p_chain->p_virt_addr = NULL;
+       p_chain->p_phys_addr = 0;
+       p_chain->elem_size      = elem_size;
+       p_chain->intended_use = intended_use;
+       p_chain->mode           = mode;
+       p_chain->cnt_type = cnt_type;
+
+       p_chain->elem_per_page          = ELEMS_PER_PAGE(elem_size);
+       p_chain->usable_per_page = USABLE_ELEMS_PER_PAGE(elem_size, mode);
+       p_chain->elem_per_page_mask     = p_chain->elem_per_page - 1;
+       p_chain->elem_unusable = UNUSABLE_ELEMS_PER_PAGE(elem_size, mode);
+       p_chain->next_page_mask = (p_chain->usable_per_page &
+                                  p_chain->elem_per_page_mask);
+
+       p_chain->page_cnt = page_cnt;
+       p_chain->capacity = p_chain->usable_per_page * page_cnt;
+       p_chain->size = p_chain->elem_per_page * page_cnt;
+
+       p_chain->pbl.p_phys_table = 0;
+       p_chain->pbl.p_virt_table = NULL;
+       p_chain->pbl.pp_virt_addr_tbl = NULL;
+}
+
+/**
+ * @brief qed_chain_init_mem -
+ *
+ * Initializes a basic chain struct with its chain buffers
+ *
+ * @param p_chain
+ * @param p_virt_addr  virtual address of allocated buffer's beginning
+ * @param p_phys_addr  physical address of allocated buffer's beginning
+ *
+ */
+static inline void qed_chain_init_mem(struct qed_chain *p_chain,
+                                     void *p_virt_addr, dma_addr_t p_phys_addr)
+{
+       p_chain->p_virt_addr = p_virt_addr;
+       p_chain->p_phys_addr = p_phys_addr;
+}
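A sketch of how the initializers are typically combined for a simple single-page chain; the DMA buffer is assumed to be allocated elsewhere, and all names and sizes here are illustrative.

        /* Hypothetical sketch: set up a single-page, 8-byte-element producer chain. */
        static inline void example_chain_setup(struct qed_chain *p_chain,
                                               void *p_virt, dma_addr_t p_phys)
        {
                qed_chain_init_params(p_chain, 1, 8,            /* one page, 8B elements  */
                                      QED_CHAIN_USE_TO_PRODUCE,
                                      QED_CHAIN_MODE_SINGLE,
                                      QED_CHAIN_CNT_TYPE_U16);
                qed_chain_init_mem(p_chain, p_virt, p_phys);    /* attach the DMA buffer  */
                qed_chain_reset(p_chain);                       /* start prod/cons from 0 */
        }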
+
+/**
+ * @brief qed_chain_init_pbl_mem -
+ *
+ * Initializes a basic chain struct with its pbl buffers
+ *
+ * @param p_chain
+ * @param p_virt_pbl   pointer to a pre-allocated side table which will hold
+ *                      virtual page addresses.
+ * @param p_phys_pbl   pointer to a pre-allocated side table which will hold
+ *                      physical page addresses.
+ * @param pp_virt_addr_tbl
+ *                      pointer to a pre-allocated side table which will hold
+ *                      the virtual addresses of the chain pages.
+ *
+ */
+static inline void qed_chain_init_pbl_mem(struct qed_chain *p_chain,
+                                         void *p_virt_pbl,
+                                         dma_addr_t p_phys_pbl,
+                                         void **pp_virt_addr_tbl)
+{
+       p_chain->pbl.p_phys_table = p_phys_pbl;
+       p_chain->pbl.p_virt_table = p_virt_pbl;
+       p_chain->pbl.pp_virt_addr_tbl = pp_virt_addr_tbl;
+}
+
+/**
+ * @brief qed_chain_init_next_ptr_elem -
+ *
+ * Initializes a next pointer element
+ *
+ * @param p_chain
+ * @param p_virt_curr  virtual address of a chain page of which the next
+ *                      pointer element is initialized
+ * @param p_virt_next  virtual address of the next chain page
+ * @param p_phys_next  physical address of the next chain page
+ *
+ */
+static inline void
+qed_chain_init_next_ptr_elem(struct qed_chain *p_chain,
+                            void *p_virt_curr,
+                            void *p_virt_next, dma_addr_t p_phys_next)
+{
+       struct qed_chain_next *p_next;
+       u32 size;
+
+       size = p_chain->elem_size * p_chain->usable_per_page;
+       p_next = (struct qed_chain_next *)((u8 *)p_virt_curr + size);
+
+       DMA_REGPAIR_LE(p_next->next_phys, p_phys_next);
+
+       p_next->next_virt = p_virt_next;
+}
+
+/**
+ * @brief qed_chain_get_last_elem -
+ *
+ * Returns a pointer to the last element of the chain
+ *
+ * @param p_chain
+ *
+ * @return void*
+ */
+static inline void *qed_chain_get_last_elem(struct qed_chain *p_chain)
+{
+       struct qed_chain_next *p_next = NULL;
+       void *p_virt_addr = NULL;
+       u32 size, last_page_idx;
+
+       if (!p_chain->p_virt_addr)
+               goto out;
+
+       switch (p_chain->mode) {
+       case QED_CHAIN_MODE_NEXT_PTR:
+               size = p_chain->elem_size * p_chain->usable_per_page;
+               p_virt_addr = p_chain->p_virt_addr;
+               p_next = (struct qed_chain_next *)((u8 *)p_virt_addr + size);
+               while (p_next->next_virt != p_chain->p_virt_addr) {
+                       p_virt_addr = p_next->next_virt;
+                       p_next = (struct qed_chain_next *)((u8 *)p_virt_addr +
+                                                          size);
+               }
+               break;
+       case QED_CHAIN_MODE_SINGLE:
+               p_virt_addr = p_chain->p_virt_addr;
+               break;
+       case QED_CHAIN_MODE_PBL:
+               last_page_idx = p_chain->page_cnt - 1;
+               p_virt_addr = p_chain->pbl.pp_virt_addr_tbl[last_page_idx];
+               break;
+       }
+       /* p_virt_addr points at this stage to the last page of the chain */
+       size = p_chain->elem_size * (p_chain->usable_per_page - 1);
+       p_virt_addr = (u8 *)p_virt_addr + size;
+out:
+       return p_virt_addr;
+}
+
+/**
+ * @brief qed_chain_set_prod - sets the prod to the given value
+ *
+ * @param p_chain
+ * @param prod_idx
+ * @param p_prod_elem
+ */
+static inline void qed_chain_set_prod(struct qed_chain *p_chain,
+                                     u32 prod_idx, void *p_prod_elem)
+{
+       if (is_chain_u16(p_chain))
+               p_chain->u.chain16.prod_idx = (u16) prod_idx;
+       else
+               p_chain->u.chain32.prod_idx = prod_idx;
+       p_chain->p_prod_elem = p_prod_elem;
+}
+
+/**
+ * @brief qed_chain_pbl_zero_mem - set chain memory to 0
+ *
+ * @param p_chain
+ */
+static inline void qed_chain_pbl_zero_mem(struct qed_chain *p_chain)
+{
+       u32 i, page_cnt;
+
+       if (p_chain->mode != QED_CHAIN_MODE_PBL)
+               return;
+
+       page_cnt = qed_chain_get_page_cnt(p_chain);
+
+       for (i = 0; i < page_cnt; i++)
+               memset(p_chain->pbl.pp_virt_addr_tbl[i], 0,
+                      QED_CHAIN_PAGE_SIZE);
+}
+
+#endif
diff --git a/include/linux/qed/qed_eth_if.h b/include/linux/qed/qed_eth_if.h
new file mode 100644 (file)
index 0000000..4475a9d
--- /dev/null
@@ -0,0 +1,257 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015 QLogic Corporation
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#ifndef _QED_ETH_IF_H
+#define _QED_ETH_IF_H
+
+#include <linux/list.h>
+#include <linux/if_link.h>
+#include <linux/qed/eth_common.h>
+#include <linux/qed/qed_if.h>
+#include <linux/qed/qed_iov_if.h>
+
+struct qed_dev_eth_info {
+       struct qed_dev_info common;
+
+       u8      num_queues;
+       u8      num_tc;
+
+       u8      port_mac[ETH_ALEN];
+       u8      num_vlan_filters;
+};
+
+struct qed_update_vport_rss_params {
+       u16     rss_ind_table[128];
+       u32     rss_key[10];
+       u8      rss_caps;
+};
+
+struct qed_update_vport_params {
+       u8 vport_id;
+       u8 update_vport_active_flg;
+       u8 vport_active_flg;
+       u8 update_tx_switching_flg;
+       u8 tx_switching_flg;
+       u8 update_accept_any_vlan_flg;
+       u8 accept_any_vlan;
+       u8 update_rss_flg;
+       struct qed_update_vport_rss_params rss_params;
+};
+
+struct qed_start_vport_params {
+       bool remove_inner_vlan;
+       bool gro_enable;
+       bool drop_ttl0;
+       u8 vport_id;
+       u16 mtu;
+       bool clear_stats;
+};
+
+struct qed_stop_rxq_params {
+       u8 rss_id;
+       u8 rx_queue_id;
+       u8 vport_id;
+       bool eq_completion_only;
+};
+
+struct qed_stop_txq_params {
+       u8 rss_id;
+       u8 tx_queue_id;
+};
+
+enum qed_filter_rx_mode_type {
+       QED_FILTER_RX_MODE_TYPE_REGULAR,
+       QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC,
+       QED_FILTER_RX_MODE_TYPE_PROMISC,
+};
+
+enum qed_filter_xcast_params_type {
+       QED_FILTER_XCAST_TYPE_ADD,
+       QED_FILTER_XCAST_TYPE_DEL,
+       QED_FILTER_XCAST_TYPE_REPLACE,
+};
+
+struct qed_filter_ucast_params {
+       enum qed_filter_xcast_params_type type;
+       u8 vlan_valid;
+       u16 vlan;
+       u8 mac_valid;
+       unsigned char mac[ETH_ALEN];
+};
+
+struct qed_filter_mcast_params {
+       enum qed_filter_xcast_params_type type;
+       u8 num;
+       unsigned char mac[64][ETH_ALEN];
+};
+
+union qed_filter_type_params {
+       enum qed_filter_rx_mode_type accept_flags;
+       struct qed_filter_ucast_params ucast;
+       struct qed_filter_mcast_params mcast;
+};
+
+enum qed_filter_type {
+       QED_FILTER_TYPE_UCAST,
+       QED_FILTER_TYPE_MCAST,
+       QED_FILTER_TYPE_RX_MODE,
+       QED_MAX_FILTER_TYPES,
+};
+
+struct qed_filter_params {
+       enum qed_filter_type type;
+       union qed_filter_type_params filter;
+};
+
+struct qed_queue_start_common_params {
+       u8 rss_id;
+       u8 queue_id;
+       u8 vport_id;
+       u16 sb;
+       u16 sb_idx;
+       u16 vf_qid;
+};
+
+struct qed_tunn_params {
+       u16 vxlan_port;
+       u8 update_vxlan_port;
+       u16 geneve_port;
+       u8 update_geneve_port;
+};
+
+struct qed_eth_cb_ops {
+       struct qed_common_cb_ops common;
+       void (*force_mac) (void *dev, u8 *mac);
+};
+
+#ifdef CONFIG_DCB
+/* The prototype declaration of qed_eth_dcbnl_ops should match the declaration
+ * of the dcbnl_rtnl_ops structure.
+ */
+struct qed_eth_dcbnl_ops {
+       /* IEEE 802.1Qaz std */
+       int (*ieee_getpfc)(struct qed_dev *cdev, struct ieee_pfc *pfc);
+       int (*ieee_setpfc)(struct qed_dev *cdev, struct ieee_pfc *pfc);
+       int (*ieee_getets)(struct qed_dev *cdev, struct ieee_ets *ets);
+       int (*ieee_setets)(struct qed_dev *cdev, struct ieee_ets *ets);
+       int (*ieee_peer_getets)(struct qed_dev *cdev, struct ieee_ets *ets);
+       int (*ieee_peer_getpfc)(struct qed_dev *cdev, struct ieee_pfc *pfc);
+       int (*ieee_getapp)(struct qed_dev *cdev, struct dcb_app *app);
+       int (*ieee_setapp)(struct qed_dev *cdev, struct dcb_app *app);
+
+       /* CEE std */
+       u8 (*getstate)(struct qed_dev *cdev);
+       u8 (*setstate)(struct qed_dev *cdev, u8 state);
+       void (*getpgtccfgtx)(struct qed_dev *cdev, int prio, u8 *prio_type,
+                            u8 *pgid, u8 *bw_pct, u8 *up_map);
+       void (*getpgbwgcfgtx)(struct qed_dev *cdev, int pgid, u8 *bw_pct);
+       void (*getpgtccfgrx)(struct qed_dev *cdev, int prio, u8 *prio_type,
+                            u8 *pgid, u8 *bw_pct, u8 *up_map);
+       void (*getpgbwgcfgrx)(struct qed_dev *cdev, int pgid, u8 *bw_pct);
+       void (*getpfccfg)(struct qed_dev *cdev, int prio, u8 *setting);
+       void (*setpfccfg)(struct qed_dev *cdev, int prio, u8 setting);
+       u8 (*getcap)(struct qed_dev *cdev, int capid, u8 *cap);
+       int (*getnumtcs)(struct qed_dev *cdev, int tcid, u8 *num);
+       u8 (*getpfcstate)(struct qed_dev *cdev);
+       int (*getapp)(struct qed_dev *cdev, u8 idtype, u16 id);
+       u8 (*getfeatcfg)(struct qed_dev *cdev, int featid, u8 *flags);
+
+       /* DCBX configuration */
+       u8 (*getdcbx)(struct qed_dev *cdev);
+       void (*setpgtccfgtx)(struct qed_dev *cdev, int prio,
+                            u8 pri_type, u8 pgid, u8 bw_pct, u8 up_map);
+       void (*setpgtccfgrx)(struct qed_dev *cdev, int prio,
+                            u8 pri_type, u8 pgid, u8 bw_pct, u8 up_map);
+       void (*setpgbwgcfgtx)(struct qed_dev *cdev, int pgid, u8 bw_pct);
+       void (*setpgbwgcfgrx)(struct qed_dev *cdev, int pgid, u8 bw_pct);
+       u8 (*setall)(struct qed_dev *cdev);
+       int (*setnumtcs)(struct qed_dev *cdev, int tcid, u8 num);
+       void (*setpfcstate)(struct qed_dev *cdev, u8 state);
+       int (*setapp)(struct qed_dev *cdev, u8 idtype, u16 idval, u8 up);
+       u8 (*setdcbx)(struct qed_dev *cdev, u8 state);
+       u8 (*setfeatcfg)(struct qed_dev *cdev, int featid, u8 flags);
+
+       /* Peer apps */
+       int (*peer_getappinfo)(struct qed_dev *cdev,
+                              struct dcb_peer_app_info *info,
+                              u16 *app_count);
+       int (*peer_getapptable)(struct qed_dev *cdev, struct dcb_app *table);
+
+       /* CEE peer */
+       int (*cee_peer_getpfc)(struct qed_dev *cdev, struct cee_pfc *pfc);
+       int (*cee_peer_getpg)(struct qed_dev *cdev, struct cee_pg *pg);
+};
+#endif
+
+struct qed_eth_ops {
+       const struct qed_common_ops *common;
+#ifdef CONFIG_QED_SRIOV
+       const struct qed_iov_hv_ops *iov;
+#endif
+#ifdef CONFIG_DCB
+       const struct qed_eth_dcbnl_ops *dcb;
+#endif
+
+       int (*fill_dev_info)(struct qed_dev *cdev,
+                            struct qed_dev_eth_info *info);
+
+       void (*register_ops)(struct qed_dev *cdev,
+                            struct qed_eth_cb_ops *ops,
+                            void *cookie);
+
+       bool (*check_mac)(struct qed_dev *cdev, u8 *mac);
+
+       int (*vport_start)(struct qed_dev *cdev,
+                          struct qed_start_vport_params *params);
+
+       int (*vport_stop)(struct qed_dev *cdev,
+                         u8 vport_id);
+
+       int (*vport_update)(struct qed_dev *cdev,
+                           struct qed_update_vport_params *params);
+
+       int (*q_rx_start)(struct qed_dev *cdev,
+                         struct qed_queue_start_common_params *params,
+                         u16 bd_max_bytes,
+                         dma_addr_t bd_chain_phys_addr,
+                         dma_addr_t cqe_pbl_addr,
+                         u16 cqe_pbl_size,
+                         void __iomem **pp_prod);
+
+       int (*q_rx_stop)(struct qed_dev *cdev,
+                        struct qed_stop_rxq_params *params);
+
+       int (*q_tx_start)(struct qed_dev *cdev,
+                         struct qed_queue_start_common_params *params,
+                         dma_addr_t pbl_addr,
+                         u16 pbl_size,
+                         void __iomem **pp_doorbell);
+
+       int (*q_tx_stop)(struct qed_dev *cdev,
+                        struct qed_stop_txq_params *params);
+
+       int (*filter_config)(struct qed_dev *cdev,
+                            struct qed_filter_params *params);
+
+       int (*fastpath_stop)(struct qed_dev *cdev);
+
+       int (*eth_cqe_completion)(struct qed_dev *cdev,
+                                 u8 rss_id,
+                                 struct eth_slow_path_rx_cqe *cqe);
+
+       void (*get_vport_stats)(struct qed_dev *cdev,
+                               struct qed_eth_stats *stats);
+
+       int (*tunn_config)(struct qed_dev *cdev,
+                          struct qed_tunn_params *params);
+};
+
+const struct qed_eth_ops *qed_get_eth_ops(void);
+void qed_put_eth_ops(void);
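A hedged sketch of how a protocol driver such as qede binds to these exported ops and starts a vport; the error handling, MTU and vport id are placeholders.

        /* Hypothetical sketch: acquire the qed L2 ops and start vport 0. */
        static int example_bind_and_start(struct qed_dev *cdev)
        {
                const struct qed_eth_ops *ops = qed_get_eth_ops();
                struct qed_start_vport_params start = { 0 };

                if (!ops)
                        return -EINVAL;

                start.vport_id = 0;     /* hypothetical vport index */
                start.mtu = 1500;       /* hypothetical MTU         */
                return ops->vport_start(cdev, &start);
        }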
+
+#endif
diff --git a/include/linux/qed/qed_if.h b/include/linux/qed/qed_if.h
new file mode 100644 (file)
index 0000000..d6c4177
--- /dev/null
@@ -0,0 +1,790 @@
+/* QLogic qed NIC Driver
+ *
+ * Copyright (c) 2015 QLogic Corporation
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#ifndef _QED_IF_H
+#define _QED_IF_H
+
+#include <linux/types.h>
+#include <linux/interrupt.h>
+#include <linux/netdevice.h>
+#include <linux/pci.h>
+#include <linux/skbuff.h>
+#include <linux/types.h>
+#include <asm/byteorder.h>
+#include <linux/io.h>
+#include <linux/compiler.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <linux/qed/common_hsi.h>
+#include <linux/qed/qed_chain.h>
+
+enum dcbx_protocol_type {
+       DCBX_PROTOCOL_ISCSI,
+       DCBX_PROTOCOL_FCOE,
+       DCBX_PROTOCOL_ROCE,
+       DCBX_PROTOCOL_ROCE_V2,
+       DCBX_PROTOCOL_ETH,
+       DCBX_MAX_PROTOCOL_TYPE
+};
+
+#ifdef CONFIG_DCB
+#define QED_LLDP_CHASSIS_ID_STAT_LEN 4
+#define QED_LLDP_PORT_ID_STAT_LEN 4
+#define QED_DCBX_MAX_APP_PROTOCOL 32
+#define QED_MAX_PFC_PRIORITIES 8
+#define QED_DCBX_DSCP_SIZE 64
+
+struct qed_dcbx_lldp_remote {
+       u32 peer_chassis_id[QED_LLDP_CHASSIS_ID_STAT_LEN];
+       u32 peer_port_id[QED_LLDP_PORT_ID_STAT_LEN];
+       bool enable_rx;
+       bool enable_tx;
+       u32 tx_interval;
+       u32 max_credit;
+};
+
+struct qed_dcbx_lldp_local {
+       u32 local_chassis_id[QED_LLDP_CHASSIS_ID_STAT_LEN];
+       u32 local_port_id[QED_LLDP_PORT_ID_STAT_LEN];
+};
+
+struct qed_dcbx_app_prio {
+       u8 roce;
+       u8 roce_v2;
+       u8 fcoe;
+       u8 iscsi;
+       u8 eth;
+};
+
+struct qed_dbcx_pfc_params {
+       bool willing;
+       bool enabled;
+       u8 prio[QED_MAX_PFC_PRIORITIES];
+       u8 max_tc;
+};
+
+enum qed_dcbx_sf_ieee_type {
+       QED_DCBX_SF_IEEE_ETHTYPE,
+       QED_DCBX_SF_IEEE_TCP_PORT,
+       QED_DCBX_SF_IEEE_UDP_PORT,
+       QED_DCBX_SF_IEEE_TCP_UDP_PORT
+};
+
+struct qed_app_entry {
+       bool ethtype;
+       enum qed_dcbx_sf_ieee_type sf_ieee;
+       bool enabled;
+       u8 prio;
+       u16 proto_id;
+       enum dcbx_protocol_type proto_type;
+};
+
+struct qed_dcbx_params {
+       struct qed_app_entry app_entry[QED_DCBX_MAX_APP_PROTOCOL];
+       u16 num_app_entries;
+       bool app_willing;
+       bool app_valid;
+       bool app_error;
+       bool ets_willing;
+       bool ets_enabled;
+       bool ets_cbs;
+       bool valid;
+       u8 ets_pri_tc_tbl[QED_MAX_PFC_PRIORITIES];
+       u8 ets_tc_bw_tbl[QED_MAX_PFC_PRIORITIES];
+       u8 ets_tc_tsa_tbl[QED_MAX_PFC_PRIORITIES];
+       struct qed_dbcx_pfc_params pfc;
+       u8 max_ets_tc;
+};
+
+struct qed_dcbx_admin_params {
+       struct qed_dcbx_params params;
+       bool valid;
+};
+
+struct qed_dcbx_remote_params {
+       struct qed_dcbx_params params;
+       bool valid;
+};
+
+struct qed_dcbx_operational_params {
+       struct qed_dcbx_app_prio app_prio;
+       struct qed_dcbx_params params;
+       bool valid;
+       bool enabled;
+       bool ieee;
+       bool cee;
+       u32 err;
+};
+
+struct qed_dcbx_get {
+       struct qed_dcbx_operational_params operational;
+       struct qed_dcbx_lldp_remote lldp_remote;
+       struct qed_dcbx_lldp_local lldp_local;
+       struct qed_dcbx_remote_params remote;
+       struct qed_dcbx_admin_params local;
+};
+#endif
+
+enum qed_led_mode {
+       QED_LED_MODE_OFF,
+       QED_LED_MODE_ON,
+       QED_LED_MODE_RESTORE
+};
+
+#define DIRECT_REG_WR(reg_addr, val) writel((u32)val, \
+                                           (void __iomem *)(reg_addr))
+
+#define DIRECT_REG_RD(reg_addr) readl((void __iomem *)(reg_addr))
+
+#define QED_COALESCE_MAX 0xFF
+
+/* forward */
+struct qed_dev;
+
+struct qed_eth_pf_params {
+       /* The following parameters are used during HW-init
+        * and these parameters need to be passed as arguments
+        * to update_pf_params routine invoked before slowpath start
+        */
+       u16 num_cons;
+};
+
+/* Most of the parameters below are described in the FW iSCSI / TCP HSI */
+struct qed_iscsi_pf_params {
+       u64 glbl_q_params_addr;
+       u64 bdq_pbl_base_addr[2];
+       u32 max_cwnd;
+       u16 cq_num_entries;
+       u16 cmdq_num_entries;
+       u16 dup_ack_threshold;
+       u16 tx_sws_timer;
+       u16 min_rto;
+       u16 min_rto_rt;
+       u16 max_rto;
+
+       /* The following parameters are used during HW-init
+        * and these parameters need to be passed as arguments
+        * to update_pf_params routine invoked before slowpath start
+        */
+       u16 num_cons;
+       u16 num_tasks;
+
+       /* The following parameters are used during protocol-init */
+       u16 half_way_close_timeout;
+       u16 bdq_xoff_threshold[2];
+       u16 bdq_xon_threshold[2];
+       u16 cmdq_xoff_threshold;
+       u16 cmdq_xon_threshold;
+       u16 rq_buffer_size;
+
+       u8 num_sq_pages_in_ring;
+       u8 num_r2tq_pages_in_ring;
+       u8 num_uhq_pages_in_ring;
+       u8 num_queues;
+       u8 log_page_size;
+       u8 rqe_log_size;
+       u8 max_fin_rt;
+       u8 gl_rq_pi;
+       u8 gl_cmd_pi;
+       u8 debug_mode;
+       u8 ll2_ooo_queue_id;
+       u8 ooo_enable;
+
+       u8 is_target;
+       u8 bdq_pbl_num_entries[2];
+};
+
+struct qed_rdma_pf_params {
+       /* Supplied to QED during resource allocation (may affect the ILT and
+        * the doorbell BAR).
+        */
+       u32 min_dpis;           /* number of requested DPIs */
+       u32 num_mrs;            /* number of requested memory regions */
+       u32 num_qps;            /* number of requested Queue Pairs */
+       u32 num_srqs;           /* number of requested SRQ */
+       u8 roce_edpm_mode;      /* see QED_ROCE_EDPM_MODE_ENABLE */
+       u8 gl_pi;               /* protocol index */
+
+       /* Will allocate rate limiters to be used with QPs */
+       u8 enable_dcqcn;
+};
+
+struct qed_pf_params {
+       struct qed_eth_pf_params eth_pf_params;
+       struct qed_iscsi_pf_params iscsi_pf_params;
+       struct qed_rdma_pf_params rdma_pf_params;
+};
+
+enum qed_int_mode {
+       QED_INT_MODE_INTA,
+       QED_INT_MODE_MSIX,
+       QED_INT_MODE_MSI,
+       QED_INT_MODE_POLL,
+};
+
+struct qed_sb_info {
+       struct status_block     *sb_virt;
+       dma_addr_t              sb_phys;
+       u32                     sb_ack; /* Last given ack */
+       u16                     igu_sb_id;
+       void __iomem            *igu_addr;
+       u8                      flags;
+#define QED_SB_INFO_INIT        0x1
+#define QED_SB_INFO_SETUP       0x2
+
+       struct qed_dev          *cdev;
+};
+
+struct qed_dev_info {
+       unsigned long   pci_mem_start;
+       unsigned long   pci_mem_end;
+       unsigned int    pci_irq;
+       u8              num_hwfns;
+
+       u8              hw_mac[ETH_ALEN];
+       bool            is_mf_default;
+
+       /* FW version */
+       u16             fw_major;
+       u16             fw_minor;
+       u16             fw_rev;
+       u16             fw_eng;
+
+       /* MFW version */
+       u32             mfw_rev;
+
+       bool rdma_supported;
+
+       u32             flash_size;
+       u8              mf_mode;
+       bool            tx_switching;
+};
+
+enum qed_sb_type {
+       QED_SB_TYPE_L2_QUEUE,
+};
+
+enum qed_protocol {
+       QED_PROTOCOL_ETH,
+       QED_PROTOCOL_ISCSI,
+};
+
+struct qed_link_params {
+       bool    link_up;
+
+#define QED_LINK_OVERRIDE_SPEED_AUTONEG         BIT(0)
+#define QED_LINK_OVERRIDE_SPEED_ADV_SPEEDS      BIT(1)
+#define QED_LINK_OVERRIDE_SPEED_FORCED_SPEED    BIT(2)
+#define QED_LINK_OVERRIDE_PAUSE_CONFIG          BIT(3)
+#define QED_LINK_OVERRIDE_LOOPBACK_MODE         BIT(4)
+       u32     override_flags;
+       bool    autoneg;
+       u32     adv_speeds;
+       u32     forced_speed;
+#define QED_LINK_PAUSE_AUTONEG_ENABLE           BIT(0)
+#define QED_LINK_PAUSE_RX_ENABLE                BIT(1)
+#define QED_LINK_PAUSE_TX_ENABLE                BIT(2)
+       u32     pause_config;
+#define QED_LINK_LOOPBACK_NONE                  BIT(0)
+#define QED_LINK_LOOPBACK_INT_PHY               BIT(1)
+#define QED_LINK_LOOPBACK_EXT_PHY               BIT(2)
+#define QED_LINK_LOOPBACK_EXT                   BIT(3)
+#define QED_LINK_LOOPBACK_MAC                   BIT(4)
+       u32     loopback_mode;
+};
+
+struct qed_link_output {
+       bool    link_up;
+
+       u32     supported_caps;         /* In SUPPORTED defs */
+       u32     advertised_caps;        /* In ADVERTISED defs */
+       u32     lp_caps;                /* In ADVERTISED defs */
+       u32     speed;                  /* In Mb/s */
+       u8      duplex;                 /* In DUPLEX defs */
+       u8      port;                   /* In PORT defs */
+       bool    autoneg;
+       u32     pause_config;
+};
+
+struct qed_probe_params {
+       enum qed_protocol protocol;
+       u32 dp_module;
+       u8 dp_level;
+       bool is_vf;
+};
+
+#define QED_DRV_VER_STR_SIZE 12
+struct qed_slowpath_params {
+       u32     int_mode;
+       u8      drv_major;
+       u8      drv_minor;
+       u8      drv_rev;
+       u8      drv_eng;
+       u8      name[QED_DRV_VER_STR_SIZE];
+};
+
+#define ILT_PAGE_SIZE_TCFC 0x8000 /* 32KB */
+
+struct qed_int_info {
+       struct msix_entry       *msix;
+       u8                      msix_cnt;
+
+       /* This should be updated by the protocol driver */
+       u8                      used_cnt;
+};
+
+struct qed_common_cb_ops {
+       void    (*link_update)(void                     *dev,
+                              struct qed_link_output   *link);
+};
+
+struct qed_selftest_ops {
+/**
+ * @brief selftest_interrupt - Perform interrupt test
+ *
+ * @param cdev
+ *
+ * @return 0 on success, error otherwise.
+ */
+       int (*selftest_interrupt)(struct qed_dev *cdev);
+
+/**
+ * @brief selftest_memory - Perform memory test
+ *
+ * @param cdev
+ *
+ * @return 0 on success, error otherwise.
+ */
+       int (*selftest_memory)(struct qed_dev *cdev);
+
+/**
+ * @brief selftest_register - Perform register test
+ *
+ * @param cdev
+ *
+ * @return 0 on success, error otherwise.
+ */
+       int (*selftest_register)(struct qed_dev *cdev);
+
+/**
+ * @brief selftest_clock - Perform clock test
+ *
+ * @param cdev
+ *
+ * @return 0 on success, error otherwise.
+ */
+       int (*selftest_clock)(struct qed_dev *cdev);
+};
+
+struct qed_common_ops {
+       struct qed_selftest_ops *selftest;
+
+       struct qed_dev* (*probe)(struct pci_dev *dev,
+                                struct qed_probe_params *params);
+
+       void            (*remove)(struct qed_dev *cdev);
+
+       int             (*set_power_state)(struct qed_dev *cdev,
+                                          pci_power_t state);
+
+       void            (*set_id)(struct qed_dev *cdev,
+                                 char name[],
+                                 char ver_str[]);
+
+       /* Client drivers need to make this call before slowpath_start.
+        * The PF params required before slowpath_start are documented
+        * within the qed_pf_params structure definition.
+        */
+       void            (*update_pf_params)(struct qed_dev *cdev,
+                                           struct qed_pf_params *params);
+       int             (*slowpath_start)(struct qed_dev *cdev,
+                                         struct qed_slowpath_params *params);
+
+       int             (*slowpath_stop)(struct qed_dev *cdev);
+
+       /* Requests to use `cnt' interrupts for fastpath.
+        * Upon success, returns the number of interrupts allocated for fastpath.
+        */
+       int             (*set_fp_int)(struct qed_dev *cdev,
+                                     u16 cnt);
+
+       /* Fills `info' with pointers required for utilizing interrupts */
+       int             (*get_fp_int)(struct qed_dev *cdev,
+                                     struct qed_int_info *info);
+
+       u32             (*sb_init)(struct qed_dev *cdev,
+                                  struct qed_sb_info *sb_info,
+                                  void *sb_virt_addr,
+                                  dma_addr_t sb_phy_addr,
+                                  u16 sb_id,
+                                  enum qed_sb_type type);
+
+       u32             (*sb_release)(struct qed_dev *cdev,
+                                     struct qed_sb_info *sb_info,
+                                     u16 sb_id);
+
+       void            (*simd_handler_config)(struct qed_dev *cdev,
+                                              void *token,
+                                              int index,
+                                              void (*handler)(void *));
+
+       void            (*simd_handler_clean)(struct qed_dev *cdev,
+                                             int index);
+
+/**
+ * @brief can_link_change - can the instance change the link or not
+ *
+ * @param cdev
+ *
+ * @return true if link-change is allowed, false otherwise.
+ */
+       bool (*can_link_change)(struct qed_dev *cdev);
+
+/**
+ * @brief set_link - set links according to params
+ *
+ * @param cdev
+ * @param params - values used to override the default link configuration
+ *
+ * @return 0 on success, error otherwise.
+ */
+       int             (*set_link)(struct qed_dev *cdev,
+                                   struct qed_link_params *params);
+
+/**
+ * @brief get_link - returns the current link state.
+ *
+ * @param cdev
+ * @param if_link - structure to be filled with current link configuration.
+ */
+       void            (*get_link)(struct qed_dev *cdev,
+                                   struct qed_link_output *if_link);
+
+/**
+ * @brief drain - drains the chip in case Tx completions fail to arrive due to pause.
+ *
+ * @param cdev
+ */
+       int             (*drain)(struct qed_dev *cdev);
+
+/**
+ * @brief update_msglvl - update module debug level
+ *
+ * @param cdev
+ * @param dp_module
+ * @param dp_level
+ */
+       void            (*update_msglvl)(struct qed_dev *cdev,
+                                        u32 dp_module,
+                                        u8 dp_level);
+
+       int             (*chain_alloc)(struct qed_dev *cdev,
+                                      enum qed_chain_use_mode intended_use,
+                                      enum qed_chain_mode mode,
+                                      enum qed_chain_cnt_type cnt_type,
+                                      u32 num_elems,
+                                      size_t elem_size,
+                                      struct qed_chain *p_chain);
+
+       void            (*chain_free)(struct qed_dev *cdev,
+                                     struct qed_chain *p_chain);
+
+/**
+ * @brief get_coalesce - Get coalesce parameters in usec
+ *
+ * @param cdev
+ * @param rx_coal - Rx coalesce value in usec
+ * @param tx_coal - Tx coalesce value in usec
+ *
+ */
+       void (*get_coalesce)(struct qed_dev *cdev, u16 *rx_coal, u16 *tx_coal);
+
+/**
+ * @brief set_coalesce - Configure Rx coalesce value in usec
+ *
+ * @param cdev
+ * @param rx_coal - Rx coalesce value in usec
+ * @param tx_coal - Tx coalesce value in usec
+ * @param qid - Queue index
+ * @param sb_id - Status Block Id
+ *
+ * @return 0 on success, error otherwise.
+ */
+       int (*set_coalesce)(struct qed_dev *cdev, u16 rx_coal, u16 tx_coal,
+                           u8 qid, u16 sb_id);
+
+/**
+ * @brief set_led - Configure LED mode
+ *
+ * @param cdev
+ * @param mode - LED mode
+ *
+ * @return 0 on success, error otherwise.
+ */
+       int (*set_led)(struct qed_dev *cdev,
+                      enum qed_led_mode mode);
+};
+
+#define MASK_FIELD(_name, _value) \
+       ((_value) &= (_name ## _MASK))
+
+#define FIELD_VALUE(_name, _value) \
+       ((_value & _name ## _MASK) << _name ## _SHIFT)
+
+#define SET_FIELD(value, name, flag)                          \
+       do {                                                   \
+               (value) &= ~(name ## _MASK << name ## _SHIFT); \
+               (value) |= (((u64)flag) << (name ## _SHIFT));  \
+       } while (0)
+
+#define GET_FIELD(value, name) \
+       (((value) >> (name ## _SHIFT)) & name ## _MASK)
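A minimal sketch of the SET_FIELD/GET_FIELD convention; EXAMPLE_FIELD_MASK/_SHIFT are made-up names that mirror the *_MASK/*_SHIFT pairs defined throughout the HSI headers in this commit.

        #define EXAMPLE_FIELD_MASK      0x3     /* hypothetical two-bit field */
        #define EXAMPLE_FIELD_SHIFT     2       /* located at bits 3:2        */

        static inline u8 example_pack_unpack(u8 flags, u8 val)
        {
                SET_FIELD(flags, EXAMPLE_FIELD, val);   /* clear bits 3:2, then set them */
                return GET_FIELD(flags, EXAMPLE_FIELD); /* read bits 3:2 back            */
        }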
+
+/* Debug print definitions */
+#define DP_ERR(cdev, fmt, ...)                                              \
+               pr_err("[%s:%d(%s)]" fmt,                                    \
+                      __func__, __LINE__,                                   \
+                      DP_NAME(cdev) ? DP_NAME(cdev) : "",                   \
+                      ## __VA_ARGS__)
+
+#define DP_NOTICE(cdev, fmt, ...)                                    \
+       do {                                                          \
+               if (unlikely((cdev)->dp_level <= QED_LEVEL_NOTICE)) { \
+                       pr_notice("[%s:%d(%s)]" fmt,                  \
+                                 __func__, __LINE__,                 \
+                                 DP_NAME(cdev) ? DP_NAME(cdev) : "", \
+                                 ## __VA_ARGS__);                    \
+                                                                     \
+               }                                                     \
+       } while (0)
+
+#define DP_INFO(cdev, fmt, ...)                                              \
+       do {                                                          \
+               if (unlikely((cdev)->dp_level <= QED_LEVEL_INFO)) {   \
+                       pr_notice("[%s:%d(%s)]" fmt,                  \
+                                 __func__, __LINE__,                 \
+                                 DP_NAME(cdev) ? DP_NAME(cdev) : "", \
+                                 ## __VA_ARGS__);                    \
+               }                                                     \
+       } while (0)
+
+#define DP_VERBOSE(cdev, module, fmt, ...)                             \
+       do {                                                            \
+               if (unlikely(((cdev)->dp_level <= QED_LEVEL_VERBOSE) && \
+                            ((cdev)->dp_module & module))) {           \
+                       pr_notice("[%s:%d(%s)]" fmt,                    \
+                                 __func__, __LINE__,                   \
+                                 DP_NAME(cdev) ? DP_NAME(cdev) : "",   \
+                                 ## __VA_ARGS__);                      \
+               }                                                       \
+       } while (0)
+
+enum DP_LEVEL {
+       QED_LEVEL_VERBOSE       = 0x0,
+       QED_LEVEL_INFO          = 0x1,
+       QED_LEVEL_NOTICE        = 0x2,
+       QED_LEVEL_ERR           = 0x3,
+};
+
+#define QED_LOG_LEVEL_SHIFT     (30)
+#define QED_LOG_VERBOSE_MASK    (0x3fffffff)
+#define QED_LOG_INFO_MASK       (0x40000000)
+#define QED_LOG_NOTICE_MASK     (0x80000000)
+
+enum DP_MODULE {
+       QED_MSG_SPQ     = 0x10000,
+       QED_MSG_STATS   = 0x20000,
+       QED_MSG_DCB     = 0x40000,
+       QED_MSG_IOV     = 0x80000,
+       QED_MSG_SP      = 0x100000,
+       QED_MSG_STORAGE = 0x200000,
+       QED_MSG_CXT     = 0x800000,
+       QED_MSG_ILT     = 0x2000000,
+       QED_MSG_ROCE    = 0x4000000,
+       QED_MSG_DEBUG   = 0x8000000,
+       /* to be added...up to 0x8000000 */
+};
+
+enum qed_mf_mode {
+       QED_MF_DEFAULT,
+       QED_MF_OVLAN,
+       QED_MF_NPAR,
+};
+
+struct qed_eth_stats {
+       u64     no_buff_discards;
+       u64     packet_too_big_discard;
+       u64     ttl0_discard;
+       u64     rx_ucast_bytes;
+       u64     rx_mcast_bytes;
+       u64     rx_bcast_bytes;
+       u64     rx_ucast_pkts;
+       u64     rx_mcast_pkts;
+       u64     rx_bcast_pkts;
+       u64     mftag_filter_discards;
+       u64     mac_filter_discards;
+       u64     tx_ucast_bytes;
+       u64     tx_mcast_bytes;
+       u64     tx_bcast_bytes;
+       u64     tx_ucast_pkts;
+       u64     tx_mcast_pkts;
+       u64     tx_bcast_pkts;
+       u64     tx_err_drop_pkts;
+       u64     tpa_coalesced_pkts;
+       u64     tpa_coalesced_events;
+       u64     tpa_aborts_num;
+       u64     tpa_not_coalesced_pkts;
+       u64     tpa_coalesced_bytes;
+
+       /* port */
+       u64     rx_64_byte_packets;
+       u64     rx_65_to_127_byte_packets;
+       u64     rx_128_to_255_byte_packets;
+       u64     rx_256_to_511_byte_packets;
+       u64     rx_512_to_1023_byte_packets;
+       u64     rx_1024_to_1518_byte_packets;
+       u64     rx_1519_to_1522_byte_packets;
+       u64     rx_1519_to_2047_byte_packets;
+       u64     rx_2048_to_4095_byte_packets;
+       u64     rx_4096_to_9216_byte_packets;
+       u64     rx_9217_to_16383_byte_packets;
+       u64     rx_crc_errors;
+       u64     rx_mac_crtl_frames;
+       u64     rx_pause_frames;
+       u64     rx_pfc_frames;
+       u64     rx_align_errors;
+       u64     rx_carrier_errors;
+       u64     rx_oversize_packets;
+       u64     rx_jabbers;
+       u64     rx_undersize_packets;
+       u64     rx_fragments;
+       u64     tx_64_byte_packets;
+       u64     tx_65_to_127_byte_packets;
+       u64     tx_128_to_255_byte_packets;
+       u64     tx_256_to_511_byte_packets;
+       u64     tx_512_to_1023_byte_packets;
+       u64     tx_1024_to_1518_byte_packets;
+       u64     tx_1519_to_2047_byte_packets;
+       u64     tx_2048_to_4095_byte_packets;
+       u64     tx_4096_to_9216_byte_packets;
+       u64     tx_9217_to_16383_byte_packets;
+       u64     tx_pause_frames;
+       u64     tx_pfc_frames;
+       u64     tx_lpi_entry_count;
+       u64     tx_total_collisions;
+       u64     brb_truncates;
+       u64     brb_discards;
+       u64     rx_mac_bytes;
+       u64     rx_mac_uc_packets;
+       u64     rx_mac_mc_packets;
+       u64     rx_mac_bc_packets;
+       u64     rx_mac_frames_ok;
+       u64     tx_mac_bytes;
+       u64     tx_mac_uc_packets;
+       u64     tx_mac_mc_packets;
+       u64     tx_mac_bc_packets;
+       u64     tx_mac_ctrl_frames;
+};
+
+#define QED_SB_IDX              0x0002
+
+#define RX_PI           0
+#define TX_PI(tc)       (RX_PI + 1 + tc)
+
+struct qed_sb_cnt_info {
+       int     sb_cnt;
+       int     sb_iov_cnt;
+       int     sb_free_blk;
+};
+
+static inline u16 qed_sb_update_sb_idx(struct qed_sb_info *sb_info)
+{
+       u32 prod = 0;
+       u16 rc = 0;
+
+       prod = le32_to_cpu(sb_info->sb_virt->prod_index) &
+              STATUS_BLOCK_PROD_INDEX_MASK;
+       if (sb_info->sb_ack != prod) {
+               sb_info->sb_ack = prod;
+               rc |= QED_SB_IDX;
+       }
+
+       /* Let SB update */
+       mmiowb();
+       return rc;
+}
+
+/**
+ * @brief Write an interrupt update command for this status block to the IGU.
+ *
+ * @param sb_info - status block structure, allocated and initialized per
+ *                  status block; assumed to have been set up by qed_sb_init.
+ * @param int_cmd - Enable/Disable/Nop
+ * @param upd_flg - whether the IGU consumer should be updated.
+ *
+ * @return void
+ */
+static inline void qed_sb_ack(struct qed_sb_info *sb_info,
+                             enum igu_int_cmd int_cmd,
+                             u8 upd_flg)
+{
+       struct igu_prod_cons_update igu_ack = { 0 };
+
+       igu_ack.sb_id_and_flags =
+               ((sb_info->sb_ack << IGU_PROD_CONS_UPDATE_SB_INDEX_SHIFT) |
+                (upd_flg << IGU_PROD_CONS_UPDATE_UPDATE_FLAG_SHIFT) |
+                (int_cmd << IGU_PROD_CONS_UPDATE_ENABLE_INT_SHIFT) |
+                (IGU_SEG_ACCESS_REG <<
+                 IGU_PROD_CONS_UPDATE_SEGMENT_ACCESS_SHIFT));
+
+       DIRECT_REG_WR(sb_info->igu_addr, igu_ack.sb_id_and_flags);
+
+       /* Both segments (interrupts & acks) are written to the same address;
+        * we need to guarantee that the HW receives all commands in order.
+        */
+       mmiowb();
+       barrier();
+}
+
+static inline void __internal_ram_wr(void *p_hwfn,
+                                    void __iomem *addr,
+                                    int size,
+                                    u32 *data)
+
+{
+       unsigned int i;
+
+       for (i = 0; i < size / sizeof(*data); i++)
+               DIRECT_REG_WR(&((u32 __iomem *)addr)[i], data[i]);
+}
+
+static inline void internal_ram_wr(void __iomem *addr,
+                                  int size,
+                                  u32 *data)
+{
+       __internal_ram_wr(NULL, addr, size, data);
+}
+
+enum qed_rss_caps {
+       QED_RSS_IPV4            = 0x1,
+       QED_RSS_IPV6            = 0x2,
+       QED_RSS_IPV4_TCP        = 0x4,
+       QED_RSS_IPV6_TCP        = 0x8,
+       QED_RSS_IPV4_UDP        = 0x10,
+       QED_RSS_IPV6_UDP        = 0x20,
+};
+
+#define QED_RSS_IND_TABLE_SIZE 128
+#define QED_RSS_KEY_SIZE 10 /* size in 32b chunks */
+#endif
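
Usage sketch (illustrative; not part of this commit): the DP_* macros above gate output on the device's dp_level, and DP_VERBOSE additionally on a per-subsystem bit from enum DP_MODULE in dp_module. A minimal sketch, assuming it is compiled inside the qed core where the full struct qed_dev definition (carrying dp_level and dp_module) is visible; the function name is hypothetical.

    #include <linux/qed/qed_if.h>

    /* Hypothetical helper: log a slowpath event at two verbosity levels. */
    static void example_log_spq_event(struct qed_dev *cdev, u16 echo)
    {
            /* Printed whenever dp_level <= QED_LEVEL_NOTICE. */
            DP_NOTICE(cdev, "slowpath completion, echo %04x\n", echo);

            /* Printed only if dp_level <= QED_LEVEL_VERBOSE and the
             * QED_MSG_SPQ bit is set in dp_module.
             */
            DP_VERBOSE(cdev, QED_MSG_SPQ,
                       "detailed SPQ trace, echo %04x\n", echo);
    }
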
diff --git a/include/linux/qed/qed_iov_if.h b/include/linux/qed/qed_iov_if.h
new file mode 100644 (file)
index 0000000..5a4f8d0
--- /dev/null
@@ -0,0 +1,34 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015 QLogic Corporation
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#ifndef _QED_IOV_IF_H
+#define _QED_IOV_IF_H
+
+#include <linux/qed/qed_if.h>
+
+/* Structs used by PF to control and manipulate child VFs */
+struct qed_iov_hv_ops {
+       int (*configure)(struct qed_dev *cdev, int num_vfs_param);
+
+       int (*set_mac) (struct qed_dev *cdev, u8 *mac, int vfid);
+
+       int (*set_vlan) (struct qed_dev *cdev, u16 vid, int vfid);
+
+       int (*get_config) (struct qed_dev *cdev, int vf_id,
+                          struct ifla_vf_info *ivi);
+
+       int (*set_link_state) (struct qed_dev *cdev, int vf_id,
+                              int link_state);
+
+       int (*set_spoof) (struct qed_dev *cdev, int vfid, bool val);
+
+       int (*set_rate) (struct qed_dev *cdev, int vfid,
+                        u32 min_rate, u32 max_rate);
+};
+
+#endif
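
Usage sketch (illustrative; not part of this commit): a PF Ethernet driver is expected to forward its SR-IOV ndo callbacks through these ops. A minimal sketch, assuming the caller has already obtained the qed device handle and the ops table from the qed core; the wrapper and variable names are hypothetical.

    #include <linux/errno.h>
    #include <linux/etherdevice.h>
    #include <linux/qed/qed_iov_if.h>

    /* Hypothetical wrapper: validate and push a VF MAC change to the core. */
    static int example_set_vf_mac(struct qed_dev *cdev,
                                  const struct qed_iov_hv_ops *iov_ops,
                                  int vfid, u8 *mac)
    {
            if (!is_valid_ether_addr(mac))
                    return -EINVAL;

            return iov_ops->set_mac(cdev, mac, vfid);
    }
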
diff --git a/include/linux/qed/rdma_common.h b/include/linux/qed/rdma_common.h
new file mode 100644 (file)
index 0000000..187991c
--- /dev/null
@@ -0,0 +1,44 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015 QLogic Corporation
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#ifndef __RDMA_COMMON__
+#define __RDMA_COMMON__
+/************************/
+/* RDMA FW CONSTANTS */
+/************************/
+
+#define RDMA_RESERVED_LKEY                      (0)
+#define RDMA_RING_PAGE_SIZE                     (0x1000)
+
+#define RDMA_MAX_SGE_PER_SQ_WQE         (4)
+#define RDMA_MAX_SGE_PER_RQ_WQE         (4)
+
+#define RDMA_MAX_DATA_SIZE_IN_WQE       (0x7FFFFFFF)
+
+#define RDMA_REQ_RD_ATOMIC_ELM_SIZE             (0x50)
+#define RDMA_RESP_RD_ATOMIC_ELM_SIZE    (0x20)
+
+#define RDMA_MAX_CQS                            (64 * 1024)
+#define RDMA_MAX_TIDS                           (128 * 1024 - 1)
+#define RDMA_MAX_PDS                            (64 * 1024)
+
+#define RDMA_NUM_STATISTIC_COUNTERS                     MAX_NUM_VPORTS
+
+#define RDMA_TASK_TYPE (PROTOCOLID_ROCE)
+
+struct rdma_srq_id {
+       __le16 srq_idx;
+       __le16 opaque_fid;
+};
+
+struct rdma_srq_producers {
+       __le32 sge_prod;
+       __le32 wqe_prod;
+};
+
+#endif /* __RDMA_COMMON__ */
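
Usage sketch (illustrative; not part of this commit): the constants above are firmware limits a consumer should honour when building RDMA work requests. A minimal sketch, with hypothetical names, of validating a send request against them.

    #include <linux/errno.h>
    #include <linux/types.h>
    #include <linux/qed/rdma_common.h>

    /* Hypothetical check: reject a WQE that exceeds the FW SGE or size limits. */
    static int example_check_sq_wqe(u32 num_sge, u64 data_len)
    {
            if (num_sge > RDMA_MAX_SGE_PER_SQ_WQE)
                    return -EINVAL;
            if (data_len > RDMA_MAX_DATA_SIZE_IN_WQE)
                    return -EINVAL;
            return 0;
    }
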
diff --git a/include/linux/qed/roce_common.h b/include/linux/qed/roce_common.h
new file mode 100644 (file)
index 0000000..2eeaf3d
--- /dev/null
@@ -0,0 +1,17 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015 QLogic Corporation
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#ifndef __ROCE_COMMON__
+#define __ROCE_COMMON__
+
+#define ROCE_REQ_MAX_INLINE_DATA_SIZE (256)
+#define ROCE_REQ_MAX_SINGLE_SQ_WQE_SIZE (288)
+
+#define ROCE_MAX_QPS   (32 * 1024)
+
+#endif /* __ROCE_COMMON__ */
diff --git a/include/linux/qed/storage_common.h b/include/linux/qed/storage_common.h
new file mode 100644 (file)
index 0000000..3b8e1ef
--- /dev/null
@@ -0,0 +1,91 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015 QLogic Corporation
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#ifndef __STORAGE_COMMON__
+#define __STORAGE_COMMON__
+
+#define NUM_OF_CMDQS_CQS (NUM_OF_GLOBAL_QUEUES / 2)
+#define BDQ_NUM_RESOURCES (4)
+
+#define BDQ_ID_RQ                        (0)
+#define BDQ_ID_IMM_DATA          (1)
+#define BDQ_NUM_IDS          (2)
+
+#define BDQ_MAX_EXTERNAL_RING_SIZE (1 << 15)
+
+struct scsi_bd {
+       struct regpair address;
+       struct regpair opaque;
+};
+
+struct scsi_bdq_ram_drv_data {
+       __le16 external_producer;
+       __le16 reserved0[3];
+};
+
+struct scsi_drv_cmdq {
+       __le16 cmdq_cons;
+       __le16 reserved0;
+       __le32 reserved1;
+};
+
+struct scsi_init_func_params {
+       __le16 num_tasks;
+       u8 log_page_size;
+       u8 debug_mode;
+       u8 reserved2[12];
+};
+
+struct scsi_init_func_queues {
+       struct regpair glbl_q_params_addr;
+       __le16 rq_buffer_size;
+       __le16 cq_num_entries;
+       __le16 cmdq_num_entries;
+       u8 bdq_resource_id;
+       u8 q_validity;
+#define SCSI_INIT_FUNC_QUEUES_RQ_VALID_MASK        0x1
+#define SCSI_INIT_FUNC_QUEUES_RQ_VALID_SHIFT       0
+#define SCSI_INIT_FUNC_QUEUES_IMM_DATA_VALID_MASK  0x1
+#define SCSI_INIT_FUNC_QUEUES_IMM_DATA_VALID_SHIFT 1
+#define SCSI_INIT_FUNC_QUEUES_CMD_VALID_MASK       0x1
+#define SCSI_INIT_FUNC_QUEUES_CMD_VALID_SHIFT      2
+#define SCSI_INIT_FUNC_QUEUES_RESERVED_VALID_MASK  0x1F
+#define SCSI_INIT_FUNC_QUEUES_RESERVED_VALID_SHIFT 3
+       u8 num_queues;
+       u8 queue_relative_offset;
+       u8 cq_sb_pi;
+       u8 cmdq_sb_pi;
+       __le16 cq_cmdq_sb_num_arr[NUM_OF_CMDQS_CQS];
+       __le16 reserved0;
+       u8 bdq_pbl_num_entries[BDQ_NUM_IDS];
+       struct regpair bdq_pbl_base_address[BDQ_NUM_IDS];
+       __le16 bdq_xoff_threshold[BDQ_NUM_IDS];
+       __le16 bdq_xon_threshold[BDQ_NUM_IDS];
+       __le16 cmdq_xoff_threshold;
+       __le16 cmdq_xon_threshold;
+       __le32 reserved1;
+};
+
+struct scsi_ram_per_bdq_resource_drv_data {
+       struct scsi_bdq_ram_drv_data drv_data_per_bdq_id[BDQ_NUM_IDS];
+};
+
+struct scsi_sge {
+       struct regpair sge_addr;
+       __le16 sge_len;
+       __le16 reserved0;
+       __le32 reserved1;
+};
+
+struct scsi_terminate_extra_params {
+       __le16 unsolicited_cq_count;
+       __le16 cmdq_count;
+       u8 reserved[4];
+};
+
+#endif /* __STORAGE_COMMON__ */
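
Usage sketch (illustrative; not part of this commit): the MASK/SHIFT pairs above describe single-bit fields packed into q_validity. A minimal sketch of filling them by hand, assuming the qed common_hsi.h header (for struct regpair and NUM_OF_GLOBAL_QUEUES) is included first; the function name is hypothetical.

    #include <linux/qed/common_hsi.h>
    #include <linux/qed/storage_common.h>

    /* Hypothetical init: mark RQ and command queue valid, immediate-data BDQ off. */
    static void example_mark_queues_valid(struct scsi_init_func_queues *q)
    {
            q->q_validity = 0;
            q->q_validity |= (1 & SCSI_INIT_FUNC_QUEUES_RQ_VALID_MASK) <<
                             SCSI_INIT_FUNC_QUEUES_RQ_VALID_SHIFT;
            q->q_validity |= (1 & SCSI_INIT_FUNC_QUEUES_CMD_VALID_MASK) <<
                             SCSI_INIT_FUNC_QUEUES_CMD_VALID_SHIFT;
    }
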
diff --git a/include/linux/qed/tcp_common.h b/include/linux/qed/tcp_common.h
new file mode 100644 (file)
index 0000000..accba0e
--- /dev/null
@@ -0,0 +1,226 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015 QLogic Corporation
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#ifndef __TCP_COMMON__
+#define __TCP_COMMON__
+
+#define TCP_INVALID_TIMEOUT_VAL -1
+
+enum tcp_connect_mode {
+       TCP_CONNECT_ACTIVE,
+       TCP_CONNECT_PASSIVE,
+       MAX_TCP_CONNECT_MODE
+};
+
+struct tcp_init_params {
+       __le32 max_cwnd;
+       __le16 dup_ack_threshold;
+       __le16 tx_sws_timer;
+       __le16 min_rto;
+       __le16 min_rto_rt;
+       __le16 max_rto;
+       u8 maxfinrt;
+       u8 reserved[1];
+};
+
+enum tcp_ip_version {
+       TCP_IPV4,
+       TCP_IPV6,
+       MAX_TCP_IP_VERSION
+};
+
+struct tcp_offload_params {
+       __le16 local_mac_addr_lo;
+       __le16 local_mac_addr_mid;
+       __le16 local_mac_addr_hi;
+       __le16 remote_mac_addr_lo;
+       __le16 remote_mac_addr_mid;
+       __le16 remote_mac_addr_hi;
+       __le16 vlan_id;
+       u8 flags;
+#define TCP_OFFLOAD_PARAMS_TS_EN_MASK         0x1
+#define TCP_OFFLOAD_PARAMS_TS_EN_SHIFT        0
+#define TCP_OFFLOAD_PARAMS_DA_EN_MASK         0x1
+#define TCP_OFFLOAD_PARAMS_DA_EN_SHIFT        1
+#define TCP_OFFLOAD_PARAMS_KA_EN_MASK         0x1
+#define TCP_OFFLOAD_PARAMS_KA_EN_SHIFT        2
+#define TCP_OFFLOAD_PARAMS_NAGLE_EN_MASK      0x1
+#define TCP_OFFLOAD_PARAMS_NAGLE_EN_SHIFT     3
+#define TCP_OFFLOAD_PARAMS_DA_CNT_EN_MASK     0x1
+#define TCP_OFFLOAD_PARAMS_DA_CNT_EN_SHIFT    4
+#define TCP_OFFLOAD_PARAMS_FIN_SENT_MASK      0x1
+#define TCP_OFFLOAD_PARAMS_FIN_SENT_SHIFT     5
+#define TCP_OFFLOAD_PARAMS_FIN_RECEIVED_MASK  0x1
+#define TCP_OFFLOAD_PARAMS_FIN_RECEIVED_SHIFT 6
+#define TCP_OFFLOAD_PARAMS_RESERVED0_MASK     0x1
+#define TCP_OFFLOAD_PARAMS_RESERVED0_SHIFT    7
+       u8 ip_version;
+       __le32 remote_ip[4];
+       __le32 local_ip[4];
+       __le32 flow_label;
+       u8 ttl;
+       u8 tos_or_tc;
+       __le16 remote_port;
+       __le16 local_port;
+       __le16 mss;
+       u8 rcv_wnd_scale;
+       u8 connect_mode;
+       __le16 srtt;
+       __le32 cwnd;
+       __le32 ss_thresh;
+       __le16 reserved1;
+       u8 ka_max_probe_cnt;
+       u8 dup_ack_theshold;
+       __le32 rcv_next;
+       __le32 snd_una;
+       __le32 snd_next;
+       __le32 snd_max;
+       __le32 snd_wnd;
+       __le32 rcv_wnd;
+       __le32 snd_wl1;
+       __le32 ts_time;
+       __le32 ts_recent;
+       __le32 ts_recent_age;
+       __le32 total_rt;
+       __le32 ka_timeout_delta;
+       __le32 rt_timeout_delta;
+       u8 dup_ack_cnt;
+       u8 snd_wnd_probe_cnt;
+       u8 ka_probe_cnt;
+       u8 rt_cnt;
+       __le16 rtt_var;
+       __le16 reserved2;
+       __le32 ka_timeout;
+       __le32 ka_interval;
+       __le32 max_rt_time;
+       __le32 initial_rcv_wnd;
+       u8 snd_wnd_scale;
+       u8 ack_frequency;
+       __le16 da_timeout_value;
+       __le32 ts_ticks_per_second;
+};
+
+struct tcp_offload_params_opt2 {
+       __le16 local_mac_addr_lo;
+       __le16 local_mac_addr_mid;
+       __le16 local_mac_addr_hi;
+       __le16 remote_mac_addr_lo;
+       __le16 remote_mac_addr_mid;
+       __le16 remote_mac_addr_hi;
+       __le16 vlan_id;
+       u8 flags;
+#define TCP_OFFLOAD_PARAMS_OPT2_TS_EN_MASK      0x1
+#define TCP_OFFLOAD_PARAMS_OPT2_TS_EN_SHIFT     0
+#define TCP_OFFLOAD_PARAMS_OPT2_DA_EN_MASK      0x1
+#define TCP_OFFLOAD_PARAMS_OPT2_DA_EN_SHIFT     1
+#define TCP_OFFLOAD_PARAMS_OPT2_KA_EN_MASK      0x1
+#define TCP_OFFLOAD_PARAMS_OPT2_KA_EN_SHIFT     2
+#define TCP_OFFLOAD_PARAMS_OPT2_RESERVED0_MASK  0x1F
+#define TCP_OFFLOAD_PARAMS_OPT2_RESERVED0_SHIFT 3
+       u8 ip_version;
+       __le32 remote_ip[4];
+       __le32 local_ip[4];
+       __le32 flow_label;
+       u8 ttl;
+       u8 tos_or_tc;
+       __le16 remote_port;
+       __le16 local_port;
+       __le16 mss;
+       u8 rcv_wnd_scale;
+       u8 connect_mode;
+       __le16 syn_ip_payload_length;
+       __le32 syn_phy_addr_lo;
+       __le32 syn_phy_addr_hi;
+       __le32 reserved1[22];
+};
+
+enum tcp_seg_placement_event {
+       TCP_EVENT_ADD_PEN,
+       TCP_EVENT_ADD_NEW_ISLE,
+       TCP_EVENT_ADD_ISLE_RIGHT,
+       TCP_EVENT_ADD_ISLE_LEFT,
+       TCP_EVENT_JOIN,
+       TCP_EVENT_NOP,
+       MAX_TCP_SEG_PLACEMENT_EVENT
+};
+
+struct tcp_update_params {
+       __le16 flags;
+#define TCP_UPDATE_PARAMS_REMOTE_MAC_ADDR_CHANGED_MASK   0x1
+#define TCP_UPDATE_PARAMS_REMOTE_MAC_ADDR_CHANGED_SHIFT  0
+#define TCP_UPDATE_PARAMS_MSS_CHANGED_MASK               0x1
+#define TCP_UPDATE_PARAMS_MSS_CHANGED_SHIFT              1
+#define TCP_UPDATE_PARAMS_TTL_CHANGED_MASK               0x1
+#define TCP_UPDATE_PARAMS_TTL_CHANGED_SHIFT              2
+#define TCP_UPDATE_PARAMS_TOS_OR_TC_CHANGED_MASK         0x1
+#define TCP_UPDATE_PARAMS_TOS_OR_TC_CHANGED_SHIFT        3
+#define TCP_UPDATE_PARAMS_KA_TIMEOUT_CHANGED_MASK        0x1
+#define TCP_UPDATE_PARAMS_KA_TIMEOUT_CHANGED_SHIFT       4
+#define TCP_UPDATE_PARAMS_KA_INTERVAL_CHANGED_MASK       0x1
+#define TCP_UPDATE_PARAMS_KA_INTERVAL_CHANGED_SHIFT      5
+#define TCP_UPDATE_PARAMS_MAX_RT_TIME_CHANGED_MASK       0x1
+#define TCP_UPDATE_PARAMS_MAX_RT_TIME_CHANGED_SHIFT      6
+#define TCP_UPDATE_PARAMS_FLOW_LABEL_CHANGED_MASK        0x1
+#define TCP_UPDATE_PARAMS_FLOW_LABEL_CHANGED_SHIFT       7
+#define TCP_UPDATE_PARAMS_INITIAL_RCV_WND_CHANGED_MASK   0x1
+#define TCP_UPDATE_PARAMS_INITIAL_RCV_WND_CHANGED_SHIFT  8
+#define TCP_UPDATE_PARAMS_KA_MAX_PROBE_CNT_CHANGED_MASK  0x1
+#define TCP_UPDATE_PARAMS_KA_MAX_PROBE_CNT_CHANGED_SHIFT 9
+#define TCP_UPDATE_PARAMS_KA_EN_CHANGED_MASK             0x1
+#define TCP_UPDATE_PARAMS_KA_EN_CHANGED_SHIFT            10
+#define TCP_UPDATE_PARAMS_NAGLE_EN_CHANGED_MASK          0x1
+#define TCP_UPDATE_PARAMS_NAGLE_EN_CHANGED_SHIFT         11
+#define TCP_UPDATE_PARAMS_KA_EN_MASK                     0x1
+#define TCP_UPDATE_PARAMS_KA_EN_SHIFT                    12
+#define TCP_UPDATE_PARAMS_NAGLE_EN_MASK                  0x1
+#define TCP_UPDATE_PARAMS_NAGLE_EN_SHIFT                 13
+#define TCP_UPDATE_PARAMS_KA_RESTART_MASK                0x1
+#define TCP_UPDATE_PARAMS_KA_RESTART_SHIFT               14
+#define TCP_UPDATE_PARAMS_RETRANSMIT_RESTART_MASK        0x1
+#define TCP_UPDATE_PARAMS_RETRANSMIT_RESTART_SHIFT       15
+       __le16 remote_mac_addr_lo;
+       __le16 remote_mac_addr_mid;
+       __le16 remote_mac_addr_hi;
+       __le16 mss;
+       u8 ttl;
+       u8 tos_or_tc;
+       __le32 ka_timeout;
+       __le32 ka_interval;
+       __le32 max_rt_time;
+       __le32 flow_label;
+       __le32 initial_rcv_wnd;
+       u8 ka_max_probe_cnt;
+       u8 reserved1[7];
+};
+
+struct tcp_upload_params {
+       __le32 rcv_next;
+       __le32 snd_una;
+       __le32 snd_next;
+       __le32 snd_max;
+       __le32 snd_wnd;
+       __le32 rcv_wnd;
+       __le32 snd_wl1;
+       __le32 cwnd;
+       __le32 ss_thresh;
+       __le16 srtt;
+       __le16 rtt_var;
+       __le32 ts_time;
+       __le32 ts_recent;
+       __le32 ts_recent_age;
+       __le32 total_rt;
+       __le32 ka_timeout_delta;
+       __le32 rt_timeout_delta;
+       u8 dup_ack_cnt;
+       u8 snd_wnd_probe_cnt;
+       u8 ka_probe_cnt;
+       u8 rt_cnt;
+       __le32 reserved;
+};
+
+#endif /* __TCP_COMMON__ */
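
Usage sketch (illustrative; not part of this commit): tcp_offload_params packs several one-bit enables into its flags byte using the MASK/SHIFT macros above. A minimal sketch of filling that field before handing the structure to firmware; the function name is hypothetical.

    #include <linux/types.h>
    #include <linux/qed/tcp_common.h>

    /* Hypothetical setup: enable timestamps and keepalive on an active connection. */
    static void example_fill_tcp_flags(struct tcp_offload_params *p)
    {
            p->flags = 0;
            p->flags |= (1 & TCP_OFFLOAD_PARAMS_TS_EN_MASK) <<
                        TCP_OFFLOAD_PARAMS_TS_EN_SHIFT;
            p->flags |= (1 & TCP_OFFLOAD_PARAMS_KA_EN_MASK) <<
                        TCP_OFFLOAD_PARAMS_KA_EN_SHIFT;
            p->ip_version = TCP_IPV4;
            p->connect_mode = TCP_CONNECT_ACTIVE;
    }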