The following patches are currently in flight upstream. Adding them here to keep ocrdma in sync.
Signed-off-by: Devesh Sharma <devesh.sharma@emulex.com>
--- /dev/null
+From bf24e5d575c684833685a514930770c719c5dbf7 Mon Sep 17 00:00:00 2001
+From: Devesh Sharma <devesh.sharma@emulex.com>
+Date: Thu, 30 Jan 2014 15:35:40 +0530
+Subject: [PATCH 01/16] RDMA/ocrdma: Eq full catastrophe avoidance
+
+Stale entries in a CQ being destroyed cause the hardware to generate EQEs indefinitely for that CQ,
+thus causing uncontrolled execution of the irq_handler. This patch fixes this using the following semantics:
+
+ * irq_handler will ring the EQ doorbell at least once and implement a budgeting scheme.
+ * cq_destroy will count the number of valid entries during destroy and ring the
+ cq-db so that the hardware does not generate uncontrolled EQEs.
+ * cq_destroy will synchronize with last running irq_handler instance.
+ * arm_cq will always defer arming CQ till poll_cq, except for the first arm_cq call.
+ * poll_cq will always ring cq-db with arm=SET if arm_cq was called prior to enter poll_cq.
+ * poll_cq will always ring cq-db with arm=UNSET if arm_cq was not called prior to enter poll_cq.
+
+Signed-off-by: Devesh Sharma <devesh.sharma@emulex.com>
+Signed-off-by: Selvin Xavier <selvin.xavier@emulex.com>
+---
+ drivers/infiniband/hw/ocrdma/ocrdma.h | 18 +++++-
+ drivers/infiniband/hw/ocrdma/ocrdma_hw.c | 53 ++++++++---------
+ drivers/infiniband/hw/ocrdma/ocrdma_hw.h | 1 +
+ drivers/infiniband/hw/ocrdma/ocrdma_verbs.c | 81 ++++++++++++++++++++-------
+ 4 files changed, 103 insertions(+), 50 deletions(-)
+
+diff --git a/drivers/infiniband/hw/ocrdma/ocrdma.h b/drivers/infiniband/hw/ocrdma/ocrdma.h
+index adc11d1..a329de6 100644
+--- a/drivers/infiniband/hw/ocrdma/ocrdma.h
++++ b/drivers/infiniband/hw/ocrdma/ocrdma.h
+@@ -183,8 +183,8 @@ struct ocrdma_cq {
+ */
+ u32 max_hw_cqe;
+ bool phase_change;
+- bool armed, solicited;
+- bool arm_needed;
++ bool deferred_arm, deferred_sol;
++ bool first_arm;
+
+ spinlock_t cq_lock ____cacheline_aligned; /* provide synchronization
+ * to cq polling
+@@ -197,6 +197,7 @@ struct ocrdma_cq {
+ struct ocrdma_ucontext *ucontext;
+ dma_addr_t pa;
+ u32 len;
++ u32 cqe_cnt;
+
+ /* head of all qp's sq and rq for which cqes need to be flushed
+ * by the software.
+@@ -423,4 +424,17 @@ static inline int is_cqe_wr_imm(struct ocrdma_cqe *cqe)
+ }
+
+
++static inline int ocrdma_get_eq_table_index(struct ocrdma_dev *dev,
++ int eqid)
++{
++ int indx;
++
++ for (indx = 0; indx < dev->eq_cnt; indx++) {
++ if (dev->eq_tbl[indx].q.id == eqid)
++ return indx;
++ }
++
++ return -EINVAL;
++}
++
+ #endif
+diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_hw.c b/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
+index 50219ab..135331d 100644
+--- a/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
++++ b/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
+@@ -444,7 +444,7 @@ mbx_err:
+ return status;
+ }
+
+-static int ocrdma_get_irq(struct ocrdma_dev *dev, struct ocrdma_eq *eq)
++int ocrdma_get_irq(struct ocrdma_dev *dev, struct ocrdma_eq *eq)
+ {
+ int irq;
+
+@@ -574,6 +574,7 @@ static int ocrdma_create_mq(struct ocrdma_dev *dev)
+ if (status)
+ goto alloc_err;
+
++ dev->eq_tbl[0].cq_cnt++;
+ status = ocrdma_mbx_mq_cq_create(dev, &dev->mq.cq, &dev->eq_tbl[0].q);
+ if (status)
+ goto mbx_cq_free;
+@@ -858,16 +859,8 @@ static void ocrdma_qp_cq_handler(struct ocrdma_dev *dev, u16 cq_idx)
+ BUG();
+
+ cq = dev->cq_tbl[cq_idx];
+- if (cq == NULL) {
+- pr_err("%s%d invalid id=0x%x\n", __func__, dev->id, cq_idx);
++ if (cq == NULL)
+ return;
+- }
+- spin_lock_irqsave(&cq->cq_lock, flags);
+- cq->armed = false;
+- cq->solicited = false;
+- spin_unlock_irqrestore(&cq->cq_lock, flags);
+-
+- ocrdma_ring_cq_db(dev, cq->id, false, false, 0);
+
+ if (cq->ibcq.comp_handler) {
+ spin_lock_irqsave(&cq->comp_handler_lock, flags);
+@@ -892,27 +885,35 @@ static irqreturn_t ocrdma_irq_handler(int irq, void *handle)
+ struct ocrdma_dev *dev = eq->dev;
+ struct ocrdma_eqe eqe;
+ struct ocrdma_eqe *ptr;
+- u16 eqe_popped = 0;
+ u16 cq_id;
+- while (1) {
++ int budget = eq->cq_cnt;
++
++ do {
+ ptr = ocrdma_get_eqe(eq);
+ eqe = *ptr;
+ ocrdma_le32_to_cpu(&eqe, sizeof(eqe));
+ if ((eqe.id_valid & OCRDMA_EQE_VALID_MASK) == 0)
+ break;
+- eqe_popped += 1;
++
+ ptr->id_valid = 0;
++ /* ring eq doorbell as soon as its consumed. */
++ ocrdma_ring_eq_db(dev, eq->q.id, false, true, 1);
+ /* check whether its CQE or not. */
+ if ((eqe.id_valid & OCRDMA_EQE_FOR_CQE_MASK) == 0) {
+ cq_id = eqe.id_valid >> OCRDMA_EQE_RESOURCE_ID_SHIFT;
+ ocrdma_cq_handler(dev, cq_id);
+ }
+ ocrdma_eq_inc_tail(eq);
+- }
+- ocrdma_ring_eq_db(dev, eq->q.id, true, true, eqe_popped);
+- /* Ring EQ doorbell with num_popped to 0 to enable interrupts again. */
+- if (dev->nic_info.intr_mode == BE_INTERRUPT_MODE_INTX)
+- ocrdma_ring_eq_db(dev, eq->q.id, true, true, 0);
++
++ /* There can be a stale EQE after the last bound CQ is
++ * destroyed. EQE valid and budget == 0 implies this.
++ */
++ if (budget)
++ budget--;
++
++ } while (budget);
++
++ ocrdma_ring_eq_db(dev, eq->q.id, true, true, 0);
+ return IRQ_HANDLED;
+ }
+
+@@ -1357,12 +1358,10 @@ static void ocrdma_unbind_eq(struct ocrdma_dev *dev, u16 eq_id)
+ int i;
+
+ mutex_lock(&dev->dev_lock);
+- for (i = 0; i < dev->eq_cnt; i++) {
+- if (dev->eq_tbl[i].q.id != eq_id)
+- continue;
+- dev->eq_tbl[i].cq_cnt -= 1;
+- break;
+- }
++ i = ocrdma_get_eq_table_index(dev, eq_id);
++ if (i == -EINVAL)
++ BUG();
++ dev->eq_tbl[i].cq_cnt -= 1;
+ mutex_unlock(&dev->dev_lock);
+ }
+
+@@ -1417,6 +1416,7 @@ int ocrdma_mbx_create_cq(struct ocrdma_dev *dev, struct ocrdma_cq *cq,
+ cq->eqn = ocrdma_bind_eq(dev);
+ cmd->cmd.req.rsvd_version = OCRDMA_CREATE_CQ_VER3;
+ cqe_count = cq->len / cqe_size;
++ cq->cqe_cnt = cqe_count;
+ if (cqe_count > 1024) {
+ /* Set cnt to 3 to indicate more than 1024 cq entries */
+ cmd->cmd.ev_cnt_flags |= (0x3 << OCRDMA_CREATE_CQ_CNT_SHIFT);
+@@ -1484,12 +1484,9 @@ int ocrdma_mbx_destroy_cq(struct ocrdma_dev *dev, struct ocrdma_cq *cq)
+ (cq->id << OCRDMA_DESTROY_CQ_QID_SHIFT) &
+ OCRDMA_DESTROY_CQ_QID_MASK;
+
+- ocrdma_unbind_eq(dev, cq->eqn);
+ status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
+- if (status)
+- goto mbx_err;
++ ocrdma_unbind_eq(dev, cq->eqn);
+ dma_free_coherent(&dev->nic_info.pdev->dev, cq->len, cq->va, cq->pa);
+-mbx_err:
+ kfree(cmd);
+ return status;
+ }
+diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_hw.h b/drivers/infiniband/hw/ocrdma/ocrdma_hw.h
+index f2a89d4..38102b3 100644
+--- a/drivers/infiniband/hw/ocrdma/ocrdma_hw.h
++++ b/drivers/infiniband/hw/ocrdma/ocrdma_hw.h
+@@ -133,5 +133,6 @@ int ocrdma_qp_state_change(struct ocrdma_qp *, enum ib_qp_state new_state,
+ bool ocrdma_is_qp_in_sq_flushlist(struct ocrdma_cq *, struct ocrdma_qp *);
+ bool ocrdma_is_qp_in_rq_flushlist(struct ocrdma_cq *, struct ocrdma_qp *);
+ void ocrdma_flush_qp(struct ocrdma_qp *);
++int ocrdma_get_irq(struct ocrdma_dev *dev, struct ocrdma_eq *eq);
+
+ #endif /* __OCRDMA_HW_H__ */
+diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
+index 86242ce..ae2b778 100644
+--- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
++++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
+@@ -910,6 +910,7 @@ struct ib_cq *ocrdma_create_cq(struct ib_device *ibdev, int entries, int vector,
+ spin_lock_init(&cq->comp_handler_lock);
+ INIT_LIST_HEAD(&cq->sq_head);
+ INIT_LIST_HEAD(&cq->rq_head);
++ cq->first_arm = true;
+
+ if (ib_ctx) {
+ uctx = get_ocrdma_ucontext(ib_ctx);
+@@ -927,9 +928,7 @@ struct ib_cq *ocrdma_create_cq(struct ib_device *ibdev, int entries, int vector,
+ goto ctx_err;
+ }
+ cq->phase = OCRDMA_CQE_VALID;
+- cq->arm_needed = true;
+ dev->cq_tbl[cq->id] = cq;
+-
+ return &cq->ibcq;
+
+ ctx_err:
+@@ -952,15 +951,52 @@ int ocrdma_resize_cq(struct ib_cq *ibcq, int new_cnt,
+ return status;
+ }
+
++void ocrdma_flush_cq(struct ocrdma_cq *cq)
++{
++ int cqe_cnt;
++ int valid_count = 0;
++ unsigned long flags;
++
++ struct ocrdma_dev *dev = get_ocrdma_dev(cq->ibcq.device);
++ struct ocrdma_cqe *cqe = NULL;
++
++ cqe = cq->va;
++ cqe_cnt = cq->cqe_cnt;
++
++ /* Last irq might have scheduled a polling thread
++ * sync-up with it before hard flushing.
++ */
++ spin_lock_irqsave(&cq->cq_lock, flags);
++ while (cqe_cnt) {
++ if (is_cqe_valid(cq, cqe))
++ valid_count++;
++ cqe++;
++ cqe_cnt--;
++ }
++ ocrdma_ring_cq_db(dev, cq->id, false, false, valid_count);
++ spin_unlock_irqrestore(&cq->cq_lock, flags);
++}
++
+ int ocrdma_destroy_cq(struct ib_cq *ibcq)
+ {
+ int status;
+ struct ocrdma_cq *cq = get_ocrdma_cq(ibcq);
++ struct ocrdma_eq *eq = NULL;
+ struct ocrdma_dev *dev = get_ocrdma_dev(ibcq->device);
+ int pdid = 0;
++ u32 irq, indx;
+
+- status = ocrdma_mbx_destroy_cq(dev, cq);
++ dev->cq_tbl[cq->id] = NULL;
++ indx = ocrdma_get_eq_table_index(dev, cq->eqn);
++ if (indx == -EINVAL)
++ BUG();
+
++ eq = &dev->eq_tbl[indx];
++ irq = ocrdma_get_irq(dev, eq);
++ synchronize_irq(irq);
++ ocrdma_flush_cq(cq);
++
++ status = ocrdma_mbx_destroy_cq(dev, cq);
+ if (cq->ucontext) {
+ pdid = cq->ucontext->cntxt_pd->id;
+ ocrdma_del_mmap(cq->ucontext, (u64) cq->pa,
+@@ -969,7 +1005,6 @@ int ocrdma_destroy_cq(struct ib_cq *ibcq)
+ ocrdma_get_db_addr(dev, pdid),
+ dev->nic_info.db_page_size);
+ }
+- dev->cq_tbl[cq->id] = NULL;
+
+ kfree(cq);
+ return status;
+@@ -2706,10 +2741,18 @@ expand_cqe:
+ }
+ stop_cqe:
+ cq->getp = cur_getp;
+- if (polled_hw_cqes || expand || stop) {
+- ocrdma_ring_cq_db(dev, cq->id, cq->armed, cq->solicited,
++ if (cq->deferred_arm) {
++ ocrdma_ring_cq_db(dev, cq->id, true, cq->deferred_sol,
++ polled_hw_cqes);
++ cq->deferred_arm = false;
++ cq->deferred_sol = false;
++ } else {
++ /* We need to pop the CQE. No need to arm */
++ ocrdma_ring_cq_db(dev, cq->id, false, cq->deferred_sol,
+ polled_hw_cqes);
++ cq->deferred_sol = false;
+ }
++
+ return i;
+ }
+
+@@ -2781,30 +2824,28 @@ int ocrdma_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags cq_flags)
+ struct ocrdma_cq *cq = get_ocrdma_cq(ibcq);
+ struct ocrdma_dev *dev = get_ocrdma_dev(ibcq->device);
+ u16 cq_id;
+- u16 cur_getp;
+- struct ocrdma_cqe *cqe;
+ unsigned long flags;
++ bool arm_needed = false, sol_needed = false;
+
+ cq_id = cq->id;
+
+ spin_lock_irqsave(&cq->cq_lock, flags);
+ if (cq_flags & IB_CQ_NEXT_COMP || cq_flags & IB_CQ_SOLICITED)
+- cq->armed = true;
++ arm_needed = true;
+ if (cq_flags & IB_CQ_SOLICITED)
+- cq->solicited = true;
+-
+- cur_getp = cq->getp;
+- cqe = cq->va + cur_getp;
++ sol_needed = true;
+
+- /* check whether any valid cqe exist or not, if not then safe to
+- * arm. If cqe is not yet consumed, then let it get consumed and then
+- * we arm it to avoid false interrupts.
+- */
+- if (!is_cqe_valid(cq, cqe) || cq->arm_needed) {
+- cq->arm_needed = false;
+- ocrdma_ring_cq_db(dev, cq_id, cq->armed, cq->solicited, 0);
++ if (cq->first_arm) {
++ ocrdma_ring_cq_db(dev, cq_id, arm_needed, sol_needed, 0);
++ cq->first_arm = false;
++ goto skip_defer;
+ }
++ cq->deferred_arm = true;
++
++skip_defer:
++ cq->deferred_sol = sol_needed;
+ spin_unlock_irqrestore(&cq->cq_lock, flags);
++
+ return 0;
+ }
+
+--
+1.7.1
+
--- /dev/null
+From 622cd67f49d6d5da5e57283ea22c89bc94783865 Mon Sep 17 00:00:00 2001
+From: Devesh Sharma <Devesh.Sharma@Emulex.Com>
+Date: Mon, 3 Feb 2014 18:17:03 +0530
+Subject: [PATCH 02/16] RDMA/ocrdma: SQ and RQ doorbell offset clean up
+
+Introduce new macros to define the SQ and RQ doorbell offsets.
+
+Signed-off-by: Devesh Sharma <devesh.sharma@emulex.com>
+Signed-off-by: Selvin Xavier <selvin.xavier@emulex.com>
+---
+ drivers/infiniband/hw/ocrdma/ocrdma.h | 7 -------
+ drivers/infiniband/hw/ocrdma/ocrdma_sli.h | 5 ++++-
+ drivers/infiniband/hw/ocrdma/ocrdma_verbs.c | 23 +++++++----------------
+ 3 files changed, 11 insertions(+), 24 deletions(-)
+
+diff --git a/drivers/infiniband/hw/ocrdma/ocrdma.h b/drivers/infiniband/hw/ocrdma/ocrdma.h
+index a329de6..283653c 100644
+--- a/drivers/infiniband/hw/ocrdma/ocrdma.h
++++ b/drivers/infiniband/hw/ocrdma/ocrdma.h
+@@ -385,13 +385,6 @@ static inline struct ocrdma_srq *get_ocrdma_srq(struct ib_srq *ibsrq)
+ return container_of(ibsrq, struct ocrdma_srq, ibsrq);
+ }
+
+-
+-static inline int ocrdma_get_num_posted_shift(struct ocrdma_qp *qp)
+-{
+- return ((qp->dev->nic_info.dev_family == OCRDMA_GEN2_FAMILY &&
+- qp->id < 128) ? 24 : 16);
+-}
+-
+ static inline int is_cqe_valid(struct ocrdma_cq *cq, struct ocrdma_cqe *cqe)
+ {
+ int cqe_valid;
+diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_sli.h b/drivers/infiniband/hw/ocrdma/ocrdma_sli.h
+index 60d5ac2..e71685a 100644
+--- a/drivers/infiniband/hw/ocrdma/ocrdma_sli.h
++++ b/drivers/infiniband/hw/ocrdma/ocrdma_sli.h
+@@ -103,7 +103,10 @@ enum {
+ OCRDMA_DB_GEN2_SRQ_OFFSET = OCRDMA_DB_GEN2_RQ_OFFSET,
+ OCRDMA_DB_CQ_OFFSET = 0x120,
+ OCRDMA_DB_EQ_OFFSET = OCRDMA_DB_CQ_OFFSET,
+- OCRDMA_DB_MQ_OFFSET = 0x140
++ OCRDMA_DB_MQ_OFFSET = 0x140,
++
++ OCRDMA_DB_SQ_SHIFT = 16,
++ OCRDMA_DB_RQ_SHIFT = 24
+ };
+
+ #define OCRDMA_DB_CQ_RING_ID_MASK 0x3FF /* bits 0 - 9 */
+diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
+index ae2b778..ef52ef2 100644
+--- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
++++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
+@@ -1127,15 +1127,9 @@ static int ocrdma_copy_qp_uresp(struct ocrdma_qp *qp,
+ }
+ uresp.db_page_addr = usr_db;
+ uresp.db_page_size = dev->nic_info.db_page_size;
+- if (dev->nic_info.dev_family == OCRDMA_GEN2_FAMILY) {
+- uresp.db_sq_offset = OCRDMA_DB_GEN2_SQ_OFFSET;
+- uresp.db_rq_offset = OCRDMA_DB_GEN2_RQ_OFFSET;
+- uresp.db_shift = 24;
+- } else {
+- uresp.db_sq_offset = OCRDMA_DB_SQ_OFFSET;
+- uresp.db_rq_offset = OCRDMA_DB_RQ_OFFSET;
+- uresp.db_shift = 16;
+- }
++ uresp.db_sq_offset = OCRDMA_DB_GEN2_SQ_OFFSET;
++ uresp.db_rq_offset = OCRDMA_DB_GEN2_RQ_OFFSET;
++ uresp.db_shift = OCRDMA_DB_RQ_SHIFT;
+
+ if (qp->dpp_enabled) {
+ uresp.dpp_credit = dpp_credit_lmt;
+@@ -1308,7 +1302,7 @@ static void ocrdma_flush_rq_db(struct ocrdma_qp *qp)
+ {
+ if (qp->db_cache) {
+ u32 val = qp->rq.dbid | (qp->db_cache <<
+- ocrdma_get_num_posted_shift(qp));
++ OCRDMA_DB_RQ_SHIFT);
+ iowrite32(val, qp->rq_db);
+ qp->db_cache = 0;
+ }
+@@ -2053,7 +2047,7 @@ static int ocrdma_build_fr(struct ocrdma_qp *qp, struct ocrdma_hdr_wqe *hdr,
+
+ static void ocrdma_ring_sq_db(struct ocrdma_qp *qp)
+ {
+- u32 val = qp->sq.dbid | (1 << 16);
++ u32 val = qp->sq.dbid | (1 << OCRDMA_DB_SQ_SHIFT);
+
+ iowrite32(val, qp->sq_db);
+ }
+@@ -2158,12 +2152,9 @@ int ocrdma_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
+
+ static void ocrdma_ring_rq_db(struct ocrdma_qp *qp)
+ {
+- u32 val = qp->rq.dbid | (1 << ocrdma_get_num_posted_shift(qp));
++ u32 val = qp->rq.dbid | (1 << OCRDMA_DB_RQ_SHIFT);
+
+- if (qp->state != OCRDMA_QPS_INIT)
+- iowrite32(val, qp->rq_db);
+- else
+- qp->db_cache++;
++ iowrite32(val, qp->rq_db);
+ }
+
+ static void ocrdma_build_rqe(struct ocrdma_hdr_wqe *rqe, struct ib_recv_wr *wr,
+--
+1.7.1
+
--- /dev/null
+From cb733f184ebc399935d3c3e948d9288915ab9748 Mon Sep 17 00:00:00 2001
+From: Devesh Sharma <devesh.sharma@emulex.com>
+Date: Tue, 18 Feb 2014 16:02:52 +0530
+Subject: [PATCH 03/16] RDMA/ocrdma: read ASIC_ID register to select asic_gen
+
+The ocrdma driver selects its execution path based on the sli_family and ASIC generation number.
+This patch introduces reading the ASIC gen number from a PCI register instead of obtaining it from the Emulex NIC driver.
+
+Signed-off-by: Devesh Sharma <devesh.sharma@emulex.com>
+Signed-off-by: Selvin Xavier <selvin.xavier@emulex.com>
+---
+ drivers/infiniband/hw/ocrdma/ocrdma.h | 13 +++++++++++++
+ drivers/infiniband/hw/ocrdma/ocrdma_hw.c | 6 +++---
+ drivers/infiniband/hw/ocrdma/ocrdma_main.c | 2 +-
+ drivers/infiniband/hw/ocrdma/ocrdma_sli.h | 17 +++++++++++++++--
+ drivers/infiniband/hw/ocrdma/ocrdma_verbs.c | 6 +++---
+ 5 files changed, 35 insertions(+), 9 deletions(-)
+
+diff --git a/drivers/infiniband/hw/ocrdma/ocrdma.h b/drivers/infiniband/hw/ocrdma/ocrdma.h
+index 283653c..44064c7 100644
+--- a/drivers/infiniband/hw/ocrdma/ocrdma.h
++++ b/drivers/infiniband/hw/ocrdma/ocrdma.h
+@@ -171,6 +171,7 @@ struct ocrdma_dev {
+ int id;
+ u64 stag_arr[OCRDMA_MAX_STAG];
+ u16 pvid;
++ u32 asic_id;
+ };
+
+ struct ocrdma_cq {
+@@ -430,4 +431,16 @@ static inline int ocrdma_get_eq_table_index(struct ocrdma_dev *dev,
+ return -EINVAL;
+ }
+
++static inline u8 ocrdma_get_asic_type(struct ocrdma_dev *dev)
++{
++ if (dev->nic_info.dev_family == 0xF && !dev->asic_id) {
++ pci_read_config_dword(
++ dev->nic_info.pdev,
++ OCRDMA_SLI_ASIC_ID_OFFSET, &dev->asic_id);
++ }
++
++ return (dev->asic_id & OCRDMA_SLI_ASIC_GEN_NUM_MASK) >>
++ OCRDMA_SLI_ASIC_GEN_NUM_SHIFT;
++}
++
+ #endif
+diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_hw.c b/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
+index 135331d..ce6f539 100644
+--- a/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
++++ b/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
+@@ -1037,7 +1037,7 @@ static void ocrdma_get_attr(struct ocrdma_dev *dev,
+ attr->max_inline_data =
+ attr->wqe_size - (sizeof(struct ocrdma_hdr_wqe) +
+ sizeof(struct ocrdma_sge));
+- if (dev->nic_info.dev_family == OCRDMA_GEN2_FAMILY) {
++ if (ocrdma_get_asic_type(dev) == OCRDMA_ASIC_GEN_SKH_R) {
+ attr->ird = 1;
+ attr->ird_page_size = OCRDMA_MIN_Q_PAGE_SIZE;
+ attr->num_ird_pages = MAX_OCRDMA_IRD_PAGES;
+@@ -1379,7 +1379,7 @@ int ocrdma_mbx_create_cq(struct ocrdma_dev *dev, struct ocrdma_cq *cq,
+ __func__, dev->id, dev->attr.max_cqe, entries);
+ return -EINVAL;
+ }
+- if (dpp_cq && (dev->nic_info.dev_family != OCRDMA_GEN2_FAMILY))
++ if (dpp_cq && (ocrdma_get_asic_type(dev) != OCRDMA_ASIC_GEN_SKH_R))
+ return -EINVAL;
+
+ if (dpp_cq) {
+@@ -1439,7 +1439,7 @@ int ocrdma_mbx_create_cq(struct ocrdma_dev *dev, struct ocrdma_cq *cq,
+ }
+ /* shared eq between all the consumer cqs. */
+ cmd->cmd.eqn = cq->eqn;
+- if (dev->nic_info.dev_family == OCRDMA_GEN2_FAMILY) {
++ if (ocrdma_get_asic_type(dev) == OCRDMA_ASIC_GEN_SKH_R) {
+ if (dpp_cq)
+ cmd->cmd.pgsz_pgcnt |= OCRDMA_CREATE_CQ_DPP <<
+ OCRDMA_CREATE_CQ_TYPE_SHIFT;
+diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_main.c b/drivers/infiniband/hw/ocrdma/ocrdma_main.c
+index 53d3ea4..b21761b 100644
+--- a/drivers/infiniband/hw/ocrdma/ocrdma_main.c
++++ b/drivers/infiniband/hw/ocrdma/ocrdma_main.c
+@@ -346,7 +346,7 @@ static int ocrdma_register_device(struct ocrdma_dev *dev)
+
+ dev->ibdev.process_mad = ocrdma_process_mad;
+
+- if (dev->nic_info.dev_family == OCRDMA_GEN2_FAMILY) {
++ if (ocrdma_get_asic_type(dev) == OCRDMA_ASIC_GEN_SKH_R) {
+ dev->ibdev.uverbs_cmd_mask |=
+ OCRDMA_UVERBS(CREATE_SRQ) |
+ OCRDMA_UVERBS(MODIFY_SRQ) |
+diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_sli.h b/drivers/infiniband/hw/ocrdma/ocrdma_sli.h
+index e71685a..de4ebfc 100644
+--- a/drivers/infiniband/hw/ocrdma/ocrdma_sli.h
++++ b/drivers/infiniband/hw/ocrdma/ocrdma_sli.h
+@@ -30,8 +30,16 @@
+
+ #define Bit(_b) (1 << (_b))
+
+-#define OCRDMA_GEN1_FAMILY 0xB
+-#define OCRDMA_GEN2_FAMILY 0x0F
++enum {
++ OCRDMA_ASIC_GEN_SKH_R = 0x04,
++ OCRDMA_ASIC_GEN_LANCER = 0x0B
++};
++
++enum {
++ OCRDMA_ASIC_REV_A0 = 0x00,
++ OCRDMA_ASIC_REV_B0 = 0x10,
++ OCRDMA_ASIC_REV_C0 = 0x20
++};
+
+ #define OCRDMA_SUBSYS_ROCE 10
+ enum {
+@@ -141,6 +149,11 @@ enum {
+ #define OCRDMA_MIN_Q_PAGE_SIZE (4096)
+ #define OCRDMA_MAX_Q_PAGES (8)
+
++#define OCRDMA_SLI_ASIC_ID_OFFSET 0x9C
++#define OCRDMA_SLI_ASIC_REV_MASK 0x000000FF
++#define OCRDMA_SLI_ASIC_GEN_NUM_MASK 0x0000FF00
++#define OCRDMA_SLI_ASIC_GEN_NUM_SHIFT 0x08
++
+ /*
+ # 0: 4K Bytes
+ # 1: 8K Bytes
+diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
+index ef52ef2..20ef4ba 100644
+--- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
++++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
+@@ -267,7 +267,7 @@ static struct ocrdma_pd *_ocrdma_alloc_pd(struct ocrdma_dev *dev,
+
+ if (udata && uctx) {
+ pd->dpp_enabled =
+- dev->nic_info.dev_family == OCRDMA_GEN2_FAMILY;
++ ocrdma_get_asic_type(dev) == OCRDMA_ASIC_GEN_SKH_R;
+ pd->num_dpp_qp =
+ pd->dpp_enabled ? OCRDMA_PD_MAX_DPP_ENABLED_QP : 0;
+ }
+@@ -1161,7 +1161,7 @@ err:
+ static void ocrdma_set_qp_db(struct ocrdma_dev *dev, struct ocrdma_qp *qp,
+ struct ocrdma_pd *pd)
+ {
+- if (dev->nic_info.dev_family == OCRDMA_GEN2_FAMILY) {
++ if (ocrdma_get_asic_type(dev) == OCRDMA_ASIC_GEN_SKH_R) {
+ qp->sq_db = dev->nic_info.db +
+ (pd->id * dev->nic_info.db_page_size) +
+ OCRDMA_DB_GEN2_SQ_OFFSET;
+@@ -1687,7 +1687,7 @@ static int ocrdma_copy_srq_uresp(struct ocrdma_dev *dev, struct ocrdma_srq *srq,
+ (srq->pd->id * dev->nic_info.db_page_size);
+ uresp.db_page_size = dev->nic_info.db_page_size;
+ uresp.num_rqe_allocated = srq->rq.max_cnt;
+- if (dev->nic_info.dev_family == OCRDMA_GEN2_FAMILY) {
++ if (ocrdma_get_asic_type(dev) == OCRDMA_ASIC_GEN_SKH_R) {
+ uresp.db_rq_offset = OCRDMA_DB_GEN2_RQ_OFFSET;
+ uresp.db_shift = 24;
+ } else {
+--
+1.7.1
+
--- /dev/null
+From fec1a4edb29dfab809fcdff9c600ee150874a63f Mon Sep 17 00:00:00 2001
+From: Devesh Sharma <devesh.sharma@emulex.com>
+Date: Wed, 29 Jan 2014 14:04:45 +0530
+Subject: [PATCH 04/16] RDMA/ocrdma: Allow DPP QP creation
+
+Allow creating DPP QP even if inline-data is not requested. This is an optimization to
+lower the latency figures.
+
+Signed-off-by: Devesh Sharma <devesh.sharma@emulex.com>
+Signed-off-by: Selvin Xavier <selvin.xavier@emulex.com>
+---
+ drivers/infiniband/hw/ocrdma/ocrdma_hw.c | 3 +--
+ 1 files changed, 1 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_hw.c b/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
+index ce6f539..dc72df9 100644
+--- a/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
++++ b/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
+@@ -2026,8 +2026,7 @@ int ocrdma_mbx_create_qp(struct ocrdma_qp *qp, struct ib_qp_init_attr *attrs,
+ OCRDMA_CREATE_QP_REQ_RQ_CQID_MASK;
+ qp->rq_cq = cq;
+
+- if (pd->dpp_enabled && attrs->cap.max_inline_data && pd->num_dpp_qp &&
+- (attrs->cap.max_inline_data <= dev->attr.max_inline_data)) {
++ if (pd->dpp_enabled && pd->num_dpp_qp) {
+ ocrdma_set_create_qp_dpp_cmd(cmd, pd, qp, enable_dpp_cq,
+ dpp_cq_id);
+ }
+--
+1.7.1
+
--- /dev/null
+From 459a5fdb860295fdfd3f163b5deb462848f3092a Mon Sep 17 00:00:00 2001
+From: Devesh Sharma <devesh.sharma@emulex.com>
+Date: Thu, 30 Jan 2014 09:57:37 +0530
+Subject: [PATCH 05/16] RDMA/ocrdma: ABI versioning between ocrdma and be2net
+
+While loading the RoCE driver, the be2net driver should check the ABI version to
+catch functional incompatibilities.
+
+Signed-off-by: Devesh Sharma <devesh.sharma@emulex.com>
+Signed-off-by: Selvin Xavier <selvin.xavier@emulex.com>
+---
+ drivers/infiniband/hw/ocrdma/ocrdma_abi.h | 1 +
+ drivers/infiniband/hw/ocrdma/ocrdma_main.c | 1 +
+ 2 files changed, 2 insertions(+), 0 deletions(-)
+
+diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_abi.h b/drivers/infiniband/hw/ocrdma/ocrdma_abi.h
+index fbac8eb..2a14d4a 100644
+--- a/drivers/infiniband/hw/ocrdma/ocrdma_abi.h
++++ b/drivers/infiniband/hw/ocrdma/ocrdma_abi.h
+@@ -29,6 +29,7 @@
+ #define __OCRDMA_ABI_H__
+
+ #define OCRDMA_ABI_VERSION 1
++#define OCRDMA_BE_ROCE_ABI_VERSION 1
+ /* user kernel communication data structures. */
+
+ struct ocrdma_alloc_ucontext_resp {
+diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_main.c b/drivers/infiniband/hw/ocrdma/ocrdma_main.c
+index b21761b..8f4e97c 100644
+--- a/drivers/infiniband/hw/ocrdma/ocrdma_main.c
++++ b/drivers/infiniband/hw/ocrdma/ocrdma_main.c
+@@ -541,6 +541,7 @@ static struct ocrdma_driver ocrdma_drv = {
+ .add = ocrdma_add,
+ .remove = ocrdma_remove,
+ .state_change_handler = ocrdma_event_handler,
++ .be_abi_version = OCRDMA_BE_ROCE_ABI_VERSION,
+ };
+
+ static void ocrdma_unregister_inet6addr_notifier(void)
+--
+1.7.1
+
--- /dev/null
+From ee947e52b96caa36435896fdc19cd351a805bd27 Mon Sep 17 00:00:00 2001
+From: Devesh Sharma <devesh.sharma@emulex.com>
+Date: Thu, 30 Jan 2014 11:58:44 +0530
+Subject: [PATCH 06/16] RDMA/ocrdma: update version string
+
+Update the driver version string and node description string.
+
+Signed-off-by: Devesh Sharma <devesh.sharma@emulex.com>
+Signed-off-by: Selvin Xavier <selvin.xavier@emulex.com>
+---
+ drivers/infiniband/hw/ocrdma/ocrdma.h | 4 +++-
+ drivers/infiniband/hw/ocrdma/ocrdma_main.c | 4 ++--
+ 2 files changed, 5 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/infiniband/hw/ocrdma/ocrdma.h b/drivers/infiniband/hw/ocrdma/ocrdma.h
+index 44064c7..2eb4d22 100644
+--- a/drivers/infiniband/hw/ocrdma/ocrdma.h
++++ b/drivers/infiniband/hw/ocrdma/ocrdma.h
+@@ -39,7 +39,9 @@
+ #include <be_roce.h>
+ #include "ocrdma_sli.h"
+
+-#define OCRDMA_ROCE_DEV_VERSION "1.0.0"
++#define OCRDMA_ROCE_DRV_VERSION "10.2.145.0-ofed"
++
++#define OCRDMA_ROCE_DRV_DESC "Emulex OneConnect RoCE Driver"
+ #define OCRDMA_NODE_DESC "Emulex OneConnect RoCE HCA"
+
+ #define OCRDMA_MAX_AH 512
+diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_main.c b/drivers/infiniband/hw/ocrdma/ocrdma_main.c
+index 8f4e97c..5d30161 100644
+--- a/drivers/infiniband/hw/ocrdma/ocrdma_main.c
++++ b/drivers/infiniband/hw/ocrdma/ocrdma_main.c
+@@ -41,8 +41,8 @@
+ #include "ocrdma_hw.h"
+ #include "ocrdma_abi.h"
+
+-MODULE_VERSION(OCRDMA_ROCE_DEV_VERSION);
+-MODULE_DESCRIPTION("Emulex RoCE HCA Driver");
++MODULE_VERSION(OCRDMA_ROCE_DRV_VERSION);
++MODULE_DESCRIPTION(OCRDMA_ROCE_DRV_DESC " " OCRDMA_ROCE_DRV_VERSION);
+ MODULE_AUTHOR("Emulex Corporation");
+ MODULE_LICENSE("GPL");
+
+--
+1.7.1
+
--- /dev/null
+From 2e753c2d314295448b1822786feff345efc2e6b7 Mon Sep 17 00:00:00 2001
+From: Devesh Sharma <devesh.sharma@emulex.com>
+Date: Thu, 30 Jan 2014 11:13:22 +0530
+Subject: [PATCH 07/16] RDMA/ocrdma: increment abi version count
+
+Increment the ABI version count for driver/library interface.
+
+Signed-off-by: Devesh Sharma <devesh.sharma@emulex.com>
+Signed-off-by: Selvin Xavier <selvin.xavier@emulex.com>
+---
+ drivers/infiniband/hw/ocrdma/ocrdma_abi.h | 2 +-
+ 1 files changed, 1 insertions(+), 1 deletions(-)
+
+diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_abi.h b/drivers/infiniband/hw/ocrdma/ocrdma_abi.h
+index 2a14d4a..5a82ce5 100644
+--- a/drivers/infiniband/hw/ocrdma/ocrdma_abi.h
++++ b/drivers/infiniband/hw/ocrdma/ocrdma_abi.h
+@@ -28,7 +28,7 @@
+ #ifndef __OCRDMA_ABI_H__
+ #define __OCRDMA_ABI_H__
+
+-#define OCRDMA_ABI_VERSION 1
++#define OCRDMA_ABI_VERSION 2
+ #define OCRDMA_BE_ROCE_ABI_VERSION 1
+ /* user kernel communication data structures. */
+
+--
+1.7.1
+
--- /dev/null
+From 8452902d31f2da4cff07b8cc169437b891ca97cd Mon Sep 17 00:00:00 2001
+From: Selvin Xavier <selvin.xavier@emulex.com>
+Date: Thu, 30 Jan 2014 12:17:52 +0530
+Subject: [PATCH 08/16] RDMA/ocrdma: Memory leak fix in ocrdma_dereg_mr
+
+Fix for memory leak in ocrdma_dereg_mr.
+
+Signed-off-by: Selvin Xavier <selvin.xavier@emulex.com>
+Signed-off-by: Devesh Sharma <devesh.sharma@emulex.com>
+---
+ drivers/infiniband/hw/ocrdma/ocrdma_verbs.c | 3 +--
+ 1 files changed, 1 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
+index 20ef4ba..9eabe27 100644
+--- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
++++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
+@@ -840,8 +840,7 @@ int ocrdma_dereg_mr(struct ib_mr *ib_mr)
+
+ status = ocrdma_mbx_dealloc_lkey(dev, mr->hwmr.fr_mr, mr->hwmr.lkey);
+
+- if (mr->hwmr.fr_mr == 0)
+- ocrdma_free_mr_pbl_tbl(dev, &mr->hwmr);
++ ocrdma_free_mr_pbl_tbl(dev, &mr->hwmr);
+
+ /* it could be user registered memory. */
+ if (mr->umem)
+--
+1.7.1
+
--- /dev/null
+From 24a45fdbe496d1b40a017f0a8d2d0f9e6b4e8060 Mon Sep 17 00:00:00 2001
+From: Selvin Xavier <selvin.xavier@emulex.com>
+Date: Thu, 30 Jan 2014 12:37:09 +0530
+Subject: [PATCH 09/16] RDMA/ocrdma: Use non zero tag in SRQ posting
+
+As part of posting SRQ receive buffers, we populate a non-zero tag
+which will be returned in SRQ receive completions.
+
+Signed-off-by: Selvin Xavier <selvin.xavier@emulex.com>
+Signed-off-by: Devesh Sharma <devesh.sharma@emulex.com>
+---
+ drivers/infiniband/hw/ocrdma/ocrdma_verbs.c | 28 +++++++++++++++++---------
+ 1 files changed, 18 insertions(+), 10 deletions(-)
+
+diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
+index 9eabe27..786ddfc 100644
+--- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
++++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
+@@ -1537,7 +1537,7 @@ static void ocrdma_discard_cqes(struct ocrdma_qp *qp, struct ocrdma_cq *cq)
+ int discard_cnt = 0;
+ u32 cur_getp, stop_getp;
+ struct ocrdma_cqe *cqe;
+- u32 qpn = 0;
++ u32 qpn = 0, wqe_idx = 0;
+
+ spin_lock_irqsave(&cq->cq_lock, cq_flags);
+
+@@ -1566,24 +1566,29 @@ static void ocrdma_discard_cqes(struct ocrdma_qp *qp, struct ocrdma_cq *cq)
+ if (qpn == 0 || qpn != qp->id)
+ goto skip_cqe;
+
+- /* mark cqe discarded so that it is not picked up later
+- * in the poll_cq().
+- */
+- discard_cnt += 1;
+- cqe->cmn.qpn = 0;
+ if (is_cqe_for_sq(cqe)) {
+ ocrdma_hwq_inc_tail(&qp->sq);
+ } else {
+ if (qp->srq) {
++ wqe_idx = (le32_to_cpu(cqe->rq.buftag_qpn) >>
++ OCRDMA_CQE_BUFTAG_SHIFT) &
++ qp->srq->rq.max_wqe_idx;
++ if (wqe_idx < 1)
++ BUG();
+ spin_lock_irqsave(&qp->srq->q_lock, flags);
+ ocrdma_hwq_inc_tail(&qp->srq->rq);
+- ocrdma_srq_toggle_bit(qp->srq, cur_getp);
++ ocrdma_srq_toggle_bit(qp->srq, wqe_idx - 1);
+ spin_unlock_irqrestore(&qp->srq->q_lock, flags);
+
+ } else {
+ ocrdma_hwq_inc_tail(&qp->rq);
+ }
+ }
++ /* mark cqe discarded so that it is not picked up later
++ * in the poll_cq().
++ */
++ discard_cnt += 1;
++ cqe->cmn.qpn = 0;
+ skip_cqe:
+ cur_getp = (cur_getp + 1) % cq->max_hw_cqe;
+ } while (cur_getp != stop_getp);
+@@ -2239,7 +2244,7 @@ static int ocrdma_srq_get_idx(struct ocrdma_srq *srq)
+
+ if (row == srq->bit_fields_len)
+ BUG();
+- return indx;
++ return indx + 1; /* Use from index 1 */
+ }
+
+ static void ocrdma_ring_srq_db(struct ocrdma_srq *srq)
+@@ -2576,10 +2581,13 @@ static void ocrdma_update_free_srq_cqe(struct ib_wc *ibwc,
+
+ srq = get_ocrdma_srq(qp->ibqp.srq);
+ wqe_idx = (le32_to_cpu(cqe->rq.buftag_qpn) >>
+- OCRDMA_CQE_BUFTAG_SHIFT) & srq->rq.max_wqe_idx;
++ OCRDMA_CQE_BUFTAG_SHIFT) & srq->rq.max_wqe_idx;
++ if (wqe_idx < 1)
++ BUG();
++
+ ibwc->wr_id = srq->rqe_wr_id_tbl[wqe_idx];
+ spin_lock_irqsave(&srq->q_lock, flags);
+- ocrdma_srq_toggle_bit(srq, wqe_idx);
++ ocrdma_srq_toggle_bit(srq, wqe_idx - 1);
+ spin_unlock_irqrestore(&srq->q_lock, flags);
+ ocrdma_hwq_inc_tail(&srq->rq);
+ }
+--
+1.7.1
+
--- /dev/null
+From f4969efbef7ed1205bbdc35c41b04666121e04ba Mon Sep 17 00:00:00 2001
+From: Selvin Xavier <selvin.xavier@emulex.com>
+Date: Thu, 30 Jan 2014 13:35:13 +0530
+Subject: [PATCH 10/16] RDMA/ocrdma: Display proper value for max_mw
+
+Fixing the max_mw value
+
+Signed-off-by: Selvin Xavier <selvin.xavier@emulex.com>
+Signed-off-by: Devesh Sharma <devesh.sharma@emulex.com>
+---
+ drivers/infiniband/hw/ocrdma/ocrdma.h | 1 +
+ drivers/infiniband/hw/ocrdma/ocrdma_hw.c | 1 +
+ drivers/infiniband/hw/ocrdma/ocrdma_verbs.c | 2 +-
+ 3 files changed, 3 insertions(+), 1 deletions(-)
+
+diff --git a/drivers/infiniband/hw/ocrdma/ocrdma.h b/drivers/infiniband/hw/ocrdma/ocrdma.h
+index 2eb4d22..f486f3a 100644
+--- a/drivers/infiniband/hw/ocrdma/ocrdma.h
++++ b/drivers/infiniband/hw/ocrdma/ocrdma.h
+@@ -67,6 +67,7 @@ struct ocrdma_dev_attr {
+ int max_mr;
+ u64 max_mr_size;
+ u32 max_num_mr_pbl;
++ int max_mw;
+ int max_fmr;
+ int max_map_per_fmr;
+ int max_pages_per_frmr;
+diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_hw.c b/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
+index dc72df9..69b4266 100644
+--- a/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
++++ b/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
+@@ -1016,6 +1016,7 @@ static void ocrdma_get_attr(struct ocrdma_dev *dev,
+ attr->local_ca_ack_delay = (rsp->max_pd_ca_ack_delay &
+ OCRDMA_MBX_QUERY_CFG_CA_ACK_DELAY_MASK) >>
+ OCRDMA_MBX_QUERY_CFG_CA_ACK_DELAY_SHIFT;
++ attr->max_mw = rsp->max_mw;
+ attr->max_mr = rsp->max_mr;
+ attr->max_mr_size = ~0ull;
+ attr->max_fmr = 0;
+diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
+index 786ddfc..635a757 100644
+--- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
++++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
+@@ -89,7 +89,7 @@ int ocrdma_query_device(struct ib_device *ibdev, struct ib_device_attr *attr)
+ attr->max_cq = dev->attr.max_cq;
+ attr->max_cqe = dev->attr.max_cqe;
+ attr->max_mr = dev->attr.max_mr;
+- attr->max_mw = 0;
++ attr->max_mw = dev->attr.max_mw;
+ attr->max_pd = dev->attr.max_pd;
+ attr->atomic_cap = 0;
+ attr->max_fmr = 0;
+--
+1.7.1
+
--- /dev/null
+From 5601237f4362363cb35f97caa15db3f3ba40271e Mon Sep 17 00:00:00 2001
+From: Selvin Xavier <selvin.xavier@emulex.com>
+Date: Thu, 30 Jan 2014 13:45:11 +0530
+Subject: [PATCH 11/16] RDMA/ocrdma: Handle CQ overrun error
+
+Dispatch CQ overrun as a CQ event (not a QP event) and zero-initialize the event struct
+
+Signed-off-by: Selvin Xavier <selvin.xavier@emulex.com>
+Signed-off-by: Devesh Sharma <devesh.sharma@emulex.com>
+---
+ drivers/infiniband/hw/ocrdma/ocrdma_hw.c | 5 ++++-
+ 1 files changed, 4 insertions(+), 1 deletions(-)
+
+diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_hw.c b/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
+index 69b4266..bd9c8b1 100644
+--- a/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
++++ b/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
+@@ -640,7 +640,7 @@ static void ocrdma_dispatch_ibevent(struct ocrdma_dev *dev,
+ {
+ struct ocrdma_qp *qp = NULL;
+ struct ocrdma_cq *cq = NULL;
+- struct ib_event ib_evt;
++ struct ib_event ib_evt = { 0 };
+ int cq_event = 0;
+ int qp_event = 1;
+ int srq_event = 0;
+@@ -665,6 +665,8 @@ static void ocrdma_dispatch_ibevent(struct ocrdma_dev *dev,
+ case OCRDMA_CQ_OVERRUN_ERROR:
+ ib_evt.element.cq = &cq->ibcq;
+ ib_evt.event = IB_EVENT_CQ_ERR;
++ cq_event = 1;
++ qp_event = 0;
+ break;
+ case OCRDMA_CQ_QPCAT_ERROR:
+ ib_evt.element.qp = &qp->ibqp;
+@@ -726,6 +728,7 @@ static void ocrdma_dispatch_ibevent(struct ocrdma_dev *dev,
+ qp->srq->ibsrq.
+ srq_context);
+ } else if (dev_event) {
++ pr_err("%s: Fatal event received\n", dev->ibdev.name);
+ ib_dispatch_event(&ib_evt);
+ }
+
+--
+1.7.1
+
--- /dev/null
+From f6c4b31875b669bf6562be219578da94d2f06ee3 Mon Sep 17 00:00:00 2001
+From: Selvin Xavier <selvin.xavier@emulex.com>
+Date: Thu, 30 Jan 2014 13:52:18 +0530
+Subject: [PATCH 12/16] RDMA/ocrdma: Support non-embedded mailbox commands
+
+Added a routine to issue non-embedded mailbox commands
+for handling large mailbox request/response data.
+
+Signed-off-by: Selvin Xavier <selvin.xavier@emulex.com>
+Signed-off-by: Devesh Sharma <devesh.sharma@emulex.com>
+---
+ drivers/infiniband/hw/ocrdma/ocrdma_hw.c | 55 +++++++++++++++++++++++++----
+ 1 files changed, 47 insertions(+), 8 deletions(-)
+
+diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_hw.c b/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
+index bd9c8b1..63e3747 100644
+--- a/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
++++ b/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
+@@ -953,7 +953,8 @@ static int ocrdma_mbx_cmd(struct ocrdma_dev *dev, struct ocrdma_mqe *mqe)
+ {
+ int status = 0;
+ u16 cqe_status, ext_status;
+- struct ocrdma_mqe *rsp;
++ struct ocrdma_mqe *rsp_mqe;
++ struct ocrdma_mbx_rsp *rsp = NULL;
+
+ mutex_lock(&dev->mqe_ctx.lock);
+ ocrdma_post_mqe(dev, mqe);
+@@ -962,23 +963,61 @@ static int ocrdma_mbx_cmd(struct ocrdma_dev *dev, struct ocrdma_mqe *mqe)
+ goto mbx_err;
+ cqe_status = dev->mqe_ctx.cqe_status;
+ ext_status = dev->mqe_ctx.ext_status;
+- rsp = ocrdma_get_mqe_rsp(dev);
+- ocrdma_copy_le32_to_cpu(mqe, rsp, (sizeof(*mqe)));
++ rsp_mqe = ocrdma_get_mqe_rsp(dev);
++ ocrdma_copy_le32_to_cpu(mqe, rsp_mqe, (sizeof(*mqe)));
++ if ((mqe->hdr.spcl_sge_cnt_emb & OCRDMA_MQE_HDR_EMB_MASK) >>
++ OCRDMA_MQE_HDR_EMB_SHIFT)
++ rsp = &mqe->u.rsp;
++
+ if (cqe_status || ext_status) {
+- pr_err("%s() opcode=0x%x, cqe_status=0x%x, ext_status=0x%x\n",
+- __func__,
+- (rsp->u.rsp.subsys_op & OCRDMA_MBX_RSP_OPCODE_MASK) >>
+- OCRDMA_MBX_RSP_OPCODE_SHIFT, cqe_status, ext_status);
++ pr_err("%s() cqe_status=0x%x, ext_status=0x%x,",
++ __func__, cqe_status, ext_status);
++ if (rsp) {
++ /* This is for embedded cmds. */
++ pr_err("opcode=0x%x, subsystem=0x%x\n",
++ (rsp->subsys_op & OCRDMA_MBX_RSP_OPCODE_MASK) >>
++ OCRDMA_MBX_RSP_OPCODE_SHIFT,
++ (rsp->subsys_op & OCRDMA_MBX_RSP_SUBSYS_MASK) >>
++ OCRDMA_MBX_RSP_SUBSYS_SHIFT);
++ }
+ status = ocrdma_get_mbx_cqe_errno(cqe_status);
+ goto mbx_err;
+ }
+- if (mqe->u.rsp.status & OCRDMA_MBX_RSP_STATUS_MASK)
++ /* For non embedded, rsp errors are handled in ocrdma_nonemb_mbx_cmd */
++ if (rsp && (mqe->u.rsp.status & OCRDMA_MBX_RSP_STATUS_MASK))
+ status = ocrdma_get_mbx_errno(mqe->u.rsp.status);
+ mbx_err:
+ mutex_unlock(&dev->mqe_ctx.lock);
+ return status;
+ }
+
++static int ocrdma_nonemb_mbx_cmd(struct ocrdma_dev *dev, struct ocrdma_mqe *mqe,
++ void *payload_va)
++{
++ int status = 0;
++ struct ocrdma_mbx_rsp *rsp = payload_va;
++
++ if ((mqe->hdr.spcl_sge_cnt_emb & OCRDMA_MQE_HDR_EMB_MASK) >>
++ OCRDMA_MQE_HDR_EMB_SHIFT)
++ BUG();
++
++ status = ocrdma_mbx_cmd(dev, mqe);
++ if (!status)
++ /* For non embedded, only CQE failures are handled in
++ * ocrdma_mbx_cmd. We need to check for RSP errors.
++ */
++ if (rsp->status & OCRDMA_MBX_RSP_STATUS_MASK)
++ status = ocrdma_get_mbx_errno(rsp->status);
++
++ if (status)
++ pr_err("opcode=0x%x, subsystem=0x%x\n",
++ (rsp->subsys_op & OCRDMA_MBX_RSP_OPCODE_MASK) >>
++ OCRDMA_MBX_RSP_OPCODE_SHIFT,
++ (rsp->subsys_op & OCRDMA_MBX_RSP_SUBSYS_MASK) >>
++ OCRDMA_MBX_RSP_SUBSYS_SHIFT);
++ return status;
++}
++
+ static void ocrdma_get_attr(struct ocrdma_dev *dev,
+ struct ocrdma_dev_attr *attr,
+ struct ocrdma_mbx_query_config *rsp)
+--
+1.7.1
+
--- /dev/null
+From 6ca02cc2ef0271b1e951307769c7756fdfc5120a Mon Sep 17 00:00:00 2001
+From: Selvin Xavier <selvin.xavier@emulex.com>
+Date: Mon, 3 Feb 2014 18:23:28 +0530
+Subject: [PATCH 13/16] RDMA/ocrdma: Query controller information
+
+Issue mailbox commands to query the ocrdma controller
+information and PHY information, and print them
+while adding the ocrdma device.
+
+Signed-off-by: Selvin Xavier <selvin.xavier@emulex.com>
+Signed-off-by: Devesh Sharma <devesh.sharma@emulex.com>
+---
+ drivers/infiniband/hw/ocrdma/ocrdma.h | 33 +++++++++
+ drivers/infiniband/hw/ocrdma/ocrdma_hw.c | 106 ++++++++++++++++++++++++++++
+ drivers/infiniband/hw/ocrdma/ocrdma_hw.h | 1 +
+ drivers/infiniband/hw/ocrdma/ocrdma_main.c | 6 ++
+ drivers/infiniband/hw/ocrdma/ocrdma_sli.h | 81 +++++++++++++++++++++-
+ 5 files changed, 226 insertions(+), 1 deletions(-)
+
+diff --git a/drivers/infiniband/hw/ocrdma/ocrdma.h b/drivers/infiniband/hw/ocrdma/ocrdma.h
+index f486f3a..55eb0e6 100644
+--- a/drivers/infiniband/hw/ocrdma/ocrdma.h
++++ b/drivers/infiniband/hw/ocrdma/ocrdma.h
+@@ -44,6 +44,11 @@
+ #define OCRDMA_ROCE_DRV_DESC "Emulex OneConnect RoCE Driver"
+ #define OCRDMA_NODE_DESC "Emulex OneConnect RoCE HCA"
+
++#define OC_NAME_SH OCRDMA_NODE_DESC "(Skyhawk)"
++#define OC_NAME_UNKNOWN OCRDMA_NODE_DESC "(Unknown)"
++
++#define OC_SKH_DEVICE_PF 0x720
++#define OC_SKH_DEVICE_VF 0x728
+ #define OCRDMA_MAX_AH 512
+
+ #define OCRDMA_UVERBS(CMD_NAME) (1ull << IB_USER_VERBS_CMD_##CMD_NAME)
+@@ -86,6 +91,12 @@ struct ocrdma_dev_attr {
+ u8 num_ird_pages;
+ };
+
++struct ocrdma_dma_mem {
++ void *va;
++ dma_addr_t pa;
++ u32 size;
++};
++
+ struct ocrdma_pbl {
+ void *va;
+ dma_addr_t pa;
+@@ -125,6 +136,14 @@ struct mqe_ctx {
+ bool cmd_done;
+ };
+
++
++struct phy_info {
++ u16 auto_speeds_supported;
++ u16 fixed_speeds_supported;
++ u16 phy_type;
++ u16 interface_type;
++};
++
+ struct ocrdma_dev {
+ struct ib_device ibdev;
+ struct ocrdma_dev_attr attr;
+@@ -168,6 +187,9 @@ struct ocrdma_dev {
+ struct mqe_ctx mqe_ctx;
+
+ struct be_dev_info nic_info;
++ struct phy_info phy;
++ char model_number[32];
++ u32 hba_port_num;
+
+ struct list_head entry;
+ struct rcu_head rcu;
+@@ -421,6 +443,17 @@ static inline int is_cqe_wr_imm(struct ocrdma_cqe *cqe)
+ }
+
+
++static inline char *hca_name(struct ocrdma_dev *dev)
++{
++ switch (dev->nic_info.pdev->device) {
++ case OC_SKH_DEVICE_PF:
++ case OC_SKH_DEVICE_VF:
++ return OC_NAME_SH;
++ default:
++ return OC_NAME_UNKNOWN;
++ }
++}
++
+ static inline int ocrdma_get_eq_table_index(struct ocrdma_dev *dev,
+ int eqid)
+ {
+diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_hw.c b/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
+index 63e3747..f36aa5d 100644
+--- a/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
++++ b/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
+@@ -243,6 +243,23 @@ static int ocrdma_get_mbx_errno(u32 status)
+ return err_num;
+ }
+
++char *port_speed_string(struct ocrdma_dev *dev)
++{
++ char *str = "";
++ u16 speeds_supported;
++
++ speeds_supported = dev->phy.fixed_speeds_supported |
++ dev->phy.auto_speeds_supported;
++ if (speeds_supported & OCRDMA_PHY_SPEED_40GBPS)
++ str = "40Gbps ";
++ else if (speeds_supported & OCRDMA_PHY_SPEED_10GBPS)
++ str = "10Gbps ";
++ else if (speeds_supported & OCRDMA_PHY_SPEED_1GBPS)
++ str = "1Gbps ";
++
++ return str;
++}
++
+ static int ocrdma_get_mbx_cqe_errno(u16 cqe_status)
+ {
+ int err_num = -EINVAL;
+@@ -332,6 +349,11 @@ static void *ocrdma_init_emb_mqe(u8 opcode, u32 cmd_len)
+ return mqe;
+ }
+
++static void *ocrdma_alloc_mqe(void)
++{
++ return kzalloc(sizeof(struct ocrdma_mqe), GFP_KERNEL);
++}
++
+ static void ocrdma_free_q(struct ocrdma_dev *dev, struct ocrdma_queue_info *q)
+ {
+ dma_free_coherent(&dev->nic_info.pdev->dev, q->size, q->va, q->dma);
+@@ -1154,6 +1176,54 @@ mbx_err:
+ return status;
+ }
+
++int ocrdma_mbx_get_ctrl_attribs(struct ocrdma_dev *dev)
++{
++ int status = -ENOMEM;
++ struct ocrdma_dma_mem dma;
++ struct ocrdma_mqe *mqe;
++ struct ocrdma_get_ctrl_attribs_rsp *ctrl_attr_rsp;
++ struct mgmt_hba_attribs *hba_attribs;
++
++ mqe = ocrdma_alloc_mqe();
++ if (!mqe)
++ return status;
++ memset(mqe, 0, sizeof(*mqe));
++
++ dma.size = sizeof(struct ocrdma_get_ctrl_attribs_rsp);
++ dma.va = dma_alloc_coherent(&dev->nic_info.pdev->dev,
++ dma.size, &dma.pa, GFP_KERNEL);
++ if (!dma.va)
++ goto free_mqe;
++
++ mqe->hdr.pyld_len = dma.size;
++ mqe->hdr.spcl_sge_cnt_emb |=
++ (1 << OCRDMA_MQE_HDR_SGE_CNT_SHIFT) &
++ OCRDMA_MQE_HDR_SGE_CNT_MASK;
++ mqe->u.nonemb_req.sge[0].pa_lo = (u32) (dma.pa & 0xffffffff);
++ mqe->u.nonemb_req.sge[0].pa_hi = (u32) upper_32_bits(dma.pa);
++ mqe->u.nonemb_req.sge[0].len = dma.size;
++
++ memset(dma.va, 0, dma.size);
++ ocrdma_init_mch((struct ocrdma_mbx_hdr *)dma.va,
++ OCRDMA_CMD_GET_CTRL_ATTRIBUTES,
++ OCRDMA_SUBSYS_COMMON,
++ dma.size);
++
++ status = ocrdma_nonemb_mbx_cmd(dev, mqe, dma.va);
++ if (!status) {
++ ctrl_attr_rsp = (struct ocrdma_get_ctrl_attribs_rsp *)dma.va;
++ hba_attribs = &ctrl_attr_rsp->ctrl_attribs.hba_attribs;
++
++ dev->hba_port_num = hba_attribs->phy_port;
++ strncpy(dev->model_number,
++ hba_attribs->controller_model_number, 31);
++ }
++ dma_free_coherent(&dev->nic_info.pdev->dev, dma.size, dma.va, dma.pa);
++free_mqe:
++ kfree(mqe);
++ return status;
++}
++
+ static int ocrdma_mbx_query_dev(struct ocrdma_dev *dev)
+ {
+ int status = -ENOMEM;
+@@ -1201,6 +1271,35 @@ mbx_err:
+ return status;
+ }
+
++int ocrdma_mbx_get_phy_info(struct ocrdma_dev *dev)
++{
++ int status = -ENOMEM;
++ struct ocrdma_mqe *cmd;
++ struct ocrdma_get_phy_info_rsp *rsp;
++
++ cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_PHY_DETAILS, sizeof(*cmd));
++ if (!cmd)
++ return status;
++
++ ocrdma_init_mch((struct ocrdma_mbx_hdr *)&cmd->u.cmd[0],
++ OCRDMA_CMD_PHY_DETAILS, OCRDMA_SUBSYS_COMMON,
++ sizeof(*cmd));
++
++ status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
++ if (status)
++ goto mbx_err;
++
++ rsp = (struct ocrdma_get_phy_info_rsp *)cmd;
++ dev->phy.phy_type = le16_to_cpu(rsp->phy_type);
++ dev->phy.auto_speeds_supported =
++ le16_to_cpu(rsp->auto_speeds_supported);
++ dev->phy.fixed_speeds_supported =
++ le16_to_cpu(rsp->fixed_speeds_supported);
++mbx_err:
++ kfree(cmd);
++ return status;
++}
++
+ int ocrdma_mbx_alloc_pd(struct ocrdma_dev *dev, struct ocrdma_pd *pd)
+ {
+ int status = -ENOMEM;
+@@ -2589,6 +2688,13 @@ int ocrdma_init_hw(struct ocrdma_dev *dev)
+ status = ocrdma_mbx_create_ah_tbl(dev);
+ if (status)
+ goto conf_err;
++ status = ocrdma_mbx_get_phy_info(dev);
++ if (status)
++ goto conf_err;
++ status = ocrdma_mbx_get_ctrl_attribs(dev);
++ if (status)
++ goto conf_err;
++
+ return 0;
+
+ conf_err:
+diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_hw.h b/drivers/infiniband/hw/ocrdma/ocrdma_hw.h
+index 38102b3..3f8aa86 100644
+--- a/drivers/infiniband/hw/ocrdma/ocrdma_hw.h
++++ b/drivers/infiniband/hw/ocrdma/ocrdma_hw.h
+@@ -135,4 +135,5 @@ bool ocrdma_is_qp_in_rq_flushlist(struct ocrdma_cq *, struct ocrdma_qp *);
+ void ocrdma_flush_qp(struct ocrdma_qp *);
+ int ocrdma_get_irq(struct ocrdma_dev *dev, struct ocrdma_eq *eq);
+
++char *port_speed_string(struct ocrdma_dev *dev);
+ #endif /* __OCRDMA_HW_H__ */
+diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_main.c b/drivers/infiniband/hw/ocrdma/ocrdma_main.c
+index 5d30161..488a512 100644
+--- a/drivers/infiniband/hw/ocrdma/ocrdma_main.c
++++ b/drivers/infiniband/hw/ocrdma/ocrdma_main.c
+@@ -436,6 +436,12 @@ static struct ocrdma_dev *ocrdma_add(struct be_dev_info *dev_info)
+ spin_lock(&ocrdma_devlist_lock);
+ list_add_tail_rcu(&dev->entry, &ocrdma_dev_list);
+ spin_unlock(&ocrdma_devlist_lock);
++ pr_info("%s %s: %s \"%s\" port %d\n",
++ dev_name(&dev->nic_info.pdev->dev), hca_name(dev),
++ port_speed_string(dev), dev->model_number,
++ dev->hba_port_num);
++ pr_info("%s ocrdma%d driver loaded successfully\n",
++ dev_name(&dev->nic_info.pdev->dev), dev->id);
+ return dev;
+
+ alloc_err:
+diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_sli.h b/drivers/infiniband/hw/ocrdma/ocrdma_sli.h
+index de4ebfc..9e72aec 100644
+--- a/drivers/infiniband/hw/ocrdma/ocrdma_sli.h
++++ b/drivers/infiniband/hw/ocrdma/ocrdma_sli.h
+@@ -82,12 +82,14 @@ enum {
+ OCRDMA_CMD_CREATE_CQ = 12,
+ OCRDMA_CMD_CREATE_EQ = 13,
+ OCRDMA_CMD_CREATE_MQ = 21,
++ OCRDMA_CMD_GET_CTRL_ATTRIBUTES = 32,
+ OCRDMA_CMD_GET_FW_VER = 35,
+ OCRDMA_CMD_DELETE_MQ = 53,
+ OCRDMA_CMD_DELETE_CQ = 54,
+ OCRDMA_CMD_DELETE_EQ = 55,
+ OCRDMA_CMD_GET_FW_CONFIG = 58,
+- OCRDMA_CMD_CREATE_MQ_EXT = 90
++ OCRDMA_CMD_CREATE_MQ_EXT = 90,
++ OCRDMA_CMD_PHY_DETAILS = 102
+ };
+
+ enum {
+@@ -578,6 +580,30 @@ enum {
+ OCRDMA_FN_MODE_RDMA = 0x4
+ };
+
++struct ocrdma_get_phy_info_rsp {
++ struct ocrdma_mqe_hdr hdr;
++ struct ocrdma_mbx_rsp rsp;
++
++ u16 phy_type;
++ u16 interface_type;
++ u32 misc_params;
++ u16 ext_phy_details;
++ u16 rsvd;
++ u16 auto_speeds_supported;
++ u16 fixed_speeds_supported;
++ u32 future_use[2];
++};
++
++enum {
++ OCRDMA_PHY_SPEED_ZERO = 0x0,
++ OCRDMA_PHY_SPEED_10MBPS = 0x1,
++ OCRDMA_PHY_SPEED_100MBPS = 0x2,
++ OCRDMA_PHY_SPEED_1GBPS = 0x4,
++ OCRDMA_PHY_SPEED_10GBPS = 0x8,
++ OCRDMA_PHY_SPEED_40GBPS = 0x20
++};
++
++
+ struct ocrdma_get_link_speed_rsp {
+ struct ocrdma_mqe_hdr hdr;
+ struct ocrdma_mbx_rsp rsp;
+@@ -1719,4 +1745,57 @@ struct ocrdma_av {
+ u32 valid;
+ } __packed;
+
++struct mgmt_hba_attribs {
++ u8 flashrom_version_string[32];
++ u8 manufacturer_name[32];
++ u32 supported_modes;
++ u32 rsvd0[3];
++ u8 ncsi_ver_string[12];
++ u32 default_extended_timeout;
++ u8 controller_model_number[32];
++ u8 controller_description[64];
++ u8 controller_serial_number[32];
++ u8 ip_version_string[32];
++ u8 firmware_version_string[32];
++ u8 bios_version_string[32];
++ u8 redboot_version_string[32];
++ u8 driver_version_string[32];
++ u8 fw_on_flash_version_string[32];
++ u32 functionalities_supported;
++ u16 max_cdblength;
++ u8 asic_revision;
++ u8 generational_guid[16];
++ u8 hba_port_count;
++ u16 default_link_down_timeout;
++ u8 iscsi_ver_min_max;
++ u8 multifunction_device;
++ u8 cache_valid;
++ u8 hba_status;
++ u8 max_domains_supported;
++ u8 phy_port;
++ u32 firmware_post_status;
++ u32 hba_mtu[8];
++ u32 rsvd1[4];
++};
++
++struct mgmt_controller_attrib {
++ struct mgmt_hba_attribs hba_attribs;
++ u16 pci_vendor_id;
++ u16 pci_device_id;
++ u16 pci_sub_vendor_id;
++ u16 pci_sub_system_id;
++ u8 pci_bus_number;
++ u8 pci_device_number;
++ u8 pci_function_number;
++ u8 interface_type;
++ u64 unique_identifier;
++ u32 rsvd0[5];
++};
++
++struct ocrdma_get_ctrl_attribs_rsp {
++ struct ocrdma_mbx_hdr hdr;
++ struct mgmt_controller_attrib ctrl_attribs;
++};
++
++
+ #endif /* __OCRDMA_SLI_H__ */
+--
+1.7.1
+
--- /dev/null
+From dec68de33a6a9cb72512bdbeba81850335dc1749 Mon Sep 17 00:00:00 2001
+From: Devesh Sharma <devesh.sharma@emulex.com>
+Date: Tue, 18 Feb 2014 16:44:08 +0530
+Subject: [PATCH 14/16] RDMA/ocrdma: Support for Skyhawk statistics
+
+Issue a mailbox command to get the statistics counters
+from the Skyhawk hardware.
+
+Signed-off-by: Selvin Xavier <selvin.xavier@emulex.com>
+Signed-off-by: Devesh Sharma <devesh.sharma@emulex.com>
+---
+ drivers/infiniband/hw/ocrdma/Makefile | 2 +-
+ drivers/infiniband/hw/ocrdma/ocrdma.h | 28 ++
+ drivers/infiniband/hw/ocrdma/ocrdma_hw.c | 42 ++
+ drivers/infiniband/hw/ocrdma/ocrdma_hw.h | 1 +
+ drivers/infiniband/hw/ocrdma/ocrdma_main.c | 8 +
+ drivers/infiniband/hw/ocrdma/ocrdma_sli.h | 152 +++++++
+ drivers/infiniband/hw/ocrdma/ocrdma_stats.c | 623 +++++++++++++++++++++++++++
+ drivers/infiniband/hw/ocrdma/ocrdma_stats.h | 54 +++
+ 8 files changed, 909 insertions(+), 1 deletions(-)
+ create mode 100644 drivers/infiniband/hw/ocrdma/ocrdma_stats.c
+ create mode 100644 drivers/infiniband/hw/ocrdma/ocrdma_stats.h
+
+diff --git a/drivers/infiniband/hw/ocrdma/Makefile b/drivers/infiniband/hw/ocrdma/Makefile
+index 06a5bed..d1bfd4f 100644
+--- a/drivers/infiniband/hw/ocrdma/Makefile
++++ b/drivers/infiniband/hw/ocrdma/Makefile
+@@ -2,4 +2,4 @@ ccflags-y := -Idrivers/net/ethernet/emulex/benet
+
+ obj-$(CONFIG_INFINIBAND_OCRDMA) += ocrdma.o
+
+-ocrdma-y := ocrdma_main.o ocrdma_verbs.o ocrdma_hw.o ocrdma_ah.o
++ocrdma-y := ocrdma_main.o ocrdma_verbs.o ocrdma_hw.o ocrdma_ah.o ocrdma_stats.o
+diff --git a/drivers/infiniband/hw/ocrdma/ocrdma.h b/drivers/infiniband/hw/ocrdma/ocrdma.h
+index 55eb0e6..15c8ee4 100644
+--- a/drivers/infiniband/hw/ocrdma/ocrdma.h
++++ b/drivers/infiniband/hw/ocrdma/ocrdma.h
+@@ -53,6 +53,8 @@
+
+ #define OCRDMA_UVERBS(CMD_NAME) (1ull << IB_USER_VERBS_CMD_##CMD_NAME)
+
++#define convert_to_64bit(lo, hi) ((u64)hi << 32 | (u64)lo)
++
+ struct ocrdma_dev_attr {
+ u8 fw_ver[32];
+ u32 vendor_id;
+@@ -136,6 +138,18 @@ struct mqe_ctx {
+ bool cmd_done;
+ };
+
++struct ocrdma_stats {
++ u8 type;
++ struct ocrdma_dev *dev;
++};
++
++struct stats_mem {
++ struct ocrdma_mqe mqe;
++ void *va;
++ dma_addr_t pa;
++ u32 size;
++ char *debugfs_mem;
++};
+
+ struct phy_info {
+ u16 auto_speeds_supported;
+@@ -197,6 +211,20 @@ struct ocrdma_dev {
+ u64 stag_arr[OCRDMA_MAX_STAG];
+ u16 pvid;
+ u32 asic_id;
++
++ ulong last_stats_time;
++ struct mutex stats_lock; /* provide synch for debugfs operations */
++ struct stats_mem stats_mem;
++ struct ocrdma_stats rsrc_stats;
++ struct ocrdma_stats rx_stats;
++ struct ocrdma_stats wqe_stats;
++ struct ocrdma_stats tx_stats;
++ struct ocrdma_stats db_err_stats;
++ struct ocrdma_stats tx_qp_err_stats;
++ struct ocrdma_stats rx_qp_err_stats;
++ struct ocrdma_stats tx_dbg_stats;
++ struct ocrdma_stats rx_dbg_stats;
++ struct dentry *dir;
+ };
+
+ struct ocrdma_cq {
+diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_hw.c b/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
+index f36aa5d..3642383 100644
+--- a/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
++++ b/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
+@@ -1176,6 +1176,48 @@ mbx_err:
+ return status;
+ }
+
++int ocrdma_mbx_rdma_stats(struct ocrdma_dev *dev, bool reset)
++{
++ struct ocrdma_rdma_stats_req *req = dev->stats_mem.va;
++ struct ocrdma_mqe *mqe = &dev->stats_mem.mqe;
++ struct ocrdma_rdma_stats_resp *old_stats = NULL;
++ int status;
++
++ old_stats = kzalloc(sizeof(*old_stats), GFP_KERNEL);
++ if (old_stats == NULL)
++ return -ENOMEM;
++
++ memset(mqe, 0, sizeof(*mqe));
++ mqe->hdr.pyld_len = dev->stats_mem.size;
++ mqe->hdr.spcl_sge_cnt_emb |=
++ (1 << OCRDMA_MQE_HDR_SGE_CNT_SHIFT) &
++ OCRDMA_MQE_HDR_SGE_CNT_MASK;
++ mqe->u.nonemb_req.sge[0].pa_lo = (u32) (dev->stats_mem.pa & 0xffffffff);
++ mqe->u.nonemb_req.sge[0].pa_hi = (u32) upper_32_bits(dev->stats_mem.pa);
++ mqe->u.nonemb_req.sge[0].len = dev->stats_mem.size;
++
++ /* Cache the old stats */
++ memcpy(old_stats, req, sizeof(struct ocrdma_rdma_stats_resp));
++ memset(req, 0, dev->stats_mem.size);
++
++ ocrdma_init_mch((struct ocrdma_mbx_hdr *)req,
++ OCRDMA_CMD_GET_RDMA_STATS,
++ OCRDMA_SUBSYS_ROCE,
++ dev->stats_mem.size);
++ if (reset)
++ req->reset_stats = reset;
++
++ status = ocrdma_nonemb_mbx_cmd(dev, mqe, dev->stats_mem.va);
++ if (status)
++ /* Copy from cache, if mbox fails */
++ memcpy(req, old_stats, sizeof(struct ocrdma_rdma_stats_resp));
++ else
++ ocrdma_le32_to_cpu(req, dev->stats_mem.size);
++
++ kfree(old_stats);
++ return status;
++}
++
+ int ocrdma_mbx_get_ctrl_attribs(struct ocrdma_dev *dev)
+ {
+ int status = -ENOMEM;
+diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_hw.h b/drivers/infiniband/hw/ocrdma/ocrdma_hw.h
+index 3f8aa86..76ff06d 100644
+--- a/drivers/infiniband/hw/ocrdma/ocrdma_hw.h
++++ b/drivers/infiniband/hw/ocrdma/ocrdma_hw.h
+@@ -135,5 +135,6 @@ bool ocrdma_is_qp_in_rq_flushlist(struct ocrdma_cq *, struct ocrdma_qp *);
+ void ocrdma_flush_qp(struct ocrdma_qp *);
+ int ocrdma_get_irq(struct ocrdma_dev *dev, struct ocrdma_eq *eq);
+
++int ocrdma_mbx_rdma_stats(struct ocrdma_dev *, bool reset);
+ char *port_speed_string(struct ocrdma_dev *dev);
+ #endif /* __OCRDMA_HW_H__ */
+diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_main.c b/drivers/infiniband/hw/ocrdma/ocrdma_main.c
+index 488a512..95b364d 100644
+--- a/drivers/infiniband/hw/ocrdma/ocrdma_main.c
++++ b/drivers/infiniband/hw/ocrdma/ocrdma_main.c
+@@ -39,6 +39,7 @@
+ #include "ocrdma_ah.h"
+ #include "be_roce.h"
+ #include "ocrdma_hw.h"
++#include "ocrdma_stats.h"
+ #include "ocrdma_abi.h"
+
+ MODULE_VERSION(OCRDMA_ROCE_DRV_VERSION);
+@@ -436,6 +437,9 @@ static struct ocrdma_dev *ocrdma_add(struct be_dev_info *dev_info)
+ spin_lock(&ocrdma_devlist_lock);
+ list_add_tail_rcu(&dev->entry, &ocrdma_dev_list);
+ spin_unlock(&ocrdma_devlist_lock);
++ /* Init stats */
++ ocrdma_add_port_stats(dev);
++
+ pr_info("%s %s: %s \"%s\" port %d\n",
+ dev_name(&dev->nic_info.pdev->dev), hca_name(dev),
+ port_speed_string(dev), dev->model_number,
+@@ -473,6 +477,7 @@ static void ocrdma_remove(struct ocrdma_dev *dev)
+ /* first unregister with stack to stop all the active traffic
+ * of the registered clients.
+ */
++ ocrdma_rem_port_stats(dev);
+ ib_unregister_device(&dev->ibdev);
+
+ spin_lock(&ocrdma_devlist_lock);
+@@ -561,6 +566,8 @@ static int __init ocrdma_init_module(void)
+ {
+ int status;
+
++ ocrdma_init_debugfs();
++
+ #if IS_ENABLED(CONFIG_IPV6)
+ status = register_inet6addr_notifier(&ocrdma_inet6addr_notifier);
+ if (status)
+@@ -578,6 +585,7 @@ static void __exit ocrdma_exit_module(void)
+ {
+ be_roce_unregister_driver(&ocrdma_drv);
+ ocrdma_unregister_inet6addr_notifier();
++ ocrdma_rem_debugfs();
+ }
+
+ module_init(ocrdma_init_module);
+diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_sli.h b/drivers/infiniband/hw/ocrdma/ocrdma_sli.h
+index 9e72aec..6e048b7 100644
+--- a/drivers/infiniband/hw/ocrdma/ocrdma_sli.h
++++ b/drivers/infiniband/hw/ocrdma/ocrdma_sli.h
+@@ -72,6 +72,7 @@ enum {
+
+ OCRDMA_CMD_ATTACH_MCAST,
+ OCRDMA_CMD_DETACH_MCAST,
++ OCRDMA_CMD_GET_RDMA_STATS,
+
+ OCRDMA_CMD_MAX
+ };
+@@ -1745,6 +1746,157 @@ struct ocrdma_av {
+ u32 valid;
+ } __packed;
+
++struct ocrdma_rsrc_stats {
++ u32 dpp_pds;
++ u32 non_dpp_pds;
++ u32 rc_dpp_qps;
++ u32 uc_dpp_qps;
++ u32 ud_dpp_qps;
++ u32 rc_non_dpp_qps;
++ u32 rsvd;
++ u32 uc_non_dpp_qps;
++ u32 ud_non_dpp_qps;
++ u32 rsvd1;
++ u32 srqs;
++ u32 rbqs;
++ u32 r64K_nsmr;
++ u32 r64K_to_2M_nsmr;
++ u32 r2M_to_44M_nsmr;
++ u32 r44M_to_1G_nsmr;
++ u32 r1G_to_4G_nsmr;
++ u32 nsmr_count_4G_to_32G;
++ u32 r32G_to_64G_nsmr;
++ u32 r64G_to_128G_nsmr;
++ u32 r128G_to_higher_nsmr;
++ u32 embedded_nsmr;
++ u32 frmr;
++ u32 prefetch_qps;
++ u32 ondemand_qps;
++ u32 phy_mr;
++ u32 mw;
++ u32 rsvd2[7];
++};
++
++struct ocrdma_db_err_stats {
++ u32 sq_doorbell_errors;
++ u32 cq_doorbell_errors;
++ u32 rq_srq_doorbell_errors;
++ u32 cq_overflow_errors;
++ u32 rsvd[4];
++};
++
++struct ocrdma_wqe_stats {
++ u32 large_send_rc_wqes_lo;
++ u32 large_send_rc_wqes_hi;
++ u32 large_write_rc_wqes_lo;
++ u32 large_write_rc_wqes_hi;
++ u32 rsvd[4];
++ u32 read_wqes_lo;
++ u32 read_wqes_hi;
++ u32 frmr_wqes_lo;
++ u32 frmr_wqes_hi;
++ u32 mw_bind_wqes_lo;
++ u32 mw_bind_wqes_hi;
++ u32 invalidate_wqes_lo;
++ u32 invalidate_wqes_hi;
++ u32 rsvd1[2];
++ u32 dpp_wqe_drops;
++ u32 rsvd2[5];
++};
++
++struct ocrdma_tx_stats {
++ u32 send_pkts_lo;
++ u32 send_pkts_hi;
++ u32 write_pkts_lo;
++ u32 write_pkts_hi;
++ u32 read_pkts_lo;
++ u32 read_pkts_hi;
++ u32 read_rsp_pkts_lo;
++ u32 read_rsp_pkts_hi;
++ u32 ack_pkts_lo;
++ u32 ack_pkts_hi;
++ u32 send_bytes_lo;
++ u32 send_bytes_hi;
++ u32 write_bytes_lo;
++ u32 write_bytes_hi;
++ u32 read_req_bytes_lo;
++ u32 read_req_bytes_hi;
++ u32 read_rsp_bytes_lo;
++ u32 read_rsp_bytes_hi;
++ u32 ack_timeouts;
++ u32 rsvd[5];
++};
++
++
++struct ocrdma_tx_qp_err_stats {
++ u32 local_length_errors;
++ u32 local_protection_errors;
++ u32 local_qp_operation_errors;
++ u32 retry_count_exceeded_errors;
++ u32 rnr_retry_count_exceeded_errors;
++ u32 rsvd[3];
++};
++
++struct ocrdma_rx_stats {
++ u32 roce_frame_bytes_lo;
++ u32 roce_frame_bytes_hi;
++ u32 roce_frame_icrc_drops;
++ u32 roce_frame_payload_len_drops;
++ u32 ud_drops;
++ u32 qp1_drops;
++ u32 psn_error_request_packets;
++ u32 psn_error_resp_packets;
++ u32 rnr_nak_timeouts;
++ u32 rnr_nak_receives;
++ u32 roce_frame_rxmt_drops;
++ u32 nak_count_psn_sequence_errors;
++ u32 rc_drop_count_lookup_errors;
++ u32 rq_rnr_naks;
++ u32 srq_rnr_naks;
++ u32 roce_frames_lo;
++ u32 roce_frames_hi;
++ u32 rsvd;
++};
++
++struct ocrdma_rx_qp_err_stats {
++ u32 nak_invalid_requst_errors;
++ u32 nak_remote_operation_errors;
++ u32 nak_count_remote_access_errors;
++ u32 local_length_errors;
++ u32 local_protection_errors;
++ u32 local_qp_operation_errors;
++ u32 rsvd[2];
++};
++
++struct ocrdma_tx_dbg_stats {
++ u32 data[100];
++};
++
++struct ocrdma_rx_dbg_stats {
++ u32 data[200];
++};
++
++struct ocrdma_rdma_stats_req {
++ struct ocrdma_mbx_hdr hdr;
++ u8 reset_stats;
++ u8 rsvd[3];
++} __packed;
++
++struct ocrdma_rdma_stats_resp {
++ struct ocrdma_mbx_hdr hdr;
++ struct ocrdma_rsrc_stats act_rsrc_stats;
++ struct ocrdma_rsrc_stats th_rsrc_stats;
++ struct ocrdma_db_err_stats db_err_stats;
++ struct ocrdma_wqe_stats wqe_stats;
++ struct ocrdma_tx_stats tx_stats;
++ struct ocrdma_tx_qp_err_stats tx_qp_err_stats;
++ struct ocrdma_rx_stats rx_stats;
++ struct ocrdma_rx_qp_err_stats rx_qp_err_stats;
++ struct ocrdma_tx_dbg_stats tx_dbg_stats;
++ struct ocrdma_rx_dbg_stats rx_dbg_stats;
++} __packed;
++
++
+ struct mgmt_hba_attribs {
+ u8 flashrom_version_string[32];
+ u8 manufacturer_name[32];
+diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_stats.c b/drivers/infiniband/hw/ocrdma/ocrdma_stats.c
+new file mode 100644
+index 0000000..6b3852a
+--- /dev/null
++++ b/drivers/infiniband/hw/ocrdma/ocrdma_stats.c
+@@ -0,0 +1,623 @@
++/*******************************************************************
++ * This file is part of the Emulex RoCE Device Driver for *
++ * RoCE (RDMA over Converged Ethernet) adapters. *
++ * Copyright (C) 2008-2014 Emulex. All rights reserved. *
++ * EMULEX and SLI are trademarks of Emulex. *
++ * www.emulex.com *
++ * *
++ * This program is free software; you can redistribute it and/or *
++ * modify it under the terms of version 2 of the GNU General *
++ * Public License as published by the Free Software Foundation. *
++ * This program is distributed in the hope that it will be useful. *
++ * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
++ * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
++ * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
++ * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
++ * TO BE LEGALLY INVALID. See the GNU General Public License for *
++ * more details, a copy of which can be found in the file COPYING *
++ * included with this package. *
++ *
++ * Contact Information:
++ * linux-drivers@emulex.com
++ *
++ * Emulex
++ * 3333 Susan Street
++ * Costa Mesa, CA 92626
++ *******************************************************************/
++
++#include <rdma/ib_addr.h>
++#include "ocrdma_stats.h"
++
++static struct dentry *ocrdma_dbgfs_dir;
++
++static int ocrdma_add_stat(char *start, char *pcur,
++ char *name, u64 count)
++{
++ char buff[128] = {0};
++ int cpy_len = 0;
++
++ snprintf(buff, 128, "%s: %llu\n", name, count);
++ cpy_len = strlen(buff);
++
++ if (pcur + cpy_len > start + OCRDMA_MAX_DBGFS_MEM) {
++ pr_err("%s: No space in stats buff\n", __func__);
++ return 0;
++ }
++
++ memcpy(pcur, buff, cpy_len);
++ return cpy_len;
++}
++
++static bool ocrdma_alloc_stats_mem(struct ocrdma_dev *dev)
++{
++ struct stats_mem *mem = &dev->stats_mem;
++
++ /* Alloc mbox command mem*/
++ mem->size = max_t(u32, sizeof(struct ocrdma_rdma_stats_req),
++ sizeof(struct ocrdma_rdma_stats_resp));
++
++ mem->va = dma_alloc_coherent(&dev->nic_info.pdev->dev, mem->size,
++ &mem->pa, GFP_KERNEL);
++ if (!mem->va) {
++ pr_err("%s: stats mbox allocation failed\n", __func__);
++ return false;
++ }
++
++ memset(mem->va, 0, mem->size);
++
++ /* Alloc debugfs mem */
++ mem->debugfs_mem = kzalloc(OCRDMA_MAX_DBGFS_MEM, GFP_KERNEL);
++ if (!mem->debugfs_mem) {
++ pr_err("%s: stats debugfs mem allocation failed\n", __func__);
++ return false;
++ }
++
++ return true;
++}
++
++static void ocrdma_release_stats_mem(struct ocrdma_dev *dev)
++{
++ struct stats_mem *mem = &dev->stats_mem;
++
++ if (mem->va)
++ dma_free_coherent(&dev->nic_info.pdev->dev, mem->size,
++ mem->va, mem->pa);
++ kfree(mem->debugfs_mem);
++}
++
++static char *ocrdma_resource_stats(struct ocrdma_dev *dev)
++{
++ char *stats = dev->stats_mem.debugfs_mem, *pcur;
++ struct ocrdma_rdma_stats_resp *rdma_stats =
++ (struct ocrdma_rdma_stats_resp *)dev->stats_mem.va;
++ struct ocrdma_rsrc_stats *rsrc_stats = &rdma_stats->act_rsrc_stats;
++
++ memset(stats, 0, (OCRDMA_MAX_DBGFS_MEM));
++
++ pcur = stats;
++ pcur += ocrdma_add_stat(stats, pcur, "active_dpp_pds",
++ (u64)rsrc_stats->dpp_pds);
++ pcur += ocrdma_add_stat(stats, pcur, "active_non_dpp_pds",
++ (u64)rsrc_stats->non_dpp_pds);
++ pcur += ocrdma_add_stat(stats, pcur, "active_rc_dpp_qps",
++ (u64)rsrc_stats->rc_dpp_qps);
++ pcur += ocrdma_add_stat(stats, pcur, "active_uc_dpp_qps",
++ (u64)rsrc_stats->uc_dpp_qps);
++ pcur += ocrdma_add_stat(stats, pcur, "active_ud_dpp_qps",
++ (u64)rsrc_stats->ud_dpp_qps);
++ pcur += ocrdma_add_stat(stats, pcur, "active_rc_non_dpp_qps",
++ (u64)rsrc_stats->rc_non_dpp_qps);
++ pcur += ocrdma_add_stat(stats, pcur, "active_uc_non_dpp_qps",
++ (u64)rsrc_stats->uc_non_dpp_qps);
++ pcur += ocrdma_add_stat(stats, pcur, "active_ud_non_dpp_qps",
++ (u64)rsrc_stats->ud_non_dpp_qps);
++ pcur += ocrdma_add_stat(stats, pcur, "active_srqs",
++ (u64)rsrc_stats->srqs);
++ pcur += ocrdma_add_stat(stats, pcur, "active_rbqs",
++ (u64)rsrc_stats->rbqs);
++ pcur += ocrdma_add_stat(stats, pcur, "active_64K_nsmr",
++ (u64)rsrc_stats->r64K_nsmr);
++ pcur += ocrdma_add_stat(stats, pcur, "active_64K_to_2M_nsmr",
++ (u64)rsrc_stats->r64K_to_2M_nsmr);
++ pcur += ocrdma_add_stat(stats, pcur, "active_2M_to_44M_nsmr",
++ (u64)rsrc_stats->r2M_to_44M_nsmr);
++ pcur += ocrdma_add_stat(stats, pcur, "active_44M_to_1G_nsmr",
++ (u64)rsrc_stats->r44M_to_1G_nsmr);
++ pcur += ocrdma_add_stat(stats, pcur, "active_1G_to_4G_nsmr",
++ (u64)rsrc_stats->r1G_to_4G_nsmr);
++ pcur += ocrdma_add_stat(stats, pcur, "active_nsmr_count_4G_to_32G",
++ (u64)rsrc_stats->nsmr_count_4G_to_32G);
++ pcur += ocrdma_add_stat(stats, pcur, "active_32G_to_64G_nsmr",
++ (u64)rsrc_stats->r32G_to_64G_nsmr);
++ pcur += ocrdma_add_stat(stats, pcur, "active_64G_to_128G_nsmr",
++ (u64)rsrc_stats->r64G_to_128G_nsmr);
++ pcur += ocrdma_add_stat(stats, pcur, "active_128G_to_higher_nsmr",
++ (u64)rsrc_stats->r128G_to_higher_nsmr);
++ pcur += ocrdma_add_stat(stats, pcur, "active_embedded_nsmr",
++ (u64)rsrc_stats->embedded_nsmr);
++ pcur += ocrdma_add_stat(stats, pcur, "active_frmr",
++ (u64)rsrc_stats->frmr);
++ pcur += ocrdma_add_stat(stats, pcur, "active_prefetch_qps",
++ (u64)rsrc_stats->prefetch_qps);
++ pcur += ocrdma_add_stat(stats, pcur, "active_ondemand_qps",
++ (u64)rsrc_stats->ondemand_qps);
++ pcur += ocrdma_add_stat(stats, pcur, "active_phy_mr",
++ (u64)rsrc_stats->phy_mr);
++ pcur += ocrdma_add_stat(stats, pcur, "active_mw",
++ (u64)rsrc_stats->mw);
++
++ /* Print the threshold stats */
++ rsrc_stats = &rdma_stats->th_rsrc_stats;
++
++ pcur += ocrdma_add_stat(stats, pcur, "threshold_dpp_pds",
++ (u64)rsrc_stats->dpp_pds);
++ pcur += ocrdma_add_stat(stats, pcur, "threshold_non_dpp_pds",
++ (u64)rsrc_stats->non_dpp_pds);
++ pcur += ocrdma_add_stat(stats, pcur, "threshold_rc_dpp_qps",
++ (u64)rsrc_stats->rc_dpp_qps);
++ pcur += ocrdma_add_stat(stats, pcur, "threshold_uc_dpp_qps",
++ (u64)rsrc_stats->uc_dpp_qps);
++ pcur += ocrdma_add_stat(stats, pcur, "threshold_ud_dpp_qps",
++ (u64)rsrc_stats->ud_dpp_qps);
++ pcur += ocrdma_add_stat(stats, pcur, "threshold_rc_non_dpp_qps",
++ (u64)rsrc_stats->rc_non_dpp_qps);
++ pcur += ocrdma_add_stat(stats, pcur, "threshold_uc_non_dpp_qps",
++ (u64)rsrc_stats->uc_non_dpp_qps);
++ pcur += ocrdma_add_stat(stats, pcur, "threshold_ud_non_dpp_qps",
++ (u64)rsrc_stats->ud_non_dpp_qps);
++ pcur += ocrdma_add_stat(stats, pcur, "threshold_srqs",
++ (u64)rsrc_stats->srqs);
++ pcur += ocrdma_add_stat(stats, pcur, "threshold_rbqs",
++ (u64)rsrc_stats->rbqs);
++ pcur += ocrdma_add_stat(stats, pcur, "threshold_64K_nsmr",
++ (u64)rsrc_stats->r64K_nsmr);
++ pcur += ocrdma_add_stat(stats, pcur, "threshold_64K_to_2M_nsmr",
++ (u64)rsrc_stats->r64K_to_2M_nsmr);
++ pcur += ocrdma_add_stat(stats, pcur, "threshold_2M_to_44M_nsmr",
++ (u64)rsrc_stats->r2M_to_44M_nsmr);
++ pcur += ocrdma_add_stat(stats, pcur, "threshold_44M_to_1G_nsmr",
++ (u64)rsrc_stats->r44M_to_1G_nsmr);
++ pcur += ocrdma_add_stat(stats, pcur, "threshold_1G_to_4G_nsmr",
++ (u64)rsrc_stats->r1G_to_4G_nsmr);
++ pcur += ocrdma_add_stat(stats, pcur, "threshold_nsmr_count_4G_to_32G",
++ (u64)rsrc_stats->nsmr_count_4G_to_32G);
++ pcur += ocrdma_add_stat(stats, pcur, "threshold_32G_to_64G_nsmr",
++ (u64)rsrc_stats->r32G_to_64G_nsmr);
++ pcur += ocrdma_add_stat(stats, pcur, "threshold_64G_to_128G_nsmr",
++ (u64)rsrc_stats->r64G_to_128G_nsmr);
++ pcur += ocrdma_add_stat(stats, pcur, "threshold_128G_to_higher_nsmr",
++ (u64)rsrc_stats->r128G_to_higher_nsmr);
++ pcur += ocrdma_add_stat(stats, pcur, "threshold_embedded_nsmr",
++ (u64)rsrc_stats->embedded_nsmr);
++ pcur += ocrdma_add_stat(stats, pcur, "threshold_frmr",
++ (u64)rsrc_stats->frmr);
++ pcur += ocrdma_add_stat(stats, pcur, "threshold_prefetch_qps",
++ (u64)rsrc_stats->prefetch_qps);
++ pcur += ocrdma_add_stat(stats, pcur, "threshold_ondemand_qps",
++ (u64)rsrc_stats->ondemand_qps);
++ pcur += ocrdma_add_stat(stats, pcur, "threshold_phy_mr",
++ (u64)rsrc_stats->phy_mr);
++ pcur += ocrdma_add_stat(stats, pcur, "threshold_mw",
++ (u64)rsrc_stats->mw);
++ return stats;
++}
++
++static char *ocrdma_rx_stats(struct ocrdma_dev *dev)
++{
++ char *stats = dev->stats_mem.debugfs_mem, *pcur;
++ struct ocrdma_rdma_stats_resp *rdma_stats =
++ (struct ocrdma_rdma_stats_resp *)dev->stats_mem.va;
++ struct ocrdma_rx_stats *rx_stats = &rdma_stats->rx_stats;
++
++ memset(stats, 0, (OCRDMA_MAX_DBGFS_MEM));
++
++ pcur = stats;
++ pcur += ocrdma_add_stat
++ (stats, pcur, "roce_frame_bytes",
++ convert_to_64bit(rx_stats->roce_frame_bytes_lo,
++ rx_stats->roce_frame_bytes_hi));
++ pcur += ocrdma_add_stat(stats, pcur, "roce_frame_icrc_drops",
++ (u64)rx_stats->roce_frame_icrc_drops);
++ pcur += ocrdma_add_stat(stats, pcur, "roce_frame_payload_len_drops",
++ (u64)rx_stats->roce_frame_payload_len_drops);
++ pcur += ocrdma_add_stat(stats, pcur, "ud_drops",
++ (u64)rx_stats->ud_drops);
++ pcur += ocrdma_add_stat(stats, pcur, "qp1_drops",
++ (u64)rx_stats->qp1_drops);
++ pcur += ocrdma_add_stat(stats, pcur, "psn_error_request_packets",
++ (u64)rx_stats->psn_error_request_packets);
++ pcur += ocrdma_add_stat(stats, pcur, "psn_error_resp_packets",
++ (u64)rx_stats->psn_error_resp_packets);
++ pcur += ocrdma_add_stat(stats, pcur, "rnr_nak_timeouts",
++ (u64)rx_stats->rnr_nak_timeouts);
++ pcur += ocrdma_add_stat(stats, pcur, "rnr_nak_receives",
++ (u64)rx_stats->rnr_nak_receives);
++ pcur += ocrdma_add_stat(stats, pcur, "roce_frame_rxmt_drops",
++ (u64)rx_stats->roce_frame_rxmt_drops);
++ pcur += ocrdma_add_stat(stats, pcur, "nak_count_psn_sequence_errors",
++ (u64)rx_stats->nak_count_psn_sequence_errors);
++ pcur += ocrdma_add_stat(stats, pcur, "rc_drop_count_lookup_errors",
++ (u64)rx_stats->rc_drop_count_lookup_errors);
++ pcur += ocrdma_add_stat(stats, pcur, "rq_rnr_naks",
++ (u64)rx_stats->rq_rnr_naks);
++ pcur += ocrdma_add_stat(stats, pcur, "srq_rnr_naks",
++ (u64)rx_stats->srq_rnr_naks);
++ pcur += ocrdma_add_stat(stats, pcur, "roce_frames",
++ convert_to_64bit(rx_stats->roce_frames_lo,
++ rx_stats->roce_frames_hi));
++
++ return stats;
++}
++
++static char *ocrdma_tx_stats(struct ocrdma_dev *dev)
++{
++ char *stats = dev->stats_mem.debugfs_mem, *pcur;
++ struct ocrdma_rdma_stats_resp *rdma_stats =
++ (struct ocrdma_rdma_stats_resp *)dev->stats_mem.va;
++ struct ocrdma_tx_stats *tx_stats = &rdma_stats->tx_stats;
++
++ memset(stats, 0, (OCRDMA_MAX_DBGFS_MEM));
++
++ pcur = stats;
++ pcur += ocrdma_add_stat(stats, pcur, "send_pkts",
++ convert_to_64bit(tx_stats->send_pkts_lo,
++ tx_stats->send_pkts_hi));
++ pcur += ocrdma_add_stat(stats, pcur, "write_pkts",
++ convert_to_64bit(tx_stats->write_pkts_lo,
++ tx_stats->write_pkts_hi));
++ pcur += ocrdma_add_stat(stats, pcur, "read_pkts",
++ convert_to_64bit(tx_stats->read_pkts_lo,
++ tx_stats->read_pkts_hi));
++ pcur += ocrdma_add_stat(stats, pcur, "read_rsp_pkts",
++ convert_to_64bit(tx_stats->read_rsp_pkts_lo,
++ tx_stats->read_rsp_pkts_hi));
++ pcur += ocrdma_add_stat(stats, pcur, "ack_pkts",
++ convert_to_64bit(tx_stats->ack_pkts_lo,
++ tx_stats->ack_pkts_hi));
++ pcur += ocrdma_add_stat(stats, pcur, "send_bytes",
++ convert_to_64bit(tx_stats->send_bytes_lo,
++ tx_stats->send_bytes_hi));
++ pcur += ocrdma_add_stat(stats, pcur, "write_bytes",
++ convert_to_64bit(tx_stats->write_bytes_lo,
++ tx_stats->write_bytes_hi));
++ pcur += ocrdma_add_stat(stats, pcur, "read_req_bytes",
++ convert_to_64bit(tx_stats->read_req_bytes_lo,
++ tx_stats->read_req_bytes_hi));
++ pcur += ocrdma_add_stat(stats, pcur, "read_rsp_bytes",
++ convert_to_64bit(tx_stats->read_rsp_bytes_lo,
++ tx_stats->read_rsp_bytes_hi));
++ pcur += ocrdma_add_stat(stats, pcur, "ack_timeouts",
++ (u64)tx_stats->ack_timeouts);
++
++ return stats;
++}
++
++static char *ocrdma_wqe_stats(struct ocrdma_dev *dev)
++{
++ char *stats = dev->stats_mem.debugfs_mem, *pcur;
++ struct ocrdma_rdma_stats_resp *rdma_stats =
++ (struct ocrdma_rdma_stats_resp *)dev->stats_mem.va;
++ struct ocrdma_wqe_stats *wqe_stats = &rdma_stats->wqe_stats;
++
++ memset(stats, 0, (OCRDMA_MAX_DBGFS_MEM));
++
++ pcur = stats;
++ pcur += ocrdma_add_stat(stats, pcur, "large_send_rc_wqes",
++ convert_to_64bit(wqe_stats->large_send_rc_wqes_lo,
++ wqe_stats->large_send_rc_wqes_hi));
++ pcur += ocrdma_add_stat(stats, pcur, "large_write_rc_wqes",
++ convert_to_64bit(wqe_stats->large_write_rc_wqes_lo,
++ wqe_stats->large_write_rc_wqes_hi));
++ pcur += ocrdma_add_stat(stats, pcur, "read_wqes",
++ convert_to_64bit(wqe_stats->read_wqes_lo,
++ wqe_stats->read_wqes_hi));
++ pcur += ocrdma_add_stat(stats, pcur, "frmr_wqes",
++ convert_to_64bit(wqe_stats->frmr_wqes_lo,
++ wqe_stats->frmr_wqes_hi));
++ pcur += ocrdma_add_stat(stats, pcur, "mw_bind_wqes",
++ convert_to_64bit(wqe_stats->mw_bind_wqes_lo,
++ wqe_stats->mw_bind_wqes_hi));
++ pcur += ocrdma_add_stat(stats, pcur, "invalidate_wqes",
++ convert_to_64bit(wqe_stats->invalidate_wqes_lo,
++ wqe_stats->invalidate_wqes_hi));
++ pcur += ocrdma_add_stat(stats, pcur, "dpp_wqe_drops",
++ (u64)wqe_stats->dpp_wqe_drops);
++ return stats;
++}
++
++static char *ocrdma_db_errstats(struct ocrdma_dev *dev)
++{
++ char *stats = dev->stats_mem.debugfs_mem, *pcur;
++ struct ocrdma_rdma_stats_resp *rdma_stats =
++ (struct ocrdma_rdma_stats_resp *)dev->stats_mem.va;
++ struct ocrdma_db_err_stats *db_err_stats = &rdma_stats->db_err_stats;
++
++ memset(stats, 0, (OCRDMA_MAX_DBGFS_MEM));
++
++ pcur = stats;
++ pcur += ocrdma_add_stat(stats, pcur, "sq_doorbell_errors",
++ (u64)db_err_stats->sq_doorbell_errors);
++ pcur += ocrdma_add_stat(stats, pcur, "cq_doorbell_errors",
++ (u64)db_err_stats->cq_doorbell_errors);
++ pcur += ocrdma_add_stat(stats, pcur, "rq_srq_doorbell_errors",
++ (u64)db_err_stats->rq_srq_doorbell_errors);
++ pcur += ocrdma_add_stat(stats, pcur, "cq_overflow_errors",
++ (u64)db_err_stats->cq_overflow_errors);
++ return stats;
++}
++
++static char *ocrdma_rxqp_errstats(struct ocrdma_dev *dev)
++{
++ char *stats = dev->stats_mem.debugfs_mem, *pcur;
++ struct ocrdma_rdma_stats_resp *rdma_stats =
++ (struct ocrdma_rdma_stats_resp *)dev->stats_mem.va;
++ struct ocrdma_rx_qp_err_stats *rx_qp_err_stats =
++ &rdma_stats->rx_qp_err_stats;
++
++ memset(stats, 0, (OCRDMA_MAX_DBGFS_MEM));
++
++ pcur = stats;
++ pcur += ocrdma_add_stat(stats, pcur, "nak_invalid_requst_errors",
++ (u64)rx_qp_err_stats->nak_invalid_requst_errors);
++ pcur += ocrdma_add_stat(stats, pcur, "nak_remote_operation_errors",
++ (u64)rx_qp_err_stats->nak_remote_operation_errors);
++ pcur += ocrdma_add_stat(stats, pcur, "nak_count_remote_access_errors",
++ (u64)rx_qp_err_stats->nak_count_remote_access_errors);
++ pcur += ocrdma_add_stat(stats, pcur, "local_length_errors",
++ (u64)rx_qp_err_stats->local_length_errors);
++ pcur += ocrdma_add_stat(stats, pcur, "local_protection_errors",
++ (u64)rx_qp_err_stats->local_protection_errors);
++ pcur += ocrdma_add_stat(stats, pcur, "local_qp_operation_errors",
++ (u64)rx_qp_err_stats->local_qp_operation_errors);
++ return stats;
++}
++
++static char *ocrdma_txqp_errstats(struct ocrdma_dev *dev)
++{
++ char *stats = dev->stats_mem.debugfs_mem, *pcur;
++ struct ocrdma_rdma_stats_resp *rdma_stats =
++ (struct ocrdma_rdma_stats_resp *)dev->stats_mem.va;
++ struct ocrdma_tx_qp_err_stats *tx_qp_err_stats =
++ &rdma_stats->tx_qp_err_stats;
++
++ memset(stats, 0, (OCRDMA_MAX_DBGFS_MEM));
++
++ pcur = stats;
++ pcur += ocrdma_add_stat(stats, pcur, "local_length_errors",
++ (u64)tx_qp_err_stats->local_length_errors);
++ pcur += ocrdma_add_stat(stats, pcur, "local_protection_errors",
++ (u64)tx_qp_err_stats->local_protection_errors);
++ pcur += ocrdma_add_stat(stats, pcur, "local_qp_operation_errors",
++ (u64)tx_qp_err_stats->local_qp_operation_errors);
++ pcur += ocrdma_add_stat(stats, pcur, "retry_count_exceeded_errors",
++ (u64)tx_qp_err_stats->retry_count_exceeded_errors);
++ pcur += ocrdma_add_stat(stats, pcur, "rnr_retry_count_exceeded_errors",
++ (u64)tx_qp_err_stats->rnr_retry_count_exceeded_errors);
++ return stats;
++}
++
++static char *ocrdma_tx_dbg_stats(struct ocrdma_dev *dev)
++{
++ int i;
++ char *pstats = dev->stats_mem.debugfs_mem;
++ struct ocrdma_rdma_stats_resp *rdma_stats =
++ (struct ocrdma_rdma_stats_resp *)dev->stats_mem.va;
++ struct ocrdma_tx_dbg_stats *tx_dbg_stats =
++ &rdma_stats->tx_dbg_stats;
++
++ memset(pstats, 0, (OCRDMA_MAX_DBGFS_MEM));
++
++ for (i = 0; i < 100; i++)
++ pstats += snprintf(pstats, 80, "DW[%d] = 0x%x\n", i,
++ tx_dbg_stats->data[i]);
++
++ return dev->stats_mem.debugfs_mem;
++}
++
++static char *ocrdma_rx_dbg_stats(struct ocrdma_dev *dev)
++{
++ int i;
++ char *pstats = dev->stats_mem.debugfs_mem;
++ struct ocrdma_rdma_stats_resp *rdma_stats =
++ (struct ocrdma_rdma_stats_resp *)dev->stats_mem.va;
++ struct ocrdma_rx_dbg_stats *rx_dbg_stats =
++ &rdma_stats->rx_dbg_stats;
++
++ memset(pstats, 0, (OCRDMA_MAX_DBGFS_MEM));
++
++ for (i = 0; i < 200; i++)
++ pstats += snprintf(pstats, 80, "DW[%d] = 0x%x\n", i,
++ rx_dbg_stats->data[i]);
++
++ return dev->stats_mem.debugfs_mem;
++}
++
++static void ocrdma_update_stats(struct ocrdma_dev *dev)
++{
++ ulong now = jiffies, secs;
++ int status = 0;
++
++ secs = jiffies_to_msecs(now - dev->last_stats_time) / 1000U;
++ if (secs) {
++ /* update */
++ status = ocrdma_mbx_rdma_stats(dev, false);
++ if (status)
++ pr_err("%s: stats mbox failed with status = %d\n",
++ __func__, status);
++ dev->last_stats_time = jiffies;
++ }
++}
++
++static ssize_t ocrdma_dbgfs_ops_read(struct file *filp, char __user *buffer,
++ size_t usr_buf_len, loff_t *ppos)
++{
++ struct ocrdma_stats *pstats = filp->private_data;
++ struct ocrdma_dev *dev = pstats->dev;
++ ssize_t status = 0;
++ char *data = NULL;
++
++ /* No partial reads */
++ if (*ppos != 0)
++ return 0;
++
++ mutex_lock(&dev->stats_lock);
++
++ ocrdma_update_stats(dev);
++
++ switch (pstats->type) {
++ case OCRDMA_RSRC_STATS:
++ data = ocrdma_resource_stats(dev);
++ break;
++ case OCRDMA_RXSTATS:
++ data = ocrdma_rx_stats(dev);
++ break;
++ case OCRDMA_WQESTATS:
++ data = ocrdma_wqe_stats(dev);
++ break;
++ case OCRDMA_TXSTATS:
++ data = ocrdma_tx_stats(dev);
++ break;
++ case OCRDMA_DB_ERRSTATS:
++ data = ocrdma_db_errstats(dev);
++ break;
++ case OCRDMA_RXQP_ERRSTATS:
++ data = ocrdma_rxqp_errstats(dev);
++ break;
++ case OCRDMA_TXQP_ERRSTATS:
++ data = ocrdma_txqp_errstats(dev);
++ break;
++ case OCRDMA_TX_DBG_STATS:
++ data = ocrdma_tx_dbg_stats(dev);
++ break;
++ case OCRDMA_RX_DBG_STATS:
++ data = ocrdma_rx_dbg_stats(dev);
++ break;
++
++ default:
++ status = -EFAULT;
++ goto exit;
++ }
++
++ if (usr_buf_len < strlen(data)) {
++ status = -ENOSPC;
++ goto exit;
++ }
++
++ status = simple_read_from_buffer(buffer, usr_buf_len, ppos, data,
++ strlen(data));
++exit:
++ mutex_unlock(&dev->stats_lock);
++ return status;
++}
++
++int ocrdma_debugfs_open(struct inode *inode, struct file *file)
++{
++ if (inode->i_private)
++ file->private_data = inode->i_private;
++ return 0;
++}
++
++static const struct file_operations ocrdma_dbg_ops = {
++ .owner = THIS_MODULE,
++ .open = ocrdma_debugfs_open,
++ .read = ocrdma_dbgfs_ops_read,
++};
++
++void ocrdma_add_port_stats(struct ocrdma_dev *dev)
++{
++ if (!ocrdma_dbgfs_dir)
++ return;
++
++ /* Create post stats base dir */
++ dev->dir = debugfs_create_dir(dev->ibdev.name, ocrdma_dbgfs_dir);
++ if (!dev->dir)
++ goto err;
++
++ dev->rsrc_stats.type = OCRDMA_RSRC_STATS;
++ dev->rsrc_stats.dev = dev;
++ if (!debugfs_create_file("resource_stats", S_IRUSR, dev->dir,
++ &dev->rsrc_stats, &ocrdma_dbg_ops))
++ goto err;
++
++ dev->rx_stats.type = OCRDMA_RXSTATS;
++ dev->rx_stats.dev = dev;
++ if (!debugfs_create_file("rx_stats", S_IRUSR, dev->dir,
++ &dev->rx_stats, &ocrdma_dbg_ops))
++ goto err;
++
++ dev->wqe_stats.type = OCRDMA_WQESTATS;
++ dev->wqe_stats.dev = dev;
++ if (!debugfs_create_file("wqe_stats", S_IRUSR, dev->dir,
++ &dev->wqe_stats, &ocrdma_dbg_ops))
++ goto err;
++
++ dev->tx_stats.type = OCRDMA_TXSTATS;
++ dev->tx_stats.dev = dev;
++ if (!debugfs_create_file("tx_stats", S_IRUSR, dev->dir,
++ &dev->tx_stats, &ocrdma_dbg_ops))
++ goto err;
++
++ dev->db_err_stats.type = OCRDMA_DB_ERRSTATS;
++ dev->db_err_stats.dev = dev;
++ if (!debugfs_create_file("db_err_stats", S_IRUSR, dev->dir,
++ &dev->db_err_stats, &ocrdma_dbg_ops))
++ goto err;
++
++
++ dev->tx_qp_err_stats.type = OCRDMA_TXQP_ERRSTATS;
++ dev->tx_qp_err_stats.dev = dev;
++ if (!debugfs_create_file("tx_qp_err_stats", S_IRUSR, dev->dir,
++ &dev->tx_qp_err_stats, &ocrdma_dbg_ops))
++ goto err;
++
++ dev->rx_qp_err_stats.type = OCRDMA_RXQP_ERRSTATS;
++ dev->rx_qp_err_stats.dev = dev;
++ if (!debugfs_create_file("rx_qp_err_stats", S_IRUSR, dev->dir,
++ &dev->rx_qp_err_stats, &ocrdma_dbg_ops))
++ goto err;
++
++
++ dev->tx_dbg_stats.type = OCRDMA_TX_DBG_STATS;
++ dev->tx_dbg_stats.dev = dev;
++ if (!debugfs_create_file("tx_dbg_stats", S_IRUSR, dev->dir,
++ &dev->tx_dbg_stats, &ocrdma_dbg_ops))
++ goto err;
++
++ dev->rx_dbg_stats.type = OCRDMA_RX_DBG_STATS;
++ dev->rx_dbg_stats.dev = dev;
++ if (!debugfs_create_file("rx_dbg_stats", S_IRUSR, dev->dir,
++ &dev->rx_dbg_stats, &ocrdma_dbg_ops))
++ goto err;
++
++ /* Now create dma_mem for stats mbx command */
++ if (!ocrdma_alloc_stats_mem(dev))
++ goto err;
++
++ mutex_init(&dev->stats_lock);
++
++ return;
++err:
++ ocrdma_release_stats_mem(dev);
++ debugfs_remove_recursive(dev->dir);
++ dev->dir = NULL;
++}
++
++void ocrdma_rem_port_stats(struct ocrdma_dev *dev)
++{
++ if (!dev->dir)
++ return;
++ mutex_destroy(&dev->stats_lock);
++ ocrdma_release_stats_mem(dev);
++ debugfs_remove(dev->dir);
++}
++
++void ocrdma_init_debugfs(void)
++{
++ /* Create base dir in debugfs root dir */
++ ocrdma_dbgfs_dir = debugfs_create_dir("ocrdma", NULL);
++}
++
++void ocrdma_rem_debugfs(void)
++{
++ debugfs_remove_recursive(ocrdma_dbgfs_dir);
++}
+diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_stats.h b/drivers/infiniband/hw/ocrdma/ocrdma_stats.h
+new file mode 100644
+index 0000000..5f5e20c
+--- /dev/null
++++ b/drivers/infiniband/hw/ocrdma/ocrdma_stats.h
+@@ -0,0 +1,54 @@
++/*******************************************************************
++ * This file is part of the Emulex RoCE Device Driver for *
++ * RoCE (RDMA over Converged Ethernet) adapters. *
++ * Copyright (C) 2008-2014 Emulex. All rights reserved. *
++ * EMULEX and SLI are trademarks of Emulex. *
++ * www.emulex.com *
++ * *
++ * This program is free software; you can redistribute it and/or *
++ * modify it under the terms of version 2 of the GNU General *
++ * Public License as published by the Free Software Foundation. *
++ * This program is distributed in the hope that it will be useful. *
++ * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
++ * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
++ * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
++ * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
++ * TO BE LEGALLY INVALID. See the GNU General Public License for *
++ * more details, a copy of which can be found in the file COPYING *
++ * included with this package. *
++ *
++ * Contact Information:
++ * linux-drivers@emulex.com
++ *
++ * Emulex
++ * 3333 Susan Street
++ * Costa Mesa, CA 92626
++ *******************************************************************/
++
++#ifndef __OCRDMA_STATS_H__
++#define __OCRDMA_STATS_H__
++
++#include <linux/debugfs.h>
++#include "ocrdma.h"
++#include "ocrdma_hw.h"
++
++#define OCRDMA_MAX_DBGFS_MEM 4096
++
++enum OCRDMA_STATS_TYPE {
++ OCRDMA_RSRC_STATS,
++ OCRDMA_RXSTATS,
++ OCRDMA_WQESTATS,
++ OCRDMA_TXSTATS,
++ OCRDMA_DB_ERRSTATS,
++ OCRDMA_RXQP_ERRSTATS,
++ OCRDMA_TXQP_ERRSTATS,
++ OCRDMA_TX_DBG_STATS,
++ OCRDMA_RX_DBG_STATS
++};
++
++void ocrdma_rem_debugfs(void);
++void ocrdma_init_debugfs(void);
++void ocrdma_rem_port_stats(struct ocrdma_dev *dev);
++void ocrdma_add_port_stats(struct ocrdma_dev *dev);
++
++#endif /* __OCRDMA_STATS_H__ */
+--
+1.7.1
+
--- /dev/null
+From 7e7996497f57550a5d0b100eff66cf668e22f3e6 Mon Sep 17 00:00:00 2001
+From: Devesh Sharma <devesh.sharma@emulex.com>
+Date: Tue, 18 Feb 2014 16:49:54 +0530
+Subject: [PATCH 15/16] RDMA/ocrdma: Display fw version
+
+Adding a sysfs file for getting the FW version.
+
+Signed-off-by: Selvin Xavier <selvin.xavier@emulex.com>
+Signed-off-by: Devesh Sharma <devesh.sharma@emulex.com>
+---
+ drivers/infiniband/hw/ocrdma/ocrdma_main.c | 41 +++++++++++++++++++++++++++-
+ 1 files changed, 40 insertions(+), 1 deletions(-)
+
+diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_main.c b/drivers/infiniband/hw/ocrdma/ocrdma_main.c
+index 95b364d..72389f6 100644
+--- a/drivers/infiniband/hw/ocrdma/ocrdma_main.c
++++ b/drivers/infiniband/hw/ocrdma/ocrdma_main.c
+@@ -399,9 +399,42 @@ static void ocrdma_free_resources(struct ocrdma_dev *dev)
+ kfree(dev->sgid_tbl);
+ }
+
++/* OCRDMA sysfs interface */
++static ssize_t show_rev(struct device *device, struct device_attribute *attr,
++ char *buf)
++{
++ struct ocrdma_dev *dev = dev_get_drvdata(device);
++
++ return scnprintf(buf, PAGE_SIZE, "0x%x\n", dev->nic_info.pdev->vendor);
++}
++
++static ssize_t show_fw_ver(struct device *device, struct device_attribute *attr,
++ char *buf)
++{
++ struct ocrdma_dev *dev = dev_get_drvdata(device);
++
++ return scnprintf(buf, PAGE_SIZE, "%s\n", &dev->attr.fw_ver[0]);
++}
++
++static DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL);
++static DEVICE_ATTR(fw_ver, S_IRUGO, show_fw_ver, NULL);
++
++static struct device_attribute *ocrdma_attributes[] = {
++ &dev_attr_hw_rev,
++ &dev_attr_fw_ver
++};
++
++static void ocrdma_remove_sysfiles(struct ocrdma_dev *dev)
++{
++ int i;
++
++ for (i = 0; i < ARRAY_SIZE(ocrdma_attributes); i++)
++ device_remove_file(&dev->ibdev.dev, ocrdma_attributes[i]);
++}
++
+ static struct ocrdma_dev *ocrdma_add(struct be_dev_info *dev_info)
+ {
+- int status = 0;
++ int status = 0, i;
+ struct ocrdma_dev *dev;
+
+ dev = (struct ocrdma_dev *)ib_alloc_device(sizeof(struct ocrdma_dev));
+@@ -434,6 +467,9 @@ static struct ocrdma_dev *ocrdma_add(struct be_dev_info *dev_info)
+ if (status)
+ goto alloc_err;
+
++ for (i = 0; i < ARRAY_SIZE(ocrdma_attributes); i++)
++ if (device_create_file(&dev->ibdev.dev, ocrdma_attributes[i]))
++ goto sysfs_err;
+ spin_lock(&ocrdma_devlist_lock);
+ list_add_tail_rcu(&dev->entry, &ocrdma_dev_list);
+ spin_unlock(&ocrdma_devlist_lock);
+@@ -448,6 +484,8 @@ static struct ocrdma_dev *ocrdma_add(struct be_dev_info *dev_info)
+ dev_name(&dev->nic_info.pdev->dev), dev->id);
+ return dev;
+
++sysfs_err:
++ ocrdma_remove_sysfiles(dev);
+ alloc_err:
+ ocrdma_free_resources(dev);
+ ocrdma_cleanup_hw(dev);
+@@ -477,6 +515,7 @@ static void ocrdma_remove(struct ocrdma_dev *dev)
+ /* first unregister with stack to stop all the active traffic
+ * of the registered clients.
+ */
++ ocrdma_remove_sysfiles(dev);
+ ocrdma_rem_port_stats(dev);
+ ib_unregister_device(&dev->ibdev);
+
+--
+1.7.1
+
--- /dev/null
+From 50e79c44289926c546c5d3467e8ad268b6e66d78 Mon Sep 17 00:00:00 2001
+From: Devesh Sharma <Devesh.Sharma@Emulex.Com>
+Date: Mon, 3 Feb 2014 19:03:44 +0530
+Subject: [PATCH 16/16] RDMA/ocrdma: code clean-up
+
+Driver code is cleaned up and a couple of cosmetic changes are introduced.
+Also, moving the GSI QP to the error state during ocrdma_close is fixed.
+
+Signed-off-by: Devesh Sharma <devesh.sharma@emulex.com>
+Signed-off-by: Selvin Xavier <selvin.xavier@emulex.com>
+---
+ drivers/infiniband/hw/ocrdma/ocrdma.h | 3 +--
+ drivers/infiniband/hw/ocrdma/ocrdma_abi.h | 4 +---
+ drivers/infiniband/hw/ocrdma/ocrdma_ah.c | 2 +-
+ drivers/infiniband/hw/ocrdma/ocrdma_hw.c | 20 ++++++++------------
+ drivers/infiniband/hw/ocrdma/ocrdma_main.c | 2 +-
+ drivers/infiniband/hw/ocrdma/ocrdma_sli.h | 16 ++++++++--------
+ drivers/infiniband/hw/ocrdma/ocrdma_verbs.c | 22 +++-------------------
+ 7 files changed, 23 insertions(+), 46 deletions(-)
+
+diff --git a/drivers/infiniband/hw/ocrdma/ocrdma.h b/drivers/infiniband/hw/ocrdma/ocrdma.h
+index 15c8ee4..ad9a227 100644
+--- a/drivers/infiniband/hw/ocrdma/ocrdma.h
++++ b/drivers/infiniband/hw/ocrdma/ocrdma.h
+@@ -35,6 +35,7 @@
+
+ #include <rdma/ib_verbs.h>
+ #include <rdma/ib_user_verbs.h>
++#include <rdma/ib_addr.h>
+
+ #include <be_roce.h>
+ #include "ocrdma_sli.h"
+@@ -261,7 +262,6 @@ struct ocrdma_cq {
+
+ struct ocrdma_pd {
+ struct ib_pd ibpd;
+- struct ocrdma_dev *dev;
+ struct ocrdma_ucontext *uctx;
+ u32 id;
+ int num_dpp_qp;
+@@ -346,7 +346,6 @@ struct ocrdma_qp {
+ bool dpp_enabled;
+ u8 *ird_q_va;
+ bool signaled;
+- u16 db_cache;
+ };
+
+ struct ocrdma_hw_mr {
+diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_abi.h b/drivers/infiniband/hw/ocrdma/ocrdma_abi.h
+index 5a82ce5..1554cca 100644
+--- a/drivers/infiniband/hw/ocrdma/ocrdma_abi.h
++++ b/drivers/infiniband/hw/ocrdma/ocrdma_abi.h
+@@ -108,9 +108,7 @@ struct ocrdma_create_qp_uresp {
+ u32 db_sq_offset;
+ u32 db_rq_offset;
+ u32 db_shift;
+- u64 rsvd1;
+- u64 rsvd2;
+- u64 rsvd3;
++ u64 rsvd[11];
+ } __packed;
+
+ struct ocrdma_create_srq_uresp {
+diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_ah.c b/drivers/infiniband/hw/ocrdma/ocrdma_ah.c
+index 69da5dd..a507972 100644
+--- a/drivers/infiniband/hw/ocrdma/ocrdma_ah.c
++++ b/drivers/infiniband/hw/ocrdma/ocrdma_ah.c
+@@ -99,7 +99,7 @@ struct ib_ah *ocrdma_create_ah(struct ib_pd *ibpd, struct ib_ah_attr *attr)
+ if (!(attr->ah_flags & IB_AH_GRH))
+ return ERR_PTR(-EINVAL);
+
+- ah = kzalloc(sizeof *ah, GFP_ATOMIC);
++ ah = kzalloc(sizeof(*ah), GFP_ATOMIC);
+ if (!ah)
+ return ERR_PTR(-ENOMEM);
+
+diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_hw.c b/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
+index 3642383..ef630b0 100644
+--- a/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
++++ b/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
+@@ -32,7 +32,6 @@
+
+ #include <rdma/ib_verbs.h>
+ #include <rdma/ib_user_verbs.h>
+-#include <rdma/ib_addr.h>
+
+ #include "ocrdma.h"
+ #include "ocrdma_hw.h"
+@@ -386,8 +385,8 @@ static void ocrdma_build_q_pages(struct ocrdma_pa *q_pa, int cnt,
+ }
+ }
+
+-static int ocrdma_mbx_delete_q(struct ocrdma_dev *dev, struct ocrdma_queue_info *q,
+- int queue_type)
++static int ocrdma_mbx_delete_q(struct ocrdma_dev *dev,
++ struct ocrdma_queue_info *q, int queue_type)
+ {
+ u8 opcode = 0;
+ int status;
+@@ -778,7 +777,6 @@ static void ocrdma_process_grp5_aync(struct ocrdma_dev *dev,
+ }
+ }
+
+-
+ static void ocrdma_process_acqe(struct ocrdma_dev *dev, void *ae_cqe)
+ {
+ /* async CQE processing */
+@@ -825,8 +823,6 @@ static int ocrdma_mq_cq_handler(struct ocrdma_dev *dev, u16 cq_id)
+ ocrdma_process_acqe(dev, cqe);
+ else if (cqe->valid_ae_cmpl_cons & OCRDMA_MCQE_CMPL_MASK)
+ ocrdma_process_mcqe(dev, cqe);
+- else
+- pr_err("%s() cqe->compl is not set.\n", __func__);
+ memset(cqe, 0, sizeof(struct ocrdma_mcqe));
+ ocrdma_mcq_inc_tail(dev);
+ }
+@@ -1050,6 +1046,9 @@ static void ocrdma_get_attr(struct ocrdma_dev *dev,
+ attr->max_qp =
+ (rsp->qp_srq_cq_ird_ord & OCRDMA_MBX_QUERY_CFG_MAX_QP_MASK) >>
+ OCRDMA_MBX_QUERY_CFG_MAX_QP_SHIFT;
++ attr->max_srq =
++ (rsp->max_srq_rpir_qps & OCRDMA_MBX_QUERY_CFG_MAX_SRQ_MASK) >>
++ OCRDMA_MBX_QUERY_CFG_MAX_SRQ_OFFSET;
+ attr->max_send_sge = ((rsp->max_write_send_sge &
+ OCRDMA_MBX_QUERY_CFG_MAX_SEND_SGE_MASK) >>
+ OCRDMA_MBX_QUERY_CFG_MAX_SEND_SGE_SHIFT);
+@@ -1065,9 +1064,6 @@ static void ocrdma_get_attr(struct ocrdma_dev *dev,
+ attr->max_ord_per_qp = (rsp->max_ird_ord_per_qp &
+ OCRDMA_MBX_QUERY_CFG_MAX_ORD_PER_QP_MASK) >>
+ OCRDMA_MBX_QUERY_CFG_MAX_ORD_PER_QP_SHIFT;
+- attr->max_srq =
+- (rsp->max_srq_rpir_qps & OCRDMA_MBX_QUERY_CFG_MAX_SRQ_MASK) >>
+- OCRDMA_MBX_QUERY_CFG_MAX_SRQ_OFFSET;
+ attr->max_ird_per_qp = (rsp->max_ird_ord_per_qp &
+ OCRDMA_MBX_QUERY_CFG_MAX_IRD_PER_QP_MASK) >>
+ OCRDMA_MBX_QUERY_CFG_MAX_IRD_PER_QP_SHIFT;
+@@ -1411,7 +1407,7 @@ static int ocrdma_build_q_conf(u32 *num_entries, int entry_size,
+
+ static int ocrdma_mbx_create_ah_tbl(struct ocrdma_dev *dev)
+ {
+- int i ;
++ int i;
+ int status = 0;
+ int max_ah;
+ struct ocrdma_create_ah_tbl *cmd;
+@@ -2296,7 +2292,7 @@ static int ocrdma_set_av_params(struct ocrdma_qp *qp,
+ memcpy(&cmd->params.dgid[0], &ah_attr->grh.dgid.raw[0],
+ sizeof(cmd->params.dgid));
+ status = ocrdma_query_gid(&qp->dev->ibdev, 1,
+- ah_attr->grh.sgid_index, &sgid);
++ ah_attr->grh.sgid_index, &sgid);
+ if (status)
+ return status;
+
+@@ -2685,7 +2681,7 @@ static int ocrdma_create_eqs(struct ocrdma_dev *dev)
+
+ for (i = 0; i < num_eq; i++) {
+ status = ocrdma_create_eq(dev, &dev->eq_tbl[i],
+- OCRDMA_EQ_LEN);
++ OCRDMA_EQ_LEN);
+ if (status) {
+ status = -EINVAL;
+ break;
+diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_main.c b/drivers/infiniband/hw/ocrdma/ocrdma_main.c
+index 72389f6..61836b2 100644
+--- a/drivers/infiniband/hw/ocrdma/ocrdma_main.c
++++ b/drivers/infiniband/hw/ocrdma/ocrdma_main.c
+@@ -550,7 +550,7 @@ static int ocrdma_close(struct ocrdma_dev *dev)
+ cur_qp = dev->qp_tbl;
+ for (i = 0; i < OCRDMA_MAX_QP; i++) {
+ qp = cur_qp[i];
+- if (qp) {
++ if (qp && qp->ibqp.qp_type != IB_QPT_GSI) {
+ /* change the QP state to ERROR */
+ _ocrdma_modify_qp(&qp->ibqp, &attrs, attr_mask);
+
+diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_sli.h b/drivers/infiniband/hw/ocrdma/ocrdma_sli.h
+index 6e048b7..96c9ee6 100644
+--- a/drivers/infiniband/hw/ocrdma/ocrdma_sli.h
++++ b/drivers/infiniband/hw/ocrdma/ocrdma_sli.h
+@@ -152,11 +152,10 @@ enum {
+ #define OCRDMA_MIN_Q_PAGE_SIZE (4096)
+ #define OCRDMA_MAX_Q_PAGES (8)
+
+-#define OCRDMA_SLI_ASIC_ID_OFFSET 0x9C
+-#define OCRDMA_SLI_ASIC_REV_MASK 0x000000FF
+-#define OCRDMA_SLI_ASIC_GEN_NUM_MASK 0x0000FF00
+-#define OCRDMA_SLI_ASIC_GEN_NUM_SHIFT 0x08
+-
++#define OCRDMA_SLI_ASIC_ID_OFFSET 0x9C
++#define OCRDMA_SLI_ASIC_REV_MASK 0x000000FF
++#define OCRDMA_SLI_ASIC_GEN_NUM_MASK 0x0000FF00
++#define OCRDMA_SLI_ASIC_GEN_NUM_SHIFT 0x08
+ /*
+ # 0: 4K Bytes
+ # 1: 8K Bytes
+@@ -633,7 +632,7 @@ enum {
+
+ enum {
+ OCRDMA_CREATE_CQ_VER2 = 2,
+- OCRDMA_CREATE_CQ_VER3 = 3,
++ OCRDMA_CREATE_CQ_VER3 = 3,
+
+ OCRDMA_CREATE_CQ_PAGE_CNT_MASK = 0xFFFF,
+ OCRDMA_CREATE_CQ_PAGE_SIZE_SHIFT = 16,
+@@ -1093,6 +1092,7 @@ enum {
+ OCRDMA_MODIFY_QP_RSP_MAX_ORD_MASK = 0xFFFF <<
+ OCRDMA_MODIFY_QP_RSP_MAX_ORD_SHIFT
+ };
++
+ struct ocrdma_modify_qp_rsp {
+ struct ocrdma_mqe_hdr hdr;
+ struct ocrdma_mbx_rsp rsp;
+@@ -1105,8 +1105,8 @@ struct ocrdma_query_qp {
+ struct ocrdma_mqe_hdr hdr;
+ struct ocrdma_mbx_hdr req;
+
+-#define OCRDMA_QUERY_UP_QP_ID_SHIFT 0
+-#define OCRDMA_QUERY_UP_QP_ID_MASK 0xFFFFFF
++#define OCRDMA_QUERY_UP_QP_ID_SHIFT 0
++#define OCRDMA_QUERY_UP_QP_ID_MASK 0xFFFFFF
+ u32 qp_id;
+ };
+
+diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
+index 635a757..ce88c0b 100644
+--- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
++++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
+@@ -53,7 +53,7 @@ int ocrdma_query_gid(struct ib_device *ibdev, u8 port,
+
+ dev = get_ocrdma_dev(ibdev);
+ memset(sgid, 0, sizeof(*sgid));
+- if (index >= OCRDMA_MAX_SGID)
++ if (index > OCRDMA_MAX_SGID)
+ return -EINVAL;
+
+ memcpy(sgid, &dev->sgid_tbl[index], sizeof(*sgid));
+@@ -144,7 +144,6 @@ static inline void get_link_speed_and_width(struct ocrdma_dev *dev,
+ }
+ }
+
+-
+ int ocrdma_query_port(struct ib_device *ibdev,
+ u8 port, struct ib_port_attr *props)
+ {
+@@ -1210,7 +1209,6 @@ static void ocrdma_set_qp_init_params(struct ocrdma_qp *qp,
+ qp->signaled = (attrs->sq_sig_type == IB_SIGNAL_ALL_WR) ? true : false;
+ }
+
+-
+ static void ocrdma_store_gsi_qp_cq(struct ocrdma_dev *dev,
+ struct ib_qp_init_attr *attrs)
+ {
+@@ -1296,17 +1294,6 @@ gen_err:
+ return ERR_PTR(status);
+ }
+
+-
+-static void ocrdma_flush_rq_db(struct ocrdma_qp *qp)
+-{
+- if (qp->db_cache) {
+- u32 val = qp->rq.dbid | (qp->db_cache <<
+- OCRDMA_DB_RQ_SHIFT);
+- iowrite32(val, qp->rq_db);
+- qp->db_cache = 0;
+- }
+-}
+-
+ int _ocrdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
+ int attr_mask)
+ {
+@@ -1325,9 +1312,6 @@ int _ocrdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
+ if (status < 0)
+ return status;
+ status = ocrdma_mbx_modify_qp(dev, qp, attr, attr_mask, old_qps);
+- if (!status && attr_mask & IB_QP_STATE && attr->qp_state == IB_QPS_RTR)
+- ocrdma_flush_rq_db(qp);
+-
+ return status;
+ }
+
+@@ -2043,7 +2027,7 @@ static int ocrdma_build_fr(struct ocrdma_qp *qp, struct ocrdma_hdr_wqe *hdr,
+ fast_reg->num_sges = wr->wr.fast_reg.page_list_len;
+ fast_reg->size_sge =
+ get_encoded_page_size(1 << wr->wr.fast_reg.page_shift);
+- mr = (struct ocrdma_mr *) (unsigned long) qp->dev->stag_arr[(hdr->lkey >> 8) &
++ mr = (struct ocrdma_mr *)qp->dev->stag_arr[(hdr->lkey >> 8) &
+ (OCRDMA_MAX_STAG - 1)];
+ build_frmr_pbes(wr, mr->hwmr.pbl_table, &mr->hwmr);
+ return 0;
+@@ -2878,7 +2862,7 @@ struct ib_mr *ocrdma_alloc_frmr(struct ib_pd *ibpd, int max_page_list_len)
+ goto mbx_err;
+ mr->ibmr.rkey = mr->hwmr.lkey;
+ mr->ibmr.lkey = mr->hwmr.lkey;
+- dev->stag_arr[(mr->hwmr.lkey >> 8) & (OCRDMA_MAX_STAG - 1)] = (unsigned long) mr;
++ dev->stag_arr[(mr->hwmr.lkey >> 8) & (OCRDMA_MAX_STAG - 1)] = (u64)mr;
+ return &mr->ibmr;
+ mbx_err:
+ ocrdma_free_mr_pbl_tbl(dev, &mr->hwmr);
+--
+1.7.1
+
--- /dev/null
+From 9b13a993e13fc662fd44130093cf116114bae539 Mon Sep 17 00:00:00 2001
+From: Devesh Sharma <devesh.sharma@emulex.com>
+Date: Thu, 30 Jan 2014 10:00:37 +0530
+Subject: [PATCH] be2net: adding abi version between be2net and ocrdma
+
+This patch adds ABI versioning between the be2net and ocrdma
+driver modules. This is to catch functional incompatibilities
+between the two drivers.
+
+Signed-off-by: Devesh Sharma <devesh.sharma@emulex.com>
+Signed-off-by: Selvin Xavier <selvin.xavier@emulex.com>
+---
+ drivers/net/ethernet/emulex/benet/be_roce.c | 6 ++++++
+ drivers/net/ethernet/emulex/benet/be_roce.h | 3 +++
+ 2 files changed, 9 insertions(+), 0 deletions(-)
+
+diff --git a/drivers/net/ethernet/emulex/benet/be_roce.c b/drivers/net/ethernet/emulex/benet/be_roce.c
+index 9cd5415..aa7f943 100644
+--- a/drivers/net/ethernet/emulex/benet/be_roce.c
++++ b/drivers/net/ethernet/emulex/benet/be_roce.c
+@@ -35,6 +35,12 @@ static void _be_roce_dev_add(struct be_adapter *adapter)
+
+ if (!ocrdma_drv)
+ return;
++
++ if (ocrdma_drv->be_abi_version != BE_ROCE_ABI_VERSION) {
++ dev_warn(&pdev->dev, "Cannot initialize RoCE due to ocrdma ABI mismatch\n");
++ return;
++ }
++
+ if (pdev->device == OC_DEVICE_ID5) {
+ /* only msix is supported on these devices */
+ if (!msix_enabled(adapter))
+diff --git a/drivers/net/ethernet/emulex/benet/be_roce.h b/drivers/net/ethernet/emulex/benet/be_roce.h
+index 2cd1129..1bfb161 100644
+--- a/drivers/net/ethernet/emulex/benet/be_roce.h
++++ b/drivers/net/ethernet/emulex/benet/be_roce.h
+@@ -21,6 +21,8 @@
+ #include <linux/pci.h>
+ #include <linux/netdevice.h>
+
++#define BE_ROCE_ABI_VERSION 1
++
+ struct ocrdma_dev;
+
+ enum be_interrupt_mode {
+@@ -52,6 +54,7 @@ struct be_dev_info {
+ /* ocrdma driver register's the callback functions with nic driver. */
+ struct ocrdma_driver {
+ unsigned char name[32];
++ u32 be_abi_version;
+ struct ocrdma_dev *(*add) (struct be_dev_info *dev_info);
+ void (*remove) (struct ocrdma_dev *);
+ void (*state_change_handler) (struct ocrdma_dev *, u32 new_state);
+--
+1.7.1
+