#define DRV_NAME "mthca"
#define PFX DRV_NAME ": "
#define DRV_VERSION "1.0.0000.506"
-#define DRV_RELDATE "09/19/2006"\r
+#define DRV_RELDATE "09/19/2006"
#define HZ 1000000 /* 1 sec in usecs */
struct _ib_recv_wr **bad_wr);
void mthca_qp_event(struct mthca_dev *dev, u32 qpn,
- enum ib_event_type event_type);
+ enum ib_event_type event_type, u8 vendor_code);
int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask);
int mthca_tavor_post_send(struct ib_qp *ibqp, struct _ib_send_wr *wr,
struct _ib_send_wr **bad_wr);
} cmd;\r
struct {\r
__be32 qpn;\r
+ u32 reserved1;\r
+ u32 reserved2;\r
+ u8 reserved3[1];\r
+ u8 vendor_code;\r
+ u8 reserved4[2];\r
} qp;\r
struct { \r
__be32 srqn; \r
\r
case MTHCA_EVENT_TYPE_PATH_MIG:\r
mthca_qp_event(dev, cl_ntoh32(eqe->event.qp.qpn) & 0xffffff,\r
- IB_EVENT_PATH_MIG);\r
+ IB_EVENT_PATH_MIG, eqe->event.qp.vendor_code);\r
break;\r
\r
case MTHCA_EVENT_TYPE_COMM_EST:\r
mthca_qp_event(dev, cl_ntoh32(eqe->event.qp.qpn) & 0xffffff,\r
- IB_EVENT_COMM_EST);\r
+ IB_EVENT_COMM_EST, eqe->event.qp.vendor_code);\r
break;\r
\r
case MTHCA_EVENT_TYPE_SQ_DRAINED:\r
mthca_qp_event(dev, cl_ntoh32(eqe->event.qp.qpn) & 0xffffff,\r
- IB_EVENT_SQ_DRAINED);\r
+ IB_EVENT_SQ_DRAINED, eqe->event.qp.vendor_code);\r
break;\r
\r
case MTHCA_EVENT_TYPE_SRQ_QP_LAST_WQE:\r
mthca_qp_event(dev, cl_ntoh32(eqe->event.qp.qpn) & 0xffffff,\r
- IB_EVENT_QP_LAST_WQE_REACHED);\r
+ IB_EVENT_QP_LAST_WQE_REACHED, eqe->event.qp.vendor_code);\r
break;\r
\r
case MTHCA_EVENT_TYPE_SRQ_LIMIT:\r
\r
case MTHCA_EVENT_TYPE_WQ_CATAS_ERROR:\r
mthca_qp_event(dev, cl_ntoh32(eqe->event.qp.qpn) & 0xffffff,\r
- IB_EVENT_QP_FATAL);\r
+ IB_EVENT_QP_FATAL, eqe->event.qp.vendor_code);\r
break;\r
\r
case MTHCA_EVENT_TYPE_PATH_MIG_FAILED:\r
mthca_qp_event(dev, cl_ntoh32(eqe->event.qp.qpn) & 0xffffff,\r
- IB_EVENT_PATH_MIG_ERR);\r
+ IB_EVENT_PATH_MIG_ERR, eqe->event.qp.vendor_code);\r
break;\r
\r
case MTHCA_EVENT_TYPE_WQ_INVAL_REQ_ERROR:\r
mthca_qp_event(dev, cl_ntoh32(eqe->event.qp.qpn) & 0xffffff,\r
- IB_EVENT_QP_REQ_ERR);\r
+ IB_EVENT_QP_REQ_ERR, eqe->event.qp.vendor_code);\r
break;\r
\r
case MTHCA_EVENT_TYPE_WQ_ACCESS_ERROR:\r
mthca_qp_event(dev, cl_ntoh32(eqe->event.qp.qpn) & 0xffffff,\r
- IB_EVENT_QP_ACCESS_ERR);\r
+ IB_EVENT_QP_ACCESS_ERR, eqe->event.qp.vendor_code);\r
break;\r
\r
case MTHCA_EVENT_TYPE_CMD:\r
}\r
\r
\r
+\r
+\r
}
/*
 * mthca_qp_event() - deliver an asynchronous hardware event for QP `qpn`
 * to the consumer's registered event handler.
 *
 * @dev:         device on which the event was raised
 * @qpn:         24-bit QP number reported by the event queue entry
 * @event_type:  IB event code already mapped from the HW event type
 * @vendor_code: vendor-specific error syndrome taken from the EQE
 *               (new parameter added by the '+' lines of this patch)
 *
 * NOTE(review): this region is a patch fragment — the '-'/'+' prefixed
 * lines are diff markers, and the QP-table lookup plus the spin_lock()
 * that pairs with the spin_unlock(&lh) below are elided from this view;
 * confirm against the full source before editing.
 */
void mthca_qp_event(struct mthca_dev *dev, u32 qpn,
- enum ib_event_type event_type)
+ enum ib_event_type event_type, u8 vendor_code)
{
struct mthca_qp *qp;
struct ib_event event;
/* presumably qp was looked up in dev->qp_table under &lh — elided here; verify */
spin_unlock(&lh);
if (!qp) {
/* Event arrived for a QP number with no matching software QP object; drop it. */
- HCA_PRINT(TRACE_LEVEL_WARNING ,HCA_DBG_QP ,("QP %06x Async event for bogus \n", qpn));
+ HCA_PRINT(TRACE_LEVEL_WARNING,HCA_DBG_QP,("QP %06x Async event for bogus \n", qpn));
return;
}
/* Build the IB event record, now including the HW-reported vendor syndrome. */
event.device = &dev->ib_dev;
event.event = event_type;
event.element.qp = &qp->ibqp;
+ event.vendor_specific = vendor_code;
/* NOTE(review): this trace fires at WARNING level for every async event,
 * including benign ones (COMM_EST, SQ_DRAINED) — consider a lower level. */
+ HCA_PRINT(TRACE_LEVEL_WARNING,HCA_DBG_QP,("QP %06x Async event event_type 0x%x vendor_code 0x%x\n",
+ qpn,event_type,vendor_code));
if (qp->ibqp.event_handler)
qp->ibqp.event_handler(&event, qp->ibqp.qp_context);
/* NOTE(review): calling mthca_alloc_cleanup() on the whole qp_table
 * allocator from a per-QP event path looks wrong — the upstream driver
 * drops a QP reference here instead; confirm against the full file. */
mthca_alloc_cleanup(&dev->qp_table.alloc);
}
+
+