return IB_INVALID_PD_HANDLE;\r
}\r
\r
+	if( !p_qp_create->rq_depth && !p_qp_create->sq_depth )\r
+	{\r
+		AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_MAX_WRS\n") );\r
+		return IB_INVALID_MAX_WRS;\r
+	}\r
+	/* reject a QP with no SGEs on either queue (was testing sq_sge twice) */\r
+	if( !p_qp_create->rq_sge && !p_qp_create->sq_sge )\r
+	{\r
+		AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_MAX_SGE\n") );\r
+		return IB_INVALID_MAX_SGE;\r
+	}\r
+ if (h_pd->obj.p_ci_ca && h_pd->obj.p_ci_ca->p_pnp_attr)\r
+ {\r
+ if ((p_qp_create->rq_depth > h_pd->obj.p_ci_ca->p_pnp_attr->max_wrs) ||\r
+ (p_qp_create->sq_depth > h_pd->obj.p_ci_ca->p_pnp_attr->max_wrs))\r
+ {\r
+ AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_MAX_WRS\n") );\r
+ return IB_INVALID_MAX_WRS;\r
+ }\r
+ if ((p_qp_create->rq_sge > h_pd->obj.p_ci_ca->p_pnp_attr->max_sges) ||\r
+ (p_qp_create->sq_sge > h_pd->obj.p_ci_ca->p_pnp_attr->max_sges))\r
+ {\r
+ AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_MAX_SGE\n") );\r
+ return IB_INVALID_MAX_SGE;\r
+ }\r
+ }\r
status = create_qp(\r
h_pd, p_qp_create, qp_context, pfn_qp_event_cb, ph_qp, NULL );\r
\r
int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask);
int mthca_tavor_post_send(struct ib_qp *ibqp, struct _ib_send_wr *wr,
struct _ib_send_wr **bad_wr);
-int mthca_tavor_post_receive(struct ib_qp *ibqp, struct _ib_recv_wr *wr,
+int mthca_tavor_post_recv(struct ib_qp *ibqp, struct _ib_recv_wr *wr,
struct _ib_recv_wr **bad_wr);
int mthca_arbel_post_send(struct ib_qp *ibqp, struct _ib_send_wr *wr,
struct _ib_send_wr **bad_wr);
-int mthca_arbel_post_receive(struct ib_qp *ibqp, struct _ib_recv_wr *wr,
+int mthca_arbel_post_recv(struct ib_qp *ibqp, struct _ib_recv_wr *wr,
struct _ib_recv_wr **bad_wr);
void mthca_free_err_wqe(struct mthca_dev *dev, struct mthca_qp *qp, int is_send,
int index, int *dbd, __be32 *new_wqe);
if (mthca_is_memfree(dev)) {
dev->ib_dev.req_notify_cq = mthca_arbel_arm_cq;
- dev->ib_dev.post_send = mthca_arbel_post_send;
- dev->ib_dev.post_recv = mthca_arbel_post_receive;
+ dev->ib_dev.post_send = mthca_arbel_post_send;
+ dev->ib_dev.post_recv = mthca_arbel_post_recv;
} else {
dev->ib_dev.req_notify_cq = mthca_tavor_arm_cq;
- dev->ib_dev.post_send = mthca_tavor_post_send;
- dev->ib_dev.post_recv = mthca_tavor_post_receive;
+ dev->ib_dev.post_send = mthca_tavor_post_send;
+ dev->ib_dev.post_recv = mthca_tavor_post_recv;
}
KeInitializeMutex(&dev->cap_mask_mutex, 0);
return err;
}
-int mthca_tavor_post_receive(struct ib_qp *ibqp, struct _ib_recv_wr *wr,
+int mthca_tavor_post_recv(struct ib_qp *ibqp, struct _ib_recv_wr *wr,
struct _ib_recv_wr **bad_wr)
{
struct mthca_dev *dev = to_mdev(ibqp->device);
return err;
}
-int mthca_arbel_post_receive(struct ib_qp *ibqp, struct _ib_recv_wr *wr,
+int mthca_arbel_post_recv(struct ib_qp *ibqp, struct _ib_recv_wr *wr,
struct _ib_recv_wr **bad_wr)
{
struct mthca_qp *qp = to_mqp(ibqp);
struct ibv_create_cq_resp *p_resp;\r
struct ibv_cq *ibv_cq;\r
mlnx_ual_hobul_t *p_hobul = (mlnx_ual_hobul_t *)((void *)h_uvp_ca);\r
- mlnx_ual_cq_info_t *p_new_cq = NULL;\r
+\r
\r
UVP_ENTER(UVP_DBG_CQ);\r
\r
goto err_create_cq;\r
}\r
\r
- /* allocate cq */\r
- p_new_cq = (mlnx_ual_cq_info_t *)cl_zalloc( sizeof(mlnx_ual_cq_info_t) );\r
- if( !p_new_cq ) {\r
- status = IB_INSUFFICIENT_MEMORY;\r
- goto err_memory;\r
- }\r
-\r
- /* return results */\r
- p_new_cq->ibv_cq = ibv_cq;\r
- p_new_cq->p_hobul = p_hobul;\r
- p_new_cq->cq_size = size;\r
- *ph_uvp_cq = (ib_cq_handle_t)p_new_cq;\r
+ *ph_uvp_cq = (ib_cq_handle_t)ibv_cq;\r
}\r
goto end;\r
- \r
-err_memory: \r
+\r
p_hobul->ibv_ctx->ops.destroy_cq(ibv_cq);\r
err_create_cq:\r
end: \r
OUT uint32_t* const p_size,\r
IN OUT ci_umv_buf_t *p_umv_buf)\r
{\r
- mlnx_ual_cq_info_t *p_cq_info = (mlnx_ual_cq_info_t *)((void*) h_uvp_cq);\r
+ struct ibv_cq *ibv_cq = (struct ibv_cq *)h_uvp_cq;\r
\r
UVP_ENTER(UVP_DBG_CQ);\r
\r
- *p_size = p_cq_info->cq_size;\r
+ *p_size = ibv_cq->cqe;\r
\r
UVP_EXIT(UVP_DBG_CQ);\r
return IB_VERBS_PROCESSING_DONE;\r
IN ib_api_status_t ioctl_status)\r
{\r
int err;\r
- mlnx_ual_cq_info_t *p_cq_info = (mlnx_ual_cq_info_t *) ((void*)h_uvp_cq);\r
+ struct ibv_cq *ibv_cq = (struct ibv_cq *)h_uvp_cq;\r
UNREFERENCED_PARAMETER(ioctl_status);\r
\r
UVP_ENTER(UVP_DBG_CQ);\r
\r
- CL_ASSERT(p_cq_info || p_cq_info->ibv_cq);\r
+ CL_ASSERT(ibv_cq);\r
\r
if (IB_SUCCESS == ioctl_status) {\r
- err = p_cq_info->p_hobul->ibv_ctx->ops.destroy_cq( p_cq_info->ibv_cq );\r
+ err = ibv_cq->context->ops.destroy_cq( ibv_cq );\r
if (err) \r
UVP_PRINT(TRACE_LEVEL_ERROR ,UVP_DBG_CQ, ("mthca_destroy_cq failed (%d)\n", err));\r
- cl_free (p_cq_info);\r
+ //cl_free (p_cq_info);\r
}\r
\r
UVP_EXIT(UVP_DBG_CQ);\r
mlnx_ual_hobul_t *p_hobul;\r
} mlnx_ual_pd_info_t;\r
\r
-\r
-typedef struct _ib_cq\r
-{\r
- struct ibv_cq *ibv_cq;\r
- mlnx_ual_hobul_t *p_hobul; \r
- uint32_t cq_size;\r
-} mlnx_ual_cq_info_t;\r
-\r
-\r
-typedef struct _ib_qp\r
-{\r
- struct ibv_qp *ibv_qp;\r
- mlnx_ual_pd_info_t *h_uvp_pd; \r
-} mlnx_ual_qp_info_t;\r
-\r
-\r
typedef struct _ib_mw\r
{\r
ib_pd_handle_t h_uvp_pd; \r
{\r
int err;\r
ib_api_status_t status = IB_SUCCESS;\r
- mlnx_ual_qp_info_t *p_qp_info = (mlnx_ual_qp_info_t *)((void*) h_qp);\r
- mlnx_ual_hobul_t *p_hobul = p_qp_info->h_uvp_pd->p_hobul;\r
+ struct mthca_qp *qp = (struct mthca_qp *) ((void*)h_qp);\r
\r
- UVP_ENTER(UVP_DBG_QP);\r
+ UVP_ENTER(UVP_DBG_QP);\r
\r
- CL_ASSERT (p_qp_info || p_qp_info->h_uvp_pd || p_qp_info->h_uvp_pd->p_hobul);\r
- p_hobul = p_qp_info->h_uvp_pd->p_hobul;\r
+ CL_ASSERT (qp);\r
\r
CL_ASSERT( p_send_wr );\r
\r
- err = p_hobul->ibv_ctx->ops.post_send(p_qp_info->ibv_qp, p_send_wr, pp_send_failure );\r
+ err = qp->ibv_qp.context->ops.post_send(&qp->ibv_qp, p_send_wr, pp_send_failure );\r
+\r
if (err) {\r
UVP_PRINT(TRACE_LEVEL_ERROR ,UVP_DBG_QP , ("mthca_post_send failed (%d)\n", err));\r
if (err == -ENOMEM)\r
status = errno_to_iberr(err);\r
}\r
\r
- UVP_EXIT(UVP_DBG_QP);\r
- return status;\r
+ UVP_EXIT(UVP_DBG_QP);\r
+ return status;\r
}\r
\r
-\r
ib_api_status_t\r
-mlnx_post_srq_recv (\r
- IN const void* __ptr64 h_srq,\r
+mlnx_post_recv (\r
+ IN const void* __ptr64 h_qp,\r
IN ib_recv_wr_t* const p_recv_wr,\r
OUT ib_recv_wr_t** pp_recv_failure )\r
{\r
int err;\r
ib_api_status_t status = IB_SUCCESS;\r
- struct mthca_srq *srq = (struct mthca_srq *) ((void*)h_srq);\r
+ struct mthca_qp *qp = (struct mthca_qp *) ((void*)h_qp);\r
\r
- UVP_ENTER(UVP_DBG_QP);\r
+ UVP_ENTER(UVP_DBG_QP);\r
\r
- CL_ASSERT (srq);\r
+ CL_ASSERT (qp);\r
\r
CL_ASSERT( p_recv_wr );\r
\r
- err = srq->ibv_srq.context->ops.post_srq_recv(&srq->ibv_srq, p_recv_wr, pp_recv_failure );\r
+ err = qp->ibv_qp.context->ops.post_recv(&qp->ibv_qp, p_recv_wr, pp_recv_failure );\r
+\r
if (err) {\r
UVP_PRINT(TRACE_LEVEL_ERROR ,UVP_DBG_QP, ("mthca_post_recv failed (%d)\n", err));\r
if (err == -ENOMEM)\r
status = errno_to_iberr(err);\r
}\r
\r
- UVP_EXIT(UVP_DBG_QP);\r
- return status;\r
+ UVP_EXIT(UVP_DBG_QP);\r
+ return status;\r
}\r
\r
\r
ib_api_status_t\r
-mlnx_post_recv (\r
- IN const void* __ptr64 h_qp,\r
+mlnx_post_srq_recv (\r
+ IN const void* __ptr64 h_srq,\r
IN ib_recv_wr_t* const p_recv_wr,\r
OUT ib_recv_wr_t** pp_recv_failure )\r
{\r
int err;\r
ib_api_status_t status = IB_SUCCESS;\r
- mlnx_ual_qp_info_t *p_qp_info = (mlnx_ual_qp_info_t *)((void*) h_qp);\r
- mlnx_ual_hobul_t *p_hobul = p_qp_info->h_uvp_pd->p_hobul;\r
+ struct mthca_srq *srq = (struct mthca_srq *) ((void*)h_srq);\r
\r
- UVP_ENTER(UVP_DBG_QP);\r
+ UVP_ENTER(UVP_DBG_QP);\r
\r
- CL_ASSERT (p_qp_info || p_qp_info->h_uvp_pd || p_qp_info->h_uvp_pd->p_hobul);\r
- p_hobul = p_qp_info->h_uvp_pd->p_hobul;\r
+ CL_ASSERT (srq);\r
\r
CL_ASSERT( p_recv_wr );\r
\r
- err = p_hobul->ibv_ctx->ops.post_recv(p_qp_info->ibv_qp, p_recv_wr, pp_recv_failure );\r
+ err = srq->ibv_srq.context->ops.post_srq_recv(&srq->ibv_srq, p_recv_wr, pp_recv_failure );\r
if (err) {\r
UVP_PRINT(TRACE_LEVEL_ERROR ,UVP_DBG_QP, ("mthca_post_recv failed (%d)\n", err));\r
if (err == -ENOMEM)\r
status = errno_to_iberr(err);\r
}\r
\r
- UVP_EXIT(UVP_DBG_QP);\r
- return status;\r
+ UVP_EXIT(UVP_DBG_QP);\r
+ return status;\r
}\r
\r
\r
{\r
int err;\r
ib_api_status_t status = IB_SUCCESS;\r
- mlnx_ual_hobul_t *p_hobul;\r
- mlnx_ual_cq_info_t *p_cq_info = (mlnx_ual_cq_info_t *)((void*) h_cq);\r
+ struct mthca_cq *cq = (struct mthca_cq *) ((void*)h_cq);\r
\r
UVP_ENTER(UVP_DBG_CQ);\r
- CL_ASSERT (p_cq_info);\r
-\r
- p_hobul = (mlnx_ual_hobul_t *) p_cq_info->p_hobul;\r
- CL_ASSERT (p_hobul);\r
+ CL_ASSERT (cq);\r
\r
if (!pp_free_wclist || !*pp_free_wclist || !pp_done_wclist)\r
{\r
goto err_invalid_params;\r
}\r
\r
- err = p_hobul->ibv_ctx->ops.poll_cq_list(p_cq_info->ibv_cq, pp_free_wclist, pp_done_wclist );\r
+ err = cq->ibv_cq.context->ops.poll_cq_list(&cq->ibv_cq, pp_free_wclist, pp_done_wclist );\r
if (err) {\r
UVP_PRINT(TRACE_LEVEL_ERROR ,UVP_DBG_CQ , ("mthca_poll_cq failed (%d)\n", err));\r
status = errno_to_iberr(err);\r
{\r
int err;\r
ib_api_status_t status = IB_SUCCESS;\r
- mlnx_ual_hobul_t *p_hobul;\r
- mlnx_ual_cq_info_t *p_cq_info = (mlnx_ual_cq_info_t *)((void*) h_cq);\r
+ struct mthca_cq *cq = (struct mthca_cq *) ((void*)h_cq);\r
\r
UVP_ENTER(UVP_DBG_CQ);\r
- CL_ASSERT (p_cq_info);\r
+ CL_ASSERT (cq);\r
\r
- p_hobul = (mlnx_ual_hobul_t *) p_cq_info->p_hobul;\r
- CL_ASSERT (p_hobul);\r
-\r
- err = p_hobul->ibv_ctx->ops.req_notify_cq(p_cq_info->ibv_cq, (solicited) ? IB_CQ_SOLICITED : IB_CQ_NEXT_COMP );\r
+ err = cq->ibv_cq.context->ops.req_notify_cq(&cq->ibv_cq, (solicited) ? IB_CQ_SOLICITED : IB_CQ_NEXT_COMP );\r
if (err) {\r
UVP_PRINT(TRACE_LEVEL_ERROR ,UVP_DBG_SHIM , ("mthca_enable_cq_notify failed (%d)\n", err));\r
status = errno_to_iberr(err);\r
IN const uint32_t n_cqes )\r
{\r
// Not yet implemented\r
- ib_api_status_t status = IB_UNSUPPORTED;\r
- UVP_ENTER(UVP_DBG_SHIM);\r
+ ib_api_status_t status = IB_UNSUPPORTED;\r
+ UVP_ENTER(UVP_DBG_SHIM);\r
UVP_PRINT(TRACE_LEVEL_ERROR ,UVP_DBG_SHIM , ("mlnx_enable_ncomp_cq_notify is not implemented yet\n"));\r
UVP_EXIT(UVP_DBG_SHIM);\r
- return status;\r
+ return status;\r
}\r
\r
struct ibv_create_qp *p_create_qp;\r
ib_api_status_t status = IB_SUCCESS;\r
size_t size = max( sizeof(struct ibv_create_qp), sizeof(struct ibv_create_qp_resp) );\r
- mlnx_ual_pd_info_t *p_pd = (mlnx_ual_pd_info_t *)h_uvp_pd;\r
- mlnx_ual_hobul_t *p_hobul = p_pd->p_hobul;\r
- ib_ca_attr_t *p_hca_attr = p_hobul->p_hca_attr;\r
+ struct ibv_pd *ibv_pd = h_uvp_pd->ibv_pd;\r
\r
UVP_ENTER(UVP_DBG_QP);\r
\r
- /* sanity checks */\r
- if(p_create_attr->sq_depth > p_hca_attr->max_wrs ||p_create_attr->rq_depth > p_hca_attr->max_wrs )\r
- status = IB_INVALID_MAX_WRS;\r
- else \r
- if(p_create_attr->sq_sge> p_hca_attr->max_sges ||p_create_attr->rq_sge> p_hca_attr->max_sges )\r
- status = IB_INVALID_MAX_SGE;\r
- if (status)\r
- goto err_params;\r
-\r
CL_ASSERT(p_umv_buf);\r
\r
if( !p_umv_buf->p_inout_buf )\r
p_umv_buf->command = TRUE;\r
\r
/* convert attributes */\r
- attr.send_cq = p_create_attr->h_sq_cq->ibv_cq;\r
- attr.recv_cq = p_create_attr->h_rq_cq->ibv_cq;\r
+ attr.send_cq = (struct ibv_cq *)p_create_attr->h_sq_cq;\r
+ attr.recv_cq = (struct ibv_cq *)p_create_attr->h_rq_cq;\r
attr.srq = (struct ibv_srq*)p_create_attr->h_srq;\r
attr.cap.max_send_wr = p_create_attr->sq_depth;\r
attr.cap.max_recv_wr = p_create_attr->rq_depth;\r
\r
/* allocate ibv_qp */\r
p_create_qp = (struct ibv_create_qp *)p_umv_buf->p_inout_buf;\r
- ibv_qp = p_hobul->ibv_ctx->ops.create_qp_pre(p_pd->ibv_pd, &attr, p_create_qp);\r
+ ibv_qp = ibv_pd->context->ops.create_qp_pre(ibv_pd, &attr, p_create_qp);\r
if (IS_ERR(ibv_qp)) {\r
err = PTR_ERR(ibv_qp);\r
UVP_PRINT(TRACE_LEVEL_ERROR ,UVP_DBG_QP ,("mthca_create_qp_pre failed (%d)\n", err));\r
err_alloc_qp:\r
cl_free(p_umv_buf->p_inout_buf);\r
err_memory:\r
-err_params:\r
end:\r
UVP_EXIT(UVP_DBG_QP);\r
return status;\r
}\r
\r
void\r
- mlnx_post_create_qp (\r
- IN const ib_pd_handle_t h_uvp_pd,\r
- IN ib_api_status_t ioctl_status,\r
- OUT ib_qp_handle_t *ph_uvp_qp,\r
- IN ci_umv_buf_t *p_umv_buf )\r
+mlnx_post_create_qp (\r
+ IN const ib_pd_handle_t h_uvp_pd,\r
+ IN ib_api_status_t ioctl_status,\r
+ OUT ib_qp_handle_t *ph_uvp_qp,\r
+ IN ci_umv_buf_t *p_umv_buf )\r
{\r
int err;\r
struct ibv_qp *ibv_qp;\r
struct ibv_create_qp_resp *p_resp;\r
struct ibv_create_qp *p_create_qp;\r
ib_api_status_t status = IB_SUCCESS;\r
- mlnx_ual_pd_info_t *p_pd = (mlnx_ual_pd_info_t *)h_uvp_pd;\r
- mlnx_ual_hobul_t *p_hobul = p_pd->p_hobul;\r
- mlnx_ual_qp_info_t *p_new_qp = NULL;\r
+ struct ibv_pd *ibv_pd = h_uvp_pd->ibv_pd;\r
\r
UVP_ENTER(UVP_DBG_QP);\r
\r
- CL_ASSERT(p_hobul);\r
+\r
CL_ASSERT(p_umv_buf);\r
p_resp = (struct ibv_create_qp_resp *)p_umv_buf->p_inout_buf;\r
\r
if (IB_SUCCESS == ioctl_status) {\r
\r
/* allocate ibv_qp */\r
- ibv_qp = p_hobul->ibv_ctx->ops.create_qp_post(p_pd->ibv_pd, p_resp);\r
+ ibv_qp = ibv_pd->context->ops.create_qp_post(ibv_pd, p_resp);\r
if (IS_ERR(ibv_qp)) {\r
err = PTR_ERR(ibv_qp);\r
UVP_PRINT(TRACE_LEVEL_ERROR ,UVP_DBG_QP , ("mthca_create_qp_post failed (%d)\n", err));\r
goto err_create_cq;\r
}\r
\r
- /* allocate qp */\r
- p_new_qp = (mlnx_ual_qp_info_t *)cl_zalloc( sizeof(mlnx_ual_qp_info_t) );\r
- if( !p_new_qp ) {\r
- status = IB_INSUFFICIENT_MEMORY;\r
- goto err_memory;\r
- }\r
-\r
- /* return results */\r
- p_new_qp->h_uvp_pd = p_pd;\r
- p_new_qp->ibv_qp = ibv_qp;\r
- *ph_uvp_qp = (ib_qp_handle_t)p_new_qp;\r
+ *ph_uvp_qp = (ib_qp_handle_t)ibv_qp;\r
}\r
goto end;\r
\r
-err_memory: \r
- p_hobul->ibv_ctx->ops.destroy_qp(ibv_qp);\r
+ ibv_pd->context->ops.destroy_qp(ibv_qp);\r
err_create_cq:\r
end: \r
if (p_resp)\r
\r
ib_api_status_t\r
mlnx_pre_modify_qp (\r
- IN const ib_qp_handle_t h_uvp_qp,\r
- IN const ib_qp_mod_t *p_modify_attr,\r
- IN OUT ci_umv_buf_t *p_umv_buf)\r
+ IN const ib_qp_handle_t h_uvp_qp,\r
+ IN const ib_qp_mod_t *p_modify_attr,\r
+ IN OUT ci_umv_buf_t *p_umv_buf)\r
{\r
ib_api_status_t status = IB_SUCCESS;\r
- UNREFERENCED_PARAMETER(h_uvp_qp);\r
- UNREFERENCED_PARAMETER(p_modify_attr);\r
+ UNREFERENCED_PARAMETER(h_uvp_qp);\r
+ UNREFERENCED_PARAMETER(p_modify_attr);\r
\r
UVP_ENTER(UVP_DBG_SHIM);\r
\r
\r
void\r
mlnx_post_modify_qp (\r
- IN const ib_qp_handle_t h_uvp_qp,\r
- IN ib_api_status_t ioctl_status,\r
- IN OUT ci_umv_buf_t *p_umv_buf)\r
+ IN const ib_qp_handle_t h_uvp_qp,\r
+ IN ib_api_status_t ioctl_status,\r
+ IN OUT ci_umv_buf_t *p_umv_buf)\r
{\r
int err;\r
- ib_api_status_t status;\r
+ ib_api_status_t status;\r
struct ibv_modify_qp_resp *p_resp; \r
struct ibv_qp_attr attr;\r
- mlnx_ual_qp_info_t *p_qp_info = (mlnx_ual_qp_info_t *)((void *) h_uvp_qp);\r
+ struct ibv_qp *ibv_qp = (struct ibv_qp *)h_uvp_qp;\r
\r
- UVP_ENTER(UVP_DBG_SHIM);\r
- CL_ASSERT(p_umv_buf);\r
+ UVP_ENTER(UVP_DBG_SHIM);\r
+ CL_ASSERT(p_umv_buf);\r
\r
p_resp = (struct ibv_modify_qp_resp *)p_umv_buf->p_inout_buf;\r
\r
- if (IB_SUCCESS == ioctl_status) \r
- {\r
+ if (IB_SUCCESS == ioctl_status) \r
+ {\r
memset( &attr, 0, sizeof(attr));\r
attr.qp_state = p_resp->qp_state;\r
- if (p_qp_info->ibv_qp) {\r
- err = p_qp_info->h_uvp_pd->p_hobul->ibv_ctx->ops.modify_qp(\r
- h_uvp_qp->ibv_qp, &attr, p_resp->attr_mask);\r
+ if (ibv_qp) {\r
+ err = ibv_qp->context->ops.modify_qp( ibv_qp,\r
+ &attr, p_resp->attr_mask);\r
if (err) {\r
UVP_PRINT(TRACE_LEVEL_ERROR ,UVP_DBG_SHIM , ("mthca_modify_qp failed (%d)\n", err));\r
status = errno_to_iberr(err);\r
}\r
UVP_PRINT(TRACE_LEVEL_INFORMATION ,UVP_DBG_SHIM ,\r
("Committed to modify QP to state %d\n", p_resp->qp_state));\r
- }\r
+ }\r
\r
\r
err_modify_qp:\r
- if (p_resp)\r
- cl_free (p_resp);\r
- UVP_EXIT(UVP_DBG_SHIM);\r
- return;\r
-}\r
+ if (p_resp)\r
+ cl_free (p_resp);\r
+ UVP_EXIT(UVP_DBG_SHIM);\r
+ return;\r
+ }\r
\r
\r
ib_api_status_t\r
mlnx_pre_query_qp (\r
- IN ib_qp_handle_t h_uvp_qp,\r
- IN OUT ci_umv_buf_t *p_umv_buf)\r
+ IN ib_qp_handle_t h_uvp_qp,\r
+ IN OUT ci_umv_buf_t *p_umv_buf)\r
{\r
- UNREFERENCED_PARAMETER(h_uvp_qp);\r
- UVP_ENTER(UVP_DBG_SHIM);\r
+ UNREFERENCED_PARAMETER(h_uvp_qp);\r
+ UVP_ENTER(UVP_DBG_SHIM);\r
p_umv_buf->input_size = p_umv_buf->output_size = 0;\r
p_umv_buf->command = FALSE;\r
p_umv_buf->status = IB_SUCCESS;\r
- UVP_EXIT(UVP_DBG_SHIM);\r
- return IB_SUCCESS;\r
+ UVP_EXIT(UVP_DBG_SHIM);\r
+ return IB_SUCCESS;\r
}\r
\r
\r
IN OUT ib_qp_attr_t *p_query_attr,\r
IN OUT ci_umv_buf_t *p_umv_buf)\r
{\r
- struct mthca_qp *p_qp_info = (struct mthca_qp *)h_uvp_qp->ibv_qp;\r
+ struct mthca_qp *p_mthca_qp = (struct mthca_qp *)h_uvp_qp;\r
UVP_ENTER(UVP_DBG_SHIM);\r
\r
UNREFERENCED_PARAMETER(p_umv_buf);\r
if(IB_SUCCESS == ioctl_status)\r
{\r
- p_query_attr->sq_max_inline = p_qp_info->max_inline_data;\r
- p_query_attr->sq_sge = p_qp_info->sq.max_gs;\r
- p_query_attr->sq_depth = p_qp_info->sq.max;\r
- p_query_attr->rq_sge = p_qp_info->rq.max_gs;\r
- p_query_attr->rq_depth = p_qp_info->rq.max;\r
+ p_query_attr->sq_max_inline = p_mthca_qp->max_inline_data;\r
+ p_query_attr->sq_sge = p_mthca_qp->sq.max_gs;\r
+ p_query_attr->sq_depth = p_mthca_qp->sq.max;\r
+ p_query_attr->rq_sge = p_mthca_qp->rq.max_gs;\r
+ p_query_attr->rq_depth = p_mthca_qp->rq.max;\r
}\r
UVP_EXIT(UVP_DBG_SHIM);\r
}\r
IN const ib_qp_handle_t h_uvp_qp)\r
{\r
int err;\r
- mlnx_ual_qp_info_t *p_qp_info = (mlnx_ual_qp_info_t *)((void *) h_uvp_qp);\r
+\r
\r
UVP_ENTER(UVP_DBG_SHIM);\r
\r
- mthca_destroy_qp_pre(p_qp_info->ibv_qp);\r
+ mthca_destroy_qp_pre((struct ibv_qp*)h_uvp_qp);\r
\r
UVP_EXIT(UVP_DBG_SHIM);\r
return IB_SUCCESS;\r
IN ib_api_status_t ioctl_status)\r
{\r
int err;\r
- mlnx_ual_qp_info_t *p_qp_info = (mlnx_ual_qp_info_t *)((void *) h_uvp_qp);\r
\r
UVP_ENTER(UVP_DBG_SHIM);\r
\r
- CL_ASSERT(p_qp_info || p_qp_info->ibv_qp);\r
+ CL_ASSERT(h_uvp_qp);\r
\r
- mthca_destroy_qp_post(p_qp_info->ibv_qp, (int)ioctl_status);\r
- if (ioctl_status == IB_SUCCESS) \r
- cl_free (p_qp_info);\r
- else\r
+ mthca_destroy_qp_post((struct ibv_qp*)h_uvp_qp, (int)ioctl_status);\r
+ if (ioctl_status != IB_SUCCESS) \r
UVP_PRINT(TRACE_LEVEL_ERROR ,UVP_DBG_SHIM , ("mthca_destroy_qp_post failed (%d)\n", ioctl_status));\r
\r
UVP_EXIT(UVP_DBG_SHIM);\r
// fill the rest qp fields
qp->ibv_qp.pd = pd;
+ qp->ibv_qp.context= pd->context;
qp->ibv_qp.send_cq = attr->send_cq;
qp->ibv_qp.recv_cq = attr->recv_cq;
qp->ibv_qp.srq = attr->srq;
uint32_t qp_num;
enum ibv_qp_state state;
ib_qp_type_t qp_type;
+ struct ibv_context *context;
};
struct ibv_cq {
*/\r
\r
typedef void\r
-(AL_API *uvp_post_attach_mcast_t) (\r
+(AL_API *uvp_post_attach_mcast) (\r
IN const ib_qp_handle_t h_uvp_qp,\r
IN ib_api_status_t ioctl_status,\r
OUT ib_mcast_handle_t *ph_mcast,\r
*/\r
\r
typedef void\r
-(AL_API *uvp_post_detach_mcast_t) (\r
+(AL_API *uvp_post_detach_mcast) (\r
IN ib_mcast_handle_t h_uvp_mcast,\r
IN ib_api_status_t ioctl_status );\r
\r
* Multicast Support Verbs\r
*/\r
uvp_pre_attach_mcast pre_attach_mcast;\r
- uvp_post_attach_mcast_t post_attach_mcast;\r
+ uvp_post_attach_mcast post_attach_mcast;\r
uvp_pre_detach_mcast pre_detach_mcast;\r
- uvp_post_detach_mcast_t post_detach_mcast;\r
+ uvp_post_detach_mcast post_detach_mcast;\r
\r
} uvp_interface_t;\r
\r