/*
 * NOTE(review): tail of a PD-allocation routine whose header lies outside
 * this view; it records the owning device/context and zeroes the PD
 * reference count.  The '-'/'+' lines below are unresolved patch markers
 * (HCA_DBG_SHIM -> HCA_DBG_CQ); resolve toward the '+' line.
 * HCA_DBG_CQ for PD tracing looks like a copy/paste from CQ code --
 * confirm the intended debug facility (HCA_DBG_PD?).
 */
pd->device = device;
pd->ucontext = context;
atomic_set(&pd->usecnt, 0);
- HCA_PRINT(TRACE_LEVEL_INFORMATION ,HCA_DBG_SHIM ,("PD%d use cnt %d, pd_handle %p, ctx %p \n",
+ HCA_PRINT(TRACE_LEVEL_INFORMATION ,HCA_DBG_CQ ,("PD%d use cnt %d, pd_handle %p, ctx %p \n",
((struct mthca_pd*)pd)->pd_num, pd->usecnt, pd, pd->ucontext));
}
/*
 * ibv_dealloc_pd - release a protection domain.
 *
 * @pd: PD previously obtained from the device's alloc_pd path.
 *
 * Returns -EBUSY (without freeing anything) while the PD's reference
 * count is non-zero, i.e. while dependent resources still hold it;
 * otherwise forwards to the device-specific dealloc_pd() and returns
 * its result.
 *
 * NOTE(review): the original block contained unresolved patch markers
 * ('-'/'+' line pairs); they are resolved here toward the '+' lines.
 * The resulting HCA_DBG_CQ facility in a PD routine looks like a
 * copy/paste from CQ code -- confirm the intended facility
 * (HCA_DBG_PD?) against the driver's debug-flag header.
 */
int ibv_dealloc_pd(struct ib_pd *pd)
{
	/* Refuse to free while anything still references this PD. */
	if (atomic_read(&pd->usecnt)) {
		HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_CQ,("resources are not released (cnt %d)\n", pd->usecnt));
		return -EBUSY;
	}
	HCA_PRINT(TRACE_LEVEL_INFORMATION ,HCA_DBG_CQ ,("PD%d use cnt %d, pd_handle %p, ctx %p \n",
		((struct mthca_pd*)pd)->pd_num, pd->usecnt, pd, pd->ucontext));
	return pd->device->dealloc_pd(pd);
}
/*
 * NOTE(review): interior fragment of an address-handle destroy routine --
 * the function header and the lines declaring 'pd', 'ah' and 'ret' lie
 * outside this view.  The '-'/'+' lines are unresolved patch markers;
 * resolve toward the '+' lines (HCA_ENTER/HCA_EXIT with HCA_DBG_AV).
 */
struct ib_ucontext *ucontext;
struct ib_mr * ib_mr;
+ HCA_ENTER(HCA_DBG_AV);
pd = ah->pd;
ucontext = ah->ucontext;
ib_mr = ah->ib_mr;
/*
 * NOTE(review): the next line is a dangling HCA_PRINT argument list --
 * its opening HCA_PRINT(...) line is missing from this fragment; verify
 * against the full file before building.
 */
((struct mthca_pd*)pd)->pd_num, pd->usecnt, pd, pd->ucontext));
}
/* Presumably releases per-context CQ/QP bookkeeping for this AH -- confirm. */
release_user_cq_qp_resources(ucontext, ib_mr);
-
+ HCA_EXIT(HCA_DBG_AV);
return ret;
}
mlnx_get_cq_interface (\r
IN OUT uvp_interface_t *p_uvp )\r
{\r
- UVP_ENTER(UVP_DBG_SHIM);\r
+ UVP_ENTER(UVP_DBG_DEV);\r
\r
CL_ASSERT(p_uvp);\r
\r
p_uvp->pre_destroy_cq = mlnx_pre_destroy_cq;\r
p_uvp->post_destroy_cq = mlnx_post_destroy_cq;\r
\r
- UVP_EXIT(UVP_DBG_SHIM);\r
+ UVP_EXIT(UVP_DBG_DEV);\r
}\r
\r
ib_api_status_t
/*
 * NOTE(review): the function name/parameter lines are missing between the
 * return type above and the locals below, and interior lines are elided
 * before the 'cl_free' cleanup -- this is a fragment of the CQ-create
 * path (it calls ops.create_cq_pre).  '-'/'+' lines are unresolved patch
 * markers; resolve toward the '+' lines (UVP_DBG_CQ).
 */
struct ibv_create_cq *p_create_cq;
int err;

- UVP_ENTER(UVP_DBG_SHIM);
+ UVP_ENTER(UVP_DBG_CQ);

CL_ASSERT(p_umv_buf);

/* Driver pre-verb: presumably allocates the user-mode CQ object -- confirm. */
ibv_cq = p_hobul->ibv_ctx->ops.create_cq_pre(p_hobul->ibv_ctx, p_size, p_create_cq);
if (IS_ERR(ibv_cq)) {
err = PTR_ERR(ibv_cq);
- UVP_PRINT(TRACE_LEVEL_ERROR ,UVP_DBG_SHIM , ("mthca_alloc_cq_pre failed (%d)\n", err));
+ UVP_PRINT(TRACE_LEVEL_ERROR ,UVP_DBG_CQ , ("mthca_alloc_cq_pre failed (%d)\n", err));
status = errno_to_iberr(err);
goto err_alloc_cq;
}
/* goto-style cleanup ladder; success and error paths converge at 'end:'. */
cl_free(p_umv_buf->p_inout_buf);
err_memory:
end:
- UVP_EXIT(UVP_DBG_SHIM);
- return status;
+ UVP_EXIT(UVP_DBG_CQ);
+ return status;
}
\r
\r
/*
 * NOTE(review): fragment -- the function's return type/name/parameters are
 * missing above this opening brace.  From the body it reports a CQ's size
 * (reads cq_size out of the handle's mlnx_ual_cq_info_t) and returns
 * IB_VERBS_PROCESSING_DONE, presumably meaning no kernel transition is
 * required -- confirm against the verbs framework.  '-'/'+' lines are
 * unresolved patch markers; resolve toward the '+' lines (UVP_DBG_CQ).
 */
{
mlnx_ual_cq_info_t *p_cq_info = (mlnx_ual_cq_info_t *)((void*) h_uvp_cq);

- UVP_ENTER(UVP_DBG_SHIM);
+ UVP_ENTER(UVP_DBG_CQ);

*p_size = p_cq_info->cq_size;

- UVP_EXIT(UVP_DBG_SHIM);
+ UVP_EXIT(UVP_DBG_CQ);
return IB_VERBS_PROCESSING_DONE;
}
\r
mlnx_pre_destroy_cq (\r
IN const ib_cq_handle_t h_uvp_cq)\r
{\r
- UVP_ENTER(UVP_DBG_SHIM);\r
- UVP_EXIT(UVP_DBG_SHIM);\r
+ UVP_ENTER(UVP_DBG_CQ);\r
+ UVP_EXIT(UVP_DBG_CQ);\r
return IB_SUCCESS;\r
}\r
\r
/*
 * NOTE(review): fragment of the post-destroy-CQ hook -- the function
 * header and the declaration of 'err' are missing above this view.
 * '-'/'+' lines are unresolved patch markers; resolve toward the '+'
 * lines (UVP_DBG_CQ).
 */
mlnx_ual_cq_info_t *p_cq_info = (mlnx_ual_cq_info_t *) ((void*)h_uvp_cq);
UNREFERENCED_PARAMETER(ioctl_status);

- UVP_ENTER(UVP_DBG_SHIM);
+ UVP_ENTER(UVP_DBG_CQ);

/*
 * NOTE(review): '||' here is almost certainly wrong -- when p_cq_info is
 * NULL the right-hand operand dereferences it.  The intended guard is
 * 'p_cq_info && p_cq_info->ibv_cq' (null-deref hazard, CERT C EXP34-C).
 */
CL_ASSERT(p_cq_info || p_cq_info->ibv_cq);

err = p_cq_info->p_hobul->ibv_ctx->ops.destroy_cq( p_cq_info->ibv_cq );
if (err) 
- UVP_PRINT(TRACE_LEVEL_ERROR ,UVP_DBG_SHIM , ("mthca_destroy_cq failed (%d)\n", err));
+ UVP_PRINT(TRACE_LEVEL_ERROR ,UVP_DBG_CQ, ("mthca_destroy_cq failed (%d)\n", err));

/* Free the user-mode CQ bookkeeping regardless of destroy_cq's result. */
cl_free (p_cq_info);
- UVP_EXIT(UVP_DBG_SHIM);
+ UVP_EXIT(UVP_DBG_CQ);
return;
}
\r