#define __AL_VERBS_H__\r
\r
#include "al_ca.h"\r
+#include "al_cq.h"\r
#include "al_pd.h"\r
#include "al_qp.h"\r
\r
h_ca->obj.p_ci_ca->verbs.modify_ca( h_ca->obj.p_ci_ca->h_ci_ca,\\r
port_num, ca_mod, p_port_attr_mod )\r
\r
-#define verbs_create_cq(h_ca, p_cq_create, h_cq) \\r
- h_ca->obj.p_ci_ca->verbs.create_cq( h_ca->obj.p_ci_ca->h_ci_ca,\\r
- h_cq, &p_cq_create->size, &h_cq->h_ci_cq, p_umv_buf )\r
+/*\r
+ * Create a CQ through the channel-interface verbs table.\r
+ *\r
+ * Replaces the old macro, which silently captured p_umv_buf from the\r
+ * caller's scope, with a type-checked inline.  When p_umv_buf is non-NULL\r
+ * (presumably a user-mode originated request - confirm against callers)\r
+ * the UM CA handle is passed to the provider instead of the kernel CI CA\r
+ * handle.  On success the provider fills h_cq->h_ci_cq and may round\r
+ * p_cq_create->size up in place.\r
+ */\r
+static inline ib_api_status_t\r
+verbs_create_cq(\r
+ IN const ib_ca_handle_t h_ca,\r
+ IN OUT ib_cq_create_t* const p_cq_create,\r
+ IN ib_cq_handle_t h_cq,\r
+ IN OUT ci_umv_buf_t* const p_umv_buf )\r
+{\r
+ return h_ca->obj.p_ci_ca->verbs.create_cq(\r
+ (p_umv_buf) ? h_ca->h_um_ca : h_ca->obj.p_ci_ca->h_ci_ca,\r
+ h_cq, &p_cq_create->size, &h_cq->h_ci_cq, p_umv_buf );\r
+}\r
\r
#define verbs_check_cq(h_cq) ((h_cq)->h_ci_cq)\r
#define verbs_destroy_cq(h_cq) \\r
h_qp->obj.p_ci_ca->verbs.bind_mw( h_mw->h_ci_mw,\\r
h_qp->h_ci_qp, p_mw_bind, p_rkey )\r
\r
-#define verbs_allocate_pd(h_ca, h_pd) \\r
- h_ca->obj.p_ci_ca->verbs.allocate_pd(\\r
- h_ca->obj.p_ci_ca->h_ci_ca, h_pd->type, &h_pd->h_ci_pd, p_umv_buf )\r
+/*\r
+ * Allocate a PD through the channel-interface verbs table.\r
+ *\r
+ * Same macro-to-inline conversion as verbs_create_cq: the old macro used\r
+ * p_umv_buf without declaring it as a parameter.  A non-NULL p_umv_buf\r
+ * selects the UM CA handle; otherwise the kernel CI CA handle is used.\r
+ * On success the provider stores the CI PD handle in h_pd->h_ci_pd.\r
+ */\r
+static inline ib_api_status_t\r
+verbs_allocate_pd(\r
+ IN const ib_ca_handle_t h_ca,\r
+ IN ib_pd_handle_t h_pd,\r
+ IN OUT ci_umv_buf_t* const p_umv_buf )\r
+{\r
+ return h_ca->obj.p_ci_ca->verbs.allocate_pd(\r
+ (p_umv_buf) ? h_ca->h_um_ca : h_ca->obj.p_ci_ca->h_ci_ca,\r
+ h_pd->type, &h_pd->h_ci_pd, p_umv_buf );\r
+}\r
\r
/*\r
* Reference the hardware PD.\r
h_mcast->obj.p_ci_ca->verbs.detach_mcast( \\r
h_mcast->h_ci_mcast )\r
\r
+/*\r
+ * Forward a vendor-specific operation to the provider's vendor_call entry.\r
+ *\r
+ * handle_array/num_handles and p_ci_op are passed through untouched; as in\r
+ * the other wrappers, a non-NULL p_umv_buf switches the first argument from\r
+ * the kernel CI CA handle to the UM CA handle.\r
+ */\r
+static inline ib_api_status_t\r
+verbs_ci_call(\r
+ IN ib_ca_handle_t h_ca,\r
+ IN const void* __ptr64 * const handle_array OPTIONAL,\r
+ IN uint32_t num_handles,\r
+ IN ib_ci_op_t* const p_ci_op,\r
+ IN ci_umv_buf_t* const p_umv_buf OPTIONAL )\r
+{\r
+ return h_ca->obj.p_ci_ca->verbs.vendor_call(\r
+ p_umv_buf ? h_ca->h_um_ca : h_ca->obj.p_ci_ca->h_ci_ca,\r
+ handle_array, num_handles, p_ci_op, p_umv_buf );\r
+}\r
\r
\r
#else\r
#define verbs_modify_ca(h_ca, port_num, ca_mod, p_port_attr_mod) \\r
ual_modify_ca(h_ca, port_num, ca_mod, p_port_attr_mod)\r
\r
-#define verbs_create_cq(h_ca, p_cq_create, h_cq) \\r
- ual_create_cq(h_ca->obj.p_ci_ca, p_cq_create, h_cq); \\r
- UNUSED_PARAM( p_umv_buf )\r
+/*\r
+ * User-mode-library (#else) branch of verbs_create_cq: delegate to\r
+ * ual_create_cq.  Keeps the same signature as the kernel-side inline so\r
+ * callers compile identically in both builds; p_umv_buf is unused here.\r
+ * Note the old macro expansion ended in a statement pair, which made the\r
+ * macro's "return value" the UNUSED_PARAM expression - the inline restores\r
+ * a real ib_api_status_t result.\r
+ */\r
+static inline ib_api_status_t\r
+verbs_create_cq(\r
+ IN const ib_ca_handle_t h_ca,\r
+ IN OUT ib_cq_create_t* const p_cq_create,\r
+ IN ib_cq_handle_t h_cq,\r
+ IN OUT ci_umv_buf_t* const p_umv_buf )\r
+{\r
+ UNUSED_PARAM( p_umv_buf );\r
+ return ual_create_cq( h_ca->obj.p_ci_ca, p_cq_create, h_cq );\r
+}\r
\r
\r
#define verbs_check_cq(h_cq) ((h_cq)->h_ci_cq || (h_cq)->obj.hdl)\r
#define verbs_bind_mw(h_mw, h_qp, p_mw_bind, p_rkey) \\r
ual_bind_mw(h_mw, h_qp, p_mw_bind, p_rkey)\r
\r
-#define verbs_allocate_pd(h_ca, h_pd) \\r
- ual_allocate_pd(h_ca, h_pd->type, h_pd); \\r
- UNUSED_PARAM( p_umv_buf )\r
+/*\r
+ * User-mode-library (#else) branch of verbs_allocate_pd: delegate to\r
+ * ual_allocate_pd with the same signature as the kernel-side inline.\r
+ * p_umv_buf is accepted only for signature parity and is unused.\r
+ */\r
+static inline ib_api_status_t\r
+verbs_allocate_pd(\r
+ IN const ib_ca_handle_t h_ca,\r
+ IN ib_pd_handle_t h_pd,\r
+ IN OUT ci_umv_buf_t* const p_umv_buf )\r
+{\r
+ UNUSED_PARAM( p_umv_buf );\r
+ return ual_allocate_pd( h_ca, h_pd->type, h_pd );\r
+}\r
\r
/*\r
* Get an alias to the kernel's hardware PD.\r
\r
if( !p_umv_buf->command )\r
{\r
+ /* No command buffer supplied: previously this path set IB_SUCCESS but\r
+ * then jumped to the error label.  Now allocate a minimal UM CA object\r
+ * (dev info + HOB back-pointer, no user mapping) and return it so the\r
+ * caller still gets a usable handle - NOTE(review): confirm callers of\r
+ * this no-command path expect output_size == 0. */\r
+ p_um_ca = (mlnx_um_ca_t*)cl_zalloc( sizeof(mlnx_um_ca_t) );\r
+ if( !p_um_ca )\r
+ {\r
+ p_umv_buf->status = IB_INSUFFICIENT_MEMORY;\r
+ goto mlnx_um_open_err1;\r
+ }\r
+ /* Copy the dev info. */\r
+ p_um_ca->dev_info = *hca_ul_info;\r
+ p_um_ca->hob_p = hob_p;\r
+ *ph_um_ca = (ib_ca_handle_t)p_um_ca;\r
p_umv_buf->status = IB_SUCCESS;\r
- goto mlnx_um_open_err1;\r
+ p_umv_buf->output_size = 0;\r
+ HCA_EXIT( MLNX_DBG_TRACE );\r
+ return IB_SUCCESS;\r
}\r
\r
/*\r
{\r
/* Copy the dev info. */\r
p_um_ca->dev_info = *hca_ul_info;\r
+ p_um_ca->hob_p = hob_p;\r
*ph_um_ca = (ib_ca_handle_t)p_um_ca;\r
(*(void** __ptr64)p_umv_buf->p_inout_buf) = p_um_ca->p_mapped_addr;\r
p_umv_buf->status = IB_SUCCESS;\r
if( !p_um_ca )\r
return;\r
\r
+ /* UM CA objects created on the no-command-buffer open path have no user\r
+ * mapping, so there is nothing to free/unmap - skip straight to freeing\r
+ * the object instead of touching a NULL p_mapped_addr/p_mdl. */\r
+ if( !p_um_ca->p_mapped_addr )\r
+ goto done;\r
+\r
THH_hob_free_ul_res( hh_hndl, p_um_ca->ul_hca_res );\r
\r
mlnx_um_close_cleanup:\r
MmUnmapLockedPages( p_um_ca->p_mapped_addr, p_um_ca->p_mdl );\r
IoFreeMdl( p_um_ca->p_mdl );\r
+done:\r
cl_free( p_um_ca );\r
\r
HCA_EXIT( MLNX_DBG_TRACE );\r
OUT ib_pd_handle_t *ph_pd,\r
IN OUT ci_umv_buf_t *p_umv_buf )\r
{\r
- mlnx_hob_t *hob_p = (mlnx_hob_t *)h_ca;\r
+ mlnx_hob_t *hob_p;\r
mlnx_hobul_t *hobul_p;\r
HH_hca_dev_t *hca_ul_info;\r
HHUL_pd_hndl_t hhul_pd_hndl = 0;\r
\r
CL_ENTER(MLNX_DBG_TRACE, g_mlnx_dbg_lvl);\r
\r
+ /* h_ca's concrete type depends on the caller: with a umv_buf the handle\r
+ * is a mlnx_um_ca_t (user-mode open), so recover the HOB through its\r
+ * back-pointer; otherwise h_ca is the mlnx_hob_t itself. */\r
+ if( p_umv_buf )\r
+ hob_p = ((mlnx_um_ca_t *)h_ca)->hob_p;\r
+ else\r
+ hob_p = (mlnx_hob_t *)h_ca;\r
+ \r
hobul_p = mlnx_hobs_get_hobul(hob_p);\r
if (NULL == hobul_p) {\r
status = IB_INVALID_CA_HANDLE;\r
hobul_p = mlnx_hobs_get_hobul(hob_p);\r
if (NULL == hobul_p) {\r
status = IB_INVALID_CA_HANDLE;\r
{\r
ib_api_status_t status;\r
\r
- mlnx_hob_t *hob_p = (mlnx_hob_t *)h_ca;\r
+ mlnx_hob_t *hob_p;\r
u_int32_t cq_idx;\r
u_int32_t cq_num;\r
u_int32_t cq_size = 0;\r
\r
CL_ENTER(MLNX_DBG_TRACE, g_mlnx_dbg_lvl);\r
\r
+ /* Same handle disambiguation as in the PD path: a non-NULL umv_buf\r
+ * means h_ca is really a mlnx_um_ca_t, so fetch the HOB from its\r
+ * back-pointer rather than casting the handle directly. */\r
+ if( p_umv_buf )\r
+ hob_p = ((mlnx_um_ca_t *)h_ca)->hob_p;\r
+ else\r
+ hob_p = (mlnx_hob_t *)h_ca;\r
+\r
hobul_p = mlnx_hobs_get_hobul(hob_p);\r
if (NULL == hobul_p) {\r
status = IB_INVALID_CA_HANDLE;\r