"AL_OBJ_TYPE_RES_MGR",\r
"AL_OBJ_TYPE_H_CA_ATTR",\r
"AL_OBJ_TYPE_H_PNP_EVENT",\r
- "AL_OBJ_TYPE_H_SA_REG"\r
+ "AL_OBJ_TYPE_H_SA_REG",\r
+ "AL_OBJ_TYPE_H_FMR"\r
};\r
#endif\r
\r
#define AL_OBJ_TYPE_RES_MGR 36\r
#define AL_OBJ_TYPE_H_CA_ATTR 37\r
#define AL_OBJ_TYPE_H_PNP_EVENT 38\r
-#define AL_OBJ_TYPE_H_SA_REG 40\r
-#define AL_OBJ_TYPE_INVALID 39 /* Must be last type. */\r
+#define AL_OBJ_TYPE_H_SA_REG 39\r
+#define AL_OBJ_TYPE_H_FMR 40\r
+#define AL_OBJ_TYPE_INVALID 41 /* Must be last type. */\r
\r
/* Kernel object for a user-mode app. */\r
#define AL_OBJ_SUBTYPE_UM_EXPORT 0x80000000\r
} ib_mr_t;\r
\r
\r
-\r
cl_status_t\r
mr_ctor(\r
IN void* const p_object,\r
IN void* context );\r
\r
\r
-\r
ib_api_status_t\r
reg_mem(\r
IN const ib_pd_handle_t h_pd,\r
OUT net32_t* const p_rkey,\r
OUT ib_mr_handle_t* const ph_mr );\r
\r
+\r
+
+#ifdef CL_KERNEL
+
+/* Fast memory region (FMR) tracking object — kernel mode only. */
+typedef struct _ib_fmr
+{
+ al_obj_t obj;
+ ib_fmr_handle_t h_ci_fmr; /* Actual HW handle. */
+} ib_fmr_t;
+
+/* Pool constructor for FMR tracking objects. */
+cl_status_t
+fmr_ctor(
+ IN void* const p_object,
+ IN void* context,
+ OUT cl_pool_item_t** const pp_pool_item );
+
+
+/* Pool destructor for FMR tracking objects. */
+void
+fmr_dtor(
+ IN const cl_pool_item_t* const p_pool_item,
+ IN void* context );
+
+
+/* Internal worker for ib_create_fmr. */
+ib_api_status_t
+create_fmr(
+ IN const ib_pd_handle_t h_pd,
+ IN const ib_fmr_create_t* const p_fmr_create,
+ OUT ib_fmr_handle_t* const ph_fmr );
+
+
+/* Internal worker for ib_map_phys_fmr. */
+ib_api_status_t
+map_phys_fmr(
+ IN const ib_fmr_handle_t h_fmr,
+ IN const void* __ptr64 paddr_list,
+ IN const int list_len,
+ IN OUT void** __ptr64 const p_vaddr,
+ OUT net32_t* const p_lkey,
+ OUT net32_t* const p_rkey);
+
+
+/* Internal worker for ib_unmap_fmr. */
+ib_api_status_t
+unmap_fmr(
+ IN const ib_fmr_handle_t h_fmr);
+
+/*
+ * Internal worker for ib_destroy_fmr.  The parameter is the FMR handle
+ * itself (not a pointer to one), so it is named h_fmr to match the
+ * definition and the sibling prototypes above.
+ */
+ib_api_status_t
+destroy_fmr(
+ IN const ib_fmr_handle_t h_fmr );
+
+#endif
+
+
+\r
+\r
#endif /* __AL_MR_H__ */\r
IN al_obj_t *p_obj );\r
\r
\r
+#ifdef CL_KERNEL
+/* al_obj callbacks for FMR objects (defined below). */
+static void
+__cleanup_fmr(
+ IN struct _al_obj *p_obj );
+
+static void
+__return_fmr(
+ IN al_obj_t *p_obj );
+#endif
+\r
\r
cl_status_t\r
mr_ctor(\r
CL_EXIT( AL_DBG_MR, g_al_dbg_lvl );\r
return status;\r
}\r
+\r
+\r
+#ifdef CL_KERNEL\r
+/*
+ * Pool constructor for an FMR tracking object: zeroes the structure,
+ * constructs and initializes its AL object, and hands the pool item back.
+ * Returns CL_ERROR if AL object initialization fails.
+ */
+cl_status_t
+fmr_ctor(
+ IN void* const p_object,
+ IN void* context,
+ OUT cl_pool_item_t** const pp_pool_item )
+{
+ ib_api_status_t status;
+ ib_fmr_handle_t h_fmr;
+
+ UNUSED_PARAM( context );
+
+ h_fmr = (ib_fmr_handle_t)p_object;
+ cl_memclr( h_fmr, sizeof( ib_fmr_t ) );
+
+ construct_al_obj( &h_fmr->obj, AL_OBJ_TYPE_H_FMR );
+ status = init_al_obj( &h_fmr->obj, NULL, FALSE, NULL,
+ __cleanup_fmr, __return_fmr );
+ if( status != IB_SUCCESS )
+ {
+ return CL_ERROR;
+ }
+
+ *pp_pool_item = &((ib_fmr_handle_t)p_object)->obj.pool_item;
+
+ /* Release the reference taken in init_al_obj. */
+ deref_al_obj( &h_fmr->obj );
+
+ return CL_SUCCESS;
+}
+\r
+\r
+\r
+/*
+ * Pool destructor for an FMR tracking object: tears down the embedded
+ * AL object when the pool itself is destroyed.
+ */
+void
+fmr_dtor(
+ IN const cl_pool_item_t* const p_pool_item,
+ IN void* context )
+{
+ al_obj_t *p_obj;
+
+ UNUSED_PARAM( context );
+
+ p_obj = PARENT_STRUCT( p_pool_item, al_obj_t, pool_item );
+
+ /*
+ * The FMR is being totally destroyed. Modify the free_cb to destroy the
+ * AL object.
+ */
+ p_obj->pfn_free = (al_pfn_free_t)destroy_al_obj;
+ ref_al_obj( p_obj );
+ p_obj->pfn_destroy( p_obj, NULL );
+}
+\r
+\r
+\r
+/*
+ * al_obj cleanup callback: deallocates the HW FMR if it is still present.
+ * Note: status is examined only by CL_ASSERT, i.e. in debug builds.
+ */
+static void
+__cleanup_fmr(
+ IN struct _al_obj *p_obj )
+{
+ ib_api_status_t status;
+ ib_fmr_handle_t h_fmr;
+
+ CL_ASSERT( p_obj );
+ h_fmr = PARENT_STRUCT( p_obj, ib_fmr_t, obj );
+
+ /* Deregister the memory. */
+ if( verbs_check_fmr(h_fmr) )
+ {
+ status = verbs_destroy_fmr( h_fmr);
+ CL_ASSERT( status == IB_SUCCESS );
+
+ h_fmr->h_ci_fmr = NULL;
+ }
+}
+\r
+\r
+\r
+/*
+ * al_obj free callback: resets the AL object and returns the FMR
+ * tracking structure to the resource manager's pool.
+ */
+static void
+__return_fmr(
+ IN al_obj_t *p_obj )
+{
+ ib_fmr_handle_t h_fmr;
+
+ h_fmr = PARENT_STRUCT( p_obj, ib_fmr_t, obj );
+ reset_al_obj( p_obj );
+ put_fmr( h_fmr );
+}
+\r
+\r
+\r
+/*
+ * Public entry point: allocates an FMR on the given protection domain.
+ * Validates the PD handle, then delegates to create_fmr.
+ */
+ib_api_status_t
+ib_create_fmr(
+ IN const ib_pd_handle_t h_pd,
+ IN const ib_fmr_create_t* const p_fmr_create,
+ OUT ib_fmr_handle_t* const ph_fmr )
+{
+ ib_api_status_t status;
+
+ CL_ENTER( AL_DBG_MR, g_al_dbg_lvl );
+
+ if( AL_OBJ_INVALID_HANDLE( h_pd, AL_OBJ_TYPE_H_PD ) )
+ {
+ CL_TRACE_EXIT( AL_DBG_ERROR, g_al_dbg_lvl, ("IB_INVALID_PD_HANDLE\n") );
+ return IB_INVALID_PD_HANDLE;
+ }
+
+ status = create_fmr( h_pd, p_fmr_create, ph_fmr);
+
+ /* Release the reference taken in alloc_fmr for initialization. */
+ if( status == IB_SUCCESS )
+ deref_al_obj( &(*ph_fmr)->obj );
+
+ AL_EXIT( AL_DBG_MR );
+ return status;
+}
+\r
+\r
+\r
+/*
+ * Worker for ib_create_fmr: pulls an FMR tracking structure from the
+ * pool, attaches it to the PD's object tree, and allocates the HW FMR
+ * through the channel driver.  On any failure the tracking structure is
+ * destroyed via its own destroy callback.
+ */
+ib_api_status_t
+create_fmr(
+ IN const ib_pd_handle_t h_pd,
+ IN const ib_fmr_create_t* const p_fmr_create,
+ OUT ib_fmr_handle_t* const ph_fmr)
+{
+ ib_fmr_handle_t h_fmr;
+ ib_api_status_t status;
+
+ CL_ENTER( AL_DBG_MR, g_al_dbg_lvl );
+
+ if( !p_fmr_create || !ph_fmr )
+ {
+ CL_TRACE_EXIT( AL_DBG_ERROR, g_al_dbg_lvl, ("IB_INVALID_PARAMETER\n") );
+ return IB_INVALID_PARAMETER;
+ }
+
+ /* Get an FMR tracking structure. */
+ h_fmr = alloc_fmr();
+ if( !h_fmr )
+ {
+ CL_TRACE_EXIT( AL_DBG_ERROR, g_al_dbg_lvl,
+ ("unable to allocate memory handle\n") );
+ return IB_INSUFFICIENT_MEMORY;
+ }
+
+ status = attach_al_obj( &h_pd->obj, &h_fmr->obj );
+ if( status != IB_SUCCESS )
+ {
+ h_fmr->obj.pfn_destroy( &h_fmr->obj, NULL );
+ AL_TRACE_EXIT( AL_DBG_ERROR,
+ ("attach_al_obj returned %s.\n", ib_get_err_str(status)) );
+ return status;
+ }
+
+ /* Allocate the FMR through the channel driver (verbs.alloc_fmr). */
+ status = verbs_create_fmr( h_pd, p_fmr_create, h_fmr );
+ if( status != IB_SUCCESS )
+ {
+ h_fmr->obj.pfn_destroy( &h_fmr->obj, NULL );
+ CL_TRACE_EXIT( AL_DBG_ERROR, g_al_dbg_lvl,
+ ("unable to register memory: %s\n", ib_get_err_str(status)) );
+ return status;
+ }
+
+ *ph_fmr = h_fmr;
+
+ CL_EXIT( AL_DBG_MR, g_al_dbg_lvl );
+ return IB_SUCCESS;
+}
+\r
+\r
+/*
+ * Public entry point: maps a list of physical pages into an FMR.
+ * Holds a reference on the FMR across the call so it cannot be
+ * deallocated while the mapping is in progress.
+ */
+ib_api_status_t
+ib_map_phys_fmr(
+ IN const ib_fmr_handle_t h_fmr,
+ IN const void* __ptr64 paddr_list,
+ IN const int list_len,
+ IN OUT void** __ptr64 const p_vaddr,
+ OUT net32_t* const p_lkey,
+ OUT net32_t* const p_rkey)
+{
+ ib_api_status_t status;
+
+ CL_ENTER( AL_DBG_MR, g_al_dbg_lvl );
+
+ if( AL_OBJ_INVALID_HANDLE( h_fmr, AL_OBJ_TYPE_H_FMR ) )
+ {
+ CL_TRACE_EXIT( AL_DBG_ERROR, g_al_dbg_lvl, ("IB_INVALID_FMR_HANDLE\n") );
+ return IB_INVALID_FMR_HANDLE;
+ }
+
+ ref_al_obj( &h_fmr->obj );
+
+ status = map_phys_fmr( h_fmr, paddr_list, list_len, p_vaddr, p_lkey, p_rkey );
+
+ deref_al_obj( &h_fmr->obj );
+
+ AL_EXIT( AL_DBG_MR );
+ return status;
+}
+\r
+\r
+\r
+/*
+ * Worker for ib_map_phys_fmr: validates the output pointers and forwards
+ * the mapping request to the channel driver.
+ */
+ib_api_status_t
+map_phys_fmr(
+ IN const ib_fmr_handle_t h_fmr,
+ IN const void* __ptr64 paddr_list,
+ IN const int list_len,
+ IN OUT void** __ptr64 const p_vaddr,
+ OUT net32_t* const p_lkey,
+ OUT net32_t* const p_rkey)
+{
+ ib_api_status_t status;
+
+ CL_ENTER( AL_DBG_MR, g_al_dbg_lvl );
+
+ if( !paddr_list || !p_vaddr || !p_lkey || !p_rkey)
+ {
+ CL_TRACE_EXIT( AL_DBG_ERROR, g_al_dbg_lvl, ("IB_INVALID_PARAMETER\n") );
+ return IB_INVALID_PARAMETER;
+ }
+
+ /* Map the physical page list into the FMR via the channel driver. */
+ status = verbs_map_phys_fmr( h_fmr, paddr_list, list_len, p_vaddr, p_lkey, p_rkey);
+ if( status != IB_SUCCESS )
+ {
+ /* TODO: decide whether any additional recovery is needed here. */
+ CL_TRACE_EXIT( AL_DBG_ERROR, g_al_dbg_lvl,
+ ("unable to map FMR: %s\n", ib_get_err_str(status)) );
+ return status;
+ }
+
+
+ CL_EXIT( AL_DBG_MR, g_al_dbg_lvl );
+ return IB_SUCCESS;
+}
+\r
+/*
+ * Public entry point: removes the current mapping from an FMR.
+ * Holds a reference on the FMR across the call.
+ */
+ib_api_status_t
+ib_unmap_fmr(
+ IN const ib_fmr_handle_t h_fmr)
+{
+ ib_api_status_t status;
+
+ CL_ENTER( AL_DBG_MR, g_al_dbg_lvl );
+
+ if( AL_OBJ_INVALID_HANDLE( h_fmr, AL_OBJ_TYPE_H_FMR ) )
+ {
+ CL_TRACE_EXIT( AL_DBG_ERROR, g_al_dbg_lvl, ("IB_INVALID_FMR_HANDLE\n") );
+ return IB_INVALID_FMR_HANDLE;
+ }
+
+ ref_al_obj( &h_fmr->obj );
+
+ status = unmap_fmr( h_fmr );
+
+ deref_al_obj( &h_fmr->obj );
+
+ CL_EXIT( AL_DBG_MR, g_al_dbg_lvl );
+ return status;
+}
+\r
+/*
+ * Worker for ib_unmap_fmr: rejects FMRs with no HW handle, then forwards
+ * the unmap request to the channel driver.
+ */
+ib_api_status_t
+unmap_fmr(
+ IN const ib_fmr_handle_t h_fmr)
+{
+ ib_api_status_t status;
+
+ AL_ENTER( AL_DBG_MR );
+
+ if( !verbs_check_fmr(h_fmr) )
+ {
+ CL_TRACE_EXIT( AL_DBG_ERROR, g_al_dbg_lvl, ("IB_INVALID_FMR_HANDLE\n") );
+ return IB_INVALID_FMR_HANDLE;
+ }
+
+ status = verbs_unmap_fmr(h_fmr);
+
+ CL_EXIT( AL_DBG_MR, g_al_dbg_lvl );
+ return status;
+}
+\r
+\r
+/*
+ * Public entry point: destroys an FMR.  The reference taken here is
+ * released only on failure; on success the object's destroy path
+ * (invoked inside destroy_fmr) consumes it.
+ */
+ib_api_status_t
+ib_destroy_fmr(
+ IN const ib_fmr_handle_t h_fmr )
+{
+ ib_api_status_t status;
+
+ CL_ENTER( AL_DBG_MR, g_al_dbg_lvl );
+
+ if( AL_OBJ_INVALID_HANDLE( h_fmr, AL_OBJ_TYPE_H_FMR ) )
+ {
+ CL_TRACE_EXIT( AL_DBG_ERROR, g_al_dbg_lvl, ("IB_INVALID_FMR_HANDLE\n") );
+ return IB_INVALID_FMR_HANDLE;
+ }
+
+ ref_al_obj( &h_fmr->obj );
+
+ status = destroy_fmr( h_fmr );
+ if( status != IB_SUCCESS )
+ deref_al_obj( &h_fmr->obj );
+
+ CL_EXIT( AL_DBG_MR, g_al_dbg_lvl );
+ return status;
+}
+\r
+\r
+\r
+/*
+ * Worker for ib_destroy_fmr: deallocates the HW FMR synchronously and,
+ * on success, tears down the tracking object (returning it to the pool
+ * via __return_fmr).
+ */
+ib_api_status_t
+destroy_fmr(
+ IN const ib_fmr_handle_t h_fmr )
+{
+ ib_api_status_t status;
+
+ AL_ENTER( AL_DBG_MR );
+
+ if( !verbs_check_fmr(h_fmr) )
+ {
+ CL_TRACE_EXIT( AL_DBG_ERROR, g_al_dbg_lvl, ("IB_INVALID_FMR_HANDLE\n") );
+ return IB_INVALID_FMR_HANDLE;
+ }
+
+ /* FMR's are destroyed synchronously */
+ status = verbs_destroy_fmr(h_fmr);
+
+ if( status == IB_SUCCESS )
+ {
+ h_fmr->h_ci_fmr = NULL;
+ /* We're good to destroy the object. */
+ h_fmr->obj.pfn_destroy( &h_fmr->obj, NULL );
+ }
+
+ CL_EXIT( AL_DBG_MR, g_al_dbg_lvl );
+ return status;
+}
+\r
+\r
+\r
+#endif\r
+\r
+\r
\r
#define AL_MR_POOL_SIZE (4096 / sizeof( ib_mr_t ))\r
#define AL_AV_POOL_SIZE (4096 / sizeof( ib_av_t ))\r
-\r
+#ifdef CL_KERNEL\r
+#define AL_FMR_POOL_SIZE (4096 / sizeof( ib_fmr_t ))\r
+#endif\r
\r
al_res_mgr_t *gp_res_mgr;\r
\r
/* Construct the resource manager. */\r
cl_qpool_construct( &gp_res_mgr->av_pool );\r
cl_qpool_construct( &gp_res_mgr->mr_pool );\r
+#ifdef CL_KERNEL\r
+ cl_qpool_construct( &gp_res_mgr->fmr_pool );\r
+#endif\r
\r
construct_al_obj( &gp_res_mgr->obj, AL_OBJ_TYPE_RES_MGR );\r
status = init_al_obj( &gp_res_mgr->obj, gp_res_mgr, TRUE,\r
return ib_convert_cl_status( cl_status );\r
}\r
\r
+#ifdef CL_KERNEL\r
+ /* Initialize the pool of fast memory regions. */\r
+ cl_status = cl_qpool_init( &gp_res_mgr->fmr_pool,\r
+ AL_FMR_POOL_SIZE, 0, AL_FMR_POOL_SIZE, sizeof( ib_fmr_t ),\r
+ fmr_ctor, fmr_dtor, gp_res_mgr );\r
+ if( cl_status != CL_SUCCESS )\r
+ {\r
+ gp_res_mgr->obj.pfn_destroy( &gp_res_mgr->obj, NULL );\r
+ return ib_convert_cl_status( cl_status );\r
+ }\r
+#endif\r
+\r
/* Release the reference taken in init_al_obj. */\r
deref_al_obj( &gp_res_mgr->obj );\r
\r
\r
cl_qpool_destroy( &gp_res_mgr->av_pool );\r
cl_qpool_destroy( &gp_res_mgr->mr_pool );\r
+#ifdef CL_KERNEL\r
+ cl_qpool_destroy( &gp_res_mgr->fmr_pool );\r
+#endif\r
\r
destroy_al_obj( p_obj );\r
cl_free ( gp_res_mgr );\r
deref_al_obj( &gp_res_mgr->obj );\r
}\r
\r
+#ifdef CL_KERNEL\r
+\r
+/*
+ * Get a fast memory region structure to track registration requests.
+ * Returns NULL if the pool is exhausted.  On success the FMR holds a
+ * reference on the resource manager, plus an extra self-reference that
+ * ib_create_fmr releases once creation completes.
+ */
+ib_fmr_handle_t
+alloc_fmr(void)
+{
+ al_obj_t *p_obj;
+ cl_pool_item_t *p_pool_item;
+
+ cl_spinlock_acquire( &gp_res_mgr->obj.lock );
+ p_pool_item = cl_qpool_get( &gp_res_mgr->fmr_pool );
+ cl_spinlock_release( &gp_res_mgr->obj.lock );
+
+ if( !p_pool_item )
+ return NULL;
+
+ /* The in-use FMR pins the resource manager. */
+ ref_al_obj( &gp_res_mgr->obj );
+ p_obj = PARENT_STRUCT( p_pool_item, al_obj_t, pool_item );
+
+ /*
+ * Hold an extra reference on the object until creation is complete.
+ * This prevents a client's destruction of the object during asynchronous
+ * event callback processing from deallocating the object before the
+ * creation is complete.
+ */
+ ref_al_obj( p_obj );
+
+ return PARENT_STRUCT( p_obj, ib_fmr_t, obj );
+}
+\r
+\r
+\r
+/*
+ * Return a fast memory region structure to the available pool and drop
+ * the resource-manager reference taken in alloc_fmr.
+ */
+void
+put_fmr(
+ IN ib_fmr_handle_t h_fmr )
+{
+ cl_spinlock_acquire( &gp_res_mgr->obj.lock );
+ cl_qpool_put( &gp_res_mgr->fmr_pool, &h_fmr->obj.pool_item );
+ cl_spinlock_release( &gp_res_mgr->obj.lock );
+ deref_al_obj( &gp_res_mgr->obj );
+}
\r
+#endif\r
\r
/*\r
* Get an address vector from the available pool.\r
\r
cl_qpool_t mr_pool;\r
cl_qpool_t av_pool;\r
-\r
+#ifdef CL_KERNEL\r
+ cl_qpool_t fmr_pool;\r
+#endif\r
} al_res_mgr_t;\r
\r
\r
put_mr(\r
IN ib_mr_handle_t h_mr );\r
\r
+#ifdef CL_KERNEL
+/* FMR pool accessors (kernel only). */
+ib_fmr_handle_t
+alloc_fmr(void);
+
+
+void
+put_fmr(
+ IN ib_fmr_handle_t h_fmr );
+
+#endif
\r
ib_av_handle_t\r
alloc_av(void);\r
um_call )\r
\r
#define verbs_check_mr(h_mr) ((h_mr)->h_ci_mr)\r
+#define verbs_check_fmr(h_fmr) ((h_fmr)->h_ci_fmr)\r
#define verbs_deregister_mr(h_mr) \\r
h_mr->obj.p_ci_ca->verbs.deregister_mr( h_mr->h_ci_mr )\r
\r
access_ctrl, p_vaddr, p_lkey, p_rkey, &(ph_mr->h_ci_mr), \\r
um_call )\r
\r
+/* FMR dispatch helpers: forward to the channel driver's verbs table. */
+#define verbs_create_fmr(h_pd, p_fmr_create, h_fmr) \
+ h_fmr->obj.p_ci_ca->verbs.alloc_fmr( h_pd->h_ci_pd,\
+ p_fmr_create, &h_fmr->h_ci_fmr)
+
+#define verbs_map_phys_fmr( h_fmr, plist_addr, list_len, p_vaddr, p_lkey, p_rkey) \
+ h_fmr->obj.p_ci_ca->verbs.map_phys_fmr( h_fmr->h_ci_fmr,\
+ plist_addr, list_len, p_vaddr, p_lkey, p_rkey)
+
+#define verbs_unmap_fmr( h_fmr) \
+ h_fmr->obj.p_ci_ca->verbs.unmap_fmr( h_fmr->h_ci_fmr)
+
+#define verbs_destroy_fmr(h_fmr) \
+ h_fmr->obj.p_ci_ca->verbs.dealloc_fmr(h_fmr->h_ci_fmr)
+
#define verbs_create_mw(h_pd, p_rkey, h_mw) \\r
h_mw->obj.p_ci_ca->verbs.create_mw( h_pd->h_ci_pd,\\r
p_rkey, &h_mw->h_ci_mw, p_umv_buf )\r
"IB_INVALID_QP_HANDLE",\r
"IB_INVALID_PD_HANDLE",\r
"IB_INVALID_MR_HANDLE",\r
+ "IB_INVALID_FMR_HANDLE",\r
"IB_INVALID_MW_HANDLE",\r
"IB_INVALID_MCAST_HANDLE",\r
"IB_INVALID_CALLBACK",\r
p_ifc->close_al = ib_close_al;\r
p_ifc->get_err_str = ib_get_err_str;\r
p_ifc->get_wc_status_str = ib_get_wc_status_str;\r
+ p_ifc->create_fmr = ib_create_fmr;\r
+ p_ifc->map_phys_fmr = ib_map_phys_fmr;\r
+ p_ifc->unmap_fmr = ib_unmap_fmr;\r
+ p_ifc->destroy_fmr = ib_destroy_fmr;\r
\r
BUS_EXIT( BUS_DBG_PNP );\r
}\r
mthca_log.mc \\r
mthca_log.rc \\r
hca.rc \\r
+ \\r
..\hca_utils.c \\r
+ \\r
hca_data.c \\r
- hca_mcast.c \\r
- hca_verbs.c \\r
- hca_pnp.c \\r
- hca_pci.c \\r
- hca_driver.c \\r
hca_direct.c \\r
+ hca_driver.c \\r
+ hca_mcast.c \\r
hca_memory.c \\r
+ hca_pci.c \\r
+ hca_pnp.c \\r
hca_smp.c \\r
+ hca_verbs.c \\r
\\r
+ mt_cache.c \\r
+ mt_device.c \\r
mt_l2w.c \\r
mt_memory.c \\r
- mt_cache.c \\r
mt_packer.c \\r
- mt_ud_header.c \\r
- mt_device.c \\r
- mt_verbs.c \\r
mt_reset_tavor.c \\r
+ mt_ud_header.c \\r
mt_uverbs.c \\r
mt_uverbsmem.c \\r
+ mt_verbs.c \\r
\\r
mthca_allocator.c \\r
mthca_av.c \\r
+ mthca_catas.c \\r
mthca_cmd.c \\r
mthca_cq.c \\r
mthca_eq.c \\r
+ mthca_log.c \\r
+ mthca_mad.c \\r
mthca_main.c \\r
+ mthca_mcg.c \\r
mthca_memfree.c \\r
mthca_mr.c \\r
- mthca_mcg.c \\r
- mthca_mad.c \\r
mthca_pd.c \\r
mthca_profile.c \\r
mthca_provider.c \\r
mthca_qp.c \\r
mthca_srq.c \\r
mthca_uar.c \\r
- mthca_log.c \\r
- mthca_catas.c \r
\r
\r
INCLUDES=\\r
$(DDK_LIB_PATH)\wdmguid.lib\r
\r
\r
-#LINKER_FLAGS=/MAP /MAPINFO:LINES\r
+#LINKER_FLAGS=/MAP \r
\r
!IFDEF ENABLE_EVENT_TRACING\r
\r
ca_attr_p->max_mcast_grps = hca_info_p->max_mcast_grp;\r
ca_attr_p->max_mcast_qps = hca_info_p->max_total_mcast_qp_attach;\r
ca_attr_p->max_qps_per_mcast_grp = hca_info_p->max_mcast_qp_attach;\r
+ ca_attr_p->max_fmr = hca_info_p->max_fmr;\r
+ ca_attr_p->max_map_per_fmr = hca_info_p->max_map_per_fmr;\r
+ \r
ca_attr_p->local_ack_delay = hca_info_p->local_ca_ack_delay;\r
ca_attr_p->bad_pkey_ctr_support = hca_info_p->device_cap_flags & IB_DEVICE_BAD_PKEY_CNTR;\r
ca_attr_p->bad_qkey_ctr_support = hca_info_p->device_cap_flags & IB_DEVICE_BAD_QKEY_CNTR;\r
}\r
\r
// fill the results\r
- p_res->va = ua;\r
+ p_res->va = (uint64_t)(ULONG_PTR)ua;\r
p_res->size = sz;\r
\r
// resource tracking\r
\r
UNUSED_PARAM( um_call );\r
\r
- HCA_ENTER(HCA_DBG_SHIM);\r
+ HCA_ENTER(HCA_DBG_MEMORY);\r
+\r
+ if (mthca_is_livefish(to_mdev(ib_pd_p->device))) {\r
+ mr_p = kzalloc(sizeof *mr_p, GFP_KERNEL);\r
+ if (!mr_p) {\r
+ status = IB_INSUFFICIENT_MEMORY;\r
+ goto err_mem;\r
+ }\r
+ mr_p->device = ib_pd_p->device;\r
+ mr_p->pd = ib_pd_p;\r
+ goto done;\r
+ }\r
\r
// sanity checks\r
if( !cl_is_blockable() ) {\r
}\r
\r
// results\r
+done:\r
if (ph_mr) *ph_mr = (ib_mr_handle_t)mr_p;\r
*p_lkey = mr_p->lkey;\r
*p_rkey = cl_hton32( mr_p->rkey );\r
err_reg_phys_mr:\r
err_invalid_parm:\r
err_unsupported:\r
+err_mem:\r
HCA_PRINT_EXIT(TRACE_LEVEL_ERROR , HCA_DBG_MEMORY,\r
("completes with ERROR status %s\n", IB_GET_ERR_STR(status)));\r
return status;\r
{\r
ib_api_status_t status;\r
int err;\r
- PREP_IBDEV_FOR_PRINT(((struct ib_mr *)h_mr)->device)\r
+ struct ib_mr *ib_mr = (struct ib_mr *)h_mr;\r
+ PREP_IBDEV_FOR_PRINT(ib_mr->device)\r
\r
HCA_ENTER(HCA_DBG_SHIM);\r
\r
+ if (mthca_is_livefish(to_mdev(ib_mr->device))) {\r
+ kfree(ib_mr);\r
+ goto done;\r
+ }\r
+\r
// sanity checks\r
if( !cl_is_blockable() ) {\r
status = IB_UNSUPPORTED;\r
goto err_dereg_mr;\r
}\r
\r
+done:\r
status = IB_SUCCESS;\r
\r
err_dereg_mr:\r
\r
}\r
\r
+/*
+ * HCA verbs provider: allocates a fast memory region on the given PD.
+ * Must be called at passive level (cl_is_blockable).
+ */
+ib_api_status_t
+mlnx_alloc_fmr (
+ IN const ib_pd_handle_t h_pd,
+ IN const ib_fmr_create_t *p_fmr_create,
+ OUT ib_fmr_handle_t* const ph_fmr
+ )
+{
+ ib_api_status_t status;
+ int err;
+ struct ib_fmr * fmr_p;
+ struct ib_pd *ib_pd_p = (struct ib_pd *)h_pd;
+ struct ib_fmr_attr fmr_attr;
+ PREP_IBDEV_FOR_PRINT(ib_pd_p->device)
+
+ HCA_ENTER(HCA_DBG_MEMORY);
+
+ // sanity checks
+ if( !cl_is_blockable() ) {
+ status = IB_UNSUPPORTED;
+ goto err_unsupported;
+ } 
+ if (!p_fmr_create ) {
+ status = IB_INVALID_PARAMETER;
+ goto err_invalid_parm; 
+ }
+ //TODO: temporary limitation until something like Gen2's FMR pool is implemented
+ if (p_fmr_create->max_maps != 1) {
+ status = IB_INVALID_PARAMETER;
+ goto err_invalid_parm; 
+ }
+
+ // prepare parameters
+ RtlZeroMemory(&fmr_attr, sizeof(struct ib_fmr_attr));
+ fmr_attr.max_maps = p_fmr_create->max_maps;
+ fmr_attr.max_pages = p_fmr_create->max_pages;
+ /* NOTE(review): page_size is assigned to page_shift — presumably the
+ * IBAL field holds log2(page size); confirm against ib_fmr_create_t. */
+ fmr_attr.page_shift = p_fmr_create->page_size;
+
+ // allocate the FMR 
+ fmr_p = ibv_alloc_fmr(ib_pd_p, 
+ map_qp_ibal_acl(p_fmr_create->access_ctrl), &fmr_attr);
+ if (IS_ERR(fmr_p)) {
+ err = PTR_ERR(fmr_p);
+ HCA_PRINT(TRACE_LEVEL_ERROR , HCA_DBG_MEMORY ,
+ ("mthca_alloc_fmr failed (%d)\n", err));
+ status = errno_to_iberr(err);
+ goto err_alloc_fmr;
+ }
+
+ // results
+ if (ph_fmr) *ph_fmr = (ib_fmr_handle_t)fmr_p;
+ status = IB_SUCCESS;
+
+err_alloc_fmr:
+err_invalid_parm:
+err_unsupported:
+ HCA_PRINT_EXIT(TRACE_LEVEL_ERROR , HCA_DBG_MEMORY,
+ ("completes with ERROR status %s\n", IB_GET_ERR_STR(status)));
+ return status;
+
+}
+\r
+/*
+ * HCA verbs provider: maps a physical page list into an allocated FMR.
+ * On success returns the page-aligned virtual address actually used and
+ * the FMR's lkey/rkey.  Must be called at passive level.
+ */
+ib_api_status_t
+mlnx_map_phys_fmr (
+ IN const ib_fmr_handle_t h_fmr,
+ IN const void* __ptr64 page_list,
+ IN const int list_len,
+ IN OUT void** __ptr64 const p_vaddr,
+ OUT net32_t* const p_lkey,
+ OUT net32_t* const p_rkey
+ )
+{
+ int err;
+ ib_api_status_t status;
+ struct ib_fmr *ib_fmr = (struct ib_fmr *)h_fmr;
+ void *vaddr = PAGE_ALIGN(*p_vaddr);
+ uint64_t *paddr_list = (uint64_t *__ptr64)page_list;
+ PREP_IBDEV_FOR_PRINT(ib_fmr->device)
+
+ HCA_ENTER(HCA_DBG_MEMORY);
+
+ // sanity checks
+ if( !cl_is_blockable() ) {
+ status = IB_UNSUPPORTED;
+ goto err_unsupported;
+ }
+
+ // mapping
+ err = ibv_map_phys_fmr(ib_fmr, paddr_list, list_len, (uint64_t)(ULONG_PTR)vaddr);
+ if (err) {
+ status = errno_to_iberr(err);
+ HCA_PRINT(TRACE_LEVEL_ERROR , HCA_DBG_MEMORY ,
+ ("ibv_map_phys_fmr failed (%d) for fmr %p\n", err, h_fmr));
+ goto err_map_fmr;
+ }
+
+ // return the results
+ *p_vaddr = vaddr;
+ *p_lkey = ib_fmr->lkey;
+ /* NOTE(review): unlike the MR path, rkey is returned without cl_hton32 —
+ * confirm the byte order callers expect. */
+ *p_rkey = ib_fmr->rkey;
+
+ status = IB_SUCCESS;
+
+err_map_fmr:
+err_unsupported:
+ HCA_PRINT_EXIT(TRACE_LEVEL_ERROR , HCA_DBG_MEMORY,
+ ("completes with ERROR status %s\n", IB_GET_ERR_STR(status)));
+ return status;
+}
+\r
+\r
+\r
+/*
+ * HCA verbs provider: removes the mapping from an FMR.  The FMR is put
+ * on a singleton list because ibv_unmap_fmr operates on a list of FMRs.
+ * Must be called at passive level.
+ */
+ib_api_status_t
+mlnx_unmap_fmr (
+ IN const ib_fmr_handle_t h_fmr)
+{
+ ib_api_status_t status;
+ int err;
+ struct ib_fmr *ib_fmr = (struct ib_fmr *)h_fmr;
+ struct list_head fmr_list;
+ PREP_IBDEV_FOR_PRINT(ib_fmr->device)
+
+ HCA_ENTER(HCA_DBG_MEMORY);
+
+ // sanity checks
+ if( !cl_is_blockable() ) {
+ status = IB_UNSUPPORTED;
+ goto err_unsupported;
+ } 
+ 
+ // deregister 
+ INIT_LIST_HEAD(&fmr_list);
+ list_add_tail(&ib_fmr->list, &fmr_list);
+ err = ibv_unmap_fmr(&fmr_list);
+ if (err) {
+ status = errno_to_iberr(err);
+ HCA_PRINT(TRACE_LEVEL_ERROR , HCA_DBG_MEMORY ,
+ ("ibv_unmap_fmr failed (%d) for fmr %p\n", err, h_fmr));
+ goto err_unmap_fmr;
+ }
+
+ status = IB_SUCCESS;
+ 
+err_unmap_fmr:
+err_unsupported:
+ HCA_PRINT_EXIT(TRACE_LEVEL_ERROR , HCA_DBG_MEMORY,
+ ("completes with ERROR status %s\n", IB_GET_ERR_STR(status)));
+ return status;
+ 
+ 
+}
+ \r
+\r
+ \r
+/*
+ * HCA verbs provider: deallocates an FMR.  Must be called at passive
+ * level.
+ */
+ib_api_status_t
+mlnx_dealloc_fmr (
+ IN ib_fmr_handle_t const h_fmr
+ )
+{
+ ib_api_status_t status;
+ int err;
+ struct ib_fmr *fmr = (struct ib_fmr *)h_fmr;
+ PREP_IBDEV_FOR_PRINT(fmr->device)
+
+ HCA_ENTER(HCA_DBG_MEMORY);
+
+ // sanity checks
+ if( !cl_is_blockable() ) {
+ status = IB_UNSUPPORTED;
+ goto err_unsupported;
+ }
+
+ // deallocate the FMR
+ err = ibv_dealloc_fmr((struct ib_fmr *)h_fmr);
+ if (err) {
+ status = errno_to_iberr(err);
+ HCA_PRINT(TRACE_LEVEL_ERROR , HCA_DBG_MEMORY ,
+ ("ibv_dealloc_fmr failed (%d) for fmr %p\n", err, h_fmr));
+ goto err_dealloc_fmr;
+ }
+
+ status = IB_SUCCESS;
+
+err_dealloc_fmr:
+err_unsupported:
+ HCA_PRINT_EXIT(TRACE_LEVEL_ERROR , HCA_DBG_MEMORY,
+ ("completes with ERROR status %s\n", IB_GET_ERR_STR(status)));
+ return status;
+
+}
+\r
+\r
+\r
/*\r
* Memory Window Verbs.\r
*/\r
p_interface->register_smr = mlnx_register_smr;\r
p_interface->deregister_mr = mlnx_deregister_mr;\r
\r
+ p_interface->alloc_fmr = mlnx_alloc_fmr;\r
+ p_interface->map_phys_fmr = mlnx_map_phys_fmr;\r
+ p_interface->unmap_fmr = mlnx_unmap_fmr;\r
+ p_interface->dealloc_fmr = mlnx_dealloc_fmr;\r
+\r
p_interface->create_mw = mlnx_create_mw;\r
p_interface->query_mw = mlnx_query_mw;\r
p_interface->destroy_mw = mlnx_destroy_mw;\r
\r
HCA_ENTER( HCA_DBG_PNP );\r
\r
+ // there will be no resources for "livefish" (PCI memory controller mode)\r
+ if (!pHcaResList || !pHostResList)\r
+ goto done;\r
+ \r
p_ext = (hca_dev_ext_t*)p_dev_obj->DeviceExtension;\r
\r
ASSERT( pHostResList->List[0].PartialResourceList.Version == 1 );\r
status = STATUS_UNSUCCESSFUL;\r
}\r
\r
+done:\r
HCA_EXIT( HCA_DBG_PNP );\r
return status;\r
}\r
return status;\r
}\r
\r
- /*\r
- * Change the state since the PnP callback can happen\r
- * before the callback returns.\r
- */\r
- p_ext->state = HCA_STARTED;\r
-\r
/*leo: get node GUID */\r
{\r
int err;\r
}\r
\r
\r
+/* release the resources, allocated in hca_start */
static void
-hca_release_resources(
+__hca_release_resources(
	IN				DEVICE_OBJECT* const	p_dev_obj )
{
-	hca_dev_ext_t		*p_ext;
-	POWER_STATE		powerState;
+	hca_dev_ext_t		*p_ext = (hca_dev_ext_t*)p_dev_obj->DeviceExtension;

	HCA_ENTER( HCA_DBG_PNP );

-	p_ext = (hca_dev_ext_t*)p_dev_obj->DeviceExtension;
-
	if( p_ext->state == HCA_REGISTERED )
	{
		CL_ASSERT( p_ext->ci_ifc.deregister_ca );
		p_ext->ci_ifc.deregister_ca( p_ext->hca.guid );
		/* Release AL's CI interface. */
		p_ext->ci_ifc.wdm.InterfaceDereference( p_ext->ci_ifc.wdm.Context );
+		p_ext->state = HCA_STARTED;
	}

+	/* dequeue HCA */
+	mlnx_hca_remove( &p_ext->hca );
+
+	/* NOTE(review): pointers are NULLed below so this routine is safe to
+	 * run more than once — keep that invariant when editing. */
	if( p_ext->pnp_target_entry )
	{
		ASSERT( p_ext->pnp_ifc_entry );
		IoUnregisterPlugPlayNotification( p_ext->pnp_target_entry );
+		p_ext->pnp_target_entry = NULL;
	}

-	if( p_ext->pnp_ifc_entry )
+	if( p_ext->pnp_ifc_entry ) {
		IoUnregisterPlugPlayNotification( p_ext->pnp_ifc_entry );
+		p_ext->pnp_ifc_entry = NULL;
+	}

-	if( p_ext->p_al_file_obj )
+	if( p_ext->p_al_file_obj ) {
		ObDereferenceObject( p_ext->p_al_file_obj );
+		p_ext->p_al_file_obj = NULL;
+	}

	mthca_remove_one( p_ext );

-	if( p_ext->p_dma_adapter )
+	if( p_ext->p_dma_adapter ) {
		p_ext->p_dma_adapter->DmaOperations->PutDmaAdapter( p_ext->p_dma_adapter );
+		p_ext->p_dma_adapter = NULL;
+	}

	hca_disable_pci( &p_ext->hcaBusIfc );

	//cl_event_destroy( &p_ext->mutex );
	__UnmapHcaMemoryResources( p_dev_obj );

+	/* Device returns to the freshly-added state. */
+	p_ext->state = HCA_ADDED;
+
+	HCA_EXIT( HCA_DBG_PNP );
+}
+\r
+\r
+static void\r
+hca_release_resources(\r
+ IN DEVICE_OBJECT* const p_dev_obj )\r
+{\r
+ hca_dev_ext_t *p_ext;\r
+ POWER_STATE powerState;\r
+\r
+ HCA_ENTER( HCA_DBG_PNP );\r
+\r
+ p_ext = (hca_dev_ext_t*)p_dev_obj->DeviceExtension;\r
+\r
+ /* release all the resources, allocated in hca_start */\r
+ __hca_release_resources(p_dev_obj);\r
+\r
/* Notify the power manager that the device is powered down. */\r
powerState.DeviceState = PowerDeviceD3;\r
PoSetPowerState ( p_ext->cl_ext.p_self_do, DevicePowerState, powerState );\r
if( pCaps->DeviceD2 )\r
{\r
HCA_PRINT( TRACE_LEVEL_WARNING,HCA_DBG_PNP,\r
- ("WARINING: Device reports support for DeviceD2 power state.\n"));\r
+ ("WARNING: Device reports support for DeviceD2 power state.\n"));\r
pCaps->DeviceD2 = FALSE;\r
}\r
\r
if( pCaps->SystemWake != PowerSystemUnspecified )\r
{\r
HCA_PRINT( TRACE_LEVEL_WARNING ,HCA_DBG_PNP,\r
- ("WARINING: Device reports support for system wake.\n"));\r
+ ("WARNING: Device reports support for system wake.\n"));\r
pCaps->SystemWake = PowerSystemUnspecified;\r
}\r
\r
if( pCaps->DeviceWake != PowerDeviceUnspecified )\r
{\r
HCA_PRINT( TRACE_LEVEL_WARNING, HCA_DBG_PNP,\r
- ("WARINING: Device reports support for device wake.\n"));\r
+ ("WARNING: Device reports support for device wake.\n"));\r
pCaps->DeviceWake = PowerDeviceUnspecified;\r
}\r
\r
IoFreeWorkItem( p_ext->pPoWorkItem );\r
p_ext->pPoWorkItem = NULL;\r
\r
+ /* restart the HCA */\r
status = mthca_init_one( p_ext );\r
if( !NT_SUCCESS( status ) )\r
goto done;\r
IN IRP *p_irp,\r
IN void *context )\r
{\r
- NTSTATUS status;\r
+ NTSTATUS status = STATUS_SUCCESS;\r
hca_dev_ext_t *p_ext;\r
IO_STACK_LOCATION *pIoStack;\r
+ KIRQL irql = KeGetCurrentIrql( );\r
+ \r
+ \r
\r
HCA_ENTER( HCA_DBG_PO );\r
\r
PoSetPowerState( p_dev_obj, DevicePowerState,\r
pIoStack->Parameters.Power.State );\r
\r
- /* Process in a work item - mthca_start blocks. */\r
- ASSERT( !p_ext->pPoWorkItem );\r
- p_ext->pPoWorkItem = IoAllocateWorkItem( p_dev_obj );\r
- if( !p_ext->pPoWorkItem )\r
- {\r
- IoInvalidateDeviceState( p_ext->cl_ext.p_pdo );\r
+ if (irql > PASSIVE_LEVEL) {\r
+ /* Process in a work item - mthca_start blocks. */\r
+ ASSERT( !p_ext->pPoWorkItem );\r
+ p_ext->pPoWorkItem = IoAllocateWorkItem( p_dev_obj );\r
+ if( !p_ext->pPoWorkItem )\r
+ {\r
+ IoInvalidateDeviceState( p_ext->cl_ext.p_pdo );\r
\r
- PoStartNextPowerIrp( p_irp );\r
- IoReleaseRemoveLock( &p_ext->cl_ext.remove_lock, p_irp );\r
+ PoStartNextPowerIrp( p_irp );\r
+ IoReleaseRemoveLock( &p_ext->cl_ext.remove_lock, p_irp );\r
\r
- return STATUS_SUCCESS;\r
+ return STATUS_SUCCESS;\r
+ }\r
+\r
+ /* Process in work item callback. */\r
+ IoMarkIrpPending( p_irp );\r
+ IoQueueWorkItem( p_ext->pPoWorkItem, __PowerUpCb, DelayedWorkQueue, p_irp );\r
}\r
+ else {\r
\r
- /* Process in work item callback. */\r
- IoMarkIrpPending( p_irp );\r
- IoQueueWorkItem( p_ext->pPoWorkItem, __PowerUpCb, DelayedWorkQueue, p_irp );\r
- /* TODO: Start the HCA. */\r
- status = mthca_init_one( p_ext );\r
- if( !NT_SUCCESS( status ) )\r
- goto done;\r
+ /* restart the HCA */\r
+ status = mthca_init_one( p_ext );\r
+ if( !NT_SUCCESS( status ) )\r
+ goto done;\r
\r
- if( p_ext->p_al_dev )\r
- status = __hca_register( p_dev_obj );\r
+ if( p_ext->p_al_dev )\r
+ status = __hca_register( p_dev_obj );\r
+ }\r
\r
done:\r
if( !NT_SUCCESS( status ) )\r
\r
PoSetPowerState( p_dev_obj, DevicePowerState,\r
pIoStack->Parameters.Power.State );\r
+\r
if( p_ext->state == HCA_REGISTERED )\r
{\r
+ CL_ASSERT( p_ext->ci_ifc.deregister_ca );\r
+ CL_ASSERT( p_ext->p_al_dev );\r
+ CL_ASSERT( p_ext->p_al_file_obj );\r
/* Notify AL that the CA is being removed. */\r
p_ext->ci_ifc.deregister_ca( p_ext->hca.guid );\r
/* Release AL's CI interface. */\r
p_ext->ci_ifc.wdm.InterfaceDereference( p_ext->ci_ifc.wdm.Context );\r
-\r
p_ext->state = HCA_STARTED;\r
}\r
\r
* @list_len: The number of pages in page_list.
* @iova: The I/O virtual address to use with the mapped region.
*/
-static inline int ib_map_phys_fmr(struct ib_fmr *fmr,
+int ibv_map_phys_fmr(struct ib_fmr *fmr,
u64 *page_list, int list_len,
- u64 iova)
-{
- return fmr->device->map_phys_fmr(fmr, page_list, list_len, iova);
-}
+ u64 iova);
/**
* ibv_unmap_fmr - Removes the mapping from a list of fast memory regions.
{
struct ib_pd *pd;
- pd = device->alloc_pd(device, context, p_umv_buf);
+	// direct call is a must, because "livefish" devices don't fill the driver i/f table
+ pd = mthca_alloc_pd(device, context, p_umv_buf);
if (!IS_ERR(pd)) {
pd->device = device;
int ibv_dealloc_pd(struct ib_pd *pd)
{
+ if (mthca_is_livefish(to_mdev(pd->device)))
+ goto done;
+
// we need first to release list of AV MRs to decrease pd->usecnt
if (pd->ucontext) {
struct ib_mr *ib_mr, *tmp;
return -EBUSY;
}
+done:
HCA_PRINT(TRACE_LEVEL_INFORMATION ,HCA_DBG_CQ ,("PD%d use cnt %d, pd_handle %p, ctx %p \n",
((struct mthca_pd*)pd)->pd_num, pd->usecnt, pd, pd->ucontext));
- return pd->device->dealloc_pd(pd);
+	// direct call is a must, because "livefish" devices don't fill the driver i/f table
+ return mthca_dealloc_pd(pd);
}
/* Address handles */
{
struct ib_mr *mr;
- mr = pd->device->get_dma_mr(pd, mr_access_flags);
+	// direct call is a must, because "livefish" devices don't fill the driver i/f table
+ mr = mthca_get_dma_mr(pd, mr_access_flags);
if (!IS_ERR(mr)) {
mr->device = pd->device;
return -EBUSY;
pd = mr->pd;
- ret = mr->device->dereg_mr(mr);
+	// direct call is a must, because "livefish" devices don't fill the driver i/f table
+ ret = mthca_dereg_mr(mr);
if (!ret) {
atomic_dec(&pd->usecnt);
HCA_PRINT(TRACE_LEVEL_INFORMATION ,HCA_DBG_MEMORY ,("PD%d use cnt %d, pd_handle %p, ctx %p \n",
return fmr;
}
+/* Maps a list of physical pages into the FMR via the device's verb. */
+int ibv_map_phys_fmr(struct ib_fmr *fmr,
+		  u64 *page_list, int list_len,
+		  u64 iova)
+{
+	return fmr->device->map_phys_fmr(fmr, page_list, list_len, iova);
+}
+
int ibv_unmap_fmr(struct list_head *fmr_list)
{
struct ib_fmr *fmr;
; empty since we don't support W9x/Me\r
\r
[HCA.DeviceSection.ntx86]\r
-%MT23108.DeviceDesc% = MTHCA.DDInstall, PCI\VEN_15B3&DEV_5A44\r
-%MT25208.DeviceDesc% = MTHCA.DDInstall, PCI\VEN_15B3&DEV_6278\r
-%MT25218.DeviceDesc% = MTHCA.DDInstall, PCI\VEN_15B3&DEV_6282\r
-%MT24204.DeviceDesc% = MTHCA.DDInstall, PCI\VEN_15B3&DEV_5E8C\r
-%MT25204.DeviceDesc% = MTHCA.DDInstall, PCI\VEN_15B3&DEV_6274\r
+%MT23108.DeviceDesc%=MTHCA.DDInstall, PCI\VEN_15B3&DEV_5A44\r
+%MT23109.DeviceDesc%=MTHCA.DDInstall, PCI\VEN_15B3&DEV_5A45\r
+%MT25208.DeviceDesc%=MTHCA.DDInstall, PCI\VEN_15B3&DEV_6278\r
+%MT25209.DeviceDesc%=MTHCA.DDInstall, PCI\VEN_15B3&DEV_6279\r
+%MT25218.DeviceDesc%=MTHCA.DDInstall, PCI\VEN_15B3&DEV_6282\r
+%MT24204.DeviceDesc%=MTHCA.DDInstall, PCI\VEN_15B3&DEV_5E8C\r
+%MT24205.DeviceDesc%=MTHCA.DDInstall, PCI\VEN_15B3&DEV_5E8D\r
+%MT25204.DeviceDesc%=MTHCA.DDInstall, PCI\VEN_15B3&DEV_6274\r
+%MT25205.DeviceDesc%=MTHCA.DDInstall, PCI\VEN_15B3&DEV_6275\r
\r
[HCA.DeviceSection.ntamd64]\r
-%MT23108.DeviceDesc% = MTHCA.DDInstall, PCI\VEN_15B3&DEV_5A44\r
-%MT25208.DeviceDesc% = MTHCA.DDInstall, PCI\VEN_15B3&DEV_6278\r
-%MT25218.DeviceDesc% = MTHCA.DDInstall, PCI\VEN_15B3&DEV_6282\r
-%MT24204.DeviceDesc% = MTHCA.DDInstall, PCI\VEN_15B3&DEV_5E8C\r
-%MT25204.DeviceDesc% = MTHCA.DDInstall, PCI\VEN_15B3&DEV_6274\r
+%MT23108.DeviceDesc%=MTHCA.DDInstall, PCI\VEN_15B3&DEV_5A44\r
+%MT23109.DeviceDesc%=MTHCA.DDInstall, PCI\VEN_15B3&DEV_5A45\r
+%MT25208.DeviceDesc%=MTHCA.DDInstall, PCI\VEN_15B3&DEV_6278\r
+%MT25209.DeviceDesc%=MTHCA.DDInstall, PCI\VEN_15B3&DEV_6279\r
+%MT25218.DeviceDesc%=MTHCA.DDInstall, PCI\VEN_15B3&DEV_6282\r
+%MT24204.DeviceDesc%=MTHCA.DDInstall, PCI\VEN_15B3&DEV_5E8C\r
+%MT24205.DeviceDesc%=MTHCA.DDInstall, PCI\VEN_15B3&DEV_5E8D\r
+%MT25204.DeviceDesc%=MTHCA.DDInstall, PCI\VEN_15B3&DEV_6274\r
+%MT25205.DeviceDesc%=MTHCA.DDInstall, PCI\VEN_15B3&DEV_6275\r
\r
[HCA.DeviceSection.ntia64]\r
-%MT23108.DeviceDesc% = MTHCA.DDInstall, PCI\VEN_15B3&DEV_5A44\r
-%MT25208.DeviceDesc% = MTHCA.DDInstall, PCI\VEN_15B3&DEV_6278\r
-%MT25218.DeviceDesc% = MTHCA.DDInstall, PCI\VEN_15B3&DEV_6282\r
-%MT24204.DeviceDesc% = MTHCA.DDInstall, PCI\VEN_15B3&DEV_5E8C\r
-%MT25204.DeviceDesc% = MTHCA.DDInstall, PCI\VEN_15B3&DEV_6274\r
+%MT23108.DeviceDesc%=MTHCA.DDInstall, PCI\VEN_15B3&DEV_5A44\r
+%MT23109.DeviceDesc%=MTHCA.DDInstall, PCI\VEN_15B3&DEV_5A45\r
+%MT25208.DeviceDesc%=MTHCA.DDInstall, PCI\VEN_15B3&DEV_6278\r
+%MT25209.DeviceDesc%=MTHCA.DDInstall, PCI\VEN_15B3&DEV_6279\r
+%MT25218.DeviceDesc%=MTHCA.DDInstall, PCI\VEN_15B3&DEV_6282\r
+%MT24204.DeviceDesc%=MTHCA.DDInstall, PCI\VEN_15B3&DEV_5E8C\r
+%MT24205.DeviceDesc%=MTHCA.DDInstall, PCI\VEN_15B3&DEV_5E8D\r
+%MT25204.DeviceDesc%=MTHCA.DDInstall, PCI\VEN_15B3&DEV_6274\r
+%MT25205.DeviceDesc%=MTHCA.DDInstall, PCI\VEN_15B3&DEV_6275\r
\r
[MTHCA.DDInstall.ntx86]\r
CopyFiles = MTHCA.CopyFiles\r
MTL="Mellanox Technologies Ltd."\r
MTHCA.ServiceDesc = "Driver for Mellanox InfiniHost Devices"\r
MT23108.DeviceDesc="InfiniHost (MT23108) - Mellanox InfiniBand HCA"\r
+MT23109.DeviceDesc="InfiniHost (MT23109) - Mellanox InfiniBand HCA (burner device)"\r
MT25208.DeviceDesc="InfiniHost (MT25208) - Mellanox InfiniBand HCA for PCI Express"\r
+MT25209.DeviceDesc="InfiniHost (MT25209) - Mellanox InfiniBand HCA for PCI Express (burner device)"\r
MT25218.DeviceDesc="InfiniHost III Ex (MT25218) - Mellanox InfiniBand HCA for PCI Express"\r
MT24204.DeviceDesc="InfiniHost III Lx (MT24204) - Mellanox InfiniBand HCA for PCI Express"\r
+MT24205.DeviceDesc="InfiniHost III Lx (MT24205) - Mellanox InfiniBand HCA for PCI Express (burner device)"\r
MT25204.DeviceDesc="InfiniHost III Lx (MT25204) - Mellanox InfiniBand HCA for PCI Express"\r
+MT25205.DeviceDesc="InfiniHost III Lx (MT25205) - Mellanox InfiniBand HCA for PCI Express (burner device)"\r
DiskId = "Mellanox InfiniBand HCA installation disk"\r
SPSVCINST_NULL = 0x0\r
SPSVCINST_ASSOCSERVICE = 0x00000002\r
goto out;
{
- //TODO: Questions:
- // Can it once be on behalf of user request, which would require UserRequest and UserMode
- // Can it be alertable ?
NTSTATUS res;
LARGE_INTEGER interval;
+ BOOLEAN interruptible = (ExGetPreviousMode() == UserMode) ? TRUE : FALSE;
interval.QuadPart = (-10)* (__int64)timeout;
- res = KeWaitForSingleObject( &context->event, Executive, KernelMode, FALSE, &interval );
+ res = KeWaitForSingleObject( &context->event, Executive, KernelMode, interruptible, &interval );
if (res != STATUS_SUCCESS) {
err = -EBUSY;
goto out;
MTHCA_FLAG_FMR = 1 << 6,
MTHCA_FLAG_MEMFREE = 1 << 7,
MTHCA_FLAG_PCIE = 1 << 8,
- MTHCA_FLAG_LIVEFISH = 1 << 9
+ MTHCA_FLAG_SINAI_OPT = 1 << 9,
+ MTHCA_FLAG_LIVEFISH = 1 << 10
};
enum {
u8 status;
int p;
+ ext->hca.mdev = NULL;
if (mdev) {
+ mdev->state = MTHCA_DEV_UNINITIALIZED;
if (mthca_is_livefish(mdev))
goto done;
mthca_unregister_device(mdev);
mthca_cmd_cleanup(mdev);
done:
kfree(mdev);
- ext->hca.mdev = NULL;
}
}
#define MTHCA_MPT_STATUS_SW 0xF0
#define MTHCA_MPT_STATUS_HW 0x00
+#define SINAI_FMR_KEY_INC 0x1000000
static void dump_mtt(u32 print_lvl, __be64 *mtt_entry ,int list_len)
{
return tavor_key_to_hw_index(key);
}
+
+static inline u32 adjust_key(struct mthca_dev *dev, u32 key)
+{
+ if (dev->mthca_flags & MTHCA_FLAG_SINAI_OPT)
+ return ((key << 20) & 0x800000) | (key & 0x7fffff);
+ else
+ return key;
+}
+
int mthca_mr_alloc(struct mthca_dev *dev, u32 pd, int buffer_size_shift,
u64 iova, u64 total_size, mthca_mpt_access_t access, struct mthca_mr *mr)
{
int err = -ENOMEM;
int i;
CPU_2_BE64_PREP;
-
+
if (mr->attr.page_shift < 12 || mr->attr.page_shift >= 32)
return -EINVAL;
key = mthca_alloc(&dev->mr_table.mpt_alloc);
if (key == -1)
return -ENOMEM;
+ key = adjust_key(dev, key);
idx = key & (dev->limits.num_mpts - 1);
mr->ibmr.rkey = mr->ibmr.lkey = hw_index_to_key(dev, key);
BUG_ON(!mr->mem.arbel.mpt);
} else
mr->mem.tavor.mpt = (struct mthca_mpt_entry*)((u8*)dev->mr_table.tavor_fmr.mpt_base +
- sizeof *(mr->mem.tavor.mpt) * idx);
+ sizeof *(mr->mem.tavor.mpt) * idx);
mr->mtt = __mthca_alloc_mtt(dev, list_len, dev->mr_table.fmr_mtt_buddy);
if (IS_ERR(mr->mtt))
err = mthca_SW2HW_MPT(dev, mailbox,
key & (dev->limits.num_mpts - 1),
&status);
+
if (err) {
HCA_PRINT(TRACE_LEVEL_WARNING ,HCA_DBG_MEMORY ,("SW2HW_MPT failed (%d)\n", err));
goto err_out_mailbox_free;
return err;
}
+
int mthca_free_fmr(struct mthca_dev *dev, struct mthca_fmr *fmr)
{
if (fmr->maps)
return 0;
}
+
static inline int mthca_check_fmr(struct mthca_fmr *fmr, u64 *page_list,
int list_len, u64 iova)
{
return -EINVAL;
/* Trust the user not to pass misaligned data in page_list */
- #ifdef WIN_TO_BE_CHANGED
- //TODO: do we need that ?
+ #if 0
for (i = 0; i < list_len; ++i) {
if (page_list[i] & ~page_mask)
return -EINVAL;
writeb(MTHCA_MPT_STATUS_SW, fmr->mem.tavor.mpt);
for (i = 0; i < list_len; ++i) {
- // BUG in compiler: it can't perform OR on u64 !!! We perform OR on the low dword
+ __be64 mtt_entry;
u64 val = page_list[i];
- __be64 mtt_entry = cl_hton64(val);
+ // BUG in compiler: it can't perform OR on u64 !!! We perform OR on the low dword
*(PULONG)&val |= MTHCA_MTT_FLAG_PRESENT;
+ mtt_entry = cl_hton64(val);
mthca_write64_raw(mtt_entry, fmr->mem.tavor.mtts + i);
}
++fmr->maps;
key = arbel_key_to_hw_index(fmr->ibmr.lkey);
- key += dev->limits.num_mpts;
+ if (dev->mthca_flags & MTHCA_FLAG_SINAI_OPT)
+ key += SINAI_FMR_KEY_INC;
+ else
+ key += dev->limits.num_mpts;
fmr->ibmr.lkey = fmr->ibmr.rkey = arbel_hw_index_to_key(key);
*(u8 *) fmr->mem.arbel.mpt = MTHCA_MPT_STATUS_SW;
return 0;
}
+
void mthca_tavor_fmr_unmap(struct mthca_dev *dev, struct mthca_fmr *fmr)
{
u32 key;
writeb(MTHCA_MPT_STATUS_SW, fmr->mem.tavor.mpt);
}
+
void mthca_arbel_fmr_unmap(struct mthca_dev *dev, struct mthca_fmr *fmr)
{
u32 key;
else
dev->mthca_flags |= MTHCA_FLAG_FMR;
+ if (dev->mthca_flags & MTHCA_FLAG_SINAI_OPT)
+ HCA_PRINT(TRACE_LEVEL_INFORMATION ,HCA_DBG_MEMORY ,("Memory key throughput optimization activated.\n"));
+
err = mthca_buddy_init(&dev->mr_table.mtt_buddy,
fls(dev->limits.num_mtt_segs - 1));
ioremap(dev->mr_table.mtt_base,
(1 << i) * MTHCA_MTT_SEG_SIZE,
&dev->mr_table.tavor_fmr.mtt_base_size );
+
if (!dev->mr_table.tavor_fmr.mtt_base) {
HCA_PRINT(TRACE_LEVEL_WARNING ,HCA_DBG_MEMORY ,("MTT ioremap for FMR failed.\n"));
err = -ENOMEM;
goto err_reserve_fmr;
dev->mr_table.fmr_mtt_buddy =
- &dev->mr_table.tavor_fmr.mtt_buddy;
+ &dev->mr_table.tavor_fmr.mtt_buddy;
} else
dev->mr_table.fmr_mtt_buddy = &dev->mr_table.mtt_buddy;
struct mthca_fmr *fmr;
int err;
- fmr = kmalloc(sizeof *fmr, GFP_KERNEL);
+ fmr = kzalloc(sizeof *fmr, GFP_KERNEL);
if (!fmr)
return ERR_PTR(-ENOMEM);
- memcpy(&fmr->attr, fmr_attr, sizeof *fmr_attr);
+ RtlCopyMemory(&fmr->attr, fmr_attr, sizeof *fmr_attr);
err = mthca_fmr_alloc(to_mdev(pd->device), to_mpd(pd)->pd_num,
map_qp_mpt(acc), fmr);
mlnx_pre_destroy_qp (\r
IN const ib_qp_handle_t h_uvp_qp)\r
{\r
- UNREFERENCED_PARAMETER(h_uvp_qp);\r
- UVP_ENTER(UVP_DBG_SHIM);\r
- UVP_EXIT(UVP_DBG_SHIM);\r
- return IB_SUCCESS;\r
+ int err;\r
+ mlnx_ual_qp_info_t *p_qp_info = (mlnx_ual_qp_info_t *)((void *) h_uvp_qp);\r
+\r
+ UVP_ENTER(UVP_DBG_SHIM);\r
+\r
+ mthca_destroy_qp_pre(p_qp_info->ibv_qp);\r
+\r
+ UVP_EXIT(UVP_DBG_SHIM);\r
+ return IB_SUCCESS;\r
}\r
\r
void\r
{\r
int err;\r
mlnx_ual_qp_info_t *p_qp_info = (mlnx_ual_qp_info_t *)((void *) h_uvp_qp);\r
- UNREFERENCED_PARAMETER(ioctl_status);\r
\r
- UVP_ENTER(UVP_DBG_SHIM);\r
+ UVP_ENTER(UVP_DBG_SHIM);\r
\r
- CL_ASSERT(p_qp_info || p_qp_info->ibv_qp);\r
+	CL_ASSERT(p_qp_info && p_qp_info->ibv_qp);\r
\r
- err = p_qp_info->h_uvp_pd->p_hobul->ibv_ctx->ops.destroy_qp( p_qp_info->ibv_qp );\r
- if (err) \r
- UVP_PRINT(TRACE_LEVEL_ERROR ,UVP_DBG_SHIM , ("mthca_destroy_qp failed (%d)\n", err));\r
+ mthca_destroy_qp_post(p_qp_info->ibv_qp, (int)ioctl_status);\r
+ if (ioctl_status == IB_SUCCESS) \r
+ cl_free (p_qp_info);\r
+ else\r
+ UVP_PRINT(TRACE_LEVEL_ERROR ,UVP_DBG_SHIM , ("mthca_destroy_qp failed (%d)\n", ioctl_status));\r
\r
- cl_free (p_qp_info);\r
- UVP_EXIT(UVP_DBG_SHIM);\r
- return;\r
+ UVP_EXIT(UVP_DBG_SHIM);\r
+ return;\r
}\r
\r
mthca_create_qp_pre,
mthca_create_qp_post,
mthca_modify_qp,
- mthca_destroy_qp,
+ NULL,
NULL, /* post_send */
NULL, /* post_recv */
mthca_attach_mcast,
struct ibv_create_qp_resp *resp);
int mthca_modify_qp(struct ibv_qp *qp, struct ibv_qp_attr *attr,
enum ibv_qp_attr_mask attr_mask);
-int mthca_destroy_qp(struct ibv_qp *qp);
+void mthca_destroy_qp_pre(struct ibv_qp *qp);
+void mthca_destroy_qp_post(struct ibv_qp *qp, int ret);
void mthca_init_qp_indices(struct mthca_qp *qp);
int mthca_tavor_post_send(struct ibv_qp *ibqp, struct _ib_send_wr *wr,
struct _ib_send_wr **bad_wr);
if(ibqp->state == IBV_QPS_RESET) {
ret = -EBUSY;
- *bad_wr = wr;
+ if (bad_wr)
+ *bad_wr = wr;
goto err_busy;
}
for (nreq = 0; wr; ++nreq, wr = wr->p_next) {
qp->sq.head, qp->sq.tail,
qp->sq.max, nreq));
ret = -ENOMEM;
- *bad_wr = wr;
+ if (bad_wr)
+ *bad_wr = wr;
goto out;
}
if (opcode == MTHCA_OPCODE_INVALID) {
UVP_PRINT(TRACE_LEVEL_ERROR ,UVP_DBG_QP ,("SQ %06x opcode invalid\n",ibqp->qp_num));
ret = -EINVAL;
- *bad_wr = wr;
+ if (bad_wr)
+ *bad_wr = wr;
goto out;
}
if ((int)(int)wr->num_ds > qp->sq.max_gs) {
UVP_PRINT(TRACE_LEVEL_ERROR ,UVP_DBG_QP ,("SQ %06x too many gathers\n",ibqp->qp_num));
ret = -ERANGE;
- *bad_wr = wr;
+ if (bad_wr)
+ *bad_wr = wr;
goto out;
}
//TODO sleybo:
if (s > qp->max_inline_data) {
ret = -1;
- *bad_wr = wr;
+ if (bad_wr)
+ *bad_wr = wr;
goto out;
}
ind = qp->rq.next_ind;
if(ibqp->state == IBV_QPS_RESET) {
ret = -EBUSY;
- *bad_wr = wr;
+ if (bad_wr)
+ *bad_wr = wr;
goto err_busy;
}
qp->rq.head, qp->rq.tail,
qp->rq.max, nreq));
ret = -ENOMEM;
- *bad_wr = wr;
+ if (bad_wr)
+ *bad_wr = wr;
goto out;
}
if (unlikely((int)wr->num_ds > qp->rq.max_gs)) {
UVP_PRINT(TRACE_LEVEL_ERROR ,UVP_DBG_QP ,("RQ %06x too many gathers\n",ibqp->qp_num));
ret = -ERANGE;
- *bad_wr = wr;
+ if (bad_wr)
+ *bad_wr = wr;
goto out;
}
ind = qp->sq.head & (qp->sq.max - 1);
if(ibqp->state == IBV_QPS_RESET) {
ret = -EBUSY;
- *bad_wr = wr;
+ if (bad_wr)
+ *bad_wr = wr;
goto err_busy;
}
for (nreq = 0; wr; ++nreq, wr = wr->p_next) {
qp->sq.head, qp->sq.tail,
qp->sq.max, nreq));
ret = -ENOMEM;
- *bad_wr = wr;
+ if (bad_wr)
+ *bad_wr = wr;
goto out;
}
if ((int)wr->num_ds > qp->sq.max_gs) {
UVP_PRINT(TRACE_LEVEL_ERROR ,UVP_DBG_QP ,("SQ %06x full too many gathers\n",ibqp->qp_num));
ret = -ERANGE;
- *bad_wr = wr;
+ if (bad_wr)
+ *bad_wr = wr;
goto out;
}
if (s > qp->max_inline_data) {
ret = -1;
- *bad_wr = wr;
+ if (bad_wr)
+ *bad_wr = wr;
goto out;
}
if (opcode == MTHCA_OPCODE_INVALID) {
UVP_PRINT(TRACE_LEVEL_ERROR ,UVP_DBG_QP ,("SQ %06x opcode invalid\n",ibqp->qp_num));
ret = -EINVAL;
- *bad_wr = wr;
+ if (bad_wr)
+ *bad_wr = wr;
goto out;
}
ind = qp->rq.head & (qp->rq.max - 1);
if(ibqp->state == IBV_QPS_RESET) {
ret = -EBUSY;
- *bad_wr = wr;
+ if (bad_wr)
+ *bad_wr = wr;
goto err_busy;
}
for (nreq = 0; wr; ++nreq, wr = wr->p_next) {
qp->rq.head, qp->rq.tail,
qp->rq.max, nreq));
ret = -ENOMEM;
- *bad_wr = wr;
+ if (bad_wr)
+ *bad_wr = wr;
goto out;
}
if (unlikely((int)wr->num_ds > qp->rq.max_gs)) {
UVP_PRINT(TRACE_LEVEL_ERROR ,UVP_DBG_QP ,("RQ %06x full too many scatter\n",ibqp->qp_num));
ret = -ERANGE;
- *bad_wr = wr;
+ if (bad_wr)
+ *bad_wr = wr;
goto out;
}
return ret;
}
-int mthca_destroy_qp(struct ibv_qp *qp)
+
+void mthca_destroy_qp_pre(struct ibv_qp *qp)
{
int ret;
if (qp->send_cq != qp->recv_cq)
cl_spinlock_release(&to_mcq(qp->recv_cq)->lock);
cl_spinlock_release(&to_mcq(qp->send_cq)->lock);
+}
- if (mthca_is_memfree(qp->pd->context)) {
- mthca_free_db(to_mctx(qp->pd->context)->db_tab, MTHCA_DB_TYPE_RQ,
- to_mqp(qp)->rq.db_index);
- mthca_free_db(to_mctx(qp->pd->context)->db_tab, MTHCA_DB_TYPE_SQ,
- to_mqp(qp)->sq.db_index);
+void mthca_destroy_qp_post(struct ibv_qp *qp, int ret)
+{
+ if (ret) {
+ cl_spinlock_acquire(&to_mcq(qp->send_cq)->lock);
+ if (qp->send_cq != qp->recv_cq)
+ cl_spinlock_acquire(&to_mcq(qp->recv_cq)->lock);
+ mthca_store_qp(to_mctx(qp->pd->context), qp->qp_num, to_mqp(qp));
+ if (qp->send_cq != qp->recv_cq)
+ cl_spinlock_release(&to_mcq(qp->recv_cq)->lock);
+ cl_spinlock_release(&to_mcq(qp->send_cq)->lock);
}
+ else {
+ if (mthca_is_memfree(qp->pd->context)) {
+ mthca_free_db(to_mctx(qp->pd->context)->db_tab, MTHCA_DB_TYPE_RQ,
+ to_mqp(qp)->rq.db_index);
+ mthca_free_db(to_mctx(qp->pd->context)->db_tab, MTHCA_DB_TYPE_SQ,
+ to_mqp(qp)->sq.db_index);
+ }
- cl_spinlock_destroy(&((struct mthca_qp *)qp)->sq.lock);
- cl_spinlock_destroy(&((struct mthca_qp *)qp)->rq.lock);
+ cl_spinlock_destroy(&((struct mthca_qp *)qp)->sq.lock);
+ cl_spinlock_destroy(&((struct mthca_qp *)qp)->rq.lock);
#ifdef NOT_USE_VIRTUAL_ALLOC
- cl_free(to_mqp(qp)->buf);
+ cl_free(to_mqp(qp)->buf);
#else
- VirtualFree( to_mqp(qp)->buf, 0, MEM_RELEASE);
+ VirtualFree( to_mqp(qp)->buf, 0, MEM_RELEASE);
#endif
- cl_free(to_mqp(qp)->wrid);
- cl_free(to_mqp(qp));
+ cl_free(to_mqp(qp)->wrid);
+ cl_free(to_mqp(qp));
+ }
- return 0;
}
int mthca_attach_mcast(struct ibv_qp *qp, union ibv_gid *gid, uint16_t lid)
* ib_reg_mem, ib_reg_phys, ib_reg_shared\r
*****/\r
\r
+#ifdef CL_KERNEL\r
+\r
+/****f* Access Layer/ib_create_fmr\r
+* NAME\r
+* ib_create_fmr\r
+*\r
+* DESCRIPTION\r
+* Creates a fast memory region.\r
+*\r
+* SYNOPSIS\r
+*/\r
+AL_EXPORT ib_api_status_t AL_API\r
+ib_create_fmr(\r
+ IN const ib_pd_handle_t h_pd,\r
+ IN const ib_fmr_create_t* const p_fmr_create,\r
+ OUT ib_fmr_handle_t* const ph_fmr);\r
+/*\r
+* PARAMETERS\r
+* h_pd\r
+* [in] A handle to the protection domain on which the fast memory\r
+* region is created.\r
+* p_fmr_create\r
+* [in] References the attributes used to create the fast memory\r
+* region.\r
+* ph_fmr\r
+* [out] Upon successful completion, a handle to the newly created\r
+* fast memory region.\r
+*\r
+* RETURN VALUES\r
+* IB_SUCCESS\r
+* The memory region attributes were modified successfully.\r
+*\r
+* IB_INVALID_MR_HANDLE\r
+* The memory region handle was invalid.\r
+*\r
+* IB_INVALID_PARAMETER\r
+* A reference to the lkey or rkey was not provided or the specified\r
+* modify mask is invalid.\r
+*\r
+* IB_INVALID_SETTING\r
+* The specified memory region attributes are invalid.\r
+*\r
+* IB_INVALID_PD_HANDLE\r
+* The protection domain handle was invalid.\r
+*\r
+* IB_INSUFFICIENT_RESOURCES\r
+* There were insufficient resources currently available on the channel\r
+* adapter to modify the memory region.\r
+*\r
+* IB_UNSUPPORTED\r
+* The requested access rights are not supported by the channel adapter.\r
+*\r
+* IB_INVALID_PERMISSION\r
+* The requested access rights are invalid.\r
+*\r
+* IB_RESOURCE_BUSY\r
+* The memory region has windows bound to it.\r
+*\r
+* NOTES\r
+* //TODO \r
+*\r
+* SEE ALSO\r
+* ib_destroy_fmr, ib_fmr_create_t\r
+*****/\r
+\r
+\r
+/****f* Access Layer/ib_map_fmr\r
+* NAME\r
+* ib_map_fmr\r
+*\r
+* DESCRIPTION\r
+* //TODO\r
+*\r
+* SYNOPSIS\r
+*/\r
+AL_EXPORT ib_api_status_t AL_API\r
+ib_map_phys_fmr(\r
+ IN const ib_fmr_handle_t h_fmr,\r
+ IN const void* __ptr64 paddr_list,\r
+ IN const int list_len,\r
+ IN OUT void** __ptr64 const p_vaddr,\r
+ OUT net32_t* const p_lkey,\r
+ OUT net32_t* const p_rkey);\r
+/*\r
+* PARAMETERS\r
+* h_fmr\r
+* [in] Handle to the fast memory region that these pages map to \r
+* page_list\r
+* [in] array of phys address\r
+* list_len\r
+* [in] number of pages in the list\r
+* p_vaddr\r
+* [in/out] On input, references the requested virtual address for the\r
+* start of the FMR. On output, references the actual\r
+* virtual address assigned to the FMR.\r
+* p_lkey\r
+* [out] The local access key associated with this registered memory\r
+* region.\r
+* p_rkey\r
+* [out] A key that may be used by a remote end-point when performing\r
+* RDMA or atomic operations to this registered memory region.\r
+*\r
+\r
+* RETURN VALUES\r
+* IB_SUCCESS\r
+* The memory region attributes were modified successfully.\r
+*\r
+* IB_INVALID_MR_HANDLE\r
+* The memory region handle was invalid.\r
+*\r
+* IB_INVALID_PARAMETER\r
+* A reference to the lkey or rkey was not provided or the specified\r
+* modify mask is invalid.\r
+*\r
+* IB_INVALID_SETTING\r
+* The specified memory region attributes are invalid.\r
+*\r
+* IB_INVALID_PD_HANDLE\r
+* The protection domain handle was invalid.\r
+*\r
+* IB_INSUFFICIENT_RESOURCES\r
+* There were insufficient resources currently available on the channel\r
+* adapter to modify the memory region.\r
+*\r
+* IB_UNSUPPORTED\r
+* The requested access rights are not supported by the channel adapter.\r
+*\r
+* IB_INVALID_PERMISSION\r
+* The requested access rights are invalid.\r
+*\r
+* IB_RESOURCE_BUSY\r
+* The memory region has windows bound to it.\r
+*\r
+* NOTES\r
+* //TODO \r
+*\r
+* SEE ALSO\r
+* ib_destroy_fmr, ib_fmr_create_t\r
+*****/\r
+\r
+\r
+/****f* Access Layer/ib_unmap_fmr\r
+* NAME\r
+* ib_unmap_fmr\r
+*\r
+* DESCRIPTION\r
+* //TODO\r
+*\r
+* SYNOPSIS\r
+*/\r
+AL_EXPORT ib_api_status_t AL_API\r
+ib_unmap_fmr(\r
+ IN const ib_fmr_handle_t h_fmr);\r
+/*\r
+* PARAMETERS\r
+* h_fmr\r
+* [in] A handle to the fast memory region to unmap.\r
+*\r
+* RETURN VALUES\r
+* IB_SUCCESS\r
+* The memory region attributes were modified successfully.\r
+*\r
+* IB_INVALID_MR_HANDLE\r
+* The memory region handle was invalid.\r
+*\r
+* IB_INVALID_PARAMETER\r
+* A reference to the lkey or rkey was not provided or the specified\r
+* modify mask is invalid.\r
+*\r
+* IB_INVALID_SETTING\r
+* The specified memory region attributes are invalid.\r
+*\r
+* IB_INVALID_PD_HANDLE\r
+* The protection domain handle was invalid.\r
+*\r
+* IB_INSUFFICIENT_RESOURCES\r
+* There were insufficient resources currently available on the channel\r
+* adapter to modify the memory region.\r
+*\r
+* IB_UNSUPPORTED\r
+* The requested access rights are not supported by the channel adapter.\r
+*\r
+* IB_INVALID_PERMISSION\r
+* The requested access rights are invalid.\r
+*\r
+* IB_RESOURCE_BUSY\r
+* The memory region has windows bound to it.\r
+*\r
+* NOTES\r
+* //TODO \r
+*\r
+* SEE ALSO\r
+* ib_destroy_fmr, ib_fmr_create_t\r
+*****/\r
+\r
+\r
+/****f* Access Layer/ib_destroy_fmr\r
+* NAME\r
+* ib_destroy_fmr\r
+*\r
+* DESCRIPTION\r
+* Destroys a fast memory region.\r
+*\r
+* SYNOPSIS\r
+*/\r
+AL_EXPORT ib_api_status_t AL_API\r
+ib_destroy_fmr(\r
+ IN ib_fmr_handle_t const h_fmr);\r
+/*\r
+* PARAMETERS\r
+* h_fmr\r
+* [in] A handle to the registered memory region being modified.\r
+*\r
+* RETURN VALUES\r
+* IB_SUCCESS\r
+* The memory region attributes were modified successfully.\r
+*\r
+* IB_INVALID_MR_HANDLE\r
+* The memory region handle was invalid.\r
+*\r
+* IB_INVALID_PARAMETER\r
+* A reference to the lkey or rkey was not provided or the specified\r
+* modify mask is invalid.\r
+*\r
+* IB_INVALID_SETTING\r
+* The specified memory region attributes are invalid.\r
+*\r
+* IB_INVALID_PD_HANDLE\r
+* The protection domain handle was invalid.\r
+*\r
+* IB_INSUFFICIENT_RESOURCES\r
+* There were insufficient resources currently available on the channel\r
+* adapter to modify the memory region.\r
+*\r
+* IB_UNSUPPORTED\r
+* The requested access rights are not supported by the channel adapter.\r
+*\r
+* IB_INVALID_PERMISSION\r
+* The requested access rights are invalid.\r
+*\r
+* IB_RESOURCE_BUSY\r
+* The memory region has windows bound to it.\r
+*\r
+* NOTES\r
+* //TODO \r
+*\r
+* SEE ALSO\r
+* ib_destroy_fmr, ib_fmr_create_t\r
+*****/\r
+\r
+#endif\r
\r
/****f* Access Layer/ib_create_mw\r
* NAME\r
* definition.\r
*/\r
#define VERBS_MAJOR_VER (0x0001)\r
-#define VERBS_MINOR_VER (0x0001)\r
+#define VERBS_MINOR_VER (0x0002)\r
\r
#define VERBS_VERSION (((VERBS_MAJOR_VER) << 16) | (VERBS_MINOR_VER))\r
#define MK_VERBS_VERSION(maj,min) ((((maj) & 0xFFFF) << 16) | \\r
******\r
*/\r
\r
+#ifdef CL_KERNEL\r
+\r
+/****f* Verbs/ci_alloc_fmr\r
+* NAME\r
+* ci_alloc_fmr -- Allocate a fast memory region with the HCA.\r
+* SYNOPSIS\r
+*/\r
+\r
+typedef ib_api_status_t\r
+(*ci_alloc_fmr) (\r
+ IN const ib_pd_handle_t h_pd,\r
+	IN		ib_fmr_create_t const		*p_fmr_create,\r
+ OUT ib_fmr_handle_t* const ph_fmr);\r
+/*\r
+* DESCRIPTION\r
+* //TODO \r
+* PARAMETERS\r
+* h_pd\r
+* [in] Handle to the PD on which fast memory is being registered\r
+* p_fmr_create\r
+* [in] Attributes of the fast memory region to allocate\r
+* ph_fmr\r
+* [out] Handle to the fast memory region. This handle is used when\r
+* mapping/unmapping the fmr\r
+* RETURN VALUE\r
+* IB_SUCCESS\r
+* Registration with the adapter was successful.\r
+* IB_INSUFFICIENT_RESOURCES\r
+* Insufficient resources to satisfy request.\r
+* IB_INVALID_PARAMETER\r
+* One of the input pointers was NULL.\r
+* IB_INVALID_PD_HANDLE\r
+* Invalid pd handle\r
+* IB_INVALID_PERMISSION\r
+* Invalid access rights.\r
+* NOTES\r
+* The Alloc operation does not map or pin any memory.\r
+* In order to use the FMR, the user needs to call map.\r
+*\r
+* SEE ALSO\r
+* ci_dealloc_fmr, ci_map_phys_fmr, ci_unmap_fmr\r
+******\r
+*/\r
+\r
+\r
+/****f* Verbs/ci_map_phys_fmr\r
+* NAME\r
+* ci_map_phys_fmr -- Map a fast memory region with given page list.\r
+* SYNOPSIS\r
+*/\r
+\r
+typedef ib_api_status_t\r
+(*ci_map_phys_fmr) (\r
+ IN const ib_fmr_handle_t h_fmr,\r
+ IN const void* __ptr64 paddr_list,\r
+ IN const int list_len,\r
+ IN OUT void** __ptr64 const p_vaddr,\r
+ OUT net32_t* const p_lkey,\r
+ OUT net32_t* const p_rkey);\r
+/*\r
+* DESCRIPTION\r
+* //TODO \r
+* PARAMETERS\r
+* h_fmr\r
+* [in] Handle to the fast memory region that these pages map to \r
+* page_list\r
+* [in] array of phys address\r
+* list_len\r
+* [in] number of pages in the list\r
+* p_vaddr\r
+* [in/out] On input, references the requested virtual address for the\r
+* start of the FMR. On output, references the actual\r
+* virtual address assigned to the FMR.\r
+* p_lkey\r
+* [out] The local access key associated with this registered memory\r
+* region.\r
+* p_rkey\r
+* [out] A key that may be used by a remote end-point when performing\r
+* RDMA or atomic operations to this registered memory region.\r
+*\r
+* RETURN VALUE\r
+* IB_SUCCESS\r
+* Registration with the adapter was successful.\r
+* IB_INSUFFICIENT_RESOURCES\r
+* Insufficient resources to satisfy request.\r
+* IB_INVALID_PARAMETER\r
+* One of the input pointers was NULL.\r
+* IB_INVALID_PD_HANDLE\r
+* Invalid pd handle\r
+* IB_INVALID_PERMISSION\r
+* Invalid access rights.\r
+* NOTES\r
+* The Alloc operation does not map or pin any memory.\r
+* In order to use the FMR, the user needs to call map.\r
+*\r
+* SEE ALSO\r
+* ci_dealloc_fmr, ci_alloc_fmr, ci_unmap_fmr\r
+******\r
+*/\r
+\r
+\r
+/****f* Verbs/ci_unmap_fmr\r
+* NAME\r
+* ci_unmap_fmr -- UnMap a fast memory region with given page list.\r
+* SYNOPSIS\r
+*/\r
+\r
+typedef ib_api_status_t\r
+(*ci_unmap_fmr) (\r
+ IN const ib_fmr_handle_t h_fmr);\r
+/*\r
+* DESCRIPTION\r
+* //TODO \r
+* PARAMETERS\r
+* h_fmr\r
+* [in] Handle to the fast memory region that these pages map to \r
+*\r
+* RETURN VALUE\r
+* IB_SUCCESS\r
+* Registration with the adapter was successful.\r
+* IB_INSUFFICIENT_RESOURCES\r
+* Insufficient resources to satisfy request.\r
+* IB_INVALID_PARAMETER\r
+* One of the input pointers was NULL.\r
+* IB_INVALID_PD_HANDLE\r
+* Invalid pd handle\r
+* IB_INVALID_PERMISSION\r
+* Invalid access rights.\r
+* NOTES\r
+* The Alloc operation does not map or pin any memory.\r
+* In order to use the FMR, the user needs to call map.\r
+*\r
+* SEE ALSO\r
+* ci_dealloc_fmr, ci_alloc_fmr, ci_map_phys_fmr\r
+******\r
+*/\r
+\r
+\r
+/****f* Verbs/ci_dealloc_fmr\r
+* NAME\r
+* ci_dealloc_fmr -- Deallocate a fast memory region with the HCA.\r
+* SYNOPSIS\r
+*/\r
+\r
+typedef ib_api_status_t\r
+(*ci_dealloc_fmr) (\r
+ IN ib_fmr_handle_t const h_fmr);\r
+/*\r
+* DESCRIPTION\r
+* //TODO \r
+* PARAMETERS\r
+* h_fmr\r
+* [in] Handle to the fast memory region. This handle is used when\r
+* mapping/unmapping the fmr\r
+* RETURN VALUE\r
+* IB_SUCCESS\r
+* Registration with the adapter was successful.\r
+* IB_INSUFFICIENT_RESOURCES\r
+* Insufficient resources to satisfy request.\r
+* IB_INVALID_PARAMETER\r
+* One of the input pointers was NULL.\r
+* IB_INVALID_PD_HANDLE\r
+* Invalid pd handle\r
+* IB_INVALID_PERMISSION\r
+* Invalid access rights.\r
+* NOTES\r
+* //TODO\r
+\r
+* SEE ALSO\r
+* ci_alloc_fmr, ci_map_phys_fmr, ci_unmap_fmr\r
+******\r
+*/\r
+\r
+#endif\r
+\r
/****f* Verbs/ci_create_mw\r
* NAME\r
* ci_create_mw -- Create a memory window entry for later use\r
ci_register_smr register_smr;\r
ci_deregister_mr deregister_mr;\r
\r
+#ifdef CL_KERNEL\r
+\r
+ /*\r
+ * Fast Memory Management Verbs\r
+ */\r
+ ci_alloc_fmr alloc_fmr;\r
+ ci_map_phys_fmr map_phys_fmr;\r
+ ci_unmap_fmr unmap_fmr;\r
+ ci_dealloc_fmr dealloc_fmr;\r
+\r
+#endif\r
+\r
/*\r
* Memory Window Verbs\r
*/\r
typedef struct _ib_ca* __ptr64 ib_ca_handle_t;\r
typedef struct _ib_pd* __ptr64 ib_pd_handle_t;\r
typedef struct _ib_mr* __ptr64 ib_mr_handle_t;\r
+typedef struct _ib_fmr* __ptr64 ib_fmr_handle_t;\r
typedef struct _ib_mw* __ptr64 ib_mw_handle_t;\r
typedef struct _ib_qp* __ptr64 ib_qp_handle_t;\r
typedef struct _ib_cq* __ptr64 ib_cq_handle_t;\r
IB_INVALID_QP_HANDLE,\r
IB_INVALID_PD_HANDLE,\r
IB_INVALID_MR_HANDLE,\r
+ IB_INVALID_FMR_HANDLE,\r
IB_INVALID_MW_HANDLE,\r
IB_INVALID_MCAST_HANDLE,\r
IB_INVALID_CALLBACK,\r
uint32_t max_mcast_grps;\r
uint32_t max_mcast_qps;\r
uint32_t max_qps_per_mcast_grp;\r
+ uint32_t max_fmr;\r
+ uint32_t max_map_per_fmr;\r
\r
/*\r
* local_ack_delay:\r
* ib_access_t\r
*****/\r
\r
+#ifdef CL_KERNEL\r
+\r
+/****s* Access Layer/ib_fmr_create_t\r
+* NAME\r
+* ib_fmr_create_t\r
+*\r
+* DESCRIPTION\r
+* Information required to create a fast memory region.\r
+*\r
+* SYNOPSIS\r
+*/\r
+typedef struct _ib_fmr_create\r
+{\r
+ int max_pages;\r
+ int max_maps;\r
+ uint8_t page_size;\r
+ ib_access_t access_ctrl;\r
+\r
+} ib_fmr_create_t;\r
+/*\r
+* FIELDS\r
+* max_pages\r
+* max pages in the region.\r
+*\r
+* max_maps\r
+* max times, the region can be mapped before remapping.\r
+*\r
+* page_size\r
+* log2 of the page size (e.g. 12 for 4KB).\r
+*\r
+* access_ctrl\r
+* Access rights of the registered region.\r
+*\r
+* SEE ALSO\r
+* ib_access_t\r
+*****/\r
+\r
+#endif\r
\r
/****s* Access Layer/ib_phys_range_t\r
* NAME\r
* IB resources provided by HCAs.\r
*********/\r
\r
-#define AL_INTERFACE_VERSION (7)\r
+#define AL_INTERFACE_VERSION (8)\r
\r
\r
\r
(*ib_pfn_dereg_mr_t)(\r
IN const ib_mr_handle_t h_mr );\r
\r
+typedef ib_api_status_t\r
+(*ib_pfn_create_fmr_t)(\r
+ IN const ib_pd_handle_t h_pd,\r
+ IN const ib_fmr_create_t* const p_fmr_create,\r
+ OUT ib_fmr_handle_t* const ph_mr);\r
+\r
+typedef ib_api_status_t\r
+(*ib_pfn_map_phys_fmr_t)(\r
+ IN const ib_fmr_handle_t h_fmr,\r
+ IN const void* __ptr64 paddr_list,\r
+ IN const int list_len,\r
+ IN OUT void** __ptr64 const p_vaddr,\r
+ OUT net32_t* const p_lkey,\r
+ OUT net32_t* const p_rkey);\r
+\r
+typedef ib_api_status_t\r
+(*ib_pfn_unmap_fmr_t)(\r
+ IN const ib_fmr_handle_t h_fmr);\r
+\r
+typedef ib_api_status_t\r
+(*ib_pfn_destroy_fmr_t)(\r
+ IN ib_fmr_handle_t const h_fmr);\r
+\r
typedef ib_api_status_t\r
(*ib_pfn_create_mw_t)(\r
IN const ib_pd_handle_t h_pd,\r
ib_pfn_close_al_t close_al;\r
ib_pfn_get_err_str_t get_err_str;\r
ib_pfn_get_wc_status_str_t get_wc_status_str;\r
+ ib_pfn_create_fmr_t create_fmr;\r
+ ib_pfn_map_phys_fmr_t map_phys_fmr;\r
+ ib_pfn_unmap_fmr_t unmap_fmr;\r
+ ib_pfn_destroy_fmr_t destroy_fmr;\r
\r
} ib_al_ifc_t;\r
\r
typedef
struct _map_crspace {
- void * va; /* address of CRSPACE, mapped to user space */
+ unsigned __int64 va; /* address of CRSPACE, mapped to user space */
unsigned long size; /* size of CRSPACE, mapped to user space */
+ unsigned long reserved; /* to align on quadword boundary */
} map_crspace;
/* Definitions for hca_driver commands*/
MTCR_NO_USB=1\r
\r
\r
-!if !defined(MTCR_NO_USB)\r
+!if "$(_BUILDARCH)" == "x86"\r
TARGETLIBS=$(TARGETLIBS)\\r
.\usb\usbi2cio.lib \\r
.\usb\I2cBrdg.lib\r
goto ErrExit;\r
}\r
\r
- mf->s.ptr = mf->cr_map.va;\r
+ mf->s.ptr = (void*)(ULONG_PTR)mf->cr_map.va;\r
}\r
\r
} else if (dev_id == DEVASYS_DEV_ID) {\r
void vstat_print_ca_attr(int idx, ib_ca_attr_t* ca_attr, BOOLEAN fullPrint){\r
int i;\r
\r
- printf("\thca_idx=%d\n",idx);\r
+ printf("\n\thca_idx=%d\n",idx);\r
printf("\tpci_location={BUS=NA,DEV/FUNC=NA}\n");\r
printf("\tvendor_id=0x%04x\n", ca_attr->vend_id);\r
printf("\tvendor_part_id=0x%04x\n", ca_attr->dev_id);\r
printf("\tmax_mcast_grp_num = 0x%x (Maximum Number of multicast groups)\n", ca_attr->max_mcast_grps);\r
printf("\tmax_mcast_qp_attach_num = 0x%x (Maximum number of QP per multicast group)\n", ca_attr->max_qps_per_mcast_grp);\r
printf("\tmax_ah_num = 0x%x (Maximum number of address handles)\n", ca_attr->max_addr_handles);\r
- printf("\tmax_num_fmr = 0 (Maximum number FMRs)\n");\r
- printf("\tmax_num_map_per_fmr = 0 (Maximum number of (re)maps per FMR before an unmap operation in required)\n");\r
+ printf("\tmax_num_fmr = 0x%x (Maximum number FMRs)\n", ca_attr->max_fmr);\r
+ printf("\tmax_num_map_per_fmr = 0x%x (Maximum number of (re)maps per FMR before an unmap operation in required)\n", ca_attr->max_map_per_fmr);\r
}else{\r
printf("\tnum_phys_ports=%d\n", ca_attr->num_ports);\r
}\r
uint32_t max_mcast_grps;
uint32_t max_mcast_qps;
uint32_t max_qps_per_mcast_grp;
+ uint32_t max_fmr;
+ uint32_t max_map_per_fmr;
/*
* local_ack_delay:
uint32_t max_mcast_grps;
uint32_t max_mcast_qps;
uint32_t max_qps_per_mcast_grp;
+ uint32_t max_fmr;
+ uint32_t max_map_per_fmr;
/*
* local_ack_delay: