#include "al_mgr.h"\r
#include "al_pnp.h"\r
#include "al_qp.h"\r
+#include "al_srq.h"\r
#include "ib_common.h"\r
\r
\r
case IB_AE_SQ_DRAINED:\r
case IB_AE_WQ_REQ_ERROR:\r
case IB_AE_WQ_ACCESS_ERROR:\r
+ case IB_AE_SRQ_QP_LAST_WQE_REACHED:\r
qp_async_event_cb( &p_event_item->event_rec );\r
break;\r
\r
+ case IB_AE_SRQ_LIMIT_REACHED:\r
+ case IB_AE_SRQ_CATAS_ERROR:\r
+ srq_async_event_cb( &p_event_item->event_rec );\r
+ break;\r
+\r
case IB_AE_CQ_ERROR:\r
cq_async_event_cb( &p_event_item->event_rec );\r
break;\r
* Different types of AL object's. Note that the upper byte signifies\r
* a subtype.\r
*/\r
-#define AL_OBJ_TYPE_UNKNOWN 0\r
+#define AL_OBJ_TYPE_UNKNOWN 0\r
#define AL_OBJ_TYPE_H_AL 1\r
#define AL_OBJ_TYPE_H_QP 2\r
#define AL_OBJ_TYPE_H_AV 3\r
#define AL_OBJ_TYPE_H_CONN 9\r
#define AL_OBJ_TYPE_H_LISTEN 10\r
#define AL_OBJ_TYPE_H_IOC 11\r
-#define AL_OBJ_TYPE_H_SVC_ENTRY 12\r
+#define AL_OBJ_TYPE_H_SVC_ENTRY 12\r
#define AL_OBJ_TYPE_H_PNP 13\r
#define AL_OBJ_TYPE_H_SA_REQ 14\r
-#define AL_OBJ_TYPE_H_MCAST 15\r
+#define AL_OBJ_TYPE_H_MCAST 15\r
#define AL_OBJ_TYPE_H_ATTACH 16\r
#define AL_OBJ_TYPE_H_MAD 17\r
-#define AL_OBJ_TYPE_H_MAD_POOL 18\r
-#define AL_OBJ_TYPE_H_POOL_KEY 19\r
+#define AL_OBJ_TYPE_H_MAD_POOL 18\r
+#define AL_OBJ_TYPE_H_POOL_KEY 19\r
#define AL_OBJ_TYPE_H_MAD_SVC 20\r
#define AL_OBJ_TYPE_CI_CA 21\r
#define AL_OBJ_TYPE_CM 22\r
#define AL_OBJ_TYPE_MAD_POOL 27\r
#define AL_OBJ_TYPE_MAD_DISP 28\r
#define AL_OBJ_TYPE_AL_MGR 29\r
-#define AL_OBJ_TYPE_PNP_MGR 30\r
-#define AL_OBJ_TYPE_IOC_PNP_MGR 31\r
-#define AL_OBJ_TYPE_IOC_PNP_SVC 32\r
+#define AL_OBJ_TYPE_PNP_MGR 30\r
+#define AL_OBJ_TYPE_IOC_PNP_MGR 31\r
+#define AL_OBJ_TYPE_IOC_PNP_SVC 32\r
#define AL_OBJ_TYPE_QUERY_SVC 33\r
#define AL_OBJ_TYPE_MCAST_SVC 34\r
-#define AL_OBJ_TYPE_SA_REQ_SVC 35\r
-#define AL_OBJ_TYPE_RES_MGR 36\r
+#define AL_OBJ_TYPE_SA_REQ_SVC 35\r
+#define AL_OBJ_TYPE_RES_MGR 36\r
#define AL_OBJ_TYPE_H_CA_ATTR 37\r
-#define AL_OBJ_TYPE_H_PNP_EVENT 38\r
+#define AL_OBJ_TYPE_H_PNP_EVENT 38\r
#define AL_OBJ_TYPE_H_SA_REG 39\r
#define AL_OBJ_TYPE_H_FMR 40\r
-#define AL_OBJ_TYPE_INVALID 41 /* Must be last type. */\r
+#define AL_OBJ_TYPE_H_SRQ 41\r
+#define AL_OBJ_TYPE_INVALID 42 /* Must be last type. */\r
\r
/* Kernel object for a user-mode app. */\r
#define AL_OBJ_SUBTYPE_UM_EXPORT 0x80000000\r
*/\r
boolean_t hdl_valid;\r
#endif\r
-\r
} al_obj_t;\r
\r
\r
WPP_DEFINE_BIT( AL_DBG_AV)\\r
WPP_DEFINE_BIT( AL_DBG_CQ)\\r
WPP_DEFINE_BIT( AL_DBG_QP)\\r
- WPP_DEFINE_BIT( AL_DBG_RES3) \\r
+ WPP_DEFINE_BIT( AL_DBG_SRQ)\\r
WPP_DEFINE_BIT( AL_DBG_MW)\\r
WPP_DEFINE_BIT( AL_DBG_RES4) \\r
WPP_DEFINE_BIT( AL_DBG_PROXY_CB)\\r
WPP_DEFINE_BIT( AL_DBG_AV)\\r
WPP_DEFINE_BIT( AL_DBG_CQ)\\r
WPP_DEFINE_BIT( AL_DBG_QP)\\r
- WPP_DEFINE_BIT( AL_DBG_RES3) \\r
+ WPP_DEFINE_BIT( AL_DBG_SRQ)\\r
WPP_DEFINE_BIT( AL_DBG_MW)\\r
WPP_DEFINE_BIT( AL_DBG_RES4) \\r
WPP_DEFINE_BIT( AL_DBG_PROXY_CB)\\r
#define AL_DBG_AV (1 << 17)\r
#define AL_DBG_CQ (1 << 18)\r
#define AL_DBG_QP (1 << 19)\r
+#define AL_DBG_SRQ (1 << 20)\r
#define AL_DBG_MW (1 << 21)\r
#define AL_DBG_PROXY_CB (1 << 23)\r
#define AL_DBG_UAL (1 << 24)\r
#define AL_DEVICE_NAME L"\\Device\\ibal"\r
#define ALDEV_KEY (0x3B) /* Matches FILE_DEVICE_INFINIBAND from wdm.h */\r
\r
-#define AL_IOCTL_VERSION (2)\r
+#define AL_IOCTL_VERSION (3)\r
\r
#ifdef CL_KERNEL\r
\r
ual_query_av_ioctl_cmd,\r
ual_modify_av_ioctl_cmd,\r
ual_destroy_av_ioctl_cmd,\r
+ ual_create_srq_ioctl_cmd,\r
+ ual_query_srq_ioctl_cmd,\r
+ ual_modify_srq_ioctl_cmd,\r
+ ual_destroy_srq_ioctl_cmd,\r
ual_create_qp_ioctl_cmd,\r
ual_query_qp_ioctl_cmd,\r
ual_modify_qp_ioctl_cmd,\r
ual_destroy_mw_ioctl_cmd,\r
ual_post_send_ioctl_cmd,\r
ual_post_recv_ioctl_cmd,\r
+ ual_post_srq_recv_ioctl_cmd,\r
ual_peek_cq_ioctl_cmd,\r
ual_poll_cq_ioctl_cmd,\r
ual_rearm_cq_ioctl_cmd,\r
#define UAL_QUERY_AV IOCTL_CODE(ALDEV_KEY, ual_query_av_ioctl_cmd)\r
#define UAL_MODIFY_AV IOCTL_CODE(ALDEV_KEY, ual_modify_av_ioctl_cmd)\r
#define UAL_DESTROY_AV IOCTL_CODE(ALDEV_KEY, ual_destroy_av_ioctl_cmd)\r
+#define UAL_CREATE_SRQ IOCTL_CODE(ALDEV_KEY, ual_create_srq_ioctl_cmd)\r
+#define UAL_QUERY_SRQ IOCTL_CODE(ALDEV_KEY, ual_query_srq_ioctl_cmd)\r
+#define UAL_MODIFY_SRQ IOCTL_CODE(ALDEV_KEY, ual_modify_srq_ioctl_cmd)\r
+#define UAL_DESTROY_SRQ IOCTL_CODE(ALDEV_KEY, ual_destroy_srq_ioctl_cmd)\r
#define UAL_CREATE_QP IOCTL_CODE(ALDEV_KEY, ual_create_qp_ioctl_cmd)\r
#define UAL_QUERY_QP IOCTL_CODE(ALDEV_KEY, ual_query_qp_ioctl_cmd)\r
#define UAL_MODIFY_QP IOCTL_CODE(ALDEV_KEY, ual_modify_qp_ioctl_cmd)\r
#define UAL_DESTROY_MW IOCTL_CODE(ALDEV_KEY, ual_destroy_mw_ioctl_cmd)\r
#define UAL_POST_SEND IOCTL_CODE(ALDEV_KEY, ual_post_send_ioctl_cmd)\r
#define UAL_POST_RECV IOCTL_CODE(ALDEV_KEY, ual_post_recv_ioctl_cmd)\r
+#define UAL_POST_SRQ_RECV IOCTL_CODE(ALDEV_KEY, ual_post_srq_recv_ioctl_cmd)\r
#define UAL_PEEK_CQ IOCTL_CODE(ALDEV_KEY, ual_peek_cq_ioctl_cmd)\r
#define UAL_POLL_CQ IOCTL_CODE(ALDEV_KEY, ual_poll_cq_ioctl_cmd)\r
#define UAL_REARM_CQ IOCTL_CODE(ALDEV_KEY, ual_rearm_cq_ioctl_cmd)\r
#include "al_mw.h"\r
#include "al_pd.h"\r
#include "al_qp.h"\r
+#include "al_srq.h"\r
#include "al_verbs.h"\r
\r
#include "ib_common.h"\r
cl_free( h_pd );\r
}\r
\r
+ib_api_status_t\r
+ib_create_srq(\r
+ IN const ib_pd_handle_t h_pd,\r
+ IN const ib_srq_attr_t* const p_srq_attr,\r
+ IN const void* const srq_context,\r
+ IN const ib_pfn_event_cb_t pfn_srq_event_cb OPTIONAL,\r
+ OUT ib_srq_handle_t* const ph_srq )\r
+{\r
+ ib_api_status_t status;\r
+\r
+ AL_ENTER( AL_DBG_SRQ );\r
+\r
+ if( AL_OBJ_INVALID_HANDLE( h_pd, AL_OBJ_TYPE_H_PD ) )\r
+ {\r
+ AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_PD_HANDLE\n") );\r
+ return IB_INVALID_PD_HANDLE;\r
+ }\r
+\r
+ if( !p_srq_attr || !ph_srq)\r
+ {\r
+ AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_PARAMETER\n") );\r
+ return IB_INVALID_PARAMETER;\r
+ }\r
+\r
+ if( !p_srq_attr->max_wr)\r
+ {\r
+ AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_MAX_WRS\n") );\r
+ return IB_INVALID_MAX_WRS;\r
+ }\r
+\r
+ if (h_pd->obj.p_ci_ca && h_pd->obj.p_ci_ca->p_pnp_attr)\r
+ {\r
+ if (p_srq_attr->max_wr > h_pd->obj.p_ci_ca->p_pnp_attr->max_srq_wrs)\r
+ {\r
+ AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_MAX_WRS\n") );\r
+ return IB_INVALID_MAX_WRS;\r
+ }\r
+ if (p_srq_attr->max_sge > h_pd->obj.p_ci_ca->p_pnp_attr->max_srq_sges)\r
+ {\r
+ AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_MAX_SGE\n") );\r
+ return IB_INVALID_MAX_SGE;\r
+ }\r
+ }\r
+ \r
+ status = create_srq(\r
+ h_pd, p_srq_attr, srq_context, pfn_srq_event_cb, ph_srq, NULL );\r
+\r
+ /* Release the reference taken in init_al_obj (init_base_srq). */\r
+ if( status == IB_SUCCESS )\r
+ deref_al_obj( &(*ph_srq)->obj );\r
+\r
+ AL_EXIT( AL_DBG_SRQ );\r
+ return status;\r
+}\r
\r
\r
ib_api_status_t\r
{\r
CA_ERROR_REC,\r
QP_ERROR_REC,\r
+ SRQ_ERROR_REC,\r
CQ_ERROR_REC,\r
MCAST_REC,\r
MAD_SEND_REC,\r
return IB_INVALID_PARAMETER;\r
}\r
\r
+ if (p_qp_create->h_srq && \r
+ AL_OBJ_INVALID_HANDLE( p_qp_create->h_srq, AL_OBJ_TYPE_H_SRQ ) )\r
+ {\r
+ AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_SRQ_HANDLE\n") );\r
+ return IB_INVALID_SRQ_HANDLE;\r
+ }\r
+ \r
/* Allocate a QP. */\r
status = alloc_qp( p_qp_create->qp_type, &h_qp );\r
if( status != IB_SUCCESS )\r
cq_attach_qp( h_qp->h_recv_cq, &h_qp->recv_cq_rel );\r
cq_attach_qp( h_qp->h_send_cq, &h_qp->send_cq_rel );\r
\r
+ h_qp->h_srq = p_qp_create->h_srq;\r
+ h_qp->srq_rel.p_child_obj = (cl_obj_t*)h_qp;\r
+ if (h_qp->h_srq)\r
+ srq_attach_qp( h_qp->h_srq, &h_qp->srq_rel );\r
+\r
h_qp->num = qp_attr.num;\r
\r
return IB_SUCCESS;\r
/* Multicast membership gets cleaned up by object hierarchy. */\r
cq_detach_qp( h_qp->h_recv_cq, &h_qp->recv_cq_rel );\r
cq_detach_qp( h_qp->h_send_cq, &h_qp->send_cq_rel );\r
+ if (h_qp->h_srq)\r
+ srq_detach_qp( h_qp->h_srq, &h_qp->srq_rel );\r
}\r
}\r
\r
deref_al_obj( &h_qp->h_recv_cq->obj );\r
if( h_qp->h_send_cq )\r
deref_al_obj( &h_qp->h_send_cq->obj );\r
+ if( h_qp->h_srq )\r
+ deref_al_obj( &h_qp->h_srq->obj );\r
}\r
}\r
\r
p_qp_attr->h_rq_cq = h_qp->h_recv_cq;\r
p_qp_attr->h_sq_cq = h_qp->h_send_cq;\r
p_qp_attr->qp_type = h_qp->type;\r
+ p_qp_attr->h_srq = h_qp->h_srq;\r
\r
AL_EXIT( AL_DBG_QP );\r
return IB_SUCCESS;\r
cl_obj_rel_t recv_cq_rel;\r
cl_obj_rel_t send_cq_rel;\r
\r
+ ib_srq_handle_t h_srq;\r
+ cl_obj_rel_t srq_rel;\r
+\r
ib_pfn_event_cb_t pfn_event_cb;\r
\r
ib_pfn_modify_qp_t pfn_modify_qp;\r
--- /dev/null
+/*
+ * Copyright (c) 2005 SilverStorm Technologies. All rights reserved.
+ * Copyright (c) 1996-2003 Intel Corporation. All rights reserved.
+ *
+ * This software is available to you under the OpenIB.org BSD license
+ * below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id: al_qp.c 1611 2006-08-20 14:48:55Z leonid $
+ */
+
+#include <complib/cl_async_proc.h>
+#include <complib/cl_memory.h>
+#include <complib/cl_timer.h>
+
+#include "al.h"
+#include "al_ca.h"
+#include "al_debug.h"
+#if defined(EVENT_TRACING)
+#ifdef offsetof
+#undef offsetof
+#endif
+#include "al_srq.tmh"
+#endif
+#include "al_mgr.h"
+#include "al_mr.h"
+#include "al_pd.h"
+#include "al_srq.h"
+#include "al_verbs.h"
+
+#include "ib_common.h"
+
+/*
+ * Function prototypes.
+ */
+void
+destroying_srq(
+ IN struct _al_obj *p_obj );
+
+void
+cleanup_srq(
+ IN al_obj_t *p_obj );
+
+void
+free_srq(
+ IN al_obj_t *p_obj );
+
+
+/*
+ * Destroy an SRQ.  Fails with IB_RESOURCE_BUSY while any QP is still
+ * bound to it; otherwise initiates asynchronous destruction of the
+ * AL object, invoking pfn_destroy_cb when teardown completes.
+ */
+ib_api_status_t
+ib_destroy_srq(
+	IN		const	ib_srq_handle_t				h_srq,
+	IN		const	ib_pfn_destroy_cb_t			pfn_destroy_cb OPTIONAL )
+{
+	AL_ENTER( AL_DBG_SRQ );
+
+	if( AL_OBJ_INVALID_HANDLE( h_srq, AL_OBJ_TYPE_H_SRQ ) )
+	{
+		AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_SRQ_HANDLE\n") );
+		return IB_INVALID_SRQ_HANDLE;
+	}
+
+	/* Don't destroy while there are bound QPs. */
+	cl_spinlock_acquire( &h_srq->obj.lock );
+	if (!cl_is_qlist_empty( &h_srq->qp_list ))
+	{
+		cl_spinlock_release( &h_srq->obj.lock );
+		AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_RESOURCE_BUSY\n") );
+		return IB_RESOURCE_BUSY;
+	}
+	cl_spinlock_release( &h_srq->obj.lock );
+
+	ref_al_obj( &h_srq->obj );
+	h_srq->obj.pfn_destroy( &h_srq->obj, pfn_destroy_cb );
+
+	AL_EXIT( AL_DBG_SRQ );
+	return IB_SUCCESS;
+}
+
+
+void
+destroying_srq(
+ IN struct _al_obj *p_obj )
+{
+ ib_srq_handle_t h_srq;
+ cl_list_item_t *p_item;
+ cl_obj_rel_t *p_rel;
+ ib_qp_handle_t h_qp;
+
+ CL_ASSERT( p_obj );
+ h_srq = PARENT_STRUCT( p_obj, ib_srq_t, obj );
+
+ /* Initiate destruction of all bound QPs. */
+ cl_spinlock_acquire( &h_srq->obj.lock );
+ for( p_item = cl_qlist_remove_tail( &h_srq->qp_list );
+ p_item != cl_qlist_end( &h_srq->qp_list );
+ p_item = cl_qlist_remove_tail( &h_srq->qp_list ) )
+ {
+ p_rel = PARENT_STRUCT( p_item, cl_obj_rel_t, pool_item.list_item );
+ p_rel->p_parent_obj = NULL;
+ h_qp = (ib_qp_handle_t)p_rel->p_child_obj;
+ if( h_qp )
+ {
+ /* Take a reference to prevent the QP from being destroyed. */
+ ref_al_obj( &h_qp->obj );
+ cl_spinlock_release( &h_srq->obj.lock );
+ h_qp->obj.pfn_destroy( &h_qp->obj, NULL );
+ cl_spinlock_acquire( &h_srq->obj.lock );
+ }
+ }
+ cl_spinlock_release( &h_srq->obj.lock );
+}
+
+void
+cleanup_srq(
+ IN struct _al_obj *p_obj )
+{
+ ib_srq_handle_t h_srq;
+ ib_api_status_t status;
+
+ CL_ASSERT( p_obj );
+ h_srq = PARENT_STRUCT( p_obj, ib_srq_t, obj );
+
+ /* Deallocate the CI srq. */
+ if( verbs_check_srq( h_srq ) )
+ {
+ status = verbs_destroy_srq( h_srq );
+ CL_ASSERT( status == IB_SUCCESS );
+ }
+}
+
+
+/*
+ * Release all resources associated with the SRQ.
+ */
+void
+free_srq(
+ IN al_obj_t *p_obj )
+{
+ ib_srq_handle_t h_srq;
+
+ CL_ASSERT( p_obj );
+ h_srq = PARENT_STRUCT( p_obj, ib_srq_t, obj );
+
+ destroy_al_obj( &h_srq->obj );
+ cl_free( h_srq );
+}
+
+
+void
+srq_attach_qp(
+ IN const ib_srq_handle_t h_srq,
+ IN cl_obj_rel_t* const p_qp_rel )
+{
+ p_qp_rel->p_parent_obj = (cl_obj_t*)h_srq;
+ ref_al_obj( &h_srq->obj );
+ cl_spinlock_acquire( &h_srq->obj.lock );
+ cl_qlist_insert_tail( &h_srq->qp_list, &p_qp_rel->pool_item.list_item );
+ cl_spinlock_release( &h_srq->obj.lock );
+}
+
+
+void
+srq_detach_qp(
+ IN const ib_srq_handle_t h_srq,
+ IN cl_obj_rel_t* const p_qp_rel )
+{
+ if( p_qp_rel->p_parent_obj )
+ {
+ CL_ASSERT( p_qp_rel->p_parent_obj == (cl_obj_t*)h_srq );
+ p_qp_rel->p_parent_obj = NULL;
+ cl_spinlock_acquire( &h_srq->obj.lock );
+ cl_qlist_remove_item( &h_srq->qp_list, &p_qp_rel->pool_item.list_item );
+ cl_spinlock_release( &h_srq->obj.lock );
+ }
+}
+
+
+ib_api_status_t
+ib_modify_srq(
+ IN const ib_srq_handle_t h_srq,
+ IN const ib_srq_attr_t* const p_srq_attr,
+ IN const ib_srq_attr_mask_t srq_attr_mask )
+{
+ return modify_srq( h_srq, p_srq_attr, srq_attr_mask, NULL );
+}
+
+
+/*
+ * Validate and apply an SRQ modify request (IB_SRQ_MAX_WR and/or
+ * IB_SRQ_LIMIT).  Rejects empty or unsupported mask bits and, when CA
+ * attributes are available, values exceeding the CA's SRQ WR limits,
+ * then forwards the request to the verbs provider.
+ */
+ib_api_status_t
+modify_srq(
+	IN		const	ib_srq_handle_t				h_srq,
+	IN		const	ib_srq_attr_t* const		p_srq_attr,
+	IN		const	ib_srq_attr_mask_t			srq_attr_mask,
+	IN	OUT			ci_umv_buf_t* const			p_umv_buf )
+{
+	ib_api_status_t			status;
+
+	AL_ENTER( AL_DBG_SRQ );
+
+	if( AL_OBJ_INVALID_HANDLE( h_srq, AL_OBJ_TYPE_H_SRQ ) )
+	{
+		AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_SRQ_HANDLE\n") );
+		return IB_INVALID_SRQ_HANDLE;
+	}
+
+	if( !p_srq_attr )
+	{
+		AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_PARAMETER\n") );
+		return IB_INVALID_PARAMETER;
+	}
+
+	/* At least one supported mask bit must be set, and no unsupported bits. */
+	if( !( srq_attr_mask & (IB_SRQ_MAX_WR |IB_SRQ_LIMIT)) ||
+		( srq_attr_mask & ~(IB_SRQ_MAX_WR |IB_SRQ_LIMIT)))
+	{
+		/* Trace the status actually returned (was IB_INVALID_PARAMETER). */
+		AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_SETTING\n") );
+		return IB_INVALID_SETTING;
+	}
+
+	if((srq_attr_mask & IB_SRQ_LIMIT) && h_srq->obj.p_ci_ca && h_srq->obj.p_ci_ca->p_pnp_attr )
+	{
+		if (p_srq_attr->srq_limit > h_srq->obj.p_ci_ca->p_pnp_attr->max_srq_wrs)
+		{
+			AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_SETTING\n") );
+			return IB_INVALID_SETTING;
+		}
+	}
+
+	if((srq_attr_mask & IB_SRQ_MAX_WR) && !p_srq_attr->max_wr)
+	{
+		AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_SETTING\n") );
+		return IB_INVALID_SETTING;
+	}
+
+	if ((srq_attr_mask & IB_SRQ_MAX_WR) && h_srq->obj.p_ci_ca && h_srq->obj.p_ci_ca->p_pnp_attr)
+	{
+		if (p_srq_attr->max_wr > h_srq->obj.p_ci_ca->p_pnp_attr->max_srq_wrs)
+		{
+			AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_MAX_WRS\n") );
+			return IB_INVALID_MAX_WRS;
+		}
+	}
+
+	status = verbs_modify_srq( h_srq, p_srq_attr, srq_attr_mask );
+
+	AL_EXIT( AL_DBG_SRQ );
+	return status;
+}
+
+
+
+ib_api_status_t
+ib_query_srq(
+ IN const ib_srq_handle_t h_srq,
+ OUT ib_srq_attr_t* const p_srq_attr )
+{
+ return query_srq( h_srq, p_srq_attr, NULL );
+}
+
+
+
+ib_api_status_t
+query_srq(
+ IN const ib_srq_handle_t h_srq,
+ OUT ib_srq_attr_t* const p_srq_attr,
+ IN OUT ci_umv_buf_t* const p_umv_buf )
+{
+ ib_api_status_t status;
+
+ AL_ENTER( AL_DBG_SRQ );
+
+ if( AL_OBJ_INVALID_HANDLE( h_srq, AL_OBJ_TYPE_H_SRQ ) )
+ {
+ AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_SRQ_HANDLE\n") );
+ return IB_INVALID_SRQ_HANDLE;
+ }
+ if( !p_srq_attr )
+ {
+ AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_PARAMETER\n") );
+ return IB_INVALID_PARAMETER;
+ }
+
+ status = verbs_query_srq( h_srq, p_srq_attr );
+
+ AL_EXIT( AL_DBG_SRQ );
+ return status;
+}
+
+
+/*
+ * Allocates and initializes the SRQ information structure.
+ */
+ib_api_status_t
+create_srq(
+ IN const ib_pd_handle_t h_pd,
+ IN const ib_srq_attr_t* const p_srq_attr,
+ IN const void* const srq_context,
+ IN const ib_pfn_event_cb_t pfn_srq_event_cb,
+ OUT ib_srq_handle_t* const ph_srq,
+ IN OUT ci_umv_buf_t* const p_umv_buf )
+{
+ ib_srq_handle_t h_srq;
+ ib_api_status_t status;
+ al_obj_type_t obj_type = AL_OBJ_TYPE_H_SRQ;
+
+ h_srq = cl_zalloc( sizeof( ib_srq_t ) );
+ if( !h_srq )
+ {
+ return IB_INSUFFICIENT_MEMORY;
+ }
+
+ if( p_umv_buf )
+ obj_type |= AL_OBJ_SUBTYPE_UM_EXPORT;
+
+ /* Construct the SRQ. */
+ construct_al_obj( &h_srq->obj, obj_type );
+
+ cl_qlist_init( &h_srq->qp_list );
+ h_srq->pfn_event_cb = pfn_srq_event_cb;
+
+ /* Initialize the SRQ. */
+ status = init_al_obj( &h_srq->obj, srq_context, TRUE,
+ destroying_srq, cleanup_srq, free_srq );
+ if( status != IB_SUCCESS )
+ {
+ free_srq( &h_srq->obj );
+ return status;
+ }
+ status = attach_al_obj( &h_pd->obj, &h_srq->obj );
+ if( status != IB_SUCCESS )
+ {
+ h_srq->obj.pfn_destroy( &h_srq->obj, NULL );
+ return status;
+ }
+
+ status = verbs_create_srq( h_pd, h_srq, p_srq_attr, p_umv_buf );
+ if( status != IB_SUCCESS )
+ {
+ h_srq->obj.pfn_destroy( &h_srq->obj, NULL );
+ return status;
+ }
+
+ *ph_srq = h_srq;
+
+ /*
+ * Note that we don't release the reference taken in init_al_obj here.
+ * For kernel clients, it is release in ib_create_srq. For user-mode
+ * clients is released by the proxy after the handle is extracted.
+ */
+ return IB_SUCCESS;
+}
+
+
+/*
+ * Process an asynchronous event on the SRQ.  Notify the user of the event.
+ */
+void
+srq_async_event_cb(
+ IN ib_async_event_rec_t* const p_event_rec )
+{
+ ib_srq_handle_t h_srq;
+
+ CL_ASSERT( p_event_rec );
+ h_srq = (ib_srq_handle_t)p_event_rec->context;
+
+#if defined(CL_KERNEL)
+ switch( p_event_rec->code )
+ {
+ case IB_AE_SRQ_LIMIT_REACHED:
+ AL_PRINT_EXIT( TRACE_LEVEL_WARNING, AL_DBG_SRQ,
+ ("IB_AE_SRQ_LIMIT_REACHED for srq %p \n", h_srq) );
+ //TODO: handle this error.
+ break;
+ case IB_AE_SRQ_CATAS_ERROR:
+ AL_PRINT_EXIT( TRACE_LEVEL_WARNING, AL_DBG_SRQ,
+ ("IB_AE_SRQ_CATAS_ERROR for srq %p \n", h_srq) );
+ //TODO: handle this error.
+ break;
+ default:
+ break;
+ }
+#endif
+
+ p_event_rec->context = (void*)h_srq->obj.context;
+ p_event_rec->handle.h_srq = h_srq;
+
+ if( h_srq->pfn_event_cb )
+ h_srq->pfn_event_cb( p_event_rec );
+}
+
+/*
+ * Post one or more receive work requests to an SRQ.  A chained request
+ * (p_recv_wr->p_next != NULL) requires pp_recv_failure so the caller can
+ * learn which WR failed.
+ */
+ib_api_status_t
+ib_post_srq_recv(
+	IN		const	ib_srq_handle_t				h_srq,
+	IN				ib_recv_wr_t* const			p_recv_wr,
+		OUT			ib_recv_wr_t				**pp_recv_failure OPTIONAL )
+{
+	ib_api_status_t			status;
+
+	AL_ENTER( AL_DBG_SRQ );
+
+	if( AL_OBJ_INVALID_HANDLE( h_srq, AL_OBJ_TYPE_H_SRQ ) )
+	{
+		AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_SRQ_HANDLE\n") );
+		/* Return the SRQ status to match the trace (was IB_INVALID_QP_HANDLE). */
+		return IB_INVALID_SRQ_HANDLE;
+	}
+	if( !p_recv_wr || ( p_recv_wr->p_next && !pp_recv_failure ) )
+	{
+		AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_PARAMETER\n") );
+		return IB_INVALID_PARAMETER;
+	}
+
+	status =
+		h_srq->pfn_post_srq_recv( h_srq->h_recv_srq, p_recv_wr, pp_recv_failure );
+
+	AL_EXIT( AL_DBG_SRQ );
+	return status;
+}
+
+
+
--- /dev/null
+/*
+ * Copyright (c) 2005 SilverStorm Technologies. All rights reserved.
+ * Copyright (c) 1996-2003 Intel Corporation. All rights reserved.
+ *
+ * This software is available to you under the OpenIB.org BSD license
+ * below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id: al_srq.h 1611 2006-08-20 14:48:55Z leonid $
+ */
+
+#if !defined(__AL_SRQ_H__)
+#define __AL_SRQ_H__
+
+#include <iba/ib_al.h>
+#include <iba/ib_ci.h>
+#include <complib/cl_qlist.h>
+#include <complib/cl_vector.h>
+
+#include "al_ca.h"
+#include "al_common.h"
+
+
+typedef ib_api_status_t
+(*ib_pfn_post_srq_recv_t)(
+ IN const ib_srq_handle_t h_srq,
+ IN ib_recv_wr_t* const p_recv_wr,
+ IN ib_recv_wr_t **p_recv_failure OPTIONAL );
+
+
+/*
+ * Shared queue pair information required by the access layer. This structure
+ * is referenced by a user's SRQ handle.
+ */
+typedef struct _ib_srq
+{
+ al_obj_t obj; /* Must be first. */
+
+ ib_srq_handle_t h_ci_srq; /* kernel SRQ handle */
+ ib_pfn_post_srq_recv_t pfn_post_srq_recv; /* post_srq_recv call */
+ ib_srq_handle_t h_recv_srq; /* srq handle for the post_srq_recv call */
+ ib_pfn_event_cb_t pfn_event_cb; /* user async event handler */
+	cl_qlist_t				qp_list;			/* List of QPs bound to this SRQ. */
+
+} ib_srq_t;
+
+ib_api_status_t
+create_srq(
+ IN const ib_pd_handle_t h_pd,
+ IN const ib_srq_attr_t* const p_srq_attr,
+ IN const void* const srq_context,
+ IN const ib_pfn_event_cb_t pfn_srq_event_cb,
+ OUT ib_srq_handle_t* const ph_srq,
+ IN OUT ci_umv_buf_t* const p_umv_buf );
+
+
+ib_api_status_t
+query_srq(
+ IN const ib_srq_handle_t h_srq,
+ OUT ib_srq_attr_t* const p_srq_attr,
+ IN OUT ci_umv_buf_t* const p_umv_buf );
+
+
+ib_api_status_t
+modify_srq(
+ IN const ib_srq_handle_t h_srq,
+ IN const ib_srq_attr_t* const p_srq_attr,
+ IN const ib_srq_attr_mask_t srq_attr_mask,
+ IN OUT ci_umv_buf_t* const p_umv_buf );
+
+
+void
+srq_async_event_cb(
+ IN ib_async_event_rec_t* const p_event_rec );
+
+void
+srq_attach_qp(
+ IN const ib_srq_handle_t h_srq,
+ IN cl_obj_rel_t* const p_qp_rel );
+
+void
+srq_detach_qp(
+ IN const ib_srq_handle_t h_srq,
+ IN cl_obj_rel_t* const p_qp_rel );
+
+#endif	/* __AL_SRQ_H__ */
+
#include "al_cq.h"\r
#include "al_pd.h"\r
#include "al_qp.h"\r
+#include "al_srq.h"\r
\r
#ifndef CL_KERNEL\r
#include "ual_mad.h"\r
#define verbs_deallocate_pd(h_pd) \\r
h_pd->obj.p_ci_ca->verbs.deallocate_pd( h_pd->h_ci_pd )\r
\r
+static inline ib_api_status_t\r
+verbs_create_srq(\r
+ IN const ib_pd_handle_t h_pd,\r
+ IN ib_srq_handle_t h_srq,\r
+ IN const ib_srq_attr_t* const p_srq_attr,\r
+ IN OUT ci_umv_buf_t* const p_umv_buf )\r
+{\r
+ ib_api_status_t status;\r
+\r
+ status = h_srq->obj.p_ci_ca->verbs.create_srq(\r
+ h_pd->h_ci_pd, h_srq, p_srq_attr,\r
+ &h_srq->h_ci_srq, p_umv_buf );\r
+\r
+ h_srq->h_recv_srq = h_srq->h_ci_srq;\r
+ h_srq->pfn_post_srq_recv = h_srq->obj.p_ci_ca->verbs.post_srq_recv;\r
+ return status;\r
+}\r
+\r
+#define verbs_check_srq(h_srq) ((h_srq)->h_ci_srq)\r
+\r
+#define verbs_destroy_srq(h_srq) \\r
+ h_srq->obj.p_ci_ca->verbs.destroy_srq( h_srq->h_ci_srq )\r
+\r
+#define verbs_query_srq(h_srq, p_srq_attr) \\r
+ h_srq->obj.p_ci_ca->verbs.query_srq( h_srq->h_ci_srq,\\r
+ p_srq_attr, p_umv_buf )\r
+\r
+#define verbs_modify_srq(h_srq, p_srq_attr, srq_attr_mask) \\r
+ h_srq->obj.p_ci_ca->verbs.modify_srq( h_srq->h_ci_srq,\\r
+ p_srq_attr, srq_attr_mask, p_umv_buf )\r
+\r
+#define verbs_post_srq_recv(h_srq, p_recv_wr, pp_recv_failure) \\r
+ h_srq->obj.p_ci_ca->verbs.post_srq_recv( h_srq->h_ci_srq,\\r
+ p_recv_wr, pp_recv_failure )\r
+\r
#define convert_qp_handle( qp_create ) {\\r
CL_ASSERT( qp_create.h_rq_cq ); \\r
qp_create.h_rq_cq = qp_create.h_rq_cq->h_ci_cq; \\r
CL_ASSERT( qp_create.h_sq_cq ); \\r
qp_create.h_sq_cq = qp_create.h_sq_cq->h_ci_cq; \\r
+ if (qp_create.h_srq) \\r
+ qp_create.h_srq = qp_create.h_srq->h_ci_srq; \\r
}\r
\r
-\r
static inline ib_api_status_t\r
verbs_get_spl_qp(\r
IN ib_pd_handle_t h_pd,\r
return status;\r
}\r
\r
-\r
#define verbs_check_qp(h_qp) ((h_qp)->h_ci_qp)\r
#define verbs_destroy_qp(h_qp) \\r
h_qp->obj.p_ci_ca->verbs.destroy_qp( h_qp->h_ci_qp, h_qp->timewait )\r
#define verbs_deallocate_pd(h_pd) \\r
ual_deallocate_pd(h_pd)\r
\r
+#define verbs_create_srq(h_pd, h_srq, p_srq_attr, p_umv_buf) \\r
+ ual_create_srq (h_pd, h_srq, p_srq_attr); \\r
+ UNUSED_PARAM( p_umv_buf )\r
+\r
+#define verbs_check_srq(h_srq) ((h_srq)->h_ci_srq || (h_srq)->obj.hdl)\r
+\r
+#define verbs_destroy_srq(h_srq) \\r
+ ual_destroy_srq(h_srq)\r
+\r
+#define verbs_query_srq(h_srq, p_srq_attr) \\r
+ ual_query_srq(h_srq, p_srq_attr); \\r
+ UNUSED_PARAM( p_umv_buf );\r
+\r
+#define verbs_modify_srq(h_srq, p_srq_attr, srq_attr_mask) \\r
+ ual_modify_srq(h_srq, p_srq_attr, srq_attr_mask); \\r
+ UNUSED_PARAM( p_umv_buf );\r
+\r
+#define verbs_post_srq_recv(h_srq, p_recv_wr, pp_recv_failure) \\r
+ ual_post_srq_recv(h_srq, p_recv_wr, pp_recv_failure)\r
+\r
+\r
/* For user-mode, handle conversion is done in ual files */\r
\r
#define convert_qp_handle( qp_create )\r
"IB_INVALID_MAX_WRS",\r
"IB_INVALID_MAX_SGE",\r
"IB_INVALID_CQ_SIZE",\r
+ "IB_INVALID_SRQ_SIZE",\r
"IB_INVALID_SERVICE_TYPE",\r
"IB_INVALID_GID",\r
"IB_INVALID_LID",\r
"IB_INVALID_AV_HANDLE",\r
"IB_INVALID_CQ_HANDLE",\r
"IB_INVALID_QP_HANDLE",\r
+ "IB_INVALID_SRQ_HANDLE",\r
"IB_INVALID_PD_HANDLE",\r
"IB_INVALID_MR_HANDLE",\r
"IB_INVALID_FMR_HANDLE",\r
"IB_INVALID_AL_HANDLE",\r
"IB_INVALID_HANDLE",\r
"IB_ERROR",\r
- "IB_REMOTE_ERROR", /* Infiniband Access Layer */\r
+ "IB_REMOTE_ERROR",\r
"IB_VERBS_PROCESSING_DONE",\r
"IB_INVALID_WR_TYPE",\r
"IB_QP_IN_TIMEWAIT",\r
return( __ib_wr_type_str[wr_type] );\r
}\r
\r
+/* QP type names, indexed by ib_qp_type_t value (index 2 is a reserved slot). */\r
+static const char* const __ib_qp_type_str[] =\r
+{\r
+	"IB_QPT_RELIABLE_CONN",\r
+	"IB_QPT_UNRELIABLE_CONN",\r
+	"IB_QPT_UNKNOWN",\r
+	"IB_QPT_UNRELIABLE_DGRM",\r
+	"IB_QPT_QP0",\r
+	"IB_QPT_QP1",\r
+	"IB_QPT_RAW_IPV6",\r
+	"IB_QPT_RAW_ETHER",\r
+	"IB_QPT_MAD",\r
+	"IB_QPT_QP0_ALIAS",\r
+	"IB_QPT_QP1_ALIAS",\r
+	"IB_QPT_UNKNOWN"\r
+\r
+\r
+const char* \r
+ib_get_qp_type_str(\r
+ IN uint8_t qp_type )\r
+{\r
+ if( qp_type > IB_QPT_UNKNOWN )\r
+ qp_type = IB_QPT_UNKNOWN;\r
+ return( __ib_qp_type_str[qp_type] );\r
+}\r
+\r
+\r
..\al_query.c \\r
..\al_reg_svc.c \\r
..\al_res_mgr.c \\r
+ ..\al_srq.c \\r
..\al_sub.c \\r
..\ib_common.c \\r
..\ib_statustext.c\r
}\r
\r
/* Initialize the AL device management agent. */\r
+\r
+/*\r
+ Disable support of DM agent.\r
+\r
status = create_dm_agent( &gp_al_mgr->obj );\r
if( status != IB_SUCCESS )\r
{\r
("create_dm_agent failed, status = 0x%x.\n", status) );\r
return status;\r
}\r
-\r
+*/\r
status = create_ioc_pnp( &gp_al_mgr->obj );\r
if( status != IB_SUCCESS )\r
{\r
if( type == AL_OBJ_TYPE_UNKNOWN &&\r
p_h->type != AL_OBJ_TYPE_H_PD && p_h->type != AL_OBJ_TYPE_H_CQ &&\r
p_h->type != AL_OBJ_TYPE_H_AV && p_h->type != AL_OBJ_TYPE_H_QP &&\r
- p_h->type != AL_OBJ_TYPE_H_MR && p_h->type != AL_OBJ_TYPE_H_MW )\r
+ p_h->type != AL_OBJ_TYPE_H_MR && p_h->type != AL_OBJ_TYPE_H_MW &&\r
+ p_h->type != AL_OBJ_TYPE_H_SRQ )\r
{\r
cl_spinlock_release( &h_al->obj.lock );\r
return NULL;\r
#include "al_ca.h"\r
#include "al_pd.h"\r
#include "al_qp.h"\r
+#include "al_srq.h"\r
#include "al_cq.h"\r
#include "al_mr.h"\r
#include "al_mw.h"\r
}\r
\r
\r
+/*
+ * Proxy's SRQ error handler
+ *
+ * Forwards a kernel SRQ async event to the owning user-mode process by
+ * queueing a misc callback record on its open-device context.
+ * Fix: trace with AL_DBG_SRQ (was AL_DBG_QP) to match the rest of the
+ * SRQ code added by this change.
+ */
+static void
+proxy_srq_err_cb(
+	IN				ib_async_event_rec_t		*p_err_rec )
+{
+	ib_srq_handle_t			h_srq = p_err_rec->handle.h_srq;
+	al_dev_open_context_t	*p_context = h_srq->obj.h_al->p_context;
+	misc_cb_ioctl_info_t	cb_info;
+
+	AL_ENTER( AL_DBG_SRQ );
+
+	/*
+	 * If we're already closing the device - do not queue a callback, since
+	 * we're cleaning up the callback lists.
+	 */
+	if( !proxy_context_ref( p_context ) )
+	{
+		proxy_context_deref( p_context );
+		return;
+	}
+
+	/* Set up context and callback record type appropriate for UAL */
+	cb_info.rec_type = SRQ_ERROR_REC;
+	/* Return the Proxy's SRQ handle (user-mode hdl), not the kernel pointer. */
+	cb_info.ioctl_rec.event_rec = *p_err_rec;
+	cb_info.ioctl_rec.event_rec.handle.h_srq = (ib_srq_handle_t)h_srq->obj.hdl;
+
+	/* The proxy handle must be valid now. */
+	if( !h_srq->obj.hdl_valid )
+		h_srq->obj.hdl_valid = TRUE;
+
+	proxy_queue_cb_buf(
+		UAL_GET_MISC_CB_INFO, p_context, &cb_info, &h_srq->obj );
+
+	proxy_context_deref( p_context );
+
+	AL_EXIT( AL_DBG_SRQ );
+}
+\r
+/*
+ * Process the ioctl UAL_CREATE_SRQ
+ *
+ * Creates a kernel SRQ on behalf of the user process and returns the
+ * AL object handle to UAL.  The ioctl itself always returns CL_SUCCESS;
+ * the IB status travels in p_ioctl->out.status.
+ */
+static cl_status_t
+proxy_create_srq(
+	IN		void					*p_open_context,
+	IN		cl_ioctl_handle_t		h_ioctl,
+		OUT	size_t					*p_ret_bytes )
+{
+	ual_create_srq_ioctl_t	*p_ioctl =
+		(ual_create_srq_ioctl_t*)cl_ioctl_in_buf( h_ioctl );
+	al_dev_open_context_t	*p_context =
+		(al_dev_open_context_t *)p_open_context;
+	ib_pd_handle_t			h_pd;
+	ib_srq_handle_t			h_srq;
+	ci_umv_buf_t			*p_umv_buf = NULL;
+	ib_api_status_t			status;
+	ib_pfn_event_cb_t		pfn_ev;
+
+	AL_ENTER( AL_DBG_SRQ );
+
+	/* Validate input buffers. */
+	if( !cl_ioctl_in_buf( h_ioctl ) || !cl_ioctl_out_buf( h_ioctl ) ||
+		cl_ioctl_in_size( h_ioctl ) != sizeof(p_ioctl->in) ||
+		cl_ioctl_out_size( h_ioctl ) != sizeof(p_ioctl->out) )
+	{
+		AL_EXIT( AL_DBG_SRQ );
+		return CL_INVALID_PARAMETER;
+	}
+
+	/* Validate handles. */
+	h_pd = (ib_pd_handle_t)
+		al_hdl_ref( p_context->h_al, p_ioctl->in.h_pd, AL_OBJ_TYPE_H_PD );
+	if( !h_pd)
+	{
+		status = IB_INVALID_PD_HANDLE;
+		goto proxy_create_srq_err1;
+	}
+
+	status = cpyin_umvbuf( &p_ioctl->in.umv_buf, &p_umv_buf );
+	if( status != IB_SUCCESS )
+		goto proxy_create_srq_err1;
+
+	/* Only hook the kernel async handler if the user registered one. */
+	if( p_ioctl->in.ev_notify )
+		pfn_ev = proxy_srq_err_cb;
+	else
+		pfn_ev = NULL;
+
+	status = create_srq( h_pd, &p_ioctl->in.srq_attr, p_ioctl->in.context,
+		pfn_ev, &h_srq, p_umv_buf );
+	if( status != IB_SUCCESS )
+		goto proxy_create_srq_err1;
+
+	status = cpyout_umvbuf( &p_ioctl->out.umv_buf, p_umv_buf );
+	if( status == IB_SUCCESS )
+	{
+		p_ioctl->out.h_srq = h_srq->obj.hdl;
+		h_srq->obj.hdl_valid = TRUE;
+		/* Release the reference taken in create_srq (by init_al_obj) */
+		deref_al_obj( &h_srq->obj );
+	}
+	else
+	{
+		/*
+		 * BUGFIX: copy-out failed after the SRQ was created - destroy it
+		 * so it doesn't leak.  Earlier failure paths jump past this call
+		 * to the label below.
+		 */
+		h_srq->obj.pfn_destroy( &h_srq->obj, NULL );
+proxy_create_srq_err1:
+		p_ioctl->out.umv_buf = p_ioctl->in.umv_buf;
+		p_ioctl->out.h_srq = AL_INVALID_HANDLE;
+	}
+	free_umvbuf( p_umv_buf );
+
+	p_ioctl->out.status = status;
+	*p_ret_bytes = sizeof(p_ioctl->out);
+
+	if( h_pd )
+		deref_al_obj( &h_pd->obj );
+
+	AL_EXIT( AL_DBG_SRQ );
+	return CL_SUCCESS;
+}
+\r
+\r
+/*
+ * Process the ioctl UAL_QUERY_SRQ:
+ *
+ * Looks up the kernel SRQ from the user handle, copies in the optional
+ * vendor (UMV) buffer, queries the attributes, and copies the UMV buffer
+ * back out.  The ioctl completes with CL_SUCCESS even on IB errors; the
+ * IB status is returned in p_ioctl->out.status.
+ */
+static
+cl_status_t
+proxy_query_srq(
+	IN		void					*p_open_context,
+	IN		cl_ioctl_handle_t		h_ioctl,
+		OUT	size_t					*p_ret_bytes )
+{
+	ual_query_srq_ioctl_t	*p_ioctl =
+		(ual_query_srq_ioctl_t *)cl_ioctl_in_buf( h_ioctl );
+	al_dev_open_context_t	*p_context =
+		(al_dev_open_context_t *)p_open_context;
+	ib_srq_handle_t			h_srq;
+	ci_umv_buf_t			*p_umv_buf = NULL;
+	ib_api_status_t			status;
+
+	AL_ENTER( AL_DBG_SRQ );
+
+	/* Validate input buffers. */
+	if( !cl_ioctl_in_buf( h_ioctl ) || !cl_ioctl_out_buf( h_ioctl ) ||
+		cl_ioctl_in_size( h_ioctl ) != sizeof(p_ioctl->in) ||
+		cl_ioctl_out_size( h_ioctl ) != sizeof(p_ioctl->out) )
+	{
+		AL_EXIT( AL_DBG_SRQ );
+		return CL_INVALID_PARAMETER;
+	}
+
+	/* Validate SRQ handle */
+	h_srq = (ib_srq_handle_t)
+		al_hdl_ref( p_context->h_al, p_ioctl->in.h_srq, AL_OBJ_TYPE_H_SRQ );
+	if( !h_srq )
+	{
+		status = IB_INVALID_SRQ_HANDLE;
+		goto proxy_query_srq_err;
+	}
+
+	status = cpyin_umvbuf( &p_ioctl->in.umv_buf, &p_umv_buf );
+	if( status != IB_SUCCESS )
+		goto proxy_query_srq_err;
+
+	status = query_srq( h_srq, &p_ioctl->out.srq_attr, p_umv_buf );\r
+	if( status != IB_SUCCESS )
+		goto proxy_query_srq_err;
+
+	status = cpyout_umvbuf( &p_ioctl->out.umv_buf, p_umv_buf );
+	if( status != IB_SUCCESS )
+	{
+		/* All error paths land here so the UMV buffer is restored and the
+		 * output attributes are cleared before returning to user mode. */
+proxy_query_srq_err:
+		p_ioctl->out.umv_buf = p_ioctl->in.umv_buf;
+		cl_memclr( &p_ioctl->out.srq_attr, sizeof(ib_srq_attr_t) );
+	}
+	free_umvbuf( p_umv_buf );
+
+	/* h_srq is NULL only when the handle lookup above failed. */
+	if( h_srq )
+		deref_al_obj( &h_srq->obj );
+
+	p_ioctl->out.status = status;
+	*p_ret_bytes = sizeof(p_ioctl->out);
+
+	AL_EXIT( AL_DBG_SRQ );
+	return CL_SUCCESS;
+}
+\r
+\r
+\r
+/*
+ * Process the ioctl UAL_MODIFY_SRQ:
+ *
+ * Looks up the kernel SRQ from the user handle, copies in the optional
+ * vendor (UMV) buffer, applies the attribute change, and copies the UMV
+ * buffer back out.  The ioctl completes with CL_SUCCESS even on IB
+ * errors; the IB status is returned in p_ioctl->out.status.
+ */
+static
+cl_status_t
+proxy_modify_srq(
+	IN		void					*p_open_context,
+	IN		cl_ioctl_handle_t		h_ioctl,
+		OUT	size_t					*p_ret_bytes )
+{
+	ual_modify_srq_ioctl_t	*p_ioctl =
+		(ual_modify_srq_ioctl_t *)cl_ioctl_in_buf( h_ioctl );
+	al_dev_open_context_t	*p_context =
+		(al_dev_open_context_t *)p_open_context;
+	ib_srq_handle_t			h_srq;
+	ci_umv_buf_t			*p_umv_buf = NULL;
+	ib_api_status_t			status;
+
+	AL_ENTER( AL_DBG_SRQ );
+
+	/* Validate input buffers. */
+	if( !cl_ioctl_in_buf( h_ioctl ) || !cl_ioctl_out_buf( h_ioctl ) ||
+		cl_ioctl_in_size( h_ioctl ) != sizeof(p_ioctl->in) ||
+		cl_ioctl_out_size( h_ioctl ) != sizeof(p_ioctl->out) )
+	{
+		AL_EXIT( AL_DBG_SRQ );
+		return CL_INVALID_PARAMETER;
+	}
+
+	/* Validate SRQ handle */
+	h_srq = (ib_srq_handle_t)
+		al_hdl_ref( p_context->h_al, p_ioctl->in.h_srq, AL_OBJ_TYPE_H_SRQ );
+	if( !h_srq )
+	{
+		status = IB_INVALID_SRQ_HANDLE;
+		goto proxy_modify_srq_err;
+	}
+
+	status = cpyin_umvbuf( &p_ioctl->in.umv_buf, &p_umv_buf );
+	if( status != IB_SUCCESS )
+		goto proxy_modify_srq_err;
+
+	status = modify_srq( h_srq, &p_ioctl->in.srq_attr, p_ioctl->in.srq_attr_mask, p_umv_buf );
+
+	if( status != IB_SUCCESS )
+		goto proxy_modify_srq_err;
+	
+	status = cpyout_umvbuf( &p_ioctl->out.umv_buf, p_umv_buf );
+	if( status != IB_SUCCESS )
+	{
+		/* All error paths land here so the UMV buffer is restored
+		 * before returning to user mode. */
+proxy_modify_srq_err:
+		p_ioctl->out.umv_buf = p_ioctl->in.umv_buf;
+	}
+	free_umvbuf( p_umv_buf );
+
+	/* h_srq is NULL only when the handle lookup above failed. */
+	if( h_srq )
+		deref_al_obj( &h_srq->obj );
+
+	p_ioctl->out.status = status;
+	*p_ret_bytes = sizeof(p_ioctl->out);
+
+	AL_EXIT( AL_DBG_SRQ );
+	return CL_SUCCESS;
+}
+\r
+\r
+/*
+ * Process the ioctl UAL_DESTROY_SRQ
+ *
+ * Synchronously destroys the kernel SRQ named by the user handle.
+ * The ioctl always completes with CL_SUCCESS; the IB status is
+ * carried in p_ioctl->out.status.
+ */
+static cl_status_t
+proxy_destroy_srq(
+	IN		void					*p_open_context,
+	IN		cl_ioctl_handle_t		h_ioctl,
+		OUT	size_t					*p_ret_bytes )
+{
+	ual_destroy_srq_ioctl_t	*p_ioctl =
+		(ual_destroy_srq_ioctl_t *)cl_ioctl_in_buf( h_ioctl );
+	al_dev_open_context_t	*p_context =
+		(al_dev_open_context_t *)p_open_context;
+	ib_srq_handle_t			h_srq;
+
+	AL_ENTER( AL_DBG_SRQ );
+
+	/* Validate input buffers. */
+	if( !cl_ioctl_in_buf( h_ioctl ) || !cl_ioctl_out_buf( h_ioctl ) ||
+		cl_ioctl_in_size( h_ioctl ) != sizeof(p_ioctl->in) ||
+		cl_ioctl_out_size( h_ioctl ) != sizeof(p_ioctl->out) )
+	{
+		AL_EXIT( AL_DBG_SRQ );
+		return CL_INVALID_PARAMETER;
+	}
+
+	/* Set the return bytes in all cases */
+	*p_ret_bytes = sizeof(p_ioctl->out);
+
+	/* Look up the SRQ; destroy it synchronously when found. */
+	h_srq = (ib_srq_handle_t)
+		al_hdl_ref( p_context->h_al, p_ioctl->in.h_srq, AL_OBJ_TYPE_H_SRQ );
+	if( h_srq )
+	{
+		h_srq->obj.pfn_destroy( &h_srq->obj, ib_sync_destroy );
+		p_ioctl->out.status = IB_SUCCESS;
+	}
+	else
+	{
+		AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_SRQ_HANDLE\n") );
+		p_ioctl->out.status = IB_INVALID_SRQ_HANDLE;
+	}
+
+	AL_EXIT( AL_DBG_SRQ );
+	return CL_SUCCESS;
+}
+\r
\r
/*\r
* Proxy's QP error handler\r
}\r
\r
\r
+\r
/*\r
* Process the ioctl UAL_CREATE_QP\r
*\r
(al_dev_open_context_t *)p_open_context;\r
ib_pd_handle_t h_pd;\r
ib_qp_handle_t h_qp;\r
+ ib_srq_handle_t h_srq = NULL;\r
ib_cq_handle_t h_sq_cq, h_rq_cq;\r
ci_umv_buf_t *p_umv_buf = NULL;\r
ib_api_status_t status;\r
(uint64_t)p_ioctl->in.qp_create.h_sq_cq, AL_OBJ_TYPE_H_CQ );\r
h_rq_cq = (ib_cq_handle_t)al_hdl_ref( p_context->h_al,\r
(uint64_t)p_ioctl->in.qp_create.h_rq_cq, AL_OBJ_TYPE_H_CQ );\r
+ if (p_ioctl->in.qp_create.h_srq) {\r
+ h_srq = (ib_srq_handle_t)al_hdl_ref( p_context->h_al,\r
+ (uint64_t)p_ioctl->in.qp_create.h_srq, AL_OBJ_TYPE_H_SRQ );\r
+ if( !h_srq)\r
+ {\r
+ status = IB_INVALID_SRQ_HANDLE;\r
+ goto proxy_create_qp_err1;\r
+ }\r
+ }\r
if( !h_pd)\r
{\r
status = IB_INVALID_PD_HANDLE;\r
p_ioctl->in.qp_create.h_sq_cq = h_sq_cq;\r
/* Substitute rq_cq handle with AL's cq handle */\r
p_ioctl->in.qp_create.h_rq_cq = h_rq_cq;\r
+ /* Substitute srq handle with AL's srq handle */\r
+ p_ioctl->in.qp_create.h_srq = h_srq;\r
\r
status = cpyin_umvbuf( &p_ioctl->in.umv_buf, &p_umv_buf );\r
if( status != IB_SUCCESS )\r
deref_al_obj( &h_rq_cq->obj );\r
if( h_sq_cq )\r
deref_al_obj( &h_sq_cq->obj );\r
+ if( h_srq )\r
+ deref_al_obj( &h_srq->obj );\r
\r
AL_EXIT( AL_DBG_QP );\r
return CL_SUCCESS;\r
{\r
p_ioctl->out.attr.h_rq_cq = NULL;\r
}\r
+ if( p_ioctl->out.attr.h_srq )\r
+ {\r
+ p_ioctl->out.attr.h_srq =\r
+ (ib_srq_handle_t)p_ioctl->out.attr.h_srq->obj.hdl;\r
+ }\r
+ else\r
+ {\r
+ p_ioctl->out.attr.h_srq = NULL;\r
+ }\r
}\r
else\r
{\r
}\r
\r
\r
+/*
+ * Process the ioctl UAL_POST_SRQ_RECV
+ *
+ * Unmarshals an array of receive work requests (with trailing data
+ * segments) from the IOCTL input buffer, relinks them into the list form
+ * expected by ib_post_srq_recv, and reports how many requests failed.
+ */
+static
+cl_status_t
+proxy_post_srq_recv(
+	IN		void					*p_open_context,
+	IN		cl_ioctl_handle_t		h_ioctl,
+		OUT	size_t					*p_ret_bytes )
+{
+	ual_post_srq_recv_ioctl_t	*p_ioctl =
+		(ual_post_srq_recv_ioctl_t *)cl_ioctl_in_buf( h_ioctl );
+	al_dev_open_context_t		*p_context =
+		(al_dev_open_context_t *)p_open_context;
+	ib_srq_handle_t				h_srq;
+	ib_recv_wr_t				*p_wr;
+	ib_recv_wr_t				*p_recv_failure;
+	uintn_t						i;
+	ib_local_ds_t				*p_ds;
+	uintn_t						num_ds = 0;
+	ib_api_status_t				status;
+	size_t						in_buf_sz;
+
+	AL_ENTER( AL_DBG_SRQ );
+
+	/* Validate input buffers. */
+	if( !cl_ioctl_in_buf( h_ioctl ) || !cl_ioctl_out_buf( h_ioctl ) ||
+		cl_ioctl_in_size( h_ioctl ) < sizeof(p_ioctl->in) ||
+		cl_ioctl_out_size( h_ioctl ) != sizeof(p_ioctl->out) )
+	{
+		AL_EXIT( AL_DBG_SRQ );
+		return CL_INVALID_PARAMETER;
+	}
+
+	/*
+	 * Additional input buffer validation based on actual settings.
+	 * Note that this validates that work requests are actually
+	 * being passed in.
+	 */
+	in_buf_sz = sizeof(p_ioctl->in);
+	in_buf_sz += sizeof(ib_recv_wr_t) * (p_ioctl->in.num_wr - 1);
+	in_buf_sz += sizeof(ib_local_ds_t) * p_ioctl->in.num_ds;
+	if( cl_ioctl_in_size( h_ioctl ) != in_buf_sz )
+	{
+		AL_EXIT( AL_DBG_SRQ );
+		return CL_INVALID_PARAMETER;
+	}
+
+	/* Setup p_recv_failure to head of list. */
+	p_recv_failure = p_wr = p_ioctl->in.recv_wr;
+
+	/*
+	 * Validate SRQ handle.
+	 * BUGFIX: was looked up as AL_OBJ_TYPE_H_QP, which would reject every
+	 * valid SRQ handle (and accept a QP handle).
+	 */
+	h_srq = (ib_srq_handle_t)
+		al_hdl_ref( p_context->h_al, p_ioctl->in.h_srq, AL_OBJ_TYPE_H_SRQ );
+	if( !h_srq )
+	{
+		status = IB_INVALID_SRQ_HANDLE;
+		goto proxy_post_recv_done;
+	}
+
+	/* Setup the base data segment pointer. */
+	p_ds = (ib_local_ds_t*)&p_ioctl->in.recv_wr[p_ioctl->in.num_wr];
+
+	/* Setup the user's work requests and data segments and translate. */
+	for( i = 0; i < p_ioctl->in.num_wr; i++ )
+	{
+		/* Setup the data segments, if any. */
+		if( p_wr[i].num_ds )
+		{
+			num_ds += p_wr[i].num_ds;
+			if( num_ds > p_ioctl->in.num_ds )
+			{
+				/*
+				 * The work request submitted exceed the number of data
+				 * segments specified in the IOCTL.
+				 */
+				status = IB_INVALID_PARAMETER;
+				goto proxy_post_recv_done;
+			}
+			p_wr[i].ds_array = p_ds;
+			/* BUGFIX: advance by this WR's count (was p_wr->num_ds, i.e.
+			 * always the first WR's count, corrupting later ds_arrays). */
+			p_ds += p_wr[i].num_ds;
+		}
+		else
+		{
+			p_wr[i].ds_array = NULL;
+		}
+
+		p_wr[i].p_next = &p_wr[i + 1];
+	}
+
+	/* Mark the end of list. */
+	p_wr[i-1].p_next = NULL;
+
+	status = ib_post_srq_recv( h_srq, p_wr, &p_recv_failure );
+
+	if( status == IB_SUCCESS )
+	{
+		p_ioctl->out.failed_cnt = 0;
+	}
+	else
+	{
+proxy_post_recv_done:
+		/* First set up as if all failed. */
+		p_ioctl->out.failed_cnt = p_ioctl->in.num_wr;
+		/* Now subtract successful ones. */
+		p_ioctl->out.failed_cnt -= (uint32_t)(
+			(((uintn_t)p_recv_failure) - ((uintn_t)p_wr))
+			/ sizeof(ib_recv_wr_t));
+	}
+
+	if( h_srq )
+		deref_al_obj( &h_srq->obj );
+
+	p_ioctl->out.status = status;
+	*p_ret_bytes = sizeof(p_ioctl->out);
+
+	AL_EXIT( AL_DBG_SRQ );
+	return CL_SUCCESS;
+}
+\r
\r
/*\r
* Process the ioctl UAL_PEEK_CQ\r
case UAL_MODIFY_AV:\r
cl_status = proxy_modify_av( p_context, h_ioctl, p_ret_bytes );\r
break;\r
+ case UAL_CREATE_SRQ:\r
+ cl_status = proxy_create_srq( p_context, h_ioctl, p_ret_bytes );\r
+ break;\r
+ case UAL_QUERY_SRQ:\r
+ cl_status = proxy_query_srq( p_context, h_ioctl, p_ret_bytes );\r
+ break;\r
+ case UAL_MODIFY_SRQ:\r
+ cl_status = proxy_modify_srq( p_context, h_ioctl, p_ret_bytes );\r
+ break;\r
+ case UAL_DESTROY_SRQ:\r
+ cl_status = proxy_destroy_srq( p_context, h_ioctl, p_ret_bytes );\r
+ break;\r
+ case UAL_POST_SRQ_RECV:\r
+ cl_status = proxy_post_srq_recv( p_context, h_ioctl, p_ret_bytes );\r
+ break;\r
case UAL_CREATE_QP:\r
cl_status = proxy_create_qp( p_context, h_ioctl, p_ret_bytes );\r
break;\r
ual_query.c \\r
ual_reg_svc.c \\r
ual_sa_req.c \\r
+ ual_srq.c \\r
ual_sub.c \\r
..\al.c \\r
..\al_av.c \\r
..\al_query.c \\r
..\al_reg_svc.c \\r
..\al_res_mgr.c \\r
+ ..\al_srq.c \\r
..\al_sub.c \\r
..\ib_common.c \\r
..\ib_statustext.c\r
OUT ib_av_attr_t* const p_av_attr,\r
OUT ib_pd_handle_t* const ph_pd );\r
\r
+ib_api_status_t\r
+ual_create_srq(\r
+ IN const ib_pd_handle_t h_pd,\r
+ IN OUT ib_srq_handle_t h_srq,\r
+ IN const ib_srq_attr_t* const p_srq_attr);\r
+\r
+ib_api_status_t\r
+ual_modify_srq(\r
+ IN ib_srq_handle_t h_srq,\r
+ IN const ib_srq_attr_t* const p_srq_attr,\r
+ IN const ib_srq_attr_mask_t srq_attr_mask );\r
+\r
+ib_api_status_t\r
+ual_query_srq(\r
+ IN ib_srq_handle_t h_srq,\r
+ OUT ib_srq_attr_t* p_srq_attr );\r
+\r
+ib_api_status_t\r
+ual_destroy_srq(\r
+ IN ib_srq_handle_t h_srq );\r
+\r
ib_api_status_t\r
ual_create_qp(\r
IN const ib_pd_handle_t h_pd,\r
IN ib_recv_wr_t* const p_recv_wr,\r
OUT ib_recv_wr_t **pp_recv_failure );\r
\r
+ib_api_status_t\r
+ual_post_srq_recv(\r
+ IN const ib_srq_handle_t h_srq,\r
+ IN ib_recv_wr_t* const p_recv_wr,\r
+ OUT ib_recv_wr_t **pp_recv_failure );\r
+\r
ib_api_status_t\r
ual_peek_cq(\r
IN const ib_cq_handle_t h_cq,\r
{\r
case CA_ERROR_REC:\r
case QP_ERROR_REC:\r
+ case SRQ_ERROR_REC:\r
case CQ_ERROR_REC:\r
{\r
/* Initiate user-mode asynchronous event processing. */\r
uintn_t bytes_ret;\r
cl_status_t cl_status;\r
ib_ca_attr_t *p_old_ca_attr;\r
+ ib_api_status_t status;\r
\r
pnp_event = p_misc_cb_info->ioctl_rec.pnp_cb_ioctl_rec.pnp_event;\r
ca_guid = p_misc_cb_info->ioctl_rec.pnp_cb_ioctl_rec.pnp_info.ca.ca_guid;\r
ref_al_obj( &p_ci_ca->obj );\r
cl_spinlock_release( &gp_al_mgr->obj.lock );\r
\r
- ci_ca_update_attr( p_ci_ca, &p_old_ca_attr );\r
- if( p_old_ca_attr )\r
+ status = ci_ca_update_attr( p_ci_ca, &p_old_ca_attr );\r
+ if( status != IB_SUCCESS) {\r
+ AL_PRINT(TRACE_LEVEL_ERROR ,AL_DBG_ERROR ,\r
+ ("update CA attributes returned %#x.\n", status) );\r
+ break;\r
+ }\r
+ if ( p_old_ca_attr )\r
cl_free( p_old_ca_attr );\r
\r
/*\r
#include "al_cq.h"\r
#include "al_pd.h"\r
#include "al_qp.h"\r
+#include "al_srq.h"\r
#include "ual_mad.h"\r
#include "ual_support.h"\r
\r
qp_create = *p_qp_create;\r
qp_create.h_rq_cq = qp_create.h_rq_cq->h_ci_cq;\r
qp_create.h_sq_cq = qp_create.h_sq_cq->h_ci_cq;\r
+ if (qp_create.h_srq)\r
+ qp_create.h_srq = qp_create.h_srq->h_ci_srq;\r
status = uvp_intf.pre_create_qp( h_pd->h_ci_pd,\r
&qp_create, &qp_ioctl.in.umv_buf );\r
if( status != IB_SUCCESS )\r
(ib_cq_handle_t)p_qp_create->h_rq_cq->obj.hdl;\r
qp_ioctl.in.qp_create.h_sq_cq =\r
(ib_cq_handle_t)p_qp_create->h_sq_cq->obj.hdl;\r
+ if (p_qp_create->h_srq)\r
+ qp_ioctl.in.qp_create.h_srq =\r
+ (ib_srq_handle_t)p_qp_create->h_srq->obj.hdl;\r
qp_ioctl.in.context = h_qp;\r
qp_ioctl.in.ev_notify = (h_qp->pfn_event_cb != NULL) ? TRUE : FALSE;\r
\r
else\r
{\r
status = qp_ioctl.out.status;\r
+ \r
+ if( status == IB_SUCCESS )\r
+ {\r
+ h_qp->obj.hdl = qp_ioctl.out.h_qp;\r
+ *p_qp_attr = qp_ioctl.out.attr;\r
+ }\r
}\r
\r
/* Post uvp call */\r
h_qp->pfn_post_send = ual_post_send;\r
}\r
\r
- if( status == IB_SUCCESS )\r
- {\r
- h_qp->obj.hdl = qp_ioctl.out.h_qp;\r
- *p_qp_attr = qp_ioctl.out.attr;\r
- }\r
\r
AL_EXIT( AL_DBG_QP );\r
return status;\r
p_attr->h_rq_cq = h_qp->h_recv_cq->h_ci_cq;\r
if( h_qp->h_send_cq )\r
p_attr->h_sq_cq = h_qp->h_send_cq->h_ci_cq;\r
+ if( h_qp->h_srq )\r
+ p_attr->h_srq = h_qp->h_srq->h_ci_srq;\r
\r
/* Post uvp call */\r
if( h_qp->h_ci_qp && uvp_intf.post_query_qp )\r
--- /dev/null
+/*
+ * Copyright (c) 2005 SilverStorm Technologies. All rights reserved.
+ * Copyright (c) 1996-2003 Intel Corporation. All rights reserved.
+ *
+ * This software is available to you under the OpenIB.org BSD license
+ * below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id: ual_qp.c 1611 2006-08-20 14:48:55Z sleybo $
+ */
+
+
+#include "al.h"
+#include "al_av.h"
+#include "al_ci_ca.h"
+#include "al_cq.h"
+#include "al_pd.h"
+#include "al_srq.h"
+#include "ual_mad.h"
+#include "ual_support.h"
+
+
+#include "al_debug.h"
+#if defined(EVENT_TRACING)
+#ifdef offsetof
+#undef offsetof
+#endif
+#include "ual_srq.tmh"
+#endif
+
+
+/*
+ * Marshal a linked list of receive work requests into a flat IOCTL
+ * buffer and post them to the kernel SRQ via UAL_POST_SRQ_RECV.
+ * On failure, *pp_recv_failure points at the first WR not consumed.
+ */
+ib_api_status_t
+ual_post_srq_recv(
+	IN		const	ib_srq_handle_t				h_srq,
+	IN				ib_recv_wr_t*	const		p_recv_wr,
+		OUT			ib_recv_wr_t				**pp_recv_failure OPTIONAL )
+{
+	uintn_t						failed_index;
+	uintn_t						bytes_ret;
+	uint32_t					num_wr = 0;
+	uint32_t					num_ds = 0;
+	ib_recv_wr_t*				p_wr;
+	ib_local_ds_t*				p_ds;
+	ual_post_srq_recv_ioctl_t	*p_srq_ioctl;
+	size_t						ioctl_buf_sz;
+	cl_status_t					cl_status;
+	ib_api_status_t				status;
+
+	AL_ENTER( AL_DBG_SRQ );
+
+	/*
+	 * Since the work request is a link list and we need to pass this
+	 * to the kernel as a array of work requests. So first walk through
+	 * the list and find out how much memory we need to allocate.
+	 */
+	for( p_wr = p_recv_wr; p_wr; p_wr = p_wr->p_next )
+	{
+		num_wr++;
+
+		/* Check for unsigned wrap-around in the WR and DS counters. */
+		if( !num_wr )
+			break;
+		if( num_ds > num_ds + p_wr->num_ds )
+		{
+			num_wr = 0;
+			break;
+		}
+
+		num_ds += p_wr->num_ds;
+	}
+	if( !num_wr )
+	{
+		AL_EXIT( AL_DBG_SRQ );
+		return IB_INVALID_PARAMETER;
+	}
+
+	/*
+	 * BUGFIX: size the buffer from the SRQ ioctl type - it was sized
+	 * from ual_post_recv_ioctl_t (the QP variant), which need not have
+	 * the same layout/size.
+	 */
+	ioctl_buf_sz = sizeof(ual_post_srq_recv_ioctl_t);
+	ioctl_buf_sz += sizeof(ib_recv_wr_t) * (num_wr - 1);
+	ioctl_buf_sz += sizeof(ib_local_ds_t) * num_ds;
+
+	p_srq_ioctl = (ual_post_srq_recv_ioctl_t*)cl_zalloc( ioctl_buf_sz );
+	if( !p_srq_ioctl )
+	{
+		AL_PRINT_EXIT(TRACE_LEVEL_ERROR ,AL_DBG_ERROR,
+			("Failed to allocate IOCTL buffer.\n") );
+		return IB_INSUFFICIENT_MEMORY;
+	}
+	/* Data segments are packed immediately after the WR array. */
+	p_ds = (ib_local_ds_t*)&p_srq_ioctl->in.recv_wr[num_wr];
+
+	/* Now populate the ioctl buffer and send down the ioctl */
+	p_srq_ioctl->in.h_srq = h_srq->obj.hdl;
+	p_srq_ioctl->in.num_wr = num_wr;
+	p_srq_ioctl->in.num_ds = num_ds;
+	num_wr = 0;
+	for( p_wr = p_recv_wr; p_wr; p_wr = p_wr->p_next )
+	{
+		p_srq_ioctl->in.recv_wr[num_wr++] = *p_wr;
+		cl_memcpy(
+			p_ds, p_wr->ds_array, sizeof(ib_local_ds_t) * p_wr->num_ds );
+		p_ds += p_wr->num_ds;
+	}
+
+	cl_status = do_al_dev_ioctl( UAL_POST_SRQ_RECV,
+		&p_srq_ioctl->in, ioctl_buf_sz,
+		&p_srq_ioctl->out, sizeof(p_srq_ioctl->out),
+		&bytes_ret );
+
+	if( cl_status != CL_SUCCESS || bytes_ret != sizeof(p_srq_ioctl->out) )
+	{
+		if( pp_recv_failure )
+			*pp_recv_failure = p_recv_wr;
+
+		AL_PRINT(TRACE_LEVEL_ERROR ,AL_DBG_ERROR ,
+			("UAL_POST_SRQ_RECV IOCTL returned %s.\n",
+			CL_STATUS_MSG(cl_status)) );
+		status = IB_ERROR;
+	}
+	else
+	{
+		status = p_srq_ioctl->out.status;
+
+		if( status != IB_SUCCESS && pp_recv_failure )
+		{
+			/* Get the failed index */
+			failed_index = num_wr - p_srq_ioctl->out.failed_cnt;
+			p_wr = p_recv_wr;
+			while( failed_index-- )
+				p_wr = p_wr->p_next;
+
+			*pp_recv_failure = p_wr;
+		}
+	}
+
+	cl_free( p_srq_ioctl );
+	AL_EXIT( AL_DBG_SRQ );
+	return status;
+}
+
+
+
+/*
+ * Create a kernel SRQ for the pre-initialized user-mode SRQ object.
+ * Runs the vendor (UVP) pre/post hooks around the UAL_CREATE_SRQ ioctl
+ * and wires up the fast-path post_srq_recv entry point.
+ */
+ib_api_status_t
+ual_create_srq(
+	IN		const	ib_pd_handle_t			h_pd,
+	IN	OUT			ib_srq_handle_t			h_srq,
+	IN		const	ib_srq_attr_t* const	p_srq_attr)
+{
+	ual_create_srq_ioctl_t	srq_ioctl;
+	uintn_t					bytes_ret;
+	cl_status_t				cl_status;
+	ib_api_status_t			status;
+	uvp_interface_t			uvp_intf = h_srq->obj.p_ci_ca->verbs.user_verbs;
+	ib_srq_attr_t			srq_attr;
+
+	AL_ENTER( AL_DBG_SRQ );
+
+	/* Clear the srq_ioctl */
+	cl_memclr( &srq_ioctl, sizeof(srq_ioctl) );
+
+	/* Pre call to the UVP library */
+	if( h_pd->h_ci_pd && uvp_intf.pre_create_srq )
+	{
+		/* The post call MUST exist as it sets the UVP srq handle. */
+		CL_ASSERT( uvp_intf.post_create_srq );
+		/* Pass a copy so the caller's attributes are not modified. */
+		srq_attr = *p_srq_attr;
+		status = uvp_intf.pre_create_srq( h_pd->h_ci_pd,
+			&srq_attr, &srq_ioctl.in.umv_buf );
+		if( status != IB_SUCCESS )
+		{
+			AL_EXIT( AL_DBG_SRQ );
+			return status;
+		}
+	}
+	/*
+	 * Convert the handles to KAL handles once again starting
+	 * from the input srq attribute
+	 */
+	srq_ioctl.in.h_pd = h_pd->obj.hdl;
+	srq_ioctl.in.srq_attr = *p_srq_attr;
+	srq_ioctl.in.context = h_srq;
+	srq_ioctl.in.ev_notify = (h_srq->pfn_event_cb != NULL) ? TRUE : FALSE;
+
+	cl_status = do_al_dev_ioctl( UAL_CREATE_SRQ,
+		&srq_ioctl.in, sizeof(srq_ioctl.in), &srq_ioctl.out, sizeof(srq_ioctl.out),
+		&bytes_ret );
+
+	if( cl_status != CL_SUCCESS || bytes_ret != sizeof(srq_ioctl.out) )
+	{
+		AL_PRINT(TRACE_LEVEL_ERROR ,AL_DBG_ERROR ,
+			("UAL_CREATE_SRQ IOCTL returned %s.\n",
+			CL_STATUS_MSG(cl_status)) );
+		status = IB_ERROR;
+	}
+	else
+	{
+		status = srq_ioctl.out.status;
+	}
+
+	/* Post uvp call */
+	if( h_pd->h_ci_pd && uvp_intf.post_create_srq )
+	{
+		uvp_intf.post_create_srq( h_pd->h_ci_pd,
+			status, &h_srq->h_ci_srq, &srq_ioctl.out.umv_buf );
+
+		/*
+		 * BUGFIX: test post_srq_recv (was post_recv) - testing the wrong
+		 * member could install a NULL pfn_post_srq_recv when the UVP
+		 * implements post_recv but not post_srq_recv.
+		 */
+		if( uvp_intf.post_srq_recv )
+		{
+			h_srq->h_recv_srq = h_srq->h_ci_srq;
+			h_srq->pfn_post_srq_recv = uvp_intf.post_srq_recv;
+		}
+		else
+		{
+			h_srq->h_recv_srq = h_srq;
+			h_srq->pfn_post_srq_recv = ual_post_srq_recv;
+		}
+	}
+	else
+	{
+		h_srq->h_recv_srq = h_srq;
+		h_srq->pfn_post_srq_recv = ual_post_srq_recv;
+	}
+
+	if( status == IB_SUCCESS )
+	{
+		h_srq->obj.hdl = srq_ioctl.out.h_srq;
+	}
+
+	AL_EXIT( AL_DBG_SRQ );
+	return status;
+}
+
+
+/*
+ * Modify the attributes of a user-mode SRQ.
+ * Runs the vendor (UVP) pre/post hooks around the UAL_MODIFY_SRQ ioctl.
+ */
+ib_api_status_t
+ual_modify_srq(
+	IN			ib_srq_handle_t				h_srq,
+	IN	const	ib_srq_attr_t* const		p_srq_attr,
+	IN	const	ib_srq_attr_mask_t			srq_attr_mask)
+{
+	ual_modify_srq_ioctl_t	srq_ioctl;
+	uintn_t					bytes_ret;
+	cl_status_t				cl_status;
+	ib_api_status_t			status;
+	uvp_interface_t			uvp_intf = h_srq->obj.p_ci_ca->verbs.user_verbs;
+
+	AL_ENTER( AL_DBG_SRQ );
+
+	/* Clear the srq_ioctl */
+	cl_memclr( &srq_ioctl, sizeof(srq_ioctl) );
+
+	/* Call the uvp pre call if the vendor library provided a valid srq handle */
+	if( h_srq->h_ci_srq && uvp_intf.pre_modify_srq )
+	{
+		/* Pre call to the UVP library */
+		status = uvp_intf.pre_modify_srq( h_srq->h_ci_srq,
+			p_srq_attr, srq_attr_mask, &srq_ioctl.in.umv_buf );
+		if( status != IB_SUCCESS )
+		{
+			AL_EXIT( AL_DBG_SRQ );
+			return status;
+		}
+	}
+
+	srq_ioctl.in.h_srq = h_srq->obj.hdl;
+	srq_ioctl.in.srq_attr = *p_srq_attr;
+	srq_ioctl.in.srq_attr_mask = srq_attr_mask;
+
+	cl_status = do_al_dev_ioctl( UAL_MODIFY_SRQ,
+		&srq_ioctl.in, sizeof(srq_ioctl.in), &srq_ioctl.out, sizeof(srq_ioctl.out),
+		&bytes_ret );
+
+	if( cl_status != CL_SUCCESS || bytes_ret != sizeof(srq_ioctl.out) )
+	{
+		AL_PRINT(TRACE_LEVEL_ERROR ,AL_DBG_ERROR ,
+			("UAL_MODIFY_SRQ IOCTL returned %s.\n",
+			CL_STATUS_MSG(cl_status)) );
+		status = IB_ERROR;
+	}
+	else
+	{
+		status = srq_ioctl.out.status;
+	}
+
+	/* Post uvp call */
+	if( h_srq->h_ci_srq && uvp_intf.post_modify_srq )
+	{
+		uvp_intf.post_modify_srq( h_srq->h_ci_srq, status,
+			&srq_ioctl.out.umv_buf );
+	}
+
+	/* Removed a commented-out copy of srq_ioctl.out.srq_attr back to the
+	 * caller: p_srq_attr is const here, so no output copy is intended. */
+
+	AL_EXIT( AL_DBG_SRQ );
+	return status;
+}
+
+
+/*
+ * Query the attributes of a user-mode SRQ.
+ * Runs the vendor (UVP) pre/post hooks around the UAL_QUERY_SRQ ioctl
+ * and copies the resulting attributes back to the caller on success.
+ */
+ib_api_status_t
+ual_query_srq(
+	IN			ib_srq_handle_t				h_srq,
+		OUT		ib_srq_attr_t*				p_srq_attr )
+{
+	ual_query_srq_ioctl_t	srq_ioctl;
+	uintn_t					bytes_ret;
+	cl_status_t				cl_status;
+	ib_api_status_t			status;
+	uvp_interface_t			uvp_intf = h_srq->obj.p_ci_ca->verbs.user_verbs;
+
+	AL_ENTER( AL_DBG_SRQ );
+
+	cl_memclr( &srq_ioctl, sizeof(srq_ioctl) );
+
+	/* Give the vendor library first crack via its pre-query hook. */
+	if( h_srq->h_ci_srq && uvp_intf.pre_query_srq )
+	{
+		status = uvp_intf.pre_query_srq( h_srq->h_ci_srq, &srq_ioctl.in.umv_buf );
+		if( status != IB_SUCCESS )
+		{
+			AL_EXIT( AL_DBG_SRQ );
+			return status;
+		}
+	}
+
+	srq_ioctl.in.h_srq = h_srq->obj.hdl;
+
+	cl_status = do_al_dev_ioctl( UAL_QUERY_SRQ,
+		&srq_ioctl.in, sizeof(srq_ioctl.in), &srq_ioctl.out, sizeof(srq_ioctl.out),
+		&bytes_ret );
+
+	if( cl_status == CL_SUCCESS && bytes_ret == sizeof(srq_ioctl.out) )
+	{
+		status = srq_ioctl.out.status;
+	}
+	else
+	{
+		AL_PRINT(TRACE_LEVEL_ERROR ,AL_DBG_ERROR ,
+			("UAL_QUERY_SRQ IOCTL returned %s.\n",
+			CL_STATUS_MSG(cl_status)) );
+		status = IB_ERROR;
+	}
+
+	/* Let the vendor library post-process the attributes in place. */
+	if( h_srq->h_ci_srq && uvp_intf.post_query_srq )
+	{
+		uvp_intf.post_query_srq( h_srq->h_ci_srq, status,
+			&srq_ioctl.out.srq_attr, &srq_ioctl.out.umv_buf );
+	}
+
+	if( IB_SUCCESS == status )
+	{
+		/* UVP handles in srq_attr will be converted to UAL's handles
+		 * by the common code
+		 */
+		*p_srq_attr = srq_ioctl.out.srq_attr;
+	}
+
+	AL_EXIT( AL_DBG_SRQ );
+	return status;
+}
+
+
+/*
+ * Destroy a user-mode SRQ's kernel counterpart.
+ * Runs the vendor (UVP) pre/post hooks around the UAL_DESTROY_SRQ ioctl.
+ */
+ib_api_status_t
+ual_destroy_srq(
+	IN			ib_srq_handle_t				h_srq )
+{
+	ual_destroy_srq_ioctl_t	srq_ioctl;
+	uintn_t					bytes_ret;
+	cl_status_t				cl_status;
+	ib_api_status_t			status;
+	uvp_interface_t			uvp_intf = h_srq->obj.p_ci_ca->verbs.user_verbs;
+
+	AL_ENTER( AL_DBG_SRQ );
+
+	/* Vendor pre-destroy hook; abort the destroy if it objects. */
+	if( h_srq->h_ci_srq && uvp_intf.pre_destroy_srq )
+	{
+		status = uvp_intf.pre_destroy_srq( h_srq->h_ci_srq );
+		if (status != IB_SUCCESS)
+		{
+			AL_EXIT( AL_DBG_SRQ );
+			return status;
+		}
+	}
+
+	cl_memclr( &srq_ioctl, sizeof(srq_ioctl) );
+	srq_ioctl.in.h_srq = h_srq->obj.hdl;
+
+	cl_status = do_al_dev_ioctl( UAL_DESTROY_SRQ,
+		&srq_ioctl.in, sizeof(srq_ioctl.in), &srq_ioctl.out, sizeof(srq_ioctl.out),
+		&bytes_ret );
+
+	if( cl_status == CL_SUCCESS && bytes_ret == sizeof(srq_ioctl.out) )
+	{
+		status = srq_ioctl.out.status;
+	}
+	else
+	{
+		AL_PRINT(TRACE_LEVEL_ERROR ,AL_DBG_ERROR ,
+			("UAL_DESTROY_SRQ IOCTL returned %s.\n",
+			CL_STATUS_MSG(cl_status)) );
+		status = IB_ERROR;
+	}
+
+	/* Call vendor's post_destroy_srq */
+	if( h_srq->h_ci_srq && uvp_intf.post_destroy_srq )
+		uvp_intf.post_destroy_srq( h_srq->h_ci_srq, status );
+
+	AL_EXIT( AL_DBG_SRQ );
+	return status;
+}
+
p_ifc->map_phys_mlnx_fmr = mlnx_map_phys_fmr;\r
p_ifc->unmap_mlnx_fmr = mlnx_unmap_fmr;\r
p_ifc->destroy_mlnx_fmr = mlnx_destroy_fmr;\r
+ p_ifc->create_srq = ib_create_srq;\r
+ p_ifc->modify_srq = ib_modify_srq;\r
+ p_ifc->query_srq = ib_query_srq;\r
+ p_ifc->destroy_srq = ib_destroy_srq;\r
+ p_ifc->post_srq_recv = ib_post_srq_recv;\r
\r
BUS_EXIT( BUS_DBG_PNP );\r
}\r
ca_attr_p->max_qps_per_mcast_grp = hca_info_p->max_mcast_qp_attach;\r
ca_attr_p->max_fmr = hca_info_p->max_fmr;\r
ca_attr_p->max_map_per_fmr = hca_info_p->max_map_per_fmr;\r
- \r
+ ca_attr_p->max_srq = hca_info_p->max_srq;\r
+ ca_attr_p->max_srq_wrs = hca_info_p->max_srq_wr;\r
+ ca_attr_p->max_srq_sges = hca_info_p->max_srq_sge;\r
+\r
ca_attr_p->local_ack_delay = hca_info_p->local_ca_ack_delay;\r
ca_attr_p->bad_pkey_ctr_support = hca_info_p->device_cap_flags & IB_DEVICE_BAD_PKEY_CNTR;\r
ca_attr_p->bad_qkey_ctr_support = hca_info_p->device_cap_flags & IB_DEVICE_BAD_QKEY_CNTR;\r
ca_attr_p->av_port_check = hca_info_p->device_cap_flags & IB_DEVICE_UD_AV_PORT_ENFORCE;\r
ca_attr_p->change_primary_port = hca_info_p->device_cap_flags & IB_DEVICE_CHANGE_PHY_PORT;\r
ca_attr_p->modify_wr_depth = hca_info_p->device_cap_flags & IB_DEVICE_RESIZE_MAX_WR;\r
+ ca_attr_p->modify_srq_depth = hca_info_p->device_cap_flags & IB_DEVICE_SRQ_RESIZE;\r
ca_attr_p->hw_agents = FALSE; // in the context of IBAL then agent is implemented on the host\r
\r
ca_attr_p->num_page_sizes = 1;\r
}\r
}\r
\r
+/* Translate a low-level SRQ async event into an IBAL event record and
+ * deliver it to the async callback registered on the HCA object. */
+void srq_event_handler(struct ib_event *ev, void *context)
+{
+	mlnx_hob_t *p_hob = (mlnx_hob_t *)context;
+	struct mthca_srq *p_srq = (struct mthca_srq *)ev->element.srq;
+	ib_event_rec_t rec;
+
+	/* Build the IBAL-facing event record. */
+	rec.type = ev->event;
+	rec.vendor_specific = ev->vendor_specific;
+	rec.context = p_srq->srq_context;
+
+	/* No HOB means we have nowhere to deliver the event. */
+	if (!p_hob) {
+		HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_SHIM ,("Incorrect context. Async callback was not invoked\n"));
+		return;
+	}
+
+	(p_hob->async_cb_p)(&rec);
+}
+\r
+\r
void qp_event_handler(struct ib_event *ev, void *context)\r
{\r
mlnx_hob_t *hob_p = (mlnx_hob_t *)context;\r
\r
void ca_event_handler(struct ib_event *ev, void *context);\r
\r
+void srq_event_handler(struct ib_event *ev, void *context);\r
+\r
void qp_event_handler(struct ib_event *ev, void *context);\r
\r
void cq_event_handler(struct ib_event *ev, void *context);\r
#define WPP_CONTROL_GUIDS \\r
WPP_DEFINE_CONTROL_GUID(HCACtlGuid,(8BF1F640,63FE,4743,B9EF,FA38C695BFDE), \\r
WPP_DEFINE_BIT( HCA_DBG_DEV) \\r
- WPP_DEFINE_BIT( HCA_DBG_INIT) \\r
WPP_DEFINE_BIT( HCA_DBG_PNP) \\r
+ WPP_DEFINE_BIT( HCA_DBG_INIT) \\r
WPP_DEFINE_BIT( HCA_DBG_MAD) \\r
WPP_DEFINE_BIT( HCA_DBG_PO) \\r
WPP_DEFINE_BIT( HCA_DBG_CQ) \\r
WPP_DEFINE_BIT( HCA_DBG_QP) \\r
WPP_DEFINE_BIT( HCA_DBG_MEMORY) \\r
WPP_DEFINE_BIT( HCA_DBG_AV) \\r
+ WPP_DEFINE_BIT( HCA_DBG_SRQ) \\r
WPP_DEFINE_BIT( HCA_DBG_LOW) \\r
WPP_DEFINE_BIT( HCA_DBG_SHIM))\r
\r
// HCA_ENTER(FLAG);\r
// HCA_EXIT(FLAG);\r
// USEPREFIX(HCA_PRINT, "%!STDPREFIX! [MTHCA] :%!FUNC!() :");\r
-// USEPREFIX(HCA_PRINT_EXIT, "%!STDPREFIX! [MTHCA] :%!FUNC!() :");\r
-// USESUFFIX(HCA_PRINT_EXIT, "[MTHCA] :%!FUNC!():]");\r
// USESUFFIX(HCA_ENTER, " [MTHCA] :%!FUNC!()[");\r
// USESUFFIX(HCA_EXIT, " [MTHCA] :%!FUNC!()]");\r
+// USEPREFIX(HCA_PRINT_EXIT, "%!STDPREFIX! [MTHCA] :%!FUNC!() :");\r
+// USESUFFIX(HCA_PRINT_EXIT, "[MTHCA] :%!FUNC!():]");\r
// end_wpp\r
\r
\r
\r
\r
#define HCA_DBG_DEV (1 << 0)\r
-#define HCA_DBG_INIT (1<<1)\r
-#define HCA_DBG_PNP (1 << 2)\r
+#define HCA_DBG_PNP (1<<1)\r
+#define HCA_DBG_INIT (1 << 2)\r
#define HCA_DBG_MAD (1 << 3)\r
#define HCA_DBG_PO (1 << 4)\r
#define HCA_DBG_QP (1 << 5)\r
#define HCA_DBG_CQ (1 << 6)\r
#define HCA_DBG_MEMORY (1 << 7)\r
#define HCA_DBG_AV (1<<8)\r
-#define HCA_DBG_LOW (1 << 9)\r
-#define HCA_DBG_SHIM (1 << 10)\r
+#define HCA_DBG_SRQ (1 << 9)\r
+#define HCA_DBG_LOW (1 << 10)\r
+#define HCA_DBG_SHIM (1 << 11)\r
\r
\r
#if DBG\r
/*\r
* Work Request Processing Verbs.\r
*/\r
+\r
+\r
ib_api_status_t\r
mlnx_post_send (\r
IN const ib_qp_handle_t h_qp,\r
\r
HCA_ENTER(HCA_DBG_QP);\r
\r
- // sanity checks\r
-\r
- // create CQ\r
err = ib_dev->post_send(ib_qp_p, p_send_wr, pp_failed );\r
if (err) {\r
HCA_PRINT(TRACE_LEVEL_ERROR , HCA_DBG_QP,\r
\r
HCA_ENTER(HCA_DBG_QP);\r
\r
- // sanity checks\r
- \r
- // create CQ\r
err = ib_dev->post_recv(ib_qp_p, p_recv_wr, pp_failed );\r
if (err) {\r
HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_QP,\r
\r
}\r
\r
+/*\r
+ * Post a chain of receive work requests to a shared receive queue (SRQ).\r
+ *\r
+ * h_srq     - SRQ handle (really a struct ib_srq *).\r
+ * p_recv_wr - list of receive WRs to post.\r
+ * pp_failed - on failure, receives the first WR that was not posted.\r
+ *\r
+ * Thin shim over the verb provider's post_srq_recv(); maps the errno to\r
+ * an ib_api_status_t.  -ENOMEM is mapped explicitly to\r
+ * IB_INSUFFICIENT_RESOURCES (SRQ full).\r
+ */\r
+ib_api_status_t \r
+mlnx_post_srq_recv (\r
+	IN		const	ib_srq_handle_t		h_srq,\r
+	IN		ib_recv_wr_t		*p_recv_wr,\r
+	OUT		ib_recv_wr_t		**pp_failed OPTIONAL )\r
+{\r
+	int err;\r
+	ib_api_status_t	status;\r
+	struct ib_srq *ib_srq_p = (struct ib_srq *)h_srq;\r
+	struct ib_device *ib_dev = ib_srq_p->device;\r
+	\r
+	HCA_ENTER(HCA_DBG_QP);\r
+\r
+	err = ib_dev->post_srq_recv(ib_srq_p, p_recv_wr, pp_failed );\r
+	if (err) {\r
+		HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_QP,\r
+			("post_srq_recv failed (%d)\n", err));\r
+		if (err == -ENOMEM)\r
+			status = IB_INSUFFICIENT_RESOURCES;\r
+		else\r
+			status = errno_to_iberr(err);\r
+		goto err_post_recv;\r
+	}\r
+\r
+	status = IB_SUCCESS;\r
+	\r
+err_post_recv:	\r
+	/* NOTE: the exit trace fires on success too; the "ERROR status" text\r
+	   follows the file-wide convention and reads IB_SUCCESS then */\r
+	HCA_PRINT_EXIT(TRACE_LEVEL_ERROR , HCA_DBG_QP,\r
+		("completes with ERROR status %s\n", IB_GET_ERR_STR(status)));\r
+	return status;\r
+	\r
+}\r
+\r
/*\r
* Completion Processing and Completion Notification Request Verbs.\r
*/\r
{\r
p_interface->post_send = mlnx_post_send;\r
p_interface->post_recv = mlnx_post_recv;\r
+ p_interface->post_srq_recv = mlnx_post_srq_recv;\r
\r
p_interface->enable_ncomp_cq_notify = mlnx_enable_ncomp_cq_notify;\r
p_interface->peek_cq = NULL; /* mlnx_peek_cq: Not implemented */\r
MAP_ERR( ENODEV, IB_UNSUPPORTED );\r
MAP_ERR( EINVAL, IB_INVALID_PARAMETER );\r
MAP_ERR( ENOSYS, IB_UNSUPPORTED );\r
+ MAP_ERR( ERANGE, IB_INVALID_SETTING );\r
default:\r
//HCA_PRINT(TRACE_LEVEL_ERROR, HCA_DBG_SHIM,\r
// "Unmapped errno (%d)\n", err);\r
status = IB_UNSUPPORTED;\r
goto err_user_unsupported;\r
}\r
+\r
+ if( !cl_is_blockable() ) {\r
+ status = IB_UNSUPPORTED;\r
+ goto err_unsupported;\r
+ }\r
+\r
if (!p_mcast_gid || !ph_mcast) {\r
status = IB_INVALID_PARAMETER;\r
goto err_invalid_param;\r
RtlCopyMemory(mcast_p->mcast_gid.raw, p_mcast_gid->raw, sizeof *p_mcast_gid);\r
HCA_PRINT(TRACE_LEVEL_INFORMATION, HCA_DBG_SHIM, ("mcasth %p, qp_p %p, mlid %hx, mgid %I64x`%I64x\n", \r
mcast_p, mcast_p->ib_qp_p, mcast_p->mcast_lid,\r
- *(uint64_t*)&mcast_p->mcast_gid.raw[0],\r
- *(uint64_t*)&mcast_p->mcast_gid.raw[8] ));\r
+ cl_ntoh64(*(uint64_t*)&mcast_p->mcast_gid.raw[0]),\r
+ cl_ntoh64(*(uint64_t*)&mcast_p->mcast_gid.raw[8] )));\r
\r
// return the result\r
if (ph_mcast) *ph_mcast = (ib_mcast_handle_t)mcast_p;\r
kfree(mcast_p);\r
err_no_mem: \r
err_invalid_param:\r
+err_unsupported: \r
err_user_unsupported:\r
end: \r
HCA_PRINT_EXIT(TRACE_LEVEL_ERROR , HCA_DBG_SHIM,\r
// sanity check\r
if (!mcast_p || !mcast_p->ib_qp_p)\r
{\r
- HCA_PRINT_EXIT(TRACE_LEVEL_ERROR , HCA_DBG_SHIM,\r
+ HCA_PRINT(TRACE_LEVEL_ERROR , HCA_DBG_SHIM,\r
("completes with ERROR status IB_INVALID_PARAMETER\n"));\r
- return IB_INVALID_PARAMETER;\r
+ status = IB_INVALID_PARAMETER;\r
+ goto err_invalid_param;\r
}\r
-\r
ib_dev = mcast_p->ib_qp_p->device;\r
\r
+ if( !cl_is_blockable() ) {\r
+ status = IB_UNSUPPORTED;\r
+ goto err_unsupported;\r
+ }\r
+\r
+\r
HCA_PRINT(TRACE_LEVEL_INFORMATION, HCA_DBG_SHIM,("mcasth %p, qp_p %p, mlid %hx, mgid %I64x`%I64x\n", \r
mcast_p, mcast_p->ib_qp_p, mcast_p->mcast_lid,\r
*(uint64_t*)&mcast_p->mcast_gid.raw[0],\r
*(uint64_t*)&mcast_p->mcast_gid.raw[8] ));\r
\r
// detach\r
- err = ibv_detach_mcast( mcast_p->ib_qp_p, \r
- (union ib_gid *)&mcast_p->mcast_gid, mcast_p->mcast_lid );\r
- if (err) {\r
- HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_SHIM ,("ibv_detach_mcast failed (%d)\n", err));\r
- status = errno_to_iberr(err);\r
- goto err_detach_mcast;\r
- }\r
+ err = ibv_detach_mcast( mcast_p->ib_qp_p, \r
+ (union ib_gid *)&mcast_p->mcast_gid, mcast_p->mcast_lid );\r
+ if (err) {\r
+ HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_SHIM ,("ibv_detach_mcast failed (%d)\n", err));\r
+ status = errno_to_iberr(err);\r
+ goto err_detach_mcast;\r
+ }\r
\r
status = IB_SUCCESS;\r
\r
err_detach_mcast:\r
kfree(mcast_p);\r
- HCA_PRINT_EXIT(TRACE_LEVEL_ERROR , HCA_DBG_SHIM ,\r
+err_unsupported: \r
+ HCA_PRINT(TRACE_LEVEL_ERROR , HCA_DBG_SHIM ,\r
("completes with ERROR status %s\n", IB_GET_ERR_STR(status)));\r
+err_invalid_param:\r
return status;\r
}\r
\r
return status;\r
}\r
\r
+/*\r
+* Shared Queue Pair Management Verbs\r
+*/\r
+\r
+\r
+/*\r
+ * Create a shared receive queue on the given protection domain.\r
+ *\r
+ * h_pd        - PD handle (a struct ib_pd *).\r
+ * srq_context - opaque IBAL consumer context stored in the new SRQ.\r
+ * p_srq_attr  - requested SRQ attributes (max_wr, max_sge, srq_limit).\r
+ * ph_srq      - receives the new SRQ handle on success.\r
+ * p_umv_buf   - user-mode exchange buffer; when p_umv_buf->command is set\r
+ *               the call originates from user mode and the in/out buffer\r
+ *               must hold ibv_create_srq / ibv_create_srq_resp.\r
+ */\r
+ib_api_status_t\r
+mlnx_create_srq (\r
+	IN		const	ib_pd_handle_t		h_pd,\r
+	IN		const	void				*srq_context,\r
+	IN		const	ib_srq_attr_t * const	p_srq_attr,\r
+	OUT		ib_srq_handle_t		*ph_srq,\r
+	IN	OUT	ci_umv_buf_t		*p_umv_buf )\r
+{\r
+	int err;\r
+	ib_api_status_t	status;\r
+	struct ib_srq *ib_srq_p;\r
+	struct mthca_srq *srq_p;\r
+	struct ib_srq_init_attr srq_init_attr;\r
+	struct ib_ucontext *p_context = NULL;\r
+	struct ib_pd *ib_pd_p = (struct ib_pd *)h_pd;\r
+	struct ib_device *ib_dev = ib_pd_p->device;\r
+	mlnx_hob_t *hob_p = HOB_FROM_IBDEV(ib_dev);\r
+\r
+	HCA_ENTER(HCA_DBG_SRQ);\r
+\r
+	if( p_umv_buf  && p_umv_buf->command) {\r
+\r
+		// sanity checks: user-mode buffers must be large enough for the\r
+		// create-SRQ request/response exchange\r
+		if (p_umv_buf->input_size < sizeof(struct ibv_create_srq) ||\r
+			p_umv_buf->output_size < sizeof(struct ibv_create_srq_resp) ||\r
+			!p_umv_buf->p_inout_buf) {\r
+			status = IB_INVALID_PARAMETER;\r
+			goto err_inval_params;\r
+		}\r
+		p_context = ib_pd_p->ucontext;\r
+	}\r
+\r
+	// prepare the parameters\r
+	RtlZeroMemory(&srq_init_attr, sizeof(srq_init_attr));\r
+	srq_init_attr.event_handler = srq_event_handler;\r
+	// async events are delivered to the HOB; the IBAL context is stored\r
+	// separately in the mthca_srq below\r
+	srq_init_attr.srq_context = hob_p;\r
+	srq_init_attr.attr = *p_srq_attr;\r
+\r
+	// allocate srq \r
+	ib_srq_p = ibv_create_srq(ib_pd_p, &srq_init_attr, p_context, p_umv_buf );\r
+	if (IS_ERR(ib_srq_p)) {\r
+		err = PTR_ERR(ib_srq_p);\r
+		HCA_PRINT (TRACE_LEVEL_ERROR ,HCA_DBG_SRQ, ("ibv_create_srq failed (%d)\n", err));\r
+		status = errno_to_iberr(err);\r
+		goto err_create_srq;\r
+	}\r
+\r
+	// fill the object\r
+	srq_p = (struct mthca_srq *)ib_srq_p;\r
+	srq_p->srq_context = (void*)srq_context;\r
+	\r
+	// return the result\r
+	if (ph_srq) *ph_srq = (ib_srq_handle_t)srq_p;\r
+\r
+	status = IB_SUCCESS;\r
+	\r
+err_create_srq:\r
+err_inval_params:\r
+	if (p_umv_buf && p_umv_buf->command) \r
+		p_umv_buf->status = status;\r
+	HCA_PRINT_EXIT(TRACE_LEVEL_ERROR ,HCA_DBG_SRQ,\r
+		("completes with ERROR status %s\n", IB_GET_ERR_STR(status)));\r
+	return status;\r
+}\r
+\r
+\r
+/*\r
+ * Modify the attributes (max_wr / srq_limit) of an existing SRQ.\r
+ *\r
+ * h_srq         - SRQ handle (a struct ib_srq *).\r
+ * p_srq_attr    - new attribute values.\r
+ * srq_attr_mask - selects which attributes to apply.\r
+ * p_umv_buf     - unused; modify needs no user-mode data exchange.\r
+ *\r
+ * Returns IB_SUCCESS or the status mapped from ibv_modify_srq()'s errno.\r
+ */\r
+ib_api_status_t\r
+mlnx_modify_srq (\r
+	IN		const	ib_srq_handle_t 	h_srq,\r
+	IN		const	ib_srq_attr_t* const 	p_srq_attr,\r
+	IN		const	ib_srq_attr_mask_t	srq_attr_mask,\r
+	IN	OUT 	ci_umv_buf_t		*p_umv_buf OPTIONAL )\r
+{\r
+	int err;\r
+	ib_api_status_t	status = IB_SUCCESS;\r
+	struct ib_srq *ib_srq = (struct ib_srq *)h_srq;\r
+	UNUSED_PARAM(p_umv_buf);\r
+\r
+	HCA_ENTER(HCA_DBG_SRQ);\r
+\r
+	err = ibv_modify_srq(ib_srq, (void*)p_srq_attr, srq_attr_mask);\r
+	if (err) {\r
+		/* fixed: was HCA_DBG_AV, a copy-paste slip from the AV verbs */\r
+		HCA_PRINT (TRACE_LEVEL_ERROR, HCA_DBG_SRQ,\r
+			("ibv_modify_srq failed (%d)\n", err));\r
+		status = errno_to_iberr(err);\r
+	}\r
+\r
+	HCA_PRINT_EXIT(TRACE_LEVEL_ERROR , HCA_DBG_SRQ,\r
+		("completes with ERROR status %s\n", IB_GET_ERR_STR(status)));\r
+	return status;\r
+}\r
+\r
+/*\r
+ * Query the current attributes of an SRQ (max_wr, max_sge, srq_limit).\r
+ *\r
+ * h_srq      - SRQ handle (a struct ib_srq *).\r
+ * p_srq_attr - receives the attribute values on success.\r
+ * p_umv_buf  - unused; query needs no user-mode data exchange.\r
+ */\r
+ib_api_status_t\r
+mlnx_query_srq (\r
+	IN		const	ib_srq_handle_t 	h_srq,\r
+	OUT		ib_srq_attr_t* const		p_srq_attr,\r
+	IN	OUT 	ci_umv_buf_t		*p_umv_buf OPTIONAL )\r
+{\r
+	int err;\r
+	ib_api_status_t	status = IB_SUCCESS;\r
+	struct ib_srq *ib_srq = (struct ib_srq *)h_srq;\r
+	UNUSED_PARAM(p_umv_buf);\r
+\r
+	HCA_ENTER(HCA_DBG_SRQ);\r
+\r
+	err = ibv_query_srq(ib_srq, p_srq_attr);\r
+	if (err) {\r
+		/* fixed: was HCA_DBG_AV, a copy-paste slip from the AV verbs */\r
+		HCA_PRINT (TRACE_LEVEL_ERROR, HCA_DBG_SRQ,\r
+			("ibv_query_srq failed (%d)\n", err));\r
+		status = errno_to_iberr(err);\r
+	}\r
+\r
+	HCA_PRINT_EXIT(TRACE_LEVEL_ERROR , HCA_DBG_SRQ,\r
+		("completes with ERROR status %s\n", IB_GET_ERR_STR(status)));\r
+	return status;\r
+}\r
+\r
+/*\r
+ * Destroy an SRQ.  The SRQ must no longer be referenced by any QP\r
+ * (the lower layer reports the error otherwise).\r
+ */\r
+ib_api_status_t\r
+mlnx_destroy_srq (\r
+	IN	const	ib_srq_handle_t		h_srq )\r
+{\r
+	int err;\r
+	ib_api_status_t	status = IB_SUCCESS;\r
+	struct ib_srq *ib_srq = (struct ib_srq *)h_srq;\r
+\r
+	HCA_ENTER(HCA_DBG_SRQ);\r
+\r
+	err = ibv_destroy_srq(ib_srq);\r
+	if (err) {\r
+		/* fixed: was HCA_DBG_AV, a copy-paste slip from the AV verbs */\r
+		HCA_PRINT (TRACE_LEVEL_ERROR, HCA_DBG_SRQ,\r
+			("ibv_destroy_srq failed (%d)\n", err));\r
+		status = errno_to_iberr(err);\r
+	}\r
+\r
+	HCA_PRINT_EXIT(TRACE_LEVEL_ERROR , HCA_DBG_SRQ,\r
+		("completes with ERROR status %s\n", IB_GET_ERR_STR(status)));\r
+	return status;\r
+}\r
+\r
/*\r
* Queue Pair Management Verbs\r
*/\r
}\r
p_context = ib_pd_p->ucontext;\r
}\r
- else \r
- p_context = NULL;\r
\r
// prepare the parameters\r
RtlZeroMemory(&qp_init_attr, sizeof(qp_init_attr));\r
qp_init_attr.qp_context = hob_p;\r
qp_init_attr.recv_cq = (struct ib_cq *)p_create_attr->h_rq_cq;\r
qp_init_attr.send_cq = (struct ib_cq *)p_create_attr->h_sq_cq;\r
+ qp_init_attr.srq = (struct ib_srq *)p_create_attr->h_srq;\r
qp_init_attr.cap.max_recv_sge = p_create_attr->rq_sge;\r
qp_init_attr.cap.max_send_sge = p_create_attr->sq_sge;\r
qp_init_attr.cap.max_recv_wr = p_create_attr->rq_depth;\r
}\r
\r
/* sanity check */\r
- if (*p_size > (uint32_t)ib_dev->mdev->limits.max_cqes) {\r
+ if (!*p_size || *p_size > (uint32_t)ib_dev->mdev->limits.max_cqes) {\r
status = IB_INVALID_CQ_SIZE;\r
goto err_cqe;\r
}\r
p_interface->modify_av = mlnx_modify_av;\r
p_interface->destroy_av = mlnx_destroy_av;\r
\r
+ p_interface->create_srq = mlnx_create_srq;\r
+ p_interface->modify_srq = mlnx_modify_srq;\r
+ p_interface->query_srq = mlnx_query_srq;\r
+ p_interface->destroy_srq = mlnx_destroy_srq;\r
+\r
p_interface->create_qp = mlnx_create_qp;\r
p_interface->create_spl_qp = mlnx_create_spl_qp;\r
p_interface->modify_qp = mlnx_modify_qp;\r
IB_EVENT_DEVICE_FATAL = IB_AE_LOCAL_FATAL,
IB_EVENT_PORT_ACTIVE = IB_AE_PORT_ACTIVE,
IB_EVENT_PORT_ERR = IB_AE_PORT_DOWN,
+ IB_EVENT_SRQ_LIMIT_REACHED = IB_AE_SRQ_LIMIT_REACHED,
+ IB_EVENT_SRQ_CATAS_ERROR = IB_AE_SRQ_CATAS_ERROR,
+ IB_EVENT_SRQ_QP_LAST_WQE_REACHED = IB_AE_SRQ_QP_LAST_WQE_REACHED,
IB_EVENT_LID_CHANGE = IB_AE_UNKNOWN + 1,
IB_EVENT_PKEY_CHANGE,
- IB_EVENT_SM_CHANGE,
- IB_EVENT_SRQ_ERR,
- IB_EVENT_SRQ_LIMIT_REACHED,
- IB_EVENT_QP_LAST_WQE_REACHED
+ IB_EVENT_SM_CHANGE
};
struct ib_event {
IB_CQ_NEXT_COMP
};
-enum ib_srq_attr_mask {
- IB_SRQ_MAX_WR = 1 << 0,
- IB_SRQ_LIMIT = 1 << 1,
-};
-
-struct ib_srq_attr {
- u32 max_wr;
- u32 max_sge;
- u32 srq_limit;
-};
-
struct ib_srq_init_attr {
- void (*event_handler)(struct ib_event *, void *);
- void *srq_context;
- struct ib_srq_attr attr;
+ void (*event_handler)(struct ib_event *, void *);
+ void *srq_context;
+ ib_srq_attr_t attr;
};
struct ib_qp_cap {
};
#pragma warning( default : 4200 )
-struct ib_udata {
- void *inbuf;
- void *outbuf;
- size_t inlen;
- size_t outlen;
-};
-
#define IB_UMEM_MAX_PAGE_CHUNK \
((PAGE_SIZE - offsetof(struct ib_umem_chunk, page_list)) / \
((char *) &((struct ib_umem_chunk *) 0)->page_list[1] - \
(char *) &((struct ib_umem_chunk *) 0)->page_list[0]))
-struct ib_umem_object {
- struct ib_uobject uobject;
- struct ib_umem umem;
-};
-
struct ib_pd {
struct list_head list; /* for chaining AV MRs (for user mode only) */
struct ib_device *device;
struct ib_srq {
struct ib_device *device;
struct ib_pd *pd;
- struct ib_uobject *uobject;
+ struct ib_ucontext *ucontext;
+ struct ib_mr *ib_mr;
void (*event_handler)(struct ib_event *, void *);
void *srq_context;
- atomic_t usecnt;
+ atomic_t usecnt; /* count number of work queues */
};
struct ib_qp {
struct ib_srq_init_attr *srq_init_attr,
ci_umv_buf_t* const p_umv_buf);
int (*modify_srq)(struct ib_srq *srq,
- struct ib_srq_attr *srq_attr,
- enum ib_srq_attr_mask srq_attr_mask);
+ ib_srq_attr_t *srq_attr,
+ ib_srq_attr_mask_t srq_attr_mask);
int (*query_srq)(struct ib_srq *srq,
- struct ib_srq_attr *srq_attr);
+ ib_srq_attr_t *srq_attr);
int (*destroy_srq)(struct ib_srq *srq);
int (*post_srq_recv)(struct ib_srq *srq,
struct _ib_recv_wr *recv_wr,
* @srq_init_attr: A list of initial attributes required to create the
* SRQ. If SRQ creation succeeds, then the attributes are updated to
* the actual capabilities of the created SRQ.
+ * @context: user process context (for application calls only)
+ * @p_umv_buf: parameters structure (for application calls only)
*
* srq_attr->max_wr and srq_attr->max_sge are read the determine the
* requested size of the SRQ, and set to the actual values allocated
* will always be at least as large as the requested values.
*/
struct ib_srq *ibv_create_srq(struct ib_pd *pd,
- struct ib_srq_init_attr *srq_init_attr);
+ struct ib_srq_init_attr *srq_init_attr,
+ struct ib_ucontext *context, ci_umv_buf_t* const p_umv_buf);
+
/**
* ibv_modify_srq - Modifies the attributes for the specified SRQ.
* the number of receives queued drops below the limit.
*/
int ibv_modify_srq(struct ib_srq *srq,
- struct ib_srq_attr *srq_attr,
- enum ib_srq_attr_mask srq_attr_mask);
+ ib_srq_attr_t *srq_attr,
+ ib_srq_attr_mask_t srq_attr_mask);
/**
* ibv_query_srq - Returns the attribute list and current values for the
* @srq_attr: The attributes of the specified SRQ.
*/
int ibv_query_srq(struct ib_srq *srq,
- struct ib_srq_attr *srq_attr);
+ ib_srq_attr_t *srq_attr);
/**
* ibv_destroy_srq - Destroys the specified SRQ.
* the work request that failed to be posted on the QP.
*/
static inline int ibv_post_srq_recv(struct ib_srq *srq,
- struct _ib_recv_wr *recv_wr,
- struct _ib_recv_wr **bad_recv_wr)
+ struct _ib_recv_wr *recv_wr,
+ struct _ib_recv_wr **bad_recv_wr)
{
return srq->device->post_srq_recv(srq, recv_wr, bad_recv_wr);
}
* @context: user process context (for application calls only)
* @p_umv_buf: parameters structure (for application calls only)
*/
- struct ib_qp *ibv_create_qp(struct ib_pd *pd,
- struct ib_qp_init_attr *qp_init_attr,
- struct ib_ucontext *context, ci_umv_buf_t* const p_umv_buf);
+struct ib_qp *ibv_create_qp(struct ib_pd *pd,
+ struct ib_qp_init_attr *qp_init_attr,
+ struct ib_ucontext *context, ci_umv_buf_t* const p_umv_buf);
/**
* ibv_modify_qp - Modifies the attributes for the specified QP and then
-#include <mt_l2w.h>\r
-#include <hca_data.h>\r
-#if defined(EVENT_TRACING)\r
-#ifdef offsetof\r
-#undef offsetof\r
-#endif\r
-#include "mt_l2w.tmh"\r
-#endif\r
-\r
-pci_pool_t *\r
-pci_pool_create (const char *name, struct mthca_dev *mdev,\r
- size_t size, size_t align, size_t allocation)\r
-{\r
- pci_pool_t *pool;\r
- UNREFERENCED_PARAMETER(align);\r
- UNREFERENCED_PARAMETER(allocation);\r
-\r
- MT_ASSERT(KeGetCurrentIrql() <= DISPATCH_LEVEL);\r
- \r
- // allocation parameter is not handled yet\r
- ASSERT(allocation == 0);\r
-\r
- // allocate object\r
- pool = (pci_pool_t *)ExAllocatePoolWithTag( NonPagedPool, sizeof(pci_pool_t), MT_TAG_PCIPOOL );\r
- if (pool == NULL) \r
- return NULL;\r
-\r
- //TODO: not absolutely correct: Linux's pci_pool_alloc provides contiguous physical memory,\r
- // while default alloc function - ExAllocatePoolWithTag -doesn't.\r
- // But for now it is used for elements of size <= PAGE_SIZE\r
- // Anyway - a sanity check:\r
- ASSERT(size <= PAGE_SIZE);\r
- if (size > PAGE_SIZE)\r
- return NULL;\r
-\r
- //TODO: not too effective: one can read its own alloc/free functions\r
- ExInitializeNPagedLookasideList( &pool->pool_hdr, NULL, NULL, 0, size, MT_TAG_PCIPOOL, 0 );\r
- \r
- // fill the object\r
- pool->mdev = mdev;\r
- pool->size = size;\r
- strncpy( pool->name, name, sizeof pool->name );\r
-\r
- return pool; \r
-}\r
-\r
-// from lib/string.c\r
-/**\r
-* strlcpy - Copy a %NUL terminated string into a sized buffer\r
-* @dest: Where to copy the string to\r
-* @src: Where to copy the string from\r
-* @size: size of destination buffer\r
-*\r
-* Compatible with *BSD: the result is always a valid\r
-* NUL-terminated string that fits in the buffer (unless,\r
-* of course, the buffer size is zero). It does not pad\r
-* out the result like strncpy() does.\r
-*/\r
-SIZE_T strlcpy(char *dest, const char *src, SIZE_T size)\r
-{\r
- SIZE_T ret = strlen(src);\r
-\r
- if (size) {\r
- SIZE_T len = (ret >= size) ? size-1 : ret;\r
- memcpy(dest, src, len);\r
- dest[len] = '\0';\r
- }\r
- return ret;\r
-}\r
-\r
-\r
-int __bitmap_full(const unsigned long *bitmap, int bits)\r
-{\r
- int k, lim = bits/BITS_PER_LONG;\r
- for (k = 0; k < lim; ++k)\r
- if (~bitmap[k])\r
- return 0;\r
-\r
- if (bits % BITS_PER_LONG)\r
- if (~bitmap[k] & BITMAP_LAST_WORD_MASK(bits))\r
- return 0;\r
-\r
- return 1;\r
-}\r
-\r
-int __bitmap_empty(const unsigned long *bitmap, int bits)\r
-{\r
- int k, lim = bits/BITS_PER_LONG;\r
- for (k = 0; k < lim; ++k)\r
- if (bitmap[k])\r
- return 0;\r
-\r
- if (bits % BITS_PER_LONG)\r
- if (bitmap[k] & BITMAP_LAST_WORD_MASK(bits))\r
- return 0;\r
-\r
- return 1;\r
-}\r
-\r
-int request_irq(\r
- IN CM_PARTIAL_RESOURCE_DESCRIPTOR *int_info, /* interrupt resources */\r
- IN KSPIN_LOCK *isr_lock, /* spin lock for ISR */ \r
- IN PKSERVICE_ROUTINE isr, /* ISR */\r
- IN void *isr_ctx, /* ISR context */\r
- OUT PKINTERRUPT *int_obj /* interrupt object */\r
- )\r
-{\r
- NTSTATUS status;\r
-\r
- status = IoConnectInterrupt(\r
- int_obj, /* InterruptObject */\r
- isr, /* ISR */ \r
- isr_ctx, /* ISR context */\r
- isr_lock, /* spinlock */\r
- int_info->u.Interrupt.Vector, /* interrupt vector */\r
- (KIRQL)int_info->u.Interrupt.Level, /* IRQL */\r
- (KIRQL)int_info->u.Interrupt.Level, /* Synchronize IRQL */\r
- (BOOLEAN)((int_info->Flags == CM_RESOURCE_INTERRUPT_LATCHED) ? \r
- Latched : LevelSensitive), /* interrupt type: LATCHED or LEVEL */\r
- (BOOLEAN)(int_info->ShareDisposition == CmResourceShareShared), /* vector shared or not */\r
- g_processor_affinity ? g_processor_affinity : (KAFFINITY)int_info->u.Interrupt.Affinity, /* interrupt affinity */\r
- FALSE /* whether to save Float registers */\r
- );\r
-\r
- if (!NT_SUCCESS(status)) {\r
- HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_INIT ,("IoConnectInterrupt failed status %d (did you change the processor_affinity ? )\n",status));\r
- return -EFAULT; /* failed to connect interrupt */\r
- } \r
- else\r
- return 0;\r
-}\r
-\r
+#include <mt_l2w.h>
+#include <hca_data.h>
+#if defined(EVENT_TRACING)
+#ifdef offsetof
+#undef offsetof
+#endif
+#include "mt_l2w.tmh"
+#endif
+
+/*
+ * Create a pci_pool (Linux pci_pool_create() look-alike) backed by an
+ * NPaged lookaside list.
+ *
+ * name       - pool name, copied (and truncated) into pool->name.
+ * mdev       - owning device, stored for later use.
+ * size       - element size; must not exceed PAGE_SIZE (see TODO below).
+ * align      - ignored (not handled yet).
+ * allocation - ignored; asserted to be 0.
+ *
+ * Returns the new pool, or NULL on allocation failure / oversized element.
+ */
+pci_pool_t *
+pci_pool_create (const char *name, struct mthca_dev *mdev,
+	size_t size, size_t align, size_t allocation)
+{
+	pci_pool_t *pool;
+	UNREFERENCED_PARAMETER(align);
+	UNREFERENCED_PARAMETER(allocation);
+
+	MT_ASSERT(KeGetCurrentIrql() <= DISPATCH_LEVEL);
+
+	// allocation parameter is not handled yet
+	ASSERT(allocation == 0);
+
+	// allocate object
+	pool = (pci_pool_t *)ExAllocatePoolWithTag( NonPagedPool, sizeof(pci_pool_t), MT_TAG_PCIPOOL );
+	if (pool == NULL)
+		return NULL;
+
+	//TODO: not absolutely correct: Linux's pci_pool_alloc provides contiguous physical memory,
+	// while default alloc function - ExAllocatePoolWithTag -doesn't.
+	// But for now it is used for elements of size <= PAGE_SIZE
+	// Anyway - a sanity check:
+	ASSERT(size <= PAGE_SIZE);
+	if (size > PAGE_SIZE) {
+		/* fixed: 'pool' was leaked on this bail-out path */
+		ExFreePoolWithTag( pool, MT_TAG_PCIPOOL );
+		return NULL;
+	}
+
+	//TODO: not too effective: one can read its own alloc/free functions
+	ExInitializeNPagedLookasideList( &pool->pool_hdr, NULL, NULL, 0, size, MT_TAG_PCIPOOL, 0 );
+
+	// fill the object
+	pool->mdev = mdev;
+	pool->size = size;
+	strncpy( pool->name, name, sizeof pool->name );
+	/* strncpy does not terminate when 'name' fills the buffer - force it */
+	pool->name[sizeof pool->name - 1] = '\0';
+
+	return pool;
+}
+
+// from lib/string.c
+/**
+* strlcpy - Copy a %NUL terminated string into a sized buffer
+* @dest: Where to copy the string to
+* @src: Where to copy the string from
+* @size: size of destination buffer
+*
+* Compatible with *BSD: the result is always a valid
+* NUL-terminated string that fits in the buffer (unless,
+* of course, the buffer size is zero). It does not pad
+* out the result like strncpy() does.
+*/
+/*
+ * Copy src into dest, truncating to at most size-1 characters and always
+ * NUL-terminating when size > 0.  Returns strlen(src), so a return value
+ * >= size signals truncation.  No zero-padding (BSD strlcpy semantics).
+ */
+SIZE_T strlcpy(char *dest, const char *src, SIZE_T size)
+{
+	SIZE_T src_len = strlen(src);
+
+	if (size != 0) {
+		SIZE_T n = (src_len >= size) ? (size - 1) : src_len;
+		memcpy(dest, src, n);
+		dest[n] = '\0';
+	}
+	return src_len;
+}
+
+
+/* Return 1 when all of the first 'bits' bits in the bitmap are set. */
+int __bitmap_full(const unsigned long *bitmap, int bits)
+{
+	int word;
+	int whole_words = bits / BITS_PER_LONG;
+
+	/* every fully-used word must be all ones */
+	for (word = 0; word < whole_words; ++word) {
+		if (~bitmap[word])
+			return 0;
+	}
+
+	/* check the valid bits of the trailing partial word, if any */
+	if ((bits % BITS_PER_LONG) &&
+		(~bitmap[word] & BITMAP_LAST_WORD_MASK(bits)))
+		return 0;
+
+	return 1;
+}
+
+/* Return 1 when none of the first 'bits' bits in the bitmap are set. */
+int __bitmap_empty(const unsigned long *bitmap, int bits)
+{
+	int word;
+	int whole_words = bits / BITS_PER_LONG;
+
+	/* every fully-used word must be all zeros */
+	for (word = 0; word < whole_words; ++word) {
+		if (bitmap[word])
+			return 0;
+	}
+
+	/* check the valid bits of the trailing partial word, if any */
+	if ((bits % BITS_PER_LONG) &&
+		(bitmap[word] & BITMAP_LAST_WORD_MASK(bits)))
+		return 0;
+
+	return 1;
+}
+
+/*
+ * Connect an ISR to the interrupt described by a CM resource descriptor
+ * (Linux request_irq() look-alike built on IoConnectInterrupt).
+ *
+ * int_info - translated interrupt resource (vector, level, affinity, mode).
+ * isr_lock - spin lock used to synchronize with the ISR.
+ * isr      - the interrupt service routine.
+ * isr_ctx  - context passed to the ISR.
+ * int_obj  - receives the connected interrupt object on success.
+ *
+ * Returns 0 on success, -EFAULT if IoConnectInterrupt fails.  The global
+ * g_processor_affinity, when non-zero, overrides the resource's affinity.
+ */
+int request_irq(
+	IN	CM_PARTIAL_RESOURCE_DESCRIPTOR	*int_info,	/* interrupt resources */
+	IN 	KSPIN_LOCK  	*isr_lock, 		/* spin lock for ISR */
+	IN	PKSERVICE_ROUTINE	isr,		/* ISR */
+	IN	void	*isr_ctx,			/* ISR context */
+	OUT	PKINTERRUPT	*int_obj			/* interrupt object */
+	)
+{
+	NTSTATUS		status;
+
+	status = IoConnectInterrupt(
+		int_obj,										/* InterruptObject */
+		isr,										/* ISR */
+		isr_ctx,										/* ISR context */
+		isr_lock,										/* spinlock */
+		int_info->u.Interrupt.Vector,		/* interrupt vector */
+		(KIRQL)int_info->u.Interrupt.Level,		/* IRQL */
+		(KIRQL)int_info->u.Interrupt.Level,		/* Synchronize IRQL */
+		(BOOLEAN)((int_info->Flags == CM_RESOURCE_INTERRUPT_LATCHED) ?
+			Latched : LevelSensitive),		/* interrupt type: LATCHED or LEVEL */
+		(BOOLEAN)(int_info->ShareDisposition == CmResourceShareShared),	/* vector shared or not */
+		g_processor_affinity ? g_processor_affinity : (KAFFINITY)int_info->u.Interrupt.Affinity,	/* interrupt affinity */
+		FALSE										/* whether to save Float registers */
+		);
+
+	if (!NT_SUCCESS(status)) {
+		HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_INIT ,("IoConnectInterrupt failed status %d (did you change the processor_affinity ? )\n",status));
+		return -EFAULT;		/* failed to connect interrupt */
+	}
+	else
+		return 0;
+}
+
#define CPU_2_BE64_PREP
#define CPU_2_BE64(x) cl_hton64(x)
#else
-#define CPU_2_BE64_PREP unsigned __int64 __tmp__;
+#define CPU_2_BE64_PREP unsigned __int64 __tmp__
#define CPU_2_BE64(x) ( __tmp__ = x, cl_hton64(__tmp__) )
#endif
struct ib_mr *ib_mr = NULL;
u64 start = 0;
u64 user_handle = 0;
+ struct ibv_create_ah_resp *create_ah_resp = 0;
// for user call we need also allocate MR
if (context && p_umv_buf && p_umv_buf->p_inout_buf) {
ah = pd->device->create_ah(pd, ah_attr);
+ /* fill obligatory fields */
+ if (context && p_umv_buf && p_umv_buf->p_inout_buf) {
+ create_ah_resp = (struct ibv_create_ah_resp *)(void*)p_umv_buf->p_inout_buf;
+ create_ah_resp->user_handle = user_handle;
+ }
+
if (IS_ERR(ah)) {
err = PTR_ERR(ah);
HCA_PRINT(TRACE_LEVEL_ERROR,HCA_DBG_AV ,("create_ah failed (%d)\n", err));
if (context && p_umv_buf && p_umv_buf->p_inout_buf) {
struct ibv_create_ah_resp *create_ah_resp = (struct ibv_create_ah_resp *)(void*)p_umv_buf->p_inout_buf;
create_ah_resp->start = start;
- create_ah_resp->user_handle = user_handle;
create_ah_resp->mr.lkey = ib_mr->lkey;
create_ah_resp->mr.rkey = ib_mr->rkey;
create_ah_resp->mr.mr_handle = (u64)(ULONG_PTR)ib_mr;
/* Shared receive queues */
+/*
+ * Create an SRQ on the given PD.  For user-mode calls (context != NULL and
+ * a valid umv buffer), an MR covering the user's SRQ buffer is registered
+ * first and the response structure is filled for the user-mode library.
+ * Returns the new SRQ or ERR_PTR(-errno).
+ */
struct ib_srq *ibv_create_srq(struct ib_pd *pd,
-			       struct ib_srq_init_attr *srq_init_attr)
+	struct ib_srq_init_attr *srq_init_attr,
+	struct ib_ucontext *context, ci_umv_buf_t* const p_umv_buf)
{
-	struct ib_srq *srq;
+	int err;
+	struct ib_srq *ib_srq;
+	struct ib_mr *ib_mr = NULL;
+	u64 user_handle = 0;
+	struct ibv_create_srq_resp *create_srq_resp = 0;
-	if (!pd->device->create_srq)
-		return ERR_PTR(-ENOSYS);
+	// for user call we need also allocate MR
+	if (context && p_umv_buf && p_umv_buf->p_inout_buf) {
+		struct ibv_create_srq *create_srp = (struct ibv_create_srq *)(void*)p_umv_buf->p_inout_buf;
+		
+		// create region; the MR pins the user-mode SRQ buffer
+		ib_mr	= ibv_reg_mr( 
+			(struct ib_pd *)(ULONG_PTR)create_srp->mr.pd_handle,
+			create_srp->mr.access_flags,
+			(void*)(ULONG_PTR)create_srp->mr.start,
+			create_srp->mr.length, create_srp->mr.hca_va, TRUE );
+		if (IS_ERR(ib_mr)) {
+			err = PTR_ERR(ib_mr);
+			HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_QP ,("ibv_reg_mr failed (%d)\n", err));
+			goto err_alloc_mr;
+		}
+		create_srp->lkey = ib_mr->lkey;
+		user_handle = create_srp->user_handle;
+	}
+	ib_srq = pd->device->create_srq(pd, srq_init_attr, p_umv_buf);
+	/* fill obligatory fields */
+	if (context && p_umv_buf && p_umv_buf->p_inout_buf) {
+		create_srq_resp = (struct ibv_create_srq_resp *)(void*)p_umv_buf->p_inout_buf;
+		create_srq_resp->user_handle = user_handle;
+	}
+
+	if (IS_ERR(ib_srq)) {
+		err = PTR_ERR(ib_srq);
+		HCA_PRINT(TRACE_LEVEL_ERROR,HCA_DBG_QP ,("create_srq failed (%d)\n", err));
+		goto err_create_srq;
+	}
+
+	// fill results
+	ib_srq->device        	= pd->device;
+	ib_srq->pd        		= pd;
+	ib_srq->ucontext 		= context;
+	ib_srq->event_handler 	= srq_init_attr->event_handler;
+	ib_srq->srq_context    	= srq_init_attr->srq_context;
+	atomic_inc(&pd->usecnt);
+	atomic_set(&ib_srq->usecnt, 0);
+	if (context)
+		atomic_inc(&context->usecnt);
+	HCA_PRINT(TRACE_LEVEL_INFORMATION ,HCA_DBG_QP ,("PD%d use cnt %d, pd_handle %p, ctx %p \n", 
+		((struct mthca_pd*)pd)->pd_num, pd->usecnt, pd, pd->ucontext));
+
+	HCA_PRINT(TRACE_LEVEL_INFORMATION, HCA_DBG_SRQ ,
+		("uctx %p, qhndl %p, qnum %#x \n", 
+		pd->ucontext, ib_srq, ((struct mthca_srq*)ib_srq)->srqn ) );
+
+	// fill results for user
+	/* NOTE(review): ib_srq->ib_mr is set only on the user path; kernel
+	   callers rely on the provider zero-initializing the SRQ (kzalloc) so
+	   that ibv_destroy_srq sees a NULL MR - confirm for all providers */
+	if (context && p_umv_buf && p_umv_buf->p_inout_buf) {
+		struct mthca_srq *srq = (struct mthca_srq *)ib_srq;
+		ib_srq->ib_mr = ib_mr;
+		create_srq_resp->mr.lkey = ib_mr->lkey;
+		create_srq_resp->mr.rkey = ib_mr->rkey;
+		create_srq_resp->mr.mr_handle = (u64)(ULONG_PTR)ib_mr;
+		create_srq_resp->srq_handle = (__u64)(ULONG_PTR)srq;
+		create_srq_resp->max_wr = (mthca_is_memfree(to_mdev(pd->device))) ? srq->max - 1 : srq->max;
+		create_srq_resp->max_sge = srq->max_gs;
+		create_srq_resp->srqn= srq->srqn;
+		p_umv_buf->output_size = sizeof(struct ibv_create_srq_resp);
		HCA_PRINT(TRACE_LEVEL_INFORMATION ,HCA_DBG_QP ,("PD%d use cnt %d \n", 
			((struct mthca_pd*)pd)->pd_num, pd->usecnt));
	}
-	return srq;
+	return ib_srq;
+	
+err_create_srq:
+	/* undo the MR registration performed for the user-mode path */
+	if (ib_mr)
+		ibv_dereg_mr(ib_mr);
+err_alloc_mr:
+	if( p_umv_buf && p_umv_buf->command ) 
+		p_umv_buf->status = IB_ERROR;
+	HCA_EXIT(HCA_DBG_QP);
+	return ERR_PTR(err);
}
+/* Modify SRQ attributes (max_wr / srq_limit) by dispatching to the verb
+ * provider; returns 0 or a negative errno.  Uses the IBAL attribute types
+ * in place of the removed ib_srq_attr / ib_srq_attr_mask definitions. */
int ibv_modify_srq(struct ib_srq *srq,
-		   struct ib_srq_attr *srq_attr,
-		   enum ib_srq_attr_mask srq_attr_mask)
+	ib_srq_attr_t *srq_attr,
+	ib_srq_attr_mask_t srq_attr_mask)
{
	return srq->device->modify_srq(srq, srq_attr, srq_attr_mask);
}
+/* Query current SRQ attributes; returns 0 or a negative errno. */
int ibv_query_srq(struct ib_srq *srq,
-		  struct ib_srq_attr *srq_attr)
+	ib_srq_attr_t *srq_attr)
{
-	return srq->device->query_srq ?
-		srq->device->query_srq(srq, srq_attr) : -ENOSYS;
+	/* NOTE(review): the NULL check on query_srq was dropped; this assumes
+	   every verb provider implements query_srq - confirm for all HCAs */
+	return srq->device->query_srq(srq, srq_attr);
}
int ibv_destroy_srq(struct ib_srq *srq)
{
- struct ib_pd *pd;
int ret;
-
- if (atomic_read(&srq->usecnt))
- return -EBUSY;
-
- pd = srq->pd;
+ struct ib_pd *pd = srq->pd;
+ struct ib_ucontext *ucontext = pd->ucontext;
+ struct ib_mr * ib_mr = srq->ib_mr;
ret = srq->device->destroy_srq(srq);
if (!ret) {
atomic_dec(&pd->usecnt);
- HCA_PRINT(TRACE_LEVEL_INFORMATION ,HCA_DBG_SHIM ,("PD%d use cnt %d \n",
- ((struct mthca_pd*)pd)->pd_num, pd->usecnt));
+ HCA_PRINT(TRACE_LEVEL_INFORMATION ,HCA_DBG_SRQ ,("PD%d use cnt %d, pd_handle %p, ctx %p \n",
+ ((struct mthca_pd*)pd)->pd_num, pd->usecnt, pd, pd->ucontext));
+ release_user_cq_qp_resources(ucontext, ib_mr);
}
return ret;
CMD_TIME_CLASS_A, status);
}
+/* Firmware command QUERY_SRQ: read the context of SRQ 'num' into the
+ * output mailbox (class-A timeout); 'status' receives the FW status. */
+int mthca_QUERY_SRQ(struct mthca_dev *dev, u32 num,
+		    struct mthca_mailbox *mailbox, u8 *status)
+{
+	return mthca_cmd_box(dev, 0, mailbox->dma, num, 0,
+			     CMD_QUERY_SRQ, CMD_TIME_CLASS_A, status);
+}
+
int mthca_ARM_SRQ(struct mthca_dev *dev, int srq_num, int limit, u8 *status)
{
return mthca_cmd(dev, limit, srq_num, 0, CMD_ARM_SRQ,
int srq_num, u8 *status);
int mthca_HW2SW_SRQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
int srq_num, u8 *status);
+int mthca_QUERY_SRQ(struct mthca_dev *dev, u32 num,
+ struct mthca_mailbox *mailbox, u8 *status);
int mthca_ARM_SRQ(struct mthca_dev *dev, int srq_num, int limit, u8 *status);
int mthca_MODIFY_QP(struct mthca_dev *dev, int trans, u32 num,
int is_ee, struct mthca_mailbox *mailbox, u32 optmask,
spin_unlock_irq(&lh);
/* wait for all RUNNING DPCs on that EQ to complete */
- {
- ASSERT(KeGetCurrentIrql() <= DISPATCH_LEVEL);
- // wait for DPCs, using this EQ, to complete
- spin_lock_sync( &dev->eq_table.eq[MTHCA_EQ_COMP].lock);
- //TODO: do we need that ?
- spin_lock_sync( &dev->eq_table.eq[MTHCA_EQ_ASYNC].lock );
- }
+ ASSERT(KeGetCurrentIrql() == PASSIVE_LEVEL);
+ KeFlushQueuedDpcs();
atomic_dec(&cq->refcount);
wait_event(&cq->wait, !atomic_read(&cq->refcount));
int max_qp_init_rdma;
int reserved_qps;
int num_srqs;
- int reserved_srqs;
int max_srq_wqes;
+ int max_srq_sge;
+ int reserved_srqs;
int num_eecs;
int reserved_eecs;
int num_cqs;
struct mthca_srq *srq);
int mthca_alloc_srq(struct mthca_dev *dev, struct mthca_pd *pd,
- struct ib_srq_attr *attr, struct mthca_srq *srq);
+ ib_srq_attr_t *attr, struct mthca_srq *srq);
void mthca_free_srq(struct mthca_dev *dev, struct mthca_srq *srq);
-int mthca_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
- enum ib_srq_attr_mask attr_mask);
+int mthca_modify_srq(struct ib_srq *ibsrq, ib_srq_attr_t *attr,
+ ib_srq_attr_mask_t attr_mask);
void mthca_srq_event(struct mthca_dev *dev, u32 srqn,
- enum ib_event_type event_type);
+ enum ib_event_type event_type, u8 vendor_code);
void mthca_free_srq_wqe(struct mthca_srq *srq, u32 wqe_addr);
int mthca_tavor_post_srq_recv(struct ib_srq *srq, struct _ib_recv_wr *wr,
struct _ib_recv_wr **bad_wr);
void ib_uverbs_cleanup(void);
int mthca_ah_grh_present(struct mthca_ah *ah);
+int mthca_max_srq_sge(struct mthca_dev *dev);
+
#endif /* MTHCA_DEV_H */
} qp;\r
struct { \r
__be32 srqn; \r
+ u32 reserved1;\r
+ u32 reserved2;\r
+ u8 reserved3[1];\r
+ u8 vendor_code;\r
+ u8 reserved4[2];\r
} srq;\r
struct {\r
__be32 cqn;\r
\r
case MTHCA_EVENT_TYPE_SRQ_QP_LAST_WQE:\r
mthca_qp_event(dev, cl_ntoh32(eqe->event.qp.qpn) & 0xffffff,\r
- IB_EVENT_QP_LAST_WQE_REACHED, eqe->event.qp.vendor_code);\r
+ IB_EVENT_SRQ_QP_LAST_WQE_REACHED, eqe->event.qp.vendor_code);\r
+ break;\r
+\r
+ case MTHCA_EVENT_TYPE_SRQ_CATAS_ERROR:\r
+ mthca_srq_event(dev, cl_ntoh32(eqe->event.srq.srqn) & 0xffffff,\r
+ IB_EVENT_SRQ_LIMIT_REACHED, eqe->event.qp.vendor_code);\r
break;\r
\r
case MTHCA_EVENT_TYPE_SRQ_LIMIT:\r
mthca_srq_event(dev, cl_ntoh32(eqe->event.srq.srqn) & 0xffffff,\r
- IB_EVENT_SRQ_LIMIT_REACHED);\r
+ IB_EVENT_SRQ_LIMIT_REACHED, eqe->event.qp.vendor_code);\r
break;\r
\r
case MTHCA_EVENT_TYPE_WQ_CATAS_ERROR:\r
break;\r
\r
case MTHCA_EVENT_TYPE_EEC_CATAS_ERROR:\r
- case MTHCA_EVENT_TYPE_SRQ_CATAS_ERROR:\r
case MTHCA_EVENT_TYPE_LOCAL_CATAS_ERROR:\r
case MTHCA_EVENT_TYPE_ECC_DETECT:\r
default:\r
}\r
loops++;\r
if (cl_get_time_stamp() - start > g_max_DPC_time_us ) {\r
- HCA_PRINT(TRACE_LEVEL_WARNING ,HCA_DBG_LOW ,("Handeling of EQ stopped, and a new DPC is entered after %d loops\n", loops));\r
+ HCA_PRINT(TRACE_LEVEL_WARNING ,HCA_DBG_LOW ,("Handling of EQ stopped, and a new DPC is entered after %d loops\n", loops));\r
KeInsertQueueDpc(&dev->eq_table.eq[eq->eq_num].dpc, NULL, NULL);\r
break;\r
} \r
mdev->limits.reserved_srqs = dev_lim->reserved_srqs;
mdev->limits.reserved_eecs = dev_lim->reserved_eecs;
mdev->limits.max_desc_sz = dev_lim->max_desc_sz;
+ mdev->limits.max_srq_sge = mthca_max_srq_sge(mdev);
/*
* Subtract 1 from the limit because we need to allocate a
* spare CQE so the HCA HW can tell the difference between an
#include "mthca_cmd.h"
#include "mthca_memfree.h"
+/* Initialize an SM MAD header for a subnet-management GET query. */
-	static void init_query_mad(struct ib_smp *mad)
-	{
+static void init_query_mad(struct ib_smp *mad)
+{
	mad->base_version  = 1;
	mad->mgmt_class    = IB_MGMT_CLASS_SUBN_LID_ROUTED;
	mad->class_version = 1;
	mad->method    	   = IB_MGMT_METHOD_GET;
-	}
+}
- int mthca_query_device(struct ib_device *ibdev,
+int mthca_query_device(struct ib_device *ibdev,
struct ib_device_attr *props)
{
struct ib_smp *in_mad = NULL;
props->max_res_rd_atom = props->max_qp_rd_atom * props->max_qp;
props->max_srq = mdev->limits.num_srqs - mdev->limits.reserved_srqs;
props->max_srq_wr = mdev->limits.max_srq_wqes;
- props->max_srq_sge = mdev->limits.max_sg;
+ if (mthca_is_memfree(mdev))
+ --props->max_srq_wr;
+ props->max_srq_sge = mdev->limits.max_srq_sge;
props->local_ca_ack_delay = (u8)mdev->limits.local_ca_ack_delay;
props->atomic_cap = mdev->limits.flags & DEV_LIM_FLAG_ATOMIC ?
IB_ATOMIC_LOCAL : IB_ATOMIC_NONE;
return err;
}
-int mthca_query_pkey_chunk(struct ib_device *ibdev,
+static int mthca_query_pkey_chunk(struct ib_device *ibdev,
u8 port, u16 index, u16 pkey[32])
{
struct ib_smp *in_mad = NULL;
return err;
}
-int mthca_query_gid_chunk(struct ib_device *ibdev, u8 port,
+static int mthca_query_gid_chunk(struct ib_device *ibdev, u8 port,
int index, union ib_gid gid[8])
{
struct ib_smp *in_mad = NULL;
return 0;
}
-struct ib_ah *mthca_ah_create(struct ib_pd *pd,
+static struct ib_ah *mthca_ah_create(struct ib_pd *pd,
struct ib_ah_attr *ah_attr)
{
int err;
return &ah->ibah;
}
-int mthca_ah_destroy(struct ib_ah *ah)
+static int mthca_ah_destroy(struct ib_ah *ah)
{
mthca_destroy_ah(to_mdev(ah->device), to_mah(ah));
kfree(ah);
return 0;
}
-struct ib_srq *mthca_create_srq(struct ib_pd *pd,
- struct ib_srq_init_attr *init_attr,
- ci_umv_buf_t* const p_umv_buf)
+static struct ib_srq *mthca_create_srq(struct ib_pd *pd,
+ struct ib_srq_init_attr *init_attr,
+ ci_umv_buf_t* const p_umv_buf)
{
-#ifdef WIN_TO_BE_CHANGED
- struct mthca_create_srq ucmd;
+ struct ibv_create_srq ucmd = { 0 };
struct mthca_ucontext *context = NULL;
struct mthca_srq *srq;
int err;
- srq = kmalloc(sizeof *srq, GFP_KERNEL);
+ srq = kzalloc(sizeof *srq, GFP_KERNEL);
if (!srq)
return ERR_PTR(-ENOMEM);
}
err = mthca_alloc_srq(to_mdev(pd->device), to_mpd(pd),
- &init_attr->attr, srq);
+ &init_attr->attr, srq);
if (err && pd->ucontext)
mthca_unmap_user_db(to_mdev(pd->device), &context->uar,
- context->db_tab, ucmd.db_index);
+ context->db_tab, ucmd.db_index);
if (err)
goto err_free;
kfree(srq);
return ERR_PTR(err);
-#else
- UNREFERENCED_PARAMETER(p_umv_buf);
- UNREFERENCED_PARAMETER(init_attr);
- UNREFERENCED_PARAMETER(pd);
- return NULL;
-#endif
}
-int mthca_destroy_srq(struct ib_srq *srq)
+static int mthca_destroy_srq(struct ib_srq *srq)
{
struct mthca_ucontext *context;
- if (srq->uobject) {
- context = to_mucontext(srq->uobject->context);
+ if (srq->ucontext) {
+ context = to_mucontext(srq->ucontext);
mthca_unmap_user_db(to_mdev(srq->device), &context->uar,
- context->db_tab, to_msrq(srq)->db_index);
+ context->db_tab, to_msrq(srq)->db_index);
}
mthca_free_srq(to_mdev(srq->device), to_msrq(srq));
return 0;
}
-struct ib_qp *mthca_create_qp(struct ib_pd *pd,
+static struct ib_qp *mthca_create_qp(struct ib_pd *pd,
struct ib_qp_init_attr *init_attr,
ci_umv_buf_t* const p_umv_buf)
{
return ERR_PTR(err);
}
-int mthca_destroy_qp(struct ib_qp *qp)
+static int mthca_destroy_qp(struct ib_qp *qp)
{
if (qp->ucontext) {
mthca_unmap_user_db(to_mdev(qp->device),
return 0;
}
-struct ib_cq *mthca_create_cq(struct ib_device *ibdev, int entries,
+static struct ib_cq *mthca_create_cq(struct ib_device *ibdev, int entries,
struct ib_ucontext *context,
ci_umv_buf_t* const p_umv_buf)
{
return ERR_PTR(err);
}
-int mthca_destroy_cq(struct ib_cq *cq)
+static int mthca_destroy_cq(struct ib_cq *cq)
{
if (cq->ucontext) {
mthca_unmap_user_db(to_mdev(cq->device),
return &mr->ibmr;
}
-struct ib_mr *mthca_reg_phys_mr(struct ib_pd *pd,
+static struct ib_mr *mthca_reg_phys_mr(struct ib_pd *pd,
struct ib_phys_buf *buffer_list,
int num_phys_buf,
mthca_qp_access_t acc,
return &mr->ibmr;
}
-struct ib_mr *mthca_reg_virt_mr(struct ib_pd *pd,
+static struct ib_mr *mthca_reg_virt_mr(struct ib_pd *pd,
void* __ptr64 vaddr, uint64_t length, uint64_t hca_va,
mthca_qp_access_t acc, boolean_t um_call)
{
return 0;
}
-struct ib_fmr *mthca_alloc_fmr(struct ib_pd *pd, mthca_qp_access_t acc,
+static struct ib_fmr *mthca_alloc_fmr(struct ib_pd *pd, mthca_qp_access_t acc,
struct ib_fmr_attr *fmr_attr)
{
struct mthca_fmr *fmr;
return &fmr->ibmr;
}
-int mthca_dealloc_fmr(struct ib_fmr *fmr)
+static int mthca_dealloc_fmr(struct ib_fmr *fmr)
{
struct mthca_fmr *mfmr = to_mfmr(fmr);
int err;
return 0;
}
-int mthca_unmap_fmr(struct list_head *fmr_list)
+static int mthca_unmap_fmr(struct list_head *fmr_list)
{
struct ib_fmr *fmr;
int err;
if (dev->mthca_flags & MTHCA_FLAG_SRQ) {
dev->ib_dev.create_srq = mthca_create_srq;
dev->ib_dev.modify_srq = mthca_modify_srq;
+ dev->ib_dev.query_srq = mthca_query_srq;
dev->ib_dev.destroy_srq = mthca_destroy_srq;
if (mthca_is_memfree(dev))
-/*\r
- * Copyright (c) 2004 Topspin Communications. All rights reserved.\r
- * Copyright (c) 2005 Cisco Systems. All rights reserved.\r
- * Copyright (c) 2005 Mellanox Technologies. All rights reserved.\r
- *\r
- * This software is available to you under a choice of one of two\r
- * licenses. You may choose to be licensed under the terms of the GNU\r
- * General Public License (GPL) Version 2, available from the file\r
- * COPYING in the main directory of this source tree, or the\r
- * OpenIB.org BSD license below:\r
- *\r
- * Redistribution and use in source and binary forms, with or\r
- * without modification, are permitted provided that the following\r
- * conditions are met:\r
- *\r
- * - Redistributions of source code must retain the above\r
- * copyright notice, this list of conditions and the following\r
- * disclaimer.\r
- *\r
- * - Redistributions in binary form must reproduce the above\r
- * copyright notice, this list of conditions and the following\r
- * disclaimer in the documentation and/or other materials\r
- * provided with the distribution.\r
- *\r
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,\r
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\r
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND\r
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS\r
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN\r
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN\r
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\r
- * SOFTWARE.\r
- *\r
- * $Id$\r
- */\r
-\r
-#ifndef MTHCA_PROVIDER_H\r
-#define MTHCA_PROVIDER_H\r
-\r
-#include <ib_verbs.h>\r
-#include <ib_pack.h>\r
-#include <iba/ib_ci.h>\r
-\r
-typedef uint32_t mthca_mpt_access_t;\r
-#define MTHCA_MPT_FLAG_ATOMIC (1 << 14)\r
-#define MTHCA_MPT_FLAG_REMOTE_WRITE (1 << 13)\r
-#define MTHCA_MPT_FLAG_REMOTE_READ (1 << 12)\r
-#define MTHCA_MPT_FLAG_LOCAL_WRITE (1 << 11)\r
-#define MTHCA_MPT_FLAG_LOCAL_READ (1 << 10)\r
-\r
-union mthca_buf {\r
- struct scatterlist direct;\r
- struct scatterlist *page_list;\r
-};\r
-\r
-struct mthca_uar {\r
- PFN_NUMBER pfn;\r
- int index;\r
-};\r
-\r
-struct mthca_user_db_table;\r
-\r
-struct mthca_ucontext {\r
- struct ib_ucontext ibucontext;\r
- struct mthca_uar uar;\r
- struct mthca_user_db_table *db_tab;\r
- // for user UAR \r
- PMDL mdl;\r
- PVOID kva;\r
- SIZE_T uar_size; \r
-};\r
-\r
-struct mthca_mtt;\r
-\r
-struct mthca_mr {\r
- //NB: the start of this structure is to be equal to mlnx_mro_t !\r
- //NB: the structure was not inserted here for not to mix driver and provider structures\r
- struct ib_mr ibmr;\r
- struct mthca_mtt *mtt;\r
- int iobuf_used;\r
- mt_iobuf_t iobuf;\r
- void *secure_handle;\r
-};\r
-\r
-struct mthca_fmr {\r
- struct ib_fmr ibmr;\r
- struct ib_fmr_attr attr;\r
- struct mthca_mtt *mtt;\r
- int maps;\r
- union {\r
- struct {\r
- struct mthca_mpt_entry __iomem *mpt;\r
- u64 __iomem *mtts;\r
- } tavor;\r
- struct {\r
- struct mthca_mpt_entry *mpt;\r
- __be64 *mtts;\r
- } arbel;\r
- } mem;\r
-};\r
-\r
-struct mthca_pd {\r
- struct ib_pd ibpd;\r
- u32 pd_num;\r
- atomic_t sqp_count;\r
- struct mthca_mr ntmr;\r
- int privileged;\r
-};\r
-\r
-struct mthca_eq {\r
- struct mthca_dev *dev;\r
- int eqn;\r
- int eq_num;\r
- u32 eqn_mask;\r
- u32 cons_index;\r
- u16 msi_x_vector;\r
- u16 msi_x_entry;\r
- int have_irq;\r
- int nent;\r
- struct scatterlist *page_list;\r
- struct mthca_mr mr;\r
- KDPC dpc; /* DPC for MSI-X interrupts */\r
- spinlock_t lock; /* spinlock for simult DPCs */\r
-};\r
-\r
-struct mthca_av;\r
-\r
-enum mthca_ah_type {\r
- MTHCA_AH_ON_HCA,\r
- MTHCA_AH_PCI_POOL,\r
- MTHCA_AH_KMALLOC\r
-};\r
-\r
-struct mthca_ah {\r
- struct ib_ah ibah;\r
- enum mthca_ah_type type;\r
- u32 key;\r
- struct mthca_av *av;\r
- dma_addr_t avdma;\r
-};\r
-\r
-/*\r
- * Quick description of our CQ/QP locking scheme:\r
- *\r
- * We have one global lock that protects dev->cq/qp_table. Each\r
- * struct mthca_cq/qp also has its own lock. An individual qp lock\r
- * may be taken inside of an individual cq lock. Both cqs attached to\r
- * a qp may be locked, with the send cq locked first. No other\r
- * nesting should be done.\r
- *\r
- * Each struct mthca_cq/qp also has an atomic_t ref count. The\r
- * pointer from the cq/qp_table to the struct counts as one reference.\r
- * This reference also is good for access through the consumer API, so\r
- * modifying the CQ/QP etc doesn't need to take another reference.\r
- * Access because of a completion being polled does need a reference.\r
- *\r
- * Finally, each struct mthca_cq/qp has a wait_queue_head_t for the\r
- * destroy function to sleep on.\r
- *\r
- * This means that access from the consumer API requires nothing but\r
- * taking the struct's lock.\r
- *\r
- * Access because of a completion event should go as follows:\r
- * - lock cq/qp_table and look up struct\r
- * - increment ref count in struct\r
- * - drop cq/qp_table lock\r
- * - lock struct, do your thing, and unlock struct\r
- * - decrement ref count; if zero, wake up waiters\r
- *\r
- * To destroy a CQ/QP, we can do the following:\r
- * - lock cq/qp_table, remove pointer, unlock cq/qp_table lock\r
- * - decrement ref count\r
- * - wait_event until ref count is zero\r
- *\r
- * It is the consumer's responsibilty to make sure that no QP\r
- * operations (WQE posting or state modification) are pending when the\r
- * QP is destroyed. Also, the consumer must make sure that calls to\r
- * qp_modify are serialized.\r
- *\r
- * Possible optimizations (wait for profile data to see if/where we\r
- * have locks bouncing between CPUs):\r
- * - split cq/qp table lock into n separate (cache-aligned) locks,\r
- * indexed (say) by the page in the table\r
- * - split QP struct lock into three (one for common info, one for the\r
- * send queue and one for the receive queue)\r
- */\r
-//TODO: check correctness of the above requirement: "It is the consumer's responsibilty to make sure that no QP\r
-// operations (WQE posting or state modification) are pending when the QP is destroyed"\r
-\r
-struct mthca_cq {\r
- struct ib_cq ibcq;\r
- void *cq_context; // leo: for IBAL shim\r
- spinlock_t lock;\r
- atomic_t refcount;\r
- int cqn;\r
- u32 cons_index;\r
- int is_direct;\r
- int is_kernel;\r
-\r
- /* Next fields are Arbel only */\r
- int set_ci_db_index;\r
- __be32 *set_ci_db;\r
- int arm_db_index;\r
- __be32 *arm_db;\r
- int arm_sn;\r
- int u_arm_db_index;\r
- int *p_u_arm_sn;\r
-\r
- union mthca_buf queue;\r
- struct mthca_mr mr;\r
- wait_queue_head_t wait;\r
- KMUTEX mutex;\r
-};\r
-\r
-struct mthca_srq {\r
- struct ib_srq ibsrq;\r
- spinlock_t lock;\r
- atomic_t refcount;\r
- int srqn;\r
- int max;\r
- int max_gs;\r
- int wqe_shift;\r
- int first_free;\r
- int last_free;\r
- u16 counter; /* Arbel only */\r
- int db_index; /* Arbel only */\r
- __be32 *db; /* Arbel only */\r
- void *last;\r
-\r
- int is_direct;\r
- u64 *wrid;\r
- union mthca_buf queue;\r
- struct mthca_mr mr;\r
-\r
- wait_queue_head_t wait;\r
- KMUTEX mutex;\r
-};\r
-\r
-struct mthca_wq {\r
- spinlock_t lock;\r
- int max;\r
- unsigned next_ind;\r
- unsigned last_comp;\r
- unsigned head;\r
- unsigned tail;\r
- void *last;\r
- int max_gs;\r
- int wqe_shift;\r
-\r
- int db_index; /* Arbel only */\r
- __be32 *db;\r
-};\r
-\r
-struct mthca_qp {\r
- struct ib_qp ibqp;\r
- void *qp_context; // leo: for IBAL shim\r
- //TODO: added just because absense of ibv_query_qp\r
- // thereafter it may be worth to be replaced by struct ib_qp_attr qp_attr;\r
- struct ib_qp_init_attr qp_init_attr; // leo: for query_qp\r
- atomic_t refcount;\r
- u32 qpn;\r
- int is_direct;\r
- u8 transport;\r
- u8 state;\r
- u8 atomic_rd_en;\r
- u8 resp_depth;\r
-\r
- struct mthca_mr mr;\r
-\r
- struct mthca_wq rq;\r
- struct mthca_wq sq;\r
- enum ib_sig_type sq_policy;\r
- int send_wqe_offset;\r
- int max_inline_data;\r
-\r
- u64 *wrid;\r
- union mthca_buf queue;\r
-\r
- wait_queue_head_t wait;\r
- KMUTEX mutex;\r
-};\r
-\r
-struct mthca_sqp {\r
- struct mthca_qp qp;\r
- int port;\r
- int pkey_index;\r
- u32 qkey;\r
- u32 send_psn;\r
- struct ib_ud_header ud_header;\r
- struct scatterlist sg;\r
-};\r
-\r
-static inline struct mthca_ucontext *to_mucontext(struct ib_ucontext *ibucontext)\r
-{\r
- return container_of(ibucontext, struct mthca_ucontext, ibucontext);\r
-}\r
-\r
-static inline struct mthca_fmr *to_mfmr(struct ib_fmr *ibmr)\r
-{\r
- return container_of(ibmr, struct mthca_fmr, ibmr);\r
-}\r
-\r
-static inline struct mthca_mr *to_mmr(struct ib_mr *ibmr)\r
-{\r
- return container_of(ibmr, struct mthca_mr, ibmr);\r
-}\r
-\r
-static inline struct mthca_pd *to_mpd(struct ib_pd *ibpd)\r
-{\r
- return container_of(ibpd, struct mthca_pd, ibpd);\r
-}\r
-\r
-static inline struct mthca_ah *to_mah(struct ib_ah *ibah)\r
-{\r
- return container_of(ibah, struct mthca_ah, ibah);\r
-}\r
-\r
-static inline struct mthca_cq *to_mcq(struct ib_cq *ibcq)\r
-{\r
- return container_of(ibcq, struct mthca_cq, ibcq);\r
-}\r
-\r
-static inline struct mthca_srq *to_msrq(struct ib_srq *ibsrq)\r
-{\r
- return container_of(ibsrq, struct mthca_srq, ibsrq);\r
-}\r
-\r
-static inline struct mthca_qp *to_mqp(struct ib_qp *ibqp)\r
-{\r
- return container_of(ibqp, struct mthca_qp, ibqp);\r
-}\r
-\r
-static inline struct mthca_sqp *to_msqp(struct mthca_qp *qp)\r
-{\r
- return container_of(qp, struct mthca_sqp, qp);\r
-}\r
-\r
-static inline uint8_t start_port(struct ib_device *device)\r
-{\r
- return device->node_type == IB_NODE_SWITCH ? 0 : 1;\r
-}\r
-\r
-static inline uint8_t end_port(struct ib_device *device)\r
-{\r
- return device->node_type == IB_NODE_SWITCH ? 0 : device->phys_port_cnt;\r
-}\r
-\r
-static inline int ib_copy_from_umv_buf(void *dest, ci_umv_buf_t* const p_umv_buf, size_t len)\r
-{\r
- RtlCopyMemory(dest, p_umv_buf->p_inout_buf, len);\r
- return 0;\r
-}\r
-\r
-static inline int ib_copy_to_umv_buf(ci_umv_buf_t* const p_umv_buf, void *src, size_t len)\r
-{\r
- if (p_umv_buf->output_size < len) {\r
- p_umv_buf->status = IB_INSUFFICIENT_MEMORY;\r
- p_umv_buf->output_size = 0;\r
- return -EFAULT;\r
- }\r
- RtlCopyMemory(p_umv_buf->p_inout_buf, src, len);\r
- p_umv_buf->status = IB_SUCCESS;\r
- p_umv_buf->output_size = (uint32_t)len;\r
- return 0;\r
-}\r
-\r
-\r
-\r
-// API\r
-int mthca_query_device(struct ib_device *ibdev,\r
- struct ib_device_attr *props);\r
-\r
-int mthca_query_port(struct ib_device *ibdev,\r
- u8 port, struct ib_port_attr *props);\r
-\r
-int mthca_modify_port(struct ib_device *ibdev,\r
- u8 port, int port_modify_mask,\r
- struct ib_port_modify *props);\r
-\r
-int mthca_query_pkey_chunk(struct ib_device *ibdev,\r
- u8 port, u16 index, u16 pkey[32]);\r
-\r
-int mthca_query_gid_chunk(struct ib_device *ibdev, u8 port,\r
- int index, union ib_gid gid[8]);\r
-\r
-struct ib_ucontext *mthca_alloc_ucontext(struct ib_device *ibdev,\r
- ci_umv_buf_t* const p_umv_buf);\r
-\r
-int mthca_dealloc_ucontext(struct ib_ucontext *context);\r
-\r
-struct ib_pd *mthca_alloc_pd(struct ib_device *ibdev,\r
- struct ib_ucontext *context,\r
- ci_umv_buf_t* const p_umv_buf);\r
-\r
-int mthca_dealloc_pd(struct ib_pd *pd);\r
-\r
-struct ib_ah *mthca_ah_create(struct ib_pd *pd,\r
- struct ib_ah_attr *ah_attr);\r
-\r
-int mthca_ah_destroy(struct ib_ah *ah);\r
-\r
-struct ib_srq *mthca_create_srq(struct ib_pd *pd,\r
- struct ib_srq_init_attr *init_attr,\r
- ci_umv_buf_t* const p_umv_buf);\r
-\r
-int mthca_destroy_srq(struct ib_srq *srq);\r
-\r
-struct ib_qp *mthca_create_qp(struct ib_pd *pd,\r
- struct ib_qp_init_attr *init_attr,\r
- ci_umv_buf_t* const p_umv_buf);\r
-\r
-int mthca_destroy_qp(struct ib_qp *qp);\r
-\r
-struct ib_cq *mthca_create_cq(struct ib_device *ibdev, int entries,\r
- struct ib_ucontext *context,\r
- ci_umv_buf_t* const p_umv_buf);\r
-\r
-int mthca_destroy_cq(struct ib_cq *cq);\r
-\r
-struct ib_mr *mthca_get_dma_mr(struct ib_pd *pd, mthca_qp_access_t acc);\r
-\r
-struct ib_mr *mthca_reg_phys_mr(struct ib_pd *pd,\r
- struct ib_phys_buf *buffer_list,\r
- int num_phys_buf,\r
- mthca_qp_access_t acc,\r
- u64 *iova_start);\r
-\r
-struct ib_mr *mthca_reg_virt_mr(struct ib_pd *pd, \r
- void* __ptr64 vaddr, uint64_t length, uint64_t hca_va,\r
- mthca_qp_access_t acc, boolean_t um_call);\r
-\r
-int mthca_dereg_mr(struct ib_mr *mr);\r
-\r
-struct ib_fmr *mthca_alloc_fmr(struct ib_pd *pd, mthca_qp_access_t acc,\r
- struct ib_fmr_attr *fmr_attr);\r
-\r
-int mthca_dealloc_fmr(struct ib_fmr *fmr);\r
-\r
-int mthca_unmap_fmr(struct list_head *fmr_list);\r
-\r
-int mthca_poll_cq_list(\r
- IN struct ib_cq *ibcq, \r
- IN OUT ib_wc_t** const pp_free_wclist,\r
- OUT ib_wc_t** const pp_done_wclist );\r
-\r
-\r
-#endif /* MTHCA_PROVIDER_H */\r
+/*
+ * Copyright (c) 2004 Topspin Communications. All rights reserved.
+ * Copyright (c) 2005 Cisco Systems. All rights reserved.
+ * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id$
+ */
+
+#ifndef MTHCA_PROVIDER_H
+#define MTHCA_PROVIDER_H
+
+#include <ib_verbs.h>
+#include <ib_pack.h>
+#include <iba/ib_ci.h>
+
+typedef uint32_t mthca_mpt_access_t;
+#define MTHCA_MPT_FLAG_ATOMIC (1 << 14)
+#define MTHCA_MPT_FLAG_REMOTE_WRITE (1 << 13)
+#define MTHCA_MPT_FLAG_REMOTE_READ (1 << 12)
+#define MTHCA_MPT_FLAG_LOCAL_WRITE (1 << 11)
+#define MTHCA_MPT_FLAG_LOCAL_READ (1 << 10)
+
+union mthca_buf {
+ struct scatterlist direct;
+ struct scatterlist *page_list;
+};
+
+struct mthca_uar {
+ PFN_NUMBER pfn;
+ int index;
+};
+
+struct mthca_user_db_table;
+
+struct mthca_ucontext {
+ struct ib_ucontext ibucontext;
+ struct mthca_uar uar;
+ struct mthca_user_db_table *db_tab;
+ // for user UAR
+ PMDL mdl;
+ PVOID kva;
+ SIZE_T uar_size;
+};
+
+struct mthca_mtt;
+
+struct mthca_mr {
+ //NB: the start of this structure is to be equal to mlnx_mro_t !
+ //NB: the structure was not inserted here for not to mix driver and provider structures
+ struct ib_mr ibmr;
+ struct mthca_mtt *mtt;
+ int iobuf_used;
+ mt_iobuf_t iobuf;
+ void *secure_handle;
+};
+
+struct mthca_fmr {
+ struct ib_fmr ibmr;
+ struct ib_fmr_attr attr;
+ struct mthca_mtt *mtt;
+ int maps;
+ union {
+ struct {
+ struct mthca_mpt_entry __iomem *mpt;
+ u64 __iomem *mtts;
+ } tavor;
+ struct {
+ struct mthca_mpt_entry *mpt;
+ __be64 *mtts;
+ } arbel;
+ } mem;
+};
+
+struct mthca_pd {
+ struct ib_pd ibpd;
+ u32 pd_num;
+ atomic_t sqp_count;
+ struct mthca_mr ntmr;
+ int privileged;
+};
+
+struct mthca_eq {
+ struct mthca_dev *dev;
+ int eqn;
+ int eq_num;
+ u32 eqn_mask;
+ u32 cons_index;
+ u16 msi_x_vector;
+ u16 msi_x_entry;
+ int have_irq;
+ int nent;
+ struct scatterlist *page_list;
+ struct mthca_mr mr;
+ KDPC dpc; /* DPC for MSI-X interrupts */
+ spinlock_t lock; /* spinlock for simult DPCs */
+};
+
+struct mthca_av;
+
+enum mthca_ah_type {
+ MTHCA_AH_ON_HCA,
+ MTHCA_AH_PCI_POOL,
+ MTHCA_AH_KMALLOC
+};
+
+struct mthca_ah {
+ struct ib_ah ibah;
+ enum mthca_ah_type type;
+ u32 key;
+ struct mthca_av *av;
+ dma_addr_t avdma;
+};
+
+/*
+ * Quick description of our CQ/QP locking scheme:
+ *
+ * We have one global lock that protects dev->cq/qp_table. Each
+ * struct mthca_cq/qp also has its own lock. An individual qp lock
+ * may be taken inside of an individual cq lock. Both cqs attached to
+ * a qp may be locked, with the send cq locked first. No other
+ * nesting should be done.
+ *
+ * Each struct mthca_cq/qp also has an atomic_t ref count. The
+ * pointer from the cq/qp_table to the struct counts as one reference.
+ * This reference also is good for access through the consumer API, so
+ * modifying the CQ/QP etc doesn't need to take another reference.
+ * Access because of a completion being polled does need a reference.
+ *
+ * Finally, each struct mthca_cq/qp has a wait_queue_head_t for the
+ * destroy function to sleep on.
+ *
+ * This means that access from the consumer API requires nothing but
+ * taking the struct's lock.
+ *
+ * Access because of a completion event should go as follows:
+ * - lock cq/qp_table and look up struct
+ * - increment ref count in struct
+ * - drop cq/qp_table lock
+ * - lock struct, do your thing, and unlock struct
+ * - decrement ref count; if zero, wake up waiters
+ *
+ * To destroy a CQ/QP, we can do the following:
+ * - lock cq/qp_table, remove pointer, unlock cq/qp_table lock
+ * - decrement ref count
+ * - wait_event until ref count is zero
+ *
+ * It is the consumer's responsibility to make sure that no QP
+ * operations (WQE posting or state modification) are pending when the
+ * QP is destroyed. Also, the consumer must make sure that calls to
+ * qp_modify are serialized.
+ *
+ * Possible optimizations (wait for profile data to see if/where we
+ * have locks bouncing between CPUs):
+ * - split cq/qp table lock into n separate (cache-aligned) locks,
+ * indexed (say) by the page in the table
+ * - split QP struct lock into three (one for common info, one for the
+ * send queue and one for the receive queue)
+ */
+//TODO: check correctness of the above requirement: "It is the consumer's responsibility to make sure that no QP
+// operations (WQE posting or state modification) are pending when the QP is destroyed"
+
+struct mthca_cq {
+ struct ib_cq ibcq;
+ void *cq_context; // leo: for IBAL shim
+ spinlock_t lock;
+ atomic_t refcount;
+ int cqn;
+ u32 cons_index;
+ int is_direct;
+ int is_kernel;
+
+ /* Next fields are Arbel only */
+ int set_ci_db_index;
+ __be32 *set_ci_db;
+ int arm_db_index;
+ __be32 *arm_db;
+ int arm_sn;
+ int u_arm_db_index;
+ int *p_u_arm_sn;
+
+ union mthca_buf queue;
+ struct mthca_mr mr;
+ wait_queue_head_t wait;
+ KMUTEX mutex;
+};
+
+/* Driver-private shared receive queue state (managed in mthca_srq.c).
+ * Fields marked "Arbel only" are used only on mem-free (Arbel) HW.
+ * 'srq_context' carries the consumer's opaque context for async SRQ
+ * events -- presumably for the IBAL shim, like qp_context/cq_context
+ * in the QP/CQ structs; confirm against the event dispatch path. */
+struct mthca_srq {
+ struct ib_srq ibsrq;
+ spinlock_t lock;
+ atomic_t refcount;
+ int srqn;
+ int max;
+ int max_gs;
+ int wqe_shift;
+ int first_free;
+ int last_free;
+ u16 counter; /* Arbel only */
+ int db_index; /* Arbel only */
+ __be32 *db; /* Arbel only */
+ void *last;
+
+ int is_direct;
+ u64 *wrid;
+ union mthca_buf queue;
+ struct mthca_mr mr;
+
+ wait_queue_head_t wait;
+ KMUTEX mutex;
+ void *srq_context;
+};
+
+struct mthca_wq {
+ spinlock_t lock;
+ int max;
+ unsigned next_ind;
+ unsigned last_comp;
+ unsigned head;
+ unsigned tail;
+ void *last;
+ int max_gs;
+ int wqe_shift;
+
+ int db_index; /* Arbel only */
+ __be32 *db;
+};
+
+struct mthca_qp {
+ struct ib_qp ibqp;
+ void *qp_context; // leo: for IBAL shim
+ //TODO: added just because of the absence of ibv_query_qp
+ // thereafter it may be worth to be replaced by struct ib_qp_attr qp_attr;
+ struct ib_qp_init_attr qp_init_attr; // leo: for query_qp
+ atomic_t refcount;
+ u32 qpn;
+ int is_direct;
+ u8 transport;
+ u8 state;
+ u8 atomic_rd_en;
+ u8 resp_depth;
+
+ struct mthca_mr mr;
+
+ struct mthca_wq rq;
+ struct mthca_wq sq;
+ enum ib_sig_type sq_policy;
+ int send_wqe_offset;
+ int max_inline_data;
+
+ u64 *wrid;
+ union mthca_buf queue;
+
+ wait_queue_head_t wait;
+ KMUTEX mutex;
+};
+
+struct mthca_sqp {
+ struct mthca_qp qp;
+ int port;
+ int pkey_index;
+ u32 qkey;
+ u32 send_psn;
+ struct ib_ud_header ud_header;
+ struct scatterlist sg;
+};
+
+/* container_of-style downcasts from the generic ib_* handles to the
+ * mthca-specific wrapper structs.  Valid only for objects allocated by
+ * this driver (the ib_* member is embedded in the mthca_* struct). */
+static inline struct mthca_ucontext *to_mucontext(struct ib_ucontext *ibucontext)
+{
+ return container_of(ibucontext, struct mthca_ucontext, ibucontext);
+}
+
+static inline struct mthca_fmr *to_mfmr(struct ib_fmr *ibmr)
+{
+ return container_of(ibmr, struct mthca_fmr, ibmr);
+}
+
+static inline struct mthca_mr *to_mmr(struct ib_mr *ibmr)
+{
+ return container_of(ibmr, struct mthca_mr, ibmr);
+}
+
+static inline struct mthca_pd *to_mpd(struct ib_pd *ibpd)
+{
+ return container_of(ibpd, struct mthca_pd, ibpd);
+}
+
+static inline struct mthca_ah *to_mah(struct ib_ah *ibah)
+{
+ return container_of(ibah, struct mthca_ah, ibah);
+}
+
+static inline struct mthca_cq *to_mcq(struct ib_cq *ibcq)
+{
+ return container_of(ibcq, struct mthca_cq, ibcq);
+}
+
+static inline struct mthca_srq *to_msrq(struct ib_srq *ibsrq)
+{
+ return container_of(ibsrq, struct mthca_srq, ibsrq);
+}
+
+static inline struct mthca_qp *to_mqp(struct ib_qp *ibqp)
+{
+ return container_of(ibqp, struct mthca_qp, ibqp);
+}
+
+/* Note: takes a mthca_qp (not an ib_qp); valid only for QPs that were
+ * allocated as part of a mthca_sqp (special QP 0/1). */
+static inline struct mthca_sqp *to_msqp(struct mthca_qp *qp)
+{
+ return container_of(qp, struct mthca_sqp, qp);
+}
+
+/* First/last valid port number for a device: switches expose only the
+ * management port 0, CAs/routers number physical ports 1..phys_port_cnt. */
+static inline uint8_t start_port(struct ib_device *device)
+{
+ return device->node_type == IB_NODE_SWITCH ? 0 : 1;
+}
+
+static inline uint8_t end_port(struct ib_device *device)
+{
+ return device->node_type == IB_NODE_SWITCH ? 0 : device->phys_port_cnt;
+}
+
+/* Copy 'len' bytes from the user-mode verbs (UMV) exchange buffer into
+ * 'dest'.  Always returns 0.
+ * NOTE(review): no check that the caller-supplied input buffer actually
+ * holds 'len' bytes before the copy -- callers apparently validate the
+ * buffer size first; confirm at each call site. */
+static inline int ib_copy_from_umv_buf(void *dest, ci_umv_buf_t* const p_umv_buf, size_t len)
+{
+ RtlCopyMemory(dest, p_umv_buf->p_inout_buf, len);
+ return 0;
+}
+
+/* Copy 'len' bytes of driver output back into the UMV exchange buffer,
+ * recording the resulting status and output size.  Returns 0 on success
+ * or -EFAULT (status IB_INSUFFICIENT_MEMORY, output_size 0) when the
+ * caller's buffer is too small. */
+static inline int ib_copy_to_umv_buf(ci_umv_buf_t* const p_umv_buf, void *src, size_t len)
+{
+ if (p_umv_buf->output_size < len) {
+ p_umv_buf->status = IB_INSUFFICIENT_MEMORY;
+ p_umv_buf->output_size = 0;
+ return -EFAULT;
+ }
+ RtlCopyMemory(p_umv_buf->p_inout_buf, src, len);
+ p_umv_buf->status = IB_SUCCESS;
+ p_umv_buf->output_size = (uint32_t)len;
+ return 0;
+}
+
+
+
+// API
+int mthca_query_device(struct ib_device *ibdev,
+ struct ib_device_attr *props);
+
+int mthca_query_port(struct ib_device *ibdev,
+ u8 port, struct ib_port_attr *props);
+
+int mthca_modify_port(struct ib_device *ibdev,
+ u8 port, int port_modify_mask,
+ struct ib_port_modify *props);
+
+struct ib_pd *mthca_alloc_pd(struct ib_device *ibdev,
+ struct ib_ucontext *context,
+ ci_umv_buf_t* const p_umv_buf);
+
+int mthca_dealloc_pd(struct ib_pd *pd);
+
+int mthca_dereg_mr(struct ib_mr *mr);
+
+int mthca_query_srq(struct ib_srq *ibsrq, ib_srq_attr_t *srq_attr);
+
+struct ib_ucontext *mthca_alloc_ucontext(struct ib_device *ibdev,
+ ci_umv_buf_t* const p_umv_buf);
+
+int mthca_dealloc_ucontext(struct ib_ucontext *context);
+
+struct ib_mr *mthca_get_dma_mr(struct ib_pd *pd, mthca_qp_access_t acc);
+
+int mthca_poll_cq_list(
+ IN struct ib_cq *ibcq,
+ IN OUT ib_wc_t** const pp_free_wclist,
+ OUT ib_wc_t** const pp_done_wclist );
+
+
+#endif /* MTHCA_PROVIDER_H */
err = mthca_MODIFY_QP(dev, state_table[cur_state][new_state].trans,
qp->qpn, 0, mailbox, sqd_event, &status);
- if (err)
+ if (err) {
+ HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_QP,("mthca_MODIFY_QP returned error (qp-num = 0x%x) returned status %02x "
+ "cur_state = %d new_state = %d attr_mask = %d req_param = %d opt_param = %d\n",
+ ibqp->qp_num, status, cur_state, new_state,
+ attr_mask, req_param, opt_param));
goto out_mailbox;
+ }
if (status) {
- HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_QP,("modify QP %d returned status %02x.\n",
- state_table[cur_state][new_state].trans, status));
+ HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_QP,("mthca_MODIFY_QP bad status(qp-num = 0x%x) returned status %02x "
+ "cur_state = %d new_state = %d attr_mask = %d req_param = %d opt_param = %d\n",
+ ibqp->qp_num, status, cur_state, new_state,
+ attr_mask, req_param, opt_param));
err = -EINVAL;
goto out_mailbox;
}
atomic_set(&qp->refcount, 1);
init_waitqueue_head(&qp->wait);
KeInitializeMutex(&qp->mutex, 0);
-
+
qp->state = IBQPS_RESET;
qp->atomic_rd_en = 0;
qp->resp_depth = 0;
atomic_dec(&qp->refcount);
wait_event(&qp->wait, !atomic_read(&qp->refcount));
- if (qp->state != IBQPS_RESET)
+ if (qp->state != IBQPS_RESET) {
mthca_MODIFY_QP(dev, MTHCA_TRANS_ANY2RST, qp->qpn, 0, NULL, 0, &status);
+ }
/*
* If this is a userspace QP, the buffers, MR, CQs and so on
#pragma alloc_text (PAGE, mthca_cleanup_srq_table)
#endif
+
enum {
MTHCA_MAX_DIRECT_SRQ_SIZE = 4 * PAGE_SIZE
};
__be32 state_pd;
__be32 lkey;
__be32 uar;
- __be32 wqe_cnt;
+ __be16 limit_watermark;
+ __be16 wqe_cnt;
u32 reserved[2];
};
RtlZeroMemory(context, sizeof *context);
- logsize = long_log2(srq->max) + srq->wqe_shift;
+ logsize = long_log2(srq->max);
context->state_logsize_srqn = cl_hton32(logsize << 24 | srq->srqn);
context->lkey = cl_hton32(srq->mr.ibmr.lkey);
context->db_index = cl_hton32(srq->db_index);
}
int mthca_alloc_srq(struct mthca_dev *dev, struct mthca_pd *pd,
- struct ib_srq_attr *attr, struct mthca_srq *srq)
+ ib_srq_attr_t *attr, struct mthca_srq *srq)
{
struct mthca_mailbox *mailbox;
u8 status;
/* Sanity check SRQ size before proceeding */
if ((int)attr->max_wr > dev->limits.max_srq_wqes ||
- (int)attr->max_sge > dev->limits.max_sg)
+ (int)attr->max_sge > dev->limits.max_srq_sge)
return -EINVAL;
srq->max = attr->max_wr;
ds = max(64UL,
roundup_pow_of_two(sizeof (struct mthca_next_seg) +
srq->max_gs * sizeof (struct mthca_data_seg)));
+
+ if (!mthca_is_memfree(dev) && (ds > dev->limits.max_desc_sz))
+ return -EINVAL;
+
srq->wqe_shift = long_log2(ds);
srq->srqn = mthca_alloc(&dev->srq_table.alloc);
err = mthca_SW2HW_SRQ(dev, mailbox, srq->srqn, &status);
if (err) {
- HCA_PRINT(TRACE_LEVEL_WARNING ,HCA_DBG_LOW ,("SW2HW_SRQ failed (%d)\n", err));
+ HCA_PRINT(TRACE_LEVEL_WARNING ,HCA_DBG_SRQ ,( "SW2HW_SRQ failed (%d)\n", err));
goto err_out_free_buf;
}
if (status) {
- HCA_PRINT(TRACE_LEVEL_WARNING,HCA_DBG_LOW,("SW2HW_SRQ returned status 0x%02x\n",
+ HCA_PRINT(TRACE_LEVEL_WARNING ,HCA_DBG_SRQ ,( "SW2HW_SRQ returned status 0x%02x\n",
status));
err = -EINVAL;
goto err_out_free_buf;
srq->first_free = 0;
srq->last_free = srq->max - 1;
- attr->max_wr = srq->max;
+ attr->max_wr = (mthca_is_memfree(dev)) ? srq->max - 1 : srq->max;
attr->max_sge = srq->max_gs;
return 0;
err_out_free_srq:
err = mthca_HW2SW_SRQ(dev, mailbox, srq->srqn, &status);
- if (err){
- HCA_PRINT(TRACE_LEVEL_WARNING ,HCA_DBG_LOW ,("HW2SW_SRQ failed (%d)\n", err));
- }else if (status){
- HCA_PRINT(TRACE_LEVEL_WARNING ,HCA_DBG_LOW ,("HW2SW_SRQ returned status 0x%02x\n", status));
+ if (err) {
+ HCA_PRINT(TRACE_LEVEL_WARNING ,HCA_DBG_SRQ ,( "HW2SW_SRQ failed (%d)\n", err));
+ } else if (status) {
+ HCA_PRINT(TRACE_LEVEL_WARNING ,HCA_DBG_SRQ ,( "HW2SW_SRQ returned status 0x%02x\n", status));
}
err_out_free_buf:
mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
if (IS_ERR(mailbox)) {
- HCA_PRINT(TRACE_LEVEL_WARNING ,HCA_DBG_LOW ,("No memory for mailbox to free SRQ.\n"));
+ HCA_PRINT(TRACE_LEVEL_WARNING ,HCA_DBG_SRQ ,( "No memory for mailbox to free SRQ.\n"));
return;
}
err = mthca_HW2SW_SRQ(dev, mailbox, srq->srqn, &status);
- if (err){
- HCA_PRINT(TRACE_LEVEL_WARNING ,HCA_DBG_LOW ,("HW2SW_SRQ failed (%d)\n", err));
- }else if (status){
- HCA_PRINT(TRACE_LEVEL_WARNING ,HCA_DBG_LOW ,("HW2SW_SRQ returned status 0x%02x\n", status));
+ if (err) {
+ HCA_PRINT(TRACE_LEVEL_WARNING ,HCA_DBG_SRQ ,( "HW2SW_SRQ failed (%d)\n", err));
+ } else if (status) {
+ HCA_PRINT(TRACE_LEVEL_WARNING ,HCA_DBG_SRQ ,( "HW2SW_SRQ returned status 0x%02x\n", status));
}
spin_lock_irq(&dev->srq_table.lock, &lh);
mthca_array_clear(&dev->srq_table.srq,
srq->srqn & (dev->limits.num_srqs - 1));
+ atomic_dec(&srq->refcount);
spin_unlock_irq(&lh);
- atomic_dec(&srq->refcount);
wait_event(&srq->wait, !atomic_read(&srq->refcount));
- if (!srq->ibsrq.uobject) {
+ if (!srq->ibsrq.ucontext) {
mthca_free_srq_buf(dev, srq);
if (mthca_is_memfree(dev))
mthca_free_db(dev, MTHCA_DB_TYPE_SRQ, srq->db_index);
mthca_free_mailbox(dev, mailbox);
}
-int mthca_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
- enum ib_srq_attr_mask attr_mask)
-{
+int mthca_modify_srq(struct ib_srq *ibsrq, ib_srq_attr_t *attr,
+ ib_srq_attr_mask_t attr_mask)
+{
struct mthca_dev *dev = to_mdev(ibsrq->device);
struct mthca_srq *srq = to_msrq(ibsrq);
int ret;
/* We don't support resizing SRQs (yet?) */
if (attr_mask & IB_SRQ_MAX_WR)
- return -EINVAL;
+ return -ENOSYS;
if (attr_mask & IB_SRQ_LIMIT) {
- if (attr->srq_limit > (u32)srq->max)
- return -EINVAL;
+ u32 max_wr = mthca_is_memfree(dev) ? srq->max - 1 : srq->max;
+ if (attr->srq_limit > max_wr)
+ return -ERANGE;
down(&srq->mutex);
ret = mthca_ARM_SRQ(dev, srq->srqn, attr->srq_limit, &status);
return 0;
}
+int mthca_query_srq(struct ib_srq *ibsrq, ib_srq_attr_t *srq_attr)
+{
+ struct mthca_dev *dev = to_mdev(ibsrq->device);
+ struct mthca_srq *srq = to_msrq(ibsrq);
+ struct mthca_mailbox *mailbox;
+ struct mthca_arbel_srq_context *arbel_ctx;
+ struct mthca_tavor_srq_context *tavor_ctx;
+ u8 status;
+ int err;
+
+ mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
+ if (IS_ERR(mailbox))
+ return PTR_ERR(mailbox);
+
+ err = mthca_QUERY_SRQ(dev, srq->srqn, mailbox, &status);
+ if (err)
+ goto out;
+
+ if (mthca_is_memfree(dev)) {
+ arbel_ctx = mailbox->buf;
+ srq_attr->srq_limit = cl_ntoh16(arbel_ctx->limit_watermark);
+ } else {
+ tavor_ctx = mailbox->buf;
+ srq_attr->srq_limit = cl_ntoh16(tavor_ctx->limit_watermark);
+ }
+
+ srq_attr->max_wr = (mthca_is_memfree(dev)) ? srq->max - 1 : srq->max;
+ srq_attr->max_sge = srq->max_gs;
+
+out:
+ mthca_free_mailbox(dev, mailbox);
+
+ return err;
+}
+
void mthca_srq_event(struct mthca_dev *dev, u32 srqn,
- enum ib_event_type event_type)
+ enum ib_event_type event_type, u8 vendor_code)
{
struct mthca_srq *srq;
struct ib_event event;
spin_unlock(&lh);
if (!srq) {
- HCA_PRINT(TRACE_LEVEL_WARNING ,HCA_DBG_LOW ,("Async event for bogus SRQ %08x\n", srqn));
+ HCA_PRINT(TRACE_LEVEL_WARNING ,HCA_DBG_SRQ ,( "Async event for bogus SRQ %08x\n", srqn));
return;
}
event.device = &dev->ib_dev;
event.event = event_type;
- event.element.srq = &srq->ibsrq;
- srq->ibsrq.event_handler(&event, srq->ibsrq.srq_context);
+ event.element.srq = &srq->ibsrq;
+ event.vendor_specific = vendor_code;
+ HCA_PRINT(TRACE_LEVEL_WARNING,HCA_DBG_SRQ,
+ ("SRQ %06x Async event event_type 0x%x vendor_code 0x%x\n",
+ srqn,event_type,vendor_code));
+ if (srq->ibsrq.event_handler)
+ srq->ibsrq.event_handler(&event, srq->ibsrq.srq_context);
out:
if (atomic_dec_and_test(&srq->refcount))
spin_unlock(&lh);
}
-//TODO: is this code correct at all ?
int mthca_tavor_post_srq_recv(struct ib_srq *ibsrq, struct _ib_recv_wr *wr,
struct _ib_recv_wr **bad_wr)
{
struct mthca_dev *dev = to_mdev(ibsrq->device);
struct mthca_srq *srq = to_msrq(ibsrq);
- __be32 doorbell[2];
+ __be32 doorbell[2];
int err = 0;
int first_ind;
int ind;
int i;
u8 *wqe;
u8 *prev_wqe;
+ CPU_2_BE64_PREP;
SPIN_LOCK_PREP(lh);
spin_lock_irqsave(&srq->lock, &lh);
first_ind = srq->first_free;
- for (nreq = 0; wr; ++nreq, wr = wr->p_next) {
- if (unlikely(nreq == MTHCA_TAVOR_MAX_WQES_PER_RECV_DB)) {
- nreq = 0;
-
- doorbell[0] = cl_hton32(first_ind << srq->wqe_shift);
- doorbell[1] = cl_hton32(srq->srqn << 8);
-
- /*
- * Make sure that descriptors are written
- * before doorbell is rung.
- */
- wmb();
-
- mthca_write64(doorbell,
- dev->kar + MTHCA_RECEIVE_DOORBELL,
- MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
-
- first_ind = srq->first_free;
- }
-
+ for (nreq = 0; wr; wr = wr->p_next) {
ind = srq->first_free;
if (ind < 0) {
- HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_QP ,("SRQ %06x full\n", srq->srqn));
+ HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_SRQ ,( "SRQ %06x full\n", srq->srqn));
err = -ENOMEM;
*bad_wr = wr;
- goto out;
+ break;
}
- wqe = get_wqe(srq, ind);
- next_ind = *wqe_to_link(wqe);
+ wqe = get_wqe(srq, ind);
+ next_ind = *wqe_to_link(wqe);
if (next_ind < 0) {
- HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_QP ,("SRQ %06x full\n", srq->srqn));
+ HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_SRQ ,( "SRQ %06x full\n", srq->srqn));
err = -ENOMEM;
*bad_wr = wr;
break;
}
- prev_wqe = srq->last;
+ prev_wqe = srq->last;
srq->last = wqe;
((struct mthca_next_seg *) wqe)->nda_op = 0;
err = -EINVAL;
*bad_wr = wr;
srq->last = prev_wqe;
- goto out;
+ break;
}
for (i = 0; i < (int)wr->num_ds; ++i) {
((struct mthca_data_seg *) wqe)->lkey =
cl_hton32(wr->ds_array[i].lkey);
((struct mthca_data_seg *) wqe)->addr =
- cl_hton64(wr->ds_array[i].vaddr);
+ CPU_2_BE64(wr->ds_array[i].vaddr);
wqe += sizeof (struct mthca_data_seg);
}
srq->wrid[ind] = wr->wr_id;
srq->first_free = next_ind;
+
+ ++nreq;
+ if (unlikely(nreq == MTHCA_TAVOR_MAX_WQES_PER_RECV_DB)) {
+ nreq = 0;
+
+ doorbell[0] = cl_hton32(first_ind << srq->wqe_shift);
+ doorbell[1] = cl_hton32(srq->srqn << 8);
+
+ /*
+ * Make sure that descriptors are written
+ * before doorbell is rung.
+ */
+ wmb();
+
+ mthca_write64(doorbell,
+ dev->kar + MTHCA_RECEIVE_DOORBELL,
+ MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
+
+ first_ind = srq->first_free;
+ }
}
-out:
if (likely(nreq)) {
doorbell[0] = cl_hton32(first_ind << srq->wqe_shift);
doorbell[1] = cl_hton32((srq->srqn << 8) | nreq);
return err;
}
-//TODO: is this code correct at all ?
int mthca_arbel_post_srq_recv(struct ib_srq *ibsrq, struct _ib_recv_wr *wr,
struct _ib_recv_wr **bad_wr)
{
int nreq;
int i;
u8 *wqe;
+ CPU_2_BE64_PREP;
SPIN_LOCK_PREP(lh);
spin_lock_irqsave(&srq->lock, &lh);
ind = srq->first_free;
if (ind < 0) {
- HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_QP ,("SRQ %06x full\n", srq->srqn));
+ HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_SRQ ,( "SRQ %06x full\n", srq->srqn));
err = -ENOMEM;
*bad_wr = wr;
- goto out;
+ break;
}
wqe = get_wqe(srq, ind);
next_ind = *wqe_to_link(wqe);
if (next_ind < 0) {
- HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_LOW ,("SRQ %06x full\n", srq->srqn));
+ HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_SRQ ,( "SRQ %06x full\n", srq->srqn));
err = -ENOMEM;
*bad_wr = wr;
break;
}
- ((struct mthca_next_seg *) wqe)->nda_op =
+ ((struct mthca_next_seg *) wqe)->nda_op =
cl_hton32((next_ind << srq->wqe_shift) | 1);
((struct mthca_next_seg *) wqe)->ee_nds = 0;
/* flags field will always remain 0 */
if (unlikely((int)wr->num_ds > srq->max_gs)) {
err = -EINVAL;
*bad_wr = wr;
- goto out;
+ break;
}
for (i = 0; i < (int)wr->num_ds; ++i) {
((struct mthca_data_seg *) wqe)->lkey =
cl_hton32(wr->ds_array[i].lkey);
((struct mthca_data_seg *) wqe)->addr =
- cl_hton64(wr->ds_array[i].vaddr);
+ CPU_2_BE64(wr->ds_array[i].vaddr);
wqe += sizeof (struct mthca_data_seg);
}
srq->first_free = next_ind;
}
-out:
if (likely(nreq)) {
- srq->counter = srq->counter + (u16)nreq;
+ srq->counter = (u16)(srq->counter + nreq);
/*
* Make sure that descriptors are written before
return err;
}
+int mthca_max_srq_sge(struct mthca_dev *dev)
+{
+ if (mthca_is_memfree(dev))
+ return dev->limits.max_sg;
+
+ /*
+ * SRQ allocations are based on powers of 2 for Tavor,
+ * (although they only need to be multiples of 16 bytes).
+ *
+ * Therefore, we need to base the max number of sg entries on
+ * the largest power of 2 descriptor size that is <= to the
+ * actual max WQE descriptor size, rather than return the
+ * max_sg value given by the firmware (which is based on WQE
+ * sizes as multiples of 16, not powers of 2).
+ *
+ * If SRQ implementation is changed for Tavor to be based on
+ * multiples of 16, the calculation below can be deleted and
+ * the FW max_sg value returned.
+ */
+ return min( (uint32_t)dev->limits.max_sg,
+ ((1 << (fls(dev->limits.max_desc_sz) - 1)) -
+ sizeof (struct mthca_next_seg)) /
+ sizeof (struct mthca_data_seg));
+}
+
int mthca_init_srq_table(struct mthca_dev *dev)
{
int err;
mthca_array_cleanup(&dev->srq_table.srq, dev->limits.num_srqs);
mthca_alloc_cleanup(&dev->srq_table.alloc);
}
-
+++ /dev/null
-/*
- * Copyright (c) 2005 Topspin Communications. All rights reserved.
- * Copyright (c) 2005 Cisco Systems. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
- */
-
-#ifndef MTHCA_USER_H
-#define MTHCA_USER_H
-
-/*
- * Make sure that all structs defined in this file remain laid out so
- * that they pack the same way on 32-bit and 64-bit architectures (to
- * avoid incompatibility between 32-bit userspace and 64-bit kernels).
- * In particular do not use pointer types -- pass pointers in u64
- * instead.
- */
-
-struct mthca_alloc_ucontext_resp {
- uint64_t uar_addr;
- u64 pd_handle;
- u32 pdn;
- u32 qp_tab_size;
- u32 uarc_size;
- u32 vend_id;
- u16 dev_id;
-};
-
-struct mthca_create_srq {
- u32 lkey;
- u32 db_index;
- u64 db_page;
-};
-
-struct mthca_create_srq_resp {
- u32 srqn;
- u32 reserved;
-};
-
-#endif /* MTHCA_USER_H */
uint32_t cqn;
};
+struct ibv_create_srq {
+ uint64_t user_handle;
+ struct ibv_reg_mr mr;
+ uint32_t lkey; /* used only in kernel */
+ uint32_t db_index;
+ uint64_t db_page;
+};
+
+struct ibv_create_srq_resp {
+ struct ibv_reg_mr_resp mr;
+ uint64_t srq_handle;
+ uint64_t user_handle;
+ uint32_t max_wr;
+ uint32_t max_sge;
+ uint32_t srqn;
+ uint32_t reserved;
+};
+
struct ibv_create_qp {
uint64_t sq_db_page;
uint64_t rq_db_page;
mlnx_ual_mrw.c \\r
mlnx_ual_osbypass.c \\r
mlnx_ual_pd.c \\r
- mlnx_ual_qp.c \\r
+ mlnx_ual_qp.c \\r
+ mlnx_ual_srq.c \\r
\\r
mlnx_uvp_debug.c \\r
mlnx_uvp.c \\r
}\r
*ph_uvp_av = (ib_av_handle_t)ah;\r
}\r
+ else {\r
+ mthca_free_av(ah);\r
+ cl_free(ah);\r
+ }\r
goto end;\r
\r
end: \r
*/\r
mlnx_get_pd_interface (p_uvp);\r
\r
+ /*\r
+ * SRQ Management Verbs\r
+ */\r
+ mlnx_get_srq_interface (p_uvp);\r
+\r
/*\r
* QP Management Verbs\r
*/\r
IN const ib_cq_handle_t h_uvp_cq,\r
IN ib_api_status_t ioctl_status);\r
\r
+/************* SRQ Management *************************/\r
+void \r
+mlnx_get_srq_interface (\r
+ IN OUT uvp_interface_t *p_uvp );\r
+\r
+ib_api_status_t \r
+mlnx_pre_create_srq (\r
+ IN const ib_pd_handle_t h_uvp_pd,// Fix me: if needed\r
+ IN const ib_srq_attr_t *p_srq_attr,\r
+ IN OUT ci_umv_buf_t *p_umv_buf);\r
+\r
+void \r
+mlnx_post_create_srq (\r
+ IN const ib_pd_handle_t h_uvp_pd,\r
+ IN ib_api_status_t ioctl_status,\r
+ OUT ib_srq_handle_t *ph_uvp_srq,\r
+ IN ci_umv_buf_t *p_umv_buf );\r
+\r
+ib_api_status_t \r
+mlnx_pre_modify_srq (\r
+ IN const ib_srq_handle_t h_uvp_srq,\r
+ IN const ib_srq_attr_mask_t srq_attr_attr, // Fixme\r
+ IN const ib_srq_attr_t *p_srq_attr, // Fixme\r
+ IN OUT ci_umv_buf_t *p_umv_buf);\r
+\r
+void \r
+mlnx_post_modify_srq (\r
+ IN const ib_srq_handle_t h_uvp_srq,\r
+ IN ib_api_status_t ioctl_status,\r
+ IN OUT ci_umv_buf_t *p_umv_buf);\r
+\r
+ib_api_status_t \r
+mlnx_pre_query_srq (\r
+ IN ib_srq_handle_t h_uvp_srq,\r
+ IN OUT ci_umv_buf_t *p_umv_buf);\r
+\r
+void \r
+mlnx_post_query_srq (\r
+ IN ib_srq_handle_t h_uvp_srq,\r
+ IN ib_api_status_t ioctl_status,\r
+ IN ib_srq_attr_t *p_query_attr,\r
+ IN OUT ci_umv_buf_t *p_umv_buf);\r
+\r
+ib_api_status_t \r
+mlnx_pre_destroy_srq (\r
+ IN const ib_srq_handle_t h_uvp_srq);\r
+\r
+void \r
+mlnx_post_destroy_srq (\r
+ IN const ib_srq_handle_t h_uvp_srq,\r
+ IN ib_api_status_t ioctl_status );\r
+\r
+\r
/************* QP Management *************************/\r
void \r
mlnx_get_qp_interface (\r
IN ib_recv_wr_t* const p_recv_wr,\r
OUT ib_recv_wr_t** pp_recv_failure );\r
\r
+ib_api_status_t \r
+mlnx_post_srq_recv (\r
+ IN const void* __ptr64 h_srq,\r
+ IN ib_recv_wr_t* const p_recv_wr,\r
+ OUT ib_recv_wr_t** pp_recv_failure );\r
+\r
ib_api_status_t \r
mlnx_bind_mw (\r
IN const ib_mw_handle_t h_uvp_mw,\r
*/\r
p_uvp->post_send = mlnx_post_send;\r
p_uvp->post_recv = mlnx_post_recv;\r
+ p_uvp->post_srq_recv = mlnx_post_srq_recv;\r
\r
/*\r
* Completion Processing and \r
}\r
\r
\r
+ib_api_status_t\r
+mlnx_post_srq_recv (\r
+ IN const void* __ptr64 h_srq,\r
+ IN ib_recv_wr_t* const p_recv_wr,\r
+ OUT ib_recv_wr_t** pp_recv_failure )\r
+{\r
+ int err;\r
+ ib_api_status_t status = IB_SUCCESS;\r
+ struct mthca_srq *srq = (struct mthca_srq *) ((void*)h_srq);\r
+\r
+ UVP_ENTER(UVP_DBG_QP);\r
+\r
+ CL_ASSERT (srq);\r
+\r
+ CL_ASSERT( p_recv_wr );\r
+\r
+ err = srq->ibv_srq.context->ops.post_srq_recv(&srq->ibv_srq, p_recv_wr, pp_recv_failure );\r
+ if (err) {\r
+		UVP_PRINT(TRACE_LEVEL_ERROR ,UVP_DBG_QP, ("mthca_post_srq_recv failed (%d)\n", err));
+ if (err == -ENOMEM)\r
+ status = IB_INSUFFICIENT_RESOURCES;\r
+ else if (err == -EINVAL) \r
+ status = IB_INVALID_WR_TYPE;\r
+ else if (err == -ERANGE)\r
+ status = IB_INVALID_MAX_SGE;\r
+ else if (err == -EBUSY)\r
+ status = IB_INVALID_QP_STATE;\r
+ else\r
+ status = errno_to_iberr(err);\r
+ }\r
+\r
+ UVP_EXIT(UVP_DBG_QP);\r
+ return status;\r
+}\r
+\r
+\r
ib_api_status_t\r
mlnx_post_recv (\r
IN const void* __ptr64 h_qp,\r
/* convert attributes */\r
attr.send_cq = p_create_attr->h_sq_cq->ibv_cq;\r
attr.recv_cq = p_create_attr->h_rq_cq->ibv_cq;\r
- attr.srq = NULL; /* absent in IBAL */\r
+ attr.srq = (struct ibv_srq*)p_create_attr->h_srq;\r
attr.cap.max_send_wr = p_create_attr->sq_depth;\r
attr.cap.max_recv_wr = p_create_attr->rq_depth;\r
attr.cap.max_send_sge = p_create_attr->sq_sge;\r
if (ioctl_status == IB_SUCCESS) \r
cl_free (p_qp_info);\r
else\r
- UVP_PRINT(TRACE_LEVEL_ERROR ,UVP_DBG_SHIM , ("mthca_destroy_qp failed (%d)\n", ioctl_status));\r
+ UVP_PRINT(TRACE_LEVEL_ERROR ,UVP_DBG_SHIM , ("mthca_destroy_qp_post failed (%d)\n", ioctl_status));\r
\r
UVP_EXIT(UVP_DBG_SHIM);\r
return;\r
--- /dev/null
+/*
+ * Copyright (c) 2005 SilverStorm Technologies. All rights reserved.
+ * Copyright (c) 2004-2005 Mellanox Technologies, Inc. All rights reserved.
+ *
+ * This software is available to you under the OpenIB.org BSD license
+ * below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id: mlnx_ual_srq.c 1611 2006-08-20 14:48:55Z leonid $
+ */
+
+#include "mt_l2w.h"
+#include "mlnx_ual_main.h"
+#include "mlnx_uvp.h"
+#include "mx_abi.h"
+
+#if defined(EVENT_TRACING)
+#include "mlnx_ual_srq.tmh"
+#endif
+
+
+extern uint32_t mlnx_dbg_lvl;
+
+void
+mlnx_get_srq_interface (
+ IN OUT uvp_interface_t *p_uvp )
+{
+ UVP_ENTER(UVP_DBG_DEV);
+
+ CL_ASSERT(p_uvp);
+
+ /*
+	 * Shared Receive Queue Management Verbs
+ */
+ p_uvp->pre_create_srq = mlnx_pre_create_srq;
+ p_uvp->post_create_srq = mlnx_post_create_srq;
+
+ p_uvp->pre_query_srq = NULL; /* mlnx_pre_query_srq; */
+ p_uvp->post_query_srq = NULL; /*mlnx_post_query_srq;*/
+
+ p_uvp->pre_modify_srq = NULL; /* mlnx_modify_srq;*/
+ p_uvp->post_modify_srq = NULL; /*mlnx_post_modify_srq;*/
+
+ p_uvp->pre_destroy_srq = NULL; /* mlnx_pre_destroy_srq; */
+ p_uvp->post_destroy_srq = mlnx_post_destroy_srq;
+
+ UVP_EXIT(UVP_DBG_DEV);
+}
+
+static void __free_srq(struct mthca_srq *srq)
+{
+ /* srq may be NULL, when ioctl returned with some kind of error, e.g. IB_INVALID_PARAM */
+ if (!srq)
+ return;
+
+ if (mthca_is_memfree(srq->ibv_srq.context)) {
+ mthca_free_db(to_mctx(srq->ibv_srq.context)->db_tab, MTHCA_DB_TYPE_SRQ,
+ srq->db_index);
+ }
+
+ if (srq->buf) {
+#ifdef NOT_USE_VIRTUAL_ALLOC
+ cl_free(srq->buf);
+#else
+ VirtualFree( srq->buf, 0, MEM_RELEASE);
+#endif
+ }
+
+ if (srq->wrid)
+ cl_free(srq->wrid);
+
+ cl_spinlock_destroy(&srq->lock);
+ cl_free (srq);
+}
+
+ib_api_status_t
+mlnx_pre_create_srq (
+ IN const ib_pd_handle_t h_uvp_pd,// Fix me: if needed
+ IN const ib_srq_attr_t *p_srq_attr,
+ IN OUT ci_umv_buf_t *p_umv_buf)
+{
+ struct mthca_srq *srq;
+ ib_api_status_t status = IB_SUCCESS;
+ size_t size = max( sizeof(struct ibv_create_srq), sizeof(struct ibv_create_srq_resp) );
+ mlnx_ual_pd_info_t *p_pd = (mlnx_ual_pd_info_t *)h_uvp_pd;
+ struct ibv_pd *ibv_pd = p_pd->ibv_pd;
+ struct ibv_create_srq *p_create_srq;
+ int err;
+
+ UVP_ENTER(UVP_DBG_SRQ);
+
+ CL_ASSERT(p_umv_buf);
+
+ /* Sanity check SRQ size before proceeding */
+ if (p_srq_attr->max_wr > 1 << 16 || p_srq_attr->max_sge > 64)
+ {
+ status = IB_INVALID_PARAMETER;
+ goto err_params;
+ }
+
+ if( !p_umv_buf->p_inout_buf )
+ {
+ p_umv_buf->p_inout_buf = cl_zalloc( size );
+ if( !p_umv_buf->p_inout_buf )
+ {
+ status = IB_INSUFFICIENT_MEMORY;
+ goto err_memory;
+ }
+ }
+ p_umv_buf->input_size = sizeof(struct ibv_create_srq);
+ p_umv_buf->output_size = sizeof(struct ibv_create_srq_resp);
+ p_umv_buf->command = TRUE;
+
+ /* allocate srq */
+ srq = cl_zalloc(sizeof *srq);
+ if (!srq)
+ {
+ status = IB_INSUFFICIENT_MEMORY;
+ goto err_alloc_srq;
+ }
+
+ /* init fields */
+ cl_spinlock_construct(&srq->lock);
+ if (cl_spinlock_init(&srq->lock))
+ goto err_lock;
+
+ srq->ibv_srq.pd = ibv_pd;
+ srq->ibv_srq.context = ibv_pd->context;
+ srq->max = align_queue_size(ibv_pd->context, p_srq_attr->max_wr, 1);
+ srq->max_gs = p_srq_attr->max_sge;
+ srq->counter = 0;
+
+ if (mthca_alloc_srq_buf(ibv_pd, (void*)p_srq_attr, srq))
+ {
+ status = IB_INSUFFICIENT_MEMORY;
+ goto err_alloc_buf;
+ }
+
+ // fill the parameters for ioctl
+ p_create_srq = (struct ibv_create_srq *)p_umv_buf->p_inout_buf;
+ p_create_srq->user_handle = (uint64_t)(ULONG_PTR)srq;
+ p_create_srq->mr.start = (uint64_t)(ULONG_PTR)srq->buf;
+ p_create_srq->mr.length = srq->buf_size;
+ p_create_srq->mr.hca_va = 0;
+ p_create_srq->mr.pd_handle = p_pd->ibv_pd->handle;
+ p_create_srq->mr.pdn = to_mpd(p_pd->ibv_pd)->pdn;
+ p_create_srq->mr.access_flags = 0; //local read
+
+ if (mthca_is_memfree(ibv_pd->context)) {
+ srq->db_index = mthca_alloc_db(to_mctx(ibv_pd->context)->db_tab,
+ MTHCA_DB_TYPE_SRQ, &srq->db);
+ if (srq->db_index < 0)
+ goto err_alloc_db;
+
+ p_create_srq->db_page = db_align(srq->db);
+ p_create_srq->db_index = srq->db_index;
+ }
+
+ status = IB_SUCCESS;
+ goto end;
+
+err_alloc_db:
+#ifdef NOT_USE_VIRTUAL_ALLOC
+ cl_free(srq->buf);
+#else
+ VirtualFree( srq->buf, 0, MEM_RELEASE);
+#endif
+ cl_free(srq->wrid);
+err_alloc_buf:
+ cl_spinlock_destroy(&srq->lock);
+err_lock:
+ cl_free(srq);
+err_alloc_srq:
+ cl_free(p_umv_buf->p_inout_buf);
+err_memory:
+err_params:
+end:
+ UVP_EXIT(UVP_DBG_SRQ);
+ return status;
+}
+
+
+void
+mlnx_post_create_srq (
+ IN const ib_pd_handle_t h_uvp_pd,
+ IN ib_api_status_t ioctl_status,
+ OUT ib_srq_handle_t *ph_uvp_srq,
+ IN ci_umv_buf_t *p_umv_buf )
+{
+ int err;
+ struct mthca_srq *srq;
+ struct ibv_create_srq_resp *p_resp;
+ mlnx_ual_pd_info_t *p_pd = (mlnx_ual_pd_info_t *)h_uvp_pd;
+ struct ibv_pd *ibv_pd = p_pd->ibv_pd;
+ ib_api_status_t status = IB_SUCCESS;
+
+ UVP_ENTER(UVP_DBG_SRQ);
+
+ CL_ASSERT(p_umv_buf);
+ p_resp = (struct ibv_create_srq_resp *)p_umv_buf->p_inout_buf;
+ srq = (struct mthca_srq *)(ULONG_PTR)p_resp->user_handle;
+
+ if (IB_SUCCESS == ioctl_status) {
+
+ /* complete filling SRQ object */
+ srq->ibv_srq.handle = p_resp->srq_handle;
+ srq->srqn = p_resp->srqn;
+ srq->max = p_resp->max_wr;
+ srq->max_gs = p_resp->max_sge;
+ srq->mr.handle = p_resp->mr.mr_handle;
+ srq->mr.lkey = p_resp->mr.lkey;
+ srq->mr.rkey = p_resp->mr.rkey;
+ srq->mr.pd = ibv_pd;
+ srq->mr.context = ibv_pd->context;
+
+ if (mthca_is_memfree(ibv_pd->context))
+ mthca_set_db_qn(srq->db, MTHCA_DB_TYPE_SRQ, srq->srqn);
+
+ *ph_uvp_srq = (ib_srq_handle_t)srq;
+ }
+ else
+ __free_srq(srq);
+
+ if (p_resp)
+ cl_free( p_resp );
+ UVP_EXIT(UVP_DBG_SRQ);
+ return;
+}
+
+void
+mlnx_post_destroy_srq (
+ IN const ib_srq_handle_t h_uvp_srq,
+ IN ib_api_status_t ioctl_status)
+{
+ int err;
+ struct mthca_srq *srq = (struct mthca_srq *) ((void*)h_uvp_srq);
+
+	UVP_ENTER(UVP_DBG_SRQ);
+
+ CL_ASSERT(srq);
+
+ if (IB_SUCCESS == ioctl_status)
+ __free_srq(srq);
+
+	UVP_EXIT(UVP_DBG_SRQ);
+}
+
+
void *buf;
void *last;
cl_spinlock_t lock;
- struct ibv_mr *mr;
+ struct ibv_mr mr;
uint64_t *wrid;
uint32_t srqn;
int max;
+++ /dev/null
-/*
- * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
- * $Id$
- */
-
-#ifndef MTHCA_UVP_ABI_H
-#define MTHCA_ABI_H
-
-#include "mlnx_uvp_kern_abi.h"
-
-struct mthca_alloc_ucontext_resp {
- struct ibv_get_context_resp ibv_resp;
-};
-
-struct mthca_create_srq {
- uint32_t lkey;
- uint32_t db_index;
- uint64_t db_page;
- struct ibv_create_srq ibv_cmd;
-};
-
-struct mthca_create_srq_resp {
- struct ibv_create_srq_resp ibv_resp;
- uint32_t srqn;
- uint32_t reserved;
-};
-
-struct ibv_context *mthca_alloc_context(struct ibv_get_context_resp *resp_p);
-void mthca_free_context(struct ibv_context *ibctx);
-
-
-#endif /* MTHCA_ABI_H */
WPP_DEFINE_BIT( UVP_DBG_CQ) \\r
WPP_DEFINE_BIT( UVP_DBG_QP) \\r
WPP_DEFINE_BIT( UVP_DBG_MEMORY) \\r
+ WPP_DEFINE_BIT( UVP_DBG_SRQ) \\r
WPP_DEFINE_BIT( UVP_DBG_AV) \\r
WPP_DEFINE_BIT( UVP_DBG_SEND) \\r
WPP_DEFINE_BIT( UVP_DBG_RECV) \\r
#define UVP_DBG_QP (1 << 4)\r
#define UVP_DBG_CQ (1 << 5)\r
#define UVP_DBG_MEMORY (1 << 6)\r
-#define UVP_DBG_AV (1 << 7)\r
-#define UVP_DBG_SEND (1 << 8)\r
-#define UVP_DBG_RECV (1 << 9)\r
-#define UVP_DBG_LOW (1 << 10)\r
-#define UVP_DBG_SHIM (1 << 11)\r
+#define UVP_DBG_SRQ (1 << 7)\r
+#define UVP_DBG_AV (1 << 8)\r
+#define UVP_DBG_SEND (1 << 9)\r
+#define UVP_DBG_RECV (1 << 10)\r
+#define UVP_DBG_LOW (1 << 11)\r
+#define UVP_DBG_SHIM (1 << 12)\r
\r
\r
VOID\r
first_ind = srq->first_free;
- for (nreq = 0; wr; ++nreq, wr = wr->p_next) {
- if (unlikely(nreq == MTHCA_TAVOR_MAX_WQES_PER_RECV_DB)) {
- nreq = 0;
-
- doorbell[0] = cl_hton32(first_ind << srq->wqe_shift);
- doorbell[1] = cl_hton32(srq->srqn << 8);
-
- /*
- * Make sure that descriptors are written
- * before doorbell is rung.
- */
- wmb();
-
- mthca_write64(doorbell, to_mctx(ibsrq->context), MTHCA_RECV_DOORBELL);
-
- first_ind = srq->first_free;
- }
-
+ for (nreq = 0; wr; wr = wr->p_next) {
ind = srq->first_free;
if (ind < 0) {
srq->wrid[ind] = wr->wr_id;
srq->first_free = next_ind;
+
+ if (++nreq == MTHCA_TAVOR_MAX_WQES_PER_RECV_DB) {
+ nreq = 0;
+
+ doorbell[0] = cl_hton32(first_ind << srq->wqe_shift);
+ doorbell[1] = cl_hton32(srq->srqn << 8);
+
+ /*
+ * Make sure that descriptors are written
+ * before doorbell is rung.
+ */
+ wmb();
+
+ mthca_write64(doorbell, to_mctx(ibsrq->context), MTHCA_RECV_DOORBELL);
+
+ first_ind = srq->first_free;
+ }
}
if (nreq) {
srq->buf_size = srq->max << srq->wqe_shift;
if (posix_memalign(&srq->buf, g_page_size,
- align(srq->buf_size, g_page_size))) {
+ align(srq->buf_size, g_page_size))) {
cl_free(srq->wrid);
return -1;
}
- memset(srq->buf, 0, srq->buf_size);
+ cl_memclr(srq->buf, srq->buf_size);
/*
* Now initialize the SRQ buffer so that all of the WQEs are
return 0;
}
-static int align_queue_size(struct ibv_context *context, int size, int spare)
+int align_queue_size(struct ibv_context *context, int size, int spare)
{
int ret;
}
// fill the rest qp fields
- qp->ibv_qp .pd = pd;
+ qp->ibv_qp.pd = pd;
qp->ibv_qp.send_cq = attr->send_cq;
qp->ibv_qp.recv_cq = attr->recv_cq;
qp->ibv_qp.srq = attr->srq;
return -ENOSYS;
#endif
}
+
struct ibv_srq {
- struct ibv_context *context;
- void *srq_context;
struct ibv_pd *pd;
uint64_t handle;
- HANDLE mutex;
-
-#ifdef WIN_TO_BE_CHANGED
- pthread_cond_t cond;
- uint32_t events_completed;
-#endif
+ struct ibv_context *context;
};
struct ibv_qp {
void *abi_compat;
};
+int align_queue_size(struct ibv_context *context, int size, int spare);
+
END_C_DECLS
#endif /* INFINIBAND_VERBS_H */
{\r
#endif /* __cplusplus */\r
\r
-/****h* IB Access Layer API/Overview\r
+/****h* IB Access Layer API/Access Layer\r
* NAME\r
* InfiniBand Access Layer\r
* COPYRIGHT\r
ib_ca_handle_t h_ca;\r
ib_cq_handle_t h_cq;\r
ib_qp_handle_t h_qp;\r
+ ib_srq_handle_t h_srq;\r
\r
} handle;\r
\r
*****/\r
\r
\r
+/****f* Access Layer/ib_create_srq\r
+* NAME\r
+* ib_create_srq\r
+*\r
+* DESCRIPTION\r
+* Creates a shared receive queue and returns its handle to the user.\r
+*\r
+* SYNOPSIS\r
+*/\r
+AL_EXPORT ib_api_status_t AL_API\r
+ib_create_srq(\r
+ IN const ib_pd_handle_t h_pd,\r
+ IN const ib_srq_attr_t* const p_srq_attr,\r
+ IN const void* const srq_context,\r
+ IN const ib_pfn_event_cb_t pfn_srq_event_cb OPTIONAL,\r
+ OUT ib_srq_handle_t* const ph_srq );\r
+/*\r
+* PARAMETERS\r
+* h_pd\r
+* [in] This is a handle to a protection domain associated with the shared queue\r
+* pair.\r
+*\r
+* p_srq_attr\r
+* [in] Attributes necessary to allocate and initialize a shared receive queue.\r
+*\r
+* srq_context\r
+* [in] A user-specified context information associated with the shared\r
+* receive queue.\r
+*\r
+* pfn_srq_event_cb\r
+* [in] User-specified error callback routine invoked after an\r
+* asynchronous event has occurred on the shared receive queue.\r
+*\r
+* ph_srq\r
+* [out] Upon successful completion of this call, this references a\r
+* handle to the newly created shared receive queue.\r
+*\r
+* RETURN VALUES\r
+* IB_SUCCESS\r
+* The receive queue was successfully created.\r
+*\r
+* IB_INVALID_PD_HANDLE\r
+* The protection domain to associate with the shared receive queue was invalid.\r
+*\r
+* IB_INVALID_PARAMETER\r
+* A reference to the shared receive queue attributes or handle was not provided.\r
+*\r
+* IB_INSUFFICIENT_MEMORY\r
+* There was insufficient memory to create the shared receive queue.\r
+*\r
+* IB_INSUFFICIENT_RESOURCES\r
+* There were insufficient resources currently available on the channel\r
+* adapter to create the shared receive queue.\r
+*\r
+* IB_INVALID_SETTING\r
+* The specified shared receive queue creation attributes are invalid.\r
+*\r
+* IB_INVALID_MAX_WRS\r
+* The requested maximum send or receive work request depth could not be\r
+* supported.\r
+*\r
+* IB_INVALID_MAX_SGE\r
+* The requested maximum number of scatter-gather entries for the send or\r
+* receive queue could not be supported.\r
+*\r
+* NOTES\r
+* This routine allocates a shared receive queue with the specified attributes. If\r
+* the shared receive queue cannot be allocated, an error is returned. When creating\r
+* the shared receive queue, users associate a context with the shared receive queue. This\r
+* context is returned to the user through the asynchronous event callback\r
+* if an event occurs.\r
+*\r
+* This routine is used to create receive queues, which work with QPs of type:\r
+*\r
+* IB_QPT_RELIABLE_CONN\r
+* IB_QPT_UNRELIABLE_CONN\r
+* IB_QPT_UNRELIABLE_DGRM\r
+*\r
+* SEE ALSO\r
+* ib_query_srq, ib_modify_srq, ib_destroy_srq, ib_srq_attr_t,\r
+* ib_srq_attr_mask_t, ib_pfn_event_cb_t, ib_qp_attr_t\r
+*****/\r
+\r
+\r
+/****f* Access Layer/ib_query_srq\r
+* NAME\r
+* ib_query_srq\r
+*\r
+* DESCRIPTION\r
+* Query the current attributes of the shared receive queue.\r
+*\r
+* SYNOPSIS\r
+*/\r
+AL_EXPORT ib_api_status_t AL_API\r
+ib_query_srq(\r
+ IN const ib_srq_handle_t h_srq,\r
+ OUT ib_srq_attr_t* const p_srq_attr );\r
+/*\r
+* PARAMETERS\r
+* h_srq\r
+* [in] A handle to an existing shared receive queue.\r
+*\r
+* p_srq_attr\r
+* [out] Upon successful completion of this call, the structure\r
+* referenced by this parameter contains the attributes of the specified\r
+* shared receive queue.\r
+*\r
+* RETURN VALUES\r
+* IB_SUCCESS\r
+* The shared receive queue attributes were returned successfully.\r
+*\r
+* IB_INVALID_SRQ_HANDLE\r
+* The shared receive queue handle was invalid.\r
+*\r
+* IB_INVALID_PARAMETER\r
+* A reference to the shared receive queue attributes structure was not provided.\r
+*\r
+* NOTES\r
+* This routine returns information about the specified shared receive queue.\r
+*\r
+* SEE ALSO\r
+* ib_query_srq, ib_modify_srq, ib_destroy_srq, ib_srq_attr_t,\r
+* ib_srq_attr_mask_t, ib_pfn_event_cb_t, ib_qp_attr_t\r
+*****/\r
+\r
+\r
+/****f* Access Layer/ib_modify_srq\r
+* NAME\r
+* ib_modify_srq\r
+*\r
+* DESCRIPTION\r
+* Modifies the attributes of an existing shared receive queue.\r
+*\r
+* SYNOPSIS\r
+*/\r
+AL_EXPORT ib_api_status_t AL_API\r
+ib_modify_srq(\r
+ IN const ib_srq_handle_t h_srq,\r
+ IN const ib_srq_attr_t* const p_srq_attr,\r
+ IN const ib_srq_attr_mask_t srq_attr_mask );\r
+/*\r
+* PARAMETERS\r
+* h_srq\r
+* [in] A handle to an existing shared receive queue.\r
+*\r
+* p_srq_attr\r
+* [in] Attributes necessary to allocate and initialize a shared receive queue.\r
+*\r
+* srq_attr_mask\r
+* [in] Flags, indicating which fields in the previous structure are valid.\r
+*\r
+* RETURN VALUES\r
+* IB_SUCCESS\r
+* The shared receive queue was successfully modified.\r
+*\r
+* IB_INVALID_SRQ_HANDLE\r
+* The shared receive queue handle was invalid.\r
+*\r
+* IB_INVALID_PARAMETER\r
+* A reference to the shared receive queue attributes was not provided.\r
+*\r
+* IB_INVALID_SETTING\r
+* The specified shared receive queue attributes were invalid.\r
+*\r
+* IB_UNSUPPORTED\r
+* The required action is not supported yet.\r
+*\r
+* IB_INSUFFICIENT_RESOURCES\r
+* There were insufficient resources currently available on the channel\r
+* adapter to register the modify the shared receive queue.\r
+*\r
+* NOTES\r
+* This routine modifies the attributes of an existing shared receive queue and\r
+* transitions it to a new state. The new state and attributes are\r
+* specified through the p_qp_mod parameter. Upon successful completion,\r
+* the shared receive queue is in the requested state.\r
+*\r
+* SEE ALSO\r
+* ib_query_srq, ib_modify_srq, ib_destroy_srq, ib_srq_attr_t,\r
+* ib_srq_attr_mask_t, ib_pfn_event_cb_t, ib_qp_attr_t\r
+*****/\r
+\r
+\r
+/****f* Access Layer/ib_destroy_srq\r
+* NAME\r
+* ib_destroy_srq\r
+*\r
+* DESCRIPTION\r
+* Release a shared receive queue. Once destroyed, no further access to this\r
+* shared receive queue is possible.\r
+*\r
+* SYNOPSIS\r
+*/\r
+AL_EXPORT ib_api_status_t AL_API\r
+ib_destroy_srq(\r
+ IN const ib_srq_handle_t h_srq,\r
+ IN const ib_pfn_destroy_cb_t pfn_destroy_cb OPTIONAL );\r
+/*\r
+* PARAMETERS\r
+* h_srq\r
+* [in] A handle to an existing shared receive queue.\r
+*\r
+* pfn_destroy_cb\r
+* [in] A user-specified callback that is invoked after the shared receive queue\r
+* has been successfully destroyed.\r
+*\r
+* RETURN VALUES\r
+* IB_SUCCESS\r
+* The destroy request was registered.\r
+*\r
+* IB_INVALID_SRQ_HANDLE\r
+* The shared receive queue handle was invalid.\r
+*\r
+* IB_RESOURCE_BUSY\r
+* There are QPs, bound to the shared receive queue\r
+*\r
+* NOTES\r
+* This call destroys an existing shared receive queue. Since callbacks may be\r
+* outstanding against the shared receive queue at the time the destroy operation is\r
+* invoked, then this call operates asynchronously. The user will be notified\r
+* through a callback once the destroy operation completes, indicating that\r
+* no additional callbacks will be invoked for the specified shared receive queue.\r
+*\r
+* SEE ALSO\r
+* ib_query_srq, ib_modify_srq, ib_destroy_srq, ib_srq_attr_t,\r
+* ib_srq_attr_mask_t, ib_pfn_event_cb_t, ib_qp_attr_t\r
+*****/\r
+\r
+\r
+/****f* Access Layer/ib_post_srq_recv\r
+* NAME\r
+* ib_post_srq_recv\r
+*\r
+* DESCRIPTION\r
+* This routine posts a work request to a shared receive queue.\r
+*\r
+* SYNOPSIS\r
+*/\r
+AL_EXPORT ib_api_status_t AL_API\r
+ib_post_srq_recv(\r
+ IN const ib_srq_handle_t h_srq,\r
+ IN ib_recv_wr_t* const p_recv_wr,\r
+ OUT ib_recv_wr_t **pp_recv_failure OPTIONAL );\r
+/*\r
+* PARAMETERS\r
+* h_srq\r
+* [in] The shared receive queue to which this work request is being submitted.\r
+*\r
+* p_recv_wr\r
+* [in] A reference to the head of the work request list.\r
+*\r
+* pp_recv_failure\r
+* [out] If the post receive operation failed, this references the work\r
+* request in the p_recv_wr list where the first failure occurred.\r
+* This parameter may be NULL if only a single work request is being\r
+* posted to the SRQ.\r
+*\r
+* RETURN VALUES\r
+* IB_SUCCESS\r
+* All work requests were successfully posted.\r
+*\r
+* IB_INVALID_QP_HANDLE\r
+* The shared receive queue handle was invalid.\r
+*\r
+* IB_INVALID_PARAMETER\r
+* A reference to the receive work request list was not provided.\r
+*\r
+* IB_INSUFFICIENT_RESOURCES\r
+* The number of posted work requests exceed the current depth available\r
+* on the receive queue.\r
+*\r
+* IB_INVALID_WR_TYPE\r
+* The work request type was invalid.\r
+*\r
+* IB_INVALID_QP_STATE\r
+* The current shared receive queue state does not allow posting receives.\r
+*\r
+* NOTES\r
+* This routine posts a work request to the shared receive queue.\r
+* The type of work to perform is defined by the p_recv_wr parameter. This\r
+* call is used to post data buffers to receive incoming message sends.\r
+*\r
+* SEE ALSO\r
+* ib_recv_wr_t\r
+*****/\r
+\r
+\r
/****f* Access Layer/ib_create_qp\r
* NAME\r
* ib_create_qp\r
* The send or receive completion queue to associate with the queue pair\r
* was invalid.\r
*\r
+* IB_INVALID_SRQ_HANDLE\r
+* The shared receive queue to be associated with the queue pair\r
+* was invalid.\r
+*\r
* IB_INVALID_SETTING\r
* The specified queue pair creation attributes are invalid.\r
*\r
* receive queue could not be supported.\r
*\r
* NOTES\r
-* This routine allocates a queue pair with the specified attributes. If\r
+* 1. This routine allocates a queue pair with the specified attributes. If\r
* the queue pair cannot be allocated, an error is returned. When creating\r
* the queue pair, users associate a context with the queue pair. This\r
* context is returned to the user through the asynchronous event callback\r
* if an event occurs.\r
*\r
-* This routine is used to create queue pairs of type:\r
-*\r
-* IB_QPT_RELIABLE_CONN\r
-* IB_QPT_UNRELIABLE_CONN\r
-* IB_QPT_UNRELIABLE_DGRM\r
-* IB_QPT_MAD\r
-*\r
-* Callers of ib_create_qp should call ib_init_dgrm_svc if the queue pair\r
+* 2. For QPs that are associated with an SRQ, the Consumer should take\r
+* the QP through the Error State before invoking a Destroy QP or a Modify\r
+* QP to the Reset State. The Consumer may invoke the Destroy QP without\r
+* first performing a Modify QP to the Error State and waiting for the Affiliated \r
+* Asynchronous Last WQE Reached Event. However, if the Consumer\r
+* does not wait for the Affiliated Asynchronous Last WQE Reached Event,\r
+* then WQE and Data Segment leakage may occur.\r
+*\r
+* 3. This routine is used to create queue pairs of type:\r
+* IB_QPT_RELIABLE_CONN\r
+* IB_QPT_UNRELIABLE_CONN\r
+* IB_QPT_UNRELIABLE_DGRM\r
+* IB_QPT_MAD\r
+*\r
+* 4. Callers of ib_create_qp should call ib_init_dgrm_svc if the queue pair\r
* is of type IB_QPT_UNRELIABLE_DGRM or IB_QPT_MAD before sending or\r
* receiving data. IB_QPT_RELIABLE_CONN, IB_QPT_UNRELIABLE_CONN type\r
* queue pairs should be used by the connection establishment process\r
\r
void* __ptr64 pnp_context;\r
void* __ptr64 context;\r
-\r
+ //NOTE:\r
+ //guid and ca_guid use as key to flexi map need to keep these field together\r
ib_net64_t guid;\r
+ ib_net64_t ca_guid;\r
\r
} ib_pnp_rec_t;\r
/*\r
* The GUID of the adapter, port, IOU, or IOC for which\r
* the PnP event occurred.\r
*\r
+* ca_guid\r
+* The GUID of the HCA \r
+*\r
* NOTES\r
* This structure is returned to the user to notify them of: the addition\r
* of a channel adapter, the removal of a channel adapter, a port up or down\r
typedef struct _ib_pnp_iou_rec\r
{\r
ib_pnp_rec_t pnp_rec;\r
+ net64_t guid;\r
net64_t ca_guid;\r
net64_t chassis_guid;\r
uint8_t slot;\r
* Status of the operation.\r
*****/\r
\r
+/****s* User-mode Access Layer/ual_create_srq_ioctl_t\r
+* NAME\r
+* ual_create_srq_ioctl_t\r
+*\r
+* DESCRIPTION\r
+* IOCTL structure containing the input and output parameters for\r
+* ib_create_srq\r
+*\r
+* SYNOPSIS\r
+*/\r
+typedef union _ual_create_srq_ioctl\r
+{\r
+ struct _ual_create_srq_ioctl_in\r
+ {\r
+ ci_umv_buf_t umv_buf;\r
+ uint64_t h_pd;\r
+ ib_srq_attr_t srq_attr;\r
+ void* __ptr64 context;\r
+ boolean_t ev_notify;\r
+\r
+ } in;\r
+ struct _ual_create_srq_ioctl_out\r
+ {\r
+ ci_umv_buf_t umv_buf;\r
+ ib_api_status_t status;\r
+ uint64_t h_srq;\r
+\r
+ } out;\r
+\r
+} ual_create_srq_ioctl_t;\r
+/*\r
+* FIELDS\r
+* in.umv_buf\r
+* Opaque to IBAL buffer descriptor to allow the user-mode HCA library to\r
+* exchange private information with the kernel-mode HCA driver.\r
+*\r
+* in.h_pd\r
+* Protection domain on which to create the srq.\r
+*\r
+* in.srq_attr\r
+* Attributes necessary for creating the srq.\r
+*\r
+* in.context\r
+* UAL's srq context that needs to be returned on a callback.\r
+*\r
+* in.ev_notify\r
+* Boolean indicating whether asynchronous events should be\r
+* forwarded to user-mode.\r
+*\r
+* out.umv_buf\r
+* Returns the status from the HCA driver to the user-mode HCA library,\r
+* along with any vendor specific output information.\r
+*\r
+* out.status\r
+* Status of the operation.\r
+*\r
+* out.h_srq\r
+* Handle for the newly created srq.\r
+*****/\r
+\r
+\r
+/****s* User-mode Access Layer/ual_modify_srq_ioctl_t\r
+* NAME\r
+* ual_modify_srq_ioctl_t\r
+*\r
+* DESCRIPTION\r
+* IOCTL structure containing the input and output parameters for\r
+* ib_modify_srq\r
+*\r
+* SYNOPSIS\r
+*/\r
+typedef union _ual_modify_srq_ioctl\r
+{\r
+ struct _ual_modify_srq_ioctl_in\r
+ {\r
+ ci_umv_buf_t umv_buf;\r
+ uint64_t h_srq;\r
+ ib_srq_attr_mask_t srq_attr_mask;\r
+ ib_srq_attr_t srq_attr;\r
+\r
+ } in;\r
+ struct _ual_modify_srq_ioctl_out\r
+ {\r
+ ci_umv_buf_t umv_buf;\r
+ ib_api_status_t status;\r
+\r
+ } out;\r
+\r
+} ual_modify_srq_ioctl_t;\r
+/*\r
+* FIELDS\r
+* in.umv_buf\r
+* Opaque to IBAL buffer descriptor to allow the user-mode HCA library to\r
+* exchange private information with the kernel-mode HCA driver.\r
+*\r
+* in.h_srq\r
+* A handle to an existing shared receive queue.\r
+*\r
+* in.srq_attr\r
+* Attributes used for modifying the srq.\r
+*\r
+* out.umv_buf\r
+* Returns the status from the HCA driver to the user-mode HCA library,\r
+* along with any vendor specific output information.\r
+*\r
+* out.status\r
+* Status of the operation.\r
+*\r
+*****/\r
+\r
+\r
+\r
+/****s* User-mode Access Layer/ual_query_srq_ioctl_t\r
+* NAME\r
+* ual_query_srq_ioctl_t\r
+*\r
+* DESCRIPTION\r
+* IOCTL structure containing the input and output parameters for\r
+* ib_query_srq\r
+*\r
+* SYNOPSIS\r
+*/\r
+typedef union _ual_query_srq_ioctl\r
+{\r
+ struct _ual_query_srq_ioctl_in\r
+ {\r
+ ci_umv_buf_t umv_buf;\r
+ uint64_t h_srq;\r
+\r
+ } in;\r
+ struct _ual_query_srq_ioctl_out\r
+ {\r
+ ci_umv_buf_t umv_buf;\r
+ ib_api_status_t status;\r
+ ib_srq_attr_t srq_attr;\r
+\r
+ } out;\r
+\r
+} ual_query_srq_ioctl_t;\r
+/*\r
+* FIELDS\r
+* in.umv_buf\r
+* Opaque to IBAL buffer descriptor to allow the user-mode HCA library to\r
+* exchange private information with the kernel-mode HCA driver.\r
+*\r
+* h_srq\r
+* Handle to the srq whose attributes to query.\r
+*\r
+* out.umv_buf\r
+* Returns the status from the HCA driver to the user-mode HCA library,\r
+* along with any vendor specific output information.\r
+*\r
+* out.status\r
+* Status of the operation.\r
+*\r
+* out.srq_attr\r
+* Attributes of the srq.\r
+*****/\r
+\r
+\r
+\r
+/****s* User-mode Access Layer/ual_destroy_srq_ioctl_t\r
+* NAME\r
+* ual_destroy_srq_ioctl_t\r
+*\r
+* DESCRIPTION\r
+* IOCTL structure containing the input and output parameters for\r
+* ib_destroy_srq\r
+*\r
+* SYNOPSIS\r
+*/\r
+typedef union _ual_destroy_srq_ioctl\r
+{\r
+ struct _ual_destroy_srq_ioctl_in\r
+ {\r
+ uint64_t h_srq;\r
+\r
+ } in;\r
+ struct _ual_destroy_srq_ioctl_out\r
+ {\r
+ ib_api_status_t status;\r
+\r
+ } out;\r
+\r
+} ual_destroy_srq_ioctl_t;\r
+/*\r
+* FIELDS\r
+* in.h_srq\r
+* Handle of the srq that needs to be destroyed.\r
+*\r
+* out.status\r
+* Status of the operation.\r
+*****/\r
+\r
\r
\r
/****s* User-mode Access Layer/ual_create_qp_ioctl_t\r
*****/\r
\r
\r
+/****s* User-mode Access Layer/ual_post_srq_recv_ioctl_t\r
+* NAME\r
+* ual_post_srq_recv_ioctl_t\r
+*\r
+* DESCRIPTION\r
+* IOCTL structure containing the input and output parameters for\r
+* ib_post_srq_recv\r
+*\r
+* SYNOPSIS\r
+*/\r
+typedef union _ual_post_srq_recv_ioctl\r
+{\r
+ struct _ual_post_srq_recv_ioctl_in\r
+ {\r
+ uint64_t h_srq;\r
+ uint32_t num_wr;\r
+ uint32_t num_ds;\r
+ ib_recv_wr_t recv_wr[1];\r
+ /* Additional work requests follow, followed by data segments. */\r
+\r
+ } in;\r
+ struct _ual_post_srq_recv_ioctl_out\r
+ {\r
+ ib_api_status_t status;\r
+ uint32_t failed_cnt;\r
+\r
+ } out;\r
+\r
+} ual_post_srq_recv_ioctl_t;\r
+/*\r
+* FIELDS\r
+* in.h_srq\r
+* A handle to SRQ where the work request is being posted.\r
+*\r
+* in.num_wr\r
+* Number of work request items in the array of work requests.\r
+*\r
+* in.num_ds\r
+* Number of data segments following the array of work requests.\r
+*\r
+* in.recv_wr\r
+* First work request in the array of work requests being posted.\r
+*\r
+* out.status\r
+* Status of the operation.\r
+*\r
+* failed_cnt\r
+* Number of work request that failed.\r
+*****/\r
+\r
+\r
\r
/****s* User-mode Access Layer/ual_post_recv_ioctl_t\r
* NAME\r
/*\r
-* Copyright (c) 2005 Mellanox Technologies. All rights reserved.\r
-* Copyright (c) 2005 SilverStorm Technologies. All rights reserved.\r
-*\r
-* This software is available to you under the OpenIB.org BSD license\r
-* below:\r
-*\r
-* Redistribution and use in source and binary forms, with or\r
-* without modification, are permitted provided that the following\r
-* conditions are met:\r
-*\r
-* - Redistributions of source code must retain the above\r
-* copyright notice, this list of conditions and the following\r
-* disclaimer.\r
-*\r
-* - Redistributions in binary form must reproduce the above\r
-* copyright notice, this list of conditions and the following\r
-* disclaimer in the documentation and/or other materials\r
-* provided with the distribution.\r
-*\r
-* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,\r
-* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\r
-* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND\r
-* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS\r
-* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN\r
-* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN\r
-* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\r
-* SOFTWARE.\r
-*\r
-* $Id$\r
-*/\r
+ * Copyright (c) 2005 Mellanox Technologies. All rights reserved.\r
+ * Copyright (c) 2005 SilverStorm Technologies. All rights reserved.\r
+ *\r
+ * This software is available to you under the OpenIB.org BSD license\r
+ * below:\r
+ *\r
+ * Redistribution and use in source and binary forms, with or\r
+ * without modification, are permitted provided that the following\r
+ * conditions are met:\r
+ *\r
+ * - Redistributions of source code must retain the above\r
+ * copyright notice, this list of conditions and the following\r
+ * disclaimer.\r
+ *\r
+ * - Redistributions in binary form must reproduce the above\r
+ * copyright notice, this list of conditions and the following\r
+ * disclaimer in the documentation and/or other materials\r
+ * provided with the distribution.\r
+ *\r
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,\r
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\r
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND\r
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS\r
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN\r
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN\r
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\r
+ * SOFTWARE.\r
+ *\r
+ * $Id$\r
+ */\r
\r
/* This file is shared between user- and kernel-mode */\r
\r
* definition.\r
*/\r
#define VERBS_MAJOR_VER (0x0001)\r
-#define VERBS_MINOR_VER (0x0003)\r
+#define VERBS_MINOR_VER (0x0004)\r
\r
#define VERBS_VERSION (((VERBS_MAJOR_VER) << 16) | (VERBS_MINOR_VER))\r
#define MK_VERBS_VERSION(maj,min) ((((maj) & 0xFFFF) << 16) | \\r
*********\r
*/\r
\r
+/****f* Verbs/ci_create_srq\r
+* NAME\r
+* ci_create_srq -- Create a Shared Receive Queue for the specified HCA\r
+* SYNOPSIS\r
+*/\r
+\r
+typedef ib_api_status_t\r
+(*ci_create_srq) (\r
+ IN const ib_pd_handle_t h_pd,\r
+ IN const void *srq_context,\r
+ IN const ib_srq_attr_t * const p_srq_attr,\r
+ OUT ib_srq_handle_t *ph_srq,\r
+ IN OUT ci_umv_buf_t *p_umv_buf OPTIONAL );\r
+/*\r
+* DESCRIPTION\r
+* A new shared receive queue is created on the specified HCA. The initial set of\r
+* parameters is provided by the srq_attr_mask/p_srq_attr parameters. The newly created\r
+* shared receive queue with its attributes is returned in the srq_query_attr structure.\r
+* PARAMETERS\r
+* h_pd\r
+* [in] Handle to Protection Domain\r
+* srq_context\r
+* [in] A user-specified context passed in an asynchronous error callback.\r
+* p_srq_attr\r
+* [in out] Initial attributes with which the srq must be created.\r
+* ph_srq\r
+* [out] Handle to the queue pair newly created.\r
+* p_umv_buf\r
+* [in out] Vendor specific parameter to support user mode IO.\r
+* RETURN VALUE\r
+* IB_SUCCESS\r
+* The queue pair is successfully created with the provided initial\r
+* attributes.\r
+* IB_INSUFFICIENT_RESOURCES\r
+* Insufficient resources to complete request.\r
+* IB_INVALID_PD_HANDLE\r
+* pd_handle supplied in the qp_create_attr is invalid\r
+* IB_INVALID_SERVICE_TYPE\r
+* Invalid service type.\r
+* IB_INVALID_MAX_WRS\r
+* Max WRS capacity exceeded\r
+* IB_INVALID_MAX_SGE\r
+* Max Scatter gather element request exceeds HCA capability\r
+* IB_UNSUPPORTED\r
+* Unreliable datagram not supported\r
+* IB_INVALID_PARAMETER\r
+* The parameter p_create_attr is invalid.\r
+* NOTES\r
+* If any of the initial parameters is not valid, the queue pair is not\r
+* created. If the routine call is not successful then the contents of\r
+* qp_query_attr and qp_handle is undefined.\r
+* SEE ALSO\r
+* ci_query_qp, ci_modify_qp, ci_destroy_qp\r
+******\r
+*/\r
+\r
+\r
+/****f* Verbs/ci_modify_srq\r
+* NAME\r
+* ci_modify_srq -- Modify attributes of the specified SRQ.\r
+* SYNOPSIS\r
+*/\r
+\r
+typedef ib_api_status_t\r
+(*ci_modify_srq) (\r
+ IN const ib_srq_handle_t h_srq,\r
+ IN const ib_srq_attr_t* const p_srq_attr,\r
+ IN const ib_srq_attr_mask_t srq_attr_mask,\r
+ IN OUT ci_umv_buf_t *p_umv_buf OPTIONAL );\r
+/*\r
+* DESCRIPTION\r
+* This routine is used to modify the srq states or other attributes of the\r
+* srq. On successful completion, the requested state transition is performed\r
+* and the srq is transitioned to the required state.\r
+* PARAMETERS\r
+* h_srq\r
+* [in] Handle to the queue pair whose state is to be modified.\r
+* p_srq_attr\r
+* [in] Initial attributes with which the srq must be created.\r
+* srq_attr_mask\r
+* [in] Flags, specifying valid fields in ib_srq_attr_t structure.\r
+* p_umv_buf\r
+* [in out] Vendor specific parameter to support user mode IO.\r
+* RETURN VALUE\r
+* IB_SUCCESS\r
+* The operation was successful and the QP attributes are modified\r
+* to the requested state.\r
+* IB_INSUFFICIENT_RESOURCES\r
+* Insufficient resources to complete the requested operation.\r
+* IB_INVALID_QP_HANDLE\r
+* Invalid QP handle was passed.\r
+* IB_UNSUPPORTED\r
+* Requested operation is not supported, for e.g. Atomic operations.\r
+* IB_QP_INVALID_STATE\r
+* Invalid state transition request. Current QP state not in allowable\r
+* state.\r
+* IB_INVALID_PARAMETER\r
+* The parameter p_modify_attr is not valid.\r
+* SEE ALSO\r
+* ci_create_qp, ci_destroy_qp, ci_query_qp\r
+******\r
+*/\r
+\r
+\r
+/****f* Verbs/ci_query_srq\r
+* NAME\r
+* ci_query_srq -- Query the current SRQ attributes\r
+* SYNOPSIS\r
+*/\r
+\r
+typedef ib_api_status_t\r
+(*ci_query_srq) (\r
+ IN const ib_srq_handle_t h_srq,\r
+ OUT ib_srq_attr_t* const p_srq_attr,\r
+ IN OUT ci_umv_buf_t *p_umv_buf OPTIONAL );\r
+/*\r
+* DESCRIPTION\r
+* This routine queries the current attributes for the srq\r
+* corresponding to h_srq. The attributes are returned in p_query_attr.\r
+* Depending on the current state of the srq, some of the fields in the\r
+* attribute structure may not be valid.\r
+* PARAMETERS\r
+* h_srq\r
+* [in] Handle to the srq for which the attributes are being retrieved\r
+* p_srq_attr\r
+* [out] Pointer to the ib_srq_query_t structure where the current\r
+* attributes of the srq is returned.\r
+* p_umv_buf\r
+* [in out] Vendor specific parameter to support user mode IO.\r
+* RETURN VALUE\r
+* IB_SUCCESS\r
+* The values returned in p_qp_attr are valid.\r
+* IB_INVALID_QP_HANDLE\r
+* The h_qp supplied is not a valid handle.\r
+* IB_INVALID_PARAMETER\r
+* Parameter p_qp_attr is not valid.\r
+* SEE ALSO\r
+* ci_create_qp, ci_destroy_qp, ci_modify_srq\r
+*****\r
+*/\r
+\r
+\r
+/****f* Verbs/ci_destroy_srq\r
+* NAME\r
+* ci_destroy_srq -- Destroy the specified Shared Receive Queue.\r
+* SYNOPSIS\r
+*/\r
+\r
+typedef ib_api_status_t\r
+(*ci_destroy_srq) (\r
+ IN const ib_srq_handle_t h_srq );\r
+/*\r
+* DESCRIPTION\r
+* Destroys the associated srq. The srq could have outstanding work requests\r
+* when this call is made. Any outstanding work requests *SHALL NOT* be\r
+* completed after this routine returns.\r
+* PARAMETERS\r
+* h_srq\r
+* [in] Handle to the srq that needs to be destroyed.\r
+* RETURN VALUE\r
+* IB_SUCCESS\r
+* The intend to destroy this queue pair is registered and no further\r
+* work requests will be processed. When no pending callbacks are in\r
+* progress, the destroy_callback function is invoked which marks the\r
+* destruction of the resource. The consumer can be guaranteed that\r
+* no future callbacks will be propagated on behalf of this resource.\r
+* IB_INVALID_QP_HANDLE\r
+* The handle passed is invalid.\r
+* IB_RESOURCE_BUSY\r
+* If the queue pair is a unreliable datagram service type, and\r
+* is still bound to a multicast group.\r
+* NOTES\r
+* This call cannot be called from any of the notification functions invoked\r
+* by the Verbs driver. For e.g. the completion handler or the async error\r
+* callback provided during the ci_open_ca() call. The call will block until\r
+* all references to this adapter object is closed which includes all the\r
+* pending callbacks returning back to the verbs provider driver.\r
+* SEE ALSO\r
+* ci_create_srq, ci_query_srq, ci_modify_srq\r
+******\r
+*/\r
+\r
+\r
/****f* Verbs/ci_create_qp\r
* NAME\r
* ci_create_qp -- Create a Queue Pair for the specified HCA\r
******\r
*/\r
\r
+/****f* Verbs/ci_post_srq_recv\r
+* NAME\r
+* ci_post_srq_recv -- Post a work request to a shared receive queue.\r
+* SYNOPSIS\r
+*/\r
+\r
+typedef ib_api_status_t\r
+(*ci_post_srq_recv) (\r
+ IN const ib_srq_handle_t h_srq,\r
+ IN ib_recv_wr_t* const p_recv_wr,\r
+ OUT ib_recv_wr_t **pp_failed );\r
+/*\r
+* DESCRIPTION\r
+* This routine allows queuing a work request to the receive side of a\r
+* shared receive queue. The work_req holds necessary data to satisfy an incoming\r
+* receive message. If an attempt is made to queue more work requests than\r
+* what is available, an error is returned.\r
+* PARAMETERS\r
+* h_srq\r
+* [in] Handle to the queue pair to which the receive work request is being\r
+* posted.\r
+* p_recv_wr\r
+* [in] Holds the WRs to be posted to the receive queue.\r
+* pp_failed\r
+* [out] If any entry could not be posted with the CI, then this points\r
+* to the first WR that completed unsuccessfully. If all entries are\r
+* posted, then this field is set to NULL on successful exit.\r
+* RETURN VALUE\r
+* Any unsuccessful status indicates the status of the first failed request.\r
+*\r
+* IB_SUCCESS\r
+* The work request was successfully queued to the receive side of the QP.\r
+* IB_INVALID_SRQ_HANDLE\r
+* srq_handle supplied is not valid.\r
+* IB_INSUFFICIENT_RESOURCES\r
+* The qp has exceeded its receive queue depth than what is has been\r
+* configured.\r
+* IB_INVALID_WR_TYPE\r
+* Invalid work request type found in the request.\r
+* SEE ALSO\r
+******\r
+*/\r
+\r
+\r
+\r
/****f* Verbs/ci_post_recv\r
* NAME\r
* ci_post_recv -- Post a work request to the receive queue of a queue pair.\r
ci_modify_av modify_av;\r
ci_destroy_av destroy_av;\r
\r
+ /*\r
+ * SRQ Management Verbs\r
+ */\r
+ ci_create_srq create_srq;\r
+ ci_modify_srq modify_srq;\r
+ ci_query_srq query_srq;\r
+ ci_destroy_srq destroy_srq;\r
+\r
/*\r
* QP Management Verbs\r
*/\r
*/\r
ci_post_send post_send;\r
ci_post_recv post_recv;\r
+ ci_post_srq_recv post_srq_recv;\r
\r
/*\r
* Completion Processing and\r
typedef struct _mlnx_fmr* __ptr64 mlnx_fmr_handle_t;\r
typedef struct _ib_mw* __ptr64 ib_mw_handle_t;\r
typedef struct _ib_qp* __ptr64 ib_qp_handle_t;\r
+typedef struct _ib_srq* __ptr64 ib_srq_handle_t;\r
typedef struct _ib_cq* __ptr64 ib_cq_handle_t;\r
typedef struct _ib_av* __ptr64 ib_av_handle_t;\r
typedef struct _ib_mcast* __ptr64 ib_mcast_handle_t;\r
IB_INVALID_MAX_WRS,\r
IB_INVALID_MAX_SGE,\r
IB_INVALID_CQ_SIZE,\r
+ IB_INVALID_SRQ_SIZE,\r
IB_INVALID_SERVICE_TYPE,\r
IB_INVALID_GID,\r
IB_INVALID_LID,\r
IB_INVALID_AV_HANDLE,\r
IB_INVALID_CQ_HANDLE,\r
IB_INVALID_QP_HANDLE,\r
+ IB_INVALID_SRQ_HANDLE,\r
IB_INVALID_PD_HANDLE,\r
IB_INVALID_MR_HANDLE,\r
IB_INVALID_FMR_HANDLE,\r
IB_VERBS_PROCESSING_DONE, /* See Notes above */\r
IB_INVALID_WR_TYPE,\r
IB_QP_IN_TIMEWAIT,\r
+ IB_EE_IN_TIMEWAIT,\r
IB_INVALID_PORT,\r
IB_NOT_DONE,\r
IB_INVALID_INDEX,\r
IB_AE_PORT_ACTIVE,\r
IB_AE_PORT_DOWN,\r
IB_AE_CLIENT_REREGISTER,\r
+ IB_AE_SRQ_LIMIT_REACHED,\r
+ IB_AE_SRQ_CATAS_ERROR,\r
+ IB_AE_SRQ_QP_LAST_WQE_REACHED,\r
IB_AE_UNKNOWN /* ALWAYS LAST ENUM VALUE */\r
\r
} ib_async_event_t;\r
* IB_AE_CLIENT_REREGISTER\r
* The SM idicate to client to reregister its SA records.\r
*\r
+* IB_AE_SRQ_LIMIT_REACHED\r
+* Reached SRQ low watermark\r
+*\r
+* IB_AE_SRQ_CATAS_ERROR\r
+* An error occurred while processing or accessing the SRQ that prevents\r
+* dequeuing a WQE from the SRQ and reporting of receive completions.\r
+*\r
+* IB_AE_SRQ_QP_LAST_WQE_REACHED\r
+* An event, issued for a QP, associated with a shared receive queue, when\r
+* a CQE is generated for the last WQE, or\r
+* the QP gets in the Error State and there are no more WQEs on the RQ.\r
+*\r
* IB_AE_UNKNOWN\r
* An unknown error occurred which cannot be attributed to any\r
* resource; behavior is indeterminate.\r
uint32_t max_qps_per_mcast_grp;\r
uint32_t max_fmr;\r
uint32_t max_map_per_fmr;\r
+ uint32_t max_srq;\r
+ uint32_t max_srq_wrs;\r
+ uint32_t max_srq_sges;\r
\r
/*\r
* local_ack_delay:\r
boolean_t av_port_check;\r
boolean_t change_primary_port;\r
boolean_t modify_wr_depth;\r
+ boolean_t modify_srq_depth;\r
boolean_t current_qp_state_support;\r
boolean_t shutdown_port_capability;\r
boolean_t init_type_support;\r
* Maximum limit on number of responder resources for incomming RDMA\r
* operations on QPs.\r
*\r
+* max_fmr\r
+* Maximum number of Fast Memory Regions supported.\r
+*\r
+* max_map_per_fmr\r
+* Maximum number of mappings, supported by a Fast Memory Region.\r
+*\r
+* max_srq\r
+* Maximum number of Shared Receive Queues supported.\r
+*\r
+* max_srq_wrs\r
+* Maximum number of work requests supported by this SRQ.\r
+*\r
+* max_srq_sges\r
+* Maximum number of scatter gather elements supported per work request on SRQ.\r
+*\r
* max_resp_res\r
* Maximum number of responder resources per HCA, with this HCA used as\r
* the target.\r
* Indicates ability to modify QP depth during a modify QP operation.\r
* Check the verb specification for permitted states.\r
*\r
+* modify_srq_depth\r
+* Indicates ability to modify SRQ depth during a modify SRQ operation.\r
+* Check the verb specification for permitted states.\r
+*\r
* current_qp_state_support\r
* Indicates ability of the HCA to support the current QP state modifier\r
* during a modify QP operation.\r
IB_QPT_RAW_ETHER,\r
IB_QPT_MAD, /* InfiniBand Access Layer */\r
IB_QPT_QP0_ALIAS, /* InfiniBand Access Layer */\r
- IB_QPT_QP1_ALIAS /* InfiniBand Access Layer */\r
-\r
+ IB_QPT_QP1_ALIAS, /* InfiniBand Access Layer */\r
+ IB_QPT_UNKNOWN\r
} ib_qp_type_t;\r
/*\r
* VALUES\r
*****/\r
\r
\r
+/****f* IBA Base: Types/ib_get_qp_type_str\r
+* NAME\r
+* ib_get_qp_type_str\r
+*\r
+* DESCRIPTION\r
+* Returns a string for the specified QP type\r
+*\r
+* SYNOPSIS\r
+*/\r
+AL_EXPORT const char* AL_API\r
+ib_get_qp_type_str(\r
+ IN uint8_t qp_type );\r
+\r
+/*\r
+* PARAMETERS\r
+* qp_type\r
+* [in] Encoded QP type as defined in the\r
+* QP attribute.\r
+\r
+* RETURN VALUES\r
+* Pointer to the QP type string.\r
+*\r
+* NOTES\r
+*\r
+* SEE ALSO\r
+* ib_qp_type_t\r
+*********/\r
+\r
/****d* Access Layer/ib_access_t\r
* NAME\r
* ib_access_t\r
} ib_apm_state_t;\r
/*****/\r
\r
+/****d* Access Layer/ib_srq_attr_mask_t\r
+* NAME\r
+* ib_srq_attr_mask_t\r
+*\r
+* DESCRIPTION\r
+* Indicates valid fields in ib_srq_attr_t structure\r
+*\r
+* SYNOPSIS\r
+*/\r
+typedef enum _ib_srq_attr_mask {\r
+ IB_SRQ_MAX_WR = 1 << 0,\r
+ IB_SRQ_LIMIT = 1 << 1,\r
+} ib_srq_attr_mask_t;\r
+/*****/\r
+\r
+\r
+/****s* Access Layer/ib_srq_attr_t\r
+* NAME\r
+* ib_srq_attr_t\r
+*\r
+* DESCRIPTION\r
+* Attributes used to initialize a shared receive queue at creation time.\r
+*\r
+* SYNOPSIS\r
+*/\r
+typedef struct _ib_srq_attr {\r
+ uint32_t max_wr;\r
+ uint32_t max_sge;\r
+ uint32_t srq_limit;\r
+} ib_srq_attr_t;\r
+/*\r
+* FIELDS\r
+* max_wr\r
+* Specifies the max number of work requests on the SRQ.\r
+*\r
+* max_sge\r
+* Specifies the max number of scatter/gather elements in one work request.\r
+*\r
+* srq_limit\r
+* Specifies the low water mark for SRQ.\r
+*\r
+* SEE ALSO\r
+* ib_qp_type_t, ib_srq_attr_mask_t\r
+*****/\r
+\r
\r
/****s* Access Layer/ib_qp_create_t\r
* NAME\r
\r
ib_cq_handle_t h_sq_cq;\r
ib_cq_handle_t h_rq_cq;\r
+ ib_srq_handle_t h_srq;\r
\r
boolean_t sq_signaled;\r
\r
* work request completions. This handle must be NULL if the type is\r
* IB_QPT_MAD, IB_QPT_QP0_ALIAS, or IB_QPT_QP1_ALIAS.\r
*\r
+* h_srq\r
+* A handle to the SRQ through which receive completions are obtained.\r
+* Must be NULL when the QP is not associated with an SRQ.\r
+*\r
* sq_signaled\r
* A flag that is used to indicate whether the queue pair will signal\r
* an event upon completion of a send work request. If set to\r
\r
ib_cq_handle_t h_sq_cq;\r
ib_cq_handle_t h_rq_cq;\r
+ ib_srq_handle_t h_srq;\r
\r
boolean_t sq_signaled;\r
\r
* IB resources provided by HCAs.\r
*********/\r
\r
-#define AL_INTERFACE_VERSION (9)\r
+#define AL_INTERFACE_VERSION (10)\r
\r
\r
\r
(*ib_pfn_destroy_av_t)(\r
IN const ib_av_handle_t h_av );\r
\r
+typedef ib_api_status_t\r
+(*ib_pfn_create_srq_t)(\r
+ IN const ib_pd_handle_t h_pd,\r
+ IN const ib_srq_attr_t* const p_srq_attr,\r
+ IN const void* const srq_context,\r
+ IN const ib_pfn_event_cb_t srq_event_cb OPTIONAL,\r
+ OUT ib_srq_handle_t* const ph_srq );\r
+\r
+typedef ib_api_status_t\r
+(*ib_pfn_query_srq_t)(\r
+ IN const ib_srq_handle_t h_srq,\r
+ OUT ib_srq_attr_t* const p_srq_attr );\r
+\r
+typedef ib_api_status_t\r
+(*ib_pfn_modify_srq_t)(\r
+ IN const ib_srq_handle_t h_srq,\r
+ IN const ib_srq_attr_t* const p_srq_attr,\r
+ IN const ib_srq_attr_mask_t srq_attr_mask );\r
+\r
+typedef ib_api_status_t\r
+(*ib_pfn_destroy_srq_t)(\r
+ IN const ib_srq_handle_t h_srq,\r
+ IN const ib_pfn_destroy_cb_t destroy_cb OPTIONAL );\r
+\r
+typedef ib_api_status_t\r
+(*ib_pfn_post_srq_recv_t)(\r
+ IN const ib_srq_handle_t h_srq,\r
+ IN ib_recv_wr_t* const p_recv_wr,\r
+ OUT ib_recv_wr_t **pp_recv_failure OPTIONAL );\r
+\r
typedef ib_api_status_t\r
(*ib_pfn_create_qp_t)(\r
IN const ib_pd_handle_t h_pd,\r
mlnx_pfn_map_phys_fmr_t map_phys_mlnx_fmr;\r
mlnx_pfn_unmap_fmr_t unmap_mlnx_fmr;\r
mlnx_pfn_destroy_fmr_t destroy_mlnx_fmr;\r
+ ib_pfn_create_srq_t create_srq;\r
+ ib_pfn_query_srq_t query_srq;\r
+ ib_pfn_modify_srq_t modify_srq;\r
+ ib_pfn_destroy_srq_t destroy_srq;\r
+ ib_pfn_post_srq_recv_t post_srq_recv;\r
\r
} ib_al_ifc_t;\r
\r
\r
/********/\r
\r
+/****f* user-mode Verbs/uvp_pre_create_srq\r
+* NAME\r
+* uvp_pre_create_srq -- Pre-ioctl function to Create a Shared Receive Queue.\r
+*\r
+* SYNOPSIS\r
+*/\r
+\r
+typedef ib_api_status_t\r
+(AL_API *uvp_pre_create_srq) (\r
+ IN const ib_pd_handle_t h_uvp_pd,\r
+ IN const ib_srq_attr_t* const p_srq_attr,\r
+ IN OUT ci_umv_buf_t *p_umv_buf );\r
+\r
+/*\r
+* DESCRIPTION\r
+* uvp_pre_create_srq() is implemented by vendor. It is the pre-ioctl routine\r
+* for ib_create_srq().\r
+*\r
+* PARAMETERS\r
+* h_uvp_pd\r
+* [in] Vendor's Protection domain handle in user-mode library.\r
+* p_srq_attr\r
+* [in] Initial attributes with which the srq must be created.\r
+* p_umv_buf\r
+* [in out] On input, UAL provides this buffer template.\r
+* On return from this function, p_umv_buf contains\r
+* any vendor-specific record to be exchanged with the vendor's\r
+* HCA driver.\r
+*\r
+* RETURN VALUE\r
+* IB_SUCCESS\r
+* The pre-ioctl call is successful.\r
+* IB_INVALID_PD_HANDLE\r
+* The PD handle is invalid.\r
+* IB_UNSUPPORTED\r
+* Shared receive queues are not supported by the channel adapter.\r
+* IB_INVALID_MAX_WRS\r
+* The requested maximum work request depth could not be supported.\r
+* IB_INVALID_MAX_SGE\r
+* The requested maximum number of scatter-gather entries for the\r
+* receive queue could not be supported.\r
+* IB_INSUFFICIENT_RESOURCES\r
+* Insufficient resources in Vendor library to complete the call.\r
+* IB_INVALID_PARAMETER\r
+* At least one parameter is invalid.\r
+*\r
+* PORTABILITY\r
+* User Mode\r
+*\r
+* SEE ALSO\r
+* uvp_post_create_srq_t, uvp_pre_query_srq, uvp_post_query_srq_t, uvp_pre_modify_srq,\r
+* uvp_post_modify_srq_t, uvp_pre_destroy_srq, uvp_post_destroy_srq_t\r
+*\r
+********/\r
+\r
+/********/\r
+\r
+/****f* user-mode Verbs/uvp_post_create_srq_t\r
+* NAME\r
+* uvp_post_create_srq_t -- Post-ioctl function to Create a Shared Receive Queue.\r
+*\r
+* SYNOPSIS\r
+*/\r
+\r
+typedef void\r
+(AL_API *uvp_post_create_srq_t) (\r
+ IN const ib_pd_handle_t h_uvp_pd,\r
+ IN ib_api_status_t ioctl_status,\r
+ OUT ib_srq_handle_t *ph_uvp_srq,\r
+ IN ci_umv_buf_t *p_umv_buf );\r
+\r
+/*\r
+* DESCRIPTION\r
+* uvp_post_create_srq_t() is implemented by vendor. It is the post-ioctl routine\r
+* for ib_create_srq().\r
+*\r
+* PARAMETERS\r
+* h_uvp_pd\r
+* [in] Vendor's Protection domain handle in user-mode library.\r
+* ioctl_status\r
+* [in] The ioctl status of the AL API.\r
+* ph_uvp_srq\r
+* [out] Vendor's srq handle for the newly created srq (in user-mode\r
+* library).\r
+* p_umv_buf\r
+* [in out] On input, it contains any vendor-specific private information\r
+* exchanged with the vendor's Verbs Provider Driver (uvp_pre_create_srq).\r
+* Vendor is expected to check vendor-specific status in\r
+* umv_buf as appropriate.\r
+*\r
+* RETURN VALUE\r
+* This function does not return a value.\r
+*\r
+* PORTABILITY\r
+* User Mode\r
+*\r
+* SEE ALSO\r
+* uvp_pre_create_srq, uvp_pre_query_srq, uvp_post_query_srq_t, uvp_pre_modify_srq,\r
+* uvp_post_modify_srq_t, uvp_pre_destroy_srq, uvp_post_destroy_srq_t\r
+*\r
+********/\r
+\r
+/********/\r
+\r
+/****f* user-mode Verbs/uvp_pre_modify_srq\r
+* NAME\r
+* uvp_pre_modify_srq -- Pre-ioctl function to Modify attributes of the\r
+* specified srq.\r
+*\r
+* SYNOPSIS\r
+*\r
+*/\r
+\r
+typedef ib_api_status_t\r
+(AL_API *uvp_pre_modify_srq) (\r
+ IN const ib_srq_handle_t h_uvp_srq,\r
+ IN const ib_srq_attr_t * const p_srq_attr,\r
+ IN const ib_srq_attr_mask_t srq_attr_mask,\r
+ IN OUT ci_umv_buf_t *p_umv_buf );\r
+\r
+/*\r
+* DESCRIPTION\r
+* uvp_pre_modify_srq() is implemented by vendor to modify the attributes of a\r
+* srq. It is the pre-ioctl routine for ib_modify_srq().\r
+*\r
+* PARAMETERS\r
+* h_uvp_srq\r
+* [in] Vendor's handle to the SRQ (in user-mode library)\r
+* whose attributes are to be modified.\r
+* p_srq_attr\r
+* [in] Specifies what attributes need to be modified in the srq.\r
+* srq_attr_mask\r
+* [in] Specifies which fields of ib_srq_attr_t are valid.\r
+* p_umv_buf\r
+* [in out] On input, UAL provides this buffer template.\r
+* On return from this function, p_umv_buf contains\r
+* any vendor-specific record to be exchanged with the vendor's\r
+* HCA driver.\r
+*\r
+* RETURN VALUE\r
+* IB_SUCCESS\r
+* The pre-ioctl call is successful.\r
+* IB_INSUFFICIENT_RESOURCES\r
+* Insufficient resources to complete the requested operation.\r
+* IB_INVALID_SRQ_HANDLE\r
+* Invalid srq handle.\r
+* IB_UNSUPPORTED\r
+* Requested operation is not supported, e.g. SRQ resize.\r
+*\r
+* PORTABILITY\r
+* User mode\r
+*\r
+* SEE ALSO\r
+* uvp_pre_create_srq, uvp_post_create_srq_t, uvp_pre_query_srq, uvp_post_query_srq_t,\r
+* uvp_post_modify_srq_t, uvp_pre_destroy_srq, uvp_post_destroy_srq_t\r
+*\r
+********/\r
+\r
+/********/\r
+\r
+/****f* user-mode Verbs/uvp_post_modify_srq_t\r
+* NAME\r
+* uvp_post_modify_srq_t -- Post-ioctl function to Modify attributes of\r
+* the specified srq.\r
+*\r
+* SYNOPSIS\r
+*/\r
+\r
+typedef void\r
+(AL_API *uvp_post_modify_srq_t) (\r
+ IN const ib_srq_handle_t h_uvp_srq,\r
+ IN ib_api_status_t ioctl_status,\r
+ IN ci_umv_buf_t *p_umv_buf );\r
+\r
+/*\r
+* DESCRIPTION\r
+* uvp_post_modify_srq_t() is implemented by vendor to modify the srq attributes.\r
+* It is the post-ioctl routine for ib_modify_srq().\r
+*\r
+* PARAMETERS\r
+* h_uvp_srq\r
+* [in] Vendor's handle to the SRQ (in user-mode library)\r
+* whose attributes are modified.\r
+* ioctl_status\r
+* [in] The ioctl status of the AL API.\r
+* p_umv_buf\r
+* [in out] On input, it contains any vendor-specific private information\r
+* exchanged with the vendor's Verbs Provider Driver (uvp_pre_modify_srq).\r
+* Vendor is expected to check vendor-specific status in\r
+* umv_buf as appropriate.\r
+*\r
+* RETURN VALUE\r
+* This function does not return a value.\r
+*\r
+* PORTABILITY\r
+* User mode\r
+*\r
+* SEE ALSO\r
+* uvp_pre_create_srq, uvp_post_create_srq_t, uvp_pre_query_srq, uvp_post_query_srq_t,\r
+* uvp_pre_modify_srq, uvp_pre_destroy_srq, uvp_post_destroy_srq_t\r
+*\r
+********/\r
+\r
+/********/\r
+\r
+/****f* user-mode Verbs/uvp_pre_query_srq\r
+* NAME\r
+* uvp_pre_query_srq -- Pre-ioctl function to Query the attributes of the srq\r
+*\r
+* SYNOPSIS\r
+*/\r
+\r
+typedef ib_api_status_t\r
+(AL_API *uvp_pre_query_srq) (\r
+ IN ib_srq_handle_t h_uvp_srq,\r
+ IN OUT ci_umv_buf_t *p_umv_buf );\r
+\r
+/*\r
+* DESCRIPTION\r
+* uvp_pre_query_srq() is implemented by vendor. It is the pre-ioctl routine\r
+* for the AL call ib_query_srq().\r
+*\r
+* PARAMETERS\r
+* h_uvp_srq\r
+* [in] Vendor's handle to the srq (in user-mode library).\r
+* p_umv_buf\r
+* [in out] On input, UAL provides this buffer template.\r
+* On return from this function, p_umv_buf contains\r
+* any vendor-specific record to be exchanged with the vendor's\r
+* HCA driver.\r
+* RETURN VALUE\r
+* IB_SUCCESS\r
+* The pre-ioctl function succeeded.\r
+* IB_INVALID_SRQ_HANDLE\r
+* srq handle is invalid\r
+* IB_INSUFFICIENT_RESOURCES\r
+* Insufficient resources in Vendor library to complete the call.\r
+*\r
+* PORTABILITY\r
+* User Mode\r
+*\r
+* SEE ALSO\r
+* uvp_pre_create_srq, uvp_post_create_srq_t, uvp_post_query_srq_t, uvp_pre_modify_srq,\r
+* uvp_post_modify_srq_t, uvp_pre_destroy_srq, uvp_post_destroy_srq_t\r
+*\r
+*********/\r
+\r
+/********/\r
+\r
+/****f* user-mode Verbs/uvp_post_query_srq_t\r
+* NAME\r
+* uvp_post_query_srq_t -- Post-ioctl operation for user-mode ib_query_srq()\r
+*\r
+* SYNOPSIS\r
+*/\r
+\r
+typedef void\r
+(AL_API *uvp_post_query_srq_t) (\r
+ IN ib_srq_handle_t h_uvp_srq,\r
+ IN ib_api_status_t ioctl_status,\r
+ IN ib_srq_attr_t *p_query_attr,\r
+ IN ci_umv_buf_t *p_umv_buf );\r
+\r
+/*\r
+* DESCRIPTION\r
+* uvp_post_query_srq_t() is implemented by vendor. It is the post-ioctl routine\r
+* for ib_query_srq().\r
+* UAL provides the results of the query to the vendor library in this\r
+* post-ioctl routine.\r
+*\r
+* PARAMETERS\r
+* h_uvp_srq\r
+* [in] Vendor's handle to the srq (in user-mode library).\r
+* ioctl_status\r
+* [in] The ioctl status of the AL API.\r
+* p_query_attr\r
+* [in] srq attribute as returned by the ioctl.\r
+* p_umv_buf\r
+* [in out] On input, it contains any vendor-specific private information\r
+* exchanged with the vendor's Verbs Provider Driver (uvp_pre_query_srq).\r
+* Vendor is expected to check vendor-specific status in\r
+* umv_buf as appropriate.\r
+*\r
+* RETURN VALUE\r
+* This function does not return a value.\r
+*\r
+* PORTABILITY\r
+* User Mode\r
+*\r
+* SEE ALSO\r
+* uvp_pre_create_srq, uvp_post_create_srq_t, uvp_pre_query_srq, uvp_pre_modify_srq,\r
+* uvp_post_modify_srq_t, uvp_pre_destroy_srq, uvp_post_destroy_srq_t\r
+*\r
+*********/\r
+\r
+/********/\r
+\r
+/****f* user-mode Verbs/uvp_pre_destroy_srq\r
+* NAME\r
+* uvp_pre_destroy_srq -- Pre-ioctl function to Destroy a Shared Receive Queue.\r
+* SYNOPSIS\r
+*/\r
+\r
+typedef ib_api_status_t\r
+(AL_API *uvp_pre_destroy_srq) (\r
+ IN const ib_srq_handle_t h_uvp_srq );\r
+\r
+/*\r
+* DESCRIPTION\r
+* uvp_pre_destroy_srq() is the pre-ioctl routine implemented by vendor\r
+* to destroy srq.\r
+* UAL invokes this pre-ioctl routine to destroy srq.\r
+* The vendor is expected to perform any preliminary steps in preparation\r
+* for destroying the srq and perform any book-keeping.\r
+*\r
+* PARAMETERS\r
+* h_uvp_srq\r
+* [in] Vendor's Handle to the srq (in user-mode library)\r
+* that needs to be destroyed.\r
+* (This pre-ioctl takes no p_umv_buf parameter -- no vendor-specific\r
+* data is exchanged with the HCA driver on the destroy path.)\r
+*\r
+* RETURN VALUE\r
+* IB_SUCCESS\r
+* The pre-ioctl call is successful.\r
+*\r
+* PORTABILITY\r
+* User mode.\r
+*\r
+* SEE ALSO\r
+* uvp_pre_create_srq, uvp_post_create_srq_t, uvp_pre_query_srq, uvp_post_query_srq_t,\r
+* uvp_pre_modify_srq, uvp_post_modify_srq_t, uvp_post_destroy_srq_t\r
+*\r
+********/\r
+\r
+/********/\r
+\r
+\r
+/****f* user-mode Verbs/uvp_post_destroy_srq_t\r
+* NAME\r
+* uvp_post_destroy_srq_t -- Post-ioctl function to Destroy a Shared Receive Queue.\r
+*\r
+* SYNOPSIS\r
+*/\r
+\r
+typedef void\r
+(AL_API *uvp_post_destroy_srq_t) (\r
+ IN const ib_srq_handle_t h_uvp_srq,\r
+ IN ib_api_status_t ioctl_status );\r
+\r
+/*\r
+* DESCRIPTION\r
+* uvp_post_destroy_srq_t() is implemented by vendor. It is the post-ioctl\r
+* routine for ib_destroy_srq().\r
+* UAL invokes this post-ioctl routine to destroy srq when it receives\r
+* asynchronous notification from the user-mode proxy in kernel.\r
+*\r
+* PARAMETERS\r
+* h_uvp_srq\r
+* [in] Vendor's Handle to the srq (in user-mode library)\r
+* that needs to be destroyed.\r
+*\r
+* RETURN VALUE\r
+* This function does not return a value.\r
+*\r
+* PORTABILITY\r
+* User mode.\r
+*\r
+* SEE ALSO\r
+* uvp_pre_create_srq, uvp_post_create_srq_t, uvp_pre_query_srq, uvp_post_query_srq_t,\r
+* uvp_pre_modify_srq, uvp_post_modify_srq_t, uvp_pre_destroy_srq\r
+*\r
+********/\r
+\r
+/********/\r
+\r
/****f* user-mode Verbs/uvp_pre_create_qp\r
* NAME\r
* uvp_pre_create_qp -- Pre-ioctl function to Create a Queue Pair.\r
\r
/********/\r
\r
+/****f* user-mode Verbs/uvp_post_srq_recv\r
+* NAME\r
+* uvp_post_srq_recv -- Post a work request to a shared receive queue.\r
+*\r
+* SYNOPSIS\r
+*/\r
+\r
+typedef ib_api_status_t\r
+(AL_API *uvp_post_srq_recv) (\r
+ IN const void* __ptr64 h_srq,\r
+ IN ib_recv_wr_t* const p_recv_wr,\r
+ OUT ib_recv_wr_t** pp_recv_failure );\r
+\r
+/*\r
+* DESCRIPTION\r
+* This routine queues a work request to a shared receive queue. The\r
+* work_req holds necessary data to satisfy an incoming\r
+* receive message. If an attempt is made to queue more work requests than\r
+* what is available, an error is returned.\r
+*\r
+* PARAMETERS\r
+* h_srq\r
+* [in] Type-cast as appropriate for user/kernel mode, this is\r
+* the shared receive queue handle to which the receive work request is\r
+* being posted.\r
+* p_recv_wr\r
+* [in] List of recv work requests that needs to be posted.\r
+* pp_recv_failure\r
+* [out] The work requests that failed.\r
+\r
+* RETURN VALUE\r
+* Any unsuccessful status indicates the status of the first failed request.\r
+*\r
+* IB_SUCCESS\r
+* The work request was successfully queued to the SRQ.\r
+* IB_INVALID_SRQ_HANDLE\r
+* srq_handle supplied is not valid.\r
+* IB_INSUFFICIENT_RESOURCES\r
+* The SRQ has exceeded the receive queue depth with which it has been\r
+* configured.\r
+* IB_INVALID_WR_TYPE\r
+* Invalid work request type found in the request.\r
+*\r
+* PORTABILITY\r
+* Kernel & User mode.\r
+*\r
+* SEE ALSO\r
+*\r
+********/\r
+\r
+/********/\r
+\r
/****f* user-mode Verbs/uvp_peek_cq\r
* NAME\r
* uvp_peek_cq\r
uvp_pre_destroy_av pre_destroy_av;\r
uvp_post_destroy_av_t post_destroy_av;\r
\r
+ /*\r
+ * SRQ Management Verbs\r
+ */\r
+ uvp_pre_create_srq pre_create_srq;\r
+ uvp_post_create_srq_t post_create_srq;\r
+\r
+ uvp_pre_modify_srq pre_modify_srq;\r
+ uvp_post_modify_srq_t post_modify_srq;\r
+\r
+ uvp_pre_query_srq pre_query_srq;\r
+ uvp_post_query_srq_t post_query_srq;\r
+\r
+ uvp_pre_destroy_srq pre_destroy_srq;\r
+ uvp_post_destroy_srq_t post_destroy_srq;\r
+\r
+\r
/*\r
* QP Management Verbs\r
*/\r
*/\r
uvp_post_send post_send;\r
uvp_post_recv post_recv;\r
+ uvp_post_srq_recv post_srq_recv;\r
\r
/*\r
* Completion Processing and\r
/*\r
* Create QP Attributes\r
*/\r
+ cl_memclr(&qp_create, sizeof(ib_qp_create_t));\r
qp_create.sq_depth= 1;\r
qp_create.rq_depth= 1;\r
qp_create.sq_sge = 1;\r
}\r
\r
/* Create a qp */\r
+ cl_memclr(&qp_create, sizeof(ib_qp_create_t));\r
qp_create.qp_type = IB_QPT_RELIABLE_CONN;\r
qp_create.h_rdd = NULL;\r
qp_create.sq_depth = 255;\r
}\r
\r
/* Create a qp */\r
+ cl_memclr(&qp_create, sizeof(ib_qp_create_t));\r
qp_create.qp_type = IB_QPT_RELIABLE_CONN;\r
qp_create.h_rdd = NULL;\r
qp_create.sq_depth = 255;\r
printPortMTU(portPtr->mtu);\r
if(fullPrint){\r
printf("\t\tmax_msg_sz=0x%x (Max message size)\n", portPtr->max_msg_size);\r
- printf("\t\tcapability_mask=TBD\n");\r
+ printf("\t\tcapability_mask=0x%x (Port capability mask)\n", portPtr->cap);\r
printf("\t\tmax_vl_num=0x%x (Maximum number of VL supported by this port)\n", portPtr->max_vls);\r
printf("\t\tbad_pkey_counter=0x%x (Bad PKey counter)\n", portPtr->pkey_ctr);\r
printf("\t\tqkey_viol_counter=0x%x (QKey violation counter)\n", portPtr->qkey_ctr);\r
printf("\tmax_qp_ous_wr = 0x%x (Maximum Number of outstanding WR on any WQ)\n", ca_attr->max_wrs);\r
printf("\tmax_num_sg_ent = 0x%x (Max num of scatter/gather entries for WQE other than RD)\n", ca_attr->max_sges);\r
printf("\tmax_num_sg_ent_rd = 0x%x (Max num of scatter/gather entries for RD WQE)\n", ca_attr->max_rd_sges);\r
- printf("\tmax_num_srq = 0 (Maximum Number of SRQs supported)\n");\r
- printf("\tmax_wqe_per_srq = 0 (Maximum Number of outstanding WR on any SRQ)\n");\r
- printf("\tmax_srq_sentries = 0 (Maximum Number of scatter/gather entries for SRQ WQE)\n");\r
- printf("\tsrq_resize_supported = 0 (SRQ resize supported)\n");\r
+ printf("\tmax_num_srq = 0x%x (Maximum Number of SRQs supported)\n", ca_attr->max_srq);\r
+ printf("\tmax_wqe_per_srq = 0x%x (Maximum Number of outstanding WR on any SRQ)\n", ca_attr->max_srq_wrs);\r
+ printf("\tmax_srq_sentries = 0x%x (Maximum Number of scatter/gather entries for SRQ WQE)\n", ca_attr->max_srq_sges);\r
+ printf("\tsrq_resize_supported = %d (SRQ resize supported)\n", ca_attr->modify_srq_depth);\r
printf("\tmax_num_cq = 0x%x (Max num of supported CQs)\n", ca_attr->max_cqs);\r
printf("\tmax_num_ent_cq = 0x%x (Max num of supported entries per CQ)\n", ca_attr->max_cqes);\r
printf("\tmax_num_mr = 0x%x (Maximum number of memory region supported)\n", ca_attr->init_regions);\r
* Copyright (c) 2002-2006 Mellanox Technologies LTD. All rights reserved.
* Copyright (c) 1996-2003 Intel Corporation. All rights reserved.
*
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
+ * This software is available to you under the OpenIB.org BSD license
+ * below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
*/
+
#if !defined(__IB_TYPES_H__)
#define __IB_TYPES_H__
#define __ptr64
#endif
-
-
/****h* IBA Base/Constants
* NAME
* Constants
IN const uint8_t class_code )
{
return( (class_code >= IB_MCLASS_VENDOR_LOW_RANGE_MIN) &&
- (class_code <= IB_MCLASS_VENDOR_LOW_RANGE_MAX)) ;
+ (class_code <= IB_MCLASS_VENDOR_LOW_RANGE_MAX) );
}
/*
* PARAMETERS
IN const uint8_t class_code )
{
return( (class_code >= IB_MCLASS_VENDOR_HIGH_RANGE_MIN) &&
- (class_code <= IB_MCLASS_VENDOR_HIGH_RANGE_MAX)) ;
+ (class_code <= IB_MCLASS_VENDOR_HIGH_RANGE_MAX) );
}
/*
* PARAMETERS
/*
* MAD methods
*/
+
/****d* IBA Base: Constants/IB_MAX_METHOD
* NAME
* IB_MAX_METHOD
*/
#define IB_MAX_METHODS 128
/**********/
+
/****d* IBA Base: Constants/IB_MAD_METHOD_RESP_MASK
* NAME
* IB_MAD_METHOD_RESP_MASK
*/
#define IB_MAD_METHOD_RESP_MASK 0x80
/**********/
+
/****d* IBA Base: Constants/IB_MAD_METHOD_GET
* NAME
* IB_MAD_METHOD_GET
*/
#define IB_MAD_METHOD_GET 0x01
/**********/
+
/****d* IBA Base: Constants/IB_MAD_METHOD_SET
* NAME
* IB_MAD_METHOD_SET
*/
#define IB_MAD_METHOD_SET 0x02
/**********/
+
/****d* IBA Base: Constants/IB_MAD_METHOD_GET_RESP
* NAME
* IB_MAD_METHOD_GET_RESP
*/
#define IB_MAD_METHOD_SEND 0x03
/**********/
+
/****d* IBA Base: Constants/IB_MAD_METHOD_TRAP
* NAME
* IB_MAD_METHOD_TRAP
*/
#define IB_MAD_METHOD_TRAP 0x05
/**********/
+
/****d* IBA Base: Constants/IB_MAD_METHOD_REPORT
* NAME
* IB_MAD_METHOD_REPORT
*/
#define IB_MAD_METHOD_REPORT 0x06
/**********/
+
/****d* IBA Base: Constants/IB_MAD_METHOD_REPORT_RESP
* NAME
* IB_MAD_METHOD_REPORT_RESP
*/
#define IB_MAD_METHOD_REPORT_RESP 0x86
/**********/
+
/****d* IBA Base: Constants/IB_MAD_METHOD_TRAP_REPRESS
* NAME
* IB_MAD_METHOD_TRAP_REPRESS
*/
#define IB_MAD_METHOD_TRAP_REPRESS 0x07
/**********/
+
/****d* IBA Base: Constants/IB_MAD_STATUS_BUSY
* NAME
* IB_MAD_STATUS_BUSY
*
* SOURCE
*/
-#define IB_MAD_STATUS_BUSY (CL_HTON16(0x0001))
+#define IB_MAD_STATUS_BUSY (CL_HTON16(0x0001))
/**********/
+
/****d* IBA Base: Constants/IB_MAD_STATUS_REDIRECT
* NAME
* IB_MAD_STATUS_REDIRECT
*/
#define IB_MAD_STATUS_REDIRECT (CL_HTON16(0x0002))
/**********/
+
/****d* IBA Base: Constants/IB_MAD_STATUS_UNSUP_CLASS_VER
* NAME
* IB_MAD_STATUS_UNSUP_CLASS_VER
*
* SOURCE
*/
-#define IB_MAD_STATUS_UNSUP_CLASS_VER (CL_HTON16(0x0004))
+#define IB_MAD_STATUS_UNSUP_CLASS_VER (CL_HTON16(0x0004))
/**********/
+
/****d* IBA Base: Constants/IB_MAD_STATUS_UNSUP_METHOD
* NAME
* IB_MAD_STATUS_UNSUP_METHOD
*/
#define IB_MAD_STATUS_UNSUP_METHOD (CL_HTON16(0x0008))
/**********/
+
/****d* IBA Base: Constants/IB_MAD_STATUS_UNSUP_METHOD_ATTR
* NAME
* IB_MAD_STATUS_UNSUP_METHOD_ATTR
*
* SOURCE
*/
-#define IB_MAD_STATUS_UNSUP_METHOD_ATTR (CL_HTON16(0x000C))
+#define IB_MAD_STATUS_UNSUP_METHOD_ATTR (CL_HTON16(0x000C))
/**********/
+
/****d* IBA Base: Constants/IB_MAD_STATUS_INVALID_FIELD
* NAME
* IB_MAD_STATUS_INVALID_FIELD
#define IB_MAD_STATUS_CLASS_MASK (CL_HTON16(0xFF00))
#define IB_SA_MAD_STATUS_SUCCESS (CL_HTON16(0x0000))
-#define IB_SA_MAD_STATUS_NO_RESOURCES (CL_HTON16(0x0100))
-#define IB_SA_MAD_STATUS_REQ_INVALID (CL_HTON16(0x0200))
+#define IB_SA_MAD_STATUS_NO_RESOURCES (CL_HTON16(0x0100))
+#define IB_SA_MAD_STATUS_REQ_INVALID (CL_HTON16(0x0200))
#define IB_SA_MAD_STATUS_NO_RECORDS (CL_HTON16(0x0300))
-#define IB_SA_MAD_STATUS_TOO_MANY_RECORDS (CL_HTON16(0x0400))
-#define IB_SA_MAD_STATUS_INVALID_GID (CL_HTON16(0x0500))
-#define IB_SA_MAD_STATUS_INSUF_COMPS (CL_HTON16(0x0600))
+#define IB_SA_MAD_STATUS_TOO_MANY_RECORDS (CL_HTON16(0x0400))
+#define IB_SA_MAD_STATUS_INVALID_GID (CL_HTON16(0x0500))
+#define IB_SA_MAD_STATUS_INSUF_COMPS (CL_HTON16(0x0600))
-#define IB_DM_MAD_STATUS_NO_IOC_RESP (CL_HTON16(0x0100))
-#define IB_DM_MAD_STATUS_NO_SVC_ENTRIES (CL_HTON16(0x0200))
-#define IB_DM_MAD_STATUS_IOC_FAILURE (CL_HTON16(0x8000))
+#define IB_DM_MAD_STATUS_NO_IOC_RESP (CL_HTON16(0x0100))
+#define IB_DM_MAD_STATUS_NO_SVC_ENTRIES (CL_HTON16(0x0200))
+#define IB_DM_MAD_STATUS_IOC_FAILURE (CL_HTON16(0x8000))
/****d* IBA Base: Constants/IB_MAD_ATTR_CLASS_PORT_INFO
* NAME
*/
#define IB_MAD_ATTR_CLASS_PORT_INFO (CL_NTOH16(0x0001))
/**********/
+
/****d* IBA Base: Constants/IB_MAD_ATTR_NOTICE
* NAME
* IB_MAD_ATTR_NOTICE
*/
#define IB_MAD_ATTR_NOTICE (CL_NTOH16(0x0002))
/**********/
+
/****d* IBA Base: Constants/IB_MAD_ATTR_INFORM_INFO
* NAME
* IB_MAD_ATTR_INFORM_INFO
*/
#define IB_MAD_ATTR_INFORM_INFO (CL_NTOH16(0x0003))
/**********/
+
/****d* IBA Base: Constants/IB_MAD_ATTR_NODE_DESC
* NAME
* IB_MAD_ATTR_NODE_DESC
* SOURCE
*/
#define IB_MAD_ATTR_NODE_DESC (CL_NTOH16(0x0010))
+
/****d* IBA Base: Constants/IB_MAD_ATTR_PORT_SMPL_CTRL
* NAME
* IB_MAD_ATTR_PORT_SMPL_CTRL
*/
#define IB_MAD_ATTR_PORT_SMPL_CTRL (CL_NTOH16(0x0010))
/**********/
+
/****d* IBA Base: Constants/IB_MAD_ATTR_NODE_INFO
* NAME
* IB_MAD_ATTR_NODE_INFO
*/
#define IB_MAD_ATTR_NODE_INFO (CL_NTOH16(0x0011))
/**********/
+
/****d* IBA Base: Constants/IB_MAD_ATTR_PORT_SMPL_RSLT
* NAME
* IB_MAD_ATTR_PORT_SMPL_RSLT
*/
#define IB_MAD_ATTR_PORT_SMPL_RSLT (CL_NTOH16(0x0011))
/**********/
+
/****d* IBA Base: Constants/IB_MAD_ATTR_SWITCH_INFO
* NAME
* IB_MAD_ATTR_SWITCH_INFO
*/
#define IB_MAD_ATTR_SWITCH_INFO (CL_NTOH16(0x0012))
/**********/
+
/****d* IBA Base: Constants/IB_MAD_ATTR_PORT_CNTRS
* NAME
* IB_MAD_ATTR_PORT_CNTRS
*/
#define IB_MAD_ATTR_PORT_CNTRS (CL_NTOH16(0x0012))
/**********/
+
/****d* IBA Base: Constants/IB_MAD_ATTR_GUID_INFO
* NAME
* IB_MAD_ATTR_GUID_INFO
*/
#define IB_MAD_ATTR_GUID_INFO (CL_NTOH16(0x0014))
/**********/
+
/****d* IBA Base: Constants/IB_MAD_ATTR_PORT_INFO
* NAME
* IB_MAD_ATTR_PORT_INFO
*/
#define IB_MAD_ATTR_PORT_INFO (CL_NTOH16(0x0015))
/**********/
+
/****d* IBA Base: Constants/IB_MAD_ATTR_P_KEY_TABLE
* NAME
* IB_MAD_ATTR_P_KEY_TABLE
*/
#define IB_MAD_ATTR_P_KEY_TABLE (CL_NTOH16(0x0016))
/**********/
+
/****d* IBA Base: Constants/IB_MAD_ATTR_SLVL_TABLE
* NAME
* IB_MAD_ATTR_SLVL_TABLE
*/
#define IB_MAD_ATTR_SLVL_TABLE (CL_NTOH16(0x0017))
/**********/
+
/****d* IBA Base: Constants/IB_MAD_ATTR_VL_ARBITRATION
* NAME
* IB_MAD_ATTR_VL_ARBITRATION
*/
#define IB_MAD_ATTR_VL_ARBITRATION (CL_NTOH16(0x0018))
/**********/
+
/****d* IBA Base: Constants/IB_MAD_ATTR_LIN_FWD_TBL
* NAME
* IB_MAD_ATTR_LIN_FWD_TBL
*/
#define IB_MAD_ATTR_LIN_FWD_TBL (CL_NTOH16(0x0019))
/**********/
+
/****d* IBA Base: Constants/IB_MAD_ATTR_RND_FWD_TBL
* NAME
* IB_MAD_ATTR_RND_FWD_TBL
*/
#define IB_MAD_ATTR_RND_FWD_TBL (CL_NTOH16(0x001A))
/**********/
+
/****d* IBA Base: Constants/IB_MAD_ATTR_MCAST_FWD_TBL
* NAME
* IB_MAD_ATTR_MCAST_FWD_TBL
*/
#define IB_MAD_ATTR_NODE_RECORD (CL_NTOH16(0x0011))
/**********/
+
/****d* IBA Base: Constants/IB_MAD_ATTR_PORTINFO_RECORD
* NAME
* IB_MAD_ATTR_PORTINFO_RECORD
#define IB_MAD_ATTR_PORTINFO_RECORD (CL_NTOH16(0x0012))
/**********/
-
/****d* IBA Base: Constants/IB_MAD_ATTR_LINK_RECORD
* NAME
* IB_MAD_ATTR_LINK_RECORD
*
* SOURCE
*/
-#define IB_MAD_ATTR_SM_INFO (CL_NTOH16(0x0020))
+#define IB_MAD_ATTR_SM_INFO (CL_NTOH16(0x0020))
/**********/
/****d* IBA Base: Constants/IB_MAD_ATTR_SMINFO_RECORD
* IB_MAD_ATTR_SMINFO_RECORD
*
* DESCRIPTION
-* SmInfoRecord attribute (15.2.5)
+* SMInfoRecord attribute (15.2.5)
*
* SOURCE
*/
*
* SOURCE
*/
-#define IB_MAD_ATTR_LFT_RECORD (CL_NTOH16(0x0015))
+#define IB_MAD_ATTR_LFT_RECORD (CL_NTOH16(0x0015))
/**********/
/****d* IBA Base: Constants/IB_MAD_ATTR_PKEYTBL_RECORD
* SOURCE
*/
#define IB_PATH_SELECTOR_GREATER_THAN 0
-#define IB_PATH_SELECTOR_LESS_THAN 1
-#define IB_PATH_SELECTOR_EXACTLY 2
-#define IB_PATH_SELECTOR_LARGEST 3
+#define IB_PATH_SELECTOR_LESS_THAN 1
+#define IB_PATH_SELECTOR_EXACTLY 2
+#define IB_PATH_SELECTOR_LARGEST 3
/**********/
/****d* IBA Base: Constants/IB_SMINFO_STATE_NOTACTIVE
*/
#define IB_SMINFO_STATE_NOTACTIVE 0
/**********/
+
/****d* IBA Base: Constants/IB_SMINFO_STATE_DISCOVERING
* NAME
* IB_SMINFO_STATE_DISCOVERING
*/
#define IB_SMINFO_STATE_DISCOVERING 1
/**********/
+
/****d* IBA Base: Constants/IB_SMINFO_STATE_STANDBY
* NAME
* IB_SMINFO_STATE_STANDBY
*/
#define IB_SMINFO_STATE_STANDBY 2
/**********/
+
/****d* IBA Base: Constants/IB_SMINFO_STATE_MASTER
* NAME
* IB_SMINFO_STATE_MASTER
*/
#define IB_SMINFO_STATE_MASTER 3
/**********/
+
/****d* IBA Base: Constants/IB_PATH_REC_SELECTOR_MASK
* NAME
* IB_PATH_REC_SELECTOR_MASK
*/
#define IB_PATH_REC_SELECTOR_MASK 0xC0
/**********/
+
/****d* IBA Base: Constants/IB_PATH_REC_BASE_MASK
* NAME
* IB_PATH_REC_BASE_MASK
* Definitions are from the InfiniBand Architecture Specification v1.2
*
*********/
+
/****d* IBA Base: Types/ib_net16_t
* NAME
* ib_net16_t
*/
typedef uint16_t ib_net16_t;
/**********/
+
/****d* IBA Base: Types/ib_net32_t
* NAME
* ib_net32_t
*/
typedef uint32_t ib_net32_t;
/**********/
+
/****d* IBA Base: Types/ib_net64_t
* NAME
* ib_net64_t
*/
typedef uint64_t ib_net64_t;
/**********/
+
/****d* IBA Base: Types/ib_gid_prefix_t
* NAME
* ib_gid_prefix_t
*/
#define IB_LINK_NO_CHANGE 0
#define IB_LINK_DOWN 1
-#define IB_LINK_INIT 2
+#define IB_LINK_INIT 2
#define IB_LINK_ARMED 3
#define IB_LINK_ACTIVE 4
#define IB_LINK_ACT_DEFER 5
ib_get_port_state_from_str(
IN char* p_port_state_str )
{
- if( !strncmp(p_port_state_str,"No State Change (NOP)",12) )
+ if( !strncmp(p_port_state_str,"No State Change (NOP)", 12) )
return(0);
- else if( !strncmp(p_port_state_str, "DOWN",4) )
+ else if( !strncmp(p_port_state_str, "DOWN", 4) )
return(1);
else if( !strncmp(p_port_state_str, "INIT", 4) )
return(2);
- else if( !strncmp(p_port_state_str,"ARMED" , 5) )
+ else if( !strncmp(p_port_state_str, "ARMED" , 5) )
return(3);
else if( !strncmp(p_port_state_str, "ACTIVE", 6) )
return(4);
*
* SOURCE
*/
-#define IB_JOIN_STATE_FULL 1
-#define IB_JOIN_STATE_NON 2
+#define IB_JOIN_STATE_FULL 1
+#define IB_JOIN_STATE_NON 2
#define IB_JOIN_STATE_SEND_ONLY 4
/**********/
*/
AL_INLINE ib_net16_t AL_API
ib_pkey_get_base(
- IN const ib_net16_t pkey )
+ IN const ib_net16_t pkey )
{
return( (ib_net16_t)(pkey & IB_PKEY_BASE_MASK) );
}
*
* SEE ALSO
*********/
+
/****f* IBA Base: Types/ib_pkey_is_full_member
* NAME
* ib_pkey_is_full_member
*/
AL_INLINE boolean_t AL_API
ib_pkey_is_full_member(
- IN const ib_net16_t pkey )
+ IN const ib_net16_t pkey )
{
return( (pkey & IB_PKEY_TYPE_MASK) == IB_PKEY_TYPE_MASK );
}
*/
OSM_INLINE boolean_t AL_API
ib_pkey_is_invalid(
- IN const ib_net16_t pkey )
+ IN const ib_net16_t pkey )
{
if (ib_pkey_get_base(pkey) == 0x0000)
return TRUE;
#include <complib/cl_packon.h>
typedef union _ib_gid
{
- uint8_t raw[16];
+ uint8_t raw[16];
struct _ib_gid_unicast
{
ib_gid_prefix_t prefix;
- ib_net64_t interface_id;
+ ib_net64_t interface_id;
} PACK_SUFFIX unicast;
struct _ib_gid_multicast
{
- uint8_t header[2];
- uint8_t raw_group_id[14];
+ uint8_t header[2];
+ uint8_t raw_group_id[14];
} PACK_SUFFIX multicast;
AL_INLINE boolean_t AL_API
ib_gid_is_multicast(
- IN const ib_gid_t* p_gid )
+ IN const ib_gid_t* p_gid )
{
return( p_gid->raw[0] == 0xFF );
}
*/
AL_INLINE void AL_API
ib_gid_set_default(
- IN ib_gid_t* const p_gid,
- IN const ib_net64_t interface_id )
+ IN ib_gid_t* const p_gid,
+ IN const ib_net64_t interface_id )
{
p_gid->unicast.prefix = IB_DEFAULT_SUBNET_PREFIX;
p_gid->unicast.interface_id = interface_id;
* SEE ALSO
* ib_gid_t
*********/
+
/****f* IBA Base: Types/ib_gid_get_subnet_prefix
* NAME
* ib_gid_get_subnet_prefix
*/
AL_INLINE ib_net64_t AL_API
ib_gid_get_subnet_prefix(
- IN const ib_gid_t* const p_gid )
+ IN const ib_gid_t* const p_gid )
{
return( p_gid->unicast.prefix );
}
* SEE ALSO
* ib_gid_t
*********/
+
/****f* IBA Base: Types/ib_gid_is_link_local
* NAME
* ib_gid_is_link_local
*/
AL_INLINE boolean_t AL_API
ib_gid_is_link_local(
- IN const ib_gid_t* const p_gid )
+ IN const ib_gid_t* const p_gid )
{
return( ib_gid_get_subnet_prefix( p_gid ) == IB_DEFAULT_SUBNET_PREFIX );
}
* SEE ALSO
* ib_gid_t
*********/
+
/****f* IBA Base: Types/ib_gid_is_site_local
* NAME
* ib_gid_is_site_local
*/
AL_INLINE boolean_t AL_API
ib_gid_is_site_local(
- IN const ib_gid_t* const p_gid )
+ IN const ib_gid_t* const p_gid )
{
return( ( ib_gid_get_subnet_prefix( p_gid ) &
CL_HTON64( 0xFFFFFFFFFFFF0000ULL ) ) == CL_HTON64( 0xFEC0000000000000ULL ) );
* SEE ALSO
* ib_gid_t
*********/
+
/****f* IBA Base: Types/ib_gid_get_guid
* NAME
* ib_gid_get_guid
*/
AL_INLINE ib_net64_t AL_API
ib_gid_get_guid(
- IN const ib_gid_t* const p_gid )
+ IN const ib_gid_t* const p_gid )
{
return( p_gid->unicast.interface_id );
}
#define IB_PR_COMPMASK_SGID (CL_HTON64(((uint64_t)1)<<3))
#define IB_PR_COMPMASK_DLID (CL_HTON64(((uint64_t)1)<<4))
#define IB_PR_COMPMASK_SLID (CL_HTON64(((uint64_t)1)<<5))
-#define IB_PR_COMPMASK_RAWTRAFFIC (CL_HTON64(((uint64_t)1)<<6))
+#define IB_PR_COMPMASK_RAWTRAFFIC (CL_HTON64(((uint64_t)1)<<6))
#define IB_PR_COMPMASK_RESV0 (CL_HTON64(((uint64_t)1)<<7))
#define IB_PR_COMPMASK_FLOWLABEL (CL_HTON64(((uint64_t)1)<<8))
#define IB_PR_COMPMASK_HOPLIMIT (CL_HTON64(((uint64_t)1)<<9))
#define IB_LR_COMPMASK_FROM_LID (CL_HTON64(((uint64_t)1)<<0))
#define IB_LR_COMPMASK_FROM_PORT (CL_HTON64(((uint64_t)1)<<1))
#define IB_LR_COMPMASK_TO_PORT (CL_HTON64(((uint64_t)1)<<2))
-#define IB_LR_COMPMASK_TO_LID (CL_HTON64(((uint64_t)1)<<3))
+#define IB_LR_COMPMASK_TO_LID (CL_HTON64(((uint64_t)1)<<3))
/* VL Arbitration Record MASKs */
#define IB_VLA_COMPMASK_LID (CL_HTON64(((uint64_t)1)<<0))
#define IB_NR_COMPMASK_RESERVED1 (CL_HTON64(((uint64_t)1)<<1))
#define IB_NR_COMPMASK_BASEVERSION (CL_HTON64(((uint64_t)1)<<2))
#define IB_NR_COMPMASK_CLASSVERSION (CL_HTON64(((uint64_t)1)<<3))
-#define IB_NR_COMPMASK_NODETYPE (CL_HTON64(((uint64_t)1)<<4))
-#define IB_NR_COMPMASK_NUMPORTS (CL_HTON64(((uint64_t)1)<<5))
-#define IB_NR_COMPMASK_SYSIMAGEGUID (CL_HTON64(((uint64_t)1)<<6))
-#define IB_NR_COMPMASK_NODEGUID (CL_HTON64(((uint64_t)1)<<7))
-#define IB_NR_COMPMASK_PORTGUID (CL_HTON64(((uint64_t)1)<<8))
-#define IB_NR_COMPMASK_PARTCAP (CL_HTON64(((uint64_t)1)<<9))
-#define IB_NR_COMPMASK_DEVID (CL_HTON64(((uint64_t)1)<<10))
-#define IB_NR_COMPMASK_REV (CL_HTON64(((uint64_t)1)<<11))
-#define IB_NR_COMPMASK_PORTNUM (CL_HTON64(((uint64_t)1)<<12))
-#define IB_NR_COMPMASK_VENDID (CL_HTON64(((uint64_t)1)<<13))
-#define IB_NR_COMPMASK_NODEDESC (CL_HTON64(((uint64_t)1)<<14))
+#define IB_NR_COMPMASK_NODETYPE (CL_HTON64(((uint64_t)1)<<4))
+#define IB_NR_COMPMASK_NUMPORTS (CL_HTON64(((uint64_t)1)<<5))
+#define IB_NR_COMPMASK_SYSIMAGEGUID (CL_HTON64(((uint64_t)1)<<6))
+#define IB_NR_COMPMASK_NODEGUID (CL_HTON64(((uint64_t)1)<<7))
+#define IB_NR_COMPMASK_PORTGUID (CL_HTON64(((uint64_t)1)<<8))
+#define IB_NR_COMPMASK_PARTCAP (CL_HTON64(((uint64_t)1)<<9))
+#define IB_NR_COMPMASK_DEVID (CL_HTON64(((uint64_t)1)<<10))
+#define IB_NR_COMPMASK_REV (CL_HTON64(((uint64_t)1)<<11))
+#define IB_NR_COMPMASK_PORTNUM (CL_HTON64(((uint64_t)1)<<12))
+#define IB_NR_COMPMASK_VENDID (CL_HTON64(((uint64_t)1)<<13))
+#define IB_NR_COMPMASK_NODEDESC (CL_HTON64(((uint64_t)1)<<14))
/* Service Record Component Masks Sec 15.2.5.14 Ver 1.1*/
#define IB_SR_COMPMASK_SID (CL_HTON64(((uint64_t)1)<<0))
#define IB_SR_COMPMASK_SDATA64_1 (CL_HTON64(((uint64_t)1)<<36))
/* Port Info Record Component Masks */
-#define IB_PIR_COMPMASK_LID (CL_HTON64(((uint64_t)1)<<0))
-#define IB_PIR_COMPMASK_PORTNUM (CL_HTON64(((uint64_t)1)<<1))
-#define IB_PIR_COMPMASK_RESV1 (CL_HTON64(((uint64_t)1)<<2))
-#define IB_PIR_COMPMASK_MKEY (CL_HTON64(((uint64_t)1)<<3))
-#define IB_PIR_COMPMASK_GIDPRE (CL_HTON64(((uint64_t)1)<<4))
-#define IB_PIR_COMPMASK_BASELID (CL_HTON64(((uint64_t)1)<<5))
-#define IB_PIR_COMPMASK_SMLID (CL_HTON64(((uint64_t)1)<<6))
-#define IB_PIR_COMPMASK_CAPMASK (CL_HTON64(((uint64_t)1)<<7))
-#define IB_PIR_COMPMASK_DIAGCODE (CL_HTON64(((uint64_t)1)<<8))
-#define IB_PIR_COMPMASK_MKEYLEASEPRD (CL_HTON64(((uint64_t)1)<<9))
-#define IB_PIR_COMPMASK_LOCALPORTNUM (CL_HTON64(((uint64_t)1)<<10))
-#define IB_PIR_COMPMASK_LINKWIDTHENABLED (CL_HTON64(((uint64_t)1)<<11))
-#define IB_PIR_COMPMASK_LNKWIDTHSUPPORT (CL_HTON64(((uint64_t)1)<<12))
-#define IB_PIR_COMPMASK_LNKWIDTHACTIVE (CL_HTON64(((uint64_t)1)<<13))
-#define IB_PIR_COMPMASK_LNKSPEEDSUPPORT (CL_HTON64(((uint64_t)1)<<14))
-#define IB_PIR_COMPMASK_PORTSTATE (CL_HTON64(((uint64_t)1)<<15))
-#define IB_PIR_COMPMASK_PORTPHYSTATE (CL_HTON64(((uint64_t)1)<<16))
-#define IB_PIR_COMPMASK_LINKDWNDFLTSTATE (CL_HTON64(((uint64_t)1)<<17))
-#define IB_PIR_COMPMASK_MKEYPROTBITS (CL_HTON64(((uint64_t)1)<<18))
-#define IB_PIR_COMPMASK_RESV2 (CL_HTON64(((uint64_t)1)<<19))
-#define IB_PIR_COMPMASK_LMC (CL_HTON64(((uint64_t)1)<<20))
-#define IB_PIR_COMPMASK_LINKSPEEDACTIVE (CL_HTON64(((uint64_t)1)<<21))
-#define IB_PIR_COMPMASK_LINKSPEEDENABLE (CL_HTON64(((uint64_t)1)<<22))
-#define IB_PIR_COMPMASK_NEIGHBORMTU (CL_HTON64(((uint64_t)1)<<23))
-#define IB_PIR_COMPMASK_MASTERSMSL (CL_HTON64(((uint64_t)1)<<24))
-#define IB_PIR_COMPMASK_VLCAP (CL_HTON64(((uint64_t)1)<<25))
-#define IB_PIR_COMPMASK_INITTYPE (CL_HTON64(((uint64_t)1)<<26))
-#define IB_PIR_COMPMASK_VLHIGHLIMIT (CL_HTON64(((uint64_t)1)<<27))
-#define IB_PIR_COMPMASK_VLARBHIGHCAP (CL_HTON64(((uint64_t)1)<<28))
-#define IB_PIR_COMPMASK_VLARBLOWCAP (CL_HTON64(((uint64_t)1)<<29))
-#define IB_PIR_COMPMASK_INITTYPEREPLY (CL_HTON64(((uint64_t)1)<<30))
-#define IB_PIR_COMPMASK_MTUCAP (CL_HTON64(((uint64_t)1)<<31))
-#define IB_PIR_COMPMASK_VLSTALLCNT (CL_HTON64(((uint64_t)1)<<32))
-#define IB_PIR_COMPMASK_HOQLIFE (CL_HTON64(((uint64_t)1)<<33))
-#define IB_PIR_COMPMASK_OPVLS (CL_HTON64(((uint64_t)1)<<34))
-#define IB_PIR_COMPMASK_PARENFIN (CL_HTON64(((uint64_t)1)<<35))
-#define IB_PIR_COMPMASK_PARENFOUT (CL_HTON64(((uint64_t)1)<<36))
-#define IB_PIR_COMPMASK_FILTERRAWIN (CL_HTON64(((uint64_t)1)<<37))
-#define IB_PIR_COMPMASK_FILTERRAWOUT (CL_HTON64(((uint64_t)1)<<38))
-#define IB_PIR_COMPMASK_MKEYVIO (CL_HTON64(((uint64_t)1)<<39))
-#define IB_PIR_COMPMASK_PKEYVIO (CL_HTON64(((uint64_t)1)<<40))
-#define IB_PIR_COMPMASK_QKEYVIO (CL_HTON64(((uint64_t)1)<<41))
-#define IB_PIR_COMPMASK_GUIDCAP (CL_HTON64(((uint64_t)1)<<42))
-#define IB_PIR_COMPMASK_RESV3 (CL_HTON64(((uint64_t)1)<<43))
-#define IB_PIR_COMPMASK_SUBNTO (CL_HTON64(((uint64_t)1)<<44))
-#define IB_PIR_COMPMASK_RESV4 (CL_HTON64(((uint64_t)1)<<45))
-#define IB_PIR_COMPMASK_RESPTIME (CL_HTON64(((uint64_t)1)<<46))
-#define IB_PIR_COMPMASK_LOCALPHYERR (CL_HTON64(((uint64_t)1)<<47))
-#define IB_PIR_COMPMASK_OVERRUNERR (CL_HTON64(((uint64_t)1)<<48))
+#define IB_PIR_COMPMASK_LID (CL_HTON64(((uint64_t)1)<<0))
+#define IB_PIR_COMPMASK_PORTNUM (CL_HTON64(((uint64_t)1)<<1))
+#define IB_PIR_COMPMASK_RESV1 (CL_HTON64(((uint64_t)1)<<2))
+#define IB_PIR_COMPMASK_MKEY (CL_HTON64(((uint64_t)1)<<3))
+#define IB_PIR_COMPMASK_GIDPRE (CL_HTON64(((uint64_t)1)<<4))
+#define IB_PIR_COMPMASK_BASELID (CL_HTON64(((uint64_t)1)<<5))
+#define IB_PIR_COMPMASK_SMLID (CL_HTON64(((uint64_t)1)<<6))
+#define IB_PIR_COMPMASK_CAPMASK (CL_HTON64(((uint64_t)1)<<7))
+#define IB_PIR_COMPMASK_DIAGCODE (CL_HTON64(((uint64_t)1)<<8))
+#define IB_PIR_COMPMASK_MKEYLEASEPRD (CL_HTON64(((uint64_t)1)<<9))
+#define IB_PIR_COMPMASK_LOCALPORTNUM (CL_HTON64(((uint64_t)1)<<10))
+#define IB_PIR_COMPMASK_LINKWIDTHENABLED (CL_HTON64(((uint64_t)1)<<11))
+#define IB_PIR_COMPMASK_LNKWIDTHSUPPORT (CL_HTON64(((uint64_t)1)<<12))
+#define IB_PIR_COMPMASK_LNKWIDTHACTIVE (CL_HTON64(((uint64_t)1)<<13))
+#define IB_PIR_COMPMASK_LNKSPEEDSUPPORT (CL_HTON64(((uint64_t)1)<<14))
+#define IB_PIR_COMPMASK_PORTSTATE (CL_HTON64(((uint64_t)1)<<15))
+#define IB_PIR_COMPMASK_PORTPHYSTATE (CL_HTON64(((uint64_t)1)<<16))
+#define IB_PIR_COMPMASK_LINKDWNDFLTSTATE (CL_HTON64(((uint64_t)1)<<17))
+#define IB_PIR_COMPMASK_MKEYPROTBITS (CL_HTON64(((uint64_t)1)<<18))
+#define IB_PIR_COMPMASK_RESV2 (CL_HTON64(((uint64_t)1)<<19))
+#define IB_PIR_COMPMASK_LMC (CL_HTON64(((uint64_t)1)<<20))
+#define IB_PIR_COMPMASK_LINKSPEEDACTIVE (CL_HTON64(((uint64_t)1)<<21))
+#define IB_PIR_COMPMASK_LINKSPEEDENABLE (CL_HTON64(((uint64_t)1)<<22))
+#define IB_PIR_COMPMASK_NEIGHBORMTU (CL_HTON64(((uint64_t)1)<<23))
+#define IB_PIR_COMPMASK_MASTERSMSL (CL_HTON64(((uint64_t)1)<<24))
+#define IB_PIR_COMPMASK_VLCAP (CL_HTON64(((uint64_t)1)<<25))
+#define IB_PIR_COMPMASK_INITTYPE (CL_HTON64(((uint64_t)1)<<26))
+#define IB_PIR_COMPMASK_VLHIGHLIMIT (CL_HTON64(((uint64_t)1)<<27))
+#define IB_PIR_COMPMASK_VLARBHIGHCAP (CL_HTON64(((uint64_t)1)<<28))
+#define IB_PIR_COMPMASK_VLARBLOWCAP (CL_HTON64(((uint64_t)1)<<29))
+#define IB_PIR_COMPMASK_INITTYPEREPLY (CL_HTON64(((uint64_t)1)<<30))
+#define IB_PIR_COMPMASK_MTUCAP (CL_HTON64(((uint64_t)1)<<31))
+#define IB_PIR_COMPMASK_VLSTALLCNT (CL_HTON64(((uint64_t)1)<<32))
+#define IB_PIR_COMPMASK_HOQLIFE (CL_HTON64(((uint64_t)1)<<33))
+#define IB_PIR_COMPMASK_OPVLS (CL_HTON64(((uint64_t)1)<<34))
+#define IB_PIR_COMPMASK_PARENFIN (CL_HTON64(((uint64_t)1)<<35))
+#define IB_PIR_COMPMASK_PARENFOUT (CL_HTON64(((uint64_t)1)<<36))
+#define IB_PIR_COMPMASK_FILTERRAWIN (CL_HTON64(((uint64_t)1)<<37))
+#define IB_PIR_COMPMASK_FILTERRAWOUT (CL_HTON64(((uint64_t)1)<<38))
+#define IB_PIR_COMPMASK_MKEYVIO (CL_HTON64(((uint64_t)1)<<39))
+#define IB_PIR_COMPMASK_PKEYVIO (CL_HTON64(((uint64_t)1)<<40))
+#define IB_PIR_COMPMASK_QKEYVIO (CL_HTON64(((uint64_t)1)<<41))
+#define IB_PIR_COMPMASK_GUIDCAP (CL_HTON64(((uint64_t)1)<<42))
+#define IB_PIR_COMPMASK_RESV3 (CL_HTON64(((uint64_t)1)<<43))
+#define IB_PIR_COMPMASK_SUBNTO (CL_HTON64(((uint64_t)1)<<44))
+#define IB_PIR_COMPMASK_RESV4 (CL_HTON64(((uint64_t)1)<<45))
+#define IB_PIR_COMPMASK_RESPTIME (CL_HTON64(((uint64_t)1)<<46))
+#define IB_PIR_COMPMASK_LOCALPHYERR (CL_HTON64(((uint64_t)1)<<47))
+#define IB_PIR_COMPMASK_OVERRUNERR (CL_HTON64(((uint64_t)1)<<48))
/* Multicast Member Record Component Masks */
#define IB_MCR_COMPMASK_GID (CL_HTON64(((uint64_t)1)<<0))
*/
AL_INLINE void AL_API
ib_path_rec_init_local(
- IN ib_path_rec_t* const p_rec,
- IN ib_gid_t* const p_dgid,
- IN ib_gid_t* const p_sgid,
- IN ib_net16_t dlid,
- IN ib_net16_t slid,
- IN uint8_t num_path,
- IN ib_net16_t pkey,
- IN uint8_t sl,
- IN uint8_t mtu_selector,
- IN uint8_t mtu,
- IN uint8_t rate_selector,
- IN uint8_t rate,
- IN uint8_t pkt_life_selector,
- IN uint8_t pkt_life,
- IN uint8_t preference )
+ IN ib_path_rec_t* const p_rec,
+ IN ib_gid_t* const p_dgid,
+ IN ib_gid_t* const p_sgid,
+ IN ib_net16_t dlid,
+ IN ib_net16_t slid,
+ IN uint8_t num_path,
+ IN ib_net16_t pkey,
+ IN uint8_t sl,
+ IN uint8_t mtu_selector,
+ IN uint8_t mtu,
+ IN uint8_t rate_selector,
+ IN uint8_t rate,
+ IN uint8_t pkt_life_selector,
+ IN uint8_t pkt_life,
+ IN uint8_t preference )
{
p_rec->dgid = *p_dgid;
p_rec->sgid = *p_sgid;
*/
AL_INLINE uint8_t AL_API
ib_path_rec_num_path(
	IN const ib_path_rec_t* const p_rec )
{
	/* NumbPath occupies bits 6:0 of the num_path byte; bit 7 is
	 * a separate flag and is masked away here. */
	const uint8_t num_path = (uint8_t)( p_rec->num_path & 0x7F );
	return num_path;
}
* [in] Pointer to the path record object.
*
* RETURN VALUES
-* Maximum number of paths toreturn for each unique SGID_DGID combination.
+* Maximum number of paths to return for each unique SGID_DGID combination.
*
* NOTES
*
*/
AL_INLINE uint8_t AL_API
ib_path_rec_sl(
	IN const ib_path_rec_t* const p_rec )
{
	/* The service level is the low 4 bits of the sl word after
	 * conversion to host byte order. */
	const uint16_t sl_host = cl_ntoh16( p_rec->sl );
	return (uint8_t)( sl_host & 0xF );
}
*/
AL_INLINE uint8_t AL_API
ib_path_rec_mtu(
	IN const ib_path_rec_t* const p_rec )
{
	/* Strip the 2-bit selector, leaving the encoded MTU value. */
	const uint8_t mtu = (uint8_t)( p_rec->mtu & IB_PATH_REC_BASE_MASK );
	return mtu;
}
*/
AL_INLINE uint8_t AL_API
ib_path_rec_mtu_sel(
	IN const ib_path_rec_t* const p_rec )
{
	/* The MTU selector is the top two bits of the mtu byte. */
	const uint8_t sel_bits = (uint8_t)( p_rec->mtu & IB_PATH_REC_SELECTOR_MASK );
	return (uint8_t)( sel_bits >> 6 );
}
*/
AL_INLINE uint8_t AL_API
ib_path_rec_rate(
	IN const ib_path_rec_t* const p_rec )
{
	/* Strip the 2-bit selector, leaving the encoded rate value. */
	const uint8_t rate = (uint8_t)( p_rec->rate & IB_PATH_REC_BASE_MASK );
	return rate;
}
*/
AL_INLINE uint8_t AL_API
ib_path_rec_rate_sel(
	IN const ib_path_rec_t* const p_rec )
{
	/* The rate selector is the top two bits of the rate byte. */
	const uint8_t sel_bits = (uint8_t)( p_rec->rate & IB_PATH_REC_SELECTOR_MASK );
	return (uint8_t)( sel_bits >> 6 );
}
*/
AL_INLINE uint8_t AL_API
ib_path_rec_pkt_life(
	IN const ib_path_rec_t* const p_rec )
{
	/* Strip the 2-bit selector, leaving the encoded packet lifetime. */
	const uint8_t life = (uint8_t)( p_rec->pkt_life & IB_PATH_REC_BASE_MASK );
	return life;
}
* [in] Pointer to the path record object.
*
* RETURN VALUES
-* Encoded path pkt_life = 4.096 µsec * 2 ** PacketLifeTime.
+* Encoded path pkt_life = 4.096 µsec * 2 ** PacketLifeTime.
*
* NOTES
*
*/
AL_INLINE uint8_t AL_API
ib_path_rec_pkt_life_sel(
	IN const ib_path_rec_t* const p_rec )
{
	/* The packet-lifetime selector is the top two bits of pkt_life. */
	const uint8_t sel_bits = (uint8_t)( p_rec->pkt_life & IB_PATH_REC_SELECTOR_MASK );
	return (uint8_t)( sel_bits >> 6 );
}
*/
AL_INLINE uint32_t AL_API
ib_path_rec_flow_lbl(
	IN const ib_path_rec_t* const p_rec )
{
	/* The 20-bit flow label sits above the 8-bit hop limit in the
	 * combined hop/flow/raw dword. */
	const uint32_t hfr = cl_ntoh32( p_rec->hop_flow_raw );
	return ( hfr >> 8 ) & 0x000FFFFF;
}
*/
AL_INLINE uint8_t AL_API
ib_path_rec_hop_limit(
	IN const ib_path_rec_t* const p_rec )
{
	/* The hop limit is taken from the stored (network-order) value
	 * without byte-swapping — this matches the historic behavior. */
	return (uint8_t)( p_rec->hop_flow_raw & 0x000000FF );
}
*/
#define IB_CLASS_CAP_TRAP 0x0001
/*********/
+
/****s* IBA Base: Constants/IB_CLASS_CAP_GETSET
* NAME
* IB_CLASS_CAP_GETSET
*/
#define IB_CLASS_CAP_GETSET 0x0002
/*********/
+
/****s* IBA Base: Constants/IB_CLASS_RESP_TIME_MASK
* NAME
* IB_CLASS_RESP_TIME_MASK
* IB_CLASS_CAP_GETSET, IB_CLASS_CAP_TRAP
*
*********/
+
/****s* IBA Base: Types/ib_sm_info_t
* NAME
* ib_sm_info_t
*/
AL_INLINE uint8_t AL_API
ib_sminfo_get_priority(
	IN const ib_sm_info_t* const p_smi )
{
	/* Priority is the high nibble of the combined pri_state byte. */
	return (uint8_t)( (p_smi->pri_state >> 4) & 0x0F );
}
*/
AL_INLINE uint8_t AL_API
ib_sminfo_get_state(
	IN const ib_sm_info_t* const p_smi )
{
	/* SM state is the low nibble of the combined pri_state byte. */
	const uint8_t state = (uint8_t)( p_smi->pri_state & 0x0F );
	return state;
}
*/
AL_INLINE void AL_API
ib_mad_init_new(
- IN ib_mad_t* const p_mad,
- IN const uint8_t mgmt_class,
- IN const uint8_t class_ver,
- IN const uint8_t method,
- IN const ib_net64_t trans_id,
- IN const ib_net16_t attr_id,
- IN const ib_net32_t attr_mod )
+ IN ib_mad_t* const p_mad,
+ IN const uint8_t mgmt_class,
+ IN const uint8_t class_ver,
+ IN const uint8_t method,
+ IN const ib_net64_t trans_id,
+ IN const ib_net16_t attr_id,
+ IN const ib_net32_t attr_mod )
{
CL_ASSERT( p_mad );
p_mad->base_ver = 1;
*/
AL_INLINE void AL_API
ib_mad_init_response(
- IN const ib_mad_t* const p_req_mad,
- IN ib_mad_t* const p_mad,
- IN const ib_net16_t status )
+ IN const ib_mad_t* const p_req_mad,
+ IN ib_mad_t* const p_mad,
+ IN const ib_net16_t status )
{
CL_ASSERT( p_req_mad );
CL_ASSERT( p_mad );
*/
AL_INLINE boolean_t AL_API
ib_mad_is_response(
- IN const ib_mad_t* const p_mad )
+ IN const ib_mad_t* const p_mad )
{
CL_ASSERT( p_mad );
return( (p_mad->method & IB_MAD_METHOD_RESP_MASK) ==
* ib_mad_t
*********/
-
-#define IB_RMPP_TYPE_DATA 1
-#define IB_RMPP_TYPE_ACK 2
-#define IB_RMPP_TYPE_STOP 3
-#define IB_RMPP_TYPE_ABORT 4
+#define IB_RMPP_TYPE_DATA 1
+#define IB_RMPP_TYPE_ACK 2
+#define IB_RMPP_TYPE_STOP 3
+#define IB_RMPP_TYPE_ABORT 4
#define IB_RMPP_NO_RESP_TIME 0x1F
-#define IB_RMPP_FLAG_ACTIVE 0x01
-#define IB_RMPP_FLAG_FIRST 0x02
-#define IB_RMPP_FLAG_LAST 0x04
+#define IB_RMPP_FLAG_ACTIVE 0x01
+#define IB_RMPP_FLAG_FIRST 0x02
+#define IB_RMPP_FLAG_LAST 0x04
#define IB_RMPP_STATUS_SUCCESS 0
-#define IB_RMPP_STATUS_RESX 1 /* resources exhausted */
-#define IB_RMPP_STATUS_T2L 118 /* time too long */
+#define IB_RMPP_STATUS_RESX 1 /* resources exhausted */
+#define IB_RMPP_STATUS_T2L 118 /* time too long */
#define IB_RMPP_STATUS_BAD_LEN 119 /* incon. last and payload len */
#define IB_RMPP_STATUS_BAD_SEG 120 /* incon. first and segment no */
-#define IB_RMPP_STATUS_BADT 121 /* bad rmpp type */
-#define IB_RMPP_STATUS_W2S 122 /* newwindowlast too small */
-#define IB_RMPP_STATUS_S2B 123 /* segment no too big */
+#define IB_RMPP_STATUS_BADT 121 /* bad rmpp type */
+#define IB_RMPP_STATUS_W2S 122 /* new window last too small */
+#define IB_RMPP_STATUS_S2B 123 /* segment no too big */
#define IB_RMPP_STATUS_BAD_STATUS 124 /* illegal status */
-#define IB_RMPP_STATUS_UNV 125 /* unsupported version */
-#define IB_RMPP_STATUS_TMR 126 /* too many retries */
+#define IB_RMPP_STATUS_UNV 125 /* unsupported version */
+#define IB_RMPP_STATUS_TMR 126 /* too many retries */
#define IB_RMPP_STATUS_UNSPEC 127 /* unspecified */
-
/****f* IBA Base: Types/ib_rmpp_is_flag_set
* NAME
* ib_rmpp_is_flag_set
*/
AL_INLINE boolean_t AL_API
ib_rmpp_is_flag_set(
- IN const ib_rmpp_mad_t* const p_rmpp_mad,
- IN const uint8_t flag )
+ IN const ib_rmpp_mad_t* const p_rmpp_mad,
+ IN const uint8_t flag )
{
CL_ASSERT( p_rmpp_mad );
return( (p_rmpp_mad->rmpp_flags & flag) == flag );
AL_INLINE void AL_API
ib_rmpp_set_resp_time(
- IN ib_rmpp_mad_t* const p_rmpp_mad,
- IN const uint8_t resp_time )
+ IN ib_rmpp_mad_t* const p_rmpp_mad,
+ IN const uint8_t resp_time )
{
CL_ASSERT( p_rmpp_mad );
p_rmpp_mad->rmpp_flags |= (resp_time << 3);
AL_INLINE uint8_t AL_API
ib_rmpp_get_resp_time(
	IN const ib_rmpp_mad_t* const p_rmpp_mad )
{
	/* The response-time value is stored in the upper five bits of
	 * rmpp_flags, so shift it down into position. */
	CL_ASSERT( p_rmpp_mad );
	return (uint8_t)( p_rmpp_mad->rmpp_flags >> 3 );
}
-
/****d* IBA Base: Constants/IB_SMP_DIRECTION
* NAME
* IB_SMP_DIRECTION
*
* SOURCE
*/
-#define IB_SMP_DIRECTION_HO 0x8000
-#define IB_SMP_DIRECTION (CL_HTON16(IB_SMP_DIRECTION_HO))
+#define IB_SMP_DIRECTION_HO 0x8000
+#define IB_SMP_DIRECTION (CL_HTON16(IB_SMP_DIRECTION_HO))
/**********/
/****d* IBA Base: Constants/IB_SMP_STATUS_MASK
* SOURCE
*/
#define IB_SMP_STATUS_MASK_HO 0x7FFF
-#define IB_SMP_STATUS_MASK (CL_HTON16(IB_SMP_STATUS_MASK_HO))
+#define IB_SMP_STATUS_MASK (CL_HTON16(IB_SMP_STATUS_MASK_HO))
/**********/
/****s* IBA Base: Types/ib_smp_t
*/
AL_INLINE ib_net16_t AL_API
ib_smp_get_status(
	IN const ib_smp_t* const p_smp )
{
	/* Mask off the direction bit, leaving the 15 status bits
	 * (still in network byte order). */
	return (ib_net16_t)( p_smp->status & IB_SMP_STATUS_MASK );
}
*/
AL_INLINE boolean_t AL_API
ib_smp_is_response(
	IN const ib_smp_t* const p_smp )
{
	/* An SMP shares the common MAD header; defer to the generic
	 * MAD response-bit check. */
	const ib_mad_t* const p_mad = (const ib_mad_t*)p_smp;
	return ib_mad_is_response( p_mad );
}
* SEE ALSO
* ib_smp_t
*********/
+
/****f* IBA Base: Types/ib_smp_is_d
* NAME
* ib_smp_is_d
*/
AL_INLINE boolean_t AL_API
ib_smp_is_d(
	IN const ib_smp_t* const p_smp )
{
	/* TRUE when the direction (D) bit is set in the status word. */
	const ib_net16_t dir = (ib_net16_t)( p_smp->status & IB_SMP_DIRECTION );
	return (boolean_t)( dir == IB_SMP_DIRECTION );
}
*/
AL_INLINE void AL_API
ib_smp_init_new(
- IN ib_smp_t* const p_smp,
- IN const uint8_t method,
- IN const ib_net64_t trans_id,
- IN const ib_net16_t attr_id,
- IN const ib_net32_t attr_mod,
- IN const uint8_t hop_count,
- IN const ib_net64_t m_key,
- IN const uint8_t* path_out,
- IN const ib_net16_t dr_slid,
- IN const ib_net16_t dr_dlid )
+ IN ib_smp_t* const p_smp,
+ IN const uint8_t method,
+ IN const ib_net64_t trans_id,
+ IN const ib_net16_t attr_id,
+ IN const ib_net32_t attr_mod,
+ IN const uint8_t hop_count,
+ IN const ib_net64_t m_key,
+ IN const uint8_t* path_out,
+ IN const ib_net16_t dr_slid,
+ IN const ib_net16_t dr_dlid )
{
CL_ASSERT( p_smp );
CL_ASSERT( hop_count < IB_SUBNET_PATH_HOPS_MAX );
p_smp->dr_dlid = dr_dlid;
cl_memclr( p_smp->resv1,
- sizeof(p_smp->resv1) +
- sizeof(p_smp->data) +
- sizeof(p_smp->initial_path) +
- sizeof(p_smp->return_path) );
+ sizeof(p_smp->resv1) +
+ sizeof(p_smp->data) +
+ sizeof(p_smp->initial_path) +
+ sizeof(p_smp->return_path) );
/* copy the path */
cl_memcpy( &p_smp->initial_path, path_out,
- sizeof( p_smp->initial_path ) );
+ sizeof( p_smp->initial_path ) );
}
/*
* PARAMETERS
* SEE ALSO
* ib_mad_t
*********/
+
/****f* IBA Base: Types/ib_smp_get_payload_ptr
* NAME
* ib_smp_get_payload_ptr
*/
AL_INLINE void* AL_API
ib_smp_get_payload_ptr(
	IN const ib_smp_t* const p_smp )
{
	/* Historic API: const is deliberately cast away so callers can
	 * fill in the SMP data area. */
	return (void*)p_smp->data;
}
} PACK_SUFFIX ib_node_info_t;
#include <complib/cl_packoff.h>
/************/
+
/****s* IBA Base: Types/ib_sa_mad_t
* NAME
* ib_sa_mad_t
/**********/
#define IB_SA_MAD_HDR_SIZE (sizeof(ib_sa_mad_t) - IB_SA_DATA_SIZE)
-
-
AL_INLINE uint32_t AL_API
ib_get_attr_size(
	IN const ib_net16_t attr_offset )
{
	/* The SA attribute offset counts 8-byte units; convert the
	 * network-order offset into a byte count. */
	const uint32_t units = (uint32_t)cl_ntoh16( attr_offset );
	return units << 3;
}
AL_INLINE ib_net16_t AL_API
ib_get_attr_offset(
	IN const uint32_t attr_size )
{
	/* Convert a byte size into the SA 8-byte-unit attribute offset,
	 * returned in network byte order. */
	const uint16_t units = (uint16_t)( attr_size >> 3 );
	return cl_hton16( units );
}
-
/****f* IBA Base: Types/ib_sa_mad_get_payload_ptr
* NAME
* ib_sa_mad_get_payload_ptr
*/
AL_INLINE void* AL_API
ib_sa_mad_get_payload_ptr(
	IN const ib_sa_mad_t* const p_sa_mad )
{
	/* Historic API: const is deliberately cast away so callers can
	 * fill in the SA MAD data area. */
	return (void*)p_sa_mad->data;
}
*/
AL_INLINE uint8_t AL_API
ib_node_info_get_local_port_num(
- IN const ib_node_info_t* const p_ni )
+ IN const ib_node_info_t* const p_ni )
{
return( (uint8_t)(( p_ni->port_num_vendor_id &
IB_NODE_INFO_PORT_NUM_MASK )
* SEE ALSO
* ib_node_info_t
*********/
+
/****f* IBA Base: Types/ib_node_info_get_vendor_id
* NAME
* ib_node_info_get_vendor_id
*/
AL_INLINE ib_net32_t AL_API
ib_node_info_get_vendor_id(
- IN const ib_node_info_t* const p_ni )
+ IN const ib_node_info_t* const p_ni )
{
return( (ib_net32_t)( p_ni->port_num_vendor_id &
IB_NODE_INFO_VEND_ID_MASK ) );
uint8_t link_width_enabled;
uint8_t link_width_supported;
uint8_t link_width_active;
- uint8_t state_info1; /* LinkSpeedSupported and PortState */
- uint8_t state_info2; /* PortPhysState and LinkDownDefaultState */
+ uint8_t state_info1; /* LinkSpeedSupported and PortState */
+ uint8_t state_info2; /* PortPhysState and LinkDownDefaultState */
uint8_t mkey_lmc;
- uint8_t link_speed; /* LinkSpeedEnabled and LinkSpeedActive */
+ uint8_t link_speed; /* LinkSpeedEnabled and LinkSpeedActive */
uint8_t mtu_smsl;
uint8_t vl_cap; /* VLCap and InitType */
uint8_t vl_high_limit;
#include <complib/cl_packoff.h>
/************/
-#define IB_PORT_STATE_MASK 0x0F
-#define IB_PORT_LMC_MASK 0x07
-#define IB_PORT_MPB_MASK 0xC0
-#define IB_PORT_MPB_SHIFT 6
-#define IB_PORT_LINK_SPEED_SHIFT 4
-#define IB_PORT_LINK_SPEED_SUPPORTED_MASK 0xF0
-#define IB_PORT_LINK_SPEED_ACTIVE_MASK 0xF0
-#define IB_PORT_LINK_SPEED_ENABLED_MASK 0x0F
-#define IB_PORT_PHYS_STATE_MASK 0xF0
-#define IB_PORT_PHYS_STATE_SHIFT 4
-#define IB_PORT_LNKDWNDFTSTATE_MASK 0x0F
+#define IB_PORT_STATE_MASK 0x0F
+#define IB_PORT_LMC_MASK 0x07
+#define IB_PORT_MPB_MASK 0xC0
+#define IB_PORT_MPB_SHIFT 6
+#define IB_PORT_LINK_SPEED_SHIFT 4
+#define IB_PORT_LINK_SPEED_SUPPORTED_MASK 0xF0
+#define IB_PORT_LINK_SPEED_ACTIVE_MASK 0xF0
+#define IB_PORT_LINK_SPEED_ENABLED_MASK 0x0F
+#define IB_PORT_PHYS_STATE_MASK 0xF0
+#define IB_PORT_PHYS_STATE_SHIFT 4
+#define IB_PORT_LNKDWNDFTSTATE_MASK 0x0F
#define IB_PORT_CAP_RESV0 (CL_NTOH32(0x00000001))
#define IB_PORT_CAP_IS_SM (CL_NTOH32(0x00000002))
*/
AL_INLINE uint8_t AL_API
ib_port_info_get_port_state(
	IN const ib_port_info_t* const p_pi )
{
	/* PortState is the low nibble of state_info1. */
	const uint8_t state = (uint8_t)( p_pi->state_info1 & IB_PORT_STATE_MASK );
	return state;
}
*
* SEE ALSO
*********/
+
/****f* IBA Base: Types/ib_port_info_set_port_state
* NAME
* ib_port_info_set_port_state
*/
AL_INLINE void AL_API
ib_port_info_set_port_state(
	IN ib_port_info_t* const p_pi,
	IN const uint8_t port_state )
{
	/* Replace only the PortState nibble of state_info1.
	 * The new value is masked with IB_PORT_STATE_MASK (instead of the
	 * old bare OR with a 0xF0 literal) so an out-of-range argument
	 * cannot corrupt the adjacent LinkSpeedSupported nibble. */
	p_pi->state_info1 = (uint8_t)(
		(p_pi->state_info1 & ~IB_PORT_STATE_MASK) |
		(port_state & IB_PORT_STATE_MASK) );
}
*
* SEE ALSO
*********/
+
/****f* IBA Base: Types/ib_port_info_get_vl_cap
* NAME
* ib_port_info_get_vl_cap
*
* SEE ALSO
*********/
+
/****f* IBA Base: Types/ib_port_info_get_init_type
* NAME
* ib_port_info_get_init_type
*
* SEE ALSO
*********/
+
/****f* IBA Base: Types/ib_port_info_get_op_vls
* NAME
* ib_port_info_get_op_vls
*
* SEE ALSO
*********/
+
/****f* IBA Base: Types/ib_port_info_set_op_vls
* NAME
* ib_port_info_set_op_vls
*/
AL_INLINE void AL_API
ib_port_info_set_op_vls(
	IN ib_port_info_t* const p_pi,
	IN const uint8_t op_vls )
{
	/* OperationalVLs occupies the high nibble of vl_enforce;
	 * preserve the low nibble while writing it. */
	const uint8_t low_nibble = (uint8_t)( p_pi->vl_enforce & 0x0F );
	p_pi->vl_enforce = (uint8_t)( low_nibble | (uint8_t)(op_vls << 4) );
}
*
* SEE ALSO
*********/
+
/****f* IBA Base: Types/ib_port_info_set_state_no_change
* NAME
* ib_port_info_set_state_no_change
*/
AL_INLINE void AL_API
ib_port_info_set_state_no_change(
- IN ib_port_info_t* const p_pi )
+ IN ib_port_info_t* const p_pi )
{
ib_port_info_set_port_state( p_pi, 0 );
p_pi->state_info2 = 0;
*/
AL_INLINE uint8_t AL_API
ib_port_info_get_link_speed_sup(
- IN const ib_port_info_t* const p_pi )
+ IN const ib_port_info_t* const p_pi )
{
return( (uint8_t)((p_pi->state_info1 &
IB_PORT_LINK_SPEED_SUPPORTED_MASK) >>
*/
AL_INLINE void AL_API
ib_port_info_set_link_speed_sup(
- IN uint8_t const speed,
- IN ib_port_info_t* p_pi )
+ IN uint8_t const speed,
+ IN ib_port_info_t* p_pi )
{
p_pi->state_info1 =
( ~IB_PORT_LINK_SPEED_SUPPORTED_MASK & p_pi->state_info1 ) |
*/
OSM_INLINE uint8_t AL_API
ib_port_info_get_port_phys_state(
- IN const ib_port_info_t* const p_pi )
+ IN const ib_port_info_t* const p_pi )
{
return( (uint8_t)((p_pi->state_info2 &
IB_PORT_PHYS_STATE_MASK) >>
*/
AL_INLINE void AL_API
ib_port_info_set_port_phys_state(
- IN uint8_t const phys_state,
- IN ib_port_info_t* p_pi )
+ IN uint8_t const phys_state,
+ IN ib_port_info_t* p_pi )
{
p_pi->state_info2 =
( ~IB_PORT_PHYS_STATE_MASK & p_pi->state_info2 ) |
*/
OSM_INLINE uint8_t AL_API
ib_port_info_get_link_down_def_state(
	IN const ib_port_info_t* const p_pi )
{
	/* LinkDownDefaultState is the low nibble of state_info2. */
	const uint8_t state = (uint8_t)( p_pi->state_info2 & IB_PORT_LNKDWNDFTSTATE_MASK );
	return state;
}
*/
AL_INLINE void AL_API
ib_port_info_set_link_down_def_state(
	IN ib_port_info_t* const p_pi,
	IN const uint8_t link_dwn_state )
{
	/* Replace only the LinkDownDefaultState nibble of state_info2.
	 * Use the declared IB_PORT_LNKDWNDFTSTATE_MASK rather than the
	 * magic 0xF0 literal, and mask the argument so a bad value
	 * cannot touch the PortPhysicalState nibble. */
	p_pi->state_info2 = (uint8_t)(
		(p_pi->state_info2 & ~IB_PORT_LNKDWNDFTSTATE_MASK) |
		(link_dwn_state & IB_PORT_LNKDWNDFTSTATE_MASK) );
}
*/
OSM_INLINE uint8_t AL_API
ib_port_info_get_link_speed_active(
- IN const ib_port_info_t* const p_pi )
+ IN const ib_port_info_t* const p_pi )
{
return( (uint8_t)((p_pi->link_speed &
IB_PORT_LINK_SPEED_ACTIVE_MASK) >>
#define IB_LINK_WIDTH_ACTIVE_1X 1
#define IB_LINK_WIDTH_ACTIVE_4X 2
-#define IB_LINK_WIDTH_ACTIVE_12X 8
-#define IB_LINK_SPEED_ACTIVE_2_5 1
-#define IB_LINK_SPEED_ACTIVE_5 2
-#define IB_LINK_SPEED_ACTIVE_10 4
+#define IB_LINK_WIDTH_ACTIVE_12X 8
+#define IB_LINK_SPEED_ACTIVE_2_5 1
+#define IB_LINK_SPEED_ACTIVE_5 2
+#define IB_LINK_SPEED_ACTIVE_10 4
/* following v1 ver1.2 p901 */
#define IB_PATH_RECORD_RATE_2_5_GBS 2
AL_INLINE uint8_t AL_API
ib_port_info_compute_rate(
- IN const ib_port_info_t* const p_pi )
+ IN const ib_port_info_t* const p_pi )
{
uint8_t rate = 0;
switch (ib_port_info_get_link_speed_active(p_pi))
{
case IB_LINK_SPEED_ACTIVE_2_5:
- switch(p_pi->link_width_active)
+ switch (p_pi->link_width_active)
{
case IB_LINK_WIDTH_ACTIVE_1X:
rate = IB_PATH_RECORD_RATE_2_5_GBS;
break;
-
+
case IB_LINK_WIDTH_ACTIVE_4X:
rate = IB_PATH_RECORD_RATE_10_GBS;
break;
case IB_LINK_WIDTH_ACTIVE_1X:
rate = IB_PATH_RECORD_RATE_5_GBS;
break;
-
+
case IB_LINK_WIDTH_ACTIVE_4X:
rate = IB_PATH_RECORD_RATE_20_GBS;
break;
-
+
case IB_LINK_WIDTH_ACTIVE_12X:
rate = IB_PATH_RECORD_RATE_60_GBS;
break;
-
+
default:
rate = IB_PATH_RECORD_RATE_5_GBS;
break;
case IB_LINK_WIDTH_ACTIVE_1X:
rate = IB_PATH_RECORD_RATE_10_GBS;
break;
-
+
case IB_LINK_WIDTH_ACTIVE_4X:
rate = IB_PATH_RECORD_RATE_40_GBS;
break;
-
+
case IB_LINK_WIDTH_ACTIVE_12X:
- rate = IB_PATH_RECORD_RATE_120_GBS;
+ rate =IB_PATH_RECORD_RATE_120_GBS;
break;
-
+
default:
rate = IB_PATH_RECORD_RATE_10_GBS;
break;
*/
AL_INLINE uint8_t AL_API
ib_path_get_ipd(
- IN uint8_t local_link_width_supported,
- IN uint8_t path_rec_rate )
+ IN uint8_t local_link_width_supported,
+ IN uint8_t path_rec_rate )
{
uint8_t ipd = 0;
switch(local_link_width_supported)
{
- /* link_width_supported = 1: 1x */
+ /* link_width_supported = 1: 1x */
case 1:
break;
- /* link_width_supported = 3: 1x or 4x */
+ /* link_width_supported = 3: 1x or 4x */
case 3:
switch(path_rec_rate & 0x3F)
{
}
break;
- /* link_width_supported = 11: 1x or 4x or 12x */
+ /* link_width_supported = 11: 1x or 4x or 12x */
case 11:
switch(path_rec_rate & 0x3F)
{
*/
AL_INLINE uint8_t AL_API
ib_port_info_get_mtu_cap(
	IN const ib_port_info_t* const p_pi )
{
	/* MTUCap is the low nibble of the mtu_cap byte. */
	const uint8_t mtu = (uint8_t)( p_pi->mtu_cap & 0x0F );
	return mtu;
}
*
* SEE ALSO
*********/
+
/****f* IBA Base: Types/ib_port_info_get_neighbor_mtu
* NAME
* ib_port_info_get_neighbor_mtu
*
* SEE ALSO
*********/
+
/****f* IBA Base: Types/ib_port_info_set_neighbor_mtu
* NAME
* ib_port_info_set_neighbor_mtu
*/
AL_INLINE void AL_API
ib_port_info_set_neighbor_mtu(
- IN ib_port_info_t* const p_pi,
- IN const uint8_t mtu )
+ IN ib_port_info_t* const p_pi,
+ IN const uint8_t mtu )
{
CL_ASSERT( mtu <= 5 );
CL_ASSERT( mtu != 0 );
*
* SEE ALSO
*********/
+
/****f* IBA Base: Types/ib_port_info_set_master_smsl
* NAME
* ib_port_info_set_master_smsl
*/
AL_INLINE void AL_API
ib_port_info_set_master_smsl(
	IN ib_port_info_t* const p_pi,
	IN const uint8_t smsl )
{
	/* MasterSMSL is kept in the low nibble of mtu_smsl; the high
	 * nibble (NeighborMTU) is preserved. */
	const uint8_t mtu_bits = (uint8_t)( p_pi->mtu_smsl & 0xF0 );
	p_pi->mtu_smsl = (uint8_t)( mtu_bits | smsl );
}
*/
AL_INLINE void AL_API
ib_port_info_set_timeout(
	IN ib_port_info_t* const p_pi,
	IN const uint8_t timeout )
{
	/* Write the 5-bit SubnetTimeout while preserving bit 7
	 * (the client-reregister bit); bits 5-6 are cleared, matching
	 * the original behavior. */
	const uint8_t rereg_bit = (uint8_t)( p_pi->subnet_timeout & 0x80 );
	CL_ASSERT( timeout <= 0x1F );
	p_pi->subnet_timeout = (uint8_t)( rereg_bit | (timeout & 0x1F) );
}
*/
OSM_INLINE void AL_API
ib_port_info_set_client_rereg(
	IN ib_port_info_t* const p_pi,
	IN const uint8_t client_rereg )
{
	/* Set or clear the client-reregister bit (bit 7) while
	 * preserving the 5-bit SubnetTimeout field; bits 5-6 are
	 * cleared, matching the original behavior. */
	const uint8_t timeout_bits = (uint8_t)( p_pi->subnet_timeout & 0x1F );
	CL_ASSERT( client_rereg <= 0x1 );
	p_pi->subnet_timeout =
		(uint8_t)( timeout_bits | ((client_rereg << 7) & 0x80) );
}
*/
OSM_INLINE uint8_t AL_API
ib_port_info_get_timeout(
	IN ib_port_info_t const* p_pi )
{
	/* SubnetTimeout is the low five bits of subnet_timeout. */
	return (uint8_t)( p_pi->subnet_timeout & 0x1F );
}
*/
OSM_INLINE uint8_t AL_API
ib_port_info_get_client_rereg(
	IN ib_port_info_t const* p_pi )
{
	/* The client-reregister flag is bit 7 of subnet_timeout. */
	return (uint8_t)( (p_pi->subnet_timeout >> 7) & 0x01 );
}
*/
OSM_INLINE void AL_API
ib_port_info_set_hoq_lifetime(
- IN ib_port_info_t* const p_pi,
- IN const uint8_t hoq_life )
+ IN ib_port_info_t* const p_pi,
+ IN const uint8_t hoq_life )
{
p_pi->vl_stall_life = (uint8_t)((hoq_life & 0x1f) |
(p_pi->vl_stall_life & 0xe0));
*/
OSM_INLINE uint8_t AL_API
ib_port_info_get_hoq_lifetime(
	IN const ib_port_info_t* const p_pi )
{
	/* HOQLife is the low five bits of vl_stall_life. */
	const uint8_t hoq = (uint8_t)( p_pi->vl_stall_life & 0x1f );
	return hoq;
}
* ib_port_info_set_vl_stall_count
*
* DESCRIPTION
-* Sets the VL Stall Count which define the number of contiguous
+* Sets the VL Stall Count which define the number of contiguous
* HLL (hoq) drops that will put the VL into stalled mode.
*
* SYNOPSIS
*/
OSM_INLINE void AL_API
ib_port_info_set_vl_stall_count(
- IN ib_port_info_t* const p_pi,
- IN const uint8_t vl_stall_count )
+ IN ib_port_info_t* const p_pi,
+ IN const uint8_t vl_stall_count )
{
p_pi->vl_stall_life = (uint8_t)((p_pi->vl_stall_life & 0x1f) |
((vl_stall_count << 5) & 0xe0));
*/
OSM_INLINE uint8_t AL_API
ib_port_info_get_vl_stall_count(
	IN const ib_port_info_t* const p_pi )
{
	/* VLStallCount occupies the top three bits of vl_stall_life. */
	const uint8_t stall_bits = (uint8_t)( p_pi->vl_stall_life & 0xe0 );
	return (uint8_t)( stall_bits >> 5 );
}
*/
AL_INLINE uint8_t AL_API
ib_port_info_get_lmc(
	IN const ib_port_info_t* const p_pi )
{
	/* The LMC lives in the low three bits of mkey_lmc. */
	const uint8_t lmc = (uint8_t)( p_pi->mkey_lmc & IB_PORT_LMC_MASK );
	return lmc;
}
*/
AL_INLINE void AL_API
ib_port_info_set_lmc(
- IN ib_port_info_t* const p_pi,
- IN const uint8_t lmc )
+ IN ib_port_info_t* const p_pi,
+ IN const uint8_t lmc )
{
CL_ASSERT( lmc <= 0x7 );
p_pi->mkey_lmc = (uint8_t)((p_pi->mkey_lmc & 0xF8) | lmc);
*/
OSM_INLINE uint8_t AL_API
ib_port_info_get_link_speed_enabled(
	IN const ib_port_info_t* const p_pi )
{
	/* LinkSpeedEnabled is the low nibble of the link_speed byte. */
	const uint8_t speed = (uint8_t)( p_pi->link_speed & IB_PORT_LINK_SPEED_ENABLED_MASK );
	return speed;
}
*
* SEE ALSO
*********/
+
/****f* IBA Base: Types/ib_port_info_set_link_speed_enabled
* NAME
* ib_port_info_set_link_speed_enabled
*/
AL_INLINE void AL_API
ib_port_info_set_link_speed_enabled(
	IN ib_port_info_t* const p_pi,
	IN const uint8_t link_speed_enabled )
{
	/* Replace the LinkSpeedEnabled (low) nibble while preserving
	 * the LinkSpeedActive (high) nibble. */
	const uint8_t active_bits = (uint8_t)( p_pi->link_speed & 0xF0 );
	p_pi->link_speed = (uint8_t)( active_bits | link_speed_enabled );
}
*/
AL_INLINE uint8_t AL_API
ib_port_info_get_mpb(
- IN const ib_port_info_t* const p_pi )
+ IN const ib_port_info_t* const p_pi )
{
return( (uint8_t)((p_pi->mkey_lmc & IB_PORT_MPB_MASK) >>
IB_PORT_MPB_SHIFT) );
*/
AL_INLINE void AL_API
ib_port_info_set_mpb(
- IN ib_port_info_t* p_pi,
- IN uint8_t mpb )
+ IN ib_port_info_t* p_pi,
+ IN uint8_t mpb )
{
p_pi->mkey_lmc =
(~IB_PORT_MPB_MASK & p_pi->mkey_lmc) |
*
* SEE ALSO
*********/
+
/****f* IBA Base: Types/ib_port_info_get_local_phy_err_thd
* NAME
* ib_port_info_get_local_phy_err_thd
*/
OSM_INLINE uint8_t AL_API
ib_port_info_get_local_phy_err_thd(
	IN const ib_port_info_t* const p_pi )
{
	/* The local physical-error threshold is the high nibble of
	 * error_threshold. */
	return (uint8_t)( (p_pi->error_threshold >> 4) & 0x0F );
}
*
* SEE ALSO
*********/
+
/****f* IBA Base: Types/ib_port_info_get_overrun_err_thd
* NAME
* ib_port_info_get_local_overrun_err_thd
*/
OSM_INLINE uint8_t AL_API
ib_port_info_get_overrun_err_thd(
	IN const ib_port_info_t* const p_pi )
{
	/* The overrun-error threshold is the low nibble of
	 * error_threshold. */
	const uint8_t thd = (uint8_t)( p_pi->error_threshold & 0x0F );
	return thd;
}
*/
OSM_INLINE void AL_API
ib_port_info_set_phy_and_overrun_err_thd(
	IN ib_port_info_t* const p_pi,
	IN uint8_t phy_threshold,
	IN uint8_t overrun_threshold )
{
	/* Pack both 4-bit thresholds into the single error_threshold
	 * byte: physical-error threshold in the high nibble, overrun
	 * threshold in the low nibble. */
	const uint8_t phy_bits = (uint8_t)( (phy_threshold & 0x0F) << 4 );
	const uint8_t ovr_bits = (uint8_t)( overrun_threshold & 0x0F );
	p_pi->error_threshold = (uint8_t)( phy_bits | ovr_bits );
}
ib_net16_t resv;
ib_net32_t service_lease;
uint8_t service_key[16];
- ib_svc_name_t service_name;
+ ib_svc_name_t service_name;
uint8_t service_data8[16];
ib_net16_t service_data16[8];
ib_net32_t service_data32[4];
ib_net16_t lid;
uint8_t port_num;
uint8_t resv;
- ib_port_info_t port_info;
+ ib_port_info_t port_info;
uint8_t pad[6];
} PACK_SUFFIX ib_portinfo_record_t;
{
ib_net16_t lid;
uint16_t resv0;
- ib_sm_info_t sm_info;
+ ib_sm_info_t sm_info;
uint8_t pad[7];
} PACK_SUFFIX ib_sminfo_record_t;
{
ib_net16_t lid;
ib_net16_t block_num;
- uint32_t resv0;
- uint8_t lft[64];
+ uint32_t resv0;
+ uint8_t lft[64];
} PACK_SUFFIX ib_lft_record_t;
#include <complib/cl_packoff.h>
/************/
{
ib_net16_t lid;
uint16_t resv0;
- ib_switch_info_t switch_info;
+ ib_switch_info_t switch_info;
uint8_t pad[3];
} PACK_SUFFIX ib_switch_info_record_t;
*/
AL_INLINE boolean_t AL_API
ib_switch_info_get_state_change(
- IN const ib_switch_info_t* const p_si )
+ IN const ib_switch_info_t* const p_si )
{
return( (p_si->life_state & IB_SWITCH_PSC) == IB_SWITCH_PSC );
}
*/
AL_INLINE void AL_API
ib_switch_info_clear_state_change(
- IN ib_switch_info_t* const p_si )
+ IN ib_switch_info_t* const p_si )
{
p_si->life_state = (uint8_t)(p_si->life_state & 0xFB);
}
#include <complib/cl_packon.h>
typedef struct _ib_pkey_table
{
- ib_net16_t pkey_entry[IB_NUM_PKEY_ELEMENTS_IN_BLOCK];
+ ib_net16_t pkey_entry[IB_NUM_PKEY_ELEMENTS_IN_BLOCK];
} PACK_SUFFIX ib_pkey_table_t;
#include <complib/cl_packoff.h>
#include <complib/cl_packon.h>
typedef struct _ib_pkey_table_record
{
- ib_net16_t lid; // for CA: lid of port, for switch lid of port 0
- uint16_t block_num;
- uint8_t port_num; // for switch: port number, for CA: reserved
- uint8_t reserved1;
- uint16_t reserved2;
- ib_pkey_table_t pkey_tbl;
+ ib_net16_t lid; // for CA: lid of port, for switch lid of port 0
+ uint16_t block_num;
+ uint8_t port_num; // for switch: port number, for CA: reserved
+ uint8_t reserved1;
+ uint16_t reserved2;
+ ib_pkey_table_t pkey_tbl;
} PACK_SUFFIX ib_pkey_table_record_t;
#include <complib/cl_packoff.h>
* ib_slvl_table_record_t
*
* DESCRIPTION
-* IBA defined Sl to VL Mapping Table Record for SA Query. (15.2.5.4)
+* IBA defined SL to VL Mapping Table Record for SA Query. (15.2.5.4)
*
* SYNOPSIS
*/
uint8_t in_port_num; // reserved for CAs
uint8_t out_port_num; // reserved for CAs
uint32_t resv;
- ib_slvl_table_t slvl_tbl;
+ ib_slvl_table_t slvl_tbl;
} PACK_SUFFIX ib_slvl_table_record_t;
#include <complib/cl_packoff.h>
*/
AL_INLINE void AL_API
ib_slvl_table_set(
- IN ib_slvl_table_t* p_slvl_tbl,
- IN uint8_t sl_index,
- IN uint8_t vl )
+ IN ib_slvl_table_t* p_slvl_tbl,
+ IN uint8_t sl_index,
+ IN uint8_t vl )
{
uint8_t idx = sl_index/2;
CL_ASSERT(vl <= 15);
*/
OSM_INLINE uint8_t AL_API
ib_slvl_table_get(
- IN const ib_slvl_table_t* p_slvl_tbl,
- IN uint8_t sl_index )
+ IN const ib_slvl_table_t* p_slvl_tbl,
+ IN uint8_t sl_index )
{
uint8_t idx = sl_index/2;
CL_ASSERT(sl_index <= 15);
{
uint8_t vl;
uint8_t weight;
-
} PACK_SUFFIX ib_vl_arb_element_t;
#include <complib/cl_packoff.h>
/************/
#define IB_NUM_VL_ARB_ELEMENTS_IN_BLOCK 32
+
/****s* IBA Base: Types/ib_vl_arb_table_t
* NAME
* ib_vl_arb_table_t
typedef struct _ib_vl_arb_table
{
ib_vl_arb_element_t vl_entry[IB_NUM_VL_ARB_ELEMENTS_IN_BLOCK];
-
} PACK_SUFFIX ib_vl_arb_table_t;
#include <complib/cl_packoff.h>
/************/
#include <complib/cl_packon.h>
typedef struct _ib_vl_arb_table_record
{
- ib_net16_t lid; // for CA: lid of port, for switch lid of port 0
- uint8_t port_num;
- uint8_t block_num;
- uint32_t reserved;
+ ib_net16_t lid; // for CA: lid of port, for switch lid of port 0
+ uint8_t port_num;
+ uint8_t block_num;
+ uint32_t reserved;
ib_vl_arb_table_t vl_arb_tbl;
-
} PACK_SUFFIX ib_vl_arb_table_record_t;
#include <complib/cl_packoff.h>
/************/
#include <complib/cl_packon.h>
typedef struct _ib_grh
{
- ib_net32_t ver_class_flow;
- ib_net16_t resv1;
- uint8_t resv2;
- uint8_t hop_limit;
- ib_gid_t src_gid;
- ib_gid_t dest_gid;
-
+ ib_net32_t ver_class_flow;
+ ib_net16_t resv1;
+ uint8_t resv2;
+ uint8_t hop_limit;
+ ib_gid_t src_gid;
+ ib_gid_t dest_gid;
} PACK_SUFFIX ib_grh_t;
#include <complib/cl_packoff.h>
*/
AL_INLINE void AL_API
ib_grh_get_ver_class_flow(
- IN const ib_net32_t ver_class_flow,
- OUT uint8_t* const p_ver,
- OUT uint8_t* const p_tclass,
- OUT uint32_t* const p_flow_lbl )
+ IN const ib_net32_t ver_class_flow,
+ OUT uint8_t* const p_ver,
+ OUT uint8_t* const p_tclass,
+ OUT uint32_t* const p_flow_lbl )
{
ib_net32_t tmp_ver_class_flow;
*/
AL_INLINE ib_net32_t AL_API
ib_grh_set_ver_class_flow(
- IN const uint8_t ver,
- IN const uint8_t tclass,
- IN const uint32_t flow_lbl )
+ IN const uint8_t ver,
+ IN const uint8_t tclass,
+ IN const uint32_t flow_lbl )
{
ib_net32_t ver_class_flow;
*/
AL_INLINE void AL_API
ib_member_get_sl_flow_hop(
- IN const ib_net32_t sl_flow_hop,
- OUT uint8_t* const p_sl,
- OUT uint32_t* const p_flow_lbl,
- OUT uint8_t* const p_hop )
+ IN const ib_net32_t sl_flow_hop,
+ OUT uint8_t* const p_sl,
+ OUT uint32_t* const p_flow_lbl,
+ OUT uint8_t* const p_hop )
{
ib_net32_t tmp_sl_flow_hop;
*/
AL_INLINE ib_net32_t AL_API
ib_member_set_sl_flow_hop(
- IN const uint8_t sl,
- IN const uint32_t flow_label,
- IN const uint8_t hop_limit )
+ IN const uint8_t sl,
+ IN const uint32_t flow_label,
+ IN const uint8_t hop_limit )
{
ib_net32_t sl_flow_hop;
*/
AL_INLINE void AL_API
ib_member_get_scope_state(
- IN const uint8_t scope_state,
- OUT uint8_t* const p_scope,
- OUT uint8_t* const p_state )
+ IN const uint8_t scope_state,
+ OUT uint8_t* const p_scope,
+ OUT uint8_t* const p_state )
{
uint8_t tmp_scope_state;
*/
AL_INLINE uint8_t AL_API
ib_member_set_scope_state(
- IN const uint8_t scope,
- IN const uint8_t state )
+ IN const uint8_t scope,
+ IN const uint8_t state )
{
uint8_t scope_state;
*/
AL_INLINE void AL_API
ib_member_set_join_state(
- IN OUT ib_member_rec_t *p_mc_rec,
- IN const uint8_t state )
+ IN OUT ib_member_rec_t *p_mc_rec,
+ IN const uint8_t state )
{
- /* keep the scope as it is */
+ /* keep the scope as it is */
p_mc_rec->scope_state = (p_mc_rec->scope_state & 0xF0) | (0x0f & state);
}
/*
#define IB_NOTICE_TYPE_URGENT 0x01
#define IB_NOTICE_TYPE_SECURITY 0x02
#define IB_NOTICE_TYPE_SUBN_MGMT 0x03
-#define IB_NOTICE_TYPE_INFO 0x04
+#define IB_NOTICE_TYPE_INFO 0x04
#define IB_NOTICE_TYPE_EMPTY 0x7F
-
#include <complib/cl_packon.h>
typedef struct _ib_mad_notice_attr // Total Size calc Accumulated
{
- uint8_t generic_type; // 1 1
+ uint8_t generic_type; // 1 1
union _notice_g_or_v
{
*/
OSM_INLINE boolean_t AL_API
ib_notice_is_generic(
- IN const ib_mad_notice_attr_t *p_ntc)
+ IN const ib_mad_notice_attr_t *p_ntc )
{
return (p_ntc->generic_type & 0x80);
}
*/
AL_INLINE uint8_t AL_API
ib_notice_get_type(
- IN const ib_mad_notice_attr_t *p_ntc)
+ IN const ib_mad_notice_attr_t *p_ntc )
{
return p_ntc->generic_type & 0x7f;
}
*/
AL_INLINE ib_net32_t AL_API
ib_notice_get_prod_type(
- IN const ib_mad_notice_attr_t *p_ntc)
+ IN const ib_mad_notice_attr_t *p_ntc )
{
uint32_t pt;
+
pt = cl_ntoh16(p_ntc->g_or_v.generic.prod_type_lsb) |
(p_ntc->g_or_v.generic.prod_type_msb << 16);
return cl_hton32(pt);
AL_INLINE void AL_API
ib_notice_set_prod_type(
IN ib_mad_notice_attr_t *p_ntc,
- IN ib_net32_t prod_type_val)
+ IN ib_net32_t prod_type_val )
{
uint32_t ptv = cl_ntoh32(prod_type_val);
- p_ntc->g_or_v.generic.prod_type_lsb = cl_hton16( (uint16_t)(ptv & 0x0000ffff));
+ p_ntc->g_or_v.generic.prod_type_lsb = cl_hton16((uint16_t)(ptv & 0x0000ffff));
p_ntc->g_or_v.generic.prod_type_msb = (uint8_t)( (ptv & 0x00ff0000) >> 16);
}
/*
*/
AL_INLINE ib_net32_t AL_API
ib_notice_get_vend_id(
- IN const ib_mad_notice_attr_t *p_ntc)
+ IN const ib_mad_notice_attr_t *p_ntc )
{
uint32_t vi;
+
vi = cl_ntoh16(p_ntc->g_or_v.vend.vend_id_lsb) |
(p_ntc->g_or_v.vend.vend_id_msb << 16);
return cl_hton32(vi);
/*
* PARAMETERS
* p_ntc
-* [in] Pointer to the notice MAD attribute
+* [in] Pointer to the notice MAD attribute
*
* RETURN VALUES
* The Vendor Id of Vendor type Notice
AL_INLINE void AL_API
ib_notice_set_vend_id(
IN ib_mad_notice_attr_t *p_ntc,
- IN ib_net32_t vend_id)
+ IN ib_net32_t vend_id )
{
uint32_t vi = cl_ntoh32(vend_id);
p_ntc->g_or_v.vend.vend_id_lsb = cl_hton16((uint16_t)(vi & 0x0000ffff));
#include <complib/cl_packon.h>
typedef struct _ib_inform_info
{
- ib_gid_t gid;
+ ib_gid_t gid;
ib_net16_t lid_range_begin;
ib_net16_t lid_range_end;
ib_net16_t reserved1;
- uint8_t is_generic;
- uint8_t subscribe;
+ uint8_t is_generic;
+ uint8_t subscribe;
ib_net16_t trap_type;
union _inform_g_or_v
{
{
ib_net16_t trap_num;
ib_net32_t qpn_resp_time_val;
- uint8_t reserved2;
+ uint8_t reserved2;
uint8_t node_type_msb;
ib_net16_t node_type_lsb;
} PACK_SUFFIX generic;
{
ib_net16_t dev_id;
ib_net32_t qpn_resp_time_val;
- uint8_t reserved2;
+ uint8_t reserved2;
uint8_t vendor_id_msb;
ib_net16_t vendor_id_lsb;
} PACK_SUFFIX vend;
*/
OSM_INLINE void AL_API
ib_inform_info_get_qpn_resp_time(
- IN const ib_net32_t qpn_resp_time_val,
- OUT ib_net32_t* const p_qpn,
- OUT uint8_t* const p_resp_time_val )
+ IN const ib_net32_t qpn_resp_time_val,
+ OUT ib_net32_t* const p_qpn,
+ OUT uint8_t* const p_resp_time_val )
{
uint32_t tmp = cl_ntoh32(qpn_resp_time_val);
IN ib_net32_t const qpn)
{
uint32_t tmp = cl_ntoh32(p_ii->g_or_v.generic.qpn_resp_time_val);
+
p_ii->g_or_v.generic.qpn_resp_time_val =
cl_hton32(
(tmp & 0x000000ff) |
*/
OSM_INLINE ib_net32_t AL_API
ib_inform_info_get_node_type(
- IN const ib_inform_info_t *p_inf)
+ IN const ib_inform_info_t *p_inf)
{
uint32_t nt;
+
nt = cl_ntoh16(p_inf->g_or_v.generic.node_type_lsb) |
(p_inf->g_or_v.generic.node_type_msb << 16);
return cl_hton32(nt);
*/
OSM_INLINE ib_net32_t AL_API
ib_inform_info_get_vend_id(
- IN const ib_inform_info_t *p_inf)
+ IN const ib_inform_info_t *p_inf)
{
uint32_t vi;
+
vi = cl_ntoh16(p_inf->g_or_v.vend.vendor_id_lsb) |
(p_inf->g_or_v.vend.vendor_id_msb << 16);
return cl_hton32(vi);
#include <complib/cl_packon.h>
typedef struct _ib_inform_info_record
{
- ib_gid_t subscriber_gid;
+ ib_gid_t subscriber_gid;
ib_net16_t subscriber_enum;
- uint8_t reserved[6];
- ib_inform_info_t inform_info;
+ uint8_t reserved[6];
+ ib_inform_info_t inform_info;
} PACK_SUFFIX ib_inform_info_record_t;
#include <complib/cl_packoff.h>
ib_mad_t header;
uint8_t resv[40];
-#define IB_DM_DATA_SIZE 192
+#define IB_DM_DATA_SIZE 192
uint8_t data[IB_DM_DATA_SIZE];
} PACK_SUFFIX ib_dm_mad_t;
*/
AL_INLINE uint8_t AL_API
ib_iou_info_diag_dev_id(
- IN const ib_iou_info_t* const p_iou_info )
+ IN const ib_iou_info_t* const p_iou_info )
{
return( (uint8_t)(p_iou_info->diag_rom >> 6 & 1) );
}
*/
AL_INLINE uint8_t AL_API
ib_iou_info_option_rom(
- IN const ib_iou_info_t* const p_iou_info )
+ IN const ib_iou_info_t* const p_iou_info )
{
return( (uint8_t)(p_iou_info->diag_rom >> 7) );
}
*/
AL_INLINE uint8_t AL_API
ioc_at_slot(
- IN const ib_iou_info_t* const p_iou_info,
- IN uint8_t slot )
+ IN const ib_iou_info_t* const p_iou_info,
+ IN uint8_t slot )
{
if( slot >= IB_DM_CTRL_LIST_SIZE ) return SLOT_DOES_NOT_EXIST;
else return (int8_t)
* ib_dm_mad_t
*********/
-
AL_INLINE uint32_t AL_API
ib_ioc_profile_get_vend_id(
- IN const ib_ioc_profile_t* const p_ioc_profile )
+ IN const ib_ioc_profile_t* const p_ioc_profile )
{
return( cl_ntoh32(p_ioc_profile->vend_id) >> 8 );
}
AL_INLINE void AL_API
ib_ioc_profile_set_vend_id(
- IN ib_ioc_profile_t* const p_ioc_profile,
- IN const uint32_t vend_id )
+ IN ib_ioc_profile_t* const p_ioc_profile,
+ IN const uint32_t vend_id )
{
p_ioc_profile->vend_id = (cl_hton32(vend_id) << 8);
}
-
/****s* IBA Base: Types/ib_svc_entry_t
* NAME
* ib_svc_entry_t
typedef struct _ib_svc_entry
{
#define MAX_SVC_ENTRY_NAME_LEN 40
- char name[MAX_SVC_ENTRY_NAME_LEN];
+ char name[MAX_SVC_ENTRY_NAME_LEN];
- ib_net64_t id;
+ ib_net64_t id;
} PACK_SUFFIX ib_svc_entry_t;
#include <complib/cl_packoff.h>
* ib_svc_entries_t
*********/
-
/****s* IBA Base: Types/ib_svc_entries_t
* NAME
* ib_svc_entries_t
#include <complib/cl_packon.h>
typedef struct _ib_svc_entries
{
-#define SVC_ENTRY_COUNT 4
+#define SVC_ENTRY_COUNT 4
ib_svc_entry_t service_entry[SVC_ENTRY_COUNT];
} PACK_SUFFIX ib_svc_entries_t;
* ib_dm_mad_t, ib_svc_entry_t
*********/
-
AL_INLINE void AL_API
ib_dm_get_slot_lo_hi(
- IN const ib_net32_t slot_lo_hi,
- OUT uint8_t *const p_slot,
- OUT uint8_t *const p_lo,
- OUT uint8_t *const p_hi )
+ IN const ib_net32_t slot_lo_hi,
+ OUT uint8_t *const p_slot,
+ OUT uint8_t *const p_lo,
+ OUT uint8_t *const p_hi )
{
ib_net32_t tmp_slot_lo_hi = CL_NTOH32( slot_lo_hi );
*p_lo = (uint8_t)( ( tmp_slot_lo_hi >> 0 ) & 0xff );
}
-
/*
* IBA defined information describing an I/O controller
*/
{
ib_net64_t module_guid;
ib_net64_t iou_guid;
- ib_ioc_profile_t ioc_profile;
+ ib_ioc_profile_t ioc_profile;
ib_net64_t access_key;
uint16_t initiators_conf;
uint8_t resv[38];
/*
* Defines known Communication management class versions
*/
-#define IB_MCLASS_CM_VER_2 2
-#define IB_MCLASS_CM_VER_1 1
+#define IB_MCLASS_CM_VER_2 2
+#define IB_MCLASS_CM_VER_1 1
/*
* Defines the size of user available data in communication management MADs
#define IB_SIDR_REQ_PDATA_SIZE_VER1 216
#define IB_SIDR_REP_PDATA_SIZE_VER1 140
-#define IB_ARI_SIZE 72 // redefine
-#define IB_APR_INFO_SIZE 72
-
+#define IB_ARI_SIZE 72 // redefine
+#define IB_APR_INFO_SIZE 72
/****d* Access Layer/ib_rej_status_t
* NAME
*
* SYNOPSIS
*/
-typedef ib_net16_t ib_rej_status_t;
+typedef ib_net16_t ib_rej_status_t;
/*
* SEE ALSO
* ib_cm_rej, ib_cm_rej_rec_t
*
* SOURCE
- */
-#define IB_REJ_INSUF_QP CL_HTON16(1)
-#define IB_REJ_INSUF_EEC CL_HTON16(2)
+*/
+#define IB_REJ_INSUF_QP CL_HTON16(1)
+#define IB_REJ_INSUF_EEC CL_HTON16(2)
#define IB_REJ_INSUF_RESOURCES CL_HTON16(3)
-#define IB_REJ_TIMEOUT CL_HTON16(4)
-#define IB_REJ_UNSUPPORTED CL_HTON16(5)
+#define IB_REJ_TIMEOUT CL_HTON16(4)
+#define IB_REJ_UNSUPPORTED CL_HTON16(5)
#define IB_REJ_INVALID_COMM_ID CL_HTON16(6)
-#define IB_REJ_INVALID_COMM_INSTANCE CL_HTON16(7)
-#define IB_REJ_INVALID_SID CL_HTON16(8)
+#define IB_REJ_INVALID_COMM_INSTANCE CL_HTON16(7)
+#define IB_REJ_INVALID_SID CL_HTON16(8)
#define IB_REJ_INVALID_XPORT CL_HTON16(9)
-#define IB_REJ_STALE_CONN CL_HTON16(10)
+#define IB_REJ_STALE_CONN CL_HTON16(10)
#define IB_REJ_RDC_NOT_EXIST CL_HTON16(11)
-#define IB_REJ_INVALID_GID CL_HTON16(12)
-#define IB_REJ_INVALID_LID CL_HTON16(13)
-#define IB_REJ_INVALID_SL CL_HTON16(14)
-#define IB_REJ_INVALID_TRAFFIC_CLASS CL_HTON16(15)
+#define IB_REJ_INVALID_GID CL_HTON16(12)
+#define IB_REJ_INVALID_LID CL_HTON16(13)
+#define IB_REJ_INVALID_SL CL_HTON16(14)
+#define IB_REJ_INVALID_TRAFFIC_CLASS CL_HTON16(15)
#define IB_REJ_INVALID_HOP_LIMIT CL_HTON16(16)
#define IB_REJ_INVALID_PKT_RATE CL_HTON16(17)
#define IB_REJ_INVALID_ALT_GID CL_HTON16(18)
#define IB_REJ_INVALID_ALT_LID CL_HTON16(19)
#define IB_REJ_INVALID_ALT_SL CL_HTON16(20)
-#define IB_REJ_INVALID_ALT_TRAFFIC_CLASS CL_HTON16(21)
-#define IB_REJ_INVALID_ALT_HOP_LIMIT CL_HTON16(22)
+#define IB_REJ_INVALID_ALT_TRAFFIC_CLASS CL_HTON16(21)
+#define IB_REJ_INVALID_ALT_HOP_LIMIT CL_HTON16(22)
#define IB_REJ_INVALID_ALT_PKT_RATE CL_HTON16(23)
#define IB_REJ_PORT_REDIRECT CL_HTON16(24)
-#define IB_REJ_INVALID_MTU CL_HTON16(26)
-#define IB_REJ_INSUFFICIENT_RESP_RES CL_HTON16(27)
-#define IB_REJ_USER_DEFINED CL_HTON16(28)
+#define IB_REJ_INVALID_MTU CL_HTON16(26)
+#define IB_REJ_INSUFFICIENT_RESP_RES CL_HTON16(27)
+#define IB_REJ_USER_DEFINED CL_HTON16(28)
#define IB_REJ_INVALID_RNR_RETRY CL_HTON16(29)
-#define IB_REJ_DUPLICATE_LOCAL_COMM_ID CL_HTON16(30)
+#define IB_REJ_DUPLICATE_LOCAL_COMM_ID CL_HTON16(30)
#define IB_REJ_INVALID_CLASS_VER CL_HTON16(31)
#define IB_REJ_INVALID_FLOW_LBL CL_HTON16(32)
#define IB_REJ_INVALID_ALT_FLOW_LBL CL_HTON16(33)
#define IB_REJ_SERVICE_HANDOFF CL_HTON16(65535)
/******/
-
/****d* Access Layer/ib_apr_status_t
* NAME
* ib_apr_status_t
*
* SYNOPSIS
*/
-typedef uint8_t ib_apr_status_t;
+typedef uint8_t ib_apr_status_t;
/*
* SEE ALSO
* ib_cm_apr, ib_cm_apr_rec_t
*
* SOURCE
*/
-#define IB_AP_SUCCESS 0
+#define IB_AP_SUCCESS 0
#define IB_AP_INVALID_COMM_ID 1
-#define IB_AP_UNSUPPORTED 2
-#define IB_AP_REJECT 3
-#define IB_AP_REDIRECT 4
-#define IB_AP_IS_CURRENT 5
+#define IB_AP_UNSUPPORTED 2
+#define IB_AP_REJECT 3
+#define IB_AP_REDIRECT 4
+#define IB_AP_IS_CURRENT 5
#define IB_AP_INVALID_QPN_EECN 6
-#define IB_AP_INVALID_LID 7
-#define IB_AP_INVALID_GID 8
+#define IB_AP_INVALID_LID 7
+#define IB_AP_INVALID_GID 8
#define IB_AP_INVALID_FLOW_LBL 9
#define IB_AP_INVALID_TCLASS 10
#define IB_AP_INVALID_HOP_LIMIT 11
#define IB_AP_INVALID_PKT_RATE 12
-#define IB_AP_INVALID_SL 13
+#define IB_AP_INVALID_SL 13
/******/
/****d* Access Layer/ib_cm_cap_mask_t
*/
#define IB_CM_RELIABLE_CONN_CAPABLE CL_HTON16(9)
#define IB_CM_RELIABLE_DGRM_CAPABLE CL_HTON16(10)
-#define IB_CM_RDGRM_CAPABLE CL_HTON16(11)
-#define IB_CM_UNRELIABLE_CONN_CAPABLE CL_HTON16(12)
-#define IB_CM_SIDR_CAPABLE CL_HTON16(13)
+#define IB_CM_RDGRM_CAPABLE CL_HTON16(11)
+#define IB_CM_UNRELIABLE_CONN_CAPABLE CL_HTON16(12)
+#define IB_CM_SIDR_CAPABLE CL_HTON16(13)
/*
* SEE ALSO
* ib_cm_rep, ib_class_port_info_t
*
*******/
-
/*
* Service ID resolution status
*/
-typedef uint16_t ib_sidr_status_t;
-#define IB_SIDR_SUCCESS 0
-#define IB_SIDR_UNSUPPORTED 1
-#define IB_SIDR_REJECT 2
-#define IB_SIDR_NO_QP 3
-#define IB_SIDR_REDIRECT 4
+typedef uint16_t ib_sidr_status_t;
+#define IB_SIDR_SUCCESS 0
+#define IB_SIDR_UNSUPPORTED 1
+#define IB_SIDR_REJECT 2
+#define IB_SIDR_NO_QP 3
+#define IB_SIDR_REDIRECT 4
#define IB_SIDR_UNSUPPORTED_VER 5
-
/*
* The following definitions are shared between the Access Layer and VPD
*/
typedef struct _ib_mr* __ptr64 ib_mr_handle_t;
typedef struct _ib_mw* __ptr64 ib_mw_handle_t;
typedef struct _ib_qp* __ptr64 ib_qp_handle_t;
-typedef struct _ib_eec* __ptr64 ib_eec_handle_t;
+typedef struct _ib_srq* __ptr64 ib_srq_handle_t;
+typedef struct _ib_eec* __ptr64 ib_eec_handle_t;
typedef struct _ib_cq* __ptr64 ib_cq_handle_t;
typedef struct _ib_av* __ptr64 ib_av_handle_t;
typedef struct _ib_mcast* __ptr64 ib_mcast_handle_t;
/* Currently for windows branch, use the extended version of ib special verbs struct
in order to be compliant with Infinicon ib_types; later we'll change it to support
OpenSM ib_types.h */
-#ifndef WIN32
+#ifndef WIN32
/****d* Access Layer/ib_api_status_t
* NAME
* ib_api_status_t
IB_OVERFLOW,
IB_MAX_MCAST_QPS_REACHED,
IB_INVALID_QP_STATE,
- IB_INVALID_EEC_STATE,
IB_INVALID_APM_STATE,
IB_INVALID_PORT_STATE,
IB_INVALID_STATE,
IB_INVALID_MAX_WRS,
IB_INVALID_MAX_SGE,
IB_INVALID_CQ_SIZE,
+ IB_INVALID_SRQ_SIZE,
IB_INVALID_SERVICE_TYPE,
IB_INVALID_GID,
IB_INVALID_LID,
IB_INVALID_CA_HANDLE,
IB_INVALID_AV_HANDLE,
IB_INVALID_CQ_HANDLE,
- IB_INVALID_EEC_HANDLE,
IB_INVALID_QP_HANDLE,
+ IB_INVALID_SRQ_HANDLE,
IB_INVALID_PD_HANDLE,
IB_INVALID_MR_HANDLE,
+ IB_INVALID_FMR_HANDLE,
IB_INVALID_MW_HANDLE,
- IB_INVALID_RDD_HANDLE,
IB_INVALID_MCAST_HANDLE,
IB_INVALID_CALLBACK,
IB_INVALID_AL_HANDLE, /* InfiniBand Access Layer */
IB_EE_IN_TIMEWAIT,
IB_INVALID_PORT,
IB_NOT_DONE,
+ IB_INVALID_INDEX,
+ IB_NO_MATCH,
+ IB_PENDING,
IB_UNKNOWN_ERROR /* ALWAYS LAST ENUM VALUE! */
} ib_api_status_t;
*/
AL_INLINE const char* AL_API
ib_get_err_str(
- IN ib_api_status_t status )
+ IN ib_api_status_t status )
{
if( status > IB_UNKNOWN_ERROR )
status = IB_UNKNOWN_ERROR;
* SEE ALSO
*********/
-
/****d* Verbs/ib_async_event_t
* NAME
* ib_async_event_t -- Async event types
*/
AL_INLINE const char* AL_API
ib_get_async_event_str(
- IN ib_async_event_t event )
+ IN ib_async_event_t event )
{
if( event > IB_AE_UNKNOWN )
event = IB_AE_UNKNOWN;
* SEE ALSO
*********/
-
/****s* Verbs/ib_event_rec_t
* NAME
* ib_event_rec_t -- Async event notification record
typedef struct _ib_event_rec
{
void *context;
- ib_async_event_t type;
+ ib_async_event_t type;
/* HCA vendor specific event information. */
uint64_t vendor_specific;
} info;
- ib_net64_t sysimg_guid;
+ ib_net64_t sysimg_guid;
} trap;
} ib_event_rec_t;
/*******/
-
/****d* Access Layer/ib_atomic_t
* NAME
* ib_atomic_t
* in the system.
*****/
-
/****s* Access Layer/ib_port_cap_t
* NAME
* ib_port_cap_t
} ib_port_cap_t;
/*****/
-
/****d* Access Layer/ib_init_type_t
* NAME
* ib_init_type_t
* SYNOPSIS
*/
typedef uint8_t ib_init_type_t;
-#define IB_INIT_TYPE_NO_LOAD 0x01
+#define IB_INIT_TYPE_NO_LOAD 0x01
#define IB_INIT_TYPE_PRESERVE_CONTENT 0x02
#define IB_INIT_TYPE_PRESERVE_PRESENCE 0x04
#define IB_INIT_TYPE_DO_NOT_RESUSCITATE 0x08
/*****/
-
/****s* Access Layer/ib_port_attr_mod_t
* NAME
* ib_port_attr_mod_t
*/
typedef struct _ib_port_attr_mod
{
- ib_port_cap_t cap;
+ ib_port_cap_t cap;
uint16_t pkey_ctr;
uint16_t qkey_ctr;
- ib_init_type_t init_type;
+ ib_init_type_t init_type;
ib_net64_t system_image_guid;
} ib_port_attr_mod_t;
* ib_port_cap_t
*****/
-
/****s* Access Layer/ib_port_attr_t
* NAME
* ib_port_attr_t
uint8_t sm_sl;
uint8_t link_state;
- ib_init_type_t init_type_reply; /* Optional */
+ ib_init_type_t init_type_reply; /* Optional */
/*
* subnet_timeout:
*/
uint8_t subnet_timeout;
- ib_port_cap_t cap;
+ ib_port_cap_t cap;
uint16_t pkey_ctr;
uint16_t qkey_ctr;
* uint8_t, ib_port_cap_t, ib_link_states_t
*****/
-
/****s* Access Layer/ib_ca_attr_t
* NAME
* ib_ca_attr_t
uint8_t num_ports;
uint32_t *p_page_size;
- ib_port_attr_t *p_port_attr;
+ ib_port_attr_t *p_port_attr;
} ib_ca_attr_t;
/*
* revision
* Revision ID of this adapter
*
-* Fw_ver
+* fw_ver
* Device Firmware version.
*
* size
*/
ib_ca_attr_t*
ib_copy_ca_attr(
- IN ib_ca_attr_t* const p_dest,
- IN const ib_ca_attr_t* const p_src );
+ IN ib_ca_attr_t* const p_dest,
+ IN const ib_ca_attr_t* const p_src );
/*
* PARAMETERS
* p_dest
* ib_ca_attr_t, ib_dup_ca_attr, ib_free_ca_attr
*****/
-
/****s* Access Layer/ib_av_attr_t
* NAME
* ib_av_attr_t
* ib_gid_t
*****/
-
/****d* Access Layer/ib_qp_type_t
* NAME
* ib_qp_type_t
*/
typedef enum _ib_qp_type
{
- IB_QPT_RELIABLE_CONN = 0, /* Matches CM REQ transport type */
- IB_QPT_UNRELIABLE_CONN = 1, /* Matches CM REQ transport type */
- IB_QPT_RELIABLE_DGRM = 2, /* Matches CM REQ transport type */
+ IB_QPT_RELIABLE_CONN = 0, /* Matches CM REQ transport type */
+ IB_QPT_UNRELIABLE_CONN = 1, /* Matches CM REQ transport type */
+ IB_QPT_RELIABLE_DGRM = 2, /* Matches CM REQ transport type */
IB_QPT_UNRELIABLE_DGRM,
IB_QPT_QP0,
IB_QPT_QP1,
IB_QPT_RAW_IPV6,
IB_QPT_RAW_ETHER,
- IB_QPT_MAD, /* InfiniBand Access Layer */
- IB_QPT_QP0_ALIAS, /* InfiniBand Access Layer */
- IB_QPT_QP1_ALIAS /* InfiniBand Access Layer */
+ IB_QPT_MAD, /* InfiniBand Access Layer */
+ IB_QPT_QP0_ALIAS, /* InfiniBand Access Layer */
+ IB_QPT_QP1_ALIAS /* InfiniBand Access Layer */
} ib_qp_type_t;
/*
* protection domain.
*****/
-
/****d* Access Layer/ib_access_t
* NAME
* ib_access_t
* SYNOPSIS
*/
typedef uint32_t ib_access_t;
-#define IB_AC_RDMA_READ 0x00000001
-#define IB_AC_RDMA_WRITE 0x00000002
-#define IB_AC_ATOMIC 0x00000004
-#define IB_AC_LOCAL_WRITE 0x00000008
-#define IB_AC_MW_BIND 0x00000010
+#define IB_AC_RDMA_READ 0x00000001
+#define IB_AC_RDMA_WRITE 0x00000002
+#define IB_AC_ATOMIC 0x00000004
+#define IB_AC_LOCAL_WRITE 0x00000008
+#define IB_AC_MW_BIND 0x00000010
/*
* NOTES
* Users may combine access rights using a bit-wise or operation to specify
* RDMA read and write access.
*****/
-
/****d* Access Layer/ib_qp_state_t
* NAME
* ib_qp_state_t
* SYNOPSIS
*/
typedef uint32_t ib_qp_state_t;
-#define IB_QPS_RESET 0x00000001
+#define IB_QPS_RESET 0x00000001
#define IB_QPS_INIT 0x00000002
#define IB_QPS_RTR 0x00000004
#define IB_QPS_RTS 0x00000008
#define IB_QPS_SQD 0x00000010
-#define IB_QPS_SQD_DRAINING 0x00000030
-#define IB_QPS_SQD_DRAINED 0x00000050
-#define IB_QPS_SQERR 0x00000080
-#define IB_QPS_ERROR 0x00000100
-#define IB_QPS_TIME_WAIT 0xDEAD0000 /* InfiniBand Access Layer */
+#define IB_QPS_SQD_DRAINING 0x00000030
+#define IB_QPS_SQD_DRAINED 0x00000050
+#define IB_QPS_SQERR 0x00000080
+#define IB_QPS_ERROR 0x00000100
+#define IB_QPS_TIME_WAIT 0xDEAD0000 /* InfiniBand Access Layer */
/*****/
-
/****d* Access Layer/ib_apm_state_t
* NAME
* ib_apm_state_t
} ib_apm_state_t;
/*****/
-
/****s* Access Layer/ib_qp_create_t
* NAME
* ib_qp_create_t
ib_rdd_handle_t h_rdd;
- uint32_t sq_depth;
- uint32_t rq_depth;
- uint32_t sq_sge;
- uint32_t rq_sge;
+ uint32_t sq_depth;
+ uint32_t rq_depth;
+ uint32_t sq_sge;
+ uint32_t rq_sge;
ib_cq_handle_t h_sq_cq;
ib_cq_handle_t h_rq_cq;
- boolean_t sq_signaled;
+ boolean_t sq_signaled;
} ib_qp_create_t;
/*
* ib_qp_type_t, ib_qp_attr_t
*****/
-
/****s* Access Layer/ib_qp_attr_t
* NAME
* ib_qp_attr_t
*/
typedef struct _ib_qp_attr
{
- ib_pd_handle_t h_pd;
- ib_qp_type_t qp_type;
+ ib_pd_handle_t h_pd;
+ ib_qp_type_t qp_type;
ib_access_t access_ctrl;
uint16_t pkey_index;
uint8_t init_depth;
uint8_t resp_res;
- ib_cq_handle_t h_sq_cq;
- ib_cq_handle_t h_rq_cq;
- ib_rdd_handle_t h_rdd;
+ ib_cq_handle_t h_sq_cq;
+ ib_cq_handle_t h_rq_cq;
+ ib_rdd_handle_t h_rdd;
boolean_t sq_signaled;
- ib_qp_state_t state;
+ ib_qp_state_t state;
ib_net32_t num;
ib_net32_t dest_num;
ib_net32_t qkey;
uint8_t primary_port;
uint8_t alternate_port;
- ib_av_attr_t primary_av;
- ib_av_attr_t alternate_av;
- ib_apm_state_t apm_state;
+ ib_av_attr_t primary_av;
+ ib_av_attr_t alternate_av;
+ ib_apm_state_t apm_state;
} ib_qp_attr_t;
/*
* ib_qp_type_t, ib_access_t, ib_qp_state_t, ib_av_attr_t, ib_apm_state_t
*****/
-
/****d* Access Layer/ib_qp_opts_t
* NAME
* ib_qp_opts_t
* SYNOPSIS
*/
typedef uint32_t ib_qp_opts_t;
-#define IB_MOD_QP_ALTERNATE_AV 0x00000001
+#define IB_MOD_QP_ALTERNATE_AV 0x00000001
#define IB_MOD_QP_PKEY 0x00000002
#define IB_MOD_QP_APM_STATE 0x00000004
-#define IB_MOD_QP_PRIMARY_AV 0x00000008
-#define IB_MOD_QP_RNR_NAK_TIMEOUT 0x00000010
+#define IB_MOD_QP_PRIMARY_AV 0x00000008
+#define IB_MOD_QP_RNR_NAK_TIMEOUT 0x00000010
#define IB_MOD_QP_RESP_RES 0x00000020
-#define IB_MOD_QP_INIT_DEPTH 0x00000040
-#define IB_MOD_QP_PRIMARY_PORT 0x00000080
-#define IB_MOD_QP_ACCESS_CTRL 0x00000100
+#define IB_MOD_QP_INIT_DEPTH 0x00000040
+#define IB_MOD_QP_PRIMARY_PORT 0x00000080
+#define IB_MOD_QP_ACCESS_CTRL 0x00000100
#define IB_MOD_QP_QKEY 0x00000200
#define IB_MOD_QP_SQ_DEPTH 0x00000400
#define IB_MOD_QP_RQ_DEPTH 0x00000800
-#define IB_MOD_QP_CURRENT_STATE 0x00001000
+#define IB_MOD_QP_CURRENT_STATE 0x00001000
#define IB_MOD_QP_RETRY_CNT 0x00002000
-#define IB_MOD_QP_LOCAL_ACK_TIMEOUT 0x00004000
-#define IB_MOD_QP_RNR_RETRY_CNT 0x00008000
-
+#define IB_MOD_QP_LOCAL_ACK_TIMEOUT 0x00004000
+#define IB_MOD_QP_RNR_RETRY_CNT 0x00008000
/*
* SEE ALSO
* ib_qp_mod_t
*****/
-
/****s* Access Layer/ib_qp_mod_t
* NAME
* ib_qp_mod_t
* Time, in milliseconds, that the QP needs to spend in
* the time wait state before being reused.
*/
- uint32_t timewait;
+ uint32_t timewait;
} reset;
struct _qp_init
{
ib_qp_opts_t opts;
- uint8_t primary_port;
- ib_net32_t qkey;
- uint16_t pkey_index;
- ib_access_t access_ctrl;
+ uint8_t primary_port;
+ ib_net32_t qkey;
+ uint16_t pkey_index;
+ ib_access_t access_ctrl;
} init;
struct _qp_rtr
{
- ib_net32_t rq_psn;
- ib_net32_t dest_qp;
+ ib_net32_t rq_psn;
+ ib_net32_t dest_qp;
ib_av_attr_t primary_av;
- uint8_t resp_res;
+ uint8_t resp_res;
ib_qp_opts_t opts;
ib_av_attr_t alternate_av;
- ib_net32_t qkey;
- uint16_t pkey_index;
- ib_access_t access_ctrl;
- uint32_t sq_depth;
- uint32_t rq_depth;
- uint8_t rnr_nak_timeout;
+ ib_net32_t qkey;
+ uint16_t pkey_index;
+ ib_access_t access_ctrl;
+ uint32_t sq_depth;
+ uint32_t rq_depth;
+ uint8_t rnr_nak_timeout;
} rtr;
struct _qp_rts
{
- ib_net32_t sq_psn;
- uint8_t retry_cnt;
- uint8_t rnr_retry_cnt;
- uint8_t rnr_nak_timeout;
- uint8_t local_ack_timeout;
- uint8_t init_depth;
+ ib_net32_t sq_psn;
+ uint8_t retry_cnt;
+ uint8_t rnr_retry_cnt;
+ uint8_t rnr_nak_timeout;
+ uint8_t local_ack_timeout;
+ uint8_t init_depth;
ib_qp_opts_t opts;
ib_qp_state_t current_state;
- ib_net32_t qkey;
- ib_access_t access_ctrl;
- uint8_t resp_res;
+ ib_net32_t qkey;
+ ib_access_t access_ctrl;
+ uint8_t resp_res;
ib_av_attr_t primary_av;
ib_av_attr_t alternate_av;
- uint32_t sq_depth;
- uint32_t rq_depth;
+ uint32_t sq_depth;
+ uint32_t rq_depth;
ib_apm_state_t apm_state;
- uint8_t primary_port;
- uint16_t pkey_index;
+ uint8_t primary_port;
+ uint16_t pkey_index;
} rts;
struct _qp_sqd
{
- boolean_t sqd_event;
+ boolean_t sqd_event;
} sqd;
* ib_qp_state_t, ib_access_t, ib_av_attr_t, ib_apm_state_t
*****/
-
/****s* Access Layer/ib_eec_attr_t
* NAME
* ib_eec_attr_t
*/
typedef struct _ib_eec_attr
{
- ib_qp_state_t state;
- ib_rdd_handle_t h_rdd;
+ ib_qp_state_t state;
+ ib_rdd_handle_t h_rdd;
ib_net32_t local_eecn;
ib_net32_t sq_psn;
ib_net32_t remote_eecn;
uint32_t init_depth;
uint32_t dest_num; // ??? What is this?
- ib_av_attr_t primary_av;
- ib_av_attr_t alternate_av;
- ib_apm_state_t apm_state;
+ ib_av_attr_t primary_av;
+ ib_av_attr_t alternate_av;
+ ib_apm_state_t apm_state;
} ib_eec_attr_t;
/*
* ib_qp_state_t, ib_av_attr_t, ib_apm_state_t
*****/
-
/****d* Access Layer/ib_eec_opts_t
* NAME
* ib_eec_opts_t
* SYNOPSIS
*/
typedef uint32_t ib_eec_opts_t;
-#define IB_MOD_EEC_ALTERNATE_AV 0x00000001
-#define IB_MOD_EEC_PKEY 0x00000002
-#define IB_MOD_EEC_APM_STATE 0x00000004
-#define IB_MOD_EEC_PRIMARY_AV 0x00000008
-#define IB_MOD_EEC_RNR 0x00000010
-#define IB_MOD_EEC_RESP_RES 0x00000020
-#define IB_MOD_EEC_OUTSTANDING 0x00000040
-#define IB_MOD_EEC_PRIMARY_PORT 0x00000080
+#define IB_MOD_EEC_ALTERNATE_AV 0x00000001
+#define IB_MOD_EEC_PKEY 0x00000002
+#define IB_MOD_EEC_APM_STATE 0x00000004
+#define IB_MOD_EEC_PRIMARY_AV 0x00000008
+#define IB_MOD_EEC_RNR 0x00000010
+#define IB_MOD_EEC_RESP_RES 0x00000020
+#define IB_MOD_EEC_OUTSTANDING 0x00000040
+#define IB_MOD_EEC_PRIMARY_PORT 0x00000080
/*
* NOTES
*
*/
typedef struct _ib_eec_mod
{
- ib_qp_state_t req_state;
+ ib_qp_state_t req_state;
union _eec_state
{
{
ib_net32_t rq_psn;
ib_net32_t remote_eecn;
- ib_av_attr_t primary_av;
+ ib_av_attr_t primary_av;
uint8_t resp_res;
- ib_eec_opts_t opts;
- ib_av_attr_t alternate_av;
+ ib_eec_opts_t opts;
+ ib_av_attr_t alternate_av;
uint16_t pkey_index;
} rtr;
uint8_t local_ack_timeout;
uint8_t init_depth;
- ib_eec_opts_t opts;
- ib_av_attr_t alternate_av;
- ib_apm_state_t apm_state;
+ ib_eec_opts_t opts;
+ ib_av_attr_t alternate_av;
+ ib_apm_state_t apm_state;
- ib_av_attr_t primary_av;
+ ib_av_attr_t primary_av;
uint16_t pkey_index;
uint8_t primary_port;
* ib_qp_state_t, ib_av_attr_t, ib_apm_state_t
*****/
-
/****d* Access Layer/ib_wr_type_t
* NAME
* ib_wr_type_t
} ib_wr_type_t;
/*****/
-
/****s* Access Layer/ib_local_ds_t
* NAME
* ib_local_ds_t
} ib_local_ds_t;
/*****/
-
/****d* Access Layer/ib_send_opt_t
* NAME
* ib_send_opt_t
*
* SYNOPSIS
*/
-typedef uint32_t ib_send_opt_t;
+typedef uint32_t ib_send_opt_t;
#define IB_SEND_OPT_IMMEDIATE 0x00000001
-#define IB_SEND_OPT_FENCE 0x00000002
+#define IB_SEND_OPT_FENCE 0x00000002
#define IB_SEND_OPT_SIGNALED 0x00000004
#define IB_SEND_OPT_SOLICITED 0x00000008
-#define IB_SEND_OPT_INLINE 0x00000010
-#define IB_SEND_OPT_LOCAL 0x00000020
+#define IB_SEND_OPT_INLINE 0x00000010
+#define IB_SEND_OPT_LOCAL 0x00000020
#define IB_SEND_OPT_VEND_MASK 0xFFFF0000
-
-
/*
* VALUES
* The following flags determine the behavior of a work request when
*
*****/
-
/****s* Access Layer/ib_send_wr_t
* NAME
* ib_send_wr_t
*/
typedef struct _ib_send_wr
{
- struct _ib_send_wr *p_next;
+ struct _ib_send_wr *p_next;
uint64_t wr_id;
- ib_wr_type_t wr_type;
- ib_send_opt_t send_opt;
+ ib_wr_type_t wr_type;
+ ib_send_opt_t send_opt;
uint32_t num_ds;
- ib_local_ds_t *ds_array;
+ ib_local_ds_t *ds_array;
ib_net32_t immediate_data;
union _send_dgrm
{
ib_net32_t remote_qp;
ib_net32_t remote_qkey;
- ib_av_handle_t h_av;
+ ib_av_handle_t h_av;
} ud;
* ib_wr_type_t, ib_local_ds_t, ib_send_opt_t
*****/
-
/****s* Access Layer/ib_recv_wr_t
* NAME
* ib_recv_wr_t
typedef struct _ib_recv_wr
{
struct _ib_recv_wr *p_next;
- uint64_t wr_id;
- uint32_t num_ds;
+ uint64_t wr_id;
+ uint32_t num_ds;
ib_local_ds_t *ds_array;
-
} ib_recv_wr_t;
/*
* FIELDS
* ib_local_ds_t
*****/
-
/****s* Access Layer/ib_bind_wr_t
* NAME
* ib_bind_wr_t
typedef struct _ib_bind_wr
{
uint64_t wr_id;
- ib_send_opt_t send_opt;
+ ib_send_opt_t send_opt;
- ib_mr_handle_t h_mr;
+ ib_mr_handle_t h_mr;
ib_access_t access_ctrl;
uint32_t current_rkey;
- ib_local_ds_t local_ds;
+ ib_local_ds_t local_ds;
} ib_bind_wr_t;
/*
* ib_send_opt_t, ib_access_t, ib_local_ds_t
*****/
-
/****d* Access Layer/ib_wc_status_t
* NAME
* ib_wc_status_t
IB_WCS_REM_INVALID_RD_REQ_ERR,
IB_WCS_INVALID_EECN,
IB_WCS_INVALID_EEC_STATE,
- IB_WCS_UNMATCHED_RESPONSE, /* InfiniBand Access Layer */
- IB_WCS_CANCELED, /* InfiniBand Access Layer */
- IB_WCS_UNKNOWN /* Must be last. */
+ IB_WCS_UNMATCHED_RESPONSE, /* InfiniBand Access Layer */
+ IB_WCS_CANCELED, /* InfiniBand Access Layer */
+ IB_WCS_UNKNOWN /* Must be last. */
} ib_wc_status_t;
/*
* The completed work request was canceled by the user.
*****/
-
AL_EXPORT const char* ib_wc_status_str[];
-
/****f* IBA Base: Types/ib_get_wc_status_str
* NAME
* ib_get_wc_status_str
*/
AL_INLINE const char* AL_API
ib_get_wc_status_str(
- IN ib_wc_status_t wc_status )
+ IN ib_wc_status_t wc_status )
{
if( wc_status > IB_WCS_UNKNOWN )
wc_status = IB_WCS_UNKNOWN;
* SEE ALSO
*********/
-
/****d* Access Layer/ib_wc_type_t
* NAME
* ib_wc_type_t
} ib_wc_type_t;
/*****/
-
/****d* Access Layer/ib_recv_opt_t
* NAME
* ib_recv_opt_t
*
* SYNOPSIS
*/
-typedef uint32_t ib_recv_opt_t;
+typedef uint32_t ib_recv_opt_t;
#define IB_RECV_OPT_IMMEDIATE 0x00000001
-#define IB_RECV_OPT_FORWARD 0x00000002
+#define IB_RECV_OPT_FORWARD 0x00000002
#define IB_RECV_OPT_GRH_VALID 0x00000004
#define IB_RECV_OPT_VEND_MASK 0xFFFF0000
/*
* but may have specific meaning to the underlying VPD.
*****/
-
/****s* Access Layer/ib_wc_t
* NAME
* ib_wc_t
typedef struct _ib_wc
{
struct _ib_wc *p_next;
- uint64_t wr_id;
+ uint64_t wr_id;
ib_wc_type_t wc_type;
- uint32_t length;
+ uint32_t length;
ib_wc_status_t status;
- uint64_t vendor_specific;
+ uint64_t vendor_specific;
union _wc_recv
{
struct _wc_conn
{
ib_recv_opt_t recv_opt;
- ib_net32_t immediate_data;
+ ib_net32_t immediate_data;
} conn;
struct _wc_ud
{
ib_recv_opt_t recv_opt;
- ib_net32_t immediate_data;
- ib_net32_t remote_qp;
- uint16_t pkey_index;
- ib_net16_t remote_lid;
- uint8_t remote_sl;
- uint8_t path_bits;
+ ib_net32_t immediate_data;
+ ib_net32_t remote_qp;
+ uint16_t pkey_index;
+ ib_net16_t remote_lid;
+ uint8_t remote_sl;
+ uint8_t path_bits;
} ud;
struct _wc_rd
{
- ib_net32_t remote_eecn;
- ib_net32_t remote_qp;
- ib_net16_t remote_lid;
- uint8_t remote_sl;
- uint32_t free_cnt;
+ ib_net32_t remote_eecn;
+ ib_net32_t remote_qp;
+ ib_net16_t remote_lid;
+ uint8_t remote_sl;
+ uint32_t free_cnt;
} rd;
struct _wc_raw_ipv6
{
- ib_net16_t remote_lid;
- uint8_t remote_sl;
- uint8_t path_bits;
+ ib_net16_t remote_lid;
+ uint8_t remote_sl;
+ uint8_t path_bits;
} raw_ipv6;
struct _wc_raw_ether
{
- ib_net16_t remote_lid;
- uint8_t remote_sl;
- uint8_t path_bits;
- ib_net16_t ether_type;
+ ib_net16_t remote_lid;
+ uint8_t remote_sl;
+ uint8_t path_bits;
+ ib_net16_t ether_type;
} raw_ether;
* ib_wc_type_t, ib_qp_type_t, ib_wc_status_t, ib_recv_opt_t
*****/
-
/****s* Access Layer/ib_mr_create_t
* NAME
* ib_mr_create_t
void *vaddr;
uint64_t length;
ib_access_t access_ctrl;
-
} ib_mr_create_t;
/*
* FIELDS
* ib_access_t
*****/
-
/****s* Access Layer/ib_phys_create_t
* NAME
* ib_phys_create_t
uint32_t buf_offset;
uint32_t page_size;
ib_access_t access_ctrl;
-
} ib_phys_create_t;
/*
* length
* ib_access_t
*****/
-
/****s* Access Layer/ib_mr_attr_t
* NAME
* ib_mr_attr_t
typedef struct _ib_mr_attr
{
ib_pd_handle_t h_pd;
- void *local_lb;
- void *local_ub;
- void *remote_lb;
- void *remote_ub;
- ib_access_t access_ctrl;
- uint32_t lkey;
- uint32_t rkey;
-
+ void *local_lb;
+ void *local_ub;
+ void *remote_lb;
+ void *remote_ub;
+ ib_access_t access_ctrl;
+ uint32_t lkey;
+ uint32_t rkey;
} ib_mr_attr_t;
/*
* DESCRIPTION
* ib_access_t
*****/
-
/****d* Access Layer/ib_ca_mod_t
* NAME
* ib_ca_mod_t -- Modify port attributes and error counters
*
* SYNOPSIS
*/
-typedef uint32_t ib_ca_mod_t;
-#define IB_CA_MOD_IS_CM_SUPPORTED 0x00000001
-#define IB_CA_MOD_IS_SNMP_SUPPORTED 0x00000002
+typedef uint32_t ib_ca_mod_t;
+#define IB_CA_MOD_IS_CM_SUPPORTED 0x00000001
+#define IB_CA_MOD_IS_SNMP_SUPPORTED 0x00000002
#define IB_CA_MOD_IS_DEV_MGMT_SUPPORTED 0x00000004
-#define IB_CA_MOD_IS_VEND_SUPPORTED 0x00000008
-#define IB_CA_MOD_IS_SM 0x00000010
-#define IB_CA_MOD_IS_SM_DISABLED 0x00000020
-#define IB_CA_MOD_QKEY_CTR 0x00000040
-#define IB_CA_MOD_PKEY_CTR 0x00000080
+#define IB_CA_MOD_IS_VEND_SUPPORTED 0x00000008
+#define IB_CA_MOD_IS_SM 0x00000010
+#define IB_CA_MOD_IS_SM_DISABLED 0x00000020
+#define IB_CA_MOD_QKEY_CTR 0x00000040
+#define IB_CA_MOD_PKEY_CTR 0x00000080
#define IB_CA_MOD_IS_NOTICE_SUPPORTED 0x00000100
-#define IB_CA_MOD_IS_TRAP_SUPPORTED 0x00000200
-#define IB_CA_MOD_IS_APM_SUPPORTED 0x00000400
+#define IB_CA_MOD_IS_TRAP_SUPPORTED 0x00000200
+#define IB_CA_MOD_IS_APM_SUPPORTED 0x00000400
#define IB_CA_MOD_IS_SLMAP_SUPPORTED 0x00000800
#define IB_CA_MOD_IS_PKEY_NVRAM_SUPPORTED 0x00001000
#define IB_CA_MOD_IS_MKEY_NVRAM_SUPPORTED 0x00002000
#define IB_CA_MOD_IS_CAPM_NOTICE_SUPPORTED 0x00020000
#define IB_CA_MOD_IS_REINIT_SUPORTED 0x00040000
#define IB_CA_MOD_IS_LEDINFO_SUPPORTED 0x00080000
-#define IB_CA_MOD_SHUTDOWN_PORT 0x00100000
-#define IB_CA_MOD_INIT_TYPE_VALUE 0x00200000
-#define IB_CA_MOD_SYSTEM_IMAGE_GUID 0x00400000
+#define IB_CA_MOD_SHUTDOWN_PORT 0x00100000
+#define IB_CA_MOD_INIT_TYPE_VALUE 0x00200000
+#define IB_CA_MOD_SYSTEM_IMAGE_GUID 0x00400000
/*
* VALUES
* IB_CA_MOD_IS_CM_SUPPORTED
* Used to modify the system image GUID for the port.
*****/
-
/****d* Access Layer/ib_mr_mod_t
* NAME
* ib_mr_mod_t
*/
typedef struct _ib_ci_op
{
- IN uint32_t command;
- IN OUT void* p_buf OPTIONAL;
- IN uint32_t buf_size;
- IN OUT uint32_t num_bytes_ret;
- IN OUT int32_t status;
+ IN uint32_t command;
+ IN OUT void* p_buf OPTIONAL;
+ IN uint32_t buf_size;
+ IN OUT uint32_t num_bytes_ret;
+ IN OUT int32_t status;
} ib_ci_op_t;
/*
IB_INVALID_MAX_WRS,
IB_INVALID_MAX_SGE,
IB_INVALID_CQ_SIZE,
+ IB_INVALID_SRQ_SIZE,
IB_INVALID_SERVICE_TYPE,
IB_INVALID_GID,
IB_INVALID_LID,
IB_INVALID_AV_HANDLE,
IB_INVALID_CQ_HANDLE,
IB_INVALID_QP_HANDLE,
+ IB_INVALID_SRQ_HANDLE,
IB_INVALID_PD_HANDLE,
IB_INVALID_MR_HANDLE,
+ IB_INVALID_FMR_HANDLE,
IB_INVALID_MW_HANDLE,
IB_INVALID_MCAST_HANDLE,
IB_INVALID_CALLBACK,
IB_VERBS_PROCESSING_DONE, /* See Notes above */
IB_INVALID_WR_TYPE,
IB_QP_IN_TIMEWAIT,
+ IB_EE_IN_TIMEWAIT,
IB_INVALID_PORT,
IB_NOT_DONE,
IB_INVALID_INDEX,
IB_AE_WQ_ACCESS_ERROR,
IB_AE_PORT_ACTIVE,
IB_AE_PORT_DOWN,
+ IB_AE_CLIENT_REREGISTER,
+ IB_AE_SRQ_LIMIT_REACHED,
+ IB_AE_SRQ_QP_LAST_WQE_REACHED,
IB_AE_UNKNOWN /* ALWAYS LAST ENUM VALUE */
} ib_async_event_t;
* The link is declared unavailable: IB_LINK_INIT, IB_LINK_ARMED,
* IB_LINK_DOWN.
*
+* IB_AE_CLIENT_REREGISTER
+*	The SM indicates to the client that it should reregister its SA records.
+*
+* IB_AE_SRQ_CATAS_ERROR
+* An error occurred while processing or accessing the SRQ that prevents
+* dequeuing a WQE from the SRQ and reporting of receive completions.
+*
+* IB_AE_SRQ_QP_LAST_WQE_REACHED
+*	An event issued for a QP associated with a shared receive queue when
+*	a CQE is generated for the last WQE, or when the QP enters the Error
+*	state and no more WQEs remain on the RQ.
+*
* IB_AE_UNKNOWN
* An unknown error occurred which cannot be attributed to any
* resource; behavior is indeterminate.
uint32_t max_qps_per_mcast_grp;
uint32_t max_fmr;
uint32_t max_map_per_fmr;
+ uint32_t max_srq;
+ uint32_t max_srq_wrs;
+ uint32_t max_srq_sges;
/*
* local_ack_delay:
boolean_t av_port_check;
boolean_t change_primary_port;
boolean_t modify_wr_depth;
+ boolean_t modify_srq_depth;
boolean_t current_qp_state_support;
boolean_t shutdown_port_capability;
boolean_t init_type_support;
* Maximum limit on number of responder resources for incomming RDMA
* operations on QPs.
*
+* max_fmr
+* Maximum number of Fast Memory Regions supported.
+*
+* max_map_per_fmr
+* Maximum number of mappings, supported by a Fast Memory Region.
+*
+* max_srq
+* Maximum number of Shared Receive Queues supported.
+*
+* max_srq_wrs
+*	Maximum number of work requests supported per SRQ.
+*
+* max_srq_sges
+*	Maximum number of scatter/gather elements supported per work request on an SRQ.
+*
* max_resp_res
* Maximum number of responder resources per HCA, with this HCA used as
* the target.
* Indicates ability to modify QP depth during a modify QP operation.
* Check the verb specification for permitted states.
*
+* modify_srq_depth
+* Indicates ability to modify SRQ depth during a modify SRQ operation.
+* Check the verb specification for permitted states.
+*
* current_qp_state_support
* Indicates ability of the HCA to support the current QP state modifier
* during a modify QP operation.
} ib_apm_state_t;
/*****/
+/****d* Access Layer/ib_srq_attr_mask_t
+* NAME
+* ib_srq_attr_mask_t
+*
+* DESCRIPTION
+* Indicates valid fields in ib_srq_attr_t structure
+*
+* SYNOPSIS
+*/
+typedef enum _ib_srq_attr_mask {
+ IB_SRQ_MAX_WR = 1 << 0,
+ IB_SRQ_LIMIT = 1 << 1,
+} ib_srq_attr_mask_t;
+/*****/
+
+
+/****s* Access Layer/ib_srq_attr_t
+* NAME
+* ib_srq_attr_t
+*
+* DESCRIPTION
+*	Attributes used to initialize a shared receive queue at creation time.
+*
+* SYNOPSIS
+*/
+typedef struct _ib_srq_attr {
+ uint32_t max_wr;
+ uint32_t max_sge;
+ uint32_t srq_limit;
+} ib_srq_attr_t;
+/*
+* FIELDS
+* max_wr
+*	Specifies the maximum number of work requests on the SRQ.
+*
+* max_sge
+* Specifies the max number of scatter/gather elements in one work request.
+*
+* srq_limit
+* Specifies the low water mark for SRQ.
+*
+* SEE ALSO
+* ib_qp_type_t, ib_srq_attr_mask_t
+*****/
+
+
/****s* Access Layer/ib_qp_create_t
* NAME
ib_cq_handle_t h_sq_cq;
ib_cq_handle_t h_rq_cq;
+ ib_srq_handle_t h_srq;
boolean_t sq_signaled;
* work request completions. This handle must be NULL if the type is
* IB_QPT_MAD, IB_QPT_QP0_ALIAS, or IB_QPT_QP1_ALIAS.
*
+* h_srq
+*	A handle to the SRQ through which receive completions are reported.
+*	Must be NULL when the QP is not associated with an SRQ.
+*
* sq_signaled
* A flag that is used to indicate whether the queue pair will signal
* an event upon completion of a send work request. If set to
ib_cq_handle_t h_sq_cq;
ib_cq_handle_t h_rq_cq;
+ ib_srq_handle_t h_srq;
boolean_t sq_signaled;
p_ifc = &p_hca->p_hba->ifc;\r
\r
// Create QP\r
+ cl_memclr( &qp_create, sizeof(qp_create) );\r
qp_create.qp_type = IB_QPT_RELIABLE_CONN;\r
qp_create.sq_depth = SRP_DEFAULT_SEND_Q_DEPTH;\r
qp_create.rq_depth = SRP_DEFAULT_RECV_Q_DEPTH;\r
}\r
\r
/* Queue pair */\r
+ cl_memclr(&qp_create, sizeof(ib_qp_create_t));\r
qp_create.qp_type = IB_QPT_RELIABLE_CONN;\r
qp_create.sq_depth = QP_ATTRIB_SQ_DEPTH;\r
qp_create.rq_depth = QP_ATTRIB_RQ_DEPTH;\r