From f2c42d8c9ea7ab7a87177f285581f3a6899108ca Mon Sep 17 00:00:00 2001 From: leonidk Date: Mon, 27 Nov 2006 20:03:51 +0000 Subject: [PATCH] [MTHCA, IBAL et al] added SRQ support git-svn-id: svn://openib.tc.cornell.edu/gen1@548 ad392aa1-c5ef-ae45-8dd8-e69d62a5ef86 --- trunk/core/al/al_ci_ca_shared.c | 7 + trunk/core/al/al_common.h | 26 +- trunk/core/al/al_debug.h | 5 +- trunk/core/al/al_dev.h | 12 +- trunk/core/al/al_pd.c | 55 + trunk/core/al/al_proxy_ioctl.h | 1 + trunk/core/al/al_qp.c | 17 + trunk/core/al/al_qp.h | 3 + trunk/core/al/al_srq.c | 438 ++++++ trunk/core/al/al_srq.h | 108 ++ trunk/core/al/al_verbs.h | 61 +- trunk/core/al/ib_statustext.c | 32 +- trunk/core/al/kernel/SOURCES | 1 + trunk/core/al/kernel/al_mgr.c | 9 +- trunk/core/al/kernel/al_proxy_verbs.c | 463 ++++++ trunk/core/al/user/SOURCES | 2 + trunk/core/al/user/ual_ci_ca.h | 27 + trunk/core/al/user/ual_mgr.c | 11 +- trunk/core/al/user/ual_qp.c | 19 +- trunk/core/al/user/ual_srq.c | 438 ++++++ trunk/core/bus/kernel/bus_pnp.c | 5 + trunk/hw/mthca/kernel/hca_data.c | 27 +- trunk/hw/mthca/kernel/hca_data.h | 2 + trunk/hw/mthca/kernel/hca_debug.h | 16 +- trunk/hw/mthca/kernel/hca_direct.c | 42 +- trunk/hw/mthca/kernel/hca_driver.h | 1 + trunk/hw/mthca/kernel/hca_mcast.c | 41 +- trunk/hw/mthca/kernel/hca_verbs.c | 157 +- trunk/hw/mthca/kernel/ib_verbs.h | 70 +- trunk/hw/mthca/kernel/mt_l2w.c | 264 ++-- trunk/hw/mthca/kernel/mt_l2w.h | 2 +- trunk/hw/mthca/kernel/mt_verbs.c | 122 +- trunk/hw/mthca/kernel/mthca_cmd.c | 7 + trunk/hw/mthca/kernel/mthca_cmd.h | 2 + trunk/hw/mthca/kernel/mthca_cq.c | 9 +- trunk/hw/mthca/kernel/mthca_dev.h | 13 +- trunk/hw/mthca/kernel/mthca_eq.c | 17 +- trunk/hw/mthca/kernel/mthca_main.c | 1 + trunk/hw/mthca/kernel/mthca_provider.c | 68 +- trunk/hw/mthca/kernel/mthca_provider.h | 851 +++++------ trunk/hw/mthca/kernel/mthca_qp.c | 18 +- trunk/hw/mthca/kernel/mthca_srq.c | 208 ++- trunk/hw/mthca/kernel/mthca_user.h | 67 - trunk/hw/mthca/mx_abi.h | 18 + trunk/hw/mthca/user/SOURCES | 3 +- trunk/hw/mthca/user/mlnx_ual_av.c | 4 + trunk/hw/mthca/user/mlnx_ual_main.c | 5 + trunk/hw/mthca/user/mlnx_ual_main.h | 59 + trunk/hw/mthca/user/mlnx_ual_osbypass.c | 37 + trunk/hw/mthca/user/mlnx_ual_qp.c | 4 +- trunk/hw/mthca/user/mlnx_ual_srq.c | 269 ++++ trunk/hw/mthca/user/mlnx_uvp.h | 2 +- trunk/hw/mthca/user/mlnx_uvp_abi.h | 61 - trunk/hw/mthca/user/mlnx_uvp_debug.h | 12 +- trunk/hw/mthca/user/mlnx_uvp_srq.c | 40 +- trunk/hw/mthca/user/mlnx_uvp_verbs.c | 5 +- trunk/hw/mthca/user/mlnx_uvp_verbs.h | 11 +- trunk/inc/iba/ib_al.h | 327 ++++- trunk/inc/iba/ib_al_ioctl.h | 245 ++++ trunk/inc/iba/ib_at_ioctl.h | 60 +- trunk/inc/iba/ib_ci.h | 239 ++- trunk/inc/iba/ib_types.h | 125 +- trunk/inc/kernel/iba/ib_al_ifc.h | 37 +- trunk/inc/user/iba/ib_uvp.h | 450 ++++++ trunk/tests/alts/createanddestroyqp.c | 1 + trunk/tests/wsd/user/test2/ibwrap.c | 1 + trunk/tests/wsd/user/test3/ibwrap.c | 1 + trunk/tools/vstat/user/vstat_main.c | 10 +- trunk/ulp/opensm/user/include/iba/ib_types.h | 1305 +++++++++-------- .../user/include/iba/ib_types_extended.h | 94 ++ trunk/ulp/srp/kernel/srp_connection.c | 1 + trunk/ulp/wsd/user/ibsp_iblow.c | 1 + 72 files changed, 5470 insertions(+), 1702 deletions(-) create mode 100644 trunk/core/al/al_srq.c create mode 100644 trunk/core/al/al_srq.h create mode 100644 trunk/core/al/user/ual_srq.c delete mode 100644 trunk/hw/mthca/kernel/mthca_user.h create mode 100644 trunk/hw/mthca/user/mlnx_ual_srq.c delete mode 100644 trunk/hw/mthca/user/mlnx_uvp_abi.h diff --git a/trunk/core/al/al_ci_ca_shared.c 
b/trunk/core/al/al_ci_ca_shared.c index 354761e5..41d7c0d9 100644 --- a/trunk/core/al/al_ci_ca_shared.c +++ b/trunk/core/al/al_ci_ca_shared.c @@ -45,6 +45,7 @@ #include "al_mgr.h" #include "al_pnp.h" #include "al_qp.h" +#include "al_srq.h" #include "ib_common.h" @@ -284,9 +285,15 @@ ci_ca_process_event_cb( case IB_AE_SQ_DRAINED: case IB_AE_WQ_REQ_ERROR: case IB_AE_WQ_ACCESS_ERROR: + case IB_AE_SRQ_QP_LAST_WQE_REACHED: qp_async_event_cb( &p_event_item->event_rec ); break; + case IB_AE_SRQ_LIMIT_REACHED: + case IB_AE_SRQ_CATAS_ERROR: + srq_async_event_cb( &p_event_item->event_rec ); + break; + case IB_AE_CQ_ERROR: cq_async_event_cb( &p_event_item->event_rec ); break; diff --git a/trunk/core/al/al_common.h b/trunk/core/al/al_common.h index 02f04424..d8cdceca 100644 --- a/trunk/core/al/al_common.h +++ b/trunk/core/al/al_common.h @@ -121,7 +121,7 @@ typedef void * Different types of AL object's. Note that the upper byte signifies * a subtype. */ -#define AL_OBJ_TYPE_UNKNOWN 0 +#define AL_OBJ_TYPE_UNKNOWN 0 #define AL_OBJ_TYPE_H_AL 1 #define AL_OBJ_TYPE_H_QP 2 #define AL_OBJ_TYPE_H_AV 3 @@ -133,14 +133,14 @@ typedef void #define AL_OBJ_TYPE_H_CONN 9 #define AL_OBJ_TYPE_H_LISTEN 10 #define AL_OBJ_TYPE_H_IOC 11 -#define AL_OBJ_TYPE_H_SVC_ENTRY 12 +#define AL_OBJ_TYPE_H_SVC_ENTRY 12 #define AL_OBJ_TYPE_H_PNP 13 #define AL_OBJ_TYPE_H_SA_REQ 14 -#define AL_OBJ_TYPE_H_MCAST 15 +#define AL_OBJ_TYPE_H_MCAST 15 #define AL_OBJ_TYPE_H_ATTACH 16 #define AL_OBJ_TYPE_H_MAD 17 -#define AL_OBJ_TYPE_H_MAD_POOL 18 -#define AL_OBJ_TYPE_H_POOL_KEY 19 +#define AL_OBJ_TYPE_H_MAD_POOL 18 +#define AL_OBJ_TYPE_H_POOL_KEY 19 #define AL_OBJ_TYPE_H_MAD_SVC 20 #define AL_OBJ_TYPE_CI_CA 21 #define AL_OBJ_TYPE_CM 22 @@ -151,18 +151,19 @@ typedef void #define AL_OBJ_TYPE_MAD_POOL 27 #define AL_OBJ_TYPE_MAD_DISP 28 #define AL_OBJ_TYPE_AL_MGR 29 -#define AL_OBJ_TYPE_PNP_MGR 30 -#define AL_OBJ_TYPE_IOC_PNP_MGR 31 -#define AL_OBJ_TYPE_IOC_PNP_SVC 32 +#define AL_OBJ_TYPE_PNP_MGR 30 +#define AL_OBJ_TYPE_IOC_PNP_MGR 31 +#define AL_OBJ_TYPE_IOC_PNP_SVC 32 #define AL_OBJ_TYPE_QUERY_SVC 33 #define AL_OBJ_TYPE_MCAST_SVC 34 -#define AL_OBJ_TYPE_SA_REQ_SVC 35 -#define AL_OBJ_TYPE_RES_MGR 36 +#define AL_OBJ_TYPE_SA_REQ_SVC 35 +#define AL_OBJ_TYPE_RES_MGR 36 #define AL_OBJ_TYPE_H_CA_ATTR 37 -#define AL_OBJ_TYPE_H_PNP_EVENT 38 +#define AL_OBJ_TYPE_H_PNP_EVENT 38 #define AL_OBJ_TYPE_H_SA_REG 39 #define AL_OBJ_TYPE_H_FMR 40 -#define AL_OBJ_TYPE_INVALID 41 /* Must be last type. */ +#define AL_OBJ_TYPE_H_SRQ 41 +#define AL_OBJ_TYPE_INVALID 42 /* Must be last type. */ /* Kernel object for a user-mode app. 
*/ #define AL_OBJ_SUBTYPE_UM_EXPORT 0x80000000 @@ -233,7 +234,6 @@ typedef struct _al_obj */ boolean_t hdl_valid; #endif - } al_obj_t; diff --git a/trunk/core/al/al_debug.h b/trunk/core/al/al_debug.h index 96d002fe..6d1ed6d6 100644 --- a/trunk/core/al/al_debug.h +++ b/trunk/core/al/al_debug.h @@ -74,7 +74,7 @@ extern uint32_t g_al_dbg_flags; WPP_DEFINE_BIT( AL_DBG_AV)\ WPP_DEFINE_BIT( AL_DBG_CQ)\ WPP_DEFINE_BIT( AL_DBG_QP)\ - WPP_DEFINE_BIT( AL_DBG_RES3) \ + WPP_DEFINE_BIT( AL_DBG_SRQ)\ WPP_DEFINE_BIT( AL_DBG_MW)\ WPP_DEFINE_BIT( AL_DBG_RES4) \ WPP_DEFINE_BIT( AL_DBG_PROXY_CB)\ @@ -109,7 +109,7 @@ extern uint32_t g_al_dbg_flags; WPP_DEFINE_BIT( AL_DBG_AV)\ WPP_DEFINE_BIT( AL_DBG_CQ)\ WPP_DEFINE_BIT( AL_DBG_QP)\ - WPP_DEFINE_BIT( AL_DBG_RES3) \ + WPP_DEFINE_BIT( AL_DBG_SRQ)\ WPP_DEFINE_BIT( AL_DBG_MW)\ WPP_DEFINE_BIT( AL_DBG_RES4) \ WPP_DEFINE_BIT( AL_DBG_PROXY_CB)\ @@ -168,6 +168,7 @@ extern uint32_t g_al_dbg_flags; #define AL_DBG_AV (1 << 17) #define AL_DBG_CQ (1 << 18) #define AL_DBG_QP (1 << 19) +#define AL_DBG_SRQ (1 << 20) #define AL_DBG_MW (1 << 21) #define AL_DBG_PROXY_CB (1 << 23) #define AL_DBG_UAL (1 << 24) diff --git a/trunk/core/al/al_dev.h b/trunk/core/al/al_dev.h index 4c69fcce..c201f326 100644 --- a/trunk/core/al/al_dev.h +++ b/trunk/core/al/al_dev.h @@ -55,7 +55,7 @@ #define AL_DEVICE_NAME L"\\Device\\ibal" #define ALDEV_KEY (0x3B) /* Matches FILE_DEVICE_INFINIBAND from wdm.h */ -#define AL_IOCTL_VERSION (2) +#define AL_IOCTL_VERSION (3) #ifdef CL_KERNEL @@ -192,6 +192,10 @@ typedef enum _al_verbs_ops ual_query_av_ioctl_cmd, ual_modify_av_ioctl_cmd, ual_destroy_av_ioctl_cmd, + ual_create_srq_ioctl_cmd, + ual_query_srq_ioctl_cmd, + ual_modify_srq_ioctl_cmd, + ual_destroy_srq_ioctl_cmd, ual_create_qp_ioctl_cmd, ual_query_qp_ioctl_cmd, ual_modify_qp_ioctl_cmd, @@ -211,6 +215,7 @@ typedef enum _al_verbs_ops ual_destroy_mw_ioctl_cmd, ual_post_send_ioctl_cmd, ual_post_recv_ioctl_cmd, + ual_post_srq_recv_ioctl_cmd, ual_peek_cq_ioctl_cmd, ual_poll_cq_ioctl_cmd, ual_rearm_cq_ioctl_cmd, @@ -386,6 +391,10 @@ typedef enum _al_dev_ops #define UAL_QUERY_AV IOCTL_CODE(ALDEV_KEY, ual_query_av_ioctl_cmd) #define UAL_MODIFY_AV IOCTL_CODE(ALDEV_KEY, ual_modify_av_ioctl_cmd) #define UAL_DESTROY_AV IOCTL_CODE(ALDEV_KEY, ual_destroy_av_ioctl_cmd) +#define UAL_CREATE_SRQ IOCTL_CODE(ALDEV_KEY, ual_create_srq_ioctl_cmd) +#define UAL_QUERY_SRQ IOCTL_CODE(ALDEV_KEY, ual_query_srq_ioctl_cmd) +#define UAL_MODIFY_SRQ IOCTL_CODE(ALDEV_KEY, ual_modify_srq_ioctl_cmd) +#define UAL_DESTROY_SRQ IOCTL_CODE(ALDEV_KEY, ual_destroy_srq_ioctl_cmd) #define UAL_CREATE_QP IOCTL_CODE(ALDEV_KEY, ual_create_qp_ioctl_cmd) #define UAL_QUERY_QP IOCTL_CODE(ALDEV_KEY, ual_query_qp_ioctl_cmd) #define UAL_MODIFY_QP IOCTL_CODE(ALDEV_KEY, ual_modify_qp_ioctl_cmd) @@ -405,6 +414,7 @@ typedef enum _al_dev_ops #define UAL_DESTROY_MW IOCTL_CODE(ALDEV_KEY, ual_destroy_mw_ioctl_cmd) #define UAL_POST_SEND IOCTL_CODE(ALDEV_KEY, ual_post_send_ioctl_cmd) #define UAL_POST_RECV IOCTL_CODE(ALDEV_KEY, ual_post_recv_ioctl_cmd) +#define UAL_POST_SRQ_RECV IOCTL_CODE(ALDEV_KEY, ual_post_srq_recv_ioctl_cmd) #define UAL_PEEK_CQ IOCTL_CODE(ALDEV_KEY, ual_peek_cq_ioctl_cmd) #define UAL_POLL_CQ IOCTL_CODE(ALDEV_KEY, ual_poll_cq_ioctl_cmd) #define UAL_REARM_CQ IOCTL_CODE(ALDEV_KEY, ual_rearm_cq_ioctl_cmd) diff --git a/trunk/core/al/al_pd.c b/trunk/core/al/al_pd.c index 16c426cb..390f6486 100644 --- a/trunk/core/al/al_pd.c +++ b/trunk/core/al/al_pd.c @@ -48,6 +48,7 @@ #include "al_mw.h" #include "al_pd.h" #include "al_qp.h" +#include "al_srq.h" #include 
"al_verbs.h" #include "ib_common.h" @@ -259,6 +260,60 @@ free_pd( cl_free( h_pd ); } +ib_api_status_t +ib_create_srq( + IN const ib_pd_handle_t h_pd, + IN const ib_srq_attr_t* const p_srq_attr, + IN const void* const srq_context, + IN const ib_pfn_event_cb_t pfn_srq_event_cb OPTIONAL, + OUT ib_srq_handle_t* const ph_srq ) +{ + ib_api_status_t status; + + AL_ENTER( AL_DBG_SRQ ); + + if( AL_OBJ_INVALID_HANDLE( h_pd, AL_OBJ_TYPE_H_PD ) ) + { + AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_PD_HANDLE\n") ); + return IB_INVALID_PD_HANDLE; + } + + if( !p_srq_attr || !ph_srq) + { + AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_PARAMETER\n") ); + return IB_INVALID_PARAMETER; + } + + if( !p_srq_attr->max_wr) + { + AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_MAX_WRS\n") ); + return IB_INVALID_MAX_WRS; + } + + if (h_pd->obj.p_ci_ca && h_pd->obj.p_ci_ca->p_pnp_attr) + { + if (p_srq_attr->max_wr > h_pd->obj.p_ci_ca->p_pnp_attr->max_srq_wrs) + { + AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_MAX_WRS\n") ); + return IB_INVALID_MAX_WRS; + } + if (p_srq_attr->max_sge > h_pd->obj.p_ci_ca->p_pnp_attr->max_srq_sges) + { + AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_MAX_SGE\n") ); + return IB_INVALID_MAX_SGE; + } + } + + status = create_srq( + h_pd, p_srq_attr, srq_context, pfn_srq_event_cb, ph_srq, NULL ); + + /* Release the reference taken in init_al_obj (init_base_srq). */ + if( status == IB_SUCCESS ) + deref_al_obj( &(*ph_srq)->obj ); + + AL_EXIT( AL_DBG_SRQ ); + return status; +} ib_api_status_t diff --git a/trunk/core/al/al_proxy_ioctl.h b/trunk/core/al/al_proxy_ioctl.h index 4c9af8c1..3453aaea 100644 --- a/trunk/core/al/al_proxy_ioctl.h +++ b/trunk/core/al/al_proxy_ioctl.h @@ -59,6 +59,7 @@ typedef enum _misc_cb_rec_type { CA_ERROR_REC, QP_ERROR_REC, + SRQ_ERROR_REC, CQ_ERROR_REC, MCAST_REC, MAD_SEND_REC, diff --git a/trunk/core/al/al_qp.c b/trunk/core/al/al_qp.c index 9ff407ce..69aa89b4 100644 --- a/trunk/core/al/al_qp.c +++ b/trunk/core/al/al_qp.c @@ -294,6 +294,13 @@ create_qp( return IB_INVALID_PARAMETER; } + if (p_qp_create->h_srq && + AL_OBJ_INVALID_HANDLE( p_qp_create->h_srq, AL_OBJ_TYPE_H_SRQ ) ) + { + AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_SRQ_HANDLE\n") ); + return IB_INVALID_SRQ_HANDLE; + } + /* Allocate a QP. */ status = alloc_qp( p_qp_create->qp_type, &h_qp ); if( status != IB_SUCCESS ) @@ -721,6 +728,11 @@ init_raw_qp( cq_attach_qp( h_qp->h_recv_cq, &h_qp->recv_cq_rel ); cq_attach_qp( h_qp->h_send_cq, &h_qp->send_cq_rel ); + h_qp->h_srq = p_qp_create->h_srq; + h_qp->srq_rel.p_child_obj = (cl_obj_t*)h_qp; + if (h_qp->h_srq) + srq_attach_qp( h_qp->h_srq, &h_qp->srq_rel ); + h_qp->num = qp_attr.num; return IB_SUCCESS; @@ -1118,6 +1130,8 @@ destroying_qp( /* Multicast membership gets cleaned up by object hierarchy. 
*/ cq_detach_qp( h_qp->h_recv_cq, &h_qp->recv_cq_rel ); cq_detach_qp( h_qp->h_send_cq, &h_qp->send_cq_rel ); + if (h_qp->h_srq) + srq_detach_qp( h_qp->h_srq, &h_qp->srq_rel ); } } @@ -1210,6 +1224,8 @@ cleanup_qp( deref_al_obj( &h_qp->h_recv_cq->obj ); if( h_qp->h_send_cq ) deref_al_obj( &h_qp->h_send_cq->obj ); + if( h_qp->h_srq ) + deref_al_obj( &h_qp->h_srq->obj ); } } @@ -1272,6 +1288,7 @@ query_qp( p_qp_attr->h_rq_cq = h_qp->h_recv_cq; p_qp_attr->h_sq_cq = h_qp->h_send_cq; p_qp_attr->qp_type = h_qp->type; + p_qp_attr->h_srq = h_qp->h_srq; AL_EXIT( AL_DBG_QP ); return IB_SUCCESS; diff --git a/trunk/core/al/al_qp.h b/trunk/core/al/al_qp.h index a8c3c581..8b18d580 100644 --- a/trunk/core/al/al_qp.h +++ b/trunk/core/al/al_qp.h @@ -137,6 +137,9 @@ typedef struct _ib_qp cl_obj_rel_t recv_cq_rel; cl_obj_rel_t send_cq_rel; + ib_srq_handle_t h_srq; + cl_obj_rel_t srq_rel; + ib_pfn_event_cb_t pfn_event_cb; ib_pfn_modify_qp_t pfn_modify_qp; diff --git a/trunk/core/al/al_srq.c b/trunk/core/al/al_srq.c new file mode 100644 index 00000000..94cbcfe1 --- /dev/null +++ b/trunk/core/al/al_srq.c @@ -0,0 +1,438 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id: al_qp.c 1611 2006-08-20 14:48:55Z leonid $ + */ + +#include +#include +#include + +#include "al.h" +#include "al_ca.h" +#include "al_debug.h" +#if defined(EVENT_TRACING) +#ifdef offsetof +#undef offsetof +#endif +#include "al_srq.tmh" +#endif +#include "al_mgr.h" +#include "al_mr.h" +#include "al_pd.h" +#include "al_srq.h" +#include "al_verbs.h" + +#include "ib_common.h" + +/* + * Function prototypes. + */ +void +destroying_srq( + IN struct _al_obj *p_obj ); + +void +cleanup_srq( + IN al_obj_t *p_obj ); + +void +free_srq( + IN al_obj_t *p_obj ); + + +ib_api_status_t +ib_destroy_srq( + IN const ib_srq_handle_t h_srq, + IN const ib_pfn_destroy_cb_t pfn_destroy_cb OPTIONAL ) +{ + AL_ENTER( AL_DBG_SRQ ); + + if( AL_OBJ_INVALID_HANDLE( h_srq, AL_OBJ_TYPE_H_SRQ ) ) + { + AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_SRQ_HANDLE\n") ); + return IB_INVALID_SRQ_HANDLE; + } + + /* Don't destroy while there are bound QPs.
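+ *
+ * Editor's note on teardown order (hypothetical handles): a bound QP
+ * must be destroyed before its SRQ, e.g.
+ *
+ *	status = ib_destroy_srq( h_srq, NULL );	-- IB_RESOURCE_BUSY while
+ *						-- any QP is still bound
+ *	ib_destroy_qp( h_qp, NULL );		-- detaches via srq_detach_qp
+ *	status = ib_destroy_srq( h_srq, NULL );	-- now succeeds
+ *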
*/ + cl_spinlock_acquire( &h_srq->obj.lock ); + if (!cl_is_qlist_empty( &h_srq->qp_list )) + { + cl_spinlock_release( &h_srq->obj.lock ); + AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_RESOURCE_BUSY\n") ); + return IB_RESOURCE_BUSY; + } + cl_spinlock_release( &h_srq->obj.lock ); + + ref_al_obj( &h_srq->obj ); + h_srq->obj.pfn_destroy( &h_srq->obj, pfn_destroy_cb ); + + AL_EXIT( AL_DBG_SRQ ); + return IB_SUCCESS; +} + + +void +destroying_srq( + IN struct _al_obj *p_obj ) +{ + ib_srq_handle_t h_srq; + cl_list_item_t *p_item; + cl_obj_rel_t *p_rel; + ib_qp_handle_t h_qp; + + CL_ASSERT( p_obj ); + h_srq = PARENT_STRUCT( p_obj, ib_srq_t, obj ); + + /* Initiate destruction of all bound QPs. */ + cl_spinlock_acquire( &h_srq->obj.lock ); + for( p_item = cl_qlist_remove_tail( &h_srq->qp_list ); + p_item != cl_qlist_end( &h_srq->qp_list ); + p_item = cl_qlist_remove_tail( &h_srq->qp_list ) ) + { + p_rel = PARENT_STRUCT( p_item, cl_obj_rel_t, pool_item.list_item ); + p_rel->p_parent_obj = NULL; + h_qp = (ib_qp_handle_t)p_rel->p_child_obj; + if( h_qp ) + { + /* Take a reference to prevent the QP from being destroyed. */ + ref_al_obj( &h_qp->obj ); + cl_spinlock_release( &h_srq->obj.lock ); + h_qp->obj.pfn_destroy( &h_qp->obj, NULL ); + cl_spinlock_acquire( &h_srq->obj.lock ); + } + } + cl_spinlock_release( &h_srq->obj.lock ); +} + +void +cleanup_srq( + IN struct _al_obj *p_obj ) +{ + ib_srq_handle_t h_srq; + ib_api_status_t status; + + CL_ASSERT( p_obj ); + h_srq = PARENT_STRUCT( p_obj, ib_srq_t, obj ); + + /* Deallocate the CI srq. */ + if( verbs_check_srq( h_srq ) ) + { + status = verbs_destroy_srq( h_srq ); + CL_ASSERT( status == IB_SUCCESS ); + } +} + + +/* + * Release all resources associated with the shared receive queue. + */ +void +free_srq( + IN al_obj_t *p_obj ) +{ + ib_srq_handle_t h_srq; + + CL_ASSERT( p_obj ); + h_srq = PARENT_STRUCT( p_obj, ib_srq_t, obj ); + + destroy_al_obj( &h_srq->obj ); + cl_free( h_srq ); +} + + +void +srq_attach_qp( + IN const ib_srq_handle_t h_srq, + IN cl_obj_rel_t* const p_qp_rel ) +{ + p_qp_rel->p_parent_obj = (cl_obj_t*)h_srq; + ref_al_obj( &h_srq->obj ); + cl_spinlock_acquire( &h_srq->obj.lock ); + cl_qlist_insert_tail( &h_srq->qp_list, &p_qp_rel->pool_item.list_item ); + cl_spinlock_release( &h_srq->obj.lock ); +} + + +void +srq_detach_qp( + IN const ib_srq_handle_t h_srq, + IN cl_obj_rel_t* const p_qp_rel ) +{ + if( p_qp_rel->p_parent_obj ) + { + CL_ASSERT( p_qp_rel->p_parent_obj == (cl_obj_t*)h_srq ); + p_qp_rel->p_parent_obj = NULL; + cl_spinlock_acquire( &h_srq->obj.lock ); + cl_qlist_remove_item( &h_srq->qp_list, &p_qp_rel->pool_item.list_item ); + cl_spinlock_release( &h_srq->obj.lock ); + } +} + + +ib_api_status_t +ib_modify_srq( + IN const ib_srq_handle_t h_srq, + IN const ib_srq_attr_t* const p_srq_attr, + IN const ib_srq_attr_mask_t srq_attr_mask ) +{ + return modify_srq( h_srq, p_srq_attr, srq_attr_mask, NULL ); +} + + +ib_api_status_t +modify_srq( + IN const ib_srq_handle_t h_srq, + IN const ib_srq_attr_t* const p_srq_attr, + IN const ib_srq_attr_mask_t srq_attr_mask, + IN OUT ci_umv_buf_t* const p_umv_buf ) +{ + ib_api_status_t status; + + AL_ENTER( AL_DBG_SRQ ); + + if( AL_OBJ_INVALID_HANDLE( h_srq, AL_OBJ_TYPE_H_SRQ ) ) + { + AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_SRQ_HANDLE\n") ); + return IB_INVALID_SRQ_HANDLE; + } + + if( !p_srq_attr ) + { + AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_PARAMETER\n") ); + return IB_INVALID_PARAMETER; + } + + if( !( srq_attr_mask & (IB_SRQ_MAX_WR |IB_SRQ_LIMIT)) || +
( srq_attr_mask & ~(IB_SRQ_MAX_WR |IB_SRQ_LIMIT))) + { + AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_SETTING\n") ); + return IB_INVALID_SETTING; + } + + if((srq_attr_mask & IB_SRQ_LIMIT) && h_srq->obj.p_ci_ca && h_srq->obj.p_ci_ca->p_pnp_attr ) + { + if (p_srq_attr->srq_limit > h_srq->obj.p_ci_ca->p_pnp_attr->max_srq_wrs) + { + AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_SETTING\n") ); + return IB_INVALID_SETTING; + } + } + + if((srq_attr_mask & IB_SRQ_MAX_WR) && !p_srq_attr->max_wr) + { + AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_SETTING\n") ); + return IB_INVALID_SETTING; + } + + if ((srq_attr_mask & IB_SRQ_MAX_WR) && h_srq->obj.p_ci_ca && h_srq->obj.p_ci_ca->p_pnp_attr) + { + if (p_srq_attr->max_wr > h_srq->obj.p_ci_ca->p_pnp_attr->max_srq_wrs) + { + AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_MAX_WRS\n") ); + return IB_INVALID_MAX_WRS; + } + } + + status = verbs_modify_srq( h_srq, p_srq_attr, srq_attr_mask ); + + AL_EXIT( AL_DBG_SRQ ); + return status; +} + + + +ib_api_status_t +ib_query_srq( + IN const ib_srq_handle_t h_srq, + OUT ib_srq_attr_t* const p_srq_attr ) +{ + return query_srq( h_srq, p_srq_attr, NULL ); +} + + + +ib_api_status_t +query_srq( + IN const ib_srq_handle_t h_srq, + OUT ib_srq_attr_t* const p_srq_attr, + IN OUT ci_umv_buf_t* const p_umv_buf ) +{ + ib_api_status_t status; + + AL_ENTER( AL_DBG_SRQ ); + + if( AL_OBJ_INVALID_HANDLE( h_srq, AL_OBJ_TYPE_H_SRQ ) ) + { + AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_SRQ_HANDLE\n") ); + return IB_INVALID_SRQ_HANDLE; + } + if( !p_srq_attr ) + { + AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_PARAMETER\n") ); + return IB_INVALID_PARAMETER; + } + + status = verbs_query_srq( h_srq, p_srq_attr ); + + AL_EXIT( AL_DBG_SRQ ); + return status; +} + + +/* + * Initializes the SRQ information structure. + */ +ib_api_status_t +create_srq( + IN const ib_pd_handle_t h_pd, + IN const ib_srq_attr_t* const p_srq_attr, + IN const void* const srq_context, + IN const ib_pfn_event_cb_t pfn_srq_event_cb, + OUT ib_srq_handle_t* const ph_srq, + IN OUT ci_umv_buf_t* const p_umv_buf ) +{ + ib_srq_handle_t h_srq; + ib_api_status_t status; + al_obj_type_t obj_type = AL_OBJ_TYPE_H_SRQ; + + h_srq = cl_zalloc( sizeof( ib_srq_t ) ); + if( !h_srq ) + { + return IB_INSUFFICIENT_MEMORY; + } + + if( p_umv_buf ) + obj_type |= AL_OBJ_SUBTYPE_UM_EXPORT; + + /* Construct the SRQ. */ + construct_al_obj( &h_srq->obj, obj_type ); + + cl_qlist_init( &h_srq->qp_list ); + h_srq->pfn_event_cb = pfn_srq_event_cb; + + /* Initialize the SRQ. */ + status = init_al_obj( &h_srq->obj, srq_context, TRUE, + destroying_srq, cleanup_srq, free_srq ); + if( status != IB_SUCCESS ) + { + free_srq( &h_srq->obj ); + return status; + } + status = attach_al_obj( &h_pd->obj, &h_srq->obj ); + if( status != IB_SUCCESS ) + { + h_srq->obj.pfn_destroy( &h_srq->obj, NULL ); + return status; + } + + status = verbs_create_srq( h_pd, h_srq, p_srq_attr, p_umv_buf ); + if( status != IB_SUCCESS ) + { + h_srq->obj.pfn_destroy( &h_srq->obj, NULL ); + return status; + } + + *ph_srq = h_srq; + + /* + * Note that we don't release the reference taken in init_al_obj here. + * For kernel clients, it is released in ib_create_srq. For user-mode + * clients it is released by the proxy after the handle is extracted. + */ + return IB_SUCCESS; +} + + +/* + * Process an asynchronous event on the SRQ. Notify the user of the event.
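+ *
+ * Editor's sketch of a client-side handler (hypothetical names; the
+ * record fields follow their use in srq_async_event_cb below):
+ *
+ *	static void
+ *	my_srq_event_cb(
+ *		IN ib_async_event_rec_t *p_event_rec )
+ *	{
+ *		my_ctx_t *p_ctx = (my_ctx_t*)p_event_rec->context;
+ *		if( p_event_rec->code == IB_AE_SRQ_LIMIT_REACHED )
+ *			replenish_srq( p_ctx, p_event_rec->handle.h_srq );
+ *	}
+ *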
+ */ +void +srq_async_event_cb( + IN ib_async_event_rec_t* const p_event_rec ) +{ + ib_srq_handle_t h_srq; + + CL_ASSERT( p_event_rec ); + h_srq = (ib_srq_handle_t)p_event_rec->context; + +#if defined(CL_KERNEL) + switch( p_event_rec->code ) + { + case IB_AE_SRQ_LIMIT_REACHED: + AL_PRINT_EXIT( TRACE_LEVEL_WARNING, AL_DBG_SRQ, + ("IB_AE_SRQ_LIMIT_REACHED for srq %p \n", h_srq) ); + //TODO: handle this error. + break; + case IB_AE_SRQ_CATAS_ERROR: + AL_PRINT_EXIT( TRACE_LEVEL_WARNING, AL_DBG_SRQ, + ("IB_AE_SRQ_CATAS_ERROR for srq %p \n", h_srq) ); + //TODO: handle this error. + break; + default: + break; + } +#endif + + p_event_rec->context = (void*)h_srq->obj.context; + p_event_rec->handle.h_srq = h_srq; + + if( h_srq->pfn_event_cb ) + h_srq->pfn_event_cb( p_event_rec ); +} + +ib_api_status_t +ib_post_srq_recv( + IN const ib_srq_handle_t h_srq, + IN ib_recv_wr_t* const p_recv_wr, + OUT ib_recv_wr_t **pp_recv_failure OPTIONAL ) +{ + ib_api_status_t status; + + AL_ENTER( AL_DBG_SRQ ); + + if( AL_OBJ_INVALID_HANDLE( h_srq, AL_OBJ_TYPE_H_SRQ ) ) + { + AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_SRQ_HANDLE\n") ); + return IB_INVALID_SRQ_HANDLE; + } + if( !p_recv_wr || ( p_recv_wr->p_next && !pp_recv_failure ) ) + { + AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_PARAMETER\n") ); + return IB_INVALID_PARAMETER; + } + + status = + h_srq->pfn_post_srq_recv( h_srq->h_recv_srq, p_recv_wr, pp_recv_failure ); + + AL_EXIT( AL_DBG_SRQ ); + return status; +} + + + diff --git a/trunk/core/al/al_srq.h b/trunk/core/al/al_srq.h new file mode 100644 index 00000000..28ad8187 --- /dev/null +++ b/trunk/core/al/al_srq.h @@ -0,0 +1,108 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id: al_srq.h 1611 2006-08-20 14:48:55Z leonid $ + */ + +#if !defined(__AL_SRQ_H__) +#define __AL_SRQ_H__ + +#include +#include +#include +#include + +#include "al_ca.h" +#include "al_common.h" + + +typedef ib_api_status_t +(*ib_pfn_post_srq_recv_t)( + IN const ib_srq_handle_t h_srq, + IN ib_recv_wr_t* const p_recv_wr, + IN ib_recv_wr_t **p_recv_failure OPTIONAL ); + + +/* + * Shared receive queue information required by the access layer. This + * structure is referenced by a user's SRQ handle. + */ +typedef struct _ib_srq +{ + al_obj_t obj; /* Must be first.
*/ + + ib_srq_handle_t h_ci_srq; /* kernel SRQ handle */ + ib_pfn_post_srq_recv_t pfn_post_srq_recv; /* post_srq_recv call */ + ib_srq_handle_t h_recv_srq; /* srq handle for the post_srq_recv call */ + ib_pfn_event_cb_t pfn_event_cb; /* user async event handler */ + cl_qlist_t qp_list; /* List of QPs bound to this SRQ. */ + +} ib_srq_t; + +ib_api_status_t +create_srq( + IN const ib_pd_handle_t h_pd, + IN const ib_srq_attr_t* const p_srq_attr, + IN const void* const srq_context, + IN const ib_pfn_event_cb_t pfn_srq_event_cb, + OUT ib_srq_handle_t* const ph_srq, + IN OUT ci_umv_buf_t* const p_umv_buf ); + + +ib_api_status_t +query_srq( + IN const ib_srq_handle_t h_srq, + OUT ib_srq_attr_t* const p_srq_attr, + IN OUT ci_umv_buf_t* const p_umv_buf ); + + +ib_api_status_t +modify_srq( + IN const ib_srq_handle_t h_srq, + IN const ib_srq_attr_t* const p_srq_attr, + IN const ib_srq_attr_mask_t srq_attr_mask, + IN OUT ci_umv_buf_t* const p_umv_buf ); + + +void +srq_async_event_cb( + IN ib_async_event_rec_t* const p_event_rec ); + +void +srq_attach_qp( + IN const ib_srq_handle_t h_srq, + IN cl_obj_rel_t* const p_qp_rel ); + +void +srq_detach_qp( + IN const ib_srq_handle_t h_srq, + IN cl_obj_rel_t* const p_qp_rel ); + +#endif /* __AL_SRQ_H__ */ + diff --git a/trunk/core/al/al_verbs.h b/trunk/core/al/al_verbs.h index f0b67fb6..fe499377 100644 --- a/trunk/core/al/al_verbs.h +++ b/trunk/core/al/al_verbs.h @@ -37,6 +37,7 @@ #include "al_cq.h" #include "al_pd.h" #include "al_qp.h" +#include "al_srq.h" #ifndef CL_KERNEL #include "ual_mad.h" @@ -230,14 +231,50 @@ deallocate_pd_alias( #define verbs_deallocate_pd(h_pd) \ h_pd->obj.p_ci_ca->verbs.deallocate_pd( h_pd->h_ci_pd ) +static inline ib_api_status_t +verbs_create_srq( + IN const ib_pd_handle_t h_pd, + IN ib_srq_handle_t h_srq, + IN const ib_srq_attr_t* const p_srq_attr, + IN OUT ci_umv_buf_t* const p_umv_buf ) +{ + ib_api_status_t status; + + status = h_srq->obj.p_ci_ca->verbs.create_srq( + h_pd->h_ci_pd, h_srq, p_srq_attr, + &h_srq->h_ci_srq, p_umv_buf ); + + h_srq->h_recv_srq = h_srq->h_ci_srq; + h_srq->pfn_post_srq_recv = h_srq->obj.p_ci_ca->verbs.post_srq_recv; + return status; +} + +#define verbs_check_srq(h_srq) ((h_srq)->h_ci_srq) + +#define verbs_destroy_srq(h_srq) \ + h_srq->obj.p_ci_ca->verbs.destroy_srq( h_srq->h_ci_srq ) + +#define verbs_query_srq(h_srq, p_srq_attr) \ + h_srq->obj.p_ci_ca->verbs.query_srq( h_srq->h_ci_srq,\ + p_srq_attr, p_umv_buf ) + +#define verbs_modify_srq(h_srq, p_srq_attr, srq_attr_mask) \ + h_srq->obj.p_ci_ca->verbs.modify_srq( h_srq->h_ci_srq,\ + p_srq_attr, srq_attr_mask, p_umv_buf ) + +#define verbs_post_srq_recv(h_srq, p_recv_wr, pp_recv_failure) \ + h_srq->obj.p_ci_ca->verbs.post_srq_recv( h_srq->h_ci_srq,\ + p_recv_wr, pp_recv_failure ) + #define convert_qp_handle( qp_create ) {\ CL_ASSERT( qp_create.h_rq_cq ); \ qp_create.h_rq_cq = qp_create.h_rq_cq->h_ci_cq; \ CL_ASSERT( qp_create.h_sq_cq ); \ qp_create.h_sq_cq = qp_create.h_sq_cq->h_ci_cq; \ + if (qp_create.h_srq) \ + qp_create.h_srq = qp_create.h_srq->h_ci_srq; \ } - static inline ib_api_status_t verbs_get_spl_qp( IN ib_pd_handle_t h_pd, @@ -283,7 +320,6 @@ verbs_create_qp( return status; } - #define verbs_check_qp(h_qp) ((h_qp)->h_ci_qp) #define verbs_destroy_qp(h_qp) \ h_qp->obj.p_ci_ca->verbs.destroy_qp( h_qp->h_ci_qp, h_qp->timewait ) @@ -507,6 +543,27 @@ allocate_pd_alias( #define verbs_deallocate_pd(h_pd) \ ual_deallocate_pd(h_pd) +#define verbs_create_srq(h_pd, h_srq, p_srq_attr, p_umv_buf) \ + ual_create_srq (h_pd, h_srq, p_srq_attr); \ +
UNUSED_PARAM( p_umv_buf ) + +#define verbs_check_srq(h_srq) ((h_srq)->h_ci_srq || (h_srq)->obj.hdl) + +#define verbs_destroy_srq(h_srq) \ + ual_destroy_srq(h_srq) + +#define verbs_query_srq(h_srq, p_srq_attr) \ + ual_query_srq(h_srq, p_srq_attr); \ + UNUSED_PARAM( p_umv_buf ); + +#define verbs_modify_srq(h_srq, p_srq_attr, srq_attr_mask) \ + ual_modify_srq(h_srq, p_srq_attr, srq_attr_mask); \ + UNUSED_PARAM( p_umv_buf ); + +#define verbs_post_srq_recv(h_srq, p_recv_wr, pp_recv_failure) \ + ual_post_srq_recv(h_srq, p_recv_wr, pp_recv_failure) + + /* For user-mode, handle conversion is done in ual files */ #define convert_qp_handle( qp_create ) diff --git a/trunk/core/al/ib_statustext.c b/trunk/core/al/ib_statustext.c index 6aad3bb5..5740dffa 100644 --- a/trunk/core/al/ib_statustext.c +++ b/trunk/core/al/ib_statustext.c @@ -70,6 +70,7 @@ static const char* const __ib_error_str[] = "IB_INVALID_MAX_WRS", "IB_INVALID_MAX_SGE", "IB_INVALID_CQ_SIZE", + "IB_INVALID_SRQ_SIZE", "IB_INVALID_SERVICE_TYPE", "IB_INVALID_GID", "IB_INVALID_LID", @@ -78,6 +79,7 @@ static const char* const __ib_error_str[] = "IB_INVALID_AV_HANDLE", "IB_INVALID_CQ_HANDLE", "IB_INVALID_QP_HANDLE", + "IB_INVALID_SRQ_HANDLE", "IB_INVALID_PD_HANDLE", "IB_INVALID_MR_HANDLE", "IB_INVALID_FMR_HANDLE", @@ -87,7 +89,7 @@ static const char* const __ib_error_str[] = "IB_INVALID_AL_HANDLE", "IB_INVALID_HANDLE", "IB_ERROR", - "IB_REMOTE_ERROR", /* Infiniband Access Layer */ + "IB_REMOTE_ERROR", "IB_VERBS_PROCESSING_DONE", "IB_INVALID_WR_TYPE", "IB_QP_IN_TIMEWAIT", @@ -228,3 +230,31 @@ ib_get_wr_type_str( return( __ib_wr_type_str[wr_type] ); } +static const char* const __ib_qp_type_str[] = +{ + "IB_QPT_RELIABLE_CONN", + "IB_QPT_UNRELIABLE_CONN", + "IB_QPT_UNKNOWN", + "IB_QPT_UNRELIABLE_DGRM", + "IB_QPT_QP0", + "IB_QPT_QP1", + "IB_QPT_RAW_IPV6", + "IB_QPT_RAW_ETHER", + "IB_QPT_MAD", + "IB_QPT_QP0_ALIAS", + "IB_QPT_QP1_ALIAS", + "IB_QPT_UNKNOWN" +}; + + +const char* +ib_get_qp_type_str( + IN uint8_t qp_type ) +{ + if( qp_type > IB_QPT_UNKNOWN ) + qp_type = IB_QPT_UNKNOWN; + return( __ib_qp_type_str[qp_type] ); +} + + diff --git a/trunk/core/al/kernel/SOURCES b/trunk/core/al/kernel/SOURCES index 1ca593f1..3ff56ac8 100644 --- a/trunk/core/al/kernel/SOURCES +++ b/trunk/core/al/kernel/SOURCES @@ -49,6 +49,7 @@ SOURCES= ibal.rc \ ..\al_query.c \ ..\al_reg_svc.c \ ..\al_res_mgr.c \ + ..\al_srq.c \ ..\al_sub.c \ ..\ib_common.c \ ..\ib_statustext.c diff --git a/trunk/core/al/kernel/al_mgr.c b/trunk/core/al/kernel/al_mgr.c index 5203c9a0..6babc91e 100644 --- a/trunk/core/al/kernel/al_mgr.c +++ b/trunk/core/al/kernel/al_mgr.c @@ -197,6 +197,10 @@ create_al_mgr() } /* Initialize the AL device management agent. */ + +/* + Disable support of DM agent.
+ status = create_dm_agent( &gp_al_mgr->obj ); if( status != IB_SUCCESS ) { @@ -205,7 +209,7 @@ create_al_mgr() ("create_dm_agent failed, status = 0x%x.\n", status) ); return status; } - +*/ status = create_ioc_pnp( &gp_al_mgr->obj ); if( status != IB_SUCCESS ) { @@ -530,7 +534,8 @@ al_hdl_ref( if( type == AL_OBJ_TYPE_UNKNOWN && p_h->type != AL_OBJ_TYPE_H_PD && p_h->type != AL_OBJ_TYPE_H_CQ && p_h->type != AL_OBJ_TYPE_H_AV && p_h->type != AL_OBJ_TYPE_H_QP && - p_h->type != AL_OBJ_TYPE_H_MR && p_h->type != AL_OBJ_TYPE_H_MW ) + p_h->type != AL_OBJ_TYPE_H_MR && p_h->type != AL_OBJ_TYPE_H_MW && + p_h->type != AL_OBJ_TYPE_H_SRQ ) { cl_spinlock_release( &h_al->obj.lock ); return NULL; diff --git a/trunk/core/al/kernel/al_proxy_verbs.c b/trunk/core/al/kernel/al_proxy_verbs.c index 2a5e0cd3..5bf7db8f 100644 --- a/trunk/core/al/kernel/al_proxy_verbs.c +++ b/trunk/core/al/kernel/al_proxy_verbs.c @@ -49,6 +49,7 @@ #include "al_ca.h" #include "al_pd.h" #include "al_qp.h" +#include "al_srq.h" #include "al_cq.h" #include "al_mr.h" #include "al_mw.h" @@ -961,6 +962,309 @@ proxy_dealloc_pd( } +/* + * Proxy's SRQ error handler + */ +static void +proxy_srq_err_cb( + IN ib_async_event_rec_t *p_err_rec ) +{ + ib_srq_handle_t h_srq = p_err_rec->handle.h_srq; + al_dev_open_context_t *p_context = h_srq->obj.h_al->p_context; + misc_cb_ioctl_info_t cb_info; + + AL_ENTER( AL_DBG_QP ); + + /* + * If we're already closing the device - do not queue a callback, since + * we're cleaning up the callback lists. + */ + if( !proxy_context_ref( p_context ) ) + { + proxy_context_deref( p_context ); + return; + } + + /* Set up context and callback record type appropriate for UAL */ + cb_info.rec_type = SRQ_ERROR_REC; + /* Return the Proxy's SRQ handle and the user's context */ + cb_info.ioctl_rec.event_rec = *p_err_rec; + cb_info.ioctl_rec.event_rec.handle.h_srq = (ib_srq_handle_t)h_srq->obj.hdl; + + /* The proxy handle must be valid now. */ + if( !h_srq->obj.hdl_valid ) + h_srq->obj.hdl_valid = TRUE; + + proxy_queue_cb_buf( + UAL_GET_MISC_CB_INFO, p_context, &cb_info, &h_srq->obj ); + + proxy_context_deref( p_context ); + + AL_EXIT( AL_DBG_QP ); +} + +/* + * Process the ioctl UAL_CREATE_SRQ + * + * Returns the srq_list_obj as the handle to UAL + */ +static cl_status_t +proxy_create_srq( + IN void *p_open_context, + IN cl_ioctl_handle_t h_ioctl, + OUT size_t *p_ret_bytes ) +{ + ual_create_srq_ioctl_t *p_ioctl = + (ual_create_srq_ioctl_t*)cl_ioctl_in_buf( h_ioctl ); + al_dev_open_context_t *p_context = + (al_dev_open_context_t *)p_open_context; + ib_pd_handle_t h_pd; + ib_srq_handle_t h_srq; + ci_umv_buf_t *p_umv_buf = NULL; + ib_api_status_t status; + ib_pfn_event_cb_t pfn_ev; + + AL_ENTER( AL_DBG_SRQ ); + + /* Validate input buffers. */ + if( !cl_ioctl_in_buf( h_ioctl ) || !cl_ioctl_out_buf( h_ioctl ) || + cl_ioctl_in_size( h_ioctl ) != sizeof(p_ioctl->in) || + cl_ioctl_out_size( h_ioctl ) != sizeof(p_ioctl->out) ) + { + AL_EXIT( AL_DBG_SRQ ); + return CL_INVALID_PARAMETER; + } + + /* Validate handles. 
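+ * (Editor's note: each al_hdl_ref() below takes a reference that must be
+ * balanced by deref_al_obj() on every exit path; the error labels fall
+ * through to the common deref of h_pd at the end of this function.)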
*/ + h_pd = (ib_pd_handle_t) + al_hdl_ref( p_context->h_al, p_ioctl->in.h_pd, AL_OBJ_TYPE_H_PD ); + if( !h_pd) + { + status = IB_INVALID_PD_HANDLE; + goto proxy_create_srq_err1; + } + + status = cpyin_umvbuf( &p_ioctl->in.umv_buf, &p_umv_buf ); + if( status != IB_SUCCESS ) + goto proxy_create_srq_err1; + + if( p_ioctl->in.ev_notify ) + pfn_ev = proxy_srq_err_cb; + else + pfn_ev = NULL; + + status = create_srq( h_pd, &p_ioctl->in.srq_attr, p_ioctl->in.context, + pfn_ev, &h_srq, p_umv_buf ); + if( status != IB_SUCCESS ) + goto proxy_create_srq_err1; + + status = cpyout_umvbuf( &p_ioctl->out.umv_buf, p_umv_buf ); + if( status == IB_SUCCESS ) + { + p_ioctl->out.h_srq = h_srq->obj.hdl; + h_srq->obj.hdl_valid = TRUE; + /* Release the reference taken in create_srq (by init_al_obj) */ + deref_al_obj( &h_srq->obj ); + } + else + { +proxy_create_srq_err1: + p_ioctl->out.umv_buf = p_ioctl->in.umv_buf; + p_ioctl->out.h_srq = AL_INVALID_HANDLE; + } + free_umvbuf( p_umv_buf ); + + p_ioctl->out.status = status; + *p_ret_bytes = sizeof(p_ioctl->out); + + if( h_pd ) + deref_al_obj( &h_pd->obj ); + + AL_EXIT( AL_DBG_SRQ ); + return CL_SUCCESS; +} + + +/* + * Process the ioctl UAL_QUERY_SRQ: + */ +static +cl_status_t +proxy_query_srq( + IN void *p_open_context, + IN cl_ioctl_handle_t h_ioctl, + OUT size_t *p_ret_bytes ) +{ + ual_query_srq_ioctl_t *p_ioctl = + (ual_query_srq_ioctl_t *)cl_ioctl_in_buf( h_ioctl ); + al_dev_open_context_t *p_context = + (al_dev_open_context_t *)p_open_context; + ib_srq_handle_t h_srq; + ci_umv_buf_t *p_umv_buf = NULL; + ib_api_status_t status; + + AL_ENTER( AL_DBG_SRQ ); + + /* Validate input buffers. */ + if( !cl_ioctl_in_buf( h_ioctl ) || !cl_ioctl_out_buf( h_ioctl ) || + cl_ioctl_in_size( h_ioctl ) != sizeof(p_ioctl->in) || + cl_ioctl_out_size( h_ioctl ) != sizeof(p_ioctl->out) ) + { + AL_EXIT( AL_DBG_SRQ ); + return CL_INVALID_PARAMETER; + } + + /* Validate SRQ handle */ + h_srq = (ib_srq_handle_t) + al_hdl_ref( p_context->h_al, p_ioctl->in.h_srq, AL_OBJ_TYPE_H_SRQ ); + if( !h_srq ) + { + status = IB_INVALID_SRQ_HANDLE; + goto proxy_query_srq_err; + } + + status = cpyin_umvbuf( &p_ioctl->in.umv_buf, &p_umv_buf ); + if( status != IB_SUCCESS ) + goto proxy_query_srq_err; + + status = query_srq( h_srq, &p_ioctl->out.srq_attr, p_umv_buf ); + if( status != IB_SUCCESS ) + goto proxy_query_srq_err; + + status = cpyout_umvbuf( &p_ioctl->out.umv_buf, p_umv_buf ); + if( status != IB_SUCCESS ) + { +proxy_query_srq_err: + p_ioctl->out.umv_buf = p_ioctl->in.umv_buf; + cl_memclr( &p_ioctl->out.srq_attr, sizeof(ib_srq_attr_t) ); + } + free_umvbuf( p_umv_buf ); + + if( h_srq ) + deref_al_obj( &h_srq->obj ); + + p_ioctl->out.status = status; + *p_ret_bytes = sizeof(p_ioctl->out); + + AL_EXIT( AL_DBG_SRQ ); + return CL_SUCCESS; +} + + + +/* + * Process the ioctl UAL_MODIFY_SRQ: + */ +static +cl_status_t +proxy_modify_srq( + IN void *p_open_context, + IN cl_ioctl_handle_t h_ioctl, + OUT size_t *p_ret_bytes ) +{ + ual_modify_srq_ioctl_t *p_ioctl = + (ual_modify_srq_ioctl_t *)cl_ioctl_in_buf( h_ioctl ); + al_dev_open_context_t *p_context = + (al_dev_open_context_t *)p_open_context; + ib_srq_handle_t h_srq; + ci_umv_buf_t *p_umv_buf = NULL; + ib_api_status_t status; + + AL_ENTER( AL_DBG_SRQ ); + + /* Validate input buffers. 
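+ *
+ * Editor's sketch of the client call that lands here (hypothetical
+ * limit value; mask bits per ib_types.h in this patch):
+ *
+ *	ib_srq_attr_t attr;
+ *	cl_memclr( &attr, sizeof(attr) );
+ *	attr.srq_limit = 16;	-- arm IB_AE_SRQ_LIMIT_REACHED for when the
+ *				-- SRQ falls below 16 posted WRs
+ *	status = ib_modify_srq( h_srq, &attr, IB_SRQ_LIMIT );
+ *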
*/ + if( !cl_ioctl_in_buf( h_ioctl ) || !cl_ioctl_out_buf( h_ioctl ) || + cl_ioctl_in_size( h_ioctl ) != sizeof(p_ioctl->in) || + cl_ioctl_out_size( h_ioctl ) != sizeof(p_ioctl->out) ) + { + AL_EXIT( AL_DBG_SRQ ); + return CL_INVALID_PARAMETER; + } + + /* Validate SRQ handle */ + h_srq = (ib_srq_handle_t) + al_hdl_ref( p_context->h_al, p_ioctl->in.h_srq, AL_OBJ_TYPE_H_SRQ ); + if( !h_srq ) + { + status = IB_INVALID_SRQ_HANDLE; + goto proxy_modify_srq_err; + } + + status = cpyin_umvbuf( &p_ioctl->in.umv_buf, &p_umv_buf ); + if( status != IB_SUCCESS ) + goto proxy_modify_srq_err; + + status = modify_srq( h_srq, &p_ioctl->in.srq_attr, p_ioctl->in.srq_attr_mask, p_umv_buf ); + + if( status != IB_SUCCESS ) + goto proxy_modify_srq_err; + + status = cpyout_umvbuf( &p_ioctl->out.umv_buf, p_umv_buf ); + if( status != IB_SUCCESS ) + { +proxy_modify_srq_err: + p_ioctl->out.umv_buf = p_ioctl->in.umv_buf; + } + free_umvbuf( p_umv_buf ); + + if( h_srq ) + deref_al_obj( &h_srq->obj ); + + p_ioctl->out.status = status; + *p_ret_bytes = sizeof(p_ioctl->out); + + AL_EXIT( AL_DBG_SRQ ); + return CL_SUCCESS; +} + + +/* + * Process the ioctl UAL_DESTROY_SRQ + */ +static cl_status_t +proxy_destroy_srq( + IN void *p_open_context, + IN cl_ioctl_handle_t h_ioctl, + OUT size_t *p_ret_bytes ) +{ + ual_destroy_srq_ioctl_t *p_ioctl = + (ual_destroy_srq_ioctl_t *)cl_ioctl_in_buf( h_ioctl ); + al_dev_open_context_t *p_context = + (al_dev_open_context_t *)p_open_context; + ib_srq_handle_t h_srq; + + AL_ENTER( AL_DBG_SRQ ); + + /* Validate input buffers. */ + if( !cl_ioctl_in_buf( h_ioctl ) || !cl_ioctl_out_buf( h_ioctl ) || + cl_ioctl_in_size( h_ioctl ) != sizeof(p_ioctl->in) || + cl_ioctl_out_size( h_ioctl ) != sizeof(p_ioctl->out) ) + { + AL_EXIT( AL_DBG_SRQ ); + return CL_INVALID_PARAMETER; + } + + /* Set the return bytes in all cases */ + *p_ret_bytes = sizeof(p_ioctl->out); + + /* Validate SRQ handle */ + h_srq = (ib_srq_handle_t) + al_hdl_ref( p_context->h_al, p_ioctl->in.h_srq, AL_OBJ_TYPE_H_SRQ ); + if( !h_srq ) + { + AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_SRQ_HANDLE\n") ); + p_ioctl->out.status = IB_INVALID_SRQ_HANDLE; + } + else + { + h_srq->obj.pfn_destroy( &h_srq->obj, ib_sync_destroy ); + p_ioctl->out.status = IB_SUCCESS; + } + + AL_EXIT( AL_DBG_SRQ ); + return CL_SUCCESS; +} + /* * Proxy's QP error handler @@ -1004,6 +1308,7 @@ proxy_qp_err_cb( } + /* * Process the ioctl UAL_CREATE_QP * @@ -1021,6 +1326,7 @@ proxy_create_qp( (al_dev_open_context_t *)p_open_context; ib_pd_handle_t h_pd; ib_qp_handle_t h_qp; + ib_srq_handle_t h_srq = NULL; ib_cq_handle_t h_sq_cq, h_rq_cq; ci_umv_buf_t *p_umv_buf = NULL; ib_api_status_t status; @@ -1044,6 +1350,15 @@ proxy_create_qp( (uint64_t)p_ioctl->in.qp_create.h_sq_cq, AL_OBJ_TYPE_H_CQ ); h_rq_cq = (ib_cq_handle_t)al_hdl_ref( p_context->h_al, (uint64_t)p_ioctl->in.qp_create.h_rq_cq, AL_OBJ_TYPE_H_CQ ); + if (p_ioctl->in.qp_create.h_srq) { + h_srq = (ib_srq_handle_t)al_hdl_ref( p_context->h_al, + (uint64_t)p_ioctl->in.qp_create.h_srq, AL_OBJ_TYPE_H_SRQ ); + if( !h_srq) + { + status = IB_INVALID_SRQ_HANDLE; + goto proxy_create_qp_err1; + } + } if( !h_pd) { status = IB_INVALID_PD_HANDLE; @@ -1064,6 +1379,8 @@ proxy_create_qp( p_ioctl->in.qp_create.h_sq_cq = h_sq_cq; /* Substitute rq_cq handle with AL's cq handle */ p_ioctl->in.qp_create.h_rq_cq = h_rq_cq; + /* Substitute srq handle with AL's srq handle */ + p_ioctl->in.qp_create.h_srq = h_srq; status = cpyin_umvbuf( &p_ioctl->in.umv_buf, &p_umv_buf ); if( status != IB_SUCCESS ) @@ -1116,6 +1433,8 @@ 
proxy_create_qp_err1: deref_al_obj( &h_rq_cq->obj ); if( h_sq_cq ) deref_al_obj( &h_sq_cq->obj ); + if( h_srq ) + deref_al_obj( &h_srq->obj ); AL_EXIT( AL_DBG_QP ); return CL_SUCCESS; @@ -1198,6 +1517,15 @@ proxy_query_qp( { p_ioctl->out.attr.h_rq_cq = NULL; } + if( p_ioctl->out.attr.h_srq ) + { + p_ioctl->out.attr.h_srq = + (ib_srq_handle_t)p_ioctl->out.attr.h_srq->obj.hdl; + } + else + { + p_ioctl->out.attr.h_srq = NULL; + } } else { @@ -2247,6 +2575,126 @@ proxy_post_recv_done: } + +/* + * Process the ioctl UAL_POST_SRQ_RECV + */ +static +cl_status_t +proxy_post_srq_recv( + IN void *p_open_context, + IN cl_ioctl_handle_t h_ioctl, + OUT size_t *p_ret_bytes ) +{ + ual_post_srq_recv_ioctl_t *p_ioctl = + (ual_post_srq_recv_ioctl_t *)cl_ioctl_in_buf( h_ioctl ); + al_dev_open_context_t *p_context = + (al_dev_open_context_t *)p_open_context; + ib_srq_handle_t h_srq; + ib_recv_wr_t *p_wr; + ib_recv_wr_t *p_recv_failure; + uintn_t i; + ib_local_ds_t *p_ds; + uintn_t num_ds = 0; + ib_api_status_t status; + size_t in_buf_sz; + + AL_ENTER( AL_DBG_QP ); + + /* Validate input buffers. */ + if( !cl_ioctl_in_buf( h_ioctl ) || !cl_ioctl_out_buf( h_ioctl ) || + cl_ioctl_in_size( h_ioctl ) < sizeof(p_ioctl->in) || + cl_ioctl_out_size( h_ioctl ) != sizeof(p_ioctl->out) ) + { + AL_EXIT( AL_DBG_QP ); + return CL_INVALID_PARAMETER; + } + + /* + * Additional input buffer validation based on actual settings. + * Note that this validates that work requests are actually + * being passed in. + */ + in_buf_sz = sizeof(p_ioctl->in); + in_buf_sz += sizeof(ib_recv_wr_t) * (p_ioctl->in.num_wr - 1); + in_buf_sz += sizeof(ib_local_ds_t) * p_ioctl->in.num_ds; + if( cl_ioctl_in_size( h_ioctl ) != in_buf_sz ) + { + AL_EXIT( AL_DBG_QP ); + return CL_INVALID_PARAMETER; + } + + /* Set up p_recv_failure to head of list. */ + p_recv_failure = p_wr = p_ioctl->in.recv_wr; + + /* Validate SRQ handle */ + h_srq = (ib_srq_handle_t) + al_hdl_ref( p_context->h_al, p_ioctl->in.h_srq, AL_OBJ_TYPE_H_SRQ ); + if( !h_srq ) + { + status = IB_INVALID_SRQ_HANDLE; + goto proxy_post_recv_done; + } + + /* Setup the base data segment pointer. */ + p_ds = (ib_local_ds_t*)&p_ioctl->in.recv_wr[p_ioctl->in.num_wr]; + + /* Setup the user's work requests and data segments and translate. */ + for( i = 0; i < p_ioctl->in.num_wr; i++ ) + { + /* Setup the data segments, if any. */ + if( p_wr[i].num_ds ) + { + num_ds += p_wr[i].num_ds; + if( num_ds > p_ioctl->in.num_ds ) + { + /* + * The work requests submitted exceed the number of data + * segments specified in the IOCTL. + */ + status = IB_INVALID_PARAMETER; + goto proxy_post_recv_done; + } + p_wr[i].ds_array = p_ds; + p_ds += p_wr[i].num_ds; + } + else + { + p_wr[i].ds_array = NULL; + } + + p_wr[i].p_next = &p_wr[i + 1]; + } + + /* Mark the end of list. */ + p_wr[i-1].p_next = NULL; + + status = ib_post_srq_recv( h_srq, p_wr, &p_recv_failure ); + + if( status == IB_SUCCESS ) + { + p_ioctl->out.failed_cnt = 0; + } + else + { +proxy_post_recv_done: + /* First set up as if all failed. */ + p_ioctl->out.failed_cnt = p_ioctl->in.num_wr; + /* Now subtract successful ones.
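+ * Worked example (editor's note): with num_wr == 4 and the verb
+ * failing on the third request, p_recv_failure points at &p_wr[2];
+ * the pointer difference divided by sizeof(ib_recv_wr_t) is 2, so
+ * failed_cnt == 4 - 2 == 2.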
*/ + p_ioctl->out.failed_cnt -= (uint32_t)( + (((uintn_t)p_recv_failure) - ((uintn_t)p_wr)) + / sizeof(ib_recv_wr_t)); + } + + if( h_srq ) + deref_al_obj( &h_srq->obj ); + + p_ioctl->out.status = status; + *p_ret_bytes = sizeof(p_ioctl->out); + + AL_EXIT( AL_DBG_QP ); + return CL_SUCCESS; +} + /* * Process the ioctl UAL_PEEK_CQ @@ -3383,6 +3831,21 @@ verbs_ioctl( case UAL_MODIFY_AV: cl_status = proxy_modify_av( p_context, h_ioctl, p_ret_bytes ); break; + case UAL_CREATE_SRQ: + cl_status = proxy_create_srq( p_context, h_ioctl, p_ret_bytes ); + break; + case UAL_QUERY_SRQ: + cl_status = proxy_query_srq( p_context, h_ioctl, p_ret_bytes ); + break; + case UAL_MODIFY_SRQ: + cl_status = proxy_modify_srq( p_context, h_ioctl, p_ret_bytes ); + break; + case UAL_DESTROY_SRQ: + cl_status = proxy_destroy_srq( p_context, h_ioctl, p_ret_bytes ); + break; + case UAL_POST_SRQ_RECV: + cl_status = proxy_post_srq_recv( p_context, h_ioctl, p_ret_bytes ); + break; case UAL_CREATE_QP: cl_status = proxy_create_qp( p_context, h_ioctl, p_ret_bytes ); break; diff --git a/trunk/core/al/user/SOURCES b/trunk/core/al/user/SOURCES index 4dbe0db3..bd1cfbf8 100644 --- a/trunk/core/al/user/SOURCES +++ b/trunk/core/al/user/SOURCES @@ -43,6 +43,7 @@ SOURCES=\ ual_query.c \ ual_reg_svc.c \ ual_sa_req.c \ + ual_srq.c \ ual_sub.c \ ..\al.c \ ..\al_av.c \ @@ -63,6 +64,7 @@ SOURCES=\ ..\al_query.c \ ..\al_reg_svc.c \ ..\al_res_mgr.c \ + ..\al_srq.c \ ..\al_sub.c \ ..\ib_common.c \ ..\ib_statustext.c diff --git a/trunk/core/al/user/ual_ci_ca.h b/trunk/core/al/user/ual_ci_ca.h index ea702193..87dc7e4d 100644 --- a/trunk/core/al/user/ual_ci_ca.h +++ b/trunk/core/al/user/ual_ci_ca.h @@ -112,6 +112,27 @@ ual_query_av( OUT ib_av_attr_t* const p_av_attr, OUT ib_pd_handle_t* const ph_pd ); +ib_api_status_t +ual_create_srq( + IN const ib_pd_handle_t h_pd, + IN OUT ib_srq_handle_t h_srq, + IN const ib_srq_attr_t* const p_srq_attr); + +ib_api_status_t +ual_modify_srq( + IN ib_srq_handle_t h_srq, + IN const ib_srq_attr_t* const p_srq_attr, + IN const ib_srq_attr_mask_t srq_attr_mask ); + +ib_api_status_t +ual_query_srq( + IN ib_srq_handle_t h_srq, + OUT ib_srq_attr_t* p_srq_attr ); + +ib_api_status_t +ual_destroy_srq( + IN ib_srq_handle_t h_srq ); + ib_api_status_t ual_create_qp( IN const ib_pd_handle_t h_pd, @@ -225,6 +246,12 @@ ual_post_recv( IN ib_recv_wr_t* const p_recv_wr, OUT ib_recv_wr_t **pp_recv_failure ); +ib_api_status_t +ual_post_srq_recv( + IN const ib_srq_handle_t h_srq, + IN ib_recv_wr_t* const p_recv_wr, + OUT ib_recv_wr_t **pp_recv_failure ); + ib_api_status_t ual_peek_cq( IN const ib_cq_handle_t h_cq, diff --git a/trunk/core/al/user/ual_mgr.c b/trunk/core/al/user/ual_mgr.c index 532e1775..2abf6a22 100644 --- a/trunk/core/al/user/ual_mgr.c +++ b/trunk/core/al/user/ual_mgr.c @@ -802,6 +802,7 @@ __process_misc_cb( { case CA_ERROR_REC: case QP_ERROR_REC: + case SRQ_ERROR_REC: case CQ_ERROR_REC: { /* Initiate user-mode asynchronous event processing. 
*/ @@ -952,6 +953,7 @@ __process_misc_cb( uintn_t bytes_ret; cl_status_t cl_status; ib_ca_attr_t *p_old_ca_attr; + ib_api_status_t status; pnp_event = p_misc_cb_info->ioctl_rec.pnp_cb_ioctl_rec.pnp_event; ca_guid = p_misc_cb_info->ioctl_rec.pnp_cb_ioctl_rec.pnp_info.ca.ca_guid; @@ -1004,8 +1006,13 @@ __process_misc_cb( ref_al_obj( &p_ci_ca->obj ); cl_spinlock_release( &gp_al_mgr->obj.lock ); - ci_ca_update_attr( p_ci_ca, &p_old_ca_attr ); - if( p_old_ca_attr ) + status = ci_ca_update_attr( p_ci_ca, &p_old_ca_attr ); + if( status != IB_SUCCESS) { + AL_PRINT(TRACE_LEVEL_ERROR ,AL_DBG_ERROR , + ("update CA attributes returned %#x.\n", status) ); + break; + } + if ( p_old_ca_attr ) cl_free( p_old_ca_attr ); /* diff --git a/trunk/core/al/user/ual_qp.c b/trunk/core/al/user/ual_qp.c index c8a409f2..0ff64866 100644 --- a/trunk/core/al/user/ual_qp.c +++ b/trunk/core/al/user/ual_qp.c @@ -37,6 +37,7 @@ #include "al_cq.h" #include "al_pd.h" #include "al_qp.h" +#include "al_srq.h" #include "ual_mad.h" #include "ual_support.h" @@ -294,6 +295,8 @@ ual_create_qp( qp_create = *p_qp_create; qp_create.h_rq_cq = qp_create.h_rq_cq->h_ci_cq; qp_create.h_sq_cq = qp_create.h_sq_cq->h_ci_cq; + if (qp_create.h_srq) + qp_create.h_srq = qp_create.h_srq->h_ci_srq; status = uvp_intf.pre_create_qp( h_pd->h_ci_pd, &qp_create, &qp_ioctl.in.umv_buf ); if( status != IB_SUCCESS ) @@ -312,6 +315,9 @@ ual_create_qp( (ib_cq_handle_t)p_qp_create->h_rq_cq->obj.hdl; qp_ioctl.in.qp_create.h_sq_cq = (ib_cq_handle_t)p_qp_create->h_sq_cq->obj.hdl; + if (p_qp_create->h_srq) + qp_ioctl.in.qp_create.h_srq = + (ib_srq_handle_t)p_qp_create->h_srq->obj.hdl; qp_ioctl.in.context = h_qp; qp_ioctl.in.ev_notify = (h_qp->pfn_event_cb != NULL) ? TRUE : FALSE; @@ -329,6 +335,12 @@ ual_create_qp( else { status = qp_ioctl.out.status; + + if( status == IB_SUCCESS ) + { + h_qp->obj.hdl = qp_ioctl.out.h_qp; + *p_qp_attr = qp_ioctl.out.attr; + } } /* Post uvp call */ @@ -367,11 +379,6 @@ ual_create_qp( h_qp->pfn_post_send = ual_post_send; } - if( status == IB_SUCCESS ) - { - h_qp->obj.hdl = qp_ioctl.out.h_qp; - *p_qp_attr = qp_ioctl.out.attr; - } AL_EXIT( AL_DBG_QP ); return status; @@ -554,6 +561,8 @@ ual_query_qp( p_attr->h_rq_cq = h_qp->h_recv_cq->h_ci_cq; if( h_qp->h_send_cq ) p_attr->h_sq_cq = h_qp->h_send_cq->h_ci_cq; + if( h_qp->h_srq ) + p_attr->h_srq = h_qp->h_srq->h_ci_srq; /* Post uvp call */ if( h_qp->h_ci_qp && uvp_intf.post_query_qp ) diff --git a/trunk/core/al/user/ual_srq.c b/trunk/core/al/user/ual_srq.c new file mode 100644 index 00000000..a575b7e9 --- /dev/null +++ b/trunk/core/al/user/ual_srq.c @@ -0,0 +1,438 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id: ual_qp.c 1611 2006-08-20 14:48:55Z sleybo $ + */ + + +#include "al.h" +#include "al_av.h" +#include "al_ci_ca.h" +#include "al_cq.h" +#include "al_pd.h" +#include "al_srq.h" +#include "ual_mad.h" +#include "ual_support.h" + + +#include "al_debug.h" +#if defined(EVENT_TRACING) +#ifdef offsetof +#undef offsetof +#endif +#include "ual_srq.tmh" +#endif + + +ib_api_status_t +ual_post_srq_recv( + IN const ib_srq_handle_t h_srq, + IN ib_recv_wr_t* const p_recv_wr, + OUT ib_recv_wr_t **pp_recv_failure OPTIONAL ) +{ + uintn_t failed_index; + uintn_t bytes_ret; + uint32_t num_wr = 0; + uint32_t num_ds = 0; + ib_recv_wr_t* p_wr; + ib_local_ds_t* p_ds; + ual_post_srq_recv_ioctl_t *p_srq_ioctl; + size_t ioctl_buf_sz; + cl_status_t cl_status; + ib_api_status_t status; + + AL_ENTER( AL_DBG_SRQ ); + + /* + * Since the work requests form a linked list but must be passed to + * the kernel as an array, first walk the list to determine how much + * memory needs to be allocated. + */ + for( p_wr = p_recv_wr; p_wr; p_wr = p_wr->p_next ) + { + num_wr++; + + /* Check for overflow */ + if( !num_wr ) + break; + if( num_ds > num_ds + p_wr->num_ds ) + { + num_wr = 0; + break; + } + + num_ds += p_wr->num_ds; + } + if( !num_wr ) + { + AL_EXIT( AL_DBG_SRQ ); + return IB_INVALID_PARAMETER; + } + + ioctl_buf_sz = sizeof(ual_post_srq_recv_ioctl_t); + ioctl_buf_sz += sizeof(ib_recv_wr_t) * (num_wr - 1); + ioctl_buf_sz += sizeof(ib_local_ds_t) * num_ds; + + p_srq_ioctl = (ual_post_srq_recv_ioctl_t*)cl_zalloc( ioctl_buf_sz ); + if( !p_srq_ioctl ) + { + AL_PRINT_EXIT(TRACE_LEVEL_ERROR ,AL_DBG_ERROR, + ("Failed to allocate IOCTL buffer.\n") ); + return IB_INSUFFICIENT_MEMORY; + } + p_ds = (ib_local_ds_t*)&p_srq_ioctl->in.recv_wr[num_wr]; + + /* Now populate the ioctl buffer and send down the ioctl */ + p_srq_ioctl->in.h_srq = h_srq->obj.hdl; + p_srq_ioctl->in.num_wr = num_wr; + p_srq_ioctl->in.num_ds = num_ds; + num_wr = 0; + for( p_wr = p_recv_wr; p_wr; p_wr = p_wr->p_next ) + { + p_srq_ioctl->in.recv_wr[num_wr++] = *p_wr; + cl_memcpy( + p_ds, p_wr->ds_array, sizeof(ib_local_ds_t) * p_wr->num_ds ); + p_ds += p_wr->num_ds; + } + + cl_status = do_al_dev_ioctl( UAL_POST_SRQ_RECV, + &p_srq_ioctl->in, ioctl_buf_sz, + &p_srq_ioctl->out, sizeof(p_srq_ioctl->out), + &bytes_ret ); + + if( cl_status != CL_SUCCESS || bytes_ret != sizeof(p_srq_ioctl->out) ) + { + if( pp_recv_failure ) + *pp_recv_failure = p_recv_wr; + + AL_PRINT(TRACE_LEVEL_ERROR ,AL_DBG_ERROR , + ("UAL_POST_SRQ_RECV IOCTL returned %s.\n", + CL_STATUS_MSG(cl_status)) ); + status = IB_ERROR; + } + else + { + status = p_srq_ioctl->out.status; + + if( status != IB_SUCCESS && pp_recv_failure ) + { + /* Get the failed index */ + failed_index = num_wr - p_srq_ioctl->out.failed_cnt; + p_wr = p_recv_wr; + while( failed_index-- ) + p_wr = p_wr->p_next; + + *pp_recv_failure = p_wr; + } + } + + cl_free( p_srq_ioctl ); + AL_EXIT( AL_DBG_SRQ ); + return status; +} + + + +ib_api_status_t +ual_create_srq( + IN const ib_pd_handle_t h_pd, + IN OUT ib_srq_handle_t
h_srq, + IN const ib_srq_attr_t* const p_srq_attr) +{ + /* The first argument is probably not needed */ + ual_create_srq_ioctl_t srq_ioctl; + uintn_t bytes_ret; + cl_status_t cl_status; + ib_api_status_t status; + uvp_interface_t uvp_intf = h_srq->obj.p_ci_ca->verbs.user_verbs; + ib_srq_attr_t srq_attr; + + AL_ENTER( AL_DBG_SRQ ); + + /* Clear the srq_ioctl */ + cl_memclr( &srq_ioctl, sizeof(srq_ioctl) ); + + /* Pre call to the UVP library */ + if( h_pd->h_ci_pd && uvp_intf.pre_create_srq ) + { + /* The post call MUST exist as it sets the UVP srq handle. */ + CL_ASSERT( uvp_intf.post_create_srq ); + /* Convert the handles to UVP handles */ + srq_attr = *p_srq_attr; + status = uvp_intf.pre_create_srq( h_pd->h_ci_pd, + &srq_attr, &srq_ioctl.in.umv_buf ); + if( status != IB_SUCCESS ) + { + AL_EXIT( AL_DBG_SRQ ); + return status; + } + } + /* + * Convert the handles to KAL handles once again starting + * from the input srq attribute + */ + srq_ioctl.in.h_pd = h_pd->obj.hdl; + srq_ioctl.in.srq_attr = *p_srq_attr; + srq_ioctl.in.context = h_srq; + srq_ioctl.in.ev_notify = (h_srq->pfn_event_cb != NULL) ? TRUE : FALSE; + + cl_status = do_al_dev_ioctl( UAL_CREATE_SRQ, + &srq_ioctl.in, sizeof(srq_ioctl.in), &srq_ioctl.out, sizeof(srq_ioctl.out), + &bytes_ret ); + + if( cl_status != CL_SUCCESS || bytes_ret != sizeof(srq_ioctl.out) ) + { + AL_PRINT(TRACE_LEVEL_ERROR ,AL_DBG_ERROR , + ("UAL_CREATE_SRQ IOCTL returned %s.\n", + CL_STATUS_MSG(cl_status)) ); + status = IB_ERROR; + } + else + { + status = srq_ioctl.out.status; + } + + /* Post uvp call */ + if( h_pd->h_ci_pd && uvp_intf.post_create_srq ) + { + uvp_intf.post_create_srq( h_pd->h_ci_pd, + status, &h_srq->h_ci_srq, &srq_ioctl.out.umv_buf ); + + if( uvp_intf.post_srq_recv ) + { + h_srq->h_recv_srq = h_srq->h_ci_srq; + h_srq->pfn_post_srq_recv = uvp_intf.post_srq_recv; + } + else + { + h_srq->h_recv_srq = h_srq; + h_srq->pfn_post_srq_recv = ual_post_srq_recv; + } + } + else + { + h_srq->h_recv_srq = h_srq; + h_srq->pfn_post_srq_recv = ual_post_srq_recv; + } + + if( status == IB_SUCCESS ) + { + h_srq->obj.hdl = srq_ioctl.out.h_srq; + } + + AL_EXIT( AL_DBG_SRQ ); + return status; +} + + +ib_api_status_t +ual_modify_srq( + IN ib_srq_handle_t h_srq, + IN const ib_srq_attr_t* const p_srq_attr, + IN const ib_srq_attr_mask_t srq_attr_mask) +{ + ual_modify_srq_ioctl_t srq_ioctl; + uintn_t bytes_ret; + cl_status_t cl_status; + ib_api_status_t status; + uvp_interface_t uvp_intf = h_srq->obj.p_ci_ca->verbs.user_verbs; + + AL_ENTER( AL_DBG_SRQ ); + + /* Clear the srq_ioctl */ + cl_memclr( &srq_ioctl, sizeof(srq_ioctl) ); + + /* Call the uvp pre call if the vendor library provided a valid srq handle */ + if( h_srq->h_ci_srq && uvp_intf.pre_modify_srq ) + { + /* Pre call to the UVP library */ + status = uvp_intf.pre_modify_srq( h_srq->h_ci_srq, + p_srq_attr, srq_attr_mask, &srq_ioctl.in.umv_buf ); + if( status != IB_SUCCESS ) + { + AL_EXIT( AL_DBG_SRQ ); + return status; + } + } + + srq_ioctl.in.h_srq = h_srq->obj.hdl; + srq_ioctl.in.srq_attr = *p_srq_attr; + srq_ioctl.in.srq_attr_mask = srq_attr_mask; + + cl_status = do_al_dev_ioctl( UAL_MODIFY_SRQ, + &srq_ioctl.in, sizeof(srq_ioctl.in), &srq_ioctl.out, sizeof(srq_ioctl.out), + &bytes_ret ); + + if( cl_status != CL_SUCCESS || bytes_ret != sizeof(srq_ioctl.out) ) + { + AL_PRINT(TRACE_LEVEL_ERROR ,AL_DBG_ERROR , + ("UAL_MODIFY_SRQ IOCTL returned %s.\n", + CL_STATUS_MSG(cl_status)) ); + status = IB_ERROR; + } + else + { + status = srq_ioctl.out.status; + } + + /* Post uvp call */ + if( h_srq->h_ci_srq &&
+
+ib_api_status_t
+ual_modify_srq(
+	IN			ib_srq_handle_t			h_srq,
+	IN		const	ib_srq_attr_t* const	p_srq_attr,
+	IN		const	ib_srq_attr_mask_t		srq_attr_mask)
+{
+	ual_modify_srq_ioctl_t	srq_ioctl;
+	uintn_t			bytes_ret;
+	cl_status_t		cl_status;
+	ib_api_status_t		status;
+	uvp_interface_t		uvp_intf = h_srq->obj.p_ci_ca->verbs.user_verbs;
+
+	AL_ENTER( AL_DBG_SRQ );
+
+	/* Clear the srq_ioctl */
+	cl_memclr( &srq_ioctl, sizeof(srq_ioctl) );
+
+	/* Call the uvp pre call if the vendor library provided a valid srq handle */
+	if( h_srq->h_ci_srq && uvp_intf.pre_modify_srq )
+	{
+		/* Pre call to the UVP library */
+		status = uvp_intf.pre_modify_srq( h_srq->h_ci_srq,
+			p_srq_attr, srq_attr_mask, &srq_ioctl.in.umv_buf );
+		if( status != IB_SUCCESS )
+		{
+			AL_EXIT( AL_DBG_SRQ );
+			return status;
+		}
+	}
+
+	srq_ioctl.in.h_srq = h_srq->obj.hdl;
+	srq_ioctl.in.srq_attr = *p_srq_attr;
+	srq_ioctl.in.srq_attr_mask = srq_attr_mask;
+
+	cl_status = do_al_dev_ioctl( UAL_MODIFY_SRQ,
+		&srq_ioctl.in, sizeof(srq_ioctl.in), &srq_ioctl.out, sizeof(srq_ioctl.out),
+		&bytes_ret );
+
+	if( cl_status != CL_SUCCESS || bytes_ret != sizeof(srq_ioctl.out) )
+	{
+		AL_PRINT(TRACE_LEVEL_ERROR ,AL_DBG_ERROR ,
+			("UAL_MODIFY_SRQ IOCTL returned %s.\n",
+			CL_STATUS_MSG(cl_status)) );
+		status = IB_ERROR;
+	}
+	else
+	{
+		status = srq_ioctl.out.status;
+	}
+
+	/* Post uvp call */
+	if( h_srq->h_ci_srq && uvp_intf.post_modify_srq )
+	{
+		uvp_intf.post_modify_srq( h_srq->h_ci_srq, status,
+			&srq_ioctl.out.umv_buf );
+	}
+
+	//if( status == IB_SUCCESS )
+	//{
+	//	*p_srq_attr = srq_ioctl.out.srq_attr;
+	//}
+
+	AL_EXIT( AL_DBG_SRQ );
+	return status;
+}
+
+
+ib_api_status_t
+ual_query_srq(
+	IN			ib_srq_handle_t		h_srq,
+		OUT		ib_srq_attr_t*		p_srq_attr )
+{
+	ual_query_srq_ioctl_t	srq_ioctl;
+	uintn_t			bytes_ret;
+	cl_status_t		cl_status;
+	ib_api_status_t		status;
+	uvp_interface_t		uvp_intf = h_srq->obj.p_ci_ca->verbs.user_verbs;
+	ib_srq_attr_t*		p_attr;
+
+	AL_ENTER( AL_DBG_SRQ );
+
+	/* Clear the srq_ioctl */
+	cl_memclr( &srq_ioctl, sizeof(srq_ioctl) );
+
+	/* Call the uvp pre call if the vendor library provided a valid srq handle */
+	if( h_srq->h_ci_srq && uvp_intf.pre_query_srq )
+	{
+		/* Pre call to the UVP library */
+		status = uvp_intf.pre_query_srq( h_srq->h_ci_srq, &srq_ioctl.in.umv_buf );
+		if( status != IB_SUCCESS )
+		{
+			AL_EXIT( AL_DBG_SRQ );
+			return status;
+		}
+	}
+
+	srq_ioctl.in.h_srq = h_srq->obj.hdl;
+
+	cl_status = do_al_dev_ioctl( UAL_QUERY_SRQ,
+		&srq_ioctl.in, sizeof(srq_ioctl.in), &srq_ioctl.out, sizeof(srq_ioctl.out),
+		&bytes_ret );
+
+	if( cl_status != CL_SUCCESS || bytes_ret != sizeof(srq_ioctl.out) )
+	{
+		AL_PRINT(TRACE_LEVEL_ERROR ,AL_DBG_ERROR ,
+			("UAL_QUERY_SRQ IOCTL returned %s.\n",
+			CL_STATUS_MSG(cl_status)) );
+		status = IB_ERROR;
+	}
+	else
+	{
+		status = srq_ioctl.out.status;
+	}
+
+	p_attr = &srq_ioctl.out.srq_attr;
+
+	/* Post uvp call */
+	if( h_srq->h_ci_srq && uvp_intf.post_query_srq )
+	{
+		uvp_intf.post_query_srq( h_srq->h_ci_srq, status,
+			p_attr, &srq_ioctl.out.umv_buf );
+	}
+
+	if( IB_SUCCESS == status )
+	{
+		/* UVP handles in srq_attr will be converted to UAL's handles
+		 * by the common code
+		 */
+		*p_srq_attr = *p_attr;
+	}
+
+	AL_EXIT( AL_DBG_SRQ );
+	return status;
+}
+
+
+ib_api_status_t
+ual_destroy_srq(
+	IN			ib_srq_handle_t		h_srq )
+{
+	ual_destroy_srq_ioctl_t	srq_ioctl;
+	uintn_t			bytes_ret;
+	cl_status_t		cl_status;
+	ib_api_status_t		status;
+	uvp_interface_t		uvp_intf = h_srq->obj.p_ci_ca->verbs.user_verbs;
+
+	AL_ENTER( AL_DBG_SRQ );
+
+	/* Call the uvp pre call if the vendor library provided a valid srq handle */
+	if( h_srq->h_ci_srq && uvp_intf.pre_destroy_srq )
+	{
+		status = uvp_intf.pre_destroy_srq( h_srq->h_ci_srq );
+		if (status != IB_SUCCESS)
+		{
+			AL_EXIT( AL_DBG_SRQ );
+			return status;
+		}
+	}
+
+	cl_memclr( &srq_ioctl, sizeof(srq_ioctl) );
+	srq_ioctl.in.h_srq = h_srq->obj.hdl;
+	cl_status = do_al_dev_ioctl( UAL_DESTROY_SRQ,
+		&srq_ioctl.in, sizeof(srq_ioctl.in), &srq_ioctl.out, sizeof(srq_ioctl.out),
+		&bytes_ret );
+
+	if( cl_status != CL_SUCCESS || bytes_ret != sizeof(srq_ioctl.out) )
+	{
+		AL_PRINT(TRACE_LEVEL_ERROR ,AL_DBG_ERROR ,
+			("UAL_DESTROY_SRQ IOCTL returned %s.\n",
+			CL_STATUS_MSG(cl_status)) );
+		status = IB_ERROR;
+	}
+	else
+	{
+		status = srq_ioctl.out.status;
+	}
+
+	/* Call vendor's post_destroy_srq */
+	if( h_srq->h_ci_srq && uvp_intf.post_destroy_srq )
+		uvp_intf.post_destroy_srq( h_srq->h_ci_srq, status );
+
+	AL_EXIT( AL_DBG_SRQ );
+	return status;
+}
+
diff --git a/trunk/core/bus/kernel/bus_pnp.c b/trunk/core/bus/kernel/bus_pnp.c
index 9eea76cc..ee235566 100644
--- a/trunk/core/bus/kernel/bus_pnp.c
+++ b/trunk/core/bus/kernel/bus_pnp.c
@@ -679,6 +679,11 @@ al_set_ifc(
 	p_ifc->map_phys_mlnx_fmr = mlnx_map_phys_fmr;
 	p_ifc->unmap_mlnx_fmr = mlnx_unmap_fmr;
 	p_ifc->destroy_mlnx_fmr = mlnx_destroy_fmr;
+	p_ifc->create_srq = ib_create_srq;
+	p_ifc->modify_srq = ib_modify_srq;
+	p_ifc->query_srq = ib_query_srq;
+ p_ifc->destroy_srq = ib_destroy_srq; + p_ifc->post_srq_recv = ib_post_srq_recv; BUS_EXIT( BUS_DBG_PNP ); } diff --git a/trunk/hw/mthca/kernel/hca_data.c b/trunk/hw/mthca/kernel/hca_data.c index 3b8014bb..5e639a33 100644 --- a/trunk/hw/mthca/kernel/hca_data.c +++ b/trunk/hw/mthca/kernel/hca_data.c @@ -270,7 +270,10 @@ mlnx_conv_hca_cap( ca_attr_p->max_qps_per_mcast_grp = hca_info_p->max_mcast_qp_attach; ca_attr_p->max_fmr = hca_info_p->max_fmr; ca_attr_p->max_map_per_fmr = hca_info_p->max_map_per_fmr; - + ca_attr_p->max_srq = hca_info_p->max_srq; + ca_attr_p->max_srq_wrs = hca_info_p->max_srq_wr; + ca_attr_p->max_srq_sges = hca_info_p->max_srq_sge; + ca_attr_p->local_ack_delay = hca_info_p->local_ca_ack_delay; ca_attr_p->bad_pkey_ctr_support = hca_info_p->device_cap_flags & IB_DEVICE_BAD_PKEY_CNTR; ca_attr_p->bad_qkey_ctr_support = hca_info_p->device_cap_flags & IB_DEVICE_BAD_QKEY_CNTR; @@ -279,6 +282,7 @@ mlnx_conv_hca_cap( ca_attr_p->av_port_check = hca_info_p->device_cap_flags & IB_DEVICE_UD_AV_PORT_ENFORCE; ca_attr_p->change_primary_port = hca_info_p->device_cap_flags & IB_DEVICE_CHANGE_PHY_PORT; ca_attr_p->modify_wr_depth = hca_info_p->device_cap_flags & IB_DEVICE_RESIZE_MAX_WR; + ca_attr_p->modify_srq_depth = hca_info_p->device_cap_flags & IB_DEVICE_SRQ_RESIZE; ca_attr_p->hw_agents = FALSE; // in the context of IBAL then agent is implemented on the host ca_attr_p->num_page_sizes = 1; @@ -356,6 +360,27 @@ void ca_event_handler(struct ib_event *ev, void *context) } } +void srq_event_handler(struct ib_event *ev, void *context) +{ + mlnx_hob_t *hob_p = (mlnx_hob_t *)context; + ib_event_rec_t event_rec; + struct mthca_srq *srq_p; + + // prepare parameters + event_rec.type = ev->event; + event_rec.vendor_specific = ev->vendor_specific; + srq_p = (struct mthca_srq *)ev->element.srq; + event_rec.context = srq_p->srq_context; + + // call the user callback + if (hob_p) + (hob_p->async_cb_p)(&event_rec); + else { + HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_SHIM ,("Incorrect context. Async callback was not invoked\n")); + } +} + + void qp_event_handler(struct ib_event *ev, void *context) { mlnx_hob_t *hob_p = (mlnx_hob_t *)context; diff --git a/trunk/hw/mthca/kernel/hca_data.h b/trunk/hw/mthca/kernel/hca_data.h index b3ce1da7..3f181440 100644 --- a/trunk/hw/mthca/kernel/hca_data.h +++ b/trunk/hw/mthca/kernel/hca_data.h @@ -353,6 +353,8 @@ void cq_comp_handler(struct ib_cq *cq, void *context); void ca_event_handler(struct ib_event *ev, void *context); +void srq_event_handler(struct ib_event *ev, void *context); + void qp_event_handler(struct ib_event *ev, void *context); void cq_event_handler(struct ib_event *ev, void *context); diff --git a/trunk/hw/mthca/kernel/hca_debug.h b/trunk/hw/mthca/kernel/hca_debug.h index 7ecfed43..5fe31819 100644 --- a/trunk/hw/mthca/kernel/hca_debug.h +++ b/trunk/hw/mthca/kernel/hca_debug.h @@ -73,14 +73,15 @@ static void _build_str( const char * format, ... ) #define WPP_CONTROL_GUIDS \ WPP_DEFINE_CONTROL_GUID(HCACtlGuid,(8BF1F640,63FE,4743,B9EF,FA38C695BFDE), \ WPP_DEFINE_BIT( HCA_DBG_DEV) \ - WPP_DEFINE_BIT( HCA_DBG_INIT) \ WPP_DEFINE_BIT( HCA_DBG_PNP) \ + WPP_DEFINE_BIT( HCA_DBG_INIT) \ WPP_DEFINE_BIT( HCA_DBG_MAD) \ WPP_DEFINE_BIT( HCA_DBG_PO) \ WPP_DEFINE_BIT( HCA_DBG_CQ) \ WPP_DEFINE_BIT( HCA_DBG_QP) \ WPP_DEFINE_BIT( HCA_DBG_MEMORY) \ WPP_DEFINE_BIT( HCA_DBG_AV) \ + WPP_DEFINE_BIT( HCA_DBG_SRQ) \ WPP_DEFINE_BIT( HCA_DBG_LOW) \ WPP_DEFINE_BIT( HCA_DBG_SHIM)) @@ -97,10 +98,10 @@ static void _build_str( const char * format, ... 
) // HCA_ENTER(FLAG); // HCA_EXIT(FLAG); // USEPREFIX(HCA_PRINT, "%!STDPREFIX! [MTHCA] :%!FUNC!() :"); -// USEPREFIX(HCA_PRINT_EXIT, "%!STDPREFIX! [MTHCA] :%!FUNC!() :"); -// USESUFFIX(HCA_PRINT_EXIT, "[MTHCA] :%!FUNC!():]"); // USESUFFIX(HCA_ENTER, " [MTHCA] :%!FUNC!()["); // USESUFFIX(HCA_EXIT, " [MTHCA] :%!FUNC!()]"); +// USEPREFIX(HCA_PRINT_EXIT, "%!STDPREFIX! [MTHCA] :%!FUNC!() :"); +// USESUFFIX(HCA_PRINT_EXIT, "[MTHCA] :%!FUNC!():]"); // end_wpp @@ -121,16 +122,17 @@ static void _build_str( const char * format, ... ) #define HCA_DBG_DEV (1 << 0) -#define HCA_DBG_INIT (1<<1) -#define HCA_DBG_PNP (1 << 2) +#define HCA_DBG_PNP (1<<1) +#define HCA_DBG_INIT (1 << 2) #define HCA_DBG_MAD (1 << 3) #define HCA_DBG_PO (1 << 4) #define HCA_DBG_QP (1 << 5) #define HCA_DBG_CQ (1 << 6) #define HCA_DBG_MEMORY (1 << 7) #define HCA_DBG_AV (1<<8) -#define HCA_DBG_LOW (1 << 9) -#define HCA_DBG_SHIM (1 << 10) +#define HCA_DBG_SRQ (1 << 9) +#define HCA_DBG_LOW (1 << 10) +#define HCA_DBG_SHIM (1 << 11) #if DBG diff --git a/trunk/hw/mthca/kernel/hca_direct.c b/trunk/hw/mthca/kernel/hca_direct.c index 5153419b..6e1eb47d 100644 --- a/trunk/hw/mthca/kernel/hca_direct.c +++ b/trunk/hw/mthca/kernel/hca_direct.c @@ -52,6 +52,8 @@ /* * Work Request Processing Verbs. */ + + ib_api_status_t mlnx_post_send ( IN const ib_qp_handle_t h_qp, @@ -65,9 +67,6 @@ mlnx_post_send ( HCA_ENTER(HCA_DBG_QP); - // sanity checks - - // create CQ err = ib_dev->post_send(ib_qp_p, p_send_wr, pp_failed ); if (err) { HCA_PRINT(TRACE_LEVEL_ERROR , HCA_DBG_QP, @@ -102,9 +101,6 @@ mlnx_post_recv ( HCA_ENTER(HCA_DBG_QP); - // sanity checks - - // create CQ err = ib_dev->post_recv(ib_qp_p, p_recv_wr, pp_failed ); if (err) { HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_QP, @@ -125,6 +121,39 @@ err_post_recv: } +ib_api_status_t +mlnx_post_srq_recv ( + IN const ib_srq_handle_t h_srq, + IN ib_recv_wr_t *p_recv_wr, + OUT ib_recv_wr_t **pp_failed OPTIONAL ) +{ + int err; + ib_api_status_t status; + struct ib_srq *ib_srq_p = (struct ib_srq *)h_srq; + struct ib_device *ib_dev = ib_srq_p->device; + + HCA_ENTER(HCA_DBG_QP); + + err = ib_dev->post_srq_recv(ib_srq_p, p_recv_wr, pp_failed ); + if (err) { + HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_QP, + ("post_srq_recv failed (%d)\n", err)); + if (err == -ENOMEM) + status = IB_INSUFFICIENT_RESOURCES; + else + status = errno_to_iberr(err); + goto err_post_recv; + } + + status = IB_SUCCESS; + +err_post_recv: + HCA_PRINT_EXIT(TRACE_LEVEL_ERROR , HCA_DBG_QP, + ("completes with ERROR status %s\n", IB_GET_ERR_STR(status))); + return status; + +} + /* * Completion Processing and Completion Notification Request Verbs. 
*/ @@ -249,6 +278,7 @@ mlnx_direct_if( { p_interface->post_send = mlnx_post_send; p_interface->post_recv = mlnx_post_recv; + p_interface->post_srq_recv = mlnx_post_srq_recv; p_interface->enable_ncomp_cq_notify = mlnx_enable_ncomp_cq_notify; p_interface->peek_cq = NULL; /* mlnx_peek_cq: Not implemented */ diff --git a/trunk/hw/mthca/kernel/hca_driver.h b/trunk/hw/mthca/kernel/hca_driver.h index 02b0f70f..cb395c84 100644 --- a/trunk/hw/mthca/kernel/hca_driver.h +++ b/trunk/hw/mthca/kernel/hca_driver.h @@ -233,6 +233,7 @@ static inline errno_to_iberr(int err) MAP_ERR( ENODEV, IB_UNSUPPORTED ); MAP_ERR( EINVAL, IB_INVALID_PARAMETER ); MAP_ERR( ENOSYS, IB_UNSUPPORTED ); + MAP_ERR( ERANGE, IB_INVALID_SETTING ); default: //HCA_PRINT(TRACE_LEVEL_ERROR, HCA_DBG_SHIM, // "Unmapped errno (%d)\n", err); diff --git a/trunk/hw/mthca/kernel/hca_mcast.c b/trunk/hw/mthca/kernel/hca_mcast.c index f309577c..5c731105 100644 --- a/trunk/hw/mthca/kernel/hca_mcast.c +++ b/trunk/hw/mthca/kernel/hca_mcast.c @@ -70,6 +70,12 @@ mlnx_attach_mcast ( status = IB_UNSUPPORTED; goto err_user_unsupported; } + + if( !cl_is_blockable() ) { + status = IB_UNSUPPORTED; + goto err_unsupported; + } + if (!p_mcast_gid || !ph_mcast) { status = IB_INVALID_PARAMETER; goto err_invalid_param; @@ -102,8 +108,8 @@ mlnx_attach_mcast ( RtlCopyMemory(mcast_p->mcast_gid.raw, p_mcast_gid->raw, sizeof *p_mcast_gid); HCA_PRINT(TRACE_LEVEL_INFORMATION, HCA_DBG_SHIM, ("mcasth %p, qp_p %p, mlid %hx, mgid %I64x`%I64x\n", mcast_p, mcast_p->ib_qp_p, mcast_p->mcast_lid, - *(uint64_t*)&mcast_p->mcast_gid.raw[0], - *(uint64_t*)&mcast_p->mcast_gid.raw[8] )); + cl_ntoh64(*(uint64_t*)&mcast_p->mcast_gid.raw[0]), + cl_ntoh64(*(uint64_t*)&mcast_p->mcast_gid.raw[8] ))); // return the result if (ph_mcast) *ph_mcast = (ib_mcast_handle_t)mcast_p; @@ -115,6 +121,7 @@ err_attach: kfree(mcast_p); err_no_mem: err_invalid_param: +err_unsupported: err_user_unsupported: end: HCA_PRINT_EXIT(TRACE_LEVEL_ERROR , HCA_DBG_SHIM, @@ -134,33 +141,41 @@ mlnx_detach_mcast ( // sanity check if (!mcast_p || !mcast_p->ib_qp_p) { - HCA_PRINT_EXIT(TRACE_LEVEL_ERROR , HCA_DBG_SHIM, + HCA_PRINT(TRACE_LEVEL_ERROR , HCA_DBG_SHIM, ("completes with ERROR status IB_INVALID_PARAMETER\n")); - return IB_INVALID_PARAMETER; + status = IB_INVALID_PARAMETER; + goto err_invalid_param; } - ib_dev = mcast_p->ib_qp_p->device; + if( !cl_is_blockable() ) { + status = IB_UNSUPPORTED; + goto err_unsupported; + } + + HCA_PRINT(TRACE_LEVEL_INFORMATION, HCA_DBG_SHIM,("mcasth %p, qp_p %p, mlid %hx, mgid %I64x`%I64x\n", mcast_p, mcast_p->ib_qp_p, mcast_p->mcast_lid, *(uint64_t*)&mcast_p->mcast_gid.raw[0], *(uint64_t*)&mcast_p->mcast_gid.raw[8] )); // detach - err = ibv_detach_mcast( mcast_p->ib_qp_p, - (union ib_gid *)&mcast_p->mcast_gid, mcast_p->mcast_lid ); - if (err) { - HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_SHIM ,("ibv_detach_mcast failed (%d)\n", err)); - status = errno_to_iberr(err); - goto err_detach_mcast; - } + err = ibv_detach_mcast( mcast_p->ib_qp_p, + (union ib_gid *)&mcast_p->mcast_gid, mcast_p->mcast_lid ); + if (err) { + HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_SHIM ,("ibv_detach_mcast failed (%d)\n", err)); + status = errno_to_iberr(err); + goto err_detach_mcast; + } status = IB_SUCCESS; err_detach_mcast: kfree(mcast_p); - HCA_PRINT_EXIT(TRACE_LEVEL_ERROR , HCA_DBG_SHIM , +err_unsupported: + HCA_PRINT(TRACE_LEVEL_ERROR , HCA_DBG_SHIM , ("completes with ERROR status %s\n", IB_GET_ERR_STR(status))); +err_invalid_param: return status; } diff --git a/trunk/hw/mthca/kernel/hca_verbs.c 
b/trunk/hw/mthca/kernel/hca_verbs.c index 79551576..d7f07377 100644 --- a/trunk/hw/mthca/kernel/hca_verbs.c +++ b/trunk/hw/mthca/kernel/hca_verbs.c @@ -793,6 +793,153 @@ err_destroy_ah: return status; } +/* +* Shared Queue Pair Management Verbs +*/ + + +ib_api_status_t +mlnx_create_srq ( + IN const ib_pd_handle_t h_pd, + IN const void *srq_context, + IN const ib_srq_attr_t * const p_srq_attr, + OUT ib_srq_handle_t *ph_srq, + IN OUT ci_umv_buf_t *p_umv_buf ) +{ + int err; + ib_api_status_t status; + struct ib_srq *ib_srq_p; + struct mthca_srq *srq_p; + struct ib_srq_init_attr srq_init_attr; + struct ib_ucontext *p_context = NULL; + struct ib_pd *ib_pd_p = (struct ib_pd *)h_pd; + struct ib_device *ib_dev = ib_pd_p->device; + mlnx_hob_t *hob_p = HOB_FROM_IBDEV(ib_dev); + + HCA_ENTER(HCA_DBG_SRQ); + + if( p_umv_buf && p_umv_buf->command) { + + // sanity checks + if (p_umv_buf->input_size < sizeof(struct ibv_create_srq) || + p_umv_buf->output_size < sizeof(struct ibv_create_srq_resp) || + !p_umv_buf->p_inout_buf) { + status = IB_INVALID_PARAMETER; + goto err_inval_params; + } + p_context = ib_pd_p->ucontext; + } + + // prepare the parameters + RtlZeroMemory(&srq_init_attr, sizeof(srq_init_attr)); + srq_init_attr.event_handler = srq_event_handler; + srq_init_attr.srq_context = hob_p; + srq_init_attr.attr = *p_srq_attr; + + // allocate srq + ib_srq_p = ibv_create_srq(ib_pd_p, &srq_init_attr, p_context, p_umv_buf ); + if (IS_ERR(ib_srq_p)) { + err = PTR_ERR(ib_srq_p); + HCA_PRINT (TRACE_LEVEL_ERROR ,HCA_DBG_SRQ, ("ibv_create_srq failed (%d)\n", err)); + status = errno_to_iberr(err); + goto err_create_srq; + } + + // fill the object + srq_p = (struct mthca_srq *)ib_srq_p; + srq_p->srq_context = (void*)srq_context; + + // return the result + if (ph_srq) *ph_srq = (ib_srq_handle_t)srq_p; + + status = IB_SUCCESS; + +err_create_srq: +err_inval_params: + if (p_umv_buf && p_umv_buf->command) + p_umv_buf->status = status; + HCA_PRINT_EXIT(TRACE_LEVEL_ERROR ,HCA_DBG_SRQ, + ("completes with ERROR status %s\n", IB_GET_ERR_STR(status))); + return status; +} + + +ib_api_status_t +mlnx_modify_srq ( + IN const ib_srq_handle_t h_srq, + IN const ib_srq_attr_t* const p_srq_attr, + IN const ib_srq_attr_mask_t srq_attr_mask, + IN OUT ci_umv_buf_t *p_umv_buf OPTIONAL ) +{ + int err; + ib_api_status_t status = IB_SUCCESS; + struct ib_srq *ib_srq = (struct ib_srq *)h_srq; + struct ib_device *ib_dev = ib_srq->device; + UNUSED_PARAM(p_umv_buf); + + HCA_ENTER(HCA_DBG_SRQ); + + err = ibv_modify_srq(ib_srq, (void*)p_srq_attr, srq_attr_mask); + if (err) { + HCA_PRINT (TRACE_LEVEL_ERROR, HCA_DBG_AV, + ("ibv_modify_srq failed (%d)\n", err)); + status = errno_to_iberr(err); + } + + HCA_PRINT_EXIT(TRACE_LEVEL_ERROR , HCA_DBG_SRQ, + ("completes with ERROR status %s\n", IB_GET_ERR_STR(status))); + return status; +} + +ib_api_status_t +mlnx_query_srq ( + IN const ib_srq_handle_t h_srq, + OUT ib_srq_attr_t* const p_srq_attr, + IN OUT ci_umv_buf_t *p_umv_buf OPTIONAL ) +{ + int err; + ib_api_status_t status = IB_SUCCESS; + struct ib_srq *ib_srq = (struct ib_srq *)h_srq; + struct ib_device *ib_dev = ib_srq->device; + UNUSED_PARAM(p_umv_buf); + + HCA_ENTER(HCA_DBG_SRQ); + + err = ibv_query_srq(ib_srq, p_srq_attr); + if (err) { + HCA_PRINT (TRACE_LEVEL_ERROR, HCA_DBG_AV, + ("ibv_query_srq failed (%d)\n", err)); + status = errno_to_iberr(err); + } + + HCA_PRINT_EXIT(TRACE_LEVEL_ERROR , HCA_DBG_SRQ, + ("completes with ERROR status %s\n", IB_GET_ERR_STR(status))); + return status; +} + +ib_api_status_t +mlnx_destroy_srq ( + IN const 
ib_srq_handle_t h_srq ) +{ + int err; + ib_api_status_t status = IB_SUCCESS; + struct ib_srq *ib_srq = (struct ib_srq *)h_srq; + struct ib_device *ib_dev = ib_srq->device; + + HCA_ENTER(HCA_DBG_SRQ); + + err = ibv_destroy_srq(ib_srq); + if (err) { + HCA_PRINT (TRACE_LEVEL_ERROR, HCA_DBG_AV, + ("ibv_destroy_srq failed (%d)\n", err)); + status = errno_to_iberr(err); + } + + HCA_PRINT_EXIT(TRACE_LEVEL_ERROR , HCA_DBG_SRQ, + ("completes with ERROR status %s\n", IB_GET_ERR_STR(status))); + return status; +} + /* * Queue Pair Management Verbs */ @@ -830,8 +977,6 @@ _create_qp ( } p_context = ib_pd_p->ucontext; } - else - p_context = NULL; // prepare the parameters RtlZeroMemory(&qp_init_attr, sizeof(qp_init_attr)); @@ -840,6 +985,7 @@ _create_qp ( qp_init_attr.qp_context = hob_p; qp_init_attr.recv_cq = (struct ib_cq *)p_create_attr->h_rq_cq; qp_init_attr.send_cq = (struct ib_cq *)p_create_attr->h_sq_cq; + qp_init_attr.srq = (struct ib_srq *)p_create_attr->h_srq; qp_init_attr.cap.max_recv_sge = p_create_attr->rq_sge; qp_init_attr.cap.max_send_sge = p_create_attr->sq_sge; qp_init_attr.cap.max_recv_wr = p_create_attr->rq_depth; @@ -1148,7 +1294,7 @@ mlnx_create_cq ( } /* sanity check */ - if (*p_size > (uint32_t)ib_dev->mdev->limits.max_cqes) { + if (!*p_size || *p_size > (uint32_t)ib_dev->mdev->limits.max_cqes) { status = IB_INVALID_CQ_SIZE; goto err_cqe; } @@ -1290,6 +1436,11 @@ setup_ci_interface( p_interface->modify_av = mlnx_modify_av; p_interface->destroy_av = mlnx_destroy_av; + p_interface->create_srq = mlnx_create_srq; + p_interface->modify_srq = mlnx_modify_srq; + p_interface->query_srq = mlnx_query_srq; + p_interface->destroy_srq = mlnx_destroy_srq; + p_interface->create_qp = mlnx_create_qp; p_interface->create_spl_qp = mlnx_create_spl_qp; p_interface->modify_qp = mlnx_modify_qp; diff --git a/trunk/hw/mthca/kernel/ib_verbs.h b/trunk/hw/mthca/kernel/ib_verbs.h index f8fe8925..192c24ae 100644 --- a/trunk/hw/mthca/kernel/ib_verbs.h +++ b/trunk/hw/mthca/kernel/ib_verbs.h @@ -236,12 +236,12 @@ enum ib_event_type { IB_EVENT_DEVICE_FATAL = IB_AE_LOCAL_FATAL, IB_EVENT_PORT_ACTIVE = IB_AE_PORT_ACTIVE, IB_EVENT_PORT_ERR = IB_AE_PORT_DOWN, + IB_EVENT_SRQ_LIMIT_REACHED = IB_AE_SRQ_LIMIT_REACHED, + IB_EVENT_SRQ_CATAS_ERROR = IB_AE_SRQ_CATAS_ERROR, + IB_EVENT_SRQ_QP_LAST_WQE_REACHED = IB_AE_SRQ_QP_LAST_WQE_REACHED, IB_EVENT_LID_CHANGE = IB_AE_UNKNOWN + 1, IB_EVENT_PKEY_CHANGE, - IB_EVENT_SM_CHANGE, - IB_EVENT_SRQ_ERR, - IB_EVENT_SRQ_LIMIT_REACHED, - IB_EVENT_QP_LAST_WQE_REACHED + IB_EVENT_SM_CHANGE }; struct ib_event { @@ -335,21 +335,10 @@ enum ib_cq_notify { IB_CQ_NEXT_COMP }; -enum ib_srq_attr_mask { - IB_SRQ_MAX_WR = 1 << 0, - IB_SRQ_LIMIT = 1 << 1, -}; - -struct ib_srq_attr { - u32 max_wr; - u32 max_sge; - u32 srq_limit; -}; - struct ib_srq_init_attr { - void (*event_handler)(struct ib_event *, void *); - void *srq_context; - struct ib_srq_attr attr; + void (*event_handler)(struct ib_event *, void *); + void *srq_context; + ib_srq_attr_t attr; }; struct ib_qp_cap { @@ -566,23 +555,11 @@ struct ib_umem_chunk { }; #pragma warning( default : 4200 ) -struct ib_udata { - void *inbuf; - void *outbuf; - size_t inlen; - size_t outlen; -}; - #define IB_UMEM_MAX_PAGE_CHUNK \ ((PAGE_SIZE - offsetof(struct ib_umem_chunk, page_list)) / \ ((char *) &((struct ib_umem_chunk *) 0)->page_list[1] - \ (char *) &((struct ib_umem_chunk *) 0)->page_list[0])) -struct ib_umem_object { - struct ib_uobject uobject; - struct ib_umem umem; -}; - struct ib_pd { struct list_head list; /* for chaining AV MRs (for user mode only) 
*/ struct ib_device *device; @@ -613,10 +590,11 @@ struct ib_cq { struct ib_srq { struct ib_device *device; struct ib_pd *pd; - struct ib_uobject *uobject; + struct ib_ucontext *ucontext; + struct ib_mr *ib_mr; void (*event_handler)(struct ib_event *, void *); void *srq_context; - atomic_t usecnt; + atomic_t usecnt; /* count number of work queues */ }; struct ib_qp { @@ -733,10 +711,10 @@ struct ib_device { struct ib_srq_init_attr *srq_init_attr, ci_umv_buf_t* const p_umv_buf); int (*modify_srq)(struct ib_srq *srq, - struct ib_srq_attr *srq_attr, - enum ib_srq_attr_mask srq_attr_mask); + ib_srq_attr_t *srq_attr, + ib_srq_attr_mask_t srq_attr_mask); int (*query_srq)(struct ib_srq *srq, - struct ib_srq_attr *srq_attr); + ib_srq_attr_t *srq_attr); int (*destroy_srq)(struct ib_srq *srq); int (*post_srq_recv)(struct ib_srq *srq, struct _ib_recv_wr *recv_wr, @@ -951,6 +929,8 @@ int ibv_destroy_ah(struct ib_ah *ah); * @srq_init_attr: A list of initial attributes required to create the * SRQ. If SRQ creation succeeds, then the attributes are updated to * the actual capabilities of the created SRQ. + * @context: user process context (for application calls only) + * @p_umv_buf: parameters structure (for application calls only) * * srq_attr->max_wr and srq_attr->max_sge are read the determine the * requested size of the SRQ, and set to the actual values allocated @@ -958,7 +938,9 @@ int ibv_destroy_ah(struct ib_ah *ah); * will always be at least as large as the requested values. */ struct ib_srq *ibv_create_srq(struct ib_pd *pd, - struct ib_srq_init_attr *srq_init_attr); + struct ib_srq_init_attr *srq_init_attr, + struct ib_ucontext *context, ci_umv_buf_t* const p_umv_buf); + /** * ibv_modify_srq - Modifies the attributes for the specified SRQ. @@ -973,8 +955,8 @@ struct ib_srq *ibv_create_srq(struct ib_pd *pd, * the number of receives queued drops below the limit. */ int ibv_modify_srq(struct ib_srq *srq, - struct ib_srq_attr *srq_attr, - enum ib_srq_attr_mask srq_attr_mask); + ib_srq_attr_t *srq_attr, + ib_srq_attr_mask_t srq_attr_mask); /** * ibv_query_srq - Returns the attribute list and current values for the @@ -983,7 +965,7 @@ int ibv_modify_srq(struct ib_srq *srq, * @srq_attr: The attributes of the specified SRQ. */ int ibv_query_srq(struct ib_srq *srq, - struct ib_srq_attr *srq_attr); + ib_srq_attr_t *srq_attr); /** * ibv_destroy_srq - Destroys the specified SRQ. @@ -999,8 +981,8 @@ int ibv_destroy_srq(struct ib_srq *srq); * the work request that failed to be posted on the QP. 
*/ static inline int ibv_post_srq_recv(struct ib_srq *srq, - struct _ib_recv_wr *recv_wr, - struct _ib_recv_wr **bad_recv_wr) + struct _ib_recv_wr *recv_wr, + struct _ib_recv_wr **bad_recv_wr) { return srq->device->post_srq_recv(srq, recv_wr, bad_recv_wr); } @@ -1015,9 +997,9 @@ static inline int ibv_post_srq_recv(struct ib_srq *srq, * @context: user process context (for application calls only) * @p_umv_buf: parameters structure (for application calls only) */ - struct ib_qp *ibv_create_qp(struct ib_pd *pd, - struct ib_qp_init_attr *qp_init_attr, - struct ib_ucontext *context, ci_umv_buf_t* const p_umv_buf); +struct ib_qp *ibv_create_qp(struct ib_pd *pd, + struct ib_qp_init_attr *qp_init_attr, + struct ib_ucontext *context, ci_umv_buf_t* const p_umv_buf); /** * ibv_modify_qp - Modifies the attributes for the specified QP and then diff --git a/trunk/hw/mthca/kernel/mt_l2w.c b/trunk/hw/mthca/kernel/mt_l2w.c index 43928791..f1b2f027 100644 --- a/trunk/hw/mthca/kernel/mt_l2w.c +++ b/trunk/hw/mthca/kernel/mt_l2w.c @@ -1,132 +1,132 @@ -#include -#include -#if defined(EVENT_TRACING) -#ifdef offsetof -#undef offsetof -#endif -#include "mt_l2w.tmh" -#endif - -pci_pool_t * -pci_pool_create (const char *name, struct mthca_dev *mdev, - size_t size, size_t align, size_t allocation) -{ - pci_pool_t *pool; - UNREFERENCED_PARAMETER(align); - UNREFERENCED_PARAMETER(allocation); - - MT_ASSERT(KeGetCurrentIrql() <= DISPATCH_LEVEL); - - // allocation parameter is not handled yet - ASSERT(allocation == 0); - - // allocate object - pool = (pci_pool_t *)ExAllocatePoolWithTag( NonPagedPool, sizeof(pci_pool_t), MT_TAG_PCIPOOL ); - if (pool == NULL) - return NULL; - - //TODO: not absolutely correct: Linux's pci_pool_alloc provides contiguous physical memory, - // while default alloc function - ExAllocatePoolWithTag -doesn't. - // But for now it is used for elements of size <= PAGE_SIZE - // Anyway - a sanity check: - ASSERT(size <= PAGE_SIZE); - if (size > PAGE_SIZE) - return NULL; - - //TODO: not too effective: one can read its own alloc/free functions - ExInitializeNPagedLookasideList( &pool->pool_hdr, NULL, NULL, 0, size, MT_TAG_PCIPOOL, 0 ); - - // fill the object - pool->mdev = mdev; - pool->size = size; - strncpy( pool->name, name, sizeof pool->name ); - - return pool; -} - -// from lib/string.c -/** -* strlcpy - Copy a %NUL terminated string into a sized buffer -* @dest: Where to copy the string to -* @src: Where to copy the string from -* @size: size of destination buffer -* -* Compatible with *BSD: the result is always a valid -* NUL-terminated string that fits in the buffer (unless, -* of course, the buffer size is zero). It does not pad -* out the result like strncpy() does. -*/ -SIZE_T strlcpy(char *dest, const char *src, SIZE_T size) -{ - SIZE_T ret = strlen(src); - - if (size) { - SIZE_T len = (ret >= size) ? 
size-1 : ret; - memcpy(dest, src, len); - dest[len] = '\0'; - } - return ret; -} - - -int __bitmap_full(const unsigned long *bitmap, int bits) -{ - int k, lim = bits/BITS_PER_LONG; - for (k = 0; k < lim; ++k) - if (~bitmap[k]) - return 0; - - if (bits % BITS_PER_LONG) - if (~bitmap[k] & BITMAP_LAST_WORD_MASK(bits)) - return 0; - - return 1; -} - -int __bitmap_empty(const unsigned long *bitmap, int bits) -{ - int k, lim = bits/BITS_PER_LONG; - for (k = 0; k < lim; ++k) - if (bitmap[k]) - return 0; - - if (bits % BITS_PER_LONG) - if (bitmap[k] & BITMAP_LAST_WORD_MASK(bits)) - return 0; - - return 1; -} - -int request_irq( - IN CM_PARTIAL_RESOURCE_DESCRIPTOR *int_info, /* interrupt resources */ - IN KSPIN_LOCK *isr_lock, /* spin lock for ISR */ - IN PKSERVICE_ROUTINE isr, /* ISR */ - IN void *isr_ctx, /* ISR context */ - OUT PKINTERRUPT *int_obj /* interrupt object */ - ) -{ - NTSTATUS status; - - status = IoConnectInterrupt( - int_obj, /* InterruptObject */ - isr, /* ISR */ - isr_ctx, /* ISR context */ - isr_lock, /* spinlock */ - int_info->u.Interrupt.Vector, /* interrupt vector */ - (KIRQL)int_info->u.Interrupt.Level, /* IRQL */ - (KIRQL)int_info->u.Interrupt.Level, /* Synchronize IRQL */ - (BOOLEAN)((int_info->Flags == CM_RESOURCE_INTERRUPT_LATCHED) ? - Latched : LevelSensitive), /* interrupt type: LATCHED or LEVEL */ - (BOOLEAN)(int_info->ShareDisposition == CmResourceShareShared), /* vector shared or not */ - g_processor_affinity ? g_processor_affinity : (KAFFINITY)int_info->u.Interrupt.Affinity, /* interrupt affinity */ - FALSE /* whether to save Float registers */ - ); - - if (!NT_SUCCESS(status)) { - HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_INIT ,("IoConnectInterrupt failed status %d (did you change the processor_affinity ? )\n",status)); - return -EFAULT; /* failed to connect interrupt */ - } - else - return 0; -} - +#include +#include +#if defined(EVENT_TRACING) +#ifdef offsetof +#undef offsetof +#endif +#include "mt_l2w.tmh" +#endif + +pci_pool_t * +pci_pool_create (const char *name, struct mthca_dev *mdev, + size_t size, size_t align, size_t allocation) +{ + pci_pool_t *pool; + UNREFERENCED_PARAMETER(align); + UNREFERENCED_PARAMETER(allocation); + + MT_ASSERT(KeGetCurrentIrql() <= DISPATCH_LEVEL); + + // allocation parameter is not handled yet + ASSERT(allocation == 0); + + // allocate object + pool = (pci_pool_t *)ExAllocatePoolWithTag( NonPagedPool, sizeof(pci_pool_t), MT_TAG_PCIPOOL ); + if (pool == NULL) + return NULL; + + //TODO: not absolutely correct: Linux's pci_pool_alloc provides contiguous physical memory, + // while default alloc function - ExAllocatePoolWithTag -doesn't. + // But for now it is used for elements of size <= PAGE_SIZE + // Anyway - a sanity check: + ASSERT(size <= PAGE_SIZE); + if (size > PAGE_SIZE) + return NULL; + + //TODO: not too effective: one can read its own alloc/free functions + ExInitializeNPagedLookasideList( &pool->pool_hdr, NULL, NULL, 0, size, MT_TAG_PCIPOOL, 0 ); + + // fill the object + pool->mdev = mdev; + pool->size = size; + strncpy( pool->name, name, sizeof pool->name ); + + return pool; +} + +// from lib/string.c +/** +* strlcpy - Copy a %NUL terminated string into a sized buffer +* @dest: Where to copy the string to +* @src: Where to copy the string from +* @size: size of destination buffer +* +* Compatible with *BSD: the result is always a valid +* NUL-terminated string that fits in the buffer (unless, +* of course, the buffer size is zero). It does not pad +* out the result like strncpy() does. 
+*/ +SIZE_T strlcpy(char *dest, const char *src, SIZE_T size) +{ + SIZE_T ret = strlen(src); + + if (size) { + SIZE_T len = (ret >= size) ? size-1 : ret; + memcpy(dest, src, len); + dest[len] = '\0'; + } + return ret; +} + + +int __bitmap_full(const unsigned long *bitmap, int bits) +{ + int k, lim = bits/BITS_PER_LONG; + for (k = 0; k < lim; ++k) + if (~bitmap[k]) + return 0; + + if (bits % BITS_PER_LONG) + if (~bitmap[k] & BITMAP_LAST_WORD_MASK(bits)) + return 0; + + return 1; +} + +int __bitmap_empty(const unsigned long *bitmap, int bits) +{ + int k, lim = bits/BITS_PER_LONG; + for (k = 0; k < lim; ++k) + if (bitmap[k]) + return 0; + + if (bits % BITS_PER_LONG) + if (bitmap[k] & BITMAP_LAST_WORD_MASK(bits)) + return 0; + + return 1; +} + +int request_irq( + IN CM_PARTIAL_RESOURCE_DESCRIPTOR *int_info, /* interrupt resources */ + IN KSPIN_LOCK *isr_lock, /* spin lock for ISR */ + IN PKSERVICE_ROUTINE isr, /* ISR */ + IN void *isr_ctx, /* ISR context */ + OUT PKINTERRUPT *int_obj /* interrupt object */ + ) +{ + NTSTATUS status; + + status = IoConnectInterrupt( + int_obj, /* InterruptObject */ + isr, /* ISR */ + isr_ctx, /* ISR context */ + isr_lock, /* spinlock */ + int_info->u.Interrupt.Vector, /* interrupt vector */ + (KIRQL)int_info->u.Interrupt.Level, /* IRQL */ + (KIRQL)int_info->u.Interrupt.Level, /* Synchronize IRQL */ + (BOOLEAN)((int_info->Flags == CM_RESOURCE_INTERRUPT_LATCHED) ? + Latched : LevelSensitive), /* interrupt type: LATCHED or LEVEL */ + (BOOLEAN)(int_info->ShareDisposition == CmResourceShareShared), /* vector shared or not */ + g_processor_affinity ? g_processor_affinity : (KAFFINITY)int_info->u.Interrupt.Affinity, /* interrupt affinity */ + FALSE /* whether to save Float registers */ + ); + + if (!NT_SUCCESS(status)) { + HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_INIT ,("IoConnectInterrupt failed status %d (did you change the processor_affinity ? 
)\n",status)); + return -EFAULT; /* failed to connect interrupt */ + } + else + return 0; +} + diff --git a/trunk/hw/mthca/kernel/mt_l2w.h b/trunk/hw/mthca/kernel/mt_l2w.h index fbe1162f..faf34055 100644 --- a/trunk/hw/mthca/kernel/mt_l2w.h +++ b/trunk/hw/mthca/kernel/mt_l2w.h @@ -74,7 +74,7 @@ typedef void (*MT_EMPTY_FUNC)(); #define CPU_2_BE64_PREP #define CPU_2_BE64(x) cl_hton64(x) #else -#define CPU_2_BE64_PREP unsigned __int64 __tmp__; +#define CPU_2_BE64_PREP unsigned __int64 __tmp__ #define CPU_2_BE64(x) ( __tmp__ = x, cl_hton64(__tmp__) ) #endif diff --git a/trunk/hw/mthca/kernel/mt_verbs.c b/trunk/hw/mthca/kernel/mt_verbs.c index 8ae746f5..3257b813 100644 --- a/trunk/hw/mthca/kernel/mt_verbs.c +++ b/trunk/hw/mthca/kernel/mt_verbs.c @@ -157,6 +157,7 @@ struct ib_ah *ibv_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr, struct ib_mr *ib_mr = NULL; u64 start = 0; u64 user_handle = 0; + struct ibv_create_ah_resp *create_ah_resp = 0; // for user call we need also allocate MR if (context && p_umv_buf && p_umv_buf->p_inout_buf) { @@ -185,6 +186,12 @@ struct ib_ah *ibv_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr, ah = pd->device->create_ah(pd, ah_attr); + /* fill obligatory fields */ + if (context && p_umv_buf && p_umv_buf->p_inout_buf) { + create_ah_resp = (struct ibv_create_ah_resp *)(void*)p_umv_buf->p_inout_buf; + create_ah_resp->user_handle = user_handle; + } + if (IS_ERR(ah)) { err = PTR_ERR(ah); HCA_PRINT(TRACE_LEVEL_ERROR,HCA_DBG_AV ,("create_ah failed (%d)\n", err)); @@ -203,7 +210,6 @@ struct ib_ah *ibv_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr, if (context && p_umv_buf && p_umv_buf->p_inout_buf) { struct ibv_create_ah_resp *create_ah_resp = (struct ibv_create_ah_resp *)(void*)p_umv_buf->p_inout_buf; create_ah_resp->start = start; - create_ah_resp->user_handle = user_handle; create_ah_resp->mr.lkey = ib_mr->lkey; create_ah_resp->mr.rkey = ib_mr->rkey; create_ah_resp->mr.mr_handle = (u64)(ULONG_PTR)ib_mr; @@ -304,59 +310,119 @@ int ibv_destroy_ah(struct ib_ah *ah) /* Shared receive queues */ struct ib_srq *ibv_create_srq(struct ib_pd *pd, - struct ib_srq_init_attr *srq_init_attr) + struct ib_srq_init_attr *srq_init_attr, + struct ib_ucontext *context, ci_umv_buf_t* const p_umv_buf) { - struct ib_srq *srq; + int err; + struct ib_srq *ib_srq; + struct ib_mr *ib_mr = NULL; + u64 user_handle = 0; + struct ibv_create_srq_resp *create_srq_resp = 0; - if (!pd->device->create_srq) - return ERR_PTR(-ENOSYS); + // for user call we need also allocate MR + if (context && p_umv_buf && p_umv_buf->p_inout_buf) { + struct ibv_create_srq *create_srp = (struct ibv_create_srq *)(void*)p_umv_buf->p_inout_buf; + + // create region + ib_mr = ibv_reg_mr( + (struct ib_pd *)(ULONG_PTR)create_srp->mr.pd_handle, + create_srp->mr.access_flags, + (void*)(ULONG_PTR)create_srp->mr.start, + create_srp->mr.length, create_srp->mr.hca_va, TRUE ); + if (IS_ERR(ib_mr)) { + err = PTR_ERR(ib_mr); + HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_QP ,("ibv_reg_mr failed (%d)\n", err)); + goto err_alloc_mr; + } + create_srp->lkey = ib_mr->lkey; + user_handle = create_srp->user_handle; + } - srq = pd->device->create_srq(pd, srq_init_attr, NULL); + ib_srq = pd->device->create_srq(pd, srq_init_attr, p_umv_buf); - if (!IS_ERR(srq)) { - srq->device = pd->device; - srq->pd = pd; - srq->uobject = NULL; - srq->event_handler = srq_init_attr->event_handler; - srq->srq_context = srq_init_attr->srq_context; - atomic_inc(&pd->usecnt); - atomic_set(&srq->usecnt, 0); + /* fill obligatory fields */ + if (context && 
p_umv_buf && p_umv_buf->p_inout_buf) { + create_srq_resp = (struct ibv_create_srq_resp *)(void*)p_umv_buf->p_inout_buf; + create_srq_resp->user_handle = user_handle; + } + + if (IS_ERR(ib_srq)) { + err = PTR_ERR(ib_srq); + HCA_PRINT(TRACE_LEVEL_ERROR,HCA_DBG_QP ,("create_srq failed (%d)\n", err)); + goto err_create_srq; + } + + // fill results + ib_srq->device = pd->device; + ib_srq->pd = pd; + ib_srq->ucontext = context; + ib_srq->event_handler = srq_init_attr->event_handler; + ib_srq->srq_context = srq_init_attr->srq_context; + atomic_inc(&pd->usecnt); + atomic_set(&ib_srq->usecnt, 0); + if (context) + atomic_inc(&context->usecnt); + HCA_PRINT(TRACE_LEVEL_INFORMATION ,HCA_DBG_QP ,("PD%d use cnt %d, pd_handle %p, ctx %p \n", + ((struct mthca_pd*)pd)->pd_num, pd->usecnt, pd, pd->ucontext)); + + HCA_PRINT(TRACE_LEVEL_INFORMATION, HCA_DBG_SRQ , + ("uctx %p, qhndl %p, qnum %#x \n", + pd->ucontext, ib_srq, ((struct mthca_srq*)ib_srq)->srqn ) ); + + // fill results for user + if (context && p_umv_buf && p_umv_buf->p_inout_buf) { + struct mthca_srq *srq = (struct mthca_srq *)ib_srq; + ib_srq->ib_mr = ib_mr; + create_srq_resp->mr.lkey = ib_mr->lkey; + create_srq_resp->mr.rkey = ib_mr->rkey; + create_srq_resp->mr.mr_handle = (u64)(ULONG_PTR)ib_mr; + create_srq_resp->srq_handle = (__u64)(ULONG_PTR)srq; + create_srq_resp->max_wr = (mthca_is_memfree(to_mdev(pd->device))) ? srq->max - 1 : srq->max; + create_srq_resp->max_sge = srq->max_gs; + create_srq_resp->srqn= srq->srqn; + p_umv_buf->output_size = sizeof(struct ibv_create_srq_resp); HCA_PRINT(TRACE_LEVEL_INFORMATION ,HCA_DBG_QP ,("PD%d use cnt %d \n", ((struct mthca_pd*)pd)->pd_num, pd->usecnt)); } - return srq; + return ib_srq; + +err_create_srq: + if (ib_mr) + ibv_dereg_mr(ib_mr); +err_alloc_mr: + if( p_umv_buf && p_umv_buf->command ) + p_umv_buf->status = IB_ERROR; + HCA_EXIT(HCA_DBG_QP); + return ERR_PTR(err); } int ibv_modify_srq(struct ib_srq *srq, - struct ib_srq_attr *srq_attr, - enum ib_srq_attr_mask srq_attr_mask) + ib_srq_attr_t *srq_attr, + ib_srq_attr_mask_t srq_attr_mask) { return srq->device->modify_srq(srq, srq_attr, srq_attr_mask); } int ibv_query_srq(struct ib_srq *srq, - struct ib_srq_attr *srq_attr) + ib_srq_attr_t *srq_attr) { - return srq->device->query_srq ? 
-		srq->device->query_srq(srq, srq_attr) : -ENOSYS;
+	return srq->device->query_srq(srq, srq_attr);
 }
 
 int ibv_destroy_srq(struct ib_srq *srq)
 {
-	struct ib_pd *pd;
 	int ret;
-
-	if (atomic_read(&srq->usecnt))
-		return -EBUSY;
-
-	pd = srq->pd;
+	struct ib_pd *pd = srq->pd;
+	struct ib_ucontext *ucontext = pd->ucontext;
+	struct ib_mr * ib_mr = srq->ib_mr;
 
 	ret = srq->device->destroy_srq(srq);
 	if (!ret) {
 		atomic_dec(&pd->usecnt);
-		HCA_PRINT(TRACE_LEVEL_INFORMATION ,HCA_DBG_SHIM ,("PD%d use cnt %d \n",
-			((struct mthca_pd*)pd)->pd_num, pd->usecnt));
+		HCA_PRINT(TRACE_LEVEL_INFORMATION ,HCA_DBG_SRQ ,("PD%d use cnt %d, pd_handle %p, ctx %p \n",
+			((struct mthca_pd*)pd)->pd_num, pd->usecnt, pd, pd->ucontext));
+		release_user_cq_qp_resources(ucontext, ib_mr);
 	}
 
 	return ret;
diff --git a/trunk/hw/mthca/kernel/mthca_cmd.c b/trunk/hw/mthca/kernel/mthca_cmd.c
index ae2fad97..7ecba1b2 100644
--- a/trunk/hw/mthca/kernel/mthca_cmd.c
+++ b/trunk/hw/mthca/kernel/mthca_cmd.c
@@ -1571,6 +1571,13 @@ int mthca_HW2SW_SRQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
 			     CMD_TIME_CLASS_A, status);
 }
 
+int mthca_QUERY_SRQ(struct mthca_dev *dev, u32 num,
+		    struct mthca_mailbox *mailbox, u8 *status)
+{
+	return mthca_cmd_box(dev, 0, mailbox->dma, num, 0,
+			     CMD_QUERY_SRQ, CMD_TIME_CLASS_A, status);
+}
+
 int mthca_ARM_SRQ(struct mthca_dev *dev, int srq_num, int limit, u8 *status)
 {
 	return mthca_cmd(dev, limit, srq_num, 0, CMD_ARM_SRQ,
diff --git a/trunk/hw/mthca/kernel/mthca_cmd.h b/trunk/hw/mthca/kernel/mthca_cmd.h
index 36584345..fdeef839 100644
--- a/trunk/hw/mthca/kernel/mthca_cmd.h
+++ b/trunk/hw/mthca/kernel/mthca_cmd.h
@@ -302,6 +302,8 @@ int mthca_SW2HW_SRQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
 		    int srq_num, u8 *status);
 int mthca_HW2SW_SRQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
 		    int srq_num, u8 *status);
+int mthca_QUERY_SRQ(struct mthca_dev *dev, u32 num,
+		    struct mthca_mailbox *mailbox, u8 *status);
 int mthca_ARM_SRQ(struct mthca_dev *dev, int srq_num, int limit, u8 *status);
 int mthca_MODIFY_QP(struct mthca_dev *dev, int trans, u32 num, int is_ee,
 		    struct mthca_mailbox *mailbox, u32 optmask,
diff --git a/trunk/hw/mthca/kernel/mthca_cq.c b/trunk/hw/mthca/kernel/mthca_cq.c
index 719e21ae..8e1801f6 100644
--- a/trunk/hw/mthca/kernel/mthca_cq.c
+++ b/trunk/hw/mthca/kernel/mthca_cq.c
@@ -912,13 +912,8 @@ void mthca_free_cq(struct mthca_dev *dev,
 	spin_unlock_irq(&lh);
 
 	/* wait for all RUNNING DPCs on that EQ to complete */
-	{
-		ASSERT(KeGetCurrentIrql() <= DISPATCH_LEVEL);
-		// wait for DPCs, using this EQ, to complete
-		spin_lock_sync( &dev->eq_table.eq[MTHCA_EQ_COMP].lock);
-		//TODO: do we need that ?
-		spin_lock_sync( &dev->eq_table.eq[MTHCA_EQ_ASYNC].lock );
-	}
+	ASSERT(KeGetCurrentIrql() == PASSIVE_LEVEL);
+	KeFlushQueuedDpcs();
 
 	atomic_dec(&cq->refcount);
 	wait_event(&cq->wait, !atomic_read(&cq->refcount));
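The mthca_free_cq change above trades per-EQ lock spinning for a system-wide
DPC flush before the table's reference is dropped.  As a hedged illustration
of the same teardown idiom in isolation (the names here are not from this
patch; atomic_t, wait_event and the other shims are the Linux-to-Windows
wrappers used throughout this driver):

	/* Illustrative only: the caller must be at PASSIVE_LEVEL, because
	 * KeFlushQueuedDpcs() blocks until every queued DPC has run. */
	static void example_teardown( atomic_t *refcount, wait_queue_head_t *wait )
	{
		ASSERT( KeGetCurrentIrql() == PASSIVE_LEVEL );
		KeFlushQueuedDpcs();	/* no DPC can still touch the object */
		atomic_dec( refcount );	/* release the table's reference */
		wait_event( wait, !atomic_read( refcount ) );	/* wait for pollers */
	}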
diff --git a/trunk/hw/mthca/kernel/mthca_dev.h b/trunk/hw/mthca/kernel/mthca_dev.h
index 4698c3b5..c464cedb 100644
--- a/trunk/hw/mthca/kernel/mthca_dev.h
+++ b/trunk/hw/mthca/kernel/mthca_dev.h
@@ -137,8 +137,9 @@ struct mthca_limits {
 	int      max_qp_init_rdma;
 	int      reserved_qps;
 	int      num_srqs;
-	int      reserved_srqs;
 	int      max_srq_wqes;
+	int      max_srq_sge;
+	int      reserved_srqs;
 	int      num_eecs;
 	int      reserved_eecs;
 	int      num_cqs;
@@ -487,12 +488,12 @@ void mthca_cq_clean(struct mthca_dev *dev, u32 cqn, u32 qpn,
 		    struct mthca_srq *srq);
 
 int mthca_alloc_srq(struct mthca_dev *dev, struct mthca_pd *pd,
-		    struct ib_srq_attr *attr, struct mthca_srq *srq);
+		    ib_srq_attr_t *attr, struct mthca_srq *srq);
 void mthca_free_srq(struct mthca_dev *dev, struct mthca_srq *srq);
-int mthca_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
-		     enum ib_srq_attr_mask attr_mask);
+int mthca_modify_srq(struct ib_srq *ibsrq, ib_srq_attr_t *attr,
+		     ib_srq_attr_mask_t attr_mask);
 void mthca_srq_event(struct mthca_dev *dev, u32 srqn,
-		     enum ib_event_type event_type);
+		     enum ib_event_type event_type, u8 vendor_code);
 void mthca_free_srq_wqe(struct mthca_srq *srq, u32 wqe_addr);
 int mthca_tavor_post_srq_recv(struct ib_srq *srq, struct _ib_recv_wr *wr,
 			      struct _ib_recv_wr **bad_wr);
@@ -594,5 +595,7 @@ int ib_uverbs_init(void);
 void ib_uverbs_cleanup(void);
 
 int mthca_ah_grh_present(struct mthca_ah *ah);
+int mthca_max_srq_sge(struct mthca_dev *dev);
+
 #endif /* MTHCA_DEV_H */
diff --git a/trunk/hw/mthca/kernel/mthca_eq.c b/trunk/hw/mthca/kernel/mthca_eq.c
index 7c33e3d4..a4eb6ee9 100644
--- a/trunk/hw/mthca/kernel/mthca_eq.c
+++ b/trunk/hw/mthca/kernel/mthca_eq.c
@@ -179,6 +179,11 @@ struct mthca_eqe {
 		} qp;
 		struct {
 			__be32 srqn;
+			u32 reserved1;
+			u32 reserved2;
+			u8  reserved3[1];
+			u8  vendor_code;
+			u8  reserved4[2];
 		} srq;
 		struct {
 			__be32 cqn;
@@ -351,12 +356,17 @@ static int mthca_eq_int(struct mthca_dev *dev, struct mthca_eq *eq)
 
 		case MTHCA_EVENT_TYPE_SRQ_QP_LAST_WQE:
 			mthca_qp_event(dev, cl_ntoh32(eqe->event.qp.qpn) & 0xffffff,
-				       IB_EVENT_QP_LAST_WQE_REACHED, eqe->event.qp.vendor_code);
+				       IB_EVENT_SRQ_QP_LAST_WQE_REACHED, eqe->event.qp.vendor_code);
+			break;
+
+		case MTHCA_EVENT_TYPE_SRQ_CATAS_ERROR:
+			mthca_srq_event(dev, cl_ntoh32(eqe->event.srq.srqn) & 0xffffff,
+				       IB_EVENT_SRQ_CATAS_ERROR, eqe->event.srq.vendor_code);
 			break;
 
 		case MTHCA_EVENT_TYPE_SRQ_LIMIT:
 			mthca_srq_event(dev, cl_ntoh32(eqe->event.srq.srqn) & 0xffffff,
-				       IB_EVENT_SRQ_LIMIT_REACHED);
+				       IB_EVENT_SRQ_LIMIT_REACHED, eqe->event.srq.vendor_code);
 			break;
 
 		case MTHCA_EVENT_TYPE_WQ_CATAS_ERROR:
@@ -406,7 +416,6 @@ static int mthca_eq_int(struct mthca_dev *dev, struct mthca_eq *eq)
 			break;
 
 		case MTHCA_EVENT_TYPE_EEC_CATAS_ERROR:
-		case MTHCA_EVENT_TYPE_SRQ_CATAS_ERROR:
 		case MTHCA_EVENT_TYPE_LOCAL_CATAS_ERROR:
 		case MTHCA_EVENT_TYPE_ECC_DETECT:
 		default:
@@ -437,7 +446,7 @@ static int mthca_eq_int(struct mthca_dev *dev, struct mthca_eq *eq)
 		}
 		loops++;
 		if (cl_get_time_stamp() - start > g_max_DPC_time_us ) {
-			HCA_PRINT(TRACE_LEVEL_WARNING ,HCA_DBG_LOW ,("Handeling of EQ stopped, and a new DPC is entered after %d loops\n", loops));
+			HCA_PRINT(TRACE_LEVEL_WARNING ,HCA_DBG_LOW ,("Handling of EQ stopped, and a new DPC is entered after %d loops\n", loops));
 			KeInsertQueueDpc(&dev->eq_table.eq[eq->eq_num].dpc, NULL, NULL);
 			break;
 		}
diff --git
a/trunk/hw/mthca/kernel/mthca_main.c b/trunk/hw/mthca/kernel/mthca_main.c index 1a34c14a..fe8829cc 100644 --- a/trunk/hw/mthca/kernel/mthca_main.c +++ b/trunk/hw/mthca/kernel/mthca_main.c @@ -209,6 +209,7 @@ static int mthca_dev_lim(struct mthca_dev *mdev, struct mthca_dev_lim *dev_lim) mdev->limits.reserved_srqs = dev_lim->reserved_srqs; mdev->limits.reserved_eecs = dev_lim->reserved_eecs; mdev->limits.max_desc_sz = dev_lim->max_desc_sz; + mdev->limits.max_srq_sge = mthca_max_srq_sge(mdev); /* * Subtract 1 from the limit because we need to allocate a * spare CQE so the HCA HW can tell the difference between an diff --git a/trunk/hw/mthca/kernel/mthca_provider.c b/trunk/hw/mthca/kernel/mthca_provider.c index a919c2f7..81f811e7 100644 --- a/trunk/hw/mthca/kernel/mthca_provider.c +++ b/trunk/hw/mthca/kernel/mthca_provider.c @@ -51,15 +51,15 @@ #include "mthca_cmd.h" #include "mthca_memfree.h" - static void init_query_mad(struct ib_smp *mad) - { +static void init_query_mad(struct ib_smp *mad) +{ mad->base_version = 1; mad->mgmt_class = IB_MGMT_CLASS_SUBN_LID_ROUTED; mad->class_version = 1; mad->method = IB_MGMT_METHOD_GET; - } +} - int mthca_query_device(struct ib_device *ibdev, +int mthca_query_device(struct ib_device *ibdev, struct ib_device_attr *props) { struct ib_smp *in_mad = NULL; @@ -116,7 +116,9 @@ props->max_res_rd_atom = props->max_qp_rd_atom * props->max_qp; props->max_srq = mdev->limits.num_srqs - mdev->limits.reserved_srqs; props->max_srq_wr = mdev->limits.max_srq_wqes; - props->max_srq_sge = mdev->limits.max_sg; + if (mthca_is_memfree(mdev)) + --props->max_srq_wr; + props->max_srq_sge = mdev->limits.max_srq_sge; props->local_ca_ack_delay = (u8)mdev->limits.local_ca_ack_delay; props->atomic_cap = mdev->limits.flags & DEV_LIM_FLAG_ATOMIC ? 
IB_ATOMIC_LOCAL : IB_ATOMIC_NONE; @@ -220,7 +222,7 @@ out: return err; } -int mthca_query_pkey_chunk(struct ib_device *ibdev, +static int mthca_query_pkey_chunk(struct ib_device *ibdev, u8 port, u16 index, u16 pkey[32]) { struct ib_smp *in_mad = NULL; @@ -260,7 +262,7 @@ int mthca_query_pkey_chunk(struct ib_device *ibdev, return err; } -int mthca_query_gid_chunk(struct ib_device *ibdev, u8 port, +static int mthca_query_gid_chunk(struct ib_device *ibdev, u8 port, int index, union ib_gid gid[8]) { struct ib_smp *in_mad = NULL; @@ -493,7 +495,7 @@ done: return 0; } -struct ib_ah *mthca_ah_create(struct ib_pd *pd, +static struct ib_ah *mthca_ah_create(struct ib_pd *pd, struct ib_ah_attr *ah_attr) { int err; @@ -512,7 +514,7 @@ struct ib_ah *mthca_ah_create(struct ib_pd *pd, return &ah->ibah; } -int mthca_ah_destroy(struct ib_ah *ah) +static int mthca_ah_destroy(struct ib_ah *ah) { mthca_destroy_ah(to_mdev(ah->device), to_mah(ah)); kfree(ah); @@ -520,17 +522,16 @@ int mthca_ah_destroy(struct ib_ah *ah) return 0; } -struct ib_srq *mthca_create_srq(struct ib_pd *pd, - struct ib_srq_init_attr *init_attr, - ci_umv_buf_t* const p_umv_buf) +static struct ib_srq *mthca_create_srq(struct ib_pd *pd, + struct ib_srq_init_attr *init_attr, + ci_umv_buf_t* const p_umv_buf) { -#ifdef WIN_TO_BE_CHANGED - struct mthca_create_srq ucmd; + struct ibv_create_srq ucmd = { 0 }; struct mthca_ucontext *context = NULL; struct mthca_srq *srq; int err; - srq = kmalloc(sizeof *srq, GFP_KERNEL); + srq = kzalloc(sizeof *srq, GFP_KERNEL); if (!srq) return ERR_PTR(-ENOMEM); @@ -553,11 +554,11 @@ struct ib_srq *mthca_create_srq(struct ib_pd *pd, } err = mthca_alloc_srq(to_mdev(pd->device), to_mpd(pd), - &init_attr->attr, srq); + &init_attr->attr, srq); if (err && pd->ucontext) mthca_unmap_user_db(to_mdev(pd->device), &context->uar, - context->db_tab, ucmd.db_index); + context->db_tab, ucmd.db_index); if (err) goto err_free; @@ -574,23 +575,17 @@ err_free: kfree(srq); return ERR_PTR(err); -#else - UNREFERENCED_PARAMETER(p_umv_buf); - UNREFERENCED_PARAMETER(init_attr); - UNREFERENCED_PARAMETER(pd); - return NULL; -#endif } -int mthca_destroy_srq(struct ib_srq *srq) +static int mthca_destroy_srq(struct ib_srq *srq) { struct mthca_ucontext *context; - if (srq->uobject) { - context = to_mucontext(srq->uobject->context); + if (srq->ucontext) { + context = to_mucontext(srq->ucontext); mthca_unmap_user_db(to_mdev(srq->device), &context->uar, - context->db_tab, to_msrq(srq)->db_index); + context->db_tab, to_msrq(srq)->db_index); } mthca_free_srq(to_mdev(srq->device), to_msrq(srq)); @@ -599,7 +594,7 @@ int mthca_destroy_srq(struct ib_srq *srq) return 0; } -struct ib_qp *mthca_create_qp(struct ib_pd *pd, +static struct ib_qp *mthca_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *init_attr, ci_umv_buf_t* const p_umv_buf) { @@ -718,7 +713,7 @@ err_mem: err_inval: err_unsupported: return ERR_PTR(err); } -int mthca_destroy_qp(struct ib_qp *qp) +static int mthca_destroy_qp(struct ib_qp *qp) { if (qp->ucontext) { mthca_unmap_user_db(to_mdev(qp->device), @@ -735,7 +730,7 @@ int mthca_destroy_qp(struct ib_qp *qp) return 0; } -struct ib_cq *mthca_create_cq(struct ib_device *ibdev, int entries, +static struct ib_cq *mthca_create_cq(struct ib_device *ibdev, int entries, struct ib_ucontext *context, ci_umv_buf_t* const p_umv_buf) { @@ -829,7 +824,7 @@ err_unmap_set: return ERR_PTR(err); } -int mthca_destroy_cq(struct ib_cq *cq) +static int mthca_destroy_cq(struct ib_cq *cq) { if (cq->ucontext) { mthca_unmap_user_db(to_mdev(cq->device), @@ -888,7 
+883,7 @@ struct ib_mr *mthca_get_dma_mr(struct ib_pd *pd, mthca_qp_access_t acc) return &mr->ibmr; } -struct ib_mr *mthca_reg_phys_mr(struct ib_pd *pd, +static struct ib_mr *mthca_reg_phys_mr(struct ib_pd *pd, struct ib_phys_buf *buffer_list, int num_phys_buf, mthca_qp_access_t acc, @@ -987,7 +982,7 @@ struct ib_mr *mthca_reg_phys_mr(struct ib_pd *pd, return &mr->ibmr; } -struct ib_mr *mthca_reg_virt_mr(struct ib_pd *pd, +static struct ib_mr *mthca_reg_virt_mr(struct ib_pd *pd, void* __ptr64 vaddr, uint64_t length, uint64_t hca_va, mthca_qp_access_t acc, boolean_t um_call) { @@ -1130,7 +1125,7 @@ int mthca_dereg_mr(struct ib_mr *mr) return 0; } -struct ib_fmr *mthca_alloc_fmr(struct ib_pd *pd, mthca_qp_access_t acc, +static struct ib_fmr *mthca_alloc_fmr(struct ib_pd *pd, mthca_qp_access_t acc, struct ib_fmr_attr *fmr_attr) { struct mthca_fmr *fmr; @@ -1152,7 +1147,7 @@ struct ib_fmr *mthca_alloc_fmr(struct ib_pd *pd, mthca_qp_access_t acc, return &fmr->ibmr; } -int mthca_dealloc_fmr(struct ib_fmr *fmr) +static int mthca_dealloc_fmr(struct ib_fmr *fmr) { struct mthca_fmr *mfmr = to_mfmr(fmr); int err; @@ -1165,7 +1160,7 @@ int mthca_dealloc_fmr(struct ib_fmr *fmr) return 0; } -int mthca_unmap_fmr(struct list_head *fmr_list) +static int mthca_unmap_fmr(struct list_head *fmr_list) { struct ib_fmr *fmr; int err; @@ -1258,6 +1253,7 @@ int mthca_register_device(struct mthca_dev *dev) if (dev->mthca_flags & MTHCA_FLAG_SRQ) { dev->ib_dev.create_srq = mthca_create_srq; dev->ib_dev.modify_srq = mthca_modify_srq; + dev->ib_dev.query_srq = mthca_query_srq; dev->ib_dev.destroy_srq = mthca_destroy_srq; if (mthca_is_memfree(dev)) diff --git a/trunk/hw/mthca/kernel/mthca_provider.h b/trunk/hw/mthca/kernel/mthca_provider.h index b321a7e8..d24e0e43 100644 --- a/trunk/hw/mthca/kernel/mthca_provider.h +++ b/trunk/hw/mthca/kernel/mthca_provider.h @@ -1,447 +1,404 @@ -/* - * Copyright (c) 2004 Topspin Communications. All rights reserved. - * Copyright (c) 2005 Cisco Systems. All rights reserved. - * Copyright (c) 2005 Mellanox Technologies. All rights reserved. - * - * This software is available to you under a choice of one of two - * licenses. You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
- * - * $Id$ - */ - -#ifndef MTHCA_PROVIDER_H -#define MTHCA_PROVIDER_H - -#include -#include -#include - -typedef uint32_t mthca_mpt_access_t; -#define MTHCA_MPT_FLAG_ATOMIC (1 << 14) -#define MTHCA_MPT_FLAG_REMOTE_WRITE (1 << 13) -#define MTHCA_MPT_FLAG_REMOTE_READ (1 << 12) -#define MTHCA_MPT_FLAG_LOCAL_WRITE (1 << 11) -#define MTHCA_MPT_FLAG_LOCAL_READ (1 << 10) - -union mthca_buf { - struct scatterlist direct; - struct scatterlist *page_list; -}; - -struct mthca_uar { - PFN_NUMBER pfn; - int index; -}; - -struct mthca_user_db_table; - -struct mthca_ucontext { - struct ib_ucontext ibucontext; - struct mthca_uar uar; - struct mthca_user_db_table *db_tab; - // for user UAR - PMDL mdl; - PVOID kva; - SIZE_T uar_size; -}; - -struct mthca_mtt; - -struct mthca_mr { - //NB: the start of this structure is to be equal to mlnx_mro_t ! - //NB: the structure was not inserted here for not to mix driver and provider structures - struct ib_mr ibmr; - struct mthca_mtt *mtt; - int iobuf_used; - mt_iobuf_t iobuf; - void *secure_handle; -}; - -struct mthca_fmr { - struct ib_fmr ibmr; - struct ib_fmr_attr attr; - struct mthca_mtt *mtt; - int maps; - union { - struct { - struct mthca_mpt_entry __iomem *mpt; - u64 __iomem *mtts; - } tavor; - struct { - struct mthca_mpt_entry *mpt; - __be64 *mtts; - } arbel; - } mem; -}; - -struct mthca_pd { - struct ib_pd ibpd; - u32 pd_num; - atomic_t sqp_count; - struct mthca_mr ntmr; - int privileged; -}; - -struct mthca_eq { - struct mthca_dev *dev; - int eqn; - int eq_num; - u32 eqn_mask; - u32 cons_index; - u16 msi_x_vector; - u16 msi_x_entry; - int have_irq; - int nent; - struct scatterlist *page_list; - struct mthca_mr mr; - KDPC dpc; /* DPC for MSI-X interrupts */ - spinlock_t lock; /* spinlock for simult DPCs */ -}; - -struct mthca_av; - -enum mthca_ah_type { - MTHCA_AH_ON_HCA, - MTHCA_AH_PCI_POOL, - MTHCA_AH_KMALLOC -}; - -struct mthca_ah { - struct ib_ah ibah; - enum mthca_ah_type type; - u32 key; - struct mthca_av *av; - dma_addr_t avdma; -}; - -/* - * Quick description of our CQ/QP locking scheme: - * - * We have one global lock that protects dev->cq/qp_table. Each - * struct mthca_cq/qp also has its own lock. An individual qp lock - * may be taken inside of an individual cq lock. Both cqs attached to - * a qp may be locked, with the send cq locked first. No other - * nesting should be done. - * - * Each struct mthca_cq/qp also has an atomic_t ref count. The - * pointer from the cq/qp_table to the struct counts as one reference. - * This reference also is good for access through the consumer API, so - * modifying the CQ/QP etc doesn't need to take another reference. - * Access because of a completion being polled does need a reference. - * - * Finally, each struct mthca_cq/qp has a wait_queue_head_t for the - * destroy function to sleep on. - * - * This means that access from the consumer API requires nothing but - * taking the struct's lock. - * - * Access because of a completion event should go as follows: - * - lock cq/qp_table and look up struct - * - increment ref count in struct - * - drop cq/qp_table lock - * - lock struct, do your thing, and unlock struct - * - decrement ref count; if zero, wake up waiters - * - * To destroy a CQ/QP, we can do the following: - * - lock cq/qp_table, remove pointer, unlock cq/qp_table lock - * - decrement ref count - * - wait_event until ref count is zero - * - * It is the consumer's responsibilty to make sure that no QP - * operations (WQE posting or state modification) are pending when the - * QP is destroyed. 
Also, the consumer must make sure that calls to - * qp_modify are serialized. - * - * Possible optimizations (wait for profile data to see if/where we - * have locks bouncing between CPUs): - * - split cq/qp table lock into n separate (cache-aligned) locks, - * indexed (say) by the page in the table - * - split QP struct lock into three (one for common info, one for the - * send queue and one for the receive queue) - */ -//TODO: check correctness of the above requirement: "It is the consumer's responsibilty to make sure that no QP -// operations (WQE posting or state modification) are pending when the QP is destroyed" - -struct mthca_cq { - struct ib_cq ibcq; - void *cq_context; // leo: for IBAL shim - spinlock_t lock; - atomic_t refcount; - int cqn; - u32 cons_index; - int is_direct; - int is_kernel; - - /* Next fields are Arbel only */ - int set_ci_db_index; - __be32 *set_ci_db; - int arm_db_index; - __be32 *arm_db; - int arm_sn; - int u_arm_db_index; - int *p_u_arm_sn; - - union mthca_buf queue; - struct mthca_mr mr; - wait_queue_head_t wait; - KMUTEX mutex; -}; - -struct mthca_srq { - struct ib_srq ibsrq; - spinlock_t lock; - atomic_t refcount; - int srqn; - int max; - int max_gs; - int wqe_shift; - int first_free; - int last_free; - u16 counter; /* Arbel only */ - int db_index; /* Arbel only */ - __be32 *db; /* Arbel only */ - void *last; - - int is_direct; - u64 *wrid; - union mthca_buf queue; - struct mthca_mr mr; - - wait_queue_head_t wait; - KMUTEX mutex; -}; - -struct mthca_wq { - spinlock_t lock; - int max; - unsigned next_ind; - unsigned last_comp; - unsigned head; - unsigned tail; - void *last; - int max_gs; - int wqe_shift; - - int db_index; /* Arbel only */ - __be32 *db; -}; - -struct mthca_qp { - struct ib_qp ibqp; - void *qp_context; // leo: for IBAL shim - //TODO: added just because absense of ibv_query_qp - // thereafter it may be worth to be replaced by struct ib_qp_attr qp_attr; - struct ib_qp_init_attr qp_init_attr; // leo: for query_qp - atomic_t refcount; - u32 qpn; - int is_direct; - u8 transport; - u8 state; - u8 atomic_rd_en; - u8 resp_depth; - - struct mthca_mr mr; - - struct mthca_wq rq; - struct mthca_wq sq; - enum ib_sig_type sq_policy; - int send_wqe_offset; - int max_inline_data; - - u64 *wrid; - union mthca_buf queue; - - wait_queue_head_t wait; - KMUTEX mutex; -}; - -struct mthca_sqp { - struct mthca_qp qp; - int port; - int pkey_index; - u32 qkey; - u32 send_psn; - struct ib_ud_header ud_header; - struct scatterlist sg; -}; - -static inline struct mthca_ucontext *to_mucontext(struct ib_ucontext *ibucontext) -{ - return container_of(ibucontext, struct mthca_ucontext, ibucontext); -} - -static inline struct mthca_fmr *to_mfmr(struct ib_fmr *ibmr) -{ - return container_of(ibmr, struct mthca_fmr, ibmr); -} - -static inline struct mthca_mr *to_mmr(struct ib_mr *ibmr) -{ - return container_of(ibmr, struct mthca_mr, ibmr); -} - -static inline struct mthca_pd *to_mpd(struct ib_pd *ibpd) -{ - return container_of(ibpd, struct mthca_pd, ibpd); -} - -static inline struct mthca_ah *to_mah(struct ib_ah *ibah) -{ - return container_of(ibah, struct mthca_ah, ibah); -} - -static inline struct mthca_cq *to_mcq(struct ib_cq *ibcq) -{ - return container_of(ibcq, struct mthca_cq, ibcq); -} - -static inline struct mthca_srq *to_msrq(struct ib_srq *ibsrq) -{ - return container_of(ibsrq, struct mthca_srq, ibsrq); -} - -static inline struct mthca_qp *to_mqp(struct ib_qp *ibqp) -{ - return container_of(ibqp, struct mthca_qp, ibqp); -} - -static inline struct mthca_sqp 
*to_msqp(struct mthca_qp *qp) -{ - return container_of(qp, struct mthca_sqp, qp); -} - -static inline uint8_t start_port(struct ib_device *device) -{ - return device->node_type == IB_NODE_SWITCH ? 0 : 1; -} - -static inline uint8_t end_port(struct ib_device *device) -{ - return device->node_type == IB_NODE_SWITCH ? 0 : device->phys_port_cnt; -} - -static inline int ib_copy_from_umv_buf(void *dest, ci_umv_buf_t* const p_umv_buf, size_t len) -{ - RtlCopyMemory(dest, p_umv_buf->p_inout_buf, len); - return 0; -} - -static inline int ib_copy_to_umv_buf(ci_umv_buf_t* const p_umv_buf, void *src, size_t len) -{ - if (p_umv_buf->output_size < len) { - p_umv_buf->status = IB_INSUFFICIENT_MEMORY; - p_umv_buf->output_size = 0; - return -EFAULT; - } - RtlCopyMemory(p_umv_buf->p_inout_buf, src, len); - p_umv_buf->status = IB_SUCCESS; - p_umv_buf->output_size = (uint32_t)len; - return 0; -} - - - -// API -int mthca_query_device(struct ib_device *ibdev, - struct ib_device_attr *props); - -int mthca_query_port(struct ib_device *ibdev, - u8 port, struct ib_port_attr *props); - -int mthca_modify_port(struct ib_device *ibdev, - u8 port, int port_modify_mask, - struct ib_port_modify *props); - -int mthca_query_pkey_chunk(struct ib_device *ibdev, - u8 port, u16 index, u16 pkey[32]); - -int mthca_query_gid_chunk(struct ib_device *ibdev, u8 port, - int index, union ib_gid gid[8]); - -struct ib_ucontext *mthca_alloc_ucontext(struct ib_device *ibdev, - ci_umv_buf_t* const p_umv_buf); - -int mthca_dealloc_ucontext(struct ib_ucontext *context); - -struct ib_pd *mthca_alloc_pd(struct ib_device *ibdev, - struct ib_ucontext *context, - ci_umv_buf_t* const p_umv_buf); - -int mthca_dealloc_pd(struct ib_pd *pd); - -struct ib_ah *mthca_ah_create(struct ib_pd *pd, - struct ib_ah_attr *ah_attr); - -int mthca_ah_destroy(struct ib_ah *ah); - -struct ib_srq *mthca_create_srq(struct ib_pd *pd, - struct ib_srq_init_attr *init_attr, - ci_umv_buf_t* const p_umv_buf); - -int mthca_destroy_srq(struct ib_srq *srq); - -struct ib_qp *mthca_create_qp(struct ib_pd *pd, - struct ib_qp_init_attr *init_attr, - ci_umv_buf_t* const p_umv_buf); - -int mthca_destroy_qp(struct ib_qp *qp); - -struct ib_cq *mthca_create_cq(struct ib_device *ibdev, int entries, - struct ib_ucontext *context, - ci_umv_buf_t* const p_umv_buf); - -int mthca_destroy_cq(struct ib_cq *cq); - -struct ib_mr *mthca_get_dma_mr(struct ib_pd *pd, mthca_qp_access_t acc); - -struct ib_mr *mthca_reg_phys_mr(struct ib_pd *pd, - struct ib_phys_buf *buffer_list, - int num_phys_buf, - mthca_qp_access_t acc, - u64 *iova_start); - -struct ib_mr *mthca_reg_virt_mr(struct ib_pd *pd, - void* __ptr64 vaddr, uint64_t length, uint64_t hca_va, - mthca_qp_access_t acc, boolean_t um_call); - -int mthca_dereg_mr(struct ib_mr *mr); - -struct ib_fmr *mthca_alloc_fmr(struct ib_pd *pd, mthca_qp_access_t acc, - struct ib_fmr_attr *fmr_attr); - -int mthca_dealloc_fmr(struct ib_fmr *fmr); - -int mthca_unmap_fmr(struct list_head *fmr_list); - -int mthca_poll_cq_list( - IN struct ib_cq *ibcq, - IN OUT ib_wc_t** const pp_free_wclist, - OUT ib_wc_t** const pp_done_wclist ); - - -#endif /* MTHCA_PROVIDER_H */ +/* + * Copyright (c) 2004 Topspin Communications. All rights reserved. + * Copyright (c) 2005 Cisco Systems. All rights reserved. + * Copyright (c) 2005 Mellanox Technologies. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. 
You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + +#ifndef MTHCA_PROVIDER_H +#define MTHCA_PROVIDER_H + +#include +#include +#include + +typedef uint32_t mthca_mpt_access_t; +#define MTHCA_MPT_FLAG_ATOMIC (1 << 14) +#define MTHCA_MPT_FLAG_REMOTE_WRITE (1 << 13) +#define MTHCA_MPT_FLAG_REMOTE_READ (1 << 12) +#define MTHCA_MPT_FLAG_LOCAL_WRITE (1 << 11) +#define MTHCA_MPT_FLAG_LOCAL_READ (1 << 10) + +union mthca_buf { + struct scatterlist direct; + struct scatterlist *page_list; +}; + +struct mthca_uar { + PFN_NUMBER pfn; + int index; +}; + +struct mthca_user_db_table; + +struct mthca_ucontext { + struct ib_ucontext ibucontext; + struct mthca_uar uar; + struct mthca_user_db_table *db_tab; + // for user UAR + PMDL mdl; + PVOID kva; + SIZE_T uar_size; +}; + +struct mthca_mtt; + +struct mthca_mr { + //NB: the start of this structure is to be equal to mlnx_mro_t ! + //NB: the structure was not inserted here for not to mix driver and provider structures + struct ib_mr ibmr; + struct mthca_mtt *mtt; + int iobuf_used; + mt_iobuf_t iobuf; + void *secure_handle; +}; + +struct mthca_fmr { + struct ib_fmr ibmr; + struct ib_fmr_attr attr; + struct mthca_mtt *mtt; + int maps; + union { + struct { + struct mthca_mpt_entry __iomem *mpt; + u64 __iomem *mtts; + } tavor; + struct { + struct mthca_mpt_entry *mpt; + __be64 *mtts; + } arbel; + } mem; +}; + +struct mthca_pd { + struct ib_pd ibpd; + u32 pd_num; + atomic_t sqp_count; + struct mthca_mr ntmr; + int privileged; +}; + +struct mthca_eq { + struct mthca_dev *dev; + int eqn; + int eq_num; + u32 eqn_mask; + u32 cons_index; + u16 msi_x_vector; + u16 msi_x_entry; + int have_irq; + int nent; + struct scatterlist *page_list; + struct mthca_mr mr; + KDPC dpc; /* DPC for MSI-X interrupts */ + spinlock_t lock; /* spinlock for simult DPCs */ +}; + +struct mthca_av; + +enum mthca_ah_type { + MTHCA_AH_ON_HCA, + MTHCA_AH_PCI_POOL, + MTHCA_AH_KMALLOC +}; + +struct mthca_ah { + struct ib_ah ibah; + enum mthca_ah_type type; + u32 key; + struct mthca_av *av; + dma_addr_t avdma; +}; + +/* + * Quick description of our CQ/QP locking scheme: + * + * We have one global lock that protects dev->cq/qp_table. Each + * struct mthca_cq/qp also has its own lock. An individual qp lock + * may be taken inside of an individual cq lock. 
Both cqs attached to + * a qp may be locked, with the send cq locked first. No other + * nesting should be done. + * + * Each struct mthca_cq/qp also has an atomic_t ref count. The + * pointer from the cq/qp_table to the struct counts as one reference. + * This reference also is good for access through the consumer API, so + * modifying the CQ/QP etc doesn't need to take another reference. + * Access because of a completion being polled does need a reference. + * + * Finally, each struct mthca_cq/qp has a wait_queue_head_t for the + * destroy function to sleep on. + * + * This means that access from the consumer API requires nothing but + * taking the struct's lock. + * + * Access because of a completion event should go as follows: + * - lock cq/qp_table and look up struct + * - increment ref count in struct + * - drop cq/qp_table lock + * - lock struct, do your thing, and unlock struct + * - decrement ref count; if zero, wake up waiters + * + * To destroy a CQ/QP, we can do the following: + * - lock cq/qp_table, remove pointer, unlock cq/qp_table lock + * - decrement ref count + * - wait_event until ref count is zero + * + * It is the consumer's responsibilty to make sure that no QP + * operations (WQE posting or state modification) are pending when the + * QP is destroyed. Also, the consumer must make sure that calls to + * qp_modify are serialized. + * + * Possible optimizations (wait for profile data to see if/where we + * have locks bouncing between CPUs): + * - split cq/qp table lock into n separate (cache-aligned) locks, + * indexed (say) by the page in the table + * - split QP struct lock into three (one for common info, one for the + * send queue and one for the receive queue) + */ +//TODO: check correctness of the above requirement: "It is the consumer's responsibilty to make sure that no QP +// operations (WQE posting or state modification) are pending when the QP is destroyed" + +struct mthca_cq { + struct ib_cq ibcq; + void *cq_context; // leo: for IBAL shim + spinlock_t lock; + atomic_t refcount; + int cqn; + u32 cons_index; + int is_direct; + int is_kernel; + + /* Next fields are Arbel only */ + int set_ci_db_index; + __be32 *set_ci_db; + int arm_db_index; + __be32 *arm_db; + int arm_sn; + int u_arm_db_index; + int *p_u_arm_sn; + + union mthca_buf queue; + struct mthca_mr mr; + wait_queue_head_t wait; + KMUTEX mutex; +}; + +struct mthca_srq { + struct ib_srq ibsrq; + spinlock_t lock; + atomic_t refcount; + int srqn; + int max; + int max_gs; + int wqe_shift; + int first_free; + int last_free; + u16 counter; /* Arbel only */ + int db_index; /* Arbel only */ + __be32 *db; /* Arbel only */ + void *last; + + int is_direct; + u64 *wrid; + union mthca_buf queue; + struct mthca_mr mr; + + wait_queue_head_t wait; + KMUTEX mutex; + void *srq_context; +}; + +struct mthca_wq { + spinlock_t lock; + int max; + unsigned next_ind; + unsigned last_comp; + unsigned head; + unsigned tail; + void *last; + int max_gs; + int wqe_shift; + + int db_index; /* Arbel only */ + __be32 *db; +}; + +struct mthca_qp { + struct ib_qp ibqp; + void *qp_context; // leo: for IBAL shim + //TODO: added just because absense of ibv_query_qp + // thereafter it may be worth to be replaced by struct ib_qp_attr qp_attr; + struct ib_qp_init_attr qp_init_attr; // leo: for query_qp + atomic_t refcount; + u32 qpn; + int is_direct; + u8 transport; + u8 state; + u8 atomic_rd_en; + u8 resp_depth; + + struct mthca_mr mr; + + struct mthca_wq rq; + struct mthca_wq sq; + enum ib_sig_type sq_policy; + int send_wqe_offset; + int 
max_inline_data; + + u64 *wrid; + union mthca_buf queue; + + wait_queue_head_t wait; + KMUTEX mutex; +}; + +struct mthca_sqp { + struct mthca_qp qp; + int port; + int pkey_index; + u32 qkey; + u32 send_psn; + struct ib_ud_header ud_header; + struct scatterlist sg; +}; + +static inline struct mthca_ucontext *to_mucontext(struct ib_ucontext *ibucontext) +{ + return container_of(ibucontext, struct mthca_ucontext, ibucontext); +} + +static inline struct mthca_fmr *to_mfmr(struct ib_fmr *ibmr) +{ + return container_of(ibmr, struct mthca_fmr, ibmr); +} + +static inline struct mthca_mr *to_mmr(struct ib_mr *ibmr) +{ + return container_of(ibmr, struct mthca_mr, ibmr); +} + +static inline struct mthca_pd *to_mpd(struct ib_pd *ibpd) +{ + return container_of(ibpd, struct mthca_pd, ibpd); +} + +static inline struct mthca_ah *to_mah(struct ib_ah *ibah) +{ + return container_of(ibah, struct mthca_ah, ibah); +} + +static inline struct mthca_cq *to_mcq(struct ib_cq *ibcq) +{ + return container_of(ibcq, struct mthca_cq, ibcq); +} + +static inline struct mthca_srq *to_msrq(struct ib_srq *ibsrq) +{ + return container_of(ibsrq, struct mthca_srq, ibsrq); +} + +static inline struct mthca_qp *to_mqp(struct ib_qp *ibqp) +{ + return container_of(ibqp, struct mthca_qp, ibqp); +} + +static inline struct mthca_sqp *to_msqp(struct mthca_qp *qp) +{ + return container_of(qp, struct mthca_sqp, qp); +} + +static inline uint8_t start_port(struct ib_device *device) +{ + return device->node_type == IB_NODE_SWITCH ? 0 : 1; +} + +static inline uint8_t end_port(struct ib_device *device) +{ + return device->node_type == IB_NODE_SWITCH ? 0 : device->phys_port_cnt; +} + +static inline int ib_copy_from_umv_buf(void *dest, ci_umv_buf_t* const p_umv_buf, size_t len) +{ + RtlCopyMemory(dest, p_umv_buf->p_inout_buf, len); + return 0; +} + +static inline int ib_copy_to_umv_buf(ci_umv_buf_t* const p_umv_buf, void *src, size_t len) +{ + if (p_umv_buf->output_size < len) { + p_umv_buf->status = IB_INSUFFICIENT_MEMORY; + p_umv_buf->output_size = 0; + return -EFAULT; + } + RtlCopyMemory(p_umv_buf->p_inout_buf, src, len); + p_umv_buf->status = IB_SUCCESS; + p_umv_buf->output_size = (uint32_t)len; + return 0; +} + + + +// API +int mthca_query_device(struct ib_device *ibdev, + struct ib_device_attr *props); + +int mthca_query_port(struct ib_device *ibdev, + u8 port, struct ib_port_attr *props); + +int mthca_modify_port(struct ib_device *ibdev, + u8 port, int port_modify_mask, + struct ib_port_modify *props); + +struct ib_pd *mthca_alloc_pd(struct ib_device *ibdev, + struct ib_ucontext *context, + ci_umv_buf_t* const p_umv_buf); + +int mthca_dealloc_pd(struct ib_pd *pd); + +int mthca_dereg_mr(struct ib_mr *mr); + +int mthca_query_srq(struct ib_srq *ibsrq, ib_srq_attr_t *srq_attr); + +struct ib_ucontext *mthca_alloc_ucontext(struct ib_device *ibdev, + ci_umv_buf_t* const p_umv_buf); + +int mthca_dealloc_ucontext(struct ib_ucontext *context); + +struct ib_mr *mthca_get_dma_mr(struct ib_pd *pd, mthca_qp_access_t acc); + +int mthca_poll_cq_list( + IN struct ib_cq *ibcq, + IN OUT ib_wc_t** const pp_free_wclist, + OUT ib_wc_t** const pp_done_wclist ); + + +#endif /* MTHCA_PROVIDER_H */ diff --git a/trunk/hw/mthca/kernel/mthca_qp.c b/trunk/hw/mthca/kernel/mthca_qp.c index 276c5872..f2630495 100644 --- a/trunk/hw/mthca/kernel/mthca_qp.c +++ b/trunk/hw/mthca/kernel/mthca_qp.c @@ -807,11 +807,18 @@ int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask) err = mthca_MODIFY_QP(dev, state_table[cur_state][new_state].trans, qp->qpn, 0, 
mailbox, sqd_event, &status); - if (err) + if (err) { + HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_QP,("mthca_MODIFY_QP returned error (qp-num = 0x%x) returned status %02x " + "cur_state = %d new_state = %d attr_mask = %d req_param = %d opt_param = %d\n", + ibqp->qp_num, status, cur_state, new_state, + attr_mask, req_param, opt_param)); goto out_mailbox; + } if (status) { - HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_QP,("modify QP %d returned status %02x.\n", - state_table[cur_state][new_state].trans, status)); + HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_QP,("mthca_MODIFY_QP bad status(qp-num = 0x%x) returned status %02x " + "cur_state = %d new_state = %d attr_mask = %d req_param = %d opt_param = %d\n", + ibqp->qp_num, status, cur_state, new_state, + attr_mask, req_param, opt_param)); err = -EINVAL; goto out_mailbox; } @@ -1114,7 +1121,7 @@ static int mthca_alloc_qp_common(struct mthca_dev *dev, atomic_set(&qp->refcount, 1); init_waitqueue_head(&qp->wait); KeInitializeMutex(&qp->mutex, 0); - + qp->state = IBQPS_RESET; qp->atomic_rd_en = 0; qp->resp_depth = 0; @@ -1371,8 +1378,9 @@ void mthca_free_qp(struct mthca_dev *dev, atomic_dec(&qp->refcount); wait_event(&qp->wait, !atomic_read(&qp->refcount)); - if (qp->state != IBQPS_RESET) + if (qp->state != IBQPS_RESET) { mthca_MODIFY_QP(dev, MTHCA_TRANS_ANY2RST, qp->qpn, 0, NULL, 0, &status); + } /* * If this is a userspace QP, the buffers, MR, CQs and so on diff --git a/trunk/hw/mthca/kernel/mthca_srq.c b/trunk/hw/mthca/kernel/mthca_srq.c index 46f1bd01..2a8dfdc9 100644 --- a/trunk/hw/mthca/kernel/mthca_srq.c +++ b/trunk/hw/mthca/kernel/mthca_srq.c @@ -50,6 +50,7 @@ #pragma alloc_text (PAGE, mthca_cleanup_srq_table) #endif + enum { MTHCA_MAX_DIRECT_SRQ_SIZE = 4 * PAGE_SIZE }; @@ -59,7 +60,8 @@ struct mthca_tavor_srq_context { __be32 state_pd; __be32 lkey; __be32 uar; - __be32 wqe_cnt; + __be16 limit_watermark; + __be16 wqe_cnt; u32 reserved[2]; }; @@ -129,7 +131,7 @@ static void mthca_arbel_init_srq_context(struct mthca_dev *dev, RtlZeroMemory(context, sizeof *context); - logsize = long_log2(srq->max) + srq->wqe_shift; + logsize = long_log2(srq->max); context->state_logsize_srqn = cl_hton32(logsize << 24 | srq->srqn); context->lkey = cl_hton32(srq->mr.ibmr.lkey); context->db_index = cl_hton32(srq->db_index); @@ -194,7 +196,7 @@ static int mthca_alloc_srq_buf(struct mthca_dev *dev, struct mthca_pd *pd, } int mthca_alloc_srq(struct mthca_dev *dev, struct mthca_pd *pd, - struct ib_srq_attr *attr, struct mthca_srq *srq) + ib_srq_attr_t *attr, struct mthca_srq *srq) { struct mthca_mailbox *mailbox; u8 status; @@ -204,7 +206,7 @@ int mthca_alloc_srq(struct mthca_dev *dev, struct mthca_pd *pd, /* Sanity check SRQ size before proceeding */ if ((int)attr->max_wr > dev->limits.max_srq_wqes || - (int)attr->max_sge > dev->limits.max_sg) + (int)attr->max_sge > dev->limits.max_srq_sge) return -EINVAL; srq->max = attr->max_wr; @@ -217,6 +219,10 @@ int mthca_alloc_srq(struct mthca_dev *dev, struct mthca_pd *pd, ds = max(64UL, roundup_pow_of_two(sizeof (struct mthca_next_seg) + srq->max_gs * sizeof (struct mthca_data_seg))); + + if (!mthca_is_memfree(dev) && (ds > dev->limits.max_desc_sz)) + return -EINVAL; + srq->wqe_shift = long_log2(ds); srq->srqn = mthca_alloc(&dev->srq_table.alloc); @@ -261,11 +267,11 @@ int mthca_alloc_srq(struct mthca_dev *dev, struct mthca_pd *pd, err = mthca_SW2HW_SRQ(dev, mailbox, srq->srqn, &status); if (err) { - HCA_PRINT(TRACE_LEVEL_WARNING ,HCA_DBG_LOW ,("SW2HW_SRQ failed (%d)\n", err)); + HCA_PRINT(TRACE_LEVEL_WARNING ,HCA_DBG_SRQ ,( "SW2HW_SRQ 
failed (%d)\n", err)); goto err_out_free_buf; } if (status) { - HCA_PRINT(TRACE_LEVEL_WARNING,HCA_DBG_LOW,("SW2HW_SRQ returned status 0x%02x\n", + HCA_PRINT(TRACE_LEVEL_WARNING ,HCA_DBG_SRQ ,( "SW2HW_SRQ returned status 0x%02x\n", status)); err = -EINVAL; goto err_out_free_buf; @@ -285,17 +291,17 @@ int mthca_alloc_srq(struct mthca_dev *dev, struct mthca_pd *pd, srq->first_free = 0; srq->last_free = srq->max - 1; - attr->max_wr = srq->max; + attr->max_wr = (mthca_is_memfree(dev)) ? srq->max - 1 : srq->max; attr->max_sge = srq->max_gs; return 0; err_out_free_srq: err = mthca_HW2SW_SRQ(dev, mailbox, srq->srqn, &status); - if (err){ - HCA_PRINT(TRACE_LEVEL_WARNING ,HCA_DBG_LOW ,("HW2SW_SRQ failed (%d)\n", err)); - }else if (status){ - HCA_PRINT(TRACE_LEVEL_WARNING ,HCA_DBG_LOW ,("HW2SW_SRQ returned status 0x%02x\n", status)); + if (err) { + HCA_PRINT(TRACE_LEVEL_WARNING ,HCA_DBG_SRQ ,( "HW2SW_SRQ failed (%d)\n", err)); + } else if (status) { + HCA_PRINT(TRACE_LEVEL_WARNING ,HCA_DBG_SRQ ,( "HW2SW_SRQ returned status 0x%02x\n", status)); } err_out_free_buf: @@ -327,26 +333,26 @@ void mthca_free_srq(struct mthca_dev *dev, struct mthca_srq *srq) mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL); if (IS_ERR(mailbox)) { - HCA_PRINT(TRACE_LEVEL_WARNING ,HCA_DBG_LOW ,("No memory for mailbox to free SRQ.\n")); + HCA_PRINT(TRACE_LEVEL_WARNING ,HCA_DBG_SRQ ,( "No memory for mailbox to free SRQ.\n")); return; } err = mthca_HW2SW_SRQ(dev, mailbox, srq->srqn, &status); - if (err){ - HCA_PRINT(TRACE_LEVEL_WARNING ,HCA_DBG_LOW ,("HW2SW_SRQ failed (%d)\n", err)); - }else if (status){ - HCA_PRINT(TRACE_LEVEL_WARNING ,HCA_DBG_LOW ,("HW2SW_SRQ returned status 0x%02x\n", status)); + if (err) { + HCA_PRINT(TRACE_LEVEL_WARNING ,HCA_DBG_SRQ ,( "HW2SW_SRQ failed (%d)\n", err)); + } else if (status) { + HCA_PRINT(TRACE_LEVEL_WARNING ,HCA_DBG_SRQ ,( "HW2SW_SRQ returned status 0x%02x\n", status)); } spin_lock_irq(&dev->srq_table.lock, &lh); mthca_array_clear(&dev->srq_table.srq, srq->srqn & (dev->limits.num_srqs - 1)); + atomic_dec(&srq->refcount); spin_unlock_irq(&lh); - atomic_dec(&srq->refcount); wait_event(&srq->wait, !atomic_read(&srq->refcount)); - if (!srq->ibsrq.uobject) { + if (!srq->ibsrq.ucontext) { mthca_free_srq_buf(dev, srq); if (mthca_is_memfree(dev)) mthca_free_db(dev, MTHCA_DB_TYPE_SRQ, srq->db_index); @@ -357,9 +363,9 @@ void mthca_free_srq(struct mthca_dev *dev, struct mthca_srq *srq) mthca_free_mailbox(dev, mailbox); } -int mthca_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr, - enum ib_srq_attr_mask attr_mask) -{ +int mthca_modify_srq(struct ib_srq *ibsrq, ib_srq_attr_t *attr, + ib_srq_attr_mask_t attr_mask) +{ struct mthca_dev *dev = to_mdev(ibsrq->device); struct mthca_srq *srq = to_msrq(ibsrq); int ret; @@ -367,11 +373,12 @@ int mthca_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr, /* We don't support resizing SRQs (yet?) */ if (attr_mask & IB_SRQ_MAX_WR) - return -EINVAL; + return -ENOSYS; if (attr_mask & IB_SRQ_LIMIT) { - if (attr->srq_limit > (u32)srq->max) - return -EINVAL; + u32 max_wr = mthca_is_memfree(dev) ? 
srq->max - 1 : srq->max; + if (attr->srq_limit > max_wr) + return -ERANGE; down(&srq->mutex); ret = mthca_ARM_SRQ(dev, srq->srqn, attr->srq_limit, &status); @@ -386,8 +393,43 @@ int mthca_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr, return 0; } +int mthca_query_srq(struct ib_srq *ibsrq, ib_srq_attr_t *srq_attr) +{ + struct mthca_dev *dev = to_mdev(ibsrq->device); + struct mthca_srq *srq = to_msrq(ibsrq); + struct mthca_mailbox *mailbox; + struct mthca_arbel_srq_context *arbel_ctx; + struct mthca_tavor_srq_context *tavor_ctx; + u8 status; + int err; + + mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL); + if (IS_ERR(mailbox)) + return PTR_ERR(mailbox); + + err = mthca_QUERY_SRQ(dev, srq->srqn, mailbox, &status); + if (err) + goto out; + + if (mthca_is_memfree(dev)) { + arbel_ctx = mailbox->buf; + srq_attr->srq_limit = cl_ntoh16(arbel_ctx->limit_watermark); + } else { + tavor_ctx = mailbox->buf; + srq_attr->srq_limit = cl_ntoh16(tavor_ctx->limit_watermark); + } + + srq_attr->max_wr = (mthca_is_memfree(dev)) ? srq->max - 1 : srq->max; + srq_attr->max_sge = srq->max_gs; + +out: + mthca_free_mailbox(dev, mailbox); + + return err; +} + void mthca_srq_event(struct mthca_dev *dev, u32 srqn, - enum ib_event_type event_type) + enum ib_event_type event_type, u8 vendor_code) { struct mthca_srq *srq; struct ib_event event; @@ -400,7 +442,7 @@ void mthca_srq_event(struct mthca_dev *dev, u32 srqn, spin_unlock(&lh); if (!srq) { - HCA_PRINT(TRACE_LEVEL_WARNING ,HCA_DBG_LOW ,("Async event for bogus SRQ %08x\n", srqn)); + HCA_PRINT(TRACE_LEVEL_WARNING ,HCA_DBG_SRQ ,( "Async event for bogus SRQ %08x\n", srqn)); return; } @@ -409,8 +451,13 @@ void mthca_srq_event(struct mthca_dev *dev, u32 srqn, event.device = &dev->ib_dev; event.event = event_type; - event.element.srq = &srq->ibsrq; - srq->ibsrq.event_handler(&event, srq->ibsrq.srq_context); + event.element.srq = &srq->ibsrq; + event.vendor_specific = vendor_code; + HCA_PRINT(TRACE_LEVEL_WARNING,HCA_DBG_SRQ, + ("SRQ %06x Async event event_type 0x%x vendor_code 0x%x\n", + srqn,event_type,vendor_code)); + if (srq->ibsrq.event_handler) + srq->ibsrq.event_handler(&event, srq->ibsrq.srq_context); out: if (atomic_dec_and_test(&srq->refcount)) @@ -440,13 +487,12 @@ void mthca_free_srq_wqe(struct mthca_srq *srq, u32 wqe_addr) spin_unlock(&lh); } -//TODO: is this code correct at all ? int mthca_tavor_post_srq_recv(struct ib_srq *ibsrq, struct _ib_recv_wr *wr, struct _ib_recv_wr **bad_wr) { struct mthca_dev *dev = to_mdev(ibsrq->device); struct mthca_srq *srq = to_msrq(ibsrq); - __be32 doorbell[2]; + __be32 doorbell[2]; int err = 0; int first_ind; int ind; @@ -455,52 +501,34 @@ int mthca_tavor_post_srq_recv(struct ib_srq *ibsrq, struct _ib_recv_wr *wr, int i; u8 *wqe; u8 *prev_wqe; + CPU_2_BE64_PREP; SPIN_LOCK_PREP(lh); spin_lock_irqsave(&srq->lock, &lh); first_ind = srq->first_free; - for (nreq = 0; wr; ++nreq, wr = wr->p_next) { - if (unlikely(nreq == MTHCA_TAVOR_MAX_WQES_PER_RECV_DB)) { - nreq = 0; - - doorbell[0] = cl_hton32(first_ind << srq->wqe_shift); - doorbell[1] = cl_hton32(srq->srqn << 8); - - /* - * Make sure that descriptors are written - * before doorbell is rung. 
- */ - wmb(); - - mthca_write64(doorbell, - dev->kar + MTHCA_RECEIVE_DOORBELL, - MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock)); - - first_ind = srq->first_free; - } - + for (nreq = 0; wr; wr = wr->p_next) { ind = srq->first_free; if (ind < 0) { - HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_QP ,("SRQ %06x full\n", srq->srqn)); + HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_SRQ ,( "SRQ %06x full\n", srq->srqn)); err = -ENOMEM; *bad_wr = wr; - goto out; + break; } - wqe = get_wqe(srq, ind); - next_ind = *wqe_to_link(wqe); + wqe = get_wqe(srq, ind); + next_ind = *wqe_to_link(wqe); if (next_ind < 0) { - HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_QP ,("SRQ %06x full\n", srq->srqn)); + HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_SRQ ,( "SRQ %06x full\n", srq->srqn)); err = -ENOMEM; *bad_wr = wr; break; } - prev_wqe = srq->last; + prev_wqe = srq->last; srq->last = wqe; ((struct mthca_next_seg *) wqe)->nda_op = 0; @@ -513,7 +541,7 @@ int mthca_tavor_post_srq_recv(struct ib_srq *ibsrq, struct _ib_recv_wr *wr, err = -EINVAL; *bad_wr = wr; srq->last = prev_wqe; - goto out; + break; } for (i = 0; i < (int)wr->num_ds; ++i) { @@ -522,7 +550,7 @@ int mthca_tavor_post_srq_recv(struct ib_srq *ibsrq, struct _ib_recv_wr *wr, ((struct mthca_data_seg *) wqe)->lkey = cl_hton32(wr->ds_array[i].lkey); ((struct mthca_data_seg *) wqe)->addr = - cl_hton64(wr->ds_array[i].vaddr); + CPU_2_BE64(wr->ds_array[i].vaddr); wqe += sizeof (struct mthca_data_seg); } @@ -540,9 +568,28 @@ int mthca_tavor_post_srq_recv(struct ib_srq *ibsrq, struct _ib_recv_wr *wr, srq->wrid[ind] = wr->wr_id; srq->first_free = next_ind; + + ++nreq; + if (unlikely(nreq == MTHCA_TAVOR_MAX_WQES_PER_RECV_DB)) { + nreq = 0; + + doorbell[0] = cl_hton32(first_ind << srq->wqe_shift); + doorbell[1] = cl_hton32(srq->srqn << 8); + + /* + * Make sure that descriptors are written + * before doorbell is rung. + */ + wmb(); + + mthca_write64(doorbell, + dev->kar + MTHCA_RECEIVE_DOORBELL, + MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock)); + + first_ind = srq->first_free; + } } -out: if (likely(nreq)) { doorbell[0] = cl_hton32(first_ind << srq->wqe_shift); doorbell[1] = cl_hton32((srq->srqn << 8) | nreq); @@ -562,7 +609,6 @@ out: return err; } -//TODO: is this code correct at all ? 
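[Editor's note] The hunk above restructures mthca_tavor_post_srq_recv (the user-mode copy in mlnx_uvp_srq.c, later in this patch, gets the same treatment): the doorbell for a full batch of MTHCA_TAVOR_MAX_WQES_PER_RECV_DB WQEs is now rung immediately after the last WQE of the batch has been written, instead of at the top of the following loop iteration, and the error paths now break out of the loop so the trailing doorbell still flushes any WQEs already queued. The following is a minimal sketch of the resulting control flow only; write_wqe, ring_doorbell, first_free_index and wmb_stub are hypothetical stand-ins for the real WQE setup, mthca_write64 call, free-list lookup and wmb():

#define MAX_WQES_PER_RECV_DB 256   /* stands in for MTHCA_TAVOR_MAX_WQES_PER_RECV_DB */

struct recv_wr { struct recv_wr *p_next; };

static int  write_wqe(struct recv_wr *wr)       { (void)wr; return 0; }
static void ring_doorbell(int first_ind, int n) { (void)first_ind; (void)n; }
static int  first_free_index(void)              { return 0; }
static void wmb_stub(void)                      { /* memory barrier */ }

static int post_srq_recv_sketch(struct recv_wr *wr, struct recv_wr **bad_wr)
{
	int err = 0, nreq = 0;
	int first_ind = first_free_index();

	for (; wr; wr = wr->p_next) {
		if (write_wqe(wr)) {
			err = -1;
			*bad_wr = wr;
			break;          /* fall through to the trailing doorbell so   */
		}                   /* already-written WQEs are still handed over */

		if (++nreq == MAX_WQES_PER_RECV_DB) {   /* count only written WQEs */
			nreq = 0;
			wmb_stub();     /* descriptors must be visible before doorbell */
			ring_doorbell(first_ind, 0);
			first_ind = first_free_index();
		}
	}

	if (nreq) {             /* flush the final, partial batch */
		wmb_stub();
		ring_doorbell(first_ind, nreq);
	}
	return err;
}

The wmb() before each doorbell is what guarantees the HCA never sees a doorbell covering descriptors that are still in flight to memory.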
int mthca_arbel_post_srq_recv(struct ib_srq *ibsrq, struct _ib_recv_wr *wr, struct _ib_recv_wr **bad_wr) { @@ -573,6 +619,7 @@ int mthca_arbel_post_srq_recv(struct ib_srq *ibsrq, struct _ib_recv_wr *wr, int nreq; int i; u8 *wqe; + CPU_2_BE64_PREP; SPIN_LOCK_PREP(lh); spin_lock_irqsave(&srq->lock, &lh); @@ -581,23 +628,23 @@ int mthca_arbel_post_srq_recv(struct ib_srq *ibsrq, struct _ib_recv_wr *wr, ind = srq->first_free; if (ind < 0) { - HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_QP ,("SRQ %06x full\n", srq->srqn)); + HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_SRQ ,( "SRQ %06x full\n", srq->srqn)); err = -ENOMEM; *bad_wr = wr; - goto out; + break; } wqe = get_wqe(srq, ind); next_ind = *wqe_to_link(wqe); if (next_ind < 0) { - HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_LOW ,("SRQ %06x full\n", srq->srqn)); + HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_SRQ ,( "SRQ %06x full\n", srq->srqn)); err = -ENOMEM; *bad_wr = wr; break; } - ((struct mthca_next_seg *) wqe)->nda_op = + ((struct mthca_next_seg *) wqe)->nda_op = cl_hton32((next_ind << srq->wqe_shift) | 1); ((struct mthca_next_seg *) wqe)->ee_nds = 0; /* flags field will always remain 0 */ @@ -607,7 +654,7 @@ int mthca_arbel_post_srq_recv(struct ib_srq *ibsrq, struct _ib_recv_wr *wr, if (unlikely((int)wr->num_ds > srq->max_gs)) { err = -EINVAL; *bad_wr = wr; - goto out; + break; } for (i = 0; i < (int)wr->num_ds; ++i) { @@ -616,7 +663,7 @@ int mthca_arbel_post_srq_recv(struct ib_srq *ibsrq, struct _ib_recv_wr *wr, ((struct mthca_data_seg *) wqe)->lkey = cl_hton32(wr->ds_array[i].lkey); ((struct mthca_data_seg *) wqe)->addr = - cl_hton64(wr->ds_array[i].vaddr); + CPU_2_BE64(wr->ds_array[i].vaddr); wqe += sizeof (struct mthca_data_seg); } @@ -630,9 +677,8 @@ int mthca_arbel_post_srq_recv(struct ib_srq *ibsrq, struct _ib_recv_wr *wr, srq->first_free = next_ind; } -out: if (likely(nreq)) { - srq->counter = srq->counter + (u16)nreq; + srq->counter = (u16)(srq->counter + nreq); /* * Make sure that descriptors are written before @@ -646,6 +692,31 @@ out: return err; } +int mthca_max_srq_sge(struct mthca_dev *dev) +{ + if (mthca_is_memfree(dev)) + return dev->limits.max_sg; + + /* + * SRQ allocations are based on powers of 2 for Tavor, + * (although they only need to be multiples of 16 bytes). + * + * Therefore, we need to base the max number of sg entries on + * the largest power of 2 descriptor size that is <= to the + * actual max WQE descriptor size, rather than return the + * max_sg value given by the firmware (which is based on WQE + * sizes as multiples of 16, not powers of 2). + * + * If SRQ implementation is changed for Tavor to be based on + * multiples of 16, the calculation below can be deleted and + * the FW max_sg value returned. + */ + return min( (uint32_t)dev->limits.max_sg, + ((1 << (fls(dev->limits.max_desc_sz) - 1)) - + sizeof (struct mthca_next_seg)) / + sizeof (struct mthca_data_seg)); +} + int mthca_init_srq_table(struct mthca_dev *dev) { int err; @@ -678,4 +749,3 @@ void mthca_cleanup_srq_table(struct mthca_dev *dev) mthca_array_cleanup(&dev->srq_table.srq, dev->limits.num_srqs); mthca_alloc_cleanup(&dev->srq_table.alloc); } - diff --git a/trunk/hw/mthca/kernel/mthca_user.h b/trunk/hw/mthca/kernel/mthca_user.h deleted file mode 100644 index e603f478..00000000 --- a/trunk/hw/mthca/kernel/mthca_user.h +++ /dev/null @@ -1,67 +0,0 @@ -/* - * Copyright (c) 2005 Topspin Communications. All rights reserved. - * Copyright (c) 2005 Cisco Systems. All rights reserved. - * - * This software is available to you under a choice of one of two - * licenses. 
You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - * - */ - -#ifndef MTHCA_USER_H -#define MTHCA_USER_H - -/* - * Make sure that all structs defined in this file remain laid out so - * that they pack the same way on 32-bit and 64-bit architectures (to - * avoid incompatibility between 32-bit userspace and 64-bit kernels). - * In particular do not use pointer types -- pass pointers in u64 - * instead. - */ - -struct mthca_alloc_ucontext_resp { - uint64_t uar_addr; - u64 pd_handle; - u32 pdn; - u32 qp_tab_size; - u32 uarc_size; - u32 vend_id; - u16 dev_id; -}; - -struct mthca_create_srq { - u32 lkey; - u32 db_index; - u64 db_page; -}; - -struct mthca_create_srq_resp { - u32 srqn; - u32 reserved; -}; - -#endif /* MTHCA_USER_H */ diff --git a/trunk/hw/mthca/mx_abi.h b/trunk/hw/mthca/mx_abi.h index 9473db60..c746737e 100644 --- a/trunk/hw/mthca/mx_abi.h +++ b/trunk/hw/mthca/mx_abi.h @@ -104,6 +104,24 @@ struct ibv_create_cq_resp { uint32_t cqn; }; +struct ibv_create_srq { + uint64_t user_handle; + struct ibv_reg_mr mr; + uint32_t lkey; /* used only in kernel */ + uint32_t db_index; + uint64_t db_page; +}; + +struct ibv_create_srq_resp { + struct ibv_reg_mr_resp mr; + uint64_t srq_handle; + uint64_t user_handle; + uint32_t max_wr; + uint32_t max_sge; + uint32_t srqn; + uint32_t reserved; +}; + struct ibv_create_qp { uint64_t sq_db_page; uint64_t rq_db_page; diff --git a/trunk/hw/mthca/user/SOURCES b/trunk/hw/mthca/user/SOURCES index 1ab51972..92f6c653 100644 --- a/trunk/hw/mthca/user/SOURCES +++ b/trunk/hw/mthca/user/SOURCES @@ -31,7 +31,8 @@ SOURCES= \ mlnx_ual_mrw.c \ mlnx_ual_osbypass.c \ mlnx_ual_pd.c \ - mlnx_ual_qp.c \ + mlnx_ual_qp.c \ + mlnx_ual_srq.c \ \ mlnx_uvp_debug.c \ mlnx_uvp.c \ diff --git a/trunk/hw/mthca/user/mlnx_ual_av.c b/trunk/hw/mthca/user/mlnx_ual_av.c index dc909911..8bc46a57 100644 --- a/trunk/hw/mthca/user/mlnx_ual_av.c +++ b/trunk/hw/mthca/user/mlnx_ual_av.c @@ -263,6 +263,10 @@ mlnx_post_create_av ( } *ph_uvp_av = (ib_av_handle_t)ah; } + else { + mthca_free_av(ah); + cl_free(ah); + } goto end; end: diff --git a/trunk/hw/mthca/user/mlnx_ual_main.c b/trunk/hw/mthca/user/mlnx_ual_main.c index 3dedca72..03d90576 100644 --- a/trunk/hw/mthca/user/mlnx_ual_main.c +++ b/trunk/hw/mthca/user/mlnx_ual_main.c @@ -156,6 +156,11 @@ uvp_get_interface ( */ mlnx_get_pd_interface (p_uvp); + /* + * SRQ Management 
Verbs + */ + mlnx_get_srq_interface (p_uvp); + /* * QP Management Verbs */ diff --git a/trunk/hw/mthca/user/mlnx_ual_main.h b/trunk/hw/mthca/user/mlnx_ual_main.h index b6382668..bbc5fdc0 100644 --- a/trunk/hw/mthca/user/mlnx_ual_main.h +++ b/trunk/hw/mthca/user/mlnx_ual_main.h @@ -271,6 +271,59 @@ mlnx_post_destroy_cq ( IN const ib_cq_handle_t h_uvp_cq, IN ib_api_status_t ioctl_status); +/************* SRQ Management *************************/ +void +mlnx_get_srq_interface ( + IN OUT uvp_interface_t *p_uvp ); + +ib_api_status_t +mlnx_pre_create_srq ( + IN const ib_pd_handle_t h_uvp_pd,// Fix me: if needed + IN const ib_srq_attr_t *p_srq_attr, + IN OUT ci_umv_buf_t *p_umv_buf); + +void +mlnx_post_create_srq ( + IN const ib_pd_handle_t h_uvp_pd, + IN ib_api_status_t ioctl_status, + OUT ib_srq_handle_t *ph_uvp_srq, + IN ci_umv_buf_t *p_umv_buf ); + +ib_api_status_t +mlnx_pre_modify_srq ( + IN const ib_srq_handle_t h_uvp_srq, + IN const ib_srq_attr_mask_t srq_attr_attr, // Fixme + IN const ib_srq_attr_t *p_srq_attr, // Fixme + IN OUT ci_umv_buf_t *p_umv_buf); + +void +mlnx_post_modify_srq ( + IN const ib_srq_handle_t h_uvp_srq, + IN ib_api_status_t ioctl_status, + IN OUT ci_umv_buf_t *p_umv_buf); + +ib_api_status_t +mlnx_pre_query_srq ( + IN ib_srq_handle_t h_uvp_srq, + IN OUT ci_umv_buf_t *p_umv_buf); + +void +mlnx_post_query_srq ( + IN ib_srq_handle_t h_uvp_srq, + IN ib_api_status_t ioctl_status, + IN ib_srq_attr_t *p_query_attr, + IN OUT ci_umv_buf_t *p_umv_buf); + +ib_api_status_t +mlnx_pre_destroy_srq ( + IN const ib_srq_handle_t h_uvp_srq); + +void +mlnx_post_destroy_srq ( + IN const ib_srq_handle_t h_uvp_srq, + IN ib_api_status_t ioctl_status ); + + /************* QP Management *************************/ void mlnx_get_qp_interface ( @@ -486,6 +539,12 @@ mlnx_post_recv ( IN ib_recv_wr_t* const p_recv_wr, OUT ib_recv_wr_t** pp_recv_failure ); +ib_api_status_t +mlnx_post_srq_recv ( + IN const void* __ptr64 h_srq, + IN ib_recv_wr_t* const p_recv_wr, + OUT ib_recv_wr_t** pp_recv_failure ); + ib_api_status_t mlnx_bind_mw ( IN const ib_mw_handle_t h_uvp_mw, diff --git a/trunk/hw/mthca/user/mlnx_ual_osbypass.c b/trunk/hw/mthca/user/mlnx_ual_osbypass.c index bae962ea..08fba46a 100644 --- a/trunk/hw/mthca/user/mlnx_ual_osbypass.c +++ b/trunk/hw/mthca/user/mlnx_ual_osbypass.c @@ -53,6 +53,7 @@ mlnx_get_osbypass_interface ( */ p_uvp->post_send = mlnx_post_send; p_uvp->post_recv = mlnx_post_recv; + p_uvp->post_srq_recv = mlnx_post_srq_recv; /* * Completion Processing and @@ -107,6 +108,42 @@ mlnx_post_send ( } +ib_api_status_t +mlnx_post_srq_recv ( + IN const void* __ptr64 h_srq, + IN ib_recv_wr_t* const p_recv_wr, + OUT ib_recv_wr_t** pp_recv_failure ) +{ + int err; + ib_api_status_t status = IB_SUCCESS; + struct mthca_srq *srq = (struct mthca_srq *) ((void*)h_srq); + + UVP_ENTER(UVP_DBG_QP); + + CL_ASSERT (srq); + + CL_ASSERT( p_recv_wr ); + + err = srq->ibv_srq.context->ops.post_srq_recv(&srq->ibv_srq, p_recv_wr, pp_recv_failure ); + if (err) { + UVP_PRINT(TRACE_LEVEL_ERROR ,UVP_DBG_QP, ("mthca_post_recv failed (%d)\n", err)); + if (err == -ENOMEM) + status = IB_INSUFFICIENT_RESOURCES; + else if (err == -EINVAL) + status = IB_INVALID_WR_TYPE; + else if (err == -ERANGE) + status = IB_INVALID_MAX_SGE; + else if (err == -EBUSY) + status = IB_INVALID_QP_STATE; + else + status = errno_to_iberr(err); + } + + UVP_EXIT(UVP_DBG_QP); + return status; +} + + ib_api_status_t mlnx_post_recv ( IN const void* __ptr64 h_qp, diff --git a/trunk/hw/mthca/user/mlnx_ual_qp.c b/trunk/hw/mthca/user/mlnx_ual_qp.c index 
39aa9255..4bf3fb5b 100644 --- a/trunk/hw/mthca/user/mlnx_ual_qp.c +++ b/trunk/hw/mthca/user/mlnx_ual_qp.c @@ -111,7 +111,7 @@ ib_api_status_t /* convert attributes */ attr.send_cq = p_create_attr->h_sq_cq->ibv_cq; attr.recv_cq = p_create_attr->h_rq_cq->ibv_cq; - attr.srq = NULL; /* absent in IBAL */ + attr.srq = (struct ibv_srq*)p_create_attr->h_srq; attr.cap.max_send_wr = p_create_attr->sq_depth; attr.cap.max_recv_wr = p_create_attr->rq_depth; attr.cap.max_send_sge = p_create_attr->sq_sge; @@ -347,7 +347,7 @@ mlnx_post_destroy_qp ( if (ioctl_status == IB_SUCCESS) cl_free (p_qp_info); else - UVP_PRINT(TRACE_LEVEL_ERROR ,UVP_DBG_SHIM , ("mthca_destroy_qp failed (%d)\n", ioctl_status)); + UVP_PRINT(TRACE_LEVEL_ERROR ,UVP_DBG_SHIM , ("mthca_destroy_qp_post failed (%d)\n", ioctl_status)); UVP_EXIT(UVP_DBG_SHIM); return; diff --git a/trunk/hw/mthca/user/mlnx_ual_srq.c b/trunk/hw/mthca/user/mlnx_ual_srq.c new file mode 100644 index 00000000..196da791 --- /dev/null +++ b/trunk/hw/mthca/user/mlnx_ual_srq.c @@ -0,0 +1,269 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * Copyright (c) 2004-2005 Mellanox Technologies, Inc. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id: mlnx_ual_srq.c 1611 2006-08-20 14:48:55Z leonid $ + */ + +#include "mt_l2w.h" +#include "mlnx_ual_main.h" +#include "mlnx_uvp.h" +#include "mx_abi.h" + +#if defined(EVENT_TRACING) +#include "mlnx_ual_srq.tmh" +#endif + + +extern uint32_t mlnx_dbg_lvl; + +void +mlnx_get_srq_interface ( + IN OUT uvp_interface_t *p_uvp ) +{ + UVP_ENTER(UVP_DBG_DEV); + + CL_ASSERT(p_uvp); + + /* + * Completion Queue Management Verbs + */ + p_uvp->pre_create_srq = mlnx_pre_create_srq; + p_uvp->post_create_srq = mlnx_post_create_srq; + + p_uvp->pre_query_srq = NULL; /* mlnx_pre_query_srq; */ + p_uvp->post_query_srq = NULL; /*mlnx_post_query_srq;*/ + + p_uvp->pre_modify_srq = NULL; /* mlnx_modify_srq;*/ + p_uvp->post_modify_srq = NULL; /*mlnx_post_modify_srq;*/ + + p_uvp->pre_destroy_srq = NULL; /* mlnx_pre_destroy_srq; */ + p_uvp->post_destroy_srq = mlnx_post_destroy_srq; + + UVP_EXIT(UVP_DBG_DEV); +} + +static void __free_srq(struct mthca_srq *srq) +{ + /* srq may be NULL, when ioctl returned with some kind of error, e.g. 
IB_INVALID_PARAM */ + if (!srq) + return; + + if (mthca_is_memfree(srq->ibv_srq.context)) { + mthca_free_db(to_mctx(srq->ibv_srq.context)->db_tab, MTHCA_DB_TYPE_SRQ, + srq->db_index); + } + + if (srq->buf) { +#ifdef NOT_USE_VIRTUAL_ALLOC + cl_free(srq->buf); +#else + VirtualFree( srq->buf, 0, MEM_RELEASE); +#endif + } + + if (srq->wrid) + cl_free(srq->wrid); + + cl_spinlock_destroy(&srq->lock); + cl_free (srq); +} + +ib_api_status_t +mlnx_pre_create_srq ( + IN const ib_pd_handle_t h_uvp_pd,// Fix me: if needed + IN const ib_srq_attr_t *p_srq_attr, + IN OUT ci_umv_buf_t *p_umv_buf) +{ + struct mthca_srq *srq; + ib_api_status_t status = IB_SUCCESS; + size_t size = max( sizeof(struct ibv_create_srq), sizeof(struct ibv_create_srq_resp) ); + mlnx_ual_pd_info_t *p_pd = (mlnx_ual_pd_info_t *)h_uvp_pd; + struct ibv_pd *ibv_pd = p_pd->ibv_pd; + struct ibv_create_srq *p_create_srq; + int err; + + UVP_ENTER(UVP_DBG_SRQ); + + CL_ASSERT(p_umv_buf); + + /* Sanity check SRQ size before proceeding */ + if (p_srq_attr->max_wr > 1 << 16 || p_srq_attr->max_sge > 64) + { + status = IB_INVALID_PARAMETER; + goto err_params; + } + + if( !p_umv_buf->p_inout_buf ) + { + p_umv_buf->p_inout_buf = cl_zalloc( size ); + if( !p_umv_buf->p_inout_buf ) + { + status = IB_INSUFFICIENT_MEMORY; + goto err_memory; + } + } + p_umv_buf->input_size = sizeof(struct ibv_create_srq); + p_umv_buf->output_size = sizeof(struct ibv_create_srq_resp); + p_umv_buf->command = TRUE; + + /* allocate srq */ + srq = cl_zalloc(sizeof *srq); + if (!srq) + { + status = IB_INSUFFICIENT_MEMORY; + goto err_alloc_srq; + } + + /* init fields */ + cl_spinlock_construct(&srq->lock); + if (cl_spinlock_init(&srq->lock)) + goto err_lock; + + srq->ibv_srq.pd = ibv_pd; + srq->ibv_srq.context = ibv_pd->context; + srq->max = align_queue_size(ibv_pd->context, p_srq_attr->max_wr, 1); + srq->max_gs = p_srq_attr->max_sge; + srq->counter = 0; + + if (mthca_alloc_srq_buf(ibv_pd, (void*)p_srq_attr, srq)) + { + status = IB_INSUFFICIENT_MEMORY; + goto err_alloc_buf; + } + + // fill the parameters for ioctl + p_create_srq = (struct ibv_create_srq *)p_umv_buf->p_inout_buf; + p_create_srq->user_handle = (uint64_t)(ULONG_PTR)srq; + p_create_srq->mr.start = (uint64_t)(ULONG_PTR)srq->buf; + p_create_srq->mr.length = srq->buf_size; + p_create_srq->mr.hca_va = 0; + p_create_srq->mr.pd_handle = p_pd->ibv_pd->handle; + p_create_srq->mr.pdn = to_mpd(p_pd->ibv_pd)->pdn; + p_create_srq->mr.access_flags = 0; //local read + + if (mthca_is_memfree(ibv_pd->context)) { + srq->db_index = mthca_alloc_db(to_mctx(ibv_pd->context)->db_tab, + MTHCA_DB_TYPE_SRQ, &srq->db); + if (srq->db_index < 0) + goto err_alloc_db; + + p_create_srq->db_page = db_align(srq->db); + p_create_srq->db_index = srq->db_index; + } + + status = IB_SUCCESS; + goto end; + +err_alloc_db: +#ifdef NOT_USE_VIRTUAL_ALLOC + cl_free(srq->buf); +#else + VirtualFree( srq->buf, 0, MEM_RELEASE); +#endif + cl_free(srq->wrid); +err_alloc_buf: + cl_spinlock_destroy(&srq->lock); +err_lock: + cl_free(srq); +err_alloc_srq: + cl_free(p_umv_buf->p_inout_buf); +err_memory: +err_params: +end: + UVP_EXIT(UVP_DBG_SRQ); + return status; +} + + +void +mlnx_post_create_srq ( + IN const ib_pd_handle_t h_uvp_pd, + IN ib_api_status_t ioctl_status, + OUT ib_srq_handle_t *ph_uvp_srq, + IN ci_umv_buf_t *p_umv_buf ) +{ + int err; + struct mthca_srq *srq; + struct ibv_create_srq_resp *p_resp; + mlnx_ual_pd_info_t *p_pd = (mlnx_ual_pd_info_t *)h_uvp_pd; + struct ibv_pd *ibv_pd = p_pd->ibv_pd; + ib_api_status_t status = IB_SUCCESS; + + 
UVP_ENTER(UVP_DBG_SRQ); + + CL_ASSERT(p_umv_buf); + p_resp = (struct ibv_create_srq_resp *)p_umv_buf->p_inout_buf; + srq = (struct mthca_srq *)(ULONG_PTR)p_resp->user_handle; + + if (IB_SUCCESS == ioctl_status) { + + /* complete filling SRQ object */ + srq->ibv_srq.handle = p_resp->srq_handle; + srq->srqn = p_resp->srqn; + srq->max = p_resp->max_wr; + srq->max_gs = p_resp->max_sge; + srq->mr.handle = p_resp->mr.mr_handle; + srq->mr.lkey = p_resp->mr.lkey; + srq->mr.rkey = p_resp->mr.rkey; + srq->mr.pd = ibv_pd; + srq->mr.context = ibv_pd->context; + + if (mthca_is_memfree(ibv_pd->context)) + mthca_set_db_qn(srq->db, MTHCA_DB_TYPE_SRQ, srq->srqn); + + *ph_uvp_srq = (ib_srq_handle_t)srq; + } + else + __free_srq(srq); + + if (p_resp) + cl_free( p_resp ); + UVP_EXIT(UVP_DBG_SRQ); + return; +} + +void +mlnx_post_destroy_srq ( + IN const ib_srq_handle_t h_uvp_srq, + IN ib_api_status_t ioctl_status) +{ + int err; + struct mthca_srq *srq = (struct mthca_srq *) ((void*)h_uvp_srq); + + UVP_ENTER(UVP_DBG_CQ); + + CL_ASSERT(srq); + + if (IB_SUCCESS == ioctl_status) + __free_srq(srq); + + UVP_EXIT(UVP_DBG_CQ); +} + + diff --git a/trunk/hw/mthca/user/mlnx_uvp.h b/trunk/hw/mthca/user/mlnx_uvp.h index 2512ed05..6470a398 100644 --- a/trunk/hw/mthca/user/mlnx_uvp.h +++ b/trunk/hw/mthca/user/mlnx_uvp.h @@ -137,7 +137,7 @@ struct mthca_srq { void *buf; void *last; cl_spinlock_t lock; - struct ibv_mr *mr; + struct ibv_mr mr; uint64_t *wrid; uint32_t srqn; int max; diff --git a/trunk/hw/mthca/user/mlnx_uvp_abi.h b/trunk/hw/mthca/user/mlnx_uvp_abi.h deleted file mode 100644 index 155dae1c..00000000 --- a/trunk/hw/mthca/user/mlnx_uvp_abi.h +++ /dev/null @@ -1,61 +0,0 @@ -/* - * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved. - * - * This software is available to you under a choice of one of two - * licenses. You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
- * - * $Id$ - */ - -#ifndef MTHCA_UVP_ABI_H -#define MTHCA_ABI_H - -#include "mlnx_uvp_kern_abi.h" - -struct mthca_alloc_ucontext_resp { - struct ibv_get_context_resp ibv_resp; -}; - -struct mthca_create_srq { - uint32_t lkey; - uint32_t db_index; - uint64_t db_page; - struct ibv_create_srq ibv_cmd; -}; - -struct mthca_create_srq_resp { - struct ibv_create_srq_resp ibv_resp; - uint32_t srqn; - uint32_t reserved; -}; - -struct ibv_context *mthca_alloc_context(struct ibv_get_context_resp *resp_p); -void mthca_free_context(struct ibv_context *ibctx); - - -#endif /* MTHCA_ABI_H */ diff --git a/trunk/hw/mthca/user/mlnx_uvp_debug.h b/trunk/hw/mthca/user/mlnx_uvp_debug.h index d7c0a4bd..2a9cbc5b 100644 --- a/trunk/hw/mthca/user/mlnx_uvp_debug.h +++ b/trunk/hw/mthca/user/mlnx_uvp_debug.h @@ -54,6 +54,7 @@ extern uint32_t g_mlnx_dbg_flags; WPP_DEFINE_BIT( UVP_DBG_CQ) \ WPP_DEFINE_BIT( UVP_DBG_QP) \ WPP_DEFINE_BIT( UVP_DBG_MEMORY) \ + WPP_DEFINE_BIT( UVP_DBG_SRQ) \ WPP_DEFINE_BIT( UVP_DBG_AV) \ WPP_DEFINE_BIT( UVP_DBG_SEND) \ WPP_DEFINE_BIT( UVP_DBG_RECV) \ @@ -93,11 +94,12 @@ extern uint32_t g_mlnx_dbg_flags; #define UVP_DBG_QP (1 << 4) #define UVP_DBG_CQ (1 << 5) #define UVP_DBG_MEMORY (1 << 6) -#define UVP_DBG_AV (1 << 7) -#define UVP_DBG_SEND (1 << 8) -#define UVP_DBG_RECV (1 << 9) -#define UVP_DBG_LOW (1 << 10) -#define UVP_DBG_SHIM (1 << 11) +#define UVP_DBG_SRQ (1 << 7) +#define UVP_DBG_AV (1 << 8) +#define UVP_DBG_SEND (1 << 9) +#define UVP_DBG_RECV (1 << 10) +#define UVP_DBG_LOW (1 << 11) +#define UVP_DBG_SHIM (1 << 12) VOID diff --git a/trunk/hw/mthca/user/mlnx_uvp_srq.c b/trunk/hw/mthca/user/mlnx_uvp_srq.c index dd2fdc02..bb81e0cc 100644 --- a/trunk/hw/mthca/user/mlnx_uvp_srq.c +++ b/trunk/hw/mthca/user/mlnx_uvp_srq.c @@ -95,24 +95,7 @@ int mthca_tavor_post_srq_recv(struct ibv_srq *ibsrq, first_ind = srq->first_free; - for (nreq = 0; wr; ++nreq, wr = wr->p_next) { - if (unlikely(nreq == MTHCA_TAVOR_MAX_WQES_PER_RECV_DB)) { - nreq = 0; - - doorbell[0] = cl_hton32(first_ind << srq->wqe_shift); - doorbell[1] = cl_hton32(srq->srqn << 8); - - /* - * Make sure that descriptors are written - * before doorbell is rung. - */ - wmb(); - - mthca_write64(doorbell, to_mctx(ibsrq->context), MTHCA_RECV_DOORBELL); - - first_ind = srq->first_free; - } - + for (nreq = 0; wr; wr = wr->p_next) { ind = srq->first_free; if (ind < 0) { @@ -172,6 +155,23 @@ int mthca_tavor_post_srq_recv(struct ibv_srq *ibsrq, srq->wrid[ind] = wr->wr_id; srq->first_free = next_ind; + + if (++nreq == MTHCA_TAVOR_MAX_WQES_PER_RECV_DB) { + nreq = 0; + + doorbell[0] = cl_hton32(first_ind << srq->wqe_shift); + doorbell[1] = cl_hton32(srq->srqn << 8); + + /* + * Make sure that descriptors are written + * before doorbell is rung. 
+ */ + wmb(); + + mthca_write64(doorbell, to_mctx(ibsrq->context), MTHCA_RECV_DOORBELL); + + first_ind = srq->first_free; + } } if (nreq) { @@ -294,12 +294,12 @@ int mthca_alloc_srq_buf(struct ibv_pd *pd, struct ibv_srq_attr *attr, srq->buf_size = srq->max << srq->wqe_shift; if (posix_memalign(&srq->buf, g_page_size, - align(srq->buf_size, g_page_size))) { + align(srq->buf_size, g_page_size))) { cl_free(srq->wrid); return -1; } - memset(srq->buf, 0, srq->buf_size); + cl_memclr(srq->buf, srq->buf_size); /* * Now initialize the SRQ buffer so that all of the WQEs are diff --git a/trunk/hw/mthca/user/mlnx_uvp_verbs.c b/trunk/hw/mthca/user/mlnx_uvp_verbs.c index 783ff72a..528222fd 100644 --- a/trunk/hw/mthca/user/mlnx_uvp_verbs.c +++ b/trunk/hw/mthca/user/mlnx_uvp_verbs.c @@ -244,7 +244,7 @@ int mthca_destroy_cq(struct ibv_cq *cq) return 0; } -static int align_queue_size(struct ibv_context *context, int size, int spare) +int align_queue_size(struct ibv_context *context, int size, int spare) { int ret; @@ -333,7 +333,7 @@ struct ibv_qp *mthca_create_qp_pre(struct ibv_pd *pd, } // fill the rest qp fields - qp->ibv_qp .pd = pd; + qp->ibv_qp.pd = pd; qp->ibv_qp.send_cq = attr->send_cq; qp->ibv_qp.recv_cq = attr->recv_cq; qp->ibv_qp.srq = attr->srq; @@ -526,3 +526,4 @@ int mthca_detach_mcast(struct ibv_qp *qp, union ibv_gid *gid, uint16_t lid) return -ENOSYS; #endif } + diff --git a/trunk/hw/mthca/user/mlnx_uvp_verbs.h b/trunk/hw/mthca/user/mlnx_uvp_verbs.h index 40ccf3be..19ecb833 100644 --- a/trunk/hw/mthca/user/mlnx_uvp_verbs.h +++ b/trunk/hw/mthca/user/mlnx_uvp_verbs.h @@ -405,16 +405,9 @@ typedef enum MTHCA_QP_ACCESS_FLAGS { struct ibv_srq { - struct ibv_context *context; - void *srq_context; struct ibv_pd *pd; uint64_t handle; - HANDLE mutex; - -#ifdef WIN_TO_BE_CHANGED - pthread_cond_t cond; - uint32_t events_completed; -#endif + struct ibv_context *context; }; struct ibv_qp { @@ -489,6 +482,8 @@ struct ibv_context { void *abi_compat; }; +int align_queue_size(struct ibv_context *context, int size, int spare); + END_C_DECLS #endif /* INFINIBAND_VERBS_H */ diff --git a/trunk/inc/iba/ib_al.h b/trunk/inc/iba/ib_al.h index 75132f1c..a63ecb61 100644 --- a/trunk/inc/iba/ib_al.h +++ b/trunk/inc/iba/ib_al.h @@ -43,7 +43,7 @@ extern "C" { #endif /* __cplusplus */ -/****h* IB Access Layer API/Overview +/****h* IB Access Layer API/Access Layer * NAME * InfiniBand Access Layer * COPYRIGHT @@ -469,6 +469,7 @@ typedef struct _ib_async_event_rec ib_ca_handle_t h_ca; ib_cq_handle_t h_cq; ib_qp_handle_t h_qp; + ib_srq_handle_t h_srq; } handle; @@ -1097,6 +1098,293 @@ ib_destroy_av( *****/ +/****f* Access Layer/ib_create_srq +* NAME +* ib_create_srq +* +* DESCRIPTION +* Creates a shared receive queue and returns its handle to the user. +* +* SYNOPSIS +*/ +AL_EXPORT ib_api_status_t AL_API +ib_create_srq( + IN const ib_pd_handle_t h_pd, + IN const ib_srq_attr_t* const p_srq_attr, + IN const void* const srq_context, + IN const ib_pfn_event_cb_t pfn_srq_event_cb OPTIONAL, + OUT ib_srq_handle_t* const ph_srq ); +/* +* PARAMETERS +* h_pd +* [in] This is a handle to a protection domain associated with the shared queue +* pair. +* +* p_srq_attr +* [in] Attributes necessary to allocate and initialize a shared receive queue. +* +* srq_context +* [in] A user-specified context information associated with the shared +* receive queue. +* +* pfn_qp_event_cb +* [in] User-specified error callback routine invoked after an +* asynchronous event has occurred on the shared receive queue. 
+*
+*	ph_srq
+*		[out] Upon successful completion of this call, this references a
+*		handle to the newly created shared receive queue.
+*
+* RETURN VALUES
+*	IB_SUCCESS
+*		The receive queue was successfully created.
+*
+*	IB_INVALID_PD_HANDLE
+*		The protection domain to associate with the shared receive queue was invalid.
+*
+*	IB_INVALID_PARAMETER
+*		A reference to the shared receive queue attributes or handle was not provided.
+*
+*	IB_INSUFFICIENT_MEMORY
+*		There was insufficient memory to create the shared receive queue.
+*
+*	IB_INSUFFICIENT_RESOURCES
+*		There were insufficient resources currently available on the channel
+*		adapter to create the shared receive queue.
+*
+*	IB_INVALID_SETTING
+*		The specified shared receive queue creation attributes are invalid.
+*
+*	IB_INVALID_MAX_WRS
+*		The requested maximum send or receive work request depth could not be
+*		supported.
+*
+*	IB_INVALID_MAX_SGE
+*		The requested maximum number of scatter-gather entries for the send or
+*		receive queue could not be supported.
+*
+* NOTES
+*	This routine allocates a shared receive queue with the specified attributes.  If
+*	the shared receive queue cannot be allocated, an error is returned.  When creating
+*	the shared receive queue, users associate a context with the shared receive queue.  This
+*	context is returned to the user through the asynchronous event callback
+*	if an event occurs.
+*
+*	This routine is used to create receive queues, which work with QPs of type:
+*
+*	IB_QPT_RELIABLE_CONN
+*	IB_QPT_UNRELIABLE_CONN
+*	IB_QPT_UNRELIABLE_DGRM
+*
+* SEE ALSO
+*	ib_query_srq, ib_modify_srq, ib_destroy_srq, ib_srq_attr_t,
+*	ib_srq_attr_mask_t, ib_pfn_event_cb_t, ib_qp_attr_t
+*****/
+
+
+/****f* Access Layer/ib_query_srq
+* NAME
+*	ib_query_srq
+*
+* DESCRIPTION
+*	Query the current attributes of the shared receive queue.
+*
+* SYNOPSIS
+*/
+AL_EXPORT ib_api_status_t AL_API
+ib_query_srq(
+	IN		const	ib_srq_handle_t			h_srq,
+		OUT			ib_srq_attr_t* const	p_srq_attr );
+/*
+* PARAMETERS
+*	h_srq
+*		[in] A handle to an existing shared receive queue.
+*
+*	p_srq_attr
+*		[out] Upon successful completion of this call, the structure
+*		referenced by this parameter contains the attributes of the specified
+*		shared receive queue.
+*
+* RETURN VALUES
+*	IB_SUCCESS
+*		The shared receive queue attributes were returned successfully.
+*
+*	IB_INVALID_SRQ_HANDLE
+*		The shared receive queue handle was invalid.
+*
+*	IB_INVALID_PARAMETER
+*		A reference to the shared receive queue attributes structure was not provided.
+*
+* NOTES
+*	This routine returns information about the specified shared receive queue.
+*
+* SEE ALSO
+*	ib_query_srq, ib_modify_srq, ib_destroy_srq, ib_srq_attr_t,
+*	ib_srq_attr_mask_t, ib_pfn_event_cb_t, ib_qp_attr_t
+*****/
+
+
+/****f* Access Layer/ib_modify_srq
+* NAME
+*	ib_modify_srq
+*
+* DESCRIPTION
+*	Modifies the attributes of an existing shared receive queue.
+*
+* SYNOPSIS
+*/
+AL_EXPORT ib_api_status_t AL_API
+ib_modify_srq(
+	IN		const	ib_srq_handle_t			h_srq,
+	IN		const	ib_srq_attr_t* const	p_srq_attr,
+	IN		const	ib_srq_attr_mask_t		srq_attr_mask );
+/*
+* PARAMETERS
+*	h_srq
+*		[in] A handle to an existing shared receive queue.
+*
+*	p_srq_attr
+*		[in] Attributes necessary to allocate and initialize a shared receive queue.
+*
+*	srq_attr_mask
+*		[in] Flags indicating which fields in the previous structure are valid.
+*
+* RETURN VALUES
+*	IB_SUCCESS
+*		The shared receive queue was successfully modified.
+*
+*	IB_INVALID_SRQ_HANDLE
+*		The shared receive queue handle was invalid.
+*
+*	IB_INVALID_PARAMETER
+*		A reference to the shared receive queue attributes was not provided.
+*
+*	IB_INVALID_SETTING
+*		The specified shared receive queue attributes were invalid.
+*
+*	IB_UNSUPPORTED
+*		The required action is not supported yet.
+*
+*	IB_INSUFFICIENT_RESOURCES
+*		There were insufficient resources currently available on the channel
+*		adapter to modify the shared receive queue.
+*
+* NOTES
+*	This routine modifies the attributes of an existing shared receive queue and
+*	transitions it to a new state.  The new state and attributes are
+*	specified through the p_srq_attr and srq_attr_mask parameters.  Upon successful
+*	completion, the shared receive queue is in the requested state.
+*
+* SEE ALSO
+*	ib_query_srq, ib_modify_srq, ib_destroy_srq, ib_srq_attr_t,
+*	ib_srq_attr_mask_t, ib_pfn_event_cb_t, ib_qp_attr_t
+*****/
+
+
+/****f* Access Layer/ib_destroy_srq
+* NAME
+*	ib_destroy_srq
+*
+* DESCRIPTION
+*	Release a shared receive queue.  Once destroyed, no further access to this
+*	shared receive queue is possible.
+*
+* SYNOPSIS
+*/
+AL_EXPORT ib_api_status_t AL_API
+ib_destroy_srq(
+	IN		const	ib_srq_handle_t			h_srq,
+	IN		const	ib_pfn_destroy_cb_t		pfn_destroy_cb OPTIONAL );
+/*
+* PARAMETERS
+*	h_srq
+*		[in] A handle to an existing shared receive queue.
+*
+*	pfn_destroy_cb
+*		[in] A user-specified callback that is invoked after the shared receive queue
+*		has been successfully destroyed.
+*
+* RETURN VALUES
+*	IB_SUCCESS
+*		The destroy request was registered.
+*
+*	IB_INVALID_SRQ_HANDLE
+*		The shared receive queue handle was invalid.
+*
+*	IB_RESOURCE_BUSY
+*		There are QPs bound to the shared receive queue.
+*
+* NOTES
+*	This call destroys an existing shared receive queue.  Since callbacks may be
+*	outstanding against the shared receive queue when the destroy operation is
+*	invoked, this call operates asynchronously.  The user will be notified
+*	through a callback once the destroy operation completes, indicating that
+*	no additional callbacks will be invoked for the specified shared receive queue.
+*
+* SEE ALSO
+*	ib_query_srq, ib_modify_srq, ib_destroy_srq, ib_srq_attr_t,
+*	ib_srq_attr_mask_t, ib_pfn_event_cb_t, ib_qp_attr_t
+*****/
+
+
+/****f* Access Layer/ib_post_srq_recv
+* NAME
+*	ib_post_srq_recv
+*
+* DESCRIPTION
+*	This routine posts a work request to a shared receive queue.
+*
+* SYNOPSIS
+*/
+AL_EXPORT ib_api_status_t AL_API
+ib_post_srq_recv(
+	IN		const	ib_srq_handle_t			h_srq,
+	IN				ib_recv_wr_t* const		p_recv_wr,
+		OUT			ib_recv_wr_t			**pp_recv_failure OPTIONAL );
+/*
+* PARAMETERS
+*	h_srq
+*		[in] The shared receive queue to which this work request is being submitted.
+*
+*	p_recv_wr
+*		[in] A reference to the head of the work request list.
+*
+*	pp_recv_failure
+*		[out] If the post receive operation failed, this references the work
+*		request in the p_recv_wr list where the first failure occurred.
+*		This parameter may be NULL if only a single work request is being
+*		posted to the QP.
+*
+* RETURN VALUES
+*	IB_SUCCESS
+*		All work requests were successfully posted.
+*
+*	IB_INVALID_QP_HANDLE
+*		The shared receive queue handle was invalid.
+*
+*	IB_INVALID_PARAMETER
+*		A reference to the receive work request list was not provided.
+*
+*	IB_INSUFFICIENT_RESOURCES
+*		The number of posted work requests exceeds the current depth available
+*		on the receive queue.
+*
+*	IB_INVALID_WR_TYPE
+*		The work request type was invalid.
+* +* IB_INVALID_QP_STATE +* The current shared receive queue state does not allow posting receives. +* +* NOTES +* This routine posts a work request to the shared receive queue. +* The type of work to perform is defined by the p_recv_wr parameter. This +* call is used to post data buffers to receive incoming message sends. +* +* SEE ALSO +* ib_recv_wr_t +*****/ + + /****f* Access Layer/ib_create_qp * NAME * ib_create_qp @@ -1155,6 +1443,10 @@ ib_create_qp( * The send or receive completion queue to associate with the queue pair * was invalid. * +* IB_INVALID_SRQ_HANDLE +* The shared receive queue to be associated with the queue pair +* was invalid. +* * IB_INVALID_SETTING * The specified queue pair creation attributes are invalid. * @@ -1170,20 +1462,27 @@ ib_create_qp( * receive queue could not be supported. * * NOTES -* This routine allocates a queue pair with the specified attributes. If +* 1. This routine allocates a queue pair with the specified attributes. If * the queue pair cannot be allocated, an error is returned. When creating * the queue pair, users associate a context with the queue pair. This * context is returned to the user through the asynchronous event callback * if an event occurs. * -* This routine is used to create queue pairs of type: -* -* IB_QPT_RELIABLE_CONN -* IB_QPT_UNRELIABLE_CONN -* IB_QPT_UNRELIABLE_DGRM -* IB_QPT_MAD -* -* Callers of ib_create_qp should call ib_init_dgrm_svc if the queue pair +* 2. For QPs that are associated with an SRQ, the Consumer should take +* the QP through the Error State before invoking a Destroy QP or a Modify +* QP to the Reset State. The Consumer may invoke the Destroy QP without +* first performing a Modify QP to the Error State and waiting for the Affiliated +* Asynchronous Last WQE Reached Event. However, if the Consumer +* does not wait for the Affiliated Asynchronous Last WQE Reached Event, +* then WQE and Data Segment leakage may occur. +* +* 3. This routine is used to create queue pairs of type: +* IB_QPT_RELIABLE_CONN +* IB_QPT_UNRELIABLE_CONN +* IB_QPT_UNRELIABLE_DGRM +* IB_QPT_MAD +* +* 4. Callers of ib_create_qp should call ib_init_dgrm_svc if the queue pair * is of type IB_QPT_UNRELIABLE_DGRM or IB_QPT_MAD before sending or * receiving data. IB_QPT_RELIABLE_CONN, IB_QPT_UNRELIABLE_CONN type * queue pairs should be used by the connection establishment process @@ -8485,8 +8784,10 @@ typedef struct _ib_pnp_rec void* __ptr64 pnp_context; void* __ptr64 context; - + //NOTE: + //guid and ca_guid use as key to flexi map need to keep these field together ib_net64_t guid; + ib_net64_t ca_guid; } ib_pnp_rec_t; /* @@ -8520,6 +8821,9 @@ typedef struct _ib_pnp_rec * The GUID of the adapter, port, IOU, or IOC for which * the PnP event occurred. * +* ca_guid +* The GUID of the HCA +* * NOTES * This structure is returned to the user to notify them of: the addition * of a channel adapter, the removal of a channel adapter, a port up or down @@ -8656,6 +8960,7 @@ typedef struct _ib_pnp_port_rec typedef struct _ib_pnp_iou_rec { ib_pnp_rec_t pnp_rec; + net64_t guid; net64_t ca_guid; net64_t chassis_guid; uint8_t slot; diff --git a/trunk/inc/iba/ib_al_ioctl.h b/trunk/inc/iba/ib_al_ioctl.h index e6b5cc9e..c006a623 100644 --- a/trunk/inc/iba/ib_al_ioctl.h +++ b/trunk/inc/iba/ib_al_ioctl.h @@ -673,6 +673,200 @@ typedef union _ual_destroy_av_ioctl * Status of the operation. 
*****/ +/****s* User-mode Access Layer/ual_create_srq_ioctl_t +* NAME +* ual_create_srq_ioctl_t +* +* DESCRIPTION +* IOCTL structure containing the input and output parameters for +* ib_create_srq +* +* SYNOPSIS +*/ +typedef union _ual_create_srq_ioctl +{ + struct _ual_create_srq_ioctl_in + { + ci_umv_buf_t umv_buf; + uint64_t h_pd; + ib_srq_attr_t srq_attr; + void* __ptr64 context; + boolean_t ev_notify; + + } in; + struct _ual_create_srq_ioctl_out + { + ci_umv_buf_t umv_buf; + ib_api_status_t status; + uint64_t h_srq; + + } out; + +} ual_create_srq_ioctl_t; +/* +* FIELDS +* in.umv_buf +* Opaque to IBAL buffer descriptor to allow the user-mode HCA library to +* exchange private information with the kernel-mode HCA driver. +* +* in.h_pd +* Protection domain on which to create the srq. +* +* in.srq_attr +* Attributes necessary for creating the srq. +* +* in.context +* UAL's srq context that needs to be returned on a callback. +* +* in.ev_notify +* Boolean indicating whether asynchronous events should be +* forwarded to user-mode. +* +* out.umv_buf +* Returns the status from the HCA driver to the user-mode HCA library, +* along with any vendor specific output information. +* +* out.status +* Status of the operation. +* +* out.h_srq +* Handle for the newly created srq. +*****/ + + +/****s* User-mode Access Layer/ual_modify_srq_ioctl_t +* NAME +* ual_modify_srq_ioctl_t +* +* DESCRIPTION +* IOCTL structure containing the input and output parameters for +* ib_modify_srq +* +* SYNOPSIS +*/ +typedef union _ual_modify_srq_ioctl +{ + struct _ual_modify_srq_ioctl_in + { + ci_umv_buf_t umv_buf; + uint64_t h_srq; + ib_srq_attr_mask_t srq_attr_mask; + ib_srq_attr_t srq_attr; + + } in; + struct _ual_modify_srq_ioctl_out + { + ci_umv_buf_t umv_buf; + ib_api_status_t status; + + } out; + +} ual_modify_srq_ioctl_t; +/* +* FIELDS +* in.umv_buf +* Opaque to IBAL buffer descriptor to allow the user-mode HCA library to +* exchange private information with the kernel-mode HCA driver. +* +* in.h_srq +* A handle to an existing Queue Pair. +* +* in.modify_attr +* Attributes used for modifying the srq. +* +* out.umv_buf +* Returns the status from the HCA driver to the user-mode HCA library, +* along with any vendor specific output information. +* +* out.status +* Status of the operation. +* +*****/ + + + +/****s* User-mode Access Layer/ual_query_srq_ioctl_t +* NAME +* ual_query_srq_ioctl_t +* +* DESCRIPTION +* IOCTL structure containing the input and output parameters for +* ib_query_srq +* +* SYNOPSIS +*/ +typedef union _ual_query_srq_ioctl +{ + struct _ual_query_srq_ioctl_in + { + ci_umv_buf_t umv_buf; + uint64_t h_srq; + + } in; + struct _ual_query_srq_ioctl_out + { + ci_umv_buf_t umv_buf; + ib_api_status_t status; + ib_srq_attr_t srq_attr; + + } out; + +} ual_query_srq_ioctl_t; +/* +* FIELDS +* in.umv_buf +* Opaque to IBAL buffer descriptor to allow the user-mode HCA library to +* exchange private information with the kernel-mode HCA driver. +* +* h_srq +* Handle to the srq whose attributes to query. +* +* out.umv_buf +* Returns the status from the HCA driver to the user-mode HCA library, +* along with any vendor specific output information. +* +* out.status +* Status of the operation. +* +* out.srq_attr +* Attributes of the srq. 
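+*
+* NOTES
+* An informative sketch of how the user-mode library might drive this
+* IOCTL. The control-code name UAL_QUERY_SRQ and the helper
+* do_al_dev_ioctl() are illustrative assumptions; the actual code and
+* transport are defined by the AL proxy interface:
+*
+* ual_query_srq_ioctl_t ioctl_buf;
+*
+* cl_memclr( &ioctl_buf, sizeof(ioctl_buf) );
+* ioctl_buf.in.h_srq = h_srq; // kernel-mode handle value
+* do_al_dev_ioctl( UAL_QUERY_SRQ,
+* &ioctl_buf.in, sizeof(ioctl_buf.in),
+* &ioctl_buf.out, sizeof(ioctl_buf.out) );
+* status = ioctl_buf.out.status;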
+*****/ + + + +/****s* User-mode Access Layer/ual_destroy_srq_ioctl_t +* NAME +* ual_destroy_srq_ioctl_t +* +* DESCRIPTION +* IOCTL structure containing the input and output parameters for +* ib_destroy_srq +* +* SYNOPSIS +*/ +typedef union _ual_destroy_srq_ioctl +{ + struct _ual_destroy_srq_ioctl_in + { + uint64_t h_srq; + + } in; + struct _ual_destroy_srq_ioctl_out + { + ib_api_status_t status; + + } out; + +} ual_destroy_srq_ioctl_t; +/* +* FIELDS +* in.h_srq +* Handle of the srq that needs to be destroyed. +* +* out.status +* Status of the operation. +*****/ + /****s* User-mode Access Layer/ual_create_qp_ioctl_t @@ -1568,6 +1762,57 @@ typedef union _ual_post_send_ioctl *****/ +/****s* User-mode Access Layer/ual_post_srq_recv_ioctl_t +* NAME +* ual_post_srq_recv_ioctl_t +* +* DESCRIPTION +* IOCTL structure containing the input and output parameters for +* ib_post_srq_recv +* +* SYNOPSIS +*/ +typedef union _ual_post_srq_recv_ioctl +{ + struct _ual_post_srq_recv_ioctl_in + { + uint64_t h_srq; + uint32_t num_wr; + uint32_t num_ds; + ib_recv_wr_t recv_wr[1]; + /* Additional work requests follow, followed by data segments. */ + + } in; + struct _ual_post_srq_recv_ioctl_out + { + ib_api_status_t status; + uint32_t failed_cnt; + + } out; + +} ual_post_srq_recv_ioctl_t; +/* +* FIELDS +* in.h_srq +* A handle to SRQ where the work request is being posted. +* +* in.num_wr +* Number of work request items in the array of work requests. +* +* in.num_ds +* Number of data segments following the array of work requests. +* +* in.recv_wr +* First work request in the array of work requests being posted. +* +* out.status +* Status of the operation. +* +* failed_cnt +* Number of work request that failed. +*****/ + + /****s* User-mode Access Layer/ual_post_recv_ioctl_t * NAME diff --git a/trunk/inc/iba/ib_at_ioctl.h b/trunk/inc/iba/ib_at_ioctl.h index 2cfa247f..f7d4b852 100644 --- a/trunk/inc/iba/ib_at_ioctl.h +++ b/trunk/inc/iba/ib_at_ioctl.h @@ -1,34 +1,34 @@ /* -* Copyright (c) 2005 Mellanox Technologies. All rights reserved. -* Copyright (c) 2005 SilverStorm Technologies. All rights reserved. -* -* This software is available to you under the OpenIB.org BSD license -* below: -* -* Redistribution and use in source and binary forms, with or -* without modification, are permitted provided that the following -* conditions are met: -* -* - Redistributions of source code must retain the above -* copyright notice, this list of conditions and the following -* disclaimer. -* -* - Redistributions in binary form must reproduce the above -* copyright notice, this list of conditions and the following -* disclaimer in the documentation and/or other materials -* provided with the distribution. -* -* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS -* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN -* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -* SOFTWARE. -* -* $Id$ -*/ + * Copyright (c) 2005 Mellanox Technologies. All rights reserved. + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. 
+ *
+ * This software is available to you under the OpenIB.org BSD license
+ * below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id$
+ */
 
 
 /* This file is shared between user- and kernel-mode */
 
diff --git a/trunk/inc/iba/ib_ci.h b/trunk/inc/iba/ib_ci.h
index 8941386d..710f67d3 100644
--- a/trunk/inc/iba/ib_ci.h
+++ b/trunk/inc/iba/ib_ci.h
@@ -73,7 +73,7 @@ extern "C"
 * definition.
 */
 #define VERBS_MAJOR_VER (0x0001)
-#define VERBS_MINOR_VER (0x0003)
+#define VERBS_MINOR_VER (0x0004)
 #define VERBS_VERSION (((VERBS_MAJOR_VER) << 16) | (VERBS_MINOR_VER))
 #define MK_VERBS_VERSION(maj,min) ((((maj) & 0xFFFF) << 16) | \
@@ -697,6 +697,189 @@ typedef ib_api_status_t
 *********
 */
 
+/****f* Verbs/ci_create_srq
+* NAME
+* ci_create_srq -- Create a Shared Receive Queue for the specified HCA
+* SYNOPSIS
+*/
+
+typedef ib_api_status_t
+(*ci_create_srq) (
+ IN const ib_pd_handle_t h_pd,
+ IN const void *srq_context,
+ IN const ib_srq_attr_t * const p_srq_attr,
+ OUT ib_srq_handle_t *ph_srq,
+ IN OUT ci_umv_buf_t *p_umv_buf OPTIONAL );
+/*
+* DESCRIPTION
+* A new shared receive queue is created on the specified HCA. The initial
+* set of attributes is provided through the p_srq_attr parameter. A handle
+* to the newly created srq is returned through ph_srq.
+* PARAMETERS
+* h_pd
+* [in] Handle to Protection Domain
+* srq_context
+* [in] A user specified context passed in an asynchronous error callback.
+* p_srq_attr
+* [in out] Initial attributes with which the srq must be created.
+* ph_srq
+* [out] Handle to the newly created shared receive queue.
+* p_umv_buf
+* [in out] Vendor specific parameter to support user mode IO.
+* RETURN VALUE
+* IB_SUCCESS
+* The shared receive queue is successfully created with the provided
+* initial attributes.
+* IB_INSUFFICIENT_RESOURCES
+* Insufficient resources to complete request.
+* IB_INVALID_PD_HANDLE
+* The h_pd supplied is invalid.
+* IB_INVALID_MAX_WRS
+* Max WRS capacity exceeded.
+* IB_INVALID_MAX_SGE
+* Max Scatter gather element request exceeds HCA capability.
+* IB_INVALID_PARAMETER
+* The parameter p_srq_attr is invalid.
+* NOTES
+* If any of the initial parameters is not valid, the shared receive queue
+* is not created. If the routine call is not successful then the contents
+* of ph_srq are undefined.
+* SEE ALSO
+* ci_query_srq, ci_modify_srq, ci_destroy_srq
+******
+*/
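+
+/*
+* EXAMPLE
+* Informative sketch of how a kernel-mode client of the channel interface
+* might invoke this verb through the ci_interface_t function table (p_ci,
+* h_pd, and the attribute values are illustrative assumptions):
+*
+* ib_srq_handle_t h_srq;
+* ib_srq_attr_t srq_attr;
+*
+* srq_attr.max_wr = 512; // SRQ depth in work requests
+* srq_attr.max_sge = 2; // scatter entries per work request
+* srq_attr.srq_limit = 0; // limit event not armed at creation
+*
+* status = p_ci->create_srq( h_pd, srq_context, &srq_attr,
+* &h_srq, NULL ); // NULL umv_buf: kernel-mode caller
+*/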
+
+
+/****f* Verbs/ci_modify_srq
+* NAME
+* ci_modify_srq -- Modify attributes of the specified SRQ.
+* SYNOPSIS
+*/
+
+typedef ib_api_status_t
+(*ci_modify_srq) (
+ IN const ib_srq_handle_t h_srq,
+ IN const ib_srq_attr_t* const p_srq_attr,
+ IN const ib_srq_attr_mask_t srq_attr_mask,
+ IN OUT ci_umv_buf_t *p_umv_buf OPTIONAL );
+/*
+* DESCRIPTION
+* This routine is used to modify the attributes of the srq. On successful
+* completion, the requested changes are applied to the srq.
+* PARAMETERS
+* h_srq
+* [in] Handle to the srq whose attributes are to be modified.
+* p_srq_attr
+* [in] The new attributes to apply to the srq.
+* srq_attr_mask
+* [in] Flags, specifying valid fields in ib_srq_attr_t structure.
+* p_umv_buf
+* [in out] Vendor specific parameter to support user mode IO.
+* RETURN VALUE
+* IB_SUCCESS
+* The operation was successful and the srq attributes are modified
+* as requested.
+* IB_INSUFFICIENT_RESOURCES
+* Insufficient resources to complete the requested operation.
+* IB_INVALID_SRQ_HANDLE
+* Invalid srq handle was passed.
+* IB_UNSUPPORTED
+* Requested operation is not supported.
+* IB_INVALID_PARAMETER
+* The parameter p_srq_attr is not valid.
+* SEE ALSO
+* ci_create_srq, ci_destroy_srq, ci_query_srq
+******
+*/
+
+
+/****f* Verbs/ci_query_srq
+* NAME
+* ci_query_srq -- Query the current SRQ attributes
+* SYNOPSIS
+*/
+
+typedef ib_api_status_t
+(*ci_query_srq) (
+ IN const ib_srq_handle_t h_srq,
+ OUT ib_srq_attr_t* const p_srq_attr,
+ IN OUT ci_umv_buf_t *p_umv_buf OPTIONAL );
+/*
+* DESCRIPTION
+* This routine queries the current attributes for the srq
+* corresponding to h_srq. The attributes are returned in p_srq_attr.
+* Depending on the current state of the srq, some of the fields in the
+* attribute structure may not be valid.
+* PARAMETERS
+* h_srq
+* [in] Handle to the srq for which the attributes are being retrieved
+* p_srq_attr
+* [out] Pointer to the ib_srq_attr_t structure where the current
+* attributes of the srq are returned.
+* p_umv_buf
+* [in out] Vendor specific parameter to support user mode IO.
+* RETURN VALUE
+* IB_SUCCESS
+* The values returned in p_srq_attr are valid.
+* IB_INVALID_SRQ_HANDLE
+* The h_srq supplied is not a valid handle.
+* IB_INVALID_PARAMETER
+* Parameter p_srq_attr is not valid.
+* SEE ALSO
+* ci_create_srq, ci_destroy_srq, ci_modify_srq
+*****
+*/
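+
+/*
+* EXAMPLE
+* Informative sketch of arming the SRQ limit and then reading the
+* attributes back through the channel interface (the threshold value is
+* an illustrative assumption):
+*
+* ib_srq_attr_t srq_attr;
+*
+* srq_attr.srq_limit = 32; // request IB_AE_SRQ_LIMIT_REACHED when
+* // fewer than 32 WRs remain on the SRQ
+* status = p_ci->modify_srq( h_srq, &srq_attr, IB_SRQ_LIMIT, NULL );
+*
+* status = p_ci->query_srq( h_srq, &srq_attr, NULL );
+*/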
+
+
+/****f* Verbs/ci_destroy_srq
+* NAME
+* ci_destroy_srq -- Destroy the specified Shared Receive Queue.
+* SYNOPSIS
+*/
+
+typedef ib_api_status_t
+(*ci_destroy_srq) (
+ IN const ib_srq_handle_t h_srq );
+/*
+* DESCRIPTION
+* Destroys the associated srq. The srq could have outstanding work requests
+* when this call is made. Any outstanding work requests *SHALL NOT* be
+* completed after this routine returns.
+* PARAMETERS
+* h_srq
+* [in] Handle to the srq that needs to be destroyed.
+* RETURN VALUE
+* IB_SUCCESS
+* The intent to destroy this srq is registered and no further
+* work requests will be processed. When no pending callbacks are in
+* progress, the destroy_callback function is invoked which marks the
+* destruction of the resource. The consumer can be guaranteed that
+* no future callbacks will be propagated on behalf of this resource.
+* IB_INVALID_SRQ_HANDLE
+* The handle passed is invalid.
+* IB_RESOURCE_BUSY
+* QPs are still associated with the srq.
+* NOTES
+* This call cannot be made from any of the notification functions invoked
+* by the Verbs driver, e.g. the completion handler or the async error
+* callback provided during the ci_open_ca() call. The call will block until
+* all references to this srq are released, which includes all the
+* pending callbacks returning back to the verbs provider driver.
+* SEE ALSO
+* ci_create_srq, ci_query_srq, ci_modify_srq
+******
+*/
+
+
 /****f* Verbs/ci_create_qp
 * NAME
 * ci_create_qp -- Create a Queue Pair for the specified HCA
@@ -2000,6 +2183,51 @@ typedef ib_api_status_t
 ******
 */
 
+/****f* Verbs/ci_post_srq_recv
+* NAME
+* ci_post_srq_recv -- Post a work request to a shared receive queue.
+* SYNOPSIS
+*/
+
+typedef ib_api_status_t
+(*ci_post_srq_recv) (
+ IN const ib_srq_handle_t h_srq,
+ IN ib_recv_wr_t* const p_recv_wr,
+ OUT ib_recv_wr_t **pp_failed );
+/*
+* DESCRIPTION
+* This routine queues a work request to a shared receive queue. Each
+* work request holds the data necessary to satisfy an incoming
+* receive message. If an attempt is made to queue more work requests than
+* what is available, an error is returned.
+* PARAMETERS
+* h_srq
+* [in] Handle to the srq to which the receive work request is being
+* posted.
+* p_recv_wr
+* [in] Holds the WRs to be posted to the shared receive queue.
+* pp_failed
+* [out] If any entry could not be posted with the CI, then this points
+* to the first WR that completed unsuccessfully. If all entries are
+* posted, then this field is set to NULL on successful exit.
+* RETURN VALUE
+* Any unsuccessful status indicates the status of the first failed request.
+*
+* IB_SUCCESS
+* The work request was successfully queued to the shared receive queue.
+* IB_INVALID_SRQ_HANDLE
+* srq_handle supplied is not valid.
+* IB_INSUFFICIENT_RESOURCES
+* The srq has exceeded its configured receive queue depth.
+* IB_INVALID_WR_TYPE
+* Invalid work request type found in the request.
+* SEE ALSO
+* ci_create_srq, ci_post_recv
+******
+*/
+
+
+
 /****f* Verbs/ci_post_recv
 * NAME
 * ci_post_recv -- Post a work request to the receive queue of a queue pair.
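+
+/*
+* EXAMPLE
+* Informative sketch of posting a chain of two receive work requests to an
+* SRQ through the ci_post_srq_recv verb above (buffer registration and the
+* ds_array contents are assumed to be set up already; names are
+* illustrative):
+*
+* ib_recv_wr_t wr[2];
+* ib_recv_wr_t *p_failed;
+*
+* wr[0].p_next = &wr[1]; // chain the two requests
+* wr[1].p_next = NULL; // terminate the list
+* // each wr[i].ds_array/num_ds describes one receive buffer
+*
+* status = p_ci->post_srq_recv( h_srq, &wr[0], &p_failed );
+* // on failure, p_failed references the first rejected WR
+*/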
@@ -2518,6 +2746,14 @@ typedef struct _ci_interface ci_modify_av modify_av; ci_destroy_av destroy_av; + /* + * SRQ Management Verbs + */ + ci_create_srq create_srq; + ci_modify_srq modify_srq; + ci_query_srq query_srq; + ci_destroy_srq destroy_srq; + /* * QP Management Verbs */ @@ -2568,6 +2804,7 @@ typedef struct _ci_interface */ ci_post_send post_send; ci_post_recv post_recv; + ci_post_srq_recv post_srq_recv; /* * Completion Processing and diff --git a/trunk/inc/iba/ib_types.h b/trunk/inc/iba/ib_types.h index 1a12dafa..7828f9d7 100644 --- a/trunk/inc/iba/ib_types.h +++ b/trunk/inc/iba/ib_types.h @@ -7257,6 +7257,7 @@ typedef struct _ib_mr* __ptr64 ib_mr_handle_t; typedef struct _mlnx_fmr* __ptr64 mlnx_fmr_handle_t; typedef struct _ib_mw* __ptr64 ib_mw_handle_t; typedef struct _ib_qp* __ptr64 ib_qp_handle_t; +typedef struct _ib_srq* __ptr64 ib_srq_handle_t; typedef struct _ib_cq* __ptr64 ib_cq_handle_t; typedef struct _ib_av* __ptr64 ib_av_handle_t; typedef struct _ib_mcast* __ptr64 ib_mcast_handle_t; @@ -7303,6 +7304,7 @@ typedef enum _ib_api_status_t IB_INVALID_MAX_WRS, IB_INVALID_MAX_SGE, IB_INVALID_CQ_SIZE, + IB_INVALID_SRQ_SIZE, IB_INVALID_SERVICE_TYPE, IB_INVALID_GID, IB_INVALID_LID, @@ -7311,6 +7313,7 @@ typedef enum _ib_api_status_t IB_INVALID_AV_HANDLE, IB_INVALID_CQ_HANDLE, IB_INVALID_QP_HANDLE, + IB_INVALID_SRQ_HANDLE, IB_INVALID_PD_HANDLE, IB_INVALID_MR_HANDLE, IB_INVALID_FMR_HANDLE, @@ -7324,6 +7327,7 @@ typedef enum _ib_api_status_t IB_VERBS_PROCESSING_DONE, /* See Notes above */ IB_INVALID_WR_TYPE, IB_QP_IN_TIMEWAIT, + IB_EE_IN_TIMEWAIT, IB_INVALID_PORT, IB_NOT_DONE, IB_INVALID_INDEX, @@ -7399,6 +7403,9 @@ typedef enum _ib_async_event_t IB_AE_PORT_ACTIVE, IB_AE_PORT_DOWN, IB_AE_CLIENT_REREGISTER, + IB_AE_SRQ_LIMIT_REACHED, + IB_AE_SRQ_CATAS_ERROR, + IB_AE_SRQ_QP_LAST_WQE_REACHED, IB_AE_UNKNOWN /* ALWAYS LAST ENUM VALUE */ } ib_async_event_t; @@ -7491,6 +7498,18 @@ typedef enum _ib_async_event_t * IB_AE_CLIENT_REREGISTER * The SM idicate to client to reregister its SA records. * +* IB_AE_SRQ_LIMIT_REACHED +* Reached SRQ low watermark +* +* IB_AE_SRQ_CATAS_ERROR +* An error occurred while processing or accessing the SRQ that prevents +* dequeuing a WQE from the SRQ and reporting of receive completions. +* +* IB_AE_SRQ_QP_LAST_WQE_REACHED +* An event, issued for a QP, associated with a shared receive queue, when +* a CQE is generated for the last WQE, or +* the QP gets in the Error State and there are no more WQEs on the RQ. +* * IB_AE_UNKNOWN * An unknown error occurred which cannot be attributed to any * resource; behavior is indeterminate. @@ -7828,6 +7847,9 @@ typedef struct _ib_ca_attr uint32_t max_qps_per_mcast_grp; uint32_t max_fmr; uint32_t max_map_per_fmr; + uint32_t max_srq; + uint32_t max_srq_wrs; + uint32_t max_srq_sges; /* * local_ack_delay: @@ -7845,6 +7867,7 @@ typedef struct _ib_ca_attr boolean_t av_port_check; boolean_t change_primary_port; boolean_t modify_wr_depth; + boolean_t modify_srq_depth; boolean_t current_qp_state_support; boolean_t shutdown_port_capability; boolean_t init_type_support; @@ -7932,6 +7955,21 @@ typedef struct _ib_ca_attr * Maximum limit on number of responder resources for incomming RDMA * operations on QPs. * +* max_fmr +* Maximum number of Fast Memory Regions supported. +* +* max_map_per_fmr +* Maximum number of mappings, supported by a Fast Memory Region. +* +* max_srq +* Maximum number of Shared Receive Queues supported. +* +* max_srq_wrs +* Maximum number of work requests supported by this SRQ. 
+* +* max_srq_sges +* Maximum number of scatter gather elements supported per work request on SRQ. +* * max_resp_res * Maximum number of responder resources per HCA, with this HCA used as * the target. @@ -7979,6 +8017,10 @@ typedef struct _ib_ca_attr * Indicates ability to modify QP depth during a modify QP operation. * Check the verb specification for permitted states. * +* modify_srq_depth +* Indicates ability to modify SRQ depth during a modify SRQ operation. +* Check the verb specification for permitted states. +* * current_qp_state_support * Indicates ability of the HCA to support the current QP state modifier * during a modify QP operation. @@ -8147,8 +8189,8 @@ typedef enum _ib_qp_type IB_QPT_RAW_ETHER, IB_QPT_MAD, /* InfiniBand Access Layer */ IB_QPT_QP0_ALIAS, /* InfiniBand Access Layer */ - IB_QPT_QP1_ALIAS /* InfiniBand Access Layer */ - + IB_QPT_QP1_ALIAS, /* InfiniBand Access Layer */ + IB_QPT_UNKNOWN } ib_qp_type_t; /* * VALUES @@ -8190,6 +8232,34 @@ typedef enum _ib_qp_type *****/ +/****f* IBA Base: Types/ib_get_qp_type_str +* NAME +* ib_get_qp_type_str +* +* DESCRIPTION +* Returns a string for the specified QP type +* +* SYNOPSIS +*/ +AL_EXPORT const char* AL_API +ib_get_qp_type_str( + IN uint8_t qp_type ); + +/* +* PARAMETERS +* qp_type +* [in] Encoded QP type as defined in the +QP attribute. + +* RETURN VALUES +* Pointer to the QP type string. +* +* NOTES +* +* SEE ALSO +* ib_qp_type_t +*********/ + /****d* Access Layer/ib_access_t * NAME * ib_access_t @@ -8257,6 +8327,51 @@ typedef enum _ib_apm_state } ib_apm_state_t; /*****/ +/****d* Access Layer/ib_srq_attr_mask_t +* NAME +* ib_srq_attr_mask_t +* +* DESCRIPTION +* Indicates valid fields in ib_srq_attr_t structure +* +* SYNOPSIS +*/ +typedef enum _ib_srq_attr_mask { + IB_SRQ_MAX_WR = 1 << 0, + IB_SRQ_LIMIT = 1 << 1, +} ib_srq_attr_mask_t; +/*****/ + + +/****s* Access Layer/ib_srq_attr_t +* NAME +* ib_srq_attr_t +* +* DESCRIPTION +* Attributes used to initialize a shared queue pair at creation time. +* +* SYNOPSIS +*/ +typedef struct _ib_srq_attr { + uint32_t max_wr; + uint32_t max_sge; + uint32_t srq_limit; +} ib_srq_attr_t; +/* +* FIELDS +* max_wr +* Specifies the max number of work request on SRQ. +* +* max_sge +* Specifies the max number of scatter/gather elements in one work request. +* +* srq_limit +* Specifies the low water mark for SRQ. +* +* SEE ALSO +* ib_qp_type_t, ib_srq_attr_mask_t +*****/ + /****s* Access Layer/ib_qp_create_t * NAME @@ -8278,6 +8393,7 @@ typedef struct _ib_qp_create ib_cq_handle_t h_sq_cq; ib_cq_handle_t h_rq_cq; + ib_srq_handle_t h_srq; boolean_t sq_signaled; @@ -8321,6 +8437,10 @@ typedef struct _ib_qp_create * work request completions. This handle must be NULL if the type is * IB_QPT_MAD, IB_QPT_QP0_ALIAS, or IB_QPT_QP1_ALIAS. * +* h_srq +* A handle to an SRQ to get receive completions via. Must be coded NULL +* when QP is not associated with SRQ +* * sq_signaled * A flag that is used to indicate whether the queue pair will signal * an event upon completion of a send work request. If set to @@ -8360,6 +8480,7 @@ typedef struct _ib_qp_attr ib_cq_handle_t h_sq_cq; ib_cq_handle_t h_rq_cq; + ib_srq_handle_t h_srq; boolean_t sq_signaled; diff --git a/trunk/inc/kernel/iba/ib_al_ifc.h b/trunk/inc/kernel/iba/ib_al_ifc.h index 7b15737f..b8fefadc 100644 --- a/trunk/inc/kernel/iba/ib_al_ifc.h +++ b/trunk/inc/kernel/iba/ib_al_ifc.h @@ -47,7 +47,7 @@ * IB resources provided by HCAs. 
*********/ -#define AL_INTERFACE_VERSION (9) +#define AL_INTERFACE_VERSION (10) @@ -126,6 +126,36 @@ typedef ib_api_status_t (*ib_pfn_destroy_av_t)( IN const ib_av_handle_t h_av ); +typedef ib_api_status_t +(*ib_pfn_create_srq_t)( + IN const ib_pd_handle_t h_pd, + IN const ib_srq_attr_t* const p_srq_attr, + IN const void* const srq_context, + IN const ib_pfn_event_cb_t srq_event_cb OPTIONAL, + OUT ib_srq_handle_t* const ph_srq ); + +typedef ib_api_status_t +(*ib_pfn_query_srq_t)( + IN const ib_srq_handle_t h_srq, + OUT ib_srq_attr_t* const p_srq_attr ); + +typedef ib_api_status_t +(*ib_pfn_modify_srq_t)( + IN const ib_srq_handle_t h_srq, + IN const ib_srq_attr_t* const p_srq_attr, + IN const ib_srq_attr_mask_t srq_attr_mask ); + +typedef ib_api_status_t +(*ib_pfn_destroy_srq_t)( + IN const ib_srq_handle_t h_srq, + IN const ib_pfn_destroy_cb_t destroy_cb OPTIONAL ); + +typedef ib_api_status_t +(*ib_pfn_post_srq_recv_t)( + IN const ib_srq_handle_t h_srq, + IN ib_recv_wr_t* const p_recv_wr, + OUT ib_recv_wr_t **pp_recv_failure OPTIONAL ); + typedef ib_api_status_t (*ib_pfn_create_qp_t)( IN const ib_pd_handle_t h_pd, @@ -686,6 +716,11 @@ typedef struct _ib_al_ifc mlnx_pfn_map_phys_fmr_t map_phys_mlnx_fmr; mlnx_pfn_unmap_fmr_t unmap_mlnx_fmr; mlnx_pfn_destroy_fmr_t destroy_mlnx_fmr; + ib_pfn_create_srq_t create_srq; + ib_pfn_query_srq_t query_srq; + ib_pfn_modify_srq_t modify_srq; + ib_pfn_destroy_srq_t destroy_srq; + ib_pfn_post_srq_recv_t post_srq_recv; } ib_al_ifc_t; diff --git a/trunk/inc/user/iba/ib_uvp.h b/trunk/inc/user/iba/ib_uvp.h index 495c1712..5bbe45ef 100644 --- a/trunk/inc/user/iba/ib_uvp.h +++ b/trunk/inc/user/iba/ib_uvp.h @@ -1123,6 +1123,387 @@ typedef void /********/ +/****f* user-mode Verbs/uvp_pre_create_srq +* NAME +* uvp_pre_create_srq -- Pre-ioctl function to Create a Shared Queue Pair. +* +* SYNOPSIS +*/ + +typedef ib_api_status_t +(AL_API *uvp_pre_create_srq) ( + IN const ib_pd_handle_t h_uvp_pd, + IN const ib_srq_attr_t* const p_srq_attr, + IN OUT ci_umv_buf_t *p_umv_buf ); + +/* +* DESCRIPTION +* uvp_pre_create_srq() is implemented by vendor. It is the pre-ioctl routine +* for ib_create_srq(). +* +* PARAMETERS +* h_uvp_pd +* [in] Vendor's Protection domain handle in user-mode library. +* p_srq_attr +* [in] Initial attributes with which the srq must be created. +* p_umv_buf +* [in out] On input, UAL provides this buffer template. +* On return from this function, p_umv_buf contains +* any vendor-specific record to be exchanged with the vendor's +* HCA driver. +* +* RETURN VALUE +* IB_SUCCESS +* The pre-ioctl call is successful. +* IB_INVALID_PD_HANDLE +* The PD handle is invalid. +* IB_UNSUPPORTED +* The specified queue pair type was not supported by the channel adapter. +* IB_INVALID_MAX_WRS +* The requested maximum send or receive work request depth could not be +* supported. +* IB_INVALID_MAX_SGE +* The requested maximum number of scatter-gather entries for the send or +* receive queue could not be supported. +* IB_INSUFFICIENT_RESOURCES +* Insufficient resources in Vendor library to complete the call. +* IB_INVALID_PARAMETER +* At least one parameter is invalid. +* +* PORTABILITY +* User Mode +* +* SEE ALSO +* uvp_post_create_srq_t, uvp_pre_query_srq, uvp_post_query_srq_t, uvp_pre_modify_srq, +* uvp_post_modify_srq_t, uvp_pre_destroy_srq, uvp_post_destroy_srq_t +* +********/ + +/********/ + +/****f* user-mode Verbs/uvp_post_create_srq_t +* NAME +* uvp_post_create_srq_t -- Post-ioctl function to Create a Queue Pair. 
+* +* SYNOPSIS +*/ + +typedef void +(AL_API *uvp_post_create_srq_t) ( + IN const ib_pd_handle_t h_uvp_pd, + IN ib_api_status_t ioctl_status, + OUT ib_srq_handle_t *ph_uvp_srq, + IN ci_umv_buf_t *p_umv_buf ); + +/* +* DESCRIPTION +* uvp_post_create_srq_t() is implemented by vendor. It is the post-ioctl routine +* for ib_create_srq(). +* +* PARAMETERS +* h_uvp_pd +* [in] Vendor's Protection domain handle in user-mode library. +* ioctl_status +* [in] The ioctl status of the AL API. +* ph_uvp_srq +* [out] Vendor's srq handle for the newly created srq (in user-mode +* library). +* p_umv_buf +* [in out] On input, it contains any vendor-specific private information +* exchanged with the vendor's Verbs Provider Driver (uvp_pre_create_srq). +* Vendor is expected to check vendor-specific status in +* umv_buf as appropriate. +* +* RETURN VALUE +* This function does not return a value. +* +* PORTABILITY +* User Mode +* +* SEE ALSO +* uvp_pre_create_srq, uvp_pre_query_srq, uvp_post_query_srq_t, uvp_pre_modify_srq, +* uvp_post_modify_srq_t, uvp_pre_destroy_srq, uvp_post_destroy_srq_t +* +********/ + +/********/ + +/****f* user-mode Verbs/uvp_pre_modify_srq +* NAME +* uvp_pre_modify_srq -- Pre-ioctl function to Modify attributes of the +* specified srq. +* +* SYNOPSIS +* +*/ + +typedef ib_api_status_t +(AL_API *uvp_pre_modify_srq) ( + IN const ib_srq_handle_t h_uvp_srq, + IN const ib_srq_attr_t * const p_srq_attr, + IN const ib_srq_attr_mask_t srq_attr_mask, + IN OUT ci_umv_buf_t *p_umv_buf ); + +/* +* DESCRIPTION +* uvp_pre_modify_srq() is implemented by vendor to modify the attributes of a +* srq. It is the pre-ioctl routine for ib_modify_srq(). +* +* PARAMETERS +* h_uvp_srq +* [in] Vendor's srq Handle to the queue pair (in user-mode library) +* whose state is to be modified. +* p_srq_attr +* [in] Specifies what attributes need to be modified in the srq. +* srq_attr_mask +* [in] Specifies which fields of ib_srq_attr_t are valid. +* p_umv_buf +* [in out] On input, UAL provides this buffer template. +* On return from this function, p_umv_buf contains +* any vendor-specific record to be exchanged with the vendor's +* HCA driver. +* +* RETURN VALUE +* IB_SUCCESS +* The pre-ioctl call is successful. +* IB_INSUFFICIENT_RESOURCES +* Insufficient resources to complete the requested operation. +* IB_INVALID_SRQ_HANDLE +* Invalid srq handle. +* IB_UNSUPPORTED +* Requested operation is not supported, for e.g. Atomic operations. +* +* PORTABILITY +* User mode +* +* SEE ALSO +* uvp_pre_create_srq, uvp_post_create_srq_t, uvp_pre_query_srq, uvp_post_query_srq_t, +* uvp_post_modify_srq_t, uvp_pre_destroy_srq, uvp_post_destroy_srq_t +* +********/ + +/********/ + +/****f* user-mode Verbs/uvp_post_modify_srq_t +* NAME +* uvp_post_modify_srq_t -- Post-ioctl function to Modify attributes of +* the specified srq. +* +* SYNOPSIS +*/ + +typedef void +(AL_API *uvp_post_modify_srq_t) ( + IN const ib_srq_handle_t h_uvp_srq, + IN ib_api_status_t ioctl_status, + IN ci_umv_buf_t *p_umv_buf ); + +/* +* DESCRIPTION +* uvp_post_modify_srq_t() is implemented by vendor to modify the srq attributes. +* It is the post-ioctl routine for ib_modify_srq(). +* +* PARAMETERS +* h_uvp_srq +* [in] Vendor's srq Handle to the queue pair (in user-mode library) +* whose state is modified. +* ioctl_status +* [in] The ioctl status of the AL API. +* p_umv_buf +* [in out] On input, it contains any vendor-specific private information +* exchanged with the vendor's Verbs Provider Driver (uvp_pre_modify_srq). 
+* Vendor is expected to check vendor-specific status in +* umv_buf as appropriate. +* +* RETURN VALUE +* This function does not return a value. +* +* PORTABILITY +* User mode +* +* SEE ALSO +* uvp_pre_create_srq, uvp_post_create_srq_t, uvp_pre_query_srq, uvp_post_query_srq_t, +* uvp_pre_modify_srq, uvp_pre_destroy_srq, uvp_post_destroy_srq_t +* +********/ + +/********/ + +/****f* user-mode Verbs/uvp_pre_query_srq +* NAME +* uvp_pre_query_srq -- Pre-ioctl function to Query the attributes of the srq +* +* SYNOPSIS +*/ + +typedef ib_api_status_t +(AL_API *uvp_pre_query_srq) ( + IN ib_srq_handle_t h_uvp_srq, + IN OUT ci_umv_buf_t *p_umv_buf ); + +/* +* DESCRIPTION +* uvp_pre_query_srq() is implemented by vendor. It is the pre-ioctl routine +* for the AL call ib_query_srq(). +* +* PARAMETERS +* h_uvp_srq +* [in] Vendor's handle to the srq (in user-mode library). +* p_umv_buf +* [in out] On input, UAL provides this buffer template. +* On return from this function, p_umv_buf contains +* any vendor-specific record to be exchanged with the vendor's +* HCA driver. +* RETURN VALUE +* IB_SUCCESS +* The pre-ioctl function succeeded. +* IB_INVALID_SRQ_HANDLE +* srq handle is invalid +* IB_INSUFFICIENT_RESOURCES +* Insufficient resources in Vendor library to complete the call. +* +* PORTABILITY +* User Mode +* +* SEE ALSO +* uvp_pre_create_srq, uvp_post_create_srq_t, uvp_post_query_srq_t, uvp_pre_modify_srq, +* uvp_post_modify_srq_t, uvp_pre_destroy_srq, uvp_post_destroy_srq_t +* +*********/ + +/********/ + +/****f* user-mode Verbs/uvp_post_query_srq_t +* NAME +* uvp_post_query_srq_t -- Post-ioctl operation for user-mode ib_query_srq() +* +* SYNOPSIS +*/ + +typedef void +(AL_API *uvp_post_query_srq_t) ( + IN ib_srq_handle_t h_uvp_srq, + IN ib_api_status_t ioctl_status, + IN ib_srq_attr_t *p_query_attr, + IN ci_umv_buf_t *p_umv_buf ); + +/* +* DESCRIPTION +* uvp_post_query_srq_t() is implemented by vendor. It is the post-ioctl routine +* for ib_query_srq(). +* UAL provides the results of the query to the vendor library in this +* post-ioctl routine. +* +* PARAMETERS +* h_uvp_srq +* [in] Vendor's handle to the srq (in user-mode library). +* ioctl_status +* [in] The ioctl status of the AL API. +* p_query_attr +* [in] srq attribute as returned by the ioctl. +* p_umv_buf +* [in out] On input, it contains any vendor-specific private information +* exchanged with the vendor's Verbs Provider Driver (uvp_pre_query_srq). +* Vendor is expected to check vendor-specific status in +* umv_buf as appropriate. +* +* RETURN VALUE +* This function does not return a value. +* +* PORTABILITY +* User Mode +* +* SEE ALSO +* uvp_pre_create_srq, uvp_post_create_srq_t, uvp_pre_query_srq, uvp_pre_modify_srq, +* uvp_post_modify_srq_t, uvp_pre_destroy_srq, uvp_post_destroy_srq_t +* +*********/ + +/********/ + +/****f* user-mode Verbs/uvp_pre_destroy_srq +* NAME +* uvp_pre_destroy_srq -- Pre-ioctl function to Destroy a Queue Pair. +* SYNOPSIS +*/ + +typedef ib_api_status_t +(AL_API *uvp_pre_destroy_srq) ( + IN const ib_srq_handle_t h_uvp_srq ); + +/* +* DESCRIPTION +* uvp_pre_destroy_srq() is the pre-ioctl routine implemented by vendor +* to destroy srq. +* UAL invokes this pre-ioctl routine to destroy srq. +* The vendor is expected to perform any preliminary steps in preparation +* for destroying the srq and perform any book-keeping. +* +* PARAMETERS +* h_uvp_srq +* [in] Vendor's Handle to the srq (in user-mode library) +* that needs to be destroyed. +* p_umv_buf +* [in out] On input, UAL provides this buffer template. 
+* On return from this function, p_umv_buf contains +* any vendor-specific record to be exchanged with the vendor's +* HCA driver. +* +* RETURN VALUE +* IB_SUCCESS +* The pre-ioctl call is successful. +* +* PORTABILITY +* User mode. +* +* SEE ALSO +* uvp_pre_create_srq, uvp_post_create_srq_t, uvp_pre_query_srq, uvp_post_query_srq_t, +* uvp_pre_modify_srq, uvp_post_modify_srq_t, uvp_post_destroy_srq_t +* +********/ + +/********/ + + +/****f* user-mode Verbs/uvp_post_destroy_srq_t +* NAME +* uvp_post_destroy_srq_t -- Post-ioctl function to Destroy a Queue Pair. +* +* SYNOPSIS +*/ + +typedef void +(AL_API *uvp_post_destroy_srq_t) ( + IN const ib_srq_handle_t h_uvp_srq, + IN ib_api_status_t ioctl_status ); + +/* +* DESCRIPTION +* uvp_post_destroy_srq_t() is implemented by vendor. It is the post-ioctl +* routine for ib_destroy_srq(). +* UAL invokes this post-ioctl routine to destroy srq when it receives +* asynchronous notification from the user-mode proxy in kernel. +* +* PARAMETERS +* h_uvp_srq +* [in] Vendor's Handle to the srq (in user-mode library) +* that needs to be destroyed. +* +* RETURN VALUE +* IB_SUCCESS +* The post-ioctl call is successful. +* +* PORTABILITY +* User mode. +* +* SEE ALSO +* uvp_pre_create_srq, uvp_post_create_srq_t, uvp_pre_query_srq, uvp_post_query_srq_t, +* uvp_pre_modify_srq, uvp_post_modify_srq_t, uvp_pre_destroy_srq +* +********/ + +/********/ + /****f* user-mode Verbs/uvp_pre_create_qp * NAME * uvp_pre_create_qp -- Pre-ioctl function to Create a Queue Pair. @@ -2362,6 +2743,58 @@ typedef ib_api_status_t /********/ +/****f* user-mode Verbs/uvp_post_srq_recv +* NAME +* uvp_post_srq_recv -- Post a work request to the shared receive queue of a queue pair. +* +* SYNOPSIS +*/ + +typedef ib_api_status_t +(AL_API *uvp_post_srq_recv) ( + IN const void* __ptr64 h_srq, + IN ib_recv_wr_t* const p_recv_wr, + OUT ib_recv_wr_t** pp_recv_failure ); + +/* +* DESCRIPTION +* This routine allows to queue a work request to the receive side of a shared +* queue pair. The work_req holds necessary data to satisfy an incoming +* receive message. If an attempt is made to queue more work requests than +* what is available, an error is returned. +* +* PARAMETERS +* h_srq +* [in] Type-cast as appropriate for user/kernel mode, this is +* the shared Queue pair handle to which the receive work request is being +* posted. +* p_recv_wr +* [in] List of recv work requests that needs to be posted. +* pp_recv_failure +* [out] The work requests that failed. + +* RETURN VALUE +* Any unsuccessful status indicates the status of the first failed request. +* +* IB_SUCCESS +* The work request was successfully queued to the receive side of the QP. +* IB_INVALID_SRQ_HANDLE +* srq_handle supplied is not valid. +* IB_INSUFFICIENT_RESOURCES +* The qp has exceeded its receive queue depth than what is has been +* configured. +* IB_INVALID_WR_TYPE +* Invalid work request type found in the request. +* +* PORTABILITY +* Kernel & User mode. 
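+*
+* NOTES
+* An informative sketch of the OS-bypass dispatch the UAL might perform
+* when the vendor library implements this entry point (names are
+* illustrative assumptions):
+*
+* if( p_uvp_intf->post_srq_recv )
+* status = p_uvp_intf->post_srq_recv( h_uvp_srq,
+* p_recv_wr, &p_failed_wr );
+* // a vendor that does not support OS-bypass leaves this pointer
+* // NULL, and the request is issued to the kernel proxy instead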
+* +* SEE ALSO +* +********/ + +/********/ + /****f* user-mode Verbs/uvp_peek_cq * NAME * uvp_peek_cq @@ -2803,6 +3236,22 @@ typedef struct _uvp_interface uvp_pre_destroy_av pre_destroy_av; uvp_post_destroy_av_t post_destroy_av; + /* + * SRQ Management Verbs + */ + uvp_pre_create_srq pre_create_srq; + uvp_post_create_srq_t post_create_srq; + + uvp_pre_modify_srq pre_modify_srq; + uvp_post_modify_srq_t post_modify_srq; + + uvp_pre_query_srq pre_query_srq; + uvp_post_query_srq_t post_query_srq; + + uvp_pre_destroy_srq pre_destroy_srq; + uvp_post_destroy_srq_t post_destroy_srq; + + /* * QP Management Verbs */ @@ -2854,6 +3303,7 @@ typedef struct _uvp_interface */ uvp_post_send post_send; uvp_post_recv post_recv; + uvp_post_srq_recv post_srq_recv; /* * Completion Processing and diff --git a/trunk/tests/alts/createanddestroyqp.c b/trunk/tests/alts/createanddestroyqp.c index a7c72e03..0e7e9795 100644 --- a/trunk/tests/alts/createanddestroyqp.c +++ b/trunk/tests/alts/createanddestroyqp.c @@ -186,6 +186,7 @@ alts_qp( /* * Create QP Attributes */ + cl_memclr(&qp_create, sizeof(ib_qp_create_t)); qp_create.sq_depth= 1; qp_create.rq_depth= 1; qp_create.sq_sge = 1; diff --git a/trunk/tests/wsd/user/test2/ibwrap.c b/trunk/tests/wsd/user/test2/ibwrap.c index 4eae3ccd..2441b772 100644 --- a/trunk/tests/wsd/user/test2/ibwrap.c +++ b/trunk/tests/wsd/user/test2/ibwrap.c @@ -204,6 +204,7 @@ int create_qp(struct qp_pack *qp) } /* Create a qp */ + cl_memclr(&qp_create, sizeof(ib_qp_create_t)); qp_create.qp_type = IB_QPT_RELIABLE_CONN; qp_create.h_rdd = NULL; qp_create.sq_depth = 255; diff --git a/trunk/tests/wsd/user/test3/ibwrap.c b/trunk/tests/wsd/user/test3/ibwrap.c index 0ac6dd0a..10223e0b 100644 --- a/trunk/tests/wsd/user/test3/ibwrap.c +++ b/trunk/tests/wsd/user/test3/ibwrap.c @@ -208,6 +208,7 @@ int create_qp(struct qp_pack *qp) } /* Create a qp */ + cl_memclr(&qp_create, sizeof(ib_qp_create_t)); qp_create.qp_type = IB_QPT_RELIABLE_CONN; qp_create.h_rdd = NULL; qp_create.sq_depth = 255; diff --git a/trunk/tools/vstat/user/vstat_main.c b/trunk/tools/vstat/user/vstat_main.c index 803c13ec..465e7fae 100644 --- a/trunk/tools/vstat/user/vstat_main.c +++ b/trunk/tools/vstat/user/vstat_main.c @@ -152,7 +152,7 @@ void printPortInfo(ib_port_attr_t* portPtr, BOOLEAN fullPrint){ printPortMTU(portPtr->mtu); if(fullPrint){ printf("\t\tmax_msg_sz=0x%x (Max message size)\n", portPtr->max_msg_size); - printf("\t\tcapability_mask=TBD\n"); + printf("\t\tcapability_mask=0x%x (Port capability mask)\n", portPtr->cap); printf("\t\tmax_vl_num=0x%x (Maximum number of VL supported by this port)\n", portPtr->max_vls); printf("\t\tbad_pkey_counter=0x%x (Bad PKey counter)\n", portPtr->pkey_ctr); printf("\t\tqkey_viol_counter=0x%x (QKey violation counter)\n", portPtr->qkey_ctr); @@ -229,10 +229,10 @@ void vstat_print_ca_attr(int idx, ib_ca_attr_t* ca_attr, BOOLEAN fullPrint){ printf("\tmax_qp_ous_wr = 0x%x (Maximum Number of outstanding WR on any WQ)\n", ca_attr->max_wrs); printf("\tmax_num_sg_ent = 0x%x (Max num of scatter/gather entries for WQE other than RD)\n", ca_attr->max_sges); printf("\tmax_num_sg_ent_rd = 0x%x (Max num of scatter/gather entries for RD WQE)\n", ca_attr->max_rd_sges); - printf("\tmax_num_srq = 0 (Maximum Number of SRQs supported)\n"); - printf("\tmax_wqe_per_srq = 0 (Maximum Number of outstanding WR on any SRQ)\n"); - printf("\tmax_srq_sentries = 0 (Maximum Number of scatter/gather entries for SRQ WQE)\n"); - printf("\tsrq_resize_supported = 0 (SRQ resize supported)\n"); + printf("\tmax_num_srq = 0x%x 
(Maximum Number of SRQs supported)\n", ca_attr->max_srq); + printf("\tmax_wqe_per_srq = 0x%x (Maximum Number of outstanding WR on any SRQ)\n", ca_attr->max_srq_wrs); + printf("\tmax_srq_sentries = 0x%x (Maximum Number of scatter/gather entries for SRQ WQE)\n", ca_attr->max_srq_sges); + printf("\tsrq_resize_supported = %d (SRQ resize supported)\n", ca_attr->modify_srq_depth); printf("\tmax_num_cq = 0x%x (Max num of supported CQs)\n", ca_attr->max_cqs); printf("\tmax_num_ent_cq = 0x%x (Max num of supported entries per CQ)\n", ca_attr->max_cqes); printf("\tmax_num_mr = 0x%x (Maximum number of memory region supported)\n", ca_attr->init_regions); diff --git a/trunk/ulp/opensm/user/include/iba/ib_types.h b/trunk/ulp/opensm/user/include/iba/ib_types.h index f14ffc68..691e41a3 100644 --- a/trunk/ulp/opensm/user/include/iba/ib_types.h +++ b/trunk/ulp/opensm/user/include/iba/ib_types.h @@ -3,11 +3,8 @@ * Copyright (c) 2002-2006 Mellanox Technologies LTD. All rights reserved. * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. * - * This software is available to you under a choice of one of two - * licenses. You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: + * This software is available to you under the OpenIB.org BSD license + * below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following @@ -35,6 +32,7 @@ */ + #if !defined(__IB_TYPES_H__) #define __IB_TYPES_H__ @@ -77,8 +75,6 @@ BEGIN_C_DECLS #define __ptr64 #endif - - /****h* IBA Base/Constants * NAME * Constants @@ -581,7 +577,7 @@ ib_class_is_vendor_specific_low( IN const uint8_t class_code ) { return( (class_code >= IB_MCLASS_VENDOR_LOW_RANGE_MIN) && - (class_code <= IB_MCLASS_VENDOR_LOW_RANGE_MAX)) ; + (class_code <= IB_MCLASS_VENDOR_LOW_RANGE_MAX) ); } /* * PARAMETERS @@ -613,7 +609,7 @@ ib_class_is_vendor_specific_high( IN const uint8_t class_code ) { return( (class_code >= IB_MCLASS_VENDOR_HIGH_RANGE_MIN) && - (class_code <= IB_MCLASS_VENDOR_HIGH_RANGE_MAX)) ; + (class_code <= IB_MCLASS_VENDOR_HIGH_RANGE_MAX) ); } /* * PARAMETERS @@ -665,6 +661,7 @@ ib_class_is_vendor_specific( /* * MAD methods */ + /****d* IBA Base: Constants/IB_MAX_METHOD * NAME * IB_MAX_METHOD @@ -676,6 +673,7 @@ ib_class_is_vendor_specific( */ #define IB_MAX_METHODS 128 /**********/ + /****d* IBA Base: Constants/IB_MAD_METHOD_RESP_MASK * NAME * IB_MAD_METHOD_RESP_MASK @@ -687,6 +685,7 @@ ib_class_is_vendor_specific( */ #define IB_MAD_METHOD_RESP_MASK 0x80 /**********/ + /****d* IBA Base: Constants/IB_MAD_METHOD_GET * NAME * IB_MAD_METHOD_GET @@ -698,6 +697,7 @@ ib_class_is_vendor_specific( */ #define IB_MAD_METHOD_GET 0x01 /**********/ + /****d* IBA Base: Constants/IB_MAD_METHOD_SET * NAME * IB_MAD_METHOD_SET @@ -709,6 +709,7 @@ ib_class_is_vendor_specific( */ #define IB_MAD_METHOD_SET 0x02 /**********/ + /****d* IBA Base: Constants/IB_MAD_METHOD_GET_RESP * NAME * IB_MAD_METHOD_GET_RESP @@ -764,6 +765,7 @@ ib_class_is_vendor_specific( */ #define IB_MAD_METHOD_SEND 0x03 /**********/ + /****d* IBA Base: Constants/IB_MAD_METHOD_TRAP * NAME * IB_MAD_METHOD_TRAP @@ -775,6 +777,7 @@ ib_class_is_vendor_specific( */ #define IB_MAD_METHOD_TRAP 0x05 /**********/ + /****d* IBA Base: Constants/IB_MAD_METHOD_REPORT * NAME * IB_MAD_METHOD_REPORT @@ -786,6 +789,7 @@ ib_class_is_vendor_specific( */ #define IB_MAD_METHOD_REPORT 
0x06 /**********/ + /****d* IBA Base: Constants/IB_MAD_METHOD_REPORT_RESP * NAME * IB_MAD_METHOD_REPORT_RESP @@ -797,6 +801,7 @@ ib_class_is_vendor_specific( */ #define IB_MAD_METHOD_REPORT_RESP 0x86 /**********/ + /****d* IBA Base: Constants/IB_MAD_METHOD_TRAP_REPRESS * NAME * IB_MAD_METHOD_TRAP_REPRESS @@ -808,6 +813,7 @@ ib_class_is_vendor_specific( */ #define IB_MAD_METHOD_TRAP_REPRESS 0x07 /**********/ + /****d* IBA Base: Constants/IB_MAD_STATUS_BUSY * NAME * IB_MAD_STATUS_BUSY @@ -817,8 +823,9 @@ ib_class_is_vendor_specific( * * SOURCE */ -#define IB_MAD_STATUS_BUSY (CL_HTON16(0x0001)) +#define IB_MAD_STATUS_BUSY (CL_HTON16(0x0001)) /**********/ + /****d* IBA Base: Constants/IB_MAD_STATUS_REDIRECT * NAME * IB_MAD_STATUS_REDIRECT @@ -830,6 +837,7 @@ ib_class_is_vendor_specific( */ #define IB_MAD_STATUS_REDIRECT (CL_HTON16(0x0002)) /**********/ + /****d* IBA Base: Constants/IB_MAD_STATUS_UNSUP_CLASS_VER * NAME * IB_MAD_STATUS_UNSUP_CLASS_VER @@ -839,8 +847,9 @@ ib_class_is_vendor_specific( * * SOURCE */ -#define IB_MAD_STATUS_UNSUP_CLASS_VER (CL_HTON16(0x0004)) +#define IB_MAD_STATUS_UNSUP_CLASS_VER (CL_HTON16(0x0004)) /**********/ + /****d* IBA Base: Constants/IB_MAD_STATUS_UNSUP_METHOD * NAME * IB_MAD_STATUS_UNSUP_METHOD @@ -852,6 +861,7 @@ ib_class_is_vendor_specific( */ #define IB_MAD_STATUS_UNSUP_METHOD (CL_HTON16(0x0008)) /**********/ + /****d* IBA Base: Constants/IB_MAD_STATUS_UNSUP_METHOD_ATTR * NAME * IB_MAD_STATUS_UNSUP_METHOD_ATTR @@ -861,8 +871,9 @@ ib_class_is_vendor_specific( * * SOURCE */ -#define IB_MAD_STATUS_UNSUP_METHOD_ATTR (CL_HTON16(0x000C)) +#define IB_MAD_STATUS_UNSUP_METHOD_ATTR (CL_HTON16(0x000C)) /**********/ + /****d* IBA Base: Constants/IB_MAD_STATUS_INVALID_FIELD * NAME * IB_MAD_STATUS_INVALID_FIELD @@ -878,16 +889,16 @@ ib_class_is_vendor_specific( #define IB_MAD_STATUS_CLASS_MASK (CL_HTON16(0xFF00)) #define IB_SA_MAD_STATUS_SUCCESS (CL_HTON16(0x0000)) -#define IB_SA_MAD_STATUS_NO_RESOURCES (CL_HTON16(0x0100)) -#define IB_SA_MAD_STATUS_REQ_INVALID (CL_HTON16(0x0200)) +#define IB_SA_MAD_STATUS_NO_RESOURCES (CL_HTON16(0x0100)) +#define IB_SA_MAD_STATUS_REQ_INVALID (CL_HTON16(0x0200)) #define IB_SA_MAD_STATUS_NO_RECORDS (CL_HTON16(0x0300)) -#define IB_SA_MAD_STATUS_TOO_MANY_RECORDS (CL_HTON16(0x0400)) -#define IB_SA_MAD_STATUS_INVALID_GID (CL_HTON16(0x0500)) -#define IB_SA_MAD_STATUS_INSUF_COMPS (CL_HTON16(0x0600)) +#define IB_SA_MAD_STATUS_TOO_MANY_RECORDS (CL_HTON16(0x0400)) +#define IB_SA_MAD_STATUS_INVALID_GID (CL_HTON16(0x0500)) +#define IB_SA_MAD_STATUS_INSUF_COMPS (CL_HTON16(0x0600)) -#define IB_DM_MAD_STATUS_NO_IOC_RESP (CL_HTON16(0x0100)) -#define IB_DM_MAD_STATUS_NO_SVC_ENTRIES (CL_HTON16(0x0200)) -#define IB_DM_MAD_STATUS_IOC_FAILURE (CL_HTON16(0x8000)) +#define IB_DM_MAD_STATUS_NO_IOC_RESP (CL_HTON16(0x0100)) +#define IB_DM_MAD_STATUS_NO_SVC_ENTRIES (CL_HTON16(0x0200)) +#define IB_DM_MAD_STATUS_IOC_FAILURE (CL_HTON16(0x8000)) /****d* IBA Base: Constants/IB_MAD_ATTR_CLASS_PORT_INFO * NAME @@ -900,6 +911,7 @@ ib_class_is_vendor_specific( */ #define IB_MAD_ATTR_CLASS_PORT_INFO (CL_NTOH16(0x0001)) /**********/ + /****d* IBA Base: Constants/IB_MAD_ATTR_NOTICE * NAME * IB_MAD_ATTR_NOTICE @@ -911,6 +923,7 @@ ib_class_is_vendor_specific( */ #define IB_MAD_ATTR_NOTICE (CL_NTOH16(0x0002)) /**********/ + /****d* IBA Base: Constants/IB_MAD_ATTR_INFORM_INFO * NAME * IB_MAD_ATTR_INFORM_INFO @@ -922,6 +935,7 @@ ib_class_is_vendor_specific( */ #define IB_MAD_ATTR_INFORM_INFO (CL_NTOH16(0x0003)) /**********/ + /****d* IBA Base: Constants/IB_MAD_ATTR_NODE_DESC * 
NAME * IB_MAD_ATTR_NODE_DESC @@ -932,6 +946,7 @@ ib_class_is_vendor_specific( * SOURCE */ #define IB_MAD_ATTR_NODE_DESC (CL_NTOH16(0x0010)) + /****d* IBA Base: Constants/IB_MAD_ATTR_PORT_SMPL_CTRL * NAME * IB_MAD_ATTR_PORT_SMPL_CTRL @@ -943,6 +958,7 @@ ib_class_is_vendor_specific( */ #define IB_MAD_ATTR_PORT_SMPL_CTRL (CL_NTOH16(0x0010)) /**********/ + /****d* IBA Base: Constants/IB_MAD_ATTR_NODE_INFO * NAME * IB_MAD_ATTR_NODE_INFO @@ -954,6 +970,7 @@ ib_class_is_vendor_specific( */ #define IB_MAD_ATTR_NODE_INFO (CL_NTOH16(0x0011)) /**********/ + /****d* IBA Base: Constants/IB_MAD_ATTR_PORT_SMPL_RSLT * NAME * IB_MAD_ATTR_PORT_SMPL_RSLT @@ -965,6 +982,7 @@ ib_class_is_vendor_specific( */ #define IB_MAD_ATTR_PORT_SMPL_RSLT (CL_NTOH16(0x0011)) /**********/ + /****d* IBA Base: Constants/IB_MAD_ATTR_SWITCH_INFO * NAME * IB_MAD_ATTR_SWITCH_INFO @@ -976,6 +994,7 @@ ib_class_is_vendor_specific( */ #define IB_MAD_ATTR_SWITCH_INFO (CL_NTOH16(0x0012)) /**********/ + /****d* IBA Base: Constants/IB_MAD_ATTR_PORT_CNTRS * NAME * IB_MAD_ATTR_PORT_CNTRS @@ -987,6 +1006,7 @@ ib_class_is_vendor_specific( */ #define IB_MAD_ATTR_PORT_CNTRS (CL_NTOH16(0x0012)) /**********/ + /****d* IBA Base: Constants/IB_MAD_ATTR_GUID_INFO * NAME * IB_MAD_ATTR_GUID_INFO @@ -998,6 +1018,7 @@ ib_class_is_vendor_specific( */ #define IB_MAD_ATTR_GUID_INFO (CL_NTOH16(0x0014)) /**********/ + /****d* IBA Base: Constants/IB_MAD_ATTR_PORT_INFO * NAME * IB_MAD_ATTR_PORT_INFO @@ -1009,6 +1030,7 @@ ib_class_is_vendor_specific( */ #define IB_MAD_ATTR_PORT_INFO (CL_NTOH16(0x0015)) /**********/ + /****d* IBA Base: Constants/IB_MAD_ATTR_P_KEY_TABLE * NAME * IB_MAD_ATTR_P_KEY_TABLE @@ -1020,6 +1042,7 @@ ib_class_is_vendor_specific( */ #define IB_MAD_ATTR_P_KEY_TABLE (CL_NTOH16(0x0016)) /**********/ + /****d* IBA Base: Constants/IB_MAD_ATTR_SLVL_TABLE * NAME * IB_MAD_ATTR_SLVL_TABLE @@ -1031,6 +1054,7 @@ ib_class_is_vendor_specific( */ #define IB_MAD_ATTR_SLVL_TABLE (CL_NTOH16(0x0017)) /**********/ + /****d* IBA Base: Constants/IB_MAD_ATTR_VL_ARBITRATION * NAME * IB_MAD_ATTR_VL_ARBITRATION @@ -1042,6 +1066,7 @@ ib_class_is_vendor_specific( */ #define IB_MAD_ATTR_VL_ARBITRATION (CL_NTOH16(0x0018)) /**********/ + /****d* IBA Base: Constants/IB_MAD_ATTR_LIN_FWD_TBL * NAME * IB_MAD_ATTR_LIN_FWD_TBL @@ -1053,6 +1078,7 @@ ib_class_is_vendor_specific( */ #define IB_MAD_ATTR_LIN_FWD_TBL (CL_NTOH16(0x0019)) /**********/ + /****d* IBA Base: Constants/IB_MAD_ATTR_RND_FWD_TBL * NAME * IB_MAD_ATTR_RND_FWD_TBL @@ -1064,6 +1090,7 @@ ib_class_is_vendor_specific( */ #define IB_MAD_ATTR_RND_FWD_TBL (CL_NTOH16(0x001A)) /**********/ + /****d* IBA Base: Constants/IB_MAD_ATTR_MCAST_FWD_TBL * NAME * IB_MAD_ATTR_MCAST_FWD_TBL @@ -1087,6 +1114,7 @@ ib_class_is_vendor_specific( */ #define IB_MAD_ATTR_NODE_RECORD (CL_NTOH16(0x0011)) /**********/ + /****d* IBA Base: Constants/IB_MAD_ATTR_PORTINFO_RECORD * NAME * IB_MAD_ATTR_PORTINFO_RECORD @@ -1099,7 +1127,6 @@ ib_class_is_vendor_specific( #define IB_MAD_ATTR_PORTINFO_RECORD (CL_NTOH16(0x0012)) /**********/ - /****d* IBA Base: Constants/IB_MAD_ATTR_LINK_RECORD * NAME * IB_MAD_ATTR_LINK_RECORD @@ -1121,7 +1148,7 @@ ib_class_is_vendor_specific( * * SOURCE */ -#define IB_MAD_ATTR_SM_INFO (CL_NTOH16(0x0020)) +#define IB_MAD_ATTR_SM_INFO (CL_NTOH16(0x0020)) /**********/ /****d* IBA Base: Constants/IB_MAD_ATTR_SMINFO_RECORD @@ -1129,7 +1156,7 @@ ib_class_is_vendor_specific( * IB_MAD_ATTR_SMINFO_RECORD * * DESCRIPTION -* SmInfoRecord attribute (15.2.5) +* SMInfoRecord attribute (15.2.5) * * SOURCE */ @@ -1193,7 +1220,7 @@ 
ib_class_is_vendor_specific( * * SOURCE */ -#define IB_MAD_ATTR_LFT_RECORD (CL_NTOH16(0x0015)) +#define IB_MAD_ATTR_LFT_RECORD (CL_NTOH16(0x0015)) /**********/ /****d* IBA Base: Constants/IB_MAD_ATTR_PKEYTBL_RECORD @@ -1526,9 +1553,9 @@ ib_class_is_vendor_specific( * SOURCE */ #define IB_PATH_SELECTOR_GREATER_THAN 0 -#define IB_PATH_SELECTOR_LESS_THAN 1 -#define IB_PATH_SELECTOR_EXACTLY 2 -#define IB_PATH_SELECTOR_LARGEST 3 +#define IB_PATH_SELECTOR_LESS_THAN 1 +#define IB_PATH_SELECTOR_EXACTLY 2 +#define IB_PATH_SELECTOR_LARGEST 3 /**********/ /****d* IBA Base: Constants/IB_SMINFO_STATE_NOTACTIVE @@ -1542,6 +1569,7 @@ ib_class_is_vendor_specific( */ #define IB_SMINFO_STATE_NOTACTIVE 0 /**********/ + /****d* IBA Base: Constants/IB_SMINFO_STATE_DISCOVERING * NAME * IB_SMINFO_STATE_DISCOVERING @@ -1553,6 +1581,7 @@ ib_class_is_vendor_specific( */ #define IB_SMINFO_STATE_DISCOVERING 1 /**********/ + /****d* IBA Base: Constants/IB_SMINFO_STATE_STANDBY * NAME * IB_SMINFO_STATE_STANDBY @@ -1564,6 +1593,7 @@ ib_class_is_vendor_specific( */ #define IB_SMINFO_STATE_STANDBY 2 /**********/ + /****d* IBA Base: Constants/IB_SMINFO_STATE_MASTER * NAME * IB_SMINFO_STATE_MASTER @@ -1575,6 +1605,7 @@ ib_class_is_vendor_specific( */ #define IB_SMINFO_STATE_MASTER 3 /**********/ + /****d* IBA Base: Constants/IB_PATH_REC_SELECTOR_MASK * NAME * IB_PATH_REC_SELECTOR_MASK @@ -1587,6 +1618,7 @@ ib_class_is_vendor_specific( */ #define IB_PATH_REC_SELECTOR_MASK 0xC0 /**********/ + /****d* IBA Base: Constants/IB_PATH_REC_BASE_MASK * NAME * IB_PATH_REC_BASE_MASK @@ -1608,6 +1640,7 @@ ib_class_is_vendor_specific( * Definitions are from the InfiniBand Architecture Specification v1.2 * *********/ + /****d* IBA Base: Types/ib_net16_t * NAME * ib_net16_t @@ -1619,6 +1652,7 @@ ib_class_is_vendor_specific( */ typedef uint16_t ib_net16_t; /**********/ + /****d* IBA Base: Types/ib_net32_t * NAME * ib_net32_t @@ -1630,6 +1664,7 @@ typedef uint16_t ib_net16_t; */ typedef uint32_t ib_net32_t; /**********/ + /****d* IBA Base: Types/ib_net64_t * NAME * ib_net64_t @@ -1641,6 +1676,7 @@ typedef uint32_t ib_net32_t; */ typedef uint64_t ib_net64_t; /**********/ + /****d* IBA Base: Types/ib_gid_prefix_t * NAME * ib_gid_prefix_t @@ -1663,7 +1699,7 @@ typedef ib_net64_t ib_gid_prefix_t; */ #define IB_LINK_NO_CHANGE 0 #define IB_LINK_DOWN 1 -#define IB_LINK_INIT 2 +#define IB_LINK_INIT 2 #define IB_LINK_ARMED 3 #define IB_LINK_ACTIVE 4 #define IB_LINK_ACT_DEFER 5 @@ -1764,13 +1800,13 @@ AL_INLINE uint8_t AL_API ib_get_port_state_from_str( IN char* p_port_state_str ) { - if( !strncmp(p_port_state_str,"No State Change (NOP)",12) ) + if( !strncmp(p_port_state_str,"No State Change (NOP)", 12) ) return(0); - else if( !strncmp(p_port_state_str, "DOWN",4) ) + else if( !strncmp(p_port_state_str, "DOWN", 4) ) return(1); else if( !strncmp(p_port_state_str, "INIT", 4) ) return(2); - else if( !strncmp(p_port_state_str,"ARMED" , 5) ) + else if( !strncmp(p_port_state_str, "ARMED" , 5) ) return(3); else if( !strncmp(p_port_state_str, "ACTIVE", 6) ) return(4); @@ -1801,8 +1837,8 @@ ib_get_port_state_from_str( * * SOURCE */ -#define IB_JOIN_STATE_FULL 1 -#define IB_JOIN_STATE_NON 2 +#define IB_JOIN_STATE_FULL 1 +#define IB_JOIN_STATE_NON 2 #define IB_JOIN_STATE_SEND_ONLY 4 /**********/ @@ -1817,7 +1853,7 @@ ib_get_port_state_from_str( */ AL_INLINE ib_net16_t AL_API ib_pkey_get_base( - IN const ib_net16_t pkey ) + IN const ib_net16_t pkey ) { return( (ib_net16_t)(pkey & IB_PKEY_BASE_MASK) ); } @@ -1833,6 +1869,7 @@ ib_pkey_get_base( * * SEE ALSO *********/ 
+ /****f* IBA Base: Types/ib_pkey_is_full_member * NAME * ib_pkey_is_full_member @@ -1844,7 +1881,7 @@ ib_pkey_get_base( */ AL_INLINE boolean_t AL_API ib_pkey_is_full_member( - IN const ib_net16_t pkey ) + IN const ib_net16_t pkey ) { return( (pkey & IB_PKEY_TYPE_MASK) == IB_PKEY_TYPE_MASK ); } @@ -1876,7 +1913,7 @@ ib_pkey_is_full_member( */ OSM_INLINE boolean_t AL_API ib_pkey_is_invalid( - IN const ib_net16_t pkey ) + IN const ib_net16_t pkey ) { if (ib_pkey_get_base(pkey) == 0x0000) return TRUE; @@ -1907,18 +1944,18 @@ ib_pkey_is_invalid( #include typedef union _ib_gid { - uint8_t raw[16]; + uint8_t raw[16]; struct _ib_gid_unicast { ib_gid_prefix_t prefix; - ib_net64_t interface_id; + ib_net64_t interface_id; } PACK_SUFFIX unicast; struct _ib_gid_multicast { - uint8_t header[2]; - uint8_t raw_group_id[14]; + uint8_t header[2]; + uint8_t raw_group_id[14]; } PACK_SUFFIX multicast; @@ -1941,7 +1978,7 @@ typedef union _ib_gid AL_INLINE boolean_t AL_API ib_gid_is_multicast( - IN const ib_gid_t* p_gid ) + IN const ib_gid_t* p_gid ) { return( p_gid->raw[0] == 0xFF ); } @@ -1957,8 +1994,8 @@ ib_gid_is_multicast( */ AL_INLINE void AL_API ib_gid_set_default( - IN ib_gid_t* const p_gid, - IN const ib_net64_t interface_id ) + IN ib_gid_t* const p_gid, + IN const ib_net64_t interface_id ) { p_gid->unicast.prefix = IB_DEFAULT_SUBNET_PREFIX; p_gid->unicast.interface_id = interface_id; @@ -1979,6 +2016,7 @@ ib_gid_set_default( * SEE ALSO * ib_gid_t *********/ + /****f* IBA Base: Types/ib_gid_get_subnet_prefix * NAME * ib_gid_get_subnet_prefix @@ -1990,7 +2028,7 @@ ib_gid_set_default( */ AL_INLINE ib_net64_t AL_API ib_gid_get_subnet_prefix( - IN const ib_gid_t* const p_gid ) + IN const ib_gid_t* const p_gid ) { return( p_gid->unicast.prefix ); } @@ -2007,6 +2045,7 @@ ib_gid_get_subnet_prefix( * SEE ALSO * ib_gid_t *********/ + /****f* IBA Base: Types/ib_gid_is_link_local * NAME * ib_gid_is_link_local @@ -2019,7 +2058,7 @@ ib_gid_get_subnet_prefix( */ AL_INLINE boolean_t AL_API ib_gid_is_link_local( - IN const ib_gid_t* const p_gid ) + IN const ib_gid_t* const p_gid ) { return( ib_gid_get_subnet_prefix( p_gid ) == IB_DEFAULT_SUBNET_PREFIX ); } @@ -2037,6 +2076,7 @@ ib_gid_is_link_local( * SEE ALSO * ib_gid_t *********/ + /****f* IBA Base: Types/ib_gid_is_site_local * NAME * ib_gid_is_site_local @@ -2049,7 +2089,7 @@ ib_gid_is_link_local( */ AL_INLINE boolean_t AL_API ib_gid_is_site_local( - IN const ib_gid_t* const p_gid ) + IN const ib_gid_t* const p_gid ) { return( ( ib_gid_get_subnet_prefix( p_gid ) & CL_HTON64( 0xFFFFFFFFFFFF0000ULL ) ) == CL_HTON64( 0xFEC0000000000000ULL ) ); @@ -2068,6 +2108,7 @@ ib_gid_is_site_local( * SEE ALSO * ib_gid_t *********/ + /****f* IBA Base: Types/ib_gid_get_guid * NAME * ib_gid_get_guid @@ -2079,7 +2120,7 @@ ib_gid_is_site_local( */ AL_INLINE ib_net64_t AL_API ib_gid_get_guid( - IN const ib_gid_t* const p_gid ) + IN const ib_gid_t* const p_gid ) { return( p_gid->unicast.interface_id ); } @@ -2188,7 +2229,7 @@ typedef struct _ib_path_rec #define IB_PR_COMPMASK_SGID (CL_HTON64(((uint64_t)1)<<3)) #define IB_PR_COMPMASK_DLID (CL_HTON64(((uint64_t)1)<<4)) #define IB_PR_COMPMASK_SLID (CL_HTON64(((uint64_t)1)<<5)) -#define IB_PR_COMPMASK_RAWTRAFFIC (CL_HTON64(((uint64_t)1)<<6)) +#define IB_PR_COMPMASK_RAWTRAFFIC (CL_HTON64(((uint64_t)1)<<6)) #define IB_PR_COMPMASK_RESV0 (CL_HTON64(((uint64_t)1)<<7)) #define IB_PR_COMPMASK_FLOWLABEL (CL_HTON64(((uint64_t)1)<<8)) #define IB_PR_COMPMASK_HOPLIMIT (CL_HTON64(((uint64_t)1)<<9)) @@ -2209,7 +2250,7 @@ typedef struct _ib_path_rec 
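A standalone sketch of the GID helpers above, assuming IB_DEFAULT_SUBNET_PREFIX is the link-local prefix 0xFE80000000000000 (the site-local test mirrors the inline body shown); host-order uint64_t fields stand in for the network-order ib_gid_t members, and the GUID value is purely illustrative.

#include <stdint.h>

struct gid {
	uint64_t prefix;	/* subnet prefix (host order here) */
	uint64_t interface_id;	/* port GUID */
};

static void gid_set_default( struct gid *g, uint64_t guid )
{
	g->prefix = 0xFE80000000000000ULL;	/* assumed IB_DEFAULT_SUBNET_PREFIX */
	g->interface_id = guid;
}

static int gid_is_link_local( const struct gid *g )
{
	return g->prefix == 0xFE80000000000000ULL;
}

static int gid_is_site_local( const struct gid *g )
{
	/* same test as ib_gid_is_site_local: top 48 bits == 0xFEC0... */
	return (g->prefix & 0xFFFFFFFFFFFF0000ULL) == 0xFEC0000000000000ULL;
}

int main( void )
{
	struct gid g;
	gid_set_default( &g, 0x1234ULL );	/* illustrative GUID */
	return ( gid_is_link_local( &g ) && !gid_is_site_local( &g ) ) ? 0 : 1;
}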
#define IB_LR_COMPMASK_FROM_LID (CL_HTON64(((uint64_t)1)<<0)) #define IB_LR_COMPMASK_FROM_PORT (CL_HTON64(((uint64_t)1)<<1)) #define IB_LR_COMPMASK_TO_PORT (CL_HTON64(((uint64_t)1)<<2)) -#define IB_LR_COMPMASK_TO_LID (CL_HTON64(((uint64_t)1)<<3)) +#define IB_LR_COMPMASK_TO_LID (CL_HTON64(((uint64_t)1)<<3)) /* VL Arbitration Record MASKs */ #define IB_VLA_COMPMASK_LID (CL_HTON64(((uint64_t)1)<<0)) @@ -2235,17 +2276,17 @@ typedef struct _ib_path_rec #define IB_NR_COMPMASK_RESERVED1 (CL_HTON64(((uint64_t)1)<<1)) #define IB_NR_COMPMASK_BASEVERSION (CL_HTON64(((uint64_t)1)<<2)) #define IB_NR_COMPMASK_CLASSVERSION (CL_HTON64(((uint64_t)1)<<3)) -#define IB_NR_COMPMASK_NODETYPE (CL_HTON64(((uint64_t)1)<<4)) -#define IB_NR_COMPMASK_NUMPORTS (CL_HTON64(((uint64_t)1)<<5)) -#define IB_NR_COMPMASK_SYSIMAGEGUID (CL_HTON64(((uint64_t)1)<<6)) -#define IB_NR_COMPMASK_NODEGUID (CL_HTON64(((uint64_t)1)<<7)) -#define IB_NR_COMPMASK_PORTGUID (CL_HTON64(((uint64_t)1)<<8)) -#define IB_NR_COMPMASK_PARTCAP (CL_HTON64(((uint64_t)1)<<9)) -#define IB_NR_COMPMASK_DEVID (CL_HTON64(((uint64_t)1)<<10)) -#define IB_NR_COMPMASK_REV (CL_HTON64(((uint64_t)1)<<11)) -#define IB_NR_COMPMASK_PORTNUM (CL_HTON64(((uint64_t)1)<<12)) -#define IB_NR_COMPMASK_VENDID (CL_HTON64(((uint64_t)1)<<13)) -#define IB_NR_COMPMASK_NODEDESC (CL_HTON64(((uint64_t)1)<<14)) +#define IB_NR_COMPMASK_NODETYPE (CL_HTON64(((uint64_t)1)<<4)) +#define IB_NR_COMPMASK_NUMPORTS (CL_HTON64(((uint64_t)1)<<5)) +#define IB_NR_COMPMASK_SYSIMAGEGUID (CL_HTON64(((uint64_t)1)<<6)) +#define IB_NR_COMPMASK_NODEGUID (CL_HTON64(((uint64_t)1)<<7)) +#define IB_NR_COMPMASK_PORTGUID (CL_HTON64(((uint64_t)1)<<8)) +#define IB_NR_COMPMASK_PARTCAP (CL_HTON64(((uint64_t)1)<<9)) +#define IB_NR_COMPMASK_DEVID (CL_HTON64(((uint64_t)1)<<10)) +#define IB_NR_COMPMASK_REV (CL_HTON64(((uint64_t)1)<<11)) +#define IB_NR_COMPMASK_PORTNUM (CL_HTON64(((uint64_t)1)<<12)) +#define IB_NR_COMPMASK_VENDID (CL_HTON64(((uint64_t)1)<<13)) +#define IB_NR_COMPMASK_NODEDESC (CL_HTON64(((uint64_t)1)<<14)) /* Service Record Component Masks Sec 15.2.5.14 Ver 1.1*/ #define IB_SR_COMPMASK_SID (CL_HTON64(((uint64_t)1)<<0)) @@ -2287,55 +2328,55 @@ typedef struct _ib_path_rec #define IB_SR_COMPMASK_SDATA64_1 (CL_HTON64(((uint64_t)1)<<36)) /* Port Info Record Component Masks */ -#define IB_PIR_COMPMASK_LID (CL_HTON64(((uint64_t)1)<<0)) -#define IB_PIR_COMPMASK_PORTNUM (CL_HTON64(((uint64_t)1)<<1)) -#define IB_PIR_COMPMASK_RESV1 (CL_HTON64(((uint64_t)1)<<2)) -#define IB_PIR_COMPMASK_MKEY (CL_HTON64(((uint64_t)1)<<3)) -#define IB_PIR_COMPMASK_GIDPRE (CL_HTON64(((uint64_t)1)<<4)) -#define IB_PIR_COMPMASK_BASELID (CL_HTON64(((uint64_t)1)<<5)) -#define IB_PIR_COMPMASK_SMLID (CL_HTON64(((uint64_t)1)<<6)) -#define IB_PIR_COMPMASK_CAPMASK (CL_HTON64(((uint64_t)1)<<7)) -#define IB_PIR_COMPMASK_DIAGCODE (CL_HTON64(((uint64_t)1)<<8)) -#define IB_PIR_COMPMASK_MKEYLEASEPRD (CL_HTON64(((uint64_t)1)<<9)) -#define IB_PIR_COMPMASK_LOCALPORTNUM (CL_HTON64(((uint64_t)1)<<10)) -#define IB_PIR_COMPMASK_LINKWIDTHENABLED (CL_HTON64(((uint64_t)1)<<11)) -#define IB_PIR_COMPMASK_LNKWIDTHSUPPORT (CL_HTON64(((uint64_t)1)<<12)) -#define IB_PIR_COMPMASK_LNKWIDTHACTIVE (CL_HTON64(((uint64_t)1)<<13)) -#define IB_PIR_COMPMASK_LNKSPEEDSUPPORT (CL_HTON64(((uint64_t)1)<<14)) -#define IB_PIR_COMPMASK_PORTSTATE (CL_HTON64(((uint64_t)1)<<15)) -#define IB_PIR_COMPMASK_PORTPHYSTATE (CL_HTON64(((uint64_t)1)<<16)) -#define IB_PIR_COMPMASK_LINKDWNDFLTSTATE (CL_HTON64(((uint64_t)1)<<17)) -#define IB_PIR_COMPMASK_MKEYPROTBITS (CL_HTON64(((uint64_t)1)<<18)) 
-#define IB_PIR_COMPMASK_RESV2 (CL_HTON64(((uint64_t)1)<<19)) -#define IB_PIR_COMPMASK_LMC (CL_HTON64(((uint64_t)1)<<20)) -#define IB_PIR_COMPMASK_LINKSPEEDACTIVE (CL_HTON64(((uint64_t)1)<<21)) -#define IB_PIR_COMPMASK_LINKSPEEDENABLE (CL_HTON64(((uint64_t)1)<<22)) -#define IB_PIR_COMPMASK_NEIGHBORMTU (CL_HTON64(((uint64_t)1)<<23)) -#define IB_PIR_COMPMASK_MASTERSMSL (CL_HTON64(((uint64_t)1)<<24)) -#define IB_PIR_COMPMASK_VLCAP (CL_HTON64(((uint64_t)1)<<25)) -#define IB_PIR_COMPMASK_INITTYPE (CL_HTON64(((uint64_t)1)<<26)) -#define IB_PIR_COMPMASK_VLHIGHLIMIT (CL_HTON64(((uint64_t)1)<<27)) -#define IB_PIR_COMPMASK_VLARBHIGHCAP (CL_HTON64(((uint64_t)1)<<28)) -#define IB_PIR_COMPMASK_VLARBLOWCAP (CL_HTON64(((uint64_t)1)<<29)) -#define IB_PIR_COMPMASK_INITTYPEREPLY (CL_HTON64(((uint64_t)1)<<30)) -#define IB_PIR_COMPMASK_MTUCAP (CL_HTON64(((uint64_t)1)<<31)) -#define IB_PIR_COMPMASK_VLSTALLCNT (CL_HTON64(((uint64_t)1)<<32)) -#define IB_PIR_COMPMASK_HOQLIFE (CL_HTON64(((uint64_t)1)<<33)) -#define IB_PIR_COMPMASK_OPVLS (CL_HTON64(((uint64_t)1)<<34)) -#define IB_PIR_COMPMASK_PARENFIN (CL_HTON64(((uint64_t)1)<<35)) -#define IB_PIR_COMPMASK_PARENFOUT (CL_HTON64(((uint64_t)1)<<36)) -#define IB_PIR_COMPMASK_FILTERRAWIN (CL_HTON64(((uint64_t)1)<<37)) -#define IB_PIR_COMPMASK_FILTERRAWOUT (CL_HTON64(((uint64_t)1)<<38)) -#define IB_PIR_COMPMASK_MKEYVIO (CL_HTON64(((uint64_t)1)<<39)) -#define IB_PIR_COMPMASK_PKEYVIO (CL_HTON64(((uint64_t)1)<<40)) -#define IB_PIR_COMPMASK_QKEYVIO (CL_HTON64(((uint64_t)1)<<41)) -#define IB_PIR_COMPMASK_GUIDCAP (CL_HTON64(((uint64_t)1)<<42)) -#define IB_PIR_COMPMASK_RESV3 (CL_HTON64(((uint64_t)1)<<43)) -#define IB_PIR_COMPMASK_SUBNTO (CL_HTON64(((uint64_t)1)<<44)) -#define IB_PIR_COMPMASK_RESV4 (CL_HTON64(((uint64_t)1)<<45)) -#define IB_PIR_COMPMASK_RESPTIME (CL_HTON64(((uint64_t)1)<<46)) -#define IB_PIR_COMPMASK_LOCALPHYERR (CL_HTON64(((uint64_t)1)<<47)) -#define IB_PIR_COMPMASK_OVERRUNERR (CL_HTON64(((uint64_t)1)<<48)) +#define IB_PIR_COMPMASK_LID (CL_HTON64(((uint64_t)1)<<0)) +#define IB_PIR_COMPMASK_PORTNUM (CL_HTON64(((uint64_t)1)<<1)) +#define IB_PIR_COMPMASK_RESV1 (CL_HTON64(((uint64_t)1)<<2)) +#define IB_PIR_COMPMASK_MKEY (CL_HTON64(((uint64_t)1)<<3)) +#define IB_PIR_COMPMASK_GIDPRE (CL_HTON64(((uint64_t)1)<<4)) +#define IB_PIR_COMPMASK_BASELID (CL_HTON64(((uint64_t)1)<<5)) +#define IB_PIR_COMPMASK_SMLID (CL_HTON64(((uint64_t)1)<<6)) +#define IB_PIR_COMPMASK_CAPMASK (CL_HTON64(((uint64_t)1)<<7)) +#define IB_PIR_COMPMASK_DIAGCODE (CL_HTON64(((uint64_t)1)<<8)) +#define IB_PIR_COMPMASK_MKEYLEASEPRD (CL_HTON64(((uint64_t)1)<<9)) +#define IB_PIR_COMPMASK_LOCALPORTNUM (CL_HTON64(((uint64_t)1)<<10)) +#define IB_PIR_COMPMASK_LINKWIDTHENABLED (CL_HTON64(((uint64_t)1)<<11)) +#define IB_PIR_COMPMASK_LNKWIDTHSUPPORT (CL_HTON64(((uint64_t)1)<<12)) +#define IB_PIR_COMPMASK_LNKWIDTHACTIVE (CL_HTON64(((uint64_t)1)<<13)) +#define IB_PIR_COMPMASK_LNKSPEEDSUPPORT (CL_HTON64(((uint64_t)1)<<14)) +#define IB_PIR_COMPMASK_PORTSTATE (CL_HTON64(((uint64_t)1)<<15)) +#define IB_PIR_COMPMASK_PORTPHYSTATE (CL_HTON64(((uint64_t)1)<<16)) +#define IB_PIR_COMPMASK_LINKDWNDFLTSTATE (CL_HTON64(((uint64_t)1)<<17)) +#define IB_PIR_COMPMASK_MKEYPROTBITS (CL_HTON64(((uint64_t)1)<<18)) +#define IB_PIR_COMPMASK_RESV2 (CL_HTON64(((uint64_t)1)<<19)) +#define IB_PIR_COMPMASK_LMC (CL_HTON64(((uint64_t)1)<<20)) +#define IB_PIR_COMPMASK_LINKSPEEDACTIVE (CL_HTON64(((uint64_t)1)<<21)) +#define IB_PIR_COMPMASK_LINKSPEEDENABLE (CL_HTON64(((uint64_t)1)<<22)) +#define IB_PIR_COMPMASK_NEIGHBORMTU 
(CL_HTON64(((uint64_t)1)<<23)) +#define IB_PIR_COMPMASK_MASTERSMSL (CL_HTON64(((uint64_t)1)<<24)) +#define IB_PIR_COMPMASK_VLCAP (CL_HTON64(((uint64_t)1)<<25)) +#define IB_PIR_COMPMASK_INITTYPE (CL_HTON64(((uint64_t)1)<<26)) +#define IB_PIR_COMPMASK_VLHIGHLIMIT (CL_HTON64(((uint64_t)1)<<27)) +#define IB_PIR_COMPMASK_VLARBHIGHCAP (CL_HTON64(((uint64_t)1)<<28)) +#define IB_PIR_COMPMASK_VLARBLOWCAP (CL_HTON64(((uint64_t)1)<<29)) +#define IB_PIR_COMPMASK_INITTYPEREPLY (CL_HTON64(((uint64_t)1)<<30)) +#define IB_PIR_COMPMASK_MTUCAP (CL_HTON64(((uint64_t)1)<<31)) +#define IB_PIR_COMPMASK_VLSTALLCNT (CL_HTON64(((uint64_t)1)<<32)) +#define IB_PIR_COMPMASK_HOQLIFE (CL_HTON64(((uint64_t)1)<<33)) +#define IB_PIR_COMPMASK_OPVLS (CL_HTON64(((uint64_t)1)<<34)) +#define IB_PIR_COMPMASK_PARENFIN (CL_HTON64(((uint64_t)1)<<35)) +#define IB_PIR_COMPMASK_PARENFOUT (CL_HTON64(((uint64_t)1)<<36)) +#define IB_PIR_COMPMASK_FILTERRAWIN (CL_HTON64(((uint64_t)1)<<37)) +#define IB_PIR_COMPMASK_FILTERRAWOUT (CL_HTON64(((uint64_t)1)<<38)) +#define IB_PIR_COMPMASK_MKEYVIO (CL_HTON64(((uint64_t)1)<<39)) +#define IB_PIR_COMPMASK_PKEYVIO (CL_HTON64(((uint64_t)1)<<40)) +#define IB_PIR_COMPMASK_QKEYVIO (CL_HTON64(((uint64_t)1)<<41)) +#define IB_PIR_COMPMASK_GUIDCAP (CL_HTON64(((uint64_t)1)<<42)) +#define IB_PIR_COMPMASK_RESV3 (CL_HTON64(((uint64_t)1)<<43)) +#define IB_PIR_COMPMASK_SUBNTO (CL_HTON64(((uint64_t)1)<<44)) +#define IB_PIR_COMPMASK_RESV4 (CL_HTON64(((uint64_t)1)<<45)) +#define IB_PIR_COMPMASK_RESPTIME (CL_HTON64(((uint64_t)1)<<46)) +#define IB_PIR_COMPMASK_LOCALPHYERR (CL_HTON64(((uint64_t)1)<<47)) +#define IB_PIR_COMPMASK_OVERRUNERR (CL_HTON64(((uint64_t)1)<<48)) /* Multicast Member Record Component Masks */ #define IB_MCR_COMPMASK_GID (CL_HTON64(((uint64_t)1)<<0)) @@ -2383,21 +2424,21 @@ typedef struct _ib_path_rec */ AL_INLINE void AL_API ib_path_rec_init_local( - IN ib_path_rec_t* const p_rec, - IN ib_gid_t* const p_dgid, - IN ib_gid_t* const p_sgid, - IN ib_net16_t dlid, - IN ib_net16_t slid, - IN uint8_t num_path, - IN ib_net16_t pkey, - IN uint8_t sl, - IN uint8_t mtu_selector, - IN uint8_t mtu, - IN uint8_t rate_selector, - IN uint8_t rate, - IN uint8_t pkt_life_selector, - IN uint8_t pkt_life, - IN uint8_t preference ) + IN ib_path_rec_t* const p_rec, + IN ib_gid_t* const p_dgid, + IN ib_gid_t* const p_sgid, + IN ib_net16_t dlid, + IN ib_net16_t slid, + IN uint8_t num_path, + IN ib_net16_t pkey, + IN uint8_t sl, + IN uint8_t mtu_selector, + IN uint8_t mtu, + IN uint8_t rate_selector, + IN uint8_t rate, + IN uint8_t pkt_life_selector, + IN uint8_t pkt_life, + IN uint8_t preference ) { p_rec->dgid = *p_dgid; p_rec->sgid = *p_sgid; @@ -2493,7 +2534,7 @@ ib_path_rec_init_local( */ AL_INLINE uint8_t AL_API ib_path_rec_num_path( - IN const ib_path_rec_t* const p_rec ) + IN const ib_path_rec_t* const p_rec ) { return( p_rec->num_path &0x7F ); } @@ -2503,7 +2544,7 @@ ib_path_rec_num_path( * [in] Pointer to the path record object. * * RETURN VALUES -* Maximum number of paths toreturn for each unique SGID_DGID combination. +* Maximum number of paths to return for each unique SGID_DGID combination. 
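The accessors that follow all share one encoding: a base value packed with a 2-bit selector in bits 7:6 (IB_PATH_REC_SELECTOR_MASK, 0xC0), while num_path keeps its count in the low 7 bits as the & 0x7F above shows. A hedged sketch of the decode -- 0x3F is an assumed value for IB_PATH_REC_BASE_MASK, and MTU code 4 (2048 bytes) follows the IBA MTU enumeration.

#include <stdint.h>
#include <stdio.h>

#define SELECTOR_MASK	0xC0	/* IB_PATH_REC_SELECTOR_MASK */
#define BASE_MASK	0x3F	/* assumed IB_PATH_REC_BASE_MASK */

int main( void )
{
	/* selector = 2 (IB_PATH_SELECTOR_EXACTLY), base = 4 (2048-byte MTU) */
	uint8_t mtu = (uint8_t)((2 << 6) | 4);
	uint8_t num_path = 0x85;	/* 5 paths; the top bit carries a separate flag */

	printf( "mtu_sel=%u mtu=%u\n", (unsigned)((mtu & SELECTOR_MASK) >> 6), (unsigned)(mtu & BASE_MASK) );
	printf( "num_path=%u\n", (unsigned)(num_path & 0x7F) );
	return 0;
}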
* * NOTES * @@ -2522,7 +2563,7 @@ ib_path_rec_num_path( */ AL_INLINE uint8_t AL_API ib_path_rec_sl( - IN const ib_path_rec_t* const p_rec ) + IN const ib_path_rec_t* const p_rec ) { return( (uint8_t)((cl_ntoh16( p_rec->sl )) & 0xF) ); } @@ -2551,7 +2592,7 @@ ib_path_rec_sl( */ AL_INLINE uint8_t AL_API ib_path_rec_mtu( - IN const ib_path_rec_t* const p_rec ) + IN const ib_path_rec_t* const p_rec ) { return( (uint8_t)(p_rec->mtu & IB_PATH_REC_BASE_MASK) ); } @@ -2586,7 +2627,7 @@ ib_path_rec_mtu( */ AL_INLINE uint8_t AL_API ib_path_rec_mtu_sel( - IN const ib_path_rec_t* const p_rec ) + IN const ib_path_rec_t* const p_rec ) { return( (uint8_t)((p_rec->mtu & IB_PATH_REC_SELECTOR_MASK) >> 6) ); } @@ -2619,7 +2660,7 @@ ib_path_rec_mtu_sel( */ AL_INLINE uint8_t AL_API ib_path_rec_rate( - IN const ib_path_rec_t* const p_rec ) + IN const ib_path_rec_t* const p_rec ) { return( (uint8_t)(p_rec->rate & IB_PATH_REC_BASE_MASK) ); } @@ -2658,7 +2699,7 @@ ib_path_rec_rate( */ AL_INLINE uint8_t AL_API ib_path_rec_rate_sel( - IN const ib_path_rec_t* const p_rec ) + IN const ib_path_rec_t* const p_rec ) { return( (uint8_t)((p_rec->rate & IB_PATH_REC_SELECTOR_MASK) >> 6) ); } @@ -2691,7 +2732,7 @@ ib_path_rec_rate_sel( */ AL_INLINE uint8_t AL_API ib_path_rec_pkt_life( - IN const ib_path_rec_t* const p_rec ) + IN const ib_path_rec_t* const p_rec ) { return( (uint8_t)(p_rec->pkt_life & IB_PATH_REC_BASE_MASK) ); } @@ -2701,7 +2742,7 @@ ib_path_rec_pkt_life( * [in] Pointer to the path record object. * * RETURN VALUES -* Encoded path pkt_life = 4.096 µsec * 2 ** PacketLifeTime. +* Encoded path pkt_life = 4.096 µsec * 2 ** PacketLifeTime. * * NOTES * @@ -2720,7 +2761,7 @@ ib_path_rec_pkt_life( */ AL_INLINE uint8_t AL_API ib_path_rec_pkt_life_sel( - IN const ib_path_rec_t* const p_rec ) + IN const ib_path_rec_t* const p_rec ) { return( (uint8_t)((p_rec->pkt_life & IB_PATH_REC_SELECTOR_MASK) >> 6 )); } @@ -2753,7 +2794,7 @@ ib_path_rec_pkt_life_sel( */ AL_INLINE uint32_t AL_API ib_path_rec_flow_lbl( - IN const ib_path_rec_t* const p_rec ) + IN const ib_path_rec_t* const p_rec ) { return( ((cl_ntoh32(p_rec->hop_flow_raw) >> 8) & 0x000FFFFF) ); } @@ -2782,7 +2823,7 @@ ib_path_rec_flow_lbl( */ AL_INLINE uint8_t AL_API ib_path_rec_hop_limit( - IN const ib_path_rec_t* const p_rec ) + IN const ib_path_rec_t* const p_rec ) { return( (uint8_t)(p_rec->hop_flow_raw & 0x000000FF ) ); } @@ -2815,6 +2856,7 @@ ib_path_rec_hop_limit( */ #define IB_CLASS_CAP_TRAP 0x0001 /*********/ + /****s* IBA Base: Constants/IB_CLASS_CAP_GETSET * NAME * IB_CLASS_CAP_GETSET @@ -2830,6 +2872,7 @@ ib_path_rec_hop_limit( */ #define IB_CLASS_CAP_GETSET 0x0002 /*********/ + /****s* IBA Base: Constants/IB_CLASS_RESP_TIME_MASK * NAME * IB_CLASS_RESP_TIME_MASK @@ -2935,6 +2978,7 @@ typedef struct _ib_class_port_info * IB_CLASS_CAP_GETSET, IB_CLASS_CAP_TRAP * *********/ + /****s* IBA Base: Types/ib_sm_info_t * NAME * ib_sm_info_t @@ -2982,7 +3026,7 @@ typedef struct _ib_sm_info */ AL_INLINE uint8_t AL_API ib_sminfo_get_priority( - IN const ib_sm_info_t* const p_smi ) + IN const ib_sm_info_t* const p_smi ) { return( (uint8_t)((p_smi->pri_state & 0xF0)>>4) ); } @@ -3010,7 +3054,7 @@ ib_sminfo_get_priority( */ AL_INLINE uint8_t AL_API ib_sminfo_get_state( - IN const ib_sm_info_t* const p_smi ) + IN const ib_sm_info_t* const p_smi ) { return( (uint8_t)(p_smi->pri_state & 0x0F) ); } @@ -3128,13 +3172,13 @@ typedef struct _ib_rmpp_mad */ AL_INLINE void AL_API ib_mad_init_new( - IN ib_mad_t* const p_mad, - IN const uint8_t mgmt_class, - IN const uint8_t 
class_ver, - IN const uint8_t method, - IN const ib_net64_t trans_id, - IN const ib_net16_t attr_id, - IN const ib_net32_t attr_mod ) + IN ib_mad_t* const p_mad, + IN const uint8_t mgmt_class, + IN const uint8_t class_ver, + IN const uint8_t method, + IN const ib_net64_t trans_id, + IN const ib_net16_t attr_id, + IN const ib_net32_t attr_mod ) { CL_ASSERT( p_mad ); p_mad->base_ver = 1; @@ -3191,9 +3235,9 @@ ib_mad_init_new( */ AL_INLINE void AL_API ib_mad_init_response( - IN const ib_mad_t* const p_req_mad, - IN ib_mad_t* const p_mad, - IN const ib_net16_t status ) + IN const ib_mad_t* const p_req_mad, + IN ib_mad_t* const p_mad, + IN const ib_net16_t status ) { CL_ASSERT( p_req_mad ); CL_ASSERT( p_mad ); @@ -3236,7 +3280,7 @@ ib_mad_init_response( */ AL_INLINE boolean_t AL_API ib_mad_is_response( - IN const ib_mad_t* const p_mad ) + IN const ib_mad_t* const p_mad ) { CL_ASSERT( p_mad ); return( (p_mad->method & IB_MAD_METHOD_RESP_MASK) == @@ -3257,31 +3301,29 @@ ib_mad_is_response( * ib_mad_t *********/ - -#define IB_RMPP_TYPE_DATA 1 -#define IB_RMPP_TYPE_ACK 2 -#define IB_RMPP_TYPE_STOP 3 -#define IB_RMPP_TYPE_ABORT 4 +#define IB_RMPP_TYPE_DATA 1 +#define IB_RMPP_TYPE_ACK 2 +#define IB_RMPP_TYPE_STOP 3 +#define IB_RMPP_TYPE_ABORT 4 #define IB_RMPP_NO_RESP_TIME 0x1F -#define IB_RMPP_FLAG_ACTIVE 0x01 -#define IB_RMPP_FLAG_FIRST 0x02 -#define IB_RMPP_FLAG_LAST 0x04 +#define IB_RMPP_FLAG_ACTIVE 0x01 +#define IB_RMPP_FLAG_FIRST 0x02 +#define IB_RMPP_FLAG_LAST 0x04 #define IB_RMPP_STATUS_SUCCESS 0 -#define IB_RMPP_STATUS_RESX 1 /* resources exhausted */ -#define IB_RMPP_STATUS_T2L 118 /* time too long */ +#define IB_RMPP_STATUS_RESX 1 /* resources exhausted */ +#define IB_RMPP_STATUS_T2L 118 /* time too long */ #define IB_RMPP_STATUS_BAD_LEN 119 /* incon. last and payload len */ #define IB_RMPP_STATUS_BAD_SEG 120 /* incon. 
first and segment no */ -#define IB_RMPP_STATUS_BADT 121 /* bad rmpp type */ -#define IB_RMPP_STATUS_W2S 122 /* newwindowlast too small */ -#define IB_RMPP_STATUS_S2B 123 /* segment no too big */ +#define IB_RMPP_STATUS_BADT 121 /* bad rmpp type */ +#define IB_RMPP_STATUS_W2S 122 /* new window last too small */ +#define IB_RMPP_STATUS_S2B 123 /* segment no too big */ #define IB_RMPP_STATUS_BAD_STATUS 124 /* illegal status */ -#define IB_RMPP_STATUS_UNV 125 /* unsupported version */ -#define IB_RMPP_STATUS_TMR 126 /* too many retries */ +#define IB_RMPP_STATUS_UNV 125 /* unsupported version */ +#define IB_RMPP_STATUS_TMR 126 /* too many retries */ #define IB_RMPP_STATUS_UNSPEC 127 /* unspecified */ - /****f* IBA Base: Types/ib_rmpp_is_flag_set * NAME * ib_rmpp_is_flag_set @@ -3293,8 +3335,8 @@ */ AL_INLINE boolean_t AL_API ib_rmpp_is_flag_set( - IN const ib_rmpp_mad_t* const p_rmpp_mad, - IN const uint8_t flag ) + IN const ib_rmpp_mad_t* const p_rmpp_mad, + IN const uint8_t flag ) { CL_ASSERT( p_rmpp_mad ); return( (p_rmpp_mad->rmpp_flags & flag) == flag ); @@ -3318,8 +3360,8 @@ ib_rmpp_is_flag_set( AL_INLINE void AL_API ib_rmpp_set_resp_time( - IN ib_rmpp_mad_t* const p_rmpp_mad, - IN const uint8_t resp_time ) + IN ib_rmpp_mad_t* const p_rmpp_mad, + IN const uint8_t resp_time ) { CL_ASSERT( p_rmpp_mad ); p_rmpp_mad->rmpp_flags |= (resp_time << 3); @@ -3328,13 +3370,12 @@ ib_rmpp_set_resp_time( AL_INLINE uint8_t AL_API ib_rmpp_get_resp_time( - IN const ib_rmpp_mad_t* const p_rmpp_mad ) + IN const ib_rmpp_mad_t* const p_rmpp_mad ) { CL_ASSERT( p_rmpp_mad ); return( (uint8_t)(p_rmpp_mad->rmpp_flags >> 3) ); } - /****d* IBA Base: Constants/IB_SMP_DIRECTION * NAME * IB_SMP_DIRECTION @@ -3344,8 +3385,8 @@ ib_rmpp_get_resp_time( * * SOURCE */ -#define IB_SMP_DIRECTION_HO 0x8000 -#define IB_SMP_DIRECTION (CL_HTON16(IB_SMP_DIRECTION_HO)) +#define IB_SMP_DIRECTION_HO 0x8000 +#define IB_SMP_DIRECTION (CL_HTON16(IB_SMP_DIRECTION_HO)) /**********/ /****d* IBA Base: Constants/IB_SMP_STATUS_MASK @@ -3358,7 +3399,7 @@ ib_rmpp_get_resp_time( * SOURCE */ #define IB_SMP_STATUS_MASK_HO 0x7FFF -#define IB_SMP_STATUS_MASK (CL_HTON16(IB_SMP_STATUS_MASK_HO)) +#define IB_SMP_STATUS_MASK (CL_HTON16(IB_SMP_STATUS_MASK_HO)) /**********/ /****s* IBA Base: Types/ib_smp_t @@ -3465,7 +3506,7 @@ typedef struct _ib_smp */ AL_INLINE ib_net16_t AL_API ib_smp_get_status( - IN const ib_smp_t* const p_smp ) + IN const ib_smp_t* const p_smp ) { return( (ib_net16_t)(p_smp->status & IB_SMP_STATUS_MASK) ); } @@ -3494,7 +3535,7 @@ ib_smp_get_status( */ AL_INLINE boolean_t AL_API ib_smp_is_response( - IN const ib_smp_t* const p_smp ) + IN const ib_smp_t* const p_smp ) { return( ib_mad_is_response( (const ib_mad_t*)p_smp ) ); } @@ -3511,6 +3552,7 @@ ib_smp_is_response( * SEE ALSO * ib_smp_t *********/ + /****f* IBA Base: Types/ib_smp_is_d * NAME * ib_smp_is_d @@ -3522,7 +3564,7 @@ ib_smp_is_response( */ AL_INLINE boolean_t AL_API ib_smp_is_d( - IN const ib_smp_t* const p_smp ) + IN const ib_smp_t* const p_smp ) { return( (p_smp->status & IB_SMP_DIRECTION) == IB_SMP_DIRECTION ); } @@ -3555,16 +3597,16 @@ ib_smp_is_d( */ AL_INLINE void AL_API ib_smp_init_new( - IN ib_smp_t* const p_smp, - IN const uint8_t method, - IN const ib_net64_t trans_id, - IN const ib_net16_t attr_id, - IN const ib_net32_t attr_mod, - IN const uint8_t hop_count, - IN const ib_net64_t m_key, - IN const uint8_t* path_out, - IN const ib_net16_t dr_slid, - IN const ib_net16_t dr_dlid ) + IN ib_smp_t* const p_smp, + IN const uint8_t method, + IN
const ib_net64_t trans_id, + IN const ib_net16_t attr_id, + IN const ib_net32_t attr_mod, + IN const uint8_t hop_count, + IN const ib_net64_t m_key, + IN const uint8_t* path_out, + IN const ib_net16_t dr_slid, + IN const ib_net16_t dr_dlid ) { CL_ASSERT( p_smp ); CL_ASSERT( hop_count < IB_SUBNET_PATH_HOPS_MAX ); @@ -3584,14 +3626,14 @@ ib_smp_init_new( p_smp->dr_dlid = dr_dlid; cl_memclr( p_smp->resv1, - sizeof(p_smp->resv1) + - sizeof(p_smp->data) + - sizeof(p_smp->initial_path) + - sizeof(p_smp->return_path) ); + sizeof(p_smp->resv1) + + sizeof(p_smp->data) + + sizeof(p_smp->initial_path) + + sizeof(p_smp->return_path) ); /* copy the path */ cl_memcpy( &p_smp->initial_path, path_out, - sizeof( p_smp->initial_path ) ); + sizeof( p_smp->initial_path ) ); } /* * PARAMETERS @@ -3630,6 +3672,7 @@ ib_smp_init_new( * SEE ALSO * ib_mad_t *********/ + /****f* IBA Base: Types/ib_smp_get_payload_ptr * NAME * ib_smp_get_payload_ptr @@ -3641,7 +3684,7 @@ ib_smp_init_new( */ AL_INLINE void* AL_API ib_smp_get_payload_ptr( - IN const ib_smp_t* const p_smp ) + IN const ib_smp_t* const p_smp ) { return( (void*)p_smp->data ); } @@ -3686,6 +3729,7 @@ typedef struct _ib_node_info } PACK_SUFFIX ib_node_info_t; #include /************/ + /****s* IBA Base: Types/ib_sa_mad_t * NAME * ib_sa_mad_t @@ -3732,23 +3776,20 @@ typedef struct _ib_sa_mad /**********/ #define IB_SA_MAD_HDR_SIZE (sizeof(ib_sa_mad_t) - IB_SA_DATA_SIZE) - - AL_INLINE uint32_t AL_API ib_get_attr_size( - IN const ib_net16_t attr_offset ) + IN const ib_net16_t attr_offset ) { return( ((uint32_t)cl_ntoh16( attr_offset )) << 3 ); } AL_INLINE ib_net16_t AL_API ib_get_attr_offset( - IN const uint32_t attr_size ) + IN const uint32_t attr_size ) { return( cl_hton16( (uint16_t)(attr_size >> 3) ) ); } - /****f* IBA Base: Types/ib_sa_mad_get_payload_ptr * NAME * ib_sa_mad_get_payload_ptr @@ -3760,7 +3801,7 @@ ib_get_attr_offset( */ AL_INLINE void* AL_API ib_sa_mad_get_payload_ptr( - IN const ib_sa_mad_t* const p_sa_mad ) + IN const ib_sa_mad_t* const p_sa_mad ) { return( (void*)p_sa_mad->data ); } @@ -3797,7 +3838,7 @@ ib_sa_mad_get_payload_ptr( */ AL_INLINE uint8_t AL_API ib_node_info_get_local_port_num( - IN const ib_node_info_t* const p_ni ) + IN const ib_node_info_t* const p_ni ) { return( (uint8_t)(( p_ni->port_num_vendor_id & IB_NODE_INFO_PORT_NUM_MASK ) @@ -3816,6 +3857,7 @@ ib_node_info_get_local_port_num( * SEE ALSO * ib_node_info_t *********/ + /****f* IBA Base: Types/ib_node_info_get_vendor_id * NAME * ib_node_info_get_vendor_id @@ -3827,7 +3869,7 @@ ib_node_info_get_local_port_num( */ AL_INLINE ib_net32_t AL_API ib_node_info_get_vendor_id( - IN const ib_node_info_t* const p_ni ) + IN const ib_node_info_t* const p_ni ) { return( (ib_net32_t)( p_ni->port_num_vendor_id & IB_NODE_INFO_VEND_ID_MASK ) ); @@ -3894,10 +3936,10 @@ typedef struct _ib_port_info uint8_t link_width_enabled; uint8_t link_width_supported; uint8_t link_width_active; - uint8_t state_info1; /* LinkSpeedSupported and PortState */ - uint8_t state_info2; /* PortPhysState and LinkDownDefaultState */ + uint8_t state_info1; /* LinkSpeedSupported and PortState */ + uint8_t state_info2; /* PortPhysState and LinkDownDefaultState */ uint8_t mkey_lmc; - uint8_t link_speed; /* LinkSpeedEnabled and LinkSpeedActive */ + uint8_t link_speed; /* LinkSpeedEnabled and LinkSpeedActive */ uint8_t mtu_smsl; uint8_t vl_cap; /* VLCap and InitType */ uint8_t vl_high_limit; @@ -3919,17 +3961,17 @@ typedef struct _ib_port_info #include /************/ -#define IB_PORT_STATE_MASK 0x0F -#define 
IB_PORT_LMC_MASK 0x07 -#define IB_PORT_MPB_MASK 0xC0 -#define IB_PORT_MPB_SHIFT 6 -#define IB_PORT_LINK_SPEED_SHIFT 4 -#define IB_PORT_LINK_SPEED_SUPPORTED_MASK 0xF0 -#define IB_PORT_LINK_SPEED_ACTIVE_MASK 0xF0 -#define IB_PORT_LINK_SPEED_ENABLED_MASK 0x0F -#define IB_PORT_PHYS_STATE_MASK 0xF0 -#define IB_PORT_PHYS_STATE_SHIFT 4 -#define IB_PORT_LNKDWNDFTSTATE_MASK 0x0F +#define IB_PORT_STATE_MASK 0x0F +#define IB_PORT_LMC_MASK 0x07 +#define IB_PORT_MPB_MASK 0xC0 +#define IB_PORT_MPB_SHIFT 6 +#define IB_PORT_LINK_SPEED_SHIFT 4 +#define IB_PORT_LINK_SPEED_SUPPORTED_MASK 0xF0 +#define IB_PORT_LINK_SPEED_ACTIVE_MASK 0xF0 +#define IB_PORT_LINK_SPEED_ENABLED_MASK 0x0F +#define IB_PORT_PHYS_STATE_MASK 0xF0 +#define IB_PORT_PHYS_STATE_SHIFT 4 +#define IB_PORT_LNKDWNDFTSTATE_MASK 0x0F #define IB_PORT_CAP_RESV0 (CL_NTOH32(0x00000001)) #define IB_PORT_CAP_IS_SM (CL_NTOH32(0x00000002)) @@ -3975,7 +4017,7 @@ typedef struct _ib_port_info */ AL_INLINE uint8_t AL_API ib_port_info_get_port_state( - IN const ib_port_info_t* const p_pi ) + IN const ib_port_info_t* const p_pi ) { return( (uint8_t)(p_pi->state_info1 & IB_PORT_STATE_MASK) ); } @@ -3991,6 +4033,7 @@ ib_port_info_get_port_state( * * SEE ALSO *********/ + /****f* IBA Base: Types/ib_port_info_set_port_state * NAME * ib_port_info_set_port_state @@ -4002,8 +4045,8 @@ ib_port_info_get_port_state( */ AL_INLINE void AL_API ib_port_info_set_port_state( - IN ib_port_info_t* const p_pi, - IN const uint8_t port_state ) + IN ib_port_info_t* const p_pi, + IN const uint8_t port_state ) { p_pi->state_info1 = (uint8_t)((p_pi->state_info1 & 0xF0) | port_state ); } @@ -4022,6 +4065,7 @@ ib_port_info_set_port_state( * * SEE ALSO *********/ + /****f* IBA Base: Types/ib_port_info_get_vl_cap * NAME * ib_port_info_get_vl_cap @@ -4049,6 +4093,7 @@ ib_port_info_get_vl_cap( * * SEE ALSO *********/ + /****f* IBA Base: Types/ib_port_info_get_init_type * NAME * ib_port_info_get_init_type @@ -4076,6 +4121,7 @@ ib_port_info_get_init_type( * * SEE ALSO *********/ + /****f* IBA Base: Types/ib_port_info_get_op_vls * NAME * ib_port_info_get_op_vls @@ -4103,6 +4149,7 @@ ib_port_info_get_op_vls( * * SEE ALSO *********/ + /****f* IBA Base: Types/ib_port_info_set_op_vls * NAME * ib_port_info_set_op_vls @@ -4114,8 +4161,8 @@ ib_port_info_get_op_vls( */ AL_INLINE void AL_API ib_port_info_set_op_vls( - IN ib_port_info_t* const p_pi, - IN const uint8_t op_vls ) + IN ib_port_info_t* const p_pi, + IN const uint8_t op_vls ) { p_pi->vl_enforce = (uint8_t)((p_pi->vl_enforce & 0x0F) | (op_vls << 4) ); } @@ -4134,6 +4181,7 @@ ib_port_info_set_op_vls( * * SEE ALSO *********/ + /****f* IBA Base: Types/ib_port_info_set_state_no_change * NAME * ib_port_info_set_state_no_change @@ -4145,7 +4193,7 @@ ib_port_info_set_op_vls( */ AL_INLINE void AL_API ib_port_info_set_state_no_change( - IN ib_port_info_t* const p_pi ) + IN ib_port_info_t* const p_pi ) { ib_port_info_set_port_state( p_pi, 0 ); p_pi->state_info2 = 0; @@ -4174,7 +4222,7 @@ ib_port_info_set_state_no_change( */ AL_INLINE uint8_t AL_API ib_port_info_get_link_speed_sup( - IN const ib_port_info_t* const p_pi ) + IN const ib_port_info_t* const p_pi ) { return( (uint8_t)((p_pi->state_info1 & IB_PORT_LINK_SPEED_SUPPORTED_MASK) >> @@ -4205,8 +4253,8 @@ ib_port_info_get_link_speed_sup( */ AL_INLINE void AL_API ib_port_info_set_link_speed_sup( - IN uint8_t const speed, - IN ib_port_info_t* p_pi ) + IN uint8_t const speed, + IN ib_port_info_t* p_pi ) { p_pi->state_info1 = ( ~IB_PORT_LINK_SPEED_SUPPORTED_MASK & p_pi->state_info1 ) | @@ -4240,7 +4288,7 
@@ ib_port_info_set_link_speed_sup( */ OSM_INLINE uint8_t AL_API ib_port_info_get_port_phys_state( - IN const ib_port_info_t* const p_pi ) + IN const ib_port_info_t* const p_pi ) { return( (uint8_t)((p_pi->state_info2 & IB_PORT_PHYS_STATE_MASK) >> @@ -4271,8 +4319,8 @@ ib_port_info_get_port_phys_state( */ AL_INLINE void AL_API ib_port_info_set_port_phys_state( - IN uint8_t const phys_state, - IN ib_port_info_t* p_pi ) + IN uint8_t const phys_state, + IN ib_port_info_t* p_pi ) { p_pi->state_info2 = ( ~IB_PORT_PHYS_STATE_MASK & p_pi->state_info2 ) | @@ -4306,7 +4354,7 @@ ib_port_info_set_port_phys_state( */ OSM_INLINE uint8_t AL_API ib_port_info_get_link_down_def_state( - IN const ib_port_info_t* const p_pi ) + IN const ib_port_info_t* const p_pi ) { return( (uint8_t)(p_pi->state_info2 & IB_PORT_LNKDWNDFTSTATE_MASK) ); } @@ -4334,8 +4382,8 @@ ib_port_info_get_link_down_def_state( */ AL_INLINE void AL_API ib_port_info_set_link_down_def_state( - IN ib_port_info_t* const p_pi, - IN const uint8_t link_dwn_state ) + IN ib_port_info_t* const p_pi, + IN const uint8_t link_dwn_state ) { p_pi->state_info2 = (uint8_t)((p_pi->state_info2 & 0xF0) | link_dwn_state ); } @@ -4366,7 +4414,7 @@ ib_port_info_set_link_down_def_state( */ OSM_INLINE uint8_t AL_API ib_port_info_get_link_speed_active( - IN const ib_port_info_t* const p_pi ) + IN const ib_port_info_t* const p_pi ) { return( (uint8_t)((p_pi->link_speed & IB_PORT_LINK_SPEED_ACTIVE_MASK) >> @@ -4387,10 +4435,10 @@ ib_port_info_get_link_speed_active( #define IB_LINK_WIDTH_ACTIVE_1X 1 #define IB_LINK_WIDTH_ACTIVE_4X 2 -#define IB_LINK_WIDTH_ACTIVE_12X 8 -#define IB_LINK_SPEED_ACTIVE_2_5 1 -#define IB_LINK_SPEED_ACTIVE_5 2 -#define IB_LINK_SPEED_ACTIVE_10 4 +#define IB_LINK_WIDTH_ACTIVE_12X 8 +#define IB_LINK_SPEED_ACTIVE_2_5 1 +#define IB_LINK_SPEED_ACTIVE_5 2 +#define IB_LINK_SPEED_ACTIVE_10 4 /* following v1 ver1.2 p901 */ #define IB_PATH_RECORD_RATE_2_5_GBS 2 @@ -4418,19 +4466,19 @@ ib_port_info_get_link_speed_active( AL_INLINE uint8_t AL_API ib_port_info_compute_rate( - IN const ib_port_info_t* const p_pi ) + IN const ib_port_info_t* const p_pi ) { uint8_t rate = 0; switch (ib_port_info_get_link_speed_active(p_pi)) { case IB_LINK_SPEED_ACTIVE_2_5: - switch(p_pi->link_width_active) + switch (p_pi->link_width_active) { case IB_LINK_WIDTH_ACTIVE_1X: rate = IB_PATH_RECORD_RATE_2_5_GBS; break; - + case IB_LINK_WIDTH_ACTIVE_4X: rate = IB_PATH_RECORD_RATE_10_GBS; break; @@ -4450,15 +4498,15 @@ ib_port_info_compute_rate( case IB_LINK_WIDTH_ACTIVE_1X: rate = IB_PATH_RECORD_RATE_5_GBS; break; - + case IB_LINK_WIDTH_ACTIVE_4X: rate = IB_PATH_RECORD_RATE_20_GBS; break; - + case IB_LINK_WIDTH_ACTIVE_12X: rate = IB_PATH_RECORD_RATE_60_GBS; break; - + default: rate = IB_PATH_RECORD_RATE_5_GBS; break; @@ -4470,15 +4518,15 @@ ib_port_info_compute_rate( case IB_LINK_WIDTH_ACTIVE_1X: rate = IB_PATH_RECORD_RATE_10_GBS; break; - + case IB_LINK_WIDTH_ACTIVE_4X: rate = IB_PATH_RECORD_RATE_40_GBS; break; - + case IB_LINK_WIDTH_ACTIVE_12X: - rate = IB_PATH_RECORD_RATE_120_GBS; + rate = IB_PATH_RECORD_RATE_120_GBS; break; - + default: rate = IB_PATH_RECORD_RATE_10_GBS; break; @@ -4515,18 +4563,18 @@ ib_port_info_compute_rate( */ AL_INLINE uint8_t AL_API ib_path_get_ipd( - IN uint8_t local_link_width_supported, - IN uint8_t path_rec_rate ) + IN uint8_t local_link_width_supported, + IN uint8_t path_rec_rate ) { uint8_t ipd = 0; switch(local_link_width_supported) { - /* link_width_supported = 1: 1x */ + /* link_width_supported = 1: 1x */ case 1: break; - /* link_width_supported
= 3: 1x or 4x */ + /* link_width_supported = 3: 1x or 4x */ case 3: switch(path_rec_rate & 0x3F) { @@ -4538,7 +4586,7 @@ ib_path_get_ipd( } break; - /* link_width_supported = 11: 1x or 4x or 12x */ + /* link_width_supported = 11: 1x or 4x or 12x */ case 11: switch(path_rec_rate & 0x3F) { @@ -4586,7 +4634,7 @@ ib_path_get_ipd( */ AL_INLINE uint8_t AL_API ib_port_info_get_mtu_cap( - IN const ib_port_info_t* const p_pi ) + IN const ib_port_info_t* const p_pi ) { return( (uint8_t)(p_pi->mtu_cap & 0x0F) ); } @@ -4602,6 +4650,7 @@ ib_port_info_get_mtu_cap( * * SEE ALSO *********/ + /****f* IBA Base: Types/ib_port_info_get_neighbor_mtu * NAME * ib_port_info_get_neighbor_mtu @@ -4629,6 +4678,7 @@ ib_port_info_get_neighbor_mtu( * * SEE ALSO *********/ + /****f* IBA Base: Types/ib_port_info_set_neighbor_mtu * NAME * ib_port_info_set_neighbor_mtu @@ -4640,8 +4690,8 @@ ib_port_info_get_neighbor_mtu( */ AL_INLINE void AL_API ib_port_info_set_neighbor_mtu( - IN ib_port_info_t* const p_pi, - IN const uint8_t mtu ) + IN ib_port_info_t* const p_pi, + IN const uint8_t mtu ) { CL_ASSERT( mtu <= 5 ); CL_ASSERT( mtu != 0 ); @@ -4690,6 +4740,7 @@ ib_port_info_get_master_smsl( * * SEE ALSO *********/ + /****f* IBA Base: Types/ib_port_info_set_master_smsl * NAME * ib_port_info_set_master_smsl @@ -4701,8 +4752,8 @@ ib_port_info_get_master_smsl( */ AL_INLINE void AL_API ib_port_info_set_master_smsl( - IN ib_port_info_t* const p_pi, - IN const uint8_t smsl ) + IN ib_port_info_t* const p_pi, + IN const uint8_t smsl ) { p_pi->mtu_smsl = (uint8_t)((p_pi->mtu_smsl & 0xF0) | smsl ); } @@ -4733,11 +4784,11 @@ ib_port_info_set_master_smsl( */ AL_INLINE void AL_API ib_port_info_set_timeout( - IN ib_port_info_t* const p_pi, - IN const uint8_t timeout ) + IN ib_port_info_t* const p_pi, + IN const uint8_t timeout ) { CL_ASSERT( timeout <= 0x1F ); - p_pi->subnet_timeout = + p_pi->subnet_timeout = (uint8_t)( (p_pi->subnet_timeout & 0x80) | (timeout & 0x1F)); } @@ -4768,11 +4819,11 @@ ib_port_info_set_timeout( */ OSM_INLINE void AL_API ib_port_info_set_client_rereg( - IN ib_port_info_t* const p_pi, - IN const uint8_t client_rereg ) + IN ib_port_info_t* const p_pi, + IN const uint8_t client_rereg ) { CL_ASSERT( client_rereg <= 0x1 ); - p_pi->subnet_timeout = + p_pi->subnet_timeout = (uint8_t)( (p_pi->subnet_timeout & 0x1F) | ((client_rereg << 7) & 0x80)); } @@ -4803,7 +4854,7 @@ ib_port_info_set_client_rereg( */ OSM_INLINE uint8_t AL_API ib_port_info_get_timeout( - IN ib_port_info_t const* p_pi ) + IN ib_port_info_t const* p_pi ) { return(p_pi->subnet_timeout & 0x1F ); } @@ -4831,7 +4882,7 @@ ib_port_info_get_timeout( */ OSM_INLINE uint8_t AL_API ib_port_info_get_client_rereg( - IN ib_port_info_t const* p_pi ) + IN ib_port_info_t const* p_pi ) { return ( (p_pi->subnet_timeout & 0x80 ) >> 7); } @@ -4860,8 +4911,8 @@ ib_port_info_get_client_rereg( */ OSM_INLINE void AL_API ib_port_info_set_hoq_lifetime( - IN ib_port_info_t* const p_pi, - IN const uint8_t hoq_life ) + IN ib_port_info_t* const p_pi, + IN const uint8_t hoq_life ) { p_pi->vl_stall_life = (uint8_t)((hoq_life & 0x1f) | (p_pi->vl_stall_life & 0xe0)); @@ -4894,7 +4945,7 @@ ib_port_info_set_hoq_lifetime( */ OSM_INLINE uint8_t AL_API ib_port_info_get_hoq_lifetime( - IN const ib_port_info_t* const p_pi ) + IN const ib_port_info_t* const p_pi ) { return( (uint8_t)(p_pi->vl_stall_life & 0x1f) ); } @@ -4917,15 +4968,15 @@ ib_port_info_get_hoq_lifetime( * ib_port_info_set_vl_stall_count * * DESCRIPTION -* Sets the VL Stall Count which define the number of contiguous +* Sets the VL 
Stall Count which define the number of contiguous * HLL (hoq) drops that will put the VL into stalled mode. * * SYNOPSIS */ OSM_INLINE void AL_API ib_port_info_set_vl_stall_count( - IN ib_port_info_t* const p_pi, - IN const uint8_t vl_stall_count ) + IN ib_port_info_t* const p_pi, + IN const uint8_t vl_stall_count ) { p_pi->vl_stall_life = (uint8_t)((p_pi->vl_stall_life & 0x1f) | ((vl_stall_count << 5) & 0xe0)); @@ -4958,7 +5009,7 @@ ib_port_info_set_vl_stall_count( */ OSM_INLINE uint8_t AL_API ib_port_info_get_vl_stall_count( - IN const ib_port_info_t* const p_pi ) + IN const ib_port_info_t* const p_pi ) { return( (uint8_t)(p_pi->vl_stall_life & 0xe0) >> 5); } @@ -4987,7 +5038,7 @@ ib_port_info_get_vl_stall_count( */ AL_INLINE uint8_t AL_API ib_port_info_get_lmc( - IN const ib_port_info_t* const p_pi ) + IN const ib_port_info_t* const p_pi ) { return( (uint8_t)(p_pi->mkey_lmc & IB_PORT_LMC_MASK) ); } @@ -5015,8 +5066,8 @@ ib_port_info_get_lmc( */ AL_INLINE void AL_API ib_port_info_set_lmc( - IN ib_port_info_t* const p_pi, - IN const uint8_t lmc ) + IN ib_port_info_t* const p_pi, + IN const uint8_t lmc ) { CL_ASSERT( lmc <= 0x7 ); p_pi->mkey_lmc = (uint8_t)((p_pi->mkey_lmc & 0xF8) | lmc); @@ -5048,7 +5099,7 @@ ib_port_info_set_lmc( */ OSM_INLINE uint8_t AL_API ib_port_info_get_link_speed_enabled( - IN const ib_port_info_t* const p_pi ) + IN const ib_port_info_t* const p_pi ) { return( (uint8_t)(p_pi->link_speed & IB_PORT_LINK_SPEED_ENABLED_MASK) ); } @@ -5064,6 +5115,7 @@ ib_port_info_get_link_speed_enabled( * * SEE ALSO *********/ + /****f* IBA Base: Types/ib_port_info_set_link_speed_enabled * NAME * ib_port_info_set_link_speed_enabled @@ -5075,8 +5127,8 @@ ib_port_info_get_link_speed_enabled( */ AL_INLINE void AL_API ib_port_info_set_link_speed_enabled( - IN ib_port_info_t* const p_pi, - IN const uint8_t link_speed_enabled ) + IN ib_port_info_t* const p_pi, + IN const uint8_t link_speed_enabled ) { p_pi->link_speed = (uint8_t)((p_pi->link_speed & 0xF0) | link_speed_enabled ); } @@ -5107,7 +5159,7 @@ ib_port_info_set_link_speed_enabled( */ AL_INLINE uint8_t AL_API ib_port_info_get_mpb( - IN const ib_port_info_t* const p_pi ) + IN const ib_port_info_t* const p_pi ) { return( (uint8_t)((p_pi->mkey_lmc & IB_PORT_MPB_MASK) >> IB_PORT_MPB_SHIFT) ); @@ -5136,8 +5188,8 @@ ib_port_info_get_mpb( */ AL_INLINE void AL_API ib_port_info_set_mpb( - IN ib_port_info_t* p_pi, - IN uint8_t mpb ) + IN ib_port_info_t* p_pi, + IN uint8_t mpb ) { p_pi->mkey_lmc = (~IB_PORT_MPB_MASK & p_pi->mkey_lmc) | @@ -5156,6 +5208,7 @@ ib_port_info_set_mpb( * * SEE ALSO *********/ + /****f* IBA Base: Types/ib_port_info_get_local_phy_err_thd * NAME * ib_port_info_get_local_phy_err_thd @@ -5167,7 +5220,7 @@ ib_port_info_set_mpb( */ OSM_INLINE uint8_t AL_API ib_port_info_get_local_phy_err_thd( - IN const ib_port_info_t* const p_pi ) + IN const ib_port_info_t* const p_pi ) { return (uint8_t)( (p_pi->error_threshold & 0xF0) >> 4); } @@ -5183,6 +5236,7 @@ ib_port_info_get_local_phy_err_thd( * * SEE ALSO *********/ + /****f* IBA Base: Types/ib_port_info_get_overrun_err_thd * NAME * ib_port_info_get_local_overrun_err_thd @@ -5194,7 +5248,7 @@ ib_port_info_get_local_phy_err_thd( */ OSM_INLINE uint8_t AL_API ib_port_info_get_overrun_err_thd( - IN const ib_port_info_t* const p_pi ) + IN const ib_port_info_t* const p_pi ) { return (uint8_t)(p_pi->error_threshold & 0x0F); } @@ -5222,9 +5276,9 @@ ib_port_info_get_overrun_err_thd( */ OSM_INLINE void AL_API ib_port_info_set_phy_and_overrun_err_thd( - IN ib_port_info_t* const p_pi, - IN 
uint8_t phy_threshold, - IN uint8_t overrun_threshold ) + IN ib_port_info_t* const p_pi, + IN uint8_t phy_threshold, + IN uint8_t overrun_threshold ) { p_pi->error_threshold = (uint8_t)( ((phy_threshold & 0x0F) << 4) | (overrun_threshold & 0x0F) ); @@ -5259,7 +5313,7 @@ typedef struct _ib_service_record ib_net16_t resv; ib_net32_t service_lease; uint8_t service_key[16]; - ib_svc_name_t service_name; + ib_svc_name_t service_name; uint8_t service_data8[16]; ib_net16_t service_data16[8]; ib_net32_t service_data32[4]; @@ -5274,7 +5328,7 @@ typedef struct _ib_portinfo_record ib_net16_t lid; uint8_t port_num; uint8_t resv; - ib_port_info_t port_info; + ib_port_info_t port_info; uint8_t pad[6]; } PACK_SUFFIX ib_portinfo_record_t; @@ -5297,7 +5351,7 @@ typedef struct _ib_sminfo_record { ib_net16_t lid; uint16_t resv0; - ib_sm_info_t sm_info; + ib_sm_info_t sm_info; uint8_t pad[7]; } PACK_SUFFIX ib_sminfo_record_t; @@ -5317,8 +5371,8 @@ typedef struct _ib_lft_record { ib_net16_t lid; ib_net16_t block_num; - uint32_t resv0; - uint8_t lft[64]; + uint32_t resv0; + uint8_t lft[64]; } PACK_SUFFIX ib_lft_record_t; #include /************/ @@ -5356,7 +5410,7 @@ typedef struct _ib_switch_info_record { ib_net16_t lid; uint16_t resv0; - ib_switch_info_t switch_info; + ib_switch_info_t switch_info; uint8_t pad[3]; } PACK_SUFFIX ib_switch_info_record_t; @@ -5375,7 +5429,7 @@ typedef struct _ib_switch_info_record */ AL_INLINE boolean_t AL_API ib_switch_info_get_state_change( - IN const ib_switch_info_t* const p_si ) + IN const ib_switch_info_t* const p_si ) { return( (p_si->life_state & IB_SWITCH_PSC) == IB_SWITCH_PSC ); } @@ -5403,7 +5457,7 @@ ib_switch_info_get_state_change( */ AL_INLINE void AL_API ib_switch_info_clear_state_change( - IN ib_switch_info_t* const p_si ) + IN ib_switch_info_t* const p_si ) { p_si->life_state = (uint8_t)(p_si->life_state & 0xFB); } @@ -5496,7 +5550,7 @@ typedef struct _ib_guidinfo_record #include typedef struct _ib_pkey_table { - ib_net16_t pkey_entry[IB_NUM_PKEY_ELEMENTS_IN_BLOCK]; + ib_net16_t pkey_entry[IB_NUM_PKEY_ELEMENTS_IN_BLOCK]; } PACK_SUFFIX ib_pkey_table_t; #include @@ -5514,12 +5568,12 @@ typedef struct _ib_pkey_table #include typedef struct _ib_pkey_table_record { - ib_net16_t lid; // for CA: lid of port, for switch lid of port 0 - uint16_t block_num; - uint8_t port_num; // for switch: port number, for CA: reserved - uint8_t reserved1; - uint16_t reserved2; - ib_pkey_table_t pkey_tbl; + ib_net16_t lid; // for CA: lid of port, for switch lid of port 0 + uint16_t block_num; + uint8_t port_num; // for switch: port number, for CA: reserved + uint8_t reserved1; + uint16_t reserved2; + ib_pkey_table_t pkey_tbl; } PACK_SUFFIX ib_pkey_table_record_t; #include @@ -5549,7 +5603,7 @@ typedef struct _ib_slvl_table * ib_slvl_table_record_t * * DESCRIPTION -* IBA defined Sl to VL Mapping Table Record for SA Query. (15.2.5.4) +* IBA defined SL to VL Mapping Table Record for SA Query. 
(15.2.5.4) * * SYNOPSIS */ @@ -5560,7 +5614,7 @@ typedef struct _ib_slvl_table_record uint8_t in_port_num; // reserved for CAs uint8_t out_port_num; // reserved for CAs uint32_t resv; - ib_slvl_table_t slvl_tbl; + ib_slvl_table_t slvl_tbl; } PACK_SUFFIX ib_slvl_table_record_t; #include @@ -5577,9 +5631,9 @@ typedef struct _ib_slvl_table_record */ AL_INLINE void AL_API ib_slvl_table_set( - IN ib_slvl_table_t* p_slvl_tbl, - IN uint8_t sl_index, - IN uint8_t vl ) + IN ib_slvl_table_t* p_slvl_tbl, + IN uint8_t sl_index, + IN uint8_t vl ) { uint8_t idx = sl_index/2; CL_ASSERT(vl <= 15); @@ -5627,8 +5681,8 @@ ib_slvl_table_set( */ OSM_INLINE uint8_t AL_API ib_slvl_table_get( - IN const ib_slvl_table_t* p_slvl_tbl, - IN uint8_t sl_index ) + IN const ib_slvl_table_t* p_slvl_tbl, + IN uint8_t sl_index ) { uint8_t idx = sl_index/2; CL_ASSERT(sl_index <= 15); @@ -5675,12 +5729,12 @@ typedef struct _ib_vl_arb_element { uint8_t vl; uint8_t weight; - } PACK_SUFFIX ib_vl_arb_element_t; #include /************/ #define IB_NUM_VL_ARB_ELEMENTS_IN_BLOCK 32 + /****s* IBA Base: Types/ib_vl_arb_table_t * NAME * ib_vl_arb_table_t @@ -5694,7 +5748,6 @@ typedef struct _ib_vl_arb_element typedef struct _ib_vl_arb_table { ib_vl_arb_element_t vl_entry[IB_NUM_VL_ARB_ELEMENTS_IN_BLOCK]; - } PACK_SUFFIX ib_vl_arb_table_t; #include /************/ @@ -5711,12 +5764,11 @@ typedef struct _ib_vl_arb_table #include typedef struct _ib_vl_arb_table_record { - ib_net16_t lid; // for CA: lid of port, for switch lid of port 0 - uint8_t port_num; - uint8_t block_num; - uint32_t reserved; + ib_net16_t lid; // for CA: lid of port, for switch lid of port 0 + uint8_t port_num; + uint8_t block_num; + uint32_t reserved; ib_vl_arb_table_t vl_arb_tbl; - } PACK_SUFFIX ib_vl_arb_table_record_t; #include /************/ @@ -5727,13 +5779,12 @@ typedef struct _ib_vl_arb_table_record #include typedef struct _ib_grh { - ib_net32_t ver_class_flow; - ib_net16_t resv1; - uint8_t resv2; - uint8_t hop_limit; - ib_gid_t src_gid; - ib_gid_t dest_gid; - + ib_net32_t ver_class_flow; + ib_net16_t resv1; + uint8_t resv2; + uint8_t hop_limit; + ib_gid_t src_gid; + ib_gid_t dest_gid; } PACK_SUFFIX ib_grh_t; #include @@ -5748,10 +5799,10 @@ typedef struct _ib_grh */ AL_INLINE void AL_API ib_grh_get_ver_class_flow( - IN const ib_net32_t ver_class_flow, - OUT uint8_t* const p_ver, - OUT uint8_t* const p_tclass, - OUT uint32_t* const p_flow_lbl ) + IN const ib_net32_t ver_class_flow, + OUT uint8_t* const p_ver, + OUT uint8_t* const p_tclass, + OUT uint32_t* const p_flow_lbl ) { ib_net32_t tmp_ver_class_flow; @@ -5800,9 +5851,9 @@ ib_grh_get_ver_class_flow( */ AL_INLINE ib_net32_t AL_API ib_grh_set_ver_class_flow( - IN const uint8_t ver, - IN const uint8_t tclass, - IN const uint32_t flow_lbl ) + IN const uint8_t ver, + IN const uint8_t tclass, + IN const uint32_t flow_lbl ) { ib_net32_t ver_class_flow; @@ -5919,10 +5970,10 @@ typedef struct _ib_member_rec */ AL_INLINE void AL_API ib_member_get_sl_flow_hop( - IN const ib_net32_t sl_flow_hop, - OUT uint8_t* const p_sl, - OUT uint32_t* const p_flow_lbl, - OUT uint8_t* const p_hop ) + IN const ib_net32_t sl_flow_hop, + OUT uint8_t* const p_sl, + OUT uint32_t* const p_flow_lbl, + OUT uint8_t* const p_hop ) { ib_net32_t tmp_sl_flow_hop; @@ -5971,9 +6022,9 @@ ib_member_get_sl_flow_hop( */ AL_INLINE ib_net32_t AL_API ib_member_set_sl_flow_hop( - IN const uint8_t sl, - IN const uint32_t flow_label, - IN const uint8_t hop_limit ) + IN const uint8_t sl, + IN const uint32_t flow_label, + IN const uint8_t hop_limit ) { 
ib_net32_t sl_flow_hop; @@ -6016,9 +6067,9 @@ ib_member_set_sl_flow_hop( */ AL_INLINE void AL_API ib_member_get_scope_state( - IN const uint8_t scope_state, - OUT uint8_t* const p_scope, - OUT uint8_t* const p_state ) + IN const uint8_t scope_state, + OUT uint8_t* const p_scope, + OUT uint8_t* const p_state ) { uint8_t tmp_scope_state; @@ -6060,8 +6111,8 @@ ib_member_get_scope_state( */ AL_INLINE uint8_t AL_API ib_member_set_scope_state( - IN const uint8_t scope, - IN const uint8_t state ) + IN const uint8_t scope, + IN const uint8_t state ) { uint8_t scope_state; @@ -6099,10 +6150,10 @@ ib_member_set_scope_state( */ AL_INLINE void AL_API ib_member_set_join_state( - IN OUT ib_member_rec_t *p_mc_rec, - IN const uint8_t state ) + IN OUT ib_member_rec_t *p_mc_rec, + IN const uint8_t state ) { - /* keep the scope as it is */ + /* keep the scope as it is */ p_mc_rec->scope_state = (p_mc_rec->scope_state & 0xF0) | (0x0f & state); } /* @@ -6136,14 +6187,13 @@ ib_member_set_join_state( #define IB_NOTICE_TYPE_URGENT 0x01 #define IB_NOTICE_TYPE_SECURITY 0x02 #define IB_NOTICE_TYPE_SUBN_MGMT 0x03 -#define IB_NOTICE_TYPE_INFO 0x04 +#define IB_NOTICE_TYPE_INFO 0x04 #define IB_NOTICE_TYPE_EMPTY 0x7F - #include typedef struct _ib_mad_notice_attr // Total Size calc Accumulated { - uint8_t generic_type; // 1 1 + uint8_t generic_type; // 1 1 union _notice_g_or_v { @@ -6263,7 +6313,7 @@ typedef struct _ib_mad_notice_attr // Total Size calc Accumulated */ OSM_INLINE boolean_t AL_API ib_notice_is_generic( - IN const ib_mad_notice_attr_t *p_ntc) + IN const ib_mad_notice_attr_t *p_ntc ) { return (p_ntc->generic_type & 0x80); } @@ -6290,7 +6340,7 @@ ib_notice_is_generic( */ AL_INLINE uint8_t AL_API ib_notice_get_type( - IN const ib_mad_notice_attr_t *p_ntc) + IN const ib_mad_notice_attr_t *p_ntc ) { return p_ntc->generic_type & 0x7f; } @@ -6317,9 +6367,10 @@ ib_notice_get_type( */ AL_INLINE ib_net32_t AL_API ib_notice_get_prod_type( - IN const ib_mad_notice_attr_t *p_ntc) + IN const ib_mad_notice_attr_t *p_ntc ) { uint32_t pt; + pt = cl_ntoh16(p_ntc->g_or_v.generic.prod_type_lsb) | (p_ntc->g_or_v.generic.prod_type_msb << 16); return cl_hton32(pt); @@ -6348,10 +6399,10 @@ ib_notice_get_prod_type( AL_INLINE void AL_API ib_notice_set_prod_type( IN ib_mad_notice_attr_t *p_ntc, - IN ib_net32_t prod_type_val) + IN ib_net32_t prod_type_val ) { uint32_t ptv = cl_ntoh32(prod_type_val); - p_ntc->g_or_v.generic.prod_type_lsb = cl_hton16( (uint16_t)(ptv & 0x0000ffff)); + p_ntc->g_or_v.generic.prod_type_lsb = cl_hton16((uint16_t)(ptv & 0x0000ffff)); p_ntc->g_or_v.generic.prod_type_msb = (uint8_t)( (ptv & 0x00ff0000) >> 16); } /* @@ -6380,9 +6431,10 @@ ib_notice_set_prod_type( */ AL_INLINE ib_net32_t AL_API ib_notice_get_vend_id( - IN const ib_mad_notice_attr_t *p_ntc) + IN const ib_mad_notice_attr_t *p_ntc ) { uint32_t vi; + vi = cl_ntoh16(p_ntc->g_or_v.vend.vend_id_lsb) | (p_ntc->g_or_v.vend.vend_id_msb << 16); return cl_hton32(vi); @@ -6390,7 +6442,7 @@ ib_notice_get_vend_id( /* * PARAMETERS * p_ntc -* [in] Pointer to the notice MAD attribute +* [in] Pointer to the notice MAD attribute * * RETURN VALUES * The Vendor Id of Vendor type Notice @@ -6411,7 +6463,7 @@ ib_notice_get_vend_id( AL_INLINE void AL_API ib_notice_set_vend_id( IN ib_mad_notice_attr_t *p_ntc, - IN ib_net32_t vend_id) + IN ib_net32_t vend_id ) { uint32_t vi = cl_ntoh32(vend_id); p_ntc->g_or_v.vend.vend_id_lsb = cl_hton16((uint16_t)(vi & 0x0000ffff)); @@ -6435,12 +6487,12 @@ ib_notice_set_vend_id( #include typedef struct _ib_inform_info { - ib_gid_t gid; + 
ib_gid_t gid; ib_net16_t lid_range_begin; ib_net16_t lid_range_end; ib_net16_t reserved1; - uint8_t is_generic; - uint8_t subscribe; + uint8_t is_generic; + uint8_t subscribe; ib_net16_t trap_type; union _inform_g_or_v { @@ -6448,7 +6500,7 @@ typedef struct _ib_inform_info { ib_net16_t trap_num; ib_net32_t qpn_resp_time_val; - uint8_t reserved2; + uint8_t reserved2; uint8_t node_type_msb; ib_net16_t node_type_lsb; } PACK_SUFFIX generic; @@ -6457,7 +6509,7 @@ typedef struct _ib_inform_info { ib_net16_t dev_id; ib_net32_t qpn_resp_time_val; - uint8_t reserved2; + uint8_t reserved2; uint8_t vendor_id_msb; ib_net16_t vendor_id_lsb; } PACK_SUFFIX vend; @@ -6478,9 +6530,9 @@ typedef struct _ib_inform_info */ OSM_INLINE void AL_API ib_inform_info_get_qpn_resp_time( - IN const ib_net32_t qpn_resp_time_val, - OUT ib_net32_t* const p_qpn, - OUT uint8_t* const p_resp_time_val ) + IN const ib_net32_t qpn_resp_time_val, + OUT ib_net32_t* const p_qpn, + OUT uint8_t* const p_resp_time_val ) { uint32_t tmp = cl_ntoh32(qpn_resp_time_val); @@ -6523,6 +6575,7 @@ ib_inform_info_set_qpn( IN ib_net32_t const qpn) { uint32_t tmp = cl_ntoh32(p_ii->g_or_v.generic.qpn_resp_time_val); + p_ii->g_or_v.generic.qpn_resp_time_val = cl_hton32( (tmp & 0x000000ff) | @@ -6549,9 +6602,10 @@ ib_inform_info_set_qpn( */ OSM_INLINE ib_net32_t AL_API ib_inform_info_get_node_type( - IN const ib_inform_info_t *p_inf) + IN const ib_inform_info_t *p_inf) { uint32_t nt; + nt = cl_ntoh16(p_inf->g_or_v.generic.node_type_lsb) | (p_inf->g_or_v.generic.node_type_msb << 16); return cl_hton32(nt); @@ -6581,9 +6635,10 @@ ib_inform_info_get_node_type( */ OSM_INLINE ib_net32_t AL_API ib_inform_info_get_vend_id( - IN const ib_inform_info_t *p_inf) + IN const ib_inform_info_t *p_inf) { uint32_t vi; + vi = cl_ntoh16(p_inf->g_or_v.vend.vendor_id_lsb) | (p_inf->g_or_v.vend.vendor_id_msb << 16); return cl_hton32(vi); @@ -6614,10 +6669,10 @@ ib_inform_info_get_vend_id( #include typedef struct _ib_inform_info_record { - ib_gid_t subscriber_gid; + ib_gid_t subscriber_gid; ib_net16_t subscriber_enum; - uint8_t reserved[6]; - ib_inform_info_t inform_info; + uint8_t reserved[6]; + ib_inform_info_t inform_info; } PACK_SUFFIX ib_inform_info_record_t; #include @@ -6651,7 +6706,7 @@ typedef struct _ib_dm_mad ib_mad_t header; uint8_t resv[40]; -#define IB_DM_DATA_SIZE 192 +#define IB_DM_DATA_SIZE 192 uint8_t data[IB_DM_DATA_SIZE]; } PACK_SUFFIX ib_dm_mad_t; @@ -6731,7 +6786,7 @@ typedef struct _ib_iou_info */ AL_INLINE uint8_t AL_API ib_iou_info_diag_dev_id( - IN const ib_iou_info_t* const p_iou_info ) + IN const ib_iou_info_t* const p_iou_info ) { return( (uint8_t)(p_iou_info->diag_rom >> 6 & 1) ); } @@ -6760,7 +6815,7 @@ ib_iou_info_diag_dev_id( */ AL_INLINE uint8_t AL_API ib_iou_info_option_rom( - IN const ib_iou_info_t* const p_iou_info ) + IN const ib_iou_info_t* const p_iou_info ) { return( (uint8_t)(p_iou_info->diag_rom >> 7) ); } @@ -6789,8 +6844,8 @@ ib_iou_info_option_rom( */ AL_INLINE uint8_t AL_API ioc_at_slot( - IN const ib_iou_info_t* const p_iou_info, - IN uint8_t slot ) + IN const ib_iou_info_t* const p_iou_info, + IN uint8_t slot ) { if( slot >= IB_DM_CTRL_LIST_SIZE ) return SLOT_DOES_NOT_EXIST; else return (int8_t) @@ -6933,10 +6988,9 @@ typedef struct _ib_ioc_profile * ib_dm_mad_t *********/ - AL_INLINE uint32_t AL_API ib_ioc_profile_get_vend_id( - IN const ib_ioc_profile_t* const p_ioc_profile ) + IN const ib_ioc_profile_t* const p_ioc_profile ) { return( cl_ntoh32(p_ioc_profile->vend_id) >> 8 ); } @@ -6944,13 +6998,12 @@ 
ib_ioc_profile_get_vend_id( AL_INLINE void AL_API ib_ioc_profile_set_vend_id( - IN ib_ioc_profile_t* const p_ioc_profile, - IN const uint32_t vend_id ) + IN ib_ioc_profile_t* const p_ioc_profile, + IN const uint32_t vend_id ) { p_ioc_profile->vend_id = (cl_hton32(vend_id) << 8); } - /****s* IBA Base: Types/ib_svc_entry_t * NAME * ib_svc_entry_t @@ -6964,9 +7017,9 @@ ib_ioc_profile_set_vend_id( typedef struct _ib_svc_entry { #define MAX_SVC_ENTRY_NAME_LEN 40 - char name[MAX_SVC_ENTRY_NAME_LEN]; + char name[MAX_SVC_ENTRY_NAME_LEN]; - ib_net64_t id; + ib_net64_t id; } PACK_SUFFIX ib_svc_entry_t; #include @@ -6982,7 +7035,6 @@ typedef struct _ib_svc_entry * ib_svc_entries_t *********/ - /****s* IBA Base: Types/ib_svc_entries_t * NAME * ib_svc_entries_t @@ -6995,7 +7047,7 @@ typedef struct _ib_svc_entry #include typedef struct _ib_svc_entries { -#define SVC_ENTRY_COUNT 4 +#define SVC_ENTRY_COUNT 4 ib_svc_entry_t service_entry[SVC_ENTRY_COUNT]; } PACK_SUFFIX ib_svc_entries_t; @@ -7009,13 +7061,12 @@ typedef struct _ib_svc_entries * ib_dm_mad_t, ib_svc_entry_t *********/ - AL_INLINE void AL_API ib_dm_get_slot_lo_hi( - IN const ib_net32_t slot_lo_hi, - OUT uint8_t *const p_slot, - OUT uint8_t *const p_lo, - OUT uint8_t *const p_hi ) + IN const ib_net32_t slot_lo_hi, + OUT uint8_t *const p_slot, + OUT uint8_t *const p_lo, + OUT uint8_t *const p_hi ) { ib_net32_t tmp_slot_lo_hi = CL_NTOH32( slot_lo_hi ); @@ -7029,7 +7080,6 @@ ib_dm_get_slot_lo_hi( *p_lo = (uint8_t)( ( tmp_slot_lo_hi >> 0 ) & 0xff ); } - /* * IBA defined information describing an I/O controller */ @@ -7038,7 +7088,7 @@ typedef struct _ib_ioc_info { ib_net64_t module_guid; ib_net64_t iou_guid; - ib_ioc_profile_t ioc_profile; + ib_ioc_profile_t ioc_profile; ib_net64_t access_key; uint16_t initiators_conf; uint8_t resv[38]; @@ -7049,8 +7099,8 @@ typedef struct _ib_ioc_info /* * Defines known Communication management class versions */ -#define IB_MCLASS_CM_VER_2 2 -#define IB_MCLASS_CM_VER_1 1 +#define IB_MCLASS_CM_VER_2 2 +#define IB_MCLASS_CM_VER_1 1 /* * Defines the size of user available data in communication management MADs @@ -7079,9 +7129,8 @@ typedef struct _ib_ioc_info #define IB_SIDR_REQ_PDATA_SIZE_VER1 216 #define IB_SIDR_REP_PDATA_SIZE_VER1 140 -#define IB_ARI_SIZE 72 // redefine -#define IB_APR_INFO_SIZE 72 - +#define IB_ARI_SIZE 72 // redefine +#define IB_APR_INFO_SIZE 72 /****d* Access Layer/ib_rej_status_t * NAME @@ -7092,42 +7141,42 @@ typedef struct _ib_ioc_info * * SYNOPSIS */ -typedef ib_net16_t ib_rej_status_t; +typedef ib_net16_t ib_rej_status_t; /* * SEE ALSO * ib_cm_rej, ib_cm_rej_rec_t * * SOURCE - */ -#define IB_REJ_INSUF_QP CL_HTON16(1) -#define IB_REJ_INSUF_EEC CL_HTON16(2) +*/ +#define IB_REJ_INSUF_QP CL_HTON16(1) +#define IB_REJ_INSUF_EEC CL_HTON16(2) #define IB_REJ_INSUF_RESOURCES CL_HTON16(3) -#define IB_REJ_TIMEOUT CL_HTON16(4) -#define IB_REJ_UNSUPPORTED CL_HTON16(5) +#define IB_REJ_TIMEOUT CL_HTON16(4) +#define IB_REJ_UNSUPPORTED CL_HTON16(5) #define IB_REJ_INVALID_COMM_ID CL_HTON16(6) -#define IB_REJ_INVALID_COMM_INSTANCE CL_HTON16(7) -#define IB_REJ_INVALID_SID CL_HTON16(8) +#define IB_REJ_INVALID_COMM_INSTANCE CL_HTON16(7) +#define IB_REJ_INVALID_SID CL_HTON16(8) #define IB_REJ_INVALID_XPORT CL_HTON16(9) -#define IB_REJ_STALE_CONN CL_HTON16(10) +#define IB_REJ_STALE_CONN CL_HTON16(10) #define IB_REJ_RDC_NOT_EXIST CL_HTON16(11) -#define IB_REJ_INVALID_GID CL_HTON16(12) -#define IB_REJ_INVALID_LID CL_HTON16(13) -#define IB_REJ_INVALID_SL CL_HTON16(14) -#define IB_REJ_INVALID_TRAFFIC_CLASS CL_HTON16(15) 
+#define IB_REJ_INVALID_GID CL_HTON16(12) +#define IB_REJ_INVALID_LID CL_HTON16(13) +#define IB_REJ_INVALID_SL CL_HTON16(14) +#define IB_REJ_INVALID_TRAFFIC_CLASS CL_HTON16(15) #define IB_REJ_INVALID_HOP_LIMIT CL_HTON16(16) #define IB_REJ_INVALID_PKT_RATE CL_HTON16(17) #define IB_REJ_INVALID_ALT_GID CL_HTON16(18) #define IB_REJ_INVALID_ALT_LID CL_HTON16(19) #define IB_REJ_INVALID_ALT_SL CL_HTON16(20) -#define IB_REJ_INVALID_ALT_TRAFFIC_CLASS CL_HTON16(21) -#define IB_REJ_INVALID_ALT_HOP_LIMIT CL_HTON16(22) +#define IB_REJ_INVALID_ALT_TRAFFIC_CLASS CL_HTON16(21) +#define IB_REJ_INVALID_ALT_HOP_LIMIT CL_HTON16(22) #define IB_REJ_INVALID_ALT_PKT_RATE CL_HTON16(23) #define IB_REJ_PORT_REDIRECT CL_HTON16(24) -#define IB_REJ_INVALID_MTU CL_HTON16(26) -#define IB_REJ_INSUFFICIENT_RESP_RES CL_HTON16(27) -#define IB_REJ_USER_DEFINED CL_HTON16(28) +#define IB_REJ_INVALID_MTU CL_HTON16(26) +#define IB_REJ_INSUFFICIENT_RESP_RES CL_HTON16(27) +#define IB_REJ_USER_DEFINED CL_HTON16(28) #define IB_REJ_INVALID_RNR_RETRY CL_HTON16(29) -#define IB_REJ_DUPLICATE_LOCAL_COMM_ID CL_HTON16(30) +#define IB_REJ_DUPLICATE_LOCAL_COMM_ID CL_HTON16(30) #define IB_REJ_INVALID_CLASS_VER CL_HTON16(31) #define IB_REJ_INVALID_FLOW_LBL CL_HTON16(32) #define IB_REJ_INVALID_ALT_FLOW_LBL CL_HTON16(33) @@ -7135,7 +7184,6 @@ typedef ib_net16_t ib_rej_status_t; #define IB_REJ_SERVICE_HANDOFF CL_HTON16(65535) /******/ - /****d* Access Layer/ib_apr_status_t * NAME * ib_apr_status_t @@ -7145,27 +7193,27 @@ typedef ib_net16_t ib_rej_status_t; * * SYNOPSIS */ -typedef uint8_t ib_apr_status_t; +typedef uint8_t ib_apr_status_t; /* * SEE ALSO * ib_cm_apr, ib_cm_apr_rec_t * * SOURCE */ -#define IB_AP_SUCCESS 0 +#define IB_AP_SUCCESS 0 #define IB_AP_INVALID_COMM_ID 1 -#define IB_AP_UNSUPPORTED 2 -#define IB_AP_REJECT 3 -#define IB_AP_REDIRECT 4 -#define IB_AP_IS_CURRENT 5 +#define IB_AP_UNSUPPORTED 2 +#define IB_AP_REJECT 3 +#define IB_AP_REDIRECT 4 +#define IB_AP_IS_CURRENT 5 #define IB_AP_INVALID_QPN_EECN 6 -#define IB_AP_INVALID_LID 7 -#define IB_AP_INVALID_GID 8 +#define IB_AP_INVALID_LID 7 +#define IB_AP_INVALID_GID 8 #define IB_AP_INVALID_FLOW_LBL 9 #define IB_AP_INVALID_TCLASS 10 #define IB_AP_INVALID_HOP_LIMIT 11 #define IB_AP_INVALID_PKT_RATE 12 -#define IB_AP_INVALID_SL 13 +#define IB_AP_INVALID_SL 13 /******/ /****d* Access Layer/ib_cm_cap_mask_t @@ -7179,9 +7227,9 @@ typedef uint8_t ib_apr_status_t; */ #define IB_CM_RELIABLE_CONN_CAPABLE CL_HTON16(9) #define IB_CM_RELIABLE_DGRM_CAPABLE CL_HTON16(10) -#define IB_CM_RDGRM_CAPABLE CL_HTON16(11) -#define IB_CM_UNRELIABLE_CONN_CAPABLE CL_HTON16(12) -#define IB_CM_SIDR_CAPABLE CL_HTON16(13) +#define IB_CM_RDGRM_CAPABLE CL_HTON16(11) +#define IB_CM_UNRELIABLE_CONN_CAPABLE CL_HTON16(12) +#define IB_CM_SIDR_CAPABLE CL_HTON16(13) /* * SEE ALSO * ib_cm_rep, ib_class_port_info_t @@ -7190,19 +7238,17 @@ typedef uint8_t ib_apr_status_t; * *******/ - /* * Service ID resolution status */ -typedef uint16_t ib_sidr_status_t; -#define IB_SIDR_SUCCESS 0 -#define IB_SIDR_UNSUPPORTED 1 -#define IB_SIDR_REJECT 2 -#define IB_SIDR_NO_QP 3 -#define IB_SIDR_REDIRECT 4 +typedef uint16_t ib_sidr_status_t; +#define IB_SIDR_SUCCESS 0 +#define IB_SIDR_UNSUPPORTED 1 +#define IB_SIDR_REJECT 2 +#define IB_SIDR_NO_QP 3 +#define IB_SIDR_REDIRECT 4 #define IB_SIDR_UNSUPPORTED_VER 5 - /* * The following definitions are shared between the Access Layer and VPD */ @@ -7214,7 +7260,8 @@ typedef struct _ib_rdd* __ptr64 ib_rdd_handle_t; typedef struct _ib_mr* __ptr64 ib_mr_handle_t; typedef struct _ib_mw* __ptr64 
ib_mw_handle_t; typedef struct _ib_qp* __ptr64 ib_qp_handle_t; -typedef struct _ib_eec* __ptr64 ib_eec_handle_t; +typedef struct _ib_srq* __ptr64 ib_srq_handle_t; +typedef struct _ib_eec* __ptr64 ib_eec_handle_t; typedef struct _ib_cq* __ptr64 ib_cq_handle_t; typedef struct _ib_av* __ptr64 ib_av_handle_t; typedef struct _ib_mcast* __ptr64 ib_mcast_handle_t; @@ -7222,8 +7269,8 @@ typedef struct _ib_mcast* __ptr64 ib_mcast_handle_t; /* Currently for windows branch, use the extended version of ib special verbs struct in order to be compliant with Infinicon ib_types; later we'll change it to support OpenSM ib_types.h */ -#ifndef WIN32 +#ifndef WIN32 /****d* Access Layer/ib_api_status_t * NAME * ib_api_status_t @@ -7255,7 +7302,6 @@ typedef enum _ib_api_status_t IB_OVERFLOW, IB_MAX_MCAST_QPS_REACHED, IB_INVALID_QP_STATE, - IB_INVALID_EEC_STATE, IB_INVALID_APM_STATE, IB_INVALID_PORT_STATE, IB_INVALID_STATE, @@ -7266,6 +7312,7 @@ typedef enum _ib_api_status_t IB_INVALID_MAX_WRS, IB_INVALID_MAX_SGE, IB_INVALID_CQ_SIZE, + IB_INVALID_SRQ_SIZE, IB_INVALID_SERVICE_TYPE, IB_INVALID_GID, IB_INVALID_LID, @@ -7273,12 +7320,12 @@ typedef enum _ib_api_status_t IB_INVALID_CA_HANDLE, IB_INVALID_AV_HANDLE, IB_INVALID_CQ_HANDLE, - IB_INVALID_EEC_HANDLE, IB_INVALID_QP_HANDLE, + IB_INVALID_SRQ_HANDLE, IB_INVALID_PD_HANDLE, IB_INVALID_MR_HANDLE, + IB_INVALID_FMR_HANDLE, IB_INVALID_MW_HANDLE, - IB_INVALID_RDD_HANDLE, IB_INVALID_MCAST_HANDLE, IB_INVALID_CALLBACK, IB_INVALID_AL_HANDLE, /* InfiniBand Access Layer */ @@ -7291,6 +7338,9 @@ typedef enum _ib_api_status_t IB_EE_IN_TIMEWAIT, IB_INVALID_PORT, IB_NOT_DONE, + IB_INVALID_INDEX, + IB_NO_MATCH, + IB_PENDING, IB_UNKNOWN_ERROR /* ALWAYS LAST ENUM VALUE! */ } ib_api_status_t; @@ -7309,7 +7359,7 @@ AL_EXPORT const char* ib_error_str[]; */ AL_INLINE const char* AL_API ib_get_err_str( - IN ib_api_status_t status ) + IN ib_api_status_t status ) { if( status > IB_UNKNOWN_ERROR ) status = IB_UNKNOWN_ERROR; @@ -7328,7 +7378,6 @@ ib_get_err_str( * SEE ALSO *********/ - /****d* Verbs/ib_async_event_t * NAME * ib_async_event_t -- Async event types @@ -7497,7 +7546,7 @@ AL_EXPORT const char* ib_async_event_str[]; */ AL_INLINE const char* AL_API ib_get_async_event_str( - IN ib_async_event_t event ) + IN ib_async_event_t event ) { if( event > IB_AE_UNKNOWN ) event = IB_AE_UNKNOWN; @@ -7516,7 +7565,6 @@ ib_get_async_event_str( * SEE ALSO *********/ - /****s* Verbs/ib_event_rec_t * NAME * ib_event_rec_t -- Async event notification record @@ -7537,7 +7585,7 @@ ib_get_async_event_str( typedef struct _ib_event_rec { void *context; - ib_async_event_t type; + ib_async_event_t type; /* HCA vendor specific event information. */ uint64_t vendor_specific; @@ -7575,14 +7623,13 @@ typedef struct _ib_event_rec } info; - ib_net64_t sysimg_guid; + ib_net64_t sysimg_guid; } trap; } ib_event_rec_t; /*******/ - /****d* Access Layer/ib_atomic_t * NAME * ib_atomic_t @@ -7612,7 +7659,6 @@ typedef enum _ib_atomic_t * in the system. 
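The new SRQ status codes resolve through the same string table as the existing ones, so callers need no special handling. A minimal user-mode sketch (printf, stdio.h and a hard-coded status stand in for a real failure path):

/* Resolve one of the new SRQ status codes to its display string.
 * The status value is hard-coded here for illustration; in real
 * code it would come from a failed SRQ verb. */
ib_api_status_t status = IB_INVALID_SRQ_HANDLE;

if( status != IB_SUCCESS )
	printf( "SRQ verb failed: %s\n", ib_get_err_str( status ) );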
*****/ - /****s* Access Layer/ib_port_cap_t * NAME * ib_port_cap_t @@ -7650,7 +7696,6 @@ typedef struct _ib_port_cap } ib_port_cap_t; /*****/ - /****d* Access Layer/ib_init_type_t * NAME * ib_init_type_t @@ -7665,13 +7710,12 @@ typedef struct _ib_port_cap * SYNOPSIS */ typedef uint8_t ib_init_type_t; -#define IB_INIT_TYPE_NO_LOAD 0x01 +#define IB_INIT_TYPE_NO_LOAD 0x01 #define IB_INIT_TYPE_PRESERVE_CONTENT 0x02 #define IB_INIT_TYPE_PRESERVE_PRESENCE 0x04 #define IB_INIT_TYPE_DO_NOT_RESUSCITATE 0x08 /*****/ - /****s* Access Layer/ib_port_attr_mod_t * NAME * ib_port_attr_mod_t @@ -7683,11 +7727,11 @@ typedef uint8_t ib_init_type_t; */ typedef struct _ib_port_attr_mod { - ib_port_cap_t cap; + ib_port_cap_t cap; uint16_t pkey_ctr; uint16_t qkey_ctr; - ib_init_type_t init_type; + ib_init_type_t init_type; ib_net64_t system_image_guid; } ib_port_attr_mod_t; @@ -7696,7 +7740,6 @@ typedef struct _ib_port_attr_mod * ib_port_cap_t *****/ - /****s* Access Layer/ib_port_attr_t * NAME * ib_port_attr_t @@ -7727,7 +7770,7 @@ typedef struct _ib_port_attr uint8_t sm_sl; uint8_t link_state; - ib_init_type_t init_type_reply; /* Optional */ + ib_init_type_t init_type_reply; /* Optional */ /* * subnet_timeout: @@ -7739,7 +7782,7 @@ typedef struct _ib_port_attr */ uint8_t subnet_timeout; - ib_port_cap_t cap; + ib_port_cap_t cap; uint16_t pkey_ctr; uint16_t qkey_ctr; @@ -7758,7 +7801,6 @@ typedef struct _ib_port_attr * uint8_t, ib_port_cap_t, ib_link_states_t *****/ - /****s* Access Layer/ib_ca_attr_t * NAME * ib_ca_attr_t @@ -7850,7 +7892,7 @@ typedef struct _ib_ca_attr uint8_t num_ports; uint32_t *p_page_size; - ib_port_attr_t *p_port_attr; + ib_port_attr_t *p_port_attr; } ib_ca_attr_t; /* @@ -7867,7 +7909,7 @@ typedef struct _ib_ca_attr * revision * Revision ID of this adapter * -* Fw_ver +* fw_ver * Device Firmware version. * * size @@ -8035,8 +8077,8 @@ typedef struct _ib_ca_attr */ ib_ca_attr_t* ib_copy_ca_attr( - IN ib_ca_attr_t* const p_dest, - IN const ib_ca_attr_t* const p_src ); + IN ib_ca_attr_t* const p_dest, + IN const ib_ca_attr_t* const p_src ); /* * PARAMETERS * p_dest @@ -8056,7 +8098,6 @@ ib_copy_ca_attr( * ib_ca_attr_t, ib_dup_ca_attr, ib_free_ca_attr *****/ - /****s* Access Layer/ib_av_attr_t * NAME * ib_av_attr_t @@ -8093,7 +8134,6 @@ typedef struct _ib_av_attr * ib_gid_t *****/ - /****d* Access Layer/ib_qp_type_t * NAME * ib_qp_type_t @@ -8105,17 +8145,17 @@ typedef struct _ib_av_attr */ typedef enum _ib_qp_type { - IB_QPT_RELIABLE_CONN = 0, /* Matches CM REQ transport type */ - IB_QPT_UNRELIABLE_CONN = 1, /* Matches CM REQ transport type */ - IB_QPT_RELIABLE_DGRM = 2, /* Matches CM REQ transport type */ + IB_QPT_RELIABLE_CONN = 0, /* Matches CM REQ transport type */ + IB_QPT_UNRELIABLE_CONN = 1, /* Matches CM REQ transport type */ + IB_QPT_RELIABLE_DGRM = 2, /* Matches CM REQ transport type */ IB_QPT_UNRELIABLE_DGRM, IB_QPT_QP0, IB_QPT_QP1, IB_QPT_RAW_IPV6, IB_QPT_RAW_ETHER, - IB_QPT_MAD, /* InfiniBand Access Layer */ - IB_QPT_QP0_ALIAS, /* InfiniBand Access Layer */ - IB_QPT_QP1_ALIAS /* InfiniBand Access Layer */ + IB_QPT_MAD, /* InfiniBand Access Layer */ + IB_QPT_QP0_ALIAS, /* InfiniBand Access Layer */ + IB_QPT_QP1_ALIAS /* InfiniBand Access Layer */ } ib_qp_type_t; /* @@ -8160,7 +8200,6 @@ typedef enum _ib_qp_type * protection domain. 
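Because ib_ca_attr_t is a variable-length block whose p_page_size and p_port_attr members point back into the same allocation, a flat copy of the structure would leave dangling pointers; ib_copy_ca_attr() above exists to rebase them. A sketch, assuming p_src came from a prior CA query and that complib's cl_malloc() is available:

/* Duplicate a CA attribute block.  p_src->size is the self-reported
 * length of the whole variable-size blob, including the arrays that
 * the embedded pointers refer to. */
ib_ca_attr_t *p_dup = (ib_ca_attr_t*)cl_malloc( p_src->size );

if( p_dup )
	ib_copy_ca_attr( p_dup, p_src );	/* copies and fixes up pointers */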
*****/ - /****d* Access Layer/ib_access_t * NAME * ib_access_t @@ -8172,11 +8211,11 @@ typedef enum _ib_qp_type * SYNOPSIS */ typedef uint32_t ib_access_t; -#define IB_AC_RDMA_READ 0x00000001 -#define IB_AC_RDMA_WRITE 0x00000002 -#define IB_AC_ATOMIC 0x00000004 -#define IB_AC_LOCAL_WRITE 0x00000008 -#define IB_AC_MW_BIND 0x00000010 +#define IB_AC_RDMA_READ 0x00000001 +#define IB_AC_RDMA_WRITE 0x00000002 +#define IB_AC_ATOMIC 0x00000004 +#define IB_AC_LOCAL_WRITE 0x00000008 +#define IB_AC_MW_BIND 0x00000010 /* * NOTES * Users may combine access rights using a bit-wise or operation to specify @@ -8184,7 +8223,6 @@ typedef uint32_t ib_access_t; * RDMA read and write access. *****/ - /****d* Access Layer/ib_qp_state_t * NAME * ib_qp_state_t @@ -8197,19 +8235,18 @@ typedef uint32_t ib_access_t; * SYNOPSIS */ typedef uint32_t ib_qp_state_t; -#define IB_QPS_RESET 0x00000001 +#define IB_QPS_RESET 0x00000001 #define IB_QPS_INIT 0x00000002 #define IB_QPS_RTR 0x00000004 #define IB_QPS_RTS 0x00000008 #define IB_QPS_SQD 0x00000010 -#define IB_QPS_SQD_DRAINING 0x00000030 -#define IB_QPS_SQD_DRAINED 0x00000050 -#define IB_QPS_SQERR 0x00000080 -#define IB_QPS_ERROR 0x00000100 -#define IB_QPS_TIME_WAIT 0xDEAD0000 /* InfiniBand Access Layer */ +#define IB_QPS_SQD_DRAINING 0x00000030 +#define IB_QPS_SQD_DRAINED 0x00000050 +#define IB_QPS_SQERR 0x00000080 +#define IB_QPS_ERROR 0x00000100 +#define IB_QPS_TIME_WAIT 0xDEAD0000 /* InfiniBand Access Layer */ /*****/ - /****d* Access Layer/ib_apm_state_t * NAME * ib_apm_state_t @@ -8228,7 +8265,6 @@ typedef enum _ib_apm_state } ib_apm_state_t; /*****/ - /****s* Access Layer/ib_qp_create_t * NAME * ib_qp_create_t @@ -8244,15 +8280,15 @@ typedef struct _ib_qp_create ib_rdd_handle_t h_rdd; - uint32_t sq_depth; - uint32_t rq_depth; - uint32_t sq_sge; - uint32_t rq_sge; + uint32_t sq_depth; + uint32_t rq_depth; + uint32_t sq_sge; + uint32_t rq_sge; ib_cq_handle_t h_sq_cq; ib_cq_handle_t h_rq_cq; - boolean_t sq_signaled; + boolean_t sq_signaled; } ib_qp_create_t; /* @@ -8311,7 +8347,6 @@ typedef struct _ib_qp_create * ib_qp_type_t, ib_qp_attr_t *****/ - /****s* Access Layer/ib_qp_attr_t * NAME * ib_qp_attr_t @@ -8323,8 +8358,8 @@ typedef struct _ib_qp_create */ typedef struct _ib_qp_attr { - ib_pd_handle_t h_pd; - ib_qp_type_t qp_type; + ib_pd_handle_t h_pd; + ib_qp_type_t qp_type; ib_access_t access_ctrl; uint16_t pkey_index; @@ -8335,13 +8370,13 @@ typedef struct _ib_qp_attr uint8_t init_depth; uint8_t resp_res; - ib_cq_handle_t h_sq_cq; - ib_cq_handle_t h_rq_cq; - ib_rdd_handle_t h_rdd; + ib_cq_handle_t h_sq_cq; + ib_cq_handle_t h_rq_cq; + ib_rdd_handle_t h_rdd; boolean_t sq_signaled; - ib_qp_state_t state; + ib_qp_state_t state; ib_net32_t num; ib_net32_t dest_num; ib_net32_t qkey; @@ -8351,9 +8386,9 @@ typedef struct _ib_qp_attr uint8_t primary_port; uint8_t alternate_port; - ib_av_attr_t primary_av; - ib_av_attr_t alternate_av; - ib_apm_state_t apm_state; + ib_av_attr_t primary_av; + ib_av_attr_t alternate_av; + ib_apm_state_t apm_state; } ib_qp_attr_t; /* @@ -8369,7 +8404,6 @@ typedef struct _ib_qp_attr * ib_qp_type_t, ib_access_t, ib_qp_state_t, ib_av_attr_t, ib_apm_state_t *****/ - /****d* Access Layer/ib_qp_opts_t * NAME * ib_qp_opts_t @@ -8380,29 +8414,27 @@ typedef struct _ib_qp_attr * SYNOPSIS */ typedef uint32_t ib_qp_opts_t; -#define IB_MOD_QP_ALTERNATE_AV 0x00000001 +#define IB_MOD_QP_ALTERNATE_AV 0x00000001 #define IB_MOD_QP_PKEY 0x00000002 #define IB_MOD_QP_APM_STATE 0x00000004 -#define IB_MOD_QP_PRIMARY_AV 0x00000008 -#define IB_MOD_QP_RNR_NAK_TIMEOUT 
0x00000010 +#define IB_MOD_QP_PRIMARY_AV 0x00000008 +#define IB_MOD_QP_RNR_NAK_TIMEOUT 0x00000010 #define IB_MOD_QP_RESP_RES 0x00000020 -#define IB_MOD_QP_INIT_DEPTH 0x00000040 -#define IB_MOD_QP_PRIMARY_PORT 0x00000080 -#define IB_MOD_QP_ACCESS_CTRL 0x00000100 +#define IB_MOD_QP_INIT_DEPTH 0x00000040 +#define IB_MOD_QP_PRIMARY_PORT 0x00000080 +#define IB_MOD_QP_ACCESS_CTRL 0x00000100 #define IB_MOD_QP_QKEY 0x00000200 #define IB_MOD_QP_SQ_DEPTH 0x00000400 #define IB_MOD_QP_RQ_DEPTH 0x00000800 -#define IB_MOD_QP_CURRENT_STATE 0x00001000 +#define IB_MOD_QP_CURRENT_STATE 0x00001000 #define IB_MOD_QP_RETRY_CNT 0x00002000 -#define IB_MOD_QP_LOCAL_ACK_TIMEOUT 0x00004000 -#define IB_MOD_QP_RNR_RETRY_CNT 0x00008000 - +#define IB_MOD_QP_LOCAL_ACK_TIMEOUT 0x00004000 +#define IB_MOD_QP_RNR_RETRY_CNT 0x00008000 /* * SEE ALSO * ib_qp_mod_t *****/ - /****s* Access Layer/ib_qp_mod_t * NAME * ib_qp_mod_t @@ -8425,68 +8457,68 @@ typedef struct _ib_qp_mod * Time, in milliseconds, that the QP needs to spend in * the time wait state before being reused. */ - uint32_t timewait; + uint32_t timewait; } reset; struct _qp_init { ib_qp_opts_t opts; - uint8_t primary_port; - ib_net32_t qkey; - uint16_t pkey_index; - ib_access_t access_ctrl; + uint8_t primary_port; + ib_net32_t qkey; + uint16_t pkey_index; + ib_access_t access_ctrl; } init; struct _qp_rtr { - ib_net32_t rq_psn; - ib_net32_t dest_qp; + ib_net32_t rq_psn; + ib_net32_t dest_qp; ib_av_attr_t primary_av; - uint8_t resp_res; + uint8_t resp_res; ib_qp_opts_t opts; ib_av_attr_t alternate_av; - ib_net32_t qkey; - uint16_t pkey_index; - ib_access_t access_ctrl; - uint32_t sq_depth; - uint32_t rq_depth; - uint8_t rnr_nak_timeout; + ib_net32_t qkey; + uint16_t pkey_index; + ib_access_t access_ctrl; + uint32_t sq_depth; + uint32_t rq_depth; + uint8_t rnr_nak_timeout; } rtr; struct _qp_rts { - ib_net32_t sq_psn; - uint8_t retry_cnt; - uint8_t rnr_retry_cnt; - uint8_t rnr_nak_timeout; - uint8_t local_ack_timeout; - uint8_t init_depth; + ib_net32_t sq_psn; + uint8_t retry_cnt; + uint8_t rnr_retry_cnt; + uint8_t rnr_nak_timeout; + uint8_t local_ack_timeout; + uint8_t init_depth; ib_qp_opts_t opts; ib_qp_state_t current_state; - ib_net32_t qkey; - ib_access_t access_ctrl; - uint8_t resp_res; + ib_net32_t qkey; + ib_access_t access_ctrl; + uint8_t resp_res; ib_av_attr_t primary_av; ib_av_attr_t alternate_av; - uint32_t sq_depth; - uint32_t rq_depth; + uint32_t sq_depth; + uint32_t rq_depth; ib_apm_state_t apm_state; - uint8_t primary_port; - uint16_t pkey_index; + uint8_t primary_port; + uint16_t pkey_index; } rts; struct _qp_sqd { - boolean_t sqd_event; + boolean_t sqd_event; } sqd; @@ -8498,7 +8530,6 @@ typedef struct _ib_qp_mod * ib_qp_state_t, ib_access_t, ib_av_attr_t, ib_apm_state_t *****/ - /****s* Access Layer/ib_eec_attr_t * NAME * ib_eec_attr_t @@ -8510,8 +8541,8 @@ typedef struct _ib_qp_mod */ typedef struct _ib_eec_attr { - ib_qp_state_t state; - ib_rdd_handle_t h_rdd; + ib_qp_state_t state; + ib_rdd_handle_t h_rdd; ib_net32_t local_eecn; ib_net32_t sq_psn; @@ -8522,9 +8553,9 @@ typedef struct _ib_eec_attr ib_net32_t remote_eecn; uint32_t init_depth; uint32_t dest_num; // ??? What is this? 
- ib_av_attr_t primary_av; - ib_av_attr_t alternate_av; - ib_apm_state_t apm_state; + ib_av_attr_t primary_av; + ib_av_attr_t alternate_av; + ib_apm_state_t apm_state; } ib_eec_attr_t; /* @@ -8532,7 +8563,6 @@ typedef struct _ib_eec_attr * ib_qp_state_t, ib_av_attr_t, ib_apm_state_t *****/ - /****d* Access Layer/ib_eec_opts_t * NAME * ib_eec_opts_t @@ -8543,14 +8573,14 @@ typedef struct _ib_eec_attr * SYNOPSIS */ typedef uint32_t ib_eec_opts_t; -#define IB_MOD_EEC_ALTERNATE_AV 0x00000001 -#define IB_MOD_EEC_PKEY 0x00000002 -#define IB_MOD_EEC_APM_STATE 0x00000004 -#define IB_MOD_EEC_PRIMARY_AV 0x00000008 -#define IB_MOD_EEC_RNR 0x00000010 -#define IB_MOD_EEC_RESP_RES 0x00000020 -#define IB_MOD_EEC_OUTSTANDING 0x00000040 -#define IB_MOD_EEC_PRIMARY_PORT 0x00000080 +#define IB_MOD_EEC_ALTERNATE_AV 0x00000001 +#define IB_MOD_EEC_PKEY 0x00000002 +#define IB_MOD_EEC_APM_STATE 0x00000004 +#define IB_MOD_EEC_PRIMARY_AV 0x00000008 +#define IB_MOD_EEC_RNR 0x00000010 +#define IB_MOD_EEC_RESP_RES 0x00000020 +#define IB_MOD_EEC_OUTSTANDING 0x00000040 +#define IB_MOD_EEC_PRIMARY_PORT 0x00000080 /* * NOTES * @@ -8569,7 +8599,7 @@ typedef uint32_t ib_eec_opts_t; */ typedef struct _ib_eec_mod { - ib_qp_state_t req_state; + ib_qp_state_t req_state; union _eec_state { @@ -8584,11 +8614,11 @@ typedef struct _ib_eec_mod { ib_net32_t rq_psn; ib_net32_t remote_eecn; - ib_av_attr_t primary_av; + ib_av_attr_t primary_av; uint8_t resp_res; - ib_eec_opts_t opts; - ib_av_attr_t alternate_av; + ib_eec_opts_t opts; + ib_av_attr_t alternate_av; uint16_t pkey_index; } rtr; @@ -8601,11 +8631,11 @@ typedef struct _ib_eec_mod uint8_t local_ack_timeout; uint8_t init_depth; - ib_eec_opts_t opts; - ib_av_attr_t alternate_av; - ib_apm_state_t apm_state; + ib_eec_opts_t opts; + ib_av_attr_t alternate_av; + ib_apm_state_t apm_state; - ib_av_attr_t primary_av; + ib_av_attr_t primary_av; uint16_t pkey_index; uint8_t primary_port; @@ -8625,7 +8655,6 @@ typedef struct _ib_eec_mod * ib_qp_state_t, ib_av_attr_t, ib_apm_state_t *****/ - /****d* Access Layer/ib_wr_type_t * NAME * ib_wr_type_t @@ -8646,7 +8675,6 @@ typedef enum _ib_wr_type_t } ib_wr_type_t; /*****/ - /****s* Access Layer/ib_local_ds_t * NAME * ib_local_ds_t @@ -8667,7 +8695,6 @@ typedef struct _ib_local_ds } ib_local_ds_t; /*****/ - /****d* Access Layer/ib_send_opt_t * NAME * ib_send_opt_t @@ -8678,16 +8705,14 @@ typedef struct _ib_local_ds * * SYNOPSIS */ -typedef uint32_t ib_send_opt_t; +typedef uint32_t ib_send_opt_t; #define IB_SEND_OPT_IMMEDIATE 0x00000001 -#define IB_SEND_OPT_FENCE 0x00000002 +#define IB_SEND_OPT_FENCE 0x00000002 #define IB_SEND_OPT_SIGNALED 0x00000004 #define IB_SEND_OPT_SOLICITED 0x00000008 -#define IB_SEND_OPT_INLINE 0x00000010 -#define IB_SEND_OPT_LOCAL 0x00000020 +#define IB_SEND_OPT_INLINE 0x00000010 +#define IB_SEND_OPT_LOCAL 0x00000020 #define IB_SEND_OPT_VEND_MASK 0xFFFF0000 - - /* * VALUES * The following flags determine the behavior of a work request when @@ -8730,7 +8755,6 @@ typedef uint32_t ib_send_opt_t; * *****/ - /****s* Access Layer/ib_send_wr_t * NAME * ib_send_wr_t @@ -8743,12 +8767,12 @@ typedef uint32_t ib_send_opt_t; */ typedef struct _ib_send_wr { - struct _ib_send_wr *p_next; + struct _ib_send_wr *p_next; uint64_t wr_id; - ib_wr_type_t wr_type; - ib_send_opt_t send_opt; + ib_wr_type_t wr_type; + ib_send_opt_t send_opt; uint32_t num_ds; - ib_local_ds_t *ds_array; + ib_local_ds_t *ds_array; ib_net32_t immediate_data; union _send_dgrm @@ -8757,7 +8781,7 @@ typedef struct _ib_send_wr { ib_net32_t remote_qp; ib_net32_t 
remote_qkey; - ib_av_handle_t h_av; + ib_av_handle_t h_av; } ud; @@ -8905,7 +8929,6 @@ typedef struct _ib_send_wr * ib_wr_type_t, ib_local_ds_t, ib_send_opt_t *****/ - /****s* Access Layer/ib_recv_wr_t * NAME * ib_recv_wr_t @@ -8919,10 +8942,9 @@ typedef struct _ib_send_wr typedef struct _ib_recv_wr { struct _ib_recv_wr *p_next; - uint64_t wr_id; - uint32_t num_ds; + uint64_t wr_id; + uint32_t num_ds; ib_local_ds_t *ds_array; - } ib_recv_wr_t; /* * FIELDS @@ -8946,7 +8968,6 @@ typedef struct _ib_recv_wr * ib_local_ds_t *****/ - /****s* Access Layer/ib_bind_wr_t * NAME * ib_bind_wr_t @@ -8960,13 +8981,13 @@ typedef struct _ib_recv_wr typedef struct _ib_bind_wr { uint64_t wr_id; - ib_send_opt_t send_opt; + ib_send_opt_t send_opt; - ib_mr_handle_t h_mr; + ib_mr_handle_t h_mr; ib_access_t access_ctrl; uint32_t current_rkey; - ib_local_ds_t local_ds; + ib_local_ds_t local_ds; } ib_bind_wr_t; /* @@ -8994,7 +9015,6 @@ typedef struct _ib_bind_wr * ib_send_opt_t, ib_access_t, ib_local_ds_t *****/ - /****d* Access Layer/ib_wc_status_t * NAME * ib_wc_status_t @@ -9023,9 +9043,9 @@ typedef enum _ib_wc_status_t IB_WCS_REM_INVALID_RD_REQ_ERR, IB_WCS_INVALID_EECN, IB_WCS_INVALID_EEC_STATE, - IB_WCS_UNMATCHED_RESPONSE, /* InfiniBand Access Layer */ - IB_WCS_CANCELED, /* InfiniBand Access Layer */ - IB_WCS_UNKNOWN /* Must be last. */ + IB_WCS_UNMATCHED_RESPONSE, /* InfiniBand Access Layer */ + IB_WCS_CANCELED, /* InfiniBand Access Layer */ + IB_WCS_UNKNOWN /* Must be last. */ } ib_wc_status_t; /* @@ -9110,10 +9130,8 @@ typedef enum _ib_wc_status_t * The completed work request was canceled by the user. *****/ - AL_EXPORT const char* ib_wc_status_str[]; - /****f* IBA Base: Types/ib_get_wc_status_str * NAME * ib_get_wc_status_str @@ -9125,7 +9143,7 @@ AL_EXPORT const char* ib_wc_status_str[]; */ AL_INLINE const char* AL_API ib_get_wc_status_str( - IN ib_wc_status_t wc_status ) + IN ib_wc_status_t wc_status ) { if( wc_status > IB_WCS_UNKNOWN ) wc_status = IB_WCS_UNKNOWN; @@ -9144,7 +9162,6 @@ ib_get_wc_status_str( * SEE ALSO *********/ - /****d* Access Layer/ib_wc_type_t * NAME * ib_wc_type_t @@ -9168,7 +9185,6 @@ typedef enum _ib_wc_type_t } ib_wc_type_t; /*****/ - /****d* Access Layer/ib_recv_opt_t * NAME * ib_recv_opt_t @@ -9178,9 +9194,9 @@ typedef enum _ib_wc_type_t * * SYNOPSIS */ -typedef uint32_t ib_recv_opt_t; +typedef uint32_t ib_recv_opt_t; #define IB_RECV_OPT_IMMEDIATE 0x00000001 -#define IB_RECV_OPT_FORWARD 0x00000002 +#define IB_RECV_OPT_FORWARD 0x00000002 #define IB_RECV_OPT_GRH_VALID 0x00000004 #define IB_RECV_OPT_VEND_MASK 0xFFFF0000 /* @@ -9202,7 +9218,6 @@ typedef uint32_t ib_recv_opt_t; * but may have specific meaning to the underlying VPD. 
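Receive work requests bound for an SRQ are built exactly like ordinary QP receive requests from the ib_recv_wr_t above. A sketch, assuming ib_local_ds_t carries the usual vaddr/length/lkey triple (its fields are elided in this excerpt) and that buf, buf_len and lkey come from a prior memory registration; the SRQ post verb itself is added in ib_al.h and is not shown here:

ib_local_ds_t ds;
ib_recv_wr_t wr;

ds.vaddr = (uint64_t)(uintn_t)buf;	/* registered receive buffer */
ds.length = buf_len;
ds.lkey = lkey;

wr.p_next = NULL;			/* single request, no chain */
wr.wr_id = (uint64_t)(uintn_t)buf;	/* cookie echoed in the completion */
wr.num_ds = 1;
wr.ds_array = &ds;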
*****/ - /****s* Access Layer/ib_wc_t * NAME * ib_wc_t @@ -9215,58 +9230,58 @@ typedef uint32_t ib_recv_opt_t; typedef struct _ib_wc { struct _ib_wc *p_next; - uint64_t wr_id; + uint64_t wr_id; ib_wc_type_t wc_type; - uint32_t length; + uint32_t length; ib_wc_status_t status; - uint64_t vendor_specific; + uint64_t vendor_specific; union _wc_recv { struct _wc_conn { ib_recv_opt_t recv_opt; - ib_net32_t immediate_data; + ib_net32_t immediate_data; } conn; struct _wc_ud { ib_recv_opt_t recv_opt; - ib_net32_t immediate_data; - ib_net32_t remote_qp; - uint16_t pkey_index; - ib_net16_t remote_lid; - uint8_t remote_sl; - uint8_t path_bits; + ib_net32_t immediate_data; + ib_net32_t remote_qp; + uint16_t pkey_index; + ib_net16_t remote_lid; + uint8_t remote_sl; + uint8_t path_bits; } ud; struct _wc_rd { - ib_net32_t remote_eecn; - ib_net32_t remote_qp; - ib_net16_t remote_lid; - uint8_t remote_sl; - uint32_t free_cnt; + ib_net32_t remote_eecn; + ib_net32_t remote_qp; + ib_net16_t remote_lid; + uint8_t remote_sl; + uint32_t free_cnt; } rd; struct _wc_raw_ipv6 { - ib_net16_t remote_lid; - uint8_t remote_sl; - uint8_t path_bits; + ib_net16_t remote_lid; + uint8_t remote_sl; + uint8_t path_bits; } raw_ipv6; struct _wc_raw_ether { - ib_net16_t remote_lid; - uint8_t remote_sl; - uint8_t path_bits; - ib_net16_t ether_type; + ib_net16_t remote_lid; + uint8_t remote_sl; + uint8_t path_bits; + ib_net16_t ether_type; } raw_ether; @@ -9382,7 +9397,6 @@ typedef struct _ib_wc * ib_wc_type_t, ib_qp_type_t, ib_wc_status_t, ib_recv_opt_t *****/ - /****s* Access Layer/ib_mr_create_t * NAME * ib_mr_create_t @@ -9397,7 +9411,6 @@ typedef struct _ib_mr_create void *vaddr; uint64_t length; ib_access_t access_ctrl; - } ib_mr_create_t; /* * FIELDS @@ -9414,7 +9427,6 @@ typedef struct _ib_mr_create * ib_access_t *****/ - /****s* Access Layer/ib_phys_create_t * NAME * ib_phys_create_t @@ -9432,7 +9444,6 @@ typedef struct _ib_phys_create uint32_t buf_offset; uint32_t page_size; ib_access_t access_ctrl; - } ib_phys_create_t; /* * length @@ -9459,7 +9470,6 @@ typedef struct _ib_phys_create * ib_access_t *****/ - /****s* Access Layer/ib_mr_attr_t * NAME * ib_mr_attr_t @@ -9472,14 +9482,13 @@ typedef struct _ib_phys_create typedef struct _ib_mr_attr { ib_pd_handle_t h_pd; - void *local_lb; - void *local_ub; - void *remote_lb; - void *remote_ub; - ib_access_t access_ctrl; - uint32_t lkey; - uint32_t rkey; - + void *local_lb; + void *local_ub; + void *remote_lb; + void *remote_ub; + ib_access_t access_ctrl; + uint32_t lkey; + uint32_t rkey; } ib_mr_attr_t; /* * DESCRIPTION @@ -9519,7 +9528,6 @@ typedef struct _ib_mr_attr * ib_access_t *****/ - /****d* Access Layer/ib_ca_mod_t * NAME * ib_ca_mod_t -- Modify port attributes and error counters @@ -9529,18 +9537,18 @@ typedef struct _ib_mr_attr * * SYNOPSIS */ -typedef uint32_t ib_ca_mod_t; -#define IB_CA_MOD_IS_CM_SUPPORTED 0x00000001 -#define IB_CA_MOD_IS_SNMP_SUPPORTED 0x00000002 +typedef uint32_t ib_ca_mod_t; +#define IB_CA_MOD_IS_CM_SUPPORTED 0x00000001 +#define IB_CA_MOD_IS_SNMP_SUPPORTED 0x00000002 #define IB_CA_MOD_IS_DEV_MGMT_SUPPORTED 0x00000004 -#define IB_CA_MOD_IS_VEND_SUPPORTED 0x00000008 -#define IB_CA_MOD_IS_SM 0x00000010 -#define IB_CA_MOD_IS_SM_DISABLED 0x00000020 -#define IB_CA_MOD_QKEY_CTR 0x00000040 -#define IB_CA_MOD_PKEY_CTR 0x00000080 +#define IB_CA_MOD_IS_VEND_SUPPORTED 0x00000008 +#define IB_CA_MOD_IS_SM 0x00000010 +#define IB_CA_MOD_IS_SM_DISABLED 0x00000020 +#define IB_CA_MOD_QKEY_CTR 0x00000040 +#define IB_CA_MOD_PKEY_CTR 0x00000080 #define 
IB_CA_MOD_IS_NOTICE_SUPPORTED 0x00000100
-#define IB_CA_MOD_IS_TRAP_SUPPORTED 0x00000200
-#define IB_CA_MOD_IS_APM_SUPPORTED 0x00000400
+#define IB_CA_MOD_IS_TRAP_SUPPORTED 0x00000200
+#define IB_CA_MOD_IS_APM_SUPPORTED 0x00000400
#define IB_CA_MOD_IS_SLMAP_SUPPORTED 0x00000800
#define IB_CA_MOD_IS_PKEY_NVRAM_SUPPORTED 0x00001000
#define IB_CA_MOD_IS_MKEY_NVRAM_SUPPORTED 0x00002000
@@ -9550,9 +9558,9 @@ typedef uint32_t ib_ca_mod_t;
#define IB_CA_MOD_IS_CAPM_NOTICE_SUPPORTED 0x00020000
#define IB_CA_MOD_IS_REINIT_SUPORTED 0x00040000
#define IB_CA_MOD_IS_LEDINFO_SUPPORTED 0x00080000
-#define IB_CA_MOD_SHUTDOWN_PORT 0x00100000
-#define IB_CA_MOD_INIT_TYPE_VALUE 0x00200000
-#define IB_CA_MOD_SYSTEM_IMAGE_GUID 0x00400000
+#define IB_CA_MOD_SHUTDOWN_PORT 0x00100000
+#define IB_CA_MOD_INIT_TYPE_VALUE 0x00200000
+#define IB_CA_MOD_SYSTEM_IMAGE_GUID 0x00400000
/*
* VALUES
* IB_CA_MOD_IS_CM_SUPPORTED
@@ -9636,7 +9644,6 @@ typedef uint32_t ib_ca_mod_t;
* Used to modify the system image GUID for the port.
*****/
-
/****d* Access Layer/ib_mr_mod_t
* NAME
* ib_mr_mod_t
@@ -9747,11 +9754,11 @@ typedef uint32_t ib_mr_mod_t;
*/
typedef struct _ib_ci_op
{
- IN uint32_t command;
- IN OUT void* p_buf OPTIONAL;
- IN uint32_t buf_size;
- IN OUT uint32_t num_bytes_ret;
- IN OUT int32_t status;
+ IN uint32_t command;
+ IN OUT void* p_buf OPTIONAL;
+ IN uint32_t buf_size;
+ IN OUT uint32_t num_bytes_ret;
+ IN OUT int32_t status;
} ib_ci_op_t;
/*
diff --git a/trunk/ulp/opensm/user/include/iba/ib_types_extended.h b/trunk/ulp/opensm/user/include/iba/ib_types_extended.h
index c0f76881..46e446f6 100644
--- a/trunk/ulp/opensm/user/include/iba/ib_types_extended.h
+++ b/trunk/ulp/opensm/user/include/iba/ib_types_extended.h
@@ -128,6 +128,7 @@ typedef enum _ib_api_status_t
 IB_INVALID_MAX_WRS,
 IB_INVALID_MAX_SGE,
 IB_INVALID_CQ_SIZE,
+ IB_INVALID_SRQ_SIZE,
 IB_INVALID_SERVICE_TYPE,
 IB_INVALID_GID,
 IB_INVALID_LID,
@@ -136,8 +137,10 @@ typedef enum _ib_api_status_t
 IB_INVALID_AV_HANDLE,
 IB_INVALID_CQ_HANDLE,
 IB_INVALID_QP_HANDLE,
+ IB_INVALID_SRQ_HANDLE,
 IB_INVALID_PD_HANDLE,
 IB_INVALID_MR_HANDLE,
+ IB_INVALID_FMR_HANDLE,
 IB_INVALID_MW_HANDLE,
 IB_INVALID_MCAST_HANDLE,
 IB_INVALID_CALLBACK,
@@ -148,6 +151,7 @@ typedef enum _ib_api_status_t
 IB_VERBS_PROCESSING_DONE, /* See Notes above */
 IB_INVALID_WR_TYPE,
 IB_QP_IN_TIMEWAIT,
+ IB_EE_IN_TIMEWAIT,
 IB_INVALID_PORT,
 IB_NOT_DONE,
 IB_INVALID_INDEX,
@@ -221,6 +225,9 @@ typedef enum _ib_async_event_t
 IB_AE_WQ_ACCESS_ERROR,
 IB_AE_PORT_ACTIVE,
 IB_AE_PORT_DOWN,
+ IB_AE_CLIENT_REREGISTER,
+ IB_AE_SRQ_LIMIT_REACHED,
+ IB_AE_SRQ_QP_LAST_WQE_REACHED,
 IB_AE_UNKNOWN /* ALWAYS LAST ENUM VALUE */
} ib_async_event_t;
@@ -310,6 +317,18 @@ typedef enum _ib_async_event_t
* The link is declared unavailable: IB_LINK_INIT, IB_LINK_ARMED,
* IB_LINK_DOWN.
*
+* IB_AE_CLIENT_REREGISTER
+* The SM indicates that the client should reregister its SA records.
+*
+* IB_AE_SRQ_CATAS_ERROR
+* An error occurred while processing or accessing the SRQ that prevents
+* dequeuing WQEs from the SRQ and reporting receive completions.
+*
+* IB_AE_SRQ_QP_LAST_WQE_REACHED
+* An event issued for a QP associated with a shared receive queue when
+* a CQE is generated for the last WQE, or when
+* the QP enters the Error state and there are no more WQEs on the RQ.
+*
* IB_AE_UNKNOWN
* An unknown error occurred which cannot be attributed to any
* resource; behavior is indeterminate.
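The two new SRQ events arrive through the same ib_event_rec_t record shown earlier, distinguished by its type field. A sketch of a handler; how the callback gets registered (for instance at SRQ creation time) belongs to ib_al.h and is not shown in this excerpt:

static void
srq_event_cb(
	IN ib_event_rec_t* p_event_rec )
{
	switch( p_event_rec->type )
	{
	case IB_AE_SRQ_LIMIT_REACHED:
		/* The armed low-water mark was crossed: replenish buffers. */
		break;

	case IB_AE_SRQ_CATAS_ERROR:
		/* Unrecoverable SRQ error: tear down dependent QPs, then the SRQ. */
		break;

	default:
		break;
	}
}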
@@ -647,6 +666,9 @@ typedef struct _ib_ca_attr
 uint32_t max_qps_per_mcast_grp;
 uint32_t max_fmr;
 uint32_t max_map_per_fmr;
+ uint32_t max_srq;
+ uint32_t max_srq_wrs;
+ uint32_t max_srq_sges;
 /*
 * local_ack_delay:
@@ -664,6 +686,7 @@ typedef struct _ib_ca_attr
 boolean_t av_port_check;
 boolean_t change_primary_port;
 boolean_t modify_wr_depth;
+ boolean_t modify_srq_depth;
 boolean_t current_qp_state_support;
 boolean_t shutdown_port_capability;
 boolean_t init_type_support;
@@ -751,6 +774,21 @@ typedef struct _ib_ca_attr
* Maximum limit on number of responder resources for incomming RDMA
* operations on QPs.
*
+* max_fmr
+* Maximum number of Fast Memory Regions supported.
+*
+* max_map_per_fmr
+* Maximum number of mappings supported by a Fast Memory Region.
+*
+* max_srq
+* Maximum number of Shared Receive Queues supported.
+*
+* max_srq_wrs
+* Maximum number of work requests supported per SRQ.
+*
+* max_srq_sges
+* Maximum number of scatter/gather elements supported per SRQ work request.
+*
* max_resp_res
* Maximum number of responder resources per HCA, with this HCA used as
* the target.
@@ -798,6 +836,10 @@ typedef struct _ib_ca_attr
* Indicates ability to modify QP depth during a modify QP operation.
* Check the verb specification for permitted states.
*
+* modify_srq_depth
+* Indicates ability to modify SRQ depth during a modify SRQ operation.
+* Check the verb specification for permitted states.
+*
* current_qp_state_support
* Indicates ability of the HCA to support the current QP state modifier
* during a modify QP operation.
@@ -1072,6 +1114,52 @@ typedef enum _ib_apm_state
} ib_apm_state_t;
/*****/
+/****d* Access Layer/ib_srq_attr_mask_t
+* NAME
+* ib_srq_attr_mask_t
+*
+* DESCRIPTION
+* Indicates which fields of the ib_srq_attr_t structure are valid.
+*
+* SYNOPSIS
+*/
+typedef enum _ib_srq_attr_mask {
+ IB_SRQ_MAX_WR = 1 << 0,
+ IB_SRQ_LIMIT = 1 << 1,
+} ib_srq_attr_mask_t;
+/*****/
+
+
+/****s* Access Layer/ib_srq_attr_t
+* NAME
+* ib_srq_attr_t
+*
+* DESCRIPTION
+* Attributes used to initialize a shared receive queue at creation time.
+*
+* SYNOPSIS
+*/
+typedef struct _ib_srq_attr {
+ uint32_t max_wr;
+ uint32_t max_sge;
+ uint32_t srq_limit;
+} ib_srq_attr_t;
+/*
+* FIELDS
+* max_wr
+* Specifies the maximum number of outstanding work requests on the SRQ.
+*
+* max_sge
+* Specifies the maximum number of scatter/gather elements per work request.
+*
+* srq_limit
+* Specifies the low-water mark for the SRQ.
+*
+* SEE ALSO
+* ib_qp_type_t, ib_srq_attr_mask_t
+*****/
+
+
 /****s* Access Layer/ib_qp_create_t
 * NAME
@@ -1093,6 +1181,7 @@ typedef struct _ib_qp_create
 ib_cq_handle_t h_sq_cq;
 ib_cq_handle_t h_rq_cq;
+ ib_srq_handle_t h_srq;
 boolean_t sq_signaled;
@@ -1136,6 +1225,10 @@ typedef struct _ib_qp_create
* work request completions. This handle must be NULL if the type is
* IB_QPT_MAD, IB_QPT_QP0_ALIAS, or IB_QPT_QP1_ALIAS.
*
+* h_srq
+* A handle to an SRQ from which the QP will dequeue receive work requests.
+* Must be NULL when the QP is not associated with an SRQ.
+*
* sq_signaled
* A flag that is used to indicate whether the queue pair will signal
* an event upon completion of a send work request.
If set to @@ -1175,6 +1268,7 @@ typedef struct _ib_qp_attr ib_cq_handle_t h_sq_cq; ib_cq_handle_t h_rq_cq; + ib_srq_handle_t h_srq; boolean_t sq_signaled; diff --git a/trunk/ulp/srp/kernel/srp_connection.c b/trunk/ulp/srp/kernel/srp_connection.c index 6ab43b6b..e7705439 100644 --- a/trunk/ulp/srp/kernel/srp_connection.c +++ b/trunk/ulp/srp/kernel/srp_connection.c @@ -141,6 +141,7 @@ __srp_create_qp( p_ifc = &p_hca->p_hba->ifc; // Create QP + cl_memclr( &qp_create, sizeof(qp_create) ); qp_create.qp_type = IB_QPT_RELIABLE_CONN; qp_create.sq_depth = SRP_DEFAULT_SEND_Q_DEPTH; qp_create.rq_depth = SRP_DEFAULT_RECV_Q_DEPTH; diff --git a/trunk/ulp/wsd/user/ibsp_iblow.c b/trunk/ulp/wsd/user/ibsp_iblow.c index 3fa9454f..d094ecdc 100644 --- a/trunk/ulp/wsd/user/ibsp_iblow.c +++ b/trunk/ulp/wsd/user/ibsp_iblow.c @@ -992,6 +992,7 @@ ib_create_socket( } /* Queue pair */ + cl_memclr(&qp_create, sizeof(ib_qp_create_t)); qp_create.qp_type = IB_QPT_RELIABLE_CONN; qp_create.sq_depth = QP_ATTRIB_SQ_DEPTH; qp_create.rq_depth = QP_ATTRIB_RQ_DEPTH; -- 2.41.0
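The two ULP hunks above fix a latent bug that the wider ib_qp_create_t would otherwise expose: both callers filled the structure field by field, so the new h_srq member would have inherited stack garbage and been taken for an SRQ handle. The zero-then-fill pattern they adopt, sketched here with placeholder depths (h_cq and h_srq are assumed pre-created handles):

ib_qp_create_t qp_create;

cl_memclr( &qp_create, sizeof(qp_create) );	/* h_srq et al. start NULL */
qp_create.qp_type = IB_QPT_RELIABLE_CONN;
qp_create.sq_depth = 16;
qp_create.rq_depth = 16;
qp_create.sq_sge = 1;
qp_create.rq_sge = 1;
qp_create.h_sq_cq = h_cq;
qp_create.h_rq_cq = h_cq;
qp_create.h_srq = h_srq;	/* leave NULL for a QP with its own RQ */
qp_create.sq_signaled = TRUE;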