p_mad_element->remote_sl = p_wc->recv.ud.remote_sl;\r
p_mad_element->pkey_index = p_wc->recv.ud.pkey_index;\r
p_mad_element->path_bits = p_wc->recv.ud.path_bits;\r
- p_mad_element->recv_opt = p_wc->recv.ud.recv_opt;\r
- p_mad_element->grh_valid = p_wc->recv.ud.recv_opt & IB_RECV_OPT_GRH_VALID;\r
+ p_mad_element->recv_opt = p_wc->recv.ud.recv_opt;\r
\r
+ p_mad_element->grh_valid = p_wc->recv.ud.recv_opt & IB_RECV_OPT_GRH_VALID;\r
+ \r
if( p_wc->recv.ud.recv_opt & IB_RECV_OPT_IMMEDIATE )\r
p_mad_element->immediate_data = p_wc->recv.ud.immediate_data;\r
\r
}\r
}\r
\r
- /* See if we need to create the address vector for the user. */\r
- if( !p_mad_element->h_av &&\r
- !( p_mad_element->send_opt & IB_SEND_OPT_LOCAL ) )\r
- {\r
+ /* See if we need to create the address vector for the user. \r
+ We also create AV for local send to pass the slid and grh in case of trap generation*/\r
+ if( !p_mad_element->h_av){\r
+\r
status = __create_send_av( h_mad_svc, h_send );\r
if( status != IB_SUCCESS )\r
{\r
return IB_INVALID_PARAMETER;\r
}\r
\r
- status = verbs_local_mad( h_ca, port_num, p_mad_in, p_mad_out );\r
+ status = al_local_mad(h_ca, port_num, NULL,p_mad_in, p_mad_out);\r
\r
AL_EXIT( AL_DBG_MAD_SVC );\r
return status;\r
}\r
\r
+/*
+ * Process a locally addressed MAD on the given CA port.
+ *
+ * p_src_av_attr - optional source address vector attributes, forwarded to
+ *	the verbs provider so it can fill the MAD's extended (wc) fields.
+ * p_mad_out - optional response buffer.  When NULL, a scratch MAD-sized
+ *	buffer is allocated so the verbs layer always has a response MAD,
+ *	and it is freed before returning.
+ */
+ib_api_status_t
+al_local_mad(
+	IN		const	ib_ca_handle_t				h_ca,
+	IN		const	uint8_t						port_num,
+	IN		const	ib_av_attr_t*				p_src_av_attr,
+	IN		const	void* const					p_mad_in,
+	IN				void*						p_mad_out )
+{
+	ib_api_status_t		status;
+	void*				p_mad_out_local = NULL;
+
+	AL_ENTER( AL_DBG_MAD_SVC );
+
+	if( AL_OBJ_INVALID_HANDLE( h_ca, AL_OBJ_TYPE_H_CA ) )
+	{
+		AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_CA_HANDLE\n") );
+		return IB_INVALID_CA_HANDLE;
+	}
+	if( !p_mad_in )
+	{
+		AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_PARAMETER\n") );
+		return IB_INVALID_PARAMETER;
+	}
+
+	if( !p_mad_out )
+	{
+		/* Allocate a scratch response MAD; use MAD_BLOCK_SIZE, not a
+		 * magic number, for the standard 256-byte MAD block. */
+		p_mad_out_local = cl_zalloc( MAD_BLOCK_SIZE );
+		if( !p_mad_out_local )
+		{
+			AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INSUFFICIENT_MEMORY\n") );
+			return IB_INSUFFICIENT_MEMORY;
+		}
+	}
+	else
+	{
+		p_mad_out_local = p_mad_out;
+	}
+
+	status = verbs_local_mad( h_ca, port_num, p_src_av_attr, p_mad_in, p_mad_out_local );
+
+	/* Discard the scratch buffer if we allocated one. */
+	if( !p_mad_out )
+	{
+		cl_free( p_mad_out_local );
+	}
+
+	AL_EXIT( AL_DBG_MAD_SVC );
+	return status;
+}
\r
ib_net32_t\r
al_get_user_tid(\r
OUT ib_mad_svc_handle_t* const ph_mad_svc );\r
\r
\r
+/*
+ * Forward a locally addressed MAD to the CA interface on the given port.
+ * p_av_attr optionally carries source address vector attributes for the
+ * verbs provider.  p_mad_out may be NULL when no response is expected;
+ * the implementation then uses an internal scratch buffer.
+ */
+ib_api_status_t
+al_local_mad( 
+	IN		const	ib_ca_handle_t				h_ca,
+	IN		const	uint8_t						port_num,
+	IN		const	ib_av_attr_t*				p_av_attr,
+	IN		const	void* const					p_mad_in,
+	IN				void*						p_mad_out );
\r
/*\r
* TID management\r
h_qp->obj.p_ci_ca->verbs.post_recv( h_qp->h_ci_qp,\\r
p_recv_wr, pp_recv_failure )\r
\r
-#define verbs_local_mad(h_ca, port_num, p_mad_in, p_mad_out) \\r
+#define verbs_local_mad(h_ca, port_num, p_src_av_attr, p_mad_in, p_mad_out) \\r
h_ca->obj.p_ci_ca->verbs.local_mad( h_ca->obj.p_ci_ca->h_ci_ca,\\r
- port_num, p_mad_in, p_mad_out)\r
+ port_num, p_src_av_attr, p_mad_in, p_mad_out)\r
\r
#define check_local_mad(h_qp) \\r
(h_qp->obj.p_ci_ca->verbs.local_mad)\r
verbs_local_mad(\r
IN const ib_ca_handle_t h_ca,\r
IN const uint8_t port_num,\r
+ IN const ib_av_attr_t* p_src_av_attr,\r
IN const void* const p_mad_in,\r
IN void* p_mad_out )\r
{\r
return ual_local_mad( h_ca, port_num, p_mad_in, p_mad_out );\r
+ UNUSED_PARAM( p_src_av_attr );\r
}\r
\r
#define check_local_mad(h_qp) \\r
/* Simulate a send/receive between local managers. */\r
cl_memcpy( p_mad_resp->p_mad_buf, p_mad, MAD_BLOCK_SIZE );\r
\r
- status = complete_local_mad( p_spl_qp_svc, p_mad_wr, p_mad_resp );\r
+ /* Construct the receive MAD element. */\r
+ p_mad_resp->status = IB_WCS_SUCCESS;\r
+ p_mad_resp->remote_qp = p_mad_wr->send_wr.dgrm.ud.remote_qp;\r
+ p_mad_resp->remote_lid = p_spl_qp_svc->base_lid;\r
+ if( p_mad_wr->send_wr.send_opt & IB_RECV_OPT_IMMEDIATE )\r
+ {\r
+ p_mad_resp->immediate_data = p_mad_wr->send_wr.immediate_data;\r
+ p_mad_resp->recv_opt |= IB_RECV_OPT_IMMEDIATE;\r
+ }\r
+\r
+ /*\r
+ * Hand the receive MAD element to the dispatcher before completing\r
+ * the send. This guarantees that the send request cannot time out.\r
+ */\r
+ status = mad_disp_recv_done( p_spl_qp_svc->h_mad_disp, p_mad_resp );\r
+\r
+ /* Forward the send work completion to the dispatcher. */\r
+ __complete_send_mad( p_spl_qp_svc->h_mad_disp, p_mad_wr, IB_WCS_SUCCESS );\r
+\r
}\r
\r
AL_EXIT( AL_DBG_SMI );\r
ib_mad_t* p_mad;\r
ib_smp_t* p_smp;\r
al_mad_send_t* p_mad_send;\r
- ib_mad_element_t* p_mad_response;\r
+ ib_mad_element_t* p_mad_response = NULL;\r
ib_mad_t* p_mad_response_buf;\r
ib_api_status_t status = IB_SUCCESS;\r
boolean_t smp_is_set;\r
\r
/* Get a MAD element from the pool for the response. */\r
p_mad_send = PARENT_STRUCT( p_mad_wr, al_mad_send_t, mad_wr );\r
-//*** Commented code to work-around ib_local_mad() requiring a response MAD\r
-//*** as input. Remove comments once the ib_local_mad() implementation allows\r
-//*** for a NULL response MAD, when one is not expected.\r
-//*** Note that an attempt to route an invalid response MAD in this case\r
-//*** will fail harmlessly.\r
-//*** if( p_mad_send->p_send_mad->resp_expected )\r
-//*** {\r
+ if( p_mad_send->p_send_mad->resp_expected )\r
+ {\r
status = get_resp_mad( p_spl_qp_svc, p_mad_wr, &p_mad_response );\r
if( status != IB_SUCCESS )\r
{\r
+ __complete_send_mad( p_spl_qp_svc->h_mad_disp, p_mad_wr,\r
+ IB_WCS_LOCAL_OP_ERR );\r
AL_EXIT( AL_DBG_SMI );\r
return status;\r
}\r
p_mad_response_buf = p_mad_response->p_mad_buf;\r
-//*** }\r
-//*** else\r
-//*** {\r
-//*** p_mad_response_buf = NULL;\r
-//*** }\r
+ }\r
+ else\r
+ {\r
+ p_mad_response_buf = NULL;\r
+ }\r
\r
/* Adjust directed route SMPs as required by IBA. */\r
if( p_mad->mgmt_class == IB_MCLASS_SUBN_DIR )\r
}\r
\r
/* Forward the locally addressed MAD to the CA interface. */\r
- status = ib_local_mad( p_spl_qp_svc->h_qp->obj.p_ci_ca->h_ca,\r
- p_spl_qp_svc->port_num, p_mad, p_mad_response_buf );\r
+ status = al_local_mad( p_spl_qp_svc->h_qp->obj.p_ci_ca->h_ca,\r
+ p_spl_qp_svc->port_num, &p_mad_wr->send_wr.dgrm.ud.h_av->av_attr, p_mad, p_mad_response_buf );\r
\r
/* Reset directed route SMPs as required by IBA. */\r
if( p_mad->mgmt_class == IB_MCLASS_SUBN_DIR )\r
}\r
\r
/* Check the completion status of this simulated send. */\r
- if( p_mad_response_buf )\r
+ if( p_mad_send->p_send_mad->resp_expected )\r
{\r
/*\r
* The SMI is uses PnP polling to refresh the base_lid and lmc.\r
( p_mad_response_buf->status == IB_SA_MAD_STATUS_SUCCESS ) )\r
{\r
p_port_info =\r
- (ib_port_info_t*)( p_mad_response_buf + 1 );\r
+ (ib_port_info_t*)ib_smp_get_payload_ptr((ib_smp_t*)p_mad_response_buf);\r
}\r
break;\r
\r
}\r
\r
if( p_port_info )\r
- {\r
+ { \r
p_spl_qp_svc->base_lid = p_port_info->base_lid;\r
p_spl_qp_svc->lmc = ib_port_info_get_lmc( p_port_info );\r
+ p_spl_qp_svc->sm_lid = p_port_info->master_sm_base_lid;\r
+ p_spl_qp_svc->sm_sl = ib_port_info_get_sm_sl( p_port_info );\r
+\r
if (p_port_info->subnet_timeout & 0x80)\r
{\r
AL_PRINT( TRACE_LEVEL_INFORMATION, AL_DBG_PNP,\r
}\r
}\r
}\r
- }\r
+ \r
\r
- status = complete_local_mad( p_spl_qp_svc, p_mad_wr, p_mad_response );\r
+ /* Construct the receive MAD element. */\r
+ p_mad_response->status = IB_WCS_SUCCESS;\r
+ p_mad_response->remote_qp = p_mad_wr->send_wr.dgrm.ud.remote_qp;\r
+ p_mad_response->remote_lid = p_spl_qp_svc->base_lid;\r
+ if( p_mad_wr->send_wr.send_opt & IB_RECV_OPT_IMMEDIATE )\r
+ {\r
+ p_mad_response->immediate_data = p_mad_wr->send_wr.immediate_data;\r
+ p_mad_response->recv_opt |= IB_RECV_OPT_IMMEDIATE;\r
+ }\r
\r
+ /*\r
+ * Hand the receive MAD element to the dispatcher before completing\r
+ * the send. This guarantees that the send request cannot time out.\r
+ */\r
+ status = mad_disp_recv_done( p_spl_qp_svc->h_mad_disp, p_mad_response );\r
+ }\r
+ \r
+ __complete_send_mad( p_spl_qp_svc->h_mad_disp, p_mad_wr,IB_WCS_SUCCESS);\r
+\r
+ \r
+ \r
/* If the SMP was a Get, no need to trigger a PnP poll. */\r
if( status == IB_SUCCESS && !smp_is_set )\r
status = IB_NOT_DONE;\r
/* Create an address vector for the SM. */\r
cl_memclr( &av_attr, sizeof( ib_av_attr_t ) );\r
av_attr.port_num = p_spl_qp_svc->port_num;\r
- av_attr.sl = p_mad_element->remote_sl;\r
- av_attr.dlid = p_mad_element->remote_lid;\r
- if( p_mad_element->grh_valid )\r
- {\r
- cl_memcpy( &av_attr.grh, p_mad_element->p_grh, sizeof( ib_grh_t ) );\r
- av_attr.grh.src_gid = p_mad_element->p_grh->dest_gid;\r
- av_attr.grh.dest_gid = p_mad_element->p_grh->src_gid;\r
- av_attr.grh_valid = TRUE;\r
- }\r
+ av_attr.sl = p_spl_qp_svc->sm_sl;\r
+ av_attr.dlid = p_spl_qp_svc->sm_lid;\r
+ av_attr.grh_valid = FALSE;\r
\r
- status = ib_create_av( p_spl_qp_svc->h_qp->obj.p_ci_ca->h_pd_alias,\r
+ status = ib_create_av( p_spl_qp_svc->h_qp->obj.p_ci_ca->h_pd_alias,\r
&av_attr, &p_mad_element->h_av );\r
\r
if( status != IB_SUCCESS )\r
ib_net16_t base_lid;\r
uint8_t lmc;\r
\r
+ ib_net16_t sm_lid;\r
+ uint8_t sm_sl;\r
+\r
al_mad_disp_handle_t h_mad_disp;\r
ib_cq_handle_t h_send_cq;\r
ib_cq_handle_t h_recv_cq;\r
mlnx_local_mad (\r
IN const ib_ca_handle_t h_ca,\r
IN const uint8_t port_num,\r
+ IN const ib_av_attr_t *p_av_src_attr, \r
IN const ib_mad_t *p_mad_in,\r
OUT ib_mad_t *p_mad_out );\r
\r
mlnx_local_mad (\r
IN const ib_ca_handle_t h_ca,\r
IN const uint8_t port_num,\r
+ IN const ib_av_attr_t *p_av_src_attr,\r
IN const ib_mad_t *p_mad_in,\r
- OUT ib_mad_t *p_mad_out )\r
+ OUT ib_mad_t *p_mad_out )\r
{\r
ib_api_status_t status;\r
\r
HH_hca_dev_t *hca_ul_info;\r
\r
CL_ENTER(MLNX_DBG_TRACE, g_mlnx_dbg_lvl);\r
+ UNUSED_PARAM(*p_av_src_attr);\r
\r
if (port_num > 2) {\r
status = IB_INVALID_PARAMETER;\r
mlnx_local_mad (\r
IN const ib_ca_handle_t h_ca,\r
IN const uint8_t port_num,\r
+ IN const ib_av_attr_t *p_src_av_attr,\r
IN const ib_mad_t *p_mad_in,\r
OUT ib_mad_t *p_mad_out );\r
\r
mlnx_local_mad (\r
IN const ib_ca_handle_t h_ca,\r
IN const uint8_t port_num,\r
+ IN const ib_av_attr_t* p_av_attr,\r
IN const ib_mad_t *p_mad_in,\r
OUT ib_mad_t *p_mad_out )\r
{\r
struct ib_device *ib_dev = IBDEV_FROM_HOB( hob_p );\r
//TODO: do we need use flags (IB_MAD_IGNORE_MKEY, IB_MAD_IGNORE_BKEY) ?\r
int mad_flags = 0; \r
- struct _ib_wc *wc_p;\r
+ struct _ib_wc *wc_p = NULL;\r
//TODO: do we need use grh ?\r
- struct ib_grh *grh_p = NULL;\r
+ struct _ib_grh *grh_p = NULL;\r
\r
HCA_ENTER(HCA_DBG_MAD);\r
\r
goto err_port_num;\r
}\r
\r
- wc_p = NULL;\r
- \r
- // debug print\r
- {\r
- HCA_PRINT( TRACE_LEVEL_INFORMATION, HCA_DBG_MAD, \r
- ("MAD: Class %02x, Method %02x, Attr %02x, HopPtr %d, HopCnt %d, \n",\r
- (uint32_t)((ib_smp_t *)p_mad_in)->mgmt_class, \r
- (uint32_t)((ib_smp_t *)p_mad_in)->method, \r
- (uint32_t)((ib_smp_t *)p_mad_in)->attr_id, \r
- (uint32_t)((ib_smp_t *)p_mad_in)->hop_ptr,\r
- (uint32_t)((ib_smp_t *)p_mad_in)->hop_count));\r
+ if (p_av_attr){\r
+ wc_p = cl_zalloc(sizeof(struct _ib_wc));\r
+ if(!wc_p){\r
+ status = IB_INSUFFICIENT_MEMORY ;\r
+ goto err_wc_alloc;\r
+ }\r
+ //Copy part of the attributes need to fill the mad extended fields in mellanox devices\r
+ wc_p->recv.ud.remote_lid = p_av_attr->dlid;\r
+ wc_p->recv.ud.remote_sl = p_av_attr->sl;\r
+ wc_p->recv.ud.path_bits = p_av_attr->path_bits;\r
+ wc_p->recv.ud.recv_opt = p_av_attr->grh_valid?IB_RECV_OPT_GRH_VALID:0;\r
+\r
+ if(wc_p->recv.ud.recv_opt &IB_RECV_OPT_GRH_VALID){\r
+ grh_p = cl_zalloc(sizeof(struct _ib_grh));\r
+ if(!grh_p){\r
+ status = IB_INSUFFICIENT_MEMORY ;\r
+ goto err_grh_alloc;\r
+ }\r
+ cl_memcpy(grh_p, &p_av_attr->grh, sizeof(ib_grh_t));\r
+ }\r
+ \r
+\r
}\r
+\r
+ HCA_PRINT( TRACE_LEVEL_INFORMATION, HCA_DBG_MAD, \r
+ ("MAD: Class %02x, Method %02x, Attr %02x, HopPtr %d, HopCnt %d, \n",\r
+ (uint32_t)((ib_smp_t *)p_mad_in)->mgmt_class, \r
+ (uint32_t)((ib_smp_t *)p_mad_in)->method, \r
+ (uint32_t)((ib_smp_t *)p_mad_in)->attr_id, \r
+ (uint32_t)((ib_smp_t *)p_mad_in)->hop_ptr,\r
+ (uint32_t)((ib_smp_t *)p_mad_in)->hop_count));\r
+\r
\r
// process mad\r
if( !mlnx_cachable_mad( h_ca, port_num, p_mad_in, p_mad_out ) )\r
if ( p_mad_in->mgmt_class == IB_MCLASS_SUBN_DIR )\r
p_mad_out->status |= IB_SMP_DIRECTION;\r
\r
+\r
err_process_mad:\r
+ if(grh_p)\r
+ cl_free(grh_p);\r
+err_grh_alloc:\r
+ if(wc_p)\r
+ cl_free(wc_p);\r
+err_wc_alloc:\r
err_port_num: \r
HCA_PRINT_EXIT(TRACE_LEVEL_ERROR , HCA_DBG_MAD,\r
("completes with ERROR status %s\n", IB_GET_ERR_STR(status)));\r
int process_mad_flags,
u8 port_num,
struct _ib_wc *in_wc,
- struct ib_grh *in_grh,
+ struct _ib_grh *in_grh,
struct ib_mad *in_mad,
struct ib_mad *out_mad);
}
int mthca_MAD_IFC(struct mthca_dev *dev, int ignore_mkey, int ignore_bkey,
- int port, struct _ib_wc *in_wc, struct ib_grh *in_grh,
+ int port, struct _ib_wc *in_wc, struct _ib_grh *in_grh,
void *in_mad, void *response_mad, u8 *status)
{
struct mthca_mailbox *inmailbox, *outmailbox;
u32 in_modifier = port;
u8 op_modifier = 0;
- ASSERT( !in_wc );
- UNREFERENCED_PARAMETER( in_grh );
#define MAD_IFC_BOX_SIZE 0x400
#define MAD_IFC_MY_QPN_OFFSET 0x100
-#define MAD_IFC_RQPN_OFFSET 0x104
-#define MAD_IFC_SL_OFFSET 0x108
-#define MAD_IFC_G_PATH_OFFSET 0x109
-#define MAD_IFC_RLID_OFFSET 0x10a
-#define MAD_IFC_PKEY_OFFSET 0x10e
+#define MAD_IFC_RQPN_OFFSET 0x108
+#define MAD_IFC_SL_OFFSET 0x10c
+#define MAD_IFC_G_PATH_OFFSET 0x10d
+#define MAD_IFC_RLID_OFFSET 0x10e
+#define MAD_IFC_PKEY_OFFSET 0x112
#define MAD_IFC_GRH_OFFSET 0x140
inmailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
if (ignore_bkey || !in_wc)
op_modifier |= 0x2;
+ if (in_wc) {
+ u8 val;
+
+ memset(inbox + 256, 0, 256);
+
+
+ MTHCA_PUT(inbox, 0, MAD_IFC_MY_QPN_OFFSET);
+ MTHCA_PUT(inbox, cl_ntoh32(in_wc->recv.ud.remote_qp), MAD_IFC_RQPN_OFFSET);
+ val = in_wc->recv.ud.remote_sl << 4;
+ MTHCA_PUT(inbox, val, MAD_IFC_SL_OFFSET);
+
+ val = in_wc->recv.ud.path_bits |
+ (in_wc->recv.ud.recv_opt & IB_RECV_OPT_GRH_VALID ? 0x80 : 0);
+ MTHCA_PUT(inbox, val, MAD_IFC_G_PATH_OFFSET)
+
+ MTHCA_PUT(inbox, cl_ntoh16(in_wc->recv.ud.remote_lid), MAD_IFC_RLID_OFFSET);
+ MTHCA_PUT(inbox, in_wc->recv.ud.pkey_index, MAD_IFC_PKEY_OFFSET);
+
+ if (in_grh)
+ memcpy(inbox + MAD_IFC_GRH_OFFSET, in_grh, 40);
+
+ op_modifier |= 0x4;
+
+ in_modifier |= cl_ntoh16(in_wc->recv.ud.remote_lid) << 16;
+
+ }
+
err = mthca_cmd_box(dev, inmailbox->dma, outmailbox->dma,
in_modifier, op_modifier,
CMD_MAD_IFC, CMD_TIME_CLASS_C, status);
int mthca_CONF_SPECIAL_QP(struct mthca_dev *dev, int type, u32 qpn,
u8 *status);
int mthca_MAD_IFC(struct mthca_dev *dev, int ignore_mkey, int ignore_bkey,
- int port, struct _ib_wc *in_wc, struct ib_grh *in_grh,
+ int port, struct _ib_wc *in_wc, struct _ib_grh *in_grh,
void *in_mad, void *response_mad, u8 *status);
int mthca_READ_MGM(struct mthca_dev *dev, int index,
struct mthca_mailbox *mailbox, u8 *status);
entry->recv.ud.recv_opt |= cl_ntoh16(cqe->sl_g_mlpath) & 0x80 ?
IB_RECV_OPT_GRH_VALID : 0;
}
+ if (!is_send && cqe->rlid == 0){
+ HCA_PRINT(TRACE_LEVEL_INFORMATION,HCA_DBG_CQ,("found rlid == 0 \n "));
+ entry->recv.ud.recv_opt |= IB_RECV_OPT_FORWARD;
+ }
if (is_error) {
handle_error_cqe(dev, cq, *cur_qp, wqe_index, is_send,
(struct mthca_err_cqe *) cqe, entry, &free_cqe);
int mad_flags,
u8 port_num,
struct _ib_wc *in_wc,
- struct ib_grh *in_grh,
+ struct _ib_grh *in_grh,
struct ib_mad *in_mad,
struct ib_mad *out_mad);
int mad_flags,
u8 port_num,
struct _ib_wc *in_wc,
- struct ib_grh *in_grh,
+ struct _ib_grh *in_grh,
struct ib_mad *in_mad,
struct ib_mad *out_mad)
{
* definition.\r
*/\r
#define VERBS_MAJOR_VER (0x0001)\r
-#define VERBS_MINOR_VER (0x0002)\r
+#define VERBS_MINOR_VER (0x0003)\r
\r
#define VERBS_VERSION (((VERBS_MAJOR_VER) << 16) | (VERBS_MINOR_VER))\r
#define MK_VERBS_VERSION(maj,min) ((((maj) & 0xFFFF) << 16) | \\r
(*ci_local_mad) (\r
IN const ib_ca_handle_t h_ca,\r
IN const uint8_t port_num,\r
+ IN const ib_av_attr_t *p_src_av_attr,\r
IN const ib_mad_t *p_mad_in,\r
- OUT ib_mad_t *p_mad_out );\r
+ OUT ib_mad_t *p_mad_out );\r
/*\r
* DESCRIPTION\r
* This routine is OPTIONAL for the channel interface. This is required\r
* SEE ALSO\r
*********/\r
\r
+\r
+/****f* IBA Base: Types/ib_port_info_get_sm_sl
+* NAME
+*	ib_port_info_get_sm_sl
+*
+* DESCRIPTION
+*	Returns the encoded value for the SM sl at this port.
+*
+* SYNOPSIS
+*/
+AL_INLINE uint8_t AL_API
+ib_port_info_get_sm_sl(
+	IN		const	ib_port_info_t* const		p_pi )
+{
+	/* The SM SL occupies the low nibble of the combined mtu_smsl field. */
+	return( (uint8_t)(p_pi->mtu_smsl & 0x0F) );
+}
+/*
+* PARAMETERS
+*	p_pi
+*		[in] Pointer to a PortInfo attribute.
+*
+* RETURN VALUES
+*	Returns the SM SL value encoded in the PortInfo attribute.
+*
+* NOTES
+*
+* SEE ALSO
+*********/
+/****f* IBA Base: Types/ib_port_info_set_sm_sl
+* NAME
+*	ib_port_info_set_sm_sl
+*
+* DESCRIPTION
+*	Sets the SM sl value in the PortInfo attribute.
+*
+* SYNOPSIS
+*/
+AL_INLINE void AL_API
+ib_port_info_set_sm_sl(
+	IN				ib_port_info_t* const		p_pi,
+	IN		const	uint8_t						sm_sl )
+{
+	/* The SL field is 4 bits wide; any value 0-15 (including 0) is a
+	 * valid service level, so only reject values that do not fit the
+	 * low nibble. */
+	CL_ASSERT( sm_sl <= 0x0F );
+	p_pi->mtu_smsl = (uint8_t)((p_pi->mtu_smsl & 0xF0) | sm_sl );
+}
+/*
+* PARAMETERS
+*	p_pi
+*		[in] Pointer to a PortInfo attribute.
+*
+*	sm_sl
+*		[in] SM SL value to set (0-15).
+*
+* RETURN VALUES
+*	None.
+*
+* NOTES
+*
+* SEE ALSO
+*********/
+
+\r
/****f* IBA Base: Types/ib_port_info_set_timeout\r
* NAME\r
* ib_port_info_set_timeout\r
* 1. Register physical memory region with HCA (ci_register_pmr)\r
* 2. Modify physical memory region with HCA (ci_modify_pmr)\r
* 3. Create Special QP (ci_create_spl_qp)\r
-* 4. Local Mad (ci_local_mad)\r
*\r
* For all these functions, the vendor does NOT provide support\r
* and UAL will return IB_UNSUPPORTED to the caller of Access Layer.\r