\r
#include "al.h"\r
#include "al_ca.h"\r
-#include "al_cm_shared.h"\r
+#include "al_cm_cep.h"\r
#include "al_common.h"\r
#include "al_debug.h"\r
#include "al_mad_pool.h"\r
}\r
\r
cl_spinlock_release( &p_obj->lock );\r
+\r
+ /* Cleanup any left-over connections. */\r
+ al_cep_cleanup_al( h_al );\r
}\r
\r
\r
}\r
\r
\r
-\r
-static void\r
-__free_conns(\r
- IN const ib_al_handle_t h_al )\r
-{\r
- cl_list_item_t *p_list_item;\r
- ib_cm_handle_t h_conn;\r
-\r
- /*\r
- * Report any outstanding connections left lying around. We should\r
- * never enter the loop below if the code is written correctly.\r
- */\r
- for( p_list_item = cl_qlist_head( &h_al->conn_list );\r
- p_list_item != cl_qlist_end( &h_al->conn_list );\r
- p_list_item = cl_qlist_head( &h_al->conn_list ) )\r
- {\r
- CL_ASSERT( !p_list_item );\r
-\r
- h_conn = PARENT_STRUCT( p_list_item, al_conn_t, al_item );\r
-\r
- /* Release the connection object, so the CM can clean-up properly. */\r
- cm_cleanup_conn( h_conn );\r
- }\r
-}\r
-\r
-\r
-\r
void\r
free_al(\r
IN al_obj_t *p_obj )\r
/* Free any MADs not returned by the user. */\r
__free_mads( h_al );\r
\r
- /* Cleanup any left-over connections. */\r
- __free_conns( h_al );\r
-\r
#ifdef CL_KERNEL\r
cl_vector_destroy( &h_al->hdl_vector );\r
#endif\r
}\r
\r
\r
-\r
-\r
ib_api_status_t\r
ib_query_ca_by_guid(\r
IN const ib_al_handle_t h_al,\r
\r
\r
\r
-void\r
-al_insert_conn(\r
- IN const ib_al_handle_t h_al,\r
- IN const ib_cm_handle_t h_conn )\r
-{\r
- ref_al_obj( &h_al->obj );\r
- cl_spinlock_acquire( &h_al->obj.lock );\r
-\r
- h_conn->h_al = h_al;\r
- cl_qlist_insert_tail( &h_al->conn_list, &h_conn->al_item );\r
-#ifdef CL_KERNEL\r
- h_conn->hdl = al_hdl_insert( h_al, h_conn, AL_OBJ_TYPE_H_CONN );\r
-#endif\r
-\r
- cl_spinlock_release( &h_al->obj.lock );\r
-}\r
-\r
-\r
-\r
-void\r
-al_remove_conn(\r
- IN const ib_cm_handle_t h_conn )\r
-{\r
- cl_spinlock_acquire( &h_conn->h_al->obj.lock );\r
- cl_qlist_remove_item( &h_conn->h_al->conn_list, &h_conn->al_item );\r
-#ifdef CL_KERNEL\r
- al_hdl_free( h_conn->h_al, h_conn->hdl );\r
-#endif\r
- cl_spinlock_release( &h_conn->h_al->obj.lock );\r
-\r
- deref_al_obj( &h_conn->h_al->obj );\r
-\r
- h_conn->h_al = NULL;\r
-#ifdef CL_KERNEL\r
- h_conn->hdl = AL_INVALID_HANDLE;\r
-#endif\r
-}\r
-\r
-\r
-\r
void\r
al_insert_mad(\r
IN const ib_al_handle_t h_al,\r
\r
cl_qlist_t key_list;\r
cl_qlist_t query_list;\r
- cl_qlist_t conn_list;\r
+ cl_qlist_t cep_list;\r
\r
#ifdef CL_KERNEL\r
/* Handle manager is only needed in the kernel. */\r
#include "al_mgr.h"\r
#include "al_pnp.h"\r
#include "al_qp.h"\r
-\r
-#if defined(CL_KERNEL)\r
-#include "al_cm.h"\r
-#endif\r
#include "ib_common.h"\r
\r
\r
case IB_AE_QP_COMM:\r
case IB_AE_QP_APM:\r
case IB_AE_QP_APM_ERROR:\r
-#if defined(CL_KERNEL)\r
- cm_async_event_cb( &p_event_item->event_rec );\r
-#endif\r
- /* Fall through next case. */\r
-\r
case IB_AE_QP_FATAL:\r
case IB_AE_RQ_ERROR:\r
case IB_AE_SQ_ERROR:\r
--- /dev/null
+/*\r
+ * Copyright (c) 2005 SilverStorm Technologies. All rights reserved.\r
+ *\r
+ * This software is available to you under the OpenIB.org BSD license\r
+ * below:\r
+ *\r
+ * Redistribution and use in source and binary forms, with or\r
+ * without modification, are permitted provided that the following\r
+ * conditions are met:\r
+ *\r
+ * - Redistributions of source code must retain the above\r
+ * copyright notice, this list of conditions and the following\r
+ * disclaimer.\r
+ *\r
+ * - Redistributions in binary form must reproduce the above\r
+ * copyright notice, this list of conditions and the following\r
+ * disclaimer in the documentation and/or other materials\r
+ * provided with the distribution.\r
+ *\r
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,\r
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\r
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND\r
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS\r
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN\r
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN\r
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\r
+ * SOFTWARE.\r
+ *\r
+ * $Id$\r
+ */\r
+\r
+\r
+#pragma once\r
+\r
+#ifndef _AL_CM_CEP_H_\r
+#define _AL_CM_CEP_H_\r
+\r
+\r
+#include <iba/ib_al.h>\r
+#include "al_common.h"\r
+\r
+\r
+#define CEP_EVENT_TIMEOUT 0x80000000\r
+#define CEP_EVENT_RECV 0x40000000\r
+#define CEP_EVENT_REQ 0x00000001\r
+#define CEP_EVENT_REP 0x00000002\r
+#define CEP_EVENT_RTU 0x00000004\r
+#define CEP_EVENT_DREQ 0x00000008\r
+#define CEP_EVENT_DREP 0x00000010\r
+#define CEP_EVENT_MRA 0x00000020\r
+#define CEP_EVENT_REJ 0x00000040\r
+#define CEP_EVENT_LAP 0x00000080\r
+#define CEP_EVENT_APR 0x00000100\r
+#define CEP_EVENT_SIDR 0x00800000\r
+\r
+\r
+#define AL_INVALID_CID 0xFFFFFFFF\r
+\r
+\r
+typedef void\r
+(*al_pfn_cep_cb_t)(\r
+ IN const ib_al_handle_t h_al,\r
+ IN ib_cep_t* const p_cep );\r
+/* PARAMETERS\r
+* h_al\r
+* [in] Handle to the AL instance to pass into the al_cep_poll call.\r
+*\r
+* p_cep\r
+* [in] Pointer to an ib_cep_t structure containing the CID and context\r
+* for the CEP on which the event occurred. The CID should be passed\r
+* into the al_cep_poll call.\r
+*\r
+* RETURN VALUES:\r
+* This function does not return a value.\r
+*\r
+* NOTES\r
+* The callback is invoked at DISPATCH_LEVEL.\r
+*\r
+* Recipients of the callback are expected to call al_cep_poll to retrieve\r
+* event specific details until al_cep_poll returns IB_NOT_DONE. This may\r
+* be done in a different thread context.\r
+*********/\r
+\r
+\r
+ib_api_status_t\r
+create_cep_mgr(\r
+ IN al_obj_t* const p_parent_obj );\r
+\r
+\r
+void\r
+al_cep_cleanup_al(\r
+ IN const ib_al_handle_t h_al );\r
+\r
+\r
+ib_api_status_t\r
+al_create_cep(\r
+ IN ib_al_handle_t h_al,\r
+ IN al_pfn_cep_cb_t pfn_cb,\r
+ IN void *context,\r
+ OUT net32_t* const p_cid );\r
+/*\r
+* NOTES\r
+* This function may be invoked at DISPATCH_LEVEL\r
+*\r
+* The pfn_cb parameter may be NULL in the kernel if using IRPs for\r
+* event notification.\r
+*********/\r
+\r
+\r
+/* Destruction is asynchronous. */\r
+ib_api_status_t\r
+al_destroy_cep(\r
+ IN ib_al_handle_t h_al,\r
+ IN net32_t cid,\r
+ IN ib_pfn_destroy_cb_t pfn_destroy_cb );\r
+/*\r
+* NOTES\r
+* Destruction is synchronous.\r
+* NOTE(review): the comment above this prototype says destruction is\r
+* asynchronous - confirm which statement is correct.\r
+* Clients must not invoke this function from a CEP callback, but should\r
+* instead return IB_CANCELLED or other appropriate value.\r
+*\r
+* NOTE(review): the paragraph below refers to a "reason" parameter that\r
+* does not appear in this prototype - likely stale text; confirm.\r
+* The reason parameter is passed as input to KeWaitForSingleObject.\r
+* The user-mode proxy sets this to UserRequest. Kernel clients should set\r
+* this to Executive.\r
+*********/\r
+\r
+ib_api_status_t\r
+al_cep_listen(\r
+ IN ib_al_handle_t h_al,\r
+ IN net32_t cid,\r
+ IN ib_cep_listen_t* const p_listen_info );\r
+\r
+\r
+ib_api_status_t\r
+al_cep_pre_req(\r
+ IN ib_al_handle_t h_al,\r
+ IN net32_t cid,\r
+ IN const ib_cm_req_t* const p_cm_req,\r
+ OUT ib_qp_mod_t* const p_init );\r
+\r
+\r
+ib_api_status_t\r
+al_cep_send_req(\r
+ IN ib_al_handle_t h_al,\r
+ IN net32_t cid );\r
+\r
+\r
+ib_api_status_t\r
+al_cep_pre_rep(\r
+ IN ib_al_handle_t h_al,\r
+ IN net32_t cid,\r
+ IN void *context,\r
+ IN const ib_cm_rep_t* const p_cm_rep,\r
+ OUT ib_qp_mod_t* const p_init );\r
+\r
+\r
+ib_api_status_t\r
+al_cep_send_rep(\r
+ IN ib_al_handle_t h_al,\r
+ IN net32_t cid );\r
+\r
+\r
+ib_api_status_t\r
+al_cep_get_rtr_attr(\r
+ IN ib_al_handle_t h_al,\r
+ IN net32_t cid,\r
+ OUT ib_qp_mod_t* const p_rtr );\r
+\r
+\r
+ib_api_status_t\r
+al_cep_get_rts_attr(\r
+ IN ib_al_handle_t h_al,\r
+ IN net32_t cid,\r
+ OUT ib_qp_mod_t* const p_rts );\r
+\r
+\r
+ib_api_status_t\r
+al_cep_rtu(\r
+ IN ib_al_handle_t h_al,\r
+ IN net32_t cid,\r
+ IN const uint8_t* p_pdata OPTIONAL,\r
+ IN uint8_t pdata_len );\r
+\r
+\r
+ib_api_status_t\r
+al_cep_rej(\r
+ IN ib_al_handle_t h_al,\r
+ IN net32_t cid,\r
+ IN ib_rej_status_t rej_status,\r
+ IN const uint8_t* const p_ari,\r
+ IN uint8_t ari_len,\r
+ IN const uint8_t* const p_pdata,\r
+ IN uint8_t pdata_len );\r
+\r
+\r
+ib_api_status_t\r
+al_cep_mra(\r
+ IN ib_al_handle_t h_al,\r
+ IN net32_t cid,\r
+ IN const ib_cm_mra_t* const p_cm_mra );\r
+\r
+\r
+ib_api_status_t\r
+al_cep_lap(\r
+ IN ib_al_handle_t h_al,\r
+ IN net32_t cid,\r
+ IN const ib_cm_lap_t* const p_cm_lap );\r
+\r
+\r
+ib_api_status_t\r
+al_cep_pre_apr(\r
+ IN ib_al_handle_t h_al,\r
+ IN net32_t cid,\r
+ IN const ib_cm_apr_t* const p_cm_apr,\r
+ OUT ib_qp_mod_t* const p_apr );\r
+\r
+\r
+ib_api_status_t\r
+al_cep_send_apr(\r
+ IN ib_al_handle_t h_al,\r
+ IN net32_t cid );\r
+\r
+\r
+ib_api_status_t\r
+al_cep_dreq(\r
+ IN ib_al_handle_t h_al,\r
+ IN net32_t cid,\r
+ IN const uint8_t* const p_pdata OPTIONAL,\r
+ IN const uint8_t pdata_len );\r
+\r
+\r
+ib_api_status_t\r
+al_cep_drep(\r
+ IN ib_al_handle_t h_al,\r
+ IN net32_t cid,\r
+ IN const ib_cm_drep_t* const p_cm_drep );\r
+\r
+\r
+ib_api_status_t\r
+al_cep_get_timewait(\r
+ IN ib_al_handle_t h_al,\r
+ IN net32_t cid,\r
+ OUT uint64_t* const p_timewait_us );\r
+\r
+\r
+ib_api_status_t\r
+al_cep_migrate(\r
+ IN ib_al_handle_t h_al,\r
+ IN net32_t cid );\r
+\r
+\r
+ib_api_status_t\r
+al_cep_established(\r
+ IN ib_al_handle_t h_al,\r
+ IN net32_t cid );\r
+\r
+\r
+ib_api_status_t\r
+al_cep_poll(\r
+ IN ib_al_handle_t h_al,\r
+ IN net32_t cid,\r
+ IN OUT ib_cep_t* const p_new_cep,\r
+ OUT ib_mad_element_t** const pp_mad );\r
+\r
+\r
+#ifdef CL_KERNEL\r
+NTSTATUS\r
+al_cep_queue_irp(\r
+ IN ib_al_handle_t h_al,\r
+ IN net32_t cid,\r
+ IN IRP* const p_irp );\r
+#endif /* CL_KERNEL */\r
+\r
+\r
+/****s* Access Layer/al_cep_sreq_t\r
+* NAME\r
+* al_cep_sreq_t\r
+*\r
+* DESCRIPTION\r
+* Connection request information used to establish a new connection.\r
+*\r
+* SYNOPSIS\r
+*/\r
+typedef struct _al_cep_sreq\r
+{\r
+ ib_net64_t svc_id;\r
+\r
+ ib_path_rec_t* __ptr64 p_path;\r
+\r
+ const uint8_t* __ptr64 p_pdata;\r
+ uint8_t pdata_len;\r
+\r
+ uint8_t max_cm_retries;\r
+ ib_net16_t pkey;\r
+ uint32_t timeout_ms;\r
+\r
+} al_cep_sreq_t;\r
+/*\r
+* FIELDS\r
+* svc_id\r
+* The ID of the remote service to which the SIDR request is\r
+* being made.\r
+*\r
+* p_path\r
+* Path information over which to send the request.\r
+*\r
+* p_pdata\r
+* Optional user-defined private data sent as part of the SIDR request.\r
+*\r
+* pdata_len\r
+* Defines the size of the user-defined private data.\r
+*\r
+* max_cm_retries\r
+* The maximum number of times that either CM should\r
+* resend a SIDR message.\r
+*\r
+* timeout_ms\r
+* Timeout value in milli-seconds for the SIDR REQ to expire. The CM will\r
+* add twice packet lifetime to this value to determine the actual timeout\r
+* value used.\r
+*\r
+* pkey\r
+* pkey to be used as part of the request.\r
+*\r
+* SEE ALSO\r
+* al_cep_sreq\r
+*****/\r
+\r
+ib_api_status_t\r
+al_cep_sreq(\r
+ IN ib_al_handle_t h_al,\r
+ IN net32_t cid,\r
+ IN const al_cep_sreq_t* const p_sreq );\r
+\r
+\r
+/****s* Access Layer/al_cep_srep_t\r
+* NAME\r
+* al_cep_srep_t\r
+*\r
+* DESCRIPTION\r
+* SIDR reply information.\r
+*\r
+* SYNOPSIS\r
+*/\r
+typedef struct _al_cep_srep\r
+{\r
+ net32_t qp_num;\r
+ net32_t qkey;\r
+\r
+ const uint8_t* __ptr64 p_pdata;\r
+ const void* __ptr64 p_info;\r
+\r
+ uint8_t pdata_len;\r
+ uint8_t info_len;\r
+\r
+ ib_sidr_status_t status;\r
+\r
+} al_cep_srep_t;\r
+/*\r
+* FIELDS\r
+* qp_num\r
+* The number of the queue pair on which the requested service\r
+* is supported.\r
+*\r
+* qkey\r
+* The QKEY of the returned queue pair.\r
+*\r
+* p_pdata\r
+* Optional user-defined private data sent as part of the SIDR reply.\r
+*\r
+* p_info\r
+* Optional "additional information" sent as part of the SIDR reply.\r
+*\r
+* pdata_len\r
+* Size of the user-defined private data.\r
+*\r
+* info_len\r
+* Size of the "additional information".\r
+*\r
+* status\r
+* sidr status value returned back to a previously received REQ.\r
+*\r
+* SEE ALSO\r
+* al_cep_srep\r
+*****/\r
+\r
+ib_api_status_t\r
+al_cep_srep(\r
+ IN ib_al_handle_t h_al,\r
+ IN net32_t cid,\r
+ IN const al_cep_srep_t* const p_sreq );\r
+\r
+\r
+\r
+\r
+/*\r
+ * Return the local ACK timeout value based on the given packet lifetime\r
+ * and target ACK delay. Both input values are assumed to be in the form\r
+ * 4.096 x 2 ^ input.\r
+ */\r
+#define MAX_LOCAL_ACK_TIMEOUT 0x1F /* limited to 5 bits */\r
+\r
+inline uint8_t\r
+calc_lcl_ack_timeout(\r
+ IN const uint8_t round_trip_time,\r
+ IN const uint8_t target_ack_delay )\r
+{\r
+ uint64_t timeout;\r
+ uint8_t local_ack_timeout;\r
+\r
+ if( !target_ack_delay )\r
+ {\r
+ if( round_trip_time > MAX_LOCAL_ACK_TIMEOUT )\r
+ return MAX_LOCAL_ACK_TIMEOUT;\r
+ else\r
+ return round_trip_time;\r
+ }\r
+\r
+ /*\r
+ * Since both input and the output values are in the same form, we\r
+ * can ignore the 4.096 portion by dividing it out.\r
+ */\r
+\r
+ /* The input parameter is the round trip time. */\r
+ timeout = (uint64_t)1 << round_trip_time;\r
+\r
+ /* Add in the target ack delay. */\r
+ if( target_ack_delay )\r
+ timeout += (uint64_t)1 << target_ack_delay;\r
+\r
+ /* Calculate the local ACK timeout. */\r
+ local_ack_timeout = 1;\r
+ while( (1ui64 << local_ack_timeout) <= timeout )\r
+ {\r
+ local_ack_timeout++;\r
+\r
+ /* Only 5-bits are valid. */\r
+ if( local_ack_timeout > MAX_LOCAL_ACK_TIMEOUT )\r
+ return MAX_LOCAL_ACK_TIMEOUT;\r
+ }\r
+\r
+ return local_ack_timeout;\r
+}\r
+\r
+#endif /* _AL_CM_CEP_H_ */\r
uint8_t pdata[IB_REQ_PDATA_SIZE];\r
\r
} PACK_SUFFIX mad_cm_req_t;\r
+C_ASSERT( sizeof(mad_cm_req_t) == MAD_BLOCK_SIZE );\r
\r
#include <complib/cl_packoff.h>\r
\r
p_req->offset51 = (retries << 4);\r
}\r
\r
-static inline void\r
+static inline ib_api_status_t\r
conn_req_set_pdata(\r
IN const uint8_t* const p_data OPTIONAL,\r
IN const uint8_t data_len,\r
\r
if( p_data )\r
{\r
+ if( data_len > IB_REQ_PDATA_SIZE )\r
+ return IB_INVALID_SETTING;\r
+\r
cl_memcpy( p_req->pdata, p_data, data_len );\r
- cl_memclr( p_req->pdata + data_len,\r
- IB_REQ_PDATA_SIZE - data_len );\r
+ cl_memclr( p_req->pdata + data_len, IB_REQ_PDATA_SIZE - data_len );\r
}\r
else\r
{\r
cl_memclr( p_req->pdata, IB_REQ_PDATA_SIZE );\r
}\r
+ return IB_SUCCESS;\r
}\r
\r
static inline void\r
IN OUT req_path_info_t* const p_path )\r
{\r
if( subn_lcl )\r
- p_path->offset42 = (p_path->offset42 & 0xF0);\r
- else\r
p_path->offset42 = ((p_path->offset42 & 0xF0) | 0x08);\r
+ else\r
+ p_path->offset42 = (p_path->offset42 & 0xF0);\r
}\r
\r
static inline uint8_t\r
uint8_t pdata[IB_MRA_PDATA_SIZE];\r
\r
} PACK_SUFFIX mad_cm_mra_t;\r
+C_ASSERT( sizeof(mad_cm_mra_t) == MAD_BLOCK_SIZE );\r
\r
#include <complib/cl_packoff.h>\r
\r
IN const uint8_t data_len,\r
IN OUT mad_cm_mra_t* const p_mra )\r
{\r
- if( p_data && data_len > IB_MRA_PDATA_SIZE )\r
- return IB_INVALID_SETTING;\r
-\r
if( p_data )\r
{\r
+ if( data_len > IB_MRA_PDATA_SIZE )\r
+ return IB_INVALID_SETTING;\r
+\r
cl_memcpy( p_mra->pdata, p_data, data_len );\r
cl_memclr( p_mra->pdata + data_len, IB_MRA_PDATA_SIZE - data_len );\r
}\r
uint8_t pdata[IB_REJ_PDATA_SIZE];\r
\r
} PACK_SUFFIX mad_cm_rej_t;\r
+C_ASSERT( sizeof(mad_cm_rej_t) == MAD_BLOCK_SIZE );\r
\r
#include <complib/cl_packoff.h>\r
\r
IN const uint8_t data_len,\r
IN OUT mad_cm_rej_t* const p_rej )\r
{\r
- if( p_data && data_len > IB_REJ_PDATA_SIZE )\r
- return IB_INVALID_PARAMETER;\r
-\r
if( p_data )\r
{\r
+ if( data_len > IB_REJ_PDATA_SIZE )\r
+ return IB_INVALID_SETTING;\r
+\r
cl_memcpy( p_rej->pdata, p_data, data_len );\r
- cl_memclr( p_rej->pdata + data_len,\r
- IB_REJ_PDATA_SIZE - data_len );\r
+ cl_memclr( p_rej->pdata + data_len, IB_REJ_PDATA_SIZE - data_len );\r
}\r
else\r
{\r
uint8_t pdata[IB_REP_PDATA_SIZE];\r
\r
} PACK_SUFFIX mad_cm_rep_t;\r
+C_ASSERT( sizeof(mad_cm_rep_t) == MAD_BLOCK_SIZE );\r
\r
#include <complib/cl_packoff.h>\r
\r
p_rep->offset27 = (rnr_retry_cnt << 5);\r
}\r
\r
-static inline void\r
+static inline ib_api_status_t\r
conn_rep_set_pdata(\r
IN const uint8_t* const p_data OPTIONAL,\r
- IN const uint8_t rep_len,\r
+ IN const uint8_t data_len,\r
IN OUT mad_cm_rep_t* const p_rep )\r
{\r
CL_ASSERT( p_rep->hdr.class_ver == IB_MCLASS_CM_VER_2 );\r
+\r
if( p_data )\r
{\r
- cl_memcpy( p_rep->pdata, p_data, rep_len );\r
- cl_memclr( p_rep->pdata + rep_len,\r
- IB_REP_PDATA_SIZE - rep_len );\r
+ if( data_len > IB_REP_PDATA_SIZE )\r
+ return IB_INVALID_SETTING;\r
+\r
+ cl_memcpy( p_rep->pdata, p_data, data_len );\r
+ cl_memclr( p_rep->pdata + data_len, IB_REP_PDATA_SIZE - data_len );\r
}\r
else\r
{\r
cl_memclr( p_rep->pdata, IB_REP_PDATA_SIZE );\r
}\r
+ return IB_SUCCESS;\r
}\r
\r
static inline void\r
uint8_t pdata[IB_RTU_PDATA_SIZE];\r
\r
} PACK_SUFFIX mad_cm_rtu_t;\r
+C_ASSERT( sizeof(mad_cm_rtu_t) == MAD_BLOCK_SIZE );\r
\r
#include <complib/cl_packoff.h>\r
\r
-static inline void\r
+static inline ib_api_status_t\r
conn_rtu_set_pdata(\r
IN const uint8_t* const p_data OPTIONAL,\r
- IN const uint8_t rtu_len,\r
+ IN const uint8_t data_len,\r
IN OUT mad_cm_rtu_t* const p_rtu )\r
{\r
if( p_data )\r
{\r
- cl_memcpy( p_rtu->pdata, p_data, rtu_len );\r
- cl_memclr( p_rtu->pdata + rtu_len, IB_RTU_PDATA_SIZE - rtu_len );\r
+ if( data_len > IB_RTU_PDATA_SIZE )\r
+ return IB_INVALID_SETTING;\r
+\r
+ cl_memcpy( p_rtu->pdata, p_data, data_len );\r
+ cl_memclr( p_rtu->pdata + data_len, IB_RTU_PDATA_SIZE - data_len );\r
}\r
else\r
{\r
cl_memclr( p_rtu->pdata, IB_RTU_PDATA_SIZE );\r
}\r
+ return IB_SUCCESS;\r
}\r
\r
/* DREQ */\r
uint8_t pdata[IB_DREQ_PDATA_SIZE];\r
\r
} PACK_SUFFIX mad_cm_dreq_t;\r
+C_ASSERT( sizeof(mad_cm_dreq_t) == MAD_BLOCK_SIZE );\r
\r
#include <complib/cl_packoff.h>\r
\r
__set_low24( &p_dreq->offset8, qpn );\r
}\r
\r
-static inline void\r
+static inline ib_api_status_t\r
conn_dreq_set_pdata(\r
IN const uint8_t* const p_data OPTIONAL,\r
- IN const uint8_t dreq_len,\r
+ IN const uint8_t data_len,\r
IN OUT mad_cm_dreq_t* const p_dreq )\r
{\r
if( p_data )\r
{\r
- cl_memcpy( p_dreq->pdata, p_data, dreq_len );\r
- cl_memclr( p_dreq->pdata + dreq_len,\r
- IB_DREQ_PDATA_SIZE - dreq_len );\r
+ if( data_len > IB_DREQ_PDATA_SIZE )\r
+ return IB_INVALID_SETTING;\r
+\r
+ cl_memcpy( p_dreq->pdata, p_data, data_len );\r
+ cl_memclr( p_dreq->pdata + data_len, IB_DREQ_PDATA_SIZE - data_len );\r
}\r
else\r
{\r
cl_memclr( p_dreq->pdata, IB_DREQ_PDATA_SIZE );\r
}\r
+ return IB_SUCCESS;\r
}\r
\r
static inline void\r
uint8_t pdata[IB_DREP_PDATA_SIZE];\r
\r
} PACK_SUFFIX mad_cm_drep_t;\r
+C_ASSERT( sizeof(mad_cm_drep_t) == MAD_BLOCK_SIZE );\r
\r
#include <complib/cl_packoff.h>\r
\r
-static inline void\r
+static inline ib_api_status_t\r
conn_drep_set_pdata(\r
IN const uint8_t* const p_data OPTIONAL,\r
- IN const uint8_t drep_len,\r
+ IN const uint8_t data_len,\r
IN OUT mad_cm_drep_t* const p_drep )\r
{\r
if( p_data )\r
{\r
- cl_memcpy( p_drep->pdata, p_data, drep_len );\r
- cl_memclr( p_drep->pdata + drep_len,\r
- IB_DREP_PDATA_SIZE - drep_len );\r
+ if( data_len > IB_DREP_PDATA_SIZE )\r
+ return IB_INVALID_SETTING;\r
+\r
+ cl_memcpy( p_drep->pdata, p_data, data_len );\r
+ cl_memclr( p_drep->pdata + data_len, IB_DREP_PDATA_SIZE - data_len );\r
}\r
else\r
{\r
cl_memclr( p_drep->pdata, IB_DREP_PDATA_SIZE );\r
}\r
+ return IB_SUCCESS;\r
}\r
\r
\r
uint8_t pdata[IB_LAP_PDATA_SIZE];\r
\r
} PACK_SUFFIX mad_cm_lap_t;\r
+C_ASSERT( sizeof(mad_cm_lap_t) == MAD_BLOCK_SIZE );\r
\r
#include <complib/cl_packoff.h>\r
\r
IN OUT mad_cm_lap_t* const p_lap )\r
{\r
CL_ASSERT( p_lap->hdr.class_ver == IB_MCLASS_CM_VER_2 );\r
- if( p_data && data_len > IB_LAP_PDATA_SIZE )\r
- return IB_INVALID_PARAMETER;\r
\r
cl_memclr( p_lap->pdata, IB_LAP_PDATA_SIZE );\r
if( p_data )\r
{\r
+ if( data_len > IB_LAP_PDATA_SIZE )\r
+ return IB_INVALID_PARAMETER;\r
+\r
cl_memcpy( p_lap->pdata, p_data, data_len );\r
cl_memclr( p_lap->pdata + data_len,\r
IB_LAP_PDATA_SIZE - data_len );\r
uint8_t pdata[IB_APR_PDATA_SIZE];\r
\r
} PACK_SUFFIX mad_cm_apr_t;\r
+C_ASSERT( sizeof(mad_cm_apr_t) == MAD_BLOCK_SIZE );\r
\r
#include <complib/cl_packoff.h>\r
\r
IN OUT mad_cm_apr_t* const p_apr )\r
{\r
CL_ASSERT( p_apr->hdr.class_ver == IB_MCLASS_CM_VER_2 );\r
- if( p_data && ( data_len > IB_APR_PDATA_SIZE ) )\r
- return IB_INVALID_PARAMETER;\r
-\r
if( p_data )\r
{\r
+ if( data_len > IB_APR_PDATA_SIZE )\r
+ return IB_INVALID_PARAMETER;\r
+\r
cl_memcpy( p_apr->pdata, p_data, data_len );\r
cl_memclr( p_apr->pdata + data_len,\r
IB_APR_PDATA_SIZE - data_len );\r
--- /dev/null
+/*\r
+ * Copyright (c) 2005 SilverStorm Technologies. All rights reserved.\r
+ *\r
+ * This software is available to you under the OpenIB.org BSD license\r
+ * below:\r
+ *\r
+ * Redistribution and use in source and binary forms, with or\r
+ * without modification, are permitted provided that the following\r
+ * conditions are met:\r
+ *\r
+ * - Redistributions of source code must retain the above\r
+ * copyright notice, this list of conditions and the following\r
+ * disclaimer.\r
+ *\r
+ * - Redistributions in binary form must reproduce the above\r
+ * copyright notice, this list of conditions and the following\r
+ * disclaimer in the documentation and/or other materials\r
+ * provided with the distribution.\r
+ *\r
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,\r
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\r
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND\r
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS\r
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN\r
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN\r
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\r
+ * SOFTWARE.\r
+ *\r
+ * $Id$\r
+ */\r
+\r
+\r
+#include <iba/ib_al.h>\r
+#include "al.h"\r
+#include "al_qp.h"\r
+#include "al_cm_cep.h"\r
+#include "al_cm_conn.h"\r
+#include "al_cm_sidr.h"\r
+#include "al_mgr.h"\r
+#include "al_debug.h"\r
+\r
+\r
+typedef struct _al_listen\r
+{\r
+ al_obj_t obj;\r
+ net32_t cid;\r
+\r
+ ib_pfn_cm_req_cb_t pfn_cm_req_cb;\r
+\r
+ /* valid for ud qp_type only */\r
+ const void* __ptr64 sidr_context;\r
+\r
+} al_listen_t;\r
+\r
+\r
+#ifdef CL_KERNEL\r
+\r
+/*\r
+ * Structure for queuing received MADs to the asynchronous processing\r
+ * manager.\r
+ */\r
+typedef struct _cep_async_mad\r
+{\r
+ cl_async_proc_item_t item;\r
+ ib_al_handle_t h_al;\r
+ ib_cep_t cep;\r
+\r
+} cep_async_mad_t;\r
+\r
+#endif /* CL_KERNEL */\r
+\r
+\r
+/*\r
+ * Transition the QP to the error state to flush all outstanding work\r
+ * requests and sets the timewait time. This function may be called\r
+ * when destroying the QP in order to flush all work requests, so we\r
+ * cannot call through the main API, or the call will fail since the\r
+ * QP is no longer in the initialize state.\r
+ */\r
+static void\r
+__cep_timewait_qp(\r
+ IN const ib_qp_handle_t h_qp )\r
+{\r
+ uint64_t timewait = 0;\r
+ ib_qp_mod_t qp_mod;\r
+ ib_api_status_t status;\r
+\r
+ AL_ENTER( AL_DBG_CM );\r
+\r
+ CL_ASSERT( h_qp );\r
+\r
+ /*\r
+ * The CM should have set the proper timewait time-out value. Reset\r
+ * the QP and let it enter the timewait state.\r
+ */\r
+ if( al_cep_get_timewait( h_qp->obj.h_al,\r
+ ((al_conn_qp_t*)h_qp)->cid, &timewait ) == IB_SUCCESS )\r
+ {\r
+ /* Special checks on the QP state for error handling - see above. */\r
+ if( !h_qp || !AL_OBJ_IS_TYPE( h_qp, AL_OBJ_TYPE_H_QP ) ||\r
+ ( (h_qp->obj.state != CL_INITIALIZED) && \r
+ (h_qp->obj.state != CL_DESTROYING) ) )\r
+ {\r
+ AL_TRACE_EXIT( AL_DBG_CM, ("IB_INVALID_QP_HANDLE\n") );\r
+ return;\r
+ }\r
+\r
+ cl_memclr( &qp_mod, sizeof(ib_qp_mod_t) );\r
+ qp_mod.req_state = IB_QPS_ERROR;\r
+\r
+ /* Modify to error state using function pointers - see above. */\r
+ status = h_qp->pfn_modify_qp( h_qp, &qp_mod, NULL );\r
+ if( status != IB_SUCCESS )\r
+ {\r
+ AL_TRACE_EXIT( AL_DBG_ERROR,\r
+ ("pfn_modify_qp to IB_QPS_ERROR returned %s\n",\r
+ ib_get_err_str( status )) );\r
+ return;\r
+ }\r
+\r
+#ifdef CL_KERNEL\r
+ /* Store the timestamp after which the QP exits timewait. */\r
+ h_qp->timewait = cl_get_time_stamp() + timewait;\r
+#endif /* CL_KERNEL */\r
+ }\r
+\r
+ AL_EXIT( AL_DBG_CM );\r
+}\r
+\r
+\r
+static void\r
+__format_req_path_rec(\r
+ IN const mad_cm_req_t* const p_req,\r
+ IN const req_path_info_t* const p_path,\r
+ OUT ib_path_rec_t* const p_path_rec )\r
+{\r
+ AL_ENTER( AL_DBG_CM );\r
+\r
+ CL_ASSERT( p_req );\r
+ CL_ASSERT( p_path );\r
+ CL_ASSERT( p_path_rec );\r
+\r
+ /*\r
+ * Format a local path record. The local ack timeout specified in the\r
+ * REQ is twice the packet life plus the sender's CA ACK delay. When\r
+ * reporting the packet life, we divide the local ack timeout by 2 to\r
+ * approach the path's packet lifetime. Since local ack timeout is\r
+ * expressed as 4.096 * 2^x, subtracting 1 is equivalent to dividing the\r
+ * time in half.\r
+ */\r
+ ib_path_rec_init_local( p_path_rec,\r
+ &p_path->local_gid,\r
+ &p_path->remote_gid,\r
+ p_path->local_lid,\r
+ p_path->remote_lid,\r
+ 1, p_req->pkey,\r
+ conn_req_path_get_svc_lvl( p_path ),\r
+ IB_PATH_SELECTOR_EXACTLY, conn_req_get_mtu( p_req ),\r
+ IB_PATH_SELECTOR_EXACTLY,\r
+ conn_req_path_get_pkt_rate( p_path ),\r
+ IB_PATH_SELECTOR_EXACTLY,\r
+ (uint8_t)( conn_req_path_get_lcl_ack_timeout( p_path ) - 1 ),\r
+ 0 );\r
+\r
+ p_path_rec->hop_flow_raw.val = 0;\r
+ /* Add global routing info as necessary. */\r
+ if( !conn_req_path_get_subn_lcl( p_path ) )\r
+ {\r
+ ib_path_rec_set_hop_flow_raw( p_path_rec, p_path->hop_limit,\r
+ conn_req_path_get_flow_lbl( p_path ), FALSE );\r
+ p_path_rec->tclass = p_path->traffic_class;\r
+ }\r
+\r
+ AL_EXIT( AL_DBG_CM );\r
+}\r
+\r
+\r
+static void\r
+__format_req_rec(\r
+ IN const mad_cm_req_t* const p_req,\r
+ OUT ib_cm_req_rec_t *p_req_rec )\r
+{\r
+ AL_ENTER( AL_DBG_CM );\r
+\r
+ CL_ASSERT( p_req );\r
+ CL_ASSERT( p_req_rec );\r
+\r
+ cl_memclr( p_req_rec, sizeof(ib_cm_req_rec_t) );\r
+\r
+ /* format version specific data */\r
+ p_req_rec->p_req_pdata = p_req->pdata;\r
+\r
+ p_req_rec->qp_type = conn_req_get_qp_type( p_req );\r
+\r
+ p_req_rec->resp_res = conn_req_get_resp_res( p_req );\r
+ p_req_rec->flow_ctrl = conn_req_get_flow_ctrl( p_req );\r
+ p_req_rec->rnr_retry_cnt = conn_req_get_rnr_retry_cnt( p_req );\r
+\r
+ __format_req_path_rec( p_req, &p_req->primary_path,\r
+ &p_req_rec->primary_path );\r
+ __format_req_path_rec( p_req, &p_req->alternate_path,\r
+ &p_req_rec->alt_path );\r
+\r
+ /* These values are filled in later based on listen or peer connections\r
+ p_req_rec->context = ;\r
+ p_req_rec->h_cm_req = ;\r
+ p_req_rec->h_cm_listen = ;\r
+ */\r
+\r
+ AL_EXIT( AL_DBG_CM );\r
+}\r
+\r
+\r
+/******************************************************************************\r
+* Functions that handle incoming REQs that matched to an outstanding listen.\r
+*\r
+*/\r
+\r
+\r
+static void\r
+__listen_req(\r
+ IN al_listen_t* const p_listen,\r
+ IN const ib_cep_t* const p_new_cep,\r
+ IN const mad_cm_req_t* const p_req )\r
+{\r
+ ib_cm_req_rec_t req_rec;\r
+\r
+ AL_ENTER( AL_DBG_CM );\r
+\r
+ CL_ASSERT( p_listen );\r
+ CL_ASSERT( p_new_cep );\r
+ CL_ASSERT( p_req );\r
+\r
+ /* Format the callback record. */\r
+ __format_req_rec( p_req, &req_rec );\r
+\r
+ /* update listen based rec */\r
+ req_rec.context = p_listen->obj.context;\r
+\r
+ req_rec.h_cm_req.cid = p_new_cep->cid;\r
+ req_rec.h_cm_req.h_al = p_listen->obj.h_al;\r
+ req_rec.h_cm_req.h_qp = p_new_cep->context;\r
+\r
+ req_rec.h_cm_listen = p_listen;\r
+\r
+ /* Invoke the user's callback. */\r
+ p_listen->pfn_cm_req_cb( &req_rec );\r
+\r
+ AL_EXIT( AL_DBG_CM );\r
+}\r
+\r
+\r
+static void\r
+__proc_listen(\r
+ IN al_listen_t* const p_listen,\r
+ IN ib_cep_t* const p_new_cep,\r
+ IN const ib_mad_t* const p_mad )\r
+{\r
+ AL_ENTER( AL_DBG_CM );\r
+\r
+ /* Context is a listen - MAD must be a REQ or SIDR REQ */\r
+ switch( p_mad->attr_id )\r
+ {\r
+ case CM_REQ_ATTR_ID:\r
+ __listen_req(\r
+ p_listen, p_new_cep, (mad_cm_req_t*)p_mad );\r
+ break;\r
+\r
+ case CM_SIDR_REQ_ATTR_ID:\r
+ /* TODO - implement SIDR. */\r
+ default:\r
+ CL_ASSERT( p_mad->attr_id == CM_REQ_ATTR_ID ||\r
+ p_mad->attr_id == CM_SIDR_REQ_ATTR_ID );\r
+ /* Destroy the new CEP as it won't ever be reported to the user. */\r
+ al_destroy_cep( p_listen->obj.h_al, p_new_cep->cid, NULL );\r
+ }\r
+\r
+ AL_EXIT( AL_DBG_CM );\r
+}\r
+\r
+\r
+/******************************************************************************\r
+* Functions that handle send timeouts:\r
+*\r
+*/\r
+\r
+/*\r
+ * callback to process a connection establishment timeout due to reply not\r
+ * being received. The connection object has a reference\r
+ * taken when the timer is set or when the send is sent.\r
+ */\r
+static void\r
+__proc_conn_timeout(\r
+ IN const ib_qp_handle_t h_qp )\r
+{\r
+ ib_cm_rej_rec_t rej_rec;\r
+ net32_t cid;\r
+\r
+ AL_ENTER( AL_DBG_CM );\r
+\r
+ CL_ASSERT( h_qp );\r
+\r
+ /*\r
+ * Format the reject record before aborting the connection since\r
+ * we need the QP context.\r
+ */\r
+ cl_memclr( &rej_rec, sizeof(ib_cm_rej_rec_t) );\r
+ rej_rec.h_qp = h_qp;\r
+ rej_rec.qp_context = h_qp->obj.context;\r
+ rej_rec.rej_status = IB_REJ_TIMEOUT;\r
+\r
+ ref_al_obj( &h_qp->obj );\r
+\r
+ /* Unbind the QP from the CEP. */\r
+ __cep_timewait_qp( h_qp );\r
+\r
+ cid = cl_atomic_xchg( &((al_conn_qp_t*)h_qp)->cid, AL_INVALID_CID );\r
+\r
+ /* Invoke the callback. */\r
+ ((al_conn_qp_t*)h_qp)->pfn_cm_rej_cb( &rej_rec );\r
+\r
+ if( cid == AL_INVALID_CID ||\r
+ al_destroy_cep( h_qp->obj.h_al, cid, deref_al_obj ) != IB_SUCCESS )\r
+ {\r
+ deref_al_obj( &h_qp->obj );\r
+ }\r
+\r
+ AL_EXIT( AL_DBG_CM );\r
+}\r
+\r
+\r
+/*\r
+ * callback to process a LAP timeout due to APR not being received.\r
+ */\r
+static void\r
+__proc_lap_timeout(\r
+ IN const ib_qp_handle_t h_qp )\r
+{\r
+ ib_cm_apr_rec_t apr_rec;\r
+\r
+ AL_ENTER( AL_DBG_CM );\r
+\r
+ CL_ASSERT( h_qp );\r
+\r
+ /* Report the timeout. */\r
+ cl_memclr( &apr_rec, sizeof(ib_cm_apr_rec_t) );\r
+ apr_rec.h_qp = h_qp;\r
+ apr_rec.qp_context = h_qp->obj.context;\r
+ apr_rec.cm_status = IB_TIMEOUT;\r
+ apr_rec.apr_status = IB_AP_REJECT;\r
+\r
+ /* Notify the user that the LAP failed. */\r
+ ((al_conn_qp_t*)h_qp)->pfn_cm_apr_cb( &apr_rec );\r
+\r
+ AL_EXIT( AL_DBG_CM );\r
+}\r
+\r
+\r
+/*\r
+ * Callback to process a disconnection timeout due to not receiving the DREP\r
+ * within allowable time.\r
+ */\r
+static void\r
+__proc_dconn_timeout(\r
+ IN const ib_qp_handle_t h_qp )\r
+{\r
+ ib_cm_drep_rec_t drep_rec;\r
+ net32_t cid;\r
+\r
+ AL_ENTER( AL_DBG_CM );\r
+\r
+ /* No response. We're done. Deliver a DREP callback. */\r
+ cl_memclr( &drep_rec, sizeof(ib_cm_drep_rec_t) );\r
+ drep_rec.h_qp = h_qp;\r
+ drep_rec.qp_context = h_qp->obj.context;\r
+ drep_rec.cm_status = IB_TIMEOUT;\r
+\r
+ ref_al_obj( &h_qp->obj );\r
+\r
+ __cep_timewait_qp( h_qp );\r
+\r
+ cid = cl_atomic_xchg( &((al_conn_qp_t*)h_qp)->cid, AL_INVALID_CID );\r
+\r
+ /* Call the user back. */\r
+ ((al_conn_qp_t*)h_qp)->pfn_cm_drep_cb( &drep_rec );\r
+\r
+ if( cid == AL_INVALID_CID ||\r
+ al_destroy_cep( h_qp->obj.h_al, cid, deref_al_obj ) != IB_SUCCESS )\r
+ {\r
+ deref_al_obj( &h_qp->obj );\r
+ }\r
+\r
+ AL_EXIT( AL_DBG_CM );\r
+}\r
+\r
+\r
+static void\r
+__proc_failed_send(\r
+ IN ib_qp_handle_t h_qp,\r
+ IN const ib_mad_t* const p_mad )\r
+{\r
+ AL_ENTER( AL_DBG_CM );\r
+\r
+ /* Failure indicates a send. */\r
+ switch( p_mad->attr_id )\r
+ {\r
+ case CM_REQ_ATTR_ID:\r
+ case CM_REP_ATTR_ID:\r
+ __proc_conn_timeout( h_qp );\r
+ break;\r
+ case CM_LAP_ATTR_ID:\r
+ __proc_lap_timeout( h_qp );\r
+ break;\r
+ case CM_DREQ_ATTR_ID:\r
+ __proc_dconn_timeout( h_qp );\r
+ break;\r
+ default:\r
+ AL_TRACE( AL_DBG_ERROR,\r
+ ("Invalid CM send MAD attribute ID %d.\n", p_mad->attr_id) );\r
+ break;\r
+ }\r
+\r
+ AL_EXIT( AL_DBG_CM );\r
+}\r
+\r
+\r
+/******************************************************************************\r
+* Functions that handle received MADs on a connection (not listen)\r
+*\r
+*/\r
+\r
+\r
+void\r
+__proc_peer_req(\r
+ IN const ib_cm_handle_t* const p_cm,\r
+ IN const mad_cm_req_t* const p_req )\r
+{\r
+ ib_cm_req_rec_t req_rec;\r
+\r
+ AL_ENTER( AL_DBG_CM );\r
+\r
+ CL_ASSERT( p_cm );\r
+ CL_ASSERT( p_cm->h_qp );\r
+ /* Must be peer-to-peer. */\r
+ CL_ASSERT( ((al_conn_qp_t*)p_cm->h_qp)->pfn_cm_req_cb );\r
+ CL_ASSERT( p_req );\r
+\r
+ /* Format the callback record. */\r
+ __format_req_rec( p_req, &req_rec );\r
+\r
+ /* update peer based rec handles and context values */\r
+ req_rec.context = p_cm->h_qp->obj.context;\r
+ req_rec.h_cm_req = *p_cm;\r
+ req_rec.h_cm_listen = NULL;\r
+\r
+ /* Invoke the user's callback. User must call ib_cm_rep or ib_cm_rej. */\r
+ ((al_conn_qp_t*)p_cm->h_qp)->pfn_cm_req_cb( &req_rec );\r
+\r
+ AL_EXIT( AL_DBG_CM );\r
+}\r
+\r
+\r
+void\r
+__proc_mra(\r
+ IN const ib_cm_handle_t* const p_cm,\r
+ IN const mad_cm_mra_t* const p_mra )\r
+{\r
+ ib_cm_mra_rec_t mra_rec;\r
+\r
+ AL_ENTER( AL_DBG_CM );\r
+\r
+ CL_ASSERT( p_cm->h_qp );\r
+ CL_ASSERT( ((al_conn_qp_t*)p_cm->h_qp)->pfn_cm_mra_cb );\r
+\r
+ /* Format the MRA callback record. */\r
+ cl_memclr( &mra_rec, sizeof(ib_cm_mra_rec_t) );\r
+\r
+ mra_rec.h_qp = p_cm->h_qp;\r
+ mra_rec.qp_context = p_cm->h_qp->obj.context;\r
+ mra_rec.p_mra_pdata = p_mra->pdata;\r
+\r
+ /*\r
+ * Call the user back. Note that users will get a callback only\r
+ * for the first MRA received in response to a REQ, REP, or LAP.\r
+ */\r
+ ((al_conn_qp_t*)p_cm->h_qp)->pfn_cm_mra_cb( &mra_rec );\r
+\r
+ AL_EXIT( AL_DBG_CM );\r
+}\r
+\r
+\r
/*
 * Handle a received REJ: return the QP to a reusable state, tear down the
 * CEP bound to it, and deliver the REJ callback.  Does nothing when the
 * CEP has no QP bound (p_cm->h_qp is NULL).
 */
void
__proc_rej(
	IN		const	ib_cm_handle_t* const		p_cm,
	IN		const	mad_cm_rej_t* const			p_rej )
{
	ib_cm_rej_rec_t	rej_rec;
	net32_t			cid;

	AL_ENTER( AL_DBG_CM );

	if( p_cm->h_qp )
	{
		/* Format the REJ callback record. */
		cl_memclr( &rej_rec, sizeof(ib_cm_rej_rec_t) );

		rej_rec.h_qp = p_cm->h_qp;
		rej_rec.qp_context = p_cm->h_qp->obj.context;

		rej_rec.p_rej_pdata = p_rej->pdata;
		rej_rec.p_ari = p_rej->ari;
		rej_rec.ari_length = conn_rej_get_ari_len( p_rej );
		rej_rec.rej_status = p_rej->reason;

		/* Hold a QP reference across CEP destruction below. */
		ref_al_obj( &p_cm->h_qp->obj );

		/*
		 * Unbind the QP from the connection object.  This allows the QP to
		 * be immediately reused in another connection request.
		 */
		__cep_timewait_qp( p_cm->h_qp );

		cid = cl_atomic_xchg( &((al_conn_qp_t*)p_cm->h_qp)->cid, AL_INVALID_CID );
		CL_ASSERT( cid == p_cm->cid || cid == AL_INVALID_CID );
		/*
		 * Destroy the CEP.  If it was already unbound, or al_destroy_cep
		 * does not take ownership of our reference (deref_al_obj callback),
		 * release the reference here.
		 */
		if( cid == AL_INVALID_CID ||
			al_destroy_cep( p_cm->h_al, cid, deref_al_obj ) != IB_SUCCESS )
		{
			deref_al_obj( &p_cm->h_qp->obj );
		}

		/* Call the user back. */
		((al_conn_qp_t*)p_cm->h_qp)->pfn_cm_rej_cb( &rej_rec );
	}

	AL_EXIT( AL_DBG_CM );
}
+\r
+\r
+static void\r
+__proc_rep(\r
+ IN ib_cm_handle_t* const p_cm,\r
+ IN mad_cm_rep_t* const p_rep )\r
+{\r
+ ib_cm_rep_rec_t rep_rec;\r
+\r
+ AL_ENTER( AL_DBG_CM );\r
+\r
+ cl_memclr( &rep_rec, sizeof(ib_cm_rep_rec_t) );\r
+\r
+ /* fill the rec callback data */\r
+ rep_rec.p_rep_pdata = p_rep->pdata;\r
+ rep_rec.qp_type = p_cm->h_qp->type;\r
+\r
+ rep_rec.h_cm_rep = *p_cm;\r
+ rep_rec.qp_context = p_cm->h_qp->obj.context;\r
+ rep_rec.resp_res = p_rep->resp_resources;\r
+ rep_rec.flow_ctrl = conn_rep_get_e2e_flow_ctl( p_rep );\r
+ rep_rec.apr_status = conn_rep_get_failover( p_rep );\r
+\r
+ /* Notify the user of the reply. */\r
+ ((al_conn_qp_t*)p_cm->h_qp)->pfn_cm_rep_cb( &rep_rec );\r
+\r
+ AL_EXIT( AL_DBG_CM );\r
+}\r
+\r
+\r
+static void\r
+__proc_rtu(\r
+ IN ib_cm_handle_t* const p_cm,\r
+ IN mad_cm_rtu_t* const p_rtu )\r
+{\r
+ ib_cm_rtu_rec_t rtu_rec;\r
+\r
+ AL_ENTER( AL_DBG_CM );\r
+\r
+ rtu_rec.p_rtu_pdata = p_rtu->pdata;\r
+ rtu_rec.h_qp = p_cm->h_qp;\r
+ rtu_rec.qp_context = p_cm->h_qp->obj.context;\r
+\r
+ ((al_conn_qp_t*)p_cm->h_qp)->pfn_cm_rtu_cb( &rtu_rec );\r
+\r
+ AL_EXIT( AL_DBG_CM );\r
+}\r
+\r
+\r
+static void\r
+__proc_dreq(\r
+ IN ib_cm_handle_t* const p_cm,\r
+ IN mad_cm_dreq_t* const p_dreq )\r
+{\r
+ ib_cm_dreq_rec_t dreq_rec;\r
+\r
+ AL_ENTER( AL_DBG_CM );\r
+\r
+ cl_memclr( &dreq_rec, sizeof(ib_cm_dreq_rec_t) );\r
+\r
+ dreq_rec.h_cm_dreq = *p_cm;\r
+ dreq_rec.p_dreq_pdata = p_dreq->pdata;\r
+\r
+ dreq_rec.qp_context = p_cm->h_qp->obj.context;\r
+\r
+ ((al_conn_qp_t*)p_cm->h_qp)->pfn_cm_dreq_cb( &dreq_rec );\r
+\r
+ AL_EXIT( AL_DBG_CM );\r
+}\r
+\r
+\r
/*
 * Handle a received DREP: move the QP to the timewait state, tear down the
 * CEP bound to it, and deliver the DREP callback.
 */
void
__proc_drep(
	IN				ib_cm_handle_t* const		p_cm,
	IN				mad_cm_drep_t* const		p_drep )
{
	ib_cm_drep_rec_t	drep_rec;
	net32_t				cid;

	AL_ENTER( AL_DBG_CM );

	cl_memclr( &drep_rec, sizeof(ib_cm_drep_rec_t) );

	/* Copy qp context before the connection is released */
	drep_rec.cm_status = IB_SUCCESS;
	drep_rec.p_drep_pdata = p_drep->pdata;
	drep_rec.h_qp = p_cm->h_qp;
	drep_rec.qp_context = p_cm->h_qp->obj.context;

	/* Hold a QP reference across CEP destruction below. */
	ref_al_obj( &p_cm->h_qp->obj );

	__cep_timewait_qp( p_cm->h_qp );

	/* Unbind the CEP from the QP so the QP can be reused. */
	cid = cl_atomic_xchg( &((al_conn_qp_t*)p_cm->h_qp)->cid, AL_INVALID_CID );
	if( cid != AL_INVALID_CID )
	{
		CL_ASSERT( cid == p_cm->cid );

		/*
		 * On success al_destroy_cep takes ownership of our QP reference
		 * (released via the deref_al_obj callback); otherwise release it
		 * here.
		 */
		if( al_destroy_cep(
			p_cm->h_al, p_cm->cid, deref_al_obj ) != IB_SUCCESS )
		{
			deref_al_obj( &p_cm->h_qp->obj );
		}
	}
	else
	{
		/* CEP already unbound elsewhere - just drop our reference. */
		deref_al_obj( &p_cm->h_qp->obj );
	}

	((al_conn_qp_t*)p_cm->h_qp)->pfn_cm_drep_cb( &drep_rec );

	AL_EXIT( AL_DBG_CM );
}
+\r
+\r
+void\r
+__proc_lap(\r
+ IN ib_cm_handle_t* const p_cm,\r
+ IN const mad_cm_lap_t* const p_lap )\r
+{\r
+ ib_cm_lap_rec_t lap_rec;\r
+ const lap_path_info_t* const p_path = &p_lap->alternate_path;\r
+\r
+ AL_ENTER( AL_DBG_CM );\r
+\r
+ CL_ASSERT( p_cm );\r
+ CL_ASSERT( p_cm->h_qp );\r
+ CL_ASSERT( p_lap );\r
+\r
+ cl_memclr( &lap_rec, sizeof(ib_cm_lap_rec_t) );\r
+ lap_rec.qp_context = p_cm->h_qp->obj.context;\r
+ lap_rec.h_cm_lap = *p_cm;\r
+\r
+ /*\r
+ * Format the path record. The local ack timeout specified in the\r
+ * LAP is twice the packet life plus the sender's CA ACK delay. When\r
+ * reporting the packet life, we divide the local ack timeout by 2 to\r
+ * approach the path's packet lifetime. Since local ack timeout is\r
+ * expressed as 4.096 * 2^x, subtracting 1 is equivalent to dividing the\r
+ * time in half.\r
+ */\r
+ ib_path_rec_init_local( &lap_rec.alt_path,\r
+ &p_lap->alternate_path.local_gid,\r
+ &p_lap->alternate_path.remote_gid,\r
+ p_lap->alternate_path.local_lid,\r
+ p_lap->alternate_path.remote_lid,\r
+ 1, IB_DEFAULT_PKEY,\r
+ conn_lap_path_get_svc_lvl( &p_lap->alternate_path ),\r
+ IB_PATH_SELECTOR_EXACTLY,\r
+ IB_MTU_2048,\r
+ IB_PATH_SELECTOR_EXACTLY,\r
+ conn_lap_path_get_pkt_rate( p_path ),\r
+ IB_PATH_SELECTOR_EXACTLY,\r
+ (uint8_t)( conn_lap_path_get_lcl_ack_timeout( p_path ) - 1 ),\r
+ 0 );\r
+\r
+ lap_rec.alt_path.hop_flow_raw.val = 0;\r
+ /* Add global routing info as necessary. */\r
+ if( !conn_lap_path_get_subn_lcl( &p_lap->alternate_path ) )\r
+ {\r
+ ib_path_rec_set_hop_flow_raw( &lap_rec.alt_path,\r
+ p_lap->alternate_path.hop_limit,\r
+ conn_lap_path_get_flow_lbl( &p_lap->alternate_path ),\r
+ FALSE );\r
+ lap_rec.alt_path.tclass =\r
+ conn_lap_path_get_tclass( &p_lap->alternate_path );\r
+ }\r
+\r
+ ((al_conn_qp_t*)p_cm->h_qp)->pfn_cm_lap_cb( &lap_rec );\r
+\r
+ AL_EXIT( AL_DBG_CM );\r
+}\r
+\r
+\r
+static ib_api_status_t\r
+__cep_lap_qp(\r
+ IN ib_cm_handle_t* const p_cm )\r
+{\r
+ ib_api_status_t status;\r
+ ib_qp_mod_t qp_mod;\r
+\r
+ AL_ENTER( AL_DBG_CM );\r
+\r
+ status = al_cep_get_rts_attr( p_cm->h_al, p_cm->cid, &qp_mod );\r
+ if( status != IB_SUCCESS )\r
+ {\r
+ AL_TRACE( AL_DBG_ERROR,\r
+ ("al_cep_get_rts_attr returned %s.\n", ib_get_err_str(status)) );\r
+ goto done;\r
+ }\r
+\r
+ status = ib_modify_qp( p_cm->h_qp, &qp_mod );\r
+ if( status != IB_SUCCESS )\r
+ {\r
+ AL_TRACE( AL_DBG_ERROR,\r
+ ("ib_modify_qp for LAP returned %s.\n", ib_get_err_str(status)) );\r
+ }\r
+\r
+done:\r
+ AL_EXIT( AL_DBG_CM );\r
+ return status;\r
+}\r
+\r
+\r
+static void\r
+__proc_apr(\r
+ IN ib_cm_handle_t* const p_cm,\r
+ IN mad_cm_apr_t* const p_apr )\r
+{\r
+ ib_cm_apr_rec_t apr_rec;\r
+\r
+ AL_ENTER( AL_DBG_CM );\r
+\r
+ apr_rec.h_qp = p_cm->h_qp;\r
+ apr_rec.qp_context = p_cm->h_qp->obj.context;\r
+ apr_rec.p_info = (const uint8_t*)&p_apr->info;\r
+ apr_rec.info_length = p_apr->info_len;\r
+ apr_rec.p_apr_pdata = p_apr->pdata;\r
+ apr_rec.apr_status = p_apr->status;\r
+\r
+ if( apr_rec.apr_status == IB_AP_SUCCESS )\r
+ {\r
+ apr_rec.cm_status = __cep_lap_qp( p_cm );\r
+ }\r
+ else\r
+ {\r
+ apr_rec.cm_status = IB_ERROR;\r
+ }\r
+\r
+ ((al_conn_qp_t*)p_cm->h_qp)->pfn_cm_apr_cb( &apr_rec );\r
+\r
+ AL_EXIT( AL_DBG_CM );\r
+}\r
+\r
+\r
/*
 * Dispatch a MAD received on a connected (non-listen) CEP to the handler
 * matching the CM message type.
 */
static void
__proc_conn(
	IN				ib_cm_handle_t* const		p_cm,
	IN				ib_mad_t* const				p_mad )
{
	AL_ENTER( AL_DBG_CM );

	/* Success indicates a receive. */
	switch( p_mad->attr_id )
	{
	case CM_REQ_ATTR_ID:
		/* A REQ on a bound CEP - peer-to-peer connection establishment. */
		__proc_peer_req( p_cm, (mad_cm_req_t*)p_mad );
		break;

	case CM_MRA_ATTR_ID:
		__proc_mra( p_cm, (mad_cm_mra_t*)p_mad );
		break;

	case CM_REJ_ATTR_ID:
		__proc_rej( p_cm, (mad_cm_rej_t*)p_mad );
		break;

	case CM_REP_ATTR_ID:
		__proc_rep( p_cm, (mad_cm_rep_t*)p_mad );
		break;

	case CM_RTU_ATTR_ID:
		__proc_rtu( p_cm, (mad_cm_rtu_t*)p_mad );
		break;

	case CM_DREQ_ATTR_ID:
		__proc_dreq( p_cm, (mad_cm_dreq_t*)p_mad );
		break;

	case CM_DREP_ATTR_ID:
		__proc_drep( p_cm, (mad_cm_drep_t*)p_mad );
		break;

	case CM_LAP_ATTR_ID:
		__proc_lap( p_cm, (mad_cm_lap_t*)p_mad );
		break;

	case CM_APR_ATTR_ID:
		__proc_apr( p_cm, (mad_cm_apr_t*)p_mad );
		break;

	/* SIDR handling is not implemented - kept for reference. */
	//case CM_SIDR_REQ_ATTR_ID:
	//	p_async_mad->item.pfn_callback = __process_cm_sidr_req;
	//	break;

	//case CM_SIDR_REP_ATTR_ID:
	//	p_async_mad->item.pfn_callback = __process_cm_sidr_rep;
	//	break;

	default:
		AL_TRACE( AL_DBG_ERROR,
			("Invalid CM recv MAD attribute ID %d.\n", p_mad->attr_id) );
	}

	AL_EXIT( AL_DBG_CM );
}
+\r
/******************************************************************************
* CEP callback handler.
*
*/

/*
 * Drain all completed work on a CEP and dispatch each item.  In user mode
 * this is installed directly as the CEP handler (__cm_handler); in kernel
 * mode it is named __process_cep_cb and runs at passive level from
 * __process_cep_async (queued by the kernel __cm_handler below).
 */
#ifdef CL_KERNEL
static void
__process_cep_cb(
#else
static void
__cm_handler(
#endif
	IN		const	ib_al_handle_t				h_al,
	IN				ib_cep_t* const				p_cep )
{
	ib_api_status_t		status;
	ib_cep_t			new_cep;
	ib_mad_element_t	*p_mad;
	ib_cm_handle_t		h_cm;

	AL_ENTER( AL_DBG_CM );

	/* Poll the CEP until it reports no more work. */
	for( status = al_cep_poll( h_al, p_cep->cid, &new_cep, &p_mad );
		status == IB_SUCCESS;
		status = al_cep_poll( h_al, p_cep->cid, &new_cep, &p_mad ) )
	{
		/* Something to do - WOOT!!! */
		if( new_cep.cid != AL_INVALID_CID )
		{
			/*
			 * A new CEP came back from the poll - the CEP's context is the
			 * listen object (presumably an incoming REQ on a listen -
			 * confirm against al_cep_poll's contract).
			 */
			__proc_listen( (al_listen_t*)p_cep->context,
				&new_cep, ib_get_mad_buf( p_mad ) );
		}
		else if( p_mad->status != IB_SUCCESS )
		{
			/* Context is a QP handle, and a sent MAD timed out. */
			__proc_failed_send(
				(ib_qp_handle_t)p_cep->context, ib_get_mad_buf( p_mad ) );
		}
		else
		{
			/* A MAD received on an established connection. */
			h_cm.h_al = h_al;
			h_cm.cid = p_cep->cid;
			h_cm.h_qp = (ib_qp_handle_t)p_cep->context;
			__proc_conn( &h_cm, ib_get_mad_buf( p_mad ) );
		}
		/* Always return the MAD element to its pool. */
		ib_put_mad( p_mad );
	}
}
+\r
+\r
+#ifdef CL_KERNEL\r
+\r
+static void\r
+__process_cep_async(\r
+ IN cl_async_proc_item_t *p_item )\r
+{\r
+ cep_async_mad_t *p_async_mad;\r
+\r
+ AL_ENTER( AL_DBG_CM );\r
+\r
+ p_async_mad = PARENT_STRUCT( p_item, cep_async_mad_t, item );\r
+\r
+ __process_cep_cb( p_async_mad->h_al, &p_async_mad->cep );\r
+\r
+ cl_free( p_async_mad );\r
+\r
+ AL_EXIT( AL_DBG_CM );\r
+}\r
+\r
+\r
+/*\r
+ * The handler is invoked at DISPATCH_LEVEL in kernel mode. We need to switch\r
+ * to a passive level thread context to perform QP modify and invoke user\r
+ * callbacks.\r
+ */\r
+static void\r
+__cm_handler(\r
+ IN const ib_al_handle_t h_al,\r
+ IN ib_cep_t* const p_cep )\r
+{\r
+ cep_async_mad_t *p_async_mad;\r
+\r
+ AL_ENTER( AL_DBG_CM );\r
+\r
+ p_async_mad = (cep_async_mad_t*)cl_zalloc( sizeof(cep_async_mad_t) );\r
+ if( !p_async_mad )\r
+ {\r
+ AL_TRACE_EXIT( AL_DBG_ERROR,\r
+ ("failed to cl_zalloc cm_async_mad_t (%d bytes)\n",\r
+ sizeof(cep_async_mad_t)) );\r
+ return;\r
+ }\r
+\r
+ p_async_mad->h_al = h_al;\r
+ p_async_mad->cep = *p_cep;\r
+ p_async_mad->item.pfn_callback = __process_cep_async;\r
+\r
+ /* Queue the MAD for asynchronous processing. */\r
+ cl_async_proc_queue( gp_async_proc_mgr, &p_async_mad->item );\r
+\r
+ AL_EXIT( AL_DBG_CM );\r
+}\r
+#endif /* CL_KERNEL */\r
+\r
+\r
/*
 * Transition the QP to the INIT state, if it is not already in the
 * INIT state.
 */
ib_api_status_t
__cep_init_qp(
	IN		const	ib_qp_handle_t				h_qp,
	IN				ib_qp_mod_t* const			p_init )
{
	ib_qp_mod_t			qp_mod;
	ib_api_status_t		status;

	/*
	 * Move to the init state to allow posting of receive buffers.
	 * Check the current state of the QP.  The user may have already
	 * transitioned it and posted some receives to the QP, so we
	 * should not reset the QP if it is already in the INIT state.
	 */
	if( h_qp->state != IB_QPS_INIT )
	{
		/* Reset the QP. */
		cl_memclr( &qp_mod, sizeof(ib_qp_mod_t) );
		qp_mod.req_state = IB_QPS_RESET;

		status = ib_modify_qp( h_qp, &qp_mod );
		if( status != IB_SUCCESS )
		{
			/*
			 * NOTE(review): a reset failure is logged but not treated as
			 * fatal - the INIT transition below is still attempted.  This
			 * appears intentional (best-effort reset); confirm.
			 */
			AL_TRACE( AL_DBG_ERROR,
				("ib_modify_qp to IB_QPS_RESET returned %s\n",
				ib_get_err_str(status) ) );
		}

		/* Initialize the QP using the caller-supplied attributes. */
		status = ib_modify_qp( h_qp, p_init );
		if( status != IB_SUCCESS )
		{
			AL_TRACE( AL_DBG_ERROR,
				("ib_modify_qp returned %s.\n", ib_get_err_str(status) ) );
			return status;
		}
	}

	return IB_SUCCESS;
}
+\r
+static ib_api_status_t\r
+__cep_pre_req(\r
+ IN const ib_cm_req_t* const p_cm_req )\r
+{\r
+ ib_api_status_t status;\r
+ ib_qp_mod_t qp_mod;\r
+\r
+ AL_ENTER( AL_DBG_CM );\r
+\r
+ status = al_cep_pre_req( qp_get_al( p_cm_req->h_qp ),\r
+ ((al_conn_qp_t*)p_cm_req->h_qp)->cid, p_cm_req, &qp_mod );\r
+ if( status != IB_SUCCESS )\r
+ {\r
+ AL_TRACE_EXIT( AL_DBG_ERROR,\r
+ ("al_cep_pre_req returned %s.\n", ib_get_err_str( status )) );\r
+ return status;\r
+ }\r
+\r
+ /* Transition QP through state machine */\r
+ /*\r
+ * Warning! Using all access rights. We need to modify\r
+ * the ib_cm_req_t to include this.\r
+ */\r
+ qp_mod.state.init.access_ctrl |=\r
+ IB_AC_RDMA_READ | IB_AC_RDMA_WRITE | IB_AC_ATOMIC;\r
+ status = __cep_init_qp( p_cm_req->h_qp, &qp_mod );\r
+ if( status != IB_SUCCESS )\r
+ {\r
+ AL_TRACE_EXIT( AL_DBG_ERROR,\r
+ ("__cep_init_qp returned %s\n", ib_get_err_str(status)) );\r
+ return status;\r
+ }\r
+\r
+ AL_EXIT( AL_DBG_CM );\r
+ return IB_SUCCESS;\r
+}\r
+\r
+\r
/*
 * Create a CEP, bind it to the QP, prepare the QP, and send the REQ.
 * On any failure after binding, the CEP is unbound and destroyed.
 */
static ib_api_status_t
__cep_conn_req(
	IN		const	ib_al_handle_t				h_al,
	IN		const	ib_cm_req_t* const			p_cm_req )
{
	ib_api_status_t		status;
	//cl_status_t			cl_status;
	//cl_event_t			sync_event;
	//cl_event_t			*p_sync_event = NULL;
	al_conn_qp_t		*p_qp;
	net32_t				cid, old_cid;

	AL_ENTER( AL_DBG_CM );

	/* event based mechanism */
	if( p_cm_req->flags & IB_FLAGS_SYNC )
	{
		/* Synchronous (blocking) connects are not currently supported. */
		AL_EXIT( AL_DBG_CM );
		return IB_UNSUPPORTED;
		//cl_event_construct( &sync_event );
		//cl_status = cl_event_init( &sync_event, FALSE );
		//if( cl_status != CL_SUCCESS )
		//{
		//	__deref_conn( p_conn );
		//	return ib_convert_cl_status( cl_status );
		//}
		//p_conn->p_sync_event = p_sync_event = &sync_event;
	}

	p_qp = (al_conn_qp_t*)p_cm_req->h_qp;

	/* Get a CEP and bind it to the QP. */
	status = al_create_cep( h_al, __cm_handler, p_cm_req->h_qp, &cid );
	if( status != IB_SUCCESS )
	{
		AL_TRACE( AL_DBG_ERROR,
			("al_create_cep returned %s.\n", ib_get_err_str( status )) );
		goto done;
	}

	/* See if this QP has already been connected. */
	old_cid = cl_atomic_comp_xchg( &p_qp->cid, AL_INVALID_CID, cid );
	if( old_cid != AL_INVALID_CID )
	{
		/* Another connection owns the QP - give back the new CEP. */
		al_destroy_cep( h_al, cid, NULL );
		AL_EXIT( AL_DBG_CM );
		return IB_INVALID_STATE;
	}

	status = __cep_pre_req( p_cm_req );
	if( status != IB_SUCCESS )
	{
		AL_TRACE( AL_DBG_ERROR,
			("__cep_pre_req returned %s.\n", ib_get_err_str( status )) );
		/* Jumps into the failure cleanup inside the if-block below. */
		goto err;
	}

	/* Store callback pointers. */
	p_qp->pfn_cm_req_cb = p_cm_req->pfn_cm_req_cb;
	p_qp->pfn_cm_rep_cb = p_cm_req->pfn_cm_rep_cb;
	p_qp->pfn_cm_mra_cb = p_cm_req->pfn_cm_mra_cb;
	p_qp->pfn_cm_rej_cb = p_cm_req->pfn_cm_rej_cb;

	/* Send the REQ. */
	status = al_cep_send_req( h_al, p_qp->cid );
	if( status != IB_SUCCESS )
	{
		//if( p_sync_event )
		//	cl_event_destroy( p_sync_event );

		AL_TRACE( AL_DBG_ERROR,
			("al_cep_send_req returned %s.\n", ib_get_err_str(status)) );
err:
		/*
		 * Unbind the CEP and destroy it.  If al_destroy_cep does not take
		 * ownership of the QP reference (deref_al_obj callback), release
		 * the reference here.
		 */
		ref_al_obj( &p_qp->qp.obj );
		cl_atomic_xchg( &p_qp->cid, AL_INVALID_CID );
		if( al_destroy_cep( h_al, cid, deref_al_obj ) != IB_SUCCESS )
			deref_al_obj( &p_qp->qp.obj );
	}

	/* wait on event if synchronous operation */
	//if( p_sync_event )
	//{
	//	CL_TRACE( AL_DBG_CM, g_al_dbg_lvl,
	//		("event blocked on REQ...\n") );
	//	cl_event_wait_on( p_sync_event, EVENT_NO_TIMEOUT, FALSE );

	//	cl_event_destroy( p_sync_event );
	//}

done:
	AL_EXIT( AL_DBG_CM );
	return status;
}
+\r
+\r
+ib_api_status_t\r
+ib_cm_req(\r
+ IN const ib_cm_req_t* const p_cm_req )\r
+{\r
+ ib_api_status_t status;\r
+\r
+ AL_ENTER( AL_DBG_CM );\r
+\r
+ if( !p_cm_req )\r
+ {\r
+ AL_TRACE_EXIT( AL_DBG_ERROR, ("IB_INVALID_PARAMETER\n") );\r
+ return IB_INVALID_PARAMETER;\r
+ }\r
+\r
+ /* Only supported qp types allowed */\r
+ switch( p_cm_req->qp_type )\r
+ {\r
+ default:\r
+ AL_TRACE_EXIT( AL_DBG_ERROR, ("Invalid qp_type.\n") );\r
+ return IB_INVALID_SETTING;\r
+\r
+ case IB_QPT_RELIABLE_CONN:\r
+ case IB_QPT_UNRELIABLE_CONN:\r
+ if( AL_OBJ_INVALID_HANDLE( p_cm_req->h_qp, AL_OBJ_TYPE_H_QP ) ||\r
+ (p_cm_req->h_qp->type != p_cm_req->qp_type) )\r
+ {\r
+ AL_TRACE_EXIT( AL_DBG_ERROR, ("IB_INVALID_QP_HANDLE\n") );\r
+ return IB_INVALID_QP_HANDLE;\r
+ }\r
+\r
+ status = __cep_conn_req( qp_get_al( p_cm_req->h_qp ), p_cm_req );\r
+ break;\r
+\r
+ case IB_QPT_UNRELIABLE_DGRM:\r
+ if( AL_OBJ_INVALID_HANDLE( p_cm_req->h_al, AL_OBJ_TYPE_H_AL ) )\r
+ {\r
+ AL_TRACE_EXIT( AL_DBG_ERROR, ("IB_INVALID_AL_HANDLE\n") );\r
+ return IB_INVALID_AL_HANDLE;\r
+ }\r
+ status = IB_UNSUPPORTED;\r
+// status = cm_sidr_req( p_cm_req->h_al, p_cm_req );\r
+ break;\r
+ }\r
+\r
+ AL_EXIT( AL_DBG_CM );\r
+ return status;\r
+}\r
+\r
+\r
+/*\r
+ * Note: we pass in the QP handle separately because it comes form different\r
+ * sources. It comes from the ib_cm_rep_t structure in the ib_cm_rep path, and\r
+ * from the ib_cm_handle_t structure in the ib_cm_rtu path.\r
+ */\r
+static ib_api_status_t\r
+__cep_rts_qp(\r
+ IN const ib_cm_handle_t h_cm,\r
+ IN const ib_qp_handle_t h_qp,\r
+ IN const ib_access_t access_ctrl,\r
+ IN const uint32_t sq_depth,\r
+ IN const uint32_t rq_depth )\r
+{\r
+ ib_api_status_t status;\r
+ ib_qp_mod_t qp_mod;\r
+\r
+ AL_ENTER( AL_DBG_CM );\r
+\r
+ /* Set the QP to RTR. */\r
+ status = al_cep_get_rtr_attr( h_cm.h_al, h_cm.cid, &qp_mod );\r
+ if( status != IB_SUCCESS )\r
+ {\r
+ AL_TRACE_EXIT( AL_DBG_ERROR,\r
+ ("al_cep_get_rtr_attr returned %s\n", ib_get_err_str( status )) );\r
+ return status;\r
+ }\r
+\r
+ if( access_ctrl )\r
+ {\r
+ qp_mod.state.rtr.access_ctrl = access_ctrl;\r
+ qp_mod.state.rtr.opts |= IB_MOD_QP_ACCESS_CTRL;\r
+ }\r
+\r
+ if( sq_depth )\r
+ {\r
+ qp_mod.state.rtr.sq_depth = sq_depth;\r
+ qp_mod.state.rtr.opts |= IB_MOD_QP_SQ_DEPTH;\r
+ }\r
+\r
+ if( rq_depth )\r
+ {\r
+ qp_mod.state.rtr.rq_depth = rq_depth;\r
+ qp_mod.state.rtr.opts |= IB_MOD_QP_RQ_DEPTH;\r
+ }\r
+\r
+ status = ib_modify_qp( h_qp, &qp_mod );\r
+ if( status != IB_SUCCESS )\r
+ {\r
+ AL_TRACE_EXIT( AL_DBG_ERROR,\r
+ ("ib_modify_qp to RTR returned %s.\n", ib_get_err_str(status) ) );\r
+ return status;\r
+ }\r
+\r
+ /* Set the QP to RTS. */\r
+ status = al_cep_get_rts_attr( h_cm.h_al, h_cm.cid, &qp_mod );\r
+ if( status != IB_SUCCESS )\r
+ {\r
+ AL_TRACE_EXIT( AL_DBG_ERROR,\r
+ ("al_cep_get_rts_attr returned %s\n", ib_get_err_str( status )) );\r
+ return status;\r
+ }\r
+\r
+ status = ib_modify_qp( h_qp, &qp_mod );\r
+ if( status != IB_SUCCESS )\r
+ {\r
+ AL_TRACE_EXIT( AL_DBG_ERROR,\r
+ ("ib_modify_qp to RTS returned %s.\n", ib_get_err_str(status) ) );\r
+ return status;\r
+ }\r
+\r
+ AL_EXIT( AL_DBG_CM );\r
+ return IB_SUCCESS;\r
+}\r
+\r
+\r
+static ib_api_status_t\r
+__cep_pre_rep(\r
+ IN const ib_cm_handle_t h_cm,\r
+ IN const ib_cm_rep_t* const p_cm_rep )\r
+{\r
+ ib_api_status_t status;\r
+ al_conn_qp_t *p_qp;\r
+ ib_qp_mod_t qp_mod;\r
+\r
+ AL_ENTER( AL_DBG_CM );\r
+\r
+ p_qp = (al_conn_qp_t*)p_cm_rep->h_qp;\r
+\r
+ status = al_cep_pre_rep(\r
+ h_cm.h_al, h_cm.cid, p_cm_rep->h_qp, p_cm_rep, &qp_mod );\r
+ if( status != IB_SUCCESS )\r
+ {\r
+ AL_TRACE_EXIT( AL_DBG_ERROR,\r
+ ("al_cep_pre_rep returned %s.\n", ib_get_err_str( status )) );\r
+ return status;\r
+ }\r
+\r
+ /* Transition the QP to the INIT state. */\r
+ qp_mod.state.init.access_ctrl = p_cm_rep->access_ctrl;\r
+ status = __cep_init_qp( p_cm_rep->h_qp, &qp_mod );\r
+ if( status != IB_SUCCESS )\r
+ {\r
+ AL_TRACE_EXIT( AL_DBG_ERROR,\r
+ ("cm_init_qp returned %s.\n", ib_get_err_str(status)) );\r
+ return status;\r
+ }\r
+\r
+ /* Prepost receives. */\r
+ if( p_cm_rep->p_recv_wr )\r
+ {\r
+ status = ib_post_recv( p_cm_rep->h_qp, p_cm_rep->p_recv_wr,\r
+ (ib_recv_wr_t** __ptr64)p_cm_rep->pp_recv_failure );\r
+ if( status != IB_SUCCESS )\r
+ {\r
+ AL_TRACE_EXIT( AL_DBG_ERROR,\r
+ ("ib_post_recv returned %s.\n", ib_get_err_str(status)) );\r
+ return status;\r
+ }\r
+ }\r
+\r
+ /* Transition the QP to the RTR and RTS states. */\r
+ status = __cep_rts_qp( h_cm, p_cm_rep->h_qp,\r
+ p_cm_rep->access_ctrl, p_cm_rep->sq_depth, p_cm_rep->rq_depth );\r
+ if( status != IB_SUCCESS )\r
+ {\r
+ AL_TRACE( AL_DBG_ERROR,\r
+ ("__cep_rts_qp returned %s.\n", ib_get_err_str(status)) );\r
+ }\r
+\r
+ AL_EXIT( AL_DBG_CM );\r
+ return status;\r
+}\r
+\r
+\r
/*
 * Bind the CEP to the QP, prepare the QP, and send the REP.  On failure
 * the connection is rejected and the CEP destroyed; the QP is unbound so
 * it can be reused.
 */
static ib_api_status_t
__cep_conn_rep(
	IN				ib_cm_handle_t				h_cm,
	IN		const	ib_cm_rep_t* const			p_cm_rep )
{
	ib_api_status_t		status;
	net32_t				cid;

	AL_ENTER( AL_DBG_CM );

	/* Atomically bind this CEP to the QP if the QP is unbound. */
	cid = cl_atomic_comp_xchg(
		&((al_conn_qp_t*)p_cm_rep->h_qp)->cid, AL_INVALID_CID, h_cm.cid );

	if( cid != AL_INVALID_CID )
	{
		/* We don't destroy the CEP to allow the user to retry accepting. */
		AL_TRACE_EXIT( AL_DBG_ERROR, ("QP already connected.\n") );
		return IB_INVALID_QP_HANDLE;
	}

	/* Store the CM callbacks. */
	((al_conn_qp_t*)p_cm_rep->h_qp)->pfn_cm_rej_cb = p_cm_rep->pfn_cm_rej_cb;
	((al_conn_qp_t*)p_cm_rep->h_qp)->pfn_cm_mra_cb = p_cm_rep->pfn_cm_mra_cb;
	((al_conn_qp_t*)p_cm_rep->h_qp)->pfn_cm_rtu_cb = p_cm_rep->pfn_cm_rtu_cb;
	((al_conn_qp_t*)p_cm_rep->h_qp)->pfn_cm_lap_cb = p_cm_rep->pfn_cm_lap_cb;
	((al_conn_qp_t*)p_cm_rep->h_qp)->pfn_cm_dreq_cb = p_cm_rep->pfn_cm_dreq_cb;

	/* Transition QP through state machine */
	status = __cep_pre_rep( h_cm, p_cm_rep );
	if( status != IB_SUCCESS )
	{
		AL_TRACE_EXIT( AL_DBG_ERROR,
			("__cep_pre_req returned %s\n", ib_get_err_str(status)) );
		/* Jumps into the failure cleanup inside the if-block below. */
		goto err;
	}

	status = al_cep_send_rep( h_cm.h_al, h_cm.cid );
	if( status != IB_SUCCESS )
	{
		AL_TRACE_EXIT( AL_DBG_ERROR,
			("al_cep_send_rep returned %s\n", ib_get_err_str(status)) );
err:
		/* Unbind the QP so it can be reused. */
		cl_atomic_xchg(
			&((al_conn_qp_t*)p_cm_rep->h_qp)->cid, AL_INVALID_CID );

		/* Hold a QP reference across CEP destruction. */
		ref_al_obj( &p_cm_rep->h_qp->obj );

		/* Reject and abort the connection. */
		al_cep_rej( h_cm.h_al, h_cm.cid, IB_REJ_INSUF_QP, NULL, 0, NULL, 0 );

		/*
		 * If al_destroy_cep does not take ownership of our reference
		 * (deref_al_obj callback), release it here.
		 */
		if( al_destroy_cep( h_cm.h_al, h_cm.cid, deref_al_obj ) != IB_SUCCESS )
			deref_al_obj( &p_cm_rep->h_qp->obj );
	}

	AL_EXIT( AL_DBG_CM );
	return status;
}
+\r
+\r
+ib_api_status_t\r
+ib_cm_rep(\r
+ IN const ib_cm_handle_t h_cm_req,\r
+ IN const ib_cm_rep_t* const p_cm_rep )\r
+{\r
+ ib_api_status_t status;\r
+\r
+ AL_ENTER( AL_DBG_CM );\r
+\r
+ if( !p_cm_rep )\r
+ {\r
+ AL_TRACE_EXIT( AL_DBG_ERROR, ("IB_INVALID_PARAMETER\n") );\r
+ return IB_INVALID_PARAMETER;\r
+ }\r
+\r
+ /* Only supported qp types allowed */\r
+ status = IB_SUCCESS;\r
+ switch( p_cm_rep->qp_type )\r
+ {\r
+ default:\r
+ AL_TRACE( AL_DBG_ERROR, ("Invalid qp_type.\n") );\r
+ status = IB_INVALID_SETTING;\r
+ break;\r
+\r
+ case IB_QPT_RELIABLE_CONN:\r
+ case IB_QPT_UNRELIABLE_CONN:\r
+ if( AL_OBJ_INVALID_HANDLE( p_cm_rep->h_qp, AL_OBJ_TYPE_H_QP ) ||\r
+ (p_cm_rep->h_qp->type != p_cm_rep->qp_type) )\r
+ {\r
+ AL_TRACE_EXIT( AL_DBG_ERROR, ("IB_INVALID_QP_HANDLE\n") );\r
+ status = IB_INVALID_QP_HANDLE;\r
+ }\r
+ if( p_cm_rep->h_qp->obj.h_al != h_cm_req.h_al )\r
+ {\r
+ AL_TRACE_EXIT( AL_DBG_ERROR, ("IB_INVALID_QP_HANDLE\n") );\r
+ status = IB_INVALID_QP_HANDLE;\r
+ }\r
+ break;\r
+\r
+ case IB_QPT_UNRELIABLE_DGRM:\r
+ if( ( p_cm_rep->status == IB_SIDR_SUCCESS ) &&\r
+ (AL_OBJ_INVALID_HANDLE( p_cm_rep->h_qp, AL_OBJ_TYPE_H_QP ) ||\r
+ (p_cm_rep->h_qp->type != p_cm_rep->qp_type) ) )\r
+ {\r
+ AL_TRACE_EXIT( AL_DBG_ERROR, ("IB_INVALID_QP_HANDLE\n") );\r
+ status = IB_INVALID_QP_HANDLE;\r
+ }\r
+ break;\r
+ }\r
+\r
+ if( status != IB_SUCCESS )\r
+ {\r
+ al_cep_rej(\r
+ h_cm_req.h_al, h_cm_req.cid, IB_REJ_INSUF_QP, NULL, 0, NULL, 0 );\r
+ al_destroy_cep( h_cm_req.h_al, h_cm_req.cid, NULL );\r
+\r
+ AL_EXIT( AL_DBG_CM );\r
+ return status;\r
+ }\r
+\r
+ if( p_cm_rep->qp_type == IB_QPT_UNRELIABLE_DGRM )\r
+ status = IB_UNSUPPORTED;//status = cm_sidr_rep( p_conn, p_cm_rep );\r
+ else\r
+ status = __cep_conn_rep( h_cm_req, p_cm_rep );\r
+\r
+ AL_EXIT( AL_DBG_CM );\r
+ return status;\r
+}\r
+\r
+\r
/*
 * Complete connection establishment: transition the QP to RTR/RTS and
 * send the RTU.  On failure the connection is rejected and torn down.
 */
ib_api_status_t
ib_cm_rtu(
	IN		const	ib_cm_handle_t				h_cm_rep,
	IN		const	ib_cm_rtu_t* const			p_cm_rtu )
{
	ib_api_status_t		status;
	net32_t				cid;

	AL_ENTER( AL_DBG_CM );

	if( !p_cm_rtu )
	{
		AL_TRACE_EXIT( AL_DBG_ERROR, ("IB_INVALID_PARAMETER\n") );
		return IB_INVALID_PARAMETER;
	}

	///*
	// * Call invalid if event is still processed.
	// * User may have called rtu in rep callback.
	// */
	//if( p_conn->p_sync_event )
	//{
	//	CL_TRACE_EXIT( AL_DBG_ERROR, g_al_dbg_lvl,
	//		("Connection in invalid state. Sync call in progress.\n" ) );

	//	cm_res_release( p_conn );
	//	__deref_conn( p_conn );
	//	return IB_INVALID_STATE;
	//}

	/* Store the callbacks used for the established phase. */
	((al_conn_qp_t*)h_cm_rep.h_qp)->pfn_cm_apr_cb = p_cm_rtu->pfn_cm_apr_cb;
	((al_conn_qp_t*)h_cm_rep.h_qp)->pfn_cm_dreq_cb = p_cm_rtu->pfn_cm_dreq_cb;

	/* Transition QP through state machine */
	status = __cep_rts_qp( h_cm_rep, h_cm_rep.h_qp,
		p_cm_rtu->access_ctrl, p_cm_rtu->sq_depth, p_cm_rtu->rq_depth );
	if( status != IB_SUCCESS )
	{
		AL_TRACE_EXIT( AL_DBG_ERROR,
			("__cep_rts_qp returned %s.\n", ib_get_err_str( status )) );
		/* Jumps into the failure cleanup inside the if-block below. */
		goto err;
	}

	status = al_cep_rtu( h_cm_rep.h_al, h_cm_rep.cid,
		p_cm_rtu->p_rtu_pdata, p_cm_rtu->rtu_length );
	if( status != IB_SUCCESS )
	{
err:
		/* Reject and abort the connection. */
		al_cep_rej(
			h_cm_rep.h_al, h_cm_rep.cid, IB_REJ_INSUF_QP, NULL, 0, NULL, 0 );

		/* Place the QP in timewait and unbind it from the CEP. */
		__cep_timewait_qp( h_cm_rep.h_qp );

		cid = cl_atomic_xchg(
			&((al_conn_qp_t*)h_cm_rep.h_qp)->cid, AL_INVALID_CID );

		CL_ASSERT( cid == h_cm_rep.cid );

		/*
		 * Destroy the CEP; if al_destroy_cep does not take ownership of
		 * our QP reference (deref_al_obj callback), release it here.
		 */
		ref_al_obj( &h_cm_rep.h_qp->obj );
		if( al_destroy_cep(
			h_cm_rep.h_al, h_cm_rep.cid, deref_al_obj ) != IB_SUCCESS )
		{
			deref_al_obj( &h_cm_rep.h_qp->obj );
		}

		AL_TRACE_EXIT( AL_DBG_ERROR,
			("al_cep_rtu returned %s.\n", ib_get_err_str( status )) );
		return status;
	}

	AL_EXIT( AL_DBG_CM );
	return status;
}
+\r
+\r
+ib_api_status_t\r
+ib_cm_mra(\r
+ IN const ib_cm_handle_t h_cm,\r
+ IN const ib_cm_mra_t* const p_cm_mra )\r
+{\r
+ ib_api_status_t status;\r
+\r
+ AL_ENTER( AL_DBG_CM );\r
+\r
+ if( !p_cm_mra )\r
+ {\r
+ AL_TRACE_EXIT( AL_DBG_ERROR, ("IB_INVALID_PARAMETER\n") );\r
+ return IB_INVALID_PARAMETER;\r
+ }\r
+\r
+ status = al_cep_mra( h_cm.h_al, h_cm.cid, p_cm_mra );\r
+ if( status != IB_SUCCESS )\r
+ {\r
+ AL_TRACE( AL_DBG_ERROR,\r
+ ("al_cep_mra returned %s\n", ib_get_err_str( status )) );\r
+ }\r
+\r
+ AL_EXIT( AL_DBG_CM );\r
+ return status;\r
+}\r
+\r
+\r
+ib_api_status_t\r
+ib_cm_rej(\r
+ IN const ib_cm_handle_t h_cm,\r
+ IN const ib_cm_rej_t* const p_cm_rej )\r
+{\r
+ ib_api_status_t status;\r
+ net32_t cid;\r
+\r
+ AL_ENTER( AL_DBG_CM );\r
+\r
+ if( !p_cm_rej )\r
+ {\r
+ AL_TRACE_EXIT( AL_DBG_ERROR, ("IB_INVALID_PARAMETER\n") );\r
+ return IB_INVALID_PARAMETER;\r
+ }\r
+\r
+ status = al_cep_rej( h_cm.h_al, h_cm.cid, p_cm_rej->rej_status,\r
+ p_cm_rej->p_ari->data, p_cm_rej->ari_length,\r
+ p_cm_rej->p_rej_pdata, p_cm_rej->rej_length );\r
+\r
+ if( h_cm.h_qp )\r
+ {\r
+ __cep_timewait_qp( h_cm.h_qp );\r
+\r
+ cid = cl_atomic_xchg(\r
+ &((al_conn_qp_t*)h_cm.h_qp)->cid, AL_INVALID_CID );\r
+ if( cid != AL_INVALID_CID )\r
+ {\r
+ ref_al_obj( &h_cm.h_qp->obj );\r
+ if( al_destroy_cep( h_cm.h_al, h_cm.cid, deref_al_obj ) != IB_SUCCESS )\r
+ deref_al_obj( &h_cm.h_qp->obj );\r
+ }\r
+ }\r
+\r
+ AL_EXIT( AL_DBG_CM );\r
+ return status;\r
+}\r
+\r
+\r
/*
 * Initiate disconnection by sending a DREQ.  On unrecoverable send
 * failures the connection is torn down locally as if it had completed.
 */
ib_api_status_t
ib_cm_dreq(
	IN		const	ib_cm_dreq_t* const			p_cm_dreq )
{
	ib_api_status_t		status;
	net32_t				cid;

	AL_ENTER( AL_DBG_CM );

	if( !p_cm_dreq )
	{
		AL_TRACE_EXIT( AL_DBG_ERROR, ("IB_INVALID_PARAMETER\n") );
		return IB_INVALID_PARAMETER;
	}

	/* Only supported qp types allowed */
	switch( p_cm_dreq->qp_type )
	{
	default:
		AL_TRACE_EXIT( AL_DBG_ERROR, ("Invalid qp_type.\n") );
		return IB_INVALID_SETTING;

	case IB_QPT_RELIABLE_CONN:
	case IB_QPT_UNRELIABLE_CONN:
		if( AL_OBJ_INVALID_HANDLE( p_cm_dreq->h_qp, AL_OBJ_TYPE_H_QP ) ||
			(p_cm_dreq->h_qp->type != p_cm_dreq->qp_type) )
		{
			AL_TRACE_EXIT( AL_DBG_ERROR, ("IB_INVALID_QP_HANDLE\n") );
			return IB_INVALID_QP_HANDLE;
		}
		break;
	}

	/* Store the callback pointers. */
	((al_conn_qp_t*)p_cm_dreq->h_qp)->pfn_cm_drep_cb =
		p_cm_dreq->pfn_cm_drep_cb;

	status = al_cep_dreq( p_cm_dreq->h_qp->obj.h_al,
		((al_conn_qp_t*)p_cm_dreq->h_qp)->cid,
		p_cm_dreq->p_dreq_pdata, p_cm_dreq->dreq_length );
	switch( status )
	{
	case IB_INVALID_STATE:
	case IB_INVALID_HANDLE:
	case IB_INVALID_PARAMETER:
	case IB_INVALID_SETTING:
		/* Bad call - don't touch the QP. */
		break;

	case IB_SUCCESS:
		/* Wait for the DREP or timeout. */
		break;

	default:
		/*
		 * If we failed to send the DREQ, just release the connection.  It's
		 * unreliable anyway.  The local port may be down.  Note that we could
		 * not send the DREQ, but we still could have received one.  The DREQ
		 * will have a reference on the connection until the user calls
		 * ib_cm_drep.
		 */
		__cep_timewait_qp( p_cm_dreq->h_qp );

		/* Unbind the CEP and destroy it, balancing the references. */
		cid = cl_atomic_xchg(
			&((al_conn_qp_t*)p_cm_dreq->h_qp)->cid, AL_INVALID_CID );
		ref_al_obj( &p_cm_dreq->h_qp->obj );
		if( cid == AL_INVALID_CID || al_destroy_cep(
			p_cm_dreq->h_qp->obj.h_al, cid, deref_al_obj ) != IB_SUCCESS )
		{
			deref_al_obj( &p_cm_dreq->h_qp->obj );
		}
		/* The local teardown succeeded - report success to the caller. */
		status = IB_SUCCESS;
	}

	AL_EXIT( AL_DBG_CM );
	return status;
}
+\r
+\r
+\r
+ib_api_status_t\r
+ib_cm_drep(\r
+ IN const ib_cm_handle_t h_cm_dreq,\r
+ IN const ib_cm_drep_t* const p_cm_drep )\r
+{\r
+ ib_api_status_t status;\r
+ net32_t cid;\r
+\r
+ AL_ENTER( AL_DBG_CM );\r
+\r
+ if( !p_cm_drep )\r
+ {\r
+ AL_TRACE_EXIT( AL_DBG_ERROR, ("IB_INVALID_PARAMETER\n") );\r
+ return IB_INVALID_PARAMETER;\r
+ }\r
+\r
+ status = al_cep_drep( h_cm_dreq.h_al, h_cm_dreq.cid, p_cm_drep );\r
+ switch( status )\r
+ {\r
+ case IB_INVALID_SETTING:\r
+ case IB_INVALID_HANDLE:\r
+ case IB_INVALID_PARAMETER:\r
+ case IB_INVALID_STATE:\r
+ /* Bad call - don't touch the QP. */\r
+ break;\r
+\r
+ default:\r
+ /*\r
+ * Some other out-of-resource error - continue as if we succeeded in\r
+ * sending the DREP.\r
+ */\r
+ status = IB_SUCCESS;\r
+ /* Fall through */\r
+ case IB_SUCCESS:\r
+ __cep_timewait_qp( h_cm_dreq.h_qp );\r
+\r
+ cid = cl_atomic_xchg(\r
+ &((al_conn_qp_t*)h_cm_dreq.h_qp)->cid, AL_INVALID_CID );\r
+ if( cid != AL_INVALID_CID )\r
+ {\r
+ CL_ASSERT( cid == h_cm_dreq.cid );\r
+ ref_al_obj( &h_cm_dreq.h_qp->obj );\r
+ if( al_destroy_cep(\r
+ h_cm_dreq.h_al, h_cm_dreq.cid, deref_al_obj ) != IB_SUCCESS )\r
+ {\r
+ deref_al_obj( &h_cm_dreq.h_qp->obj );\r
+ }\r
+ }\r
+ }\r
+\r
+ AL_EXIT( AL_DBG_CM );\r
+ return status;\r
+}\r
+\r
+\r
+ib_api_status_t\r
+ib_cm_lap(\r
+ IN const ib_cm_lap_t* const p_cm_lap )\r
+{\r
+ ib_api_status_t status;\r
+\r
+ AL_ENTER( AL_DBG_CM );\r
+\r
+ if( !p_cm_lap )\r
+ {\r
+ AL_TRACE_EXIT( AL_DBG_ERROR, ("IB_INVALID_PARAMETER\n") );\r
+ return IB_INVALID_PARAMETER;\r
+ }\r
+\r
+ /* Only supported qp types allowed */\r
+ switch( p_cm_lap->qp_type )\r
+ {\r
+ default:\r
+ AL_TRACE_EXIT( AL_DBG_ERROR, ("Invalid qp_type.\n") );\r
+ return IB_INVALID_SETTING;\r
+\r
+ case IB_QPT_RELIABLE_CONN:\r
+ case IB_QPT_UNRELIABLE_CONN:\r
+ if( AL_OBJ_INVALID_HANDLE( p_cm_lap->h_qp, AL_OBJ_TYPE_H_QP ) ||\r
+ (p_cm_lap->h_qp->type != p_cm_lap->qp_type) )\r
+ {\r
+ AL_TRACE_EXIT( AL_DBG_ERROR, ("IB_INVALID_QP_HANDLE\n") );\r
+ return IB_INVALID_QP_HANDLE;\r
+ }\r
+ break;\r
+ }\r
+\r
+ status = al_cep_lap( p_cm_lap->h_qp->obj.h_al,\r
+ ((al_conn_qp_t*)p_cm_lap->h_qp)->cid, p_cm_lap );\r
+ if( status != IB_SUCCESS )\r
+ {\r
+ AL_TRACE( AL_DBG_ERROR,\r
+ ("al_cep_lap returned %s.\n", ib_get_err_str( status )) );\r
+ }\r
+\r
+ AL_EXIT( AL_DBG_CM );\r
+ return status;\r
+}\r
+\r
+\r
+ib_api_status_t\r
+ib_cm_apr(\r
+ IN const ib_cm_handle_t h_cm_lap,\r
+ IN const ib_cm_apr_t* const p_cm_apr )\r
+{\r
+ ib_api_status_t status;\r
+ ib_qp_mod_t qp_mod;\r
+\r
+ AL_ENTER( AL_DBG_CM );\r
+\r
+ if( !p_cm_apr )\r
+ {\r
+ AL_TRACE_EXIT( AL_DBG_ERROR, ("IB_INVALID_PARAMETER\n") );\r
+ return IB_INVALID_PARAMETER;\r
+ }\r
+\r
+ /* Only supported qp types allowed */\r
+ switch( p_cm_apr->qp_type )\r
+ {\r
+ default:\r
+ AL_TRACE_EXIT( AL_DBG_ERROR, ("Invalid qp_type.\n") );\r
+ return IB_INVALID_SETTING;\r
+\r
+ case IB_QPT_RELIABLE_CONN:\r
+ case IB_QPT_UNRELIABLE_CONN:\r
+ if( AL_OBJ_INVALID_HANDLE( p_cm_apr->h_qp, AL_OBJ_TYPE_H_QP ) ||\r
+ (p_cm_apr->h_qp->type != p_cm_apr->qp_type) )\r
+ {\r
+ AL_TRACE_EXIT( AL_DBG_ERROR, ("IB_INVALID_QP_HANDLE\n") );\r
+ return IB_INVALID_QP_HANDLE;\r
+ }\r
+ break;\r
+ }\r
+\r
+ status = al_cep_pre_apr( h_cm_lap.h_al, h_cm_lap.cid, p_cm_apr, &qp_mod );\r
+ if( status != IB_SUCCESS )\r
+ {\r
+ AL_TRACE_EXIT( AL_DBG_ERROR,\r
+ ("al_cep_pre_apr returned %s.\n", ib_get_err_str( status )) );\r
+ return status;\r
+ }\r
+\r
+ /* Load alt path into QP */\r
+ status = ib_modify_qp( h_cm_lap.h_qp, &qp_mod );\r
+ if( status != IB_SUCCESS )\r
+ {\r
+ AL_TRACE_EXIT( AL_DBG_ERROR,\r
+ ("ib_modify_qp for LAP returned %s.\n",\r
+ ib_get_err_str( status )) );\r
+ return status;\r
+ }\r
+ \r
+ status = al_cep_send_apr( h_cm_lap.h_al, h_cm_lap.cid );\r
+\r
+ AL_EXIT( AL_DBG_CM );\r
+ return status;\r
+}\r
+\r
+\r
+ib_api_status_t\r
+ib_force_apm(\r
+ IN const ib_qp_handle_t h_qp )\r
+{\r
+ ib_api_status_t status;\r
+ al_conn_qp_t *p_conn_qp;\r
+ ib_qp_mod_t qp_mod;\r
+\r
+ AL_ENTER( AL_DBG_CM );\r
+\r
+ if( AL_OBJ_INVALID_HANDLE( h_qp, AL_OBJ_TYPE_H_QP ) )\r
+ {\r
+ AL_TRACE_EXIT( AL_DBG_ERROR, ("IB_INVALID_QP_HANDLE\n") );\r
+ return IB_INVALID_QP_HANDLE;\r
+ }\r
+\r
+ p_conn_qp = PARENT_STRUCT( h_qp, al_conn_qp_t, qp );\r
+ cl_memclr( &qp_mod, sizeof(ib_qp_mod_t) );\r
+ qp_mod.req_state = IB_QPS_RTS;\r
+ qp_mod.state.rts.apm_state = IB_APM_MIGRATED;\r
+ qp_mod.state.rts.opts = IB_MOD_QP_APM_STATE;\r
+\r
+ /* Set the QP to RTS. */\r
+ status = ib_modify_qp( h_qp, &qp_mod );\r
+\r
+ AL_EXIT( AL_DBG_CM );\r
+ return status;\r
+}\r
+\r
+\r
+static void\r
+__destroying_listen(\r
+ IN al_obj_t* p_obj )\r
+{\r
+ ib_api_status_t status;\r
+ al_listen_t *p_listen;\r
+\r
+ p_listen = PARENT_STRUCT( p_obj, al_listen_t, obj );\r
+\r
+ /* Destroy the listen's CEP. */\r
+ status = al_destroy_cep(\r
+ p_obj->h_al, p_listen->cid, deref_al_obj );\r
+ if( status != IB_SUCCESS )\r
+ {\r
+ AL_TRACE( AL_DBG_ERROR,\r
+ ("al_destroy_cep returned %s.\n", ib_get_err_str( status )) );\r
+ deref_al_obj( p_obj );\r
+ }\r
+}\r
+\r
+\r
+\r
+static void\r
+__free_listen(\r
+ IN al_obj_t* p_obj )\r
+{\r
+ destroy_al_obj( p_obj );\r
+ cl_free( PARENT_STRUCT( p_obj, al_listen_t, obj ) );\r
+}\r
+\r
+\r
+static ib_api_status_t\r
+__cep_listen(\r
+ IN const ib_al_handle_t h_al,\r
+ IN const ib_cm_listen_t* const p_cm_listen,\r
+ IN const ib_pfn_listen_err_cb_t pfn_listen_err_cb,\r
+ IN const void* const listen_context,\r
+ OUT ib_listen_handle_t* const ph_cm_listen )\r
+{\r
+ ib_api_status_t status;\r
+ al_listen_t *p_listen;\r
+ ib_cep_listen_t cep_listen;\r
+\r
+ AL_ENTER( AL_DBG_CM );\r
+\r
+ UNUSED_PARAM( pfn_listen_err_cb );\r
+\r
+ /* Allocate the listen object. */\r
+ p_listen = (al_listen_t*)cl_zalloc( sizeof(al_listen_t) );\r
+ if( !p_listen )\r
+ {\r
+ AL_EXIT( AL_DBG_CM );\r
+ return IB_INSUFFICIENT_MEMORY;\r
+ }\r
+\r
+ /* Copy the listen request information for matching incoming requests. */\r
+ p_listen->pfn_cm_req_cb = p_cm_listen->pfn_cm_req_cb;\r
+\r
+ /* valid for ud qp_type only */\r
+ p_listen->sidr_context = p_cm_listen->sidr_context;\r
+\r
+ construct_al_obj( &p_listen->obj, AL_OBJ_TYPE_H_LISTEN );\r
+ status = init_al_obj( &p_listen->obj, listen_context, TRUE,\r
+ __destroying_listen, NULL, __free_listen );\r
+ if( status != IB_SUCCESS )\r
+ {\r
+ __free_listen( &p_listen->obj );\r
+ AL_EXIT( AL_DBG_CM );\r
+ return status;\r
+ }\r
+\r
+ /* Add the listen to the AL instance's object list. */\r
+ status = attach_al_obj( &h_al->obj, &p_listen->obj );\r
+ if( status != IB_SUCCESS )\r
+ {\r
+ p_listen->obj.pfn_destroy( &p_listen->obj, NULL );\r
+ AL_TRACE_EXIT( AL_DBG_ERROR,\r
+ ("attach_al_obj returned %s.\n", ib_get_err_str(status)) );\r
+ return status;\r
+ }\r
+\r
+ /* Create a CEP to listen on. */\r
+ status = al_create_cep( h_al, __cm_handler, p_listen, &p_listen->cid );\r
+ if( status != IB_SUCCESS )\r
+ {\r
+ p_listen->obj.pfn_destroy( &p_listen->obj, NULL );\r
+ AL_TRACE_EXIT( AL_DBG_ERROR,\r
+ ("al_create_cep returned %s.\n", ib_get_err_str(status)) );\r
+ return status;\r
+ }\r
+\r
+ cep_listen.cmp_len = p_cm_listen->compare_length;\r
+ cep_listen.cmp_offset = p_cm_listen->compare_offset;\r
+ cep_listen.p_cmp_buf = p_cm_listen->p_compare_buffer;\r
+ cep_listen.port_guid = p_cm_listen->port_guid;\r
+ cep_listen.svc_id = p_cm_listen->svc_id;\r
+\r
+ status = al_cep_listen( h_al, p_listen->cid, &cep_listen );\r
+ if( status != IB_SUCCESS )\r
+ {\r
+ p_listen->obj.pfn_destroy( &p_listen->obj, NULL );\r
+ AL_TRACE_EXIT( AL_DBG_ERROR,\r
+ ("al_cep_listen returned %s.\n", ib_get_err_str(status)) );\r
+ return status;\r
+ }\r
+\r
+ *ph_cm_listen = p_listen;\r
+\r
+ /* Note that we keep the reference held on behalf of the CEP. */\r
+\r
+ AL_EXIT( AL_DBG_CM );\r
+ return IB_SUCCESS;\r
+}\r
+\r
+\r
+ib_api_status_t\r
+ib_cm_listen(\r
+ IN const ib_al_handle_t h_al,\r
+ IN const ib_cm_listen_t* const p_cm_listen,\r
+ IN const ib_pfn_listen_err_cb_t pfn_listen_err_cb,\r
+ IN const void* const listen_context,\r
+ OUT ib_listen_handle_t* const ph_cm_listen )\r
+{\r
+ ib_api_status_t status;\r
+\r
+ AL_ENTER( AL_DBG_CM );\r
+\r
+ if( AL_OBJ_INVALID_HANDLE( h_al, AL_OBJ_TYPE_H_AL ) )\r
+ {\r
+ CL_TRACE_EXIT( AL_DBG_ERROR, g_al_dbg_lvl, ("IB_INVALID_AL_HANDLE\n") );\r
+ return IB_INVALID_AL_HANDLE;\r
+ }\r
+ if( !p_cm_listen || !pfn_listen_err_cb || !ph_cm_listen )\r
+ {\r
+ CL_TRACE_EXIT( AL_DBG_ERROR, g_al_dbg_lvl, ("IB_INVALID_PARAMETER\n") );\r
+ return IB_INVALID_PARAMETER;\r
+ }\r
+\r
+ status = __cep_listen(h_al, p_cm_listen, pfn_listen_err_cb, listen_context,\r
+ ph_cm_listen );\r
+\r
+ CL_EXIT( AL_DBG_CM, g_al_dbg_lvl );\r
+ return status;\r
+}\r
+\r
+\r
+ib_api_status_t\r
+ib_cm_cancel(\r
+ IN const ib_listen_handle_t h_cm_listen,\r
+ IN const ib_pfn_destroy_cb_t pfn_destroy_cb OPTIONAL )\r
+{\r
+ AL_ENTER( AL_DBG_CM );\r
+\r
+ if( AL_OBJ_INVALID_HANDLE( h_cm_listen, AL_OBJ_TYPE_H_LISTEN ) )\r
+ {\r
+ CL_TRACE_EXIT( AL_DBG_ERROR, g_al_dbg_lvl, ("IB_INVALID_HANDLE\n") );\r
+ return IB_INVALID_HANDLE;\r
+ }\r
+\r
+ ref_al_obj( &h_cm_listen->obj );\r
+ h_cm_listen->obj.pfn_destroy( &h_cm_listen->obj, pfn_destroy_cb );\r
+\r
+ AL_EXIT( AL_DBG_CM );\r
+ return IB_SUCCESS;\r
+}\r
+\r
+\r
+ib_api_status_t\r
+ib_cm_handoff(\r
+ IN const ib_cm_handle_t h_cm_req,\r
+ IN const ib_net64_t svc_id )\r
+{\r
+ UNUSED_PARAM( h_cm_req );\r
+ UNUSED_PARAM( svc_id );\r
+ return IB_UNSUPPORTED;\r
+}\r
#define IS_CM_IOCTL(cmd) \\r
((cmd) > AL_CM_OPS_START && (cmd) < AL_CM_MAXOPS)\r
\r
+\r
+enum _ual_cep_ops\r
+{\r
+ al_cep_ops_start = al_ioc_maxops,\r
+ ual_create_cep,\r
+ ual_destroy_cep,\r
+ ual_cep_listen,\r
+ ual_cep_pre_req,\r
+ ual_cep_send_req,\r
+ ual_cep_pre_rep,\r
+ ual_cep_send_rep,\r
+ ual_cep_get_rtr,\r
+ ual_cep_get_rts,\r
+ ual_cep_rtu,\r
+ ual_cep_rej,\r
+ ual_cep_mra,\r
+ ual_cep_lap,\r
+ ual_cep_pre_apr,\r
+ ual_cep_send_apr,\r
+ ual_cep_dreq,\r
+ ual_cep_drep,\r
+ ual_cep_get_timewait,\r
+ ual_cep_get_event,\r
+ ual_cep_poll,\r
+\r
+ al_cep_maxops\r
+\r
+} ual_cep_ops_t;\r
+\r
+#define UAL_CEP_OPS_START IOCTL_CODE(ALDEV_KEY, al_cep_ops_start)\r
+#define UAL_CEP_MAXOPS IOCTL_CODE(ALDEV_KEY, al_cep_maxops)\r
+#define IS_CEP_IOCTL(cmd) \\r
+ ((cmd) > UAL_CEP_OPS_START && (cmd) < UAL_CEP_MAXOPS)\r
+\r
+\r
/* AL ioctls */\r
\r
typedef enum _al_dev_ops\r
{\r
- al_ops_start = al_cm_maxops,\r
+ al_ops_start = al_cep_maxops,\r
\r
ual_reg_shmid_cmd,\r
ual_get_ca_attr,\r
#define UAL_CM_APR IOCTL_CODE(ALDEV_KEY, ual_cm_apr_cmd)\r
#define UAL_CM_FORCE_APM IOCTL_CODE(ALDEV_KEY, ual_force_apm_cmd)\r
\r
+/* CEP Related IOCTL commands */\r
+#define UAL_CREATE_CEP IOCTL_CODE(ALDEV_KEY, ual_create_cep)\r
+#define UAL_DESTROY_CEP IOCTL_CODE(ALDEV_KEY, ual_destroy_cep)\r
+#define UAL_CEP_LISTEN IOCTL_CODE(ALDEV_KEY, ual_cep_listen)\r
+#define UAL_CEP_PRE_REQ IOCTL_CODE(ALDEV_KEY, ual_cep_pre_req)\r
+#define UAL_CEP_SEND_REQ IOCTL_CODE(ALDEV_KEY, ual_cep_send_req)\r
+#define UAL_CEP_PRE_REP IOCTL_CODE(ALDEV_KEY, ual_cep_pre_rep)\r
+#define UAL_CEP_SEND_REP IOCTL_CODE(ALDEV_KEY, ual_cep_send_rep)\r
+#define UAL_CEP_GET_RTR IOCTL_CODE(ALDEV_KEY, ual_cep_get_rtr)\r
+#define UAL_CEP_GET_RTS IOCTL_CODE(ALDEV_KEY, ual_cep_get_rts)\r
+#define UAL_CEP_RTU IOCTL_CODE(ALDEV_KEY, ual_cep_rtu)\r
+#define UAL_CEP_REJ IOCTL_CODE(ALDEV_KEY, ual_cep_rej)\r
+#define UAL_CEP_MRA IOCTL_CODE(ALDEV_KEY, ual_cep_mra)\r
+#define UAL_CEP_LAP IOCTL_CODE(ALDEV_KEY, ual_cep_lap)\r
+#define UAL_CEP_PRE_APR IOCTL_CODE(ALDEV_KEY, ual_cep_pre_apr)\r
+#define UAL_CEP_SEND_APR IOCTL_CODE(ALDEV_KEY, ual_cep_send_apr)\r
+#define UAL_CEP_DREQ IOCTL_CODE(ALDEV_KEY, ual_cep_dreq)\r
+#define UAL_CEP_DREP IOCTL_CODE(ALDEV_KEY, ual_cep_drep)\r
+#define UAL_CEP_GET_TIMEWAIT IOCTL_CODE(ALDEV_KEY, ual_cep_get_timewait)\r
+#define UAL_CEP_GET_EVENT IOCTL_CODE(ALDEV_KEY, ual_cep_get_event)\r
+#define UAL_CEP_POLL IOCTL_CODE(ALDEV_KEY, ual_cep_poll)\r
+\r
#define UAL_GET_CA_ATTR_INFO IOCTL_CODE(ALDEV_KEY, ual_get_ca_attr)\r
\r
/* PnP related ioctl commands. */\r
CL_TRACE_EXIT( AL_DBG_ERROR, g_al_dbg_lvl, ("IB_INVALID_CA_HANDLE\n") );\r
return IB_INVALID_CA_HANDLE;\r
}\r
- if( !p_ioc_profile || ph_ioc )\r
+ if( !p_ioc_profile || !ph_ioc )\r
{\r
CL_TRACE_EXIT( AL_DBG_ERROR, g_al_dbg_lvl, ("IB_INVALID_PARAMETER\n") );\r
return IB_INVALID_PARAMETER;\r
} ioc_state_t;\r
\r
\r
+#pragma warning(disable:4324)\r
typedef struct _al_ioc\r
{\r
al_obj_t obj; /* Child of ib_ca_t */\r
atomic32_t in_use_cnt;\r
\r
} al_ioc_t;\r
+#pragma warning(default:4324)\r
\r
\r
typedef struct _al_svc_entry\r
__send_timer_cb(\r
IN void *context );\r
\r
-static void\r
-__send_async_proc_cb(\r
- IN cl_async_proc_item_t *p_send_async_item );\r
-\r
static void\r
__check_send_queue(\r
IN ib_mad_svc_handle_t h_mad_svc );\r
\r
/* Construct the MAD service. */\r
construct_al_obj( &h_mad_svc->obj, AL_OBJ_TYPE_H_MAD_SVC );\r
- cl_async_proc_construct( &h_mad_svc->send_async_proc );\r
cl_timer_construct( &h_mad_svc->send_timer );\r
cl_timer_construct( &h_mad_svc->recv_timer );\r
cl_qlist_init( &h_mad_svc->send_list );\r
return ib_convert_cl_status( cl_status );\r
}\r
\r
- cl_status = cl_async_proc_init( &h_mad_svc->send_async_proc,\r
- 1, "MAD svc send timeout" );\r
- if( cl_status != CL_SUCCESS )\r
- {\r
- h_mad_svc->obj.pfn_destroy( &h_mad_svc->obj, NULL );\r
- return ib_convert_cl_status( cl_status );\r
- }\r
-\r
*ph_mad_svc = h_mad_svc;\r
\r
CL_EXIT( AL_DBG_MAD_SVC, g_al_dbg_lvl );\r
ib_mad_send_handle_t h_send;\r
cl_list_item_t *p_list_item;\r
int32_t timeout_ms;\r
+#ifdef CL_KERNEL\r
+ KIRQL old_irql;\r
+#endif\r
\r
CL_ENTER( AL_DBG_MAD_SVC, g_al_dbg_lvl );\r
CL_ASSERT( p_obj );\r
timeout_ms -= 10;\r
}\r
\r
- /*\r
- * Cancel all outstanding send requests. Stop the send timer to avoid\r
- * synchronizing with it.\r
- */\r
- cl_timer_stop( &h_mad_svc->send_timer );\r
- cl_async_proc_destroy( &h_mad_svc->send_async_proc );\r
- cl_timer_destroy( &h_mad_svc->send_timer );\r
-\r
/*\r
* Deregister from the MAD dispatcher. The MAD dispatcher holds\r
* a reference on the MAD service when invoking callbacks. Since we\r
if( h_mad_svc->h_mad_reg )\r
__mad_disp_dereg( h_mad_svc->h_mad_reg );\r
\r
+ /* Cancel all outstanding send requests. */\r
+ cl_spinlock_acquire( &h_mad_svc->obj.lock );\r
for( p_list_item = cl_qlist_head( &h_mad_svc->send_list );\r
p_list_item != cl_qlist_end( &h_mad_svc->send_list );\r
p_list_item = cl_qlist_next( p_list_item ) )\r
h_send = PARENT_STRUCT( p_list_item, al_mad_send_t, pool_item );\r
h_send->canceled = TRUE;\r
}\r
+ cl_spinlock_release( &h_mad_svc->obj.lock );\r
\r
/*\r
* Invoke the timer callback to return the canceled MADs to the user.\r
* Since the MAD service is being destroyed, the user cannot be issuing\r
* sends.\r
*/\r
+#ifdef CL_KERNEL\r
+ old_irql = KeRaiseIrqlToDpcLevel();\r
+#endif\r
__check_send_queue( h_mad_svc );\r
+#ifdef CL_KERNEL\r
+ KeLowerIrql( old_irql );\r
+#endif\r
+\r
+ cl_timer_destroy( &h_mad_svc->send_timer );\r
\r
#ifdef CL_KERNEL\r
/*\r
if( h_mad_svc->obj.h_al->p_context )\r
{\r
cl_qlist_t *p_cblist;\r
- cl_list_item_t *p_list_item;\r
al_proxy_cb_info_t *p_cb_info;\r
\r
cl_spinlock_acquire( &h_mad_svc->obj.h_al->p_context->cb_lock );\r
}\r
\r
\r
+ib_api_status_t\r
+ib_delay_mad(\r
+ IN const ib_mad_svc_handle_t h_mad_svc,\r
+ IN ib_mad_element_t* const p_mad_element,\r
+ IN const uint32_t delay_ms )\r
+{\r
+#ifdef CL_KERNEL\r
+ cl_list_item_t *p_list_item;\r
+ ib_mad_send_handle_t h_send;\r
+#endif\r
+\r
+ AL_ENTER( AL_DBG_MAD_SVC );\r
+\r
+ if( AL_OBJ_INVALID_HANDLE( h_mad_svc, AL_OBJ_TYPE_H_MAD_SVC ) )\r
+ {\r
+ AL_TRACE_EXIT( AL_DBG_ERROR, ("IB_INVALID_HANDLE\n") );\r
+ return IB_INVALID_HANDLE;\r
+ }\r
+ if( !p_mad_element )\r
+ {\r
+ AL_TRACE_EXIT( AL_DBG_ERROR, ("IB_INVALID_PARAMETER\n") );\r
+ return IB_INVALID_PARAMETER;\r
+ }\r
+\r
+#ifndef CL_KERNEL\r
+ UNUSED_PARAM( p_mad_element );\r
+ UNUSED_PARAM( delay_ms );\r
+ /* TODO: support for user-mode MAD QP's. */\r
+ AL_EXIT( AL_DBG_MAD_SVC );\r
+ return IB_UNSUPPORTED;\r
+#else\r
+ /* Search for the MAD in our MAD list. It may have already completed. */\r
+ cl_spinlock_acquire( &h_mad_svc->obj.lock );\r
+ p_list_item = cl_qlist_find_from_head( &h_mad_svc->send_list,\r
+ __mad_svc_find_send, p_mad_element );\r
+\r
+ if( !p_list_item )\r
+ {\r
+ cl_spinlock_release( &h_mad_svc->obj.lock );\r
+ AL_TRACE( AL_DBG_MAD_SVC, ("MAD not found\n") );\r
+ return IB_NOT_FOUND;\r
+ }\r
+\r
+ /* Mark the MAD as having been canceled. */\r
+ h_send = PARENT_STRUCT( p_list_item, al_mad_send_t, pool_item );\r
+\r
+ if( h_send->retry_time == MAX_TIME )\r
+ h_send->delay = delay_ms;\r
+ else\r
+ h_send->retry_time += ((uint64_t)delay_ms * 1000ULL);\r
+\r
+ cl_spinlock_release( &h_mad_svc->obj.lock );\r
+ AL_EXIT( AL_DBG_MAD_SVC );\r
+ return IB_SUCCESS;\r
+#endif\r
+}\r
+\r
\r
/*\r
* Process a send completion.\r
__set_retry_time(\r
IN ib_mad_send_handle_t h_send )\r
{\r
- h_send->retry_time = h_send->p_send_mad->timeout_ms * 1000 +\r
+ h_send->retry_time =\r
+ (uint64_t)(h_send->p_send_mad->timeout_ms + h_send->delay) * 1000ULL +\r
cl_get_time_stamp();\r
+ h_send->delay = 0;\r
}\r
\r
\r
__send_timer_cb(\r
IN void *context )\r
{\r
- ib_mad_svc_handle_t h_mad_svc;\r
-\r
CL_ENTER( AL_DBG_MAD_SVC, g_al_dbg_lvl );\r
\r
- /*\r
- If we haven't already queued the asynchronous processing item to\r
- check the send queue, do so now.\r
- */\r
- h_mad_svc = (ib_mad_svc_handle_t)context;\r
- cl_spinlock_acquire( &h_mad_svc->obj.lock );\r
-\r
- /*\r
- See if the asynchronous processing item is in use. If it is already\r
- in use, it means that we're about to check the send queue anyway, so\r
- just ignore the timer. Also, don't bother scheduling if the object\r
- state is not CL_INITIALIZED; we may be destroying the MAD service.\r
- */\r
- if( !h_mad_svc->send_async_item.pfn_callback &&\r
- ( h_mad_svc->obj.state == CL_INITIALIZED ) )\r
- {\r
- /* Not in use, reference the service and queue the callback. */\r
- cl_atomic_inc( &h_mad_svc->ref_cnt );\r
- h_mad_svc->send_async_item.pfn_callback = __send_async_proc_cb;\r
- cl_async_proc_queue( &h_mad_svc->send_async_proc,\r
- &h_mad_svc->send_async_item );\r
- }\r
-\r
- cl_spinlock_release( &h_mad_svc->obj.lock );\r
-\r
- CL_EXIT( AL_DBG_MAD_SVC, g_al_dbg_lvl );\r
-}\r
-\r
-\r
-\r
-static void\r
-__send_async_proc_cb(\r
- IN cl_async_proc_item_t *p_send_async_item )\r
-{\r
- ib_mad_svc_handle_t h_mad_svc;\r
-\r
- CL_ENTER( AL_DBG_MAD_SVC, g_al_dbg_lvl );\r
-\r
- h_mad_svc = PARENT_STRUCT( p_send_async_item, al_mad_svc_t,\r
- send_async_item );\r
-\r
- cl_spinlock_acquire( &h_mad_svc->obj.lock );\r
-\r
- /*\r
- * Don't bother processing if the object state is not\r
- * CL_INITIALIZED; we may be destroying the MAD service.\r
- */\r
- if( h_mad_svc->obj.state != CL_INITIALIZED )\r
- {\r
- cl_spinlock_release( &h_mad_svc->obj.lock );\r
- cl_atomic_dec( &h_mad_svc->ref_cnt );\r
- return;\r
- }\r
-\r
- /* The send_async_item is available for use again. */\r
- h_mad_svc->send_async_item.pfn_callback = NULL;\r
-\r
- cl_spinlock_release( &h_mad_svc->obj.lock );\r
-\r
- __check_send_queue( h_mad_svc );\r
+ __check_send_queue( (ib_mad_svc_handle_t)context );\r
\r
- /* Release the reference held during async processing. */\r
- cl_atomic_dec( &h_mad_svc->ref_cnt );\r
CL_EXIT( AL_DBG_MAD_SVC, g_al_dbg_lvl );\r
}\r
\r
ib_pfn_mad_comp_cb_t pfn_user_send_cb;\r
ib_pfn_mad_comp_cb_t pfn_user_recv_cb;\r
\r
- cl_async_proc_t send_async_proc;\r
- cl_async_proc_item_t send_async_item;\r
cl_qlist_t send_list;\r
cl_timer_t send_timer;\r
\r
IN al_mad_wr_t* const p_mad_wr );\r
\r
\r
+ib_api_status_t\r
+ib_delay_mad(\r
+ IN const ib_mad_svc_handle_t h_mad_svc,\r
+ IN ib_mad_element_t* const p_mad_element,\r
+ IN const uint32_t delay_ms );\r
+\r
\r
#endif /* __IB_AL_MAD_H__ */\r
/* Absolute time that the request should be retried. */\r
uint64_t retry_time;\r
\r
+ /* Delay, in milliseconds, to add before the next retry. */\r
+ uint32_t delay;\r
+\r
/* Number of times that the request can be retried. */\r
uint32_t retry_cnt;\r
boolean_t canceled; /* indicates if send was canceled */\r
\r
ref_al_obj( &h_mcast->obj );\r
status = al_send_sa_req(\r
- &h_mcast->sa_dereg_req, h_mcast->port_guid, 500, 0, &sa_mad_data );\r
+ &h_mcast->sa_dereg_req, h_mcast->port_guid, 500, 0, &sa_mad_data, 0 );\r
if( status != IB_SUCCESS )\r
deref_al_obj( &h_mcast->obj );\r
\r
\r
p_mcast->state = SA_REG_STARTING;\r
status = al_send_sa_req( &p_mcast->sa_reg_req, p_mcast->port_guid,\r
- p_mcast_req->timeout_ms, p_mcast_req->retry_cnt, &sa_mad_data );\r
+ p_mcast_req->timeout_ms, p_mcast_req->retry_cnt, &sa_mad_data, 0 );\r
\r
CL_EXIT( AL_DBG_MCAST, g_al_dbg_lvl );\r
return status;\r
IN cl_ioctl_handle_t h_ioctl,\r
OUT size_t *p_ret_bytes );\r
\r
+cl_status_t cep_ioctl(\r
+ IN cl_ioctl_handle_t h_ioctl,\r
+ OUT size_t *p_ret_bytes );\r
+\r
cl_status_t ioc_ioctl(\r
IN cl_ioctl_handle_t h_ioctl,\r
OUT size_t *p_ret_bytes );\r
#include "al.h"\r
#include "al_av.h"\r
#include "al_ca.h"\r
-#include "al_cm_shared.h"\r
+#include "al_cm_cep.h"\r
#include "al_cq.h"\r
#include "al_debug.h"\r
#include "al_mad.h"\r
}\r
\r
\r
-/*\r
-static ib_api_status_t\r
-al_bad_leave_mcast(\r
- IN const ib_mcast_handle_t h_mcast )\r
-{\r
- UNUSED_PARAM( h_mcast );\r
- return IB_INVALID_PARAMETER;\r
-}\r
-*/\r
-\r
-\r
-static ib_api_status_t\r
-al_bad_cm_call(\r
- IN OUT al_conn_t* const p_conn )\r
-{\r
- UNUSED_PARAM( p_conn );\r
- return IB_INVALID_PARAMETER;\r
-}\r
-\r
-\r
-static ib_api_status_t\r
-al_bad_cm_pre_rep(\r
- IN OUT al_conn_t* const p_conn,\r
- IN OUT const ib_cm_rep_t* p_cm_rep )\r
-{\r
- UNUSED_PARAM( p_conn );\r
- UNUSED_PARAM( p_cm_rep );\r
- return IB_INVALID_PARAMETER;\r
-}\r
-\r
-\r
ib_api_status_t\r
init_base_qp(\r
IN ib_qp_t* const p_qp,\r
cq_attach_qp( h_qp->h_recv_cq, &h_qp->recv_cq_rel );\r
cq_attach_qp( h_qp->h_send_cq, &h_qp->send_cq_rel );\r
\r
- /*\r
- * Get the QP attributes. This works around a bug with create QP calls\r
- * above not reporting the correct attributes.\r
- */\r
-// ib_query_qp( h_qp, &qp_attr );\r
h_qp->num = qp_attr.num;\r
\r
return IB_SUCCESS;\r
/* Initialize the inherited QP first. */\r
status = init_raw_qp( &p_conn_qp->qp, h_pd, UNBOUND_PORT_GUID,\r
p_qp_create, p_umv_buf );\r
- if( status != IB_SUCCESS )\r
- {\r
- return status;\r
- }\r
\r
- return IB_SUCCESS;\r
+ p_conn_qp->cid = AL_INVALID_CID;\r
+\r
+ return status;\r
}\r
\r
\r
ib_qp_handle_t h_qp;\r
al_mad_qp_t *p_mad_qp;\r
al_qp_alias_t *p_qp_alias;\r
- al_conn_qp_t *p_conn_qp;\r
+ net32_t cid;\r
\r
CL_ASSERT( p_obj );\r
h_qp = PARENT_STRUCT( p_obj, ib_qp_t, obj );\r
\r
case IB_QPT_RELIABLE_CONN:\r
case IB_QPT_UNRELIABLE_CONN:\r
- p_conn_qp = PARENT_STRUCT( h_qp, al_conn_qp_t, qp);\r
+ cid = cl_atomic_xchg(\r
+ &((al_conn_qp_t*)h_qp)->cid, AL_INVALID_CID );\r
+ if( cid != AL_INVALID_CID )\r
+ {\r
+ ref_al_obj( &h_qp->obj );\r
+ if( al_destroy_cep(\r
+ h_qp->obj.h_al, cid, deref_al_obj ) != IB_SUCCESS )\r
+ {\r
+ deref_al_obj( &h_qp->obj );\r
+ }\r
+ }\r
\r
- /* Disconnect the QP. */\r
- cm_conn_destroy( p_conn_qp );\r
/* Fall through. */\r
-\r
case IB_QPT_UNRELIABLE_DGRM:\r
default:\r
/* Multicast membership gets cleaned up by object hierarchy. */\r
CL_ASSERT( p_event_rec );\r
h_qp = (ib_qp_handle_t)p_event_rec->context;\r
\r
+#if defined(CL_KERNEL)\r
+ switch( p_event_rec->code )\r
+ {\r
+ case IB_AE_QP_COMM:\r
+ al_cep_established( h_qp->obj.h_al, ((al_conn_qp_t*)h_qp)->cid );\r
+ break;\r
+\r
+ case IB_AE_QP_APM:\r
+ al_cep_migrate( h_qp->obj.h_al, ((al_conn_qp_t*)h_qp)->cid );\r
+ break;\r
+\r
+ case IB_AE_QP_APM_ERROR:\r
+ //***TODO: Figure out how to handle these errors.\r
+ break;\r
+\r
+ default:\r
+ break;\r
+ }\r
+#endif\r
+\r
p_event_rec->context = (void*)h_qp->obj.context;\r
p_event_rec->handle.h_qp = h_qp;\r
\r
CL_EXIT( AL_DBG_MW, g_al_dbg_lvl );\r
return status;\r
}\r
-\r
-\r
-ib_al_handle_t\r
-qp_get_al(\r
- IN const ib_qp_handle_t h_qp )\r
-{\r
- /* AL the is great-grandparent of the QP. */\r
- return (ib_al_handle_t)\r
- h_qp->obj.p_parent_obj->p_parent_obj->p_parent_obj;\r
-}\r
\r
ib_cm_handle_t p_conn;\r
\r
+ atomic32_t cid;\r
+\r
+ /* Callback table. */\r
+ ib_pfn_cm_req_cb_t pfn_cm_req_cb;\r
+ ib_pfn_cm_rep_cb_t pfn_cm_rep_cb;\r
+ ib_pfn_cm_mra_cb_t pfn_cm_mra_cb;\r
+ ib_pfn_cm_rtu_cb_t pfn_cm_rtu_cb;\r
+ ib_pfn_cm_lap_cb_t pfn_cm_lap_cb;\r
+ ib_pfn_cm_apr_cb_t pfn_cm_apr_cb;\r
+ ib_pfn_cm_dreq_cb_t pfn_cm_dreq_cb;\r
+ ib_pfn_cm_drep_cb_t pfn_cm_drep_cb;\r
+ ib_pfn_cm_rej_cb_t pfn_cm_rej_cb; /* If RTU times out */\r
+\r
+\r
} al_conn_qp_t;\r
\r
\r
\r
\r
/* Return the AL instance associated with this QP. */\r
-ib_al_handle_t\r
+static inline ib_al_handle_t\r
qp_get_al(\r
- IN const ib_qp_handle_t h_qp );\r
+ IN const ib_qp_handle_t h_qp )\r
+{\r
+ return h_qp->obj.h_al;\r
+}\r
\r
\r
#endif /* __AL_QP_H__ */\r
static ib_api_status_t\r
query_sa(\r
IN al_query_t *p_query,\r
- IN const ib_query_req_t* const p_query_req );\r
+ IN const ib_query_req_t* const p_query_req,\r
+ IN const ib_al_flags_t flags );\r
\r
void\r
query_req_cb(\r
IN al_sa_req_t *p_sa_req,\r
IN ib_mad_element_t *p_mad_response );\r
\r
-static void\r
-__free_query(\r
- IN OUT al_query_t *p_query );\r
-\r
-\r
\r
ib_api_status_t\r
ib_query(\r
{\r
al_query_t *p_query;\r
ib_api_status_t status;\r
- cl_status_t cl_status;\r
- boolean_t sync;\r
\r
CL_ENTER( AL_DBG_QUERY, g_al_dbg_lvl );\r
\r
return IB_INSUFFICIENT_MEMORY;\r
}\r
\r
- /* Check for synchronous operation. */\r
- p_query->flags = p_query_req->flags;\r
- cl_event_construct( &p_query->event );\r
- sync = ( (p_query->flags & IB_FLAGS_SYNC) == IB_FLAGS_SYNC );\r
- if( sync )\r
- {\r
- cl_status = cl_event_init( &p_query->event, TRUE );\r
- if( cl_status != CL_SUCCESS )\r
- {\r
- status = ib_convert_cl_status( cl_status );\r
- CL_TRACE_EXIT( AL_DBG_ERROR, g_al_dbg_lvl,\r
- ("cl_init_event failed: %s\n", ib_get_err_str(status) ) );\r
- __free_query( p_query );\r
- return status;\r
- }\r
- }\r
-\r
/* Copy the query context information. */\r
p_query->sa_req.pfn_sa_req_cb = query_req_cb;\r
p_query->sa_req.user_context = p_query_req->query_context;\r
/* Track the query with the AL instance. */\r
al_insert_query( h_al, p_query );\r
\r
+ /*\r
+ * Set the query handle now so that users that do sync queries\r
+ * can also cancel the queries.\r
+ */\r
+ if( ph_query )\r
+ *ph_query = p_query;\r
+\r
/* Issue the MAD to the SA. */\r
- status = query_sa( p_query, (ib_query_req_t*)p_query_req );\r
- if( status == IB_SUCCESS )\r
- {\r
- /*\r
- * Set the query handle now so that users that do sync queries\r
- * can also cancel the queries.\r
- */\r
- if( ph_query )\r
- *ph_query = p_query;\r
- /* If synchronous, wait for the completion. */\r
- if( sync )\r
- {\r
- do\r
- {\r
- cl_status = cl_event_wait_on(\r
- &p_query->event, EVENT_NO_TIMEOUT, AL_WAIT_ALERTABLE );\r
- } while( cl_status == CL_NOT_DONE );\r
- CL_ASSERT( cl_status == CL_SUCCESS );\r
- }\r
- }\r
- else if( status != IB_INVALID_GUID )\r
+ status = query_sa( p_query, p_query_req, p_query_req->flags );\r
+ if( status != IB_SUCCESS && status != IB_INVALID_GUID )\r
{\r
CL_TRACE( AL_DBG_ERROR, g_al_dbg_lvl,\r
("query_sa failed: %s\n", ib_get_err_str(status) ) );\r
}\r
\r
/* Cleanup from issuing the query if it failed or was synchronous. */\r
- if( ( status != IB_SUCCESS ) || sync )\r
+ if( status != IB_SUCCESS )\r
{\r
al_remove_query( p_query );\r
- __free_query( p_query );\r
+ cl_free( p_query );\r
}\r
\r
CL_EXIT( AL_DBG_QUERY, g_al_dbg_lvl );\r
static ib_api_status_t\r
query_sa(\r
IN al_query_t *p_query,\r
- IN const ib_query_req_t* const p_query_req )\r
+ IN const ib_query_req_t* const p_query_req,\r
+ IN const ib_al_flags_t flags )\r
{\r
ib_user_query_t sa_req, *p_sa_req;\r
union _query_sa_recs\r
\r
status = al_send_sa_req(\r
&p_query->sa_req, p_query_req->port_guid, p_query_req->timeout_ms,\r
- p_query_req->retry_cnt, p_sa_req );\r
+ p_query_req->retry_cnt, p_sa_req, flags );\r
CL_EXIT( AL_DBG_QUERY, g_al_dbg_lvl );\r
return status;\r
}\r
/* Notify the user of the result. */\r
p_query->pfn_query_cb( &query_rec );\r
\r
- /* Check for synchronous operation. */\r
- if( (p_query->flags & IB_FLAGS_SYNC) == IB_FLAGS_SYNC )\r
- {\r
- cl_event_signal( &p_query->event );\r
- }\r
- else\r
- {\r
- /* Cleanup from issuing the query. */\r
- al_remove_query( p_query );\r
- __free_query( p_query );\r
- }\r
+ /* Cleanup from issuing the query. */\r
+ al_remove_query( p_query );\r
+ cl_free( p_query );\r
\r
CL_EXIT( AL_DBG_QUERY, g_al_dbg_lvl );\r
}\r
-\r
-\r
-\r
-static void\r
-__free_query(\r
- IN OUT al_query_t *p_query )\r
-{\r
- CL_ASSERT( p_query );\r
-\r
- cl_event_destroy( &p_query->event );\r
- cl_free( p_query );\r
-}\r
sa_req_svc_t *p_sa_req_svc; /* For cancellation */\r
ib_mad_element_t *p_mad_response;\r
ib_mad_element_t *p_mad_request; /* For cancellation */\r
+ KEVENT *p_sync_event;\r
#else /* defined( CL_KERNEL ) */\r
uint64_t hdl;\r
ual_send_sa_req_ioctl_t ioctl;\r
{\r
al_sa_req_t sa_req; /* Must be first. */\r
\r
- /* Used to perform synchronous requests. */\r
- ib_al_flags_t flags;\r
- cl_event_t event;\r
-\r
ib_al_handle_t h_al;\r
ib_pfn_query_cb_t pfn_query_cb;\r
ib_query_type_t query_type;\r
IN const net64_t port_guid,\r
IN const uint32_t timeout_ms,\r
IN const uint32_t retry_cnt,\r
- IN const ib_user_query_t* const p_sa_req_data );\r
+ IN const ib_user_query_t* const p_sa_req_data,\r
+ IN const ib_al_flags_t flags );\r
\r
#if defined( CL_KERNEL )\r
static __inline void\r
{\r
ib_reg_svc_handle_t h_reg_svc;\r
\r
- h_reg_svc = PARENT_STRUCT ( p_sa_req, al_reg_svc_t, sa_req );\r
+ /*\r
+ * Note that we come into this callback with a reference\r
+ * on the registration object.\r
+ */\r
+ h_reg_svc = PARENT_STRUCT( p_sa_req, al_reg_svc_t, sa_req );\r
\r
if( p_mad_response )\r
ib_put_mad( p_mad_response );\r
\r
- deref_al_obj( &h_reg_svc->obj );\r
+ h_reg_svc->obj.pfn_destroy( &h_reg_svc->obj, NULL );\r
}\r
\r
\r
sa_mad_data.comp_mask = ~CL_CONST64(0);\r
\r
if( al_send_sa_req( &h_reg_svc->sa_req, h_reg_svc->port_guid,\r
- 500, 0, &sa_mad_data ) != IB_SUCCESS )\r
+ 500, 0, &sa_mad_data, 0 ) != IB_SUCCESS )\r
{\r
/* Cleanup from the registration. */\r
deref_al_obj( &h_reg_svc->obj );\r
\r
h_reg_svc->pfn_reg_svc_cb( ®_svc_rec );\r
\r
- /* Check for synchronous operation. */\r
- if( (h_reg_svc->flags & IB_FLAGS_SYNC) == IB_FLAGS_SYNC )\r
- cl_event_signal( &h_reg_svc->event );\r
-\r
- /* Release the reference taken when issuing the request. */\r
- deref_al_obj( &h_reg_svc->obj );\r
+ if( p_sa_req->status != IB_SUCCESS )\r
+ {\r
+ h_reg_svc->obj.pfn_destroy( &h_reg_svc->obj, NULL );\r
+ }\r
+ else\r
+ {\r
+ /* Release the reference taken when issuing the request. */\r
+ deref_al_obj( &h_reg_svc->obj );\r
+ }\r
}\r
\r
\r
h_sa_reg = PARENT_STRUCT( p_obj, al_reg_svc_t, obj );\r
\r
destroy_al_obj( p_obj );\r
- cl_event_destroy( &h_sa_reg->event );\r
cl_free( h_sa_reg );\r
\r
AL_EXIT( AL_DBG_SA_REQ );\r
IN const ib_reg_svc_req_t* const p_reg_svc_req )\r
{\r
ib_user_query_t sa_mad_data;\r
- ib_api_status_t status;\r
\r
/* Set the request information. */\r
h_reg_svc->sa_req.pfn_sa_req_cb = reg_svc_req_cb;\r
sa_mad_data.comp_mask = p_reg_svc_req->svc_data_mask;\r
sa_mad_data.p_attr = &h_reg_svc->svc_rec;\r
\r
- status = al_send_sa_req( &h_reg_svc->sa_req, h_reg_svc->port_guid,\r
- p_reg_svc_req->timeout_ms, p_reg_svc_req->retry_cnt, &sa_mad_data );\r
- return status;\r
+ return al_send_sa_req( &h_reg_svc->sa_req, h_reg_svc->port_guid,\r
+ p_reg_svc_req->timeout_ms, p_reg_svc_req->retry_cnt, &sa_mad_data,\r
+ p_reg_svc_req->flags );\r
}\r
\r
\r
{\r
ib_reg_svc_handle_t h_sa_reg = NULL;\r
ib_api_status_t status;\r
- cl_status_t cl_status;\r
\r
AL_ENTER( AL_DBG_SA_REQ );\r
\r
return IB_INSUFFICIENT_MEMORY;\r
}\r
\r
- h_sa_reg->flags = p_reg_svc_req->flags;\r
- cl_event_construct( &h_sa_reg->event );\r
construct_al_obj( &h_sa_reg->obj, AL_OBJ_TYPE_H_SA_REG );\r
\r
status = init_al_obj( &h_sa_reg->obj, p_reg_svc_req->svc_context, TRUE,\r
return status;\r
}\r
\r
- /* Check for synchronous operation. */\r
- if( h_sa_reg->flags & IB_FLAGS_SYNC )\r
- {\r
- cl_status = cl_event_init( &h_sa_reg->event, TRUE );\r
- if( cl_status != CL_SUCCESS )\r
- {\r
- status = ib_convert_cl_status( cl_status );\r
- AL_TRACE_EXIT( AL_DBG_ERROR,\r
- ("cl_init_event failed: %s\n", ib_get_err_str(status)) );\r
- h_sa_reg->obj.pfn_destroy( &h_sa_reg->obj, NULL );\r
- return status;\r
- }\r
- }\r
-\r
/* Store the port GUID on which to issue the request. */\r
h_sa_reg->port_guid = p_reg_svc_req->port_guid;\r
\r
\r
/* Issue the MAD to the SA. */\r
status = sa_reg_svc( h_sa_reg, p_reg_svc_req );\r
- if( status == IB_SUCCESS )\r
- {\r
- /* If synchronous, wait for the completion. */\r
- if( h_sa_reg->flags & IB_FLAGS_SYNC )\r
- {\r
- do\r
- {\r
- cl_status = cl_event_wait_on(\r
- &h_sa_reg->event, EVENT_NO_TIMEOUT, AL_WAIT_ALERTABLE );\r
- } while( cl_status == CL_NOT_DONE );\r
- CL_ASSERT( cl_status == CL_SUCCESS );\r
-\r
- /* Cleanup from issuing the request if it failed. */\r
- if( h_sa_reg->state == SA_REG_ERROR )\r
- {\r
- status = h_sa_reg->req_status;\r
- /* The callback released the reference from init_al_obj. */\r
- ref_al_obj( &h_sa_reg->obj );\r
- }\r
- }\r
- }\r
- else\r
+ if( status != IB_SUCCESS )\r
{\r
AL_TRACE( AL_DBG_ERROR,\r
("sa_reg_svc failed: %s\n", ib_get_err_str(status) ) );\r
h_sa_reg->state = SA_REG_ERROR;\r
- }\r
\r
- if( status != IB_SUCCESS )\r
h_sa_reg->obj.pfn_destroy( &h_sa_reg->obj, NULL );\r
+ }\r
else\r
+ {\r
*ph_reg_svc = h_sa_reg;\r
+ }\r
\r
AL_EXIT( AL_DBG_SA_REQ );\r
return status;\r
/* Additional status information returned in the registration response. */\r
ib_net16_t resp_status;\r
\r
- /* Used to perform synchronous requests. */\r
- ib_al_flags_t flags;\r
- cl_event_t event;\r
-\r
al_sa_reg_state_t state;\r
ib_pfn_reg_svc_cb_t pfn_reg_svc_cb;\r
\r
SOURCES= ibal.rc \\r
al_ca_pnp.c \\r
al_ci_ca.c \\r
- al_cm.c \\r
- al_cm_conn.c \\r
- al_cm_sidr.c \\r
+ al_cm_cep.c \\r
al_dev.c \\r
al_driver.c \\r
al_ioc_pnp.c \\r
al_mr.c \\r
al_pnp.c \\r
al_proxy.c \\r
- al_proxy_cm.c \\r
+ al_proxy_cep.c \\r
al_proxy_ioc.c \\r
al_proxy_subnet.c \\r
al_proxy_verbs.c \\r
..\al_av.c \\r
..\al_ca.c \\r
..\al_ci_ca_shared.c \\r
- ..\al_cm_shared.c \\r
+ ..\al_cm_qp.c \\r
..\al_common.c \\r
..\al_cq.c \\r
..\al_dm.c \\r
--- /dev/null
+/*\r
+ * Copyright (c) 2005 SilverStorm Technologies. All rights reserved.\r
+ *\r
+ * This software is available to you under the OpenIB.org BSD license\r
+ * below:\r
+ *\r
+ * Redistribution and use in source and binary forms, with or\r
+ * without modification, are permitted provided that the following\r
+ * conditions are met:\r
+ *\r
+ * - Redistributions of source code must retain the above\r
+ * copyright notice, this list of conditions and the following\r
+ * disclaimer.\r
+ *\r
+ * - Redistributions in binary form must reproduce the above\r
+ * copyright notice, this list of conditions and the following\r
+ * disclaimer in the documentation and/or other materials\r
+ * provided with the distribution.\r
+ *\r
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,\r
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\r
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND\r
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS\r
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN\r
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN\r
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\r
+ * SOFTWARE.\r
+ *\r
+ * $Id$\r
+ */\r
+\r
+\r
+#include <iba/ib_al.h>\r
+#include <complib/cl_vector.h>\r
+#include <complib/cl_rbmap.h>\r
+#include <complib/cl_qmap.h>\r
+#include <complib/cl_spinlock.h>\r
+#include "al_common.h"\r
+#include "al_cm_cep.h"\r
+#include "al_cm_conn.h"\r
+#include "al_cm_sidr.h"\r
+#include "al_debug.h"\r
+#include "ib_common.h"\r
+#include "al_mgr.h"\r
+#include "al_ca.h"\r
+#include "al.h"\r
+#include "al_mad.h"\r
+#include "al_qp.h"\r
+\r
+\r
+/*\r
+ * The vector object uses a list item at the front of the buffers\r
+ * it allocates. Take the list item into account so that allocations\r
+ * are for full page sizes.\r
+ */\r
+#define CEP_CID_MIN \\r
+ ((PAGE_SIZE - sizeof(cl_list_item_t)) / sizeof(cep_cid_t))\r
+#define CEP_CID_GROW \\r
+ ((PAGE_SIZE - sizeof(cl_list_item_t)) / sizeof(cep_cid_t))\r
+\r
+/*\r
+ * We reserve the upper byte of the connection ID as a revolving counter so\r
+ * that connections that are retried by the client change connection ID.\r
+ * This counter is never zero, so it is OK to use all CIDs since we will never\r
+ * have a full CID (base + counter) that is zero.\r
+ * See the IB spec, section 12.9.8.7 for details about REJ retry.\r
+ */\r
+#define CEP_MAX_CID (0x00FFFFFF)\r
+#define CEP_MAX_CID_MASK (0x00FFFFFF)\r
+\r
+#define CEP_MAD_SQ_DEPTH (128)\r
+#define CEP_MAD_RQ_DEPTH (1) /* ignored. */\r
+#define CEP_MAD_SQ_SGE (1)\r
+#define CEP_MAD_RQ_SGE (1) /* ignored. */\r
+\r
+\r
+/* Global connection manager object. */\r
+typedef struct _al_cep_mgr\r
+{\r
+ al_obj_t obj;\r
+\r
+ cl_qmap_t port_map;\r
+\r
+ KSPIN_LOCK lock;\r
+\r
+	/* Table of CEP entries (cep_cid_t), indexed by CID. */\r
+ cl_vector_t cid_vector;\r
+ uint32_t free_cid;\r
+\r
+ /* List of active listens. */\r
+ cl_rbmap_t listen_map;\r
+\r
+ /* Map of CEP by remote CID and CA GUID. */\r
+ cl_rbmap_t conn_id_map;\r
+ /* Map of CEP by remote QPN, used for stale connection matching. */\r
+ cl_rbmap_t conn_qp_map;\r
+\r
+ NPAGED_LOOKASIDE_LIST cep_pool;\r
+ NPAGED_LOOKASIDE_LIST req_pool;\r
+\r
+ /*\r
+ * Periodically walk the list of connections in the time wait state\r
+ * and flush them as appropriate.\r
+ */\r
+ cl_timer_t timewait_timer;\r
+ cl_qlist_t timewait_list;\r
+\r
+ ib_pnp_handle_t h_pnp;\r
+\r
+} al_cep_mgr_t;\r
+\r
+\r
+/* Per-port CM object. */\r
+typedef struct _cep_port_agent\r
+{\r
+ al_obj_t obj;\r
+\r
+ cl_map_item_t item;\r
+\r
+ ib_ca_handle_t h_ca;\r
+ ib_pd_handle_t h_pd;\r
+ ib_qp_handle_t h_qp;\r
+ ib_pool_key_t pool_key;\r
+ ib_mad_svc_handle_t h_mad_svc;\r
+\r
+ net64_t port_guid;\r
+ uint8_t port_num;\r
+ net16_t base_lid;\r
+\r
+} cep_agent_t;\r
+\r
+\r
+/*\r
+ * Note: the REQ, REP, and LAP values must be 1, 2, and 4 respectively.\r
+ * This allows shifting 1 << msg_mraed from an MRA to figure out for which\r
+ * message the MRA was sent.\r
+ */\r
+#define CEP_STATE_RCVD 0x10000000\r
+#define CEP_STATE_SENT 0x20000000\r
+#define CEP_STATE_MRA 0x01000000\r
+#define CEP_STATE_REQ 0x00000001\r
+#define CEP_STATE_REP 0x00000002\r
+#define CEP_STATE_LAP 0x00000004\r
+#define CEP_STATE_RTU 0x00000008\r
+#define CEP_STATE_DREQ 0x00000010\r
+#define CEP_STATE_DREP 0x00000020\r
+#define CEP_STATE_DESTROYING 0x00010000\r
+#define CEP_STATE_USER 0x00020000\r
+\r
+#define CEP_MSG_MASK 0x000000FF\r
+#define CEP_OP_MASK 0xF0000000\r
+\r
+#define CEP_STATE_PREP 0x00100000\r
+\r
+/* States match CM state transition diagrams from spec. */\r
+typedef enum _cep_state\r
+{\r
+ CEP_STATE_IDLE,\r
+ CEP_STATE_LISTEN,\r
+ CEP_STATE_ESTABLISHED,\r
+ CEP_STATE_TIMEWAIT,\r
+ CEP_STATE_SREQ_SENT,\r
+ CEP_STATE_SREQ_RCVD,\r
+ CEP_STATE_ERROR,\r
+ CEP_STATE_DESTROY = CEP_STATE_DESTROYING,\r
+ CEP_STATE_PRE_REQ = CEP_STATE_IDLE | CEP_STATE_PREP,\r
+ CEP_STATE_REQ_RCVD = CEP_STATE_REQ | CEP_STATE_RCVD,\r
+ CEP_STATE_PRE_REP = CEP_STATE_REQ_RCVD | CEP_STATE_PREP,\r
+ CEP_STATE_REQ_SENT = CEP_STATE_REQ | CEP_STATE_SENT,\r
+ CEP_STATE_REQ_MRA_RCVD = CEP_STATE_REQ_SENT | CEP_STATE_MRA,\r
+ CEP_STATE_REQ_MRA_SENT = CEP_STATE_REQ_RCVD | CEP_STATE_MRA,\r
+ CEP_STATE_PRE_REP_MRA_SENT = CEP_STATE_REQ_MRA_SENT | CEP_STATE_PREP,\r
+ CEP_STATE_REP_RCVD = CEP_STATE_REP | CEP_STATE_RCVD,\r
+ CEP_STATE_REP_SENT = CEP_STATE_REP | CEP_STATE_SENT,\r
+ CEP_STATE_REP_MRA_RCVD = CEP_STATE_REP_SENT | CEP_STATE_MRA,\r
+ CEP_STATE_REP_MRA_SENT = CEP_STATE_REP_RCVD | CEP_STATE_MRA,\r
+ CEP_STATE_LAP_RCVD = CEP_STATE_LAP | CEP_STATE_RCVD,\r
+ CEP_STATE_PRE_APR = CEP_STATE_LAP_RCVD | CEP_STATE_PREP,\r
+ CEP_STATE_LAP_SENT = CEP_STATE_LAP | CEP_STATE_SENT,\r
+ CEP_STATE_LAP_MRA_RCVD = CEP_STATE_LAP_SENT | CEP_STATE_MRA,\r
+ CEP_STATE_LAP_MRA_SENT = CEP_STATE_LAP_RCVD | CEP_STATE_MRA,\r
+ CEP_STATE_PRE_APR_MRA_SENT = CEP_STATE_LAP_MRA_SENT | CEP_STATE_PREP,\r
+ CEP_STATE_DREQ_SENT = CEP_STATE_DREQ | CEP_STATE_SENT,\r
+ CEP_STATE_DREQ_RCVD = CEP_STATE_DREQ | CEP_STATE_RCVD,\r
+ CEP_STATE_DREQ_DESTROY = CEP_STATE_DREQ_SENT | CEP_STATE_DESTROYING\r
+\r
+} cep_state_t;\r
+\r
+\r
+/* Active side CEP state transitions:\r
+* al_create_cep -> IDLE\r
+* al_cep_pre_req -> PRE_REQ\r
+* al_cep_send_req -> REQ_SENT\r
+* Recv REQ MRA -> REQ_MRA_RCVD\r
+* Recv REP -> REP_RCVD\r
+* al_cep_mra -> REP_MRA_SENT\r
+* al_cep_rtu -> ESTABLISHED\r
+*\r
+* Passive side CEP state transitions:\r
+* al_create_cep -> IDLE\r
+* Recv REQ -> REQ_RCVD\r
+* al_cep_mra* -> REQ_MRA_SENT\r
+* al_cep_pre_rep -> PRE_REP\r
+* al_cep_mra* -> PRE_REP_MRA_SENT\r
+* al_cep_send_rep -> REP_SENT\r
+* Recv RTU -> ESTABLISHED\r
+*\r
+* *al_cep_mra can only be called once - either before or after PRE_REP.\r
+*/\r
+\r
+typedef struct _al_kcep_av\r
+{\r
+ ib_av_attr_t attr;\r
+ net64_t port_guid;\r
+ uint16_t pkey_index;\r
+\r
+} kcep_av_t;\r
+\r
+\r
+typedef struct _al_kcep\r
+{\r
+ ib_cep_t cep;\r
+\r
+ struct _cep_cid *p_cid;\r
+\r
+ net64_t sid;\r
+\r
+ /* Port guid for filtering incoming requests. */\r
+ net64_t port_guid;\r
+\r
+ uint8_t* __ptr64 p_cmp_buf;\r
+ uint8_t cmp_offset;\r
+ uint8_t cmp_len;\r
+\r
+ boolean_t p2p;\r
+\r
+ /* Used to store connection structure with owning AL instance. */\r
+ cl_list_item_t al_item;\r
+\r
+ /* Flag to indicate whether a user is processing events. */\r
+ boolean_t signalled;\r
+\r
+ /* Destroy callback. */\r
+ ib_pfn_destroy_cb_t pfn_destroy_cb;\r
+\r
+ ib_mad_element_t *p_mad_head;\r
+ ib_mad_element_t *p_mad_tail;\r
+ al_pfn_cep_cb_t pfn_cb;\r
+\r
+ IRP *p_irp;\r
+\r
+ /* MAP item for finding listen CEPs. */\r
+ cl_rbmap_item_t listen_item;\r
+\r
+ /* Map item for finding CEPs based on remote comm ID & CA GUID. */\r
+ cl_rbmap_item_t rem_id_item;\r
+\r
+ /* Map item for finding CEPs based on remote QP number. */\r
+ cl_rbmap_item_t rem_qp_item;\r
+\r
+ /* Communication ID's for the connection. */\r
+ net32_t local_comm_id;\r
+ net32_t remote_comm_id;\r
+\r
+ net64_t local_ca_guid;\r
+ net64_t remote_ca_guid;\r
+\r
+ /* Remote QP, used for stale connection checking. */\r
+ net32_t remote_qpn;\r
+\r
+ /* Parameters to format QP modification structure. */\r
+ net32_t sq_psn;\r
+ net32_t rq_psn;\r
+ uint8_t resp_res;\r
+ uint8_t init_depth;\r
+ uint8_t rnr_nak_timeout;\r
+\r
+ /*\r
+ * Local QP number, used for the "additional check" required\r
+ * of the DREQ.\r
+ */\r
+ net32_t local_qpn;\r
+\r
+ /* PKEY to make sure a LAP is on the same partition. */\r
+ net16_t pkey;\r
+\r
+ /* Initiator depth as received in the REQ. */\r
+ uint8_t req_init_depth;\r
+\r
+ /*\r
+ * Primary and alternate path info, used to create the address vectors for\r
+ * sending MADs, to locate the port CM agent to use for outgoing sends,\r
+ * and for creating the address vectors for transitioning QPs.\r
+ */\r
+ kcep_av_t av[2];\r
+ uint8_t idx_primary;\r
+\r
+ /* Temporary AV and CEP port GUID used when processing LAP. */\r
+ kcep_av_t alt_av;\r
+ uint8_t alt_2pkt_life;\r
+\r
+	/* Maximum packet lifetime * 2 of any path used on a connection. */\r
+ uint8_t max_2pkt_life;\r
+ /* Given by the REP, used for alternate path setup. */\r
+ uint8_t target_ack_delay;\r
+ /* Stored to help calculate the local ACK delay in the LAP. */\r
+ uint8_t local_ack_delay;\r
+\r
+ /* Volatile to allow using atomic operations for state checks. */\r
+ cep_state_t state;\r
+\r
+ /*\r
+ * Flag that indicates whether a connection took the active role during\r
+ * establishment. \r
+ */\r
+ boolean_t was_active;\r
+\r
+ /*\r
+ * Handle to the sent MAD, used for cancelling. We store the handle to\r
+ * the mad service so that we can properly cancel. This should not be a\r
+ * problem since all outstanding sends should be completed before the\r
+ * mad service completes its destruction and the handle becomes invalid.\r
+ */\r
+ ib_mad_svc_handle_t h_mad_svc;\r
+ ib_mad_element_t *p_send_mad;\r
+\r
+	/* Number of outstanding MADs.  Delays destruction of the CEP. */\r
+ atomic32_t ref_cnt;\r
+\r
+ /* MAD transaction ID to use when sending MADs. */\r
+ uint64_t tid;\r
+\r
+ /* Maximum retries per MAD. Set at REQ time, stored to retry LAP. */\r
+ uint8_t max_cm_retries;\r
+ /* Timeout value, in milliseconds. Set at REQ time, stored to retry LAP. */\r
+ uint32_t retry_timeout;\r
+\r
+ /* Timer that will be signalled when the CEP exits timewait. */\r
+ KTIMER timewait_timer;\r
+ LARGE_INTEGER timewait_time;\r
+ cl_list_item_t timewait_item;\r
+\r
+ /*\r
+ * Pointer to a formatted MAD. The pre_req, pre_rep and pre_apr calls\r
+ * allocate and format the MAD, and the send_req, send_rep and send_apr\r
+ * calls send it.\r
+ */\r
+ ib_mad_element_t *p_mad;\r
+\r
+ /* Cache the last MAD sent for retransmission. */\r
+ union _mads\r
+ {\r
+ ib_mad_t hdr;\r
+ mad_cm_mra_t mra;\r
+ mad_cm_rtu_t rtu;\r
+ mad_cm_drep_t drep;\r
+\r
+ } mads;\r
+\r
+} kcep_t;\r
+\r
+\r
+/* Structures stored in the CID vector. */\r
+typedef struct _cep_cid\r
+{\r
+ /* Owning AL handle. NULL if invalid. */\r
+ ib_al_handle_t h_al;\r
+ /* Pointer to CEP, or index of next free entry if h_al is NULL. */\r
+ kcep_t *p_cep;\r
+ /* For REJ Retry support */\r
+ uint8_t modifier;\r
+\r
+} cep_cid_t;\r
+\r
+\r
+/* Global instance of the CM agent. */\r
+al_cep_mgr_t *gp_cep_mgr = NULL;\r
+\r
+\r
+static ib_api_status_t\r
+__format_drep(\r
+ IN kcep_t* const p_cep,\r
+ IN const uint8_t* p_pdata OPTIONAL,\r
+ IN uint8_t pdata_len,\r
+ IN OUT mad_cm_drep_t* const p_drep );\r
+\r
+static ib_api_status_t\r
+__cep_queue_mad(\r
+ IN kcep_t* const p_cep,\r
+ IN ib_mad_element_t* p_mad );\r
+\r
+static inline void\r
+__process_cep(\r
+ IN kcep_t* const p_cep );\r
+\r
+static inline uint32_t\r
+__calc_mad_timeout(\r
+ IN const uint8_t pkt_life );\r
+\r
+static inline void\r
+__calc_timewait(\r
+ IN kcep_t* const p_cep );\r
+\r
+static kcep_t*\r
+__create_cep( void );\r
+\r
+static int32_t\r
+__cleanup_cep(\r
+ IN kcep_t* const p_cep );\r
+\r
+static void\r
+__destroy_cep(\r
+ IN kcep_t* const p_cep );\r
+\r
+static inline void\r
+__bind_cep(\r
+ IN kcep_t* const p_cep,\r
+ IN ib_al_handle_t h_al,\r
+ IN al_pfn_cep_cb_t pfn_cb,\r
+ IN void *context );\r
+\r
+static inline void\r
+__unbind_cep(\r
+ IN kcep_t* const p_cep );\r
+\r
+static void\r
+__pre_destroy_cep(\r
+ IN kcep_t* const p_cep );\r
+\r
+static kcep_t*\r
+__lookup_by_id(\r
+ IN net32_t remote_comm_id,\r
+ IN net64_t remote_ca_guid );\r
+\r
+static kcep_t*\r
+__lookup_listen(\r
+ IN net64_t sid,\r
+ IN net64_t port_guid,\r
+ IN void *p_pdata );\r
+\r
+static inline kcep_t*\r
+__lookup_cep(\r
+ IN ib_al_handle_t h_al OPTIONAL,\r
+ IN net32_t cid );\r
+\r
+static inline kcep_t*\r
+__insert_cep(\r
+ IN kcep_t* const p_new_cep );\r
+\r
+static inline void\r
+__remove_cep(\r
+ IN kcep_t* const p_cep );\r
+\r
+static inline void\r
+__insert_timewait(\r
+ IN kcep_t* const p_cep );\r
+\r
+static ib_api_status_t\r
+__cep_send_mad(\r
+ IN cep_agent_t* const p_port_cep,\r
+ IN ib_mad_element_t* const p_mad );\r
+\r
+/* Returns the CEP agent for the port matching the given GID, LID, and PKEY. */\r
+static cep_agent_t*\r
+__find_port_cep(\r
+ IN const ib_gid_t* const p_gid,\r
+ IN const net16_t lid,\r
+ IN const net16_t pkey,\r
+ OUT uint16_t* const p_pkey_index );\r
+\r
+static cep_cid_t*\r
+__get_lcid(\r
+ OUT net32_t* const p_cid );\r
+\r
+static void\r
+__process_cep_send_comp(\r
+ IN cl_async_proc_item_t *p_item );\r
+\r
+\r
+/******************************************************************************\r
+* Per-port CEP agent\r
+******************************************************************************/\r
+\r
+\r
+static inline void\r
+__format_mad_hdr(\r
+ IN ib_mad_t* const p_mad,\r
+ IN const kcep_t* const p_cep,\r
+ IN net16_t attr_id )\r
+{\r
+ p_mad->base_ver = 1;\r
+ p_mad->mgmt_class = IB_MCLASS_COMM_MGMT;\r
+ p_mad->class_ver = IB_MCLASS_CM_VER_2;\r
+ p_mad->method = IB_MAD_METHOD_SEND;\r
+ p_mad->status = 0;\r
+ p_mad->class_spec = 0;\r
+ p_mad->trans_id = p_cep->tid;\r
+ p_mad->attr_id = attr_id;\r
+ p_mad->resv = 0;\r
+ p_mad->attr_mod = 0;\r
+}\r
+\r
+\r
+/* Consumes the input MAD. */\r
+static void\r
+__reject_mad(\r
+ IN cep_agent_t* const p_port_cep,\r
+ IN kcep_t* const p_cep,\r
+ IN ib_mad_element_t* const p_mad,\r
+ IN ib_rej_status_t reason )\r
+{\r
+ mad_cm_rej_t *p_rej;\r
+\r
+ AL_ENTER( AL_DBG_CM );\r
+\r
+ p_rej = (mad_cm_rej_t*)p_mad->p_mad_buf;\r
+\r
+ __format_mad_hdr( p_mad->p_mad_buf, p_cep, CM_REJ_ATTR_ID );\r
+\r
+ p_rej->local_comm_id = p_cep->local_comm_id;\r
+ p_rej->remote_comm_id = p_cep->remote_comm_id;\r
+ p_rej->reason = reason;\r
+\r
+ switch( p_cep->state )\r
+ {\r
+ case CEP_STATE_REQ_RCVD:\r
+ case CEP_STATE_REQ_MRA_SENT:\r
+ case CEP_STATE_PRE_REP:\r
+ case CEP_STATE_PRE_REP_MRA_SENT:\r
+ conn_rej_set_msg_rejected( 0, p_rej );\r
+ break;\r
+\r
+ case CEP_STATE_REP_RCVD:\r
+ case CEP_STATE_REP_MRA_SENT:\r
+ conn_rej_set_msg_rejected( 1, p_rej );\r
+ break;\r
+\r
+ default:\r
+ CL_ASSERT( reason == IB_REJ_TIMEOUT );\r
+ conn_rej_set_msg_rejected( 2, p_rej );\r
+ break;\r
+ }\r
+\r
+ conn_rej_clr_rsvd_fields( p_rej );\r
+ __cep_send_mad( p_port_cep, p_mad );\r
+\r
+ AL_EXIT( AL_DBG_CM );\r
+}\r
+\r
+\r
+static void\r
+__reject_timeout(\r
+ IN cep_agent_t* const p_port_cep,\r
+ IN kcep_t* const p_cep,\r
+ IN const ib_mad_element_t* const p_mad )\r
+{\r
+ ib_api_status_t status;\r
+ ib_mad_element_t *p_rej_mad;\r
+ ib_mad_t *p_mad_buf;\r
+ ib_grh_t *p_grh;\r
+\r
+ AL_ENTER( AL_DBG_CM );\r
+\r
+ status = ib_get_mad( p_port_cep->pool_key, MAD_BLOCK_SIZE, &p_rej_mad );\r
+ if( status != IB_SUCCESS )\r
+ {\r
+ AL_TRACE_EXIT( AL_DBG_ERROR,\r
+ ("ib_get_mad returned %s\n", ib_get_err_str( status )) );\r
+ return;\r
+ }\r
+\r
+ /* Save the buffer pointers from the new element. */\r
+ p_mad_buf = p_rej_mad->p_mad_buf;\r
+ p_grh = p_rej_mad->p_grh;\r
+\r
+ /*\r
+ * Copy the input MAD element to the reject - this gives us\r
+ * all appropriate addressing information.\r
+ */\r
+ cl_memcpy( p_rej_mad, p_mad, sizeof(ib_mad_element_t) );\r
+ cl_memcpy( p_grh, p_mad->p_grh, sizeof(ib_grh_t) );\r
+\r
+ /* Restore the buffer pointers now that the copy is complete. */\r
+ p_rej_mad->p_mad_buf = p_mad_buf;\r
+ p_rej_mad->p_grh = p_grh;\r
+\r
+ status = conn_rej_set_pdata( NULL, 0, (mad_cm_rej_t*)p_mad_buf );\r
+ CL_ASSERT( status == IB_SUCCESS );\r
+\r
+ /* Copy the local CA GUID into the ARI. */\r
+ switch( p_mad->p_mad_buf->attr_id )\r
+ {\r
+ case CM_REQ_ATTR_ID:\r
+ status = conn_rej_set_ari(\r
+ (uint8_t*)&p_cep->local_ca_guid,\r
+ sizeof(p_cep->local_ca_guid), (mad_cm_rej_t*)p_mad_buf );\r
+ CL_ASSERT( status == IB_SUCCESS );\r
+ __reject_mad( p_port_cep, p_cep, p_rej_mad, IB_REJ_TIMEOUT );\r
+ break;\r
+\r
+ case CM_REP_ATTR_ID:\r
+ status = conn_rej_set_ari(\r
+ (uint8_t*)&p_cep->local_ca_guid,\r
+ sizeof(p_cep->local_ca_guid), (mad_cm_rej_t*)p_mad_buf );\r
+ CL_ASSERT( status == IB_SUCCESS );\r
+ __reject_mad( p_port_cep, p_cep, p_rej_mad, IB_REJ_TIMEOUT );\r
+ break;\r
+\r
+ default:\r
+ CL_ASSERT( p_mad->p_mad_buf->attr_id == CM_REQ_ATTR_ID ||\r
+ p_mad->p_mad_buf->attr_id == CM_REP_ATTR_ID );\r
+ ib_put_mad( p_rej_mad );\r
+ return;\r
+ }\r
+\r
+ AL_EXIT( AL_DBG_CM );\r
+}\r
+\r
+\r
+static void\r
+__reject_req(\r
+ IN cep_agent_t* const p_port_cep,\r
+ IN ib_mad_element_t* const p_mad,\r
+ IN const ib_rej_status_t reason )\r
+{\r
+ mad_cm_req_t *p_req;\r
+ mad_cm_rej_t *p_rej;\r
+\r
+ AL_ENTER( AL_DBG_CM );\r
+\r
+ CL_ASSERT( p_port_cep );\r
+ CL_ASSERT( p_mad );\r
+ CL_ASSERT( reason != 0 );\r
+\r
+ p_req = (mad_cm_req_t*)p_mad->p_mad_buf;\r
+ p_rej = (mad_cm_rej_t*)p_mad->p_mad_buf;\r
+\r
+ /*\r
+ * Format the reject information, overwriting the REQ data and send\r
+ * the response.\r
+ */\r
+ p_rej->hdr.attr_id = CM_REJ_ATTR_ID;\r
+ p_rej->remote_comm_id = p_req->local_comm_id;\r
+ p_rej->local_comm_id = 0;\r
+ conn_rej_set_msg_rejected( 0, p_rej );\r
+ p_rej->reason = reason;\r
+ conn_rej_set_ari( NULL, 0, p_rej );\r
+ conn_rej_set_pdata( NULL, 0, p_rej );\r
+ conn_rej_clr_rsvd_fields( p_rej );\r
+\r
+ p_mad->retry_cnt = 0;\r
+ p_mad->send_opt = 0;\r
+ p_mad->timeout_ms = 0;\r
+ p_mad->resp_expected = FALSE;\r
+\r
+ __cep_send_mad( p_port_cep, p_mad );\r
+\r
+ AL_EXIT( AL_DBG_CM );\r
+}\r
+\r
+\r
+static void\r
+__format_req_av(\r
+ IN kcep_t* const p_cep,\r
+ IN const mad_cm_req_t* const p_req,\r
+ IN const uint8_t idx )\r
+{\r
+ cep_agent_t *p_port_cep;\r
+ const req_path_info_t *p_path;\r
+\r
+ AL_ENTER( AL_DBG_CM );\r
+\r
+ CL_ASSERT( p_cep );\r
+ CL_ASSERT( p_req );\r
+\r
+ cl_memclr( &p_cep->av[idx], sizeof(kcep_av_t) );\r
+\r
+ p_path = &((&p_req->primary_path)[idx]);\r
+\r
+ p_port_cep = __find_port_cep( &p_path->remote_gid,\r
+ p_path->remote_lid, p_req->pkey, &p_cep->av[idx].pkey_index );\r
+ if( !p_port_cep )\r
+ {\r
+ if( !idx )\r
+ p_cep->local_ca_guid = 0;\r
+ AL_EXIT( AL_DBG_CM );\r
+ return;\r
+ }\r
+\r
+ if( !idx )\r
+ p_cep->local_ca_guid = p_port_cep->h_ca->obj.p_ci_ca->verbs.guid;\r
+\r
+ /* Check that CA GUIDs match if formatting the alternate path. */\r
+ if( idx &&\r
+ p_port_cep->h_ca->obj.p_ci_ca->verbs.guid != p_cep->local_ca_guid )\r
+ {\r
+ AL_EXIT( AL_DBG_CM );\r
+ return;\r
+ }\r
+\r
+ /*\r
+	 * Pkey indices must match if formatting the alternate path - the QP\r
+ * modify structure only allows for a single PKEY index to be specified.\r
+ */\r
+ if( idx &&\r
+ p_cep->av[0].pkey_index != p_cep->av[1].pkey_index )\r
+ {\r
+ AL_EXIT( AL_DBG_CM );\r
+ return;\r
+ }\r
+\r
+ p_cep->av[idx].port_guid = p_port_cep->port_guid;\r
+ p_cep->av[idx].attr.port_num = p_port_cep->port_num;\r
+\r
+ p_cep->av[idx].attr.sl = conn_req_path_get_svc_lvl( p_path );\r
+ p_cep->av[idx].attr.dlid = p_path->local_lid;\r
+\r
+ if( !conn_req_path_get_subn_lcl( p_path ) )\r
+ {\r
+ p_cep->av[idx].attr.grh_valid = TRUE;\r
+ p_cep->av[idx].attr.grh.ver_class_flow = ib_grh_set_ver_class_flow(\r
+ 1, p_path->traffic_class, conn_req_path_get_flow_lbl( p_path ) );\r
+ p_cep->av[idx].attr.grh.hop_limit = p_path->hop_limit;\r
+ p_cep->av[idx].attr.grh.dest_gid = p_path->local_gid;\r
+ p_cep->av[idx].attr.grh.src_gid = p_path->remote_gid;\r
+ }\r
+ else\r
+ {\r
+ p_cep->av[idx].attr.grh_valid = FALSE;\r
+ }\r
+ p_cep->av[idx].attr.static_rate = conn_req_path_get_pkt_rate( p_path );\r
+ p_cep->av[idx].attr.path_bits =\r
+ (uint8_t)(p_path->remote_lid - p_port_cep->base_lid);\r
+\r
+ /*\r
+ * Note that while we never use the connected AV attributes internally,\r
+ * we store them so we can pass them back to users.\r
+ */\r
+ p_cep->av[idx].attr.conn.path_mtu = conn_req_get_mtu( p_req );\r
+ p_cep->av[idx].attr.conn.local_ack_timeout =\r
+ conn_req_path_get_lcl_ack_timeout( p_path );\r
+ p_cep->av[idx].attr.conn.seq_err_retry_cnt =\r
+ conn_req_get_retry_cnt( p_req );\r
+ p_cep->av[idx].attr.conn.rnr_retry_cnt =\r
+ conn_req_get_rnr_retry_cnt( p_req );\r
+\r
+ AL_EXIT( AL_DBG_CM );\r
+}\r
+\r
+\r
+/*\r
+ * + Validates the path information provided in the REQ and stores the\r
+ *   associated CA attributes and port indices.\r
+ * + Transitions a connection object from active to passive in the peer case.\r
+ * + Sets the path information in the connection and sets the CA GUID\r
+ * in the REQ callback record.\r
+ */\r
+static void\r
+__save_wire_req(\r
+ IN OUT kcep_t* const p_cep,\r
+ IN OUT mad_cm_req_t* const p_req )\r
+{\r
+ AL_ENTER( AL_DBG_CM );\r
+\r
+ p_cep->state = CEP_STATE_REQ_RCVD;\r
+ p_cep->was_active = FALSE;\r
+\r
+ p_cep->sid = p_req->sid;\r
+\r
+ /* Store pertinent information in the connection. */\r
+ p_cep->remote_comm_id = p_req->local_comm_id;\r
+ p_cep->remote_ca_guid = p_req->local_ca_guid;\r
+\r
+ p_cep->remote_qpn = conn_req_get_lcl_qpn( p_req );\r
+ p_cep->local_qpn = 0;\r
+\r
+ p_cep->retry_timeout =\r
+ __calc_mad_timeout( conn_req_get_lcl_resp_timeout( p_req ) );\r
+\r
+ /* Store the retry count. */\r
+ p_cep->max_cm_retries = conn_req_get_max_cm_retries( p_req );\r
+\r
+ /*\r
+ * Copy the paths from the req_rec into the connection for\r
+ * future use. Note that if the primary path is invalid,\r
+ * the REP will fail.\r
+ */\r
+ __format_req_av( p_cep, p_req, 0 );\r
+\r
+ if( p_req->alternate_path.local_lid )\r
+ __format_req_av( p_cep, p_req, 1 );\r
+ else\r
+ cl_memclr( &p_cep->av[1], sizeof(kcep_av_t) );\r
+\r
+ p_cep->idx_primary = 0;\r
+\r
+ /* Store the maximum packet lifetime, used to calculate timewait. */\r
+ p_cep->max_2pkt_life = conn_req_path_get_lcl_ack_timeout( &p_req->primary_path );\r
+ p_cep->max_2pkt_life = max( p_cep->max_2pkt_life,\r
+ conn_req_path_get_lcl_ack_timeout( &p_req->alternate_path ) );\r
+\r
+ /*\r
+ * Make sure the target ack delay is cleared - the above\r
+ * "packet life" includes it.\r
+ */\r
+ p_cep->target_ack_delay = 0;\r
+\r
+ /* Store the requested initiator depth. */\r
+ p_cep->req_init_depth = conn_req_get_init_depth( p_req );\r
+\r
+ /*\r
+ * Store the provided responder resources. These turn into the local\r
+ * QP's initiator depth.\r
+ */\r
+ p_cep->init_depth = conn_req_get_resp_res( p_req );\r
+\r
+ p_cep->sq_psn = conn_req_get_starting_psn( p_req );\r
+\r
+ p_cep->tid = p_req->hdr.trans_id;\r
+ /* copy mad info for cm handoff */\r
+	/* TODO: Do we need to support CM handoff? */\r
+ //p_cep->mads.req = *p_req;\r
+\r
+ AL_EXIT( AL_DBG_CM );\r
+}\r
+\r
+\r
+/* Must be called with the CEP lock held. */\r
+static void\r
+__repeat_mad(\r
+ IN cep_agent_t* const p_port_cep,\r
+ IN kcep_t* const p_cep,\r
+ IN ib_mad_element_t* const p_mad )\r
+{\r
+ AL_ENTER( AL_DBG_CM );\r
+\r
+ CL_ASSERT( p_port_cep );\r
+ CL_ASSERT( p_cep );\r
+ CL_ASSERT( p_mad );\r
+\r
+ /* Repeat the last mad sent for the connection. */\r
+ switch( p_cep->state )\r
+ {\r
+ case CEP_STATE_REQ_MRA_SENT: /* resend MRA(REQ) */\r
+ case CEP_STATE_REP_MRA_SENT: /* resend MRA(REP) */\r
+ case CEP_STATE_LAP_MRA_SENT: /* resend MRA(LAP) */\r
+ case CEP_STATE_ESTABLISHED: /* resend RTU */\r
+ case CEP_STATE_TIMEWAIT: /* resend the DREP */\r
+ cl_memcpy( p_mad->p_mad_buf, &p_cep->mads, MAD_BLOCK_SIZE );\r
+ p_mad->send_context1 = NULL;\r
+ p_mad->send_context2 = NULL;\r
+ __cep_send_mad( p_port_cep, p_mad );\r
+ break;\r
+\r
+ default:\r
+ /* Return the MAD to the mad pool */\r
+ ib_put_mad( p_mad );\r
+ break;\r
+ }\r
+\r
+ AL_EXIT( AL_DBG_CM );\r
+}\r
+\r
+\r
+static void\r
+__process_req(\r
+ IN cep_agent_t* const p_port_cep,\r
+ IN ib_mad_element_t* const p_mad )\r
+{\r
+ ib_api_status_t status;\r
+ mad_cm_req_t *p_req;\r
+ kcep_t *p_cep, *p_new_cep, *p_stale_cep;\r
+ KLOCK_QUEUE_HANDLE hdl;\r
+ ib_rej_status_t reason;\r
+\r
+ AL_ENTER( AL_DBG_CM );\r
+\r
+ CL_ASSERT( KeGetCurrentIrql() == DISPATCH_LEVEL );\r
+\r
+ p_req = (mad_cm_req_t*)p_mad->p_mad_buf;\r
+\r
+ AL_TRACE( AL_DBG_CM,\r
+ ("REQ: comm_id (x%x) qpn (x%x) received\n",\r
+ p_req->local_comm_id, conn_req_get_lcl_qpn( p_req )) );\r
+\r
+ KeAcquireInStackQueuedSpinLockAtDpcLevel( &gp_cep_mgr->lock, &hdl );\r
+\r
+ if( conn_req_get_qp_type( p_req ) > IB_QPT_UNRELIABLE_CONN )\r
+ {\r
+ /* Reserved value. Reject. */\r
+ AL_TRACE( AL_DBG_ERROR, ("Invalid transport type received.\n") );\r
+ reason = IB_REJ_INVALID_XPORT;\r
+ goto reject;\r
+ }\r
+\r
+ /* Match against pending connections using remote comm ID and CA GUID. */\r
+ p_cep = __lookup_by_id( p_req->local_comm_id, p_req->local_ca_guid );\r
+ if( p_cep )\r
+ {\r
+ /* Already received the REQ. */\r
+ switch( p_cep->state )\r
+ {\r
+ case CEP_STATE_REQ_MRA_SENT:\r
+ __repeat_mad( p_port_cep, p_cep, p_mad );\r
+ break;\r
+\r
+ case CEP_STATE_TIMEWAIT:\r
+ case CEP_STATE_DESTROY:\r
+ /* Send a reject. */\r
+ AL_TRACE( AL_DBG_CM,\r
+ ("REQ received for connection in TIME_WAIT state.\n") );\r
+ __reject_req( p_port_cep, p_mad, IB_REJ_STALE_CONN );\r
+ break;\r
+\r
+ default:\r
+ /*\r
+ * Let regular retries repeat the MAD. If our last message was\r
+ * dropped, resending only adds to the congestion. If it wasn't\r
+ * dropped, then the remote CM will eventually process it, and\r
+ * we'd just be adding traffic.\r
+ */\r
+ AL_TRACE( AL_DBG_CM, ("Duplicate REQ received.\n") );\r
+ ib_put_mad( p_mad );\r
+ }\r
+ KeReleaseInStackQueuedSpinLockFromDpcLevel( &hdl );\r
+ AL_EXIT( AL_DBG_CM );\r
+ return;\r
+ }\r
+\r
+ /*\r
+ * Allocate a new CEP for the new request. This will\r
+ * prevent multiple identical REQs from queueing up for processing.\r
+ */\r
+ p_new_cep = __create_cep();\r
+ if( !p_new_cep )\r
+ {\r
+ /* Reject the request for insufficient resources. */\r
+ reason = IB_REJ_INSUF_RESOURCES;\r
+ AL_TRACE_EXIT( AL_DBG_ERROR,\r
+ ("al_create_cep failed\nREJ sent for insufficient resources.\n") );\r
+ goto reject;\r
+ }\r
+\r
+ __save_wire_req( p_new_cep, p_req );\r
+\r
+ /*\r
+ * Match against listens using SID and compare data, also provide the receiving\r
+ * MAD service's port GUID so we can properly filter.\r
+ */\r
+ p_cep = __lookup_listen( p_req->sid, p_port_cep->port_guid, p_req->pdata );\r
+ if( p_cep )\r
+ {\r
+ __bind_cep( p_new_cep, p_cep->p_cid->h_al, p_cep->pfn_cb, NULL );\r
+\r
+ /* Add the new CEP to the map so that repeated REQs match up. */\r
+ p_stale_cep = __insert_cep( p_new_cep );\r
+ if( p_stale_cep != p_new_cep )\r
+ {\r
+ /* Duplicate - must be a stale connection. */\r
+ /* TODO: Fail the CEP in p_stale_cep */\r
+ reason = IB_REJ_STALE_CONN;\r
+ goto unbind;\r
+ }\r
+\r
+ /*\r
+ * Queue the mad - the return value indicates whether we should\r
+ * invoke the callback.\r
+ */\r
+ status = __cep_queue_mad( p_cep, p_mad );\r
+ switch( status )\r
+ {\r
+ case IB_SUCCESS:\r
+ case IB_PENDING:\r
+ p_mad->send_context1 = p_new_cep;\r
+ break;\r
+\r
+ default:\r
+ reason = IB_REJ_INSUF_RESOURCES;\r
+ goto unbind;\r
+ }\r
+ }\r
+ else\r
+ {\r
+ AL_TRACE( AL_DBG_CM, ("No listens active!\n") );\r
+\r
+ /* Match against peer-to-peer requests using SID and compare data. */\r
+ //p_cep = __lookup_peer();\r
+ //if( p_cep )\r
+ //{\r
+ // p_mad->send_context2 = NULL;\r
+ // p_list_item = cl_qlist_find_from_head( &gp_cep_mgr->pending_list,\r
+ // __match_peer, p_req );\r
+ // if( p_list_item != cl_qlist_end( &gp_cep_mgr->pending_list ) )\r
+ // {\r
+ // KeReleaseInStackQueuedSpinLockFromDpcLevel( &hdl );\r
+ // p_conn = PARENT_STRUCT( p_list_item, kcep_t, map_item );\r
+ // __peer_req( p_port_cep, p_conn, p_async_mad->p_mad );\r
+ // cl_free( p_async_mad );\r
+ // CL_TRACE_EXIT( AL_DBG_CM, g_al_dbg_lvl,\r
+ // ("REQ matched a peer-to-peer request.\n") );\r
+ // return;\r
+ // }\r
+ // reason = IB_REJ_INVALID_SID;\r
+ // goto free;\r
+ //}\r
+ //else\r
+ {\r
+ /* No match found. Reject. */\r
+ reason = IB_REJ_INVALID_SID;\r
+ AL_TRACE( AL_DBG_CM, ("REQ received but no match found.\n") );\r
+ goto cleanup;\r
+ }\r
+ }\r
+\r
+ KeReleaseInStackQueuedSpinLockFromDpcLevel( &hdl );\r
+\r
+ /* Process any queued MADs for the CEP. */\r
+ if( status == IB_SUCCESS )\r
+ __process_cep( p_cep );\r
+\r
+ AL_EXIT( AL_DBG_CM );\r
+ return;\r
+\r
+unbind:\r
+ __unbind_cep( p_new_cep );\r
+\r
+cleanup:\r
+ /*\r
+ * Move the CEP in the idle state so that we don't send a reject\r
+ * for it when cleaning up. Also clear the RQPN and RCID so that\r
+ * we don't try to remove it from our maps (since it isn't inserted).\r
+ */\r
+ p_new_cep->state = CEP_STATE_IDLE;\r
+ p_new_cep->remote_comm_id = 0;\r
+ p_new_cep->remote_qpn = 0;\r
+ __cleanup_cep( p_new_cep );\r
+\r
+reject:\r
+ __reject_req( p_port_cep, p_mad, reason );\r
+\r
+ KeReleaseInStackQueuedSpinLockFromDpcLevel( &hdl );\r
+ AL_EXIT( AL_DBG_CM );\r
+}\r
+\r
+\r
/*
 * Transition a CEP to the REP-received state, capturing the connection
 * parameters carried by the passive side's REP MAD.
 * NOTE(review): callers appear to hold the CEP manager lock - confirm.
 */
static void
__save_wire_rep(
	IN	OUT			kcep_t* const		p_cep,
	IN		const	mad_cm_rep_t* const	p_rep )
{
	AL_ENTER( AL_DBG_CM );

	/* The send should have been cancelled during MRA processing. */
	p_cep->state = CEP_STATE_REP_RCVD;

	/* Store pertinent information in the connection. */
	p_cep->remote_comm_id = p_rep->local_comm_id;
	p_cep->remote_ca_guid = p_rep->local_ca_guid;

	p_cep->remote_qpn = conn_rep_get_lcl_qpn( p_rep );

	/* Store the remote endpoint's target ACK delay. */
	p_cep->target_ack_delay = conn_rep_get_target_ack_delay( p_rep );

	/* Update the local ACK delay stored in the AV's. */
	p_cep->av[0].attr.conn.local_ack_timeout = calc_lcl_ack_timeout(
		p_cep->av[0].attr.conn.local_ack_timeout, p_cep->target_ack_delay );
	p_cep->av[0].attr.conn.rnr_retry_cnt = conn_rep_get_rnr_retry_cnt( p_rep );

	/* Keep the alternate AV (if configured) in sync with the primary. */
	if( p_cep->av[1].port_guid )
	{
		p_cep->av[1].attr.conn.local_ack_timeout = calc_lcl_ack_timeout(
			p_cep->av[1].attr.conn.local_ack_timeout,
			p_cep->target_ack_delay );
		p_cep->av[1].attr.conn.rnr_retry_cnt =
			p_cep->av[0].attr.conn.rnr_retry_cnt;
	}

	/* The peer's responder resources bound our initiator depth, and vice versa. */
	p_cep->init_depth = p_rep->resp_resources;
	p_cep->resp_res = p_rep->initiator_depth;

	p_cep->sq_psn = conn_rep_get_starting_psn( p_rep );

	AL_EXIT( AL_DBG_CM );
}
+\r
+\r
/*
 * Handle a received MRA (Message Receipt Acknowledged) MAD.  Matches the
 * MRA to a CEP, validates that it acknowledges the message type we have
 * outstanding, delays the pending send accordingly, and queues the MAD
 * for client delivery (at most one MRA callback per exchange).
 */
static void
__process_mra(
	IN				ib_mad_element_t* const		p_mad )
{
	ib_api_status_t		status;
	mad_cm_mra_t		*p_mra;
	kcep_t				*p_cep;
	KLOCK_QUEUE_HANDLE	hdl;

	AL_ENTER( AL_DBG_CM );

	CL_ASSERT( KeGetCurrentIrql() == DISPATCH_LEVEL );

	p_mra = (mad_cm_mra_t*)p_mad->p_mad_buf;

	KeAcquireInStackQueuedSpinLockAtDpcLevel( &gp_cep_mgr->lock, &hdl );
	p_cep = __lookup_cep( NULL, p_mra->remote_comm_id );
	if( !p_cep )
	{
		AL_TRACE( AL_DBG_CM,
			("MRA received that could not be matched.\n") );
		goto err;
	}

	/* If we already know the remote comm ID, the MRA's must match it. */
	if( p_cep->remote_comm_id )
	{
		if( p_cep->remote_comm_id != p_mra->local_comm_id )
		{
			AL_TRACE( AL_DBG_CM,
				("MRA received that could not be matched.\n") );
			goto err;
		}
	}
	/*
	 * Note that we don't update the CEP's remote comm ID - it messes up REP
	 * processing since a non-zero RCID implies the connection is in the RCID
	 * map.  Adding it here requires checking there and conditionally adding
	 * it.  Ignoring it is a valid thing to do.
	 */

	/*
	 * The MRAed-message field selects a bit that must equal the message
	 * bit recorded in our state (note: `<<` binds tighter than `!=`, so
	 * this compares (1 << msg) against the masked state - intended).
	 */
	if( !(p_cep->state & CEP_STATE_SENT) ||
		(1 << conn_mra_get_msg_mraed( p_mra ) !=
		(p_cep->state & CEP_MSG_MASK)) )
	{
		/* Invalid state. */
		AL_TRACE( AL_DBG_CM, ("MRA received in invalid state.\n") );
		goto err;
	}

	/* Delay the current send. */
	CL_ASSERT( p_cep->p_send_mad );
	ib_delay_mad( p_cep->h_mad_svc, p_cep->p_send_mad,
		__calc_mad_timeout( conn_mra_get_svc_timeout( p_mra ) ) +
		__calc_mad_timeout( p_cep->max_2pkt_life - 1 ) );

	/* We only invoke a single callback for MRA. */
	if( p_cep->state & CEP_STATE_MRA )
	{
		/* Invalid state. */
		AL_TRACE( AL_DBG_CM, ("Already received MRA.\n") );
		goto err;
	}

	p_cep->state |= CEP_STATE_MRA;

	/* Queue the MAD; IB_SUCCESS means we should invoke the callback. */
	status = __cep_queue_mad( p_cep, p_mad );
	CL_ASSERT( status != IB_INVALID_STATE );

	KeReleaseInStackQueuedSpinLockFromDpcLevel( &hdl );

	if( status == IB_SUCCESS )
		__process_cep( p_cep );

	AL_EXIT( AL_DBG_CM );
	return;

err:
	KeReleaseInStackQueuedSpinLockFromDpcLevel( &hdl );
	ib_put_mad( p_mad );
	AL_EXIT( AL_DBG_CM );
}
+\r
+\r
/*
 * Handle a received REJ (Reject) MAD.  For connections still being
 * established this aborts establishment (no timewait transition); for
 * established connections it tears down into the timewait state.  The
 * MAD is queued to the client unless the REJ is unmatched or ignored.
 */
static void
__process_rej(
	IN				ib_mad_element_t* const		p_mad )
{
	ib_api_status_t		status;
	mad_cm_rej_t		*p_rej;
	kcep_t				*p_cep = NULL;
	KLOCK_QUEUE_HANDLE	hdl;
	net64_t				ca_guid;

	AL_ENTER( AL_DBG_CM );

	CL_ASSERT( KeGetCurrentIrql() == DISPATCH_LEVEL );

	p_rej = (mad_cm_rej_t*)p_mad->p_mad_buf;

	/* Either one of the communication IDs must be set. */
	if( !p_rej->remote_comm_id && !p_rej->local_comm_id )
		goto err1;

	/* Check the pending list by the remote CA GUID and connection ID. */
	KeAcquireInStackQueuedSpinLockAtDpcLevel( &gp_cep_mgr->lock, &hdl );
	if( p_rej->remote_comm_id )
	{
		p_cep = __lookup_cep( NULL, p_rej->remote_comm_id );
	}
	else if( p_rej->reason == IB_REJ_TIMEOUT &&
		conn_rej_get_ari_len( p_rej ) == sizeof(net64_t) )
	{
		/* A timeout REJ carries the remote CA GUID in the ARI field. */
		cl_memcpy( &ca_guid, p_rej->ari, sizeof(net64_t) );
		p_cep = __lookup_by_id( p_rej->local_comm_id, ca_guid );
	}

	if( !p_cep )
	{
		goto err2;
	}

	/* Ignore a REJ whose comm ID doesn't match an already-known remote. */
	if( p_cep->remote_comm_id &&
		p_cep->remote_comm_id != p_rej->local_comm_id )
	{
		goto err2;
	}

	switch( p_cep->state )
	{
	case CEP_STATE_REQ_SENT:
		/*
		 * Ignore rejects with the status set to IB_REJ_INVALID_SID.  We will
		 * continue to retry (up to max_cm_retries) to connect to the remote
		 * side.  This is required to support peer-to-peer connections and
		 * clients that try to connect before the server comes up.
		 */
		if( p_rej->reason == IB_REJ_INVALID_SID )
		{
			AL_TRACE( AL_DBG_CM,
				("Request rejected (invalid SID) - retrying.\n") );
			goto err2;
		}

		/* Fall through */
	case CEP_STATE_REP_SENT:
	case CEP_STATE_REQ_MRA_RCVD:
	case CEP_STATE_REP_MRA_RCVD:
		/* Cancel any outstanding MAD. */
		if( p_cep->p_send_mad )
		{
			ib_cancel_mad( p_cep->h_mad_svc, p_cep->p_send_mad );
			p_cep->p_send_mad = NULL;
		}

		/* Fall through */
	case CEP_STATE_REQ_RCVD:
	case CEP_STATE_REP_RCVD:
	case CEP_STATE_REQ_MRA_SENT:
	case CEP_STATE_REP_MRA_SENT:
	case CEP_STATE_PRE_REP:
	case CEP_STATE_PRE_REP_MRA_SENT:
		/* Release any MAD prepared but not yet sent. */
		if( p_cep->state & CEP_STATE_PREP )
		{
			CL_ASSERT( p_cep->p_mad );
			ib_put_mad( p_cep->p_mad );
			p_cep->p_mad = NULL;
		}
		/* Abort connection establishment. No transition to timewait. */
		__remove_cep( p_cep );
		p_cep->state = CEP_STATE_IDLE;
		break;

	case CEP_STATE_ESTABLISHED:
	case CEP_STATE_LAP_RCVD:
	case CEP_STATE_LAP_SENT:
	case CEP_STATE_LAP_MRA_RCVD:
	case CEP_STATE_LAP_MRA_SENT:
	case CEP_STATE_PRE_APR:
	case CEP_STATE_PRE_APR_MRA_SENT:
		/* Release any MAD prepared but not yet sent. */
		if( p_cep->state & CEP_STATE_PREP )
		{
			CL_ASSERT( p_cep->p_mad );
			ib_put_mad( p_cep->p_mad );
			p_cep->p_mad = NULL;
		}
		/* Established connections tear down through timewait. */
		p_cep->state = CEP_STATE_TIMEWAIT;
		__insert_timewait( p_cep );
		break;

	default:
		/* Ignore the REJ. */
		AL_TRACE( AL_DBG_CM, ("REJ received in invalid state.\n") );
		goto err2;
	}

	/* Queue the MAD; IB_SUCCESS means we should invoke the callback. */
	status = __cep_queue_mad( p_cep, p_mad );
	CL_ASSERT( status != IB_INVALID_STATE );

	KeReleaseInStackQueuedSpinLockFromDpcLevel( &hdl );

	if( status == IB_SUCCESS )
		__process_cep( p_cep );

	AL_EXIT( AL_DBG_CM );
	return;

err2:
	KeReleaseInStackQueuedSpinLockFromDpcLevel( &hdl );
err1:
	ib_put_mad( p_mad );
	AL_EXIT( AL_DBG_CM );
}
+\r
+\r
/*
 * Handle a received REP (Reply) MAD on the active side.  Matches the REP
 * to the CEP that sent the REQ, saves the wire parameters, inserts the
 * CEP into the remote-ID maps, and queues the MAD for client delivery.
 * Duplicate REPs in established states cause the last MRA/RTU to be
 * repeated.
 */
static void
__process_rep(
	IN				cep_agent_t* const			p_port_cep,
	IN				ib_mad_element_t* const		p_mad )
{
	ib_api_status_t		status;
	mad_cm_rep_t		*p_rep;
	kcep_t				*p_cep;
	KLOCK_QUEUE_HANDLE	hdl;
	cep_state_t			old_state;

	AL_ENTER( AL_DBG_CM );

	CL_ASSERT( KeGetCurrentIrql() == DISPATCH_LEVEL );

	p_rep = (mad_cm_rep_t*)p_mad->p_mad_buf;

	AL_TRACE( AL_DBG_CM,
		("REP: comm_id (x%x) received\n", p_rep->local_comm_id ) );

	KeAcquireInStackQueuedSpinLockAtDpcLevel( &gp_cep_mgr->lock, &hdl );
	p_cep = __lookup_cep( NULL, p_rep->remote_comm_id );
	if( !p_cep )
	{
		KeReleaseInStackQueuedSpinLockFromDpcLevel( &hdl );
		ib_put_mad( p_mad );
		AL_TRACE_EXIT( AL_DBG_CM,
			("REP received that could not be matched.\n") );
		return;
	}

	switch( p_cep->state )
	{
	case CEP_STATE_REQ_MRA_RCVD:
	case CEP_STATE_REQ_SENT:
		old_state = p_cep->state;
		/* Save pertinent information and change state. */
		__save_wire_rep( p_cep, p_rep );

		if( __insert_cep( p_cep ) != p_cep )
		{
			/* Roll back the state change. */
			p_cep->state = old_state;
			__reject_mad( p_port_cep, p_cep, p_mad, IB_REJ_STALE_CONN );
			/* TODO: Handle stale connection. */
			break;
		}

		/*
		 * Cancel any outstanding send.  Note that we do this only after
		 * inserting the CEP - if we failed, then the send will timeout
		 * and we'll finish our way through the state machine.
		 */
		if( p_cep->p_send_mad )
		{
			ib_cancel_mad( p_cep->h_mad_svc, p_cep->p_send_mad );
			p_cep->p_send_mad = NULL;
		}

		/* Queue the MAD; IB_SUCCESS means we should invoke the callback. */
		status = __cep_queue_mad( p_cep, p_mad );
		CL_ASSERT( status != IB_INVALID_STATE );

		KeReleaseInStackQueuedSpinLockFromDpcLevel( &hdl );

		if( status == IB_SUCCESS )
			__process_cep( p_cep );

		AL_EXIT( AL_DBG_CM );
		return;

	case CEP_STATE_ESTABLISHED:
	case CEP_STATE_LAP_RCVD:
	case CEP_STATE_LAP_SENT:
	case CEP_STATE_LAP_MRA_RCVD:
	case CEP_STATE_LAP_MRA_SENT:
	case CEP_STATE_REP_MRA_SENT:
		/* Repeat the MRA or RTU. */
		__repeat_mad( p_port_cep, p_cep, p_mad );
		break;

	default:
		ib_put_mad( p_mad );
		AL_TRACE( AL_DBG_CM, ("REP received in invalid state.\n") );
		break;
	}

	KeReleaseInStackQueuedSpinLockFromDpcLevel( &hdl );

	AL_EXIT( AL_DBG_CM );
}
+\r
+\r
/*
 * Handle a received RTU (Ready To Use) MAD on the passive side.  Moves a
 * CEP that sent a REP into the established state, cancels the pending REP
 * send, and queues the MAD for client delivery.
 */
static void
__process_rtu(
	IN				ib_mad_element_t* const		p_mad )
{
	ib_api_status_t		status;
	mad_cm_rtu_t		*p_rtu;
	kcep_t				*p_cep;
	KLOCK_QUEUE_HANDLE	hdl;

	AL_ENTER( AL_DBG_CM );

	CL_ASSERT( KeGetCurrentIrql() == DISPATCH_LEVEL );

	p_rtu = (mad_cm_rtu_t*)p_mad->p_mad_buf;

	AL_TRACE( AL_DBG_CM,
		("RTU: comm_id (x%x) received\n", p_rtu->local_comm_id) );

	/* Find the connection by local connection ID. */
	KeAcquireInStackQueuedSpinLockAtDpcLevel( &gp_cep_mgr->lock, &hdl );
	p_cep = __lookup_cep( NULL, p_rtu->remote_comm_id );
	if( !p_cep || p_cep->remote_comm_id != p_rtu->local_comm_id )
	{
		AL_TRACE( AL_DBG_CM, ("RTU received that could not be matched.\n") );
		goto done;
	}

	switch( p_cep->state )
	{
	case CEP_STATE_REP_SENT:
	case CEP_STATE_REP_MRA_RCVD:
		/* Cancel any outstanding send. */
		if( p_cep->p_send_mad )
		{
			ib_cancel_mad( p_cep->h_mad_svc, p_cep->p_send_mad );
			p_cep->p_send_mad = NULL;
		}

		p_cep->state = CEP_STATE_ESTABLISHED;

		/* Queue the MAD; IB_SUCCESS means we should invoke the callback. */
		status = __cep_queue_mad( p_cep, p_mad );
		CL_ASSERT( status != IB_INVALID_STATE );

		/* Update timewait time. */
		__calc_timewait( p_cep );

		KeReleaseInStackQueuedSpinLockFromDpcLevel( &hdl );

		if( status == IB_SUCCESS )
			__process_cep( p_cep );

		AL_EXIT( AL_DBG_CM );
		return;

	default:
		AL_TRACE( AL_DBG_CM, ("RTU received in invalid state.\n") );
		break;
	}

done:
	KeReleaseInStackQueuedSpinLockFromDpcLevel( &hdl );
	ib_put_mad( p_mad );
	AL_EXIT( AL_DBG_CM );
}
+\r
+\r
/*
 * Handle a received DREQ (Disconnect Request) MAD.  Matches by comm IDs
 * and local QPN, moves the CEP to the DREQ-received state, and queues the
 * MAD so the client can reply with a DREP.  A DREQ that arrives while in
 * timewait causes the saved DREP to be repeated.
 */
static void
__process_dreq(
	IN				cep_agent_t* const			p_port_cep,
	IN				ib_mad_element_t* const		p_mad )
{
	ib_api_status_t		status;
	mad_cm_dreq_t		*p_dreq;
	kcep_t				*p_cep;
	KLOCK_QUEUE_HANDLE	hdl;

	AL_ENTER( AL_DBG_CM );

	CL_ASSERT( KeGetCurrentIrql() == DISPATCH_LEVEL );

	p_dreq = (mad_cm_dreq_t*)p_mad->p_mad_buf;

	AL_TRACE( AL_DBG_CM,
		("DREQ: comm_id (x%x) qpn (x%x) received\n",
		p_dreq->local_comm_id, conn_dreq_get_remote_qpn( p_dreq )) );

	/* Find the connection by connection IDs. */
	KeAcquireInStackQueuedSpinLockAtDpcLevel( &gp_cep_mgr->lock, &hdl );
	p_cep = __lookup_cep( NULL, p_dreq->remote_comm_id );
	if( !p_cep ||
		p_cep->remote_comm_id != p_dreq->local_comm_id ||
		p_cep->local_qpn != conn_dreq_get_remote_qpn( p_dreq ) )
	{
		AL_TRACE( AL_DBG_CM, ("DREQ received that could not be matched.\n") );
		KeReleaseInStackQueuedSpinLockFromDpcLevel( &hdl );
		ib_put_mad( p_mad );
		AL_EXIT( AL_DBG_CM );
		return;
	}

	switch( p_cep->state )
	{
	case CEP_STATE_REP_SENT:
	case CEP_STATE_REP_MRA_RCVD:
	case CEP_STATE_DREQ_SENT:
		/* Cancel the outstanding MAD. */
		if( p_cep->p_send_mad )
		{
			ib_cancel_mad( p_cep->h_mad_svc, p_cep->p_send_mad );
			p_cep->p_send_mad = NULL;
		}

		/* Fall through and process as DREQ received case. */
	case CEP_STATE_ESTABLISHED:
	case CEP_STATE_LAP_RCVD:
	case CEP_STATE_LAP_SENT:
	case CEP_STATE_LAP_MRA_RCVD:
	case CEP_STATE_LAP_MRA_SENT:
		p_cep->state = CEP_STATE_DREQ_RCVD;

		/* Queue the MAD; IB_SUCCESS means we should invoke the callback. */
		status = __cep_queue_mad( p_cep, p_mad );
		CL_ASSERT( status != IB_INVALID_STATE );

		/* Store the TID for use in the reply DREP. */
		p_cep->tid = p_dreq->hdr.trans_id;

		KeReleaseInStackQueuedSpinLockFromDpcLevel( &hdl );

		if( status == IB_SUCCESS )
			__process_cep( p_cep );
		AL_EXIT( AL_DBG_CM );
		return;

	case CEP_STATE_TIMEWAIT:
	case CEP_STATE_DESTROY:
		/* Repeat the DREP. */
		__repeat_mad( p_port_cep, p_cep, p_mad );
		break;

	default:
		AL_TRACE( AL_DBG_CM, ("DREQ received in invalid state.\n") );
		/* Fall through: a duplicate DREQ in DREQ_RCVD is silently dropped. */
	case CEP_STATE_DREQ_RCVD:
		ib_put_mad( p_mad );
		break;
	}

	KeReleaseInStackQueuedSpinLockFromDpcLevel( &hdl );
	AL_EXIT( AL_DBG_CM );
}
+\r
+\r
/*
 * Handle a received DREP (Disconnect Reply) MAD.  Cancels the outstanding
 * DREQ send and moves the CEP into timewait (or destroy, if destruction
 * was already requested), then queues the MAD for client delivery.
 */
static void
__process_drep(
	IN				ib_mad_element_t* const		p_mad )
{
	ib_api_status_t		status;
	mad_cm_drep_t		*p_drep;
	kcep_t				*p_cep;
	KLOCK_QUEUE_HANDLE	hdl;

	AL_ENTER( AL_DBG_CM );

	CL_ASSERT( KeGetCurrentIrql() == DISPATCH_LEVEL );

	p_drep = (mad_cm_drep_t*)p_mad->p_mad_buf;

	/* Find the connection by local connection ID. */
	KeAcquireInStackQueuedSpinLockAtDpcLevel( &gp_cep_mgr->lock, &hdl );
	p_cep = __lookup_cep( NULL, p_drep->remote_comm_id );
	if( !p_cep || p_cep->remote_comm_id != p_drep->local_comm_id )
	{
		AL_TRACE( AL_DBG_CM, ("DREP received that could not be matched.\n") );
		KeReleaseInStackQueuedSpinLockFromDpcLevel( &hdl );
		ib_put_mad( p_mad );
		AL_EXIT( AL_DBG_CM );
		return;
	}

	if( p_cep->state != CEP_STATE_DREQ_SENT &&
		p_cep->state != CEP_STATE_DREQ_DESTROY )
	{
		AL_TRACE( AL_DBG_CM, ("DREP received in invalid state.\n") );

		KeReleaseInStackQueuedSpinLockFromDpcLevel( &hdl );
		ib_put_mad( p_mad );
		AL_EXIT( AL_DBG_CM );
		return;
	}

	/* Cancel the DREQ. */
	if( p_cep->p_send_mad )
	{
		ib_cancel_mad( p_cep->h_mad_svc, p_cep->p_send_mad );
		p_cep->p_send_mad = NULL;
	}

	if( p_cep->state == CEP_STATE_DREQ_SENT )
	{
		p_cep->state = CEP_STATE_TIMEWAIT;

		/* Queue the MAD; IB_SUCCESS means we should invoke the callback. */
		status = __cep_queue_mad( p_cep, p_mad );
		CL_ASSERT( status != IB_INVALID_STATE );
	}
	else
	{
		/* State is DREQ_DESTROY - move to DESTROY to allow cleanup. */
		CL_ASSERT( p_cep->state == CEP_STATE_DREQ_DESTROY );
		p_cep->state = CEP_STATE_DESTROY;

		/* No client to notify - drop the MAD and skip the callback. */
		ib_put_mad( p_mad );
		status = IB_INVALID_STATE;
	}

	__insert_timewait( p_cep );

	KeReleaseInStackQueuedSpinLockFromDpcLevel( &hdl );

	if( status == IB_SUCCESS )
		__process_cep( p_cep );

	AL_EXIT( AL_DBG_CM );
}
+\r
+\r
/*
 * Build the temporary alternate AV (p_cep->alt_av) from the path carried
 * in a received LAP.  Returns FALSE if no local port matches the path or
 * the matching port does not belong to the connection's CA.
 * NOTE(review): the path's "remote" fields are used to locate OUR port and
 * its "local" fields address the peer - the LAP path appears to be written
 * from the sender's perspective.  Confirm against the LAP format spec.
 */
static boolean_t
__format_lap_av(
	IN				kcep_t* const				p_cep,
	IN		const	lap_path_info_t* const		p_path )
{
	cep_agent_t		*p_port_cep;

	AL_ENTER( AL_DBG_CM );

	CL_ASSERT( p_cep );
	CL_ASSERT( p_path );

	cl_memclr( &p_cep->alt_av, sizeof(kcep_av_t) );

	/* Locate the local port reachable via the proposed alternate path. */
	p_port_cep = __find_port_cep( &p_path->remote_gid, p_path->remote_lid,
		p_cep->pkey, &p_cep->alt_av.pkey_index );
	if( !p_port_cep )
	{
		AL_EXIT( AL_DBG_CM );
		return FALSE;
	}

	/* The alternate port must be on the same CA as the connection. */
	if( p_port_cep->h_ca->obj.p_ci_ca->verbs.guid != p_cep->local_ca_guid )
	{
		AL_EXIT( AL_DBG_CM );
		return FALSE;
	}

	p_cep->alt_av.port_guid = p_port_cep->port_guid;
	p_cep->alt_av.attr.port_num = p_port_cep->port_num;

	p_cep->alt_av.attr.sl = conn_lap_path_get_svc_lvl( p_path );
	p_cep->alt_av.attr.dlid = p_path->local_lid;

	/* Off-subnet destinations need a GRH built from the path's GIDs. */
	if( !conn_lap_path_get_subn_lcl( p_path ) )
	{
		p_cep->alt_av.attr.grh_valid = TRUE;
		p_cep->alt_av.attr.grh.ver_class_flow = ib_grh_set_ver_class_flow(
			1, conn_lap_path_get_tclass( p_path ),
			conn_lap_path_get_flow_lbl( p_path ) );
		p_cep->alt_av.attr.grh.hop_limit = p_path->hop_limit;
		p_cep->alt_av.attr.grh.dest_gid = p_path->local_gid;
		p_cep->alt_av.attr.grh.src_gid = p_path->remote_gid;
	}
	else
	{
		p_cep->alt_av.attr.grh_valid = FALSE;
	}
	p_cep->alt_av.attr.static_rate = conn_lap_path_get_pkt_rate( p_path );
	p_cep->alt_av.attr.path_bits =
		(uint8_t)(p_path->remote_lid - p_port_cep->base_lid);

	/*
	 * Note that while we never use the connected AV attributes internally,
	 * we store them so we can pass them back to users.  For the LAP, we
	 * first copy the settings from the current primary - MTU and retry
	 * counts are only specified in the REQ.
	 */
	p_cep->alt_av.attr.conn = p_cep->av[p_cep->idx_primary].attr.conn;
	p_cep->alt_av.attr.conn.local_ack_timeout =
		conn_lap_path_get_lcl_ack_timeout( p_path );

	AL_EXIT( AL_DBG_CM );
	return TRUE;
}
+\r
+\r
/*
 * Handle a received LAP (Load Alternate Path) MAD.  Only the passive side
 * of a connection accepts a LAP; the proposed path is staged in alt_av and
 * the MAD is queued so the client can respond with an APR.
 */
static void
__process_lap(
	IN				cep_agent_t* const			p_port_cep,
	IN				ib_mad_element_t* const		p_mad )
{
	ib_api_status_t		status;
	mad_cm_lap_t		*p_lap;
	kcep_t				*p_cep;
	KLOCK_QUEUE_HANDLE	hdl;

	AL_ENTER( AL_DBG_CM );

	CL_ASSERT( KeGetCurrentIrql() == DISPATCH_LEVEL );

	p_lap = (mad_cm_lap_t*)p_mad->p_mad_buf;

	/* Find the connection by local connection ID. */
	KeAcquireInStackQueuedSpinLockAtDpcLevel( &gp_cep_mgr->lock, &hdl );
	p_cep = __lookup_cep( NULL, p_lap->remote_comm_id );
	if( !p_cep || p_cep->remote_comm_id != p_lap->local_comm_id )
	{
		KeReleaseInStackQueuedSpinLockFromDpcLevel( &hdl );
		ib_put_mad( p_mad );
		AL_TRACE_EXIT( AL_DBG_CM, ("LAP received that could not be matched.\n") );
		return;
	}

	switch( p_cep->state )
	{
	case CEP_STATE_REP_SENT:
	case CEP_STATE_REP_MRA_RCVD:
		/*
		 * These two cases handle the RTU being dropped.  Receipt of
		 * a LAP indicates that the connection is established.
		 */
	case CEP_STATE_ESTABLISHED:
		/*
		 * We don't check for other "established" states related to
		 * alternate path management (CEP_STATE_LAP_RCVD, etc)
		 */

		/* We only support receiving LAP if we took the passive role. */
		if( p_cep->was_active )
		{
			ib_put_mad( p_mad );
			break;
		}

		/* Store the transaction ID for use during the LAP exchange. */
		p_cep->tid = p_lap->hdr.trans_id;

		/*
		 * Copy the path record into the connection for use when
		 * sending the APR and loading the path.
		 */
		if( !__format_lap_av( p_cep, &p_lap->alternate_path ) )
		{
			/* Trap an invalid path. */
			ib_put_mad( p_mad );
			break;
		}

		p_cep->state = CEP_STATE_LAP_RCVD;

		/* Queue the MAD; IB_SUCCESS means we should invoke the callback. */
		status = __cep_queue_mad( p_cep, p_mad );
		CL_ASSERT( status != IB_INVALID_STATE );

		KeReleaseInStackQueuedSpinLockFromDpcLevel( &hdl );

		if( status == IB_SUCCESS )
			__process_cep( p_cep );

		AL_EXIT( AL_DBG_CM );
		return;

	case CEP_STATE_LAP_MRA_SENT:
		/* Duplicate LAP - repeat our MRA. */
		__repeat_mad( p_port_cep, p_cep, p_mad );
		break;

	default:
		AL_TRACE( AL_DBG_CM, ("LAP received in invalid state.\n") );
		ib_put_mad( p_mad );
		break;
	}

	KeReleaseInStackQueuedSpinLockFromDpcLevel( &hdl );
	AL_EXIT( AL_DBG_CM );
}
+\r
+\r
/*
 * Handle a received APR (Alternate Path Response) MAD.  Commits the staged
 * alternate AV into the non-primary AV slot, refreshes the packet lifetime
 * and timewait values, and returns the CEP to the established state.
 */
static void
__process_apr(
	IN				ib_mad_element_t* const		p_mad )
{
	ib_api_status_t		status;
	mad_cm_apr_t		*p_apr;
	kcep_t				*p_cep;
	KLOCK_QUEUE_HANDLE	hdl;

	AL_ENTER( AL_DBG_CM );

	CL_ASSERT( KeGetCurrentIrql() == DISPATCH_LEVEL );

	p_apr = (mad_cm_apr_t*)p_mad->p_mad_buf;

	KeAcquireInStackQueuedSpinLockAtDpcLevel( &gp_cep_mgr->lock, &hdl );
	p_cep = __lookup_cep( NULL, p_apr->remote_comm_id );
	if( !p_cep || p_cep->remote_comm_id != p_apr->local_comm_id )
	{
		AL_TRACE( AL_DBG_CM, ("APR received that could not be matched.\n") );
		goto done;
	}

	switch( p_cep->state )
	{
	case CEP_STATE_LAP_SENT:
	case CEP_STATE_LAP_MRA_RCVD:
		/* Cancel sending the LAP. */
		if( p_cep->p_send_mad )
		{
			ib_cancel_mad( p_cep->h_mad_svc, p_cep->p_send_mad );
			p_cep->p_send_mad = NULL;
		}

		/* Copy the temporary alternate AV into the non-primary AV slot. */
		p_cep->av[(p_cep->idx_primary + 1) & 0x1] = p_cep->alt_av;

		/* Update the maximum packet lifetime. */
		p_cep->max_2pkt_life = max( p_cep->max_2pkt_life, p_cep->alt_2pkt_life );

		/* Update the timewait time. */
		__calc_timewait( p_cep );

		p_cep->state = CEP_STATE_ESTABLISHED;

		/* Queue the MAD; IB_SUCCESS means we should invoke the callback. */
		status = __cep_queue_mad( p_cep, p_mad );
		CL_ASSERT( status != IB_INVALID_STATE );

		KeReleaseInStackQueuedSpinLockFromDpcLevel( &hdl );

		if( status == IB_SUCCESS )
			__process_cep( p_cep );

		AL_EXIT( AL_DBG_CM );
		return;

	default:
		AL_TRACE( AL_DBG_CM, ("APR received in invalid state.\n") );
		break;
	}

done:
	KeReleaseInStackQueuedSpinLockFromDpcLevel( &hdl );
	ib_put_mad( p_mad );
	AL_EXIT( AL_DBG_CM );
}
+\r
+\r
+static void\r
+__cep_mad_recv_cb(\r
+ IN ib_mad_svc_handle_t h_mad_svc,\r
+ IN void *context,\r
+ IN ib_mad_element_t *p_mad )\r
+{\r
+ cep_agent_t *p_port_cep;\r
+ ib_mad_t *p_hdr;\r
+\r
+ AL_ENTER( AL_DBG_CM );\r
+\r
+ CL_ASSERT( KeGetCurrentIrql() == DISPATCH_LEVEL );\r
+\r
+ UNUSED_PARAM( h_mad_svc );\r
+ p_port_cep = (cep_agent_t*)context;\r
+\r
+ CL_ASSERT( p_mad->p_next == NULL );\r
+\r
+ p_hdr = (ib_mad_t*)p_mad->p_mad_buf;\r
+\r
+ /*\r
+ * TODO: Add filtering in all the handlers for unsupported class version.\r
+ * See 12.6.7.2 Rejection Reason, code 31.\r
+ */\r
+\r
+ switch( p_hdr->attr_id )\r
+ {\r
+ case CM_REQ_ATTR_ID:\r
+ __process_req( p_port_cep, p_mad );\r
+ break;\r
+\r
+ case CM_MRA_ATTR_ID:\r
+ __process_mra( p_mad );\r
+ break;\r
+\r
+ case CM_REJ_ATTR_ID:\r
+ __process_rej( p_mad );\r
+ break;\r
+\r
+ case CM_REP_ATTR_ID:\r
+ __process_rep( p_port_cep, p_mad );\r
+ break;\r
+\r
+ case CM_RTU_ATTR_ID:\r
+ __process_rtu( p_mad );\r
+ break;\r
+\r
+ case CM_DREQ_ATTR_ID:\r
+ __process_dreq( p_port_cep, p_mad );\r
+ break;\r
+\r
+ case CM_DREP_ATTR_ID:\r
+ __process_drep( p_mad );\r
+ break;\r
+\r
+ case CM_LAP_ATTR_ID:\r
+ __process_lap( p_port_cep, p_mad );\r
+ break;\r
+\r
+ case CM_APR_ATTR_ID:\r
+ __process_apr( p_mad );\r
+ break;\r
+\r
+ case CM_SIDR_REQ_ATTR_ID:\r
+// p_async_mad->item.pfn_callback = __process_cm_sidr_req;\r
+// break;\r
+//\r
+ case CM_SIDR_REP_ATTR_ID:\r
+// p_async_mad->item.pfn_callback = __process_cm_sidr_rep;\r
+// break;\r
+//\r
+ default:\r
+ ib_put_mad( p_mad );\r
+ AL_TRACE_EXIT( AL_DBG_ERROR,\r
+ ("Invalid CM MAD attribute ID.\n") );\r
+ return;\r
+ }\r
+\r
+ AL_EXIT( AL_DBG_CM );\r
+}\r
+\r
+\r
+static inline cep_agent_t*\r
+__get_cep_agent(\r
+ IN kcep_t* const p_cep )\r
+{\r
+ cl_map_item_t *p_item;\r
+\r
+ CL_ASSERT( p_cep );\r
+\r
+ /* Look up the primary CEP port agent */\r
+ p_item = cl_qmap_get( &gp_cep_mgr->port_map,\r
+ p_cep->av[p_cep->idx_primary].port_guid );\r
+ if( p_item == cl_qmap_end( &gp_cep_mgr->port_map ) )\r
+ return NULL;\r
+\r
+ return PARENT_STRUCT( p_item, cep_agent_t, item );\r
+}\r
+\r
+\r
+static inline void\r
+__format_mad_av(\r
+ OUT ib_mad_element_t* const p_mad,\r
+ IN kcep_av_t* const p_av )\r
+{\r
+ /* Set the addressing information in the MAD. */\r
+ p_mad->grh_valid = p_av->attr.grh_valid;\r
+ if( p_av->attr.grh_valid )\r
+ cl_memcpy( p_mad->p_grh, &p_av->attr.grh, sizeof(ib_grh_t) );\r
+\r
+ p_mad->remote_sl = p_av->attr.sl;\r
+ p_mad->remote_lid = p_av->attr.dlid;\r
+ p_mad->path_bits = p_av->attr.path_bits;\r
+ p_mad->pkey_index = p_av->pkey_index;\r
+ p_mad->remote_qp = IB_QP1;\r
+ p_mad->send_opt = IB_SEND_OPT_SIGNALED;\r
+ p_mad->remote_qkey = IB_QP1_WELL_KNOWN_Q_KEY;\r
+ /* Let the MAD service manage the AV for us. */\r
+ p_mad->h_av = NULL;\r
+}\r
+\r
+\r
+static ib_api_status_t\r
+__cep_send_mad(\r
+ IN cep_agent_t* const p_port_cep,\r
+ IN ib_mad_element_t* const p_mad )\r
+{\r
+ ib_api_status_t status;\r
+\r
+ AL_ENTER( AL_DBG_CM );\r
+\r
+ CL_ASSERT( p_port_cep );\r
+ CL_ASSERT( p_mad );\r
+\r
+ /* Use the mad's attributes already present */\r
+ p_mad->resp_expected = FALSE;\r
+ p_mad->retry_cnt = 0;\r
+ p_mad->timeout_ms = 0;\r
+\r
+ /* Clear the contexts since the send isn't associated with a CEP. */\r
+ p_mad->context1 = NULL;\r
+ p_mad->context2 = NULL;\r
+\r
+ status = ib_send_mad( p_port_cep->h_mad_svc, p_mad, NULL );\r
+ if( status != IB_SUCCESS )\r
+ {\r
+ ib_put_mad( p_mad );\r
+ AL_TRACE( AL_DBG_ERROR,\r
+ ("ib_send_mad failed with status %s.\n", ib_get_err_str(status)) );\r
+ }\r
+\r
+ AL_EXIT( AL_DBG_CM );\r
+ return status;\r
+}\r
+\r
+\r
/*
 * Send a REQ, REP, LAP, or DREQ with automatic retries.  Stores the MAD in
 * the CEP (for cancellation) and takes a CEP reference that is released by
 * __cep_mad_send_cb when the send completes.  On failure the reference and
 * MAD are released here instead.
 */
static ib_api_status_t
__cep_send_retry(
	IN				cep_agent_t* const			p_port_cep,
	IN				kcep_t* const				p_cep,
	IN				ib_mad_element_t* const		p_mad )
{
	ib_api_status_t		status;

	AL_ENTER( AL_DBG_CM );

	CL_ASSERT( p_cep );
	CL_ASSERT( p_mad );
	CL_ASSERT( p_mad->p_mad_buf->attr_id == CM_REQ_ATTR_ID ||
		p_mad->p_mad_buf->attr_id == CM_REP_ATTR_ID ||
		p_mad->p_mad_buf->attr_id == CM_LAP_ATTR_ID ||
		p_mad->p_mad_buf->attr_id == CM_DREQ_ATTR_ID );

	/*
	 * REQ, REP, and DREQ are retried until either a response is
	 * received or the operation times out.
	 */
	p_mad->resp_expected = TRUE;
	p_mad->retry_cnt = p_cep->max_cm_retries;
	p_mad->timeout_ms = p_cep->retry_timeout;

	/* Only one retried send may be outstanding per CEP. */
	CL_ASSERT( !p_cep->p_send_mad );

	/* Store the mad & mad service handle in the CEP for cancelling. */
	p_cep->h_mad_svc = p_port_cep->h_mad_svc;
	p_cep->p_send_mad = p_mad;

	/* reference the connection for which we are sending the MAD. */
	cl_atomic_inc( &p_cep->ref_cnt );

	/* Set the context so the send callback can find the CEP. */
	p_mad->context1 = p_cep;
	p_mad->context2 = NULL;

	/* Fire in the hole! */
	status = ib_send_mad( p_cep->h_mad_svc, p_mad, NULL );
	if( status != IB_SUCCESS )
	{
		/*
		 * Note that we don't need to check for destruction here since
		 * we're holding the global lock.
		 */
		cl_atomic_dec( &p_cep->ref_cnt );
		p_cep->p_send_mad = NULL;
		ib_put_mad( p_mad );
		AL_TRACE( AL_DBG_ERROR,
			("ib_send_mad failed with status %s.\n", ib_get_err_str(status)) );
	}

	AL_EXIT( AL_DBG_CM );
	return status;
}
+\r
+\r
/*
 * MAD service send-completion callback.  For CEP-associated sends (those
 * queued by __cep_send_retry), a cancelled send is treated as a timeout in
 * the *_SENT states, and timeouts drive the CEP state machine (reject,
 * timewait, or destroy).  Always releases the CEP reference taken by
 * __cep_send_retry, invoking the destroy callback on the last reference.
 */
static void
__cep_mad_send_cb(
	IN				ib_mad_svc_handle_t			h_mad_svc,
	IN				void						*context,
	IN				ib_mad_element_t			*p_mad )
{
	ib_api_status_t		status;
	cep_agent_t			*p_port_cep;
	kcep_t				*p_cep;
	KLOCK_QUEUE_HANDLE	hdl;
	ib_pfn_destroy_cb_t	pfn_destroy_cb;
	void				*cep_context;

	AL_ENTER( AL_DBG_CM );

	UNUSED_PARAM( h_mad_svc );
	CL_ASSERT( p_mad->p_next == NULL );
	CL_ASSERT( KeGetCurrentIrql() == DISPATCH_LEVEL );

	p_port_cep = (cep_agent_t*)context;

	p_cep = (kcep_t* __ptr64)p_mad->context1;

	/*
	 * The connection context is not set when performing immediate responses,
	 * such as repeating MADS.
	 */
	if( !p_cep )
	{
		ib_put_mad( p_mad );
		AL_EXIT( AL_DBG_CM );
		return;
	}

	p_mad->context1 = NULL;

	KeAcquireInStackQueuedSpinLockAtDpcLevel( &gp_cep_mgr->lock, &hdl );
	/* Clear the sent MAD pointer so that we don't try cancelling again. */
	if( p_cep->p_send_mad == p_mad )
		p_cep->p_send_mad = NULL;

	switch( p_mad->status )
	{
	case IB_WCS_SUCCESS:
		KeReleaseInStackQueuedSpinLockFromDpcLevel( &hdl );
		ib_put_mad( p_mad );
		break;

	case IB_WCS_CANCELED:
		/* Cancellation only matters while a response is still awaited. */
		if( p_cep->state != CEP_STATE_REQ_SENT &&
			p_cep->state != CEP_STATE_REQ_MRA_RCVD &&
			p_cep->state != CEP_STATE_REP_SENT &&
			p_cep->state != CEP_STATE_REP_MRA_RCVD &&
			p_cep->state != CEP_STATE_LAP_SENT &&
			p_cep->state != CEP_STATE_LAP_MRA_RCVD &&
			p_cep->state != CEP_STATE_DREQ_SENT &&
			p_cep->state != CEP_STATE_SREQ_SENT )
		{
			KeReleaseInStackQueuedSpinLockFromDpcLevel( &hdl );
			ib_put_mad( p_mad );
			break;
		}
		/* Treat as a timeout so we don't stall the state machine. */
		p_mad->status = IB_WCS_TIMEOUT_RETRY_ERR;

		/* Fall through. */
	case IB_WCS_TIMEOUT_RETRY_ERR:
	default:
		/* Timeout.  Reject the connection. */
		switch( p_cep->state )
		{
		case CEP_STATE_REQ_SENT:
		case CEP_STATE_REQ_MRA_RCVD:
		case CEP_STATE_REP_SENT:
		case CEP_STATE_REP_MRA_RCVD:
			/* Send the REJ. */
			__reject_timeout( p_port_cep, p_cep, p_mad );
			__remove_cep( p_cep );
			p_cep->state = CEP_STATE_IDLE;
			break;

		case CEP_STATE_DREQ_DESTROY:
			p_cep->state = CEP_STATE_DESTROY;
			__insert_timewait( p_cep );
			/* Fall through. */

		case CEP_STATE_DESTROY:
			/* No client to notify - release the MAD and the reference. */
			KeReleaseInStackQueuedSpinLockFromDpcLevel( &hdl );
			ib_put_mad( p_mad );
			goto done;

		case CEP_STATE_DREQ_SENT:
			/*
			 * Make up a DREP mad so we can respond if we receive
			 * a DREQ while in timewait.
			 */
			__format_mad_hdr( &p_cep->mads.drep.hdr, p_cep, CM_DREP_ATTR_ID );
			__format_drep( p_cep, NULL, 0, &p_cep->mads.drep );
			p_cep->state = CEP_STATE_TIMEWAIT;
			__insert_timewait( p_cep );

		default:
			break;
		}

		/* Queue the MAD; IB_SUCCESS means we should invoke the callback. */
		status = __cep_queue_mad( p_cep, p_mad );
		CL_ASSERT( status != IB_INVALID_STATE );
		KeReleaseInStackQueuedSpinLockFromDpcLevel( &hdl );

		if( status == IB_SUCCESS )
			__process_cep( p_cep );
		break;
	}

done:
	/* Capture the callback info before dropping our reference. */
	pfn_destroy_cb = p_cep->pfn_destroy_cb;
	cep_context = p_cep->cep.context;

	if( !cl_atomic_dec( &p_cep->ref_cnt ) && pfn_destroy_cb )
		pfn_destroy_cb( cep_context );
	AL_EXIT( AL_DBG_CM );
}
+\r
+\r
+static void\r
+__cep_qp_event_cb(\r
+ IN ib_async_event_rec_t *p_event_rec )\r
+{\r
+ UNUSED_PARAM( p_event_rec );\r
+\r
+ /*\r
+ * Most of the QP events are trapped by the real owner of the QP.\r
+ * For real events, the CM may not be able to do much anyways!\r
+ */\r
+}\r
+\r
+\r
/*
 * Sets up the CM datagram services for a port agent: allocates an alias
 * PD on the CA, creates a QP1 alias QP, and registers the CM MAD service
 * on that QP.  On failure, any partially created resources are released
 * by the port agent's normal destroy path.
 */
static ib_api_status_t
__init_data_svc(
	IN				cep_agent_t* const			p_port_cep,
	IN		const	ib_port_attr_t* const		p_port_attr )
{
	ib_api_status_t		status;
	ib_qp_create_t		qp_create;
	ib_mad_svc_t		mad_svc;

	AL_ENTER( AL_DBG_CM );

	/*
	 * Create the PD alias.  We use the port CM's al_obj_t as the context
	 * to allow using deref_al_obj as the destroy callback.
	 */
	status = ib_alloc_pd( p_port_cep->h_ca, IB_PDT_ALIAS, &p_port_cep->obj,
		&p_port_cep->h_pd );
	if( status != IB_SUCCESS )
	{
		AL_TRACE_EXIT( AL_DBG_ERROR,
			("ib_alloc_pd failed with status %s\n", ib_get_err_str(status)) );
		return status;
	}
	/* Reference the port object on behalf of the PD. */
	ref_al_obj( &p_port_cep->obj );

	/* Create the MAD QP (an alias of the port's QP1). */
	cl_memclr( &qp_create, sizeof( ib_qp_create_t ) );
	qp_create.qp_type = IB_QPT_QP1_ALIAS;
	qp_create.rq_depth = CEP_MAD_RQ_DEPTH;
	qp_create.sq_depth = CEP_MAD_SQ_DEPTH;
	qp_create.rq_sge = CEP_MAD_RQ_SGE;
	qp_create.sq_sge = CEP_MAD_SQ_SGE;
	qp_create.sq_signaled = TRUE;
	/*
	 * We use the port CM's al_obj_t as the context to allow using
	 * deref_al_obj as the destroy callback.
	 */
	status = ib_get_spl_qp( p_port_cep->h_pd, p_port_attr->port_guid,
		&qp_create, &p_port_cep->obj, __cep_qp_event_cb, &p_port_cep->pool_key,
		&p_port_cep->h_qp );
	if( status != IB_SUCCESS )
	{
		AL_TRACE_EXIT( AL_DBG_ERROR,
			("ib_get_spl_qp failed with status %s\n", ib_get_err_str(status)) );
		return status;
	}
	/* Reference the port object on behalf of the QP. */
	ref_al_obj( &p_port_cep->obj );

	/*
	 * Create the MAD service.  The CM is the unsolicited receiver for
	 * the communication management class (support_unsol), handling only
	 * the SEND method.
	 */
	cl_memclr( &mad_svc, sizeof(mad_svc) );
	mad_svc.mad_svc_context = p_port_cep;
	mad_svc.pfn_mad_recv_cb = __cep_mad_recv_cb;
	mad_svc.pfn_mad_send_cb = __cep_mad_send_cb;
	mad_svc.support_unsol = TRUE;
	mad_svc.mgmt_class = IB_MCLASS_COMM_MGMT;
	mad_svc.mgmt_version = IB_MCLASS_CM_VER_2;
	mad_svc.method_array[IB_MAD_METHOD_SEND] = TRUE;
	status =
		ib_reg_mad_svc( p_port_cep->h_qp, &mad_svc, &p_port_cep->h_mad_svc );
	if( status != IB_SUCCESS )
	{
		AL_TRACE_EXIT( AL_DBG_ERROR,
			("ib_reg_mad_svc failed with status %s\n", ib_get_err_str(status)) );
		return status;
	}

	AL_EXIT( AL_DBG_CM );
	return IB_SUCCESS;
}
+\r
+\r
/*
 * Performs immediate cleanup of resources.  Removes the port agent from
 * the global port map (so no new work is routed to it) and kicks off
 * asynchronous destruction of its QP and PD; each released resource
 * drops a reference on the port object via deref_al_obj.
 */
static void
__destroying_port_cep(
	IN				al_obj_t					*p_obj )
{
	cep_agent_t			*p_port_cep;
	KLOCK_QUEUE_HANDLE	hdl;

	AL_ENTER( AL_DBG_CM );

	p_port_cep = PARENT_STRUCT( p_obj, cep_agent_t, obj );

	/* Only mapped if the port GUID was assigned (see __create_port_cep). */
	if( p_port_cep->port_guid )
	{
		KeAcquireInStackQueuedSpinLock( &gp_cep_mgr->lock, &hdl );
		cl_qmap_remove_item( &gp_cep_mgr->port_map, &p_port_cep->item );
		KeReleaseInStackQueuedSpinLock( &hdl );
	}

	if( p_port_cep->h_qp )
	{
		/* deref_al_obj releases the reference taken in __init_data_svc. */
		ib_destroy_qp( p_port_cep->h_qp, (ib_pfn_destroy_cb_t)deref_al_obj );
		p_port_cep->h_qp = NULL;
	}

	if( p_port_cep->h_pd )
	{
		/* deref_al_obj releases the reference taken in __init_data_svc. */
		ib_dealloc_pd( p_port_cep->h_pd, (ib_pfn_destroy_cb_t)deref_al_obj );
		p_port_cep->h_pd = NULL;
	}

	AL_EXIT( AL_DBG_CM );
}
+\r
+\r
+\r
+/*\r
+ * Release all resources allocated by a port CM agent. Finishes any cleanup\r
+ * for a port agent.\r
+ */\r
+static void\r
+__free_port_cep(\r
+ IN al_obj_t *p_obj )\r
+{\r
+ cep_agent_t *p_port_cep;\r
+ ib_port_attr_mod_t port_attr_mod;\r
+\r
+ AL_ENTER( AL_DBG_CM );\r
+\r
+ p_port_cep = PARENT_STRUCT( p_obj, cep_agent_t, obj );\r
+\r
+ if( p_port_cep->h_ca )\r
+ {\r
+ /* Update local port attributes */\r
+ port_attr_mod.cap.cm = FALSE;\r
+ ib_modify_ca( p_port_cep->h_ca, p_port_cep->port_num,\r
+ IB_CA_MOD_IS_CM_SUPPORTED, &port_attr_mod );\r
+\r
+ deref_al_obj( &p_port_cep->h_ca->obj );\r
+ }\r
+\r
+ destroy_al_obj( &p_port_cep->obj );\r
+ cl_free( p_port_cep );\r
+\r
+ AL_EXIT( AL_DBG_CM );\r
+}\r
+\r
+\r
+/*\r
+ * Create a port agent for a given port.\r
+ */\r
+static ib_api_status_t\r
+__create_port_cep(\r
+ IN ib_pnp_port_rec_t *p_pnp_rec )\r
+{\r
+ cep_agent_t *p_port_cep;\r
+ ib_api_status_t status;\r
+ ib_port_attr_mod_t port_attr_mod;\r
+ KLOCK_QUEUE_HANDLE hdl;\r
+\r
+ AL_ENTER( AL_DBG_CM );\r
+\r
+ /* calculate size of port_cm struct */\r
+ p_port_cep = (cep_agent_t*)cl_zalloc( sizeof(cep_agent_t) );\r
+ if( !p_port_cep )\r
+ {\r
+ AL_TRACE_EXIT( AL_DBG_ERROR,\r
+ ("Failed to cl_zalloc port CM agent.\n") );\r
+ return IB_INSUFFICIENT_MEMORY;\r
+ }\r
+\r
+ construct_al_obj( &p_port_cep->obj, AL_OBJ_TYPE_CM );\r
+\r
+ status = init_al_obj( &p_port_cep->obj, p_port_cep, TRUE,\r
+ __destroying_port_cep, NULL, __free_port_cep );\r
+ if( status != IB_SUCCESS )\r
+ {\r
+ __free_port_cep( &p_port_cep->obj );\r
+ AL_TRACE_EXIT( AL_DBG_ERROR,\r
+ ("init_al_obj failed with status %s.\n", ib_get_err_str(status)) );\r
+ return status;\r
+ }\r
+\r
+ /* Attach to the global CM object. */\r
+ status = attach_al_obj( &gp_cep_mgr->obj, &p_port_cep->obj );\r
+ if( status != IB_SUCCESS )\r
+ {\r
+ p_port_cep->obj.pfn_destroy( &p_port_cep->obj, NULL );\r
+ AL_TRACE_EXIT( AL_DBG_ERROR,\r
+ ("attach_al_obj returned %s.\n", ib_get_err_str(status)) );\r
+ return status;\r
+ }\r
+\r
+ p_port_cep->port_guid = p_pnp_rec->p_port_attr->port_guid;\r
+ p_port_cep->port_num = p_pnp_rec->p_port_attr->port_num;\r
+ p_port_cep->base_lid = p_pnp_rec->p_port_attr->lid;\r
+\r
+ KeAcquireInStackQueuedSpinLock( &gp_cep_mgr->lock, &hdl );\r
+ cl_qmap_insert(\r
+ &gp_cep_mgr->port_map, p_port_cep->port_guid, &p_port_cep->item );\r
+ KeReleaseInStackQueuedSpinLock( &hdl );\r
+\r
+ /* Get a reference to the CA on which we are loading. */\r
+ p_port_cep->h_ca = acquire_ca( p_pnp_rec->p_ca_attr->ca_guid );\r
+ if( !p_port_cep->h_ca )\r
+ {\r
+ p_port_cep->obj.pfn_destroy( &p_port_cep->obj, NULL );\r
+ AL_TRACE_EXIT( AL_DBG_ERROR, ("acquire_ca failed.\n") );\r
+ return IB_INVALID_GUID; }\r
+\r
+ status = __init_data_svc( p_port_cep, p_pnp_rec->p_port_attr );\r
+ if( status != IB_SUCCESS )\r
+ {\r
+ p_port_cep->obj.pfn_destroy( &p_port_cep->obj, NULL );\r
+ AL_TRACE_EXIT( AL_DBG_ERROR,\r
+ ("__init_data_svc failed with status %s.\n",\r
+ ib_get_err_str(status)) );\r
+ return status;\r
+ }\r
+\r
+ /* Update local port attributes */\r
+ cl_memclr( &port_attr_mod, sizeof(ib_port_attr_mod_t) );\r
+ port_attr_mod.cap.cm = TRUE;\r
+ status = ib_modify_ca( p_port_cep->h_ca, p_pnp_rec->p_port_attr->port_num,\r
+ IB_CA_MOD_IS_CM_SUPPORTED, &port_attr_mod );\r
+\r
+ /* Update the PNP context to reference this port. */\r
+ p_pnp_rec->pnp_rec.context = p_port_cep;\r
+\r
+ /* Release the reference taken in init_al_obj. */\r
+ deref_al_obj( &p_port_cep->obj );\r
+\r
+ AL_EXIT( AL_DBG_CM );\r
+ return IB_SUCCESS;\r
+}\r
+\r
+\r
+/******************************************************************************\r
+* Global CEP manager\r
+******************************************************************************/\r
+\r
/*
 * Pops a free local CID entry, growing the CID vector if the free list
 * is exhausted.  Returns the entry and sets *p_cid to its index, or
 * returns NULL if the vector could not grow.  The free list is threaded
 * through the vector itself: a free entry's p_cep field stores the
 * index of the next free entry (see __cid_init).
 * Called with the CEP manager lock held.
 */
static cep_cid_t*
__get_lcid(
		OUT			net32_t* const				p_cid )
{
	cl_status_t			status;
	uint32_t			size, cid;
	cep_cid_t			*p_cep_cid;

	AL_ENTER( AL_DBG_CM );

	size = (uint32_t)cl_vector_get_size( &gp_cep_mgr->cid_vector );
	cid = gp_cep_mgr->free_cid;
	if( gp_cep_mgr->free_cid == size )
	{
		/* Grow the vector pool. */
		status =
			cl_vector_set_size( &gp_cep_mgr->cid_vector, size + CEP_CID_GROW );
		if( status != CL_SUCCESS )
		{
			AL_EXIT( AL_DBG_CM );
			return NULL;
		}
		/*
		 * Return the start of the free list since the
		 * entry initializer incremented it.
		 */
		gp_cep_mgr->free_cid = size;
	}

	/* Get the next free entry. */
	p_cep_cid = (cep_cid_t*)cl_vector_get_ptr( &gp_cep_mgr->cid_vector, cid );

	/* Pop: the free entry's p_cep field encodes the next free index. */
	gp_cep_mgr->free_cid = (uint32_t)(uintn_t)p_cep_cid->p_cep;

	*p_cid = cid;

	AL_EXIT( AL_DBG_CM );
	return p_cep_cid;
}
+\r
+\r
+static inline kcep_t*\r
+__lookup_cep(\r
+ IN ib_al_handle_t h_al OPTIONAL,\r
+ IN net32_t cid )\r
+{\r
+ size_t idx;\r
+ cep_cid_t *p_cid;\r
+\r
+ /* Mask off the counter bits so we get the index in our vector. */\r
+ idx = cid & CEP_MAX_CID_MASK;\r
+\r
+ /*\r
+ * Remove the CEP from the CID vector - no further API calls\r
+ * will succeed for it.\r
+ */\r
+ if( idx > cl_vector_get_size( &gp_cep_mgr->cid_vector ) )\r
+ return NULL;\r
+\r
+ p_cid = (cep_cid_t*)cl_vector_get_ptr( &gp_cep_mgr->cid_vector, idx );\r
+ if( !p_cid->h_al )\r
+ return NULL;\r
+\r
+ /*\r
+ * h_al is NULL when processing MADs, so we need to match on\r
+ * the actual local communication ID. If h_al is non-NULL, we\r
+ * are doing a lookup from a call to our API, and only need to match\r
+ * on the index in the vector (without the modifier).\r
+ */\r
+ if( h_al )\r
+ {\r
+ if( p_cid->h_al != h_al )\r
+ return NULL;\r
+ }\r
+ else if( p_cid->p_cep->local_comm_id != cid )\r
+ {\r
+ return NULL;\r
+ }\r
+\r
+ return p_cid->p_cep;\r
+}\r
+\r
+\r
/*
 * Lookup a CEP by remote comm ID and CA GUID.  Performs a binary search
 * of the connection rbmap keyed primarily on remote comm ID, secondarily
 * on remote CA GUID.  Returns NULL if no match.
 * Called with the CEP manager lock held.
 */
static kcep_t*
__lookup_by_id(
	IN				net32_t						remote_comm_id,
	IN				net64_t						remote_ca_guid )
{
	cl_rbmap_item_t		*p_item;
	kcep_t				*p_cep;

	AL_ENTER( AL_DBG_CM );

	/* Match against pending connections using remote comm ID and CA GUID. */
	p_item = cl_rbmap_root( &gp_cep_mgr->conn_id_map );
	while( p_item != cl_rbmap_end( &gp_cep_mgr->conn_id_map ) )
	{
		p_cep = PARENT_STRUCT( p_item, kcep_t, rem_id_item );

		if( remote_comm_id < p_cep->remote_comm_id )
			p_item = cl_rbmap_left( p_item );
		else if( remote_comm_id > p_cep->remote_comm_id )
			p_item = cl_rbmap_right( p_item );
		else if( remote_ca_guid < p_cep->remote_ca_guid )
			p_item = cl_rbmap_left( p_item );
		else if( remote_ca_guid > p_cep->remote_ca_guid )
			p_item = cl_rbmap_right( p_item );
		else
			return p_cep;	/* NOTE: returns without the AL_EXIT trace. */
	}

	AL_EXIT( AL_DBG_CM );
	return NULL;
}
+\r
+\r
/*
 * Lookup a listening CEP by Service ID, port GUID and private data.
 * Walks the listen rbmap, keyed on SID, then port GUID (unless the
 * listen covers IB_ALL_PORTS), then the listener's compare buffer
 * applied to the incoming private data.  Returns NULL if no match.
 * Called with the CEP manager lock held.
 */
static kcep_t*
__lookup_listen(
	IN				net64_t						sid,
	IN				net64_t						port_guid,
	IN				uint8_t						*p_pdata )
{
	cl_rbmap_item_t		*p_item;
	kcep_t				*p_cep;
	intn_t				cmp;

	AL_ENTER( AL_DBG_CM );

	/* Match against pending connections using remote comm ID and CA GUID. */
	p_item = cl_rbmap_root( &gp_cep_mgr->listen_map );
	while( p_item != cl_rbmap_end( &gp_cep_mgr->listen_map ) )
	{
		p_cep = PARENT_STRUCT( p_item, kcep_t, listen_item );

		if( sid == p_cep->sid )
			goto port_cmp;
		else if( sid < p_cep->sid )
			p_item = cl_rbmap_left( p_item );
		else
			p_item = cl_rbmap_right( p_item );

		continue;

port_cmp:
		/* A wildcard listen (IB_ALL_PORTS) matches any port GUID. */
		if( p_cep->port_guid != IB_ALL_PORTS )
		{
			if( port_guid == p_cep->port_guid )
				goto pdata_cmp;
			else if( port_guid < p_cep->port_guid )
				p_item = cl_rbmap_left( p_item );
			else
				p_item = cl_rbmap_right( p_item );

			continue;
		}

pdata_cmp:
		/*
		 * Compare private data only if the listener registered a compare
		 * buffer and the request carried private data; otherwise the
		 * SID/port match alone is sufficient.
		 */
		if( p_cep->p_cmp_buf && p_pdata )
		{
			cmp = cl_memcmp( &p_pdata[p_cep->cmp_offset],
				p_cep->p_cmp_buf, p_cep->cmp_len );

			if( !cmp )
				goto match;
			else if( cmp < 0 )
				p_item = cl_rbmap_left( p_item );
			else
				p_item = cl_rbmap_right( p_item );

			AL_TRACE( AL_DBG_CM,
				("Svc ID match but compare buffer mismatch.\n") );
			continue;
		}

match:
		/* Everything matched. */
		AL_EXIT( AL_DBG_CM );
		return p_cep;
	}

	AL_EXIT( AL_DBG_CM );
	return NULL;
}
+\r
+\r
/*
 * Inserts a CEP into the connection map keyed on remote comm ID and
 * remote CA GUID.  Returns p_new_cep on success, or the already-mapped
 * CEP with the same key on collision (nothing is inserted in that case).
 * Called with the CEP manager lock held.
 */
static kcep_t*
__insert_by_id(
	IN				kcep_t* const				p_new_cep )
{
	kcep_t				*p_cep;
	cl_rbmap_item_t		*p_item, *p_insert_at;
	boolean_t			left = TRUE;

	AL_ENTER( AL_DBG_CM );

	/* Find the insertion point, remembering the prospective parent. */
	p_item = cl_rbmap_root( &gp_cep_mgr->conn_id_map );
	p_insert_at = p_item;
	while( p_item != cl_rbmap_end( &gp_cep_mgr->conn_id_map ) )
	{
		p_insert_at = p_item;
		p_cep = PARENT_STRUCT( p_item, kcep_t, rem_id_item );

		if( p_new_cep->remote_comm_id < p_cep->remote_comm_id )
			p_item = cl_rbmap_left( p_item ), left = TRUE;
		else if( p_new_cep->remote_comm_id > p_cep->remote_comm_id )
			p_item = cl_rbmap_right( p_item ), left = FALSE;
		else if( p_new_cep->remote_ca_guid < p_cep->remote_ca_guid )
			p_item = cl_rbmap_left( p_item ), left = TRUE;
		else if( p_new_cep->remote_ca_guid > p_cep->remote_ca_guid )
			p_item = cl_rbmap_right( p_item ), left = FALSE;
		else
			goto done;	/* Duplicate key - return the existing CEP. */
	}

	cl_rbmap_insert(
		&gp_cep_mgr->conn_id_map, p_insert_at, &p_new_cep->rem_id_item, left );
	p_cep = p_new_cep;

done:
	AL_EXIT( AL_DBG_CM );
	return p_cep;
}
+\r
+\r
+static kcep_t*\r
+__insert_by_qpn(\r
+ IN kcep_t* const p_new_cep )\r
+{\r
+ kcep_t *p_cep;\r
+ cl_rbmap_item_t *p_item, *p_insert_at;\r
+ boolean_t left = TRUE;\r
+\r
+ AL_ENTER( AL_DBG_CM );\r
+\r
+ p_item = cl_rbmap_root( &gp_cep_mgr->conn_qp_map );\r
+ p_insert_at = p_item;\r
+ while( p_item != cl_rbmap_end( &gp_cep_mgr->conn_qp_map ) )\r
+ {\r
+ p_insert_at = p_item;\r
+ p_cep = PARENT_STRUCT( p_item, kcep_t, rem_id_item );\r
+\r
+ if( p_new_cep->remote_qpn < p_cep->remote_qpn )\r
+ p_item = cl_rbmap_left( p_item ), left = TRUE;\r
+ else if( p_new_cep->remote_qpn > p_cep->remote_qpn )\r
+ p_item = cl_rbmap_right( p_item ), left = FALSE;\r
+ else if( p_new_cep->remote_ca_guid < p_cep->remote_ca_guid )\r
+ p_item = cl_rbmap_left( p_item ), left = TRUE;\r
+ else if( p_new_cep->remote_ca_guid > p_cep->remote_ca_guid )\r
+ p_item = cl_rbmap_right( p_item ), left = FALSE;\r
+ else\r
+ goto done;\r
+ }\r
+\r
+ cl_rbmap_insert(\r
+ &gp_cep_mgr->conn_qp_map, p_insert_at, &p_new_cep->rem_qp_item, left );\r
+ p_cep = p_new_cep;\r
+\r
+done:\r
+ AL_EXIT( AL_DBG_CM );\r
+ return p_cep;\r
+}\r
+\r
+\r
+static inline kcep_t*\r
+__insert_cep(\r
+ IN kcep_t* const p_new_cep )\r
+{\r
+ kcep_t *p_cep;\r
+\r
+ AL_ENTER( AL_DBG_CM );\r
+\r
+ p_cep = __insert_by_qpn( p_new_cep );\r
+ if( p_cep != p_new_cep )\r
+ goto done;\r
+\r
+ p_cep = __insert_by_id( p_new_cep );\r
+ if( p_cep != p_new_cep )\r
+ {\r
+ cl_rbmap_remove_item(\r
+ &gp_cep_mgr->conn_qp_map, &p_new_cep->rem_qp_item );\r
+ p_cep->remote_qpn = 0;\r
+ }\r
+\r
+done:\r
+ AL_EXIT( AL_DBG_CM );\r
+ return p_cep;\r
+}\r
+\r
+\r
+static inline void\r
+__remove_cep(\r
+ IN kcep_t* const p_cep )\r
+{\r
+ AL_ENTER( AL_DBG_CM );\r
+\r
+ if( p_cep->remote_comm_id )\r
+ {\r
+ cl_rbmap_remove_item(\r
+ &gp_cep_mgr->conn_id_map, &p_cep->rem_id_item );\r
+ p_cep->remote_comm_id = 0;\r
+ }\r
+ if( p_cep->remote_qpn )\r
+ {\r
+ cl_rbmap_remove_item(\r
+ &gp_cep_mgr->conn_qp_map, &p_cep->rem_qp_item );\r
+ p_cep->remote_qpn = 0;\r
+ }\r
+\r
+ AL_EXIT( AL_DBG_CM );\r
+}\r
+\r
+\r
+static boolean_t\r
+__is_lid_valid(\r
+ IN ib_net16_t lid,\r
+ IN ib_net16_t port_lid,\r
+ IN uint8_t lmc )\r
+{\r
+ uint16_t lid1;\r
+ uint16_t lid2;\r
+ uint16_t path_bits;\r
+\r
+ if(lmc)\r
+ {\r
+ lid1 = CL_NTOH16(lid);\r
+ lid2 = CL_NTOH16(port_lid);\r
+ path_bits = 0;\r
+\r
+ if( lid1 < lid2 )\r
+ return FALSE;\r
+\r
+ while( lmc-- )\r
+ path_bits = (uint16_t)( (path_bits << 1) | 1 );\r
+\r
+ lid2 |= path_bits;\r
+\r
+ if( lid1 > lid2)\r
+ return FALSE;\r
+ }\r
+ else\r
+ {\r
+ if (lid != port_lid)\r
+ return FALSE;\r
+ }\r
+\r
+ return TRUE;\r
+}\r
+\r
+\r
+static inline boolean_t\r
+__is_gid_valid(\r
+ IN const ib_port_attr_t* const p_port_attr,\r
+ IN const ib_gid_t* const p_gid )\r
+{\r
+ uint16_t idx;\r
+\r
+ for( idx = 0; idx < p_port_attr->num_gids; idx++ )\r
+ {\r
+ if( !cl_memcmp(\r
+ p_gid, &p_port_attr->p_gid_table[idx], sizeof(ib_gid_t) ) )\r
+ {\r
+ return TRUE;\r
+ }\r
+ }\r
+ return FALSE;\r
+}\r
+\r
+\r
+static inline boolean_t\r
+__get_pkey_index(\r
+ IN const ib_port_attr_t* const p_port_attr,\r
+ IN const net16_t pkey,\r
+ OUT uint16_t* const p_pkey_index )\r
+{\r
+ uint16_t idx;\r
+\r
+ for( idx = 0; idx < p_port_attr->num_pkeys; idx++ )\r
+ {\r
+ if( p_port_attr->p_pkey_table[idx] == pkey )\r
+ {\r
+ *p_pkey_index = idx;\r
+ return TRUE;\r
+ }\r
+ }\r
+\r
+ return FALSE;\r
+}\r
+\r
+\r
/*
 * Returns the port CEP agent whose port matches the given GID, LID and
 * P_Key, or NULL if no port matches.  On success, also returns the
 * P_Key's table index through p_pkey_index.
 * (The previous comment claiming a "1-based port index" return was
 * stale - the function returns a cep_agent_t pointer.)
 */
static cep_agent_t*
__find_port_cep(
	IN		const	ib_gid_t* const				p_gid,
	IN		const	net16_t						lid,
	IN		const	net16_t						pkey,
		OUT			uint16_t* const				p_pkey_index )
{
	cep_agent_t				*p_port_cep;
	cl_list_item_t			*p_item;
	const ib_port_attr_t	*p_port_attr;

	AL_ENTER( AL_DBG_CM );

	/* Walk the CEP manager's child list of port agents. */
	cl_spinlock_acquire( &gp_cep_mgr->obj.lock );
	for( p_item = cl_qlist_head( &gp_cep_mgr->obj.obj_list );
		p_item != cl_qlist_end( &gp_cep_mgr->obj.obj_list );
		p_item = cl_qlist_next( p_item ) )
	{
		p_port_cep = PARENT_STRUCT( p_item, cep_agent_t, obj.pool_item );

		CL_ASSERT( p_port_cep->port_num );

		/* Lock the CA attributes while inspecting the port's tables. */
		ci_ca_lock_attr( p_port_cep->h_ca->obj.p_ci_ca );

		/* Index into the CA's port attribute array (port_num is 1-based). */
		p_port_attr = p_port_cep->h_ca->obj.p_ci_ca->p_pnp_attr->p_port_attr;
		p_port_attr += (p_port_cep->port_num - 1);

		if( __is_lid_valid( lid, p_port_attr->lid, p_port_attr->lmc ) &&
			__is_gid_valid( p_port_attr, p_gid ) &&
			__get_pkey_index( p_port_attr, pkey, p_pkey_index ) )
		{
			ci_ca_unlock_attr( p_port_cep->h_ca->obj.p_ci_ca );
			cl_spinlock_release( &gp_cep_mgr->obj.lock );
			AL_EXIT( AL_DBG_CM );
			return p_port_cep;
		}

		ci_ca_unlock_attr( p_port_cep->h_ca->obj.p_ci_ca );
	}
	cl_spinlock_release( &gp_cep_mgr->obj.lock );
	AL_EXIT( AL_DBG_CM );
	return NULL;
}
+\r
+\r
/*
 * PnP callback for port event notifications.  Creates a port agent when
 * a port arrives and destroys it when the port is removed; all other
 * PnP events are ignored.
 */
static ib_api_status_t
__cep_pnp_cb(
	IN				ib_pnp_rec_t				*p_pnp_rec )
{
	ib_api_status_t		status = IB_SUCCESS;

	AL_ENTER( AL_DBG_CM );

	switch( p_pnp_rec->pnp_event )
	{
	case IB_PNP_PORT_ADD:
		/* Create the port agent. */
		CL_ASSERT( !p_pnp_rec->context );
		status = __create_port_cep( (ib_pnp_port_rec_t*)p_pnp_rec );
		break;

	case IB_PNP_PORT_REMOVE:
		CL_ASSERT( p_pnp_rec->context );

		/*
		 * Destroy the port agent.  The extra reference pairs with the
		 * synchronous destroy call (pfn_destroy consumes it).
		 */
		ref_al_obj( &((cep_agent_t* __ptr64)p_pnp_rec->context)->obj );
		((cep_agent_t* __ptr64)p_pnp_rec->context)->obj.pfn_destroy(
			&((cep_agent_t* __ptr64)p_pnp_rec->context)->obj, NULL );
		break;

	default:
		break;	/* Ignore other PNP events. */
	}

	AL_EXIT( AL_DBG_CM );
	return status;
}
+\r
+\r
+static inline int64_t\r
+__min_timewait(\r
+ IN int64_t current_min,\r
+ IN kcep_t* const p_cep )\r
+{\r
+ /*\r
+ * The minimum timer interval is 50 milliseconds. This means\r
+ * 500000 100ns increments. Since __process_timewait divides the\r
+ * result in half (so that the worst cast timewait interval is 150%)\r
+ * we compensate for this here. Note that relative time values are\r
+ * expressed as negative.\r
+ */\r
+#define MIN_TIMEWAIT_100NS -1000000\r
+\r
+ /* Still in timewait - try again next time. */\r
+ if( !current_min )\r
+ {\r
+ return min( p_cep->timewait_time.QuadPart, MIN_TIMEWAIT_100NS );\r
+ }\r
+ else\r
+ {\r
+ return max( current_min,\r
+ min( p_cep->timewait_time.QuadPart, MIN_TIMEWAIT_100NS ) );\r
+ }\r
+}\r
+\r
+\r
/*
 * Timer callback to process CEPs in timewait state.  Walks the timewait
 * list, retiring every CEP whose timer has fired and that has no
 * outstanding references: retired CEPs are removed from the connection
 * maps and either destroyed (CEP_STATE_DESTROY) or returned to IDLE.
 * Returns the time in ms until the next expiration (half of the
 * shortest remaining interval; see __min_timewait).
 * Called at DISPATCH_LEVEL with the CEP manager lock held.
 */
static uint32_t
__process_timewait()
{
	cl_list_item_t		*p_item;
	kcep_t				*p_cep;
	LARGE_INTEGER		timeout;
	int64_t				min_timewait = 0;

	AL_ENTER( AL_DBG_CM );

	CL_ASSERT( KeGetCurrentIrql() == DISPATCH_LEVEL );

	/* Zero timeout: poll the timer state without waiting. */
	timeout.QuadPart = 0;

	p_item = cl_qlist_head( &gp_cep_mgr->timewait_list );
	while( p_item != cl_qlist_end( &gp_cep_mgr->timewait_list ) )
	{
		p_cep = PARENT_STRUCT( p_item, kcep_t, timewait_item );
		/* Advance now - the CEP may be removed from the list below. */
		p_item = cl_qlist_next( p_item );

		CL_ASSERT( p_cep->state == CEP_STATE_DESTROY ||
			p_cep->state == CEP_STATE_TIMEWAIT );

		CL_ASSERT( !p_cep->p_mad );

		if( KeWaitForSingleObject( &p_cep->timewait_timer, Executive,
			KernelMode, FALSE, &timeout ) != STATUS_SUCCESS )
		{
			/* Still in timewait - try again next time. */
			min_timewait = __min_timewait( min_timewait, p_cep );
			continue;
		}

		if( p_cep->ref_cnt )
		{
			/* Send outstanding or destruction in progress. */
			min_timewait = __min_timewait( min_timewait, p_cep );
			continue;
		}

		/* Remove from the timewait list. */
		cl_qlist_remove_item( &gp_cep_mgr->timewait_list, &p_cep->timewait_item );

		/*
		 * Not in timewait.  Remove the CEP from the maps - it should
		 * no longer be matched against.
		 */
		__remove_cep( p_cep );

		if( p_cep->state == CEP_STATE_DESTROY )
		{
			__destroy_cep( p_cep );
		}
		else
		{
			/* Move the CEP to the IDLE state so that it can be used again. */
			p_cep->state = CEP_STATE_IDLE;
		}
	}

	/* Convert negative 100ns units to ms, halved (100ns/ms = 10000). */
	AL_EXIT( AL_DBG_CM );
	return (uint32_t)(min_timewait / -20000);
}
+\r
+\r
/*
 * Timer callback to process CEPs in timewait state.  Runs the timewait
 * sweep under the CEP manager lock and, if any CEPs remain waiting,
 * rearms the timer for half of the shortest remaining interval (worst
 * case timewait of 150%).  Runs at DISPATCH_LEVEL (cl_timer callback).
 */
static void
__cep_timewait_cb(
	IN				void						*context )
{
	KLOCK_QUEUE_HANDLE	hdl;
	uint32_t			min_timewait;

	AL_ENTER( AL_DBG_CM );

	UNUSED_PARAM( context );

	CL_ASSERT( KeGetCurrentIrql() == DISPATCH_LEVEL );

	KeAcquireInStackQueuedSpinLockAtDpcLevel( &gp_cep_mgr->lock, &hdl );

	min_timewait = __process_timewait();

	if( cl_qlist_count( &gp_cep_mgr->timewait_list ) )
	{
		/*
		 * Reset the timer for half of the shortest timeout - this results
		 * in a worst case timeout of 150% of timewait.
		 */
		cl_timer_trim( &gp_cep_mgr->timewait_timer, min_timewait );
	}

	KeReleaseInStackQueuedSpinLockFromDpcLevel( &hdl );

	AL_EXIT( AL_DBG_CM );
}
+\r
+\r
/*
 * Starts immediate cleanup of the CM.  Invoked during al_obj destruction.
 * Deregisters from PnP notifications (asynchronously - the reference
 * left outstanding by create_cep_mgr is released by the completion) and
 * forces every timewait timer to expire so the CEPs can be retired now.
 */
static void
__destroying_cep_mgr(
	IN				al_obj_t*					p_obj )
{
	ib_api_status_t		status;
	KLOCK_QUEUE_HANDLE	hdl;
	cl_list_item_t		*p_item;
	kcep_t				*p_cep;
	LARGE_INTEGER		timeout;

	AL_ENTER( AL_DBG_CM );

	CL_ASSERT( &gp_cep_mgr->obj == p_obj );
	UNUSED_PARAM( p_obj );

	/* Deregister from PnP notifications. */
	if( gp_cep_mgr->h_pnp )
	{
		status = ib_dereg_pnp(
			gp_cep_mgr->h_pnp, (ib_pfn_destroy_cb_t)deref_al_obj );
		if( status != IB_SUCCESS )
		{
			/* Deregistration failed - drop the reference ourselves. */
			CL_TRACE( AL_DBG_ERROR, g_al_dbg_lvl,
				("ib_dereg_pnp failed with status %s.\n",
				ib_get_err_str(status)) );
			deref_al_obj( &gp_cep_mgr->obj );
		}
	}

	/* Cancel all timewait timers (zero due time = fire immediately). */
	timeout.QuadPart = 0;
	KeAcquireInStackQueuedSpinLock( &gp_cep_mgr->lock, &hdl );
	for( p_item = cl_qlist_head( &gp_cep_mgr->timewait_list );
		p_item != cl_qlist_end( &gp_cep_mgr->timewait_list );
		p_item = cl_qlist_next( p_item ) )
	{
		p_cep = PARENT_STRUCT( p_item, kcep_t, timewait_item );
		KeSetTimer( &p_cep->timewait_timer, timeout, NULL );
	}
	/* Sweep now that all timers are expired (we hold DISPATCH_LEVEL). */
	__process_timewait();
	KeReleaseInStackQueuedSpinLock( &hdl );

	AL_EXIT( AL_DBG_CM );
}
+\r
+\r
/*
 * Frees the global CEP agent.  Invoked during al_obj destruction, after
 * all child port agents have been destroyed.
 */
static void
__free_cep_mgr(
	IN				al_obj_t*					p_obj )
{
	AL_ENTER( AL_DBG_CM );

	CL_ASSERT( &gp_cep_mgr->obj == p_obj );
	/* All listen request should have been cleaned up by this point. */
	CL_ASSERT( cl_is_rbmap_empty( &gp_cep_mgr->listen_map ) );
	/* All connections should have been cancelled/disconnected by now. */
	CL_ASSERT( cl_is_rbmap_empty( &gp_cep_mgr->conn_id_map ) );
	CL_ASSERT( cl_is_rbmap_empty( &gp_cep_mgr->conn_qp_map ) );

	cl_vector_destroy( &gp_cep_mgr->cid_vector );

	cl_timer_destroy( &gp_cep_mgr->timewait_timer );

	/*
	 * All CM port agents should have been destroyed by now via the
	 * standard child object destruction provided by the al_obj.
	 */
	ExDeleteNPagedLookasideList( &gp_cep_mgr->cep_pool );
	destroy_al_obj( p_obj );

	cl_free( gp_cep_mgr );
	gp_cep_mgr = NULL;

	AL_EXIT( AL_DBG_CM );
}
+\r
+\r
/*
 * cl_vector element initializer for CID entries.  Threads the free list
 * through the vector: each new entry's p_cep field stores the index of
 * the NEXT free entry (the pre-increment of free_cid makes entry i point
 * to i+1).  __get_lcid resets free_cid to the first new index after a
 * grow.  An entry with a NULL h_al is not owned by any AL instance.
 */
static cl_status_t
__cid_init(
	IN				void* const					p_element,
	IN				void*						context )
{
	cep_cid_t		*p_cid;

	UNUSED_PARAM( context );

	p_cid = (cep_cid_t*)p_element;

	p_cid->h_al = NULL;
	/* Not a real pointer: encodes the next free index (see above). */
	p_cid->p_cep = (kcep_t*)(uintn_t)++gp_cep_mgr->free_cid;
	p_cid->modifier = 0;

	return CL_SUCCESS;
}
+\r
+\r
/*
 * Allocates and initializes the global CM agent: constructs the CEP
 * manager object, its CEP lookaside list, connection/listen maps,
 * timewait machinery, and CID vector, then registers for port PnP
 * notifications so port agents are created as ports arrive.
 */
ib_api_status_t
create_cep_mgr(
	IN				al_obj_t* const				p_parent_obj )
{
	ib_api_status_t		status;
	cl_status_t			cl_status;
	ib_pnp_req_t		pnp_req;

	AL_ENTER( AL_DBG_CM );

	CL_ASSERT( gp_cep_mgr == NULL );

	/* Allocate the global CM agent. */
	gp_cep_mgr = (al_cep_mgr_t*)cl_zalloc( sizeof(al_cep_mgr_t) );
	if( !gp_cep_mgr )
	{
		AL_TRACE_EXIT( AL_DBG_ERROR,
			("Failed allocation of global CM agent.\n") );
		return IB_INSUFFICIENT_MEMORY;
	}

	construct_al_obj( &gp_cep_mgr->obj, AL_OBJ_TYPE_CM );
	ExInitializeNPagedLookasideList( &gp_cep_mgr->cep_pool, NULL, NULL,
		0, sizeof(kcep_t), 'PECK', 0 );
	cl_qmap_init( &gp_cep_mgr->port_map );
	cl_rbmap_init( &gp_cep_mgr->listen_map );
	cl_rbmap_init( &gp_cep_mgr->conn_id_map );
	cl_rbmap_init( &gp_cep_mgr->conn_qp_map );
	cl_qlist_init( &gp_cep_mgr->timewait_list );
	/* Timer initialization can't fail in kernel-mode. */
	cl_timer_init( &gp_cep_mgr->timewait_timer, __cep_timewait_cb, NULL );
	cl_vector_construct( &gp_cep_mgr->cid_vector );

	status = init_al_obj( &gp_cep_mgr->obj, NULL, FALSE,
		__destroying_cep_mgr, NULL, __free_cep_mgr );
	if( status != IB_SUCCESS )
	{
		/* pfn_destroy isn't valid yet - free directly. */
		__free_cep_mgr( &gp_cep_mgr->obj );
		AL_TRACE_EXIT( AL_DBG_ERROR,
			("init_al_obj failed with status %s.\n", ib_get_err_str(status)) );
		return status;
	}
	/* Attach to the parent object. */
	status = attach_al_obj( p_parent_obj, &gp_cep_mgr->obj );
	if( status != IB_SUCCESS )
	{
		gp_cep_mgr->obj.pfn_destroy( &gp_cep_mgr->obj, NULL );
		AL_TRACE_EXIT( AL_DBG_ERROR,
			("attach_al_obj returned %s.\n", ib_get_err_str(status)) );
		return status;
	}

	cl_status = cl_vector_init( &gp_cep_mgr->cid_vector,
		CEP_CID_MIN, CEP_CID_GROW, sizeof(cep_cid_t), __cid_init, NULL, NULL );
	if( cl_status != CL_SUCCESS )
	{
		gp_cep_mgr->obj.pfn_destroy( &gp_cep_mgr->obj, NULL );
		AL_TRACE_EXIT( AL_DBG_ERROR,
			("cl_vector_init failed with status %s.\n",
			CL_STATUS_MSG(cl_status)) );
		return ib_convert_cl_status( cl_status );
	}

	/* __cid_init left free_cid == size; start the free list at index 0. */
	gp_cep_mgr->free_cid = 0;

	/* Register for port PnP notifications. */
	cl_memclr( &pnp_req, sizeof(pnp_req) );
	pnp_req.pnp_class = IB_PNP_PORT;
	pnp_req.pnp_context = &gp_cep_mgr->obj;
	pnp_req.pfn_pnp_cb = __cep_pnp_cb;
	status = ib_reg_pnp( gh_al, &pnp_req, &gp_cep_mgr->h_pnp );
	if( status != IB_SUCCESS )
	{
		gp_cep_mgr->obj.pfn_destroy( &gp_cep_mgr->obj, NULL );
		AL_TRACE_EXIT( AL_DBG_ERROR,
			("ib_reg_pnp failed with status %s.\n", ib_get_err_str(status)) );
		return status;
	}

	/*
	 * Leave the reference taken in init_al_obj outstanding since PnP
	 * deregistration is asynchronous.  This replaces a call to ref and
	 * deref the object.
	 */

	AL_EXIT( AL_DBG_CM );
	return IB_SUCCESS;
}
+\r
+/******************************************************************************\r
+* CEP manager API\r
+******************************************************************************/\r
+\r
/*
 * Queues a received/completed MAD on a CEP for delivery to its owner.
 * Called with the CEP and CEP manager locks held.
 *
 * Returns:
 *   IB_INVALID_STATE - the CEP is being destroyed; caller keeps the MAD.
 *   IB_PENDING       - queued; a callback is already signalled/pending.
 *   IB_SUCCESS       - queued and the caller must now invoke the user
 *                      callback (a reference was taken for it).
 */
static ib_api_status_t
__cep_queue_mad(
	IN				kcep_t* const				p_cep,
	IN				ib_mad_element_t*			p_mad )
{
	AL_ENTER( AL_DBG_CM );

	CL_ASSERT( !p_mad->p_next );

	if( p_cep->state == CEP_STATE_DESTROY )
	{
		AL_EXIT( AL_DBG_CM );
		return IB_INVALID_STATE;
	}

	/* Queue this MAD for processing. */
	if( p_cep->p_mad_head )
	{
		CL_ASSERT( p_cep->signalled );
		/*
		 * If there's already a MAD at the head of the list, we will not
		 * invoke the callback.  Just queue and exit.
		 */
		CL_ASSERT( p_cep->p_mad_tail );
		p_cep->p_mad_tail->p_next = p_mad;
		p_cep->p_mad_tail = p_mad;
		AL_EXIT( AL_DBG_CM );
		return IB_PENDING;
	}

	p_cep->p_mad_head = p_mad;
	p_cep->p_mad_tail = p_mad;

	if( p_cep->signalled )
	{
		/* signalled was already non-zero.  Don't invoke the callback again. */
		AL_EXIT( AL_DBG_CM );
		return IB_PENDING;
	}

	p_cep->signalled = TRUE;

	/* Take a reference since we're about to invoke the callback. */
	cl_atomic_inc( &p_cep->ref_cnt );

	AL_EXIT( AL_DBG_CM );
	return IB_SUCCESS;
}
+\r
+\r
+static inline void\r
+__cep_complete_irp(\r
+ IN kcep_t* const p_cep,\r
+ IN NTSTATUS status,\r
+ IN CCHAR increment )\r
+{\r
+ IRP *p_irp;\r
+\r
+ AL_ENTER( AL_DBG_CM );\r
+\r
+ p_irp = InterlockedExchangePointer( &p_cep->p_irp, NULL );\r
+\r
+ if( p_irp )\r
+ {\r
+#pragma warning(push, 3)\r
+ IoSetCancelRoutine( p_irp, NULL );\r
+#pragma warning(pop)\r
+\r
+ /* Complete the IRP. */\r
+ p_irp->IoStatus.Status = status;\r
+ p_irp->IoStatus.Information = 0;\r
+ IoCompleteRequest( p_irp, increment );\r
+ }\r
+\r
+ AL_EXIT( AL_DBG_CM );\r
+}\r
+\r
+\r
/*
 * Notifies the CEP's owner that callbacks are waiting, either via the
 * registered callback or by completing the pending IRP, then releases
 * the callback reference taken in __cep_queue_mad.  If that was the
 * last reference, invokes the destroy callback.
 * Runs at DISPATCH_LEVEL; called without the CEP lock held.
 */
static inline void
__process_cep(
	IN				kcep_t* const				p_cep )
{
	ib_pfn_destroy_cb_t	pfn_destroy_cb;
	void				*context;

	AL_ENTER( AL_DBG_CM );

	CL_ASSERT( KeGetCurrentIrql() == DISPATCH_LEVEL );

	/* Signal to the user there are callback waiting. */
	if( p_cep->pfn_cb )
		p_cep->pfn_cb( p_cep->p_cid->h_al, &p_cep->cep );
	else
		__cep_complete_irp( p_cep, STATUS_SUCCESS, IO_NETWORK_INCREMENT );

	/* Capture before the deref - the CEP may be freed afterwards. */
	pfn_destroy_cb = p_cep->pfn_destroy_cb;
	context = p_cep->cep.context;

	/*
	 * Release the reference for the callback and invoke the destroy
	 * callback if necessary.
	 */
	if( !cl_atomic_dec( &p_cep->ref_cnt ) && pfn_destroy_cb )
		pfn_destroy_cb( context );

	AL_EXIT( AL_DBG_CM );
}
+\r
+\r
+static uint32_t\r
+__calc_mad_timeout(\r
+ IN const uint8_t pkt_life )\r
+{\r
+ /*\r
+ * Calculate the retry timeout.\r
+ * All timeout values in micro seconds are expressed as 4.096 * 2^x,\r
+ * where x is the timeout. The formula to approximates this to\r
+ * milliseconds using just shifts and subtraction is:\r
+ * timeout_ms = 67 << (x - 14)\r
+ * The results are off by 0.162%.\r
+ *\r
+ * Note that we will never return less than 1 millisecond. We also\r
+ * trap exceedingly large values to prevent wrapping.\r
+ */\r
+ if( pkt_life > 39 )\r
+ return ~0UL;\r
+ if( pkt_life > 14 )\r
+ return 67 << (pkt_life - 14);\r
+ else if( pkt_life > 8 )\r
+ return 67 >> (14 - pkt_life);\r
+ else\r
+ return 1;\r
+}\r
+\r
+\r
/*
 * Allocates and initializes a new CEP from the lookaside list, assigns
 * it a local CID, and builds its local communication ID from the CID
 * index plus a per-entry modifier (upper byte) so stale CIDs are
 * detected.  Returns NULL on allocation or CID exhaustion.
 * CEP manager lock is held when calling this function.
 */
static kcep_t*
__create_cep()
{
	kcep_t				*p_cep;

	AL_ENTER( AL_DBG_CM );

	p_cep = ExAllocateFromNPagedLookasideList( &gp_cep_mgr->cep_pool );
	if( !p_cep )
	{
		AL_TRACE_EXIT( AL_DBG_ERROR, ("Failed to allocate CEP.\n") );
		return NULL;
	}

	cl_memclr( p_cep, sizeof(kcep_t) );

	KeInitializeTimer( &p_cep->timewait_timer );

	p_cep->state = CEP_STATE_IDLE;

	/*
	 * Pre-charge the reference count to 1.  The code will invoke the
	 * destroy callback once the ref count reaches to zero.
	 */
	p_cep->ref_cnt = 1;
	p_cep->signalled = FALSE;

	/* Find a free entry in the CID vector. */
	p_cep->p_cid = __get_lcid( &p_cep->cep.cid );

	if( !p_cep->p_cid )
	{
		ExFreeToNPagedLookasideList( &gp_cep_mgr->cep_pool, p_cep );
		AL_TRACE_EXIT( AL_DBG_ERROR, ("Failed to get CID.\n") );
		return NULL;
	}

	/* Bump the modifier so a recycled CID yields a new comm ID. */
	p_cep->p_cid->modifier++;
	/*
	 * We don't ever want a modifier of zero for the CID at index zero
	 * since it would result in a total CID of zero.
	 */
	if( !p_cep->cep.cid && !p_cep->p_cid->modifier )
		p_cep->p_cid->modifier++;

	/* Local comm ID = CID index with the modifier in the top byte. */
	p_cep->local_comm_id = p_cep->cep.cid | (p_cep->p_cid->modifier << 24);
	p_cep->tid = p_cep->local_comm_id;

	p_cep->p_cid->p_cep = p_cep;

	/* The CEP holds a reference on the global CEP manager. */
	ref_al_obj( &gp_cep_mgr->obj );

	AL_EXIT( AL_DBG_CM );
	return p_cep;
}
+\r
+\r
+/*\r
+ * Associate a CEP with an owning AL instance, storing the user callback\r
+ * and context, and link it into the AL instance's CEP list.\r
+ */\r
+static inline void\r
+__bind_cep(\r
+ IN kcep_t* const p_cep,\r
+ IN ib_al_handle_t h_al,\r
+ IN al_pfn_cep_cb_t pfn_cb,\r
+ IN void *context )\r
+{\r
+ CL_ASSERT( p_cep );\r
+ CL_ASSERT( p_cep->p_cid );\r
+ CL_ASSERT( h_al );\r
+\r
+ p_cep->p_cid->h_al = h_al;\r
+ p_cep->pfn_cb = pfn_cb;\r
+ p_cep->cep.context = context;\r
+\r
+ /* Track the CEP in its owning AL instance. */\r
+ cl_spinlock_acquire( &h_al->obj.lock );\r
+ cl_qlist_insert_tail( &h_al->cep_list, &p_cep->al_item );\r
+ cl_spinlock_release( &h_al->obj.lock );\r
+}\r
+\r
+\r
+/*\r
+ * Detach a CEP from its owning AL instance. The CID entry keeps a\r
+ * non-NULL h_al (the internal gh_al) so it is still seen as in-use\r
+ * but can no longer be looked up through a user's AL handle.\r
+ */\r
+static inline void\r
+__unbind_cep(\r
+ IN kcep_t* const p_cep )\r
+{\r
+ CL_ASSERT( p_cep );\r
+ CL_ASSERT( p_cep->p_cid );\r
+ CL_ASSERT( p_cep->p_cid->h_al );\r
+\r
+ /* Remove the CEP from its owning AL instance's list. */\r
+ cl_spinlock_acquire( &p_cep->p_cid->h_al->obj.lock );\r
+ cl_qlist_remove_item( &p_cep->p_cid->h_al->cep_list, &p_cep->al_item );\r
+ cl_spinlock_release( &p_cep->p_cid->h_al->obj.lock );\r
+\r
+ /*\r
+ * Set to the internal AL handle - it needs to be non-NULL to indicate it's\r
+ * a valid entry, and it can't be a user's AL instance to prevent using a\r
+ * destroyed CEP.\r
+ */\r
+ p_cep->p_cid->h_al = gh_al;\r
+#ifdef _DEBUG_\r
+ p_cep->pfn_cb = NULL;\r
+#endif /* _DEBUG_ */\r
+}\r
+\r
+\r
+/*\r
+ * Compute the CEP's timewait interval from its stored packet lifetime and\r
+ * the remote target ACK delay. The result is stored as a negative value\r
+ * (NT relative due time, 100ns units) for use with KeSetTimer.\r
+ */\r
+static inline void\r
+__calc_timewait(\r
+ IN kcep_t* const p_cep )\r
+{\r
+\r
+ /*\r
+ * Use the CEP's stored packet lifetime to calculate the time at which\r
+ * the CEP exits timewait. Packet lifetime is expressed as\r
+ * 4.096 * 2^pkt_life microseconds, and we need a timeout in 100ns\r
+ * increments. The formula using just shifts and subtraction is this:\r
+ * timeout = (41943 << (pkt_life - 10));\r
+ * The results are off by .0001%, which should be more than adequate.\r
+ */\r
+ if( p_cep->max_2pkt_life > 10 )\r
+ {\r
+ p_cep->timewait_time.QuadPart =\r
+ -(41943i64 << (p_cep->max_2pkt_life - 10));\r
+ }\r
+ else\r
+ {\r
+ p_cep->timewait_time.QuadPart =\r
+ -(41943i64 >> (10 - p_cep->max_2pkt_life));\r
+ }\r
+ /* Subtracting extends the (negative) relative due time further out. */\r
+ if( p_cep->target_ack_delay > 10 )\r
+ {\r
+ p_cep->timewait_time.QuadPart -=\r
+ (41943i64 << (p_cep->target_ack_delay - 10));\r
+ }\r
+ else\r
+ {\r
+ p_cep->timewait_time.QuadPart -=\r
+ (41943i64 >> (10 - p_cep->target_ack_delay));\r
+ }\r
+}\r
+\r
+\r
+/* Called with CEP manager and CEP locks held. */\r
+/*\r
+ * Queue the CEP on the timewait list, start its per-CEP kernel timer,\r
+ * and trim the manager's sweep timer so it fires soon enough.\r
+ */\r
+static inline void\r
+__insert_timewait(\r
+ IN kcep_t* const p_cep )\r
+{\r
+ cl_qlist_insert_tail( &gp_cep_mgr->timewait_list, &p_cep->timewait_item );\r
+\r
+ KeSetTimer( &p_cep->timewait_timer, p_cep->timewait_time, NULL );\r
+\r
+ /*\r
+ * Reset the timer for half of the shortest timeout - this results\r
+ * in a worst case timeout of 150% of timewait.\r
+ */\r
+ /* timewait_time is negative 100ns units; /20000 yields half in ms. */\r
+ cl_timer_trim( &gp_cep_mgr->timewait_timer,\r
+ (uint32_t)(-p_cep->timewait_time.QuadPart / 20000) );\r
+}\r
+\r
+\r
+/*\r
+ * Build and send a REJ MAD for the connection, with optional ARI\r
+ * (additional rejection info) and private data.\r
+ */\r
+static inline ib_api_status_t\r
+__do_cep_rej(\r
+ IN kcep_t* const p_cep,\r
+ IN ib_rej_status_t rej_status,\r
+ IN const uint8_t* const p_ari,\r
+ IN uint8_t ari_len,\r
+ IN const uint8_t* const p_pdata,\r
+ IN uint8_t pdata_len )\r
+{\r
+ ib_api_status_t status;\r
+ cep_agent_t *p_port_cep;\r
+ ib_mad_element_t *p_mad;\r
+\r
+ p_port_cep = __get_cep_agent( p_cep );\r
+ if( !p_port_cep )\r
+ return IB_INSUFFICIENT_RESOURCES;\r
+\r
+ status = ib_get_mad( p_port_cep->pool_key, MAD_BLOCK_SIZE, &p_mad );\r
+ if( status != IB_SUCCESS )\r
+ return status;\r
+\r
+ __format_mad_av( p_mad, &p_cep->av[p_cep->idx_primary] );\r
+\r
+ /* NOTE(review): the two error returns below appear to leak p_mad\r
+ * (no ib_put_mad on the failure paths) - TODO confirm. */\r
+ status = conn_rej_set_ari(\r
+ p_ari, ari_len, (mad_cm_rej_t*)p_mad->p_mad_buf );\r
+ if( status != IB_SUCCESS )\r
+ return status;\r
+\r
+ status = conn_rej_set_pdata(\r
+ p_pdata, pdata_len, (mad_cm_rej_t*)p_mad->p_mad_buf );\r
+ if( status != IB_SUCCESS )\r
+ return status;\r
+\r
+ __reject_mad( p_port_cep, p_cep, p_mad, rej_status );\r
+ return IB_SUCCESS;\r
+}\r
+\r
+\r
+/*\r
+ * Get the CEP's port agent and a MAD from its pool, then pre-format the\r
+ * MAD's address vector and CM header for the given attribute ID.\r
+ * On success returns the agent in *pp_port_cep and the MAD in *pp_mad.\r
+ */\r
+static ib_api_status_t\r
+__cep_get_mad(\r
+ IN kcep_t* const p_cep,\r
+ IN net16_t attr_id,\r
+ OUT cep_agent_t** const pp_port_cep,\r
+ OUT ib_mad_element_t** const pp_mad )\r
+{\r
+ cep_agent_t *p_port_cep;\r
+ ib_api_status_t status;\r
+\r
+ AL_ENTER( AL_DBG_CM );\r
+\r
+ p_port_cep = __get_cep_agent( p_cep );\r
+ if( !p_port_cep )\r
+ {\r
+ AL_TRACE_EXIT( AL_DBG_ERROR, ("__get_cep_agent failed.\n") );\r
+ return IB_INSUFFICIENT_RESOURCES;\r
+ }\r
+\r
+ status = ib_get_mad( p_port_cep->pool_key, MAD_BLOCK_SIZE, pp_mad );\r
+ if( status != IB_SUCCESS )\r
+ {\r
+ AL_TRACE_EXIT( AL_DBG_ERROR,\r
+ ("ib_get_mad returned %s.\n", ib_get_err_str( status )) );\r
+ return status;\r
+ }\r
+\r
+ __format_mad_av( *pp_mad, &p_cep->av[p_cep->idx_primary] );\r
+\r
+ __format_mad_hdr( (*pp_mad)->p_mad_buf, p_cep, attr_id );\r
+\r
+ *pp_port_cep = p_port_cep;\r
+ AL_EXIT( AL_DBG_CM );\r
+ return status;\r
+}\r
+\r
+\r
+/*\r
+ * Fill a DREQ MAD body from the CEP's connection state (comm IDs and\r
+ * remote QPN) plus optional private data. Returns the status of the\r
+ * private data copy.\r
+ */\r
+static ib_api_status_t\r
+__format_dreq(\r
+ IN kcep_t* const p_cep, \r
+ IN const uint8_t* p_pdata OPTIONAL,\r
+ IN uint8_t pdata_len,\r
+ IN OUT ib_mad_element_t* const p_mad )\r
+{\r
+ ib_api_status_t status;\r
+ mad_cm_dreq_t *p_dreq;\r
+\r
+ AL_ENTER( AL_DBG_CM );\r
+\r
+ p_dreq = (mad_cm_dreq_t*)p_mad->p_mad_buf;\r
+\r
+ p_dreq->local_comm_id = p_cep->local_comm_id;\r
+ p_dreq->remote_comm_id = p_cep->remote_comm_id;\r
+\r
+ conn_dreq_set_remote_qpn( p_cep->remote_qpn, p_dreq );\r
+\r
+ /* copy optional data */\r
+ status = conn_dreq_set_pdata( p_pdata, pdata_len, p_dreq );\r
+\r
+ AL_EXIT( AL_DBG_CM );\r
+ return status;\r
+}\r
+\r
+\r
+/*\r
+ * Send a DREQ (no private data) for the CEP, with CM-level retries.\r
+ */\r
+static ib_api_status_t\r
+__dreq_cep(\r
+ IN kcep_t* const p_cep )\r
+{\r
+ ib_api_status_t status;\r
+ cep_agent_t *p_agt;\r
+ ib_mad_element_t *p_mad;\r
+\r
+ status = __cep_get_mad( p_cep, CM_DREQ_ATTR_ID, &p_agt, &p_mad );\r
+ if( status != IB_SUCCESS )\r
+ return status;\r
+\r
+ status = __format_dreq( p_cep, NULL, 0, p_mad );\r
+ if( status != IB_SUCCESS )\r
+ /* NOTE(review): p_mad is not returned to the pool here - looks\r
+ * like a MAD leak on this failure path. TODO confirm. */\r
+ return status;\r
+\r
+ return __cep_send_retry( p_agt, p_cep, p_mad );\r
+}\r
+\r
+\r
+/*\r
+ * Fill a DREP MAD body from the CEP's comm IDs plus optional private\r
+ * data, and cache the formatted DREP in the CEP for retransmission.\r
+ */\r
+static ib_api_status_t\r
+__format_drep(\r
+ IN kcep_t* const p_cep,\r
+ IN const uint8_t* p_pdata OPTIONAL,\r
+ IN uint8_t pdata_len,\r
+ IN OUT mad_cm_drep_t* const p_drep )\r
+{\r
+ ib_api_status_t status;\r
+\r
+ AL_ENTER( AL_DBG_CM );\r
+\r
+ p_drep->local_comm_id = p_cep->local_comm_id;\r
+ p_drep->remote_comm_id = p_cep->remote_comm_id;\r
+\r
+ /* copy optional data */\r
+ status = conn_drep_set_pdata( p_pdata, pdata_len, p_drep );\r
+\r
+ /* Store the DREP MAD so we can repeat it if we get a repeated DREQ. */\r
+ if( status == IB_SUCCESS && p_drep != &p_cep->mads.drep )\r
+ p_cep->mads.drep = *p_drep;\r
+\r
+ AL_EXIT( AL_DBG_CM );\r
+ return status;\r
+}\r
+\r
+\r
+/*\r
+ * Send a DREP (no private data) for the CEP, best-effort: failures are\r
+ * silently ignored since the peer will retry its DREQ.\r
+ */\r
+static void\r
+__drep_cep(\r
+ IN kcep_t* const p_cep )\r
+{\r
+ cep_agent_t *p_agt;\r
+ ib_mad_element_t *p_mad;\r
+\r
+ AL_ENTER( AL_DBG_CM );\r
+\r
+ if( __cep_get_mad( p_cep, CM_DREP_ATTR_ID, &p_agt, &p_mad ) != IB_SUCCESS )\r
+ return;\r
+\r
+ /* NOTE(review): this early return leaks p_mad and skips the AL_EXIT\r
+ * trace (as does the one above) - TODO confirm. */\r
+ if( __format_drep( p_cep, NULL, 0, (mad_cm_drep_t*)p_mad->p_mad_buf )\r
+ != IB_SUCCESS )\r
+ {\r
+ return;\r
+ }\r
+\r
+ __cep_send_mad( p_agt, p_mad );\r
+\r
+ AL_EXIT( AL_DBG_CM );\r
+}\r
+\r
+\r
+/* Called with CEP manager lock held. */\r
+/*\r
+ * Tear down a CEP according to its current state: flush pending MADs\r
+ * (recursively cleaning up any CEPs queued on them), reject/disconnect\r
+ * active connections, and move the CEP toward destruction.\r
+ *\r
+ * Returns the decremented reference count, or -1 if cleanup already ran.\r
+ * When a DREQ was sent, destruction is deferred until the DREP/timeout.\r
+ */\r
+static int32_t\r
+__cleanup_cep(\r
+ IN kcep_t* const p_cep )\r
+{\r
+ ib_mad_element_t *p_mad;\r
+ kcep_t *p_new_cep;\r
+\r
+ AL_ENTER( AL_DBG_CM );\r
+\r
+ CL_ASSERT( p_cep );\r
+ CL_ASSERT( KeGetCurrentIrql() == DISPATCH_LEVEL );\r
+\r
+ /* If we've already come through here, we're done. */\r
+ if( p_cep->state == CEP_STATE_DESTROY ||\r
+ p_cep->state == CEP_STATE_DREQ_DESTROY )\r
+ {\r
+ AL_EXIT( AL_DBG_CM );\r
+ return -1;\r
+ }\r
+\r
+ /* Cleanup the pending MAD list. */\r
+ while( p_cep->p_mad_head )\r
+ {\r
+ p_mad = p_cep->p_mad_head;\r
+ p_cep->p_mad_head = p_mad->p_next;\r
+ p_mad->p_next = NULL;\r
+ /* send_context1 carries a CEP created for an incoming REQ. */\r
+ if( p_mad->send_context1 )\r
+ {\r
+ p_new_cep = (kcep_t* __ptr64)p_mad->send_context1;\r
+\r
+ __unbind_cep( p_new_cep );\r
+ __cleanup_cep( p_new_cep );\r
+ }\r
+ ib_put_mad( p_mad );\r
+ }\r
+\r
+ switch( p_cep->state )\r
+ {\r
+ case CEP_STATE_PRE_REP:\r
+ case CEP_STATE_PRE_REP_MRA_SENT:\r
+ CL_ASSERT( p_cep->p_mad );\r
+ ib_put_mad( p_cep->p_mad );\r
+ p_cep->p_mad = NULL;\r
+ /* Fall through. */\r
+ case CEP_STATE_REQ_RCVD:\r
+ case CEP_STATE_REP_RCVD:\r
+ case CEP_STATE_REQ_MRA_SENT:\r
+ case CEP_STATE_REP_MRA_SENT:\r
+ /* Reject the connection. */\r
+ __do_cep_rej( p_cep, IB_REJ_USER_DEFINED, NULL, 0, NULL, 0 );\r
+ break;\r
+\r
+ case CEP_STATE_REQ_SENT:\r
+ case CEP_STATE_REQ_MRA_RCVD:\r
+ case CEP_STATE_REP_SENT:\r
+ case CEP_STATE_REP_MRA_RCVD:\r
+ /* Cancel the send. */\r
+ CL_ASSERT( p_cep->h_mad_svc );\r
+ CL_ASSERT( p_cep->p_send_mad );\r
+ ib_cancel_mad( p_cep->h_mad_svc, p_cep->p_send_mad );\r
+ /* Reject the connection. */\r
+ __do_cep_rej( p_cep, IB_REJ_TIMEOUT, (uint8_t*)&p_cep->local_ca_guid,\r
+ sizeof(p_cep->local_ca_guid), NULL, 0 );\r
+ break;\r
+\r
+ case CEP_STATE_ESTABLISHED:\r
+ case CEP_STATE_LAP_RCVD:\r
+ case CEP_STATE_LAP_SENT:\r
+ case CEP_STATE_LAP_MRA_RCVD:\r
+ case CEP_STATE_LAP_MRA_SENT:\r
+ case CEP_STATE_PRE_APR:\r
+ case CEP_STATE_PRE_APR_MRA_SENT:\r
+ /* Disconnect the connection. */\r
+ if( __dreq_cep( p_cep ) != IB_SUCCESS )\r
+ break;\r
+ /* Fall through. */\r
+\r
+ case CEP_STATE_DREQ_SENT:\r
+ /* Wait for the DREP or a timeout before final destruction. */\r
+ p_cep->state = CEP_STATE_DREQ_DESTROY;\r
+ AL_EXIT( AL_DBG_CM );\r
+ return cl_atomic_dec( &p_cep->ref_cnt );\r
+\r
+ case CEP_STATE_DREQ_RCVD:\r
+ /* Send the DREP. */\r
+ __drep_cep( p_cep );\r
+ break;\r
+\r
+ case CEP_STATE_SREQ_RCVD:\r
+ /* TODO: Reject the SIDR request. */\r
+ break;\r
+\r
+ case CEP_STATE_LISTEN:\r
+ /* Remove from listen map. */\r
+ cl_rbmap_remove_item( &gp_cep_mgr->listen_map, &p_cep->listen_item );\r
+ break;\r
+\r
+ case CEP_STATE_PRE_REQ:\r
+ CL_ASSERT( p_cep->p_mad );\r
+ ib_put_mad( p_cep->p_mad );\r
+ p_cep->p_mad = NULL;\r
+ /* Fall through. */\r
+ case CEP_STATE_IDLE:\r
+ break;\r
+\r
+ default:\r
+ /* Unexpected state: log and fall into the timewait handling. */\r
+ AL_TRACE( AL_DBG_ERROR, ("CEP in state %d.\n", p_cep->state) );\r
+ case CEP_STATE_TIMEWAIT:\r
+ /* Already in timewait - so all is good. */\r
+ p_cep->state = CEP_STATE_DESTROY;\r
+ AL_EXIT( AL_DBG_CM );\r
+ return cl_atomic_dec( &p_cep->ref_cnt );\r
+ }\r
+\r
+ /* Hold the CEP in timewait before freeing its resources. */\r
+ p_cep->state = CEP_STATE_DESTROY;\r
+ __insert_timewait( p_cep );\r
+\r
+ AL_EXIT( AL_DBG_CM );\r
+ return cl_atomic_dec( &p_cep->ref_cnt );\r
+}\r
+\r
+\r
+/*\r
+ * Final CEP destruction: return its CID to the free list, cancel the\r
+ * timewait timer, free the CEP back to the pool, and drop the CEP\r
+ * manager reference taken in __create_cep.\r
+ */\r
+static void\r
+__destroy_cep(\r
+ IN kcep_t* const p_cep )\r
+{\r
+ AL_ENTER( AL_DBG_CM );\r
+\r
+ CL_ASSERT(\r
+ p_cep->cep.cid < cl_vector_get_size( &gp_cep_mgr->cid_vector ) );\r
+\r
+ CL_ASSERT( p_cep->p_cid == (cep_cid_t*)cl_vector_get_ptr(\r
+ &gp_cep_mgr->cid_vector, p_cep->cep.cid ) );\r
+\r
+ /* Free the CID. */\r
+ /* The p_cep field doubles as the next-free-CID link when unused. */\r
+ p_cep->p_cid->p_cep = (kcep_t*)(uintn_t)gp_cep_mgr->free_cid;\r
+ p_cep->p_cid->h_al = NULL;\r
+ gp_cep_mgr->free_cid = p_cep->cep.cid;\r
+\r
+ KeCancelTimer( &p_cep->timewait_timer );\r
+\r
+ ExFreeToNPagedLookasideList( &gp_cep_mgr->cep_pool, p_cep );\r
+\r
+ deref_al_obj( &gp_cep_mgr->obj );\r
+\r
+ AL_EXIT( AL_DBG_CM );\r
+}\r
+\r
+\r
+/*\r
+ * Create a CEP for the given AL instance and return its CID in *p_cid.\r
+ * pfn_cb/context are stored for later event delivery.\r
+ * Returns IB_INSUFFICIENT_MEMORY if the CEP cannot be allocated.\r
+ */\r
+ib_api_status_t\r
+al_create_cep(\r
+ IN ib_al_handle_t h_al,\r
+ IN al_pfn_cep_cb_t pfn_cb,\r
+ IN void *context,\r
+ OUT net32_t* const p_cid )\r
+{\r
+ kcep_t *p_cep;\r
+ KLOCK_QUEUE_HANDLE hdl;\r
+\r
+ AL_ENTER( AL_DBG_CM );\r
+\r
+ CL_ASSERT( p_cid );\r
+\r
+ /* CID allocation requires the CEP manager lock. */\r
+ KeAcquireInStackQueuedSpinLock( &gp_cep_mgr->lock, &hdl );\r
+ p_cep = __create_cep();\r
+ if( !p_cep )\r
+ {\r
+ KeReleaseInStackQueuedSpinLock( &hdl );\r
+ AL_TRACE_EXIT( AL_DBG_ERROR, ("Failed to allocate CEP.\n") );\r
+ return IB_INSUFFICIENT_MEMORY;\r
+ }\r
+ KeReleaseInStackQueuedSpinLock( &hdl );\r
+\r
+ __bind_cep( p_cep, h_al, pfn_cb, context );\r
+\r
+ *p_cid = p_cep->cep.cid;\r
+\r
+ AL_EXIT( AL_DBG_CM );\r
+ return IB_SUCCESS;\r
+}\r
+\r
+\r
+/*\r
+ * Destroy the CEP identified by cid. Cancels any queued IRP, unbinds the\r
+ * CEP from its AL instance and runs state-dependent cleanup. If cleanup\r
+ * drops the last reference, pfn_destroy_cb is invoked synchronously with\r
+ * the CEP's user context; otherwise it fires when the last reference goes.\r
+ * Returns IB_INVALID_PARAMETER if cid does not resolve for this AL.\r
+ */\r
+ib_api_status_t\r
+al_destroy_cep(\r
+ IN ib_al_handle_t h_al,\r
+ IN net32_t cid,\r
+ IN ib_pfn_destroy_cb_t pfn_destroy_cb )\r
+{\r
+ kcep_t *p_cep;\r
+ KLOCK_QUEUE_HANDLE hdl;\r
+ void *context;\r
+ int32_t ref_cnt;\r
+\r
+ AL_ENTER( AL_DBG_CM );\r
+\r
+ CL_ASSERT( h_al );\r
+\r
+ /*\r
+ * Remove the CEP from the CID vector - no further API calls\r
+ * will succeed for it.\r
+ */\r
+ KeAcquireInStackQueuedSpinLock( &gp_cep_mgr->lock, &hdl );\r
+ p_cep = __lookup_cep( h_al, cid );\r
+ if( !p_cep )\r
+ {\r
+ /* Invalid handle. */\r
+ KeReleaseInStackQueuedSpinLock( &hdl );\r
+ AL_EXIT( AL_DBG_CM );\r
+ return IB_INVALID_PARAMETER;\r
+ }\r
+\r
+ /* Capture the context before cleanup can recycle the CEP. */\r
+ context = p_cep->cep.context;\r
+ p_cep->pfn_destroy_cb = pfn_destroy_cb;\r
+\r
+ /* Cancel any queued IRP */\r
+ __cep_complete_irp( p_cep, STATUS_CANCELLED, IO_NO_INCREMENT );\r
+\r
+ __unbind_cep( p_cep );\r
+ ref_cnt = __cleanup_cep( p_cep );\r
+ KeReleaseInStackQueuedSpinLock( &hdl );\r
+\r
+ /*\r
+ * Done waiting. Release the reference so the timewait timer callback\r
+ * can finish cleaning up.\r
+ */\r
+ if( !ref_cnt && pfn_destroy_cb )\r
+ pfn_destroy_cb( context );\r
+\r
+ AL_EXIT( AL_DBG_CM );\r
+ return IB_SUCCESS;\r
+}\r
+\r
+\r
+/*\r
+ * Transition an idle CEP into the listening state for the service ID,\r
+ * port, and optional private data compare rule in p_listen_info, and\r
+ * insert it into the CEP manager's listen red-black map.\r
+ *\r
+ * Returns IB_INVALID_HANDLE for a bad cid, IB_INVALID_STATE if the CEP\r
+ * is not idle, IB_INVALID_SETTING for a duplicate listen, or\r
+ * IB_INSUFFICIENT_MEMORY if the compare buffer cannot be copied.\r
+ */\r
+ib_api_status_t\r
+al_cep_listen(\r
+ IN ib_al_handle_t h_al,\r
+ IN net32_t cid,\r
+ IN ib_cep_listen_t* const p_listen_info )\r
+{\r
+ ib_api_status_t status;\r
+ kcep_t *p_cep, *p_listen;\r
+ cl_rbmap_item_t *p_item, *p_insert_at;\r
+ boolean_t left = TRUE;\r
+ intn_t cmp;\r
+ KLOCK_QUEUE_HANDLE hdl;\r
+\r
+ AL_ENTER( AL_DBG_CM );\r
+\r
+ CL_ASSERT( h_al );\r
+ CL_ASSERT( p_listen_info );\r
+\r
+ KeAcquireInStackQueuedSpinLock( &gp_cep_mgr->lock, &hdl );\r
+ p_cep = __lookup_cep( h_al, cid );\r
+ if( !p_cep )\r
+ {\r
+ KeReleaseInStackQueuedSpinLock( &hdl );\r
+ AL_EXIT( AL_DBG_CM );\r
+ return IB_INVALID_HANDLE;\r
+ }\r
+\r
+ switch( p_cep->state )\r
+ {\r
+ case CEP_STATE_PRE_REQ:\r
+ /* Discard a pre-formatted REQ MAD left over from al_cep_pre_req. */\r
+ CL_ASSERT( p_cep->p_mad );\r
+ ib_put_mad( p_cep->p_mad );\r
+ p_cep->p_mad = NULL;\r
+ /* Fall through. */\r
+ case CEP_STATE_IDLE:\r
+ break;\r
+ default:\r
+ status = IB_INVALID_STATE;\r
+ goto done;\r
+ }\r
+\r
+ /* Insert the CEP into the listen map. */\r
+ /* Ordering: service ID, then port GUID, then compare buffer bytes. */\r
+ p_item = cl_rbmap_root( &gp_cep_mgr->listen_map );\r
+ p_insert_at = p_item;\r
+ while( p_item != cl_rbmap_end( &gp_cep_mgr->listen_map ) )\r
+ {\r
+ p_insert_at = p_item;\r
+\r
+ p_listen = PARENT_STRUCT( p_item, kcep_t, listen_item );\r
+\r
+ if( p_listen_info->svc_id == p_listen->sid )\r
+ goto port_cmp;\r
+ \r
+ if( p_listen_info->svc_id < p_listen->sid )\r
+ p_item = cl_rbmap_left( p_item ), left = TRUE;\r
+ else\r
+ p_item = cl_rbmap_right( p_item ), left = FALSE;\r
+\r
+ continue;\r
+\r
+port_cmp:\r
+ if( p_listen_info->port_guid != IB_ALL_PORTS )\r
+ {\r
+ if( p_listen_info->port_guid == p_listen->port_guid )\r
+ goto pdata_cmp;\r
+ \r
+ if( p_listen_info->port_guid < p_listen->port_guid )\r
+ p_item = cl_rbmap_left( p_item ), left = TRUE;\r
+ else\r
+ p_item = cl_rbmap_right( p_item ), left = FALSE;\r
+\r
+ continue;\r
+ }\r
+\r
+pdata_cmp:\r
+ /*\r
+ * If an existing listen doesn't have a compare buffer,\r
+ * then we found a duplicate.\r
+ */\r
+ if( !p_listen->p_cmp_buf )\r
+ break;\r
+\r
+ if( p_listen_info->p_cmp_buf )\r
+ {\r
+ /* Compare length must match. */\r
+ if( p_listen_info->cmp_len != p_listen->cmp_len )\r
+ break;\r
+\r
+ /* Compare offset must match. */\r
+ if( p_listen_info->cmp_offset != p_listen->cmp_offset )\r
+ break;\r
+\r
+ /* NOTE(review): this passes the address of the pointer field,\r
+ * not the buffer - the symmetric p_listen->p_cmp_buf argument\r
+ * suggests p_listen_info->p_cmp_buf (no '&') was intended.\r
+ * TODO confirm. */\r
+ cmp = cl_memcmp( &p_listen_info->p_cmp_buf,\r
+ p_listen->p_cmp_buf, p_listen->cmp_len );\r
+\r
+ if( cmp < 0 )\r
+ p_item = cl_rbmap_left( p_item ), left = TRUE;\r
+ else if( cmp > 0 )\r
+ p_item = cl_rbmap_right( p_item ), left = FALSE;\r
+ else\r
+ break;\r
+\r
+ AL_TRACE( AL_DBG_CM,\r
+ ("Svc ID match but compare buffer mismatch.\n") );\r
+ continue;\r
+ }\r
+ /* NOTE(review): if the new listen has no compare buffer but the\r
+ * existing one does, this iteration neither breaks nor advances\r
+ * p_item - possible infinite loop. Verify against upstream. */\r
+ }\r
+\r
+ if( p_item != cl_rbmap_end( &gp_cep_mgr->listen_map ) )\r
+ {\r
+ /* Duplicate!!! */\r
+ status = IB_INVALID_SETTING;\r
+ goto done;\r
+ }\r
+\r
+ /* Set up the CEP. */\r
+ if( p_listen_info->p_cmp_buf )\r
+ {\r
+ p_cep->p_cmp_buf = cl_malloc( p_listen_info->cmp_len );\r
+ if( !p_cep->p_cmp_buf )\r
+ {\r
+ AL_TRACE( AL_DBG_ERROR,\r
+ ("Failed to allocate compare buffer.\n") );\r
+ status = IB_INSUFFICIENT_MEMORY;\r
+ goto done;\r
+ }\r
+\r
+ cl_memcpy( p_cep->p_cmp_buf,\r
+ p_listen_info->p_cmp_buf, p_listen_info->cmp_len );\r
+ }\r
+ p_cep->cmp_len = p_listen_info->cmp_len;\r
+ p_cep->cmp_offset = p_listen_info->cmp_offset;\r
+ p_cep->sid = p_listen_info->svc_id;\r
+ p_cep->port_guid = p_listen_info->port_guid;\r
+ p_cep->state = CEP_STATE_LISTEN;\r
+\r
+ cl_rbmap_insert( &gp_cep_mgr->listen_map, p_insert_at,\r
+ &p_cep->listen_item, left );\r
+\r
+ status = IB_SUCCESS;\r
+\r
+done:\r
+ KeReleaseInStackQueuedSpinLock( &hdl );\r
+\r
+ AL_EXIT( AL_DBG_CM );\r
+ return status;\r
+}\r
+\r
+\r
+/*\r
+ * Build a kernel CEP address vector from a path record, resolving the\r
+ * local port agent from the source GID/LID/PKEY. Returns the matching\r
+ * port agent, or NULL if the path is not realizable on a local port.\r
+ */\r
+static cep_agent_t*\r
+__format_path_av(\r
+ IN const ib_path_rec_t* const p_path,\r
+ OUT kcep_av_t* const p_av )\r
+{\r
+ cep_agent_t* p_port_cep;\r
+\r
+ AL_ENTER( AL_DBG_CM );\r
+\r
+ CL_ASSERT( p_path );\r
+ CL_ASSERT( p_av );\r
+\r
+ /* AV is zeroed even on failure, so callers see a cleared AV. */\r
+ cl_memclr( p_av, sizeof(kcep_av_t) );\r
+\r
+ p_port_cep = __find_port_cep( &p_path->sgid, p_path->slid,\r
+ p_path->pkey, &p_av->pkey_index );\r
+ if( !p_port_cep )\r
+ {\r
+ AL_EXIT( AL_DBG_CM );\r
+ return NULL;\r
+ }\r
+\r
+ p_av->port_guid = p_port_cep->port_guid;\r
+\r
+ p_av->attr.port_num = p_port_cep->port_num;\r
+\r
+ p_av->attr.sl = ib_path_rec_sl( p_path );\r
+ p_av->attr.dlid = p_path->dlid;\r
+\r
+ p_av->attr.grh.ver_class_flow = ib_grh_set_ver_class_flow(\r
+ 1, p_path->tclass, ib_path_rec_flow_lbl( p_path ) );\r
+ p_av->attr.grh.hop_limit = ib_path_rec_hop_limit( p_path );\r
+ p_av->attr.grh.src_gid = p_path->sgid;\r
+ p_av->attr.grh.dest_gid = p_path->dgid;\r
+\r
+ /* Link-local destinations are routed without a GRH. */\r
+ p_av->attr.grh_valid = !ib_gid_is_link_local( &p_path->dgid );\r
+\r
+ p_av->attr.static_rate = ib_path_rec_rate( p_path );\r
+ p_av->attr.path_bits = (uint8_t)(p_path->slid - p_port_cep->base_lid);\r
+\r
+ /*\r
+ * Note that while we never use the connected AV attributes internally,\r
+ * we store them so we can pass them back to users.\r
+ */\r
+ p_av->attr.conn.path_mtu = p_path->mtu;\r
+ p_av->attr.conn.local_ack_timeout = calc_lcl_ack_timeout(\r
+ ib_path_rec_pkt_life( p_path ) + 1, 0 );\r
+\r
+ AL_EXIT( AL_DBG_CM );\r
+ return p_port_cep;\r
+}\r
+\r
+\r
+/*\r
+ * Formats a REQ mad's path information given a path record.\r
+ * ack_delay is the local CA's ACK delay, folded into the path's\r
+ * local ACK timeout field.\r
+ */\r
+static void\r
+__format_req_path(\r
+ IN const ib_path_rec_t* const p_path,\r
+ IN const uint8_t ack_delay,\r
+ OUT req_path_info_t* const p_req_path )\r
+{\r
+ AL_ENTER( AL_DBG_CM );\r
+\r
+ p_req_path->local_lid = p_path->slid;\r
+ p_req_path->remote_lid = p_path->dlid;\r
+ p_req_path->local_gid = p_path->sgid;\r
+ p_req_path->remote_gid = p_path->dgid;\r
+\r
+ conn_req_path_set_flow_lbl( ib_path_rec_flow_lbl( p_path ),\r
+ p_req_path );\r
+ conn_req_path_set_pkt_rate( ib_path_rec_rate( p_path ),\r
+ p_req_path );\r
+\r
+ /* Traffic class & hop limit */\r
+ p_req_path->traffic_class = p_path->tclass;\r
+ p_req_path->hop_limit = ib_path_rec_hop_limit( p_path );\r
+\r
+ /* SL & Subnet Local fields */\r
+ conn_req_path_set_svc_lvl( ib_path_rec_sl( p_path ),\r
+ p_req_path );\r
+ conn_req_path_set_subn_lcl(\r
+ ib_gid_is_link_local( &p_path->dgid ), p_req_path );\r
+\r
+ conn_req_path_set_lcl_ack_timeout(\r
+ calc_lcl_ack_timeout( ib_path_rec_pkt_life( p_path ) + 1,\r
+ ack_delay ), p_req_path );\r
+\r
+ conn_req_path_clr_rsvd_fields( p_req_path );\r
+\r
+ AL_EXIT( AL_DBG_CM );\r
+}\r
+\r
+\r
+/*\r
+ * Fill the pre-allocated REQ MAD (p_cep->p_mad) from the user's connect\r
+ * request: header, address vector, primary and optional alternate paths,\r
+ * QP attributes, timeouts, and private data. Returns the status of the\r
+ * private data copy.\r
+ */\r
+static ib_api_status_t\r
+__format_req(\r
+ IN kcep_t* const p_cep,\r
+ IN const ib_cm_req_t* const p_cm_req )\r
+{\r
+ ib_api_status_t status;\r
+ mad_cm_req_t* p_req;\r
+\r
+ AL_ENTER( AL_DBG_CM );\r
+\r
+ CL_ASSERT( p_cep );\r
+ CL_ASSERT( p_cm_req );\r
+ CL_ASSERT( p_cep->p_mad );\r
+\r
+ /* Format the MAD header. */\r
+ __format_mad_hdr( p_cep->p_mad->p_mad_buf, p_cep, CM_REQ_ATTR_ID );\r
+\r
+ /* Set the addressing information in the MAD. */\r
+ __format_mad_av( p_cep->p_mad, &p_cep->av[p_cep->idx_primary] );\r
+\r
+ p_req = (mad_cm_req_t*)p_cep->p_mad->p_mad_buf;\r
+\r
+ ci_ca_lock_attr( p_cm_req->h_qp->obj.p_ci_ca );\r
+ /*\r
+ * Store the local CA's ack timeout for use when computing\r
+ * the local ACK timeout.\r
+ */\r
+ p_cep->local_ack_delay =\r
+ p_cm_req->h_qp->obj.p_ci_ca->p_pnp_attr->local_ack_delay;\r
+ ci_ca_unlock_attr( p_cm_req->h_qp->obj.p_ci_ca );\r
+\r
+ /* Format the primary path. */\r
+ __format_req_path( p_cm_req->p_primary_path,\r
+ p_cep->local_ack_delay, &p_req->primary_path );\r
+\r
+ if( p_cm_req->p_alt_path )\r
+ {\r
+ /* Format the alternate path. */\r
+ __format_req_path( p_cm_req->p_alt_path,\r
+ p_cep->local_ack_delay, &p_req->alternate_path );\r
+ }\r
+ else\r
+ {\r
+ cl_memclr( &p_req->alternate_path, sizeof(req_path_info_t) );\r
+ }\r
+\r
+ /* Set the local communication in the REQ. */\r
+ p_req->local_comm_id = p_cep->local_comm_id;\r
+ p_req->sid = p_cm_req->svc_id;\r
+ p_req->local_ca_guid = p_cm_req->h_qp->obj.p_ci_ca->verbs.guid;\r
+\r
+ conn_req_set_lcl_qpn( p_cep->local_qpn, p_req );\r
+ conn_req_set_resp_res( p_cm_req->resp_res, p_req );\r
+ conn_req_set_init_depth( p_cm_req->init_depth, p_req );\r
+ conn_req_set_remote_resp_timeout( p_cm_req->remote_resp_timeout, p_req );\r
+ conn_req_set_qp_type( p_cm_req->h_qp->type, p_req );\r
+ conn_req_set_flow_ctrl( p_cm_req->flow_ctrl, p_req );\r
+ conn_req_set_starting_psn( p_cep->rq_psn, p_req );\r
+\r
+ conn_req_set_lcl_resp_timeout( p_cm_req->local_resp_timeout, p_req );\r
+ conn_req_set_retry_cnt( p_cm_req->retry_cnt, p_req );\r
+\r
+ p_req->pkey = p_cm_req->p_primary_path->pkey;\r
+\r
+ conn_req_set_mtu( ib_path_rec_mtu( p_cm_req->p_primary_path ), p_req );\r
+ conn_req_set_rnr_retry_cnt( p_cm_req->rnr_retry_cnt, p_req );\r
+\r
+ conn_req_set_max_cm_retries( p_cm_req->max_cm_retries, p_req );\r
+ status = conn_req_set_pdata(\r
+ p_cm_req->p_req_pdata, p_cm_req->req_length, p_req );\r
+\r
+ conn_req_clr_rsvd_fields( p_req );\r
+\r
+ AL_EXIT( AL_DBG_CM );\r
+ return status;\r
+}\r
+\r
+\r
+/*\r
+ * Validate a user's REQ parameters and store them into the CEP: paths\r
+ * (primary required, alternate optional, both realizable on the QP's CA\r
+ * and on the same partition), optional compare buffer, new comm ID, QPN,\r
+ * and timeout/retry values. Returns the primary path's port agent in\r
+ * *pp_port_cep on success.\r
+ */\r
+static ib_api_status_t\r
+__save_user_req(\r
+ IN kcep_t* const p_cep,\r
+ IN const ib_cm_req_t* const p_cm_req,\r
+ OUT cep_agent_t** const pp_port_cep )\r
+{\r
+ cep_agent_t *p_port_cep;\r
+\r
+ AL_ENTER( AL_DBG_CM );\r
+\r
+ if( !p_cm_req->p_primary_path )\r
+ {\r
+ AL_TRACE_EXIT( AL_DBG_ERROR, ("Invalid primary path record.\n") );\r
+ return IB_INVALID_SETTING;\r
+ }\r
+\r
+ p_cep->sid = p_cm_req->svc_id;\r
+\r
+ p_cep->idx_primary = 0;\r
+\r
+ /* A REQ callback implies a peer-to-peer connection request. */\r
+ p_cep->p2p = (p_cm_req->pfn_cm_req_cb != NULL);\r
+\r
+ if( p_cm_req->p_compare_buffer )\r
+ {\r
+ if( !p_cm_req->compare_length ||\r
+ (p_cm_req->compare_offset + p_cm_req->compare_length) >\r
+ IB_REQ_PDATA_SIZE )\r
+ {\r
+ AL_EXIT( AL_DBG_CM );\r
+ return IB_INVALID_SETTING;\r
+ }\r
+ p_cep->p_cmp_buf = cl_malloc( p_cm_req->compare_length );\r
+ if( !p_cep->p_cmp_buf )\r
+ {\r
+ AL_EXIT( AL_DBG_CM );\r
+ return IB_INSUFFICIENT_MEMORY;\r
+ }\r
+\r
+ cl_memcpy( p_cep->p_cmp_buf,\r
+ p_cm_req->p_compare_buffer, p_cm_req->compare_length );\r
+\r
+ p_cep->cmp_len = p_cm_req->compare_length;\r
+ p_cep->cmp_offset = p_cm_req->compare_offset;\r
+ }\r
+ else\r
+ {\r
+ p_cep->p_cmp_buf = NULL;\r
+ p_cep->cmp_len = 0;\r
+ p_cep->cmp_offset = 0;\r
+ }\r
+ p_cep->was_active = TRUE;\r
+\r
+ /* Validate the primary path. */\r
+ p_port_cep = __format_path_av( p_cm_req->p_primary_path, &p_cep->av[0] );\r
+ if( !p_port_cep )\r
+ {\r
+ AL_TRACE_EXIT( AL_DBG_ERROR, ("Primary path unrealizable.\n") );\r
+ return IB_INVALID_SETTING;\r
+ }\r
+\r
+ p_cep->av[0].attr.conn.seq_err_retry_cnt = p_cm_req->retry_cnt;\r
+\r
+ /* Make sure the paths will work on the desired QP. */\r
+ if( p_port_cep->h_ca->obj.p_ci_ca->verbs.guid !=\r
+ p_cm_req->h_qp->obj.p_ci_ca->verbs.guid )\r
+ {\r
+ AL_TRACE_EXIT( AL_DBG_ERROR,\r
+ ("Primary path not realizable on given QP.\n") );\r
+ return IB_INVALID_SETTING;\r
+ }\r
+\r
+ p_cep->local_ca_guid = p_port_cep->h_ca->obj.p_ci_ca->verbs.guid;\r
+\r
+ *pp_port_cep = p_port_cep;\r
+\r
+ /*\r
+ * Store the PKEY so we can ensure that alternate paths are\r
+ * on the same partition.\r
+ */\r
+ p_cep->pkey = p_cm_req->p_primary_path->pkey;\r
+ \r
+ p_cep->max_2pkt_life = ib_path_rec_pkt_life( p_cm_req->p_primary_path ) + 1;\r
+\r
+ if( p_cm_req->p_alt_path )\r
+ {\r
+ /* MTUs must match since they are specified only once. */\r
+ if( ib_path_rec_mtu( p_cm_req->p_primary_path ) !=\r
+ ib_path_rec_mtu( p_cm_req->p_alt_path ) )\r
+ {\r
+ AL_TRACE_EXIT( AL_DBG_ERROR,\r
+ ("Mismatched primary and alternate path MTUs.\n") );\r
+ return IB_INVALID_SETTING;\r
+ }\r
+\r
+ /* The PKEY must match too. */\r
+ if( p_cm_req->p_alt_path->pkey != p_cm_req->p_primary_path->pkey )\r
+ {\r
+ AL_TRACE_EXIT( AL_DBG_ERROR,\r
+ ("Mismatched pimary and alternate PKEYs.\n") );\r
+ return IB_INVALID_SETTING;\r
+ }\r
+\r
+ /* An unrealizable alternate path (NULL agent) leaves av[1] zeroed. */\r
+ p_port_cep =\r
+ __format_path_av( p_cm_req->p_alt_path, &p_cep->av[1] );\r
+ if( p_port_cep &&\r
+ p_port_cep->h_ca->obj.p_ci_ca->verbs.guid != p_cep->local_ca_guid )\r
+ {\r
+ /* Alternate path is not on same CA. */\r
+ AL_TRACE_EXIT( AL_DBG_ERROR, ("Alternate path unrealizable.\n") );\r
+ return IB_INVALID_SETTING;\r
+ }\r
+\r
+ p_cep->av[1].attr.conn.seq_err_retry_cnt = p_cm_req->retry_cnt;\r
+\r
+ p_cep->max_2pkt_life = max( p_cep->max_2pkt_life,\r
+ (ib_path_rec_pkt_life( p_cm_req->p_alt_path ) + 1) );\r
+ }\r
+ else\r
+ {\r
+ cl_memclr( &p_cep->av[1], sizeof(kcep_av_t) );\r
+ }\r
+\r
+ p_cep->p_cid->modifier++;\r
+ /*\r
+ * We don't ever want a modifier of zero for the CID at index zero\r
+ * since it would result in a total CID of zero.\r
+ */\r
+ if( !p_cep->cep.cid && !p_cep->p_cid->modifier )\r
+ p_cep->p_cid->modifier++;\r
+\r
+ /* Store pertinent information in the connection. */\r
+ p_cep->local_comm_id = p_cep->cep.cid | (p_cep->p_cid->modifier << 24);\r
+ p_cep->remote_comm_id = 0;\r
+\r
+ /* Cache the local QPN. */\r
+ p_cep->local_qpn = p_cm_req->h_qp->num;\r
+ p_cep->remote_ca_guid = 0;\r
+ p_cep->remote_qpn = 0;\r
+\r
+ /* Retry timeout is remote CM response timeout plus 2 * packet life. */\r
+ p_cep->retry_timeout = __calc_mad_timeout( p_cep->max_2pkt_life ) +\r
+ __calc_mad_timeout( p_cm_req->remote_resp_timeout );\r
+ \r
+\r
+ /* Store the retry count. */\r
+ p_cep->max_cm_retries = p_cm_req->max_cm_retries;\r
+\r
+ /*\r
+ * Clear the maximum packet lifetime, used to calculate timewait.\r
+ * It will be set when we transition into the established state.\r
+ */\r
+ p_cep->timewait_time.QuadPart = 0;\r
+\r
+ /* Use the local QPN as the starting PSN. */\r
+ p_cep->rq_psn = p_cep->local_qpn;\r
+\r
+ p_cep->rnr_nak_timeout = p_cm_req->rnr_nak_timeout;\r
+\r
+ AL_EXIT( AL_DBG_CM );\r
+ return IB_SUCCESS;\r
+}\r
+\r
+\r
+/*\r
+ * Stage a REQ on the CEP: validate and save the user's request, allocate\r
+ * and format the REQ MAD, and return the QP INIT modify attributes in\r
+ * *p_init. The MAD is sent later by al_cep_send_req.\r
+ * Peer-to-peer (pfn_cm_req_cb) is not yet supported.\r
+ */\r
+ib_api_status_t\r
+al_cep_pre_req(\r
+ IN ib_al_handle_t h_al,\r
+ IN net32_t cid,\r
+ IN const ib_cm_req_t* const p_cm_req,\r
+ OUT ib_qp_mod_t* const p_init )\r
+{\r
+ ib_api_status_t status;\r
+ kcep_t *p_cep;\r
+ KLOCK_QUEUE_HANDLE hdl;\r
+ cep_agent_t *p_port_cep;\r
+\r
+ AL_ENTER( AL_DBG_CM );\r
+\r
+ CL_ASSERT( h_al );\r
+ CL_ASSERT( p_cm_req );\r
+ CL_ASSERT( p_init );\r
+\r
+ /* TODO: Code P2P support. */\r
+ if( p_cm_req->pfn_cm_req_cb )\r
+ {\r
+ AL_EXIT( AL_DBG_CM );\r
+ return IB_UNSUPPORTED;\r
+ }\r
+\r
+ KeAcquireInStackQueuedSpinLock( &gp_cep_mgr->lock, &hdl );\r
+ p_cep = __lookup_cep( h_al, cid );\r
+ if( !p_cep )\r
+ {\r
+ KeReleaseInStackQueuedSpinLock( &hdl );\r
+ AL_EXIT( AL_DBG_CM );\r
+ return IB_INVALID_HANDLE;\r
+ }\r
+\r
+ switch( p_cep->state )\r
+ {\r
+ case CEP_STATE_PRE_REQ:\r
+ /* Re-staging a REQ: discard the previously formatted MAD. */\r
+ CL_ASSERT( p_cep->p_mad );\r
+ ib_put_mad( p_cep->p_mad );\r
+ p_cep->p_mad = NULL;\r
+ /* Fall through. */\r
+ case CEP_STATE_IDLE:\r
+ status = __save_user_req( p_cep, p_cm_req, &p_port_cep );\r
+ if( status != IB_SUCCESS )\r
+ break;\r
+\r
+ status =\r
+ ib_get_mad( p_port_cep->pool_key, MAD_BLOCK_SIZE, &p_cep->p_mad );\r
+ if( status != IB_SUCCESS )\r
+ break;\r
+\r
+ status = __format_req( p_cep, p_cm_req );\r
+ if( status != IB_SUCCESS )\r
+ {\r
+ AL_TRACE_EXIT( AL_DBG_ERROR, ("Invalid pdata length.\n") );\r
+ ib_put_mad( p_cep->p_mad );\r
+ p_cep->p_mad = NULL;\r
+ break;\r
+ }\r
+\r
+ /* Format the INIT qp modify attributes. */\r
+ p_init->req_state = IB_QPS_INIT;\r
+ p_init->state.init.primary_port =\r
+ p_cep->av[p_cep->idx_primary].attr.port_num;\r
+ p_init->state.init.qkey = 0;\r
+ p_init->state.init.pkey_index =\r
+ p_cep->av[p_cep->idx_primary].pkey_index;\r
+ p_init->state.init.access_ctrl = IB_AC_LOCAL_WRITE;\r
+\r
+ p_cep->state = CEP_STATE_PRE_REQ;\r
+ break;\r
+\r
+ case CEP_STATE_TIMEWAIT:\r
+ status = IB_QP_IN_TIMEWAIT;\r
+ break;\r
+\r
+ default:\r
+ status = IB_INVALID_STATE;\r
+ }\r
+ KeReleaseInStackQueuedSpinLock( &hdl );\r
+ AL_EXIT( AL_DBG_CM );\r
+ return status;\r
+}\r
+\r
+\r
+/*\r
+ * Send the REQ MAD previously staged by al_cep_pre_req, transitioning\r
+ * the CEP to REQ_SENT on success or back to IDLE on failure. The CEP\r
+ * must be in the PRE_REQ state.\r
+ */\r
+ib_api_status_t\r
+al_cep_send_req(\r
+ IN ib_al_handle_t h_al,\r
+ IN net32_t cid )\r
+{\r
+ ib_api_status_t status;\r
+ kcep_t *p_cep;\r
+ KLOCK_QUEUE_HANDLE hdl;\r
+ cep_agent_t *p_port_cep;\r
+\r
+ AL_ENTER( AL_DBG_CM );\r
+\r
+ CL_ASSERT( h_al );\r
+\r
+ KeAcquireInStackQueuedSpinLock( &gp_cep_mgr->lock, &hdl );\r
+ p_cep = __lookup_cep( h_al, cid );\r
+ if( !p_cep )\r
+ {\r
+ KeReleaseInStackQueuedSpinLock( &hdl );\r
+ AL_EXIT( AL_DBG_CM );\r
+ return IB_INVALID_HANDLE;\r
+ }\r
+\r
+ switch( p_cep->state )\r
+ {\r
+ case CEP_STATE_PRE_REQ:\r
+ CL_ASSERT( p_cep->p_mad );\r
+ p_port_cep = __get_cep_agent( p_cep );\r
+ if( !p_port_cep )\r
+ {\r
+ ib_put_mad( p_cep->p_mad );\r
+ p_cep->state = CEP_STATE_IDLE;\r
+ status = IB_INVALID_SETTING;\r
+ }\r
+ else\r
+ {\r
+ status = __cep_send_retry( p_port_cep, p_cep, p_cep->p_mad );\r
+\r
+ if( status == IB_SUCCESS )\r
+ p_cep->state = CEP_STATE_REQ_SENT;\r
+ else\r
+ p_cep->state = CEP_STATE_IDLE;\r
+ }\r
+ /* Ownership of the MAD passed to the send path (or it was freed). */\r
+ p_cep->p_mad = NULL;\r
+ break;\r
+\r
+ default:\r
+ status = IB_INVALID_STATE;\r
+ }\r
+ KeReleaseInStackQueuedSpinLock( &hdl );\r
+ AL_EXIT( AL_DBG_CM );\r
+ return status;\r
+}\r
+\r
+\r
+/*\r
+ * Store the user's REP parameters into the CEP: local QPN/PSN, initiator\r
+ * depth, and responder resources trimmed to the CA's maximum.\r
+ */\r
+static void\r
+__save_user_rep(\r
+ IN kcep_t* const p_cep,\r
+ IN const ib_cm_rep_t* const p_cm_rep )\r
+{\r
+ AL_ENTER( AL_DBG_CM );\r
+\r
+ /* Cache the local QPN. */\r
+ p_cep->local_qpn = p_cm_rep->h_qp->num;\r
+ p_cep->rq_psn = p_cep->local_qpn;\r
+ p_cep->init_depth = p_cm_rep->init_depth;\r
+\r
+ ci_ca_lock_attr( p_cm_rep->h_qp->obj.p_ci_ca );\r
+ /* Check the CA's responder resource max and trim if necessary. */\r
+ if( (p_cm_rep->h_qp->obj.p_ci_ca->p_pnp_attr->max_qp_resp_res <\r
+ p_cep->req_init_depth) )\r
+ {\r
+ /*\r
+ * The CA cannot handle the requested responder resources.\r
+ * Set the response to the CA's maximum.\r
+ */\r
+ p_cep->resp_res = \r
+ p_cm_rep->h_qp->obj.p_ci_ca->p_pnp_attr->max_qp_resp_res;\r
+ }\r
+ else\r
+ {\r
+ /* The CA supports the requested responder resources. */\r
+ p_cep->resp_res = p_cep->req_init_depth;\r
+ }\r
+ ci_ca_unlock_attr( p_cm_rep->h_qp->obj.p_ci_ca );\r
+\r
+ p_cep->rnr_nak_timeout = p_cm_rep->rnr_nak_timeout;\r
+\r
+ AL_EXIT( AL_DBG_CM );\r
+}\r
+\r
+\r
+/*\r
+ * Fill the pre-allocated REP MAD (p_cep->p_mad) from the user's reply:\r
+ * header, address vector, comm IDs, QPN/PSN, failover handling, resource\r
+ * counts, and private data. Returns the status of the private data copy.\r
+ */\r
+static ib_api_status_t\r
+__format_rep(\r
+ IN kcep_t* const p_cep,\r
+ IN const ib_cm_rep_t* const p_cm_rep )\r
+{\r
+ ib_api_status_t status;\r
+ mad_cm_rep_t *p_rep;\r
+\r
+ AL_ENTER( AL_DBG_CM );\r
+\r
+ CL_ASSERT( p_cep );\r
+ CL_ASSERT( p_cm_rep );\r
+ CL_ASSERT( p_cep->p_mad );\r
+\r
+ /* Format the MAD header. */\r
+ __format_mad_hdr( p_cep->p_mad->p_mad_buf, p_cep, CM_REP_ATTR_ID );\r
+\r
+ /* Set the addressing information in the MAD. */\r
+ __format_mad_av( p_cep->p_mad, &p_cep->av[p_cep->idx_primary] );\r
+\r
+ p_rep = (mad_cm_rep_t*)p_cep->p_mad->p_mad_buf;\r
+\r
+ p_rep->local_comm_id = p_cep->local_comm_id;\r
+ p_rep->remote_comm_id = p_cep->remote_comm_id;\r
+ conn_rep_set_lcl_qpn( p_cep->local_qpn, p_rep );\r
+ conn_rep_set_starting_psn( p_cep->rq_psn, p_rep );\r
+\r
+ if( p_cm_rep->failover_accepted != IB_FAILOVER_ACCEPT_SUCCESS )\r
+ {\r
+ /*\r
+ * Failover rejected - clear the alternate AV information.\r
+ * Note that at this point, the alternate is always at index 1.\r
+ */\r
+ cl_memclr( &p_cep->av[1], sizeof(kcep_av_t) );\r
+ }\r
+ else if( !p_cep->av[1].port_guid )\r
+ {\r
+ /*\r
+ * Always reject alternate path if it's zero. We might\r
+ * have cleared the AV because it was unrealizable when\r
+ * processing the REQ.\r
+ */\r
+ conn_rep_set_failover( IB_FAILOVER_ACCEPT_ERROR, p_rep );\r
+ }\r
+ else\r
+ {\r
+ conn_rep_set_failover( p_cm_rep->failover_accepted, p_rep );\r
+ }\r
+\r
+ p_rep->resp_resources = p_cep->resp_res;\r
+\r
+ ci_ca_lock_attr( p_cm_rep->h_qp->obj.p_ci_ca );\r
+ conn_rep_set_target_ack_delay(\r
+ p_cm_rep->h_qp->obj.p_ci_ca->p_pnp_attr->local_ack_delay, p_rep );\r
+ ci_ca_unlock_attr( p_cm_rep->h_qp->obj.p_ci_ca );\r
+\r
+ p_rep->initiator_depth = p_cep->init_depth;\r
+\r
+ conn_rep_set_e2e_flow_ctl( p_cm_rep->flow_ctrl, p_rep );\r
+\r
+ /* RNR retry count is a 3-bit field. */\r
+ conn_rep_set_rnr_retry_cnt(\r
+ (uint8_t)(p_cm_rep->rnr_retry_cnt & 0x07), p_rep );\r
+\r
+ /* Local CA guid should have been set when processing the received REQ. */\r
+ CL_ASSERT( p_cep->local_ca_guid );\r
+ p_rep->local_ca_guid = p_cep->local_ca_guid;\r
+\r
+ status = conn_rep_set_pdata(\r
+ p_cm_rep->p_rep_pdata, p_cm_rep->rep_length, p_rep );\r
+\r
+ conn_rep_clr_rsvd_fields( p_rep );\r
+\r
+ AL_EXIT( AL_DBG_CM );\r
+ return status;\r
+}\r
+\r
+\r
+ib_api_status_t\r
+al_cep_pre_rep(\r
+ IN ib_al_handle_t h_al,\r
+ IN net32_t cid,\r
+ IN void *context,\r
+ IN const ib_cm_rep_t* const p_cm_rep,\r
+ OUT ib_qp_mod_t* const p_init )\r
+{\r
+ ib_api_status_t status;\r
+ kcep_t *p_cep;\r
+ KLOCK_QUEUE_HANDLE hdl;\r
+ cep_agent_t *p_port_cep;\r
+\r
+ AL_ENTER( AL_DBG_CM );\r
+\r
+ CL_ASSERT( h_al );\r
+ CL_ASSERT( p_cm_rep );\r
+ CL_ASSERT( p_init );\r
+\r
+ KeAcquireInStackQueuedSpinLock( &gp_cep_mgr->lock, &hdl );\r
+ p_cep = __lookup_cep( h_al, cid );\r
+ if( !p_cep )\r
+ {\r
+ KeReleaseInStackQueuedSpinLock( &hdl );\r
+ AL_EXIT( AL_DBG_CM );\r
+ return IB_INVALID_HANDLE;\r
+ }\r
+\r
+ switch( p_cep->state )\r
+ {\r
+ case CEP_STATE_PRE_REP:\r
+ case CEP_STATE_PRE_REP_MRA_SENT:\r
+ CL_ASSERT( p_cep->p_mad );\r
+ ib_put_mad( p_cep->p_mad );\r
+ p_cep->p_mad = NULL;\r
+ /* Fall through. */\r
+ case CEP_STATE_REQ_RCVD:\r
+ case CEP_STATE_REQ_MRA_SENT:\r
+ CL_ASSERT( !p_cep->p_mad );\r
+ status =\r
+ __cep_get_mad( p_cep, CM_REP_ATTR_ID, &p_port_cep, &p_cep->p_mad );\r
+ if( status != IB_SUCCESS )\r
+ break;\r
+\r
+ __save_user_rep( p_cep, p_cm_rep );\r
+\r
+ status = __format_rep( p_cep, p_cm_rep );\r
+ if( status != IB_SUCCESS )\r
+ {\r
+ ib_put_mad( p_cep->p_mad );\r
+ p_cep->p_mad = NULL;\r
+ break;\r
+ }\r
+\r
+ /* Format the INIT qp modify attributes. */\r
+ p_init->req_state = IB_QPS_INIT;\r
+ p_init->state.init.primary_port =\r
+ p_cep->av[p_cep->idx_primary].attr.port_num;\r
+ p_init->state.init.qkey = 0;\r
+ p_init->state.init.pkey_index =\r
+ p_cep->av[p_cep->idx_primary].pkey_index;\r
+ p_init->state.init.access_ctrl = IB_AC_LOCAL_WRITE;\r
+\r
+ p_cep->cep.context = context;\r
+\r
+ /* Just OR in the PREP bit into the state. */\r
+ p_cep->state |= CEP_STATE_PREP;\r
+ break;\r
+\r
+ default:\r
+ status = IB_INVALID_STATE;\r
+ }\r
+ KeReleaseInStackQueuedSpinLock( &hdl );\r
+ AL_EXIT( AL_DBG_CM );\r
+ return status;\r
+}\r
+\r
+\r
+ib_api_status_t\r
+al_cep_send_rep(\r
+ IN ib_al_handle_t h_al,\r
+ IN net32_t cid )\r
+{\r
+ ib_api_status_t status;\r
+ kcep_t *p_cep;\r
+ KLOCK_QUEUE_HANDLE hdl;\r
+ cep_agent_t *p_port_cep;\r
+\r
+ AL_ENTER( AL_DBG_CM );\r
+\r
+ CL_ASSERT( h_al );\r
+\r
+ KeAcquireInStackQueuedSpinLock( &gp_cep_mgr->lock, &hdl );\r
+ p_cep = __lookup_cep( h_al, cid );\r
+ if( !p_cep )\r
+ {\r
+ KeReleaseInStackQueuedSpinLock( &hdl );\r
+ AL_EXIT( AL_DBG_CM );\r
+ return IB_INVALID_HANDLE;\r
+ }\r
+\r
+ switch( p_cep->state )\r
+ {\r
+ case CEP_STATE_PRE_REP:\r
+ case CEP_STATE_PRE_REP_MRA_SENT:\r
+ CL_ASSERT( p_cep->p_mad );\r
+ p_port_cep = __get_cep_agent( p_cep );\r
+ if( !p_port_cep )\r
+ {\r
+ ib_put_mad( p_cep->p_mad );\r
+ p_cep->state = CEP_STATE_IDLE;\r
+ status = IB_INSUFFICIENT_RESOURCES;\r
+ }\r
+ else\r
+ {\r
+ status = __cep_send_retry( p_port_cep, p_cep, p_cep->p_mad );\r
+ if( status == IB_SUCCESS )\r
+ {\r
+ p_cep->state = CEP_STATE_REP_SENT;\r
+ }\r
+ else\r
+ {\r
+ __remove_cep( p_cep );\r
+ p_cep->state = CEP_STATE_IDLE;\r
+ }\r
+ }\r
+ p_cep->p_mad = NULL;\r
+ break;\r
+\r
+ default:\r
+ status = IB_INVALID_STATE;\r
+ }\r
+ KeReleaseInStackQueuedSpinLock( &hdl );\r
+ AL_EXIT( AL_DBG_CM );\r
+ return status;\r
+}\r
+\r
+\r
+static inline ib_api_status_t\r
+__format_rtu(\r
+ IN kcep_t* const p_cep, \r
+ IN const uint8_t* p_pdata OPTIONAL,\r
+ IN uint8_t pdata_len,\r
+ IN OUT ib_mad_element_t* const p_mad )\r
+{\r
+ ib_api_status_t status;\r
+ mad_cm_rtu_t *p_rtu;\r
+\r
+ AL_ENTER( AL_DBG_CM );\r
+\r
+ p_rtu = (mad_cm_rtu_t*)p_mad->p_mad_buf;\r
+\r
+ p_rtu->local_comm_id = p_cep->local_comm_id;\r
+ p_rtu->remote_comm_id = p_cep->remote_comm_id;\r
+\r
+ /* copy optional data */\r
+ status = conn_rtu_set_pdata( p_pdata, pdata_len, p_rtu );\r
+\r
+ /* Store the RTU MAD so we can repeat it if we get a repeated REP. */\r
+ if( status == IB_SUCCESS )\r
+ p_cep->mads.rtu = *p_rtu;\r
+\r
+ AL_EXIT( AL_DBG_CM );\r
+ return status;\r
+}\r
+\r
+\r
+ib_api_status_t\r
+al_cep_rtu(\r
+ IN ib_al_handle_t h_al,\r
+ IN net32_t cid,\r
+ IN const uint8_t* p_pdata OPTIONAL,\r
+ IN uint8_t pdata_len )\r
+{\r
+ ib_api_status_t status;\r
+ kcep_t *p_cep;\r
+ KLOCK_QUEUE_HANDLE hdl;\r
+ cep_agent_t *p_port_cep;\r
+ ib_mad_element_t *p_mad;\r
+\r
+ AL_ENTER( AL_DBG_CM );\r
+\r
+ CL_ASSERT( h_al );\r
+\r
+ KeAcquireInStackQueuedSpinLock( &gp_cep_mgr->lock, &hdl );\r
+ p_cep = __lookup_cep( h_al, cid );\r
+ if( !p_cep )\r
+ {\r
+ KeReleaseInStackQueuedSpinLock( &hdl );\r
+ AL_EXIT( AL_DBG_CM );\r
+ return IB_INVALID_HANDLE;\r
+ }\r
+\r
+ switch( p_cep->state )\r
+ {\r
+ case CEP_STATE_REP_RCVD:\r
+ case CEP_STATE_REP_MRA_SENT:\r
+ status = __cep_get_mad( p_cep, CM_RTU_ATTR_ID, &p_port_cep, &p_mad );\r
+ if( status != IB_SUCCESS )\r
+ break;\r
+\r
+ status = __format_rtu( p_cep, p_pdata, pdata_len, p_mad );\r
+ if( status != IB_SUCCESS )\r
+ {\r
+ ib_put_mad( p_mad );\r
+ break;\r
+ }\r
+\r
+ /* Update the timewait time. */\r
+ __calc_timewait( p_cep );\r
+\r
+ p_cep->state = CEP_STATE_ESTABLISHED;\r
+\r
+ __cep_send_mad( p_port_cep, p_mad );\r
+ /* Send failures will get another chance if we receive a repeated REP. */\r
+ status = IB_SUCCESS;\r
+ break;\r
+\r
+ default:\r
+ status = IB_INVALID_STATE;\r
+ }\r
+ KeReleaseInStackQueuedSpinLock( &hdl );\r
+ AL_EXIT( AL_DBG_CM );\r
+ return status;\r
+}\r
+\r
+\r
+ib_api_status_t\r
+al_cep_rej(\r
+ IN ib_al_handle_t h_al,\r
+ IN net32_t cid,\r
+ IN ib_rej_status_t rej_status,\r
+ IN const uint8_t* const p_ari,\r
+ IN uint8_t ari_len,\r
+ IN const uint8_t* const p_pdata,\r
+ IN uint8_t pdata_len )\r
+{\r
+ ib_api_status_t status;\r
+ kcep_t *p_cep;\r
+ KLOCK_QUEUE_HANDLE hdl;\r
+\r
+ AL_ENTER( AL_DBG_CM );\r
+\r
+ CL_ASSERT( h_al );\r
+\r
+ KeAcquireInStackQueuedSpinLock( &gp_cep_mgr->lock, &hdl );\r
+ p_cep = __lookup_cep( h_al, cid );\r
+ if( !p_cep )\r
+ {\r
+ KeReleaseInStackQueuedSpinLock( &hdl );\r
+ AL_EXIT( AL_DBG_CM );\r
+ return IB_INVALID_HANDLE;\r
+ }\r
+\r
+ switch( p_cep->state )\r
+ {\r
+ case CEP_STATE_REQ_RCVD:\r
+ case CEP_STATE_REQ_MRA_SENT:\r
+ status = __do_cep_rej(\r
+ p_cep, rej_status, p_ari, ari_len, p_pdata, pdata_len );\r
+ __remove_cep( p_cep );\r
+ p_cep->state = CEP_STATE_IDLE;\r
+ break;\r
+\r
+ case CEP_STATE_REP_RCVD:\r
+ case CEP_STATE_REP_MRA_SENT:\r
+ status = __do_cep_rej(\r
+ p_cep, rej_status, p_ari, ari_len, p_pdata, pdata_len );\r
+ p_cep->state = CEP_STATE_TIMEWAIT;\r
+ __insert_timewait( p_cep );\r
+ break;\r
+\r
+ default:\r
+ status = IB_INVALID_STATE;\r
+ }\r
+\r
+ KeReleaseInStackQueuedSpinLock( &hdl );\r
+ AL_EXIT( AL_DBG_CM );\r
+ return status;\r
+}\r
+\r
+\r
+static ib_api_status_t\r
+__format_mra(\r
+ IN kcep_t* const p_cep,\r
+ IN const uint8_t msg_mraed,\r
+ IN const ib_cm_mra_t* const p_cm_mra,\r
+ IN OUT ib_mad_element_t* const p_mad )\r
+{\r
+ ib_api_status_t status;\r
+ mad_cm_mra_t *p_mra;\r
+\r
+ AL_ENTER( AL_DBG_CM );\r
+\r
+ p_mra = (mad_cm_mra_t*)p_mad->p_mad_buf;\r
+\r
+ conn_mra_set_msg_mraed( msg_mraed, p_mra );\r
+\r
+ p_mra->local_comm_id = p_cep->local_comm_id;\r
+ p_mra->remote_comm_id = p_cep->remote_comm_id;\r
+\r
+ conn_mra_set_svc_timeout( p_cm_mra->svc_timeout, p_mra );\r
+ status = conn_mra_set_pdata(\r
+ p_cm_mra->p_mra_pdata, p_cm_mra->mra_length, p_mra );\r
+ if( status != IB_SUCCESS )\r
+ {\r
+ AL_EXIT( AL_DBG_CM );\r
+ return status;\r
+ }\r
+ conn_mra_clr_rsvd_fields( p_mra );\r
+\r
+ /* Save the MRA so we can repeat it if we get a repeated message. */\r
+ p_cep->mads.mra = *p_mra;\r
+\r
+ AL_EXIT( AL_DBG_CM );\r
+ return IB_SUCCESS;\r
+}\r
+\r
+\r
+ib_api_status_t\r
+al_cep_mra(\r
+ IN ib_al_handle_t h_al,\r
+ IN net32_t cid,\r
+ IN const ib_cm_mra_t* const p_cm_mra )\r
+{\r
+ ib_api_status_t status;\r
+ kcep_t *p_cep;\r
+ KLOCK_QUEUE_HANDLE hdl;\r
+ cep_agent_t *p_port_cep;\r
+ ib_mad_element_t *p_mad;\r
+ uint8_t msg_mraed;\r
+\r
+ AL_ENTER( AL_DBG_CM );\r
+\r
+ CL_ASSERT( h_al );\r
+ CL_ASSERT( p_cm_mra );\r
+\r
+ KeAcquireInStackQueuedSpinLock( &gp_cep_mgr->lock, &hdl );\r
+ p_cep = __lookup_cep( h_al, cid );\r
+ if( !p_cep )\r
+ {\r
+ KeReleaseInStackQueuedSpinLock( &hdl );\r
+ AL_EXIT( AL_DBG_CM );\r
+ return IB_INVALID_HANDLE;\r
+ }\r
+\r
+ switch( p_cep->state )\r
+ {\r
+ case CEP_STATE_REQ_RCVD:\r
+ case CEP_STATE_PRE_REP:\r
+ msg_mraed = 0;\r
+ break;\r
+\r
+ case CEP_STATE_REP_RCVD:\r
+ msg_mraed = 1;\r
+ break;\r
+\r
+ case CEP_STATE_PRE_APR:\r
+ case CEP_STATE_LAP_RCVD:\r
+ msg_mraed = 2;\r
+ break;\r
+\r
+ default:\r
+ status = IB_INVALID_STATE;\r
+ goto done;\r
+ }\r
+\r
+ status = __cep_get_mad( p_cep, CM_MRA_ATTR_ID, &p_port_cep, &p_mad );\r
+ if( status != IB_SUCCESS )\r
+ goto done;\r
+\r
+ status = __format_mra( p_cep, msg_mraed, p_cm_mra, p_mad );\r
+ if( status != IB_SUCCESS )\r
+ {\r
+ ib_put_mad( p_mad );\r
+ goto done;\r
+ }\r
+\r
+ p_cep->state |= CEP_STATE_MRA;\r
+\r
+ __cep_send_mad( p_port_cep, p_mad );\r
+ status = IB_SUCCESS;\r
+\r
+done:\r
+ KeReleaseInStackQueuedSpinLock( &hdl );\r
+ AL_EXIT( AL_DBG_CM );\r
+ return status;\r
+}\r
+\r
+\r
+\r
+static ib_api_status_t\r
+__format_lap(\r
+ IN kcep_t* const p_cep,\r
+ IN const ib_cm_lap_t* const p_cm_lap,\r
+ IN OUT ib_mad_element_t* const p_mad )\r
+{\r
+ ib_api_status_t status;\r
+ mad_cm_lap_t *p_lap;\r
+\r
+ AL_ENTER( AL_DBG_CM );\r
+\r
+ __format_mad_hdr( p_mad->p_mad_buf, p_cep, CM_LAP_ATTR_ID );\r
+\r
+ __format_mad_av( p_mad, &p_cep->av[p_cep->idx_primary] );\r
+\r
+ p_lap = (mad_cm_lap_t*)p_mad->p_mad_buf;\r
+\r
+ p_lap->alternate_path.local_lid = p_cm_lap->p_alt_path->slid;\r
+ p_lap->alternate_path.remote_lid = p_cm_lap->p_alt_path->dlid;\r
+ p_lap->alternate_path.local_gid = p_cm_lap->p_alt_path->sgid;\r
+ p_lap->alternate_path.remote_gid = p_cm_lap->p_alt_path->dgid;\r
+\r
+ /* Set Flow Label and Packet Rate */\r
+ conn_lap_path_set_flow_lbl(\r
+ ib_path_rec_flow_lbl( p_cm_lap->p_alt_path ), &p_lap->alternate_path );\r
+ conn_lap_path_set_tclass(\r
+ p_cm_lap->p_alt_path->tclass, &p_lap->alternate_path );\r
+\r
+ p_lap->alternate_path.hop_limit =\r
+ ib_path_rec_hop_limit( p_cm_lap->p_alt_path );\r
+ conn_lap_path_set_pkt_rate(\r
+ ib_path_rec_rate( p_cm_lap->p_alt_path ), &p_lap->alternate_path );\r
+\r
+ /* Set SL and Subnet Local */\r
+ conn_lap_path_set_svc_lvl(\r
+ ib_path_rec_sl( p_cm_lap->p_alt_path ), &p_lap->alternate_path );\r
+ conn_lap_path_set_subn_lcl(\r
+ ib_gid_is_link_local( &p_cm_lap->p_alt_path->dgid ),\r
+ &p_lap->alternate_path );\r
+\r
+ conn_lap_path_set_lcl_ack_timeout(\r
+ calc_lcl_ack_timeout( ib_path_rec_pkt_life( p_cm_lap->p_alt_path ) + 1,\r
+ p_cep->local_ack_delay), &p_lap->alternate_path );\r
+\r
+ conn_lap_path_clr_rsvd_fields( &p_lap->alternate_path );\r
+\r
+ p_lap->local_comm_id = p_cep->local_comm_id;\r
+ p_lap->remote_comm_id = p_cep->remote_comm_id;\r
+ conn_lap_set_remote_qpn( p_cep->remote_qpn, p_lap );\r
+ conn_lap_set_resp_timeout( p_cm_lap->remote_resp_timeout, p_lap );\r
+\r
+ status = conn_lap_set_pdata(\r
+ p_cm_lap->p_lap_pdata, p_cm_lap->lap_length, p_lap );\r
+ if( status != IB_SUCCESS )\r
+ {\r
+ AL_TRACE_EXIT( AL_DBG_ERROR, ("lap pdata invalid.\n") );\r
+ return status;\r
+ }\r
+\r
+ conn_lap_clr_rsvd_fields( p_lap );\r
+\r
+ AL_EXIT( AL_DBG_CM );\r
+ return IB_SUCCESS;\r
+}\r
+\r
+\r
+ib_api_status_t\r
+al_cep_lap(\r
+ IN ib_al_handle_t h_al,\r
+ IN net32_t cid,\r
+ IN const ib_cm_lap_t* const p_cm_lap )\r
+{\r
+ ib_api_status_t status;\r
+ kcep_t *p_cep;\r
+ KLOCK_QUEUE_HANDLE hdl;\r
+ cep_agent_t *p_port_cep;\r
+ ib_mad_element_t *p_mad;\r
+\r
+ AL_ENTER( AL_DBG_CM );\r
+\r
+ CL_ASSERT( h_al );\r
+ CL_ASSERT( p_cm_lap );\r
+ CL_ASSERT( p_cm_lap->p_alt_path );\r
+\r
+ KeAcquireInStackQueuedSpinLock( &gp_cep_mgr->lock, &hdl );\r
+ p_cep = __lookup_cep( h_al, cid );\r
+ if( !p_cep )\r
+ {\r
+ KeReleaseInStackQueuedSpinLock( &hdl );\r
+ AL_EXIT( AL_DBG_CM );\r
+ return IB_INVALID_HANDLE;\r
+ }\r
+\r
+ switch( p_cep->state )\r
+ {\r
+ case CEP_STATE_ESTABLISHED:\r
+ if( !p_cep->was_active )\r
+ {\r
+ /* Only the side that took the active role can initiate a LAP. */\r
+ AL_TRACE( AL_DBG_ERROR,\r
+ ("Only the active side of a connection can initiate a LAP.\n") );\r
+ status = IB_INVALID_STATE;\r
+ break;\r
+ }\r
+\r
+ /*\r
+ * Format the AV information - store in the temporary location until we\r
+ * get the APR indicating acceptance.\r
+ */\r
+ p_port_cep = __format_path_av( p_cm_lap->p_alt_path, &p_cep->alt_av );\r
+ if( !p_port_cep )\r
+ {\r
+ AL_TRACE( AL_DBG_ERROR, ("Alternate path invalid!\n") );\r
+ status = IB_INVALID_SETTING;\r
+ break;\r
+ }\r
+\r
+ p_cep->alt_av.attr.conn.seq_err_retry_cnt =\r
+ p_cep->av[p_cep->idx_primary].attr.conn.seq_err_retry_cnt;\r
+ p_cep->alt_av.attr.conn.rnr_retry_cnt =\r
+ p_cep->av[p_cep->idx_primary].attr.conn.rnr_retry_cnt;\r
+\r
+ if( p_port_cep->h_ca->obj.p_ci_ca->verbs.guid != p_cep->local_ca_guid )\r
+ {\r
+ AL_TRACE( AL_DBG_ERROR,\r
+ ("Alternate CA GUID different from current!\n") );\r
+ status = IB_INVALID_SETTING;\r
+ break;\r
+ }\r
+\r
+ /* Store the alternate path info temporarily. */\r
+ p_cep->alt_2pkt_life = ib_path_rec_pkt_life( p_cm_lap->p_alt_path ) + 1;\r
+\r
+ status = ib_get_mad( p_port_cep->pool_key, MAD_BLOCK_SIZE, &p_mad );\r
+ if( status != IB_SUCCESS )\r
+ break;\r
+\r
+ status = __format_lap( p_cep, p_cm_lap, p_mad );\r
+ if( status != IB_SUCCESS )\r
+ break;\r
+\r
+ status = __cep_send_retry( p_port_cep, p_cep, p_mad );\r
+ if( status == IB_SUCCESS )\r
+ p_cep->state = CEP_STATE_LAP_SENT;\r
+ break;\r
+\r
+ default:\r
+ status = IB_INVALID_STATE;\r
+ break;\r
+ }\r
+ KeReleaseInStackQueuedSpinLock( &hdl );\r
+ AL_EXIT( AL_DBG_CM );\r
+ return status;\r
+}\r
+\r
+\r
+static ib_api_status_t\r
+__format_apr(\r
+ IN kcep_t* const p_cep,\r
+ IN const ib_cm_apr_t* const p_cm_apr,\r
+ IN OUT ib_mad_element_t* const p_mad )\r
+{\r
+ ib_api_status_t status;\r
+ mad_cm_apr_t *p_apr;\r
+\r
+ AL_ENTER( AL_DBG_CM );\r
+\r
+ p_apr = (mad_cm_apr_t*)p_mad->p_mad_buf;\r
+\r
+ p_apr->local_comm_id = p_cep->local_comm_id;\r
+ p_apr->remote_comm_id = p_cep->remote_comm_id;\r
+ p_apr->status = p_cm_apr->apr_status;\r
+\r
+ status = conn_apr_set_apr_info( p_cm_apr->p_info->data,\r
+ p_cm_apr->info_length, p_apr );\r
+ if( status != IB_SUCCESS )\r
+ {\r
+ AL_TRACE_EXIT( AL_DBG_ERROR, ("apr_info invalid\n") );\r
+ return status;\r
+ }\r
+\r
+ status = conn_apr_set_pdata( p_cm_apr->p_apr_pdata,\r
+ p_cm_apr->apr_length, p_apr );\r
+ if( status != IB_SUCCESS )\r
+ {\r
+ AL_TRACE_EXIT( AL_DBG_ERROR, ("apr pdata invalid\n") );\r
+ return status;\r
+ }\r
+\r
+ conn_apr_clr_rsvd_fields( p_apr );\r
+ AL_EXIT( AL_DBG_CM );\r
+ return IB_SUCCESS;\r
+}\r
+\r
+\r
+ib_api_status_t\r
+al_cep_pre_apr(\r
+ IN ib_al_handle_t h_al,\r
+ IN net32_t cid,\r
+ IN const ib_cm_apr_t* const p_cm_apr,\r
+ OUT ib_qp_mod_t* const p_apr )\r
+{\r
+ ib_api_status_t status;\r
+ kcep_t *p_cep;\r
+ KLOCK_QUEUE_HANDLE hdl;\r
+ cep_agent_t *p_port_cep;\r
+\r
+ AL_ENTER( AL_DBG_CM );\r
+\r
+ CL_ASSERT( h_al );\r
+ CL_ASSERT( p_cm_apr );\r
+ CL_ASSERT( p_apr );\r
+\r
+ KeAcquireInStackQueuedSpinLock( &gp_cep_mgr->lock, &hdl );\r
+ p_cep = __lookup_cep( h_al, cid );\r
+ if( !p_cep )\r
+ {\r
+ KeReleaseInStackQueuedSpinLock( &hdl );\r
+ AL_EXIT( AL_DBG_CM );\r
+ return IB_INVALID_HANDLE;\r
+ }\r
+\r
+ switch( p_cep->state )\r
+ {\r
+ case CEP_STATE_PRE_APR:\r
+ case CEP_STATE_PRE_APR_MRA_SENT:\r
+ CL_ASSERT( p_cep->p_mad );\r
+ ib_put_mad( p_cep->p_mad );\r
+ p_cep->p_mad = NULL;\r
+ /* Fall through. */\r
+ case CEP_STATE_LAP_RCVD:\r
+ case CEP_STATE_LAP_MRA_SENT:\r
+ CL_ASSERT( !p_cep->p_mad );\r
+ status = __cep_get_mad( p_cep, CM_APR_ATTR_ID, &p_port_cep, &p_cep->p_mad );\r
+ if( status != IB_SUCCESS )\r
+ break;\r
+\r
+ status = __format_apr( p_cep, p_cm_apr, p_cep->p_mad );\r
+ if( status != IB_SUCCESS )\r
+ {\r
+ ib_put_mad( p_cep->p_mad );\r
+ p_cep->p_mad = NULL;\r
+ break;\r
+ }\r
+\r
+ if( !p_cm_apr->apr_status )\r
+ {\r
+ /*\r
+ * Copy the temporary AV and port GUID information into\r
+ * the alternate path.\r
+ */\r
+ p_cep->av[((p_cep->idx_primary + 1) & 0x1)] = p_cep->alt_av;\r
+\r
+ /* Update our maximum packet lifetime. */\r
+ p_cep->max_2pkt_life =\r
+ max( p_cep->max_2pkt_life, p_cep->alt_2pkt_life );\r
+\r
+ /* Update our timewait time. */\r
+ __calc_timewait( p_cep );\r
+\r
+ /* Fill in the QP attributes. */\r
+ cl_memclr( p_apr, sizeof(ib_qp_mod_t) );\r
+ p_apr->req_state = IB_QPS_RTS;\r
+ p_apr->state.rts.opts =\r
+ IB_MOD_QP_ALTERNATE_AV | IB_MOD_QP_APM_STATE;\r
+ p_apr->state.rts.alternate_av = p_cep->alt_av.attr;\r
+ p_apr->state.rts.apm_state = IB_APM_REARM;\r
+ }\r
+\r
+ p_cep->state |= CEP_STATE_PREP;\r
+ status = IB_SUCCESS;\r
+ break;\r
+\r
+ default:\r
+ status = IB_INVALID_STATE;\r
+ break;\r
+ }\r
+ KeReleaseInStackQueuedSpinLock( &hdl );\r
+ AL_EXIT( AL_DBG_CM );\r
+ return status;\r
+}\r
+\r
+\r
+ib_api_status_t\r
+al_cep_send_apr(\r
+ IN ib_al_handle_t h_al,\r
+ IN net32_t cid )\r
+{\r
+ ib_api_status_t status;\r
+ kcep_t *p_cep;\r
+ KLOCK_QUEUE_HANDLE hdl;\r
+ cep_agent_t *p_port_cep;\r
+\r
+ AL_ENTER( AL_DBG_CM );\r
+\r
+ CL_ASSERT( h_al );\r
+\r
+ KeAcquireInStackQueuedSpinLock( &gp_cep_mgr->lock, &hdl );\r
+ p_cep = __lookup_cep( h_al, cid );\r
+ if( !p_cep )\r
+ {\r
+ KeReleaseInStackQueuedSpinLock( &hdl );\r
+ AL_EXIT( AL_DBG_CM );\r
+ return IB_INVALID_HANDLE;\r
+ }\r
+\r
+ switch( p_cep->state )\r
+ {\r
+ case CEP_STATE_PRE_APR:\r
+ case CEP_STATE_PRE_APR_MRA_SENT:\r
+ CL_ASSERT( p_cep->p_mad );\r
+ p_port_cep = __get_cep_agent( p_cep );\r
+ if( !p_port_cep )\r
+ {\r
+ ib_put_mad( p_cep->p_mad );\r
+ status = IB_INSUFFICIENT_RESOURCES;\r
+ }\r
+ else\r
+ {\r
+ p_cep->state = CEP_STATE_ESTABLISHED;\r
+\r
+ __cep_send_mad( p_port_cep, p_cep->p_mad );\r
+ status = IB_SUCCESS;\r
+ }\r
+ p_cep->p_mad = NULL;\r
+ break;\r
+\r
+ default:\r
+ status = IB_INVALID_STATE;\r
+ break;\r
+ }\r
+ KeReleaseInStackQueuedSpinLock( &hdl );\r
+ AL_EXIT( AL_DBG_CM );\r
+ return status;\r
+}\r
+\r
+\r
+ib_api_status_t\r
+al_cep_dreq(\r
+ IN ib_al_handle_t h_al,\r
+ IN net32_t cid,\r
+ IN const uint8_t* const p_pdata,\r
+ IN const uint8_t pdata_len )\r
+{\r
+ ib_api_status_t status;\r
+ kcep_t *p_cep;\r
+ KLOCK_QUEUE_HANDLE hdl;\r
+ cep_agent_t *p_port_cep;\r
+ ib_mad_element_t *p_mad;\r
+\r
+ AL_ENTER( AL_DBG_CM );\r
+\r
+ CL_ASSERT( h_al );\r
+\r
+ KeAcquireInStackQueuedSpinLock( &gp_cep_mgr->lock, &hdl );\r
+ p_cep = __lookup_cep( h_al, cid );\r
+ if( !p_cep )\r
+ {\r
+ KeReleaseInStackQueuedSpinLock( &hdl );\r
+ AL_EXIT( AL_DBG_CM );\r
+ return IB_INVALID_HANDLE;\r
+ }\r
+\r
+ switch( p_cep->state )\r
+ {\r
+ case CEP_STATE_ESTABLISHED:\r
+ case CEP_STATE_LAP_SENT:\r
+ case CEP_STATE_LAP_RCVD:\r
+ case CEP_STATE_LAP_MRA_SENT:\r
+ case CEP_STATE_LAP_MRA_RCVD:\r
+ status = __cep_get_mad( p_cep, CM_DREQ_ATTR_ID, &p_port_cep, &p_mad );\r
+ if( status != IB_SUCCESS )\r
+ break;\r
+\r
+ status = __format_dreq( p_cep, p_pdata, pdata_len, p_mad );\r
+ if( status != IB_SUCCESS )\r
+ {\r
+ AL_TRACE( AL_DBG_ERROR,\r
+ ("__format_dreq returned %s.\n", ib_get_err_str( status )) );\r
+ break;\r
+ }\r
+\r
+ if( __cep_send_retry( p_port_cep, p_cep, p_mad ) == IB_SUCCESS )\r
+ {\r
+ p_cep->state = CEP_STATE_DREQ_SENT;\r
+ }\r
+ else\r
+ {\r
+ p_cep->state = CEP_STATE_TIMEWAIT;\r
+ __insert_timewait( p_cep );\r
+ }\r
+\r
+ status = IB_SUCCESS;\r
+ break;\r
+\r
+ default:\r
+ status = IB_INVALID_STATE;\r
+ break;\r
+ }\r
+ KeReleaseInStackQueuedSpinLock( &hdl );\r
+ AL_EXIT( AL_DBG_CM );\r
+ return status;\r
+}\r
+\r
+\r
+ib_api_status_t\r
+al_cep_drep(\r
+ IN ib_al_handle_t h_al,\r
+ IN net32_t cid,\r
+ IN const ib_cm_drep_t* const p_cm_drep )\r
+{\r
+ ib_api_status_t status;\r
+ kcep_t *p_cep;\r
+ KLOCK_QUEUE_HANDLE hdl;\r
+ cep_agent_t *p_port_cep;\r
+ ib_mad_element_t *p_mad;\r
+\r
+ AL_ENTER( AL_DBG_CM );\r
+\r
+ CL_ASSERT( h_al );\r
+ CL_ASSERT( p_cm_drep );\r
+\r
+ KeAcquireInStackQueuedSpinLock( &gp_cep_mgr->lock, &hdl );\r
+ p_cep = __lookup_cep( h_al, cid );\r
+ if( !p_cep )\r
+ {\r
+ KeReleaseInStackQueuedSpinLock( &hdl );\r
+ AL_EXIT( AL_DBG_CM );\r
+ return IB_INVALID_HANDLE;\r
+ }\r
+\r
+ switch( p_cep->state )\r
+ {\r
+ case CEP_STATE_DREQ_RCVD:\r
+ status = __cep_get_mad( p_cep, CM_DREP_ATTR_ID, &p_port_cep, &p_mad );\r
+ if( status != IB_SUCCESS )\r
+ break;\r
+\r
+ status = __format_drep( p_cep, p_cm_drep->p_drep_pdata,\r
+ p_cm_drep->drep_length, (mad_cm_drep_t*)p_mad->p_mad_buf );\r
+ if( status != IB_SUCCESS )\r
+ break;\r
+\r
+ __cep_send_mad( p_port_cep, p_mad );\r
+ p_cep->state = CEP_STATE_TIMEWAIT;\r
+ __insert_timewait( p_cep );\r
+ break;\r
+\r
+ default:\r
+ status = IB_INVALID_STATE;\r
+ break;\r
+ }\r
+ KeReleaseInStackQueuedSpinLock( &hdl );\r
+ AL_EXIT( AL_DBG_CM );\r
+ return status;\r
+}\r
+\r
+\r
+ib_api_status_t\r
+al_cep_migrate(\r
+ IN ib_al_handle_t h_al,\r
+ IN net32_t cid )\r
+{\r
+ ib_api_status_t status;\r
+ kcep_t *p_cep;\r
+ KLOCK_QUEUE_HANDLE hdl;\r
+\r
+ AL_ENTER( AL_DBG_CM );\r
+\r
+ CL_ASSERT( h_al );\r
+\r
+ KeAcquireInStackQueuedSpinLock( &gp_cep_mgr->lock, &hdl );\r
+ p_cep = __lookup_cep( h_al, cid );\r
+ if( !p_cep )\r
+ {\r
+ KeReleaseInStackQueuedSpinLock( &hdl );\r
+ AL_EXIT( AL_DBG_CM );\r
+ return IB_INVALID_HANDLE;\r
+ }\r
+\r
+ switch( p_cep->state )\r
+ {\r
+ case CEP_STATE_ESTABLISHED:\r
+ case CEP_STATE_LAP_SENT:\r
+ case CEP_STATE_LAP_RCVD:\r
+ case CEP_STATE_LAP_MRA_SENT:\r
+ case CEP_STATE_LAP_MRA_RCVD:\r
+ if( p_cep->av[(p_cep->idx_primary + 1) & 0x1].port_guid )\r
+ {\r
+ p_cep->idx_primary++;\r
+ p_cep->idx_primary &= 0x1;\r
+ status = IB_SUCCESS;\r
+ break;\r
+ }\r
+\r
+ AL_TRACE( AL_DBG_ERROR, ("No alternate path avaialble.\n") );\r
+\r
+ /* Fall through. */\r
+ default:\r
+ status = IB_INVALID_STATE;\r
+ break;\r
+ }\r
+ KeReleaseInStackQueuedSpinLock( &hdl );\r
+ AL_EXIT( AL_DBG_CM );\r
+ return status;\r
+}\r
+\r
+\r
+ib_api_status_t\r
+al_cep_established(\r
+ IN ib_al_handle_t h_al,\r
+ IN net32_t cid )\r
+{\r
+ ib_api_status_t status;\r
+ kcep_t *p_cep;\r
+ KLOCK_QUEUE_HANDLE hdl;\r
+\r
+ AL_ENTER( AL_DBG_CM );\r
+\r
+ CL_ASSERT( h_al );\r
+\r
+ KeAcquireInStackQueuedSpinLock( &gp_cep_mgr->lock, &hdl );\r
+ p_cep = __lookup_cep( h_al, cid );\r
+ if( !p_cep )\r
+ {\r
+ KeReleaseInStackQueuedSpinLock( &hdl );\r
+ AL_EXIT( AL_DBG_CM );\r
+ return IB_INVALID_HANDLE;\r
+ }\r
+\r
+ switch( p_cep->state )\r
+ {\r
+ case CEP_STATE_REP_SENT:\r
+ case CEP_STATE_REP_MRA_RCVD:\r
+ CL_ASSERT( p_cep->p_send_mad );\r
+ ib_cancel_mad( p_cep->h_mad_svc, p_cep->p_send_mad );\r
+ p_cep->p_send_mad = NULL;\r
+ p_cep->state = CEP_STATE_ESTABLISHED;\r
+ status = IB_SUCCESS;\r
+ break;\r
+\r
+ default:\r
+ status = IB_INVALID_STATE;\r
+ break;\r
+ }\r
+ KeReleaseInStackQueuedSpinLock( &hdl );\r
+ AL_EXIT( AL_DBG_CM );\r
+ return status;\r
+}\r
+\r
+\r
+ib_api_status_t\r
+al_cep_get_rtr_attr(\r
+ IN ib_al_handle_t h_al,\r
+ IN net32_t cid,\r
+ OUT ib_qp_mod_t* const p_rtr )\r
+{\r
+ ib_api_status_t status;\r
+ kcep_t *p_cep;\r
+ KLOCK_QUEUE_HANDLE hdl;\r
+\r
+ AL_ENTER( AL_DBG_CM );\r
+\r
+ CL_ASSERT( h_al );\r
+ CL_ASSERT( p_rtr );\r
+\r
+ KeAcquireInStackQueuedSpinLock( &gp_cep_mgr->lock, &hdl );\r
+ p_cep = __lookup_cep( h_al, cid );\r
+ if( !p_cep )\r
+ {\r
+ KeReleaseInStackQueuedSpinLock( &hdl );\r
+ AL_EXIT( AL_DBG_CM );\r
+ return IB_INVALID_HANDLE;\r
+ }\r
+\r
+ switch( p_cep->state )\r
+ {\r
+ case CEP_STATE_PRE_REP:\r
+ case CEP_STATE_PRE_REP_MRA_SENT:\r
+ case CEP_STATE_REP_SENT:\r
+ case CEP_STATE_REP_MRA_RCVD:\r
+ case CEP_STATE_REP_RCVD:\r
+ case CEP_STATE_REP_MRA_SENT:\r
+ case CEP_STATE_ESTABLISHED:\r
+ cl_memclr( p_rtr, sizeof(ib_qp_mod_t) );\r
+ p_rtr->req_state = IB_QPS_RTR;\r
+\r
+ /* Required params. */\r
+ p_rtr->state.rtr.rq_psn = p_cep->rq_psn;\r
+ p_rtr->state.rtr.dest_qp = p_cep->remote_qpn;\r
+ p_rtr->state.rtr.primary_av = p_cep->av[p_cep->idx_primary].attr;\r
+ p_rtr->state.rtr.resp_res = p_cep->resp_res;\r
+ p_rtr->state.rtr.rnr_nak_timeout = p_cep->rnr_nak_timeout;\r
+\r
+ /* Optional params. */\r
+ p_rtr->state.rtr.opts = 0;\r
+ if( p_cep->av[(p_cep->idx_primary + 1) & 0x1].port_guid )\r
+ {\r
+ p_rtr->state.rtr.opts |= IB_MOD_QP_ALTERNATE_AV;\r
+ p_rtr->state.rtr.alternate_av =\r
+ p_cep->av[(p_cep->idx_primary + 1) & 0x1].attr;\r
+ }\r
+ status = IB_SUCCESS;\r
+ break;\r
+\r
+ default:\r
+ status = IB_INVALID_STATE;\r
+ break;\r
+ }\r
+ KeReleaseInStackQueuedSpinLock( &hdl );\r
+ AL_EXIT( AL_DBG_CM );\r
+ return status;\r
+}\r
+\r
+\r
+ib_api_status_t\r
+al_cep_get_rts_attr(\r
+ IN ib_al_handle_t h_al,\r
+ IN net32_t cid,\r
+ OUT ib_qp_mod_t* const p_rts )\r
+{\r
+ ib_api_status_t status;\r
+ kcep_t *p_cep;\r
+ KLOCK_QUEUE_HANDLE hdl;\r
+\r
+ AL_ENTER( AL_DBG_CM );\r
+\r
+ CL_ASSERT( h_al );\r
+ CL_ASSERT( p_rts );\r
+\r
+ KeAcquireInStackQueuedSpinLock( &gp_cep_mgr->lock, &hdl );\r
+ p_cep = __lookup_cep( h_al, cid );\r
+ if( !p_cep )\r
+ {\r
+ KeReleaseInStackQueuedSpinLock( &hdl );\r
+ AL_EXIT( AL_DBG_CM );\r
+ return IB_INVALID_HANDLE;\r
+ }\r
+\r
+ switch( p_cep->state )\r
+ {\r
+ case CEP_STATE_REQ_SENT:\r
+ case CEP_STATE_REQ_RCVD:\r
+ case CEP_STATE_REQ_MRA_SENT:\r
+ case CEP_STATE_REQ_MRA_RCVD:\r
+ case CEP_STATE_REP_SENT:\r
+ case CEP_STATE_REP_RCVD:\r
+ case CEP_STATE_REP_MRA_SENT:\r
+ case CEP_STATE_REP_MRA_RCVD:\r
+ case CEP_STATE_PRE_REP:\r
+ case CEP_STATE_PRE_REP_MRA_SENT:\r
+ case CEP_STATE_ESTABLISHED:\r
+ cl_memclr( p_rts, sizeof(ib_qp_mod_t) );\r
+ p_rts->req_state = IB_QPS_RTS;\r
+\r
+ /* Required params. */\r
+ p_rts->state.rts.sq_psn = p_cep->sq_psn;\r
+ p_rts->state.rts.retry_cnt =\r
+ p_cep->av[p_cep->idx_primary].attr.conn.seq_err_retry_cnt;\r
+ p_rts->state.rts.rnr_retry_cnt =\r
+ p_cep->av[p_cep->idx_primary].attr.conn.rnr_retry_cnt;\r
+ p_rts->state.rts.local_ack_timeout =\r
+ p_cep->av[p_cep->idx_primary].attr.conn.local_ack_timeout;\r
+ p_rts->state.rts.init_depth = p_cep->init_depth;\r
+\r
+ /* Optional params. */\r
+ p_rts->state.rts.opts = 0;\r
+ if( p_cep->av[(p_cep->idx_primary + 1) & 0x1].port_guid )\r
+ {\r
+ p_rts->state.rts.opts =\r
+ IB_MOD_QP_ALTERNATE_AV | IB_MOD_QP_APM_STATE;\r
+ p_rts->state.rts.apm_state = IB_APM_REARM;\r
+ p_rts->state.rts.alternate_av =\r
+ p_cep->av[(p_cep->idx_primary + 1) & 0x1].attr;\r
+ }\r
+ status = IB_SUCCESS;\r
+ break;\r
+\r
+ default:\r
+ status = IB_INVALID_STATE;\r
+ break;\r
+ }\r
+ KeReleaseInStackQueuedSpinLock( &hdl );\r
+ AL_EXIT( AL_DBG_CM );\r
+ return status;\r
+}\r
+\r
+\r
+ib_api_status_t\r
+al_cep_get_timewait(\r
+ IN ib_al_handle_t h_al,\r
+ IN net32_t cid,\r
+ OUT uint64_t* const p_timewait_us )\r
+{\r
+ kcep_t *p_cep;\r
+ KLOCK_QUEUE_HANDLE hdl;\r
+\r
+ AL_ENTER( AL_DBG_CM );\r
+\r
+ CL_ASSERT( h_al );\r
+\r
+ KeAcquireInStackQueuedSpinLock( &gp_cep_mgr->lock, &hdl );\r
+ p_cep = __lookup_cep( h_al, cid );\r
+ if( !p_cep )\r
+ {\r
+ KeReleaseInStackQueuedSpinLock( &hdl );\r
+ AL_EXIT( AL_DBG_CM );\r
+ return IB_INVALID_HANDLE;\r
+ }\r
+\r
+ *p_timewait_us = p_cep->timewait_time.QuadPart / 10;\r
+ KeReleaseInStackQueuedSpinLock( &hdl );\r
+ AL_EXIT( AL_DBG_CM );\r
+ return IB_SUCCESS;\r
+}\r
+\r
+\r
+ib_api_status_t\r
+al_cep_poll(\r
+ IN ib_al_handle_t h_al,\r
+ IN net32_t cid,\r
+ IN OUT ib_cep_t* const p_new_cep,\r
+ OUT ib_mad_element_t** const pp_mad )\r
+{\r
+ ib_api_status_t status;\r
+ kcep_t *p_cep;\r
+ KLOCK_QUEUE_HANDLE hdl;\r
+\r
+ AL_ENTER( AL_DBG_CM );\r
+\r
+ CL_ASSERT( h_al );\r
+ CL_ASSERT( p_new_cep );\r
+ CL_ASSERT( pp_mad );\r
+\r
+ KeAcquireInStackQueuedSpinLock( &gp_cep_mgr->lock, &hdl );\r
+ p_cep = __lookup_cep( h_al, cid );\r
+ if( !p_cep )\r
+ {\r
+ KeReleaseInStackQueuedSpinLock( &hdl );\r
+ AL_EXIT( AL_DBG_CM );\r
+ return IB_INVALID_HANDLE;\r
+ }\r
+\r
+ if( !p_cep->p_mad_head )\r
+ {\r
+ p_cep->signalled = FALSE;\r
+ status = IB_NOT_DONE;\r
+ goto done;\r
+ }\r
+\r
+ /* Set the MAD. */\r
+ *pp_mad = p_cep->p_mad_head;\r
+ p_cep->p_mad_head = p_cep->p_mad_head->p_next;\r
+ (*pp_mad)->p_next = NULL;\r
+\r
+ /* We're done with the input CEP. Reuse the variable */\r
+ p_cep = (kcep_t* __ptr64)(*pp_mad)->send_context1;\r
+ if( p_cep )\r
+ {\r
+ *p_new_cep = p_cep->cep;\r
+ }\r
+ else\r
+ {\r
+ p_new_cep->context = NULL;\r
+ p_new_cep->cid = AL_INVALID_CID;\r
+ }\r
+\r
+ status = IB_SUCCESS;\r
+\r
+done:\r
+ KeReleaseInStackQueuedSpinLock( &hdl );\r
+ AL_EXIT( AL_DBG_CM );\r
+ return status;\r
+}\r
+\r
+\r
+static void\r
+__cep_cancel_irp(\r
+ IN DEVICE_OBJECT* p_dev_obj,\r
+ IN IRP* p_irp )\r
+{\r
+ net32_t cid;\r
+ ib_al_handle_t h_al;\r
+ KLOCK_QUEUE_HANDLE hdl;\r
+ kcep_t *p_cep;\r
+\r
+ AL_ENTER( AL_DBG_CM );\r
+\r
+ UNUSED_PARAM( p_dev_obj );\r
+ CL_ASSERT( p_irp );\r
+\r
+ cid = (net32_t)(size_t)p_irp->Tail.Overlay.DriverContext[0];\r
+ h_al = (ib_al_handle_t)p_irp->Tail.Overlay.DriverContext[1];\r
+ CL_ASSERT( h_al );\r
+\r
+ KeAcquireInStackQueuedSpinLock( &gp_cep_mgr->lock, &hdl );\r
+ p_cep = __lookup_cep( h_al, cid );\r
+ if( p_cep )\r
+ __cep_complete_irp( p_cep, STATUS_CANCELLED, IO_NO_INCREMENT );\r
+\r
+ KeReleaseInStackQueuedSpinLock( &hdl );\r
+\r
+ IoReleaseCancelSpinLock( p_irp->CancelIrql );\r
+\r
+ AL_EXIT( AL_DBG_CM );\r
+}\r
+\r
+\r
+NTSTATUS\r
+al_cep_queue_irp(\r
+ IN ib_al_handle_t h_al,\r
+ IN net32_t cid,\r
+ IN IRP* const p_irp )\r
+{\r
+ kcep_t *p_cep;\r
+ KLOCK_QUEUE_HANDLE hdl;\r
+\r
+ AL_ENTER( AL_DBG_CM );\r
+\r
+ CL_ASSERT( h_al );\r
+ CL_ASSERT( p_irp );\r
+\r
+ KeAcquireInStackQueuedSpinLock( &gp_cep_mgr->lock, &hdl );\r
+ p_cep = __lookup_cep( h_al, cid );\r
+ if( !p_cep )\r
+ {\r
+ KeReleaseInStackQueuedSpinLock( &hdl );\r
+ AL_EXIT( AL_DBG_CM );\r
+ return STATUS_INVALID_PARAMETER;\r
+ }\r
+\r
+ /*\r
+ * Store the CID and AL handle in the IRP's driver context\r
+ * so we can cancel it.\r
+ */\r
+ p_irp->Tail.Overlay.DriverContext[0] = (void*)(size_t)cid;\r
+ p_irp->Tail.Overlay.DriverContext[1] = (void*)h_al;\r
+#pragma warning(push, 3)\r
+ IoSetCancelRoutine( p_irp, __cep_cancel_irp );\r
+#pragma warning(pop)\r
+ IoMarkIrpPending( p_irp );\r
+\r
+ /* Always dequeue and complete whatever IRP is there. */\r
+ __cep_complete_irp( p_cep, STATUS_CANCELLED, IO_NO_INCREMENT );\r
+\r
+ InterlockedExchangePointer( &p_cep->p_irp, p_irp );\r
+\r
+ /* Complete the IRP if there are MADs to be reaped. */\r
+ if( p_cep->p_mad_head )\r
+ __cep_complete_irp( p_cep, STATUS_SUCCESS, IO_NETWORK_INCREMENT );\r
+\r
+ KeReleaseInStackQueuedSpinLock( &hdl );\r
+ AL_EXIT( AL_DBG_CM );\r
+ return STATUS_PENDING;\r
+}\r
+\r
+\r
+void\r
+al_cep_cleanup_al(\r
+ IN const ib_al_handle_t h_al )\r
+{\r
+ cl_list_item_t *p_item;\r
+ net32_t cid;\r
+\r
+ AL_ENTER( AL_DBG_CM );\r
+\r
+ /* Destroy all CEPs associated with the input instance of AL. */\r
+ cl_spinlock_acquire( &h_al->obj.lock );\r
+ for( p_item = cl_qlist_head( &h_al->cep_list );\r
+ p_item != cl_qlist_end( &h_al->cep_list );\r
+ p_item = cl_qlist_head( &h_al->cep_list ) )\r
+ {\r
+ /*\r
+ * Note that we don't walk the list - we can't hold the AL\r
+ * lock when cleaning up its CEPs because the cleanup path\r
+ * takes the CEP's lock. We always want to take the CEP\r
+ * before the AL lock to prevent any possibilities of deadlock.\r
+ *\r
+ * So we just get the CID, and then release the AL lock and try to\r
+ * destroy. This should unbind the CEP from the AL instance and\r
+ * remove it from the list, allowing the next CEP to be cleaned up\r
+ * in the next pass through.\r
+ */\r
+ cid = PARENT_STRUCT( p_item, kcep_t, al_item )->cep.cid;\r
+ cl_spinlock_release( &h_al->obj.lock );\r
+ al_destroy_cep( h_al, cid, NULL );\r
+ cl_spinlock_acquire( &h_al->obj.lock );\r
+ }\r
+ cl_spinlock_release( &h_al->obj.lock );\r
+\r
+ AL_EXIT( AL_DBG_CM );\r
+}\r
IN al_dev_open_context_t *p_context )\r
{\r
al_handle_t *p_h;\r
- ib_cm_handle_t h_cm;\r
- union _u\r
- {\r
- ib_cm_rej_t cm_rej;\r
- ib_cm_drep_t cm_drep;\r
- ib_cm_apr_t cm_apr;\r
- } u;\r
size_t i;\r
- uint32_t cm_subtype;\r
\r
CL_ENTER( AL_DBG_DEV, g_al_dbg_lvl );\r
\r
al_hdl_free( p_context->h_al, i );\r
break;\r
\r
- case AL_OBJ_TYPE_H_CONN:\r
- h_cm = (ib_cm_handle_t)p_h->p_obj;\r
- cm_subtype = AL_SUBTYPE( p_h->type );\r
- al_hdl_free( p_context->h_al, i );\r
- cl_spinlock_release( &p_context->h_al->obj.lock );\r
- switch( cm_subtype )\r
- {\r
- case AL_OBJ_SUBTYPE_REQ:\r
- case AL_OBJ_SUBTYPE_REP:\r
- /* Reject any outstanding connections. */\r
- cl_memclr( &u.cm_rej, sizeof( ib_cm_rej_t ) );\r
- u.cm_rej.rej_status = IB_REJ_TIMEOUT;\r
- ib_cm_rej( h_cm, &u.cm_rej );\r
- break;\r
-\r
- case AL_OBJ_SUBTYPE_DREQ:\r
- /* Issue a disconnect reply to any requests. */\r
- cl_memclr( &u.cm_drep, sizeof( ib_cm_drep_t ) );\r
- ib_cm_drep( h_cm, &u.cm_drep );\r
- break;\r
-\r
- case AL_OBJ_SUBTYPE_LAP:\r
- /* Reject the LAP. */\r
- cl_memclr( &u.cm_apr, sizeof( ib_cm_apr_t ) );\r
- u.cm_apr.apr_status = IB_AP_REJECT;\r
- ib_cm_apr( h_cm, &u.cm_apr );\r
- break;\r
-\r
- default:\r
- break;\r
- }\r
- cl_spinlock_acquire( &p_context->h_al->obj.lock );\r
- break;\r
-\r
case AL_OBJ_TYPE_H_SA_REQ:\r
al_cancel_sa_req( (al_sa_req_t*)p_h->p_obj );\r
break;\r
cl_status = proxy_ioctl( h_ioctl, &ret_bytes );\r
else if( IS_VERBS_IOCTL(cl_ioctl_ctl_code( h_ioctl )) )\r
cl_status = verbs_ioctl( h_ioctl, &ret_bytes );\r
- else if( IS_CM_IOCTL(cl_ioctl_ctl_code( h_ioctl )) )\r
- cl_status = cm_ioctl( h_ioctl, &ret_bytes );\r
+ //else if( IS_CM_IOCTL(cl_ioctl_ctl_code( h_ioctl )) )\r
+ // cl_status = cm_ioctl( h_ioctl, &ret_bytes );\r
+ else if( IS_CEP_IOCTL(cl_ioctl_ctl_code( h_ioctl )) )\r
+ cl_status = cep_ioctl( h_ioctl, &ret_bytes );\r
else if( IS_AL_IOCTL(cl_ioctl_ctl_code( h_ioctl )) )\r
cl_status = al_ioctl( h_ioctl, &ret_bytes );\r
else if( IS_SUBNET_IOCTL(cl_ioctl_ctl_code( h_ioctl )) )\r
*********/\r
\r
\r
+#pragma warning(disable:4324)\r
typedef struct _iou_ioc\r
{\r
cl_map_item_t map_item;\r
iou_node_t *p_iou;\r
uint8_t slot;\r
- uint8_t pad[7]; /* Align IOC profile on 64-bit boundary */\r
ib_ioc_profile_t profile;\r
uint8_t num_valid_entries;\r
ib_svc_entry_t *p_svc_entries;\r
\r
} iou_ioc_t;\r
+#pragma warning(default:4324)\r
\r
\r
typedef enum _sweep_state\r
p_mad_send->mad_send.h_av = NULL;\r
p_mad_send->mad_send.retry_cnt = 0;\r
p_mad_send->mad_send.retry_time = 0;\r
+ p_mad_send->mad_send.delay = 0;\r
p_mad_send->h_pool = p_mad_item->pool_key->h_pool;\r
\r
ref_al_obj( &p_mad_item->pool_key->h_pool->obj );\r
#include <iba/ib_ci.h>\r
\r
#include "al.h"\r
-#include "al_cm.h"\r
+#include "al_cm_cep.h"\r
#include "al_debug.h"\r
#include "al_dm.h"\r
#include "al_mad_pool.h"\r
}\r
\r
/* Initialize CM */\r
- status = create_cm( &gp_al_mgr->obj );\r
+ status = create_cep_mgr( &gp_al_mgr->obj );\r
if( status != IB_SUCCESS )\r
{\r
gp_al_mgr->obj.pfn_destroy( &gp_al_mgr->obj, NULL );\r
cl_qlist_init( &h_al->mad_list );\r
cl_qlist_init( &h_al->key_list );\r
cl_qlist_init( &h_al->query_list );\r
- cl_qlist_init( &h_al->conn_list );\r
+ cl_qlist_init( &h_al->cep_list );\r
\r
cl_vector_construct( &h_al->hdl_vector );\r
\r
return p_obj;\r
}\r
\r
-\r
-al_conn_t*\r
-al_hdl_ref_conn(\r
- IN const ib_al_handle_t h_al,\r
- IN const uint64_t hdl,\r
- IN const uint32_t sub_type )\r
-{\r
- al_handle_t *p_h;\r
- al_conn_t *p_conn;\r
-\r
- cl_spinlock_acquire( &h_al->obj.lock );\r
-\r
- /* Validate index. */\r
- if( hdl >= cl_vector_get_size( &h_al->hdl_vector ) )\r
- {\r
- cl_spinlock_release( &h_al->obj.lock );\r
- return NULL;\r
- }\r
-\r
- /* Get the specified entry. */\r
- p_h = (al_handle_t*)cl_vector_get_ptr( &h_al->hdl_vector, (size_t)hdl );\r
-\r
- /*\r
- * Make sure that the handle is valid and the correct type. Note that we\r
- * support having multiple possible subtypes provided, and check against\r
- * any of them.\r
- */\r
- if( (AL_BASE_TYPE( p_h->type ) != AL_OBJ_TYPE_H_CONN) ||\r
- ((AL_SUBTYPE( p_h->type ) & sub_type) == 0) )\r
- {\r
- cl_spinlock_release( &h_al->obj.lock );\r
- return NULL;\r
- }\r
-\r
- p_conn = (al_conn_t*)p_h->p_obj;\r
-\r
- __ref_conn( p_conn );\r
-\r
- cl_spinlock_release( &h_al->obj.lock );\r
- return p_conn;\r
-}\r
-\r
-\r
-al_conn_t*\r
-al_hdl_get_conn(\r
- IN const ib_al_handle_t h_al,\r
- IN const uint64_t hdl,\r
- IN const uint32_t sub_type )\r
-{\r
- al_handle_t *p_h;\r
- al_conn_t *p_conn;\r
-\r
- cl_spinlock_acquire( &h_al->obj.lock );\r
-\r
- /* Validate index. */\r
- if( hdl >= cl_vector_get_size( &h_al->hdl_vector ) )\r
- {\r
- cl_spinlock_release( &h_al->obj.lock );\r
- return NULL;\r
- }\r
-\r
- /* Get the specified entry. */\r
- p_h = (al_handle_t*)cl_vector_get_ptr( &h_al->hdl_vector, (size_t)hdl );\r
-\r
- /*\r
- * Make sure that the handle is valid and the correct type. Note that we\r
- * support having multiple possible subtypes provided, and check against\r
- * any of them.\r
- */\r
- if( (AL_BASE_TYPE( p_h->type ) != AL_OBJ_TYPE_H_CONN) ||\r
- ((AL_SUBTYPE( p_h->type ) & sub_type) == 0) )\r
- {\r
- cl_spinlock_release( &h_al->obj.lock );\r
- return NULL;\r
- }\r
-\r
- p_conn = (al_conn_t*)p_h->p_obj;\r
-\r
- /* Clear the entry. */\r
- p_h->type = AL_OBJ_TYPE_UNKNOWN;\r
- p_h->p_obj = (al_obj_t*)(uintn_t)h_al->free_hdl;\r
- h_al->free_hdl = hdl;\r
-\r
- __ref_conn( p_conn );\r
-\r
- cl_spinlock_release( &h_al->obj.lock );\r
- return p_conn;\r
-}\r
-\r
return CL_INVALID_PARAMETER;\r
}\r
\r
- if( cl_ioctl_in_size( h_ioctl ) < sizeof(ual_rearm_pnp_ioctl_in_t) ||\r
- cl_ioctl_out_size( h_ioctl ) < sizeof(ual_rearm_pnp_ioctl_out_t) )\r
+ if( cl_ioctl_in_size( h_ioctl ) != sizeof(ual_rearm_pnp_ioctl_in_t) ||\r
+ cl_ioctl_out_size( h_ioctl ) != sizeof(ual_rearm_pnp_ioctl_out_t) )\r
{\r
AL_EXIT( AL_DBG_DEV | AL_DBG_PNP );\r
return CL_INVALID_PARAMETER;\r
--- /dev/null
+/*\r
+ * Copyright (c) 2005 SilverStorm Technologies. All rights reserved.\r
+ *\r
+ * This software is available to you under the OpenIB.org BSD license\r
+ * below:\r
+ *\r
+ * Redistribution and use in source and binary forms, with or\r
+ * without modification, are permitted provided that the following\r
+ * conditions are met:\r
+ *\r
+ * - Redistributions of source code must retain the above\r
+ * copyright notice, this list of conditions and the following\r
+ * disclaimer.\r
+ *\r
+ * - Redistributions in binary form must reproduce the above\r
+ * copyright notice, this list of conditions and the following\r
+ * disclaimer in the documentation and/or other materials\r
+ * provided with the distribution.\r
+ *\r
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,\r
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\r
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND\r
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS\r
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN\r
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN\r
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\r
+ * SOFTWARE.\r
+ *\r
+ * $Id$\r
+ */\r
+\r
+\r
+#include "al_debug.h"\r
+#include "al_cm_cep.h"\r
+#include "al_dev.h"\r
+#include <iba/ib_al_ioctl.h>\r
+#include "al_proxy.h"\r
+#include "al.h"\r
+#include "al_qp.h"\r
+\r
+\r
+/*\r
+ * Process a UAL_CREATE_CEP IOCTL: create a kernel CEP on behalf of a\r
+ * user-mode client and return its CID (and the AL status) in the output\r
+ * buffer.  Returns CL_INVALID_PARAMETER if the buffer size is wrong.\r
+ */\r
+static cl_status_t\r
+proxy_create_cep(\r
+ IN void *p_open_context,\r
+ IN cl_ioctl_handle_t h_ioctl,\r
+ OUT size_t *p_ret_bytes )\r
+{\r
+ al_dev_open_context_t *p_context;\r
+ ual_create_cep_ioctl_t *p_ioctl;\r
+\r
+ AL_ENTER( AL_DBG_CM );\r
+\r
+ p_context = (al_dev_open_context_t*)p_open_context;\r
+ p_ioctl = (ual_create_cep_ioctl_t*)cl_ioctl_out_buf( h_ioctl );\r
+\r
+ /* Validate user parameters. */\r
+ if( cl_ioctl_out_size( h_ioctl ) != sizeof(ual_create_cep_ioctl_t) )\r
+ {\r
+ AL_EXIT( AL_DBG_CM );\r
+ return CL_INVALID_PARAMETER;\r
+ }\r
+\r
+ /* We use IRPs as notification mechanism so the callback is NULL. */\r
+ p_ioctl->status = al_create_cep( p_context->h_al, NULL,\r
+ p_context, &p_ioctl->cid );\r
+\r
+ /* The creation status travels in the IOCTL payload, not the return. */\r
+ *p_ret_bytes = sizeof(ual_create_cep_ioctl_t);\r
+\r
+ AL_EXIT( AL_DBG_CM );\r
+ return CL_SUCCESS;\r
+}\r
+\r
+\r
+/*\r
+ * Complete a pended "get event" IRP: clear its cancel routine, fill in\r
+ * the I/O status block, and complete the request.  Also releases the AL\r
+ * object reference (presumably taken when the IRP was queued — confirm\r
+ * against the queuing path, which is outside this file view).\r
+ */\r
+static inline void\r
+__complete_get_event_ioctl(\r
+ IN ib_al_handle_t h_al,\r
+ IN IRP* const p_irp,\r
+ IN NTSTATUS status )\r
+{\r
+/* IoSetCancelRoutine triggers a W4 warning; compile it at W3. */\r
+#pragma warning(push, 3)\r
+ IoSetCancelRoutine( p_irp, NULL );\r
+#pragma warning(pop)\r
+\r
+ /* Complete the IRP. */\r
+ p_irp->IoStatus.Status = status;\r
+ p_irp->IoStatus.Information = 0;\r
+ IoCompleteRequest( p_irp, IO_NETWORK_INCREMENT );\r
+\r
+ deref_al_obj( &h_al->obj );\r
+}\r
+\r
+\r
+/*\r
+ * Process a UAL_DESTROY_CEP IOCTL: destroy the CEP identified by the\r
+ * CID in the input buffer.  No output data is returned (p_ret_bytes is\r
+ * intentionally unused).\r
+ */\r
+static cl_status_t\r
+proxy_destroy_cep(\r
+ IN void *p_open_context,\r
+ IN cl_ioctl_handle_t h_ioctl,\r
+ OUT size_t *p_ret_bytes )\r
+{\r
+ al_dev_open_context_t *p_context;\r
+\r
+ AL_ENTER( AL_DBG_CM );\r
+\r
+ UNUSED_PARAM( p_ret_bytes );\r
+\r
+ p_context = (al_dev_open_context_t*)p_open_context;\r
+\r
+ /* Validate user parameters. */\r
+ if( cl_ioctl_in_size( h_ioctl ) != sizeof(net32_t) )\r
+ {\r
+ AL_EXIT( AL_DBG_CM );\r
+ return CL_INVALID_PARAMETER;\r
+ }\r
+\r
+ /* NULL destroy callback — destruction is synchronous from our view. */\r
+ al_destroy_cep( p_context->h_al,\r
+ *(net32_t*)cl_ioctl_in_buf( h_ioctl ), NULL );\r
+\r
+ AL_EXIT( AL_DBG_CM );\r
+ return CL_SUCCESS;\r
+}\r
+\r
+\r
+/*\r
+ * Process a UAL_CEP_LISTEN IOCTL: put the CEP identified by cid into\r
+ * listening mode.  The listen request is marshaled in the input buffer;\r
+ * the AL status is returned in the output buffer.\r
+ */\r
+static cl_status_t\r
+proxy_cep_listen(\r
+ IN void *p_open_context,\r
+ IN cl_ioctl_handle_t h_ioctl,\r
+ OUT size_t *p_ret_bytes )\r
+{\r
+ al_dev_open_context_t *p_context;\r
+ ual_cep_listen_ioctl_t *p_ioctl;\r
+ ib_api_status_t status;\r
+\r
+ AL_ENTER( AL_DBG_CM );\r
+\r
+ p_context = (al_dev_open_context_t*)p_open_context;\r
+ p_ioctl = (ual_cep_listen_ioctl_t*)cl_ioctl_in_buf( h_ioctl );\r
+\r
+ /* Validate user parameters. */\r
+ if( cl_ioctl_in_size( h_ioctl ) != sizeof(ual_cep_listen_ioctl_t) ||\r
+ cl_ioctl_out_size( h_ioctl ) != sizeof(ib_api_status_t) )\r
+ {\r
+ AL_EXIT( AL_DBG_CM );\r
+ return CL_INVALID_PARAMETER;\r
+ }\r
+\r
+ /* Set the private data compare buffer to our kernel copy. */\r
+ if( p_ioctl->cep_listen.p_cmp_buf )\r
+ p_ioctl->cep_listen.p_cmp_buf = p_ioctl->compare;\r
+\r
+ status =\r
+ al_cep_listen( p_context->h_al, p_ioctl->cid, &p_ioctl->cep_listen );\r
+\r
+ (*(ib_api_status_t*)cl_ioctl_out_buf( h_ioctl )) = status;\r
+\r
+ *p_ret_bytes = sizeof(ib_api_status_t);\r
+\r
+ AL_EXIT( AL_DBG_CM );\r
+ return CL_SUCCESS;\r
+}\r
+\r
+\r
+/*\r
+ * Process a UAL_CEP_PRE_REQ IOCTL: prepare a connection REQ on the CEP.\r
+ * Embedded user-mode pointers in the marshaled ib_cm_req_t are redirected\r
+ * to the kernel copies carried in the same input buffer, and the user-mode\r
+ * QP handle is translated to the kernel QP handle before calling\r
+ * al_cep_pre_req.  The QP-modify attributes for the INIT transition are\r
+ * returned in the output buffer.\r
+ */\r
+static cl_status_t\r
+proxy_cep_pre_req(\r
+ IN void *p_open_context,\r
+ IN cl_ioctl_handle_t h_ioctl,\r
+ OUT size_t *p_ret_bytes )\r
+{\r
+ al_dev_open_context_t *p_context;\r
+ ual_cep_req_ioctl_t *p_ioctl;\r
+ ib_qp_handle_t h_qp;\r
+\r
+ AL_ENTER( AL_DBG_CM );\r
+\r
+ p_context = (al_dev_open_context_t*)p_open_context;\r
+ p_ioctl = (ual_cep_req_ioctl_t*)cl_ioctl_in_buf( h_ioctl );\r
+\r
+ /* Validate user parameters. */\r
+ if( cl_ioctl_in_size( h_ioctl ) != sizeof(struct _ual_cep_req_ioctl_in) ||\r
+ cl_ioctl_out_size( h_ioctl ) != sizeof(struct _ual_cep_req_ioctl_out) )\r
+ {\r
+ AL_EXIT( AL_DBG_CM );\r
+ return CL_INVALID_PARAMETER;\r
+ }\r
+\r
+ *p_ret_bytes = sizeof(struct _ual_cep_req_ioctl_out);\r
+\r
+ /* Redirect embedded pointers to the kernel copies in this buffer. */\r
+ p_ioctl->in.cm_req.h_al = p_context->h_al;\r
+ p_ioctl->in.cm_req.p_primary_path = &p_ioctl->in.paths[0];\r
+ if( p_ioctl->in.cm_req.p_alt_path )\r
+ p_ioctl->in.cm_req.p_alt_path = &p_ioctl->in.paths[1];\r
+ if( p_ioctl->in.cm_req.p_compare_buffer )\r
+ p_ioctl->in.cm_req.p_compare_buffer = p_ioctl->in.compare;\r
+ if( p_ioctl->in.cm_req.p_req_pdata )\r
+ p_ioctl->in.cm_req.p_req_pdata = p_ioctl->in.pdata;\r
+\r
+ /* Get the kernel QP handle. */\r
+ h_qp = (ib_qp_handle_t)al_hdl_ref(\r
+ p_context->h_al, (uint64_t)p_ioctl->in.cm_req.h_qp, AL_OBJ_TYPE_H_QP );\r
+ if( !h_qp )\r
+ {\r
+ p_ioctl->out.status = IB_INVALID_QP_HANDLE;\r
+ goto done;\r
+ }\r
+\r
+ p_ioctl->in.cm_req.h_qp = h_qp;\r
+\r
+ p_ioctl->out.status = al_cep_pre_req( p_context->h_al, p_ioctl->in.cid,\r
+ &p_ioctl->in.cm_req, &p_ioctl->out.init );\r
+\r
+ deref_al_obj( &h_qp->obj );\r
+\r
+ if( p_ioctl->out.status != IB_SUCCESS )\r
+ {\r
+/* NOTE: the goto above jumps into this block (legal C) so failure\r
+ * paths always clear the returned QP-modify attributes. */\r
+done:\r
+ cl_memclr( &p_ioctl->out.init, sizeof(ib_qp_mod_t) );\r
+ }\r
+\r
+ AL_EXIT( AL_DBG_CM );\r
+ return CL_SUCCESS;\r
+}\r
+\r
+\r
+/*\r
+ * Process a UAL_CEP_SEND_REQ IOCTL: send the previously prepared REQ on\r
+ * the CEP identified by the CID in the input buffer.  The AL status is\r
+ * returned in the output buffer.\r
+ */\r
+static cl_status_t\r
+proxy_cep_send_req(\r
+ IN void *p_open_context,\r
+ IN cl_ioctl_handle_t h_ioctl,\r
+ OUT size_t *p_ret_bytes )\r
+{\r
+ al_dev_open_context_t *p_context;\r
+\r
+ AL_ENTER( AL_DBG_CM );\r
+\r
+ p_context = (al_dev_open_context_t*)p_open_context;\r
+\r
+ /* Validate user parameters. */\r
+ if( cl_ioctl_in_size( h_ioctl ) != sizeof(net32_t) ||\r
+ cl_ioctl_out_size( h_ioctl ) != sizeof(ib_api_status_t) )\r
+ {\r
+ AL_EXIT( AL_DBG_CM );\r
+ return CL_INVALID_PARAMETER;\r
+ }\r
+\r
+ (*(ib_api_status_t*)cl_ioctl_out_buf( h_ioctl )) = al_cep_send_req(\r
+ p_context->h_al, *(net32_t*)cl_ioctl_in_buf( h_ioctl ) );\r
+\r
+ *p_ret_bytes = sizeof(ib_api_status_t);\r
+\r
+ AL_EXIT( AL_DBG_CM );\r
+ return CL_SUCCESS;\r
+}\r
+\r
+\r
+/*\r
+ * Process a UAL_CEP_PRE_REP IOCTL: prepare a connection REP on the CEP.\r
+ * The reply private-data pointer is redirected to the kernel copy and the\r
+ * user-mode QP handle translated to the kernel handle before calling\r
+ * al_cep_pre_rep.  The QP-modify attributes for the INIT transition are\r
+ * returned in the output buffer.\r
+ */\r
+static cl_status_t\r
+proxy_cep_pre_rep(\r
+ IN void *p_open_context,\r
+ IN cl_ioctl_handle_t h_ioctl,\r
+ OUT size_t *p_ret_bytes )\r
+{\r
+ al_dev_open_context_t *p_context;\r
+ ual_cep_rep_ioctl_t *p_ioctl;\r
+ ib_qp_handle_t h_qp;\r
+\r
+ AL_ENTER( AL_DBG_CM );\r
+\r
+ p_context = (al_dev_open_context_t*)p_open_context;\r
+ p_ioctl = (ual_cep_rep_ioctl_t*)cl_ioctl_in_buf( h_ioctl );\r
+\r
+ /* Validate user parameters. */\r
+ if( cl_ioctl_in_size( h_ioctl ) != sizeof(struct _ual_cep_rep_ioctl_in) ||\r
+ cl_ioctl_out_size( h_ioctl ) != sizeof(struct _ual_cep_rep_ioctl_out) )\r
+ {\r
+ AL_EXIT( AL_DBG_CM );\r
+ return CL_INVALID_PARAMETER;\r
+ }\r
+\r
+ *p_ret_bytes = sizeof(struct _ual_cep_rep_ioctl_out);\r
+\r
+ /* Redirect the private-data pointer to the kernel copy. */\r
+ if( p_ioctl->in.cm_rep.p_rep_pdata )\r
+ p_ioctl->in.cm_rep.p_rep_pdata = p_ioctl->in.pdata;\r
+\r
+ /* Get the kernel QP handle. */\r
+ h_qp = (ib_qp_handle_t)al_hdl_ref(\r
+ p_context->h_al, (uint64_t)p_ioctl->in.cm_rep.h_qp, AL_OBJ_TYPE_H_QP );\r
+ if( !h_qp )\r
+ {\r
+ p_ioctl->out.status = IB_INVALID_QP_HANDLE;\r
+ goto done;\r
+ }\r
+\r
+ p_ioctl->in.cm_rep.h_qp = h_qp;\r
+\r
+ p_ioctl->out.status = al_cep_pre_rep( p_context->h_al, p_ioctl->in.cid,\r
+ p_context, &p_ioctl->in.cm_rep, &p_ioctl->out.init );\r
+\r
+ deref_al_obj( &h_qp->obj );\r
+\r
+ if( p_ioctl->out.status != IB_SUCCESS )\r
+ {\r
+/* NOTE: the goto above jumps into this block (legal C) so failure\r
+ * paths always clear the returned QP-modify attributes. */\r
+done:\r
+ cl_memclr( &p_ioctl->out.init, sizeof(ib_qp_mod_t) );\r
+ }\r
+\r
+ AL_EXIT( AL_DBG_CM );\r
+ return CL_SUCCESS;\r
+}\r
+\r
+\r
+/*\r
+ * Process a UAL_CEP_SEND_REP IOCTL: send the previously prepared REP on\r
+ * the CEP identified by the CID in the input buffer.  The AL status is\r
+ * returned in the output buffer.\r
+ */\r
+static cl_status_t\r
+proxy_cep_send_rep(\r
+ IN void *p_open_context,\r
+ IN cl_ioctl_handle_t h_ioctl,\r
+ OUT size_t *p_ret_bytes )\r
+{\r
+ al_dev_open_context_t *p_context;\r
+\r
+ AL_ENTER( AL_DBG_CM );\r
+\r
+ p_context = (al_dev_open_context_t*)p_open_context;\r
+\r
+ /* Validate user parameters. */\r
+ if( cl_ioctl_in_size( h_ioctl ) != sizeof(net32_t) ||\r
+ cl_ioctl_out_size( h_ioctl ) != sizeof(ib_api_status_t) )\r
+ {\r
+ AL_EXIT( AL_DBG_CM );\r
+ return CL_INVALID_PARAMETER;\r
+ }\r
+\r
+ (*(ib_api_status_t*)cl_ioctl_out_buf( h_ioctl )) = al_cep_send_rep(\r
+ p_context->h_al, *(net32_t*)cl_ioctl_in_buf( h_ioctl ) );\r
+\r
+ *p_ret_bytes = sizeof(ib_api_status_t);\r
+\r
+ AL_EXIT( AL_DBG_CM );\r
+ return CL_SUCCESS;\r
+}\r
+\r
+\r
+/*\r
+ * Process a UAL_CEP_GET_RTR IOCTL: return the QP-modify attributes needed\r
+ * to transition the QP to RTR for the CEP identified by the input CID.\r
+ * On failure the attribute block is zeroed so no stale kernel data is\r
+ * returned to user-mode.\r
+ */\r
+static cl_status_t\r
+proxy_cep_get_rtr(\r
+ IN void *p_open_context,\r
+ IN cl_ioctl_handle_t h_ioctl,\r
+ OUT size_t *p_ret_bytes )\r
+{\r
+ al_dev_open_context_t *p_context;\r
+ ual_cep_get_rtr_ioctl_t *p_ioctl;\r
+\r
+ AL_ENTER( AL_DBG_CM );\r
+\r
+ p_context = (al_dev_open_context_t*)p_open_context;\r
+ p_ioctl = (ual_cep_get_rtr_ioctl_t*)cl_ioctl_out_buf( h_ioctl );\r
+\r
+ /* Validate user parameters. */\r
+ if( cl_ioctl_in_size( h_ioctl ) != sizeof(net32_t) ||\r
+ cl_ioctl_out_size( h_ioctl ) != sizeof(ual_cep_get_rtr_ioctl_t) )\r
+ {\r
+ AL_EXIT( AL_DBG_CM );\r
+ return CL_INVALID_PARAMETER;\r
+ }\r
+\r
+ *p_ret_bytes = sizeof(ual_cep_get_rtr_ioctl_t);\r
+\r
+ p_ioctl->status = al_cep_get_rtr_attr( p_context->h_al,\r
+ *(net32_t*)cl_ioctl_in_buf( h_ioctl ), &p_ioctl->rtr );\r
+\r
+ if( p_ioctl->status != IB_SUCCESS )\r
+ cl_memclr( &p_ioctl->rtr, sizeof(ib_qp_mod_t) );\r
+\r
+ AL_EXIT( AL_DBG_CM );\r
+ return CL_SUCCESS;\r
+}\r
+\r
+\r
+/*\r
+ * Process a UAL_CEP_GET_RTS IOCTL: return the QP-modify attributes needed\r
+ * to transition the QP to RTS for the CEP identified by the input CID.\r
+ * On failure the attribute block is zeroed so no stale kernel data is\r
+ * returned to user-mode.\r
+ */\r
+static cl_status_t\r
+proxy_cep_get_rts(\r
+ IN void *p_open_context,\r
+ IN cl_ioctl_handle_t h_ioctl,\r
+ OUT size_t *p_ret_bytes )\r
+{\r
+ al_dev_open_context_t *p_context;\r
+ ual_cep_get_rts_ioctl_t *p_ioctl;\r
+\r
+ AL_ENTER( AL_DBG_CM );\r
+\r
+ p_context = (al_dev_open_context_t*)p_open_context;\r
+ p_ioctl = (ual_cep_get_rts_ioctl_t*)cl_ioctl_out_buf( h_ioctl );\r
+\r
+ /* Validate user parameters. */\r
+ if( cl_ioctl_in_size( h_ioctl ) != sizeof(net32_t) ||\r
+ cl_ioctl_out_size( h_ioctl ) != sizeof(ual_cep_get_rts_ioctl_t) )\r
+ {\r
+ AL_EXIT( AL_DBG_CM );\r
+ return CL_INVALID_PARAMETER;\r
+ }\r
+\r
+ *p_ret_bytes = sizeof(ual_cep_get_rts_ioctl_t);\r
+\r
+ p_ioctl->status = al_cep_get_rts_attr( p_context->h_al,\r
+ *(net32_t*)cl_ioctl_in_buf( h_ioctl ), &p_ioctl->rts );\r
+\r
+ if( p_ioctl->status != IB_SUCCESS )\r
+ cl_memclr( &p_ioctl->rts, sizeof(ib_qp_mod_t) );\r
+\r
+ AL_EXIT( AL_DBG_CM );\r
+ return CL_SUCCESS;\r
+}\r
+\r
+\r
+/*\r
+ * Process a UAL_CEP_RTU IOCTL: send an RTU (ready-to-use) with optional\r
+ * private data on the CEP identified by cid.  The AL status is returned\r
+ * in the output buffer.\r
+ */\r
+static cl_status_t\r
+proxy_cep_rtu(\r
+ IN void *p_open_context,\r
+ IN cl_ioctl_handle_t h_ioctl,\r
+ OUT size_t *p_ret_bytes )\r
+{\r
+ al_dev_open_context_t *p_context;\r
+ ual_cep_rtu_ioctl_t *p_ioctl;\r
+ ib_api_status_t status;\r
+\r
+ AL_ENTER( AL_DBG_CM );\r
+\r
+ p_context = (al_dev_open_context_t*)p_open_context;\r
+ p_ioctl = (ual_cep_rtu_ioctl_t*)cl_ioctl_in_buf( h_ioctl );\r
+\r
+ /* Validate user parameters. */\r
+ if( cl_ioctl_in_size( h_ioctl ) != sizeof(ual_cep_rtu_ioctl_t) ||\r
+ cl_ioctl_out_size( h_ioctl ) != sizeof(ib_api_status_t) )\r
+ {\r
+ AL_EXIT( AL_DBG_CM );\r
+ return CL_INVALID_PARAMETER;\r
+ }\r
+\r
+ /* The private data lives in our kernel copy of the IOCTL buffer. */\r
+ status = al_cep_rtu( p_context->h_al,\r
+ p_ioctl->cid, p_ioctl->pdata, p_ioctl->pdata_len );\r
+\r
+ (*(ib_api_status_t*)cl_ioctl_out_buf( h_ioctl )) = status;\r
+\r
+ *p_ret_bytes = sizeof(ib_api_status_t);\r
+\r
+ AL_EXIT( AL_DBG_CM );\r
+ return CL_SUCCESS;\r
+}\r
+\r
+\r
+/*\r
+ * Process a UAL_CEP_REJ IOCTL: send a REJ with the given rejection\r
+ * status, optional ARI, and optional private data on the CEP identified\r
+ * by cid.  The AL status is returned in the output buffer.\r
+ */\r
+static cl_status_t\r
+proxy_cep_rej(\r
+ IN void *p_open_context,\r
+ IN cl_ioctl_handle_t h_ioctl,\r
+ OUT size_t *p_ret_bytes )\r
+{\r
+ al_dev_open_context_t *p_context;\r
+ ual_cep_rej_ioctl_t *p_ioctl;\r
+\r
+ AL_ENTER( AL_DBG_CM );\r
+\r
+ p_context = (al_dev_open_context_t*)p_open_context;\r
+ p_ioctl = (ual_cep_rej_ioctl_t*)cl_ioctl_in_buf( h_ioctl );\r
+\r
+ /* Validate user parameters. */\r
+ if( cl_ioctl_in_size( h_ioctl ) != sizeof(ual_cep_rej_ioctl_t) ||\r
+ cl_ioctl_out_size( h_ioctl ) != sizeof(ib_api_status_t) )\r
+ {\r
+ AL_EXIT( AL_DBG_CM );\r
+ return CL_INVALID_PARAMETER;\r
+ }\r
+\r
+ (*(ib_api_status_t*)cl_ioctl_out_buf( h_ioctl )) = al_cep_rej(\r
+ p_context->h_al, p_ioctl->cid, p_ioctl->rej_status, p_ioctl->ari,\r
+ p_ioctl->ari_len, p_ioctl->pdata, p_ioctl->pdata_len );\r
+\r
+ *p_ret_bytes = sizeof(ib_api_status_t);\r
+\r
+ AL_EXIT( AL_DBG_CM );\r
+ return CL_SUCCESS;\r
+}\r
+\r
+\r
+/*\r
+ * Process a UAL_CEP_MRA IOCTL: send an MRA (message-received-ack) on the\r
+ * CEP identified by cid.  The embedded private-data pointer is redirected\r
+ * to the kernel copy in the input buffer; the AL status is returned in\r
+ * the output buffer.\r
+ */\r
+static cl_status_t\r
+proxy_cep_mra(\r
+ IN void *p_open_context,\r
+ IN cl_ioctl_handle_t h_ioctl,\r
+ OUT size_t *p_ret_bytes )\r
+{\r
+ al_dev_open_context_t *p_context;\r
+ ual_cep_mra_ioctl_t *p_ioctl;\r
+\r
+ AL_ENTER( AL_DBG_CM );\r
+\r
+ p_context = (al_dev_open_context_t*)p_open_context;\r
+ p_ioctl = (ual_cep_mra_ioctl_t*)cl_ioctl_in_buf( h_ioctl );\r
+\r
+ /* Validate user parameters. */\r
+ if( cl_ioctl_in_size( h_ioctl ) != sizeof(ual_cep_mra_ioctl_t) ||\r
+ cl_ioctl_out_size( h_ioctl ) != sizeof(ib_api_status_t) )\r
+ {\r
+ AL_EXIT( AL_DBG_CM );\r
+ return CL_INVALID_PARAMETER;\r
+ }\r
+\r
+ /* Redirect the private-data pointer to the kernel copy. */\r
+ p_ioctl->cm_mra.p_mra_pdata = p_ioctl->pdata;\r
+\r
+ (*(ib_api_status_t*)cl_ioctl_out_buf( h_ioctl )) = al_cep_mra(\r
+ p_context->h_al, p_ioctl->cid, &p_ioctl->cm_mra );\r
+\r
+ *p_ret_bytes = sizeof(ib_api_status_t);\r
+\r
+ AL_EXIT( AL_DBG_CM );\r
+ return CL_SUCCESS;\r
+}\r
+\r
+\r
+/*\r
+ * Process a UAL_CEP_LAP IOCTL: send a LAP (load alternate path) request\r
+ * on the CEP identified by cid.  Embedded user-mode pointers in the\r
+ * marshaled ib_cm_lap_t are redirected to the kernel copies in the input\r
+ * buffer and the user-mode QP handle is translated to the kernel handle.\r
+ * The AL status is returned in the output buffer.\r
+ */\r
+static cl_status_t\r
+proxy_cep_lap(\r
+ IN void *p_open_context,\r
+ IN cl_ioctl_handle_t h_ioctl,\r
+ OUT size_t *p_ret_bytes )\r
+{\r
+ al_dev_open_context_t *p_context;\r
+ ual_cep_lap_ioctl_t *p_ioctl;\r
+ ib_api_status_t status;\r
+ ib_qp_handle_t h_qp;\r
+\r
+ AL_ENTER( AL_DBG_CM );\r
+\r
+ p_context = (al_dev_open_context_t*)p_open_context;\r
+ p_ioctl = (ual_cep_lap_ioctl_t*)cl_ioctl_in_buf( h_ioctl );\r
+\r
+ /* Validate user parameters. */\r
+ if( cl_ioctl_in_size( h_ioctl ) != sizeof(ual_cep_lap_ioctl_t) ||\r
+ cl_ioctl_out_size( h_ioctl ) != sizeof(ib_api_status_t) )\r
+ {\r
+ AL_EXIT( AL_DBG_CM );\r
+ return CL_INVALID_PARAMETER;\r
+ }\r
+\r
+ *p_ret_bytes = sizeof(ib_api_status_t);\r
+\r
+ /*\r
+ * Redirect the embedded pointers to the kernel copies in this buffer\r
+ * so we never dereference raw user-mode pointers.\r
+ */\r
+ p_ioctl->cm_lap.p_alt_path = &p_ioctl->alt_path;\r
+ if( p_ioctl->cm_lap.p_lap_pdata )\r
+ p_ioctl->cm_lap.p_lap_pdata = p_ioctl->pdata;\r
+\r
+ /* Get the kernel QP handle. */\r
+ h_qp = (ib_qp_handle_t)al_hdl_ref(\r
+ p_context->h_al, (uint64_t)p_ioctl->cm_lap.h_qp, AL_OBJ_TYPE_H_QP );\r
+ if( !h_qp )\r
+ {\r
+ status = IB_INVALID_QP_HANDLE;\r
+ goto done;\r
+ }\r
+\r
+ p_ioctl->cm_lap.h_qp = h_qp;\r
+\r
+ status = al_cep_lap( p_context->h_al, p_ioctl->cid, &p_ioctl->cm_lap );\r
+\r
+ deref_al_obj( &h_qp->obj );\r
+\r
+done:\r
+ (*(ib_api_status_t*)cl_ioctl_out_buf( h_ioctl )) = status;\r
+\r
+ AL_EXIT( AL_DBG_CM );\r
+ return CL_SUCCESS;\r
+}\r
+\r
+\r
+/*\r
+ * Process a UAL_CEP_PRE_APR IOCTL: prepare an APR (alternate path\r
+ * response) on the CEP.  Embedded user-mode pointers are redirected to\r
+ * the kernel copies in the input buffer and the user-mode QP handle is\r
+ * translated to the kernel handle.  The QP-modify attributes for the\r
+ * alternate path are returned in the output buffer.\r
+ */\r
+static cl_status_t\r
+proxy_cep_pre_apr(\r
+ IN void *p_open_context,\r
+ IN cl_ioctl_handle_t h_ioctl,\r
+ OUT size_t *p_ret_bytes )\r
+{\r
+ al_dev_open_context_t *p_context;\r
+ ual_cep_apr_ioctl_t *p_ioctl;\r
+ ib_qp_handle_t h_qp;\r
+\r
+ AL_ENTER( AL_DBG_CM );\r
+\r
+ p_context = (al_dev_open_context_t*)p_open_context;\r
+ p_ioctl = (ual_cep_apr_ioctl_t*)cl_ioctl_in_buf( h_ioctl );\r
+\r
+ /* Validate user parameters. */\r
+ if( cl_ioctl_in_size( h_ioctl ) != sizeof(struct _ual_cep_apr_ioctl_in) ||\r
+ cl_ioctl_out_size( h_ioctl ) != sizeof(struct _ual_cep_apr_ioctl_out) )\r
+ {\r
+ AL_EXIT( AL_DBG_CM );\r
+ return CL_INVALID_PARAMETER;\r
+ }\r
+\r
+ *p_ret_bytes = sizeof(struct _ual_cep_apr_ioctl_out);\r
+\r
+ /* Redirect embedded pointers to the kernel copies in this buffer. */\r
+ if( p_ioctl->in.cm_apr.p_info )\r
+ p_ioctl->in.cm_apr.p_info = (ib_apr_info_t*)p_ioctl->in.apr_info;\r
+ if( p_ioctl->in.cm_apr.p_apr_pdata )\r
+ p_ioctl->in.cm_apr.p_apr_pdata = p_ioctl->in.pdata;\r
+\r
+ /* Get the kernel QP handle. */\r
+ h_qp = (ib_qp_handle_t)al_hdl_ref(\r
+ p_context->h_al, (uint64_t)p_ioctl->in.cm_apr.h_qp, AL_OBJ_TYPE_H_QP );\r
+ if( !h_qp )\r
+ {\r
+ p_ioctl->out.status = IB_INVALID_QP_HANDLE;\r
+ goto done;\r
+ }\r
+\r
+ p_ioctl->in.cm_apr.h_qp = h_qp;\r
+\r
+ p_ioctl->out.status = al_cep_pre_apr( p_context->h_al, p_ioctl->in.cid,\r
+ &p_ioctl->in.cm_apr, &p_ioctl->out.apr );\r
+\r
+ deref_al_obj( &h_qp->obj );\r
+\r
+ if( p_ioctl->out.status != IB_SUCCESS )\r
+ {\r
+/* NOTE: the goto above jumps into this block (legal C) so failure\r
+ * paths always clear the returned QP-modify attributes. */\r
+done:\r
+ cl_memclr( &p_ioctl->out.apr, sizeof(ib_qp_mod_t) );\r
+ }\r
+\r
+ AL_EXIT( AL_DBG_CM );\r
+ return CL_SUCCESS;\r
+}\r
+\r
+\r
+/*\r
+ * Process a UAL_CEP_SEND_APR IOCTL: send the previously prepared APR on\r
+ * the CEP identified by the CID in the input buffer.  The AL status is\r
+ * returned in the output buffer.\r
+ */\r
+static cl_status_t\r
+proxy_cep_send_apr(\r
+ IN void *p_open_context,\r
+ IN cl_ioctl_handle_t h_ioctl,\r
+ OUT size_t *p_ret_bytes )\r
+{\r
+ al_dev_open_context_t *p_context;\r
+\r
+ AL_ENTER( AL_DBG_CM );\r
+\r
+ p_context = (al_dev_open_context_t*)p_open_context;\r
+\r
+ /* Validate user parameters. */\r
+ if( cl_ioctl_in_size( h_ioctl ) != sizeof(net32_t) ||\r
+ cl_ioctl_out_size( h_ioctl ) != sizeof(ib_api_status_t) )\r
+ {\r
+ AL_EXIT( AL_DBG_CM );\r
+ return CL_INVALID_PARAMETER;\r
+ }\r
+\r
+ (*(ib_api_status_t*)cl_ioctl_out_buf( h_ioctl )) = al_cep_send_apr(\r
+ p_context->h_al, *(net32_t*)cl_ioctl_in_buf( h_ioctl ) );\r
+\r
+ /*\r
+ * Report the output size so the status written above is actually\r
+ * copied back to the user (matches the other send handlers).\r
+ */\r
+ *p_ret_bytes = sizeof(ib_api_status_t);\r
+\r
+ AL_EXIT( AL_DBG_CM );\r
+ return CL_SUCCESS;\r
+}\r
+\r
+\r
+/*\r
+ * Process a UAL_CEP_DREQ IOCTL: send a DREQ (disconnect request) with\r
+ * optional private data on the CEP identified by cid.  The AL status is\r
+ * returned in the output buffer.\r
+ */\r
+static cl_status_t\r
+proxy_cep_dreq(\r
+ IN void *p_open_context,\r
+ IN cl_ioctl_handle_t h_ioctl,\r
+ OUT size_t *p_ret_bytes )\r
+{\r
+ al_dev_open_context_t *p_context;\r
+ ual_cep_dreq_ioctl_t *p_ioctl;\r
+ ib_api_status_t status;\r
+\r
+ AL_ENTER( AL_DBG_CM );\r
+\r
+ p_context = (al_dev_open_context_t*)p_open_context;\r
+ p_ioctl = (ual_cep_dreq_ioctl_t*)cl_ioctl_in_buf( h_ioctl );\r
+\r
+ /* Validate user parameters. */\r
+ if( cl_ioctl_in_size( h_ioctl ) != sizeof(ual_cep_dreq_ioctl_t) ||\r
+ cl_ioctl_out_size( h_ioctl ) != sizeof(ib_api_status_t) )\r
+ {\r
+ AL_EXIT( AL_DBG_CM );\r
+ return CL_INVALID_PARAMETER;\r
+ }\r
+\r
+ /* The private data lives in our kernel copy of the IOCTL buffer. */\r
+ status = al_cep_dreq( p_context->h_al,\r
+ p_ioctl->cid, p_ioctl->pdata, p_ioctl->pdata_len );\r
+\r
+ (*(ib_api_status_t*)cl_ioctl_out_buf( h_ioctl )) = status;\r
+\r
+ *p_ret_bytes = sizeof(ib_api_status_t);\r
+\r
+ AL_EXIT( AL_DBG_CM );\r
+ return CL_SUCCESS;\r
+}\r
+\r
+\r
+/*\r
+ * Process a UAL_CEP_DREP IOCTL: send a DREP (disconnect reply) on the\r
+ * CEP identified by cid.  The embedded private-data pointer is redirected\r
+ * to the kernel copy; the AL status is returned in the output buffer.\r
+ */\r
+static cl_status_t\r
+proxy_cep_drep(\r
+ IN void *p_open_context,\r
+ IN cl_ioctl_handle_t h_ioctl,\r
+ OUT size_t *p_ret_bytes )\r
+{\r
+ al_dev_open_context_t *p_context;\r
+ ual_cep_drep_ioctl_t *p_ioctl;\r
+\r
+ AL_ENTER( AL_DBG_CM );\r
+\r
+ p_context = (al_dev_open_context_t*)p_open_context;\r
+ p_ioctl = (ual_cep_drep_ioctl_t*)cl_ioctl_in_buf( h_ioctl );\r
+\r
+ /* Validate user parameters. */\r
+ if( cl_ioctl_in_size( h_ioctl ) != sizeof(ual_cep_drep_ioctl_t) ||\r
+ cl_ioctl_out_size( h_ioctl ) != sizeof(ib_api_status_t) )\r
+ {\r
+ AL_EXIT( AL_DBG_CM );\r
+ return CL_INVALID_PARAMETER;\r
+ }\r
+\r
+ /* Redirect the private-data pointer to the kernel copy. */\r
+ p_ioctl->cm_drep.p_drep_pdata = p_ioctl->pdata;\r
+\r
+ (*(ib_api_status_t*)cl_ioctl_out_buf( h_ioctl )) = al_cep_drep(\r
+ p_context->h_al, p_ioctl->cid, &p_ioctl->cm_drep );\r
+\r
+ *p_ret_bytes = sizeof(ib_api_status_t);\r
+\r
+ AL_EXIT( AL_DBG_CM );\r
+ return CL_SUCCESS;\r
+}\r
+\r
+\r
+/*\r
+ * Process a UAL_CEP_GET_TIMEWAIT IOCTL: return the timewait interval (in\r
+ * microseconds) for the CEP identified by the input CID, along with the\r
+ * AL status, in the output buffer.\r
+ */\r
+static cl_status_t\r
+proxy_cep_get_timewait(\r
+ IN void *p_open_context,\r
+ IN cl_ioctl_handle_t h_ioctl,\r
+ OUT size_t *p_ret_bytes )\r
+{\r
+ al_dev_open_context_t *p_context;\r
+ ual_cep_get_timewait_ioctl_t *p_ioctl;\r
+\r
+ AL_ENTER( AL_DBG_CM );\r
+\r
+ p_context = (al_dev_open_context_t*)p_open_context;\r
+ p_ioctl = (ual_cep_get_timewait_ioctl_t*)cl_ioctl_out_buf( h_ioctl );\r
+\r
+ /* Validate user parameters. */\r
+ if( cl_ioctl_in_size( h_ioctl ) != sizeof(net32_t) ||\r
+ cl_ioctl_out_size( h_ioctl ) != sizeof(ual_cep_get_timewait_ioctl_t) )\r
+ {\r
+ AL_EXIT( AL_DBG_CM );\r
+ return CL_INVALID_PARAMETER;\r
+ }\r
+\r
+ p_ioctl->status = al_cep_get_timewait( p_context->h_al,\r
+ *(net32_t*)cl_ioctl_in_buf( h_ioctl ), &p_ioctl->timewait_us );\r
+\r
+ *p_ret_bytes = sizeof(ual_cep_get_timewait_ioctl_t);\r
+\r
+ AL_EXIT( AL_DBG_CM );\r
+ return CL_SUCCESS;\r
+}\r
+\r
+\r
+/*\r
+ * Process a UAL_CEP_POLL IOCTL: poll the CEP identified by the input CID\r
+ * for a pending event.  On success the received MAD (header, GRH if\r
+ * valid, and MAD payload) plus any new CEP information is copied into\r
+ * the output buffer and the kernel MAD element is returned to its pool.\r
+ * On failure the returned buffers are zeroed so no stale kernel data\r
+ * reaches user-mode.\r
+ */\r
+static cl_status_t\r
+proxy_cep_poll(\r
+ IN void *p_open_context,\r
+ IN cl_ioctl_handle_t h_ioctl,\r
+ OUT size_t *p_ret_bytes )\r
+{\r
+ al_dev_open_context_t *p_context;\r
+ ual_cep_poll_ioctl_t *p_ioctl;\r
+ ib_mad_element_t *p_mad = NULL;\r
+\r
+ AL_ENTER( AL_DBG_CM );\r
+\r
+ p_context = (al_dev_open_context_t*)p_open_context;\r
+ p_ioctl = (ual_cep_poll_ioctl_t*)cl_ioctl_out_buf( h_ioctl );\r
+\r
+ /* Validate user parameters. */\r
+ if( cl_ioctl_in_size( h_ioctl ) != sizeof(net32_t) ||\r
+ cl_ioctl_out_size( h_ioctl ) != sizeof(ual_cep_poll_ioctl_t) )\r
+ {\r
+ AL_EXIT( AL_DBG_CM );\r
+ return CL_INVALID_PARAMETER;\r
+ }\r
+\r
+ *p_ret_bytes = sizeof(ual_cep_poll_ioctl_t);\r
+\r
+ p_ioctl->status = al_cep_poll( p_context->h_al,\r
+ *(net32_t*)cl_ioctl_in_buf( h_ioctl ), &p_ioctl->new_cep, &p_mad );\r
+\r
+ if( p_ioctl->status == IB_SUCCESS )\r
+ {\r
+ /* Copy the MAD for user consumption and free the it. */\r
+ CL_ASSERT( p_mad );\r
+ p_ioctl->element = *p_mad;\r
+ if( p_mad->grh_valid )\r
+ p_ioctl->grh = *p_mad->p_grh;\r
+ else\r
+ cl_memclr( &p_ioctl->grh, sizeof(ib_grh_t) );\r
+ cl_memcpy( p_ioctl->mad_buf, p_mad->p_mad_buf, MAD_BLOCK_SIZE );\r
+ ib_put_mad( p_mad );\r
+ }\r
+ else\r
+ {\r
+ /*\r
+ * Clear everything we would otherwise return.  Note that the\r
+ * MAD buffer is MAD_BLOCK_SIZE bytes - the previous code used\r
+ * sizeof(MAD_BLOCK_SIZE), which only cleared sizeof(int) bytes.\r
+ */\r
+ cl_memclr( &p_ioctl->element, sizeof(ib_mad_element_t) );\r
+ cl_memclr( &p_ioctl->grh, sizeof(ib_grh_t) );\r
+ cl_memclr( p_ioctl->mad_buf, MAD_BLOCK_SIZE );\r
+ cl_memclr( &p_ioctl->new_cep, sizeof(ib_cep_t) );\r
+ }\r
+\r
+ AL_EXIT( AL_DBG_CM );\r
+ return CL_SUCCESS;\r
+}\r
+\r
+\r
+/*\r
+ * Process a UAL_CEP_GET_EVENT IOCTL: queue the IRP on the CEP identified\r
+ * by the input CID so it completes when an event occurs.  The request\r
+ * must arrive on the file object whose context was typed AL_OBJ_TYPE_CM.\r
+ * Returns CL_PENDING when the IRP was successfully queued.\r
+ */\r
+static cl_status_t\r
+proxy_cep_get_event(\r
+ IN void *p_open_context,\r
+ IN cl_ioctl_handle_t h_ioctl,\r
+ OUT size_t *p_ret_bytes )\r
+{\r
+ NTSTATUS status;\r
+ IO_STACK_LOCATION *p_io_stack;\r
+ al_dev_open_context_t *p_context;\r
+ net32_t cid;\r
+\r
+ AL_ENTER( AL_DBG_CM );\r
+\r
+ UNUSED_PARAM( p_ret_bytes );\r
+\r
+ p_context = p_open_context;\r
+\r
+ /* Only the CM async file object may issue this request. */\r
+ p_io_stack = IoGetCurrentIrpStackLocation( h_ioctl );\r
+ if( (uintn_t)p_io_stack->FileObject->FsContext2 != AL_OBJ_TYPE_CM )\r
+ {\r
+ AL_TRACE_EXIT( AL_DBG_ERROR,\r
+ ("Invalid file object type for request: %d\n",\r
+ p_io_stack->FileObject->FsContext2) );\r
+ return CL_INVALID_PARAMETER;\r
+ }\r
+\r
+ /* Check the size of the ioctl */\r
+ if( cl_ioctl_in_size( h_ioctl ) != sizeof(net32_t) )\r
+ {\r
+ AL_TRACE_EXIT( AL_DBG_ERROR, ("Invalid IOCTL input buffer.\n") );\r
+ return CL_INVALID_PARAMETER;\r
+ }\r
+\r
+ cid = *(net32_t*)cl_ioctl_in_buf( h_ioctl );\r
+\r
+ status = al_cep_queue_irp( p_context->h_al, cid, h_ioctl );\r
+ if( status != STATUS_PENDING )\r
+ {\r
+ /* Invalid CID. Complete the request. */\r
+ AL_EXIT( AL_DBG_CM );\r
+ return CL_INVALID_PARAMETER;\r
+ }\r
+\r
+ AL_EXIT( AL_DBG_CM );\r
+ return CL_PENDING;\r
+}\r
+\r
+\r
+/*\r
+ * Top-level dispatcher for all UAL_CEP_* IOCTLs.  Extracts the per-open\r
+ * device context from the file object and routes the request to the\r
+ * matching proxy_cep_* handler.  Unknown control codes and requests on\r
+ * a file object with no context yield CL_INVALID_PARAMETER.\r
+ */\r
+cl_status_t cep_ioctl(\r
+ IN cl_ioctl_handle_t h_ioctl,\r
+ OUT size_t *p_ret_bytes )\r
+{\r
+ cl_status_t cl_status;\r
+ IO_STACK_LOCATION *p_io_stack;\r
+ void *p_context;\r
+\r
+ AL_ENTER( AL_DBG_DEV );\r
+\r
+ CL_ASSERT( h_ioctl && p_ret_bytes );\r
+\r
+ p_io_stack = IoGetCurrentIrpStackLocation( h_ioctl );\r
+ p_context = p_io_stack->FileObject->FsContext;\r
+\r
+ if( !p_context )\r
+ {\r
+ AL_EXIT( AL_DBG_DEV );\r
+ return CL_INVALID_PARAMETER;\r
+ }\r
+\r
+ switch( cl_ioctl_ctl_code( h_ioctl ) )\r
+ {\r
+ case UAL_CREATE_CEP:\r
+ cl_status = proxy_create_cep( p_context, h_ioctl, p_ret_bytes );\r
+ break;\r
+ case UAL_DESTROY_CEP:\r
+ cl_status = proxy_destroy_cep( p_context, h_ioctl, p_ret_bytes );\r
+ break;\r
+ case UAL_CEP_LISTEN:\r
+ cl_status = proxy_cep_listen( p_context, h_ioctl, p_ret_bytes );\r
+ break;\r
+ case UAL_CEP_PRE_REQ:\r
+ cl_status = proxy_cep_pre_req( p_context, h_ioctl, p_ret_bytes );\r
+ break;\r
+ case UAL_CEP_SEND_REQ:\r
+ cl_status = proxy_cep_send_req( p_context, h_ioctl, p_ret_bytes );\r
+ break;\r
+ case UAL_CEP_PRE_REP:\r
+ cl_status = proxy_cep_pre_rep( p_context, h_ioctl, p_ret_bytes );\r
+ break;\r
+ case UAL_CEP_SEND_REP:\r
+ cl_status = proxy_cep_send_rep( p_context, h_ioctl, p_ret_bytes );\r
+ break;\r
+ case UAL_CEP_GET_RTR:\r
+ cl_status = proxy_cep_get_rtr( p_context, h_ioctl, p_ret_bytes );\r
+ break;\r
+ case UAL_CEP_GET_RTS:\r
+ cl_status = proxy_cep_get_rts( p_context, h_ioctl, p_ret_bytes );\r
+ break;\r
+ case UAL_CEP_RTU:\r
+ cl_status = proxy_cep_rtu( p_context, h_ioctl, p_ret_bytes );\r
+ break;\r
+ case UAL_CEP_REJ:\r
+ cl_status = proxy_cep_rej( p_context, h_ioctl, p_ret_bytes );\r
+ break;\r
+ case UAL_CEP_MRA:\r
+ cl_status = proxy_cep_mra( p_context, h_ioctl, p_ret_bytes );\r
+ break;\r
+ case UAL_CEP_LAP:\r
+ cl_status = proxy_cep_lap( p_context, h_ioctl, p_ret_bytes );\r
+ break;\r
+ case UAL_CEP_PRE_APR:\r
+ cl_status = proxy_cep_pre_apr( p_context, h_ioctl, p_ret_bytes );\r
+ break;\r
+ case UAL_CEP_SEND_APR:\r
+ cl_status = proxy_cep_send_apr( p_context, h_ioctl, p_ret_bytes );\r
+ break;\r
+ case UAL_CEP_DREQ:\r
+ cl_status = proxy_cep_dreq( p_context, h_ioctl, p_ret_bytes );\r
+ break;\r
+ case UAL_CEP_DREP:\r
+ cl_status = proxy_cep_drep( p_context, h_ioctl, p_ret_bytes );\r
+ break;\r
+ case UAL_CEP_GET_TIMEWAIT:\r
+ cl_status = proxy_cep_get_timewait( p_context, h_ioctl, p_ret_bytes );\r
+ break;\r
+ case UAL_CEP_GET_EVENT:\r
+ cl_status = proxy_cep_get_event( p_context, h_ioctl, p_ret_bytes );\r
+ break;\r
+ case UAL_CEP_POLL:\r
+ cl_status = proxy_cep_poll( p_context, h_ioctl, p_ret_bytes );\r
+ break;\r
+ default:\r
+ cl_status = CL_INVALID_PARAMETER;\r
+ break;\r
+ }\r
+\r
+ AL_EXIT( AL_DBG_DEV );\r
+ return cl_status;\r
+}\r
#include "al.h"\r
#include "al_debug.h"\r
#include "al_dev.h"\r
-#include "al_cm.h"\r
+#include "al_cm_cep.h"\r
#include "al_qp.h"\r
#include "al_proxy.h"\r
\r
p_context = p_open_context;\r
\r
p_io_stack = IoGetCurrentIrpStackLocation( h_ioctl );\r
- if( (uintn_t)p_io_stack->FileObject->FsContext2 != AL_OBJ_TYPE_SA_REQ_SVC )\r
+ /*\r
+ * We support SA requests coming in either through the main file object\r
+ * or the async file handle.\r
+ */\r
+ if( p_io_stack->FileObject->FsContext2 &&\r
+ (uintn_t)p_io_stack->FileObject->FsContext2 != AL_OBJ_TYPE_SA_REQ_SVC )\r
{\r
AL_TRACE_EXIT( AL_DBG_ERROR,\r
("Invalid file object type for request: %d\n",\r
/* Synchronize with callbacks. */\r
cl_spinlock_acquire( &p_context->h_al->obj.lock );\r
\r
+ /*\r
+ * We never pass the user-mode flag when sending SA requests - the\r
+ * I/O manager will perform all synchronization to make this IRP sync\r
+ * if it needs to.\r
+ */\r
ib_status = al_send_sa_req( p_sa_req, p_ioctl->in.port_guid,\r
p_ioctl->in.timeout_ms, p_ioctl->in.retry_cnt,\r
- &p_ioctl->in.sa_req );\r
+ &p_ioctl->in.sa_req, 0 );\r
if( ib_status == IB_SUCCESS )\r
{\r
/* Hold a reference on the proxy context until the request completes. */\r
IN const net64_t port_guid,\r
IN const uint32_t timeout_ms,\r
IN const uint32_t retry_cnt,\r
- IN const ib_user_query_t* const p_sa_req_data )\r
+ IN const ib_user_query_t* const p_sa_req_data,\r
+ IN const ib_al_flags_t flags )\r
{\r
ib_api_status_t status;\r
sa_req_svc_t *p_sa_req_svc;\r
ib_mad_element_t *p_mad_request;\r
ib_mad_t *p_mad_hdr;\r
ib_sa_mad_t *p_sa_mad;\r
+ KEVENT event;\r
\r
CL_ENTER( AL_DBG_SA_REQ, g_al_dbg_lvl );\r
- \r
+\r
+ if( flags & IB_FLAGS_SYNC )\r
+ {\r
+ if( !cl_is_blockable() )\r
+ {\r
+ AL_TRACE_EXIT( AL_DBG_ERROR,\r
+ ("Thread context not blockable\n") );\r
+ return IB_INVALID_SETTING;\r
+ }\r
+\r
+ KeInitializeEvent( &event, NotificationEvent, FALSE );\r
+ p_sa_req->p_sync_event = &event;\r
+ }\r
+ else\r
+ {\r
+ p_sa_req->p_sync_event = NULL;\r
+ }\r
+\r
/* Locate the sa_req service to issue the sa_req on. */\r
p_sa_req->p_sa_req_svc = acquire_sa_req_svc( port_guid );\r
if( !p_sa_req->p_sa_req_svc )\r
ib_put_mad( p_mad_request );\r
deref_al_obj( &p_sa_req->p_sa_req_svc->obj );\r
}\r
+ else if( flags & IB_FLAGS_SYNC )\r
+ {\r
+ /* Wait for the MAD completion. */\r
+ KeWaitForSingleObject( &event, Executive, KernelMode, FALSE, NULL );\r
+ }\r
\r
CL_EXIT( AL_DBG_SA_REQ, g_al_dbg_lvl );\r
return status;\r
{\r
al_sa_req_t *p_sa_req;\r
sa_req_svc_t *p_sa_req_svc;\r
+ KEVENT *p_sync_event;\r
\r
CL_ENTER( AL_DBG_SA_REQ, g_al_dbg_lvl );\r
\r
\r
p_sa_req = p_request_mad->send_context1;\r
p_sa_req_svc = p_sa_req->p_sa_req_svc;\r
+ p_sync_event = p_sa_req->p_sync_event;\r
\r
p_sa_req->status = convert_wc_status( p_request_mad->status );\r
p_sa_req->pfn_sa_req_cb( p_sa_req, NULL );\r
+ if( p_sync_event )\r
+ KeSetEvent( p_sync_event, 0, FALSE );\r
deref_al_obj( &p_sa_req_svc->obj );\r
}\r
\r
al_sa_req_t *p_sa_req;\r
sa_req_svc_t *p_sa_req_svc;\r
ib_sa_mad_t *p_sa_mad;\r
+ KEVENT *p_sync_event;\r
\r
CL_ENTER( AL_DBG_SA_REQ, g_al_dbg_lvl );\r
\r
\r
p_sa_req = p_mad_response->send_context1;\r
p_sa_req_svc = p_sa_req->p_sa_req_svc;\r
+ p_sync_event = p_sa_req->p_sync_event;\r
\r
//*** check for SA redirection...\r
\r
/* Notify the requestor of the result. */\r
CL_TRACE( AL_DBG_SA_REQ, g_al_dbg_lvl, ("notifying user\n") );\r
p_sa_req->pfn_sa_req_cb( p_sa_req, p_mad_response );\r
+ if( p_sync_event )\r
+ KeSetEvent( p_sync_event, 0, FALSE );\r
deref_al_obj( &p_sa_req_svc->obj );\r
\r
CL_EXIT( AL_DBG_SA_REQ, g_al_dbg_lvl );\r
ual_av.c \\r
ual_ca.c \\r
ual_ci_ca.c \\r
- ual_cm.c \\r
+ ual_cm_cep.c \\r
ual_cq.c \\r
ual_dm.c \\r
ual_mad.c \\r
..\al_av.c \\r
..\al_ca.c \\r
..\al_ci_ca_shared.c \\r
- ..\al_cm_shared.c \\r
+ ..\al_cm_qp.c \\r
..\al_common.c \\r
..\al_cq.c \\r
..\al_dm.c \\r
\r
if( cl_status != CL_SUCCESS )\r
{\r
+ CL_ASSERT( cl_status != CL_PENDING );\r
AL_TRACE( AL_DBG_ERROR,\r
("Error performing IOCTL 0x%08x to AL driver (%s)\n",\r
command, CL_STATUS_MSG(cl_status)) );\r
- return IB_ERROR;\r
+ return CL_ERROR;\r
}\r
\r
AL_EXIT( AL_DBG_DEV );\r
--- /dev/null
+/*\r
+ * Copyright (c) 2005 SilverStorm Technologies. All rights reserved.\r
+ *\r
+ * This software is available to you under the OpenIB.org BSD license\r
+ * below:\r
+ *\r
+ * Redistribution and use in source and binary forms, with or\r
+ * without modification, are permitted provided that the following\r
+ * conditions are met:\r
+ *\r
+ * - Redistributions of source code must retain the above\r
+ * copyright notice, this list of conditions and the following\r
+ * disclaimer.\r
+ *\r
+ * - Redistributions in binary form must reproduce the above\r
+ * copyright notice, this list of conditions and the following\r
+ * disclaimer in the documentation and/or other materials\r
+ * provided with the distribution.\r
+ *\r
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,\r
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\r
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND\r
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS\r
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN\r
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN\r
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\r
+ * SOFTWARE.\r
+ *\r
+ * $Id$\r
+ */\r
+\r
+\r
+#include <iba/ib_al.h>\r
+#include <complib/cl_ptr_vector.h>\r
+#include <complib/cl_qlist.h>\r
+#include "al_common.h"\r
+#include "al_cm_cep.h"\r
+#include "al_cm_conn.h"\r
+#include "al_cm_sidr.h"\r
+#include "al_debug.h"\r
+#include "ib_common.h"\r
+#include "al_mgr.h"\r
+//#include "al_ca.h"\r
+#include "al.h"\r
+//#include "al_mad.h"\r
+#include "al_qp.h"\r
+\r
+\r
+#define UAL_CEP_MIN (512)\r
+#define UAL_CEP_GROW (256)\r
+\r
+\r
+/* Global connection manager object. */\r
+typedef struct _ual_cep_mgr\r
+{\r
+ al_obj_t obj;\r
+\r
+ cl_ptr_vector_t cep_vector;\r
+\r
+ /* File handle on which to issue query IOCTLs. */\r
+ HANDLE h_file;\r
+\r
+} ual_cep_mgr_t;\r
+\r
+\r
+typedef struct _al_ucep\r
+{\r
+ ib_cep_t cep;\r
+ al_pfn_cep_cb_t pfn_cb;\r
+ ib_al_handle_t h_al;\r
+ cl_list_item_t al_item;\r
+\r
+ ib_pfn_destroy_cb_t pfn_destroy_cb;\r
+\r
+ OVERLAPPED ov;\r
+ atomic32_t ref_cnt;\r
+\r
+} ucep_t;\r
+\r
+\r
+/* Global instance of the CM agent. */\r
+ual_cep_mgr_t *gp_cep_mgr = NULL;\r
+\r
+\r
+/*\r
+ * Frees the global CEP manager. Invoked during al_obj destruction.\r
+ */\r
+static void\r
+__free_cep_mgr(\r
+ IN al_obj_t* p_obj )\r
+{\r
+ AL_ENTER( AL_DBG_CM );\r
+\r
+ CL_ASSERT( &gp_cep_mgr->obj == p_obj );\r
+\r
+ if( gp_cep_mgr->h_file != INVALID_HANDLE_VALUE )\r
+ CloseHandle( gp_cep_mgr->h_file );\r
+\r
+ cl_ptr_vector_destroy( &gp_cep_mgr->cep_vector );\r
+\r
+ destroy_al_obj( p_obj );\r
+\r
+ cl_free( gp_cep_mgr );\r
+ gp_cep_mgr = NULL;\r
+\r
+ AL_EXIT( AL_DBG_CM );\r
+}\r
+\r
+\r
+/*\r
+ * Allocates and initialized the global user-mode CM agent.\r
+ */\r
+ib_api_status_t\r
+create_cep_mgr(\r
+ IN al_obj_t* const p_parent_obj )\r
+{\r
+ ib_api_status_t status;\r
+ cl_status_t cl_status;\r
+\r
+ AL_ENTER( AL_DBG_CM );\r
+\r
+ CL_ASSERT( gp_cep_mgr == NULL );\r
+\r
+ /* Allocate the global CM agent. */\r
+ gp_cep_mgr = (ual_cep_mgr_t*)cl_zalloc( sizeof(ual_cep_mgr_t) );\r
+ if( !gp_cep_mgr )\r
+ {\r
+ AL_TRACE_EXIT( AL_DBG_ERROR,\r
+ ("Failed allocation of global CEP manager.\n") );\r
+ return IB_INSUFFICIENT_MEMORY;\r
+ }\r
+\r
+ construct_al_obj( &gp_cep_mgr->obj, AL_OBJ_TYPE_CM );\r
+ cl_ptr_vector_construct( &gp_cep_mgr->cep_vector );\r
+ gp_cep_mgr->h_file = INVALID_HANDLE_VALUE;\r
+\r
+ status = init_al_obj( &gp_cep_mgr->obj, NULL, FALSE,\r
+ NULL, NULL, __free_cep_mgr );\r
+ if( status != IB_SUCCESS )\r
+ {\r
+ __free_cep_mgr( &gp_cep_mgr->obj );\r
+ AL_TRACE_EXIT( AL_DBG_ERROR,\r
+ ("init_al_obj failed with status %s.\n", ib_get_err_str(status)) );\r
+ return status;\r
+ }\r
+ /* Attach to the parent object. */\r
+ status = attach_al_obj( p_parent_obj, &gp_cep_mgr->obj );\r
+ if( status != IB_SUCCESS )\r
+ {\r
+ gp_cep_mgr->obj.pfn_destroy( &gp_cep_mgr->obj, NULL );\r
+ AL_TRACE_EXIT( AL_DBG_ERROR,\r
+ ("attach_al_obj returned %s.\n", ib_get_err_str(status)) );\r
+ return status;\r
+ }\r
+\r
+ cl_status = cl_ptr_vector_init(\r
+ &gp_cep_mgr->cep_vector, UAL_CEP_MIN, UAL_CEP_GROW );\r
+ if( cl_status != CL_SUCCESS )\r
+ {\r
+ gp_cep_mgr->obj.pfn_destroy( &gp_cep_mgr->obj, NULL );\r
+ AL_TRACE_EXIT( AL_DBG_ERROR,\r
+ ("cl_vector_init failed with status %s.\n",\r
+ CL_STATUS_MSG(cl_status)) );\r
+ return ib_convert_cl_status( cl_status );\r
+ }\r
+\r
+ /* Create a file object on which to issue all CM requests. */\r
+ gp_cep_mgr->h_file = ual_create_async_file( UAL_BIND_CM );\r
+ if( gp_cep_mgr->h_file == INVALID_HANDLE_VALUE )\r
+ {\r
+ gp_cep_mgr->obj.pfn_destroy( &gp_cep_mgr->obj, NULL );\r
+ AL_TRACE_EXIT( AL_DBG_ERROR,\r
+ ("ual_create_async_file for UAL_BIND_CM returned %d.\n",\r
+ GetLastError()) );\r
+ return IB_ERROR;\r
+ }\r
+\r
+ /* Release the reference from init_al_obj */\r
+ deref_al_obj( &gp_cep_mgr->obj );\r
+\r
+ AL_EXIT( AL_DBG_CM );\r
+ return IB_SUCCESS;\r
+}\r
+\r
+\r
+void\r
+al_cep_cleanup_al(\r
+ IN const ib_al_handle_t h_al )\r
+{\r
+ cl_list_item_t *p_item;\r
+ net32_t cid;\r
+\r
+ AL_ENTER( AL_DBG_CM );\r
+\r
+ /* Destroy all CEPs associated with the input instance of AL. */\r
+ cl_spinlock_acquire( &h_al->obj.lock );\r
+ for( p_item = cl_qlist_head( &h_al->cep_list );\r
+ p_item != cl_qlist_end( &h_al->cep_list );\r
+ p_item = cl_qlist_head( &h_al->cep_list ) )\r
+ {\r
+ /*\r
+ * Note that we don't walk the list - we can't hold the AL\r
+ * lock when cleaning up its CEPs because the cleanup path\r
+ * takes the CEP's lock. We always want to take the CEP\r
+ * before the AL lock to prevent any possibilities of deadlock.\r
+ *\r
+ * So we just get the CID, and then release the AL lock and try to\r
+ * destroy. This should unbind the CEP from the AL instance and\r
+ * remove it from the list, allowing the next CEP to be cleaned up\r
+ * in the next pass through.\r
+ */\r
+ cid = PARENT_STRUCT( p_item, ucep_t, al_item )->cep.cid;\r
+ cl_spinlock_release( &h_al->obj.lock );\r
+ al_destroy_cep( h_al, cid, NULL );\r
+ cl_spinlock_acquire( &h_al->obj.lock );\r
+ }\r
+ cl_spinlock_release( &h_al->obj.lock );\r
+\r
+ AL_EXIT( AL_DBG_CM );\r
+}\r
+\r
+\r
+static void\r
+__destroy_ucep(\r
+ IN ucep_t* const p_cep )\r
+{\r
+ if( p_cep->pfn_destroy_cb )\r
+ p_cep->pfn_destroy_cb( p_cep->cep.context );\r
+ cl_free( p_cep );\r
+}\r
+\r
+\r
+ib_api_status_t\r
+__create_ucep(\r
+ IN ib_al_handle_t h_al,\r
+ IN net32_t cid,\r
+ IN al_pfn_cep_cb_t pfn_cb,\r
+ IN void *context,\r
+ OUT net32_t* const p_cid )\r
+{\r
+ ucep_t *p_cep;\r
+ DWORD bytes_ret;\r
+ ual_create_cep_ioctl_t ioctl;\r
+\r
+ AL_ENTER( AL_DBG_CM );\r
+\r
+ p_cep = cl_zalloc( sizeof(ucep_t) );\r
+ if( !p_cep )\r
+ {\r
+ AL_TRACE_EXIT( AL_DBG_ERROR, ("Failed to allocate ucep_t\n") );\r
+ return IB_INSUFFICIENT_MEMORY;\r
+ }\r
+\r
+ /* Initialize to two - one for the CEP, and one for the IOCTL. */\r
+ p_cep->ref_cnt = 2;\r
+\r
+ /* Store user parameters. */\r
+ p_cep->pfn_cb = pfn_cb;\r
+ p_cep->cep.context = context;\r
+\r
+ /* Create a kernel CEP only if we don't already have a CID. */\r
+ if( cid == AL_INVALID_CID )\r
+ {\r
+ if( !DeviceIoControl( g_al_device, UAL_CREATE_CEP, NULL, 0,\r
+ &ioctl, sizeof(ioctl), &bytes_ret, NULL ) ||\r
+ bytes_ret != sizeof(ioctl) )\r
+ {\r
+ __destroy_ucep( p_cep );\r
+ AL_TRACE_EXIT( AL_DBG_ERROR,\r
+ ("UAL_CREATE_CEP IOCTL failed with %d.\n", GetLastError()) );\r
+ return IB_ERROR;\r
+ }\r
+\r
+ if( ioctl.status != IB_SUCCESS )\r
+ {\r
+ __destroy_ucep( p_cep );\r
+ AL_TRACE_EXIT( AL_DBG_ERROR, ("UAL_CREATE_CEP IOCTL returned %s\n",\r
+ ib_get_err_str( ioctl.status )) );\r
+ return ioctl.status;\r
+ }\r
+\r
+ p_cep->cep.cid = ioctl.cid;\r
+ }\r
+ else\r
+ {\r
+ p_cep->cep.cid = cid;\r
+ }\r
+\r
+ /* Track the CEP before we issue any further IOCTLs on it. */\r
+ cl_spinlock_acquire( &gp_cep_mgr->obj.lock );\r
+ cl_ptr_vector_set_min_size( &gp_cep_mgr->cep_vector, p_cep->cep.cid + 1 );\r
+ CL_ASSERT( !cl_ptr_vector_get( &gp_cep_mgr->cep_vector, p_cep->cep.cid ) );\r
+ cl_ptr_vector_set( &gp_cep_mgr->cep_vector, p_cep->cep.cid, p_cep );\r
+ cl_spinlock_release( &gp_cep_mgr->obj.lock );\r
+\r
+ /* Now issue a poll request. This request is async. */\r
+ if( DeviceIoControl( gp_cep_mgr->h_file, UAL_CEP_GET_EVENT,\r
+ &p_cep->cep.cid, sizeof(p_cep->cep.cid),\r
+ NULL, 0, NULL, &p_cep->ov ) ||\r
+ GetLastError() != ERROR_IO_PENDING )\r
+ {\r
+ AL_TRACE( AL_DBG_ERROR, ("Failed to issue CEP poll IOCTL.\n") );\r
+ cl_spinlock_acquire( &gp_cep_mgr->obj.lock );\r
+ cl_ptr_vector_set( &gp_cep_mgr->cep_vector, p_cep->cep.cid, NULL );\r
+ cl_spinlock_release( &gp_cep_mgr->obj.lock );\r
+\r
+ DeviceIoControl( g_al_device, UAL_DESTROY_CEP, &p_cep->cep.cid,\r
+ sizeof(p_cep->cep.cid), NULL, 0, &bytes_ret, NULL );\r
+\r
+ __destroy_ucep( p_cep );\r
+ AL_EXIT( AL_DBG_CM );\r
+ return IB_ERROR;\r
+ }\r
+\r
+ p_cep->h_al = h_al;\r
+\r
+ /* Track the CEP in its owning AL instance. */\r
+ cl_spinlock_acquire( &h_al->obj.lock );\r
+ cl_qlist_insert_tail( &h_al->cep_list, &p_cep->al_item );\r
+ cl_spinlock_release( &h_al->obj.lock );\r
+\r
+ if( p_cid )\r
+ *p_cid = p_cep->cep.cid;\r
+\r
+ AL_EXIT( AL_DBG_CM );\r
+ return IB_SUCCESS;\r
+}\r
+\r
+\r
+ib_api_status_t\r
+al_create_cep(\r
+ IN ib_al_handle_t h_al,\r
+ IN al_pfn_cep_cb_t pfn_cb,\r
+ IN void *context,\r
+ OUT net32_t* const p_cid )\r
+{\r
+ ib_api_status_t status;\r
+\r
+ AL_ENTER( AL_DBG_CM );\r
+\r
+ status = __create_ucep( h_al, AL_INVALID_CID, pfn_cb, context, p_cid );\r
+\r
+ AL_EXIT( AL_DBG_CM );\r
+ return status;\r
+}\r
+\r
+\r
+/*\r
+ * Note that destroy_cep is synchronous. It does however handle the case\r
+ * where a user calls it from a callback context.\r
+ */\r
+ib_api_status_t\r
+al_destroy_cep(\r
+ IN ib_al_handle_t h_al,\r
+ IN net32_t cid,\r
+ IN ib_pfn_destroy_cb_t pfn_destroy_cb OPTIONAL )\r
+{\r
+ ucep_t *p_cep;\r
+ DWORD bytes_ret;\r
+\r
+ AL_ENTER( AL_DBG_CM );\r
+\r
+ CL_ASSERT( h_al );\r
+\r
+ cl_spinlock_acquire( &gp_cep_mgr->obj.lock );\r
+ if( cid < cl_ptr_vector_get_size( &gp_cep_mgr->cep_vector ) )\r
+ {\r
+ p_cep = cl_ptr_vector_get( &gp_cep_mgr->cep_vector, cid );\r
+ if( p_cep && p_cep->h_al == h_al )\r
+ cl_ptr_vector_set( &gp_cep_mgr->cep_vector, cid, NULL );\r
+ else\r
+ p_cep = NULL;\r
+ }\r
+ else\r
+ {\r
+ p_cep = NULL;\r
+ }\r
+ cl_spinlock_release( &gp_cep_mgr->obj.lock );\r
+\r
+ if( !p_cep )\r
+ {\r
+ AL_EXIT( AL_DBG_CM );\r
+ return IB_INVALID_PARAMETER;\r
+ }\r
+\r
+ p_cep->pfn_destroy_cb = pfn_destroy_cb;\r
+\r
+ /*\r
+ * Remove from the AL instance. Note that once removed, all\r
+ * callbacks for an item will stop.\r
+ */\r
+ cl_spinlock_acquire( &h_al->obj.lock );\r
+ cl_qlist_remove_item( &h_al->cep_list, &p_cep->al_item );\r
+ cl_spinlock_release( &h_al->obj.lock );\r
+\r
+ /* Destroy the kernel CEP right away. */\r
+ DeviceIoControl( g_al_device, UAL_DESTROY_CEP, &p_cep->cep.cid,\r
+ sizeof(p_cep->cep.cid), NULL, 0, &bytes_ret, NULL );\r
+\r
+ if( !cl_atomic_dec( &p_cep->ref_cnt ) )\r
+ {\r
+ /* We have no remaining refrences. */\r
+ __destroy_ucep( p_cep );\r
+ }\r
+\r
+ AL_EXIT( AL_DBG_CM );\r
+ return IB_SUCCESS;\r
+}\r
+\r
+\r
+ib_api_status_t\r
+al_cep_listen(\r
+ IN ib_al_handle_t h_al,\r
+ IN net32_t cid,\r
+ IN ib_cep_listen_t* const p_listen_info )\r
+{\r
+ ual_cep_listen_ioctl_t ioctl;\r
+ ib_api_status_t status;\r
+ DWORD bytes_ret;\r
+\r
+ AL_ENTER( AL_DBG_CM );\r
+\r
+ if( !h_al )\r
+ {\r
+ AL_EXIT( AL_DBG_CM );\r
+ return IB_INVALID_HANDLE;\r
+ }\r
+\r
+ if( !p_listen_info )\r
+ {\r
+ AL_EXIT( AL_DBG_CM );\r
+ return IB_INVALID_PARAMETER;\r
+ }\r
+\r
+ ioctl.cid = cid;\r
+ ioctl.cep_listen = *p_listen_info;\r
+ if( p_listen_info->p_cmp_buf )\r
+ {\r
+ if( p_listen_info->cmp_len > IB_REQ_PDATA_SIZE )\r
+ {\r
+ AL_TRACE_EXIT( AL_DBG_ERROR,\r
+ ("Listen compare data larger than REQ private data.\n") );\r
+ return IB_INVALID_SETTING;\r
+ }\r
+\r
+ cl_memcpy( ioctl.compare, p_listen_info->p_cmp_buf,\r
+ p_listen_info->cmp_len );\r
+ }\r
+\r
+ if( !DeviceIoControl( g_al_device, UAL_CEP_LISTEN, &ioctl,\r
+ sizeof(ioctl), &status, sizeof(status), &bytes_ret, NULL ) ||\r
+ bytes_ret != sizeof(status) )\r
+ {\r
+ AL_TRACE_EXIT( AL_DBG_ERROR,\r
+ ("ual_cep_listen IOCTL failed with %d.\n", GetLastError()) );\r
+ return IB_ERROR;\r
+ }\r
+\r
+ AL_EXIT( AL_DBG_CM );\r
+ return status;\r
+}\r
+\r
+\r
+ib_api_status_t\r
+al_cep_pre_req(\r
+ IN ib_al_handle_t h_al,\r
+ IN net32_t cid,\r
+ IN const ib_cm_req_t* const p_cm_req,\r
+ OUT ib_qp_mod_t* const p_init )\r
+{\r
+ ual_cep_req_ioctl_t ioctl;\r
+ DWORD bytes_ret;\r
+\r
+ AL_ENTER( AL_DBG_CM );\r
+\r
+ if( !h_al )\r
+ {\r
+ AL_EXIT( AL_DBG_CM );\r
+ return IB_INVALID_HANDLE;\r
+ }\r
+\r
+ if( !p_cm_req )\r
+ {\r
+ AL_EXIT( AL_DBG_CM );\r
+ return IB_INVALID_PARAMETER;\r
+ }\r
+\r
+ if( !p_init )\r
+ {\r
+ AL_EXIT( AL_DBG_ERROR );\r
+ return IB_INVALID_PARAMETER;\r
+ }\r
+\r
+ ioctl.in.cid = cid;\r
+ ioctl.in.cm_req = *p_cm_req;\r
+ ioctl.in.cm_req.h_qp = (ib_qp_handle_t)p_cm_req->h_qp->obj.hdl;\r
+ ioctl.in.paths[0] = *(p_cm_req->p_primary_path);\r
+ if( p_cm_req->p_alt_path )\r
+ ioctl.in.paths[1] = *(p_cm_req->p_alt_path);\r
+ /* Copy private data, if any. */\r
+ if( p_cm_req->p_req_pdata )\r
+ {\r
+ if( p_cm_req->req_length > IB_REQ_PDATA_SIZE )\r
+ {\r
+ AL_TRACE_EXIT( AL_DBG_ERROR,\r
+ ("private data larger than REQ private data.\n") );\r
+ return IB_INVALID_SETTING;\r
+ }\r
+\r
+ cl_memcpy( ioctl.in.pdata, p_cm_req->p_req_pdata,\r
+ p_cm_req->req_length );\r
+ }\r
+\r
+ /* Copy compare data, if any. */\r
+ if( p_cm_req->p_compare_buffer )\r
+ {\r
+ if( p_cm_req->compare_length > IB_REQ_PDATA_SIZE )\r
+ {\r
+ AL_TRACE_EXIT( AL_DBG_ERROR,\r
+ ("REQ compare data larger than REQ private data.\n") );\r
+ return IB_INVALID_SETTING;\r
+ }\r
+\r
+ cl_memcpy( ioctl.in.compare, p_cm_req->p_compare_buffer,\r
+ p_cm_req->compare_length );\r
+ }\r
+\r
+ if( !DeviceIoControl( g_al_device, UAL_CEP_PRE_REQ, &ioctl,\r
+ sizeof(ioctl.in), &ioctl, sizeof(ioctl.out), &bytes_ret, NULL ) ||\r
+ bytes_ret != sizeof(ioctl.out) )\r
+ {\r
+ AL_TRACE_EXIT( AL_DBG_ERROR,\r
+ ("UAL_CEP_PRE_REQ IOCTL failed with %d.\n", GetLastError()) );\r
+ return IB_ERROR;\r
+ }\r
+\r
+ if( ioctl.out.status == IB_SUCCESS )\r
+ *p_init = ioctl.out.init;\r
+\r
+ AL_EXIT( AL_DBG_CM );\r
+ return ioctl.out.status;\r
+}\r
+\r
+\r
+ib_api_status_t\r
+al_cep_send_req(\r
+ IN ib_al_handle_t h_al,\r
+ IN net32_t cid )\r
+{\r
+ ib_api_status_t status;\r
+ DWORD bytes_ret;\r
+\r
+ AL_ENTER( AL_DBG_CM );\r
+\r
+ if( !h_al )\r
+ {\r
+ AL_EXIT( AL_DBG_CM );\r
+ return IB_INVALID_HANDLE;\r
+ }\r
+\r
+ if( !DeviceIoControl( g_al_device, UAL_CEP_SEND_REQ, &cid,\r
+ sizeof(cid), &status, sizeof(status), &bytes_ret, NULL ) ||\r
+ bytes_ret != sizeof(status) )\r
+ {\r
+ AL_TRACE_EXIT( AL_DBG_ERROR,\r
+ ("UAL_CEP_SEND_REQ IOCTL failed with %d.\n", GetLastError()) );\r
+ return IB_ERROR;\r
+ }\r
+\r
+ AL_EXIT( AL_DBG_CM );\r
+ return status;\r
+}\r
+\r
+\r
+/*
+ * Pre-loads the kernel CEP's REP data (sent later by al_cep_send_rep) and
+ * returns the QP modify attributes needed for the INIT transition.
+ */
+ib_api_status_t
+al_cep_pre_rep(
+	IN				ib_al_handle_t				h_al,
+	IN				net32_t						cid,
+	IN				void						*context,
+	IN		const	ib_cm_rep_t* const			p_cm_rep,
+		OUT			ib_qp_mod_t* const			p_init )
+{
+	ucep_t				*p_cep;
+	ual_cep_rep_ioctl_t	ioctl;
+	DWORD				bytes_ret;
+
+	AL_ENTER( AL_DBG_CM );
+
+	if( !h_al )
+	{
+		AL_EXIT( AL_DBG_CM );
+		return IB_INVALID_HANDLE;
+	}
+
+	if( !p_cm_rep )
+	{
+		AL_EXIT( AL_DBG_CM );
+		return IB_INVALID_PARAMETER;
+	}
+
+	if( !p_init )
+	{
+		AL_EXIT( AL_DBG_ERROR );
+		return IB_INVALID_PARAMETER;
+	}
+
+	/* Store the context for the CEP. */
+	cl_spinlock_acquire( &gp_cep_mgr->obj.lock );
+	p_cep = cl_ptr_vector_get( &gp_cep_mgr->cep_vector, cid );
+	if( !p_cep )
+	{
+		cl_spinlock_release( &gp_cep_mgr->obj.lock );
+		AL_EXIT( AL_DBG_ERROR );
+		return IB_INVALID_PARAMETER;
+	}
+	p_cep->cep.context = context;
+	cl_spinlock_release( &gp_cep_mgr->obj.lock );
+
+	ioctl.in.cid = cid;
+	ioctl.in.cm_rep = *p_cm_rep;
+	ioctl.in.cm_rep.h_qp = (ib_qp_handle_t)p_cm_rep->h_qp->obj.hdl;
+	/* Copy private data, if any. */
+	if( p_cm_rep->p_rep_pdata )
+	{
+		if( p_cm_rep->rep_length > IB_REP_PDATA_SIZE )
+		{
+			AL_TRACE_EXIT( AL_DBG_ERROR,
+				("private data larger than REP private data.\n") );
+			return IB_INVALID_SETTING;
+		}
+
+		cl_memcpy( ioctl.in.pdata, p_cm_rep->p_rep_pdata,
+			p_cm_rep->rep_length );
+	}
+
+	if( !DeviceIoControl( g_al_device, UAL_CEP_PRE_REP, &ioctl,
+		sizeof(ioctl.in), &ioctl, sizeof(ioctl.out), &bytes_ret, NULL ) ||
+		bytes_ret != sizeof(ioctl.out) )
+	{
+		AL_TRACE_EXIT( AL_DBG_ERROR,
+			("UAL_CEP_PRE_REP IOCTL failed with %d.\n", GetLastError()) );
+		return IB_ERROR;\r
+	}
+
+	if( ioctl.out.status == IB_SUCCESS )
+		*p_init = ioctl.out.init;
+
+	AL_EXIT( AL_DBG_CM );
+	return ioctl.out.status;
+}
+\r
+\r
+ib_api_status_t\r
+al_cep_send_rep(\r
+ IN ib_al_handle_t h_al,\r
+ IN net32_t cid )\r
+{\r
+ ib_api_status_t status;\r
+ DWORD bytes_ret;\r
+\r
+ AL_ENTER( AL_DBG_CM );\r
+\r
+ if( !h_al )\r
+ {\r
+ AL_EXIT( AL_DBG_CM );\r
+ return IB_INVALID_HANDLE;\r
+ }\r
+\r
+ if( !DeviceIoControl( g_al_device, UAL_CEP_SEND_REP, &cid,\r
+ sizeof(cid), &status, sizeof(status), &bytes_ret, NULL ) ||\r
+ bytes_ret != sizeof(status) )\r
+ {\r
+ AL_TRACE_EXIT( AL_DBG_ERROR,\r
+ ("UAL_CEP_SEND_REP IOCTL failed with %d.\n", GetLastError()) );\r
+ return IB_ERROR;\r
+ }\r
+\r
+ AL_EXIT( AL_DBG_CM );\r
+ return status;\r
+}\r
+\r
+\r
+ib_api_status_t\r
+al_cep_get_rtr_attr(\r
+ IN ib_al_handle_t h_al,\r
+ IN net32_t cid,\r
+ OUT ib_qp_mod_t* const p_rtr )\r
+{\r
+ ual_cep_get_rtr_ioctl_t ioctl;\r
+ DWORD bytes_ret;\r
+\r
+ AL_ENTER( AL_DBG_CM );\r
+\r
+ if( !h_al )\r
+ {\r
+ AL_EXIT( AL_DBG_CM );\r
+ return IB_INVALID_HANDLE;\r
+ }\r
+\r
+ if( !p_rtr )\r
+ {\r
+ AL_EXIT( AL_DBG_ERROR );\r
+ return IB_INVALID_PARAMETER;\r
+ }\r
+\r
+ if( !DeviceIoControl( g_al_device, UAL_CEP_GET_RTR, &cid,\r
+ sizeof(cid), &ioctl, sizeof(ioctl), &bytes_ret, NULL ) ||\r
+ bytes_ret != sizeof(ioctl) )\r
+ {\r
+ AL_TRACE_EXIT( AL_DBG_ERROR,\r
+ ("UAL_CEP_GET_RTR IOCTL failed with %d.\n", GetLastError()) );\r
+ return IB_ERROR;\r
+ }\r
+\r
+ if( ioctl.status == IB_SUCCESS )\r
+ *p_rtr = ioctl.rtr;\r
+\r
+ AL_EXIT( AL_DBG_CM );\r
+ return ioctl.status;\r
+}\r
+\r
+\r
+ib_api_status_t\r
+al_cep_get_rts_attr(\r
+ IN ib_al_handle_t h_al,\r
+ IN net32_t cid,\r
+ OUT ib_qp_mod_t* const p_rts )\r
+{\r
+ ual_cep_get_rts_ioctl_t ioctl;\r
+ DWORD bytes_ret;\r
+\r
+ AL_ENTER( AL_DBG_CM );\r
+\r
+ if( !h_al )\r
+ {\r
+ AL_EXIT( AL_DBG_CM );\r
+ return IB_INVALID_HANDLE;\r
+ }\r
+\r
+ if( !p_rts )\r
+ {\r
+ AL_EXIT( AL_DBG_ERROR );\r
+ return IB_INVALID_PARAMETER;\r
+ }\r
+\r
+ if( !DeviceIoControl( g_al_device, UAL_CEP_GET_RTS, &cid,\r
+ sizeof(cid), &ioctl, sizeof(ioctl), &bytes_ret, NULL ) ||\r
+ bytes_ret != sizeof(ioctl) )\r
+ {\r
+ AL_TRACE_EXIT( AL_DBG_ERROR,\r
+ ("UAL_CEP_GET_RTS IOCTL failed with %d.\n", GetLastError()) );\r
+ return IB_ERROR;\r
+ }\r
+\r
+ if( ioctl.status == IB_SUCCESS )\r
+ *p_rts = ioctl.rts;\r
+\r
+ AL_EXIT( AL_DBG_CM );\r
+ return ioctl.status;\r
+}\r
+\r
+\r
+ib_api_status_t\r
+al_cep_rtu(\r
+ IN ib_al_handle_t h_al,\r
+ IN net32_t cid,\r
+ IN const uint8_t* p_pdata OPTIONAL,\r
+ IN uint8_t pdata_len )\r
+{\r
+ ib_api_status_t status;\r
+ ual_cep_rtu_ioctl_t ioctl;\r
+ DWORD bytes_ret;\r
+\r
+ AL_ENTER( AL_DBG_CM );\r
+\r
+ if( !h_al )\r
+ {\r
+ AL_EXIT( AL_DBG_CM );\r
+ return IB_INVALID_HANDLE;\r
+ }\r
+\r
+ ioctl.cid = cid;\r
+ /* Copy private data, if any. */\r
+ if( p_pdata )\r
+ {\r
+ if( pdata_len > IB_RTU_PDATA_SIZE )\r
+ {\r
+ AL_TRACE_EXIT( AL_DBG_ERROR,\r
+ ("private data larger than RTU private data.\n") );\r
+ return IB_INVALID_SETTING;\r
+ }\r
+\r
+ cl_memcpy( ioctl.pdata, p_pdata, pdata_len );\r
+ }\r
+ ioctl.pdata_len = pdata_len;\r
+\r
+ if( !DeviceIoControl( g_al_device, UAL_CEP_RTU, &ioctl,\r
+ sizeof(ioctl), &status, sizeof(status), &bytes_ret, NULL ) ||\r
+ bytes_ret != sizeof(status) )\r
+ {\r
+ AL_TRACE_EXIT( AL_DBG_ERROR,\r
+ ("UAL_CEP_RTU IOCTL failed with %d.\n", GetLastError()) );\r
+ return IB_ERROR;\r
+ }\r
+\r
+ AL_EXIT( AL_DBG_CM );\r
+ return status;\r
+}\r
+\r
+\r
+/*
+ * Sends a REJ for the given CEP, with optional ARI and private data.
+ */
+ib_api_status_t
+al_cep_rej(
+	IN				ib_al_handle_t				h_al,
+	IN				net32_t						cid,
+	IN				ib_rej_status_t				rej_status,
+	IN		const	uint8_t* const				p_ari,
+	IN				uint8_t						ari_len,
+	IN		const	uint8_t* const				p_pdata,
+	IN				uint8_t						pdata_len )
+{
+	ib_api_status_t		status;
+	ual_cep_rej_ioctl_t	ioctl;
+	DWORD				bytes_ret;
+
+	AL_ENTER( AL_DBG_CM );
+
+	if( !h_al )
+	{
+		AL_EXIT( AL_DBG_CM );
+		return IB_INVALID_HANDLE;
+	}
+
+	ioctl.cid = cid;
+	ioctl.rej_status = rej_status;
+	/* Copy the ARI, if any. */
+	if( p_ari )
+	{
+		if( ari_len > IB_ARI_SIZE )
+		{
+			AL_TRACE_EXIT( AL_DBG_ERROR,
+				("ARI data larger than REJ ARI field.\n") );
+			return IB_INVALID_SETTING;
+		}
+
+		cl_memcpy( ioctl.ari, p_ari, ari_len );
+		ioctl.ari_len = ari_len;
+	}
+	else
+	{
+		ioctl.ari_len = 0;
+	}
+	/* Copy private data, if any. */
+	if( p_pdata )
+	{
+		if( pdata_len > IB_REJ_PDATA_SIZE )
+		{
+			AL_TRACE_EXIT( AL_DBG_ERROR,
+				("private data larger than REJ private data.\n") );
+			return IB_INVALID_SETTING;
+		}
+
+		cl_memcpy( ioctl.pdata, p_pdata, pdata_len );
+		ioctl.pdata_len = pdata_len;
+	}
+	else
+	{
+		ioctl.pdata_len = 0;
+	}
+
+	if( !DeviceIoControl( g_al_device, UAL_CEP_REJ, &ioctl,
+		sizeof(ioctl), &status, sizeof(status), &bytes_ret, NULL ) ||
+		bytes_ret != sizeof(status) )
+	{
+		AL_TRACE_EXIT( AL_DBG_ERROR,
+			("UAL_CEP_REJ IOCTL failed with %d.\n", GetLastError()) );
+		return IB_ERROR;
+	}
+
+	AL_EXIT( AL_DBG_CM );
+	return status;
+}
+\r
+\r
+ib_api_status_t\r
+al_cep_mra(\r
+ IN ib_al_handle_t h_al,\r
+ IN net32_t cid,\r
+ IN const ib_cm_mra_t* const p_cm_mra )\r
+{\r
+ ib_api_status_t status;\r
+ ual_cep_mra_ioctl_t ioctl;\r
+ DWORD bytes_ret;\r
+\r
+ AL_ENTER( AL_DBG_CM );\r
+\r
+ if( !h_al )\r
+ {\r
+ AL_EXIT( AL_DBG_CM );\r
+ return IB_INVALID_HANDLE;\r
+ }\r
+\r
+ if( !p_cm_mra )\r
+ {\r
+ AL_EXIT( AL_DBG_CM );\r
+ return IB_INVALID_HANDLE;\r
+ }\r
+\r
+ ioctl.cid = cid;\r
+ ioctl.cm_mra = *p_cm_mra;\r
+ /* Copy private data, if any. */\r
+ if( p_cm_mra->p_mra_pdata )\r
+ {\r
+ if( p_cm_mra->mra_length > IB_MRA_PDATA_SIZE )\r
+ {\r
+ AL_TRACE_EXIT( AL_DBG_ERROR,\r
+ ("private data larger than MRA private data.\n") );\r
+ return IB_INVALID_SETTING;\r
+ }\r
+\r
+ cl_memcpy(\r
+ ioctl.pdata, p_cm_mra->p_mra_pdata, p_cm_mra->mra_length );\r
+ }\r
+\r
+ if( !DeviceIoControl( g_al_device, UAL_CEP_MRA, &ioctl,\r
+ sizeof(ioctl), &status, sizeof(status), &bytes_ret, NULL ) ||\r
+ bytes_ret != sizeof(status) )\r
+ {\r
+ AL_TRACE_EXIT( AL_DBG_ERROR,\r
+ ("UAL_CEP_MRA IOCTL failed with %d.\n", GetLastError()) );\r
+ return IB_ERROR;\r
+ }\r
+\r
+ AL_EXIT( AL_DBG_CM );\r
+ return status;\r
+}\r
+\r
+\r
+ib_api_status_t\r
+al_cep_lap(\r
+ IN ib_al_handle_t h_al,\r
+ IN net32_t cid,\r
+ IN const ib_cm_lap_t* const p_cm_lap )\r
+{\r
+ ib_api_status_t status;\r
+ ual_cep_lap_ioctl_t ioctl;\r
+ DWORD bytes_ret;\r
+\r
+ AL_ENTER( AL_DBG_CM );\r
+\r
+ if( !h_al )\r
+ {\r
+ AL_EXIT( AL_DBG_CM );\r
+ return IB_INVALID_HANDLE;\r
+ }\r
+\r
+ if( !p_cm_lap )\r
+ {\r
+ AL_EXIT( AL_DBG_CM );\r
+ return IB_INVALID_HANDLE;\r
+ }\r
+\r
+ if( !p_cm_lap->p_alt_path )\r
+ {\r
+ AL_EXIT( AL_DBG_CM );\r
+ return IB_INVALID_HANDLE;\r
+ }\r
+\r
+ ioctl.cid = cid;\r
+ ioctl.cm_lap = *p_cm_lap;\r
+ ioctl.cm_lap.h_qp = (ib_qp_handle_t)p_cm_lap->h_qp->obj.hdl;\r
+ ioctl.alt_path = *(p_cm_lap->p_alt_path);\r
+ /* Copy private data, if any. */\r
+ if( p_cm_lap->p_lap_pdata )\r
+ {\r
+ if( p_cm_lap->lap_length > IB_LAP_PDATA_SIZE )\r
+ {\r
+ AL_TRACE_EXIT( AL_DBG_ERROR,\r
+ ("private data larger than LAP private data.\n") );\r
+ return IB_INVALID_SETTING;\r
+ }\r
+\r
+ cl_memcpy(\r
+ ioctl.pdata, p_cm_lap->p_lap_pdata, p_cm_lap->lap_length );\r
+ }\r
+\r
+ if( !DeviceIoControl( g_al_device, UAL_CEP_LAP, &ioctl,\r
+ sizeof(ioctl), &status, sizeof(status), &bytes_ret, NULL ) ||\r
+ bytes_ret != sizeof(status) )\r
+ {\r
+ AL_TRACE_EXIT( AL_DBG_ERROR,\r
+ ("UAL_CEP_LAP IOCTL failed with %d.\n", GetLastError()) );\r
+ return IB_ERROR;\r
+ }\r
+\r
+ AL_EXIT( AL_DBG_CM );\r
+ return status;\r
+}\r
+\r
+\r
+/*
+ * Pre-loads the kernel CEP's APR data (sent later by al_cep_send_apr) and
+ * returns the QP modify attributes for the alternate path transition.
+ */
+ib_api_status_t
+al_cep_pre_apr(
+	IN				ib_al_handle_t				h_al,
+	IN				net32_t						cid,
+	IN		const	ib_cm_apr_t* const			p_cm_apr,
+		OUT			ib_qp_mod_t* const			p_apr )
+{
+	ual_cep_apr_ioctl_t	ioctl;
+	DWORD				bytes_ret;
+
+	AL_ENTER( AL_DBG_CM );
+
+	if( !h_al )
+	{
+		AL_EXIT( AL_DBG_CM );
+		return IB_INVALID_HANDLE;
+	}
+
+	if( !p_cm_apr || !p_apr )
+	{
+		AL_EXIT( AL_DBG_CM );
+		return IB_INVALID_PARAMETER;
+	}
+
+	ioctl.in.cid = cid;
+	ioctl.in.cm_apr = *p_cm_apr;
+	ioctl.in.cm_apr.h_qp = (ib_qp_handle_t)p_cm_apr->h_qp->obj.hdl;
+	/* Copy additional info, if any. */
+	if( p_cm_apr->p_info )
+	{
+		if( p_cm_apr->info_length > IB_APR_INFO_SIZE )
+		{
+			AL_TRACE_EXIT( AL_DBG_ERROR,
+				("additional info larger than APR info data.\n") );
+			return IB_INVALID_SETTING;
+		}
+
+		cl_memcpy(
+			ioctl.in.apr_info, p_cm_apr->p_info, p_cm_apr->info_length );
+	}
+	/* Copy private data, if any. */
+	if( p_cm_apr->p_apr_pdata )
+	{
+		/* Bound by the APR private data size, not the REJ size. */
+		if( p_cm_apr->apr_length > IB_APR_PDATA_SIZE )
+		{
+			AL_TRACE_EXIT( AL_DBG_ERROR,
+				("private data larger than APR private data.\n") );
+			return IB_INVALID_SETTING;
+		}
+
+		cl_memcpy(
+			ioctl.in.pdata, p_cm_apr->p_apr_pdata, p_cm_apr->apr_length );
+	}
+
+	/* Issue the PRE_APR IOCTL (was incorrectly issuing UAL_CEP_REJ). */
+	if( !DeviceIoControl( g_al_device, UAL_CEP_PRE_APR, &ioctl.in,
+		sizeof(ioctl.in), &ioctl.out, sizeof(ioctl.out), &bytes_ret, NULL ) ||
+		bytes_ret != sizeof(ioctl.out) )
+	{
+		AL_TRACE_EXIT( AL_DBG_ERROR,
+			("UAL_CEP_PRE_APR IOCTL failed with %d.\n", GetLastError()) );
+		return IB_ERROR;
+	}
+
+	if( ioctl.out.status == IB_SUCCESS )
+		*p_apr = ioctl.out.apr;
+
+	AL_EXIT( AL_DBG_CM );
+	return ioctl.out.status;
+}
+\r
+\r
+ib_api_status_t\r
+al_cep_send_apr(\r
+ IN ib_al_handle_t h_al,\r
+ IN net32_t cid )\r
+{\r
+ ib_api_status_t status;\r
+ DWORD bytes_ret;\r
+\r
+ AL_ENTER( AL_DBG_CM );\r
+\r
+ if( !h_al )\r
+ {\r
+ AL_EXIT( AL_DBG_CM );\r
+ return IB_INVALID_HANDLE;\r
+ }\r
+\r
+ if( !DeviceIoControl( g_al_device, UAL_CEP_SEND_APR, &cid,\r
+ sizeof(cid), &status, sizeof(status), &bytes_ret, NULL ) ||\r
+ bytes_ret != sizeof(status) )\r
+ {\r
+ AL_TRACE_EXIT( AL_DBG_ERROR,\r
+ ("UAL_CEP_SEND_APR IOCTL failed with %d.\n", GetLastError()) );\r
+ return IB_ERROR;\r
+ }\r
+\r
+ AL_EXIT( AL_DBG_CM );\r
+ return status;\r
+}\r
+\r
+\r
+ib_api_status_t\r
+al_cep_dreq(\r
+ IN ib_al_handle_t h_al,\r
+ IN net32_t cid,\r
+ IN const uint8_t* const p_pdata OPTIONAL,\r
+ IN const uint8_t pdata_len )\r
+{\r
+ ib_api_status_t status;\r
+ ual_cep_dreq_ioctl_t ioctl;\r
+ DWORD bytes_ret;\r
+\r
+ AL_ENTER( AL_DBG_CM );\r
+\r
+ if( !h_al )\r
+ {\r
+ AL_EXIT( AL_DBG_CM );\r
+ return IB_INVALID_HANDLE;\r
+ }\r
+\r
+ ioctl.cid = cid;\r
+ /* Copy private data, if any. */\r
+ if( p_pdata )\r
+ {\r
+ if( pdata_len > IB_DREQ_PDATA_SIZE )\r
+ {\r
+ AL_TRACE_EXIT( AL_DBG_ERROR,\r
+ ("private data larger than DREQ private data.\n") );\r
+ return IB_INVALID_SETTING;\r
+ }\r
+\r
+ cl_memcpy( ioctl.pdata, p_pdata, pdata_len );\r
+ ioctl.pdata_len = pdata_len;\r
+ }\r
+ else\r
+ {\r
+ ioctl.pdata_len = 0;\r
+ }\r
+\r
+ if( !DeviceIoControl( g_al_device, UAL_CEP_DREQ, &ioctl,\r
+ sizeof(ioctl), &status, sizeof(status), &bytes_ret, NULL ) ||\r
+ bytes_ret != sizeof(status) )\r
+ {\r
+ AL_TRACE_EXIT( AL_DBG_ERROR,\r
+ ("UAL_CEP_DREQ IOCTL failed with %d.\n", GetLastError()) );\r
+ return IB_ERROR;\r
+ }\r
+\r
+ AL_EXIT( AL_DBG_CM );\r
+ return status;\r
+}\r
+\r
+\r
+ib_api_status_t\r
+al_cep_drep(\r
+ IN ib_al_handle_t h_al,\r
+ IN net32_t cid,\r
+ IN const ib_cm_drep_t* const p_cm_drep )\r
+{\r
+ ib_api_status_t status;\r
+ ual_cep_drep_ioctl_t ioctl;\r
+ DWORD bytes_ret;\r
+\r
+ AL_ENTER( AL_DBG_CM );\r
+\r
+ if( !h_al )\r
+ {\r
+ AL_EXIT( AL_DBG_CM );\r
+ return IB_INVALID_HANDLE;\r
+ }\r
+\r
+ if( !p_cm_drep )\r
+ {\r
+ AL_EXIT( AL_DBG_CM );\r
+ return IB_INVALID_HANDLE;\r
+ }\r
+\r
+ ioctl.cid = cid;\r
+ ioctl.cm_drep = *p_cm_drep;\r
+ /* Copy private data, if any. */\r
+ if( p_cm_drep->p_drep_pdata )\r
+ {\r
+ if( p_cm_drep->drep_length > IB_DREP_PDATA_SIZE )\r
+ {\r
+ AL_TRACE_EXIT( AL_DBG_ERROR,\r
+ ("private data larger than DREP private data.\n") );\r
+ return IB_INVALID_SETTING;\r
+ }\r
+\r
+ cl_memcpy(\r
+ ioctl.pdata, p_cm_drep->p_drep_pdata, p_cm_drep->drep_length );\r
+ }\r
+\r
+ if( !DeviceIoControl( g_al_device, UAL_CEP_DREP, &ioctl,\r
+ sizeof(ioctl), &status, sizeof(status), &bytes_ret, NULL ) ||\r
+ bytes_ret != sizeof(status) )\r
+ {\r
+ AL_TRACE_EXIT( AL_DBG_ERROR,\r
+ ("UAL_CEP_DREP IOCTL failed with %d.\n", GetLastError()) );\r
+ return IB_ERROR;\r
+ }\r
+\r
+ AL_EXIT( AL_DBG_CM );\r
+ return status;\r
+}\r
+\r
+\r
+/*
+ * Returns the remaining timewait period, in microseconds, for the CEP.
+ */
+ib_api_status_t
+al_cep_get_timewait(
+	IN				ib_al_handle_t				h_al,
+	IN				net32_t						cid,
+		OUT			uint64_t* const				p_timewait_us )
+{
+	ual_cep_get_timewait_ioctl_t	ioctl;
+	DWORD							bytes_ret;
+
+	AL_ENTER( AL_DBG_CM );
+
+	if( !h_al )
+	{
+		AL_EXIT( AL_DBG_CM );
+		return IB_INVALID_HANDLE;
+	}
+
+	/* A NULL output pointer is a bad parameter, not a bad handle. */
+	if( !p_timewait_us )
+	{
+		AL_EXIT( AL_DBG_CM );
+		return IB_INVALID_PARAMETER;
+	}
+
+	if( !DeviceIoControl( g_al_device, UAL_CEP_GET_TIMEWAIT, &cid, sizeof(cid),
+		&ioctl, sizeof(ioctl), &bytes_ret, NULL ) ||
+		bytes_ret != sizeof(ioctl) )
+	{
+		AL_TRACE_EXIT( AL_DBG_ERROR,
+			("UAL_CEP_GET_TIMEWAIT IOCTL failed with %d.\n", GetLastError()) );
+		return IB_ERROR;
+	}
+
+	if( ioctl.status == IB_SUCCESS )
+		*p_timewait_us = ioctl.timewait_us;
+
+	AL_EXIT( AL_DBG_CM );
+	return ioctl.status;
+}
+//\r
+//\r
+//ib_api_status_t\r
+//al_cep_migrate(\r
+// IN ib_al_handle_t h_al,\r
+// IN net32_t cid );\r
+//\r
+//\r
+//ib_api_status_t\r
+//al_cep_established(\r
+// IN ib_al_handle_t h_al,\r
+// IN net32_t cid );\r
+\r
+\r
+/*
+ * Polls the kernel CEP for a pending event, returning the received MAD and,
+ * for new connection requests, creating a matching user-mode CEP.
+ */
+ib_api_status_t
+al_cep_poll(
+	IN				ib_al_handle_t				h_al,
+	IN				net32_t						cid,
+	IN	OUT			ib_cep_t* const				p_new_cep,
+		OUT			ib_mad_element_t** const	pp_mad )
+{
+	ucep_t				*p_cep;
+	ib_api_status_t		status;
+	ual_cep_poll_ioctl_t	ioctl;
+	DWORD				bytes_ret;
+	ib_mad_element_t	*p_mad;
+	ib_grh_t			*p_grh;
+	ib_mad_t			*p_mad_buf;
+
+	AL_ENTER( AL_DBG_CM );
+
+	if( !h_al )
+	{
+		AL_EXIT( AL_DBG_CM );
+		return IB_INVALID_HANDLE;
+	}
+
+	if( !p_new_cep || !pp_mad )
+	{
+		AL_EXIT( AL_DBG_CM );
+		return IB_INVALID_PARAMETER;
+	}
+
+	/*
+	 * Look up the CEP.  Note that an index equal to the vector size is
+	 * out of bounds (fixed off-by-one: was 'cid > size').
+	 */
+	cl_spinlock_acquire( &gp_cep_mgr->obj.lock );
+	if( cid >= cl_ptr_vector_get_size( &gp_cep_mgr->cep_vector ) )
+		p_cep = NULL;
+	else
+		p_cep = cl_ptr_vector_get( &gp_cep_mgr->cep_vector, cid );
+	cl_spinlock_release( &gp_cep_mgr->obj.lock );
+	if( !p_cep )
+	{
+		AL_EXIT( AL_DBG_CM );
+		return IB_INVALID_PARAMETER;
+	}
+
+	status = ib_get_mad( g_pool_key, MAD_BLOCK_SIZE, &p_mad );
+	if( status != IB_SUCCESS )
+	{
+		AL_TRACE_EXIT( AL_DBG_ERROR,
+			("ib_get_mad returned %s.\n", ib_get_err_str( status )) );
+		return status;
+	}
+
+	/* Save the element's buffer pointers - the IOCTL output overwrites them. */
+	p_mad_buf = p_mad->p_mad_buf;
+	p_grh = p_mad->p_grh;
+
+	if( !DeviceIoControl( g_al_device, UAL_CEP_POLL, &cid,
+		sizeof(cid), &ioctl, sizeof(ioctl), &bytes_ret, NULL ) ||
+		bytes_ret != sizeof(ioctl) )
+	{
+		ib_put_mad( p_mad );
+		AL_TRACE_EXIT( AL_DBG_ERROR,
+			("UAL_CEP_POLL IOCTL failed with %d.\n", GetLastError()) );
+		return IB_ERROR;
+	}
+
+	if( ioctl.status != IB_SUCCESS )
+	{
+		ib_put_mad( p_mad );
+		AL_EXIT( AL_DBG_CM );
+		return ioctl.status;
+	}
+
+	if( ioctl.new_cep.cid != AL_INVALID_CID )
+	{
+		/* Need to create a new CEP for user-mode. */
+		status = __create_ucep( p_cep->h_al, ioctl.new_cep.cid,
+			p_cep->pfn_cb, ioctl.new_cep.context, NULL );
+		if( status != IB_SUCCESS )
+		{
+			/* Destroy the kernel CEP we failed to track. */
+			DeviceIoControl( g_al_device, UAL_DESTROY_CEP,
+				&ioctl.new_cep.cid, sizeof(ioctl.new_cep.cid),
+				NULL, 0, &bytes_ret, NULL );
+			ib_put_mad( p_mad );
+			/* Fixed: previously fell through and returned IB_SUCCESS here. */
+			AL_EXIT( AL_DBG_CM );
+			return status;
+		}
+	}
+
+	/* Copy the MAD payload as it's all that's used. */
+	*p_mad = ioctl.element;
+	p_mad->p_grh = p_grh;
+	if( p_mad->grh_valid )
+		cl_memcpy( p_mad->p_grh, &ioctl.grh, sizeof(ib_grh_t) );
+	p_mad->p_mad_buf = p_mad_buf;
+	cl_memcpy( p_mad->p_mad_buf, ioctl.mad_buf, MAD_BLOCK_SIZE );
+
+	*p_new_cep = ioctl.new_cep;
+	*pp_mad = p_mad;
+
+	AL_EXIT( AL_DBG_CM );
+	return IB_SUCCESS;
+}
+\r
+\r
+/* Callback to process CM events */\r
+void\r
+cm_cb(\r
+ IN DWORD error_code,\r
+ IN DWORD ret_bytes,\r
+ IN LPOVERLAPPED p_ov )\r
+{\r
+ ucep_t *p_cep;\r
+\r
+ AL_ENTER( AL_DBG_CM );\r
+\r
+ /* The UAL_CEP_GET_EVENT IOCTL does not have any output data. */\r
+ UNUSED_PARAM( ret_bytes );\r
+\r
+ p_cep = PARENT_STRUCT( p_ov, ucep_t, ov );\r
+\r
+ if( !error_code )\r
+ {\r
+ p_cep->pfn_cb( p_cep->h_al, &p_cep->cep );\r
+\r
+ if( !DeviceIoControl( gp_cep_mgr->h_file, UAL_CEP_GET_EVENT,\r
+ &p_cep->cep.cid, sizeof(p_cep->cep.cid), NULL, 0,\r
+ NULL, &p_cep->ov ) && GetLastError() == ERROR_IO_PENDING )\r
+ {\r
+ AL_EXIT( AL_DBG_CM );\r
+ return;\r
+ }\r
+ else if( GetLastError() != ERROR_INVALID_PARAMETER )\r
+ {\r
+ /* We can get ERROR_INVALID_PARAMETER if the CEP was destroyed. */\r
+ AL_TRACE( AL_DBG_ERROR,\r
+ ("DeviceIoControl for CEP callback request returned %d.\n",\r
+ GetLastError()) );\r
+ }\r
+ }\r
+ else\r
+ {\r
+ AL_TRACE( AL_DBG_WARN,\r
+ ("UAL_CEP_GET_EVENT IOCTL returned %d.\n", error_code) );\r
+ }\r
+\r
+ /*\r
+ * We failed to issue the next request or the previous request was\r
+ * cancelled. Release the reference held by the previous IOCTL and exit.\r
+ */\r
+ if( !cl_atomic_dec( &p_cep->ref_cnt ) )\r
+ __destroy_ucep( p_cep );\r
+\r
+ AL_EXIT( AL_DBG_CM );\r
+}\r
#include "al_cq.h"\r
#include "ual_ca.h"\r
#include "ual_qp.h"\r
-#include "ual_cm.h"\r
#include "ual_mad.h"\r
#include "ib_common.h"\r
+#include "al_cm_cep.h"\r
\r
\r
/* Global AL manager handle is defined in al_mgr_shared.c */\r
__cb_thread_routine(\r
IN void *context );\r
\r
-static void\r
-__process_cm_cb(\r
- IN cm_cb_ioctl_info_t* p_cm_cb_info);\r
+//static void\r
+//__process_cm_cb(\r
+// IN cm_cb_ioctl_info_t* p_cm_cb_info);\r
\r
static void\r
__process_misc_cb(\r
gp_al_mgr->ual_mgr.exit_thread = TRUE;\r
\r
/* Closing the file handles cancels any pending I/O requests. */\r
- CloseHandle( gp_al_mgr->ual_mgr.h_cm_file );\r
+ //CloseHandle( gp_al_mgr->ual_mgr.h_cm_file );\r
CloseHandle( gp_al_mgr->ual_mgr.h_cq_file );\r
CloseHandle( gp_al_mgr->ual_mgr.h_misc_file );\r
CloseHandle( g_al_device );\r
}\r
\r
/* Create CM callback file handle. */\r
- gp_al_mgr->ual_mgr.h_cm_file = ual_create_async_file( UAL_BIND_CM );\r
- if( gp_al_mgr->ual_mgr.h_cq_file == INVALID_HANDLE_VALUE )\r
- {\r
- gp_al_mgr->obj.pfn_destroy( &gp_al_mgr->obj, NULL );\r
- AL_TRACE_EXIT( AL_DBG_ERROR,\r
- ("ual_create_async_file for UAL_BIND_CM returned %d.\n",\r
- GetLastError()) );\r
- return IB_ERROR;\r
- }\r
+ //gp_al_mgr->ual_mgr.h_cm_file = ual_create_async_file( UAL_BIND_CM );\r
+ //if( gp_al_mgr->ual_mgr.h_cq_file == INVALID_HANDLE_VALUE )\r
+ //{\r
+ // gp_al_mgr->obj.pfn_destroy( &gp_al_mgr->obj, NULL );\r
+ // AL_TRACE_EXIT( AL_DBG_ERROR,\r
+ // ("ual_create_async_file for UAL_BIND_CM returned %d.\n",\r
+ // GetLastError()) );\r
+ // return IB_ERROR;\r
+ //}\r
\r
/* Create the CQ completion callback file handle. */\r
gp_al_mgr->ual_mgr.h_cq_file = ual_create_async_file( UAL_BIND_CQ );\r
return ib_status;\r
}\r
\r
+ /* Initialize CM */\r
+ ib_status = create_cep_mgr( &gp_al_mgr->obj );\r
+ if( ib_status != IB_SUCCESS )\r
+ {\r
+ gp_al_mgr->obj.pfn_destroy( &gp_al_mgr->obj, NULL );\r
+ AL_TRACE_EXIT( AL_DBG_ERROR,\r
+ ("create_cm_mgr failed, status = 0x%x.\n", ib_status) );\r
+ return ib_status;\r
+ }\r
+\r
cl_status = cl_event_init( &gp_al_mgr->ual_mgr.sync_event, FALSE );\r
if( cl_status != CL_SUCCESS )\r
{\r
}\r
}\r
\r
- if( !DeviceIoControl( gp_al_mgr->ual_mgr.h_cm_file, UAL_GET_CM_CB_INFO,\r
- NULL, 0,\r
- &gp_al_mgr->ual_mgr.cm_cb_info, sizeof(cm_cb_ioctl_info_t),\r
- NULL, &gp_al_mgr->ual_mgr.cm_ov ) )\r
- {\r
- if( GetLastError() != ERROR_IO_PENDING )\r
- {\r
- AL_TRACE_EXIT( AL_DBG_ERROR,\r
- ("DeviceIoControl for CM callback request returned %d.\n",\r
- GetLastError()) );\r
- gp_al_mgr->obj.pfn_destroy(&gp_al_mgr->obj, NULL);\r
- return IB_ERROR;\r
- }\r
- }\r
+ //if( !DeviceIoControl( gp_al_mgr->ual_mgr.h_cm_file, UAL_GET_CM_CB_INFO,\r
+ // NULL, 0,\r
+ // &gp_al_mgr->ual_mgr.cm_cb_info, sizeof(cm_cb_ioctl_info_t),\r
+ // NULL, &gp_al_mgr->ual_mgr.cm_ov ) )\r
+ //{\r
+ // if( GetLastError() != ERROR_IO_PENDING )\r
+ // {\r
+ // AL_TRACE_EXIT( AL_DBG_ERROR,\r
+ // ("DeviceIoControl for CM callback request returned %d.\n",\r
+ // GetLastError()) );\r
+ // gp_al_mgr->obj.pfn_destroy(&gp_al_mgr->obj, NULL);\r
+ // return IB_ERROR;\r
+ // }\r
+ //}\r
\r
if( !DeviceIoControl( gp_al_mgr->ual_mgr.h_cq_file, UAL_GET_COMP_CB_INFO,\r
NULL, 0,\r
/*\r
* UAL thread start routines.\r
*/\r
-\r
-\r
-/* Thread to process the asynchronous CM notifications */\r
-void\r
-cm_cb(\r
- IN DWORD error_code,\r
- IN DWORD ret_bytes,\r
- IN LPOVERLAPPED p_ov )\r
-{\r
- AL_ENTER( AL_DBG_CM );\r
-\r
- UNUSED_PARAM( p_ov );\r
-\r
- if( !error_code && ret_bytes )\r
- {\r
- /* Check the record type and adjust the pointers */\r
- /* TBD */\r
- __process_cm_cb( &gp_al_mgr->ual_mgr.cm_cb_info );\r
- }\r
- \r
- if( error_code != ERROR_OPERATION_ABORTED )\r
- {\r
- if( !DeviceIoControl( gp_al_mgr->ual_mgr.h_cm_file, UAL_GET_CM_CB_INFO,\r
- NULL, 0,\r
- &gp_al_mgr->ual_mgr.cm_cb_info, sizeof(cm_cb_ioctl_info_t),\r
- NULL, &gp_al_mgr->ual_mgr.cm_ov ) )\r
- {\r
- if( GetLastError() != ERROR_IO_PENDING )\r
- {\r
- AL_TRACE_EXIT( AL_DBG_ERROR,\r
- ("DeviceIoControl for CM callback request returned %d.\n",\r
- GetLastError()) );\r
- }\r
- }\r
- }\r
-\r
- AL_EXIT( AL_DBG_CM );\r
-}\r
-\r
-\r
-\r
-static void\r
-__process_cm_cb(\r
- IN cm_cb_ioctl_info_t* p_cm_cb_info)\r
-{\r
- switch( p_cm_cb_info->rec_type)\r
- {\r
- case CM_REQ_REC:\r
- {\r
- struct _cm_req_cb_ioctl_rec *p_ioctl_rec =\r
- &p_cm_cb_info->ioctl_rec.cm_req_cb_ioctl_rec;\r
-\r
- if (p_ioctl_rec->req_rec.qp_type == IB_QPT_UNRELIABLE_DGRM)\r
- {\r
- p_ioctl_rec->req_rec.p_req_pdata =\r
- (uint8_t *)&p_ioctl_rec->cm_req_pdata_rec.sidr_req_pdata;\r
- }\r
- else\r
- {\r
- p_ioctl_rec->req_rec.p_req_pdata =\r
- (uint8_t *)&p_ioctl_rec->cm_req_pdata_rec.req_pdata;\r
- }\r
- ual_cm_req_cb( &p_ioctl_rec->req_rec, &p_ioctl_rec->qp_mod_rtr,\r
- &p_ioctl_rec->qp_mod_rts, p_ioctl_rec->timeout_ms );\r
- break;\r
- }\r
- case CM_REP_REC:\r
- {\r
- struct _cm_rep_cb_ioctl_rec *p_ioctl_rec =\r
- &p_cm_cb_info->ioctl_rec.cm_rep_cb_ioctl_rec;\r
-\r
- if (p_ioctl_rec->rep_rec.qp_type == IB_QPT_UNRELIABLE_DGRM)\r
- {\r
- p_ioctl_rec->rep_rec.p_rep_pdata =\r
- (uint8_t *)&p_ioctl_rec->cm_rep_pdata_rec.sidr_rep_pdata;\r
- }\r
- else\r
- {\r
- p_ioctl_rec->rep_rec.p_rep_pdata =\r
- (uint8_t *)&p_ioctl_rec->cm_rep_pdata_rec.rep_pdata;\r
- }\r
- ual_cm_rep_cb( &p_ioctl_rec->rep_rec, &p_ioctl_rec->qp_mod_rtr,\r
- &p_ioctl_rec->qp_mod_rts );\r
- break;\r
- }\r
- case CM_RTU_REC:\r
- {\r
- struct _cm_rtu_cb_ioctl_rec *p_ioctl_rec =\r
- &p_cm_cb_info->ioctl_rec.cm_rtu_cb_ioctl_rec;\r
-\r
- p_ioctl_rec->rtu_rec.p_rtu_pdata = (uint8_t *)&p_ioctl_rec->rtu_pdata;\r
- ual_cm_rtu_cb( &p_ioctl_rec->rtu_rec );\r
- break;\r
- }\r
- case CM_REJ_REC:\r
- {\r
- struct _cm_rej_cb_ioctl_rec *p_ioctl_rec =\r
- &p_cm_cb_info->ioctl_rec.cm_rej_cb_ioctl_rec;\r
-\r
- p_ioctl_rec->rej_rec.p_rej_pdata = \r
- (uint8_t*)&p_ioctl_rec->rej_pdata;\r
- p_ioctl_rec->rej_rec.p_ari =\r
- (uint8_t*)&p_ioctl_rec->ari_pdata;\r
- ual_cm_rej_cb( &p_ioctl_rec->rej_rec );\r
- break;\r
- }\r
- case CM_MRA_REC:\r
- {\r
- struct _cm_mra_cb_ioctl_rec *p_ioctl_rec =\r
- &p_cm_cb_info->ioctl_rec.cm_mra_cb_ioctl_rec;\r
-\r
- p_ioctl_rec->mra_rec.p_mra_pdata =\r
- (uint8_t*)&p_ioctl_rec->mra_pdata;\r
- ual_cm_mra_cb( &p_cm_cb_info->ioctl_rec.cm_mra_cb_ioctl_rec.mra_rec );\r
- break;\r
- }\r
- case CM_LAP_REC:\r
- {\r
- struct _cm_lap_cb_ioctl_rec *p_ioctl_rec =\r
- &p_cm_cb_info->ioctl_rec.cm_lap_cb_ioctl_rec;\r
-\r
- p_ioctl_rec->lap_rec.p_lap_pdata =\r
- (uint8_t *)&p_ioctl_rec->lap_pdata;\r
- ual_cm_lap_cb( &p_ioctl_rec->lap_rec );\r
- break;\r
- }\r
- case CM_APR_REC:\r
- {\r
- struct _cm_apr_cb_ioctl_rec *p_ioctl_rec =\r
- &p_cm_cb_info->ioctl_rec.cm_apr_cb_ioctl_rec;\r
-\r
- p_ioctl_rec->apr_rec.p_apr_pdata =\r
- (uint8_t*)&p_ioctl_rec->apr_pdata;\r
- p_ioctl_rec->apr_rec.p_info =\r
- (uint8_t*)&p_ioctl_rec->apr_info;\r
- ual_cm_apr_cb( &p_ioctl_rec->apr_rec );\r
- break;\r
- }\r
- case CM_DREQ_REC:\r
- {\r
- struct _cm_dreq_cb_ioctl_rec *p_ioctl_rec =\r
- &p_cm_cb_info->ioctl_rec.cm_dreq_cb_ioctl_rec;\r
-\r
- p_ioctl_rec->dreq_rec.p_dreq_pdata =\r
- (uint8_t*)&p_ioctl_rec->dreq_pdata;\r
- ual_cm_dreq_cb( &p_ioctl_rec->dreq_rec );\r
- break;\r
- }\r
- case CM_DREP_REC:\r
- {\r
- struct _cm_drep_cb_ioctl_rec *p_ioctl_rec =\r
- &p_cm_cb_info->ioctl_rec.cm_drep_cb_ioctl_rec;\r
-\r
- p_ioctl_rec->drep_rec.p_drep_pdata =\r
- (uint8_t*)&p_ioctl_rec->drep_pdata;\r
- ual_cm_drep_cb( &p_ioctl_rec->drep_rec );\r
- break;\r
- }\r
- default:\r
- /* Unknown record type - just return */\r
- break;\r
- }\r
-}\r
-\r
-\r
-\r
+//\r
+//\r
+///* Thread to process the asynchronous CM notifications */\r
+//void\r
+//cm_cb(\r
+// IN DWORD error_code,\r
+// IN DWORD ret_bytes,\r
+// IN LPOVERLAPPED p_ov )\r
+//{\r
+// AL_ENTER( AL_DBG_CM );\r
+//\r
+// UNUSED_PARAM( p_ov );\r
+//\r
+// if( !error_code && ret_bytes )\r
+// {\r
+// /* Check the record type and adjust the pointers */\r
+// /* TBD */\r
+// __process_cm_cb( &gp_al_mgr->ual_mgr.cm_cb_info );\r
+// }\r
+// \r
+// if( error_code != ERROR_OPERATION_ABORTED )\r
+// {\r
+// if( !DeviceIoControl( gp_al_mgr->ual_mgr.h_cm_file, UAL_GET_CM_CB_INFO,\r
+// NULL, 0,\r
+// &gp_al_mgr->ual_mgr.cm_cb_info, sizeof(cm_cb_ioctl_info_t),\r
+// NULL, &gp_al_mgr->ual_mgr.cm_ov ) )\r
+// {\r
+// if( GetLastError() != ERROR_IO_PENDING )\r
+// {\r
+// AL_TRACE_EXIT( AL_DBG_ERROR,\r
+// ("DeviceIoControl for CM callback request returned %d.\n",\r
+// GetLastError()) );\r
+// }\r
+// }\r
+// }\r
+//\r
+// AL_EXIT( AL_DBG_CM );\r
+//}\r
+\r
+\r
+\r
+//static void\r
+//__process_cm_cb(\r
+// IN cm_cb_ioctl_info_t* p_cm_cb_info)\r
+//{\r
+// switch( p_cm_cb_info->rec_type)\r
+// {\r
+// case CM_REQ_REC:\r
+// {\r
+// struct _cm_req_cb_ioctl_rec *p_ioctl_rec =\r
+// &p_cm_cb_info->ioctl_rec.cm_req_cb_ioctl_rec;\r
+//\r
+// if (p_ioctl_rec->req_rec.qp_type == IB_QPT_UNRELIABLE_DGRM)\r
+// {\r
+// p_ioctl_rec->req_rec.p_req_pdata =\r
+// (uint8_t *)&p_ioctl_rec->cm_req_pdata_rec.sidr_req_pdata;\r
+// }\r
+// else\r
+// {\r
+// p_ioctl_rec->req_rec.p_req_pdata =\r
+// (uint8_t *)&p_ioctl_rec->cm_req_pdata_rec.req_pdata;\r
+// }\r
+// ual_cm_req_cb( &p_ioctl_rec->req_rec, &p_ioctl_rec->qp_mod_rtr,\r
+// &p_ioctl_rec->qp_mod_rts, p_ioctl_rec->timeout_ms );\r
+// break;\r
+// }\r
+// case CM_REP_REC:\r
+// {\r
+// struct _cm_rep_cb_ioctl_rec *p_ioctl_rec =\r
+// &p_cm_cb_info->ioctl_rec.cm_rep_cb_ioctl_rec;\r
+//\r
+// if (p_ioctl_rec->rep_rec.qp_type == IB_QPT_UNRELIABLE_DGRM)\r
+// {\r
+// p_ioctl_rec->rep_rec.p_rep_pdata =\r
+// (uint8_t *)&p_ioctl_rec->cm_rep_pdata_rec.sidr_rep_pdata;\r
+// }\r
+// else\r
+// {\r
+// p_ioctl_rec->rep_rec.p_rep_pdata =\r
+// (uint8_t *)&p_ioctl_rec->cm_rep_pdata_rec.rep_pdata;\r
+// }\r
+// ual_cm_rep_cb( &p_ioctl_rec->rep_rec, &p_ioctl_rec->qp_mod_rtr,\r
+// &p_ioctl_rec->qp_mod_rts );\r
+// break;\r
+// }\r
+// case CM_RTU_REC:\r
+// {\r
+// struct _cm_rtu_cb_ioctl_rec *p_ioctl_rec =\r
+// &p_cm_cb_info->ioctl_rec.cm_rtu_cb_ioctl_rec;\r
+//\r
+// p_ioctl_rec->rtu_rec.p_rtu_pdata = (uint8_t *)&p_ioctl_rec->rtu_pdata;\r
+// ual_cm_rtu_cb( &p_ioctl_rec->rtu_rec );\r
+// break;\r
+// }\r
+// case CM_REJ_REC:\r
+// {\r
+// struct _cm_rej_cb_ioctl_rec *p_ioctl_rec =\r
+// &p_cm_cb_info->ioctl_rec.cm_rej_cb_ioctl_rec;\r
+//\r
+// p_ioctl_rec->rej_rec.p_rej_pdata = \r
+// (uint8_t*)&p_ioctl_rec->rej_pdata;\r
+// p_ioctl_rec->rej_rec.p_ari =\r
+// (uint8_t*)&p_ioctl_rec->ari_pdata;\r
+// ual_cm_rej_cb( &p_ioctl_rec->rej_rec );\r
+// break;\r
+// }\r
+// case CM_MRA_REC:\r
+// {\r
+// struct _cm_mra_cb_ioctl_rec *p_ioctl_rec =\r
+// &p_cm_cb_info->ioctl_rec.cm_mra_cb_ioctl_rec;\r
+//\r
+// p_ioctl_rec->mra_rec.p_mra_pdata =\r
+// (uint8_t*)&p_ioctl_rec->mra_pdata;\r
+// ual_cm_mra_cb( &p_cm_cb_info->ioctl_rec.cm_mra_cb_ioctl_rec.mra_rec );\r
+// break;\r
+// }\r
+// case CM_LAP_REC:\r
+// {\r
+// struct _cm_lap_cb_ioctl_rec *p_ioctl_rec =\r
+// &p_cm_cb_info->ioctl_rec.cm_lap_cb_ioctl_rec;\r
+//\r
+// p_ioctl_rec->lap_rec.p_lap_pdata =\r
+// (uint8_t *)&p_ioctl_rec->lap_pdata;\r
+// ual_cm_lap_cb( &p_ioctl_rec->lap_rec );\r
+// break;\r
+// }\r
+// case CM_APR_REC:\r
+// {\r
+// struct _cm_apr_cb_ioctl_rec *p_ioctl_rec =\r
+// &p_cm_cb_info->ioctl_rec.cm_apr_cb_ioctl_rec;\r
+//\r
+// p_ioctl_rec->apr_rec.p_apr_pdata =\r
+// (uint8_t*)&p_ioctl_rec->apr_pdata;\r
+// p_ioctl_rec->apr_rec.p_info =\r
+// (uint8_t*)&p_ioctl_rec->apr_info;\r
+// ual_cm_apr_cb( &p_ioctl_rec->apr_rec );\r
+// break;\r
+// }\r
+// case CM_DREQ_REC:\r
+// {\r
+// struct _cm_dreq_cb_ioctl_rec *p_ioctl_rec =\r
+// &p_cm_cb_info->ioctl_rec.cm_dreq_cb_ioctl_rec;\r
+//\r
+// p_ioctl_rec->dreq_rec.p_dreq_pdata =\r
+// (uint8_t*)&p_ioctl_rec->dreq_pdata;\r
+// ual_cm_dreq_cb( &p_ioctl_rec->dreq_rec );\r
+// break;\r
+// }\r
+// case CM_DREP_REC:\r
+// {\r
+// struct _cm_drep_cb_ioctl_rec *p_ioctl_rec =\r
+// &p_cm_cb_info->ioctl_rec.cm_drep_cb_ioctl_rec;\r
+//\r
+// p_ioctl_rec->drep_rec.p_drep_pdata =\r
+// (uint8_t*)&p_ioctl_rec->drep_pdata;\r
+// ual_cm_drep_cb( &p_ioctl_rec->drep_rec );\r
+// break;\r
+// }\r
+// default:\r
+// /* Unknown record type - just return */\r
+// break;\r
+// }\r
+//}\r
+//\r
+//\r
+//\r
static void\r
__process_comp_cb(\r
IN comp_cb_ioctl_info_t* p_comp_cb_info )\r
cl_qlist_init( &h_al->mad_list );\r
cl_qlist_init( &h_al->key_list );\r
cl_qlist_init( &h_al->query_list );\r
- cl_qlist_init( &h_al->conn_list );\r
+ cl_qlist_init( &h_al->cep_list );\r
\r
if( cl_spinlock_init( &h_al->mad_lock ) != CL_SUCCESS )\r
{\r
switch( key )\r
{\r
case UAL_BIND_CM:\r
+ //DebugBreak();\r
/* CM callback. */\r
cm_cb( err, ret_bytes, p_ov );\r
break;\r
HANDLE h_cb_port;\r
\r
/* File to handle CM related notifications */\r
- HANDLE h_cm_file;\r
- cm_cb_ioctl_info_t cm_cb_info;\r
- OVERLAPPED cm_ov;\r
+ //HANDLE h_cm_file;\r
+ //cm_cb_ioctl_info_t cm_cb_info;\r
+ //OVERLAPPED cm_ov;\r
\r
/* Thread to handle work request completions */\r
HANDLE h_cq_file;\r
return status;\r
}\r
\r
- /* Create a file object on which to issue all SA requests. */\r
+ /* Create a file object on which to issue all PNP requests. */\r
gp_pnp->h_file = ual_create_async_file( UAL_BIND_PNP );\r
if( gp_pnp->h_file == INVALID_HANDLE_VALUE )\r
{\r
}\r
\r
\r
-\r
ib_api_status_t\r
al_send_sa_req(\r
IN al_sa_req_t *p_sa_req,\r
IN const net64_t port_guid,\r
IN const uint32_t timeout_ms,\r
IN const uint32_t retry_cnt,\r
- IN const ib_user_query_t* const p_sa_req_data )\r
+ IN const ib_user_query_t* const p_sa_req_data,\r
+ IN const ib_al_flags_t flags )\r
{\r
ib_api_status_t status;\r
+ HANDLE h_dev;\r
+ DWORD ret_bytes;\r
\r
AL_ENTER( AL_DBG_QUERY );\r
\r
p_sa_req->ioctl.in.ph_sa_req = &p_sa_req->hdl;\r
p_sa_req->ioctl.in.p_status = &p_sa_req->status;\r
\r
- if( !DeviceIoControl( gp_sa_req_mgr->h_sa_dev, UAL_SEND_SA_REQ,\r
+ if( flags & IB_FLAGS_SYNC )\r
+ h_dev = g_al_device;\r
+ else\r
+ h_dev = gp_sa_req_mgr->h_sa_dev;\r
+\r
+ if( !DeviceIoControl( h_dev, UAL_SEND_SA_REQ,\r
&p_sa_req->ioctl.in, sizeof(p_sa_req->ioctl.in),\r
&p_sa_req->ioctl.out, sizeof(p_sa_req->ioctl.out),\r
NULL, &p_sa_req->ov ) )\r
}\r
else\r
{\r
- CL_ASSERT( GetLastError() == ERROR_IO_PENDING );\r
- status = IB_ERROR;\r
+ /* Completed synchronously. */\r
+ if( GetOverlappedResult( h_dev, &p_sa_req->ov, &ret_bytes, FALSE ) )\r
+ {\r
+ status = IB_SUCCESS;\r
+ /* Process the completion. */\r
+ sa_req_cb( 0, ret_bytes, &p_sa_req->ov );\r
+ }\r
+ else\r
+ {\r
+ sa_req_cb( GetLastError(), 0, &p_sa_req->ov );\r
+ status = IB_ERROR;\r
+ }\r
}\r
\r
AL_EXIT( AL_DBG_QUERY );\r
}\r
\r
\r
-\r
void CALLBACK\r
sa_req_cb(\r
IN DWORD error_code,\r
*****************************************************************************/\r
\r
\r
+#include <complib/cl_rbmap.h>\r
#include <complib/cl_qmap.h>\r
#include <complib/cl_map.h>\r
#include <complib/cl_fleximap.h>\r
#include <complib/cl_memory.h>\r
\r
\r
+/******************************************************************************\r
+*******************************************************************************\r
+************** ************\r
+************** IMPLEMENTATION OF RB MAP ************\r
+************** ************\r
+*******************************************************************************\r
+******************************************************************************/\r
+\r
+\r
+/*\r
+ * Returns whether a given item is on the left of its parent.\r
+ * Must not be called on the root sentinel, whose p_up points to itself;\r
+ * the asserts below enforce that precondition.\r
+ */\r
+static boolean_t\r
+__cl_rbmap_is_left_child(\r
+ IN const cl_rbmap_item_t* const p_item )\r
+{\r
+ CL_ASSERT( p_item );\r
+ CL_ASSERT( p_item->p_up );\r
+ CL_ASSERT( p_item->p_up != p_item );\r
+\r
+ return( p_item->p_up->p_left == p_item );\r
+}\r
+\r
+\r
+/*\r
+ * Retrieve the pointer to the parent's pointer to an item.\r
+ * Lets rotations and removal relink a subtree through its parent slot\r
+ * without special-casing whether the item is a left or right child\r
+ * (the map root sentinel makes this uniform for the tree root too).\r
+ */\r
+static cl_rbmap_item_t**\r
+__cl_rbmap_get_parent_ptr_to_item(\r
+ IN cl_rbmap_item_t* const p_item )\r
+{\r
+ CL_ASSERT( p_item );\r
+ CL_ASSERT( p_item->p_up );\r
+ CL_ASSERT( p_item->p_up != p_item );\r
+\r
+ if( __cl_rbmap_is_left_child( p_item ) )\r
+ return( &p_item->p_up->p_left );\r
+\r
+ /* Not the left child, so it must be the right child. */\r
+ CL_ASSERT( p_item->p_up->p_right == p_item );\r
+ return( &p_item->p_up->p_right );\r
+}\r
+\r
+\r
+/*\r
+ * Rotate a node to the left. This rotation affects the least number of links\r
+ * between nodes and brings the level of C up by one while increasing the depth\r
+ * of A one. Note that the links to/from W, X, Y, and Z are not affected.\r
+ * Precondition: p_item (A) has a non-nil right child (C).\r
+ *\r
+ * R R\r
+ * | |\r
+ * A C\r
+ * / \ / \\r
+ * W C A Z\r
+ * / \ / \\r
+ * B Z W B\r
+ * / \ / \\r
+ * X Y X Y\r
+ */\r
+static void\r
+__cl_rbmap_rot_left(\r
+ IN cl_rbmap_t* const p_map,\r
+ IN cl_rbmap_item_t* const p_item )\r
+{\r
+ cl_rbmap_item_t **pp_root;\r
+\r
+ CL_ASSERT( p_map );\r
+ CL_ASSERT( p_item );\r
+ CL_ASSERT( p_item->p_right != &p_map->nil );\r
+\r
+ pp_root = __cl_rbmap_get_parent_ptr_to_item( p_item );\r
+\r
+ /* Point R to C instead of A. */\r
+ *pp_root = p_item->p_right;\r
+ /* Set C's parent to R. */\r
+ (*pp_root)->p_up = p_item->p_up;\r
+\r
+ /* Set A's right to B */\r
+ p_item->p_right = (*pp_root)->p_left;\r
+ /*\r
+ * Set B's parent to A. We trap for B being NIL since the\r
+ * caller may depend on NIL not changing.\r
+ */\r
+ if( (*pp_root)->p_left != &p_map->nil )\r
+ (*pp_root)->p_left->p_up = p_item;\r
+\r
+ /* Set C's left to A. */\r
+ (*pp_root)->p_left = p_item;\r
+ /* Set A's parent to C. */\r
+ p_item->p_up = *pp_root;\r
+}\r
+\r
+\r
+/*\r
+ * Rotate a node to the right. This rotation affects the least number of links\r
+ * between nodes and brings the level of A up by one while increasing the depth\r
+ * of C one. Note that the links to/from W, X, Y, and Z are not affected.\r
+ * Precondition: p_item (C) has a non-nil left child (A).\r
+ *\r
+ * R R\r
+ * | |\r
+ * C A\r
+ * / \ / \\r
+ * A Z W C\r
+ * / \ / \\r
+ * W B B Z\r
+ * / \ / \\r
+ * X Y X Y\r
+ */\r
+static void\r
+__cl_rbmap_rot_right(\r
+ IN cl_rbmap_t* const p_map,\r
+ IN cl_rbmap_item_t* const p_item )\r
+{\r
+ cl_rbmap_item_t **pp_root;\r
+\r
+ CL_ASSERT( p_map );\r
+ CL_ASSERT( p_item );\r
+ CL_ASSERT( p_item->p_left != &p_map->nil );\r
+\r
+ /* Point R to A instead of C. */\r
+ pp_root = __cl_rbmap_get_parent_ptr_to_item( p_item );\r
+ (*pp_root) = p_item->p_left;\r
+ /* Set A's parent to R. */\r
+ (*pp_root)->p_up = p_item->p_up;\r
+\r
+ /* Set C's left to B */\r
+ p_item->p_left = (*pp_root)->p_right;\r
+ /*\r
+ * Set B's parent to C. We trap for B being NIL since the\r
+ * caller may depend on NIL not changing.\r
+ */\r
+ if( (*pp_root)->p_right != &p_map->nil )\r
+ (*pp_root)->p_right->p_up = p_item;\r
+\r
+ /* Set A's right to C. */\r
+ (*pp_root)->p_right = p_item;\r
+ /* Set C's parent to A. */\r
+ p_item->p_up = *pp_root;\r
+}\r
+\r
+\r
+/*\r
+ * Balance a tree starting at a given item back to the root.\r
+ * Standard red-black insert fixup: p_item enters red; while its parent is\r
+ * also red, recolor (red grand-uncle) or rotate (black grand-uncle) and\r
+ * move up. The two branches are mirror images of each other.\r
+ */\r
+static void\r
+__cl_rbmap_ins_bal(\r
+ IN cl_rbmap_t* const p_map,\r
+ IN cl_rbmap_item_t* p_item )\r
+{\r
+ cl_rbmap_item_t* p_grand_uncle;\r
+\r
+ CL_ASSERT( p_map );\r
+ CL_ASSERT( p_item );\r
+ CL_ASSERT( p_item != &p_map->root );\r
+\r
+ while( p_item->p_up->color == CL_MAP_RED )\r
+ {\r
+ if( __cl_rbmap_is_left_child( p_item->p_up ) )\r
+ {\r
+ p_grand_uncle = p_item->p_up->p_up->p_right;\r
+ CL_ASSERT( p_grand_uncle );\r
+ if( p_grand_uncle->color == CL_MAP_RED )\r
+ {\r
+ /* Red grand-uncle: recolor and continue from grandparent. */\r
+ p_grand_uncle->color = CL_MAP_BLACK;\r
+ p_item->p_up->color = CL_MAP_BLACK;\r
+ p_item->p_up->p_up->color = CL_MAP_RED;\r
+ p_item = p_item->p_up->p_up;\r
+ continue;\r
+ }\r
+\r
+ /* Inner child: rotate into the outer position first. */\r
+ if( !__cl_rbmap_is_left_child( p_item ) )\r
+ {\r
+ p_item = p_item->p_up;\r
+ __cl_rbmap_rot_left( p_map, p_item );\r
+ }\r
+ p_item->p_up->color = CL_MAP_BLACK;\r
+ p_item->p_up->p_up->color = CL_MAP_RED;\r
+ __cl_rbmap_rot_right( p_map, p_item->p_up->p_up );\r
+ }\r
+ else\r
+ {\r
+ /* Mirror image of the branch above. */\r
+ p_grand_uncle = p_item->p_up->p_up->p_left;\r
+ CL_ASSERT( p_grand_uncle );\r
+ if( p_grand_uncle->color == CL_MAP_RED )\r
+ {\r
+ p_grand_uncle->color = CL_MAP_BLACK;\r
+ p_item->p_up->color = CL_MAP_BLACK;\r
+ p_item->p_up->p_up->color = CL_MAP_RED;\r
+ p_item = p_item->p_up->p_up;\r
+ continue;\r
+ }\r
+\r
+ if( __cl_rbmap_is_left_child( p_item ) )\r
+ {\r
+ p_item = p_item->p_up;\r
+ __cl_rbmap_rot_right( p_map, p_item );\r
+ }\r
+ p_item->p_up->color = CL_MAP_BLACK;\r
+ p_item->p_up->p_up->color = CL_MAP_RED;\r
+ __cl_rbmap_rot_left( p_map, p_item->p_up->p_up );\r
+ }\r
+ }\r
+}\r
+\r
+\r
+/*\r
+ * Insert p_item into the map below p_insert_at, which the caller found by\r
+ * a prior lookup; "left" selects which child slot of p_insert_at is taken.\r
+ * Passing cl_rbmap_end() as p_insert_at inserts into an empty tree.\r
+ * The new node starts red, then the tree is rebalanced bottom-up.\r
+ */\r
+void\r
+cl_rbmap_insert(\r
+ IN cl_rbmap_t* const p_map,\r
+ IN cl_rbmap_item_t* const p_insert_at,\r
+ IN cl_rbmap_item_t* const p_item,\r
+ IN boolean_t left )\r
+{\r
+ CL_ASSERT( p_map );\r
+ CL_ASSERT( p_map->state == CL_INITIALIZED );\r
+ CL_ASSERT( p_insert_at );\r
+ CL_ASSERT( p_item );\r
+ CL_ASSERT( p_map->root.p_up == &p_map->root );\r
+ CL_ASSERT( p_map->root.color != CL_MAP_RED );\r
+ CL_ASSERT( p_map->nil.color != CL_MAP_RED );\r
+\r
+ /* New nodes are leaves (both children nil) and start out red. */\r
+ p_item->p_left = &p_map->nil;\r
+ p_item->p_right = &p_map->nil;\r
+ p_item->color = CL_MAP_RED;\r
+\r
+ if( p_insert_at == cl_rbmap_end( p_map ) )\r
+ {\r
+ /* Empty tree: the new item becomes the tree root. */\r
+ p_map->root.p_left = p_item;\r
+ p_item->p_up = &p_map->root;\r
+ }\r
+ else\r
+ {\r
+ if( left )\r
+ p_insert_at->p_left = p_item;\r
+ else\r
+ p_insert_at->p_right = p_item;\r
+\r
+ p_item->p_up = p_insert_at;\r
+ }\r
+\r
+ /* Increase the count. */\r
+ p_map->count++;\r
+\r
+ /*\r
+ * We have added depth to this section of the tree.\r
+ * Rebalance as necessary as we retrace our path through the tree\r
+ * and update colors.\r
+ */\r
+ __cl_rbmap_ins_bal( p_map, p_item );\r
+\r
+ /* The tree root is always black. */\r
+ cl_rbmap_root( p_map )->color = CL_MAP_BLACK;\r
+\r
+ /*\r
+ * Note that it is not necessary to re-color the nil node black because all\r
+ * red color assignments are made via the p_up pointer, and nil is never\r
+ * set as the value of a p_up pointer.\r
+ */\r
+\r
+#ifdef _DEBUG_\r
+ /* Set the pointer to the map in the map item for consistency checking. */\r
+ p_item->p_map = p_map;\r
+#endif\r
+}\r
+\r
+\r
+/*\r
+ * Restore red-black properties after a black node was spliced out;\r
+ * p_item is the child that replaced it (possibly the nil sentinel).\r
+ * Note: despite its name, "p_uncle" below is p_item's sibling.\r
+ * The two branches are mirror images of each other.\r
+ */\r
+static void\r
+__cl_rbmap_del_bal(\r
+ IN cl_rbmap_t* const p_map,\r
+ IN cl_rbmap_item_t* p_item )\r
+{\r
+ cl_rbmap_item_t *p_uncle;\r
+\r
+ while( (p_item->color != CL_MAP_RED) && (p_item->p_up != &p_map->root) )\r
+ {\r
+ if( __cl_rbmap_is_left_child( p_item ) )\r
+ {\r
+ p_uncle = p_item->p_up->p_right;\r
+\r
+ if( p_uncle->color == CL_MAP_RED )\r
+ {\r
+ /* Red sibling: rotate to get a black sibling. */\r
+ p_uncle->color = CL_MAP_BLACK;\r
+ p_item->p_up->color = CL_MAP_RED;\r
+ __cl_rbmap_rot_left( p_map, p_item->p_up );\r
+ p_uncle = p_item->p_up->p_right;\r
+ }\r
+\r
+ if( p_uncle->p_right->color != CL_MAP_RED )\r
+ {\r
+ if( p_uncle->p_left->color != CL_MAP_RED )\r
+ {\r
+ /* Sibling has two black children: recolor, move up. */\r
+ p_uncle->color = CL_MAP_RED;\r
+ p_item = p_item->p_up;\r
+ continue;\r
+ }\r
+\r
+ p_uncle->p_left->color = CL_MAP_BLACK;\r
+ p_uncle->color = CL_MAP_RED;\r
+ __cl_rbmap_rot_right( p_map, p_uncle );\r
+ p_uncle = p_item->p_up->p_right;\r
+ }\r
+ p_uncle->color = p_item->p_up->color;\r
+ p_item->p_up->color = CL_MAP_BLACK;\r
+ p_uncle->p_right->color = CL_MAP_BLACK;\r
+ __cl_rbmap_rot_left( p_map, p_item->p_up );\r
+ break;\r
+ }\r
+ else\r
+ {\r
+ /* Mirror image of the branch above. */\r
+ p_uncle = p_item->p_up->p_left;\r
+\r
+ if( p_uncle->color == CL_MAP_RED )\r
+ {\r
+ p_uncle->color = CL_MAP_BLACK;\r
+ p_item->p_up->color = CL_MAP_RED;\r
+ __cl_rbmap_rot_right( p_map, p_item->p_up );\r
+ p_uncle = p_item->p_up->p_left;\r
+ }\r
+\r
+ if( p_uncle->p_left->color != CL_MAP_RED )\r
+ {\r
+ if( p_uncle->p_right->color != CL_MAP_RED )\r
+ {\r
+ p_uncle->color = CL_MAP_RED;\r
+ p_item = p_item->p_up;\r
+ continue;\r
+ }\r
+\r
+ p_uncle->p_right->color = CL_MAP_BLACK;\r
+ p_uncle->color = CL_MAP_RED;\r
+ __cl_rbmap_rot_left( p_map, p_uncle );\r
+ p_uncle = p_item->p_up->p_left;\r
+ }\r
+ p_uncle->color = p_item->p_up->color;\r
+ p_item->p_up->color = CL_MAP_BLACK;\r
+ p_uncle->p_left->color = CL_MAP_BLACK;\r
+ __cl_rbmap_rot_right( p_map, p_item->p_up );\r
+ break;\r
+ }\r
+ }\r
+ p_item->color = CL_MAP_BLACK;\r
+}\r
+\r
+\r
+/*\r
+ * Remove p_item from the map. If the item has two children, its in-order\r
+ * successor is spliced out in its place and then exchanged for it, so the\r
+ * caller's item (not the successor's payload) leaves the tree. Removing\r
+ * cl_rbmap_end() is a no-op.\r
+ */\r
+void\r
+cl_rbmap_remove_item(\r
+ IN cl_rbmap_t* const p_map,\r
+ IN cl_rbmap_item_t* const p_item )\r
+{\r
+ cl_rbmap_item_t *p_child, *p_del_item;\r
+\r
+ CL_ASSERT( p_map );\r
+ CL_ASSERT( p_map->state == CL_INITIALIZED );\r
+ CL_ASSERT( p_item );\r
+ CL_ASSERT( p_item->p_map == p_map );\r
+\r
+ if( p_item == cl_rbmap_end( p_map ) )\r
+ return;\r
+\r
+ if( p_item->p_right == &p_map->nil )\r
+ {\r
+ /* The item being removed has children on at most its left. */\r
+ p_del_item = p_item;\r
+ p_child = p_del_item->p_left;\r
+ }\r
+ else if( p_item->p_left == &p_map->nil )\r
+ {\r
+ /* The item being removed has children on at most its right. */\r
+ p_del_item = p_item;\r
+ p_child = p_del_item->p_right;\r
+ }\r
+ else\r
+ {\r
+ /*\r
+ * The item being removed has children on both side.\r
+ * We select the item that will replace it. After removing\r
+ * the substitute item and rebalancing, the tree will have the\r
+ * correct topology. Exchanging the substitute for the item\r
+ * will finalize the removal.\r
+ */\r
+ p_del_item = p_item->p_right;\r
+ CL_ASSERT( p_del_item != &p_map->nil );\r
+ while( p_del_item->p_left != &p_map->nil )\r
+ p_del_item = p_del_item->p_left;\r
+ p_child = p_del_item->p_right;\r
+ }\r
+\r
+ /* Decrement the item count. */\r
+ p_map->count--;\r
+\r
+ /*\r
+ * This assignment may modify the parent pointer of the nil node.\r
+ * This is inconsequential.\r
+ */\r
+ p_child->p_up = p_del_item->p_up;\r
+ (*__cl_rbmap_get_parent_ptr_to_item( p_del_item )) = p_child; /* splice p_del_item out of the tree */\r
+\r
+ /* Removing a black node may violate the black-height invariant. */\r
+ if( p_del_item->color != CL_MAP_RED )\r
+ __cl_rbmap_del_bal( p_map, p_child );\r
+\r
+ /*\r
+ * Note that the splicing done below does not need to occur before\r
+ * the tree is balanced, since the actual topology changes are made by the\r
+ * preceding code. The topology is preserved by the color assignment made\r
+ * below (reader should be reminded that p_del_item == p_item in some cases).\r
+ */\r
+ if( p_del_item != p_item )\r
+ {\r
+ /*\r
+ * Finalize the removal of the specified item by exchanging it with\r
+ * the substitute which we removed above.\r
+ */\r
+ p_del_item->p_up = p_item->p_up;\r
+ p_del_item->p_left = p_item->p_left;\r
+ p_del_item->p_right = p_item->p_right;\r
+ (*__cl_rbmap_get_parent_ptr_to_item( p_item )) = p_del_item;\r
+ p_item->p_right->p_up = p_del_item;\r
+ p_item->p_left->p_up = p_del_item;\r
+ p_del_item->color = p_item->color;\r
+ }\r
+\r
+ CL_ASSERT( p_map->nil.color != CL_MAP_RED );\r
+\r
+#ifdef _DEBUG_\r
+ /* Clear the pointer to the map since the item has been removed. */\r
+ p_item->p_map = NULL;\r
+#endif\r
+}\r
+\r
+\r
/******************************************************************************\r
*******************************************************************************\r
************** ************\r
}\r
\r
\r
-#define SEC_TO_MICRO 1000000 // s to µs conversion\r
+#define SEC_TO_MICRO 1000000ULL // s to µs conversion\r
\r
uint64_t\r
cl_get_time_stamp( void )\r
if( !QueryPerformanceCounter( &tick_count ) )\r
return( 0 );\r
\r
- return( tick_count.QuadPart * SEC_TO_MICRO / frequency.QuadPart );\r
+ return( tick_count.QuadPart / (frequency.QuadPart / SEC_TO_MICRO) );\r
}\r
\r
uint32_t\r
}\r
}\r
\r
-/////////////////////////////////////////////////////////\r
-// Convert a Mellanox CQE into IBAL format\r
-/////////////////////////////////////////////////////////\r
-void\r
-mlnx_conv_vapi_cqe(\r
- IN VAPI_wc_desc_t *desc_p,\r
- OUT ib_wc_t *wc_p )\r
-{\r
- wc_p->wr_id = desc_p->id;\r
-\r
- wc_p->status = mlnx_map_vapi_cqe_status(desc_p->status);\r
- wc_p->wc_type = mlnx_map_vapi_cqe_type(desc_p->opcode);\r
-\r
- wc_p->length = (IB_COMP_SUCCESS == desc_p->status) ? desc_p->byte_len : 0;\r
- wc_p->recv.conn.recv_opt = 0; // TBD: RC support, SE\r
-\r
- CL_TRACE(MLNX_DBG_DIRECT, g_mlnx_dbg_lvl, ("cqe type %d length 0x%x\n", wc_p->wc_type, wc_p->length));\r
-\r
- // Immediate data\r
- if (desc_p->imm_data_valid)\r
- {\r
- wc_p->recv.conn.recv_opt |= IB_RECV_OPT_IMMEDIATE; \r
- wc_p->recv.conn.immediate_data = cl_ntoh32 (desc_p->imm_data);\r
- }\r
- // GRH\r
- if (desc_p->grh_flag)\r
- {\r
- wc_p->recv.conn.recv_opt |= IB_RECV_OPT_GRH_VALID;\r
- }\r
-\r
- switch(desc_p->remote_node_addr.type)\r
- {\r
- case VAPI_RNA_RD:\r
- // TBD: RD Support\r
- CL_TRACE(MLNX_DBG_DIRECT, g_mlnx_dbg_lvl, ("Unsupported RD\n"));\r
- break;\r
-\r
- case VAPI_RNA_UD:\r
- // CL_TRACE(MLNX_DBG_DIRECT, g_mlnx_dbg_lvl, ("Supported UD\n"));\r
- wc_p->recv.ud.remote_qp = cl_ntoh32(desc_p->remote_node_addr.qp_ety.qp);\r
- wc_p->recv.ud.pkey_index = (uint16_t)desc_p->pkey_ix;\r
- wc_p->recv.ud.remote_lid = cl_ntoh16(desc_p->remote_node_addr.slid);\r
- wc_p->recv.ud.remote_sl = desc_p->remote_node_addr.sl;\r
- // wc_p->recv.ud.path_bits = desc_p->remote_node_addr.ee_dlid.dst_path_bits; // PATH:\r
- wc_p->recv.ud.path_bits = 0;\r
-#if 0\r
- printk ("********* MLNX *************\n"); \r
- printk ("rem_qp = 0x%x pbits = 0x%x pkey_idx = 0x%x\n",\r
- wc_p->recv.ud.remote_qp, wc_p->recv.ud.path_bits,\r
- wc_p->recv.ud.pkey_index );\r
- printk ("GOT PBITS 0x%x\n", desc_p->remote_node_addr.ee_dlid.dst_path_bits);\r
- printk ("*****************\n");\r
-#endif\r
-\r
- break;\r
-\r
- case VAPI_RNA_RAW_ETY:\r
- // TBD: RAW ETH\r
- break;\r
-\r
- case VAPI_RNA_RAW_IPV6:\r
- // TBD: RAW IPV6\r
- default:\r
- break;\r
- }\r
-}\r
-\r
//////////////////////////////////////////////////////////////\r
// Convert from VAPI memory-region attributes to IBAL \r
//////////////////////////////////////////////////////////////\r
OUT VAPI_qp_attr_t *qp_attr_p, \r
OUT VAPI_qp_attr_mask_t *attr_mask_p)\r
{\r
- /* VAPI doesn't support modifying the WQE depth ever. */\r
- if( modify_attr_p->state.rtr.opts & IB_MOD_QP_SQ_DEPTH ||\r
- modify_attr_p->state.rtr.opts & IB_MOD_QP_RQ_DEPTH )\r
- {\r
- return IB_UNSUPPORTED;\r
- }\r
\r
qp_attr_p->qp_state = mlnx_map_ibal_qp_state(modify_attr_p->req_state);\r
*attr_mask_p = QP_ATTR_QP_STATE;\r
break;\r
\r
case IB_QPS_RTR:\r
+ /* VAPI doesn't support modifying the WQE depth ever. */\r
+ if( modify_attr_p->state.rtr.opts & IB_MOD_QP_SQ_DEPTH ||\r
+ modify_attr_p->state.rtr.opts & IB_MOD_QP_RQ_DEPTH )\r
+ {\r
+ return IB_UNSUPPORTED;\r
+ }\r
+\r
*attr_mask_p |= QP_ATTR_RQ_PSN |\r
QP_ATTR_DEST_QP_NUM |\r
QP_ATTR_QP_OUS_RD_ATOM |\r
break;\r
\r
case IB_QPS_RTS:\r
+ /* VAPI doesn't support modifying the WQE depth ever. */\r
+ if( modify_attr_p->state.rts.opts & IB_MOD_QP_SQ_DEPTH ||\r
+ modify_attr_p->state.rts.opts & IB_MOD_QP_RQ_DEPTH )\r
+ {\r
+ return IB_UNSUPPORTED;\r
+ }\r
+\r
*attr_mask_p |= QP_ATTR_SQ_PSN |\r
QP_ATTR_RETRY_COUNT |\r
QP_ATTR_RNR_RETRY |\r
mlnx_map_vapi_rna_type(\r
IN VAPI_remote_node_addr_type_t rna);\r
\r
-void\r
-mlnx_conv_vapi_cqe(\r
- IN VAPI_wc_desc_t *desc_p,\r
- OUT ib_wc_t *wc_p );\r
-\r
void\r
mlnx_conv_vapi_mr_attr(\r
IN ib_pd_handle_t pd_h,\r
\r
hhul_cq_hndl = hobul_p->cq_info_tbl[cq_idx].hhul_cq_hndl;\r
\r
-#if MLNX_POLL_NATIVE\r
return THHUL_cqm_poll4wc(hobul_p->hhul_hndl, hhul_cq_hndl,\r
pp_free_wclist, pp_done_wclist );\r
-#else\r
- for (wc_p = *pp_free_wclist; wc_p; wc_p = wc_p->p_next) {\r
- // Terminate the completed list (MUST be here)\r
- *pp_done_wclist = NULL;\r
-\r
- ret = THHUL_cqm_poll4cqe(hobul_p->hhul_hndl, hhul_cq_hndl, &comp_desc);\r
- // CQ_EMPTY is not an error\r
- if (HH_CQ_EMPTY == ret) {\r
- status = IB_NOT_FOUND; // This is a successful completion (no entries)\r
- break;\r
- }\r
- // Handle real errors\r
- if (HH_OK != ret) {\r
- status = IB_ERROR;\r
- goto cleanup;\r
- }\r
-\r
- status = IB_SUCCESS;\r
-\r
- // Convert the CQE and add to list (no memset())\r
- mlnx_conv_vapi_cqe( &comp_desc, wc_p );\r
- *pp_done_wclist = wc_p;\r
- pp_done_wclist = &wc_p->p_next;\r
- CL_TRACE(MLNX_DBG_INFO, g_mlnx_dbg_lvl, ("status %d done_list 0x%p\n", comp_desc.status, *pp_done_wclist));\r
- }\r
-\r
- Update free list to point to the first unused qce\r
- if (IB_NOT_FOUND == status && wc_p != *pp_free_wclist)\r
- status = IB_SUCCESS;\r
- *pp_free_wclist = wc_p;\r
-\r
- return status;\r
-#endif\r
\r
cleanup:\r
CL_TRACE(CL_DBG_ERROR, g_mlnx_dbg_lvl, ("completes with ERROR status %d\n", status));\r
if( p_umv_buf && p_umv_buf->command )\r
{\r
// For user mode calls - obtain and verify the vendor information\r
- if ((p_umv_buf->input_size - sizeof (u_int32_t)) != hobul_p->cq_ul_resources_sz ||\r
- NULL == p_umv_buf->p_inout_buf) {\r
- status = IB_INVALID_PARAMETER;\r
- goto cleanup_locked;\r
- }\r
- cq_ul_resources_p = (void *)p_umv_buf->p_inout_buf;\r
+ if( p_umv_buf->input_size != hobul_p->cq_ul_resources_sz ||\r
+ NULL == p_umv_buf->p_inout_buf )\r
+ {\r
+ status = IB_INVALID_PARAMETER;\r
+ goto cleanup_locked;\r
+ }\r
+ cq_ul_resources_p = (void *)p_umv_buf->p_inout_buf;\r
\r
} else {\r
// for kernel mode calls - obtain the saved app resources. Use prep->call->done sequence\r
IN const ib_net64_t ca_guid,\r
IN OUT ci_interface_t *p_interface )\r
{\r
- cl_memclr(p_interface, sizeof(p_interface));\r
+ cl_memclr(p_interface, sizeof(*p_interface));\r
\r
/* Guid of the CA. */\r
p_interface->guid = ca_guid;\r
("After resize_cq_prep *p_size = %d\n", *p_size));\r
\r
p_umv_buf->p_inout_buf = p_cq_ul_resources;\r
- p_umv_buf->input_size = (uint32_t)p_hobul->p_hca_ul_info->cq_ul_resources_sz + sizeof(uint32_t);\r
+ p_umv_buf->input_size = (uint32_t)p_hobul->p_hca_ul_info->cq_ul_resources_sz;\r
p_umv_buf->output_size = p_umv_buf->input_size;\r
p_umv_buf->command = TRUE;\r
\r
break;\r
}\r
else if (p_umv_buf->output_size != \r
- (p_hobul->p_hca_ul_info->cq_ul_resources_sz + sizeof(uint32_t)) )\r
+ (p_hobul->p_hca_ul_info->cq_ul_resources_sz) )\r
{\r
CL_TRACE (MLNX_TRACE_LVL_1, mlnx_dbg_lvl,\r
("Bad priv buf size %ld\n", p_umv_buf->output_size));\r
}\r
\r
\r
-void\r
-map_mtoi_wcqe (\r
- IN VAPI_wc_desc_t *p_m_cqe,\r
- OUT ib_wc_t *p_i_cqe)\r
-{\r
- p_i_cqe->wr_id = p_m_cqe->id;\r
- p_i_cqe->status = map_mtoi_cqe_status (p_m_cqe->status);\r
- p_i_cqe->wc_type = map_mtoi_cqe_type (p_m_cqe->opcode);\r
- p_i_cqe->length = (IB_COMP_SUCCESS == p_m_cqe->status) ? \r
- p_m_cqe->byte_len : 0;\r
-\r
- CL_TRACE (MLNX_TRACE_LVL_8, mlnx_dbg_lvl,\r
- ("cqe type %d length 0x%x status %d\n",\r
- p_i_cqe->wc_type, p_i_cqe->length, p_i_cqe->status));\r
-\r
- /*\r
- * FIXME: RC case\r
- */\r
- p_i_cqe->recv.conn.recv_opt = 0;\r
-\r
- if (p_m_cqe->imm_data_valid)\r
- {\r
- p_i_cqe->recv.conn.recv_opt |= IB_RECV_OPT_IMMEDIATE;\r
- p_i_cqe->recv.conn.immediate_data = CL_NTOH32 (p_m_cqe->imm_data);\r
- }\r
-\r
- if (p_m_cqe->grh_flag)\r
- {\r
- p_i_cqe->recv.conn.recv_opt |= IB_RECV_OPT_GRH_VALID;\r
- }\r
-\r
- switch (p_m_cqe->remote_node_addr.type)\r
- {\r
- case VAPI_RNA_UD:\r
- p_i_cqe->recv.ud.remote_qp = \r
- CL_NTOH32 (p_m_cqe->remote_node_addr.qp_ety.qp);\r
- p_i_cqe->recv.ud.pkey_index = (uint16_t)p_m_cqe->pkey_ix; \r
- p_i_cqe->recv.ud.remote_lid = \r
- CL_NTOH16 (p_m_cqe->remote_node_addr.slid);\r
- p_i_cqe->recv.ud.remote_sl = p_m_cqe->remote_node_addr.sl;\r
- p_i_cqe->recv.ud.path_bits = 0;\r
- break;\r
-\r
- case VAPI_RNA_RD:\r
- CL_TRACE (MLNX_TRACE_LVL_8, mlnx_dbg_lvl, ("Unsupported RD\n"));\r
- break;\r
-\r
- case VAPI_RNA_RAW_ETY:\r
- CL_TRACE (MLNX_TRACE_LVL_8, mlnx_dbg_lvl, ("Unsupported RAW_ETY\n"));\r
- break;\r
-\r
- case VAPI_RNA_RAW_IPV6:\r
- CL_TRACE (MLNX_TRACE_LVL_8, mlnx_dbg_lvl, ("Unsupported RAW_IPV6\n"));\r
- break;\r
-\r
- default:\r
- CL_TRACE (MLNX_TRACE_LVL_8, mlnx_dbg_lvl, ("Unknown type\n"));\r
- break;\r
- }\r
-}\r
-\r
-\r
static VAPI_mrw_acl_t\r
map_itom_access_ctrl (\r
IN ib_access_t i_acl)\r
status = IB_INVALID_PARAMETER;\r
return status;\r
}\r
-#if TRUE\r
- status = THHUL_cqm_poll4wc(p_hobul->hhul_hca_hndl, p_cq_info->hhul_cq_hndl,\r
- pp_free_wclist, pp_done_wclist );\r
-#else\r
- for (p_i_cqe = *pp_free_wclist; p_i_cqe; p_i_cqe = p_i_cqe->p_next)\r
- {\r
- *pp_done_wclist = NULL;\r
-\r
- hh_ret = THHUL_cqm_poll4cqe (p_hobul->hhul_hca_hndl,\r
- p_cq_info->hhul_cq_hndl,\r
- &m_cqe);\r
- if (HH_CQ_EMPTY == hh_ret)\r
- {\r
- status = IB_NOT_FOUND;\r
- break;\r
- }\r
-\r
- /*\r
- * Errors cases\r
- */\r
- if (HH_OK != hh_ret) \r
- {\r
- CL_TRACE (MLNX_TRACE_LVL_1, mlnx_dbg_lvl,\r
- ("poll4cqe get error status %d\n", hh_ret));\r
- status = IB_ERROR;\r
- return status;\r
- }\r
\r
- map_mtoi_wcqe (&m_cqe, p_i_cqe);\r
- *pp_done_wclist = p_i_cqe;\r
- pp_done_wclist = (ib_wc_t **)&p_i_cqe->p_next;\r
- status = IB_SUCCESS;\r
- }\r
+ status = THHUL_cqm_poll4wc(p_hobul->hhul_hca_hndl, p_cq_info->hhul_cq_hndl,\r
+ pp_free_wclist, pp_done_wclist );\r
\r
- /*\r
- * Update free list to point to the first unused cqe\r
- */\r
- if (IB_NOT_FOUND == status && p_i_cqe != *pp_free_wclist)\r
- {\r
- status = IB_SUCCESS;\r
- }\r
- \r
- *pp_free_wclist = p_i_cqe;\r
-#endif\r
FUNC_EXIT;\r
return status;\r
}\r
MTL_DEBUG4("THH_hob_get_gid_tbl_local: hca_hndl=0x%p, port= %d, return table len = %d\n",\r
hca_hndl, port, tbl_len_in);\r
\r
- if (MOSAL_get_exec_ctx() != MOSAL_IN_TASK) {\r
+ if (MOSAL_get_exec_ctx() != MOSAL_IN_TASK && use_mad_query_for_gid_prefix) {\r
MTL_ERROR1("THH_hob_get_gid_tbl: NOT IN TASK CONTEXT)\n");\r
return HH_ERR;\r
}\r
return HH_EINVAL;\r
}\r
\r
- mad_frame_in = TNMALLOC(u_int8_t, IB_MAD_SIZE);\r
- if ( !mad_frame_in ) {\r
- return HH_EAGAIN;\r
- }\r
- mad_frame_out = TNMALLOC(u_int8_t, IB_MAD_SIZE);\r
- if ( !mad_frame_out ) {\r
- FREE(mad_frame_in);\r
- return HH_EAGAIN;\r
- }\r
-\r
\r
/* get GID table using MAD commands in THH_cmd object */\r
if (use_mad_query_for_gid_prefix == TRUE) {\r
+\r
+ mad_frame_in = TNMALLOC(u_int8_t, IB_MAD_SIZE);\r
+ if ( !mad_frame_in ) {\r
+ return HH_EAGAIN;\r
+ }\r
+ mad_frame_out = TNMALLOC(u_int8_t, IB_MAD_SIZE);\r
+ if ( !mad_frame_out ) {\r
+ FREE(mad_frame_in);\r
+ return HH_EAGAIN;\r
+ }\r
/* First, get the GID prefix from via MAD query */\r
memset(mad_frame_in, 0, sizeof(mad_frame_in));\r
memset(mad_frame_out, 0, sizeof(mad_frame_out));\r
}\r
}\r
}\r
+ FREE(mad_frame_out);\r
+ FREE(mad_frame_in);\r
} else {\r
memset(&port_info, 0, sizeof(port_info));\r
hh_ret = THH_hob_get_qpm ( thh_hob_p, &qpm );\r
if (hh_ret != HH_OK) {\r
MTL_ERROR2( "THH_hob_get_qpm: invalid QPM handle (ret= %d)\n", hh_ret);\r
- FREE(mad_frame_out);\r
- FREE(mad_frame_in);\r
return HH_EINVAL;\r
}\r
/*** warning C4242: 'function' : conversion from 'int' to 'u_int8_t', possible loss of data ***/\r
hh_ret = THH_qpm_get_all_sgids(qpm,port,(u_int8_t)num_guids, param_gid_p);\r
if (hh_ret != HH_OK) {\r
MTL_ERROR2( "THH_qpm_get_all_sgids failed (ret= %d)\n", hh_ret);\r
- FREE(mad_frame_out);\r
- FREE(mad_frame_in);\r
return HH_EINVAL;\r
}\r
- FREE(mad_frame_out);\r
- FREE(mad_frame_in);\r
return HH_OK;\r
}\r
\r
- FREE(mad_frame_out);\r
- FREE(mad_frame_in);\r
return HH_OK;\r
} /* THH_get_gid_tbl */\r
/******************************************************************************\r
#define CQE_OPCODE_BIT_MASK MASK32(MT_BIT_SIZE(tavorprm_completion_queue_entry_st,opcode))\r
#define CQE_OPCODE_DWORD_OFFSET MT_BYTE_OFFSET(tavorprm_completion_queue_entry_st,opcode)>>2\r
#define CQE_OPCODE_SHIFT (MT_BIT_OFFSET(tavorprm_completion_queue_entry_st,opcode) & MASK32(5))\r
-HH_ret_t THHUL_cqm_poll4cqe( \r
- /*IN*/ HHUL_hca_hndl_t hca_hndl, \r
- /*IN*/ HHUL_cq_hndl_t cq, \r
- /*OUT*/ VAPI_wc_desc_t *vapi_cqe_p \r
-) \r
-{ \r
- THHUL_cq_t *thhul_cq_p= (THHUL_cq_t*)cq;\r
- volatile u_int32_t *cur_cqe;\r
- u_int32_t wqe_addr_32lsb,next_wqe_addr_32lsb;\r
- IB_wqpn_t qpn;\r
- u_int8_t opcode;\r
- u_int8_t dbd_bit;\r
- VAPI_special_qp_t qp_type;\r
- VAPI_ts_type_t qp_ts_type;\r
- u_int32_t i,dbd_cnt;\r
- HH_ret_t rc;\r
- u_int32_t cqe_cpy[CQE_SZ>>2]; /* CQE copy */\r
- /* The CQE copy is required for 2 reasons:\r
- * 1) Hold in CPU endianess. \r
- * 2) Free real CQE as soon as possible in order to release CQ lock quickly.\r
- */\r
-\r
- if (MOSAL_EXPECT_FALSE(thhul_cq_p == NULL)) {\r
- MTL_ERROR1("THHUL_cqm_poll4cqe: NULL CQ handle.\n");\r
- return HH_EINVAL_CQ_HNDL;\r
- }\r
-\r
- MOSAL_spinlock_dpc_lock(&(thhul_cq_p->cq_lock));\r
-\r
- /* Check if CQE at consumer index is valid */\r
- cur_cqe= (volatile u_int32_t *)\r
- (thhul_cq_p->cur_buf.cqe_buf_base + (thhul_cq_p->cur_buf.consumer_index << LOG2_CQE_SZ)); \r
- if (is_cqe_hw_own(cur_cqe) && /* CQE is still in HW ownership */\r
- (!cq_transition_to_resized_buf(thhul_cq_p, &cur_cqe)) ) { \r
-\r
- MOSAL_spinlock_unlock(&(thhul_cq_p->cq_lock)); \r
-#if 0\r
- THHUL_cqm_dump_cq(cq);\r
-#endif\r
- return HH_CQ_EMPTY;\r
- }\r
-\r
- /* Remove Copy of all cqe, copy as nedded only */ \r
-#if 0 /* original code */ \r
- /* Make CQE copy in correct endianess */\r
- for (i= 0; i < (CQE_SZ>>2); i++) { \r
- cqe_cpy[i]= MOSAL_be32_to_cpu(cur_cqe[i]);\r
- }\r
- /* Extract QP/WQE context fields from the CQE */\r
- wqe_addr_32lsb= (cqe_cpy[MT_BYTE_OFFSET(tavorprm_completion_queue_entry_st,wqe_adr)>>2] & \r
- (~MASK32(CQE_WQE_ADR_BIT_SZ)) );\r
- qpn= (cqe_cpy[MT_BYTE_OFFSET(tavorprm_completion_queue_entry_st,my_qpn)>>2] & MASK32(24) );\r
- vapi_cqe_p->local_qp_num= qpn;\r
- /* new CQE: completion status is taken from "opcode" field */\r
- opcode= MT_EXTRACT_ARRAY32(cqe_cpy,\r
- MT_BIT_OFFSET(tavorprm_completion_queue_entry_st,opcode), \r
- MT_BIT_SIZE(tavorprm_completion_queue_entry_st,opcode));\r
-#endif\r
- \r
- /* Extract QP/WQE context fields from the CQE */\r
- /* Byte 6 */ \r
- wqe_addr_32lsb= MOSAL_be32_to_cpu(cur_cqe[CQE_WQE_ADDR_BYTE_OFFSET]) & \r
- CQE_WQE_ADDR_BIT_MASK;\r
- \r
- /* Byte 0*/\r
- qpn= MOSAL_be32_to_cpu(cur_cqe[CQE_MY_QPN_BYTE_OFFSET]) & CQE_MY_QPN_BYTE_BIT_MASK;\r
-\r
- /* Byte 1 */\r
- cqe_cpy[CQE_MY_EE_DWORD_OFFSET] = MOSAL_be32_to_cpu(cur_cqe[CQE_MY_EE_DWORD_OFFSET]); \r
- /* Byte 2 */\r
- cqe_cpy[CQE_RQPN_DWORD_OFFSET] = MOSAL_be32_to_cpu(cur_cqe[CQE_RQPN_DWORD_OFFSET]); \r
- /* Byte 3 */\r
- cqe_cpy[CQE_RLID_DWORD_OFFSET] = MOSAL_be32_to_cpu(cur_cqe[CQE_RLID_DWORD_OFFSET]);\r
- /* Byte 7 Convert S,Opcode,Owner fileld to Be */\r
- cqe_cpy[CQE_S_DWORD_OFFSET] = MOSAL_be32_to_cpu(cur_cqe[CQE_S_DWORD_OFFSET]); \r
- \r
- /* Byte 4 */\r
- vapi_cqe_p->imm_data = MOSAL_be32_to_cpu(cur_cqe[CQE_IMMEDIATE_DWORD_OFFSET]);\r
- /* Byte 5 */\r
- vapi_cqe_p->byte_len= MOSAL_be32_to_cpu(cur_cqe[CQE_BYTE_CNT_DWORD_OFFSET]);\r
-\r
- /* new CQE: completion status is taken from "opcode" field */\r
-\r
- opcode=(cqe_cpy[CQE_OPCODE_DWORD_OFFSET]>>CQE_OPCODE_SHIFT) & CQE_OPCODE_BIT_MASK;\r
- \r
-\r
- if (MOSAL_EXPECT_TRUE((opcode & CQE_ERROR_STATUS_MASK) != CQE_ERROR_STATUS_MASK)) { /* Completed OK */ \r
- MTPERF_TIME_START(free_cqe);\r
- free_cqe(thhul_cq_p,cur_cqe); /* Free original CQE and update consumer index */\r
- MTPERF_TIME_END(free_cqe);\r
-\r
- /* DEBUG: Sanity check that the same WQE is not used twice simultaneosly */\r
-#ifdef THHUL_CQM_DEBUG_WQE_REUSE\r
- /* Get next CQE and check if valid and NDA equals freed CQE's */\r
- cur_cqe= (volatile u_int32_t *)\r
- (thhul_cq_p->cur_buf.cqe_buf_base + (thhul_cq_p->cur_buf.consumer_index << LOG2_CQE_SZ)); \r
- if ((!is_cqe_hw_own(cur_cqe)) &&\r
- ( (MOSAL_be32_to_cpu(\r
- cur_cqe[MT_BYTE_OFFSET(tavorprm_completion_queue_entry_st,wqe_adr)>>2]) & \r
- (~MASK32(CQE_WQE_ADR_BIT_SZ)) ) == wqe_addr_32lsb) ){ \r
- MTL_ERROR1(MT_FLFMT("%s: Duplicate NDA on next CQE (NDA=0x%X , consumer index=%u,%u)"),\r
- __func__, wqe_addr_32lsb, \r
- thhul_cq_p->cur_buf.consumer_index-1, thhul_cq_p->cur_buf.consumer_index);\r
- THHUL_cqm_dump_cq(cq);\r
- }\r
-#endif \r
-\r
-#ifndef IVAPI_THH\r
- rc= THHUL_qpm_comp_ok(thhul_cq_p->qpm, qpn, wqe_addr_32lsb,\r
- &qp_type,&qp_ts_type,&(vapi_cqe_p->id),&(vapi_cqe_p->free_res_count));\r
-#else\r
- rc= THHUL_qpm_comp_ok(thhul_cq_p->qpm, qpn, wqe_addr_32lsb,\r
- &qp_type,&qp_ts_type,&(vapi_cqe_p->id),&(vapi_cqe_p->free_res_count),NULL);\r
-#endif \r
- \r
- MOSAL_spinlock_unlock(&(thhul_cq_p->cq_lock)); \r
- \r
- if (MOSAL_EXPECT_FALSE(rc != HH_OK)) {\r
- MTL_ERROR1("THHUL_cqm_poll4cqe: Failed updating associated QP.\n");\r
- for (i= 0; i < (CQE_SZ>>2); i++) { \r
- MTL_ERROR1(MT_FLFMT("CQ[0x%X][%u][%u]=0x%X"),thhul_cq_p->cq_num,\r
- (thhul_cq_p->cur_buf.consumer_index - 1) & MASK32(thhul_cq_p->cur_buf.log2_num_o_cqes),\r
- i, cqe_cpy[i]);\r
- }\r
- return HH_EFATAL; /* unexpected error */\r
- }\r
- /* Extract the rest of the CQE fields into vapi_cqe_p*/\r
- rc= extract_cqe_new(cqe_cpy,vapi_cqe_p,qp_type,qp_ts_type,opcode); \r
- vapi_cqe_p->status= VAPI_SUCCESS;\r
- \r
- } else { /* Completion with error */\r
- \r
- /* Make CQE copy in correct endianess */\r
- for (i= 0; i < (CQE_SZ>>2); i++) { \r
- cqe_cpy[i]= MOSAL_be32_to_cpu(cur_cqe[i]);\r
- }\r
- \r
- MTL_DEBUG4("THHUL_cqm_poll4cqe: completion with error: cq=%d consumer_index=%d\n",\r
- thhul_cq_p->cq_num,thhul_cq_p->cur_buf.consumer_index);\r
- DUMP_CQE(thhul_cq_p->cq_num,thhul_cq_p->cur_buf.consumer_index,cur_cqe);\r
- rc= THHUL_qpm_comp_err(thhul_cq_p->qpm, qpn, wqe_addr_32lsb,\r
- &(vapi_cqe_p->id),&(vapi_cqe_p->free_res_count),&next_wqe_addr_32lsb,&dbd_bit);\r
- if (rc != HH_OK) {\r
- MTL_ERROR1("THHUL_cqm_poll4cqe: Failed updating associated QP (QPn=0x%X , CQn=0x%X).\n",\r
- qpn, thhul_cq_p->cq_num);\r
- MOSAL_spinlock_unlock(&(thhul_cq_p->cq_lock)); \r
- return HH_EFATAL; /* unexpected error */\r
- }\r
- vapi_cqe_p->status= decode_error_syndrome((tavor_if_comp_status_t)MT_EXTRACT_ARRAY32(cqe_cpy,\r
- CQE_ERROR_SYNDROM_BIT_OFFSET, CQE_ERROR_SYNDROM_BIT_SIZE) );\r
- vapi_cqe_p->vendor_err_syndrome= MT_EXTRACT_ARRAY32(cqe_cpy,\r
- CQE_ERROR_VENDOR_SYNDROM_BIT_OFFSET, CQE_ERROR_VENDOR_SYNDROM_BIT_SIZE);\r
- dbd_cnt= MT_EXTRACT_ARRAY32(cqe_cpy,CQE_ERROR_DBDCNT_BIT_OFFSET, CQE_ERROR_DBDCNT_BIT_SIZE);\r
- if ((next_wqe_addr_32lsb == THHUL_QPM_END_OF_WQE_CHAIN) || /* End of WQE chain */\r
- ((dbd_cnt + 1 - dbd_bit) == 0) ) { /* or dbd counter reached 0 */\r
- if ((next_wqe_addr_32lsb == THHUL_QPM_END_OF_WQE_CHAIN) && (dbd_cnt > 0)) {\r
- MTL_ERROR1(MT_FLFMT("%s: CQ[0x%X]:CQE[0x%X]: Reached end of chain while dbd_cnt==%u"),\r
- __func__, thhul_cq_p->cq_num, thhul_cq_p->cur_buf.consumer_index, dbd_cnt);\r
- }\r
- MTPERF_TIME_START(free_cqe);\r
- free_cqe(thhul_cq_p,cur_cqe); /* Free original CQE and update consumer index */\r
- MTPERF_TIME_END(free_cqe);\r
- } else {\r
- recycle_cqe(cur_cqe, next_wqe_addr_32lsb, dbd_cnt - dbd_bit);\r
- } \r
- MOSAL_spinlock_unlock(&(thhul_cq_p->cq_lock)); \r
- /* Only WQE-ID, free_res_count and status are required for completion with error. \r
- * No other CQE fields are extracted (see IB-spec. 11.4.2.1). \r
- * Even though, for the sake of some legacy code:\r
- * ...putting an opcode to distinguish completion of SQ from RQ*/\r
- if (opcode == CQE_ERROR_ON_SQ) { \r
- vapi_cqe_p->opcode= VAPI_CQE_SQ_SEND_DATA; \r
- } else { /* receive queue completion */\r
- vapi_cqe_p->opcode= VAPI_CQE_RQ_SEND_DATA; \r
- }\r
- }\r
-\r
- return rc; \r
-}\r
\r
#ifdef WIN32\r
/* Successful completion */\r
\r
}\r
\r
-/* This code is mainly from poll4cqe with rearm_cqe if next_cqe available\r
- */\r
-HH_ret_t THHUL_cqm_poll_and_rearm_cq( \r
- /*IN*/ HHUL_hca_hndl_t hca_hndl, \r
- /*IN*/ HHUL_cq_hndl_t cq, \r
- /*IN*/ int solicitedNotification,\r
- /*OUT*/ VAPI_wc_desc_t *vapi_cqe_p \r
-) \r
-{ \r
-\r
- THHUL_cq_t *thhul_cq_p= (THHUL_cq_t*)cq;\r
- volatile u_int32_t *cur_cqe;\r
- u_int32_t wqe_addr_32lsb,next_wqe_addr_32lsb;\r
- IB_wqpn_t qpn;\r
- u_int8_t opcode;\r
- u_int8_t dbd_bit;\r
- VAPI_special_qp_t qp_type;\r
- VAPI_ts_type_t qp_ts_type;\r
- u_int32_t i,dbd_cnt;\r
- HH_ret_t rc;\r
- u_int32_t cqe_cpy[CQE_SZ>>2]; /* CQE copy */\r
- /* The CQE copy is required for 2 reasons:\r
- * 1) Hold in CPU endianess. \r
- * 2) Free real CQE as soon as possible in order to release CQ lock quickly.\r
- */\r
-\r
- if (MOSAL_EXPECT_FALSE(thhul_cq_p == NULL)) {\r
- MTL_ERROR1("THHUL_cqm_poll4cqe: NULL CQ handle.\n");\r
- return HH_EINVAL_CQ_HNDL;\r
- }\r
-\r
- MOSAL_spinlock_dpc_lock(&(thhul_cq_p->cq_lock));\r
-\r
- /* Check if CQE at consumer index is valid */\r
- cur_cqe= (volatile u_int32_t *)\r
- (thhul_cq_p->cur_buf.cqe_buf_base + (thhul_cq_p->cur_buf.consumer_index << LOG2_CQE_SZ)); \r
- if (is_cqe_hw_own(cur_cqe)) { /* CQE is still in HW ownership */\r
-\r
- rearm_cq(thhul_cq_p, (MT_bool)solicitedNotification);\r
- /* Tavor actually solves the race condition where the s/w may have missed the\r
- next valid entry being written, just prior to rearming the CQ. So we really\r
- don't need to repoll the cq entry, since Tavor guarantees that we will see\r
- an interrupt if it happened to go valid before or after rearming the CQ\r
- */\r
- \r
- MOSAL_spinlock_unlock(&(thhul_cq_p->cq_lock)); \r
-#if 0\r
- THHUL_cqm_dump_cq(cq);\r
-#endif\r
- return HH_CQ_EMPTY;\r
- }\r
-\r
-\r
- /* Remove Copy of all cqe, copy as nedded only */ \r
-#if 0 \r
- /* Make CQE copy in correct endianess */\r
- for (i= 0; i < (CQE_SZ>>2); i++) { \r
- cqe_cpy[i]= MOSAL_be32_to_cpu(cur_cqe[i]);\r
- }\r
-#endif\r
- \r
- /* Extract QP/WQE context fields from the CQE */\r
- \r
- wqe_addr_32lsb= MOSAL_be32_to_cpu(cur_cqe[CQE_WQE_ADDR_BYTE_OFFSET]) & \r
- CQE_WQE_ADDR_BIT_MASK;\r
- \r
- qpn= MOSAL_be32_to_cpu(cur_cqe[CQE_MY_QPN_BYTE_OFFSET]) & CQE_MY_QPN_BYTE_BIT_MASK;\r
-\r
- cqe_cpy[CQE_MY_EE_DWORD_OFFSET] = MOSAL_be32_to_cpu(cur_cqe[CQE_MY_EE_DWORD_OFFSET]); \r
- cqe_cpy[CQE_RQPN_DWORD_OFFSET] = MOSAL_be32_to_cpu(cur_cqe[CQE_RQPN_DWORD_OFFSET]); \r
- cqe_cpy[CQE_RLID_DWORD_OFFSET] = MOSAL_be32_to_cpu(cur_cqe[CQE_RLID_DWORD_OFFSET]); \r
- cqe_cpy[CQE_S_DWORD_OFFSET] = MOSAL_be32_to_cpu(cur_cqe[CQE_S_DWORD_OFFSET]); \r
- \r
- vapi_cqe_p->imm_data = MOSAL_be32_to_cpu(cur_cqe[CQE_IMMEDIATE_DWORD_OFFSET]);\r
- vapi_cqe_p->byte_len= MOSAL_be32_to_cpu(cur_cqe[CQE_BYTE_CNT_DWORD_OFFSET]);\r
-\r
- /* new CQE: completion status is taken from "opcode" field */\r
-\r
- opcode=(cqe_cpy[CQE_OPCODE_DWORD_OFFSET]>>CQE_OPCODE_SHIFT) & CQE_OPCODE_BIT_MASK;\r
- \r
-\r
- if (MOSAL_EXPECT_TRUE((opcode & CQE_ERROR_STATUS_MASK) != CQE_ERROR_STATUS_MASK)) { /* Completed OK */ \r
- HH_ret_t rc_rearm; \r
- volatile u_int32_t *next_cqe;\r
-\r
- MTPERF_TIME_START(free_cqe);\r
- free_cqe(thhul_cq_p,cur_cqe); /* Free original CQE and update consumer index */\r
- MTPERF_TIME_END(free_cqe);\r
-\r
- // form pointer to next cqe that would be serviced next \r
- next_cqe= (volatile u_int32_t *)\r
- (thhul_cq_p->cur_buf.cqe_buf_base + (thhul_cq_p->cur_buf.consumer_index << LOG2_CQE_SZ)); \r
- \r
- \r
- if (is_cqe_hw_own(next_cqe)) { /* CQE is still in HW ownership */\r
- rearm_cq(thhul_cq_p, (MT_bool)solicitedNotification);\r
- // check again\r
- next_cqe= (volatile u_int32_t *)\r
- (thhul_cq_p->cur_buf.cqe_buf_base + (thhul_cq_p->cur_buf.consumer_index << LOG2_CQE_SZ)); \r
- if (is_cqe_hw_own(next_cqe)) { /* CQE is still in HW ownership */\r
- rc_rearm = HH_COMPLETED; \r
- } else {\r
- rc_rearm = HH_POLL_NEEDED; \r
- }\r
- }\r
- else\r
- {\r
- rc_rearm = HH_OK;\r
- } \r
- \r
-\r
- MOSAL_spinlock_unlock(&(thhul_cq_p->cq_lock)); \r
-#ifndef IVAPI_THH\r
- rc= THHUL_qpm_comp_ok(thhul_cq_p->qpm, qpn, wqe_addr_32lsb,\r
- &qp_type,&qp_ts_type,&(vapi_cqe_p->id),&(vapi_cqe_p->free_res_count));\r
-#else\r
- rc= THHUL_qpm_comp_ok(thhul_cq_p->qpm, qpn, wqe_addr_32lsb,\r
- &qp_type,&qp_ts_type,&(vapi_cqe_p->id),&(vapi_cqe_p->free_res_count),NULL);\r
-#endif \r
- \r
- if (MOSAL_EXPECT_FALSE(rc != HH_OK)) {\r
- MTL_ERROR1("THHUL_cqm_poll4cqe: Failed updating associated QP.\n");\r
- DUMP_CQE(thhul_cq_p->cq_num,thhul_cq_p->cur_buf.consumer_index,cur_cqe);\r
- return rc;\r
- }\r
- /* Extract the rest of the CQE fields into vapi_cqe_p*/\r
- \r
- rc= extract_cqe_new(cqe_cpy,vapi_cqe_p,qp_type,qp_ts_type,opcode); \r
- vapi_cqe_p->status= VAPI_SUCCESS;\r
- if(MOSAL_EXPECT_FALSE(rc != HH_OK)) {\r
- return rc;\r
- }\r
- else\r
- return rc_rearm;\r
- \r
- \r
- } else { /* Completion with error */\r
- /* Make CQE copy in correct endianess */\r
- for (i= 0; i < (CQE_SZ>>2); i++) { \r
- cqe_cpy[i]= MOSAL_be32_to_cpu(cur_cqe[i]);\r
- } \r
- MTL_DEBUG4("THHUL_cqm_poll4cqe: completion with error: cq=%d consumer_index=%d\n",\r
- thhul_cq_p->cq_num,thhul_cq_p->cur_buf.consumer_index);\r
- DUMP_CQE(thhul_cq_p->cq_num,thhul_cq_p->cur_buf.consumer_index,cur_cqe);\r
- rc= THHUL_qpm_comp_err(thhul_cq_p->qpm, qpn, wqe_addr_32lsb,\r
- &(vapi_cqe_p->id),&(vapi_cqe_p->free_res_count),&next_wqe_addr_32lsb,&dbd_bit);\r
- if (rc != HH_OK) {\r
- MTL_ERROR1("THHUL_cqm_poll4cqe: Failed updating associated QP.\n");\r
- return rc;\r
- }\r
- vapi_cqe_p->status= decode_error_syndrome(MT_EXTRACT_ARRAY32(cqe_cpy,\r
- CQE_ERROR_SYNDROM_BIT_OFFSET, CQE_ERROR_SYNDROM_BIT_SIZE) );\r
- vapi_cqe_p->vendor_err_syndrome= MT_EXTRACT_ARRAY32(cqe_cpy,\r
- CQE_ERROR_VENDOR_SYNDROM_BIT_OFFSET, CQE_ERROR_VENDOR_SYNDROM_BIT_SIZE);\r
- dbd_cnt= MT_EXTRACT_ARRAY32(cqe_cpy,CQE_ERROR_DBDCNT_BIT_OFFSET, CQE_ERROR_DBDCNT_BIT_SIZE);\r
- if ((next_wqe_addr_32lsb == THHUL_QPM_END_OF_WQE_CHAIN) || /* End of WQE chain */\r
- ((dbd_cnt + 1 - dbd_bit) == 0) ) { /* or dbd counter reached 0 */\r
- MTPERF_TIME_START(free_cqe);\r
- free_cqe(thhul_cq_p,cur_cqe); /* Free original CQE and update consumer index */\r
- MTPERF_TIME_END(free_cqe);\r
- } else {\r
- recycle_cqe(cur_cqe, next_wqe_addr_32lsb, dbd_cnt - dbd_bit);\r
- } \r
- MOSAL_spinlock_unlock(&(thhul_cq_p->cq_lock)); \r
- /* Only WQE-ID, free_res_count and status are required for completion with error. \r
- * No other CQE fields are extracted (see IB-spec. 11.4.2.1). \r
- * Even though, for the sake of some legacy code:\r
- * ...putting an opcode to distinguish completion of SQ from RQ*/\r
- if (opcode == CQE_ERROR_ON_SQ) { \r
- vapi_cqe_p->opcode= VAPI_CQE_SQ_SEND_DATA; \r
- } else { /* receive queue completion */\r
- vapi_cqe_p->opcode= VAPI_CQE_RQ_SEND_DATA; \r
- }\r
- }\r
-\r
- return rc; \r
- \r
-}\r
-\r
\r
HH_ret_t THHUL_cqm_req_comp_notif( \r
/*IN*/ HHUL_hca_hndl_t hca_hndl, \r
/*IN*/ HHUL_srq_hndl_t srq\r
);\r
\r
-\r
-DLL_API HH_ret_t THHUL_cqm_poll4cqe( \r
- /*IN*/ HHUL_hca_hndl_t hca_hndl, \r
- /*IN*/ HHUL_cq_hndl_t cq, \r
- /*OUT*/ VAPI_wc_desc_t *vapi_cqe_p \r
-);\r
-\r
#ifdef WIN32\r
#include <iba/ib_types.h>\r
DLL_API ib_api_status_t\r
OUT uint32_t* const p_n_cqes );\r
#endif\r
\r
-DLL_API HH_ret_t THHUL_cqm_poll_and_rearm_cq(\r
- /*IN*/ HHUL_hca_hndl_t hca_hndl,\r
- /*IN*/ HHUL_cq_hndl_t cq,\r
- /*IN*/ int solicitedNotification,\r
- /*OUT*/ VAPI_wc_desc_t *vapi_cqe_p\r
-);\r
-\r
DLL_API HH_ret_t THHUL_cqm_peek_cq( \r
/*IN*/ HHUL_hca_hndl_t hca_hndl, \r
/*IN*/ HHUL_cq_hndl_t cq, \r
THHUL_cqm_create_cq_done /* HHULIF_create_cq_done */,\r
THHUL_cqm_resize_cq_prep /* HHULIF_resize_cq_prep */,\r
THHUL_cqm_resize_cq_done /* HHULIF_resize_cq_done */,\r
- THHUL_cqm_poll4cqe /* HHULIF_poll4cqe */,\r
- THHUL_cqm_poll_and_rearm_cq /* HHULIF_poll_and_rearm_cq */, \r
+ NULL /* HHULIF_poll4cqe */,\r
+ NULL /* HHULIF_poll_and_rearm_cq */, \r
THHUL_cqm_peek_cq /* HHULIF_peek_cq */,\r
THHUL_cqm_req_comp_notif /* HHULIF_req_comp_notif */,\r
THHUL_cqm_req_ncomp_notif /* HHULIF_req_ncomp_notif */,\r
#define _CL_QMAP_H_\r
\r
\r
+#include <complib/cl_rbmap.h>\r
#include <complib/cl_qpool.h>\r
\r
\r
*********/\r
\r
\r
-/****i* Component Library: Quick Map/cl_map_color_t\r
-* NAME\r
-* cl_map_color_t\r
-*\r
-* DESCRIPTION\r
-* The cl_map_color_t enumerated type is used to note the color of\r
-* nodes in a map.\r
-*\r
-* SYNOPSIS\r
-*/\r
-typedef enum _cl_map_color\r
-{\r
- CL_MAP_RED,\r
- CL_MAP_BLACK\r
-\r
-} cl_map_color_t;\r
-/*\r
-* VALUES\r
-* CL_MAP_RED\r
-* The node in the map is red.\r
-*\r
-* CL_MAP_BLACK\r
-* The node in the map is black.\r
-*\r
-* SEE ALSO\r
-* Quick Map, cl_map_item_t\r
-*********/\r
-\r
-\r
/****s* Component Library: Quick Map/cl_map_item_t\r
* NAME\r
* cl_map_item_t\r
--- /dev/null
+/*++\r
+Copyright © InfiniCon Systems, Inc. All rights reserved.\r
+\r
+THIS SOFTWARE IS PROVIDED BY INFINICON SYSTEMS, INC. ("INFINICON") TO EACH\r
+PERSON OR COMPANY ("RECIPIENT") ON AN "AS IS" BASIS. ANY EXPRESS OR IMPLIED\r
+WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF\r
+MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.\r
+IN NO EVENT SHALL INFINICON BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\r
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,\r
+PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;\r
+OR BUSINESS INTERRUPTION) HOWEVER CAUSED OR ON ANY THEORY OF LIABILITY,\r
+WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR\r
+OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED\r
+OF THE POSSIBILITY OF SUCH DAMAGE.\r
+\r
+Any agreements between InfiniCon and the Recipient shall apply to Recipient's\r
+use of the Software.\r
+--*/\r
+\r
+\r
+/*\r
+ * Abstract:\r
+ * Declaration of primitive red/black map, a red/black tree where the caller\r
+ * always provides all necessary storage.\r
+ *\r
+ * This tree implementation exposes functions required for the client to\r
+ * manually walk the map, allowing clients to implement various methods\r
+ * of comparison.
+ *\r
+ * Environment:\r
+ * All\r
+ *\r
+ * $Revision$\r
+ */\r
+\r
+\r
+#ifndef _CL_RBMAP_H_\r
+#define _CL_RBMAP_H_\r
+\r
+\r
+#include <complib/cl_types.h>\r
+\r
+\r
+/****h* Component Library/RB Map\r
+* NAME\r
+* RB Map\r
+*\r
+* DESCRIPTION\r
+* RB map implements a binary tree that stores user provided cl_rbmap_item_t\r
+* structures. Each item stored in a RB map has a unique key\r
+* (duplicates are not allowed). RB map provides the ability to\r
+* efficiently search for an item given a key.\r
+*\r
+* RB map does not allocate any memory, and can therefore not fail\r
+* any operations due to insufficient memory. RB map can thus be useful\r
+* in minimizing the error paths in code.\r
+*\r
+* RB map is not thread safe, and users must provide serialization when\r
+* adding and removing items from the map.\r
+*\r
+* The RB map functions operate on a cl_rbmap_t structure which should be\r
+* treated as opaque and should be manipulated only through the provided\r
+* functions.\r
+*\r
+* SEE ALSO\r
+* Structures:\r
+* cl_rbmap_t, cl_rbmap_item_t\r
+*\r
+* Initialization:\r
+* cl_rbmap_init\r
+*\r
+* Iteration:\r
+* cl_rbmap_root, cl_rbmap_end, cl_rbmap_left, cl_rbmap_right, cl_rbmap_up\r
+*\r
+* Manipulation:\r
+* cl_rbmap_insert, cl_rbmap_get, cl_rbmap_remove_item, cl_rbmap_remove,\r
+* cl_rbmap_reset, cl_rbmap_merge, cl_rbmap_delta\r
+*\r
+* Search:\r
+* cl_rbmap_apply_func\r
+*\r
+* Attributes:\r
+* cl_rbmap_count, cl_is_rbmap_empty,\r
+*********/\r
+\r
+\r
+/****i* Component Library: RB Map/cl_map_color_t\r
+* NAME\r
+* cl_map_color_t\r
+*\r
+* DESCRIPTION\r
+* The cl_map_color_t enumerated type is used to note the color of\r
+* nodes in a map.\r
+*\r
+* SYNOPSIS\r
+*/\r
+typedef enum _cl_map_color\r
+{\r
+ CL_MAP_RED,\r
+ CL_MAP_BLACK\r
+\r
+} cl_map_color_t;\r
+/*\r
+* VALUES\r
+* CL_MAP_RED\r
+* The node in the map is red.\r
+*\r
+* CL_MAP_BLACK\r
+* The node in the map is black.\r
+*\r
+* SEE ALSO\r
+* RB Map, cl_rbmap_item_t\r
+*********/\r
+\r
+\r
+/****s* Component Library: RB Map/cl_rbmap_item_t\r
+* NAME\r
+* cl_rbmap_item_t\r
+*\r
+* DESCRIPTION\r
+* The cl_rbmap_item_t structure is used by maps to store objects.\r
+*\r
+* The cl_rbmap_item_t structure should be treated as opaque and should\r
+* be manipulated only through the provided functions.\r
+*\r
+* SYNOPSIS\r
+*/\r
+typedef struct _cl_rbmap_item\r
+{\r
+ struct _cl_rbmap_item *p_left;\r
+ struct _cl_rbmap_item *p_right;\r
+ struct _cl_rbmap_item *p_up;\r
+ cl_map_color_t color;\r
+#ifdef _DEBUG_\r
+ struct _cl_rbmap *p_map;\r
+#endif\r
+\r
+} cl_rbmap_item_t;\r
+/*\r
+* FIELDS\r
+* p_left\r
+* Pointer to the map item that is a child to the left of the node.\r
+*\r
+* p_right\r
+* Pointer to the map item that is a child to the right of the node.\r
+*\r
+* p_up\r
+* Pointer to the map item that is the parent of the node.\r
+*\r
+* color\r
+* Indicates whether a node is red or black in the map.\r
+*\r
+* NOTES\r
+* None of the fields of this structure should be manipulated by users, as\r
+*	they are critical to the proper operation of the map in which they
+* are stored.\r
+*\r
+* To allow storing items in either a quick list, a quick pool, or a quick\r
+* map, the map implementation guarantees that the map item can be safely\r
+* cast to a pool item used for storing an object in a quick pool, or cast to\r
+* a list item used for storing an object in a quick list. This removes the\r
+* need to embed a map item, a list item, and a pool item in objects that need\r
+* to be stored in a quick list, a quick pool, and a RB map.\r
+*\r
+* SEE ALSO\r
+* RB Map, cl_rbmap_insert, cl_rbmap_key, cl_pool_item_t, cl_list_item_t\r
+*********/\r
+\r
+\r
+/****s* Component Library: RB Map/cl_rbmap_t\r
+* NAME\r
+* cl_rbmap_t\r
+*\r
+* DESCRIPTION\r
+*	RB map structure.
+*\r
+* The cl_rbmap_t structure should be treated as opaque and should\r
+* be manipulated only through the provided functions.\r
+*\r
+* SYNOPSIS\r
+*/\r
+typedef struct _cl_rbmap\r
+{\r
+ cl_rbmap_item_t root;\r
+ cl_rbmap_item_t nil;\r
+ cl_state_t state;\r
+ size_t count;\r
+\r
+} cl_rbmap_t;\r
+/*\r
+* PARAMETERS\r
+* root\r
+* Map item that serves as root of the map. The root is set up to\r
+* always have itself as parent. The left pointer is set to point to\r
+* the item at the root.\r
+*\r
+* nil\r
+* Map item that serves as terminator for all leaves, as well as providing\r
+* the list item used as quick list for storing map items in a list for\r
+* faster traversal.\r
+*\r
+* state\r
+* State of the map, used to verify that operations are permitted.\r
+*\r
+* count\r
+* Number of items in the map.\r
+*\r
+* SEE ALSO\r
+* RB Map\r
+*********/\r
+\r
+\r
+#ifdef __cplusplus\r
+extern "C" {\r
+#endif\r
+\r
+\r
+/****f* Component Library: RB Map/cl_rbmap_count\r
+* NAME\r
+* cl_rbmap_count\r
+*\r
+* DESCRIPTION\r
+* The cl_rbmap_count function returns the number of items stored\r
+* in a RB map.\r
+*\r
+* SYNOPSIS\r
+*/\r
+CL_INLINE size_t CL_API\r
+cl_rbmap_count(\r
+ IN const cl_rbmap_t* const p_map )\r
+{\r
+ CL_ASSERT( p_map );\r
+ CL_ASSERT( p_map->state == CL_INITIALIZED );\r
+ return( p_map->count );\r
+}\r
+/*\r
+* PARAMETERS\r
+* p_map\r
+* [in] Pointer to a cl_rbmap_t structure whose item count to return.\r
+*\r
+* RETURN VALUE\r
+* Returns the number of items stored in the map.\r
+*\r
+* SEE ALSO\r
+* RB Map, cl_is_rbmap_empty\r
+*********/\r
+\r
+\r
+/****f* Component Library: RB Map/cl_is_rbmap_empty\r
+* NAME\r
+* cl_is_rbmap_empty\r
+*\r
+* DESCRIPTION\r
+* The cl_is_rbmap_empty function returns whether a RB map is empty.\r
+*\r
+* SYNOPSIS\r
+*/\r
+CL_INLINE boolean_t CL_API\r
+cl_is_rbmap_empty(\r
+ IN const cl_rbmap_t* const p_map )\r
+{\r
+ CL_ASSERT( p_map );\r
+ CL_ASSERT( p_map->state == CL_INITIALIZED );\r
+\r
+ return( p_map->count == 0 );\r
+}\r
+/*\r
+* PARAMETERS\r
+* p_map\r
+* [in] Pointer to a cl_rbmap_t structure to test for emptiness.\r
+*\r
+* RETURN VALUES\r
+* TRUE if the RB map is empty.\r
+*\r
+* FALSE otherwise.\r
+*\r
+* SEE ALSO\r
+* RB Map, cl_rbmap_count, cl_rbmap_reset\r
+*********/\r
+\r
+\r
+/****f* Component Library: RB Map/cl_rbmap_reset\r
+* NAME\r
+* cl_rbmap_reset\r
+*\r
+* DESCRIPTION\r
+* The cl_rbmap_reset function removes all items in a RB map,\r
+* leaving it empty.\r
+*\r
+* SYNOPSIS\r
+*/\r
+CL_INLINE void CL_API\r
+cl_rbmap_reset(\r
+ IN cl_rbmap_t* const p_map )\r
+{\r
+ CL_ASSERT( p_map );\r
+ CL_ASSERT( p_map->state == CL_INITIALIZED );\r
+\r
+ p_map->root.p_left = &p_map->nil;\r
+ p_map->count = 0;\r
+}\r
+/*\r
+* PARAMETERS\r
+* p_map\r
+* [in] Pointer to a cl_rbmap_t structure to empty.\r
+*\r
+* RETURN VALUES\r
+* This function does not return a value.\r
+*\r
+* SEE ALSO\r
+* RB Map, cl_rbmap_remove, cl_rbmap_remove_item\r
+*********/\r
+\r
+\r
+/****f* Component Library: RB Map/cl_rbmap_init\r
+* NAME\r
+* cl_rbmap_init\r
+*\r
+* DESCRIPTION\r
+* The cl_rbmap_init function initialized a RB map for use.\r
+*\r
+* SYNOPSIS\r
+*/\r
+CL_INLINE void CL_API\r
+cl_rbmap_init(\r
+ IN cl_rbmap_t* const p_map )\r
+{\r
+ CL_ASSERT( p_map );\r
+\r
+ /* special setup for the root node */\r
+ p_map->root.p_left = &p_map->nil;\r
+ p_map->root.p_right = &p_map->nil;\r
+ p_map->root.p_up = &p_map->root;\r
+ p_map->root.color = CL_MAP_BLACK;\r
+\r
+ /* Setup the node used as terminator for all leaves. */\r
+ p_map->nil.p_left = &p_map->nil;\r
+ p_map->nil.p_right = &p_map->nil;\r
+ p_map->nil.p_up = &p_map->nil;\r
+ p_map->nil.color = CL_MAP_BLACK;\r
+\r
+#ifdef _DEBUG_\r
+ p_map->root.p_map = p_map;\r
+ p_map->nil.p_map = p_map;\r
+#endif\r
+\r
+ p_map->state = CL_INITIALIZED;\r
+\r
+ p_map->count = 0;\r
+}\r
+/*\r
+* PARAMETERS\r
+* p_map\r
+* [in] Pointer to a cl_rbmap_t structure to initialize.\r
+*\r
+* RETURN VALUES\r
+* This function does not return a value.\r
+*\r
+* NOTES\r
+* Allows calling RB map manipulation functions.\r
+*\r
+* SEE ALSO\r
+* RB Map, cl_rbmap_insert, cl_rbmap_remove\r
+*********/\r
+\r
+\r
+/****f* Component Library: RB Map/cl_rbmap_root\r
+* NAME\r
+* cl_rbmap_root\r
+*\r
+* DESCRIPTION\r
+* The cl_rbmap_root function returns the root of a RB map.\r
+*\r
+* SYNOPSIS\r
+*/\r
+CL_INLINE cl_rbmap_item_t* const CL_API\r
+cl_rbmap_root(\r
+ IN const cl_rbmap_t* const p_map )\r
+{\r
+ CL_ASSERT( p_map );\r
+ return( p_map->root.p_left );\r
+}\r
+/*\r
+* PARAMETERS\r
+* p_map\r
+* [in] Pointer to a cl_rbmap_t structure whose root to return.\r
+*\r
+* RETURN VALUE\r
+* Pointer to the root item of the map.\r
+*\r
+* NOTES\r
+* cl_rbmap_root returns the topmost item in the map's tree. If the map\r
+* is empty, the returned pointer compares equal to the end of the map\r
+* returned by cl_rbmap_end, so callers should test for that condition\r
+* before dereferencing the result.\r
+* Use cl_rbmap_left and cl_rbmap_right to traverse downward from the\r
+* root item.\r
+*\r
+* SEE ALSO\r
+* RB Map, cl_rbmap_head, cl_rbmap_tail, cl_rbmap_next, cl_rbmap_prev,\r
+* cl_rbmap_end, cl_rbmap_left, cl_rbmap_right, cl_rbmap_up\r
+*********/\r
+\r
+\r
+/****f* Component Library: RB Map/cl_rbmap_end\r
+* NAME\r
+* cl_rbmap_end\r
+*\r
+* DESCRIPTION\r
+* The cl_rbmap_end function returns the end of a RB map.\r
+*\r
+* SYNOPSIS\r
+*/\r
+CL_INLINE const cl_rbmap_item_t* const CL_API\r
+cl_rbmap_end(\r
+ IN const cl_rbmap_t* const p_map )\r
+{\r
+ CL_ASSERT( p_map );\r
+ CL_ASSERT( p_map->state == CL_INITIALIZED );\r
+ /* Nil is the end of the map. */\r
+ return( &p_map->nil );\r
+}\r
+/*\r
+* PARAMETERS\r
+* p_map\r
+* [in] Pointer to a cl_rbmap_t structure whose end to return.\r
+*\r
+* RETURN VALUE\r
+* Pointer to the end of the map.\r
+*\r
+* NOTES\r
+* cl_rbmap_end is useful for determining the validity of map items returned\r
+* by cl_rbmap_head, cl_rbmap_tail, cl_rbmap_next, or cl_rbmap_prev. If the map\r
+* item pointer returned by any of these functions compares to the end, the\r
+* end of the map was encountered.\r
+* When using cl_rbmap_head or cl_rbmap_tail, this condition indicates that\r
+* the map is empty.\r
+*\r
+* SEE ALSO\r
+* RB Map, cl_rbmap_head, cl_rbmap_tail, cl_rbmap_next, cl_rbmap_prev\r
+* cl_rbmap_root, cl_rbmap_left, cl_rbmap_right, cl_rbmap_up\r
+*********/\r
+\r
+\r
+/****f* Component Library: RB Map/cl_rbmap_left\r
+* NAME\r
+* cl_rbmap_left\r
+*\r
+* DESCRIPTION\r
+* The cl_rbmap_left function returns the map item to the left\r
+* of the specified map item.\r
+*\r
+* SYNOPSIS\r
+*/\r
+CL_INLINE cl_rbmap_item_t* CL_API\r
+cl_rbmap_left(\r
+ IN const cl_rbmap_item_t* const p_item )\r
+{\r
+ CL_ASSERT( p_item );\r
+ return( (cl_rbmap_item_t*)p_item->p_left );\r
+}\r
+/*\r
+* PARAMETERS\r
+* p_item\r
+* [in] Pointer to a map item whose left child to return.\r
+*\r
+* RETURN VALUES\r
+* Pointer to the map item to the left in a RB map.\r
+*\r
+* Pointer to the map end if no item is to the left.\r
+*\r
+* SEE ALSO\r
+* RB Map, cl_rbmap_head, cl_rbmap_tail, cl_rbmap_next, cl_rbmap_end,\r
+* cl_rbmap_item_t\r
+*********/\r
+\r
+\r
+/****f* Component Library: RB Map/cl_rbmap_right\r
+* NAME\r
+* cl_rbmap_right\r
+*\r
+* DESCRIPTION\r
+* The cl_rbmap_right function returns the map item to the right\r
+* of the specified map item.\r
+*\r
+* SYNOPSIS\r
+*/\r
+CL_INLINE cl_rbmap_item_t* CL_API\r
+cl_rbmap_right(\r
+ IN const cl_rbmap_item_t* const p_item )\r
+{\r
+ CL_ASSERT( p_item );\r
+ return( (cl_rbmap_item_t*)p_item->p_right );\r
+}\r
+/*\r
+* PARAMETERS\r
+* p_item\r
+* [in] Pointer to a map item whose right child to return.\r
+*\r
+* RETURN VALUES\r
+* Pointer to the map item to the right in a RB map.\r
+*\r
+* Pointer to the map end if no item is to the right.\r
+*\r
+* SEE ALSO\r
+* RB Map, cl_rbmap_head, cl_rbmap_tail, cl_rbmap_next, cl_rbmap_end,\r
+* cl_rbmap_item_t\r
+*********/\r
+\r
+\r
+/****f* Component Library: RB Map/cl_rbmap_insert\r
+* NAME\r
+* cl_rbmap_insert\r
+*\r
+* DESCRIPTION\r
+* The cl_rbmap_insert function inserts a map item into a RB map.\r
+*\r
+* SYNOPSIS\r
+*/\r
+CL_EXPORT void CL_API\r
+cl_rbmap_insert(\r
+ IN cl_rbmap_t* const p_map,\r
+ IN cl_rbmap_item_t* const p_insert_at,\r
+ IN cl_rbmap_item_t* const p_item,\r
+ IN boolean_t left );\r
+/*\r
+* PARAMETERS\r
+* p_map\r
+* [in] Pointer to a cl_rbmap_t structure into which to add the item.\r
+*\r
+* p_insert_at\r
+* [in] Pointer to a cl_rbmap_item_t structure to serve as parent\r
+* to p_item.\r
+*\r
+* p_item\r
+* [in] Pointer to a cl_rbmap_item_t structure to insert into the RB map.\r
+*\r
+* left\r
+* [in] Indicates that p_item should be inserted to the left of p_insert_at.\r
+*\r
+* RETURN VALUE\r
+* This function does not return a value.\r
+*\r
+* The caller must locate the insertion point (p_insert_at and the side\r
+* given by left) by searching the map before calling cl_rbmap_insert.\r
+*\r
+* NOTES\r
+* Insertion operations may cause the RB map to rebalance.\r
+*\r
+* SEE ALSO\r
+* RB Map, cl_rbmap_remove, cl_rbmap_item_t\r
+*********/\r
+\r
+\r
+/****f* Component Library: RB Map/cl_rbmap_remove_item\r
+* NAME\r
+* cl_rbmap_remove_item\r
+*\r
+* DESCRIPTION\r
+* The cl_rbmap_remove_item function removes the specified map item\r
+* from a RB map.\r
+*\r
+* SYNOPSIS\r
+*/\r
+CL_EXPORT void CL_API\r
+cl_rbmap_remove_item(\r
+ IN cl_rbmap_t* const p_map,\r
+ IN cl_rbmap_item_t* const p_item );\r
+/*\r
+* PARAMETERS\r
+* p_item\r
+* [in] Pointer to a map item to remove from its RB map.\r
+*\r
+* RETURN VALUES\r
+* This function does not return a value.\r
+*\r
+* In a debug build, cl_rbmap_remove_item asserts that the item being removed\r
+* is in the specified map.\r
+*\r
+* NOTES\r
+* Removes the map item pointed to by p_item from its RB map.\r
+*\r
+* SEE ALSO\r
+* RB Map, cl_rbmap_remove, cl_rbmap_reset, cl_rbmap_insert\r
+*********/\r
+\r
+\r
+#ifdef __cplusplus\r
+}\r
+#endif\r
+\r
+\r
+#endif /* _CL_RBMAP_H_ */\r
typedef struct _al_query* __ptr64 ib_query_handle_t;\r
typedef struct _al_sub* __ptr64 ib_sub_handle_t;\r
typedef struct _al_listen* __ptr64 ib_listen_handle_t;\r
-typedef struct _al_conn* __ptr64 ib_cm_handle_t;\r
typedef struct _al_ioc* __ptr64 ib_ioc_handle_t;\r
typedef struct _al_svc_entry* __ptr64 ib_svc_handle_t;\r
typedef struct _al_pool_key* __ptr64 ib_pool_key_t;\r
typedef struct _al_pool* __ptr64 ib_pool_handle_t;\r
\r
\r
+typedef struct _ib_cm_handle\r
+{\r
+ ib_al_handle_t h_al;\r
+ ib_qp_handle_t h_qp;\r
+ net32_t cid;\r
+\r
+} ib_cm_handle_t;\r
+\r
+\r
/****s* Access Layer/ib_shmid_t\r
* NAME\r
* ib_shmid_t\r
*\r
* SYNOPSIS\r
*/\r
+#pragma warning(disable:4324)\r
typedef struct _ib_cm_req_rec\r
{\r
const void* __ptr64 context;\r
const void* __ptr64 sidr_context;\r
\r
} ib_cm_req_rec_t;\r
+#pragma warning(default:4324)\r
/*\r
* FIELDS\r
* context\r
{\r
const uint8_t* __ptr64 p_rtu_pdata;\r
\r
- ib_qp_type_t qp_type;\r
ib_qp_handle_t h_qp;\r
const void* __ptr64 qp_context;\r
\r
* A reference to user-defined private data sent as part of the ready\r
* to use message.\r
*\r
-* qp_type\r
-* Indicates the CM service type.\r
-*\r
* h_qp\r
* The queue pair handle associated with the connection request.\r
*\r
\r
const uint8_t* __ptr64 p_rej_pdata;\r
\r
- ib_qp_type_t qp_type;\r
-\r
- /* valid for rc, uc & rd qp_type only */\r
ib_qp_handle_t h_qp;\r
const void* __ptr64 qp_context;\r
\r
* A reference to user-defined private data sent as part of the connection\r
* request reply.\r
*\r
-* qp_type\r
-* Indicates the CM service type.\r
-*\r
* h_qp\r
* The queue pair handle associated with a connection request.\r
*\r
{\r
const uint8_t* __ptr64 p_mra_pdata;\r
\r
- ib_qp_type_t qp_type;\r
-\r
- /* valid for rc, uc rd qp_type only */\r
ib_qp_handle_t h_qp;\r
const void* __ptr64 qp_context;\r
\r
* p_mra_pdata\r
* A reference to user-defined private data sent as part of the MRA.\r
*\r
-* qp_type\r
-* Indicates the CM service type.\r
-*\r
* h_qp\r
* The queue pair handle associated with a connection request.\r
*\r
\r
const uint8_t* __ptr64 p_lap_pdata;\r
\r
- ib_qp_type_t qp_type;\r
-\r
- /* valid for rc, uc & rd qp_type only */\r
const void* __ptr64 qp_context;\r
\r
} ib_cm_lap_rec_t;\r
* A reference to user-defined private data sent as part of the load\r
* alternate path request.\r
*\r
-* qp_type\r
-* Indicates the CM service type.\r
-*\r
* qp_context\r
* The queue pair context associated with a connection request.\r
*\r
\r
const uint8_t* __ptr64 p_apr_pdata;\r
\r
- ib_qp_type_t qp_type;\r
-\r
- /* valid for rc, uc & rd qp_type only */\r
ib_qp_handle_t h_qp;\r
const void* __ptr64 qp_context;\r
\r
* A reference to user-defined private data sent as part of the alternate\r
* path response.\r
*\r
-* qp_type\r
-* Indicates the CM service type.\r
-*\r
* h_qp\r
* The queue pair handle associated with the alternate path response.\r
*\r
\r
const uint8_t* __ptr64 p_dreq_pdata;\r
\r
- ib_qp_type_t qp_type;\r
-\r
- /* valid for rc, uc & rd qp_type only */\r
const void* __ptr64 qp_context;\r
\r
} ib_cm_dreq_rec_t;\r
* A reference to user-defined private data sent as part of the\r
* disconnect request.\r
*\r
-* qp_type\r
-* Indicates the CM service type.\r
-*\r
* qp_context\r
* The queue pair context associated with the disconnect request.\r
*\r
\r
const uint8_t* __ptr64 p_drep_pdata;\r
\r
- ib_qp_type_t qp_type;\r
-\r
- /* valid for rc, uc & rd qp_type only */\r
ib_qp_handle_t h_qp;\r
const void* __ptr64 qp_context;\r
\r
* A reference to user-defined private data sent as part of the\r
* disconnect reply.\r
*\r
-* qp_type\r
-* Indicates the CM service type.\r
-*\r
* h_qp\r
* The queue pair handle associated with the disconnect reply.\r
*\r
ib_net16_t pkey;\r
\r
uint8_t* __ptr64 p_compare_buffer;\r
- uint32_t compare_offset;\r
- uint32_t compare_length;\r
+ uint8_t compare_offset;\r
+ uint8_t compare_length;\r
\r
ib_pfn_cm_req_cb_t pfn_cm_req_cb;\r
\r
ib_qp_type_t qp_type;\r
\r
- /* valid for rc, uc & rd qp_type only */\r
- ib_pfn_cm_mra_cb_t pfn_cm_mra_cb;\r
- ib_pfn_cm_rej_cb_t pfn_cm_rej_cb;\r
-\r
/* valid for ud qp_type only */\r
const void* __ptr64 sidr_context;\r
\r
ib_qp_handle_t h_qp;\r
\r
uint8_t* __ptr64 p_compare_buffer;\r
- uint32_t compare_offset;\r
- uint32_t compare_length;\r
+ uint8_t compare_offset;\r
+ uint8_t compare_length;\r
\r
uint8_t resp_res;\r
uint8_t init_depth;\r
uint8_t rnr_nak_timeout;\r
uint8_t rnr_retry_cnt;\r
\r
+ ib_pfn_cm_rej_cb_t pfn_cm_rej_cb;\r
+ ib_pfn_cm_mra_cb_t pfn_cm_mra_cb;\r
ib_pfn_cm_rtu_cb_t pfn_cm_rtu_cb;\r
ib_pfn_cm_lap_cb_t pfn_cm_lap_cb;\r
ib_pfn_cm_dreq_cb_t pfn_cm_dreq_cb;\r
*****/\r
\r
\r
+typedef struct _ib_cep\r
+{\r
+ void *context;\r
+ net32_t cid;\r
+\r
+} ib_cep_t;\r
+\r
+\r
+/****s* Access Layer/ib_cep_listen_t\r
+* NAME\r
+* ib_cep_listen_t\r
+*\r
+* DESCRIPTION\r
+* Request to listen for incoming connection attempts.\r
+*\r
+* SYNOPSIS\r
+*/\r
+typedef struct _ib_cep_listen\r
+{\r
+ net64_t svc_id;\r
+\r
+ net64_t port_guid;\r
+\r
+ uint8_t* __ptr64 p_cmp_buf;\r
+ uint8_t cmp_len;\r
+ uint8_t cmp_offset;\r
+\r
+} ib_cep_listen_t;\r
+/*\r
+* FIELDS\r
+* svc_id\r
+* The identifier of the service to register for incoming connection\r
+* requests.\r
+*\r
+* port_guid\r
+* Directs the communication manager to register the listen only\r
+* with the specified port. This should be set to IB_ALL_PORTS\r
+* if the listen is not directed to a particular port.\r
+*\r
+* p_cmp_buf\r
+* An optionally provided buffer that will be used to match incoming\r
+* connection requests with a registered service. Use of this buffer\r
+* permits multiple services to listen on the same service ID as long as\r
+* they provide different compare buffers. Incoming requests will\r
+* be matched against the compare buffer.\r
+*\r
+* cmp_len\r
+* Specifies the size of the compare buffer in bytes. The length must\r
+* be the same for all requests using the same service ID.\r
+*\r
+* cmp_offset\r
+* An offset into the user-defined data area of a connection request\r
+* which contains the start of the data that will be compared against.\r
+* The offset must be the same for all requests using the same service ID.\r
+*\r
+* NOTES\r
+* Users fill out this structure when listening on a service ID with the\r
+* local communication manager. The communication manager will use the given\r
+* service ID and compare buffer to route connection requests to the\r
+* appropriate client. Users may direct listen requests on a particular\r
+* channel adapter, port, or LID.\r
+*****/\r
+\r
+\r
/****f* Access Layer/ib_create_ioc\r
* NAME\r
* ib_create_ioc\r
\r
\r
\r
-/****s* User-mode Access Layer/ual_cm_listen_ioctl_t\r
+/****s* User-mode Access Layer/ual_create_cep_ioctl_t\r
* NAME\r
-* ual_cm_listen_ioctl_t\r
+* ual_create_cep_ioctl_t\r
*\r
* DESCRIPTION\r
-* IOCTL structure containing the input and output parameters to\r
-* perform a CM listen request.\r
+* IOCTL structure containing the output parameters to\r
+* create a CEP.\r
*\r
* SYNOPSIS\r
*/\r
-typedef union _ual_cm_listen_ioctl\r
+typedef struct _ual_create_cep_ioctl\r
{\r
- struct _ual_cm_listen_ioctl_in\r
- {\r
- void* __ptr64 context;\r
- ib_cm_listen_t cm_listen;\r
- /* Compare data buffer follows IOCTL buffer immediately. */\r
+ ib_api_status_t status;\r
+ net32_t cid;\r
\r
- } in;\r
- struct _ual_cm_listen_ioctl_out\r
- {\r
- ib_api_status_t status;\r
- uint64_t h_cm_listen;\r
-\r
- } out;\r
-\r
-} ual_cm_listen_ioctl_t;\r
+} ual_create_cep_ioctl_t;\r
/*\r
* FIELDS\r
-* in.listen_context\r
-* User-specified context information that is returned as a part of all\r
-* connection requests through the pfn_cm_req_cb routine. The context is\r
-* also returned through the error and destroy callbacks.\r
-*\r
-* in.cm_listen\r
-* Information used to direct the listen request to match incoming\r
-* connection requests.\r
-*\r
-* out.status\r
+* status\r
* Status of the operation.\r
*\r
-* out.h_cm_listen\r
-* Upon successful completion of this call, handle to the listen request.\r
-* This handle may be used to cancel the listen operation.\r
+* cid\r
+* CID of the created CEP.\r
*****/\r
\r
\r
\r
-/****s* User-mode Access Layer/ual_cm_cancel_ioctl_t\r
+/****s* User-mode Access Layer/ual_cep_listen_ioctl_t\r
* NAME\r
-* ual_cm_cancel_ioctl_t\r
+* ual_cep_listen_ioctl_t\r
*\r
* DESCRIPTION\r
-* IOCTL structure containing the input and output parameters for\r
-* ib_cm_cancel\r
+* IOCTL structure containing the input parameters to\r
+* perform a CM listen request.\r
*\r
* SYNOPSIS\r
*/\r
-typedef union _ual_cm_cancel_ioctl\r
+typedef struct _ual_cep_listen_ioctl\r
{\r
- struct _ual_cm_cancel_ioctl_in\r
- {\r
- uint64_t h_cm_listen;\r
+ net32_t cid;\r
+ ib_cep_listen_t cep_listen;\r
+ uint8_t compare[IB_REQ_PDATA_SIZE];\r
\r
- } in;\r
- struct _ual_cm_cancel_ioctl_out\r
- {\r
- ib_api_status_t status;\r
-\r
- } out;\r
-\r
-} ual_cm_cancel_ioctl_t;\r
+} ual_cep_listen_ioctl_t;\r
/*\r
* FIELDS\r
-* in.h_cm_listen\r
-* The listen handle that needs to be cancelled.\r
+* in.cid\r
+* CID of an existing CEP.\r
*\r
-* out.status\r
-* Status of the operation.\r
+* in.cep_listen\r
+* Information used to direct the listen request to match incoming\r
+* connection requests.\r
*****/\r
\r
\r
*\r
* DESCRIPTION\r
* IOCTL structure containing the input and output parameters for\r
-* ib_cm_req\r
+* al_cep_pre_req call.\r
*\r
* SYNOPSIS\r
*/\r
-typedef union _ual_cm_req_ioctl\r
+typedef union _ual_cep_req_ioctl\r
{\r
- struct _ual_cm_req_ioctl_in\r
+ struct _ual_cep_req_ioctl_in\r
{\r
- uint64_t h_qp; /* for CM */\r
+ net32_t cid;\r
ib_cm_req_t cm_req;\r
- ib_path_rec_t paths[1];\r
- /* If an alternate path is specified, it follows the primary path. */\r
- /* private data follows the IOCTL buffer immediately. */\r
- /* compare data follows private data immediately. */\r
+ ib_path_rec_t paths[2];\r
+ uint8_t pdata[IB_REQ_PDATA_SIZE];\r
+ uint8_t compare[IB_REQ_PDATA_SIZE];\r
\r
} in;\r
- struct _ual_cm_req_ioctl_out\r
+ struct _ual_cep_req_ioctl_out\r
{\r
ib_api_status_t status;\r
+ ib_qp_mod_t init;\r
\r
} out;\r
\r
-} ual_cm_req_ioctl_t;\r
+} ual_cep_req_ioctl_t;\r
/*\r
* FIELDS\r
+* in.cid\r
+* CID of the target CEP.\r
+*\r
* in.cm_req\r
* CM REQ parameters.\r
*\r
*\r
* out.status\r
* Status of the operation\r
+*\r
+* out.init\r
+* QP modify parameters for INIT state transition.\r
*****/\r
\r
\r
\r
-/****s* User-mode Access Layer/ual_cm_rep_ioctl_t\r
+/****s* User-mode Access Layer/ual_cep_rep_ioctl_t\r
* NAME\r
-* ual_cm_rep_ioctl_t\r
+* ual_cep_rep_ioctl_t\r
*\r
* DESCRIPTION\r
* IOCTL structure containing the input and output parameters for\r
-* ib_cm_rep\r
+* al_cep_pre_rep call.\r
*\r
* SYNOPSIS\r
*/\r
-typedef union _ual_cm_rep_ioctl\r
+typedef union _ual_cep_rep_ioctl\r
{\r
- struct _ual_cm_rep_ioctl_in\r
+ struct _ual_cep_rep_ioctl_in\r
{\r
- uint64_t h_cm_req;\r
- uint64_t h_qp;\r
+ net32_t cid;\r
ib_cm_rep_t cm_rep;\r
- /* Private data follows immediately. */\r
+ uint8_t pdata[IB_REP_PDATA_SIZE];\r
\r
} in;\r
- struct _ual_cm_rep_ioctl_out\r
+ struct _ual_cep_rep_ioctl_out\r
{\r
ib_api_status_t status;\r
+ ib_qp_mod_t init;\r
\r
} out;\r
\r
-} ual_cm_rep_ioctl_t;\r
+} ual_cep_rep_ioctl_t;\r
/*\r
* FIELDS\r
* in.h_cm_req\r
*\r
* out.status\r
* Status of the operation.\r
+*\r
+* out.init\r
+* QP modify parameters for INIT state transition.\r
*****/\r
\r
\r
\r
-/****s* User-mode Access Layer/ual_cm_rtu_ioctl_t\r
+/****s* User-mode Access Layer/ual_cep_get_rtr_ioctl_t\r
* NAME\r
-* ual_cm_rtu_ioctl_t\r
+* ual_cep_get_rtr_ioctl_t\r
*\r
* DESCRIPTION\r
-* IOCTL structure containing the input and output parameters for\r
-* ib_cm_rtu\r
+* IOCTL structure containing the output parameters for\r
+* al_cep_get_rtr_attr call.\r
*\r
* SYNOPSIS\r
*/\r
-typedef union _ual_cm_rtu_ioctl\r
+typedef struct _ual_cep_get_rtr_ioctl\r
{\r
- struct _ual_cm_rtu_ioctl_in\r
- {\r
- uint64_t h_cm_rep;\r
- ib_cm_rtu_t cm_rtu;\r
- /* Private data follows IOCTL buffer. */\r
+ ib_api_status_t status;\r
+ ib_qp_mod_t rtr;\r
\r
- } in;\r
- struct _ual_cm_rtu_ioctl_out\r
- {\r
- ib_api_status_t status;\r
+} ual_cep_get_rtr_ioctl_t;\r
+/*\r
+* FIELDS\r
+* out.status\r
+* Status of the operation.\r
+*\r
+* out.rtr\r
+* QP modify parameters for RTR state transition.\r
+*****/\r
\r
- } out;\r
\r
-} ual_cm_rtu_ioctl_t;\r
+\r
+/****s* User-mode Access Layer/ual_cep_get_rts_ioctl_t\r
+* NAME\r
+* ual_cep_get_rts_ioctl_t\r
+*\r
+* DESCRIPTION\r
+* IOCTL structure containing the output parameters for\r
+* al_cep_get_rts_attr call.\r
+*\r
+* SYNOPSIS\r
+*/\r
+typedef struct _ual_cep_get_rts_ioctl\r
+{\r
+ ib_api_status_t status;\r
+ ib_qp_mod_t rts;\r
+\r
+} ual_cep_get_rts_ioctl_t;\r
/*\r
* FIELDS\r
-* in.h_cm_rep\r
+* out.status\r
+* Status of the operation.\r
+*\r
+* out.rts\r
+* QP modify parameters for RTS state transition.\r
+*****/\r
+\r
+\r
+\r
+/****s* User-mode Access Layer/ual_cep_rtu_ioctl_t\r
+* NAME\r
+* ual_cep_rtu_ioctl_t\r
+*\r
+* DESCRIPTION\r
+* IOCTL structure containing the input parameters for\r
+* al_cep_rtu call.\r
+*\r
+* SYNOPSIS\r
+*/\r
+typedef struct _ual_cep_rtu_ioctl\r
+{\r
+ net32_t cid;\r
+ uint8_t pdata_len;\r
+ uint8_t pdata[IB_RTU_PDATA_SIZE];\r
+\r
+} ual_cep_rtu_ioctl_t;\r
+/*\r
+* FIELDS\r
+* in.cid\r
* The cm_rep connection handle got on the callback.\r
*\r
-* in.cm_rtu\r
-* CM RTU parameters.\r
+* in.pdata_len\r
+* Length of private data.\r
*\r
-* out.status\r
-* Status of the operation\r
+* in.pdata\r
+* Private data.\r
*****/\r
\r
\r
\r
-/****s* User-mode Access Layer/ual_cm_rej_ioctl_t\r
+/****s* User-mode Access Layer/ual_cep_rej_ioctl_t\r
* NAME\r
-* ual_cm_rej_ioctl_t\r
+* ual_cep_rej_ioctl_t\r
*\r
* DESCRIPTION\r
-* IOCTL structure containing the input and output parameters for\r
-* ib_cm_rej\r
+* IOCTL structure containing the input parameters for\r
+* al_cep_rej\r
*\r
* SYNOPSIS\r
*/\r
-typedef union _ual_cm_rej_ioctl\r
+typedef struct _ual_cep_rej_ioctl\r
{\r
- struct _ual_cm_rej_ioctl_in\r
- {\r
- uint64_t h_cm;\r
- ib_cm_rej_t cm_rej;\r
- /* ARI and private data data follow IOCTL buffer immediately. */\r
+ net32_t cid;\r
\r
- } in;\r
- struct _ual_cm_rej_ioctl_out\r
- {\r
- ib_api_status_t status;\r
-\r
- } out;\r
+ ib_rej_status_t rej_status;\r
+ uint8_t ari_len;\r
+ uint8_t pdata_len;\r
+ uint8_t ari[IB_ARI_SIZE];\r
+ uint8_t pdata[IB_REJ_PDATA_SIZE];\r
\r
-} ual_cm_rej_ioctl_t;\r
+} ual_cep_rej_ioctl_t;\r
/*\r
* FIELDS\r
-* in.h_cm\r
-* The connection handle got on the callback.\r
+* in.cid\r
+* The CID of the target CEP.\r
*\r
-* in.cm_rej\r
-* CM REJ parameters.\r
+* in.rej_status\r
+* Rejection status as defined in IB spec.\r
*\r
-* out.status\r
-* Status of the operation.\r
+* in.ari_len\r
+* Length of the ARI data.\r
+*\r
+* in.pdata_len\r
+* Length of the private data.\r
+*\r
+* in.ari\r
+* ARI data.\r
+*\r
+* in.pdata\r
+* Private data.\r
*****/\r
\r
\r
\r
-/****s* User-mode Access Layer/ual_cm_handoff_ioctl_t\r
+/****s* User-mode Access Layer/ual_cep_handoff_ioctl_t\r
* NAME\r
-* ual_cm_handoff_ioctl_t\r
+* ual_cep_handoff_ioctl_t\r
*\r
* DESCRIPTION\r
* IOCTL structure containing the input and output parameters for\r
*\r
* SYNOPSIS\r
*/\r
-typedef union _ual_cm_handoff_ioctl\r
+typedef union _ual_cep_handoff_ioctl\r
{\r
- struct _ual_cm_handoff_ioctl_in\r
+ struct _ual_cep_handoff_ioctl_in\r
{\r
uint64_t h_cm;\r
net64_t sid;\r
\r
} in;\r
- struct _ual_cm_handoff_ioctl_out\r
+ struct _ual_cep_handoff_ioctl_out\r
{\r
ib_api_status_t status;\r
\r
} out;\r
\r
-} ual_cm_handoff_ioctl_t;\r
+} ual_cep_handoff_ioctl_t;\r
/*\r
* FIELDS\r
* in.h_cm\r
\r
\r
\r
-/****s* User-mode Access Layer/ual_cm_mra_ioctl_t\r
+/****s* User-mode Access Layer/ual_cep_mra_ioctl_t\r
* NAME\r
-* ual_cm_mra_ioctl_t\r
+* ual_cep_mra_ioctl_t\r
*\r
* DESCRIPTION\r
-* IOCTL structure containing the input and output parameters for\r
+* IOCTL structure containing the input parameters for\r
* ib_cm_mra\r
*\r
* SYNOPSIS\r
*/\r
-typedef union _ual_cm_mra_ioctl\r
+typedef struct _ual_cep_mra_ioctl\r
{\r
- struct _ual_cm_mra_ioctl_in\r
- {\r
- uint64_t h_cm;\r
- ib_cm_mra_t cm_mra;\r
- /* Private data follows IOCTL buffer immediately. */\r
+ net32_t cid;\r
+ ib_cm_mra_t cm_mra;\r
+ uint8_t pdata[IB_MRA_PDATA_SIZE];\r
\r
- } in;\r
- struct _ual_cm_mra_ioctl_out\r
- {\r
- ib_api_status_t status;\r
-\r
- } out;\r
-\r
-} ual_cm_mra_ioctl_t;\r
+} ual_cep_mra_ioctl_t;\r
/*\r
* FIELDS\r
-* in.h_cm\r
-* The connection handle got on the callback.\r
+* in.cid\r
+* The CID for the target CEP.\r
*\r
-* cm_mra\r
+* in.cm_mra\r
* CM MRA parameters.\r
-*\r
-* out.status\r
-* Status of the operation\r
*****/\r
\r
\r
\r
-/****s* User-mode Access Layer/ual_cm_lap_ioctl_t\r
+/****s* User-mode Access Layer/ual_cep_lap_ioctl_t\r
* NAME\r
-* ual_cm_lap_ioctl_t\r
+* ual_cep_lap_ioctl_t\r
*\r
* DESCRIPTION\r
-* IOCTL structure containing the input and output parameters for\r
+* IOCTL structure containing the input parameters for\r
* ib_cm_lap\r
*\r
* SYNOPSIS\r
*/\r
-typedef union _ual_cm_lap_ioctl\r
+typedef struct _ual_cep_lap_ioctl\r
{\r
- struct _ual_cm_lap_ioctl_in\r
- {\r
- uint64_t h_qp;\r
- ib_cm_lap_t cm_lap;\r
- ib_path_rec_t alt_path;\r
- /* Private data follows IOCTL buffer immediately */\r
+ net32_t cid;\r
+ ib_cm_lap_t cm_lap;\r
+ ib_path_rec_t alt_path;\r
+ uint8_t pdata[IB_LAP_PDATA_SIZE];\r
\r
- } in;\r
- struct _ual_cm_lap_ioctl_out\r
- {\r
- ib_api_status_t status;\r
-\r
- } out;\r
-\r
-} ual_cm_lap_ioctl_t;\r
+} ual_cep_lap_ioctl_t;\r
/*\r
* FIELDS\r
* in.cm_lap\r
*\r
* in.alt_path\r
* Alternate path information.\r
-*\r
-* out.status\r
-* Status of the operation\r
*****/\r
\r
\r
\r
-/****s* User-mode Access Layer/ual_cm_apr_ioctl_t\r
+/****s* User-mode Access Layer/ual_cep_apr_ioctl_t\r
* NAME\r
-* ual_cm_apr_ioctl_t\r
+* ual_cep_apr_ioctl_t\r
*\r
* DESCRIPTION\r
-* IOCTL structure containing the input and output parameters for\r
-* ib_cm_apr\r
+* IOCTL structure containing the input parameters for\r
+* ib_cep_apr\r
*\r
* SYNOPSIS\r
*/\r
-typedef union _ual_cm_apr_ioctl\r
+typedef union _ual_cep_apr_ioctl\r
{\r
- struct _ual_cm_apr_ioctl_in\r
+ struct _ual_cep_apr_ioctl_in\r
{\r
- uint64_t h_cm_lap;\r
- uint64_t h_qp;\r
- ib_cm_apr_t cm_apr;\r
- /* Info and Private data follow IOCTL buffer immediately */\r
+ net32_t cid;\r
+ ib_cm_apr_t cm_apr;\r
+ uint8_t apr_info[IB_APR_INFO_SIZE];\r
+ uint8_t pdata[IB_APR_PDATA_SIZE];\r
\r
} in;\r
- struct _ual_cm_apr_ioctl_out\r
+\r
+ struct _ual_cep_apr_ioctl_out\r
{\r
- ib_api_status_t status;\r
+ ib_api_status_t status;\r
+ ib_qp_mod_t apr;\r
\r
} out;\r
\r
-} ual_cm_apr_ioctl_t;\r
+} ual_cep_apr_ioctl_t;\r
/*\r
* FIELDS\r
* in.h_cm_lap\r
*\r
* in.cm_apr\r
* CM APR parameters.\r
-*\r
-* out.status\r
-* Status of the operation.\r
*****/\r
\r
\r
\r
\r
\r
-/****s* User-mode Access Layer/ual_cm_dreq_ioctl_t\r
+/****s* User-mode Access Layer/ual_cep_dreq_ioctl_t\r
* NAME\r
-* ual_cm_dreq_ioctl_t\r
+* ual_cep_dreq_ioctl_t\r
*\r
* DESCRIPTION\r
-* IOCTL structure containing the input and output parameters for\r
+* IOCTL structure containing the input parameters for\r
* ib_cm_dreq\r
*\r
* SYNOPSIS\r
*/\r
-typedef union _ual_cm_dreq_ioctl\r
+typedef struct _ual_cep_dreq_ioctl\r
{\r
- struct _ual_cm_dreq_ioctl_in\r
- {\r
- uint64_t h_qp;\r
- ib_cm_dreq_t cm_dreq;\r
- /* Private data follows IOCTL buffer immediately */\r
-\r
- } in;\r
- struct _ual_cm_dreq_ioctl_out\r
- {\r
- ib_api_status_t status;\r
-\r
- } out;\r
+ net32_t cid;\r
+ uint8_t pdata_len;\r
+ uint8_t pdata[IB_DREQ_PDATA_SIZE];\r
\r
-} ual_cm_dreq_ioctl_t;\r
+} ual_cep_dreq_ioctl_t;\r
/*\r
* FIELDS\r
* cm_dreq\r
* CM DREQ parameters.\r
-*\r
-* out.status\r
-* Status of the operation.\r
*****/\r
\r
\r
\r
-/****s* User-mode Access Layer/ual_cm_drep_ioctl_t\r
+/****s* User-mode Access Layer/ual_cep_drep_ioctl_t\r
* NAME\r
-* ual_cm_drep_ioctl_t\r
+* ual_cep_drep_ioctl_t\r
*\r
* DESCRIPTION\r
* IOCTL structure containing the input and output parameters for\r
*\r
* SYNOPSIS\r
*/\r
-typedef union _ual_cm_drep_ioctl\r
+typedef struct _ual_cep_drep_ioctl\r
{\r
- struct _ual_cm_drep_ioctl_in\r
- {\r
- uint64_t h_cm_dreq;\r
- ib_cm_drep_t cm_drep;\r
- /* Private data follows IOCTL buffer immediately. */\r
+ net32_t cid;\r
+ ib_cm_drep_t cm_drep;\r
+ uint8_t pdata[IB_DREP_PDATA_SIZE];\r
\r
- } in;\r
- struct _ual_cm_drep_ioctl_out\r
- {\r
- ib_api_status_t status;\r
-\r
- } out;\r
-\r
-} ual_cm_drep_ioctl_t;\r
+} ual_cep_drep_ioctl_t;\r
/*\r
* FIELDS\r
* in.h_cm_dreq\r
*\r
* in.cm_drep\r
* CM DREP parameters.\r
+*****/\r
+\r
+\r
+\r
+/****s* User-mode Access Layer/ual_cep_get_timewait_ioctl_t\r
+* NAME\r
+* ual_cep_get_timewait_ioctl_t\r
*\r
-* out.status\r
+* DESCRIPTION\r
+* IOCTL structure containing the output parameters for\r
+* ib_cep_get_timewait\r
+*\r
+* SYNOPSIS\r
+*/\r
+typedef struct _ual_cep_get_timewait_ioctl\r
+{\r
+ ib_api_status_t status;\r
+ uint64_t timewait_us;\r
+\r
+} ual_cep_get_timewait_ioctl_t;\r
+/*\r
+* FIELDS\r
+* status\r
+* Status of the request.\r
+*\r
+* timewait_us\r
+* Timewait value, in microseconds.\r
+*****/\r
+\r
+\r
+\r
+/****s* User-mode Access Layer/ual_cep_poll_ioctl_t\r
+* NAME\r
+* ual_cep_poll_ioctl_t\r
+*\r
+* DESCRIPTION\r
+* IOCTL structure containing the output parameters to\r
+* poll for incoming events on a CEP. The input parameter is the CID.\r
+*\r
+* SYNOPSIS\r
+*/\r
+typedef struct _ual_cep_poll_ioctl\r
+{\r
+ ib_api_status_t status;\r
+ ib_cep_t new_cep;\r
+ ib_mad_element_t element;\r
+ ib_grh_t grh;\r
+ uint8_t mad_buf[MAD_BLOCK_SIZE];\r
+\r
+} ual_cep_poll_ioctl_t;\r
+/*\r
+* FIELDS\r
+* status\r
* Status of the operation.\r
+*\r
+* new_cep\r
+* For listen requests, CEP information of CEPs created in response\r
+* to incoming REQs.\r
+*\r
+* mad_buf\r
+* Payload of a received MAD (or failed send)\r
*****/\r
\r
\r
/*\r
* Information describing an I/O controller\r
*/\r
+#pragma warning(disable:4324)\r
typedef struct _ib_ioc_info\r
{\r
net64_t chassis_guid;\r
ib_ioc_profile_t profile;\r
\r
} ib_ioc_info_t;\r
+#pragma warning(default:4324)\r
+\r
\r
/*\r
* Defines known Communication management class versions\r
{\r
struct _qp_init\r
{\r
- ib_qp_opts_t opts;\r
uint8_t primary_port;\r
ib_net32_t qkey;\r
uint16_t pkey_index;\r
#endif\r
\r
#ifndef VER_FILEBUILD\r
-#define VER_FILEBUILD 31\r
+#define VER_FILEBUILD 32\r
#endif\r
\r
#ifndef VER_FILEREV\r
#endif\r
\r
/* 100ns to s conversion */\r
-#define HUNDREDNS_TO_SEC 10000000\r
+#define HUNDREDNS_TO_SEC 10000000ULL\r
/* s to µs conversion */\r
-#define SEC_TO_MICRO 1000000\r
+#define SEC_TO_MICRO 1000000ULL\r
\r
CL_INLINE uint64_t CL_API\r
cl_get_time_stamp( void )\r
*/\r
\r
\r
-\r
-\r
#ifndef _CL_DEBUG_OSD_H_\r
#define _CL_DEBUG_OSD_H_\r
\r
* CL_TRACE_EXIT, and CL_EXIT macros.\r
*/\r
#define _CL_DBG_ENTER \\r
- ("%s%s%s() [\n", __MODULE__, __MOD_DELIMITER__, __FUNCTION__)\r
+ ("0x%x:%s%s%s() [\n", GetCurrentThreadId(), __MODULE__, __MOD_DELIMITER__, __FUNCTION__)\r
\r
#define _CL_DBG_EXIT \\r
- ("%s%s%s() ]\n", __MODULE__, __MOD_DELIMITER__, __FUNCTION__)\r
+ ("0x%x:%s%s%s() ]\n", GetCurrentThreadId(), __MODULE__, __MOD_DELIMITER__, __FUNCTION__)\r
\r
#define _CL_DBG_INFO \\r
- ("%s%s%s(): ", __MODULE__, __MOD_DELIMITER__, __FUNCTION__)\r
+ ("0x%x:%s%s%s(): ", GetCurrentThreadId(), __MODULE__, __MOD_DELIMITER__, __FUNCTION__)\r
\r
#define _CL_DBG_ERROR \\r
- ("%s%s%s() !ERROR!: ", __MODULE__, __MOD_DELIMITER__, __FUNCTION__)\r
+ ("0x%x:%s%s%s() !ERROR!: ", GetCurrentThreadId(), __MODULE__, __MOD_DELIMITER__, __FUNCTION__)\r
\r
#define CL_CHK_STK\r
\r
#define DEST_QP 1\r
\r
\r
+#pragma warning(disable:4324)\r
typedef struct _alts_cm_ca_obj\r
{\r
ib_api_status_t status;\r
\r
ib_qp_attr_t qp_attr[MAX_QPS];\r
\r
-\r
ib_send_wr_t *p_send_wr;\r
ib_recv_wr_t *p_recv_wr;\r
size_t wr_send_size;\r
mem_region_t mem_region[10];\r
\r
} alts_cm_ca_obj_t;\r
+#pragma warning(default:4324)\r
\r
#define MAX_SERVER 500\r
\r
p_done_cl->wr_id,\r
ib_get_wc_status_str(p_done_cl->status) ));\r
\r
-\r
- if (p_done_cl->wc_type == IB_WC_RECV)\r
+ if( p_done_cl->status == IB_WCS_SUCCESS )\r
{\r
- ALTS_PRINT(ALTS_DBG_VERBOSE,\r
- ("message length..:%d bytes\n",\r
- p_done_cl->length ));\r
+ if (p_done_cl->wc_type == IB_WC_RECV)\r
+ {\r
+ ALTS_PRINT(ALTS_DBG_VERBOSE,\r
+ ("message length..:%d bytes\n",\r
+ p_done_cl->length ));\r
\r
- id = (uint32_t)p_done_cl->wr_id;\r
+ id = (uint32_t)p_done_cl->wr_id;\r
\r
- ALTS_PRINT(ALTS_DBG_VERBOSE,\r
- ("RecvRC info:\n"\r
- "\trecv_opt...:x%x\n"\r
- "\timm_data...:x%x\n",\r
- p_done_cl->recv.conn.recv_opt,\r
- p_done_cl->recv.ud.immediate_data ));\r
+ ALTS_PRINT(ALTS_DBG_VERBOSE,\r
+ ("RecvRC info:\n"\r
+ "\trecv_opt...:x%x\n"\r
+ "\timm_data...:x%x\n",\r
+ p_done_cl->recv.conn.recv_opt,\r
+ p_done_cl->recv.ud.immediate_data ));\r
\r
- if( p_ca_obj->rdma_enabled == TRUE )\r
+ if( p_ca_obj->rdma_enabled == TRUE )\r
+ {\r
+ process_response( p_ca_obj,\r
+ (alts_rdma_t*)p_ca_obj->mem_region[p_done_cl->wr_id].buffer,\r
+ (uint32_t)p_done_cl->wr_id );\r
+ }\r
+ }\r
+ else\r
+ if (p_done_cl->wc_type == IB_WC_RDMA_WRITE)\r
{\r
+ // convert request to read now\r
+ p_data =\r
+ (alts_rdma_t*)p_ca_obj->mem_region[p_done_cl->wr_id].buffer;\r
+ p_data->msg_type = 'R';\r
process_response( p_ca_obj,\r
- (alts_rdma_t*)p_ca_obj->mem_region[p_done_cl->wr_id].buffer,\r
- (uint32_t)p_done_cl->wr_id );\r
+ p_data,\r
+ (uint32_t)p_done_cl->wr_id );\r
+ }\r
+ else\r
+ if (p_done_cl->wc_type == IB_WC_RDMA_READ)\r
+ {\r
+ id = (uint32_t)p_done_cl->wr_id;\r
+ process_response( p_ca_obj,\r
+ (alts_rdma_t*)p_ca_obj->mem_region[p_done_cl->wr_id].buffer,\r
+ (uint32_t)p_done_cl->wr_id );\r
}\r
- }\r
- else\r
- if (p_done_cl->wc_type == IB_WC_RDMA_WRITE)\r
- {\r
- // convert request to read now\r
- p_data =\r
- (alts_rdma_t*)p_ca_obj->mem_region[p_done_cl->wr_id].buffer;\r
- p_data->msg_type = 'R';\r
- process_response( p_ca_obj,\r
- p_data,\r
- (uint32_t)p_done_cl->wr_id );\r
- }\r
- else\r
- if (p_done_cl->wc_type == IB_WC_RDMA_READ)\r
- {\r
- id = (uint32_t)p_done_cl->wr_id;\r
- process_response( p_ca_obj,\r
- (alts_rdma_t*)p_ca_obj->mem_region[p_done_cl->wr_id].buffer,\r
- (uint32_t)p_done_cl->wr_id );\r
}\r
\r
p_free_wcl = p_done_cl;\r
p_cm_rtu->access_ctrl |= IB_AC_RDMA_READ + IB_AC_RDMA_WRITE;\r
}\r
\r
- p_cm_rtu->sq_depth = 16;\r
- p_cm_rtu->rq_depth = 16;\r
+ if( p_ca_obj->p_ca_attr->modify_wr_depth )\r
+ {\r
+ p_cm_rtu->sq_depth = 16;\r
+ p_cm_rtu->rq_depth = 16;\r
+ }\r
p_cm_rtu->pfn_cm_apr_cb = alts_cm_apr_cb;\r
p_cm_rtu->pfn_cm_dreq_cb = alts_cm_dreq_cb;\r
\r
ib_status = ib_cm_rtu( p_cm_rep_rec->h_cm_rep, p_cm_rtu );\r
+\r
+ ALTS_PRINT( ALTS_DBG_VERBOSE,\r
+ ("ib_cm_rtu returned %s\n", ib_get_err_str( ib_status )) );\r
}\r
else\r
if ( p_cm_rep_rec->qp_type == IB_QPT_UNRELIABLE_DGRM )\r
p_cm_rep->flow_ctrl = TRUE;\r
p_cm_rep->rnr_nak_timeout = 7;\r
p_cm_rep->rnr_retry_cnt = 7;\r
+ p_cm_rep->pfn_cm_rej_cb = alts_cm_rej_cb;\r
+ p_cm_rep->pfn_cm_mra_cb = alts_cm_mra_cb;\r
p_cm_rep->pfn_cm_rtu_cb = alts_cm_rtu_cb;\r
p_cm_rep->pfn_cm_lap_cb = alts_cm_lap_cb;\r
p_cm_rep->pfn_cm_dreq_cb = alts_cm_dreq_cb;\r
cm_mra.svc_timeout = 21; // equals 8.5 sec wait + packet lifetime\r
\r
ib_status = ib_cm_mra( p_cm_req_rec->h_cm_req, &cm_mra );\r
+ ALTS_PRINT( ALTS_DBG_VERBOSE,\r
+ ("ib_cm_mra returned %s\n", ib_get_err_str( ib_status )) );\r
}\r
else\r
{\r
ib_status = ib_cm_rep( p_cm_req_rec->h_cm_req, p_cm_rep );\r
+ ALTS_PRINT( ALTS_DBG_VERBOSE,\r
+ ("ib_cm_rep returned %s\n", ib_get_err_str( ib_status )) );\r
}\r
}\r
else\r
p_listen->lid = p_ca_obj->dlid;\r
p_listen->pkey = p_ca_obj->p_dest_port_attr->p_pkey_table[0];\r
p_listen->pfn_cm_req_cb = alts_cm_req_cb;\r
- p_listen->pfn_cm_rej_cb = alts_cm_rej_cb;\r
\r
ib_status = ib_cm_listen(h_al, p_listen, alts_cm_err_cb,\r
p_ca_obj, &p_ca_obj->h_cm_listen );\r
p_req_client->pfn_cm_rej_cb = alts_cm_rej_cb;\r
p_req_client->pfn_cm_mra_cb = alts_cm_mra_cb;\r
p_req_client->h_qp = p_ca_obj->h_qp[SRC_QP];\r
+ p_req_client->local_resp_timeout = 12;\r
}\r
\r
ib_status = ib_cm_req(p_req_client);\r
if(ib_status != IB_SUCCESS)\r
{\r
ALTS_PRINT( ALTS_DBG_ERROR,\r
- ("alts_open_al failed status = %s\n", ib_get_err_str(ib_status)) );\r
+ ("alts_open_al failed status = %s\n",\r
+ ib_get_err_str(ib_status)) );\r
break;\r
}\r
\r
if(ib_status != IB_SUCCESS)\r
{\r
ALTS_PRINT( ALTS_DBG_ERROR,\r
- ("alts_open_ca failed status = %s\n", ib_get_err_str(ib_status)) );\r
+ ("alts_open_ca failed status = %s\n",\r
+ ib_get_err_str(ib_status)) );\r
break;\r
}\r
\r
if(ib_status != IB_INSUFFICIENT_MEMORY)\r
{\r
ALTS_PRINT(ALTS_DBG_ERROR,\r
- ("ib_query_ca failed with status = %s\n", ib_get_err_str(ib_status)) );\r
+ ("ib_query_ca failed with status = %s\n",\r
+ ib_get_err_str(ib_status)) );\r
ib_status = IB_ERROR;\r
break;\r
}\r
if(ib_status != IB_SUCCESS)\r
{\r
ALTS_PRINT( ALTS_DBG_ERROR,\r
- ("ib_query_ca failed with status = %s\n", ib_get_err_str(ib_status)) );\r
+ ("ib_query_ca failed with status = %s\n",\r
+ ib_get_err_str(ib_status)) );\r
break;\r
}\r
\r
if(ib_status != IB_SUCCESS)\r
{\r
ALTS_PRINT( ALTS_DBG_ERROR,\r
- ("ib_query api failed with status %s\n",ib_get_err_str(ib_status)) );\r
+ ("ib_query api failed with status %s\n",\r
+ ib_get_err_str(ib_status)) );\r
break;\r
}\r
\r
if(ib_status != IB_SUCCESS)\r
{\r
ALTS_PRINT( ALTS_DBG_ERROR,\r
- ("ib_query api failed with status %s\n",ib_get_err_str(ib_status)) );\r
+ ("ib_query api failed with status %s\n",\r
+ ib_get_err_str(ib_status)) );\r
break;\r
}\r
\r
if(ib_status != IB_SUCCESS)\r
{\r
ALTS_PRINT( ALTS_DBG_ERROR,\r
- ("ib_query api failed with status %s\n",ib_get_err_str(ib_status)) );\r
+ ("ib_query api failed with status %s\n",\r
+ ib_get_err_str(ib_status)) );\r
break;\r
}\r
\r
if(ib_status != IB_SUCCESS)\r
{\r
ALTS_PRINT( ALTS_DBG_ERROR,\r
- ("ib_query api failed with status %s\n",ib_get_err_str(ib_status)) );\r
+ ("ib_query api failed with status %s\n",\r
+ ib_get_err_str(ib_status)) );\r
break;\r
}\r
\r
if(ib_status != IB_SUCCESS)\r
{\r
ALTS_PRINT( ALTS_DBG_ERROR,\r
- ("ib_query api failed with status %s\n",ib_get_err_str(ib_status)) );\r
+ ("ib_query api failed with status %s\n",\r
+ ib_get_err_str(ib_status)) );\r
break;\r
}\r
\r
if(ib_status != IB_SUCCESS)\r
{\r
ALTS_PRINT( ALTS_DBG_ERROR, \r
- ("ib_reg_svc api failed with status %s\n",ib_get_err_str(ib_status)) );\r
+ ("ib_reg_svc api failed with status %s\n",\r
+ ib_get_err_str(ib_status)) );\r
break;\r
}\r
\r
if(ib_status != IB_SUCCESS)\r
{\r
ALTS_PRINT( ALTS_DBG_ERROR, \r
- ("ib_reg_svc api failed with status %s\n",ib_get_err_str(ib_status)) );\r
+ ("ib_reg_svc api failed with status %s\n",\r
+ ib_get_err_str(ib_status)) );\r
break;\r
}\r
\r
if( ib_status != IB_SUCCESS )\r
{\r
ALTS_PRINT( ALTS_DBG_ERROR, \r
- ("ib_dereg_svc api failed with status %s\n",ib_get_err_str(ib_status)) );\r
+ ("ib_dereg_svc api failed with status %s\n",\r
+ ib_get_err_str(ib_status)) );\r
break;\r
}\r
#endif\r
ib_pd_handle_t h_pd;\r
\r
/* Input parameters to control test. */\r
- uint32_t num_nodes;\r
+ int32_t num_nodes;\r
uint32_t num_msgs;\r
boolean_t per_msg_buf;\r
cl_mutex_t mutex;\r
\r
cmtest_state_t state;\r
- uint32_t num_connected;\r
+ atomic32_t num_connected;\r
uint32_t conn_index; /* current connection id */\r
uint32_t total_sent;\r
uint32_t total_recv;\r
g_root.cm_rep.flow_ctrl = TRUE;\r
g_root.cm_rep.rnr_nak_timeout = 7;\r
g_root.cm_rep.rnr_retry_cnt = 6;\r
+ g_root.cm_rep.pfn_cm_rej_cb = __rej_cb;\r
+ g_root.cm_rep.pfn_cm_mra_cb = __mra_cb;\r
g_root.cm_rep.pfn_cm_rtu_cb = __rtu_cb;\r
g_root.cm_rep.pfn_cm_lap_cb = __lap_cb;\r
g_root.cm_rep.pfn_cm_dreq_cb = __dreq_cb;\r
{\r
CL_ENTER( CMT_DBG_VERBOSE, cmt_dbg_lvl );\r
\r
+	/*
+	 * Note - because this callback exits the app, any output beyond the
+	 * first time may report junk.  There have been instances where the
+	 * callback is invoked more times than there are connection requests,
+	 * but that behavior disappeared if the call to exit below is removed.
+	 */
printf( "Connection was rejected, status: 0x%x\n",\r
p_cm_rej_rec->rej_status );\r
\r
status = ib_cm_rtu( p_cm_rep_rec->h_cm_rep, &g_root.cm_rtu );\r
if( status != IB_SUCCESS )\r
{\r
- printf( "Call to ib_cm_rtu failed\n" );\r
+ printf( "Call to ib_cm_rtu returned %s\n", ib_get_err_str( status ) );\r
exit( 1 );\r
}\r
\r
- g_root.num_connected++;\r
+ cl_atomic_inc( &g_root.num_connected );\r
\r
CL_EXIT( CMT_DBG_VERBOSE, cmt_dbg_lvl );\r
}\r
p_node->state = node_conn;\r
\r
__post_recvs( p_node );\r
- g_root.num_connected++;\r
+ cl_atomic_inc( &g_root.num_connected );\r
\r
CL_EXIT( CMT_DBG_VERBOSE, cmt_dbg_lvl );\r
}\r
CL_ENTER( CMT_DBG_VERBOSE, cmt_dbg_lvl );\r
CL_ASSERT( p_cm_dreq_rec );\r
p_node = (ib_node_t*)p_cm_dreq_rec->qp_context;\r
+ CL_ASSERT( p_node );\r
\r
/*\r
* Record that we've already received a DREQ to avoid trying to\r
if( status == IB_SUCCESS )\r
{\r
p_node->state = node_idle;\r
- g_root.num_connected--;\r
+ cl_atomic_dec( &g_root.num_connected );\r
}\r
}\r
else\r
CL_ENTER( CMT_DBG_VERBOSE, cmt_dbg_lvl );\r
CL_ASSERT( p_cm_drep_rec );\r
p_node = (ib_node_t*)p_cm_drep_rec->qp_context;\r
+ CL_ASSERT( p_node );\r
\r
/* We're done with this connection. */\r
+ cl_mutex_acquire( &g_root.mutex );\r
p_node->state = node_idle;\r
- g_root.num_connected--;\r
+ cl_atomic_dec( &g_root.num_connected );\r
+ cl_mutex_release( &g_root.mutex );\r
\r
CL_EXIT( CMT_DBG_VERBOSE, cmt_dbg_lvl );\r
}\r
__create_qps()\r
{\r
uint64_t start_time, total_time;\r
- uint32_t i;\r
+ int32_t i;\r
ib_api_status_t status;\r
\r
printf( "Creating QPs...\n" );\r
__destroy_qps()\r
{\r
uint64_t start_time, total_time;\r
- uint32_t i;\r
+ int32_t i;\r
\r
printf( "Destroying QPs...\n" );\r
start_time = cl_get_time_stamp();\r
return (FALSE);\r
if ( p_node->h_send_cq )\r
{\r
- status = ib_destroy_cq( p_node->h_send_cq, NULL );\r
+ status = ib_destroy_cq( p_node->h_send_cq, ib_sync_destroy );\r
p_node->h_send_cq = NULL;\r
if( status != IB_SUCCESS )\r
{\r
}\r
if (p_node->h_recv_cq)\r
{\r
- status = ib_destroy_cq( p_node->h_recv_cq, NULL );\r
+ status = ib_destroy_cq( p_node->h_recv_cq, ib_sync_destroy );\r
p_node->h_recv_cq = NULL;\r
if( status != IB_SUCCESS )\r
{\r
static boolean_t\r
__create_nodes()\r
{\r
- uint32_t i;\r
+ int32_t i;\r
\r
CL_ENTER( CMT_DBG_VERBOSE, cmt_dbg_lvl );\r
for( i = 0; i < g_root.num_nodes; i++ )\r
static boolean_t\r
__destroy_nodes()\r
{\r
- uint32_t i;\r
+ int32_t i;\r
\r
CL_ENTER( CMT_DBG_VERBOSE, cmt_dbg_lvl );\r
\r
return FALSE;\r
}\r
\r
- if( !__create_nodes() )\r
- {\r
- printf( "Unable to create nodes.\n" );\r
- return FALSE;\r
- }\r
-\r
CL_EXIT( CMT_DBG_VERBOSE, cmt_dbg_lvl );\r
return TRUE;\r
}\r
\r
cm_listen.qp_type = IB_QPT_RELIABLE_CONN;\r
\r
- cm_listen.pfn_cm_mra_cb = __mra_cb;\r
- cm_listen.pfn_cm_rej_cb = __rej_cb;\r
-\r
status = ib_cm_listen( g_root.h_al, &cm_listen, \r
__cm_listen_err_cb, &g_root, &g_root.h_listen );\r
if( status != IB_SUCCESS )\r
__conn_reqs()\r
{\r
ib_api_status_t status;\r
- uintn_t i;\r
+ int32_t i;\r
uint8_t pdata[IB_REQ_PDATA_SIZE];\r
\r
g_root.cm_req.p_req_pdata = pdata;\r
printf( "ib_cm_rep failed [%s]!\n", ib_get_err_str(status) );\r
return status;\r
}\r
- g_root.p_nodes[i].h_cm_req = NULL;\r
}\r
return IB_SUCCESS;\r
}\r
__disconnect()\r
{\r
ib_api_status_t status;\r
- uint32_t i;\r
+ int32_t i;\r
ib_node_t *p_node;\r
uint64_t total_time, start_time;\r
\r
{\r
case node_conn:\r
g_root.cm_dreq.h_qp = p_node->h_qp;\r
- ib_cm_dreq( &g_root.cm_dreq );\r
- cl_mutex_release( &g_root.mutex );\r
+ status = ib_cm_dreq( &g_root.cm_dreq );\r
+ if( status == IB_SUCCESS )\r
+ p_node->state = node_dreq_sent;\r
break;\r
\r
case node_dreq_rcvd:\r
- cl_mutex_release( &g_root.mutex );\r
status = ib_cm_drep( p_node->h_cm_dreq, &g_root.cm_drep );\r
- p_node->h_cm_dreq = NULL;\r
\r
/* If the DREP was successful, we're done with this connection. */\r
if( status == IB_SUCCESS )\r
{\r
p_node->state = node_idle;\r
- g_root.num_connected--;\r
+ cl_atomic_dec( &g_root.num_connected );\r
}\r
break;\r
\r
default:\r
/* Node is already disconnected. */\r
- cl_mutex_release( &g_root.mutex );\r
break;\r
}\r
+ cl_mutex_release( &g_root.mutex );\r
}\r
\r
/* Wait for all disconnections to complete. */\r
__send_msgs()\r
{\r
ib_api_status_t status;\r
- uint32_t i, m;\r
+ int32_t i;\r
+ uint32_t m;\r
ib_send_wr_t send_wr;\r
ib_send_wr_t *p_send_failure;\r
ib_local_ds_t ds_array;\r
ib_wc_t *p_free_wc, *p_done_wc;\r
\r
CL_ENTER( CMT_DBG_VERBOSE, cmt_dbg_lvl );\r
- memset (free_wc, 0, sizeof(free_wc));\r
\r
while( status != IB_NOT_FOUND )\r
{\r
\r
/* Continue polling if nothing is done. */\r
if( status == IB_NOT_FOUND )\r
- return TRUE;\r
+ break;\r
\r
/* Abort if an error occurred. */\r
if( status != IB_SUCCESS )\r
}\r
p_done_wc = p_done_wc->p_next;\r
}\r
+ }\r
\r
- if( !g_root.is_polling )\r
+ if( !g_root.is_polling )\r
+ {\r
+ status = ib_rearm_cq(h_cq, FALSE);\r
+ if (status != IB_SUCCESS)\r
{\r
- status = ib_rearm_cq(h_cq, FALSE);\r
- if (status != IB_SUCCESS)\r
- {\r
- printf("Failed to rearm CQ %p\n", h_cq );\r
- return FALSE;\r
- }\r
+ printf("Failed to rearm CQ %p\n", h_cq );\r
+ return FALSE;\r
}\r
}\r
\r
__poll_send_cqs()\r
{\r
ib_node_t *p_node;\r
- uintn_t i;\r
+ int32_t i;\r
\r
for( i = 0; i < g_root.num_nodes; i++ )\r
{\r
__poll_recv_cqs()\r
{\r
ib_node_t *p_node;\r
- uintn_t i;\r
+ int32_t i;\r
\r
for( i = 0; i < g_root.num_nodes; i++ )\r
{\r
*\r
* PURPOSE: Utility defs & routines for the adapter data structure\r
*\r
- * $Id: dapl_adapter_util.h,v 1.42 2004/06/04 20:09:43 sjs2 Exp $\r
+ * $Id$\r
*\r
**********************************************************************/\r
\r
* Description: Interfaces in this file are completely described in\r
* the DAPL 1.1 API, Chapter 6, section 3.2.1\r
*\r
- * $Id: dapl_cno_create.c,v 1.1 2003/10/24 20:25:43 sjs2 Exp $\r
+ * $Id$\r
**********************************************************************/\r
\r
#include "dapl.h"\r
* Description: Interfaces in this file are completely described in\r
* the DAPL 1.1 API, Chapter 6, section 3.2.2\r
*\r
- * $Id: dapl_cno_free.c,v 1.1 2003/10/24 20:25:43 sjs2 Exp $\r
+ * $Id$\r
**********************************************************************/\r
\r
#include "dapl.h"\r
* Description: Interfaces in this file are completely described in\r
* the DAPL 1.1 API, Chapter 6, section 3.2.4\r
*\r
- * $Id: dapl_cno_modify_agent.c,v 1.1 2003/10/24 20:25:43 sjs2 Exp $\r
+ * $Id$\r
**********************************************************************/\r
\r
#include "dapl.h"\r
* Description: Interfaces in this file are completely described in\r
* the DAPL 1.1 API, Chapter 6, section 3.2.5\r
*\r
- * $Id: dapl_cno_query.c,v 1.1 2003/10/24 20:25:43 sjs2 Exp $\r
+ * $Id$\r
**********************************************************************/\r
\r
#include "dapl.h"\r
*\r
* PURPOSE: Manage CNO Info structure\r
*\r
- * $Id: dapl_cno_util.c,v 1.15 2004/06/15 15:26:25 sjs2 Exp $\r
+ * $Id$\r
**********************************************************************/\r
\r
#include "dapl_ia_util.h"\r
*\r
* PURPOSE: Utility defs & routines for the cno data structure\r
*\r
- * $Id: dapl_cno_util.h,v 1.7 2004/03/24 16:37:48 sjs2 Exp $\r
+ * $Id$\r
*\r
**********************************************************************/\r
\r
* Description: Interfaces in this file are completely described in\r
* the DAPL 1.1 API, Chapter 6, section 3.2.3\r
*\r
- * $Id: dapl_cno_wait.c,v 1.2 2003/12/02 18:19:39 sjs2 Exp $\r
+ * $Id$\r
**********************************************************************/\r
\r
#include "dapl.h"\r
*\r
* - completions are delivered in order\r
*\r
- * $Id: dapl_cookie.c,v 1.13 2003/06/16 17:53:32 sjs2 Exp $\r
+ * $Id$\r
**********************************************************************/\r
\r
#include "dapl_cookie.h"\r
*\r
* PURPOSE: Utility defs & routines for the cookie data structure\r
*\r
- * $Id: dapl_cookie.h,v 1.7 2003/06/13 12:21:02 sjs2 Exp $\r
+ * $Id$\r
*\r
**********************************************************************/\r
\r
* Description: Interfaces in this file are completely described in\r
* the DAPL 1.1 API, Chapter 6, section 4\r
*\r
- * $Id: dapl_cr_accept.c,v 1.26 2004/06/04 20:09:43 sjs2 Exp $\r
+ * $Id$\r
**********************************************************************/\r
\r
#include "dapl.h"\r
* Description: Accepts asynchronous callbacks from the Communications Manager\r
* for EVDs that have been specified as the connection_evd.\r
*\r
- * $Id: dapl_cr_callback.c,v 1.74 2004/06/07 13:06:57 sjs2 Exp $\r
+ * $Id$\r
**********************************************************************/\r
\r
#include "dapl.h"\r
DAT_RETURN dat_status;\r
\r
dapl_dbg_log (DAPL_DBG_TYPE_CM | DAPL_DBG_TYPE_CALLBACK,\r
- "--> dapl_cr_callback! context: %p event: %x cm_handle %p\n",\r
+ "--> dapl_cr_callback! context: %p event: %x cm_handle %d\n",\r
context,\r
ib_cm_event,\r
- (void *) ib_cm_handle);\r
+ ib_cm_handle.cid );\r
\r
/*\r
* Passive side of the connection, context is a SP and\r
*/\r
dapl_os_lock ( &ep_ptr->header.lock );\r
ep_ptr->param.ep_state = DAT_EP_STATE_DISCONNECTED;\r
- ep_ptr->cm_handle = IB_INVALID_HANDLE;\r
+ cl_memclr( &ep_ptr->cm_handle, sizeof(ib_cm_handle_t) );\r
+ ep_ptr->cm_handle.cid = 0xFFFFFFFF;\r
dapls_ib_disconnect_clean (ep_ptr, DAT_FALSE, ib_cm_event);\r
dapl_os_unlock ( &ep_ptr->header.lock );\r
\r
* Description: Interfaces in this file are completely described in\r
* the DAPL 1.1 API, Chapter 6, section 4\r
*\r
- * $Id: dapl_cr_handoff.c,v 1.4 2003/06/16 17:53:32 sjs2 Exp $\r
+ * $Id$\r
**********************************************************************/\r
\r
#include "dapl.h"\r
* Description: Interfaces in this file are completely described in\r
* the DAPL 1.1 API, Chapter 6, section 4\r
*\r
- * $Id: dapl_cr_query.c,v 1.10 2004/05/14 16:22:56 sjs2 Exp $\r
+ * $Id$\r
**********************************************************************/\r
\r
#include "dapl.h"\r
* Description: Interfaces in this file are completely described in\r
* the DAPL 1.1 API, Chapter 6, section 4\r
*\r
- * $Id: dapl_cr_reject.c,v 1.14 2003/10/07 11:22:08 sjs2 Exp $\r
+ * $Id$\r
**********************************************************************/\r
\r
#include "dapl.h"\r
*\r
* PURPOSE: Manage CR (Connection Request) structure\r
*\r
- * $Id: dapl_cr_util.c,v 1.7 2003/08/08 19:20:05 sjs2 Exp $\r
+ * $Id$\r
**********************************************************************/\r
\r
#include "dapl.h"\r
*\r
* PURPOSE: Utility defs & routines for the CR data structure\r
*\r
- * $Id: dapl_cr_util.h,v 1.6 2003/06/13 12:21:03 sjs2 Exp $\r
+ * $Id$\r
*\r
**********************************************************************/\r
\r
* Description: Interfaces in this file are completely described in\r
* the DAPL 1.1 API, Chapter 6, section 5\r
*\r
- * $Id: dapl_ep_connect.c,v 1.28 2004/05/14 16:22:56 sjs2 Exp $\r
+ * $Id$\r
**********************************************************************/\r
\r
#include "dapl.h"\r
* Description: Interfaces in this file are completely described in\r
* the kDAPL 1.1 API, Chapter 6, section 5\r
*\r
- * $Id: dapl_ep_create.c,v 1.32 2004/06/02 18:12:46 sjs2 Exp $\r
+ * $Id$\r
**********************************************************************/\r
\r
#include "dapl.h"\r
* Description: Interfaces in this file are completely described in\r
* the DAPL 1.1 API, Chapter 6, section 5\r
*\r
- * $Id: dapl_ep_disconnect.c,v 1.23 2004/05/10 18:04:05 sjs2 Exp $\r
+ * $Id$\r
**********************************************************************/\r
\r
#include "dapl.h"\r
* Description: Interfaces in this file are completely described in\r
* the DAPL 1.1 API, Chapter 6, section 5\r
*\r
- * $Id: dapl_ep_dup_connect.c,v 1.9 2004/04/23 19:06:51 sjs2 Exp $\r
+ * $Id$\r
**********************************************************************/\r
\r
#include "dapl.h"\r
* Description: Interfaces in this file are completely described in\r
* the DAPL 1.1 API, Chapter 6, section 5.4\r
*\r
- * $Id: dapl_ep_free.c,v 1.29 2004/06/03 14:57:23 sjs2 Exp $\r
+ * $Id$\r
**********************************************************************/\r
\r
#include "dapl.h"\r
* Description: Interfaces in this file are completely described in\r
* the DAPL 1.1 API, Chapter 6, section 5\r
*\r
- * $Id: dapl_ep_get_status.c,v 1.9 2003/07/30 18:13:37 hobie16 Exp $\r
+ * $Id$\r
**********************************************************************/\r
\r
#include "dapl.h"\r
* Description: Interfaces in this file are completely described in\r
* the DAPL 1.0 API, Chapter 6, section 5\r
*\r
- * $Id: dapl_ep_modify.c,v 1.23 2003/07/11 18:42:17 hobie16 Exp $\r
+ * $Id$\r
**********************************************************************/\r
\r
#include "dapl.h"\r
* Description: Interfaces in this file are completely described in\r
* the DAPL 1.1 API, Chapter 6, section 5\r
*\r
- * $Id: dapl_ep_post_rdma_read.c,v 1.9 2004/01/06 14:19:25 sjs2 Exp $\r
+ * $Id$\r
**********************************************************************/\r
\r
#include "dapl_ep_util.h"\r
* Description: Interfaces in this file are completely described in\r
* the DAPL 1.1 API, Chapter 6, section 5\r
*\r
- * $Id: dapl_ep_post_rdma_write.c,v 1.8 2004/01/06 14:19:25 sjs2 Exp $\r
+ * $Id$\r
**********************************************************************/\r
\r
#include "dapl_ep_util.h"\r
* Description: Interfaces in this file are completely described in\r
* the DAPL 1.1 API, Chapter 6, section 5\r
*\r
- * $Id: dapl_ep_post_recv.c,v 1.19 2004/01/19 21:24:49 sjs2 Exp $\r
+ * $Id$\r
**********************************************************************/\r
\r
#include "dapl.h"\r
* Description: Interfaces in this file are completely described in\r
* the DAPL 1.1 API, Chapter 6, section 5\r
*\r
- * $Id: dapl_ep_post_send.c,v 1.8 2004/01/06 14:19:25 sjs2 Exp $\r
+ * $Id$\r
**********************************************************************/\r
\r
#include "dapl_ep_util.h"\r
* Description: Interfaces in this file are completely described in\r
* the DAPL 1.1 API, Chapter 6, section 5\r
*\r
- * $Id: dapl_ep_query.c,v 1.9 2004/05/14 16:22:56 sjs2 Exp $\r
+ * $Id$\r
**********************************************************************/\r
\r
#include "dapl.h"\r
* Description: Interfaces in this file are completely described in\r
* the DAPL 1.1 API, Chapter 6, section 5.13\r
*\r
- * $Id: dapl_ep_reset.c,v 1.6 2003/07/08 14:23:35 sjs2 Exp $\r
+ * $Id$\r
**********************************************************************/\r
\r
#include "dapl.h"\r
*\r
* PURPOSE: Manage EP Info structure\r
*\r
- * $Id: dapl_ep_util.c,v 1.49 2004/05/10 18:04:05 sjs2 Exp $\r
+ * $Id$\r
**********************************************************************/\r
\r
#include "dapl_ep_util.h"\r
ep_ptr->qp_handle = IB_INVALID_HANDLE;\r
ep_ptr->qpn = 0;\r
ep_ptr->qp_state = DAPL_QP_STATE_UNATTACHED;\r
- ep_ptr->cm_handle = IB_INVALID_HANDLE;\r
+ cl_memclr( &ep_ptr->cm_handle, sizeof(ib_cm_handle_t) );\r
+ ep_ptr->cm_handle.cid = 0xFFFFFFFF;\r
\r
ep_ptr->req_count = 0;\r
ep_ptr->recv_count = 0;\r
*\r
* PURPOSE: Utility defs & routines for the EP data structure\r
*\r
- * $Id: dapl_ep_util.h,v 1.14 2004/01/05 13:39:05 sjs2 Exp $\r
+ * $Id$\r
*\r
**********************************************************************/\r
\r
* Description: Interfaces in this file are completely described in\r
* the DAPL 1.1 API, Chapter 6, section 3.4.8\r
*\r
- * $Id: dapl_evd_clear_unwaitable.c,v 1.1 2003/10/24 20:25:43 sjs2 Exp $\r
+ * $Id$\r
**********************************************************************/\r
\r
#include "dapl.h"\r
* Description: Accepts asynchronous callbacks from the Communications Manager\r
* for EVDs that have been specified as the connection_evd.\r
*\r
- * $Id: dapl_evd_connection_callb.c,v 1.45 2004/06/07 13:06:56 sjs2 Exp $\r
+ * $Id$\r
**********************************************************************/\r
\r
#include "dapl.h"\r
\r
dapl_dbg_log (\r
DAPL_DBG_TYPE_CM | DAPL_DBG_TYPE_CALLBACK,\r
- "--> dapl_evd_connection_callback: ctxt: %p event: %x cm_handle %p\n",\r
+ "--> dapl_evd_connection_callback: ctxt: %p event: %x cm_handle %d\n",\r
context,\r
ib_cm_event,\r
- (void *) ib_cm_handle);\r
+ ib_cm_handle.cid);\r
DAPL_CNTR(DCNT_EVD_CONN_CALLBACK);\r
\r
/*\r
*\r
* PURPOSE: implements CQ async_callbacks from verbs\r
*\r
- * $Id: dapl_evd_cq_async_error_callb.c,v 1.8 2003/07/31 13:55:18 hobie16 Exp $\r
+ * $Id$\r
**********************************************************************/\r
\r
#include "dapl.h"\r
* Description: Interfaces in this file are completely defined in \r
* the uDAPL 1.1 API, Chapter 6, section 3\r
*\r
- * $Id: dapl_evd_create.c,v 1.3 2004/02/09 20:34:33 sjs2 Exp $\r
+ * $Id$\r
**********************************************************************/\r
\r
#include "dapl.h"\r
* Description: Interfaces in this file are completely described in\r
* the uDAPL 1.1 API, Chapter 6, section 3\r
*\r
- * $Id: dapl_evd_dequeue.c,v 1.10 2004/01/06 14:19:25 sjs2 Exp $\r
+ * $Id$\r
**********************************************************************/\r
\r
#include "dapl.h"\r
* Description: Interfaces in this file are completely defined in \r
* the uDAPL 1.1 API, Chapter 6, section 3\r
*\r
- * $Id: dapl_evd_disable.c,v 1.1 2004/03/10 17:03:39 sjs2 Exp $\r
+ * $Id$\r
**********************************************************************/\r
\r
#include "dapl.h"\r
*\r
* PURPOSE: implements DTO callbacks from verbs\r
*\r
- * $Id: dapl_evd_dto_callb.c,v 1.18 2004/01/06 14:19:25 sjs2 Exp $\r
+ * $Id$\r
**********************************************************************/\r
\r
#include "dapl.h"\r
* Description: Interfaces in this file are completely defined in \r
* the uDAPL 1.1 API, Chapter 6, section 3\r
*\r
- * $Id: dapl_evd_enable.c,v 1.1 2004/03/10 17:03:39 sjs2 Exp $\r
+ * $Id$\r
**********************************************************************/\r
\r
#include "dapl.h"\r
* Description: Interfaces in this file are completely described in\r
* the DAPL 1.1 API, Chapter 6, section 3\r
*\r
- * $Id: dapl_evd_free.c,v 1.13 2003/12/18 21:00:53 sjs2 Exp $\r
+ * $Id$\r
**********************************************************************/\r
\r
#include "dapl.h"\r
* Description: Interfaces in this file are completely described in\r
* the DAPL 1.1 API, Chapter 6, section 3\r
*\r
- * $Id: dapl_evd_modify_cno.c,v 1.12 2003/12/17 11:31:53 sjs2 Exp $\r
+ * $Id$\r
**********************************************************************/\r
\r
#include "dapl.h"\r
* Description: Interfaces in this file are completely defined in \r
* the uDAPL 1.1 API, Chapter 6, section 3\r
*\r
- * $Id: dapl_evd_post_se.c,v 1.7 2003/06/16 17:53:32 sjs2 Exp $\r
+ * $Id$\r
**********************************************************************/\r
\r
#include "dapl.h"\r
*\r
* PURPOSE: implements QP callbacks from verbs\r
*\r
- * $Id: dapl_evd_qp_async_error_callb.c,v 1.17 2003/07/31 13:55:18 hobie16 Exp $\r
+ * $Id$\r
**********************************************************************/\r
\r
#include "dapl.h"\r
* Description: Interfaces in this file are completely described in\r
* the DAPL 1.1 API, Chapter 6, section 3\r
*\r
- * $Id: dapl_evd_query.c,v 1.1 2003/10/24 20:25:43 sjs2 Exp $\r
+ * $Id$\r
**********************************************************************/\r
\r
#include "dapl.h"\r
* Description: Interfaces in this file are completely defined in \r
* the uDAPL 1.1 API, Chapter 6, section 3\r
*\r
- * $Id: dapl_evd_resize.c,v 1.7 2004/01/15 20:34:44 addetia Exp $\r
+ * $Id$\r
**********************************************************************/\r
\r
#include "dapl.h"\r
* Description: Interfaces in this file are completely described in\r
* the DAPL 1.1 API, Chapter 6, section 3.4.7\r
*\r
- * $Id: dapl_evd_set_unwaitable.c,v 1.1 2003/10/24 20:25:43 sjs2 Exp $\r
+ * $Id$\r
**********************************************************************/\r
\r
#include "dapl.h"\r
*\r
* PURPOSE: implements Unaffiliated callbacks from verbs\r
*\r
- * $Id: dapl_evd_un_async_error_callb.c,v 1.9 2003/07/31 13:55:18 hobie16 Exp $\r
+ * $Id$\r
**********************************************************************/\r
\r
#include "dapl.h"\r
*\r
* PURPOSE: Manage EVD Info structure\r
*\r
- * $Id: dapl_evd_util.c,v 1.56 2004/05/10 20:21:07 sjs2 Exp $\r
+ * $Id$\r
**********************************************************************/\r
\r
#include "dapl_evd_util.h"\r
*\r
* PURPOSE: Utility defs & routines for the EVD data structure\r
*\r
- * $Id: dapl_evd_util.h,v 1.11 2003/11/11 20:38:22 sjs2 Exp $\r
+ * $Id$\r
*\r
**********************************************************************/\r
\r
* Description: Interfaces in this file are completely defined in \r
* the uDAPL 1.1 API specification\r
*\r
- * $Id: dapl_evd_wait.c,v 1.1 2004/03/10 17:03:39 sjs2 Exp $\r
+ * $Id$\r
**********************************************************************/\r
\r
#include "dapl.h"\r
* Description: Interfaces in this file are completely described in\r
* the DAPL 1.1 API, Chapter 6, section 2\r
*\r
- * $Id: dapl_get_consumer_context.c,v 1.6 2003/10/24 20:21:18 sjs2 Exp $\r
+ * $Id$\r
**********************************************************************/\r
\r
#include "dapl.h"\r
* Description: Interfaces in this file are completely described in\r
* the DAPL 1.1 API, Chapter 6, section 2\r
*\r
- * $Id: dapl_get_handle_type.c,v 1.5 2003/08/20 13:50:45 sjs2 Exp $\r
+ * $Id$\r
**********************************************************************/\r
\r
#include "dapl.h"\r
*\r
* Provides a generic hash table with chaining.\r
*\r
- * $Id: dapl_hash.c,v 1.13 2004/04/28 15:29:25 sjs2 Exp $\r
+ * $Id$\r
**********************************************************************/\r
\r
#include "dapl_hash.h"\r
*\r
* PURPOSE: Utility defs & routines for the hash data structure\r
*\r
- * $Id: dapl_hash.h,v 1.6 2004/05/07 11:43:51 sjs2 Exp $\r
+ * $Id$\r
**********************************************************************/\r
\r
#ifndef _DAPL_HASH_H_\r
*\r
* PURPOSE: Manage HCA structure\r
*\r
- * $Id: dapl_hca_util.c,v 1.15 2004/04/15 15:36:25 sjs2 Exp $\r
+ * $Id$\r
**********************************************************************/\r
\r
#include "dapl.h"\r
*\r
* PURPOSE: Utility defs & routines for the HCA data structure\r
*\r
- * $Id: dapl_hca_util.h,v 1.4 2003/07/31 14:04:17 jlentini Exp $\r
+ * $Id$\r
**********************************************************************/\r
\r
#ifndef _DAPL_HCA_UTIL_H_\r
* Description: Interfaces in this file are completely described in\r
* the DAPL 1.1 API, Chapter 6, section 2\r
*\r
- * $Id: dapl_ia_close.c,v 1.9 2003/07/30 18:13:38 hobie16 Exp $\r
+ * $Id$\r
**********************************************************************/\r
\r
#include "dapl.h"\r
* Description: Interfaces in this file are completely described in\r
* the DAPL 1.1 API, Chapter 6, section 2\r
*\r
- * $Id: dapl_ia_open.c,v 1.35 2004/04/13 17:11:31 sjs2 Exp $\r
+ * $Id$\r
**********************************************************************/\r
\r
#include "dapl.h"\r
* Description: Interfaces in this file are completely described in\r
* the DAPL 1.1 API, Chapter 6, section 2\r
*\r
- * $Id: dapl_ia_query.c,v 1.25 2004/05/14 17:28:55 sjs2 Exp $\r
+ * $Id$\r
**********************************************************************/\r
\r
#include "dapl.h"\r
*\r
* PURPOSE: Manage IA Info structure\r
*\r
- * $Id: dapl_ia_util.c,v 1.40 2004/04/13 17:11:31 sjs2 Exp $\r
+ * $Id$\r
**********************************************************************/\r
\r
#include "dapl.h"\r
*\r
* PURPOSE: Utility defs & routines for the IA data structure\r
*\r
- * $Id: dapl_ia_util.h,v 1.9 2003/07/25 19:24:11 sjs2 Exp $\r
+ * $Id$\r
**********************************************************************/\r
\r
#ifndef _DAPL_IA_UTIL_H_\r
*\r
* PURPOSE: Prototypes for library-interface init and fini functions\r
*\r
- * $Id: dapl_init.h,v 1.4 2003/07/31 14:04:17 jlentini Exp $\r
+ * $Id$\r
*\r
**********************************************************************/\r
\r
* Note: Each of the remove functions takes an assertion failure if\r
* an element cannot be removed from the list.\r
*\r
- * $Id: dapl_llist.c,v 1.11 2004/05/04 14:02:51 sjs2 Exp $\r
+ * $Id$\r
**********************************************************************/\r
\r
#include "dapl.h"\r
* Description: Interfaces in this file are completely described in\r
* the DAPL 1.1 API, Chapter 6, section 6\r
*\r
- * $Id: dapl_lmr_create.c,v 1.3 2003/11/10 14:43:11 sjs2 Exp $\r
+ * $Id$\r
**********************************************************************/\r
\r
#include "dapl_lmr_util.h"\r
* Description: Interfaces in this file are completely described in\r
* the DAPL 1.1 API, Chapter 6, section 6\r
*\r
- * $Id: dapl_lmr_free.c,v 1.16 2003/11/10 12:51:26 sjs2 Exp $\r
+ * $Id$\r
**********************************************************************/\r
\r
#include "dapl_lmr_util.h"\r
* Description: Interfaces in this file are completely described in\r
* the DAPL 1.1 API, Chapter 6, section 6\r
*\r
- * $Id: dapl_lmr_query.c,v 1.6 2003/07/30 18:13:40 hobie16 Exp $\r
+ * $Id$\r
**********************************************************************/\r
\r
#include "dapl.h"\r
* PURPOSE: Memory management support routines\r
* Description: Support routines for LMR functions\r
*\r
- * $Id: dapl_lmr_util.c,v 1.8 2003/06/13 12:21:11 sjs2 Exp $\r
+ * $Id$\r
**********************************************************************/\r
\r
#include "dapl_lmr_util.h"\r
*\r
* PURPOSE: Utility defs & routines for the LMR data structure\r
*\r
- * $Id: dapl_lmr_util.h,v 1.6 2003/06/30 16:25:59 jlentini Exp $\r
+ * $Id$\r
*\r
**********************************************************************/\r
\r
*\r
* PURPOSE: Common Memory Management functions and data structures\r
*\r
- * $Id: dapl_mr_util.c,v 1.9 2003/11/10 12:51:26 sjs2 Exp $\r
+ * $Id$\r
**********************************************************************/\r
\r
#include "dapl_mr_util.h"\r
*\r
* PURPOSE: Utility defs & routines for memory registration functions\r
*\r
- * $Id: dapl_mr_util.h,v 1.5 2003/06/13 12:21:11 sjs2 Exp $\r
+ * $Id$\r
*\r
**********************************************************************/\r
\r
* PURPOSE: Provider function table\r
* Description: DAT Interfaces to this provider\r
*\r
- * $Id: dapl_provider.c,v 1.11 2003/11/18 18:55:08 sjs2 Exp $\r
+ * $Id$\r
**********************************************************************/\r
\r
#include "dapl_provider.h"\r
* PURPOSE: Provider function table\r
* Description: DAT Interfaces to this provider\r
*\r
- * $Id: dapl_provider.h,v 1.5 2004/03/17 13:59:42 sjs2 Exp $\r
+ * $Id$\r
**********************************************************************/\r
\r
#ifndef _DAPL_PROVIDER_H_\r
* Description: Interfaces in this file are completely described in\r
* the DAPL 1.1 API, Chapter 6, section 4\r
*\r
- * $Id: dapl_psp_create.c,v 1.21 2004/02/24 17:28:30 sjs2 Exp $\r
+ * $Id$\r
**********************************************************************/\r
\r
#include "dapl.h"\r
* Description: Interfaces in this file are completely described in\r
* the DAPL 1.1 API, Chapter 6, section 4\r
*\r
- * $Id: dapl_psp_create_any.c,v 1.7 2004/02/24 17:28:30 sjs2 Exp $\r
+ * $Id$\r
**********************************************************************/\r
\r
#include "dapl.h"\r
* Description: Interfaces in this file are completely described in\r
* the DAPL 1.1 API, Chapter 6, section 4\r
*\r
- * $Id: dapl_psp_free.c,v 1.20 2003/10/07 11:22:08 sjs2 Exp $\r
+ * $Id$\r
**********************************************************************/\r
\r
#include "dapl.h"\r
* Description: Interfaces in this file are completely described in\r
* the DAPL 1.1 API, Chapter 6, section 4\r
*\r
- * $Id: dapl_psp_query.c,v 1.8 2003/06/23 12:28:05 sjs2 Exp $\r
+ * $Id$\r
**********************************************************************/\r
\r
#include "dapl.h"\r
* Description: Interfaces in this file are completely described in\r
* the DAPL 1.1 API, Chapter 6, section 6\r
*\r
- * $Id: dapl_pz_create.c,v 1.7 2003/07/30 18:13:40 hobie16 Exp $\r
+ * $Id$\r
**********************************************************************/\r
\r
#include "dapl_pz_util.h"\r
* Description: Interfaces in this file are completely described in\r
* the DAPL 1.1 API, Chapter 6, section 6\r
*\r
- * $Id: dapl_pz_free.c,v 1.9 2003/07/30 18:13:40 hobie16 Exp $\r
+ * $Id$\r
**********************************************************************/\r
\r
#include "dapl.h"\r
* Description: Interfaces in this file are completely described in\r
* the DAPL 1.1 API, Chapter 6, section 6\r
*\r
- * $Id: dapl_pz_query.c,v 1.6 2003/07/30 18:13:40 hobie16 Exp $\r
+ * $Id$\r
**********************************************************************/\r
\r
#include "dapl.h"\r
*\r
* PURPOSE: Manage PZ structure\r
*\r
- * $Id: dapl_pz_util.c,v 1.7 2003/06/13 12:21:11 sjs2 Exp $\r
+ * $Id$\r
**********************************************************************/\r
\r
#include "dapl_pz_util.h"\r
*\r
* PURPOSE: Utility defs & routines for the PZ data structure\r
*\r
- * $Id: dapl_pz_util.h,v 1.4 2003/06/13 12:21:11 sjs2 Exp $\r
+ * $Id$\r
*\r
**********************************************************************/\r
\r
* PURPOSE: Ring buffer management\r
* Description: Support and management functions for ring buffers\r
*\r
- * $Id: dapl_ring_buffer_util.c,v 1.11 2004/03/24 16:30:52 sjs2 Exp $\r
+ * $Id$\r
**********************************************************************/\r
\r
#include "dapl_ring_buffer_util.h"\r
*\r
* PURPOSE: Utility defs & routines for the ring buffer data structure\r
*\r
- * $Id: dapl_ring_buffer_util.h,v 1.6 2003/11/11 20:38:22 sjs2 Exp $\r
+ * $Id$\r
*\r
**********************************************************************/\r
\r
* Description: Interfaces in this file are completely described in\r
* the DAPL 1.1 API, Chapter 6, section 6\r
*\r
- * $Id: dapl_rmr_bind.c,v 1.18 2004/01/27 18:42:12 addetia Exp $\r
+ * $Id$\r
**********************************************************************/\r
\r
#include "dapl.h"\r
* Description: Interfaces in this file are completely described in\r
* the DAPL 1.1 API, Chapter 6, section 6\r
*\r
- * $Id: dapl_rmr_create.c,v 1.7 2003/11/04 17:08:30 sjs2 Exp $\r
+ * $Id$\r
**********************************************************************/\r
\r
#include "dapl_rmr_util.h"\r
* Description: Interfaces in this file are completely described in\r
* the DAPL 1.1 API, Chapter 6, section 6\r
*\r
- * $Id: dapl_rmr_free.c,v 1.15 2003/11/04 17:08:30 sjs2 Exp $\r
+ * $Id$\r
**********************************************************************/\r
\r
#include "dapl_rmr_util.h"\r
* Description: Interfaces in this file are completely described in\r
* the DAPL 1.1 API, Chapter 6, section 6\r
*\r
- * $Id: dapl_rmr_query.c,v 1.5 2003/06/16 17:53:34 sjs2 Exp $\r
+ * $Id$\r
**********************************************************************/\r
\r
#include "dapl.h"\r
*\r
* PURPOSE: Utility defs & routines for the RMR data structure\r
*\r
- * $Id: dapl_rmr_util.h,v 1.5 2003/06/30 16:25:59 jlentini Exp $\r
+ * $Id$\r
*\r
**********************************************************************/\r
\r
* Description: Interfaces in this file are completely described in\r
* the DAPL 1.1 API, Chapter 6, section 4\r
*\r
- * $Id: dapl_rsp_create.c,v 1.16 2004/02/24 17:28:30 sjs2 Exp $\r
+ * $Id$\r
**********************************************************************/\r
\r
#include "dapl.h"\r
* Description: Interfaces in this file are completely described in\r
* the DAPL 1.1 API, Chapter 6, section 4\r
*\r
- * $Id: dapl_rsp_free.c,v 1.20 2004/01/29 21:14:35 sjs2 Exp $\r
+ * $Id$\r
**********************************************************************/\r
\r
#include "dapl.h"\r
* Description: Interfaces in this file are completely described in\r
* the DAPL 1.1 API, Chapter 6, section 4\r
*\r
- * $Id: dapl_rsp_query.c,v 1.6 2003/06/16 17:53:34 sjs2 Exp $\r
+ * $Id$\r
**********************************************************************/\r
\r
#include "dapl.h"\r
* Description: Interfaces in this file are completely described in\r
* the DAPL 1.1 API, Chapter 6, section 2\r
*\r
- * $Id: dapl_set_consumer_context.c,v 1.6 2003/08/20 13:50:45 sjs2 Exp $\r
+ * $Id$\r
**********************************************************************/\r
\r
#include "dapl.h"\r
*\r
* PURPOSE: Manage PSP Info structure\r
*\r
- * $Id: dapl_sp_util.c,v 1.12 2003/12/18 18:00:43 sjs2 Exp $\r
+ * $Id$\r
**********************************************************************/\r
\r
#include "dapl.h"\r
\r
do \r
{\r
- if ( cr_ptr->ib_cm_handle == ib_cm_handle )\r
+ if ( cr_ptr->ib_cm_handle.cid == ib_cm_handle.cid )\r
{\r
cr_ptr_fnd = cr_ptr;\r
\r
*\r
* PURPOSE: Utility defs & routines for the PSP & RSP data structure\r
*\r
- * $Id: dapl_sp_util.h,v 1.6 2003/12/18 18:00:43 sjs2 Exp $\r
+ * $Id$\r
*\r
**********************************************************************/\r
\r
*\r
* PURPOSE: IB Connection routines for access to IBAL APIs\r
*\r
- * $Id: dapl_ibal_cm.c 1.38 04/08/06 19:29:06-04:00 aestrin@aestrin.infiniconsys.com $\r
+ * $Id$\r
*\r
**********************************************************************/\r
\r
{\r
DAPL_HCA *hca_ptr;\r
\r
- hca_ptr = (DAPL_HCA *) p_reg_svc_rec->svc_context;\r
+ hca_ptr = (DAPL_HCA * __ptr64) p_reg_svc_rec->svc_context;\r
\r
dapl_os_assert (hca_ptr);\r
\r
dapls_ib_ns_create_gid_map (\r
IN DAPL_HCA *hca_ptr)\r
{\r
+ UNUSED_PARAM( hca_ptr );\r
return (DAT_SUCCESS);\r
}\r
\r
dapls_ib_ns_remove_gid_map (\r
IN DAPL_HCA *hca_ptr)\r
{\r
+ UNUSED_PARAM( hca_ptr );\r
return (DAT_SUCCESS);\r
}\r
\r
p_path_rec = ib_get_query_path_rec (p_query_rec->p_result_mad, 0);\r
if (p_path_rec) \r
{\r
- dapl_os_memcpy ((void *) p_query_rec->query_context, \r
+ dapl_os_memcpy ((void * __ptr64) p_query_rec->query_context, \r
(void *) p_path_rec,\r
sizeof (ib_path_rec_t));\r
dapl_dbg_log (\r
p_svc_rec = ib_get_query_svc_rec (p_query_rec->p_result_mad, 0);\r
if (p_svc_rec) \r
{\r
- dapl_os_memcpy ((void *) p_query_rec->query_context, \r
+ dapl_os_memcpy ((void * __ptr64) p_query_rec->query_context, \r
(void *) p_svc_rec,\r
sizeof (ib_service_record_t));\r
dapl_dbg_log (\r
{\r
ib_user_query_t *p_user_query;\r
\r
- p_user_query = (ib_user_query_t *) p_query_rec->query_context;\r
+ p_user_query = (ib_user_query_t * __ptr64) p_query_rec->query_context;\r
if (p_user_query)\r
{\r
switch (p_user_query->attr_id)\r
dapli_ibal_listen_err_cb (\r
IN ib_listen_err_rec_t *p_listen_err_rec )\r
{\r
+ UNUSED_PARAM( p_listen_err_rec );\r
dapl_dbg_log (DAPL_DBG_TYPE_CM, "--> %s: CM callback listen error\n",\r
"DiLEcb");\r
}\r
dapli_ib_cm_apr_cb (\r
IN ib_cm_apr_rec_t *p_cm_apr_rec )\r
{\r
+ UNUSED_PARAM( p_cm_apr_rec );\r
dapl_dbg_log (DAPL_DBG_TYPE_CM, \r
"--> DiCAcb: CM callback APR (Alternate Path Request)\n");\r
}\r
dapli_ib_cm_lap_cb (\r
IN ib_cm_lap_rec_t *p_cm_lap_rec )\r
{\r
+ UNUSED_PARAM( p_cm_lap_rec );\r
dapl_dbg_log (DAPL_DBG_TYPE_CM, \r
"--> DiCLcb: CM callback LAP (Load Alternate Path)\n");\r
}\r
\r
dapl_os_assert (p_cm_dreq_rec);\r
\r
- ep_ptr = (DAPL_EP *) p_cm_dreq_rec->qp_context;\r
+ ep_ptr = (DAPL_EP * __ptr64) p_cm_dreq_rec->qp_context;\r
\r
if ( ep_ptr == NULL ||\r
ep_ptr->header.magic == DAPL_MAGIC_INVALID )\r
\r
dapls_cr_callback ( p_cm_dreq_rec->h_cm_dreq,\r
IB_CME_DISCONNECTED,\r
- (void *) p_cm_dreq_rec->p_dreq_pdata,\r
+ (void * __ptr64) p_cm_dreq_rec->p_dreq_pdata,\r
(void *) sp_ptr,\r
NULL);\r
}\r
\r
dapl_evd_connection_callback ( p_cm_dreq_rec->h_cm_dreq,\r
IB_CME_DISCONNECTED,\r
- (void *) p_cm_dreq_rec->p_dreq_pdata,\r
+ (void * __ptr64) p_cm_dreq_rec->p_dreq_pdata,\r
p_cm_dreq_rec->qp_context);\r
}\r
}\r
\r
dapl_os_assert (p_cm_drep_rec != NULL);\r
\r
- ep_ptr = (DAPL_EP *) p_cm_drep_rec->qp_context;\r
+ ep_ptr = (DAPL_EP * __ptr64) p_cm_drep_rec->qp_context;\r
\r
if (ep_ptr)\r
{\r
\r
dapls_cr_callback ( ep_ptr->cm_handle,\r
IB_CME_DISCONNECTED,\r
- (void *) p_cm_drep_rec->p_drep_pdata,\r
+ (void * __ptr64) p_cm_drep_rec->p_drep_pdata,\r
(void *) sp_ptr,\r
NULL);\r
}\r
\r
dapl_evd_connection_callback ( ep_ptr->cm_handle,\r
IB_CME_DISCONNECTED,\r
- (void *) p_cm_drep_rec->p_drep_pdata,\r
+ (void * __ptr64) p_cm_drep_rec->p_drep_pdata,\r
p_cm_drep_rec->qp_context);\r
}\r
}\r
\r
dapl_os_memzero (&cm_rtu, sizeof ( ib_cm_rtu_t ));\r
\r
- dapl_os_assert ( ((DAPL_HEADER *) p_cm_rep_rec->qp_context)->magic == \r
+ dapl_os_assert ( ((DAPL_HEADER * __ptr64) p_cm_rep_rec->qp_context)->magic == \r
DAPL_MAGIC_EP );\r
\r
- ep_ptr = (DAPL_EP *) p_cm_rep_rec->qp_context;\r
+ ep_ptr = (DAPL_EP * __ptr64) p_cm_rep_rec->qp_context;\r
dapl_dbg_log (DAPL_DBG_TYPE_CM, \r
"--> DiCRpcb: EP = %p local_max_rdma_read_in %d\n", \r
ep_ptr, p_cm_rep_rec->resp_res);\r
cm_cb_op = IB_CME_LOCAL_FAILURE;\r
}\r
\r
- prd_ptr = (DAPL_PRIVATE *) p_cm_rep_rec->p_rep_pdata;\r
+ prd_ptr = (DAPL_PRIVATE * __ptr64) p_cm_rep_rec->p_rep_pdata;\r
\r
#ifdef DAPL_DBG\r
#if 0\r
p_cm_rep_rec->h_cm_rep,\r
cm_cb_op,\r
(void *) prd_ptr,\r
- (void *) p_cm_rep_rec->qp_context);\r
+ (void * __ptr64) p_cm_rep_rec->qp_context);\r
}\r
\r
\r
\r
dapl_os_assert (p_cm_rej_rec);\r
\r
- ep_ptr = (DAPL_EP *) p_cm_rej_rec->qp_context;\r
+ ep_ptr = (DAPL_EP * __ptr64) p_cm_rej_rec->qp_context;\r
\r
dapl_dbg_log (DAPL_DBG_TYPE_CM, \r
"--> DiCRjcb: EP = %p QP = %p rej reason = 0x%x\n", \r
{\r
dapls_cr_callback ( ep_ptr->cm_handle,\r
cm_event,\r
- (void *) p_cm_rej_rec->p_rej_pdata,\r
+ (void * __ptr64) p_cm_rej_rec->p_rej_pdata,\r
(void *) ((DAPL_CR *) ep_ptr->cr_ptr)->sp_ptr,\r
NULL);\r
}\r
else\r
{\r
- dapl_evd_connection_callback ( NULL,\r
+ dapl_evd_connection_callback ( ep_ptr->cm_handle,\r
cm_event,\r
- (void *) p_cm_rej_rec->p_rej_pdata,\r
- (void *) p_cm_rej_rec->qp_context);\r
+ (void * __ptr64) p_cm_rej_rec->p_rej_pdata,\r
+ (void * __ptr64) p_cm_rej_rec->qp_context);\r
}\r
\r
}\r
\r
dapl_os_assert (p_cm_req_rec);\r
\r
- sp_ptr = (DAPL_SP *) p_cm_req_rec->context;\r
+ sp_ptr = (DAPL_SP * __ptr64) p_cm_req_rec->context;\r
\r
dapl_os_assert (sp_ptr);\r
\r
\r
dapls_cr_callback ( p_cm_req_rec->h_cm_req,\r
IB_CME_CONNECTION_REQUEST_PENDING,\r
- (void *) p_cm_req_rec->p_req_pdata,\r
- (void *) p_cm_req_rec->context,\r
+ (void * __ptr64) p_cm_req_rec->p_req_pdata,\r
+ (void * __ptr64) p_cm_req_rec->context,\r
(DAT_IA_ADDRESS_PTR)&dest_ia_addr);\r
}\r
\r
dapli_ib_cm_mra_cb (\r
IN ib_cm_mra_rec_t *p_cm_mra_rec )\r
{\r
+ UNUSED_PARAM( p_cm_mra_rec );\r
dapl_dbg_log (DAPL_DBG_TYPE_CM | DAPL_DBG_TYPE_CALLBACK, \r
"--> DiCMcb: CM callback MRA\n");\r
}\r
\r
dapl_os_assert (p_cm_rtu_rec != NULL);\r
\r
- ep_ptr = (DAPL_EP *) p_cm_rtu_rec->qp_context;\r
+ ep_ptr = (DAPL_EP * __ptr64) p_cm_rtu_rec->qp_context;\r
\r
dapl_dbg_log (DAPL_DBG_TYPE_CM | DAPL_DBG_TYPE_CALLBACK, \r
"--> DiCRucb: EP = %p QP = %p\n", ep_ptr, ep_ptr->qp_handle); \r
\r
dapls_cr_callback ( ep_ptr->cm_handle,\r
IB_CME_CONNECTED,\r
- (void *) p_cm_rtu_rec->p_rtu_pdata,\r
+ (void * __ptr64) p_cm_rtu_rec->p_rtu_pdata,\r
(void *) sp_ptr,\r
NULL);\r
\r
dapl_evd_connection_callback ( \r
ep_ptr->cm_handle,\r
IB_CME_CONNECTED,\r
- (void *) p_cm_rtu_rec->p_rtu_pdata,\r
+ (void * __ptr64) p_cm_rtu_rec->p_rtu_pdata,\r
(void *) ep_ptr);\r
}\r
}\r
IN DAPL_PRIVATE *prd_ptr,\r
OUT DAT_SOCK_ADDR6 *remote_ia_address )\r
{\r
+ UNUSED_PARAM( dat_handle );\r
+ UNUSED_PARAM( prd_ptr );\r
+ UNUSED_PARAM( remote_ia_address );\r
return DAT_SUCCESS;\r
}\r
\r
ib_api_status_t ib_status;\r
ib_cm_dreq_t cm_dreq;\r
\r
+ UNUSED_PARAM( disconnect_flags );\r
+\r
ia_ptr = ep_ptr->header.owner_ia;\r
ib_status = IB_SUCCESS;\r
\r
* Register request or mra callback functions\r
*/\r
cm_listen.pfn_cm_req_cb = dapli_ib_cm_req_cb;\r
- cm_listen.pfn_cm_rej_cb = dapli_ib_cm_rej_cb;\r
- cm_listen.pfn_cm_mra_cb = dapli_ib_cm_mra_cb;\r
\r
ib_status = ib_cm_listen ( dapl_ibal_root.h_al,\r
&cm_listen,\r
ib_api_status_t ib_status;\r
DAT_RETURN dat_status = DAT_SUCCESS;\r
\r
+ UNUSED_PARAM( ia_ptr );\r
+\r
if (sp_ptr->cm_srvc_handle)\r
{\r
ib_status = ib_cm_cancel (sp_ptr->cm_srvc_handle, \r
if (ib_status != IB_SUCCESS)\r
{\r
dapl_dbg_log ( DAPL_DBG_TYPE_ERR,"--> DsRjC: cm_handle = %p reject failed = %s\n", \r
- ib_cm_handle, ib_get_err_str(ib_status));\r
+ &ib_cm_handle, ib_get_err_str(ib_status));\r
}\r
\r
return ( dapl_ib_status_convert ( ib_status ) );\r
#endif\r
#endif\r
\r
+ cm_rep.pfn_cm_rej_cb = dapli_ib_cm_rej_cb;\r
+ cm_rep.pfn_cm_mra_cb = dapli_ib_cm_mra_cb;\r
cm_rep.pfn_cm_rtu_cb = dapli_ib_cm_rtu_cb;\r
cm_rep.pfn_cm_lap_cb = dapli_ib_cm_lap_cb;\r
cm_rep.pfn_cm_dreq_cb = dapli_ib_cm_dreq_cb;\r
\r
cr_ptr = (DAPL_CR *) cr_handle;\r
\r
- if (cr_ptr->ib_cm_handle == IB_INVALID_HANDLE)\r
+ if (cr_ptr->ib_cm_handle.cid == 0xFFFFFFFF)\r
{\r
dapl_dbg_log ( DAPL_DBG_TYPE_ERR,"--> DsCH: CR = %p invalid cm handle\n", cr_ptr);\r
return DAT_INVALID_PARAMETER;\r
{\r
int size;\r
\r
+ UNUSED_PARAM( prd_ptr );\r
+\r
switch (conn_op)\r
{\r
case DAPL_PDATA_CONN_REQ:\r
* PURPOSE: Utility routines for data transfer operations using the\r
* IBAL APIs\r
*\r
- * $Id: dapl_ibal_dto.h 1.15 04/07/14 18:37:35-04:00 aestrin@aestrin.infiniconsys.com $\r
+ * $Id$\r
*\r
**********************************************************************/\r
\r
*\r
* PURPOSE: Utility defs & routines for access to Intel IBAL APIs\r
*\r
- * $Id: dapl_ibal_kmod.h,v 1.1.1.1 2002/10/16 15:10:17 vu Exp $\r
+ * $Id$\r
*\r
**********************************************************************/\r
\r
*\r
* PURPOSE: Utility routines for access to IBAL APIs\r
*\r
- * $Id: dapl_ibal_mrdb.c,v 1.1.1.1 2002/10/16 15:10:17 vu Exp $\r
+ * $Id$\r
*\r
**********************************************************************/\r
\r
*\r
* PURPOSE: Utility defs & routines for access to Intel IBAL APIs\r
*\r
- * $Id: dapl_ibal_mrdb.h,v 1.1.1.1 2002/10/16 15:10:17 vu Exp $\r
+ * $Id$\r
*\r
**********************************************************************/\r
\r
*\r
* PURPOSE: IB QP routines for access to IBAL APIs\r
*\r
- * $Id: dapl_ibal_qp.c 1.25 04/08/06 18:43:01-04:00 aestrin@aestrin.infiniconsys.com $\r
+ * $Id$\r
*\r
**********************************************************************/\r
\r
*\r
* PURPOSE: Utility routines for access to IBAL APIs\r
*\r
- * $Id: dapl_ibal_util.c 1.28 04/07/14 18:37:36-04:00 aestrin@aestrin.infiniconsys.com $\r
+ * $Id$\r
*\r
**********************************************************************/\r
\r
*\r
* PURPOSE: Utility defs & routines for access to Intel IBAL APIs\r
*\r
- * $Id: dapl_ibal_util.h,v 1.1.1.1 2002/10/16 15:10:17 vu Exp $\r
+ * $Id$\r
*\r
**********************************************************************/\r
\r
* DAPL RI.\r
*\r
*\r
- * $Id: dapl.h,v 1.77 2004/06/04 13:20:05 sjs2 Exp $\r
+ * $Id$\r
**********************************************************************/\r
\r
#ifndef _DAPL_H_\r
* Description:\r
*\r
*\r
- * $Id: dapl_debug.h,v 1.5 2003/12/18 17:55:39 sjs2 Exp $\r
+ * $Id$\r
**********************************************************************/\r
\r
#ifndef _DAPL_DEBUG_H_\r
*\r
* NOTE: As implementations mature this may not be necessary.\r
*\r
- * $Id: dapl_ipoib_names.h,v 1.4 2003/06/13 12:21:13 sjs2 Exp $\r
+ * $Id$\r
**********************************************************************/\r
\r
#ifndef _IPOIB_NAMING_H_\r
* these values are returned in the DAT_IA_ATTR parameter of\r
* dat_ia_query()\r
*\r
- * $Id: dapl_vendor.h,v 1.3 2003/06/30 13:12:54 sjs2 Exp $\r
+ * $Id$\r
**********************************************************************/\r
\r
/**********************************************************************\r
* Description: Interfaces in this file are completely described in\r
* the DAPL 1.1 API, Chapter 6, section 2\r
*\r
- * $Id: dapl_init.c,v 1.50 2004/01/06 14:21:59 sjs2 Exp $\r
+ * $Id$\r
**********************************************************************/\r
\r
#include "dapl.h"\r
* Description: Interfaces in this file are completely described in\r
* dapl_name_service.h\r
*\r
- * $Id: dapl_name_service.c,v 1.2 2004/02/25 13:21:43 sjs2 Exp $\r
+ * $Id$\r
**********************************************************************/\r
\r
/*\r
*\r
* PURPOSE: Utility defs & routines supporting name services\r
*\r
- * $Id: dapl_name_service.h,v 1.1 2003/10/28 14:43:22 sjs2 Exp $\r
+ * $Id$\r
*\r
**********************************************************************/\r
\r
* This file also contains the timer handler thread,\r
* embodied in dapls_timer_thread().\r
*\r
- * $Id: dapl_timer_util.c,v 1.2 2004/01/05 20:50:21 sjs2 Exp $\r
+ * $Id$\r
**********************************************************************/\r
\r
#include "dapl.h"\r
* PURPOSE: DAPL timer management\r
* Description: support for dapl_timer.h\r
*\r
- * $Id: dapl_timer_util.h,v 1.2 2004/01/05 13:39:05 sjs2 Exp $\r
+ * $Id$\r
**********************************************************************/\r
\r
void dapls_timer_init ( void );\r
* of common functions.\r
* \r
*\r
- * $Id: dapl_osd.c,v 1.26 2003/07/31 14:04:18 jlentini Exp $\r
+ * $Id$\r
**********************************************************************/\r
\r
#include "dapl_osd.h"\r
* a canonical DAPL interface. Designed to be portable\r
* and hide OS specific quirks of common functions.\r
*\r
- * $Id: dapl_osd.h,v 1.38 2003/08/20 14:08:57 sjs2 Exp $\r
+ * $Id$\r
**********************************************************************/\r
\r
#ifndef _DAPL_OSD_H_\r
* of common functions.\r
* \r
*\r
- * $Id: dapl_osd.c,v 1.16 2003/07/16 17:54:27 sjs2 Exp $\r
+ * $Id$\r
**********************************************************************/\r
\r
/*\r
* a canonical DAPL interface. Designed to be portable\r
* and hide OS specific quirks of common functions.\r
*\r
- * $Id: dapl_osd.h,v 1.20 2003/07/31 14:04:18 jlentini Exp $\r
+ * $Id$\r
**********************************************************************/\r
\r
#ifndef _DAPL_OSD_H_\r
*\r
* PURPOSE: Convert DAT_RETURN values to humman readable string\r
*\r
- * $Id: dat_strerror.c,v 1.3 2003/09/24 14:49:46 sjs2 Exp $\r
+ * $Id$\r
**********************************************************************/\r
\r
#include <dat/udat.h>\r
* Description: Interfaces in this file are completely described in\r
*the kDAPL 1.0 API\r
*\r
- * $Id: dat_kdapl.c,v 1.6 2003/06/16 17:53:35 sjs2 Exp $\r
+ * $Id$\r
**********************************************************************/ \r
\r
#include "dat_osd.h"\r
* kernel\r
* Description: a linux module implementation\r
*\r
- * $Id: dat_module.c,v 1.4 2003/06/13 11:10:36 sjs2 Exp $\r
+ * $Id$\r
**********************************************************************/\r
\r
#include "dat_osd.h"\r
* of common functions.\r
* \r
*\r
- * $Id: dat_osd.c,v 1.5 2003/06/16 17:53:35 sjs2 Exp $\r
+ * $Id$\r
**********************************************************************/\r
\r
#include <linux/module.h>\r
* a canonical DAT interface. Designed to be portable\r
* and hide OS specific quirks of common functions.\r
*\r
- * $Id: dat_osd.h,v 1.5 2003/06/16 17:53:35 sjs2 Exp $\r
+ * $Id$\r
**********************************************************************/\r
\r
#ifndef _DAT_OSD_H_\r
* interface. Designed to be portable and hide OS specific quirks\r
* of common functions.\r
*\r
- * $Id: dat_osd.c,v 1.8 2003/08/15 20:09:52 jlentini Exp $\r
+ * $Id$\r
**********************************************************************/\r
\r
#include "dat_osd.h"\r
* a canonical DAT interface. Designed to be portable\r
* and hide OS specific quirks of common functions.\r
*\r
- * $Id: dat_osd.h,v 1.14 2003/07/31 14:04:19 jlentini Exp $\r
+ * $Id$\r
**********************************************************************/\r
\r
#ifndef _DAT_OSD_H_\r
*\r
* PURPOSE: DAT Provider and Consumer registry functions.\r
*\r
- * $Id: udat.c,v 1.13 2003/08/20 14:28:40 hobie16 Exp $\r
+ * $Id$\r
**********************************************************************/\r
\r
#include <dat/udat.h>\r
*\r
* PURPOSE: static registry parser\r
*\r
- * $Id: udat_sr_parser.c,v 1.1 2003/07/31 14:04:19 jlentini Exp $\r
+ * $Id$\r
**********************************************************************/\r
\r
\r
*\r
* PURPOSE: static registry (SR) parser inteface declarations\r
*\r
- * $Id: udat_sr_parser.h,v 1.1 2003/07/31 14:04:19 jlentini Exp $\r
+ * $Id$\r
**********************************************************************/\r
\r
#ifndef _DAT_SR_PARSER_H_\r
* interface. Designed to be portable and hide OS specific quirks\r
* of common functions.\r
*\r
- * $Id: dat_osd.c,v 1.6 2003/06/16 17:53:35 sjs2 Exp $\r
+ * $Id$\r
**********************************************************************/\r
\r
#include "dat_osd.h"\r
* a canonical DAPL interface. Designed to be portable\r
* and hide OS specific quirks of common functions.\r
*\r
- * $Id: dat_osd.h,v 1.12 2003/08/15 20:09:53 jlentini Exp $\r
+ * $Id$\r
**********************************************************************/\r
\r
#ifndef _DAT_OSD_H_\r
*\r
* PURPOSE: static registry (SR) platform specific inteface declarations\r
*\r
- * $Id: dat_osd_sr.h,v 1.1 2003/03/28 22:52:47 jlentini Exp $\r
+ * $Id$\r
**********************************************************************/\r
\r
#ifndef _DAT_OSD_SR_H_\r
}\r
\r
\r
+ib_api_status_t\r
+ipoib_start_adapter(\r
+ IN ipoib_adapter_t* const p_adapter )\r
+{\r
+ ib_api_status_t status;\r
+\r
+ IPOIB_ENTER( IPOIB_DBG_INIT );\r
+\r
+ status = __ipoib_pnp_reg( p_adapter, IB_PNP_FLAG_REG_SYNC );\r
+\r
+ IPOIB_EXIT( IPOIB_DBG_INIT );\r
+ return status;\r
+}\r
+\r
+\r
void\r
ipoib_destroy_adapter(\r
IN ipoib_adapter_t* const p_adapter )\r
return status;\r
}\r
\r
- status = __ipoib_pnp_reg( p_adapter, IB_PNP_FLAG_REG_SYNC );\r
-\r
IPOIB_EXIT( IPOIB_DBG_INIT );\r
return status;\r
}\r
case IB_PNP_PORT_ADD:\r
/* If we were initializing, we might have pended some OIDs. */\r
ipoib_resume_oids( p_adapter );\r
+ NdisMIndicateStatus( p_adapter->h_adapter,\r
+ NDIS_STATUS_MEDIA_DISCONNECT, NULL, 0 );\r
+ NdisMIndicateStatusComplete( p_adapter->h_adapter );\r
break;\r
\r
default:\r
* If we had a pending OID request for OID_GEN_LINK_SPEED,\r
* complete it now.\r
*/\r
- if( old_state == IB_PNP_PORT_ADD ||\r
- old_state == IB_PNP_PORT_REMOVE )\r
+ switch( old_state )\r
{\r
+ case IB_PNP_PORT_ADD:\r
+ ipoib_reg_addrs( p_adapter );\r
+ /* Fall through. */\r
+\r
+ case IB_PNP_PORT_REMOVE:\r
ipoib_resume_oids( p_adapter );\r
- }\r
- else\r
- {\r
+ break;\r
+\r
+ default:\r
/* Join all programmed multicast groups. */\r
for( i = 0; i < p_adapter->mcast_array_size; i++ )\r
{\r
OUT ipoib_adapter_t** const pp_adapter );\r
\r
\r
+ib_api_status_t\r
+ipoib_start_adapter(\r
+ IN ipoib_adapter_t* const p_adapter );\r
+\r
+\r
void\r
ipoib_destroy_adapter(\r
IN ipoib_adapter_t* const p_adapter );\r
\r
*p_selected_medium_index = medium_index;\r
\r
-\r
/* Create the adapter adapter */\r
ib_status = ipoib_create_adapter( wrapper_config_context, h_adapter, &p_adapter );\r
if( ib_status != IB_SUCCESS )\r
}\r
#endif\r
\r
+	/* Start the adapter. */\r
+ ib_status = ipoib_start_adapter( p_adapter );\r
+ if( ib_status != IB_SUCCESS )\r
+ {\r
+ ipoib_destroy_adapter( p_adapter );\r
+ IPOIB_TRACE_EXIT( IPOIB_DBG_ERROR,\r
+ ("ipoib_start_adapter returned status %d.\n", ib_status ) );\r
+ return NDIS_STATUS_FAILURE;\r
+ }\r
+\r
IPOIB_EXIT( IPOIB_DBG_INIT );\r
return status;\r
}\r
}\r
cl_obj_unlock( &p_endpt->obj );\r
\r
- if( p_query_rec->status != IB_SUCCESS )\r
+ if( p_query_rec->status != IB_SUCCESS || !p_query_rec->result_cnt )\r
{\r
p_port->p_adapter->hung = TRUE;\r
ipoib_endpt_deref( p_endpt );\r
p_eth = &p_desc->p_buf->eth.pkt;\r
#endif /*IPOIB_INLINE_RECV */\r
\r
+ /* Don't report loopback traffic - we requested SW loopback. */\r
+ if( !cl_memcmp( &p_port->p_adapter->mac, &p_eth->hdr.src,\r
+ sizeof(p_port->p_adapter->mac) ) )\r
+ {\r
+ /*\r
+ * "This is not the packet you're looking for" - don't update\r
+ * receive statistics, the packet never happened.\r
+ */\r
+ cl_qlist_insert_tail( p_bad_list, &p_desc->item.list_item );\r
+ /* Dereference the port object on behalf of the failed receive. */\r
+ cl_obj_deref( &p_port->obj );\r
+ continue;\r
+ }\r
+\r
switch( p_ipoib->hdr.type )\r
{\r
case ETH_PROT_TYPE_IP:\r
\r
IPOIB_ENTER( IPOIB_DBG_RECV );\r
\r
+ UNUSED_PARAM( p_port );\r
+\r
/* Create the ethernet header. */\r
status = __recv_gen( p_ipoib, p_eth, p_src, p_dst );\r
if( status != IB_SUCCESS )\r
IPOIB_TRACE_EXIT( IPOIB_DBG_ERROR, ("Invalid DHCP op code.\n") );\r
return IB_INVALID_SETTING;\r
}\r
- /* find a better way to check for echo packets ? */\r
- if ( p_port->p_adapter->mac.addr[0] == p_eth->hdr.src.addr[0] &&\r
- p_port->p_adapter->mac.addr[1] == p_eth->hdr.src.addr[1] &&\r
- p_port->p_adapter->mac.addr[2] == p_eth->hdr.src.addr[2] &&\r
- p_port->p_adapter->mac.addr[3] == p_eth->hdr.src.addr[3] &&\r
- p_port->p_adapter->mac.addr[4] == p_eth->hdr.src.addr[4] &&\r
- p_port->p_adapter->mac.addr[5] == p_eth->hdr.src.addr[5] )\r
- {\r
- IPOIB_TRACE_EXIT( IPOIB_DBG_ERROR, ("Local echo dhcp msg.\n") );\r
- return IB_INVALID_PARAMETER;\r
- }\r
\r
/*\r
* Find the client identifier option, making sure to skip\r
\r
IPOIB_ENTER( IPOIB_DBG_SEND );\r
\r
+ cl_spinlock_acquire( &p_port->send_lock );\r
for( i = 0; i < num_packets; i++ )\r
{\r
desc.p_pkt = p_packet_array[i];\r
continue;\r
}\r
\r
- cl_spinlock_acquire( &p_port->send_lock );\r
cl_perf_start( SendMgrQueue );\r
status = __send_mgr_queue( p_port, p_eth_hdr, &desc.p_endpt );\r
cl_perf_stop( &p_port->p_adapter->perf, SendMgrQueue );\r
IPOIB_LIST_ITEM_FROM_PACKET( p_packet_array[i++] ) );\r
}\r
cl_perf_stop( &p_port->p_adapter->perf, QueuePacket );\r
- cl_spinlock_release( &p_port->send_lock );\r
- return;\r
+ break;\r
}\r
- cl_spinlock_release( &p_port->send_lock );\r
if( status != NDIS_STATUS_SUCCESS )\r
{\r
ASSERT( status == NDIS_STATUS_NO_ROUTE_TO_DESTINATION );\r
continue;\r
}\r
\r
- /* No lock needed to build the work request. */\r
cl_perf_start( BuildSendDesc );\r
status = __build_send_desc( p_port, p_eth_hdr, p_buf, buf_len, &desc );\r
cl_perf_stop( &p_port->p_adapter->perf, BuildSendDesc );\r
\r
cl_atomic_inc( &p_port->send_mgr.depth );\r
}\r
+ cl_spinlock_release( &p_port->send_lock );\r
\r
IPOIB_EXIT( IPOIB_DBG_SEND );\r
}\r
typedef struct _srp_session *p_srp_session_t;\r
\r
\r
+#pragma warning(disable:4324)\r
typedef struct _srp_path_record\r
{\r
cl_list_item_t list_item;\r
ib_path_rec_t path_rec;\r
\r
} srp_path_record_t;\r
+#pragma warning(default:4324)\r
+\r
\r
typedef struct _srp_hba\r
{\r
\r
\r
/* Signals a select event to the switch. */\r
-static void\r
-post_select_event(\r
+void\r
+ibsp_post_select_event(\r
struct ibsp_socket_info *socket_info,\r
int event,\r
int error )\r
{\r
- CL_ENTER( IBSP_DBG_NEV, gdbg_lvl );\r
+ HANDLE h_event;\r
\r
- if( (socket_info->event_mask & event) == 0 )\r
- {\r
- /* This event is not requested. Since we capture only two important\r
- * event, this case should never occur. */\r
- CL_EXIT_ERROR( IBSP_DBG_NEV, gdbg_lvl,\r
- ("Hummm, tried to post an umasked event. (%x, %x)\n",\r
- socket_info->event_mask, event) );\r
- return;\r
- }\r
+ IBSP_ENTER( IBSP_DBG_NEV );\r
\r
- cl_spinlock_acquire( &socket_info->event_mutex );\r
-\r
- socket_info->network_events |= event;\r
+ CL_ASSERT( socket_info );\r
+ CL_ASSERT( event );\r
\r
switch( event )\r
{\r
case FD_CONNECT:\r
+ IBSP_TRACE1( IBSP_DBG_NEV,\r
+ ("socket %p FD_CONNECT\n", socket_info) );\r
socket_info->errno_connect = error;\r
break;\r
\r
case FD_ACCEPT:\r
- socket_info->errno_accept = error;\r
+ IBSP_TRACE1( IBSP_DBG_NEV,\r
+ ("socket %p FD_ACCEPT\n", socket_info) );\r
break;\r
\r
default:\r
break;\r
}\r
\r
- cl_spinlock_release( &socket_info->event_mutex );\r
+ _InterlockedOr( &socket_info->network_events, event );\r
\r
- SetEvent( socket_info->event_select );\r
+ h_event = InterlockedCompareExchangePointer(\r
+ &socket_info->event_select, NULL, NULL );\r
+ /* Check for event notification request and signal as needed. */\r
+ if( (socket_info->event_mask & event) && h_event )\r
+ {\r
+ IBSP_TRACE2( IBSP_DBG_NEV,\r
+ ("Signaling eventHandle %p at time %I64d.\n",\r
+ h_event, cl_get_time_stamp() ) );\r
+ SetEvent( h_event );\r
+ }\r
\r
- CL_EXIT( IBSP_DBG_NEV, gdbg_lvl );\r
+ IBSP_EXIT( IBSP_DBG_NEV );\r
}\r
\r
\r
mra.svc_timeout = 0x1F;\r
ib_cm_mra( p_cm_req_rec->h_cm_req, &mra );\r
\r
- post_select_event( socket_info, FD_ACCEPT, 0 );\r
+ ibsp_post_select_event( socket_info, FD_ACCEPT, 0 );\r
break;\r
\r
case IBSP_DUPLICATING_REMOTE:\r
/* Note: a REJ has been automatically sent. */\r
CL_ERROR( IBSP_DBG_CM, gdbg_lvl, ("ib_cm_rtu failed (0x%d)\n", status) );\r
IBSP_CHANGE_SOCKET_STATE( socket_info, IBSP_BIND );\r
- post_select_event( socket_info, FD_CONNECT, WSAENOBUFS );\r
+ ibsp_post_select_event( socket_info, FD_CONNECT, WSAENOBUFS );\r
}\r
else\r
{\r
IBSP_CHANGE_SOCKET_STATE( socket_info, IBSP_CONNECTED );\r
- post_select_event( socket_info, FD_CONNECT, 0 );\r
+ ibsp_post_select_event( socket_info, FD_CONNECT, 0 );\r
}\r
}\r
else if( socket_info->socket_state == IBSP_DUPLICATING_NEW )\r
{\r
case IBSP_CONNECT:\r
IBSP_CHANGE_SOCKET_STATE( socket_info, IBSP_BIND );\r
- post_select_event( socket_info, FD_CONNECT, WSAECONNREFUSED );\r
+ ibsp_post_select_event( socket_info, FD_CONNECT, WSAECONNREFUSED );\r
break;\r
\r
case IBSP_ACCEPT:\r
socket_info->info.listen.listen_req_param.identifier));\r
\r
param.pfn_cm_req_cb = cm_req_callback;\r
- param.pfn_cm_mra_cb = cm_mra_callback;\r
- param.pfn_cm_rej_cb = cm_rej_callback;\r
\r
param.qp_type = IB_QPT_RELIABLE_CONN;\r
\r
cm_req.rnr_retry_cnt = QP_ATTRIB_RNR_RETRY;\r
cm_req.retry_cnt = QP_ATTRIB_RETRY_COUNT;\r
cm_req.p_alt_path = NULL;\r
- cm_req.pfn_cm_req_cb = cm_req_callback;\r
cm_req.pfn_cm_mra_cb = cm_mra_callback;\r
cm_req.pfn_cm_rej_cb = cm_rej_callback;\r
\r
cm_rep.flow_ctrl = cm_req_received->flow_ctrl;\r
cm_rep.rnr_nak_timeout = QP_ATTRIB_RNR_NAK_TIMEOUT;\r
cm_rep.rnr_retry_cnt = cm_req_received->rnr_retry_cnt;\r
+ cm_rep.pfn_cm_mra_cb = cm_mra_callback;\r
+ cm_rep.pfn_cm_rej_cb = cm_rej_callback;\r
cm_rep.pfn_cm_rtu_cb = cm_rtu_callback;\r
cm_rep.pfn_cm_lap_cb = cm_lap_callback;\r
cm_rep.pfn_cm_dreq_cb = cm_dreq_callback;\r
// cl_spinlock_release( &socket_info->recv_lock );\r
//}\r
\r
- if( p_recv_wr->ds_array[0].length >= 40 )\r
+ if( wc->status == IB_SUCCESS && p_recv_wr->ds_array[0].length >= 40 )\r
{\r
- debug_dump_buffer( IBSP_DBG_WQ, gdbg_lvl, "RECV",\r
+ debug_dump_buffer( IBSP_DBG_WQ | IBSP_DBG_LEVEL4, "RECV",\r
(void * __ptr64)p_recv_wr->ds_array[0].vaddr, 40 );\r
}\r
\r
status = ib_query_qp( socket_info->qp, &qp_attr );\r
if( status == IB_SUCCESS )\r
{\r
- socket_info->max_inline = qp_attr.sq_max_inline;\r
+ socket_info->max_inline = min( g_max_inline, qp_attr.sq_max_inline );\r
}\r
else\r
{\r
void\r
debug_dump_buffer(\r
uint32_t level,\r
- uint32_t mask,\r
const char *name,\r
void *buf,\r
size_t len )\r
s = str;\r
*s = 0;\r
\r
- CL_PRINT( level, mask, ("HEX for %s:\n", name) );\r
+ CL_PRINT( level, gdbg_lvl, ("HEX for %s:\n", name) );\r
\r
for( i = 0; i < len; i++ )\r
{\r
s += sprintf( s, "%02x ", p[i] );\r
if( i % 16 == 15 )\r
{\r
- CL_PRINT( level, mask, ("HEX:%s: %s\n", name, str) );\r
+ CL_PRINT( level, gdbg_lvl, ("HEX:%s: %s\n", name, str) );\r
s = str;\r
*s = 0;\r
}\r
}\r
- CL_PRINT( level, mask, ("HEX:%s:%s\n", name, str) );\r
+ CL_PRINT( level, gdbg_lvl, ("HEX:%s: %s\n", name, str) );\r
}\r
\r
\r
#define IBSP_ENTER( l ) CL_ENTER( (l | IBSP_DBG_FUNC), gdbg_lvl )\r
#define IBSP_EXIT( l ) CL_EXIT( (l | IBSP_DBG_FUNC), gdbg_lvl )\r
\r
-#if defined UNIT_TEST\r
-\r
-#define BREAKPOINT(x)\r
-\r
-#define CL_ERROR(a, b, c) printf c\r
-#define CL_EXIT_ERROR(a, b, c) printf c\r
-\r
-#undef CL_TRACE\r
-#define CL_TRACE(a,b,c) printf c\r
-#undef CL_ENTER\r
-#define CL_ENTER(a,b) printf("Enter %s\n", __FUNCTION__)\r
-#undef CL_EXIT\r
-#define CL_EXIT(a,b) printf("Exit %s\n", __FUNCTION__)\r
-#undef CL_PRINT\r
-#define CL_PRINT(a,b,c) printf c\r
-\r
-#define fzprint(a)\r
-\r
-#define STAT_INC(name)\r
-#define STAT_DEC(name)\r
-#define free_socket_info(a)\r
-#define ib_deregister_all_mr(a)\r
-#define DebugPrintSockAddr(a,b,c)\r
-\r
-#elif defined _DEBUG_\r
+#if defined _DEBUG_\r
\r
//#define fzprint(a) CL_PRINT(IBSP_DBG_USER, IBSP_DBG_USER, a)\r
#define fzprint(a)\r
void\r
debug_dump_buffer(\r
uint32_t level,\r
- uint32_t mask,\r
const char *name,\r
void *buf,\r
size_t len );\r
};\r
\r
static DWORD no_read = 0;\r
+uint32_t g_max_inline = 0xFFFFFFFF;\r
\r
/*\r
* Function: DllMain\r
IN DWORD dwReason,\r
IN LPVOID lpvReserved )\r
{\r
-#ifdef _DEBUG_\r
- TCHAR dbg_lvl_str[16];\r
+ TCHAR env_var[16];\r
DWORD i;\r
-#endif\r
\r
CL_ENTER( IBSP_DBG_DLL, gdbg_lvl );\r
\r
CL_TRACE( IBSP_DBG_DLL, gdbg_lvl, ("DllMain: DLL_PROCESS_ATTACH\n") );\r
\r
#ifdef _DEBUG_\r
- i = GetEnvironmentVariable( "IBWSD_DBG", dbg_lvl_str, 16 );\r
+ i = GetEnvironmentVariable( "IBWSD_DBG", env_var, 16 );\r
if( i && i <= 16 )\r
{\r
- gdbg_lvl = _tcstoul( dbg_lvl_str, NULL, 16 );\r
+ gdbg_lvl = _tcstoul( env_var, NULL, 16 );\r
IBSP_TRACE( IBSP_DBG_DLL,\r
("Given IBWSD_DBG debug level:0x%X\n",\r
gdbg_lvl) );\r
/* See if the user wants to disable RDMA reads. */\r
no_read = GetEnvironmentVariable( "IBWSD_NO_READ", NULL, 0 );\r
\r
+ i = GetEnvironmentVariable( "IBWSD_INLINE", env_var, 16 );\r
+ if( i && i <= 16 )\r
+ g_max_inline = _tcstoul( env_var, NULL, 10 );\r
+\r
if( init_globals() )\r
return FALSE;\r
break;\r
WSABUF callee_id;\r
struct listen_incoming *incoming;\r
struct ibsp_port *port;\r
- BOOLEAN reject;\r
\r
- CL_ENTER( IBSP_DBG_CONN, gdbg_lvl );\r
+ IBSP_ENTER( IBSP_DBG_CONN );\r
\r
fzprint(("%s():%d:0x%x:0x%x: socket=0x%p \n", __FUNCTION__,\r
__LINE__, GetCurrentProcessId(), GetCurrentThreadId(), s));\r
\r
if( *addrlen < sizeof(struct sockaddr_in) )\r
{\r
- CL_EXIT_ERROR( IBSP_DBG_CONN, gdbg_lvl,\r
- ("invalid addrlen (%d, %d)\n", *addrlen, sizeof(struct sockaddr_in)) );\r
+ IBSP_ERROR_EXIT( ("invalid addrlen (%d, %d)\n",\r
+ *addrlen, sizeof(struct sockaddr_in)) );\r
*lpErrno = WSAEFAULT;\r
return INVALID_SOCKET;\r
}\r
if( socket_info->socket_state != IBSP_LISTEN )\r
{\r
cl_spinlock_release( &socket_info->mutex );\r
- CL_EXIT_ERROR( IBSP_DBG_CONN, gdbg_lvl,\r
- ("Socket is not in right socket_state (%s)\n",\r
+ IBSP_ERROR_EXIT( ("Socket is not in right socket_state (%s)\n",\r
IBSP_SOCKET_STATE_STR( socket_info->socket_state )) );\r
*lpErrno = WSAEINVAL;\r
return INVALID_SOCKET;\r
{\r
cl_spinlock_release( &socket_info->mutex );\r
\r
- CL_EXIT_ERROR( IBSP_DBG_CONN, gdbg_lvl,\r
- ("No pending connection found for this socket\n") );\r
+ IBSP_ERROR_EXIT( ("No pending connection found for this socket\n") );\r
*lpErrno = WSAEWOULDBLOCK;\r
return INVALID_SOCKET;\r
}\r
struct listen_incoming, item);\r
port = socket_info->port;\r
\r
- reject = FALSE;\r
-\r
/* Find the destination IP address */\r
if( port == NULL )\r
{\r
/* The socket was bound to INADDR_ANY. We must find the correct port\r
- * for the new socket. */\r
+ * for the new socket. */\r
port = get_port_from_ip_address( incoming->params.dest.sin_addr );\r
if( port == NULL )\r
{\r
- CL_ERROR( IBSP_DBG_CONN, gdbg_lvl,\r
+ IBSP_ERROR(\r
("incoming destination IP address not local (%s)\n",\r
inet_ntoa( incoming->params.dest.sin_addr )) );\r
goto reject;\r
/* Cross-check with the path info to make sure we are conectiong correctly */\r
if( port->guid != ib_gid_get_guid( &incoming->cm_req_received.primary_path.sgid ) )\r
{\r
- CL_ERROR( IBSP_DBG_CONN, gdbg_lvl,\r
+ IBSP_ERROR(\r
("GUIDs of port for destination IP address and primary path do not match (%016I64x, %016I64x)\n",\r
port->guid,\r
ib_gid_get_guid( &incoming->cm_req_received.primary_path.sgid )) );\r
- goto reject;\r
- }\r
\r
- if( reject )\r
- {\r
reject:\r
/* The request is invalid. Remove it from the list and reject it. */\r
cl_qlist_remove_item( &socket_info->info.listen.list, &incoming->item );\r
callee_id.buf = (char *)&incoming->params.dest;\r
callee_id.len = sizeof(incoming->params.dest);\r
\r
-#ifdef _DEBUG_\r
- {\r
- char buf[100];\r
- char *p = buf;\r
- p += sprintf( p, "got incoming connection from %s/%d-%d to",\r
- inet_ntoa( incoming->params.source.sin_addr ),\r
- cl_ntoh16( incoming->params.source.sin_port ),\r
- incoming->params.source.sin_family );\r
- p += sprintf( p, " %s/%d-%d",\r
- inet_ntoa( incoming->params.dest.sin_addr ),\r
- cl_ntoh16( incoming->params.dest.sin_port ),\r
- incoming->params.dest.sin_family );\r
-\r
- CL_TRACE( IBSP_DBG_CONN, gdbg_lvl, (buf) );\r
- }\r
-#endif\r
+ IBSP_TRACE( IBSP_DBG_CONN,\r
+ ("Got incoming conn from %s/%d-%d to %s/%d-%d\n",\r
+ inet_ntoa( incoming->params.source.sin_addr ),\r
+ cl_ntoh16( incoming->params.source.sin_port ),\r
+ incoming->params.source.sin_family,\r
+ inet_ntoa( incoming->params.dest.sin_addr ),\r
+ cl_ntoh16( incoming->params.dest.sin_port ),\r
+ incoming->params.dest.sin_family) );\r
\r
/* Call the conditional function */\r
- ret = lpfnCondition( &caller_id, NULL,\r
- NULL, NULL, &callee_id, NULL, NULL, dwCallbackData );\r
+ ret = lpfnCondition( &caller_id, NULL, NULL, NULL,\r
+ &callee_id, NULL, NULL, dwCallbackData );\r
\r
switch( ret )\r
{\r
cl_qlist_remove_item( &socket_info->info.listen.list, &incoming->item );\r
cl_spinlock_release( &socket_info->mutex );\r
\r
- ib_reject( incoming->cm_req_received.h_cm_req, IB_REJ_INSUF_QP );\r
+ IBSP_TRACE1( IBSP_DBG_CONN,\r
+ ("Conditional routine returned CF_REJECT\n") );\r
+\r
+ ib_reject( incoming->cm_req_received.h_cm_req, IB_REJ_USER_DEFINED );\r
\r
HeapFree( g_ibsp.heap, 0, incoming );\r
- CL_EXIT_ERROR( IBSP_DBG_CONN, gdbg_lvl,\r
- ("Conditional routine rejected connection\n") );\r
*lpErrno = WSAECONNREFUSED;\r
+ IBSP_EXIT( IBSP_DBG_CONN );\r
return INVALID_SOCKET;\r
- break;\r
\r
case CF_DEFER:\r
cl_spinlock_release( &socket_info->mutex );\r
\r
- CL_EXIT_ERROR( IBSP_DBG_CONN, gdbg_lvl, ("Conditional routine returned defer\n") );\r
+ IBSP_TRACE1( IBSP_DBG_CONN,\r
+ ("Conditional routine returned CF_DEFER\n") );\r
/* TODO: Send MRA */\r
*lpErrno = WSATRY_AGAIN;\r
+ IBSP_EXIT( IBSP_DBG_CONN );\r
return INVALID_SOCKET;\r
- break;\r
\r
case CF_ACCEPT:\r
break;\r
default:\r
/* Should never happen */\r
cl_spinlock_release( &socket_info->mutex );\r
- CL_ERROR( IBSP_DBG_CONN, gdbg_lvl,\r
- ("lpfnCondition returned undocumented code (%d)\n", ret) );\r
+ IBSP_ERROR(\r
+ ("Conditional routine returned undocumented code (%d)\n", ret) );\r
CL_ASSERT( 0 );\r
*lpErrno = WSAECONNREFUSED;\r
+ IBSP_EXIT( IBSP_DBG_CONN );\r
return INVALID_SOCKET;\r
}\r
\r
new_socket_info->local_addr = incoming->params.dest;\r
\r
cl_qlist_remove_item( &socket_info->info.listen.list, &incoming->item );\r
+ /* Signal the event again if there are more connection requests. */\r
+ if( cl_qlist_count( &socket_info->info.listen.list ) )\r
+ ibsp_post_select_event( socket_info, FD_ACCEPT, 0 );\r
\r
cl_spinlock_release( &socket_info->mutex );\r
\r
- /* Update the state of the socket context */\r
- IBSP_CHANGE_SOCKET_STATE( new_socket_info, IBSP_ACCEPT );\r
-\r
/* Copy the socket context info from parent socket context */\r
new_socket_info->socket_options = socket_info->socket_options;\r
\r
\r
new_socket_info->info.accept.event = CreateEvent( NULL, FALSE, FALSE, NULL );\r
\r
+ cl_spinlock_acquire( &new_socket_info->mutex );\r
+ /* Update the state of the socket context */\r
+ IBSP_CHANGE_SOCKET_STATE( new_socket_info, IBSP_ACCEPT );\r
+\r
ret = ib_accept( new_socket_info, &incoming->cm_req_received, lpErrno );\r
\r
if( ret )\r
{\r
+ IBSP_CHANGE_SOCKET_STATE( new_socket_info, IBSP_CREATE );\r
+ cl_spinlock_release( &new_socket_info->mutex );\r
/* Free the socket descriptor */\r
fzprint(("%s():%d:0x%x:0x%x: socket=0x%p calling lpWPUCloseSocketHandle=0x%p\n",\r
__FUNCTION__, __LINE__, GetCurrentProcessId(), GetCurrentThreadId(),\r
socket_info, socket_info->switch_socket));\r
\r
if( g_ibsp.up_call_table.lpWPUCloseSocketHandle(\r
- new_socket_info->switch_socket, lpErrno ) == SOCKET_ERROR )\r
+ new_socket_info->switch_socket, &ret ) == SOCKET_ERROR )\r
{\r
- CL_ERROR( IBSP_DBG_CONN, gdbg_lvl,\r
- ("WPUCloseSocketHandle failed: %d\n", *lpErrno) );\r
+ IBSP_ERROR( ("WPUCloseSocketHandle failed: %d\n", ret) );\r
}\r
else\r
{\r
ib_reject( incoming->cm_req_received.h_cm_req, IB_REJ_INSUF_QP );\r
\r
HeapFree( g_ibsp.heap, 0, incoming );\r
- *lpErrno = WSAEACCES;\r
+ *lpErrno = ret;\r
\r
CL_EXIT_ERROR( IBSP_DBG_CONN, gdbg_lvl, ("ib_accept failed (%d)\n", ret) );\r
\r
}\r
else\r
{\r
+ cl_spinlock_release( &new_socket_info->mutex );\r
HeapFree( g_ibsp.heap, 0, incoming );\r
\r
if( WaitForSingleObject( new_socket_info->info.accept.event, INFINITE ) == WAIT_OBJECT_0 )\r
}\r
else\r
{\r
- CL_ERROR( IBSP_DBG_CONN, gdbg_lvl,\r
+ IBSP_ERROR(\r
("ib_accept failed - socket state is %s\n",\r
IBSP_SOCKET_STATE_STR( new_socket_info->socket_state )) );\r
\r
if( g_ibsp.up_call_table.lpWPUCloseSocketHandle(\r
new_socket_info->switch_socket, lpErrno ) == SOCKET_ERROR )\r
{\r
- CL_ERROR( IBSP_DBG_CONN, gdbg_lvl,\r
+ IBSP_ERROR(\r
("WPUCloseSocketHandle failed: %d\n", *lpErrno) );\r
}\r
else\r
("returns new SocketID (0x%x)\n", new_socket) );\r
\r
return (SOCKET) new_socket_info;\r
-\r
}\r
else\r
{\r
/* Sanity checks */\r
if( namelen != sizeof(struct sockaddr_in) )\r
{\r
- CL_ERROR( IBSP_DBG_CONN, gdbg_lvl,\r
+ IBSP_ERROR(\r
("invalid namelen (%d instead of %d)\n",\r
namelen, sizeof(struct sockaddr_in)) );\r
*lpErrno = WSAEFAULT;\r
\r
if( addr->sin_family != AF_INET )\r
{\r
- CL_ERROR( IBSP_DBG_CONN, gdbg_lvl, ("bad family for socket\n") );\r
+ IBSP_ERROR( ("bad family for socket\n") );\r
*lpErrno = WSAEFAULT;\r
goto error;\r
}\r
port = get_port_from_ip_address( addr->sin_addr );\r
if( port == NULL )\r
{\r
- CL_ERROR( IBSP_DBG_CONN, gdbg_lvl,\r
+ IBSP_ERROR(\r
("This IP address does not belong to that host (%08x)\n",\r
addr->sin_addr.S_un.S_addr) );\r
*lpErrno = WSAEADDRNOTAVAIL;\r
\r
/* We are going to take this mutex for some time, \r
* but at this stage, it shouldn't impact anything. */\r
- cl_spinlock_acquire( &socket_info->event_mutex );\r
+ cl_spinlock_acquire( &socket_info->mutex );\r
\r
/* Verify the state of the socket */\r
if( socket_info->socket_state != IBSP_CREATE )\r
{\r
- cl_spinlock_release( &socket_info->event_mutex );\r
- CL_ERROR( IBSP_DBG_CONN, gdbg_lvl,\r
+ cl_spinlock_release( &socket_info->mutex );\r
+ IBSP_ERROR(\r
("Invalid socket state (%s)\n",\r
IBSP_SOCKET_STATE_STR( socket_info->socket_state )) );\r
*lpErrno = WSAEINVAL;\r
if( ret )\r
{\r
socket_info->port = NULL;\r
- cl_spinlock_release( &socket_info->event_mutex );\r
- CL_ERROR( IBSP_DBG_CONN, gdbg_lvl, ("ib_create socket failed with %d\n", ret) );\r
+ cl_spinlock_release( &socket_info->mutex );\r
+ IBSP_ERROR( ("ib_create socket failed with %d\n", ret) );\r
*lpErrno = WSAENOBUFS;\r
goto error;\r
}\r
\r
IBSP_CHANGE_SOCKET_STATE( socket_info, IBSP_BIND );\r
\r
- cl_spinlock_release( &socket_info->event_mutex );\r
+ cl_spinlock_release( &socket_info->mutex );\r
\r
CL_EXIT( IBSP_DBG_CONN, gdbg_lvl );\r
return 0;\r
cl_atomic_inc( &g_ibsp.CloseSocket_count );\r
#endif\r
\r
- cl_spinlock_acquire( &socket_info->event_mutex );\r
-\r
+ cl_spinlock_acquire( &socket_info->mutex );\r
\r
old_state = socket_info->socket_state;\r
IBSP_CHANGE_SOCKET_STATE( socket_info, IBSP_CLOSING );\r
\r
- cl_spinlock_release( &socket_info->event_mutex );\r
+ cl_spinlock_release( &socket_info->mutex );\r
\r
shutdown_and_destroy_socket_info( socket_info, old_state );\r
\r
- cl_spinlock_acquire( &socket_info->event_mutex );\r
+ cl_spinlock_acquire( &socket_info->mutex );\r
IBSP_CHANGE_SOCKET_STATE( socket_info, IBSP_CLOSED );\r
- cl_spinlock_release( &socket_info->event_mutex );\r
+ cl_spinlock_release( &socket_info->mutex );\r
\r
/* Take off socket_info_list and put on closed_socket_info_list */\r
cl_spinlock_acquire( &g_ibsp.socket_info_mutex );\r
/* Notify ib_cleanup_thread() to free this */\r
SetEvent( g_ibsp.ib_cleanup_event );\r
\r
-\r
CL_EXIT( IBSP_DBG_CONN, gdbg_lvl );\r
\r
*lpErrno = 0;\r
LPWSANETWORKEVENTS lpNetworkEvents,\r
LPINT lpErrno )\r
{\r
- struct ibsp_socket_info *socket_info = (struct ibsp_socket_info *)s;\r
+ struct ibsp_socket_info *socket_info = (struct ibsp_socket_info *)s;\r
\r
- CL_ENTER( IBSP_DBG_NEV, gdbg_lvl );\r
+ IBSP_ENTER( IBSP_DBG_NEV );\r
\r
- fzprint(("%s():%d:0x%x:0x%x: socket=0x%p \n", __FUNCTION__,\r
- __LINE__, GetCurrentProcessId(), GetCurrentThreadId(), s));\r
-\r
- cl_spinlock_acquire( &socket_info->event_mutex );\r
+ ResetEvent( hEventObject );\r
\r
- lpNetworkEvents->lNetworkEvents = 0;\r
+ lpNetworkEvents->lNetworkEvents =\r
+ InterlockedExchange( &socket_info->network_events, 0 );\r
\r
- if( socket_info->network_events & FD_ACCEPT )\r
+ if( lpNetworkEvents->lNetworkEvents & FD_ACCEPT )\r
{\r
- CL_TRACE( IBSP_DBG_NEV, gdbg_lvl, ("FD_ACCEPT\n") );\r
- lpNetworkEvents->lNetworkEvents |= FD_ACCEPT;\r
- lpNetworkEvents->iErrorCode[FD_ACCEPT_BIT] = socket_info->errno_accept;\r
+ IBSP_TRACE1( IBSP_DBG_NEV,\r
+ ("socket %p notify FD_ACCEPT at time %I64d\n",\r
+ socket_info, cl_get_time_stamp()) );\r
+ lpNetworkEvents->iErrorCode[FD_ACCEPT_BIT] = 0;\r
}\r
\r
- if( socket_info->network_events & FD_CONNECT )\r
+ if( lpNetworkEvents->lNetworkEvents & FD_CONNECT )\r
{\r
- CL_TRACE( IBSP_DBG_NEV, gdbg_lvl, ("FD_CONNECT\n") );\r
- lpNetworkEvents->lNetworkEvents |= FD_CONNECT;\r
+ IBSP_TRACE1( IBSP_DBG_NEV,\r
+ ("socket %p notify FD_CONNECT %d at time %I64d\n",\r
+ socket_info, socket_info->errno_connect, cl_get_time_stamp()) );\r
lpNetworkEvents->iErrorCode[FD_CONNECT_BIT] = socket_info->errno_connect;\r
}\r
\r
- socket_info->network_events = 0;\r
-\r
- ResetEvent( hEventObject );\r
-\r
- cl_spinlock_release( &socket_info->event_mutex );\r
-\r
- CL_TRACE_EXIT( IBSP_DBG_NEV, gdbg_lvl,\r
- ("returning %x, accept=%d, connect=%d\n",\r
- lpNetworkEvents->lNetworkEvents,\r
- lpNetworkEvents->iErrorCode[FD_ACCEPT_BIT],\r
- lpNetworkEvents->iErrorCode[FD_CONNECT_BIT]) );\r
-\r
*lpErrno = 0;\r
+ IBSP_EXIT( IBSP_DBG_NEV );\r
return 0;\r
}\r
\r
long lNetworkEvents,\r
LPINT lpErrno )\r
{\r
- struct ibsp_socket_info *socket_info = (struct ibsp_socket_info *)s;\r
+ struct ibsp_socket_info *socket_info = (struct ibsp_socket_info *)s;\r
+ long events;\r
\r
- CL_ENTER( IBSP_DBG_NEV, gdbg_lvl );\r
+ IBSP_ENTER( IBSP_DBG_NEV );\r
\r
- fzprint(("%s():%d:0x%x:0x%x: socket=0x%p\n", __FUNCTION__,\r
- __LINE__, GetCurrentProcessId(), GetCurrentThreadId(), s));\r
+ IBSP_TRACE4( IBSP_DBG_NEV,\r
+ ("Socket %p requesting notifiction of %d on event %p.\n",\r
+ s, lNetworkEvents, hEventObject) );\r
\r
if( (lNetworkEvents & ~(FD_ACCEPT | FD_CONNECT)) != 0 )\r
{\r
- CL_TRACE_EXIT(IBSP_DBG_NEV, gdbg_lvl,\r
+ IBSP_TRACE_EXIT(IBSP_DBG_NEV,\r
("Unknown lNetworkEvents flag given (%x)\n", lNetworkEvents) );\r
*lpErrno = WSAEINVAL;\r
return SOCKET_ERROR;\r
}\r
\r
- CL_ASSERT( (hEventObject == NULL && socket_info->event_select != NULL) ||\r
- (hEventObject != NULL && socket_info->event_select == NULL) );\r
CL_ASSERT( lpErrno );\r
\r
- if( hEventObject )\r
- socket_info->event_select = hEventObject;\r
socket_info->event_mask = lNetworkEvents;\r
+ InterlockedExchangePointer( &socket_info->event_select, hEventObject );\r
\r
- CL_EXIT( IBSP_DBG_NEV, gdbg_lvl );\r
+ events = InterlockedCompareExchange( &socket_info->network_events, 0, 0 );\r
+ /* Check for existing events and signal as appropriate. */\r
+ if( (socket_info->event_mask & events) && hEventObject )\r
+ {\r
+ IBSP_TRACE2( IBSP_DBG_NEV,\r
+ ("Signaling eventHandle %p .\n", socket_info->event_select) );\r
+ SetEvent( hEventObject );\r
+ }\r
+\r
+ IBSP_EXIT( IBSP_DBG_NEV );\r
return 0;\r
}\r
\r
GUID SANRDMARead = WSAID_RDMAREAD;\r
GUID SANMemoryRegistrationCacheCallback = WSAID_MEMORYREGISTRATIONCACHECALLBACK;\r
\r
- CL_ENTER( IBSP_DBG_OPT, gdbg_lvl );\r
+ IBSP_ENTER( IBSP_DBG_OPT );\r
\r
UNUSED_PARAM( cbInBuffer );\r
UNUSED_PARAM( lpOverlapped );\r
UNUSED_PARAM( lpCompletionRoutine );\r
UNUSED_PARAM( lpThreadId );\r
\r
- fzprint(("%s():%d:0x%x:0x%x: socket=0x%p overlapped=0x%p\n", __FUNCTION__,\r
- __LINE__, GetCurrentProcessId(), GetCurrentThreadId(), s, lpOverlapped));\r
-\r
if( dwIoControlCode == SIO_GET_EXTENSION_FUNCTION_POINTER )\r
{\r
/* This a special case. The socket handle passed is not valid. */\r
- CL_TRACE( IBSP_DBG_OPT, gdbg_lvl, ("Get extension function pointer\n") );\r
+ IBSP_TRACE1( IBSP_DBG_OPT, ("Get extension function pointer\n") );\r
\r
if( memcmp( lpvInBuffer, &SANRegisterMemory, sizeof(GUID) ) == 0 )\r
{\r
{\r
if( no_read )\r
{\r
+ IBSP_TRACE( IBSP_DBG_WARN | IBSP_DBG_OPT,\r
+ ("RDMA_READ disabled.\n") );\r
*lpErrno = WSAEOPNOTSUPP;\r
return SOCKET_ERROR;\r
}\r
}\r
else\r
{\r
- CL_EXIT_ERROR( IBSP_DBG_OPT, gdbg_lvl, ("invalid extension GUID\n") );\r
+ IBSP_ERROR_EXIT( ("invalid extension GUID\n") );\r
*lpErrno = WSAEINVAL;\r
return SOCKET_ERROR;\r
}\r
+ IBSP_EXIT( IBSP_DBG_OPT );\r
return 0;\r
}\r
\r
{\r
/* Seen in real life with overlap/client test. \r
* The switch closes a socket then calls this. Why? */\r
- IBSP_ERROR_EXIT( ("invalid socket handle %x\n", s) );\r
+ IBSP_TRACE_EXIT( IBSP_DBG_WARN | IBSP_DBG_IO,\r
+ ("invalid socket handle %x\n", s) );\r
*lpErrno = WSAENOTSOCK;\r
return SOCKET_ERROR;\r
}\r
CL_ASSERT( lpCompletionRoutine == NULL );\r
CL_ASSERT( lpOverlapped != NULL );\r
\r
+ cl_spinlock_acquire( &socket_info->mutex );\r
/* Check the state of the socket */\r
- if( socket_info->socket_state != IBSP_CONNECTED )\r
+ switch( socket_info->socket_state )\r
{\r
+ case IBSP_CONNECTED:\r
+ case IBSP_DISCONNECTED:\r
+ break;\r
+\r
+	default:
+		/* Must drop the mutex acquired above before the error return,
+		 * otherwise we return holding the spinlock and deadlock the
+		 * next operation on this socket. */
+		cl_spinlock_release( &socket_info->mutex );
IBSP_ERROR_EXIT( ("Socket is not in connected socket_state \n") );\r
*lpErrno = WSAENOTCONN;\r
return SOCKET_ERROR;\r
}\r
+ cl_spinlock_release( &socket_info->mutex );\r
\r
if( socket_info->qp_error )\r
{\r
#ifdef _DEBUG_\r
if( lpBuffers[0].len >= 40 )\r
{\r
- debug_dump_buffer( IBSP_DBG_WQ, gdbg_lvl, "SEND", lpBuffers[0].buf, 40 );\r
+ debug_dump_buffer( IBSP_DBG_WQ | IBSP_DBG_LEVEL4, "SEND",\r
+ lpBuffers[0].buf, 40 );\r
}\r
#endif\r
\r
{\r
struct ibsp_socket_info *socket_info = NULL;\r
\r
- CL_ENTER( IBSP_DBG_CONN, gdbg_lvl );\r
+ IBSP_ENTER( IBSP_DBG_SI );\r
\r
UNUSED_PARAM( g );\r
\r
if( af != AF_INET )\r
{\r
- CL_ERROR( IBSP_DBG_CONN, gdbg_lvl,\r
+ IBSP_ERROR(\r
("bad family %d instead of %d\n", af, AF_INET) );\r
*lpErrno = WSAEAFNOSUPPORT;\r
goto error;\r
\r
if( type != SOCK_STREAM )\r
{\r
- CL_ERROR( IBSP_DBG_CONN, gdbg_lvl,\r
+ IBSP_ERROR(\r
("bad type %d instead of %d\n", type, SOCK_STREAM) );\r
*lpErrno = WSAEPROTOTYPE;\r
goto error;\r
\r
if( protocol != IPPROTO_TCP )\r
{\r
- CL_ERROR( IBSP_DBG_CONN, gdbg_lvl,\r
+ IBSP_ERROR(\r
("bad protocol %d instead of %d\n", protocol, IPPROTO_TCP) );\r
*lpErrno = WSAEPROTONOSUPPORT;\r
goto error;\r
\r
if( (dwFlags != WSA_FLAG_OVERLAPPED) )\r
{\r
- CL_ERROR( IBSP_DBG_CONN, gdbg_lvl,\r
+ IBSP_ERROR(\r
("dwFlags is not WSA_FLAG_OVERLAPPED (%x)\n", dwFlags) );\r
*lpErrno = WSAEINVAL;\r
goto error;\r
socket_info = create_socket_info();\r
if( socket_info == NULL )\r
{\r
- CL_ERROR( IBSP_DBG_CONN, gdbg_lvl, ("create_socket_info return NULL\n") );\r
+ IBSP_ERROR( ("create_socket_info return NULL\n") );\r
*lpErrno = WSAENOBUFS;\r
goto error;\r
}\r
ret = setup_duplicate_socket( socket_info, lpProtocolInfo->dwProviderReserved );\r
if( ret )\r
{\r
- CL_ERROR( IBSP_DBG_CONN, gdbg_lvl,\r
+ IBSP_ERROR(\r
("setup_duplicate_socket failed with %d\n",ret) );\r
*lpErrno = ret;\r
goto error;\r
\r
if( socket_info->switch_socket != INVALID_SOCKET )\r
{\r
- fzprint(("%s():%d:0x%x:0x%x: socket_info=0x%p switch_socket=0x%p \n",\r
- __FUNCTION__, __LINE__, GetCurrentProcessId(), GetCurrentThreadId(),\r
- socket_info, socket_info->switch_socket));\r
+ IBSP_TRACE1( IBSP_DBG_SI, ("socket_info=0x%p switch_socket=0x%p \n",\r
+ socket_info, socket_info->switch_socket) );\r
\r
STAT_INC( wpusocket_num );\r
}\r
\r
if( socket_info->switch_socket == INVALID_SOCKET )\r
{\r
- CL_ERROR( IBSP_DBG_CONN, gdbg_lvl,\r
+ IBSP_ERROR(\r
("WPUCreateSocketHandle() failed: %d\n", *lpErrno) );\r
/* lpErrno has just been set */\r
goto error;\r
fzprint(("%s():%d:0x%x:0x%x: socket=0x%p\n", __FUNCTION__,\r
__LINE__, GetCurrentProcessId(), GetCurrentThreadId(), socket_info));\r
\r
- CL_TRACE_EXIT( IBSP_DBG_CONN, gdbg_lvl,\r
+ IBSP_TRACE_EXIT( IBSP_DBG_SI,\r
("returning socket handle %p\n", socket_info) );\r
\r
return (SOCKET) socket_info;\r
\r
CL_ASSERT( *lpErrno != 0 );\r
\r
- CL_EXIT_ERROR( IBSP_DBG_CONN, gdbg_lvl, ("Returning error %d\n", *lpErrno) );\r
+ IBSP_ERROR_EXIT( ("Returning error %d\n", *lpErrno) );\r
\r
return INVALID_SOCKET;\r
}\r
#include <ws2spi.h>\r
#include <ws2san.h>\r
\r
-#include "iba/ib_al.h"\r
+#include <iba/ib_al.h>\r
+#include <complib/cl_timer.h>\r
\r
#include "ibspdefines.h"\r
#include "ibspdebug.h"\r
\r
extern struct ibspdll_globals g_ibsp;\r
\r
+extern uint32_t g_max_inline;\r
+\r
#endif /* IBSPDLL_H */\r
ib_deregister_all_mr(\r
IN struct mr_list *mem_list );\r
\r
+void\r
+ibsp_post_select_event(\r
+ struct ibsp_socket_info *socket_info,\r
+ int event,\r
+ int error );\r
+\r
/* ibspdll.c */\r
extern int\r
init_globals( void );\r
} info;\r
\r
/* Variables associated with IBSPSelectEvent */\r
- cl_spinlock_t event_mutex;\r
WSAEVENT event_select; /* Handle to Event Object */\r
long event_mask; /* Events we care about */\r
long network_events; /* Events that happenned */\r
int errno_connect; /* errno code (if any) returned by connect */\r
- int errno_accept; /* errno code (if any) returned by accept */\r
\r
struct ibsp_socket_options socket_options; /* Socket Options */\r
\r
\r
cl_qlist_init( &socket_info->buf_mem_list.list );\r
cl_spinlock_init( &socket_info->buf_mem_list.mutex );\r
- cl_spinlock_init( &socket_info->event_mutex );\r
\r
cl_qlist_init( &socket_info->info.listen.list );\r
\r
IBSP_ENTER( IBSP_DBG_SI );\r
\r
cl_spinlock_destroy( &socket_info->buf_mem_list.mutex );\r
- cl_spinlock_destroy( &socket_info->event_mutex );\r
cl_spinlock_destroy( &socket_info->mutex );\r
\r
cl_spinlock_destroy( &socket_info->send_lock );\r