\r
#include "al.h"\r
#include "al_ca.h"\r
-#include "al_cm_shared.h"\r
+#include "al_cm_cep.h"\r
#include "al_common.h"\r
#include "al_debug.h"\r
#include "al_mad_pool.h"\r
}\r
\r
\r
-\r
-static void\r
-__free_conns(\r
- IN const ib_al_handle_t h_al )\r
-{\r
- cl_list_item_t *p_list_item;\r
- ib_cm_handle_t h_conn;\r
-\r
- /*\r
- * Report any outstanding connections left lying around. We should\r
- * never enter the loop below if the code is written correctly.\r
- */\r
- for( p_list_item = cl_qlist_head( &h_al->conn_list );\r
- p_list_item != cl_qlist_end( &h_al->conn_list );\r
- p_list_item = cl_qlist_head( &h_al->conn_list ) )\r
- {\r
- CL_ASSERT( !p_list_item );\r
-\r
- h_conn = PARENT_STRUCT( p_list_item, al_conn_t, al_item );\r
-\r
- /* Release the connection object, so the CM can clean-up properly. */\r
- cm_cleanup_conn( h_conn );\r
- }\r
-}\r
-\r
-\r
-\r
void\r
free_al(\r
IN al_obj_t *p_obj )\r
__free_mads( h_al );\r
\r
/* Cleanup any left-over connections. */\r
- __free_conns( h_al );\r
+ al_cep_cleanup_al( h_al );\r
\r
#ifdef CL_KERNEL\r
cl_vector_destroy( &h_al->hdl_vector );\r
}\r
\r
\r
-\r
-\r
ib_api_status_t\r
ib_query_ca_by_guid(\r
IN const ib_al_handle_t h_al,\r
\r
\r
\r
-void\r
-al_insert_conn(\r
- IN const ib_al_handle_t h_al,\r
- IN const ib_cm_handle_t h_conn )\r
-{\r
- ref_al_obj( &h_al->obj );\r
- cl_spinlock_acquire( &h_al->obj.lock );\r
-\r
- h_conn->h_al = h_al;\r
- cl_qlist_insert_tail( &h_al->conn_list, &h_conn->al_item );\r
-#ifdef CL_KERNEL\r
- h_conn->hdl = al_hdl_insert( h_al, h_conn, AL_OBJ_TYPE_H_CONN );\r
-#endif\r
-\r
- cl_spinlock_release( &h_al->obj.lock );\r
-}\r
-\r
-\r
-\r
-void\r
-al_remove_conn(\r
- IN const ib_cm_handle_t h_conn )\r
-{\r
- cl_spinlock_acquire( &h_conn->h_al->obj.lock );\r
- cl_qlist_remove_item( &h_conn->h_al->conn_list, &h_conn->al_item );\r
-#ifdef CL_KERNEL\r
- al_hdl_free( h_conn->h_al, h_conn->hdl );\r
-#endif\r
- cl_spinlock_release( &h_conn->h_al->obj.lock );\r
-\r
- deref_al_obj( &h_conn->h_al->obj );\r
-\r
- h_conn->h_al = NULL;\r
-#ifdef CL_KERNEL\r
- h_conn->hdl = AL_INVALID_HANDLE;\r
-#endif\r
-}\r
-\r
-\r
-\r
void\r
al_insert_mad(\r
IN const ib_al_handle_t h_al,\r
\r
cl_qlist_t key_list;\r
cl_qlist_t query_list;\r
- cl_qlist_t conn_list;\r
+ cl_qlist_t cep_list;\r
\r
#ifdef CL_KERNEL\r
/* Handle manager is only needed in the kernel. */\r
#include "al_mgr.h"\r
#include "al_pnp.h"\r
#include "al_qp.h"\r
-\r
-#if defined(CL_KERNEL)\r
-#include "al_cm.h"\r
-#endif\r
#include "ib_common.h"\r
\r
\r
-void\r
-ci_ca_comp_cb(\r
- IN void *cq_context );\r
-\r
void\r
ci_ca_process_event_cb(\r
IN cl_async_proc_item_t* p_async_item );\r
\r
\r
\r
-void\r
-ci_ca_comp_cb(\r
- IN void *cq_context )\r
-{\r
- ib_cq_handle_t h_cq = (ib_cq_handle_t)cq_context;\r
-\r
- if( h_cq->h_wait_obj )\r
- cl_waitobj_signal( h_cq->h_wait_obj );\r
- else\r
- h_cq->pfn_user_comp_cb( h_cq, (void*)h_cq->obj.context );\r
-}\r
-\r
-\r
-\r
-\r
ib_api_status_t\r
get_port_info(\r
IN al_ci_ca_t *p_ci_ca )\r
case IB_AE_QP_COMM:\r
case IB_AE_QP_APM:\r
case IB_AE_QP_APM_ERROR:\r
-#if defined(CL_KERNEL)\r
- cm_async_event_cb( &p_event_item->event_rec );\r
-#endif\r
- /* Fall through next case. */\r
-\r
case IB_AE_QP_FATAL:\r
case IB_AE_RQ_ERROR:\r
case IB_AE_SQ_ERROR:\r
--- /dev/null
+/*\r
+ * Copyright (c) 2005 SilverStorm Technologies. All rights reserved.\r
+ *\r
+ * This software is available to you under the OpenIB.org BSD license\r
+ * below:\r
+ *\r
+ * Redistribution and use in source and binary forms, with or\r
+ * without modification, are permitted provided that the following\r
+ * conditions are met:\r
+ *\r
+ * - Redistributions of source code must retain the above\r
+ * copyright notice, this list of conditions and the following\r
+ * disclaimer.\r
+ *\r
+ * - Redistributions in binary form must reproduce the above\r
+ * copyright notice, this list of conditions and the following\r
+ * disclaimer in the documentation and/or other materials\r
+ * provided with the distribution.\r
+ *\r
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,\r
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\r
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND\r
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS\r
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN\r
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN\r
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\r
+ * SOFTWARE.\r
+ *\r
+ * $Id:$\r
+ */\r
+\r
+\r
+#pragma once\r
+\r
+#ifndef _AL_CM_CEP_H_\r
+#define _AL_CM_CEP_H_\r
+\r
+\r
+#include <iba/ib_al.h>\r
+#include "al_common.h"\r
+\r
+\r
+#define CEP_EVENT_TIMEOUT 0x80000000\r
+#define CEP_EVENT_RECV 0x40000000\r
+#define CEP_EVENT_REQ 0x00000001\r
+#define CEP_EVENT_REP 0x00000002\r
+#define CEP_EVENT_RTU 0x00000004\r
+#define CEP_EVENT_DREQ 0x00000008\r
+#define CEP_EVENT_DREP 0x00000010\r
+#define CEP_EVENT_MRA 0x00000020\r
+#define CEP_EVENT_REJ 0x00000040\r
+#define CEP_EVENT_LAP 0x00000080\r
+#define CEP_EVENT_APR 0x00000100\r
+#define CEP_EVENT_SIDR 0x00800000\r
+\r
+\r
+#define AL_INVALID_CID 0xFFFFFFFF\r
+\r
+\r
+typedef ib_api_status_t\r
+(*al_pfn_cep_cb_t)(\r
+ IN const ib_al_handle_t h_al,\r
+ IN ib_cep_t* const p_cep );\r
+/* PARAMETERS\r
+* h_al\r
+* [in] Handle to the AL instance to pass into the al_cep_poll call.\r
+*\r
+* p_cep\r
+* [in] Pointer to an ib_cep_t structure containing the CID and context\r
+* for the CEP on which the event occurred. The CID should be passed
+* into the al_cep_poll call.\r
+*\r
+* RETURN VALUES:\r
+* IB_SUCCESS\r
+* Recipient successfully processed the event.\r
+*\r
+* IB_ERROR\r
+* The event could not be processed.\r
+*\r
+* NOTES\r
+* The callback is invoked at DISPATCH_LEVEL.\r
+*\r
+* Recipients of the callback are expected to call al_cep_poll to retrieve\r
+* event specific details until al_cep_poll returns IB_NOT_DONE. This may\r
+* be done in a different thread context.\r
+*********/\r
+\r
+\r
+ib_api_status_t\r
+create_cep_mgr(\r
+ IN al_obj_t* const p_parent_obj );\r
+\r
+\r
+void\r
+al_cep_cleanup_al(\r
+ IN const ib_al_handle_t h_al );\r
+\r
+\r
+ib_api_status_t\r
+al_create_cep(\r
+ IN ib_al_handle_t h_al,\r
+ IN al_pfn_cep_cb_t pfn_cb,\r
+ IN void *context,\r
+ OUT net32_t* const p_cid );\r
+/*\r
+* NOTES\r
+* This function may be invoked at DISPATCH_LEVEL\r
+*********/\r
+\r
+\r
+/* Destruction is asynchronous. */\r
+ib_api_status_t\r
+al_destroy_cep(\r
+ IN ib_al_handle_t h_al,\r
+ IN net32_t cid,\r
+ IN ib_pfn_destroy_cb_t pfn_destroy_cb );\r
+/*
+* NOTES
+* Destruction is asynchronous; pfn_destroy_cb, if provided, is invoked
+* once the CEP has been fully destroyed.
+* Clients must not invoke this function from a CEP callback, but should
+* instead return IB_CANCELLED or other appropriate value.
+*********/
+\r
+ib_api_status_t\r
+al_cep_listen(\r
+ IN ib_al_handle_t h_al,\r
+ IN net32_t cid,\r
+ IN ib_cep_listen_t* const p_listen_info );\r
+\r
+\r
+ib_api_status_t\r
+al_cep_pre_req(\r
+ IN ib_al_handle_t h_al,\r
+ IN net32_t cid,\r
+ IN const ib_cm_req_t* const p_cm_req,\r
+ OUT ib_qp_mod_t* const p_init );\r
+\r
+\r
+ib_api_status_t\r
+al_cep_send_req(\r
+ IN ib_al_handle_t h_al,\r
+ IN net32_t cid );\r
+\r
+\r
+ib_api_status_t\r
+al_cep_pre_rep(\r
+ IN ib_al_handle_t h_al,\r
+ IN net32_t cid,\r
+ IN void *context,\r
+ IN const ib_cm_rep_t* const p_cm_rep,\r
+ OUT ib_qp_mod_t* const p_init );\r
+\r
+\r
+ib_api_status_t\r
+al_cep_send_rep(\r
+ IN ib_al_handle_t h_al,\r
+ IN net32_t cid );\r
+\r
+\r
+ib_api_status_t\r
+al_cep_get_rtr_attr(\r
+ IN ib_al_handle_t h_al,\r
+ IN net32_t cid,\r
+ OUT ib_qp_mod_t* const p_rtr );\r
+\r
+\r
+ib_api_status_t\r
+al_cep_get_rts_attr(\r
+ IN ib_al_handle_t h_al,\r
+ IN net32_t cid,\r
+ OUT ib_qp_mod_t* const p_rts );\r
+\r
+\r
+ib_api_status_t\r
+al_cep_rtu(\r
+ IN ib_al_handle_t h_al,\r
+ IN net32_t cid,\r
+ IN const uint8_t* p_pdata OPTIONAL,\r
+ IN uint8_t pdata_len );\r
+\r
+\r
+ib_api_status_t\r
+al_cep_rej(\r
+ IN ib_al_handle_t h_al,\r
+ IN net32_t cid,\r
+ IN ib_rej_status_t rej_status,\r
+ IN const uint8_t* const p_ari,\r
+ IN uint8_t ari_len,\r
+ IN const uint8_t* const p_pdata,\r
+ IN uint8_t pdata_len );\r
+\r
+\r
+ib_api_status_t\r
+al_cep_mra(\r
+ IN ib_al_handle_t h_al,\r
+ IN net32_t cid,\r
+ IN const ib_cm_mra_t* const p_cm_mra );\r
+\r
+\r
+ib_api_status_t\r
+al_cep_lap(\r
+ IN ib_al_handle_t h_al,\r
+ IN net32_t cid,\r
+ IN const ib_cm_lap_t* const p_cm_lap );\r
+\r
+\r
+ib_api_status_t\r
+al_cep_pre_apr(\r
+ IN ib_al_handle_t h_al,\r
+ IN net32_t cid,\r
+ IN const ib_cm_apr_t* const p_cm_apr,\r
+ OUT ib_qp_mod_t* const p_apr );\r
+\r
+\r
+ib_api_status_t\r
+al_cep_send_apr(\r
+ IN ib_al_handle_t h_al,\r
+ IN net32_t cid );\r
+\r
+\r
+ib_api_status_t\r
+al_cep_dreq(\r
+ IN ib_al_handle_t h_al,\r
+ IN net32_t cid,\r
+ IN const uint8_t* const p_pdata OPTIONAL,\r
+ IN const uint8_t pdata_len );\r
+\r
+\r
+ib_api_status_t\r
+al_cep_drep(\r
+ IN ib_al_handle_t h_al,\r
+ IN net32_t cid,\r
+ IN const ib_cm_drep_t* const p_cm_drep );\r
+\r
+\r
+ib_api_status_t\r
+al_cep_get_timewait(\r
+ IN ib_al_handle_t h_al,\r
+ IN net32_t cid,\r
+ OUT uint64_t* const p_timewait_us );\r
+\r
+\r
+ib_api_status_t\r
+al_cep_migrate(\r
+ IN ib_al_handle_t h_al,\r
+ IN net32_t cid );\r
+\r
+\r
+ib_api_status_t\r
+al_cep_established(\r
+ IN ib_al_handle_t h_al,\r
+ IN net32_t cid );\r
+\r
+\r
+ib_api_status_t\r
+al_cep_poll(\r
+ IN ib_al_handle_t h_al,\r
+ IN net32_t cid,\r
+ IN OUT ib_cep_t* const p_new_cep,\r
+ OUT ib_mad_element_t** const pp_mad );\r
+\r
+\r
+#ifdef CL_KERNEL\r
+ib_api_status_t\r
+al_cep_xchg_irp(\r
+ IN ib_al_handle_t h_al,\r
+ IN net32_t cid,\r
+ IN IRP* const p_new_irp,\r
+ OUT IRP** const pp_old_irp );\r
+\r
+ib_api_status_t\r
+al_cep_cancel_irp(\r
+ IN ib_al_handle_t h_al,\r
+ IN net32_t cid,\r
+ IN IRP* const p_irp );\r
+#endif /* CL_KERNEL */\r
+\r
+\r
+/****s* Access Layer/al_cep_sreq_t\r
+* NAME\r
+* al_cep_sreq_t\r
+*\r
+* DESCRIPTION\r
+* Connection request information used to establish a new connection.\r
+*\r
+* SYNOPSIS\r
+*/\r
+typedef struct _al_cep_sreq\r
+{\r
+ ib_net64_t svc_id;\r
+\r
+ ib_path_rec_t* __ptr64 p_path;\r
+\r
+ const uint8_t* __ptr64 p_pdata;\r
+ uint8_t pdata_len;\r
+\r
+ uint8_t max_cm_retries;\r
+ ib_net16_t pkey;\r
+ uint32_t timeout_ms;\r
+\r
+} al_cep_sreq_t;\r
+/*\r
+* FIELDS\r
+* svc_id\r
+* The ID of the remote service to which the SIDR request is\r
+* being made.\r
+*\r
+* p_path\r
+* Path information over which to send the request.\r
+*\r
+* p_pdata\r
+* Optional user-defined private data sent as part of the SIDR request.\r
+*\r
+* pdata_len\r
+* Defines the size of the user-defined private data.\r
+*\r
+* max_cm_retries\r
+* The maximum number of times that either CM should\r
+* resend a SIDR message.\r
+*\r
+* timeout_ms\r
+* Timeout value in milli-seconds for the SIDR REQ to expire. The CM will\r
+* add twice packet lifetime to this value to determine the actual timeout\r
+* value used.\r
+*\r
+* pkey\r
+* pkey to be used as part of the request.\r
+*\r
+* SEE ALSO\r
+* al_cep_sreq\r
+*****/\r
+\r
+ib_api_status_t\r
+al_cep_sreq(\r
+ IN ib_al_handle_t h_al,\r
+ IN net32_t cid,\r
+ IN const al_cep_sreq_t* const p_sreq );\r
+\r
+\r
+/****s* Access Layer/al_cep_srep_t\r
+* NAME\r
+* al_cep_srep_t\r
+*\r
+* DESCRIPTION\r
+* SIDR reply information.\r
+*\r
+* SYNOPSIS\r
+*/\r
+typedef struct _al_cep_srep\r
+{\r
+ net32_t qp_num;\r
+ net32_t qkey;\r
+\r
+ const uint8_t* __ptr64 p_pdata;\r
+ const void* __ptr64 p_info;\r
+\r
+ uint8_t pdata_len;\r
+ uint8_t info_len;\r
+\r
+ ib_sidr_status_t status;\r
+\r
+} al_cep_srep_t;\r
+/*\r
+* FIELDS\r
+* qp_num\r
+* The number of the queue pair on which the requested service\r
+* is supported.\r
+*\r
+* qkey
+* The QKEY of the returned queue pair.\r
+*\r
+* p_pdata\r
+* Optional user-defined private data sent as part of the SIDR reply.\r
+*\r
+* p_info\r
+* Optional "additional information" sent as part of the SIDR reply.
+*\r
+* pdata_len\r
+* Size of the user-defined private data.\r
+*\r
+* info_len\r
+* Size of the "additional information".\r
+*\r
+* status\r
+* sidr status value returned back to a previously received REQ.\r
+*\r
+* SEE ALSO\r
+* al_cep_srep\r
+*****/\r
+\r
+ib_api_status_t\r
+al_cep_srep(\r
+ IN ib_al_handle_t h_al,\r
+ IN net32_t cid,\r
+ IN const al_cep_srep_t* const p_sreq );\r
+\r
+\r
+\r
+\r
+/*\r
+ * Return the local ACK timeout value based on the given packet lifetime\r
+ * and target ACK delay. Both input values are assumed to be in the form\r
+ * 4.096 x 2 ^ input.\r
+ */\r
+#define MAX_LOCAL_ACK_TIMEOUT 0x1F /* limited to 5 bits */\r
+\r
+inline uint8_t\r
+calc_lcl_ack_timeout(\r
+ IN const uint8_t round_trip_time,\r
+ IN const uint8_t target_ack_delay )\r
+{\r
+ uint64_t timeout;\r
+ uint8_t local_ack_timeout;\r
+\r
+ if( !target_ack_delay )\r
+ {\r
+ if( round_trip_time > MAX_LOCAL_ACK_TIMEOUT )\r
+ return MAX_LOCAL_ACK_TIMEOUT;\r
+ else\r
+ return round_trip_time;\r
+ }\r
+\r
+ /*\r
+ * Since both input and the output values are in the same form, we\r
+ * can ignore the 4.096 portion by dividing it out.\r
+ */\r
+\r
+ /* The input parameter is the round trip time. */\r
+ timeout = (uint64_t)1 << round_trip_time;\r
+\r
+ /* Add in the target ack delay. */\r
+ if( target_ack_delay )\r
+ timeout += (uint64_t)1 << target_ack_delay;\r
+\r
+ /* Calculate the local ACK timeout. */\r
+ local_ack_timeout = 1;\r
+ while( (1ui64 << local_ack_timeout) <= timeout )\r
+ {\r
+ local_ack_timeout++;\r
+\r
+ /* Only 5-bits are valid. */\r
+ if( local_ack_timeout > MAX_LOCAL_ACK_TIMEOUT )\r
+ return MAX_LOCAL_ACK_TIMEOUT;\r
+ }\r
+\r
+ return local_ack_timeout;\r
+}\r
+\r
+#endif /* _AL_CM_CEP_H_ */\r
uint8_t pdata[IB_REQ_PDATA_SIZE];\r
\r
} PACK_SUFFIX mad_cm_req_t;\r
+C_ASSERT( sizeof(mad_cm_req_t) == MAD_BLOCK_SIZE );\r
\r
#include <complib/cl_packoff.h>\r
\r
p_req->offset51 = (retries << 4);\r
}\r
\r
-static inline void\r
+static inline ib_api_status_t\r
conn_req_set_pdata(\r
IN const uint8_t* const p_data OPTIONAL,\r
IN const uint8_t data_len,\r
\r
if( p_data )\r
{\r
+ if( data_len > IB_REQ_PDATA_SIZE )\r
+ return IB_INVALID_SETTING;\r
+\r
cl_memcpy( p_req->pdata, p_data, data_len );\r
- cl_memclr( p_req->pdata + data_len,\r
- IB_REQ_PDATA_SIZE - data_len );\r
+ cl_memclr( p_req->pdata + data_len, IB_REQ_PDATA_SIZE - data_len );\r
}\r
else\r
{\r
cl_memclr( p_req->pdata, IB_REQ_PDATA_SIZE );\r
}\r
+ return IB_SUCCESS;\r
}\r
\r
static inline void\r
IN OUT req_path_info_t* const p_path )\r
{\r
if( subn_lcl )\r
- p_path->offset42 = (p_path->offset42 & 0xF0);\r
- else\r
p_path->offset42 = ((p_path->offset42 & 0xF0) | 0x08);\r
+ else\r
+ p_path->offset42 = (p_path->offset42 & 0xF0);\r
}\r
\r
static inline uint8_t\r
uint8_t pdata[IB_MRA_PDATA_SIZE];\r
\r
} PACK_SUFFIX mad_cm_mra_t;\r
+C_ASSERT( sizeof(mad_cm_mra_t) == MAD_BLOCK_SIZE );\r
\r
#include <complib/cl_packoff.h>\r
\r
IN const uint8_t data_len,\r
IN OUT mad_cm_mra_t* const p_mra )\r
{\r
- if( p_data && data_len > IB_MRA_PDATA_SIZE )\r
- return IB_INVALID_SETTING;\r
-\r
if( p_data )\r
{\r
+ if( data_len > IB_MRA_PDATA_SIZE )\r
+ return IB_INVALID_SETTING;\r
+\r
cl_memcpy( p_mra->pdata, p_data, data_len );\r
cl_memclr( p_mra->pdata + data_len, IB_MRA_PDATA_SIZE - data_len );\r
}\r
uint8_t pdata[IB_REJ_PDATA_SIZE];\r
\r
} PACK_SUFFIX mad_cm_rej_t;\r
+C_ASSERT( sizeof(mad_cm_rej_t) == MAD_BLOCK_SIZE );\r
\r
#include <complib/cl_packoff.h>\r
\r
IN const uint8_t data_len,\r
IN OUT mad_cm_rej_t* const p_rej )\r
{\r
- if( p_data && data_len > IB_REJ_PDATA_SIZE )\r
- return IB_INVALID_PARAMETER;\r
-\r
if( p_data )\r
{\r
+ if( data_len > IB_REJ_PDATA_SIZE )\r
+ return IB_INVALID_SETTING;\r
+\r
cl_memcpy( p_rej->pdata, p_data, data_len );\r
- cl_memclr( p_rej->pdata + data_len,\r
- IB_REJ_PDATA_SIZE - data_len );\r
+ cl_memclr( p_rej->pdata + data_len, IB_REJ_PDATA_SIZE - data_len );\r
}\r
else\r
{\r
uint8_t pdata[IB_REP_PDATA_SIZE];\r
\r
} PACK_SUFFIX mad_cm_rep_t;\r
+C_ASSERT( sizeof(mad_cm_rep_t) == MAD_BLOCK_SIZE );\r
\r
#include <complib/cl_packoff.h>\r
\r
p_rep->offset27 = (rnr_retry_cnt << 5);\r
}\r
\r
-static inline void\r
+static inline ib_api_status_t\r
conn_rep_set_pdata(\r
IN const uint8_t* const p_data OPTIONAL,\r
- IN const uint8_t rep_len,\r
+ IN const uint8_t data_len,\r
IN OUT mad_cm_rep_t* const p_rep )\r
{\r
CL_ASSERT( p_rep->hdr.class_ver == IB_MCLASS_CM_VER_2 );\r
+\r
if( p_data )\r
{\r
- cl_memcpy( p_rep->pdata, p_data, rep_len );\r
- cl_memclr( p_rep->pdata + rep_len,\r
- IB_REP_PDATA_SIZE - rep_len );\r
+ if( data_len > IB_REP_PDATA_SIZE )\r
+ return IB_INVALID_SETTING;\r
+\r
+ cl_memcpy( p_rep->pdata, p_data, data_len );\r
+ cl_memclr( p_rep->pdata + data_len, IB_REP_PDATA_SIZE - data_len );\r
}\r
else\r
{\r
cl_memclr( p_rep->pdata, IB_REP_PDATA_SIZE );\r
}\r
+ return IB_SUCCESS;\r
}\r
\r
static inline void\r
uint8_t pdata[IB_RTU_PDATA_SIZE];\r
\r
} PACK_SUFFIX mad_cm_rtu_t;\r
+C_ASSERT( sizeof(mad_cm_rtu_t) == MAD_BLOCK_SIZE );\r
\r
#include <complib/cl_packoff.h>\r
\r
-static inline void\r
+static inline ib_api_status_t\r
conn_rtu_set_pdata(\r
IN const uint8_t* const p_data OPTIONAL,\r
- IN const uint8_t rtu_len,\r
+ IN const uint8_t data_len,\r
IN OUT mad_cm_rtu_t* const p_rtu )\r
{\r
if( p_data )\r
{\r
- cl_memcpy( p_rtu->pdata, p_data, rtu_len );\r
- cl_memclr( p_rtu->pdata + rtu_len, IB_RTU_PDATA_SIZE - rtu_len );\r
+ if( data_len > IB_RTU_PDATA_SIZE )\r
+ return IB_INVALID_SETTING;\r
+\r
+ cl_memcpy( p_rtu->pdata, p_data, data_len );\r
+ cl_memclr( p_rtu->pdata + data_len, IB_RTU_PDATA_SIZE - data_len );\r
}\r
else\r
{\r
cl_memclr( p_rtu->pdata, IB_RTU_PDATA_SIZE );\r
}\r
+ return IB_SUCCESS;\r
}\r
\r
/* DREQ */\r
uint8_t pdata[IB_DREQ_PDATA_SIZE];\r
\r
} PACK_SUFFIX mad_cm_dreq_t;\r
+C_ASSERT( sizeof(mad_cm_dreq_t) == MAD_BLOCK_SIZE );\r
\r
#include <complib/cl_packoff.h>\r
\r
__set_low24( &p_dreq->offset8, qpn );\r
}\r
\r
-static inline void\r
+static inline ib_api_status_t\r
conn_dreq_set_pdata(\r
IN const uint8_t* const p_data OPTIONAL,\r
- IN const uint8_t dreq_len,\r
+ IN const uint8_t data_len,\r
IN OUT mad_cm_dreq_t* const p_dreq )\r
{\r
if( p_data )\r
{\r
- cl_memcpy( p_dreq->pdata, p_data, dreq_len );\r
- cl_memclr( p_dreq->pdata + dreq_len,\r
- IB_DREQ_PDATA_SIZE - dreq_len );\r
+ if( data_len > IB_DREQ_PDATA_SIZE )\r
+ return IB_INVALID_SETTING;\r
+\r
+ cl_memcpy( p_dreq->pdata, p_data, data_len );\r
+ cl_memclr( p_dreq->pdata + data_len, IB_DREQ_PDATA_SIZE - data_len );\r
}\r
else\r
{\r
cl_memclr( p_dreq->pdata, IB_DREQ_PDATA_SIZE );\r
}\r
+ return IB_SUCCESS;\r
}\r
\r
static inline void\r
uint8_t pdata[IB_DREP_PDATA_SIZE];\r
\r
} PACK_SUFFIX mad_cm_drep_t;\r
+C_ASSERT( sizeof(mad_cm_drep_t) == MAD_BLOCK_SIZE );\r
\r
#include <complib/cl_packoff.h>\r
\r
-static inline void\r
+static inline ib_api_status_t\r
conn_drep_set_pdata(\r
IN const uint8_t* const p_data OPTIONAL,\r
- IN const uint8_t drep_len,\r
+ IN const uint8_t data_len,\r
IN OUT mad_cm_drep_t* const p_drep )\r
{\r
if( p_data )\r
{\r
- cl_memcpy( p_drep->pdata, p_data, drep_len );\r
- cl_memclr( p_drep->pdata + drep_len,\r
- IB_DREP_PDATA_SIZE - drep_len );\r
+ if( data_len > IB_DREP_PDATA_SIZE )\r
+ return IB_INVALID_SETTING;\r
+\r
+ cl_memcpy( p_drep->pdata, p_data, data_len );\r
+ cl_memclr( p_drep->pdata + data_len, IB_DREP_PDATA_SIZE - data_len );\r
}\r
else\r
{\r
cl_memclr( p_drep->pdata, IB_DREP_PDATA_SIZE );\r
}\r
+ return IB_SUCCESS;\r
}\r
\r
\r
uint8_t pdata[IB_LAP_PDATA_SIZE];\r
\r
} PACK_SUFFIX mad_cm_lap_t;\r
+C_ASSERT( sizeof(mad_cm_lap_t) == MAD_BLOCK_SIZE );\r
\r
#include <complib/cl_packoff.h>\r
\r
IN OUT mad_cm_lap_t* const p_lap )\r
{\r
CL_ASSERT( p_lap->hdr.class_ver == IB_MCLASS_CM_VER_2 );\r
- if( p_data && data_len > IB_LAP_PDATA_SIZE )\r
- return IB_INVALID_PARAMETER;\r
\r
cl_memclr( p_lap->pdata, IB_LAP_PDATA_SIZE );\r
if( p_data )\r
{\r
+ if( data_len > IB_LAP_PDATA_SIZE )\r
+ return IB_INVALID_PARAMETER;\r
+\r
cl_memcpy( p_lap->pdata, p_data, data_len );\r
cl_memclr( p_lap->pdata + data_len,\r
IB_LAP_PDATA_SIZE - data_len );\r
uint8_t pdata[IB_APR_PDATA_SIZE];\r
\r
} PACK_SUFFIX mad_cm_apr_t;\r
+C_ASSERT( sizeof(mad_cm_apr_t) == MAD_BLOCK_SIZE );\r
\r
#include <complib/cl_packoff.h>\r
\r
IN OUT mad_cm_apr_t* const p_apr )\r
{\r
CL_ASSERT( p_apr->hdr.class_ver == IB_MCLASS_CM_VER_2 );\r
- if( p_data && ( data_len > IB_APR_PDATA_SIZE ) )\r
- return IB_INVALID_PARAMETER;\r
-\r
if( p_data )\r
{\r
+ if( data_len > IB_APR_PDATA_SIZE )\r
+ return IB_INVALID_PARAMETER;\r
+\r
cl_memcpy( p_apr->pdata, p_data, data_len );\r
cl_memclr( p_apr->pdata + data_len,\r
IB_APR_PDATA_SIZE - data_len );\r
--- /dev/null
+/*\r
+ * Copyright (c) 2005 SilverStorm Technologies. All rights reserved.\r
+ *\r
+ * This software is available to you under the OpenIB.org BSD license\r
+ * below:\r
+ *\r
+ * Redistribution and use in source and binary forms, with or\r
+ * without modification, are permitted provided that the following\r
+ * conditions are met:\r
+ *\r
+ * - Redistributions of source code must retain the above\r
+ * copyright notice, this list of conditions and the following\r
+ * disclaimer.\r
+ *\r
+ * - Redistributions in binary form must reproduce the above\r
+ * copyright notice, this list of conditions and the following\r
+ * disclaimer in the documentation and/or other materials\r
+ * provided with the distribution.\r
+ *\r
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,\r
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\r
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND\r
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS\r
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN\r
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN\r
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\r
+ * SOFTWARE.\r
+ *\r
+ * $Id:$\r
+ */\r
+\r
+\r
+#include <iba/ib_al.h>\r
+#include "al.h"\r
+#include "al_qp.h"\r
+#include "al_cm_cep.h"\r
+#include "al_cm_conn.h"\r
+#include "al_cm_sidr.h"\r
+#include "al_mgr.h"\r
+#include "al_debug.h"\r
+\r
+\r
+typedef struct _al_listen\r
+{\r
+ al_obj_t obj;\r
+ net32_t cid;\r
+\r
+ ib_pfn_cm_req_cb_t pfn_cm_req_cb;\r
+\r
+ /* valid for ud qp_type only */\r
+ const void* __ptr64 sidr_context;\r
+\r
+} al_listen_t;\r
+\r
+\r
+#ifdef CL_KERNEL\r
+\r
+/*\r
+ * Structure for queuing received MADs to the asynchronous processing\r
+ * manager.\r
+ */\r
+typedef struct _cep_async_mad\r
+{\r
+ cl_async_proc_item_t item;\r
+ ib_al_handle_t h_al;\r
+ ib_cep_t cep;\r
+\r
+} cep_async_mad_t;\r
+\r
+#endif /* CL_KERNEL */\r
+\r
+\r
+/*\r
+ * Transition the QP to the error state to flush all outstanding work
+ * requests and sets the timewait time. This function may be called\r
+ * when destroying the QP in order to flush all work requests, so we\r
+ * cannot call through the main API, or the call will fail since the\r
+ * QP is no longer in the initialize state.\r
+ */\r
+static void\r
+__cep_timewait_qp(\r
+ IN const ib_qp_handle_t h_qp )\r
+{\r
+ uint64_t timewait = 0;\r
+ ib_qp_mod_t qp_mod;\r
+ ib_api_status_t status;\r
+\r
+ AL_ENTER( AL_DBG_CM );\r
+\r
+ CL_ASSERT( h_qp );\r
+\r
+ /*\r
+ * The CM should have set the proper timewait time-out value. Reset\r
+ * the QP and let it enter the timewait state.\r
+ */\r
+ if( al_cep_get_timewait( h_qp->obj.h_al,\r
+ ((al_conn_qp_t*)h_qp)->cid, &timewait ) == IB_SUCCESS )\r
+ {\r
+ /* Special checks on the QP state for error handling - see above. */\r
+ if( !h_qp || !AL_OBJ_IS_TYPE( h_qp, AL_OBJ_TYPE_H_QP ) ||\r
+ ( (h_qp->obj.state != CL_INITIALIZED) && \r
+ (h_qp->obj.state != CL_DESTROYING) ) )\r
+ {\r
+ AL_TRACE_EXIT( AL_DBG_CM, ("IB_INVALID_QP_HANDLE\n") );\r
+ return;\r
+ }\r
+\r
+ cl_memclr( &qp_mod, sizeof(ib_qp_mod_t) );\r
+ qp_mod.req_state = IB_QPS_ERROR;\r
+\r
+ /* Modify to error state using function pointers - see above. */\r
+ status = h_qp->pfn_modify_qp( h_qp, &qp_mod, NULL );\r
+ if( status != IB_SUCCESS )\r
+ {\r
+ AL_TRACE_EXIT( AL_DBG_ERROR,\r
+ ("pfn_modify_qp to IB_QPS_ERROR returned %s\n",\r
+ ib_get_err_str( status )) );\r
+ return;\r
+ }\r
+\r
+#ifdef CL_KERNEL\r
+ /* Store the timestamp after which the QP exits timewait. */\r
+ h_qp->timewait = cl_get_time_stamp() + timewait;\r
+#endif /* CL_KERNEL */\r
+ }\r
+\r
+ AL_EXIT( AL_DBG_CM );\r
+}\r
+\r
+\r
+static void\r
+__format_req_path_rec(\r
+ IN const mad_cm_req_t* const p_req,\r
+ IN const req_path_info_t* const p_path,\r
+ OUT ib_path_rec_t* const p_path_rec )\r
+{\r
+ AL_ENTER( AL_DBG_CM );\r
+\r
+ CL_ASSERT( p_req );\r
+ CL_ASSERT( p_path );\r
+ CL_ASSERT( p_path_rec );\r
+\r
+ /*\r
+ * Format a local path record. The local ack timeout specified in the\r
+ * REQ is twice the packet life plus the sender's CA ACK delay. When\r
+ * reporting the packet life, we divide the local ack timeout by 2 to\r
+ * approach the path's packet lifetime. Since local ack timeout is\r
+ * expressed as 4.096 * 2^x, subtracting 1 is equivalent to dividing the\r
+ * time in half.\r
+ */\r
+ ib_path_rec_init_local( p_path_rec,\r
+ &p_path->local_gid,\r
+ &p_path->remote_gid,\r
+ p_path->local_lid,\r
+ p_path->remote_lid,\r
+ 1, p_req->pkey,\r
+ conn_req_path_get_svc_lvl( p_path ),\r
+ IB_PATH_SELECTOR_EXACTLY, conn_req_get_mtu( p_req ),\r
+ IB_PATH_SELECTOR_EXACTLY,\r
+ conn_req_path_get_pkt_rate( p_path ),\r
+ IB_PATH_SELECTOR_EXACTLY,\r
+ (uint8_t)( conn_req_path_get_lcl_ack_timeout( p_path ) - 1 ),\r
+ 0 );\r
+\r
+ p_path_rec->hop_flow_raw.val = 0;\r
+ /* Add global routing info as necessary. */\r
+ if( !conn_req_path_get_subn_lcl( p_path ) )\r
+ {\r
+ ib_path_rec_set_hop_flow_raw( p_path_rec, p_path->hop_limit,\r
+ conn_req_path_get_flow_lbl( p_path ), FALSE );\r
+ p_path_rec->tclass = p_path->traffic_class;\r
+ }\r
+\r
+ AL_EXIT( AL_DBG_CM );\r
+}\r
+\r
+\r
+static void\r
+__format_req_rec(\r
+ IN const mad_cm_req_t* const p_req,\r
+ OUT ib_cm_req_rec_t *p_req_rec )\r
+{\r
+ AL_ENTER( AL_DBG_CM );\r
+\r
+ CL_ASSERT( p_req );\r
+ CL_ASSERT( p_req_rec );\r
+\r
+ cl_memclr( p_req_rec, sizeof(ib_cm_req_rec_t) );\r
+\r
+ /* format version specific data */\r
+ p_req_rec->p_req_pdata = p_req->pdata;\r
+\r
+ p_req_rec->qp_type = conn_req_get_qp_type( p_req );\r
+\r
+ p_req_rec->resp_res = conn_req_get_resp_res( p_req );\r
+ p_req_rec->flow_ctrl = conn_req_get_flow_ctrl( p_req );\r
+ p_req_rec->rnr_retry_cnt = conn_req_get_rnr_retry_cnt( p_req );\r
+\r
+ __format_req_path_rec( p_req, &p_req->primary_path,\r
+ &p_req_rec->primary_path );\r
+ __format_req_path_rec( p_req, &p_req->alternate_path,\r
+ &p_req_rec->alt_path );\r
+\r
+ /* These values are filled in later based on listen or peer connections\r
+ p_req_rec->context = ;\r
+ p_req_rec->h_cm_req = ;\r
+ p_req_rec->h_cm_listen = ;\r
+ */\r
+\r
+ AL_EXIT( AL_DBG_CM );\r
+}\r
+\r
+\r
+/******************************************************************************\r
+* Functions that handle incoming REQs that matched to an outstanding listen.\r
+*\r
+*/\r
+\r
+\r
+static void\r
+__listen_req(\r
+ IN al_listen_t* const p_listen,\r
+ IN const ib_cep_t* const p_new_cep,\r
+ IN const mad_cm_req_t* const p_req )\r
+{\r
+ ib_cm_req_rec_t req_rec;\r
+\r
+ AL_ENTER( AL_DBG_CM );\r
+\r
+ CL_ASSERT( p_listen );\r
+ CL_ASSERT( p_new_cep );\r
+ CL_ASSERT( p_req );\r
+\r
+ /* Format the callback record. */\r
+ __format_req_rec( p_req, &req_rec );\r
+\r
+ /* update listen based rec */\r
+ req_rec.context = p_listen->obj.context;\r
+\r
+ req_rec.h_cm_req.cid = p_new_cep->cid;\r
+ req_rec.h_cm_req.h_al = p_listen->obj.h_al;\r
+ req_rec.h_cm_req.h_qp = p_new_cep->context;\r
+\r
+ req_rec.h_cm_listen = p_listen;\r
+\r
+ /* Invoke the user's callback. */\r
+ p_listen->pfn_cm_req_cb( &req_rec );\r
+\r
+ AL_EXIT( AL_DBG_CM );\r
+}\r
+\r
+\r
+static void\r
+__proc_listen(\r
+ IN al_listen_t* const p_listen,\r
+ IN ib_cep_t* const p_new_cep,\r
+ IN const ib_mad_t* const p_mad )\r
+{\r
+ AL_ENTER( AL_DBG_CM );\r
+\r
+ /* Context is a listen - MAD must be a REQ or SIDR REQ */\r
+ switch( p_mad->attr_id )\r
+ {\r
+ case CM_REQ_ATTR_ID:\r
+ __listen_req(\r
+ p_listen, p_new_cep, (mad_cm_req_t*)p_mad );\r
+ break;\r
+\r
+ case CM_SIDR_REQ_ATTR_ID:\r
+ /* TODO - implement SIDR. */\r
+ default:\r
+ CL_ASSERT( p_mad->attr_id == CM_REQ_ATTR_ID ||\r
+ p_mad->attr_id == CM_SIDR_REQ_ATTR_ID );\r
+ /* Destroy the new CEP as it won't ever be reported to the user. */\r
+ al_destroy_cep( p_listen->obj.h_al, p_new_cep->cid, NULL );\r
+ }\r
+\r
+ AL_EXIT( AL_DBG_CM );\r
+}\r
+\r
+\r
+/******************************************************************************\r
+* Functions that handle send timeouts:\r
+*\r
+*/\r
+\r
+/*\r
+ * callback to process a connection establishment timeout due to reply not\r
+ * being received. The connection object has a reference\r
+ * taken when the timer is set or when the send is sent.\r
+ */\r
+static void\r
+__proc_conn_timeout(\r
+ IN const ib_qp_handle_t h_qp )\r
+{\r
+ ib_cm_rej_rec_t rej_rec;\r
+ net32_t cid;\r
+\r
+ AL_ENTER( AL_DBG_CM );\r
+\r
+ CL_ASSERT( h_qp );\r
+\r
+ /*\r
+ * Format the reject record before aborting the connection since\r
+ * we need the QP context.\r
+ */\r
+ cl_memclr( &rej_rec, sizeof(ib_cm_rej_rec_t) );\r
+ rej_rec.h_qp = h_qp;\r
+ rej_rec.qp_context = h_qp->obj.context;\r
+ rej_rec.rej_status = IB_REJ_TIMEOUT;\r
+\r
+ ref_al_obj( &h_qp->obj );\r
+\r
+ /* Unbind the QP from the CEP. */\r
+ __cep_timewait_qp( h_qp );\r
+\r
+ cid = cl_atomic_xchg( &((al_conn_qp_t*)h_qp)->cid, AL_INVALID_CID );\r
+\r
+ /* Invoke the callback. */\r
+ ((al_conn_qp_t*)h_qp)->pfn_cm_rej_cb( &rej_rec );\r
+\r
+ if( cid == AL_INVALID_CID ||\r
+ al_destroy_cep( h_qp->obj.h_al, cid, deref_al_obj ) != IB_SUCCESS )\r
+ {\r
+ deref_al_obj( &h_qp->obj );\r
+ }\r
+\r
+ AL_EXIT( AL_DBG_CM );\r
+}\r
+\r
+\r
+/*\r
+ * callback to process a LAP timeout due to APR not being received.\r
+ */\r
+static void\r
+__proc_lap_timeout(\r
+ IN const ib_qp_handle_t h_qp )\r
+{\r
+ ib_cm_apr_rec_t apr_rec;\r
+\r
+ AL_ENTER( AL_DBG_CM );\r
+\r
+ CL_ASSERT( h_qp );\r
+\r
+ /* Report the timeout. */\r
+ cl_memclr( &apr_rec, sizeof(ib_cm_apr_rec_t) );\r
+ apr_rec.h_qp = h_qp;\r
+ apr_rec.qp_context = h_qp->obj.context;\r
+ apr_rec.cm_status = IB_TIMEOUT;\r
+ apr_rec.apr_status = IB_AP_REJECT;\r
+\r
+ /* Notify the user that the LAP failed. */\r
+ ((al_conn_qp_t*)h_qp)->pfn_cm_apr_cb( &apr_rec );\r
+\r
+ AL_EXIT( AL_DBG_CM );\r
+}\r
+\r
+\r
+/*\r
+ * Callback to process a disconnection timeout due to not receiving the DREP\r
+ * within allowable time.\r
+ */\r
+static void\r
+__proc_dconn_timeout(\r
+ IN const ib_qp_handle_t h_qp )\r
+{\r
+ ib_cm_drep_rec_t drep_rec;\r
+ net32_t cid;\r
+\r
+ AL_ENTER( AL_DBG_CM );\r
+\r
+ /* No response. We're done. Deliver a DREP callback. */\r
+ cl_memclr( &drep_rec, sizeof(ib_cm_drep_rec_t) );\r
+ drep_rec.h_qp = h_qp;\r
+ drep_rec.qp_context = h_qp->obj.context;\r
+ drep_rec.cm_status = IB_TIMEOUT;\r
+\r
+ ref_al_obj( &h_qp->obj );\r
+\r
+ __cep_timewait_qp( h_qp );\r
+\r
+ cid = cl_atomic_xchg( &((al_conn_qp_t*)h_qp)->cid, AL_INVALID_CID );\r
+\r
+ /* Call the user back. */\r
+ ((al_conn_qp_t*)h_qp)->pfn_cm_drep_cb( &drep_rec );\r
+\r
+ if( cid == AL_INVALID_CID ||\r
+ al_destroy_cep( h_qp->obj.h_al, cid, deref_al_obj ) != IB_SUCCESS )\r
+ {\r
+ deref_al_obj( &h_qp->obj );\r
+ }\r
+\r
+ AL_EXIT( AL_DBG_CM );\r
+}\r
+\r
+\r
+/*\r
+ * Dispatch a send failure (timeout) to the handler matching the MAD\r
+ * that was being sent.  A failed MAD element always indicates a send.\r
+ */\r
+static void\r
+__proc_failed_send(\r
+	IN				ib_qp_handle_t				h_qp,\r
+	IN		const	ib_mad_t* const				p_mad )\r
+{\r
+	AL_ENTER( AL_DBG_CM );\r
+\r
+	/* REQ and REP timeouts are both connection-establishment failures. */\r
+	if( p_mad->attr_id == CM_REQ_ATTR_ID ||\r
+		p_mad->attr_id == CM_REP_ATTR_ID )\r
+	{\r
+		__proc_conn_timeout( h_qp );\r
+	}\r
+	else if( p_mad->attr_id == CM_LAP_ATTR_ID )\r
+	{\r
+		__proc_lap_timeout( h_qp );\r
+	}\r
+	else if( p_mad->attr_id == CM_DREQ_ATTR_ID )\r
+	{\r
+		__proc_dconn_timeout( h_qp );\r
+	}\r
+	else\r
+	{\r
+		AL_TRACE( AL_DBG_ERROR,\r
+			("Invalid CM send MAD attribute ID %d.\n", p_mad->attr_id) );\r
+	}\r
+\r
+	AL_EXIT( AL_DBG_CM );\r
+}\r
+\r
+\r
+/******************************************************************************\r
+* Functions that handle received MADs on a connection (not listen)\r
+*\r
+*/\r
+\r
+\r
+/*\r
+ * Process a REQ received on an existing CEP - i.e. a peer-to-peer\r
+ * connection where both sides sent a REQ.  Delivers the REQ callback\r
+ * stored on the QP rather than on a listen object.\r
+ */\r
+void\r
+__proc_peer_req(\r
+	IN		const	ib_cm_handle_t* const		p_cm,\r
+	IN		const	mad_cm_req_t* const			p_req )\r
+{\r
+	ib_cm_req_rec_t	req_rec;\r
+\r
+	AL_ENTER( AL_DBG_CM );\r
+\r
+	CL_ASSERT( p_cm );\r
+	CL_ASSERT( p_cm->h_qp );\r
+	/* Must be peer-to-peer. */\r
+	CL_ASSERT( ((al_conn_qp_t*)p_cm->h_qp)->pfn_cm_req_cb );\r
+	CL_ASSERT( p_req );\r
+\r
+	/* Format the callback record. */\r
+	__format_req_rec( p_req, &req_rec );\r
+\r
+	/* update peer based rec handles and context values */\r
+	req_rec.context = p_cm->h_qp->obj.context;\r
+	req_rec.h_cm_req = *p_cm;\r
+	/* No listen handle - this REQ arrived on an active-side CEP. */\r
+	req_rec.h_cm_listen = NULL;\r
+\r
+	/* Invoke the user's callback.  User must call ib_cm_rep or ib_cm_rej. */\r
+	((al_conn_qp_t*)p_cm->h_qp)->pfn_cm_req_cb( &req_rec );\r
+\r
+	AL_EXIT( AL_DBG_CM );\r
+}\r
+\r
+\r
+/*\r
+ * Process a received MRA - report the receiver-requested delay to the\r
+ * user via the MRA callback stored on the QP.\r
+ */\r
+void\r
+__proc_mra(\r
+	IN		const	ib_cm_handle_t* const		p_cm,\r
+	IN		const	mad_cm_mra_t* const			p_mra )\r
+{\r
+	ib_cm_mra_rec_t	mra_rec;\r
+\r
+	AL_ENTER( AL_DBG_CM );\r
+\r
+	CL_ASSERT( p_cm->h_qp );\r
+	CL_ASSERT( ((al_conn_qp_t*)p_cm->h_qp)->pfn_cm_mra_cb );\r
+\r
+	/* Format the MRA callback record. */\r
+	cl_memclr( &mra_rec, sizeof(ib_cm_mra_rec_t) );\r
+\r
+	mra_rec.h_qp = p_cm->h_qp;\r
+	mra_rec.qp_context = p_cm->h_qp->obj.context;\r
+	mra_rec.p_mra_pdata = p_mra->pdata;\r
+\r
+	/*\r
+	 * Call the user back.  Note that users will get a callback only\r
+	 * for the first MRA received in response to a REQ, REP, or LAP.\r
+	 */\r
+	((al_conn_qp_t*)p_cm->h_qp)->pfn_cm_mra_cb( &mra_rec );\r
+\r
+	AL_EXIT( AL_DBG_CM );\r
+}\r
+\r
+\r
+/*\r
+ * Process a received REJ.  Moves the QP to the timewait state, unbinds\r
+ * the CEP from the QP and delivers the REJ callback.\r
+ */\r
+void\r
+__proc_rej(\r
+	IN		const	ib_cm_handle_t* const		p_cm,\r
+	IN		const	mad_cm_rej_t* const			p_rej )\r
+{\r
+	ib_cm_rej_rec_t	rej_rec;\r
+	net32_t			cid;\r
+\r
+	AL_ENTER( AL_DBG_CM );\r
+\r
+	if( p_cm->h_qp )\r
+	{\r
+		/* Format the REJ callback record. */\r
+		cl_memclr( &rej_rec, sizeof(ib_cm_rej_rec_t) );\r
+\r
+		rej_rec.h_qp = p_cm->h_qp;\r
+		rej_rec.qp_context = p_cm->h_qp->obj.context;\r
+\r
+		rej_rec.p_rej_pdata = p_rej->pdata;\r
+		rej_rec.p_ari = p_rej->ari;\r
+		rej_rec.ari_length = conn_rej_get_ari_len( p_rej );\r
+		rej_rec.rej_status = p_rej->reason;\r
+\r
+		/* Hold the QP while tearing down the CEP. */\r
+		ref_al_obj( &p_cm->h_qp->obj );\r
+\r
+		/*\r
+		 * Unbind the QP from the connection object.  This allows the QP to\r
+		 * be immediately reused in another connection request.\r
+		 */\r
+		__cep_timewait_qp( p_cm->h_qp );\r
+\r
+		/* The CID may already be invalid if a timeout raced with this REJ. */\r
+		cid = cl_atomic_xchg( &((al_conn_qp_t*)p_cm->h_qp)->cid, AL_INVALID_CID );\r
+		CL_ASSERT( cid == p_cm->cid || cid == AL_INVALID_CID );\r
+		if( cid == AL_INVALID_CID ||\r
+			al_destroy_cep( p_cm->h_al, cid, deref_al_obj ) != IB_SUCCESS )\r
+		{\r
+			deref_al_obj( &p_cm->h_qp->obj );\r
+		}\r
+\r
+		/* Call the user back. */\r
+		((al_conn_qp_t*)p_cm->h_qp)->pfn_cm_rej_cb( &rej_rec );\r
+	}\r
+\r
+	AL_EXIT( AL_DBG_CM );\r
+}\r
+\r
+\r
+/*\r
+ * Process a received REP - deliver the REP callback so the user can\r
+ * accept (ib_cm_rtu) or reject (ib_cm_rej) the connection.\r
+ */\r
+static void\r
+__proc_rep(\r
+	IN				ib_cm_handle_t* const		p_cm,\r
+	IN				mad_cm_rep_t* const			p_rep )\r
+{\r
+	ib_cm_rep_rec_t	rep_rec;\r
+\r
+	AL_ENTER( AL_DBG_CM );\r
+\r
+	cl_memclr( &rep_rec, sizeof(ib_cm_rep_rec_t) );\r
+\r
+	/* fill the rec callback data */\r
+	rep_rec.p_rep_pdata = p_rep->pdata;\r
+	rep_rec.qp_type = p_cm->h_qp->type;\r
+\r
+	rep_rec.h_cm_rep = *p_cm;\r
+	rep_rec.qp_context = p_cm->h_qp->obj.context;\r
+	rep_rec.resp_res = p_rep->resp_resources;\r
+	rep_rec.flow_ctrl = conn_rep_get_e2e_flow_ctl( p_rep );\r
+	rep_rec.apr_status = conn_rep_get_failover( p_rep );\r
+\r
+	/* Notify the user of the reply. */\r
+	((al_conn_qp_t*)p_cm->h_qp)->pfn_cm_rep_cb( &rep_rec );\r
+\r
+	AL_EXIT( AL_DBG_CM );\r
+}\r
+\r
+\r
+/*\r
+ * Process a received RTU - the remote side confirmed the connection.\r
+ * Delivers the RTU callback stored on the QP.\r
+ */\r
+static void\r
+__proc_rtu(\r
+	IN				ib_cm_handle_t* const		p_cm,\r
+	IN				mad_cm_rtu_t* const			p_rtu )\r
+{\r
+	ib_cm_rtu_rec_t	rtu_rec;\r
+\r
+	AL_ENTER( AL_DBG_CM );\r
+\r
+	/*\r
+	 * Zero the record so fields that are not explicitly set below are\r
+	 * not delivered to the user uninitialized.  This matches all the\r
+	 * other callback-record handlers (__proc_mra, __proc_rej, etc.).\r
+	 */\r
+	cl_memclr( &rtu_rec, sizeof(ib_cm_rtu_rec_t) );\r
+\r
+	rtu_rec.p_rtu_pdata = p_rtu->pdata;\r
+	rtu_rec.h_qp = p_cm->h_qp;\r
+	rtu_rec.qp_context = p_cm->h_qp->obj.context;\r
+\r
+	/* Notify the user that the connection is established. */\r
+	((al_conn_qp_t*)p_cm->h_qp)->pfn_cm_rtu_cb( &rtu_rec );\r
+\r
+	AL_EXIT( AL_DBG_CM );\r
+}\r
+\r
+\r
+/*\r
+ * Process a received DREQ - the remote side requested disconnection.\r
+ * The user must respond with ib_cm_drep.\r
+ */\r
+static void\r
+__proc_dreq(\r
+	IN				ib_cm_handle_t* const		p_cm,\r
+	IN				mad_cm_dreq_t* const		p_dreq )\r
+{\r
+	ib_cm_dreq_rec_t	dreq_rec;\r
+\r
+	AL_ENTER( AL_DBG_CM );\r
+\r
+	cl_memclr( &dreq_rec, sizeof(ib_cm_dreq_rec_t) );\r
+\r
+	/* The handle lets the user target this CEP in the ib_cm_drep call. */\r
+	dreq_rec.h_cm_dreq = *p_cm;\r
+	dreq_rec.p_dreq_pdata = p_dreq->pdata;\r
+\r
+	dreq_rec.qp_context = p_cm->h_qp->obj.context;\r
+\r
+	((al_conn_qp_t*)p_cm->h_qp)->pfn_cm_dreq_cb( &dreq_rec );\r
+\r
+	AL_EXIT( AL_DBG_CM );\r
+}\r
+\r
+\r
+/*\r
+ * Process a received DREP.  Moves the QP to the timewait state, unbinds\r
+ * it from the CEP and delivers the DREP callback to the user.\r
+ */\r
+void\r
+__proc_drep(\r
+	IN				ib_cm_handle_t* const		p_cm,\r
+	IN				mad_cm_drep_t* const		p_drep )\r
+{\r
+	ib_cm_drep_rec_t	drep_rec;\r
+	net32_t				cid;\r
+\r
+	AL_ENTER( AL_DBG_CM );\r
+\r
+	cl_memclr( &drep_rec, sizeof(ib_cm_drep_rec_t) );\r
+\r
+	/* Copy qp context before the connection is released */\r
+	drep_rec.cm_status = IB_SUCCESS;\r
+	drep_rec.p_drep_pdata = p_drep->pdata;\r
+	drep_rec.h_qp = p_cm->h_qp;\r
+	drep_rec.qp_context = p_cm->h_qp->obj.context;\r
+\r
+	/* Hold the QP while tearing down the CEP. */\r
+	ref_al_obj( &p_cm->h_qp->obj );\r
+\r
+	__cep_timewait_qp( p_cm->h_qp );\r
+\r
+	cid = cl_atomic_xchg( &((al_conn_qp_t*)p_cm->h_qp)->cid, AL_INVALID_CID );\r
+\r
+	/*\r
+	 * The CID may already be invalid if the DREQ timed out and the\r
+	 * timeout handler cleaned up the CEP before this DREP arrived.\r
+	 * Guard the destroy (using the exchanged CID) as the other\r
+	 * handlers do, rather than destroying p_cm->cid unconditionally.\r
+	 */\r
+	CL_ASSERT( cid == p_cm->cid || cid == AL_INVALID_CID );\r
+\r
+	if( cid == AL_INVALID_CID ||\r
+		al_destroy_cep( p_cm->h_al, cid, deref_al_obj ) != IB_SUCCESS )\r
+	{\r
+		deref_al_obj( &p_cm->h_qp->obj );\r
+	}\r
+\r
+	((al_conn_qp_t*)p_cm->h_qp)->pfn_cm_drep_cb( &drep_rec );\r
+\r
+	AL_EXIT( AL_DBG_CM );\r
+}\r
+\r
+\r
+/*\r
+ * Process a received LAP - build a path record describing the proposed\r
+ * alternate path and deliver the LAP callback to the user.\r
+ */\r
+void\r
+__proc_lap(\r
+	IN				ib_cm_handle_t* const		p_cm,\r
+	IN		const	mad_cm_lap_t* const			p_lap )\r
+{\r
+	ib_cm_lap_rec_t	lap_rec;\r
+	const lap_path_info_t* const	p_path = &p_lap->alternate_path;\r
+\r
+	AL_ENTER( AL_DBG_CM );\r
+\r
+	CL_ASSERT( p_cm );\r
+	CL_ASSERT( p_cm->h_qp );\r
+	CL_ASSERT( p_lap );\r
+\r
+	cl_memclr( &lap_rec, sizeof(ib_cm_lap_rec_t) );\r
+	lap_rec.qp_context = p_cm->h_qp->obj.context;\r
+	lap_rec.h_cm_lap = *p_cm;\r
+\r
+	/*\r
+	 * Format the path record.  The local ack timeout specified in the\r
+	 * LAP is twice the packet life plus the sender's CA ACK delay.  When\r
+	 * reporting the packet life, we divide the local ack timeout by 2 to\r
+	 * approach the path's packet lifetime.  Since local ack timeout is\r
+	 * expressed as 4.096 * 2^x, subtracting 1 is equivalent to dividing the\r
+	 * time in half.\r
+	 */\r
+	ib_path_rec_init_local( &lap_rec.alt_path,\r
+		&p_lap->alternate_path.local_gid,\r
+		&p_lap->alternate_path.remote_gid,\r
+		p_lap->alternate_path.local_lid,\r
+		p_lap->alternate_path.remote_lid,\r
+		1, IB_DEFAULT_PKEY,\r
+		conn_lap_path_get_svc_lvl( &p_lap->alternate_path ),\r
+		IB_PATH_SELECTOR_EXACTLY,\r
+		IB_MTU_2048,\r
+		IB_PATH_SELECTOR_EXACTLY,\r
+		conn_lap_path_get_pkt_rate( p_path ),\r
+		IB_PATH_SELECTOR_EXACTLY,\r
+		(uint8_t)( conn_lap_path_get_lcl_ack_timeout( p_path ) - 1 ),\r
+		0 );\r
+\r
+	lap_rec.alt_path.hop_flow_raw.val = 0;\r
+	/* Add global routing info as necessary. */\r
+	if( !conn_lap_path_get_subn_lcl( &p_lap->alternate_path ) )\r
+	{\r
+		ib_path_rec_set_hop_flow_raw( &lap_rec.alt_path,\r
+			p_lap->alternate_path.hop_limit,\r
+			conn_lap_path_get_flow_lbl( &p_lap->alternate_path ),\r
+			FALSE );\r
+		lap_rec.alt_path.tclass =\r
+			conn_lap_path_get_tclass( &p_lap->alternate_path );\r
+	}\r
+\r
+	/* The user accepts or rejects the alternate path via ib_cm_apr. */\r
+	((al_conn_qp_t*)p_cm->h_qp)->pfn_cm_lap_cb( &lap_rec );\r
+\r
+	AL_EXIT( AL_DBG_CM );\r
+}\r
+\r
+\r
+/*\r
+ * Load the alternate path into the QP by re-running the RTS transition\r
+ * with the attributes the CEP computed for the accepted LAP.\r
+ */\r
+static ib_api_status_t\r
+__cep_lap_qp(\r
+	IN				ib_cm_handle_t* const		p_cm )\r
+{\r
+	ib_api_status_t		status;\r
+	ib_qp_mod_t			qp_mod;\r
+\r
+	AL_ENTER( AL_DBG_CM );\r
+\r
+	status = al_cep_get_rts_attr( p_cm->h_al, p_cm->cid, &qp_mod );\r
+	if( status == IB_SUCCESS )\r
+	{\r
+		status = ib_modify_qp( p_cm->h_qp, &qp_mod );\r
+		if( status != IB_SUCCESS )\r
+		{\r
+			AL_TRACE( AL_DBG_ERROR,\r
+				("ib_modify_qp for LAP returned %s.\n", ib_get_err_str(status)) );\r
+		}\r
+	}\r
+	else\r
+	{\r
+		AL_TRACE( AL_DBG_ERROR,\r
+			("al_cep_get_rts_attr returned %s.\n", ib_get_err_str(status)) );\r
+	}\r
+\r
+	AL_EXIT( AL_DBG_CM );\r
+	return status;\r
+}\r
+\r
+\r
+/*\r
+ * Process a received APR - if the remote side accepted the alternate\r
+ * path, load it into the QP, then deliver the APR callback.\r
+ */\r
+static void\r
+__proc_apr(\r
+	IN				ib_cm_handle_t* const		p_cm,\r
+	IN				mad_cm_apr_t* const			p_apr )\r
+{\r
+	ib_cm_apr_rec_t	apr_rec;\r
+\r
+	AL_ENTER( AL_DBG_CM );\r
+\r
+	/*\r
+	 * Zero the record so fields that are not explicitly set are not\r
+	 * delivered to the user uninitialized, matching the other\r
+	 * callback-record handlers.\r
+	 */\r
+	cl_memclr( &apr_rec, sizeof(ib_cm_apr_rec_t) );\r
+\r
+	apr_rec.h_qp = p_cm->h_qp;\r
+	apr_rec.qp_context = p_cm->h_qp->obj.context;\r
+	apr_rec.p_info = (const uint8_t*)&p_apr->info;\r
+	apr_rec.info_length = p_apr->info_len;\r
+	apr_rec.p_apr_pdata = p_apr->pdata;\r
+	apr_rec.apr_status = p_apr->status;\r
+\r
+	if( apr_rec.apr_status == IB_AP_SUCCESS )\r
+	{\r
+		/* Remote side accepted - migrate the QP to the alternate path. */\r
+		apr_rec.cm_status = __cep_lap_qp( p_cm );\r
+	}\r
+	else\r
+	{\r
+		apr_rec.cm_status = IB_ERROR;\r
+	}\r
+\r
+	((al_conn_qp_t*)p_cm->h_qp)->pfn_cm_apr_cb( &apr_rec );\r
+\r
+	AL_EXIT( AL_DBG_CM );\r
+}\r
+\r
+\r
+/*\r
+ * Dispatch a successfully received CM MAD on an established CEP to the\r
+ * handler for its attribute ID.\r
+ */\r
+static void\r
+__proc_conn(\r
+	IN				ib_cm_handle_t* const		p_cm,\r
+	IN				ib_mad_t* const				p_mad )\r
+{\r
+	AL_ENTER( AL_DBG_CM );\r
+\r
+	/* Success indicates a receive. */\r
+	switch( p_mad->attr_id )\r
+	{\r
+	case CM_REQ_ATTR_ID:\r
+		/* A REQ on an existing CEP means a peer-to-peer connection. */\r
+		__proc_peer_req( p_cm, (mad_cm_req_t*)p_mad );\r
+		break;\r
+\r
+	case CM_MRA_ATTR_ID:\r
+		__proc_mra( p_cm, (mad_cm_mra_t*)p_mad );\r
+		break;\r
+\r
+	case CM_REJ_ATTR_ID:\r
+		__proc_rej( p_cm, (mad_cm_rej_t*)p_mad );\r
+		break;\r
+\r
+	case CM_REP_ATTR_ID:\r
+		__proc_rep( p_cm, (mad_cm_rep_t*)p_mad );\r
+		break;\r
+\r
+	case CM_RTU_ATTR_ID:\r
+		__proc_rtu( p_cm, (mad_cm_rtu_t*)p_mad );\r
+		break;\r
+\r
+	case CM_DREQ_ATTR_ID:\r
+		__proc_dreq( p_cm, (mad_cm_dreq_t*)p_mad );\r
+		break;\r
+\r
+	case CM_DREP_ATTR_ID:\r
+		__proc_drep( p_cm, (mad_cm_drep_t*)p_mad );\r
+		break;\r
+\r
+	case CM_LAP_ATTR_ID:\r
+		__proc_lap( p_cm, (mad_cm_lap_t*)p_mad );\r
+		break;\r
+\r
+	case CM_APR_ATTR_ID:\r
+		__proc_apr( p_cm, (mad_cm_apr_t*)p_mad );\r
+		break;\r
+\r
+	/* SIDR (UD service ID resolution) is not supported yet. */\r
+	//case CM_SIDR_REQ_ATTR_ID:\r
+	//	p_async_mad->item.pfn_callback = __process_cm_sidr_req;\r
+	//	break;\r
+\r
+	//case CM_SIDR_REP_ATTR_ID:\r
+	//	p_async_mad->item.pfn_callback = __process_cm_sidr_rep;\r
+	//	break;\r
+\r
+	default:\r
+		AL_TRACE( AL_DBG_ERROR,\r
+			("Invalid CM recv MAD attribute ID %d.\n", p_mad->attr_id) );\r
+	}\r
+\r
+	AL_EXIT( AL_DBG_CM );\r
+}\r
+\r
+/******************************************************************************\r
+* CEP callback handler.\r
+*\r
+*/\r
+\r
+/*\r
+ * Drain all pending events from a CEP and dispatch each one: a new CID\r
+ * means a REQ arrived on a listen; a failed MAD means a send timed out;\r
+ * otherwise a CM MAD was received on an established connection.\r
+ *\r
+ * In the kernel this runs at passive level as __process_cep_cb (queued by\r
+ * the kernel __cm_handler below); in user mode it IS the CM handler.\r
+ */\r
+#ifdef CL_KERNEL\r
+static void\r
+__process_cep_cb(\r
+#else\r
+static ib_api_status_t\r
+__cm_handler(\r
+#endif\r
+	IN		const	ib_al_handle_t				h_al,\r
+	IN				ib_cep_t* const				p_cep )\r
+{\r
+	ib_api_status_t		status;\r
+	ib_cep_t			new_cep;\r
+	ib_mad_element_t	*p_mad;\r
+	ib_cm_handle_t		h_cm;\r
+\r
+	AL_ENTER( AL_DBG_CM );\r
+\r
+	/* Poll until the CEP has no more completed events. */\r
+	for( status = al_cep_poll( h_al, p_cep->cid, &new_cep, &p_mad );\r
+		status == IB_SUCCESS;\r
+		status = al_cep_poll( h_al, p_cep->cid, &new_cep, &p_mad ) )\r
+	{\r
+		/* Something to do - WOOT!!! */\r
+		if( new_cep.cid != AL_INVALID_CID )\r
+		{\r
+			/* A new CEP was spawned - context is the listen object. */\r
+			__proc_listen( (al_listen_t*)p_cep->context,\r
+				&new_cep, ib_get_mad_buf( p_mad ) );\r
+		}\r
+		else if( p_mad->status != IB_SUCCESS )\r
+		{\r
+			/* Context is a QP handle, and a sent MAD timed out. */\r
+			__proc_failed_send(\r
+				(ib_qp_handle_t)p_cep->context, ib_get_mad_buf( p_mad ) );\r
+		}\r
+		else\r
+		{\r
+			h_cm.h_al = h_al;\r
+			h_cm.cid = p_cep->cid;\r
+			h_cm.h_qp = (ib_qp_handle_t)p_cep->context;\r
+			__proc_conn( &h_cm, ib_get_mad_buf( p_mad ) );\r
+		}\r
+		/* The MAD element was claimed by al_cep_poll; return it. */\r
+		ib_put_mad( p_mad );\r
+	}\r
+#ifndef CL_KERNEL\r
+	return IB_SUCCESS;\r
+#endif\r
+}\r
+\r
+\r
+#ifdef CL_KERNEL\r
+\r
+/*\r
+ * Passive-level async work item: unwrap the queued CEP event and run the\r
+ * real handler, then free the work item allocated by __cm_handler.\r
+ */\r
+static void\r
+__process_cep_async(\r
+	IN				cl_async_proc_item_t		*p_item )\r
+{\r
+	cep_async_mad_t	*p_async_mad;\r
+\r
+	AL_ENTER( AL_DBG_CM );\r
+\r
+	p_async_mad = PARENT_STRUCT( p_item, cep_async_mad_t, item );\r
+\r
+	__process_cep_cb( p_async_mad->h_al, &p_async_mad->cep );\r
+\r
+	cl_free( p_async_mad );\r
+\r
+	AL_EXIT( AL_DBG_CM );\r
+}\r
+\r
+\r
+/*\r
+ * The handler is invoked at DISPATCH_LEVEL in kernel mode.  We need to switch\r
+ * to a passive level thread context to perform QP modify and invoke user\r
+ * callbacks.\r
+ */\r
+static ib_api_status_t\r
+__cm_handler(\r
+	IN		const	ib_al_handle_t				h_al,\r
+	IN				ib_cep_t* const				p_cep )\r
+{\r
+	cep_async_mad_t	*p_async_mad;\r
+\r
+	AL_ENTER( AL_DBG_CM );\r
+\r
+	/* Freed by __process_cep_async after the event is handled. */\r
+	p_async_mad = (cep_async_mad_t*)cl_zalloc( sizeof(cep_async_mad_t) );\r
+	if( !p_async_mad )\r
+	{\r
+		AL_TRACE_EXIT( AL_DBG_ERROR,\r
+			("failed to cl_zalloc cm_async_mad_t (%d bytes)\n",\r
+			sizeof(cep_async_mad_t)) );\r
+		return IB_ERROR;\r
+	}\r
+\r
+	/* Copy the CEP by value - p_cep is not valid after we return. */\r
+	p_async_mad->h_al = h_al;\r
+	p_async_mad->cep = *p_cep;\r
+	p_async_mad->item.pfn_callback = __process_cep_async;\r
+\r
+	/* Queue the MAD for asynchronous processing. */\r
+	cl_async_proc_queue( gp_async_proc_mgr, &p_async_mad->item );\r
+\r
+	AL_EXIT( AL_DBG_CM );\r
+	return IB_SUCCESS;\r
+}\r
+#endif /* CL_KERNEL */\r
+\r
+\r
+/*\r
+ * Transition the QP to the INIT state, if it is not already in the\r
+ * INIT state.\r
+ */\r
+ib_api_status_t\r
+__cep_init_qp(\r
+	IN		const	ib_qp_handle_t				h_qp,\r
+	IN				ib_qp_mod_t* const			p_init )\r
+{\r
+	ib_qp_mod_t			qp_mod;\r
+	ib_api_status_t		status;\r
+\r
+	/*\r
+	 * Move to the init state to allow posting of receive buffers.\r
+	 * Check the current state of the QP.  The user may have already\r
+	 * transitioned it and posted some receives to the QP, so we\r
+	 * should not reset the QP if it is already in the INIT state.\r
+	 */\r
+	if( h_qp->state != IB_QPS_INIT )\r
+	{\r
+		/* Reset the QP. */\r
+		cl_memclr( &qp_mod, sizeof(ib_qp_mod_t) );\r
+		qp_mod.req_state = IB_QPS_RESET;\r
+\r
+		/* A failed reset is only traced; the INIT transition below\r
+		 * will fail (and be reported) if the QP is truly unusable. */\r
+		status = ib_modify_qp( h_qp, &qp_mod );\r
+		if( status != IB_SUCCESS )\r
+		{\r
+			AL_TRACE( AL_DBG_ERROR,\r
+				("ib_modify_qp to IB_QPS_RESET returned %s\n",\r
+				ib_get_err_str(status) ) );\r
+		}\r
+\r
+		/* Initialize the QP. */\r
+		status = ib_modify_qp( h_qp, p_init );\r
+		if( status != IB_SUCCESS )\r
+		{\r
+			AL_TRACE( AL_DBG_ERROR,\r
+				("ib_modify_qp returned %s.\n", ib_get_err_str(status) ) );\r
+			return status;\r
+		}\r
+	}\r
+\r
+	return IB_SUCCESS;\r
+}\r
+\r
+/*\r
+ * Prepare a CEP and its bound QP for sending a REQ: format the REQ on\r
+ * the CEP and transition the QP to INIT with the returned attributes.\r
+ */\r
+static ib_api_status_t\r
+__cep_pre_req(\r
+	IN		const	ib_cm_req_t* const			p_cm_req )\r
+{\r
+	ib_api_status_t		status;\r
+	ib_qp_mod_t			qp_mod;\r
+\r
+	AL_ENTER( AL_DBG_CM );\r
+\r
+	status = al_cep_pre_req( qp_get_al( p_cm_req->h_qp ),\r
+		((al_conn_qp_t*)p_cm_req->h_qp)->cid, p_cm_req, &qp_mod );\r
+	if( status != IB_SUCCESS )\r
+	{\r
+		AL_TRACE_EXIT( AL_DBG_ERROR,\r
+			("al_cep_pre_req returned %s.\n", ib_get_err_str( status )) );\r
+		return status;\r
+	}\r
+\r
+	/* Transition QP through state machine */\r
+	/*\r
+	 * Warning! Using all access rights.  We need to modify\r
+	 * the ib_cm_req_t to include this.\r
+	 */\r
+	qp_mod.state.init.access_ctrl |=\r
+		IB_AC_RDMA_READ | IB_AC_RDMA_WRITE | IB_AC_ATOMIC;\r
+	status = __cep_init_qp( p_cm_req->h_qp, &qp_mod );\r
+	if( status != IB_SUCCESS )\r
+	{\r
+		AL_TRACE_EXIT( AL_DBG_ERROR,\r
+			("__cep_init_qp returned %s\n", ib_get_err_str(status)) );\r
+		return status;\r
+	}\r
+\r
+	AL_EXIT( AL_DBG_CM );\r
+	return IB_SUCCESS;\r
+}\r
+\r
+\r
+/*\r
+ * Active-side connection establishment: create a CEP, bind it to the\r
+ * QP (exactly once), prepare the REQ and send it.  On any failure after\r
+ * binding, the CEP is destroyed and the QP unbound again.\r
+ */\r
+static ib_api_status_t\r
+__cep_conn_req(\r
+	IN		const	ib_al_handle_t				h_al,\r
+	IN		const	ib_cm_req_t* const			p_cm_req )\r
+{\r
+	ib_api_status_t		status;\r
+	//cl_status_t			cl_status;\r
+	//cl_event_t			sync_event;\r
+	//cl_event_t			*p_sync_event = NULL;\r
+	al_conn_qp_t		*p_qp;\r
+	net32_t				cid, old_cid;\r
+\r
+	AL_ENTER( AL_DBG_CM );\r
+\r
+	/* event based mechanism */\r
+	if( p_cm_req->flags & IB_FLAGS_SYNC )\r
+	{\r
+		/* Synchronous (blocking) connects are not supported yet. */\r
+		AL_EXIT( AL_DBG_CM );\r
+		return IB_UNSUPPORTED;\r
+		//cl_event_construct( &sync_event );\r
+		//cl_status = cl_event_init( &sync_event, FALSE );\r
+		//if( cl_status != CL_SUCCESS )\r
+		//{\r
+		//	__deref_conn( p_conn );\r
+		//	return ib_convert_cl_status( cl_status );\r
+		//}\r
+		//p_conn->p_sync_event = p_sync_event = &sync_event;\r
+	}\r
+\r
+	p_qp = (al_conn_qp_t*)p_cm_req->h_qp;\r
+\r
+	/* Get a CEP and bind it to the QP. */\r
+	status = al_create_cep( h_al, __cm_handler, p_cm_req->h_qp, &cid );\r
+	if( status != IB_SUCCESS )\r
+	{\r
+		AL_TRACE( AL_DBG_ERROR,\r
+			("al_create_cep returned %s.\n", ib_get_err_str( status )) );\r
+		goto done;\r
+	}\r
+\r
+	/* See if this QP has already been connected. */\r
+	old_cid = cl_atomic_comp_xchg( &p_qp->cid, AL_INVALID_CID, cid );\r
+	if( old_cid != AL_INVALID_CID )\r
+	{\r
+		/* Lost the race - some other thread bound a CEP first. */\r
+		al_destroy_cep( h_al, cid, NULL );\r
+		AL_EXIT( AL_DBG_CM );\r
+		return IB_INVALID_STATE;\r
+	}\r
+\r
+	status = __cep_pre_req( p_cm_req );\r
+	if( status != IB_SUCCESS )\r
+	{\r
+		AL_TRACE( AL_DBG_ERROR,\r
+			("__cep_pre_req returned %s.\n", ib_get_err_str( status )) );\r
+		goto err;\r
+	}\r
+\r
+	/* Store callback pointers. */\r
+	p_qp->pfn_cm_req_cb = p_cm_req->pfn_cm_req_cb;\r
+	p_qp->pfn_cm_rep_cb = p_cm_req->pfn_cm_rep_cb;\r
+	p_qp->pfn_cm_mra_cb = p_cm_req->pfn_cm_mra_cb;\r
+	p_qp->pfn_cm_rej_cb = p_cm_req->pfn_cm_rej_cb;\r
+\r
+	/* Send the REQ. */\r
+	status = al_cep_send_req( h_al, p_qp->cid );\r
+	if( status != IB_SUCCESS )\r
+	{\r
+		//if( p_sync_event )\r
+		//	cl_event_destroy( p_sync_event );\r
+\r
+		AL_TRACE( AL_DBG_ERROR,\r
+			("al_cep_send_req returned %s.\n", ib_get_err_str(status)) );\r
+err:\r
+		/* Unbind the QP and destroy the CEP; the reference taken here\r
+		 * is released either by deref_al_obj via al_destroy_cep's\r
+		 * completion or directly if the destroy fails. */\r
+		ref_al_obj( &p_qp->qp.obj );\r
+		cl_atomic_xchg( &p_qp->cid, AL_INVALID_CID );\r
+		if( al_destroy_cep( h_al, cid, deref_al_obj ) != IB_SUCCESS )\r
+			deref_al_obj( &p_qp->qp.obj );\r
+	}\r
+\r
+	/* wait on event if synchronous operation */\r
+	//if( p_sync_event )\r
+	//{\r
+	//	CL_TRACE( AL_DBG_CM, g_al_dbg_lvl,\r
+	//		("event blocked on REQ...\n") );\r
+	//	cl_event_wait_on( p_sync_event, EVENT_NO_TIMEOUT, FALSE );\r
+\r
+	//	cl_event_destroy( p_sync_event );\r
+	//}\r
+\r
+done:\r
+	AL_EXIT( AL_DBG_CM );\r
+	return status;\r
+}\r
+\r
+\r
+/*\r
+ * Public entry point: issue a connection request.  Validates the request\r
+ * record and QP type, then starts active-side connection establishment.\r
+ * SIDR (UD) requests are not supported yet.\r
+ */\r
+ib_api_status_t\r
+ib_cm_req(\r
+	IN		const	ib_cm_req_t* const			p_cm_req )\r
+{\r
+	ib_api_status_t		status;\r
+\r
+	AL_ENTER( AL_DBG_CM );\r
+\r
+	if( !p_cm_req )\r
+	{\r
+		AL_TRACE_EXIT( AL_DBG_ERROR, ("IB_INVALID_PARAMETER\n") );\r
+		return IB_INVALID_PARAMETER;\r
+	}\r
+\r
+	/* Only supported qp types allowed */\r
+	switch( p_cm_req->qp_type )\r
+	{\r
+	default:\r
+		AL_TRACE_EXIT( AL_DBG_ERROR, ("Invalid qp_type.\n") );\r
+		return IB_INVALID_SETTING;\r
+\r
+	case IB_QPT_RELIABLE_CONN:\r
+	case IB_QPT_UNRELIABLE_CONN:\r
+		/* The QP handle must be valid and match the declared QP type. */\r
+		if( AL_OBJ_INVALID_HANDLE( p_cm_req->h_qp, AL_OBJ_TYPE_H_QP ) ||\r
+			(p_cm_req->h_qp->type != p_cm_req->qp_type) )\r
+		{\r
+			AL_TRACE_EXIT( AL_DBG_ERROR, ("IB_INVALID_QP_HANDLE\n") );\r
+			return IB_INVALID_QP_HANDLE;\r
+		}\r
+\r
+		status = __cep_conn_req( qp_get_al( p_cm_req->h_qp ), p_cm_req );\r
+		break;\r
+\r
+	case IB_QPT_UNRELIABLE_DGRM:\r
+		if( AL_OBJ_INVALID_HANDLE( p_cm_req->h_al, AL_OBJ_TYPE_H_AL ) )\r
+		{\r
+			AL_TRACE_EXIT( AL_DBG_ERROR, ("IB_INVALID_AL_HANDLE\n") );\r
+			return IB_INVALID_AL_HANDLE;\r
+		}\r
+		status = IB_UNSUPPORTED;\r
+//		status = cm_sidr_req( p_cm_req->h_al, p_cm_req );\r
+		break;\r
+	}\r
+\r
+	AL_EXIT( AL_DBG_CM );\r
+	return status;\r
+}\r
+\r
+\r
+/*\r
+ * Note: we pass in the QP handle separately because it comes form different\r
+ * sources.  It comes from the ib_cm_rep_t structure in the ib_cm_rep path, and\r
+ * from the ib_cm_handle_t structure in the ib_cm_rtu path.\r
+ *\r
+ * Transitions the QP RTR -> RTS using attributes computed by the CEP,\r
+ * applying any caller overrides (access rights, queue depths) that are\r
+ * non-zero.\r
+ */\r
+static ib_api_status_t\r
+__cep_rts_qp(\r
+	IN		const	ib_cm_handle_t				h_cm,\r
+	IN		const	ib_qp_handle_t				h_qp,\r
+	IN		const	ib_access_t					access_ctrl,\r
+	IN		const	uint32_t					sq_depth,\r
+	IN		const	uint32_t					rq_depth )\r
+{\r
+	ib_api_status_t	status;\r
+	ib_qp_mod_t		qp_mod;\r
+\r
+	AL_ENTER( AL_DBG_CM );\r
+\r
+	/* Set the QP to RTR. */\r
+	status = al_cep_get_rtr_attr( h_cm.h_al, h_cm.cid, &qp_mod );\r
+	if( status != IB_SUCCESS )\r
+	{\r
+		AL_TRACE_EXIT( AL_DBG_ERROR,\r
+			("al_cep_get_rtr_attr returned %s\n", ib_get_err_str( status )) );\r
+		return status;\r
+	}\r
+\r
+	/* Zero overrides mean "keep the CEP-computed attribute". */\r
+	if( access_ctrl )\r
+	{\r
+		qp_mod.state.rtr.access_ctrl = access_ctrl;\r
+		qp_mod.state.rtr.opts |= IB_MOD_QP_ACCESS_CTRL;\r
+	}\r
+\r
+	if( sq_depth )\r
+	{\r
+		qp_mod.state.rtr.sq_depth = sq_depth;\r
+		qp_mod.state.rtr.opts |= IB_MOD_QP_SQ_DEPTH;\r
+	}\r
+\r
+	if( rq_depth )\r
+	{\r
+		qp_mod.state.rtr.rq_depth = rq_depth;\r
+		qp_mod.state.rtr.opts |= IB_MOD_QP_RQ_DEPTH;\r
+	}\r
+\r
+	status = ib_modify_qp( h_qp, &qp_mod );\r
+	if( status != IB_SUCCESS )\r
+	{\r
+		AL_TRACE_EXIT( AL_DBG_ERROR,\r
+			("ib_modify_qp to RTR returned %s.\n", ib_get_err_str(status) ) );\r
+		return status;\r
+	}\r
+\r
+	/* Set the QP to RTS. */\r
+	status = al_cep_get_rts_attr( h_cm.h_al, h_cm.cid, &qp_mod );\r
+	if( status != IB_SUCCESS )\r
+	{\r
+		AL_TRACE_EXIT( AL_DBG_ERROR,\r
+			("al_cep_get_rts_attr returned %s\n", ib_get_err_str( status )) );\r
+		return status;\r
+	}\r
+\r
+	status = ib_modify_qp( h_qp, &qp_mod );\r
+	if( status != IB_SUCCESS )\r
+	{\r
+		AL_TRACE_EXIT( AL_DBG_ERROR,\r
+			("ib_modify_qp to RTS returned %s.\n", ib_get_err_str(status) ) );\r
+		return status;\r
+	}\r
+\r
+	AL_EXIT( AL_DBG_CM );\r
+	return IB_SUCCESS;\r
+}\r
+\r
+\r
+/*\r
+ * Passive-side accept preparation: format the REP on the CEP, move the\r
+ * QP to INIT, prepost any receives the user supplied, then take the QP\r
+ * through RTR and RTS.\r
+ */\r
+static ib_api_status_t\r
+__cep_pre_rep(\r
+	IN		const	ib_cm_handle_t				h_cm,\r
+	IN		const	ib_cm_rep_t* const			p_cm_rep )\r
+{\r
+	ib_api_status_t		status;\r
+	al_conn_qp_t		*p_qp;\r
+	ib_qp_mod_t			qp_mod;\r
+\r
+	AL_ENTER( AL_DBG_CM );\r
+\r
+	p_qp = (al_conn_qp_t*)p_cm_rep->h_qp;\r
+\r
+	status = al_cep_pre_rep(\r
+		h_cm.h_al, h_cm.cid, p_cm_rep->h_qp, p_cm_rep, &qp_mod );\r
+	if( status != IB_SUCCESS )\r
+	{\r
+		AL_TRACE_EXIT( AL_DBG_ERROR,\r
+			("al_cep_pre_rep returned %s.\n", ib_get_err_str( status )) );\r
+		return status;\r
+	}\r
+\r
+	/* Transition the QP to the INIT state. */\r
+	qp_mod.state.init.access_ctrl = p_cm_rep->access_ctrl;\r
+	status = __cep_init_qp( p_cm_rep->h_qp, &qp_mod );\r
+	if( status != IB_SUCCESS )\r
+	{\r
+		AL_TRACE_EXIT( AL_DBG_ERROR,\r
+			("cm_init_qp returned %s.\n", ib_get_err_str(status)) );\r
+		return status;\r
+	}\r
+\r
+	/* Prepost receives. */\r
+	if( p_cm_rep->p_recv_wr )\r
+	{\r
+		status = ib_post_recv( p_cm_rep->h_qp, p_cm_rep->p_recv_wr,\r
+			(ib_recv_wr_t** __ptr64)p_cm_rep->pp_recv_failure );\r
+		if( status != IB_SUCCESS )\r
+		{\r
+			AL_TRACE_EXIT( AL_DBG_ERROR,\r
+				("ib_post_recv returned %s.\n", ib_get_err_str(status)) );\r
+			return status;\r
+		}\r
+	}\r
+\r
+	/* Transition the QP to the RTR and RTS states. */\r
+	status = __cep_rts_qp( h_cm, p_cm_rep->h_qp,\r
+		p_cm_rep->access_ctrl, p_cm_rep->sq_depth, p_cm_rep->rq_depth );\r
+	if( status != IB_SUCCESS )\r
+	{\r
+		AL_TRACE( AL_DBG_ERROR,\r
+			("__cep_rts_qp returned %s.\n", ib_get_err_str(status)) );\r
+	}\r
+\r
+	AL_EXIT( AL_DBG_CM );\r
+	return status;\r
+}\r
+\r
+\r
+/*\r
+ * Passive-side accept: bind the CEP to the QP (exactly once), run the\r
+ * QP through its state machine and send the REP.  On failure the\r
+ * connection is rejected and the CEP destroyed.\r
+ */\r
+static ib_api_status_t\r
+__cep_conn_rep(\r
+	IN				ib_cm_handle_t				h_cm,\r
+	IN		const	ib_cm_rep_t* const			p_cm_rep )\r
+{\r
+	ib_api_status_t		status;\r
+	net32_t				cid;\r
+\r
+	AL_ENTER( AL_DBG_CM );\r
+\r
+	/* Atomically bind the CEP to the QP - fails if already connected. */\r
+	cid = cl_atomic_comp_xchg(\r
+		&((al_conn_qp_t*)p_cm_rep->h_qp)->cid, AL_INVALID_CID, h_cm.cid );\r
+\r
+	if( cid != AL_INVALID_CID )\r
+	{\r
+		/* We don't destroy the CEP to allow the user to retry accepting. */\r
+		AL_TRACE_EXIT( AL_DBG_ERROR, ("QP already connected.\n") );\r
+		return IB_INVALID_QP_HANDLE;\r
+	}\r
+\r
+	/* Store the CM callbacks. */\r
+	((al_conn_qp_t*)p_cm_rep->h_qp)->pfn_cm_rej_cb = p_cm_rep->pfn_cm_rej_cb;\r
+	((al_conn_qp_t*)p_cm_rep->h_qp)->pfn_cm_mra_cb = p_cm_rep->pfn_cm_mra_cb;\r
+	((al_conn_qp_t*)p_cm_rep->h_qp)->pfn_cm_rtu_cb = p_cm_rep->pfn_cm_rtu_cb;\r
+	((al_conn_qp_t*)p_cm_rep->h_qp)->pfn_cm_lap_cb = p_cm_rep->pfn_cm_lap_cb;\r
+	((al_conn_qp_t*)p_cm_rep->h_qp)->pfn_cm_dreq_cb = p_cm_rep->pfn_cm_dreq_cb;\r
+\r
+	/* Transition QP through state machine */\r
+	status = __cep_pre_rep( h_cm, p_cm_rep );\r
+	if( status != IB_SUCCESS )\r
+	{\r
+		AL_TRACE_EXIT( AL_DBG_ERROR,\r
+			("__cep_pre_req returned %s\n", ib_get_err_str(status)) );\r
+		goto err;\r
+	}\r
+\r
+	status = al_cep_send_rep( h_cm.h_al, h_cm.cid );\r
+	if( status != IB_SUCCESS )\r
+	{\r
+		AL_TRACE_EXIT( AL_DBG_ERROR,\r
+			("al_cep_send_rep returned %s\n", ib_get_err_str(status)) );\r
+err:\r
+		/* Unbind the QP from the CEP. */\r
+		cl_atomic_xchg(\r
+			&((al_conn_qp_t*)p_cm_rep->h_qp)->cid, AL_INVALID_CID );\r
+\r
+		ref_al_obj( &p_cm_rep->h_qp->obj );\r
+\r
+		/* Reject and abort the connection. */\r
+		al_cep_rej( h_cm.h_al, h_cm.cid, IB_REJ_INSUF_QP, NULL, 0, NULL, 0 );\r
+\r
+		/*\r
+		 * Release the reference taken on p_cm_rep->h_qp above - not\r
+		 * h_cm.h_qp, which may be NULL (or a different QP) when the\r
+		 * REQ arrived via a listen.  Using h_cm.h_qp here would leak\r
+		 * the reference and dereference an invalid handle.\r
+		 */\r
+		if( al_destroy_cep( h_cm.h_al, h_cm.cid, deref_al_obj ) != IB_SUCCESS )\r
+			deref_al_obj( &p_cm_rep->h_qp->obj );\r
+	}\r
+\r
+	AL_EXIT( AL_DBG_CM );\r
+	return status;\r
+}\r
+\r
+\r
+/*\r
+ * Public entry point: reply to a received REQ.  Validates the reply\r
+ * record; on validation failure the connection is rejected and the CEP\r
+ * destroyed, otherwise the accept path is started.\r
+ */\r
+ib_api_status_t\r
+ib_cm_rep(\r
+	IN		const	ib_cm_handle_t				h_cm_req,\r
+	IN		const	ib_cm_rep_t* const			p_cm_rep )\r
+{\r
+	ib_api_status_t		status;\r
+\r
+	AL_ENTER( AL_DBG_CM );\r
+\r
+	if( !p_cm_rep )\r
+	{\r
+		AL_TRACE_EXIT( AL_DBG_ERROR, ("IB_INVALID_PARAMETER\n") );\r
+		return IB_INVALID_PARAMETER;\r
+	}\r
+\r
+	/* Only supported qp types allowed */\r
+	status = IB_SUCCESS;\r
+	switch( p_cm_rep->qp_type )\r
+	{\r
+	default:\r
+		AL_TRACE( AL_DBG_ERROR, ("Invalid qp_type.\n") );\r
+		status = IB_INVALID_SETTING;\r
+		break;\r
+\r
+	case IB_QPT_RELIABLE_CONN:\r
+	case IB_QPT_UNRELIABLE_CONN:\r
+		if( AL_OBJ_INVALID_HANDLE( p_cm_rep->h_qp, AL_OBJ_TYPE_H_QP ) ||\r
+			(p_cm_rep->h_qp->type != p_cm_rep->qp_type) )\r
+		{\r
+			AL_TRACE_EXIT( AL_DBG_ERROR, ("IB_INVALID_QP_HANDLE\n") );\r
+			status = IB_INVALID_QP_HANDLE;\r
+		}\r
+		/*\r
+		 * Only dereference h_qp if the handle check above passed -\r
+		 * otherwise h_qp may be NULL or garbage.\r
+		 */\r
+		else if( p_cm_rep->h_qp->obj.h_al != h_cm_req.h_al )\r
+		{\r
+			AL_TRACE_EXIT( AL_DBG_ERROR, ("IB_INVALID_QP_HANDLE\n") );\r
+			status = IB_INVALID_QP_HANDLE;\r
+		}\r
+		break;\r
+\r
+	case IB_QPT_UNRELIABLE_DGRM:\r
+		if( ( p_cm_rep->status == IB_SIDR_SUCCESS ) &&\r
+			(AL_OBJ_INVALID_HANDLE( p_cm_rep->h_qp, AL_OBJ_TYPE_H_QP ) ||\r
+			(p_cm_rep->h_qp->type != p_cm_rep->qp_type) ) )\r
+		{\r
+			AL_TRACE_EXIT( AL_DBG_ERROR, ("IB_INVALID_QP_HANDLE\n") );\r
+			status = IB_INVALID_QP_HANDLE;\r
+		}\r
+		break;\r
+	}\r
+\r
+	if( status != IB_SUCCESS )\r
+	{\r
+		/* Validation failed - reject the connection and tear down the CEP. */\r
+		al_cep_rej(\r
+			h_cm_req.h_al, h_cm_req.cid, IB_REJ_INSUF_QP, NULL, 0, NULL, 0 );\r
+		al_destroy_cep( h_cm_req.h_al, h_cm_req.cid, NULL );\r
+\r
+		AL_EXIT( AL_DBG_CM );\r
+		return status;\r
+	}\r
+\r
+	if( p_cm_rep->qp_type == IB_QPT_UNRELIABLE_DGRM )\r
+		status = IB_UNSUPPORTED;//status = cm_sidr_rep( p_conn, p_cm_rep );\r
+	else\r
+		status = __cep_conn_rep( h_cm_req, p_cm_rep );\r
+\r
+	AL_EXIT( AL_DBG_CM );\r
+	return status;\r
+}\r
+\r
+\r
+/*\r
+ * Public entry point: complete connection establishment on the active\r
+ * side.  Moves the QP to RTS and sends the RTU; on failure the\r
+ * connection is rejected and the CEP torn down.\r
+ */\r
+ib_api_status_t\r
+ib_cm_rtu(\r
+	IN		const	ib_cm_handle_t				h_cm_rep,\r
+	IN		const	ib_cm_rtu_t* const			p_cm_rtu )\r
+{\r
+	ib_api_status_t		status;\r
+	net32_t				cid;\r
+\r
+	AL_ENTER( AL_DBG_CM );\r
+\r
+	if( !p_cm_rtu )\r
+	{\r
+		AL_TRACE_EXIT( AL_DBG_ERROR, ("IB_INVALID_PARAMETER\n") );\r
+		return IB_INVALID_PARAMETER;\r
+	}\r
+\r
+	///*\r
+	// * Call invalid if event is still processed.\r
+	// * User may have called rtu in rep callback.\r
+	// */\r
+	//if( p_conn->p_sync_event )\r
+	//{\r
+	//	CL_TRACE_EXIT( AL_DBG_ERROR, g_al_dbg_lvl,\r
+	//		("Connection in invalid state. Sync call in progress.\n" ) );\r
+\r
+	//	cm_res_release( p_conn );\r
+	//	__deref_conn( p_conn );\r
+	//	return IB_INVALID_STATE;\r
+	//}\r
+	/* Store the callbacks used for the established-connection phase. */\r
+	((al_conn_qp_t*)h_cm_rep.h_qp)->pfn_cm_apr_cb = p_cm_rtu->pfn_cm_apr_cb;\r
+	((al_conn_qp_t*)h_cm_rep.h_qp)->pfn_cm_dreq_cb = p_cm_rtu->pfn_cm_dreq_cb;\r
+\r
+	/* Transition QP through state machine */\r
+	status = __cep_rts_qp( h_cm_rep, h_cm_rep.h_qp,\r
+		p_cm_rtu->access_ctrl, p_cm_rtu->sq_depth, p_cm_rtu->rq_depth );\r
+	if( status != IB_SUCCESS )\r
+	{\r
+		AL_TRACE_EXIT( AL_DBG_ERROR,\r
+			("__cep_rts_qp returned %s.\n", ib_get_err_str( status )) );\r
+		goto err;\r
+	}\r
+\r
+	status = al_cep_rtu( h_cm_rep.h_al, h_cm_rep.cid,\r
+		p_cm_rtu->p_rtu_pdata, p_cm_rtu->rtu_length );\r
+	if( status != IB_SUCCESS )\r
+	{\r
+err:\r
+		/* Reject and abort the connection. */\r
+		al_cep_rej(\r
+			h_cm_rep.h_al, h_cm_rep.cid, IB_REJ_INSUF_QP, NULL, 0, NULL, 0 );\r
+\r
+		/* Park the QP in timewait and unbind it from the CEP. */\r
+		__cep_timewait_qp( h_cm_rep.h_qp );\r
+\r
+		cid = cl_atomic_xchg(\r
+			&((al_conn_qp_t*)h_cm_rep.h_qp)->cid, AL_INVALID_CID );\r
+\r
+		CL_ASSERT( cid == h_cm_rep.cid );\r
+\r
+		ref_al_obj( &h_cm_rep.h_qp->obj );\r
+		if( al_destroy_cep(\r
+			h_cm_rep.h_al, h_cm_rep.cid, deref_al_obj ) != IB_SUCCESS )\r
+		{\r
+			deref_al_obj( &h_cm_rep.h_qp->obj );\r
+		}\r
+\r
+		AL_TRACE_EXIT( AL_DBG_ERROR,\r
+			("al_cep_rtu returned %s.\n", ib_get_err_str( status )) );\r
+		return status;\r
+	}\r
+\r
+	AL_EXIT( AL_DBG_CM );\r
+	return status;\r
+}\r
+\r
+\r
+/*\r
+ * Public entry point: send an MRA (message receipt acknowledgement) to\r
+ * ask the remote side to extend its timeout for the outstanding message.\r
+ */\r
+ib_api_status_t\r
+ib_cm_mra(\r
+	IN		const	ib_cm_handle_t				h_cm,\r
+	IN		const	ib_cm_mra_t* const			p_cm_mra )\r
+{\r
+	ib_api_status_t		status;\r
+\r
+	AL_ENTER( AL_DBG_CM );\r
+\r
+	if( !p_cm_mra )\r
+	{\r
+		AL_TRACE_EXIT( AL_DBG_ERROR, ("IB_INVALID_PARAMETER\n") );\r
+		return IB_INVALID_PARAMETER;\r
+	}\r
+\r
+	status = al_cep_mra( h_cm.h_al, h_cm.cid, p_cm_mra );\r
+	if( status != IB_SUCCESS )\r
+	{\r
+		AL_TRACE( AL_DBG_ERROR,\r
+			("al_cep_mra returned %s\n", ib_get_err_str( status )) );\r
+	}\r
+\r
+	AL_EXIT( AL_DBG_CM );\r
+	return status;\r
+}\r
+\r
+\r
+/*\r
+ * Public entry point: reject a connection.  Sends the REJ and, if a QP\r
+ * was bound to the connection, parks it in timewait and tears down the\r
+ * CEP.\r
+ */\r
+ib_api_status_t\r
+ib_cm_rej(\r
+	IN		const	ib_cm_handle_t				h_cm,\r
+	IN		const	ib_cm_rej_t* const			p_cm_rej )\r
+{\r
+	ib_api_status_t		status;\r
+	net32_t				cid;\r
+\r
+	AL_ENTER( AL_DBG_CM );\r
+\r
+	if( !p_cm_rej )\r
+	{\r
+		AL_TRACE_EXIT( AL_DBG_ERROR, ("IB_INVALID_PARAMETER\n") );\r
+		return IB_INVALID_PARAMETER;\r
+	}\r
+\r
+	/*\r
+	 * The ARI (additional rejection info) is optional - guard the\r
+	 * dereference so a REJ without ARI does not crash.\r
+	 */\r
+	status = al_cep_rej( h_cm.h_al, h_cm.cid, p_cm_rej->rej_status,\r
+		p_cm_rej->p_ari ? p_cm_rej->p_ari->data : NULL,\r
+		p_cm_rej->p_ari ? p_cm_rej->ari_length : 0,\r
+		p_cm_rej->p_rej_pdata, p_cm_rej->rej_length );\r
+\r
+	if( h_cm.h_qp )\r
+	{\r
+		/* Park the QP in timewait and unbind it from the CEP. */\r
+		__cep_timewait_qp( h_cm.h_qp );\r
+\r
+		cid = cl_atomic_xchg(\r
+			&((al_conn_qp_t*)h_cm.h_qp)->cid, AL_INVALID_CID );\r
+		CL_ASSERT( cid == h_cm.cid );\r
+\r
+		ref_al_obj( &h_cm.h_qp->obj );\r
+		if( al_destroy_cep( h_cm.h_al, h_cm.cid, deref_al_obj ) != IB_SUCCESS )\r
+			deref_al_obj( &h_cm.h_qp->obj );\r
+	}\r
+\r
+	AL_EXIT( AL_DBG_CM );\r
+	return status;\r
+}\r
+\r
+\r
+/*\r
+ * Public entry point: request disconnection.  Sends a DREQ; if the send\r
+ * fails for anything other than a bad call, the connection is released\r
+ * locally as if the disconnect had completed.\r
+ */\r
+ib_api_status_t\r
+ib_cm_dreq(\r
+	IN		const	ib_cm_dreq_t* const			p_cm_dreq )\r
+{\r
+	ib_api_status_t		status;\r
+	net32_t				cid;\r
+\r
+	AL_ENTER( AL_DBG_CM );\r
+\r
+	if( !p_cm_dreq )\r
+	{\r
+		AL_TRACE_EXIT( AL_DBG_ERROR, ("IB_INVALID_PARAMETER\n") );\r
+		return IB_INVALID_PARAMETER;\r
+	}\r
+\r
+	/* Only supported qp types allowed */\r
+	switch( p_cm_dreq->qp_type )\r
+	{\r
+	default:\r
+		AL_TRACE_EXIT( AL_DBG_ERROR, ("Invalid qp_type.\n") );\r
+		return IB_INVALID_SETTING;\r
+\r
+	case IB_QPT_RELIABLE_CONN:\r
+	case IB_QPT_UNRELIABLE_CONN:\r
+		if( AL_OBJ_INVALID_HANDLE( p_cm_dreq->h_qp, AL_OBJ_TYPE_H_QP ) ||\r
+			(p_cm_dreq->h_qp->type != p_cm_dreq->qp_type) )\r
+		{\r
+			AL_TRACE_EXIT( AL_DBG_ERROR, ("IB_INVALID_QP_HANDLE\n") );\r
+			return IB_INVALID_QP_HANDLE;\r
+		}\r
+		break;\r
+	}\r
+\r
+	/* Store the callback pointers. */\r
+	((al_conn_qp_t*)p_cm_dreq->h_qp)->pfn_cm_drep_cb =\r
+		p_cm_dreq->pfn_cm_drep_cb;\r
+\r
+	status = al_cep_dreq( p_cm_dreq->h_qp->obj.h_al,\r
+		((al_conn_qp_t*)p_cm_dreq->h_qp)->cid,\r
+		p_cm_dreq->p_dreq_pdata, p_cm_dreq->dreq_length );\r
+	switch( status )\r
+	{\r
+	case IB_INVALID_STATE:\r
+	case IB_INVALID_HANDLE:\r
+	case IB_INVALID_PARAMETER:\r
+	case IB_INVALID_SETTING:\r
+		/* Bad call - don't touch the QP. */\r
+		break;\r
+\r
+	case IB_SUCCESS:\r
+		/* Wait for the DREP or timeout. */\r
+		break;\r
+\r
+	default:\r
+		/*\r
+		 * If we failed to send the DREQ, just release the connection.  It's\r
+		 * unreliable anyway.  The local port may be down.  Note that we could\r
+		 * not send the DREQ, but we still could have received one.  The DREQ\r
+		 * will have a reference on the connection until the user calls\r
+		 * ib_cm_drep.\r
+		 */\r
+		__cep_timewait_qp( p_cm_dreq->h_qp );\r
+\r
+		cid = cl_atomic_xchg(\r
+			&((al_conn_qp_t*)p_cm_dreq->h_qp)->cid, AL_INVALID_CID );\r
+		ref_al_obj( &p_cm_dreq->h_qp->obj );\r
+		if( cid == AL_INVALID_CID || al_destroy_cep(\r
+			p_cm_dreq->h_qp->obj.h_al, cid, deref_al_obj ) != IB_SUCCESS )\r
+		{\r
+			deref_al_obj( &p_cm_dreq->h_qp->obj );\r
+		}\r
+		/* The disconnect is complete from the caller's perspective. */\r
+		status = IB_SUCCESS;\r
+	}\r
+\r
+	AL_EXIT( AL_DBG_CM );\r
+	return status;\r
+}\r
+\r
+\r
+\r
+/*\r
+ * Public entry point: reply to a received DREQ.  Sends the DREP and\r
+ * releases the connection; out-of-resource send failures are treated as\r
+ * success since the remote side will time out and disconnect anyway.\r
+ */\r
+ib_api_status_t\r
+ib_cm_drep(\r
+	IN		const	ib_cm_handle_t				h_cm_dreq,\r
+	IN		const	ib_cm_drep_t* const			p_cm_drep )\r
+{\r
+	ib_api_status_t		status;\r
+	net32_t				cid;\r
+\r
+	AL_ENTER( AL_DBG_CM );\r
+\r
+	if( !p_cm_drep )\r
+	{\r
+		AL_TRACE_EXIT( AL_DBG_ERROR, ("IB_INVALID_PARAMETER\n") );\r
+		return IB_INVALID_PARAMETER;\r
+	}\r
+\r
+	status = al_cep_drep( h_cm_dreq.h_al, h_cm_dreq.cid, p_cm_drep );\r
+	switch( status )\r
+	{\r
+	case IB_INVALID_SETTING:\r
+	case IB_INVALID_HANDLE:\r
+	case IB_INVALID_PARAMETER:\r
+	case IB_INVALID_STATE:\r
+		/* Bad call - don't touch the QP. */\r
+		break;\r
+\r
+	default:\r
+		/*\r
+		 * Some other out-of-resource error - continue as if we succeeded in\r
+		 * sending the DREP.\r
+		 */\r
+		status = IB_SUCCESS;\r
+		/* Fall through */\r
+	case IB_SUCCESS:\r
+		/* Park the QP in timewait and unbind it from the CEP. */\r
+		__cep_timewait_qp( h_cm_dreq.h_qp );\r
+\r
+		cid = cl_atomic_xchg(\r
+			&((al_conn_qp_t*)h_cm_dreq.h_qp)->cid, AL_INVALID_CID );\r
+		CL_ASSERT( cid == h_cm_dreq.cid );\r
+		ref_al_obj( &h_cm_dreq.h_qp->obj );\r
+		if( al_destroy_cep(\r
+			h_cm_dreq.h_al, h_cm_dreq.cid, deref_al_obj ) != IB_SUCCESS )\r
+		{\r
+			deref_al_obj( &h_cm_dreq.h_qp->obj );\r
+		}\r
+	}\r
+\r
+	AL_EXIT( AL_DBG_CM );\r
+	return status;\r
+}\r
+\r
+\r
+/*\r
+ * Issue a LAP (load alternate path) request on an established connection.\r
+ * Only connected (RC/UC) QP types are accepted, and the QP's type must\r
+ * match the type stated in the request.  The actual send is delegated to\r
+ * the CEP layer via the QP's connection ID.\r
+ */\r
+ib_api_status_t\r
+ib_cm_lap(\r
+	IN		const	ib_cm_lap_t* const			p_cm_lap )\r
+{\r
+	ib_api_status_t		status;\r
+\r
+	AL_ENTER( AL_DBG_CM );\r
+\r
+	if( !p_cm_lap )\r
+	{\r
+		AL_TRACE_EXIT( AL_DBG_ERROR, ("IB_INVALID_PARAMETER\n") );\r
+		return IB_INVALID_PARAMETER;\r
+	}\r
+\r
+	/* Only supported qp types allowed */\r
+	switch( p_cm_lap->qp_type )\r
+	{\r
+	default:\r
+		AL_TRACE_EXIT( AL_DBG_ERROR, ("Invalid qp_type.\n") );\r
+		return IB_INVALID_SETTING;\r
+\r
+	case IB_QPT_RELIABLE_CONN:\r
+	case IB_QPT_UNRELIABLE_CONN:\r
+		/* The QP handle must be valid and agree with the declared type. */\r
+		if( AL_OBJ_INVALID_HANDLE( p_cm_lap->h_qp, AL_OBJ_TYPE_H_QP ) ||\r
+			(p_cm_lap->h_qp->type != p_cm_lap->qp_type) )\r
+		{\r
+			AL_TRACE_EXIT( AL_DBG_ERROR, ("IB_INVALID_QP_HANDLE\n") );\r
+			return IB_INVALID_QP_HANDLE;\r
+		}\r
+		break;\r
+	}\r
+\r
+	status = al_cep_lap( p_cm_lap->h_qp->obj.h_al,\r
+		((al_conn_qp_t*)p_cm_lap->h_qp)->cid, p_cm_lap );\r
+	if( status != IB_SUCCESS )\r
+	{\r
+		AL_TRACE( AL_DBG_ERROR,\r
+			("al_cep_lap returned %s.\n", ib_get_err_str( status )) );\r
+	}\r
+\r
+	AL_EXIT( AL_DBG_CM );\r
+	return status;\r
+}\r
+\r
+\r
+/*\r
+ * Reply to a received LAP with an APR (alternate path response).\r
+ * Sequence: validate the request, have the CEP layer format the QP\r
+ * modify structure (al_cep_pre_apr), load the alternate path into the\r
+ * QP via ib_modify_qp, then send the APR MAD (al_cep_send_apr).\r
+ */\r
+ib_api_status_t\r
+ib_cm_apr(\r
+	IN		const	ib_cm_handle_t				h_cm_lap,\r
+	IN		const	ib_cm_apr_t* const			p_cm_apr )\r
+{\r
+	ib_api_status_t		status;\r
+	ib_qp_mod_t			qp_mod;\r
+\r
+	AL_ENTER( AL_DBG_CM );\r
+\r
+	if( !p_cm_apr )\r
+	{\r
+		AL_TRACE_EXIT( AL_DBG_ERROR, ("IB_INVALID_PARAMETER\n") );\r
+		return IB_INVALID_PARAMETER;\r
+	}\r
+\r
+	/* Only supported qp types allowed */\r
+	switch( p_cm_apr->qp_type )\r
+	{\r
+	default:\r
+		AL_TRACE_EXIT( AL_DBG_ERROR, ("Invalid qp_type.\n") );\r
+		return IB_INVALID_SETTING;\r
+\r
+	case IB_QPT_RELIABLE_CONN:\r
+	case IB_QPT_UNRELIABLE_CONN:\r
+		/* The QP handle must be valid and agree with the declared type. */\r
+		if( AL_OBJ_INVALID_HANDLE( p_cm_apr->h_qp, AL_OBJ_TYPE_H_QP ) ||\r
+			(p_cm_apr->h_qp->type != p_cm_apr->qp_type) )\r
+		{\r
+			AL_TRACE_EXIT( AL_DBG_ERROR, ("IB_INVALID_QP_HANDLE\n") );\r
+			return IB_INVALID_QP_HANDLE;\r
+		}\r
+		break;\r
+	}\r
+\r
+	/* Let the CEP layer validate state and fill in the QP modify request. */\r
+	status = al_cep_pre_apr( h_cm_lap.h_al, h_cm_lap.cid, p_cm_apr, &qp_mod );\r
+	if( status != IB_SUCCESS )\r
+	{\r
+		AL_TRACE_EXIT( AL_DBG_ERROR,\r
+			("al_cep_pre_apr returned %s.\n", ib_get_err_str( status )) );\r
+		return status;\r
+	}\r
+\r
+	/* Load alt path into QP */\r
+	status = ib_modify_qp( h_cm_lap.h_qp, &qp_mod );\r
+	if( status != IB_SUCCESS )\r
+	{\r
+		AL_TRACE_EXIT( AL_DBG_ERROR,\r
+			("ib_modify_qp for LAP returned %s.\n",\r
+			ib_get_err_str( status )) );\r
+		return status;\r
+	}\r
+	\r
+	/* The alternate path is loaded - send the APR to the remote side. */\r
+	status = al_cep_send_apr( h_cm_lap.h_al, h_cm_lap.cid );\r
+\r
+	AL_EXIT( AL_DBG_CM );\r
+	return status;\r
+}\r
+\r
+\r
+/*\r
+ * Force an automatic path migration (APM) transition on a connected QP\r
+ * by issuing a modify-QP that sets the APM state to IB_APM_MIGRATED.\r
+ */\r
+ib_api_status_t\r
+ib_force_apm(\r
+	IN		const	ib_qp_handle_t				h_qp )\r
+{\r
+	ib_api_status_t		status;\r
+	al_conn_qp_t		*p_conn_qp;\r
+	ib_qp_mod_t			qp_mod;\r
+\r
+	AL_ENTER( AL_DBG_CM );\r
+\r
+	if( AL_OBJ_INVALID_HANDLE( h_qp, AL_OBJ_TYPE_H_QP ) )\r
+	{\r
+		AL_TRACE_EXIT( AL_DBG_ERROR, ("IB_INVALID_QP_HANDLE\n") );\r
+		return IB_INVALID_QP_HANDLE;\r
+	}\r
+\r
+	/* NOTE(review): p_conn_qp is computed but not otherwise used here. */\r
+	p_conn_qp = PARENT_STRUCT( h_qp, al_conn_qp_t, qp );\r
+	cl_memclr( &qp_mod, sizeof(ib_qp_mod_t) );\r
+	qp_mod.req_state = IB_QPS_RTS;\r
+	qp_mod.state.rts.apm_state = IB_APM_MIGRATED;\r
+	qp_mod.state.rts.opts = IB_MOD_QP_APM_STATE;\r
+\r
+	/* RTS-to-RTS modify that only changes the APM state to MIGRATED. */\r
+	status = ib_modify_qp( h_qp, &qp_mod );\r
+\r
+	AL_EXIT( AL_DBG_CM );\r
+	return status;\r
+}\r
+\r
+\r
+/*\r
+ * Pre-destroy callback for a listen object: tear down its CEP.  The\r
+ * reference taken here is released by the al_destroy_cep completion\r
+ * callback, or immediately if al_destroy_cep rejects the request.\r
+ */\r
+static void\r
+__destroying_listen(\r
+	IN				al_obj_t*					p_obj )\r
+{\r
+	al_listen_t		*p_listen;\r
+\r
+	p_listen = PARENT_STRUCT( p_obj, al_listen_t, obj );\r
+\r
+	/* Destroy the listen's CEP. */\r
+	ref_al_obj( p_obj );\r
+	if( al_destroy_cep(\r
+		p_obj->h_al, p_listen->cid, deref_al_obj ) != IB_SUCCESS )\r
+	{\r
+		deref_al_obj( p_obj );\r
+	}\r
+}\r
+\r
+\r
+\r
+/*\r
+ * Final cleanup for a listen object: destroy the embedded al_obj and\r
+ * release the containing allocation made in __cep_listen.\r
+ */\r
+static void\r
+__free_listen(\r
+	IN				al_obj_t*					p_obj )\r
+{\r
+	destroy_al_obj( p_obj );\r
+	cl_free( PARENT_STRUCT( p_obj, al_listen_t, obj ) );\r
+}\r
+\r
+\r
+/*\r
+ * Worker for ib_cm_listen: allocate and initialize an al_listen_t,\r
+ * attach it to the AL instance, create a CEP for it, and start the\r
+ * listen.  On any failure after init_al_obj, the object's pfn_destroy\r
+ * path (__destroying_listen/__free_listen) performs the cleanup.\r
+ * On success *ph_cm_listen holds a reference from init_al_obj that the\r
+ * caller (ib_cm_listen) releases.\r
+ */\r
+static ib_api_status_t\r
+__cep_listen(\r
+	IN		const	ib_al_handle_t				h_al,\r
+	IN		const	ib_cm_listen_t* const		p_cm_listen,\r
+	IN		const	ib_pfn_listen_err_cb_t		pfn_listen_err_cb,\r
+	IN		const	void* const					listen_context,\r
+		OUT			ib_listen_handle_t* const	ph_cm_listen )\r
+{\r
+	ib_api_status_t		status;\r
+	al_listen_t			*p_listen;\r
+	ib_cep_listen_t		cep_listen;\r
+\r
+	AL_ENTER( AL_DBG_CM );\r
+\r
+	UNUSED_PARAM( pfn_listen_err_cb );\r
+\r
+	/* Allocate the listen object. */\r
+	p_listen = (al_listen_t*)cl_zalloc( sizeof(al_listen_t) );\r
+	if( !p_listen )\r
+	{\r
+		AL_EXIT( AL_DBG_CM );\r
+		return IB_INSUFFICIENT_MEMORY;\r
+	}\r
+\r
+	/* Copy the listen request information for matching incoming requests. */\r
+	p_listen->pfn_cm_req_cb = p_cm_listen->pfn_cm_req_cb;\r
+\r
+	/* valid for ud qp_type only */\r
+	p_listen->sidr_context = p_cm_listen->sidr_context;\r
+\r
+	/*\r
+	 * Initialize the listen object with the standard destroy pair:\r
+	 * __destroying_listen tears down the CEP, __free_listen releases\r
+	 * the memory.\r
+	 */\r
+	construct_al_obj( &p_listen->obj, AL_OBJ_TYPE_H_LISTEN );\r
+	status = init_al_obj( &p_listen->obj, listen_context, TRUE,\r
+		__destroying_listen, NULL, __free_listen );\r
+	if( status != IB_SUCCESS )\r
+	{\r
+		__free_listen( &p_listen->obj );\r
+		AL_EXIT( AL_DBG_CM );\r
+		return status;\r
+	}\r
+\r
+	/* Add the listen to the AL instance's object list. */\r
+	status = attach_al_obj( &h_al->obj, &p_listen->obj );\r
+	if( status != IB_SUCCESS )\r
+	{\r
+		p_listen->obj.pfn_destroy( &p_listen->obj, NULL );\r
+		AL_TRACE_EXIT( AL_DBG_ERROR,\r
+			("attach_al_obj returned %s.\n", ib_get_err_str(status)) );\r
+		return status;\r
+	}\r
+\r
+	/* Create a CEP to listen on. */\r
+	status = al_create_cep( h_al, __cm_handler, p_listen, &p_listen->cid );\r
+	if( status != IB_SUCCESS )\r
+	{\r
+		p_listen->obj.pfn_destroy( &p_listen->obj, NULL );\r
+		AL_TRACE_EXIT( AL_DBG_ERROR,\r
+			("al_create_cep returned %s.\n", ib_get_err_str(status)) );\r
+		return status;\r
+	}\r
+\r
+	/* Translate the CM listen request into CEP terms and start listening. */\r
+	cep_listen.cmp_len = p_cm_listen->compare_length;\r
+	cep_listen.cmp_offset = p_cm_listen->compare_offset;\r
+	cep_listen.p_cmp_buf = p_cm_listen->p_compare_buffer;\r
+	cep_listen.port_guid = p_cm_listen->port_guid;\r
+	cep_listen.svc_id = p_cm_listen->svc_id;\r
+\r
+	status = al_cep_listen( h_al, p_listen->cid, &cep_listen );\r
+	if( status != IB_SUCCESS )\r
+	{\r
+		p_listen->obj.pfn_destroy( &p_listen->obj, NULL );\r
+		AL_TRACE_EXIT( AL_DBG_ERROR,\r
+			("al_cep_listen returned %s.\n", ib_get_err_str(status)) );\r
+		return status;\r
+	}\r
+\r
+	*ph_cm_listen = p_listen;\r
+\r
+	AL_EXIT( AL_DBG_CM );\r
+	return IB_SUCCESS;\r
+}\r
+\r
+\r
+/*\r
+ * Public entry point for starting a CM listen.  Validates the handles\r
+ * and parameters, delegates to __cep_listen, and on success releases\r
+ * the init_al_obj reference so the caller's handle is the only one.\r
+ */\r
+ib_api_status_t\r
+ib_cm_listen(\r
+	IN		const	ib_al_handle_t				h_al,\r
+	IN		const	ib_cm_listen_t* const		p_cm_listen,\r
+	IN		const	ib_pfn_listen_err_cb_t		pfn_listen_err_cb,\r
+	IN		const	void* const					listen_context,\r
+		OUT			ib_listen_handle_t* const	ph_cm_listen )\r
+{\r
+	ib_api_status_t		status;\r
+\r
+	AL_ENTER( AL_DBG_CM );\r
+\r
+	if( AL_OBJ_INVALID_HANDLE( h_al, AL_OBJ_TYPE_H_AL ) )\r
+	{\r
+		CL_TRACE_EXIT( AL_DBG_ERROR, g_al_dbg_lvl, ("IB_INVALID_AL_HANDLE\n") );\r
+		return IB_INVALID_AL_HANDLE;\r
+	}\r
+	if( !p_cm_listen || !pfn_listen_err_cb || !ph_cm_listen )\r
+	{\r
+		CL_TRACE_EXIT( AL_DBG_ERROR, g_al_dbg_lvl, ("IB_INVALID_PARAMETER\n") );\r
+		return IB_INVALID_PARAMETER;\r
+	}\r
+\r
+	status = __cep_listen(h_al, p_cm_listen, pfn_listen_err_cb, listen_context,\r
+		ph_cm_listen );\r
+\r
+	/* Release the reference taken in init_al_obj. */\r
+	if( status == IB_SUCCESS )\r
+		deref_al_obj( &(*ph_cm_listen)->obj );\r
+\r
+	/* NOTE(review): uses CL_EXIT while neighbors use AL_EXIT - confirm. */\r
+	CL_EXIT( AL_DBG_CM, g_al_dbg_lvl );\r
+	return status;\r
+}\r
+\r
+\r
+/*\r
+ * Cancel an active listen.  Destroys the listen object asynchronously;\r
+ * pfn_destroy_cb (optional) is invoked when destruction completes.\r
+ */\r
+ib_api_status_t\r
+ib_cm_cancel(\r
+	IN		const	ib_listen_handle_t			h_cm_listen,\r
+	IN		const	ib_pfn_destroy_cb_t			pfn_destroy_cb OPTIONAL )\r
+{\r
+	AL_ENTER( AL_DBG_CM );\r
+\r
+	if( AL_OBJ_INVALID_HANDLE( h_cm_listen, AL_OBJ_TYPE_H_LISTEN ) )\r
+	{\r
+		CL_TRACE_EXIT( AL_DBG_ERROR, g_al_dbg_lvl, ("IB_INVALID_HANDLE\n") );\r
+		return IB_INVALID_HANDLE;\r
+	}\r
+\r
+	/* Take a reference for the destroy path, then run the object's\r
+	 * standard destroy sequence (__destroying_listen/__free_listen). */\r
+	ref_al_obj( &h_cm_listen->obj );\r
+	h_cm_listen->obj.pfn_destroy( &h_cm_listen->obj, pfn_destroy_cb );\r
+\r
+	AL_EXIT( AL_DBG_CM );\r
+	return IB_SUCCESS;\r
+}\r
+\r
+\r
+/*\r
+ * Hand off a pending REQ to another service ID.  Not implemented in the\r
+ * CEP-based CM - always returns IB_UNSUPPORTED.\r
+ */\r
+ib_api_status_t\r
+ib_cm_handoff(\r
+	IN		const	ib_cm_handle_t				h_cm_req,\r
+	IN		const	ib_net64_t					svc_id )\r
+{\r
+	UNUSED_PARAM( h_cm_req );\r
+	UNUSED_PARAM( svc_id );\r
+	return IB_UNSUPPORTED;\r
+}\r
#define IS_CM_IOCTL(cmd) \\r
((cmd) > AL_CM_OPS_START && (cmd) < AL_CM_MAXOPS)\r
\r
+\r
+enum _ual_cep_ops\r
+{\r
+ al_cep_ops_start = al_ioc_maxops,\r
+ ual_create_cep,\r
+ ual_destroy_cep,\r
+ ual_cep_listen,\r
+ ual_cep_pre_req,\r
+ ual_cep_send_req,\r
+ ual_cep_pre_rep,\r
+ ual_cep_send_rep,\r
+ ual_cep_get_rtr,\r
+ ual_cep_get_rts,\r
+ ual_cep_rtu,\r
+ ual_cep_rej,\r
+ ual_cep_mra,\r
+ ual_cep_lap,\r
+ ual_cep_pre_apr,\r
+ ual_cep_send_apr,\r
+ ual_cep_dreq,\r
+ ual_cep_drep,\r
+ ual_cep_get_timewait,\r
+ ual_cep_get_event,\r
+ ual_cep_poll,\r
+\r
+ al_cep_maxops\r
+\r
+} ual_cep_ops_t;\r
+\r
+#define UAL_CEP_OPS_START IOCTL_CODE(ALDEV_KEY, al_cep_ops_start)\r
+#define UAL_CEP_MAXOPS IOCTL_CODE(ALDEV_KEY, al_cep_maxops)\r
+#define IS_CEP_IOCTL(cmd) \\r
+ ((cmd) > UAL_CEP_OPS_START && (cmd) < UAL_CEP_MAXOPS)\r
+\r
+\r
/* AL ioctls */\r
\r
typedef enum _al_dev_ops\r
{\r
- al_ops_start = al_cm_maxops,\r
+ al_ops_start = al_cep_maxops,\r
\r
ual_reg_shmid_cmd,\r
ual_get_ca_attr,\r
#define UAL_CM_APR IOCTL_CODE(ALDEV_KEY, ual_cm_apr_cmd)\r
#define UAL_CM_FORCE_APM IOCTL_CODE(ALDEV_KEY, ual_force_apm_cmd)\r
\r
+/* CEP Related IOCTL commands */\r
+#define UAL_CREATE_CEP IOCTL_CODE(ALDEV_KEY, ual_create_cep)\r
+#define UAL_DESTROY_CEP IOCTL_CODE(ALDEV_KEY, ual_destroy_cep)\r
+#define UAL_CEP_LISTEN IOCTL_CODE(ALDEV_KEY, ual_cep_listen)\r
+#define UAL_CEP_PRE_REQ IOCTL_CODE(ALDEV_KEY, ual_cep_pre_req)\r
+#define UAL_CEP_SEND_REQ IOCTL_CODE(ALDEV_KEY, ual_cep_send_req)\r
+#define UAL_CEP_PRE_REP IOCTL_CODE(ALDEV_KEY, ual_cep_pre_rep)\r
+#define UAL_CEP_SEND_REP IOCTL_CODE(ALDEV_KEY, ual_cep_send_rep)\r
+#define UAL_CEP_GET_RTR IOCTL_CODE(ALDEV_KEY, ual_cep_get_rtr)\r
+#define UAL_CEP_GET_RTS IOCTL_CODE(ALDEV_KEY, ual_cep_get_rts)\r
+#define UAL_CEP_RTU IOCTL_CODE(ALDEV_KEY, ual_cep_rtu)\r
+#define UAL_CEP_REJ IOCTL_CODE(ALDEV_KEY, ual_cep_rej)\r
+#define UAL_CEP_MRA IOCTL_CODE(ALDEV_KEY, ual_cep_mra)\r
+#define UAL_CEP_LAP IOCTL_CODE(ALDEV_KEY, ual_cep_lap)\r
+#define UAL_CEP_PRE_APR IOCTL_CODE(ALDEV_KEY, ual_cep_pre_apr)\r
+#define UAL_CEP_SEND_APR IOCTL_CODE(ALDEV_KEY, ual_cep_send_apr)\r
+#define UAL_CEP_DREQ IOCTL_CODE(ALDEV_KEY, ual_cep_dreq)\r
+#define UAL_CEP_DREP IOCTL_CODE(ALDEV_KEY, ual_cep_drep)\r
+#define UAL_CEP_GET_TIMEWAIT IOCTL_CODE(ALDEV_KEY, ual_cep_get_timewait)\r
+#define UAL_CEP_GET_EVENT IOCTL_CODE(ALDEV_KEY, ual_cep_get_event)\r
+#define UAL_CEP_POLL IOCTL_CODE(ALDEV_KEY, ual_cep_poll)\r
+\r
#define UAL_GET_CA_ATTR_INFO IOCTL_CODE(ALDEV_KEY, ual_get_ca_attr)\r
\r
/* PnP related ioctl commands. */\r
__send_timer_cb(\r
IN void *context );\r
\r
-static void\r
-__send_async_proc_cb(\r
- IN cl_async_proc_item_t *p_send_async_item );\r
-\r
static void\r
__check_send_queue(\r
IN ib_mad_svc_handle_t h_mad_svc );\r
\r
/* Construct the MAD service. */\r
construct_al_obj( &h_mad_svc->obj, AL_OBJ_TYPE_H_MAD_SVC );\r
- cl_async_proc_construct( &h_mad_svc->send_async_proc );\r
cl_timer_construct( &h_mad_svc->send_timer );\r
cl_timer_construct( &h_mad_svc->recv_timer );\r
cl_qlist_init( &h_mad_svc->send_list );\r
return ib_convert_cl_status( cl_status );\r
}\r
\r
- cl_status = cl_async_proc_init( &h_mad_svc->send_async_proc,\r
- 1, "MAD svc send timeout" );\r
- if( cl_status != CL_SUCCESS )\r
- {\r
- h_mad_svc->obj.pfn_destroy( &h_mad_svc->obj, NULL );\r
- return ib_convert_cl_status( cl_status );\r
- }\r
-\r
*ph_mad_svc = h_mad_svc;\r
\r
CL_EXIT( AL_DBG_MAD_SVC, g_al_dbg_lvl );\r
ib_mad_send_handle_t h_send;\r
cl_list_item_t *p_list_item;\r
int32_t timeout_ms;\r
+#ifdef CL_KERNEL\r
+ KIRQL old_irql;\r
+#endif\r
\r
CL_ENTER( AL_DBG_MAD_SVC, g_al_dbg_lvl );\r
CL_ASSERT( p_obj );\r
timeout_ms -= 10;\r
}\r
\r
- /*\r
- * Cancel all outstanding send requests. Stop the send timer to avoid\r
- * synchronizing with it.\r
- */\r
- cl_timer_stop( &h_mad_svc->send_timer );\r
- cl_async_proc_destroy( &h_mad_svc->send_async_proc );\r
- cl_timer_destroy( &h_mad_svc->send_timer );\r
-\r
/*\r
* Deregister from the MAD dispatcher. The MAD dispatcher holds\r
* a reference on the MAD service when invoking callbacks. Since we\r
if( h_mad_svc->h_mad_reg )\r
__mad_disp_dereg( h_mad_svc->h_mad_reg );\r
\r
+ /* Cancel all outstanding send requests. */\r
+ cl_spinlock_acquire( &h_mad_svc->obj.lock );\r
for( p_list_item = cl_qlist_head( &h_mad_svc->send_list );\r
p_list_item != cl_qlist_end( &h_mad_svc->send_list );\r
p_list_item = cl_qlist_next( p_list_item ) )\r
h_send = PARENT_STRUCT( p_list_item, al_mad_send_t, pool_item );\r
h_send->canceled = TRUE;\r
}\r
+ cl_spinlock_release( &h_mad_svc->obj.lock );\r
\r
/*\r
* Invoke the timer callback to return the canceled MADs to the user.\r
* Since the MAD service is being destroyed, the user cannot be issuing\r
* sends.\r
*/\r
+#ifdef CL_KERNEL\r
+ old_irql = KeRaiseIrqlToDpcLevel();\r
+#endif\r
__check_send_queue( h_mad_svc );\r
+#ifdef CL_KERNEL\r
+ KeLowerIrql( old_irql );\r
+#endif\r
+\r
+ cl_timer_destroy( &h_mad_svc->send_timer );\r
\r
#ifdef CL_KERNEL\r
/*\r
if( h_mad_svc->obj.h_al->p_context )\r
{\r
cl_qlist_t *p_cblist;\r
- cl_list_item_t *p_list_item;\r
al_proxy_cb_info_t *p_cb_info;\r
\r
cl_spinlock_acquire( &h_mad_svc->obj.h_al->p_context->cb_lock );\r
}\r
\r
\r
+/*\r
+ * Delay the next retry of an outstanding sent MAD by delay_ms\r
+ * milliseconds.  Kernel-only; the user-mode build returns\r
+ * IB_UNSUPPORTED.  Returns IB_NOT_FOUND if the MAD is no longer on the\r
+ * service's send list (e.g. it already completed).\r
+ */\r
+ib_api_status_t\r
+ib_delay_mad(\r
+	IN		const	ib_mad_svc_handle_t			h_mad_svc,\r
+	IN				ib_mad_element_t* const		p_mad_element,\r
+	IN		const	uint32_t					delay_ms )\r
+{\r
+#ifdef CL_KERNEL\r
+	cl_list_item_t			*p_list_item;\r
+	ib_mad_send_handle_t	h_send;\r
+#endif\r
+\r
+	AL_ENTER( AL_DBG_MAD_SVC );\r
+\r
+	if( AL_OBJ_INVALID_HANDLE( h_mad_svc, AL_OBJ_TYPE_H_MAD_SVC ) )\r
+	{\r
+		AL_TRACE_EXIT( AL_DBG_ERROR, ("IB_INVALID_HANDLE\n") );\r
+		return IB_INVALID_HANDLE;\r
+	}\r
+	if( !p_mad_element )\r
+	{\r
+		AL_TRACE_EXIT( AL_DBG_ERROR, ("IB_INVALID_PARAMETER\n") );\r
+		return IB_INVALID_PARAMETER;\r
+	}\r
+\r
+#ifndef CL_KERNEL\r
+	UNUSED_PARAM( p_mad_element );\r
+	UNUSED_PARAM( delay_ms );\r
+	/* TODO: support for user-mode MAD QP's. */\r
+	AL_EXIT( AL_DBG_MAD_SVC );\r
+	return IB_UNSUPPORTED;\r
+#else\r
+	/* Search for the MAD in our MAD list.  It may have already completed. */\r
+	cl_spinlock_acquire( &h_mad_svc->obj.lock );\r
+	p_list_item = cl_qlist_find_from_head( &h_mad_svc->send_list,\r
+		__mad_svc_find_send, p_mad_element );\r
+\r
+	if( !p_list_item )\r
+	{\r
+		cl_spinlock_release( &h_mad_svc->obj.lock );\r
+		/* NOTE(review): this path uses AL_TRACE, not AL_TRACE_EXIT, so the\r
+		 * function-exit trace is skipped here - confirm intentional. */\r
+		AL_TRACE( AL_DBG_MAD_SVC, ("MAD not found\n") );\r
+		return IB_NOT_FOUND;\r
+	}\r
+\r
+	/* Push out the send's retry time.  If no retry is currently scheduled\r
+	 * (retry_time == MAX_TIME), stash the delay for __set_retry_time to\r
+	 * apply; otherwise extend the pending retry directly (usec units). */\r
+	h_send = PARENT_STRUCT( p_list_item, al_mad_send_t, pool_item );\r
+\r
+	if( h_send->retry_time == MAX_TIME )\r
+		h_send->delay = delay_ms;\r
+	else\r
+		h_send->retry_time += (delay_ms * 1000);\r
+\r
+	cl_spinlock_release( &h_mad_svc->obj.lock );\r
+	AL_EXIT( AL_DBG_MAD_SVC );\r
+	return IB_SUCCESS;\r
+#endif\r
+}\r
+\r
\r
/*\r
* Process a send completion.\r
__set_retry_time(\r
IN ib_mad_send_handle_t h_send )\r
{\r
- h_send->retry_time = h_send->p_send_mad->timeout_ms * 1000 +\r
+ h_send->retry_time =\r
+ (h_send->p_send_mad->timeout_ms + h_send->delay) * 1000 +\r
cl_get_time_stamp();\r
+ h_send->delay = 0;\r
}\r
\r
\r
__send_timer_cb(\r
IN void *context )\r
{\r
- ib_mad_svc_handle_t h_mad_svc;\r
-\r
CL_ENTER( AL_DBG_MAD_SVC, g_al_dbg_lvl );\r
\r
- /*\r
- If we haven't already queued the asynchronous processing item to\r
- check the send queue, do so now.\r
- */\r
- h_mad_svc = (ib_mad_svc_handle_t)context;\r
- cl_spinlock_acquire( &h_mad_svc->obj.lock );\r
-\r
- /*\r
- See if the asynchronous processing item is in use. If it is already\r
- in use, it means that we're about to check the send queue anyway, so\r
- just ignore the timer. Also, don't bother scheduling if the object\r
- state is not CL_INITIALIZED; we may be destroying the MAD service.\r
- */\r
- if( !h_mad_svc->send_async_item.pfn_callback &&\r
- ( h_mad_svc->obj.state == CL_INITIALIZED ) )\r
- {\r
- /* Not in use, reference the service and queue the callback. */\r
- cl_atomic_inc( &h_mad_svc->ref_cnt );\r
- h_mad_svc->send_async_item.pfn_callback = __send_async_proc_cb;\r
- cl_async_proc_queue( &h_mad_svc->send_async_proc,\r
- &h_mad_svc->send_async_item );\r
- }\r
-\r
- cl_spinlock_release( &h_mad_svc->obj.lock );\r
-\r
- CL_EXIT( AL_DBG_MAD_SVC, g_al_dbg_lvl );\r
-}\r
-\r
-\r
-\r
-static void\r
-__send_async_proc_cb(\r
- IN cl_async_proc_item_t *p_send_async_item )\r
-{\r
- ib_mad_svc_handle_t h_mad_svc;\r
-\r
- CL_ENTER( AL_DBG_MAD_SVC, g_al_dbg_lvl );\r
-\r
- h_mad_svc = PARENT_STRUCT( p_send_async_item, al_mad_svc_t,\r
- send_async_item );\r
-\r
- cl_spinlock_acquire( &h_mad_svc->obj.lock );\r
-\r
- /*\r
- * Don't bother processing if the object state is not\r
- * CL_INITIALIZED; we may be destroying the MAD service.\r
- */\r
- if( h_mad_svc->obj.state != CL_INITIALIZED )\r
- {\r
- cl_spinlock_release( &h_mad_svc->obj.lock );\r
- cl_atomic_dec( &h_mad_svc->ref_cnt );\r
- return;\r
- }\r
-\r
- /* The send_async_item is available for use again. */\r
- h_mad_svc->send_async_item.pfn_callback = NULL;\r
-\r
- cl_spinlock_release( &h_mad_svc->obj.lock );\r
-\r
- __check_send_queue( h_mad_svc );\r
+ __check_send_queue( (ib_mad_svc_handle_t)context );\r
\r
- /* Release the reference held during async processing. */\r
- cl_atomic_dec( &h_mad_svc->ref_cnt );\r
CL_EXIT( AL_DBG_MAD_SVC, g_al_dbg_lvl );\r
}\r
\r
ib_pfn_mad_comp_cb_t pfn_user_send_cb;\r
ib_pfn_mad_comp_cb_t pfn_user_recv_cb;\r
\r
- cl_async_proc_t send_async_proc;\r
- cl_async_proc_item_t send_async_item;\r
cl_qlist_t send_list;\r
cl_timer_t send_timer;\r
\r
IN al_mad_wr_t* const p_mad_wr );\r
\r
\r
+ib_api_status_t\r
+ib_delay_mad(\r
+ IN const ib_mad_svc_handle_t h_mad_svc,\r
+ IN ib_mad_element_t* const p_mad_element,\r
+ IN const uint32_t delay_ms );\r
+\r
\r
#endif /* __IB_AL_MAD_H__ */\r
/* Absolute time that the request should be retried. */\r
uint64_t retry_time;\r
\r
+ /* Delay, in milliseconds, to add before the next retry. */\r
+ uint32_t delay;\r
+\r
/* Number of times that the request can be retried. */\r
uint32_t retry_cnt;\r
boolean_t canceled; /* indicates if send was canceled */\r
IN cl_ioctl_handle_t h_ioctl,\r
OUT size_t *p_ret_bytes );\r
\r
+cl_status_t cep_ioctl(\r
+ IN cl_ioctl_handle_t h_ioctl,\r
+ OUT size_t *p_ret_bytes );\r
+\r
cl_status_t ioc_ioctl(\r
IN cl_ioctl_handle_t h_ioctl,\r
OUT size_t *p_ret_bytes );\r
#include "al.h"\r
#include "al_av.h"\r
#include "al_ca.h"\r
-#include "al_cm_shared.h"\r
+#include "al_cm_cep.h"\r
#include "al_cq.h"\r
#include "al_debug.h"\r
#include "al_mad.h"\r
}\r
\r
\r
-/*\r
-static ib_api_status_t\r
-al_bad_leave_mcast(\r
- IN const ib_mcast_handle_t h_mcast )\r
-{\r
- UNUSED_PARAM( h_mcast );\r
- return IB_INVALID_PARAMETER;\r
-}\r
-*/\r
-\r
-\r
-static ib_api_status_t\r
-al_bad_cm_call(\r
- IN OUT al_conn_t* const p_conn )\r
-{\r
- UNUSED_PARAM( p_conn );\r
- return IB_INVALID_PARAMETER;\r
-}\r
-\r
-\r
-static ib_api_status_t\r
-al_bad_cm_pre_rep(\r
- IN OUT al_conn_t* const p_conn,\r
- IN OUT const ib_cm_rep_t* p_cm_rep )\r
-{\r
- UNUSED_PARAM( p_conn );\r
- UNUSED_PARAM( p_cm_rep );\r
- return IB_INVALID_PARAMETER;\r
-}\r
-\r
-\r
ib_api_status_t\r
init_base_qp(\r
IN ib_qp_t* const p_qp,\r
cq_attach_qp( h_qp->h_recv_cq, &h_qp->recv_cq_rel );\r
cq_attach_qp( h_qp->h_send_cq, &h_qp->send_cq_rel );\r
\r
- /*\r
- * Get the QP attributes. This works around a bug with create QP calls\r
- * above not reporting the correct attributes.\r
- */\r
-// ib_query_qp( h_qp, &qp_attr );\r
h_qp->num = qp_attr.num;\r
\r
return IB_SUCCESS;\r
/* Initialize the inherited QP first. */\r
status = init_raw_qp( &p_conn_qp->qp, h_pd, UNBOUND_PORT_GUID,\r
p_qp_create, p_umv_buf );\r
- if( status != IB_SUCCESS )\r
- {\r
- return status;\r
- }\r
\r
- return IB_SUCCESS;\r
+ p_conn_qp->cid = AL_INVALID_CID;\r
+\r
+ return status;\r
}\r
\r
\r
ib_qp_handle_t h_qp;\r
al_mad_qp_t *p_mad_qp;\r
al_qp_alias_t *p_qp_alias;\r
- al_conn_qp_t *p_conn_qp;\r
+ net32_t cid;\r
\r
CL_ASSERT( p_obj );\r
h_qp = PARENT_STRUCT( p_obj, ib_qp_t, obj );\r
\r
case IB_QPT_RELIABLE_CONN:\r
case IB_QPT_UNRELIABLE_CONN:\r
- p_conn_qp = PARENT_STRUCT( h_qp, al_conn_qp_t, qp);\r
+ cid = cl_atomic_xchg(\r
+ &((al_conn_qp_t*)h_qp)->cid, AL_INVALID_CID );\r
+ if( cid != AL_INVALID_CID )\r
+ {\r
+ ref_al_obj( &h_qp->obj );\r
+ if( al_destroy_cep(\r
+ h_qp->obj.h_al, cid, deref_al_obj ) != IB_SUCCESS )\r
+ {\r
+ deref_al_obj( &h_qp->obj );\r
+ }\r
+ }\r
\r
- /* Disconnect the QP. */\r
- cm_conn_destroy( p_conn_qp );\r
/* Fall through. */\r
-\r
case IB_QPT_UNRELIABLE_DGRM:\r
default:\r
/* Multicast membership gets cleaned up by object hierarchy. */\r
CL_ASSERT( p_event_rec );\r
h_qp = (ib_qp_handle_t)p_event_rec->context;\r
\r
+#if defined(CL_KERNEL)\r
+ switch( p_event_rec->code )\r
+ {\r
+ case IB_AE_QP_COMM:\r
+ al_cep_established( h_qp->obj.h_al, ((al_conn_qp_t*)h_qp)->cid );\r
+ break;\r
+\r
+ case IB_AE_QP_APM:\r
+ al_cep_migrate( h_qp->obj.h_al, ((al_conn_qp_t*)h_qp)->cid );\r
+ break;\r
+\r
+ case IB_AE_QP_APM_ERROR:\r
+ //***TODO: Figure out how to handle these errors.\r
+ break;\r
+\r
+ default:\r
+ break;\r
+ }\r
+#endif\r
+\r
p_event_rec->context = (void*)h_qp->obj.context;\r
p_event_rec->handle.h_qp = h_qp;\r
\r
CL_EXIT( AL_DBG_MW, g_al_dbg_lvl );\r
return status;\r
}\r
-\r
-\r
-ib_al_handle_t\r
-qp_get_al(\r
- IN const ib_qp_handle_t h_qp )\r
-{\r
- /* AL the is great-grandparent of the QP. */\r
- return (ib_al_handle_t)\r
- h_qp->obj.p_parent_obj->p_parent_obj->p_parent_obj;\r
-}\r
\r
ib_cm_handle_t p_conn;\r
\r
+ atomic32_t cid;\r
+\r
+ /* Callback table. */\r
+ ib_pfn_cm_req_cb_t pfn_cm_req_cb;\r
+ ib_pfn_cm_rep_cb_t pfn_cm_rep_cb;\r
+ ib_pfn_cm_mra_cb_t pfn_cm_mra_cb;\r
+ ib_pfn_cm_rtu_cb_t pfn_cm_rtu_cb;\r
+ ib_pfn_cm_lap_cb_t pfn_cm_lap_cb;\r
+ ib_pfn_cm_apr_cb_t pfn_cm_apr_cb;\r
+ ib_pfn_cm_dreq_cb_t pfn_cm_dreq_cb;\r
+ ib_pfn_cm_drep_cb_t pfn_cm_drep_cb;\r
+ ib_pfn_cm_rej_cb_t pfn_cm_rej_cb; /* If RTU times out */\r
+\r
+\r
} al_conn_qp_t;\r
\r
\r
\r
\r
/* Return the AL instance associated with this QP. */\r
-ib_al_handle_t\r
+static inline ib_al_handle_t\r
qp_get_al(\r
- IN const ib_qp_handle_t h_qp );\r
+ IN const ib_qp_handle_t h_qp )\r
+{\r
+ return h_qp->obj.h_al;\r
+}\r
\r
\r
#endif /* __AL_QP_H__ */\r
h_sa_reg->port_guid = p_reg_svc_req->port_guid;\r
\r
/* Copy the service registration information. */\r
- h_sa_reg->sa_req.user_context = h_sa_reg;\r
+ h_sa_reg->sa_req.user_context = p_reg_svc_req->svc_context;\r
h_sa_reg->pfn_reg_svc_cb = p_reg_svc_req->pfn_reg_svc_cb;\r
h_sa_reg->svc_rec = p_reg_svc_req->svc_rec;\r
\r
SOURCES= ibal.rc \\r
al_ca_pnp.c \\r
al_ci_ca.c \\r
- al_cm.c \\r
- al_cm_conn.c \\r
- al_cm_sidr.c \\r
+ al_cm_cep.c \\r
+ al_cm_qp.c \\r
al_dev.c \\r
al_driver.c \\r
al_ioc_pnp.c \\r
al_mr.c \\r
al_pnp.c \\r
al_proxy.c \\r
- al_proxy_cm.c \\r
+ al_proxy_cep.c \\r
al_proxy_ioc.c \\r
al_proxy_subnet.c \\r
al_proxy_verbs.c \\r
..\al_av.c \\r
..\al_ca.c \\r
..\al_ci_ca_shared.c \\r
- ..\al_cm_shared.c \\r
..\al_common.c \\r
..\al_cq.c \\r
..\al_dm.c \\r
\r
\r
\r
+/*\r
+ * CI CA completion callback.  If the CQ has a wait object, signal it\r
+ * (with a network-class priority boost); otherwise invoke the user's\r
+ * completion callback directly with the CQ's context.\r
+ */\r
+void\r
+ci_ca_comp_cb(\r
+	IN				void						*cq_context )\r
+{\r
+	ib_cq_handle_t	h_cq = (ib_cq_handle_t)cq_context;\r
+\r
+	if( h_cq->h_wait_obj )\r
+		KeSetEvent( h_cq->h_wait_obj, IO_NETWORK_INCREMENT, FALSE );\r
+	else\r
+		h_cq->pfn_user_comp_cb( h_cq, (void*)h_cq->obj.context );\r
+}\r
+\r
+\r
+\r
/*\r
* CI CA asynchronous event callback.\r
*/\r
--- /dev/null
+/*\r
+ * Copyright (c) 2005 SilverStorm Technologies. All rights reserved.\r
+ *\r
+ * This software is available to you under the OpenIB.org BSD license\r
+ * below:\r
+ *\r
+ * Redistribution and use in source and binary forms, with or\r
+ * without modification, are permitted provided that the following\r
+ * conditions are met:\r
+ *\r
+ * - Redistributions of source code must retain the above\r
+ * copyright notice, this list of conditions and the following\r
+ * disclaimer.\r
+ *\r
+ * - Redistributions in binary form must reproduce the above\r
+ * copyright notice, this list of conditions and the following\r
+ * disclaimer in the documentation and/or other materials\r
+ * provided with the distribution.\r
+ *\r
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,\r
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\r
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND\r
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS\r
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN\r
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN\r
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\r
+ * SOFTWARE.\r
+ *\r
+ * $Id:$\r
+ */\r
+\r
+\r
+#include <iba/ib_al.h>\r
+#include <complib/cl_vector.h>\r
+#include <complib/cl_rbmap.h>\r
+#include <complib/cl_qmap.h>\r
+#include <complib/cl_spinlock.h>\r
+#include "al_common.h"\r
+#include "al_cm_cep.h"\r
+#include "al_cm_conn.h"\r
+#include "al_cm_sidr.h"\r
+#include "al_debug.h"\r
+#include "ib_common.h"\r
+#include "al_mgr.h"\r
+#include "al_ca.h"\r
+#include "al.h"\r
+#include "al_mad.h"\r
+#include "al_qp.h"\r
+\r
+\r
+/*\r
+ * The vector object uses a list item at the front of the buffers\r
+ * it allocates. Take the list item into account so that allocations\r
+ * are for full page sizes.\r
+ */\r
+#define CEP_CID_MIN \\r
+ ((PAGE_SIZE - sizeof(cl_list_item_t)) / sizeof(cep_cid_t))\r
+#define CEP_CID_GROW \\r
+ ((PAGE_SIZE - sizeof(cl_list_item_t)) / sizeof(cep_cid_t))\r
+\r
+/*\r
+ * We reserve the upper byte of the connection ID as a revolving counter so\r
+ * that connections that are retried by the client change connection ID.\r
+ * This counter is never zero, so it is OK to use all CIDs since we will never\r
+ * have a full CID (base + counter) that is zero.\r
+ * See the IB spec, section 12.9.8.7 for details about REJ retry.\r
+ */\r
+#define CEP_MAX_CID (0x00FFFFFF)\r
+#define CEP_MAX_CID_MASK (0x00FFFFFF)\r
+\r
+#define CEP_MAD_SQ_DEPTH (128)\r
+#define CEP_MAD_RQ_DEPTH (1) /* ignored. */\r
+#define CEP_MAD_SQ_SGE (1)\r
+#define CEP_MAD_RQ_SGE (1) /* ignored. */\r
+\r
+\r
+/* Global connection manager object. */\r
+typedef struct _al_cep_mgr\r
+{\r
+ al_obj_t obj;\r
+\r
+ cl_qmap_t port_map;\r
+\r
+ KSPIN_LOCK lock;\r
+\r
+ /* Bitmap of CEPs, indexed by CID. */\r
+ cl_vector_t cid_vector;\r
+ uint32_t free_cid;\r
+\r
+ /* List of active listens. */\r
+ cl_rbmap_t listen_map;\r
+\r
+ /* Map of CEP by remote CID and CA GUID. */\r
+ cl_rbmap_t conn_id_map;\r
+ /* Map of CEP by remote QPN, used for stale connection matching. */\r
+ cl_rbmap_t conn_qp_map;\r
+\r
+ NPAGED_LOOKASIDE_LIST cep_pool;\r
+ NPAGED_LOOKASIDE_LIST req_pool;\r
+\r
+ /*\r
+ * Periodically walk the list of connections in the time wait state\r
+ * and flush them as appropriate.\r
+ */\r
+ cl_timer_t timewait_timer;\r
+ cl_qlist_t timewait_list;\r
+\r
+ ib_pnp_handle_t h_pnp;\r
+\r
+} al_cep_mgr_t;\r
+\r
+\r
+/* Per-port CM object. */\r
+typedef struct _cep_port_agent\r
+{\r
+ al_obj_t obj;\r
+\r
+ cl_map_item_t item;\r
+\r
+ ib_ca_handle_t h_ca;\r
+ ib_pd_handle_t h_pd;\r
+ ib_qp_handle_t h_qp;\r
+ ib_pool_key_t pool_key;\r
+ ib_mad_svc_handle_t h_mad_svc;\r
+\r
+ net64_t port_guid;\r
+ uint8_t port_num;\r
+ net16_t base_lid;\r
+\r
+} cep_agent_t;\r
+\r
+\r
+/*\r
+ * Note: the REQ, REP, and LAP values must be 1, 2, and 4 respectively.\r
+ * This allows shifting 1 << msg_mraed from an MRA to figure out for what\r
+ * message the MRA was sent for.\r
+ */\r
+#define CEP_STATE_RCVD 0x10000000\r
+#define CEP_STATE_SENT 0x20000000\r
+#define CEP_STATE_MRA 0x01000000\r
+#define CEP_STATE_REQ 0x00000001\r
+#define CEP_STATE_REP 0x00000002\r
+#define CEP_STATE_LAP 0x00000004\r
+#define CEP_STATE_RTU 0x00000008\r
+#define CEP_STATE_DREQ 0x00000010\r
+#define CEP_STATE_DREP 0x00000020\r
+#define CEP_STATE_DESTROYING 0x00010000\r
+#define CEP_STATE_USER 0x00020000\r
+\r
+#define CEP_MSG_MASK 0x000000FF\r
+#define CEP_OP_MASK 0xF0000000\r
+\r
+#define CEP_STATE_PREP 0x00100000\r
+\r
+/* States match CM state transition diagrams from spec. */\r
+typedef enum _cep_state\r
+{\r
+ CEP_STATE_IDLE,\r
+ CEP_STATE_PRE_REQ = CEP_STATE_IDLE | CEP_STATE_PREP,\r
+ CEP_STATE_LISTEN,\r
+ CEP_STATE_ESTABLISHED,\r
+ CEP_STATE_TIMEWAIT,\r
+ CEP_STATE_DESTROY = CEP_STATE_DESTROYING,\r
+ CEP_STATE_SREQ_SENT,\r
+ CEP_STATE_SREQ_RCVD,\r
+ CEP_STATE_ERROR,\r
+ CEP_STATE_REQ_RCVD = CEP_STATE_REQ | CEP_STATE_RCVD,\r
+ CEP_STATE_PRE_REP = CEP_STATE_REQ_RCVD | CEP_STATE_PREP,\r
+ CEP_STATE_REQ_SENT = CEP_STATE_REQ | CEP_STATE_SENT,\r
+ CEP_STATE_REQ_MRA_RCVD = CEP_STATE_REQ_SENT | CEP_STATE_MRA,\r
+ CEP_STATE_REQ_MRA_SENT = CEP_STATE_REQ_RCVD | CEP_STATE_MRA,\r
+ CEP_STATE_PRE_REP_MRA_SENT = CEP_STATE_REQ_MRA_SENT | CEP_STATE_PREP,\r
+ CEP_STATE_REP_RCVD = CEP_STATE_REP | CEP_STATE_RCVD,\r
+ CEP_STATE_REP_SENT = CEP_STATE_REP | CEP_STATE_SENT,\r
+ CEP_STATE_REP_MRA_RCVD = CEP_STATE_REP_SENT | CEP_STATE_MRA,\r
+ CEP_STATE_REP_MRA_SENT = CEP_STATE_REP_RCVD | CEP_STATE_MRA,\r
+ CEP_STATE_LAP_RCVD = CEP_STATE_LAP | CEP_STATE_RCVD,\r
+ CEP_STATE_PRE_APR = CEP_STATE_LAP_RCVD | CEP_STATE_PREP,\r
+ CEP_STATE_LAP_SENT = CEP_STATE_LAP | CEP_STATE_SENT,\r
+ CEP_STATE_LAP_MRA_RCVD = CEP_STATE_LAP_SENT | CEP_STATE_MRA,\r
+ CEP_STATE_LAP_MRA_SENT = CEP_STATE_LAP_RCVD | CEP_STATE_MRA,\r
+ CEP_STATE_PRE_APR_MRA_SENT = CEP_STATE_LAP_MRA_SENT | CEP_STATE_PREP,\r
+ CEP_STATE_DREQ_SENT = CEP_STATE_DREQ | CEP_STATE_SENT,\r
+ CEP_STATE_DREQ_RCVD = CEP_STATE_DREQ | CEP_STATE_RCVD,\r
+ CEP_STATE_DREQ_DESTROY = CEP_STATE_DREQ_SENT | CEP_STATE_DESTROYING\r
+\r
+} cep_state_t;\r
+\r
+\r
+/* Active side CEP state transitions:\r
+* al_create_cep -> IDLE\r
+* al_cep_pre_req -> PRE_REQ\r
+* al_cep_send_req -> REQ_SENT\r
+* Recv REQ MRA -> REQ_MRA_RCVD\r
+* Recv REP -> REP_RCVD\r
+* al_cep_mra -> REP_MRA_SENT\r
+* al_cep_rtu -> ESTABLISHED\r
+*\r
+* Passive side CEP state transitions:\r
+* al_create_cep -> IDLE\r
+* Recv REQ -> REQ_RCVD\r
+* al_cep_mra* -> REQ_MRA_SENT\r
+* al_cep_pre_rep -> PRE_REP\r
+* al_cep_mra* -> PRE_REP_MRA_SENT\r
+* al_cep_send_rep -> REP_SENT\r
+* Recv RTU -> ESTABLISHED\r
+*\r
+* *al_cep_mra can only be called once - either before or after PRE_REP.\r
+*/\r
+\r
+typedef struct _al_kcep_av\r
+{\r
+ ib_av_attr_t attr;\r
+ net64_t port_guid;\r
+ uint16_t pkey_index;\r
+\r
+} kcep_av_t;\r
+\r
+\r
+/*\r
+ * Kernel connection end point (CEP).  Tracks a single CM connection:\r
+ * its state, local/remote identifiers, path address vectors, cached\r
+ * MADs for retransmission, and timewait bookkeeping.\r
+ */\r
+typedef struct _al_kcep\r
+{\r
+ ib_cep_t cep;\r
+\r
+ struct _cep_cid *p_cid;\r
+\r
+ net64_t sid;\r
+\r
+ /* Port guid for filtering incoming requests. */\r
+ net64_t port_guid;\r
+\r
+ /* Private-data compare buffer used to match listens (offset/len). */\r
+ uint8_t* __ptr64 p_cmp_buf;\r
+ uint8_t cmp_offset;\r
+ uint8_t cmp_len;\r
+\r
+ boolean_t p2p;\r
+\r
+ /* Used to store connection structure with owning AL instance. */\r
+ cl_list_item_t al_item;\r
+\r
+ /* Flag to indicate whether a user is processing events. */\r
+ boolean_t signalled;\r
+\r
+ /* Destroy callback. */\r
+ ib_pfn_destroy_cb_t pfn_destroy_cb;\r
+\r
+ /* Queue of received MADs awaiting user processing (head/tail). */\r
+ ib_mad_element_t *p_mad_head;\r
+ ib_mad_element_t *p_mad_tail;\r
+ al_pfn_cep_cb_t pfn_cb;\r
+\r
+ IRP *p_irp;\r
+\r
+ /* MAP item for finding listen CEPs. */\r
+ cl_rbmap_item_t listen_item;\r
+\r
+ /* Map item for finding CEPs based on remote comm ID & CA GUID. */\r
+ cl_rbmap_item_t rem_id_item;\r
+\r
+ /* Map item for finding CEPs based on remote QP number. */\r
+ cl_rbmap_item_t rem_qp_item;\r
+\r
+ /* Communication ID's for the connection. */\r
+ net32_t local_comm_id;\r
+ net32_t remote_comm_id;\r
+\r
+ net64_t local_ca_guid;\r
+ net64_t remote_ca_guid;\r
+\r
+ /* Remote QP, used for stale connection checking. */\r
+ net32_t remote_qpn;\r
+\r
+ /* Parameters to format QP modification structure. */\r
+ net32_t sq_psn;\r
+ net32_t rq_psn;\r
+ uint8_t resp_res;\r
+ uint8_t init_depth;\r
+ uint8_t rnr_nak_timeout;\r
+\r
+ /*\r
+ * Local QP number, used for the "additional check" required\r
+ * of the DREQ.\r
+ */\r
+ net32_t local_qpn;\r
+\r
+ /* PKEY to make sure a LAP is on the same partition. */\r
+ net16_t pkey;\r
+\r
+ /* Initiator depth as received in the REQ. */\r
+ uint8_t req_init_depth;\r
+\r
+ /*\r
+ * Primary and alternate path info, used to create the address vectors for\r
+ * sending MADs, to locate the port CM agent to use for outgoing sends,\r
+ * and for creating the address vectors for transitioning QPs.\r
+ */\r
+ kcep_av_t av[2];\r
+ uint8_t idx_primary;\r
+\r
+ /* Temporary AV and CEP port GUID used when processing LAP. */\r
+ kcep_av_t alt_av;\r
+ uint8_t alt_2pkt_life;\r
+\r
+ /* Maximum packet lifetime * 2 of any path used on a connection. */\r
+ uint8_t max_2pkt_life;\r
+ /* Given by the REP, used for alternate path setup. */\r
+ uint8_t target_ack_delay;\r
+ /* Stored to help calculate the local ACK delay in the LAP. */\r
+ uint8_t local_ack_delay;\r
+\r
+ /* Volatile to allow using atomic operations for state checks. */\r
+ cep_state_t state;\r
+\r
+ /*\r
+ * Flag that indicates whether a connection took the active role during\r
+ * establishment. \r
+ */\r
+ boolean_t was_active;\r
+\r
+ /*\r
+ * Handle to the sent MAD, used for cancelling. We store the handle to\r
+ * the mad service so that we can properly cancel. This should not be a\r
+ * problem since all outstanding sends should be completed before the\r
+ * mad service completes its destruction and the handle becomes invalid.\r
+ */\r
+ ib_mad_svc_handle_t h_mad_svc;\r
+ ib_mad_element_t *p_send_mad;\r
+\r
+ /* Number of outstanding MADs; delays destruction of the CEP. */\r
+ atomic32_t ref_cnt;\r
+\r
+ /* MAD transaction ID to use when sending MADs. */\r
+ uint64_t tid;\r
+\r
+ /* Maximum retries per MAD. Set at REQ time, stored to retry LAP. */\r
+ uint8_t max_cm_retries;\r
+ /* Timeout value, in milliseconds. Set at REQ time, stored to retry LAP. */\r
+ uint32_t retry_timeout;\r
+\r
+ /* Timer that will be signalled when the CEP exits timewait. */\r
+ KTIMER timewait_timer;\r
+ LARGE_INTEGER timewait_time;\r
+ cl_list_item_t timewait_item;\r
+\r
+ /*\r
+ * Pointer to a formatted MAD. The pre_req, pre_rep and pre_apr calls\r
+ * allocate and format the MAD, and the send_req, send_rep and send_apr\r
+ * calls send it.\r
+ */\r
+ ib_mad_element_t *p_mad;\r
+\r
+ /* Cache the last MAD sent for retransmission. */\r
+ union _mads\r
+ {\r
+ ib_mad_t hdr;\r
+ mad_cm_mra_t mra;\r
+ mad_cm_rtu_t rtu;\r
+ mad_cm_drep_t drep;\r
+\r
+ } mads;\r
+\r
+} kcep_t;\r
+\r
+\r
+/*\r
+ * Structures stored in the CID vector.  An entry is free when h_al is\r
+ * NULL; in that case p_cep encodes the index of the next free entry\r
+ * (free-list threading through the vector).\r
+ */\r
+typedef struct _cep_cid\r
+{\r
+ /* Owning AL handle. NULL if invalid. */\r
+ ib_al_handle_t h_al;\r
+ /* Pointer to CEP, or index of next free entry if h_al is NULL. */\r
+ kcep_t *p_cep;\r
+ /* For REJ retry support. */\r
+ uint8_t modifier;\r
+\r
+} cep_cid_t;\r
+\r
+\r
+/* Global instance of the CM agent (CEP manager); NULL until initialized. */\r
+al_cep_mgr_t *gp_cep_mgr = NULL;\r
+\r
+\r
+static ib_api_status_t\r
+__format_drep(\r
+ IN kcep_t* const p_cep,\r
+ IN const uint8_t* p_pdata OPTIONAL,\r
+ IN uint8_t pdata_len,\r
+ IN OUT mad_cm_drep_t* const p_drep );\r
+\r
+static ib_api_status_t\r
+__cep_queue_mad(\r
+ IN kcep_t* const p_cep,\r
+ IN ib_mad_element_t* p_mad );\r
+\r
+static inline void\r
+__process_cep(\r
+ IN kcep_t* const p_cep );\r
+\r
+static inline uint32_t\r
+__calc_mad_timeout(\r
+ IN const uint8_t pkt_life );\r
+\r
+static inline void\r
+__calc_timewait(\r
+ IN kcep_t* const p_cep );\r
+\r
+static kcep_t*\r
+__create_cep( void );\r
+\r
+static int32_t\r
+__cleanup_cep(\r
+ IN kcep_t* const p_cep );\r
+\r
+static void\r
+__destroy_cep(\r
+ IN kcep_t* const p_cep );\r
+\r
+static inline void\r
+__bind_cep(\r
+ IN kcep_t* const p_cep,\r
+ IN ib_al_handle_t h_al,\r
+ IN al_pfn_cep_cb_t pfn_cb,\r
+ IN void *context );\r
+\r
+static inline void\r
+__unbind_cep(\r
+ IN kcep_t* const p_cep );\r
+\r
+static void\r
+__pre_destroy_cep(\r
+ IN kcep_t* const p_cep );\r
+\r
+static kcep_t*\r
+__lookup_by_id(\r
+ IN net32_t remote_comm_id,\r
+ IN net64_t remote_ca_guid );\r
+\r
+static kcep_t*\r
+__lookup_listen(\r
+ IN net64_t sid,\r
+ IN net64_t port_guid,\r
+ IN void *p_pdata );\r
+\r
+static inline kcep_t*\r
+__lookup_cep(\r
+ IN ib_al_handle_t h_al OPTIONAL,\r
+ IN net32_t cid );\r
+\r
+static inline kcep_t*\r
+__insert_cep(\r
+ IN kcep_t* const p_new_cep );\r
+\r
+static inline void\r
+__remove_cep(\r
+ IN kcep_t* const p_cep );\r
+\r
+static inline void\r
+__insert_timewait(\r
+ IN kcep_t* const p_cep );\r
+\r
+static ib_api_status_t\r
+__cep_send_mad(\r
+ IN cep_agent_t* const p_port_cep,\r
+ IN ib_mad_element_t* const p_mad );\r
+\r
+/*\r
+ * Returns the CEP agent for the port matching the given GID and LID and\r
+ * whose pkey table contains the given pkey; outputs the pkey table index\r
+ * through p_pkey_index.  (Previous comment claimed a 1-based port index\r
+ * was returned; the signature returns the agent pointer itself.)\r
+ */\r
+static cep_agent_t*\r
+__find_port_cep(\r
+ IN const ib_gid_t* const p_gid,\r
+ IN const net16_t lid,\r
+ IN const net16_t pkey,\r
+ OUT uint16_t* const p_pkey_index );\r
+\r
+static cep_cid_t*\r
+__get_lcid(\r
+ OUT net32_t* const p_cid );\r
+\r
+static void\r
+__process_cep_send_comp(\r
+ IN cl_async_proc_item_t *p_item );\r
+\r
+\r
+/******************************************************************************\r
+* Per-port CEP agent\r
+******************************************************************************/\r
+\r
+\r
+/*\r
+ * Fill in a CM MAD header: base version 1, CM management class v2,\r
+ * SEND method, the CEP's current transaction ID, and the given\r
+ * attribute ID (REQ/REP/REJ/...).\r
+ */\r
+static inline void\r
+__format_mad_hdr(\r
+ IN ib_mad_t* const p_mad,\r
+ IN const kcep_t* const p_cep,\r
+ IN net16_t attr_id )\r
+{\r
+ p_mad->base_ver = 1;\r
+ p_mad->mgmt_class = IB_MCLASS_COMM_MGMT;\r
+ p_mad->class_ver = IB_MCLASS_CM_VER_2;\r
+ p_mad->method = IB_MAD_METHOD_SEND;\r
+ p_mad->status = 0;\r
+ p_mad->class_spec = 0;\r
+ p_mad->trans_id = p_cep->tid;\r
+ p_mad->attr_id = attr_id;\r
+ p_mad->resv = 0;\r
+ p_mad->attr_mod = 0;\r
+}\r
+\r
+\r
+/*\r
+ * Format the given MAD element as a REJ for this CEP and send it.\r
+ * The msg_rejected field is derived from the CEP state: 0 when the\r
+ * remote REQ is being rejected, 1 for the remote REP, 2 otherwise\r
+ * (timeout case).  Consumes the input MAD.\r
+ */\r
+static void\r
+__reject_mad(\r
+ IN cep_agent_t* const p_port_cep,\r
+ IN kcep_t* const p_cep,\r
+ IN ib_mad_element_t* const p_mad,\r
+ IN ib_rej_status_t reason )\r
+{\r
+ mad_cm_rej_t *p_rej;\r
+\r
+ AL_ENTER( AL_DBG_CM );\r
+\r
+ p_rej = (mad_cm_rej_t*)p_mad->p_mad_buf;\r
+\r
+ __format_mad_hdr( p_mad->p_mad_buf, p_cep, CM_REJ_ATTR_ID );\r
+\r
+ p_rej->local_comm_id = p_cep->local_comm_id;\r
+ p_rej->remote_comm_id = p_cep->remote_comm_id;\r
+ p_rej->reason = reason;\r
+\r
+ switch( p_cep->state )\r
+ {\r
+ case CEP_STATE_REQ_RCVD:\r
+ case CEP_STATE_REQ_MRA_SENT:\r
+ case CEP_STATE_PRE_REP:\r
+ case CEP_STATE_PRE_REP_MRA_SENT:\r
+ /* We hold a REQ from the remote side - reject the REQ. */\r
+ conn_rej_set_msg_rejected( 0, p_rej );\r
+ break;\r
+\r
+ case CEP_STATE_REP_RCVD:\r
+ case CEP_STATE_REP_MRA_SENT:\r
+ /* We hold a REP from the remote side - reject the REP. */\r
+ conn_rej_set_msg_rejected( 1, p_rej );\r
+ break;\r
+\r
+ default:\r
+ CL_ASSERT( reason == IB_REJ_TIMEOUT );\r
+ conn_rej_set_msg_rejected( 2, p_rej );\r
+ break;\r
+ }\r
+\r
+ conn_rej_clr_rsvd_fields( p_rej );\r
+ __cep_send_mad( p_port_cep, p_mad );\r
+\r
+ AL_EXIT( AL_DBG_CM );\r
+}\r
+\r
+\r
+/*\r
+ * Send a REJ with reason IB_REJ_TIMEOUT in response to a timed-out\r
+ * REQ or REP.  Allocates a fresh MAD, copies the addressing from the\r
+ * input MAD (which is NOT consumed), and places the local CA GUID in\r
+ * the ARI so the peer can match the reject.\r
+ */\r
+static void\r
+__reject_timeout(\r
+ IN cep_agent_t* const p_port_cep,\r
+ IN kcep_t* const p_cep,\r
+ IN const ib_mad_element_t* const p_mad )\r
+{\r
+ ib_api_status_t status;\r
+ ib_mad_element_t *p_rej_mad;\r
+ ib_mad_t *p_mad_buf;\r
+ ib_grh_t *p_grh;\r
+\r
+ AL_ENTER( AL_DBG_CM );\r
+\r
+ status = ib_get_mad( p_port_cep->pool_key, MAD_BLOCK_SIZE, &p_rej_mad );\r
+ if( status != IB_SUCCESS )\r
+ {\r
+ AL_TRACE_EXIT( AL_DBG_ERROR,\r
+ ("ib_get_mad returned %s\n", ib_get_err_str( status )) );\r
+ return;\r
+ }\r
+\r
+ /* Save the buffer pointers from the new element. */\r
+ p_mad_buf = p_rej_mad->p_mad_buf;\r
+ p_grh = p_rej_mad->p_grh;\r
+\r
+ /*\r
+ * Copy the input MAD element to the reject - this gives us\r
+ * all appropriate addressing information.\r
+ */\r
+ cl_memcpy( p_rej_mad, p_mad, sizeof(ib_mad_element_t) );\r
+ cl_memcpy( p_grh, p_mad->p_grh, sizeof(ib_grh_t) );\r
+\r
+ /* Restore the buffer pointers now that the copy is complete. */\r
+ p_rej_mad->p_mad_buf = p_mad_buf;\r
+ p_rej_mad->p_grh = p_grh;\r
+\r
+ status = conn_rej_set_pdata( NULL, 0, (mad_cm_rej_t*)p_mad_buf );\r
+ CL_ASSERT( status == IB_SUCCESS );\r
+\r
+ /* Copy the local CA GUID into the ARI. */\r
+ switch( p_mad->p_mad_buf->attr_id )\r
+ {\r
+ case CM_REQ_ATTR_ID:\r
+ status = conn_rej_set_ari(\r
+ (uint8_t*)&((mad_cm_req_t*)p_mad->p_mad_buf)->local_ca_guid,\r
+ sizeof(net64_t), (mad_cm_rej_t*)p_mad_buf );\r
+ CL_ASSERT( status == IB_SUCCESS );\r
+ __reject_mad( p_port_cep, p_cep, p_rej_mad, IB_REJ_TIMEOUT );\r
+ break;\r
+\r
+ case CM_REP_ATTR_ID:\r
+ status = conn_rej_set_ari(\r
+ (uint8_t*)&((mad_cm_rep_t*)p_mad->p_mad_buf)->local_ca_guid,\r
+ sizeof(net64_t), (mad_cm_rej_t*)p_mad_buf );\r
+ CL_ASSERT( status == IB_SUCCESS );\r
+ __reject_mad( p_port_cep, p_cep, p_rej_mad, IB_REJ_TIMEOUT );\r
+ break;\r
+\r
+ default:\r
+ /* Only REQ and REP timeouts are expected; release the MAD. */\r
+ CL_ASSERT( p_mad->p_mad_buf->attr_id == CM_REQ_ATTR_ID ||\r
+ p_mad->p_mad_buf->attr_id == CM_REP_ATTR_ID );\r
+ ib_put_mad( p_rej_mad );\r
+ return;\r
+ }\r
+\r
+ AL_EXIT( AL_DBG_CM );\r
+}\r
+\r
+\r
+/*\r
+ * Reject an incoming REQ for which no CEP exists: reformat the REQ\r
+ * buffer in place as a REJ (local comm ID 0) and send it back with no\r
+ * retries.  Consumes p_mad.\r
+ */\r
+static void\r
+__reject_req(\r
+ IN cep_agent_t* const p_port_cep,\r
+ IN ib_mad_element_t* const p_mad,\r
+ IN const ib_rej_status_t reason )\r
+{\r
+ mad_cm_req_t *p_req;\r
+ mad_cm_rej_t *p_rej;\r
+\r
+ AL_ENTER( AL_DBG_CM );\r
+\r
+ CL_ASSERT( p_port_cep );\r
+ CL_ASSERT( p_mad );\r
+ CL_ASSERT( reason != 0 );\r
+\r
+ /* Both views alias the same buffer - read REQ fields before writing. */\r
+ p_req = (mad_cm_req_t*)p_mad->p_mad_buf;\r
+ p_rej = (mad_cm_rej_t*)p_mad->p_mad_buf;\r
+\r
+ /*\r
+ * Format the reject information, overwriting the REQ data and send\r
+ * the response.\r
+ */\r
+ p_rej->hdr.attr_id = CM_REJ_ATTR_ID;\r
+ p_rej->remote_comm_id = p_req->local_comm_id;\r
+ p_rej->local_comm_id = 0;\r
+ conn_rej_set_msg_rejected( 0, p_rej );\r
+ p_rej->reason = reason;\r
+ conn_rej_set_ari( NULL, 0, p_rej );\r
+ conn_rej_set_pdata( NULL, 0, p_rej );\r
+ conn_rej_clr_rsvd_fields( p_rej );\r
+\r
+ /* One-shot send: no retries, no response expected. */\r
+ p_mad->retry_cnt = 0;\r
+ p_mad->send_opt = 0;\r
+ p_mad->timeout_ms = 0;\r
+ p_mad->resp_expected = FALSE;\r
+\r
+ __cep_send_mad( p_port_cep, p_mad );\r
+\r
+ AL_EXIT( AL_DBG_CM );\r
+}\r
+\r
+\r
+/*\r
+ * Build the address vector av[idx] (0 = primary, 1 = alternate) from\r
+ * the corresponding path record of a received REQ.  On any failure the\r
+ * AV is left zeroed (and local_ca_guid is cleared when idx == 0), which\r
+ * later causes the REP to fail for an invalid primary path.\r
+ */\r
+static void\r
+__format_req_av(\r
+ IN kcep_t* const p_cep,\r
+ IN const mad_cm_req_t* const p_req,\r
+ IN const uint8_t idx )\r
+{\r
+ cep_agent_t *p_port_cep;\r
+ const req_path_info_t *p_path;\r
+\r
+ AL_ENTER( AL_DBG_CM );\r
+\r
+ CL_ASSERT( p_cep );\r
+ CL_ASSERT( p_req );\r
+\r
+ cl_memclr( &p_cep->av[idx], sizeof(kcep_av_t) );\r
+\r
+ /* primary_path and alternate_path are adjacent; index selects one. */\r
+ p_path = &((&p_req->primary_path)[idx]);\r
+\r
+ p_port_cep = __find_port_cep( &p_path->remote_gid,\r
+ p_path->remote_lid, p_req->pkey, &p_cep->av[idx].pkey_index );\r
+ if( !p_port_cep )\r
+ {\r
+ if( !idx )\r
+ p_cep->local_ca_guid = 0;\r
+ AL_EXIT( AL_DBG_CM );\r
+ return;\r
+ }\r
+\r
+ if( !idx )\r
+ p_cep->local_ca_guid = p_port_cep->h_ca->obj.p_ci_ca->verbs.guid;\r
+\r
+ /* Check that CA GUIDs match if formatting the alternate path. */\r
+ if( idx &&\r
+ p_port_cep->h_ca->obj.p_ci_ca->verbs.guid != p_cep->local_ca_guid )\r
+ {\r
+ AL_EXIT( AL_DBG_CM );\r
+ return;\r
+ }\r
+\r
+ /*\r
+ * Pkey indices must match if formatting the alternate path - the QP\r
+ * modify structure only allows for a single PKEY index to be specified.\r
+ */\r
+ if( idx &&\r
+ p_cep->av[0].pkey_index != p_cep->av[1].pkey_index )\r
+ {\r
+ AL_EXIT( AL_DBG_CM );\r
+ return;\r
+ }\r
+\r
+ p_cep->av[idx].port_guid = p_port_cep->port_guid;\r
+ p_cep->av[idx].attr.port_num = p_port_cep->port_num;\r
+\r
+ p_cep->av[idx].attr.sl = conn_req_path_get_svc_lvl( p_path );\r
+ p_cep->av[idx].attr.dlid = p_path->local_lid;\r
+\r
+ /* Off-subnet destination: fill in the GRH from the path record. */\r
+ if( !conn_req_path_get_subn_lcl( p_path ) )\r
+ {\r
+ p_cep->av[idx].attr.grh_valid = TRUE;\r
+ p_cep->av[idx].attr.grh.ver_class_flow = ib_grh_set_ver_class_flow(\r
+ 1, p_path->traffic_class, conn_req_path_get_flow_lbl( p_path ) );\r
+ p_cep->av[idx].attr.grh.hop_limit = p_path->hop_limit;\r
+ p_cep->av[idx].attr.grh.dest_gid = p_path->local_gid;\r
+ p_cep->av[idx].attr.grh.src_gid = p_path->remote_gid;\r
+ }\r
+ else\r
+ {\r
+ p_cep->av[idx].attr.grh_valid = FALSE;\r
+ }\r
+ p_cep->av[idx].attr.static_rate = conn_req_path_get_pkt_rate( p_path );\r
+ p_cep->av[idx].attr.path_bits =\r
+ (uint8_t)(p_path->remote_lid - p_port_cep->base_lid);\r
+\r
+ /*\r
+ * Note that while we never use the connected AV attributes internally,\r
+ * we store them so we can pass them back to users.\r
+ */\r
+ p_cep->av[idx].attr.conn.path_mtu = conn_req_get_mtu( p_req );\r
+ p_cep->av[idx].attr.conn.local_ack_timeout =\r
+ conn_req_path_get_lcl_ack_timeout( p_path );\r
+ p_cep->av[idx].attr.conn.seq_err_retry_cnt =\r
+ conn_req_get_retry_cnt( p_req );\r
+ p_cep->av[idx].attr.conn.rnr_retry_cnt =\r
+ conn_req_get_rnr_retry_cnt( p_req );\r
+\r
+ AL_EXIT( AL_DBG_CM );\r
+}\r
+\r
+\r
+/*\r
+ * + Validates the path information provided in the REQ and stores the\r
+ * associated CA attributes and port indeces.\r
+ * + Transitions a connection object from active to passive in the peer case.\r
+ * + Sets the path information in the connection and sets the CA GUID\r
+ * in the REQ callback record.\r
+ */\r
+static void\r
+__save_wire_req(\r
+ IN OUT kcep_t* const p_cep,\r
+ IN OUT mad_cm_req_t* const p_req )\r
+{\r
+ AL_ENTER( AL_DBG_CM );\r
+\r
+ /* Passive side: we received the REQ. */\r
+ p_cep->state = CEP_STATE_REQ_RCVD;\r
+ p_cep->was_active = FALSE;\r
+\r
+ p_cep->sid = p_req->sid;\r
+\r
+ /* Store pertinent information in the connection. */\r
+ p_cep->remote_comm_id = p_req->local_comm_id;\r
+ p_cep->remote_ca_guid = p_req->local_ca_guid;\r
+\r
+ p_cep->remote_qpn = conn_req_get_lcl_qpn( p_req );\r
+ p_cep->local_qpn = 0;\r
+\r
+ p_cep->retry_timeout =\r
+ __calc_mad_timeout( conn_req_get_lcl_resp_timeout( p_req ) );\r
+\r
+ /* Store the retry count. */\r
+ p_cep->max_cm_retries = conn_req_get_max_cm_retries( p_req );\r
+\r
+ /*\r
+ * Copy the paths from the req_rec into the connection for\r
+ * future use. Note that if the primary path is invalid,\r
+ * the REP will fail.\r
+ */\r
+ __format_req_av( p_cep, p_req, 0 );\r
+\r
+ if( p_req->alternate_path.local_lid )\r
+ __format_req_av( p_cep, p_req, 1 );\r
+ else\r
+ cl_memclr( &p_cep->av[1], sizeof(kcep_av_t) );\r
+\r
+ p_cep->idx_primary = 0;\r
+\r
+ /* Store the maximum packet lifetime, used to calculate timewait. */\r
+ p_cep->max_2pkt_life = conn_req_path_get_lcl_ack_timeout( &p_req->primary_path );\r
+ p_cep->max_2pkt_life = max( p_cep->max_2pkt_life,\r
+ conn_req_path_get_lcl_ack_timeout( &p_req->alternate_path ) );\r
+\r
+ /*\r
+ * Make sure the target ack delay is cleared - the above\r
+ * "packet life" includes it.\r
+ */\r
+ p_cep->target_ack_delay = 0;\r
+\r
+ /* Store the requested initiator depth. */\r
+ p_cep->req_init_depth = conn_req_get_init_depth( p_req );\r
+\r
+ /*\r
+ * Store the provided responder resources. These turn into the local\r
+ * QP's initiator depth.\r
+ */\r
+ p_cep->init_depth = conn_req_get_resp_res( p_req );\r
+\r
+ p_cep->sq_psn = conn_req_get_starting_psn( p_req );\r
+\r
+ /* Replies reuse the requester's transaction ID. */\r
+ p_cep->tid = p_req->hdr.trans_id;\r
+ /* copy mad info for cm handoff */\r
+ /* TODO: Do need to support CM handoff? */\r
+ //p_cep->mads.req = *p_req;\r
+\r
+ AL_EXIT( AL_DBG_CM );\r
+}\r
+\r
+\r
+/*\r
+ * Resend the last MAD cached in p_cep->mads (MRA, RTU or DREP,\r
+ * depending on state) using the supplied MAD element.  In states with\r
+ * nothing to repeat the element is simply returned to the pool; either\r
+ * way p_mad is consumed.  Must be called with the CEP lock held.\r
+ */\r
+static void\r
+__repeat_mad(\r
+ IN cep_agent_t* const p_port_cep,\r
+ IN kcep_t* const p_cep,\r
+ IN ib_mad_element_t* const p_mad )\r
+{\r
+ AL_ENTER( AL_DBG_CM );\r
+\r
+ CL_ASSERT( p_port_cep );\r
+ CL_ASSERT( p_cep );\r
+ CL_ASSERT( p_mad );\r
+\r
+ /* Repeat the last mad sent for the connection. */\r
+ switch( p_cep->state )\r
+ {\r
+ case CEP_STATE_REQ_MRA_SENT: /* resend MRA(REQ) */\r
+ case CEP_STATE_REP_MRA_SENT: /* resend MRA(REP) */\r
+ case CEP_STATE_LAP_MRA_SENT: /* resend MRA(LAP) */\r
+ case CEP_STATE_ESTABLISHED: /* resend RTU */\r
+ case CEP_STATE_TIMEWAIT: /* resend the DREP */\r
+ cl_memcpy( p_mad->p_mad_buf, &p_cep->mads, MAD_BLOCK_SIZE );\r
+ p_mad->send_context1 = NULL;\r
+ p_mad->send_context2 = NULL;\r
+ __cep_send_mad( p_port_cep, p_mad );\r
+ break;\r
+\r
+ default:\r
+ /* Return the MAD to the mad pool */\r
+ ib_put_mad( p_mad );\r
+ break;\r
+ }\r
+\r
+ AL_EXIT( AL_DBG_CM );\r
+}\r
+\r
+\r
+/*\r
+ * Handle an incoming CM REQ at DISPATCH_LEVEL, under the CEP manager\r
+ * lock.  Flow: a duplicate REQ is repeated/rejected/dropped; otherwise\r
+ * a new CEP is created, matched against active listens by SID/port/\r
+ * compare-data, bound to the listener's AL, and the MAD is queued to\r
+ * the LISTEN CEP for user processing.  No listen match sends a REJ\r
+ * (IB_REJ_INVALID_SID).  The MAD is consumed on every path.\r
+ */\r
+static void\r
+__process_req(\r
+ IN cep_agent_t* const p_port_cep,\r
+ IN ib_mad_element_t* const p_mad )\r
+{\r
+ ib_api_status_t status;\r
+ mad_cm_req_t *p_req;\r
+ kcep_t *p_cep, *p_new_cep, *p_stale_cep;\r
+ KLOCK_QUEUE_HANDLE hdl;\r
+ ib_rej_status_t reason;\r
+\r
+ AL_ENTER( AL_DBG_CM );\r
+\r
+ CL_ASSERT( KeGetCurrentIrql() == DISPATCH_LEVEL );\r
+\r
+ p_req = (mad_cm_req_t*)p_mad->p_mad_buf;\r
+\r
+ AL_TRACE( AL_DBG_CM,\r
+ ("REQ: comm_id (x%x) qpn (x%x) received\n",\r
+ p_req->local_comm_id, conn_req_get_lcl_qpn( p_req )) );\r
+\r
+ KeAcquireInStackQueuedSpinLockAtDpcLevel( &gp_cep_mgr->lock, &hdl );\r
+\r
+ if( conn_req_get_qp_type( p_req ) > IB_QPT_UNRELIABLE_CONN )\r
+ {\r
+ /* Reserved value. Reject. */\r
+ AL_TRACE( AL_DBG_ERROR, ("Invalid transport type received.\n") );\r
+ reason = IB_REJ_INVALID_XPORT;\r
+ goto reject;\r
+ }\r
+\r
+ /* Match against pending connections using remote comm ID and CA GUID. */\r
+ p_cep = __lookup_by_id( p_req->local_comm_id, p_req->local_ca_guid );\r
+ if( p_cep )\r
+ {\r
+ /* Already received the REQ. */\r
+ switch( p_cep->state )\r
+ {\r
+ case CEP_STATE_REQ_MRA_SENT:\r
+ __repeat_mad( p_port_cep, p_cep, p_mad );\r
+ break;\r
+\r
+ case CEP_STATE_TIMEWAIT:\r
+ case CEP_STATE_DESTROY:\r
+ /* Send a reject. */\r
+ AL_TRACE( AL_DBG_CM,\r
+ ("REQ received for connection in TIME_WAIT state.\n") );\r
+ __reject_req( p_port_cep, p_mad, IB_REJ_STALE_CONN );\r
+ break;\r
+\r
+ default:\r
+ /*\r
+ * Let regular retries repeat the MAD. If our last message was\r
+ * dropped, resending only adds to the congestion. If it wasn't\r
+ * dropped, then the remote CM will eventually process it, and\r
+ * we'd just be adding traffic.\r
+ */\r
+ AL_TRACE( AL_DBG_CM, ("Duplicate REQ received.\n") );\r
+ ib_put_mad( p_mad );\r
+ }\r
+ KeReleaseInStackQueuedSpinLockFromDpcLevel( &hdl );\r
+ AL_EXIT( AL_DBG_CM );\r
+ return;\r
+ }\r
+\r
+ /*\r
+ * Allocate a new CEP for the new request. This will\r
+ * prevent multiple identical REQs from queueing up for processing.\r
+ */\r
+ p_new_cep = __create_cep();\r
+ if( !p_new_cep )\r
+ {\r
+ /* Reject the request for insufficient resources. */\r
+ reason = IB_REJ_INSUF_RESOURCES;\r
+ AL_TRACE_EXIT( AL_DBG_ERROR,\r
+ ("al_create_cep failed\nREJ sent for insufficient resources.\n") );\r
+ goto reject;\r
+ }\r
+\r
+ __save_wire_req( p_new_cep, p_req );\r
+\r
+ /*\r
+ * Match against listens using SID and compare data, also provide the receiving\r
+ * MAD service's port GUID so we can properly filter.\r
+ */\r
+ p_cep = __lookup_listen( p_req->sid, p_port_cep->port_guid, p_req->pdata );\r
+ if( p_cep )\r
+ {\r
+ __bind_cep( p_new_cep, p_cep->p_cid->h_al, p_cep->pfn_cb, NULL );\r
+\r
+ /* Add the new CEP to the map so that repeated REQs match up. */\r
+ p_stale_cep = __insert_cep( p_new_cep );\r
+ if( p_stale_cep != p_new_cep )\r
+ {\r
+ /* Duplicate - must be a stale connection. */\r
+ /* TODO: Fail the CEP in p_stale_cep */\r
+ reason = IB_REJ_STALE_CONN;\r
+ goto unbind;\r
+ }\r
+\r
+ /*\r
+ * Queue the mad - the return value indicates whether we should\r
+ * invoke the callback.  Note the MAD is queued to the LISTEN CEP.\r
+ */\r
+ status = __cep_queue_mad( p_cep, p_mad );\r
+ switch( status )\r
+ {\r
+ case IB_SUCCESS:\r
+ case IB_PENDING:\r
+ p_mad->send_context1 = p_new_cep;\r
+ break;\r
+\r
+ default:\r
+ reason = IB_REJ_INSUF_RESOURCES;\r
+ goto unbind;\r
+ }\r
+ }\r
+ else\r
+ {\r
+ AL_TRACE( AL_DBG_CM, ("No listens active!\n") );\r
+\r
+ /* Match against peer-to-peer requests using SID and compare data. */\r
+ //p_cep = __lookup_peer();\r
+ //if( p_cep )\r
+ //{\r
+ // p_mad->send_context2 = NULL;\r
+ // p_list_item = cl_qlist_find_from_head( &gp_cep_mgr->pending_list,\r
+ // __match_peer, p_req );\r
+ // if( p_list_item != cl_qlist_end( &gp_cep_mgr->pending_list ) )\r
+ // {\r
+ // KeReleaseInStackQueuedSpinLockFromDpcLevel( &hdl );\r
+ // p_conn = PARENT_STRUCT( p_list_item, kcep_t, map_item );\r
+ // __peer_req( p_port_cep, p_conn, p_async_mad->p_mad );\r
+ // cl_free( p_async_mad );\r
+ // CL_TRACE_EXIT( AL_DBG_CM, g_al_dbg_lvl,\r
+ // ("REQ matched a peer-to-peer request.\n") );\r
+ // return;\r
+ // }\r
+ // reason = IB_REJ_INVALID_SID;\r
+ // goto free;\r
+ //}\r
+ //else\r
+ {\r
+ /* No match found. Reject. */\r
+ reason = IB_REJ_INVALID_SID;\r
+ AL_TRACE( AL_DBG_CM, ("REQ received but no match found.\n") );\r
+ goto cleanup;\r
+ }\r
+ }\r
+\r
+ KeReleaseInStackQueuedSpinLockFromDpcLevel( &hdl );\r
+\r
+ /* Process any queued MADs for the CEP. */\r
+ if( status == IB_SUCCESS )\r
+ __process_cep( p_cep );\r
+\r
+ AL_EXIT( AL_DBG_CM );\r
+ return;\r
+\r
+unbind:\r
+ __unbind_cep( p_new_cep );\r
+\r
+cleanup:\r
+ /*\r
+ * Move the CEP in the idle state so that we don't send a reject\r
+ * for it when cleaning up. Also clear the RQPN and RCID so that\r
+ * we don't try to remove it from our maps (since it isn't inserted).\r
+ * Falls through to send the REJ below.\r
+ */\r
+ p_new_cep->state = CEP_STATE_IDLE;\r
+ p_new_cep->remote_comm_id = 0;\r
+ p_new_cep->remote_qpn = 0;\r
+ __cleanup_cep( p_new_cep );\r
+\r
+reject:\r
+ __reject_req( p_port_cep, p_mad, reason );\r
+\r
+ KeReleaseInStackQueuedSpinLockFromDpcLevel( &hdl );\r
+ AL_EXIT( AL_DBG_CM );\r
+}\r
+\r
+\r
+/*\r
+ * Active side: record connection parameters from a received REP and\r
+ * transition to REP_RCVD.  Updates both AVs' local ACK timeouts with\r
+ * the peer's target ACK delay.\r
+ */\r
+static void\r
+__save_wire_rep(\r
+ IN OUT kcep_t* const p_cep,\r
+ IN const mad_cm_rep_t* const p_rep )\r
+{\r
+ AL_ENTER( AL_DBG_CM );\r
+\r
+ /* The send should have been cancelled during MRA processing. */\r
+ p_cep->state = CEP_STATE_REP_RCVD;\r
+\r
+ /* Store pertinent information in the connection. */\r
+ p_cep->remote_comm_id = p_rep->local_comm_id;\r
+ p_cep->remote_ca_guid = p_rep->local_ca_guid;\r
+\r
+ p_cep->remote_qpn = conn_rep_get_lcl_qpn( p_rep );\r
+\r
+ /* Store the remote endpoint's target ACK delay. */\r
+ p_cep->target_ack_delay = conn_rep_get_target_ack_delay( p_rep );\r
+\r
+ /* Update the local ACK delay stored in the AV's. */\r
+ p_cep->av[0].attr.conn.local_ack_timeout = calc_lcl_ack_timeout(\r
+ p_cep->av[0].attr.conn.local_ack_timeout, p_cep->target_ack_delay );\r
+ p_cep->av[0].attr.conn.rnr_retry_cnt = conn_rep_get_rnr_retry_cnt( p_rep );\r
+\r
+ /* Non-zero port GUID means an alternate path was stored - update it too. */\r
+ if( p_cep->av[1].port_guid )\r
+ {\r
+ p_cep->av[1].attr.conn.local_ack_timeout = calc_lcl_ack_timeout(\r
+ p_cep->av[1].attr.conn.local_ack_timeout,\r
+ p_cep->target_ack_delay );\r
+ p_cep->av[1].attr.conn.rnr_retry_cnt =\r
+ p_cep->av[0].attr.conn.rnr_retry_cnt;\r
+ }\r
+\r
+ /* Peer's responder resources become our initiator depth, and vice versa. */\r
+ p_cep->init_depth = p_rep->resp_resources;\r
+ p_cep->resp_res = p_rep->initiator_depth;\r
+\r
+ p_cep->sq_psn = conn_rep_get_starting_psn( p_rep );\r
+\r
+ AL_EXIT( AL_DBG_CM );\r
+}\r
+\r
+\r
+/*\r
+ * Handle an incoming MRA: validate that it acknowledges the message we\r
+ * have outstanding (state must be *_SENT and match the MRAed message\r
+ * class), delay the pending send by the service timeout, and queue the\r
+ * MRA to the CEP exactly once.  Consumes p_mad.\r
+ */\r
+static void\r
+__process_mra(\r
+ IN ib_mad_element_t* const p_mad )\r
+{\r
+ ib_api_status_t status;\r
+ mad_cm_mra_t *p_mra;\r
+ kcep_t *p_cep;\r
+ KLOCK_QUEUE_HANDLE hdl;\r
+\r
+ AL_ENTER( AL_DBG_CM );\r
+\r
+ CL_ASSERT( KeGetCurrentIrql() == DISPATCH_LEVEL );\r
+\r
+ p_mra = (mad_cm_mra_t*)p_mad->p_mad_buf;\r
+\r
+ KeAcquireInStackQueuedSpinLockAtDpcLevel( &gp_cep_mgr->lock, &hdl );\r
+ p_cep = __lookup_cep( NULL, p_mra->remote_comm_id );\r
+ if( !p_cep )\r
+ {\r
+ AL_TRACE( AL_DBG_CM,\r
+ ("MRA received that could not be matched.\n") );\r
+ goto err;\r
+ }\r
+\r
+ /* If we already know the remote comm ID it must match. */\r
+ if( p_cep->remote_comm_id )\r
+ {\r
+ if( p_cep->remote_comm_id != p_mra->local_comm_id )\r
+ {\r
+ AL_TRACE( AL_DBG_CM,\r
+ ("MRA received that could not be matched.\n") );\r
+ goto err;\r
+ }\r
+ }\r
+\r
+ /* The MRAed message class must match what we have outstanding. */\r
+ if( !(p_cep->state & CEP_STATE_SENT) ||\r
+ (1 << conn_mra_get_msg_mraed( p_mra ) !=\r
+ (p_cep->state & CEP_MSG_MASK)) )\r
+ {\r
+ /* Invalid state. */\r
+ AL_TRACE( AL_DBG_CM, ("MRA received in invalid state.\n") );\r
+ goto err;\r
+ }\r
+\r
+ /* Delay the current send. */\r
+ CL_ASSERT( p_cep->p_send_mad );\r
+ ib_delay_mad( p_cep->h_mad_svc, p_cep->p_send_mad, __calc_mad_timeout(\r
+ conn_mra_get_svc_timeout( p_mra ) + p_cep->max_2pkt_life - 1 ) );\r
+\r
+ /* We only invoke a single callback for MRA. */\r
+ if( p_cep->state & CEP_STATE_MRA )\r
+ {\r
+ /* Invalid state. */\r
+ AL_TRACE( AL_DBG_CM, ("Already received MRA.\n") );\r
+ goto err;\r
+ }\r
+\r
+ p_cep->state |= CEP_STATE_MRA;\r
+\r
+ status = __cep_queue_mad( p_cep, p_mad );\r
+ CL_ASSERT( status != IB_INVALID_STATE );\r
+\r
+ KeReleaseInStackQueuedSpinLockFromDpcLevel( &hdl );\r
+\r
+ if( status == IB_SUCCESS )\r
+ __process_cep( p_cep );\r
+\r
+ AL_EXIT( AL_DBG_CM );\r
+ return;\r
+\r
+err:\r
+ KeReleaseInStackQueuedSpinLockFromDpcLevel( &hdl );\r
+ ib_put_mad( p_mad );\r
+ AL_EXIT( AL_DBG_CM );\r
+}\r
+\r
+\r
+/*\r
+ * Handle an incoming REJ: locate the CEP (by our comm ID, or for a\r
+ * timeout REJ by remote comm ID + CA GUID from the ARI), then either\r
+ * abort establishment (back to IDLE) or, for an established connection,\r
+ * transition to timewait.  Queues the REJ for user processing.\r
+ * Consumes p_mad on every path.\r
+ */\r
+static void\r
+__process_rej(\r
+ IN ib_mad_element_t* const p_mad )\r
+{\r
+ ib_api_status_t status;\r
+ mad_cm_rej_t *p_rej;\r
+ kcep_t *p_cep = NULL;\r
+ KLOCK_QUEUE_HANDLE hdl;\r
+ net64_t ca_guid;\r
+\r
+ AL_ENTER( AL_DBG_CM );\r
+\r
+ CL_ASSERT( KeGetCurrentIrql() == DISPATCH_LEVEL );\r
+\r
+ p_rej = (mad_cm_rej_t*)p_mad->p_mad_buf;\r
+\r
+ /* Either one of the communication IDs must be set. */\r
+ if( !p_rej->remote_comm_id && !p_rej->local_comm_id )\r
+ goto err1;\r
+\r
+ /* Check the pending list by the remote CA GUID and connection ID. */\r
+ KeAcquireInStackQueuedSpinLockAtDpcLevel( &gp_cep_mgr->lock, &hdl );\r
+ if( p_rej->remote_comm_id )\r
+ {\r
+ p_cep = __lookup_cep( NULL, p_rej->remote_comm_id );\r
+ }\r
+ else if( p_rej->reason == IB_REJ_TIMEOUT &&\r
+ conn_rej_get_ari_len( p_rej ) == sizeof(net64_t) )\r
+ {\r
+ /* Timeout REJ carries the peer's CA GUID in the ARI. */\r
+ cl_memcpy( &ca_guid, p_rej->ari, sizeof(net64_t) );\r
+ p_cep = __lookup_by_id( p_rej->local_comm_id, ca_guid );\r
+ }\r
+\r
+ if( !p_cep )\r
+ {\r
+ goto err2;\r
+ }\r
+\r
+ if( p_cep->remote_comm_id &&\r
+ p_cep->remote_comm_id != p_rej->local_comm_id )\r
+ {\r
+ goto err2;\r
+ }\r
+\r
+ switch( p_cep->state )\r
+ {\r
+ case CEP_STATE_REQ_SENT:\r
+ /*\r
+ * Ignore rejects with the status set to IB_REJ_INVALID_SID. We will\r
+ * continue to retry (up to max_cm_retries) to connect to the remote\r
+ * side. This is required to support peer-to-peer connections and\r
+ * clients that try to connect before the server comes up.\r
+ */\r
+ if( p_rej->reason == IB_REJ_INVALID_SID )\r
+ {\r
+ AL_TRACE( AL_DBG_CM,\r
+ ("Request rejected (invalid SID) - retrying.\n") );\r
+ goto err2;\r
+ }\r
+\r
+ /* Fall through */\r
+ case CEP_STATE_REP_SENT:\r
+ /* Cancel any outstanding MAD. */\r
+ if( p_cep->p_send_mad )\r
+ {\r
+ ib_cancel_mad( p_cep->h_mad_svc, p_cep->p_send_mad );\r
+ p_cep->p_send_mad = NULL;\r
+ }\r
+\r
+ /* Fall through */\r
+ case CEP_STATE_REQ_RCVD:\r
+ case CEP_STATE_REP_RCVD:\r
+ case CEP_STATE_REQ_MRA_RCVD:\r
+ case CEP_STATE_REQ_MRA_SENT:\r
+ case CEP_STATE_REP_MRA_RCVD:\r
+ case CEP_STATE_REP_MRA_SENT:\r
+ case CEP_STATE_PRE_REP:\r
+ case CEP_STATE_PRE_REP_MRA_SENT:\r
+ /* Abort connection establishment. No transition to timewait. */\r
+ __remove_cep( p_cep );\r
+ p_cep->state = CEP_STATE_IDLE;\r
+ break;\r
+\r
+ case CEP_STATE_ESTABLISHED:\r
+ case CEP_STATE_LAP_RCVD:\r
+ case CEP_STATE_LAP_SENT:\r
+ case CEP_STATE_LAP_MRA_RCVD:\r
+ case CEP_STATE_LAP_MRA_SENT:\r
+ case CEP_STATE_PRE_APR:\r
+ case CEP_STATE_PRE_APR_MRA_SENT:\r
+ p_cep->state = CEP_STATE_TIMEWAIT;\r
+ __insert_timewait( p_cep );\r
+ break;\r
+\r
+ default:\r
+ /* Ignore the REJ. */\r
+ AL_TRACE( AL_DBG_CM, ("REJ received in invalid state.\n") );\r
+ goto err2;\r
+ }\r
+\r
+ status = __cep_queue_mad( p_cep, p_mad );\r
+ CL_ASSERT( status != IB_INVALID_STATE );\r
+\r
+ KeReleaseInStackQueuedSpinLockFromDpcLevel( &hdl );\r
+\r
+ if( status == IB_SUCCESS )\r
+ __process_cep( p_cep );\r
+\r
+ AL_EXIT( AL_DBG_CM );\r
+ return;\r
+\r
+err2:\r
+ KeReleaseInStackQueuedSpinLockFromDpcLevel( &hdl );\r
+err1:\r
+ ib_put_mad( p_mad );\r
+ AL_EXIT( AL_DBG_CM );\r
+}\r
+\r
+\r
+/*\r
+ * Handle an incoming REP on the active side: save the wire data,\r
+ * insert the CEP into the remote-ID maps (detecting stale\r
+ * connections), cancel the outstanding REQ send, and queue the REP.\r
+ * Duplicate REPs on an established connection repeat the cached\r
+ * MRA/RTU.  Consumes p_mad on every path.\r
+ */\r
+static void\r
+__process_rep(\r
+ IN cep_agent_t* const p_port_cep,\r
+ IN ib_mad_element_t* const p_mad )\r
+{\r
+ ib_api_status_t status;\r
+ mad_cm_rep_t *p_rep;\r
+ kcep_t *p_cep;\r
+ KLOCK_QUEUE_HANDLE hdl;\r
+ cep_state_t old_state;\r
+\r
+ AL_ENTER( AL_DBG_CM );\r
+\r
+ CL_ASSERT( KeGetCurrentIrql() == DISPATCH_LEVEL );\r
+\r
+ p_rep = (mad_cm_rep_t*)p_mad->p_mad_buf;\r
+\r
+ AL_TRACE( AL_DBG_CM,\r
+ ("REP: comm_id (x%x) received\n", p_rep->local_comm_id ) );\r
+\r
+ KeAcquireInStackQueuedSpinLockAtDpcLevel( &gp_cep_mgr->lock, &hdl );\r
+ p_cep = __lookup_cep( NULL, p_rep->remote_comm_id );\r
+ if( !p_cep )\r
+ {\r
+ KeReleaseInStackQueuedSpinLockFromDpcLevel( &hdl );\r
+ ib_put_mad( p_mad );\r
+ AL_TRACE_EXIT( AL_DBG_CM,\r
+ ("REP received that could not be matched.\n") );\r
+ return;\r
+ }\r
+\r
+ switch( p_cep->state )\r
+ {\r
+ case CEP_STATE_REQ_MRA_RCVD:\r
+ case CEP_STATE_REQ_SENT:\r
+ old_state = p_cep->state;\r
+ /* Save pertinent information and change state. */\r
+ __save_wire_rep( p_cep, p_rep );\r
+\r
+ if( __insert_cep( p_cep ) != p_cep )\r
+ {\r
+ /* Roll back the state change. */\r
+ p_cep->state = old_state;\r
+ __reject_mad( p_port_cep, p_cep, p_mad, IB_REJ_STALE_CONN );\r
+ /* TODO: Handle stale connection. */\r
+ break;\r
+ }\r
+\r
+ /*\r
+ * Cancel any outstanding send. Note that we do this only after\r
+ * inserting the CEP - if we failed, then we the send will timeout\r
+ * and we'll finish our way through the state machine.\r
+ */\r
+ if( p_cep->p_send_mad )\r
+ {\r
+ ib_cancel_mad( p_cep->h_mad_svc, p_cep->p_send_mad );\r
+ p_cep->p_send_mad = NULL;\r
+ }\r
+\r
+ status = __cep_queue_mad( p_cep, p_mad );\r
+ CL_ASSERT( status != IB_INVALID_STATE );\r
+\r
+ KeReleaseInStackQueuedSpinLockFromDpcLevel( &hdl );\r
+\r
+ if( status == IB_SUCCESS )\r
+ __process_cep( p_cep );\r
+\r
+ AL_EXIT( AL_DBG_CM );\r
+ return;\r
+\r
+ case CEP_STATE_ESTABLISHED:\r
+ case CEP_STATE_LAP_RCVD:\r
+ case CEP_STATE_LAP_SENT:\r
+ case CEP_STATE_LAP_MRA_RCVD:\r
+ case CEP_STATE_LAP_MRA_SENT:\r
+ case CEP_STATE_REP_MRA_SENT:\r
+ /* Repeat the MRA or RTU. */\r
+ __repeat_mad( p_port_cep, p_cep, p_mad );\r
+ break;\r
+\r
+ default:\r
+ ib_put_mad( p_mad );\r
+ AL_TRACE( AL_DBG_CM, ("REP received in invalid state.\n") );\r
+ break;\r
+ }\r
+\r
+ KeReleaseInStackQueuedSpinLockFromDpcLevel( &hdl );\r
+\r
+ AL_EXIT( AL_DBG_CM );\r
+}\r
+\r
+\r
+/*\r
+ * Handle an incoming RTU on the passive side: cancel the outstanding\r
+ * REP send, transition to ESTABLISHED, queue the RTU, and update the\r
+ * timewait interval.  Consumes p_mad on every path.\r
+ */\r
+static void\r
+__process_rtu(\r
+ IN ib_mad_element_t* const p_mad )\r
+{\r
+ ib_api_status_t status;\r
+ mad_cm_rtu_t *p_rtu;\r
+ kcep_t *p_cep;\r
+ KLOCK_QUEUE_HANDLE hdl;\r
+\r
+ AL_ENTER( AL_DBG_CM );\r
+\r
+ CL_ASSERT( KeGetCurrentIrql() == DISPATCH_LEVEL );\r
+\r
+ p_rtu = (mad_cm_rtu_t*)p_mad->p_mad_buf;\r
+\r
+ AL_TRACE( AL_DBG_CM,\r
+ ("RTU: comm_id (x%x) received\n", p_rtu->local_comm_id) );\r
+\r
+ /* Find the connection by local connection ID. */\r
+ KeAcquireInStackQueuedSpinLockAtDpcLevel( &gp_cep_mgr->lock, &hdl );\r
+ p_cep = __lookup_cep( NULL, p_rtu->remote_comm_id );\r
+ if( !p_cep || p_cep->remote_comm_id != p_rtu->local_comm_id )\r
+ {\r
+ AL_TRACE( AL_DBG_CM, ("RTU received that could not be matched.\n") );\r
+ goto done;\r
+ }\r
+\r
+ switch( p_cep->state )\r
+ {\r
+ case CEP_STATE_REP_SENT:\r
+ case CEP_STATE_REP_MRA_RCVD:\r
+ /* Cancel any outstanding send. */\r
+ if( p_cep->p_send_mad )\r
+ {\r
+ ib_cancel_mad( p_cep->h_mad_svc, p_cep->p_send_mad );\r
+ p_cep->p_send_mad = NULL;\r
+ }\r
+\r
+ p_cep->state = CEP_STATE_ESTABLISHED;\r
+\r
+ status = __cep_queue_mad( p_cep, p_mad );\r
+ CL_ASSERT( status != IB_INVALID_STATE );\r
+\r
+ /* Update timewait time. */\r
+ __calc_timewait( p_cep );\r
+\r
+ KeReleaseInStackQueuedSpinLockFromDpcLevel( &hdl );\r
+\r
+ if( status == IB_SUCCESS )\r
+ __process_cep( p_cep );\r
+\r
+ AL_EXIT( AL_DBG_CM );\r
+ return;\r
+\r
+ default:\r
+ AL_TRACE( AL_DBG_CM, ("RTU received in invalid state.\n") );\r
+ break;\r
+ }\r
+\r
+done:\r
+ KeReleaseInStackQueuedSpinLockFromDpcLevel( &hdl );\r
+ ib_put_mad( p_mad );\r
+ AL_EXIT( AL_DBG_CM );\r
+}\r
+\r
+\r
+/*
+ * Handle a received DREQ (Disconnect Request) MAD at DISPATCH_LEVEL.
+ * Matches the DREQ to a CEP by comm IDs and local QPN.  In any connected
+ * state the CEP moves to DREQ_RCVD and the MAD is queued to the owner; in
+ * timewait the previously formatted DREP is repeated.  Consumes p_mad on
+ * every path.
+ */
+static void
+__process_dreq(
+    IN cep_agent_t* const p_port_cep,
+    IN ib_mad_element_t* const p_mad )
+{
+    ib_api_status_t status;
+    mad_cm_dreq_t *p_dreq;
+    kcep_t *p_cep;
+    KLOCK_QUEUE_HANDLE hdl;
+
+    AL_ENTER( AL_DBG_CM );
+
+    CL_ASSERT( KeGetCurrentIrql() == DISPATCH_LEVEL );
+
+    p_dreq = (mad_cm_dreq_t*)p_mad->p_mad_buf;
+
+    AL_TRACE( AL_DBG_CM,
+        ("DREQ: comm_id (x%x) qpn (x%x) received\n",
+        p_dreq->local_comm_id, conn_dreq_get_remote_qpn( p_dreq )) );
+
+    /* Find the connection by connection IDs and validate the QPN. */
+    KeAcquireInStackQueuedSpinLockAtDpcLevel( &gp_cep_mgr->lock, &hdl );
+    p_cep = __lookup_cep( NULL, p_dreq->remote_comm_id );
+    if( !p_cep ||
+        p_cep->remote_comm_id != p_dreq->local_comm_id ||
+        p_cep->local_qpn != conn_dreq_get_remote_qpn( p_dreq ) )
+    {
+        AL_TRACE( AL_DBG_CM, ("DREQ received that could not be matched.\n") );
+        KeReleaseInStackQueuedSpinLockFromDpcLevel( &hdl );
+        ib_put_mad( p_mad );
+        AL_EXIT( AL_DBG_CM );
+        return;
+    }
+
+    switch( p_cep->state )
+    {
+    case CEP_STATE_REP_SENT:
+    case CEP_STATE_REP_MRA_RCVD:
+    case CEP_STATE_DREQ_SENT:
+        /* Cancel the outstanding MAD (REP or our own DREQ). */
+        if( p_cep->p_send_mad )
+        {
+            ib_cancel_mad( p_cep->h_mad_svc, p_cep->p_send_mad );
+            p_cep->p_send_mad = NULL;
+        }
+
+        /* Fall through and process as DREQ received case. */
+    case CEP_STATE_ESTABLISHED:
+    case CEP_STATE_LAP_RCVD:
+    case CEP_STATE_LAP_SENT:
+    case CEP_STATE_LAP_MRA_RCVD:
+    case CEP_STATE_LAP_MRA_SENT:
+        p_cep->state = CEP_STATE_DREQ_RCVD;
+
+        status = __cep_queue_mad( p_cep, p_mad );
+        CL_ASSERT( status != IB_INVALID_STATE );
+
+        /* Store the TID for use in the reply DREP. */
+        p_cep->tid = p_dreq->hdr.trans_id;
+
+        KeReleaseInStackQueuedSpinLockFromDpcLevel( &hdl );
+
+        if( status == IB_SUCCESS )
+            __process_cep( p_cep );
+        AL_EXIT( AL_DBG_CM );
+        return;
+
+    case CEP_STATE_TIMEWAIT:
+    case CEP_STATE_DESTROY:
+        /* Repeat the DREP. */
+        __repeat_mad( p_port_cep, p_cep, p_mad );
+        break;
+
+    default:
+        AL_TRACE( AL_DBG_CM, ("DREQ received in invalid state.\n") );
+        /*
+         * Deliberate fall-through: a duplicate DREQ while already in
+         * DREQ_RCVD is silently dropped (the trace above only fires for
+         * genuinely invalid states entering via 'default').
+         */
+    case CEP_STATE_DREQ_RCVD:
+        ib_put_mad( p_mad );
+        break;
+    }
+
+    KeReleaseInStackQueuedSpinLockFromDpcLevel( &hdl );
+    AL_EXIT( AL_DBG_CM );
+}
+\r
+\r
+/*
+ * Handle a received DREP (Disconnect Reply) MAD at DISPATCH_LEVEL.
+ * Only meaningful in DREQ_SENT (normal disconnect -> TIMEWAIT, MAD queued
+ * to owner) or DREQ_DESTROY (destroy raced the disconnect -> DESTROY, MAD
+ * dropped).  Consumes p_mad on every path.
+ */
+static void
+__process_drep(
+    IN ib_mad_element_t* const p_mad )
+{
+    ib_api_status_t status;
+    mad_cm_drep_t *p_drep;
+    kcep_t *p_cep;
+    KLOCK_QUEUE_HANDLE hdl;
+
+    AL_ENTER( AL_DBG_CM );
+
+    CL_ASSERT( KeGetCurrentIrql() == DISPATCH_LEVEL );
+
+    p_drep = (mad_cm_drep_t*)p_mad->p_mad_buf;
+
+    /* Find the connection by local connection ID. */
+    KeAcquireInStackQueuedSpinLockAtDpcLevel( &gp_cep_mgr->lock, &hdl );
+    p_cep = __lookup_cep( NULL, p_drep->remote_comm_id );
+    if( !p_cep || p_cep->remote_comm_id != p_drep->local_comm_id )
+    {
+        AL_TRACE( AL_DBG_CM, ("DREP received that could not be matched.\n") );
+        KeReleaseInStackQueuedSpinLockFromDpcLevel( &hdl );
+        ib_put_mad( p_mad );
+        AL_EXIT( AL_DBG_CM );
+        return;
+    }
+
+    if( p_cep->state != CEP_STATE_DREQ_SENT &&
+        p_cep->state != CEP_STATE_DREQ_DESTROY )
+    {
+        AL_TRACE( AL_DBG_CM, ("DREP received in invalid state.\n") );
+
+        KeReleaseInStackQueuedSpinLockFromDpcLevel( &hdl );
+        ib_put_mad( p_mad );
+        AL_EXIT( AL_DBG_CM );
+        return;
+    }
+
+    /* Cancel the DREQ so it is not retried further. */
+    if( p_cep->p_send_mad )
+    {
+        ib_cancel_mad( p_cep->h_mad_svc, p_cep->p_send_mad );
+        p_cep->p_send_mad = NULL;
+    }
+
+    if( p_cep->state == CEP_STATE_DREQ_SENT )
+    {
+        p_cep->state = CEP_STATE_TIMEWAIT;
+
+        status = __cep_queue_mad( p_cep, p_mad );
+        CL_ASSERT( status != IB_INVALID_STATE );
+    }
+    else
+    {
+        /* State is DREQ_DESTROY - move to DESTROY to allow cleanup. */
+        CL_ASSERT( p_cep->state == CEP_STATE_DREQ_DESTROY );
+        p_cep->state = CEP_STATE_DESTROY;
+
+        ib_put_mad( p_mad );
+        /* Force the "no callback" path below without queuing anything. */
+        status = IB_INVALID_STATE;
+    }
+
+    /* Start the timewait timer in either case. */
+    __insert_timewait( p_cep );
+
+    KeReleaseInStackQueuedSpinLockFromDpcLevel( &hdl );
+
+    if( status == IB_SUCCESS )
+        __process_cep( p_cep );
+
+    AL_EXIT( AL_DBG_CM );
+}
+\r
+\r
+/*
+ * Build the temporary alternate-path address vector (p_cep->alt_av) from
+ * the path information carried in a received LAP.  Returns FALSE when no
+ * local port matches the path's remote (i.e. our) GID/LID/PKey, or when
+ * the matching port belongs to a different CA than the connection -
+ * alternate paths must stay on the same CA.
+ */
+static boolean_t
+__format_lap_av(
+    IN kcep_t* const p_cep,
+    IN const lap_path_info_t* const p_path )
+{
+    cep_agent_t *p_port_cep;
+
+    AL_ENTER( AL_DBG_CM );
+
+    CL_ASSERT( p_cep );
+    CL_ASSERT( p_path );
+
+    cl_memclr( &p_cep->alt_av, sizeof(kcep_t) == 0 ? 0 : sizeof(kcep_av_t) );
+
+    /*
+     * NOTE(review): the LAP path is from the sender's perspective, so
+     * "remote" fields name our side - hence the lookup below uses
+     * remote_gid/remote_lid.  Confirm against __format_req_av for symmetry.
+     */
+    p_port_cep = __find_port_cep( &p_path->remote_gid, p_path->remote_lid,
+        p_cep->pkey, &p_cep->alt_av.pkey_index );
+    if( !p_port_cep )
+    {
+        AL_EXIT( AL_DBG_CM );
+        return FALSE;
+    }
+
+    if( p_port_cep->h_ca->obj.p_ci_ca->verbs.guid != p_cep->local_ca_guid )
+    {
+        AL_EXIT( AL_DBG_CM );
+        return FALSE;
+    }
+
+    p_cep->alt_av.port_guid = p_port_cep->port_guid;
+    p_cep->alt_av.attr.port_num = p_port_cep->port_num;
+
+    p_cep->alt_av.attr.sl = conn_lap_path_get_svc_lvl( p_path );
+    p_cep->alt_av.attr.dlid = p_path->local_lid;
+
+    /* A GRH is required whenever the path leaves the local subnet. */
+    if( !conn_lap_path_get_subn_lcl( p_path ) )
+    {
+        p_cep->alt_av.attr.grh_valid = TRUE;
+        p_cep->alt_av.attr.grh.ver_class_flow = ib_grh_set_ver_class_flow(
+            1, conn_lap_path_get_tclass( p_path ),
+            conn_lap_path_get_flow_lbl( p_path ) );
+        p_cep->alt_av.attr.grh.hop_limit = p_path->hop_limit;
+        p_cep->alt_av.attr.grh.dest_gid = p_path->local_gid;
+        p_cep->alt_av.attr.grh.src_gid = p_path->remote_gid;
+    }
+    else
+    {
+        p_cep->alt_av.attr.grh_valid = FALSE;
+    }
+    p_cep->alt_av.attr.static_rate = conn_lap_path_get_pkt_rate( p_path );
+    p_cep->alt_av.attr.path_bits =
+        (uint8_t)(p_path->remote_lid - p_port_cep->base_lid);
+
+    /*
+     * Note that while we never use the connected AV attributes internally,
+     * we store them so we can pass them back to users.  For the LAP, we
+     * first copy the settings from the current primary - MTU and retry
+     * counts are only specified in the REQ.
+     */
+    p_cep->alt_av.attr.conn = p_cep->av[p_cep->idx_primary].attr.conn;
+    p_cep->alt_av.attr.conn.local_ack_timeout =
+        conn_lap_path_get_lcl_ack_timeout( p_path );
+
+    AL_EXIT( AL_DBG_CM );
+    return TRUE;
+}
+\r
+\r
+/*
+ * Handle a received LAP (Load Alternate Path) MAD at DISPATCH_LEVEL.
+ * Only the passive side of a connection accepts a LAP.  On success the
+ * CEP moves to LAP_RCVD and the MAD is queued to the owner; if an MRA was
+ * already sent for this LAP it is repeated.  Consumes p_mad on all paths.
+ */
+static void
+__process_lap(
+    IN cep_agent_t* const p_port_cep,
+    IN ib_mad_element_t* const p_mad )
+{
+    ib_api_status_t status;
+    mad_cm_lap_t *p_lap;
+    kcep_t *p_cep;
+    KLOCK_QUEUE_HANDLE hdl;
+
+    AL_ENTER( AL_DBG_CM );
+
+    CL_ASSERT( KeGetCurrentIrql() == DISPATCH_LEVEL );
+
+    p_lap = (mad_cm_lap_t*)p_mad->p_mad_buf;
+
+    /* Find the connection by local connection ID. */
+    KeAcquireInStackQueuedSpinLockAtDpcLevel( &gp_cep_mgr->lock, &hdl );
+    p_cep = __lookup_cep( NULL, p_lap->remote_comm_id );
+    if( !p_cep || p_cep->remote_comm_id != p_lap->local_comm_id )
+    {
+        KeReleaseInStackQueuedSpinLockFromDpcLevel( &hdl );
+        ib_put_mad( p_mad );
+        AL_TRACE_EXIT( AL_DBG_CM, ("LAP received that could not be matched.\n") );
+        return;
+    }
+
+    switch( p_cep->state )
+    {
+    case CEP_STATE_REP_SENT:
+    case CEP_STATE_REP_MRA_RCVD:
+        /*
+         * These two cases handle the RTU being dropped.  Receipt of
+         * a LAP indicates that the connection is established.
+         */
+    case CEP_STATE_ESTABLISHED:
+        /*
+         * We don't check for other "established" states related to
+         * alternate path management (CEP_STATE_LAP_RCVD, etc)
+         */
+
+        /* We only support receiving LAP if we took the passive role. */
+        if( p_cep->was_active )
+        {
+            ib_put_mad( p_mad );
+            break;
+        }
+
+        /* Store the transaction ID for use during the LAP exchange. */
+        p_cep->tid = p_lap->hdr.trans_id;
+
+        /*
+         * Copy the path record into the connection for use when
+         * sending the APR and loading the path.
+         */
+        if( !__format_lap_av( p_cep, &p_lap->alternate_path ) )
+        {
+            /* Trap an invalid path. */
+            ib_put_mad( p_mad );
+            break;
+        }
+
+        p_cep->state = CEP_STATE_LAP_RCVD;
+
+        status = __cep_queue_mad( p_cep, p_mad );
+        CL_ASSERT( status != IB_INVALID_STATE );
+
+        KeReleaseInStackQueuedSpinLockFromDpcLevel( &hdl );
+
+        if( status == IB_SUCCESS )
+            __process_cep( p_cep );
+
+        AL_EXIT( AL_DBG_CM );
+        return;
+
+    case CEP_STATE_LAP_MRA_SENT:
+        /* Duplicate LAP - repeat our MRA. */
+        __repeat_mad( p_port_cep, p_cep, p_mad );
+        break;
+
+    default:
+        AL_TRACE( AL_DBG_CM, ("LAP received in invalid state.\n") );
+        ib_put_mad( p_mad );
+        break;
+    }
+
+    KeReleaseInStackQueuedSpinLockFromDpcLevel( &hdl );
+    AL_EXIT( AL_DBG_CM );
+}
+\r
+\r
+/*
+ * Handle a received APR (Alternate Path Response) MAD at DISPATCH_LEVEL.
+ * Completes a LAP exchange on the active side: commits the temporary
+ * alternate AV into the spare AV slot, refreshes the timewait estimate,
+ * and returns the CEP to ESTABLISHED.  Consumes p_mad on all paths.
+ */
+static void
+__process_apr(
+    IN ib_mad_element_t* const p_mad )
+{
+    ib_api_status_t status;
+    mad_cm_apr_t *p_apr;
+    kcep_t *p_cep;
+    KLOCK_QUEUE_HANDLE hdl;
+
+    AL_ENTER( AL_DBG_CM );
+
+    CL_ASSERT( KeGetCurrentIrql() == DISPATCH_LEVEL );
+
+    p_apr = (mad_cm_apr_t*)p_mad->p_mad_buf;
+
+    KeAcquireInStackQueuedSpinLockAtDpcLevel( &gp_cep_mgr->lock, &hdl );
+    p_cep = __lookup_cep( NULL, p_apr->remote_comm_id );
+    if( !p_cep || p_cep->remote_comm_id != p_apr->local_comm_id )
+    {
+        AL_TRACE( AL_DBG_CM, ("APR received that could not be matched.\n") );
+        goto done;
+    }
+
+    switch( p_cep->state )
+    {
+    case CEP_STATE_LAP_SENT:
+    case CEP_STATE_LAP_MRA_RCVD:
+        /* Cancel sending the LAP. */
+        if( p_cep->p_send_mad )
+        {
+            ib_cancel_mad( p_cep->h_mad_svc, p_cep->p_send_mad );
+            p_cep->p_send_mad = NULL;
+        }
+
+        /* Copy the temporary alternate AV into the non-primary slot. */
+        p_cep->av[(p_cep->idx_primary + 1) & 0x1] = p_cep->alt_av;
+
+        /* Update the maximum packet lifetime. */
+        p_cep->max_2pkt_life = max( p_cep->max_2pkt_life, p_cep->alt_2pkt_life );
+
+        /* Update the timewait time. */
+        __calc_timewait( p_cep );
+
+        p_cep->state = CEP_STATE_ESTABLISHED;
+
+        status = __cep_queue_mad( p_cep, p_mad );
+        CL_ASSERT( status != IB_INVALID_STATE );
+
+        KeReleaseInStackQueuedSpinLockFromDpcLevel( &hdl );
+
+        if( status == IB_SUCCESS )
+            __process_cep( p_cep );
+
+        AL_EXIT( AL_DBG_CM );
+        return;
+
+    default:
+        AL_TRACE( AL_DBG_CM, ("APR received in invalid state.\n") );
+        break;
+    }
+
+done:
+    KeReleaseInStackQueuedSpinLockFromDpcLevel( &hdl );
+    ib_put_mad( p_mad );
+    AL_EXIT( AL_DBG_CM );
+}
+\r
+\r
+/*
+ * MAD service receive callback: dispatch an incoming CM MAD to the
+ * handler for its attribute ID.  Ownership of p_mad passes to the
+ * handler; unrecognized attributes (including the not-yet-implemented
+ * SIDR pair) return the MAD immediately.  Runs at DISPATCH_LEVEL.
+ */
+static void
+__cep_mad_recv_cb(
+    IN ib_mad_svc_handle_t h_mad_svc,
+    IN void *context,
+    IN ib_mad_element_t *p_mad )
+{
+    cep_agent_t *p_port_cep;
+    ib_mad_t *p_hdr;
+
+    AL_ENTER( AL_DBG_CM );
+
+    CL_ASSERT( KeGetCurrentIrql() == DISPATCH_LEVEL );
+
+    UNUSED_PARAM( h_mad_svc );
+    p_port_cep = (cep_agent_t*)context;
+
+    CL_ASSERT( p_mad->p_next == NULL );
+
+    p_hdr = (ib_mad_t*)p_mad->p_mad_buf;
+
+    /*
+     * TODO: Add filtering in all the handlers for unsupported class version.
+     * See 12.6.7.2 Rejection Reason, code 31.
+     */
+
+    switch( p_hdr->attr_id )
+    {
+    case CM_REQ_ATTR_ID:
+        __process_req( p_port_cep, p_mad );
+        break;
+
+    case CM_MRA_ATTR_ID:
+        __process_mra( p_mad );
+        break;
+
+    case CM_REJ_ATTR_ID:
+        __process_rej( p_mad );
+        break;
+
+    case CM_REP_ATTR_ID:
+        __process_rep( p_port_cep, p_mad );
+        break;
+
+    case CM_RTU_ATTR_ID:
+        __process_rtu( p_mad );
+        break;
+
+    case CM_DREQ_ATTR_ID:
+        __process_dreq( p_port_cep, p_mad );
+        break;
+
+    case CM_DREP_ATTR_ID:
+        __process_drep( p_mad );
+        break;
+
+    case CM_LAP_ATTR_ID:
+        __process_lap( p_port_cep, p_mad );
+        break;
+
+    case CM_APR_ATTR_ID:
+        __process_apr( p_mad );
+        break;
+
+    /* SIDR is not implemented - these fall through to the drop path. */
+    case CM_SIDR_REQ_ATTR_ID:
+//        p_async_mad->item.pfn_callback = __process_cm_sidr_req;
+//        break;
+//
+    case CM_SIDR_REP_ATTR_ID:
+//        p_async_mad->item.pfn_callback = __process_cm_sidr_rep;
+//        break;
+//
+    default:
+        ib_put_mad( p_mad );
+        AL_TRACE_EXIT( AL_DBG_ERROR,
+            ("Invalid CM MAD attribute ID.\n") );
+        return;
+    }
+
+    AL_EXIT( AL_DBG_CM );
+}
+\r
+\r
+/*
+ * Return the port agent servicing the CEP's primary path, keyed by the
+ * primary AV's port GUID, or NULL if no such port agent is registered.
+ */
+static inline cep_agent_t*
+__get_cep_agent(
+    IN kcep_t* const p_cep )
+{
+    cl_map_item_t *p_map_item;
+
+    CL_ASSERT( p_cep );
+
+    /* The primary path's port GUID keys the port agent map. */
+    p_map_item = cl_qmap_get( &gp_cep_mgr->port_map,
+        p_cep->av[p_cep->idx_primary].port_guid );
+
+    return (p_map_item == cl_qmap_end( &gp_cep_mgr->port_map )) ?
+        NULL : PARENT_STRUCT( p_map_item, cep_agent_t, item );
+}
+\r
+\r
+/*
+ * Populate a MAD element's addressing fields for a QP1 (GSI) send from
+ * the given CEP address vector.  The MAD service creates the AV itself
+ * (h_av is left NULL).
+ */
+static inline void
+__format_mad_av(
+    OUT ib_mad_element_t* const p_mad,
+    IN kcep_av_t* const p_av )
+{
+    /* Standard GSI targeting: QP1, well-known QKey, signaled send. */
+    p_mad->remote_qp = IB_QP1;
+    p_mad->remote_qkey = IB_QP1_WELL_KNOWN_Q_KEY;
+    p_mad->send_opt = IB_SEND_OPT_SIGNALED;
+
+    /* Path information from the CEP's address vector. */
+    p_mad->remote_lid = p_av->attr.dlid;
+    p_mad->remote_sl = p_av->attr.sl;
+    p_mad->path_bits = p_av->attr.path_bits;
+    p_mad->pkey_index = p_av->pkey_index;
+
+    /* Copy the GRH only when the path requires one. */
+    p_mad->grh_valid = p_av->attr.grh_valid;
+    if( p_av->attr.grh_valid )
+        cl_memcpy( p_mad->p_grh, &p_av->attr.grh, sizeof(ib_grh_t) );
+
+    /* Let the MAD service manage the AV for us. */
+    p_mad->h_av = NULL;
+}
+\r
+\r
+/*
+ * Send a one-shot (unretried, untracked) CM MAD on the given port agent.
+ * Used for immediate responses that are not tied to a CEP's state machine
+ * (e.g. repeated MADs and stateless rejects).  On failure the MAD is
+ * returned to its pool before the error status is propagated.
+ */
+static ib_api_status_t
+__cep_send_mad(
+    IN cep_agent_t* const p_port_cep,
+    IN ib_mad_element_t* const p_mad )
+{
+    ib_api_status_t status;
+
+    AL_ENTER( AL_DBG_CM );
+
+    CL_ASSERT( p_port_cep );
+    CL_ASSERT( p_mad );
+
+    /* Use the mad's attributes already present; fire-and-forget. */
+    p_mad->resp_expected = FALSE;
+    p_mad->retry_cnt = 0;
+    p_mad->timeout_ms = 0;
+
+    /* Clear the contexts since the send isn't associated with a CEP. */
+    p_mad->context1 = NULL;
+    p_mad->context2 = NULL;
+
+    status = ib_send_mad( p_port_cep->h_mad_svc, p_mad, NULL );
+    if( status != IB_SUCCESS )
+    {
+        ib_put_mad( p_mad );
+        AL_TRACE( AL_DBG_ERROR,
+            ("ib_send_mad failed with status %s.\n", ib_get_err_str(status)) );
+    }
+
+    AL_EXIT( AL_DBG_CM );
+    return status;
+}
+\r
+\r
+/*
+ * Send a CM MAD that is retried until answered or timed out (REQ, REP,
+ * LAP, or DREQ).  The MAD is recorded in the CEP (p_send_mad) so later
+ * state transitions can cancel it, and the CEP is referenced until the
+ * send completion callback runs.  Caller holds gp_cep_mgr->lock.
+ */
+static ib_api_status_t
+__cep_send_retry(
+    IN cep_agent_t* const p_port_cep,
+    IN kcep_t* const p_cep,
+    IN ib_mad_element_t* const p_mad )
+{
+    ib_api_status_t status;
+
+    AL_ENTER( AL_DBG_CM );
+
+    CL_ASSERT( p_cep );
+    CL_ASSERT( p_mad );
+    CL_ASSERT( p_mad->p_mad_buf->attr_id == CM_REQ_ATTR_ID ||
+        p_mad->p_mad_buf->attr_id == CM_REP_ATTR_ID ||
+        p_mad->p_mad_buf->attr_id == CM_LAP_ATTR_ID ||
+        p_mad->p_mad_buf->attr_id == CM_DREQ_ATTR_ID );
+
+    /*
+     * REQ, REP, and DREQ are retried until either a response is
+     * received or the operation times out.
+     */
+    p_mad->resp_expected = TRUE;
+    p_mad->retry_cnt = p_cep->max_cm_retries;
+    p_mad->timeout_ms = p_cep->retry_timeout;
+
+    /* Only one tracked send may be outstanding per CEP. */
+    CL_ASSERT( !p_cep->p_send_mad );
+
+    /* Store the mad & mad service handle in the CEP for cancelling. */
+    p_cep->h_mad_svc = p_port_cep->h_mad_svc;
+    p_cep->p_send_mad = p_mad;
+
+    /* reference the connection for which we are sending the MAD. */
+    cl_atomic_inc( &p_cep->ref_cnt );
+
+    /* Set the context so the send callback can find the CEP. */
+    p_mad->context1 = p_cep;
+    p_mad->context2 = NULL;
+
+    /* Fire in the hole! */
+    status = ib_send_mad( p_cep->h_mad_svc, p_mad, NULL );
+    if( status != IB_SUCCESS )
+    {
+        /*
+         * Note that we don't need to check for destruction here since
+         * we're holding the global lock.
+         */
+        cl_atomic_dec( &p_cep->ref_cnt );
+        p_cep->p_send_mad = NULL;
+        ib_put_mad( p_mad );
+        AL_TRACE( AL_DBG_ERROR,
+            ("ib_send_mad failed with status %s.\n", ib_get_err_str(status)) );
+    }
+
+    AL_EXIT( AL_DBG_CM );
+    return status;
+}
+\r
+\r
+/*
+ * MAD service send-completion callback, at DISPATCH_LEVEL.  For tracked
+ * sends (context1 == CEP) this drives the timeout side of the state
+ * machine: a retries-exhausted REQ/REP produces a local reject, an
+ * unanswered DREQ moves the CEP into timewait with a canned DREP, and a
+ * DREQ_DESTROY completes destruction.  The CEP reference taken by
+ * __cep_send_retry is released at the end, which may invoke the owner's
+ * destroy callback.
+ */
+static void
+__cep_mad_send_cb(
+    IN ib_mad_svc_handle_t h_mad_svc,
+    IN void *context,
+    IN ib_mad_element_t *p_mad )
+{
+    ib_api_status_t status;
+    cep_agent_t *p_port_cep;
+    kcep_t *p_cep;
+    KLOCK_QUEUE_HANDLE hdl;
+    ib_pfn_destroy_cb_t pfn_destroy_cb;
+    void *cep_context;
+
+    AL_ENTER( AL_DBG_CM );
+
+    UNUSED_PARAM( h_mad_svc );
+    CL_ASSERT( p_mad->p_next == NULL );
+    CL_ASSERT( KeGetCurrentIrql() == DISPATCH_LEVEL );
+
+    p_port_cep = (cep_agent_t*)context;
+
+    p_cep = (kcep_t* __ptr64)p_mad->context1;
+
+    /*
+     * The connection context is not set when performing immediate responses,
+     * such as repeating MADS.
+     */
+    if( !p_cep )
+    {
+        ib_put_mad( p_mad );
+        AL_EXIT( AL_DBG_CM );
+        return;
+    }
+
+    p_mad->context1 = NULL;
+
+    KeAcquireInStackQueuedSpinLockAtDpcLevel( &gp_cep_mgr->lock, &hdl );
+    /* Clear the sent MAD pointer so that we don't try cancelling again. */
+    if( p_cep->p_send_mad == p_mad )
+        p_cep->p_send_mad = NULL;
+
+    switch( p_mad->status )
+    {
+    case IB_WCS_SUCCESS:
+        KeReleaseInStackQueuedSpinLockFromDpcLevel( &hdl );
+        ib_put_mad( p_mad );
+        break;
+
+    case IB_WCS_CANCELED:
+        /*
+         * A cancellation in any state that still awaits a response was
+         * not triggered by that response arriving - treat it as a
+         * timeout; otherwise the response already advanced the state
+         * machine and the cancelled send can simply be dropped.
+         */
+        if( p_cep->state != CEP_STATE_REQ_SENT &&
+            p_cep->state != CEP_STATE_REQ_MRA_RCVD &&
+            p_cep->state != CEP_STATE_REP_SENT &&
+            p_cep->state != CEP_STATE_REP_MRA_RCVD &&
+            p_cep->state != CEP_STATE_LAP_SENT &&
+            p_cep->state != CEP_STATE_LAP_MRA_RCVD &&
+            p_cep->state != CEP_STATE_DREQ_SENT &&
+            p_cep->state != CEP_STATE_SREQ_SENT )
+        {
+            KeReleaseInStackQueuedSpinLockFromDpcLevel( &hdl );
+            ib_put_mad( p_mad );
+            break;
+        }
+        /* Treat as a timeout so we don't stall the state machine. */
+        p_mad->status = IB_WCS_TIMEOUT_RETRY_ERR;
+
+        /* Fall through. */
+    case IB_WCS_TIMEOUT_RETRY_ERR:
+    default:
+        /* Timeout.  Reject the connection. */
+        if( p_cep->state == CEP_STATE_REQ_SENT ||
+            p_cep->state == CEP_STATE_REQ_MRA_RCVD ||
+            p_cep->state == CEP_STATE_REP_SENT ||
+            p_cep->state == CEP_STATE_REP_MRA_RCVD )
+        {
+            /* Send the REJ. */
+            __reject_timeout( p_port_cep, p_cep, p_mad );
+            __remove_cep( p_cep );
+            p_cep->state = CEP_STATE_IDLE;
+        }
+        else if( p_cep->state == CEP_STATE_DREQ_DESTROY )
+        {
+            p_cep->state = CEP_STATE_DESTROY;
+            __insert_timewait( p_cep );
+            KeReleaseInStackQueuedSpinLockFromDpcLevel( &hdl );
+            ib_put_mad( p_mad );
+            break;
+        }
+        else if( p_cep->state == CEP_STATE_DREQ_SENT )
+        {
+            /*
+             * Make up a DREP mad so we can respond if we receive
+             * a DREQ while in timewait.
+             */
+            __format_mad_hdr( &p_cep->mads.drep.hdr, p_cep, CM_DREP_ATTR_ID );
+            __format_drep( p_cep, NULL, 0, &p_cep->mads.drep );
+            p_cep->state = CEP_STATE_TIMEWAIT;
+            __insert_timewait( p_cep );
+        }
+
+        /* Queue the failed MAD so the owner sees the error. */
+        status = __cep_queue_mad( p_cep, p_mad );
+        CL_ASSERT( status != IB_INVALID_STATE );
+        KeReleaseInStackQueuedSpinLockFromDpcLevel( &hdl );
+
+        if( status == IB_SUCCESS )
+            __process_cep( p_cep );
+        break;
+    }
+
+    /* Capture callback info before the deref can free the CEP. */
+    pfn_destroy_cb = p_cep->pfn_destroy_cb;
+    cep_context = p_cep->cep.context;
+
+    if( !cl_atomic_dec( &p_cep->ref_cnt ) && pfn_destroy_cb )
+        pfn_destroy_cb( cep_context );
+    AL_EXIT( AL_DBG_CM );
+}
+\r
+\r
+/* Async-event callback for the QP1 alias QP - intentionally a no-op. */
+static void
+__cep_qp_event_cb(
+    IN ib_async_event_rec_t *p_event_rec )
+{
+    UNUSED_PARAM( p_event_rec );
+
+    /*
+     * Most of the QP events are trapped by the real owner of the QP.
+     * For real events, the CM may not be able to do much anyways!
+     */
+}
+\r
+\r
+/*
+ * Set up a port agent's data path: an alias PD, a QP1 alias QP, and a CM
+ * MAD service registered for unsolicited SENDs of the CM management
+ * class.  Each created resource holds a reference on the port agent
+ * object; partial failures leave those resources for the object's
+ * destroy path to release.
+ */
+static ib_api_status_t
+__init_data_svc(
+    IN cep_agent_t* const p_port_cep,
+    IN const ib_port_attr_t* const p_port_attr )
+{
+    ib_api_status_t status;
+    ib_qp_create_t qp_create;
+    ib_mad_svc_t mad_svc;
+
+    AL_ENTER( AL_DBG_CM );
+
+    /*
+     * Create the PD alias.  We use the port CM's al_obj_t as the context
+     * to allow using deref_al_obj as the destroy callback.
+     */
+    status = ib_alloc_pd( p_port_cep->h_ca, IB_PDT_ALIAS, &p_port_cep->obj,
+        &p_port_cep->h_pd );
+    if( status != IB_SUCCESS )
+    {
+        AL_TRACE_EXIT( AL_DBG_ERROR,
+            ("ib_alloc_pd failed with status %s\n", ib_get_err_str(status)) );
+        return status;
+    }
+    /* Reference the port object on behalf of the PD. */
+    ref_al_obj( &p_port_cep->obj );
+
+    /* Create the MAD QP. */
+    cl_memclr( &qp_create, sizeof( ib_qp_create_t ) );
+    qp_create.qp_type = IB_QPT_QP1_ALIAS;
+    qp_create.rq_depth = CEP_MAD_RQ_DEPTH;
+    qp_create.sq_depth = CEP_MAD_SQ_DEPTH;
+    qp_create.rq_sge = CEP_MAD_RQ_SGE;
+    qp_create.sq_sge = CEP_MAD_SQ_SGE;
+    qp_create.sq_signaled = TRUE;
+    /*
+     * We use the port CM's al_obj_t as the context to allow using
+     * deref_al_obj as the destroy callback.
+     */
+    status = ib_get_spl_qp( p_port_cep->h_pd, p_port_attr->port_guid,
+        &qp_create, &p_port_cep->obj, __cep_qp_event_cb, &p_port_cep->pool_key,
+        &p_port_cep->h_qp );
+    if( status != IB_SUCCESS )
+    {
+        AL_TRACE_EXIT( AL_DBG_ERROR,
+            ("ib_get_spl_qp failed with status %s\n", ib_get_err_str(status)) );
+        return status;
+    }
+    /* Reference the port object on behalf of the QP. */
+    ref_al_obj( &p_port_cep->obj );
+
+    /* Create the MAD service. */
+    cl_memclr( &mad_svc, sizeof(mad_svc) );
+    mad_svc.mad_svc_context = p_port_cep;
+    mad_svc.pfn_mad_recv_cb = __cep_mad_recv_cb;
+    mad_svc.pfn_mad_send_cb = __cep_mad_send_cb;
+    mad_svc.support_unsol = TRUE;
+    mad_svc.mgmt_class = IB_MCLASS_COMM_MGMT;
+    mad_svc.mgmt_version = IB_MCLASS_CM_VER_2;
+    mad_svc.method_array[IB_MAD_METHOD_SEND] = TRUE;
+    status =
+        ib_reg_mad_svc( p_port_cep->h_qp, &mad_svc, &p_port_cep->h_mad_svc );
+    if( status != IB_SUCCESS )
+    {
+        AL_TRACE_EXIT( AL_DBG_ERROR,
+            ("ib_reg_mad_svc failed with status %s\n", ib_get_err_str(status)) );
+        return status;
+    }
+
+    AL_EXIT( AL_DBG_CM );
+    return IB_SUCCESS;
+}
+\r
+\r
+/*
+ * Performs immediate cleanup of resources when a port agent is being
+ * destroyed: unhooks the agent from the global port map, then destroys
+ * the QP and PD.  The deref_al_obj destroy callbacks release the
+ * references taken on the agent in __init_data_svc.
+ */
+static void
+__destroying_port_cep(
+    IN al_obj_t *p_obj )
+{
+    cep_agent_t *p_port_cep;
+    KLOCK_QUEUE_HANDLE hdl;
+
+    AL_ENTER( AL_DBG_CM );
+
+    p_port_cep = PARENT_STRUCT( p_obj, cep_agent_t, obj );
+
+    /* Only remove from the map if __create_port_cep got far enough to insert. */
+    if( p_port_cep->port_guid )
+    {
+        KeAcquireInStackQueuedSpinLock( &gp_cep_mgr->lock, &hdl );
+        cl_qmap_remove_item( &gp_cep_mgr->port_map, &p_port_cep->item );
+        KeReleaseInStackQueuedSpinLock( &hdl );
+    }
+
+    if( p_port_cep->h_qp )
+    {
+        ib_destroy_qp( p_port_cep->h_qp, (ib_pfn_destroy_cb_t)deref_al_obj );
+        p_port_cep->h_qp = NULL;
+    }
+
+    if( p_port_cep->h_pd )
+    {
+        ib_dealloc_pd( p_port_cep->h_pd, (ib_pfn_destroy_cb_t)deref_al_obj );
+        p_port_cep->h_pd = NULL;
+    }
+
+    AL_EXIT( AL_DBG_CM );
+}
+\r
+\r
+\r
+/*
+ * Release all resources allocated by a port CM agent.  Finishes any
+ * cleanup for a port agent: clears the port's CM-supported capability
+ * bit, drops the CA reference, and frees the agent structure.
+ */
+static void
+__free_port_cep(
+    IN al_obj_t *p_obj )
+{
+    cep_agent_t *p_port_cep;
+    ib_port_attr_mod_t port_attr_mod;
+
+    AL_ENTER( AL_DBG_CM );
+
+    p_port_cep = PARENT_STRUCT( p_obj, cep_agent_t, obj );
+
+    if( p_port_cep->h_ca )
+    {
+        /*
+         * Update local port attributes.  Zero the structure first - the
+         * previous code passed it with only cap.cm set, leaving the
+         * remaining fields uninitialized (__create_port_cep clears it
+         * before use; this path now matches).
+         */
+        cl_memclr( &port_attr_mod, sizeof(ib_port_attr_mod_t) );
+        port_attr_mod.cap.cm = FALSE;
+        ib_modify_ca( p_port_cep->h_ca, p_port_cep->port_num,
+            IB_CA_MOD_IS_CM_SUPPORTED, &port_attr_mod );
+
+        deref_al_obj( &p_port_cep->h_ca->obj );
+    }
+
+    destroy_al_obj( &p_port_cep->obj );
+    cl_free( p_port_cep );
+
+    AL_EXIT( AL_DBG_CM );
+}
+\r
+\r
+/*
+ * Create a port agent for a given port: allocate and initialize the
+ * agent object, attach it to the global CEP manager, register it in the
+ * port map, acquire the CA, bring up the MAD data path, and advertise
+ * CM support in the port capabilities.  On any failure the object's
+ * destroy path unwinds whatever was set up.
+ */
+static ib_api_status_t
+__create_port_cep(
+    IN ib_pnp_port_rec_t *p_pnp_rec )
+{
+    cep_agent_t *p_port_cep;
+    ib_api_status_t status;
+    ib_port_attr_mod_t port_attr_mod;
+    KLOCK_QUEUE_HANDLE hdl;
+
+    AL_ENTER( AL_DBG_CM );
+
+    /* calculate size of port_cm struct */
+    p_port_cep = (cep_agent_t*)cl_zalloc( sizeof(cep_agent_t) );
+    if( !p_port_cep )
+    {
+        AL_TRACE_EXIT( AL_DBG_ERROR,
+            ("Failed to cl_zalloc port CM agent.\n") );
+        return IB_INSUFFICIENT_MEMORY;
+    }
+
+    construct_al_obj( &p_port_cep->obj, AL_OBJ_TYPE_CM );
+
+    status = init_al_obj( &p_port_cep->obj, p_port_cep, TRUE,
+        __destroying_port_cep, NULL, __free_port_cep );
+    if( status != IB_SUCCESS )
+    {
+        /* Object not initialized - free directly rather than via destroy. */
+        __free_port_cep( &p_port_cep->obj );
+        AL_TRACE_EXIT( AL_DBG_ERROR,
+            ("init_al_obj failed with status %s.\n", ib_get_err_str(status)) );
+        return status;
+    }
+
+    /* Attach to the global CM object. */
+    status = attach_al_obj( &gp_cep_mgr->obj, &p_port_cep->obj );
+    if( status != IB_SUCCESS )
+    {
+        p_port_cep->obj.pfn_destroy( &p_port_cep->obj, NULL );
+        AL_TRACE_EXIT( AL_DBG_ERROR,
+            ("attach_al_obj returned %s.\n", ib_get_err_str(status)) );
+        return status;
+    }
+
+    p_port_cep->port_guid = p_pnp_rec->p_port_attr->port_guid;
+    p_port_cep->port_num = p_pnp_rec->p_port_attr->port_num;
+    p_port_cep->base_lid = p_pnp_rec->p_port_attr->lid;
+
+    /* Setting port_guid above also arms map removal in __destroying_port_cep. */
+    KeAcquireInStackQueuedSpinLock( &gp_cep_mgr->lock, &hdl );
+    cl_qmap_insert(
+        &gp_cep_mgr->port_map, p_port_cep->port_guid, &p_port_cep->item );
+    KeReleaseInStackQueuedSpinLock( &hdl );
+
+    /* Get a reference to the CA on which we are loading. */
+    p_port_cep->h_ca = acquire_ca( p_pnp_rec->p_ca_attr->ca_guid );
+    if( !p_port_cep->h_ca )
+    {
+        p_port_cep->obj.pfn_destroy( &p_port_cep->obj, NULL );
+        AL_TRACE_EXIT( AL_DBG_ERROR, ("acquire_ca failed.\n") );
+        return IB_INVALID_GUID;
+    }
+
+    status = __init_data_svc( p_port_cep, p_pnp_rec->p_port_attr );
+    if( status != IB_SUCCESS )
+    {
+        p_port_cep->obj.pfn_destroy( &p_port_cep->obj, NULL );
+        AL_TRACE_EXIT( AL_DBG_ERROR,
+            ("__init_data_svc failed with status %s.\n",
+            ib_get_err_str(status)) );
+        return status;
+    }
+
+    /* Update local port attributes */
+    cl_memclr( &port_attr_mod, sizeof(ib_port_attr_mod_t) );
+    port_attr_mod.cap.cm = TRUE;
+    /* NOTE(review): the modify_ca result is ignored here - confirm intended. */
+    status = ib_modify_ca( p_port_cep->h_ca, p_pnp_rec->p_port_attr->port_num,
+        IB_CA_MOD_IS_CM_SUPPORTED, &port_attr_mod );
+
+    /* Update the PNP context to reference this port. */
+    p_pnp_rec->pnp_rec.context = p_port_cep;
+
+    /* Release the reference taken in init_al_obj. */
+    deref_al_obj( &p_port_cep->obj );
+
+    AL_EXIT( AL_DBG_CM );
+    return IB_SUCCESS;
+}
+\r
+\r
+/******************************************************************************\r
+* Global CEP manager\r
+******************************************************************************/\r
+\r
+/*
+ * Allocate a local connection ID (CID): pop the head of the free list
+ * threaded through the CID vector, growing the vector when the list is
+ * exhausted.  Free entries reuse their p_cep field to store the index of
+ * the next free entry.  Returns the entry and its index via *p_cid, or
+ * NULL if the vector could not grow.  Caller holds gp_cep_mgr->lock.
+ */
+static cep_cid_t*
+__get_lcid(
+    OUT net32_t* const p_cid )
+{
+    cl_status_t status;
+    uint32_t size, cid;
+    cep_cid_t *p_cep_cid;
+
+    AL_ENTER( AL_DBG_CM );
+
+    size = (uint32_t)cl_vector_get_size( &gp_cep_mgr->cid_vector );
+    cid = gp_cep_mgr->free_cid;
+    if( gp_cep_mgr->free_cid == size )
+    {
+        /* Free list empty - grow the vector pool. */
+        status =
+            cl_vector_set_size( &gp_cep_mgr->cid_vector, size + CEP_CID_GROW );
+        if( status != CL_SUCCESS )
+        {
+            AL_EXIT( AL_DBG_CM );
+            return NULL;
+        }
+        /*
+         * Return the the start of the free list since the
+         * entry initializer incremented it.
+         */
+        gp_cep_mgr->free_cid = size;
+    }
+
+    /* Get the next free entry. */
+    p_cep_cid = (cep_cid_t*)cl_vector_get_ptr( &gp_cep_mgr->cid_vector, cid );
+
+    /* Update the next entry index (stored in the free entry's p_cep). */
+    gp_cep_mgr->free_cid = (uint32_t)(uintn_t)p_cep_cid->p_cep;
+
+    *p_cid = cid;
+
+    AL_EXIT( AL_DBG_CM );
+    return p_cep_cid;
+}
+\r
+\r
+/*
+ * Look up a CEP by local connection ID.  The low bits of the CID index
+ * the CID vector (the high bits are a reuse counter and are masked off).
+ * If h_al is provided, the entry must additionally be owned by that AL
+ * instance.  Returns NULL for out-of-range or unowned CIDs.  Caller
+ * holds gp_cep_mgr->lock.
+ */
+static inline kcep_t*
+__lookup_cep(
+    IN ib_al_handle_t h_al OPTIONAL,
+    IN net32_t cid )
+{
+    cep_cid_t *p_cid;
+
+    /* Mask off the counter bits so we get the index in our vector. */
+    cid &= CEP_MAX_CID_MASK;
+
+    /*
+     * Valid indices are [0, size).  The previous test used '>', which
+     * let cid == size reach cl_vector_get_ptr - an out-of-bounds access
+     * one element past the vector.
+     */
+    if( cid >= cl_vector_get_size( &gp_cep_mgr->cid_vector ) )
+        return NULL;
+
+    p_cid = (cep_cid_t*)cl_vector_get_ptr( &gp_cep_mgr->cid_vector, cid );
+    if( p_cid->h_al && (!h_al || p_cid->h_al == h_al) )
+        return p_cid->p_cep;
+
+    /* Not the correct owner. */
+    return NULL;
+}
+\r
+\r
+/*
+ * Lookup a CEP by remote comm ID and CA GUID.  Walks the conn_id_map
+ * red/black tree keyed on (remote_comm_id, remote_ca_guid).  Returns
+ * NULL when no CEP matches.  Caller holds gp_cep_mgr->lock.
+ */
+static kcep_t*
+__lookup_by_id(
+    IN net32_t remote_comm_id,
+    IN net64_t remote_ca_guid )
+{
+    cl_rbmap_item_t *p_item;
+    kcep_t *p_cep;
+
+    AL_ENTER( AL_DBG_CM );
+
+    /* Match against pending connections using remote comm ID and CA GUID. */
+    p_item = cl_rbmap_root( &gp_cep_mgr->conn_id_map );
+    while( p_item != cl_rbmap_end( &gp_cep_mgr->conn_id_map ) )
+    {
+        p_cep = PARENT_STRUCT( p_item, kcep_t, rem_id_item );
+
+        /* Primary key: comm ID; secondary key: CA GUID. */
+        if( remote_comm_id < p_cep->remote_comm_id )
+            p_item = cl_rbmap_left( p_item );
+        else if( remote_comm_id > p_cep->remote_comm_id )
+            p_item = cl_rbmap_right( p_item );
+        else if( remote_ca_guid < p_cep->remote_ca_guid )
+            p_item = cl_rbmap_left( p_item );
+        else if( remote_ca_guid > p_cep->remote_ca_guid )
+            p_item = cl_rbmap_right( p_item );
+        else
+            return p_cep;
+    }
+
+    AL_EXIT( AL_DBG_CM );
+    return NULL;
+}
+\r
+\r
+/*
+ * Lookup a listening CEP by Service ID, port GUID, and private data.
+ * The tree is ordered by SID, then port GUID (IB_ALL_PORTS wildcard
+ * matches any port), then a comparison window into the REQ private data.
+ * Returns the matching listener or NULL.  Caller holds gp_cep_mgr->lock.
+ */
+static kcep_t*
+__lookup_listen(
+    IN net64_t sid,
+    IN net64_t port_guid,
+    IN uint8_t *p_pdata )
+{
+    cl_rbmap_item_t *p_item;
+    kcep_t *p_cep;
+    intn_t cmp;
+
+    AL_ENTER( AL_DBG_CM );
+
+    /* Match against pending connections using remote comm ID and CA GUID. */
+    p_item = cl_rbmap_root( &gp_cep_mgr->listen_map );
+    while( p_item != cl_rbmap_end( &gp_cep_mgr->listen_map ) )
+    {
+        p_cep = PARENT_STRUCT( p_item, kcep_t, listen_item );
+
+        if( sid == p_cep->sid )
+            goto port_cmp;
+        else if( sid < p_cep->sid )
+            p_item = cl_rbmap_left( p_item );
+        else
+            p_item = cl_rbmap_right( p_item );
+
+        continue;
+
+port_cmp:
+        /* IB_ALL_PORTS listeners accept a REQ arriving on any port. */
+        if( p_cep->port_guid != IB_ALL_PORTS )
+        {
+            if( port_guid == p_cep->port_guid )
+                goto pdata_cmp;
+            else if( port_guid < p_cep->port_guid )
+                p_item = cl_rbmap_left( p_item );
+            else
+                p_item = cl_rbmap_right( p_item );
+
+            continue;
+        }
+
+pdata_cmp:
+        /*
+         * NOTE(review): when the listener has a compare buffer but
+         * p_pdata is NULL, this falls through to 'match' without
+         * comparing - presumably REQs always carry private data; confirm.
+         */
+        if( p_cep->p_cmp_buf && p_pdata )
+        {
+            cmp = cl_memcmp( &p_pdata[p_cep->cmp_offset],
+                p_cep->p_cmp_buf, p_cep->cmp_len );
+
+            if( !cmp )
+                goto match;
+            else if( cmp < 0 )
+                p_item = cl_rbmap_left( p_item );
+            else
+                p_item = cl_rbmap_right( p_item );
+
+            AL_TRACE( AL_DBG_CM,
+                ("Svc ID match but compare buffer mismatch.\n") );
+            continue;
+        }
+
+match:
+        /* Everything matched. */
+        AL_EXIT( AL_DBG_CM );
+        return p_cep;
+    }
+
+    AL_EXIT( AL_DBG_CM );
+    return NULL;
+}
+\r
+\r
+/*
+ * Insert a CEP into the remote-comm-ID red/black map, keyed by
+ * (remote_comm_id, remote_ca_guid).  Returns p_new_cep on success, or
+ * the existing CEP when the key is already present (duplicate detection
+ * by pointer comparison).  Caller holds gp_cep_mgr->lock.
+ */
+static kcep_t*
+__insert_by_id(
+    IN kcep_t* const p_new_cep )
+{
+    kcep_t *p_cep;
+    cl_rbmap_item_t *p_item, *p_insert_at;
+    boolean_t left = TRUE;
+
+    AL_ENTER( AL_DBG_CM );
+
+    p_item = cl_rbmap_root( &gp_cep_mgr->conn_id_map );
+    p_insert_at = p_item;
+    while( p_item != cl_rbmap_end( &gp_cep_mgr->conn_id_map ) )
+    {
+        p_insert_at = p_item;
+        p_cep = PARENT_STRUCT( p_item, kcep_t, rem_id_item );
+
+        if( p_new_cep->remote_comm_id < p_cep->remote_comm_id )
+            p_item = cl_rbmap_left( p_item ), left = TRUE;
+        else if( p_new_cep->remote_comm_id > p_cep->remote_comm_id )
+            p_item = cl_rbmap_right( p_item ), left = FALSE;
+        else if( p_new_cep->remote_ca_guid < p_cep->remote_ca_guid )
+            p_item = cl_rbmap_left( p_item ), left = TRUE;
+        else if( p_new_cep->remote_ca_guid > p_cep->remote_ca_guid )
+            p_item = cl_rbmap_right( p_item ), left = FALSE;
+        else
+            goto done;   /* Duplicate - return the existing CEP. */
+    }
+
+    cl_rbmap_insert(
+        &gp_cep_mgr->conn_id_map, p_insert_at, &p_new_cep->rem_id_item, left );
+    p_cep = p_new_cep;
+
+done:
+    AL_EXIT( AL_DBG_CM );
+    return p_cep;
+}
+\r
+\r
+/*
+ * Insert a CEP into the remote-QPN red/black map, keyed by
+ * (remote_qpn, remote_ca_guid).  Returns p_new_cep on success, or the
+ * existing CEP when the key is already present (duplicate detection by
+ * pointer comparison).  Caller holds gp_cep_mgr->lock.
+ */
+static kcep_t*
+__insert_by_qpn(
+    IN kcep_t* const p_new_cep )
+{
+    kcep_t *p_cep;
+    cl_rbmap_item_t *p_item, *p_insert_at;
+    boolean_t left = TRUE;
+
+    AL_ENTER( AL_DBG_CM );
+
+    p_item = cl_rbmap_root( &gp_cep_mgr->conn_qp_map );
+    p_insert_at = p_item;
+    while( p_item != cl_rbmap_end( &gp_cep_mgr->conn_qp_map ) )
+    {
+        p_insert_at = p_item;
+        /*
+         * conn_qp_map items are linked through rem_qp_item (see the
+         * cl_rbmap_insert below); recovering the CEP via rem_id_item,
+         * as the previous code did, produced a misaligned kcep_t
+         * pointer and garbage key comparisons.
+         */
+        p_cep = PARENT_STRUCT( p_item, kcep_t, rem_qp_item );
+
+        if( p_new_cep->remote_qpn < p_cep->remote_qpn )
+            p_item = cl_rbmap_left( p_item ), left = TRUE;
+        else if( p_new_cep->remote_qpn > p_cep->remote_qpn )
+            p_item = cl_rbmap_right( p_item ), left = FALSE;
+        else if( p_new_cep->remote_ca_guid < p_cep->remote_ca_guid )
+            p_item = cl_rbmap_left( p_item ), left = TRUE;
+        else if( p_new_cep->remote_ca_guid > p_cep->remote_ca_guid )
+            p_item = cl_rbmap_right( p_item ), left = FALSE;
+        else
+            goto done;   /* Duplicate - return the existing CEP. */
+    }
+
+    cl_rbmap_insert(
+        &gp_cep_mgr->conn_qp_map, p_insert_at, &p_new_cep->rem_qp_item, left );
+    p_cep = p_new_cep;
+
+done:
+    AL_EXIT( AL_DBG_CM );
+    return p_cep;
+}
+\r
+\r
+static inline kcep_t*\r
+__insert_cep(\r
+ IN kcep_t* const p_new_cep )\r
+{\r
+ kcep_t *p_cep;\r
+\r
+ AL_ENTER( AL_DBG_CM );\r
+\r
+ p_cep = __insert_by_qpn( p_new_cep );\r
+ if( p_cep != p_new_cep )\r
+ goto done;\r
+\r
+ p_cep = __insert_by_id( p_new_cep );\r
+ if( p_cep != p_new_cep )\r
+ {\r
+ cl_rbmap_remove_item(\r
+ &gp_cep_mgr->conn_qp_map, &p_new_cep->rem_qp_item );\r
+ p_cep->remote_qpn = 0;\r
+ }\r
+\r
+done:\r
+ AL_EXIT( AL_DBG_CM );\r
+ return p_cep;\r
+}\r
+\r
+\r
/*
 * Remove a CEP from the connection maps, if present.  The remote comm
 * ID and remote QPN double as "is in this map" flags and are cleared
 * here, making the removal idempotent.  Called with the CEP manager
 * lock held.
 */
static inline void
__remove_cep(
	IN				kcep_t* const				p_cep )
{
	AL_ENTER( AL_DBG_CM );

	/* A non-zero remote comm ID means the CEP is in the ID map. */
	if( p_cep->remote_comm_id )
	{
		cl_rbmap_remove_item(
			&gp_cep_mgr->conn_id_map, &p_cep->rem_id_item );
		p_cep->remote_comm_id = 0;
	}
	/* A non-zero remote QPN means the CEP is in the QP map. */
	if( p_cep->remote_qpn )
	{
		cl_rbmap_remove_item(
			&gp_cep_mgr->conn_qp_map, &p_cep->rem_qp_item );
		p_cep->remote_qpn = 0;
	}

	AL_EXIT( AL_DBG_CM );
}
+\r
+\r
+static boolean_t\r
+__is_lid_valid(\r
+ IN ib_net16_t lid,\r
+ IN ib_net16_t port_lid,\r
+ IN uint8_t lmc )\r
+{\r
+ uint16_t lid1;\r
+ uint16_t lid2;\r
+ uint16_t path_bits;\r
+\r
+ if(lmc)\r
+ {\r
+ lid1 = CL_NTOH16(lid);\r
+ lid2 = CL_NTOH16(port_lid);\r
+ path_bits = 0;\r
+\r
+ if( lid1 < lid2 )\r
+ return FALSE;\r
+\r
+ while( lmc-- )\r
+ path_bits = (uint16_t)( (path_bits << 1) | 1 );\r
+\r
+ lid2 |= path_bits;\r
+\r
+ if( lid1 > lid2)\r
+ return FALSE;\r
+ }\r
+ else\r
+ {\r
+ if (lid != port_lid)\r
+ return FALSE;\r
+ }\r
+\r
+ return TRUE;\r
+}\r
+\r
+\r
+static inline boolean_t\r
+__is_gid_valid(\r
+ IN const ib_port_attr_t* const p_port_attr,\r
+ IN const ib_gid_t* const p_gid )\r
+{\r
+ uint16_t idx;\r
+\r
+ for( idx = 0; idx < p_port_attr->num_gids; idx++ )\r
+ {\r
+ if( !cl_memcmp(\r
+ p_gid, &p_port_attr->p_gid_table[idx], sizeof(ib_gid_t) ) )\r
+ {\r
+ return TRUE;\r
+ }\r
+ }\r
+ return FALSE;\r
+}\r
+\r
+\r
+static inline boolean_t\r
+__get_pkey_index(\r
+ IN const ib_port_attr_t* const p_port_attr,\r
+ IN const net16_t pkey,\r
+ OUT uint16_t* const p_pkey_index )\r
+{\r
+ uint16_t idx;\r
+\r
+ for( idx = 0; idx < p_port_attr->num_pkeys; idx++ )\r
+ {\r
+ if( p_port_attr->p_pkey_table[idx] == pkey )\r
+ {\r
+ *p_pkey_index = idx;\r
+ return TRUE;\r
+ }\r
+ }\r
+\r
+ return FALSE;\r
+}\r
+\r
+\r
/*
 * Find the CEP port agent whose local port matches the given GID, LID,
 * and partition key.  On success also returns the index of the matching
 * P_Key through p_pkey_index.  Returns NULL if no local port matches.
 * (Note: returns the agent pointer, not a port index.)
 */
static cep_agent_t*
__find_port_cep(
	IN		const	ib_gid_t* const				p_gid,
	IN		const	net16_t						lid,
	IN		const	net16_t						pkey,
		OUT			uint16_t* const				p_pkey_index )
{
	cep_agent_t				*p_port_cep;
	cl_list_item_t			*p_item;
	const ib_port_attr_t	*p_port_attr;

	AL_ENTER( AL_DBG_CM );

	/* Walk all port agents parented on the CEP manager. */
	cl_spinlock_acquire( &gp_cep_mgr->obj.lock );
	for( p_item = cl_qlist_head( &gp_cep_mgr->obj.obj_list );
		p_item != cl_qlist_end( &gp_cep_mgr->obj.obj_list );
		p_item = cl_qlist_next( p_item ) )
	{
		p_port_cep = PARENT_STRUCT( p_item, cep_agent_t, obj.pool_item );

		CL_ASSERT( p_port_cep->port_num );

		/* Lock the CA attributes while reading its per-port tables. */
		ci_ca_lock_attr( p_port_cep->h_ca->obj.p_ci_ca );

		/* Index into the CA's port attribute array (port_num is 1-based). */
		p_port_attr = p_port_cep->h_ca->obj.p_ci_ca->p_pnp_attr->p_port_attr;
		p_port_attr += (p_port_cep->port_num - 1);

		if( __is_lid_valid( lid, p_port_attr->lid, p_port_attr->lmc ) &&
			__is_gid_valid( p_port_attr, p_gid ) &&
			__get_pkey_index( p_port_attr, pkey, p_pkey_index ) )
		{
			ci_ca_unlock_attr( p_port_cep->h_ca->obj.p_ci_ca );
			cl_spinlock_release( &gp_cep_mgr->obj.lock );
			AL_EXIT( AL_DBG_CM );
			return p_port_cep;
		}

		ci_ca_unlock_attr( p_port_cep->h_ca->obj.p_ci_ca );
	}
	cl_spinlock_release( &gp_cep_mgr->obj.lock );
	AL_EXIT( AL_DBG_CM );
	return NULL;
}
+\r
+\r
/*
 * PnP callback for port event notifications.  Creates a CEP port agent
 * when a port arrives and destroys it when the port is removed; all
 * other PnP events are ignored.
 */
static ib_api_status_t
__cep_pnp_cb(
	IN				ib_pnp_rec_t				*p_pnp_rec )
{
	ib_api_status_t		status = IB_SUCCESS;

	AL_ENTER( AL_DBG_CM );

	switch( p_pnp_rec->pnp_event )
	{
	case IB_PNP_PORT_ADD:
		/* Create the port agent. */
		CL_ASSERT( !p_pnp_rec->context );
		status = __create_port_cep( (ib_pnp_port_rec_t*)p_pnp_rec );
		break;

	case IB_PNP_PORT_REMOVE:
		/* The context was set to the agent when the port was added. */
		CL_ASSERT( p_pnp_rec->context );

		/* Destroy the port agent (standard al_obj ref + destroy idiom). */
		ref_al_obj( &((cep_agent_t* __ptr64)p_pnp_rec->context)->obj );
		((cep_agent_t* __ptr64)p_pnp_rec->context)->obj.pfn_destroy(
			&((cep_agent_t* __ptr64)p_pnp_rec->context)->obj, NULL );
		break;

	default:
		break;	/* Ignore other PNP events. */
	}

	AL_EXIT( AL_DBG_CM );
	return status;
}
+\r
+\r
+static inline int64_t\r
+__min_timewait(\r
+ IN int64_t current_min,\r
+ IN kcep_t* const p_cep )\r
+{\r
+ /*\r
+ * The minimum timer interval is 50 milliseconds. This means\r
+ * 500000 100ns increments. Since __process_timewait divides the\r
+ * result in half (so that the worst cast timewait interval is 150%)\r
+ * we compensate for this here. Note that relative time values are\r
+ * expressed as negative.\r
+ */\r
+#define MIN_TIMEWAIT_100NS -1000000\r
+\r
+ /* Still in timewait - try again next time. */\r
+ if( !current_min )\r
+ {\r
+ return min( p_cep->timewait_time.QuadPart, MIN_TIMEWAIT_100NS );\r
+ }\r
+ else\r
+ {\r
+ return max( current_min,\r
+ min( p_cep->timewait_time.QuadPart, MIN_TIMEWAIT_100NS ) );\r
+ }\r
+}\r
+\r
+\r
/*
 * Sweep the timewait list, retiring CEPs whose timewait timer has
 * expired: CEPs marked for destruction are freed, others return to the
 * IDLE state.  Returns the suggested next timer interval in
 * milliseconds (half the shortest remaining timewait - see
 * __cep_timewait_cb).  Called at DISPATCH_LEVEL with the CEP manager
 * lock held.
 */
static uint32_t
__process_timewait()
{
	cl_list_item_t		*p_item;
	kcep_t				*p_cep;
	LARGE_INTEGER		timeout;
	int64_t				min_timewait = 0;

	AL_ENTER( AL_DBG_CM );

	CL_ASSERT( KeGetCurrentIrql() == DISPATCH_LEVEL );

	/* Zero timeout: poll each timer's signalled state without waiting. */
	timeout.QuadPart = 0;

	p_item = cl_qlist_head( &gp_cep_mgr->timewait_list );
	while( p_item != cl_qlist_end( &gp_cep_mgr->timewait_list ) )
	{
		p_cep = PARENT_STRUCT( p_item, kcep_t, timewait_item );
		/* Advance before possibly removing the current CEP below. */
		p_item = cl_qlist_next( p_item );

		CL_ASSERT( p_cep->state == CEP_STATE_DESTROY ||
			p_cep->state == CEP_STATE_TIMEWAIT );

		CL_ASSERT( !p_cep->p_mad );

		if( KeWaitForSingleObject( &p_cep->timewait_timer, Executive,
			KernelMode, FALSE, &timeout ) != STATUS_SUCCESS )
		{
			/* Still in timewait - try again next time. */
			min_timewait = __min_timewait( min_timewait, p_cep );
			continue;
		}

		if( p_cep->ref_cnt )
		{
			/* Send outstanding or destruction in progress. */
			min_timewait = __min_timewait( min_timewait, p_cep );
			continue;
		}

		/* Remove from the timewait list. */
		cl_qlist_remove_item( &gp_cep_mgr->timewait_list, &p_cep->timewait_item );

		/*
		 * Not in timewait. Remove the CEP from the maps - it should
		 * no longer be matched against.
		 */
		__remove_cep( p_cep );

		if( p_cep->state == CEP_STATE_DESTROY )
		{
			__destroy_cep( p_cep );
		}
		else
		{
			/* Move the CEP to the IDLE state so that it can be used again. */
			p_cep->state = CEP_STATE_IDLE;
		}
	}

	AL_EXIT( AL_DBG_CM );
	/* Negative 100ns units -> positive milliseconds, halved. */
	return (uint32_t)(min_timewait / -20000);
}
+\r
+\r
/*
 * Timer callback to process CEPs in timewait state.  Runs at
 * DISPATCH_LEVEL; re-arms the shared timewait timer if any CEPs remain
 * in timewait.
 */
static void
__cep_timewait_cb(
	IN				void						*context )
{
	KLOCK_QUEUE_HANDLE	hdl;
	uint32_t			min_timewait;

	AL_ENTER( AL_DBG_CM );

	UNUSED_PARAM( context );

	CL_ASSERT( KeGetCurrentIrql() == DISPATCH_LEVEL );

	KeAcquireInStackQueuedSpinLockAtDpcLevel( &gp_cep_mgr->lock, &hdl );

	/* Retire expired CEPs; get the shortest remaining interval (ms). */
	min_timewait = __process_timewait();

	if( cl_qlist_count( &gp_cep_mgr->timewait_list ) )
	{
		/*
		 * Reset the timer for half of the shortest timeout - this results
		 * in a worst case timeout of 150% of timewait.
		 */
		cl_timer_trim( &gp_cep_mgr->timewait_timer, min_timewait );
	}

	KeReleaseInStackQueuedSpinLockFromDpcLevel( &hdl );

	AL_EXIT( AL_DBG_CM );
}
+\r
+\r
/*
 * Starts immediate cleanup of the CM. Invoked during al_obj destruction.
 * Deregisters from PnP notifications and forces every timewait timer to
 * expire immediately so the timewait list can be drained.
 */
static void
__destroying_cep_mgr(
	IN				al_obj_t*					p_obj )
{
	ib_api_status_t		status;
	KLOCK_QUEUE_HANDLE	hdl;
	cl_list_item_t		*p_item;
	kcep_t				*p_cep;
	LARGE_INTEGER		timeout;

	AL_ENTER( AL_DBG_CM );

	CL_ASSERT( &gp_cep_mgr->obj == p_obj );
	UNUSED_PARAM( p_obj );

	/* Deregister from PnP notifications. */
	if( gp_cep_mgr->h_pnp )
	{
		/*
		 * PnP deregistration is asynchronous; its completion releases
		 * the reference left outstanding by create_cep_mgr.
		 */
		status = ib_dereg_pnp(
			gp_cep_mgr->h_pnp, (ib_pfn_destroy_cb_t)deref_al_obj );
		if( status != IB_SUCCESS )
		{
			/* Deregistration never started - release the reference here. */
			CL_TRACE( AL_DBG_ERROR, g_al_dbg_lvl,
				("ib_dereg_pnp failed with status %s.\n",
				ib_get_err_str(status)) );
			deref_al_obj( &gp_cep_mgr->obj );
		}
	}

	/* Cancel all timewait timers. */
	timeout.QuadPart = 0;
	KeAcquireInStackQueuedSpinLock( &gp_cep_mgr->lock, &hdl );
	for( p_item = cl_qlist_head( &gp_cep_mgr->timewait_list );
		p_item != cl_qlist_end( &gp_cep_mgr->timewait_list );
		p_item = cl_qlist_next( p_item ) )
	{
		/* A zero due time signals each timer immediately. */
		p_cep = PARENT_STRUCT( p_item, kcep_t, timewait_item );
		KeSetTimer( &p_cep->timewait_timer, timeout, NULL );
	}
	/* Reap the now-expired CEPs (lock acquisition raised us to DISPATCH). */
	__process_timewait();
	KeReleaseInStackQueuedSpinLock( &hdl );

	AL_EXIT( AL_DBG_CM );
}
+\r
+\r
/*
 * Frees the global CEP agent. Invoked during al_obj destruction.
 * By this point all listens, connections, and port agents must already
 * be gone.
 */
static void
__free_cep_mgr(
	IN				al_obj_t*					p_obj )
{
	AL_ENTER( AL_DBG_CM );

	CL_ASSERT( &gp_cep_mgr->obj == p_obj );
	/* All listen request should have been cleaned up by this point. */
	CL_ASSERT( cl_is_rbmap_empty( &gp_cep_mgr->listen_map ) );
	/* All connections should have been cancelled/disconnected by now. */
	CL_ASSERT( cl_is_rbmap_empty( &gp_cep_mgr->conn_id_map ) );
	CL_ASSERT( cl_is_rbmap_empty( &gp_cep_mgr->conn_qp_map ) );

	cl_vector_destroy( &gp_cep_mgr->cid_vector );

	cl_timer_destroy( &gp_cep_mgr->timewait_timer );

	/*
	 * All CM port agents should have been destroyed by now via the
	 * standard child object destruction provided by the al_obj.
	 */
	ExDeleteNPagedLookasideList( &gp_cep_mgr->cep_pool );
	destroy_al_obj( p_obj );

	cl_free( gp_cep_mgr );
	gp_cep_mgr = NULL;

	AL_EXIT( AL_DBG_CM );
}
+\r
+\r
+static cl_status_t\r
+__cid_init(\r
+ IN void* const p_element,\r
+ IN void* context )\r
+{\r
+ cep_cid_t *p_cid;\r
+\r
+ UNUSED_PARAM( context );\r
+\r
+ p_cid = (cep_cid_t*)p_element;\r
+\r
+ p_cid->h_al = NULL;\r
+ p_cid->p_cep = (kcep_t*)(uintn_t)++gp_cep_mgr->free_cid;\r
+ p_cid->modifier = 0;\r
+\r
+ return CL_SUCCESS;\r
+}\r
+\r
+\r
/*
 * Allocates and initializes the global CM agent.  On failure, cleanup
 * before init_al_obj succeeds uses __free_cep_mgr directly; afterwards
 * the al_obj's pfn_destroy drives the standard destroy path.
 */
ib_api_status_t
create_cep_mgr(
	IN				al_obj_t* const				p_parent_obj )
{
	ib_api_status_t		status;
	cl_status_t			cl_status;
	ib_pnp_req_t		pnp_req;

	AL_ENTER( AL_DBG_CM );

	CL_ASSERT( gp_cep_mgr == NULL );

	/* Allocate the global CM agent. */
	gp_cep_mgr = (al_cep_mgr_t*)cl_zalloc( sizeof(al_cep_mgr_t) );
	if( !gp_cep_mgr )
	{
		AL_TRACE_EXIT( AL_DBG_ERROR,
			("Failed allocation of global CM agent.\n") );
		return IB_INSUFFICIENT_MEMORY;
	}

	/* Construct all members so cleanup is safe from any point below. */
	construct_al_obj( &gp_cep_mgr->obj, AL_OBJ_TYPE_CM );
	ExInitializeNPagedLookasideList( &gp_cep_mgr->cep_pool, NULL, NULL,
		0, sizeof(kcep_t), 'PECK', 0 );
	cl_qmap_init( &gp_cep_mgr->port_map );
	cl_rbmap_init( &gp_cep_mgr->listen_map );
	cl_rbmap_init( &gp_cep_mgr->conn_id_map );
	cl_rbmap_init( &gp_cep_mgr->conn_qp_map );
	cl_qlist_init( &gp_cep_mgr->timewait_list );
	/* Timer initialization can't fail in kernel-mode. */
	cl_timer_init( &gp_cep_mgr->timewait_timer, __cep_timewait_cb, NULL );
	cl_vector_construct( &gp_cep_mgr->cid_vector );

	status = init_al_obj( &gp_cep_mgr->obj, NULL, FALSE,
		__destroying_cep_mgr, NULL, __free_cep_mgr );
	if( status != IB_SUCCESS )
	{
		/* al_obj not initialized - free directly. */
		__free_cep_mgr( &gp_cep_mgr->obj );
		AL_TRACE_EXIT( AL_DBG_ERROR,
			("init_al_obj failed with status %s.\n", ib_get_err_str(status)) );
		return status;
	}
	/* Attach to the parent object. */
	status = attach_al_obj( p_parent_obj, &gp_cep_mgr->obj );
	if( status != IB_SUCCESS )
	{
		gp_cep_mgr->obj.pfn_destroy( &gp_cep_mgr->obj, NULL );
		AL_TRACE_EXIT( AL_DBG_ERROR,
			("attach_al_obj returned %s.\n", ib_get_err_str(status)) );
		return status;
	}

	/* __cid_init links each entry onto the free list as it is created. */
	cl_status = cl_vector_init( &gp_cep_mgr->cid_vector,
		CEP_CID_MIN, CEP_CID_GROW, sizeof(cep_cid_t), __cid_init, NULL, NULL );
	if( cl_status != CL_SUCCESS )
	{
		gp_cep_mgr->obj.pfn_destroy( &gp_cep_mgr->obj, NULL );
		AL_TRACE_EXIT( AL_DBG_ERROR,
			("cl_vector_init failed with status %s.\n",
			CL_STATUS_MSG(cl_status)) );
		return ib_convert_cl_status( cl_status );
	}

	/* The free list starts at entry 0. */
	gp_cep_mgr->free_cid = 0;

	/* Register for port PnP notifications. */
	cl_memclr( &pnp_req, sizeof(pnp_req) );
	pnp_req.pnp_class = IB_PNP_PORT;
	pnp_req.pnp_context = &gp_cep_mgr->obj;
	pnp_req.pfn_pnp_cb = __cep_pnp_cb;
	status = ib_reg_pnp( gh_al, &pnp_req, &gp_cep_mgr->h_pnp );
	if( status != IB_SUCCESS )
	{
		gp_cep_mgr->obj.pfn_destroy( &gp_cep_mgr->obj, NULL );
		AL_TRACE_EXIT( AL_DBG_ERROR,
			("ib_reg_pnp failed with status %s.\n", ib_get_err_str(status)) );
		return status;
	}

	/*
	 * Leave the reference taken in init_al_obj oustanding since PnP
	 * deregistration is asynchronous. This replaces a call to ref and
	 * deref the object.
	 */

	AL_EXIT( AL_DBG_CM );
	return IB_SUCCESS;
}
+\r
+/******************************************************************************\r
+* CEP manager API\r
+******************************************************************************/\r
+\r
/*
 * Queue a received MAD on a CEP for delivery to the user.
 * Called with the CEP and CEP manager locks held.
 *
 * Returns:
 *  IB_SUCCESS       - MAD queued and the CEP transitioned to signalled;
 *                     the caller must invoke the user callback (a CEP
 *                     reference was taken for it).
 *  IB_PENDING       - MAD queued; a callback is already outstanding.
 *  IB_INVALID_STATE - CEP is being destroyed; MAD was not queued.
 */
static ib_api_status_t
__cep_queue_mad(
	IN				kcep_t* const				p_cep,
	IN				ib_mad_element_t*			p_mad )
{
	AL_ENTER( AL_DBG_CM );

	CL_ASSERT( !p_mad->p_next );

	if( p_cep->state == CEP_STATE_DESTROY )
	{
		AL_EXIT( AL_DBG_CM );
		return IB_INVALID_STATE;
	}

	/* Queue this MAD for processing. */
	if( p_cep->p_mad_head )
	{
		CL_ASSERT( p_cep->signalled );
		/*
		 * If there's already a MAD at the head of the list, we will not
		 * invoke the callback. Just queue and exit.
		 */
		CL_ASSERT( p_cep->p_mad_tail );
		p_cep->p_mad_tail->p_next = p_mad;
		p_cep->p_mad_tail = p_mad;
		AL_EXIT( AL_DBG_CM );
		return IB_PENDING;
	}

	p_cep->p_mad_head = p_mad;
	p_cep->p_mad_tail = p_mad;

	if( p_cep->signalled )
	{
		/* signalled was already non-zero. Don't invoke the callback again. */
		AL_EXIT( AL_DBG_CM );
		return IB_PENDING;
	}

	p_cep->signalled = TRUE;

	/* Take a reference since we're about to invoke the callback. */
	cl_atomic_inc( &p_cep->ref_cnt );

	AL_EXIT( AL_DBG_CM );
	return IB_SUCCESS;
}
+\r
+\r
/*
 * Invoke the user's CEP callback and release the reference taken for it
 * in __cep_queue_mad.  If that was the last reference and a destroy
 * callback is pending, invoke it.  Runs at DISPATCH_LEVEL.
 */
static inline void
__process_cep(
	IN				kcep_t* const				p_cep )
{
	ib_pfn_destroy_cb_t		pfn_destroy_cb;
	void					*context;

	AL_ENTER( AL_DBG_CM );

	CL_ASSERT( KeGetCurrentIrql() == DISPATCH_LEVEL );

	/* Signal to the user there are callback waiting. */
	p_cep->pfn_cb( p_cep->p_cid->h_al, &p_cep->cep );

	/*
	 * Capture the destroy callback and context before releasing the
	 * reference - the CEP may be freed once the count hits zero.
	 */
	pfn_destroy_cb = p_cep->pfn_destroy_cb;
	context = p_cep->cep.context;

	/*
	 * Release the reference for the callback and invoke the destroy
	 * callback if necessary.
	 */
	if( !cl_atomic_dec( &p_cep->ref_cnt ) && pfn_destroy_cb )
		pfn_destroy_cb( context );

	AL_EXIT( AL_DBG_CM );
}
+\r
+\r
+static uint32_t\r
+__calc_mad_timeout(\r
+ IN const uint8_t pkt_life )\r
+{\r
+ /*\r
+ * Calculate the retry timeout.\r
+ * All timeout values in micro seconds are expressed as 4.096 * 2^x,\r
+ * where x is the timeout. The formula to approximates this to\r
+ * milliseconds using just shifts and subtraction is:\r
+ * timeout_ms = 67 << (x - 14)\r
+ * The results are off by 0.162%.\r
+ *\r
+ * Note that we will never return less than 1 millisecond.\r
+ */\r
+ if( pkt_life > 14 )\r
+ return 67 << (pkt_life - 14);\r
+ else if( pkt_life > 8 )\r
+ return 67 >> (14 - pkt_life);\r
+ else\r
+ return 1;\r
+}\r
+\r
+\r
/*
 * Allocate and initialize a new CEP from the lookaside list, assigning
 * it a CID from the free list.  Returns NULL on allocation or CID
 * exhaustion.  CEP manager lock is held when calling this function.
 */
static kcep_t*
__create_cep()
{
	kcep_t				*p_cep;

	AL_ENTER( AL_DBG_CM );

	p_cep = ExAllocateFromNPagedLookasideList( &gp_cep_mgr->cep_pool );
	if( !p_cep )
	{
		AL_TRACE_EXIT( AL_DBG_ERROR, ("Failed to allocate CEP.\n") );
		return NULL;
	}

	cl_memclr( p_cep, sizeof(kcep_t) );

	KeInitializeTimer( &p_cep->timewait_timer );

	p_cep->state = CEP_STATE_IDLE;

	/*
	 * Pre-charge the reference count to 1. The code will invoke the
	 * destroy callback once the ref count reaches to zero.
	 */
	p_cep->ref_cnt = 1;
	p_cep->signalled = FALSE;

	/* Find a free entry in the CID vector. */
	p_cep->p_cid = __get_lcid( &p_cep->cep.cid );

	if( !p_cep->p_cid )
	{
		/* No CID available - undo the allocation. */
		ExFreeToNPagedLookasideList( &gp_cep_mgr->cep_pool, p_cep );
		AL_TRACE_EXIT( AL_DBG_ERROR, ("Failed to get CID.\n") );
		return NULL;
	}

	/* Bump the modifier so stale handles to this CID slot are rejected. */
	p_cep->p_cid->modifier++;
	/*
	 * We don't ever want a modifier of zero for the CID at index zero
	 * since it would result in a total CID of zero.
	 */
	if( !p_cep->cep.cid && !p_cep->p_cid->modifier )
		p_cep->p_cid->modifier++;

	/* The local comm ID combines the CID index with the modifier. */
	p_cep->local_comm_id = p_cep->cep.cid | (p_cep->p_cid->modifier << 24);
	p_cep->tid = p_cep->local_comm_id;

	p_cep->p_cid->p_cep = p_cep;

	/* Keep the CEP manager alive while this CEP exists. */
	ref_al_obj( &gp_cep_mgr->obj );

	AL_EXIT( AL_DBG_CM );
	return p_cep;
}
+\r
+\r
/*
 * Associate a CEP with its owning AL instance and user callback, and
 * queue it on the AL instance's CEP list so it is cleaned up with it.
 */
static inline void
__bind_cep(
	IN				kcep_t* const				p_cep,
	IN				ib_al_handle_t				h_al,
	IN				al_pfn_cep_cb_t				pfn_cb,
	IN				void						*context )
{
	CL_ASSERT( p_cep );
	CL_ASSERT( p_cep->p_cid );
	CL_ASSERT( h_al );

	p_cep->p_cid->h_al = h_al;
	p_cep->pfn_cb = pfn_cb;
	p_cep->cep.context = context;

	/* Track the CEP in its owning AL instance. */
	cl_spinlock_acquire( &h_al->obj.lock );
	cl_qlist_insert_tail( &h_al->cep_list, &p_cep->al_item );
	cl_spinlock_release( &h_al->obj.lock );
}
+\r
+\r
/*
 * Detach a CEP from its owning AL instance, removing it from that
 * instance's CEP list and marking its CID entry as internally owned.
 */
static inline void
__unbind_cep(
	IN				kcep_t* const				p_cep )
{
	CL_ASSERT( p_cep );
	CL_ASSERT( p_cep->p_cid );
	CL_ASSERT( p_cep->p_cid->h_al );

	/* Remove the CEP from its owning AL instance's list. */
	cl_spinlock_acquire( &p_cep->p_cid->h_al->obj.lock );
	cl_qlist_remove_item( &p_cep->p_cid->h_al->cep_list, &p_cep->al_item );
	cl_spinlock_release( &p_cep->p_cid->h_al->obj.lock );

	/*
	 * Set to the internal AL handle - it needs to be non-NULL to indicate it's
	 * a valid entry, and it can't be a user's AL instance to prevent using a
	 * destroyed CEP.
	 */
	p_cep->p_cid->h_al = gh_al;
#ifdef _DEBUG_
	p_cep->pfn_cb = NULL;
#endif	/* _DEBUG_ */
}
+\r
+\r
/*
 * Compute the CEP's timewait interval (as a negative relative time in
 * 100ns units) from its stored packet lifetime and the target's ACK
 * delay.
 */
static inline void
__calc_timewait(
	IN				kcep_t* const				p_cep )
{

	/*
	 * Use the CEP's stored packet lifetime to calculate the time at which
	 * the CEP exits timewait. Packet lifetime is expressed as
	 * 4.096 * 2^pkt_life microseconds, and we need a timeout in 100ns
	 * increments. The formula using just shifts and subtraction is this:
	 *	timeout = (41943 << (pkt_life - 10));
	 * The results are off by .0001%, which should be more than adequate.
	 */
	if( p_cep->max_2pkt_life > 10 )
	{
		p_cep->timewait_time.QuadPart =
			-(41943i64 << (p_cep->max_2pkt_life - 10));
	}
	else
	{
		p_cep->timewait_time.QuadPart =
			-(41943i64 >> (10 - p_cep->max_2pkt_life));
	}
	/* Add the remote ACK delay (subtracting extends a negative time). */
	if( p_cep->target_ack_delay > 10 )
	{
		p_cep->timewait_time.QuadPart -=
			(41943i64 << (p_cep->target_ack_delay - 10));
	}
	else
	{
		p_cep->timewait_time.QuadPart -=
			(41943i64 >> (10 - p_cep->target_ack_delay));
	}
}
+\r
+\r
/*
 * Put a CEP into the timewait list, arm its timewait timer, and trim
 * the shared sweep timer accordingly.
 * Called with CEP manager and CEP locks held.
 */
static inline void
__insert_timewait(
	IN				kcep_t* const				p_cep )
{
	cl_qlist_insert_tail( &gp_cep_mgr->timewait_list, &p_cep->timewait_item );

	/* timewait_time is a negative (relative) due time. */
	KeSetTimer( &p_cep->timewait_timer, p_cep->timewait_time, NULL );

	/*
	 * Reset the timer for half of the shortest timeout - this results
	 * in a worst case timeout of 150% of timewait.
	 */
	cl_timer_trim( &gp_cep_mgr->timewait_timer,
		(uint32_t)(-p_cep->timewait_time.QuadPart / 20000) );
}
+\r
+\r
+static inline ib_api_status_t\r
+__do_cep_rej(\r
+ IN kcep_t* const p_cep,\r
+ IN ib_rej_status_t rej_status,\r
+ IN const uint8_t* const p_ari,\r
+ IN uint8_t ari_len,\r
+ IN const uint8_t* const p_pdata,\r
+ IN uint8_t pdata_len )\r
+{\r
+ ib_api_status_t status;\r
+ cep_agent_t *p_port_cep;\r
+ ib_mad_element_t *p_mad;\r
+\r
+ p_port_cep = __get_cep_agent( p_cep );\r
+ if( !p_port_cep )\r
+ return IB_INSUFFICIENT_RESOURCES;\r
+\r
+ status = ib_get_mad( p_port_cep->pool_key, MAD_BLOCK_SIZE, &p_mad );\r
+ if( status != IB_SUCCESS )\r
+ return status;\r
+\r
+ __format_mad_av( p_mad, &p_cep->av[p_cep->idx_primary] );\r
+\r
+ status = conn_rej_set_ari(\r
+ p_ari, ari_len, (mad_cm_rej_t*)p_mad->p_mad_buf );\r
+ if( status != IB_SUCCESS )\r
+ return status;\r
+\r
+ status = conn_rej_set_pdata(\r
+ p_pdata, pdata_len, (mad_cm_rej_t*)p_mad->p_mad_buf );\r
+ if( status != IB_SUCCESS )\r
+ return status;\r
+\r
+ __reject_mad( p_port_cep, p_cep, p_mad, rej_status );\r
+ return IB_SUCCESS;\r
+}\r
+\r
+\r
+static ib_api_status_t\r
+__cep_get_mad(\r
+ IN kcep_t* const p_cep,\r
+ IN net16_t attr_id,\r
+ OUT cep_agent_t** const pp_port_cep,\r
+ OUT ib_mad_element_t** const pp_mad )\r
+{\r
+ cep_agent_t *p_port_cep;\r
+ ib_api_status_t status;\r
+\r
+ AL_ENTER( AL_DBG_CM );\r
+\r
+ p_port_cep = __get_cep_agent( p_cep );\r
+ if( !p_port_cep )\r
+ {\r
+ AL_EXIT( AL_DBG_CM );\r
+ return IB_INSUFFICIENT_RESOURCES;\r
+ }\r
+\r
+ status = ib_get_mad( p_port_cep->pool_key, MAD_BLOCK_SIZE, pp_mad );\r
+ if( status != IB_SUCCESS )\r
+ {\r
+ AL_EXIT( AL_DBG_CM );\r
+ return status;\r
+ }\r
+\r
+ __format_mad_av( *pp_mad, &p_cep->av[p_cep->idx_primary] );\r
+\r
+ __format_mad_hdr( (*pp_mad)->p_mad_buf, p_cep, attr_id );\r
+\r
+ *pp_port_cep = p_port_cep;\r
+ return status;\r
+}\r
+\r
+\r
+static ib_api_status_t\r
+__format_dreq(\r
+ IN kcep_t* const p_cep, \r
+ IN const uint8_t* p_pdata OPTIONAL,\r
+ IN uint8_t pdata_len,\r
+ IN OUT ib_mad_element_t* const p_mad )\r
+{\r
+ ib_api_status_t status;\r
+ mad_cm_dreq_t *p_dreq;\r
+\r
+ AL_ENTER( AL_DBG_CM );\r
+\r
+ p_dreq = (mad_cm_dreq_t*)p_mad->p_mad_buf;\r
+\r
+ p_dreq->local_comm_id = p_cep->local_comm_id;\r
+ p_dreq->remote_comm_id = p_cep->remote_comm_id;\r
+\r
+ conn_dreq_set_remote_qpn( p_cep->remote_qpn, p_dreq );\r
+\r
+ /* copy optional data */\r
+ status = conn_dreq_set_pdata( p_pdata, pdata_len, p_dreq );\r
+\r
+ AL_EXIT( AL_DBG_CM );\r
+ return status;\r
+}\r
+\r
+\r
+static ib_api_status_t\r
+__dreq_cep(\r
+ IN kcep_t* const p_cep )\r
+{\r
+ ib_api_status_t status;\r
+ cep_agent_t *p_agt;\r
+ ib_mad_element_t *p_mad;\r
+\r
+ status = __cep_get_mad( p_cep, CM_DREQ_ATTR_ID, &p_agt, &p_mad );\r
+ if( status != IB_SUCCESS )\r
+ return status;\r
+\r
+ status = __format_dreq( p_cep, NULL, 0, p_mad );\r
+ if( status != IB_SUCCESS )\r
+ return status;\r
+\r
+ return __cep_send_retry( p_agt, p_cep, p_mad );\r
+}\r
+\r
+\r
/*
 * Fill in a DREP (disconnect reply) MAD for the given CEP, copying any
 * optional private data.  On success the formatted DREP is also cached
 * on the CEP so it can be resent verbatim.
 */
static ib_api_status_t
__format_drep(
	IN				kcep_t* const				p_cep,
	IN		const	uint8_t*					p_pdata OPTIONAL,
	IN				uint8_t						pdata_len,
	IN	OUT			mad_cm_drep_t* const		p_drep )
{
	ib_api_status_t		status;

	AL_ENTER( AL_DBG_CM );

	p_drep->local_comm_id = p_cep->local_comm_id;
	p_drep->remote_comm_id = p_cep->remote_comm_id;

	/* copy optional data */
	status = conn_drep_set_pdata( p_pdata, pdata_len, p_drep );

	/* Store the DREP MAD so we can repeat it if we get a repeated DREQ. */
	if( status == IB_SUCCESS && p_drep != &p_cep->mads.drep )
		p_cep->mads.drep = *p_drep;

	AL_EXIT( AL_DBG_CM );
	return status;
}
+\r
+\r
+static void\r
+__drep_cep(\r
+ IN kcep_t* const p_cep )\r
+{\r
+ cep_agent_t *p_agt;\r
+ ib_mad_element_t *p_mad;\r
+\r
+ AL_ENTER( AL_DBG_CM );\r
+\r
+ if( __cep_get_mad( p_cep, CM_DREP_ATTR_ID, &p_agt, &p_mad ) != IB_SUCCESS )\r
+ return;\r
+\r
+ if( __format_drep( p_cep, NULL, 0, (mad_cm_drep_t*)p_mad->p_mad_buf )\r
+ != IB_SUCCESS )\r
+ {\r
+ return;\r
+ }\r
+\r
+ __cep_send_mad( p_agt, p_mad );\r
+\r
+ AL_EXIT( AL_DBG_CM );\r
+}\r
+\r
+\r
/*
 * Drive a CEP toward destruction: flush its pending MAD queue, release
 * per-state resources, and send the protocol message appropriate to its
 * current state (REJ, DREQ, or DREP).  Returns the reference count
 * remaining after dropping one reference, or -1 if cleanup was already
 * started.  Called with CEP manager lock held.
 */
static int32_t
__cleanup_cep(
	IN				kcep_t* const				p_cep )
{
	ib_mad_element_t	*p_mad;
	kcep_t				*p_new_cep;

	AL_ENTER( AL_DBG_CM );

	CL_ASSERT( p_cep );
	CL_ASSERT( KeGetCurrentIrql() == DISPATCH_LEVEL );

	/* If we've already come through here, we're done. */
	if( p_cep->state == CEP_STATE_DESTROY ||
		p_cep->state == CEP_STATE_DREQ_DESTROY )
	{
		AL_EXIT( AL_DBG_CM );
		return -1;
	}

	/* Cleanup the pending MAD list. */
	while( p_cep->p_mad_head )
	{
		p_mad = p_cep->p_mad_head;
		p_cep->p_mad_head = p_mad->p_next;
		p_mad->p_next = NULL;
		/* send_context1 carries a CEP created for an incoming REQ. */
		if( p_mad->send_context1 )
		{
			p_new_cep = (kcep_t* __ptr64)p_mad->send_context1;

			/* Recursively tear down the never-delivered CEP. */
			__unbind_cep( p_new_cep );
			__cleanup_cep( p_new_cep );
		}
		ib_put_mad( p_mad );
	}

	switch( p_cep->state )
	{
	case CEP_STATE_PRE_REP:
	case CEP_STATE_PRE_REP_MRA_SENT:
		/* Discard the pre-formatted REP before rejecting. */
		CL_ASSERT( p_cep->p_mad );
		ib_put_mad( p_cep->p_mad );
		p_cep->p_mad = NULL;
		/* Fall through. */
	case CEP_STATE_REQ_RCVD:
	case CEP_STATE_REP_RCVD:
	case CEP_STATE_REQ_MRA_SENT:
	case CEP_STATE_REP_MRA_SENT:
		/* Reject the connection. */
		__do_cep_rej( p_cep, IB_REJ_USER_DEFINED, NULL, 0, NULL, 0 );
		break;

	case CEP_STATE_REQ_SENT:
	case CEP_STATE_REQ_MRA_RCVD:
	case CEP_STATE_REP_SENT:
	case CEP_STATE_REP_MRA_RCVD:
		/* Cancel the send. */
		CL_ASSERT( p_cep->h_mad_svc );
		CL_ASSERT( p_cep->p_send_mad );
		ib_cancel_mad( p_cep->h_mad_svc, p_cep->p_send_mad );
		/* Reject the connection. */
		__do_cep_rej( p_cep, IB_REJ_TIMEOUT, NULL, 0, NULL, 0 );
		break;

	case CEP_STATE_ESTABLISHED:
	case CEP_STATE_LAP_RCVD:
	case CEP_STATE_LAP_SENT:
	case CEP_STATE_LAP_MRA_RCVD:
	case CEP_STATE_LAP_MRA_SENT:
	case CEP_STATE_PRE_APR:
	case CEP_STATE_PRE_APR_MRA_SENT:
		/* Disconnect the connection. */
		if( __dreq_cep( p_cep ) != IB_SUCCESS )
			break;
		/* Fall through. */

	case CEP_STATE_DREQ_SENT:
		/* Wait for the DREP (or retries) before entering timewait. */
		p_cep->state = CEP_STATE_DREQ_DESTROY;
		AL_EXIT( AL_DBG_CM );
		return cl_atomic_dec( &p_cep->ref_cnt );

	case CEP_STATE_DREQ_RCVD:
		/* Send the DREP. */
		__drep_cep( p_cep );
		break;

	case CEP_STATE_SREQ_RCVD:
		/* TODO: Reject the SIDR request. */
		break;

	case CEP_STATE_LISTEN:
		/* Remove from listen map. */
		cl_rbmap_remove_item( &gp_cep_mgr->listen_map, &p_cep->listen_item );
		break;

	case CEP_STATE_PRE_REQ:
		/* Discard the pre-formatted REQ. */
		CL_ASSERT( p_cep->p_mad );
		ib_put_mad( p_cep->p_mad );
		p_cep->p_mad = NULL;
		/* Fall through. */
	case CEP_STATE_IDLE:
		break;

	default:
		AL_TRACE( AL_DBG_ERROR, ("CEP in state %d.\n", p_cep->state) );
	case CEP_STATE_TIMEWAIT:
		/* Already in timewait - so all is good. */
		p_cep->state = CEP_STATE_DESTROY;
		AL_EXIT( AL_DBG_CM );
		return cl_atomic_dec( &p_cep->ref_cnt );
	}

	/* Mark for destruction and let the timewait sweep free the CEP. */
	p_cep->state = CEP_STATE_DESTROY;
	__insert_timewait( p_cep );

	AL_EXIT( AL_DBG_CM );
	return cl_atomic_dec( &p_cep->ref_cnt );
}
+\r
+\r
/*
 * Final teardown of a CEP: return its CID to the free list, cancel its
 * timewait timer, free the CEP, and release the CEP manager reference
 * taken in __create_cep.
 */
static void
__destroy_cep(
	IN				kcep_t* const				p_cep )
{
	AL_ENTER( AL_DBG_CM );

	CL_ASSERT(
		p_cep->cep.cid < cl_vector_get_size( &gp_cep_mgr->cid_vector ) );

	CL_ASSERT( p_cep->p_cid == (cep_cid_t*)cl_vector_get_ptr(
		&gp_cep_mgr->cid_vector, p_cep->cep.cid ) );

	/* Free the CID. */
	p_cep->p_cid->p_cep = (kcep_t*)(uintn_t)gp_cep_mgr->free_cid;
	p_cep->p_cid->h_al = NULL;
	gp_cep_mgr->free_cid = p_cep->cep.cid;

	KeCancelTimer( &p_cep->timewait_timer );

	ExFreeToNPagedLookasideList( &gp_cep_mgr->cep_pool, p_cep );

	deref_al_obj( &gp_cep_mgr->obj );

	AL_EXIT( AL_DBG_CM );
}
+\r
+\r
/*
 * Create a CEP bound to the given AL instance and return its CID
 * through p_cid.  Returns IB_INSUFFICIENT_MEMORY if a CEP or CID could
 * not be allocated.
 */
ib_api_status_t
al_create_cep(
	IN				ib_al_handle_t				h_al,
	IN				al_pfn_cep_cb_t				pfn_cb,
	IN				void						*context,
		OUT			net32_t* const				p_cid )
{
	kcep_t				*p_cep;
	KLOCK_QUEUE_HANDLE	hdl;

	AL_ENTER( AL_DBG_CM );

	CL_ASSERT( p_cid );

	/* CEP allocation requires the CEP manager lock. */
	KeAcquireInStackQueuedSpinLock( &gp_cep_mgr->lock, &hdl );
	p_cep = __create_cep();
	if( !p_cep )
	{
		KeReleaseInStackQueuedSpinLock( &hdl );
		AL_TRACE_EXIT( AL_DBG_ERROR, ("Failed to allocate CEP.\n") );
		return IB_INSUFFICIENT_MEMORY;
	}
	KeReleaseInStackQueuedSpinLock( &hdl );

	/* Binding only touches the AL instance's own lock. */
	__bind_cep( p_cep, h_al, pfn_cb, context );

	*p_cid = p_cep->cep.cid;

	AL_EXIT( AL_DBG_CM );
	return IB_SUCCESS;
}
+\r
+\r
/*
 * Begin destruction of the CEP identified by cid.  The CEP is unbound
 * from the AL instance so no further API calls can reach it, then
 * cleaned up; pfn_destroy_cb is invoked immediately if the last
 * reference was released here, otherwise later by __process_cep or the
 * timewait sweep.
 */
ib_api_status_t
al_destroy_cep(
	IN				ib_al_handle_t				h_al,
	IN				net32_t						cid,
	IN				ib_pfn_destroy_cb_t			pfn_destroy_cb )
{
	kcep_t				*p_cep;
	KLOCK_QUEUE_HANDLE	hdl;
	void				*context;
	int32_t				ref_cnt;

	AL_ENTER( AL_DBG_CM );

	CL_ASSERT( h_al );

	/*
	 * Remove the CEP from the CID vector - no further API calls
	 * will succeed for it.
	 */
	KeAcquireInStackQueuedSpinLock( &gp_cep_mgr->lock, &hdl );
	p_cep = __lookup_cep( h_al, cid );
	if( !p_cep )
	{
		/* Invalid handle. */
		KeReleaseInStackQueuedSpinLock( &hdl );
		AL_EXIT( AL_DBG_CM );
		return IB_INVALID_PARAMETER;
	}

	/* Capture the context before cleanup may free the CEP. */
	context = p_cep->cep.context;

	__unbind_cep( p_cep );
	ref_cnt = __cleanup_cep( p_cep );
	KeReleaseInStackQueuedSpinLock( &hdl );

	/*
	 * If cleanup released the last reference, report destruction to the
	 * caller now; otherwise whoever drops the last reference will.
	 */
	if( !ref_cnt && pfn_destroy_cb )
		pfn_destroy_cb( context );

	AL_EXIT( AL_DBG_CM );
	return IB_SUCCESS;
}
+\r
+\r
+/*
+ * Transition a CEP into the listening state and insert it into the
+ * global listen map.  Listens are ordered on service ID, then port GUID
+ * (unless listening on all ports), then optional private-data compare
+ * information.  An overlapping listen is rejected with
+ * IB_INVALID_SETTING.  Called with no locks held; takes the global
+ * CEP manager lock.
+ */
+ib_api_status_t
+al_cep_listen(
+	IN				ib_al_handle_t				h_al,
+	IN				net32_t						cid,
+	IN				ib_cep_listen_t* const		p_listen_info )
+{
+	ib_api_status_t		status;
+	kcep_t				*p_cep, *p_listen;
+	cl_rbmap_item_t		*p_item, *p_insert_at;
+	boolean_t			left = TRUE;
+	intn_t				cmp;
+	KLOCK_QUEUE_HANDLE	hdl;
+
+	AL_ENTER( AL_DBG_CM );
+
+	CL_ASSERT( h_al );
+	CL_ASSERT( p_listen_info );
+
+	KeAcquireInStackQueuedSpinLock( &gp_cep_mgr->lock, &hdl );
+	p_cep = __lookup_cep( h_al, cid );
+	if( !p_cep )
+	{
+		KeReleaseInStackQueuedSpinLock( &hdl );
+		AL_EXIT( AL_DBG_CM );
+		return IB_INVALID_HANDLE;
+	}
+
+	switch( p_cep->state )
+	{
+	case CEP_STATE_PRE_REQ:
+		/* Discard any REQ MAD staged by a previous al_cep_pre_req. */
+		CL_ASSERT( p_cep->p_mad );
+		ib_put_mad( p_cep->p_mad );
+		p_cep->p_mad = NULL;
+		/* Fall through. */
+	case CEP_STATE_IDLE:
+		break;
+	default:
+		status = IB_INVALID_STATE;
+		goto done;
+	}
+
+	/* Insert the CEP into the listen map. */
+	p_item = cl_rbmap_root( &gp_cep_mgr->listen_map );
+	p_insert_at = p_item;
+	while( p_item != cl_rbmap_end( &gp_cep_mgr->listen_map ) )
+	{
+		p_insert_at = p_item;
+
+		p_listen = PARENT_STRUCT( p_item, kcep_t, listen_item );
+
+		if( p_listen_info->svc_id == p_listen->sid )
+			goto port_cmp;
+
+		if( p_listen_info->svc_id < p_listen->sid )
+			p_item = cl_rbmap_left( p_item ), left = TRUE;
+		else
+			p_item = cl_rbmap_right( p_item ), left = FALSE;
+
+		continue;
+
+port_cmp:
+		if( p_listen_info->port_guid != IB_ALL_PORTS )
+		{
+			if( p_listen_info->port_guid == p_listen->port_guid )
+				goto pdata_cmp;
+
+			if( p_listen_info->port_guid < p_listen->port_guid )
+				p_item = cl_rbmap_left( p_item ), left = TRUE;
+			else
+				p_item = cl_rbmap_right( p_item ), left = FALSE;
+
+			continue;
+		}
+
+pdata_cmp:
+		/*
+		 * If an existing listen doesn't have a compare buffer,
+		 * then we found a duplicate.
+		 */
+		if( !p_listen->p_cmp_buf )
+			break;
+
+		if( p_listen_info->p_cmp_buf )
+		{
+			/* Compare length must match. */
+			if( p_listen_info->cmp_len != p_listen->cmp_len )
+				break;
+
+			/* Compare offset must match. */
+			if( p_listen_info->cmp_offset != p_listen->cmp_offset )
+				break;
+
+			/*
+			 * Fix: compare the contents of the caller's buffer, not the
+			 * address of the p_cmp_buf pointer field itself.
+			 */
+			cmp = cl_memcmp( p_listen_info->p_cmp_buf,
+				p_listen->p_cmp_buf, p_listen->cmp_len );
+
+			if( cmp < 0 )
+				p_item = cl_rbmap_left( p_item ), left = TRUE;
+			else if( cmp > 0 )
+				p_item = cl_rbmap_right( p_item ), left = FALSE;
+			else
+				break;
+
+			AL_TRACE( AL_DBG_CM,
+				("Svc ID match but compare buffer mismatch.\n") );
+			continue;
+		}
+		else
+		{
+			/*
+			 * Fix: the new listen has no compare buffer while the existing
+			 * one does, so it overlaps the existing listen - treat it as a
+			 * duplicate.  Previously this path fell out of the loop body
+			 * without advancing p_item, spinning forever under the lock.
+			 */
+			break;
+		}
+	}
+
+	if( p_item != cl_rbmap_end( &gp_cep_mgr->listen_map ) )
+	{
+		/* Duplicate!!! */
+		status = IB_INVALID_SETTING;
+		goto done;
+	}
+
+	/* Set up the CEP. */
+	if( p_listen_info->p_cmp_buf )
+	{
+		p_cep->p_cmp_buf = cl_malloc( p_listen_info->cmp_len );
+		if( !p_cep->p_cmp_buf )
+		{
+			AL_TRACE( AL_DBG_ERROR,
+				("Failed to allocate compare buffer.\n") );
+			status = IB_INSUFFICIENT_MEMORY;
+			goto done;
+		}
+
+		cl_memcpy( p_cep->p_cmp_buf,
+			p_listen_info->p_cmp_buf, p_listen_info->cmp_len );
+	}
+	p_cep->cmp_len = p_listen_info->cmp_len;
+	p_cep->cmp_offset = p_listen_info->cmp_offset;
+	p_cep->sid = p_listen_info->svc_id;
+	p_cep->port_guid = p_listen_info->port_guid;
+	p_cep->state = CEP_STATE_LISTEN;
+
+	cl_rbmap_insert( &gp_cep_mgr->listen_map, p_insert_at,
+		&p_cep->listen_item, left );
+
+	status = IB_SUCCESS;
+
+done:
+	KeReleaseInStackQueuedSpinLock( &hdl );
+
+	AL_EXIT( AL_DBG_CM );
+	return status;
+}
+\r
+\r
+/*
+ * Build the address-vector information for a path record.  Locates the
+ * local port agent matching the path's source GID, SLID and PKEY (also
+ * resolving the PKEY index), then fills in p_av from the path record.
+ * Returns the owning port agent, or NULL if no local port can realize
+ * the path (p_av is left zeroed in that case).
+ */
+static cep_agent_t*
+__format_path_av(
+	IN		const	ib_path_rec_t* const		p_path,
+		OUT			kcep_av_t* const			p_av )
+{
+	cep_agent_t		*p_port_cep;
+
+	AL_ENTER( AL_DBG_CM );
+
+	CL_ASSERT( p_path );
+	CL_ASSERT( p_av );
+
+	cl_memclr( p_av, sizeof(kcep_av_t) );
+
+	p_port_cep = __find_port_cep( &p_path->sgid, p_path->slid,
+		p_path->pkey, &p_av->pkey_index );
+	if( !p_port_cep )
+	{
+		AL_EXIT( AL_DBG_CM );
+		return NULL;
+	}
+
+	p_av->port_guid = p_port_cep->port_guid;
+
+	p_av->attr.port_num = p_port_cep->port_num;
+
+	p_av->attr.sl = ib_path_rec_sl( p_path );
+	p_av->attr.dlid = p_path->dlid;
+
+	/* GRH fields: IB version 1, class/flow label from the path record. */
+	p_av->attr.grh.ver_class_flow = ib_grh_set_ver_class_flow(
+		1, p_path->tclass, ib_path_rec_flow_lbl( p_path ) );
+	p_av->attr.grh.hop_limit = ib_path_rec_hop_limit( p_path );
+	p_av->attr.grh.src_gid = p_path->sgid;
+	p_av->attr.grh.dest_gid = p_path->dgid;
+
+	/* A GRH is only required when the destination GID is not link-local. */
+	p_av->attr.grh_valid = !ib_gid_is_link_local( &p_path->dgid );
+
+	p_av->attr.static_rate = ib_path_rec_rate( p_path );
+	/* Path bits are the SLID's offset from the port's base LID (LMC). */
+	p_av->attr.path_bits = (uint8_t)(p_path->slid - p_port_cep->base_lid);
+
+	/*
+	 * Note that while we never use the connected AV attributes internally,
+	 * we store them so we can pass them back to users.
+	 */
+	p_av->attr.conn.path_mtu = p_path->mtu;
+	p_av->attr.conn.local_ack_timeout = calc_lcl_ack_timeout(
+		ib_path_rec_pkt_life( p_path ) + 1, 0 );
+
+	AL_EXIT( AL_DBG_CM );
+	return p_port_cep;
+}
+\r
+\r
+/*
+ * Formats a REQ mad's path information given a path record.
+ * Copies LIDs/GIDs, flow label, rate, traffic class, hop limit, SL,
+ * and the subnet-local flag, and derives the local ACK timeout from
+ * the path's packet lifetime plus the CA's ack delay.
+ */
+static void
+__format_req_path(
+	IN		const	ib_path_rec_t* const		p_path,
+	IN		const	uint8_t						ack_delay,
+		OUT			req_path_info_t* const		p_req_path )
+{
+	AL_ENTER( AL_DBG_CM );
+
+	p_req_path->local_lid = p_path->slid;
+	p_req_path->remote_lid = p_path->dlid;
+	p_req_path->local_gid = p_path->sgid;
+	p_req_path->remote_gid = p_path->dgid;
+
+	conn_req_path_set_flow_lbl( ib_path_rec_flow_lbl( p_path ),
+		p_req_path );
+	conn_req_path_set_pkt_rate( ib_path_rec_rate( p_path ),
+		p_req_path );
+
+	/* Traffic class & hop limit */
+	p_req_path->traffic_class = p_path->tclass;
+	p_req_path->hop_limit = ib_path_rec_hop_limit( p_path );
+
+	/* SL & Subnet Local fields */
+	conn_req_path_set_svc_lvl( ib_path_rec_sl( p_path ),
+		p_req_path );
+	conn_req_path_set_subn_lcl(
+		ib_gid_is_link_local( &p_path->dgid ), p_req_path );
+
+	/* 2 * packet life (+1 rounds up) plus the target's ack delay. */
+	conn_req_path_set_lcl_ack_timeout(
+		calc_lcl_ack_timeout( ib_path_rec_pkt_life( p_path ) + 1,
+		ack_delay ), p_req_path );
+
+	conn_req_path_clr_rsvd_fields( p_req_path );
+
+	AL_EXIT( AL_DBG_CM );
+}
+\r
+\r
+/*
+ * Fill the CEP's staged MAD with a complete CM REQ: MAD header,
+ * addressing, primary/alternate path info, comm IDs, QP parameters
+ * and private data.  Returns the status of the private-data copy
+ * (IB_SUCCESS or an error from conn_req_set_pdata).
+ */
+static ib_api_status_t
+__format_req(
+	IN				kcep_t* const				p_cep,
+	IN		const	ib_cm_req_t* const			p_cm_req )
+{
+	ib_api_status_t		status;
+	mad_cm_req_t		*p_req;
+
+	AL_ENTER( AL_DBG_CM );
+
+	CL_ASSERT( p_cep );
+	CL_ASSERT( p_cm_req );
+	CL_ASSERT( p_cep->p_mad );
+
+	/* Format the MAD header. */
+	__format_mad_hdr( p_cep->p_mad->p_mad_buf, p_cep, CM_REQ_ATTR_ID );
+
+	/* Set the addressing information in the MAD. */
+	__format_mad_av( p_cep->p_mad, &p_cep->av[p_cep->idx_primary] );
+
+	p_req = (mad_cm_req_t*)p_cep->p_mad->p_mad_buf;
+
+	ci_ca_lock_attr( p_cm_req->h_qp->obj.p_ci_ca );
+	/*
+	 * Store the local CA's ack timeout for use when computing
+	 * the local ACK timeout.
+	 */
+	p_cep->local_ack_delay =
+		p_cm_req->h_qp->obj.p_ci_ca->p_pnp_attr->local_ack_delay;
+	ci_ca_unlock_attr( p_cm_req->h_qp->obj.p_ci_ca );
+
+	/* Format the primary path. */
+	__format_req_path( p_cm_req->p_primary_path,
+		p_cep->local_ack_delay, &p_req->primary_path );
+
+	if( p_cm_req->p_alt_path )
+	{
+		/* Format the alternate path. */
+		__format_req_path( p_cm_req->p_alt_path,
+			p_cep->local_ack_delay, &p_req->alternate_path );
+	}
+	else
+	{
+		cl_memclr( &p_req->alternate_path, sizeof(req_path_info_t) );
+	}
+
+	/* Set the local communication in the REQ. */
+	p_req->local_comm_id = p_cep->local_comm_id;
+	p_req->sid = p_cm_req->svc_id;
+	p_req->local_ca_guid = p_cm_req->h_qp->obj.p_ci_ca->verbs.guid;
+
+	conn_req_set_lcl_qpn( p_cep->local_qpn, p_req );
+	conn_req_set_resp_res( p_cm_req->resp_res, p_req );
+	conn_req_set_init_depth( p_cm_req->init_depth, p_req );
+	conn_req_set_remote_resp_timeout( p_cm_req->remote_resp_timeout, p_req );
+	conn_req_set_qp_type( p_cm_req->h_qp->type, p_req );
+	conn_req_set_flow_ctrl( p_cm_req->flow_ctrl, p_req );
+	conn_req_set_starting_psn( p_cep->rq_psn, p_req );
+
+	conn_req_set_lcl_resp_timeout( p_cm_req->local_resp_timeout, p_req );
+	conn_req_set_retry_cnt( p_cm_req->retry_cnt, p_req );
+
+	p_req->pkey = p_cm_req->p_primary_path->pkey;
+
+	conn_req_set_mtu( ib_path_rec_mtu( p_cm_req->p_primary_path ), p_req );
+	conn_req_set_rnr_retry_cnt( p_cm_req->rnr_retry_cnt, p_req );
+
+	conn_req_set_max_cm_retries( p_cm_req->max_cm_retries, p_req );
+	/* Private data copy is the only fallible step. */
+	status = conn_req_set_pdata(
+		p_cm_req->p_req_pdata, p_cm_req->req_length, p_req );
+
+	conn_req_clr_rsvd_fields( p_req );
+
+	AL_EXIT( AL_DBG_CM );
+	return status;
+}
+\r
+\r
+/*
+ * Validate and store the caller's REQ parameters in the CEP: compare
+ * buffer, primary/alternate AVs, local comm ID, QPN, CM retry timeout
+ * and related state.  On success returns the port agent for the primary
+ * path via pp_port_cep.  Called with the CEP manager lock held.
+ */
+static ib_api_status_t
+__save_user_req(
+	IN				kcep_t* const				p_cep,
+	IN		const	ib_cm_req_t* const			p_cm_req,
+		OUT			cep_agent_t** const			pp_port_cep )
+{
+	cep_agent_t		*p_port_cep;
+
+	AL_ENTER( AL_DBG_CM );
+
+	if( !p_cm_req->p_primary_path )
+	{
+		AL_TRACE_EXIT( AL_DBG_ERROR, ("Invalid primary path record.\n") );
+		return IB_INVALID_SETTING;
+	}
+
+	p_cep->sid = p_cm_req->svc_id;
+
+	p_cep->idx_primary = 0;
+
+	/* A REQ callback implies a peer-to-peer connection request. */
+	p_cep->p2p = (p_cm_req->pfn_cm_req_cb != NULL);
+
+	if( p_cm_req->p_compare_buffer )
+	{
+		/* Compare data must be non-empty and fit in the REQ private data. */
+		if( !p_cm_req->compare_length ||
+			(p_cm_req->compare_offset + p_cm_req->compare_length) >
+			IB_REQ_PDATA_SIZE )
+		{
+			AL_EXIT( AL_DBG_CM );
+			return IB_INVALID_SETTING;
+		}
+		p_cep->p_cmp_buf = cl_malloc( p_cm_req->compare_length );
+		if( !p_cep->p_cmp_buf )
+		{
+			AL_EXIT( AL_DBG_CM );
+			return IB_INSUFFICIENT_MEMORY;
+		}
+
+		cl_memcpy( p_cep->p_cmp_buf,
+			p_cm_req->p_compare_buffer, p_cm_req->compare_length );
+
+		p_cep->cmp_len = p_cm_req->compare_length;
+		p_cep->cmp_offset = p_cm_req->compare_offset;
+	}
+	else
+	{
+		p_cep->p_cmp_buf = NULL;
+		p_cep->cmp_len = 0;
+		p_cep->cmp_offset = 0;
+	}
+	p_cep->was_active = TRUE;
+
+	/* Validate the primary path. */
+	p_port_cep = __format_path_av( p_cm_req->p_primary_path, &p_cep->av[0] );
+	if( !p_port_cep )
+	{
+		AL_TRACE_EXIT( AL_DBG_ERROR, ("Primary path unrealizable.\n") );
+		return IB_INVALID_SETTING;
+	}
+
+	p_cep->av[0].attr.conn.seq_err_retry_cnt = p_cm_req->retry_cnt;
+
+	/* Make sure the paths will work on the desired QP. */
+	if( p_port_cep->h_ca->obj.p_ci_ca->verbs.guid !=
+		p_cm_req->h_qp->obj.p_ci_ca->verbs.guid )
+	{
+		AL_TRACE_EXIT( AL_DBG_ERROR,
+			("Primary path not realizable on given QP.\n") );
+		return IB_INVALID_SETTING;
+	}
+
+	p_cep->local_ca_guid = p_port_cep->h_ca->obj.p_ci_ca->verbs.guid;
+
+	*pp_port_cep = p_port_cep;
+
+	/*
+	 * Store the PKEY so we can ensure that alternate paths are
+	 * on the same partition.
+	 */
+	p_cep->pkey = p_cm_req->p_primary_path->pkey;
+
+	p_cep->max_2pkt_life = ib_path_rec_pkt_life( p_cm_req->p_primary_path ) + 1;
+
+	if( p_cm_req->p_alt_path )
+	{
+		/* MTUs must match since they are specified only once. */
+		if( ib_path_rec_mtu( p_cm_req->p_primary_path ) !=
+			ib_path_rec_mtu( p_cm_req->p_alt_path ) )
+		{
+			AL_TRACE_EXIT( AL_DBG_ERROR,
+				("Mismatched primary and alternate path MTUs.\n") );
+			return IB_INVALID_SETTING;
+		}
+
+		/* The PKEY must match too. */
+		if( p_cm_req->p_alt_path->pkey != p_cm_req->p_primary_path->pkey )
+		{
+			AL_TRACE_EXIT( AL_DBG_ERROR,
+				("Mismatched primary and alternate PKEYs.\n") );
+			return IB_INVALID_SETTING;
+		}
+
+		/*
+		 * An unrealizable alternate path is not fatal here:
+		 * __format_path_av leaves av[1] zeroed, and the failover logic
+		 * rejects a zero alternate later.  Only a realizable path on a
+		 * different CA is an error.
+		 */
+		p_port_cep =
+			__format_path_av( p_cm_req->p_alt_path, &p_cep->av[1] );
+		if( p_port_cep &&
+			p_port_cep->h_ca->obj.p_ci_ca->verbs.guid != p_cep->local_ca_guid )
+		{
+			/* Alternate path is not on same CA. */
+			AL_TRACE_EXIT( AL_DBG_ERROR, ("Alternate path unrealizable.\n") );
+			return IB_INVALID_SETTING;
+		}
+
+		p_cep->av[1].attr.conn.seq_err_retry_cnt = p_cm_req->retry_cnt;
+
+		p_cep->max_2pkt_life = max( p_cep->max_2pkt_life,
+			(ib_path_rec_pkt_life( p_cm_req->p_alt_path ) + 1) );
+	}
+	else
+	{
+		cl_memclr( &p_cep->av[1], sizeof(kcep_av_t) );
+	}
+
+	p_cep->p_cid->modifier++;
+	/*
+	 * We don't ever want a modifier of zero for the CID at index zero
+	 * since it would result in a total CID of zero.
+	 */
+	if( !p_cep->cep.cid && !p_cep->p_cid->modifier )
+		p_cep->p_cid->modifier++;
+
+	/* Store pertinent information in the connection. */
+	p_cep->local_comm_id = p_cep->cep.cid | (p_cep->p_cid->modifier << 24);
+	p_cep->remote_comm_id = 0;
+
+	/* Cache the local QPN. */
+	p_cep->local_qpn = p_cm_req->h_qp->num;
+	p_cep->remote_ca_guid = 0;
+	p_cep->remote_qpn = 0;
+
+	/* Retry timeout is remote CM response timeout plus 2 * packet life. */
+	p_cep->retry_timeout = __calc_mad_timeout( p_cep->max_2pkt_life ) +
+		__calc_mad_timeout( p_cm_req->remote_resp_timeout );
+
+	/* Store the retry count. */
+	p_cep->max_cm_retries = p_cm_req->max_cm_retries;
+
+	/*
+	 * Clear the maximum packet lifetime, used to calculate timewait.
+	 * It will be set when we transition into the established state.
+	 */
+	p_cep->timewait_time.QuadPart = 0;
+
+	/* Use the local QPN as the starting receive PSN. */
+	p_cep->rq_psn = p_cep->local_qpn;
+
+	p_cep->rnr_nak_timeout = p_cm_req->rnr_nak_timeout;
+
+	AL_EXIT( AL_DBG_CM );
+	return IB_SUCCESS;
+}
+\r
+\r
+/*
+ * Stage a connection REQ on the CEP without sending it: validates and
+ * saves the user's REQ parameters, gets and formats a REQ MAD, and
+ * returns the RESET->INIT QP modify attributes the caller must apply
+ * before calling al_cep_send_req.  Moves the CEP to CEP_STATE_PRE_REQ.
+ */
+ib_api_status_t
+al_cep_pre_req(
+	IN				ib_al_handle_t				h_al,
+	IN				net32_t						cid,
+	IN		const	ib_cm_req_t* const			p_cm_req,
+		OUT			ib_qp_mod_t* const			p_init )
+{
+	ib_api_status_t		status;
+	kcep_t				*p_cep;
+	KLOCK_QUEUE_HANDLE	hdl;
+	cep_agent_t			*p_port_cep;
+
+	AL_ENTER( AL_DBG_CM );
+
+	CL_ASSERT( h_al );
+	CL_ASSERT( p_cm_req );
+	CL_ASSERT( p_init );
+
+	/* TODO: Code P2P support. */
+	if( p_cm_req->pfn_cm_req_cb )
+	{
+		AL_EXIT( AL_DBG_CM );
+		return IB_UNSUPPORTED;
+	}
+
+	KeAcquireInStackQueuedSpinLock( &gp_cep_mgr->lock, &hdl );
+	p_cep = __lookup_cep( h_al, cid );
+	if( !p_cep )
+	{
+		KeReleaseInStackQueuedSpinLock( &hdl );
+		AL_EXIT( AL_DBG_CM );
+		return IB_INVALID_HANDLE;
+	}
+
+	switch( p_cep->state )
+	{
+	case CEP_STATE_PRE_REQ:
+		/* Re-staging: discard the previously formatted REQ MAD. */
+		CL_ASSERT( p_cep->p_mad );
+		ib_put_mad( p_cep->p_mad );
+		p_cep->p_mad = NULL;
+		/* Fall through. */
+	case CEP_STATE_IDLE:
+		status = __save_user_req( p_cep, p_cm_req, &p_port_cep );
+		if( status != IB_SUCCESS )
+			break;
+
+		status =
+			ib_get_mad( p_port_cep->pool_key, MAD_BLOCK_SIZE, &p_cep->p_mad );
+		if( status != IB_SUCCESS )
+			break;
+
+		status = __format_req( p_cep, p_cm_req );
+		if( status != IB_SUCCESS )
+		{
+			AL_TRACE_EXIT( AL_DBG_ERROR, ("Invalid pdata length.\n") );
+			ib_put_mad( p_cep->p_mad );
+			p_cep->p_mad = NULL;
+			break;
+		}
+
+		/* Format the INIT qp modify attributes. */
+		p_init->req_state = IB_QPS_INIT;
+		p_init->state.init.primary_port =
+			p_cep->av[p_cep->idx_primary].attr.port_num;
+		p_init->state.init.qkey = 0;
+		p_init->state.init.pkey_index =
+			p_cep->av[p_cep->idx_primary].pkey_index;
+		p_init->state.init.access_ctrl = IB_AC_LOCAL_WRITE;
+
+		p_cep->state = CEP_STATE_PRE_REQ;
+		break;
+
+	case CEP_STATE_TIMEWAIT:
+		status = IB_QP_IN_TIMEWAIT;
+		break;
+
+	default:
+		status = IB_INVALID_STATE;
+	}
+	KeReleaseInStackQueuedSpinLock( &hdl );
+	AL_EXIT( AL_DBG_CM );
+	return status;
+}
+\r
+\r
+/*
+ * Send the REQ MAD staged by al_cep_pre_req, with retries.  On success
+ * the CEP transitions to CEP_STATE_REQ_SENT; on any failure it falls
+ * back to CEP_STATE_IDLE.
+ */
+ib_api_status_t
+al_cep_send_req(
+	IN				ib_al_handle_t				h_al,
+	IN				net32_t						cid )
+{
+	ib_api_status_t		status;
+	kcep_t				*p_cep;
+	KLOCK_QUEUE_HANDLE	hdl;
+	cep_agent_t			*p_port_cep;
+
+	AL_ENTER( AL_DBG_CM );
+
+	CL_ASSERT( h_al );
+
+	KeAcquireInStackQueuedSpinLock( &gp_cep_mgr->lock, &hdl );
+	p_cep = __lookup_cep( h_al, cid );
+	if( !p_cep )
+	{
+		KeReleaseInStackQueuedSpinLock( &hdl );
+		AL_EXIT( AL_DBG_CM );
+		return IB_INVALID_HANDLE;
+	}
+
+	switch( p_cep->state )
+	{
+	case CEP_STATE_PRE_REQ:
+		CL_ASSERT( p_cep->p_mad );
+		p_port_cep = __get_cep_agent( p_cep );
+		if( !p_port_cep )
+		{
+			ib_put_mad( p_cep->p_mad );
+			p_cep->state = CEP_STATE_IDLE;
+			status = IB_INVALID_SETTING;
+		}
+		else
+		{
+			status = __cep_send_retry( p_port_cep, p_cep, p_cep->p_mad );
+
+			if( status == IB_SUCCESS )
+				p_cep->state = CEP_STATE_REQ_SENT;
+			else
+				p_cep->state = CEP_STATE_IDLE;
+		}
+		/*
+		 * The CEP no longer owns the MAD.  NOTE(review): on the
+		 * __cep_send_retry failure path the MAD is not put back here -
+		 * presumably __cep_send_retry consumes it either way; confirm.
+		 */
+		p_cep->p_mad = NULL;
+		break;
+
+	default:
+		status = IB_INVALID_STATE;
+	}
+	KeReleaseInStackQueuedSpinLock( &hdl );
+	AL_EXIT( AL_DBG_CM );
+	return status;
+}
+\r
+\r
+/*
+ * Store the caller's REP parameters in the CEP: local QPN/PSN,
+ * initiator depth, RNR NAK timeout, and the responder resources
+ * (trimmed to the CA's maximum if the REQ asked for more).
+ */
+static void
+__save_user_rep(
+	IN				kcep_t* const				p_cep,
+	IN		const	ib_cm_rep_t* const			p_cm_rep )
+{
+	AL_ENTER( AL_DBG_CM );
+
+	/* Cache the local QPN. */
+	p_cep->local_qpn = p_cm_rep->h_qp->num;
+	p_cep->rq_psn = p_cep->local_qpn;
+	p_cep->init_depth = p_cm_rep->init_depth;
+
+	ci_ca_lock_attr( p_cm_rep->h_qp->obj.p_ci_ca );
+	/* Check the CA's responder resource max and trim if necessary. */
+	if( (p_cm_rep->h_qp->obj.p_ci_ca->p_pnp_attr->max_qp_resp_res <
+		p_cep->req_init_depth) )
+	{
+		/*
+		 * The CA cannot handle the requested responder resources.
+		 * Set the response to the CA's maximum.
+		 */
+		p_cep->resp_res =
+			p_cm_rep->h_qp->obj.p_ci_ca->p_pnp_attr->max_qp_resp_res;
+	}
+	else
+	{
+		/* The CA supports the requested responder resources. */
+		p_cep->resp_res = p_cep->req_init_depth;
+	}
+	ci_ca_unlock_attr( p_cm_rep->h_qp->obj.p_ci_ca );
+
+	p_cep->rnr_nak_timeout = p_cm_rep->rnr_nak_timeout;
+
+	AL_EXIT( AL_DBG_CM );
+}
+\r
+\r
+/*
+ * Fill the CEP's staged MAD with a complete CM REP: header, addressing,
+ * comm IDs, QP parameters, failover status and private data.  Returns
+ * the status of the private-data copy.
+ */
+static ib_api_status_t
+__format_rep(
+	IN				kcep_t* const				p_cep,
+	IN		const	ib_cm_rep_t* const			p_cm_rep )
+{
+	ib_api_status_t		status;
+	mad_cm_rep_t		*p_rep;
+
+	AL_ENTER( AL_DBG_CM );
+
+	CL_ASSERT( p_cep );
+	CL_ASSERT( p_cm_rep );
+	CL_ASSERT( p_cep->p_mad );
+
+	/* Format the MAD header. */
+	__format_mad_hdr( p_cep->p_mad->p_mad_buf, p_cep, CM_REP_ATTR_ID );
+
+	/* Set the addressing information in the MAD. */
+	__format_mad_av( p_cep->p_mad, &p_cep->av[p_cep->idx_primary] );
+
+	p_rep = (mad_cm_rep_t*)p_cep->p_mad->p_mad_buf;
+
+	p_rep->local_comm_id = p_cep->local_comm_id;
+	p_rep->remote_comm_id = p_cep->remote_comm_id;
+	conn_rep_set_lcl_qpn( p_cep->local_qpn, p_rep );
+	conn_rep_set_starting_psn( p_cep->rq_psn, p_rep );
+
+	if( p_cm_rep->failover_accepted != IB_FAILOVER_ACCEPT_SUCCESS )
+	{
+		/*
+		 * Failover rejected - clear the alternate AV information.
+		 * Note that at this point, the alternate is always at index 1.
+		 */
+		cl_memclr( &p_cep->av[1], sizeof(kcep_av_t) );
+	}
+	else if( !p_cep->av[1].port_guid )
+	{
+		/*
+		 * Always reject alternate path if it's zero.  We might
+		 * have cleared the AV because it was unrealizable when
+		 * processing the REQ.
+		 */
+		conn_rep_set_failover( IB_FAILOVER_ACCEPT_ERROR, p_rep );
+	}
+	else
+	{
+		conn_rep_set_failover( p_cm_rep->failover_accepted, p_rep );
+	}
+
+	p_rep->resp_resources = p_cep->resp_res;
+
+	/* Advertise this CA's ack delay to the remote side. */
+	ci_ca_lock_attr( p_cm_rep->h_qp->obj.p_ci_ca );
+	conn_rep_set_target_ack_delay(
+		p_cm_rep->h_qp->obj.p_ci_ca->p_pnp_attr->local_ack_delay, p_rep );
+	ci_ca_unlock_attr( p_cm_rep->h_qp->obj.p_ci_ca );
+
+	p_rep->initiator_depth = p_cep->init_depth;
+
+	conn_rep_set_e2e_flow_ctl( p_cm_rep->flow_ctrl, p_rep );
+
+	/* RNR retry count is a 3-bit field. */
+	conn_rep_set_rnr_retry_cnt(
+		(uint8_t)(p_cm_rep->rnr_retry_cnt & 0x07), p_rep );
+
+	/* Local CA guid should have been set when processing the received REQ. */
+	CL_ASSERT( p_cep->local_ca_guid );
+	p_rep->local_ca_guid = p_cep->local_ca_guid;
+
+	status = conn_rep_set_pdata(
+		p_cm_rep->p_rep_pdata, p_cm_rep->rep_length, p_rep );
+
+	conn_rep_clr_rsvd_fields( p_rep );
+
+	AL_EXIT( AL_DBG_CM );
+	return status;
+}
+\r
+\r
+/*
+ * Stage a REP on a CEP that has received a REQ: saves the user's REP
+ * parameters, gets and formats a REP MAD, stores the new CEP context,
+ * and returns the RESET->INIT QP modify attributes.  ORs CEP_STATE_PREP
+ * into the current state.
+ */
+ib_api_status_t
+al_cep_pre_rep(
+	IN				ib_al_handle_t				h_al,
+	IN				net32_t						cid,
+	IN				void						*context,
+	IN		const	ib_cm_rep_t* const			p_cm_rep,
+		OUT			ib_qp_mod_t* const			p_init )
+{
+	ib_api_status_t		status;
+	kcep_t				*p_cep;
+	KLOCK_QUEUE_HANDLE	hdl;
+	cep_agent_t			*p_port_cep;
+
+	AL_ENTER( AL_DBG_CM );
+
+	CL_ASSERT( h_al );
+	CL_ASSERT( p_cm_rep );
+	CL_ASSERT( p_init );
+
+	KeAcquireInStackQueuedSpinLock( &gp_cep_mgr->lock, &hdl );
+	p_cep = __lookup_cep( h_al, cid );
+	if( !p_cep )
+	{
+		KeReleaseInStackQueuedSpinLock( &hdl );
+		AL_EXIT( AL_DBG_CM );
+		return IB_INVALID_HANDLE;
+	}
+
+	switch( p_cep->state )
+	{
+	case CEP_STATE_PRE_REP:
+	case CEP_STATE_PRE_REP_MRA_SENT:
+		/* Re-staging: discard the previously formatted REP MAD. */
+		CL_ASSERT( p_cep->p_mad );
+		ib_put_mad( p_cep->p_mad );
+		p_cep->p_mad = NULL;
+		/* Fall through. */
+	case CEP_STATE_REQ_RCVD:
+	case CEP_STATE_REQ_MRA_SENT:
+		CL_ASSERT( !p_cep->p_mad );
+		status =
+			__cep_get_mad( p_cep, CM_REP_ATTR_ID, &p_port_cep, &p_cep->p_mad );
+		if( status != IB_SUCCESS )
+			break;
+
+		__save_user_rep( p_cep, p_cm_rep );
+
+		status = __format_rep( p_cep, p_cm_rep );
+		if( status != IB_SUCCESS )
+		{
+			ib_put_mad( p_cep->p_mad );
+			p_cep->p_mad = NULL;
+			break;
+		}
+
+		/* Format the INIT qp modify attributes. */
+		p_init->req_state = IB_QPS_INIT;
+		p_init->state.init.primary_port =
+			p_cep->av[p_cep->idx_primary].attr.port_num;
+		p_init->state.init.qkey = 0;
+		p_init->state.init.pkey_index =
+			p_cep->av[p_cep->idx_primary].pkey_index;
+		p_init->state.init.access_ctrl = IB_AC_LOCAL_WRITE;
+
+		p_cep->cep.context = context;
+
+		/* Just OR in the PREP bit into the state. */
+		p_cep->state |= CEP_STATE_PREP;
+		break;
+
+	default:
+		status = IB_INVALID_STATE;
+	}
+	KeReleaseInStackQueuedSpinLock( &hdl );
+	AL_EXIT( AL_DBG_CM );
+	return status;
+}
+\r
+\r
+/*
+ * Send the REP MAD staged by al_cep_pre_rep, with retries.  On success
+ * the CEP moves to CEP_STATE_REP_SENT; on failure it is removed from
+ * the connection maps and returned to CEP_STATE_IDLE.
+ */
+ib_api_status_t
+al_cep_send_rep(
+	IN				ib_al_handle_t				h_al,
+	IN				net32_t						cid )
+{
+	ib_api_status_t		status;
+	kcep_t				*p_cep;
+	KLOCK_QUEUE_HANDLE	hdl;
+	cep_agent_t			*p_port_cep;
+
+	AL_ENTER( AL_DBG_CM );
+
+	CL_ASSERT( h_al );
+
+	KeAcquireInStackQueuedSpinLock( &gp_cep_mgr->lock, &hdl );
+	p_cep = __lookup_cep( h_al, cid );
+	if( !p_cep )
+	{
+		KeReleaseInStackQueuedSpinLock( &hdl );
+		AL_EXIT( AL_DBG_CM );
+		return IB_INVALID_HANDLE;
+	}
+
+	switch( p_cep->state )
+	{
+	case CEP_STATE_PRE_REP:
+	case CEP_STATE_PRE_REP_MRA_SENT:
+		CL_ASSERT( p_cep->p_mad );
+		p_port_cep = __get_cep_agent( p_cep );
+		if( !p_port_cep )
+		{
+			ib_put_mad( p_cep->p_mad );
+			p_cep->state = CEP_STATE_IDLE;
+			status = IB_INSUFFICIENT_RESOURCES;
+		}
+		else
+		{
+			status = __cep_send_retry( p_port_cep, p_cep, p_cep->p_mad );
+			if( status == IB_SUCCESS )
+			{
+				p_cep->state = CEP_STATE_REP_SENT;
+			}
+			else
+			{
+				/* Send failed - take the CEP out of the connection maps. */
+				__remove_cep( p_cep );
+				p_cep->state = CEP_STATE_IDLE;
+			}
+		}
+		/* The CEP no longer owns the MAD (see al_cep_send_req). */
+		p_cep->p_mad = NULL;
+		break;
+
+	default:
+		status = IB_INVALID_STATE;
+	}
+	KeReleaseInStackQueuedSpinLock( &hdl );
+	AL_EXIT( AL_DBG_CM );
+	return status;
+}
+\r
+\r
+/*
+ * Fill an RTU MAD with the connection's comm IDs and optional private
+ * data.  On success the RTU is also cached in the CEP so it can be
+ * resent if a duplicate REP arrives.
+ */
+static inline ib_api_status_t
+__format_rtu(
+	IN				kcep_t* const				p_cep,
+	IN		const	uint8_t*					p_pdata OPTIONAL,
+	IN				uint8_t						pdata_len,
+	IN	OUT			ib_mad_element_t* const		p_mad )
+{
+	ib_api_status_t		status;
+	mad_cm_rtu_t		*p_rtu;
+
+	AL_ENTER( AL_DBG_CM );
+
+	p_rtu = (mad_cm_rtu_t*)p_mad->p_mad_buf;
+
+	p_rtu->local_comm_id = p_cep->local_comm_id;
+	p_rtu->remote_comm_id = p_cep->remote_comm_id;
+
+	/* copy optional data */
+	status = conn_rtu_set_pdata( p_pdata, pdata_len, p_rtu );
+
+	/* Store the RTU MAD so we can repeat it if we get a repeated REP. */
+	if( status == IB_SUCCESS )
+		p_cep->mads.rtu = *p_rtu;
+
+	AL_EXIT( AL_DBG_CM );
+	return status;
+}
+\r
+\r
+/*
+ * Send an RTU to complete connection establishment after a REP was
+ * received.  Updates the timewait time and moves the CEP to
+ * CEP_STATE_ESTABLISHED.  A send failure is not reported: the RTU is
+ * repeated if the remote side retries its REP.
+ */
+ib_api_status_t
+al_cep_rtu(
+	IN				ib_al_handle_t				h_al,
+	IN				net32_t						cid,
+	IN		const	uint8_t*					p_pdata OPTIONAL,
+	IN				uint8_t						pdata_len )
+{
+	ib_api_status_t		status;
+	kcep_t				*p_cep;
+	KLOCK_QUEUE_HANDLE	hdl;
+	cep_agent_t			*p_port_cep;
+	ib_mad_element_t	*p_mad;
+
+	AL_ENTER( AL_DBG_CM );
+
+	CL_ASSERT( h_al );
+
+	KeAcquireInStackQueuedSpinLock( &gp_cep_mgr->lock, &hdl );
+	p_cep = __lookup_cep( h_al, cid );
+	if( !p_cep )
+	{
+		KeReleaseInStackQueuedSpinLock( &hdl );
+		AL_EXIT( AL_DBG_CM );
+		return IB_INVALID_HANDLE;
+	}
+
+	switch( p_cep->state )
+	{
+	case CEP_STATE_REP_RCVD:
+	case CEP_STATE_REP_MRA_SENT:
+		status = __cep_get_mad( p_cep, CM_RTU_ATTR_ID, &p_port_cep, &p_mad );
+		if( status != IB_SUCCESS )
+			break;
+
+		status = __format_rtu( p_cep, p_pdata, pdata_len, p_mad );
+		if( status != IB_SUCCESS )
+		{
+			ib_put_mad( p_mad );
+			break;
+		}
+
+		/* Update the timewait time. */
+		__calc_timewait( p_cep );
+
+		p_cep->state = CEP_STATE_ESTABLISHED;
+
+		__cep_send_mad( p_port_cep, p_mad );
+		/* Send failures will get another chance if we receive a repeated REP. */
+		status = IB_SUCCESS;
+		break;
+
+	default:
+		status = IB_INVALID_STATE;
+	}
+	KeReleaseInStackQueuedSpinLock( &hdl );
+	AL_EXIT( AL_DBG_CM );
+	return status;
+}
+\r
+\r
+/*
+ * Reject a received REQ or REP.  Rejecting a REQ returns the CEP to
+ * idle (after removing it from the connection maps); rejecting a REP
+ * moves the CEP into timewait, per the CM state rules.
+ */
+ib_api_status_t
+al_cep_rej(
+	IN				ib_al_handle_t				h_al,
+	IN				net32_t						cid,
+	IN				ib_rej_status_t				rej_status,
+	IN		const	uint8_t* const				p_ari,
+	IN				uint8_t						ari_len,
+	IN		const	uint8_t* const				p_pdata,
+	IN				uint8_t						pdata_len )
+{
+	ib_api_status_t		status;
+	kcep_t				*p_cep;
+	KLOCK_QUEUE_HANDLE	hdl;
+
+	AL_ENTER( AL_DBG_CM );
+
+	CL_ASSERT( h_al );
+
+	KeAcquireInStackQueuedSpinLock( &gp_cep_mgr->lock, &hdl );
+	p_cep = __lookup_cep( h_al, cid );
+	if( !p_cep )
+	{
+		KeReleaseInStackQueuedSpinLock( &hdl );
+		AL_EXIT( AL_DBG_CM );
+		return IB_INVALID_HANDLE;
+	}
+
+	switch( p_cep->state )
+	{
+	case CEP_STATE_REQ_RCVD:
+	case CEP_STATE_REQ_MRA_SENT:
+		/* Rejecting a REQ: no connection was established, back to idle. */
+		__remove_cep( p_cep );
+		status = __do_cep_rej(
+			p_cep, rej_status, p_ari, ari_len, p_pdata, pdata_len );
+		p_cep->state = CEP_STATE_IDLE;
+		break;
+
+	case CEP_STATE_REP_RCVD:
+	case CEP_STATE_REP_MRA_SENT:
+		/* Rejecting a REP: the connection must pass through timewait. */
+		status = __do_cep_rej(
+			p_cep, rej_status, p_ari, ari_len, p_pdata, pdata_len );
+		p_cep->state = CEP_STATE_TIMEWAIT;
+		__insert_timewait( p_cep );
+		break;
+
+	default:
+		status = IB_INVALID_STATE;
+	}
+
+	KeReleaseInStackQueuedSpinLock( &hdl );
+	AL_EXIT( AL_DBG_CM );
+	return status;
+}
+\r
+\r
+/*
+ * Fill an MRA MAD: which message is being acknowledged (msg_mraed),
+ * comm IDs, service timeout and optional private data.  On success the
+ * MRA is cached in the CEP so it can be resent for repeated messages.
+ */
+static ib_api_status_t
+__format_mra(
+	IN				kcep_t* const				p_cep,
+	IN		const	uint8_t						msg_mraed,
+	IN		const	ib_cm_mra_t* const			p_cm_mra,
+	IN	OUT			ib_mad_element_t* const		p_mad )
+{
+	ib_api_status_t		status;
+	mad_cm_mra_t		*p_mra;
+
+	AL_ENTER( AL_DBG_CM );
+
+	p_mra = (mad_cm_mra_t*)p_mad->p_mad_buf;
+
+	conn_mra_set_msg_mraed( msg_mraed, p_mra );
+
+	p_mra->local_comm_id = p_cep->local_comm_id;
+	p_mra->remote_comm_id = p_cep->remote_comm_id;
+
+	conn_mra_set_svc_timeout( p_cm_mra->svc_timeout, p_mra );
+	status = conn_mra_set_pdata(
+		p_cm_mra->p_mra_pdata, p_cm_mra->mra_length, p_mra );
+	if( status != IB_SUCCESS )
+	{
+		AL_EXIT( AL_DBG_CM );
+		return status;
+	}
+	conn_mra_clr_rsvd_fields( p_mra );
+
+	/* Save the MRA so we can repeat it if we get a repeated message. */
+	p_cep->mads.mra = *p_mra;
+
+	AL_EXIT( AL_DBG_CM );
+	return IB_SUCCESS;
+}
+\r
+\r
+/*
+ * Send an MRA (message receipt acknowledgement) for a received REQ,
+ * REP or LAP, based on the CEP's current state.  ORs CEP_STATE_MRA
+ * into the state on success.
+ */
+ib_api_status_t
+al_cep_mra(
+	IN				ib_al_handle_t				h_al,
+	IN				net32_t						cid,
+	IN		const	ib_cm_mra_t* const			p_cm_mra )
+{
+	ib_api_status_t		status;
+	kcep_t				*p_cep;
+	KLOCK_QUEUE_HANDLE	hdl;
+	cep_agent_t			*p_port_cep;
+	ib_mad_element_t	*p_mad;
+	uint8_t				msg_mraed;
+
+	AL_ENTER( AL_DBG_CM );
+
+	CL_ASSERT( h_al );
+	CL_ASSERT( p_cm_mra );
+
+	KeAcquireInStackQueuedSpinLock( &gp_cep_mgr->lock, &hdl );
+	p_cep = __lookup_cep( h_al, cid );
+	if( !p_cep )
+	{
+		KeReleaseInStackQueuedSpinLock( &hdl );
+		AL_EXIT( AL_DBG_CM );
+		return IB_INVALID_HANDLE;
+	}
+
+	/* The MRAed-message field encodes what is being acknowledged. */
+	switch( p_cep->state )
+	{
+	case CEP_STATE_REQ_RCVD:
+	case CEP_STATE_PRE_REP:
+		msg_mraed = 0;	/* Acknowledging a REQ. */
+		break;
+
+	case CEP_STATE_REP_RCVD:
+		msg_mraed = 1;	/* Acknowledging a REP. */
+		break;
+
+	case CEP_STATE_PRE_APR:
+	case CEP_STATE_LAP_RCVD:
+		msg_mraed = 2;	/* Acknowledging a LAP. */
+		break;
+
+	default:
+		status = IB_INVALID_STATE;
+		goto done;
+	}
+
+	status = __cep_get_mad( p_cep, CM_MRA_ATTR_ID, &p_port_cep, &p_mad );
+	if( status != IB_SUCCESS )
+		goto done;
+
+	status = __format_mra( p_cep, msg_mraed, p_cm_mra, p_mad );
+	if( status != IB_SUCCESS )
+	{
+		ib_put_mad( p_mad );
+		goto done;
+	}
+
+	p_cep->state |= CEP_STATE_MRA;
+
+	__cep_send_mad( p_port_cep, p_mad );
+	status = IB_SUCCESS;
+
+done:
+	KeReleaseInStackQueuedSpinLock( &hdl );
+	AL_EXIT( AL_DBG_CM );
+	return status;
+}
+\r
+\r
+\r
+/*
+ * Fill a LAP MAD: header, addressing, alternate path information
+ * (LIDs, GIDs, flow label, rate, SL, ACK timeout), comm IDs, remote
+ * QPN, response timeout and optional private data.
+ */
+static ib_api_status_t
+__format_lap(
+	IN				kcep_t* const				p_cep,
+	IN		const	ib_cm_lap_t* const			p_cm_lap,
+	IN	OUT			ib_mad_element_t* const		p_mad )
+{
+	ib_api_status_t		status;
+	mad_cm_lap_t		*p_lap;
+
+	AL_ENTER( AL_DBG_CM );
+
+	__format_mad_hdr( p_mad->p_mad_buf, p_cep, CM_LAP_ATTR_ID );
+
+	/* The LAP itself is sent on the current primary path. */
+	__format_mad_av( p_mad, &p_cep->av[p_cep->idx_primary] );
+
+	p_lap = (mad_cm_lap_t*)p_mad->p_mad_buf;
+
+	p_lap->alternate_path.local_lid = p_cm_lap->p_alt_path->slid;
+	p_lap->alternate_path.remote_lid = p_cm_lap->p_alt_path->dlid;
+	p_lap->alternate_path.local_gid = p_cm_lap->p_alt_path->sgid;
+	p_lap->alternate_path.remote_gid = p_cm_lap->p_alt_path->dgid;
+
+	/* Set Flow Label and Packet Rate */
+	conn_lap_path_set_flow_lbl(
+		ib_path_rec_flow_lbl( p_cm_lap->p_alt_path ), &p_lap->alternate_path );
+	conn_lap_path_set_tclass(
+		p_cm_lap->p_alt_path->tclass, &p_lap->alternate_path );
+
+	p_lap->alternate_path.hop_limit =
+		ib_path_rec_hop_limit( p_cm_lap->p_alt_path );
+	conn_lap_path_set_pkt_rate(
+		ib_path_rec_rate( p_cm_lap->p_alt_path ), &p_lap->alternate_path );
+
+	/* Set SL and Subnet Local */
+	conn_lap_path_set_svc_lvl(
+		ib_path_rec_sl( p_cm_lap->p_alt_path ), &p_lap->alternate_path );
+	conn_lap_path_set_subn_lcl(
+		ib_gid_is_link_local( &p_cm_lap->p_alt_path->dgid ),
+		&p_lap->alternate_path );
+
+	/* ACK timeout: 2 * alternate path packet life + local CA ack delay. */
+	conn_lap_path_set_lcl_ack_timeout(
+		calc_lcl_ack_timeout( ib_path_rec_pkt_life( p_cm_lap->p_alt_path ) + 1,
+		p_cep->local_ack_delay), &p_lap->alternate_path );
+
+	conn_lap_path_clr_rsvd_fields( &p_lap->alternate_path );
+
+	p_lap->local_comm_id = p_cep->local_comm_id;
+	p_lap->remote_comm_id = p_cep->remote_comm_id;
+	conn_lap_set_remote_qpn( p_cep->remote_qpn, p_lap );
+	conn_lap_set_resp_timeout( p_cm_lap->remote_resp_timeout, p_lap );
+
+	status = conn_lap_set_pdata(
+		p_cm_lap->p_lap_pdata, p_cm_lap->lap_length, p_lap );
+	if( status != IB_SUCCESS )
+	{
+		AL_TRACE_EXIT( AL_DBG_ERROR, ("lap pdata invalid.\n") );
+		return status;
+	}
+
+	conn_lap_clr_rsvd_fields( p_lap );
+
+	AL_EXIT( AL_DBG_CM );
+	return IB_SUCCESS;
+}
+\r
+\r
+/*
+ * Send a LAP (load alternate path) request on an established
+ * connection.  Only the side that took the active role may initiate a
+ * LAP.  The alternate AV is staged in alt_av until the peer's APR
+ * accepts it; on send success the CEP moves to CEP_STATE_LAP_SENT.
+ */
+ib_api_status_t
+al_cep_lap(
+	IN				ib_al_handle_t				h_al,
+	IN				net32_t						cid,
+	IN		const	ib_cm_lap_t* const			p_cm_lap )
+{
+	ib_api_status_t		status;
+	kcep_t				*p_cep;
+	KLOCK_QUEUE_HANDLE	hdl;
+	cep_agent_t			*p_port_cep;
+	ib_mad_element_t	*p_mad;
+
+	AL_ENTER( AL_DBG_CM );
+
+	CL_ASSERT( h_al );
+	CL_ASSERT( p_cm_lap );
+	CL_ASSERT( p_cm_lap->p_alt_path );
+
+	KeAcquireInStackQueuedSpinLock( &gp_cep_mgr->lock, &hdl );
+	p_cep = __lookup_cep( h_al, cid );
+	if( !p_cep )
+	{
+		KeReleaseInStackQueuedSpinLock( &hdl );
+		AL_EXIT( AL_DBG_CM );
+		return IB_INVALID_HANDLE;
+	}
+
+	switch( p_cep->state )
+	{
+	case CEP_STATE_ESTABLISHED:
+		if( !p_cep->was_active )
+		{
+			/* Only the side that took the active role can initiate a LAP. */
+			AL_TRACE( AL_DBG_ERROR,
+				("Only the active side of a connection can initiate a LAP.\n") );
+			status = IB_INVALID_STATE;
+			break;
+		}
+
+		/*
+		 * Format the AV information - store in the temporary location until we
+		 * get the APR indicating acceptance.
+		 */
+		p_port_cep = __format_path_av( p_cm_lap->p_alt_path, &p_cep->alt_av );
+		if( !p_port_cep )
+		{
+			AL_TRACE( AL_DBG_ERROR, ("Alternate path invalid!\n") );
+			status = IB_INVALID_SETTING;
+			break;
+		}
+
+		/* Carry the retry counts over from the current primary AV. */
+		p_cep->alt_av.attr.conn.seq_err_retry_cnt =
+			p_cep->av[p_cep->idx_primary].attr.conn.seq_err_retry_cnt;
+		p_cep->alt_av.attr.conn.rnr_retry_cnt =
+			p_cep->av[p_cep->idx_primary].attr.conn.rnr_retry_cnt;
+
+		if( p_port_cep->h_ca->obj.p_ci_ca->verbs.guid != p_cep->local_ca_guid )
+		{
+			AL_TRACE( AL_DBG_ERROR,
+				("Alternate CA GUID different from current!\n") );
+			status = IB_INVALID_SETTING;
+			break;
+		}
+
+		/* Store the alternate path info temporarily. */
+		p_cep->alt_2pkt_life = ib_path_rec_pkt_life( p_cm_lap->p_alt_path ) + 1;
+
+		status = ib_get_mad( p_port_cep->pool_key, MAD_BLOCK_SIZE, &p_mad );
+		if( status != IB_SUCCESS )
+			break;
+
+		status = __format_lap( p_cep, p_cm_lap, p_mad );
+		if( status != IB_SUCCESS )
+		{
+			/* Fix: return the MAD to its pool - it was leaked here. */
+			ib_put_mad( p_mad );
+			break;
+		}
+
+		status = __cep_send_retry( p_port_cep, p_cep, p_mad );
+		if( status == IB_SUCCESS )
+			p_cep->state = CEP_STATE_LAP_SENT;
+		break;
+
+	default:
+		status = IB_INVALID_STATE;
+		break;
+	}
+	KeReleaseInStackQueuedSpinLock( &hdl );
+	AL_EXIT( AL_DBG_CM );
+	return status;
+}
+\r
+\r
+/*
+ * Fill an APR (alternate path response) MAD: comm IDs, APR status,
+ * additional APR info and optional private data.  Returns an error if
+ * either the APR info or the private data does not fit.
+ */
+static ib_api_status_t
+__format_apr(
+	IN				kcep_t* const				p_cep,
+	IN		const	ib_cm_apr_t* const			p_cm_apr,
+	IN	OUT			ib_mad_element_t* const		p_mad )
+{
+	ib_api_status_t		status;
+	mad_cm_apr_t		*p_apr;
+
+	AL_ENTER( AL_DBG_CM );
+
+	p_apr = (mad_cm_apr_t*)p_mad->p_mad_buf;
+
+	p_apr->local_comm_id = p_cep->local_comm_id;
+	p_apr->remote_comm_id = p_cep->remote_comm_id;
+	p_apr->status = p_cm_apr->apr_status;
+
+	status = conn_apr_set_apr_info( p_cm_apr->p_info->data,
+		p_cm_apr->info_length, p_apr );
+	if( status != IB_SUCCESS )
+	{
+		AL_TRACE_EXIT( AL_DBG_ERROR, ("apr_info invalid\n") );
+		return status;
+	}
+
+	status = conn_apr_set_pdata( p_cm_apr->p_apr_pdata,
+		p_cm_apr->apr_length, p_apr );
+	if( status != IB_SUCCESS )
+	{
+		AL_TRACE_EXIT( AL_DBG_ERROR, ("apr pdata invalid\n") );
+		return status;
+	}
+
+	conn_apr_clr_rsvd_fields( p_apr );
+	AL_EXIT( AL_DBG_CM );
+	return IB_SUCCESS;
+}
+\r
+\r
+/*\r
+ * Validate and stage an APR reply for a received LAP.  On success the\r
+ * formatted MAD is held on the CEP (p_cep->p_mad) until al_cep_send_apr\r
+ * transmits it, and *p_apr receives the QP modify needed to load and\r
+ * re-arm the alternate path (only when the APR accepts the new path).\r
+ */\r
+ib_api_status_t\r
+al_cep_pre_apr(\r
+ IN ib_al_handle_t h_al,\r
+ IN net32_t cid,\r
+ IN const ib_cm_apr_t* const p_cm_apr,\r
+ OUT ib_qp_mod_t* const p_apr )\r
+{\r
+ ib_api_status_t status;\r
+ kcep_t *p_cep;\r
+ KLOCK_QUEUE_HANDLE hdl;\r
+ cep_agent_t *p_port_cep;\r
+\r
+ AL_ENTER( AL_DBG_CM );\r
+\r
+ CL_ASSERT( h_al );\r
+ CL_ASSERT( p_cm_apr );\r
+ CL_ASSERT( p_apr );\r
+\r
+ KeAcquireInStackQueuedSpinLock( &gp_cep_mgr->lock, &hdl );\r
+ p_cep = __lookup_cep( h_al, cid );\r
+ if( !p_cep )\r
+ {\r
+ KeReleaseInStackQueuedSpinLock( &hdl );\r
+ AL_EXIT( AL_DBG_CM );\r
+ return IB_INVALID_HANDLE;\r
+ }\r
+\r
+ switch( p_cep->state )\r
+ {\r
+ case CEP_STATE_PRE_APR:\r
+ case CEP_STATE_PRE_APR_MRA_SENT:\r
+ /* An APR was already staged - discard it and format a new one. */\r
+ CL_ASSERT( p_cep->p_mad );\r
+ ib_put_mad( p_cep->p_mad );\r
+ p_cep->p_mad = NULL;\r
+ /* Fall through. */\r
+ case CEP_STATE_LAP_RCVD:\r
+ case CEP_STATE_LAP_MRA_SENT:\r
+ CL_ASSERT( !p_cep->p_mad );\r
+ status = __cep_get_mad( p_cep, CM_APR_ATTR_ID, &p_port_cep, &p_cep->p_mad );\r
+ if( status != IB_SUCCESS )\r
+ break;\r
+\r
+ status = __format_apr( p_cep, p_cm_apr, p_cep->p_mad );\r
+ if( status != IB_SUCCESS )\r
+ {\r
+ ib_put_mad( p_cep->p_mad );\r
+ p_cep->p_mad = NULL;\r
+ break;\r
+ }\r
+\r
+ /* A zero apr_status means the alternate path is being accepted. */\r
+ if( !p_cm_apr->apr_status )\r
+ {\r
+ /*\r
+ * Copy the temporary AV and port GUID information into\r
+ * the alternate path.\r
+ */\r
+ p_cep->av[((p_cep->idx_primary + 1) & 0x1)] = p_cep->alt_av;\r
+\r
+ /* Update our maximum packet lifetime. */\r
+ p_cep->max_2pkt_life =\r
+ max( p_cep->max_2pkt_life, p_cep->alt_2pkt_life );\r
+\r
+ /* Update our timewait time. */\r
+ __calc_timewait( p_cep );\r
+\r
+ /* Fill in the QP attributes. */\r
+ cl_memclr( p_apr, sizeof(ib_qp_mod_t) );\r
+ p_apr->req_state = IB_QPS_RTS;\r
+ p_apr->state.rts.opts =\r
+ IB_MOD_QP_ALTERNATE_AV | IB_MOD_QP_APM_STATE;\r
+ p_apr->state.rts.alternate_av = p_cep->alt_av.attr;\r
+ p_apr->state.rts.apm_state = IB_APM_REARM;\r
+ }\r
+\r
+ /* Mark the CEP as having a staged (pre-formatted) reply. */\r
+ p_cep->state |= CEP_STATE_PREP;\r
+ status = IB_SUCCESS;\r
+ break;\r
+\r
+ default:\r
+ status = IB_INVALID_STATE;\r
+ break;\r
+ }\r
+ KeReleaseInStackQueuedSpinLock( &hdl );\r
+ AL_EXIT( AL_DBG_CM );\r
+ return status;\r
+}\r
+\r
+\r
+/*\r
+ * Send the APR previously staged by al_cep_pre_apr and return the\r
+ * connection to the established state.  The staged MAD is consumed\r
+ * (sent or released) in either case.\r
+ */\r
+ib_api_status_t\r
+al_cep_send_apr(\r
+ IN ib_al_handle_t h_al,\r
+ IN net32_t cid )\r
+{\r
+ ib_api_status_t status;\r
+ kcep_t *p_cep;\r
+ KLOCK_QUEUE_HANDLE hdl;\r
+ cep_agent_t *p_port_cep;\r
+\r
+ AL_ENTER( AL_DBG_CM );\r
+\r
+ CL_ASSERT( h_al );\r
+\r
+ KeAcquireInStackQueuedSpinLock( &gp_cep_mgr->lock, &hdl );\r
+ p_cep = __lookup_cep( h_al, cid );\r
+ if( !p_cep )\r
+ {\r
+ KeReleaseInStackQueuedSpinLock( &hdl );\r
+ AL_EXIT( AL_DBG_CM );\r
+ return IB_INVALID_HANDLE;\r
+ }\r
+\r
+ switch( p_cep->state )\r
+ {\r
+ case CEP_STATE_PRE_APR:\r
+ case CEP_STATE_PRE_APR_MRA_SENT:\r
+ CL_ASSERT( p_cep->p_mad );\r
+ p_port_cep = __get_cep_agent( p_cep );\r
+ if( !p_port_cep )\r
+ {\r
+ /* No agent to send on - release the staged MAD. */\r
+ ib_put_mad( p_cep->p_mad );\r
+ status = IB_INSUFFICIENT_RESOURCES;\r
+ }\r
+ else\r
+ {\r
+ p_cep->state = CEP_STATE_ESTABLISHED;\r
+\r
+ __cep_send_mad( p_port_cep, p_cep->p_mad );\r
+ status = IB_SUCCESS;\r
+ }\r
+ /* The staged MAD is consumed on both paths above. */\r
+ p_cep->p_mad = NULL;\r
+ break;\r
+\r
+ default:\r
+ status = IB_INVALID_STATE;\r
+ break;\r
+ }\r
+ KeReleaseInStackQueuedSpinLock( &hdl );\r
+ AL_EXIT( AL_DBG_CM );\r
+ return status;\r
+}\r
+\r
+\r
+/*\r
+ * Initiate disconnection by sending a DREQ with optional private data.\r
+ * If the DREQ cannot be posted for retry the CEP goes straight to\r
+ * timewait; the call still reports success since teardown is under way.\r
+ */\r
+ib_api_status_t\r
+al_cep_dreq(\r
+ IN ib_al_handle_t h_al,\r
+ IN net32_t cid,\r
+ IN const uint8_t* const p_pdata,\r
+ IN const uint8_t pdata_len )\r
+{\r
+ ib_api_status_t status;\r
+ kcep_t *p_cep;\r
+ KLOCK_QUEUE_HANDLE hdl;\r
+ cep_agent_t *p_port_cep;\r
+ ib_mad_element_t *p_mad;\r
+\r
+ AL_ENTER( AL_DBG_CM );\r
+\r
+ CL_ASSERT( h_al );\r
+\r
+ KeAcquireInStackQueuedSpinLock( &gp_cep_mgr->lock, &hdl );\r
+ p_cep = __lookup_cep( h_al, cid );\r
+ if( !p_cep )\r
+ {\r
+ KeReleaseInStackQueuedSpinLock( &hdl );\r
+ AL_EXIT( AL_DBG_CM );\r
+ return IB_INVALID_HANDLE;\r
+ }\r
+\r
+ switch( p_cep->state )\r
+ {\r
+ /* Disconnect is valid from established and any LAP sub-state. */\r
+ case CEP_STATE_ESTABLISHED:\r
+ case CEP_STATE_LAP_SENT:\r
+ case CEP_STATE_LAP_RCVD:\r
+ case CEP_STATE_LAP_MRA_SENT:\r
+ case CEP_STATE_LAP_MRA_RCVD:\r
+ status = __cep_get_mad( p_cep, CM_DREQ_ATTR_ID, &p_port_cep, &p_mad );\r
+ if( status != IB_SUCCESS )\r
+ break;\r
+\r
+ status = __format_dreq( p_cep, p_pdata, pdata_len, p_mad );\r
+ if( status != IB_SUCCESS )\r
+ break;\r
+\r
+ if( __cep_send_retry( p_port_cep, p_cep, p_mad ) == IB_SUCCESS )\r
+ {\r
+ p_cep->state = CEP_STATE_DREQ_SENT;\r
+ }\r
+ else\r
+ {\r
+ /* Could not post the send - skip DREQ/DREP and go to timewait. */\r
+ p_cep->state = CEP_STATE_TIMEWAIT;\r
+ __insert_timewait( p_cep );\r
+ }\r
+\r
+ status = IB_SUCCESS;\r
+ break;\r
+\r
+ default:\r
+ status = IB_INVALID_STATE;\r
+ break;\r
+ }\r
+ KeReleaseInStackQueuedSpinLock( &hdl );\r
+ AL_EXIT( AL_DBG_CM );\r
+ return status;\r
+}\r
+\r
+\r
+/*\r
+ * Reply to a received DREQ with a DREP and move the CEP into timewait.\r
+ * Only valid while a DREQ is outstanding (CEP_STATE_DREQ_RCVD).\r
+ */\r
+ib_api_status_t\r
+al_cep_drep(\r
+ IN ib_al_handle_t h_al,\r
+ IN net32_t cid,\r
+ IN const ib_cm_drep_t* const p_cm_drep )\r
+{\r
+ ib_api_status_t status;\r
+ kcep_t *p_cep;\r
+ KLOCK_QUEUE_HANDLE hdl;\r
+ cep_agent_t *p_port_cep;\r
+ ib_mad_element_t *p_mad;\r
+\r
+ AL_ENTER( AL_DBG_CM );\r
+\r
+ CL_ASSERT( h_al );\r
+ CL_ASSERT( p_cm_drep );\r
+\r
+ KeAcquireInStackQueuedSpinLock( &gp_cep_mgr->lock, &hdl );\r
+ p_cep = __lookup_cep( h_al, cid );\r
+ if( !p_cep )\r
+ {\r
+ KeReleaseInStackQueuedSpinLock( &hdl );\r
+ AL_EXIT( AL_DBG_CM );\r
+ return IB_INVALID_HANDLE;\r
+ }\r
+\r
+ switch( p_cep->state )\r
+ {\r
+ case CEP_STATE_DREQ_RCVD:\r
+ status = __cep_get_mad( p_cep, CM_DREP_ATTR_ID, &p_port_cep, &p_mad );\r
+ if( status != IB_SUCCESS )\r
+ break;\r
+\r
+ status = __format_drep( p_cep, p_cm_drep->p_drep_pdata,\r
+ p_cm_drep->drep_length, (mad_cm_drep_t*)p_mad->p_mad_buf );\r
+ if( status != IB_SUCCESS )\r
+ break;\r
+\r
+ /* DREP is unacknowledged - send once and enter timewait. */\r
+ __cep_send_mad( p_port_cep, p_mad );\r
+ p_cep->state = CEP_STATE_TIMEWAIT;\r
+ __insert_timewait( p_cep );\r
+ break;\r
+\r
+ default:\r
+ status = IB_INVALID_STATE;\r
+ break;\r
+ }\r
+ KeReleaseInStackQueuedSpinLock( &hdl );\r
+ AL_EXIT( AL_DBG_CM );\r
+ return status;\r
+}\r
+\r
+\r
+/*\r
+ * Switch the connection's primary path to the loaded alternate path\r
+ * (called after path migration).  Fails with IB_INVALID_STATE when no\r
+ * alternate path has been loaded or the CEP state does not allow it.\r
+ */\r
+ib_api_status_t\r
+al_cep_migrate(\r
+ IN ib_al_handle_t h_al,\r
+ IN net32_t cid )\r
+{\r
+ ib_api_status_t status;\r
+ kcep_t *p_cep;\r
+ KLOCK_QUEUE_HANDLE hdl;\r
+\r
+ AL_ENTER( AL_DBG_CM );\r
+\r
+ CL_ASSERT( h_al );\r
+\r
+ KeAcquireInStackQueuedSpinLock( &gp_cep_mgr->lock, &hdl );\r
+ p_cep = __lookup_cep( h_al, cid );\r
+ if( !p_cep )\r
+ {\r
+ KeReleaseInStackQueuedSpinLock( &hdl );\r
+ AL_EXIT( AL_DBG_CM );\r
+ return IB_INVALID_HANDLE;\r
+ }\r
+\r
+ switch( p_cep->state )\r
+ {\r
+ case CEP_STATE_ESTABLISHED:\r
+ case CEP_STATE_LAP_SENT:\r
+ case CEP_STATE_LAP_RCVD:\r
+ case CEP_STATE_LAP_MRA_SENT:\r
+ case CEP_STATE_LAP_MRA_RCVD:\r
+ /* A non-zero port GUID in the other AV slot means a path is loaded. */\r
+ if( p_cep->av[(p_cep->idx_primary + 1) & 0x1].port_guid )\r
+ {\r
+ /* Toggle the primary index between the two AV slots. */\r
+ p_cep->idx_primary++;\r
+ p_cep->idx_primary &= 0x1;\r
+ status = IB_SUCCESS;\r
+ break;\r
+ }\r
+\r
+ AL_TRACE( AL_DBG_ERROR, ("No alternate path available.\n") );\r
+\r
+ /* Fall through. */\r
+ default:\r
+ status = IB_INVALID_STATE;\r
+ break;\r
+ }\r
+ KeReleaseInStackQueuedSpinLock( &hdl );\r
+ AL_EXIT( AL_DBG_CM );\r
+ return status;\r
+}\r
+\r
+\r
+/*\r
+ * Force a connection with an outstanding REP into the established state\r
+ * (e.g. when data arrives before the RTU).  Cancels the pending REP\r
+ * retries.\r
+ */\r
+ib_api_status_t\r
+al_cep_established(\r
+ IN ib_al_handle_t h_al,\r
+ IN net32_t cid )\r
+{\r
+ ib_api_status_t status;\r
+ kcep_t *p_cep;\r
+ KLOCK_QUEUE_HANDLE hdl;\r
+\r
+ AL_ENTER( AL_DBG_CM );\r
+\r
+ CL_ASSERT( h_al );\r
+\r
+ KeAcquireInStackQueuedSpinLock( &gp_cep_mgr->lock, &hdl );\r
+ p_cep = __lookup_cep( h_al, cid );\r
+ if( !p_cep )\r
+ {\r
+ KeReleaseInStackQueuedSpinLock( &hdl );\r
+ AL_EXIT( AL_DBG_CM );\r
+ return IB_INVALID_HANDLE;\r
+ }\r
+\r
+ switch( p_cep->state )\r
+ {\r
+ case CEP_STATE_REP_SENT:\r
+ case CEP_STATE_REP_MRA_RCVD:\r
+ /* Stop retrying the REP - the peer is clearly up. */\r
+ CL_ASSERT( p_cep->p_send_mad );\r
+ ib_cancel_mad( p_cep->h_mad_svc, p_cep->p_send_mad );\r
+ p_cep->p_send_mad = NULL;\r
+ p_cep->state = CEP_STATE_ESTABLISHED;\r
+ status = IB_SUCCESS;\r
+ break;\r
+\r
+ default:\r
+ status = IB_INVALID_STATE;\r
+ break;\r
+ }\r
+ KeReleaseInStackQueuedSpinLock( &hdl );\r
+ AL_EXIT( AL_DBG_CM );\r
+ return status;\r
+}\r
+\r
+\r
+/*\r
+ * Build the QP modify needed to transition the connection's QP to RTR,\r
+ * based on the negotiated CEP parameters.  The alternate AV is included\r
+ * only if one has been loaded.\r
+ */\r
+ib_api_status_t\r
+al_cep_get_rtr_attr(\r
+ IN ib_al_handle_t h_al,\r
+ IN net32_t cid,\r
+ OUT ib_qp_mod_t* const p_rtr )\r
+{\r
+ ib_api_status_t status;\r
+ kcep_t *p_cep;\r
+ KLOCK_QUEUE_HANDLE hdl;\r
+\r
+ AL_ENTER( AL_DBG_CM );\r
+\r
+ CL_ASSERT( h_al );\r
+ CL_ASSERT( p_rtr );\r
+\r
+ KeAcquireInStackQueuedSpinLock( &gp_cep_mgr->lock, &hdl );\r
+ p_cep = __lookup_cep( h_al, cid );\r
+ if( !p_cep )\r
+ {\r
+ KeReleaseInStackQueuedSpinLock( &hdl );\r
+ AL_EXIT( AL_DBG_CM );\r
+ return IB_INVALID_HANDLE;\r
+ }\r
+\r
+ switch( p_cep->state )\r
+ {\r
+ case CEP_STATE_PRE_REP:\r
+ case CEP_STATE_PRE_REP_MRA_SENT:\r
+ case CEP_STATE_REP_SENT:\r
+ case CEP_STATE_REP_MRA_RCVD:\r
+ case CEP_STATE_REP_RCVD:\r
+ case CEP_STATE_REP_MRA_SENT:\r
+ case CEP_STATE_ESTABLISHED:\r
+ cl_memclr( p_rtr, sizeof(ib_qp_mod_t) );\r
+ p_rtr->req_state = IB_QPS_RTR;\r
+\r
+ /* Required params. */\r
+ p_rtr->state.rtr.rq_psn = p_cep->rq_psn;\r
+ p_rtr->state.rtr.dest_qp = p_cep->remote_qpn;\r
+ p_rtr->state.rtr.primary_av = p_cep->av[p_cep->idx_primary].attr;\r
+ p_rtr->state.rtr.resp_res = p_cep->resp_res;\r
+ p_rtr->state.rtr.rnr_nak_timeout = p_cep->rnr_nak_timeout;\r
+\r
+ /* Optional params. */\r
+ p_rtr->state.rtr.opts = 0;\r
+ /* Non-zero port GUID in the other AV slot => alternate path loaded. */\r
+ if( p_cep->av[(p_cep->idx_primary + 1) & 0x1].port_guid )\r
+ {\r
+ p_rtr->state.rtr.opts |= IB_MOD_QP_ALTERNATE_AV;\r
+ p_rtr->state.rtr.alternate_av =\r
+ p_cep->av[(p_cep->idx_primary + 1) & 0x1].attr;\r
+ }\r
+ status = IB_SUCCESS;\r
+ break;\r
+\r
+ default:\r
+ status = IB_INVALID_STATE;\r
+ break;\r
+ }\r
+ KeReleaseInStackQueuedSpinLock( &hdl );\r
+ AL_EXIT( AL_DBG_CM );\r
+ return status;\r
+}\r
+\r
+\r
+/*\r
+ * Build the QP modify needed to transition the connection's QP to RTS,\r
+ * based on the negotiated CEP parameters.  If an alternate path is\r
+ * loaded, the modify also loads it and re-arms APM.\r
+ */\r
+ib_api_status_t\r
+al_cep_get_rts_attr(\r
+ IN ib_al_handle_t h_al,\r
+ IN net32_t cid,\r
+ OUT ib_qp_mod_t* const p_rts )\r
+{\r
+ ib_api_status_t status;\r
+ kcep_t *p_cep;\r
+ KLOCK_QUEUE_HANDLE hdl;\r
+\r
+ AL_ENTER( AL_DBG_CM );\r
+\r
+ CL_ASSERT( h_al );\r
+ CL_ASSERT( p_rts );\r
+\r
+ KeAcquireInStackQueuedSpinLock( &gp_cep_mgr->lock, &hdl );\r
+ p_cep = __lookup_cep( h_al, cid );\r
+ if( !p_cep )\r
+ {\r
+ KeReleaseInStackQueuedSpinLock( &hdl );\r
+ AL_EXIT( AL_DBG_CM );\r
+ return IB_INVALID_HANDLE;\r
+ }\r
+\r
+ switch( p_cep->state )\r
+ {\r
+ case CEP_STATE_REQ_SENT:\r
+ case CEP_STATE_REQ_RCVD:\r
+ case CEP_STATE_REQ_MRA_SENT:\r
+ case CEP_STATE_REQ_MRA_RCVD:\r
+ case CEP_STATE_REP_SENT:\r
+ case CEP_STATE_REP_RCVD:\r
+ case CEP_STATE_REP_MRA_SENT:\r
+ case CEP_STATE_REP_MRA_RCVD:\r
+ case CEP_STATE_PRE_REP:\r
+ case CEP_STATE_PRE_REP_MRA_SENT:\r
+ case CEP_STATE_ESTABLISHED:\r
+ cl_memclr( p_rts, sizeof(ib_qp_mod_t) );\r
+ p_rts->req_state = IB_QPS_RTS;\r
+\r
+ /* Required params. */\r
+ p_rts->state.rts.sq_psn = p_cep->sq_psn;\r
+ p_rts->state.rts.retry_cnt =\r
+ p_cep->av[p_cep->idx_primary].attr.conn.seq_err_retry_cnt;\r
+ p_rts->state.rts.rnr_retry_cnt =\r
+ p_cep->av[p_cep->idx_primary].attr.conn.rnr_retry_cnt;\r
+ p_rts->state.rts.local_ack_timeout =\r
+ p_cep->av[p_cep->idx_primary].attr.conn.local_ack_timeout;\r
+ p_rts->state.rts.init_depth = p_cep->init_depth;\r
+\r
+ /* Optional params. */\r
+ p_rts->state.rts.opts = 0;\r
+ /* Non-zero port GUID in the other AV slot => alternate path loaded. */\r
+ if( p_cep->av[(p_cep->idx_primary + 1) & 0x1].port_guid )\r
+ {\r
+ p_rts->state.rts.opts =\r
+ IB_MOD_QP_ALTERNATE_AV | IB_MOD_QP_APM_STATE;\r
+ p_rts->state.rts.apm_state = IB_APM_REARM;\r
+ p_rts->state.rts.alternate_av =\r
+ p_cep->av[(p_cep->idx_primary + 1) & 0x1].attr;\r
+ }\r
+ status = IB_SUCCESS;\r
+ break;\r
+\r
+ default:\r
+ status = IB_INVALID_STATE;\r
+ break;\r
+ }\r
+ KeReleaseInStackQueuedSpinLock( &hdl );\r
+ AL_EXIT( AL_DBG_CM );\r
+ return status;\r
+}\r
+\r
+\r
+/*\r
+ * Return the connection's timewait interval in microseconds.\r
+ */\r
+ib_api_status_t\r
+al_cep_get_timewait(\r
+ IN ib_al_handle_t h_al,\r
+ IN net32_t cid,\r
+ OUT uint64_t* const p_timewait_us )\r
+{\r
+ kcep_t *p_cep;\r
+ KLOCK_QUEUE_HANDLE hdl;\r
+\r
+ AL_ENTER( AL_DBG_CM );\r
+\r
+ CL_ASSERT( h_al );\r
+\r
+ KeAcquireInStackQueuedSpinLock( &gp_cep_mgr->lock, &hdl );\r
+ p_cep = __lookup_cep( h_al, cid );\r
+ if( !p_cep )\r
+ {\r
+ KeReleaseInStackQueuedSpinLock( &hdl );\r
+ AL_EXIT( AL_DBG_CM );\r
+ return IB_INVALID_HANDLE;\r
+ }\r
+\r
+ /* timewait_time is presumably in 100ns units; /10 yields microseconds. */\r
+ *p_timewait_us = p_cep->timewait_time.QuadPart / 10;\r
+ KeReleaseInStackQueuedSpinLock( &hdl );\r
+ AL_EXIT( AL_DBG_CM );\r
+ return IB_SUCCESS;\r
+}\r
+\r
+\r
+/*\r
+ * Dequeue the next received CM MAD for the given CEP.  Returns\r
+ * IB_NOT_DONE (and clears the signalled flag) when no MADs are queued.\r
+ * If the MAD carries a new (passive-side) CEP in send_context1, its\r
+ * identity is returned in *p_new_cep; otherwise *p_new_cep is cleared.\r
+ */\r
+ib_api_status_t\r
+al_cep_poll(\r
+ IN ib_al_handle_t h_al,\r
+ IN net32_t cid,\r
+ IN OUT ib_cep_t* const p_new_cep,\r
+ OUT ib_mad_element_t** const pp_mad )\r
+{\r
+ ib_api_status_t status;\r
+ kcep_t *p_cep;\r
+ KLOCK_QUEUE_HANDLE hdl;\r
+\r
+ AL_ENTER( AL_DBG_CM );\r
+\r
+ CL_ASSERT( h_al );\r
+ CL_ASSERT( p_new_cep );\r
+ CL_ASSERT( pp_mad );\r
+\r
+ KeAcquireInStackQueuedSpinLock( &gp_cep_mgr->lock, &hdl );\r
+ p_cep = __lookup_cep( h_al, cid );\r
+ if( !p_cep )\r
+ {\r
+ KeReleaseInStackQueuedSpinLock( &hdl );\r
+ AL_EXIT( AL_DBG_CM );\r
+ return IB_INVALID_HANDLE;\r
+ }\r
+\r
+ if( !p_cep->p_mad_head )\r
+ {\r
+ /* Queue drained - require a new signal before the next poll. */\r
+ p_cep->signalled = FALSE;\r
+ status = IB_NOT_DONE;\r
+ goto done;\r
+ }\r
+\r
+ /* Set the MAD. */\r
+ *pp_mad = p_cep->p_mad_head;\r
+ p_cep->p_mad_head = p_cep->p_mad_head->p_next;\r
+ (*pp_mad)->p_next = NULL;\r
+\r
+ /* We're done with the input CEP.  Reuse the variable. */\r
+ p_cep = (kcep_t* __ptr64)(*pp_mad)->send_context1;\r
+ if( p_cep )\r
+ {\r
+ *p_new_cep = p_cep->cep;\r
+ }\r
+ else\r
+ {\r
+ p_new_cep->context = NULL;\r
+ p_new_cep->cid = AL_INVALID_CID;\r
+ }\r
+\r
+ status = IB_SUCCESS;\r
+\r
+done:\r
+ KeReleaseInStackQueuedSpinLock( &hdl );\r
+ AL_EXIT( AL_DBG_CM );\r
+ return status;\r
+}\r
+\r
+\r
+/*\r
+ * Kernel-mode only call, so we assert on parameters that we expect kernel\r
+ * clients to always provide.\r
+ *\r
+ * Atomically exchange the IRP used to signal MAD availability: the\r
+ * previously queued IRP (if any) is always returned in *pp_old_irp for\r
+ * the caller to complete.  If MADs are already queued, the new IRP is\r
+ * not stored and IB_NOT_DONE is returned so the caller completes it\r
+ * immediately.\r
+ */\r
+ib_api_status_t\r
+al_cep_xchg_irp(\r
+ IN ib_al_handle_t h_al,\r
+ IN net32_t cid,\r
+ IN IRP* const p_new_irp,\r
+ OUT IRP** const pp_old_irp )\r
+{\r
+ ib_api_status_t status;\r
+ kcep_t *p_cep;\r
+ KLOCK_QUEUE_HANDLE hdl;\r
+\r
+ AL_ENTER( AL_DBG_CM );\r
+\r
+ CL_ASSERT( h_al );\r
+ CL_ASSERT( pp_old_irp );\r
+\r
+ KeAcquireInStackQueuedSpinLock( &gp_cep_mgr->lock, &hdl );\r
+ p_cep = __lookup_cep( h_al, cid );\r
+ if( !p_cep )\r
+ {\r
+ KeReleaseInStackQueuedSpinLock( &hdl );\r
+ AL_EXIT( AL_DBG_CM );\r
+ return IB_INVALID_HANDLE;\r
+ }\r
+\r
+ /* Always dequeue whatever IRP is there. */\r
+ *pp_old_irp = p_cep->p_irp;\r
+\r
+ /* Don't allow queueing the IRP if there are MADs to be reaped. */\r
+ if( p_cep->p_mad_head )\r
+ {\r
+ p_cep->p_irp = NULL;\r
+ status = IB_NOT_DONE;\r
+ }\r
+ else\r
+ {\r
+ p_cep->p_irp = p_new_irp;\r
+ status = IB_SUCCESS;\r
+ }\r
+\r
+ KeReleaseInStackQueuedSpinLock( &hdl );\r
+ AL_EXIT( AL_DBG_CM );\r
+ return status;\r
+}\r
+\r
+\r
+/*\r
+ * Remove the given IRP from the CEP if it is the one currently queued.\r
+ * Returns IB_NOT_DONE when a different (or no) IRP is queued, so the\r
+ * caller knows ownership was not transferred.\r
+ */\r
+ib_api_status_t\r
+al_cep_cancel_irp(\r
+ IN ib_al_handle_t h_al,\r
+ IN net32_t cid,\r
+ IN IRP* const p_irp )\r
+{\r
+ ib_api_status_t status;\r
+ kcep_t *p_cep;\r
+ KLOCK_QUEUE_HANDLE hdl;\r
+\r
+ AL_ENTER( AL_DBG_CM );\r
+\r
+ CL_ASSERT( h_al );\r
+ CL_ASSERT( p_irp );\r
+\r
+ KeAcquireInStackQueuedSpinLock( &gp_cep_mgr->lock, &hdl );\r
+ p_cep = __lookup_cep( h_al, cid );\r
+ if( !p_cep )\r
+ {\r
+ KeReleaseInStackQueuedSpinLock( &hdl );\r
+ AL_EXIT( AL_DBG_CM );\r
+ return IB_INVALID_HANDLE;\r
+ }\r
+\r
+ if( p_cep->p_irp == p_irp )\r
+ {\r
+ p_cep->p_irp = NULL;\r
+ status = IB_SUCCESS;\r
+ }\r
+ else\r
+ {\r
+ status = IB_NOT_DONE;\r
+ }\r
+\r
+ KeReleaseInStackQueuedSpinLock( &hdl );\r
+ AL_EXIT( AL_DBG_CM );\r
+ return status;\r
+}\r
+\r
+\r
+/*\r
+ * Destroy every CEP still bound to the given AL instance.  Called when\r
+ * the AL instance is torn down (see free_al).\r
+ */\r
+void\r
+al_cep_cleanup_al(\r
+ IN const ib_al_handle_t h_al )\r
+{\r
+ cl_list_item_t *p_item;\r
+ net32_t cid;\r
+\r
+ AL_ENTER( AL_DBG_CM );\r
+\r
+ /* Destroy all CEPs associated with the input instance of AL. */\r
+ cl_spinlock_acquire( &h_al->obj.lock );\r
+ for( p_item = cl_qlist_head( &h_al->cep_list );\r
+ p_item != cl_qlist_end( &h_al->cep_list );\r
+ p_item = cl_qlist_head( &h_al->cep_list ) )\r
+ {\r
+ /*\r
+ * Note that we don't walk the list - we can't hold the AL\r
+ * lock when cleaning up its CEPs because the cleanup path\r
+ * takes the CEP's lock.  We always want to take the CEP\r
+ * before the AL lock to prevent any possibilities of deadlock.\r
+ *\r
+ * So we just get the CID, and then release the AL lock and try to\r
+ * destroy.  This should unbind the CEP from the AL instance and\r
+ * remove it from the list, allowing the next CEP to be cleaned up\r
+ * in the next pass through.\r
+ */\r
+ cid = PARENT_STRUCT( p_item, kcep_t, al_item )->cep.cid;\r
+ cl_spinlock_release( &h_al->obj.lock );\r
+ al_destroy_cep( h_al, cid, NULL );\r
+ cl_spinlock_acquire( &h_al->obj.lock );\r
+ }\r
+ cl_spinlock_release( &h_al->obj.lock );\r
+\r
+ AL_EXIT( AL_DBG_CM );\r
+}\r
IN al_dev_open_context_t *p_context )\r
{\r
al_handle_t *p_h;\r
- ib_cm_handle_t h_cm;\r
- union _u\r
- {\r
- ib_cm_rej_t cm_rej;\r
- ib_cm_drep_t cm_drep;\r
- ib_cm_apr_t cm_apr;\r
- } u;\r
size_t i;\r
- uint32_t cm_subtype;\r
\r
CL_ENTER( AL_DBG_DEV, g_al_dbg_lvl );\r
\r
al_hdl_free( p_context->h_al, i );\r
break;\r
\r
- case AL_OBJ_TYPE_H_CONN:\r
- h_cm = (ib_cm_handle_t)p_h->p_obj;\r
- cm_subtype = AL_SUBTYPE( p_h->type );\r
- al_hdl_free( p_context->h_al, i );\r
- cl_spinlock_release( &p_context->h_al->obj.lock );\r
- switch( cm_subtype )\r
- {\r
- case AL_OBJ_SUBTYPE_REQ:\r
- case AL_OBJ_SUBTYPE_REP:\r
- /* Reject any outstanding connections. */\r
- cl_memclr( &u.cm_rej, sizeof( ib_cm_rej_t ) );\r
- u.cm_rej.rej_status = IB_REJ_TIMEOUT;\r
- ib_cm_rej( h_cm, &u.cm_rej );\r
- break;\r
-\r
- case AL_OBJ_SUBTYPE_DREQ:\r
- /* Issue a disconnect reply to any requests. */\r
- cl_memclr( &u.cm_drep, sizeof( ib_cm_drep_t ) );\r
- ib_cm_drep( h_cm, &u.cm_drep );\r
- break;\r
-\r
- case AL_OBJ_SUBTYPE_LAP:\r
- /* Reject the LAP. */\r
- cl_memclr( &u.cm_apr, sizeof( ib_cm_apr_t ) );\r
- u.cm_apr.apr_status = IB_AP_REJECT;\r
- ib_cm_apr( h_cm, &u.cm_apr );\r
- break;\r
-\r
- default:\r
- break;\r
- }\r
- cl_spinlock_acquire( &p_context->h_al->obj.lock );\r
- break;\r
-\r
case AL_OBJ_TYPE_H_SA_REQ:\r
al_cancel_sa_req( (al_sa_req_t*)p_h->p_obj );\r
break;\r
cl_status = proxy_ioctl( h_ioctl, &ret_bytes );\r
else if( IS_VERBS_IOCTL(cl_ioctl_ctl_code( h_ioctl )) )\r
cl_status = verbs_ioctl( h_ioctl, &ret_bytes );\r
- else if( IS_CM_IOCTL(cl_ioctl_ctl_code( h_ioctl )) )\r
- cl_status = cm_ioctl( h_ioctl, &ret_bytes );\r
+ //else if( IS_CM_IOCTL(cl_ioctl_ctl_code( h_ioctl )) )\r
+ // cl_status = cm_ioctl( h_ioctl, &ret_bytes );\r
+ else if( IS_CEP_IOCTL(cl_ioctl_ctl_code( h_ioctl )) )\r
+ cl_status = cep_ioctl( h_ioctl, &ret_bytes );\r
else if( IS_AL_IOCTL(cl_ioctl_ctl_code( h_ioctl )) )\r
cl_status = al_ioctl( h_ioctl, &ret_bytes );\r
else if( IS_SUBNET_IOCTL(cl_ioctl_ctl_code( h_ioctl )) )\r
p_mad_send->mad_send.h_av = NULL;\r
p_mad_send->mad_send.retry_cnt = 0;\r
p_mad_send->mad_send.retry_time = 0;\r
+ p_mad_send->mad_send.delay = 0;\r
p_mad_send->h_pool = p_mad_item->pool_key->h_pool;\r
\r
ref_al_obj( &p_mad_item->pool_key->h_pool->obj );\r
#include <iba/ib_ci.h>\r
\r
#include "al.h"\r
-#include "al_cm.h"\r
+#include "al_cm_cep.h"\r
#include "al_debug.h"\r
#include "al_dm.h"\r
#include "al_mad_pool.h"\r
}\r
\r
/* Initialize CM */\r
- status = create_cm( &gp_al_mgr->obj );\r
+ status = create_cep_mgr( &gp_al_mgr->obj );\r
if( status != IB_SUCCESS )\r
{\r
gp_al_mgr->obj.pfn_destroy( &gp_al_mgr->obj, NULL );\r
cl_qlist_init( &h_al->mad_list );\r
cl_qlist_init( &h_al->key_list );\r
cl_qlist_init( &h_al->query_list );\r
- cl_qlist_init( &h_al->conn_list );\r
+ cl_qlist_init( &h_al->cep_list );\r
\r
cl_vector_construct( &h_al->hdl_vector );\r
\r
return p_obj;\r
}\r
\r
-\r
-al_conn_t*\r
-al_hdl_ref_conn(\r
- IN const ib_al_handle_t h_al,\r
- IN const uint64_t hdl,\r
- IN const uint32_t sub_type )\r
-{\r
- al_handle_t *p_h;\r
- al_conn_t *p_conn;\r
-\r
- cl_spinlock_acquire( &h_al->obj.lock );\r
-\r
- /* Validate index. */\r
- if( hdl >= cl_vector_get_size( &h_al->hdl_vector ) )\r
- {\r
- cl_spinlock_release( &h_al->obj.lock );\r
- return NULL;\r
- }\r
-\r
- /* Get the specified entry. */\r
- p_h = (al_handle_t*)cl_vector_get_ptr( &h_al->hdl_vector, (size_t)hdl );\r
-\r
- /*\r
- * Make sure that the handle is valid and the correct type. Note that we\r
- * support having multiple possible subtypes provided, and check against\r
- * any of them.\r
- */\r
- if( (AL_BASE_TYPE( p_h->type ) != AL_OBJ_TYPE_H_CONN) ||\r
- ((AL_SUBTYPE( p_h->type ) & sub_type) == 0) )\r
- {\r
- cl_spinlock_release( &h_al->obj.lock );\r
- return NULL;\r
- }\r
-\r
- p_conn = (al_conn_t*)p_h->p_obj;\r
-\r
- __ref_conn( p_conn );\r
-\r
- cl_spinlock_release( &h_al->obj.lock );\r
- return p_conn;\r
-}\r
-\r
-\r
-al_conn_t*\r
-al_hdl_get_conn(\r
- IN const ib_al_handle_t h_al,\r
- IN const uint64_t hdl,\r
- IN const uint32_t sub_type )\r
-{\r
- al_handle_t *p_h;\r
- al_conn_t *p_conn;\r
-\r
- cl_spinlock_acquire( &h_al->obj.lock );\r
-\r
- /* Validate index. */\r
- if( hdl >= cl_vector_get_size( &h_al->hdl_vector ) )\r
- {\r
- cl_spinlock_release( &h_al->obj.lock );\r
- return NULL;\r
- }\r
-\r
- /* Get the specified entry. */\r
- p_h = (al_handle_t*)cl_vector_get_ptr( &h_al->hdl_vector, (size_t)hdl );\r
-\r
- /*\r
- * Make sure that the handle is valid and the correct type. Note that we\r
- * support having multiple possible subtypes provided, and check against\r
- * any of them.\r
- */\r
- if( (AL_BASE_TYPE( p_h->type ) != AL_OBJ_TYPE_H_CONN) ||\r
- ((AL_SUBTYPE( p_h->type ) & sub_type) == 0) )\r
- {\r
- cl_spinlock_release( &h_al->obj.lock );\r
- return NULL;\r
- }\r
-\r
- p_conn = (al_conn_t*)p_h->p_obj;\r
-\r
- /* Clear the entry. */\r
- p_h->type = AL_OBJ_TYPE_UNKNOWN;\r
- p_h->p_obj = (al_obj_t*)(uintn_t)h_al->free_hdl;\r
- h_al->free_hdl = hdl;\r
-\r
- __ref_conn( p_conn );\r
-\r
- cl_spinlock_release( &h_al->obj.lock );\r
- return p_conn;\r
-}\r
-\r
--- /dev/null
+/*\r
+ * Copyright (c) 2005 SilverStorm Technologies. All rights reserved.\r
+ *\r
+ * This software is available to you under the OpenIB.org BSD license\r
+ * below:\r
+ *\r
+ * Redistribution and use in source and binary forms, with or\r
+ * without modification, are permitted provided that the following\r
+ * conditions are met:\r
+ *\r
+ * - Redistributions of source code must retain the above\r
+ * copyright notice, this list of conditions and the following\r
+ * disclaimer.\r
+ *\r
+ * - Redistributions in binary form must reproduce the above\r
+ * copyright notice, this list of conditions and the following\r
+ * disclaimer in the documentation and/or other materials\r
+ * provided with the distribution.\r
+ *\r
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,\r
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\r
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND\r
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS\r
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN\r
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN\r
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\r
+ * SOFTWARE.\r
+ *\r
+ * $Id:$\r
+ */\r
+\r
+\r
+#include "al_debug.h"\r
+#include "al_cm_cep.h"\r
+#include "al_dev.h"\r
+#include <iba/ib_al_ioctl.h>\r
+#include "al_proxy.h"\r
+#include "al.h"\r
+#include "al_qp.h"\r
+\r
+\r
+static ib_api_status_t\r
+__proxy_cep_cb(\r
+ IN ib_al_handle_t h_al,\r
+ IN ib_cep_t* const p_cep );\r
+\r
+\r
+/*\r
+ * IOCTL handler: create a CEP on behalf of a user-mode client.  The\r
+ * resulting CID and status are returned in the output buffer.\r
+ */\r
+static cl_status_t\r
+proxy_create_cep(\r
+ IN void *p_open_context,\r
+ IN cl_ioctl_handle_t h_ioctl,\r
+ OUT size_t *p_ret_bytes )\r
+{\r
+ al_dev_open_context_t *p_context;\r
+ ual_create_cep_ioctl_t *p_ioctl;\r
+\r
+ AL_ENTER( AL_DBG_CM );\r
+\r
+ p_context = (al_dev_open_context_t*)p_open_context;\r
+ p_ioctl = (ual_create_cep_ioctl_t*)cl_ioctl_out_buf( h_ioctl );\r
+\r
+ /* Validate user parameters. */\r
+ if( cl_ioctl_out_size( h_ioctl ) != sizeof(ual_create_cep_ioctl_t) )\r
+ {\r
+ AL_EXIT( AL_DBG_CM );\r
+ return CL_INVALID_PARAMETER;\r
+ }\r
+\r
+ /* The open context serves as the CEP context for proxy callbacks. */\r
+ p_ioctl->status = al_create_cep( p_context->h_al, __proxy_cep_cb,\r
+ p_context, &p_ioctl->cid );\r
+\r
+ *p_ret_bytes = sizeof(ual_create_cep_ioctl_t);\r
+\r
+ AL_EXIT( AL_DBG_CM );\r
+ return CL_SUCCESS;\r
+}\r
+\r
+\r
+/*\r
+ * Complete a pending get-event IRP with the given status and release\r
+ * the AL object reference taken when the IRP was queued.\r
+ */\r
+static inline void\r
+__complete_get_event_ioctl(\r
+ IN ib_al_handle_t h_al,\r
+ IN IRP* const p_irp,\r
+ IN NTSTATUS status )\r
+{\r
+/* Clear the cancel routine so the IRP cannot be cancelled concurrently. */\r
+#pragma warning(push, 3)\r
+ IoSetCancelRoutine( p_irp, NULL );\r
+#pragma warning(pop)\r
+\r
+ /* Complete the IRP. */\r
+ p_irp->IoStatus.Status = status;\r
+ p_irp->IoStatus.Information = 0;\r
+ IoCompleteRequest( p_irp, IO_NETWORK_INCREMENT );\r
+\r
+ deref_al_obj( &h_al->obj );\r
+}\r
+\r
+\r
+/*\r
+ * IOCTL handler: destroy a user-mode client's CEP.  Any IRP queued for\r
+ * event notification is cancelled first so the client unblocks.\r
+ */\r
+static cl_status_t\r
+proxy_destroy_cep(\r
+ IN void *p_open_context,\r
+ IN cl_ioctl_handle_t h_ioctl,\r
+ OUT size_t *p_ret_bytes )\r
+{\r
+ ib_api_status_t status;\r
+ al_dev_open_context_t *p_context;\r
+ IRP *p_irp;\r
+\r
+ AL_ENTER( AL_DBG_CM );\r
+\r
+ UNUSED_PARAM( p_ret_bytes );\r
+\r
+ p_context = (al_dev_open_context_t*)p_open_context;\r
+\r
+ /* Validate user parameters. */\r
+ if( cl_ioctl_in_size( h_ioctl ) != sizeof(net32_t) )\r
+ {\r
+ AL_EXIT( AL_DBG_CM );\r
+ return CL_INVALID_PARAMETER;\r
+ }\r
+\r
+ /* Pull back any queued notification IRP and cancel it. */\r
+ status = al_cep_xchg_irp(\r
+ p_context->h_al, *(net32_t*)cl_ioctl_in_buf( h_ioctl ), NULL, &p_irp );\r
+ if( status != IB_INVALID_HANDLE && p_irp )\r
+ __complete_get_event_ioctl( p_context->h_al, p_irp, STATUS_CANCELLED );\r
+\r
+ al_destroy_cep( p_context->h_al,\r
+ *(net32_t*)cl_ioctl_in_buf( h_ioctl ), NULL );\r
+\r
+ AL_EXIT( AL_DBG_CM );\r
+ return CL_SUCCESS;\r
+}\r
+\r
+\r
+/*\r
+ * IOCTL handler: put a user-mode client's CEP into the listening state.\r
+ * The user's compare-buffer pointer is redirected to the copy embedded\r
+ * in the IOCTL so the kernel never dereferences a user pointer.\r
+ */\r
+static cl_status_t\r
+proxy_cep_listen(\r
+ IN void *p_open_context,\r
+ IN cl_ioctl_handle_t h_ioctl,\r
+ OUT size_t *p_ret_bytes )\r
+{\r
+ al_dev_open_context_t *p_context;\r
+ ual_cep_listen_ioctl_t *p_ioctl;\r
+ ib_api_status_t status;\r
+\r
+ AL_ENTER( AL_DBG_CM );\r
+\r
+ p_context = (al_dev_open_context_t*)p_open_context;\r
+ p_ioctl = (ual_cep_listen_ioctl_t*)cl_ioctl_in_buf( h_ioctl );\r
+\r
+ /* Validate user parameters. */\r
+ if( cl_ioctl_in_size( h_ioctl ) != sizeof(ual_cep_listen_ioctl_t) ||\r
+ cl_ioctl_out_size( h_ioctl ) != sizeof(ib_api_status_t) )\r
+ {\r
+ AL_EXIT( AL_DBG_CM );\r
+ return CL_INVALID_PARAMETER;\r
+ }\r
+\r
+ /* Set the private data compare buffer to our kernel copy. */\r
+ if( p_ioctl->cep_listen.p_cmp_buf )\r
+ p_ioctl->cep_listen.p_cmp_buf = p_ioctl->compare;\r
+\r
+ status =\r
+ al_cep_listen( p_context->h_al, p_ioctl->cid, &p_ioctl->cep_listen );\r
+\r
+ (*(ib_api_status_t*)cl_ioctl_out_buf( h_ioctl )) = status;\r
+\r
+ *p_ret_bytes = sizeof(ib_api_status_t);\r
+\r
+ AL_EXIT( AL_DBG_CM );\r
+ return CL_SUCCESS;\r
+}\r
+\r
+\r
+/*\r
+ * IOCTL handler: stage a connection REQ for a user-mode client.  All\r
+ * user pointers inside the request are redirected to the kernel copies\r
+ * embedded in the IOCTL, and the user QP handle is swapped for the\r
+ * kernel QP handle before calling al_cep_pre_req.\r
+ */\r
+static cl_status_t\r
+proxy_cep_pre_req(\r
+ IN void *p_open_context,\r
+ IN cl_ioctl_handle_t h_ioctl,\r
+ OUT size_t *p_ret_bytes )\r
+{\r
+ al_dev_open_context_t *p_context;\r
+ ual_cep_req_ioctl_t *p_ioctl;\r
+ ib_qp_handle_t h_qp;\r
+\r
+ AL_ENTER( AL_DBG_CM );\r
+\r
+ p_context = (al_dev_open_context_t*)p_open_context;\r
+ p_ioctl = (ual_cep_req_ioctl_t*)cl_ioctl_in_buf( h_ioctl );\r
+\r
+ /* Validate user parameters. */\r
+ if( cl_ioctl_in_size( h_ioctl ) != sizeof(struct _ual_cep_req_ioctl_in) ||\r
+ cl_ioctl_out_size( h_ioctl ) != sizeof(struct _ual_cep_req_ioctl_out) )\r
+ {\r
+ AL_EXIT( AL_DBG_CM );\r
+ return CL_INVALID_PARAMETER;\r
+ }\r
+\r
+ *p_ret_bytes = sizeof(struct _ual_cep_req_ioctl_out);\r
+\r
+ /* Redirect all embedded user pointers to the kernel copies. */\r
+ p_ioctl->in.cm_req.h_al = p_context->h_al;\r
+ p_ioctl->in.cm_req.p_primary_path = &p_ioctl->in.paths[0];\r
+ if( p_ioctl->in.cm_req.p_alt_path )\r
+ p_ioctl->in.cm_req.p_alt_path = &p_ioctl->in.paths[1];\r
+ if( p_ioctl->in.cm_req.p_compare_buffer )\r
+ p_ioctl->in.cm_req.p_compare_buffer = p_ioctl->in.compare;\r
+ if( p_ioctl->in.cm_req.p_req_pdata )\r
+ p_ioctl->in.cm_req.p_req_pdata = p_ioctl->in.pdata;\r
+\r
+ /* Get the kernel QP handle. */\r
+ h_qp = (ib_qp_handle_t)al_hdl_ref(\r
+ p_context->h_al, (uint64_t)p_ioctl->in.cm_req.h_qp, AL_OBJ_TYPE_H_QP );\r
+ if( !h_qp )\r
+ {\r
+ p_ioctl->out.status = IB_INVALID_QP_HANDLE;\r
+ goto done;\r
+ }\r
+\r
+ p_ioctl->in.cm_req.h_qp = h_qp;\r
+\r
+ p_ioctl->out.status = al_cep_pre_req( p_context->h_al, p_ioctl->in.cid,\r
+ &p_ioctl->in.cm_req, &p_ioctl->out.init );\r
+\r
+ deref_al_obj( &h_qp->obj );\r
+\r
+ if( p_ioctl->out.status != IB_SUCCESS )\r
+ {\r
+/* Note: 'done' jumps into this block to clear the output on failure. */\r
+done:\r
+ cl_memclr( &p_ioctl->out.init, sizeof(ib_qp_mod_t) );\r
+ }\r
+\r
+ AL_EXIT( AL_DBG_CM );\r
+ return CL_SUCCESS;\r
+}\r
+\r
+\r
+/*\r
+ * IOCTL handler: send the REQ previously staged by proxy_cep_pre_req.\r
+ * The AL status is returned in the output buffer.\r
+ */\r
+static cl_status_t\r
+proxy_cep_send_req(\r
+ IN void *p_open_context,\r
+ IN cl_ioctl_handle_t h_ioctl,\r
+ OUT size_t *p_ret_bytes )\r
+{\r
+ al_dev_open_context_t *p_context;\r
+\r
+ AL_ENTER( AL_DBG_CM );\r
+\r
+ p_context = (al_dev_open_context_t*)p_open_context;\r
+\r
+ /* Validate user parameters. */\r
+ if( cl_ioctl_in_size( h_ioctl ) != sizeof(net32_t) ||\r
+ cl_ioctl_out_size( h_ioctl ) != sizeof(ib_api_status_t) )\r
+ {\r
+ AL_EXIT( AL_DBG_CM );\r
+ return CL_INVALID_PARAMETER;\r
+ }\r
+\r
+ (*(ib_api_status_t*)cl_ioctl_out_buf( h_ioctl )) = al_cep_send_req(\r
+ p_context->h_al, *(net32_t*)cl_ioctl_in_buf( h_ioctl ) );\r
+\r
+ *p_ret_bytes = sizeof(ib_api_status_t);\r
+\r
+ AL_EXIT( AL_DBG_CM );\r
+ return CL_SUCCESS;\r
+}\r
+\r
+\r
+/*\r
+ * IOCTL handler: stage a connection REP for a user-mode client.  The\r
+ * user's private-data pointer is redirected to the kernel copy and the\r
+ * user QP handle swapped for the kernel handle before al_cep_pre_rep.\r
+ */\r
+static cl_status_t\r
+proxy_cep_pre_rep(\r
+ IN void *p_open_context,\r
+ IN cl_ioctl_handle_t h_ioctl,\r
+ OUT size_t *p_ret_bytes )\r
+{\r
+ al_dev_open_context_t *p_context;\r
+ ual_cep_rep_ioctl_t *p_ioctl;\r
+ ib_qp_handle_t h_qp;\r
+\r
+ AL_ENTER( AL_DBG_CM );\r
+\r
+ p_context = (al_dev_open_context_t*)p_open_context;\r
+ p_ioctl = (ual_cep_rep_ioctl_t*)cl_ioctl_in_buf( h_ioctl );\r
+\r
+ /* Validate user parameters. */\r
+ if( cl_ioctl_in_size( h_ioctl ) != sizeof(struct _ual_cep_rep_ioctl_in) ||\r
+ cl_ioctl_out_size( h_ioctl ) != sizeof(struct _ual_cep_rep_ioctl_out) )\r
+ {\r
+ AL_EXIT( AL_DBG_CM );\r
+ return CL_INVALID_PARAMETER;\r
+ }\r
+\r
+ *p_ret_bytes = sizeof(struct _ual_cep_rep_ioctl_out);\r
+\r
+ if( p_ioctl->in.cm_rep.p_rep_pdata )\r
+ p_ioctl->in.cm_rep.p_rep_pdata = p_ioctl->in.pdata;\r
+\r
+ /* Get the kernel QP handle. */\r
+ h_qp = (ib_qp_handle_t)al_hdl_ref(\r
+ p_context->h_al, (uint64_t)p_ioctl->in.cm_rep.h_qp, AL_OBJ_TYPE_H_QP );\r
+ if( !h_qp )\r
+ {\r
+ p_ioctl->out.status = IB_INVALID_QP_HANDLE;\r
+ goto done;\r
+ }\r
+\r
+ p_ioctl->in.cm_rep.h_qp = h_qp;\r
+\r
+ p_ioctl->out.status = al_cep_pre_rep( p_context->h_al, p_ioctl->in.cid,\r
+ p_context, &p_ioctl->in.cm_rep, &p_ioctl->out.init );\r
+\r
+ deref_al_obj( &h_qp->obj );\r
+\r
+ if( p_ioctl->out.status != IB_SUCCESS )\r
+ {\r
+/* Note: 'done' jumps into this block to clear the output on failure. */\r
+done:\r
+ cl_memclr( &p_ioctl->out.init, sizeof(ib_qp_mod_t) );\r
+ }\r
+\r
+ AL_EXIT( AL_DBG_CM );\r
+ return CL_SUCCESS;\r
+}\r
+\r
+\r
+/*\r
+ * IOCTL handler: send the REP previously staged by proxy_cep_pre_rep.\r
+ * The AL status is returned in the output buffer.\r
+ */\r
+static cl_status_t\r
+proxy_cep_send_rep(\r
+ IN void *p_open_context,\r
+ IN cl_ioctl_handle_t h_ioctl,\r
+ OUT size_t *p_ret_bytes )\r
+{\r
+ al_dev_open_context_t *p_context;\r
+\r
+ AL_ENTER( AL_DBG_CM );\r
+\r
+ p_context = (al_dev_open_context_t*)p_open_context;\r
+\r
+ /* Validate user parameters. */\r
+ if( cl_ioctl_in_size( h_ioctl ) != sizeof(net32_t) ||\r
+ cl_ioctl_out_size( h_ioctl ) != sizeof(ib_api_status_t) )\r
+ {\r
+ AL_EXIT( AL_DBG_CM );\r
+ return CL_INVALID_PARAMETER;\r
+ }\r
+\r
+ (*(ib_api_status_t*)cl_ioctl_out_buf( h_ioctl )) = al_cep_send_rep(\r
+ p_context->h_al, *(net32_t*)cl_ioctl_in_buf( h_ioctl ) );\r
+\r
+ *p_ret_bytes = sizeof(ib_api_status_t);\r
+\r
+ AL_EXIT( AL_DBG_CM );\r
+ return CL_SUCCESS;\r
+}\r
+\r
+\r
+/*\r
+ * IOCTL handler: return the QP modify needed to move the client's QP to\r
+ * RTR for the given connection.  On failure the attributes are zeroed\r
+ * so no kernel data leaks to user mode.\r
+ */\r
+static cl_status_t\r
+proxy_cep_get_rtr(\r
+ IN void *p_open_context,\r
+ IN cl_ioctl_handle_t h_ioctl,\r
+ OUT size_t *p_ret_bytes )\r
+{\r
+ al_dev_open_context_t *p_context;\r
+ ual_cep_get_rtr_ioctl_t *p_ioctl;\r
+\r
+ AL_ENTER( AL_DBG_CM );\r
+\r
+ p_context = (al_dev_open_context_t*)p_open_context;\r
+ p_ioctl = (ual_cep_get_rtr_ioctl_t*)cl_ioctl_out_buf( h_ioctl );\r
+\r
+ /* Validate user parameters. */\r
+ if( cl_ioctl_in_size( h_ioctl ) != sizeof(net32_t) ||\r
+ cl_ioctl_out_size( h_ioctl ) != sizeof(ual_cep_get_rtr_ioctl_t) )\r
+ {\r
+ AL_EXIT( AL_DBG_CM );\r
+ return CL_INVALID_PARAMETER;\r
+ }\r
+\r
+ *p_ret_bytes = sizeof(ual_cep_get_rtr_ioctl_t);\r
+\r
+ p_ioctl->status = al_cep_get_rtr_attr( p_context->h_al,\r
+ *(net32_t*)cl_ioctl_in_buf( h_ioctl ), &p_ioctl->rtr );\r
+\r
+ if( p_ioctl->status != IB_SUCCESS )\r
+ cl_memclr( &p_ioctl->rtr, sizeof(ib_qp_mod_t) );\r
+\r
+ AL_EXIT( AL_DBG_CM );\r
+ return CL_SUCCESS;\r
+}\r
+\r
+\r
+/*
+ * Process a UAL_CEP_GET_RTS IOCTL: retrieve the QP modify attributes needed
+ * to transition the user's QP to RTS for the given CID.  On failure the
+ * attribute structure is zeroed so no kernel data leaks to user-mode.
+ */
+static cl_status_t
+proxy_cep_get_rts(
+	IN		void					*p_open_context,
+	IN		cl_ioctl_handle_t		h_ioctl,
+		OUT	size_t					*p_ret_bytes )
+{
+	al_dev_open_context_t	*p_context;
+	ual_cep_get_rts_ioctl_t	*p_ioctl;
+
+	AL_ENTER( AL_DBG_CM );
+
+	p_context = (al_dev_open_context_t*)p_open_context;
+	p_ioctl = (ual_cep_get_rts_ioctl_t*)cl_ioctl_out_buf( h_ioctl );
+
+	/* Validate user parameters. */
+	if( cl_ioctl_in_size( h_ioctl ) != sizeof(net32_t) ||
+		cl_ioctl_out_size( h_ioctl ) != sizeof(ual_cep_get_rts_ioctl_t) )
+	{
+		AL_EXIT( AL_DBG_CM );
+		return CL_INVALID_PARAMETER;
+	}
+
+	*p_ret_bytes = sizeof(ual_cep_get_rts_ioctl_t);
+
+	p_ioctl->status = al_cep_get_rts_attr( p_context->h_al,
+		*(net32_t*)cl_ioctl_in_buf( h_ioctl ), &p_ioctl->rts );
+
+	/* Don't return uninitialized attribute data on failure. */
+	if( p_ioctl->status != IB_SUCCESS )
+		cl_memclr( &p_ioctl->rts, sizeof(ib_qp_mod_t) );
+
+	AL_EXIT( AL_DBG_CM );
+	return CL_SUCCESS;
+}
+\r
+\r
+/*
+ * Process a UAL_CEP_RTU IOCTL: send the RTU MAD, with optional private data
+ * copied from the kernel-side ioctl buffer, to complete connection
+ * establishment on the active side.
+ */
+static cl_status_t
+proxy_cep_rtu(
+	IN		void					*p_open_context,
+	IN		cl_ioctl_handle_t		h_ioctl,
+		OUT	size_t					*p_ret_bytes )
+{
+	al_dev_open_context_t	*p_context;
+	ual_cep_rtu_ioctl_t		*p_ioctl;
+	ib_api_status_t			status;
+
+	AL_ENTER( AL_DBG_CM );
+
+	p_context = (al_dev_open_context_t*)p_open_context;
+	p_ioctl = (ual_cep_rtu_ioctl_t*)cl_ioctl_in_buf( h_ioctl );
+
+	/* Validate user parameters. */
+	if( cl_ioctl_in_size( h_ioctl ) != sizeof(ual_cep_rtu_ioctl_t) ||
+		cl_ioctl_out_size( h_ioctl ) != sizeof(ib_api_status_t) )
+	{
+		AL_EXIT( AL_DBG_CM );
+		return CL_INVALID_PARAMETER;
+	}
+
+	/* pdata lives in the IOCTL buffer itself, so it is already captured. */
+	status = al_cep_rtu( p_context->h_al,
+		p_ioctl->cid, p_ioctl->pdata, p_ioctl->pdata_len );
+
+	(*(ib_api_status_t*)cl_ioctl_out_buf( h_ioctl )) = status;
+
+	*p_ret_bytes = sizeof(ib_api_status_t);
+
+	AL_EXIT( AL_DBG_CM );
+	return CL_SUCCESS;
+}
+\r
+\r
+/*
+ * Process a UAL_CEP_REJ IOCTL: send a REJ MAD with the caller's rejection
+ * status, optional ARI (additional rejection info), and optional private
+ * data, all captured in the kernel copy of the IOCTL buffer.
+ */
+static cl_status_t
+proxy_cep_rej(
+	IN		void					*p_open_context,
+	IN		cl_ioctl_handle_t		h_ioctl,
+		OUT	size_t					*p_ret_bytes )
+{
+	al_dev_open_context_t	*p_context;
+	ual_cep_rej_ioctl_t		*p_ioctl;
+
+	AL_ENTER( AL_DBG_CM );
+
+	p_context = (al_dev_open_context_t*)p_open_context;
+	p_ioctl = (ual_cep_rej_ioctl_t*)cl_ioctl_in_buf( h_ioctl );
+
+	/* Validate user parameters. */
+	if( cl_ioctl_in_size( h_ioctl ) != sizeof(ual_cep_rej_ioctl_t) ||
+		cl_ioctl_out_size( h_ioctl ) != sizeof(ib_api_status_t) )
+	{
+		AL_EXIT( AL_DBG_CM );
+		return CL_INVALID_PARAMETER;
+	}
+
+	(*(ib_api_status_t*)cl_ioctl_out_buf( h_ioctl )) = al_cep_rej(
+		p_context->h_al, p_ioctl->cid, p_ioctl->rej_status, p_ioctl->ari,
+		p_ioctl->ari_len, p_ioctl->pdata, p_ioctl->pdata_len );
+
+	*p_ret_bytes = sizeof(ib_api_status_t);
+
+	AL_EXIT( AL_DBG_CM );
+	return CL_SUCCESS;
+}
+\r
+\r
+/*
+ * Process a UAL_CEP_MRA IOCTL: send an MRA (message receipt acknowledged)
+ * MAD.  The user-mode pdata pointer is rebased to the kernel copy embedded
+ * in the IOCTL buffer before use.
+ */
+static cl_status_t
+proxy_cep_mra(
+	IN		void					*p_open_context,
+	IN		cl_ioctl_handle_t		h_ioctl,
+		OUT	size_t					*p_ret_bytes )
+{
+	al_dev_open_context_t	*p_context;
+	ual_cep_mra_ioctl_t		*p_ioctl;
+
+	AL_ENTER( AL_DBG_CM );
+
+	p_context = (al_dev_open_context_t*)p_open_context;
+	p_ioctl = (ual_cep_mra_ioctl_t*)cl_ioctl_in_buf( h_ioctl );
+
+	/* Validate user parameters. */
+	if( cl_ioctl_in_size( h_ioctl ) != sizeof(ual_cep_mra_ioctl_t) ||
+		cl_ioctl_out_size( h_ioctl ) != sizeof(ib_api_status_t) )
+	{
+		AL_EXIT( AL_DBG_CM );
+		return CL_INVALID_PARAMETER;
+	}
+
+	/* Point the pdata at our kernel copy, never at user memory. */
+	p_ioctl->cm_mra.p_mra_pdata = p_ioctl->pdata;
+
+	(*(ib_api_status_t*)cl_ioctl_out_buf( h_ioctl )) = al_cep_mra(
+		p_context->h_al, p_ioctl->cid, &p_ioctl->cm_mra );
+
+	*p_ret_bytes = sizeof(ib_api_status_t);
+
+	AL_EXIT( AL_DBG_CM );
+	return CL_SUCCESS;
+}
+\r
+\r
+/*
+ * Process a UAL_CEP_LAP IOCTL: send a LAP (load alternate path) MAD.
+ * The embedded user-mode pointers (alternate path and optional private
+ * data) are rebased to the kernel copies inside the IOCTL buffer, and the
+ * user-mode QP handle is resolved to the kernel QP object before calling
+ * into the CEP layer.
+ */
+static cl_status_t
+proxy_cep_lap(
+	IN		void					*p_open_context,
+	IN		cl_ioctl_handle_t		h_ioctl,
+		OUT	size_t					*p_ret_bytes )
+{
+	al_dev_open_context_t	*p_context;
+	ual_cep_lap_ioctl_t		*p_ioctl;
+	ib_api_status_t			status;
+	ib_qp_handle_t			h_qp;
+
+	AL_ENTER( AL_DBG_CM );
+
+	p_context = (al_dev_open_context_t*)p_open_context;
+	p_ioctl = (ual_cep_lap_ioctl_t*)cl_ioctl_in_buf( h_ioctl );
+
+	/* Validate user parameters. */
+	if( cl_ioctl_in_size( h_ioctl ) != sizeof(ual_cep_lap_ioctl_t) ||
+		cl_ioctl_out_size( h_ioctl ) != sizeof(ib_api_status_t) )
+	{
+		AL_EXIT( AL_DBG_CM );
+		return CL_INVALID_PARAMETER;
+	}
+
+	*p_ret_bytes = sizeof(ib_api_status_t);
+
+	/* Rebase embedded pointers to the kernel copies in the IOCTL buffer. */
+	p_ioctl->cm_lap.p_alt_path = &p_ioctl->alt_path;
+	if( p_ioctl->cm_lap.p_lap_pdata )
+		p_ioctl->cm_lap.p_lap_pdata = p_ioctl->pdata;
+
+	/* Get the kernel QP handle. */
+	h_qp = (ib_qp_handle_t)al_hdl_ref(
+		p_context->h_al, (uint64_t)p_ioctl->cm_lap.h_qp, AL_OBJ_TYPE_H_QP );
+	if( !h_qp )
+	{
+		status = IB_INVALID_QP_HANDLE;
+		goto done;
+	}
+
+	p_ioctl->cm_lap.h_qp = h_qp;
+
+	status = al_cep_lap( p_context->h_al, p_ioctl->cid, &p_ioctl->cm_lap );
+
+	deref_al_obj( &h_qp->obj );
+
+done:
+	(*(ib_api_status_t*)cl_ioctl_out_buf( h_ioctl )) = status;
+
+	AL_EXIT( AL_DBG_CM );
+	return CL_SUCCESS;
+}
+\r
+\r
+/*
+ * Process a UAL_CEP_PRE_APR IOCTL: format the APR (alternate path response)
+ * and return the QP modify attributes needed to load the alternate path.
+ * Embedded user-mode pointers are rebased to the kernel copies in the IOCTL
+ * buffer, and the user-mode QP handle is resolved to the kernel QP object.
+ * On any failure the returned attribute structure is zeroed.
+ */
+static cl_status_t
+proxy_cep_pre_apr(
+	IN		void					*p_open_context,
+	IN		cl_ioctl_handle_t		h_ioctl,
+		OUT	size_t					*p_ret_bytes )
+{
+	al_dev_open_context_t	*p_context;
+	ual_cep_apr_ioctl_t		*p_ioctl;
+	ib_qp_handle_t			h_qp;
+
+	AL_ENTER( AL_DBG_CM );
+
+	p_context = (al_dev_open_context_t*)p_open_context;
+	p_ioctl = (ual_cep_apr_ioctl_t*)cl_ioctl_in_buf( h_ioctl );
+
+	/* Validate user parameters. */
+	if( cl_ioctl_in_size( h_ioctl ) != sizeof(struct _ual_cep_apr_ioctl_in) ||
+		cl_ioctl_out_size( h_ioctl ) != sizeof(struct _ual_cep_apr_ioctl_out) )
+	{
+		AL_EXIT( AL_DBG_CM );
+		return CL_INVALID_PARAMETER;
+	}
+
+	*p_ret_bytes = sizeof(struct _ual_cep_apr_ioctl_out);
+
+	/* Rebase embedded pointers to the kernel copies in the IOCTL buffer. */
+	if( p_ioctl->in.cm_apr.p_info )
+		p_ioctl->in.cm_apr.p_info = (ib_apr_info_t*)p_ioctl->in.apr_info;
+	if( p_ioctl->in.cm_apr.p_apr_pdata )
+		p_ioctl->in.cm_apr.p_apr_pdata = p_ioctl->in.pdata;
+
+	/* Get the kernel QP handle. */
+	h_qp = (ib_qp_handle_t)al_hdl_ref(
+		p_context->h_al, (uint64_t)p_ioctl->in.cm_apr.h_qp, AL_OBJ_TYPE_H_QP );
+	if( !h_qp )
+	{
+		p_ioctl->out.status = IB_INVALID_QP_HANDLE;
+		goto done;
+	}
+
+	p_ioctl->in.cm_apr.h_qp = h_qp;
+
+	p_ioctl->out.status = al_cep_pre_apr( p_context->h_al, p_ioctl->in.cid,
+		&p_ioctl->in.cm_apr, &p_ioctl->out.apr );
+
+	deref_al_obj( &h_qp->obj );
+
+	if( p_ioctl->out.status != IB_SUCCESS )
+	{
+		/* Note: the goto above deliberately jumps into this block. */
+done:
+		cl_memclr( &p_ioctl->out.apr, sizeof(ib_qp_mod_t) );
+	}
+
+	AL_EXIT( AL_DBG_CM );
+	return CL_SUCCESS;
+}
+\r
+\r
+/*
+ * Process a UAL_CEP_SEND_APR IOCTL: send the previously formatted APR MAD
+ * for the CID given in the input buffer.
+ *
+ * NOTE(review): unlike the other handlers, *p_ret_bytes is never set here
+ * even though an ib_api_status_t is written to the output buffer — verify
+ * the user-mode caller does not rely on the returned byte count.
+ */
+static cl_status_t
+proxy_cep_send_apr(
+	IN		void					*p_open_context,
+	IN		cl_ioctl_handle_t		h_ioctl,
+		OUT	size_t					*p_ret_bytes )
+{
+	al_dev_open_context_t	*p_context;
+
+	AL_ENTER( AL_DBG_CM );
+
+	UNUSED_PARAM( p_ret_bytes );
+
+	p_context = (al_dev_open_context_t*)p_open_context;
+
+	/* Validate user parameters. */
+	if( cl_ioctl_in_size( h_ioctl ) != sizeof(net32_t) ||
+		cl_ioctl_out_size( h_ioctl ) != sizeof(ib_api_status_t) )
+	{
+		AL_EXIT( AL_DBG_CM );
+		return CL_INVALID_PARAMETER;
+	}
+
+	(*(ib_api_status_t*)cl_ioctl_out_buf( h_ioctl )) = al_cep_send_apr(
+		p_context->h_al, *(net32_t*)cl_ioctl_in_buf( h_ioctl ) );
+
+	AL_EXIT( AL_DBG_CM );
+	return CL_SUCCESS;
+}
+\r
+\r
+/*
+ * Process a UAL_CEP_DREQ IOCTL: send a DREQ (disconnect request) MAD with
+ * optional private data captured in the kernel copy of the IOCTL buffer.
+ */
+static cl_status_t
+proxy_cep_dreq(
+	IN		void					*p_open_context,
+	IN		cl_ioctl_handle_t		h_ioctl,
+		OUT	size_t					*p_ret_bytes )
+{
+	al_dev_open_context_t	*p_context;
+	ual_cep_dreq_ioctl_t	*p_ioctl;
+	ib_api_status_t			status;
+
+	AL_ENTER( AL_DBG_CM );
+
+	p_context = (al_dev_open_context_t*)p_open_context;
+	p_ioctl = (ual_cep_dreq_ioctl_t*)cl_ioctl_in_buf( h_ioctl );
+
+	/* Validate user parameters. */
+	if( cl_ioctl_in_size( h_ioctl ) != sizeof(ual_cep_dreq_ioctl_t) ||
+		cl_ioctl_out_size( h_ioctl ) != sizeof(ib_api_status_t) )
+	{
+		AL_EXIT( AL_DBG_CM );
+		return CL_INVALID_PARAMETER;
+	}
+
+	/* Set the private data compare buffer to our kernel copy. */
+	status = al_cep_dreq( p_context->h_al,
+		p_ioctl->cid, p_ioctl->pdata, p_ioctl->pdata_len );
+
+	(*(ib_api_status_t*)cl_ioctl_out_buf( h_ioctl )) = status;
+
+	*p_ret_bytes = sizeof(ib_api_status_t);
+
+	AL_EXIT( AL_DBG_CM );
+	return CL_SUCCESS;
+}
+\r
+\r
+/*
+ * Process a UAL_CEP_DREP IOCTL: send a DREP (disconnect reply) MAD.  The
+ * user-mode pdata pointer is rebased to the kernel copy embedded in the
+ * IOCTL buffer before use.
+ */
+static cl_status_t
+proxy_cep_drep(
+	IN		void					*p_open_context,
+	IN		cl_ioctl_handle_t		h_ioctl,
+		OUT	size_t					*p_ret_bytes )
+{
+	al_dev_open_context_t	*p_context;
+	ual_cep_drep_ioctl_t	*p_ioctl;
+
+	AL_ENTER( AL_DBG_CM );
+
+	p_context = (al_dev_open_context_t*)p_open_context;
+	p_ioctl = (ual_cep_drep_ioctl_t*)cl_ioctl_in_buf( h_ioctl );
+
+	/* Validate user parameters. */
+	if( cl_ioctl_in_size( h_ioctl ) != sizeof(ual_cep_drep_ioctl_t) ||
+		cl_ioctl_out_size( h_ioctl ) != sizeof(ib_api_status_t) )
+	{
+		AL_EXIT( AL_DBG_CM );
+		return CL_INVALID_PARAMETER;
+	}
+
+	/* Point the pdata at our kernel copy, never at user memory. */
+	p_ioctl->cm_drep.p_drep_pdata = p_ioctl->pdata;
+
+	(*(ib_api_status_t*)cl_ioctl_out_buf( h_ioctl )) = al_cep_drep(
+		p_context->h_al, p_ioctl->cid, &p_ioctl->cm_drep );
+
+	*p_ret_bytes = sizeof(ib_api_status_t);
+
+	AL_EXIT( AL_DBG_CM );
+	return CL_SUCCESS;
+}
+\r
+\r
+/*
+ * Process a UAL_CEP_GET_TIMEWAIT IOCTL: return the timewait interval (in
+ * microseconds) for the given CID so user-mode can delay QP reuse.
+ *
+ * NOTE(review): timewait_us is not zeroed when the call fails; callers must
+ * check status before consuming it.
+ */
+static cl_status_t
+proxy_cep_get_timewait(
+	IN		void					*p_open_context,
+	IN		cl_ioctl_handle_t		h_ioctl,
+		OUT	size_t					*p_ret_bytes )
+{
+	al_dev_open_context_t			*p_context;
+	ual_cep_get_timewait_ioctl_t	*p_ioctl;
+
+	AL_ENTER( AL_DBG_CM );
+
+	p_context = (al_dev_open_context_t*)p_open_context;
+	p_ioctl = (ual_cep_get_timewait_ioctl_t*)cl_ioctl_out_buf( h_ioctl );
+
+	/* Validate user parameters. */
+	if( cl_ioctl_in_size( h_ioctl ) != sizeof(net32_t) ||
+		cl_ioctl_out_size( h_ioctl ) != sizeof(ual_cep_get_timewait_ioctl_t) )
+	{
+		AL_EXIT( AL_DBG_CM );
+		return CL_INVALID_PARAMETER;
+	}
+
+	p_ioctl->status = al_cep_get_timewait( p_context->h_al,
+		*(net32_t*)cl_ioctl_in_buf( h_ioctl ), &p_ioctl->timewait_us );
+
+	*p_ret_bytes = sizeof(ual_cep_get_timewait_ioctl_t);
+
+	AL_EXIT( AL_DBG_CM );
+	return CL_SUCCESS;
+}
+\r
+\r
+/*
+ * Process a UAL_CEP_POLL IOCTL: dequeue the next CM MAD (and any new CEP
+ * created by a REQ) for the given CID.  On success the MAD element, GRH,
+ * and MAD payload are copied into the output buffer and the kernel MAD is
+ * returned to its pool.  On failure the output is zeroed so stale kernel
+ * data is never exposed to user-mode.
+ */
+static cl_status_t
+proxy_cep_poll(
+	IN		void					*p_open_context,
+	IN		cl_ioctl_handle_t		h_ioctl,
+		OUT	size_t					*p_ret_bytes )
+{
+	al_dev_open_context_t	*p_context;
+	ual_cep_poll_ioctl_t	*p_ioctl;
+	ib_mad_element_t		*p_mad = NULL;
+
+	AL_ENTER( AL_DBG_CM );
+
+	p_context = (al_dev_open_context_t*)p_open_context;
+	p_ioctl = (ual_cep_poll_ioctl_t*)cl_ioctl_out_buf( h_ioctl );
+
+	/* Validate user parameters. */
+	if( cl_ioctl_in_size( h_ioctl ) != sizeof(net32_t) ||
+		cl_ioctl_out_size( h_ioctl ) != sizeof(ual_cep_poll_ioctl_t) )
+	{
+		AL_EXIT( AL_DBG_CM );
+		return CL_INVALID_PARAMETER;
+	}
+
+	*p_ret_bytes = sizeof(ual_cep_poll_ioctl_t);
+
+	p_ioctl->status = al_cep_poll( p_context->h_al,
+		*(net32_t*)cl_ioctl_in_buf( h_ioctl ), &p_ioctl->new_cep, &p_mad );
+
+	if( p_ioctl->status == IB_SUCCESS )
+	{
+		/* Copy the MAD for user consumption and free it. */
+		CL_ASSERT( p_mad );
+		p_ioctl->element = *p_mad;
+		if( p_mad->grh_valid )
+			p_ioctl->grh = *p_mad->p_grh;
+		else
+			cl_memclr( &p_ioctl->grh, sizeof(ib_grh_t) );
+		cl_memcpy( p_ioctl->mad_buf, p_mad->p_mad_buf, MAD_BLOCK_SIZE );
+		ib_put_mad( p_mad );
+	}
+	else
+	{
+		/*
+		 * Clear the full MAD buffer (previously only sizeof(int) bytes were
+		 * cleared due to sizeof(MAD_BLOCK_SIZE), leaking kernel memory).
+		 */
+		cl_memclr( p_ioctl->mad_buf, MAD_BLOCK_SIZE );
+		cl_memclr( &p_ioctl->new_cep, sizeof(ib_cep_t) );
+	}
+
+	AL_EXIT( AL_DBG_CM );
+	return CL_SUCCESS;
+}
+\r
+\r
+/*
+ * IRP cancel routine for a pended UAL_CEP_GET_EVENT request.  Runs with the
+ * system cancel spinlock held; retrieves the CID stashed in the IRP's
+ * driver context, removes the IRP from the CEP if it is still queued, and
+ * completes it with STATUS_CANCELLED.
+ */
+static void
+__proxy_cancel_cep(
+	IN				DEVICE_OBJECT*				p_dev_obj,
+	IN				IRP*						p_irp )
+{
+	al_dev_open_context_t	*p_context;
+	PIO_STACK_LOCATION		p_io_stack;
+	net32_t					cid;
+
+	AL_ENTER( AL_DBG_DEV );
+
+	UNUSED_PARAM( p_dev_obj );
+
+	/* Get the stack location. */
+	p_io_stack = IoGetCurrentIrpStackLocation( p_irp );
+
+	p_context = (al_dev_open_context_t *)p_io_stack->FileObject->FsContext;
+	ASSERT( p_context );
+
+	/* The CID was stored here when the IRP was pended. */
+	cid = (net32_t)(size_t)p_irp->Tail.Overlay.DriverContext[0];
+	/* Only complete the IRP if the CEP still owned it (cancel race). */
+	if( al_cep_cancel_irp( p_context->h_al, cid, p_irp ) == IB_SUCCESS )
+		__complete_get_event_ioctl( p_context->h_al, p_irp, STATUS_CANCELLED );
+
+	IoReleaseCancelSpinLock( p_irp->CancelIrql );
+}
+\r
+\r
+/*
+ * Process a UAL_CEP_GET_EVENT IOCTL: pend the IRP on the CEP so it can be
+ * completed when a CM MAD arrives.  If MADs are already queued the IRP is
+ * completed immediately; if the CID is invalid it is completed with an
+ * error.  In all cases the IRP has been marked pending, so CL_PENDING is
+ * returned and the completion path owns the IRP.
+ */
+static cl_status_t
+proxy_cep_get_event(
+	IN		void					*p_open_context,
+	IN		cl_ioctl_handle_t		h_ioctl,
+		OUT	size_t					*p_ret_bytes )
+{
+	ib_api_status_t			status;
+	IO_STACK_LOCATION		*p_io_stack;
+	al_dev_open_context_t	*p_context;
+	net32_t					cid;
+	IRP						*p_old_irp;
+
+	AL_ENTER( AL_DBG_CM );
+
+	UNUSED_PARAM( p_ret_bytes );
+
+	p_context = p_open_context;
+
+	p_io_stack = IoGetCurrentIrpStackLocation( h_ioctl );
+	/* Only the dedicated CM file object may issue this request. */
+	if( (uintn_t)p_io_stack->FileObject->FsContext2 != AL_OBJ_TYPE_CM )
+	{
+		AL_TRACE_EXIT( AL_DBG_ERROR,
+			("Invalid file object type for request: %d\n",
+			p_io_stack->FileObject->FsContext2) );
+		return CL_INVALID_PARAMETER;
+	}
+
+	/* Check the size of the ioctl */
+	if( cl_ioctl_in_size( h_ioctl ) != sizeof(net32_t) )
+	{
+		AL_TRACE_EXIT( AL_DBG_ERROR, ("Invalid IOCTL input buffer.\n") );
+		return CL_INVALID_PARAMETER;
+	}
+
+	cid = *(net32_t*)cl_ioctl_in_buf( h_ioctl );
+
+	/* Store the CID in the IRP's driver context so we can cancel it. */
+	h_ioctl->Tail.Overlay.DriverContext[0] = (void*)(size_t)cid;
+#pragma warning(push, 3)
+	IoSetCancelRoutine( h_ioctl, __proxy_cancel_cep );
+#pragma warning(pop)
+	IoMarkIrpPending( h_ioctl );
+
+	ref_al_obj( &p_context->h_al->obj );
+
+	/* Attempt to queue the IRP in the CEP. */
+	status = al_cep_xchg_irp( p_context->h_al, cid, h_ioctl, &p_old_irp );
+	switch( status )
+	{
+	case IB_SUCCESS:
+		/* IRP queued; it will be completed by the CEP callback. */
+		break;
+
+	case IB_NOT_DONE:
+		/* There are queued MADs - complete the IOCTL now. */
+		__complete_get_event_ioctl( p_context->h_al, h_ioctl, STATUS_SUCCESS );
+		break;
+
+	default:
+		/* Invalid CID.  Complete the request. */
+		__complete_get_event_ioctl(
+			p_context->h_al, h_ioctl, STATUS_INVALID_PARAMETER );
+		AL_EXIT( AL_DBG_CM );
+		return CL_PENDING;
+	}
+
+	/* Check for an existing IRP. */
+	if( p_old_irp )
+	{
+		/*
+		 * We must handle the race between this IOCTL and a cancellation of
+		 * the previous one both trying to cancel the old IRP.
+		 */
+		__complete_get_event_ioctl(
+			p_context->h_al, p_old_irp, STATUS_CANCELLED );
+	}
+
+	AL_EXIT( AL_DBG_CM );
+	return CL_PENDING;
+}
+\r
+\r
+/*
+ * CEP event callback for user-mode proxied CEPs: completes any IRP pended
+ * by proxy_cep_get_event so user-mode wakes up and polls the MAD queue.
+ */
+static ib_api_status_t
+__proxy_cep_cb(
+	IN				ib_al_handle_t				h_al,
+	IN				ib_cep_t* const				p_cep )
+{
+	ib_api_status_t		status;
+	IRP					*p_irp;
+
+	AL_ENTER( AL_DBG_CM );
+
+	/* Exchange NULL for the pended IRP, if any, and complete it. */
+	status = al_cep_xchg_irp( h_al, p_cep->cid, NULL, &p_irp );
+	CL_ASSERT( status != IB_INVALID_HANDLE );
+	if( p_irp )
+		__complete_get_event_ioctl( h_al, p_irp, STATUS_SUCCESS );
+
+	/*
+	 * Note that we always return IB_ERROR here.  This causes the CEP manager
+	 * to signal the callback event.  While this can result in more calls to
+	 * this function, it does eliminate potential races between destruction
+	 * and callbacks.
+	 */
+	AL_EXIT( AL_DBG_CM );
+	return IB_ERROR;
+}
+\r
+\r
+\r
+/*
+ * Top-level dispatcher for all UAL CEP IOCTLs.  Validates the file-object
+ * context, then routes the request to the matching proxy_cep_* handler
+ * based on the IOCTL control code.
+ */
+cl_status_t cep_ioctl(
+	IN				cl_ioctl_handle_t			h_ioctl,
+		OUT			size_t						*p_ret_bytes )
+{
+	cl_status_t				cl_status;
+	IO_STACK_LOCATION		*p_io_stack;
+	void					*p_context;
+
+	AL_ENTER( AL_DBG_DEV );
+
+	CL_ASSERT( h_ioctl && p_ret_bytes );
+
+	p_io_stack = IoGetCurrentIrpStackLocation( h_ioctl );
+	p_context = p_io_stack->FileObject->FsContext;
+
+	/* No open context means the device was never properly opened. */
+	if( !p_context )
+	{
+		AL_EXIT( AL_DBG_DEV );
+		return CL_INVALID_PARAMETER;
+	}
+
+	switch( cl_ioctl_ctl_code( h_ioctl ) )
+	{
+	case UAL_CREATE_CEP:
+		cl_status = proxy_create_cep( p_context, h_ioctl, p_ret_bytes );
+		break;
+	case UAL_DESTROY_CEP:
+		cl_status = proxy_destroy_cep( p_context, h_ioctl, p_ret_bytes );
+		break;
+	case UAL_CEP_LISTEN:
+		cl_status = proxy_cep_listen( p_context, h_ioctl, p_ret_bytes );
+		break;
+	case UAL_CEP_PRE_REQ:
+		cl_status = proxy_cep_pre_req( p_context, h_ioctl, p_ret_bytes );
+		break;
+	case UAL_CEP_SEND_REQ:
+		cl_status = proxy_cep_send_req( p_context, h_ioctl, p_ret_bytes );
+		break;
+	case UAL_CEP_PRE_REP:
+		cl_status = proxy_cep_pre_rep( p_context, h_ioctl, p_ret_bytes );
+		break;
+	case UAL_CEP_SEND_REP:
+		cl_status = proxy_cep_send_rep( p_context, h_ioctl, p_ret_bytes );
+		break;
+	case UAL_CEP_GET_RTR:
+		cl_status = proxy_cep_get_rtr( p_context, h_ioctl, p_ret_bytes );
+		break;
+	case UAL_CEP_GET_RTS:
+		cl_status = proxy_cep_get_rts( p_context, h_ioctl, p_ret_bytes );
+		break;
+	case UAL_CEP_RTU:
+		cl_status = proxy_cep_rtu( p_context, h_ioctl, p_ret_bytes );
+		break;
+	case UAL_CEP_REJ:
+		cl_status = proxy_cep_rej( p_context, h_ioctl, p_ret_bytes );
+		break;
+	case UAL_CEP_MRA:
+		cl_status = proxy_cep_mra( p_context, h_ioctl, p_ret_bytes );
+		break;
+	case UAL_CEP_LAP:
+		cl_status = proxy_cep_lap( p_context, h_ioctl, p_ret_bytes );
+		break;
+	case UAL_CEP_PRE_APR:
+		cl_status = proxy_cep_pre_apr( p_context, h_ioctl, p_ret_bytes );
+		break;
+	case UAL_CEP_SEND_APR:
+		cl_status = proxy_cep_send_apr( p_context, h_ioctl, p_ret_bytes );
+		break;
+	case UAL_CEP_DREQ:
+		cl_status = proxy_cep_dreq( p_context, h_ioctl, p_ret_bytes );
+		break;
+	case UAL_CEP_DREP:
+		cl_status = proxy_cep_drep( p_context, h_ioctl, p_ret_bytes );
+		break;
+	case UAL_CEP_GET_TIMEWAIT:
+		cl_status = proxy_cep_get_timewait( p_context, h_ioctl, p_ret_bytes );
+		break;
+	case UAL_CEP_GET_EVENT:
+		cl_status = proxy_cep_get_event( p_context, h_ioctl, p_ret_bytes );
+		break;
+	case UAL_CEP_POLL:
+		cl_status = proxy_cep_poll( p_context, h_ioctl, p_ret_bytes );
+		break;
+	default:
+		cl_status = CL_INVALID_PARAMETER;
+		break;
+	}
+
+	AL_EXIT( AL_DBG_DEV );
+	return cl_status;
+}
#include "al.h"\r
#include "al_debug.h"\r
#include "al_dev.h"\r
-#include "al_cm.h"\r
+#include "al_cm_cep.h"\r
#include "al_qp.h"\r
#include "al_proxy.h"\r
\r
ual_av.c \\r
ual_ca.c \\r
ual_ci_ca.c \\r
- ual_cm.c \\r
+ ual_cm_cep.c \\r
ual_cq.c \\r
ual_dm.c \\r
ual_mad.c \\r
..\al_av.c \\r
..\al_ca.c \\r
..\al_ci_ca_shared.c \\r
- ..\al_cm_shared.c \\r
+ ..\al_cm_qp.c \\r
..\al_common.c \\r
..\al_cq.c \\r
..\al_dm.c \\r
\r
if( cl_status != CL_SUCCESS )\r
{\r
+ CL_ASSERT( cl_status != CL_PENDING );\r
AL_TRACE( AL_DBG_ERROR,\r
("Error performing IOCTL 0x%08x to AL driver (%s)\n",\r
command, CL_STATUS_MSG(cl_status)) );\r
- return IB_ERROR;\r
+ return CL_ERROR;\r
}\r
\r
AL_EXIT( AL_DBG_DEV );\r
--- /dev/null
+/*\r
+ * Copyright (c) 2005 SilverStorm Technologies. All rights reserved.\r
+ *\r
+ * This software is available to you under the OpenIB.org BSD license\r
+ * below:\r
+ *\r
+ * Redistribution and use in source and binary forms, with or\r
+ * without modification, are permitted provided that the following\r
+ * conditions are met:\r
+ *\r
+ * - Redistributions of source code must retain the above\r
+ * copyright notice, this list of conditions and the following\r
+ * disclaimer.\r
+ *\r
+ * - Redistributions in binary form must reproduce the above\r
+ * copyright notice, this list of conditions and the following\r
+ * disclaimer in the documentation and/or other materials\r
+ * provided with the distribution.\r
+ *\r
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,\r
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\r
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND\r
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS\r
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN\r
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN\r
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\r
+ * SOFTWARE.\r
+ *\r
+ * $Id:$\r
+ */\r
+\r
+\r
+#include <iba/ib_al.h>\r
+#include <complib/cl_ptr_vector.h>\r
+#include <complib/cl_qlist.h>\r
+#include "al_common.h"\r
+#include "al_cm_cep.h"\r
+#include "al_cm_conn.h"\r
+#include "al_cm_sidr.h"\r
+#include "al_debug.h"\r
+#include "ib_common.h"\r
+#include "al_mgr.h"\r
+//#include "al_ca.h"\r
+#include "al.h"\r
+//#include "al_mad.h"\r
+#include "al_qp.h"\r
+\r
+\r
+#define UAL_CEP_MIN (512)\r
+#define UAL_CEP_GROW (256)\r
+\r
+\r
+/* Global connection manager object. */
+typedef struct _ual_cep_mgr
+{
+	/* Base AL object providing lifetime management and locking. */
+	al_obj_t				obj;
+
+	/* Maps CID -> ucep_t* for all CEPs owned by this process. */
+	cl_ptr_vector_t			cep_vector;
+
+	/* File handle on which to issue query IOCTLs. */
+	HANDLE					h_file;
+
+}	ual_cep_mgr_t;
+\r
+\r
+/* Per-connection user-mode CEP tracking structure. */
+typedef struct _al_ucep
+{
+	/* Public CEP info (CID and user context). */
+	ib_cep_t				cep;
+	/* User event callback invoked when a CM MAD arrives. */
+	al_pfn_cep_cb_t			pfn_cb;
+	/* Owning AL instance. */
+	ib_al_handle_t			h_al;
+	/* Links this CEP into h_al->cep_list. */
+	cl_list_item_t			al_item;
+	/* Serializes destruction against the async IOCTL completion path. */
+	cl_spinlock_t			lock;
+
+	/* Optional user notification invoked when the CEP is fully destroyed. */
+	ib_pfn_destroy_cb_t		pfn_destroy_cb;
+
+	/* Overlapped structure for the async UAL_CEP_GET_EVENT IOCTL. */
+	OVERLAPPED				ov;
+	/* Two references: one for the CEP, one for the outstanding IOCTL. */
+	atomic32_t				ref_cnt;
+
+}	ucep_t;
+\r
+\r
+/* Global instance of the CM agent. */\r
+ual_cep_mgr_t *gp_cep_mgr = NULL;\r
+\r
+\r
+/*
+ * Frees the global CEP manager.  Invoked during al_obj destruction.
+ */
+static void
+__free_cep_mgr(
+	IN				al_obj_t*					p_obj )
+{
+	AL_ENTER( AL_DBG_CM );
+
+	CL_ASSERT( &gp_cep_mgr->obj == p_obj );
+
+	/* Close the async CM file handle if it was ever opened. */
+	if( gp_cep_mgr->h_file != INVALID_HANDLE_VALUE )
+		CloseHandle( gp_cep_mgr->h_file );
+
+	cl_ptr_vector_destroy( &gp_cep_mgr->cep_vector );
+
+	destroy_al_obj( p_obj );
+
+	cl_free( gp_cep_mgr );
+	gp_cep_mgr = NULL;
+
+	AL_EXIT( AL_DBG_CM );
+}
+\r
+\r
+/*
+ * Allocates and initializes the global user-mode CM agent.
+ *
+ * Construction order matters: the al_obj is initialized first so that all
+ * later failure paths can funnel through obj.pfn_destroy -> __free_cep_mgr
+ * for a single cleanup path.  Returns IB_SUCCESS or an error status; on
+ * error gp_cep_mgr has been freed and reset to NULL.
+ */
+ib_api_status_t
+create_cep_mgr(
+	IN				al_obj_t* const				p_parent_obj )
+{
+	ib_api_status_t		status;
+	cl_status_t			cl_status;
+
+	AL_ENTER( AL_DBG_CM );
+
+	CL_ASSERT( gp_cep_mgr == NULL );
+
+	/* Allocate the global CM agent. */
+	gp_cep_mgr = (ual_cep_mgr_t*)cl_zalloc( sizeof(ual_cep_mgr_t) );
+	if( !gp_cep_mgr )
+	{
+		AL_TRACE_EXIT( AL_DBG_ERROR,
+			("Failed allocation of global CEP manager.\n") );
+		return IB_INSUFFICIENT_MEMORY;
+	}
+
+	construct_al_obj( &gp_cep_mgr->obj, AL_OBJ_TYPE_CM );
+	cl_ptr_vector_construct( &gp_cep_mgr->cep_vector );
+	gp_cep_mgr->h_file = INVALID_HANDLE_VALUE;
+
+	status = init_al_obj( &gp_cep_mgr->obj, NULL, FALSE,
+		NULL, NULL, __free_cep_mgr );
+	if( status != IB_SUCCESS )
+	{
+		/* init failed, so destroy directly rather than via pfn_destroy. */
+		__free_cep_mgr( &gp_cep_mgr->obj );
+		AL_TRACE_EXIT( AL_DBG_ERROR,
+			("init_al_obj failed with status %s.\n", ib_get_err_str(status)) );
+		return status;
+	}
+	/* Attach to the parent object. */
+	status = attach_al_obj( p_parent_obj, &gp_cep_mgr->obj );
+	if( status != IB_SUCCESS )
+	{
+		gp_cep_mgr->obj.pfn_destroy( &gp_cep_mgr->obj, NULL );
+		AL_TRACE_EXIT( AL_DBG_ERROR,
+			("attach_al_obj returned %s.\n", ib_get_err_str(status)) );
+		return status;
+	}
+
+	cl_status = cl_ptr_vector_init(
+		&gp_cep_mgr->cep_vector, UAL_CEP_MIN, UAL_CEP_GROW );
+	if( cl_status != CL_SUCCESS )
+	{
+		gp_cep_mgr->obj.pfn_destroy( &gp_cep_mgr->obj, NULL );
+		AL_TRACE_EXIT( AL_DBG_ERROR,
+			("cl_vector_init failed with status %s.\n",
+			CL_STATUS_MSG(cl_status)) );
+		return ib_convert_cl_status( cl_status );
+	}
+
+	/* Create a file object on which to issue all CM requests. */
+	gp_cep_mgr->h_file = ual_create_async_file( UAL_BIND_CM );
+	if( gp_cep_mgr->h_file == INVALID_HANDLE_VALUE )
+	{
+		gp_cep_mgr->obj.pfn_destroy( &gp_cep_mgr->obj, NULL );
+		AL_TRACE_EXIT( AL_DBG_ERROR,
+			("ual_create_async_file for UAL_BIND_CM returned %d.\n",
+			GetLastError()) );
+		return IB_ERROR;
+	}
+
+	/* Release the reference from init_al_obj */
+	deref_al_obj( &gp_cep_mgr->obj );
+
+	AL_EXIT( AL_DBG_CM );
+	return IB_SUCCESS;
+}
+\r
+\r
+/*
+ * Destroys every CEP still bound to the given AL instance.  Called when
+ * the AL instance is torn down so no connections are leaked.
+ */
+void
+al_cep_cleanup_al(
+	IN		const	ib_al_handle_t				h_al )
+{
+	cl_list_item_t		*p_item;
+	net32_t				cid;
+
+	AL_ENTER( AL_DBG_CM );
+
+	/* Destroy all CEPs associated with the input instance of AL. */
+	cl_spinlock_acquire( &h_al->obj.lock );
+	for( p_item = cl_qlist_head( &h_al->cep_list );
+		p_item != cl_qlist_end( &h_al->cep_list );
+		p_item = cl_qlist_head( &h_al->cep_list ) )
+	{
+		/*
+		 * Note that we don't walk the list - we can't hold the AL
+		 * lock when cleaning up its CEPs because the cleanup path
+		 * takes the CEP's lock.  We always want to take the CEP
+		 * before the AL lock to prevent any possibilities of deadlock.
+		 *
+		 * So we just get the CID, and then release the AL lock and try to
+		 * destroy.  This should unbind the CEP from the AL instance and
+		 * remove it from the list, allowing the next CEP to be cleaned up
+		 * in the next pass through.
+		 */
+		cid = PARENT_STRUCT( p_item, ucep_t, al_item )->cep.cid;
+		cl_spinlock_release( &h_al->obj.lock );
+		al_destroy_cep( h_al, cid, NULL );
+		cl_spinlock_acquire( &h_al->obj.lock );
+	}
+	cl_spinlock_release( &h_al->obj.lock );
+
+	AL_EXIT( AL_DBG_CM );
+}
+\r
+\r
+/*
+ * Final teardown of a ucep_t: destroys its lock, invokes the user's
+ * destroy notification (if registered), and frees the memory.  Must only
+ * be called once the reference count has reached zero.
+ */
+static void
+__destroy_ucep(
+	IN				ucep_t* const				p_cep )
+{
+	cl_spinlock_destroy( &p_cep->lock );
+	if( p_cep->pfn_destroy_cb )
+		p_cep->pfn_destroy_cb( p_cep->cep.context );
+	cl_free( p_cep );
+}
+\r
+\r
+/*
+ * Creates a user-mode CEP tracking structure and, when cid is
+ * AL_INVALID_CID, a matching kernel CEP via UAL_CREATE_CEP.  On success the
+ * CEP is registered in the global CID vector and the owning AL instance's
+ * list, an async UAL_CEP_GET_EVENT IOCTL is outstanding, and *p_cid (if
+ * provided) receives the CID.
+ */
+ib_api_status_t
+__create_ucep(
+	IN				ib_al_handle_t				h_al,
+	IN				net32_t						cid,
+	IN				al_pfn_cep_cb_t				pfn_cb,
+	IN				void						*context,
+		OUT			net32_t* const				p_cid )
+{
+	ucep_t					*p_cep;
+	DWORD					bytes_ret;
+	ual_create_cep_ioctl_t	ioctl;
+
+	AL_ENTER( AL_DBG_CM );
+
+	p_cep = cl_zalloc( sizeof(ucep_t) );
+	if( !p_cep )
+	{
+		AL_TRACE_EXIT( AL_DBG_ERROR, ("Failed to allocate ucep_t\n") );
+		return IB_INSUFFICIENT_MEMORY;
+	}
+
+	cl_spinlock_construct( &p_cep->lock );
+
+	if( cl_spinlock_init( &p_cep->lock ) != CL_SUCCESS )
+	{
+		__destroy_ucep( p_cep );
+		AL_TRACE_EXIT( AL_DBG_ERROR, ("Failed to initialize event.\n") );
+		return IB_INSUFFICIENT_MEMORY;
+	}
+
+	/* Initialize to two - one for the CEP, and one for the IOCTL. */
+	p_cep->ref_cnt = 2;
+
+	/* Store user parameters. */
+	p_cep->pfn_cb = pfn_cb;
+	p_cep->cep.context = context;
+
+	/* Create a kernel CEP only if we don't already have a CID. */
+	if( cid == AL_INVALID_CID )
+	{
+		if( !DeviceIoControl( g_al_device, UAL_CREATE_CEP, NULL, 0,
+			&ioctl, sizeof(ioctl), &bytes_ret, NULL ) ||
+			bytes_ret != sizeof(ioctl) )
+		{
+			__destroy_ucep( p_cep );
+			AL_TRACE_EXIT( AL_DBG_ERROR,
+				("UAL_CREATE_CEP IOCTL failed with %d.\n", GetLastError()) );
+			return IB_ERROR;
+		}
+
+		if( ioctl.status != IB_SUCCESS )
+		{
+			__destroy_ucep( p_cep );
+			AL_TRACE_EXIT( AL_DBG_ERROR, ("UAL_CREATE_CEP IOCTL returned %s\n",
+				ib_get_err_str( ioctl.status )) );
+			return ioctl.status;
+		}
+
+		p_cep->cep.cid = ioctl.cid;
+	}
+	else
+	{
+		p_cep->cep.cid = cid;
+	}
+
+	/* Track the CEP before we issue any further IOCTLs on it. */
+	cl_spinlock_acquire( &gp_cep_mgr->obj.lock );
+	cl_ptr_vector_set_min_size( &gp_cep_mgr->cep_vector, p_cep->cep.cid + 1 );
+	CL_ASSERT( !cl_ptr_vector_get( &gp_cep_mgr->cep_vector, p_cep->cep.cid ) );
+	cl_ptr_vector_set( &gp_cep_mgr->cep_vector, p_cep->cep.cid, p_cep );
+	cl_spinlock_release( &gp_cep_mgr->obj.lock );
+
+	/*
+	 * Now issue a poll request.  This request is async.
+	 *
+	 * NOTE(review): this treats a TRUE return (synchronous completion of the
+	 * overlapped IOCTL) as a failure, expecting FALSE + ERROR_IO_PENDING
+	 * only — confirm the driver never completes UAL_CEP_GET_EVENT inline.
+	 */
+	if( DeviceIoControl( gp_cep_mgr->h_file, UAL_CEP_GET_EVENT,
+		&p_cep->cep.cid, sizeof(p_cep->cep.cid),
+		NULL, 0, NULL, &p_cep->ov ) ||
+		GetLastError() != ERROR_IO_PENDING )
+	{
+		AL_TRACE( AL_DBG_ERROR, ("Failed to issue CEP poll IOCTL.\n") );
+		/* Undo the vector registration and destroy the kernel CEP. */
+		cl_spinlock_acquire( &gp_cep_mgr->obj.lock );
+		cl_ptr_vector_set( &gp_cep_mgr->cep_vector, p_cep->cep.cid, NULL );
+		cl_spinlock_release( &gp_cep_mgr->obj.lock );
+
+		DeviceIoControl( g_al_device, UAL_DESTROY_CEP, &p_cep->cep.cid,
+			sizeof(p_cep->cep.cid), NULL, 0, &bytes_ret, NULL );
+
+		__destroy_ucep( p_cep );
+		AL_EXIT( AL_DBG_CM );
+		return IB_ERROR;
+	}
+
+	p_cep->h_al = h_al;
+
+	/* Track the CEP in its owning AL instance. */
+	cl_spinlock_acquire( &h_al->obj.lock );
+	cl_qlist_insert_tail( &h_al->cep_list, &p_cep->al_item );
+	cl_spinlock_release( &h_al->obj.lock );
+
+	if( p_cid )
+		*p_cid = p_cep->cep.cid;
+
+	AL_EXIT( AL_DBG_CM );
+	return IB_SUCCESS;
+}
+\r
+\r
+/*
+ * Public entry point for creating a CEP: always allocates a new kernel CEP
+ * (AL_INVALID_CID) via __create_ucep and returns its CID in *p_cid.
+ */
+ib_api_status_t
+al_create_cep(
+	IN				ib_al_handle_t				h_al,
+	IN				al_pfn_cep_cb_t				pfn_cb,
+	IN				void						*context,
+		OUT			net32_t* const				p_cid )
+{
+	ib_api_status_t	status;
+
+	AL_ENTER( AL_DBG_CM );
+
+	status = __create_ucep( h_al, AL_INVALID_CID, pfn_cb, context, p_cid );
+
+	AL_EXIT( AL_DBG_CM );
+	return status;
+}
+\r
+\r
+/*
+ * Note that destroy_cep is synchronous.  It does however handle the case
+ * where a user calls it from a callback context.
+ *
+ * Looks up the CEP by CID (verifying AL ownership), unregisters it from
+ * the global vector and the AL instance list, destroys the kernel CEP,
+ * and drops this caller's reference.  The last reference (held by the
+ * outstanding GET_EVENT IOCTL or by this caller) frees the ucep_t.
+ */
+ib_api_status_t
+al_destroy_cep(
+	IN				ib_al_handle_t				h_al,
+	IN				net32_t						cid,
+	IN				ib_pfn_destroy_cb_t			pfn_destroy_cb OPTIONAL )
+{
+	ucep_t			*p_cep;
+	DWORD			bytes_ret;
+	int32_t			ref_cnt;
+
+	AL_ENTER( AL_DBG_CM );
+
+	CL_ASSERT( h_al );
+
+	/* Claim the CEP atomically: only the caller that owns it may destroy. */
+	cl_spinlock_acquire( &gp_cep_mgr->obj.lock );
+	if( cid < cl_ptr_vector_get_size( &gp_cep_mgr->cep_vector ) )
+	{
+		p_cep = cl_ptr_vector_get( &gp_cep_mgr->cep_vector, cid );
+		if( p_cep && p_cep->h_al == h_al )
+			cl_ptr_vector_set( &gp_cep_mgr->cep_vector, cid, NULL );
+		else
+			p_cep = NULL;
+	}
+	else
+	{
+		p_cep = NULL;
+	}
+	cl_spinlock_release( &gp_cep_mgr->obj.lock );
+
+	if( !p_cep )
+	{
+		AL_EXIT( AL_DBG_CM );
+		return IB_INVALID_PARAMETER;
+	}
+
+	p_cep->pfn_destroy_cb = pfn_destroy_cb;
+
+	/*
+	 * Remove from the AL instance.  Note that once removed, all
+	 * callbacks for an item will stop.
+	 */
+	cl_spinlock_acquire( &h_al->obj.lock );
+	cl_qlist_remove_item( &h_al->cep_list, &p_cep->al_item );
+	cl_spinlock_release( &h_al->obj.lock );
+
+	/*
+	 * Decrement the reference count so that we stop issuing IOCTLs.  Note
+	 * that we must serialize with the IOCTL completion handler to close a
+	 * race where the IOCTL completion handler could issue the next IOCTL
+	 * and have its request be passed by this one.
+	 */
+	cl_spinlock_acquire( &p_cep->lock );
+	ref_cnt = cl_atomic_dec( &p_cep->ref_cnt );
+
+	/* Destroy the kernel CEP right away. */
+	DeviceIoControl( g_al_device, UAL_DESTROY_CEP, &p_cep->cep.cid,
+		sizeof(p_cep->cep.cid), NULL, 0, &bytes_ret, NULL );
+	cl_spinlock_release( &p_cep->lock );
+
+	if( !ref_cnt )
+	{
+		/* We have no remaining refrences. */
+		__destroy_ucep( p_cep );
+	}
+
+	AL_EXIT( AL_DBG_CM );
+	return IB_SUCCESS;
+}
+\r
+\r
+/*
+ * Puts the given CEP into the listening state via the UAL_CEP_LISTEN
+ * IOCTL.  Optional compare data is copied into the IOCTL's embedded buffer
+ * so the kernel never touches user memory directly.
+ *
+ * NOTE(review): ioctl.cep_listen.p_cmp_buf still holds the user-mode
+ * pointer after the struct copy; presumably the kernel handler rebases it
+ * onto ioctl.compare — verify against the kernel-side proxy.
+ */
+ib_api_status_t
+al_cep_listen(
+	IN				ib_al_handle_t				h_al,
+	IN				net32_t						cid,
+	IN				ib_cep_listen_t* const		p_listen_info )
+{
+	ual_cep_listen_ioctl_t	ioctl;
+	ib_api_status_t			status;
+	DWORD					bytes_ret;
+
+	AL_ENTER( AL_DBG_CM );
+
+	if( !h_al )
+	{
+		AL_EXIT( AL_DBG_CM );
+		return IB_INVALID_HANDLE;
+	}
+
+	if( !p_listen_info )
+	{
+		AL_EXIT( AL_DBG_CM );
+		return IB_INVALID_PARAMETER;
+	}
+
+	ioctl.cid = cid;
+	ioctl.cep_listen = *p_listen_info;
+	if( p_listen_info->p_cmp_buf )
+	{
+		/* Compare data must fit in the REQ private data area. */
+		if( p_listen_info->cmp_len > IB_REQ_PDATA_SIZE )
+		{
+			AL_TRACE_EXIT( AL_DBG_ERROR,
+				("Listen compare data larger than REQ private data.\n") );
+			return IB_INVALID_SETTING;
+		}
+
+		cl_memcpy( ioctl.compare, p_listen_info->p_cmp_buf,
+			p_listen_info->cmp_len );
+	}
+
+	if( !DeviceIoControl( g_al_device, UAL_CEP_LISTEN, &ioctl,
+		sizeof(ioctl), &status, sizeof(status), &bytes_ret, NULL ) ||
+		bytes_ret != sizeof(status) )
+	{
+		AL_TRACE_EXIT( AL_DBG_ERROR,
+			("ual_cep_listen IOCTL failed with %d.\n", GetLastError()) );
+		return IB_ERROR;
+	}
+
+	AL_EXIT( AL_DBG_CM );
+	return status;
+}
+\r
+\r
+/*
+ * Formats the REQ MAD for an outbound connection and retrieves the
+ * QP modify attributes needed to transition the QP to INIT, via the
+ * UAL_CEP_PRE_REQ IOCTL.  The REQ is not sent until al_cep_send_req.
+ *
+ * Returns IB_INVALID_HANDLE if h_al is NULL, IB_INVALID_PARAMETER for
+ * NULL pointers, IB_INVALID_SETTING for oversized buffers, IB_ERROR on
+ * IOCTL failure, otherwise the kernel status (p_init valid on success).
+ */
+ib_api_status_t
+al_cep_pre_req(
+	IN				ib_al_handle_t				h_al,
+	IN				net32_t						cid,
+	IN		const	ib_cm_req_t* const			p_cm_req,
+		OUT			ib_qp_mod_t* const			p_init )
+{
+	ual_cep_req_ioctl_t		ioctl;
+	DWORD					bytes_ret;
+
+	AL_ENTER( AL_DBG_CM );
+
+	if( !h_al )
+	{
+		AL_EXIT( AL_DBG_CM );
+		return IB_INVALID_HANDLE;
+	}
+
+	if( !p_cm_req )
+	{
+		AL_EXIT( AL_DBG_CM );
+		return IB_INVALID_PARAMETER;
+	}
+
+	if( !p_init )
+	{
+		AL_EXIT( AL_DBG_ERROR );
+		return IB_INVALID_PARAMETER;
+	}
+
+	ioctl.in.cid = cid;
+	ioctl.in.cm_req = *p_cm_req;
+	/* Swap the user-mode QP handle for the kernel handle it shadows. */
+	ioctl.in.cm_req.h_qp = (ib_qp_handle_t)p_cm_req->h_qp->obj.hdl;
+	ioctl.in.paths[0] = *(p_cm_req->p_primary_path);
+	/*
+	 * paths[1] is left uninitialized when there is no alternate path —
+	 * presumably the kernel ignores it when cm_req.p_alt_path is NULL.
+	 * NOTE(review): confirm against the kernel UAL_CEP_PRE_REQ handler.
+	 */
+	if( p_cm_req->p_alt_path )
+		ioctl.in.paths[1] = *(p_cm_req->p_alt_path);
+	/* Copy private data, if any. */
+	if( p_cm_req->p_req_pdata )
+	{
+		if( p_cm_req->req_length > IB_REQ_PDATA_SIZE )
+		{
+			AL_TRACE_EXIT( AL_DBG_ERROR,
+				("private data larger than REQ private data.\n") );
+			return IB_INVALID_SETTING;
+		}
+
+		cl_memcpy( ioctl.in.pdata, p_cm_req->p_req_pdata,
+			p_cm_req->req_length );
+	}
+
+	/* Copy compare data, if any. */
+	if( p_cm_req->p_compare_buffer )
+	{
+		if( p_cm_req->compare_length > IB_REQ_PDATA_SIZE )
+		{
+			AL_TRACE_EXIT( AL_DBG_ERROR,
+				("REQ compare data larger than REQ private data.\n") );
+			return IB_INVALID_SETTING;
+		}
+
+		cl_memcpy( ioctl.in.compare, p_cm_req->p_compare_buffer,
+			p_cm_req->compare_length );
+	}
+
+	/* ioctl is an in/out union; only the out portion is read back. */
+	if( !DeviceIoControl( g_al_device, UAL_CEP_PRE_REQ, &ioctl,
+		sizeof(ioctl.in), &ioctl, sizeof(ioctl.out), &bytes_ret, NULL ) ||
+		bytes_ret != sizeof(ioctl.out) )
+	{
+		AL_TRACE_EXIT( AL_DBG_ERROR,
+			("UAL_CEP_PRE_REQ IOCTL failed with %d.\n", GetLastError()) );
+		return IB_ERROR;
+	}
+
+	if( ioctl.out.status == IB_SUCCESS )
+		*p_init = ioctl.out.init;
+
+	AL_EXIT( AL_DBG_CM );
+	return ioctl.out.status;
+}
+\r
+\r
+/*
+ * Sends the REQ previously formatted by al_cep_pre_req for the given
+ * CEP.  Returns IB_INVALID_HANDLE for a NULL AL handle, IB_ERROR on
+ * IOCTL failure, otherwise the status reported by the kernel.
+ */
+ib_api_status_t
+al_cep_send_req(
+	IN				ib_al_handle_t				h_al,
+	IN				net32_t						cid )
+{
+	DWORD				bytes_ret;
+	ib_api_status_t		status;
+
+	AL_ENTER( AL_DBG_CM );
+
+	if( !h_al )
+	{
+		AL_EXIT( AL_DBG_CM );
+		return IB_INVALID_HANDLE;
+	}
+
+	/* The kernel reports the operation status via the output buffer. */
+	if( DeviceIoControl( g_al_device, UAL_CEP_SEND_REQ, &cid,
+		sizeof(cid), &status, sizeof(status), &bytes_ret, NULL ) &&
+		bytes_ret == sizeof(status) )
+	{
+		AL_EXIT( AL_DBG_CM );
+		return status;
+	}
+
+	AL_TRACE_EXIT( AL_DBG_ERROR,
+		("UAL_CEP_SEND_REQ IOCTL failed with %d.\n", GetLastError()) );
+	return IB_ERROR;
+}
+\r
+\r
+/*
+ * Formats the REP MAD for an incoming connection request and retrieves
+ * the QP modify attributes for the INIT transition, via the
+ * UAL_CEP_PRE_REP IOCTL.  Also stores the caller's context on the CEP.
+ *
+ * Returns IB_INVALID_HANDLE if h_al is NULL, IB_INVALID_PARAMETER for
+ * NULL pointers or an unknown cid, IB_INVALID_SETTING for oversized
+ * private data, IB_ERROR on IOCTL failure, else the kernel status.
+ */
+ib_api_status_t
+al_cep_pre_rep(
+	IN				ib_al_handle_t				h_al,
+	IN				net32_t						cid,
+	IN				void						*context,
+	IN		const	ib_cm_rep_t* const			p_cm_rep,
+		OUT			ib_qp_mod_t* const			p_init )
+{
+	ucep_t					*p_cep;
+	ual_cep_rep_ioctl_t		ioctl;
+	DWORD					bytes_ret;
+
+	AL_ENTER( AL_DBG_CM );
+
+	if( !h_al )
+	{
+		AL_EXIT( AL_DBG_CM );
+		return IB_INVALID_HANDLE;
+	}
+
+	if( !p_cm_rep )
+	{
+		AL_EXIT( AL_DBG_CM );
+		return IB_INVALID_PARAMETER;
+	}
+
+	if( !p_init )
+	{
+		AL_EXIT( AL_DBG_ERROR );
+		return IB_INVALID_PARAMETER;
+	}
+
+	/* Store the context for the CEP. */
+	cl_spinlock_acquire( &gp_cep_mgr->obj.lock );
+	/* BUGFIX: bound-check cid before indexing, as all other lookups do. */
+	if( cid < cl_ptr_vector_get_size( &gp_cep_mgr->cep_vector ) )
+		p_cep = cl_ptr_vector_get( &gp_cep_mgr->cep_vector, cid );
+	else
+		p_cep = NULL;
+	if( !p_cep )
+	{
+		cl_spinlock_release( &gp_cep_mgr->obj.lock );
+		AL_EXIT( AL_DBG_ERROR );
+		return IB_INVALID_PARAMETER;
+	}
+	p_cep->cep.context = context;
+	cl_spinlock_release( &gp_cep_mgr->obj.lock );
+
+	ioctl.in.cid = cid;
+	ioctl.in.cm_rep = *p_cm_rep;
+	/* Swap the user-mode QP handle for the kernel handle it shadows. */
+	ioctl.in.cm_rep.h_qp = (ib_qp_handle_t)p_cm_rep->h_qp->obj.hdl;
+	/* Copy private data, if any. */
+	if( p_cm_rep->p_rep_pdata )
+	{
+		if( p_cm_rep->rep_length > IB_REP_PDATA_SIZE )
+		{
+			AL_TRACE_EXIT( AL_DBG_ERROR,
+				("private data larger than REP private data.\n") );
+			return IB_INVALID_SETTING;
+		}
+
+		cl_memcpy( ioctl.in.pdata, p_cm_rep->p_rep_pdata,
+			p_cm_rep->rep_length );
+	}
+
+	if( !DeviceIoControl( g_al_device, UAL_CEP_PRE_REP, &ioctl,
+		sizeof(ioctl.in), &ioctl, sizeof(ioctl.out), &bytes_ret, NULL ) ||
+		bytes_ret != sizeof(ioctl.out) )
+	{
+		/* BUGFIX: trace reported the wrong IOCTL name (was PRE_REQ). */
+		AL_TRACE_EXIT( AL_DBG_ERROR,
+			("UAL_CEP_PRE_REP IOCTL failed with %d.\n", GetLastError()) );
+		return IB_ERROR;
+	}
+
+	if( ioctl.out.status == IB_SUCCESS )
+		*p_init = ioctl.out.init;
+
+	AL_EXIT( AL_DBG_CM );
+	return ioctl.out.status;
+}
+\r
+\r
+/*
+ * Sends the REP previously formatted by al_cep_pre_rep for the given
+ * CEP.  Returns IB_INVALID_HANDLE for a NULL AL handle, IB_ERROR on
+ * IOCTL failure, otherwise the status reported by the kernel.
+ */
+ib_api_status_t
+al_cep_send_rep(
+	IN				ib_al_handle_t				h_al,
+	IN				net32_t						cid )
+{
+	DWORD				bytes_ret;
+	ib_api_status_t		status;
+
+	AL_ENTER( AL_DBG_CM );
+
+	if( !h_al )
+	{
+		AL_EXIT( AL_DBG_CM );
+		return IB_INVALID_HANDLE;
+	}
+
+	/* The kernel reports the operation status via the output buffer. */
+	if( DeviceIoControl( g_al_device, UAL_CEP_SEND_REP, &cid,
+		sizeof(cid), &status, sizeof(status), &bytes_ret, NULL ) &&
+		bytes_ret == sizeof(status) )
+	{
+		AL_EXIT( AL_DBG_CM );
+		return status;
+	}
+
+	AL_TRACE_EXIT( AL_DBG_ERROR,
+		("UAL_CEP_SEND_REP IOCTL failed with %d.\n", GetLastError()) );
+	return IB_ERROR;
+}
+\r
+\r
+/*
+ * Retrieves the QP modify attributes needed to transition the QP to
+ * RTR for the given CEP, via the UAL_CEP_GET_RTR IOCTL.
+ * On IB_SUCCESS, *p_rtr receives the attributes.
+ */
+ib_api_status_t
+al_cep_get_rtr_attr(
+	IN				ib_al_handle_t				h_al,
+	IN				net32_t						cid,
+		OUT			ib_qp_mod_t* const			p_rtr )
+{
+	DWORD						bytes_ret;
+	ual_cep_get_rtr_ioctl_t		ioctl;
+
+	AL_ENTER( AL_DBG_CM );
+
+	if( !h_al )
+	{
+		AL_EXIT( AL_DBG_CM );
+		return IB_INVALID_HANDLE;
+	}
+
+	if( !p_rtr )
+	{
+		AL_EXIT( AL_DBG_ERROR );
+		return IB_INVALID_PARAMETER;
+	}
+
+	if( DeviceIoControl( g_al_device, UAL_CEP_GET_RTR, &cid,
+		sizeof(cid), &ioctl, sizeof(ioctl), &bytes_ret, NULL ) &&
+		bytes_ret == sizeof(ioctl) )
+	{
+		/* Only copy the attributes out if the kernel succeeded. */
+		if( ioctl.status == IB_SUCCESS )
+			*p_rtr = ioctl.rtr;
+
+		AL_EXIT( AL_DBG_CM );
+		return ioctl.status;
+	}
+
+	AL_TRACE_EXIT( AL_DBG_ERROR,
+		("UAL_CEP_GET_RTR IOCTL failed with %d.\n", GetLastError()) );
+	return IB_ERROR;
+}
+\r
+\r
+/*
+ * Retrieves the QP modify attributes needed to transition the QP to
+ * RTS for the given CEP, via the UAL_CEP_GET_RTS IOCTL.
+ * On IB_SUCCESS, *p_rts receives the attributes.
+ */
+ib_api_status_t
+al_cep_get_rts_attr(
+	IN				ib_al_handle_t				h_al,
+	IN				net32_t						cid,
+		OUT			ib_qp_mod_t* const			p_rts )
+{
+	DWORD						bytes_ret;
+	ual_cep_get_rts_ioctl_t		ioctl;
+
+	AL_ENTER( AL_DBG_CM );
+
+	if( !h_al )
+	{
+		AL_EXIT( AL_DBG_CM );
+		return IB_INVALID_HANDLE;
+	}
+
+	if( !p_rts )
+	{
+		AL_EXIT( AL_DBG_ERROR );
+		return IB_INVALID_PARAMETER;
+	}
+
+	if( DeviceIoControl( g_al_device, UAL_CEP_GET_RTS, &cid,
+		sizeof(cid), &ioctl, sizeof(ioctl), &bytes_ret, NULL ) &&
+		bytes_ret == sizeof(ioctl) )
+	{
+		/* Only copy the attributes out if the kernel succeeded. */
+		if( ioctl.status == IB_SUCCESS )
+			*p_rts = ioctl.rts;
+
+		AL_EXIT( AL_DBG_CM );
+		return ioctl.status;
+	}
+
+	AL_TRACE_EXIT( AL_DBG_ERROR,
+		("UAL_CEP_GET_RTS IOCTL failed with %d.\n", GetLastError()) );
+	return IB_ERROR;
+}
+\r
+\r
+/*
+ * Sends an RTU for the given CEP, with optional private data, via the
+ * UAL_CEP_RTU IOCTL.
+ *
+ * Returns IB_INVALID_HANDLE if h_al is NULL, IB_INVALID_SETTING if the
+ * private data exceeds IB_RTU_PDATA_SIZE, IB_ERROR on IOCTL failure,
+ * otherwise the status reported by the kernel.
+ */
+ib_api_status_t
+al_cep_rtu(
+	IN				ib_al_handle_t				h_al,
+	IN				net32_t						cid,
+	IN		const	uint8_t*					p_pdata OPTIONAL,
+	IN				uint8_t						pdata_len )
+{
+	ib_api_status_t			status;
+	ual_cep_rtu_ioctl_t		ioctl;
+	DWORD					bytes_ret;
+
+	AL_ENTER( AL_DBG_CM );
+
+	if( !h_al )
+	{
+		AL_EXIT( AL_DBG_CM );
+		return IB_INVALID_HANDLE;
+	}
+
+	ioctl.cid = cid;
+	/* Copy private data, if any. */
+	if( p_pdata )
+	{
+		if( pdata_len > IB_RTU_PDATA_SIZE )
+		{
+			AL_TRACE_EXIT( AL_DBG_ERROR,
+				("private data larger than RTU private data.\n") );
+			return IB_INVALID_SETTING;
+		}
+
+		cl_memcpy( ioctl.pdata, p_pdata, pdata_len );
+		ioctl.pdata_len = pdata_len;
+	}
+	else
+	{
+		/*
+		 * BUGFIX: previously pdata_len was passed through even with no
+		 * buffer, making the kernel consume uninitialized ioctl.pdata
+		 * bytes.  Match the REJ/DREQ handling and report zero length.
+		 */
+		ioctl.pdata_len = 0;
+	}
+
+	if( !DeviceIoControl( g_al_device, UAL_CEP_RTU, &ioctl,
+		sizeof(ioctl), &status, sizeof(status), &bytes_ret, NULL ) ||
+		bytes_ret != sizeof(status) )
+	{
+		AL_TRACE_EXIT( AL_DBG_ERROR,
+			("UAL_CEP_RTU IOCTL failed with %d.\n", GetLastError()) );
+		return IB_ERROR;
+	}
+
+	AL_EXIT( AL_DBG_CM );
+	return status;
+}
+\r
+\r
+/*
+ * Sends a REJ for the given CEP, with optional ARI (additional reject
+ * info) and private data, via the UAL_CEP_REJ IOCTL.
+ *
+ * Returns IB_INVALID_HANDLE if h_al is NULL, IB_INVALID_SETTING if the
+ * ARI or private data is oversized, IB_ERROR on IOCTL failure,
+ * otherwise the status reported by the kernel.
+ */
+ib_api_status_t
+al_cep_rej(
+	IN				ib_al_handle_t				h_al,
+	IN				net32_t						cid,
+	IN				ib_rej_status_t				rej_status,
+	IN		const	uint8_t* const				p_ari,
+	IN				uint8_t						ari_len,
+	IN		const	uint8_t* const				p_pdata,
+	IN				uint8_t						pdata_len )
+{
+	ib_api_status_t			status;
+	ual_cep_rej_ioctl_t		ioctl;
+	DWORD					bytes_ret;
+
+	AL_ENTER( AL_DBG_CM );
+
+	if( !h_al )
+	{
+		AL_EXIT( AL_DBG_CM );
+		return IB_INVALID_HANDLE;
+	}
+
+	ioctl.cid = cid;
+	ioctl.rej_status = rej_status;
+	/* Copy the ARI, if any. */
+	if( p_ari )
+	{
+		if( ari_len > IB_ARI_SIZE )
+		{
+			AL_TRACE_EXIT( AL_DBG_ERROR,
+				("private data larger than REJ ARI data.\n") );
+			return IB_INVALID_SETTING;
+		}
+
+		cl_memcpy( ioctl.ari, p_ari, ari_len );
+		ioctl.ari_len = ari_len;
+	}
+	else
+	{
+		ioctl.ari_len = 0;
+	}
+	/* Copy private data, if any. */
+	if( p_pdata)
+	{
+		if( pdata_len > IB_REJ_PDATA_SIZE )
+		{
+			AL_TRACE_EXIT( AL_DBG_ERROR,
+				("private data larger than REJ private data.\n") );
+			return IB_INVALID_SETTING;
+		}
+
+		cl_memcpy( ioctl.pdata, p_pdata, pdata_len );
+		ioctl.pdata_len = pdata_len;
+	}
+	else
+	{
+		ioctl.pdata_len = 0;
+	}
+
+	if( !DeviceIoControl( g_al_device, UAL_CEP_REJ, &ioctl,
+		sizeof(ioctl), &status, sizeof(status), &bytes_ret, NULL ) ||
+		bytes_ret != sizeof(status) )
+	{
+		/* BUGFIX: trace reported the wrong IOCTL name (was PRE_REQ). */
+		AL_TRACE_EXIT( AL_DBG_ERROR,
+			("UAL_CEP_REJ IOCTL failed with %d.\n", GetLastError()) );
+		return IB_ERROR;
+	}
+
+	AL_EXIT( AL_DBG_CM );
+	return status;
+}
+\r
+\r
+/*
+ * Sends an MRA (message receipt acknowledgement) for the given CEP,
+ * with optional private data, via the UAL_CEP_MRA IOCTL.
+ *
+ * Returns IB_INVALID_HANDLE if h_al is NULL, IB_INVALID_PARAMETER if
+ * p_cm_mra is NULL, IB_INVALID_SETTING for oversized private data,
+ * IB_ERROR on IOCTL failure, otherwise the kernel status.
+ */
+ib_api_status_t
+al_cep_mra(
+	IN				ib_al_handle_t				h_al,
+	IN				net32_t						cid,
+	IN		const	ib_cm_mra_t* const			p_cm_mra )
+{
+	ib_api_status_t			status;
+	ual_cep_mra_ioctl_t		ioctl;
+	DWORD					bytes_ret;
+
+	AL_ENTER( AL_DBG_CM );
+
+	if( !h_al )
+	{
+		AL_EXIT( AL_DBG_CM );
+		return IB_INVALID_HANDLE;
+	}
+
+	if( !p_cm_mra )
+	{
+		AL_EXIT( AL_DBG_CM );
+		/* BUGFIX: a NULL pointer argument is an invalid parameter,
+		 * not an invalid handle (matches the other CEP entry points). */
+		return IB_INVALID_PARAMETER;
+	}
+
+	ioctl.cid = cid;
+	ioctl.cm_mra = *p_cm_mra;
+	/* Copy private data, if any. */
+	if( p_cm_mra->p_mra_pdata )
+	{
+		if( p_cm_mra->mra_length > IB_MRA_PDATA_SIZE )
+		{
+			AL_TRACE_EXIT( AL_DBG_ERROR,
+				("private data larger than MRA private data.\n") );
+			return IB_INVALID_SETTING;
+		}
+
+		cl_memcpy(
+			ioctl.pdata, p_cm_mra->p_mra_pdata, p_cm_mra->mra_length );
+	}
+
+	if( !DeviceIoControl( g_al_device, UAL_CEP_MRA, &ioctl,
+		sizeof(ioctl), &status, sizeof(status), &bytes_ret, NULL ) ||
+		bytes_ret != sizeof(status) )
+	{
+		AL_TRACE_EXIT( AL_DBG_ERROR,
+			("UAL_CEP_MRA IOCTL failed with %d.\n", GetLastError()) );
+		return IB_ERROR;
+	}
+
+	AL_EXIT( AL_DBG_CM );
+	return status;
+}
+\r
+\r
+/*
+ * Sends a LAP (load alternate path) for the given CEP, via the
+ * UAL_CEP_LAP IOCTL.  An alternate path is required.
+ *
+ * Returns IB_INVALID_HANDLE if h_al is NULL, IB_INVALID_PARAMETER if
+ * p_cm_lap or its alternate path is NULL, IB_INVALID_SETTING for
+ * oversized private data, IB_ERROR on IOCTL failure, otherwise the
+ * kernel status.
+ */
+ib_api_status_t
+al_cep_lap(
+	IN				ib_al_handle_t				h_al,
+	IN				net32_t						cid,
+	IN		const	ib_cm_lap_t* const			p_cm_lap )
+{
+	ib_api_status_t			status;
+	ual_cep_lap_ioctl_t		ioctl;
+	DWORD					bytes_ret;
+
+	AL_ENTER( AL_DBG_CM );
+
+	if( !h_al )
+	{
+		AL_EXIT( AL_DBG_CM );
+		return IB_INVALID_HANDLE;
+	}
+
+	if( !p_cm_lap )
+	{
+		AL_EXIT( AL_DBG_CM );
+		/* BUGFIX: NULL pointer arguments are invalid parameters,
+		 * not invalid handles (matches the other CEP entry points). */
+		return IB_INVALID_PARAMETER;
+	}
+
+	if( !p_cm_lap->p_alt_path )
+	{
+		AL_EXIT( AL_DBG_CM );
+		return IB_INVALID_PARAMETER;
+	}
+
+	ioctl.cid = cid;
+	ioctl.cm_lap = *p_cm_lap;
+	/* Swap the user-mode QP handle for the kernel handle it shadows. */
+	ioctl.cm_lap.h_qp = (ib_qp_handle_t)p_cm_lap->h_qp->obj.hdl;
+	ioctl.alt_path = *(p_cm_lap->p_alt_path);
+	/* Copy private data, if any. */
+	if( p_cm_lap->p_lap_pdata )
+	{
+		if( p_cm_lap->lap_length > IB_LAP_PDATA_SIZE )
+		{
+			AL_TRACE_EXIT( AL_DBG_ERROR,
+				("private data larger than LAP private data.\n") );
+			return IB_INVALID_SETTING;
+		}
+
+		cl_memcpy(
+			ioctl.pdata, p_cm_lap->p_lap_pdata, p_cm_lap->lap_length );
+	}
+
+	if( !DeviceIoControl( g_al_device, UAL_CEP_LAP, &ioctl,
+		sizeof(ioctl), &status, sizeof(status), &bytes_ret, NULL ) ||
+		bytes_ret != sizeof(status) )
+	{
+		AL_TRACE_EXIT( AL_DBG_ERROR,
+			("UAL_CEP_LAP IOCTL failed with %d.\n", GetLastError()) );
+		return IB_ERROR;
+	}
+
+	AL_EXIT( AL_DBG_CM );
+	return status;
+}
+\r
+\r
+/*
+ * Formats the APR (alternate path response) for the given CEP and
+ * retrieves the QP modify attributes to load the alternate path, via
+ * the UAL_CEP_PRE_APR IOCTL.  The APR is sent by al_cep_send_apr.
+ *
+ * Returns IB_INVALID_HANDLE if h_al is NULL, IB_INVALID_PARAMETER for
+ * NULL pointers, IB_INVALID_SETTING for oversized buffers, IB_ERROR on
+ * IOCTL failure, otherwise the kernel status (p_apr valid on success).
+ */
+ib_api_status_t
+al_cep_pre_apr(
+	IN				ib_al_handle_t				h_al,
+	IN				net32_t						cid,
+	IN		const	ib_cm_apr_t* const			p_cm_apr,
+		OUT			ib_qp_mod_t* const			p_apr )
+{
+	ual_cep_apr_ioctl_t		ioctl;
+	DWORD					bytes_ret;
+
+	AL_ENTER( AL_DBG_CM );
+
+	if( !h_al )
+	{
+		AL_EXIT( AL_DBG_CM );
+		return IB_INVALID_HANDLE;
+	}
+
+	if( !p_cm_apr || !p_apr )
+	{
+		AL_EXIT( AL_DBG_CM );
+		return IB_INVALID_PARAMETER;
+	}
+
+	ioctl.in.cid = cid;
+	ioctl.in.cm_apr = *p_cm_apr;
+	/* Swap the user-mode QP handle for the kernel handle it shadows. */
+	ioctl.in.cm_apr.h_qp = (ib_qp_handle_t)p_cm_apr->h_qp->obj.hdl;
+	/* Copy additional APR info, if any. */
+	if( p_cm_apr->p_info )
+	{
+		if( p_cm_apr->info_length > IB_APR_INFO_SIZE )
+		{
+			AL_TRACE_EXIT( AL_DBG_ERROR,
+				("private data larger than APR info data.\n") );
+			return IB_INVALID_SETTING;
+		}
+
+		cl_memcpy(
+			ioctl.in.apr_info, p_cm_apr->p_info, p_cm_apr->info_length );
+	}
+	/* Copy private data, if any. */
+	if( p_cm_apr->p_apr_pdata )
+	{
+		/* BUGFIX: bound APR private data by the APR limit (was REJ). */
+		if( p_cm_apr->apr_length > IB_APR_PDATA_SIZE )
+		{
+			AL_TRACE_EXIT( AL_DBG_ERROR,
+				("private data larger than APR private data.\n") );
+			return IB_INVALID_SETTING;
+		}
+
+		cl_memcpy(
+			ioctl.in.pdata, p_cm_apr->p_apr_pdata, p_cm_apr->apr_length );
+	}
+
+	/* BUGFIX: this sent UAL_CEP_REJ, rejecting the connection instead
+	 * of formatting the APR. */
+	if( !DeviceIoControl( g_al_device, UAL_CEP_PRE_APR, &ioctl.in,
+		sizeof(ioctl.in), &ioctl.out, sizeof(ioctl.out), &bytes_ret, NULL ) ||
+		bytes_ret != sizeof(ioctl.out) )
+	{
+		AL_TRACE_EXIT( AL_DBG_ERROR,
+			("UAL_CEP_PRE_APR IOCTL failed with %d.\n", GetLastError()) );
+		return IB_ERROR;
+	}
+
+	if( ioctl.out.status == IB_SUCCESS )
+		*p_apr = ioctl.out.apr;
+
+	AL_EXIT( AL_DBG_CM );
+	return ioctl.out.status;
+}
+\r
+\r
+/*
+ * Sends the APR previously formatted by al_cep_pre_apr for the given
+ * CEP.  Returns IB_INVALID_HANDLE for a NULL AL handle, IB_ERROR on
+ * IOCTL failure, otherwise the status reported by the kernel.
+ */
+ib_api_status_t
+al_cep_send_apr(
+	IN				ib_al_handle_t				h_al,
+	IN				net32_t						cid )
+{
+	DWORD				bytes_ret;
+	ib_api_status_t		status;
+
+	AL_ENTER( AL_DBG_CM );
+
+	if( !h_al )
+	{
+		AL_EXIT( AL_DBG_CM );
+		return IB_INVALID_HANDLE;
+	}
+
+	/* The kernel reports the operation status via the output buffer. */
+	if( DeviceIoControl( g_al_device, UAL_CEP_SEND_APR, &cid,
+		sizeof(cid), &status, sizeof(status), &bytes_ret, NULL ) &&
+		bytes_ret == sizeof(status) )
+	{
+		AL_EXIT( AL_DBG_CM );
+		return status;
+	}
+
+	AL_TRACE_EXIT( AL_DBG_ERROR,
+		("UAL_CEP_SEND_APR IOCTL failed with %d.\n", GetLastError()) );
+	return IB_ERROR;
+}
+\r
+\r
+/*
+ * Sends a DREQ (disconnect request) for the given CEP, with optional
+ * private data, via the UAL_CEP_DREQ IOCTL.
+ */
+ib_api_status_t
+al_cep_dreq(
+	IN				ib_al_handle_t				h_al,
+	IN				net32_t						cid,
+	IN		const	uint8_t* const				p_pdata OPTIONAL,
+	IN		const	uint8_t						pdata_len )
+{
+	DWORD					bytes_ret;
+	ual_cep_dreq_ioctl_t	ioctl;
+	ib_api_status_t			status;
+
+	AL_ENTER( AL_DBG_CM );
+
+	if( !h_al )
+	{
+		AL_EXIT( AL_DBG_CM );
+		return IB_INVALID_HANDLE;
+	}
+
+	ioctl.cid = cid;
+	ioctl.pdata_len = 0;
+	/* Marshal the optional private data into the IOCTL buffer. */
+	if( p_pdata )
+	{
+		if( pdata_len > IB_DREQ_PDATA_SIZE )
+		{
+			AL_TRACE_EXIT( AL_DBG_ERROR,
+				("private data larger than DREQ private data.\n") );
+			return IB_INVALID_SETTING;
+		}
+
+		cl_memcpy( ioctl.pdata, p_pdata, pdata_len );
+		ioctl.pdata_len = pdata_len;
+	}
+
+	if( DeviceIoControl( g_al_device, UAL_CEP_DREQ, &ioctl,
+		sizeof(ioctl), &status, sizeof(status), &bytes_ret, NULL ) &&
+		bytes_ret == sizeof(status) )
+	{
+		AL_EXIT( AL_DBG_CM );
+		return status;
+	}
+
+	AL_TRACE_EXIT( AL_DBG_ERROR,
+		("UAL_CEP_DREQ IOCTL failed with %d.\n", GetLastError()) );
+	return IB_ERROR;
+}
+\r
+\r
+/*
+ * Sends a DREP (disconnect reply) for the given CEP, with optional
+ * private data, via the UAL_CEP_DREP IOCTL.
+ *
+ * Returns IB_INVALID_HANDLE if h_al is NULL, IB_INVALID_PARAMETER if
+ * p_cm_drep is NULL, IB_INVALID_SETTING for oversized private data,
+ * IB_ERROR on IOCTL failure, otherwise the kernel status.
+ */
+ib_api_status_t
+al_cep_drep(
+	IN				ib_al_handle_t				h_al,
+	IN				net32_t						cid,
+	IN		const	ib_cm_drep_t* const			p_cm_drep )
+{
+	ib_api_status_t			status;
+	ual_cep_drep_ioctl_t	ioctl;
+	DWORD					bytes_ret;
+
+	AL_ENTER( AL_DBG_CM );
+
+	if( !h_al )
+	{
+		AL_EXIT( AL_DBG_CM );
+		return IB_INVALID_HANDLE;
+	}
+
+	if( !p_cm_drep )
+	{
+		AL_EXIT( AL_DBG_CM );
+		/* BUGFIX: a NULL pointer argument is an invalid parameter,
+		 * not an invalid handle (matches the other CEP entry points). */
+		return IB_INVALID_PARAMETER;
+	}
+
+	ioctl.cid = cid;
+	ioctl.cm_drep = *p_cm_drep;
+	/* Copy private data, if any. */
+	if( p_cm_drep->p_drep_pdata )
+	{
+		if( p_cm_drep->drep_length > IB_DREP_PDATA_SIZE )
+		{
+			AL_TRACE_EXIT( AL_DBG_ERROR,
+				("private data larger than DREP private data.\n") );
+			return IB_INVALID_SETTING;
+		}
+
+		cl_memcpy(
+			ioctl.pdata, p_cm_drep->p_drep_pdata, p_cm_drep->drep_length );
+	}
+
+	if( !DeviceIoControl( g_al_device, UAL_CEP_DREP, &ioctl,
+		sizeof(ioctl), &status, sizeof(status), &bytes_ret, NULL ) ||
+		bytes_ret != sizeof(status) )
+	{
+		AL_TRACE_EXIT( AL_DBG_ERROR,
+			("UAL_CEP_DREP IOCTL failed with %d.\n", GetLastError()) );
+		return IB_ERROR;
+	}
+
+	AL_EXIT( AL_DBG_CM );
+	return status;
+}
+\r
+\r
+/*
+ * Retrieves the remaining timewait period (in microseconds) for the
+ * given CEP via the UAL_CEP_GET_TIMEWAIT IOCTL.
+ *
+ * Returns IB_INVALID_HANDLE if h_al is NULL, IB_INVALID_PARAMETER if
+ * p_timewait_us is NULL, IB_ERROR on IOCTL failure, otherwise the
+ * kernel status (*p_timewait_us valid on success).
+ */
+ib_api_status_t
+al_cep_get_timewait(
+	IN				ib_al_handle_t				h_al,
+	IN				net32_t						cid,
+		OUT			uint64_t* const				p_timewait_us )
+{
+	ual_cep_get_timewait_ioctl_t	ioctl;
+	DWORD							bytes_ret;
+
+	AL_ENTER( AL_DBG_CM );
+
+	if( !h_al )
+	{
+		AL_EXIT( AL_DBG_CM );
+		return IB_INVALID_HANDLE;
+	}
+
+	if( !p_timewait_us )
+	{
+		AL_EXIT( AL_DBG_CM );
+		/* BUGFIX: a NULL output pointer is an invalid parameter,
+		 * not an invalid handle (matches the other CEP entry points). */
+		return IB_INVALID_PARAMETER;
+	}
+
+	if( !DeviceIoControl( g_al_device, UAL_CEP_GET_TIMEWAIT, &cid, sizeof(cid),
+		&ioctl, sizeof(ioctl), &bytes_ret, NULL ) ||
+		bytes_ret != sizeof(ioctl) )
+	{
+		/* BUGFIX: trace reported the wrong IOCTL name (was DREP). */
+		AL_TRACE_EXIT( AL_DBG_ERROR,
+			("UAL_CEP_GET_TIMEWAIT IOCTL failed with %d.\n", GetLastError()) );
+		return IB_ERROR;
+	}
+
+	if( ioctl.status == IB_SUCCESS )
+		*p_timewait_us = ioctl.timewait_us;
+
+	AL_EXIT( AL_DBG_CM );
+	return ioctl.status;
+}
+//\r
+//\r
+//ib_api_status_t\r
+//al_cep_migrate(\r
+// IN ib_al_handle_t h_al,\r
+// IN net32_t cid );\r
+//\r
+//\r
+//ib_api_status_t\r
+//al_cep_established(\r
+// IN ib_al_handle_t h_al,\r
+// IN net32_t cid );\r
+\r
+\r
+/*
+ * Polls the given CEP for a pending event via the UAL_CEP_POLL IOCTL.
+ * On success, returns the received MAD in *pp_mad (caller owns it and
+ * must ib_put_mad it) and, for a new incoming connection, creates a
+ * matching user-mode CEP and returns its identity in *p_new_cep.
+ *
+ * Returns IB_INVALID_HANDLE if h_al is NULL, IB_INVALID_PARAMETER for
+ * NULL outputs or an unknown cid, IB_ERROR on IOCTL failure, otherwise
+ * the kernel status.
+ */
+ib_api_status_t
+al_cep_poll(
+	IN				ib_al_handle_t				h_al,
+	IN				net32_t						cid,
+	IN	OUT			ib_cep_t* const				p_new_cep,
+		OUT			ib_mad_element_t** const	pp_mad )
+{
+	ucep_t					*p_cep;
+	ib_api_status_t			status;
+	ual_cep_poll_ioctl_t	ioctl;
+	DWORD					bytes_ret;
+	ib_mad_element_t		*p_mad;
+	ib_grh_t				*p_grh;
+	ib_mad_t				*p_mad_buf;
+
+	AL_ENTER( AL_DBG_CM );
+
+	if( !h_al )
+	{
+		AL_EXIT( AL_DBG_CM );
+		return IB_INVALID_HANDLE;
+	}
+
+	if( !p_new_cep || !pp_mad )
+	{
+		AL_EXIT( AL_DBG_CM );
+		return IB_INVALID_PARAMETER;
+	}
+
+	cl_spinlock_acquire( &gp_cep_mgr->obj.lock );
+	/* BUGFIX: off-by-one — an index equal to the vector size is out of
+	 * range; use >= like the other CEP vector lookups. */
+	if( cid >= cl_ptr_vector_get_size( &gp_cep_mgr->cep_vector ) )
+		p_cep = NULL;
+	else
+		p_cep = cl_ptr_vector_get( &gp_cep_mgr->cep_vector, cid );
+	cl_spinlock_release( &gp_cep_mgr->obj.lock );
+	if( !p_cep )
+	{
+		AL_EXIT( AL_DBG_CM );
+		return IB_INVALID_PARAMETER;
+	}
+
+	/* Allocate the MAD element that will carry the returned event. */
+	status = ib_get_mad( g_pool_key, MAD_BLOCK_SIZE, &p_mad );
+	if( status != IB_SUCCESS )
+	{
+		AL_TRACE_EXIT( AL_DBG_ERROR,
+			("ib_get_mad returned %s.\n", ib_get_err_str( status )) );
+		return status;
+	}
+
+	/* Preserve the element's buffer pointers — the struct copy from the
+	 * IOCTL below would otherwise clobber them with kernel values. */
+	p_mad_buf = p_mad->p_mad_buf;
+	p_grh = p_mad->p_grh;
+
+	if( !DeviceIoControl( g_al_device, UAL_CEP_POLL, &cid,
+		sizeof(cid), &ioctl, sizeof(ioctl), &bytes_ret, NULL ) ||
+		bytes_ret != sizeof(ioctl) )
+	{
+		ib_put_mad( p_mad );
+		/* BUGFIX: trace reported the wrong IOCTL name (was GET_RTS). */
+		AL_TRACE_EXIT( AL_DBG_ERROR,
+			("UAL_CEP_POLL IOCTL failed with %d.\n", GetLastError()) );
+		return IB_ERROR;
+	}
+
+	if( ioctl.status == IB_SUCCESS )
+	{
+		if( ioctl.new_cep.cid != AL_INVALID_CID )
+		{
+			/* Need to create a new CEP for user-mode. */
+			status = __create_ucep( p_cep->h_al, ioctl.new_cep.cid,
+				p_cep->pfn_cb, ioctl.new_cep.context, NULL );
+			if( status != IB_SUCCESS )
+			{
+				/* Unwind the kernel CEP we could not mirror. */
+				DeviceIoControl( g_al_device, UAL_DESTROY_CEP,
+					&ioctl.new_cep.cid, sizeof(ioctl.new_cep.cid),
+					NULL, 0, &bytes_ret, NULL );
+				goto err;
+			}
+		}
+
+		/* Copy the MAD payload as it's all that's used. */
+		*p_mad = ioctl.element;
+		p_mad->p_grh = p_grh;
+		if( p_mad->grh_valid )
+			cl_memcpy( p_mad->p_grh, &ioctl.grh, sizeof(ib_grh_t) );
+		p_mad->p_mad_buf = p_mad_buf;
+
+		cl_memcpy( p_mad->p_mad_buf, ioctl.mad_buf, MAD_BLOCK_SIZE );
+
+		*p_new_cep = ioctl.new_cep;
+		*pp_mad = p_mad;
+	}
+	else
+	{
+err:
+		ib_put_mad( p_mad );
+	}
+
+	AL_EXIT( AL_DBG_CM );
+	return ioctl.status;
+}
+\r
+\r
+/* Callback to process CM events */
+/*
+ * Completion routine for the asynchronous UAL_CEP_GET_EVENT IOCTL.
+ * Invokes the user's CEP callback, then either re-arms the IOCTL or,
+ * if this was the last reference, destroys the user-mode CEP.
+ *
+ * error_code/ret_bytes/p_ov follow the Win32 FileIOCompletionRoutine
+ * convention; p_ov is the OVERLAPPED embedded in the owning ucep_t.
+ */
+void
+cm_cb(
+	IN				DWORD						error_code,
+	IN				DWORD						ret_bytes,
+	IN				LPOVERLAPPED				p_ov )
+{
+	ucep_t	*p_cep;
+
+	AL_ENTER( AL_DBG_CM );
+
+	/* The UAL_CEP_GET_EVENT IOCTL does not have any output data. */
+	UNUSED_PARAM( ret_bytes );
+
+	p_cep = PARENT_STRUCT( p_ov, ucep_t, ov );
+
+	/* Take a callback reference before invoking user code so the CEP
+	 * cannot be freed out from under us. */
+	cl_atomic_inc( &p_cep->ref_cnt );
+	if( !error_code )
+		p_cep->pfn_cb( p_cep->h_al, &p_cep->cep );
+
+	/* Lock against destruction. */
+	cl_spinlock_acquire( &p_cep->lock );
+
+	/*
+	 * Under normal circumstances, the reference count here will be 3 -
+	 * one to indicate the object is alive and well, a second for this IOCTL,
+	 * and a third that we just took for the callback.
+	 *
+	 * If a user tries to destroy the CEP, it will have decremented the count
+	 * by one.  Thus, if when we release the callback reference we reach 1,
+	 * we know to cleanup.
+	 */
+	if( cl_atomic_dec( &p_cep->ref_cnt ) == 1 )
+	{
+		/* The CEP needs to be freed. */
+		cl_spinlock_release( &p_cep->lock );
+		__destroy_ucep( p_cep );
+		AL_EXIT( AL_DBG_CM );
+		return;
+	}
+
+	if( !error_code )
+	{
+		/*
+		 * Re-arm the event IOCTL.  ERROR_IO_PENDING is the expected
+		 * success case for an overlapped request; the IOCTL reference
+		 * taken previously is carried over to the new request.
+		 */
+		if( !DeviceIoControl( gp_cep_mgr->h_file, UAL_CEP_GET_EVENT,
+			&p_cep->cep.cid, sizeof(p_cep->cep.cid), NULL, 0,
+			NULL, &p_cep->ov ) && GetLastError() == ERROR_IO_PENDING )
+		{
+			cl_spinlock_release( &p_cep->lock );
+			AL_EXIT( AL_DBG_CM );
+			return;
+		}
+
+		AL_TRACE( AL_DBG_ERROR,
+			("DeviceIoControl for CEP callback request returned %d.\n",
+			GetLastError()) );
+	}
+	else
+	{
+		AL_TRACE( AL_DBG_WARN,
+			("UAL_CEP_GET_EVENT IOCTL returned ERROR_OPERATION_ABORTED.\n") );
+	}
+
+	/*
+	 * We failed to issue the next request or the previous request was
+	 * cancelled.  Release the reference held by the previous IOCTL and exit.
+	 */
+	cl_atomic_dec( &p_cep->ref_cnt );
+	cl_spinlock_release( &p_cep->lock );
+	AL_EXIT( AL_DBG_CM );
+}
#include "al_cq.h"\r
#include "ual_ca.h"\r
#include "ual_qp.h"\r
-#include "ual_cm.h"\r
#include "ual_mad.h"\r
#include "ib_common.h"\r
+#include "al_cm_cep.h"\r
\r
\r
/* Global AL manager handle is defined in al_mgr_shared.c */\r
__cb_thread_routine(\r
IN void *context );\r
\r
-static void\r
-__process_cm_cb(\r
- IN cm_cb_ioctl_info_t* p_cm_cb_info);\r
+//static void\r
+//__process_cm_cb(\r
+// IN cm_cb_ioctl_info_t* p_cm_cb_info);\r
\r
static void\r
__process_misc_cb(\r
gp_al_mgr->ual_mgr.exit_thread = TRUE;\r
\r
/* Closing the file handles cancels any pending I/O requests. */\r
- CloseHandle( gp_al_mgr->ual_mgr.h_cm_file );\r
+ //CloseHandle( gp_al_mgr->ual_mgr.h_cm_file );\r
CloseHandle( gp_al_mgr->ual_mgr.h_cq_file );\r
CloseHandle( gp_al_mgr->ual_mgr.h_misc_file );\r
CloseHandle( g_al_device );\r
}\r
\r
/* Create CM callback file handle. */\r
- gp_al_mgr->ual_mgr.h_cm_file = ual_create_async_file( UAL_BIND_CM );\r
- if( gp_al_mgr->ual_mgr.h_cq_file == INVALID_HANDLE_VALUE )\r
- {\r
- gp_al_mgr->obj.pfn_destroy( &gp_al_mgr->obj, NULL );\r
- AL_TRACE_EXIT( AL_DBG_ERROR,\r
- ("ual_create_async_file for UAL_BIND_CM returned %d.\n",\r
- GetLastError()) );\r
- return IB_ERROR;\r
- }\r
+ //gp_al_mgr->ual_mgr.h_cm_file = ual_create_async_file( UAL_BIND_CM );\r
+ //if( gp_al_mgr->ual_mgr.h_cq_file == INVALID_HANDLE_VALUE )\r
+ //{\r
+ // gp_al_mgr->obj.pfn_destroy( &gp_al_mgr->obj, NULL );\r
+ // AL_TRACE_EXIT( AL_DBG_ERROR,\r
+ // ("ual_create_async_file for UAL_BIND_CM returned %d.\n",\r
+ // GetLastError()) );\r
+ // return IB_ERROR;\r
+ //}\r
\r
/* Create the CQ completion callback file handle. */\r
gp_al_mgr->ual_mgr.h_cq_file = ual_create_async_file( UAL_BIND_CQ );\r
return ib_status;\r
}\r
\r
+ /* Initialize CM */\r
+ ib_status = create_cep_mgr( &gp_al_mgr->obj );\r
+ if( ib_status != IB_SUCCESS )\r
+ {\r
+ gp_al_mgr->obj.pfn_destroy( &gp_al_mgr->obj, NULL );\r
+ AL_TRACE_EXIT( AL_DBG_ERROR,\r
+ ("create_cm_mgr failed, status = 0x%x.\n", ib_status) );\r
+ return ib_status;\r
+ }\r
+\r
cl_status = cl_event_init( &gp_al_mgr->ual_mgr.sync_event, FALSE );\r
if( cl_status != CL_SUCCESS )\r
{\r
}\r
}\r
\r
- if( !DeviceIoControl( gp_al_mgr->ual_mgr.h_cm_file, UAL_GET_CM_CB_INFO,\r
- NULL, 0,\r
- &gp_al_mgr->ual_mgr.cm_cb_info, sizeof(cm_cb_ioctl_info_t),\r
- NULL, &gp_al_mgr->ual_mgr.cm_ov ) )\r
- {\r
- if( GetLastError() != ERROR_IO_PENDING )\r
- {\r
- AL_TRACE_EXIT( AL_DBG_ERROR,\r
- ("DeviceIoControl for CM callback request returned %d.\n",\r
- GetLastError()) );\r
- gp_al_mgr->obj.pfn_destroy(&gp_al_mgr->obj, NULL);\r
- return IB_ERROR;\r
- }\r
- }\r
+ //if( !DeviceIoControl( gp_al_mgr->ual_mgr.h_cm_file, UAL_GET_CM_CB_INFO,\r
+ // NULL, 0,\r
+ // &gp_al_mgr->ual_mgr.cm_cb_info, sizeof(cm_cb_ioctl_info_t),\r
+ // NULL, &gp_al_mgr->ual_mgr.cm_ov ) )\r
+ //{\r
+ // if( GetLastError() != ERROR_IO_PENDING )\r
+ // {\r
+ // AL_TRACE_EXIT( AL_DBG_ERROR,\r
+ // ("DeviceIoControl for CM callback request returned %d.\n",\r
+ // GetLastError()) );\r
+ // gp_al_mgr->obj.pfn_destroy(&gp_al_mgr->obj, NULL);\r
+ // return IB_ERROR;\r
+ // }\r
+ //}\r
\r
if( !DeviceIoControl( gp_al_mgr->ual_mgr.h_cq_file, UAL_GET_COMP_CB_INFO,\r
NULL, 0,\r
/*\r
* UAL thread start routines.\r
*/\r
-\r
-\r
-/* Thread to process the asynchronous CM notifications */\r
-void\r
-cm_cb(\r
- IN DWORD error_code,\r
- IN DWORD ret_bytes,\r
- IN LPOVERLAPPED p_ov )\r
-{\r
- AL_ENTER( AL_DBG_CM );\r
-\r
- UNUSED_PARAM( p_ov );\r
-\r
- if( !error_code && ret_bytes )\r
- {\r
- /* Check the record type and adjust the pointers */\r
- /* TBD */\r
- __process_cm_cb( &gp_al_mgr->ual_mgr.cm_cb_info );\r
- }\r
- \r
- if( error_code != ERROR_OPERATION_ABORTED )\r
- {\r
- if( !DeviceIoControl( gp_al_mgr->ual_mgr.h_cm_file, UAL_GET_CM_CB_INFO,\r
- NULL, 0,\r
- &gp_al_mgr->ual_mgr.cm_cb_info, sizeof(cm_cb_ioctl_info_t),\r
- NULL, &gp_al_mgr->ual_mgr.cm_ov ) )\r
- {\r
- if( GetLastError() != ERROR_IO_PENDING )\r
- {\r
- AL_TRACE_EXIT( AL_DBG_ERROR,\r
- ("DeviceIoControl for CM callback request returned %d.\n",\r
- GetLastError()) );\r
- }\r
- }\r
- }\r
-\r
- AL_EXIT( AL_DBG_CM );\r
-}\r
-\r
-\r
-\r
-static void\r
-__process_cm_cb(\r
- IN cm_cb_ioctl_info_t* p_cm_cb_info)\r
-{\r
- switch( p_cm_cb_info->rec_type)\r
- {\r
- case CM_REQ_REC:\r
- {\r
- struct _cm_req_cb_ioctl_rec *p_ioctl_rec =\r
- &p_cm_cb_info->ioctl_rec.cm_req_cb_ioctl_rec;\r
-\r
- if (p_ioctl_rec->req_rec.qp_type == IB_QPT_UNRELIABLE_DGRM)\r
- {\r
- p_ioctl_rec->req_rec.p_req_pdata =\r
- (uint8_t *)&p_ioctl_rec->cm_req_pdata_rec.sidr_req_pdata;\r
- }\r
- else\r
- {\r
- p_ioctl_rec->req_rec.p_req_pdata =\r
- (uint8_t *)&p_ioctl_rec->cm_req_pdata_rec.req_pdata;\r
- }\r
- ual_cm_req_cb( &p_ioctl_rec->req_rec, &p_ioctl_rec->qp_mod_rtr,\r
- &p_ioctl_rec->qp_mod_rts, p_ioctl_rec->timeout_ms );\r
- break;\r
- }\r
- case CM_REP_REC:\r
- {\r
- struct _cm_rep_cb_ioctl_rec *p_ioctl_rec =\r
- &p_cm_cb_info->ioctl_rec.cm_rep_cb_ioctl_rec;\r
-\r
- if (p_ioctl_rec->rep_rec.qp_type == IB_QPT_UNRELIABLE_DGRM)\r
- {\r
- p_ioctl_rec->rep_rec.p_rep_pdata =\r
- (uint8_t *)&p_ioctl_rec->cm_rep_pdata_rec.sidr_rep_pdata;\r
- }\r
- else\r
- {\r
- p_ioctl_rec->rep_rec.p_rep_pdata =\r
- (uint8_t *)&p_ioctl_rec->cm_rep_pdata_rec.rep_pdata;\r
- }\r
- ual_cm_rep_cb( &p_ioctl_rec->rep_rec, &p_ioctl_rec->qp_mod_rtr,\r
- &p_ioctl_rec->qp_mod_rts );\r
- break;\r
- }\r
- case CM_RTU_REC:\r
- {\r
- struct _cm_rtu_cb_ioctl_rec *p_ioctl_rec =\r
- &p_cm_cb_info->ioctl_rec.cm_rtu_cb_ioctl_rec;\r
-\r
- p_ioctl_rec->rtu_rec.p_rtu_pdata = (uint8_t *)&p_ioctl_rec->rtu_pdata;\r
- ual_cm_rtu_cb( &p_ioctl_rec->rtu_rec );\r
- break;\r
- }\r
- case CM_REJ_REC:\r
- {\r
- struct _cm_rej_cb_ioctl_rec *p_ioctl_rec =\r
- &p_cm_cb_info->ioctl_rec.cm_rej_cb_ioctl_rec;\r
-\r
- p_ioctl_rec->rej_rec.p_rej_pdata = \r
- (uint8_t*)&p_ioctl_rec->rej_pdata;\r
- p_ioctl_rec->rej_rec.p_ari =\r
- (uint8_t*)&p_ioctl_rec->ari_pdata;\r
- ual_cm_rej_cb( &p_ioctl_rec->rej_rec );\r
- break;\r
- }\r
- case CM_MRA_REC:\r
- {\r
- struct _cm_mra_cb_ioctl_rec *p_ioctl_rec =\r
- &p_cm_cb_info->ioctl_rec.cm_mra_cb_ioctl_rec;\r
-\r
- p_ioctl_rec->mra_rec.p_mra_pdata =\r
- (uint8_t*)&p_ioctl_rec->mra_pdata;\r
- ual_cm_mra_cb( &p_cm_cb_info->ioctl_rec.cm_mra_cb_ioctl_rec.mra_rec );\r
- break;\r
- }\r
- case CM_LAP_REC:\r
- {\r
- struct _cm_lap_cb_ioctl_rec *p_ioctl_rec =\r
- &p_cm_cb_info->ioctl_rec.cm_lap_cb_ioctl_rec;\r
-\r
- p_ioctl_rec->lap_rec.p_lap_pdata =\r
- (uint8_t *)&p_ioctl_rec->lap_pdata;\r
- ual_cm_lap_cb( &p_ioctl_rec->lap_rec );\r
- break;\r
- }\r
- case CM_APR_REC:\r
- {\r
- struct _cm_apr_cb_ioctl_rec *p_ioctl_rec =\r
- &p_cm_cb_info->ioctl_rec.cm_apr_cb_ioctl_rec;\r
-\r
- p_ioctl_rec->apr_rec.p_apr_pdata =\r
- (uint8_t*)&p_ioctl_rec->apr_pdata;\r
- p_ioctl_rec->apr_rec.p_info =\r
- (uint8_t*)&p_ioctl_rec->apr_info;\r
- ual_cm_apr_cb( &p_ioctl_rec->apr_rec );\r
- break;\r
- }\r
- case CM_DREQ_REC:\r
- {\r
- struct _cm_dreq_cb_ioctl_rec *p_ioctl_rec =\r
- &p_cm_cb_info->ioctl_rec.cm_dreq_cb_ioctl_rec;\r
-\r
- p_ioctl_rec->dreq_rec.p_dreq_pdata =\r
- (uint8_t*)&p_ioctl_rec->dreq_pdata;\r
- ual_cm_dreq_cb( &p_ioctl_rec->dreq_rec );\r
- break;\r
- }\r
- case CM_DREP_REC:\r
- {\r
- struct _cm_drep_cb_ioctl_rec *p_ioctl_rec =\r
- &p_cm_cb_info->ioctl_rec.cm_drep_cb_ioctl_rec;\r
-\r
- p_ioctl_rec->drep_rec.p_drep_pdata =\r
- (uint8_t*)&p_ioctl_rec->drep_pdata;\r
- ual_cm_drep_cb( &p_ioctl_rec->drep_rec );\r
- break;\r
- }\r
- default:\r
- /* Unknown record type - just return */\r
- break;\r
- }\r
-}\r
-\r
-\r
-\r
+//\r
+//\r
+///* Thread to process the asynchronous CM notifications */\r
+//void\r
+//cm_cb(\r
+// IN DWORD error_code,\r
+// IN DWORD ret_bytes,\r
+// IN LPOVERLAPPED p_ov )\r
+//{\r
+// AL_ENTER( AL_DBG_CM );\r
+//\r
+// UNUSED_PARAM( p_ov );\r
+//\r
+// if( !error_code && ret_bytes )\r
+// {\r
+// /* Check the record type and adjust the pointers */\r
+// /* TBD */\r
+// __process_cm_cb( &gp_al_mgr->ual_mgr.cm_cb_info );\r
+// }\r
+// \r
+// if( error_code != ERROR_OPERATION_ABORTED )\r
+// {\r
+// if( !DeviceIoControl( gp_al_mgr->ual_mgr.h_cm_file, UAL_GET_CM_CB_INFO,\r
+// NULL, 0,\r
+// &gp_al_mgr->ual_mgr.cm_cb_info, sizeof(cm_cb_ioctl_info_t),\r
+// NULL, &gp_al_mgr->ual_mgr.cm_ov ) )\r
+// {\r
+// if( GetLastError() != ERROR_IO_PENDING )\r
+// {\r
+// AL_TRACE_EXIT( AL_DBG_ERROR,\r
+// ("DeviceIoControl for CM callback request returned %d.\n",\r
+// GetLastError()) );\r
+// }\r
+// }\r
+// }\r
+//\r
+// AL_EXIT( AL_DBG_CM );\r
+//}\r
+\r
+\r
+\r
+//static void\r
+//__process_cm_cb(\r
+// IN cm_cb_ioctl_info_t* p_cm_cb_info)\r
+//{\r
+// switch( p_cm_cb_info->rec_type)\r
+// {\r
+// case CM_REQ_REC:\r
+// {\r
+// struct _cm_req_cb_ioctl_rec *p_ioctl_rec =\r
+// &p_cm_cb_info->ioctl_rec.cm_req_cb_ioctl_rec;\r
+//\r
+// if (p_ioctl_rec->req_rec.qp_type == IB_QPT_UNRELIABLE_DGRM)\r
+// {\r
+// p_ioctl_rec->req_rec.p_req_pdata =\r
+// (uint8_t *)&p_ioctl_rec->cm_req_pdata_rec.sidr_req_pdata;\r
+// }\r
+// else\r
+// {\r
+// p_ioctl_rec->req_rec.p_req_pdata =\r
+// (uint8_t *)&p_ioctl_rec->cm_req_pdata_rec.req_pdata;\r
+// }\r
+// ual_cm_req_cb( &p_ioctl_rec->req_rec, &p_ioctl_rec->qp_mod_rtr,\r
+// &p_ioctl_rec->qp_mod_rts, p_ioctl_rec->timeout_ms );\r
+// break;\r
+// }\r
+// case CM_REP_REC:\r
+// {\r
+// struct _cm_rep_cb_ioctl_rec *p_ioctl_rec =\r
+// &p_cm_cb_info->ioctl_rec.cm_rep_cb_ioctl_rec;\r
+//\r
+// if (p_ioctl_rec->rep_rec.qp_type == IB_QPT_UNRELIABLE_DGRM)\r
+// {\r
+// p_ioctl_rec->rep_rec.p_rep_pdata =\r
+// (uint8_t *)&p_ioctl_rec->cm_rep_pdata_rec.sidr_rep_pdata;\r
+// }\r
+// else\r
+// {\r
+// p_ioctl_rec->rep_rec.p_rep_pdata =\r
+// (uint8_t *)&p_ioctl_rec->cm_rep_pdata_rec.rep_pdata;\r
+// }\r
+// ual_cm_rep_cb( &p_ioctl_rec->rep_rec, &p_ioctl_rec->qp_mod_rtr,\r
+// &p_ioctl_rec->qp_mod_rts );\r
+// break;\r
+// }\r
+// case CM_RTU_REC:\r
+// {\r
+// struct _cm_rtu_cb_ioctl_rec *p_ioctl_rec =\r
+// &p_cm_cb_info->ioctl_rec.cm_rtu_cb_ioctl_rec;\r
+//\r
+// p_ioctl_rec->rtu_rec.p_rtu_pdata = (uint8_t *)&p_ioctl_rec->rtu_pdata;\r
+// ual_cm_rtu_cb( &p_ioctl_rec->rtu_rec );\r
+// break;\r
+// }\r
+// case CM_REJ_REC:\r
+// {\r
+// struct _cm_rej_cb_ioctl_rec *p_ioctl_rec =\r
+// &p_cm_cb_info->ioctl_rec.cm_rej_cb_ioctl_rec;\r
+//\r
+// p_ioctl_rec->rej_rec.p_rej_pdata = \r
+// (uint8_t*)&p_ioctl_rec->rej_pdata;\r
+// p_ioctl_rec->rej_rec.p_ari =\r
+// (uint8_t*)&p_ioctl_rec->ari_pdata;\r
+// ual_cm_rej_cb( &p_ioctl_rec->rej_rec );\r
+// break;\r
+// }\r
+// case CM_MRA_REC:\r
+// {\r
+// struct _cm_mra_cb_ioctl_rec *p_ioctl_rec =\r
+// &p_cm_cb_info->ioctl_rec.cm_mra_cb_ioctl_rec;\r
+//\r
+// p_ioctl_rec->mra_rec.p_mra_pdata =\r
+// (uint8_t*)&p_ioctl_rec->mra_pdata;\r
+// ual_cm_mra_cb( &p_cm_cb_info->ioctl_rec.cm_mra_cb_ioctl_rec.mra_rec );\r
+// break;\r
+// }\r
+// case CM_LAP_REC:\r
+// {\r
+// struct _cm_lap_cb_ioctl_rec *p_ioctl_rec =\r
+// &p_cm_cb_info->ioctl_rec.cm_lap_cb_ioctl_rec;\r
+//\r
+// p_ioctl_rec->lap_rec.p_lap_pdata =\r
+// (uint8_t *)&p_ioctl_rec->lap_pdata;\r
+// ual_cm_lap_cb( &p_ioctl_rec->lap_rec );\r
+// break;\r
+// }\r
+// case CM_APR_REC:\r
+// {\r
+// struct _cm_apr_cb_ioctl_rec *p_ioctl_rec =\r
+// &p_cm_cb_info->ioctl_rec.cm_apr_cb_ioctl_rec;\r
+//\r
+// p_ioctl_rec->apr_rec.p_apr_pdata =\r
+// (uint8_t*)&p_ioctl_rec->apr_pdata;\r
+// p_ioctl_rec->apr_rec.p_info =\r
+// (uint8_t*)&p_ioctl_rec->apr_info;\r
+// ual_cm_apr_cb( &p_ioctl_rec->apr_rec );\r
+// break;\r
+// }\r
+// case CM_DREQ_REC:\r
+// {\r
+// struct _cm_dreq_cb_ioctl_rec *p_ioctl_rec =\r
+// &p_cm_cb_info->ioctl_rec.cm_dreq_cb_ioctl_rec;\r
+//\r
+// p_ioctl_rec->dreq_rec.p_dreq_pdata =\r
+// (uint8_t*)&p_ioctl_rec->dreq_pdata;\r
+// ual_cm_dreq_cb( &p_ioctl_rec->dreq_rec );\r
+// break;\r
+// }\r
+// case CM_DREP_REC:\r
+// {\r
+// struct _cm_drep_cb_ioctl_rec *p_ioctl_rec =\r
+// &p_cm_cb_info->ioctl_rec.cm_drep_cb_ioctl_rec;\r
+//\r
+// p_ioctl_rec->drep_rec.p_drep_pdata =\r
+// (uint8_t*)&p_ioctl_rec->drep_pdata;\r
+// ual_cm_drep_cb( &p_ioctl_rec->drep_rec );\r
+// break;\r
+// }\r
+// default:\r
+// /* Unknown record type - just return */\r
+// break;\r
+// }\r
+//}\r
+//\r
+//\r
+//\r
static void\r
__process_comp_cb(\r
IN comp_cb_ioctl_info_t* p_comp_cb_info )\r
cl_qlist_init( &h_al->mad_list );\r
cl_qlist_init( &h_al->key_list );\r
cl_qlist_init( &h_al->query_list );\r
- cl_qlist_init( &h_al->conn_list );\r
+ cl_qlist_init( &h_al->cep_list );\r
\r
if( cl_spinlock_init( &h_al->mad_lock ) != CL_SUCCESS )\r
{\r
switch( key )\r
{\r
case UAL_BIND_CM:\r
+ //DebugBreak();\r
/* CM callback. */\r
cm_cb( err, ret_bytes, p_ov );\r
break;\r
HANDLE h_cb_port;\r
\r
/* File to handle CM related notifications */\r
- HANDLE h_cm_file;\r
- cm_cb_ioctl_info_t cm_cb_info;\r
- OVERLAPPED cm_ov;\r
+ //HANDLE h_cm_file;\r
+ //cm_cb_ioctl_info_t cm_cb_info;\r
+ //OVERLAPPED cm_ov;\r
\r
/* Thread to handle work request completions */\r
HANDLE h_cq_file;\r
return status;\r
}\r
\r
- /* Create a file object on which to issue all SA requests. */\r
+ /* Create a file object on which to issue all PNP requests. */\r
gp_pnp->h_file = ual_create_async_file( UAL_BIND_PNP );\r
if( gp_pnp->h_file == INVALID_HANDLE_VALUE )\r
{\r
*****************************************************************************/\r
\r
\r
+#include <complib/cl_rbmap.h>\r
#include <complib/cl_qmap.h>\r
#include <complib/cl_map.h>\r
#include <complib/cl_fleximap.h>\r
#include <complib/cl_memory.h>\r
\r
\r
+/******************************************************************************\r
+*******************************************************************************\r
+************** ************\r
+************** IMPLEMENTATION OF RB MAP ************\r
+************** ************\r
+*******************************************************************************\r
+******************************************************************************/\r
+\r
+\r
+/*\r
+ * Returns whether a given item is on the left of its parent.\r
+ */\r
+static boolean_t\r
+__cl_rbmap_is_left_child(\r
+ IN const cl_rbmap_item_t* const p_item )\r
+{\r
+ CL_ASSERT( p_item );\r
+ CL_ASSERT( p_item->p_up );\r
+ CL_ASSERT( p_item->p_up != p_item );\r
+\r
+ return( p_item->p_up->p_left == p_item );\r
+}\r
+\r
+\r
+/*\r
+ * Retrieve the pointer to the parent's pointer to an item.\r
+ */\r
+static cl_rbmap_item_t**\r
+__cl_rbmap_get_parent_ptr_to_item(\r
+ IN cl_rbmap_item_t* const p_item )\r
+{\r
+ CL_ASSERT( p_item );\r
+ CL_ASSERT( p_item->p_up );\r
+ CL_ASSERT( p_item->p_up != p_item );\r
+\r
+ if( __cl_rbmap_is_left_child( p_item ) )\r
+ return( &p_item->p_up->p_left );\r
+\r
+ CL_ASSERT( p_item->p_up->p_right == p_item );\r
+ return( &p_item->p_up->p_right );\r
+}\r
+\r
+\r
+/*\r
+ * Rotate a node to the left. This rotation affects the least number of links\r
+ * between nodes and brings the level of C up by one while increasing the depth\r
+ * of A one. Note that the links to/from W, X, Y, and Z are not affected.\r
+ *\r
+ * R R\r
+ * | |\r
+ * A C\r
+ * / \ / \\r
+ * W C A Z\r
+ * / \ / \\r
+ * B Z W B\r
+ * / \ / \\r
+ * X Y X Y\r
+ */\r
+static void\r
+__cl_rbmap_rot_left(\r
+ IN cl_rbmap_t* const p_map,\r
+ IN cl_rbmap_item_t* const p_item )\r
+{\r
+ cl_rbmap_item_t **pp_root;\r
+\r
+ CL_ASSERT( p_map );\r
+ CL_ASSERT( p_item );\r
+ CL_ASSERT( p_item->p_right != &p_map->nil );\r
+\r
+ pp_root = __cl_rbmap_get_parent_ptr_to_item( p_item );\r
+\r
+ /* Point R to C instead of A. */\r
+ *pp_root = p_item->p_right;\r
+ /* Set C's parent to R. */\r
+ (*pp_root)->p_up = p_item->p_up;\r
+\r
+ /* Set A's right to B */\r
+ p_item->p_right = (*pp_root)->p_left;\r
+ /*\r
+ * Set B's parent to A. We trap for B being NIL since the\r
+ * caller may depend on NIL not changing.\r
+ */\r
+ if( (*pp_root)->p_left != &p_map->nil )\r
+ (*pp_root)->p_left->p_up = p_item;\r
+\r
+ /* Set C's left to A. */\r
+ (*pp_root)->p_left = p_item;\r
+ /* Set A's parent to C. */\r
+ p_item->p_up = *pp_root;\r
+}\r
+\r
+\r
+/*\r
+ * Rotate a node to the right. This rotation affects the least number of links\r
+ * between nodes and brings the level of A up by one while increasing the depth\r
+ * of C one. Note that the links to/from W, X, Y, and Z are not affected.\r
+ *\r
+ * R R\r
+ * | |\r
+ * C A\r
+ * / \ / \\r
+ * A Z W C\r
+ * / \ / \\r
+ * W B B Z\r
+ * / \ / \\r
+ * X Y X Y\r
+ */\r
+static void\r
+__cl_rbmap_rot_right(\r
+ IN cl_rbmap_t* const p_map,\r
+ IN cl_rbmap_item_t* const p_item )\r
+{\r
+ cl_rbmap_item_t **pp_root;\r
+\r
+ CL_ASSERT( p_map );\r
+ CL_ASSERT( p_item );\r
+ CL_ASSERT( p_item->p_left != &p_map->nil );\r
+\r
+ /* Point R to A instead of C. */\r
+ pp_root = __cl_rbmap_get_parent_ptr_to_item( p_item );\r
+ (*pp_root) = p_item->p_left;\r
+ /* Set A's parent to R. */\r
+ (*pp_root)->p_up = p_item->p_up;\r
+\r
+ /* Set C's left to B */\r
+ p_item->p_left = (*pp_root)->p_right;\r
+ /*\r
+ * Set B's parent to C. We trap for B being NIL since the\r
+ * caller may depend on NIL not changing.\r
+ */\r
+ if( (*pp_root)->p_right != &p_map->nil )\r
+ (*pp_root)->p_right->p_up = p_item;\r
+\r
+ /* Set A's right to C. */\r
+ (*pp_root)->p_right = p_item;\r
+ /* Set C's parent to A. */\r
+ p_item->p_up = *pp_root;\r
+}\r
+\r
+\r
+/*\r
+ * Balance a tree starting at a given item back to the root.\r
+ */\r
+static void\r
+__cl_rbmap_ins_bal(\r
+ IN cl_rbmap_t* const p_map,\r
+ IN cl_rbmap_item_t* p_item )\r
+{\r
+ cl_rbmap_item_t* p_grand_uncle;\r
+\r
+ CL_ASSERT( p_map );\r
+ CL_ASSERT( p_item );\r
+ CL_ASSERT( p_item != &p_map->root );\r
+\r
+ while( p_item->p_up->color == CL_MAP_RED )\r
+ {\r
+ if( __cl_rbmap_is_left_child( p_item->p_up ) )\r
+ {\r
+ p_grand_uncle = p_item->p_up->p_up->p_right;\r
+ CL_ASSERT( p_grand_uncle );\r
+ if( p_grand_uncle->color == CL_MAP_RED )\r
+ {\r
+ p_grand_uncle->color = CL_MAP_BLACK;\r
+ p_item->p_up->color = CL_MAP_BLACK;\r
+ p_item->p_up->p_up->color = CL_MAP_RED;\r
+ p_item = p_item->p_up->p_up;\r
+ continue;\r
+ }\r
+\r
+ if( !__cl_rbmap_is_left_child( p_item ) )\r
+ {\r
+ p_item = p_item->p_up;\r
+ __cl_rbmap_rot_left( p_map, p_item );\r
+ }\r
+ p_item->p_up->color = CL_MAP_BLACK;\r
+ p_item->p_up->p_up->color = CL_MAP_RED;\r
+ __cl_rbmap_rot_right( p_map, p_item->p_up->p_up );\r
+ }\r
+ else\r
+ {\r
+ p_grand_uncle = p_item->p_up->p_up->p_left;\r
+ CL_ASSERT( p_grand_uncle );\r
+ if( p_grand_uncle->color == CL_MAP_RED )\r
+ {\r
+ p_grand_uncle->color = CL_MAP_BLACK;\r
+ p_item->p_up->color = CL_MAP_BLACK;\r
+ p_item->p_up->p_up->color = CL_MAP_RED;\r
+ p_item = p_item->p_up->p_up;\r
+ continue;\r
+ }\r
+\r
+ if( __cl_rbmap_is_left_child( p_item ) )\r
+ {\r
+ p_item = p_item->p_up;\r
+ __cl_rbmap_rot_right( p_map, p_item );\r
+ }\r
+ p_item->p_up->color = CL_MAP_BLACK;\r
+ p_item->p_up->p_up->color = CL_MAP_RED;\r
+ __cl_rbmap_rot_left( p_map, p_item->p_up->p_up );\r
+ }\r
+ }\r
+}\r
+\r
+\r
+void\r
+cl_rbmap_insert(\r
+ IN cl_rbmap_t* const p_map,\r
+ IN cl_rbmap_item_t* const p_insert_at,\r
+ IN cl_rbmap_item_t* const p_item,\r
+ IN boolean_t left )\r
+{\r
+ CL_ASSERT( p_map );\r
+ CL_ASSERT( p_map->state == CL_INITIALIZED );\r
+ CL_ASSERT( p_insert_at );\r
+ CL_ASSERT( p_item );\r
+ CL_ASSERT( p_map->root.p_up == &p_map->root );\r
+ CL_ASSERT( p_map->root.color != CL_MAP_RED );\r
+ CL_ASSERT( p_map->nil.color != CL_MAP_RED );\r
+\r
+ p_item->p_left = &p_map->nil;\r
+ p_item->p_right = &p_map->nil;\r
+ p_item->color = CL_MAP_RED;\r
+\r
+ if( p_insert_at == cl_rbmap_end( p_map ) )\r
+ {\r
+ p_map->root.p_left = p_item;\r
+ p_item->p_up = &p_map->root;\r
+ }\r
+ else\r
+ {\r
+ if( left )\r
+ p_insert_at->p_left = p_item;\r
+ else\r
+ p_insert_at->p_right = p_item;\r
+\r
+ p_item->p_up = p_insert_at;\r
+ }\r
+\r
+ /* Increase the count. */\r
+ p_map->count++;\r
+\r
+ /*\r
+ * We have added depth to this section of the tree.\r
+ * Rebalance as necessary as we retrace our path through the tree\r
+ * and update colors.\r
+ */\r
+ __cl_rbmap_ins_bal( p_map, p_item );\r
+\r
+ cl_rbmap_root( p_map )->color = CL_MAP_BLACK;\r
+\r
+ /*\r
+ * Note that it is not necessary to re-color the nil node black because all\r
+ * red color assignments are made via the p_up pointer, and nil is never\r
+ * set as the value of a p_up pointer.\r
+ */\r
+\r
+#ifdef _DEBUG_\r
+ /* Set the pointer to the map in the map item for consistency checking. */\r
+ p_item->p_map = p_map;\r
+#endif\r
+}\r
+\r
+\r
+static void\r
+__cl_rbmap_del_bal(\r
+ IN cl_rbmap_t* const p_map,\r
+ IN cl_rbmap_item_t* p_item )\r
+{\r
+ cl_rbmap_item_t *p_uncle;\r
+\r
+ while( (p_item->color != CL_MAP_RED) && (p_item->p_up != &p_map->root) )\r
+ {\r
+ if( __cl_rbmap_is_left_child( p_item ) )\r
+ {\r
+ p_uncle = p_item->p_up->p_right;\r
+\r
+ if( p_uncle->color == CL_MAP_RED )\r
+ {\r
+ p_uncle->color = CL_MAP_BLACK;\r
+ p_item->p_up->color = CL_MAP_RED;\r
+ __cl_rbmap_rot_left( p_map, p_item->p_up );\r
+ p_uncle = p_item->p_up->p_right;\r
+ }\r
+\r
+ if( p_uncle->p_right->color != CL_MAP_RED )\r
+ {\r
+ if( p_uncle->p_left->color != CL_MAP_RED )\r
+ {\r
+ p_uncle->color = CL_MAP_RED;\r
+ p_item = p_item->p_up;\r
+ continue;\r
+ }\r
+\r
+ p_uncle->p_left->color = CL_MAP_BLACK;\r
+ p_uncle->color = CL_MAP_RED;\r
+ __cl_rbmap_rot_right( p_map, p_uncle );\r
+ p_uncle = p_item->p_up->p_right;\r
+ }\r
+ p_uncle->color = p_item->p_up->color;\r
+ p_item->p_up->color = CL_MAP_BLACK;\r
+ p_uncle->p_right->color = CL_MAP_BLACK;\r
+ __cl_rbmap_rot_left( p_map, p_item->p_up );\r
+ break;\r
+ }\r
+ else\r
+ {\r
+ p_uncle = p_item->p_up->p_left;\r
+\r
+ if( p_uncle->color == CL_MAP_RED )\r
+ {\r
+ p_uncle->color = CL_MAP_BLACK;\r
+ p_item->p_up->color = CL_MAP_RED;\r
+ __cl_rbmap_rot_right( p_map, p_item->p_up );\r
+ p_uncle = p_item->p_up->p_left;\r
+ }\r
+\r
+ if( p_uncle->p_left->color != CL_MAP_RED )\r
+ {\r
+ if( p_uncle->p_right->color != CL_MAP_RED )\r
+ {\r
+ p_uncle->color = CL_MAP_RED;\r
+ p_item = p_item->p_up;\r
+ continue;\r
+ }\r
+\r
+ p_uncle->p_right->color = CL_MAP_BLACK;\r
+ p_uncle->color = CL_MAP_RED;\r
+ __cl_rbmap_rot_left( p_map, p_uncle );\r
+ p_uncle = p_item->p_up->p_left;\r
+ }\r
+ p_uncle->color = p_item->p_up->color;\r
+ p_item->p_up->color = CL_MAP_BLACK;\r
+ p_uncle->p_left->color = CL_MAP_BLACK;\r
+ __cl_rbmap_rot_right( p_map, p_item->p_up );\r
+ break;\r
+ }\r
+ }\r
+ p_item->color = CL_MAP_BLACK;\r
+}\r
+\r
+\r
+void\r
+cl_rbmap_remove_item(\r
+ IN cl_rbmap_t* const p_map,\r
+ IN cl_rbmap_item_t* const p_item )\r
+{\r
+ cl_rbmap_item_t *p_child, *p_del_item;\r
+\r
+ CL_ASSERT( p_map );\r
+ CL_ASSERT( p_map->state == CL_INITIALIZED );\r
+ CL_ASSERT( p_item );\r
+ CL_ASSERT( p_item->p_map == p_map );\r
+\r
+ if( p_item == cl_rbmap_end( p_map ) )\r
+ return;\r
+\r
+ if( p_item->p_right == &p_map->nil )\r
+ {\r
+ /* The item being removed has children on at most its left. */\r
+ p_del_item = p_item;\r
+ p_child = p_del_item->p_left;\r
+ }\r
+ else if( p_item->p_left == &p_map->nil )\r
+ {\r
+ /* The item being removed has children on at most its right. */\r
+ p_del_item = p_item;\r
+ p_child = p_del_item->p_right;\r
+ }\r
+ else\r
+ {\r
+ /*\r
+	 * The item being removed has children on both sides.\r
+ * We select the item that will replace it. After removing\r
+ * the substitute item and rebalancing, the tree will have the\r
+ * correct topology. Exchanging the substitute for the item\r
+ * will finalize the removal.\r
+ */\r
+ p_del_item = p_item->p_right;\r
+ CL_ASSERT( p_del_item != &p_map->nil );\r
+ while( p_del_item->p_left != &p_map->nil )\r
+ p_del_item = p_del_item->p_left;\r
+ p_child = p_del_item->p_right;\r
+ }\r
+\r
+ /* Decrement the item count. */\r
+ p_map->count--;\r
+\r
+ /*\r
+ * This assignment may modify the parent pointer of the nil node.\r
+ * This is inconsequential.\r
+ */\r
+ p_child->p_up = p_del_item->p_up;\r
+	(*__cl_rbmap_get_parent_ptr_to_item( p_del_item )) = p_child;	/* splice the node out of the tree */\r
+\r
+ if( p_del_item->color != CL_MAP_RED )\r
+ __cl_rbmap_del_bal( p_map, p_child );\r
+\r
+ /*\r
+ * Note that the splicing done below does not need to occur before\r
+ * the tree is balanced, since the actual topology changes are made by the\r
+ * preceding code. The topology is preserved by the color assignment made\r
+ * below (reader should be reminded that p_del_item == p_item in some cases).\r
+ */\r
+ if( p_del_item != p_item )\r
+ {\r
+ /*\r
+ * Finalize the removal of the specified item by exchanging it with\r
+ * the substitute which we removed above.\r
+ */\r
+ p_del_item->p_up = p_item->p_up;\r
+ p_del_item->p_left = p_item->p_left;\r
+ p_del_item->p_right = p_item->p_right;\r
+ (*__cl_rbmap_get_parent_ptr_to_item( p_item )) = p_del_item;\r
+ p_item->p_right->p_up = p_del_item;\r
+ p_item->p_left->p_up = p_del_item;\r
+ p_del_item->color = p_item->color;\r
+ }\r
+\r
+ CL_ASSERT( p_map->nil.color != CL_MAP_RED );\r
+\r
+#ifdef _DEBUG_\r
+ /* Clear the pointer to the map since the item has been removed. */\r
+ p_item->p_map = NULL;\r
+#endif\r
+}\r
+\r
+\r
/******************************************************************************\r
*******************************************************************************\r
************** ************\r
OUT VAPI_qp_attr_t *qp_attr_p, \r
OUT VAPI_qp_attr_mask_t *attr_mask_p)\r
{\r
- /* VAPI doesn't support modifying the WQE depth ever. */\r
- if( modify_attr_p->state.rtr.opts & IB_MOD_QP_SQ_DEPTH ||\r
- modify_attr_p->state.rtr.opts & IB_MOD_QP_RQ_DEPTH )\r
- {\r
- return IB_UNSUPPORTED;\r
- }\r
\r
qp_attr_p->qp_state = mlnx_map_ibal_qp_state(modify_attr_p->req_state);\r
*attr_mask_p = QP_ATTR_QP_STATE;\r
break;\r
\r
case IB_QPS_RTR:\r
+ /* VAPI doesn't support modifying the WQE depth ever. */\r
+ if( modify_attr_p->state.rtr.opts & IB_MOD_QP_SQ_DEPTH ||\r
+ modify_attr_p->state.rtr.opts & IB_MOD_QP_RQ_DEPTH )\r
+ {\r
+ return IB_UNSUPPORTED;\r
+ }\r
+\r
*attr_mask_p |= QP_ATTR_RQ_PSN |\r
QP_ATTR_DEST_QP_NUM |\r
QP_ATTR_QP_OUS_RD_ATOM |\r
break;\r
\r
case IB_QPS_RTS:\r
+ /* VAPI doesn't support modifying the WQE depth ever. */\r
+ if( modify_attr_p->state.rts.opts & IB_MOD_QP_SQ_DEPTH ||\r
+ modify_attr_p->state.rts.opts & IB_MOD_QP_RQ_DEPTH )\r
+ {\r
+ return IB_UNSUPPORTED;\r
+ }\r
+\r
*attr_mask_p |= QP_ATTR_SQ_PSN |\r
QP_ATTR_RETRY_COUNT |\r
QP_ATTR_RNR_RETRY |\r
MTL_DEBUG4("THH_hob_get_gid_tbl_local: hca_hndl=0x%p, port= %d, return table len = %d\n",\r
hca_hndl, port, tbl_len_in);\r
\r
- if (MOSAL_get_exec_ctx() != MOSAL_IN_TASK) {\r
+ if (MOSAL_get_exec_ctx() != MOSAL_IN_TASK && use_mad_query_for_gid_prefix) {\r
MTL_ERROR1("THH_hob_get_gid_tbl: NOT IN TASK CONTEXT)\n");\r
return HH_ERR;\r
}\r
return HH_EINVAL;\r
}\r
\r
- mad_frame_in = TNMALLOC(u_int8_t, IB_MAD_SIZE);\r
- if ( !mad_frame_in ) {\r
- return HH_EAGAIN;\r
- }\r
- mad_frame_out = TNMALLOC(u_int8_t, IB_MAD_SIZE);\r
- if ( !mad_frame_out ) {\r
- FREE(mad_frame_in);\r
- return HH_EAGAIN;\r
- }\r
-\r
\r
/* get GID table using MAD commands in THH_cmd object */\r
if (use_mad_query_for_gid_prefix == TRUE) {\r
+\r
+ mad_frame_in = TNMALLOC(u_int8_t, IB_MAD_SIZE);\r
+ if ( !mad_frame_in ) {\r
+ return HH_EAGAIN;\r
+ }\r
+ mad_frame_out = TNMALLOC(u_int8_t, IB_MAD_SIZE);\r
+ if ( !mad_frame_out ) {\r
+ FREE(mad_frame_in);\r
+ return HH_EAGAIN;\r
+ }\r
/* First, get the GID prefix from via MAD query */\r
memset(mad_frame_in, 0, sizeof(mad_frame_in));\r
memset(mad_frame_out, 0, sizeof(mad_frame_out));\r
}\r
}\r
}\r
+ FREE(mad_frame_out);\r
+ FREE(mad_frame_in);\r
} else {\r
memset(&port_info, 0, sizeof(port_info));\r
hh_ret = THH_hob_get_qpm ( thh_hob_p, &qpm );\r
if (hh_ret != HH_OK) {\r
MTL_ERROR2( "THH_hob_get_qpm: invalid QPM handle (ret= %d)\n", hh_ret);\r
- FREE(mad_frame_out);\r
- FREE(mad_frame_in);\r
return HH_EINVAL;\r
}\r
/*** warning C4242: 'function' : conversion from 'int' to 'u_int8_t', possible loss of data ***/\r
hh_ret = THH_qpm_get_all_sgids(qpm,port,(u_int8_t)num_guids, param_gid_p);\r
if (hh_ret != HH_OK) {\r
MTL_ERROR2( "THH_qpm_get_all_sgids failed (ret= %d)\n", hh_ret);\r
- FREE(mad_frame_out);\r
- FREE(mad_frame_in);\r
return HH_EINVAL;\r
}\r
- FREE(mad_frame_out);\r
- FREE(mad_frame_in);\r
return HH_OK;\r
}\r
\r
- FREE(mad_frame_out);\r
- FREE(mad_frame_in);\r
return HH_OK;\r
} /* THH_get_gid_tbl */\r
/******************************************************************************\r
#define _CL_QMAP_H_\r
\r
\r
+#include <complib/cl_rbmap.h>\r
#include <complib/cl_qpool.h>\r
\r
\r
*********/\r
\r
\r
-/****i* Component Library: Quick Map/cl_map_color_t\r
-* NAME\r
-* cl_map_color_t\r
-*\r
-* DESCRIPTION\r
-* The cl_map_color_t enumerated type is used to note the color of\r
-* nodes in a map.\r
-*\r
-* SYNOPSIS\r
-*/\r
-typedef enum _cl_map_color\r
-{\r
- CL_MAP_RED,\r
- CL_MAP_BLACK\r
-\r
-} cl_map_color_t;\r
-/*\r
-* VALUES\r
-* CL_MAP_RED\r
-* The node in the map is red.\r
-*\r
-* CL_MAP_BLACK\r
-* The node in the map is black.\r
-*\r
-* SEE ALSO\r
-* Quick Map, cl_map_item_t\r
-*********/\r
-\r
-\r
/****s* Component Library: Quick Map/cl_map_item_t\r
* NAME\r
* cl_map_item_t\r
--- /dev/null
+/*++\r
+Copyright © InfiniCon Systems, Inc. All rights reserved.\r
+\r
+THIS SOFTWARE IS PROVIDED BY INFINICON SYSTEMS, INC. ("INFINICON") TO EACH\r
+PERSON OR COMPANY ("RECIPIENT") ON AN "AS IS" BASIS. ANY EXPRESS OR IMPLIED\r
+WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF\r
+MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.\r
+IN NO EVENT SHALL INFINICON BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\r
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,\r
+PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;\r
+OR BUSINESS INTERRUPTION) HOWEVER CAUSED OR ON ANY THEORY OF LIABILITY,\r
+WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR\r
+OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED\r
+OF THE POSSIBILITY OF SUCH DAMAGE.\r
+\r
+Any agreements between InfiniCon and the Recipient shall apply to Recipient's\r
+use of the Software.\r
+--*/\r
+\r
+\r
+/*\r
+ * Abstract:\r
+ * Declaration of primitive red/black map, a red/black tree where the caller\r
+ * always provides all necessary storage.\r
+ *\r
+ * This tree implementation exposes functions required for the client to\r
+ * manually walk the map, allowing clients to implement various methods\r
+ *	of comparison.\r
+ *\r
+ * Environment:\r
+ * All\r
+ *\r
+ * $Revision$\r
+ */\r
+\r
+\r
+#ifndef _CL_RBMAP_H_\r
+#define _CL_RBMAP_H_\r
+\r
+\r
+#include <complib/cl_types.h>\r
+\r
+\r
+/****h* Component Library/RB Map\r
+* NAME\r
+* RB Map\r
+*\r
+* DESCRIPTION\r
+* RB map implements a binary tree that stores user provided cl_rbmap_item_t\r
+* structures. Each item stored in a RB map has a unique key\r
+* (duplicates are not allowed). RB map provides the ability to\r
+* efficiently search for an item given a key.\r
+*\r
+* RB map does not allocate any memory, and can therefore not fail\r
+* any operations due to insufficient memory. RB map can thus be useful\r
+* in minimizing the error paths in code.\r
+*\r
+* RB map is not thread safe, and users must provide serialization when\r
+* adding and removing items from the map.\r
+*\r
+* The RB map functions operate on a cl_rbmap_t structure which should be\r
+* treated as opaque and should be manipulated only through the provided\r
+* functions.\r
+*\r
+* SEE ALSO\r
+* Structures:\r
+* cl_rbmap_t, cl_rbmap_item_t\r
+*\r
+* Initialization:\r
+* cl_rbmap_init\r
+*\r
+* Iteration:\r
+* cl_rbmap_root, cl_rbmap_end, cl_rbmap_left, cl_rbmap_right, cl_rbmap_up\r
+*\r
+*	Manipulation:\r
+*		cl_rbmap_insert, cl_rbmap_remove_item, cl_rbmap_reset\r
+*\r
+*	Attributes:\r
+*		cl_rbmap_count, cl_is_rbmap_empty\r
+*********/\r
+\r
+\r
+/****i* Component Library: RB Map/cl_map_color_t\r
+* NAME\r
+* cl_map_color_t\r
+*\r
+* DESCRIPTION\r
+* The cl_map_color_t enumerated type is used to note the color of\r
+* nodes in a map.\r
+*\r
+* SYNOPSIS\r
+*/\r
+typedef enum _cl_map_color\r
+{\r
+ CL_MAP_RED,\r
+ CL_MAP_BLACK\r
+\r
+} cl_map_color_t;\r
+/*\r
+* VALUES\r
+* CL_MAP_RED\r
+* The node in the map is red.\r
+*\r
+* CL_MAP_BLACK\r
+* The node in the map is black.\r
+*\r
+* SEE ALSO\r
+* RB Map, cl_rbmap_item_t\r
+*********/\r
+\r
+\r
+/****s* Component Library: RB Map/cl_rbmap_item_t\r
+* NAME\r
+* cl_rbmap_item_t\r
+*\r
+* DESCRIPTION\r
+* The cl_rbmap_item_t structure is used by maps to store objects.\r
+*\r
+* The cl_rbmap_item_t structure should be treated as opaque and should\r
+* be manipulated only through the provided functions.\r
+*\r
+* SYNOPSIS\r
+*/\r
+typedef struct _cl_rbmap_item\r
+{\r
+ struct _cl_rbmap_item *p_left;\r
+ struct _cl_rbmap_item *p_right;\r
+ struct _cl_rbmap_item *p_up;\r
+ cl_map_color_t color;\r
+#ifdef _DEBUG_\r
+ struct _cl_rbmap *p_map;\r
+#endif\r
+\r
+} cl_rbmap_item_t;\r
+/*\r
+* FIELDS\r
+* p_left\r
+* Pointer to the map item that is a child to the left of the node.\r
+*\r
+* p_right\r
+* Pointer to the map item that is a child to the right of the node.\r
+*\r
+* p_up\r
+* Pointer to the map item that is the parent of the node.\r
+*\r
+* color\r
+* Indicates whether a node is red or black in the map.\r
+*\r
+* NOTES\r
+* None of the fields of this structure should be manipulated by users, as\r
+*	they are critical to the proper operation of the map in which they\r
+* are stored.\r
+*\r
+* To allow storing items in either a quick list, a quick pool, or a quick\r
+* map, the map implementation guarantees that the map item can be safely\r
+* cast to a pool item used for storing an object in a quick pool, or cast to\r
+* a list item used for storing an object in a quick list. This removes the\r
+* need to embed a map item, a list item, and a pool item in objects that need\r
+* to be stored in a quick list, a quick pool, and a RB map.\r
+*\r
+* SEE ALSO\r
+* RB Map, cl_rbmap_insert, cl_rbmap_key, cl_pool_item_t, cl_list_item_t\r
+*********/\r
+\r
+\r
+/****s* Component Library: RB Map/cl_rbmap_t\r
+* NAME\r
+* cl_rbmap_t\r
+*\r
+* DESCRIPTION\r
+*	RB map structure.\r
+*\r
+* The cl_rbmap_t structure should be treated as opaque and should\r
+* be manipulated only through the provided functions.\r
+*\r
+* SYNOPSIS\r
+*/\r
+typedef struct _cl_rbmap\r
+{\r
+ cl_rbmap_item_t root;\r
+ cl_rbmap_item_t nil;\r
+ cl_state_t state;\r
+ size_t count;\r
+\r
+} cl_rbmap_t;\r
+/*\r
+* PARAMETERS\r
+* root\r
+* Map item that serves as root of the map. The root is set up to\r
+* always have itself as parent. The left pointer is set to point to\r
+* the item at the root.\r
+*\r
+*	nil\r
+*		Map item that serves as terminator for all leaves.  All leaf\r
+*		nodes point to nil rather than NULL.\r
+*\r
+* state\r
+* State of the map, used to verify that operations are permitted.\r
+*\r
+* count\r
+* Number of items in the map.\r
+*\r
+* SEE ALSO\r
+* RB Map\r
+*********/\r
+\r
+\r
+#ifdef __cplusplus\r
+extern "C" {\r
+#endif\r
+\r
+\r
+/****f* Component Library: RB Map/cl_rbmap_count\r
+* NAME\r
+* cl_rbmap_count\r
+*\r
+* DESCRIPTION\r
+* The cl_rbmap_count function returns the number of items stored\r
+* in a RB map.\r
+*\r
+* SYNOPSIS\r
+*/\r
+CL_INLINE size_t CL_API\r
+cl_rbmap_count(\r
+ IN const cl_rbmap_t* const p_map )\r
+{\r
+ CL_ASSERT( p_map );\r
+ CL_ASSERT( p_map->state == CL_INITIALIZED );\r
+ return( p_map->count );\r
+}\r
+/*\r
+* PARAMETERS\r
+* p_map\r
+* [in] Pointer to a cl_rbmap_t structure whose item count to return.\r
+*\r
+* RETURN VALUE\r
+* Returns the number of items stored in the map.\r
+*\r
+* SEE ALSO\r
+* RB Map, cl_is_rbmap_empty\r
+*********/\r
+\r
+\r
+/****f* Component Library: RB Map/cl_is_rbmap_empty\r
+* NAME\r
+* cl_is_rbmap_empty\r
+*\r
+* DESCRIPTION\r
+* The cl_is_rbmap_empty function returns whether a RB map is empty.\r
+*\r
+* SYNOPSIS\r
+*/\r
+CL_INLINE boolean_t CL_API\r
+cl_is_rbmap_empty(\r
+ IN const cl_rbmap_t* const p_map )\r
+{\r
+ CL_ASSERT( p_map );\r
+ CL_ASSERT( p_map->state == CL_INITIALIZED );\r
+\r
+ return( p_map->count == 0 );\r
+}\r
+/*\r
+* PARAMETERS\r
+* p_map\r
+* [in] Pointer to a cl_rbmap_t structure to test for emptiness.\r
+*\r
+* RETURN VALUES\r
+* TRUE if the RB map is empty.\r
+*\r
+* FALSE otherwise.\r
+*\r
+* SEE ALSO\r
+* RB Map, cl_rbmap_count, cl_rbmap_reset\r
+*********/\r
+\r
+\r
+/****f* Component Library: RB Map/cl_rbmap_reset\r
+* NAME\r
+* cl_rbmap_reset\r
+*\r
+* DESCRIPTION\r
+* The cl_rbmap_reset function removes all items in a RB map,\r
+* leaving it empty.\r
+*\r
+* SYNOPSIS\r
+*/\r
+CL_INLINE void CL_API\r
+cl_rbmap_reset(\r
+ IN cl_rbmap_t* const p_map )\r
+{\r
+ CL_ASSERT( p_map );\r
+ CL_ASSERT( p_map->state == CL_INITIALIZED );\r
+\r
+ p_map->root.p_left = &p_map->nil;\r
+ p_map->count = 0;\r
+}\r
+/*\r
+* PARAMETERS\r
+* p_map\r
+* [in] Pointer to a cl_rbmap_t structure to empty.\r
+*\r
+* RETURN VALUES\r
+* This function does not return a value.\r
+*\r
+* SEE ALSO\r
+* RB Map, cl_rbmap_remove, cl_rbmap_remove_item\r
+*********/\r
+\r
+\r
+/****f* Component Library: RB Map/cl_rbmap_init\r
+* NAME\r
+* cl_rbmap_init\r
+*\r
+* DESCRIPTION\r
+*	The cl_rbmap_init function initializes a RB map for use.\r
+*\r
+* SYNOPSIS\r
+*/\r
+CL_INLINE void CL_API\r
+cl_rbmap_init(\r
+ IN cl_rbmap_t* const p_map )\r
+{\r
+ CL_ASSERT( p_map );\r
+\r
+ /* special setup for the root node */\r
+ p_map->root.p_left = &p_map->nil;\r
+ p_map->root.p_right = &p_map->nil;\r
+ p_map->root.p_up = &p_map->root;\r
+ p_map->root.color = CL_MAP_BLACK;\r
+\r
+ /* Setup the node used as terminator for all leaves. */\r
+ p_map->nil.p_left = &p_map->nil;\r
+ p_map->nil.p_right = &p_map->nil;\r
+ p_map->nil.p_up = &p_map->nil;\r
+ p_map->nil.color = CL_MAP_BLACK;\r
+\r
+#ifdef _DEBUG_\r
+ p_map->root.p_map = p_map;\r
+ p_map->nil.p_map = p_map;\r
+#endif\r
+\r
+ p_map->state = CL_INITIALIZED;\r
+\r
+ p_map->count = 0;\r
+}\r
+/*\r
+* PARAMETERS\r
+* p_map\r
+* [in] Pointer to a cl_rbmap_t structure to initialize.\r
+*\r
+* RETURN VALUES\r
+* This function does not return a value.\r
+*\r
+* NOTES\r
+* Allows calling RB map manipulation functions.\r
+*\r
+* SEE ALSO\r
+* RB Map, cl_rbmap_insert, cl_rbmap_remove\r
+*********/\r
+\r
+\r
+/****f* Component Library: RB Map/cl_rbmap_root\r
+* NAME\r
+* cl_rbmap_root\r
+*\r
+* DESCRIPTION\r
+* The cl_rbmap_root function returns the root of a RB map.\r
+*\r
+* SYNOPSIS\r
+*/\r
+CL_INLINE cl_rbmap_item_t* const CL_API\r
+cl_rbmap_root(\r
+ IN const cl_rbmap_t* const p_map )\r
+{\r
+ CL_ASSERT( p_map );\r
+ return( p_map->root.p_left );\r
+}\r
+/*\r
+* PARAMETERS\r
+* p_map\r
+* [in] Pointer to a cl_rbmap_t structure whose root to return.\r
+*\r
+*	RETURN VALUE\r
+*	Pointer to the map item at the root of the map.\r
+*\r
+* NOTES\r
+*	The root is the topmost item of the map.  If the map is empty, the\r
+*	returned pointer compares equal to the value returned by cl_rbmap_end.\r
+*\r
+* SEE ALSO\r
+* RB Map, cl_rbmap_head, cl_rbmap_tail, cl_rbmap_next, cl_rbmap_prev,\r
+* cl_rbmap_end, cl_rbmap_left, cl_rbmap_right, cl_rbmap_up\r
+*********/\r
+\r
+\r
+/****f* Component Library: RB Map/cl_rbmap_end\r
+* NAME\r
+* cl_rbmap_end\r
+*\r
+* DESCRIPTION\r
+* The cl_rbmap_end function returns the end of a RB map.\r
+*\r
+* SYNOPSIS\r
+*/\r
+CL_INLINE const cl_rbmap_item_t* const CL_API\r
+cl_rbmap_end(\r
+ IN const cl_rbmap_t* const p_map )\r
+{\r
+ CL_ASSERT( p_map );\r
+ CL_ASSERT( p_map->state == CL_INITIALIZED );\r
+ /* Nil is the end of the map. */\r
+ return( &p_map->nil );\r
+}\r
+/*\r
+* PARAMETERS\r
+* p_map\r
+* [in] Pointer to a cl_rbmap_t structure whose end to return.\r
+*\r
+* RETURN VALUE\r
+* Pointer to the end of the map.\r
+*\r
+* NOTES\r
+* cl_rbmap_end is useful for determining the validity of map items returned\r
+* by cl_rbmap_head, cl_rbmap_tail, cl_rbmap_next, or cl_rbmap_prev. If the map\r
+* item pointer returned by any of these functions compares to the end, the\r
+*	end of the map was encountered.\r
+* When using cl_rbmap_head or cl_rbmap_tail, this condition indicates that\r
+* the map is empty.\r
+*\r
+* SEE ALSO\r
+* RB Map, cl_rbmap_head, cl_rbmap_tail, cl_rbmap_next, cl_rbmap_prev\r
+* cl_rbmap_root, cl_rbmap_left, cl_rbmap_right, cl_rbmap_up\r
+*********/\r
+\r
+\r
+/****f* Component Library: RB Map/cl_rbmap_left\r
+* NAME\r
+* cl_rbmap_left\r
+*\r
+* DESCRIPTION\r
+* The cl_rbmap_left function returns the map item to the left\r
+* of the specified map item.\r
+*\r
+* SYNOPSIS\r
+*/\r
+CL_INLINE cl_rbmap_item_t* CL_API\r
+cl_rbmap_left(\r
+ IN const cl_rbmap_item_t* const p_item )\r
+{\r
+ CL_ASSERT( p_item );\r
+ return( (cl_rbmap_item_t*)p_item->p_left );\r
+}\r
+/*\r
+* PARAMETERS\r
+* p_item\r
+*		[in] Pointer to a map item whose left child to return.\r
+*\r
+* RETURN VALUES\r
+* Pointer to the map item to the left in a RB map.\r
+*\r
+* Pointer to the map end if no item is to the left.\r
+*\r
+* SEE ALSO\r
+* RB Map, cl_rbmap_head, cl_rbmap_tail, cl_rbmap_next, cl_rbmap_end,\r
+* cl_rbmap_item_t\r
+*********/\r
+\r
+\r
+/****f* Component Library: RB Map/cl_rbmap_right\r
+* NAME\r
+* cl_rbmap_right\r
+*\r
+* DESCRIPTION\r
+* The cl_rbmap_right function returns the map item to the right\r
+* of the specified map item.\r
+*\r
+* SYNOPSIS\r
+*/\r
+CL_INLINE cl_rbmap_item_t* CL_API\r
+cl_rbmap_right(\r
+ IN const cl_rbmap_item_t* const p_item )\r
+{\r
+ CL_ASSERT( p_item );\r
+ return( (cl_rbmap_item_t*)p_item->p_right );\r
+}\r
+/*\r
+* PARAMETERS\r
+* p_item\r
+*		[in] Pointer to a map item whose right child to return.\r
+*\r
+* RETURN VALUES\r
+* Pointer to the map item to the right in a RB map.\r
+*\r
+* Pointer to the map end if no item is to the right.\r
+*\r
+* SEE ALSO\r
+* RB Map, cl_rbmap_head, cl_rbmap_tail, cl_rbmap_next, cl_rbmap_end,\r
+* cl_rbmap_item_t\r
+*********/\r
+\r
+\r
+/****f* Component Library: RB Map/cl_rbmap_insert\r
+* NAME\r
+* cl_rbmap_insert\r
+*\r
+* DESCRIPTION\r
+* The cl_rbmap_insert function inserts a map item into a RB map.\r
+*\r
+* SYNOPSIS\r
+*/\r
+CL_EXPORT void CL_API\r
+cl_rbmap_insert(\r
+ IN cl_rbmap_t* const p_map,\r
+ IN cl_rbmap_item_t* const p_insert_at,\r
+ IN cl_rbmap_item_t* const p_item,\r
+ IN boolean_t left );\r
+/*\r
+* PARAMETERS\r
+* p_map\r
+* [in] Pointer to a cl_rbmap_t structure into which to add the item.\r
+*\r
+* p_insert_at\r
+* [in] Pointer to a cl_rbmap_item_t structure to serve as parent\r
+* to p_item.\r
+*\r
+* p_item\r
+*		[in] Pointer to a cl_rbmap_item_t structure to insert into the RB map.\r
+*\r
+* left\r
+* [in] Indicates that p_item should be inserted to the left of p_insert_at.\r
+*\r
+* RETURN VALUE\r
+*	This function does not return a value.\r
+*\r
+* NOTES\r
+* Insertion operations may cause the RB map to rebalance.\r
+*\r
+* SEE ALSO\r
+* RB Map, cl_rbmap_remove, cl_rbmap_item_t\r
+*********/\r
+\r
+\r
+/****f* Component Library: RB Map/cl_rbmap_remove_item\r
+* NAME\r
+* cl_rbmap_remove_item\r
+*\r
+* DESCRIPTION\r
+* The cl_rbmap_remove_item function removes the specified map item\r
+* from a RB map.\r
+*\r
+* SYNOPSIS\r
+*/\r
+CL_EXPORT void CL_API\r
+cl_rbmap_remove_item(\r
+ IN cl_rbmap_t* const p_map,\r
+ IN cl_rbmap_item_t* const p_item );\r
+/*\r
+* PARAMETERS\r
+* p_item\r
+* [in] Pointer to a map item to remove from its RB map.\r
+*\r
+* RETURN VALUES\r
+* This function does not return a value.\r
+*\r
+* In a debug build, cl_rbmap_remove_item asserts that the item being removed\r
+* is in the specified map.\r
+*\r
+* NOTES\r
+* Removes the map item pointed to by p_item from its RB map.\r
+*\r
+* SEE ALSO\r
+* RB Map, cl_rbmap_remove, cl_rbmap_reset, cl_rbmap_insert\r
+*********/\r
+\r
+\r
+#ifdef __cplusplus\r
+}\r
+#endif\r
+\r
+\r
+#endif /* _CL_RBMAP_H_ */\r
typedef struct _al_query* __ptr64 ib_query_handle_t;\r
typedef struct _al_sub* __ptr64 ib_sub_handle_t;\r
typedef struct _al_listen* __ptr64 ib_listen_handle_t;\r
-typedef struct _al_conn* __ptr64 ib_cm_handle_t;\r
typedef struct _al_ioc* __ptr64 ib_ioc_handle_t;\r
typedef struct _al_svc_entry* __ptr64 ib_svc_handle_t;\r
typedef struct _al_pool_key* __ptr64 ib_pool_key_t;\r
typedef struct _al_pool* __ptr64 ib_pool_handle_t;\r
\r
\r
+typedef struct _ib_cm_handle\r
+{\r
+ ib_al_handle_t h_al;\r
+ ib_qp_handle_t h_qp;\r
+ net32_t cid;\r
+\r
+} ib_cm_handle_t;\r
+\r
+\r
/****s* Access Layer/ib_shmid_t\r
* NAME\r
* ib_shmid_t\r
{\r
const uint8_t* __ptr64 p_rtu_pdata;\r
\r
- ib_qp_type_t qp_type;\r
ib_qp_handle_t h_qp;\r
const void* __ptr64 qp_context;\r
\r
* A reference to user-defined private data sent as part of the ready\r
* to use message.\r
*\r
-* qp_type\r
-* Indicates the CM service type.\r
-*\r
* h_qp\r
* The queue pair handle associated with the connection request.\r
*\r
\r
const uint8_t* __ptr64 p_rej_pdata;\r
\r
- ib_qp_type_t qp_type;\r
-\r
- /* valid for rc, uc & rd qp_type only */\r
ib_qp_handle_t h_qp;\r
const void* __ptr64 qp_context;\r
\r
* A reference to user-defined private data sent as part of the connection\r
* request reply.\r
*\r
-* qp_type\r
-* Indicates the CM service type.\r
-*\r
* h_qp\r
* The queue pair handle associated with a connection request.\r
*\r
{\r
const uint8_t* __ptr64 p_mra_pdata;\r
\r
- ib_qp_type_t qp_type;\r
-\r
- /* valid for rc, uc rd qp_type only */\r
ib_qp_handle_t h_qp;\r
const void* __ptr64 qp_context;\r
\r
* p_mra_pdata\r
* A reference to user-defined private data sent as part of the MRA.\r
*\r
-* qp_type\r
-* Indicates the CM service type.\r
-*\r
* h_qp\r
* The queue pair handle associated with a connection request.\r
*\r
\r
const uint8_t* __ptr64 p_lap_pdata;\r
\r
- ib_qp_type_t qp_type;\r
-\r
- /* valid for rc, uc & rd qp_type only */\r
const void* __ptr64 qp_context;\r
\r
} ib_cm_lap_rec_t;\r
* A reference to user-defined private data sent as part of the load\r
* alternate path request.\r
*\r
-* qp_type\r
-* Indicates the CM service type.\r
-*\r
* qp_context\r
* The queue pair context associated with a connection request.\r
*\r
\r
const uint8_t* __ptr64 p_apr_pdata;\r
\r
- ib_qp_type_t qp_type;\r
-\r
- /* valid for rc, uc & rd qp_type only */\r
ib_qp_handle_t h_qp;\r
const void* __ptr64 qp_context;\r
\r
* A reference to user-defined private data sent as part of the alternate\r
* path response.\r
*\r
-* qp_type\r
-* Indicates the CM service type.\r
-*\r
* h_qp\r
* The queue pair handle associated with the alternate path response.\r
*\r
\r
const uint8_t* __ptr64 p_dreq_pdata;\r
\r
- ib_qp_type_t qp_type;\r
-\r
- /* valid for rc, uc & rd qp_type only */\r
const void* __ptr64 qp_context;\r
\r
} ib_cm_dreq_rec_t;\r
* A reference to user-defined private data sent as part of the\r
* disconnect request.\r
*\r
-* qp_type\r
-* Indicates the CM service type.\r
-*\r
* qp_context\r
* The queue pair context associated with the disconnect request.\r
*\r
\r
const uint8_t* __ptr64 p_drep_pdata;\r
\r
- ib_qp_type_t qp_type;\r
-\r
- /* valid for rc, uc & rd qp_type only */\r
ib_qp_handle_t h_qp;\r
const void* __ptr64 qp_context;\r
\r
* A reference to user-defined private data sent as part of the\r
* disconnect reply.\r
*\r
-* qp_type\r
-* Indicates the CM service type.\r
-*\r
* h_qp\r
* The queue pair handle associated with the disconnect reply.\r
*\r
ib_net16_t pkey;\r
\r
uint8_t* __ptr64 p_compare_buffer;\r
- uint32_t compare_offset;\r
- uint32_t compare_length;\r
+ uint8_t compare_offset;\r
+ uint8_t compare_length;\r
\r
ib_pfn_cm_req_cb_t pfn_cm_req_cb;\r
\r
ib_qp_type_t qp_type;\r
\r
- /* valid for rc, uc & rd qp_type only */\r
- ib_pfn_cm_mra_cb_t pfn_cm_mra_cb;\r
- ib_pfn_cm_rej_cb_t pfn_cm_rej_cb;\r
-\r
/* valid for ud qp_type only */\r
const void* __ptr64 sidr_context;\r
\r
ib_qp_handle_t h_qp;\r
\r
uint8_t* __ptr64 p_compare_buffer;\r
- uint32_t compare_offset;\r
- uint32_t compare_length;\r
+ uint8_t compare_offset;\r
+ uint8_t compare_length;\r
\r
uint8_t resp_res;\r
uint8_t init_depth;\r
uint8_t rnr_nak_timeout;\r
uint8_t rnr_retry_cnt;\r
\r
+ ib_pfn_cm_rej_cb_t pfn_cm_rej_cb;\r
+ ib_pfn_cm_mra_cb_t pfn_cm_mra_cb;\r
ib_pfn_cm_rtu_cb_t pfn_cm_rtu_cb;\r
ib_pfn_cm_lap_cb_t pfn_cm_lap_cb;\r
ib_pfn_cm_dreq_cb_t pfn_cm_dreq_cb;\r
*****/\r
\r
\r
+typedef struct _ib_cep\r
+{\r
+ void *context;\r
+ net32_t cid;\r
+\r
+} ib_cep_t;\r
+\r
+\r
+/****s* Access Layer/ib_cep_listen_t\r
+* NAME\r
+* ib_cep_listen_t\r
+*\r
+* DESCRIPTION\r
+* Request to listen for incoming connection attempts.\r
+*\r
+* SYNOPSIS\r
+*/\r
+typedef struct _ib_cep_listen\r
+{\r
+ net64_t svc_id;\r
+\r
+ net64_t port_guid;\r
+\r
+ uint8_t* __ptr64 p_cmp_buf;\r
+ uint8_t cmp_len;\r
+ uint8_t cmp_offset;\r
+\r
+} ib_cep_listen_t;\r
+/*\r
+* FIELDS\r
+* svc_id\r
+* The identifier of the service to register for incoming connection\r
+* requests.\r
+*\r
+* port_guid\r
+* Directs the communication manager to register the listen only\r
+* with the specified port. This should be set to IB_ALL_PORTS\r
+* if the listen is not directed to a particular port.\r
+*\r
+* p_cmp_buf\r
+* An optionally provided buffer that will be used to match incoming\r
+* connection requests with a registered service. Use of this buffer\r
+* permits multiple services to listen on the same service ID as long as\r
+* they provide different compare buffers. Incoming requests will\r
+* be matched against the compare buffer.\r
+*\r
+* cmp_len\r
+* Specifies the size of the compare buffer in bytes. The length must\r
+* be the same for all requests using the same service ID.\r
+*\r
+* cmp_offset\r
+* An offset into the user-defined data area of a connection request\r
+* which contains the start of the data that will be compared against.\r
+* The offset must be the same for all requests using the same service ID.\r
+*\r
+* NOTES\r
+* Users fill out this structure when listening on a service ID with the\r
+* local communication manager. The communication manager will use the given\r
+* service ID and compare buffer to route connection requests to the\r
+* appropriate client. Users may direct listen requests on a particular\r
+* channel adapter, port, or LID.\r
+*****/\r
+\r
+\r
/****f* Access Layer/ib_create_ioc\r
* NAME\r
* ib_create_ioc\r
\r
\r
\r
-/****s* User-mode Access Layer/ual_cm_listen_ioctl_t\r
+/****s* User-mode Access Layer/ual_create_cep_ioctl_t\r
* NAME\r
-* ual_cm_listen_ioctl_t\r
+* ual_create_cep_ioctl_t\r
*\r
* DESCRIPTION\r
-* IOCTL structure containing the input and output parameters to\r
-* perform a CM listen request.\r
+* IOCTL structure containing the output parameters to\r
+* create a CEP.\r
*\r
* SYNOPSIS\r
*/\r
-typedef union _ual_cm_listen_ioctl\r
+typedef struct _ual_create_cep_ioctl\r
{\r
- struct _ual_cm_listen_ioctl_in\r
- {\r
- void* __ptr64 context;\r
- ib_cm_listen_t cm_listen;\r
- /* Compare data buffer follows IOCTL buffer immediately. */\r
+ ib_api_status_t status;\r
+ net32_t cid;\r
\r
- } in;\r
- struct _ual_cm_listen_ioctl_out\r
- {\r
- ib_api_status_t status;\r
- uint64_t h_cm_listen;\r
-\r
- } out;\r
-\r
-} ual_cm_listen_ioctl_t;\r
+} ual_create_cep_ioctl_t;\r
/*\r
* FIELDS\r
-* in.listen_context\r
-* User-specified context information that is returned as a part of all\r
-* connection requests through the pfn_cm_req_cb routine. The context is\r
-* also returned through the error and destroy callbacks.\r
-*\r
-* in.cm_listen\r
-* Information used to direct the listen request to match incoming\r
-* connection requests.\r
-*\r
-* out.status\r
+* status\r
* Status of the operation.\r
*\r
-* out.h_cm_listen\r
-* Upon successful completion of this call, handle to the listen request.\r
-* This handle may be used to cancel the listen operation.\r
+* cid\r
+* CID of the created CEP.\r
*****/\r
\r
\r
\r
-/****s* User-mode Access Layer/ual_cm_cancel_ioctl_t\r
+/****s* User-mode Access Layer/ual_cep_listen_ioctl_t\r
* NAME\r
-* ual_cm_cancel_ioctl_t\r
+* ual_cep_listen_ioctl_t\r
*\r
* DESCRIPTION\r
-* IOCTL structure containing the input and output parameters for\r
-* ib_cm_cancel\r
+* IOCTL structure containing the input parameters to\r
+* perform a CM listen request.\r
*\r
* SYNOPSIS\r
*/\r
-typedef union _ual_cm_cancel_ioctl\r
+typedef struct _ual_cep_listen_ioctl\r
{\r
- struct _ual_cm_cancel_ioctl_in\r
- {\r
- uint64_t h_cm_listen;\r
+ net32_t cid;\r
+ ib_cep_listen_t cep_listen;\r
+ uint8_t compare[IB_REQ_PDATA_SIZE];\r
\r
- } in;\r
- struct _ual_cm_cancel_ioctl_out\r
- {\r
- ib_api_status_t status;\r
-\r
- } out;\r
-\r
-} ual_cm_cancel_ioctl_t;\r
+} ual_cep_listen_ioctl_t;\r
/*\r
* FIELDS\r
-* in.h_cm_listen\r
-* The listen handle that needs to be cancelled.\r
+* in.cid\r
+* CID of an existing CEP.\r
*\r
-* out.status\r
-* Status of the operation.\r
+* in.cep_listen\r
+* Information used to direct the listen request to match incoming\r
+* connection requests.\r
*****/\r
\r
\r
*\r
* DESCRIPTION\r
* IOCTL structure containing the input and output parameters for\r
-* ib_cm_req\r
+* al_cep_pre_req call.\r
*\r
* SYNOPSIS\r
*/\r
-typedef union _ual_cm_req_ioctl\r
+typedef union _ual_cep_req_ioctl\r
{\r
- struct _ual_cm_req_ioctl_in\r
+ struct _ual_cep_req_ioctl_in\r
{\r
- uint64_t h_qp; /* for CM */\r
+ net32_t cid;\r
ib_cm_req_t cm_req;\r
- ib_path_rec_t paths[1];\r
- /* If an alternate path is specified, it follows the primary path. */\r
- /* private data follows the IOCTL buffer immediately. */\r
- /* compare data follows private data immediately. */\r
+ ib_path_rec_t paths[2];\r
+ uint8_t pdata[IB_REQ_PDATA_SIZE];\r
+ uint8_t compare[IB_REQ_PDATA_SIZE];\r
\r
} in;\r
- struct _ual_cm_req_ioctl_out\r
+ struct _ual_cep_req_ioctl_out\r
{\r
ib_api_status_t status;\r
+ ib_qp_mod_t init;\r
\r
} out;\r
\r
-} ual_cm_req_ioctl_t;\r
+} ual_cep_req_ioctl_t;\r
/*\r
* FIELDS\r
+* in.cid\r
+* CID of the target CEP.\r
+*\r
* in.cm_req\r
* CM REQ parameters.\r
*\r
*\r
* out.status\r
* Status of the operation\r
+*\r
+* out.init\r
+* QP modify parameters for INIT state transition.\r
*****/\r
\r
\r
\r
-/****s* User-mode Access Layer/ual_cm_rep_ioctl_t\r
+/****s* User-mode Access Layer/ual_cep_rep_ioctl_t\r
* NAME\r
-* ual_cm_rep_ioctl_t\r
+* ual_cep_rep_ioctl_t\r
*\r
* DESCRIPTION\r
* IOCTL structure containing the input and output parameters for\r
-* ib_cm_rep\r
+* al_cep_pre_rep call.\r
*\r
* SYNOPSIS\r
*/\r
-typedef union _ual_cm_rep_ioctl\r
+typedef union _ual_cep_rep_ioctl\r
{\r
- struct _ual_cm_rep_ioctl_in\r
+ struct _ual_cep_rep_ioctl_in\r
{\r
- uint64_t h_cm_req;\r
- uint64_t h_qp;\r
+ net32_t cid;\r
ib_cm_rep_t cm_rep;\r
- /* Private data follows immediately. */\r
+ uint8_t pdata[IB_REP_PDATA_SIZE];\r
\r
} in;\r
- struct _ual_cm_rep_ioctl_out\r
+ struct _ual_cep_rep_ioctl_out\r
{\r
ib_api_status_t status;\r
+ ib_qp_mod_t init;\r
\r
} out;\r
\r
-} ual_cm_rep_ioctl_t;\r
+} ual_cep_rep_ioctl_t;\r
/*\r
* FIELDS\r
* in.h_cm_req\r
*\r
* out.status\r
* Status of the operation.\r
+*\r
+* out.init\r
+* QP modify parameters for INIT state transition.\r
*****/\r
\r
\r
\r
-/****s* User-mode Access Layer/ual_cm_rtu_ioctl_t\r
+/****s* User-mode Access Layer/ual_cep_get_rtr_ioctl_t\r
* NAME\r
-* ual_cm_rtu_ioctl_t\r
+* ual_cep_get_rtr_ioctl_t\r
*\r
* DESCRIPTION\r
-* IOCTL structure containing the input and output parameters for\r
-* ib_cm_rtu\r
+* IOCTL structure containing the output parameters for\r
+* al_cep_get_rtr_attr call.\r
*\r
* SYNOPSIS\r
*/\r
-typedef union _ual_cm_rtu_ioctl\r
+typedef struct _ual_cep_get_rtr_ioctl\r
{\r
- struct _ual_cm_rtu_ioctl_in\r
- {\r
- uint64_t h_cm_rep;\r
- ib_cm_rtu_t cm_rtu;\r
- /* Private data follows IOCTL buffer. */\r
+ ib_api_status_t status;\r
+ ib_qp_mod_t rtr;\r
\r
- } in;\r
- struct _ual_cm_rtu_ioctl_out\r
- {\r
- ib_api_status_t status;\r
+} ual_cep_get_rtr_ioctl_t;\r
+/*\r
+* FIELDS\r
+* out.status\r
+* Status of the operation.\r
+*\r
+* out.rtr\r
+* QP modify parameters for RTR state transition.\r
+*****/\r
\r
- } out;\r
\r
-} ual_cm_rtu_ioctl_t;\r
+\r
+/****s* User-mode Access Layer/ual_cep_get_rts_ioctl_t\r
+* NAME\r
+* ual_cep_get_rts_ioctl_t\r
+*\r
+* DESCRIPTION\r
+* IOCTL structure containing the output parameters for\r
+* al_cep_get_rts_attr call.\r
+*\r
+* SYNOPSIS\r
+*/\r
+typedef struct _ual_cep_get_rts_ioctl\r
+{\r
+ ib_api_status_t status;\r
+ ib_qp_mod_t rts;\r
+\r
+} ual_cep_get_rts_ioctl_t;\r
/*\r
* FIELDS\r
-* in.h_cm_rep\r
+* out.status\r
+* Status of the operation.\r
+*\r
+* out.rts\r
+* QP modify parameters for RTS state transition.\r
+*****/\r
+\r
+\r
+\r
+/****s* User-mode Access Layer/ual_cep_rtu_ioctl_t\r
+* NAME\r
+* ual_cep_rtu_ioctl_t\r
+*\r
+* DESCRIPTION\r
+* IOCTL structure containing the input parameters for\r
+* al_cep_rtu call.\r
+*\r
+* SYNOPSIS\r
+*/\r
+typedef struct _ual_cep_rtu_ioctl\r
+{\r
+ net32_t cid;\r
+ uint8_t pdata_len;\r
+ uint8_t pdata[IB_RTU_PDATA_SIZE];\r
+\r
+} ual_cep_rtu_ioctl_t;\r
+/*\r
+* FIELDS\r
+* in.cid\r
* The cm_rep connection handle got on the callback.\r
*\r
-* in.cm_rtu\r
-* CM RTU parameters.\r
+* in.pdata_len\r
+* Length of private data.\r
*\r
-* out.status\r
-* Status of the operation\r
+* in.pdata\r
+* Private data.\r
*****/\r
\r
\r
\r
-/****s* User-mode Access Layer/ual_cm_rej_ioctl_t\r
+/****s* User-mode Access Layer/ual_cep_rej_ioctl_t\r
* NAME\r
-* ual_cm_rej_ioctl_t\r
+* ual_cep_rej_ioctl_t\r
*\r
* DESCRIPTION\r
-* IOCTL structure containing the input and output parameters for\r
-* ib_cm_rej\r
+* IOCTL structure containing the input parameters for\r
+* al_cep_rej\r
*\r
* SYNOPSIS\r
*/\r
-typedef union _ual_cm_rej_ioctl\r
+typedef struct _ual_cep_rej_ioctl\r
{\r
- struct _ual_cm_rej_ioctl_in\r
- {\r
- uint64_t h_cm;\r
- ib_cm_rej_t cm_rej;\r
- /* ARI and private data data follow IOCTL buffer immediately. */\r
+ net32_t cid;\r
\r
- } in;\r
- struct _ual_cm_rej_ioctl_out\r
- {\r
- ib_api_status_t status;\r
-\r
- } out;\r
+ ib_rej_status_t rej_status;\r
+ uint8_t ari_len;\r
+ uint8_t pdata_len;\r
+ uint8_t ari[IB_ARI_SIZE];\r
+ uint8_t pdata[IB_REJ_PDATA_SIZE];\r
\r
-} ual_cm_rej_ioctl_t;\r
+} ual_cep_rej_ioctl_t;\r
/*\r
* FIELDS\r
-* in.h_cm\r
-* The connection handle got on the callback.\r
+* in.cid\r
+* The CID of the target CEP.\r
*\r
-* in.cm_rej\r
-* CM REJ parameters.\r
+* in.rej_status\r
+* Rejection status as defined in IB spec.\r
*\r
-* out.status\r
-* Status of the operation.\r
+* in.ari_len\r
+* Length of the ARI data.\r
+*\r
+* in.pdata_len\r
+* Length of the private data.\r
+*\r
+* in.ari\r
+* ARI data.\r
+*\r
+* in.pdata\r
+* Private data.\r
*****/\r
\r
\r
\r
-/****s* User-mode Access Layer/ual_cm_handoff_ioctl_t\r
+/****s* User-mode Access Layer/ual_cep_handoff_ioctl_t\r
* NAME\r
-* ual_cm_handoff_ioctl_t\r
+* ual_cep_handoff_ioctl_t\r
*\r
* DESCRIPTION\r
* IOCTL structure containing the input and output parameters for\r
*\r
* SYNOPSIS\r
*/\r
-typedef union _ual_cm_handoff_ioctl\r
+typedef union _ual_cep_handoff_ioctl\r
{\r
- struct _ual_cm_handoff_ioctl_in\r
+ struct _ual_cep_handoff_ioctl_in\r
{\r
uint64_t h_cm;\r
net64_t sid;\r
\r
} in;\r
- struct _ual_cm_handoff_ioctl_out\r
+ struct _ual_cep_handoff_ioctl_out\r
{\r
ib_api_status_t status;\r
\r
} out;\r
\r
-} ual_cm_handoff_ioctl_t;\r
+} ual_cep_handoff_ioctl_t;\r
/*\r
* FIELDS\r
* in.h_cm\r
\r
\r
\r
-/****s* User-mode Access Layer/ual_cm_mra_ioctl_t\r
+/****s* User-mode Access Layer/ual_cep_mra_ioctl_t\r
* NAME\r
-* ual_cm_mra_ioctl_t\r
+* ual_cep_mra_ioctl_t\r
*\r
* DESCRIPTION\r
-* IOCTL structure containing the input and output parameters for\r
+* IOCTL structure containing the input parameters for\r
* ib_cm_mra\r
*\r
* SYNOPSIS\r
*/\r
-typedef union _ual_cm_mra_ioctl\r
+typedef struct _ual_cep_mra_ioctl\r
{\r
- struct _ual_cm_mra_ioctl_in\r
- {\r
- uint64_t h_cm;\r
- ib_cm_mra_t cm_mra;\r
- /* Private data follows IOCTL buffer immediately. */\r
+ net32_t cid;\r
+ ib_cm_mra_t cm_mra;\r
+ uint8_t pdata[IB_MRA_PDATA_SIZE];\r
\r
- } in;\r
- struct _ual_cm_mra_ioctl_out\r
- {\r
- ib_api_status_t status;\r
-\r
- } out;\r
-\r
-} ual_cm_mra_ioctl_t;\r
+} ual_cep_mra_ioctl_t;\r
/*\r
* FIELDS\r
-* in.h_cm\r
-* The connection handle got on the callback.\r
+* in.cid\r
+* The CID for the target CEP.\r
*\r
-* cm_mra\r
+* in.cm_mra\r
* CM MRA parameters.\r
-*\r
-* out.status\r
-* Status of the operation\r
*****/\r
\r
\r
\r
-/****s* User-mode Access Layer/ual_cm_lap_ioctl_t\r
+/****s* User-mode Access Layer/ual_cep_lap_ioctl_t\r
* NAME\r
-* ual_cm_lap_ioctl_t\r
+* ual_cep_lap_ioctl_t\r
*\r
* DESCRIPTION\r
-* IOCTL structure containing the input and output parameters for\r
+* IOCTL structure containing the input parameters for\r
* ib_cm_lap\r
*\r
* SYNOPSIS\r
*/\r
-typedef union _ual_cm_lap_ioctl\r
+typedef struct _ual_cep_lap_ioctl\r
{\r
- struct _ual_cm_lap_ioctl_in\r
- {\r
- uint64_t h_qp;\r
- ib_cm_lap_t cm_lap;\r
- ib_path_rec_t alt_path;\r
- /* Private data follows IOCTL buffer immediately */\r
+ net32_t cid;\r
+ ib_cm_lap_t cm_lap;\r
+ ib_path_rec_t alt_path;\r
+ uint8_t pdata[IB_LAP_PDATA_SIZE];\r
\r
- } in;\r
- struct _ual_cm_lap_ioctl_out\r
- {\r
- ib_api_status_t status;\r
-\r
- } out;\r
-\r
-} ual_cm_lap_ioctl_t;\r
+} ual_cep_lap_ioctl_t;\r
/*\r
* FIELDS\r
* in.cm_lap\r
*\r
* in.alt_path\r
* Alternate path information.\r
-*\r
-* out.status\r
-* Status of the operation\r
*****/\r
\r
\r
\r
-/****s* User-mode Access Layer/ual_cm_apr_ioctl_t\r
+/****s* User-mode Access Layer/ual_cep_apr_ioctl_t\r
* NAME\r
-* ual_cm_apr_ioctl_t\r
+* ual_cep_apr_ioctl_t\r
*\r
* DESCRIPTION\r
-* IOCTL structure containing the input and output parameters for\r
-* ib_cm_apr\r
+* IOCTL structure containing the input parameters for\r
+* ib_cep_apr\r
*\r
* SYNOPSIS\r
*/\r
-typedef union _ual_cm_apr_ioctl\r
+typedef union _ual_cep_apr_ioctl\r
{\r
- struct _ual_cm_apr_ioctl_in\r
+ struct _ual_cep_apr_ioctl_in\r
{\r
- uint64_t h_cm_lap;\r
- uint64_t h_qp;\r
- ib_cm_apr_t cm_apr;\r
- /* Info and Private data follow IOCTL buffer immediately */\r
+ net32_t cid;\r
+ ib_cm_apr_t cm_apr;\r
+ uint8_t apr_info[IB_APR_INFO_SIZE];\r
+ uint8_t pdata[IB_APR_PDATA_SIZE];\r
\r
} in;\r
- struct _ual_cm_apr_ioctl_out\r
+\r
+ struct _ual_cep_apr_ioctl_out\r
{\r
- ib_api_status_t status;\r
+ ib_api_status_t status;\r
+ ib_qp_mod_t apr;\r
\r
} out;\r
\r
-} ual_cm_apr_ioctl_t;\r
+} ual_cep_apr_ioctl_t;\r
/*\r
* FIELDS\r
* in.h_cm_lap\r
*\r
* in.cm_apr\r
* CM APR parameters.\r
-*\r
-* out.status\r
-* Status of the operation.\r
*****/\r
\r
\r
\r
\r
\r
-/****s* User-mode Access Layer/ual_cm_dreq_ioctl_t\r
+/****s* User-mode Access Layer/ual_cep_dreq_ioctl_t\r
* NAME\r
-* ual_cm_dreq_ioctl_t\r
+* ual_cep_dreq_ioctl_t\r
*\r
* DESCRIPTION\r
-* IOCTL structure containing the input and output parameters for\r
+* IOCTL structure containing the input parameters for\r
* ib_cm_dreq\r
*\r
* SYNOPSIS\r
*/\r
-typedef union _ual_cm_dreq_ioctl\r
+typedef struct _ual_cep_dreq_ioctl\r
{\r
- struct _ual_cm_dreq_ioctl_in\r
- {\r
- uint64_t h_qp;\r
- ib_cm_dreq_t cm_dreq;\r
- /* Private data follows IOCTL buffer immediately */\r
-\r
- } in;\r
- struct _ual_cm_dreq_ioctl_out\r
- {\r
- ib_api_status_t status;\r
-\r
- } out;\r
+ net32_t cid;\r
+ uint8_t pdata_len;\r
+ uint8_t pdata[IB_DREQ_PDATA_SIZE];\r
\r
-} ual_cm_dreq_ioctl_t;\r
+} ual_cep_dreq_ioctl_t;\r
/*\r
* FIELDS\r
* cm_dreq\r
* CM DREQ parameters.\r
-*\r
-* out.status\r
-* Status of the operation.\r
*****/\r
\r
\r
\r
-/****s* User-mode Access Layer/ual_cm_drep_ioctl_t\r
+/****s* User-mode Access Layer/ual_cep_drep_ioctl_t\r
* NAME\r
-* ual_cm_drep_ioctl_t\r
+* ual_cep_drep_ioctl_t\r
*\r
* DESCRIPTION\r
* IOCTL structure containing the input and output parameters for\r
*\r
* SYNOPSIS\r
*/\r
-typedef union _ual_cm_drep_ioctl\r
+typedef struct _ual_cep_drep_ioctl\r
{\r
- struct _ual_cm_drep_ioctl_in\r
- {\r
- uint64_t h_cm_dreq;\r
- ib_cm_drep_t cm_drep;\r
- /* Private data follows IOCTL buffer immediately. */\r
+ net32_t cid;\r
+ ib_cm_drep_t cm_drep;\r
+ uint8_t pdata[IB_DREP_PDATA_SIZE];\r
\r
- } in;\r
- struct _ual_cm_drep_ioctl_out\r
- {\r
- ib_api_status_t status;\r
-\r
- } out;\r
-\r
-} ual_cm_drep_ioctl_t;\r
+} ual_cep_drep_ioctl_t;\r
/*\r
* FIELDS\r
* in.h_cm_dreq\r
*\r
* in.cm_drep\r
* CM DREP parameters.\r
+*****/\r
+\r
+\r
+\r
+/****s* User-mode Access Layer/ual_cep_get_timewait_ioctl_t\r
+* NAME\r
+* ual_cep_get_timewait_ioctl_t\r
*\r
-* out.status\r
+* DESCRIPTION\r
+* IOCTL structure containing the output parameters for\r
+* ib_cep_get_timewait\r
+*\r
+* SYNOPSIS\r
+*/\r
+typedef struct _ual_cep_get_timewait_ioctl\r
+{\r
+ ib_api_status_t status;\r
+ uint64_t timewait_us;\r
+\r
+} ual_cep_get_timewait_ioctl_t;\r
+/*\r
+* FIELDS\r
+* in.status\r
+* Status of the request.\r
+*\r
+* in.timewait_us\r
+* Timewait value, in microseconds.\r
+*****/\r
+\r
+\r
+\r
+/****s* User-mode Access Layer/ual_cep_poll_ioctl_t\r
+* NAME\r
+* ual_cep_poll_ioctl_t\r
+*\r
+* DESCRIPTION\r
+* IOCTL structure containing the output parameters to\r
+* poll for incoming events on a CEP. The input parameter is the CID.\r
+*\r
+* SYNOPSIS\r
+*/\r
+typedef struct _ual_cep_poll_ioctl\r
+{\r
+ ib_api_status_t status;\r
+ ib_cep_t new_cep;\r
+ ib_mad_element_t element;\r
+ ib_grh_t grh;\r
+ uint8_t mad_buf[MAD_BLOCK_SIZE];\r
+\r
+} ual_cep_poll_ioctl_t;\r
+/*\r
+* FIELDS\r
+* status\r
* Status of the operation.\r
+*\r
+* new_cep\r
+* For listen requests, CEP information of CEPs created in response\r
+* to incoming REQs.\r
+*\r
+* mad_buf\r
+* Payload of a received MAD (or failed send)\r
*****/\r
\r
\r
{\r
struct _qp_init\r
{\r
- ib_qp_opts_t opts;\r
uint8_t primary_port;\r
ib_net32_t qkey;\r
uint16_t pkey_index;\r
*/\r
\r
\r
-\r
-\r
#ifndef _CL_DEBUG_OSD_H_\r
#define _CL_DEBUG_OSD_H_\r
\r
* CL_TRACE_EXIT, and CL_EXIT macros.\r
*/\r
#define _CL_DBG_ENTER \\r
- ("%s%s%s() [\n", __MODULE__, __MOD_DELIMITER__, __FUNCTION__)\r
+ ("%d:%s%s%s() [\n", GetCurrentThreadId(), __MODULE__, __MOD_DELIMITER__, __FUNCTION__)\r
\r
#define _CL_DBG_EXIT \\r
- ("%s%s%s() ]\n", __MODULE__, __MOD_DELIMITER__, __FUNCTION__)\r
+ ("%d:%s%s%s() ]\n", GetCurrentThreadId(), __MODULE__, __MOD_DELIMITER__, __FUNCTION__)\r
\r
#define _CL_DBG_INFO \\r
- ("%s%s%s(): ", __MODULE__, __MOD_DELIMITER__, __FUNCTION__)\r
+ ("%d:%s%s%s(): ", GetCurrentThreadId(), __MODULE__, __MOD_DELIMITER__, __FUNCTION__)\r
\r
#define _CL_DBG_ERROR \\r
- ("%s%s%s() !ERROR!: ", __MODULE__, __MOD_DELIMITER__, __FUNCTION__)\r
+ ("%d:%s%s%s() !ERROR!: ", GetCurrentThreadId(), __MODULE__, __MOD_DELIMITER__, __FUNCTION__)\r
\r
#define CL_CHK_STK\r
\r
p_cm_rep->flow_ctrl = TRUE;\r
p_cm_rep->rnr_nak_timeout = 7;\r
p_cm_rep->rnr_retry_cnt = 7;\r
+ p_cm_rep->pfn_cm_rej_cb = alts_cm_rej_cb;\r
+ p_cm_rep->pfn_cm_mra_cb = alts_cm_mra_cb;\r
p_cm_rep->pfn_cm_rtu_cb = alts_cm_rtu_cb;\r
p_cm_rep->pfn_cm_lap_cb = alts_cm_lap_cb;\r
p_cm_rep->pfn_cm_dreq_cb = alts_cm_dreq_cb;\r
p_listen->lid = p_ca_obj->dlid;\r
p_listen->pkey = p_ca_obj->p_dest_port_attr->p_pkey_table[0];\r
p_listen->pfn_cm_req_cb = alts_cm_req_cb;\r
- p_listen->pfn_cm_rej_cb = alts_cm_rej_cb;\r
\r
ib_status = ib_cm_listen(h_al, p_listen, alts_cm_err_cb,\r
p_ca_obj, &p_ca_obj->h_cm_listen );\r
ib_pd_handle_t h_pd;\r
\r
/* Input parameters to control test. */\r
- uint32_t num_nodes;\r
+ int32_t num_nodes;\r
uint32_t num_msgs;\r
boolean_t per_msg_buf;\r
cl_mutex_t mutex;\r
\r
cmtest_state_t state;\r
- uint32_t num_connected;\r
+ atomic32_t num_connected;\r
uint32_t conn_index; /* current connection id */\r
uint32_t total_sent;\r
uint32_t total_recv;\r
g_root.cm_rep.flow_ctrl = TRUE;\r
g_root.cm_rep.rnr_nak_timeout = 7;\r
g_root.cm_rep.rnr_retry_cnt = 6;\r
+ g_root.cm_rep.pfn_cm_rej_cb = __rej_cb;\r
+ g_root.cm_rep.pfn_cm_mra_cb = __mra_cb;\r
g_root.cm_rep.pfn_cm_rtu_cb = __rtu_cb;\r
g_root.cm_rep.pfn_cm_lap_cb = __lap_cb;\r
g_root.cm_rep.pfn_cm_dreq_cb = __dreq_cb;\r
{\r
CL_ENTER( CMT_DBG_VERBOSE, cmt_dbg_lvl );\r
\r
+ /*\r
+	 * Note - because this callback exits the app, any output beyond\r
+	 * the first time may report junk. There have been instances where\r
+ * the callback is invoked more times than there are connection requests\r
+	 * but that behavior disappeared if the call to exit below is removed.\r
+ */\r
printf( "Connection was rejected, status: 0x%x\n",\r
p_cm_rej_rec->rej_status );\r
\r
status = ib_cm_rtu( p_cm_rep_rec->h_cm_rep, &g_root.cm_rtu );\r
if( status != IB_SUCCESS )\r
{\r
- printf( "Call to ib_cm_rtu failed\n" );\r
+ printf( "Call to ib_cm_rtu returned %s\n", ib_get_err_str( status ) );\r
exit( 1 );\r
}\r
\r
- g_root.num_connected++;\r
+ cl_atomic_inc( &g_root.num_connected );\r
\r
CL_EXIT( CMT_DBG_VERBOSE, cmt_dbg_lvl );\r
}\r
p_node->state = node_conn;\r
\r
__post_recvs( p_node );\r
- g_root.num_connected++;\r
+ cl_atomic_inc( &g_root.num_connected );\r
\r
CL_EXIT( CMT_DBG_VERBOSE, cmt_dbg_lvl );\r
}\r
CL_ENTER( CMT_DBG_VERBOSE, cmt_dbg_lvl );\r
CL_ASSERT( p_cm_dreq_rec );\r
p_node = (ib_node_t*)p_cm_dreq_rec->qp_context;\r
+ CL_ASSERT( p_node );\r
\r
/*\r
* Record that we've already received a DREQ to avoid trying to\r
if( status == IB_SUCCESS )\r
{\r
p_node->state = node_idle;\r
- g_root.num_connected--;\r
+ cl_atomic_dec( &g_root.num_connected );\r
}\r
}\r
else\r
CL_ENTER( CMT_DBG_VERBOSE, cmt_dbg_lvl );\r
CL_ASSERT( p_cm_drep_rec );\r
p_node = (ib_node_t*)p_cm_drep_rec->qp_context;\r
+ CL_ASSERT( p_node );\r
\r
/* We're done with this connection. */\r
+ cl_mutex_acquire( &g_root.mutex );\r
p_node->state = node_idle;\r
- g_root.num_connected--;\r
+ cl_atomic_dec( &g_root.num_connected );\r
+ cl_mutex_release( &g_root.mutex );\r
\r
CL_EXIT( CMT_DBG_VERBOSE, cmt_dbg_lvl );\r
}\r
__create_qps()\r
{\r
uint64_t start_time, total_time;\r
- uint32_t i;\r
+ int32_t i;\r
ib_api_status_t status;\r
\r
printf( "Creating QPs...\n" );\r
__destroy_qps()\r
{\r
uint64_t start_time, total_time;\r
- uint32_t i;\r
+ int32_t i;\r
\r
printf( "Destroying QPs...\n" );\r
start_time = cl_get_time_stamp();\r
return (FALSE);\r
if ( p_node->h_send_cq )\r
{\r
- status = ib_destroy_cq( p_node->h_send_cq, NULL );\r
+ status = ib_destroy_cq( p_node->h_send_cq, ib_sync_destroy );\r
p_node->h_send_cq = NULL;\r
if( status != IB_SUCCESS )\r
{\r
}\r
if (p_node->h_recv_cq)\r
{\r
- status = ib_destroy_cq( p_node->h_recv_cq, NULL );\r
+ status = ib_destroy_cq( p_node->h_recv_cq, ib_sync_destroy );\r
p_node->h_recv_cq = NULL;\r
if( status != IB_SUCCESS )\r
{\r
static boolean_t\r
__create_nodes()\r
{\r
- uint32_t i;\r
+ int32_t i;\r
\r
CL_ENTER( CMT_DBG_VERBOSE, cmt_dbg_lvl );\r
for( i = 0; i < g_root.num_nodes; i++ )\r
static boolean_t\r
__destroy_nodes()\r
{\r
- uint32_t i;\r
+ int32_t i;\r
\r
CL_ENTER( CMT_DBG_VERBOSE, cmt_dbg_lvl );\r
\r
return FALSE;\r
}\r
\r
- if( !__create_nodes() )\r
- {\r
- printf( "Unable to create nodes.\n" );\r
- return FALSE;\r
- }\r
-\r
CL_EXIT( CMT_DBG_VERBOSE, cmt_dbg_lvl );\r
return TRUE;\r
}\r
\r
cm_listen.qp_type = IB_QPT_RELIABLE_CONN;\r
\r
- cm_listen.pfn_cm_mra_cb = __mra_cb;\r
- cm_listen.pfn_cm_rej_cb = __rej_cb;\r
-\r
status = ib_cm_listen( g_root.h_al, &cm_listen, \r
__cm_listen_err_cb, &g_root, &g_root.h_listen );\r
if( status != IB_SUCCESS )\r
__conn_reqs()\r
{\r
ib_api_status_t status;\r
- uintn_t i;\r
+ int32_t i;\r
uint8_t pdata[IB_REQ_PDATA_SIZE];\r
\r
g_root.cm_req.p_req_pdata = pdata;\r
printf( "ib_cm_rep failed [%s]!\n", ib_get_err_str(status) );\r
return status;\r
}\r
- g_root.p_nodes[i].h_cm_req = NULL;\r
}\r
return IB_SUCCESS;\r
}\r
__disconnect()\r
{\r
ib_api_status_t status;\r
- uint32_t i;\r
+ int32_t i;\r
ib_node_t *p_node;\r
uint64_t total_time, start_time;\r
\r
{\r
case node_conn:\r
g_root.cm_dreq.h_qp = p_node->h_qp;\r
- ib_cm_dreq( &g_root.cm_dreq );\r
- cl_mutex_release( &g_root.mutex );\r
+ status = ib_cm_dreq( &g_root.cm_dreq );\r
+ if( status == IB_SUCCESS )\r
+ p_node->state = node_dreq_sent;\r
break;\r
\r
case node_dreq_rcvd:\r
- cl_mutex_release( &g_root.mutex );\r
status = ib_cm_drep( p_node->h_cm_dreq, &g_root.cm_drep );\r
- p_node->h_cm_dreq = NULL;\r
\r
/* If the DREP was successful, we're done with this connection. */\r
if( status == IB_SUCCESS )\r
{\r
p_node->state = node_idle;\r
- g_root.num_connected--;\r
+ cl_atomic_dec( &g_root.num_connected );\r
}\r
break;\r
\r
default:\r
/* Node is already disconnected. */\r
- cl_mutex_release( &g_root.mutex );\r
break;\r
}\r
+ cl_mutex_release( &g_root.mutex );\r
}\r
\r
/* Wait for all disconnections to complete. */\r
__send_msgs()\r
{\r
ib_api_status_t status;\r
- uint32_t i, m;\r
+ int32_t i;\r
+ uint32_t m;\r
ib_send_wr_t send_wr;\r
ib_send_wr_t *p_send_failure;\r
ib_local_ds_t ds_array;\r
ib_wc_t *p_free_wc, *p_done_wc;\r
\r
CL_ENTER( CMT_DBG_VERBOSE, cmt_dbg_lvl );\r
- memset (free_wc, 0, sizeof(free_wc));\r
\r
while( status != IB_NOT_FOUND )\r
{\r
\r
/* Continue polling if nothing is done. */\r
if( status == IB_NOT_FOUND )\r
- return TRUE;\r
+ break;\r
\r
/* Abort if an error occurred. */\r
if( status != IB_SUCCESS )\r
}\r
p_done_wc = p_done_wc->p_next;\r
}\r
+ }\r
\r
- if( !g_root.is_polling )\r
+ if( !g_root.is_polling )\r
+ {\r
+ status = ib_rearm_cq(h_cq, FALSE);\r
+ if (status != IB_SUCCESS)\r
{\r
- status = ib_rearm_cq(h_cq, FALSE);\r
- if (status != IB_SUCCESS)\r
- {\r
- printf("Failed to rearm CQ %p\n", h_cq );\r
- return FALSE;\r
- }\r
+ printf("Failed to rearm CQ %p\n", h_cq );\r
+ return FALSE;\r
}\r
}\r
\r
__poll_send_cqs()\r
{\r
ib_node_t *p_node;\r
- uintn_t i;\r
+ int32_t i;\r
\r
for( i = 0; i < g_root.num_nodes; i++ )\r
{\r
__poll_recv_cqs()\r
{\r
ib_node_t *p_node;\r
- uintn_t i;\r
+ int32_t i;\r
\r
for( i = 0; i < g_root.num_nodes; i++ )\r
{\r
*\r
* PURPOSE: Utility defs & routines for the adapter data structure\r
*\r
- * $Id: dapl_adapter_util.h,v 1.42 2004/06/04 20:09:43 sjs2 Exp $\r
+ * $Id$\r
*\r
**********************************************************************/\r
\r
* Description: Interfaces in this file are completely described in\r
* the DAPL 1.1 API, Chapter 6, section 3.2.1\r
*\r
- * $Id: dapl_cno_create.c,v 1.1 2003/10/24 20:25:43 sjs2 Exp $\r
+ * $Id$\r
**********************************************************************/\r
\r
#include "dapl.h"\r
* Description: Interfaces in this file are completely described in\r
* the DAPL 1.1 API, Chapter 6, section 3.2.2\r
*\r
- * $Id: dapl_cno_free.c,v 1.1 2003/10/24 20:25:43 sjs2 Exp $\r
+ * $Id$\r
**********************************************************************/\r
\r
#include "dapl.h"\r
* Description: Interfaces in this file are completely described in\r
* the DAPL 1.1 API, Chapter 6, section 3.2.4\r
*\r
- * $Id: dapl_cno_modify_agent.c,v 1.1 2003/10/24 20:25:43 sjs2 Exp $\r
+ * $Id$\r
**********************************************************************/\r
\r
#include "dapl.h"\r
* Description: Interfaces in this file are completely described in\r
* the DAPL 1.1 API, Chapter 6, section 3.2.5\r
*\r
- * $Id: dapl_cno_query.c,v 1.1 2003/10/24 20:25:43 sjs2 Exp $\r
+ * $Id$\r
**********************************************************************/\r
\r
#include "dapl.h"\r
*\r
* PURPOSE: Manage CNO Info structure\r
*\r
- * $Id: dapl_cno_util.c,v 1.15 2004/06/15 15:26:25 sjs2 Exp $\r
+ * $Id$\r
**********************************************************************/\r
\r
#include "dapl_ia_util.h"\r
*\r
* PURPOSE: Utility defs & routines for the cno data structure\r
*\r
- * $Id: dapl_cno_util.h,v 1.7 2004/03/24 16:37:48 sjs2 Exp $\r
+ * $Id$\r
*\r
**********************************************************************/\r
\r
* Description: Interfaces in this file are completely described in\r
* the DAPL 1.1 API, Chapter 6, section 3.2.3\r
*\r
- * $Id: dapl_cno_wait.c,v 1.2 2003/12/02 18:19:39 sjs2 Exp $\r
+ * $Id$\r
**********************************************************************/\r
\r
#include "dapl.h"\r
*\r
* - completions are delivered in order\r
*\r
- * $Id: dapl_cookie.c,v 1.13 2003/06/16 17:53:32 sjs2 Exp $\r
+ * $Id$\r
**********************************************************************/\r
\r
#include "dapl_cookie.h"\r
*\r
* PURPOSE: Utility defs & routines for the cookie data structure\r
*\r
- * $Id: dapl_cookie.h,v 1.7 2003/06/13 12:21:02 sjs2 Exp $\r
+ * $Id$\r
*\r
**********************************************************************/\r
\r
* Description: Interfaces in this file are completely described in\r
* the DAPL 1.1 API, Chapter 6, section 4\r
*\r
- * $Id: dapl_cr_accept.c,v 1.26 2004/06/04 20:09:43 sjs2 Exp $\r
+ * $Id$\r
**********************************************************************/\r
\r
#include "dapl.h"\r
* Description: Accepts asynchronous callbacks from the Communications Manager\r
* for EVDs that have been specified as the connection_evd.\r
*\r
- * $Id: dapl_cr_callback.c,v 1.74 2004/06/07 13:06:57 sjs2 Exp $\r
+ * $Id$\r
**********************************************************************/\r
\r
#include "dapl.h"\r
DAT_RETURN dat_status;\r
\r
dapl_dbg_log (DAPL_DBG_TYPE_CM | DAPL_DBG_TYPE_CALLBACK,\r
- "--> dapl_cr_callback! context: %p event: %x cm_handle %p\n",\r
+		 "--> dapl_cr_callback! context: %p event: %x cm_handle %u\n",
context,\r
ib_cm_event,\r
- (void *) ib_cm_handle);\r
+ ib_cm_handle.cid );\r
\r
/*\r
* Passive side of the connection, context is a SP and\r
*/\r
dapl_os_lock ( &ep_ptr->header.lock );\r
ep_ptr->param.ep_state = DAT_EP_STATE_DISCONNECTED;\r
- ep_ptr->cm_handle = IB_INVALID_HANDLE;\r
+ cl_memclr( &ep_ptr->cm_handle, sizeof(ib_cm_handle_t) );\r
+ ep_ptr->cm_handle.cid = 0xFFFFFFFF;\r
dapls_ib_disconnect_clean (ep_ptr, DAT_FALSE, ib_cm_event);\r
dapl_os_unlock ( &ep_ptr->header.lock );\r
\r
* Description: Interfaces in this file are completely described in\r
* the DAPL 1.1 API, Chapter 6, section 4\r
*\r
- * $Id: dapl_cr_handoff.c,v 1.4 2003/06/16 17:53:32 sjs2 Exp $\r
+ * $Id$\r
**********************************************************************/\r
\r
#include "dapl.h"\r
* Description: Interfaces in this file are completely described in\r
* the DAPL 1.1 API, Chapter 6, section 4\r
*\r
- * $Id: dapl_cr_query.c,v 1.10 2004/05/14 16:22:56 sjs2 Exp $\r
+ * $Id$\r
**********************************************************************/\r
\r
#include "dapl.h"\r
* Description: Interfaces in this file are completely described in\r
* the DAPL 1.1 API, Chapter 6, section 4\r
*\r
- * $Id: dapl_cr_reject.c,v 1.14 2003/10/07 11:22:08 sjs2 Exp $\r
+ * $Id$\r
**********************************************************************/\r
\r
#include "dapl.h"\r
*\r
* PURPOSE: Manage CR (Connection Request) structure\r
*\r
- * $Id: dapl_cr_util.c,v 1.7 2003/08/08 19:20:05 sjs2 Exp $\r
+ * $Id$\r
**********************************************************************/\r
\r
#include "dapl.h"\r
*\r
* PURPOSE: Utility defs & routines for the CR data structure\r
*\r
- * $Id: dapl_cr_util.h,v 1.6 2003/06/13 12:21:03 sjs2 Exp $\r
+ * $Id$\r
*\r
**********************************************************************/\r
\r
* Description: Interfaces in this file are completely described in\r
* the DAPL 1.1 API, Chapter 6, section 5\r
*\r
- * $Id: dapl_ep_connect.c,v 1.28 2004/05/14 16:22:56 sjs2 Exp $\r
+ * $Id$\r
**********************************************************************/\r
\r
#include "dapl.h"\r
* Description: Interfaces in this file are completely described in\r
* the kDAPL 1.1 API, Chapter 6, section 5\r
*\r
- * $Id: dapl_ep_create.c,v 1.32 2004/06/02 18:12:46 sjs2 Exp $\r
+ * $Id$\r
**********************************************************************/\r
\r
#include "dapl.h"\r
* Description: Interfaces in this file are completely described in\r
* the DAPL 1.1 API, Chapter 6, section 5\r
*\r
- * $Id: dapl_ep_disconnect.c,v 1.23 2004/05/10 18:04:05 sjs2 Exp $\r
+ * $Id$\r
**********************************************************************/\r
\r
#include "dapl.h"\r
* Description: Interfaces in this file are completely described in\r
* the DAPL 1.1 API, Chapter 6, section 5\r
*\r
- * $Id: dapl_ep_dup_connect.c,v 1.9 2004/04/23 19:06:51 sjs2 Exp $\r
+ * $Id$\r
**********************************************************************/\r
\r
#include "dapl.h"\r
* Description: Interfaces in this file are completely described in\r
* the DAPL 1.1 API, Chapter 6, section 5.4\r
*\r
- * $Id: dapl_ep_free.c,v 1.29 2004/06/03 14:57:23 sjs2 Exp $\r
+ * $Id$\r
**********************************************************************/\r
\r
#include "dapl.h"\r
* Description: Interfaces in this file are completely described in\r
* the DAPL 1.1 API, Chapter 6, section 5\r
*\r
- * $Id: dapl_ep_get_status.c,v 1.9 2003/07/30 18:13:37 hobie16 Exp $\r
+ * $Id$\r
**********************************************************************/\r
\r
#include "dapl.h"\r
* Description: Interfaces in this file are completely described in\r
* the DAPL 1.0 API, Chapter 6, section 5\r
*\r
- * $Id: dapl_ep_modify.c,v 1.23 2003/07/11 18:42:17 hobie16 Exp $\r
+ * $Id$\r
**********************************************************************/\r
\r
#include "dapl.h"\r
* Description: Interfaces in this file are completely described in\r
* the DAPL 1.1 API, Chapter 6, section 5\r
*\r
- * $Id: dapl_ep_post_rdma_read.c,v 1.9 2004/01/06 14:19:25 sjs2 Exp $\r
+ * $Id$\r
**********************************************************************/\r
\r
#include "dapl_ep_util.h"\r
* Description: Interfaces in this file are completely described in\r
* the DAPL 1.1 API, Chapter 6, section 5\r
*\r
- * $Id: dapl_ep_post_rdma_write.c,v 1.8 2004/01/06 14:19:25 sjs2 Exp $\r
+ * $Id$\r
**********************************************************************/\r
\r
#include "dapl_ep_util.h"\r
* Description: Interfaces in this file are completely described in\r
* the DAPL 1.1 API, Chapter 6, section 5\r
*\r
- * $Id: dapl_ep_post_recv.c,v 1.19 2004/01/19 21:24:49 sjs2 Exp $\r
+ * $Id$\r
**********************************************************************/\r
\r
#include "dapl.h"\r
* Description: Interfaces in this file are completely described in\r
* the DAPL 1.1 API, Chapter 6, section 5\r
*\r
- * $Id: dapl_ep_post_send.c,v 1.8 2004/01/06 14:19:25 sjs2 Exp $\r
+ * $Id$\r
**********************************************************************/\r
\r
#include "dapl_ep_util.h"\r
* Description: Interfaces in this file are completely described in\r
* the DAPL 1.1 API, Chapter 6, section 5\r
*\r
- * $Id: dapl_ep_query.c,v 1.9 2004/05/14 16:22:56 sjs2 Exp $\r
+ * $Id$\r
**********************************************************************/\r
\r
#include "dapl.h"\r
* Description: Interfaces in this file are completely described in\r
* the DAPL 1.1 API, Chapter 6, section 5.13\r
*\r
- * $Id: dapl_ep_reset.c,v 1.6 2003/07/08 14:23:35 sjs2 Exp $\r
+ * $Id$\r
**********************************************************************/\r
\r
#include "dapl.h"\r
*\r
* PURPOSE: Manage EP Info structure\r
*\r
- * $Id: dapl_ep_util.c,v 1.49 2004/05/10 18:04:05 sjs2 Exp $\r
+ * $Id$\r
**********************************************************************/\r
\r
#include "dapl_ep_util.h"\r
ep_ptr->qp_handle = IB_INVALID_HANDLE;\r
ep_ptr->qpn = 0;\r
ep_ptr->qp_state = DAPL_QP_STATE_UNATTACHED;\r
- ep_ptr->cm_handle = IB_INVALID_HANDLE;\r
+ cl_memclr( &ep_ptr->cm_handle, sizeof(ib_cm_handle_t) );\r
+ ep_ptr->cm_handle.cid = 0xFFFFFFFF;\r
\r
ep_ptr->req_count = 0;\r
ep_ptr->recv_count = 0;\r
*\r
* PURPOSE: Utility defs & routines for the EP data structure\r
*\r
- * $Id: dapl_ep_util.h,v 1.14 2004/01/05 13:39:05 sjs2 Exp $\r
+ * $Id$\r
*\r
**********************************************************************/\r
\r
* Description: Interfaces in this file are completely described in\r
* the DAPL 1.1 API, Chapter 6, section 3.4.8\r
*\r
- * $Id: dapl_evd_clear_unwaitable.c,v 1.1 2003/10/24 20:25:43 sjs2 Exp $\r
+ * $Id$\r
**********************************************************************/\r
\r
#include "dapl.h"\r
* Description: Accepts asynchronous callbacks from the Communications Manager\r
* for EVDs that have been specified as the connection_evd.\r
*\r
- * $Id: dapl_evd_connection_callb.c,v 1.45 2004/06/07 13:06:56 sjs2 Exp $\r
+ * $Id$\r
**********************************************************************/\r
\r
#include "dapl.h"\r
\r
dapl_dbg_log (\r
DAPL_DBG_TYPE_CM | DAPL_DBG_TYPE_CALLBACK,\r
- "--> dapl_evd_connection_callback: ctxt: %p event: %x cm_handle %p\n",\r
+	"--> dapl_evd_connection_callback: ctxt: %p event: %x cm_handle %u\n",
context,\r
ib_cm_event,\r
- (void *) ib_cm_handle);\r
+ ib_cm_handle.cid);\r
DAPL_CNTR(DCNT_EVD_CONN_CALLBACK);\r
\r
/*\r
*\r
* PURPOSE: implements CQ async_callbacks from verbs\r
*\r
- * $Id: dapl_evd_cq_async_error_callb.c,v 1.8 2003/07/31 13:55:18 hobie16 Exp $\r
+ * $Id$\r
**********************************************************************/\r
\r
#include "dapl.h"\r
* Description: Interfaces in this file are completely defined in \r
* the uDAPL 1.1 API, Chapter 6, section 3\r
*\r
- * $Id: dapl_evd_create.c,v 1.3 2004/02/09 20:34:33 sjs2 Exp $\r
+ * $Id$\r
**********************************************************************/\r
\r
#include "dapl.h"\r
* Description: Interfaces in this file are completely described in\r
* the uDAPL 1.1 API, Chapter 6, section 3\r
*\r
- * $Id: dapl_evd_dequeue.c,v 1.10 2004/01/06 14:19:25 sjs2 Exp $\r
+ * $Id$\r
**********************************************************************/\r
\r
#include "dapl.h"\r
* Description: Interfaces in this file are completely defined in \r
* the uDAPL 1.1 API, Chapter 6, section 3\r
*\r
- * $Id: dapl_evd_disable.c,v 1.1 2004/03/10 17:03:39 sjs2 Exp $\r
+ * $Id$\r
**********************************************************************/\r
\r
#include "dapl.h"\r
*\r
* PURPOSE: implements DTO callbacks from verbs\r
*\r
- * $Id: dapl_evd_dto_callb.c,v 1.18 2004/01/06 14:19:25 sjs2 Exp $\r
+ * $Id$\r
**********************************************************************/\r
\r
#include "dapl.h"\r
* Description: Interfaces in this file are completely defined in \r
* the uDAPL 1.1 API, Chapter 6, section 3\r
*\r
- * $Id: dapl_evd_enable.c,v 1.1 2004/03/10 17:03:39 sjs2 Exp $\r
+ * $Id$\r
**********************************************************************/\r
\r
#include "dapl.h"\r
* Description: Interfaces in this file are completely described in\r
* the DAPL 1.1 API, Chapter 6, section 3\r
*\r
- * $Id: dapl_evd_free.c,v 1.13 2003/12/18 21:00:53 sjs2 Exp $\r
+ * $Id$\r
**********************************************************************/\r
\r
#include "dapl.h"\r
* Description: Interfaces in this file are completely described in\r
* the DAPL 1.1 API, Chapter 6, section 3\r
*\r
- * $Id: dapl_evd_modify_cno.c,v 1.12 2003/12/17 11:31:53 sjs2 Exp $\r
+ * $Id$\r
**********************************************************************/\r
\r
#include "dapl.h"\r
* Description: Interfaces in this file are completely defined in \r
* the uDAPL 1.1 API, Chapter 6, section 3\r
*\r
- * $Id: dapl_evd_post_se.c,v 1.7 2003/06/16 17:53:32 sjs2 Exp $\r
+ * $Id$\r
**********************************************************************/\r
\r
#include "dapl.h"\r
*\r
* PURPOSE: implements QP callbacks from verbs\r
*\r
- * $Id: dapl_evd_qp_async_error_callb.c,v 1.17 2003/07/31 13:55:18 hobie16 Exp $\r
+ * $Id$\r
**********************************************************************/\r
\r
#include "dapl.h"\r
* Description: Interfaces in this file are completely described in\r
* the DAPL 1.1 API, Chapter 6, section 3\r
*\r
- * $Id: dapl_evd_query.c,v 1.1 2003/10/24 20:25:43 sjs2 Exp $\r
+ * $Id$\r
**********************************************************************/\r
\r
#include "dapl.h"\r
* Description: Interfaces in this file are completely defined in \r
* the uDAPL 1.1 API, Chapter 6, section 3\r
*\r
- * $Id: dapl_evd_resize.c,v 1.7 2004/01/15 20:34:44 addetia Exp $\r
+ * $Id$\r
**********************************************************************/\r
\r
#include "dapl.h"\r
* Description: Interfaces in this file are completely described in\r
* the DAPL 1.1 API, Chapter 6, section 3.4.7\r
*\r
- * $Id: dapl_evd_set_unwaitable.c,v 1.1 2003/10/24 20:25:43 sjs2 Exp $\r
+ * $Id$\r
**********************************************************************/\r
\r
#include "dapl.h"\r
*\r
* PURPOSE: implements Unaffiliated callbacks from verbs\r
*\r
- * $Id: dapl_evd_un_async_error_callb.c,v 1.9 2003/07/31 13:55:18 hobie16 Exp $\r
+ * $Id$\r
**********************************************************************/\r
\r
#include "dapl.h"\r
*\r
* PURPOSE: Manage EVD Info structure\r
*\r
- * $Id: dapl_evd_util.c,v 1.56 2004/05/10 20:21:07 sjs2 Exp $\r
+ * $Id$\r
**********************************************************************/\r
\r
#include "dapl_evd_util.h"\r
*\r
* PURPOSE: Utility defs & routines for the EVD data structure\r
*\r
- * $Id: dapl_evd_util.h,v 1.11 2003/11/11 20:38:22 sjs2 Exp $\r
+ * $Id$\r
*\r
**********************************************************************/\r
\r
* Description: Interfaces in this file are completely defined in \r
* the uDAPL 1.1 API specification\r
*\r
- * $Id: dapl_evd_wait.c,v 1.1 2004/03/10 17:03:39 sjs2 Exp $\r
+ * $Id$\r
**********************************************************************/\r
\r
#include "dapl.h"\r
* Description: Interfaces in this file are completely described in\r
* the DAPL 1.1 API, Chapter 6, section 2\r
*\r
- * $Id: dapl_get_consumer_context.c,v 1.6 2003/10/24 20:21:18 sjs2 Exp $\r
+ * $Id$\r
**********************************************************************/\r
\r
#include "dapl.h"\r
* Description: Interfaces in this file are completely described in\r
* the DAPL 1.1 API, Chapter 6, section 2\r
*\r
- * $Id: dapl_get_handle_type.c,v 1.5 2003/08/20 13:50:45 sjs2 Exp $\r
+ * $Id$\r
**********************************************************************/\r
\r
#include "dapl.h"\r
*\r
* Provides a generic hash table with chaining.\r
*\r
- * $Id: dapl_hash.c,v 1.13 2004/04/28 15:29:25 sjs2 Exp $\r
+ * $Id$\r
**********************************************************************/\r
\r
#include "dapl_hash.h"\r
*\r
* PURPOSE: Utility defs & routines for the hash data structure\r
*\r
- * $Id: dapl_hash.h,v 1.6 2004/05/07 11:43:51 sjs2 Exp $\r
+ * $Id$\r
**********************************************************************/\r
\r
#ifndef _DAPL_HASH_H_\r
*\r
* PURPOSE: Manage HCA structure\r
*\r
- * $Id: dapl_hca_util.c,v 1.15 2004/04/15 15:36:25 sjs2 Exp $\r
+ * $Id$\r
**********************************************************************/\r
\r
#include "dapl.h"\r
*\r
* PURPOSE: Utility defs & routines for the HCA data structure\r
*\r
- * $Id: dapl_hca_util.h,v 1.4 2003/07/31 14:04:17 jlentini Exp $\r
+ * $Id$\r
**********************************************************************/\r
\r
#ifndef _DAPL_HCA_UTIL_H_\r
* Description: Interfaces in this file are completely described in\r
* the DAPL 1.1 API, Chapter 6, section 2\r
*\r
- * $Id: dapl_ia_close.c,v 1.9 2003/07/30 18:13:38 hobie16 Exp $\r
+ * $Id$\r
**********************************************************************/\r
\r
#include "dapl.h"\r
* Description: Interfaces in this file are completely described in\r
* the DAPL 1.1 API, Chapter 6, section 2\r
*\r
- * $Id: dapl_ia_open.c,v 1.35 2004/04/13 17:11:31 sjs2 Exp $\r
+ * $Id$\r
**********************************************************************/\r
\r
#include "dapl.h"\r
* Description: Interfaces in this file are completely described in\r
* the DAPL 1.1 API, Chapter 6, section 2\r
*\r
- * $Id: dapl_ia_query.c,v 1.25 2004/05/14 17:28:55 sjs2 Exp $\r
+ * $Id$\r
**********************************************************************/\r
\r
#include "dapl.h"\r
*\r
* PURPOSE: Manage IA Info structure\r
*\r
- * $Id: dapl_ia_util.c,v 1.40 2004/04/13 17:11:31 sjs2 Exp $\r
+ * $Id$\r
**********************************************************************/\r
\r
#include "dapl.h"\r
*\r
* PURPOSE: Utility defs & routines for the IA data structure\r
*\r
- * $Id: dapl_ia_util.h,v 1.9 2003/07/25 19:24:11 sjs2 Exp $\r
+ * $Id$\r
**********************************************************************/\r
\r
#ifndef _DAPL_IA_UTIL_H_\r
*\r
* PURPOSE: Prototypes for library-interface init and fini functions\r
*\r
- * $Id: dapl_init.h,v 1.4 2003/07/31 14:04:17 jlentini Exp $\r
+ * $Id$\r
*\r
**********************************************************************/\r
\r
* Note: Each of the remove functions takes an assertion failure if\r
* an element cannot be removed from the list.\r
*\r
- * $Id: dapl_llist.c,v 1.11 2004/05/04 14:02:51 sjs2 Exp $\r
+ * $Id$\r
**********************************************************************/\r
\r
#include "dapl.h"\r
* Description: Interfaces in this file are completely described in\r
* the DAPL 1.1 API, Chapter 6, section 6\r
*\r
- * $Id: dapl_lmr_create.c,v 1.3 2003/11/10 14:43:11 sjs2 Exp $\r
+ * $Id$\r
**********************************************************************/\r
\r
#include "dapl_lmr_util.h"\r
* Description: Interfaces in this file are completely described in\r
* the DAPL 1.1 API, Chapter 6, section 6\r
*\r
- * $Id: dapl_lmr_free.c,v 1.16 2003/11/10 12:51:26 sjs2 Exp $\r
+ * $Id$\r
**********************************************************************/\r
\r
#include "dapl_lmr_util.h"\r
* Description: Interfaces in this file are completely described in\r
* the DAPL 1.1 API, Chapter 6, section 6\r
*\r
- * $Id: dapl_lmr_query.c,v 1.6 2003/07/30 18:13:40 hobie16 Exp $\r
+ * $Id$\r
**********************************************************************/\r
\r
#include "dapl.h"\r
* PURPOSE: Memory management support routines\r
* Description: Support routines for LMR functions\r
*\r
- * $Id: dapl_lmr_util.c,v 1.8 2003/06/13 12:21:11 sjs2 Exp $\r
+ * $Id$\r
**********************************************************************/\r
\r
#include "dapl_lmr_util.h"\r
*\r
* PURPOSE: Utility defs & routines for the LMR data structure\r
*\r
- * $Id: dapl_lmr_util.h,v 1.6 2003/06/30 16:25:59 jlentini Exp $\r
+ * $Id$\r
*\r
**********************************************************************/\r
\r
*\r
* PURPOSE: Common Memory Management functions and data structures\r
*\r
- * $Id: dapl_mr_util.c,v 1.9 2003/11/10 12:51:26 sjs2 Exp $\r
+ * $Id$\r
**********************************************************************/\r
\r
#include "dapl_mr_util.h"\r
*\r
* PURPOSE: Utility defs & routines for memory registration functions\r
*\r
- * $Id: dapl_mr_util.h,v 1.5 2003/06/13 12:21:11 sjs2 Exp $\r
+ * $Id$\r
*\r
**********************************************************************/\r
\r
* PURPOSE: Provider function table\r
* Description: DAT Interfaces to this provider\r
*\r
- * $Id: dapl_provider.c,v 1.11 2003/11/18 18:55:08 sjs2 Exp $\r
+ * $Id$\r
**********************************************************************/\r
\r
#include "dapl_provider.h"\r
* PURPOSE: Provider function table\r
* Description: DAT Interfaces to this provider\r
*\r
- * $Id: dapl_provider.h,v 1.5 2004/03/17 13:59:42 sjs2 Exp $\r
+ * $Id$\r
**********************************************************************/\r
\r
#ifndef _DAPL_PROVIDER_H_\r
* Description: Interfaces in this file are completely described in\r
* the DAPL 1.1 API, Chapter 6, section 4\r
*\r
- * $Id: dapl_psp_create.c,v 1.21 2004/02/24 17:28:30 sjs2 Exp $\r
+ * $Id$\r
**********************************************************************/\r
\r
#include "dapl.h"\r
* Description: Interfaces in this file are completely described in\r
* the DAPL 1.1 API, Chapter 6, section 4\r
*\r
- * $Id: dapl_psp_create_any.c,v 1.7 2004/02/24 17:28:30 sjs2 Exp $\r
+ * $Id$\r
**********************************************************************/\r
\r
#include "dapl.h"\r
* Description: Interfaces in this file are completely described in\r
* the DAPL 1.1 API, Chapter 6, section 4\r
*\r
- * $Id: dapl_psp_free.c,v 1.20 2003/10/07 11:22:08 sjs2 Exp $\r
+ * $Id$\r
**********************************************************************/\r
\r
#include "dapl.h"\r
* Description: Interfaces in this file are completely described in\r
* the DAPL 1.1 API, Chapter 6, section 4\r
*\r
- * $Id: dapl_psp_query.c,v 1.8 2003/06/23 12:28:05 sjs2 Exp $\r
+ * $Id$\r
**********************************************************************/\r
\r
#include "dapl.h"\r
* Description: Interfaces in this file are completely described in\r
* the DAPL 1.1 API, Chapter 6, section 6\r
*\r
- * $Id: dapl_pz_create.c,v 1.7 2003/07/30 18:13:40 hobie16 Exp $\r
+ * $Id$\r
**********************************************************************/\r
\r
#include "dapl_pz_util.h"\r
* Description: Interfaces in this file are completely described in\r
* the DAPL 1.1 API, Chapter 6, section 6\r
*\r
- * $Id: dapl_pz_free.c,v 1.9 2003/07/30 18:13:40 hobie16 Exp $\r
+ * $Id$\r
**********************************************************************/\r
\r
#include "dapl.h"\r
* Description: Interfaces in this file are completely described in\r
* the DAPL 1.1 API, Chapter 6, section 6\r
*\r
- * $Id: dapl_pz_query.c,v 1.6 2003/07/30 18:13:40 hobie16 Exp $\r
+ * $Id$\r
**********************************************************************/\r
\r
#include "dapl.h"\r
*\r
* PURPOSE: Manage PZ structure\r
*\r
- * $Id: dapl_pz_util.c,v 1.7 2003/06/13 12:21:11 sjs2 Exp $\r
+ * $Id$\r
**********************************************************************/\r
\r
#include "dapl_pz_util.h"\r
*\r
* PURPOSE: Utility defs & routines for the PZ data structure\r
*\r
- * $Id: dapl_pz_util.h,v 1.4 2003/06/13 12:21:11 sjs2 Exp $\r
+ * $Id$\r
*\r
**********************************************************************/\r
\r
* PURPOSE: Ring buffer management\r
* Description: Support and management functions for ring buffers\r
*\r
- * $Id: dapl_ring_buffer_util.c,v 1.11 2004/03/24 16:30:52 sjs2 Exp $\r
+ * $Id$\r
**********************************************************************/\r
\r
#include "dapl_ring_buffer_util.h"\r
*\r
* PURPOSE: Utility defs & routines for the ring buffer data structure\r
*\r
- * $Id: dapl_ring_buffer_util.h,v 1.6 2003/11/11 20:38:22 sjs2 Exp $\r
+ * $Id$\r
*\r
**********************************************************************/\r
\r
* Description: Interfaces in this file are completely described in\r
* the DAPL 1.1 API, Chapter 6, section 6\r
*\r
- * $Id: dapl_rmr_bind.c,v 1.18 2004/01/27 18:42:12 addetia Exp $\r
+ * $Id$\r
**********************************************************************/\r
\r
#include "dapl.h"\r
* Description: Interfaces in this file are completely described in\r
* the DAPL 1.1 API, Chapter 6, section 6\r
*\r
- * $Id: dapl_rmr_create.c,v 1.7 2003/11/04 17:08:30 sjs2 Exp $\r
+ * $Id$\r
**********************************************************************/\r
\r
#include "dapl_rmr_util.h"\r
* Description: Interfaces in this file are completely described in\r
* the DAPL 1.1 API, Chapter 6, section 6\r
*\r
- * $Id: dapl_rmr_free.c,v 1.15 2003/11/04 17:08:30 sjs2 Exp $\r
+ * $Id$\r
**********************************************************************/\r
\r
#include "dapl_rmr_util.h"\r
* Description: Interfaces in this file are completely described in\r
* the DAPL 1.1 API, Chapter 6, section 6\r
*\r
- * $Id: dapl_rmr_query.c,v 1.5 2003/06/16 17:53:34 sjs2 Exp $\r
+ * $Id$\r
**********************************************************************/\r
\r
#include "dapl.h"\r
*\r
* PURPOSE: Utility defs & routines for the RMR data structure\r
*\r
- * $Id: dapl_rmr_util.h,v 1.5 2003/06/30 16:25:59 jlentini Exp $\r
+ * $Id$\r
*\r
**********************************************************************/\r
\r
* Description: Interfaces in this file are completely described in\r
* the DAPL 1.1 API, Chapter 6, section 4\r
*\r
- * $Id: dapl_rsp_create.c,v 1.16 2004/02/24 17:28:30 sjs2 Exp $\r
+ * $Id$\r
**********************************************************************/\r
\r
#include "dapl.h"\r
* Description: Interfaces in this file are completely described in\r
* the DAPL 1.1 API, Chapter 6, section 4\r
*\r
- * $Id: dapl_rsp_free.c,v 1.20 2004/01/29 21:14:35 sjs2 Exp $\r
+ * $Id$\r
**********************************************************************/\r
\r
#include "dapl.h"\r
* Description: Interfaces in this file are completely described in\r
* the DAPL 1.1 API, Chapter 6, section 4\r
*\r
- * $Id: dapl_rsp_query.c,v 1.6 2003/06/16 17:53:34 sjs2 Exp $\r
+ * $Id$\r
**********************************************************************/\r
\r
#include "dapl.h"\r
* Description: Interfaces in this file are completely described in\r
* the DAPL 1.1 API, Chapter 6, section 2\r
*\r
- * $Id: dapl_set_consumer_context.c,v 1.6 2003/08/20 13:50:45 sjs2 Exp $\r
+ * $Id$\r
**********************************************************************/\r
\r
#include "dapl.h"\r
*\r
* PURPOSE: Manage PSP Info structure\r
*\r
- * $Id: dapl_sp_util.c,v 1.12 2003/12/18 18:00:43 sjs2 Exp $\r
+ * $Id$\r
**********************************************************************/\r
\r
#include "dapl.h"\r
\r
do \r
{\r
- if ( cr_ptr->ib_cm_handle == ib_cm_handle )\r
+ if ( !memcmp( &cr_ptr->ib_cm_handle, &ib_cm_handle, sizeof(ib_cm_handle_t) ) )\r
{\r
cr_ptr_fnd = cr_ptr;\r
\r
*\r
* PURPOSE: Utility defs & routines for the PSP & RSP data structure\r
*\r
- * $Id: dapl_sp_util.h,v 1.6 2003/12/18 18:00:43 sjs2 Exp $\r
+ * $Id$\r
*\r
**********************************************************************/\r
\r
*\r
* PURPOSE: IB Connection routines for access to IBAL APIs\r
*\r
- * $Id: dapl_ibal_cm.c 1.38 04/08/06 19:29:06-04:00 aestrin@aestrin.infiniconsys.com $\r
+ * $Id$\r
*\r
**********************************************************************/\r
\r
}\r
else\r
{\r
- dapl_evd_connection_callback ( NULL,\r
+ dapl_evd_connection_callback ( ep_ptr->cm_handle,\r
cm_event,\r
(void *) p_cm_rej_rec->p_rej_pdata,\r
(void *) p_cm_rej_rec->qp_context);\r
* Register request or mra callback functions\r
*/\r
cm_listen.pfn_cm_req_cb = dapli_ib_cm_req_cb;\r
- cm_listen.pfn_cm_rej_cb = dapli_ib_cm_rej_cb;\r
- cm_listen.pfn_cm_mra_cb = dapli_ib_cm_mra_cb;\r
\r
ib_status = ib_cm_listen ( dapl_ibal_root.h_al,\r
&cm_listen,\r
#endif\r
#endif\r
\r
+ cm_rep.pfn_cm_rej_cb = dapli_ib_cm_rej_cb;\r
+ cm_rep.pfn_cm_mra_cb = dapli_ib_cm_mra_cb;\r
cm_rep.pfn_cm_rtu_cb = dapli_ib_cm_rtu_cb;\r
cm_rep.pfn_cm_lap_cb = dapli_ib_cm_lap_cb;\r
cm_rep.pfn_cm_dreq_cb = dapli_ib_cm_dreq_cb;\r
\r
cr_ptr = (DAPL_CR *) cr_handle;\r
\r
- if (cr_ptr->ib_cm_handle == IB_INVALID_HANDLE)\r
+    if (cr_ptr->ib_cm_handle.cid == 0xFFFFFFFF)
{\r
dapl_dbg_log ( DAPL_DBG_TYPE_ERR,"--> DsCH: CR = %p invalid cm handle\n", cr_ptr);\r
return DAT_INVALID_PARAMETER;\r
* PURPOSE: Utility routines for data transfer operations using the\r
* IBAL APIs\r
*\r
- * $Id: dapl_ibal_dto.h 1.15 04/07/14 18:37:35-04:00 aestrin@aestrin.infiniconsys.com $\r
+ * $Id$\r
*\r
**********************************************************************/\r
\r
*\r
* PURPOSE: Utility defs & routines for access to Intel IBAL APIs\r
*\r
- * $Id: dapl_ibal_kmod.h,v 1.1.1.1 2002/10/16 15:10:17 vu Exp $\r
+ * $Id$\r
*\r
**********************************************************************/\r
\r
*\r
* PURPOSE: Utility routines for access to IBAL APIs\r
*\r
- * $Id: dapl_ibal_mrdb.c,v 1.1.1.1 2002/10/16 15:10:17 vu Exp $\r
+ * $Id$\r
*\r
**********************************************************************/\r
\r
*\r
* PURPOSE: Utility defs & routines for access to Intel IBAL APIs\r
*\r
- * $Id: dapl_ibal_mrdb.h,v 1.1.1.1 2002/10/16 15:10:17 vu Exp $\r
+ * $Id$\r
*\r
**********************************************************************/\r
\r
*\r
* PURPOSE: IB QP routines for access to IBAL APIs\r
*\r
- * $Id: dapl_ibal_qp.c 1.25 04/08/06 18:43:01-04:00 aestrin@aestrin.infiniconsys.com $\r
+ * $Id$\r
*\r
**********************************************************************/\r
\r
*\r
* PURPOSE: Utility routines for access to IBAL APIs\r
*\r
- * $Id: dapl_ibal_util.c 1.28 04/07/14 18:37:36-04:00 aestrin@aestrin.infiniconsys.com $\r
+ * $Id$\r
*\r
**********************************************************************/\r
\r
*\r
* PURPOSE: Utility defs & routines for access to Intel IBAL APIs\r
*\r
- * $Id: dapl_ibal_util.h,v 1.1.1.1 2002/10/16 15:10:17 vu Exp $\r
+ * $Id$\r
*\r
**********************************************************************/\r
\r
* DAPL RI.\r
*\r
*\r
- * $Id: dapl.h,v 1.77 2004/06/04 13:20:05 sjs2 Exp $\r
+ * $Id$\r
**********************************************************************/\r
\r
#ifndef _DAPL_H_\r
* Description:\r
*\r
*\r
- * $Id: dapl_debug.h,v 1.5 2003/12/18 17:55:39 sjs2 Exp $\r
+ * $Id$\r
**********************************************************************/\r
\r
#ifndef _DAPL_DEBUG_H_\r
*\r
* NOTE: As implementations mature this may not be necessary.\r
*\r
- * $Id: dapl_ipoib_names.h,v 1.4 2003/06/13 12:21:13 sjs2 Exp $\r
+ * $Id$\r
**********************************************************************/\r
\r
#ifndef _IPOIB_NAMING_H_\r
* these values are returned in the DAT_IA_ATTR parameter of\r
* dat_ia_query()\r
*\r
- * $Id: dapl_vendor.h,v 1.3 2003/06/30 13:12:54 sjs2 Exp $\r
+ * $Id$\r
**********************************************************************/\r
\r
/**********************************************************************\r
* Description: Interfaces in this file are completely described in\r
* the DAPL 1.1 API, Chapter 6, section 2\r
*\r
- * $Id: dapl_init.c,v 1.50 2004/01/06 14:21:59 sjs2 Exp $\r
+ * $Id$\r
**********************************************************************/\r
\r
#include "dapl.h"\r
* Description: Interfaces in this file are completely described in\r
* dapl_name_service.h\r
*\r
- * $Id: dapl_name_service.c,v 1.2 2004/02/25 13:21:43 sjs2 Exp $\r
+ * $Id$\r
**********************************************************************/\r
\r
/*\r
*\r
* PURPOSE: Utility defs & routines supporting name services\r
*\r
- * $Id: dapl_name_service.h,v 1.1 2003/10/28 14:43:22 sjs2 Exp $\r
+ * $Id$\r
*\r
**********************************************************************/\r
\r
* This file also contains the timer handler thread,\r
* embodied in dapls_timer_thread().\r
*\r
- * $Id: dapl_timer_util.c,v 1.2 2004/01/05 20:50:21 sjs2 Exp $\r
+ * $Id$\r
**********************************************************************/\r
\r
#include "dapl.h"\r
* PURPOSE: DAPL timer management\r
* Description: support for dapl_timer.h\r
*\r
- * $Id: dapl_timer_util.h,v 1.2 2004/01/05 13:39:05 sjs2 Exp $\r
+ * $Id$\r
**********************************************************************/\r
\r
void dapls_timer_init ( void );\r
* of common functions.\r
* \r
*\r
- * $Id: dapl_osd.c,v 1.26 2003/07/31 14:04:18 jlentini Exp $\r
+ * $Id$\r
**********************************************************************/\r
\r
#include "dapl_osd.h"\r
* a canonical DAPL interface. Designed to be portable\r
* and hide OS specific quirks of common functions.\r
*\r
- * $Id: dapl_osd.h,v 1.38 2003/08/20 14:08:57 sjs2 Exp $\r
+ * $Id$\r
**********************************************************************/\r
\r
#ifndef _DAPL_OSD_H_\r
* of common functions.\r
* \r
*\r
- * $Id: dapl_osd.c,v 1.16 2003/07/16 17:54:27 sjs2 Exp $\r
+ * $Id$\r
**********************************************************************/\r
\r
/*\r
* a canonical DAPL interface. Designed to be portable\r
* and hide OS specific quirks of common functions.\r
*\r
- * $Id: dapl_osd.h,v 1.20 2003/07/31 14:04:18 jlentini Exp $\r
+ * $Id$\r
**********************************************************************/\r
\r
#ifndef _DAPL_OSD_H_\r
*\r
* PURPOSE: Convert DAT_RETURN values to humman readable string\r
*\r
- * $Id: dat_strerror.c,v 1.3 2003/09/24 14:49:46 sjs2 Exp $\r
+ * $Id$\r
**********************************************************************/\r
\r
#include <dat/udat.h>\r
* Description: Interfaces in this file are completely described in\r
*the kDAPL 1.0 API\r
*\r
- * $Id: dat_kdapl.c,v 1.6 2003/06/16 17:53:35 sjs2 Exp $\r
+ * $Id$\r
**********************************************************************/ \r
\r
#include "dat_osd.h"\r
* kernel\r
* Description: a linux module implementation\r
*\r
- * $Id: dat_module.c,v 1.4 2003/06/13 11:10:36 sjs2 Exp $\r
+ * $Id$\r
**********************************************************************/\r
\r
#include "dat_osd.h"\r
* of common functions.\r
* \r
*\r
- * $Id: dat_osd.c,v 1.5 2003/06/16 17:53:35 sjs2 Exp $\r
+ * $Id$\r
**********************************************************************/\r
\r
#include <linux/module.h>\r
* a canonical DAT interface. Designed to be portable\r
* and hide OS specific quirks of common functions.\r
*\r
- * $Id: dat_osd.h,v 1.5 2003/06/16 17:53:35 sjs2 Exp $\r
+ * $Id$\r
**********************************************************************/\r
\r
#ifndef _DAT_OSD_H_\r
* interface. Designed to be portable and hide OS specific quirks\r
* of common functions.\r
*\r
- * $Id: dat_osd.c,v 1.8 2003/08/15 20:09:52 jlentini Exp $\r
+ * $Id$\r
**********************************************************************/\r
\r
#include "dat_osd.h"\r
* a canonical DAT interface. Designed to be portable\r
* and hide OS specific quirks of common functions.\r
*\r
- * $Id: dat_osd.h,v 1.14 2003/07/31 14:04:19 jlentini Exp $\r
+ * $Id$\r
**********************************************************************/\r
\r
#ifndef _DAT_OSD_H_\r
*\r
* PURPOSE: DAT Provider and Consumer registry functions.\r
*\r
- * $Id: udat.c,v 1.13 2003/08/20 14:28:40 hobie16 Exp $\r
+ * $Id$\r
**********************************************************************/\r
\r
#include <dat/udat.h>\r
*\r
* PURPOSE: static registry parser\r
*\r
- * $Id: udat_sr_parser.c,v 1.1 2003/07/31 14:04:19 jlentini Exp $\r
+ * $Id$\r
**********************************************************************/\r
\r
\r
*\r
* PURPOSE: static registry (SR) parser inteface declarations\r
*\r
- * $Id: udat_sr_parser.h,v 1.1 2003/07/31 14:04:19 jlentini Exp $\r
+ * $Id$\r
**********************************************************************/\r
\r
#ifndef _DAT_SR_PARSER_H_\r
* interface. Designed to be portable and hide OS specific quirks\r
* of common functions.\r
*\r
- * $Id: dat_osd.c,v 1.6 2003/06/16 17:53:35 sjs2 Exp $\r
+ * $Id$\r
**********************************************************************/\r
\r
#include "dat_osd.h"\r
* a canonical DAPL interface. Designed to be portable\r
* and hide OS specific quirks of common functions.\r
*\r
- * $Id: dat_osd.h,v 1.12 2003/08/15 20:09:53 jlentini Exp $\r
+ * $Id$\r
**********************************************************************/\r
\r
#ifndef _DAT_OSD_H_\r
*\r
* PURPOSE: static registry (SR) platform specific inteface declarations\r
*\r
- * $Id: dat_osd_sr.h,v 1.1 2003/03/28 22:52:47 jlentini Exp $\r
+ * $Id$\r
**********************************************************************/\r
\r
#ifndef _DAT_OSD_SR_H_\r
socket_info->info.listen.listen_req_param.identifier));\r
\r
param.pfn_cm_req_cb = cm_req_callback;\r
- param.pfn_cm_mra_cb = cm_mra_callback;\r
- param.pfn_cm_rej_cb = cm_rej_callback;\r
\r
param.qp_type = IB_QPT_RELIABLE_CONN;\r
\r
cm_req.rnr_retry_cnt = QP_ATTRIB_RNR_RETRY;\r
cm_req.retry_cnt = QP_ATTRIB_RETRY_COUNT;\r
cm_req.p_alt_path = NULL;\r
- cm_req.pfn_cm_req_cb = cm_req_callback;\r
cm_req.pfn_cm_mra_cb = cm_mra_callback;\r
cm_req.pfn_cm_rej_cb = cm_rej_callback;\r
\r
cm_rep.flow_ctrl = cm_req_received->flow_ctrl;\r
cm_rep.rnr_nak_timeout = QP_ATTRIB_RNR_NAK_TIMEOUT;\r
cm_rep.rnr_retry_cnt = cm_req_received->rnr_retry_cnt;\r
+ cm_rep.pfn_cm_mra_cb = cm_mra_callback;\r
+ cm_rep.pfn_cm_rej_cb = cm_rej_callback;\r
cm_rep.pfn_cm_rtu_cb = cm_rtu_callback;\r
cm_rep.pfn_cm_lap_cb = cm_lap_callback;\r
cm_rep.pfn_cm_dreq_cb = cm_dreq_callback;\r
status = ib_query_qp( socket_info->qp, &qp_attr );\r
if( status == IB_SUCCESS )\r
{\r
- socket_info->max_inline = qp_attr.sq_max_inline;\r
+ socket_info->max_inline = min( g_max_inline, qp_attr.sq_max_inline );\r
}\r
else\r
{\r
};\r
\r
static DWORD no_read = 0;\r
+uint32_t g_max_inline = 0xFFFFFFFF;\r
\r
/*\r
* Function: DllMain\r
IN DWORD dwReason,\r
IN LPVOID lpvReserved )\r
{\r
-#ifdef _DEBUG_\r
- TCHAR dbg_lvl_str[16];\r
+ TCHAR env_var[16];\r
DWORD i;\r
-#endif\r
\r
CL_ENTER( IBSP_DBG_DLL, gdbg_lvl );\r
\r
CL_TRACE( IBSP_DBG_DLL, gdbg_lvl, ("DllMain: DLL_PROCESS_ATTACH\n") );\r
\r
#ifdef _DEBUG_\r
- i = GetEnvironmentVariable( "IBWSD_DBG", dbg_lvl_str, 16 );\r
+ i = GetEnvironmentVariable( "IBWSD_DBG", env_var, 16 );\r
if( i && i <= 16 )\r
{\r
- gdbg_lvl = _tcstoul( dbg_lvl_str, NULL, 16 );\r
+ gdbg_lvl = _tcstoul( env_var, NULL, 16 );\r
IBSP_TRACE( IBSP_DBG_DLL,\r
("Given IBWSD_DBG debug level:0x%X\n",\r
gdbg_lvl) );\r
/* See if the user wants to disable RDMA reads. */\r
no_read = GetEnvironmentVariable( "IBWSD_NO_READ", NULL, 0 );\r
\r
+ i = GetEnvironmentVariable( "IBWSD_INLINE", env_var, 16 );\r
+ if( i && i <= 16 )\r
+ g_max_inline = _tcstoul( env_var, NULL, 10 );\r
+\r
if( init_globals() )\r
return FALSE;\r
break;\r
\r
extern struct ibspdll_globals g_ibsp;\r
\r
+extern uint32_t g_max_inline;\r
+\r
#endif /* IBSPDLL_H */\r