}\r
\r
cl_spinlock_release( &p_obj->lock );\r
+\r
+ /* Cleanup any left-over connections. */\r
+ al_cep_cleanup_al( h_al );\r
}\r
\r
\r
/* Free any MADs not returned by the user. */\r
__free_mads( h_al );\r
\r
- /* Cleanup any left-over connections. */\r
- al_cep_cleanup_al( h_al );\r
-\r
#ifdef CL_KERNEL\r
cl_vector_destroy( &h_al->hdl_vector );\r
#endif\r
#define AL_INVALID_CID 0xFFFFFFFF\r
\r
\r
-typedef ib_api_status_t\r
+typedef void\r
(*al_pfn_cep_cb_t)(\r
IN const ib_al_handle_t h_al,\r
IN ib_cep_t* const p_cep );\r
* into the al_cep_poll call.\r
*\r
* RETURN VALUES:\r
-* IB_SUCCESS\r
-* Recipient successfully processed the event.\r
-*\r
-* IB_ERROR\r
-* The event could not be processed.\r
+* This function does not return a value.\r
*\r
* NOTES\r
* The callback is invoked at DISPATCH_LEVEL.\r
/*\r
* NOTES\r
* This function may be invoked at DISPATCH_LEVEL\r
+*\r
+* The pfn_cb parameter may be NULL in the kernel if using IRPs for\r
+* event notification.\r
*********/\r
\r
\r
\r
\r
#ifdef CL_KERNEL\r
-ib_api_status_t\r
-al_cep_xchg_irp(\r
- IN ib_al_handle_t h_al,\r
- IN net32_t cid,\r
- IN IRP* const p_new_irp,\r
- OUT IRP** const pp_old_irp );\r
-\r
-ib_api_status_t\r
-al_cep_cancel_irp(\r
+NTSTATUS\r
+al_cep_queue_irp(\r
IN ib_al_handle_t h_al,\r
IN net32_t cid,\r
IN IRP* const p_irp );\r
__cep_timewait_qp( p_cm->h_qp );\r
\r
cid = cl_atomic_xchg( &((al_conn_qp_t*)p_cm->h_qp)->cid, AL_INVALID_CID );\r
- CL_ASSERT( cid == p_cm->cid );\r
+ if( cid != AL_INVALID_CID )\r
+ {\r
+ CL_ASSERT( cid == p_cm->cid );\r
\r
- if( al_destroy_cep( p_cm->h_al, p_cm->cid, deref_al_obj ) != IB_SUCCESS )\r
+ if( al_destroy_cep(\r
+ p_cm->h_al, p_cm->cid, deref_al_obj ) != IB_SUCCESS )\r
+ {\r
+ deref_al_obj( &p_cm->h_qp->obj );\r
+ }\r
+ }\r
+ else\r
+ {\r
deref_al_obj( &p_cm->h_qp->obj );\r
+ }\r
\r
((al_conn_qp_t*)p_cm->h_qp)->pfn_cm_drep_cb( &drep_rec );\r
\r
static void\r
__process_cep_cb(\r
#else\r
-static ib_api_status_t\r
+static void\r
__cm_handler(\r
#endif\r
IN const ib_al_handle_t h_al,\r
}\r
ib_put_mad( p_mad );\r
}\r
-#ifndef CL_KERNEL\r
- return IB_SUCCESS;\r
-#endif\r
}\r
\r
\r
* to a passive level thread context to perform QP modify and invoke user\r
* callbacks.\r
*/\r
-static ib_api_status_t\r
+static void\r
__cm_handler(\r
IN const ib_al_handle_t h_al,\r
IN ib_cep_t* const p_cep )\r
AL_TRACE_EXIT( AL_DBG_ERROR,\r
("failed to cl_zalloc cm_async_mad_t (%d bytes)\n",\r
sizeof(cep_async_mad_t)) );\r
- return IB_ERROR;\r
+ return;\r
}\r
\r
p_async_mad->h_al = h_al;\r
cl_async_proc_queue( gp_async_proc_mgr, &p_async_mad->item );\r
\r
AL_EXIT( AL_DBG_CM );\r
- return IB_SUCCESS;\r
}\r
#endif /* CL_KERNEL */\r
\r
al_cep_rej( h_cm.h_al, h_cm.cid, IB_REJ_INSUF_QP, NULL, 0, NULL, 0 );\r
\r
if( al_destroy_cep( h_cm.h_al, h_cm.cid, deref_al_obj ) != IB_SUCCESS )\r
- deref_al_obj( &h_cm.h_qp->obj );\r
+ deref_al_obj( &p_cm_rep->h_qp->obj );\r
}\r
\r
AL_EXIT( AL_DBG_CM );\r
\r
cid = cl_atomic_xchg(\r
&((al_conn_qp_t*)h_cm.h_qp)->cid, AL_INVALID_CID );\r
- CL_ASSERT( cid == h_cm.cid );\r
-\r
- ref_al_obj( &h_cm.h_qp->obj );\r
- if( al_destroy_cep( h_cm.h_al, h_cm.cid, deref_al_obj ) != IB_SUCCESS )\r
- deref_al_obj( &h_cm.h_qp->obj );\r
+ if( cid != AL_INVALID_CID )\r
+ {\r
+ ref_al_obj( &h_cm.h_qp->obj );\r
+ if( al_destroy_cep( h_cm.h_al, h_cm.cid, deref_al_obj ) != IB_SUCCESS )\r
+ deref_al_obj( &h_cm.h_qp->obj );\r
+ }\r
}\r
\r
AL_EXIT( AL_DBG_CM );\r
\r
cid = cl_atomic_xchg(\r
&((al_conn_qp_t*)h_cm_dreq.h_qp)->cid, AL_INVALID_CID );\r
- CL_ASSERT( cid == h_cm_dreq.cid );\r
- ref_al_obj( &h_cm_dreq.h_qp->obj );\r
- if( al_destroy_cep(\r
- h_cm_dreq.h_al, h_cm_dreq.cid, deref_al_obj ) != IB_SUCCESS )\r
+ if( cid != AL_INVALID_CID )\r
{\r
- deref_al_obj( &h_cm_dreq.h_qp->obj );\r
+ CL_ASSERT( cid == h_cm_dreq.cid );\r
+ ref_al_obj( &h_cm_dreq.h_qp->obj );\r
+ if( al_destroy_cep(\r
+ h_cm_dreq.h_al, h_cm_dreq.cid, deref_al_obj ) != IB_SUCCESS )\r
+ {\r
+ deref_al_obj( &h_cm_dreq.h_qp->obj );\r
+ }\r
}\r
}\r
\r
__destroying_listen(\r
IN al_obj_t* p_obj )\r
{\r
+ ib_api_status_t status;\r
al_listen_t *p_listen;\r
\r
p_listen = PARENT_STRUCT( p_obj, al_listen_t, obj );\r
\r
/* Destroy the listen's CEP. */\r
- ref_al_obj( p_obj );\r
- if( al_destroy_cep(\r
- p_obj->h_al, p_listen->cid, deref_al_obj ) != IB_SUCCESS )\r
+ status = al_destroy_cep(\r
+ p_obj->h_al, p_listen->cid, deref_al_obj );\r
+ if( status != IB_SUCCESS )\r
{\r
+ AL_TRACE( AL_DBG_ERROR,\r
+ ("al_destroy_cep returned %s.\n", ib_get_err_str( status )) );\r
deref_al_obj( p_obj );\r
}\r
}\r
/* valid for ud qp_type only */\r
p_listen->sidr_context = p_cm_listen->sidr_context;\r
\r
- /*\r
- * Cast of ib_cm_cancel to type al_pfn_destroy_t required for first\r
- * paramter type mismatch.\r
- */\r
construct_al_obj( &p_listen->obj, AL_OBJ_TYPE_H_LISTEN );\r
status = init_al_obj( &p_listen->obj, listen_context, TRUE,\r
__destroying_listen, NULL, __free_listen );\r
\r
*ph_cm_listen = p_listen;\r
\r
+ /* Note that we keep the reference held on behalf of the CEP. */\r
+\r
AL_EXIT( AL_DBG_CM );\r
return IB_SUCCESS;\r
}\r
status = __cep_listen(h_al, p_cm_listen, pfn_listen_err_cb, listen_context,\r
ph_cm_listen );\r
\r
- /* Release the reference taken in init_al_obj. */\r
- if( status == IB_SUCCESS )\r
- deref_al_obj( &(*ph_cm_listen)->obj );\r
-\r
CL_EXIT( AL_DBG_CM, g_al_dbg_lvl );\r
return status;\r
}\r
typedef enum _cep_state\r
{\r
CEP_STATE_IDLE,\r
- CEP_STATE_PRE_REQ = CEP_STATE_IDLE | CEP_STATE_PREP,\r
CEP_STATE_LISTEN,\r
CEP_STATE_ESTABLISHED,\r
CEP_STATE_TIMEWAIT,\r
- CEP_STATE_DESTROY = CEP_STATE_DESTROYING,\r
CEP_STATE_SREQ_SENT,\r
CEP_STATE_SREQ_RCVD,\r
CEP_STATE_ERROR,\r
+ CEP_STATE_DESTROY = CEP_STATE_DESTROYING,\r
+ CEP_STATE_PRE_REQ = CEP_STATE_IDLE | CEP_STATE_PREP,\r
CEP_STATE_REQ_RCVD = CEP_STATE_REQ | CEP_STATE_RCVD,\r
CEP_STATE_PRE_REP = CEP_STATE_REQ_RCVD | CEP_STATE_PREP,\r
CEP_STATE_REQ_SENT = CEP_STATE_REQ | CEP_STATE_SENT,\r
{\r
case CM_REQ_ATTR_ID:\r
status = conn_rej_set_ari(\r
- (uint8_t*)&((mad_cm_req_t*)p_mad->p_mad_buf)->local_ca_guid,\r
- sizeof(net64_t), (mad_cm_rej_t*)p_mad_buf );\r
+ (uint8_t*)&p_cep->local_ca_guid,\r
+ sizeof(p_cep->local_ca_guid), (mad_cm_rej_t*)p_mad_buf );\r
CL_ASSERT( status == IB_SUCCESS );\r
__reject_mad( p_port_cep, p_cep, p_rej_mad, IB_REJ_TIMEOUT );\r
break;\r
\r
case CM_REP_ATTR_ID:\r
status = conn_rej_set_ari(\r
- (uint8_t*)&((mad_cm_rep_t*)p_mad->p_mad_buf)->local_ca_guid,\r
- sizeof(net64_t), (mad_cm_rej_t*)p_mad_buf );\r
+ (uint8_t*)&p_cep->local_ca_guid,\r
+ sizeof(p_cep->local_ca_guid), (mad_cm_rej_t*)p_mad_buf );\r
CL_ASSERT( status == IB_SUCCESS );\r
__reject_mad( p_port_cep, p_cep, p_rej_mad, IB_REJ_TIMEOUT );\r
break;\r
goto err;\r
}\r
}\r
+ /*\r
+ * Note that we don't update the CEP's remote comm ID - it messes up REP\r
+ * processing since a non-zero RCID implies the connection is in the RCID\r
+ * map. Adding it here requires checking there and conditionally adding\r
+ * it. Ignoring it is a valid thing to do.\r
+ */\r
\r
if( !(p_cep->state & CEP_STATE_SENT) ||\r
(1 << conn_mra_get_msg_mraed( p_mra ) !=\r
\r
/* Delay the current send. */\r
CL_ASSERT( p_cep->p_send_mad );\r
- ib_delay_mad( p_cep->h_mad_svc, p_cep->p_send_mad, __calc_mad_timeout(\r
- conn_mra_get_svc_timeout( p_mra ) + p_cep->max_2pkt_life - 1 ) );\r
+ ib_delay_mad( p_cep->h_mad_svc, p_cep->p_send_mad,\r
+ __calc_mad_timeout( conn_mra_get_svc_timeout( p_mra ) ) +\r
+ __calc_mad_timeout( p_cep->max_2pkt_life - 1 ) );\r
\r
/* We only invoke a single callback for MRA. */\r
if( p_cep->state & CEP_STATE_MRA )\r
\r
/* Fall through */\r
case CEP_STATE_REP_SENT:\r
+ case CEP_STATE_REQ_MRA_RCVD:\r
+ case CEP_STATE_REP_MRA_RCVD:\r
/* Cancel any outstanding MAD. */\r
if( p_cep->p_send_mad )\r
{\r
/* Fall through */\r
case CEP_STATE_REQ_RCVD:\r
case CEP_STATE_REP_RCVD:\r
- case CEP_STATE_REQ_MRA_RCVD:\r
case CEP_STATE_REQ_MRA_SENT:\r
- case CEP_STATE_REP_MRA_RCVD:\r
case CEP_STATE_REP_MRA_SENT:\r
case CEP_STATE_PRE_REP:\r
case CEP_STATE_PRE_REP_MRA_SENT:\r
+ if( p_cep->state & CEP_STATE_PREP )\r
+ {\r
+ CL_ASSERT( p_cep->p_mad );\r
+ ib_put_mad( p_cep->p_mad );\r
+ p_cep->p_mad = NULL;\r
+ }\r
/* Abort connection establishment. No transition to timewait. */\r
__remove_cep( p_cep );\r
p_cep->state = CEP_STATE_IDLE;\r
case CEP_STATE_LAP_MRA_SENT:\r
case CEP_STATE_PRE_APR:\r
case CEP_STATE_PRE_APR_MRA_SENT:\r
+ if( p_cep->state & CEP_STATE_PREP )\r
+ {\r
+ CL_ASSERT( p_cep->p_mad );\r
+ ib_put_mad( p_cep->p_mad );\r
+ p_cep->p_mad = NULL;\r
+ }\r
p_cep->state = CEP_STATE_TIMEWAIT;\r
__insert_timewait( p_cep );\r
break;\r
case IB_WCS_TIMEOUT_RETRY_ERR:\r
default:\r
/* Timeout. Reject the connection. */\r
- if( p_cep->state == CEP_STATE_REQ_SENT ||\r
- p_cep->state == CEP_STATE_REQ_MRA_RCVD ||\r
- p_cep->state == CEP_STATE_REP_SENT ||\r
- p_cep->state == CEP_STATE_REP_MRA_RCVD )\r
+ switch( p_cep->state )\r
{\r
+ case CEP_STATE_REQ_SENT:\r
+ case CEP_STATE_REQ_MRA_RCVD:\r
+ case CEP_STATE_REP_SENT:\r
+ case CEP_STATE_REP_MRA_RCVD:\r
/* Send the REJ. */\r
__reject_timeout( p_port_cep, p_cep, p_mad );\r
__remove_cep( p_cep );\r
p_cep->state = CEP_STATE_IDLE;\r
- }\r
- else if( p_cep->state == CEP_STATE_DREQ_DESTROY )\r
- {\r
+ break;\r
+\r
+ case CEP_STATE_DREQ_DESTROY:\r
p_cep->state = CEP_STATE_DESTROY;\r
__insert_timewait( p_cep );\r
+ /* Fall through. */\r
+\r
+ case CEP_STATE_DESTROY:\r
KeReleaseInStackQueuedSpinLockFromDpcLevel( &hdl );\r
ib_put_mad( p_mad );\r
- break;\r
- }\r
- else if( p_cep->state == CEP_STATE_DREQ_SENT )\r
- {\r
+ goto done;\r
+\r
+ case CEP_STATE_DREQ_SENT:\r
/*\r
* Make up a DREP mad so we can respond if we receive\r
* a DREQ while in timewait.\r
__format_drep( p_cep, NULL, 0, &p_cep->mads.drep );\r
p_cep->state = CEP_STATE_TIMEWAIT;\r
__insert_timewait( p_cep );\r
+\r
+ default:\r
+ break;\r
}\r
\r
status = __cep_queue_mad( p_cep, p_mad );\r
break;\r
}\r
\r
+done:\r
pfn_destroy_cb = p_cep->pfn_destroy_cb;\r
cep_context = p_cep->cep.context;\r
\r
IN ib_al_handle_t h_al OPTIONAL,\r
IN net32_t cid )\r
{\r
+ size_t idx;\r
cep_cid_t *p_cid;\r
\r
/* Mask off the counter bits so we get the index in our vector. */\r
- cid &= CEP_MAX_CID_MASK;\r
+ idx = cid & CEP_MAX_CID_MASK;\r
\r
/*\r
* Remove the CEP from the CID vector - no further API calls\r
* will succeed for it.\r
*/\r
- if( cid > cl_vector_get_size( &gp_cep_mgr->cid_vector ) )\r
+ if( idx > cl_vector_get_size( &gp_cep_mgr->cid_vector ) )\r
return NULL;\r
\r
- p_cid = (cep_cid_t*)cl_vector_get_ptr( &gp_cep_mgr->cid_vector, cid );\r
- if( p_cid->h_al && (!h_al || p_cid->h_al == h_al) )\r
- return p_cid->p_cep;\r
+ p_cid = (cep_cid_t*)cl_vector_get_ptr( &gp_cep_mgr->cid_vector, idx );\r
+ if( !p_cid->h_al )\r
+ return NULL;\r
\r
- /* Not the correct owner. */\r
- return NULL;\r
+ /*\r
+ * h_al is NULL when processing MADs, so we need to match on\r
+ * the actual local communication ID. If h_al is non-NULL, we\r
+ * are doing a lookup from a call to our API, and only need to match\r
+ * on the index in the vector (without the modifier).\r
+ */\r
+ if( h_al )\r
+ {\r
+ if( p_cid->h_al != h_al )\r
+ return NULL;\r
+ }\r
+ else if( p_cid->p_cep->local_comm_id != cid )\r
+ {\r
+ return NULL;\r
+ }\r
+\r
+ return p_cid->p_cep;\r
}\r
\r
\r
}\r
\r
\r
+/*
+ * Atomically dequeue the IRP (if any) queued on the CEP and complete
+ * it with the given status and priority boost.  Safe to call whether
+ * or not an IRP is queued.
+ */
+static inline void
+__cep_complete_irp(
+	IN				kcep_t* const				p_cep,
+	IN				NTSTATUS					status,
+	IN				CCHAR						increment )
+{
+	IRP				*p_irp;
+
+	AL_ENTER( AL_DBG_CM );
+
+	/*
+	 * Claim sole ownership of the IRP.  Competing paths (the cancel
+	 * routine, CEP destruction, a new queue request) all exchange with
+	 * NULL, so exactly one path completes any given IRP.
+	 */
+	p_irp = InterlockedExchangePointer( &p_cep->p_irp, NULL );
+
+	if( p_irp )
+	{
+#pragma warning(push, 3)
+		/* Clear the cancel routine so the IRP can no longer be cancelled. */
+		IoSetCancelRoutine( p_irp, NULL );
+#pragma warning(pop)
+
+		/* Complete the IRP. */
+		p_irp->IoStatus.Status = status;
+		p_irp->IoStatus.Information = 0;
+		IoCompleteRequest( p_irp, increment );
+	}
+
+	AL_EXIT( AL_DBG_CM );
+}
+\r
+\r
static inline void\r
__process_cep(\r
IN kcep_t* const p_cep )\r
CL_ASSERT( KeGetCurrentIrql() == DISPATCH_LEVEL );\r
\r
/* Signal to the user there are callback waiting. */\r
- p_cep->pfn_cb( p_cep->p_cid->h_al, &p_cep->cep );\r
+ if( p_cep->pfn_cb )\r
+ p_cep->pfn_cb( p_cep->p_cid->h_al, &p_cep->cep );\r
+ else\r
+ __cep_complete_irp( p_cep, STATUS_SUCCESS, IO_NETWORK_INCREMENT );\r
\r
pfn_destroy_cb = p_cep->pfn_destroy_cb;\r
context = p_cep->cep.context;\r
* timeout_ms = 67 << (x - 14)\r
* The results are off by 0.162%.\r
*\r
- * Note that we will never return less than 1 millisecond.\r
+ * Note that we will never return less than 1 millisecond. We also\r
+ * trap exceedingly large values to prevent wrapping.\r
*/\r
+ if( pkt_life > 39 )\r
+ return ~0UL;\r
if( pkt_life > 14 )\r
return 67 << (pkt_life - 14);\r
else if( pkt_life > 8 )\r
p_port_cep = __get_cep_agent( p_cep );\r
if( !p_port_cep )\r
{\r
- AL_EXIT( AL_DBG_CM );\r
+ AL_TRACE_EXIT( AL_DBG_ERROR, ("__get_cep_agent failed.\n") );\r
return IB_INSUFFICIENT_RESOURCES;\r
}\r
\r
status = ib_get_mad( p_port_cep->pool_key, MAD_BLOCK_SIZE, pp_mad );\r
if( status != IB_SUCCESS )\r
{\r
- AL_EXIT( AL_DBG_CM );\r
+ AL_TRACE_EXIT( AL_DBG_ERROR,\r
+ ("ib_get_mad returned %s.\n", ib_get_err_str( status )) );\r
return status;\r
}\r
\r
__format_mad_hdr( (*pp_mad)->p_mad_buf, p_cep, attr_id );\r
\r
*pp_port_cep = p_port_cep;\r
+ AL_EXIT( AL_DBG_CM );\r
return status;\r
}\r
\r
CL_ASSERT( p_cep->p_send_mad );\r
ib_cancel_mad( p_cep->h_mad_svc, p_cep->p_send_mad );\r
/* Reject the connection. */\r
- __do_cep_rej( p_cep, IB_REJ_TIMEOUT, NULL, 0, NULL, 0 );\r
+ __do_cep_rej( p_cep, IB_REJ_TIMEOUT, (uint8_t*)&p_cep->local_ca_guid,\r
+ sizeof(p_cep->local_ca_guid), NULL, 0 );\r
break;\r
\r
case CEP_STATE_ESTABLISHED:\r
}\r
\r
context = p_cep->cep.context;\r
+ p_cep->pfn_destroy_cb = pfn_destroy_cb;\r
+\r
+ /* Cancel any queued IRP */\r
+ __cep_complete_irp( p_cep, STATUS_CANCELLED, IO_NO_INCREMENT );\r
\r
__unbind_cep( p_cep );\r
ref_cnt = __cleanup_cep( p_cep );\r
{\r
case CEP_STATE_REQ_RCVD:\r
case CEP_STATE_REQ_MRA_SENT:\r
- __remove_cep( p_cep );\r
status = __do_cep_rej(\r
p_cep, rej_status, p_ari, ari_len, p_pdata, pdata_len );\r
+ __remove_cep( p_cep );\r
p_cep->state = CEP_STATE_IDLE;\r
break;\r
\r
\r
status = __format_dreq( p_cep, p_pdata, pdata_len, p_mad );\r
if( status != IB_SUCCESS )\r
+ {\r
+ AL_TRACE( AL_DBG_ERROR,\r
+ ("__format_dreq returned %s.\n", ib_get_err_str( status )) );\r
break;\r
+ }\r
\r
if( __cep_send_retry( p_port_cep, p_cep, p_mad ) == IB_SUCCESS )\r
{\r
}\r
\r
\r
-/*\r
- * Kernel-mode only call, so we assert on parameters that we expect kernel\r
- * clients to always provide. \r
- */\r
-ib_api_status_t\r
-al_cep_xchg_irp(\r
- IN ib_al_handle_t h_al,\r
- IN net32_t cid,\r
- IN IRP* const p_new_irp,\r
- OUT IRP** const pp_old_irp )\r
+/*
+ * IRP cancel routine for queued CEP event-notification requests.
+ * Invoked by the I/O manager with the system cancel spinlock held.
+ * The CID and AL handle were stashed in the IRP's DriverContext when
+ * the IRP was queued, so we can look the CEP back up here.
+ */
+static void
+__cep_cancel_irp(
+	IN				DEVICE_OBJECT*				p_dev_obj,
+	IN				IRP*						p_irp )
{
-	ib_api_status_t		status;
-	kcep_t				*p_cep;
-	KLOCK_QUEUE_HANDLE	hdl;
+	net32_t				cid;
+	ib_al_handle_t		h_al;
+	KLOCK_QUEUE_HANDLE	hdl;
+	kcep_t				*p_cep;

	AL_ENTER( AL_DBG_CM );

+	UNUSED_PARAM( p_dev_obj );
+	CL_ASSERT( p_irp );
+
+	/* Recover the CEP identity stored by al_cep_queue_irp. */
+	cid = (net32_t)(size_t)p_irp->Tail.Overlay.DriverContext[0];
+	h_al = (ib_al_handle_t)p_irp->Tail.Overlay.DriverContext[1];
	CL_ASSERT( h_al );
-	CL_ASSERT( pp_old_irp );

	KeAcquireInStackQueuedSpinLock( &gp_cep_mgr->lock, &hdl );
	p_cep = __lookup_cep( h_al, cid );
-	if( !p_cep )
-	{
-		KeReleaseInStackQueuedSpinLock( &hdl );
-		AL_EXIT( AL_DBG_CM );
-		return IB_INVALID_HANDLE;
-	}
+	/*
+	 * The CEP may already be gone; the atomic exchange inside
+	 * __cep_complete_irp guarantees the IRP is completed only once
+	 * even if completion races with this cancellation.
+	 */
+	if( p_cep )
+		__cep_complete_irp( p_cep, STATUS_CANCELLED, IO_NO_INCREMENT );

-	/* Always dequeue whatever IRP is there. */
-	*pp_old_irp = p_cep->p_irp;
+	KeReleaseInStackQueuedSpinLock( &hdl );

-	/* Don't allow queueing the IRP if there are MADs to be reaped. */
-	if( p_cep->p_mad_head )
-	{
-		p_cep->p_irp = NULL;
-		status = IB_NOT_DONE;
-	}
-	else
-	{
-		p_cep->p_irp = p_new_irp;
-		status = IB_SUCCESS;
-	}
+	/* The I/O manager acquired the cancel spinlock before calling us. */
+	IoReleaseCancelSpinLock( p_irp->CancelIrql );

-	KeReleaseInStackQueuedSpinLock( &hdl );
	AL_EXIT( AL_DBG_CM );
-	return status;
}
\r
\r
+/*
+ * Queue an IRP on the CEP, to be completed when the next event arrives.
+ * Any previously queued IRP is completed as cancelled.  If events are
+ * already pending the new IRP is completed immediately.  Returns
+ * STATUS_PENDING on success, STATUS_INVALID_PARAMETER for a bad CID.
+ */
-ib_api_status_t
-al_cep_cancel_irp(
+NTSTATUS
+al_cep_queue_irp(
	IN				ib_al_handle_t				h_al,
	IN				net32_t						cid,
	IN				IRP* const					p_irp )
{
-	ib_api_status_t		status;
	kcep_t				*p_cep;
	KLOCK_QUEUE_HANDLE	hdl;

	{
		KeReleaseInStackQueuedSpinLock( &hdl );
		AL_EXIT( AL_DBG_CM );
-		return IB_INVALID_HANDLE;
+		return STATUS_INVALID_PARAMETER;
	}

-	if( p_cep->p_irp == p_irp )
-	{
-		p_cep->p_irp = NULL;
-		status = IB_SUCCESS;
-	}
-	else
-	{
-		status = IB_NOT_DONE;
-	}
+	/*
+	 * Store the CID and AL handle in the IRP's driver context
+	 * so we can cancel it.
+	 */
+	p_irp->Tail.Overlay.DriverContext[0] = (void*)(size_t)cid;
+	p_irp->Tail.Overlay.DriverContext[1] = (void*)h_al;
+#pragma warning(push, 3)
+	IoSetCancelRoutine( p_irp, __cep_cancel_irp );
+#pragma warning(pop)
+	IoMarkIrpPending( p_irp );
+
+	/* Always dequeue and complete whatever IRP is there. */
+	__cep_complete_irp( p_cep, STATUS_CANCELLED, IO_NO_INCREMENT );
+
+	/*
+	 * Publish the new IRP.  Completion paths use the same atomic
+	 * exchange, so each IRP is completed by exactly one path.
+	 */
+	InterlockedExchangePointer( &p_cep->p_irp, p_irp );
+
+	/* Complete the IRP if there are MADs to be reaped. */
+	if( p_cep->p_mad_head )
+		__cep_complete_irp( p_cep, STATUS_SUCCESS, IO_NETWORK_INCREMENT );

	KeReleaseInStackQueuedSpinLock( &hdl );
	AL_EXIT( AL_DBG_CM );
-	return status;
+	return STATUS_PENDING;
}
\r
\r
#include "al_qp.h"\r
\r
\r
-static ib_api_status_t\r
-__proxy_cep_cb(\r
- IN ib_al_handle_t h_al,\r
- IN ib_cep_t* const p_cep );\r
-\r
-\r
static cl_status_t\r
proxy_create_cep(\r
IN void *p_open_context,\r
return CL_INVALID_PARAMETER;\r
}\r
\r
- p_ioctl->status = al_create_cep( p_context->h_al, __proxy_cep_cb,\r
+ /* We use IRPs as notification mechanism so the callback is NULL. */\r
+ p_ioctl->status = al_create_cep( p_context->h_al, NULL,\r
p_context, &p_ioctl->cid );\r
\r
*p_ret_bytes = sizeof(ual_create_cep_ioctl_t);\r
IN cl_ioctl_handle_t h_ioctl,\r
OUT size_t *p_ret_bytes )\r
{\r
- ib_api_status_t status;\r
al_dev_open_context_t *p_context;\r
- IRP *p_irp;\r
\r
AL_ENTER( AL_DBG_CM );\r
\r
return CL_INVALID_PARAMETER;\r
}\r
\r
- status = al_cep_xchg_irp(\r
- p_context->h_al, *(net32_t*)cl_ioctl_in_buf( h_ioctl ), NULL, &p_irp );\r
- if( status != IB_INVALID_HANDLE && p_irp )\r
- __complete_get_event_ioctl( p_context->h_al, p_irp, STATUS_CANCELLED );\r
-\r
al_destroy_cep( p_context->h_al,\r
*(net32_t*)cl_ioctl_in_buf( h_ioctl ), NULL );\r
\r
}\r
\r
\r
-static void\r
-__proxy_cancel_cep(\r
- IN DEVICE_OBJECT* p_dev_obj,\r
- IN IRP* p_irp )\r
-{\r
- al_dev_open_context_t *p_context;\r
- PIO_STACK_LOCATION p_io_stack;\r
- net32_t cid;\r
-\r
- AL_ENTER( AL_DBG_DEV );\r
-\r
- UNUSED_PARAM( p_dev_obj );\r
-\r
- /* Get the stack location. */\r
- p_io_stack = IoGetCurrentIrpStackLocation( p_irp );\r
-\r
- p_context = (al_dev_open_context_t *)p_io_stack->FileObject->FsContext;\r
- ASSERT( p_context );\r
-\r
- cid = (net32_t)(size_t)p_irp->Tail.Overlay.DriverContext[0];\r
- if( al_cep_cancel_irp( p_context->h_al, cid, p_irp ) == IB_SUCCESS )\r
- __complete_get_event_ioctl( p_context->h_al, p_irp, STATUS_CANCELLED );\r
-\r
- IoReleaseCancelSpinLock( p_irp->CancelIrql );\r
-}\r
-\r
-\r
static cl_status_t\r
proxy_cep_get_event(\r
IN void *p_open_context,\r
IN cl_ioctl_handle_t h_ioctl,\r
OUT size_t *p_ret_bytes )\r
{\r
- ib_api_status_t status;\r
+ NTSTATUS status;\r
IO_STACK_LOCATION *p_io_stack;\r
al_dev_open_context_t *p_context;\r
net32_t cid;\r
- IRP *p_old_irp;\r
\r
AL_ENTER( AL_DBG_CM );\r
\r
\r
cid = *(net32_t*)cl_ioctl_in_buf( h_ioctl );\r
\r
- /* Store the CID in the IRP's driver context so we can cancel it. */\r
- h_ioctl->Tail.Overlay.DriverContext[0] = (void*)(size_t)cid;\r
-#pragma warning(push, 3)\r
- IoSetCancelRoutine( h_ioctl, __proxy_cancel_cep );\r
-#pragma warning(pop)\r
- IoMarkIrpPending( h_ioctl );\r
-\r
- ref_al_obj( &p_context->h_al->obj );\r
-\r
- /* Attempt to queue the IRP in the CEP. */\r
- status = al_cep_xchg_irp( p_context->h_al, cid, h_ioctl, &p_old_irp );\r
- switch( status )\r
+ status = al_cep_queue_irp( p_context->h_al, cid, h_ioctl );\r
+ if( status != STATUS_PENDING )\r
{\r
- case IB_SUCCESS:\r
- break;\r
-\r
- case IB_NOT_DONE:\r
- /* There are queued MADs - complete the IOCTL now. */\r
- __complete_get_event_ioctl( p_context->h_al, h_ioctl, STATUS_SUCCESS );\r
- break;\r
-\r
- default:\r
/* Invalid CID. Complete the request. */\r
- __complete_get_event_ioctl(\r
- p_context->h_al, h_ioctl, STATUS_INVALID_PARAMETER );\r
AL_EXIT( AL_DBG_CM );\r
- return CL_PENDING;\r
- }\r
-\r
- /* Check for an existing IRP. */\r
- if( p_old_irp )\r
- {\r
- /*\r
- * We must handle the race between this IOCTL and a cancellation of\r
- * the previous one both trying to cancel the old IRP.\r
- */\r
- __complete_get_event_ioctl(\r
- p_context->h_al, p_old_irp, STATUS_CANCELLED );\r
+ return CL_INVALID_PARAMETER;\r
}\r
\r
AL_EXIT( AL_DBG_CM );\r
}\r
\r
\r
-static ib_api_status_t\r
-__proxy_cep_cb(\r
- IN ib_al_handle_t h_al,\r
- IN ib_cep_t* const p_cep )\r
-{\r
- ib_api_status_t status;\r
- IRP *p_irp;\r
-\r
- AL_ENTER( AL_DBG_CM );\r
-\r
- status = al_cep_xchg_irp( h_al, p_cep->cid, NULL, &p_irp );\r
- CL_ASSERT( status != IB_INVALID_HANDLE );\r
- if( p_irp )\r
- __complete_get_event_ioctl( h_al, p_irp, STATUS_SUCCESS );\r
-\r
- /*\r
- * Note that we always return IB_ERROR here. This causes the CEP manager\r
- * to signal the callback event. While this can result in more calls to\r
- * this function, it does eliminate potential races between destruction\r
- * and callbacks.\r
- */\r
- AL_EXIT( AL_DBG_CM );\r
- return IB_ERROR;\r
-}\r
-\r
-\r
-\r
cl_status_t cep_ioctl(\r
IN cl_ioctl_handle_t h_ioctl,\r
OUT size_t *p_ret_bytes )\r
al_pfn_cep_cb_t pfn_cb;\r
ib_al_handle_t h_al;\r
cl_list_item_t al_item;\r
- cl_spinlock_t lock;\r
\r
ib_pfn_destroy_cb_t pfn_destroy_cb;\r
\r
__destroy_ucep(\r
IN ucep_t* const p_cep )\r
{\r
- cl_spinlock_destroy( &p_cep->lock );\r
if( p_cep->pfn_destroy_cb )\r
p_cep->pfn_destroy_cb( p_cep->cep.context );\r
cl_free( p_cep );\r
return IB_INSUFFICIENT_MEMORY;\r
}\r
\r
- cl_spinlock_construct( &p_cep->lock );\r
-\r
- if( cl_spinlock_init( &p_cep->lock ) != CL_SUCCESS )\r
- {\r
- __destroy_ucep( p_cep );\r
- AL_TRACE_EXIT( AL_DBG_ERROR, ("Failed to initialize event.\n") );\r
- return IB_INSUFFICIENT_MEMORY;\r
- }\r
-\r
/* Initialize to two - one for the CEP, and one for the IOCTL. */\r
p_cep->ref_cnt = 2;\r
\r
{\r
ucep_t *p_cep;\r
DWORD bytes_ret;\r
- int32_t ref_cnt;\r
\r
AL_ENTER( AL_DBG_CM );\r
\r
cl_qlist_remove_item( &h_al->cep_list, &p_cep->al_item );\r
cl_spinlock_release( &h_al->obj.lock );\r
\r
- /*\r
- * Decrement the reference count so that we stop issuing IOCTLs. Note\r
- * that we must serialize with the IOCTL completion handler to close a\r
- * race where the IOCTL completion handler could issue the next IOCTL\r
- * and have its request be passed by this one.\r
- */\r
- cl_spinlock_acquire( &p_cep->lock );\r
- ref_cnt = cl_atomic_dec( &p_cep->ref_cnt );\r
-\r
/* Destroy the kernel CEP right away. */\r
DeviceIoControl( g_al_device, UAL_DESTROY_CEP, &p_cep->cep.cid,\r
sizeof(p_cep->cep.cid), NULL, 0, &bytes_ret, NULL );\r
- cl_spinlock_release( &p_cep->lock );\r
\r
- if( !ref_cnt )\r
+ if( !cl_atomic_dec( &p_cep->ref_cnt ) )\r
{\r
/* We have no remaining refrences. */\r
__destroy_ucep( p_cep );\r
\r
p_cep = PARENT_STRUCT( p_ov, ucep_t, ov );\r
\r
- cl_atomic_inc( &p_cep->ref_cnt );\r
if( !error_code )\r
- p_cep->pfn_cb( p_cep->h_al, &p_cep->cep );\r
-\r
- /* Lock against destruction. */\r
- cl_spinlock_acquire( &p_cep->lock );\r
-\r
- /*\r
- * Under normal circumstances, the reference count here will be 3 -\r
- * one to indicate the object is alive and well, a second for this IOCTL,\r
- * and a third that we just took for the callback.\r
- *\r
- * If a user tries to destroy the CEP, it will have decremented the count\r
- * by one. Thus, if when we release the callback reference we reach 1,\r
- * we know to cleanup.\r
- */\r
- if( cl_atomic_dec( &p_cep->ref_cnt ) == 1 )\r
{\r
- /* The CEP needs to be freed. */\r
- cl_spinlock_release( &p_cep->lock );\r
- __destroy_ucep( p_cep );\r
- AL_EXIT( AL_DBG_CM );\r
- return;\r
- }\r
+ p_cep->pfn_cb( p_cep->h_al, &p_cep->cep );\r
\r
- if( !error_code )\r
- {\r
if( !DeviceIoControl( gp_cep_mgr->h_file, UAL_CEP_GET_EVENT,\r
&p_cep->cep.cid, sizeof(p_cep->cep.cid), NULL, 0,\r
NULL, &p_cep->ov ) && GetLastError() == ERROR_IO_PENDING )\r
{\r
- cl_spinlock_release( &p_cep->lock );\r
AL_EXIT( AL_DBG_CM );\r
return;\r
}\r
-\r
- AL_TRACE( AL_DBG_ERROR,\r
- ("DeviceIoControl for CEP callback request returned %d.\n",\r
- GetLastError()) );\r
+ else if( GetLastError() != ERROR_INVALID_PARAMETER )\r
+ {\r
+ /* We can get ERROR_INVALID_PARAMETER if the CEP was destroyed. */\r
+ AL_TRACE( AL_DBG_ERROR,\r
+ ("DeviceIoControl for CEP callback request returned %d.\n",\r
+ GetLastError()) );\r
+ }\r
}\r
else\r
{\r
AL_TRACE( AL_DBG_WARN,\r
- ("UAL_CEP_GET_EVENT IOCTL returned ERROR_OPERATION_ABORTED.\n") );\r
+ ("UAL_CEP_GET_EVENT IOCTL returned %d.\n", error_code) );\r
}\r
\r
/*\r
* We failed to issue the next request or the previous request was\r
* cancelled. Release the reference held by the previous IOCTL and exit.\r
*/\r
- cl_atomic_dec( &p_cep->ref_cnt );\r
- cl_spinlock_release( &p_cep->lock );\r
+ if( !cl_atomic_dec( &p_cep->ref_cnt ) )\r
+ __destroy_ucep( p_cep );\r
+\r
AL_EXIT( AL_DBG_CM );\r
}\r