}\r
}\r
\r
+ NdisMGetDeviceProperty(h_adapter, &p_adapter->pdo, NULL, NULL, NULL, NULL);\r
+ ASSERT(p_adapter->pdo != NULL);\r
+\r
p_adapter->p_stat = ipoib_st_dev_add();\r
if ( p_adapter->p_stat ) \r
p_adapter->p_stat->p_adapter = p_adapter;\r
{\r
cl_obj_t obj;\r
NDIS_HANDLE h_adapter;\r
+ PDEVICE_OBJECT pdo;\r
ipoib_ifc_data_t guids;\r
\r
cl_list_item_t entry;\r
static void __port_do_mcast_garbage(ipoib_port_t* const p_port );\r
\r
\r
-static void __recv_cb_dpc(KDPC *p_gc_dpc,void *context,void *s_arg1, void *s_arg2);\r
-\r
-\r
/******************************************************************************\r
*\r
* Declarations\r
\r
__endpt_mgr_construct( p_port );\r
\r
+ p_port->pPoWorkItem = NULL;\r
+\r
KeInitializeEvent( &p_port->sa_event, NotificationEvent, TRUE );\r
KeInitializeEvent( &p_port->leave_mcast_event, NotificationEvent, TRUE );\r
\r
p_port->port_num = p_pnp_rec->p_port_attr->port_num;\r
p_port->p_adapter = p_adapter;\r
\r
+ p_port->pPoWorkItem = IoAllocateWorkItem(p_adapter->pdo);\r
+ if( p_port->pPoWorkItem == NULL ) {\r
+ IPOIB_PRINT_EXIT( TRACE_LEVEL_ERROR, IPOIB_DBG_ERROR, ("IoAllocateWorkItem returned NULL\n") );\r
+ return IB_ERROR;\r
+ }\r
+ \r
cl_status = cl_spinlock_init( &p_port->send_lock );\r
if( cl_status != CL_SUCCESS )\r
{\r
return status;\r
}\r
\r
- KeInitializeDpc(&p_port->recv_dpc,(PKDEFERRED_ROUTINE)__recv_cb_dpc,p_port);\r
-\r
-\r
/* Initialize multicast garbage collector timer and DPC object */\r
KeInitializeDpc(&p_port->gc_dpc,(PKDEFERRED_ROUTINE)__port_mcast_garbage_dpc,p_port);\r
KeInitializeTimerEx(&p_port->gc_timer,SynchronizationTimer);\r
\r
cl_obj_deinit( p_obj );\r
\r
+ IoFreeWorkItem( p_port->pPoWorkItem );\r
cl_free( p_port );\r
\r
IPOIB_EXIT( IPOIB_DBG_INIT );\r
IPOIB_EXIT( IPOIB_DBG_RECV );\r
}\r
\r
-static void __recv_cb_dpc(KDPC *p_gc_dpc,void *context,void * s_arg1 , void * s_arg2)
-{
+/* Forward declaration: polls the recv CQ once; returns TRUE if more
+ * completions remain. Writes the number of receives handled to *p_recv_cnt.
+ */
+static BOOLEAN
+__recv_cb_internal(
+	IN	const	ib_cq_handle_t				h_cq,
+	IN	void	*cq_context,
+	IN	uint32_t	*p_recv_cnt
+	);

-	ipoib_port_t *p_port = context;

-	UNREFERENCED_PARAMETER(p_gc_dpc);
-	UNREFERENCED_PARAMETER(s_arg1);
-	UNREFERENCED_PARAMETER(s_arg2);
+/* Work-item routine that drains the receive CQ for a port.
+ * Replaces the old __recv_cb_dpc: instead of running entirely in a DPC,
+ * it polls from a system work item, raising to DISPATCH_LEVEL only around
+ * each __recv_cb_internal pass. context is the ipoib_port_t* captured when
+ * the work item was queued (which also took a port reference).
+ */
+static void
+__iopoib_WorkItem(
+	IN				DEVICE_OBJECT*				p_dev_obj,
+	IN				void*						context )
+{

+	ipoib_port_t *p_port = ( ipoib_port_t* ) context;
+	BOOLEAN WorkToDo = TRUE;
+	KIRQL irql;
+	uint32_t recv_cnt = 0;
+	uint32_t total_recv_cnt = 0;

-	__recv_cb(NULL, p_port);
-	ipoib_port_deref( p_port, ref_recv_cb );
+	UNREFERENCED_PARAMETER(p_dev_obj);

+	/* Cap the receives handled per invocation — presumably to bound the
+	 * time spent at DISPATCH_LEVEL; leftover work is requeued below. */
+	while (WorkToDo && total_recv_cnt < 512) {
+		irql = KeRaiseIrqlToDpcLevel();
+		WorkToDo = __recv_cb_internal(NULL, p_port, &recv_cnt);
+		KeLowerIrql(irql);
+		total_recv_cnt += recv_cnt;
+	}

+	if (WorkToDo) {
+		/* Hit the 512 cap with work still pending: requeue ourselves and
+		 * keep holding the port reference. */
+		IoQueueWorkItem( p_port->pPoWorkItem, __iopoib_WorkItem, DelayedWorkQueue, p_port);
+	} else {
+		// Release the reference that was taken when the work item was queued.
+		ipoib_port_deref( p_port, ref_recv_cb );
+	}
 }
\r
\r
-static void\r
-__recv_cb(\r
+static BOOLEAN\r
+__recv_cb_internal(\r
IN const ib_cq_handle_t h_cq,\r
- IN void *cq_context )\r
+ IN void *cq_context,\r
+ IN uint32_t* p_recv_cnt)\r
{\r
ipoib_port_t *p_port;\r
ib_api_status_t status;\r
int32_t pkt_cnt, recv_cnt = 0, shortage, discarded;\r
cl_qlist_t done_list, bad_list;\r
size_t i;\r
+ BOOLEAN WorkToDo = FALSE;\r
+ \r
PERF_DECLARE( RecvCompBundle );\r
PERF_DECLARE( RecvCb );\r
PERF_DECLARE( PollRecv );\r
\r
cl_perf_start( RecvCb );\r
\r
- UNUSED_PARAM( h_cq );\r
-\r
p_port = (ipoib_port_t*)cq_context;\r
\r
cl_qlist_init( &done_list );\r
\r
} while( (!p_free) && (recv_cnt < 128));\r
\r
+ *p_recv_cnt = (uint32_t)recv_cnt;\r
+\r
/* We're done looking at the endpoint map, release the reference. */\r
cl_atomic_dec( &p_port->endpt_rdr );\r
\r
* and eliminate the possibility of having a call to\r
* __endpt_mgr_insert find a duplicate.\r
*/\r
+ ASSERT(WorkToDo == FALSE);\r
cl_perf_start( RearmRecv );\r
status = p_port->p_adapter->p_ifc->rearm_cq(\r
p_port->ib_mgr.h_recv_cq, FALSE );\r
cl_perf_stop( &p_port->p_adapter->perf, RearmRecv );\r
CL_ASSERT( status == IB_SUCCESS );\r
\r
- ipoib_port_deref( p_port, ref_recv_cb );\r
} else {\r
- // Please note the reference is still up\r
- KeInsertQueueDpc(&p_port->recv_dpc, NULL, NULL);\r
+ if (h_cq) {\r
+			// Increment the reference count to ensure no one releases the object while the work item is queued.
+ ipoib_port_ref( p_port, ref_recv_cb );\r
+ IoQueueWorkItem( p_port->pPoWorkItem, __iopoib_WorkItem, DelayedWorkQueue, p_port);\r
+ WorkToDo = FALSE;\r
+ } else {\r
+ WorkToDo = TRUE;\r
+ }\r
}\r
-\r
+ ipoib_port_deref( p_port, ref_recv_cb );\r
cl_perf_stop( &p_port->p_adapter->perf, RecvCb );\r
\r
IPOIB_EXIT( IPOIB_DBG_RECV );\r
+ return WorkToDo;\r
+}\r
+\r
+\r
+/* CQ completion callback shim preserving the original __recv_cb signature.
+ * Delegates to __recv_cb_internal; the per-call receive count is not
+ * needed by callers of this wrapper, so it is discarded.
+ */
+static void
+__recv_cb(
+	IN		const	ib_cq_handle_t				h_cq,
+	IN				void						*cq_context )
+{
+	uint32_t recv_cnt;
+	
+	__recv_cb_internal(h_cq, cq_context, &recv_cnt);
 }
\r
\r
ipoib_recv_mgr_t recv_mgr;\r
ipoib_send_mgr_t send_mgr;\r
\r
- KDPC recv_dpc;\r
-\r
ipoib_endpt_mgr_t endpt_mgr;\r
\r
ipoib_endpt_t *p_local_endpt;\r
KTIMER gc_timer;\r
uint32_t bc_join_retry_cnt;\r
ib_net16_t base_lid;\r
+ PIO_WORKITEM pPoWorkItem;\r
ipoib_hdr_t hdr[1]; /* Must be last! */\r
\r
} ipoib_port_t;\r
}\r
}\r
\r
+ NdisMGetDeviceProperty(h_adapter, &p_adapter->pdo, NULL, NULL, NULL, NULL);\r
+ ASSERT(p_adapter->pdo != NULL);\r
+\r
p_adapter->p_stat = ipoib_st_dev_add();\r
if ( p_adapter->p_stat ) \r
p_adapter->p_stat->p_adapter = p_adapter;\r
{\r
cl_obj_t obj;\r
NDIS_HANDLE h_adapter;\r
+ PDEVICE_OBJECT pdo;\r
ipoib_ifc_data_t guids;\r
\r
cl_list_item_t entry;\r
static void __port_mcast_garbage_dpc(KDPC *p_gc_dpc,void *context,void *s_arg1, void *s_arg2);\r
static void __port_do_mcast_garbage(ipoib_port_t* const p_port );\r
\r
-\r
-static void __recv_cb_dpc(KDPC *p_gc_dpc,void *context,void *s_arg1, void *s_arg2);\r
-\r
#if 0\r
#ifndef _IPOIB_DEBUG_NDIS6\r
#define _IPOIB_DEBUG_NDIS6\r
\r
__endpt_mgr_construct( p_port );\r
\r
+ p_port->pPoWorkItem = NULL;\r
+\r
KeInitializeEvent( &p_port->sa_event, NotificationEvent, TRUE );\r
KeInitializeEvent( &p_port->leave_mcast_event, NotificationEvent, TRUE );\r
\r
p_port->port_num = p_pnp_rec->p_port_attr->port_num;\r
p_port->p_adapter = p_adapter;\r
\r
+ p_port->pPoWorkItem = IoAllocateWorkItem(p_adapter->pdo);\r
+ if( p_port->pPoWorkItem == NULL ) {\r
+ IPOIB_PRINT_EXIT( TRACE_LEVEL_ERROR, IPOIB_DBG_ERROR,\r
+ ("IoAllocateWorkItem returned NULL\n") );\r
+ return IB_ERROR;\r
+ }\r
+\r
cl_status = cl_spinlock_init( &p_port->send_lock );\r
if( cl_status != CL_SUCCESS )\r
{\r
return status;\r
}\r
\r
- KeInitializeDpc(&p_port->recv_dpc,(PKDEFERRED_ROUTINE)__recv_cb_dpc,p_port);\r
-\r
-\r
/* Initialize multicast garbage collector timer and DPC object */\r
KeInitializeDpc(&p_port->gc_dpc,(PKDEFERRED_ROUTINE)__port_mcast_garbage_dpc,p_port);\r
KeInitializeTimerEx(&p_port->gc_timer,SynchronizationTimer);\r
{\r
cl_free ( p_port->p_ca_attrs );\r
}\r
+\r
+ IoFreeWorkItem( p_port->pPoWorkItem );\r
cl_free( p_port );\r
\r
IPOIB_EXIT( IPOIB_DBG_INIT );\r
IPOIB_EXIT( IPOIB_DBG_RECV );\r
}\r
\r
-static void __recv_cb_dpc(KDPC *p_gc_dpc,void *context,void * s_arg1 , void * s_arg2)
+/* Forward declaration: polls the recv CQ once; returns TRUE if more
+ * completions remain. Writes the number of receives handled to *p_recv_cnt.
+ */
+static BOOLEAN
+__recv_cb_internal(
+	IN	const	ib_cq_handle_t				h_cq,
+	IN	void	*cq_context,
+	IN	uint32_t	*p_recv_cnt
+	);
+
+
+/* Work-item routine that drains the receive CQ for a port.
+ * Replaces the old __recv_cb_dpc: instead of running entirely in a DPC,
+ * it polls from a system work item, raising to DISPATCH_LEVEL only around
+ * each __recv_cb_internal pass. context is the ipoib_port_t* captured when
+ * the work item was queued (which also took a port reference).
+ */
+static void
+__iopoib_WorkItem(
+	IN				DEVICE_OBJECT*				p_dev_obj,
+	IN				void*						context )
 {

-	ipoib_port_t *p_port = (ipoib_port_t *) context;
+	ipoib_port_t *p_port = ( ipoib_port_t* ) context;
+	BOOLEAN WorkToDo = TRUE;
+	KIRQL irql;
+	uint32_t recv_cnt = 0;
+	uint32_t total_recv_cnt = 0;

-	UNREFERENCED_PARAMETER(p_gc_dpc);
-	UNREFERENCED_PARAMETER(s_arg1);
-	UNREFERENCED_PARAMETER(s_arg2);
+	UNREFERENCED_PARAMETER(p_dev_obj);

-	__recv_cb(NULL, p_port);
-	ipoib_port_deref( p_port, ref_recv_cb );
+	/* Cap the receives handled per invocation — presumably to bound the
+	 * time spent at DISPATCH_LEVEL; leftover work is requeued below. */
+	while (WorkToDo && total_recv_cnt < 512) {
+		irql = KeRaiseIrqlToDpcLevel();
+		WorkToDo = __recv_cb_internal(NULL, p_port, &recv_cnt);
+		KeLowerIrql(irql);
+		total_recv_cnt += recv_cnt;
+	}
+
+	if (WorkToDo) {
+		/* Hit the 512 cap with work still pending: requeue ourselves and
+		 * keep holding the port reference. */
+		IoQueueWorkItem( p_port->pPoWorkItem, __iopoib_WorkItem, DelayedWorkQueue, p_port);
+	} else {
+		// Release the reference that was taken when the work item was queued.
+		ipoib_port_deref( p_port, ref_recv_cb );
+	}
 }
\r
\r
-static void\r
-__recv_cb(\r
+static BOOLEAN\r
+__recv_cb_internal(\r
IN const ib_cq_handle_t h_cq,\r
- IN void *cq_context )\r
+ IN void *cq_context,\r
+ IN uint32_t* p_recv_cnt)\r
{\r
ipoib_port_t *p_port;\r
ib_api_status_t status;\r
cl_qlist_t done_list, bad_list;\r
ULONG recv_complete_flags = 0;\r
BOOLEAN res;\r
+ BOOLEAN WorkToDo = FALSE;\r
\r
PERF_DECLARE( RecvCompBundle );\r
PERF_DECLARE( RecvCb );\r
recv_cnt += __recv_mgr_filter( p_port, p_wc, &done_list, &bad_list );\r
cl_perf_stop( &p_port->p_adapter->perf, FilterRecv );\r
\r
- } while( ( !p_free ) && ( recv_cnt < 16 )); //TODO restore back to 128\r
+ } while( ( !p_free ) && ( recv_cnt < 128 )); \r
+\r
+ *p_recv_cnt = (uint32_t)recv_cnt;\r
\r
/* We're done looking at the endpoint map, release the reference. */\r
cl_atomic_dec( &p_port->endpt_rdr );\r
* and eliminate the possibility of having a call to\r
* __endpt_mgr_insert find a duplicate.\r
*/\r
+ ASSERT(WorkToDo == FALSE);\r
cl_perf_start( RearmRecv );\r
\r
status =\r
cl_perf_stop( &p_port->p_adapter->perf, RearmRecv );\r
CL_ASSERT( status == IB_SUCCESS );\r
\r
- ipoib_port_deref( p_port, ref_recv_cb );\r
} else {\r
- // Please note the reference is still up\r
- KeInsertQueueDpc(&p_port->recv_dpc, NULL, NULL);\r
+ if (h_cq) {\r
+			// Increment the reference count to ensure no one releases the object while the work item is queued.
+ ipoib_port_ref( p_port, ref_recv_cb );\r
+ IoQueueWorkItem( p_port->pPoWorkItem, __iopoib_WorkItem, DelayedWorkQueue, p_port);\r
+ WorkToDo = FALSE;\r
+ } else {\r
+ WorkToDo = TRUE;\r
+ }\r
}\r
+ ipoib_port_deref( p_port, ref_recv_cb );\r
cl_perf_stop( &p_port->p_adapter->perf, RecvCb );\r
\r
IPOIB_EXIT( IPOIB_DBG_RECV );\r
+ return WorkToDo;\r
+}\r
+ \r
+ \r
+/* CQ completion callback shim preserving the original __recv_cb signature.
+ * Delegates to __recv_cb_internal; the per-call receive count is not
+ * needed by callers of this wrapper, so it is discarded.
+ */
+static void
+__recv_cb(
+	IN		const	ib_cq_handle_t				h_cq,
+	IN				void						*cq_context )
+{
+	uint32_t recv_cnt;
+	
+	__recv_cb_internal(h_cq, cq_context, &recv_cnt);
 }
\r
\r
ipoib_send_mgr_t send_mgr;\r
ipoib_send_desc_t * p_desc;\r
\r
- KDPC recv_dpc;\r
-\r
ipoib_endpt_mgr_t endpt_mgr;\r
endpt_buf_mgr_t cm_buf_mgr;\r
endpt_recv_mgr_t cm_recv_mgr;\r
uint32_t bc_join_retry_cnt;\r
ib_net16_t base_lid;\r
LONG n_no_progress;\r
+ PIO_WORKITEM pPoWorkItem;\r
ipoib_hdr_t hdr[1]; /* Must be last! */\r
\r
} ipoib_port_t;\r