--- /dev/null
+Bottom: 84611ba9217ac3e6de75f53e37b41fcca0916e87
+Top: 64105095b4aa923073d592c38bb183bf6fd53d92
+Author: Sean Hefty <sean.hefty@intel.com>
+Date: 2010-09-17 12:57:46 -0700
+
+Refresh of dapl-evd
+
+---
+
+diff --git a/trunk/ulp/dapl2/dapl/common/dapl_cno_util.c b/trunk/ulp/dapl2/dapl/common/dapl_cno_util.c
+index 2215f29..cad9747 100644
+--- a/trunk/ulp/dapl2/dapl/common/dapl_cno_util.c
++++ b/trunk/ulp/dapl2/dapl/common/dapl_cno_util.c
+@@ -148,9 +148,6 @@ void dapl_cno_dealloc(IN DAPL_CNO * cno_ptr)
+ void dapl_internal_cno_trigger(IN DAPL_CNO * cno_ptr, IN DAPL_EVD * evd_ptr)
+ {
+ DAT_RETURN dat_status;
+-#if defined(__KDAPL__)
+- DAT_EVENT event;
+-#endif /* defined(__KDAPL__) */
+
+ dat_status = DAT_SUCCESS;
+
+@@ -167,20 +164,14 @@ void dapl_internal_cno_trigger(IN DAPL_CNO * cno_ptr, IN DAPL_EVD * evd_ptr)
+ dapl_os_assert(cno_ptr->cno_state != DAPL_CNO_STATE_DEAD);
+
+ if (cno_ptr->cno_state == DAPL_CNO_STATE_UNTRIGGERED) {
+-#if !defined(__KDAPL__)
+ DAT_OS_WAIT_PROXY_AGENT agent;
+
+ /* Squirrel away wait agent, and delete link. */
+ agent = cno_ptr->cno_wait_agent;
+-#endif /* !defined(__KDAPL__) */
+
+ /* Separate assignments for windows compiler. */
+ #ifndef _WIN32
+-#if defined(__KDAPL__)
+- cno_ptr->cno_upcall = DAT_UPCALL_NULL;
+-#else
+ cno_ptr->cno_wait_agent = DAT_OS_WAIT_PROXY_AGENT_NULL;
+-#endif /* defined(__KDAPL__) */
+ #else
+ cno_ptr->cno_wait_agent.instance_data = NULL;
+ cno_ptr->cno_wait_agent.proxy_agent_func = NULL;
+@@ -200,43 +191,12 @@ void dapl_internal_cno_trigger(IN DAPL_CNO * cno_ptr, IN DAPL_EVD * evd_ptr)
+ dapl_os_unlock(&cno_ptr->header.lock);
+
+ /* Trigger the OS proxy wait agent, if one exists. */
+-#if defined(__KDAPL__)
+- dat_status = dapl_evd_dequeue((DAT_EVD_HANDLE) evd_ptr, &event);
+- while (dat_status == DAT_SUCCESS) {
+- if (cno_ptr->cno_upcall.upcall_func !=
+- (DAT_UPCALL_FUNC) NULL) {
+- cno_ptr->cno_upcall.upcall_func(cno_ptr->
+- cno_upcall.
+- instance_data,
+- &event,
+- DAT_FALSE);
+- }
+- dat_status = dapl_evd_dequeue((DAT_EVD_HANDLE) evd_ptr,
+- &event);
+- }
+-#else
+ if (agent.proxy_agent_func != (DAT_AGENT_FUNC) NULL) {
+ agent.proxy_agent_func(agent.instance_data,
+ (DAT_EVD_HANDLE) evd_ptr);
+ }
+-#endif /* defined(__KDAPL__) */
+ } else {
+ dapl_os_unlock(&cno_ptr->header.lock);
+-#if defined(__KDAPL__)
+- dat_status = dapl_evd_dequeue((DAT_EVD_HANDLE) evd_ptr, &event);
+- while (dat_status == DAT_SUCCESS) {
+- if (cno_ptr->cno_upcall.upcall_func !=
+- (DAT_UPCALL_FUNC) NULL) {
+- cno_ptr->cno_upcall.upcall_func(cno_ptr->
+- cno_upcall.
+- instance_data,
+- &event,
+- DAT_FALSE);
+- }
+- dat_status = dapl_evd_dequeue((DAT_EVD_HANDLE) evd_ptr,
+- &event);
+- }
+-#endif /* defined(__KDAPL__) */
+ }
+
+ return;
+diff --git a/trunk/ulp/dapl2/dapl/common/dapl_ep_util.c b/trunk/ulp/dapl2/dapl/common/dapl_ep_util.c
+index bd91fc7..fc911a6 100644
+--- a/trunk/ulp/dapl2/dapl/common/dapl_ep_util.c
++++ b/trunk/ulp/dapl2/dapl/common/dapl_ep_util.c
+@@ -608,13 +608,18 @@ void dapl_ep_unlink_cm(IN DAPL_EP *ep_ptr, IN dp_ib_cm_handle_t cm_ptr)
+
+ static void dapli_ep_flush_evd(DAPL_EVD *evd_ptr)
+ {
++ DAT_RETURN dat_status;
++
+ dapl_os_lock(&evd_ptr->header.lock);
+- dapls_evd_copy_cq(evd_ptr);
++ dat_status = dapls_evd_copy_cq(evd_ptr);
+ dapl_os_unlock(&evd_ptr->header.lock);
++
++ if (dat_status == DAT_QUEUE_FULL)
++ dapls_evd_post_overflow_event(evd_ptr);
+ }
+
+-void dapls_ep_flush_cqs(DAPL_EP * ep_ptr)\r
+-{\r
++void dapls_ep_flush_cqs(DAPL_EP * ep_ptr)
++{
+ if (ep_ptr->param.request_evd_handle)
+ dapli_ep_flush_evd((DAPL_EVD *) ep_ptr->param.request_evd_handle);
+
+diff --git a/trunk/ulp/dapl2/dapl/common/dapl_evd_util.c b/trunk/ulp/dapl2/dapl/common/dapl_evd_util.c
+index 675e948..2e041c1 100644
+--- a/trunk/ulp/dapl2/dapl/common/dapl_evd_util.c
++++ b/trunk/ulp/dapl2/dapl/common/dapl_evd_util.c
+@@ -160,15 +160,6 @@ dapls_evd_internal_create(DAPL_IA * ia_ptr,
+ goto bail;\r
+ }\r
+ \r
+- /*\r
+- * If we are dealing with event streams besides a CQ event stream,\r
+- * be conservative and set producer side locking. Otherwise, no.\r
+- * Note: CNO is not considered CQ event stream.\r
+- */\r
+- evd_ptr->evd_producer_locking_needed =\r
+- (!(evd_flags & (DAT_EVD_DTO_FLAG | DAT_EVD_RMR_BIND_FLAG)) ||\r
+- evd_ptr->cno_ptr);\r
+-\r
+ /* Before we setup any callbacks, transition state to OPEN. */\r
+ evd_ptr->evd_state = DAPL_EVD_STATE_OPEN;\r
+ \r
+@@ -299,7 +290,6 @@ DAPL_EVD *dapls_evd_alloc(IN DAPL_IA * ia_ptr,
+ evd_ptr->evd_flags = evd_flags;\r
+ evd_ptr->evd_enabled = DAT_TRUE;\r
+ evd_ptr->evd_waitable = DAT_TRUE;\r
+- evd_ptr->evd_producer_locking_needed = 1; /* Conservative value. */\r
+ evd_ptr->ib_cq_handle = IB_INVALID_HANDLE;\r
+ dapl_os_atomic_set(&evd_ptr->evd_ref_count, 0);\r
+ evd_ptr->catastrophic_overflow = DAT_FALSE;\r
+@@ -583,60 +573,12 @@ void dapli_evd_eh_print_cqe(IN ib_work_completion_t * cqe_ptr)
+ * Event posting code follows.\r
+ */\r
+ \r
+-/*\r
+- * These next two functions (dapli_evd_get_event and dapli_evd_post_event)\r
+- * are a pair. They are always called together, from one of the functions\r
+- * at the end of this file (dapl_evd_post_*_event).\r
+- *\r
+- * Note that if producer side locking is enabled, the first one takes the\r
+- * EVD lock and the second releases it.\r
+- */\r
+-\r
+-/* dapli_evd_get_event\r
+- *\r
+- * Get an event struct from the evd. The caller should fill in the event\r
+- * and call dapl_evd_post_event.\r
+- *\r
+- * If there are no events available, an overflow event is generated to the\r
+- * async EVD handler.\r
+- *\r
+- * If this EVD required producer locking, a successful return implies\r
+- * that the lock is held.\r
+- *\r
+- * Input:\r
+- * evd_ptr\r
+- *\r
+- * Output:\r
+- * event\r
+- *\r
+- */\r
+-\r
+-static DAT_EVENT *dapli_evd_get_event(DAPL_EVD * evd_ptr)\r
+-{\r
+- DAT_EVENT *event;\r
+-\r
+- if (evd_ptr->evd_producer_locking_needed) {\r
+- dapl_os_lock(&evd_ptr->header.lock);\r
+- }\r
+-\r
+- event = (DAT_EVENT *) dapls_rbuf_remove(&evd_ptr->free_event_queue);\r
+-\r
+- /* Release the lock if it was taken and the call failed. */\r
+- if (!event && evd_ptr->evd_producer_locking_needed) {\r
+- dapl_os_unlock(&evd_ptr->header.lock);\r
+- }\r
+-\r
+- return event;\r
+-}\r
+ \r
+ /* dapli_evd_post_event\r
+ *\r
+ * Post the <event> to the evd. If possible, invoke the evd's CNO.\r
+ * Otherwise post the event on the pending queue.\r
+ *\r
+- * If producer side locking is required, the EVD lock must be held upon\r
+- * entry to this function.\r
+- *\r
+ * Input:\r
+ * evd_ptr\r
+ * event\r
+@@ -650,7 +592,6 @@ static void
+ dapli_evd_post_event(IN DAPL_EVD * evd_ptr, IN const DAT_EVENT * event_ptr)\r
+ {\r
+ DAT_RETURN dat_status;\r
+- DAPL_CNO *cno_to_trigger = NULL;\r
+ \r
+ dapl_dbg_log(DAPL_DBG_TYPE_EVD, "%s: %s evd %p state %d\n",\r
+ __FUNCTION__, dapl_event_str(event_ptr->event_number), \r
+@@ -665,102 +606,37 @@ dapli_evd_post_event(IN DAPL_EVD * evd_ptr, IN const DAT_EVENT * event_ptr)
+ \r
+ if (evd_ptr->evd_state == DAPL_EVD_STATE_OPEN) {\r
+ /* No waiter. Arrange to trigger a CNO if it exists. */\r
+-\r
+- if (evd_ptr->evd_enabled) {\r
+- cno_to_trigger = evd_ptr->cno_ptr;\r
+- }\r
+- if (evd_ptr->evd_producer_locking_needed) {\r
+- dapl_os_unlock(&evd_ptr->header.lock);\r
+- }\r
++ if (evd_ptr->evd_enabled && evd_ptr->cno_ptr)\r
++ dapl_internal_cno_trigger(evd_ptr->cno_ptr, evd_ptr);\r
+ } else {\r
+ if (evd_ptr->evd_state == DAPL_EVD_STATE_WAITED\r
+ && (dapls_rbuf_count(&evd_ptr->pending_event_queue)\r
+ >= evd_ptr->threshold)) {\r
+- dapl_os_unlock(&evd_ptr->header.lock);\r
+ \r
+ if (evd_ptr->evd_flags & (DAT_EVD_DTO_FLAG | DAT_EVD_RMR_BIND_FLAG)) {\r
+ dapls_evd_dto_wakeup(evd_ptr);\r
+ } else {\r
+ dapl_os_wait_object_wakeup(&evd_ptr->wait_object);\r
+ }\r
+-\r
+- } else {\r
+- dapl_os_unlock(&evd_ptr->header.lock);\r
+ }\r
+ }\r
+-\r
+- if (cno_to_trigger != NULL) {\r
+- dapl_internal_cno_trigger(cno_to_trigger, evd_ptr);\r
+- }\r
+ }\r
+ \r
+-/* dapli_evd_post_event_nosignal\r
+- *\r
+- * Post the <event> to the evd. Do not do any wakeup processing.\r
+- * This function should only be called if it is known that there are\r
+- * no waiters that it is appropriate to wakeup on this EVD. An example\r
+- * of such a situation is during internal dat_evd_wait() processing.\r
+- *\r
+- * If producer side locking is required, the EVD lock must be held upon\r
+- * entry to this function.\r
+- *\r
+- * Input:\r
+- * evd_ptr\r
+- * event\r
+- *\r
+- * Output:\r
+- * none\r
+- *\r
+- */\r
+-\r
+-static void\r
+-dapli_evd_post_event_nosignal(IN DAPL_EVD * evd_ptr,\r
+- IN const DAT_EVENT * event_ptr)\r
++static DAT_EVENT *dapli_evd_get_and_init_event(IN DAPL_EVD * evd_ptr,\r
++ IN DAT_EVENT_NUMBER event_number)\r
+ {\r
+- DAT_RETURN dat_status;\r
+-\r
+- dapl_dbg_log(DAPL_DBG_TYPE_EVD, "%s: Called with event %s\n",\r
+- __FUNCTION__, dapl_event_str(event_ptr->event_number));\r
+-\r
+- dat_status = dapls_rbuf_add(&evd_ptr->pending_event_queue,\r
+- (void *)event_ptr);\r
+- dapl_os_assert(dat_status == DAT_SUCCESS);\r
+-\r
+- dapl_os_assert(evd_ptr->evd_state == DAPL_EVD_STATE_WAITED\r
+- || evd_ptr->evd_state == DAPL_EVD_STATE_OPEN);\r
++ DAT_EVENT *event_ptr;\r
+ \r
+- if (evd_ptr->evd_producer_locking_needed) {\r
+- dapl_os_unlock(&evd_ptr->header.lock);\r
++ event_ptr = (DAT_EVENT *) dapls_rbuf_remove(&evd_ptr->free_event_queue);\r
++ if (event_ptr) {\r
++ event_ptr->evd_handle = (DAT_EVD_HANDLE) evd_ptr;\r
++ event_ptr->event_number = event_number;\r
+ }\r
+-}\r
+ \r
+-/* dapli_evd_format_overflow_event\r
+- *\r
+- * format an overflow event for posting\r
+- *\r
+- * Input:\r
+- * evd_ptr\r
+- * event_ptr\r
+- *\r
+- * Output:\r
+- * none\r
+- *\r
+- */\r
+-static void\r
+-dapli_evd_format_overflow_event(IN DAPL_EVD * evd_ptr,\r
+- OUT DAT_EVENT * event_ptr)\r
+-{\r
+- DAPL_IA *ia_ptr;\r
+-\r
+- ia_ptr = evd_ptr->header.owner_ia;\r
+-\r
+- event_ptr->evd_handle = (DAT_EVD_HANDLE) evd_ptr;\r
+- event_ptr->event_number = DAT_ASYNC_ERROR_EVD_OVERFLOW;\r
+- event_ptr->event_data.asynch_error_event_data.dat_handle =\r
+- (DAT_HANDLE) ia_ptr;\r
++ return event_ptr;\r
+ }\r
+ \r
+-/* dapli_evd_post_overflow_event\r
++/* dapls_evd_post_overflow_event\r
+ *\r
+ * post an overflow event\r
+ *\r
+@@ -772,52 +648,38 @@ dapli_evd_format_overflow_event(IN DAPL_EVD * evd_ptr,
+ * none\r
+ *\r
+ */\r
+-static void\r
+-dapli_evd_post_overflow_event(IN DAPL_EVD * async_evd_ptr,\r
+- IN DAPL_EVD * overflow_evd_ptr)\r
++void\r
++dapls_evd_post_overflow_event(IN DAPL_EVD * evd_ptr)\r
+ {\r
+- DAT_EVENT *overflow_event;\r
++ DAPL_EVD *async_evd_ptr = evd_ptr->header.owner_ia->async_error_evd;\r
++ DAT_EVENT *event_ptr;\r
+ \r
+- /* The overflow_evd_ptr mght be the same as evd.\r
+- * In that case we've got a catastrophic overflow.\r
+- */\r
+- dapl_log(DAPL_DBG_TYPE_WARN,\r
+- " WARNING: overflow event on EVD %p/n", overflow_evd_ptr);\r
+	dapl_log(DAPL_DBG_TYPE_WARN, " WARNING: overflow event on EVD %p\n", evd_ptr);\r
+ \r
+- if (async_evd_ptr == overflow_evd_ptr) {\r
+- async_evd_ptr->catastrophic_overflow = DAT_TRUE;\r
+- async_evd_ptr->evd_state = DAPL_EVD_STATE_DEAD;\r
+- return;\r
+- }\r
++ dapl_os_lock(&async_evd_ptr->header.lock);\r
+ \r
+- overflow_event = dapli_evd_get_event(overflow_evd_ptr);\r
+- if (!overflow_event) {\r
+- /* this is not good */\r
+- overflow_evd_ptr->catastrophic_overflow = DAT_TRUE;\r
+- overflow_evd_ptr->evd_state = DAPL_EVD_STATE_DEAD;\r
+- return;\r
+- }\r
+- dapli_evd_format_overflow_event(overflow_evd_ptr, overflow_event);\r
+- dapli_evd_post_event(overflow_evd_ptr, overflow_event);\r
++	/* The overflow evd_ptr might be the same as the async evd.\r
++ * In that case we've got a catastrophic overflow.\r
++ */\r
++ if (async_evd_ptr == evd_ptr)\r
++ goto err;\r
++\r
++ event_ptr = dapli_evd_get_and_init_event(async_evd_ptr,\r
++ DAT_ASYNC_ERROR_EVD_OVERFLOW);\r
++ if (!event_ptr)\r
++ goto err;\r
++ \r
++ event_ptr->event_data.asynch_error_event_data.dat_handle =\r
++ (DAT_HANDLE) evd_ptr->header.owner_ia;\r
+ \r
++ dapli_evd_post_event(async_evd_ptr, event_ptr);\r
++ dapl_os_unlock(&async_evd_ptr->header.lock);\r
+ return;\r
+-}\r
+ \r
+-static DAT_EVENT *dapli_evd_get_and_init_event(IN DAPL_EVD * evd_ptr,\r
+- IN DAT_EVENT_NUMBER event_number)\r
+-{\r
+- DAT_EVENT *event_ptr;\r
+-\r
+- event_ptr = dapli_evd_get_event(evd_ptr);\r
+- if (NULL == event_ptr) {\r
+- dapli_evd_post_overflow_event(evd_ptr->header.owner_ia->\r
+- async_error_evd, evd_ptr);\r
+- } else {\r
+- event_ptr->evd_handle = (DAT_EVD_HANDLE) evd_ptr;\r
+- event_ptr->event_number = event_number;\r
+- }\r
+-\r
+- return event_ptr;\r
++err:\r
++ async_evd_ptr->catastrophic_overflow = DAT_TRUE;\r
++ async_evd_ptr->evd_state = DAPL_EVD_STATE_DEAD;\r
++ dapl_os_unlock(&async_evd_ptr->header.lock);\r
+ }\r
+ \r
+ DAT_RETURN\r
+@@ -829,17 +691,11 @@ dapls_evd_post_cr_arrival_event(IN DAPL_EVD * evd_ptr,
+ DAT_CR_HANDLE cr_handle)\r
+ {\r
+ DAT_EVENT *event_ptr;\r
+- event_ptr = dapli_evd_get_and_init_event(evd_ptr, event_number);\r
+- /*\r
+- * Note event lock may be held on successful return\r
+- * to be released by dapli_evd_post_event(), if provider side locking\r
+- * is needed.\r
+- */\r
+ \r
+- if (event_ptr == NULL) {\r
+- return DAT_ERROR(DAT_INSUFFICIENT_RESOURCES,\r
+- DAT_RESOURCE_MEMORY);\r
+- }\r
++ dapl_os_lock(&evd_ptr->header.lock);\r
++ event_ptr = dapli_evd_get_and_init_event(evd_ptr, event_number);\r
++ if (event_ptr == NULL)\r
++ goto err;\r
+ \r
+ event_ptr->event_data.cr_arrival_event_data.sp_handle = sp_handle;\r
+ event_ptr->event_data.cr_arrival_event_data.local_ia_address_ptr\r
+@@ -848,8 +704,13 @@ dapls_evd_post_cr_arrival_event(IN DAPL_EVD * evd_ptr,
+ event_ptr->event_data.cr_arrival_event_data.cr_handle = cr_handle;\r
+ \r
+ dapli_evd_post_event(evd_ptr, event_ptr);\r
+-\r
++ dapl_os_unlock(&evd_ptr->header.lock);\r
+ return DAT_SUCCESS;\r
++\r
++err:\r
++ dapl_os_unlock(&evd_ptr->header.lock);\r
++ dapls_evd_post_overflow_event(evd_ptr);\r
++ return DAT_ERROR(DAT_INSUFFICIENT_RESOURCES, DAT_RESOURCE_MEMORY);\r
+ }\r
+ \r
+ DAT_RETURN\r
+@@ -860,17 +721,11 @@ dapls_evd_post_connection_event(IN DAPL_EVD * evd_ptr,
+ IN DAT_PVOID private_data)\r
+ {\r
+ DAT_EVENT *event_ptr;\r
+- event_ptr = dapli_evd_get_and_init_event(evd_ptr, event_number);\r
+- /*\r
+- * Note event lock may be held on successful return\r
+- * to be released by dapli_evd_post_event(), if provider side locking\r
+- * is needed.\r
+- */\r
+ \r
+- if (event_ptr == NULL) {\r
+- return DAT_ERROR(DAT_INSUFFICIENT_RESOURCES,\r
+- DAT_RESOURCE_MEMORY);\r
+- }\r
++ dapl_os_lock(&evd_ptr->header.lock);\r
++ event_ptr = dapli_evd_get_and_init_event(evd_ptr, event_number);\r
++ if (event_ptr == NULL)\r
++ goto err;\r
+ \r
+ event_ptr->event_data.connect_event_data.ep_handle = ep_handle;\r
+ event_ptr->event_data.connect_event_data.private_data_size\r
+@@ -878,8 +733,13 @@ dapls_evd_post_connection_event(IN DAPL_EVD * evd_ptr,
+ event_ptr->event_data.connect_event_data.private_data = private_data;\r
+ \r
+ dapli_evd_post_event(evd_ptr, event_ptr);\r
+-\r
++ dapl_os_unlock(&evd_ptr->header.lock);\r
+ return DAT_SUCCESS;\r
++\r
++err:\r
++ dapl_os_unlock(&evd_ptr->header.lock);\r
++ dapls_evd_post_overflow_event(evd_ptr);\r
++ return DAT_ERROR(DAT_INSUFFICIENT_RESOURCES, DAT_RESOURCE_MEMORY);\r
+ }\r
+ \r
+ DAT_RETURN\r
+@@ -888,27 +748,27 @@ dapls_evd_post_async_error_event(IN DAPL_EVD * evd_ptr,
+ IN DAT_IA_HANDLE ia_handle)\r
+ {\r
+ DAT_EVENT *event_ptr;\r
+- event_ptr = dapli_evd_get_and_init_event(evd_ptr, event_number);\r
+- /*\r
+- * Note event lock may be held on successful return\r
+- * to be released by dapli_evd_post_event(), if provider side locking\r
+- * is needed.\r
+- */\r
++\r
+ dapl_log(DAPL_DBG_TYPE_WARN,\r
+ " WARNING: async event - %s evd=%p/n",\r
+ dapl_event_str(event_number), evd_ptr);\r
+ \r
+- if (event_ptr == NULL) {\r
+- return DAT_ERROR(DAT_INSUFFICIENT_RESOURCES,\r
+- DAT_RESOURCE_MEMORY);\r
+- }\r
++ dapl_os_lock(&evd_ptr->header.lock);\r
++ event_ptr = dapli_evd_get_and_init_event(evd_ptr, event_number);\r
++ if (event_ptr == NULL)\r
++ goto err;\r
+ \r
+ event_ptr->event_data.asynch_error_event_data.dat_handle =\r
+ (DAT_HANDLE) ia_handle;\r
+ \r
+ dapli_evd_post_event(evd_ptr, event_ptr);\r
+-\r
++ dapl_os_unlock(&evd_ptr->header.lock);\r
+ return DAT_SUCCESS;\r
++\r
++err:\r
++ dapl_os_unlock(&evd_ptr->header.lock);\r
++ dapls_evd_post_overflow_event(evd_ptr);\r
++ return DAT_ERROR(DAT_INSUFFICIENT_RESOURCES, DAT_RESOURCE_MEMORY);\r
+ }\r
+ \r
+ DAT_RETURN\r
+@@ -917,23 +777,22 @@ dapls_evd_post_software_event(IN DAPL_EVD * evd_ptr,
+ IN DAT_PVOID pointer)\r
+ {\r
+ DAT_EVENT *event_ptr;\r
+- event_ptr = dapli_evd_get_and_init_event(evd_ptr, event_number);\r
+- /*\r
+- * Note event lock may be held on successful return\r
+- * to be released by dapli_evd_post_event(), if provider side locking\r
+- * is needed.\r
+- */\r
+ \r
+- if (event_ptr == NULL) {\r
+- return DAT_ERROR(DAT_INSUFFICIENT_RESOURCES,\r
+- DAT_RESOURCE_MEMORY);\r
+- }\r
++ dapl_os_lock(&evd_ptr->header.lock);\r
++ event_ptr = dapli_evd_get_and_init_event(evd_ptr, event_number);\r
++ if (event_ptr == NULL)\r
++ goto err;\r
+ \r
+ event_ptr->event_data.software_event_data.pointer = pointer;\r
+ \r
+ dapli_evd_post_event(evd_ptr, event_ptr);\r
+-\r
++ dapl_os_unlock(&evd_ptr->header.lock);\r
+ return DAT_SUCCESS;\r
++\r
++err:\r
++ dapl_os_unlock(&evd_ptr->header.lock);\r
++ dapls_evd_post_overflow_event(evd_ptr);\r
++ return DAT_ERROR(DAT_INSUFFICIENT_RESOURCES, DAT_RESOURCE_MEMORY);\r
+ }\r
+ \r
+ /*\r
+@@ -960,27 +819,58 @@ dapls_evd_post_generic_event(IN DAPL_EVD * evd_ptr,
+ {\r
+ DAT_EVENT *event_ptr;\r
+ \r
++ dapl_os_lock(&evd_ptr->header.lock);\r
+ event_ptr = dapli_evd_get_and_init_event(evd_ptr, event_number);\r
+- /*\r
+- * Note event lock may be held on successful return\r
+- * to be released by dapli_evd_post_event(), if provider side locking\r
+- * is needed.\r
+- */\r
+-\r
+- if (event_ptr == NULL) {\r
+- return DAT_ERROR(DAT_INSUFFICIENT_RESOURCES,\r
+- DAT_RESOURCE_MEMORY);\r
+- }\r
++ if (event_ptr == NULL)\r
++ goto err;\r
+ \r
+ event_ptr->event_data = *data;\r
+ \r
+ dapli_evd_post_event(evd_ptr, event_ptr);\r
+-\r
++ dapl_os_unlock(&evd_ptr->header.lock);\r
+ return DAT_SUCCESS;\r
++\r
++err:\r
++ dapl_os_unlock(&evd_ptr->header.lock);\r
++ dapls_evd_post_overflow_event(evd_ptr);\r
++ return DAT_ERROR(DAT_INSUFFICIENT_RESOURCES, DAT_RESOURCE_MEMORY);\r
+ }\r
+ \r
+ #ifdef DAT_EXTENSIONS\r
+ DAT_RETURN\r
++dapls_evd_do_post_cr_event_ext(IN DAPL_EVD * evd_ptr,\r
++ IN DAT_EVENT_NUMBER event_number,\r
++ IN DAPL_SP *sp_ptr,\r
++ IN DAPL_CR *cr_ptr,\r
++ IN DAT_PVOID ext_data)\r
++{\r
++ DAT_EVENT *event_ptr;\r
++\r
++ dapl_os_lock(&evd_ptr->header.lock);\r
++ event_ptr = dapli_evd_get_and_init_event(evd_ptr, event_number);\r
++ if (event_ptr == NULL)\r
++ goto err;\r
++\r
++ event_ptr->event_data.cr_arrival_event_data.sp_handle.psp_handle =\r
++ (DAT_PSP_HANDLE) sp_ptr;\r
++ event_ptr->event_data.cr_arrival_event_data.local_ia_address_ptr =\r
++ (DAT_IA_ADDRESS_PTR) &sp_ptr->header.owner_ia->hca_ptr->hca_address;\r
++ event_ptr->event_data.cr_arrival_event_data.conn_qual = sp_ptr->conn_qual;\r
++ event_ptr->event_data.cr_arrival_event_data.cr_handle = (DAT_CR_HANDLE) cr_ptr;\r
++\r
++ dapl_os_memcpy(&event_ptr->event_extension_data[0], ext_data, 64);\r
++\r
++ dapli_evd_post_event(sp_ptr->evd_handle, event_ptr);\r
++ dapl_os_unlock(&evd_ptr->header.lock);\r
++ return DAT_SUCCESS;\r
++\r
++err:\r
++ dapl_os_unlock(&evd_ptr->header.lock);\r
++ dapls_evd_post_overflow_event(evd_ptr);\r
++ return DAT_ERROR(DAT_INSUFFICIENT_RESOURCES, DAT_RESOURCE_MEMORY);\r
++}\r
++\r
++DAT_RETURN\r
+ dapls_evd_post_cr_event_ext(IN DAPL_SP * sp_ptr,\r
+ IN DAT_EVENT_NUMBER event_number,\r
+ IN dp_ib_cm_handle_t ib_cm_handle,\r
+@@ -990,7 +880,6 @@ dapls_evd_post_cr_event_ext(IN DAPL_SP * sp_ptr,
+ DAPL_CR *cr_ptr;\r
+ DAPL_EP *ep_ptr;\r
+ DAT_EVENT *event_ptr;\r
+- DAT_SP_HANDLE sp_handle;\r
+ \r
+ dapl_os_lock(&sp_ptr->header.lock);\r
+ if (sp_ptr->listening == DAT_FALSE) {\r
+@@ -1079,36 +968,8 @@ dapls_evd_post_cr_event_ext(IN DAPL_SP * sp_ptr,
+ /* link the CR onto the SP so we can pick it up later */\r
+ dapl_sp_link_cr(sp_ptr, cr_ptr);\r
+ \r
+- /* assign sp_ptr to union to avoid typecast errors from some compilers */\r
+- sp_handle.psp_handle = (DAT_PSP_HANDLE) sp_ptr;\r
+-\r
+- /* Post the event. */\r
+-\r
+- /*\r
+- * Note event lock may be held on successful return\r
+- * to be released by dapli_evd_post_event(), if provider side locking\r
+- * is needed.\r
+- */\r
+- event_ptr = dapli_evd_get_and_init_event(sp_ptr->evd_handle,\r
+- event_number);\r
+- if (event_ptr == NULL)\r
+- return DAT_ERROR(DAT_INSUFFICIENT_RESOURCES,\r
+- DAT_RESOURCE_MEMORY);\r
+-\r
+- event_ptr->event_data.cr_arrival_event_data.sp_handle = sp_handle;\r
+- event_ptr->event_data.cr_arrival_event_data.local_ia_address_ptr =\r
+- (DAT_IA_ADDRESS_PTR) & sp_ptr->header.owner_ia->hca_ptr->\r
+- hca_address;\r
+- event_ptr->event_data.cr_arrival_event_data.conn_qual =\r
+- sp_ptr->conn_qual;\r
+- event_ptr->event_data.cr_arrival_event_data.cr_handle =\r
+- (DAT_HANDLE) cr_ptr;\r
+-\r
+- dapl_os_memcpy(&event_ptr->event_extension_data[0], ext_data, 64);\r
+-\r
+- dapli_evd_post_event(sp_ptr->evd_handle, event_ptr);\r
+-\r
+- return DAT_SUCCESS;\r
++ return dapls_evd_do_post_cr_event_ext(sp_ptr->evd_handle, event_number,\r
++ sp_ptr, cr_ptr, ext_data);\r
+ }\r
+ \r
+ DAT_RETURN\r
+@@ -1120,15 +981,11 @@ dapls_evd_post_connection_event_ext(IN DAPL_EVD * evd_ptr,
+ IN DAT_PVOID ext_data)\r
+ {\r
+ DAT_EVENT *event_ptr;\r
++\r
++ dapl_os_lock(&evd_ptr->header.lock);\r
+ event_ptr = dapli_evd_get_and_init_event(evd_ptr, event_number);\r
+- /*\r
+- * Note event lock may be held on successful return\r
+- * to be released by dapli_evd_post_event(), if provider side locking\r
+- * is needed.\r
+- */\r
+ if (event_ptr == NULL)\r
+- return DAT_ERROR(DAT_INSUFFICIENT_RESOURCES,\r
+- DAT_RESOURCE_MEMORY);\r
++ goto err;\r
+ \r
+ event_ptr->event_data.connect_event_data.ep_handle = ep_handle;\r
+ event_ptr->event_data.connect_event_data.private_data_size\r
+@@ -1138,8 +995,13 @@ dapls_evd_post_connection_event_ext(IN DAPL_EVD * evd_ptr,
+ dapl_os_memcpy(&event_ptr->event_extension_data[0], ext_data, 64);\r
+ \r
+ dapli_evd_post_event(evd_ptr, event_ptr);\r
+-\r
++ dapl_os_unlock(&evd_ptr->header.lock);\r
+ return DAT_SUCCESS;\r
++\r
++err:\r
++ dapl_os_unlock(&evd_ptr->header.lock);\r
++ dapls_evd_post_overflow_event(evd_ptr);\r
++ return DAT_ERROR(DAT_INSUFFICIENT_RESOURCES, DAT_RESOURCE_MEMORY);\r
+ }\r
+ #endif\r
+ \r
+@@ -1331,18 +1193,8 @@ dapli_evd_cqe_to_event(IN DAPL_EVD * evd_ptr,
+ * Copy all entries on a CQ associated with the EVD onto that EVD\r
+ * Up to caller to handle races, if any. Note that no EVD waiters will\r
+ * be awoken by this copy.\r
+- *\r
+- * Input:\r
+- * evd_ptr\r
+- *\r
+- * Output:\r
+- * None\r
+- *\r
+- * Returns:\r
+- * none\r
+- *\r
+ */\r
+-void dapls_evd_copy_cq(DAPL_EVD * evd_ptr)\r
++DAT_RETURN dapls_evd_copy_cq(DAPL_EVD * evd_ptr)\r
+ {\r
+ ib_work_completion_t cur_cqe;\r
+ DAT_RETURN dat_status;\r
+@@ -1350,7 +1202,7 @@ void dapls_evd_copy_cq(DAPL_EVD * evd_ptr)
+ \r
+ if (evd_ptr->ib_cq_handle == IB_INVALID_HANDLE) {\r
+ /* Nothing to do if no CQ. */\r
+- return;\r
++ return DAT_QUEUE_EMPTY;\r
+ }\r
+ \r
+ while (1) {\r
+@@ -1369,14 +1221,9 @@ void dapls_evd_copy_cq(DAPL_EVD * evd_ptr)
+ * Can use DAT_DTO_COMPLETION_EVENT because dapli_evd_cqe_to_event\r
+ * will overwrite.\r
+ */\r
+-\r
+- event =\r
+- dapli_evd_get_and_init_event(evd_ptr,\r
+- DAT_DTO_COMPLETION_EVENT);\r
+- if (event == NULL) {\r
+- /* We've already attempted the overflow post; return. */\r
+- return;\r
+- }\r
++ event = dapli_evd_get_and_init_event(evd_ptr, DAT_DTO_COMPLETION_EVENT);\r
++ if (event == NULL)\r
++ return DAT_QUEUE_FULL;\r
+ \r
+ dapli_evd_cqe_to_event(evd_ptr, &cur_cqe, event);\r
+ \r
+@@ -1389,6 +1236,7 @@ void dapls_evd_copy_cq(DAPL_EVD * evd_ptr)
+ dat_status);\r
+ dapl_os_assert(!"Bad return from dapls_ib_completion_poll");\r
+ }\r
++ return dat_status;\r
+ }\r
+ \r
+ /*\r
+diff --git a/trunk/ulp/dapl2/dapl/common/dapl_evd_util.h b/trunk/ulp/dapl2/dapl/common/dapl_evd_util.h
+index e5a7c3f..9a68c4f 100644
+--- a/trunk/ulp/dapl2/dapl/common/dapl_evd_util.h
++++ b/trunk/ulp/dapl2/dapl/common/dapl_evd_util.h
+@@ -165,11 +165,14 @@ extern void dapl_evd_qp_async_error_callback (
+ IN ib_error_record_t * cause_ptr,
+ IN void * context);
+
+-extern void dapls_evd_copy_cq (
++extern DAT_RETURN dapls_evd_copy_cq (
+ DAPL_EVD *evd_ptr);
+
+ extern DAT_RETURN dapls_evd_cq_poll_to_event (
+ IN DAPL_EVD *evd_ptr,
+ OUT DAT_EVENT *event);
+
++extern void dapls_evd_post_overflow_event (\r
++ IN DAPL_EVD *evd_ptr);\r
++
+ #endif
+diff --git a/trunk/ulp/dapl2/dapl/include/dapl.h b/trunk/ulp/dapl2/dapl/include/dapl.h
+index 8dab61e..a522f15 100644
+--- a/trunk/ulp/dapl2/dapl/include/dapl.h
++++ b/trunk/ulp/dapl2/dapl/include/dapl.h
+@@ -349,9 +349,6 @@ struct dapl_evd
+ DAT_BOOLEAN evd_enabled; /* For attached CNO. */
+ DAT_BOOLEAN evd_waitable; /* EVD state. */
+
+- /* Derived from evd_flags; see dapls_evd_internal_create. */
+- DAT_BOOLEAN evd_producer_locking_needed;
+-
+ /* Every EVD has a CQ unless it is a SOFTWARE_EVENT only EVD */
+ ib_cq_handle_t ib_cq_handle;
+
+diff --git a/trunk/ulp/dapl2/dapl/udapl/dapl_evd_set_unwaitable.c b/trunk/ulp/dapl2/dapl/udapl/dapl_evd_set_unwaitable.c
+index 718e433..36b632a 100644
+--- a/trunk/ulp/dapl2/dapl/udapl/dapl_evd_set_unwaitable.c
++++ b/trunk/ulp/dapl2/dapl/udapl/dapl_evd_set_unwaitable.c
+@@ -71,7 +71,6 @@ DAT_RETURN DAT_API dapl_evd_set_unwaitable(IN DAT_EVD_HANDLE evd_handle)
+ }
+ dapl_os_lock(&evd_ptr->header.lock);
+ evd_ptr->evd_waitable = DAT_FALSE;
+- dapl_os_unlock(&evd_ptr->header.lock);
+
+ /*
+ * If this evd is waiting, wake it up. There is an obvious race
+@@ -85,6 +84,7 @@ DAT_RETURN DAT_API dapl_evd_set_unwaitable(IN DAT_EVD_HANDLE evd_handle)
+ else
+ dapl_os_wait_object_wakeup(&evd_ptr->wait_object);
+ }
++ dapl_os_unlock(&evd_ptr->header.lock);
+ bail:
+ return dat_status;
+ }
+diff --git a/trunk/ulp/dapl2/dapl/udapl/dapl_evd_wait.c b/trunk/ulp/dapl2/dapl/udapl/dapl_evd_wait.c
+index 135951c..33cec50 100644
+--- a/trunk/ulp/dapl2/dapl/udapl/dapl_evd_wait.c
++++ b/trunk/ulp/dapl2/dapl/udapl/dapl_evd_wait.c
+@@ -168,12 +168,12 @@ DAT_RETURN DAT_API dapl_evd_wait(IN DAT_EVD_HANDLE evd_handle,
+ * return right away if the ib_cq_handle associate with these evd
+ * equal to IB_INVALID_HANDLE
+ */
+- dapls_evd_copy_cq(evd_ptr);
++ dat_status = dapls_evd_copy_cq(evd_ptr);
++ if (dat_status == DAT_QUEUE_FULL)
++ goto bail;
+
+- if (dapls_rbuf_count(&evd_ptr->pending_event_queue) >=
+- threshold) {
++ if (dapls_rbuf_count(&evd_ptr->pending_event_queue) >= threshold)
+ break;
+- }
+
+ /*
+ * Do not enable the completion notification if this evd is not
+@@ -264,6 +264,8 @@ DAT_RETURN DAT_API dapl_evd_wait(IN DAT_EVD_HANDLE evd_handle,
+ if (dat_status) {
+ dapl_dbg_log(DAPL_DBG_TYPE_RTN,
+ "dapl_evd_wait () returns 0x%x\n", dat_status);
++ if (dat_status == DAT_QUEUE_FULL)
++ dapls_evd_post_overflow_event(evd_ptr);
+ }
+ return dat_status;
+ }
+diff --git a/trunk/ulp/librdmacm/src/cma.cpp b/trunk/ulp/librdmacm/src/cma.cpp
+index 835d020..215bdf6 100644
+--- a/trunk/ulp/librdmacm/src/cma.cpp
++++ b/trunk/ulp/librdmacm/src/cma.cpp
+@@ -765,6 +765,10 @@ static int ucma_valid_param(struct cma_id_private *id_priv,
+ return rdma_seterrno(EINVAL);\r
+ }\r
+ \r
++ if (param->private_data_len > sizeof(((WV_CONNECT_PARAM *) NULL)->Data)) {\r
++ return rdma_seterrno(EINVAL);\r
++ }\r
++\r
+ return 0;\r
+ }