evd_ptr->ib_cq_handle->tp = &ia_ptr->hca_ptr->ib_trans;
evd_ptr->ib_cq_handle->evd = evd_ptr;
+#ifdef _OPENIB_MCM_
+ /* shadow support for TX, MPXYD */
+ if (ia_ptr->hca_ptr->ib_trans.scif_ep) {
+ ret = dapli_mix_cq_create(evd_ptr->ib_cq_handle, *cqlen);
+ if (ret)
+ goto err;
+
+ /* cross-socket: shadow both RX and TX, no IB CQ on MIC */
+ if (MXS_EP(&ia_ptr->hca_ptr->ib_trans.addr))
+ return DAT_SUCCESS;
+ }
+#endif
if (!evd_ptr->cno_ptr)
channel = ibv_create_comp_channel(ia_ptr->hca_ptr->ib_hca_handle);
else
/* update with returned cq entry size */
*cqlen = evd_ptr->ib_cq_handle->ib_cq->cqe;
-#ifdef _OPENIB_MCM_
- /* shadow support, MPXYD */
- if (ia_ptr->hca_ptr->ib_trans.scif_ep) {
- ret = dapli_mix_cq_create(evd_ptr->ib_cq_handle);
- if (ret)
- goto err;
- }
-#endif
-
dapl_dbg_log(DAPL_DBG_TYPE_UTIL,
"dapls_ib_cq_alloc: new_cq %p cqlen=%d \n",
evd_ptr->ib_cq_handle, *cqlen);
return DAT_SUCCESS;
err:
+ dapl_log(DAPL_DBG_TYPE_ERR,
+ "ib_cq_alloc ERR: new_cq %p cqlen=%d ret %d %s\n",
+ evd_ptr->ib_cq_handle, *cqlen, ret, strerror(errno));
+
if (evd_ptr->ib_cq_handle)
dapl_os_free(evd_ptr->ib_cq_handle, sizeof(struct dcm_ib_cq));
struct ibv_comp_channel *channel;
if (evd_ptr->ib_cq_handle != IB_INVALID_HANDLE) {
+
+#ifdef _OPENIB_MCM_
+ /* shadow support, MPXYD */
+ if (ia_ptr->hca_ptr->ib_trans.scif_ep) {
+ dapli_mix_cq_free(evd_ptr->ib_cq_handle);
+ if (!evd_ptr->ib_cq_handle->ib_cq) {
+ dapl_os_free(evd_ptr->ib_cq_handle,
+ sizeof(struct dcm_ib_cq));
+ evd_ptr->ib_cq_handle = IB_INVALID_HANDLE;
+ return DAT_SUCCESS;
+ }
+ }
+#endif
/* pull off CQ and EVD entries and toss */
while (ibv_poll_cq(evd_ptr->ib_cq_handle->ib_cq, 1, &wc) == 1) ;
while (dapl_evd_dequeue(evd_ptr, &event) == DAT_SUCCESS) ;
return (dapl_convert_errno(errno, "ibv_destroy_cq"));
if (!evd_ptr->cno_ptr)
ibv_destroy_comp_channel(channel);
-#ifdef _OPENIB_MCM_
- /* shadow support, MPXYD */
- if (ia_ptr->hca_ptr->ib_trans.scif_ep)
- dapli_mix_cq_free(evd_ptr->ib_cq_handle);
-#endif
+
dapl_os_free(evd_ptr->ib_cq_handle, sizeof(struct dcm_ib_cq));
evd_ptr->ib_cq_handle = IB_INVALID_HANDLE;
}
dapl_os_wait_object_wakeup(&evd_ptr->wait_object);
#ifdef _OPENIB_MCM_
- if ((evd_ptr->ib_cq_handle->tp->scif_ep) &&
- (evd_ptr->ib_cq_handle->type & DCM_CQ_SND))
+ if (evd_ptr->ib_cq_handle->tp->scif_ep &&
+ ((evd_ptr->ib_cq_handle->type & DCM_CQ_SND) ||
+ (!evd_ptr->ib_cq_handle->ib_cq))) {
dapl_os_wait_object_wakeup(&evd_ptr->wait_object);
+ }
#endif
-
/* otherwise, no wake up mechanism */
return DAT_SUCCESS;
}
DAT_RETURN
dapls_evd_dto_wait(IN DAPL_EVD * evd_ptr, IN uint32_t timeout)
{
- struct ibv_comp_channel *channel = evd_ptr->ib_cq_handle->ib_cq->channel;
+ struct ibv_comp_channel *channel;
struct ibv_cq *ibv_cq = NULL;
void *context;
int status;
evd_ptr, timeout);
#ifdef _OPENIB_MCM_
- if ((evd_ptr->ib_cq_handle->tp->scif_ep) &&
- (evd_ptr->ib_cq_handle->type & DCM_CQ_SND)) {
+ if (evd_ptr->ib_cq_handle->tp->scif_ep &&
+ ((evd_ptr->ib_cq_handle->type & DCM_CQ_SND) ||
+ (!evd_ptr->ib_cq_handle->ib_cq))) {
return (dapl_os_wait_object_wait(&evd_ptr->wait_object, timeout));
}
#endif
+ channel = evd_ptr->ib_cq_handle->ib_cq->channel;
status = dapls_wait_comp_channel(channel, timeout);
if (!status) {
if (!ibv_get_cq_event(channel, &ibv_cq, &context)) {
dapl_evd_dto_callback(tp->ib_ctx,
evd->ib_cq_handle, (void*)evd);
}
-
ibv_ack_cq_events(ibv_cq, 1);
}
}
*/
DAT_RETURN dapls_set_cq_notify(IN DAPL_IA * ia_ptr, IN DAPL_EVD * evd_ptr)
{
- if (ibv_req_notify_cq(evd_ptr->ib_cq_handle->ib_cq, 0))
+ if (evd_ptr->ib_cq_handle->ib_cq &&
+ ibv_req_notify_cq(evd_ptr->ib_cq_handle->ib_cq, 0))
return (dapl_convert_errno(errno, "notify_cq"));
else
return DAT_SUCCESS;
IN DAPL_EVD * evd_ptr,
IN ib_notification_type_t type)
{
- if (ibv_req_notify_cq(evd_ptr->ib_cq_handle->ib_cq, type))
+ if (evd_ptr->ib_cq_handle->ib_cq &&
+ ibv_req_notify_cq(evd_ptr->ib_cq_handle->ib_cq, type))
return (dapl_convert_errno(errno, "notify_cq_type"));
else
return DAT_SUCCESS;
int ret;
#ifdef _OPENIB_MCM_
- if ((evd_ptr->ib_cq_handle->tp->scif_ep) &&
- (evd_ptr->ib_cq_handle->type & DCM_CQ_SND)) {
+ if (evd_ptr->ib_cq_handle->tp->scif_ep &&
+ ((evd_ptr->ib_cq_handle->type & DCM_CQ_SND) ||
+ (!evd_ptr->ib_cq_handle->ib_cq))) {
ret = dapli_mix_cq_poll(evd_ptr->ib_cq_handle, wc_ptr);
if (ret == 1)
return DAT_SUCCESS;
return DAT_QUEUE_EMPTY;
}
#endif
-
ret = ibv_poll_cq(evd_ptr->ib_cq_handle->ib_cq, 1, wc_ptr);
if (ret == 1)
return DAT_SUCCESS;
#ifdef _OPENIB_CMA_
dp_ib_cm_handle_t conn;
#endif
- dapl_dbg_log(DAPL_DBG_TYPE_EP,
- " qp_alloc: ia_ptr %p ep_ptr %p ep_ctx_ptr %p\n",
- ia_ptr, ep_ptr, ep_ctx_ptr);
attr = &ep_ptr->param.ep_attr;
ib_pd_handle = ((DAPL_PZ *) ep_ptr->param.pz_handle)->pd_handle;
rcv_evd = (DAPL_EVD *) ep_ptr->param.recv_evd_handle;
req_evd = (DAPL_EVD *) ep_ptr->param.request_evd_handle;
+ dapl_dbg_log(DAPL_DBG_TYPE_EP,
+ " qp_alloc: ia %p ep %p ctx %p: SQ %d,%d evd %p - RQ %d,%d evd %p\n",
+ ia_ptr, ep_ptr, ep_ctx_ptr,
+ attr->max_request_dtos, attr->max_request_iov, req_evd,
+ attr->max_recv_dtos, attr->max_recv_iov, rcv_evd);
+
/*
* DAT allows usage model of EP's with no EVD's but IB does not.
* Create a CQ with zero entries under the covers to support and
channel = ibv_create_comp_channel(ia_ptr->hca_ptr->ib_hca_handle);
if (!channel)
- return (dapl_convert_errno(ENOMEM, "create_cq_chan"));
+ return (dapl_convert_errno(ENOMEM, "QP create_cq_chan"));
/* Call IB verbs to create CQ */
rcv_cq = dapl_os_alloc(sizeof(struct dcm_ib_cq));
if (!rcv_cq)
- return (dapl_convert_errno(ENOMEM, " alloc cq"));
+ return (dapl_convert_errno(ENOMEM, "QP alloc cq"));
dapl_os_memzero(rcv_cq, sizeof(struct dcm_ib_cq));
if (!rcv_cq->ib_cq) {
ibv_destroy_comp_channel(channel);
- return (dapl_convert_errno(ENOMEM, "create_cq"));
+ return (dapl_convert_errno(ENOMEM, "QP create_cq"));
}
ia_ptr->hca_ptr->ib_trans.ib_cq_empty = rcv_cq;
}
/* create QP object */
ep_ptr->qp_handle = dapl_os_alloc(sizeof(struct dcm_ib_qp));
if (!ep_ptr->qp_handle)
- return (dapl_convert_errno(ENOMEM, "create_qp"));
+ return (dapl_convert_errno(errno, "create_qp"));
dapl_os_memzero(ep_ptr->qp_handle, sizeof(struct dcm_ib_qp));
ep_ptr->qp_handle->tp = &ia_ptr->hca_ptr->ib_trans;
qp_create.qp_context = (void *)ep_ptr;
#ifdef DAT_EXTENSIONS
- if (attr->service_type == DAT_IB_SERVICE_TYPE_UD) {
+ if ((int)attr->service_type == (int)DAT_IB_SERVICE_TYPE_UD) {
#ifdef _OPENIB_CMA_
goto err;
#endif
qp_create.cap.max_send_wr = 1;
qp_create.cap.max_send_sge = 1;
}
+
+ /* Don't create any QP if MIC xsocket, QPt and QPr both on MPXYD */
+ if (!ia_ptr->hca_ptr->ib_trans.scif_ep ||
+ !MXS_EP(&ia_ptr->hca_ptr->ib_trans.addr))
#endif
- dapl_dbg_log(DAPL_DBG_TYPE_EP,
- " 1 - QP_ALLOC: QPr sq %d,%d rq %d,%d\n",
- qp_create.cap.max_send_wr,
- qp_create.cap.max_send_sge,
- qp_create.cap.max_recv_wr,
- qp_create.cap.max_recv_sge);
-
- ep_ptr->qp_handle->qp = ibv_create_qp(ib_pd_handle, &qp_create);
- if (!ep_ptr->qp_handle->qp) {
- ret = errno;
- goto err;
+ {
+ ep_ptr->qp_handle->qp = ibv_create_qp(ib_pd_handle, &qp_create);
+ if (!ep_ptr->qp_handle->qp) {
+ dapl_log(DAPL_DBG_TYPE_ERR," qp_alloc ERR %d %s line %d on device %s\n",
+ errno, strerror(errno), __LINE__ ,
+ ibv_get_device_name(ia_ptr->hca_ptr->ib_trans.ib_dev));
+ ret = errno;
+ goto err;
+ }
+ dapl_dbg_log(DAPL_DBG_TYPE_EP,
+ " QP_ALLOC: QPr 0x%x sq %d,%d rq %d,%d\n",
+ ep_ptr->qp_handle->qp->qp_num,
+ qp_create.cap.max_send_wr,
+ qp_create.cap.max_send_sge,
+ qp_create.cap.max_recv_wr,
+ qp_create.cap.max_recv_sge);
}
- dapl_dbg_log(DAPL_DBG_TYPE_EP,
- " 2 - QP_ALLOC: QPr 0x%x sq %d,%d rq %d,%d\n",
- ep_ptr->qp_handle->qp->qp_num,
- qp_create.cap.max_send_wr,
- qp_create.cap.max_send_sge,
- qp_create.cap.max_recv_wr,
- qp_create.cap.max_recv_sge);
-
#ifdef _OPENIB_MCM_
/* shadow support, MPXYD */
ep_ptr->qp_handle->qp_ctx = (uint64_t)ep_ptr;
- ep_ptr->qp_handle->qp_id = 0; /* ??? */
- if (ia_ptr->hca_ptr->ib_trans.scif_ep) { /* MIC: shadow on proxy node */
+ ep_ptr->qp_handle->qp_id = 0;
+ if (ia_ptr->hca_ptr->ib_trans.scif_ep) { /* MIC: shadow QPt on proxy */
qp_create.cap.max_inline_data = 32; /* setup for bw not latency */
qp_create.cap.max_send_wr = attr->max_request_dtos;
qp_create.cap.max_send_sge = attr->max_request_iov;
- qp_create.cap.max_recv_wr = 1;
- qp_create.cap.max_recv_sge = 2;
+ if (ep_ptr->qp_handle->qp) {
+ qp_create.cap.max_recv_wr = 1; /* MIC: unused shadow QPr on proxy */
+ qp_create.cap.max_recv_sge = 1;
+ } else {
+ qp_create.cap.max_recv_wr = attr->max_recv_dtos; /* MIC: shadow QPr on proxy */
+ qp_create.cap.max_recv_sge = attr->max_recv_iov;
+ }
dapl_dbg_log(DAPL_DBG_TYPE_EP,
- " 3 - QP_ALLOC: QPt (MPXYD) sq %d,%d rq %d,%d\n",
+ " QP_ALLOC: QPt -> (MPXYD) sq %d,%d %s rq %d,%d\n",
qp_create.cap.max_send_wr, qp_create.cap.max_send_sge,
+ ep_ptr->qp_handle->qp ? "":"QPr",
qp_create.cap.max_recv_wr, qp_create.cap.max_recv_sge);
ret = dapli_mix_qp_create(ep_ptr->qp_handle, &qp_create, req_cq, rcv_cq);
} else {
/* NON-MIC: need QPt, in case of shadowed QP's from MIC's */
qp_create.cap.max_recv_wr = 1;
- qp_create.cap.max_recv_sge = 2;
+ qp_create.cap.max_recv_sge = 1;
ep_ptr->qp_handle->sqp = ibv_create_qp(ib_pd_handle, &qp_create);
if (!ep_ptr->qp_handle->sqp) {
ret = errno;
goto err;
}
dapl_dbg_log(DAPL_DBG_TYPE_EP,
- " 3 - QP_ALLOC: QPt 0x%x sq %d,%d rq %d,%d\n",
+ " 3 - QP_ALLOC: QP (LOCAL) QPt 0x%x sq %d,%d QPr rq %d,%d\n",
ep_ptr->qp_handle->sqp->qp_num,
qp_create.cap.max_send_wr, qp_create.cap.max_send_sge,
qp_create.cap.max_recv_wr, qp_create.cap.max_recv_sge);
}
+ if (!ep_ptr->qp_handle->qp) { /* QPr and QPt both shadowed */
+ ep_ptr->qp_state = IBV_QPS_INIT;
+ return DAT_SUCCESS;
+ }
#endif
/* Setup QP attributes for INIT state on the way out */
if (dapls_modify_qp_state(ep_ptr->qp_handle->qp,
dapl_os_free(ep_ptr->qp_handle, sizeof(struct dcm_ib_qp));
ep_ptr->qp_handle = IB_INVALID_HANDLE;
-
return (dapl_convert_errno(ret, "create_qp"));
}
#endif
dapl_os_lock(&ep_ptr->header.lock);
- if (ep_ptr->qp_handle != NULL) {
+ if (ep_ptr->qp_handle) {
qp = ep_ptr->qp_handle->qp;
dapl_os_unlock(&ep_ptr->header.lock);
qp_attr.qp_state = IBV_QPS_ERR;
- ibv_modify_qp(qp, &qp_attr, IBV_QP_STATE);
+ if (qp)
+ ibv_modify_qp(qp, &qp_attr, IBV_QP_STATE);
dapls_ep_flush_cqs(ep_ptr);
#ifdef _OPENIB_CMA_
rdma_destroy_qp(cm_ptr->cm_id);
cm_ptr->cm_id->qp = NULL;
#else
- if (ibv_destroy_qp(qp)) {
+ if (qp && ibv_destroy_qp(qp)) {
dapl_log(DAPL_DBG_TYPE_ERR,
" qp_free: ibv_destroy_qp error - %s\n",
strerror(errno));
else /* NON MIC: local shadow queue */
ibv_destroy_qp(ep_ptr->qp_handle->sqp);
-
- /* TODO: flush shadow CQ on MPXYD */
#endif
} else {
dapl_os_unlock(&ep_ptr->header.lock);