ibal_port_p->subnet_timeout = mthca_port_p->subnet_timeout;\r
// ibal_port_p->local_ack_timeout = 3; // TBD: currently ~32 usec\r
HCA_PRINT(TRACE_LEVEL_VERBOSE, HCA_DBG_SHIM ,("Port %d port_guid 0x%I64x\n",\r
- ibal_port_p->port_num, ibal_port_p->port_guid));\r
+ ibal_port_p->port_num, cl_ntoh64(ibal_port_p->port_guid)));\r
}\r
}\r
\r
}\r
\r
//copy vendor specific data\r
- cl_memcpy(last_p, props.board_id, MTHCA_BOARD_ID_LEN);\r
+ cl_memcpy(last_p,to_mdev(ib_dev)->board_id, MTHCA_BOARD_ID_LEN);\r
last_p += PTR_ALIGN(MTHCA_BOARD_ID_LEN);\r
\r
// Separate the loops to ensure that table pointers are always setup\r
}\r
}\r
\r
- HCA_PRINT(TRACE_LEVEL_VERBOSE, HCA_DBG_SHIM,("port %d gid0:", port_num));\r
- for (i = 0; i < 16; i++)\r
- HCA_PRINT(TRACE_LEVEL_VERBOSE, HCA_DBG_SHIM,\r
- (" 0x%x", p_ca_attr->p_port_attr[port_num].p_gid_table[0].raw[i]));\r
- HCA_PRINT(TRACE_LEVEL_VERBOSE, HCA_DBG_SHIM,("\n"));\r
+ HCA_PRINT(TRACE_LEVEL_VERBOSE, HCA_DBG_SHIM,("port %d gid0:\n", port_num));\r
+	HCA_PRINT(TRACE_LEVEL_VERBOSE, HCA_DBG_SHIM,
+		(" 0x%02x%02x%02x%02x%02x%02x%02x%02x-0x%02x%02x%02x%02x%02x%02x%02x%02x\n", 
+ p_ca_attr->p_port_attr[port_num].p_gid_table[0].raw[0],\r
+ p_ca_attr->p_port_attr[port_num].p_gid_table[0].raw[1],\r
+ p_ca_attr->p_port_attr[port_num].p_gid_table[0].raw[2],\r
+ p_ca_attr->p_port_attr[port_num].p_gid_table[0].raw[3],\r
+ p_ca_attr->p_port_attr[port_num].p_gid_table[0].raw[4],\r
+ p_ca_attr->p_port_attr[port_num].p_gid_table[0].raw[5],\r
+ p_ca_attr->p_port_attr[port_num].p_gid_table[0].raw[6],\r
+ p_ca_attr->p_port_attr[port_num].p_gid_table[0].raw[7],\r
+ p_ca_attr->p_port_attr[port_num].p_gid_table[0].raw[8],\r
+ p_ca_attr->p_port_attr[port_num].p_gid_table[0].raw[9],\r
+ p_ca_attr->p_port_attr[port_num].p_gid_table[0].raw[10],\r
+ p_ca_attr->p_port_attr[port_num].p_gid_table[0].raw[11],\r
+ p_ca_attr->p_port_attr[port_num].p_gid_table[0].raw[12],\r
+ p_ca_attr->p_port_attr[port_num].p_gid_table[0].raw[13],\r
+ p_ca_attr->p_port_attr[port_num].p_gid_table[0].raw[14],\r
+ p_ca_attr->p_port_attr[port_num].p_gid_table[0].raw[15]));\r
}\r
\r
// set result size\r
atomic_dec(&pd->usecnt);
atomic_dec(&scq->usecnt);
atomic_dec(&rcq->usecnt);
- HCA_PRINT(TRACE_LEVEL_INFORMATION ,HCA_DBG_SHIM ,("PD%d use cnt %d, pd_handle %p, ctx %p \n",
+ HCA_PRINT(TRACE_LEVEL_INFORMATION ,HCA_DBG_QP ,("PD%d use cnt %d, pd_handle %p, ctx %p \n",
((struct mthca_pd*)pd)->pd_num, pd->usecnt, pd, pd->ucontext));
if (srq)
atomic_dec(&srq->usecnt);
create_cq->mr.length, create_cq->mr.hca_va, TRUE );
if (IS_ERR(ib_mr)) {
err = PTR_ERR(ib_mr);
- HCA_PRINT(TRACE_LEVEL_ERROR,HCA_DBG_LOW ,("ibv_reg_mr failed (%d)\n", err));
+ HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_CQ ,("ibv_reg_mr failed (%d)\n", err));
goto err_alloc_mr;
}
user_handle = create_cq->user_handle;
cq = device->create_cq(device, cqe, context, p_umv_buf);
if (IS_ERR(cq)) {
err = PTR_ERR(cq);
- HCA_PRINT(TRACE_LEVEL_ERROR,HCA_DBG_LOW ,("create_qp failed (%d)\n", err));
+ HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_CQ ,("create_cq failed (%d)\n", err));
goto err_create_cq;
}
mr->pd = pd;
atomic_inc(&pd->usecnt);
atomic_set(&mr->usecnt, 0);
- HCA_PRINT(TRACE_LEVEL_INFORMATION ,HCA_DBG_SHIM ,("PD%d use cnt %d \n",
+ HCA_PRINT(TRACE_LEVEL_INFORMATION ,HCA_DBG_MEMORY ,("PD%d use cnt %d \n",
((struct mthca_pd*)pd)->pd_num, pd->usecnt));
}
if (!ret && (mr_rereg_mask & IB_MR_REREG_PD)) {
atomic_dec(&old_pd->usecnt);
atomic_inc(&pd->usecnt);
- HCA_PRINT(TRACE_LEVEL_INFORMATION ,HCA_DBG_SHIM ,("PD%d use cnt %d \n",
+ HCA_PRINT(TRACE_LEVEL_INFORMATION ,HCA_DBG_MEMORY ,("PD%d use cnt %d \n",
((struct mthca_pd*)pd)->pd_num, pd->usecnt));
}
ret = mr->device->dereg_mr(mr);
if (!ret) {
atomic_dec(&pd->usecnt);
- HCA_PRINT(TRACE_LEVEL_INFORMATION ,HCA_DBG_SHIM ,("PD%d use cnt %d, pd_handle %p, ctx %p \n",
+ HCA_PRINT(TRACE_LEVEL_INFORMATION ,HCA_DBG_MEMORY ,("PD%d use cnt %d, pd_handle %p, ctx %p \n",
((struct mthca_pd*)pd)->pd_num, pd->usecnt, pd, pd->ucontext));
}
mw->device = pd->device;
mw->pd = pd;
atomic_inc(&pd->usecnt);
- HCA_PRINT(TRACE_LEVEL_INFORMATION ,HCA_DBG_SHIM ,("PD%d use cnt %d \n",
+ HCA_PRINT(TRACE_LEVEL_INFORMATION ,HCA_DBG_MEMORY ,("PD%d use cnt %d \n",
((struct mthca_pd*)pd)->pd_num, pd->usecnt));
}
ret = mw->device->dealloc_mw(mw);
if (!ret) {
atomic_dec(&pd->usecnt);
- HCA_PRINT(TRACE_LEVEL_INFORMATION ,HCA_DBG_SHIM ,("PD%d use cnt %d \n",
+ HCA_PRINT(TRACE_LEVEL_INFORMATION ,HCA_DBG_MEMORY ,("PD%d use cnt %d \n",
((struct mthca_pd*)pd)->pd_num, pd->usecnt));
}
fmr->device = pd->device;
fmr->pd = pd;
atomic_inc(&pd->usecnt);
- HCA_PRINT(TRACE_LEVEL_INFORMATION ,HCA_DBG_SHIM ,("PD%d use cnt %d \n",
+ HCA_PRINT(TRACE_LEVEL_INFORMATION ,HCA_DBG_MEMORY ,("PD%d use cnt %d \n",
((struct mthca_pd*)pd)->pd_num, pd->usecnt));
}
ret = fmr->device->dealloc_fmr(fmr);
if (!ret) {
atomic_dec(&pd->usecnt);
- HCA_PRINT(TRACE_LEVEL_INFORMATION ,HCA_DBG_SHIM ,("PD%d use cnt %d \n",
+ HCA_PRINT(TRACE_LEVEL_INFORMATION ,HCA_DBG_MEMORY ,("PD%d use cnt %d \n",
((struct mthca_pd*)pd)->pd_num, pd->usecnt));
}
} else {
{ // debug print
int i;
- HCA_PRINT(TRACE_LEVEL_VERBOSE ,HCA_DBG_LOW ,("Dumping QP context:\n"));
- HCA_PRINT(TRACE_LEVEL_VERBOSE ,HCA_DBG_LOW ,(" opt param mask: %08x\n", cl_ntoh32(*(__be32 *)mailbox->buf)));
- for (i = 0; i < 0x100 / 4; ++i) {
- if (i % 8 == 0)
- HCA_PRINT(TRACE_LEVEL_VERBOSE ,HCA_DBG_LOW ,(" [%02x] ", i * 4));
- HCA_PRINT(TRACE_LEVEL_VERBOSE ,HCA_DBG_LOW ,(" %08x",
- cl_ntoh32(((__be32 *) mailbox->buf)[i + 2])));
- if ((i + 1) % 8 == 0)
- HCA_PRINT(TRACE_LEVEL_VERBOSE ,HCA_DBG_LOW ,("\n"));
+ HCA_PRINT(TRACE_LEVEL_VERBOSE ,HCA_DBG_QP ,("Dumping QP context:\n"));
+ HCA_PRINT(TRACE_LEVEL_VERBOSE ,HCA_DBG_QP ,(" opt param mask: %08x\n", cl_ntoh32(*(__be32 *)mailbox->buf)));
+ for (i = 2; i < 0x100 / 4; i=i+4) {
+ HCA_PRINT(TRACE_LEVEL_VERBOSE ,HCA_DBG_QP ,(" [%02x] %08x %08x %08x %08x\n",i-2,
+ cl_ntoh32(((__be32 *) mailbox->buf)[i ]),
+ cl_ntoh32(((__be32 *) mailbox->buf)[i + 1]),
+ cl_ntoh32(((__be32 *) mailbox->buf)[i + 2]),
+ cl_ntoh32(((__be32 *) mailbox->buf)[i + 3])));
}
}
}
if (mailbox) { // debug print
int i;
- HCA_PRINT(TRACE_LEVEL_VERBOSE ,HCA_DBG_LOW ,("Dumping QP context:\n"));
- HCA_PRINT(TRACE_LEVEL_VERBOSE ,HCA_DBG_LOW ,(" %08x\n", cl_ntoh32(*(__be32 *)mailbox->buf)));
- for (i = 0; i < 0x100 / 4; ++i) {
- if (i % 8 == 0)
- HCA_PRINT(TRACE_LEVEL_VERBOSE ,HCA_DBG_LOW ,("[%02x] ", i * 4));
- HCA_PRINT(TRACE_LEVEL_VERBOSE ,HCA_DBG_LOW ,(" %08x",
- cl_ntoh32(((__be32 *) mailbox->buf)[i + 2])));
- if ((i + 1) % 8 == 0)
- HCA_PRINT(TRACE_LEVEL_VERBOSE ,HCA_DBG_LOW ,("\n"));
+ HCA_PRINT(TRACE_LEVEL_VERBOSE ,HCA_DBG_QP ,("Dumping QP context:\n"));
+ for (i = 2; i < 0x100 / 4; i=i+4) {
+ HCA_PRINT(TRACE_LEVEL_VERBOSE ,HCA_DBG_QP ,(" [%02x] %08x %08x %08x %08x\n",i-2,
+ cl_ntoh32(((__be32 *) mailbox->buf)[i ]),
+ cl_ntoh32(((__be32 *) mailbox->buf)[i + 1]),
+ cl_ntoh32(((__be32 *) mailbox->buf)[i + 2]),
+ cl_ntoh32(((__be32 *) mailbox->buf)[i + 3])));
}
}
} else
return;
}
- ++cq->arm_sn;
+ if (mthca_is_memfree(dev)) {
+ if (cq->ibcq.ucontext)
+ ++*cq->p_u_arm_sn;
+ else
+ ++cq->arm_sn;
+ }
cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);
}
if (!(new_wqe & cl_hton32(0x3f)) || (!cqe->db_cnt && dbd))
return;
- cqe->db_cnt = cl_hton16(cl_ntoh16((u16)(cqe->db_cnt) - (u16)dbd));
+ cqe->db_cnt = cl_hton16(cl_ntoh16(cqe->db_cnt) - (u16)dbd);
cqe->wqe = new_wqe;
cqe->syndrome = SYNDROME_WR_FLUSH_ERR;
{ // debug print
HCA_PRINT(TRACE_LEVEL_VERBOSE ,HCA_DBG_LOW ,("Dumping EQ context %02x:\n", eq->eqn));
- for (i = 0; i < sizeof (struct mthca_eq_context) / 4; ++i) {
- if (i % 4 == 0)
- HCA_PRINT(TRACE_LEVEL_VERBOSE ,HCA_DBG_LOW ,("[%02x] ", i * 4));
- HCA_PRINT(TRACE_LEVEL_VERBOSE ,HCA_DBG_LOW ,(" %08x", cl_ntoh32(*(u32*)((u8*)mailbox->buf + i * 4))));
- if ((i + 1) % 4 == 0)
- HCA_PRINT(TRACE_LEVEL_VERBOSE ,HCA_DBG_LOW ,("\n"));
+ for (i = 0; i < sizeof (struct mthca_eq_context) / 4; i=i+4) {
+ HCA_PRINT(TRACE_LEVEL_VERBOSE ,HCA_DBG_LOW ,("[%02x] %08x %08x %08x %08x\n", i,
+ cl_ntoh32(*(u32*)((u8*)mailbox->buf + i * 4)),
+ cl_ntoh32(*(u32*)((u8*)mailbox->buf + (i+1)*4)),
+ cl_ntoh32(*(u32*)((u8*)mailbox->buf + (i+2)*4)),
+			cl_ntoh32(*(u32*)((u8*)mailbox->buf + (i+3)*4))));
+
}
}
}
int mthca_map_user_db(struct mthca_dev *dev, struct mthca_uar *uar,
- struct mthca_user_db_table *db_tab, int index, u64 uaddr)
+ struct mthca_user_db_table *db_tab, int index, u64 uaddr, void **kva)
{
int ret = 0;
u8 status;
if (db_tab->page[i].refcount) {
++db_tab->page[i].refcount;
- goto out;
+ goto done;
}
ret = get_user_pages(dev, uaddr & PAGE_MASK, 1, 1,
db_tab->page[i].uvirt = uaddr;
db_tab->page[i].refcount = 1;
+done:
+ if (kva)
+ *kva = db_tab->page[i].mem.page;
+
out:
up(&db_tab->mutex);
return ret;
struct mthca_uar;
int mthca_map_user_db(struct mthca_dev *dev, struct mthca_uar *uar,
- struct mthca_user_db_table *db_tab, int index, u64 uaddr);
+ struct mthca_user_db_table *db_tab, int index, u64 uaddr, void **kva);
void mthca_unmap_user_db(struct mthca_dev *dev, struct mthca_uar *uar,
struct mthca_user_db_table *db_tab, int index);
struct mthca_user_db_table *mthca_init_user_db_tab(struct mthca_dev *dev);
props->vendor_part_id = cl_ntoh16(*(__be16 *) (out_mad->data + 30));
props->hw_ver = cl_ntoh32(*(__be32 *) (out_mad->data + 32));
memcpy(&props->sys_image_guid, out_mad->data + 4, 8);
- memcpy(&props->board_id, mdev->board_id, MTHCA_BOARD_ID_LEN);
props->max_mr_size = ~0ull;
props->page_size_cap = mdev->limits.page_size_cap;
props->max_qp = mdev->limits.num_qps - mdev->limits.reserved_qps;
}
err = mthca_map_user_db(to_mdev(pd->device), &context->uar,
context->db_tab, ucmd.db_index,
- ucmd.db_page);
+ ucmd.db_page, NULL);
if (err)
goto err_free;
err = mthca_map_user_db(to_mdev(pd->device), &context->uar,
context->db_tab,
- ucmd.sq_db_index, ucmd.sq_db_page);
+ ucmd.sq_db_index, ucmd.sq_db_page, NULL);
if (err)
goto err_map1;
err = mthca_map_user_db(to_mdev(pd->device), &context->uar,
context->db_tab,
- ucmd.rq_db_index, ucmd.rq_db_page);
+ ucmd.rq_db_index, ucmd.rq_db_page, NULL);
if (err)
goto err_map2;
struct mthca_cq *cq;
int nent;
int err;
+ void *u_arm_db_page = 0;
if (entries < 1 || entries > to_mdev(ibdev)->limits.max_cqes)
return ERR_PTR(-EINVAL);
err = mthca_map_user_db(to_mdev(ibdev), &to_mucontext(context)->uar,
to_mucontext(context)->db_tab,
- ucmd.set_db_index, ucmd.set_db_page);
+ ucmd.set_db_index, ucmd.set_db_page, NULL);
if (err)
return ERR_PTR(err);
err = mthca_map_user_db(to_mdev(ibdev), &to_mucontext(context)->uar,
to_mucontext(context)->db_tab,
- ucmd.arm_db_index, ucmd.arm_db_page);
+ ucmd.arm_db_index, ucmd.arm_db_page, NULL);
if (err)
goto err_unmap_set;
+
+ err = mthca_map_user_db(to_mdev(ibdev), &to_mucontext(context)->uar,
+ to_mucontext(context)->db_tab,
+ ucmd.u_arm_db_index,
+ (u64)(ULONG_PTR)PAGE_ALIGN(ucmd.u_arm_db_page),
+ &u_arm_db_page);
+ if (err)
+ goto err_unmap_arm;
}
cq = kmalloc(sizeof *cq, GFP_KERNEL);
if (!cq) {
err = -ENOMEM;
- goto err_unmap_arm;
+ goto err_unmap_ev;
}
if (context) {
cq->mr.ibmr.lkey = ucmd.lkey;
cq->set_ci_db_index = ucmd.set_db_index;
cq->arm_db_index = ucmd.arm_db_index;
+ cq->u_arm_db_index = ucmd.u_arm_db_index;
+ cq->p_u_arm_sn = (int*)((char*)u_arm_db_page + BYTE_OFFSET(ucmd.u_arm_db_page));
}
for (nent = 1; nent <= entries; nent <<= 1)
err_free:
kfree(cq);
+err_unmap_ev:
+ if (context)
+ mthca_unmap_user_db(to_mdev(ibdev), &to_mucontext(context)->uar,
+ to_mucontext(context)->db_tab, ucmd.u_arm_db_index);
+
err_unmap_arm:
if (context)
mthca_unmap_user_db(to_mdev(ibdev), &to_mucontext(context)->uar,
int mthca_destroy_cq(struct ib_cq *cq)
{
if (cq->ucontext) {
+ mthca_unmap_user_db(to_mdev(cq->device),
+ &to_mucontext(cq->ucontext)->uar,
+ to_mucontext(cq->ucontext)->db_tab,
+ to_mcq(cq)->u_arm_db_index);
mthca_unmap_user_db(to_mdev(cq->device),
&to_mucontext(cq->ucontext)->uar,
to_mucontext(cq->ucontext)->db_tab,
int arm_db_index;
__be32 *arm_db;
int arm_sn;
+ int u_arm_db_index;
+ int *p_u_arm_sn;
union mthca_buf queue;
struct mthca_mr mr;
struct ibv_reg_mr mr;
uint64_t arm_db_page;
uint64_t set_db_page;
+ uint64_t u_arm_db_page;
+ uint64_t user_handle;
uint32_t arm_db_index;
uint32_t set_db_index;
- uint64_t user_handle;
+ uint32_t u_arm_db_index;
uint32_t cqe;
uint32_t lkey; /* used only by kernel */
+ uint32_t reserved;
};
struct ibv_create_cq_resp {
uint8_t i, index = 0;\r
uint16_t num_gids;\r
\r
- p_gid_table = p_ca_attr->p_port_attr[port_num].p_gid_table;\r
+ p_gid_table = p_ca_attr->p_port_attr[port_num-1].p_gid_table;\r
CL_ASSERT (p_gid_table);\r
\r
- num_gids = p_ca_attr->p_port_attr[port_num].num_gids;\r
+ num_gids = p_ca_attr->p_port_attr[port_num-1].num_gids;\r
UVP_PRINT(TRACE_LEVEL_INFORMATION, UVP_DBG_AV, \r
("Port %d has %d gids\n", port_num, num_gids));\r
\r
\r
if (IB_SUCCESS == ioctl_status) {\r
\r
- page = ah->page;\r
- if (p_resp->use_mr) {\r
- // fill mr parameters\r
- page->mr.handle = p_resp->mr.mr_handle;\r
- page->mr.lkey = p_resp->mr.lkey;\r
- page->mr.rkey = p_resp->mr.rkey;\r
- page->mr.pd = p_pd->ibv_pd;\r
- page->mr.context = p_pd->ibv_pd->context;\r
+ if (!mthca_is_memfree(p_pd->ibv_pd->context)) {\r
+ page = ah->page;\r
+ if (p_resp->use_mr) {\r
+ // fill mr parameters\r
+ page->mr.handle = p_resp->mr.mr_handle;\r
+ page->mr.lkey = p_resp->mr.lkey;\r
+ page->mr.rkey = p_resp->mr.rkey;\r
+ page->mr.pd = p_pd->ibv_pd;\r
+ page->mr.context = p_pd->ibv_pd->context;\r
+ }\r
+ ah->key = page->mr.lkey;\r
}\r
- ah->key = page->mr.lkey;\r
*ph_uvp_av = (ib_av_handle_t)ah;\r
}\r
goto end;\r
mthca_poll_cq,
mthca_poll_cq_list,
NULL, /* req_notify_cq */
- NULL, /* cq_event */
mthca_destroy_cq,
NULL, // mthca_create_srq,
NULL, // mthca_modify_srq,
if (mthca_is_memfree(&context->ibv_ctx)) {
context->ibv_ctx.ops.req_notify_cq = mthca_arbel_arm_cq;
- context->ibv_ctx.ops.cq_event = mthca_arbel_cq_event;
context->ibv_ctx.ops.post_send = mthca_arbel_post_send;
context->ibv_ctx.ops.post_recv = mthca_arbel_post_recv;
context->ibv_ctx.ops.post_srq_recv = mthca_arbel_post_srq_recv;
} else {
context->ibv_ctx.ops.req_notify_cq = mthca_tavor_arm_cq;
- context->ibv_ctx.ops.cq_event = NULL;
context->ibv_ctx.ops.post_send = mthca_tavor_post_send;
context->ibv_ctx.ops.post_recv = mthca_tavor_post_recv;
context->ibv_ctx.ops.post_srq_recv = mthca_tavor_post_srq_recv;
uint32_t *set_ci_db;
int arm_db_index;
uint32_t *arm_db;
- int arm_sn;
+ int u_arm_db_index;
+ uint32_t *p_u_arm_sn;
};
struct mthca_srq {
struct _ib_wc** const pp_done_wclist );
int mthca_tavor_arm_cq(struct ibv_cq *cq, int solicited);
int mthca_arbel_arm_cq(struct ibv_cq *cq, int solicited);
-void mthca_arbel_cq_event(struct ibv_cq *cq);
void mthca_cq_clean(struct mthca_cq *cq, uint32_t qpn,
struct mthca_srq *srq);
void mthca_init_cq_buf(struct mthca_cq *cq, int nent);
uint32_t sn;
uint32_t ci;
- sn = cq->arm_sn & 3;
+ sn = *cq->p_u_arm_sn & 3;
ci = cl_hton32(cq->cons_index);
doorbell[0] = ci;
return 0;
}
-void mthca_arbel_cq_event(struct ibv_cq *cq)
-{
- to_mcq(cq)->arm_sn++;
-}
-
static inline int is_recv_cqe(struct mthca_cqe *cqe)
{
if ((cqe->opcode & MTHCA_ERROR_CQE_OPCODE_MASK) ==
//__asm movq target_p,mm0
//__asm emms
}
-
static inline void mthca_write_db_rec(uint32_t val[2], uint32_t *db)
{
- //TODO: can we save mm0 and not to use emms, as Linux do ?
- __asm movq mm0,val
- __asm movq db,mm0
- __asm emms
+ db[0] = val[0];
+ wmb();
+ db[1] = val[1];
}
+
+
#endif
#endif /* MTHCA_H */
mthca_init_cq_buf(cq, nent);
if (mthca_is_memfree(context)) {
- cq->arm_sn = 1;
cq->set_ci_db_index = mthca_alloc_db(to_mctx(context)->db_tab,
MTHCA_DB_TYPE_CQ_SET_CI,
&cq->set_ci_db);
if (cq->arm_db_index < 0)
goto err_set_db;
+ cq->u_arm_db_index = mthca_alloc_db(to_mctx(context)->db_tab,
+ MTHCA_DB_TYPE_CQ_ARM,
+ &cq->p_u_arm_sn);
+ if (cq->u_arm_db_index < 0)
+ goto err_arm_db;
+
+ *cq->p_u_arm_sn = 1;
+
req->arm_db_page = db_align(cq->arm_db);
req->set_db_page = db_align(cq->set_ci_db);
+ req->u_arm_db_page = (uint64_t)(ULONG_PTR)cq->p_u_arm_sn;
req->arm_db_index = cq->arm_db_index;
req->set_db_index = cq->set_ci_db_index;
+ req->u_arm_db_index = cq->u_arm_db_index;
}
req->mr.start = (uint64_t)(ULONG_PTR)cq->buf;
#endif
return &cq->ibv_cq;
+err_arm_db:
+	if (mthca_is_memfree(context))
+		mthca_free_db(to_mctx(context)->db_tab, MTHCA_DB_TYPE_CQ_ARM,
+			cq->arm_db_index);
+
err_set_db:
if (mthca_is_memfree(context))
mthca_free_db(to_mctx(context)->db_tab, MTHCA_DB_TYPE_CQ_SET_CI,
int ret;
if (mthca_is_memfree(cq->context)) {
+		mthca_free_db(to_mctx(cq->context)->db_tab, MTHCA_DB_TYPE_CQ_ARM,
+			to_mcq(cq)->u_arm_db_index);
mthca_free_db(to_mctx(cq->context)->db_tab, MTHCA_DB_TYPE_CQ_SET_CI,
to_mcq(cq)->set_ci_db_index);
mthca_free_db(to_mctx(cq->context)->db_tab, MTHCA_DB_TYPE_CQ_ARM,
struct _ib_wc** const pp_free_wclist,
struct _ib_wc** const pp_done_wclist );
int (*req_notify_cq)(struct ibv_cq *cq, int solicited_only);
- void (*cq_event)(struct ibv_cq *cq);
int (*destroy_cq)(struct ibv_cq *cq);
struct ibv_srq * (*create_srq)(struct ibv_pd *pd,
struct ibv_srq_init_attr *srq_init_attr);