-#define INIT_DBLY_LIST_HEAD(ptr) INIT_DBLY_LIST_NODE(ptr.node)
+#define INIT_DBLY_LIST_HEAD(ptr) INIT_DBLY_LIST_NODE((ptr).node)
-static __inline__ void __list_add_node(struct ocrdma_list_node *new,
+static inline void __list_add_node(struct ocrdma_list_node *new,
struct ocrdma_list_node *prev,
struct ocrdma_list_node *next)
{
new->next = next;
new->prev = prev;
next->prev = new;
prev->next = new;
}
-static __inline__ void list_add_node_tail(struct ocrdma_list_node *new,
+static inline void list_add_node_tail(struct ocrdma_list_node *new,
struct ocrdma_list_head *head)
{
__list_add_node(new, head->node.prev, &head->node);
}
-static __inline__ void __list_del_node(struct ocrdma_list_node *prev,
+static inline void __list_del_node(struct ocrdma_list_node *prev,
struct ocrdma_list_node *next)
{
next->prev = prev;
prev->next = next;
}
-static __inline__ void list_del_node(struct ocrdma_list_node *entry)
+static inline void list_del_node(struct ocrdma_list_node *entry)
{
__list_del_node(entry->prev, entry->next);
- entry->next = entry->prev = 0;
+ entry->next = entry->prev = NULL;
}
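/* Usage sketch (illustrative only, with a hypothetical container type):
 * the helpers above implement an intrusive circular doubly-linked list,
 * so a caller embeds a struct ocrdma_list_node inside its own struct
 * and links that node onto a struct ocrdma_list_head.
 */
struct example_item {			/* hypothetical container */
	int payload;
	struct ocrdma_list_node node;	/* embedded linkage */
};

static void example_list_usage(struct ocrdma_list_head *head,
			       struct example_item *it)
{
	/* append at the tail: the node is linked in between
	 * head->node.prev and &head->node */
	list_add_node_tail(&it->node, head);
	/* unlink it again; list_del_node() also resets next/prev */
	list_del_node(&it->node);
}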
void *map_addr;
cq = malloc(sizeof *cq);
- if (!cq) {
+ if (!cq)
return NULL;
- }
+
bzero(cq, sizeof *cq);
cmd.dpp_cq = dpp_cq;
status = ibv_cmd_create_cq(context, cqe, channel, comp_vector,
#ifdef DPP_CQ_SUPPORT
if (attrs->cap.max_inline_data) {
qp->dpp_cq = ocrdma_create_dpp_cq(pd->context,
- OCRDMA_CREATE_QP_REQ_DPP_CREDIT_LIMIT);
+ OCRDMA_CREATE_QP_REQ_DPP_CREDIT_LIMIT);
if (qp->dpp_cq) {
cmd.enable_dpp_cq = 1;
cmd.dpp_cq_id = qp->dpp_cq->cq_id;
qp->sq.max_sges = attrs->cap.max_send_sge;
qp->max_inline_data = attrs->cap.max_inline_data;
-
+
qp->signaled = attrs->sq_sig_all;
qp->sq.max_cnt = resp.num_wqe_allocated;
static inline void *ocrdma_hwq_head(struct ocrdma_qp_hwq_info *q)
{
- return (q->va + (q->head * q->entry_size));
+ return q->va + (q->head * q->entry_size);
}
static inline void *ocrdma_wq_tail(struct ocrdma_qp_hwq_info *q)
{
- return (q->va + (q->tail * q->entry_size));
+ return q->va + (q->tail * q->entry_size);
}
static inline void *ocrdma_hwq_head_from_idx(struct ocrdma_qp_hwq_info *q,
uint32_t idx)
{
- return (q->va + (idx * q->entry_size));
+ return q->va + (idx * q->entry_size);
}
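/* Index arithmetic sketch (illustrative only): head, tail and idx are
 * entry indices and entry_size is in bytes, so each accessor above
 * resolves to q->va + index * entry_size; e.g. with entry_size = 64,
 * index 3 maps to byte offset 192 in the queue buffer. A hypothetical
 * walk over every slot, bounded by the max_cnt recorded at creation:
 */
static void example_wqe_walk(struct ocrdma_qp_hwq_info *q)
{
	uint32_t idx;
	void *wqe;

	for (idx = 0; idx < q->max_cnt; idx++) {
		wqe = ocrdma_hwq_head_from_idx(q, idx);
		(void)wqe;	/* a real caller would parse the wqe here */
	}
}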
static void ocrdma_hwq_inc_head(struct ocrdma_qp_hwq_info *q)
pthread_spin_lock(&cq->cq_lock);
/* traverse through the CQEs in the hw CQ,
- * find the matching CQE for a given qp,
+ * find the matching CQE for a given qp,
* mark the matching one discarded=1.
* discard the cqe.
- * ring the doorbell in the poll_cq() as
+ * ring the doorbell in the poll_cq() as
- * we don't complete out of order cqe.
+ * we don't complete cqes out of order.
*/
cur_getp = cq->getp;
- /* find upto when do we reap the cq. */
+ /* find up to where we reap the cq. */
stop_getp = cur_getp;
do {
if (is_hw_sq_empty(qp) && (!qp->srq && is_hw_rq_empty(qp)))
break;
cqe = cq->va + cur_getp;
- /* if (a) no valid cqe, or (b) done reading full hw cq, or
+ /* if (a) no valid cqe, or (b) done reading full hw cq, or
- * (c) qp_xq becomes empty.
- * then exit
+ * (c) qp_xq becomes empty,
+ * then exit.
*/
qpn = ocrdma_le_to_cpu(cqe->cmn.qpn) & OCRDMA_CQE_QPN_MASK;
- /* if previously discarded cqe found, skip that too. */
- /* check for matching qp */
+ /* if a previously discarded cqe is found, skip it too;
+ * check for a matching qp.
+ */
if ((qpn == 0) || (qpn != qp->id))
goto skip_cqe;
qp = get_ocrdma_qp(ibqp);
dev = qp->dev;
id = dev->id;
- /*
+ /*
* acquire CQ lock while destroy is in progress, in order to
- * protect against proessing in-flight CQEs for this QP.
+ * protect against processing in-flight CQEs for this QP.
*/
if (qp->sq.va)
munmap(qp->sq.va, qp->sq.len);
- /* ensure that CQEs for newly created QP (whose id may be same with
- * one which just getting destroyed are same), dont get
+ /* ensure that CQEs for a newly created QP (whose id may be the same
+ * as that of the QP just destroyed) don't get
* discarded until the old CQEs are discarded.
*/
static inline uint32_t ocrdma_sglist_len(struct ibv_sge *sg_list, int num_sge)
{
uint32_t total_len = 0, i;
-
+
for (i = 0; i < num_sge; i++)
total_len += sg_list[i].length;
- return total_len;
+ return total_len;
}
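/* Caller sketch (hypothetical helper, mirroring the inline-send check in
 * ocrdma_build_inline_sges() below): the summed scatter/gather length of
 * an IBV_SEND_INLINE work request must not exceed the max_inline_data
 * limit negotiated at QP creation. Assumes <errno.h> is included.
 */
static int example_check_inline_len(struct ocrdma_qp *qp,
				    struct ibv_send_wr *wr)
{
	uint32_t len = ocrdma_sglist_len(wr->sg_list, wr->num_sge);

	if (len > qp->max_inline_data)
		return EINVAL;	/* too large to be sent inline */
	return 0;
}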
static inline int ocrdma_build_inline_sges(struct ocrdma_qp *qp,
char *dpp_addr;
- if (wr->send_flags & IBV_SEND_INLINE && qp->qp_type != IBV_QPT_UD) {
-
+ if ((wr->send_flags & IBV_SEND_INLINE) && qp->qp_type != IBV_QPT_UD) {
hdr->total_len = ocrdma_sglist_len(wr->sg_list, wr->num_sge);
if (hdr->total_len > qp->max_inline_data) {
ocrdma_err
hdr_sz += sizeof(struct ocrdma_ewqe_ud_hdr);
if (hdr->cw & (OCRDMA_WRITE << OCRDMA_WQE_OPCODE_SHIFT))
hdr_sz += sizeof(struct ocrdma_sge);
- return (hdr_sz / sizeof(uint32_t));
+ return hdr_sz / sizeof(uint32_t);
}
static void ocrdma_build_dpp_wqe(void *va, struct ocrdma_hdr_wqe *wqe,
qp->wqe_wr_id_tbl[qp->sq.head].dpp_wqe = 1;
qp->wqe_wr_id_tbl[qp->sq.head].dpp_wqe_idx = qp->dpp_q.head;
/* if dpp cq is not enabled, we can post
- * wqe as soon as we receive and adapter
+ * the wqe as soon as we receive it; the adapter
* takes care of flow control.
*/
if (qp->dpp_cq)
ibwc->wc_flags = 0;
if (qp->qp_type == IBV_QPT_UD)
status = (ocrdma_le_to_cpu(cqe->flags_status_srcqpn) &
- OCRDMA_CQE_UD_STATUS_MASK) >> OCRDMA_CQE_UD_STATUS_SHIFT;
+ OCRDMA_CQE_UD_STATUS_MASK) >>
+ OCRDMA_CQE_UD_STATUS_SHIFT;
else
status = (ocrdma_le_to_cpu(cqe->flags_status_srcqpn) &
OCRDMA_CQE_STATUS_MASK) >> OCRDMA_CQE_STATUS_SHIFT;
cq->armed = 1;
cq->solicited = solicited;
- /* check whether any valid cqe exist or not, if not then safe to
- * arm. If cqe is not yet consumed, then let it get consumed and then
- * we arm it to avoid 0 interrupts.
+ /* check whether any valid cqe exists; if not, it is safe to arm.
+ * If a cqe is not yet consumed, let it be consumed first and then
+ * arm, so that we do not end up with zero interrupts.
*/
if (!is_cqe_valid(cq, cqe) || cq->arm_needed) {