git.openfabrics.org - ~emulex/libocrdma.git/commitdiff
RDMA/libocrdma: formatting fix
author    Devesh Sharma <devesh.sharma@emulex.com>
          Mon, 23 Dec 2013 07:15:49 +0000 (12:45 +0530)
committer Devesh Sharma <devesh.sharma@emulex.com>
          Mon, 23 Dec 2013 07:15:49 +0000 (12:45 +0530)
Formatting of the code is corrected wherever possible.

Signed-off-by: Devesh Sharma <devesh.sharma@emulex.com>
src/ocrdma_list.h
src/ocrdma_main.c
src/ocrdma_verbs.c
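
Across these three files the changes are mechanical: the GCC-specific __inline__ spelling becomes standard C99 inline, redundant parentheses around return expressions and braces around single-statement if bodies are dropped, comments are re-wrapped, and trailing whitespace is trimmed. A minimal before/after sketch of the pattern (example_q and example_head are hypothetical names for illustration, not identifiers from libocrdma; the void-pointer arithmetic mirrors the driver code and relies on a GCC extension):

    #include <stdint.h>

    struct example_q {
            void     *va;         /* queue base address */
            uint32_t head;        /* current head index */
            uint32_t entry_size;  /* bytes per queue entry */
    };

    /* Old style: GCC __inline__ keyword, parenthesized return expression. */
    static __inline__ void *example_head_old(struct example_q *q)
    {
            return (q->va + (q->head * q->entry_size));
    }

    /* New style: C99 inline keyword, bare return expression. */
    static inline void *example_head(struct example_q *q)
    {
            return q->va + (q->head * q->entry_size);
    }

The same pattern recurs throughout the hunks below, e.g. in ocrdma_hwq_head() and in the return statement of ocrdma_get_hdr_len().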

diff --git a/src/ocrdma_list.h b/src/ocrdma_list.h
index 906ab3f6ea13195ea1ae4db5817e2f01e4bfd233..1e0f1ff6dbb404ea1c15fd76a82ef9e02f105236 100644
@@ -56,7 +56,7 @@ struct ocrdma_list_head {
 
 #define INIT_DBLY_LIST_HEAD(ptr) INIT_DBLY_LIST_NODE(ptr.node)
 
-static __inline__ void __list_add_node(struct ocrdma_list_node *new,
+static inline void __list_add_node(struct ocrdma_list_node *new,
                                       struct ocrdma_list_node *prev,
                                       struct ocrdma_list_node *next)
 {
@@ -66,20 +66,20 @@ static __inline__ void __list_add_node(struct ocrdma_list_node *new,
        prev->next = new;
 }
 
-static __inline__ void list_add_node_tail(struct ocrdma_list_node *new,
+static inline void list_add_node_tail(struct ocrdma_list_node *new,
                                          struct ocrdma_list_head *head)
 {
        __list_add_node(new, head->node.prev, &head->node);
 }
 
-static __inline__ void __list_del_node(struct ocrdma_list_node *prev,
+static inline void __list_del_node(struct ocrdma_list_node *prev,
                                       struct ocrdma_list_node *next)
 {
        next->prev = prev;
        prev->next = next;
 }
 
-static __inline__ void list_del_node(struct ocrdma_list_node *entry)
+static inline void list_del_node(struct ocrdma_list_node *entry)
 {
        __list_del_node(entry->prev, entry->next);
        entry->next = entry->prev = 0;
diff --git a/src/ocrdma_main.c b/src/ocrdma_main.c
index e86eeb1195a5292950260f1b1f241e092313349c..cdefe244367ace8f4b655984273f9e4f37f2800d 100644
@@ -254,12 +254,12 @@ void ocrdma_unregister_driver(void)
                pthread_mutex_destroy(&dev->dev_lock);
                pthread_spin_destroy(&dev->flush_q_lock);
                list_del_node(&dev->entry);
-               /* 
-                * Avoid freeing the dev here since MPI get SIGSEGV 
+               /*
+                * Avoid freeing the dev here since MPI get SIGSEGV
                 * in few error cases because of reference to ib_dev
                 * after free.
                 * TODO Bug 135437 fix it properly to avoid mem leak
-                */  
+                */
                /* free(dev); */
        }
        list_unlock(&ocrdma_dev_list);
diff --git a/src/ocrdma_verbs.c b/src/ocrdma_verbs.c
index 09b6c58c40a20852f7116ad8a46be30ca5aefd85..ee8411aa06252f33aa868d6aa7723c1e4b986dc1 100644
@@ -271,9 +271,9 @@ static struct ibv_cq *ocrdma_create_cq_common(struct ibv_context *context,
        void *map_addr;
 
        cq = malloc(sizeof *cq);
-       if (!cq) {
+       if (!cq)
                return NULL;
-       }
+
        bzero(cq, sizeof *cq);
        cmd.dpp_cq = dpp_cq;
        status = ibv_cmd_create_cq(context, cqe, channel, comp_vector,
@@ -523,7 +523,7 @@ struct ibv_qp *ocrdma_create_qp(struct ibv_pd *pd,
 #ifdef DPP_CQ_SUPPORT
        if (attrs->cap.max_inline_data) {
                qp->dpp_cq = ocrdma_create_dpp_cq(pd->context,
-                                                 OCRDMA_CREATE_QP_REQ_DPP_CREDIT_LIMIT);
+                                       OCRDMA_CREATE_QP_REQ_DPP_CREDIT_LIMIT);
                if (qp->dpp_cq) {
                        cmd.enable_dpp_cq = 1;
                        cmd.dpp_cq_id = qp->dpp_cq->cq_id;
@@ -548,7 +548,7 @@ struct ibv_qp *ocrdma_create_qp(struct ibv_pd *pd,
 
        qp->sq.max_sges = attrs->cap.max_send_sge;
        qp->max_inline_data = attrs->cap.max_inline_data;
-       
+
        qp->signaled = attrs->sq_sig_all;
 
        qp->sq.max_cnt = resp.num_wqe_allocated;
@@ -938,18 +938,18 @@ static inline int is_hw_rq_empty(struct ocrdma_qp *qp)
 
 static inline void *ocrdma_hwq_head(struct ocrdma_qp_hwq_info *q)
 {
-       return (q->va + (q->head * q->entry_size));
+       return q->va + (q->head * q->entry_size);
 }
 
 static inline void *ocrdma_wq_tail(struct ocrdma_qp_hwq_info *q)
 {
-       return (q->va + (q->tail * q->entry_size));
+       return q->va + (q->tail * q->entry_size);
 }
 
 static inline void *ocrdma_hwq_head_from_idx(struct ocrdma_qp_hwq_info *q,
                                             uint32_t idx)
 {
-       return (q->va + (idx * q->entry_size));
+       return q->va + (idx * q->entry_size);
 }
 
 static void ocrdma_hwq_inc_head(struct ocrdma_qp_hwq_info *q)
@@ -1022,27 +1022,28 @@ static void ocrdma_discard_cqes(struct ocrdma_qp *qp, struct ocrdma_cq *cq)
        pthread_spin_lock(&cq->cq_lock);
 
        /* traverse through the CQEs in the hw CQ,
-        * find the matching CQE for a given qp, 
+        * find the matching CQE for a given qp,
         * mark the matching one discarded=1.
         * discard the cqe.
-        * ring the doorbell in the poll_cq() as 
+        * ring the doorbell in the poll_cq() as
         * we don't complete out of order cqe.
         */
        cur_getp = cq->getp;
-       /* find upto when do we reap the cq. */
+       /* find upto when do we reap the cq.*/
        stop_getp = cur_getp;
        do {
                if (is_hw_sq_empty(qp) && (!qp->srq && is_hw_rq_empty(qp)))
                        break;
 
                cqe = cq->va + cur_getp;
-               /* if (a) no valid cqe, or (b) done reading full hw cq, or 
+               /* if (a) no valid cqe, or (b) done reading full hw cq, or
                 *    (c) qp_xq becomes empty.
                 * then exit
                 */
                qpn = ocrdma_le_to_cpu(cqe->cmn.qpn) & OCRDMA_CQE_QPN_MASK;
-               /* if previously discarded cqe found, skip that too. */
-               /* check for matching qp */
+               /* if previously discarded cqe found, skip that too.
+                * check for matching qp
+                */
                if ((qpn == 0) || (qpn != qp->id))
                        goto skip_cqe;
 
@@ -1082,7 +1083,7 @@ int ocrdma_destroy_qp(struct ibv_qp *ibqp)
        qp = get_ocrdma_qp(ibqp);
        dev = qp->dev;
        id = dev->id;
-       /* 
+       /*
         * acquire CQ lock while destroy is in progress, in order to
         * protect against proessing in-flight CQEs for this QP.
         */
@@ -1105,7 +1106,7 @@ int ocrdma_destroy_qp(struct ibv_qp *ibqp)
        if (qp->sq.va)
                munmap(qp->sq.va, qp->sq.len);
 
-       /* ensure that CQEs for newly created QP (whose id may be same with 
+       /* ensure that CQEs for newly created QP (whose id may be same with
         * one which just getting destroyed are same), dont get
         * discarded until the old CQEs are discarded.
         */
@@ -1200,10 +1201,10 @@ static void ocrdma_build_sges(struct ocrdma_hdr_wqe *hdr,
 static inline uint32_t ocrdma_sglist_len(struct ibv_sge *sg_list, int num_sge)
 {
        uint32_t total_len = 0, i;
-       
+
        for (i = 0; i < num_sge; i++)
                total_len += sg_list[i].length;
-       return total_len;       
+       return total_len;
 }
 
 static inline int ocrdma_build_inline_sges(struct ocrdma_qp *qp,
@@ -1216,7 +1217,6 @@ static inline int ocrdma_build_inline_sges(struct ocrdma_qp *qp,
        char *dpp_addr;
 
        if (wr->send_flags & IBV_SEND_INLINE && qp->qp_type != IBV_QPT_UD) {
-       
                hdr->total_len = ocrdma_sglist_len(wr->sg_list, wr->num_sge);
                if (hdr->total_len > qp->max_inline_data) {
                        ocrdma_err
@@ -1333,7 +1333,7 @@ static uint32_t ocrdma_get_hdr_len(struct ocrdma_qp *qp,
                hdr_sz += sizeof(struct ocrdma_ewqe_ud_hdr);
        if (hdr->cw & (OCRDMA_WRITE << OCRDMA_WQE_OPCODE_SHIFT))
                hdr_sz += sizeof(struct ocrdma_sge);
-       return (hdr_sz / sizeof(uint32_t));
+       return hdr_sz / sizeof(uint32_t);
 }
 
 static void ocrdma_build_dpp_wqe(void *va, struct ocrdma_hdr_wqe *wqe,
@@ -1362,7 +1362,7 @@ static void ocrdma_post_dpp_wqe(struct ocrdma_qp *qp,
                qp->wqe_wr_id_tbl[qp->sq.head].dpp_wqe = 1;
                qp->wqe_wr_id_tbl[qp->sq.head].dpp_wqe_idx = qp->dpp_q.head;
                /* if dpp cq is not enabled, we can post
-                * wqe as soon as we receive and adapter 
+                * wqe as soon as we receive and adapter
                 * takes care of flow control.
                 */
                if (qp->dpp_cq)
@@ -1881,7 +1881,8 @@ static int ocrdma_poll_rcqe(struct ocrdma_qp *qp, struct ocrdma_cqe *cqe,
        ibwc->wc_flags = 0;
        if (qp->qp_type == IBV_QPT_UD)
                status = (ocrdma_le_to_cpu(cqe->flags_status_srcqpn) &
-                         OCRDMA_CQE_UD_STATUS_MASK) >> OCRDMA_CQE_UD_STATUS_SHIFT;
+                         OCRDMA_CQE_UD_STATUS_MASK) >>
+                               OCRDMA_CQE_UD_STATUS_SHIFT;
        else
                status = (ocrdma_le_to_cpu(cqe->flags_status_srcqpn) &
                          OCRDMA_CQE_STATUS_MASK) >> OCRDMA_CQE_STATUS_SHIFT;
@@ -2045,8 +2046,8 @@ int ocrdma_arm_cq(struct ibv_cq *ibcq, int solicited)
 
        cq->armed = 1;
        cq->solicited = solicited;
-       /* check whether any valid cqe exist or not, if not then safe to 
-        * arm. If cqe is not yet consumed, then let it get consumed and then 
+       /* check whether any valid cqe exist or not, if not then safe to
+        * arm. If cqe is not yet consumed, then let it get consumed and then
         * we arm it to avoid 0 interrupts.
         */
        if (!is_cqe_valid(cq, cqe) || cq->arm_needed) {