From: leonidk
Date: Sun, 5 Oct 2008 19:29:40 +0000 (+0000)
Subject: [MTHCA] add query_qp support. [mlnx: 3138, 3150]
X-Git-Url: https://openfabrics.org/gitweb/?a=commitdiff_plain;h=daa1d6677f369d04fbbd196eac17764238fba9dc;p=~shefty%2Frdma-win.git

[MTHCA] add query_qp support. [mlnx: 3138, 3150]

git-svn-id: svn://openib.tc.cornell.edu/gen1@1630 ad392aa1-c5ef-ae45-8dd8-e69d62a5ef86
---

diff --git a/trunk/hw/mlx4/kernel/bus/core/pa_cash.c b/trunk/hw/mlx4/kernel/bus/core/pa_cash.c
index e663b755..26420c54 100644
--- a/trunk/hw/mlx4/kernel/bus/core/pa_cash.c
+++ b/trunk/hw/mlx4/kernel/bus/core/pa_cash.c
@@ -291,7 +291,7 @@ void pa_deregister(iobuf_t *iobuf_p)
 
 void pa_cash_print()
 {
-	MLX4_PRINT(TRACE_LEVEL_WARNING ,MLX4_DBG_LOW,
+	MLX4_PRINT(TRACE_LEVEL_INFORMATION ,MLX4_DBG_LOW,
 		("pa_cash_print: max_nr_pages %d (%#x), cur_nr_pages %d (%#x), free_list_hdr %d, free_threshold %d\n",
 		g_cash.max_nr_pages, g_cash.max_nr_pages,
 		g_cash.cur_nr_pages, g_cash.cur_nr_pages,
diff --git a/trunk/hw/mthca/kernel/hca_data.c b/trunk/hw/mthca/kernel/hca_data.c
index 0809c21f..92a39187 100644
--- a/trunk/hw/mthca/kernel/hca_data.c
+++ b/trunk/hw/mthca/kernel/hca_data.c
@@ -845,3 +845,120 @@ mlnx_modify_ah(
 	mthca_set_av_params(dev_p, (struct mthca_ah *)ib_ah_p, (struct ib_ah_attr *)ah_attr_p );
 }
 
+uint8_t from_rate(enum ib_rate ib_rate)
+{
+	if (ib_rate == IB_RATE_2_5_GBPS) return IB_PATH_RECORD_RATE_2_5_GBS;
+	if (ib_rate == IB_RATE_5_GBPS)   return IB_PATH_RECORD_RATE_5_GBS;
+	if (ib_rate == IB_RATE_10_GBPS)  return IB_PATH_RECORD_RATE_10_GBS;
+	if (ib_rate == IB_RATE_20_GBPS)  return IB_PATH_RECORD_RATE_20_GBS;
+	if (ib_rate == IB_RATE_30_GBPS)  return IB_PATH_RECORD_RATE_30_GBS;
+	if (ib_rate == IB_RATE_40_GBPS)  return IB_PATH_RECORD_RATE_40_GBS;
+	if (ib_rate == IB_RATE_60_GBPS)  return IB_PATH_RECORD_RATE_60_GBS;
+	if (ib_rate == IB_RATE_80_GBPS)  return IB_PATH_RECORD_RATE_80_GBS;
+	if (ib_rate == IB_RATE_120_GBPS) return IB_PATH_RECORD_RATE_120_GBS;
+	return 0;
+}
+
+int from_av(
+	IN	const	struct ib_device	*p_ib_dev,
+	IN		struct ib_qp_attr	*p_ib_qp_attr,
+	IN		struct ib_ah_attr	*p_ib_ah_attr,
+	OUT		ib_av_attr_t		*p_ib_av_attr)
+{
+	int err = 0;
+
+	p_ib_av_attr->port_num = p_ib_ah_attr->port_num;
+	p_ib_av_attr->sl = p_ib_ah_attr->sl;
+	p_ib_av_attr->dlid = cl_hton16(p_ib_ah_attr->dlid);
+	p_ib_av_attr->static_rate = from_rate(p_ib_ah_attr->static_rate);
+	p_ib_av_attr->path_bits = p_ib_ah_attr->src_path_bits;
+
+	if (p_ib_qp_attr) {
+		p_ib_av_attr->conn.path_mtu = p_ib_qp_attr->path_mtu;			// MTU
+		p_ib_av_attr->conn.local_ack_timeout = p_ib_qp_attr->timeout;		// ack timeout
+		p_ib_av_attr->conn.seq_err_retry_cnt = p_ib_qp_attr->retry_cnt;	// retry count
+		p_ib_av_attr->conn.rnr_retry_cnt = p_ib_qp_attr->rnr_retry;		// RNR retry count
+	}
+
+	if (p_ib_ah_attr->ah_flags & IB_AH_GRH) {
+		p_ib_av_attr->grh_valid = TRUE;
+		p_ib_av_attr->grh.hop_limit = p_ib_ah_attr->grh.hop_limit;
+		p_ib_av_attr->grh.ver_class_flow = ib_grh_set_ver_class_flow(
+			0, p_ib_ah_attr->grh.traffic_class, p_ib_ah_attr->grh.flow_label );
+		RtlCopyMemory(p_ib_av_attr->grh.dest_gid.raw,
+			p_ib_ah_attr->grh.dgid.raw, sizeof(p_ib_av_attr->grh.dest_gid));
+		err = ib_get_cached_gid((struct ib_device *)p_ib_dev,
+			p_ib_ah_attr->port_num, p_ib_ah_attr->grh.sgid_index,
+			(union ib_gid*)p_ib_av_attr->grh.src_gid.raw );
+		if (err) {
+			HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_SHIM ,
+				("ib_get_cached_gid failed %d (%#x). Using default: sgid_index = 0\n", err, err));
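+			/* Editorial note (not in the original patch): err is
+			 * propagated to the caller below, so despite the "Using
+			 * default" wording a cached-GID lookup failure aborts the
+			 * whole mlnx_conv_qp_attr() conversion; no fallback to
+			 * sgid_index 0 actually takes place. */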
+		}
+	}
+	else
+		p_ib_av_attr->grh_valid = FALSE;
+
+
+	return err;
+}
+
+ib_apm_state_t from_apm_state(enum ib_mig_state apm)
+{
+	if (apm == IB_MIG_MIGRATED) return IB_APM_MIGRATED;
+	if (apm == IB_MIG_REARM) return IB_APM_REARM;
+	if (apm == IB_MIG_ARMED) return IB_APM_ARMED;
+	return 0xffffffff;
+}
+
+ib_api_status_t
+mlnx_conv_qp_attr(
+	IN	const	struct ib_qp		*p_ib_qp,
+	IN		struct ib_qp_attr	*p_ib_qp_attr,
+	OUT		ib_qp_attr_t		*p_qp_attr
+	)
+{
+	int err;
+
+	RtlZeroMemory( p_qp_attr, sizeof *p_qp_attr );
+	p_qp_attr->h_pd = (ib_pd_handle_t)p_ib_qp->pd;
+	p_qp_attr->qp_type = p_ib_qp->qp_type;
+	p_qp_attr->access_ctrl = map_qp_mthca_acl(p_ib_qp_attr->qp_access_flags);
+	p_qp_attr->pkey_index = p_ib_qp_attr->pkey_index;
+
+	p_qp_attr->sq_max_inline = p_ib_qp_attr->cap.max_inline_data;
+	p_qp_attr->sq_depth = p_ib_qp_attr->cap.max_send_wr;
+	p_qp_attr->rq_depth = p_ib_qp_attr->cap.max_recv_wr;
+	p_qp_attr->sq_sge = p_ib_qp_attr->cap.max_send_sge;
+	p_qp_attr->rq_sge = p_ib_qp_attr->cap.max_recv_sge;
+	p_qp_attr->init_depth = p_ib_qp_attr->max_rd_atomic;
+	p_qp_attr->resp_res = p_ib_qp_attr->max_dest_rd_atomic;
+
+	p_qp_attr->h_sq_cq = (ib_cq_handle_t)p_ib_qp->send_cq;
+	p_qp_attr->h_rq_cq = (ib_cq_handle_t)p_ib_qp->recv_cq;
+	p_qp_attr->h_srq = (ib_srq_handle_t)p_ib_qp->srq;
+
+	p_qp_attr->sq_signaled = (((struct mthca_qp *)p_ib_qp)->sq_policy == IB_SIGNAL_ALL_WR) ? TRUE : FALSE;
+
+	p_qp_attr->state = mlnx_qps_to_ibal( p_ib_qp_attr->qp_state);
+	p_qp_attr->num = cl_hton32(p_ib_qp->qp_num);
+	p_qp_attr->dest_num = cl_hton32(p_ib_qp_attr->dest_qp_num);
+	p_qp_attr->qkey = cl_hton32(p_ib_qp_attr->qkey);
+
+	p_qp_attr->sq_psn = cl_hton32(p_ib_qp_attr->sq_psn);
+	p_qp_attr->rq_psn = cl_hton32(p_ib_qp_attr->rq_psn);
+
+	p_qp_attr->primary_port = p_ib_qp_attr->port_num;
+	p_qp_attr->alternate_port = p_ib_qp_attr->alt_port_num;
+	err = from_av( p_ib_qp->device, p_ib_qp_attr, &p_ib_qp_attr->ah_attr, &p_qp_attr->primary_av);
+	if (err)
+		goto err_av;
+	err = from_av( p_ib_qp->device, p_ib_qp_attr, &p_ib_qp_attr->alt_ah_attr, &p_qp_attr->alternate_av);
+	if (err)
+		goto err_av;
+	p_qp_attr->apm_state = from_apm_state(p_ib_qp_attr->path_mig_state);
+
+	return IB_SUCCESS;
+
+err_av:
+	return errno_to_iberr(err);
+}
+
diff --git a/trunk/hw/mthca/kernel/hca_data.h b/trunk/hw/mthca/kernel/hca_data.h
index bb3bfa1f..57d4c513 100644
--- a/trunk/hw/mthca/kernel/hca_data.h
+++ b/trunk/hw/mthca/kernel/hca_data.h
@@ -385,4 +385,11 @@ mlnx_modify_ah(
 
 void set_skip_tavor_reset();
 
+ib_api_status_t
+mlnx_conv_qp_attr(
+	IN	const	struct ib_qp		*p_ib_qp,
+	IN		struct ib_qp_attr	*p_ib_qp_attr,
+	OUT		ib_qp_attr_t		*p_qp_attr
+	);
+
 #endif
diff --git a/trunk/hw/mthca/kernel/hca_verbs.c b/trunk/hw/mthca/kernel/hca_verbs.c
index 87c5586e..3bd5acc5 100644
--- a/trunk/hw/mthca/kernel/hca_verbs.c
+++ b/trunk/hw/mthca/kernel/hca_verbs.c
@@ -1283,58 +1283,68 @@ mlnx_ndi_modify_qp (
 	return status;
 }
 
+
+
 ib_api_status_t
 mlnx_query_qp (
 	IN	const	ib_qp_handle_t		h_qp,
 	OUT		ib_qp_attr_t		*p_qp_attr,
 	IN OUT		ci_umv_buf_t		*p_umv_buf )
 {
+	int err;
+	int qp_attr_mask = 0;
 	ib_api_status_t status = IB_SUCCESS;
 	struct ib_qp *ib_qp_p = (struct ib_qp *)h_qp;
+	struct ib_qp_attr qp_attr;
+	struct ib_qp_init_attr qp_init_attr;
 	struct mthca_qp *qp_p = (struct mthca_qp *)ib_qp_p;
 
 	UNREFERENCED_PARAMETER(p_umv_buf);
 
 	HCA_ENTER( HCA_DBG_QP);
 
-	// sanity checks
-
-	// clean the structure
-	RtlZeroMemory( p_qp_attr, sizeof *p_qp_attr );
-
-	// fill the structure
-	//TODO: this function is to be implemented via ibv_query_qp, which is not supported now
-	p_qp_attr->h_pd = (ib_pd_handle_t)qp_p->ibqp.pd;
-	p_qp_attr->qp_type = qp_p->ibqp.qp_type;
-	p_qp_attr->sq_max_inline = qp_p->qp_init_attr.cap.max_inline_data;
-	p_qp_attr->sq_depth = qp_p->qp_init_attr.cap.max_send_wr;
-	p_qp_attr->rq_depth = qp_p->qp_init_attr.cap.max_recv_wr;
-	p_qp_attr->sq_sge = qp_p->qp_init_attr.cap.max_send_sge;
-	p_qp_attr->rq_sge = qp_p->qp_init_attr.cap.max_recv_sge;
-	p_qp_attr->resp_res = qp_p->resp_depth;
-	p_qp_attr->h_sq_cq = (ib_cq_handle_t)qp_p->ibqp.send_cq;
-	p_qp_attr->h_rq_cq = (ib_cq_handle_t)qp_p->ibqp.recv_cq;
-	p_qp_attr->sq_signaled = qp_p->sq_policy == IB_SIGNAL_ALL_WR;
-	p_qp_attr->state = mlnx_qps_to_ibal( qp_p->state );
-	p_qp_attr->num = cl_hton32(qp_p->ibqp.qp_num);
-
-#ifdef WIN_TO_BE_CHANGED
-//TODO: don't know how to fill the following fields without support of query_qp in MTHCA
-	p_qp_attr->access_ctrl = qp_p->
-	p_qp_attr->pkey_index = qp_p->
-	p_qp_attr->dest_num = qp_p-
-	p_qp_attr->init_depth = qp_p-
-	p_qp_attr->qkey = qp_p-
-	p_qp_attr->sq_psn = qp_p-
-	p_qp_attr->rq_psn = qp_p-
-	p_qp_attr->primary_port = qp_p-
-	p_qp_attr->alternate_port = qp_p-
-	p_qp_attr->primary_av = qp_p-
-	p_qp_attr->alternate_av = qp_p-
-	p_qp_attr->apm_state = qp_p-
-#endif
-	status = IB_SUCCESS;
+	// sanity checks
+	if (!p_qp_attr) {
+		status = IB_INVALID_PARAMETER;
+		goto err_parm;
+	}
+
+	memset( &qp_attr, 0, sizeof(struct ib_qp_attr) );
+
+	if (qp_p->state == IBQPS_RESET) {
+		// the QP doesn't yet exist in HW - fill what we can fill now
+		p_qp_attr->h_pd = (ib_pd_handle_t)qp_p->ibqp.pd;
+		p_qp_attr->qp_type = qp_p->ibqp.qp_type;
+		p_qp_attr->sq_max_inline = qp_p->qp_init_attr.cap.max_inline_data;
+		p_qp_attr->sq_depth = qp_p->qp_init_attr.cap.max_send_wr;
+		p_qp_attr->rq_depth = qp_p->qp_init_attr.cap.max_recv_wr;
+		p_qp_attr->sq_sge = qp_p->qp_init_attr.cap.max_send_sge;
+		p_qp_attr->rq_sge = qp_p->qp_init_attr.cap.max_recv_sge;
+		p_qp_attr->resp_res = qp_p->resp_depth;
+		p_qp_attr->h_sq_cq = (ib_cq_handle_t)qp_p->ibqp.send_cq;
+		p_qp_attr->h_rq_cq = (ib_cq_handle_t)qp_p->ibqp.recv_cq;
+		p_qp_attr->sq_signaled = qp_p->sq_policy == IB_SIGNAL_ALL_WR;
+		p_qp_attr->state = mlnx_qps_to_ibal( qp_p->state );
+		p_qp_attr->num = cl_hton32(qp_p->ibqp.qp_num);
+		p_qp_attr->primary_port = qp_p->qp_init_attr.port_num;
+	}
+	else {
+		//request the info from the card
+		err = ib_qp_p->device->query_qp( ib_qp_p, &qp_attr,
+			qp_attr_mask, &qp_init_attr);
+		if (err){
+			status = errno_to_iberr(err);
+			HCA_PRINT(TRACE_LEVEL_ERROR,HCA_DBG_QP,
+				("ib_query_qp failed (%#x)\n", status));
+			goto err_query_qp;
+		}
+
+		// convert the results back to IBAL
+		status = mlnx_conv_qp_attr( ib_qp_p, &qp_attr, p_qp_attr );
+	}
+
+err_query_qp:
+err_parm:
 	HCA_EXIT(HCA_DBG_QP);
 	return status;
 }
diff --git a/trunk/hw/mthca/kernel/ib_verbs.h b/trunk/hw/mthca/kernel/ib_verbs.h
index 625ede84..bc24e4d2 100644
--- a/trunk/hw/mthca/kernel/ib_verbs.h
+++ b/trunk/hw/mthca/kernel/ib_verbs.h
@@ -170,6 +170,19 @@ enum ib_port_width {
 	IB_WIDTH_12X	= 8
 };
 
+enum ib_rate {
+	IB_RATE_PORT_CURRENT = 0,
+	IB_RATE_2_5_GBPS = 2,
+	IB_RATE_5_GBPS   = 5,
+	IB_RATE_10_GBPS  = 3,
+	IB_RATE_20_GBPS  = 6,
+	IB_RATE_30_GBPS  = 4,
+	IB_RATE_40_GBPS  = 7,
+	IB_RATE_60_GBPS  = 8,
+	IB_RATE_80_GBPS  = 9,
+	IB_RATE_120_GBPS = 10
+};
+
 static inline int ib_width_enum_to_int(enum ib_port_width width)
 {
 	switch (width) {
@@ -407,6 +420,11 @@ enum ib_qp_state {
 	IBQPS_ERR
 };
 
+enum ib_mig_state {
+	IB_MIG_MIGRATED,
+	IB_MIG_REARM,
+	IB_MIG_ARMED
+};
 
 struct ib_qp_attr {
 	enum ib_qp_state	qp_state;
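Editorial note (not part of the original patch): the new enum ib_rate values look
shuffled, but they follow the IBA encoding of static rates (2 = 2.5 Gb/s,
3 = 10 Gb/s, 4 = 30 Gb/s, and so on), which is why from_rate() in hca_data.c maps
them case by case rather than arithmetically. A minimal standalone sketch of that
encoding; the helper rate_to_gbps_x10() is illustrative only and exists nowhere in
the tree:

/* Maps the IBA static-rate encoding used by enum ib_rate to
 * bandwidth in units of 0.1 Gb/s; -1 for unknown encodings. */
static int rate_to_gbps_x10(int rate)
{
	switch (rate) {
	case 2:  return 25;	/* IB_RATE_2_5_GBPS */
	case 5:  return 50;	/* IB_RATE_5_GBPS */
	case 3:  return 100;	/* IB_RATE_10_GBPS */
	case 6:  return 200;	/* IB_RATE_20_GBPS */
	case 4:  return 300;	/* IB_RATE_30_GBPS */
	case 7:  return 400;	/* IB_RATE_40_GBPS */
	case 8:  return 600;	/* IB_RATE_60_GBPS */
	case 9:  return 800;	/* IB_RATE_80_GBPS */
	case 10: return 1200;	/* IB_RATE_120_GBPS */
	default: return -1;	/* including IB_RATE_PORT_CURRENT (0) */
	}
}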
diff --git a/trunk/hw/mthca/kernel/mt_pa_cash.c b/trunk/hw/mthca/kernel/mt_pa_cash.c
index 3b0f27bc..7ff2baf6 100644
--- a/trunk/hw/mthca/kernel/mt_pa_cash.c
+++ b/trunk/hw/mthca/kernel/mt_pa_cash.c
@@ -289,7 +289,7 @@ void pa_deregister(mt_iobuf_t *iobuf_p)
 
 void pa_cash_print()
 {
-	HCA_PRINT(TRACE_LEVEL_WARNING ,HCA_DBG_LOW,
+	HCA_PRINT(TRACE_LEVEL_INFORMATION ,HCA_DBG_LOW,
 		("pa_cash_print: max_nr_pages %d (%#x), cur_nr_pages %d (%#x), free_list_hdr %d, free_threshold %d\n",
 		g_cash.max_nr_pages, g_cash.max_nr_pages,
 		g_cash.cur_nr_pages, g_cash.cur_nr_pages,
diff --git a/trunk/hw/mthca/kernel/mthca_dev.h b/trunk/hw/mthca/kernel/mthca_dev.h
index ea7715ad..a3dd1774 100644
--- a/trunk/hw/mthca/kernel/mthca_dev.h
+++ b/trunk/hw/mthca/kernel/mthca_dev.h
@@ -509,6 +509,8 @@ int mthca_arbel_post_srq_recv(struct ib_srq *srq, struct _ib_recv_wr *wr,
 void mthca_qp_event(struct mthca_dev *dev, u32 qpn,
 		    enum ib_event_type event_type, u8 vendor_code);
 int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask);
+int mthca_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr_mask,
+		   struct ib_qp_init_attr *qp_init_attr);
 int mthca_tavor_post_send(struct ib_qp *ibqp, struct _ib_send_wr *wr,
 			  struct _ib_send_wr **bad_wr);
 int mthca_tavor_post_recv(struct ib_qp *ibqp, struct _ib_recv_wr *wr,
diff --git a/trunk/hw/mthca/kernel/mthca_provider.c b/trunk/hw/mthca/kernel/mthca_provider.c
index 60d80799..817ccece 100644
--- a/trunk/hw/mthca/kernel/mthca_provider.c
+++ b/trunk/hw/mthca/kernel/mthca_provider.c
@@ -1286,6 +1286,7 @@ int mthca_register_device(struct mthca_dev *dev)
 	dev->ib_dev.create_qp = mthca_create_qp;
 	dev->ib_dev.modify_qp = mthca_modify_qp;
+	dev->ib_dev.query_qp = mthca_query_qp;
 	dev->ib_dev.destroy_qp = mthca_destroy_qp;
 	dev->ib_dev.create_cq = mthca_create_cq;
 	dev->ib_dev.destroy_cq = mthca_destroy_cq;
diff --git a/trunk/hw/mthca/kernel/mthca_qp.c b/trunk/hw/mthca/kernel/mthca_qp.c
index 62224d74..8242c62b 100644
--- a/trunk/hw/mthca/kernel/mthca_qp.c
+++ b/trunk/hw/mthca/kernel/mthca_qp.c
@@ -442,6 +442,169 @@ static int to_mthca_st(int transport)
 	}
 }
 
+static inline enum ib_qp_state to_ib_qp_state(int mthca_state)
+{
+	switch (mthca_state) {
+	case MTHCA_QP_STATE_RST:      return IBQPS_RESET;
+	case MTHCA_QP_STATE_INIT:     return IBQPS_INIT;
+	case MTHCA_QP_STATE_RTR:      return IBQPS_RTR;
+	case MTHCA_QP_STATE_RTS:      return IBQPS_RTS;
+	case MTHCA_QP_STATE_SQD:      return IBQPS_SQD;
+	case MTHCA_QP_STATE_DRAINING: return IBQPS_SQD;
+	case MTHCA_QP_STATE_SQE:      return IBQPS_SQE;
+	case MTHCA_QP_STATE_ERR:      return IBQPS_ERR;
+	default:                      return -1;
+	}
+}
+
+static inline enum ib_mig_state to_ib_mig_state(int mthca_mig_state)
+{
+	switch (mthca_mig_state) {
+	case 0:  return IB_MIG_ARMED;
+	case 1:  return IB_MIG_REARM;
+	case 3:  return IB_MIG_MIGRATED;
+	default: return -1;
+	}
+}
+
+static int to_ib_qp_access_flags(int mthca_flags)
+{
+	int ib_flags = 0;
+
+	if (mthca_flags & MTHCA_QP_BIT_RRE)
+		ib_flags |= MTHCA_ACCESS_REMOTE_READ;
+	if (mthca_flags & MTHCA_QP_BIT_RWE)
+		ib_flags |= MTHCA_ACCESS_REMOTE_WRITE;
+	if (mthca_flags & MTHCA_QP_BIT_RAE)
+		ib_flags |= MTHCA_ACCESS_REMOTE_ATOMIC;
+
+	return ib_flags;
+}
+
+static void to_ib_ah_attr(struct mthca_dev *dev, struct ib_ah_attr *ib_ah_attr,
+				struct mthca_qp_path *path)
+{
+	memset(ib_ah_attr, 0, sizeof *ib_ah_attr);
+	ib_ah_attr->port_num = (u8)((cl_ntoh32(path->port_pkey) >> 24) & 0x3);
+
+	if (ib_ah_attr->port_num == 0 || ib_ah_attr->port_num > dev->limits.num_ports)
+		return;
+
+	ib_ah_attr->dlid = cl_ntoh16(path->rlid);
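+	/* Editorial note (not in the original patch): the QP context keeps
+	 * these fields in network byte order, hence the cl_ntoh16/cl_ntoh32
+	 * conversions; the SL sits in the top 4 bits of sl_tclass_flowlabel
+	 * and the GRH flag in bit 7 of g_mylmc (tested below). */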
+	ib_ah_attr->sl = (u8)(cl_ntoh32(path->sl_tclass_flowlabel) >> 28);
+	ib_ah_attr->src_path_bits = path->g_mylmc & 0x7f;
+	//TODO: workaround: always report full speed - really, it's much more complicated
+	ib_ah_attr->static_rate = 0;
+	ib_ah_attr->ah_flags = (path->g_mylmc & (1 << 7)) ? IB_AH_GRH : 0;
+	if (ib_ah_attr->ah_flags) {
+		ib_ah_attr->grh.sgid_index = (u8)(path->mgid_index & (dev->limits.gid_table_len - 1));
+		ib_ah_attr->grh.hop_limit = path->hop_limit;
+		ib_ah_attr->grh.traffic_class =
+			(u8)((cl_ntoh32(path->sl_tclass_flowlabel) >> 20) & 0xff);
+		ib_ah_attr->grh.flow_label =
+			cl_ntoh32(path->sl_tclass_flowlabel) & 0xfffff;
+		memcpy(ib_ah_attr->grh.dgid.raw,
+			path->rgid, sizeof ib_ah_attr->grh.dgid.raw);
+	}
+}
+
+int mthca_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr_mask,
+		   struct ib_qp_init_attr *qp_init_attr)
+{
+	struct mthca_dev *dev = to_mdev(ibqp->device);
+	struct mthca_qp *qp = to_mqp(ibqp);
+	int err = 0;
+	struct mthca_mailbox *mailbox = NULL;
+	struct mthca_qp_param *qp_param;
+	struct mthca_qp_context *context;
+	int mthca_state;
+	u8 status;
+
+	UNUSED_PARAM(qp_attr_mask);
+
+	down( &qp->mutex );
+
+	if (qp->state == IBQPS_RESET) {
+		qp_attr->qp_state = IBQPS_RESET;
+		goto done;
+	}
+
+	mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
+	if (IS_ERR(mailbox)) {
+		err = PTR_ERR(mailbox);
+		goto out;
+	}
+
+	err = mthca_QUERY_QP(dev, qp->qpn, 0, mailbox, &status);
+	if (err)
+		goto out_mailbox;
+	if (status) {
+		HCA_PRINT(TRACE_LEVEL_WARNING ,HCA_DBG_QP,
+			("QUERY_QP returned status %02x\n", status));
+		err = -EINVAL;
+		goto out_mailbox;
+	}
+
+	qp_param = mailbox->buf;
+	context = &qp_param->context;
+	mthca_state = cl_ntoh32(context->flags) >> 28;
+
+	qp->state = to_ib_qp_state(mthca_state);
+	qp_attr->qp_state = qp->state;
+	qp_attr->path_mtu = context->mtu_msgmax >> 5;
+	qp_attr->path_mig_state =
+		to_ib_mig_state((cl_ntoh32(context->flags) >> 11) & 0x3);
+	qp_attr->qkey = cl_ntoh32(context->qkey);
+	qp_attr->rq_psn = cl_ntoh32(context->rnr_nextrecvpsn) & 0xffffff;
+	qp_attr->sq_psn = cl_ntoh32(context->next_send_psn) & 0xffffff;
+	qp_attr->dest_qp_num = cl_ntoh32(context->remote_qpn) & 0xffffff;
+	qp_attr->qp_access_flags =
+		to_ib_qp_access_flags(cl_ntoh32(context->params2));
+
+	if (qp->transport == RC || qp->transport == UC) {
+		to_ib_ah_attr(dev, &qp_attr->ah_attr, &context->pri_path);
+		to_ib_ah_attr(dev, &qp_attr->alt_ah_attr, &context->alt_path);
+		qp_attr->alt_pkey_index =
+			(u16)(cl_ntoh32(context->alt_path.port_pkey) & 0x7f);
+		qp_attr->alt_port_num = qp_attr->alt_ah_attr.port_num;
+	}
+
+	qp_attr->pkey_index = (u16)(cl_ntoh32(context->pri_path.port_pkey) & 0x7f);
+	qp_attr->port_num =
+		(u8)((cl_ntoh32(context->pri_path.port_pkey) >> 24) & 0x3);
+
+	/* qp_attr->en_sqd_async_notify is only applicable in modify qp */
+	qp_attr->sq_draining = (u8)(mthca_state == MTHCA_QP_STATE_DRAINING);
+
+	qp_attr->max_rd_atomic = (u8)(1 << ((cl_ntoh32(context->params1) >> 21) & 0x7));
+
+	qp_attr->max_dest_rd_atomic =
+		(u8)(1 << ((cl_ntoh32(context->params2) >> 21) & 0x7));
+	qp_attr->min_rnr_timer =
+		(u8)((cl_ntoh32(context->rnr_nextrecvpsn) >> 24) & 0x1f);
+	qp_attr->timeout = context->pri_path.ackto >> 3;
+	qp_attr->retry_cnt = (u8)((cl_ntoh32(context->params1) >> 16) & 0x7);
+	qp_attr->rnr_retry = context->pri_path.rnr_retry >> 5;
+	qp_attr->alt_timeout = context->alt_path.ackto >> 3;
+
+done:
+	qp_attr->cur_qp_state = qp_attr->qp_state;
+	qp_attr->cap.max_send_wr = qp->sq.max;
+	qp_attr->cap.max_recv_wr = qp->rq.max;
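+	/* Editorial note (not in the original patch): this done: block also
+	 * runs for QPs in RESET, which bypass the firmware QUERY_QP above --
+	 * only these software-visible capabilities are meaningful then. */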
+	qp_attr->cap.max_send_sge = qp->sq.max_gs;
+	qp_attr->cap.max_recv_sge = qp->rq.max_gs;
+	qp_attr->cap.max_inline_data = qp->max_inline_data;
+
+	qp_init_attr->cap = qp_attr->cap;
+
+out_mailbox:
+	/* mailbox is NULL when we arrive here via done: from the RESET path */
+	if (mailbox)
+		mthca_free_mailbox(dev, mailbox);
+
+out:
+	up(&qp->mutex);
+	return err;
+}
+
 static void store_attrs(struct mthca_sqp *sqp, struct ib_qp_attr *attr,
 			int attr_mask)
 {
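Editorial note (not part of the original patch): the call chain added by this
commit is IBAL -> mlnx_query_qp() (hca_verbs.c) -> the ib_device function-table
slot that mthca_register_device() now fills -> mthca_query_qp() (mthca_qp.c),
which issues the QUERY_QP firmware command and decodes the returned QP context.
A sketch of a kernel-side caller, assuming the mthca headers are available;
example_dump_qp_state() is hypothetical and appears nowhere in the tree:

/* Hypothetical consumer of the new verb, mirroring how mlnx_query_qp()
 * itself calls through the ib_device function table. */
static int example_dump_qp_state(struct ib_qp *qp)
{
	struct ib_qp_attr attr;
	struct ib_qp_init_attr init_attr;
	int err;

	memset( &attr, 0, sizeof(struct ib_qp_attr) );

	/* qp_attr_mask is ignored by mthca_query_qp (UNUSED_PARAM), so 0 is fine */
	err = qp->device->query_qp( qp, &attr, 0, &init_attr );
	if (err)
		return err;

	HCA_PRINT(TRACE_LEVEL_INFORMATION, HCA_DBG_QP,
		("QP %d: state %d, dest_qp %d, sq_psn %#x, rq_psn %#x\n",
		qp->qp_num, attr.qp_state, attr.dest_qp_num,
		attr.sq_psn, attr.rq_psn));
	return 0;
}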