void pa_cash_print()
{
/* NOTE(review): this hunk appears corrupted - the MLX4_PRINT argument list
 * below is cut off mid-call and an unrelated mthca_set_av_params() call is
 * spliced in before the closing brace. Regenerate this diff against the
 * original sources before applying. */
- MLX4_PRINT(TRACE_LEVEL_WARNING ,MLX4_DBG_LOW,
+ MLX4_PRINT(TRACE_LEVEL_INFORMATION ,MLX4_DBG_LOW,
("pa_cash_print: max_nr_pages %d (%#x), cur_nr_pages %d (%#x), free_list_hdr %d, free_threshold %d\n",
g_cash.max_nr_pages, g_cash.max_nr_pages,
g_cash.cur_nr_pages, g_cash.cur_nr_pages,
mthca_set_av_params(dev_p, (struct mthca_ah *)ib_ah_p, (struct ib_ah_attr *)ah_attr_p );
}
\r
+/* Map a Linux-style enum ib_rate value to the IBAL IB_PATH_RECORD_RATE_*
+ * static-rate encoding.
+ * Returns 0 for any unrecognized rate (e.g. IB_RATE_PORT_CURRENT). */
+uint8_t from_rate(enum ib_rate ib_rate)
+{
+	if (ib_rate == IB_RATE_2_5_GBPS) return IB_PATH_RECORD_RATE_2_5_GBS;
+	if (ib_rate == IB_RATE_5_GBPS) return IB_PATH_RECORD_RATE_5_GBS;
+	if (ib_rate == IB_RATE_10_GBPS) return IB_PATH_RECORD_RATE_10_GBS;
+	if (ib_rate == IB_RATE_20_GBPS) return IB_PATH_RECORD_RATE_20_GBS;
+	if (ib_rate == IB_RATE_30_GBPS) return IB_PATH_RECORD_RATE_30_GBS;
+	if (ib_rate == IB_RATE_40_GBPS) return IB_PATH_RECORD_RATE_40_GBS;
+	if (ib_rate == IB_RATE_60_GBPS) return IB_PATH_RECORD_RATE_60_GBS;
+	if (ib_rate == IB_RATE_80_GBPS) return IB_PATH_RECORD_RATE_80_GBS;
+	if (ib_rate == IB_RATE_120_GBPS) return IB_PATH_RECORD_RATE_120_GBS;
+	return 0;
+}
+\r
+/* Translate a Linux-style address handle (ib_ah_attr), plus optional QP
+ * attributes, into the IBAL av attribute structure (ib_av_attr_t).
+ * Returns 0 on success, or the error from ib_get_cached_gid() when the
+ * source-GID lookup fails. */
+int from_av(
+	IN const struct ib_device *p_ib_dev,
+	IN struct ib_qp_attr *p_ib_qp_attr,
+	IN struct ib_ah_attr *p_ib_ah_attr,
+	OUT ib_av_attr_t *p_ib_av_attr)
+{
+	int err = 0;
+	
+	p_ib_av_attr->port_num = p_ib_ah_attr->port_num;
+	p_ib_av_attr->sl = p_ib_ah_attr->sl;
+	p_ib_av_attr->dlid = cl_hton16(p_ib_ah_attr->dlid);
+	p_ib_av_attr->static_rate = from_rate(p_ib_ah_attr->static_rate);
+	p_ib_av_attr->path_bits = p_ib_ah_attr->src_path_bits;
+
+	if (p_ib_qp_attr) {
+		p_ib_av_attr->conn.path_mtu = p_ib_qp_attr->path_mtu; // path MTU
+		p_ib_av_attr->conn.local_ack_timeout = p_ib_qp_attr->timeout; // local ACK timeout
+		p_ib_av_attr->conn.seq_err_retry_cnt = p_ib_qp_attr->retry_cnt; // sequence-error retry count
+		p_ib_av_attr->conn.rnr_retry_cnt = p_ib_qp_attr->rnr_retry; // RNR retry count
+	}
+
+	if (p_ib_ah_attr->ah_flags & IB_AH_GRH) {
+		p_ib_av_attr->grh_valid = TRUE;
+		p_ib_av_attr->grh.hop_limit = p_ib_ah_attr->grh.hop_limit;
+		p_ib_av_attr->grh.ver_class_flow = ib_grh_set_ver_class_flow(
+			0, p_ib_ah_attr->grh.traffic_class, p_ib_ah_attr->grh.flow_label );
+		RtlCopyMemory(p_ib_av_attr->grh.dest_gid.raw, 
+			p_ib_ah_attr->grh.dgid.raw, sizeof(p_ib_av_attr->grh.dest_gid));
+		err = ib_get_cached_gid((struct ib_device *)p_ib_dev, 
+			p_ib_ah_attr->port_num, p_ib_ah_attr->grh.sgid_index,
+			(union ib_gid*)p_ib_av_attr->grh.src_gid.raw );
+		if (err) {
+			// NOTE(review): the message claims a default sgid_index is
+			// used, but 'err' is propagated to the caller (which treats it
+			// as a failure) - confirm which behavior is intended.
+			HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_SHIM ,
+				("ib_get_cached_gid failed %d (%#x). Using default: sgid_index = 0\n", err, err));
+		}
+	}
+	else
+		p_ib_av_attr->grh_valid = FALSE;
+	
+
+	return err;
+}
+\r
+/* Map a Linux-style enum ib_mig_state (automatic path migration state)
+ * to the IBAL ib_apm_state_t.
+ * Returns 0xffffffff for an unrecognized migration state. */
+ib_apm_state_t from_apm_state(enum ib_mig_state apm)
+{
+	if (apm == IB_MIG_MIGRATED) return IB_APM_MIGRATED;
+	if (apm == IB_MIG_REARM) return IB_APM_REARM;
+	if (apm == IB_MIG_ARMED) return IB_APM_ARMED;
+	return 0xffffffff;
+}
+\r
+/* Convert Linux-style QP attributes (struct ib_qp / ib_qp_attr, as filled in
+ * by the device's query_qp method) into the IBAL representation
+ * (ib_qp_attr_t).
+ * Returns IB_SUCCESS, or the IBAL status mapped from a from_av() error. */
+ib_api_status_t
+mlnx_conv_qp_attr(
+	IN const struct ib_qp *p_ib_qp,
+	IN struct ib_qp_attr *p_ib_qp_attr,
+	OUT ib_qp_attr_t *p_qp_attr
+	)
+{
+	int err;
+	// zero first so any field not explicitly converted below reads as 0
+	RtlZeroMemory( p_qp_attr, sizeof *p_qp_attr );
+	p_qp_attr->h_pd = (ib_pd_handle_t)p_ib_qp->pd;
+	p_qp_attr->qp_type = p_ib_qp->qp_type;
+	p_qp_attr->access_ctrl = map_qp_mthca_acl(p_ib_qp_attr->qp_access_flags);
+	p_qp_attr->pkey_index = p_ib_qp_attr->pkey_index;
+
+	p_qp_attr->sq_max_inline = p_ib_qp_attr->cap.max_inline_data;
+	p_qp_attr->sq_depth = p_ib_qp_attr->cap.max_send_wr;
+	p_qp_attr->rq_depth = p_ib_qp_attr->cap.max_recv_wr;
+	p_qp_attr->sq_sge = p_ib_qp_attr->cap.max_send_sge;
+	p_qp_attr->rq_sge = p_ib_qp_attr->cap.max_recv_sge;
+	p_qp_attr->init_depth = p_ib_qp_attr->max_rd_atomic;
+	p_qp_attr->resp_res = p_ib_qp_attr->max_dest_rd_atomic;
+
+	p_qp_attr->h_sq_cq = (ib_cq_handle_t)p_ib_qp->send_cq;
+	p_qp_attr->h_rq_cq = (ib_cq_handle_t)p_ib_qp->recv_cq;
+	p_qp_attr->h_srq = (ib_srq_handle_t)p_ib_qp->srq;
+
+	p_qp_attr->sq_signaled = (((struct mthca_qp *)p_ib_qp)->sq_policy == IB_SIGNAL_ALL_WR) ? TRUE : FALSE;
+
+	p_qp_attr->state = mlnx_qps_to_ibal( p_ib_qp_attr->qp_state);
+	// IBAL keeps QP numbers, qkey and PSNs in network byte order
+	p_qp_attr->num = cl_hton32(p_ib_qp->qp_num);
+	p_qp_attr->dest_num = cl_hton32(p_ib_qp_attr->dest_qp_num);
+	p_qp_attr->qkey = cl_hton32(p_ib_qp_attr->qkey);
+
+	p_qp_attr->sq_psn = cl_hton32(p_ib_qp_attr->sq_psn);
+	p_qp_attr->rq_psn = cl_hton32(p_ib_qp_attr->rq_psn);
+
+	p_qp_attr->primary_port = p_ib_qp_attr->port_num;
+	p_qp_attr->alternate_port = p_ib_qp_attr->alt_port_num;
+	err = from_av( p_ib_qp->device, p_ib_qp_attr, &p_ib_qp_attr->ah_attr, &p_qp_attr->primary_av);
+	if (err)
+		goto err_av;
+	// NOTE(review): the alternate AV is converted unconditionally - confirm
+	// this is safe for QPs without a configured alternate path.
+	err = from_av( p_ib_qp->device, p_ib_qp_attr, &p_ib_qp_attr->alt_ah_attr, &p_qp_attr->alternate_av);
+	if (err)
+		goto err_av;
+	p_qp_attr->apm_state = from_apm_state(p_ib_qp_attr->path_mig_state);
+
+	return IB_SUCCESS;
+
+err_av:
+	return errno_to_iberr(err);
+}
+\r
\r
void set_skip_tavor_reset();\r
\r
+ib_api_status_t\r
+mlnx_conv_qp_attr(\r
+ IN const struct ib_qp *p_ib_qp,\r
+ IN struct ib_qp_attr *p_ib_qp_attr,\r
+ OUT ib_qp_attr_t *p_qp_attr\r
+ );\r
+\r
#endif\r
return status;\r
}\r
\r
+\r
+\r
ib_api_status_t
mlnx_query_qp (
	IN const ib_qp_handle_t h_qp,
	OUT ib_qp_attr_t *p_qp_attr,
	IN OUT ci_umv_buf_t *p_umv_buf )
{
+	int err;
+	// mask is ignored by mthca's query_qp - all attributes are always returned
+	int qp_attr_mask = 0;
	ib_api_status_t status = IB_SUCCESS;
	struct ib_qp *ib_qp_p = (struct ib_qp *)h_qp;
+	struct ib_qp_attr qp_attr;
+	struct ib_qp_init_attr qp_init_attr;
	struct mthca_qp *qp_p = (struct mthca_qp *)ib_qp_p;

	UNREFERENCED_PARAMETER(p_umv_buf);

	HCA_ENTER( HCA_DBG_QP);
-	// sanity checks
-
-	// clean the structure
-	RtlZeroMemory( p_qp_attr, sizeof *p_qp_attr );
-	
-	// fill the structure
-	//TODO: this function is to be implemented via ibv_query_qp, which is not supported now 
-	p_qp_attr->h_pd = (ib_pd_handle_t)qp_p->ibqp.pd;
-	p_qp_attr->qp_type = qp_p->ibqp.qp_type;
-	p_qp_attr->sq_max_inline = qp_p->qp_init_attr.cap.max_inline_data;
-	p_qp_attr->sq_depth = qp_p->qp_init_attr.cap.max_send_wr;
-	p_qp_attr->rq_depth = qp_p->qp_init_attr.cap.max_recv_wr;
-	p_qp_attr->sq_sge = qp_p->qp_init_attr.cap.max_send_sge;
-	p_qp_attr->rq_sge = qp_p->qp_init_attr.cap.max_recv_sge;
-	p_qp_attr->resp_res = qp_p->resp_depth;
-	p_qp_attr->h_sq_cq = (ib_cq_handle_t)qp_p->ibqp.send_cq;
-	p_qp_attr->h_rq_cq = (ib_cq_handle_t)qp_p->ibqp.recv_cq;
-	p_qp_attr->sq_signaled = qp_p->sq_policy == IB_SIGNAL_ALL_WR;
-	p_qp_attr->state = mlnx_qps_to_ibal( qp_p->state );
-	p_qp_attr->num = cl_hton32(qp_p->ibqp.qp_num);
-
-#ifdef WIN_TO_BE_CHANGED
-//TODO: don't know how to fill the following fields without support of query_qp in MTHCA 
-	p_qp_attr->access_ctrl = qp_p->
-	p_qp_attr->pkey_index = qp_p->
-	p_qp_attr->dest_num = qp_p-
-	p_qp_attr->init_depth = qp_p-
-	p_qp_attr->qkey = qp_p-
-	p_qp_attr->sq_psn = qp_p-
-	p_qp_attr->rq_psn = qp_p-
-	p_qp_attr->primary_port = qp_p-
-	p_qp_attr->alternate_port = qp_p-
-	p_qp_attr->primary_av = qp_p-
-	p_qp_attr->alternate_av = qp_p-
-	p_qp_attr->apm_state = qp_p-
-#endif 

-	status = IB_SUCCESS;
+	// sanity checks
+	if (!p_qp_attr) {
+		status = IB_INVALID_PARAMETER;
+		goto err_parm;
+	}
+
+	memset( &qp_attr, 0, sizeof(struct ib_qp_attr) );
+
+	if (qp_p->state == IBQPS_RESET) {
+		// the QP doesn't yet exist in HW - fill what we can fill now
+		// NOTE(review): p_qp_attr is no longer zeroed on this path (the old
+		// RtlZeroMemory was removed by this change); fields not assigned
+		// below may be stale - confirm callers pre-zero the structure.
+		p_qp_attr->h_pd = (ib_pd_handle_t)qp_p->ibqp.pd;
+		p_qp_attr->qp_type = qp_p->ibqp.qp_type;
+		p_qp_attr->sq_max_inline = qp_p->qp_init_attr.cap.max_inline_data;
+		p_qp_attr->sq_depth = qp_p->qp_init_attr.cap.max_send_wr;
+		p_qp_attr->rq_depth = qp_p->qp_init_attr.cap.max_recv_wr;
+		p_qp_attr->sq_sge = qp_p->qp_init_attr.cap.max_send_sge;
+		p_qp_attr->rq_sge = qp_p->qp_init_attr.cap.max_recv_sge;
+		p_qp_attr->resp_res = qp_p->resp_depth;
+		p_qp_attr->h_sq_cq = (ib_cq_handle_t)qp_p->ibqp.send_cq;
+		p_qp_attr->h_rq_cq = (ib_cq_handle_t)qp_p->ibqp.recv_cq;
+		p_qp_attr->sq_signaled = qp_p->sq_policy == IB_SIGNAL_ALL_WR;
+		p_qp_attr->state = mlnx_qps_to_ibal( qp_p->state );
+		p_qp_attr->num = cl_hton32(qp_p->ibqp.qp_num);
+		p_qp_attr->primary_port = qp_p->qp_init_attr.port_num;
+	}
+	else {
+		//request the info from the card
+		err = ib_qp_p->device->query_qp( ib_qp_p, &qp_attr, 
+			qp_attr_mask, &qp_init_attr);
+		if (err){
+			status = errno_to_iberr(err);
+			HCA_PRINT(TRACE_LEVEL_ERROR,HCA_DBG_PD,
+				("ib_query_qp failed (%#x)\n", status));
+			goto err_query_qp;
+		}
+		
+		// convert the results back to IBAL
+		status = mlnx_conv_qp_attr( ib_qp_p, &qp_attr, p_qp_attr );
+	}

+err_query_qp:
+err_parm:
	HCA_EXIT(HCA_DBG_QP);
	return status;
}
IB_WIDTH_12X = 8\r
};\r
\r
+/* IB static-rate encodings, matching the Linux enum ib_rate.
+ * The numeric values are wire encodings and are deliberately non-monotonic
+ * (e.g. 10 Gb/s = 3 but 20 Gb/s = 6). */
+enum ib_rate {
+	IB_RATE_PORT_CURRENT = 0,
+	IB_RATE_2_5_GBPS = 2,
+	IB_RATE_5_GBPS = 5,
+	IB_RATE_10_GBPS = 3,
+	IB_RATE_20_GBPS = 6,
+	IB_RATE_30_GBPS = 4,
+	IB_RATE_40_GBPS = 7,
+	IB_RATE_60_GBPS = 8,
+	IB_RATE_80_GBPS = 9,
+	IB_RATE_120_GBPS = 10
+};
+\r
static inline int ib_width_enum_to_int(enum ib_port_width width)\r
{\r
switch (width) {\r
IBQPS_ERR\r
};\r
\r
+/* QP automatic-path-migration (APM) states, matching the Linux
+ * enum ib_mig_state. */
+enum ib_mig_state {
+	IB_MIG_MIGRATED,
+	IB_MIG_REARM,
+	IB_MIG_ARMED
+};
\r
struct ib_qp_attr {\r
enum ib_qp_state qp_state;\r
void pa_cash_print()
{
- HCA_PRINT(TRACE_LEVEL_WARNING ,HCA_DBG_LOW,
+ HCA_PRINT(TRACE_LEVEL_INFORMATION ,HCA_DBG_LOW,
("pa_cash_print: max_nr_pages %d (%#x), cur_nr_pages %d (%#x), free_list_hdr %d, free_threshold %d\n",
g_cash.max_nr_pages, g_cash.max_nr_pages,
g_cash.cur_nr_pages, g_cash.cur_nr_pages,
void mthca_qp_event(struct mthca_dev *dev, u32 qpn,
enum ib_event_type event_type, u8 vendor_code);
int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask);
+int mthca_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr_mask,
+ struct ib_qp_init_attr *qp_init_attr);
int mthca_tavor_post_send(struct ib_qp *ibqp, struct _ib_send_wr *wr,
struct _ib_send_wr **bad_wr);
int mthca_tavor_post_recv(struct ib_qp *ibqp, struct _ib_recv_wr *wr,
\r
dev->ib_dev.create_qp = mthca_create_qp;\r
dev->ib_dev.modify_qp = mthca_modify_qp;\r
+ dev->ib_dev.query_qp = mthca_query_qp;\r
dev->ib_dev.destroy_qp = mthca_destroy_qp;\r
dev->ib_dev.create_cq = mthca_create_cq;\r
dev->ib_dev.destroy_cq = mthca_destroy_cq;\r
}\r
}\r
\r
+/* Convert a hardware QP state (MTHCA_QP_STATE_*) to the generic IBQPS_*
+ * value. DRAINING is reported as SQD (the draining sub-state is exposed
+ * separately via qp_attr->sq_draining); returns -1 for an unknown state. */
+static inline enum ib_qp_state to_ib_qp_state(int mthca_state)
+{
+	switch (mthca_state) {
+	case MTHCA_QP_STATE_RST: return IBQPS_RESET;
+	case MTHCA_QP_STATE_INIT: return IBQPS_INIT;
+	case MTHCA_QP_STATE_RTR: return IBQPS_RTR;
+	case MTHCA_QP_STATE_RTS: return IBQPS_RTS;
+	case MTHCA_QP_STATE_SQD: return IBQPS_SQD;
+	case MTHCA_QP_STATE_DRAINING: return IBQPS_SQD;
+	case MTHCA_QP_STATE_SQE: return IBQPS_SQE;
+	case MTHCA_QP_STATE_ERR: return IBQPS_ERR;
+	default: return -1;
+	}
+}
+\r
+/* Convert the 2-bit hardware path-migration field (extracted from the QP
+ * context flags by the caller) to enum ib_mig_state.
+ * Encodings 0/1/3 are presumably the HW QP-context values - note there is
+ * no case for 2; returns -1 for an unknown encoding. */
+static inline enum ib_mig_state to_ib_mig_state(int mthca_mig_state)
+{
+	switch (mthca_mig_state) {
+	case 0: return IB_MIG_ARMED;
+	case 1: return IB_MIG_REARM;
+	case 3: return IB_MIG_MIGRATED;
+	default: return -1;
+	}
+}
+\r
+/* Translate QP-context remote-access enable bits (MTHCA_QP_BIT_R?E) into
+ * access-flag bits for qp_attr->qp_access_flags.
+ * NOTE(review): the output uses MTHCA_ACCESS_* constants rather than generic
+ * IB_ACCESS_* flags - confirm consumers of qp_access_flags (e.g.
+ * map_qp_mthca_acl) expect this encoding. */
+static int to_ib_qp_access_flags(int mthca_flags)
+{
+	int ib_flags = 0;
+
+	if (mthca_flags & MTHCA_QP_BIT_RRE)
+		ib_flags |= MTHCA_ACCESS_REMOTE_READ;
+	if (mthca_flags & MTHCA_QP_BIT_RWE)
+		ib_flags |= MTHCA_ACCESS_REMOTE_WRITE;
+	if (mthca_flags & MTHCA_QP_BIT_RAE)
+		ib_flags |= MTHCA_ACCESS_REMOTE_ATOMIC;
+
+	return ib_flags;
+}
+\r
+/* Decode a hardware QP path (struct mthca_qp_path) into a Linux-style
+ * ib_ah_attr. Leaves the structure zeroed when the port number decoded from
+ * the context is out of range for this device. */
+static void to_ib_ah_attr(struct mthca_dev *dev, struct ib_ah_attr *ib_ah_attr,
+	struct mthca_qp_path *path)
+{
+	memset(ib_ah_attr, 0, sizeof *ib_ah_attr);
+	// port number lives in bits 24-25 of port_pkey
+	ib_ah_attr->port_num = (u8)((cl_ntoh32(path->port_pkey) >> 24) & 0x3);
+
+	if (ib_ah_attr->port_num == 0 || ib_ah_attr->port_num > dev->limits.num_ports)
+		return;
+
+	ib_ah_attr->dlid = cl_ntoh16(path->rlid);
+	// SL is the top nibble of sl_tclass_flowlabel
+	ib_ah_attr->sl = (u8)(cl_ntoh32(path->sl_tclass_flowlabel) >> 28);
+	ib_ah_attr->src_path_bits = path->g_mylmc & 0x7f;
+	//TODO: work around: set always full speed - really, it's much more complicate
+	ib_ah_attr->static_rate = 0;
+	// bit 7 of g_mylmc flags the presence of a GRH
+	ib_ah_attr->ah_flags = (path->g_mylmc & (1 << 7)) ? IB_AH_GRH : 0;
+	if (ib_ah_attr->ah_flags) {
+		ib_ah_attr->grh.sgid_index = (u8)(path->mgid_index & (dev->limits.gid_table_len - 1));
+		ib_ah_attr->grh.hop_limit = path->hop_limit;
+		// tclass: bits 20-27; flow label: low 20 bits
+		ib_ah_attr->grh.traffic_class =
+			(u8)((cl_ntoh32(path->sl_tclass_flowlabel) >> 20) & 0xff);
+		ib_ah_attr->grh.flow_label =
+			cl_ntoh32(path->sl_tclass_flowlabel) & 0xfffff;
+		memcpy(ib_ah_attr->grh.dgid.raw,
+			path->rgid, sizeof ib_ah_attr->grh.dgid.raw);
+	}
+}
+\r
+/*
+ * mthca_query_qp - return the current attributes of a QP.
+ *
+ * For a QP in the RESET state no context exists in hardware, so only the
+ * software-known fields (state and capabilities) are reported. Otherwise the
+ * QP context is read from the device via mthca_QUERY_QP and decoded into
+ * *qp_attr / qp_init_attr->cap.
+ * qp_attr_mask is ignored: all attributes are always returned.
+ * Returns 0 on success or a negative errno on failure.
+ */
+int mthca_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr_mask,
+	struct ib_qp_init_attr *qp_init_attr)
+{
+	struct mthca_dev *dev = to_mdev(ibqp->device);
+	struct mthca_qp *qp = to_mqp(ibqp);
+	int err = 0;
+	struct mthca_mailbox *mailbox = NULL;
+	struct mthca_qp_param *qp_param;
+	struct mthca_qp_context *context;
+	int mthca_state;
+	u8 status;
+
+	UNUSED_PARAM(qp_attr_mask);
+	
+	down( &qp->mutex );
+
+	if (qp->state == IBQPS_RESET) {
+		// no context in HW - only state and caps are meaningful
+		qp_attr->qp_state = IBQPS_RESET;
+		goto done;
+	}
+
+	mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
+	if (IS_ERR(mailbox)) {
+		err = PTR_ERR(mailbox);
+		mailbox = NULL;	// don't pass an error-encoded pointer to free below
+		goto out;
+	}
+
+	err = mthca_QUERY_QP(dev, qp->qpn, 0, mailbox, &status);
+	if (err)
+		goto out_mailbox;
+	if (status) {
+		HCA_PRINT(TRACE_LEVEL_WARNING ,HCA_DBG_QP,
+			("QUERY_QP returned status %02x\n", status));
+		err = -EINVAL;
+		goto out_mailbox;
+	}
+
+	qp_param = mailbox->buf;
+	context = &qp_param->context;
+	// QP state is kept in the top 4 bits of the context flags
+	mthca_state = cl_ntoh32(context->flags) >> 28;
+
+	qp->state = to_ib_qp_state(mthca_state);
+	qp_attr->qp_state = qp->state;
+	qp_attr->path_mtu = context->mtu_msgmax >> 5;
+	qp_attr->path_mig_state =
+		to_ib_mig_state((cl_ntoh32(context->flags) >> 11) & 0x3);
+	qp_attr->qkey = cl_ntoh32(context->qkey);
+	qp_attr->rq_psn = cl_ntoh32(context->rnr_nextrecvpsn) & 0xffffff;
+	qp_attr->sq_psn = cl_ntoh32(context->next_send_psn) & 0xffffff;
+	qp_attr->dest_qp_num = cl_ntoh32(context->remote_qpn) & 0xffffff;
+	qp_attr->qp_access_flags =
+		to_ib_qp_access_flags(cl_ntoh32(context->params2));
+
+	// address vectors exist only for connected transports
+	if (qp->transport == RC || qp->transport == UC) {
+		to_ib_ah_attr(dev, &qp_attr->ah_attr, &context->pri_path);
+		to_ib_ah_attr(dev, &qp_attr->alt_ah_attr, &context->alt_path);
+		qp_attr->alt_pkey_index =
+			(u16)(cl_ntoh32(context->alt_path.port_pkey) & 0x7f);
+		qp_attr->alt_port_num = qp_attr->alt_ah_attr.port_num;
+	}
+
+	qp_attr->pkey_index = (u16)(cl_ntoh32(context->pri_path.port_pkey) & 0x7f);
+	qp_attr->port_num =
+		(u8)((cl_ntoh32(context->pri_path.port_pkey) >> 24) & 0x3);
+
+	/* qp_attr->en_sqd_async_notify is only applicable in modify qp */
+	qp_attr->sq_draining = (u8)(mthca_state == MTHCA_QP_STATE_DRAINING);
+
+	qp_attr->max_rd_atomic = (u8)(1 << ((cl_ntoh32(context->params1) >> 21) & 0x7));
+
+	qp_attr->max_dest_rd_atomic =
+		(u8)(1 << ((cl_ntoh32(context->params2) >> 21) & 0x7));
+	qp_attr->min_rnr_timer =
+		(u8)((cl_ntoh32(context->rnr_nextrecvpsn) >> 24) & 0x1f);
+	qp_attr->timeout = context->pri_path.ackto >> 3;
+	qp_attr->retry_cnt = (u8)((cl_ntoh32(context->params1) >> 16) & 0x7);
+	qp_attr->rnr_retry = context->pri_path.rnr_retry >> 5;
+	qp_attr->alt_timeout = context->alt_path.ackto >> 3;
+
+done:
+	qp_attr->cur_qp_state = qp_attr->qp_state;
+	qp_attr->cap.max_send_wr = qp->sq.max;
+	qp_attr->cap.max_recv_wr = qp->rq.max;
+	qp_attr->cap.max_send_sge = qp->sq.max_gs;
+	qp_attr->cap.max_recv_sge = qp->rq.max_gs;
+	qp_attr->cap.max_inline_data = qp->max_inline_data;
+
+	qp_init_attr->cap = qp_attr->cap;
+
+out_mailbox:
+	// BUGFIX: on the RESET path we jump straight to 'done' with mailbox
+	// still NULL and then fall through to this label; guard the free so we
+	// don't hand mthca_free_mailbox() a NULL mailbox.
+	if (mailbox)
+		mthca_free_mailbox(dev, mailbox);
+
+out:
+	up(&qp->mutex);
+	return err;
+}
+\r
static void store_attrs(struct mthca_sqp *sqp, struct ib_qp_attr *attr,\r
int attr_mask)\r
{\r