u32 req_param, opt_param;
u32 sqd_event = 0;
u8 status;
- int err;
+ int err = -EINVAL;
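+	/* default to -EINVAL so the validation checks below can fail with a bare "goto out" */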
SPIN_LOCK_PREP(lhs);
SPIN_LOCK_PREP(lhr);
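+	/* serialize QP state transitions: every exit path from here on
+	 * must leave through "out" so the mutex is always released */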
+ down( &qp->mutex );
+
if (attr_mask & IB_QP_CUR_STATE) {
if (attr->cur_qp_state != IBQPS_RTR &&
- attr->cur_qp_state != IBQPS_RTS &&
- attr->cur_qp_state != IBQPS_SQD &&
- attr->cur_qp_state != IBQPS_SQE)
- return -EINVAL;
+ attr->cur_qp_state != IBQPS_RTS &&
+ attr->cur_qp_state != IBQPS_SQD &&
+ attr->cur_qp_state != IBQPS_SQE)
+ goto out;
else
cur_state = attr->cur_qp_state;
	} else {
		cur_state = qp->state;
	}
if (attr_mask & IB_QP_STATE) {
- if (attr->qp_state < 0 || attr->qp_state > IBQPS_ERR)
- return -EINVAL;
+ if (attr->qp_state < 0 || attr->qp_state > IBQPS_ERR)
+ goto out;
new_state = attr->qp_state;
} else
new_state = cur_state;
if (state_table[cur_state][new_state].trans == MTHCA_TRANS_INVALID) {
HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_QP,("Illegal QP transition "
"%d->%d\n", cur_state, new_state));
- return -EINVAL;
+ goto out;
}
	req_param = state_table[cur_state][new_state].req_param[qp->transport];
	opt_param = state_table[cur_state][new_state].opt_param[qp->transport];

	if ((req_param & attr_mask) != req_param) {
		HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_QP,("QP transition "
			"%d->%d missing req attr 0x%08x\n",
			cur_state, new_state,
			req_param & ~attr_mask));
//NB: IBAL doesn't use all the fields, so some mandatory flags may be legitimately absent here
- return -EINVAL;
+ goto out;
}
	if (attr_mask & ~(req_param | opt_param | IB_QP_STATE)) {
		HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_QP,("QP transition (transport %d) "
			"unsupported attr mask 0x%x\n", qp->transport,
			attr_mask & ~(req_param | opt_param |
			IB_QP_STATE)));
//NB: the old code sometimes sets flags that this state table doesn't list as optional
- return -EINVAL;
+ goto out;
}
if ((attr_mask & IB_QP_PKEY_INDEX) &&
- attr->pkey_index >= dev->limits.pkey_table_len) {
+ attr->pkey_index >= dev->limits.pkey_table_len) {
HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_QP,("PKey index (%u) too large. max is %d\n",
attr->pkey_index,dev->limits.pkey_table_len-1));
- return -EINVAL;
+ goto out;
}
if ((attr_mask & IB_QP_PORT) &&
- (attr->port_num == 0 || attr->port_num > dev->limits.num_ports)) {
+ (attr->port_num == 0 || attr->port_num > dev->limits.num_ports)) {
HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_QP,("Port number (%u) is invalid\n", attr->port_num));
- return -EINVAL;
+ goto out;
}
if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC &&
- attr->max_rd_atomic > dev->limits.max_qp_init_rdma) {
+ attr->max_rd_atomic > dev->limits.max_qp_init_rdma) {
HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_QP,("Max rdma_atomic as initiator %u too large (max is %d)\n",
attr->max_rd_atomic, dev->limits.max_qp_init_rdma));
- return -EINVAL;
+ goto out;
}
if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC &&
attr->max_dest_rd_atomic > 1 << dev->qp_table.rdb_shift) {
HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_QP,("Max rdma_atomic as responder %u too large (max %d)\n",
attr->max_dest_rd_atomic, 1 << dev->qp_table.rdb_shift));
- return -EINVAL;
+ goto out;
}
mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
- if (IS_ERR(mailbox))
- return PTR_ERR(mailbox);
+ if (IS_ERR(mailbox)) {
+ err = PTR_ERR(mailbox);
+ goto out;
+ }
qp_param = mailbox->buf;
qp_context = &qp_param->context;
RtlZeroMemory(qp_param, sizeof *qp_param);
if (qp->transport == MLX || qp->transport == UD)
qp_context->mtu_msgmax = (IB_MTU_2048 << 5) | 11;
- else if (attr_mask & IB_QP_PATH_MTU)
+ else if (attr_mask & IB_QP_PATH_MTU) {
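+		/* the HCA encodes the path MTU as a power of two between
+		 * 256 and 2048 bytes; reject anything outside that range
+		 * before it is packed into mtu_msgmax below */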
+ if (attr->path_mtu < IB_MTU_256 || attr->path_mtu > IB_MTU_2048) {
+ HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_QP,
+ ("path MTU (%u) is invalid\n", attr->path_mtu));
+ goto out_mailbox;
+ }
qp_context->mtu_msgmax = (u8)((attr->path_mtu << 5) | 31);
+ }
if (mthca_is_memfree(dev)) {
if (qp->rq.max)
err = mthca_MODIFY_QP(dev, state_table[cur_state][new_state].trans,
qp->qpn, 0, mailbox, sqd_event, &status);
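+	/* the command can fail two ways: an immediate error in err, or
+	 * a firmware completion status, checked just below */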
+ if (err)
+ goto out_mailbox;
if (status) {
HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_QP,("modify QP %d returned status %02x.\n",
- state_table[cur_state][new_state].trans, status));
+ state_table[cur_state][new_state].trans, status));
err = -EINVAL;
+ goto out_mailbox;
}
- if (!err) {
- qp->state = new_state;
- if (attr_mask & IB_QP_ACCESS_FLAGS)
- qp->atomic_rd_en = (u8)attr->qp_access_flags;
- if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
- qp->resp_depth = attr->max_dest_rd_atomic;
- }
-
- mthca_free_mailbox(dev, mailbox);
+ qp->state = new_state;
+ if (attr_mask & IB_QP_ACCESS_FLAGS)
+ qp->atomic_rd_en = (u8)attr->qp_access_flags;
+ if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
+ qp->resp_depth = attr->max_dest_rd_atomic;
if (is_sqp(dev, qp))
store_attrs(to_msqp(qp), attr, attr_mask);
	/*
	 * If we moved QP0 to RTR, bring the IB link back up;
	 * if we moved it to RESET or ERROR, bring the link back down.
	 */
if (is_qp0(dev, qp)) {
if (cur_state != IBQPS_RTR &&
- new_state == IBQPS_RTR)
+ new_state == IBQPS_RTR)
init_port(dev, to_msqp(qp)->port);
if (cur_state != IBQPS_RESET &&
- cur_state != IBQPS_ERR &&
- (new_state == IBQPS_RESET ||
- new_state == IBQPS_ERR))
+ cur_state != IBQPS_ERR &&
+ (new_state == IBQPS_RESET ||
+ new_state == IBQPS_ERR))
mthca_CLOSE_IB(dev, to_msqp(qp)->port, &status);
}
	/*
	 * If we moved a kernel QP to RESET, clean up all old CQ
* entries and reinitialize the QP.
*/
-	if (!err && new_state == IBQPS_RESET && !qp->ibqp.ucontext) {
+	if (new_state == IBQPS_RESET && !qp->ibqp.ucontext) {
mthca_cq_clean(dev, to_mcq(qp->ibqp.send_cq)->cqn, qp->qpn,
qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL);
		if (qp->ibqp.send_cq != qp->ibqp.recv_cq)
			mthca_cq_clean(dev, to_mcq(qp->ibqp.recv_cq)->cqn, qp->qpn,
				qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL);
}
}
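+
+	/* common unwind: free the mailbox first, then release the QP
+	 * mutex; validation failures before the mailbox is allocated
+	 * jump straight to "out" */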
+out_mailbox:
+ mthca_free_mailbox(dev, mailbox);
+
+out:
+ up( &qp->mutex );
return err;
}
atomic_set(&qp->refcount, 1);
init_waitqueue_head(&qp->wait);
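+	/* mutex serializing QP state transitions in mthca_modify_qp */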
+ KeInitializeMutex(&qp->mutex, 0);
+
qp->state = IBQPS_RESET;
qp->atomic_rd_en = 0;
qp->resp_depth = 0;
u16 pkey;
CPU_2_BE64_PREP;
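+	/* a UD send must carry an address vector; catch a NULL AV here
+	 * rather than dereferencing it in ib_ud_header_init() below */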
+ if (!wr->dgrm.ud.h_av) {
+ HCA_PRINT(TRACE_LEVEL_ERROR , HCA_DBG_AV,
+ ("absent AV in send wr %p\n", wr));
+ return -EINVAL;
+ }
+
ib_ud_header_init(256, /* assume a MAD */
mthca_ah_grh_present(to_mah((struct ib_ah *)wr->dgrm.ud.h_av)),
&sqp->ud_header);