if (rs->sq_size > max_size)
rs->sq_size = max_size;
- else if (rs->sq_size < 2)
- rs->sq_size = 2;
+ else if (rs->sq_size < 4)
+ rs->sq_size = 4;
if (rs->sq_size <= (RS_QP_CTRL_SIZE << 2))
- rs->ctrl_avail = 1;
+ rs->ctrl_avail = 2;
if (rs->rq_size > max_size)
rs->rq_size = max_size;
- else if (rs->rq_size < 2)
- rs->rq_size = 2;
+ else if (rs->rq_size < 4)
+ rs->rq_size = 4;
}
static void ds_set_qp_size(struct rsocket *rs)
return ret;
}
-/*
-TODO:
-if MSG_SEND opt is set
- if !sgl
- post send with imm_data inline
- else
- post rdma write
- pst send with imm_data inline
-else
- existing code flow - rdma write with immediate
-
-post_write - rdma write only
-post msg - immediate data or send only
-post write msg - rdma write with msg (imm or send after)
-*/
-static int rs_post_write_msg(struct rsocket *rs,
- struct ibv_sge *sgl, int nsge,
- uint32_t imm_data, int flags,
- uint64_t addr, uint32_t rkey)
-{
- struct ibv_send_wr wr, *bad;
-
- wr.wr_id = rs_send_wr_id(imm_data);
- wr.next = NULL;
- wr.sg_list = sgl;
- wr.num_sge = nsge;
- wr.opcode = IBV_WR_RDMA_WRITE_WITH_IMM;
- wr.send_flags = flags;
- wr.imm_data = htonl(imm_data);
- wr.wr.rdma.remote_addr = addr;
- wr.wr.rdma.rkey = rkey;
-
- return rdma_seterrno(ibv_post_send(rs->cm_id->qp, &wr, &bad));
-}
-
static int rs_post_msg(struct rsocket *rs, uint32_t msg)
{
struct ibv_send_wr wr, *bad;
wr.wr_id = rs_send_wr_id(msg);
wr.next = NULL;
- if (rs->opts & RS_OPT_MSG_SEND) {
+ if (!(rs->opts & RS_OPT_MSG_SEND)) {
wr.sg_list = &sge;
wr.num_sge = 1;
wr.opcode = IBV_WR_SEND;
wr.sg_list = NULL;
wr.num_sge = 0;
wr.opcode = IBV_WR_RDMA_WRITE_WITH_IMM;
- wr.send_flags = flags;
- wr.imm_data = htonl(imm_data);
+ wr.send_flags = 0;
+ wr.imm_data = htonl(msg);
}
	return rdma_seterrno(ibv_post_send(rs->cm_id->qp, &wr, &bad));
}
+/*
+ * Post a data transfer notification to the peer.  Two wire protocols
+ * are supported, selected by RS_OPT_MSG_SEND:
+ *
+ *  - option clear: a single RDMA write carries both the payload (sgl)
+ *    and the message as immediate data (IBV_WR_RDMA_WRITE_WITH_IMM).
+ *  - option set: the payload is written first via rs_post_write(), then
+ *    the message is delivered by a separate send via rs_post_msg() —
+ *    i.e. this path consumes two send WQEs, not one.
+ *
+ * Returns 0 on success or a negative errno from ibv_post_send()
+ * (via rdma_seterrno()).
+ */
+static int rs_post_write_msg(struct rsocket *rs,
+		 struct ibv_sge *sgl, int nsge,
+		 uint32_t msg, int flags,
+		 uint64_t addr, uint32_t rkey)
+{
+	struct ibv_send_wr wr, *bad;
+	int ret;
+
+	if (!(rs->opts & RS_OPT_MSG_SEND)) {
+		/* Single work request: write payload, msg rides as imm_data. */
+		wr.wr_id = rs_send_wr_id(msg);
+		wr.next = NULL;
+		wr.sg_list = sgl;
+		wr.num_sge = nsge;
+		wr.opcode = IBV_WR_RDMA_WRITE_WITH_IMM;
+		wr.send_flags = flags;
+		wr.imm_data = htonl(msg);
+		wr.wr.rdma.remote_addr = addr;
+		wr.wr.rdma.rkey = rkey;
+
+		return rdma_seterrno(ibv_post_send(rs->cm_id->qp, &wr, &bad));
+	} else {
+		/* TODO: adjust sqe_avail and ctrl_avail for extra post */
+		/* Write first; only send the notification if the write posted. */
+		ret = rs_post_write(rs, sgl, nsge, msg, flags, addr, rkey);
+		if (!ret)
+			ret = rs_post_msg(rs, msg);
+		return ret;
+	}
+}
+
static int ds_post_send(struct rsocket *rs, struct ibv_sge *sge,
uint32_t wr_data)
{
if (++rs->remote_sge == rs->remote_sgl.length)
rs->remote_sge = 0;
} else {
- rs_post_write_msg(rs, NULL, 0,
- rs_msg_set(RS_OP_SGL, rs->rseq_no + rs->rq_size),
- 0, 0, 0);
+ rs_post_msg(rs, rs_msg_set(RS_OP_SGL, rs->rseq_no + rs->rq_size));
}
}
*/
static int rs_can_send(struct rsocket *rs)
{
- if (rs->opts & RS_OPT_MSG_SEND) {
+ if (!(rs->opts & RS_OPT_MSG_SEND)) {
+ return rs->sqe_avail && (rs->sbuf_bytes_avail >= RS_SNDLOWAT) &&
+ (rs->sseq_no != rs->sseq_comp) &&
+ (rs->target_sgl[rs->target_sge].length != 0);
+ } else {
return (rs->sqe_avail >= 2) && (rs->sbuf_bytes_avail >= RS_SNDLOWAT) &&
(rs->sseq_no != rs->sseq_comp) &&
(rs->target_sgl[rs->target_sge].length != 0);
}
- return rs->sqe_avail && (rs->sbuf_bytes_avail >= RS_SNDLOWAT) &&
- (rs->sseq_no != rs->sseq_comp) &&
- (rs->target_sgl[rs->target_sge].length != 0);
}
static int ds_can_send(struct rsocket *rs)
if ((rs->state & rs_connected) && rs->ctrl_avail) {
rs->ctrl_avail--;
- ret = rs_post_write_msg(rs, NULL, 0,
- rs_msg_set(RS_OP_CTRL, ctrl), 0, 0, 0);
+ ret = rs_post_msg(rs, rs_msg_set(RS_OP_CTRL, ctrl));
}
}