/*
 * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
 * Copyright (c) 2004, 2011-2012 Intel Corporation. All rights reserved.
 * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2005 PathScale, Inc. All rights reserved.
 *
#include <stdint.h>
#include <pthread.h>
#include <stddef.h>
+#include <errno.h>
#ifdef __cplusplus
# define BEGIN_C_DECLS extern "C" {
BEGIN_C_DECLS
union ibv_gid {
- uint8_t raw[16];
+ uint8_t raw[16];
struct {
- uint64_t subnet_prefix;
- uint64_t interface_id;
+ uint64_t subnet_prefix;
+ uint64_t interface_id;
} global;
};
#ifndef container_of
/**
- * container_of - cast a member of a structure out to the containing structure
- * @ptr: the pointer to the member.
- * @type: the type of the container struct this is embedded in.
- * @member: the name of the member within the struct.
- *
+ * container_of - cast a member of a structure out to the containing structure
+ * @ptr: the pointer to the member.
+ * @type: the type of the container struct this is embedded in.
+ * @member: the name of the member within the struct.
+ *
*/
#define container_of(ptr, type, member) ({\
const typeof(((type *)0)->member) * __mptr = (ptr);\
#endif
enum ibv_node_type {
- IBV_NODE_UNKNOWN = -1,
- IBV_NODE_CA = 1,
+ IBV_NODE_UNKNOWN = -1,
+ IBV_NODE_CA = 1,
IBV_NODE_SWITCH,
IBV_NODE_ROUTER,
IBV_NODE_RNIC
};
enum ibv_transport_type {
- IBV_TRANSPORT_UNKNOWN = -1,
- IBV_TRANSPORT_IB = 0,
- IBV_TRANSPORT_IWARP
+ IBV_TRANSPORT_UNKNOWN = -1, IBV_TRANSPORT_IB = 0, IBV_TRANSPORT_IWARP
};
enum ibv_device_cap_flags {
- IBV_DEVICE_RESIZE_MAX_WR = 1,
- IBV_DEVICE_BAD_PKEY_CNTR = 1 << 1,
- IBV_DEVICE_BAD_QKEY_CNTR = 1 << 2,
- IBV_DEVICE_RAW_MULTI = 1 << 3,
- IBV_DEVICE_AUTO_PATH_MIG = 1 << 4,
- IBV_DEVICE_CHANGE_PHY_PORT = 1 << 5,
- IBV_DEVICE_UD_AV_PORT_ENFORCE = 1 << 6,
- IBV_DEVICE_CURR_QP_STATE_MOD = 1 << 7,
- IBV_DEVICE_SHUTDOWN_PORT = 1 << 8,
- IBV_DEVICE_INIT_TYPE = 1 << 9,
- IBV_DEVICE_PORT_ACTIVE_EVENT = 1 << 10,
- IBV_DEVICE_SYS_IMAGE_GUID = 1 << 11,
- IBV_DEVICE_RC_RNR_NAK_GEN = 1 << 12,
- IBV_DEVICE_SRQ_RESIZE = 1 << 13,
- IBV_DEVICE_N_NOTIFY_CQ = 1 << 14
+ IBV_DEVICE_RESIZE_MAX_WR = 1,
+ IBV_DEVICE_BAD_PKEY_CNTR = 1 << 1,
+ IBV_DEVICE_BAD_QKEY_CNTR = 1 << 2,
+ IBV_DEVICE_RAW_MULTI = 1 << 3,
+ IBV_DEVICE_AUTO_PATH_MIG = 1 << 4,
+ IBV_DEVICE_CHANGE_PHY_PORT = 1 << 5,
+ IBV_DEVICE_UD_AV_PORT_ENFORCE = 1 << 6,
+ IBV_DEVICE_CURR_QP_STATE_MOD = 1 << 7,
+ IBV_DEVICE_SHUTDOWN_PORT = 1 << 8,
+ IBV_DEVICE_INIT_TYPE = 1 << 9,
+ IBV_DEVICE_PORT_ACTIVE_EVENT = 1 << 10,
+ IBV_DEVICE_SYS_IMAGE_GUID = 1 << 11,
+ IBV_DEVICE_RC_RNR_NAK_GEN = 1 << 12,
+ IBV_DEVICE_SRQ_RESIZE = 1 << 13,
+ IBV_DEVICE_N_NOTIFY_CQ = 1 << 14
};
enum ibv_atomic_cap {
- IBV_ATOMIC_NONE,
- IBV_ATOMIC_HCA,
- IBV_ATOMIC_GLOB
+ IBV_ATOMIC_NONE, IBV_ATOMIC_HCA, IBV_ATOMIC_GLOB
};
struct ibv_device_attr {
- char fw_ver[64];
- uint64_t node_guid;
- uint64_t sys_image_guid;
- uint64_t max_mr_size;
- uint64_t page_size_cap;
- uint32_t vendor_id;
- uint32_t vendor_part_id;
- uint32_t hw_ver;
- int max_qp;
- int max_qp_wr;
- int device_cap_flags;
- int max_sge;
- int max_sge_rd;
- int max_cq;
- int max_cqe;
- int max_mr;
- int max_pd;
- int max_qp_rd_atom;
- int max_ee_rd_atom;
- int max_res_rd_atom;
- int max_qp_init_rd_atom;
- int max_ee_init_rd_atom;
- enum ibv_atomic_cap atomic_cap;
- int max_ee;
- int max_rdd;
- int max_mw;
- int max_raw_ipv6_qp;
- int max_raw_ethy_qp;
- int max_mcast_grp;
- int max_mcast_qp_attach;
- int max_total_mcast_qp_attach;
- int max_ah;
- int max_fmr;
- int max_map_per_fmr;
- int max_srq;
- int max_srq_wr;
- int max_srq_sge;
- uint16_t max_pkeys;
- uint8_t local_ca_ack_delay;
- uint8_t phys_port_cnt;
+ char fw_ver[64];
+ uint64_t node_guid;
+ uint64_t sys_image_guid;
+ uint64_t max_mr_size;
+ uint64_t page_size_cap;
+ uint32_t vendor_id;
+ uint32_t vendor_part_id;
+ uint32_t hw_ver;
+ int max_qp;
+ int max_qp_wr;
+ int device_cap_flags;
+ int max_sge;
+ int max_sge_rd;
+ int max_cq;
+ int max_cqe;
+ int max_mr;
+ int max_pd;
+ int max_qp_rd_atom;
+ int max_ee_rd_atom;
+ int max_res_rd_atom;
+ int max_qp_init_rd_atom;
+ int max_ee_init_rd_atom;
+ enum ibv_atomic_cap atomic_cap;
+ int max_ee;
+ int max_rdd;
+ int max_mw;
+ int max_raw_ipv6_qp;
+ int max_raw_ethy_qp;
+ int max_mcast_grp;
+ int max_mcast_qp_attach;
+ int max_total_mcast_qp_attach;
+ int max_ah;
+ int max_fmr;
+ int max_map_per_fmr;
+ int max_srq;
+ int max_srq_wr;
+ int max_srq_sge;
+ uint16_t max_pkeys;
+ uint8_t local_ca_ack_delay;
+ uint8_t phys_port_cnt;
};
enum ibv_mtu {
- IBV_MTU_256 = 1,
- IBV_MTU_512 = 2,
+ IBV_MTU_256 = 1,
+ IBV_MTU_512 = 2,
IBV_MTU_1024 = 3,
IBV_MTU_2048 = 4,
IBV_MTU_4096 = 5
};
enum ibv_port_state {
- IBV_PORT_NOP = 0,
- IBV_PORT_DOWN = 1,
- IBV_PORT_INIT = 2,
- IBV_PORT_ARMED = 3,
- IBV_PORT_ACTIVE = 4,
- IBV_PORT_ACTIVE_DEFER = 5
+ IBV_PORT_NOP = 0,
+ IBV_PORT_DOWN = 1,
+ IBV_PORT_INIT = 2,
+ IBV_PORT_ARMED = 3,
+ IBV_PORT_ACTIVE = 4,
+ IBV_PORT_ACTIVE_DEFER = 5
};
enum {
};
struct ibv_port_attr {
- enum ibv_port_state state;
- enum ibv_mtu max_mtu;
- enum ibv_mtu active_mtu;
- int gid_tbl_len;
- uint32_t port_cap_flags;
- uint32_t max_msg_sz;
- uint32_t bad_pkey_cntr;
- uint32_t qkey_viol_cntr;
- uint16_t pkey_tbl_len;
- uint16_t lid;
- uint16_t sm_lid;
- uint8_t lmc;
- uint8_t max_vl_num;
- uint8_t sm_sl;
- uint8_t subnet_timeout;
- uint8_t init_type_reply;
- uint8_t active_width;
- uint8_t active_speed;
- uint8_t phys_state;
- uint8_t link_layer;
- uint8_t reserved;
+ enum ibv_port_state state;
+ enum ibv_mtu max_mtu;
+ enum ibv_mtu active_mtu;
+ int gid_tbl_len;
+ uint32_t port_cap_flags;
+ uint32_t max_msg_sz;
+ uint32_t bad_pkey_cntr;
+ uint32_t qkey_viol_cntr;
+ uint16_t pkey_tbl_len;
+ uint16_t lid;
+ uint16_t sm_lid;
+ uint8_t lmc;
+ uint8_t max_vl_num;
+ uint8_t sm_sl;
+ uint8_t subnet_timeout;
+ uint8_t init_type_reply;
+ uint8_t active_width;
+ uint8_t active_speed;
+ uint8_t phys_state;
+ uint8_t link_layer;
+ uint8_t reserved;
};
enum ibv_event_type {
struct ibv_async_event {
union {
- struct ibv_cq *cq;
- struct ibv_qp *qp;
+ struct ibv_cq *cq;
+ struct ibv_qp *qp;
struct ibv_srq *srq;
- int port_num;
+ int port_num;
} element;
- enum ibv_event_type event_type;
+ enum ibv_event_type event_type;
};
enum ibv_wc_status {
IBV_WC_COMP_SWAP,
IBV_WC_FETCH_ADD,
IBV_WC_BIND_MW,
-/*
- * Set value of IBV_WC_RECV so consumers can test if a completion is a
- * receive by testing (opcode & IBV_WC_RECV).
- */
- IBV_WC_RECV = 1 << 7,
+ /*
+ * Set value of IBV_WC_RECV so consumers can test if a completion is a
+ * receive by testing (opcode & IBV_WC_RECV).
+ */
+ IBV_WC_RECV = 1 << 7,
IBV_WC_RECV_RDMA_WITH_IMM
};
enum ibv_wc_flags {
- IBV_WC_GRH = 1 << 0,
- IBV_WC_WITH_IMM = 1 << 1
+ IBV_WC_GRH = 1 << 0, IBV_WC_WITH_IMM = 1 << 1
};
struct ibv_wc {
- uint64_t wr_id;
- enum ibv_wc_status status;
- enum ibv_wc_opcode opcode;
- uint32_t vendor_err;
- uint32_t byte_len;
- uint32_t imm_data; /* in network byte order */
- uint32_t qp_num;
- uint32_t src_qp;
- int wc_flags;
- uint16_t pkey_index;
- uint16_t slid;
- uint8_t sl;
- uint8_t dlid_path_bits;
+ uint64_t wr_id;
+ enum ibv_wc_status status;
+ enum ibv_wc_opcode opcode;
+ uint32_t vendor_err;
+ uint32_t byte_len;
+ uint32_t imm_data; /* in network byte order */
+ uint32_t qp_num;
+ uint32_t src_qp;
+ int wc_flags;
+ uint16_t pkey_index;
+ uint16_t slid;
+ uint8_t sl;
+ uint8_t dlid_path_bits;
};
enum ibv_access_flags {
- IBV_ACCESS_LOCAL_WRITE = 1,
- IBV_ACCESS_REMOTE_WRITE = (1<<1),
- IBV_ACCESS_REMOTE_READ = (1<<2),
- IBV_ACCESS_REMOTE_ATOMIC = (1<<3),
- IBV_ACCESS_MW_BIND = (1<<4)
+ IBV_ACCESS_LOCAL_WRITE = 1,
+ IBV_ACCESS_REMOTE_WRITE = (1 << 1),
+ IBV_ACCESS_REMOTE_READ = (1 << 2),
+ IBV_ACCESS_REMOTE_ATOMIC = (1 << 3),
+ IBV_ACCESS_MW_BIND = (1 << 4)
};
struct ibv_pd {
- struct ibv_context *context;
- uint32_t handle;
+ struct ibv_context *context;
+ uint32_t handle;
+};
+
+enum ibv_xrcd_init_attr_mask {
+ IBV_XRCD_INIT_ATTR_FD = 1 << 0,
+ IBV_XRCD_INIT_ATTR_OFLAGS = 1 << 1,
+ IBV_XRCD_INIT_ATTR_RESERVED = 1 << 2
+};
+
+struct ibv_xrcd_init_attr {
+ uint32_t comp_mask;
+ int fd;
+ int oflags;
+};
+
+struct ibv_xrcd {
+ struct ibv_context *context;
};
enum ibv_rereg_mr_flags {
- IBV_REREG_MR_CHANGE_TRANSLATION = (1 << 0),
- IBV_REREG_MR_CHANGE_PD = (1 << 1),
- IBV_REREG_MR_CHANGE_ACCESS = (1 << 2),
- IBV_REREG_MR_KEEP_VALID = (1 << 3)
+ IBV_REREG_MR_CHANGE_TRANSLATION = (1 << 0),
+ IBV_REREG_MR_CHANGE_PD = (1 << 1),
+ IBV_REREG_MR_CHANGE_ACCESS = (1 << 2),
+ IBV_REREG_MR_KEEP_VALID = (1 << 3)
};
struct ibv_mr {
- struct ibv_context *context;
- struct ibv_pd *pd;
- void *addr;
- size_t length;
- uint32_t handle;
- uint32_t lkey;
- uint32_t rkey;
+ struct ibv_context *context;
+ struct ibv_pd *pd;
+ void *addr;
+ size_t length;
+ uint32_t handle;
+ uint32_t lkey;
+ uint32_t rkey;
};
enum ibv_mw_type {
- IBV_MW_TYPE_1 = 1,
- IBV_MW_TYPE_2 = 2
+ IBV_MW_TYPE_1 = 1, IBV_MW_TYPE_2 = 2
};
struct ibv_mw {
- struct ibv_context *context;
- struct ibv_pd *pd;
- uint32_t rkey;
+ struct ibv_context *context;
+ struct ibv_pd *pd;
+ uint32_t rkey;
};
struct ibv_global_route {
- union ibv_gid dgid;
- uint32_t flow_label;
- uint8_t sgid_index;
- uint8_t hop_limit;
- uint8_t traffic_class;
+ union ibv_gid dgid;
+ uint32_t flow_label;
+ uint8_t sgid_index;
+ uint8_t hop_limit;
+ uint8_t traffic_class;
};
struct ibv_grh {
- uint32_t version_tclass_flow;
- uint16_t paylen;
- uint8_t next_hdr;
- uint8_t hop_limit;
- union ibv_gid sgid;
- union ibv_gid dgid;
+ uint32_t version_tclass_flow;
+ uint16_t paylen;
+ uint8_t next_hdr;
+ uint8_t hop_limit;
+ union ibv_gid sgid;
+ union ibv_gid dgid;
};
enum ibv_rate {
- IBV_RATE_MAX = 0,
+ IBV_RATE_MAX = 0,
IBV_RATE_2_5_GBPS = 2,
- IBV_RATE_5_GBPS = 5,
- IBV_RATE_10_GBPS = 3,
- IBV_RATE_20_GBPS = 6,
- IBV_RATE_30_GBPS = 4,
- IBV_RATE_40_GBPS = 7,
- IBV_RATE_60_GBPS = 8,
- IBV_RATE_80_GBPS = 9,
+ IBV_RATE_5_GBPS = 5,
+ IBV_RATE_10_GBPS = 3,
+ IBV_RATE_20_GBPS = 6,
+ IBV_RATE_30_GBPS = 4,
+ IBV_RATE_40_GBPS = 7,
+ IBV_RATE_60_GBPS = 8,
+ IBV_RATE_80_GBPS = 9,
IBV_RATE_120_GBPS = 10
};
enum ibv_rate mult_to_ibv_rate(int mult) __attribute_const;
struct ibv_ah_attr {
- struct ibv_global_route grh;
- uint16_t dlid;
- uint8_t sl;
- uint8_t src_path_bits;
- uint8_t static_rate;
- uint8_t is_global;
- uint8_t port_num;
+ struct ibv_global_route grh;
+ uint16_t dlid;
+ uint8_t sl;
+ uint8_t src_path_bits;
+ uint8_t static_rate;
+ uint8_t is_global;
+ uint8_t port_num;
};
enum ibv_srq_attr_mask {
- IBV_SRQ_MAX_WR = 1 << 0,
- IBV_SRQ_LIMIT = 1 << 1
+ IBV_SRQ_MAX_WR = 1 << 0, IBV_SRQ_LIMIT = 1 << 1
};
struct ibv_srq_attr {
- uint32_t max_wr;
- uint32_t max_sge;
- uint32_t srq_limit;
+ uint32_t max_wr;
+ uint32_t max_sge;
+ uint32_t srq_limit;
};
struct ibv_srq_init_attr {
- void *srq_context;
- struct ibv_srq_attr attr;
+ void *srq_context;
+ struct ibv_srq_attr attr;
};
enum ibv_qp_type {
- IBV_QPT_RC = 2,
- IBV_QPT_UC,
- IBV_QPT_UD
+ IBV_QPT_RC = 2, IBV_QPT_UC, IBV_QPT_UD
};
struct ibv_qp_cap {
- uint32_t max_send_wr;
- uint32_t max_recv_wr;
- uint32_t max_send_sge;
- uint32_t max_recv_sge;
- uint32_t max_inline_data;
+ uint32_t max_send_wr;
+ uint32_t max_recv_wr;
+ uint32_t max_send_sge;
+ uint32_t max_recv_sge;
+ uint32_t max_inline_data;
};
struct ibv_qp_init_attr {
- void *qp_context;
- struct ibv_cq *send_cq;
- struct ibv_cq *recv_cq;
- struct ibv_srq *srq;
- struct ibv_qp_cap cap;
- enum ibv_qp_type qp_type;
- int sq_sig_all;
+ void *qp_context;
+ struct ibv_cq *send_cq;
+ struct ibv_cq *recv_cq;
+ struct ibv_srq *srq;
+ struct ibv_qp_cap cap;
+ enum ibv_qp_type qp_type;
+ int sq_sig_all;
};
enum ibv_qp_attr_mask {
- IBV_QP_STATE = 1 << 0,
- IBV_QP_CUR_STATE = 1 << 1,
- IBV_QP_EN_SQD_ASYNC_NOTIFY = 1 << 2,
- IBV_QP_ACCESS_FLAGS = 1 << 3,
- IBV_QP_PKEY_INDEX = 1 << 4,
- IBV_QP_PORT = 1 << 5,
- IBV_QP_QKEY = 1 << 6,
- IBV_QP_AV = 1 << 7,
- IBV_QP_PATH_MTU = 1 << 8,
- IBV_QP_TIMEOUT = 1 << 9,
- IBV_QP_RETRY_CNT = 1 << 10,
- IBV_QP_RNR_RETRY = 1 << 11,
- IBV_QP_RQ_PSN = 1 << 12,
- IBV_QP_MAX_QP_RD_ATOMIC = 1 << 13,
- IBV_QP_ALT_PATH = 1 << 14,
- IBV_QP_MIN_RNR_TIMER = 1 << 15,
- IBV_QP_SQ_PSN = 1 << 16,
- IBV_QP_MAX_DEST_RD_ATOMIC = 1 << 17,
- IBV_QP_PATH_MIG_STATE = 1 << 18,
- IBV_QP_CAP = 1 << 19,
- IBV_QP_DEST_QPN = 1 << 20
+ IBV_QP_STATE = 1 << 0,
+ IBV_QP_CUR_STATE = 1 << 1,
+ IBV_QP_EN_SQD_ASYNC_NOTIFY = 1 << 2,
+ IBV_QP_ACCESS_FLAGS = 1 << 3,
+ IBV_QP_PKEY_INDEX = 1 << 4,
+ IBV_QP_PORT = 1 << 5,
+ IBV_QP_QKEY = 1 << 6,
+ IBV_QP_AV = 1 << 7,
+ IBV_QP_PATH_MTU = 1 << 8,
+ IBV_QP_TIMEOUT = 1 << 9,
+ IBV_QP_RETRY_CNT = 1 << 10,
+ IBV_QP_RNR_RETRY = 1 << 11,
+ IBV_QP_RQ_PSN = 1 << 12,
+ IBV_QP_MAX_QP_RD_ATOMIC = 1 << 13,
+ IBV_QP_ALT_PATH = 1 << 14,
+ IBV_QP_MIN_RNR_TIMER = 1 << 15,
+ IBV_QP_SQ_PSN = 1 << 16,
+ IBV_QP_MAX_DEST_RD_ATOMIC = 1 << 17,
+ IBV_QP_PATH_MIG_STATE = 1 << 18,
+ IBV_QP_CAP = 1 << 19,
+ IBV_QP_DEST_QPN = 1 << 20
};
enum ibv_qp_state {
};
enum ibv_mig_state {
- IBV_MIG_MIGRATED,
- IBV_MIG_REARM,
- IBV_MIG_ARMED
+ IBV_MIG_MIGRATED, IBV_MIG_REARM, IBV_MIG_ARMED
};
struct ibv_qp_attr {
- enum ibv_qp_state qp_state;
- enum ibv_qp_state cur_qp_state;
- enum ibv_mtu path_mtu;
- enum ibv_mig_state path_mig_state;
- uint32_t qkey;
- uint32_t rq_psn;
- uint32_t sq_psn;
- uint32_t dest_qp_num;
- int qp_access_flags;
- struct ibv_qp_cap cap;
- struct ibv_ah_attr ah_attr;
- struct ibv_ah_attr alt_ah_attr;
- uint16_t pkey_index;
- uint16_t alt_pkey_index;
- uint8_t en_sqd_async_notify;
- uint8_t sq_draining;
- uint8_t max_rd_atomic;
- uint8_t max_dest_rd_atomic;
- uint8_t min_rnr_timer;
- uint8_t port_num;
- uint8_t timeout;
- uint8_t retry_cnt;
- uint8_t rnr_retry;
- uint8_t alt_port_num;
- uint8_t alt_timeout;
+ enum ibv_qp_state qp_state;
+ enum ibv_qp_state cur_qp_state;
+ enum ibv_mtu path_mtu;
+ enum ibv_mig_state path_mig_state;
+ uint32_t qkey;
+ uint32_t rq_psn;
+ uint32_t sq_psn;
+ uint32_t dest_qp_num;
+ int qp_access_flags;
+ struct ibv_qp_cap cap;
+ struct ibv_ah_attr ah_attr;
+ struct ibv_ah_attr alt_ah_attr;
+ uint16_t pkey_index;
+ uint16_t alt_pkey_index;
+ uint8_t en_sqd_async_notify;
+ uint8_t sq_draining;
+ uint8_t max_rd_atomic;
+ uint8_t max_dest_rd_atomic;
+ uint8_t min_rnr_timer;
+ uint8_t port_num;
+ uint8_t timeout;
+ uint8_t retry_cnt;
+ uint8_t rnr_retry;
+ uint8_t alt_port_num;
+ uint8_t alt_timeout;
};
enum ibv_wr_opcode {
};
enum ibv_send_flags {
- IBV_SEND_FENCE = 1 << 0,
- IBV_SEND_SIGNALED = 1 << 1,
- IBV_SEND_SOLICITED = 1 << 2,
- IBV_SEND_INLINE = 1 << 3
+ IBV_SEND_FENCE = 1 << 0,
+ IBV_SEND_SIGNALED = 1 << 1,
+ IBV_SEND_SOLICITED = 1 << 2,
+ IBV_SEND_INLINE = 1 << 3
};
struct ibv_sge {
- uint64_t addr;
- uint32_t length;
- uint32_t lkey;
+ uint64_t addr;
+ uint32_t length;
+ uint32_t lkey;
};
struct ibv_send_wr {
- uint64_t wr_id;
- struct ibv_send_wr *next;
- struct ibv_sge *sg_list;
- int num_sge;
- enum ibv_wr_opcode opcode;
- int send_flags;
- uint32_t imm_data; /* in network byte order */
+ uint64_t wr_id;
+ struct ibv_send_wr *next;
+ struct ibv_sge *sg_list;
+ int num_sge;
+ enum ibv_wr_opcode opcode;
+ int send_flags;
+ uint32_t imm_data; /* in network byte order */
union {
struct {
- uint64_t remote_addr;
- uint32_t rkey;
+ uint64_t remote_addr;
+ uint32_t rkey;
} rdma;
struct {
- uint64_t remote_addr;
- uint64_t compare_add;
- uint64_t swap;
- uint32_t rkey;
+ uint64_t remote_addr;
+ uint64_t compare_add;
+ uint64_t swap;
+ uint32_t rkey;
} atomic;
struct {
- struct ibv_ah *ah;
- uint32_t remote_qpn;
- uint32_t remote_qkey;
+ struct ibv_ah *ah;
+ uint32_t remote_qpn;
+ uint32_t remote_qkey;
} ud;
} wr;
};
struct ibv_recv_wr {
- uint64_t wr_id;
- struct ibv_recv_wr *next;
- struct ibv_sge *sg_list;
- int num_sge;
+ uint64_t wr_id;
+ struct ibv_recv_wr *next;
+ struct ibv_sge *sg_list;
+ int num_sge;
};
struct ibv_mw_bind {
- uint64_t wr_id;
- struct ibv_mr *mr;
- void *addr;
- size_t length;
- int send_flags;
- int mw_access_flags;
+ uint64_t wr_id;
+ struct ibv_mr *mr;
+ void *addr;
+ size_t length;
+ int send_flags;
+ int mw_access_flags;
};
struct ibv_srq {
- struct ibv_context *context;
- void *srq_context;
- struct ibv_pd *pd;
- uint32_t handle;
-
- pthread_mutex_t mutex;
- pthread_cond_t cond;
- uint32_t events_completed;
+ struct ibv_context *context;
+ void *srq_context;
+ struct ibv_pd *pd;
+ uint32_t handle;
+
+ pthread_mutex_t mutex;
+ pthread_cond_t cond;
+ uint32_t events_completed;
};
struct ibv_qp {
- struct ibv_context *context;
- void *qp_context;
- struct ibv_pd *pd;
- struct ibv_cq *send_cq;
- struct ibv_cq *recv_cq;
- struct ibv_srq *srq;
- uint32_t handle;
- uint32_t qp_num;
- enum ibv_qp_state state;
- enum ibv_qp_type qp_type;
-
- pthread_mutex_t mutex;
- pthread_cond_t cond;
- uint32_t events_completed;
+ struct ibv_context *context;
+ void *qp_context;
+ struct ibv_pd *pd;
+ struct ibv_cq *send_cq;
+ struct ibv_cq *recv_cq;
+ struct ibv_srq *srq;
+ uint32_t handle;
+ uint32_t qp_num;
+ enum ibv_qp_state state;
+ enum ibv_qp_type qp_type;
+
+ pthread_mutex_t mutex;
+ pthread_cond_t cond;
+ uint32_t events_completed;
};
struct ibv_comp_channel {
- struct ibv_context *context;
- int fd;
- int refcnt;
+ struct ibv_context *context;
+ int fd;
+ int refcnt;
};
struct ibv_cq {
- struct ibv_context *context;
+ struct ibv_context *context;
struct ibv_comp_channel *channel;
- void *cq_context;
- uint32_t handle;
- int cqe;
-
- pthread_mutex_t mutex;
- pthread_cond_t cond;
- uint32_t comp_events_completed;
- uint32_t async_events_completed;
+ void *cq_context;
+ uint32_t handle;
+ int cqe;
+
+ pthread_mutex_t mutex;
+ pthread_cond_t cond;
+ uint32_t comp_events_completed;
+ uint32_t async_events_completed;
};
struct ibv_ah {
- struct ibv_context *context;
- struct ibv_pd *pd;
- uint32_t handle;
+ struct ibv_context *context;
+ struct ibv_pd *pd;
+ uint32_t handle;
};
struct ibv_device;
struct ibv_context;
struct ibv_device_ops {
- struct ibv_context * (*alloc_context)(struct ibv_device *device, int cmd_fd);
- void (*free_context)(struct ibv_context *context);
+ struct ibv_context * (*alloc_context)(struct ibv_device *device,
+ int cmd_fd);
+ void (*free_context)(struct ibv_context *context);
};
enum {
- IBV_SYSFS_NAME_MAX = 64,
- IBV_SYSFS_PATH_MAX = 256
+ IBV_SYSFS_NAME_MAX = 64, IBV_SYSFS_PATH_MAX = 256
};
struct ibv_device {
- struct ibv_device_ops ops;
- enum ibv_node_type node_type;
- enum ibv_transport_type transport_type;
+ struct ibv_device_ops ops;
+ enum ibv_node_type node_type;
+ enum ibv_transport_type transport_type;
/* Name of underlying kernel IB device, eg "mthca0" */
- char name[IBV_SYSFS_NAME_MAX];
+ char name[IBV_SYSFS_NAME_MAX];
/* Name of uverbs device, eg "uverbs0" */
- char dev_name[IBV_SYSFS_NAME_MAX];
+ char dev_name[IBV_SYSFS_NAME_MAX];
/* Path to infiniband_verbs class device in sysfs */
- char dev_path[IBV_SYSFS_PATH_MAX];
+ char dev_path[IBV_SYSFS_PATH_MAX];
/* Path to infiniband class device in sysfs */
- char ibdev_path[IBV_SYSFS_PATH_MAX];
+ char ibdev_path[IBV_SYSFS_PATH_MAX];
};
struct verbs_device {
struct ibv_device device; /* Must be first */
- size_t sz;
- size_t size_of_context;
- int (*init_context)(struct verbs_device *device,
- struct ibv_context *ctx, int cmd_fd);
- void (*uninit_context)(struct verbs_device *device,
- struct ibv_context *ctx);
- /* future fields added here */
+ size_t sz;
+ size_t size_of_context;
+ int (*init_context)(struct verbs_device *device,
+ struct ibv_context *ctx, int cmd_fd);
+ void (*uninit_context)(struct verbs_device *device,
+ struct ibv_context *ctx);
+/* future fields added here */
};
struct ibv_context_ops {
- int (*query_device)(struct ibv_context *context,
- struct ibv_device_attr *device_attr);
- int (*query_port)(struct ibv_context *context, uint8_t port_num,
- struct ibv_port_attr *port_attr);
- struct ibv_pd * (*alloc_pd)(struct ibv_context *context);
- int (*dealloc_pd)(struct ibv_pd *pd);
- struct ibv_mr * (*reg_mr)(struct ibv_pd *pd, void *addr, size_t length,
- int access);
- struct ibv_mr * (*rereg_mr)(struct ibv_mr *mr,
- int flags,
- struct ibv_pd *pd, void *addr,
- size_t length,
- int access);
- int (*dereg_mr)(struct ibv_mr *mr);
- struct ibv_mw * (*alloc_mw)(struct ibv_pd *pd, enum ibv_mw_type type);
- int (*bind_mw)(struct ibv_qp *qp, struct ibv_mw *mw,
- struct ibv_mw_bind *mw_bind);
- int (*dealloc_mw)(struct ibv_mw *mw);
- struct ibv_cq * (*create_cq)(struct ibv_context *context, int cqe,
- struct ibv_comp_channel *channel,
- int comp_vector);
- int (*poll_cq)(struct ibv_cq *cq, int num_entries, struct ibv_wc *wc);
- int (*req_notify_cq)(struct ibv_cq *cq, int solicited_only);
- void (*cq_event)(struct ibv_cq *cq);
- int (*resize_cq)(struct ibv_cq *cq, int cqe);
- int (*destroy_cq)(struct ibv_cq *cq);
- struct ibv_srq * (*create_srq)(struct ibv_pd *pd,
- struct ibv_srq_init_attr *srq_init_attr);
- int (*modify_srq)(struct ibv_srq *srq,
- struct ibv_srq_attr *srq_attr,
- int srq_attr_mask);
- int (*query_srq)(struct ibv_srq *srq,
- struct ibv_srq_attr *srq_attr);
- int (*destroy_srq)(struct ibv_srq *srq);
- int (*post_srq_recv)(struct ibv_srq *srq,
- struct ibv_recv_wr *recv_wr,
- struct ibv_recv_wr **bad_recv_wr);
- struct ibv_qp * (*create_qp)(struct ibv_pd *pd, struct ibv_qp_init_attr *attr);
- int (*query_qp)(struct ibv_qp *qp, struct ibv_qp_attr *attr,
- int attr_mask,
- struct ibv_qp_init_attr *init_attr);
- int (*modify_qp)(struct ibv_qp *qp, struct ibv_qp_attr *attr,
- int attr_mask);
- int (*destroy_qp)(struct ibv_qp *qp);
- int (*post_send)(struct ibv_qp *qp, struct ibv_send_wr *wr,
- struct ibv_send_wr **bad_wr);
- int (*post_recv)(struct ibv_qp *qp, struct ibv_recv_wr *wr,
- struct ibv_recv_wr **bad_wr);
- struct ibv_ah * (*create_ah)(struct ibv_pd *pd, struct ibv_ah_attr *attr);
- int (*destroy_ah)(struct ibv_ah *ah);
- int (*attach_mcast)(struct ibv_qp *qp, const union ibv_gid *gid,
- uint16_t lid);
- int (*detach_mcast)(struct ibv_qp *qp, const union ibv_gid *gid,
- uint16_t lid);
- void (*async_event)(struct ibv_async_event *event);
+ int (*query_device)(struct ibv_context *context,
+ struct ibv_device_attr *device_attr);
+ int (*query_port)(struct ibv_context *context, uint8_t port_num,
+ struct ibv_port_attr *port_attr);
+ struct ibv_pd * (*alloc_pd)(struct ibv_context *context);
+ int (*dealloc_pd)(struct ibv_pd *pd);
+ struct ibv_mr * (*reg_mr)(struct ibv_pd *pd, void *addr, size_t length,
+ int access);
+ struct ibv_mr * (*rereg_mr)(struct ibv_mr *mr, int flags,
+ struct ibv_pd *pd, void *addr, size_t length,
+ int access);
+ int (*dereg_mr)(struct ibv_mr *mr);
+ struct ibv_mw * (*alloc_mw)(struct ibv_pd *pd, enum ibv_mw_type type);
+ int (*bind_mw)(struct ibv_qp *qp, struct ibv_mw *mw,
+ struct ibv_mw_bind *mw_bind);
+ int (*dealloc_mw)(struct ibv_mw *mw);
+ struct ibv_cq * (*create_cq)(struct ibv_context *context, int cqe,
+ struct ibv_comp_channel *channel, int comp_vector);
+ int (*poll_cq)(struct ibv_cq *cq, int num_entries, struct ibv_wc *wc);
+ int (*req_notify_cq)(struct ibv_cq *cq, int solicited_only);
+ void (*cq_event)(struct ibv_cq *cq);
+ int (*resize_cq)(struct ibv_cq *cq, int cqe);
+ int (*destroy_cq)(struct ibv_cq *cq);
+ struct ibv_srq * (*create_srq)(struct ibv_pd *pd,
+ struct ibv_srq_init_attr *srq_init_attr);
+ int (*modify_srq)(struct ibv_srq *srq, struct ibv_srq_attr *srq_attr,
+ int srq_attr_mask);
+ int (*query_srq)(struct ibv_srq *srq, struct ibv_srq_attr *srq_attr);
+ int (*destroy_srq)(struct ibv_srq *srq);
+ int (*post_srq_recv)(struct ibv_srq *srq, struct ibv_recv_wr *recv_wr,
+ struct ibv_recv_wr **bad_recv_wr);
+ struct ibv_qp * (*create_qp)(struct ibv_pd *pd,
+ struct ibv_qp_init_attr *attr);
+ int (*query_qp)(struct ibv_qp *qp, struct ibv_qp_attr *attr,
+ int attr_mask, struct ibv_qp_init_attr *init_attr);
+ int (*modify_qp)(struct ibv_qp *qp, struct ibv_qp_attr *attr,
+ int attr_mask);
+ int (*destroy_qp)(struct ibv_qp *qp);
+ int (*post_send)(struct ibv_qp *qp, struct ibv_send_wr *wr,
+ struct ibv_send_wr **bad_wr);
+ int (*post_recv)(struct ibv_qp *qp, struct ibv_recv_wr *wr,
+ struct ibv_recv_wr **bad_wr);
+ struct ibv_ah * (*create_ah)(struct ibv_pd *pd,
+ struct ibv_ah_attr *attr);
+ int (*destroy_ah)(struct ibv_ah *ah);
+ int (*attach_mcast)(struct ibv_qp *qp, const union ibv_gid *gid,
+ uint16_t lid);
+ int (*detach_mcast)(struct ibv_qp *qp, const union ibv_gid *gid,
+ uint16_t lid);
+ void (*async_event)(struct ibv_async_event *event);
};
struct ibv_context {
- struct ibv_device *device;
- struct ibv_context_ops ops;
- int cmd_fd;
- int async_fd;
- int num_comp_vectors;
- pthread_mutex_t mutex;
- void *abi_compat;
+ struct ibv_device *device;
+ struct ibv_context_ops ops;
+ int cmd_fd;
+ int async_fd;
+ int num_comp_vectors;
+ pthread_mutex_t mutex;
+ void *abi_compat;
};
enum verbs_context_mask {
- VERBS_CONTEXT_RESERVED = 1 << 0
+ VERBS_CONTEXT_XRCD = 1 << 0,
+ VERBS_CONTEXT_RESERVED = 1 << 1
};
struct verbs_context {
/* "grows up" - new fields go here */
+ struct ibv_xrcd * (*open_xrcd)(struct ibv_context *context,
+ struct ibv_xrcd_init_attr *xrcd_init_attr);
+ int (*close_xrcd)(struct ibv_xrcd *xrcd);
uint64_t has_comp_mask;
size_t sz; /* Must be immediately before struct ibv_context */
struct ibv_context context;/* Must be last field in the struct */
};
-static inline struct verbs_context *verbs_get_ctx(
- const struct ibv_context *ctx)
-{
+static inline struct verbs_context *verbs_get_ctx(const struct ibv_context *ctx) {
return (ctx->abi_compat != ((uint8_t *) NULL) - 1) ?
- NULL : container_of(ctx, struct verbs_context, context);
+ NULL : container_of(ctx, struct verbs_context, context);
}
+#define verbs_get_ctx_op(ctx, op) ({ \
+ struct verbs_context *vctx = verbs_get_ctx(ctx); \
+ (!vctx || (vctx->sz < sizeof(*vctx) - offsetof(struct verbs_context, op)) || \
+ !vctx->op) ? NULL : vctx; })
+
static inline struct verbs_device *verbs_get_device(
- const struct ibv_device *dev)
-{
+ const struct ibv_device *dev) {
return (dev->ops.alloc_context) ?
- NULL : container_of(dev, struct verbs_device, device);
+ NULL : container_of(dev, struct verbs_device, device);
}
/**
* be acknowledged with ibv_ack_async_event().
*/
int ibv_get_async_event(struct ibv_context *context,
- struct ibv_async_event *event);
+ struct ibv_async_event *event);
/**
* ibv_ack_async_event - Acknowledge an async event
* ibv_query_device - Get device properties
*/
int ibv_query_device(struct ibv_context *context,
- struct ibv_device_attr *device_attr);
+ struct ibv_device_attr *device_attr);
/**
* ibv_query_port - Get port properties
*/
int ibv_query_port(struct ibv_context *context, uint8_t port_num,
- struct ibv_port_attr *port_attr);
+ struct ibv_port_attr *port_attr);
static inline int ___ibv_query_port(struct ibv_context *context,
- uint8_t port_num,
- struct ibv_port_attr *port_attr)
-{
- /* For compatability when running with old libibverbs */
+ uint8_t port_num, struct ibv_port_attr *port_attr) {
+ /* For compatibility when running with old libibverbs */
port_attr->link_layer = IBV_LINK_LAYER_UNSPECIFIED;
- port_attr->reserved = 0;
+ port_attr->reserved = 0;
return ibv_query_port(context, port_num, port_attr);
}
/**
* ibv_query_gid - Get a GID table entry
*/
-int ibv_query_gid(struct ibv_context *context, uint8_t port_num,
- int index, union ibv_gid *gid);
+int ibv_query_gid(struct ibv_context *context, uint8_t port_num, int index,
+ union ibv_gid *gid);
/**
* ibv_query_pkey - Get a P_Key table entry
*/
-int ibv_query_pkey(struct ibv_context *context, uint8_t port_num,
- int index, uint16_t *pkey);
+int ibv_query_pkey(struct ibv_context *context, uint8_t port_num, int index,
+ uint16_t *pkey);
/**
* ibv_alloc_pd - Allocate a protection domain
*/
int ibv_dealloc_pd(struct ibv_pd *pd);
+/**
+ * ibv_open_xrcd - Open an extended connection domain
+ */
+static inline struct ibv_xrcd *
+ibv_open_xrcd(struct ibv_context *context, struct ibv_xrcd_init_attr *xrcd_init_attr) {
+ struct verbs_context *vctx = verbs_get_ctx_op(context, open_xrcd);
+ if (!vctx) {
+ errno = ENOSYS;
+ return NULL;
+ }
+ return vctx->open_xrcd(context, xrcd_init_attr);
+}
+
+/**
+ * ibv_close_xrcd - Close an extended connection domain
+ */
+static inline int ibv_close_xrcd(struct ibv_xrcd *xrcd) {
+ struct verbs_context *vctx = verbs_get_ctx(xrcd->context);
+ return vctx->close_xrcd(xrcd);
+}
+
/**
* ibv_reg_mr - Register a memory region
*/
-struct ibv_mr *ibv_reg_mr(struct ibv_pd *pd, void *addr,
- size_t length, int access);
+struct ibv_mr *ibv_reg_mr(struct ibv_pd *pd, void *addr, size_t length,
+ int access);
/**
* ibv_dereg_mr - Deregister a memory region
* Must be >= 0 and < context->num_comp_vectors.
*/
struct ibv_cq *ibv_create_cq(struct ibv_context *context, int cqe,
- void *cq_context,
- struct ibv_comp_channel *channel,
- int comp_vector);
+ void *cq_context, struct ibv_comp_channel *channel,
+ int comp_vector);
/**
* ibv_resize_cq - Modifies the capacity of the CQ.
* All completion events returned by ibv_get_cq_event() must
* eventually be acknowledged with ibv_ack_cq_events().
*/
-int ibv_get_cq_event(struct ibv_comp_channel *channel,
- struct ibv_cq **cq, void **cq_context);
+int ibv_get_cq_event(struct ibv_comp_channel *channel, struct ibv_cq **cq,
+ void **cq_context);
/**
* ibv_ack_cq_events - Acknowledge CQ completion events
* non-negative and strictly less than num_entries, then the CQ was
* emptied.
*/
-static inline int ibv_poll_cq(struct ibv_cq *cq, int num_entries, struct ibv_wc *wc)
-{
+static inline int ibv_poll_cq(struct ibv_cq *cq, int num_entries,
+ struct ibv_wc *wc) {
return cq->context->ops.poll_cq(cq, num_entries, wc);
}
* the next solicited CQ entry. If zero, any CQ entry, solicited or
* not, will generate an event.
*/
-static inline int ibv_req_notify_cq(struct ibv_cq *cq, int solicited_only)
-{
+static inline int ibv_req_notify_cq(struct ibv_cq *cq, int solicited_only) {
return cq->context->ops.req_notify_cq(cq, solicited_only);
}
* will always be at least as large as the requested values.
*/
struct ibv_srq *ibv_create_srq(struct ibv_pd *pd,
- struct ibv_srq_init_attr *srq_init_attr);
+ struct ibv_srq_init_attr *srq_init_attr);
/**
* ibv_modify_srq - Modifies the attributes for the specified SRQ.
* IBV_SRQ_LIMIT to set the SRQ's limit and request notification when
* the number of receives queued drops below the limit.
*/
-int ibv_modify_srq(struct ibv_srq *srq,
- struct ibv_srq_attr *srq_attr,
- int srq_attr_mask);
+int ibv_modify_srq(struct ibv_srq *srq, struct ibv_srq_attr *srq_attr,
+ int srq_attr_mask);
/**
* ibv_query_srq - Returns the attribute list and current values for the
* the work request that failed to be posted on the QP.
*/
static inline int ibv_post_srq_recv(struct ibv_srq *srq,
- struct ibv_recv_wr *recv_wr,
- struct ibv_recv_wr **bad_recv_wr)
-{
+ struct ibv_recv_wr *recv_wr, struct ibv_recv_wr **bad_recv_wr) {
return srq->context->ops.post_srq_recv(srq, recv_wr, bad_recv_wr);
}
* ibv_create_qp - Create a queue pair.
*/
struct ibv_qp *ibv_create_qp(struct ibv_pd *pd,
- struct ibv_qp_init_attr *qp_init_attr);
+ struct ibv_qp_init_attr *qp_init_attr);
/**
* ibv_modify_qp - Modify a queue pair.
*/
-int ibv_modify_qp(struct ibv_qp *qp, struct ibv_qp_attr *attr,
- int attr_mask);
+int ibv_modify_qp(struct ibv_qp *qp, struct ibv_qp_attr *attr, int attr_mask);
/**
* ibv_query_qp - Returns the attribute list and current values for the
* The qp_attr_mask may be used to limit the query to gathering only the
* selected attributes.
*/
-int ibv_query_qp(struct ibv_qp *qp, struct ibv_qp_attr *attr,
- int attr_mask,
- struct ibv_qp_init_attr *init_attr);
+int ibv_query_qp(struct ibv_qp *qp, struct ibv_qp_attr *attr, int attr_mask,
+ struct ibv_qp_init_attr *init_attr);
/**
* ibv_destroy_qp - Destroy a queue pair.
* immediately after the call returns.
*/
static inline int ibv_post_send(struct ibv_qp *qp, struct ibv_send_wr *wr,
- struct ibv_send_wr **bad_wr)
-{
+ struct ibv_send_wr **bad_wr) {
return qp->context->ops.post_send(qp, wr, bad_wr);
}
* ibv_post_recv - Post a list of work requests to a receive queue.
*/
static inline int ibv_post_recv(struct ibv_qp *qp, struct ibv_recv_wr *wr,
- struct ibv_recv_wr **bad_wr)
-{
+ struct ibv_recv_wr **bad_wr) {
return qp->context->ops.post_recv(qp, wr, bad_wr);
}
* handle for replying to the message.
*/
int ibv_init_ah_from_wc(struct ibv_context *context, uint8_t port_num,
- struct ibv_wc *wc, struct ibv_grh *grh,
- struct ibv_ah_attr *ah_attr);
+ struct ibv_wc *wc, struct ibv_grh *grh,
+ struct ibv_ah_attr *ah_attr);
/**
* ibv_create_ah_from_wc - Creates an address handle associated with the
* in all UD QP post sends.
*/
struct ibv_ah *ibv_create_ah_from_wc(struct ibv_pd *pd, struct ibv_wc *wc,
- struct ibv_grh *grh, uint8_t port_num);
+ struct ibv_grh *grh, uint8_t port_num);
/**
* ibv_destroy_ah - Destroy an address handle.
# undef __attribute_const
#endif /* INFINIBAND_VERBS_H */