From cca15ab7e2a5fa3fcdaf8f3dd2f9ecb57646720d Mon Sep 17 00:00:00 2001 From: shefty Date: Wed, 23 Jul 2008 20:14:05 +0000 Subject: [PATCH] uvp/interface: add support for reporting QP context in WCs Extend the uvp_interface to include support for reporting the QP context in work completions. The required changes are to allow specifying a qp_context in the pre_create_qp() IOCTL call, and reporting the qp_context through the work completion structure. Signed-off-by: Sean Hefty git-svn-id: svn://openib.tc.cornell.edu/gen1@1436 ad392aa1-c5ef-ae45-8dd8-e69d62a5ef86 --- trunk/hw/mlx4/user/hca/mlx4.c | 655 +++++++++--------- trunk/hw/mlx4/user/hca/verbs.c | 19 + trunk/hw/mlx4/user/hca/verbs.h | 898 ++++++++++++------------ trunk/hw/mthca/user/mlnx_ual_qp.c | 28 +- trunk/hw/mthca/user/mlnx_uvp_verbs.h | 981 ++++++++++++++------------- trunk/inc/user/iba/ib_uvp.h | 21 +- 6 files changed, 1332 insertions(+), 1270 deletions(-) diff --git a/trunk/hw/mlx4/user/hca/mlx4.c b/trunk/hw/mlx4/user/hca/mlx4.c index e8e85b20..bb5c71e3 100644 --- a/trunk/hw/mlx4/user/hca/mlx4.c +++ b/trunk/hw/mlx4/user/hca/mlx4.c @@ -1,329 +1,326 @@ -/* - * Copyright (c) 2007 Cisco, Inc. All rights reserved. - * - * This software is available to you under a choice of one of two - * licenses. You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
- */ - - -#include "mlx4.h" -#include "mx_abi.h" - -#ifndef PCI_VENDOR_ID_MELLANOX -#define PCI_VENDOR_ID_MELLANOX 0x15b3 -#endif - -#define HCA(v, d) \ - {PCI_VENDOR_ID_##v, \ - d } - -struct { - unsigned vendor; - unsigned device; -} hca_table[] = { - HCA(MELLANOX, 0x6340), /* MT25408 "Hermon" SDR */ - HCA(MELLANOX, 0x634a), /* MT25408 "Hermon" DDR */ - HCA(MELLANOX, 0x6354), /* MT25408 "Hermon" QDR */ - HCA(MELLANOX, 0x6732), /* MT25408 "Hermon" DDR PCIe gen2 */ - HCA(MELLANOX, 0x673c), /* MT25408 "Hermon" QDR PCIe gen2 */ - HCA(MELLANOX, 0x0191), /* MT25408 "Hermon" livefish mode */ -}; - - -struct ibv_context * mlx4_alloc_context() -{ - struct mlx4_context *context; - - /* allocate context */ - context = cl_zalloc(sizeof *context); - if (!context) - goto end; - - context->qp_table_mutex = CreateMutex(NULL, FALSE, NULL); - if (!context->qp_table_mutex) - goto err_qp_mutex; - -#ifdef XRC_SUPPORT - context->xrc_srq_table_mutex = CreateMutex(NULL, FALSE, NULL); - if (!context->xrc_srq_table_mutex) - goto err_xrc_mutex; -#endif - - context->db_list_mutex = CreateMutex(NULL, FALSE, NULL); - if (!context->db_list_mutex) - goto err_db_mutex; - - context->ibv_ctx.mutex = CreateMutex(NULL, FALSE, NULL); - if (!context->ibv_ctx.mutex) - goto err_ctx_mutex; - - if (cl_spinlock_init(&context->uar_lock)) - goto err_uar_spinlock; - - if (cl_spinlock_init(&context->bf_lock)) - goto err_bf_spinlock; - - return &context->ibv_ctx; - -err_bf_spinlock: - cl_spinlock_destroy(&context->uar_lock); -err_uar_spinlock: - CloseHandle(context->ibv_ctx.mutex); -err_ctx_mutex: - CloseHandle(context->db_list_mutex); -err_db_mutex: -#ifdef XRC_SUPPORT - CloseHandle(context->xrc_srq_table_mutex); -err_xrc_mutex: -#endif - CloseHandle(context->qp_table_mutex); -err_qp_mutex: - cl_free(context); -end: - return NULL; - -} - -struct ibv_context * mlx4_fill_context(struct ibv_context *ctx, struct ibv_get_context_resp *p_resp) -{ - struct mlx4_context *context = to_mctx(ctx); - SYSTEM_INFO sys_info; - int i; - - /* check device type */ - for (i = 0; i < sizeof hca_table / sizeof hca_table[0]; ++i) - if (p_resp->vend_id == hca_table[i].vendor && - p_resp->dev_id == hca_table[i].device) - goto found; - goto err_dev_type; - -found: - context->num_qps = p_resp->qp_tab_size; - context->qp_table_shift = ffsl(context->num_qps) - 1 - MLX4_QP_TABLE_BITS; - context->qp_table_mask = (1 << context->qp_table_shift) - 1; - - for (i = 0; i < MLX4_QP_TABLE_SIZE; ++i) - context->qp_table[i].refcnt = 0; - -#ifdef XRC_SUPPORT - context->num_xrc_srqs = p_resp->qp_tab_size; - context->xrc_srq_table_shift = ffsl(context->num_xrc_srqs) - 1 - - MLX4_XRC_SRQ_TABLE_BITS; - context->xrc_srq_table_mask = (1 << context->xrc_srq_table_shift) - 1; - - for (i = 0; i < MLX4_XRC_SRQ_TABLE_SIZE; ++i) - context->xrc_srq_table[i].refcnt = 0; -#endif - - for (i = 0; i < MLX4_NUM_DB_TYPE; ++i) - context->db_list[i] = NULL; - - context->uar = (uint8_t *)(uintptr_t)p_resp->uar_addr; - context->bf_page = (uint8_t *)(uintptr_t)p_resp->bf_page; - context->bf_buf_size = p_resp->bf_buf_size; - context->bf_offset = p_resp->bf_offset; - - context->max_qp_wr = p_resp->max_qp_wr; - context->max_sge = p_resp->max_sge; - context->max_cqe = p_resp->max_cqe; - - GetSystemInfo(&sys_info); - context->ibv_ctx.page_size = sys_info.dwPageSize; - context->ibv_ctx.p_hca_attr = NULL; - - return &context->ibv_ctx; - -err_dev_type: - mlx4_free_context(&context->ibv_ctx); - return NULL; -} - -void mlx4_free_context(struct ibv_context *ctx) -{ - struct mlx4_context *context = 
to_mctx(ctx); - - cl_spinlock_destroy(&context->bf_lock); - cl_spinlock_destroy(&context->uar_lock); - CloseHandle(context->ibv_ctx.mutex); - CloseHandle(context->db_list_mutex); -#ifdef XRC_SUPPORT - CloseHandle(context->xrc_srq_table_mutex); -#endif - CloseHandle(context->qp_table_mutex); - if (context->ibv_ctx.p_hca_attr) - cl_free(context->ibv_ctx.p_hca_attr); - cl_free(context); -} - -__declspec(dllexport) ib_api_status_t -uvp_get_interface ( - IN OUT uvp_interface_t *p_uvp ) -{ - CL_ASSERT(p_uvp); - - /* - * Version of the header file this interface export can handle - */ - p_uvp->version = 0x101; - p_uvp->guid = 0x12345678; - - - /* - * CA Management - */ - p_uvp->pre_open_ca = mlx4_pre_open_ca; - p_uvp->post_open_ca = mlx4_post_open_ca; - p_uvp->pre_query_ca = mlx4_pre_query_ca; - p_uvp->post_query_ca = mlx4_post_query_ca; - p_uvp->pre_modify_ca = NULL; - p_uvp->post_modify_ca = NULL; - p_uvp->pre_close_ca = NULL; - p_uvp->post_close_ca = mlx4_post_close_ca; - - - /* - * Protection Domain - */ - p_uvp->pre_allocate_pd = mlx4_pre_alloc_pd; - p_uvp->post_allocate_pd = mlx4_post_alloc_pd; - p_uvp->pre_deallocate_pd = NULL; - p_uvp->post_deallocate_pd = mlx4_post_free_pd; - - - /* - * SRQ Management Verbs - */ - p_uvp->pre_create_srq = mlx4_pre_create_srq; - p_uvp->post_create_srq = mlx4_post_create_srq; - p_uvp->pre_query_srq = NULL; - p_uvp->post_query_srq = NULL; - p_uvp->pre_modify_srq = NULL; - p_uvp->post_modify_srq = NULL; - p_uvp->pre_destroy_srq = NULL; - p_uvp->post_destroy_srq = mlx4_post_destroy_srq; - - - /* - * QP Management Verbs - */ - p_uvp->pre_create_qp = mlx4_pre_create_qp; - p_uvp->post_create_qp = mlx4_post_create_qp; - p_uvp->pre_modify_qp = mlx4_pre_modify_qp; - p_uvp->post_modify_qp = mlx4_post_modify_qp; - p_uvp->pre_query_qp = NULL; - p_uvp->post_query_qp = mlx4_post_query_qp; - p_uvp->pre_destroy_qp = mlx4_pre_destroy_qp; - p_uvp->post_destroy_qp = mlx4_post_destroy_qp; - p_uvp->nd_modify_qp = mlx4_nd_modify_qp; - p_uvp->nd_get_qp_state = mlx4_nd_get_qp_state; - - - /* - * Completion Queue Management Verbs - */ - p_uvp->pre_create_cq = mlx4_pre_create_cq; - p_uvp->post_create_cq = mlx4_post_create_cq; - p_uvp->pre_query_cq = mlx4_pre_query_cq; - p_uvp->post_query_cq = NULL; - p_uvp->pre_resize_cq = NULL; - p_uvp->post_resize_cq = NULL; - p_uvp->pre_destroy_cq = NULL; - p_uvp->post_destroy_cq = mlx4_post_destroy_cq; - - - /* - * AV Management - */ - p_uvp->pre_create_av = mlx4_pre_create_ah; - p_uvp->post_create_av = NULL; - p_uvp->pre_query_av = mlx4_pre_query_ah; - p_uvp->post_query_av = mlx4_post_query_ah; - p_uvp->pre_modify_av = mlx4_pre_modify_ah; - p_uvp->post_modify_av = NULL; - p_uvp->pre_destroy_av = mlx4_pre_destroy_ah; - p_uvp->post_destroy_av = NULL; - - - /* - * Memory Region / Window Management Verbs - */ - p_uvp->pre_create_mw = NULL; - p_uvp->post_create_mw = NULL; - p_uvp->pre_query_mw = NULL; - p_uvp->post_query_mw = NULL; - p_uvp->pre_destroy_mw = NULL; - p_uvp->post_destroy_mw = NULL; - - - /* - * Multicast Support Verbs - */ - p_uvp->pre_attach_mcast = NULL; - p_uvp->post_attach_mcast = NULL; - p_uvp->pre_detach_mcast = NULL; - p_uvp->post_detach_mcast = NULL; - - - /* - * OS bypass (send, receive, poll/notify cq) - */ - p_uvp->post_send = mlx4_post_send; - p_uvp->post_recv = mlx4_post_recv; - p_uvp->post_srq_recv = mlx4_post_srq_recv; - p_uvp->poll_cq = mlx4_poll_cq; - p_uvp->rearm_cq = mlx4_arm_cq; - p_uvp->rearm_n_cq = NULL; /* __enable_ncomp_cq_notify: Not implemented */; - p_uvp->peek_cq = NULL; /* __peek_cq: Not implemented */ - 
p_uvp->bind_mw = NULL; /* __bind_mw: Not implemented */ - -#ifdef XRC_SUPPORT - /* - * XRC Management Verbs - */ - p_uvp->pre_create_xrc_srq = mlx4_pre_create_xrc_srq; - p_uvp->post_create_xrc_srq = mlx4_post_create_xrc_srq; - p_uvp->pre_open_xrc_domain = mlx4_pre_open_xrc_domain; - p_uvp->post_open_xrc_domain = mlx4_post_open_xrc_domain; - p_uvp->pre_close_xrc_domain = NULL; - p_uvp->post_close_xrc_domain = mlx4_post_close_xrc_domain; - p_uvp->pre_create_xrc_rcv_qp = NULL; - p_uvp->post_create_xrc_rcv_qp = NULL; - p_uvp->pre_modify_xrc_rcv_qp = NULL; - p_uvp->post_modify_xrc_rcv_qp = NULL; - p_uvp->pre_query_xrc_rcv_qp = NULL; - p_uvp->post_query_xrc_rcv_qp = NULL; - p_uvp->pre_reg_xrc_rcv_qp = NULL; - p_uvp->post_reg_xrc_rcv_qp = NULL; - p_uvp->pre_unreg_xrc_rcv_qp = NULL; - p_uvp->post_unreg_xrc_rcv_qp = NULL; -#endif - - return IB_SUCCESS; -} - - +/* + * Copyright (c) 2007 Cisco, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + + +#include "mlx4.h" +#include "mx_abi.h" + +#ifndef PCI_VENDOR_ID_MELLANOX +#define PCI_VENDOR_ID_MELLANOX 0x15b3 +#endif + +#define HCA(v, d) \ + {PCI_VENDOR_ID_##v, \ + d } + +struct { + unsigned vendor; + unsigned device; +} hca_table[] = { + HCA(MELLANOX, 0x6340), /* MT25408 "Hermon" SDR */ + HCA(MELLANOX, 0x634a), /* MT25408 "Hermon" DDR */ + HCA(MELLANOX, 0x6354), /* MT25408 "Hermon" QDR */ + HCA(MELLANOX, 0x6732), /* MT25408 "Hermon" DDR PCIe gen2 */ + HCA(MELLANOX, 0x673c), /* MT25408 "Hermon" QDR PCIe gen2 */ + HCA(MELLANOX, 0x0191), /* MT25408 "Hermon" livefish mode */ +}; + + +struct ibv_context * mlx4_alloc_context() +{ + struct mlx4_context *context; + + /* allocate context */ + context = cl_zalloc(sizeof *context); + if (!context) + goto end; + + context->qp_table_mutex = CreateMutex(NULL, FALSE, NULL); + if (!context->qp_table_mutex) + goto err_qp_mutex; + +#ifdef XRC_SUPPORT + context->xrc_srq_table_mutex = CreateMutex(NULL, FALSE, NULL); + if (!context->xrc_srq_table_mutex) + goto err_xrc_mutex; +#endif + + context->db_list_mutex = CreateMutex(NULL, FALSE, NULL); + if (!context->db_list_mutex) + goto err_db_mutex; + + context->ibv_ctx.mutex = CreateMutex(NULL, FALSE, NULL); + if (!context->ibv_ctx.mutex) + goto err_ctx_mutex; + + if (cl_spinlock_init(&context->uar_lock)) + goto err_uar_spinlock; + + if (cl_spinlock_init(&context->bf_lock)) + goto err_bf_spinlock; + + return &context->ibv_ctx; + +err_bf_spinlock: + cl_spinlock_destroy(&context->uar_lock); +err_uar_spinlock: + CloseHandle(context->ibv_ctx.mutex); +err_ctx_mutex: + CloseHandle(context->db_list_mutex); +err_db_mutex: +#ifdef XRC_SUPPORT + CloseHandle(context->xrc_srq_table_mutex); +err_xrc_mutex: +#endif + CloseHandle(context->qp_table_mutex); +err_qp_mutex: + cl_free(context); +end: + return NULL; + +} + +struct ibv_context * mlx4_fill_context(struct ibv_context *ctx, struct ibv_get_context_resp *p_resp) +{ + struct mlx4_context *context = to_mctx(ctx); + SYSTEM_INFO sys_info; + int i; + + /* check device type */ + for (i = 0; i < sizeof hca_table / sizeof hca_table[0]; ++i) + if (p_resp->vend_id == hca_table[i].vendor && + p_resp->dev_id == hca_table[i].device) + goto found; + goto err_dev_type; + +found: + context->num_qps = p_resp->qp_tab_size; + context->qp_table_shift = ffsl(context->num_qps) - 1 - MLX4_QP_TABLE_BITS; + context->qp_table_mask = (1 << context->qp_table_shift) - 1; + + for (i = 0; i < MLX4_QP_TABLE_SIZE; ++i) + context->qp_table[i].refcnt = 0; + +#ifdef XRC_SUPPORT + context->num_xrc_srqs = p_resp->qp_tab_size; + context->xrc_srq_table_shift = ffsl(context->num_xrc_srqs) - 1 + - MLX4_XRC_SRQ_TABLE_BITS; + context->xrc_srq_table_mask = (1 << context->xrc_srq_table_shift) - 1; + + for (i = 0; i < MLX4_XRC_SRQ_TABLE_SIZE; ++i) + context->xrc_srq_table[i].refcnt = 0; +#endif + + for (i = 0; i < MLX4_NUM_DB_TYPE; ++i) + context->db_list[i] = NULL; + + context->uar = (uint8_t *)(uintptr_t)p_resp->uar_addr; + context->bf_page = (uint8_t *)(uintptr_t)p_resp->bf_page; + context->bf_buf_size = p_resp->bf_buf_size; + context->bf_offset = p_resp->bf_offset; + + context->max_qp_wr = p_resp->max_qp_wr; + context->max_sge = p_resp->max_sge; + context->max_cqe = p_resp->max_cqe; + + GetSystemInfo(&sys_info); + context->ibv_ctx.page_size = sys_info.dwPageSize; + context->ibv_ctx.p_hca_attr = NULL; + + return &context->ibv_ctx; + +err_dev_type: + mlx4_free_context(&context->ibv_ctx); + return NULL; +} + +void mlx4_free_context(struct ibv_context *ctx) +{ + struct mlx4_context *context = 
to_mctx(ctx); + + cl_spinlock_destroy(&context->bf_lock); + cl_spinlock_destroy(&context->uar_lock); + CloseHandle(context->ibv_ctx.mutex); + CloseHandle(context->db_list_mutex); +#ifdef XRC_SUPPORT + CloseHandle(context->xrc_srq_table_mutex); +#endif + CloseHandle(context->qp_table_mutex); + if (context->ibv_ctx.p_hca_attr) + cl_free(context->ibv_ctx.p_hca_attr); + cl_free(context); +} + +__declspec(dllexport) ib_api_status_t +uvp_get_interface ( + IN OUT uvp_interface_t *p_uvp ) +{ + CL_ASSERT(p_uvp); + + /* + * Version of the header file this interface export can handle + */ + p_uvp->version = 0x101; + p_uvp->guid = 0x12345678; + + + /* + * CA Management + */ + p_uvp->pre_open_ca = mlx4_pre_open_ca; + p_uvp->post_open_ca = mlx4_post_open_ca; + p_uvp->pre_query_ca = mlx4_pre_query_ca; + p_uvp->post_query_ca = mlx4_post_query_ca; + p_uvp->pre_modify_ca = NULL; + p_uvp->post_modify_ca = NULL; + p_uvp->pre_close_ca = NULL; + p_uvp->post_close_ca = mlx4_post_close_ca; + + + /* + * Protection Domain + */ + p_uvp->pre_allocate_pd = mlx4_pre_alloc_pd; + p_uvp->post_allocate_pd = mlx4_post_alloc_pd; + p_uvp->pre_deallocate_pd = NULL; + p_uvp->post_deallocate_pd = mlx4_post_free_pd; + + + /* + * SRQ Management Verbs + */ + p_uvp->pre_create_srq = mlx4_pre_create_srq; + p_uvp->post_create_srq = mlx4_post_create_srq; + p_uvp->pre_query_srq = NULL; + p_uvp->post_query_srq = NULL; + p_uvp->pre_modify_srq = NULL; + p_uvp->post_modify_srq = NULL; + p_uvp->pre_destroy_srq = NULL; + p_uvp->post_destroy_srq = mlx4_post_destroy_srq; + + p_uvp->pre_create_qp = mlx4_pre_create_qp; + p_uvp->wv_pre_create_qp = mlx4_wv_pre_create_qp; + p_uvp->post_create_qp = mlx4_post_create_qp; + p_uvp->pre_modify_qp = mlx4_pre_modify_qp; + p_uvp->post_modify_qp = mlx4_post_modify_qp; + p_uvp->pre_query_qp = NULL; + p_uvp->post_query_qp = mlx4_post_query_qp; + p_uvp->pre_destroy_qp = mlx4_pre_destroy_qp; + p_uvp->post_destroy_qp = mlx4_post_destroy_qp; + p_uvp->nd_modify_qp = mlx4_nd_modify_qp; + p_uvp->nd_get_qp_state = mlx4_nd_get_qp_state; + + + /* + * Completion Queue Management Verbs + */ + p_uvp->pre_create_cq = mlx4_pre_create_cq; + p_uvp->post_create_cq = mlx4_post_create_cq; + p_uvp->pre_query_cq = mlx4_pre_query_cq; + p_uvp->post_query_cq = NULL; + p_uvp->pre_resize_cq = NULL; + p_uvp->post_resize_cq = NULL; + p_uvp->pre_destroy_cq = NULL; + p_uvp->post_destroy_cq = mlx4_post_destroy_cq; + + + /* + * AV Management + */ + p_uvp->pre_create_av = mlx4_pre_create_ah; + p_uvp->post_create_av = NULL; + p_uvp->pre_query_av = mlx4_pre_query_ah; + p_uvp->post_query_av = mlx4_post_query_ah; + p_uvp->pre_modify_av = mlx4_pre_modify_ah; + p_uvp->post_modify_av = NULL; + p_uvp->pre_destroy_av = mlx4_pre_destroy_ah; + p_uvp->post_destroy_av = NULL; + + + /* + * Memory Region / Window Management Verbs + */ + p_uvp->pre_create_mw = NULL; + p_uvp->post_create_mw = NULL; + p_uvp->pre_query_mw = NULL; + p_uvp->post_query_mw = NULL; + p_uvp->pre_destroy_mw = NULL; + p_uvp->post_destroy_mw = NULL; + + + /* + * Multicast Support Verbs + */ + p_uvp->pre_attach_mcast = NULL; + p_uvp->post_attach_mcast = NULL; + p_uvp->pre_detach_mcast = NULL; + p_uvp->post_detach_mcast = NULL; + + + /* + * OS bypass (send, receive, poll/notify cq) + */ + p_uvp->post_send = mlx4_post_send; + p_uvp->post_recv = mlx4_post_recv; + p_uvp->post_srq_recv = mlx4_post_srq_recv; + p_uvp->poll_cq = mlx4_poll_cq; + p_uvp->rearm_cq = mlx4_arm_cq; + p_uvp->rearm_n_cq = NULL; /* __enable_ncomp_cq_notify: Not implemented */; + p_uvp->peek_cq = NULL; /* __peek_cq: Not 
implemented */ + p_uvp->bind_mw = NULL; /* __bind_mw: Not implemented */ + +#ifdef XRC_SUPPORT + /* + * XRC Management Verbs + */ + p_uvp->pre_create_xrc_srq = mlx4_pre_create_xrc_srq; + p_uvp->post_create_xrc_srq = mlx4_post_create_xrc_srq; + p_uvp->pre_open_xrc_domain = mlx4_pre_open_xrc_domain; + p_uvp->post_open_xrc_domain = mlx4_post_open_xrc_domain; + p_uvp->pre_close_xrc_domain = NULL; + p_uvp->post_close_xrc_domain = mlx4_post_close_xrc_domain; + p_uvp->pre_create_xrc_rcv_qp = NULL; + p_uvp->post_create_xrc_rcv_qp = NULL; + p_uvp->pre_modify_xrc_rcv_qp = NULL; + p_uvp->post_modify_xrc_rcv_qp = NULL; + p_uvp->pre_query_xrc_rcv_qp = NULL; + p_uvp->post_query_xrc_rcv_qp = NULL; + p_uvp->pre_reg_xrc_rcv_qp = NULL; + p_uvp->post_reg_xrc_rcv_qp = NULL; + p_uvp->pre_unreg_xrc_rcv_qp = NULL; + p_uvp->post_unreg_xrc_rcv_qp = NULL; +#endif + + return IB_SUCCESS; +} + + diff --git a/trunk/hw/mlx4/user/hca/verbs.c b/trunk/hw/mlx4/user/hca/verbs.c index 09205426..df46f5d0 100644 --- a/trunk/hw/mlx4/user/hca/verbs.c +++ b/trunk/hw/mlx4/user/hca/verbs.c @@ -848,6 +848,25 @@ end: return status; } +ib_api_status_t +mlx4_wv_pre_create_qp ( + IN const ib_pd_handle_t h_uvp_pd, + IN const uvp_qp_create_t *p_create_attr, + IN OUT ci_umv_buf_t *p_umv_buf, + OUT ib_qp_handle_t *ph_uvp_qp ) +{ + struct mlx4_qp *qp; + ib_api_status_t status; + + status = mlx4_pre_create_qp(h_uvp_pd, &p_create_attr->qp_create, + p_umv_buf, ph_uvp_qp); + if (status == IB_SUCCESS) { + qp = (struct mlx4_qp *) *ph_uvp_qp; + qp->ibv_qp.qp_context = p_create_attr->context; + } + return status; +} + ib_api_status_t mlx4_post_create_qp ( IN const ib_pd_handle_t h_uvp_pd, diff --git a/trunk/hw/mlx4/user/hca/verbs.h b/trunk/hw/mlx4/user/hca/verbs.h index 30bec82c..fbef971c 100644 --- a/trunk/hw/mlx4/user/hca/verbs.h +++ b/trunk/hw/mlx4/user/hca/verbs.h @@ -1,445 +1,453 @@ -/* - * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved. - * Copyright (c) 2004 Intel Corporation. All rights reserved. - * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc. All rights reserved. - * Copyright (c) 2005 PathScale, Inc. All rights reserved. - * - * This software is available to you under a choice of one of two - * licenses. You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
- */ - -#ifndef INFINIBAND_VERBS_H -#define INFINIBAND_VERBS_H - -#include "l2w.h" - - -#ifdef __cplusplus -# define BEGIN_C_DECLS extern "C" { -# define END_C_DECLS } -#else /* !__cplusplus */ -# define BEGIN_C_DECLS -# define END_C_DECLS -#endif /* __cplusplus */ - -BEGIN_C_DECLS - -union ibv_gid { - uint8_t raw[16]; - struct { - uint64_t subnet_prefix; - uint64_t interface_id; - } global; -}; - -enum ibv_rate { - IBV_RATE_MAX = 0, - IBV_RATE_2_5_GBPS = 2, - IBV_RATE_5_GBPS = 5, - IBV_RATE_10_GBPS = 3, - IBV_RATE_20_GBPS = 6, - IBV_RATE_30_GBPS = 4, - IBV_RATE_40_GBPS = 7, - IBV_RATE_60_GBPS = 8, - IBV_RATE_80_GBPS = 9, - IBV_RATE_120_GBPS = 10 -}; - -struct ibv_global_route { - union ibv_gid dgid; - uint32_t flow_label; - uint8_t sgid_index; - uint8_t hop_limit; - uint8_t traffic_class; -}; - -struct ibv_grh { - uint32_t version_tclass_flow; - uint16_t paylen; - uint8_t next_hdr; - uint8_t hop_limit; - union ibv_gid sgid; - union ibv_gid dgid; -}; - -struct ibv_ah_attr { - struct ibv_global_route grh; - uint16_t dlid; - uint8_t sl; - uint8_t src_path_bits; - uint8_t static_rate; - uint8_t is_global; - uint8_t port_num; -}; - -struct ibv_xrc_domain { - struct ibv_context *context; - uint64_t handle; -}; - -struct ibv_srq_attr { - uint32_t max_wr; - uint32_t max_sge; - uint32_t srq_limit; -}; - -enum ibv_qp_type { - IBV_QPT_RC = 2, - IBV_QPT_UC, - IBV_QPT_UD, - IBV_QPT_XRC -}; - -struct ibv_qp_cap { - uint32_t max_send_wr; - uint32_t max_recv_wr; - uint32_t max_send_sge; - uint32_t max_recv_sge; - uint32_t max_inline_data; -}; - -struct ibv_qp_init_attr { - void *qp_context; - struct ibv_cq *send_cq; - struct ibv_cq *recv_cq; - struct ibv_srq *srq; - struct ibv_qp_cap cap; - enum ibv_qp_type qp_type; - int sq_sig_all; - struct ibv_xrc_domain *xrc_domain; -}; - -enum ibv_qp_attr_mask { - IBV_QP_STATE = 1 << 0, - IBV_QP_CUR_STATE = 1 << 1, - IBV_QP_EN_SQD_ASYNC_NOTIFY = 1 << 2, - IBV_QP_ACCESS_FLAGS = 1 << 3, - IBV_QP_PKEY_INDEX = 1 << 4, - IBV_QP_PORT = 1 << 5, - IBV_QP_QKEY = 1 << 6, - IBV_QP_AV = 1 << 7, - IBV_QP_PATH_MTU = 1 << 8, - IBV_QP_TIMEOUT = 1 << 9, - IBV_QP_RETRY_CNT = 1 << 10, - IBV_QP_RNR_RETRY = 1 << 11, - IBV_QP_RQ_PSN = 1 << 12, - IBV_QP_MAX_QP_RD_ATOMIC = 1 << 13, - IBV_QP_ALT_PATH = 1 << 14, - IBV_QP_MIN_RNR_TIMER = 1 << 15, - IBV_QP_SQ_PSN = 1 << 16, - IBV_QP_MAX_DEST_RD_ATOMIC = 1 << 17, - IBV_QP_PATH_MIG_STATE = 1 << 18, - IBV_QP_CAP = 1 << 19, - IBV_QP_DEST_QPN = 1 << 20 -}; - -enum ibv_qp_state { - IBV_QPS_RESET, - IBV_QPS_INIT, - IBV_QPS_RTR, - IBV_QPS_RTS, - IBV_QPS_SQD, - IBV_QPS_SQE, - IBV_QPS_ERR -}; - -struct ibv_pd { - struct ibv_context *context; - uint64_t handle; -}; - -struct ibv_srq { - struct ibv_context *context; - struct ibv_pd *pd; - uint64_t handle; - uint32_t xrc_srq_num; - struct ibv_xrc_domain *xrc_domain; - struct ibv_cq *xrc_cq; -}; - -struct ibv_qp { - struct ibv_context *context; - struct ibv_pd *pd; - struct ibv_cq *send_cq; - struct ibv_cq *recv_cq; - struct ibv_srq *srq; - uint64_t handle; - uint32_t qp_num; - enum ibv_qp_state state; - enum ibv_qp_type qp_type; - struct ibv_xrc_domain *xrc_domain; -}; - -struct ibv_cq { - struct ibv_context *context; - uint64_t handle; - int cqe; -}; - -struct ibv_ah { - struct ibv_context *context; - struct ibv_pd *pd; - ib_av_attr_t av_attr; -}; - -struct ibv_context { - ib_ca_attr_t *p_hca_attr; - int page_size; - pthread_mutex_t mutex; -}; - - -/************* CA operations *************************/ -ib_api_status_t -mlx4_pre_open_ca ( - IN const ib_net64_t ca_guid, - IN OUT ci_umv_buf_t *p_umv_buf, - 
OUT ib_ca_handle_t *ph_uvp_ca ); - -ib_api_status_t -mlx4_post_open_ca ( - IN const ib_net64_t ca_guid, - IN ib_api_status_t ioctl_status, - IN OUT ib_ca_handle_t *ph_uvp_ca, - IN ci_umv_buf_t *p_umv_buf ); - -ib_api_status_t -mlx4_pre_query_ca ( - IN ib_ca_handle_t h_uvp_ca, - IN ib_ca_attr_t *p_ca_attr, - IN size_t byte_count, - IN ci_umv_buf_t *p_umv_buf ); - -void -mlx4_post_query_ca ( - IN ib_ca_handle_t h_uvp_ca, - IN ib_api_status_t ioctl_status, - IN ib_ca_attr_t *p_ca_attr, - IN size_t byte_count, - IN ci_umv_buf_t *p_umv_buf ); - -ib_api_status_t -mlx4_post_close_ca ( - IN ib_ca_handle_t h_uvp_ca, - IN ib_api_status_t ioctl_status ); - -/************* PD Management ***********************/ -extern ib_api_status_t -mlx4_pre_alloc_pd ( - IN const ib_ca_handle_t h_uvp_ca, - IN OUT ci_umv_buf_t *p_umv_buf, - OUT ib_pd_handle_t *ph_uvp_pd ); - -void -mlx4_post_alloc_pd ( - IN ib_ca_handle_t h_uvp_ca, - IN ib_api_status_t ioctl_status, - IN OUT ib_pd_handle_t *ph_uvp_pd, - IN ci_umv_buf_t *p_umv_buf ); - -void -mlx4_post_free_pd ( - IN const ib_pd_handle_t h_uvp_pd, - IN ib_api_status_t ioctl_status ); - -/************* CQ Management ***********************/ -ib_api_status_t -mlx4_pre_create_cq ( - IN const ib_ca_handle_t h_uvp_ca, - IN OUT uint32_t* const p_size, - IN OUT ci_umv_buf_t *p_umv_buf, - OUT ib_cq_handle_t *ph_uvp_cq ); - -void -mlx4_post_create_cq ( - IN const ib_ca_handle_t h_uvp_ca, - IN ib_api_status_t ioctl_status, - IN const uint32_t size, - IN OUT ib_cq_handle_t *ph_uvp_cq, - IN ci_umv_buf_t *p_umv_buf ); - -ib_api_status_t -mlx4_pre_query_cq ( - IN const ib_cq_handle_t h_uvp_cq, - OUT uint32_t* const p_size, - IN OUT ci_umv_buf_t *p_umv_buf ); - -void -mlx4_post_destroy_cq ( - IN const ib_cq_handle_t h_uvp_cq, - IN ib_api_status_t ioctl_status ); - -/************* SRQ Management **********************/ -ib_api_status_t -mlx4_pre_create_srq ( - IN const ib_pd_handle_t h_uvp_pd, - IN const ib_srq_attr_t *p_srq_attr, - IN OUT ci_umv_buf_t *p_umv_buf, - OUT ib_srq_handle_t *ph_uvp_srq ); - -void -mlx4_post_create_srq ( - IN const ib_pd_handle_t h_uvp_pd, - IN ib_api_status_t ioctl_status, - IN OUT ib_srq_handle_t *ph_uvp_srq, - IN ci_umv_buf_t *p_umv_buf ); - -ib_api_status_t -mlx4_pre_destroy_srq ( - IN const ib_srq_handle_t h_uvp_srq ); - -void -mlx4_post_destroy_srq ( - IN const ib_srq_handle_t h_uvp_srq, - IN ib_api_status_t ioctl_status ); - -/************* QP Management ***********************/ -ib_api_status_t -mlx4_pre_create_qp ( - IN const ib_pd_handle_t h_uvp_pd, - IN const ib_qp_create_t *p_create_attr, - IN OUT ci_umv_buf_t *p_umv_buf, - OUT ib_qp_handle_t *ph_uvp_qp ); - -ib_api_status_t -mlx4_post_create_qp ( - IN const ib_pd_handle_t h_uvp_pd, - IN ib_api_status_t ioctl_status, - IN OUT ib_qp_handle_t *ph_uvp_qp, - IN ci_umv_buf_t *p_umv_buf ); - -ib_api_status_t -mlx4_pre_modify_qp ( - IN const ib_qp_handle_t h_uvp_qp, - IN const ib_qp_mod_t *p_modify_attr, - IN OUT ci_umv_buf_t *p_umv_buf ); - -void -mlx4_post_modify_qp ( - IN const ib_qp_handle_t h_uvp_qp, - IN ib_api_status_t ioctl_status, - IN OUT ci_umv_buf_t *p_umv_buf ); - -void -mlx4_post_query_qp ( - IN ib_qp_handle_t h_uvp_qp, - IN ib_api_status_t ioctl_status, - IN OUT ib_qp_attr_t *p_query_attr, - IN OUT ci_umv_buf_t *p_umv_buf ); - -ib_api_status_t -mlx4_pre_destroy_qp ( - IN const ib_qp_handle_t h_uvp_qp ); - -void -mlx4_post_destroy_qp ( - IN const ib_qp_handle_t h_uvp_qp, - IN ib_api_status_t ioctl_status ); - -void -mlx4_nd_modify_qp ( - IN const ib_qp_handle_t h_uvp_qp, - OUT void** 
pp_outbuf, - OUT DWORD* p_size ); - -uint32_t -mlx4_nd_get_qp_state ( - IN const ib_qp_handle_t h_uvp_qp ); - -/************* AV Management ***********************/ -ib_api_status_t -mlx4_pre_create_ah ( - IN const ib_pd_handle_t h_uvp_pd, - IN const ib_av_attr_t *p_av_attr, - IN OUT ci_umv_buf_t *p_umv_buf, - OUT ib_av_handle_t *ph_uvp_av ); - -ib_api_status_t -mlx4_pre_query_ah ( - IN const ib_av_handle_t h_uvp_av, - IN OUT ci_umv_buf_t *p_umv_buf ); - -void -mlx4_post_query_ah ( - IN const ib_av_handle_t h_uvp_av, - IN ib_api_status_t ioctl_status, - IN OUT ib_av_attr_t *p_addr_vector, - IN OUT ib_pd_handle_t *ph_pd, - IN OUT ci_umv_buf_t *p_umv_buf ); - -ib_api_status_t -mlx4_pre_modify_ah ( - IN const ib_av_handle_t h_uvp_av, - IN const ib_av_attr_t *p_addr_vector, - IN OUT ci_umv_buf_t *p_umv_buf ); - -ib_api_status_t -mlx4_pre_destroy_ah ( - IN const ib_av_handle_t h_uvp_av ); - -#ifdef XRC_SUPPORT -/************* XRC Management **********************/ -ib_api_status_t -mlx4_pre_create_xrc_srq ( - IN const ib_pd_handle_t h_uvp_pd, - IN const ib_xrcd_handle_t h_uvp_xrcd, - IN const ib_srq_attr_t *p_srq_attr, - IN OUT ci_umv_buf_t *p_umv_buf, - OUT ib_srq_handle_t *ph_uvp_srq ); - -ib_api_status_t -mlx4_post_create_xrc_srq ( - IN const ib_pd_handle_t h_uvp_pd, - IN ib_api_status_t ioctl_status, - IN OUT ib_srq_handle_t *ph_uvp_srq, - IN ci_umv_buf_t *p_umv_buf ); - -ib_api_status_t -mlx4_pre_open_xrc_domain ( - IN const ib_ca_handle_t h_uvp_ca, - IN const uint32_t oflag, - IN OUT ci_umv_buf_t *p_umv_buf, - OUT ib_xrcd_handle_t *ph_uvp_xrcd ); - -void -mlx4_post_open_xrc_domain ( - IN const ib_ca_handle_t h_uvp_ca, - IN ib_api_status_t ioctl_status, - IN OUT ib_xrcd_handle_t *ph_uvp_xrcd, - IN ci_umv_buf_t *p_umv_buf ); - -void -mlx4_post_close_xrc_domain ( - IN const ib_xrcd_handle_t h_uvp_xrcd, - IN ib_api_status_t ioctl_status ); - -#endif /* XRC_SUPPORT */ - -END_C_DECLS - -#endif /* INFINIBAND_VERBS_H */ +/* + * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved. + * Copyright (c) 2004 Intel Corporation. All rights reserved. + * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc. All rights reserved. + * Copyright (c) 2005 PathScale, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#ifndef INFINIBAND_VERBS_H +#define INFINIBAND_VERBS_H + +#include "l2w.h" + + +#ifdef __cplusplus +# define BEGIN_C_DECLS extern "C" { +# define END_C_DECLS } +#else /* !__cplusplus */ +# define BEGIN_C_DECLS +# define END_C_DECLS +#endif /* __cplusplus */ + +BEGIN_C_DECLS + +union ibv_gid { + uint8_t raw[16]; + struct { + uint64_t subnet_prefix; + uint64_t interface_id; + } global; +}; + +enum ibv_rate { + IBV_RATE_MAX = 0, + IBV_RATE_2_5_GBPS = 2, + IBV_RATE_5_GBPS = 5, + IBV_RATE_10_GBPS = 3, + IBV_RATE_20_GBPS = 6, + IBV_RATE_30_GBPS = 4, + IBV_RATE_40_GBPS = 7, + IBV_RATE_60_GBPS = 8, + IBV_RATE_80_GBPS = 9, + IBV_RATE_120_GBPS = 10 +}; + +struct ibv_global_route { + union ibv_gid dgid; + uint32_t flow_label; + uint8_t sgid_index; + uint8_t hop_limit; + uint8_t traffic_class; +}; + +struct ibv_grh { + uint32_t version_tclass_flow; + uint16_t paylen; + uint8_t next_hdr; + uint8_t hop_limit; + union ibv_gid sgid; + union ibv_gid dgid; +}; + +struct ibv_ah_attr { + struct ibv_global_route grh; + uint16_t dlid; + uint8_t sl; + uint8_t src_path_bits; + uint8_t static_rate; + uint8_t is_global; + uint8_t port_num; +}; + +struct ibv_xrc_domain { + struct ibv_context *context; + uint64_t handle; +}; + +struct ibv_srq_attr { + uint32_t max_wr; + uint32_t max_sge; + uint32_t srq_limit; +}; + +enum ibv_qp_type { + IBV_QPT_RC = 2, + IBV_QPT_UC, + IBV_QPT_UD, + IBV_QPT_XRC +}; + +struct ibv_qp_cap { + uint32_t max_send_wr; + uint32_t max_recv_wr; + uint32_t max_send_sge; + uint32_t max_recv_sge; + uint32_t max_inline_data; +}; + +struct ibv_qp_init_attr { + void *qp_context; + struct ibv_cq *send_cq; + struct ibv_cq *recv_cq; + struct ibv_srq *srq; + struct ibv_qp_cap cap; + enum ibv_qp_type qp_type; + int sq_sig_all; + struct ibv_xrc_domain *xrc_domain; +}; + +enum ibv_qp_attr_mask { + IBV_QP_STATE = 1 << 0, + IBV_QP_CUR_STATE = 1 << 1, + IBV_QP_EN_SQD_ASYNC_NOTIFY = 1 << 2, + IBV_QP_ACCESS_FLAGS = 1 << 3, + IBV_QP_PKEY_INDEX = 1 << 4, + IBV_QP_PORT = 1 << 5, + IBV_QP_QKEY = 1 << 6, + IBV_QP_AV = 1 << 7, + IBV_QP_PATH_MTU = 1 << 8, + IBV_QP_TIMEOUT = 1 << 9, + IBV_QP_RETRY_CNT = 1 << 10, + IBV_QP_RNR_RETRY = 1 << 11, + IBV_QP_RQ_PSN = 1 << 12, + IBV_QP_MAX_QP_RD_ATOMIC = 1 << 13, + IBV_QP_ALT_PATH = 1 << 14, + IBV_QP_MIN_RNR_TIMER = 1 << 15, + IBV_QP_SQ_PSN = 1 << 16, + IBV_QP_MAX_DEST_RD_ATOMIC = 1 << 17, + IBV_QP_PATH_MIG_STATE = 1 << 18, + IBV_QP_CAP = 1 << 19, + IBV_QP_DEST_QPN = 1 << 20 +}; + +enum ibv_qp_state { + IBV_QPS_RESET, + IBV_QPS_INIT, + IBV_QPS_RTR, + IBV_QPS_RTS, + IBV_QPS_SQD, + IBV_QPS_SQE, + IBV_QPS_ERR +}; + +struct ibv_pd { + struct ibv_context *context; + uint64_t handle; +}; + +struct ibv_srq { + struct ibv_context *context; + struct ibv_pd *pd; + uint64_t handle; + uint32_t xrc_srq_num; + struct ibv_xrc_domain *xrc_domain; + struct ibv_cq *xrc_cq; +}; + +struct ibv_qp { + struct ibv_context *context; + void *qp_context; + struct ibv_pd *pd; + struct ibv_cq *send_cq; + struct ibv_cq *recv_cq; + struct ibv_srq *srq; + uint64_t handle; + uint32_t qp_num; + enum ibv_qp_state state; + enum ibv_qp_type qp_type; + struct ibv_xrc_domain *xrc_domain; +}; + +struct ibv_cq { + struct ibv_context *context; + uint64_t handle; + int cqe; +}; + +struct ibv_ah { + struct ibv_context *context; + struct ibv_pd *pd; + ib_av_attr_t av_attr; +}; + +struct ibv_context { + ib_ca_attr_t *p_hca_attr; + int page_size; + pthread_mutex_t mutex; +}; + + +/************* CA operations *************************/ +ib_api_status_t +mlx4_pre_open_ca ( + IN const ib_net64_t ca_guid, + IN OUT 
ci_umv_buf_t *p_umv_buf, + OUT ib_ca_handle_t *ph_uvp_ca ); + +ib_api_status_t +mlx4_post_open_ca ( + IN const ib_net64_t ca_guid, + IN ib_api_status_t ioctl_status, + IN OUT ib_ca_handle_t *ph_uvp_ca, + IN ci_umv_buf_t *p_umv_buf ); + +ib_api_status_t +mlx4_pre_query_ca ( + IN ib_ca_handle_t h_uvp_ca, + IN ib_ca_attr_t *p_ca_attr, + IN size_t byte_count, + IN ci_umv_buf_t *p_umv_buf ); + +void +mlx4_post_query_ca ( + IN ib_ca_handle_t h_uvp_ca, + IN ib_api_status_t ioctl_status, + IN ib_ca_attr_t *p_ca_attr, + IN size_t byte_count, + IN ci_umv_buf_t *p_umv_buf ); + +ib_api_status_t +mlx4_post_close_ca ( + IN ib_ca_handle_t h_uvp_ca, + IN ib_api_status_t ioctl_status ); + +/************* PD Management ***********************/ +extern ib_api_status_t +mlx4_pre_alloc_pd ( + IN const ib_ca_handle_t h_uvp_ca, + IN OUT ci_umv_buf_t *p_umv_buf, + OUT ib_pd_handle_t *ph_uvp_pd ); + +void +mlx4_post_alloc_pd ( + IN ib_ca_handle_t h_uvp_ca, + IN ib_api_status_t ioctl_status, + IN OUT ib_pd_handle_t *ph_uvp_pd, + IN ci_umv_buf_t *p_umv_buf ); + +void +mlx4_post_free_pd ( + IN const ib_pd_handle_t h_uvp_pd, + IN ib_api_status_t ioctl_status ); + +/************* CQ Management ***********************/ +ib_api_status_t +mlx4_pre_create_cq ( + IN const ib_ca_handle_t h_uvp_ca, + IN OUT uint32_t* const p_size, + IN OUT ci_umv_buf_t *p_umv_buf, + OUT ib_cq_handle_t *ph_uvp_cq ); + +void +mlx4_post_create_cq ( + IN const ib_ca_handle_t h_uvp_ca, + IN ib_api_status_t ioctl_status, + IN const uint32_t size, + IN OUT ib_cq_handle_t *ph_uvp_cq, + IN ci_umv_buf_t *p_umv_buf ); + +ib_api_status_t +mlx4_pre_query_cq ( + IN const ib_cq_handle_t h_uvp_cq, + OUT uint32_t* const p_size, + IN OUT ci_umv_buf_t *p_umv_buf ); + +void +mlx4_post_destroy_cq ( + IN const ib_cq_handle_t h_uvp_cq, + IN ib_api_status_t ioctl_status ); + +/************* SRQ Management **********************/ +ib_api_status_t +mlx4_pre_create_srq ( + IN const ib_pd_handle_t h_uvp_pd, + IN const ib_srq_attr_t *p_srq_attr, + IN OUT ci_umv_buf_t *p_umv_buf, + OUT ib_srq_handle_t *ph_uvp_srq ); + +void +mlx4_post_create_srq ( + IN const ib_pd_handle_t h_uvp_pd, + IN ib_api_status_t ioctl_status, + IN OUT ib_srq_handle_t *ph_uvp_srq, + IN ci_umv_buf_t *p_umv_buf ); + +ib_api_status_t +mlx4_pre_destroy_srq ( + IN const ib_srq_handle_t h_uvp_srq ); + +void +mlx4_post_destroy_srq ( + IN const ib_srq_handle_t h_uvp_srq, + IN ib_api_status_t ioctl_status ); + +/************* QP Management ***********************/ +ib_api_status_t +mlx4_pre_create_qp ( + IN const ib_pd_handle_t h_uvp_pd, + IN const ib_qp_create_t *p_create_attr, + IN OUT ci_umv_buf_t *p_umv_buf, + OUT ib_qp_handle_t *ph_uvp_qp ); + +ib_api_status_t +mlx4_wv_pre_create_qp ( + IN const ib_pd_handle_t h_uvp_pd, + IN const uvp_qp_create_t *p_create_attr, + IN OUT ci_umv_buf_t *p_umv_buf, + OUT ib_qp_handle_t *ph_uvp_qp ); + +ib_api_status_t +mlx4_post_create_qp ( + IN const ib_pd_handle_t h_uvp_pd, + IN ib_api_status_t ioctl_status, + IN OUT ib_qp_handle_t *ph_uvp_qp, + IN ci_umv_buf_t *p_umv_buf ); + +ib_api_status_t +mlx4_pre_modify_qp ( + IN const ib_qp_handle_t h_uvp_qp, + IN const ib_qp_mod_t *p_modify_attr, + IN OUT ci_umv_buf_t *p_umv_buf ); + +void +mlx4_post_modify_qp ( + IN const ib_qp_handle_t h_uvp_qp, + IN ib_api_status_t ioctl_status, + IN OUT ci_umv_buf_t *p_umv_buf ); + +void +mlx4_post_query_qp ( + IN ib_qp_handle_t h_uvp_qp, + IN ib_api_status_t ioctl_status, + IN OUT ib_qp_attr_t *p_query_attr, + IN OUT ci_umv_buf_t *p_umv_buf ); + +ib_api_status_t +mlx4_pre_destroy_qp ( + IN 
const ib_qp_handle_t h_uvp_qp ); + +void +mlx4_post_destroy_qp ( + IN const ib_qp_handle_t h_uvp_qp, + IN ib_api_status_t ioctl_status ); + +void +mlx4_nd_modify_qp ( + IN const ib_qp_handle_t h_uvp_qp, + OUT void** pp_outbuf, + OUT DWORD* p_size ); + +uint32_t +mlx4_nd_get_qp_state ( + IN const ib_qp_handle_t h_uvp_qp ); + +/************* AV Management ***********************/ +ib_api_status_t +mlx4_pre_create_ah ( + IN const ib_pd_handle_t h_uvp_pd, + IN const ib_av_attr_t *p_av_attr, + IN OUT ci_umv_buf_t *p_umv_buf, + OUT ib_av_handle_t *ph_uvp_av ); + +ib_api_status_t +mlx4_pre_query_ah ( + IN const ib_av_handle_t h_uvp_av, + IN OUT ci_umv_buf_t *p_umv_buf ); + +void +mlx4_post_query_ah ( + IN const ib_av_handle_t h_uvp_av, + IN ib_api_status_t ioctl_status, + IN OUT ib_av_attr_t *p_addr_vector, + IN OUT ib_pd_handle_t *ph_pd, + IN OUT ci_umv_buf_t *p_umv_buf ); + +ib_api_status_t +mlx4_pre_modify_ah ( + IN const ib_av_handle_t h_uvp_av, + IN const ib_av_attr_t *p_addr_vector, + IN OUT ci_umv_buf_t *p_umv_buf ); + +ib_api_status_t +mlx4_pre_destroy_ah ( + IN const ib_av_handle_t h_uvp_av ); + +#ifdef XRC_SUPPORT +/************* XRC Management **********************/ +ib_api_status_t +mlx4_pre_create_xrc_srq ( + IN const ib_pd_handle_t h_uvp_pd, + IN const ib_xrcd_handle_t h_uvp_xrcd, + IN const ib_srq_attr_t *p_srq_attr, + IN OUT ci_umv_buf_t *p_umv_buf, + OUT ib_srq_handle_t *ph_uvp_srq ); + +ib_api_status_t +mlx4_post_create_xrc_srq ( + IN const ib_pd_handle_t h_uvp_pd, + IN ib_api_status_t ioctl_status, + IN OUT ib_srq_handle_t *ph_uvp_srq, + IN ci_umv_buf_t *p_umv_buf ); + +ib_api_status_t +mlx4_pre_open_xrc_domain ( + IN const ib_ca_handle_t h_uvp_ca, + IN const uint32_t oflag, + IN OUT ci_umv_buf_t *p_umv_buf, + OUT ib_xrcd_handle_t *ph_uvp_xrcd ); + +void +mlx4_post_open_xrc_domain ( + IN const ib_ca_handle_t h_uvp_ca, + IN ib_api_status_t ioctl_status, + IN OUT ib_xrcd_handle_t *ph_uvp_xrcd, + IN ci_umv_buf_t *p_umv_buf ); + +void +mlx4_post_close_xrc_domain ( + IN const ib_xrcd_handle_t h_uvp_xrcd, + IN ib_api_status_t ioctl_status ); + +#endif /* XRC_SUPPORT */ + +END_C_DECLS + +#endif /* INFINIBAND_VERBS_H */ diff --git a/trunk/hw/mthca/user/mlnx_ual_qp.c b/trunk/hw/mthca/user/mlnx_ual_qp.c index b7fccf27..23293b6e 100644 --- a/trunk/hw/mthca/user/mlnx_ual_qp.c +++ b/trunk/hw/mthca/user/mlnx_ual_qp.c @@ -94,8 +94,6 @@ __pre_create_qp ( size_t size = max( sizeof(struct ibv_create_qp), sizeof(struct ibv_create_qp_resp) ); struct ibv_pd *ibv_pd = h_uvp_pd->ibv_pd; - UNREFERENCED_PARAMETER(ph_uvp_qp); - UVP_ENTER(UVP_DBG_QP); CL_ASSERT(p_umv_buf); @@ -139,6 +137,7 @@ __pre_create_qp ( goto err_alloc_qp; } + *ph_uvp_qp = (ib_qp_handle_t) ibv_qp; goto end; err_alloc_qp: @@ -149,6 +148,25 @@ end: return status; } +static ib_api_status_t +__wv_pre_create_qp ( + IN const ib_pd_handle_t h_uvp_pd, + IN const uvp_qp_create_t *p_create_attr, + IN OUT ci_umv_buf_t *p_umv_buf, + OUT ib_qp_handle_t *ph_uvp_qp) +{ + struct ibv_qp *qp; + ib_api_status_t status; + + status = __pre_create_qp(h_uvp_pd, &p_create_attr->qp_create, + p_umv_buf, ph_uvp_qp); + if (status == IB_SUCCESS) { + qp = (struct ibv_qp *) *ph_uvp_qp; + qp->qp_context = p_create_attr->context; + } + return status; +} + static ib_api_status_t __post_create_qp ( IN const ib_pd_handle_t h_uvp_pd, @@ -179,8 +197,6 @@ __post_create_qp ( status = errno_to_iberr(err); goto err_create_cq; } - - *ph_uvp_qp = (ib_qp_handle_t)ibv_qp; } goto end; @@ -365,8 +381,10 @@ mlnx_get_qp_interface ( p_uvp->post_query_qp = __post_query_qp; 
p_uvp->pre_destroy_qp = __pre_destroy_qp; p_uvp->post_destroy_qp = __post_destroy_qp; + p_uvp->nd_modify_qp = __nd_modify_qp; - p_uvp->nd_get_qp_state = __nd_get_qp_state; + p_uvp->nd_get_qp_state = __nd_get_qp_state; + p_uvp->wv_pre_create_qp = __wv_pre_create_qp; UVP_EXIT(UVP_DBG_SHIM); } diff --git a/trunk/hw/mthca/user/mlnx_uvp_verbs.h b/trunk/hw/mthca/user/mlnx_uvp_verbs.h index 613b15e9..18fe0600 100644 --- a/trunk/hw/mthca/user/mlnx_uvp_verbs.h +++ b/trunk/hw/mthca/user/mlnx_uvp_verbs.h @@ -1,490 +1,491 @@ -/* - * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved. - * Copyright (c) 2004 Intel Corporation. All rights reserved. - * Copyright (c) 2005 Cisco Systems. All rights reserved. - * Copyright (c) 2005 PathScale, Inc. All rights reserved. - * - * This software is available to you under a choice of one of two - * licenses. You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
- * - * $Id$ - */ - -#ifndef MLNX_UVP_VERBS_H -#define MLNX_UVP_VERBS_H - -#include - -#ifdef __cplusplus -# define BEGIN_C_DECLS extern "C" { -# define END_C_DECLS } -#else /* !__cplusplus */ -# define BEGIN_C_DECLS -# define END_C_DECLS -#endif /* __cplusplus */ - -BEGIN_C_DECLS - -union ibv_gid { - uint8_t raw[16]; - struct { - uint64_t subnet_prefix; - uint64_t interface_id; - } global; -}; - -enum ibv_node_type { - IBV_NODE_CA = 1, - IBV_NODE_SWITCH, - IBV_NODE_ROUTER -}; - -enum ibv_device_cap_flags { - IBV_DEVICE_RESIZE_MAX_WR = 1, - IBV_DEVICE_BAD_PKEY_CNTR = 1 << 1, - IBV_DEVICE_BAD_QKEY_CNTR = 1 << 2, - IBV_DEVICE_RAW_MULTI = 1 << 3, - IBV_DEVICE_AUTO_PATH_MIG = 1 << 4, - IBV_DEVICE_CHANGE_PHY_PORT = 1 << 5, - IBV_DEVICE_UD_AV_PORT_ENFORCE = 1 << 6, - IBV_DEVICE_CURR_QP_STATE_MOD = 1 << 7, - IBV_DEVICE_SHUTDOWN_PORT = 1 << 8, - IBV_DEVICE_INIT_TYPE = 1 << 9, - IBV_DEVICE_PORT_ACTIVE_EVENT = 1 << 10, - IBV_DEVICE_SYS_IMAGE_GUID = 1 << 11, - IBV_DEVICE_RC_RNR_NAK_GEN = 1 << 12, - IBV_DEVICE_SRQ_RESIZE = 1 << 13, - IBV_DEVICE_N_NOTIFY_CQ = 1 << 14, -}; - -enum ibv_atomic_cap { - IBV_ATOMIC_NONE, - IBV_ATOMIC_HCA, - IBV_ATOMIC_GLOB -}; - -struct ibv_device_attr { - char fw_ver[64]; - uint64_t node_guid; - uint64_t sys_image_guid; - uint64_t max_mr_size; - uint64_t page_size_cap; - uint32_t vendor_id; - uint32_t vendor_part_id; - uint32_t hw_ver; - int max_qp; - int max_qp_wr; - int device_cap_flags; - int max_sge; - int max_sge_rd; - int max_cq; - int max_cqe; - int max_mr; - int max_pd; - int max_qp_rd_atom; - int max_ee_rd_atom; - int max_res_rd_atom; - int max_qp_init_rd_atom; - int max_ee_init_rd_atom; - enum ibv_atomic_cap atomic_cap; - int max_ee; - int max_rdd; - int max_mw; - int max_raw_ipv6_qp; - int max_raw_ethy_qp; - int max_mcast_grp; - int max_mcast_qp_attach; - int max_total_mcast_qp_attach; - uint64_t max_ah; - int max_fmr; - int max_map_per_fmr; - int max_srq; - int max_srq_wr; - int max_srq_sge; - uint16_t max_pkeys; - uint8_t local_ca_ack_delay; - uint8_t phys_port_cnt; -}; - -enum ibv_mtu { - IBV_MTU_256 = 1, - IBV_MTU_512 = 2, - IBV_MTU_1024 = 3, - IBV_MTU_2048 = 4, - IBV_MTU_4096 = 5 -}; - -enum ibv_port_state { - IBV_PORT_NOP = 0, - IBV_PORT_DOWN = 1, - IBV_PORT_INIT = 2, - IBV_PORT_ARMED = 3, - IBV_PORT_ACTIVE = 4, - IBV_PORT_ACTIVE_DEFER = 5 -}; - -struct ibv_port_attr { - enum ibv_port_state state; - enum ibv_mtu max_mtu; - enum ibv_mtu active_mtu; - int gid_tbl_len; - uint32_t port_cap_flags; - uint32_t max_msg_sz; - uint32_t bad_pkey_cntr; - uint32_t qkey_viol_cntr; - uint16_t pkey_tbl_len; - uint16_t lid; - uint16_t sm_lid; - uint8_t lmc; - uint8_t max_vl_num; - uint8_t sm_sl; - uint8_t subnet_timeout; - uint8_t init_type_reply; - uint8_t active_width; - uint8_t active_speed; - uint8_t phys_state; -}; - -enum ibv_event_type { - IBV_EVENT_CQ_ERR, - IBV_EVENT_QP_FATAL, - IBV_EVENT_QP_REQ_ERR, - IBV_EVENT_QP_ACCESS_ERR, - IBV_EVENT_COMM_EST, - IBV_EVENT_SQ_DRAINED, - IBV_EVENT_PATH_MIG, - IBV_EVENT_PATH_MIG_ERR, - IBV_EVENT_DEVICE_FATAL, - IBV_EVENT_PORT_ACTIVE, - IBV_EVENT_PORT_ERR, - IBV_EVENT_LID_CHANGE, - IBV_EVENT_PKEY_CHANGE, - IBV_EVENT_SM_CHANGE, - IBV_EVENT_SRQ_ERR, - IBV_EVENT_SRQ_LIMIT_REACHED, - IBV_EVENT_QP_LAST_WQE_REACHED -}; - -struct ibv_async_event { - union { - struct ibv_cq *cq; - struct ibv_qp *qp; - struct ibv_srq *srq; - int port_num; - } element; - enum ibv_event_type event_type; -}; - -enum ibv_access_flags { - IBV_ACCESS_LOCAL_WRITE = 1, - IBV_ACCESS_REMOTE_WRITE = (1<<1), - IBV_ACCESS_REMOTE_READ = (1<<2), - 
IBV_ACCESS_REMOTE_ATOMIC = (1<<3), - IBV_ACCESS_MW_BIND = (1<<4) -}; - -struct ibv_pd { - struct ibv_context *context; - uint64_t handle; -}; - -struct ibv_mr { - struct ibv_context *context; - struct ibv_pd *pd; - uint64_t handle; - uint32_t lkey; - uint32_t rkey; -}; - -struct ibv_global_route { - ib_gid_t dgid; - uint32_t flow_label; - uint8_t sgid_index; - uint8_t hop_limit; - uint8_t traffic_class; -}; - -struct ibv_ah_attr { - struct ibv_global_route grh; - uint16_t dlid; - uint8_t sl; - uint8_t src_path_bits; - uint8_t static_rate; - uint8_t is_global; - uint8_t port_num; -}; - - -enum ib_cq_notify { - IB_CQ_SOLICITED, - IB_CQ_NEXT_COMP -}; - -enum ibv_srq_attr_mask { - IBV_SRQ_MAX_WR = 1 << 0, - IBV_SRQ_LIMIT = 1 << 1, -}; - -struct ibv_srq_attr { - uint32_t max_wr; - uint32_t max_sge; - uint32_t srq_limit; -}; - -struct ibv_srq_init_attr { - void *srq_context; - struct ibv_srq_attr attr; -}; - -struct ibv_qp_cap { - uint32_t max_send_wr; - uint32_t max_recv_wr; - uint32_t max_send_sge; - uint32_t max_recv_sge; - uint32_t max_inline_data; -}; - -struct ibv_qp_init_attr { - void *qp_context; - struct ibv_cq *send_cq; - struct ibv_cq *recv_cq; - struct ibv_srq *srq; - struct ibv_qp_cap cap; - ib_qp_type_t qp_type; - int sq_sig_all; -}; - -enum ibv_qp_attr_mask { - IBV_QP_STATE = 1 << 0, - IBV_QP_CUR_STATE = 1 << 1, - IBV_QP_EN_SQD_ASYNC_NOTIFY = 1 << 2, - IBV_QP_ACCESS_FLAGS = 1 << 3, - IBV_QP_PKEY_INDEX = 1 << 4, - IBV_QP_PORT = 1 << 5, - IBV_QP_QKEY = 1 << 6, - IBV_QP_AV = 1 << 7, - IBV_QP_PATH_MTU = 1 << 8, - IBV_QP_TIMEOUT = 1 << 9, - IBV_QP_RETRY_CNT = 1 << 10, - IBV_QP_RNR_RETRY = 1 << 11, - IBV_QP_RQ_PSN = 1 << 12, - IBV_QP_MAX_QP_RD_ATOMIC = 1 << 13, - IBV_QP_ALT_PATH = 1 << 14, - IBV_QP_MIN_RNR_TIMER = 1 << 15, - IBV_QP_SQ_PSN = 1 << 16, - IBV_QP_MAX_DEST_RD_ATOMIC = 1 << 17, - IBV_QP_PATH_MIG_STATE = 1 << 18, - IBV_QP_CAP = 1 << 19, - IBV_QP_DEST_QPN = 1 << 20 -}; - -enum ibv_qp_state { - IBV_QPS_RESET, - IBV_QPS_INIT, - IBV_QPS_RTR, - IBV_QPS_RTS, - IBV_QPS_SQD, - IBV_QPS_SQE, - IBV_QPS_ERR -}; - -enum ibv_mig_state { - IBV_MIG_MIGRATED, - IBV_MIG_REARM, - IBV_MIG_ARMED -}; - -struct ibv_qp_attr { - enum ibv_qp_state qp_state; - enum ibv_qp_state cur_qp_state; - enum ibv_mtu path_mtu; - enum ibv_mig_state path_mig_state; - uint32_t qkey; - uint32_t rq_psn; - uint32_t sq_psn; - uint32_t dest_qp_num; - int qp_access_flags; - struct ibv_qp_cap cap; - struct ibv_ah_attr ah_attr; - struct ibv_ah_attr alt_ah_attr; - uint16_t pkey_index; - uint16_t alt_pkey_index; - uint8_t en_sqd_async_notify; - uint8_t sq_draining; - uint8_t max_rd_atomic; - uint8_t max_dest_rd_atomic; - uint8_t min_rnr_timer; - uint8_t port_num; - uint8_t timeout; - uint8_t retry_cnt; - uint8_t rnr_retry; - uint8_t alt_port_num; - uint8_t alt_timeout; -}; - - -enum ibv_send_flags { - IBV_SEND_FENCE = 1 << 0, - IBV_SEND_SIGNALED = 1 << 1, - IBV_SEND_SOLICITED = 1 << 2, - IBV_SEND_INLINE = 1 << 3 -}; - -struct ibv_sge { - uint64_t addr; - uint32_t length; - uint32_t lkey; -}; - -struct ibv_send_wr { - struct ibv_send_wr *next; - uint64_t wr_id; - struct ibv_sge *sg_list; - int num_sge; - enum ibv_wr_opcode opcode; - enum ibv_send_flags send_flags; - uint32_t imm_data; /* in network byte order */ - union { - struct { - uint64_t remote_addr; - uint32_t rkey; - } rdma; - struct { - uint64_t remote_addr; - uint64_t compare_add; - uint64_t swap; - uint32_t rkey; - } atomic; - struct { - struct mthca_ah *ah; - uint32_t remote_qpn; - uint32_t remote_qkey; - } ud; - } wr; -}; - -struct ibv_recv_wr { - struct ibv_recv_wr 
*next; - uint64_t wr_id; - struct ibv_sge *sg_list; - int num_sge; -}; - -typedef enum MTHCA_QP_ACCESS_FLAGS { - MTHCA_ACCESS_LOCAL_WRITE = 1, - MTHCA_ACCESS_REMOTE_WRITE = (1<<1), - MTHCA_ACCESS_REMOTE_READ = (1<<2), - MTHCA_ACCESS_REMOTE_ATOMIC = (1<<3), - MTHCA_ACCESS_MW_BIND = (1<<4) -} mthca_qp_access_t; - - -struct ibv_srq { - struct ibv_pd *pd; - uint64_t handle; - struct ibv_context *context; -}; - -struct ibv_qp { - struct ibv_pd *pd; - struct ibv_cq *send_cq; - struct ibv_cq *recv_cq; - struct ibv_srq *srq; - uint64_t handle; - uint32_t qp_num; - enum ibv_qp_state state; - ib_qp_type_t qp_type; - struct ibv_context *context; -}; - -struct ibv_cq { - uint64_t handle; - int cqe; - struct ibv_context *context; -}; - -struct ibv_ah { - struct ibv_pd *pd; -}; - -struct ibv_context_ops { - int (*query_device)(struct ibv_context *context, - struct ibv_device_attr *device_attr); - int (*query_port)(struct ibv_context *context, uint8_t port_num, - struct ibv_port_attr *port_attr); - struct ibv_pd * (*alloc_pd)(struct ibv_context *context, struct ibv_alloc_pd_resp *resp_p); - int (*dealloc_pd)(struct ibv_pd *pd); - struct ibv_mr * (*reg_mr)(struct ibv_pd *pd, void *addr, size_t length, - enum ibv_access_flags access); - int (*dereg_mr)(struct ibv_mr *mr); - struct ibv_cq * (*create_cq_pre)(struct ibv_context *context, int *cqe, - struct ibv_create_cq *req); - struct ibv_cq * (*create_cq_post)(struct ibv_context *context, - struct ibv_create_cq_resp *resp); - int (*poll_cq)(struct ibv_cq *cq, int num_entries, struct _ib_wc *wc); - int (*poll_cq_list)( struct ibv_cq *ibcq, - struct _ib_wc** const pp_free_wclist, - struct _ib_wc** const pp_done_wclist ); - int (*req_notify_cq)(struct ibv_cq *cq, int solicited_only); - int (*destroy_cq)(struct ibv_cq *cq); - struct ibv_srq * (*create_srq)(struct ibv_pd *pd, - struct ibv_srq_init_attr *srq_init_attr); - int (*modify_srq)(struct ibv_srq *srq, - struct ibv_srq_attr *srq_attr, - enum ibv_srq_attr_mask srq_attr_mask); - int (*destroy_srq)(struct ibv_srq *srq); - int (*post_srq_recv)(struct ibv_srq *srq, - struct _ib_recv_wr *recv_wr, - struct _ib_recv_wr **bad_recv_wr); - struct ibv_qp *(*create_qp_pre)(struct ibv_pd *pd, - struct ibv_qp_init_attr *attr, struct ibv_create_qp *req); - struct ibv_qp *(*create_qp_post)(struct ibv_pd *pd, - struct ibv_create_qp_resp *resp); - int (*modify_qp)(struct ibv_qp *qp, struct ibv_qp_attr *attr, - enum ibv_qp_attr_mask attr_mask); - int (*destroy_qp)(struct ibv_qp *qp); - int (*post_send)(struct ibv_qp *qp, struct _ib_send_wr *wr, - struct _ib_send_wr **bad_wr); - int (*post_recv)(struct ibv_qp *qp, struct _ib_recv_wr *wr, - struct _ib_recv_wr **bad_wr); - int (*attach_mcast)(struct ibv_qp *qp, union ibv_gid *gid, - uint16_t lid); - int (*detach_mcast)(struct ibv_qp *qp, union ibv_gid *gid, - uint16_t lid); -}; - -struct ibv_context { - struct ibv_context_ops ops; - void *abi_compat; -}; - -int align_queue_size(struct ibv_context *context, int size, int spare); - -END_C_DECLS - -#endif /* INFINIBAND_VERBS_H */ +/* + * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved. + * Copyright (c) 2004 Intel Corporation. All rights reserved. + * Copyright (c) 2005 Cisco Systems. All rights reserved. + * Copyright (c) 2005 PathScale, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. 
You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ *  - Redistributions of source code must retain the above
+ *    copyright notice, this list of conditions and the following
+ *    disclaimer.
+ *
+ *  - Redistributions in binary form must reproduce the above
+ *    copyright notice, this list of conditions and the following
+ *    disclaimer in the documentation and/or other materials
+ *    provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id$
+ */
+
+#ifndef MLNX_UVP_VERBS_H
+#define MLNX_UVP_VERBS_H
+
+#include
+
+#ifdef __cplusplus
+# define BEGIN_C_DECLS extern "C" {
+# define END_C_DECLS }
+#else /* !__cplusplus */
+# define BEGIN_C_DECLS
+# define END_C_DECLS
+#endif /* __cplusplus */
+
+BEGIN_C_DECLS
+
+union ibv_gid {
+	uint8_t	raw[16];
+	struct {
+		uint64_t	subnet_prefix;
+		uint64_t	interface_id;
+	} global;
+};
+
+enum ibv_node_type {
+	IBV_NODE_CA = 1,
+	IBV_NODE_SWITCH,
+	IBV_NODE_ROUTER
+};
+
+enum ibv_device_cap_flags {
+	IBV_DEVICE_RESIZE_MAX_WR	= 1,
+	IBV_DEVICE_BAD_PKEY_CNTR	= 1 << 1,
+	IBV_DEVICE_BAD_QKEY_CNTR	= 1 << 2,
+	IBV_DEVICE_RAW_MULTI		= 1 << 3,
+	IBV_DEVICE_AUTO_PATH_MIG	= 1 << 4,
+	IBV_DEVICE_CHANGE_PHY_PORT	= 1 << 5,
+	IBV_DEVICE_UD_AV_PORT_ENFORCE	= 1 << 6,
+	IBV_DEVICE_CURR_QP_STATE_MOD	= 1 << 7,
+	IBV_DEVICE_SHUTDOWN_PORT	= 1 << 8,
+	IBV_DEVICE_INIT_TYPE		= 1 << 9,
+	IBV_DEVICE_PORT_ACTIVE_EVENT	= 1 << 10,
+	IBV_DEVICE_SYS_IMAGE_GUID	= 1 << 11,
+	IBV_DEVICE_RC_RNR_NAK_GEN	= 1 << 12,
+	IBV_DEVICE_SRQ_RESIZE		= 1 << 13,
+	IBV_DEVICE_N_NOTIFY_CQ		= 1 << 14,
+};
+
+enum ibv_atomic_cap {
+	IBV_ATOMIC_NONE,
+	IBV_ATOMIC_HCA,
+	IBV_ATOMIC_GLOB
+};
+
+struct ibv_device_attr {
+	char	fw_ver[64];
+	uint64_t	node_guid;
+	uint64_t	sys_image_guid;
+	uint64_t	max_mr_size;
+	uint64_t	page_size_cap;
+	uint32_t	vendor_id;
+	uint32_t	vendor_part_id;
+	uint32_t	hw_ver;
+	int	max_qp;
+	int	max_qp_wr;
+	int	device_cap_flags;
+	int	max_sge;
+	int	max_sge_rd;
+	int	max_cq;
+	int	max_cqe;
+	int	max_mr;
+	int	max_pd;
+	int	max_qp_rd_atom;
+	int	max_ee_rd_atom;
+	int	max_res_rd_atom;
+	int	max_qp_init_rd_atom;
+	int	max_ee_init_rd_atom;
+	enum ibv_atomic_cap	atomic_cap;
+	int	max_ee;
+	int	max_rdd;
+	int	max_mw;
+	int	max_raw_ipv6_qp;
+	int	max_raw_ethy_qp;
+	int	max_mcast_grp;
+	int	max_mcast_qp_attach;
+	int	max_total_mcast_qp_attach;
+	uint64_t	max_ah;
+	int	max_fmr;
+	int	max_map_per_fmr;
+	int	max_srq;
+	int	max_srq_wr;
+	int	max_srq_sge;
+	uint16_t	max_pkeys;
+	uint8_t	local_ca_ack_delay;
+	uint8_t	phys_port_cnt;
+};
+
+enum ibv_mtu {
+	IBV_MTU_256  = 1,
+	IBV_MTU_512  = 2,
+	IBV_MTU_1024 = 3,
+	IBV_MTU_2048 = 4,
+	IBV_MTU_4096 = 5
+};
+
+enum ibv_port_state {
+	IBV_PORT_NOP		= 0,
+	IBV_PORT_DOWN		= 1,
+	IBV_PORT_INIT		= 2,
+	IBV_PORT_ARMED		= 3,
+	IBV_PORT_ACTIVE		= 4,
+	IBV_PORT_ACTIVE_DEFER	= 5
+};
+
+struct ibv_port_attr {
+	enum ibv_port_state	state;
+	enum ibv_mtu	max_mtu;
+	enum ibv_mtu	active_mtu;
+	int	gid_tbl_len;
+	uint32_t	port_cap_flags;
+	uint32_t	max_msg_sz;
+	uint32_t	bad_pkey_cntr;
+	uint32_t	qkey_viol_cntr;
+	uint16_t	pkey_tbl_len;
+	uint16_t	lid;
+	uint16_t	sm_lid;
+	uint8_t	lmc;
+	uint8_t	max_vl_num;
+	uint8_t	sm_sl;
+	uint8_t	subnet_timeout;
+	uint8_t	init_type_reply;
+	uint8_t	active_width;
+	uint8_t	active_speed;
+	uint8_t	phys_state;
+};
+
+enum ibv_event_type {
+	IBV_EVENT_CQ_ERR,
+	IBV_EVENT_QP_FATAL,
+	IBV_EVENT_QP_REQ_ERR,
+	IBV_EVENT_QP_ACCESS_ERR,
+	IBV_EVENT_COMM_EST,
+	IBV_EVENT_SQ_DRAINED,
+	IBV_EVENT_PATH_MIG,
+	IBV_EVENT_PATH_MIG_ERR,
+	IBV_EVENT_DEVICE_FATAL,
+	IBV_EVENT_PORT_ACTIVE,
+	IBV_EVENT_PORT_ERR,
+	IBV_EVENT_LID_CHANGE,
+	IBV_EVENT_PKEY_CHANGE,
+	IBV_EVENT_SM_CHANGE,
+	IBV_EVENT_SRQ_ERR,
+	IBV_EVENT_SRQ_LIMIT_REACHED,
+	IBV_EVENT_QP_LAST_WQE_REACHED
+};
+
+struct ibv_async_event {
+	union {
+		struct ibv_cq	*cq;
+		struct ibv_qp	*qp;
+		struct ibv_srq	*srq;
+		int	port_num;
+	} element;
+	enum ibv_event_type	event_type;
+};
+
+enum ibv_access_flags {
+	IBV_ACCESS_LOCAL_WRITE		= 1,
+	IBV_ACCESS_REMOTE_WRITE		= (1<<1),
+	IBV_ACCESS_REMOTE_READ		= (1<<2),
+	IBV_ACCESS_REMOTE_ATOMIC	= (1<<3),
+	IBV_ACCESS_MW_BIND		= (1<<4)
+};
+
+struct ibv_pd {
+	struct ibv_context	*context;
+	uint64_t	handle;
+};
+
+struct ibv_mr {
+	struct ibv_context	*context;
+	struct ibv_pd	*pd;
+	uint64_t	handle;
+	uint32_t	lkey;
+	uint32_t	rkey;
+};
+
+struct ibv_global_route {
+	ib_gid_t	dgid;
+	uint32_t	flow_label;
+	uint8_t	sgid_index;
+	uint8_t	hop_limit;
+	uint8_t	traffic_class;
+};
+
+struct ibv_ah_attr {
+	struct ibv_global_route	grh;
+	uint16_t	dlid;
+	uint8_t	sl;
+	uint8_t	src_path_bits;
+	uint8_t	static_rate;
+	uint8_t	is_global;
+	uint8_t	port_num;
+};
+
+
+enum ib_cq_notify {
+	IB_CQ_SOLICITED,
+	IB_CQ_NEXT_COMP
+};
+
+enum ibv_srq_attr_mask {
+	IBV_SRQ_MAX_WR	= 1 << 0,
+	IBV_SRQ_LIMIT	= 1 << 1,
+};
+
+struct ibv_srq_attr {
+	uint32_t	max_wr;
+	uint32_t	max_sge;
+	uint32_t	srq_limit;
+};
+
+struct ibv_srq_init_attr {
+	void	*srq_context;
+	struct ibv_srq_attr	attr;
+};
+
+struct ibv_qp_cap {
+	uint32_t	max_send_wr;
+	uint32_t	max_recv_wr;
+	uint32_t	max_send_sge;
+	uint32_t	max_recv_sge;
+	uint32_t	max_inline_data;
+};
+
+struct ibv_qp_init_attr {
+	void	*qp_context;
+	struct ibv_cq	*send_cq;
+	struct ibv_cq	*recv_cq;
+	struct ibv_srq	*srq;
+	struct ibv_qp_cap	cap;
+	ib_qp_type_t	qp_type;
+	int	sq_sig_all;
+};
+
+enum ibv_qp_attr_mask {
+	IBV_QP_STATE			= 1 << 0,
+	IBV_QP_CUR_STATE		= 1 << 1,
+	IBV_QP_EN_SQD_ASYNC_NOTIFY	= 1 << 2,
+	IBV_QP_ACCESS_FLAGS		= 1 << 3,
+	IBV_QP_PKEY_INDEX		= 1 << 4,
+	IBV_QP_PORT			= 1 << 5,
+	IBV_QP_QKEY			= 1 << 6,
+	IBV_QP_AV			= 1 << 7,
+	IBV_QP_PATH_MTU			= 1 << 8,
+	IBV_QP_TIMEOUT			= 1 << 9,
+	IBV_QP_RETRY_CNT		= 1 << 10,
+	IBV_QP_RNR_RETRY		= 1 << 11,
+	IBV_QP_RQ_PSN			= 1 << 12,
+	IBV_QP_MAX_QP_RD_ATOMIC		= 1 << 13,
+	IBV_QP_ALT_PATH			= 1 << 14,
+	IBV_QP_MIN_RNR_TIMER		= 1 << 15,
+	IBV_QP_SQ_PSN			= 1 << 16,
+	IBV_QP_MAX_DEST_RD_ATOMIC	= 1 << 17,
+	IBV_QP_PATH_MIG_STATE		= 1 << 18,
+	IBV_QP_CAP			= 1 << 19,
+	IBV_QP_DEST_QPN			= 1 << 20
+};
+
+enum ibv_qp_state {
+	IBV_QPS_RESET,
+	IBV_QPS_INIT,
+	IBV_QPS_RTR,
+	IBV_QPS_RTS,
+	IBV_QPS_SQD,
+	IBV_QPS_SQE,
+	IBV_QPS_ERR
+};
+
+enum ibv_mig_state {
+	IBV_MIG_MIGRATED,
+	IBV_MIG_REARM,
+	IBV_MIG_ARMED
+};
+
+struct ibv_qp_attr {
+	enum ibv_qp_state	qp_state;
+	enum ibv_qp_state	cur_qp_state;
+	enum ibv_mtu	path_mtu;
+	enum ibv_mig_state	path_mig_state;
+	uint32_t	qkey;
+	uint32_t	rq_psn;
+	uint32_t	sq_psn;
+	uint32_t	dest_qp_num;
+	int	qp_access_flags;
+	struct ibv_qp_cap	cap;
+	struct ibv_ah_attr	ah_attr;
+	struct ibv_ah_attr	alt_ah_attr;
+	uint16_t	pkey_index;
+	uint16_t	alt_pkey_index;
+	uint8_t	en_sqd_async_notify;
+	uint8_t	sq_draining;
+	uint8_t	max_rd_atomic;
+	uint8_t	max_dest_rd_atomic;
+	uint8_t	min_rnr_timer;
+	uint8_t	port_num;
+	uint8_t	timeout;
+	uint8_t	retry_cnt;
+	uint8_t	rnr_retry;
+	uint8_t	alt_port_num;
+	uint8_t	alt_timeout;
+};
+
+
+enum ibv_send_flags {
+	IBV_SEND_FENCE		= 1 << 0,
+	IBV_SEND_SIGNALED	= 1 << 1,
+	IBV_SEND_SOLICITED	= 1 << 2,
+	IBV_SEND_INLINE		= 1 << 3
+};
+
+struct ibv_sge {
+	uint64_t	addr;
+	uint32_t	length;
+	uint32_t	lkey;
+};
+
+struct ibv_send_wr {
+	struct ibv_send_wr	*next;
+	uint64_t	wr_id;
+	struct ibv_sge	*sg_list;
+	int	num_sge;
+	enum ibv_wr_opcode	opcode;
+	enum ibv_send_flags	send_flags;
+	uint32_t	imm_data;	/* in network byte order */
+	union {
+		struct {
+			uint64_t	remote_addr;
+			uint32_t	rkey;
+		} rdma;
+		struct {
+			uint64_t	remote_addr;
+			uint64_t	compare_add;
+			uint64_t	swap;
+			uint32_t	rkey;
+		} atomic;
+		struct {
+			struct mthca_ah	*ah;
+			uint32_t	remote_qpn;
+			uint32_t	remote_qkey;
+		} ud;
+	} wr;
+};
+
+struct ibv_recv_wr {
+	struct ibv_recv_wr	*next;
+	uint64_t	wr_id;
+	struct ibv_sge	*sg_list;
+	int	num_sge;
+};
+
+typedef enum MTHCA_QP_ACCESS_FLAGS {
+	MTHCA_ACCESS_LOCAL_WRITE	= 1,
+	MTHCA_ACCESS_REMOTE_WRITE	= (1<<1),
+	MTHCA_ACCESS_REMOTE_READ	= (1<<2),
+	MTHCA_ACCESS_REMOTE_ATOMIC	= (1<<3),
+	MTHCA_ACCESS_MW_BIND		= (1<<4)
+} mthca_qp_access_t;
+
+
+struct ibv_srq {
+	struct ibv_pd	*pd;
+	uint64_t	handle;
+	struct ibv_context	*context;
+};
+
+struct ibv_qp {
+	struct ibv_pd	*pd;
+	struct ibv_cq	*send_cq;
+	struct ibv_cq	*recv_cq;
+	struct ibv_srq	*srq;
+	uint64_t	handle;
+	uint32_t	qp_num;
+	enum ibv_qp_state	state;
+	ib_qp_type_t	qp_type;
+	struct ibv_context	*context;
+	void	*qp_context;
+};
+
+struct ibv_cq {
+	uint64_t	handle;
+	int	cqe;
+	struct ibv_context	*context;
+};
+
+struct ibv_ah {
+	struct ibv_pd	*pd;
+};
+
+struct ibv_context_ops {
+	int	(*query_device)(struct ibv_context *context,
+				struct ibv_device_attr *device_attr);
+	int	(*query_port)(struct ibv_context *context, uint8_t port_num,
+				struct ibv_port_attr *port_attr);
+	struct ibv_pd *	(*alloc_pd)(struct ibv_context *context, struct ibv_alloc_pd_resp *resp_p);
+	int	(*dealloc_pd)(struct ibv_pd *pd);
+	struct ibv_mr *	(*reg_mr)(struct ibv_pd *pd, void *addr, size_t length,
+				enum ibv_access_flags access);
+	int	(*dereg_mr)(struct ibv_mr *mr);
+	struct ibv_cq *	(*create_cq_pre)(struct ibv_context *context, int *cqe,
+				struct ibv_create_cq *req);
+	struct ibv_cq *	(*create_cq_post)(struct ibv_context *context,
+				struct ibv_create_cq_resp *resp);
+	int	(*poll_cq)(struct ibv_cq *cq, int num_entries, struct _ib_wc *wc);
+	int	(*poll_cq_list)( struct ibv_cq *ibcq,
+				struct _ib_wc** const pp_free_wclist,
+				struct _ib_wc** const pp_done_wclist );
+	int	(*req_notify_cq)(struct ibv_cq *cq, int solicited_only);
+	int	(*destroy_cq)(struct ibv_cq *cq);
+	struct ibv_srq *	(*create_srq)(struct ibv_pd *pd,
+				struct ibv_srq_init_attr *srq_init_attr);
+	int	(*modify_srq)(struct ibv_srq *srq,
+				struct ibv_srq_attr *srq_attr,
+				enum ibv_srq_attr_mask srq_attr_mask);
+	int	(*destroy_srq)(struct ibv_srq *srq);
+	int	(*post_srq_recv)(struct ibv_srq *srq,
+				struct _ib_recv_wr *recv_wr,
+				struct _ib_recv_wr **bad_recv_wr);
+	struct ibv_qp *	(*create_qp_pre)(struct ibv_pd *pd,
+				struct ibv_qp_init_attr *attr, struct ibv_create_qp *req);
+	struct ibv_qp *	(*create_qp_post)(struct ibv_pd *pd,
+				struct ibv_create_qp_resp *resp);
+	int	(*modify_qp)(struct ibv_qp *qp, struct ibv_qp_attr *attr,
+				enum ibv_qp_attr_mask attr_mask);
+	int	(*destroy_qp)(struct ibv_qp *qp);
+	int	(*post_send)(struct ibv_qp *qp, struct _ib_send_wr *wr,
+				struct _ib_send_wr **bad_wr);
+	int	(*post_recv)(struct ibv_qp *qp, struct _ib_recv_wr *wr,
+				struct _ib_recv_wr **bad_wr);
+	int	(*attach_mcast)(struct ibv_qp *qp, union ibv_gid *gid,
+				uint16_t lid);
+	int	(*detach_mcast)(struct ibv_qp *qp, union ibv_gid *gid,
+				uint16_t lid);
+};
+
+struct ibv_context {
+	struct ibv_context_ops	ops;
+	void	*abi_compat;
+};
+
+int align_queue_size(struct ibv_context *context, int size, int spare);
+
+END_C_DECLS
+
+#endif /* INFINIBAND_VERBS_H */
diff --git a/trunk/inc/user/iba/ib_uvp.h b/trunk/inc/user/iba/ib_uvp.h
index a6fbd037..6e219444 100644
--- a/trunk/inc/user/iba/ib_uvp.h
+++ b/trunk/inc/user/iba/ib_uvp.h
@@ -1582,6 +1582,24 @@ typedef ib_api_status_t
 *
 ********/
 
+typedef struct _uvp_qp_create
+{
+	ib_qp_create_t	qp_create;
+
+	void	*context;
+	uint32_t	max_inline_send;
+	uint32_t	initiator_depth;
+	uint32_t	responder_resources;
+
+} uvp_qp_create_t;
+
+typedef ib_api_status_t
+(AL_API *uvp_wv_pre_create_qp) (
+	IN		const ib_pd_handle_t	h_uvp_pd,
+	IN		const uvp_qp_create_t	*p_create_attr,
+	IN OUT	ci_umv_buf_t		*p_umv_buf,
+	OUT		ib_qp_handle_t		*ph_uvp_qp);
+
 /********/
 
 /****f* user-mode Verbs/uvp_post_create_qp_t
@@ -3436,7 +3454,8 @@ typedef struct _uvp_interface
 	 */
 	uvp_nd_modify_qp_t		nd_modify_qp;
 	uvp_nd_get_qp_state_t	nd_get_qp_state;
-
+	uvp_wv_pre_create_qp	wv_pre_create_qp;
+
 } uvp_interface_t;
 
 /********/
-- 
2.41.0
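The fragment below is a minimal sketch, not part of the patch, showing one way a vendor library might implement the new wv_pre_create_qp entry point declared in ib_uvp.h above: the caller-supplied context from uvp_qp_create_t is carried into ibv_qp_init_attr.qp_context, matching the qp_context member that the verbs header above defines on struct ibv_qp. The example_* helper names are hypothetical, and the provider's usual headers (ib_uvp.h, the verbs header above, and complib) are assumed to already be in scope.

/* Sketch only -- not part of this patch.  example_convert_qp_create() and
 * example_pre_create_qp() are hypothetical provider helpers. */
void
example_convert_qp_create(
	IN		const ib_qp_create_t		*p_create,
	OUT		struct ibv_qp_init_attr		*p_attr );

ib_api_status_t
example_pre_create_qp(
	IN		const ib_pd_handle_t		h_uvp_pd,
	IN		struct ibv_qp_init_attr		*p_attr,
	IN OUT	ci_umv_buf_t				*p_umv_buf,
	OUT		ib_qp_handle_t			*ph_uvp_qp );

static ib_api_status_t AL_API
example_wv_pre_create_qp(
	IN		const ib_pd_handle_t		h_uvp_pd,
	IN		const uvp_qp_create_t		*p_create_attr,
	IN OUT	ci_umv_buf_t				*p_umv_buf,
	OUT		ib_qp_handle_t			*ph_uvp_qp )
{
	struct ibv_qp_init_attr	attr;

	cl_memclr( &attr, sizeof(attr) );

	/* Translate the embedded ib_qp_create_t into the verbs init
	 * attributes (hypothetical helper). */
	example_convert_qp_create( &p_create_attr->qp_create, &attr );

	/* Carry the caller's context in the init attributes; the verbs
	 * layer's ibv_qp has a matching qp_context member to keep it in. */
	attr.qp_context = p_create_attr->context;

	/* Hand off to the provider's existing pre-create path, which sizes
	 * the private buffers in p_umv_buf and returns the provider QP
	 * handle (hypothetical helper). */
	return example_pre_create_qp( h_uvp_pd, &attr, p_umv_buf, ph_uvp_qp );
}

A caller that holds the extended uvp_interface_t would fill uvp_qp_create_t (qp_create plus the new context, max_inline_send, initiator_depth and responder_resources fields) and reach such a routine through the wv_pre_create_qp member added in the last hunk.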