From fa8a9d5da5490e6a902498b20ffef1567e3b2c00 Mon Sep 17 00:00:00 2001 From: leonidk Date: Sun, 13 Jul 2008 10:28:11 +0000 Subject: [PATCH] [IBAL, HW, IPOIB] Remove VOID_PTR64. Also fixed GPL license and missing copyright issues. Signed-off-by: Fab Tillier git-svn-id: svn://openib.tc.cornell.edu/gen1@1380 ad392aa1-c5ef-ae45-8dd8-e69d62a5ef86 --- trunk/core/al/kernel/al_proxy.c | 2 +- trunk/core/al/kernel/al_proxy_ndi.c | 2 +- trunk/core/al/kernel/al_proxy_subnet.c | 2 +- trunk/core/al/kernel/al_proxy_verbs.c | 30 +- trunk/core/al/user/ual_cm_cep.c | 8 +- trunk/core/al/user/ual_mcast.c | 2 +- trunk/core/al/user/ual_mgr.c | 4 +- trunk/core/al/user/ual_mw.c | 2 +- trunk/core/al/user/ual_qp.c | 6 +- trunk/hw/mlx4/kernel/hca/ca.c | 844 +++---- trunk/hw/mlx4/user/hca/verbs.c | 3230 ++++++++++++------------ trunk/hw/mthca/kernel/hca_data.h | 2 +- trunk/hw/mthca/kernel/hca_mcast.c | 2 +- trunk/hw/mthca/kernel/hca_memory.c | 6 +- trunk/hw/mthca/kernel/hca_verbs.c | 26 +- trunk/hw/mthca/kernel/mthca_mad.c | 584 +++-- trunk/hw/mthca/user/mlnx_ual_av.c | 4 +- trunk/hw/mthca/user/mlnx_ual_ca.c | 2 +- trunk/hw/mthca/user/mlnx_ual_cq.c | 2 +- trunk/hw/mthca/user/mlnx_ual_pd.c | 2 +- trunk/hw/mthca/user/mlnx_ual_qp.c | 2 +- trunk/hw/mthca/user/mlnx_ual_srq.c | 2 +- trunk/inc/iba/ib_types.h | 4 - trunk/ulp/ipoib/kernel/ipoib_driver.c | 3 +- 24 files changed, 2384 insertions(+), 2389 deletions(-) diff --git a/trunk/core/al/kernel/al_proxy.c b/trunk/core/al/kernel/al_proxy.c index bb150476..12809bbe 100644 --- a/trunk/core/al/kernel/al_proxy.c +++ b/trunk/core/al/kernel/al_proxy.c @@ -776,7 +776,7 @@ __proxy_pnp_cb( break; } - p_evt_rec->pnp.h_pnp = (ib_pnp_handle_t VOID_PTR64)HDL_TO_PTR(p_pnp_rec->h_pnp->obj.hdl); + p_evt_rec->pnp.h_pnp = (ib_pnp_handle_t)HDL_TO_PTR(p_pnp_rec->h_pnp->obj.hdl); p_pnp_rec->h_pnp->obj.hdl_valid = TRUE; hdl = diff --git a/trunk/core/al/kernel/al_proxy_ndi.c b/trunk/core/al/kernel/al_proxy_ndi.c index 49276386..4a947407 100644 --- a/trunk/core/al/kernel/al_proxy_ndi.c +++ b/trunk/core/al/kernel/al_proxy_ndi.c @@ -387,7 +387,7 @@ __ndi_rep_cm( } /* Get and validate QP handle */ - h_qp = (ib_qp_handle_t VOID_PTR64)al_hdl_ref( p_context->h_al, p_rep->h_qp, AL_OBJ_TYPE_H_QP ); + h_qp = (ib_qp_handle_t)al_hdl_ref( p_context->h_al, p_rep->h_qp, AL_OBJ_TYPE_H_QP ); if( !h_qp ) { cl_status = CL_INVALID_HANDLE; diff --git a/trunk/core/al/kernel/al_proxy_subnet.c b/trunk/core/al/kernel/al_proxy_subnet.c index 39355536..3c05a353 100644 --- a/trunk/core/al/kernel/al_proxy_subnet.c +++ b/trunk/core/al/kernel/al_proxy_subnet.c @@ -697,7 +697,7 @@ __proxy_mad_recv_cb( cb_info.ioctl_rec.mad_recv_cb_ioctl_rec.mad_svc_context = mad_svc_context; cb_info.ioctl_rec.mad_recv_cb_ioctl_rec.elem_size = p_mad_element->size; cb_info.ioctl_rec.mad_recv_cb_ioctl_rec.p_send_mad = - (ib_mad_element_t* VOID_PTR64)p_mad_element->send_context1; + (ib_mad_element_t*)p_mad_element->send_context1; /* * If we're already closing the device - do not queue a callback, since diff --git a/trunk/core/al/kernel/al_proxy_verbs.c b/trunk/core/al/kernel/al_proxy_verbs.c index 57df4d2d..58e0f7dd 100644 --- a/trunk/core/al/kernel/al_proxy_verbs.c +++ b/trunk/core/al/kernel/al_proxy_verbs.c @@ -347,7 +347,7 @@ proxy_ca_err_cb( cb_info.rec_type = CA_ERROR_REC; /* Return the Proxy's open_ca handle and the user's context */ cb_info.ioctl_rec.event_rec = *p_err_rec; - cb_info.ioctl_rec.event_rec.handle.h_ca = (ib_ca_handle_t VOID_PTR64)HDL_TO_PTR(h_ca->obj.hdl); + cb_info.ioctl_rec.event_rec.handle.h_ca = 
(ib_ca_handle_t)HDL_TO_PTR(h_ca->obj.hdl); /* The proxy handle must be valid now. */ if( !h_ca->obj.hdl_valid ) @@ -982,7 +982,7 @@ proxy_srq_err_cb( cb_info.rec_type = SRQ_ERROR_REC; /* Return the Proxy's SRQ handle and the user's context */ cb_info.ioctl_rec.event_rec = *p_err_rec; - cb_info.ioctl_rec.event_rec.handle.h_srq = (ib_srq_handle_t VOID_PTR64) HDL_TO_PTR(h_srq->obj.hdl); + cb_info.ioctl_rec.event_rec.handle.h_srq = (ib_srq_handle_t) HDL_TO_PTR(h_srq->obj.hdl); /* The proxy handle must be valid now. */ if( !h_srq->obj.hdl_valid ) @@ -1286,7 +1286,7 @@ proxy_qp_err_cb( cb_info.rec_type = QP_ERROR_REC; /* Return the Proxy's QP handle and the user's context */ cb_info.ioctl_rec.event_rec = *p_err_rec; - cb_info.ioctl_rec.event_rec.handle.h_qp = (ib_qp_handle_t VOID_PTR64)HDL_TO_PTR(h_qp->obj.hdl); + cb_info.ioctl_rec.event_rec.handle.h_qp = (ib_qp_handle_t)HDL_TO_PTR(h_qp->obj.hdl); /* The proxy handle must be valid now. */ if( !h_qp->obj.hdl_valid ) @@ -1337,14 +1337,14 @@ proxy_create_qp( } /* Validate handles. */ - h_pd = (ib_pd_handle_t VOID_PTR64) + h_pd = (ib_pd_handle_t) al_hdl_ref( p_context->h_al, p_ioctl->in.h_pd, AL_OBJ_TYPE_H_PD ); - h_sq_cq = (ib_cq_handle_t VOID_PTR64)al_hdl_ref( p_context->h_al, + h_sq_cq = (ib_cq_handle_t)al_hdl_ref( p_context->h_al, (uint64_t)p_ioctl->in.qp_create.h_sq_cq, AL_OBJ_TYPE_H_CQ ); - h_rq_cq = (ib_cq_handle_t VOID_PTR64)al_hdl_ref( p_context->h_al, + h_rq_cq = (ib_cq_handle_t)al_hdl_ref( p_context->h_al, (uint64_t)p_ioctl->in.qp_create.h_rq_cq, AL_OBJ_TYPE_H_CQ ); if (p_ioctl->in.qp_create.h_srq) { - h_srq = (ib_srq_handle_t VOID_PTR64)al_hdl_ref( p_context->h_al, + h_srq = (ib_srq_handle_t)al_hdl_ref( p_context->h_al, (uint64_t)p_ioctl->in.qp_create.h_srq, AL_OBJ_TYPE_H_SRQ ); if( !h_srq) { @@ -1486,25 +1486,25 @@ proxy_query_qp( if( p_ioctl->out.attr.h_pd ) { p_ioctl->out.attr.h_pd = - (ib_pd_handle_t VOID_PTR64)HDL_TO_PTR(p_ioctl->out.attr.h_pd->obj.hdl); + (ib_pd_handle_t)HDL_TO_PTR(p_ioctl->out.attr.h_pd->obj.hdl); } if( p_ioctl->out.attr.h_sq_cq ) { p_ioctl->out.attr.h_sq_cq = - (ib_cq_handle_t VOID_PTR64)HDL_TO_PTR(p_ioctl->out.attr.h_sq_cq->obj.hdl); + (ib_cq_handle_t)HDL_TO_PTR(p_ioctl->out.attr.h_sq_cq->obj.hdl); } if( p_ioctl->out.attr.h_rq_cq ) { p_ioctl->out.attr.h_rq_cq = - (ib_cq_handle_t VOID_PTR64)HDL_TO_PTR(p_ioctl->out.attr.h_rq_cq->obj.hdl); + (ib_cq_handle_t)HDL_TO_PTR(p_ioctl->out.attr.h_rq_cq->obj.hdl); } if( p_ioctl->out.attr.h_srq ) { p_ioctl->out.attr.h_srq = - (ib_srq_handle_t VOID_PTR64)HDL_TO_PTR(p_ioctl->out.attr.h_srq->obj.hdl); + (ib_srq_handle_t)HDL_TO_PTR(p_ioctl->out.attr.h_srq->obj.hdl); } } else @@ -2045,7 +2045,7 @@ proxy_cq_err_cb( cb_info.rec_type = CQ_ERROR_REC; /* Return the Proxy's cq handle and the user's context */ cb_info.ioctl_rec.event_rec = *p_err_rec; - cb_info.ioctl_rec.event_rec.handle.h_cq = (ib_cq_handle_t VOID_PTR64)HDL_TO_PTR(h_cq->obj.hdl); + cb_info.ioctl_rec.event_rec.handle.h_cq = (ib_cq_handle_t)HDL_TO_PTR(h_cq->obj.hdl); /* The proxy handle must be valid now. 
*/ if( !h_cq->obj.hdl_valid ) @@ -2358,7 +2358,7 @@ proxy_post_send( if( h_qp->type == IB_QPT_UNRELIABLE_DGRM ) { /* Validate the AV handle for UD */ - h_av = (ib_av_handle_t VOID_PTR64)al_hdl_ref( p_context->h_al, + h_av = (ib_av_handle_t)al_hdl_ref( p_context->h_al, (uint64_t)p_wr[i].dgrm.ud.h_av, AL_OBJ_TYPE_H_AV ); if( !h_av ) { @@ -3041,7 +3041,7 @@ proxy_query_mr( { /* Replace the pd handle with proxy's handle */ p_ioctl->out.attr.h_pd = - (ib_pd_handle_t VOID_PTR64)HDL_TO_PTR(p_ioctl->out.attr.h_pd->obj.hdl); + (ib_pd_handle_t)HDL_TO_PTR(p_ioctl->out.attr.h_pd->obj.hdl); } else { @@ -3492,7 +3492,7 @@ proxy_bind_mw( } /* Validate MR handle */ - h_mr = (ib_mr_handle_t VOID_PTR64)al_hdl_ref( p_context->h_al, + h_mr = (ib_mr_handle_t)al_hdl_ref( p_context->h_al, (uint64_t)p_ioctl->in.mw_bind.h_mr, AL_OBJ_TYPE_H_MR ); if( !h_mr ) { diff --git a/trunk/core/al/user/ual_cm_cep.c b/trunk/core/al/user/ual_cm_cep.c index e404fff2..07b14fec 100644 --- a/trunk/core/al/user/ual_cm_cep.c +++ b/trunk/core/al/user/ual_cm_cep.c @@ -522,7 +522,7 @@ al_cep_pre_req( cl_memclr(&ioctl, sizeof(ioctl)); ioctl.in.cid = cid; ioctl.in.cm_req = *p_cm_req; - ioctl.in.cm_req.h_qp = (ib_qp_handle_t VOID_PTR64) HDL_TO_PTR(p_cm_req->h_qp->obj.hdl); + ioctl.in.cm_req.h_qp = (ib_qp_handle_t) HDL_TO_PTR(p_cm_req->h_qp->obj.hdl); ioctl.in.paths[0] = *(p_cm_req->p_primary_path); if( p_cm_req->p_alt_path ) ioctl.in.paths[1] = *(p_cm_req->p_alt_path); @@ -651,7 +651,7 @@ al_cep_pre_rep( ioctl.in.context = context; ioctl.in.cid = cid; ioctl.in.cm_rep = *p_cm_rep; - ioctl.in.cm_rep.h_qp = (ib_qp_handle_t VOID_PTR64)HDL_TO_PTR(p_cm_rep->h_qp->obj.hdl); + ioctl.in.cm_rep.h_qp = (ib_qp_handle_t)HDL_TO_PTR(p_cm_rep->h_qp->obj.hdl); /* Copy private data, if any. */ if( p_cm_rep->p_rep_pdata ) { @@ -1010,7 +1010,7 @@ al_cep_lap( cl_memclr(&ioctl,sizeof (ioctl)); ioctl.cid = cid; ioctl.cm_lap = *p_cm_lap; - ioctl.cm_lap.h_qp = (ib_qp_handle_t VOID_PTR64) HDL_TO_PTR(p_cm_lap->h_qp->obj.hdl); + ioctl.cm_lap.h_qp = (ib_qp_handle_t) HDL_TO_PTR(p_cm_lap->h_qp->obj.hdl); ioctl.alt_path = *(p_cm_lap->p_alt_path); /* Copy private data, if any. */ if( p_cm_lap->p_lap_pdata ) @@ -1066,7 +1066,7 @@ al_cep_pre_apr( cl_memclr(&ioctl, sizeof (ioctl)); ioctl.in.cid = cid; ioctl.in.cm_apr = *p_cm_apr; - ioctl.in.cm_apr.h_qp = (ib_qp_handle_t VOID_PTR64)HDL_TO_PTR(p_cm_apr->h_qp->obj.hdl); + ioctl.in.cm_apr.h_qp = (ib_qp_handle_t)HDL_TO_PTR(p_cm_apr->h_qp->obj.hdl); if( p_cm_apr->p_info ) { if( p_cm_apr->info_length > IB_APR_INFO_SIZE ) diff --git a/trunk/core/al/user/ual_mcast.c b/trunk/core/al/user/ual_mcast.c index cda009e8..01253bd3 100644 --- a/trunk/core/al/user/ual_mcast.c +++ b/trunk/core/al/user/ual_mcast.c @@ -99,7 +99,7 @@ ual_attach_mcast( status = ioctl_buf.out.status; if( status == IB_SUCCESS ){ h_mcast->obj.hdl = ioctl_buf.out.h_attach; - h_mcast->h_ci_mcast = (ib_mcast_handle_t VOID_PTR64) HDL_TO_PTR(ioctl_buf.out.h_attach); + h_mcast->h_ci_mcast = (ib_mcast_handle_t) HDL_TO_PTR(ioctl_buf.out.h_attach); } } diff --git a/trunk/core/al/user/ual_mgr.c b/trunk/core/al/user/ual_mgr.c index ea7b04f5..e5f7c901 100644 --- a/trunk/core/al/user/ual_mgr.c +++ b/trunk/core/al/user/ual_mgr.c @@ -646,7 +646,7 @@ __process_misc_cb( /* We got a send completion. */ ib_mad_element_t *p_element; - ib_mad_svc_handle_t VOID_PTR64 h_mad_svc = (ib_mad_svc_handle_t VOID_PTR64) + ib_mad_svc_handle_t h_mad_svc = (ib_mad_svc_handle_t) p_misc_cb_info->ioctl_rec.mad_send_cb_ioctl_rec.mad_svc_context; /* Copy the data to the user's element. 
*/ @@ -682,7 +682,7 @@ __process_misc_cb( ib_mad_t *p_mad_buf = NULL; ib_grh_t *p_grh = NULL; - h_mad_svc = (ib_mad_svc_handle_t VOID_PTR64) + h_mad_svc = (ib_mad_svc_handle_t) p_misc_cb_info->ioctl_rec.mad_recv_cb_ioctl_rec.mad_svc_context; p_send_mad = diff --git a/trunk/core/al/user/ual_mw.c b/trunk/core/al/user/ual_mw.c index 03ba1d76..0d5dc23c 100644 --- a/trunk/core/al/user/ual_mw.c +++ b/trunk/core/al/user/ual_mw.c @@ -280,7 +280,7 @@ ual_bind_mw( mw_ioctl.in.h_mw = h_mw->obj.hdl; mw_ioctl.in.h_qp = h_qp->obj.hdl; mw_ioctl.in.mw_bind = *p_mw_bind; - mw_ioctl.in.mw_bind.h_mr = (ib_mr_handle_t VOID_PTR64) HDL_TO_PTR(p_mw_bind->h_mr->obj.hdl); + mw_ioctl.in.mw_bind.h_mr = (ib_mr_handle_t) HDL_TO_PTR(p_mw_bind->h_mr->obj.hdl); cl_status = do_al_dev_ioctl( UAL_BIND_MW, &mw_ioctl.in, sizeof(mw_ioctl.in), &mw_ioctl.out, sizeof(mw_ioctl.out), diff --git a/trunk/core/al/user/ual_qp.c b/trunk/core/al/user/ual_qp.c index 4cb94c13..b7996b84 100644 --- a/trunk/core/al/user/ual_qp.c +++ b/trunk/core/al/user/ual_qp.c @@ -314,12 +314,12 @@ ual_create_qp( qp_ioctl.in.h_pd = h_pd->obj.hdl; qp_ioctl.in.qp_create = *p_qp_create; qp_ioctl.in.qp_create.h_rq_cq = - (ib_cq_handle_t VOID_PTR64)HDL_TO_PTR(p_qp_create->h_rq_cq->obj.hdl); + (ib_cq_handle_t)HDL_TO_PTR(p_qp_create->h_rq_cq->obj.hdl); qp_ioctl.in.qp_create.h_sq_cq = - (ib_cq_handle_t VOID_PTR64)HDL_TO_PTR(p_qp_create->h_sq_cq->obj.hdl); + (ib_cq_handle_t)HDL_TO_PTR(p_qp_create->h_sq_cq->obj.hdl); if (p_qp_create->h_srq) qp_ioctl.in.qp_create.h_srq = - (ib_srq_handle_t VOID_PTR64)HDL_TO_PTR(p_qp_create->h_srq->obj.hdl); + (ib_srq_handle_t)HDL_TO_PTR(p_qp_create->h_srq->obj.hdl); qp_ioctl.in.context = h_qp; qp_ioctl.in.ev_notify = (h_qp->pfn_event_cb != NULL) ? TRUE : FALSE; diff --git a/trunk/hw/mlx4/kernel/hca/ca.c b/trunk/hw/mlx4/kernel/hca/ca.c index f8b1419a..8ce4413c 100644 --- a/trunk/hw/mlx4/kernel/hca/ca.c +++ b/trunk/hw/mlx4/kernel/hca/ca.c @@ -1,422 +1,422 @@ -/* - * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. - * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. - * - * This software is available to you under the OpenIB.org BSD license - * below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
- * - * $Id: al.c 1611 2006-08-20 14:48:55Z sleybo $ - */ - -#include "precomp.h" - -#if defined(EVENT_TRACING) -#ifdef offsetof -#undef offsetof -#endif -#include "ca.tmh" -#endif - -ib_api_status_t -mlnx_open_ca ( - IN const ib_net64_t ca_guid, // IN const char * ca_name, - IN const ci_completion_cb_t pfn_completion_cb, - IN const ci_async_event_cb_t pfn_async_event_cb, - IN const void*const ca_context, - OUT ib_ca_handle_t *ph_ca) -{ - mlnx_hca_t *p_hca; - ib_api_status_t status = IB_NOT_FOUND; - struct ib_device *p_ibdev; - - HCA_ENTER(HCA_DBG_SHIM); - HCA_PRINT(TRACE_LEVEL_INFORMATION ,HCA_DBG_SHIM, - ("context 0x%p\n", ca_context)); - - // find CA object - p_hca = mlnx_hca_from_guid( ca_guid ); - if( !p_hca ) { - if (status != IB_SUCCESS) - { - HCA_PRINT(TRACE_LEVEL_ERROR , HCA_DBG_SHIM, - ("completes with ERROR status IB_NOT_FOUND\n")); - } - HCA_EXIT(HCA_DBG_SHIM); - return IB_NOT_FOUND; - } - - p_ibdev = hca2ibdev(p_hca); - - if (hca_is_livefish(hca2fdo(p_hca))) - goto done; - - HCA_PRINT(TRACE_LEVEL_INFORMATION ,HCA_DBG_SHIM, - ("context 0x%p\n", ca_context)); - status = mlnx_set_cb(p_hca, - pfn_completion_cb, - pfn_async_event_cb, - ca_context); - if (IB_SUCCESS != status) { - goto err_set_cb; - } - - - //TODO: do we need something for kernel users ? - - // Return pointer to HCA object -done: - if (ph_ca) *ph_ca = (ib_ca_handle_t)p_hca; - status = IB_SUCCESS; - -//err_mad_cache: -err_set_cb: - if (status != IB_SUCCESS) - { - HCA_PRINT(TRACE_LEVEL_ERROR,HCA_DBG_SHIM, - ("completes with ERROR status %x\n", status)); - } - HCA_EXIT(HCA_DBG_SHIM); - return status; -} - -ib_api_status_t -mlnx_query_ca ( - IN const ib_ca_handle_t h_ca, - OUT ib_ca_attr_t *p_ca_attr, - IN OUT uint32_t *p_byte_count, - IN OUT ci_umv_buf_t *p_umv_buf ) -{ - int i; - int err; - ib_api_status_t status; - uint32_t size, required_size; - int port_num, num_ports; - uint32_t num_gids, num_pkeys; - uint32_t num_page_sizes = 1; // TBD: what is actually supported - uint8_t *last_p; - struct ib_device_attr props; - struct ib_port_attr *hca_ports = NULL; - mlnx_hca_t *p_hca = (mlnx_hca_t *)h_ca; - struct ib_device *p_ibdev = hca2ibdev(p_hca); - - - HCA_ENTER(HCA_DBG_SHIM); - - // sanity checks - if( p_umv_buf && p_umv_buf->command ) { - HCA_PRINT (TRACE_LEVEL_ERROR, HCA_DBG_SHIM ,("User mode is not supported yet\n")); - p_umv_buf->status = status = IB_UNSUPPORTED; - goto err_user_unsupported; - } - - if( !cl_is_blockable() ) { - status = IB_UNSUPPORTED; - goto err_unsupported; - } - - if (NULL == p_byte_count) { - status = IB_INVALID_PARAMETER; - goto err_byte_count; - } - - // query the device - if ( hca_is_livefish(hca2fdo(p_hca)) ) { - struct pci_dev *pdev = hca2pdev(p_hca); - props.max_pd = 1; - props.vendor_id = pdev->ven_id; - props.vendor_part_id = pdev->dev_id; - err = 0; - } - else - err = p_ibdev->query_device(p_ibdev, &props); - if (err) { - HCA_PRINT (TRACE_LEVEL_ERROR, HCA_DBG_SHIM, - ("ib_query_device failed (%d)\n",err)); - status = errno_to_iberr(err); - goto err_query_device; - } - - // alocate arrary for port properties - num_ports = p_ibdev->phys_port_cnt; /* Number of physical ports of the HCA */ - if ( num_ports ) - if (NULL == (hca_ports = cl_zalloc( num_ports * sizeof *hca_ports))) { - HCA_PRINT (TRACE_LEVEL_ERROR, HCA_DBG_SHIM, ("Failed to cl_zalloc ports array\n")); - status = IB_INSUFFICIENT_MEMORY; - goto err_alloc_ports; - } - - // start calculation of ib_ca_attr_t full size - num_gids = 0; - num_pkeys = 0; - required_size = PTR_ALIGN(sizeof(ib_ca_attr_t)) + - PTR_ALIGN(sizeof(uint32_t) 
* num_page_sizes) + - PTR_ALIGN(sizeof(ib_port_attr_t) * num_ports)+ - PTR_ALIGN(MLX4_BOARD_ID_LEN)+ - PTR_ALIGN(sizeof(uplink_info_t)); /* uplink info */ - - // get port properties - for (port_num = 0; port_num <= (end_port(p_ibdev) - start_port(p_ibdev)); ++port_num) { - // request - err = p_ibdev->query_port(p_ibdev, (u8)(port_num + start_port(p_ibdev)), &hca_ports[port_num]); - if (err) { - HCA_PRINT (TRACE_LEVEL_ERROR, HCA_DBG_SHIM, ("ib_query_port failed(%d) for port %d\n",err, port_num)); - status = errno_to_iberr(err); - goto err_query_port; - } - - // calculate GID table size - num_gids = hca_ports[port_num].gid_tbl_len; - size = PTR_ALIGN(sizeof(ib_gid_t) * num_gids); - required_size += size; - - // calculate pkeys table size - num_pkeys = hca_ports[port_num].pkey_tbl_len; - size = PTR_ALIGN(sizeof(uint16_t) * num_pkeys); - required_size += size; - } - - // resource sufficience check - if (NULL == p_ca_attr || *p_byte_count < required_size) { - *p_byte_count = required_size; - status = IB_INSUFFICIENT_MEMORY; - if ( p_ca_attr != NULL) { - HCA_PRINT (TRACE_LEVEL_ERROR,HCA_DBG_SHIM, - ("Failed *p_byte_count (%d) < required_size (%d)\n", *p_byte_count, required_size )); - } - goto err_insuff_mem; - } - - // Space is sufficient - setup table pointers - last_p = (uint8_t*)p_ca_attr; - last_p += PTR_ALIGN(sizeof(*p_ca_attr)); - - p_ca_attr->p_page_size = (uint32_t*)last_p; - last_p += PTR_ALIGN(num_page_sizes * sizeof(uint32_t)); - - p_ca_attr->p_port_attr = (ib_port_attr_t *)last_p; - last_p += PTR_ALIGN(num_ports * sizeof(ib_port_attr_t)); - - for (port_num = 0; port_num < num_ports; port_num++) { - p_ca_attr->p_port_attr[port_num].p_gid_table = (ib_gid_t *)last_p; - size = PTR_ALIGN(sizeof(ib_gid_t) * hca_ports[port_num].gid_tbl_len); - last_p += size; - - p_ca_attr->p_port_attr[port_num].p_pkey_table = (uint16_t *)last_p; - size = PTR_ALIGN(sizeof(uint16_t) * hca_ports[port_num].pkey_tbl_len); - last_p += size; - } - - //copy vendor specific data - cl_memcpy(last_p,hca2mdev(p_hca)->board_id, MLX4_BOARD_ID_LEN); - last_p += PTR_ALIGN(MLX4_BOARD_ID_LEN); - *(uplink_info_t*)last_p = hca2pdev(p_hca)->uplink_info; - last_p += PTR_ALIGN(sizeof(uplink_info_t)); /* uplink info */ - - // Separate the loops to ensure that table pointers are always setup - for (port_num = 0; port_num < num_ports; port_num++) { - - // get pkeys, using cache - for (i=0; i < hca_ports[port_num].pkey_tbl_len; ++i) { - err = p_ibdev->x.get_cached_pkey( p_ibdev, (u8)(port_num + start_port(p_ibdev)), i, - &p_ca_attr->p_port_attr[port_num].p_pkey_table[i] ); - if (err) { - status = errno_to_iberr(err); - HCA_PRINT (TRACE_LEVEL_ERROR,HCA_DBG_SHIM, - ("ib_get_cached_pkey failed (%d) for port_num %d, index %d\n", - err, port_num + start_port(p_ibdev), i)); - goto err_get_pkey; - } - } - - // get gids, using cache - for (i=0; i < hca_ports[port_num].gid_tbl_len; ++i) { - union ib_gid * VOID_PTR64 gid = (union ib_gid *)&p_ca_attr->p_port_attr[port_num].p_gid_table[i]; - err = p_ibdev->x.get_cached_gid( p_ibdev, (u8)(port_num + start_port(p_ibdev)), i, (union ib_gid *)gid ); - //TODO: do we need to convert gids to little endian - if (err) { - status = errno_to_iberr(err); - HCA_PRINT (TRACE_LEVEL_ERROR, HCA_DBG_SHIM, - ("ib_get_cached_gid failed (%d) for port_num %d, index %d\n", - err, port_num + start_port(p_ibdev), i)); - goto err_get_gid; - } - } - - HCA_PRINT(TRACE_LEVEL_VERBOSE, HCA_DBG_SHIM,("port %d gid0:\n", port_num)); - HCA_PRINT(TRACE_LEVEL_VERBOSE, HCA_DBG_SHIM, - (" 0x%x%x%x%x%x%x%x%x-0x%x%x%x%x%x%x%x%x\n", - 
p_ca_attr->p_port_attr[port_num].p_gid_table[0].raw[0], - p_ca_attr->p_port_attr[port_num].p_gid_table[0].raw[1], - p_ca_attr->p_port_attr[port_num].p_gid_table[0].raw[2], - p_ca_attr->p_port_attr[port_num].p_gid_table[0].raw[3], - p_ca_attr->p_port_attr[port_num].p_gid_table[0].raw[4], - p_ca_attr->p_port_attr[port_num].p_gid_table[0].raw[5], - p_ca_attr->p_port_attr[port_num].p_gid_table[0].raw[6], - p_ca_attr->p_port_attr[port_num].p_gid_table[0].raw[7], - p_ca_attr->p_port_attr[port_num].p_gid_table[0].raw[8], - p_ca_attr->p_port_attr[port_num].p_gid_table[0].raw[9], - p_ca_attr->p_port_attr[port_num].p_gid_table[0].raw[10], - p_ca_attr->p_port_attr[port_num].p_gid_table[0].raw[11], - p_ca_attr->p_port_attr[port_num].p_gid_table[0].raw[12], - p_ca_attr->p_port_attr[port_num].p_gid_table[0].raw[13], - p_ca_attr->p_port_attr[port_num].p_gid_table[0].raw[14], - p_ca_attr->p_port_attr[port_num].p_gid_table[0].raw[15])); - } - - // set result size - p_ca_attr->size = required_size; - CL_ASSERT( required_size == (((uintn_t)last_p) - ((uintn_t)p_ca_attr)) ); - HCA_PRINT(TRACE_LEVEL_VERBOSE, HCA_DBG_SHIM , ("Space required %d used %d\n", - required_size, (int)((uintn_t)last_p - (uintn_t)p_ca_attr) )); - - // !!! GID/PKEY tables must be queried before this call !!! - from_hca_cap(p_ibdev, &props, hca_ports, p_ca_attr); - - status = IB_SUCCESS; - -err_get_gid: -err_get_pkey: -err_insuff_mem: -err_query_port: - if (hca_ports) - cl_free(hca_ports); -err_alloc_ports: -err_query_device: -err_byte_count: -err_unsupported: -err_user_unsupported: - if( status != IB_INSUFFICIENT_MEMORY && status != IB_SUCCESS ) - HCA_PRINT(TRACE_LEVEL_ERROR, HCA_DBG_SHIM, - ("completes with ERROR status %x\n", status)); - HCA_EXIT(HCA_DBG_SHIM); - return status; -} - -ib_api_status_t -mlnx_modify_ca ( - IN const ib_ca_handle_t h_ca, - IN const uint8_t port_num, - IN const ib_ca_mod_t modca_cmd, - IN const ib_port_attr_mod_t *p_port_attr) -{ -#define SET_CAP_MOD(al_mask, al_fld, ib) \ - if (modca_cmd & al_mask) { \ - if (p_port_attr->cap.##al_fld) \ - props.set_port_cap_mask |= ib; \ - else \ - props.clr_port_cap_mask |= ib; \ - } - - ib_api_status_t status; - int err; - struct ib_port_modify props; - int port_modify_mask = 0; - mlnx_hca_t *p_hca = (mlnx_hca_t *)h_ca; - struct ib_device *p_ibdev = hca2ibdev(p_hca); - - HCA_ENTER(HCA_DBG_SHIM); - - //sanity check - if( !cl_is_blockable() ) { - status = IB_UNSUPPORTED; - goto err_unsupported; - } - - if (port_num < start_port(p_ibdev) || port_num > end_port(p_ibdev)) { - status = IB_INVALID_PORT; - goto err_port; - } - - // prepare parameters - RtlZeroMemory(&props, sizeof(props)); - SET_CAP_MOD(IB_CA_MOD_IS_SM, sm, IB_PORT_SM); - SET_CAP_MOD(IB_CA_MOD_IS_SNMP_SUPPORTED, snmp, IB_PORT_SNMP_TUNNEL_SUP); - SET_CAP_MOD(IB_CA_MOD_IS_DEV_MGMT_SUPPORTED, dev_mgmt, IB_PORT_DEVICE_MGMT_SUP); - SET_CAP_MOD(IB_CA_MOD_IS_VEND_SUPPORTED, vend, IB_PORT_VENDOR_CLASS_SUP); - if ((modca_cmd & IB_CA_MOD_QKEY_CTR) && (p_port_attr->qkey_ctr == 0)) - port_modify_mask |= IB_PORT_RESET_QKEY_CNTR; - - // modify port - err = p_ibdev->modify_port(p_ibdev, port_num, port_modify_mask, &props ); - if (err) { - status = errno_to_iberr(err); - HCA_PRINT(TRACE_LEVEL_ERROR , HCA_DBG_SHIM ,("ib_modify_port failed (%d) \n",err)); - goto err_modify_port; - } - - status = IB_SUCCESS; - -err_modify_port: -err_port: -err_unsupported: - if (status != IB_SUCCESS) - { - HCA_PRINT(TRACE_LEVEL_ERROR, HCA_DBG_SHIM, - ("completes with ERROR status %x\n", status)); - } - HCA_EXIT(HCA_DBG_SHIM); - return status; -} - 
-ib_api_status_t -mlnx_close_ca ( - IN ib_ca_handle_t h_ca) -{ - mlnx_hca_t *p_hca = (mlnx_hca_t *)h_ca; - HCA_ENTER(HCA_DBG_SHIM); - - - if (hca_is_livefish(hca2fdo(p_hca))) - goto done; - - mlnx_reset_cb(p_hca); - -done: - HCA_EXIT(HCA_DBG_SHIM); - - return IB_SUCCESS; -} - - - -void -mlnx_ca_if( - IN OUT ci_interface_t *p_interface ) -{ - p_interface->open_ca = mlnx_open_ca; - p_interface->modify_ca = mlnx_modify_ca; - p_interface->query_ca = mlnx_query_ca; - p_interface->close_ca = mlnx_close_ca; -} - -void -mlnx_ca_if_livefish( - IN OUT ci_interface_t *p_interface ) -{ - p_interface->open_ca = mlnx_open_ca; - p_interface->query_ca = mlnx_query_ca; - p_interface->close_ca = mlnx_close_ca; -} - +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id: al.c 1611 2006-08-20 14:48:55Z sleybo $ + */ + +#include "precomp.h" + +#if defined(EVENT_TRACING) +#ifdef offsetof +#undef offsetof +#endif +#include "ca.tmh" +#endif + +ib_api_status_t +mlnx_open_ca ( + IN const ib_net64_t ca_guid, // IN const char * ca_name, + IN const ci_completion_cb_t pfn_completion_cb, + IN const ci_async_event_cb_t pfn_async_event_cb, + IN const void*const ca_context, + OUT ib_ca_handle_t *ph_ca) +{ + mlnx_hca_t *p_hca; + ib_api_status_t status = IB_NOT_FOUND; + struct ib_device *p_ibdev; + + HCA_ENTER(HCA_DBG_SHIM); + HCA_PRINT(TRACE_LEVEL_INFORMATION ,HCA_DBG_SHIM, + ("context 0x%p\n", ca_context)); + + // find CA object + p_hca = mlnx_hca_from_guid( ca_guid ); + if( !p_hca ) { + if (status != IB_SUCCESS) + { + HCA_PRINT(TRACE_LEVEL_ERROR , HCA_DBG_SHIM, + ("completes with ERROR status IB_NOT_FOUND\n")); + } + HCA_EXIT(HCA_DBG_SHIM); + return IB_NOT_FOUND; + } + + p_ibdev = hca2ibdev(p_hca); + + if (hca_is_livefish(hca2fdo(p_hca))) + goto done; + + HCA_PRINT(TRACE_LEVEL_INFORMATION ,HCA_DBG_SHIM, + ("context 0x%p\n", ca_context)); + status = mlnx_set_cb(p_hca, + pfn_completion_cb, + pfn_async_event_cb, + ca_context); + if (IB_SUCCESS != status) { + goto err_set_cb; + } + + + //TODO: do we need something for kernel users ? 
+ + // Return pointer to HCA object +done: + if (ph_ca) *ph_ca = (ib_ca_handle_t)p_hca; + status = IB_SUCCESS; + +//err_mad_cache: +err_set_cb: + if (status != IB_SUCCESS) + { + HCA_PRINT(TRACE_LEVEL_ERROR,HCA_DBG_SHIM, + ("completes with ERROR status %x\n", status)); + } + HCA_EXIT(HCA_DBG_SHIM); + return status; +} + +ib_api_status_t +mlnx_query_ca ( + IN const ib_ca_handle_t h_ca, + OUT ib_ca_attr_t *p_ca_attr, + IN OUT uint32_t *p_byte_count, + IN OUT ci_umv_buf_t *p_umv_buf ) +{ + int i; + int err; + ib_api_status_t status; + uint32_t size, required_size; + int port_num, num_ports; + uint32_t num_gids, num_pkeys; + uint32_t num_page_sizes = 1; // TBD: what is actually supported + uint8_t *last_p; + struct ib_device_attr props; + struct ib_port_attr *hca_ports = NULL; + mlnx_hca_t *p_hca = (mlnx_hca_t *)h_ca; + struct ib_device *p_ibdev = hca2ibdev(p_hca); + + + HCA_ENTER(HCA_DBG_SHIM); + + // sanity checks + if( p_umv_buf && p_umv_buf->command ) { + HCA_PRINT (TRACE_LEVEL_ERROR, HCA_DBG_SHIM ,("User mode is not supported yet\n")); + p_umv_buf->status = status = IB_UNSUPPORTED; + goto err_user_unsupported; + } + + if( !cl_is_blockable() ) { + status = IB_UNSUPPORTED; + goto err_unsupported; + } + + if (NULL == p_byte_count) { + status = IB_INVALID_PARAMETER; + goto err_byte_count; + } + + // query the device + if ( hca_is_livefish(hca2fdo(p_hca)) ) { + struct pci_dev *pdev = hca2pdev(p_hca); + props.max_pd = 1; + props.vendor_id = pdev->ven_id; + props.vendor_part_id = pdev->dev_id; + err = 0; + } + else + err = p_ibdev->query_device(p_ibdev, &props); + if (err) { + HCA_PRINT (TRACE_LEVEL_ERROR, HCA_DBG_SHIM, + ("ib_query_device failed (%d)\n",err)); + status = errno_to_iberr(err); + goto err_query_device; + } + + // alocate arrary for port properties + num_ports = p_ibdev->phys_port_cnt; /* Number of physical ports of the HCA */ + if ( num_ports ) + if (NULL == (hca_ports = cl_zalloc( num_ports * sizeof *hca_ports))) { + HCA_PRINT (TRACE_LEVEL_ERROR, HCA_DBG_SHIM, ("Failed to cl_zalloc ports array\n")); + status = IB_INSUFFICIENT_MEMORY; + goto err_alloc_ports; + } + + // start calculation of ib_ca_attr_t full size + num_gids = 0; + num_pkeys = 0; + required_size = PTR_ALIGN(sizeof(ib_ca_attr_t)) + + PTR_ALIGN(sizeof(uint32_t) * num_page_sizes) + + PTR_ALIGN(sizeof(ib_port_attr_t) * num_ports)+ + PTR_ALIGN(MLX4_BOARD_ID_LEN)+ + PTR_ALIGN(sizeof(uplink_info_t)); /* uplink info */ + + // get port properties + for (port_num = 0; port_num <= (end_port(p_ibdev) - start_port(p_ibdev)); ++port_num) { + // request + err = p_ibdev->query_port(p_ibdev, (u8)(port_num + start_port(p_ibdev)), &hca_ports[port_num]); + if (err) { + HCA_PRINT (TRACE_LEVEL_ERROR, HCA_DBG_SHIM, ("ib_query_port failed(%d) for port %d\n",err, port_num)); + status = errno_to_iberr(err); + goto err_query_port; + } + + // calculate GID table size + num_gids = hca_ports[port_num].gid_tbl_len; + size = PTR_ALIGN(sizeof(ib_gid_t) * num_gids); + required_size += size; + + // calculate pkeys table size + num_pkeys = hca_ports[port_num].pkey_tbl_len; + size = PTR_ALIGN(sizeof(uint16_t) * num_pkeys); + required_size += size; + } + + // resource sufficience check + if (NULL == p_ca_attr || *p_byte_count < required_size) { + *p_byte_count = required_size; + status = IB_INSUFFICIENT_MEMORY; + if ( p_ca_attr != NULL) { + HCA_PRINT (TRACE_LEVEL_ERROR,HCA_DBG_SHIM, + ("Failed *p_byte_count (%d) < required_size (%d)\n", *p_byte_count, required_size )); + } + goto err_insuff_mem; + } + + // Space is sufficient - setup table pointers + 
last_p = (uint8_t*)p_ca_attr; + last_p += PTR_ALIGN(sizeof(*p_ca_attr)); + + p_ca_attr->p_page_size = (uint32_t*)last_p; + last_p += PTR_ALIGN(num_page_sizes * sizeof(uint32_t)); + + p_ca_attr->p_port_attr = (ib_port_attr_t *)last_p; + last_p += PTR_ALIGN(num_ports * sizeof(ib_port_attr_t)); + + for (port_num = 0; port_num < num_ports; port_num++) { + p_ca_attr->p_port_attr[port_num].p_gid_table = (ib_gid_t *)last_p; + size = PTR_ALIGN(sizeof(ib_gid_t) * hca_ports[port_num].gid_tbl_len); + last_p += size; + + p_ca_attr->p_port_attr[port_num].p_pkey_table = (uint16_t *)last_p; + size = PTR_ALIGN(sizeof(uint16_t) * hca_ports[port_num].pkey_tbl_len); + last_p += size; + } + + //copy vendor specific data + cl_memcpy(last_p,hca2mdev(p_hca)->board_id, MLX4_BOARD_ID_LEN); + last_p += PTR_ALIGN(MLX4_BOARD_ID_LEN); + *(uplink_info_t*)last_p = hca2pdev(p_hca)->uplink_info; + last_p += PTR_ALIGN(sizeof(uplink_info_t)); /* uplink info */ + + // Separate the loops to ensure that table pointers are always setup + for (port_num = 0; port_num < num_ports; port_num++) { + + // get pkeys, using cache + for (i=0; i < hca_ports[port_num].pkey_tbl_len; ++i) { + err = p_ibdev->x.get_cached_pkey( p_ibdev, (u8)(port_num + start_port(p_ibdev)), i, + &p_ca_attr->p_port_attr[port_num].p_pkey_table[i] ); + if (err) { + status = errno_to_iberr(err); + HCA_PRINT (TRACE_LEVEL_ERROR,HCA_DBG_SHIM, + ("ib_get_cached_pkey failed (%d) for port_num %d, index %d\n", + err, port_num + start_port(p_ibdev), i)); + goto err_get_pkey; + } + } + + // get gids, using cache + for (i=0; i < hca_ports[port_num].gid_tbl_len; ++i) { + union ib_gid * gid = (union ib_gid *)&p_ca_attr->p_port_attr[port_num].p_gid_table[i]; + err = p_ibdev->x.get_cached_gid( p_ibdev, (u8)(port_num + start_port(p_ibdev)), i, (union ib_gid *)gid ); + //TODO: do we need to convert gids to little endian + if (err) { + status = errno_to_iberr(err); + HCA_PRINT (TRACE_LEVEL_ERROR, HCA_DBG_SHIM, + ("ib_get_cached_gid failed (%d) for port_num %d, index %d\n", + err, port_num + start_port(p_ibdev), i)); + goto err_get_gid; + } + } + + HCA_PRINT(TRACE_LEVEL_VERBOSE, HCA_DBG_SHIM,("port %d gid0:\n", port_num)); + HCA_PRINT(TRACE_LEVEL_VERBOSE, HCA_DBG_SHIM, + (" 0x%x%x%x%x%x%x%x%x-0x%x%x%x%x%x%x%x%x\n", + p_ca_attr->p_port_attr[port_num].p_gid_table[0].raw[0], + p_ca_attr->p_port_attr[port_num].p_gid_table[0].raw[1], + p_ca_attr->p_port_attr[port_num].p_gid_table[0].raw[2], + p_ca_attr->p_port_attr[port_num].p_gid_table[0].raw[3], + p_ca_attr->p_port_attr[port_num].p_gid_table[0].raw[4], + p_ca_attr->p_port_attr[port_num].p_gid_table[0].raw[5], + p_ca_attr->p_port_attr[port_num].p_gid_table[0].raw[6], + p_ca_attr->p_port_attr[port_num].p_gid_table[0].raw[7], + p_ca_attr->p_port_attr[port_num].p_gid_table[0].raw[8], + p_ca_attr->p_port_attr[port_num].p_gid_table[0].raw[9], + p_ca_attr->p_port_attr[port_num].p_gid_table[0].raw[10], + p_ca_attr->p_port_attr[port_num].p_gid_table[0].raw[11], + p_ca_attr->p_port_attr[port_num].p_gid_table[0].raw[12], + p_ca_attr->p_port_attr[port_num].p_gid_table[0].raw[13], + p_ca_attr->p_port_attr[port_num].p_gid_table[0].raw[14], + p_ca_attr->p_port_attr[port_num].p_gid_table[0].raw[15])); + } + + // set result size + p_ca_attr->size = required_size; + CL_ASSERT( required_size == (((uintn_t)last_p) - ((uintn_t)p_ca_attr)) ); + HCA_PRINT(TRACE_LEVEL_VERBOSE, HCA_DBG_SHIM , ("Space required %d used %d\n", + required_size, (int)((uintn_t)last_p - (uintn_t)p_ca_attr) )); + + // !!! GID/PKEY tables must be queried before this call !!! 
+ from_hca_cap(p_ibdev, &props, hca_ports, p_ca_attr); + + status = IB_SUCCESS; + +err_get_gid: +err_get_pkey: +err_insuff_mem: +err_query_port: + if (hca_ports) + cl_free(hca_ports); +err_alloc_ports: +err_query_device: +err_byte_count: +err_unsupported: +err_user_unsupported: + if( status != IB_INSUFFICIENT_MEMORY && status != IB_SUCCESS ) + HCA_PRINT(TRACE_LEVEL_ERROR, HCA_DBG_SHIM, + ("completes with ERROR status %x\n", status)); + HCA_EXIT(HCA_DBG_SHIM); + return status; +} + +ib_api_status_t +mlnx_modify_ca ( + IN const ib_ca_handle_t h_ca, + IN const uint8_t port_num, + IN const ib_ca_mod_t modca_cmd, + IN const ib_port_attr_mod_t *p_port_attr) +{ +#define SET_CAP_MOD(al_mask, al_fld, ib) \ + if (modca_cmd & al_mask) { \ + if (p_port_attr->cap.##al_fld) \ + props.set_port_cap_mask |= ib; \ + else \ + props.clr_port_cap_mask |= ib; \ + } + + ib_api_status_t status; + int err; + struct ib_port_modify props; + int port_modify_mask = 0; + mlnx_hca_t *p_hca = (mlnx_hca_t *)h_ca; + struct ib_device *p_ibdev = hca2ibdev(p_hca); + + HCA_ENTER(HCA_DBG_SHIM); + + //sanity check + if( !cl_is_blockable() ) { + status = IB_UNSUPPORTED; + goto err_unsupported; + } + + if (port_num < start_port(p_ibdev) || port_num > end_port(p_ibdev)) { + status = IB_INVALID_PORT; + goto err_port; + } + + // prepare parameters + RtlZeroMemory(&props, sizeof(props)); + SET_CAP_MOD(IB_CA_MOD_IS_SM, sm, IB_PORT_SM); + SET_CAP_MOD(IB_CA_MOD_IS_SNMP_SUPPORTED, snmp, IB_PORT_SNMP_TUNNEL_SUP); + SET_CAP_MOD(IB_CA_MOD_IS_DEV_MGMT_SUPPORTED, dev_mgmt, IB_PORT_DEVICE_MGMT_SUP); + SET_CAP_MOD(IB_CA_MOD_IS_VEND_SUPPORTED, vend, IB_PORT_VENDOR_CLASS_SUP); + if ((modca_cmd & IB_CA_MOD_QKEY_CTR) && (p_port_attr->qkey_ctr == 0)) + port_modify_mask |= IB_PORT_RESET_QKEY_CNTR; + + // modify port + err = p_ibdev->modify_port(p_ibdev, port_num, port_modify_mask, &props ); + if (err) { + status = errno_to_iberr(err); + HCA_PRINT(TRACE_LEVEL_ERROR , HCA_DBG_SHIM ,("ib_modify_port failed (%d) \n",err)); + goto err_modify_port; + } + + status = IB_SUCCESS; + +err_modify_port: +err_port: +err_unsupported: + if (status != IB_SUCCESS) + { + HCA_PRINT(TRACE_LEVEL_ERROR, HCA_DBG_SHIM, + ("completes with ERROR status %x\n", status)); + } + HCA_EXIT(HCA_DBG_SHIM); + return status; +} + +ib_api_status_t +mlnx_close_ca ( + IN ib_ca_handle_t h_ca) +{ + mlnx_hca_t *p_hca = (mlnx_hca_t *)h_ca; + HCA_ENTER(HCA_DBG_SHIM); + + + if (hca_is_livefish(hca2fdo(p_hca))) + goto done; + + mlnx_reset_cb(p_hca); + +done: + HCA_EXIT(HCA_DBG_SHIM); + + return IB_SUCCESS; +} + + + +void +mlnx_ca_if( + IN OUT ci_interface_t *p_interface ) +{ + p_interface->open_ca = mlnx_open_ca; + p_interface->modify_ca = mlnx_modify_ca; + p_interface->query_ca = mlnx_query_ca; + p_interface->close_ca = mlnx_close_ca; +} + +void +mlnx_ca_if_livefish( + IN OUT ci_interface_t *p_interface ) +{ + p_interface->open_ca = mlnx_open_ca; + p_interface->query_ca = mlnx_query_ca; + p_interface->close_ca = mlnx_close_ca; +} + diff --git a/trunk/hw/mlx4/user/hca/verbs.c b/trunk/hw/mlx4/user/hca/verbs.c index 9a16c634..be059099 100644 --- a/trunk/hw/mlx4/user/hca/verbs.c +++ b/trunk/hw/mlx4/user/hca/verbs.c @@ -1,1615 +1,1615 @@ -/* - * Copyright (c) 2007 Cisco, Inc. All rights reserved. - * - * This software is available to you under a choice of one of two - * licenses. 
You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - -#include "mlx4.h" -#include "verbs.h" -#include "mx_abi.h" -#include "wqe.h" -#include "mlx4_debug.h" - -#if defined(EVENT_TRACING) -#include "verbs.tmh" -#endif - -ib_api_status_t -mlx4_pre_open_ca ( - IN const ib_net64_t ca_guid, - IN OUT ci_umv_buf_t *p_umv_buf, - OUT ib_ca_handle_t *ph_uvp_ca ) -{ - struct ibv_context *context; - ib_api_status_t status = IB_SUCCESS; - - UNREFERENCED_PARAMETER(ca_guid); - - context = mlx4_alloc_context(); - if (!context) { - status = IB_INSUFFICIENT_MEMORY; - goto end; - } - - if( p_umv_buf ) - { - if( !p_umv_buf->p_inout_buf ) - { - p_umv_buf->p_inout_buf = cl_zalloc( sizeof(struct ibv_get_context_resp) ); - if( !p_umv_buf->p_inout_buf ) - { - status = IB_INSUFFICIENT_MEMORY; - goto end; - } - } - p_umv_buf->input_size = 0; - p_umv_buf->output_size = sizeof(struct ibv_get_context_resp); - p_umv_buf->command = TRUE; - } - - *ph_uvp_ca = (ib_ca_handle_t)context; - -end: - return status; -} - -ib_api_status_t -mlx4_post_open_ca ( - IN const ib_net64_t ca_guid, - IN ib_api_status_t ioctl_status, - IN OUT ib_ca_handle_t *ph_uvp_ca, - IN ci_umv_buf_t *p_umv_buf ) -{ - struct ibv_get_context_resp *p_resp; - struct ibv_context *context = (struct ibv_context *)*ph_uvp_ca; - ib_api_status_t status = IB_SUCCESS; - - UNREFERENCED_PARAMETER(ca_guid); - - CL_ASSERT(p_umv_buf && p_umv_buf->p_inout_buf); - - p_resp = p_umv_buf->p_inout_buf; - - if (IB_SUCCESS == ioctl_status) - { - if (!mlx4_fill_context(context, p_resp)) - { - status = IB_INSUFFICIENT_RESOURCES; - goto end; - } - } - -end: - cl_free(p_resp); - return status; -} - -ib_api_status_t -mlx4_pre_query_ca ( - IN ib_ca_handle_t h_uvp_ca, - IN ib_ca_attr_t *p_ca_attr, - IN size_t byte_count, - IN ci_umv_buf_t *p_umv_buf ) -{ - ib_api_status_t status = IB_SUCCESS; - - UNREFERENCED_PARAMETER(h_uvp_ca); - - /* Note that query_ca calls *always* get their attributes from the kernel. - * - * Assume if user buffer is valid then byte_cnt is valid too - * so we can preallocate ca attr buffer for post ioctl data saving - * - * Note that we squirrel the buffer away into the umv_buf and only - * set it into the HCA if the query is successful. 
- */ - if ( p_ca_attr != NULL ) - { - p_umv_buf->p_inout_buf = cl_malloc(byte_count); - if ( !p_umv_buf->p_inout_buf ) - { - status = IB_INSUFFICIENT_RESOURCES; - goto end; - } - } - -end: - return status; -} - -void -__fixup_ca_attr( - IN ib_ca_attr_t* const p_dest, - IN const ib_ca_attr_t* const p_src ) -{ - uint8_t i; - uintn_t offset = (uintn_t)p_dest - (uintn_t)p_src; - ib_port_attr_t *p_tmp_port_attr = NULL; - - CL_ASSERT( p_dest ); - CL_ASSERT( p_src ); - - /* Fix up the pointers to point within the destination buffer. */ - p_dest->p_page_size = - (uint32_t* VOID_PTR64)(((uint8_t* VOID_PTR64)p_dest->p_page_size) + offset); - - p_tmp_port_attr = - (ib_port_attr_t* VOID_PTR64)(((uint8_t* VOID_PTR64)p_dest->p_port_attr) + offset); - - /* Fix up each port attribute's gid and pkey table pointers. */ - for( i = 0; i < p_dest->num_ports; i++ ) - { - p_tmp_port_attr[i].p_gid_table = (ib_gid_t* VOID_PTR64) - (((uint8_t* VOID_PTR64)p_tmp_port_attr[i].p_gid_table) + offset); - - p_tmp_port_attr[i].p_pkey_table =(ib_net16_t* VOID_PTR64) - (((uint8_t* VOID_PTR64)p_tmp_port_attr[i].p_pkey_table) + offset); - } - p_dest->p_port_attr = p_tmp_port_attr; -} - -void -mlx4_post_query_ca ( - IN ib_ca_handle_t h_uvp_ca, - IN ib_api_status_t ioctl_status, - IN ib_ca_attr_t *p_ca_attr, - IN size_t byte_count, - IN ci_umv_buf_t *p_umv_buf ) -{ - struct ibv_context *context = (struct ibv_context *)h_uvp_ca; - - CL_ASSERT(context && p_umv_buf); - - if ( ioctl_status == IB_SUCCESS && p_ca_attr && byte_count) - { - CL_ASSERT( byte_count >= p_ca_attr->size ); - - pthread_mutex_lock(&context->mutex); - - if (context->p_hca_attr) - cl_free(context->p_hca_attr); - context->p_hca_attr = p_umv_buf->p_inout_buf; - cl_memcpy( context->p_hca_attr, p_ca_attr, p_ca_attr->size ); - __fixup_ca_attr( context->p_hca_attr, p_ca_attr ); - - pthread_mutex_unlock(&context->mutex); - } - else if (p_umv_buf->p_inout_buf) - { - cl_free(p_umv_buf->p_inout_buf); - } -} - -ib_api_status_t -mlx4_post_close_ca ( - IN ib_ca_handle_t h_uvp_ca, - IN ib_api_status_t ioctl_status ) -{ - struct ibv_context *context = (struct ibv_context *)h_uvp_ca; - - CL_ASSERT(context); - - if (IB_SUCCESS == ioctl_status) - mlx4_free_context(context); - - return IB_SUCCESS; -} - -ib_api_status_t -mlx4_pre_alloc_pd ( - IN const ib_ca_handle_t h_uvp_ca, - IN OUT ci_umv_buf_t *p_umv_buf, - OUT ib_pd_handle_t *ph_uvp_pd ) -{ - struct mlx4_pd *pd; - struct ibv_context *context = (struct ibv_context *)h_uvp_ca; - ib_api_status_t status = IB_SUCCESS; - - CL_ASSERT(context && p_umv_buf); - - if( !p_umv_buf->p_inout_buf ) - { - p_umv_buf->p_inout_buf = cl_malloc( sizeof(struct ibv_alloc_pd_resp) ); - if( !p_umv_buf->p_inout_buf ) - { - status = IB_INSUFFICIENT_MEMORY; - goto end; - } - } - p_umv_buf->input_size = 0; - p_umv_buf->output_size = sizeof(struct ibv_alloc_pd_resp); - p_umv_buf->command = TRUE; - - // Mlx4 code: - - pd = cl_malloc(sizeof *pd); - if (!pd) { - status = IB_INSUFFICIENT_MEMORY; - goto end; - } - - pd->ibv_pd.context = context; - - *ph_uvp_pd = (ib_pd_handle_t)&pd->ibv_pd; - -end: - return status; -} - -void -mlx4_post_alloc_pd ( - IN ib_ca_handle_t h_uvp_ca, - IN ib_api_status_t ioctl_status, - IN OUT ib_pd_handle_t *ph_uvp_pd, - IN ci_umv_buf_t *p_umv_buf ) -{ - struct ibv_pd *pd = (struct ibv_pd *)*ph_uvp_pd; - struct ibv_alloc_pd_resp *p_resp; - - - UNREFERENCED_PARAMETER(h_uvp_ca); - - CL_ASSERT(p_umv_buf && p_umv_buf->p_inout_buf); - - p_resp = p_umv_buf->p_inout_buf; - - if (IB_SUCCESS == ioctl_status) - { - // Mlx4 code: - - pd->handle = 
p_resp->pd_handle; - to_mpd(pd)->pdn = p_resp->pdn; - } - else - { - cl_free(to_mpd(pd)); - } - - cl_free(p_resp); - return; -} - -void -mlx4_post_free_pd ( - IN const ib_pd_handle_t h_uvp_pd, - IN ib_api_status_t ioctl_status ) -{ - struct ibv_pd *pd = (struct ibv_pd *)h_uvp_pd; - - CL_ASSERT(pd); - - if (IB_SUCCESS == ioctl_status) - cl_free(to_mpd(pd)); -} - -static int __align_queue_size(int req) -{ - int nent; - - for (nent = 1; nent < req; nent <<= 1) - ; /* nothing */ - - return nent; -} - -ib_api_status_t -mlx4_pre_create_cq ( - IN const ib_ca_handle_t h_uvp_ca, - IN OUT uint32_t* const p_size, - IN OUT ci_umv_buf_t *p_umv_buf, - OUT ib_cq_handle_t *ph_uvp_cq ) -{ - struct mlx4_cq *cq; - struct ibv_create_cq *p_create_cq; - struct ibv_context *context = (struct ibv_context *)h_uvp_ca; - ib_api_status_t status = IB_SUCCESS; - int size = max( sizeof(struct ibv_create_cq), sizeof(struct ibv_create_cq_resp) ); - - CL_ASSERT(h_uvp_ca && p_umv_buf); - - if( !p_umv_buf->p_inout_buf ) - { - p_umv_buf->p_inout_buf = cl_malloc( size ); - if( !p_umv_buf->p_inout_buf ) - { - status = IB_INSUFFICIENT_MEMORY; - goto err_umv_buf; - } - } - p_umv_buf->input_size = sizeof(struct ibv_create_cq); - p_umv_buf->output_size = sizeof(struct ibv_create_cq_resp); - p_umv_buf->command = TRUE; - - p_create_cq = p_umv_buf->p_inout_buf; - - // Mlx4 code: - - /* Sanity check CQ size before proceeding */ - if (*p_size > 0x3fffff) { - status = IB_INVALID_CQ_SIZE; - goto err_cqe_size; - } - - cq = cl_malloc(sizeof *cq); - if (!cq) { - status = IB_INSUFFICIENT_MEMORY; - goto err_cq; - } - - if (cl_spinlock_init(&cq->lock)) { - status = IB_INSUFFICIENT_MEMORY; - goto err_lock; - } - - *p_size = __align_queue_size(*p_size + 1); - - if (mlx4_alloc_buf(&cq->buf, *p_size * MLX4_CQ_ENTRY_SIZE, - context->page_size)) - goto err_alloc_buf; - - cq->ibv_cq.context = context; - cq->cons_index = 0; - - cq->set_ci_db = mlx4_alloc_db(to_mctx(context), MLX4_DB_TYPE_CQ); - if (!cq->set_ci_db) - goto err_alloc_db; - - cq->arm_db = cq->set_ci_db + 1; - *cq->arm_db = 0; - cq->arm_sn = 1; - *cq->set_ci_db = 0; - - p_create_cq->buf_addr = (uintptr_t) cq->buf.buf; - p_create_cq->db_addr = (uintptr_t) cq->set_ci_db; - p_create_cq->arm_sn_addr = (uintptr_t) &cq->arm_sn; - p_create_cq->cqe = --(*p_size); - - *ph_uvp_cq = (ib_cq_handle_t)&cq->ibv_cq; - goto end; - -err_alloc_db: - mlx4_free_buf(&cq->buf); -err_alloc_buf: - cl_spinlock_destroy(&cq->lock); -err_lock: - cl_free(cq); -err_cq: -err_cqe_size: - cl_free(p_umv_buf->p_inout_buf); -err_umv_buf: -end: - return status; -} - -void -mlx4_post_create_cq ( - IN const ib_ca_handle_t h_uvp_ca, - IN ib_api_status_t ioctl_status, - IN const uint32_t size, - IN OUT ib_cq_handle_t *ph_uvp_cq, - IN ci_umv_buf_t *p_umv_buf ) -{ - struct ibv_cq *cq = (struct ibv_cq *)*ph_uvp_cq; - struct ibv_create_cq_resp *p_resp; - - UNREFERENCED_PARAMETER(h_uvp_ca); - UNREFERENCED_PARAMETER(size); - - CL_ASSERT(p_umv_buf && p_umv_buf->p_inout_buf); - - p_resp = p_umv_buf->p_inout_buf; - - if (IB_SUCCESS == ioctl_status) - { - // Mlx4 code: - - to_mcq(cq)->cqn = p_resp->cqn; - cq->cqe = p_resp->cqe; - cq->handle = p_resp->cq_handle; - } - else - { - mlx4_post_destroy_cq (*ph_uvp_cq, IB_SUCCESS); - } - - cl_free(p_resp); - return; -} - -ib_api_status_t -mlx4_pre_query_cq ( - IN const ib_cq_handle_t h_uvp_cq, - OUT uint32_t* const p_size, - IN OUT ci_umv_buf_t *p_umv_buf ) -{ - struct ibv_cq *cq = (struct ibv_cq *)h_uvp_cq; - - UNREFERENCED_PARAMETER(p_umv_buf); - - *p_size = cq->cqe; - - return 
IB_VERBS_PROCESSING_DONE; -} - -void -mlx4_post_destroy_cq ( - IN const ib_cq_handle_t h_uvp_cq, - IN ib_api_status_t ioctl_status ) -{ - struct ibv_cq *cq = (struct ibv_cq *)h_uvp_cq; - - CL_ASSERT(cq); - - if (IB_SUCCESS == ioctl_status) { - mlx4_free_db(to_mctx(cq->context), MLX4_DB_TYPE_CQ, to_mcq(cq)->set_ci_db); - mlx4_free_buf(&to_mcq(cq)->buf); - - cl_spinlock_destroy(&to_mcq(cq)->lock); - cl_free(to_mcq(cq)); - } -} - -ib_api_status_t -mlx4_pre_create_srq ( - IN const ib_pd_handle_t h_uvp_pd, - IN const ib_srq_attr_t *p_srq_attr, - IN OUT ci_umv_buf_t *p_umv_buf, - OUT ib_srq_handle_t *ph_uvp_srq ) -{ - struct mlx4_srq *srq; - struct ibv_create_srq *p_create_srq; - struct ibv_pd *pd = (struct ibv_pd *)h_uvp_pd; - ib_api_status_t status = IB_SUCCESS; - size_t size = max( sizeof(struct ibv_create_srq), sizeof(struct ibv_create_srq_resp) ); - - CL_ASSERT(p_umv_buf); - - if( !p_umv_buf->p_inout_buf ) - { - p_umv_buf->p_inout_buf = cl_malloc( size ); - if( !p_umv_buf->p_inout_buf ) - { - status = IB_INSUFFICIENT_MEMORY; - goto err_memory; - } - } - p_umv_buf->input_size = sizeof(struct ibv_create_srq); - p_umv_buf->output_size = sizeof(struct ibv_create_srq_resp); - p_umv_buf->command = TRUE; - - p_create_srq = p_umv_buf->p_inout_buf; - - // Mlx4 code: - - /* Sanity check SRQ size before proceeding */ - if (p_srq_attr->max_wr > 1 << 16 || p_srq_attr->max_sge > 64) - { - status = IB_INVALID_PARAMETER; - goto err_params; - } - - srq = cl_malloc(sizeof *srq); - if (!srq) { - status = IB_INSUFFICIENT_MEMORY; - goto err_alloc_srq; - } - - if (cl_spinlock_init(&srq->lock)) { - status = IB_INSUFFICIENT_MEMORY; - goto err_lock; - } - - srq->ibv_srq.pd = pd; - srq->ibv_srq.context = pd->context; - - srq->max = __align_queue_size(p_srq_attr->max_wr + 1); - srq->max_gs = p_srq_attr->max_sge; - srq->counter = 0; - - if (mlx4_alloc_srq_buf(pd, (struct ibv_srq_attr *)p_srq_attr, srq)) - { - status = IB_INSUFFICIENT_MEMORY; - goto err_alloc_buf; - } - - srq->db = mlx4_alloc_db(to_mctx(pd->context), MLX4_DB_TYPE_RQ); - if (!srq->db) - goto err_alloc_db; - - *srq->db = 0; - - // fill the parameters for ioctl - p_create_srq->buf_addr = (uintptr_t) srq->buf.buf; - p_create_srq->db_addr = (uintptr_t) srq->db; - p_create_srq->pd_handle = pd->handle; - p_create_srq->max_wr = p_srq_attr->max_wr; - p_create_srq->max_sge = p_srq_attr->max_sge; - p_create_srq->srq_limit = p_srq_attr->srq_limit; - - *ph_uvp_srq = (ib_srq_handle_t)&srq->ibv_srq; - goto end; - -err_alloc_db: - cl_free(srq->wrid); - mlx4_free_buf(&srq->buf); -err_alloc_buf: - cl_spinlock_destroy(&srq->lock); -err_lock: - cl_free(srq); -err_alloc_srq: - cl_free(p_umv_buf->p_inout_buf); -err_params: err_memory: -end: - return status; -} - -void -mlx4_post_create_srq ( - IN const ib_pd_handle_t h_uvp_pd, - IN ib_api_status_t ioctl_status, - IN OUT ib_srq_handle_t *ph_uvp_srq, - IN ci_umv_buf_t *p_umv_buf ) -{ - struct ibv_srq *ibsrq = (struct ibv_srq *)*ph_uvp_srq; - struct mlx4_srq *srq = to_msrq(ibsrq); - struct ibv_create_srq_resp *p_resp; - - UNREFERENCED_PARAMETER(h_uvp_pd); - - CL_ASSERT(p_umv_buf && p_umv_buf->p_inout_buf); - - p_resp = p_umv_buf->p_inout_buf; - - if (IB_SUCCESS == ioctl_status) - { - // Mlx4 code: - - srq->srqn = p_resp->srqn; - ibsrq->handle = p_resp->srq_handle; - - srq->max = p_resp->max_wr; - srq->max_gs = p_resp->max_sge; - } - else - { - mlx4_post_destroy_srq (*ph_uvp_srq, IB_SUCCESS); - } - - cl_free(p_resp); - return; -} - -ib_api_status_t -mlx4_pre_destroy_srq ( - IN const ib_srq_handle_t h_uvp_srq ) -{ -#ifdef 
XRC_SUPPORT - struct ibv_srq *ibsrq = (struct ibv_srq *)h_uvp_srq; - struct mlx4_srq *srq = to_msrq(ibsrq); - struct mlx4_cq *mcq = NULL; - - if (ibsrq->xrc_cq) - { - /* is an xrc_srq */ - mcq = to_mcq(ibsrq->xrc_cq); - mlx4_cq_clean(mcq, 0, srq); - cl_spinlock_acquire(&mcq->lock); - mlx4_clear_xrc_srq(to_mctx(ibsrq->context), srq->srqn); - cl_spinlock_release(&mcq->lock); - } -#else - UNUSED_PARAM(h_uvp_srq); -#endif - return IB_SUCCESS; -} - -void -mlx4_post_destroy_srq ( - IN const ib_srq_handle_t h_uvp_srq, - IN ib_api_status_t ioctl_status ) -{ - struct ibv_srq *ibsrq = (struct ibv_srq *)h_uvp_srq; - struct mlx4_srq *srq = to_msrq(ibsrq); - - CL_ASSERT(srq); - - if (IB_SUCCESS == ioctl_status) - { - mlx4_free_db(to_mctx(ibsrq->context), MLX4_DB_TYPE_RQ, srq->db); - cl_free(srq->wrid); - mlx4_free_buf(&srq->buf); - cl_spinlock_destroy(&srq->lock); - cl_free(srq); - } - else - { -#ifdef XRC_SUPPORT - if (ibsrq->xrc_cq) { - /* is an xrc_srq */ - struct mlx4_cq *mcq = to_mcq(ibsrq->xrc_cq); - cl_spinlock_acquire(&mcq->lock); - mlx4_store_xrc_srq(to_mctx(ibsrq->context), srq->srqn, srq); - cl_spinlock_release(&mcq->lock); - } -#endif - } -} - -static enum ibv_qp_type -__to_qp_type(ib_qp_type_t type) -{ - switch (type) { - case IB_QPT_RELIABLE_CONN: return IBV_QPT_RC; - case IB_QPT_UNRELIABLE_CONN: return IBV_QPT_UC; - case IB_QPT_UNRELIABLE_DGRM: return IBV_QPT_UD; -#ifdef XRC_SUPPORT - //case IB_QPT_XRC_CONN: return IBV_QPT_XRC; -#endif - default: return IBV_QPT_RC; - } -} - -ib_api_status_t -mlx4_pre_create_qp ( - IN const ib_pd_handle_t h_uvp_pd, - IN const ib_qp_create_t *p_create_attr, - IN OUT ci_umv_buf_t *p_umv_buf, - OUT ib_qp_handle_t *ph_uvp_qp ) -{ - struct ibv_pd *pd = (struct ibv_pd *)h_uvp_pd; - struct mlx4_context *context = to_mctx(pd->context); - struct mlx4_qp *qp; - struct ibv_create_qp *p_create_qp; - struct ibv_qp_init_attr attr; - ib_api_status_t status = IB_SUCCESS; - int size = max( sizeof(struct ibv_create_qp), sizeof(struct ibv_create_qp_resp) ); - - CL_ASSERT(p_umv_buf); - - if( !p_umv_buf->p_inout_buf ) - { - p_umv_buf->p_inout_buf = cl_malloc(size); - if( !p_umv_buf->p_inout_buf ) - { - status = IB_INSUFFICIENT_MEMORY; - goto err_memory; - } - } - p_umv_buf->input_size = sizeof(struct ibv_create_qp); - p_umv_buf->output_size = sizeof(struct ibv_create_qp_resp); - p_umv_buf->command = TRUE; - - p_create_qp = p_umv_buf->p_inout_buf; - - /* convert attributes */ - memset( &attr, 0, sizeof(attr) ); - attr.send_cq = (struct ibv_cq *)p_create_attr->h_sq_cq; - attr.recv_cq = (struct ibv_cq *)p_create_attr->h_rq_cq; - attr.srq = (struct ibv_srq*)p_create_attr->h_srq; - attr.cap.max_send_wr = p_create_attr->sq_depth; - attr.cap.max_recv_wr = p_create_attr->rq_depth; - attr.cap.max_send_sge = p_create_attr->sq_sge; - attr.cap.max_recv_sge = p_create_attr->rq_sge; - attr.cap.max_inline_data = p_create_attr->sq_max_inline; - attr.qp_type = __to_qp_type(p_create_attr->qp_type); - attr.sq_sig_all = p_create_attr->sq_signaled; - - // Mlx4 code: - - /* Sanity check QP size before proceeding */ - if (attr.cap.max_send_wr > (uint32_t) context->max_qp_wr || - attr.cap.max_recv_wr > (uint32_t) context->max_qp_wr || - attr.cap.max_send_sge > (uint32_t) context->max_sge || - attr.cap.max_recv_sge > (uint32_t) context->max_sge || - attr.cap.max_inline_data > 1024) - { - status = IB_INVALID_PARAMETER; - goto end; - } - - qp = cl_malloc(sizeof *qp); - if (!qp) { - status = IB_INSUFFICIENT_MEMORY; - goto err_alloc_qp; - } - - mlx4_calc_sq_wqe_size(&attr.cap, attr.qp_type, qp); - - /* - 
* We need to leave 2 KB + 1 WQE of headroom in the SQ to - * allow HW to prefetch. - */ - qp->sq_spare_wqes = (2048 >> qp->sq.wqe_shift) + 1; - qp->sq.wqe_cnt = __align_queue_size(attr.cap.max_send_wr + qp->sq_spare_wqes); - qp->rq.wqe_cnt = __align_queue_size(attr.cap.max_recv_wr); - - if (attr.srq || attr.qp_type == IBV_QPT_XRC) - attr.cap.max_recv_wr = qp->rq.wqe_cnt = 0; - else - { - if (attr.cap.max_recv_sge < 1) - attr.cap.max_recv_sge = 1; - if (attr.cap.max_recv_wr < 1) - attr.cap.max_recv_wr = 1; - } - - if (mlx4_alloc_qp_buf(pd, &attr.cap, attr.qp_type, qp)) - goto err_alloc_qp_buff; - - mlx4_init_qp_indices(qp); - - if (cl_spinlock_init(&qp->sq.lock)) { - status = IB_INSUFFICIENT_MEMORY; - goto err_spinlock_sq; - } - if (cl_spinlock_init(&qp->rq.lock)) { - status = IB_INSUFFICIENT_MEMORY; - goto err_spinlock_rq; - } - - // fill qp fields - if (!attr.srq && attr.qp_type != IBV_QPT_XRC) { - qp->db = mlx4_alloc_db(context, MLX4_DB_TYPE_RQ); - if (!qp->db) { - status = IB_INSUFFICIENT_MEMORY; - goto err_db; - } - - *qp->db = 0; - } - if (attr.sq_sig_all) - qp->sq_signal_bits = cl_hton32(MLX4_WQE_CTRL_CQ_UPDATE); - else - qp->sq_signal_bits = 0; - - // fill the rest of qp fields - qp->ibv_qp.pd = pd; - qp->ibv_qp.context= pd->context; - qp->ibv_qp.send_cq = attr.send_cq; - qp->ibv_qp.recv_cq = attr.recv_cq; - qp->ibv_qp.srq = attr.srq; - qp->ibv_qp.state = IBV_QPS_RESET; - qp->ibv_qp.qp_type = attr.qp_type; - - // fill request fields - p_create_qp->buf_addr = (uintptr_t) qp->buf.buf; - if (!attr.srq && attr.qp_type != IBV_QPT_XRC) - p_create_qp->db_addr = (uintptr_t) qp->db; - else - p_create_qp->db_addr = 0; - - p_create_qp->pd_handle = pd->handle; - p_create_qp->send_cq_handle = attr.send_cq->handle; - p_create_qp->recv_cq_handle = attr.recv_cq->handle; - p_create_qp->srq_handle = attr.qp_type == IBV_QPT_XRC ? - (attr.xrc_domain ? attr.xrc_domain->handle : 0) : - (attr.srq ? attr.srq->handle : 0); - - p_create_qp->max_send_wr = attr.cap.max_send_wr; - p_create_qp->max_recv_wr = attr.cap.max_recv_wr; - p_create_qp->max_send_sge = attr.cap.max_send_sge; - p_create_qp->max_recv_sge = attr.cap.max_recv_sge; - p_create_qp->max_inline_data = attr.cap.max_inline_data; - p_create_qp->sq_sig_all = (uint8_t)attr.sq_sig_all; - p_create_qp->qp_type = attr.qp_type; - p_create_qp->is_srq = (uint8_t)(attr.qp_type == IBV_QPT_XRC ? 
- !!attr.xrc_domain : !!attr.srq); - - p_create_qp->log_sq_stride = (uint8_t)qp->sq.wqe_shift; - for (p_create_qp->log_sq_bb_count = 0; - qp->sq.wqe_cnt > 1 << p_create_qp->log_sq_bb_count; - ++p_create_qp->log_sq_bb_count) - ; /* nothing */ - p_create_qp->sq_no_prefetch = 0; - - *ph_uvp_qp = (ib_qp_handle_t)&qp->ibv_qp; - goto end; - -err_db: - cl_spinlock_destroy(&qp->rq.lock); -err_spinlock_rq: - cl_spinlock_destroy(&qp->sq.lock); -err_spinlock_sq: - cl_free(qp->sq.wrid); - if (qp->rq.wqe_cnt) - free(qp->rq.wrid); - mlx4_free_buf(&qp->buf); -err_alloc_qp_buff: - cl_free(qp); -err_alloc_qp: - cl_free(p_umv_buf->p_inout_buf); -err_memory: -end: - return status; -} - -ib_api_status_t -mlx4_post_create_qp ( - IN const ib_pd_handle_t h_uvp_pd, - IN ib_api_status_t ioctl_status, - IN OUT ib_qp_handle_t *ph_uvp_qp, - IN ci_umv_buf_t *p_umv_buf ) -{ - struct mlx4_qp *qp = (struct mlx4_qp *)*ph_uvp_qp; - struct ibv_pd *pd = (struct ibv_pd *)h_uvp_pd; - struct ibv_context *context = pd->context; - struct ibv_create_qp_resp *p_resp; - ib_api_status_t status = IB_SUCCESS; - - CL_ASSERT(p_umv_buf && p_umv_buf->p_inout_buf); - - p_resp = p_umv_buf->p_inout_buf; - - if (IB_SUCCESS == ioctl_status) - { - // Mlx4 code: - - struct ibv_qp_cap cap; - - cap.max_recv_sge = p_resp->max_recv_sge; - cap.max_send_sge = p_resp->max_send_sge; - cap.max_recv_wr = p_resp->max_recv_wr; - cap.max_send_wr = p_resp->max_send_wr; - cap.max_inline_data = p_resp->max_inline_data; - - qp->ibv_qp.handle = p_resp->qp_handle; - qp->ibv_qp.qp_num = p_resp->qpn; - - qp->rq.wqe_cnt = cap.max_recv_wr; - qp->rq.max_gs = cap.max_recv_sge; - - /* adjust rq maxima to not exceed reported device maxima */ - cap.max_recv_wr = min((uint32_t) to_mctx(context)->max_qp_wr, cap.max_recv_wr); - cap.max_recv_sge = min((uint32_t) to_mctx(context)->max_sge, cap.max_recv_sge); - - qp->rq.max_post = cap.max_recv_wr; - //qp->rq.max_gs = cap.max_recv_sge; - RIB : add this ? 
- mlx4_set_sq_sizes(qp, &cap, qp->ibv_qp.qp_type); - - qp->doorbell_qpn = cl_hton32(qp->ibv_qp.qp_num << 8); - - if (mlx4_store_qp(to_mctx(context), qp->ibv_qp.qp_num, qp)) - { - mlx4_post_destroy_qp(*ph_uvp_qp, IB_SUCCESS); - status = IB_INSUFFICIENT_MEMORY; - } - MLX4_PRINT( TRACE_LEVEL_INFORMATION, MLX4_DBG_QP, - ("qpn %#x, buf %p, db_rec %p, sq %d:%d, rq %d:%d\n", - qp->ibv_qp.qp_num, qp->buf.buf, qp->db, - qp->sq.head, qp->sq.tail, qp->rq.head, qp->rq.tail )); - } - else - { - mlx4_post_destroy_qp(*ph_uvp_qp, IB_SUCCESS); - } - - cl_free(p_resp); - return status; -} - -ib_api_status_t -mlx4_pre_modify_qp ( - IN const ib_qp_handle_t h_uvp_qp, - IN const ib_qp_mod_t *p_modify_attr, - IN OUT ci_umv_buf_t *p_umv_buf ) -{ - ib_api_status_t status = IB_SUCCESS; - - UNREFERENCED_PARAMETER(h_uvp_qp); - UNREFERENCED_PARAMETER(p_modify_attr); - - CL_ASSERT(p_umv_buf); - - if( !p_umv_buf->p_inout_buf ) - { - p_umv_buf->p_inout_buf = cl_malloc(sizeof(struct ibv_modify_qp_resp)); - if( !p_umv_buf->p_inout_buf ) - { - status = IB_INSUFFICIENT_MEMORY; - goto err_memory; - } - } - p_umv_buf->input_size = 0; - p_umv_buf->output_size = sizeof(struct ibv_modify_qp_resp); - p_umv_buf->command = TRUE; - -err_memory: - return status; -} - -void -mlx4_post_query_qp ( - IN ib_qp_handle_t h_uvp_qp, - IN ib_api_status_t ioctl_status, - IN OUT ib_qp_attr_t *p_query_attr, - IN OUT ci_umv_buf_t *p_umv_buf ) -{ - struct mlx4_qp *qp = (struct mlx4_qp *)h_uvp_qp; - - UNREFERENCED_PARAMETER(p_umv_buf); - - if(IB_SUCCESS == ioctl_status) - { - p_query_attr->sq_max_inline = qp->max_inline_data; - p_query_attr->sq_sge = qp->sq.max_gs; - p_query_attr->sq_depth = qp->sq.max_post; - p_query_attr->rq_sge = qp->rq.max_gs; - p_query_attr->rq_depth = qp->rq.max_post; - } -} - -void -mlx4_post_modify_qp ( - IN const ib_qp_handle_t h_uvp_qp, - IN ib_api_status_t ioctl_status, - IN OUT ci_umv_buf_t *p_umv_buf ) -{ - struct ibv_qp *qp = (struct ibv_qp *)h_uvp_qp; - struct ibv_modify_qp_resp *p_resp; - - CL_ASSERT(p_umv_buf && p_umv_buf->p_inout_buf); - - p_resp = p_umv_buf->p_inout_buf; - - if (IB_SUCCESS == ioctl_status) - { - // Mlx4 code: - - if (qp->state == IBV_QPS_RESET && - p_resp->attr_mask & IBV_QP_STATE && - p_resp->qp_state == IBV_QPS_INIT) - { - mlx4_qp_init_sq_ownership(to_mqp(qp)); - } - - if (p_resp->attr_mask & IBV_QP_STATE) { - qp->state = p_resp->qp_state; - } - - if (p_resp->attr_mask & IBV_QP_STATE && - p_resp->qp_state == IBV_QPS_RESET) - { - mlx4_cq_clean(to_mcq(qp->recv_cq), qp->qp_num, - qp->srq ? 
to_msrq(qp->srq) : NULL); - if (qp->send_cq != qp->recv_cq) - mlx4_cq_clean(to_mcq(qp->send_cq), qp->qp_num, NULL); - - mlx4_init_qp_indices(to_mqp(qp)); - if (!qp->srq && qp->qp_type != IBV_QPT_XRC) - *to_mqp(qp)->db = 0; - } - } - - cl_free (p_resp); - return; -} - -static void -__mlx4_lock_cqs(struct ibv_qp *qp) -{ - struct mlx4_cq *send_cq = to_mcq(qp->send_cq); - struct mlx4_cq *recv_cq = to_mcq(qp->recv_cq); - - if (send_cq == recv_cq) - cl_spinlock_acquire(&send_cq->lock); - else if (send_cq->cqn < recv_cq->cqn) { - cl_spinlock_acquire(&send_cq->lock); - cl_spinlock_acquire(&recv_cq->lock); - } else { - cl_spinlock_acquire(&recv_cq->lock); - cl_spinlock_acquire(&send_cq->lock); - } -} - -static void -__mlx4_unlock_cqs(struct ibv_qp *qp) -{ - struct mlx4_cq *send_cq = to_mcq(qp->send_cq); - struct mlx4_cq *recv_cq = to_mcq(qp->recv_cq); - - if (send_cq == recv_cq) - cl_spinlock_release(&send_cq->lock); - else if (send_cq->cqn < recv_cq->cqn) { - cl_spinlock_release(&recv_cq->lock); - cl_spinlock_release(&send_cq->lock); - } else { - cl_spinlock_release(&send_cq->lock); - cl_spinlock_release(&recv_cq->lock); - } -} - -ib_api_status_t -mlx4_pre_destroy_qp ( - IN const ib_qp_handle_t h_uvp_qp ) -{ - struct ibv_qp *qp = (struct ibv_qp*)h_uvp_qp; - - mlx4_cq_clean(to_mcq(qp->recv_cq), qp->qp_num, - qp->srq ? to_msrq(qp->srq) : NULL); - if (qp->send_cq != qp->recv_cq) - mlx4_cq_clean(to_mcq(qp->send_cq), qp->qp_num, NULL); - - __mlx4_lock_cqs(qp); - mlx4_clear_qp(to_mctx(qp->context), qp->qp_num); - __mlx4_unlock_cqs(qp); - - return IB_SUCCESS; -} - -void -mlx4_post_destroy_qp ( - IN const ib_qp_handle_t h_uvp_qp, - IN ib_api_status_t ioctl_status ) -{ - struct ibv_qp* ibqp = (struct ibv_qp *)h_uvp_qp; - struct mlx4_qp* qp = to_mqp(ibqp); - - CL_ASSERT(h_uvp_qp); - - if (IB_SUCCESS == ioctl_status) - { - if (!ibqp->srq && ibqp->qp_type != IBV_QPT_XRC) - mlx4_free_db(to_mctx(ibqp->context), MLX4_DB_TYPE_RQ, qp->db); - - cl_spinlock_destroy(&qp->sq.lock); - cl_spinlock_destroy(&qp->rq.lock); - - MLX4_PRINT( TRACE_LEVEL_INFORMATION, MLX4_DBG_QP, - ("qpn %#x, buf %p, sq %d:%d, rq %d:%d\n", qp->ibv_qp.qp_num, qp->buf.buf, - qp->sq.head, qp->sq.tail, qp->rq.head, qp->rq.tail )); - cl_free(qp->sq.wrid); - if (qp->rq.wqe_cnt) - cl_free(qp->rq.wrid); - mlx4_free_buf(&qp->buf); - cl_free(qp); - } - else - { - __mlx4_lock_cqs(ibqp); - mlx4_store_qp(to_mctx(ibqp->context), ibqp->qp_num, qp); - __mlx4_unlock_cqs(ibqp); - } -} - -void -mlx4_nd_modify_qp ( - IN const ib_qp_handle_t h_uvp_qp, - OUT void** pp_outbuf, - OUT DWORD* p_size ) -{ - struct ibv_qp *ibv_qp = (struct ibv_qp *)h_uvp_qp; - - *(uint32_t**)pp_outbuf = (uint32_t*)&ibv_qp->state; - *p_size = sizeof(ibv_qp->state); -} - -static ib_qp_state_t __from_qp_state(enum ibv_qp_state state) -{ - switch (state) { - case IBV_QPS_RESET: return IB_QPS_RESET; - case IBV_QPS_INIT: return IB_QPS_INIT; - case IBV_QPS_RTR: return IB_QPS_RTR; - case IBV_QPS_RTS: return IB_QPS_RTS; - case IBV_QPS_SQD: return IB_QPS_SQD; - case IBV_QPS_SQE: return IB_QPS_SQERR; - case IBV_QPS_ERR: return IB_QPS_ERROR; - default: return IB_QPS_TIME_WAIT; - }; -} - -uint32_t -mlx4_nd_get_qp_state ( - IN const ib_qp_handle_t h_uvp_qp ) -{ - struct ibv_qp *ibv_qp = (struct ibv_qp *)h_uvp_qp; - - return __from_qp_state(ibv_qp->state); -} - -static uint8_t -__gid_to_index_lookup ( - IN ib_ca_attr_t *p_ca_attr, - IN uint8_t port_num, - IN uint8_t *raw_gid ) -{ - ib_gid_t *p_gid_table = NULL; - uint8_t i, index = 0; - uint16_t num_gids; - - p_gid_table = 
p_ca_attr->p_port_attr[port_num-1].p_gid_table; - CL_ASSERT (p_gid_table); - - num_gids = p_ca_attr->p_port_attr[port_num-1].num_gids; - - for (i = 0; i < num_gids; i++) - { - if (cl_memcmp (raw_gid, p_gid_table[i].raw, 16)) - { - index = i; - break; - } - } - return index; -} - -static enum ibv_rate __to_rate(uint8_t rate) -{ - if (rate == IB_PATH_RECORD_RATE_2_5_GBS) return IBV_RATE_2_5_GBPS; - if (rate == IB_PATH_RECORD_RATE_5_GBS) return IBV_RATE_5_GBPS; - if (rate == IB_PATH_RECORD_RATE_10_GBS) return IBV_RATE_10_GBPS; - if (rate == IB_PATH_RECORD_RATE_20_GBS) return IBV_RATE_20_GBPS; - if (rate == IB_PATH_RECORD_RATE_30_GBS) return IBV_RATE_30_GBPS; - if (rate == IB_PATH_RECORD_RATE_40_GBS) return IBV_RATE_40_GBPS; - if (rate == IB_PATH_RECORD_RATE_60_GBS) return IBV_RATE_60_GBPS; - if (rate == IB_PATH_RECORD_RATE_80_GBS) return IBV_RATE_80_GBPS; - if (rate == IB_PATH_RECORD_RATE_120_GBS) return IBV_RATE_120_GBPS; - return IBV_RATE_MAX; -} - -inline void -__grh_get_ver_class_flow( - IN const ib_net32_t ver_class_flow, - OUT uint8_t* const p_ver OPTIONAL, - OUT uint8_t* const p_tclass OPTIONAL, - OUT net32_t* const p_flow_lbl OPTIONAL ) -{ - ib_net32_t tmp_ver_class_flow; - - tmp_ver_class_flow = cl_ntoh32( ver_class_flow ); - - if (p_ver) - *p_ver = (uint8_t)(tmp_ver_class_flow >> 28); - - if (p_tclass) - *p_tclass = (uint8_t)(tmp_ver_class_flow >> 20); - - if (p_flow_lbl) - *p_flow_lbl = (ver_class_flow & CL_HTON32( 0x000FFFFF )); -} - -static ib_api_status_t -__to_ah ( - IN ib_ca_attr_t *p_ca_attr, - IN const ib_av_attr_t *p_av_attr, - OUT struct ibv_ah_attr *p_attr ) -{ - if (p_av_attr->port_num == 0 || - p_av_attr->port_num > p_ca_attr->num_ports) { - MLX4_PRINT(TRACE_LEVEL_WARNING ,MLX4_DBG_AV , - (" invalid port number specified (%d)\n",p_av_attr->port_num)); - return IB_INVALID_PORT; - } - - p_attr->port_num = p_av_attr->port_num; - p_attr->sl = p_av_attr->sl; - p_attr->dlid = cl_ntoh16 (p_av_attr->dlid); - p_attr->static_rate = __to_rate(p_av_attr->static_rate); - p_attr->src_path_bits = p_av_attr->path_bits; - - /* For global destination or Multicast address:*/ - if (p_av_attr->grh_valid) - { - p_attr->is_global = TRUE; - p_attr->grh.hop_limit = p_av_attr->grh.hop_limit; - __grh_get_ver_class_flow( p_av_attr->grh.ver_class_flow, NULL, - &p_attr->grh.traffic_class, &p_attr->grh.flow_label ); - p_attr->grh.sgid_index = __gid_to_index_lookup (p_ca_attr, p_av_attr->port_num, - (uint8_t *) p_av_attr->grh.src_gid.raw); - cl_memcpy (p_attr->grh.dgid.raw, p_av_attr->grh.dest_gid.raw, 16); - } - else - { - p_attr->is_global = FALSE; - } - return IB_SUCCESS; -} - -static void -__set_av_params(struct mlx4_ah *ah, struct ibv_pd *pd, struct ibv_ah_attr *attr) -{ - ah->av.port_pd = cl_hton32(to_mpd(pd)->pdn | (attr->port_num << 24)); - ah->av.g_slid = attr->src_path_bits; - ah->av.dlid = cl_hton16(attr->dlid); - if (attr->static_rate) { - ah->av.stat_rate = (uint8_t)(attr->static_rate + MLX4_STAT_RATE_OFFSET); - /* XXX check rate cap? 
*/ - } - ah->av.sl_tclass_flowlabel = cl_hton32(attr->sl << 28); - if (attr->is_global) - { - ah->av.g_slid |= 0x80; - ah->av.gid_index = attr->grh.sgid_index; - ah->av.hop_limit = attr->grh.hop_limit; - ah->av.sl_tclass_flowlabel |= - cl_hton32((attr->grh.traffic_class << 20) | - attr->grh.flow_label); - cl_memcpy(ah->av.dgid, attr->grh.dgid.raw, 16); - } -} - -ib_api_status_t -mlx4_pre_create_ah ( - IN const ib_pd_handle_t h_uvp_pd, - IN const ib_av_attr_t *p_av_attr, - IN OUT ci_umv_buf_t *p_umv_buf, - OUT ib_av_handle_t *ph_uvp_av ) -{ - struct mlx4_ah *ah; - struct ibv_ah_attr attr; - struct ibv_pd *pd = (struct ibv_pd *)h_uvp_pd; - ib_api_status_t status = IB_SUCCESS; - - UNREFERENCED_PARAMETER(p_umv_buf); - - if (pd->context->p_hca_attr == NULL) { - status = IB_ERROR; - goto end; - } - - ah = cl_malloc(sizeof *ah); - if (!ah) { - status = IB_INSUFFICIENT_MEMORY; - goto end; - } - - // sanity check - if (p_av_attr->port_num == 0 || - p_av_attr->port_num > pd->context->p_hca_attr->num_ports) - { - status = IB_INVALID_PORT; - goto end; - } - - // convert parameters - cl_memset(&attr, 0, sizeof(attr)); - status = __to_ah(pd->context->p_hca_attr, p_av_attr, &attr); - if (status) - goto end; - - ah->ibv_ah.pd = pd; - ah->ibv_ah.context = pd->context; - cl_memcpy(&ah->ibv_ah.av_attr, p_av_attr, sizeof (ib_av_attr_t)); - - cl_memset(&ah->av, 0, sizeof ah->av); - __set_av_params(ah, pd, &attr); - - *ph_uvp_av = (ib_av_handle_t)&ah->ibv_ah; - status = IB_VERBS_PROCESSING_DONE; - -end: - return status; -} - -ib_api_status_t -mlx4_pre_query_ah ( - IN const ib_av_handle_t h_uvp_av, - IN OUT ci_umv_buf_t *p_umv_buf ) -{ - UNREFERENCED_PARAMETER(h_uvp_av); - UNREFERENCED_PARAMETER(p_umv_buf); - - return IB_VERBS_PROCESSING_DONE; -} - -void -mlx4_post_query_ah ( - IN const ib_av_handle_t h_uvp_av, - IN ib_api_status_t ioctl_status, - IN OUT ib_av_attr_t *p_addr_vector, - IN OUT ib_pd_handle_t *ph_pd, - IN OUT ci_umv_buf_t *p_umv_buf ) -{ - struct ibv_ah *ah = (struct ibv_ah *)h_uvp_av; - - UNREFERENCED_PARAMETER(p_umv_buf); - - CL_ASSERT(h_uvp_av && p_addr_vector); - - if (ioctl_status == IB_SUCCESS) - { - cl_memcpy(p_addr_vector, &ah->av_attr, sizeof(ib_av_attr_t)); - if (ph_pd) - *ph_pd = (ib_pd_handle_t)ah->pd; - } -} - -ib_api_status_t -mlx4_pre_modify_ah ( - IN const ib_av_handle_t h_uvp_av, - IN const ib_av_attr_t *p_addr_vector, - IN OUT ci_umv_buf_t *p_umv_buf ) -{ - struct ibv_ah *ah = (struct ibv_ah *)h_uvp_av; - struct ibv_ah_attr attr; - ib_api_status_t status; - - UNREFERENCED_PARAMETER(p_umv_buf); - - CL_ASSERT (h_uvp_av); - - status = __to_ah(ah->context->p_hca_attr, p_addr_vector, &attr); - if (status) - return status; - - __set_av_params(to_mah(ah), ah->pd, &attr); - cl_memcpy(&ah->av_attr, p_addr_vector, sizeof(ib_av_attr_t)); - - return IB_VERBS_PROCESSING_DONE; -} - -ib_api_status_t -mlx4_pre_destroy_ah ( - IN const ib_av_handle_t h_uvp_av ) -{ - struct ibv_ah *ah = (struct ibv_ah *)h_uvp_av; - - CL_ASSERT(ah); - - cl_free(to_mah(ah)); - - return IB_VERBS_PROCESSING_DONE; -} - -#ifdef XRC_SUPPORT -ib_api_status_t -mlx4_pre_create_xrc_srq ( - IN const ib_pd_handle_t h_uvp_pd, - IN const ib_xrcd_handle_t h_uvp_xrcd, - IN const ib_srq_attr_t *p_srq_attr, - IN OUT ci_umv_buf_t *p_umv_buf, - OUT ib_srq_handle_t *ph_uvp_srq ) -{ - struct mlx4_srq *srq; - struct ibv_create_srq *p_create_srq; - struct ibv_pd *pd = (struct ibv_pd *)h_uvp_pd; - struct ibv_xrc_domain *xrc_domain = (struct ibv_xrc_domain *)h_uvp_xrcd; - ib_api_status_t status = IB_SUCCESS; - size_t size = max( sizeof(struct 
ibv_create_srq), sizeof(struct ibv_create_srq_resp) ); - - CL_ASSERT(p_umv_buf); - - if( !p_umv_buf->p_inout_buf ) - { - p_umv_buf->p_inout_buf = cl_malloc( size ); - if( !p_umv_buf->p_inout_buf ) - { - status = IB_INSUFFICIENT_MEMORY; - goto err_memory; - } - } - p_umv_buf->input_size = sizeof(struct ibv_create_srq); - p_umv_buf->output_size = sizeof(struct ibv_create_srq_resp); - p_umv_buf->command = TRUE; - - p_create_srq = p_umv_buf->p_inout_buf; - - // Mlx4 code: - - /* Sanity check SRQ size before proceeding */ - if (p_srq_attr->max_wr > 1 << 16 || p_srq_attr->max_sge > 64) - { - status = IB_INVALID_PARAMETER; - goto err_params; - } - - srq = cl_malloc(sizeof *srq); - if (!srq) { - status = IB_INSUFFICIENT_MEMORY; - goto err_alloc_srq; - } - - if (cl_spinlock_init(&srq->lock)) { - status = IB_INSUFFICIENT_MEMORY; - goto err_lock; - } - - srq->ibv_srq.pd = pd; - srq->ibv_srq.context = pd->context; - - srq->max = __align_queue_size(p_srq_attr->max_wr + 1); - srq->max_gs = p_srq_attr->max_sge; - srq->counter = 0; - - if (mlx4_alloc_srq_buf(pd, (struct ibv_srq_attr *)p_srq_attr, srq)) - { - status = IB_INSUFFICIENT_MEMORY; - goto err_alloc_buf; - } - - srq->db = mlx4_alloc_db(to_mctx(pd->context), MLX4_DB_TYPE_RQ); - if (!srq->db) - goto err_alloc_db; - - *srq->db = 0; - - // fill the parameters for ioctl - p_create_srq->buf_addr = (uintptr_t) srq->buf.buf; - p_create_srq->db_addr = (uintptr_t) srq->db; - p_create_srq->pd_handle = pd->handle; - p_create_srq->max_wr = p_srq_attr->max_wr; - p_create_srq->max_sge = p_srq_attr->max_sge; - p_create_srq->srq_limit = p_srq_attr->srq_limit; - - *ph_uvp_srq = (ib_srq_handle_t)&srq->ibv_srq; - goto end; - -err_alloc_db: - cl_free(srq->wrid); - mlx4_free_buf(&srq->buf); -err_alloc_buf: - cl_spinlock_destroy(&srq->lock); -err_lock: - cl_free(srq); -err_alloc_srq: - cl_free(p_umv_buf->p_inout_buf); -err_params: err_memory: -end: - return status; -} - -ib_api_status_t -mlx4_post_create_xrc_srq ( - IN const ib_pd_handle_t h_uvp_pd, - IN ib_api_status_t ioctl_status, - IN OUT ib_srq_handle_t *ph_uvp_srq, - IN ci_umv_buf_t *p_umv_buf ) -{ - struct mlx4_srq *srq = (struct mlx4_srq *)*ph_uvp_srq; - struct ibv_create_srq_resp *p_resp; - ib_api_status_t status = IB_SUCCESS; - - UNREFERENCED_PARAMETER(h_uvp_pd); - - CL_ASSERT(p_umv_buf && p_umv_buf->p_inout_buf); - - p_resp = p_umv_buf->p_inout_buf; - - if (IB_SUCCESS == ioctl_status) - { - // Mlx4 code: - - srq->ibv_srq.xrc_srq_num = srq->srqn = p_resp->srqn; - srq->ibv_srq.handle = p_resp->srq_handle; - - srq->max = p_resp->max_wr; - srq->max_gs = p_resp->max_sge; - - if (mlx4_store_xrc_srq(to_mctx(pd->context), srq->ibv_srq.xrc_srq_num, srq)) - { - mlx4_post_destroy_srq(*ph_uvp_srq, IB_SUCCESS); - status = IB_INSUFFICIENT_MEMORY; - } - } - else - { - mlx4_post_destroy_srq (*ph_uvp_srq, IB_SUCCESS); - } - - cl_free( p_resp ); - return status; -} - -ib_api_status_t -mlx4_pre_open_xrc_domain ( - IN const ib_ca_handle_t h_uvp_ca, - IN const uint32_t oflag, - IN OUT ci_umv_buf_t *p_umv_buf, - OUT ib_xrcd_handle_t *ph_uvp_xrcd ) -{ - struct mlx4_xrc_domain *xrcd; - struct ibv_context * context = (struct ibv_context *)h_uvp_ca; - struct ibv_open_xrc_domain *p_open_xrcd; - ib_api_status_t status = IB_SUCCESS; - int size = max( sizeof(struct ibv_open_xrc_domain), sizeof(struct ibv_open_xrc_domain_resp) ); - - CL_ASSERT(h_uvp_ca && p_umv_buf); - - if( !p_umv_buf->p_inout_buf ) - { - p_umv_buf->p_inout_buf = cl_malloc( size ); - if( !p_umv_buf->p_inout_buf ) - { - status = IB_INSUFFICIENT_MEMORY; - goto err_umv_buf; 
- } - } - p_umv_buf->input_size = sizeof(struct ibv_open_xrc_domain); - p_umv_buf->output_size = sizeof(struct ibv_open_xrc_domain_resp); - p_umv_buf->command = TRUE; - - p_open_xrcd = p_umv_buf->p_inout_buf; - - // Mlx4 code: - - xrcd = cl_malloc(sizeof *xrcd); - if (!xrcd) { - status = IB_INSUFFICIENT_MEMORY; - goto err_xrc; - } - - xrcd->ibv_xrcd.context = context; - - p_open_xrcd->oflags = oflag; - - *ph_uvp_xrcd = (struct ibv_xrc_domain *)&xrcd->ibv_xrcd; - goto end; - -err_xrc: - cl_free(p_umv_buf->p_inout_buf); -err_umv_buf: -end: - return status; -} - -void -mlx4_post_open_xrc_domain ( - IN const ib_ca_handle_t h_uvp_ca, - IN ib_api_status_t ioctl_status, - IN OUT ib_xrcd_handle_t *ph_uvp_xrcd, - IN ci_umv_buf_t *p_umv_buf ) -{ - struct ibv_xrc_domain *xrcd = (struct ibv_xrc_domain *)*ph_uvp_xrcd; - struct ibv_open_xrc_domain_resp *p_resp; - - UNREFERENCED_PARAMETER(h_uvp_ca); - - CL_ASSERT(p_umv_buf && p_umv_buf->p_inout_buf); - - p_resp = p_umv_buf->p_inout_buf; - - if (IB_SUCCESS == ioctl_status) - { - // Mlx4 code: - - xrcd->handle = p_resp->xrcd_handle; - to_mxrcd(xrcd)->xrcdn = p_resp->xrcdn; - } - else - { - cl_free(to_mxrcd(xrcd)); - } - - cl_free(p_resp); - return; -} - -void -mlx4_post_close_xrc_domain ( - IN const ib_xrcd_handle_t h_uvp_xrcd, - IN ib_api_status_t ioctl_status ) -{ - struct ibv_xrc_domain *xrdc = (struct ibv_xrc_domain *)h_uvp_xrcd; - - CL_ASSERT(xrdc); - - if (IB_SUCCESS == ioctl_status) { - cl_free(to_mxrcd(xrdc)); - } -} -#endif +/* + * Copyright (c) 2007 Cisco, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#include "mlx4.h" +#include "verbs.h" +#include "mx_abi.h" +#include "wqe.h" +#include "mlx4_debug.h" + +#if defined(EVENT_TRACING) +#include "verbs.tmh" +#endif + +ib_api_status_t +mlx4_pre_open_ca ( + IN const ib_net64_t ca_guid, + IN OUT ci_umv_buf_t *p_umv_buf, + OUT ib_ca_handle_t *ph_uvp_ca ) +{ + struct ibv_context *context; + ib_api_status_t status = IB_SUCCESS; + + UNREFERENCED_PARAMETER(ca_guid); + + context = mlx4_alloc_context(); + if (!context) { + status = IB_INSUFFICIENT_MEMORY; + goto end; + } + + if( p_umv_buf ) + { + if( !p_umv_buf->p_inout_buf ) + { + p_umv_buf->p_inout_buf = cl_zalloc( sizeof(struct ibv_get_context_resp) ); + if( !p_umv_buf->p_inout_buf ) + { + status = IB_INSUFFICIENT_MEMORY; + goto end; + } + } + p_umv_buf->input_size = 0; + p_umv_buf->output_size = sizeof(struct ibv_get_context_resp); + p_umv_buf->command = TRUE; + } + + *ph_uvp_ca = (ib_ca_handle_t)context; + +end: + return status; +} + +ib_api_status_t +mlx4_post_open_ca ( + IN const ib_net64_t ca_guid, + IN ib_api_status_t ioctl_status, + IN OUT ib_ca_handle_t *ph_uvp_ca, + IN ci_umv_buf_t *p_umv_buf ) +{ + struct ibv_get_context_resp *p_resp; + struct ibv_context *context = (struct ibv_context *)*ph_uvp_ca; + ib_api_status_t status = IB_SUCCESS; + + UNREFERENCED_PARAMETER(ca_guid); + + CL_ASSERT(p_umv_buf && p_umv_buf->p_inout_buf); + + p_resp = p_umv_buf->p_inout_buf; + + if (IB_SUCCESS == ioctl_status) + { + if (!mlx4_fill_context(context, p_resp)) + { + status = IB_INSUFFICIENT_RESOURCES; + goto end; + } + } + +end: + cl_free(p_resp); + return status; +} + +ib_api_status_t +mlx4_pre_query_ca ( + IN ib_ca_handle_t h_uvp_ca, + IN ib_ca_attr_t *p_ca_attr, + IN size_t byte_count, + IN ci_umv_buf_t *p_umv_buf ) +{ + ib_api_status_t status = IB_SUCCESS; + + UNREFERENCED_PARAMETER(h_uvp_ca); + + /* Note that query_ca calls *always* get their attributes from the kernel. + * + * Assume if user buffer is valid then byte_cnt is valid too + * so we can preallocate ca attr buffer for post ioctl data saving + * + * Note that we squirrel the buffer away into the umv_buf and only + * set it into the HCA if the query is successful. + */ + if ( p_ca_attr != NULL ) + { + p_umv_buf->p_inout_buf = cl_malloc(byte_count); + if ( !p_umv_buf->p_inout_buf ) + { + status = IB_INSUFFICIENT_RESOURCES; + goto end; + } + } + +end: + return status; +} + +void +__fixup_ca_attr( + IN ib_ca_attr_t* const p_dest, + IN const ib_ca_attr_t* const p_src ) +{ + uint8_t i; + uintn_t offset = (uintn_t)p_dest - (uintn_t)p_src; + ib_port_attr_t *p_tmp_port_attr = NULL; + + CL_ASSERT( p_dest ); + CL_ASSERT( p_src ); + + /* Fix up the pointers to point within the destination buffer. */ + p_dest->p_page_size = + (uint32_t*)(((uint8_t*)p_dest->p_page_size) + offset); + + p_tmp_port_attr = + (ib_port_attr_t*)(((uint8_t*)p_dest->p_port_attr) + offset); + + /* Fix up each port attribute's gid and pkey table pointers. 
*/ + for( i = 0; i < p_dest->num_ports; i++ ) + { + p_tmp_port_attr[i].p_gid_table = (ib_gid_t*) + (((uint8_t*)p_tmp_port_attr[i].p_gid_table) + offset); + + p_tmp_port_attr[i].p_pkey_table =(ib_net16_t*) + (((uint8_t*)p_tmp_port_attr[i].p_pkey_table) + offset); + } + p_dest->p_port_attr = p_tmp_port_attr; +} + +void +mlx4_post_query_ca ( + IN ib_ca_handle_t h_uvp_ca, + IN ib_api_status_t ioctl_status, + IN ib_ca_attr_t *p_ca_attr, + IN size_t byte_count, + IN ci_umv_buf_t *p_umv_buf ) +{ + struct ibv_context *context = (struct ibv_context *)h_uvp_ca; + + CL_ASSERT(context && p_umv_buf); + + if ( ioctl_status == IB_SUCCESS && p_ca_attr && byte_count) + { + CL_ASSERT( byte_count >= p_ca_attr->size ); + + pthread_mutex_lock(&context->mutex); + + if (context->p_hca_attr) + cl_free(context->p_hca_attr); + context->p_hca_attr = p_umv_buf->p_inout_buf; + cl_memcpy( context->p_hca_attr, p_ca_attr, p_ca_attr->size ); + __fixup_ca_attr( context->p_hca_attr, p_ca_attr ); + + pthread_mutex_unlock(&context->mutex); + } + else if (p_umv_buf->p_inout_buf) + { + cl_free(p_umv_buf->p_inout_buf); + } +} + +ib_api_status_t +mlx4_post_close_ca ( + IN ib_ca_handle_t h_uvp_ca, + IN ib_api_status_t ioctl_status ) +{ + struct ibv_context *context = (struct ibv_context *)h_uvp_ca; + + CL_ASSERT(context); + + if (IB_SUCCESS == ioctl_status) + mlx4_free_context(context); + + return IB_SUCCESS; +} + +ib_api_status_t +mlx4_pre_alloc_pd ( + IN const ib_ca_handle_t h_uvp_ca, + IN OUT ci_umv_buf_t *p_umv_buf, + OUT ib_pd_handle_t *ph_uvp_pd ) +{ + struct mlx4_pd *pd; + struct ibv_context *context = (struct ibv_context *)h_uvp_ca; + ib_api_status_t status = IB_SUCCESS; + + CL_ASSERT(context && p_umv_buf); + + if( !p_umv_buf->p_inout_buf ) + { + p_umv_buf->p_inout_buf = cl_malloc( sizeof(struct ibv_alloc_pd_resp) ); + if( !p_umv_buf->p_inout_buf ) + { + status = IB_INSUFFICIENT_MEMORY; + goto end; + } + } + p_umv_buf->input_size = 0; + p_umv_buf->output_size = sizeof(struct ibv_alloc_pd_resp); + p_umv_buf->command = TRUE; + + // Mlx4 code: + + pd = cl_malloc(sizeof *pd); + if (!pd) { + status = IB_INSUFFICIENT_MEMORY; + goto end; + } + + pd->ibv_pd.context = context; + + *ph_uvp_pd = (ib_pd_handle_t)&pd->ibv_pd; + +end: + return status; +} + +void +mlx4_post_alloc_pd ( + IN ib_ca_handle_t h_uvp_ca, + IN ib_api_status_t ioctl_status, + IN OUT ib_pd_handle_t *ph_uvp_pd, + IN ci_umv_buf_t *p_umv_buf ) +{ + struct ibv_pd *pd = (struct ibv_pd *)*ph_uvp_pd; + struct ibv_alloc_pd_resp *p_resp; + + + UNREFERENCED_PARAMETER(h_uvp_ca); + + CL_ASSERT(p_umv_buf && p_umv_buf->p_inout_buf); + + p_resp = p_umv_buf->p_inout_buf; + + if (IB_SUCCESS == ioctl_status) + { + // Mlx4 code: + + pd->handle = p_resp->pd_handle; + to_mpd(pd)->pdn = p_resp->pdn; + } + else + { + cl_free(to_mpd(pd)); + } + + cl_free(p_resp); + return; +} + +void +mlx4_post_free_pd ( + IN const ib_pd_handle_t h_uvp_pd, + IN ib_api_status_t ioctl_status ) +{ + struct ibv_pd *pd = (struct ibv_pd *)h_uvp_pd; + + CL_ASSERT(pd); + + if (IB_SUCCESS == ioctl_status) + cl_free(to_mpd(pd)); +} + +static int __align_queue_size(int req) +{ + int nent; + + for (nent = 1; nent < req; nent <<= 1) + ; /* nothing */ + + return nent; +} + +ib_api_status_t +mlx4_pre_create_cq ( + IN const ib_ca_handle_t h_uvp_ca, + IN OUT uint32_t* const p_size, + IN OUT ci_umv_buf_t *p_umv_buf, + OUT ib_cq_handle_t *ph_uvp_cq ) +{ + struct mlx4_cq *cq; + struct ibv_create_cq *p_create_cq; + struct ibv_context *context = (struct ibv_context *)h_uvp_ca; + ib_api_status_t status = IB_SUCCESS; + int size 
= max( sizeof(struct ibv_create_cq), sizeof(struct ibv_create_cq_resp) ); + + CL_ASSERT(h_uvp_ca && p_umv_buf); + + if( !p_umv_buf->p_inout_buf ) + { + p_umv_buf->p_inout_buf = cl_malloc( size ); + if( !p_umv_buf->p_inout_buf ) + { + status = IB_INSUFFICIENT_MEMORY; + goto err_umv_buf; + } + } + p_umv_buf->input_size = sizeof(struct ibv_create_cq); + p_umv_buf->output_size = sizeof(struct ibv_create_cq_resp); + p_umv_buf->command = TRUE; + + p_create_cq = p_umv_buf->p_inout_buf; + + // Mlx4 code: + + /* Sanity check CQ size before proceeding */ + if (*p_size > 0x3fffff) { + status = IB_INVALID_CQ_SIZE; + goto err_cqe_size; + } + + cq = cl_malloc(sizeof *cq); + if (!cq) { + status = IB_INSUFFICIENT_MEMORY; + goto err_cq; + } + + if (cl_spinlock_init(&cq->lock)) { + status = IB_INSUFFICIENT_MEMORY; + goto err_lock; + } + + *p_size = __align_queue_size(*p_size + 1); + + if (mlx4_alloc_buf(&cq->buf, *p_size * MLX4_CQ_ENTRY_SIZE, + context->page_size)) + goto err_alloc_buf; + + cq->ibv_cq.context = context; + cq->cons_index = 0; + + cq->set_ci_db = mlx4_alloc_db(to_mctx(context), MLX4_DB_TYPE_CQ); + if (!cq->set_ci_db) + goto err_alloc_db; + + cq->arm_db = cq->set_ci_db + 1; + *cq->arm_db = 0; + cq->arm_sn = 1; + *cq->set_ci_db = 0; + + p_create_cq->buf_addr = (uintptr_t) cq->buf.buf; + p_create_cq->db_addr = (uintptr_t) cq->set_ci_db; + p_create_cq->arm_sn_addr = (uintptr_t) &cq->arm_sn; + p_create_cq->cqe = --(*p_size); + + *ph_uvp_cq = (ib_cq_handle_t)&cq->ibv_cq; + goto end; + +err_alloc_db: + mlx4_free_buf(&cq->buf); +err_alloc_buf: + cl_spinlock_destroy(&cq->lock); +err_lock: + cl_free(cq); +err_cq: +err_cqe_size: + cl_free(p_umv_buf->p_inout_buf); +err_umv_buf: +end: + return status; +} + +void +mlx4_post_create_cq ( + IN const ib_ca_handle_t h_uvp_ca, + IN ib_api_status_t ioctl_status, + IN const uint32_t size, + IN OUT ib_cq_handle_t *ph_uvp_cq, + IN ci_umv_buf_t *p_umv_buf ) +{ + struct ibv_cq *cq = (struct ibv_cq *)*ph_uvp_cq; + struct ibv_create_cq_resp *p_resp; + + UNREFERENCED_PARAMETER(h_uvp_ca); + UNREFERENCED_PARAMETER(size); + + CL_ASSERT(p_umv_buf && p_umv_buf->p_inout_buf); + + p_resp = p_umv_buf->p_inout_buf; + + if (IB_SUCCESS == ioctl_status) + { + // Mlx4 code: + + to_mcq(cq)->cqn = p_resp->cqn; + cq->cqe = p_resp->cqe; + cq->handle = p_resp->cq_handle; + } + else + { + mlx4_post_destroy_cq (*ph_uvp_cq, IB_SUCCESS); + } + + cl_free(p_resp); + return; +} + +ib_api_status_t +mlx4_pre_query_cq ( + IN const ib_cq_handle_t h_uvp_cq, + OUT uint32_t* const p_size, + IN OUT ci_umv_buf_t *p_umv_buf ) +{ + struct ibv_cq *cq = (struct ibv_cq *)h_uvp_cq; + + UNREFERENCED_PARAMETER(p_umv_buf); + + *p_size = cq->cqe; + + return IB_VERBS_PROCESSING_DONE; +} + +void +mlx4_post_destroy_cq ( + IN const ib_cq_handle_t h_uvp_cq, + IN ib_api_status_t ioctl_status ) +{ + struct ibv_cq *cq = (struct ibv_cq *)h_uvp_cq; + + CL_ASSERT(cq); + + if (IB_SUCCESS == ioctl_status) { + mlx4_free_db(to_mctx(cq->context), MLX4_DB_TYPE_CQ, to_mcq(cq)->set_ci_db); + mlx4_free_buf(&to_mcq(cq)->buf); + + cl_spinlock_destroy(&to_mcq(cq)->lock); + cl_free(to_mcq(cq)); + } +} + +ib_api_status_t +mlx4_pre_create_srq ( + IN const ib_pd_handle_t h_uvp_pd, + IN const ib_srq_attr_t *p_srq_attr, + IN OUT ci_umv_buf_t *p_umv_buf, + OUT ib_srq_handle_t *ph_uvp_srq ) +{ + struct mlx4_srq *srq; + struct ibv_create_srq *p_create_srq; + struct ibv_pd *pd = (struct ibv_pd *)h_uvp_pd; + ib_api_status_t status = IB_SUCCESS; + size_t size = max( sizeof(struct ibv_create_srq), sizeof(struct ibv_create_srq_resp) ); + + 
CL_ASSERT(p_umv_buf); + + if( !p_umv_buf->p_inout_buf ) + { + p_umv_buf->p_inout_buf = cl_malloc( size ); + if( !p_umv_buf->p_inout_buf ) + { + status = IB_INSUFFICIENT_MEMORY; + goto err_memory; + } + } + p_umv_buf->input_size = sizeof(struct ibv_create_srq); + p_umv_buf->output_size = sizeof(struct ibv_create_srq_resp); + p_umv_buf->command = TRUE; + + p_create_srq = p_umv_buf->p_inout_buf; + + // Mlx4 code: + + /* Sanity check SRQ size before proceeding */ + if (p_srq_attr->max_wr > 1 << 16 || p_srq_attr->max_sge > 64) + { + status = IB_INVALID_PARAMETER; + goto err_params; + } + + srq = cl_malloc(sizeof *srq); + if (!srq) { + status = IB_INSUFFICIENT_MEMORY; + goto err_alloc_srq; + } + + if (cl_spinlock_init(&srq->lock)) { + status = IB_INSUFFICIENT_MEMORY; + goto err_lock; + } + + srq->ibv_srq.pd = pd; + srq->ibv_srq.context = pd->context; + + srq->max = __align_queue_size(p_srq_attr->max_wr + 1); + srq->max_gs = p_srq_attr->max_sge; + srq->counter = 0; + + if (mlx4_alloc_srq_buf(pd, (struct ibv_srq_attr *)p_srq_attr, srq)) + { + status = IB_INSUFFICIENT_MEMORY; + goto err_alloc_buf; + } + + srq->db = mlx4_alloc_db(to_mctx(pd->context), MLX4_DB_TYPE_RQ); + if (!srq->db) + goto err_alloc_db; + + *srq->db = 0; + + // fill the parameters for ioctl + p_create_srq->buf_addr = (uintptr_t) srq->buf.buf; + p_create_srq->db_addr = (uintptr_t) srq->db; + p_create_srq->pd_handle = pd->handle; + p_create_srq->max_wr = p_srq_attr->max_wr; + p_create_srq->max_sge = p_srq_attr->max_sge; + p_create_srq->srq_limit = p_srq_attr->srq_limit; + + *ph_uvp_srq = (ib_srq_handle_t)&srq->ibv_srq; + goto end; + +err_alloc_db: + cl_free(srq->wrid); + mlx4_free_buf(&srq->buf); +err_alloc_buf: + cl_spinlock_destroy(&srq->lock); +err_lock: + cl_free(srq); +err_alloc_srq: + cl_free(p_umv_buf->p_inout_buf); +err_params: err_memory: +end: + return status; +} + +void +mlx4_post_create_srq ( + IN const ib_pd_handle_t h_uvp_pd, + IN ib_api_status_t ioctl_status, + IN OUT ib_srq_handle_t *ph_uvp_srq, + IN ci_umv_buf_t *p_umv_buf ) +{ + struct ibv_srq *ibsrq = (struct ibv_srq *)*ph_uvp_srq; + struct mlx4_srq *srq = to_msrq(ibsrq); + struct ibv_create_srq_resp *p_resp; + + UNREFERENCED_PARAMETER(h_uvp_pd); + + CL_ASSERT(p_umv_buf && p_umv_buf->p_inout_buf); + + p_resp = p_umv_buf->p_inout_buf; + + if (IB_SUCCESS == ioctl_status) + { + // Mlx4 code: + + srq->srqn = p_resp->srqn; + ibsrq->handle = p_resp->srq_handle; + + srq->max = p_resp->max_wr; + srq->max_gs = p_resp->max_sge; + } + else + { + mlx4_post_destroy_srq (*ph_uvp_srq, IB_SUCCESS); + } + + cl_free(p_resp); + return; +} + +ib_api_status_t +mlx4_pre_destroy_srq ( + IN const ib_srq_handle_t h_uvp_srq ) +{ +#ifdef XRC_SUPPORT + struct ibv_srq *ibsrq = (struct ibv_srq *)h_uvp_srq; + struct mlx4_srq *srq = to_msrq(ibsrq); + struct mlx4_cq *mcq = NULL; + + if (ibsrq->xrc_cq) + { + /* is an xrc_srq */ + mcq = to_mcq(ibsrq->xrc_cq); + mlx4_cq_clean(mcq, 0, srq); + cl_spinlock_acquire(&mcq->lock); + mlx4_clear_xrc_srq(to_mctx(ibsrq->context), srq->srqn); + cl_spinlock_release(&mcq->lock); + } +#else + UNUSED_PARAM(h_uvp_srq); +#endif + return IB_SUCCESS; +} + +void +mlx4_post_destroy_srq ( + IN const ib_srq_handle_t h_uvp_srq, + IN ib_api_status_t ioctl_status ) +{ + struct ibv_srq *ibsrq = (struct ibv_srq *)h_uvp_srq; + struct mlx4_srq *srq = to_msrq(ibsrq); + + CL_ASSERT(srq); + + if (IB_SUCCESS == ioctl_status) + { + mlx4_free_db(to_mctx(ibsrq->context), MLX4_DB_TYPE_RQ, srq->db); + cl_free(srq->wrid); + mlx4_free_buf(&srq->buf); + cl_spinlock_destroy(&srq->lock); + 
cl_free(srq); + } + else + { +#ifdef XRC_SUPPORT + if (ibsrq->xrc_cq) { + /* is an xrc_srq */ + struct mlx4_cq *mcq = to_mcq(ibsrq->xrc_cq); + cl_spinlock_acquire(&mcq->lock); + mlx4_store_xrc_srq(to_mctx(ibsrq->context), srq->srqn, srq); + cl_spinlock_release(&mcq->lock); + } +#endif + } +} + +static enum ibv_qp_type +__to_qp_type(ib_qp_type_t type) +{ + switch (type) { + case IB_QPT_RELIABLE_CONN: return IBV_QPT_RC; + case IB_QPT_UNRELIABLE_CONN: return IBV_QPT_UC; + case IB_QPT_UNRELIABLE_DGRM: return IBV_QPT_UD; +#ifdef XRC_SUPPORT + //case IB_QPT_XRC_CONN: return IBV_QPT_XRC; +#endif + default: return IBV_QPT_RC; + } +} + +ib_api_status_t +mlx4_pre_create_qp ( + IN const ib_pd_handle_t h_uvp_pd, + IN const ib_qp_create_t *p_create_attr, + IN OUT ci_umv_buf_t *p_umv_buf, + OUT ib_qp_handle_t *ph_uvp_qp ) +{ + struct ibv_pd *pd = (struct ibv_pd *)h_uvp_pd; + struct mlx4_context *context = to_mctx(pd->context); + struct mlx4_qp *qp; + struct ibv_create_qp *p_create_qp; + struct ibv_qp_init_attr attr; + ib_api_status_t status = IB_SUCCESS; + int size = max( sizeof(struct ibv_create_qp), sizeof(struct ibv_create_qp_resp) ); + + CL_ASSERT(p_umv_buf); + + if( !p_umv_buf->p_inout_buf ) + { + p_umv_buf->p_inout_buf = cl_malloc(size); + if( !p_umv_buf->p_inout_buf ) + { + status = IB_INSUFFICIENT_MEMORY; + goto err_memory; + } + } + p_umv_buf->input_size = sizeof(struct ibv_create_qp); + p_umv_buf->output_size = sizeof(struct ibv_create_qp_resp); + p_umv_buf->command = TRUE; + + p_create_qp = p_umv_buf->p_inout_buf; + + /* convert attributes */ + memset( &attr, 0, sizeof(attr) ); + attr.send_cq = (struct ibv_cq *)p_create_attr->h_sq_cq; + attr.recv_cq = (struct ibv_cq *)p_create_attr->h_rq_cq; + attr.srq = (struct ibv_srq*)p_create_attr->h_srq; + attr.cap.max_send_wr = p_create_attr->sq_depth; + attr.cap.max_recv_wr = p_create_attr->rq_depth; + attr.cap.max_send_sge = p_create_attr->sq_sge; + attr.cap.max_recv_sge = p_create_attr->rq_sge; + attr.cap.max_inline_data = p_create_attr->sq_max_inline; + attr.qp_type = __to_qp_type(p_create_attr->qp_type); + attr.sq_sig_all = p_create_attr->sq_signaled; + + // Mlx4 code: + + /* Sanity check QP size before proceeding */ + if (attr.cap.max_send_wr > (uint32_t) context->max_qp_wr || + attr.cap.max_recv_wr > (uint32_t) context->max_qp_wr || + attr.cap.max_send_sge > (uint32_t) context->max_sge || + attr.cap.max_recv_sge > (uint32_t) context->max_sge || + attr.cap.max_inline_data > 1024) + { + status = IB_INVALID_PARAMETER; + goto end; + } + + qp = cl_malloc(sizeof *qp); + if (!qp) { + status = IB_INSUFFICIENT_MEMORY; + goto err_alloc_qp; + } + + mlx4_calc_sq_wqe_size(&attr.cap, attr.qp_type, qp); + + /* + * We need to leave 2 KB + 1 WQE of headroom in the SQ to + * allow HW to prefetch. 
+ */ + qp->sq_spare_wqes = (2048 >> qp->sq.wqe_shift) + 1; + qp->sq.wqe_cnt = __align_queue_size(attr.cap.max_send_wr + qp->sq_spare_wqes); + qp->rq.wqe_cnt = __align_queue_size(attr.cap.max_recv_wr); + + if (attr.srq || attr.qp_type == IBV_QPT_XRC) + attr.cap.max_recv_wr = qp->rq.wqe_cnt = 0; + else + { + if (attr.cap.max_recv_sge < 1) + attr.cap.max_recv_sge = 1; + if (attr.cap.max_recv_wr < 1) + attr.cap.max_recv_wr = 1; + } + + if (mlx4_alloc_qp_buf(pd, &attr.cap, attr.qp_type, qp)) + goto err_alloc_qp_buff; + + mlx4_init_qp_indices(qp); + + if (cl_spinlock_init(&qp->sq.lock)) { + status = IB_INSUFFICIENT_MEMORY; + goto err_spinlock_sq; + } + if (cl_spinlock_init(&qp->rq.lock)) { + status = IB_INSUFFICIENT_MEMORY; + goto err_spinlock_rq; + } + + // fill qp fields + if (!attr.srq && attr.qp_type != IBV_QPT_XRC) { + qp->db = mlx4_alloc_db(context, MLX4_DB_TYPE_RQ); + if (!qp->db) { + status = IB_INSUFFICIENT_MEMORY; + goto err_db; + } + + *qp->db = 0; + } + if (attr.sq_sig_all) + qp->sq_signal_bits = cl_hton32(MLX4_WQE_CTRL_CQ_UPDATE); + else + qp->sq_signal_bits = 0; + + // fill the rest of qp fields + qp->ibv_qp.pd = pd; + qp->ibv_qp.context= pd->context; + qp->ibv_qp.send_cq = attr.send_cq; + qp->ibv_qp.recv_cq = attr.recv_cq; + qp->ibv_qp.srq = attr.srq; + qp->ibv_qp.state = IBV_QPS_RESET; + qp->ibv_qp.qp_type = attr.qp_type; + + // fill request fields + p_create_qp->buf_addr = (uintptr_t) qp->buf.buf; + if (!attr.srq && attr.qp_type != IBV_QPT_XRC) + p_create_qp->db_addr = (uintptr_t) qp->db; + else + p_create_qp->db_addr = 0; + + p_create_qp->pd_handle = pd->handle; + p_create_qp->send_cq_handle = attr.send_cq->handle; + p_create_qp->recv_cq_handle = attr.recv_cq->handle; + p_create_qp->srq_handle = attr.qp_type == IBV_QPT_XRC ? + (attr.xrc_domain ? attr.xrc_domain->handle : 0) : + (attr.srq ? attr.srq->handle : 0); + + p_create_qp->max_send_wr = attr.cap.max_send_wr; + p_create_qp->max_recv_wr = attr.cap.max_recv_wr; + p_create_qp->max_send_sge = attr.cap.max_send_sge; + p_create_qp->max_recv_sge = attr.cap.max_recv_sge; + p_create_qp->max_inline_data = attr.cap.max_inline_data; + p_create_qp->sq_sig_all = (uint8_t)attr.sq_sig_all; + p_create_qp->qp_type = attr.qp_type; + p_create_qp->is_srq = (uint8_t)(attr.qp_type == IBV_QPT_XRC ? 
+ !!attr.xrc_domain : !!attr.srq); + + p_create_qp->log_sq_stride = (uint8_t)qp->sq.wqe_shift; + for (p_create_qp->log_sq_bb_count = 0; + qp->sq.wqe_cnt > 1 << p_create_qp->log_sq_bb_count; + ++p_create_qp->log_sq_bb_count) + ; /* nothing */ + p_create_qp->sq_no_prefetch = 0; + + *ph_uvp_qp = (ib_qp_handle_t)&qp->ibv_qp; + goto end; + +err_db: + cl_spinlock_destroy(&qp->rq.lock); +err_spinlock_rq: + cl_spinlock_destroy(&qp->sq.lock); +err_spinlock_sq: + cl_free(qp->sq.wrid); + if (qp->rq.wqe_cnt) + free(qp->rq.wrid); + mlx4_free_buf(&qp->buf); +err_alloc_qp_buff: + cl_free(qp); +err_alloc_qp: + cl_free(p_umv_buf->p_inout_buf); +err_memory: +end: + return status; +} + +ib_api_status_t +mlx4_post_create_qp ( + IN const ib_pd_handle_t h_uvp_pd, + IN ib_api_status_t ioctl_status, + IN OUT ib_qp_handle_t *ph_uvp_qp, + IN ci_umv_buf_t *p_umv_buf ) +{ + struct mlx4_qp *qp = (struct mlx4_qp *)*ph_uvp_qp; + struct ibv_pd *pd = (struct ibv_pd *)h_uvp_pd; + struct ibv_context *context = pd->context; + struct ibv_create_qp_resp *p_resp; + ib_api_status_t status = IB_SUCCESS; + + CL_ASSERT(p_umv_buf && p_umv_buf->p_inout_buf); + + p_resp = p_umv_buf->p_inout_buf; + + if (IB_SUCCESS == ioctl_status) + { + // Mlx4 code: + + struct ibv_qp_cap cap; + + cap.max_recv_sge = p_resp->max_recv_sge; + cap.max_send_sge = p_resp->max_send_sge; + cap.max_recv_wr = p_resp->max_recv_wr; + cap.max_send_wr = p_resp->max_send_wr; + cap.max_inline_data = p_resp->max_inline_data; + + qp->ibv_qp.handle = p_resp->qp_handle; + qp->ibv_qp.qp_num = p_resp->qpn; + + qp->rq.wqe_cnt = cap.max_recv_wr; + qp->rq.max_gs = cap.max_recv_sge; + + /* adjust rq maxima to not exceed reported device maxima */ + cap.max_recv_wr = min((uint32_t) to_mctx(context)->max_qp_wr, cap.max_recv_wr); + cap.max_recv_sge = min((uint32_t) to_mctx(context)->max_sge, cap.max_recv_sge); + + qp->rq.max_post = cap.max_recv_wr; + //qp->rq.max_gs = cap.max_recv_sge; - RIB : add this ? 
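For readers unfamiliar with the queue-size arithmetic used in mlx4_pre_create_qp and in the capability clamping just above, the following standalone sketch (illustrative only, not part of this change) walks through the same steps: the requested depth plus the SQ headroom is rounded up to a power of two in the style of __align_queue_size(), log_sq_bb_count is derived as the base-2 log of that size, and the work-request maximum reported back by the kernel is clamped to a device limit before it is stored as max_post. The helper name, the device limit, and the example numbers here are made up for the illustration; only the arithmetic mirrors the code above.

    /* standalone sketch of the SQ/RQ sizing arithmetic; compile with any C compiler */
    #include <stdio.h>
    #include <stdint.h>

    /* same rounding rule as __align_queue_size() above: next power of two >= req */
    static int align_queue_size(int req)
    {
        int nent;

        for (nent = 1; nent < req; nent <<= 1)
            ; /* nothing */

        return nent;
    }

    int main(void)
    {
        uint32_t sq_depth = 100;                /* caller-requested SQ depth */
        uint32_t spare_wqes = (2048 >> 6) + 1;  /* 2 KB + 1 WQE headroom at wqe_shift == 6 -> 33 */
        uint32_t wqe_cnt = align_queue_size(sq_depth + spare_wqes); /* 133 -> 256 */
        uint32_t log_bb_count = 0;
        uint32_t dev_max_wr = 16351;            /* stand-in for the device max_qp_wr limit */
        uint32_t reported_max_wr = 256;         /* stand-in for the value returned by the kernel */

        /* base-2 log of the SQ size, as in the log_sq_bb_count loop above */
        while (wqe_cnt > (1u << log_bb_count))
            ++log_bb_count;                     /* 256 -> 8 */

        /* clamp the reported maximum to the device limit before using it as max_post */
        if (reported_max_wr > dev_max_wr)
            reported_max_wr = dev_max_wr;

        printf("wqe_cnt=%u log_sq_bb_count=%u max_post=%u\n",
               wqe_cnt, log_bb_count, reported_max_wr);
        return 0;
    }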
+ mlx4_set_sq_sizes(qp, &cap, qp->ibv_qp.qp_type); + + qp->doorbell_qpn = cl_hton32(qp->ibv_qp.qp_num << 8); + + if (mlx4_store_qp(to_mctx(context), qp->ibv_qp.qp_num, qp)) + { + mlx4_post_destroy_qp(*ph_uvp_qp, IB_SUCCESS); + status = IB_INSUFFICIENT_MEMORY; + } + MLX4_PRINT( TRACE_LEVEL_INFORMATION, MLX4_DBG_QP, + ("qpn %#x, buf %p, db_rec %p, sq %d:%d, rq %d:%d\n", + qp->ibv_qp.qp_num, qp->buf.buf, qp->db, + qp->sq.head, qp->sq.tail, qp->rq.head, qp->rq.tail )); + } + else + { + mlx4_post_destroy_qp(*ph_uvp_qp, IB_SUCCESS); + } + + cl_free(p_resp); + return status; +} + +ib_api_status_t +mlx4_pre_modify_qp ( + IN const ib_qp_handle_t h_uvp_qp, + IN const ib_qp_mod_t *p_modify_attr, + IN OUT ci_umv_buf_t *p_umv_buf ) +{ + ib_api_status_t status = IB_SUCCESS; + + UNREFERENCED_PARAMETER(h_uvp_qp); + UNREFERENCED_PARAMETER(p_modify_attr); + + CL_ASSERT(p_umv_buf); + + if( !p_umv_buf->p_inout_buf ) + { + p_umv_buf->p_inout_buf = cl_malloc(sizeof(struct ibv_modify_qp_resp)); + if( !p_umv_buf->p_inout_buf ) + { + status = IB_INSUFFICIENT_MEMORY; + goto err_memory; + } + } + p_umv_buf->input_size = 0; + p_umv_buf->output_size = sizeof(struct ibv_modify_qp_resp); + p_umv_buf->command = TRUE; + +err_memory: + return status; +} + +void +mlx4_post_query_qp ( + IN ib_qp_handle_t h_uvp_qp, + IN ib_api_status_t ioctl_status, + IN OUT ib_qp_attr_t *p_query_attr, + IN OUT ci_umv_buf_t *p_umv_buf ) +{ + struct mlx4_qp *qp = (struct mlx4_qp *)h_uvp_qp; + + UNREFERENCED_PARAMETER(p_umv_buf); + + if(IB_SUCCESS == ioctl_status) + { + p_query_attr->sq_max_inline = qp->max_inline_data; + p_query_attr->sq_sge = qp->sq.max_gs; + p_query_attr->sq_depth = qp->sq.max_post; + p_query_attr->rq_sge = qp->rq.max_gs; + p_query_attr->rq_depth = qp->rq.max_post; + } +} + +void +mlx4_post_modify_qp ( + IN const ib_qp_handle_t h_uvp_qp, + IN ib_api_status_t ioctl_status, + IN OUT ci_umv_buf_t *p_umv_buf ) +{ + struct ibv_qp *qp = (struct ibv_qp *)h_uvp_qp; + struct ibv_modify_qp_resp *p_resp; + + CL_ASSERT(p_umv_buf && p_umv_buf->p_inout_buf); + + p_resp = p_umv_buf->p_inout_buf; + + if (IB_SUCCESS == ioctl_status) + { + // Mlx4 code: + + if (qp->state == IBV_QPS_RESET && + p_resp->attr_mask & IBV_QP_STATE && + p_resp->qp_state == IBV_QPS_INIT) + { + mlx4_qp_init_sq_ownership(to_mqp(qp)); + } + + if (p_resp->attr_mask & IBV_QP_STATE) { + qp->state = p_resp->qp_state; + } + + if (p_resp->attr_mask & IBV_QP_STATE && + p_resp->qp_state == IBV_QPS_RESET) + { + mlx4_cq_clean(to_mcq(qp->recv_cq), qp->qp_num, + qp->srq ? 
to_msrq(qp->srq) : NULL); + if (qp->send_cq != qp->recv_cq) + mlx4_cq_clean(to_mcq(qp->send_cq), qp->qp_num, NULL); + + mlx4_init_qp_indices(to_mqp(qp)); + if (!qp->srq && qp->qp_type != IBV_QPT_XRC) + *to_mqp(qp)->db = 0; + } + } + + cl_free (p_resp); + return; +} + +static void +__mlx4_lock_cqs(struct ibv_qp *qp) +{ + struct mlx4_cq *send_cq = to_mcq(qp->send_cq); + struct mlx4_cq *recv_cq = to_mcq(qp->recv_cq); + + if (send_cq == recv_cq) + cl_spinlock_acquire(&send_cq->lock); + else if (send_cq->cqn < recv_cq->cqn) { + cl_spinlock_acquire(&send_cq->lock); + cl_spinlock_acquire(&recv_cq->lock); + } else { + cl_spinlock_acquire(&recv_cq->lock); + cl_spinlock_acquire(&send_cq->lock); + } +} + +static void +__mlx4_unlock_cqs(struct ibv_qp *qp) +{ + struct mlx4_cq *send_cq = to_mcq(qp->send_cq); + struct mlx4_cq *recv_cq = to_mcq(qp->recv_cq); + + if (send_cq == recv_cq) + cl_spinlock_release(&send_cq->lock); + else if (send_cq->cqn < recv_cq->cqn) { + cl_spinlock_release(&recv_cq->lock); + cl_spinlock_release(&send_cq->lock); + } else { + cl_spinlock_release(&send_cq->lock); + cl_spinlock_release(&recv_cq->lock); + } +} + +ib_api_status_t +mlx4_pre_destroy_qp ( + IN const ib_qp_handle_t h_uvp_qp ) +{ + struct ibv_qp *qp = (struct ibv_qp*)h_uvp_qp; + + mlx4_cq_clean(to_mcq(qp->recv_cq), qp->qp_num, + qp->srq ? to_msrq(qp->srq) : NULL); + if (qp->send_cq != qp->recv_cq) + mlx4_cq_clean(to_mcq(qp->send_cq), qp->qp_num, NULL); + + __mlx4_lock_cqs(qp); + mlx4_clear_qp(to_mctx(qp->context), qp->qp_num); + __mlx4_unlock_cqs(qp); + + return IB_SUCCESS; +} + +void +mlx4_post_destroy_qp ( + IN const ib_qp_handle_t h_uvp_qp, + IN ib_api_status_t ioctl_status ) +{ + struct ibv_qp* ibqp = (struct ibv_qp *)h_uvp_qp; + struct mlx4_qp* qp = to_mqp(ibqp); + + CL_ASSERT(h_uvp_qp); + + if (IB_SUCCESS == ioctl_status) + { + if (!ibqp->srq && ibqp->qp_type != IBV_QPT_XRC) + mlx4_free_db(to_mctx(ibqp->context), MLX4_DB_TYPE_RQ, qp->db); + + cl_spinlock_destroy(&qp->sq.lock); + cl_spinlock_destroy(&qp->rq.lock); + + MLX4_PRINT( TRACE_LEVEL_INFORMATION, MLX4_DBG_QP, + ("qpn %#x, buf %p, sq %d:%d, rq %d:%d\n", qp->ibv_qp.qp_num, qp->buf.buf, + qp->sq.head, qp->sq.tail, qp->rq.head, qp->rq.tail )); + cl_free(qp->sq.wrid); + if (qp->rq.wqe_cnt) + cl_free(qp->rq.wrid); + mlx4_free_buf(&qp->buf); + cl_free(qp); + } + else + { + __mlx4_lock_cqs(ibqp); + mlx4_store_qp(to_mctx(ibqp->context), ibqp->qp_num, qp); + __mlx4_unlock_cqs(ibqp); + } +} + +void +mlx4_nd_modify_qp ( + IN const ib_qp_handle_t h_uvp_qp, + OUT void** pp_outbuf, + OUT DWORD* p_size ) +{ + struct ibv_qp *ibv_qp = (struct ibv_qp *)h_uvp_qp; + + *(uint32_t**)pp_outbuf = (uint32_t*)&ibv_qp->state; + *p_size = sizeof(ibv_qp->state); +} + +static ib_qp_state_t __from_qp_state(enum ibv_qp_state state) +{ + switch (state) { + case IBV_QPS_RESET: return IB_QPS_RESET; + case IBV_QPS_INIT: return IB_QPS_INIT; + case IBV_QPS_RTR: return IB_QPS_RTR; + case IBV_QPS_RTS: return IB_QPS_RTS; + case IBV_QPS_SQD: return IB_QPS_SQD; + case IBV_QPS_SQE: return IB_QPS_SQERR; + case IBV_QPS_ERR: return IB_QPS_ERROR; + default: return IB_QPS_TIME_WAIT; + }; +} + +uint32_t +mlx4_nd_get_qp_state ( + IN const ib_qp_handle_t h_uvp_qp ) +{ + struct ibv_qp *ibv_qp = (struct ibv_qp *)h_uvp_qp; + + return __from_qp_state(ibv_qp->state); +} + +static uint8_t +__gid_to_index_lookup ( + IN ib_ca_attr_t *p_ca_attr, + IN uint8_t port_num, + IN uint8_t *raw_gid ) +{ + ib_gid_t *p_gid_table = NULL; + uint8_t i, index = 0; + uint16_t num_gids; + + p_gid_table = 
p_ca_attr->p_port_attr[port_num-1].p_gid_table; + CL_ASSERT (p_gid_table); + + num_gids = p_ca_attr->p_port_attr[port_num-1].num_gids; + + for (i = 0; i < num_gids; i++) + { + if (cl_memcmp (raw_gid, p_gid_table[i].raw, 16)) + { + index = i; + break; + } + } + return index; +} + +static enum ibv_rate __to_rate(uint8_t rate) +{ + if (rate == IB_PATH_RECORD_RATE_2_5_GBS) return IBV_RATE_2_5_GBPS; + if (rate == IB_PATH_RECORD_RATE_5_GBS) return IBV_RATE_5_GBPS; + if (rate == IB_PATH_RECORD_RATE_10_GBS) return IBV_RATE_10_GBPS; + if (rate == IB_PATH_RECORD_RATE_20_GBS) return IBV_RATE_20_GBPS; + if (rate == IB_PATH_RECORD_RATE_30_GBS) return IBV_RATE_30_GBPS; + if (rate == IB_PATH_RECORD_RATE_40_GBS) return IBV_RATE_40_GBPS; + if (rate == IB_PATH_RECORD_RATE_60_GBS) return IBV_RATE_60_GBPS; + if (rate == IB_PATH_RECORD_RATE_80_GBS) return IBV_RATE_80_GBPS; + if (rate == IB_PATH_RECORD_RATE_120_GBS) return IBV_RATE_120_GBPS; + return IBV_RATE_MAX; +} + +inline void +__grh_get_ver_class_flow( + IN const ib_net32_t ver_class_flow, + OUT uint8_t* const p_ver OPTIONAL, + OUT uint8_t* const p_tclass OPTIONAL, + OUT net32_t* const p_flow_lbl OPTIONAL ) +{ + ib_net32_t tmp_ver_class_flow; + + tmp_ver_class_flow = cl_ntoh32( ver_class_flow ); + + if (p_ver) + *p_ver = (uint8_t)(tmp_ver_class_flow >> 28); + + if (p_tclass) + *p_tclass = (uint8_t)(tmp_ver_class_flow >> 20); + + if (p_flow_lbl) + *p_flow_lbl = (ver_class_flow & CL_HTON32( 0x000FFFFF )); +} + +static ib_api_status_t +__to_ah ( + IN ib_ca_attr_t *p_ca_attr, + IN const ib_av_attr_t *p_av_attr, + OUT struct ibv_ah_attr *p_attr ) +{ + if (p_av_attr->port_num == 0 || + p_av_attr->port_num > p_ca_attr->num_ports) { + MLX4_PRINT(TRACE_LEVEL_WARNING ,MLX4_DBG_AV , + (" invalid port number specified (%d)\n",p_av_attr->port_num)); + return IB_INVALID_PORT; + } + + p_attr->port_num = p_av_attr->port_num; + p_attr->sl = p_av_attr->sl; + p_attr->dlid = cl_ntoh16 (p_av_attr->dlid); + p_attr->static_rate = __to_rate(p_av_attr->static_rate); + p_attr->src_path_bits = p_av_attr->path_bits; + + /* For global destination or Multicast address:*/ + if (p_av_attr->grh_valid) + { + p_attr->is_global = TRUE; + p_attr->grh.hop_limit = p_av_attr->grh.hop_limit; + __grh_get_ver_class_flow( p_av_attr->grh.ver_class_flow, NULL, + &p_attr->grh.traffic_class, &p_attr->grh.flow_label ); + p_attr->grh.sgid_index = __gid_to_index_lookup (p_ca_attr, p_av_attr->port_num, + (uint8_t *) p_av_attr->grh.src_gid.raw); + cl_memcpy (p_attr->grh.dgid.raw, p_av_attr->grh.dest_gid.raw, 16); + } + else + { + p_attr->is_global = FALSE; + } + return IB_SUCCESS; +} + +static void +__set_av_params(struct mlx4_ah *ah, struct ibv_pd *pd, struct ibv_ah_attr *attr) +{ + ah->av.port_pd = cl_hton32(to_mpd(pd)->pdn | (attr->port_num << 24)); + ah->av.g_slid = attr->src_path_bits; + ah->av.dlid = cl_hton16(attr->dlid); + if (attr->static_rate) { + ah->av.stat_rate = (uint8_t)(attr->static_rate + MLX4_STAT_RATE_OFFSET); + /* XXX check rate cap? 
*/ + } + ah->av.sl_tclass_flowlabel = cl_hton32(attr->sl << 28); + if (attr->is_global) + { + ah->av.g_slid |= 0x80; + ah->av.gid_index = attr->grh.sgid_index; + ah->av.hop_limit = attr->grh.hop_limit; + ah->av.sl_tclass_flowlabel |= + cl_hton32((attr->grh.traffic_class << 20) | + attr->grh.flow_label); + cl_memcpy(ah->av.dgid, attr->grh.dgid.raw, 16); + } +} + +ib_api_status_t +mlx4_pre_create_ah ( + IN const ib_pd_handle_t h_uvp_pd, + IN const ib_av_attr_t *p_av_attr, + IN OUT ci_umv_buf_t *p_umv_buf, + OUT ib_av_handle_t *ph_uvp_av ) +{ + struct mlx4_ah *ah; + struct ibv_ah_attr attr; + struct ibv_pd *pd = (struct ibv_pd *)h_uvp_pd; + ib_api_status_t status = IB_SUCCESS; + + UNREFERENCED_PARAMETER(p_umv_buf); + + if (pd->context->p_hca_attr == NULL) { + status = IB_ERROR; + goto end; + } + + ah = cl_malloc(sizeof *ah); + if (!ah) { + status = IB_INSUFFICIENT_MEMORY; + goto end; + } + + // sanity check + if (p_av_attr->port_num == 0 || + p_av_attr->port_num > pd->context->p_hca_attr->num_ports) + { + status = IB_INVALID_PORT; + goto end; + } + + // convert parameters + cl_memset(&attr, 0, sizeof(attr)); + status = __to_ah(pd->context->p_hca_attr, p_av_attr, &attr); + if (status) + goto end; + + ah->ibv_ah.pd = pd; + ah->ibv_ah.context = pd->context; + cl_memcpy(&ah->ibv_ah.av_attr, p_av_attr, sizeof (ib_av_attr_t)); + + cl_memset(&ah->av, 0, sizeof ah->av); + __set_av_params(ah, pd, &attr); + + *ph_uvp_av = (ib_av_handle_t)&ah->ibv_ah; + status = IB_VERBS_PROCESSING_DONE; + +end: + return status; +} + +ib_api_status_t +mlx4_pre_query_ah ( + IN const ib_av_handle_t h_uvp_av, + IN OUT ci_umv_buf_t *p_umv_buf ) +{ + UNREFERENCED_PARAMETER(h_uvp_av); + UNREFERENCED_PARAMETER(p_umv_buf); + + return IB_VERBS_PROCESSING_DONE; +} + +void +mlx4_post_query_ah ( + IN const ib_av_handle_t h_uvp_av, + IN ib_api_status_t ioctl_status, + IN OUT ib_av_attr_t *p_addr_vector, + IN OUT ib_pd_handle_t *ph_pd, + IN OUT ci_umv_buf_t *p_umv_buf ) +{ + struct ibv_ah *ah = (struct ibv_ah *)h_uvp_av; + + UNREFERENCED_PARAMETER(p_umv_buf); + + CL_ASSERT(h_uvp_av && p_addr_vector); + + if (ioctl_status == IB_SUCCESS) + { + cl_memcpy(p_addr_vector, &ah->av_attr, sizeof(ib_av_attr_t)); + if (ph_pd) + *ph_pd = (ib_pd_handle_t)ah->pd; + } +} + +ib_api_status_t +mlx4_pre_modify_ah ( + IN const ib_av_handle_t h_uvp_av, + IN const ib_av_attr_t *p_addr_vector, + IN OUT ci_umv_buf_t *p_umv_buf ) +{ + struct ibv_ah *ah = (struct ibv_ah *)h_uvp_av; + struct ibv_ah_attr attr; + ib_api_status_t status; + + UNREFERENCED_PARAMETER(p_umv_buf); + + CL_ASSERT (h_uvp_av); + + status = __to_ah(ah->context->p_hca_attr, p_addr_vector, &attr); + if (status) + return status; + + __set_av_params(to_mah(ah), ah->pd, &attr); + cl_memcpy(&ah->av_attr, p_addr_vector, sizeof(ib_av_attr_t)); + + return IB_VERBS_PROCESSING_DONE; +} + +ib_api_status_t +mlx4_pre_destroy_ah ( + IN const ib_av_handle_t h_uvp_av ) +{ + struct ibv_ah *ah = (struct ibv_ah *)h_uvp_av; + + CL_ASSERT(ah); + + cl_free(to_mah(ah)); + + return IB_VERBS_PROCESSING_DONE; +} + +#ifdef XRC_SUPPORT +ib_api_status_t +mlx4_pre_create_xrc_srq ( + IN const ib_pd_handle_t h_uvp_pd, + IN const ib_xrcd_handle_t h_uvp_xrcd, + IN const ib_srq_attr_t *p_srq_attr, + IN OUT ci_umv_buf_t *p_umv_buf, + OUT ib_srq_handle_t *ph_uvp_srq ) +{ + struct mlx4_srq *srq; + struct ibv_create_srq *p_create_srq; + struct ibv_pd *pd = (struct ibv_pd *)h_uvp_pd; + struct ibv_xrc_domain *xrc_domain = (struct ibv_xrc_domain *)h_uvp_xrcd; + ib_api_status_t status = IB_SUCCESS; + size_t size = max( sizeof(struct 
ibv_create_srq), sizeof(struct ibv_create_srq_resp) ); + + CL_ASSERT(p_umv_buf); + + if( !p_umv_buf->p_inout_buf ) + { + p_umv_buf->p_inout_buf = cl_malloc( size ); + if( !p_umv_buf->p_inout_buf ) + { + status = IB_INSUFFICIENT_MEMORY; + goto err_memory; + } + } + p_umv_buf->input_size = sizeof(struct ibv_create_srq); + p_umv_buf->output_size = sizeof(struct ibv_create_srq_resp); + p_umv_buf->command = TRUE; + + p_create_srq = p_umv_buf->p_inout_buf; + + // Mlx4 code: + + /* Sanity check SRQ size before proceeding */ + if (p_srq_attr->max_wr > 1 << 16 || p_srq_attr->max_sge > 64) + { + status = IB_INVALID_PARAMETER; + goto err_params; + } + + srq = cl_malloc(sizeof *srq); + if (!srq) { + status = IB_INSUFFICIENT_MEMORY; + goto err_alloc_srq; + } + + if (cl_spinlock_init(&srq->lock)) { + status = IB_INSUFFICIENT_MEMORY; + goto err_lock; + } + + srq->ibv_srq.pd = pd; + srq->ibv_srq.context = pd->context; + + srq->max = __align_queue_size(p_srq_attr->max_wr + 1); + srq->max_gs = p_srq_attr->max_sge; + srq->counter = 0; + + if (mlx4_alloc_srq_buf(pd, (struct ibv_srq_attr *)p_srq_attr, srq)) + { + status = IB_INSUFFICIENT_MEMORY; + goto err_alloc_buf; + } + + srq->db = mlx4_alloc_db(to_mctx(pd->context), MLX4_DB_TYPE_RQ); + if (!srq->db) + goto err_alloc_db; + + *srq->db = 0; + + // fill the parameters for ioctl + p_create_srq->buf_addr = (uintptr_t) srq->buf.buf; + p_create_srq->db_addr = (uintptr_t) srq->db; + p_create_srq->pd_handle = pd->handle; + p_create_srq->max_wr = p_srq_attr->max_wr; + p_create_srq->max_sge = p_srq_attr->max_sge; + p_create_srq->srq_limit = p_srq_attr->srq_limit; + + *ph_uvp_srq = (ib_srq_handle_t)&srq->ibv_srq; + goto end; + +err_alloc_db: + cl_free(srq->wrid); + mlx4_free_buf(&srq->buf); +err_alloc_buf: + cl_spinlock_destroy(&srq->lock); +err_lock: + cl_free(srq); +err_alloc_srq: + cl_free(p_umv_buf->p_inout_buf); +err_params: err_memory: +end: + return status; +} + +ib_api_status_t +mlx4_post_create_xrc_srq ( + IN const ib_pd_handle_t h_uvp_pd, + IN ib_api_status_t ioctl_status, + IN OUT ib_srq_handle_t *ph_uvp_srq, + IN ci_umv_buf_t *p_umv_buf ) +{ + struct mlx4_srq *srq = (struct mlx4_srq *)*ph_uvp_srq; + struct ibv_create_srq_resp *p_resp; + ib_api_status_t status = IB_SUCCESS; + + UNREFERENCED_PARAMETER(h_uvp_pd); + + CL_ASSERT(p_umv_buf && p_umv_buf->p_inout_buf); + + p_resp = p_umv_buf->p_inout_buf; + + if (IB_SUCCESS == ioctl_status) + { + // Mlx4 code: + + srq->ibv_srq.xrc_srq_num = srq->srqn = p_resp->srqn; + srq->ibv_srq.handle = p_resp->srq_handle; + + srq->max = p_resp->max_wr; + srq->max_gs = p_resp->max_sge; + + if (mlx4_store_xrc_srq(to_mctx(pd->context), srq->ibv_srq.xrc_srq_num, srq)) + { + mlx4_post_destroy_srq(*ph_uvp_srq, IB_SUCCESS); + status = IB_INSUFFICIENT_MEMORY; + } + } + else + { + mlx4_post_destroy_srq (*ph_uvp_srq, IB_SUCCESS); + } + + cl_free( p_resp ); + return status; +} + +ib_api_status_t +mlx4_pre_open_xrc_domain ( + IN const ib_ca_handle_t h_uvp_ca, + IN const uint32_t oflag, + IN OUT ci_umv_buf_t *p_umv_buf, + OUT ib_xrcd_handle_t *ph_uvp_xrcd ) +{ + struct mlx4_xrc_domain *xrcd; + struct ibv_context * context = (struct ibv_context *)h_uvp_ca; + struct ibv_open_xrc_domain *p_open_xrcd; + ib_api_status_t status = IB_SUCCESS; + int size = max( sizeof(struct ibv_open_xrc_domain), sizeof(struct ibv_open_xrc_domain_resp) ); + + CL_ASSERT(h_uvp_ca && p_umv_buf); + + if( !p_umv_buf->p_inout_buf ) + { + p_umv_buf->p_inout_buf = cl_malloc( size ); + if( !p_umv_buf->p_inout_buf ) + { + status = IB_INSUFFICIENT_MEMORY; + goto err_umv_buf; 
+ } + } + p_umv_buf->input_size = sizeof(struct ibv_open_xrc_domain); + p_umv_buf->output_size = sizeof(struct ibv_open_xrc_domain_resp); + p_umv_buf->command = TRUE; + + p_open_xrcd = p_umv_buf->p_inout_buf; + + // Mlx4 code: + + xrcd = cl_malloc(sizeof *xrcd); + if (!xrcd) { + status = IB_INSUFFICIENT_MEMORY; + goto err_xrc; + } + + xrcd->ibv_xrcd.context = context; + + p_open_xrcd->oflags = oflag; + + *ph_uvp_xrcd = (struct ibv_xrc_domain *)&xrcd->ibv_xrcd; + goto end; + +err_xrc: + cl_free(p_umv_buf->p_inout_buf); +err_umv_buf: +end: + return status; +} + +void +mlx4_post_open_xrc_domain ( + IN const ib_ca_handle_t h_uvp_ca, + IN ib_api_status_t ioctl_status, + IN OUT ib_xrcd_handle_t *ph_uvp_xrcd, + IN ci_umv_buf_t *p_umv_buf ) +{ + struct ibv_xrc_domain *xrcd = (struct ibv_xrc_domain *)*ph_uvp_xrcd; + struct ibv_open_xrc_domain_resp *p_resp; + + UNREFERENCED_PARAMETER(h_uvp_ca); + + CL_ASSERT(p_umv_buf && p_umv_buf->p_inout_buf); + + p_resp = p_umv_buf->p_inout_buf; + + if (IB_SUCCESS == ioctl_status) + { + // Mlx4 code: + + xrcd->handle = p_resp->xrcd_handle; + to_mxrcd(xrcd)->xrcdn = p_resp->xrcdn; + } + else + { + cl_free(to_mxrcd(xrcd)); + } + + cl_free(p_resp); + return; +} + +void +mlx4_post_close_xrc_domain ( + IN const ib_xrcd_handle_t h_uvp_xrcd, + IN ib_api_status_t ioctl_status ) +{ + struct ibv_xrc_domain *xrdc = (struct ibv_xrc_domain *)h_uvp_xrcd; + + CL_ASSERT(xrdc); + + if (IB_SUCCESS == ioctl_status) { + cl_free(to_mxrcd(xrdc)); + } +} +#endif diff --git a/trunk/hw/mthca/kernel/hca_data.h b/trunk/hw/mthca/kernel/hca_data.h index 8ebcda94..91432d80 100644 --- a/trunk/hw/mthca/kernel/hca_data.h +++ b/trunk/hw/mthca/kernel/hca_data.h @@ -220,7 +220,7 @@ typedef struct _mlnx_hca_t { #ifdef WIN_TO_BE_REMOVED // removed as it is found in p_ext->cl_ext.p_pdo - const void* VOID_PTR64 p_dev_obj; // Driver PDO + const void* p_dev_obj; // Driver PDO #endif } mlnx_hca_t; diff --git a/trunk/hw/mthca/kernel/hca_mcast.c b/trunk/hw/mthca/kernel/hca_mcast.c index f7c744ce..71642814 100644 --- a/trunk/hw/mthca/kernel/hca_mcast.c +++ b/trunk/hw/mthca/kernel/hca_mcast.c @@ -114,7 +114,7 @@ mlnx_attach_mcast ( cl_ntoh64(*(uint64_t*)&mcast_p->mcast_gid.raw[8] ))); // return the result - if (ph_mcast) *ph_mcast = (ib_mcast_handle_t VOID_PTR64)mcast_p; + if (ph_mcast) *ph_mcast = (ib_mcast_handle_t)mcast_p; status = IB_SUCCESS; goto end; diff --git a/trunk/hw/mthca/kernel/hca_memory.c b/trunk/hw/mthca/kernel/hca_memory.c index 434e5fb2..fdb626f7 100644 --- a/trunk/hw/mthca/kernel/hca_memory.c +++ b/trunk/hw/mthca/kernel/hca_memory.c @@ -101,7 +101,7 @@ mlnx_register_mr ( // results *p_lkey = mr_p->lkey; *p_rkey = cl_hton32( mr_p->rkey ); - if (ph_mr) *ph_mr = (ib_mr_handle_t VOID_PTR64)mr_p; + if (ph_mr) *ph_mr = (ib_mr_handle_t)mr_p; status = IB_SUCCESS; err_reg_mr: @@ -184,7 +184,7 @@ mlnx_register_pmr ( // results done: - if (ph_mr) *ph_mr = (ib_mr_handle_t VOID_PTR64)mr_p; + if (ph_mr) *ph_mr = (ib_mr_handle_t)mr_p; *p_lkey = mr_p->lkey; *p_rkey = cl_hton32( mr_p->rkey ); //NB: p_vaddr was not changed @@ -375,7 +375,7 @@ mlnx_alloc_fmr( } // results - if (ph_fmr) *ph_fmr = (mlnx_fmr_handle_t VOID_PTR64)fmr_p; + if (ph_fmr) *ph_fmr = (mlnx_fmr_handle_t)fmr_p; status = IB_SUCCESS; err_alloc_fmr: diff --git a/trunk/hw/mthca/kernel/hca_verbs.c b/trunk/hw/mthca/kernel/hca_verbs.c index 0ca785dc..64d479e9 100644 --- a/trunk/hw/mthca/kernel/hca_verbs.c +++ b/trunk/hw/mthca/kernel/hca_verbs.c @@ -260,7 +260,7 @@ mlnx_query_ca ( // get gids, using cache for (i=0; i < 
hca_ports[port_num].gid_tbl_len; ++i) { - union ib_gid * VOID_PTR64 gid = (union ib_gid *)&p_ca_attr->p_port_attr[port_num].p_gid_table[i]; + union ib_gid * gid = (union ib_gid *)&p_ca_attr->p_port_attr[port_num].p_gid_table[i]; err = ib_get_cached_gid( ib_dev, port_num + start_port(ib_dev), i, (union ib_gid *)gid ); //TODO: do we need to convert gids to little endian if (err) { @@ -486,7 +486,7 @@ done: cl_spinlock_release( &ext_p->uctx_lock ); // return the result - if (ph_um_ca) *ph_um_ca = (ib_ca_handle_t VOID_PTR64)p_context; + if (ph_um_ca) *ph_um_ca = (ib_ca_handle_t)p_context; status = IB_SUCCESS; goto end; @@ -575,7 +575,7 @@ mlnx_allocate_pd ( } // return the result - if (ph_pd) *ph_pd = (ib_pd_handle_t VOID_PTR64)ib_pd_p; + if (ph_pd) *ph_pd = (ib_pd_handle_t)ib_pd_p; status = IB_SUCCESS; @@ -672,7 +672,7 @@ mlnx_create_av ( } // return the result - if (ph_av) *ph_av = (ib_av_handle_t VOID_PTR64)ib_av_p; + if (ph_av) *ph_av = (ib_av_handle_t)ib_av_p; status = IB_SUCCESS; @@ -735,7 +735,7 @@ mlnx_query_av ( #endif // results - *ph_pd = (ib_pd_handle_t VOID_PTR64)ib_ah_p->pd; + *ph_pd = (ib_pd_handle_t)ib_ah_p->pd; err_conv_mthca_av: err_user_unsupported: @@ -885,7 +885,7 @@ mlnx_create_srq ( srq_p->srq_context = (void*)srq_context; // return the result - if (ph_srq) *ph_srq = (ib_srq_handle_t VOID_PTR64)srq_p; + if (ph_srq) *ph_srq = (ib_srq_handle_t)srq_p; status = IB_SUCCESS; @@ -1065,13 +1065,13 @@ _create_qp ( // Query QP to obtain requested attributes if (p_qp_attr) { - status = mlnx_query_qp ((ib_qp_handle_t VOID_PTR64)ib_qp_p, p_qp_attr, p_umv_buf); + status = mlnx_query_qp ((ib_qp_handle_t)ib_qp_p, p_qp_attr, p_umv_buf); if (status != IB_SUCCESS) goto err_query_qp; } // return the results - if (ph_qp) *ph_qp = (ib_qp_handle_t VOID_PTR64)ib_qp_p; + if (ph_qp) *ph_qp = (ib_qp_handle_t)ib_qp_p; status = IB_SUCCESS; goto end; @@ -1195,7 +1195,7 @@ mlnx_modify_qp ( // Query QP to obtain requested attributes query_qp: if (p_qp_attr) { - status = mlnx_query_qp ((ib_qp_handle_t VOID_PTR64)ib_qp_p, p_qp_attr, p_umv_buf); + status = mlnx_query_qp ((ib_qp_handle_t)ib_qp_p, p_qp_attr, p_umv_buf); if (status != IB_SUCCESS) goto err_query_qp; } @@ -1282,7 +1282,7 @@ mlnx_query_qp ( // fill the structure //TODO: this function is to be implemented via ibv_query_qp, which is not supported now - p_qp_attr->h_pd = (ib_pd_handle_t VOID_PTR64)qp_p->ibqp.pd; + p_qp_attr->h_pd = (ib_pd_handle_t)qp_p->ibqp.pd; p_qp_attr->qp_type = qp_p->ibqp.qp_type; p_qp_attr->sq_max_inline = qp_p->qp_init_attr.cap.max_inline_data; p_qp_attr->sq_depth = qp_p->qp_init_attr.cap.max_send_wr; @@ -1290,8 +1290,8 @@ mlnx_query_qp ( p_qp_attr->sq_sge = qp_p->qp_init_attr.cap.max_send_sge; p_qp_attr->rq_sge = qp_p->qp_init_attr.cap.max_recv_sge; p_qp_attr->resp_res = qp_p->resp_depth; - p_qp_attr->h_sq_cq = (ib_cq_handle_t VOID_PTR64)qp_p->ibqp.send_cq; - p_qp_attr->h_rq_cq = (ib_cq_handle_t VOID_PTR64)qp_p->ibqp.recv_cq; + p_qp_attr->h_sq_cq = (ib_cq_handle_t)qp_p->ibqp.send_cq; + p_qp_attr->h_rq_cq = (ib_cq_handle_t)qp_p->ibqp.recv_cq; p_qp_attr->sq_signaled = qp_p->sq_policy == IB_SIGNAL_ALL_WR; p_qp_attr->state = mlnx_qps_to_ibal( qp_p->state ); p_qp_attr->num = cl_hton32(qp_p->ibqp.qp_num); @@ -1422,7 +1422,7 @@ mlnx_create_cq ( // *p_size = *p_size; // return the same value *p_size = ib_cq_p->cqe; - if (ph_cq) *ph_cq = (ib_cq_handle_t VOID_PTR64)cq_p; + if (ph_cq) *ph_cq = (ib_cq_handle_t)cq_p; status = IB_SUCCESS; diff --git a/trunk/hw/mthca/kernel/mthca_mad.c b/trunk/hw/mthca/kernel/mthca_mad.c index 
25969616..f001805d 100644 --- a/trunk/hw/mthca/kernel/mthca_mad.c +++ b/trunk/hw/mthca/kernel/mthca_mad.c @@ -1,293 +1,291 @@ -/* - * Copyright (c) 2004 Topspin Communications. All rights reserved. - * Copyright (c) 2005 Mellanox Technologies. All rights reserved. - * Copyright (c) 2004 Voltaire, Inc. All rights reserved. - * - * This software is available to you under a choice of one of two - * licenses. You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - * - * $Id$ - */ - -#include -#include -#include - -#include "mthca_dev.h" -#if defined(EVENT_TRACING) -#ifdef offsetof -#undef offsetof -#endif -#include "mthca_mad.tmh" -#endif -#include "mthca_cmd.h" - -enum { - MTHCA_VENDOR_CLASS1 = 0x9, - MTHCA_VENDOR_CLASS2 = 0xa -}; - -struct mthca_trap_mad { - struct scatterlist sg; -}; - -static void update_sm_ah(struct mthca_dev *dev, - u8 port_num, u16 lid, u8 sl) -{ - struct ib_ah *new_ah; - struct ib_ah_attr ah_attr; - SPIN_LOCK_PREP(lh); - - if (!dev->send_agent[port_num - 1][0]) - return; - - RtlZeroMemory(&ah_attr, sizeof ah_attr); - ah_attr.dlid = lid; - ah_attr.sl = sl; - ah_attr.port_num = port_num; - - new_ah = ibv_create_ah(dev->send_agent[port_num - 1][0]->qp->pd, - &ah_attr, NULL, NULL); - if (IS_ERR(new_ah)) - return; - - spin_lock_irqsave(&dev->sm_lock, &lh); - if (dev->sm_ah[port_num - 1]) { - ibv_destroy_ah(dev->sm_ah[port_num - 1]); - } - dev->sm_ah[port_num - 1] = new_ah; - spin_unlock_irqrestore(&lh); -} - -/* - * Snoop SM MADs for port info and P_Key table sets, so we can - * synthesize LID change and P_Key change events. 
- */ -static void smp_snoop(struct ib_device *ibdev, - u8 port_num, - struct ib_mad *mad) -{ - struct ib_event event; - - if ((mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED || - mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) && - mad->mad_hdr.method == IB_MGMT_METHOD_SET) { - if (mad->mad_hdr.attr_id == IB_SMP_ATTR_PORT_INFO) { - update_sm_ah(to_mdev(ibdev), port_num, - cl_ntoh16(*(__be16 *) (mad->data + 58)), - (*(u8 *) (mad->data + 76)) & 0xf); - - event.device = ibdev; - event.event = IB_EVENT_LID_CHANGE; - event.element.port_num = port_num; - ib_dispatch_event(&event); - } - - if (mad->mad_hdr.attr_id == IB_SMP_ATTR_PKEY_TABLE) { - event.device = ibdev; - event.event = IB_EVENT_PKEY_CHANGE; - event.element.port_num = port_num; - ib_dispatch_event(&event); - } - } -} - -static void forward_trap(struct mthca_dev *dev, - u8 port_num, - struct ib_mad *mad) -{ - int qpn = mad->mad_hdr.mgmt_class != IB_MGMT_CLASS_SUBN_LID_ROUTED; - struct mthca_trap_mad *tmad; - struct ib_sge gather_list; - struct _ib_send_wr wr; - struct ib_mad_agent *agent = dev->send_agent[port_num - 1][qpn]; - int ret; - SPIN_LOCK_PREP(lh); - - /* fill the template */ - wr.ds_array = (ib_local_ds_t* VOID_PTR64)(void*)&gather_list; - wr.num_ds = 1; - wr.wr_type = WR_SEND; - wr.send_opt = IB_SEND_OPT_SIGNALED; - wr.dgrm.ud.remote_qp = cl_hton32(qpn); - wr.dgrm.ud.remote_qkey = qpn ? IB_QP1_QKEY : 0; - - if (agent) { - tmad = kmalloc(sizeof *tmad, GFP_KERNEL); - if (!tmad) - return; - - alloc_dma_zmem(dev, sizeof *mad, &tmad->sg); - if (!tmad->sg.page) { - kfree(tmad); - return; - } - - memcpy(tmad->sg.page, mad, sizeof *mad); - - wr.dgrm.ud.rsvd = (void* VOID_PTR64)&((struct ib_mad *)tmad->sg.page)->mad_hdr; - wr.wr_id = (u64)(ULONG_PTR)tmad; - gather_list.addr = tmad->sg.dma_address; - gather_list.length = tmad->sg.length; - gather_list.lkey = to_mpd(agent->qp->pd)->ntmr.ibmr.lkey; - - /* - * We rely here on the fact that MLX QPs don't use the - * address handle after the send is posted (this is - * wrong following the IB spec strictly, but we know - * it's OK for our devices). - */ - spin_lock_irqsave(&dev->sm_lock, &lh); - wr.dgrm.ud.h_av = (ib_av_handle_t VOID_PTR64)dev->sm_ah[port_num - 1]; - if (wr.dgrm.ud.h_av) { - HCA_PRINT( TRACE_LEVEL_ERROR ,HCA_DBG_MAD ,(" ib_post_send_mad not ported \n" )); - ret = -EINVAL; - } - else - ret = -EINVAL; - spin_unlock_irqrestore(&lh); - - if (ret) { - free_dma_mem_map(dev, &tmad->sg, PCI_DMA_BIDIRECTIONAL ); - kfree(tmad); - } - } -} - -int mthca_process_mad(struct ib_device *ibdev, - int mad_flags, - u8 port_num, - struct _ib_wc *in_wc, - struct _ib_grh *in_grh, - struct ib_mad *in_mad, - struct ib_mad *out_mad) -{ - int err; - u8 status; - u16 slid = in_wc ? 
in_wc->recv.ud.remote_lid : cl_ntoh16(IB_LID_PERMISSIVE); - - HCA_PRINT( TRACE_LEVEL_VERBOSE ,HCA_DBG_MAD ,("in: Class %02x, Method %02x, AttrId %x, AttrMod %x, ClSpec %x, Tid %I64x\n", - (u32)in_mad->mad_hdr.mgmt_class, (u32)in_mad->mad_hdr.method, - (u32)in_mad->mad_hdr.attr_id, in_mad->mad_hdr.attr_mod, - (u32)in_mad->mad_hdr.class_specific, in_mad->mad_hdr.tid )); - - /* Forward locally generated traps to the SM */ - if (in_mad->mad_hdr.method == IB_MGMT_METHOD_TRAP && - slid == 0) { - forward_trap(to_mdev(ibdev), port_num, in_mad); - HCA_PRINT( TRACE_LEVEL_VERBOSE ,HCA_DBG_MAD ,("Not sent, but locally forwarded\n")); - return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED; - } - - /* - * Only handle SM gets, sets and trap represses for SM class - * - * Only handle PMA and Mellanox vendor-specific class gets and - * sets for other classes. - */ - if (in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED || - in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) { - - if (in_mad->mad_hdr.method != IB_MGMT_METHOD_GET && - in_mad->mad_hdr.method != IB_MGMT_METHOD_SET && - in_mad->mad_hdr.method != IB_MGMT_METHOD_TRAP_REPRESS) { - HCA_PRINT( TRACE_LEVEL_VERBOSE,HCA_DBG_MAD,(" Skip some methods. Nothing done !\n")); - return IB_MAD_RESULT_SUCCESS; - } - - /* - * Don't process SMInfo queries or vendor-specific - * MADs -- the SMA can't handle them. - */ - if (in_mad->mad_hdr.attr_id == IB_SMP_ATTR_SM_INFO || - ((in_mad->mad_hdr.attr_id & IB_SMP_ATTR_VENDOR_MASK) == - IB_SMP_ATTR_VENDOR_MASK)) { - HCA_PRINT( TRACE_LEVEL_VERBOSE ,HCA_DBG_MAD ,("Skip SMInfo queries or vendor-specific MADs. Nothing done !\n")); - return IB_MAD_RESULT_SUCCESS; - } - } - else { - if (in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_PERF_MGMT || - in_mad->mad_hdr.mgmt_class == MTHCA_VENDOR_CLASS1 || - in_mad->mad_hdr.mgmt_class == MTHCA_VENDOR_CLASS2) { - - if (in_mad->mad_hdr.method != IB_MGMT_METHOD_GET && - in_mad->mad_hdr.method != IB_MGMT_METHOD_SET) { - HCA_PRINT( TRACE_LEVEL_VERBOSE ,HCA_DBG_MAD ,("Skip some management methods. Nothing done !\n")); - return IB_MAD_RESULT_SUCCESS; - } - } - else { - HCA_PRINT( TRACE_LEVEL_VERBOSE ,HCA_DBG_MAD ,("Skip IB_MGMT_CLASS_PERF_MGMT et al. 
Nothing done !\n")); - return IB_MAD_RESULT_SUCCESS; - } - } - - // send MAD - err = mthca_MAD_IFC(to_mdev(ibdev), - mad_flags & IB_MAD_IGNORE_MKEY, - mad_flags & IB_MAD_IGNORE_BKEY, - port_num, in_wc, in_grh, in_mad, out_mad, - &status); - if (err) { - HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_MAD ,("MAD_IFC failed\n")); - return IB_MAD_RESULT_FAILURE; - } - if (status == MTHCA_CMD_STAT_BAD_PKT) - return IB_MAD_RESULT_SUCCESS; - if (status) { - HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_MAD ,("MAD_IFC returned status %02x\n", status)); - return IB_MAD_RESULT_FAILURE; - } - - if (!out_mad->mad_hdr.status) - smp_snoop(ibdev, port_num, in_mad); - - HCA_PRINT( TRACE_LEVEL_VERBOSE ,HCA_DBG_MAD,("out: Class %02x, Method %02x, AttrId %x, AttrMod %x, ClSpec %x, Tid %I64x, Status %x\n", - (u32)out_mad->mad_hdr.mgmt_class, (u32)out_mad->mad_hdr.method, - (u32)out_mad->mad_hdr.attr_id, out_mad->mad_hdr.attr_mod, - (u32)out_mad->mad_hdr.class_specific, out_mad->mad_hdr.tid, - (u32)out_mad->mad_hdr.status )); - - if (in_mad->mad_hdr.method == IB_MGMT_METHOD_TRAP_REPRESS) { - /* no response for trap repress */ - return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED; - } - - return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY; -} - -static void send_handler(struct ib_mad_agent *agent, - struct ib_mad_send_wc *mad_send_wc) -{ - struct mthca_trap_mad *tmad = - (void *) (ULONG_PTR) mad_send_wc->wr_id; - - free_dma_mem_map(agent->device->mdev, &tmad->sg, PCI_DMA_BIDIRECTIONAL ); - kfree(tmad); -} +/* + * Copyright (c) 2004 Topspin Communications. All rights reserved. + * Copyright (c) 2005 Mellanox Technologies. All rights reserved. + * Copyright (c) 2004 Voltaire, Inc. All rights reserved. + * Portions Copyright (c) 2008 Microsoft Corporation. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ * + * $Id$ + */ + +#include +#include +#include + +#include "mthca_dev.h" +#if defined(EVENT_TRACING) +#ifdef offsetof +#undef offsetof +#endif +#include "mthca_mad.tmh" +#endif +#include "mthca_cmd.h" + +enum { + MTHCA_VENDOR_CLASS1 = 0x9, + MTHCA_VENDOR_CLASS2 = 0xa +}; + +struct mthca_trap_mad { + struct scatterlist sg; +}; + +static void update_sm_ah(struct mthca_dev *dev, + u8 port_num, u16 lid, u8 sl) +{ + struct ib_ah *new_ah; + struct ib_ah_attr ah_attr; + SPIN_LOCK_PREP(lh); + + if (!dev->send_agent[port_num - 1][0]) + return; + + RtlZeroMemory(&ah_attr, sizeof ah_attr); + ah_attr.dlid = lid; + ah_attr.sl = sl; + ah_attr.port_num = port_num; + + new_ah = ibv_create_ah(dev->send_agent[port_num - 1][0]->qp->pd, + &ah_attr, NULL, NULL); + if (IS_ERR(new_ah)) + return; + + spin_lock_irqsave(&dev->sm_lock, &lh); + if (dev->sm_ah[port_num - 1]) { + ibv_destroy_ah(dev->sm_ah[port_num - 1]); + } + dev->sm_ah[port_num - 1] = new_ah; + spin_unlock_irqrestore(&lh); +} + +/* + * Snoop SM MADs for port info and P_Key table sets, so we can + * synthesize LID change and P_Key change events. + */ +static void smp_snoop(struct ib_device *ibdev, + u8 port_num, + struct ib_mad *mad) +{ + struct ib_event event; + + if ((mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED || + mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) && + mad->mad_hdr.method == IB_MGMT_METHOD_SET) { + if (mad->mad_hdr.attr_id == IB_SMP_ATTR_PORT_INFO) { + update_sm_ah(to_mdev(ibdev), port_num, + cl_ntoh16(*(__be16 *) (mad->data + 58)), + (*(u8 *) (mad->data + 76)) & 0xf); + + event.device = ibdev; + event.event = IB_EVENT_LID_CHANGE; + event.element.port_num = port_num; + ib_dispatch_event(&event); + } + + if (mad->mad_hdr.attr_id == IB_SMP_ATTR_PKEY_TABLE) { + event.device = ibdev; + event.event = IB_EVENT_PKEY_CHANGE; + event.element.port_num = port_num; + ib_dispatch_event(&event); + } + } +} + +static void forward_trap(struct mthca_dev *dev, + u8 port_num, + struct ib_mad *mad) +{ + int qpn = mad->mad_hdr.mgmt_class != IB_MGMT_CLASS_SUBN_LID_ROUTED; + struct mthca_trap_mad *tmad; + struct ib_sge gather_list; + struct _ib_send_wr wr; + struct ib_mad_agent *agent = dev->send_agent[port_num - 1][qpn]; + int ret; + SPIN_LOCK_PREP(lh); + + /* fill the template */ + wr.ds_array = (ib_local_ds_t*)(void*)&gather_list; + wr.num_ds = 1; + wr.wr_type = WR_SEND; + wr.send_opt = IB_SEND_OPT_SIGNALED; + wr.dgrm.ud.remote_qp = cl_hton32(qpn); + wr.dgrm.ud.remote_qkey = qpn ? IB_QP1_QKEY : 0; + + if (agent) { + tmad = kmalloc(sizeof *tmad, GFP_KERNEL); + if (!tmad) + return; + + alloc_dma_zmem(dev, sizeof *mad, &tmad->sg); + if (!tmad->sg.page) { + kfree(tmad); + return; + } + + memcpy(tmad->sg.page, mad, sizeof *mad); + + wr.dgrm.ud.rsvd = (void*)&((struct ib_mad *)tmad->sg.page)->mad_hdr; + wr.wr_id = (u64)(ULONG_PTR)tmad; + gather_list.addr = tmad->sg.dma_address; + gather_list.length = tmad->sg.length; + gather_list.lkey = to_mpd(agent->qp->pd)->ntmr.ibmr.lkey; + + /* + * We rely here on the fact that MLX QPs don't use the + * address handle after the send is posted (this is + * wrong following the IB spec strictly, but we know + * it's OK for our devices). 
+ */ + spin_lock_irqsave(&dev->sm_lock, &lh); + wr.dgrm.ud.h_av = (ib_av_handle_t)dev->sm_ah[port_num - 1]; + if (wr.dgrm.ud.h_av) { + HCA_PRINT( TRACE_LEVEL_ERROR ,HCA_DBG_MAD ,(" ib_post_send_mad not ported \n" )); + ret = -EINVAL; + } + else + ret = -EINVAL; + spin_unlock_irqrestore(&lh); + + if (ret) { + free_dma_mem_map(dev, &tmad->sg, PCI_DMA_BIDIRECTIONAL ); + kfree(tmad); + } + } +} + +int mthca_process_mad(struct ib_device *ibdev, + int mad_flags, + u8 port_num, + struct _ib_wc *in_wc, + struct _ib_grh *in_grh, + struct ib_mad *in_mad, + struct ib_mad *out_mad) +{ + int err; + u8 status; + u16 slid = in_wc ? in_wc->recv.ud.remote_lid : cl_ntoh16(IB_LID_PERMISSIVE); + + HCA_PRINT( TRACE_LEVEL_VERBOSE ,HCA_DBG_MAD ,("in: Class %02x, Method %02x, AttrId %x, AttrMod %x, ClSpec %x, Tid %I64x\n", + (u32)in_mad->mad_hdr.mgmt_class, (u32)in_mad->mad_hdr.method, + (u32)in_mad->mad_hdr.attr_id, in_mad->mad_hdr.attr_mod, + (u32)in_mad->mad_hdr.class_specific, in_mad->mad_hdr.tid )); + + /* Forward locally generated traps to the SM */ + if (in_mad->mad_hdr.method == IB_MGMT_METHOD_TRAP && + slid == 0) { + forward_trap(to_mdev(ibdev), port_num, in_mad); + HCA_PRINT( TRACE_LEVEL_VERBOSE ,HCA_DBG_MAD ,("Not sent, but locally forwarded\n")); + return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED; + } + + /* + * Only handle SM gets, sets and trap represses for SM class + * + * Only handle PMA and Mellanox vendor-specific class gets and + * sets for other classes. + */ + if (in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED || + in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) { + + if (in_mad->mad_hdr.method != IB_MGMT_METHOD_GET && + in_mad->mad_hdr.method != IB_MGMT_METHOD_SET && + in_mad->mad_hdr.method != IB_MGMT_METHOD_TRAP_REPRESS) { + HCA_PRINT( TRACE_LEVEL_VERBOSE,HCA_DBG_MAD,(" Skip some methods. Nothing done !\n")); + return IB_MAD_RESULT_SUCCESS; + } + + /* + * Don't process SMInfo queries or vendor-specific + * MADs -- the SMA can't handle them. + */ + if (in_mad->mad_hdr.attr_id == IB_SMP_ATTR_SM_INFO || + ((in_mad->mad_hdr.attr_id & IB_SMP_ATTR_VENDOR_MASK) == + IB_SMP_ATTR_VENDOR_MASK)) { + HCA_PRINT( TRACE_LEVEL_VERBOSE ,HCA_DBG_MAD ,("Skip SMInfo queries or vendor-specific MADs. Nothing done !\n")); + return IB_MAD_RESULT_SUCCESS; + } + } + else { + if (in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_PERF_MGMT || + in_mad->mad_hdr.mgmt_class == MTHCA_VENDOR_CLASS1 || + in_mad->mad_hdr.mgmt_class == MTHCA_VENDOR_CLASS2) { + + if (in_mad->mad_hdr.method != IB_MGMT_METHOD_GET && + in_mad->mad_hdr.method != IB_MGMT_METHOD_SET) { + HCA_PRINT( TRACE_LEVEL_VERBOSE ,HCA_DBG_MAD ,("Skip some management methods. Nothing done !\n")); + return IB_MAD_RESULT_SUCCESS; + } + } + else { + HCA_PRINT( TRACE_LEVEL_VERBOSE ,HCA_DBG_MAD ,("Skip IB_MGMT_CLASS_PERF_MGMT et al. 
Nothing done !\n")); + return IB_MAD_RESULT_SUCCESS; + } + } + + // send MAD + err = mthca_MAD_IFC(to_mdev(ibdev), + mad_flags & IB_MAD_IGNORE_MKEY, + mad_flags & IB_MAD_IGNORE_BKEY, + port_num, in_wc, in_grh, in_mad, out_mad, + &status); + if (err) { + HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_MAD ,("MAD_IFC failed\n")); + return IB_MAD_RESULT_FAILURE; + } + if (status == MTHCA_CMD_STAT_BAD_PKT) + return IB_MAD_RESULT_SUCCESS; + if (status) { + HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_MAD ,("MAD_IFC returned status %02x\n", status)); + return IB_MAD_RESULT_FAILURE; + } + + if (!out_mad->mad_hdr.status) + smp_snoop(ibdev, port_num, in_mad); + + HCA_PRINT( TRACE_LEVEL_VERBOSE ,HCA_DBG_MAD,("out: Class %02x, Method %02x, AttrId %x, AttrMod %x, ClSpec %x, Tid %I64x, Status %x\n", + (u32)out_mad->mad_hdr.mgmt_class, (u32)out_mad->mad_hdr.method, + (u32)out_mad->mad_hdr.attr_id, out_mad->mad_hdr.attr_mod, + (u32)out_mad->mad_hdr.class_specific, out_mad->mad_hdr.tid, + (u32)out_mad->mad_hdr.status )); + + if (in_mad->mad_hdr.method == IB_MGMT_METHOD_TRAP_REPRESS) { + /* no response for trap repress */ + return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED; + } + + return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY; +} + +static void send_handler(struct ib_mad_agent *agent, + struct ib_mad_send_wc *mad_send_wc) +{ + struct mthca_trap_mad *tmad = + (void *) (ULONG_PTR) mad_send_wc->wr_id; + + free_dma_mem_map(agent->device->mdev, &tmad->sg, PCI_DMA_BIDIRECTIONAL ); + kfree(tmad); +} diff --git a/trunk/hw/mthca/user/mlnx_ual_av.c b/trunk/hw/mthca/user/mlnx_ual_av.c index 6e952b63..6bb6daf3 100644 --- a/trunk/hw/mthca/user/mlnx_ual_av.c +++ b/trunk/hw/mthca/user/mlnx_ual_av.c @@ -244,7 +244,7 @@ __post_create_av ( } ah->key = page->mr.lkey; } - *ph_uvp_av = (ib_av_handle_t VOID_PTR64)ah; + *ph_uvp_av = (ib_av_handle_t)ah; } else { mthca_free_av(ah); @@ -290,7 +290,7 @@ __post_query_av ( { cl_memcpy (p_addr_vector, &ah->av_attr, sizeof (ib_av_attr_t)); if (ph_pd) - *ph_pd = (ib_pd_handle_t VOID_PTR64)ah->h_uvp_pd; + *ph_pd = (ib_pd_handle_t)ah->h_uvp_pd; } UVP_EXIT(UVP_DBG_AV); diff --git a/trunk/hw/mthca/user/mlnx_ual_ca.c b/trunk/hw/mthca/user/mlnx_ual_ca.c index 31c68140..0145247b 100644 --- a/trunk/hw/mthca/user/mlnx_ual_ca.c +++ b/trunk/hw/mthca/user/mlnx_ual_ca.c @@ -111,7 +111,7 @@ __post_open_ca ( /* return results */ new_ca->ibv_ctx = ibvcontext; new_ca->p_hca_attr = NULL; - *ph_uvp_ca = (ib_ca_handle_t VOID_PTR64)new_ca; + *ph_uvp_ca = (ib_ca_handle_t)new_ca; } err_memory: diff --git a/trunk/hw/mthca/user/mlnx_ual_cq.c b/trunk/hw/mthca/user/mlnx_ual_cq.c index ddf67cb2..f695ca92 100644 --- a/trunk/hw/mthca/user/mlnx_ual_cq.c +++ b/trunk/hw/mthca/user/mlnx_ual_cq.c @@ -129,7 +129,7 @@ __post_create_cq ( goto err_create_cq; } - *ph_uvp_cq = (ib_cq_handle_t VOID_PTR64)ibv_cq; + *ph_uvp_cq = (ib_cq_handle_t)ibv_cq; } goto end; diff --git a/trunk/hw/mthca/user/mlnx_ual_pd.c b/trunk/hw/mthca/user/mlnx_ual_pd.c index 5a448b3f..a30b34b0 100644 --- a/trunk/hw/mthca/user/mlnx_ual_pd.c +++ b/trunk/hw/mthca/user/mlnx_ual_pd.c @@ -114,7 +114,7 @@ __post_allocate_pd ( /* return results */ p_new_pd->ibv_pd = ibv_pd; p_new_pd->p_hobul = p_hobul; - *ph_uvp_pd = (ib_pd_handle_t VOID_PTR64)p_new_pd; + *ph_uvp_pd = (ib_pd_handle_t)p_new_pd; } goto end; diff --git a/trunk/hw/mthca/user/mlnx_ual_qp.c b/trunk/hw/mthca/user/mlnx_ual_qp.c index a0ecde5e..fc5a2a32 100644 --- a/trunk/hw/mthca/user/mlnx_ual_qp.c +++ b/trunk/hw/mthca/user/mlnx_ual_qp.c @@ -180,7 +180,7 @@ __post_create_qp ( goto err_create_cq; } - *ph_uvp_qp = 
(ib_qp_handle_t VOID_PTR64)ibv_qp; + *ph_uvp_qp = (ib_qp_handle_t)ibv_qp; } goto end; diff --git a/trunk/hw/mthca/user/mlnx_ual_srq.c b/trunk/hw/mthca/user/mlnx_ual_srq.c index e3c97820..5f22b95f 100644 --- a/trunk/hw/mthca/user/mlnx_ual_srq.c +++ b/trunk/hw/mthca/user/mlnx_ual_srq.c @@ -207,7 +207,7 @@ __post_create_srq ( if (mthca_is_memfree(ibv_pd->context)) mthca_set_db_qn(srq->db, MTHCA_DB_TYPE_SRQ, srq->srqn); - *ph_uvp_srq = (ib_srq_handle_t VOID_PTR64)srq; + *ph_uvp_srq = (ib_srq_handle_t)srq; } else __free_srq(srq); diff --git a/trunk/inc/iba/ib_types.h b/trunk/inc/iba/ib_types.h index f656dfba..0fa13ac2 100644 --- a/trunk/inc/iba/ib_types.h +++ b/trunk/inc/iba/ib_types.h @@ -45,10 +45,6 @@ #define TYPEDEF_PTR64 #endif -#ifndef VOID_PTR64 -#define VOID_PTR64 -#endif - #ifndef STRUCT_PTR64 #define STRUCT_PTR64 #endif diff --git a/trunk/ulp/ipoib/kernel/ipoib_driver.c b/trunk/ulp/ipoib/kernel/ipoib_driver.c index 0563bca9..c8f34310 100644 --- a/trunk/ulp/ipoib/kernel/ipoib_driver.c +++ b/trunk/ulp/ipoib/kernel/ipoib_driver.c @@ -1,6 +1,7 @@ /* * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. * Copyright (c) 2006 Mellanox Technologies. All rights reserved. + * Portions Copyright (c) 2008 Microsoft Corporation. All rights reserved. * * This software is available to you under the OpenIB.org BSD license * below: @@ -2542,7 +2543,7 @@ __ipoib_ats_reg_cb( CL_ASSERT( p_reg_svc_rec ); CL_ASSERT( p_reg_svc_rec->svc_context ); - p_reg = (ats_reg_t* VOID_PTR64)p_reg_svc_rec->svc_context; + p_reg = (ats_reg_t*)p_reg_svc_rec->svc_context; port_num = p_reg->p_adapter->guids.port_num; cl_obj_lock( &p_reg->p_adapter->obj ); -- 2.41.0