From: leonidk Date: Thu, 23 Oct 2008 13:40:22 +0000 (+0000) Subject: [MLX4] added support to WPP. [mlnx:3348-9] X-Git-Url: https://openfabrics.org/gitweb/?a=commitdiff_plain;h=22487b0bb6a8013274f3e2a644a9848221568206;p=~shefty%2Frdma-win.git [MLX4] added support to WPP. [mlnx:3348-9] git-svn-id: svn://openib.tc.cornell.edu/gen1@1688 ad392aa1-c5ef-ae45-8dd8-e69d62a5ef86 --- diff --git a/branches/WOF2-0/trunk/hw/mlx4/kernel/bus/core/SOURCES b/branches/WOF2-0/trunk/hw/mlx4/kernel/bus/core/SOURCES index e0cc75ca..fe394f67 100644 --- a/branches/WOF2-0/trunk/hw/mlx4/kernel/bus/core/SOURCES +++ b/branches/WOF2-0/trunk/hw/mlx4/kernel/bus/core/SOURCES @@ -5,7 +5,7 @@ TARGETTYPE=DRIVER_LIBRARY !if $(FREEBUILD) -#ENABLE_EVENT_TRACING=1 +ENABLE_EVENT_TRACING=1 !else #ENABLE_EVENT_TRACING=1 !endif diff --git a/branches/WOF2-0/trunk/hw/mlx4/kernel/bus/core/cache.c b/branches/WOF2-0/trunk/hw/mlx4/kernel/bus/core/cache.c index c41e9ae6..b3ddc348 100644 --- a/branches/WOF2-0/trunk/hw/mlx4/kernel/bus/core/cache.c +++ b/branches/WOF2-0/trunk/hw/mlx4/kernel/bus/core/cache.c @@ -37,6 +37,7 @@ #include "ib\mlx4_ib.h" #include "ib_cache.h" +#include #if defined(EVENT_TRACING) #ifdef offsetof diff --git a/branches/WOF2-0/trunk/hw/mlx4/kernel/bus/core/device.c b/branches/WOF2-0/trunk/hw/mlx4/kernel/bus/core/device.c index c68ae40e..d20b1105 100644 --- a/branches/WOF2-0/trunk/hw/mlx4/kernel/bus/core/device.c +++ b/branches/WOF2-0/trunk/hw/mlx4/kernel/bus/core/device.c @@ -33,6 +33,11 @@ * $Id: device.c 1349 2004-12-16 21:09:43Z roland $ */ +#include "l2w.h" +#include "ib_verbs.h" +#include "core.h" +#include + #if defined(EVENT_TRACING) #ifdef offsetof #undef offsetof @@ -40,9 +45,6 @@ #include "device.tmh" #endif -#include "l2w.h" -#include "ib_verbs.h" -#include "core.h" struct ib_client_data { struct list_head list; diff --git a/branches/WOF2-0/trunk/hw/mlx4/kernel/bus/core/l2w.c b/branches/WOF2-0/trunk/hw/mlx4/kernel/bus/core/l2w.c index d3a225e0..4b7599eb 100644 --- a/branches/WOF2-0/trunk/hw/mlx4/kernel/bus/core/l2w.c +++ b/branches/WOF2-0/trunk/hw/mlx4/kernel/bus/core/l2w.c @@ -3,6 +3,13 @@ #include "pa_cash.h" #include "mlx4.h" +#if defined (EVENT_TRACING) +#ifdef offsetof +#undef offsetof +#endif +#include "l2w.tmh" +#endif + /* Nth element of the table contains the index of the first set bit of N; 8 - for N=0 */ char g_set_bit_tbl[256]; diff --git a/branches/WOF2-0/trunk/hw/mlx4/kernel/bus/core/l2w_memory.c b/branches/WOF2-0/trunk/hw/mlx4/kernel/bus/core/l2w_memory.c index bb0f8851..49b8c92d 100644 --- a/branches/WOF2-0/trunk/hw/mlx4/kernel/bus/core/l2w_memory.c +++ b/branches/WOF2-0/trunk/hw/mlx4/kernel/bus/core/l2w_memory.c @@ -34,6 +34,7 @@ * $Id: mt_memory.c 2020 2007-05-01 09:29:10Z leonid $ */ #include "l2w.h" +#include #if defined (EVENT_TRACING) #ifdef offsetof diff --git a/branches/WOF2-0/trunk/hw/mlx4/kernel/bus/core/l2w_umem.c b/branches/WOF2-0/trunk/hw/mlx4/kernel/bus/core/l2w_umem.c index 5a3d3078..245ca6bb 100644 --- a/branches/WOF2-0/trunk/hw/mlx4/kernel/bus/core/l2w_umem.c +++ b/branches/WOF2-0/trunk/hw/mlx4/kernel/bus/core/l2w_umem.c @@ -2,6 +2,13 @@ #include "l2w.h" #include "ib_verbs.h" +#if defined (EVENT_TRACING) +#ifdef offsetof +#undef offsetof +#endif +#include "l2w_umem.tmh" +#endif + /** * ib_umem_release - release memory pinned with ib_umem_get * @umem: umem struct to release diff --git a/branches/WOF2-0/trunk/hw/mlx4/kernel/bus/core/verbs.c b/branches/WOF2-0/trunk/hw/mlx4/kernel/bus/core/verbs.c index fd9daa78..33e08f51 100644 --- 
a/branches/WOF2-0/trunk/hw/mlx4/kernel/bus/core/verbs.c +++ b/branches/WOF2-0/trunk/hw/mlx4/kernel/bus/core/verbs.c @@ -38,6 +38,10 @@ * $Id: verbs.c 1349 2004-12-16 21:09:43Z roland $ */ +#include "l2w.h" +#include "ib_verbs.h" +#include + #if defined(EVENT_TRACING) #ifdef offsetof #undef offsetof @@ -45,8 +49,6 @@ #include "device.tmh" #endif -#include "l2w.h" -#include "ib_verbs.h" // qp_state_table static struct { diff --git a/branches/WOF2-0/trunk/hw/mlx4/kernel/bus/drv/drv.c b/branches/WOF2-0/trunk/hw/mlx4/kernel/bus/drv/drv.c index 23b9ef75..345205ec 100644 --- a/branches/WOF2-0/trunk/hw/mlx4/kernel/bus/drv/drv.c +++ b/branches/WOF2-0/trunk/hw/mlx4/kernel/bus/drv/drv.c @@ -22,7 +22,10 @@ Environment: #include #include -#if defined(EVENT_TRACING) +#if defined (EVENT_TRACING) +#ifdef offsetof +#undef offsetof +#endif #include "drv.tmh" #endif @@ -570,20 +573,20 @@ __get_resources( i, desc->ShareDisposition, desc->Flags, desc->u.MessageInterrupt.Translated.Level, desc->u.MessageInterrupt.Translated.Vector, - desc->u.MessageInterrupt.Translated.Affinity )); + (u32)desc->u.MessageInterrupt.Translated.Affinity )); MLX4_PRINT(TRACE_LEVEL_WARNING, MLX4_DBG_DRV, - ("EvtPrepareHardware: Desc %d: RawMsiInterrupt: Share %d, Flags %#x, MessageCount #hx, Vector %#x, Affinity %#x\n", + ("EvtPrepareHardware: Desc %d: RawMsiInterrupt: Share %d, Flags %#x, MessageCount %#hx, Vector %#x, Affinity %#x\n", i, desc_raw->ShareDisposition, desc_raw->Flags, desc_raw->u.MessageInterrupt.Raw.MessageCount, desc_raw->u.MessageInterrupt.Raw.Vector, - desc_raw->u.MessageInterrupt.Raw.Affinity )); + (u32)desc_raw->u.MessageInterrupt.Raw.Affinity )); } else { // line-based interrupt MLX4_PRINT(TRACE_LEVEL_WARNING, MLX4_DBG_DRV, ("EvtPrepareHardware: Desc %d: LineInterrupt: Share %d, Flags %#x, Level %d, Vector %#x, Affinity %#x\n", i, desc->ShareDisposition, desc->Flags, desc->u.Interrupt.Level, desc->u.Interrupt.Vector, - desc->u.Interrupt.Affinity )); + (u32)desc->u.Interrupt.Affinity )); } break; #endif diff --git a/branches/WOF2-0/trunk/hw/mlx4/kernel/bus/drv/pdo.c b/branches/WOF2-0/trunk/hw/mlx4/kernel/bus/drv/pdo.c index 920cd476..826f834e 100644 --- a/branches/WOF2-0/trunk/hw/mlx4/kernel/bus/drv/pdo.c +++ b/branches/WOF2-0/trunk/hw/mlx4/kernel/bus/drv/pdo.c @@ -2,9 +2,12 @@ #include #include -#if defined(EVENT_TRACING) -#include "pdo.tmh" +#if defined (EVENT_TRACING) +#ifdef offsetof +#undef offsetof #endif +#include "pdo.tmh" +#endif #ifdef ALLOC_PRAGMA #pragma alloc_text(PAGE, create_pdo) diff --git a/branches/WOF2-0/trunk/hw/mlx4/kernel/bus/drv/precomp.h b/branches/WOF2-0/trunk/hw/mlx4/kernel/bus/drv/precomp.h index 9fbb31cd..eefec294 100644 --- a/branches/WOF2-0/trunk/hw/mlx4/kernel/bus/drv/precomp.h +++ b/branches/WOF2-0/trunk/hw/mlx4/kernel/bus/drv/precomp.h @@ -10,14 +10,6 @@ #include "drv.h" #include "driver.h" #include "cmd.h" - -#if 0 -#include "mxe_hca.h" -#include "mtnic_if_defs.h" -#include "mxe_utils.h" -#include "mxe_wpptrace.h" -#include "mtnic_dev.h" -#include "mxe_drv.h" -#endif +#include diff --git a/branches/WOF2-0/trunk/hw/mlx4/kernel/bus/drv/sources b/branches/WOF2-0/trunk/hw/mlx4/kernel/bus/drv/sources index 54d49820..00644b69 100644 --- a/branches/WOF2-0/trunk/hw/mlx4/kernel/bus/drv/sources +++ b/branches/WOF2-0/trunk/hw/mlx4/kernel/bus/drv/sources @@ -11,7 +11,7 @@ NTTARGETFILES=$(INF_TARGET) !endif !if $(FREEBUILD) -#ENABLE_EVENT_TRACING=1 +ENABLE_EVENT_TRACING=1 !else #ENABLE_EVENT_TRACING=1 !endif @@ -54,7 +54,7 @@ C_DEFINES = $(C_DEFINES) -DEVENT_TRACING RUN_WPP= $(SOURCES) -km 
-dll -ext: .c .cpp .h .C .CPP .H\ # -preserveext:.cpp .h\ - -scan:..\inc\mlx4_debug.h \ + -scan:..\..\inc\mlx4_debug.h \ -func:MLX4_PRINT(LEVEL,FLAGS,(MSG,...)) \ -func:MLX4_PRINT_EXIT(LEVEL,FLAGS,(MSG,...)) !ENDIF diff --git a/branches/WOF2-0/trunk/hw/mlx4/kernel/bus/drv/wmi.c b/branches/WOF2-0/trunk/hw/mlx4/kernel/bus/drv/wmi.c index e3faf50c..25323d46 100644 --- a/branches/WOF2-0/trunk/hw/mlx4/kernel/bus/drv/wmi.c +++ b/branches/WOF2-0/trunk/hw/mlx4/kernel/bus/drv/wmi.c @@ -23,9 +23,12 @@ Environment: #include "precomp.h" -#if defined(EVENT_TRACING) -#include "wmi.tmh" +#if defined (EVENT_TRACING) +#ifdef offsetof +#undef offsetof #endif +#include "wmi.tmh" +#endif #ifdef ALLOC_PRAGMA #pragma alloc_text(PAGE,WmiRegistration) diff --git a/branches/WOF2-0/trunk/hw/mlx4/kernel/bus/ib/SOURCES b/branches/WOF2-0/trunk/hw/mlx4/kernel/bus/ib/SOURCES index 7216a5ba..4a940f09 100644 --- a/branches/WOF2-0/trunk/hw/mlx4/kernel/bus/ib/SOURCES +++ b/branches/WOF2-0/trunk/hw/mlx4/kernel/bus/ib/SOURCES @@ -5,7 +5,7 @@ TARGETTYPE=DRIVER_LIBRARY !if $(FREEBUILD) -#ENABLE_EVENT_TRACING=1 +ENABLE_EVENT_TRACING=1 !else #ENABLE_EVENT_TRACING=1 !endif @@ -37,7 +37,7 @@ TARGETLIBS= \ C_DEFINES = $(C_DEFINES) -DEVENT_TRACING RUN_WPP = $(SOURCES) -km -ext: .c .h .C .H \ - -scan:..\inc\mlx4_debug.h \ + -scan:..\..\inc\mlx4_debug.h \ -func:MLX4_PRINT(LEVEL,FLAGS,(MSG,...)) \ -func:MLX4_PRINT_EXIT(LEVEL,FLAGS,(MSG,...)) !ENDIF diff --git a/branches/WOF2-0/trunk/hw/mlx4/kernel/bus/ib/cq.c b/branches/WOF2-0/trunk/hw/mlx4/kernel/bus/ib/cq.c index 579d4794..2cedea6c 100644 --- a/branches/WOF2-0/trunk/hw/mlx4/kernel/bus/ib/cq.c +++ b/branches/WOF2-0/trunk/hw/mlx4/kernel/bus/ib/cq.c @@ -35,6 +35,14 @@ #include "qp.h" #include "user.h" +#if defined(EVENT_TRACING) +#ifdef offsetof +#undef offsetof +#endif +#include "cq.tmh" +#endif + + static void mlx4_ib_cq_comp(struct mlx4_cq *cq) { struct ib_cq *ibcq = &to_mibcq(cq)->ibcq; diff --git a/branches/WOF2-0/trunk/hw/mlx4/kernel/bus/ib/mad.c b/branches/WOF2-0/trunk/hw/mlx4/kernel/bus/ib/mad.c index 01ba48cb..4a055d95 100644 --- a/branches/WOF2-0/trunk/hw/mlx4/kernel/bus/ib/mad.c +++ b/branches/WOF2-0/trunk/hw/mlx4/kernel/bus/ib/mad.c @@ -36,6 +36,14 @@ #include #include "cmd.h" +#if defined(EVENT_TRACING) +#ifdef offsetof +#undef offsetof +#endif +#include "mad.tmh" +#endif + + enum { MLX4_IB_VENDOR_CLASS1 = 0x9, MLX4_IB_VENDOR_CLASS2 = 0xa diff --git a/branches/WOF2-0/trunk/hw/mlx4/kernel/bus/net/SOURCES b/branches/WOF2-0/trunk/hw/mlx4/kernel/bus/net/SOURCES index e1c89e54..37441cf0 100644 --- a/branches/WOF2-0/trunk/hw/mlx4/kernel/bus/net/SOURCES +++ b/branches/WOF2-0/trunk/hw/mlx4/kernel/bus/net/SOURCES @@ -5,7 +5,7 @@ TARGETTYPE=DRIVER_LIBRARY !if $(FREEBUILD) -#ENABLE_EVENT_TRACING=1 +ENABLE_EVENT_TRACING=1 !else #ENABLE_EVENT_TRACING=1 !endif @@ -46,7 +46,7 @@ TARGETLIBS= \ C_DEFINES = $(C_DEFINES) -DEVENT_TRACING RUN_WPP = $(SOURCES) -km -ext: .c .h .C .H \ - -scan:..\mlx4_debug.h \ + -scan:..\..\inc\mlx4_debug.h \ -func:MLX4_PRINT(LEVEL,FLAGS,(MSG,...)) \ -func:MLX4_PRINT_EXIT(LEVEL,FLAGS,(MSG,...)) !ENDIF diff --git a/branches/WOF2-0/trunk/hw/mlx4/kernel/bus/net/alloc.c b/branches/WOF2-0/trunk/hw/mlx4/kernel/bus/net/alloc.c index 62a074e6..52d999fe 100644 --- a/branches/WOF2-0/trunk/hw/mlx4/kernel/bus/net/alloc.c +++ b/branches/WOF2-0/trunk/hw/mlx4/kernel/bus/net/alloc.c @@ -31,6 +31,15 @@ */ #include "mlx4.h" +#include + +#if defined(EVENT_TRACING) +#ifdef offsetof +#undef offsetof +#endif +#include "alloc.tmh" +#endif + u32 mlx4_bitmap_alloc(struct mlx4_bitmap 
*bitmap) { @@ -214,7 +223,7 @@ int mlx4_buf_alloc(struct mlx4_dev *dev, int size, int max_direct, buf->npages *= 2; } MLX4_PRINT( TRACE_LEVEL_INFORMATION, MLX4_DBG_CQ, - ("size %#x, nbufs %d, pages %d, page_shift %d, kva %p, da %llx, buf_size %#x\n", + ("size %#x, nbufs %d, pages %d, page_shift %d, kva %p, da %I64x, buf_size %#x\n", size, buf->nbufs, buf->npages, buf->page_shift, buf->u.direct.buf, t.da, t.sz )); memset(buf->u.direct.buf, 0, size); diff --git a/branches/WOF2-0/trunk/hw/mlx4/kernel/bus/net/cq.c b/branches/WOF2-0/trunk/hw/mlx4/kernel/bus/net/cq.c index 9fed0ade..3dab6b0d 100644 --- a/branches/WOF2-0/trunk/hw/mlx4/kernel/bus/net/cq.c +++ b/branches/WOF2-0/trunk/hw/mlx4/kernel/bus/net/cq.c @@ -38,6 +38,15 @@ #include "cmd.h" #include "icm.h" #include "cq.h" +#include + +#if defined(EVENT_TRACING) +#ifdef offsetof +#undef offsetof +#endif +#include "cq.tmh" +#endif + #define MLX4_CQ_STATUS_OK ( 0 << 28) #define MLX4_CQ_STATUS_OVERFLOW ( 9 << 28) diff --git a/branches/WOF2-0/trunk/hw/mlx4/kernel/bus/net/main.c b/branches/WOF2-0/trunk/hw/mlx4/kernel/bus/net/main.c index 3a1d92e5..1273d778 100644 --- a/branches/WOF2-0/trunk/hw/mlx4/kernel/bus/net/main.c +++ b/branches/WOF2-0/trunk/hw/mlx4/kernel/bus/net/main.c @@ -40,6 +40,14 @@ #include "device.h" #include "doorbell.h" #include "complib\cl_thread.h" +#include + +#if defined(EVENT_TRACING) +#ifdef offsetof +#undef offsetof +#endif +#include "main.tmh" +#endif static struct mlx4_profile default_profile = { diff --git a/branches/WOF2-0/trunk/hw/mlx4/kernel/hca/SOURCES b/branches/WOF2-0/trunk/hw/mlx4/kernel/hca/SOURCES index ab9c2973..4a257917 100644 --- a/branches/WOF2-0/trunk/hw/mlx4/kernel/hca/SOURCES +++ b/branches/WOF2-0/trunk/hw/mlx4/kernel/hca/SOURCES @@ -17,7 +17,7 @@ ENABLE_EVENT_TRACING=1 !endif SOURCES= \ - hca.rc \ + hca.rc \ av.c \ ca.c \ cq.c \ @@ -25,12 +25,12 @@ SOURCES= \ direct.c \ drv.c \ fw.c \ - mcast.c \ + mcast.c \ mr.c \ pd.c \ qp.c \ srq.c \ - verbs.c \ + hverbs.c \ vp.c \ wmi.c \ diff --git a/branches/WOF2-0/trunk/hw/mlx4/kernel/hca/hverbs.c b/branches/WOF2-0/trunk/hw/mlx4/kernel/hca/hverbs.c new file mode 100644 index 00000000..4fcf5b14 --- /dev/null +++ b/branches/WOF2-0/trunk/hw/mlx4/kernel/hca/hverbs.c @@ -0,0 +1,673 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * Copyright (c) 2004-2005 Mellanox Technologies, Inc. All rights reserved. + * Portions Copyright (c) 2008 Microsoft Corporation. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id: hca_verbs.c 2073 2007-11-13 11:38:40Z leonid $ + */ + + +#include "precomp.h" + +#if defined(EVENT_TRACING) +#ifdef offsetof +#undef offsetof +#endif +#include "hverbs.tmh" +#endif + + +/* Memory regions */ + +struct ib_mr *ib_get_dma_mr(struct ib_pd *pd, enum ib_access_flags mr_access_flags) +{ + struct ib_mr *mr; + + mr = pd->device->get_dma_mr(pd, mr_access_flags); + + if (!IS_ERR(mr)) { + mr->device = pd->device; + mr->pd = pd; + mr->p_uctx = pd->p_uctx; + atomic_inc(&pd->usecnt); + atomic_set(&mr->usecnt, 0); + HCA_PRINT(TRACE_LEVEL_INFORMATION ,HCA_DBG_MEMORY ,("pdn %d, usecnt %d \n", + ((struct mlx4_ib_pd*)pd)->pdn, pd->usecnt)); + } + + return mr; +} + +struct ib_mr *ib_reg_phys_mr(struct ib_pd *pd, + struct ib_phys_buf *phys_buf_array, + int num_phys_buf, + enum ib_access_flags mr_access_flags, + u64 *iova_start) +{ + struct ib_mr *mr; + + if ( pd->device->reg_phys_mr ) + mr = pd->device->reg_phys_mr(pd, phys_buf_array, num_phys_buf, + mr_access_flags, iova_start); + else + mr = ERR_PTR(-ENOSYS); + + if (!IS_ERR(mr)) { + mr->device = pd->device; + mr->pd = pd; + mr->p_uctx = pd->p_uctx; + atomic_inc(&pd->usecnt); + atomic_set(&mr->usecnt, 0); + HCA_PRINT(TRACE_LEVEL_INFORMATION ,HCA_DBG_MEMORY ,("PD%d use cnt %d \n", + ((struct mlx4_ib_pd*)pd)->pdn, pd->usecnt)); + } + + return mr; +} + + + struct ib_mr *ibv_reg_mr(struct ib_pd *pd, + u64 start, u64 length, + u64 virt_addr, + int mr_access_flags, + ci_umv_buf_t* const p_umv_buf ) +{ + struct ib_mr *ib_mr; + int err; + HCA_ENTER(HCA_DBG_MEMORY); + + if (p_umv_buf && p_umv_buf->command) { + err = -ENOSYS; + goto err_not_supported; + } + + ib_mr = pd->device->reg_user_mr(pd, start, length, virt_addr, mr_access_flags, NULL); + if (IS_ERR(ib_mr)) { + err = PTR_ERR(ib_mr); + HCA_PRINT(TRACE_LEVEL_ERROR,HCA_DBG_MEMORY ,("mthca_reg_user_mr failed (%d)\n", err)); + goto err_reg_user_mr; + } + + ib_mr->device = pd->device; + ib_mr->pd = pd; + atomic_inc(&pd->usecnt); + atomic_set(&ib_mr->usecnt, 0); + HCA_PRINT(TRACE_LEVEL_INFORMATION ,HCA_DBG_MEMORY ,("PD%d use cnt %d, pd_handle %p, ctx %p \n", + ((struct mlx4_ib_pd*)pd)->pdn, pd->usecnt, pd, pd->p_uctx)); + HCA_EXIT(HCA_DBG_MEMORY); + return ib_mr; + +err_reg_user_mr: +err_not_supported: + HCA_EXIT(HCA_DBG_MEMORY); + return ERR_PTR(err); +} + +int ib_dereg_mr(struct ib_mr *mr) +{ + int ret; + struct ib_pd *pd; + struct ib_device *p_ibdev; + + if (atomic_read(&mr->usecnt)) + return -EBUSY; + + p_ibdev = mr->device; + pd = mr->pd; + ret = p_ibdev->dereg_mr(mr); + if (!ret) { + atomic_dec(&pd->usecnt); + HCA_PRINT(TRACE_LEVEL_INFORMATION ,HCA_DBG_MEMORY ,("pdn %d, usecnt %d, pd_handle %p, ctx %p \n", + ((struct mlx4_ib_pd*)pd)->pdn, pd->usecnt, pd, pd->p_uctx)); + } + + return ret; +} + +static void release_user_cq_qp_resources( + struct ib_ucontext *p_uctx) +{ + if (p_uctx) { + atomic_dec(&p_uctx->x.usecnt); + if (!atomic_read(&p_uctx->x.usecnt) && p_uctx->closing) { + HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_SHIM ,("User resources are released. 
Removing context\n")); + ibv_um_close(p_uctx); + } + } +} + +// +// Completion queues +// + +struct ib_cq *ibv_create_cq(struct ib_device *p_ibdev, + ib_comp_handler comp_handler, + void (*event_handler)(ib_event_rec_t *), + void *cq_context, int cqe, + struct ib_ucontext *p_uctx, ci_umv_buf_t* const p_umv_buf) +{ + int err; + struct ib_cq *cq; + struct ib_udata udata, *p_udata = &udata; + struct ibv_create_cq *p_req; + struct ibv_create_cq_resp *p_resp = NULL; + + if ( p_uctx && p_umv_buf && p_umv_buf->p_inout_buf ) { + // prepare user parameters + p_req = (struct ibv_create_cq*)(ULONG_PTR)p_umv_buf->p_inout_buf; + p_resp = (struct ibv_create_cq_resp*)(ULONG_PTR) + p_umv_buf->p_inout_buf; + INIT_UDATA(&udata, &p_req->buf_addr, &p_resp->cqn, + sizeof(struct mlx4_ib_create_cq), sizeof(struct mlx4_ib_create_cq_resp)); + } + else + p_udata = NULL; + + // create cq + cq = p_ibdev->create_cq(p_ibdev, cqe, 0, p_uctx, p_udata); + if (IS_ERR(cq)) { + err = PTR_ERR(cq); + HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_CQ ,("create_cq failed (%d)\n", err)); + goto err_create_cq; + } + + cq->device = p_ibdev; + cq->p_uctx = p_uctx; + cq->comp_handler = comp_handler; + cq->event_handler = event_handler; + cq->cq_context = cq_context; + atomic_set(&cq->usecnt, 0); + if (p_uctx) + atomic_inc(&p_uctx->x.usecnt); + + HCA_PRINT(TRACE_LEVEL_INFORMATION, HCA_DBG_CQ , + ("created CQ: cqn %#x:%#x \n", ((struct mlx4_ib_cq*)cq)->mcq.cqn, cq->cqe )); + + // fill results + if (p_umv_buf) { + p_resp->cq_handle = (u64)(ULONG_PTR)cq; + p_resp->cqe = cq->cqe; + p_umv_buf->output_size = sizeof(struct ibv_create_cq_resp); + } + + return cq; + +err_create_cq: + if( p_umv_buf && p_umv_buf->command ) + p_umv_buf->status = IB_ERROR; + return ERR_PTR(err); +} + +int ib_destroy_cq(struct ib_cq *cq) +{ + int ret; + struct ib_ucontext *p_uctx = cq->p_uctx; + + if (atomic_read(&cq->usecnt)) + return -EBUSY; + + HCA_PRINT(TRACE_LEVEL_INFORMATION, HCA_DBG_CQ , + ("destroying CQ: cqn %#x:%#x \n", ((struct mlx4_ib_cq*)cq)->mcq.cqn, cq->cqe )); + + ret = cq->device->destroy_cq(cq); + release_user_cq_qp_resources(p_uctx); + return ret; +} + +// +// Queue pairs +// + +static char *__print_qtype(enum ib_qp_type qtype) +{ + char *str = NULL; + switch (qtype) { + case IB_QPT_SMI: str = "SMI"; break; + case IB_QPT_GSI: str = "GSI"; break; + case IB_QPT_RC: str = "RC"; break; + case IB_QPT_UC: str = "UC"; break; + case IB_QPT_UD: str = "UD"; break; + case IB_QPT_RAW_IP_V6: str = "IP_V6"; break; + case IB_QPT_RAW_ETY: str = "ETY"; break; + default: str = "UKNWN"; break; + } + return str; +} + +struct ib_qp *ibv_create_qp(struct ib_pd *pd, + struct ib_qp_init_attr *qp_init_attr, + struct ib_ucontext *p_uctx, ci_umv_buf_t* const p_umv_buf) +{ + int err; + struct ib_qp *p_ib_qp; + struct ib_udata udata, *p_udata = &udata; + struct ibv_create_qp *p_req = NULL; + struct ibv_create_qp_resp *p_resp= NULL; + + HCA_ENTER(HCA_DBG_QP); + + if ( p_uctx && p_umv_buf && p_umv_buf->command ) { + // prepare user parameters + p_req = (struct ibv_create_qp*)(ULONG_PTR)p_umv_buf->p_inout_buf; + p_resp = (struct ibv_create_qp_resp*)(ULONG_PTR)p_umv_buf->p_inout_buf; + INIT_UDATA(&udata, &p_req->buf_addr, NULL, + sizeof(struct mlx4_ib_create_qp), 0); + } + else + p_udata = NULL; + + p_ib_qp = pd->device->create_qp( pd, qp_init_attr, p_udata ); + + if (IS_ERR(p_ib_qp)) { + err = PTR_ERR(p_ib_qp); + HCA_PRINT(TRACE_LEVEL_ERROR,HCA_DBG_QP ,("create_qp failed (%d)\n", err)); + goto err_create_qp; + } + + // fill results + p_ib_qp->device = pd->device; + p_ib_qp->pd = pd; + 
p_ib_qp->send_cq = qp_init_attr->send_cq; + p_ib_qp->recv_cq = qp_init_attr->recv_cq; + p_ib_qp->srq = qp_init_attr->srq; + p_ib_qp->p_uctx = p_uctx; + p_ib_qp->event_handler = qp_init_attr->event_handler; + p_ib_qp->qp_context = qp_init_attr->qp_context; + p_ib_qp->qp_type = qp_init_attr->qp_type; + atomic_inc(&pd->usecnt); + atomic_inc(&qp_init_attr->send_cq->usecnt); + atomic_inc(&qp_init_attr->recv_cq->usecnt); + if (qp_init_attr->srq) + atomic_inc(&qp_init_attr->srq->usecnt); + if (p_uctx) + atomic_inc(&p_uctx->x.usecnt); + HCA_PRINT(TRACE_LEVEL_INFORMATION ,HCA_DBG_QP ,("pdn %d, usecnt %d, pd_handle %p, ctx %p \n", + ((struct mlx4_ib_pd*)pd)->pdn, pd->usecnt, pd, pd->p_uctx)); + + HCA_PRINT(TRACE_LEVEL_INFORMATION, HCA_DBG_QP , + ("qtype %s (%d), qnum %#x, q_num %#x, ssz %d, rsz %d, scq %#x:%#x, rcq %#x:%#x, port_num %d \n", + __print_qtype(p_ib_qp->qp_type), p_ib_qp->qp_type, + ((struct mlx4_ib_qp*)p_ib_qp)->mqp.qpn, p_ib_qp->qp_num, + qp_init_attr->cap.max_send_wr, qp_init_attr->cap.max_recv_wr, + ((struct mlx4_ib_cq*)p_ib_qp->send_cq)->mcq.cqn, p_ib_qp->send_cq->cqe, + ((struct mlx4_ib_cq*)p_ib_qp->recv_cq)->mcq.cqn, p_ib_qp->recv_cq->cqe, + qp_init_attr->port_num + ) ); + + // fill results for user + if (p_uctx && p_umv_buf && p_umv_buf->p_inout_buf) { + struct mlx4_ib_qp *p_mib_qp = (struct mlx4_ib_qp *)p_ib_qp; + p_resp->qp_handle = (__u64)(ULONG_PTR)p_ib_qp; + p_resp->qpn = p_mib_qp->mqp.qpn; + p_resp->max_send_wr = p_mib_qp->sq.max_post; + p_resp->max_recv_wr = p_mib_qp->rq.max_post; + p_resp->max_send_sge = p_mib_qp->sq.max_gs; + p_resp->max_recv_sge = p_mib_qp->rq.max_gs; + /* + * We don't support inline sends for kernel QPs (yet), and we + * don't know what userspace's value should be. + */ + p_resp->max_inline_data = 0; + p_umv_buf->output_size = sizeof(struct ibv_create_qp_resp); + } + + return p_ib_qp; + +err_create_qp: + if( p_umv_buf && p_umv_buf->command ) + p_umv_buf->status = IB_ERROR; + HCA_EXIT(HCA_DBG_QP); + return ERR_PTR(err); +} + +int ib_destroy_qp(struct ib_qp *qp) +{ + struct ib_pd *p_ib_pd; + struct ib_cq *scq, *rcq; + struct ib_srq *srq; + struct ib_ucontext *p_uctx; + int ret; + + p_ib_pd = qp->pd; + scq = qp->send_cq; + rcq = qp->recv_cq; + srq = qp->srq; + p_uctx = p_ib_pd->p_uctx; + + ret = qp->device->destroy_qp(qp); + if (!ret) { + atomic_dec(&p_ib_pd->usecnt); + atomic_dec(&scq->usecnt); + atomic_dec(&rcq->usecnt); + HCA_PRINT(TRACE_LEVEL_INFORMATION ,HCA_DBG_QP ,("PD%d use cnt %d, pd_handle %p, ctx %p \n", + ((struct mlx4_ib_pd*)p_ib_pd)->pdn, p_ib_pd->usecnt, p_ib_pd, p_ib_pd->p_uctx)); + if (srq) + atomic_dec(&srq->usecnt); + release_user_cq_qp_resources(p_uctx); + } + + return ret; +} + +// +// Shared receive queues +// + + +/* Shared receive queues */ + +struct ib_srq *ibv_create_srq(struct ib_pd *pd, + struct ib_srq_init_attr *srq_init_attr, + struct ib_ucontext *p_uctx, ci_umv_buf_t* const p_umv_buf) +{ + int err; + struct ib_srq *p_ib_srq; + struct ib_udata udata, *p_udata = &udata; + struct ibv_create_srq *p_req = NULL; + struct ibv_create_srq_resp *p_resp= NULL; + + if ( p_uctx && p_umv_buf && p_umv_buf->p_inout_buf) { + // prepare user parameters + p_req = (struct ibv_create_srq*)(ULONG_PTR)p_umv_buf->p_inout_buf; + p_resp = (struct ibv_create_srq_resp*)(ULONG_PTR)p_umv_buf->p_inout_buf; + INIT_UDATA(&udata, &p_req->buf_addr, &p_resp->srqn, + sizeof(struct ibv_create_srq), sizeof(struct ibv_create_srq_resp)); + } + else + p_udata = NULL; + + p_ib_srq = pd->device->create_srq( pd, srq_init_attr, p_udata ); + if (IS_ERR(p_ib_srq)) { + err 
= PTR_ERR(p_ib_srq); + HCA_PRINT(TRACE_LEVEL_ERROR,HCA_DBG_QP ,("create_srq failed (%d)\n", err)); + goto err_create_srq; + } + + // fill results + p_ib_srq->device = pd->device; + p_ib_srq->pd = pd; + p_ib_srq->p_uctx = p_uctx; + p_ib_srq->event_handler = srq_init_attr->event_handler; + p_ib_srq->srq_context = srq_init_attr->srq_context; + atomic_inc(&pd->usecnt); + atomic_set(&p_ib_srq->usecnt, 0); + if (p_uctx) + atomic_inc(&p_uctx->x.usecnt); + HCA_PRINT(TRACE_LEVEL_INFORMATION ,HCA_DBG_QP ,("PD%d use cnt %d, pd_handle %p, ctx %p \n", + ((struct mlx4_ib_pd*)pd)->pdn, pd->usecnt, pd, pd->p_uctx)); + + HCA_PRINT(TRACE_LEVEL_INFORMATION, HCA_DBG_SRQ , + ("uctx %p, qhndl %p, qnum %#x \n", + pd->p_uctx, p_ib_srq, ((struct mlx4_ib_srq*)p_ib_srq)->msrq.srqn ) ); + + // fill results for user + if (p_uctx && p_umv_buf && p_umv_buf->p_inout_buf) { + struct mlx4_ib_srq* p_mib_srq = (struct mlx4_ib_srq*)p_ib_srq; + p_resp->srq_handle = (__u64)(ULONG_PTR)p_ib_srq; + p_resp->max_wr = p_mib_srq->msrq.max - 1; + p_resp->max_sge = p_mib_srq->msrq.max_gs; + p_umv_buf->output_size = sizeof(struct ibv_create_srq_resp); + HCA_PRINT(TRACE_LEVEL_INFORMATION ,HCA_DBG_QP ,("PD%d use cnt %d \n", + ((struct mlx4_ib_pd*)pd)->pdn, pd->usecnt)); + } + + return p_ib_srq; + +err_create_srq: + if( p_umv_buf && p_umv_buf->command ) + p_umv_buf->status = IB_ERROR; + HCA_EXIT(HCA_DBG_QP); + return ERR_PTR(err); +} + +int ib_destroy_srq(struct ib_srq *srq) +{ + int ret; + struct ib_pd *p_ib_pd = srq->pd; + struct ib_ucontext *p_uctx = p_ib_pd->p_uctx; + + ret = srq->device->destroy_srq(srq); + if (!ret) { + atomic_dec(&p_ib_pd->usecnt); + HCA_PRINT(TRACE_LEVEL_INFORMATION ,HCA_DBG_SRQ ,("PD%d use cnt %d, pd_handle %p, ctx %p \n", + ((struct mlx4_ib_pd*)p_ib_pd)->pdn, p_ib_pd->usecnt, p_ib_pd, p_ib_pd->p_uctx)); + release_user_cq_qp_resources(p_uctx); + } + + return ret; +} + +// +// User context +// +static NTSTATUS __map_memory_for_user( + IN io_addr_t addr, + IN SIZE_T size, + IN MEMORY_CACHING_TYPE mem_type, + OUT umap_t * p_map + ) +{ + NTSTATUS status; + + HCA_ENTER(HCA_DBG_SHIM); + + p_map->mapped = 0; + + // map UAR to kernel + p_map->kva = ioremap(addr, size); + if (!p_map->kva) { + HCA_PRINT( TRACE_LEVEL_ERROR, HCA_DBG_LOW , + ("Couldn't map kernel access region, aborting.\n") ); + status = IB_INSUFFICIENT_MEMORY; + goto err_ioremap; + } + + // build MDL + p_map->mdl = IoAllocateMdl( p_map->kva, (ULONG)size, + FALSE, TRUE, NULL ); + if( !p_map->mdl ) { + status = IB_INSUFFICIENT_MEMORY; + goto err_alloc_mdl; + } + MmBuildMdlForNonPagedPool( p_map->mdl ); + + /* Map the memory into the calling process's address space. 
*/ + __try { + p_map->uva = MmMapLockedPagesSpecifyCache( p_map->mdl, + UserMode, mem_type, NULL, FALSE, NormalPagePriority ); + } + __except(EXCEPTION_EXECUTE_HANDLER) { + status = IB_INVALID_PERMISSION; + goto err_map; + } + + p_map->mapped = 1; + status = STATUS_SUCCESS; + goto done; + +err_map: + IoFreeMdl(p_map->mdl); + +err_alloc_mdl: + iounmap(p_map->kva, PAGE_SIZE); + +err_ioremap: +done: + HCA_EXIT(HCA_DBG_SHIM); + return status; +} + +static void __unmap_memory_for_user( + IN umap_t * p_map + ) +{ + if (p_map->mapped) { + p_map->mapped = 0; + MmUnmapLockedPages( p_map->uva, p_map->mdl ); + IoFreeMdl(p_map->mdl); + iounmap(p_map->kva, PAGE_SIZE); + } +} + +ib_api_status_t ibv_um_open( + IN struct ib_device * p_ibdev, + IN OUT ci_umv_buf_t* const p_umv_buf, + OUT struct ib_ucontext ** pp_uctx ) +{ + int err; + ib_api_status_t status; + struct mlx4_ib_ucontext *p_muctx; + struct ibv_get_context_resp *p_uresp; + struct mlx4_ib_alloc_ucontext_resp ib_alloc_ucontext_resp; + struct ib_ucontext *p_uctx; + struct ib_udata udata; + + HCA_ENTER(HCA_DBG_SHIM); + + // create user context in kernel + INIT_UDATA(&udata, NULL, &ib_alloc_ucontext_resp, + 0, sizeof(struct mlx4_ib_alloc_ucontext_resp)); + + p_uctx = p_ibdev->alloc_ucontext(p_ibdev, &udata); + if (IS_ERR(p_uctx)) { + err = PTR_ERR(p_uctx); + HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_SHIM, + ("mthca_alloc_ucontext failed (%d)\n", err)); + status = errno_to_iberr(err); + goto err_alloc_ucontext; + } + p_muctx = to_mucontext(p_uctx); + p_uresp = (struct ibv_get_context_resp *)(ULONG_PTR)p_umv_buf->p_inout_buf; + + // fill the rest of ib_ucontext fields + p_uctx->device = p_ibdev; + p_uctx->closing = 0; + + // livefish + if (hca_is_livefish(p_ibdev->x.p_fdo)) + goto done; + + // map uar to user space + status = __map_memory_for_user( + (io_addr_t)p_muctx->uar.pfn << PAGE_SHIFT, + PAGE_SIZE, MmNonCached, &p_uctx->x.uar ); + if( !NT_SUCCESS(status) ) { + goto err_map_uar; + } + p_uresp->uar_addr = (u64)(ULONG_PTR)p_uctx->x.uar.uva; + + // map BF to user space + if (ib_alloc_ucontext_resp.bf_reg_size) { + status = __map_memory_for_user( + (io_addr_t)(p_muctx->uar.pfn + + to_mdev(p_ibdev)->dev->caps.num_uars) << PAGE_SHIFT, + PAGE_SIZE, MmWriteCombined, &p_uctx->x.bf ); + if( !NT_SUCCESS(status) ) { + HCA_PRINT(TRACE_LEVEL_WARNING ,HCA_DBG_SHIM, + ("BlueFlame available, but failed to be mapped (%#x)\n", status)); + p_uresp->bf_page = 0; + p_uresp->bf_buf_size = 0; + } + else { + p_uresp->bf_page = (u64)(ULONG_PTR)p_uctx->x.bf.uva; + p_uresp->bf_buf_size = ib_alloc_ucontext_resp.bf_reg_size / 2; + p_uresp->bf_offset = 0; + } + } + else { + p_uresp->bf_page = 0; + p_uresp->bf_buf_size = 0; + } + +done: + // fill the response + p_uresp->bf_reg_size = ib_alloc_ucontext_resp.bf_reg_size; + p_uresp->bf_regs_per_page = ib_alloc_ucontext_resp.bf_regs_per_page; + p_uresp->qp_tab_size = ib_alloc_ucontext_resp.qp_tab_size; + + *pp_uctx = p_uctx; + status = IB_SUCCESS; + goto end; + +err_map_uar: + p_ibdev->dealloc_ucontext(p_uctx); +err_alloc_ucontext: +end: + HCA_EXIT(HCA_DBG_SHIM); + return status; +} + + +void ibv_um_close( struct ib_ucontext * h_um_ca ) +{ + int err; + ib_api_status_t status; + struct ib_ucontext *p_uctx = (struct ib_ucontext *)h_um_ca; + PFDO_DEVICE_DATA p_fdo = p_uctx->device->x.p_fdo; + + HCA_ENTER(HCA_DBG_SHIM); + + p_uctx->closing = 1; + + if (atomic_read(&p_uctx->x.usecnt)) { + HCA_PRINT(TRACE_LEVEL_ERROR,HCA_DBG_SHIM, + ("resources are not released (cnt %d)\n", p_uctx->x.usecnt)); + status = IB_RESOURCE_BUSY; + goto err_usage; + 
} + + if ( !hca_is_livefish(p_fdo)) { + __unmap_memory_for_user( &p_uctx->x.bf ); + __unmap_memory_for_user( &p_uctx->x.uar ); + } + + err = p_fdo->bus_ib_ifc.p_ibdev->dealloc_ucontext(p_uctx); + if (err) { + HCA_PRINT(TRACE_LEVEL_ERROR,HCA_DBG_SHIM, + ("mthca_dealloc_ucontext failed (%d)\n", err)); + status = errno_to_iberr(err); + goto err_dealloc_ucontext; + } + + HCA_PRINT(TRACE_LEVEL_INFORMATION,HCA_DBG_SHIM, + ("pcs %p\n", PsGetCurrentProcess()) ); + status = IB_SUCCESS; + goto end; + +err_dealloc_ucontext: +err_usage: +end: + if (status != IB_SUCCESS) + { + HCA_PRINT(TRACE_LEVEL_ERROR,HCA_DBG_SHIM, + ("completes with ERROR status %x\n", status)); + } + HCA_EXIT(HCA_DBG_SHIM); + return; +} + diff --git a/branches/WOF2-0/trunk/hw/mlx4/kernel/hca/hverbs.h b/branches/WOF2-0/trunk/hw/mlx4/kernel/hca/hverbs.h new file mode 100644 index 00000000..d160159c --- /dev/null +++ b/branches/WOF2-0/trunk/hw/mlx4/kernel/hca/hverbs.h @@ -0,0 +1,72 @@ +/* + * Copyright (c) 2004 Mellanox Technologies Ltd. All rights reserved. + * Copyright (c) 2004 Infinicon Corporation. All rights reserved. + * Copyright (c) 2004 Intel Corporation. All rights reserved. + * Copyright (c) 2004 Topspin Corporation. All rights reserved. + * Copyright (c) 2004 Voltaire Corporation. All rights reserved. + * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved. + * Copyright (c) 2005 Cisco Systems. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ * + * $Id: ib_verbs.h 1889 2006-12-31 08:33:06Z sleybo $ + */ + +#pragma once + +#include "ib_verbs.h" + +struct ib_mr *ibv_reg_mr(struct ib_pd *pd, + u64 start, u64 length, + u64 virt_addr, + int mr_access_flags, + ci_umv_buf_t* const p_umv_buf ); + +struct ib_cq *ibv_create_cq(struct ib_device *p_ibdev, + ib_comp_handler comp_handler, + void (*event_handler)(ib_event_rec_t *), + void *cq_context, int cqe, + struct ib_ucontext *p_uctx, ci_umv_buf_t* const p_umv_buf); + +struct ib_qp *ibv_create_qp(struct ib_pd *pd, + struct ib_qp_init_attr *qp_init_attr, + struct ib_ucontext *context, ci_umv_buf_t* const p_umv_buf); + +struct ib_srq *ibv_create_srq(struct ib_pd *pd, + struct ib_srq_init_attr *srq_init_attr, + struct ib_ucontext *context, ci_umv_buf_t* const p_umv_buf); + +ib_api_status_t ibv_um_open( + IN struct ib_device * p_ibdev, + IN OUT ci_umv_buf_t* const p_umv_buf, + OUT struct ib_ucontext ** pp_uctx ); + +void ibv_um_close( struct ib_ucontext * h_um_ca ); + + diff --git a/branches/WOF2-0/trunk/hw/mlx4/kernel/hca/precomp.h b/branches/WOF2-0/trunk/hw/mlx4/kernel/hca/precomp.h index a2f8f995..1d316185 100644 --- a/branches/WOF2-0/trunk/hw/mlx4/kernel/hca/precomp.h +++ b/branches/WOF2-0/trunk/hw/mlx4/kernel/hca/precomp.h @@ -39,7 +39,7 @@ #include "public.h" #include "debug.h" #include "l2w.h" -#include "verbs.h" +#include "hverbs.h" #include "mlx4_ib.h" #include "drv.h" #include "mx_abi.h" diff --git a/branches/WOF2-0/trunk/hw/mlx4/kernel/hca/verbs.c b/branches/WOF2-0/trunk/hw/mlx4/kernel/hca/verbs.c deleted file mode 100644 index 44969d9e..00000000 --- a/branches/WOF2-0/trunk/hw/mlx4/kernel/hca/verbs.c +++ /dev/null @@ -1,673 +0,0 @@ -/* - * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. - * Copyright (c) 2004-2005 Mellanox Technologies, Inc. All rights reserved. - * Portions Copyright (c) 2008 Microsoft Corporation. All rights reserved. - * - * This software is available to you under the OpenIB.org BSD license - * below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
- * - * $Id: hca_verbs.c 2073 2007-11-13 11:38:40Z leonid $ - */ - - -#include "precomp.h" - -#if defined(EVENT_TRACING) -#ifdef offsetof -#undef offsetof -#endif -#include "verbs.tmh" -#endif - - -/* Memory regions */ - -struct ib_mr *ib_get_dma_mr(struct ib_pd *pd, enum ib_access_flags mr_access_flags) -{ - struct ib_mr *mr; - - mr = pd->device->get_dma_mr(pd, mr_access_flags); - - if (!IS_ERR(mr)) { - mr->device = pd->device; - mr->pd = pd; - mr->p_uctx = pd->p_uctx; - atomic_inc(&pd->usecnt); - atomic_set(&mr->usecnt, 0); - HCA_PRINT(TRACE_LEVEL_INFORMATION ,HCA_DBG_MEMORY ,("pdn %d, usecnt %d \n", - ((struct mlx4_ib_pd*)pd)->pdn, pd->usecnt)); - } - - return mr; -} - -struct ib_mr *ib_reg_phys_mr(struct ib_pd *pd, - struct ib_phys_buf *phys_buf_array, - int num_phys_buf, - enum ib_access_flags mr_access_flags, - u64 *iova_start) -{ - struct ib_mr *mr; - - if ( pd->device->reg_phys_mr ) - mr = pd->device->reg_phys_mr(pd, phys_buf_array, num_phys_buf, - mr_access_flags, iova_start); - else - mr = ERR_PTR(-ENOSYS); - - if (!IS_ERR(mr)) { - mr->device = pd->device; - mr->pd = pd; - mr->p_uctx = pd->p_uctx; - atomic_inc(&pd->usecnt); - atomic_set(&mr->usecnt, 0); - HCA_PRINT(TRACE_LEVEL_INFORMATION ,HCA_DBG_MEMORY ,("PD%d use cnt %d \n", - ((struct mlx4_ib_pd*)pd)->pdn, pd->usecnt)); - } - - return mr; -} - - - struct ib_mr *ibv_reg_mr(struct ib_pd *pd, - u64 start, u64 length, - u64 virt_addr, - int mr_access_flags, - ci_umv_buf_t* const p_umv_buf ) -{ - struct ib_mr *ib_mr; - int err; - HCA_ENTER(HCA_DBG_MEMORY); - - if (p_umv_buf && p_umv_buf->command) { - err = -ENOSYS; - goto err_not_supported; - } - - ib_mr = pd->device->reg_user_mr(pd, start, length, virt_addr, mr_access_flags, NULL); - if (IS_ERR(ib_mr)) { - err = PTR_ERR(ib_mr); - HCA_PRINT(TRACE_LEVEL_ERROR,HCA_DBG_MEMORY ,("mthca_reg_user_mr failed (%d)\n", err)); - goto err_reg_user_mr; - } - - ib_mr->device = pd->device; - ib_mr->pd = pd; - atomic_inc(&pd->usecnt); - atomic_set(&ib_mr->usecnt, 0); - HCA_PRINT(TRACE_LEVEL_INFORMATION ,HCA_DBG_MEMORY ,("PD%d use cnt %d, pd_handle %p, ctx %p \n", - ((struct mlx4_ib_pd*)pd)->pdn, pd->usecnt, pd, pd->p_uctx)); - HCA_EXIT(HCA_DBG_MEMORY); - return ib_mr; - -err_reg_user_mr: -err_not_supported: - HCA_EXIT(HCA_DBG_MEMORY); - return ERR_PTR(err); -} - -int ib_dereg_mr(struct ib_mr *mr) -{ - int ret; - struct ib_pd *pd; - struct ib_device *p_ibdev; - - if (atomic_read(&mr->usecnt)) - return -EBUSY; - - p_ibdev = mr->device; - pd = mr->pd; - ret = p_ibdev->dereg_mr(mr); - if (!ret) { - atomic_dec(&pd->usecnt); - HCA_PRINT(TRACE_LEVEL_INFORMATION ,HCA_DBG_MEMORY ,("pdn %d, usecnt %d, pd_handle %p, ctx %p \n", - ((struct mlx4_ib_pd*)pd)->pdn, pd->usecnt, pd, pd->p_uctx)); - } - - return ret; -} - -static void release_user_cq_qp_resources( - struct ib_ucontext *p_uctx) -{ - if (p_uctx) { - atomic_dec(&p_uctx->x.usecnt); - if (!atomic_read(&p_uctx->x.usecnt) && p_uctx->closing) { - HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_SHIM ,("User resources are released. 
Removing context\n")); - ibv_um_close(p_uctx); - } - } -} - -// -// Completion queues -// - -struct ib_cq *ibv_create_cq(struct ib_device *p_ibdev, - ib_comp_handler comp_handler, - void (*event_handler)(ib_event_rec_t *), - void *cq_context, int cqe, - struct ib_ucontext *p_uctx, ci_umv_buf_t* const p_umv_buf) -{ - int err; - struct ib_cq *cq; - struct ib_udata udata, *p_udata = &udata; - struct ibv_create_cq *p_req; - struct ibv_create_cq_resp *p_resp = NULL; - - if ( p_uctx && p_umv_buf && p_umv_buf->p_inout_buf ) { - // prepare user parameters - p_req = (struct ibv_create_cq*)(ULONG_PTR)p_umv_buf->p_inout_buf; - p_resp = (struct ibv_create_cq_resp*)(ULONG_PTR) - p_umv_buf->p_inout_buf; - INIT_UDATA(&udata, &p_req->buf_addr, &p_resp->cqn, - sizeof(struct mlx4_ib_create_cq), sizeof(struct mlx4_ib_create_cq_resp)); - } - else - p_udata = NULL; - - // create cq - cq = p_ibdev->create_cq(p_ibdev, cqe, 0, p_uctx, p_udata); - if (IS_ERR(cq)) { - err = PTR_ERR(cq); - HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_CQ ,("create_cq failed (%d)\n", err)); - goto err_create_cq; - } - - cq->device = p_ibdev; - cq->p_uctx = p_uctx; - cq->comp_handler = comp_handler; - cq->event_handler = event_handler; - cq->cq_context = cq_context; - atomic_set(&cq->usecnt, 0); - if (p_uctx) - atomic_inc(&p_uctx->x.usecnt); - - HCA_PRINT(TRACE_LEVEL_INFORMATION, HCA_DBG_CQ , - ("created CQ: cqn %#x:%#x \n", ((struct mlx4_ib_cq*)cq)->mcq.cqn, cq->cqe )); - - // fill results - if (p_umv_buf) { - p_resp->cq_handle = (u64)(ULONG_PTR)cq; - p_resp->cqe = cq->cqe; - p_umv_buf->output_size = sizeof(struct ibv_create_cq_resp); - } - - return cq; - -err_create_cq: - if( p_umv_buf && p_umv_buf->command ) - p_umv_buf->status = IB_ERROR; - return ERR_PTR(err); -} - -int ib_destroy_cq(struct ib_cq *cq) -{ - int ret; - struct ib_ucontext *p_uctx = cq->p_uctx; - - if (atomic_read(&cq->usecnt)) - return -EBUSY; - - HCA_PRINT(TRACE_LEVEL_INFORMATION, HCA_DBG_CQ , - ("destroying CQ: cqn %#x:%#x \n", ((struct mlx4_ib_cq*)cq)->mcq.cqn, cq->cqe )); - - ret = cq->device->destroy_cq(cq); - release_user_cq_qp_resources(p_uctx); - return ret; -} - -// -// Queue pairs -// - -static char *__print_qtype(enum ib_qp_type qtype) -{ - char *str = NULL; - switch (qtype) { - case IB_QPT_SMI: str = "SMI"; break; - case IB_QPT_GSI: str = "GSI"; break; - case IB_QPT_RC: str = "RC"; break; - case IB_QPT_UC: str = "UC"; break; - case IB_QPT_UD: str = "UD"; break; - case IB_QPT_RAW_IP_V6: str = "IP_V6"; break; - case IB_QPT_RAW_ETY: str = "ETY"; break; - default: str = "UKNWN"; break; - } - return str; -} - -struct ib_qp *ibv_create_qp(struct ib_pd *pd, - struct ib_qp_init_attr *qp_init_attr, - struct ib_ucontext *p_uctx, ci_umv_buf_t* const p_umv_buf) -{ - int err; - struct ib_qp *p_ib_qp; - struct ib_udata udata, *p_udata = &udata; - struct ibv_create_qp *p_req = NULL; - struct ibv_create_qp_resp *p_resp= NULL; - - HCA_ENTER(HCA_DBG_QP); - - if ( p_uctx && p_umv_buf && p_umv_buf->command ) { - // prepare user parameters - p_req = (struct ibv_create_qp*)(ULONG_PTR)p_umv_buf->p_inout_buf; - p_resp = (struct ibv_create_qp_resp*)(ULONG_PTR)p_umv_buf->p_inout_buf; - INIT_UDATA(&udata, &p_req->buf_addr, NULL, - sizeof(struct mlx4_ib_create_qp), 0); - } - else - p_udata = NULL; - - p_ib_qp = pd->device->create_qp( pd, qp_init_attr, p_udata ); - - if (IS_ERR(p_ib_qp)) { - err = PTR_ERR(p_ib_qp); - HCA_PRINT(TRACE_LEVEL_ERROR,HCA_DBG_QP ,("create_qp failed (%d)\n", err)); - goto err_create_qp; - } - - // fill results - p_ib_qp->device = pd->device; - p_ib_qp->pd = pd; - 
p_ib_qp->send_cq = qp_init_attr->send_cq; - p_ib_qp->recv_cq = qp_init_attr->recv_cq; - p_ib_qp->srq = qp_init_attr->srq; - p_ib_qp->p_uctx = p_uctx; - p_ib_qp->event_handler = qp_init_attr->event_handler; - p_ib_qp->qp_context = qp_init_attr->qp_context; - p_ib_qp->qp_type = qp_init_attr->qp_type; - atomic_inc(&pd->usecnt); - atomic_inc(&qp_init_attr->send_cq->usecnt); - atomic_inc(&qp_init_attr->recv_cq->usecnt); - if (qp_init_attr->srq) - atomic_inc(&qp_init_attr->srq->usecnt); - if (p_uctx) - atomic_inc(&p_uctx->x.usecnt); - HCA_PRINT(TRACE_LEVEL_INFORMATION ,HCA_DBG_QP ,("pdn %d, usecnt %d, pd_handle %p, ctx %p \n", - ((struct mlx4_ib_pd*)pd)->pdn, pd->usecnt, pd, pd->p_uctx)); - - HCA_PRINT(TRACE_LEVEL_INFORMATION, HCA_DBG_QP , - ("qtype %s (%d), qnum %#x, q_num %#x, ssz %d, rsz %d, scq %#x:%#x, rcq %#x:%#x, port_num %d \n", - __print_qtype(p_ib_qp->qp_type), p_ib_qp->qp_type, - ((struct mlx4_ib_qp*)p_ib_qp)->mqp.qpn, p_ib_qp->qp_num, - qp_init_attr->cap.max_send_wr, qp_init_attr->cap.max_recv_wr, - ((struct mlx4_ib_cq*)p_ib_qp->send_cq)->mcq.cqn, p_ib_qp->send_cq->cqe, - ((struct mlx4_ib_cq*)p_ib_qp->recv_cq)->mcq.cqn, p_ib_qp->recv_cq->cqe, - qp_init_attr->port_num - ) ); - - // fill results for user - if (p_uctx && p_umv_buf && p_umv_buf->p_inout_buf) { - struct mlx4_ib_qp *p_mib_qp = (struct mlx4_ib_qp *)p_ib_qp; - p_resp->qp_handle = (__u64)(ULONG_PTR)p_ib_qp; - p_resp->qpn = p_mib_qp->mqp.qpn; - p_resp->max_send_wr = p_mib_qp->sq.max_post; - p_resp->max_recv_wr = p_mib_qp->rq.max_post; - p_resp->max_send_sge = p_mib_qp->sq.max_gs; - p_resp->max_recv_sge = p_mib_qp->rq.max_gs; - /* - * We don't support inline sends for kernel QPs (yet), and we - * don't know what userspace's value should be. - */ - p_resp->max_inline_data = 0; - p_umv_buf->output_size = sizeof(struct ibv_create_qp_resp); - } - - return p_ib_qp; - -err_create_qp: - if( p_umv_buf && p_umv_buf->command ) - p_umv_buf->status = IB_ERROR; - HCA_EXIT(HCA_DBG_QP); - return ERR_PTR(err); -} - -int ib_destroy_qp(struct ib_qp *qp) -{ - struct ib_pd *p_ib_pd; - struct ib_cq *scq, *rcq; - struct ib_srq *srq; - struct ib_ucontext *p_uctx; - int ret; - - p_ib_pd = qp->pd; - scq = qp->send_cq; - rcq = qp->recv_cq; - srq = qp->srq; - p_uctx = p_ib_pd->p_uctx; - - ret = qp->device->destroy_qp(qp); - if (!ret) { - atomic_dec(&p_ib_pd->usecnt); - atomic_dec(&scq->usecnt); - atomic_dec(&rcq->usecnt); - HCA_PRINT(TRACE_LEVEL_INFORMATION ,HCA_DBG_QP ,("PD%d use cnt %d, pd_handle %p, ctx %p \n", - ((struct mlx4_ib_pd*)p_ib_pd)->pdn, p_ib_pd->usecnt, p_ib_pd, p_ib_pd->p_uctx)); - if (srq) - atomic_dec(&srq->usecnt); - release_user_cq_qp_resources(p_uctx); - } - - return ret; -} - -// -// Shared receive queues -// - - -/* Shared receive queues */ - -struct ib_srq *ibv_create_srq(struct ib_pd *pd, - struct ib_srq_init_attr *srq_init_attr, - struct ib_ucontext *p_uctx, ci_umv_buf_t* const p_umv_buf) -{ - int err; - struct ib_srq *p_ib_srq; - struct ib_udata udata, *p_udata = &udata; - struct ibv_create_srq *p_req = NULL; - struct ibv_create_srq_resp *p_resp= NULL; - - if ( p_uctx && p_umv_buf && p_umv_buf->p_inout_buf) { - // prepare user parameters - p_req = (struct ibv_create_srq*)(ULONG_PTR)p_umv_buf->p_inout_buf; - p_resp = (struct ibv_create_srq_resp*)(ULONG_PTR)p_umv_buf->p_inout_buf; - INIT_UDATA(&udata, &p_req->buf_addr, &p_resp->srqn, - sizeof(struct ibv_create_srq), sizeof(struct ibv_create_srq_resp)); - } - else - p_udata = NULL; - - p_ib_srq = pd->device->create_srq( pd, srq_init_attr, p_udata ); - if (IS_ERR(p_ib_srq)) { - err 
= PTR_ERR(p_ib_srq); - HCA_PRINT(TRACE_LEVEL_ERROR,HCA_DBG_QP ,("create_srq failed (%d)\n", err)); - goto err_create_srq; - } - - // fill results - p_ib_srq->device = pd->device; - p_ib_srq->pd = pd; - p_ib_srq->p_uctx = p_uctx; - p_ib_srq->event_handler = srq_init_attr->event_handler; - p_ib_srq->srq_context = srq_init_attr->srq_context; - atomic_inc(&pd->usecnt); - atomic_set(&p_ib_srq->usecnt, 0); - if (p_uctx) - atomic_inc(&p_uctx->x.usecnt); - HCA_PRINT(TRACE_LEVEL_INFORMATION ,HCA_DBG_QP ,("PD%d use cnt %d, pd_handle %p, ctx %p \n", - ((struct mlx4_ib_pd*)pd)->pdn, pd->usecnt, pd, pd->p_uctx)); - - HCA_PRINT(TRACE_LEVEL_INFORMATION, HCA_DBG_SRQ , - ("uctx %p, qhndl %p, qnum %#x \n", - pd->p_uctx, p_ib_srq, ((struct mlx4_ib_srq*)p_ib_srq)->msrq.srqn ) ); - - // fill results for user - if (p_uctx && p_umv_buf && p_umv_buf->p_inout_buf) { - struct mlx4_ib_srq* p_mib_srq = (struct mlx4_ib_srq*)p_ib_srq; - p_resp->srq_handle = (__u64)(ULONG_PTR)p_ib_srq; - p_resp->max_wr = p_mib_srq->msrq.max - 1; - p_resp->max_sge = p_mib_srq->msrq.max_gs; - p_umv_buf->output_size = sizeof(struct ibv_create_srq_resp); - HCA_PRINT(TRACE_LEVEL_INFORMATION ,HCA_DBG_QP ,("PD%d use cnt %d \n", - ((struct mlx4_ib_pd*)pd)->pdn, pd->usecnt)); - } - - return p_ib_srq; - -err_create_srq: - if( p_umv_buf && p_umv_buf->command ) - p_umv_buf->status = IB_ERROR; - HCA_EXIT(HCA_DBG_QP); - return ERR_PTR(err); -} - -int ib_destroy_srq(struct ib_srq *srq) -{ - int ret; - struct ib_pd *p_ib_pd = srq->pd; - struct ib_ucontext *p_uctx = p_ib_pd->p_uctx; - - ret = srq->device->destroy_srq(srq); - if (!ret) { - atomic_dec(&p_ib_pd->usecnt); - HCA_PRINT(TRACE_LEVEL_INFORMATION ,HCA_DBG_SRQ ,("PD%d use cnt %d, pd_handle %p, ctx %p \n", - ((struct mlx4_ib_pd*)p_ib_pd)->pdn, p_ib_pd->usecnt, p_ib_pd, p_ib_pd->p_uctx)); - release_user_cq_qp_resources(p_uctx); - } - - return ret; -} - -// -// User context -// -static NTSTATUS __map_memory_for_user( - IN io_addr_t addr, - IN SIZE_T size, - IN MEMORY_CACHING_TYPE mem_type, - OUT umap_t * p_map - ) -{ - NTSTATUS status; - - HCA_ENTER(HCA_DBG_SHIM); - - p_map->mapped = 0; - - // map UAR to kernel - p_map->kva = ioremap(addr, size); - if (!p_map->kva) { - HCA_PRINT( TRACE_LEVEL_ERROR, HCA_DBG_LOW , - ("Couldn't map kernel access region, aborting.\n") ); - status = IB_INSUFFICIENT_MEMORY; - goto err_ioremap; - } - - // build MDL - p_map->mdl = IoAllocateMdl( p_map->kva, (ULONG)size, - FALSE, TRUE, NULL ); - if( !p_map->mdl ) { - status = IB_INSUFFICIENT_MEMORY; - goto err_alloc_mdl; - } - MmBuildMdlForNonPagedPool( p_map->mdl ); - - /* Map the memory into the calling process's address space. 
*/ - __try { - p_map->uva = MmMapLockedPagesSpecifyCache( p_map->mdl, - UserMode, mem_type, NULL, FALSE, NormalPagePriority ); - } - __except(EXCEPTION_EXECUTE_HANDLER) { - status = IB_INVALID_PERMISSION; - goto err_map; - } - - p_map->mapped = 1; - status = STATUS_SUCCESS; - goto done; - -err_map: - IoFreeMdl(p_map->mdl); - -err_alloc_mdl: - iounmap(p_map->kva, PAGE_SIZE); - -err_ioremap: -done: - HCA_EXIT(HCA_DBG_SHIM); - return status; -} - -static void __unmap_memory_for_user( - IN umap_t * p_map - ) -{ - if (p_map->mapped) { - p_map->mapped = 0; - MmUnmapLockedPages( p_map->uva, p_map->mdl ); - IoFreeMdl(p_map->mdl); - iounmap(p_map->kva, PAGE_SIZE); - } -} - -ib_api_status_t ibv_um_open( - IN struct ib_device * p_ibdev, - IN OUT ci_umv_buf_t* const p_umv_buf, - OUT struct ib_ucontext ** pp_uctx ) -{ - int err; - ib_api_status_t status; - struct mlx4_ib_ucontext *p_muctx; - struct ibv_get_context_resp *p_uresp; - struct mlx4_ib_alloc_ucontext_resp ib_alloc_ucontext_resp; - struct ib_ucontext *p_uctx; - struct ib_udata udata; - - HCA_ENTER(HCA_DBG_SHIM); - - // create user context in kernel - INIT_UDATA(&udata, NULL, &ib_alloc_ucontext_resp, - 0, sizeof(struct mlx4_ib_alloc_ucontext_resp)); - - p_uctx = p_ibdev->alloc_ucontext(p_ibdev, &udata); - if (IS_ERR(p_uctx)) { - err = PTR_ERR(p_uctx); - HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_SHIM, - ("mthca_alloc_ucontext failed (%d)\n", err)); - status = errno_to_iberr(err); - goto err_alloc_ucontext; - } - p_muctx = to_mucontext(p_uctx); - p_uresp = (struct ibv_get_context_resp *)(ULONG_PTR)p_umv_buf->p_inout_buf; - - // fill the rest of ib_ucontext fields - p_uctx->device = p_ibdev; - p_uctx->closing = 0; - - // livefish - if (hca_is_livefish(p_ibdev->x.p_fdo)) - goto done; - - // map uar to user space - status = __map_memory_for_user( - (io_addr_t)p_muctx->uar.pfn << PAGE_SHIFT, - PAGE_SIZE, MmNonCached, &p_uctx->x.uar ); - if( !NT_SUCCESS(status) ) { - goto err_map_uar; - } - p_uresp->uar_addr = (u64)(ULONG_PTR)p_uctx->x.uar.uva; - - // map BF to user space - if (ib_alloc_ucontext_resp.bf_reg_size) { - status = __map_memory_for_user( - (io_addr_t)(p_muctx->uar.pfn + - to_mdev(p_ibdev)->dev->caps.num_uars) << PAGE_SHIFT, - PAGE_SIZE, MmWriteCombined, &p_uctx->x.bf ); - if( !NT_SUCCESS(status) ) { - HCA_PRINT(TRACE_LEVEL_WARNING ,HCA_DBG_SHIM, - ("BlueFlame available, but failed to be mapped (%#x)\n", status)); - p_uresp->bf_page = 0; - p_uresp->bf_buf_size = 0; - } - else { - p_uresp->bf_page = (u64)(ULONG_PTR)p_uctx->x.bf.uva; - p_uresp->bf_buf_size = ib_alloc_ucontext_resp.bf_reg_size / 2; - p_uresp->bf_offset = 0; - } - } - else { - p_uresp->bf_page = 0; - p_uresp->bf_buf_size = 0; - } - -done: - // fill the response - p_uresp->bf_reg_size = ib_alloc_ucontext_resp.bf_reg_size; - p_uresp->bf_regs_per_page = ib_alloc_ucontext_resp.bf_regs_per_page; - p_uresp->qp_tab_size = ib_alloc_ucontext_resp.qp_tab_size; - - *pp_uctx = p_uctx; - status = IB_SUCCESS; - goto end; - -err_map_uar: - p_ibdev->dealloc_ucontext(p_uctx); -err_alloc_ucontext: -end: - HCA_EXIT(HCA_DBG_SHIM); - return status; -} - - -void ibv_um_close( struct ib_ucontext * h_um_ca ) -{ - int err; - ib_api_status_t status; - struct ib_ucontext *p_uctx = (struct ib_ucontext *)h_um_ca; - PFDO_DEVICE_DATA p_fdo = p_uctx->device->x.p_fdo; - - HCA_ENTER(HCA_DBG_SHIM); - - p_uctx->closing = 1; - - if (atomic_read(&p_uctx->x.usecnt)) { - HCA_PRINT(TRACE_LEVEL_ERROR,HCA_DBG_SHIM, - ("resources are not released (cnt %d)\n", p_uctx->x.usecnt)); - status = IB_RESOURCE_BUSY; - goto err_usage; - 
} - - if ( !hca_is_livefish(p_fdo)) { - __unmap_memory_for_user( &p_uctx->x.bf ); - __unmap_memory_for_user( &p_uctx->x.uar ); - } - - err = p_fdo->bus_ib_ifc.p_ibdev->dealloc_ucontext(p_uctx); - if (err) { - HCA_PRINT(TRACE_LEVEL_ERROR,HCA_DBG_SHIM, - ("mthca_dealloc_ucontext failed (%d)\n", err)); - status = errno_to_iberr(err); - goto err_dealloc_ucontext; - } - - HCA_PRINT(TRACE_LEVEL_INFORMATION,HCA_DBG_SHIM, - ("pcs %p\n", PsGetCurrentProcess()) ); - status = IB_SUCCESS; - goto end; - -err_dealloc_ucontext: -err_usage: -end: - if (status != IB_SUCCESS) - { - HCA_PRINT(TRACE_LEVEL_ERROR,HCA_DBG_SHIM, - ("completes with ERROR status %x\n", status)); - } - HCA_EXIT(HCA_DBG_SHIM); - return; -} - diff --git a/branches/WOF2-0/trunk/hw/mlx4/kernel/hca/verbs.h b/branches/WOF2-0/trunk/hw/mlx4/kernel/hca/verbs.h deleted file mode 100644 index d160159c..00000000 --- a/branches/WOF2-0/trunk/hw/mlx4/kernel/hca/verbs.h +++ /dev/null @@ -1,72 +0,0 @@ -/* - * Copyright (c) 2004 Mellanox Technologies Ltd. All rights reserved. - * Copyright (c) 2004 Infinicon Corporation. All rights reserved. - * Copyright (c) 2004 Intel Corporation. All rights reserved. - * Copyright (c) 2004 Topspin Corporation. All rights reserved. - * Copyright (c) 2004 Voltaire Corporation. All rights reserved. - * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved. - * Copyright (c) 2005 Cisco Systems. All rights reserved. - * - * This software is available to you under a choice of one of two - * licenses. You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
- * - * $Id: ib_verbs.h 1889 2006-12-31 08:33:06Z sleybo $ - */ - -#pragma once - -#include "ib_verbs.h" - -struct ib_mr *ibv_reg_mr(struct ib_pd *pd, - u64 start, u64 length, - u64 virt_addr, - int mr_access_flags, - ci_umv_buf_t* const p_umv_buf ); - -struct ib_cq *ibv_create_cq(struct ib_device *p_ibdev, - ib_comp_handler comp_handler, - void (*event_handler)(ib_event_rec_t *), - void *cq_context, int cqe, - struct ib_ucontext *p_uctx, ci_umv_buf_t* const p_umv_buf); - -struct ib_qp *ibv_create_qp(struct ib_pd *pd, - struct ib_qp_init_attr *qp_init_attr, - struct ib_ucontext *context, ci_umv_buf_t* const p_umv_buf); - -struct ib_srq *ibv_create_srq(struct ib_pd *pd, - struct ib_srq_init_attr *srq_init_attr, - struct ib_ucontext *context, ci_umv_buf_t* const p_umv_buf); - -ib_api_status_t ibv_um_open( - IN struct ib_device * p_ibdev, - IN OUT ci_umv_buf_t* const p_umv_buf, - OUT struct ib_ucontext ** pp_uctx ); - -void ibv_um_close( struct ib_ucontext * h_um_ca ); - - diff --git a/branches/WOF2-0/trunk/hw/mlx4/kernel/inc/mlx4_debug.h b/branches/WOF2-0/trunk/hw/mlx4/kernel/inc/mlx4_debug.h index 76e8d2ef..d9b21bcb 100644 --- a/branches/WOF2-0/trunk/hw/mlx4/kernel/inc/mlx4_debug.h +++ b/branches/WOF2-0/trunk/hw/mlx4/kernel/inc/mlx4_debug.h @@ -93,7 +93,7 @@ end: WPP_DEFINE_BIT( MLX4_DBG_INIT) \ WPP_DEFINE_BIT( MLX4_DBG_MAD) \ WPP_DEFINE_BIT( MLX4_DBG_PO) \ - WPP_DEFINE_BIT( MLX4_DBG_PD)\ + WPP_DEFINE_BIT( MLX4_DBG_PD) \ WPP_DEFINE_BIT( MLX4_DBG_CQ) \ WPP_DEFINE_BIT( MLX4_DBG_QP) \ WPP_DEFINE_BIT( MLX4_DBG_MEMORY) \ @@ -101,8 +101,8 @@ end: WPP_DEFINE_BIT( MLX4_DBG_SRQ) \ WPP_DEFINE_BIT( MLX4_DBG_MCAST) \ WPP_DEFINE_BIT( MLX4_DBG_LOW) \ - WPP_DEFINE_BIT( MLX4_DBG_SHIM)) \ - WPP_DEFINE_BIT( MLX4_DBG_DRV)) + WPP_DEFINE_BIT( MLX4_DBG_SHIM) \ + WPP_DEFINE_BIT( MLX4_DBG_DRV) ) #define WPP_GLOBALLOGGER