git.openfabrics.org - ~shefty/rdma-win.git/commitdiff
[MLX4] added support to WPP. [mlnx:3348-9]
author	leonidk <leonidk@ad392aa1-c5ef-ae45-8dd8-e69d62a5ef86>
Thu, 23 Oct 2008 13:40:22 +0000 (13:40 +0000)
committer	leonidk <leonidk@ad392aa1-c5ef-ae45-8dd8-e69d62a5ef86>
Thu, 23 Oct 2008 13:40:22 +0000 (13:40 +0000)
git-svn-id: svn://openib.tc.cornell.edu/gen1@1688 ad392aa1-c5ef-ae45-8dd8-e69d62a5ef86

26 files changed:
branches/WOF2-0/trunk/hw/mlx4/kernel/bus/core/SOURCES
branches/WOF2-0/trunk/hw/mlx4/kernel/bus/core/cache.c
branches/WOF2-0/trunk/hw/mlx4/kernel/bus/core/device.c
branches/WOF2-0/trunk/hw/mlx4/kernel/bus/core/l2w.c
branches/WOF2-0/trunk/hw/mlx4/kernel/bus/core/l2w_memory.c
branches/WOF2-0/trunk/hw/mlx4/kernel/bus/core/l2w_umem.c
branches/WOF2-0/trunk/hw/mlx4/kernel/bus/core/verbs.c
branches/WOF2-0/trunk/hw/mlx4/kernel/bus/drv/drv.c
branches/WOF2-0/trunk/hw/mlx4/kernel/bus/drv/pdo.c
branches/WOF2-0/trunk/hw/mlx4/kernel/bus/drv/precomp.h
branches/WOF2-0/trunk/hw/mlx4/kernel/bus/drv/sources
branches/WOF2-0/trunk/hw/mlx4/kernel/bus/drv/wmi.c
branches/WOF2-0/trunk/hw/mlx4/kernel/bus/ib/SOURCES
branches/WOF2-0/trunk/hw/mlx4/kernel/bus/ib/cq.c
branches/WOF2-0/trunk/hw/mlx4/kernel/bus/ib/mad.c
branches/WOF2-0/trunk/hw/mlx4/kernel/bus/net/SOURCES
branches/WOF2-0/trunk/hw/mlx4/kernel/bus/net/alloc.c
branches/WOF2-0/trunk/hw/mlx4/kernel/bus/net/cq.c
branches/WOF2-0/trunk/hw/mlx4/kernel/bus/net/main.c
branches/WOF2-0/trunk/hw/mlx4/kernel/hca/SOURCES
branches/WOF2-0/trunk/hw/mlx4/kernel/hca/hverbs.c [new file with mode: 0644]
branches/WOF2-0/trunk/hw/mlx4/kernel/hca/hverbs.h [new file with mode: 0644]
branches/WOF2-0/trunk/hw/mlx4/kernel/hca/precomp.h
branches/WOF2-0/trunk/hw/mlx4/kernel/hca/verbs.c [deleted file]
branches/WOF2-0/trunk/hw/mlx4/kernel/hca/verbs.h [deleted file]
branches/WOF2-0/trunk/hw/mlx4/kernel/inc/mlx4_debug.h

index e0cc75cabf2aeaa387e3b723acdd1e78be5dc397..fe394f67d2c2cf66236c9cce39cae89e4588e13e 100644 (file)
@@ -5,7 +5,7 @@ TARGETTYPE=DRIVER_LIBRARY
 \r
 \r
 !if $(FREEBUILD)\r
-#ENABLE_EVENT_TRACING=1\r
+ENABLE_EVENT_TRACING=1\r
 !else\r
 #ENABLE_EVENT_TRACING=1\r
 !endif\r
index c41e9ae628191d6de58f30012db4e586838991e9..b3ddc3485bf1bd445d6bd410f42c2cfd729f166f 100644 (file)
@@ -37,6 +37,7 @@
 
 #include "ib\mlx4_ib.h"
 #include "ib_cache.h"
+#include <mlx4_debug.h>
 
 #if defined(EVENT_TRACING)
 #ifdef offsetof
index c68ae40ee1405883c7f714dd8e9f2c2b3da7f496..d20b11054f7d40587abea16b4db7594309248048 100644 (file)
  * $Id: device.c 1349 2004-12-16 21:09:43Z roland $
  */
 
+#include "l2w.h"
+#include "ib_verbs.h"
+#include "core.h"
+#include <mlx4_debug.h>
+
 #if defined(EVENT_TRACING)
 #ifdef offsetof
 #undef offsetof
@@ -40,9 +45,6 @@
 #include "device.tmh"
 #endif
 
-#include "l2w.h"
-#include "ib_verbs.h"
-#include "core.h"
 
 struct ib_client_data {
        struct list_head  list;
index d3a225e00a897ebd44d074a03f92e27bf9276d59..4b7599eb95715676410e438a49cbfd96e4c3ffbd 100644 (file)
@@ -3,6 +3,13 @@
 #include "pa_cash.h"
 #include "mlx4.h"
 
+#if defined (EVENT_TRACING)
+#ifdef offsetof
+#undef offsetof
+#endif
+#include "l2w.tmh"
+#endif 
+
 /* Nth element of the table contains the index of the first set bit of N; 8 - for N=0 */
 char g_set_bit_tbl[256];
 
index bb0f88511872f238d07683cea3e40190124ca142..49b8c92d6ab3952043ce628469ec05ffe2c53ce0 100644 (file)
@@ -34,6 +34,7 @@
  * $Id: mt_memory.c 2020 2007-05-01 09:29:10Z leonid $
  */
 #include "l2w.h"
+#include <mlx4_debug.h>
 
 #if defined (EVENT_TRACING)
 #ifdef offsetof
index 5a3d30789d8d9f943409587ab53bc15f5f447747..245ca6bbdc4351ae3994b3df48fb848e4f74bc30 100644 (file)
@@ -2,6 +2,13 @@
 #include "l2w.h"
 #include "ib_verbs.h"
 
+#if defined (EVENT_TRACING)
+#ifdef offsetof
+#undef offsetof
+#endif
+#include "l2w_umem.tmh"
+#endif 
+
 /**
  * ib_umem_release - release memory pinned with ib_umem_get
  * @umem: umem struct to release
index fd9daa78696e1e16d2c344a071dee45b828b9288..33e08f5136400baf266836128e836173d6745902 100644 (file)
  * $Id: verbs.c 1349 2004-12-16 21:09:43Z roland $
  */
 
+#include "l2w.h"
+#include "ib_verbs.h"
+#include <mlx4_debug.h>
+
 #if defined(EVENT_TRACING)
 #ifdef offsetof
 #undef offsetof
@@ -45,8 +49,6 @@
 #include "device.tmh"
 #endif
 
-#include "l2w.h"
-#include "ib_verbs.h"
 
 // qp_state_table
 static struct {
index 23b9ef750f96e290e5e42557c043d157cea844b8..345205ec50df7c717f73085f6cbeca185a0485a6 100644 (file)
@@ -22,7 +22,10 @@ Environment:
 #include <initguid.h>\r
 #include <wdmguid.h>\r
 \r
-#if defined(EVENT_TRACING)\r
+#if defined (EVENT_TRACING)\r
+#ifdef offsetof\r
+#undef offsetof\r
+#endif\r
 #include "drv.tmh"\r
 #endif \r
 \r
@@ -570,20 +573,20 @@ __get_resources(
                                                i, desc->ShareDisposition, desc->Flags,\r
                                                desc->u.MessageInterrupt.Translated.Level, \r
                                                desc->u.MessageInterrupt.Translated.Vector, \r
-                                               desc->u.MessageInterrupt.Translated.Affinity ));\r
+                                               (u32)desc->u.MessageInterrupt.Translated.Affinity ));\r
                                        MLX4_PRINT(TRACE_LEVEL_WARNING, MLX4_DBG_DRV,\r
-                                               ("EvtPrepareHardware: Desc %d: RawMsiInterrupt: Share %d, Flags %#x, MessageCount #hx, Vector %#x, Affinity %#x\n", \r
+                                               ("EvtPrepareHardware: Desc %d: RawMsiInterrupt: Share %d, Flags %#x, MessageCount %#hx, Vector %#x, Affinity %#x\n", \r
                                                i, desc_raw->ShareDisposition, desc_raw->Flags,\r
                                                desc_raw->u.MessageInterrupt.Raw.MessageCount, \r
                                                desc_raw->u.MessageInterrupt.Raw.Vector,\r
-                                               desc_raw->u.MessageInterrupt.Raw.Affinity ));\r
+                                               (u32)desc_raw->u.MessageInterrupt.Raw.Affinity ));\r
                                }\r
                                else { // line-based interrupt\r
                                        MLX4_PRINT(TRACE_LEVEL_WARNING, MLX4_DBG_DRV,\r
                                                ("EvtPrepareHardware: Desc %d: LineInterrupt: Share %d, Flags %#x, Level %d, Vector %#x, Affinity %#x\n", \r
                                                i, desc->ShareDisposition, desc->Flags,\r
                                                desc->u.Interrupt.Level, desc->u.Interrupt.Vector, \r
-                                               desc->u.Interrupt.Affinity ));\r
+                                               (u32)desc->u.Interrupt.Affinity ));\r
                                }\r
                                break;\r
 #endif\r
index 920cd476bb5286f395db136faa996d994addaf9b..826f834e25f5ccf9e728cd1ad40c76909fa27319 100644 (file)
@@ -2,9 +2,12 @@
 #include <initguid.h>\r
 #include <wdmguid.h>\r
 \r
-#if defined(EVENT_TRACING)\r
-#include "pdo.tmh"\r
+#if defined (EVENT_TRACING)\r
+#ifdef offsetof\r
+#undef offsetof\r
 #endif\r
+#include "pdo.tmh"\r
+#endif \r
 \r
 #ifdef ALLOC_PRAGMA\r
 #pragma alloc_text(PAGE, create_pdo)\r
index 9fbb31cd58f4fe655e17b5ff6716b395a37aedc9..eefec2948b67743b320ca36328c5d27f16a3912d 100644 (file)
 #include "drv.h"\r
 #include "driver.h"\r
 #include "cmd.h"\r
-\r
-#if 0\r
-#include "mxe_hca.h"\r
-#include "mtnic_if_defs.h"\r
-#include "mxe_utils.h"\r
-#include "mxe_wpptrace.h"\r
-#include "mtnic_dev.h"\r
-#include "mxe_drv.h"\r
-#endif\r
+#include <mlx4_debug.h>\r
 \r
 \r
index 54d498205e957cd05fb59f0cce6f5edece398ba1..00644b69b896746c87db8cd698f76aa0a2439d57 100644 (file)
@@ -11,7 +11,7 @@ NTTARGETFILES=$(INF_TARGET)
 !endif\r
 \r
 !if $(FREEBUILD)\r
-#ENABLE_EVENT_TRACING=1\r
+ENABLE_EVENT_TRACING=1\r
 !else\r
 #ENABLE_EVENT_TRACING=1\r
 !endif\r
@@ -54,7 +54,7 @@ C_DEFINES = $(C_DEFINES) -DEVENT_TRACING
 \r
 RUN_WPP= $(SOURCES) -km -dll -ext: .c .cpp .h .C .CPP .H\\r
 #      -preserveext:.cpp .h\\r
-       -scan:..\inc\mlx4_debug.h \\r
+       -scan:..\..\inc\mlx4_debug.h \\r
        -func:MLX4_PRINT(LEVEL,FLAGS,(MSG,...)) \\r
        -func:MLX4_PRINT_EXIT(LEVEL,FLAGS,(MSG,...)) \r
 !ENDIF\r
index e3faf50c838835d699db99c2205c34d6b0a7b002..25323d4683aed1fab6c06de7184bf6726121b0a6 100644 (file)
@@ -23,9 +23,12 @@ Environment:
 \r
 #include "precomp.h"\r
 \r
-#if defined(EVENT_TRACING)\r
-#include "wmi.tmh"\r
+#if defined (EVENT_TRACING)\r
+#ifdef offsetof\r
+#undef offsetof\r
 #endif\r
+#include "wmi.tmh"\r
+#endif \r
 \r
 #ifdef ALLOC_PRAGMA\r
 #pragma alloc_text(PAGE,WmiRegistration)\r
index 7216a5ba8bbd306e1c67c723ba40a26b7eeb8b2c..4a940f09d2c05433bd3588d381b770bbc9451500 100644 (file)
@@ -5,7 +5,7 @@ TARGETTYPE=DRIVER_LIBRARY
 \r
 \r
 !if $(FREEBUILD)\r
-#ENABLE_EVENT_TRACING=1\r
+ENABLE_EVENT_TRACING=1\r
 !else\r
 #ENABLE_EVENT_TRACING=1\r
 !endif\r
@@ -37,7 +37,7 @@ TARGETLIBS= \
 C_DEFINES = $(C_DEFINES) -DEVENT_TRACING\r
 \r
 RUN_WPP = $(SOURCES) -km -ext: .c .h .C .H \\r
-       -scan:..\inc\mlx4_debug.h \\r
+       -scan:..\..\inc\mlx4_debug.h \\r
        -func:MLX4_PRINT(LEVEL,FLAGS,(MSG,...)) \\r
        -func:MLX4_PRINT_EXIT(LEVEL,FLAGS,(MSG,...)) \r
 !ENDIF\r
index 579d4794897168c1fe8b138fe3e7fc14d4be1479..2cedea6c578e55199768553a374db39fa692d8a3 100644 (file)
 #include "qp.h"\r
 #include "user.h"\r
 \r
+#if defined(EVENT_TRACING)\r
+#ifdef offsetof\r
+#undef offsetof\r
+#endif\r
+#include "cq.tmh"\r
+#endif\r
+\r
+\r
 static void mlx4_ib_cq_comp(struct mlx4_cq *cq)\r
 {\r
        struct ib_cq *ibcq = &to_mibcq(cq)->ibcq;\r
index 01ba48cb13013649fed5d451327c502a70d4edcc..4a055d95952866c648ed59f523fcfa2cafdb5aaa 100644 (file)
 #include <ib_smi.h>
 #include "cmd.h"
 
+#if defined(EVENT_TRACING)
+#ifdef offsetof
+#undef offsetof
+#endif
+#include "mad.tmh"
+#endif
+
+
 enum {
        MLX4_IB_VENDOR_CLASS1 = 0x9,
        MLX4_IB_VENDOR_CLASS2 = 0xa
index e1c89e5493b34a10ee0ddc147785b85eea3a8fe9..37441cf0c3cd24ecb4ecc1a5aff4c6a528dae046 100644 (file)
@@ -5,7 +5,7 @@ TARGETTYPE=DRIVER_LIBRARY
 \r
 \r
 !if $(FREEBUILD)\r
-#ENABLE_EVENT_TRACING=1\r
+ENABLE_EVENT_TRACING=1\r
 !else\r
 #ENABLE_EVENT_TRACING=1\r
 !endif\r
@@ -46,7 +46,7 @@ TARGETLIBS= \
 C_DEFINES = $(C_DEFINES) -DEVENT_TRACING\r
 \r
 RUN_WPP = $(SOURCES) -km -ext: .c .h .C .H \\r
-       -scan:..\mlx4_debug.h \\r
+       -scan:..\..\inc\mlx4_debug.h \\r
        -func:MLX4_PRINT(LEVEL,FLAGS,(MSG,...)) \\r
        -func:MLX4_PRINT_EXIT(LEVEL,FLAGS,(MSG,...)) \r
 !ENDIF\r
index 62a074e6b0a0eed7318f907db7e3a223844e01dc..52d999fe9a2053c7ae398c3b67d22f5aea125849 100644 (file)
  */
 
 #include "mlx4.h"
+#include <mlx4_debug.h>
+
+#if defined(EVENT_TRACING)
+#ifdef offsetof
+#undef offsetof
+#endif
+#include "alloc.tmh"
+#endif
+
 
 u32 mlx4_bitmap_alloc(struct mlx4_bitmap *bitmap)
 {
@@ -214,7 +223,7 @@ int mlx4_buf_alloc(struct mlx4_dev *dev, int size, int max_direct,
                        buf->npages *= 2;
                }
                MLX4_PRINT( TRACE_LEVEL_INFORMATION, MLX4_DBG_CQ,
-                       ("size %#x, nbufs %d, pages %d, page_shift %d, kva %p, da %llx, buf_size %#x\n",
+                       ("size %#x, nbufs %d, pages %d, page_shift %d, kva %p, da %I64x, buf_size %#x\n",
                        size, buf->nbufs, buf->npages, buf->page_shift, 
                        buf->u.direct.buf, t.da, t.sz ));
                memset(buf->u.direct.buf, 0, size);
index 9fed0ade483c1f5e8868ef7f8d88afe52f046660..3dab6b0d334087aefd2bac7209415071f8e12ce3 100644 (file)
 #include "cmd.h"
 #include "icm.h"
 #include "cq.h"
+#include <mlx4_debug.h>
+
+#if defined(EVENT_TRACING)
+#ifdef offsetof
+#undef offsetof
+#endif
+#include "cq.tmh"
+#endif
+
 
 #define MLX4_CQ_STATUS_OK              ( 0 << 28)
 #define MLX4_CQ_STATUS_OVERFLOW                ( 9 << 28)
index 3a1d92e550f62ef3082b075917ca713067193ccb..1273d7787b3a6cb188107ed256d017bdaca5d76d 100644 (file)
 #include "device.h"
 #include "doorbell.h"
 #include "complib\cl_thread.h"
+#include <mlx4_debug.h>
+
+#if defined(EVENT_TRACING)
+#ifdef offsetof
+#undef offsetof
+#endif
+#include "main.tmh"
+#endif
 
 
 static struct mlx4_profile default_profile = {
index ab9c297361ea5fef86771f4ccc69ff2c0e1e3c2d..4a2579177d509aee97994df8e563830afc2966ba 100644 (file)
@@ -17,7 +17,7 @@ ENABLE_EVENT_TRACING=1
 !endif\r
 \r
 SOURCES= \\r
-       hca.rc                  \\r
+       hca.rc                  \\r
        av.c                    \\r
        ca.c                    \\r
        cq.c                    \\r
@@ -25,12 +25,12 @@ SOURCES= \
        direct.c                \\r
        drv.c                   \\r
        fw.c                    \\r
-       mcast.c                 \\r
+       mcast.c                 \\r
        mr.c                    \\r
        pd.c                    \\r
        qp.c                    \\r
        srq.c                   \\r
-       verbs.c                 \\r
+       hverbs.c                \\r
        vp.c                    \\r
        wmi.c                   \\r
 \r
diff --git a/branches/WOF2-0/trunk/hw/mlx4/kernel/hca/hverbs.c b/branches/WOF2-0/trunk/hw/mlx4/kernel/hca/hverbs.c
new file mode 100644 (file)
index 0000000..4fcf5b1
--- /dev/null
@@ -0,0 +1,673 @@
+/*\r
+ * Copyright (c) 2005 SilverStorm Technologies.  All rights reserved.\r
+ * Copyright (c) 2004-2005 Mellanox Technologies, Inc. All rights reserved. \r
+ * Portions Copyright (c) 2008 Microsoft Corporation.  All rights reserved.\r
+ *\r
+ * This software is available to you under the OpenIB.org BSD license\r
+ * below:\r
+ *\r
+ *     Redistribution and use in source and binary forms, with or\r
+ *     without modification, are permitted provided that the following\r
+ *     conditions are met:\r
+ *\r
+ *      - Redistributions of source code must retain the above\r
+ *        copyright notice, this list of conditions and the following\r
+ *        disclaimer.\r
+ *\r
+ *      - Redistributions in binary form must reproduce the above\r
+ *        copyright notice, this list of conditions and the following\r
+ *        disclaimer in the documentation and/or other materials\r
+ *        provided with the distribution.\r
+ *\r
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,\r
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\r
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND\r
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS\r
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN\r
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN\r
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\r
+ * SOFTWARE.\r
+ *\r
+ * $Id: hca_verbs.c 2073 2007-11-13 11:38:40Z leonid $\r
+ */\r
+\r
+\r
+#include "precomp.h"\r
+\r
+#if defined(EVENT_TRACING)\r
+#ifdef offsetof\r
+#undef offsetof\r
+#endif\r
+#include "hverbs.tmh"\r
+#endif\r
+\r
+\r
+/* Memory regions */\r
+\r
+struct ib_mr *ib_get_dma_mr(struct ib_pd *pd, enum ib_access_flags mr_access_flags)\r
+{\r
+       struct ib_mr *mr;\r
+\r
+       mr = pd->device->get_dma_mr(pd, mr_access_flags);\r
\r
+       if (!IS_ERR(mr)) {\r
+               mr->device  = pd->device;\r
+               mr->pd      = pd;\r
+               mr->p_uctx = pd->p_uctx;\r
+               atomic_inc(&pd->usecnt);\r
+               atomic_set(&mr->usecnt, 0);\r
+               HCA_PRINT(TRACE_LEVEL_INFORMATION ,HCA_DBG_MEMORY ,("pdn %d, usecnt %d \n", \r
+                       ((struct mlx4_ib_pd*)pd)->pdn, pd->usecnt));\r
+       }\r
+\r
+       return mr;\r
+}\r
+\r
+struct ib_mr *ib_reg_phys_mr(struct ib_pd *pd,\r
+                                 struct ib_phys_buf *phys_buf_array,\r
+                                 int num_phys_buf,\r
+                                 enum ib_access_flags mr_access_flags,\r
+                                 u64 *iova_start)\r
+{\r
+       struct ib_mr *mr;\r
+\r
+       if ( pd->device->reg_phys_mr )\r
+               mr = pd->device->reg_phys_mr(pd, phys_buf_array, num_phys_buf,\r
+                       mr_access_flags, iova_start);\r
+       else\r
+               mr = ERR_PTR(-ENOSYS);\r
+\r
+       if (!IS_ERR(mr)) {\r
+               mr->device  = pd->device;\r
+               mr->pd   = pd;\r
+               mr->p_uctx = pd->p_uctx;\r
+               atomic_inc(&pd->usecnt);\r
+               atomic_set(&mr->usecnt, 0);\r
+               HCA_PRINT(TRACE_LEVEL_INFORMATION ,HCA_DBG_MEMORY ,("PD%d use cnt %d \n", \r
+                       ((struct mlx4_ib_pd*)pd)->pdn, pd->usecnt));\r
+       }\r
+\r
+       return mr;\r
+}\r
+\r
+\r
+ struct ib_mr *ibv_reg_mr(struct ib_pd *pd, \r
+       u64 start, u64 length,\r
+       u64 virt_addr,\r
+       int mr_access_flags,\r
+       ci_umv_buf_t* const p_umv_buf )\r
+{\r
+       struct ib_mr *ib_mr;\r
+       int err;\r
+       HCA_ENTER(HCA_DBG_MEMORY);\r
+\r
+       if (p_umv_buf  && p_umv_buf->command) {\r
+               err = -ENOSYS;\r
+               goto err_not_supported;\r
+       }\r
+\r
+       ib_mr = pd->device->reg_user_mr(pd, start, length, virt_addr, mr_access_flags, NULL);\r
+       if (IS_ERR(ib_mr)) {\r
+               err = PTR_ERR(ib_mr);\r
+               HCA_PRINT(TRACE_LEVEL_ERROR,HCA_DBG_MEMORY ,("mthca_reg_user_mr failed (%d)\n", err));\r
+               goto err_reg_user_mr;\r
+       }\r
+\r
+       ib_mr->device  = pd->device;\r
+       ib_mr->pd      = pd;\r
+       atomic_inc(&pd->usecnt);\r
+       atomic_set(&ib_mr->usecnt, 0);\r
+       HCA_PRINT(TRACE_LEVEL_INFORMATION ,HCA_DBG_MEMORY ,("PD%d use cnt %d, pd_handle %p, ctx %p \n", \r
+               ((struct mlx4_ib_pd*)pd)->pdn, pd->usecnt, pd, pd->p_uctx));\r
+       HCA_EXIT(HCA_DBG_MEMORY);\r
+       return ib_mr;\r
+\r
+err_reg_user_mr:\r
+err_not_supported:\r
+       HCA_EXIT(HCA_DBG_MEMORY);\r
+       return ERR_PTR(err);\r
+}\r
+\r
+int ib_dereg_mr(struct ib_mr *mr)\r
+{\r
+       int ret;\r
+       struct ib_pd *pd;\r
+       struct ib_device *p_ibdev;\r
+\r
+       if (atomic_read(&mr->usecnt))\r
+               return -EBUSY;\r
+\r
+       p_ibdev = mr->device;\r
+       pd = mr->pd;\r
+       ret = p_ibdev->dereg_mr(mr);\r
+       if (!ret) {\r
+               atomic_dec(&pd->usecnt);\r
+               HCA_PRINT(TRACE_LEVEL_INFORMATION  ,HCA_DBG_MEMORY ,("pdn %d, usecnt %d, pd_handle %p, ctx %p \n", \r
+                       ((struct mlx4_ib_pd*)pd)->pdn, pd->usecnt, pd, pd->p_uctx));\r
+       }\r
+\r
+       return ret;\r
+}\r
+\r
+static void release_user_cq_qp_resources(\r
+       struct ib_ucontext      *p_uctx)\r
+{\r
+       if (p_uctx) {\r
+               atomic_dec(&p_uctx->x.usecnt);\r
+               if (!atomic_read(&p_uctx->x.usecnt) && p_uctx->closing) {\r
+                       HCA_PRINT(TRACE_LEVEL_ERROR  ,HCA_DBG_SHIM      ,("User resources are released. Removing context\n"));\r
+                       ibv_um_close(p_uctx);\r
+               }\r
+       }\r
+}\r
+\r
+//\r
+// Completion queues\r
+//\r
+\r
+struct ib_cq *ibv_create_cq(struct ib_device *p_ibdev,\r
+                          ib_comp_handler comp_handler,\r
+                          void (*event_handler)(ib_event_rec_t *),\r
+                          void *cq_context, int cqe, \r
+                          struct ib_ucontext *p_uctx, ci_umv_buf_t* const p_umv_buf)\r
+{\r
+       int err;\r
+       struct ib_cq *cq;\r
+       struct ib_udata udata, *p_udata = &udata;\r
+       struct ibv_create_cq *p_req;\r
+       struct ibv_create_cq_resp *p_resp = NULL;\r
+\r
+       if ( p_uctx && p_umv_buf && p_umv_buf->p_inout_buf ) {\r
+               // prepare user parameters\r
+               p_req = (struct ibv_create_cq*)(ULONG_PTR)p_umv_buf->p_inout_buf;\r
+               p_resp = (struct ibv_create_cq_resp*)(ULONG_PTR)\r
+                       p_umv_buf->p_inout_buf;\r
+               INIT_UDATA(&udata, &p_req->buf_addr, &p_resp->cqn, \r
+                       sizeof(struct mlx4_ib_create_cq), sizeof(struct mlx4_ib_create_cq_resp));\r
+       }\r
+       else \r
+               p_udata = NULL;\r
+\r
+       // create cq\r
+       cq = p_ibdev->create_cq(p_ibdev, cqe, 0, p_uctx, p_udata);\r
+       if (IS_ERR(cq)) {\r
+               err = PTR_ERR(cq);\r
+               HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_CQ ,("create_cq failed (%d)\n", err));\r
+               goto err_create_cq;\r
+       }\r
+\r
+       cq->device        = p_ibdev;\r
+       cq->p_uctx        = p_uctx;\r
+       cq->comp_handler  = comp_handler;\r
+       cq->event_handler = event_handler;\r
+       cq->cq_context    = cq_context;\r
+       atomic_set(&cq->usecnt, 0);\r
+       if (p_uctx)\r
+               atomic_inc(&p_uctx->x.usecnt);\r
+\r
+       HCA_PRINT(TRACE_LEVEL_INFORMATION, HCA_DBG_CQ ,\r
+               ("created CQ: cqn %#x:%#x \n", ((struct mlx4_ib_cq*)cq)->mcq.cqn, cq->cqe ));\r
+\r
+       // fill results\r
+       if (p_umv_buf) {\r
+               p_resp->cq_handle = (u64)(ULONG_PTR)cq;\r
+               p_resp->cqe = cq->cqe;\r
+               p_umv_buf->output_size = sizeof(struct ibv_create_cq_resp);\r
+       }\r
+       \r
+       return cq;\r
+\r
+err_create_cq:\r
+       if( p_umv_buf && p_umv_buf->command ) \r
+               p_umv_buf->status = IB_ERROR;\r
+       return ERR_PTR(err);\r
+}\r
+\r
+int ib_destroy_cq(struct ib_cq *cq)\r
+{\r
+       int ret;\r
+       struct ib_ucontext      *p_uctx = cq->p_uctx;\r
+       \r
+       if (atomic_read(&cq->usecnt))\r
+               return -EBUSY;\r
+\r
+       HCA_PRINT(TRACE_LEVEL_INFORMATION, HCA_DBG_CQ ,\r
+               ("destroying CQ: cqn %#x:%#x \n", ((struct mlx4_ib_cq*)cq)->mcq.cqn, cq->cqe ));\r
+\r
+       ret = cq->device->destroy_cq(cq);\r
+       release_user_cq_qp_resources(p_uctx);\r
+       return ret;\r
+}\r
+\r
+//\r
+// Queue pairs \r
+//\r
+\r
+static char *__print_qtype(enum ib_qp_type qtype)\r
+{\r
+       char *str = NULL;\r
+       switch (qtype) {\r
+               case IB_QPT_SMI: str = "SMI"; break;\r
+               case IB_QPT_GSI: str = "GSI"; break;\r
+               case IB_QPT_RC: str = "RC"; break;\r
+               case IB_QPT_UC: str = "UC"; break;\r
+               case IB_QPT_UD: str = "UD"; break;\r
+               case IB_QPT_RAW_IP_V6: str = "IP_V6"; break;\r
+               case IB_QPT_RAW_ETY: str = "ETY"; break;\r
+               default: str = "UKNWN"; break;\r
+       }\r
+       return str;\r
+}\r
+\r
+struct ib_qp *ibv_create_qp(struct ib_pd *pd,\r
+       struct ib_qp_init_attr *qp_init_attr,\r
+       struct ib_ucontext *p_uctx, ci_umv_buf_t* const p_umv_buf)\r
+{\r
+       int err;\r
+       struct ib_qp *p_ib_qp;\r
+       struct ib_udata udata, *p_udata = &udata;\r
+       struct ibv_create_qp *p_req = NULL;\r
+       struct ibv_create_qp_resp *p_resp= NULL;\r
+\r
+       HCA_ENTER(HCA_DBG_QP);\r
+\r
+       if ( p_uctx && p_umv_buf && p_umv_buf->command ) {\r
+               // prepare user parameters\r
+               p_req = (struct ibv_create_qp*)(ULONG_PTR)p_umv_buf->p_inout_buf;\r
+               p_resp = (struct ibv_create_qp_resp*)(ULONG_PTR)p_umv_buf->p_inout_buf;\r
+               INIT_UDATA(&udata, &p_req->buf_addr, NULL, \r
+                       sizeof(struct mlx4_ib_create_qp), 0);\r
+       }\r
+       else \r
+               p_udata = NULL;\r
+\r
+       p_ib_qp = pd->device->create_qp( pd, qp_init_attr, p_udata );\r
+\r
+       if (IS_ERR(p_ib_qp)) {\r
+               err = PTR_ERR(p_ib_qp);\r
+               HCA_PRINT(TRACE_LEVEL_ERROR,HCA_DBG_QP ,("create_qp failed (%d)\n", err));\r
+               goto err_create_qp;\r
+       }\r
+\r
+       // fill results\r
+       p_ib_qp->device                         = pd->device;\r
+       p_ib_qp->pd                             = pd;\r
+       p_ib_qp->send_cq                        = qp_init_attr->send_cq;\r
+       p_ib_qp->recv_cq                        = qp_init_attr->recv_cq;\r
+       p_ib_qp->srq                            = qp_init_attr->srq;\r
+       p_ib_qp->p_uctx                                 = p_uctx;\r
+       p_ib_qp->event_handler                  = qp_init_attr->event_handler;\r
+       p_ib_qp->qp_context                     = qp_init_attr->qp_context;\r
+       p_ib_qp->qp_type                                = qp_init_attr->qp_type;\r
+       atomic_inc(&pd->usecnt);\r
+       atomic_inc(&qp_init_attr->send_cq->usecnt);\r
+       atomic_inc(&qp_init_attr->recv_cq->usecnt);\r
+       if (qp_init_attr->srq)\r
+               atomic_inc(&qp_init_attr->srq->usecnt);\r
+       if (p_uctx)\r
+               atomic_inc(&p_uctx->x.usecnt);\r
+               HCA_PRINT(TRACE_LEVEL_INFORMATION ,HCA_DBG_QP ,("pdn %d, usecnt %d, pd_handle %p, ctx %p \n", \r
+                       ((struct mlx4_ib_pd*)pd)->pdn, pd->usecnt, pd, pd->p_uctx));\r
+\r
+       HCA_PRINT(TRACE_LEVEL_INFORMATION, HCA_DBG_QP ,\r
+               ("qtype %s (%d), qnum %#x, q_num  %#x, ssz %d, rsz %d, scq %#x:%#x, rcq %#x:%#x, port_num %d \n",\r
+               __print_qtype(p_ib_qp->qp_type), p_ib_qp->qp_type,\r
+               ((struct mlx4_ib_qp*)p_ib_qp)->mqp.qpn, p_ib_qp->qp_num, \r
+               qp_init_attr->cap.max_send_wr, qp_init_attr->cap.max_recv_wr,\r
+               ((struct mlx4_ib_cq*)p_ib_qp->send_cq)->mcq.cqn, p_ib_qp->send_cq->cqe,\r
+               ((struct mlx4_ib_cq*)p_ib_qp->recv_cq)->mcq.cqn, p_ib_qp->recv_cq->cqe,\r
+               qp_init_attr->port_num\r
+               ) );\r
+\r
+       // fill results for user\r
+       if (p_uctx && p_umv_buf && p_umv_buf->p_inout_buf) {\r
+               struct mlx4_ib_qp *p_mib_qp = (struct mlx4_ib_qp *)p_ib_qp;\r
+               p_resp->qp_handle = (__u64)(ULONG_PTR)p_ib_qp;\r
+               p_resp->qpn = p_mib_qp->mqp.qpn;\r
+               p_resp->max_send_wr = p_mib_qp->sq.max_post;\r
+               p_resp->max_recv_wr = p_mib_qp->rq.max_post;\r
+               p_resp->max_send_sge = p_mib_qp->sq.max_gs;\r
+               p_resp->max_recv_sge = p_mib_qp->rq.max_gs;\r
+               /*\r
+                * We don't support inline sends for kernel QPs (yet), and we\r
+                * don't know what userspace's value should be.\r
+                */\r
+               p_resp->max_inline_data = 0;\r
+               p_umv_buf->output_size = sizeof(struct ibv_create_qp_resp);\r
+       }\r
+\r
+       return p_ib_qp;\r
+\r
+err_create_qp:\r
+       if( p_umv_buf && p_umv_buf->command ) \r
+               p_umv_buf->status = IB_ERROR;\r
+       HCA_EXIT(HCA_DBG_QP);\r
+       return ERR_PTR(err);\r
+}\r
+\r
+int ib_destroy_qp(struct ib_qp *qp)\r
+{\r
+       struct ib_pd *p_ib_pd;\r
+       struct ib_cq *scq, *rcq;\r
+       struct ib_srq *srq;\r
+       struct ib_ucontext      *p_uctx;\r
+       int ret;\r
+\r
+       p_ib_pd  = qp->pd;\r
+       scq = qp->send_cq;\r
+       rcq = qp->recv_cq;\r
+       srq = qp->srq;\r
+       p_uctx = p_ib_pd->p_uctx;\r
+\r
+       ret = qp->device->destroy_qp(qp);\r
+       if (!ret) {\r
+               atomic_dec(&p_ib_pd->usecnt);\r
+               atomic_dec(&scq->usecnt);\r
+               atomic_dec(&rcq->usecnt);\r
+               HCA_PRINT(TRACE_LEVEL_INFORMATION  ,HCA_DBG_QP ,("PD%d use cnt %d, pd_handle %p, ctx %p \n", \r
+                       ((struct mlx4_ib_pd*)p_ib_pd)->pdn, p_ib_pd->usecnt, p_ib_pd, p_ib_pd->p_uctx));\r
+               if (srq)\r
+                       atomic_dec(&srq->usecnt);\r
+               release_user_cq_qp_resources(p_uctx);\r
+       }\r
+\r
+       return ret;\r
+}\r
+\r
+//\r
+// Shared receive queues\r
+//\r
+\r
+\r
+/* Shared receive queues */\r
+\r
+struct ib_srq *ibv_create_srq(struct ib_pd *pd,\r
+       struct ib_srq_init_attr *srq_init_attr,\r
+       struct ib_ucontext *p_uctx, ci_umv_buf_t* const p_umv_buf)\r
+{\r
+       int err;\r
+       struct ib_srq *p_ib_srq;\r
+       struct ib_udata udata, *p_udata = &udata;\r
+       struct ibv_create_srq *p_req = NULL;\r
+       struct ibv_create_srq_resp *p_resp= NULL;\r
+\r
+       if ( p_uctx && p_umv_buf && p_umv_buf->p_inout_buf) {\r
+               // prepare user parameters\r
+               p_req = (struct ibv_create_srq*)(ULONG_PTR)p_umv_buf->p_inout_buf;\r
+               p_resp = (struct ibv_create_srq_resp*)(ULONG_PTR)p_umv_buf->p_inout_buf;\r
+               INIT_UDATA(&udata, &p_req->buf_addr, &p_resp->srqn, \r
+                       sizeof(struct ibv_create_srq), sizeof(struct ibv_create_srq_resp));\r
+       }\r
+       else \r
+               p_udata = NULL;\r
+\r
+       p_ib_srq = pd->device->create_srq( pd, srq_init_attr, p_udata );\r
+       if (IS_ERR(p_ib_srq)) {\r
+               err = PTR_ERR(p_ib_srq);\r
+               HCA_PRINT(TRACE_LEVEL_ERROR,HCA_DBG_QP ,("create_srq failed (%d)\n", err));\r
+               goto err_create_srq;\r
+       }\r
+\r
+       // fill results\r
+       p_ib_srq->device                        = pd->device;\r
+       p_ib_srq->pd                            = pd;\r
+       p_ib_srq->p_uctx                                = p_uctx;\r
+       p_ib_srq->event_handler                 = srq_init_attr->event_handler;\r
+       p_ib_srq->srq_context                   = srq_init_attr->srq_context;\r
+       atomic_inc(&pd->usecnt);\r
+       atomic_set(&p_ib_srq->usecnt, 0);\r
+       if (p_uctx)\r
+               atomic_inc(&p_uctx->x.usecnt);\r
+               HCA_PRINT(TRACE_LEVEL_INFORMATION ,HCA_DBG_QP ,("PD%d use cnt %d, pd_handle %p, ctx %p \n", \r
+                       ((struct mlx4_ib_pd*)pd)->pdn, pd->usecnt, pd, pd->p_uctx));\r
+\r
+       HCA_PRINT(TRACE_LEVEL_INFORMATION, HCA_DBG_SRQ ,\r
+               ("uctx %p, qhndl %p, qnum %#x \n", \r
+               pd->p_uctx, p_ib_srq, ((struct mlx4_ib_srq*)p_ib_srq)->msrq.srqn ) );\r
+\r
+       // fill results for user\r
+       if (p_uctx && p_umv_buf && p_umv_buf->p_inout_buf) {\r
+               struct mlx4_ib_srq* p_mib_srq = (struct mlx4_ib_srq*)p_ib_srq;\r
+               p_resp->srq_handle = (__u64)(ULONG_PTR)p_ib_srq;\r
+               p_resp->max_wr = p_mib_srq->msrq.max - 1;\r
+               p_resp->max_sge = p_mib_srq->msrq.max_gs;\r
+               p_umv_buf->output_size = sizeof(struct ibv_create_srq_resp);\r
+               HCA_PRINT(TRACE_LEVEL_INFORMATION ,HCA_DBG_QP ,("PD%d use cnt %d \n", \r
+                       ((struct mlx4_ib_pd*)pd)->pdn, pd->usecnt));\r
+       }\r
+\r
+       return p_ib_srq;\r
+       \r
+err_create_srq:\r
+       if( p_umv_buf && p_umv_buf->command ) \r
+               p_umv_buf->status = IB_ERROR;\r
+       HCA_EXIT(HCA_DBG_QP);\r
+       return ERR_PTR(err);\r
+}\r
+\r
+int ib_destroy_srq(struct ib_srq *srq)\r
+{\r
+       int ret;\r
+       struct ib_pd *p_ib_pd = srq->pd;\r
+       struct ib_ucontext      *p_uctx = p_ib_pd->p_uctx;\r
+\r
+       ret = srq->device->destroy_srq(srq);\r
+       if (!ret) {\r
+               atomic_dec(&p_ib_pd->usecnt);\r
+               HCA_PRINT(TRACE_LEVEL_INFORMATION  ,HCA_DBG_SRQ ,("PD%d use cnt %d, pd_handle %p, ctx %p \n", \r
+                       ((struct mlx4_ib_pd*)p_ib_pd)->pdn, p_ib_pd->usecnt, p_ib_pd, p_ib_pd->p_uctx));\r
+               release_user_cq_qp_resources(p_uctx);\r
+       }\r
+\r
+       return ret;\r
+}\r
+\r
+//\r
+// User context\r
+//\r
+static NTSTATUS __map_memory_for_user(\r
+       IN              io_addr_t       addr,\r
+       IN              SIZE_T          size,\r
+       IN              MEMORY_CACHING_TYPE mem_type,\r
+       OUT             umap_t  *       p_map\r
+       )\r
+{\r
+       NTSTATUS status;\r
+\r
+       HCA_ENTER(HCA_DBG_SHIM);\r
+\r
+       p_map->mapped = 0;\r
+       \r
+       // map UAR to kernel \r
+       p_map->kva = ioremap(addr, size);\r
+       if (!p_map->kva) {\r
+               HCA_PRINT( TRACE_LEVEL_ERROR, HCA_DBG_LOW ,\r
+                       ("Couldn't map kernel access region, aborting.\n") );\r
+               status = IB_INSUFFICIENT_MEMORY;\r
+               goto err_ioremap;\r
+       }\r
+\r
+       // build MDL \r
+       p_map->mdl = IoAllocateMdl( p_map->kva, (ULONG)size,\r
+               FALSE, TRUE, NULL );\r
+       if( !p_map->mdl ) {\r
+               status = IB_INSUFFICIENT_MEMORY;\r
+               goto err_alloc_mdl;\r
+       }\r
+       MmBuildMdlForNonPagedPool( p_map->mdl );\r
+\r
+       /* Map the memory into the calling process's address space. */\r
+       __try   {\r
+               p_map->uva = MmMapLockedPagesSpecifyCache( p_map->mdl,\r
+                       UserMode, mem_type, NULL, FALSE, NormalPagePriority );\r
+       }\r
+       __except(EXCEPTION_EXECUTE_HANDLER) {\r
+               status = IB_INVALID_PERMISSION;\r
+               goto err_map;\r
+       }\r
+\r
+       p_map->mapped = 1;\r
+       status = STATUS_SUCCESS;\r
+       goto done;\r
+\r
+err_map:\r
+       IoFreeMdl(p_map->mdl);\r
+\r
+err_alloc_mdl: \r
+       iounmap(p_map->kva, PAGE_SIZE);\r
+\r
+err_ioremap:\r
+done:  \r
+       HCA_EXIT(HCA_DBG_SHIM);\r
+       return status;\r
+}\r
+\r
+static void __unmap_memory_for_user(\r
+       IN              umap_t  *       p_map\r
+       )\r
+{\r
+       if (p_map->mapped) {\r
+               p_map->mapped = 0;\r
+               MmUnmapLockedPages( p_map->uva, p_map->mdl );\r
+               IoFreeMdl(p_map->mdl);\r
+               iounmap(p_map->kva, PAGE_SIZE);\r
+       }\r
+}\r
+\r
+ib_api_status_t ibv_um_open(   \r
+       IN                      struct ib_device                *       p_ibdev,\r
+       IN      OUT             ci_umv_buf_t* const                     p_umv_buf,\r
+       OUT                     struct ib_ucontext              **      pp_uctx )\r
+{\r
+       int err;\r
+       ib_api_status_t         status;\r
+       struct mlx4_ib_ucontext *p_muctx;\r
+       struct ibv_get_context_resp *p_uresp;\r
+       struct mlx4_ib_alloc_ucontext_resp ib_alloc_ucontext_resp;\r
+       struct ib_ucontext              *p_uctx;\r
+       struct ib_udata udata;\r
+\r
+       HCA_ENTER(HCA_DBG_SHIM);\r
+\r
+       // create user context in kernel\r
+       INIT_UDATA(&udata, NULL, &ib_alloc_ucontext_resp, \r
+               0, sizeof(struct mlx4_ib_alloc_ucontext_resp));\r
+\r
+       p_uctx = p_ibdev->alloc_ucontext(p_ibdev, &udata);\r
+       if (IS_ERR(p_uctx)) {\r
+               err = PTR_ERR(p_uctx);\r
+               HCA_PRINT(TRACE_LEVEL_ERROR  ,HCA_DBG_SHIM,\r
+                       ("mthca_alloc_ucontext failed (%d)\n", err));\r
+               status = errno_to_iberr(err);\r
+               goto err_alloc_ucontext;\r
+       }\r
+       p_muctx = to_mucontext(p_uctx);\r
+       p_uresp = (struct ibv_get_context_resp *)(ULONG_PTR)p_umv_buf->p_inout_buf;\r
+\r
+       // fill the rest of ib_ucontext fields \r
+       p_uctx->device = p_ibdev;\r
+       p_uctx->closing = 0;\r
+\r
+       // livefish\r
+       if (hca_is_livefish(p_ibdev->x.p_fdo))\r
+               goto done;\r
+       \r
+       // map uar to user space\r
+       status = __map_memory_for_user( \r
+               (io_addr_t)p_muctx->uar.pfn << PAGE_SHIFT, \r
+               PAGE_SIZE, MmNonCached, &p_uctx->x.uar );\r
+       if( !NT_SUCCESS(status) ) {\r
+               goto err_map_uar;\r
+       }\r
+       p_uresp->uar_addr        = (u64)(ULONG_PTR)p_uctx->x.uar.uva;\r
+\r
+       // map BF to user space\r
+       if (ib_alloc_ucontext_resp.bf_reg_size) {\r
+               status = __map_memory_for_user( \r
+                       (io_addr_t)(p_muctx->uar.pfn + \r
+                       to_mdev(p_ibdev)->dev->caps.num_uars) << PAGE_SHIFT, \r
+                       PAGE_SIZE, MmWriteCombined, &p_uctx->x.bf );\r
+               if( !NT_SUCCESS(status) ) {\r
+                       HCA_PRINT(TRACE_LEVEL_WARNING ,HCA_DBG_SHIM,\r
+                               ("BlueFlame available, but failed to be mapped (%#x)\n", status));\r
+                       p_uresp->bf_page         = 0;\r
+                       p_uresp->bf_buf_size = 0;\r
+               } \r
+               else {\r
+                       p_uresp->bf_page         = (u64)(ULONG_PTR)p_uctx->x.bf.uva;\r
+                       p_uresp->bf_buf_size = ib_alloc_ucontext_resp.bf_reg_size / 2;\r
+                       p_uresp->bf_offset       = 0;\r
+               }\r
+       }\r
+       else {\r
+                       p_uresp->bf_page         = 0;\r
+                       p_uresp->bf_buf_size = 0;\r
+       }\r
+\r
+done:\r
+       // fill the response\r
+       p_uresp->bf_reg_size             = ib_alloc_ucontext_resp.bf_reg_size;\r
+       p_uresp->bf_regs_per_page        = ib_alloc_ucontext_resp.bf_regs_per_page;\r
+       p_uresp->qp_tab_size             = ib_alloc_ucontext_resp.qp_tab_size;\r
+\r
+       *pp_uctx = p_uctx;\r
+       status = IB_SUCCESS;\r
+       goto end;\r
+\r
+err_map_uar:\r
+       p_ibdev->dealloc_ucontext(p_uctx);\r
+err_alloc_ucontext: \r
+end:\r
+       HCA_EXIT(HCA_DBG_SHIM);\r
+       return status;\r
+}\r
+\r
+\r
+void ibv_um_close(     struct ib_ucontext * h_um_ca )\r
+{\r
+       int err;\r
+       ib_api_status_t         status;\r
+       struct ib_ucontext *p_uctx = (struct ib_ucontext *)h_um_ca;\r
+       PFDO_DEVICE_DATA p_fdo = p_uctx->device->x.p_fdo;\r
+\r
+       HCA_ENTER(HCA_DBG_SHIM);\r
+\r
+       p_uctx->closing = 1;\r
+\r
+       if (atomic_read(&p_uctx->x.usecnt)) {\r
+               HCA_PRINT(TRACE_LEVEL_ERROR,HCA_DBG_SHIM,\r
+                       ("resources are not released (cnt %d)\n", p_uctx->x.usecnt));\r
+               status = IB_RESOURCE_BUSY;\r
+               goto err_usage;\r
+       }\r
+       \r
+       if ( !hca_is_livefish(p_fdo)) {\r
+               __unmap_memory_for_user( &p_uctx->x.bf );\r
+               __unmap_memory_for_user( &p_uctx->x.uar );\r
+       }\r
+\r
+       err = p_fdo->bus_ib_ifc.p_ibdev->dealloc_ucontext(p_uctx);\r
+       if (err) {\r
+               HCA_PRINT(TRACE_LEVEL_ERROR,HCA_DBG_SHIM,\r
+                       ("mthca_dealloc_ucontext failed (%d)\n", err));\r
+               status = errno_to_iberr(err);\r
+               goto err_dealloc_ucontext;\r
+       }\r
+\r
+       HCA_PRINT(TRACE_LEVEL_INFORMATION,HCA_DBG_SHIM,\r
+               ("pcs %p\n", PsGetCurrentProcess()) );\r
+       status = IB_SUCCESS;\r
+       goto end;\r
+       \r
+err_dealloc_ucontext: \r
+err_usage:\r
+end:\r
+       if (status != IB_SUCCESS)\r
+       {\r
+               HCA_PRINT(TRACE_LEVEL_ERROR,HCA_DBG_SHIM,\r
+                       ("completes with ERROR status %x\n", status));\r
+       }\r
+       HCA_EXIT(HCA_DBG_SHIM);\r
+       return;\r
+}\r
+\r
diff --git a/branches/WOF2-0/trunk/hw/mlx4/kernel/hca/hverbs.h b/branches/WOF2-0/trunk/hw/mlx4/kernel/hca/hverbs.h
new file mode 100644 (file)
index 0000000..d160159
--- /dev/null
@@ -0,0 +1,72 @@
+/*\r
+ * Copyright (c) 2004 Mellanox Technologies Ltd.  All rights reserved.\r
+ * Copyright (c) 2004 Infinicon Corporation.  All rights reserved.\r
+ * Copyright (c) 2004 Intel Corporation.  All rights reserved.\r
+ * Copyright (c) 2004 Topspin Corporation.  All rights reserved.\r
+ * Copyright (c) 2004 Voltaire Corporation.  All rights reserved.\r
+ * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.\r
+ * Copyright (c) 2005 Cisco Systems.  All rights reserved.\r
+ *\r
+ * This software is available to you under a choice of one of two\r
+ * licenses.  You may choose to be licensed under the terms of the GNU\r
+ * General Public License (GPL) Version 2, available from the file\r
+ * COPYING in the main directory of this source tree, or the\r
+ * OpenIB.org BSD license below:\r
+ *\r
+ *     Redistribution and use in source and binary forms, with or\r
+ *     without modification, are permitted provided that the following\r
+ *     conditions are met:\r
+ *\r
+ *      - Redistributions of source code must retain the above\r
+ *        copyright notice, this list of conditions and the following\r
+ *        disclaimer.\r
+ *\r
+ *      - Redistributions in binary form must reproduce the above\r
+ *        copyright notice, this list of conditions and the following\r
+ *        disclaimer in the documentation and/or other materials\r
+ *        provided with the distribution.\r
+ *\r
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,\r
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\r
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND\r
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS\r
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN\r
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN\r
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\r
+ * SOFTWARE.\r
+ *\r
+ * $Id: ib_verbs.h 1889 2006-12-31 08:33:06Z sleybo $\r
+ */\r
+\r
+#pragma once\r
+\r
+#include "ib_verbs.h"\r
+\r
+struct ib_mr *ibv_reg_mr(struct ib_pd *pd, \r
+       u64 start, u64 length,\r
+       u64 virt_addr,\r
+       int mr_access_flags,\r
+       ci_umv_buf_t* const p_umv_buf );\r
+\r
+struct ib_cq *ibv_create_cq(struct ib_device *p_ibdev,\r
+                          ib_comp_handler comp_handler,\r
+                          void (*event_handler)(ib_event_rec_t *),\r
+                          void *cq_context, int cqe, \r
+                          struct ib_ucontext *p_uctx, ci_umv_buf_t* const p_umv_buf);\r
+\r
+struct ib_qp *ibv_create_qp(struct ib_pd *pd,\r
+       struct ib_qp_init_attr *qp_init_attr,\r
+       struct ib_ucontext *context, ci_umv_buf_t* const p_umv_buf);\r
+\r
+struct ib_srq *ibv_create_srq(struct ib_pd *pd,\r
+       struct ib_srq_init_attr *srq_init_attr,\r
+       struct ib_ucontext *context, ci_umv_buf_t* const p_umv_buf);\r
+\r
+ib_api_status_t ibv_um_open(   \r
+       IN                      struct ib_device                *       p_ibdev,\r
+       IN      OUT             ci_umv_buf_t* const                     p_umv_buf,\r
+       OUT                     struct ib_ucontext              **      pp_uctx );\r
+\r
+void ibv_um_close(     struct ib_ucontext * h_um_ca );\r
+\r
+\r
index a2f8f9957c4b587b19e3166c6e51004cd3d1fa76..1d3161853afe7b4ad799161811040ab19cbbdf5b 100644 (file)
@@ -39,7 +39,7 @@
 #include "public.h"\r
 #include "debug.h"\r
 #include "l2w.h"\r
-#include "verbs.h"\r
+#include "hverbs.h"\r
 #include "mlx4_ib.h"\r
 #include "drv.h"\r
 #include "mx_abi.h"\r
diff --git a/branches/WOF2-0/trunk/hw/mlx4/kernel/hca/verbs.c b/branches/WOF2-0/trunk/hw/mlx4/kernel/hca/verbs.c
deleted file mode 100644 (file)
index 44969d9..0000000
+++ /dev/null
@@ -1,673 +0,0 @@
-/*\r
- * Copyright (c) 2005 SilverStorm Technologies.  All rights reserved.\r
- * Copyright (c) 2004-2005 Mellanox Technologies, Inc. All rights reserved. \r
- * Portions Copyright (c) 2008 Microsoft Corporation.  All rights reserved.\r
- *\r
- * This software is available to you under the OpenIB.org BSD license\r
- * below:\r
- *\r
- *     Redistribution and use in source and binary forms, with or\r
- *     without modification, are permitted provided that the following\r
- *     conditions are met:\r
- *\r
- *      - Redistributions of source code must retain the above\r
- *        copyright notice, this list of conditions and the following\r
- *        disclaimer.\r
- *\r
- *      - Redistributions in binary form must reproduce the above\r
- *        copyright notice, this list of conditions and the following\r
- *        disclaimer in the documentation and/or other materials\r
- *        provided with the distribution.\r
- *\r
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,\r
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\r
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND\r
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS\r
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN\r
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN\r
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\r
- * SOFTWARE.\r
- *\r
- * $Id: hca_verbs.c 2073 2007-11-13 11:38:40Z leonid $\r
- */\r
-\r
-\r
-#include "precomp.h"\r
-\r
-#if defined(EVENT_TRACING)\r
-#ifdef offsetof\r
-#undef offsetof\r
-#endif\r
-#include "verbs.tmh"\r
-#endif\r
-\r
-\r
-/* Memory regions */\r
-\r
-struct ib_mr *ib_get_dma_mr(struct ib_pd *pd, enum ib_access_flags mr_access_flags)\r
-{\r
-       struct ib_mr *mr;\r
-\r
-       mr = pd->device->get_dma_mr(pd, mr_access_flags);\r
\r
-       if (!IS_ERR(mr)) {\r
-               mr->device  = pd->device;\r
-               mr->pd      = pd;\r
-               mr->p_uctx = pd->p_uctx;\r
-               atomic_inc(&pd->usecnt);\r
-               atomic_set(&mr->usecnt, 0);\r
-               HCA_PRINT(TRACE_LEVEL_INFORMATION ,HCA_DBG_MEMORY ,("pdn %d, usecnt %d \n", \r
-                       ((struct mlx4_ib_pd*)pd)->pdn, pd->usecnt));\r
-       }\r
-\r
-       return mr;\r
-}\r
-\r
-struct ib_mr *ib_reg_phys_mr(struct ib_pd *pd,\r
-                                 struct ib_phys_buf *phys_buf_array,\r
-                                 int num_phys_buf,\r
-                                 enum ib_access_flags mr_access_flags,\r
-                                 u64 *iova_start)\r
-{\r
-       struct ib_mr *mr;\r
-\r
-       if ( pd->device->reg_phys_mr )\r
-               mr = pd->device->reg_phys_mr(pd, phys_buf_array, num_phys_buf,\r
-                       mr_access_flags, iova_start);\r
-       else\r
-               mr = ERR_PTR(-ENOSYS);\r
-\r
-       if (!IS_ERR(mr)) {\r
-               mr->device  = pd->device;\r
-               mr->pd   = pd;\r
-               mr->p_uctx = pd->p_uctx;\r
-               atomic_inc(&pd->usecnt);\r
-               atomic_set(&mr->usecnt, 0);\r
-               HCA_PRINT(TRACE_LEVEL_INFORMATION ,HCA_DBG_MEMORY ,("PD%d use cnt %d \n", \r
-                       ((struct mlx4_ib_pd*)pd)->pdn, pd->usecnt));\r
-       }\r
-\r
-       return mr;\r
-}\r
-\r
-\r
- struct ib_mr *ibv_reg_mr(struct ib_pd *pd, \r
-       u64 start, u64 length,\r
-       u64 virt_addr,\r
-       int mr_access_flags,\r
-       ci_umv_buf_t* const p_umv_buf )\r
-{\r
-       struct ib_mr *ib_mr;\r
-       int err;\r
-       HCA_ENTER(HCA_DBG_MEMORY);\r
-\r
-       if (p_umv_buf  && p_umv_buf->command) {\r
-               err = -ENOSYS;\r
-               goto err_not_supported;\r
-       }\r
-\r
-       ib_mr = pd->device->reg_user_mr(pd, start, length, virt_addr, mr_access_flags, NULL);\r
-       if (IS_ERR(ib_mr)) {\r
-               err = PTR_ERR(ib_mr);\r
-               HCA_PRINT(TRACE_LEVEL_ERROR,HCA_DBG_MEMORY ,("mthca_reg_user_mr failed (%d)\n", err));\r
-               goto err_reg_user_mr;\r
-       }\r
-\r
-       ib_mr->device  = pd->device;\r
-       ib_mr->pd      = pd;\r
-       atomic_inc(&pd->usecnt);\r
-       atomic_set(&ib_mr->usecnt, 0);\r
-       HCA_PRINT(TRACE_LEVEL_INFORMATION ,HCA_DBG_MEMORY ,("PD%d use cnt %d, pd_handle %p, ctx %p \n", \r
-               ((struct mlx4_ib_pd*)pd)->pdn, pd->usecnt, pd, pd->p_uctx));\r
-       HCA_EXIT(HCA_DBG_MEMORY);\r
-       return ib_mr;\r
-\r
-err_reg_user_mr:\r
-err_not_supported:\r
-       HCA_EXIT(HCA_DBG_MEMORY);\r
-       return ERR_PTR(err);\r
-}\r
-\r
-int ib_dereg_mr(struct ib_mr *mr)\r
-{\r
-       int ret;\r
-       struct ib_pd *pd;\r
-       struct ib_device *p_ibdev;\r
-\r
-       if (atomic_read(&mr->usecnt))\r
-               return -EBUSY;\r
-\r
-       p_ibdev = mr->device;\r
-       pd = mr->pd;\r
-       ret = p_ibdev->dereg_mr(mr);\r
-       if (!ret) {\r
-               atomic_dec(&pd->usecnt);\r
-               HCA_PRINT(TRACE_LEVEL_INFORMATION  ,HCA_DBG_MEMORY ,("pdn %d, usecnt %d, pd_handle %p, ctx %p \n", \r
-                       ((struct mlx4_ib_pd*)pd)->pdn, pd->usecnt, pd, pd->p_uctx));\r
-       }\r
-\r
-       return ret;\r
-}\r
-\r
-static void release_user_cq_qp_resources(\r
-       struct ib_ucontext      *p_uctx)\r
-{\r
-       if (p_uctx) {\r
-               atomic_dec(&p_uctx->x.usecnt);\r
-               if (!atomic_read(&p_uctx->x.usecnt) && p_uctx->closing) {\r
-                       HCA_PRINT(TRACE_LEVEL_ERROR  ,HCA_DBG_SHIM      ,("User resources are released. Removing context\n"));\r
-                       ibv_um_close(p_uctx);\r
-               }\r
-       }\r
-}\r
-\r
-//\r
-// Completion queues\r
-//\r
-\r
-struct ib_cq *ibv_create_cq(struct ib_device *p_ibdev,\r
-                          ib_comp_handler comp_handler,\r
-                          void (*event_handler)(ib_event_rec_t *),\r
-                          void *cq_context, int cqe, \r
-                          struct ib_ucontext *p_uctx, ci_umv_buf_t* const p_umv_buf)\r
-{\r
-       int err;\r
-       struct ib_cq *cq;\r
-       struct ib_udata udata, *p_udata = &udata;\r
-       struct ibv_create_cq *p_req;\r
-       struct ibv_create_cq_resp *p_resp = NULL;\r
-\r
-       if ( p_uctx && p_umv_buf && p_umv_buf->p_inout_buf ) {\r
-               // prepare user parameters\r
-               p_req = (struct ibv_create_cq*)(ULONG_PTR)p_umv_buf->p_inout_buf;\r
-               p_resp = (struct ibv_create_cq_resp*)(ULONG_PTR)\r
-                       p_umv_buf->p_inout_buf;\r
-               INIT_UDATA(&udata, &p_req->buf_addr, &p_resp->cqn, \r
-                       sizeof(struct mlx4_ib_create_cq), sizeof(struct mlx4_ib_create_cq_resp));\r
-       }\r
-       else \r
-               p_udata = NULL;\r
-\r
-       // create cq\r
-       cq = p_ibdev->create_cq(p_ibdev, cqe, 0, p_uctx, p_udata);\r
-       if (IS_ERR(cq)) {\r
-               err = PTR_ERR(cq);\r
-               HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_CQ ,("create_cq failed (%d)\n", err));\r
-               goto err_create_cq;\r
-       }\r
-\r
-       cq->device        = p_ibdev;\r
-       cq->p_uctx        = p_uctx;\r
-       cq->comp_handler  = comp_handler;\r
-       cq->event_handler = event_handler;\r
-       cq->cq_context    = cq_context;\r
-       atomic_set(&cq->usecnt, 0);\r
-       if (p_uctx)\r
-               atomic_inc(&p_uctx->x.usecnt);\r
-\r
-       HCA_PRINT(TRACE_LEVEL_INFORMATION, HCA_DBG_CQ ,\r
-               ("created CQ: cqn %#x:%#x \n", ((struct mlx4_ib_cq*)cq)->mcq.cqn, cq->cqe ));\r
-\r
-       // fill results\r
-       if (p_umv_buf) {\r
-               p_resp->cq_handle = (u64)(ULONG_PTR)cq;\r
-               p_resp->cqe = cq->cqe;\r
-               p_umv_buf->output_size = sizeof(struct ibv_create_cq_resp);\r
-       }\r
-       \r
-       return cq;\r
-\r
-err_create_cq:\r
-       if( p_umv_buf && p_umv_buf->command ) \r
-               p_umv_buf->status = IB_ERROR;\r
-       return ERR_PTR(err);\r
-}\r
-\r
-int ib_destroy_cq(struct ib_cq *cq)\r
-{\r
-       int ret;\r
-       struct ib_ucontext      *p_uctx = cq->p_uctx;\r
-       \r
-       if (atomic_read(&cq->usecnt))\r
-               return -EBUSY;\r
-\r
-       HCA_PRINT(TRACE_LEVEL_INFORMATION, HCA_DBG_CQ ,\r
-               ("destroying CQ: cqn %#x:%#x \n", ((struct mlx4_ib_cq*)cq)->mcq.cqn, cq->cqe ));\r
-\r
-       ret = cq->device->destroy_cq(cq);\r
-       release_user_cq_qp_resources(p_uctx);\r
-       return ret;\r
-}\r
-\r
-//\r
-// Queue pairs \r
-//\r
-\r
-static char *__print_qtype(enum ib_qp_type qtype)\r
-{\r
-       char *str = NULL;\r
-       switch (qtype) {\r
-               case IB_QPT_SMI: str = "SMI"; break;\r
-               case IB_QPT_GSI: str = "GSI"; break;\r
-               case IB_QPT_RC: str = "RC"; break;\r
-               case IB_QPT_UC: str = "UC"; break;\r
-               case IB_QPT_UD: str = "UD"; break;\r
-               case IB_QPT_RAW_IP_V6: str = "IP_V6"; break;\r
-               case IB_QPT_RAW_ETY: str = "ETY"; break;\r
-               default: str = "UKNWN"; break;\r
-       }\r
-       return str;\r
-}\r
-\r
-struct ib_qp *ibv_create_qp(struct ib_pd *pd,\r
-       struct ib_qp_init_attr *qp_init_attr,\r
-       struct ib_ucontext *p_uctx, ci_umv_buf_t* const p_umv_buf)\r
-{\r
-       int err;\r
-       struct ib_qp *p_ib_qp;\r
-       struct ib_udata udata, *p_udata = &udata;\r
-       struct ibv_create_qp *p_req = NULL;\r
-       struct ibv_create_qp_resp *p_resp= NULL;\r
-\r
-       HCA_ENTER(HCA_DBG_QP);\r
-\r
-       if ( p_uctx && p_umv_buf && p_umv_buf->command ) {\r
-               // prepare user parameters\r
-               p_req = (struct ibv_create_qp*)(ULONG_PTR)p_umv_buf->p_inout_buf;\r
-               p_resp = (struct ibv_create_qp_resp*)(ULONG_PTR)p_umv_buf->p_inout_buf;\r
-               INIT_UDATA(&udata, &p_req->buf_addr, NULL, \r
-                       sizeof(struct mlx4_ib_create_qp), 0);\r
-       }\r
-       else \r
-               p_udata = NULL;\r
-\r
-       p_ib_qp = pd->device->create_qp( pd, qp_init_attr, p_udata );\r
-\r
-       if (IS_ERR(p_ib_qp)) {\r
-               err = PTR_ERR(p_ib_qp);\r
-               HCA_PRINT(TRACE_LEVEL_ERROR,HCA_DBG_QP ,("create_qp failed (%d)\n", err));\r
-               goto err_create_qp;\r
-       }\r
-\r
-       // fill results\r
-       p_ib_qp->device                         = pd->device;\r
-       p_ib_qp->pd                             = pd;\r
-       p_ib_qp->send_cq                        = qp_init_attr->send_cq;\r
-       p_ib_qp->recv_cq                        = qp_init_attr->recv_cq;\r
-       p_ib_qp->srq                            = qp_init_attr->srq;\r
-       p_ib_qp->p_uctx                                 = p_uctx;\r
-       p_ib_qp->event_handler                  = qp_init_attr->event_handler;\r
-       p_ib_qp->qp_context                     = qp_init_attr->qp_context;\r
-       p_ib_qp->qp_type                                = qp_init_attr->qp_type;\r
-       atomic_inc(&pd->usecnt);\r
-       atomic_inc(&qp_init_attr->send_cq->usecnt);\r
-       atomic_inc(&qp_init_attr->recv_cq->usecnt);\r
-       if (qp_init_attr->srq)\r
-               atomic_inc(&qp_init_attr->srq->usecnt);\r
-       if (p_uctx)\r
-               atomic_inc(&p_uctx->x.usecnt);\r
-               HCA_PRINT(TRACE_LEVEL_INFORMATION ,HCA_DBG_QP ,("pdn %d, usecnt %d, pd_handle %p, ctx %p \n", \r
-                       ((struct mlx4_ib_pd*)pd)->pdn, pd->usecnt, pd, pd->p_uctx));\r
-\r
-       HCA_PRINT(TRACE_LEVEL_INFORMATION, HCA_DBG_QP ,\r
-               ("qtype %s (%d), qnum %#x, q_num  %#x, ssz %d, rsz %d, scq %#x:%#x, rcq %#x:%#x, port_num %d \n",\r
-               __print_qtype(p_ib_qp->qp_type), p_ib_qp->qp_type,\r
-               ((struct mlx4_ib_qp*)p_ib_qp)->mqp.qpn, p_ib_qp->qp_num, \r
-               qp_init_attr->cap.max_send_wr, qp_init_attr->cap.max_recv_wr,\r
-               ((struct mlx4_ib_cq*)p_ib_qp->send_cq)->mcq.cqn, p_ib_qp->send_cq->cqe,\r
-               ((struct mlx4_ib_cq*)p_ib_qp->recv_cq)->mcq.cqn, p_ib_qp->recv_cq->cqe,\r
-               qp_init_attr->port_num\r
-               ) );\r
-\r
-       // fill results for user\r
-       if (p_uctx && p_umv_buf && p_umv_buf->p_inout_buf) {\r
-               struct mlx4_ib_qp *p_mib_qp = (struct mlx4_ib_qp *)p_ib_qp;\r
-               p_resp->qp_handle = (__u64)(ULONG_PTR)p_ib_qp;\r
-               p_resp->qpn = p_mib_qp->mqp.qpn;\r
-               p_resp->max_send_wr = p_mib_qp->sq.max_post;\r
-               p_resp->max_recv_wr = p_mib_qp->rq.max_post;\r
-               p_resp->max_send_sge = p_mib_qp->sq.max_gs;\r
-               p_resp->max_recv_sge = p_mib_qp->rq.max_gs;\r
-               /*\r
-                * We don't support inline sends for kernel QPs (yet), and we\r
-                * don't know what userspace's value should be.\r
-                */\r
-               p_resp->max_inline_data = 0;\r
-               p_umv_buf->output_size = sizeof(struct ibv_create_qp_resp);\r
-       }\r
-\r
-       return p_ib_qp;\r
-\r
-err_create_qp:\r
-       if( p_umv_buf && p_umv_buf->command ) \r
-               p_umv_buf->status = IB_ERROR;\r
-       HCA_EXIT(HCA_DBG_QP);\r
-       return ERR_PTR(err);\r
-}\r
-\r
-int ib_destroy_qp(struct ib_qp *qp)\r
-{\r
-       struct ib_pd *p_ib_pd;\r
-       struct ib_cq *scq, *rcq;\r
-       struct ib_srq *srq;\r
-       struct ib_ucontext      *p_uctx;\r
-       int ret;\r
-\r
-       p_ib_pd  = qp->pd;\r
-       scq = qp->send_cq;\r
-       rcq = qp->recv_cq;\r
-       srq = qp->srq;\r
-       p_uctx = p_ib_pd->p_uctx;\r
-\r
-       ret = qp->device->destroy_qp(qp);\r
-       if (!ret) {\r
-               atomic_dec(&p_ib_pd->usecnt);\r
-               atomic_dec(&scq->usecnt);\r
-               atomic_dec(&rcq->usecnt);\r
-               HCA_PRINT(TRACE_LEVEL_INFORMATION  ,HCA_DBG_QP ,("PD%d use cnt %d, pd_handle %p, ctx %p \n", \r
-                       ((struct mlx4_ib_pd*)p_ib_pd)->pdn, p_ib_pd->usecnt, p_ib_pd, p_ib_pd->p_uctx));\r
-               if (srq)\r
-                       atomic_dec(&srq->usecnt);\r
-               release_user_cq_qp_resources(p_uctx);\r
-       }\r
-\r
-       return ret;\r
-}\r
-\r
-//\r
-// Shared receive queues\r
-//\r
-\r
-\r
-/* Shared receive queues */\r
-\r
-struct ib_srq *ibv_create_srq(struct ib_pd *pd,\r
-       struct ib_srq_init_attr *srq_init_attr,\r
-       struct ib_ucontext *p_uctx, ci_umv_buf_t* const p_umv_buf)\r
-{\r
-       int err;\r
-       struct ib_srq *p_ib_srq;\r
-       struct ib_udata udata, *p_udata = &udata;\r
-       struct ibv_create_srq *p_req = NULL;\r
-       struct ibv_create_srq_resp *p_resp= NULL;\r
-\r
-       if ( p_uctx && p_umv_buf && p_umv_buf->p_inout_buf) {\r
-               // prepare user parameters\r
-               p_req = (struct ibv_create_srq*)(ULONG_PTR)p_umv_buf->p_inout_buf;\r
-               p_resp = (struct ibv_create_srq_resp*)(ULONG_PTR)p_umv_buf->p_inout_buf;\r
-               INIT_UDATA(&udata, &p_req->buf_addr, &p_resp->srqn, \r
-                       sizeof(struct ibv_create_srq), sizeof(struct ibv_create_srq_resp));\r
-       }\r
-       else \r
-               p_udata = NULL;\r
-\r
-       p_ib_srq = pd->device->create_srq( pd, srq_init_attr, p_udata );\r
-       if (IS_ERR(p_ib_srq)) {\r
-               err = PTR_ERR(p_ib_srq);\r
-               HCA_PRINT(TRACE_LEVEL_ERROR,HCA_DBG_QP ,("create_srq failed (%d)\n", err));\r
-               goto err_create_srq;\r
-       }\r
-\r
-       // fill results\r
-       p_ib_srq->device                        = pd->device;\r
-       p_ib_srq->pd                            = pd;\r
-       p_ib_srq->p_uctx                                = p_uctx;\r
-       p_ib_srq->event_handler                 = srq_init_attr->event_handler;\r
-       p_ib_srq->srq_context                   = srq_init_attr->srq_context;\r
-       atomic_inc(&pd->usecnt);\r
-       atomic_set(&p_ib_srq->usecnt, 0);\r
-       if (p_uctx)\r
-               atomic_inc(&p_uctx->x.usecnt);\r
-               HCA_PRINT(TRACE_LEVEL_INFORMATION ,HCA_DBG_QP ,("PD%d use cnt %d, pd_handle %p, ctx %p \n", \r
-                       ((struct mlx4_ib_pd*)pd)->pdn, pd->usecnt, pd, pd->p_uctx));\r
-\r
-       HCA_PRINT(TRACE_LEVEL_INFORMATION, HCA_DBG_SRQ ,\r
-               ("uctx %p, qhndl %p, qnum %#x \n", \r
-               pd->p_uctx, p_ib_srq, ((struct mlx4_ib_srq*)p_ib_srq)->msrq.srqn ) );\r
-\r
-       // fill results for user\r
-       if (p_uctx && p_umv_buf && p_umv_buf->p_inout_buf) {\r
-               struct mlx4_ib_srq* p_mib_srq = (struct mlx4_ib_srq*)p_ib_srq;\r
-               p_resp->srq_handle = (__u64)(ULONG_PTR)p_ib_srq;\r
-               p_resp->max_wr = p_mib_srq->msrq.max - 1;\r
-               p_resp->max_sge = p_mib_srq->msrq.max_gs;\r
-               p_umv_buf->output_size = sizeof(struct ibv_create_srq_resp);\r
-               HCA_PRINT(TRACE_LEVEL_INFORMATION ,HCA_DBG_QP ,("PD%d use cnt %d \n", \r
-                       ((struct mlx4_ib_pd*)pd)->pdn, pd->usecnt));\r
-       }\r
-\r
-       return p_ib_srq;\r
-       \r
-err_create_srq:\r
-       if( p_umv_buf && p_umv_buf->command ) \r
-               p_umv_buf->status = IB_ERROR;\r
-       HCA_EXIT(HCA_DBG_QP);\r
-       return ERR_PTR(err);\r
-}\r
-\r
-int ib_destroy_srq(struct ib_srq *srq)\r
-{\r
-       int ret;\r
-       struct ib_pd *p_ib_pd = srq->pd;\r
-       struct ib_ucontext      *p_uctx = p_ib_pd->p_uctx;\r
-\r
-       ret = srq->device->destroy_srq(srq);\r
-       if (!ret) {\r
-               atomic_dec(&p_ib_pd->usecnt);\r
-               HCA_PRINT(TRACE_LEVEL_INFORMATION  ,HCA_DBG_SRQ ,("PD%d use cnt %d, pd_handle %p, ctx %p \n", \r
-                       ((struct mlx4_ib_pd*)p_ib_pd)->pdn, p_ib_pd->usecnt, p_ib_pd, p_ib_pd->p_uctx));\r
-               release_user_cq_qp_resources(p_uctx);\r
-       }\r
-\r
-       return ret;\r
-}\r
-\r
-//\r
-// User context\r
-//\r
-static NTSTATUS __map_memory_for_user(\r
-       IN              io_addr_t       addr,\r
-       IN              SIZE_T          size,\r
-       IN              MEMORY_CACHING_TYPE mem_type,\r
-       OUT             umap_t  *       p_map\r
-       )\r
-{\r
-       NTSTATUS status;\r
-\r
-       HCA_ENTER(HCA_DBG_SHIM);\r
-\r
-       p_map->mapped = 0;\r
-       \r
-       // map UAR to kernel \r
-       p_map->kva = ioremap(addr, size);\r
-       if (!p_map->kva) {\r
-               HCA_PRINT( TRACE_LEVEL_ERROR, HCA_DBG_LOW ,\r
-                       ("Couldn't map kernel access region, aborting.\n") );\r
-               status = IB_INSUFFICIENT_MEMORY;\r
-               goto err_ioremap;\r
-       }\r
-\r
-       // build MDL \r
-       p_map->mdl = IoAllocateMdl( p_map->kva, (ULONG)size,\r
-               FALSE, TRUE, NULL );\r
-       if( !p_map->mdl ) {\r
-               status = IB_INSUFFICIENT_MEMORY;\r
-               goto err_alloc_mdl;\r
-       }\r
-       MmBuildMdlForNonPagedPool( p_map->mdl );\r
-\r
-       /* Map the memory into the calling process's address space. */\r
-       __try   {\r
-               p_map->uva = MmMapLockedPagesSpecifyCache( p_map->mdl,\r
-                       UserMode, mem_type, NULL, FALSE, NormalPagePriority );\r
-       }\r
-       __except(EXCEPTION_EXECUTE_HANDLER) {\r
-               status = IB_INVALID_PERMISSION;\r
-               goto err_map;\r
-       }\r
-\r
-       p_map->mapped = 1;\r
-       status = STATUS_SUCCESS;\r
-       goto done;\r
-\r
-err_map:\r
-       IoFreeMdl(p_map->mdl);\r
-\r
-err_alloc_mdl: \r
-       iounmap(p_map->kva, PAGE_SIZE);\r
-\r
-err_ioremap:\r
-done:  \r
-       HCA_EXIT(HCA_DBG_SHIM);\r
-       return status;\r
-}\r
-\r
-static void __unmap_memory_for_user(\r
-       IN              umap_t  *       p_map\r
-       )\r
-{\r
-       if (p_map->mapped) {\r
-               p_map->mapped = 0;\r
-               MmUnmapLockedPages( p_map->uva, p_map->mdl );\r
-               IoFreeMdl(p_map->mdl);\r
-               iounmap(p_map->kva, PAGE_SIZE);\r
-       }\r
-}\r
-\r
-ib_api_status_t ibv_um_open(   \r
-       IN                      struct ib_device                *       p_ibdev,\r
-       IN      OUT             ci_umv_buf_t* const                     p_umv_buf,\r
-       OUT                     struct ib_ucontext              **      pp_uctx )\r
-{\r
-       int err;\r
-       ib_api_status_t         status;\r
-       struct mlx4_ib_ucontext *p_muctx;\r
-       struct ibv_get_context_resp *p_uresp;\r
-       struct mlx4_ib_alloc_ucontext_resp ib_alloc_ucontext_resp;\r
-       struct ib_ucontext              *p_uctx;\r
-       struct ib_udata udata;\r
-\r
-       HCA_ENTER(HCA_DBG_SHIM);\r
-\r
-       // create user context in kernel\r
-       INIT_UDATA(&udata, NULL, &ib_alloc_ucontext_resp, \r
-               0, sizeof(struct mlx4_ib_alloc_ucontext_resp));\r
-\r
-       p_uctx = p_ibdev->alloc_ucontext(p_ibdev, &udata);\r
-       if (IS_ERR(p_uctx)) {\r
-               err = PTR_ERR(p_uctx);\r
-               HCA_PRINT(TRACE_LEVEL_ERROR  ,HCA_DBG_SHIM,\r
-                       ("mthca_alloc_ucontext failed (%d)\n", err));\r
-               status = errno_to_iberr(err);\r
-               goto err_alloc_ucontext;\r
-       }\r
-       p_muctx = to_mucontext(p_uctx);\r
-       p_uresp = (struct ibv_get_context_resp *)(ULONG_PTR)p_umv_buf->p_inout_buf;\r
-\r
-       // fill the rest of ib_ucontext fields \r
-       p_uctx->device = p_ibdev;\r
-       p_uctx->closing = 0;\r
-\r
-       // livefish\r
-       if (hca_is_livefish(p_ibdev->x.p_fdo))\r
-               goto done;\r
-       \r
-       // map uar to user space\r
-       status = __map_memory_for_user( \r
-               (io_addr_t)p_muctx->uar.pfn << PAGE_SHIFT, \r
-               PAGE_SIZE, MmNonCached, &p_uctx->x.uar );\r
-       if( !NT_SUCCESS(status) ) {\r
-               goto err_map_uar;\r
-       }\r
-       p_uresp->uar_addr        = (u64)(ULONG_PTR)p_uctx->x.uar.uva;\r
-\r
-       // map BF to user space\r
-       if (ib_alloc_ucontext_resp.bf_reg_size) {\r
-               status = __map_memory_for_user( \r
-                       (io_addr_t)(p_muctx->uar.pfn + \r
-                       to_mdev(p_ibdev)->dev->caps.num_uars) << PAGE_SHIFT, \r
-                       PAGE_SIZE, MmWriteCombined, &p_uctx->x.bf );\r
-               if( !NT_SUCCESS(status) ) {\r
-                       HCA_PRINT(TRACE_LEVEL_WARNING ,HCA_DBG_SHIM,\r
-                               ("BlueFlame available, but failed to be mapped (%#x)\n", status));\r
-                       p_uresp->bf_page         = 0;\r
-                       p_uresp->bf_buf_size = 0;\r
-               } \r
-               else {\r
-                       p_uresp->bf_page         = (u64)(ULONG_PTR)p_uctx->x.bf.uva;\r
-                       p_uresp->bf_buf_size = ib_alloc_ucontext_resp.bf_reg_size / 2;\r
-                       p_uresp->bf_offset       = 0;\r
-               }\r
-       }\r
-       else {\r
-                       p_uresp->bf_page         = 0;\r
-                       p_uresp->bf_buf_size = 0;\r
-       }\r
-\r
-done:\r
-       // fill the response\r
-       p_uresp->bf_reg_size             = ib_alloc_ucontext_resp.bf_reg_size;\r
-       p_uresp->bf_regs_per_page        = ib_alloc_ucontext_resp.bf_regs_per_page;\r
-       p_uresp->qp_tab_size             = ib_alloc_ucontext_resp.qp_tab_size;\r
-\r
-       *pp_uctx = p_uctx;\r
-       status = IB_SUCCESS;\r
-       goto end;\r
-\r
-err_map_uar:\r
-       p_ibdev->dealloc_ucontext(p_uctx);\r
-err_alloc_ucontext: \r
-end:\r
-       HCA_EXIT(HCA_DBG_SHIM);\r
-       return status;\r
-}\r
-\r
-\r
-void ibv_um_close(     struct ib_ucontext * h_um_ca )\r
-{\r
-       int err;\r
-       ib_api_status_t         status;\r
-       struct ib_ucontext *p_uctx = (struct ib_ucontext *)h_um_ca;\r
-       PFDO_DEVICE_DATA p_fdo = p_uctx->device->x.p_fdo;\r
-\r
-       HCA_ENTER(HCA_DBG_SHIM);\r
-\r
-       p_uctx->closing = 1;\r
-\r
-       if (atomic_read(&p_uctx->x.usecnt)) {\r
-               HCA_PRINT(TRACE_LEVEL_ERROR,HCA_DBG_SHIM,\r
-                       ("resources are not released (cnt %d)\n", p_uctx->x.usecnt));\r
-               status = IB_RESOURCE_BUSY;\r
-               goto err_usage;\r
-       }\r
-       \r
-       if ( !hca_is_livefish(p_fdo)) {\r
-               __unmap_memory_for_user( &p_uctx->x.bf );\r
-               __unmap_memory_for_user( &p_uctx->x.uar );\r
-       }\r
-\r
-       err = p_fdo->bus_ib_ifc.p_ibdev->dealloc_ucontext(p_uctx);\r
-       if (err) {\r
-               HCA_PRINT(TRACE_LEVEL_ERROR,HCA_DBG_SHIM,\r
-                       ("mthca_dealloc_ucontext failed (%d)\n", err));\r
-               status = errno_to_iberr(err);\r
-               goto err_dealloc_ucontext;\r
-       }\r
-\r
-       HCA_PRINT(TRACE_LEVEL_INFORMATION,HCA_DBG_SHIM,\r
-               ("pcs %p\n", PsGetCurrentProcess()) );\r
-       status = IB_SUCCESS;\r
-       goto end;\r
-       \r
-err_dealloc_ucontext: \r
-err_usage:\r
-end:\r
-       if (status != IB_SUCCESS)\r
-       {\r
-               HCA_PRINT(TRACE_LEVEL_ERROR,HCA_DBG_SHIM,\r
-                       ("completes with ERROR status %x\n", status));\r
-       }\r
-       HCA_EXIT(HCA_DBG_SHIM);\r
-       return;\r
-}\r
-\r
diff --git a/branches/WOF2-0/trunk/hw/mlx4/kernel/hca/verbs.h b/branches/WOF2-0/trunk/hw/mlx4/kernel/hca/verbs.h
deleted file mode 100644 (file)
index d160159..0000000
+++ /dev/null
@@ -1,72 +0,0 @@
-/*\r
- * Copyright (c) 2004 Mellanox Technologies Ltd.  All rights reserved.\r
- * Copyright (c) 2004 Infinicon Corporation.  All rights reserved.\r
- * Copyright (c) 2004 Intel Corporation.  All rights reserved.\r
- * Copyright (c) 2004 Topspin Corporation.  All rights reserved.\r
- * Copyright (c) 2004 Voltaire Corporation.  All rights reserved.\r
- * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.\r
- * Copyright (c) 2005 Cisco Systems.  All rights reserved.\r
- *\r
- * This software is available to you under a choice of one of two\r
- * licenses.  You may choose to be licensed under the terms of the GNU\r
- * General Public License (GPL) Version 2, available from the file\r
- * COPYING in the main directory of this source tree, or the\r
- * OpenIB.org BSD license below:\r
- *\r
- *     Redistribution and use in source and binary forms, with or\r
- *     without modification, are permitted provided that the following\r
- *     conditions are met:\r
- *\r
- *      - Redistributions of source code must retain the above\r
- *        copyright notice, this list of conditions and the following\r
- *        disclaimer.\r
- *\r
- *      - Redistributions in binary form must reproduce the above\r
- *        copyright notice, this list of conditions and the following\r
- *        disclaimer in the documentation and/or other materials\r
- *        provided with the distribution.\r
- *\r
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,\r
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\r
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND\r
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS\r
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN\r
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN\r
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\r
- * SOFTWARE.\r
- *\r
- * $Id: ib_verbs.h 1889 2006-12-31 08:33:06Z sleybo $\r
- */\r
-\r
-#pragma once\r
-\r
-#include "ib_verbs.h"\r
-\r
-struct ib_mr *ibv_reg_mr(struct ib_pd *pd, \r
-       u64 start, u64 length,\r
-       u64 virt_addr,\r
-       int mr_access_flags,\r
-       ci_umv_buf_t* const p_umv_buf );\r
-\r
-struct ib_cq *ibv_create_cq(struct ib_device *p_ibdev,\r
-                          ib_comp_handler comp_handler,\r
-                          void (*event_handler)(ib_event_rec_t *),\r
-                          void *cq_context, int cqe, \r
-                          struct ib_ucontext *p_uctx, ci_umv_buf_t* const p_umv_buf);\r
-\r
-struct ib_qp *ibv_create_qp(struct ib_pd *pd,\r
-       struct ib_qp_init_attr *qp_init_attr,\r
-       struct ib_ucontext *context, ci_umv_buf_t* const p_umv_buf);\r
-\r
-struct ib_srq *ibv_create_srq(struct ib_pd *pd,\r
-       struct ib_srq_init_attr *srq_init_attr,\r
-       struct ib_ucontext *context, ci_umv_buf_t* const p_umv_buf);\r
-\r
-ib_api_status_t ibv_um_open(   \r
-       IN                      struct ib_device                *       p_ibdev,\r
-       IN      OUT             ci_umv_buf_t* const                     p_umv_buf,\r
-       OUT                     struct ib_ucontext              **      pp_uctx );\r
-\r
-void ibv_um_close(     struct ib_ucontext * h_um_ca );\r
-\r
-\r
index 76e8d2efe2eb4202a03a82c51788906ae6446097..d9b21bcb9f9f0d23d8594432a314dc50817d5ff6 100644 (file)
@@ -93,7 +93,7 @@ end:
        WPP_DEFINE_BIT( MLX4_DBG_INIT) \\r
        WPP_DEFINE_BIT( MLX4_DBG_MAD) \\r
        WPP_DEFINE_BIT( MLX4_DBG_PO) \\r
-       WPP_DEFINE_BIT( MLX4_DBG_PD)\\r
+       WPP_DEFINE_BIT( MLX4_DBG_PD) \\r
        WPP_DEFINE_BIT( MLX4_DBG_CQ) \\r
        WPP_DEFINE_BIT( MLX4_DBG_QP) \\r
        WPP_DEFINE_BIT( MLX4_DBG_MEMORY) \\r
@@ -101,8 +101,8 @@ end:
        WPP_DEFINE_BIT( MLX4_DBG_SRQ) \\r
        WPP_DEFINE_BIT( MLX4_DBG_MCAST) \\r
        WPP_DEFINE_BIT( MLX4_DBG_LOW) \\r
-       WPP_DEFINE_BIT( MLX4_DBG_SHIM)) \\r
-       WPP_DEFINE_BIT( MLX4_DBG_DRV))\r
+       WPP_DEFINE_BIT( MLX4_DBG_SHIM) \\r
+       WPP_DEFINE_BIT( MLX4_DBG_DRV) )\r
 \r
 \r
 #define WPP_GLOBALLOGGER\r