git.openfabrics.org - ~shefty/rdma-win.git/commitdiff
[MTHCA] added alpha version of new low-level driver, supporting memfree HCA devices
author	tzachid <tzachid@ad392aa1-c5ef-ae45-8dd8-e69d62a5ef86>
	Tue, 21 Mar 2006 18:12:39 +0000 (18:12 +0000)
committer	tzachid <tzachid@ad392aa1-c5ef-ae45-8dd8-e69d62a5ef86>
	Tue, 21 Mar 2006 18:12:39 +0000 (18:12 +0000)
git-svn-id: svn://openib.tc.cornell.edu/gen1@248 ad392aa1-c5ef-ae45-8dd8-e69d62a5ef86

113 files changed:
trunk/hw/mthca/dirs [new file with mode: 0644]
trunk/hw/mthca/hca_utils.c [new file with mode: 0644]
trunk/hw/mthca/hca_utils.h [new file with mode: 0644]
trunk/hw/mthca/kernel/Makefile [new file with mode: 0644]
trunk/hw/mthca/kernel/SOURCES [new file with mode: 0644]
trunk/hw/mthca/kernel/hca.rc [new file with mode: 0644]
trunk/hw/mthca/kernel/hca_data.c [new file with mode: 0644]
trunk/hw/mthca/kernel/hca_data.h [new file with mode: 0644]
trunk/hw/mthca/kernel/hca_debug.h [new file with mode: 0644]
trunk/hw/mthca/kernel/hca_direct.c [new file with mode: 0644]
trunk/hw/mthca/kernel/hca_driver.c [new file with mode: 0644]
trunk/hw/mthca/kernel/hca_driver.h [new file with mode: 0644]
trunk/hw/mthca/kernel/hca_mcast.c [new file with mode: 0644]
trunk/hw/mthca/kernel/hca_memory.c [new file with mode: 0644]
trunk/hw/mthca/kernel/hca_pci.c [new file with mode: 0644]
trunk/hw/mthca/kernel/hca_pci.h [new file with mode: 0644]
trunk/hw/mthca/kernel/hca_pnp.c [new file with mode: 0644]
trunk/hw/mthca/kernel/hca_pnp.h [new file with mode: 0644]
trunk/hw/mthca/kernel/hca_smp.c [new file with mode: 0644]
trunk/hw/mthca/kernel/hca_verbs.c [new file with mode: 0644]
trunk/hw/mthca/kernel/ib_cache.h [new file with mode: 0644]
trunk/hw/mthca/kernel/ib_mad.h [new file with mode: 0644]
trunk/hw/mthca/kernel/ib_pack.h [new file with mode: 0644]
trunk/hw/mthca/kernel/ib_smi.h [new file with mode: 0644]
trunk/hw/mthca/kernel/ib_verbs.h [new file with mode: 0644]
trunk/hw/mthca/kernel/mt_atomic.h [new file with mode: 0644]
trunk/hw/mthca/kernel/mt_bitmap.h [new file with mode: 0644]
trunk/hw/mthca/kernel/mt_cache.c [new file with mode: 0644]
trunk/hw/mthca/kernel/mt_device.c [new file with mode: 0644]
trunk/hw/mthca/kernel/mt_l2w.c [new file with mode: 0644]
trunk/hw/mthca/kernel/mt_l2w.h [new file with mode: 0644]
trunk/hw/mthca/kernel/mt_list.h [new file with mode: 0644]
trunk/hw/mthca/kernel/mt_memory.c [new file with mode: 0644]
trunk/hw/mthca/kernel/mt_memory.h [new file with mode: 0644]
trunk/hw/mthca/kernel/mt_packer.c [new file with mode: 0644]
trunk/hw/mthca/kernel/mt_pci.h [new file with mode: 0644]
trunk/hw/mthca/kernel/mt_pcipool.h [new file with mode: 0644]
trunk/hw/mthca/kernel/mt_reset_tavor.c [new file with mode: 0644]
trunk/hw/mthca/kernel/mt_spinlock.h [new file with mode: 0644]
trunk/hw/mthca/kernel/mt_sync.h [new file with mode: 0644]
trunk/hw/mthca/kernel/mt_time.h [new file with mode: 0644]
trunk/hw/mthca/kernel/mt_types.h [new file with mode: 0644]
trunk/hw/mthca/kernel/mt_ud_header.c [new file with mode: 0644]
trunk/hw/mthca/kernel/mt_uverbs.c [new file with mode: 0644]
trunk/hw/mthca/kernel/mt_uverbs.h [new file with mode: 0644]
trunk/hw/mthca/kernel/mt_uverbsmem.c [new file with mode: 0644]
trunk/hw/mthca/kernel/mt_verbs.c [new file with mode: 0644]
trunk/hw/mthca/kernel/mthca.h [new file with mode: 0644]
trunk/hw/mthca/kernel/mthca.inf [new file with mode: 0644]
trunk/hw/mthca/kernel/mthca_allocator.c [new file with mode: 0644]
trunk/hw/mthca/kernel/mthca_av.c [new file with mode: 0644]
trunk/hw/mthca/kernel/mthca_catas.c [new file with mode: 0644]
trunk/hw/mthca/kernel/mthca_cmd.c [new file with mode: 0644]
trunk/hw/mthca/kernel/mthca_cmd.h [new file with mode: 0644]
trunk/hw/mthca/kernel/mthca_config_reg.h [new file with mode: 0644]
trunk/hw/mthca/kernel/mthca_cq.c [new file with mode: 0644]
trunk/hw/mthca/kernel/mthca_dev.h [new file with mode: 0644]
trunk/hw/mthca/kernel/mthca_doorbell.h [new file with mode: 0644]
trunk/hw/mthca/kernel/mthca_eq.c [new file with mode: 0644]
trunk/hw/mthca/kernel/mthca_log.c [new file with mode: 0644]
trunk/hw/mthca/kernel/mthca_log.mc [new file with mode: 0644]
trunk/hw/mthca/kernel/mthca_log.rc [new file with mode: 0644]
trunk/hw/mthca/kernel/mthca_mad.c [new file with mode: 0644]
trunk/hw/mthca/kernel/mthca_main.c [new file with mode: 0644]
trunk/hw/mthca/kernel/mthca_mcg.c [new file with mode: 0644]
trunk/hw/mthca/kernel/mthca_memfree.c [new file with mode: 0644]
trunk/hw/mthca/kernel/mthca_memfree.h [new file with mode: 0644]
trunk/hw/mthca/kernel/mthca_mr.c [new file with mode: 0644]
trunk/hw/mthca/kernel/mthca_pd.c [new file with mode: 0644]
trunk/hw/mthca/kernel/mthca_profile.c [new file with mode: 0644]
trunk/hw/mthca/kernel/mthca_profile.h [new file with mode: 0644]
trunk/hw/mthca/kernel/mthca_provider.c [new file with mode: 0644]
trunk/hw/mthca/kernel/mthca_provider.h [new file with mode: 0644]
trunk/hw/mthca/kernel/mthca_qp.c [new file with mode: 0644]
trunk/hw/mthca/kernel/mthca_srq.c [new file with mode: 0644]
trunk/hw/mthca/kernel/mthca_uar.c [new file with mode: 0644]
trunk/hw/mthca/kernel/mthca_user.h [new file with mode: 0644]
trunk/hw/mthca/kernel/mthca_wqe.h [new file with mode: 0644]
trunk/hw/mthca/mx_abi.h [new file with mode: 0644]
trunk/hw/mthca/user/Makefile [new file with mode: 0644]
trunk/hw/mthca/user/SOURCES [new file with mode: 0644]
trunk/hw/mthca/user/arch.h [new file with mode: 0644]
trunk/hw/mthca/user/driver.h [new file with mode: 0644]
trunk/hw/mthca/user/mlnx_ual_av.c [new file with mode: 0644]
trunk/hw/mthca/user/mlnx_ual_ca.c [new file with mode: 0644]
trunk/hw/mthca/user/mlnx_ual_cq.c [new file with mode: 0644]
trunk/hw/mthca/user/mlnx_ual_data.h [new file with mode: 0644]
trunk/hw/mthca/user/mlnx_ual_main.c [new file with mode: 0644]
trunk/hw/mthca/user/mlnx_ual_main.h [new file with mode: 0644]
trunk/hw/mthca/user/mlnx_ual_mcast.c [new file with mode: 0644]
trunk/hw/mthca/user/mlnx_ual_mrw.c [new file with mode: 0644]
trunk/hw/mthca/user/mlnx_ual_osbypass.c [new file with mode: 0644]
trunk/hw/mthca/user/mlnx_ual_pd.c [new file with mode: 0644]
trunk/hw/mthca/user/mlnx_ual_qp.c [new file with mode: 0644]
trunk/hw/mthca/user/mlnx_uvp.c [new file with mode: 0644]
trunk/hw/mthca/user/mlnx_uvp.def [new file with mode: 0644]
trunk/hw/mthca/user/mlnx_uvp.h [new file with mode: 0644]
trunk/hw/mthca/user/mlnx_uvp.rc [new file with mode: 0644]
trunk/hw/mthca/user/mlnx_uvp_abi.h [new file with mode: 0644]
trunk/hw/mthca/user/mlnx_uvp_ah.c [new file with mode: 0644]
trunk/hw/mthca/user/mlnx_uvp_cq.c [new file with mode: 0644]
trunk/hw/mthca/user/mlnx_uvp_debug.c [new file with mode: 0644]
trunk/hw/mthca/user/mlnx_uvp_debug.h [new file with mode: 0644]
trunk/hw/mthca/user/mlnx_uvp_doorbell.h [new file with mode: 0644]
trunk/hw/mthca/user/mlnx_uvp_kern_abi.h [new file with mode: 0644]
trunk/hw/mthca/user/mlnx_uvp_memfree.c [new file with mode: 0644]
trunk/hw/mthca/user/mlnx_uvp_qp.c [new file with mode: 0644]
trunk/hw/mthca/user/mlnx_uvp_srq.c [new file with mode: 0644]
trunk/hw/mthca/user/mlnx_uvp_verbs.c [new file with mode: 0644]
trunk/hw/mthca/user/mlnx_uvp_verbs.h [new file with mode: 0644]
trunk/hw/mthca/user/mlnx_uvp_wqe.h [new file with mode: 0644]
trunk/hw/mthca/user/mt_l2w.h [new file with mode: 0644]
trunk/hw/mthca/user/opcode.h [new file with mode: 0644]

diff --git a/trunk/hw/mthca/dirs b/trunk/hw/mthca/dirs
new file mode 100644 (file)
index 0000000..aa69813
--- /dev/null
@@ -0,0 +1,3 @@
+DIRS=\\r
+       kernel  \\r
+       user\r
diff --git a/trunk/hw/mthca/hca_utils.c b/trunk/hw/mthca/hca_utils.c
new file mode 100644 (file)
index 0000000..3ea83ce
--- /dev/null
@@ -0,0 +1,79 @@
+/*
+ * Copyright (c) 2005 SilverStorm Technologies.  All rights reserved.
+ * Copyright (c) 2004-2005 Mellanox Technologies, Inc. All rights reserved. 
+ *
+ * This software is available to you under the OpenIB.org BSD license
+ * below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id: hca_data.c 148 2005-07-12 07:48:46Z sleybo $
+ */
+
+
+#include "mthca_dev.h"
+
+
+#if defined(EVENT_TRACING)
+#ifdef offsetof
+#undef offsetof
+#endif
+#include "hca_data.tmh"
+#endif
+
+
+mthca_qp_access_t
+map_qp_ibal_acl(
+       IN                              ib_access_t                                     ibal_acl)
+{
+#define IBAL_ACL(ifl,mfl) if (ibal_acl & ifl)   mthca_acl |= mfl
+       mthca_qp_access_t               mthca_acl = 0;
+
+       IBAL_ACL(IB_AC_RDMA_READ,MTHCA_ACCESS_REMOTE_READ);
+       IBAL_ACL(IB_AC_RDMA_WRITE,MTHCA_ACCESS_REMOTE_WRITE);
+       IBAL_ACL(IB_AC_ATOMIC,MTHCA_ACCESS_REMOTE_ATOMIC);
+       IBAL_ACL(IB_AC_LOCAL_WRITE,MTHCA_ACCESS_LOCAL_WRITE);
+       IBAL_ACL(IB_AC_MW_BIND,MTHCA_ACCESS_MW_BIND);
+
+       return mthca_acl;
+}
+
+/////////////////////////////////////////////////////////
+/////////////////////////////////////////////////////////
+ib_access_t
+map_qp_mthca_acl(
+       IN                              mthca_qp_access_t                               mthca_acl)
+{
+#define ACL_IBAL(mfl,ifl) if (mthca_acl & mfl)   ibal_acl |= ifl
+       ib_access_t ibal_acl = 0;
+
+       ACL_IBAL(MTHCA_ACCESS_REMOTE_READ,IB_AC_RDMA_READ);
+       ACL_IBAL(MTHCA_ACCESS_REMOTE_WRITE,IB_AC_RDMA_WRITE);
+       ACL_IBAL(MTHCA_ACCESS_REMOTE_ATOMIC,IB_AC_ATOMIC);
+       ACL_IBAL(MTHCA_ACCESS_LOCAL_WRITE,IB_AC_LOCAL_WRITE);
+       ACL_IBAL(MTHCA_ACCESS_MW_BIND,IB_AC_MW_BIND);
+
+       return ibal_acl;
+}
+
+
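
The two helpers above translate access-control masks between the IBAL flag namespace (IB_AC_*) and the MTHCA flag namespace (MTHCA_ACCESS_*), accumulating one output bit per input bit via the IBAL_ACL/ACL_IBAL macros. Below is a minimal, self-contained sketch of that accumulate-if-set pattern; the flag names and values are hypothetical stand-ins for illustration, not the driver's real constants.

    #include <stdio.h>
    #include <stdint.h>

    /* Hypothetical flag values -- illustration only, not the driver's constants. */
    #define SRC_READ    0x1
    #define SRC_WRITE   0x2
    #define DST_READ    0x10
    #define DST_WRITE   0x20

    /* Same accumulate-if-set idiom as IBAL_ACL()/ACL_IBAL() above. */
    #define MAP_FLAG(in, ifl, out, ofl)   do { if ((in) & (ifl)) (out) |= (ofl); } while (0)

    static uint32_t map_acl(uint32_t src)
    {
        uint32_t dst = 0;
        MAP_FLAG(src, SRC_READ,  dst, DST_READ);
        MAP_FLAG(src, SRC_WRITE, dst, DST_WRITE);
        return dst;
    }

    int main(void)
    {
        /* With the hypothetical values, read|write (0x3) maps to 0x30. */
        printf("0x%x -> 0x%x\n", 0x3u, (unsigned)map_acl(0x3));
        return 0;
    }
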
diff --git a/trunk/hw/mthca/hca_utils.h b/trunk/hw/mthca/hca_utils.h
new file mode 100644 (file)
index 0000000..ba259a8
--- /dev/null
@@ -0,0 +1,49 @@
+/*
+ * Copyright (c) 2005 SilverStorm Technologies.  All rights reserved.
+ * Copyright (c) 2004-2005 Mellanox Technologies, Inc. All rights reserved. 
+ *
+ * This software is available to you under the OpenIB.org BSD license
+ * below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id: hca_data.h 148 2005-07-12 07:48:46Z sleybo $
+ */
+
+#ifndef __HCA_UTILS_H__
+#define __HCA_UTILS_H__
+
+#include <iba\ib_types.h>
+#include <ib_verbs.h>
+
+mthca_qp_access_t
+map_qp_ibal_acl(
+       IN                              ib_access_t                                     ibal_acl)
+;
+
+ib_access_t
+map_qp_mthca_acl(
+       IN                              mthca_qp_access_t                               mthca_acl);
+
+#endif
+
diff --git a/trunk/hw/mthca/kernel/Makefile b/trunk/hw/mthca/kernel/Makefile
new file mode 100644 (file)
index 0000000..1c8f294
--- /dev/null
@@ -0,0 +1,6 @@
+# DO NOT EDIT THIS FILE!!!  Edit .\sources. if you want to add a new source\r
+# file to this component.  This file merely indirects to the real make file\r
+# that is shared by all the driver components of the Windows NT DDK\r
+#\r
+\r
+!INCLUDE ..\..\..\inc\openib.def#\r
diff --git a/trunk/hw/mthca/kernel/SOURCES b/trunk/hw/mthca/kernel/SOURCES
new file mode 100644 (file)
index 0000000..9bc19b1
--- /dev/null
@@ -0,0 +1,83 @@
+TRUNK=..\..\..\r
+\r
+TARGETNAME=mthca\r
+TARGETPATH=$(TRUNK)\bin\kernel\obj$(BUILD_ALT_DIR)\r
+TARGETTYPE=DRIVER\r
+\r
+#ENABLE_EVENT_TRACING=1\r
+\r
+SOURCES= \\r
+       mthca_log.mc            \\r
+       mthca_log.rc            \\r
+       hca.rc                  \\r
+       ..\hca_utils.c          \\r
+       hca_data.c              \\r
+       hca_mcast.c             \\r
+       hca_verbs.c             \\r
+       hca_pnp.c               \\r
+       hca_pci.c                       \\r
+       hca_driver.c            \\r
+       hca_direct.c            \\r
+       hca_memory.c    \\r
+       hca_smp.c               \\r
+                                       \\r
+       mt_l2w.c                        \\r
+       mt_memory.c             \\r
+       mt_cache.c              \\r
+       mt_packer.c             \\r
+       mt_ud_header.c  \\r
+       mt_device.c             \\r
+       mt_verbs.c              \\r
+       mt_reset_tavor.c        \\r
+       mt_uverbs.c             \\r
+       mt_uverbsmem.c  \\r
+                                       \\r
+       mthca_allocator.c       \\r
+       mthca_av.c              \\r
+       mthca_cmd.c             \\r
+       mthca_cq.c              \\r
+       mthca_eq.c              \\r
+       mthca_main.c            \\r
+       mthca_memfree.c \\r
+       mthca_mr.c              \\r
+       mthca_mcg.c             \\r
+       mthca_mad.c             \\r
+       mthca_pd.c              \\r
+       mthca_profile.c \\r
+       mthca_provider.c        \\r
+       mthca_qp.c              \\r
+       mthca_srq.c             \\r
+       mthca_uar.c             \\r
+       mthca_log.c             \\r
+       mthca_catas.c           \r
+\r
+\r
+INCLUDES=\\r
+       ..; \\r
+       $(TRUNK)\inc;   \\r
+       $(TRUNK)\inc\kernel; \\r
+       $(TRUNK)\inc\complib; \\r
+       $(TRUNK)\inc\kernel\complib; \\r
+       \r
+C_DEFINES=$(C_DEFINES) -DDRIVER -DDEPRECATE_DDK_FUNCTIONS -D__LITTLE_ENDIAN\r
+\r
+TARGETLIBS= \\r
+       $(TARGETPATH)\*\complib.lib     \\r
+       $(TARGETPATH)\*\ibal.lib        \\r
+       $(DDK_LIB_PATH)\wdmguid.lib\r
+\r
+\r
+\r
+!IFDEF ENABLE_EVENT_TRACING\r
+\r
+C_DEFINES = $(C_DEFINES) -DEVENT_TRACING\r
+\r
+RUN_WPP= -ext:.c.h $(SOURCES) -km \\r
+       -scan:hca_debug.h \\r
+       -func:HCA_PRINT(LEVEL,FLAGS,(MSG,...)) \\r
+       -func:HCA_PRINT_EV(LEVEL,FLAGS,(MSG,...)) \\r
+       -func:HCA_PRINT_EXIT(LEVEL,FLAGS,(MSG,...)) \r
+!ENDIF\r
+\r
+\r
+MSC_WARNING_LEVEL= /W4\r
diff --git a/trunk/hw/mthca/kernel/hca.rc b/trunk/hw/mthca/kernel/hca.rc
new file mode 100644 (file)
index 0000000..345f439
--- /dev/null
@@ -0,0 +1,44 @@
+/*\r
+ * Copyright (c) 2005 SilverStorm Technologies.  All rights reserved.\r
+ *\r
+ * This software is available to you under the OpenIB.org BSD license\r
+ * below:\r
+ *\r
+ *     Redistribution and use in source and binary forms, with or\r
+ *     without modification, are permitted provided that the following\r
+ *     conditions are met:\r
+ *\r
+ *      - Redistributions of source code must retain the above\r
+ *        copyright notice, this list of conditions and the following\r
+ *        disclaimer.\r
+ *\r
+ *      - Redistributions in binary form must reproduce the above\r
+ *        copyright notice, this list of conditions and the following\r
+ *        disclaimer in the documentation and/or other materials\r
+ *        provided with the distribution.\r
+ *\r
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,\r
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\r
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND\r
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS\r
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN\r
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN\r
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\r
+ * SOFTWARE.\r
+ *\r
+ * $Id$\r
+ */\r
+\r
+\r
+#include <oib_ver.h>\r
+\r
+#define VER_FILETYPE                           VFT_DRV\r
+#define VER_FILESUBTYPE                                VFT2_UNKNOWN\r
+#ifdef DBG\r
+#define VER_FILEDESCRIPTION_STR     "HCA Driver (checked)"\r
+#else\r
+#define VER_FILEDESCRIPTION_STR     "HCA Driver"\r
+#endif\r
+#define VER_INTERNALNAME_STR        "mthca.sys"\r
+#define VER_ORIGINALFILENAME_STR    "mthca.sys"\r
+#include <common.ver>\r
diff --git a/trunk/hw/mthca/kernel/hca_data.c b/trunk/hw/mthca/kernel/hca_data.c
new file mode 100644 (file)
index 0000000..29d83fb
--- /dev/null
@@ -0,0 +1,868 @@
+/*\r
+ * Copyright (c) 2005 SilverStorm Technologies.  All rights reserved.\r
+ * Copyright (c) 2004-2005 Mellanox Technologies, Inc. All rights reserved. \r
+ *\r
+ * This software is available to you under the OpenIB.org BSD license\r
+ * below:\r
+ *\r
+ *     Redistribution and use in source and binary forms, with or\r
+ *     without modification, are permitted provided that the following\r
+ *     conditions are met:\r
+ *\r
+ *      - Redistributions of source code must retain the above\r
+ *        copyright notice, this list of conditions and the following\r
+ *        disclaimer.\r
+ *\r
+ *      - Redistributions in binary form must reproduce the above\r
+ *        copyright notice, this list of conditions and the following\r
+ *        disclaimer in the documentation and/or other materials\r
+ *        provided with the distribution.\r
+ *\r
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,\r
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\r
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND\r
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS\r
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN\r
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN\r
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\r
+ * SOFTWARE.\r
+ *\r
+ * $Id: hca_data.c 148 2005-07-12 07:48:46Z sleybo $\r
+ */\r
+\r
+\r
+#include "hca_driver.h"\r
+#include "hca_utils.h"\r
+\r
+#if defined(EVENT_TRACING)\r
+#ifdef offsetof\r
+#undef offsetof\r
+#endif\r
+#include "hca_data.tmh"\r
+#endif\r
+\r
+#include "mthca_dev.h"\r
+#include <ib_cache.h>\r
+\r
+static cl_spinlock_t   hob_lock;\r
+\r
+\r
+\r
+uint32_t               g_mlnx_dpc2thread = 0;\r
+\r
+\r
+cl_qlist_t             mlnx_hca_list;\r
+\r
+mlnx_hob_t             mlnx_hob_array[MLNX_NUM_HOBKL];         // kernel HOB - one per HCA (cmdif access)\r
+mlnx_hobul_t   *mlnx_hobul_array[MLNX_NUM_HOBUL];      // kernel HOBUL - one per HCA (kar access)\r
+\r
+/////////////////////////////////////////////////////////\r
+// ### HCA\r
+/////////////////////////////////////////////////////////\r
+void\r
+mlnx_hca_insert(\r
+       IN                              mlnx_hca_t                                      *p_hca )\r
+{\r
+       cl_spinlock_acquire( &hob_lock );\r
+       cl_qlist_insert_tail( &mlnx_hca_list, &p_hca->list_item );\r
+       cl_spinlock_release( &hob_lock );\r
+}\r
+\r
+void\r
+mlnx_hca_remove(\r
+       IN                              mlnx_hca_t                                      *p_hca )\r
+{\r
+       cl_spinlock_acquire( &hob_lock );\r
+       cl_qlist_remove_item( &mlnx_hca_list, &p_hca->list_item );\r
+       cl_spinlock_release( &hob_lock );\r
+}\r
+\r
+mlnx_hca_t*\r
+mlnx_hca_from_guid(\r
+       IN                              ib_net64_t                                      guid )\r
+{\r
+       cl_list_item_t  *p_item;\r
+       mlnx_hca_t              *p_hca = NULL;\r
+\r
+       cl_spinlock_acquire( &hob_lock );\r
+       p_item = cl_qlist_head( &mlnx_hca_list );\r
+       while( p_item != cl_qlist_end( &mlnx_hca_list ) )\r
+       {\r
+               p_hca = PARENT_STRUCT( p_item, mlnx_hca_t, list_item );\r
+               if( p_hca->guid == guid )\r
+                       break;\r
+               p_item = cl_qlist_next( p_item );\r
+               p_hca = NULL;\r
+       }\r
+       cl_spinlock_release( &hob_lock );\r
+       return p_hca;\r
+}\r
+\r
+/*\r
+void\r
+mlnx_names_from_guid(\r
+       IN                              ib_net64_t                                      guid,\r
+               OUT                     char                                            **hca_name_p,\r
+               OUT                     char                                            **dev_name_p)\r
+{\r
+       unsigned int idx;\r
+\r
+       if (!hca_name_p) return;\r
+       if (!dev_name_p) return;\r
+\r
+       for (idx = 0; idx < mlnx_num_hca; idx++)\r
+       {\r
+               if (mlnx_hca_array[idx].ifx.guid == guid)\r
+               {\r
+                       *hca_name_p = mlnx_hca_array[idx].hca_name_p;\r
+                       *dev_name_p = mlnx_hca_array[idx].dev_name_p;\r
+               }\r
+       }\r
+}\r
+*/\r
+\r
+/////////////////////////////////////////////////////////\r
+// ### HCA\r
+/////////////////////////////////////////////////////////\r
+cl_status_t\r
+mlnx_hcas_init( void )\r
+{\r
+       cl_qlist_init( &mlnx_hca_list );\r
+       return cl_spinlock_init( &hob_lock );\r
+}\r
+\r
+\r
+/////////////////////////////////////////////////////////\r
+/////////////////////////////////////////////////////////\r
+ib_api_status_t\r
+mlnx_hobs_set_cb(\r
+       IN                              mlnx_hob_t                                      *hob_p, \r
+       IN                              ci_completion_cb_t                      comp_cb_p,\r
+       IN                              ci_async_event_cb_t                     async_cb_p,\r
+       IN              const   void* const                                     ib_context)\r
+{\r
+       cl_status_t             cl_status;\r
+\r
+       // Setup the callbacks\r
+       if (!hob_p->async_proc_mgr_p)\r
+       {\r
+               hob_p->async_proc_mgr_p = cl_malloc( sizeof( cl_async_proc_t ) );\r
+               if( !hob_p->async_proc_mgr_p )\r
+               {\r
+                       return IB_INSUFFICIENT_MEMORY;\r
+               }\r
+               cl_async_proc_construct( hob_p->async_proc_mgr_p );\r
+               cl_status = cl_async_proc_init( hob_p->async_proc_mgr_p, MLNX_NUM_CB_THR, "CBthread" );\r
+               if( cl_status != CL_SUCCESS )\r
+               {\r
+                       cl_async_proc_destroy( hob_p->async_proc_mgr_p );\r
+                       cl_free(hob_p->async_proc_mgr_p);\r
+                       hob_p->async_proc_mgr_p = NULL;\r
+                       return IB_INSUFFICIENT_RESOURCES;\r
+               }\r
+       }\r
+\r
+       hob_p->comp_cb_p        = comp_cb_p;\r
+       hob_p->async_cb_p = async_cb_p;\r
+       hob_p->ca_context = ib_context; // This is the context our CB forwards to IBAL\r
+       HCA_PRINT(TRACE_LEVEL_INFORMATION, HCA_DBG_SHIM,("CL: hca_idx %d context 0x%p\n", (int)(hob_p - mlnx_hob_array), ib_context));\r
+       return IB_SUCCESS;\r
+}\r
+\r
+/////////////////////////////////////////////////////////\r
+/////////////////////////////////////////////////////////\r
+void\r
+mlnx_hobs_remove(\r
+       IN                              mlnx_hob_t                                      *hob_p)\r
+{\r
+       cl_async_proc_t *p_async_proc;\r
+       mlnx_cache_t    *p_cache;\r
+\r
+       cl_spinlock_acquire( &hob_lock );\r
+\r
+       hob_p->mark = E_MARK_INVALID;\r
+\r
+       p_async_proc = hob_p->async_proc_mgr_p;\r
+       hob_p->async_proc_mgr_p = NULL;\r
+\r
+       p_cache = hob_p->cache;\r
+       hob_p->cache = NULL;\r
+\r
+       hob_p->comp_cb_p = NULL;\r
+       hob_p->async_cb_p = NULL;\r
+       hob_p->ca_context = NULL;\r
+       hob_p->cl_device_h = NULL;\r
+\r
+       cl_spinlock_release( &hob_lock );\r
+\r
+       if( p_async_proc )\r
+       {\r
+               cl_async_proc_destroy( p_async_proc );\r
+               cl_free( p_async_proc );\r
+       }\r
+\r
+       if( p_cache )\r
+               cl_free( p_cache );\r
+\r
+       HCA_PRINT(TRACE_LEVEL_INFORMATION, HCA_DBG_SHIM,("CL: hobs_remove idx %d \n", (int)(hob_p - mlnx_hob_array)));\r
+}\r
+\r
+/////////////////////////////////////////////////////////\r
+/////////////////////////////////////////////////////////\r
+void\r
+mthca_port_cap_to_ibal(\r
+       IN                              u32                     mthca_port_cap,\r
+               OUT                     ib_port_cap_t                           *ibal_port_cap_p)\r
+{\r
+       if (mthca_port_cap & IB_PORT_CM_SUP)\r
+               ibal_port_cap_p->cm = TRUE;\r
+       if (mthca_port_cap & IB_PORT_SNMP_TUNNEL_SUP)\r
+               ibal_port_cap_p->snmp = TRUE;\r
+       if (mthca_port_cap & IB_PORT_DEVICE_MGMT_SUP)\r
+               ibal_port_cap_p->dev_mgmt = TRUE;\r
+       if (mthca_port_cap & IB_PORT_VENDOR_CLASS_SUP)\r
+               ibal_port_cap_p->vend = TRUE;\r
+       if (mthca_port_cap & IB_PORT_SM_DISABLED)\r
+               ibal_port_cap_p->sm_disable = TRUE;\r
+       if (mthca_port_cap & IB_PORT_SM)\r
+               ibal_port_cap_p->sm = TRUE;\r
+}\r
+\r
+\r
+/////////////////////////////////////////////////////////\r
+void\r
+mlnx_conv_hca_cap(\r
+       IN                              struct ib_device *ib_dev,\r
+       IN                              struct ib_device_attr *hca_info_p,\r
+       IN                              struct ib_port_attr  *hca_ports,\r
+       OUT                     ib_ca_attr_t                            *ca_attr_p)\r
+{\r
+       uint8_t                 port_num;\r
+       ib_port_attr_t  *ibal_port_p;\r
+       struct ib_port_attr  *mthca_port_p;\r
+\r
+       ca_attr_p->vend_id  = hca_info_p->vendor_id;\r
+       ca_attr_p->dev_id   = (uint16_t)hca_info_p->vendor_part_id;\r
+       ca_attr_p->revision = (uint16_t)hca_info_p->hw_ver;\r
+       ca_attr_p->fw_ver = hca_info_p->fw_ver;\r
+       ca_attr_p->ca_guid   = *(UNALIGNED64 uint64_t *)&ib_dev->node_guid;\r
+       ca_attr_p->num_ports = ib_dev->phys_port_cnt;\r
+       ca_attr_p->max_qps   = hca_info_p->max_qp;\r
+       ca_attr_p->max_wrs   = hca_info_p->max_qp_wr;\r
+       ca_attr_p->max_sges   = hca_info_p->max_sge;\r
+       ca_attr_p->max_rd_sges = hca_info_p->max_sge_rd;\r
+       ca_attr_p->max_cqs    = hca_info_p->max_cq;\r
+       ca_attr_p->max_cqes  = hca_info_p->max_cqe;\r
+       ca_attr_p->max_pds    = hca_info_p->max_pd;\r
+       ca_attr_p->init_regions = hca_info_p->max_mr;\r
+       ca_attr_p->init_windows = hca_info_p->max_mw;\r
+       ca_attr_p->init_region_size = hca_info_p->max_mr_size;\r
+       ca_attr_p->max_addr_handles = hca_info_p->max_ah;\r
+       ca_attr_p->atomicity     = hca_info_p->atomic_cap;\r
+       ca_attr_p->max_partitions = hca_info_p->max_pkeys;\r
+       ca_attr_p->max_qp_resp_res =(uint8_t) hca_info_p->max_qp_rd_atom;\r
+       ca_attr_p->max_resp_res    = (uint8_t)hca_info_p->max_res_rd_atom;\r
+       ca_attr_p->max_qp_init_depth = (uint8_t)hca_info_p->max_qp_init_rd_atom;\r
+       ca_attr_p->max_ipv6_qps    = hca_info_p->max_raw_ipv6_qp;\r
+       ca_attr_p->max_ether_qps   = hca_info_p->max_raw_ethy_qp;\r
+       ca_attr_p->max_mcast_grps  = hca_info_p->max_mcast_grp;\r
+       ca_attr_p->max_mcast_qps   = hca_info_p->max_total_mcast_qp_attach;\r
+       ca_attr_p->max_qps_per_mcast_grp = hca_info_p->max_mcast_qp_attach;\r
+       ca_attr_p->local_ack_delay = hca_info_p->local_ca_ack_delay;\r
+       ca_attr_p->bad_pkey_ctr_support = hca_info_p->device_cap_flags & IB_DEVICE_BAD_PKEY_CNTR;\r
+       ca_attr_p->bad_qkey_ctr_support = hca_info_p->device_cap_flags & IB_DEVICE_BAD_QKEY_CNTR;\r
+       ca_attr_p->raw_mcast_support    = hca_info_p->device_cap_flags & IB_DEVICE_RAW_MULTI;\r
+       ca_attr_p->apm_support          = hca_info_p->device_cap_flags & IB_DEVICE_AUTO_PATH_MIG;\r
+       ca_attr_p->av_port_check        = hca_info_p->device_cap_flags & IB_DEVICE_UD_AV_PORT_ENFORCE;\r
+       ca_attr_p->change_primary_port  = hca_info_p->device_cap_flags & IB_DEVICE_CHANGE_PHY_PORT;\r
+       ca_attr_p->modify_wr_depth      = hca_info_p->device_cap_flags & IB_DEVICE_RESIZE_MAX_WR;\r
+       ca_attr_p->hw_agents            = FALSE; // in the context of IBAL, the agent is implemented on the host\r
+\r
+       ca_attr_p->num_page_sizes = 1;\r
+       ca_attr_p->p_page_size[0] = PAGE_SIZE; // TBD: extract an array of page sizes from HCA cap\r
+\r
+       for (port_num = 0; port_num <= end_port(ib_dev) - start_port(ib_dev); ++port_num)\r
+       {\r
+               // Setup port pointers\r
+               ibal_port_p = &ca_attr_p->p_port_attr[port_num];\r
+               mthca_port_p = &hca_ports[port_num];\r
+\r
+               // Port Capabilities\r
+               cl_memclr(&ibal_port_p->cap, sizeof(ib_port_cap_t));\r
+               mthca_port_cap_to_ibal(mthca_port_p->port_cap_flags, &ibal_port_p->cap);\r
+\r
+               // Port Attributes\r
+               ibal_port_p->port_num   = port_num + start_port(ib_dev);\r
+               ibal_port_p->port_guid  = ibal_port_p->p_gid_table[0].unicast.interface_id;\r
+               ibal_port_p->lid        = cl_ntoh16(mthca_port_p->lid);\r
+               ibal_port_p->lmc        = mthca_port_p->lmc;\r
+               ibal_port_p->max_vls    = mthca_port_p->max_vl_num;\r
+               ibal_port_p->sm_lid     = cl_ntoh16(mthca_port_p->sm_lid);\r
+               ibal_port_p->sm_sl      = mthca_port_p->sm_sl;\r
+               ibal_port_p->link_state = (mthca_port_p->state != 0) ? (uint8_t)mthca_port_p->state : IB_LINK_DOWN;\r
+               ibal_port_p->num_gids   = (uint16_t)mthca_port_p->gid_tbl_len;\r
+               ibal_port_p->num_pkeys  = mthca_port_p->pkey_tbl_len;\r
+               ibal_port_p->pkey_ctr   = (uint16_t)mthca_port_p->bad_pkey_cntr;\r
+               ibal_port_p->qkey_ctr   = (uint16_t)mthca_port_p->qkey_viol_cntr;\r
+               ibal_port_p->max_msg_size = mthca_port_p->max_msg_sz;\r
+               ibal_port_p->mtu = (uint8_t)mthca_port_p->max_mtu;\r
+\r
+               ibal_port_p->subnet_timeout = mthca_port_p->subnet_timeout;\r
+               // ibal_port_p->local_ack_timeout = 3; // TBD: currently ~32 usec\r
+#if 0\r
+               HCA_PRINT(TRACE_LEVEL_INFORMATION, HCA_DBG_SHIM ,("Port %d port_guid 0x%I64x\n",\r
+                       ibal_port_p->port_num, ibal_port_p->port_guid));\r
+#endif\r
+       }\r
+}\r
+\r
+void cq_comp_handler(struct ib_cq *cq, void *context)\r
+{\r
+       mlnx_hob_t *hob_p = (mlnx_hob_t *)context;\r
+       struct mthca_cq *mcq =(struct mthca_cq *)cq; \r
+       HCA_ENTER(HCA_DBG_CQ);\r
+       if (hob_p) {\r
+               HCA_PRINT(TRACE_LEVEL_INFORMATION ,HCA_DBG_CQ ,("Invoking completion callback\n"));\r
+               (hob_p->comp_cb_p)(mcq->cq_context);\r
+       }\r
+       else {\r
+               HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_CQ ,("Incorrect context. Completion callback was not invoked\n"));\r
+       }\r
+       HCA_EXIT(HCA_DBG_CQ);\r
+}\r
+\r
+void ca_event_handler(struct ib_event *ev, void *context)\r
+{\r
+       mlnx_hob_t *hob_p = (mlnx_hob_t *)context;\r
+       ib_event_rec_t event_rec;\r
+\r
+       // prepare parameters\r
+       event_rec.context = (void *)hob_p->ca_context;\r
+       event_rec.trap.info.port_num = ev->element.port_num;\r
+       event_rec.type = ev->event;\r
+       if (event_rec.type > IB_AE_UNKNOWN) {\r
+               // CL_ASSERT(0); // This shouldn't happen\r
+               HCA_PRINT(TRACE_LEVEL_ERROR,HCA_DBG_SHIM,("Unmapped E_EV_CA event of type 0x%x. Replaced by 0x%x (IB_AE_LOCAL_FATAL)\n", \r
+                       event_rec.type, IB_AE_LOCAL_FATAL));\r
+               event_rec.type = IB_AE_LOCAL_FATAL;\r
+       }\r
+\r
+       // call the user callback\r
+       if (hob_p)\r
+               (hob_p->async_cb_p)(&event_rec);\r
+       else {\r
+               HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_SHIM ,("Incorrect context. Async callback was not invoked\n"));\r
+       }\r
+}\r
+\r
+void qp_event_handler(struct ib_event *ev, void *context)\r
+{\r
+       mlnx_hob_t *hob_p = (mlnx_hob_t *)context;\r
+       ib_event_rec_t event_rec;\r
+       struct mthca_qp *qp_p;\r
+\r
+       // prepare parameters\r
+       event_rec.type = ev->event;\r
+       qp_p = (struct mthca_qp *)ev->element.qp;\r
+       event_rec.context = qp_p->qp_context;\r
+\r
+       // call the user callback\r
+       if (hob_p)\r
+               (hob_p->async_cb_p)(&event_rec);\r
+       else {\r
+               HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_SHIM ,("Incorrect context. Async callback was not invoked\n"));\r
+       }\r
+}\r
+\r
+void cq_event_handler(struct ib_event *ev, void *context)\r
+{\r
+       mlnx_hob_t *hob_p = (mlnx_hob_t *)context;\r
+       ib_event_rec_t event_rec;\r
+       struct mthca_cq *cq_p;\r
+\r
+       // prepare parameters\r
+       event_rec.type = ev->event;\r
+       cq_p = (struct mthca_cq *)ev->element.cq;\r
+       event_rec.context = cq_p->cq_context;\r
+\r
+       // call the user callback\r
+       if (hob_p)\r
+               (hob_p->async_cb_p)(&event_rec);\r
+       else {\r
+               HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_SHIM ,("Incorrect context. Async callback was not invoked\n"));\r
+       }\r
+}\r
+\r
+ib_qp_state_t mlnx_qps_to_ibal(enum ib_qp_state qps)\r
+{\r
+#define MAP_QPS(val1,val2) case val1: ib_qps = val2; break\r
+       ib_qp_state_t ib_qps;\r
+       switch (qps) {\r
+               MAP_QPS( IBQPS_RESET, IB_QPS_RESET );\r
+               MAP_QPS( IBQPS_INIT, IB_QPS_INIT );\r
+               MAP_QPS( IBQPS_RTR, IB_QPS_RTR );\r
+               MAP_QPS( IBQPS_RTS, IB_QPS_RTS );\r
+               MAP_QPS( IBQPS_SQD, IB_QPS_SQD );\r
+               MAP_QPS( IBQPS_SQE, IB_QPS_SQERR );\r
+               MAP_QPS( IBQPS_ERR, IB_QPS_ERROR );\r
+               default:\r
+                       HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_SHIM ,("Unmapped MTHCA qp_state %d\n", qps));\r
+                       ib_qps = 0xffffffff;\r
+       }\r
+       return ib_qps;\r
+}\r
+\r
+enum ib_qp_state mlnx_qps_from_ibal(ib_qp_state_t ib_qps)\r
+{\r
+#define MAP_IBQPS(val1,val2) case val1: qps = val2; break\r
+       enum ib_qp_state qps;\r
+       switch (ib_qps) {\r
+               MAP_IBQPS( IB_QPS_RESET, IBQPS_RESET );\r
+               MAP_IBQPS( IB_QPS_INIT, IBQPS_INIT );\r
+               MAP_IBQPS( IB_QPS_RTR, IBQPS_RTR );\r
+               MAP_IBQPS( IB_QPS_RTS, IBQPS_RTS );\r
+               MAP_IBQPS( IB_QPS_SQD, IBQPS_SQD );\r
+               MAP_IBQPS( IB_QPS_SQD_DRAINING, IBQPS_SQD );\r
+               MAP_IBQPS( IB_QPS_SQD_DRAINED, IBQPS_SQD );\r
+               MAP_IBQPS( IB_QPS_SQERR, IBQPS_SQE );\r
+               MAP_IBQPS( IB_QPS_ERROR, IBQPS_ERR );\r
+               default:\r
+                       HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_SHIM ,("Unmapped IBAL qp_state %d\n", ib_qps));\r
+                       qps = 0xffffffff;\r
+       }\r
+       return qps;\r
+}\r
+\r
+ib_api_status_t\r
+mlnx_conv_qp_modify_attr(\r
+       IN       const  struct ib_qp *ib_qp_p,\r
+       IN                              ib_qp_type_t    qp_type,\r
+       IN       const  ib_qp_mod_t *modify_attr_p,             \r
+       OUT     struct ib_qp_attr *qp_attr_p,\r
+       OUT     int *qp_attr_mask_p\r
+       )\r
+{\r
+       int err;\r
+       ib_api_status_t         status = IB_SUCCESS;\r
+       struct mthca_qp *qp_p = (struct mthca_qp *)ib_qp_p;\r
+\r
+       RtlZeroMemory( qp_attr_p, sizeof *qp_attr_p );\r
+       *qp_attr_mask_p = IB_QP_STATE;\r
+       qp_attr_p->qp_state = mlnx_qps_from_ibal( modify_attr_p->req_state ); \r
+\r
+       // skipped cases\r
+       if (qp_p->state == IBQPS_RESET && modify_attr_p->req_state != IB_QPS_INIT)\r
+               return IB_NOT_DONE;\r
+               \r
+       switch (modify_attr_p->req_state) {\r
+       case IB_QPS_RESET:\r
+       case IB_QPS_ERROR:\r
+       case IB_QPS_SQERR:\r
+       case IB_QPS_TIME_WAIT:\r
+               break;\r
+\r
+       case IB_QPS_INIT:\r
+               \r
+               switch (qp_type) {\r
+                       case IB_QPT_RELIABLE_CONN:\r
+                       case IB_QPT_UNRELIABLE_CONN:\r
+                               *qp_attr_mask_p |= IB_QP_PORT | IB_QP_PKEY_INDEX |IB_QP_ACCESS_FLAGS;\r
+                               qp_attr_p->qp_access_flags = map_qp_ibal_acl(modify_attr_p->state.init.access_ctrl);\r
+                               break;\r
+                       case IB_QPT_UNRELIABLE_DGRM:\r
+                       case IB_QPT_QP0:\r
+                       case IB_QPT_QP1:\r
+                       default:        \r
+                               *qp_attr_mask_p |= IB_QP_PORT | IB_QP_QKEY | IB_QP_PKEY_INDEX ;\r
+                               qp_attr_p->qkey          = cl_ntoh32 (modify_attr_p->state.init.qkey);\r
+                               break;\r
+               }                               \r
+               \r
+               // IB_QP_PORT\r
+               qp_attr_p->port_num    = modify_attr_p->state.init.primary_port;\r
+\r
+               // IB_QP_PKEY_INDEX\r
+               qp_attr_p->pkey_index = modify_attr_p->state.init.pkey_index;\r
+\r
+               break;\r
+               \r
+       case IB_QPS_RTR:\r
+               /* modifying the WQE depth is not supported */\r
+               if( modify_attr_p->state.rtr.opts & IB_MOD_QP_SQ_DEPTH ||\r
+                       modify_attr_p->state.rtr.opts & IB_MOD_QP_RQ_DEPTH )    {\r
+                       status = IB_UNSUPPORTED;\r
+                       break;\r
+               }\r
+\r
+               switch (qp_type) {\r
+                       case IB_QPT_RELIABLE_CONN:\r
+                               *qp_attr_mask_p |= /* required flags */\r
+                                       IB_QP_DEST_QPN |IB_QP_RQ_PSN | IB_QP_MAX_DEST_RD_ATOMIC |\r
+                                       IB_QP_AV |IB_QP_PATH_MTU | IB_QP_MIN_RNR_TIMER;\r
+\r
+                               // IB_QP_DEST_QPN\r
+                               qp_attr_p->dest_qp_num          = cl_ntoh32 (modify_attr_p->state.rtr.dest_qp);\r
+\r
+                               // IB_QP_RQ_PSN\r
+                               qp_attr_p->rq_psn                               = cl_ntoh32 (modify_attr_p->state.rtr.rq_psn);\r
+                               \r
+                               // IB_QP_MAX_DEST_RD_ATOMIC\r
+                               qp_attr_p->max_dest_rd_atomic   = modify_attr_p->state.rtr.resp_res;\r
+\r
+                               // IB_QP_AV, IB_QP_PATH_MTU: Convert primary RC AV (mandatory)\r
+                               err = mlnx_conv_ibal_av(ib_qp_p->device,\r
+                                       &modify_attr_p->state.rtr.primary_av, &qp_attr_p->ah_attr);\r
+                               if (err) {\r
+                                       status = IB_ERROR;\r
+                                       break;\r
+                               }\r
+                               qp_attr_p->path_mtu             = modify_attr_p->state.rtr.primary_av.conn.path_mtu; // MTU\r
+                               qp_attr_p->timeout              = modify_attr_p->state.rtr.primary_av.conn.local_ack_timeout; // local ack timeout\r
+                               qp_attr_p->retry_cnt            = modify_attr_p->state.rtr.primary_av.conn.seq_err_retry_cnt; // retry count\r
+                               qp_attr_p->rnr_retry            = modify_attr_p->state.rtr.primary_av.conn.rnr_retry_cnt; // RNR retry count\r
+\r
+                               // IB_QP_MIN_RNR_TIMER\r
+                               if (modify_attr_p->state.rtr.opts & IB_MOD_QP_RNR_NAK_TIMEOUT) {\r
+                                       qp_attr_p->min_rnr_timer         = modify_attr_p->state.rtr.rnr_nak_timeout;\r
+                               }\r
+\r
+                               // IB_QP_ACCESS_FLAGS: Convert Remote Atomic Flags\r
+                               if (modify_attr_p->state.rtr.opts & IB_MOD_QP_ACCESS_CTRL) {\r
+                                       *qp_attr_mask_p |= IB_QP_ACCESS_FLAGS;          /* optional flag */\r
+                                       qp_attr_p->qp_access_flags = map_qp_ibal_acl(modify_attr_p->state.init.access_ctrl);\r
+                               }\r
+\r
+                               // IB_QP_ALT_PATH: Convert alternate RC AV\r
+                               if (modify_attr_p->state.rtr.opts & IB_MOD_QP_ALTERNATE_AV) {\r
+                                       *qp_attr_mask_p |= IB_QP_ALT_PATH;      /* required flag */\r
+                                       err = mlnx_conv_ibal_av(ib_qp_p->device,\r
+                                               &modify_attr_p->state.rtr.alternate_av, &qp_attr_p->alt_ah_attr);\r
+                                       if (err) {\r
+                                               status = IB_ERROR;\r
+                                               break;\r
+                                       }\r
+                                       qp_attr_p->alt_timeout           = modify_attr_p->state.rtr.alternate_av.conn.local_ack_timeout; // XXX: conv\r
+                               }\r
+\r
+                               // IB_QP_PKEY_INDEX \r
+                               if (modify_attr_p->state.rtr.opts & IB_MOD_QP_PKEY) {\r
+                                       *qp_attr_mask_p |= IB_QP_PKEY_INDEX;    \r
+                                       qp_attr_p->pkey_index = modify_attr_p->state.rtr.pkey_index;\r
+                               }\r
+                               break;\r
+                               \r
+                       case IB_QPT_UNRELIABLE_CONN:\r
+                               *qp_attr_mask_p |= /* required flags */\r
+                                       IB_QP_DEST_QPN |IB_QP_RQ_PSN | IB_QP_AV | IB_QP_PATH_MTU;\r
+\r
+                               // IB_QP_DEST_QPN\r
+                               qp_attr_p->dest_qp_num          = cl_ntoh32 (modify_attr_p->state.rtr.dest_qp);\r
+\r
+                               // IB_QP_RQ_PSN\r
+                               qp_attr_p->rq_psn                               = cl_ntoh32 (modify_attr_p->state.rtr.rq_psn);\r
+\r
+                               // IB_QP_PATH_MTU\r
+                               qp_attr_p->path_mtu             = modify_attr_p->state.rtr.primary_av.conn.path_mtu;\r
+\r
+                               // IB_QP_AV: Convert primary AV (mandatory)\r
+                               err = mlnx_conv_ibal_av(ib_qp_p->device,\r
+                                       &modify_attr_p->state.rtr.primary_av, &qp_attr_p->ah_attr);\r
+                               if (err) {\r
+                                       status = IB_ERROR;\r
+                                       break;\r
+                               }\r
+\r
+                               // IB_QP_ACCESS_FLAGS: Convert Remote Atomic Flags\r
+                               if (modify_attr_p->state.rtr.opts & IB_MOD_QP_ACCESS_CTRL) {\r
+                                       *qp_attr_mask_p |= IB_QP_ACCESS_FLAGS;          /* optional flag */\r
+                                       qp_attr_p->qp_access_flags = map_qp_ibal_acl(modify_attr_p->state.init.access_ctrl);\r
+                               }\r
+\r
+                               // IB_QP_ALT_PATH: Convert alternate RC AV\r
+                               if (modify_attr_p->state.rtr.opts & IB_MOD_QP_ALTERNATE_AV) {\r
+                                       *qp_attr_mask_p |= IB_QP_ALT_PATH;      /* required flag */\r
+                                       err = mlnx_conv_ibal_av(ib_qp_p->device,\r
+                                               &modify_attr_p->state.rtr.alternate_av, &qp_attr_p->alt_ah_attr);\r
+                                       if (err) {\r
+                                               status = IB_ERROR;\r
+                                               break;\r
+                                       }\r
+                               }\r
+\r
+                               // IB_QP_PKEY_INDEX \r
+                               if (modify_attr_p->state.rtr.opts & IB_MOD_QP_PKEY) {\r
+                                       *qp_attr_mask_p |= IB_QP_PKEY_INDEX;    \r
+                                       qp_attr_p->pkey_index = modify_attr_p->state.rtr.pkey_index;\r
+                               }\r
+                               break;\r
+                                       \r
+                       case IB_QPT_UNRELIABLE_DGRM:\r
+                       case IB_QPT_QP0:\r
+                       case IB_QPT_QP1:\r
+                       default:        \r
+                               // IB_QP_PKEY_INDEX \r
+                               if (modify_attr_p->state.rtr.opts & IB_MOD_QP_PKEY) {\r
+                                       *qp_attr_mask_p |= IB_QP_PKEY_INDEX;    \r
+                                       qp_attr_p->pkey_index = modify_attr_p->state.rtr.pkey_index;\r
+                               }\r
+\r
+                               // IB_QP_QKEY\r
+                               if (modify_attr_p->state.rtr.opts & IB_MOD_QP_QKEY) {\r
+                                       *qp_attr_mask_p |= IB_QP_QKEY;  \r
+                                       qp_attr_p->qkey          = cl_ntoh32 (modify_attr_p->state.rtr.qkey);\r
+                               }\r
+                               break;\r
+                               \r
+               }\r
+               break;\r
+               \r
+       case IB_QPS_RTS:\r
+               /* modifying the WQE depth is not supported */\r
+               if( modify_attr_p->state.rts.opts & IB_MOD_QP_SQ_DEPTH ||\r
+                       modify_attr_p->state.rts.opts & IB_MOD_QP_RQ_DEPTH )\r
+               {\r
+                       status = IB_UNSUPPORTED;\r
+                       break;\r
+               }\r
+\r
+               switch (qp_type) {\r
+                       case IB_QPT_RELIABLE_CONN:\r
+                               *qp_attr_mask_p |= /* required flags */\r
+                                       IB_QP_SQ_PSN |IB_QP_MAX_QP_RD_ATOMIC | IB_QP_TIMEOUT |\r
+                                       IB_QP_RETRY_CNT |IB_QP_RNR_RETRY;\r
+\r
+                               // IB_QP_MAX_QP_RD_ATOMIC\r
+                               qp_attr_p->max_rd_atomic        = modify_attr_p->state.rts.init_depth;\r
+\r
+                               // IB_QP_TIMEOUT\r
+                               qp_attr_p->timeout               = modify_attr_p->state.rts.local_ack_timeout; // XXX: conv\r
+                               \r
+                               // IB_QP_RETRY_CNT\r
+                               qp_attr_p->retry_cnt = modify_attr_p->state.rts.retry_cnt;\r
+                               \r
+                               // IB_QP_RNR_RETRY\r
+                               qp_attr_p->rnr_retry     = modify_attr_p->state.rts.rnr_retry_cnt;\r
+\r
+                               // IB_QP_MAX_DEST_RD_ATOMIC: Update the responder resources for RDMA/ATOMIC (optional for SQD->RTS)\r
+                               if (modify_attr_p->state.rts.opts & IB_MOD_QP_RESP_RES) {\r
+                                       *qp_attr_mask_p |= IB_QP_MAX_DEST_RD_ATOMIC;    \r
+                                       qp_attr_p->max_dest_rd_atomic = modify_attr_p->state.rts.resp_res;\r
+                               }\r
+\r
+#if 0\r
+               // Linux patch 4793: PKEY_INDEX is not a legal parameter in the RTR->RTS transition.\r
+\r
+                               // IB_QP_PKEY_INDEX \r
+                               if (modify_attr_p->state.rtr.opts & IB_MOD_QP_PKEY) {\r
+                                       *qp_attr_mask_p |= IB_QP_PKEY_INDEX;    \r
+                                       qp_attr_p->pkey_index = modify_attr_p->state.rts.pkey_index;\r
+                               }\r
+#endif                         \r
+\r
+                               // IB_QP_MIN_RNR_TIMER\r
+                               if (modify_attr_p->state.rtr.opts & IB_MOD_QP_RNR_NAK_TIMEOUT) {\r
+                                       *qp_attr_mask_p |= IB_QP_MIN_RNR_TIMER; \r
+                                       qp_attr_p->min_rnr_timer         = modify_attr_p->state.rts.rnr_nak_timeout;\r
+                               }\r
+\r
+                               // IB_QP_PATH_MIG_STATE\r
+                               if (modify_attr_p->state.rtr.opts & IB_MOD_QP_APM_STATE) {\r
+                                       *qp_attr_mask_p |= IB_QP_PATH_MIG_STATE;        \r
+                                       qp_attr_p->path_mig_state =  modify_attr_p->state.rts.apm_state;\r
+                               }\r
+\r
+                               // IB_QP_ACCESS_FLAGS\r
+                               if (modify_attr_p->state.rts.opts & IB_MOD_QP_ACCESS_CTRL) {\r
+                                       *qp_attr_mask_p |= IB_QP_ACCESS_FLAGS;          /* optional flags */\r
+                                       qp_attr_p->qp_access_flags = map_qp_ibal_acl(modify_attr_p->state.init.access_ctrl);\r
+                               }\r
+\r
+                               // IB_QP_ALT_PATH: Convert alternate RC AV\r
+                               if (modify_attr_p->state.rts.opts & IB_MOD_QP_ALTERNATE_AV) {\r
+                                       *qp_attr_mask_p |= IB_QP_ALT_PATH;      /* optional flag */\r
+                                       err = mlnx_conv_ibal_av(ib_qp_p->device,\r
+                                               &modify_attr_p->state.rts.alternate_av, &qp_attr_p->alt_ah_attr);\r
+                                       if (err) {\r
+                                               status = IB_ERROR;\r
+                                               break;\r
+                                       }\r
+                                       qp_attr_p->alt_timeout           = modify_attr_p->state.rts.alternate_av.conn.local_ack_timeout; // XXX: conv\r
+                               }\r
+                               break;\r
+                               \r
+                       case IB_QPT_UNRELIABLE_CONN:\r
+                               *qp_attr_mask_p |= /* required flags */\r
+                                       IB_QP_SQ_PSN;\r
+\r
+                               // IB_QP_MAX_DEST_RD_ATOMIC: Update the responder resources for RDMA/ATOMIC (optional for SQD->RTS)\r
+                               if (modify_attr_p->state.rts.opts & IB_MOD_QP_RESP_RES) {\r
+                                       *qp_attr_mask_p |= IB_QP_MAX_DEST_RD_ATOMIC;    \r
+                                       qp_attr_p->max_dest_rd_atomic = modify_attr_p->state.rts.resp_res;\r
+                               }\r
+\r
+#if 0\r
+               // Linux patch 4793: PKEY_INDEX is not a legal parameter in the RTR->RTS transition.\r
+\r
+                               // IB_QP_PKEY_INDEX \r
+                               if (modify_attr_p->state.rtr.opts & IB_MOD_QP_PKEY) {\r
+                                       *qp_attr_mask_p |= IB_QP_PKEY_INDEX;    \r
+                                       qp_attr_p->pkey_index = modify_attr_p->state.rts.pkey_index;\r
+                               }\r
+#endif                         \r
+\r
+                               // IB_QP_PATH_MIG_STATE\r
+                               if (modify_attr_p->state.rtr.opts & IB_MOD_QP_APM_STATE) {\r
+                                       *qp_attr_mask_p |= IB_QP_PATH_MIG_STATE;        \r
+                                       qp_attr_p->path_mig_state =  modify_attr_p->state.rts.apm_state;\r
+                               }\r
+\r
+                               // IB_QP_ACCESS_FLAGS\r
+                               if (modify_attr_p->state.rts.opts & IB_MOD_QP_ACCESS_CTRL) {\r
+                                       *qp_attr_mask_p |= IB_QP_ACCESS_FLAGS;          /* optional flags */\r
+                                       qp_attr_p->qp_access_flags = map_qp_ibal_acl(modify_attr_p->state.init.access_ctrl);\r
+                               }\r
+\r
+                               // IB_QP_ALT_PATH: Convert alternate RC AV\r
+                               if (modify_attr_p->state.rts.opts & IB_MOD_QP_ALTERNATE_AV) {\r
+                                       *qp_attr_mask_p |= IB_QP_ALT_PATH;      /* optional flag */\r
+                                       err = mlnx_conv_ibal_av(ib_qp_p->device,\r
+                                               &modify_attr_p->state.rts.alternate_av, &qp_attr_p->alt_ah_attr);\r
+                                       if (err) {\r
+                                               status = IB_ERROR;\r
+                                               break;\r
+                                       }\r
+                               }\r
+                               break;\r
+                                       \r
+                       case IB_QPT_UNRELIABLE_DGRM:\r
+                       case IB_QPT_QP0:\r
+                       case IB_QPT_QP1:\r
+                       default:        \r
+                               *qp_attr_mask_p |= /* required flags */\r
+                                       IB_QP_SQ_PSN;\r
+\r
+                               // IB_QP_QKEY\r
+                               if (modify_attr_p->state.rtr.opts & IB_MOD_QP_QKEY) {\r
+                                       *qp_attr_mask_p |= IB_QP_QKEY;  \r
+                                       qp_attr_p->qkey          = cl_ntoh32 (modify_attr_p->state.rtr.qkey);\r
+                               }\r
+                               break;\r
+                               \r
+                               break;\r
+                               \r
+               }\r
+\r
+               // IB_QP_SQ_PSN: common for all\r
+               qp_attr_p->sq_psn = cl_ntoh32 (modify_attr_p->state.rts.sq_psn);\r
+               //NB: IB_QP_CUR_STATE flag is not provisioned by IBAL\r
+               break;\r
+               \r
+       case IB_QPS_SQD:\r
+       case IB_QPS_SQD_DRAINING:\r
+       case IB_QPS_SQD_DRAINED:\r
+               *qp_attr_mask_p |= IB_QP_EN_SQD_ASYNC_NOTIFY;\r
+               qp_attr_p->en_sqd_async_notify = (u8)modify_attr_p->state.sqd.sqd_event;\r
+               HCA_PRINT(TRACE_LEVEL_WARNING ,HCA_DBG_SHIM ,("IB_QP_EN_SQD_ASYNC_NOTIFY appears to be unsupported\n"));\r
+               break;\r
+               \r
+       default:        \r
+               //NB: is this an error case and we need this message  ? What about returning an error ?\r
+               HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_SHIM ,("Unmapped qp_state %d\n", modify_attr_p->req_state));\r
+               break;\r
+               \r
+       }\r
+\r
+       return status;\r
+}      \r
+\r
+int\r
+mlnx_conv_ibal_av(\r
+       IN              const   struct ib_device *ib_dev_p,\r
+       IN              const   ib_av_attr_t                            *ibal_av_p,\r
+       OUT                     struct ib_ah_attr       *ah_attr_p)\r
+{\r
+       int err = 0;\r
+       u8 port_num;\r
+       u16 gid_index;\r
+       \r
+       ah_attr_p->port_num = ibal_av_p->port_num;\r
+       ah_attr_p->sl   = ibal_av_p->sl;\r
+       ah_attr_p->dlid = cl_ntoh16(ibal_av_p->dlid);\r
+       //TODO: how is static_rate coded?\r
+       ah_attr_p->static_rate   =\r
+               (ibal_av_p->static_rate == IB_PATH_RECORD_RATE_10_GBS ? 0 : 3);\r
+       ah_attr_p->src_path_bits = ibal_av_p->path_bits; // PATH:\r
+\r
+       /* For global destination or Multicast address:*/\r
+       if (ibal_av_p->grh_valid)\r
+       {\r
+               ah_attr_p->ah_flags |= IB_AH_GRH;\r
+               ah_attr_p->grh.hop_limit     = ibal_av_p->grh.hop_limit;\r
+               ib_grh_get_ver_class_flow( ibal_av_p->grh.ver_class_flow, NULL,\r
+                       &ah_attr_p->grh.traffic_class, &ah_attr_p->grh.flow_label );\r
+               err = ib_find_cached_gid((struct ib_device *)ib_dev_p, \r
+                       (union ib_gid   *)ibal_av_p->grh.src_gid.raw, &port_num, &gid_index);\r
+               if (err) {\r
+                       HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_SHIM ,("ib_find_cached_gid failed %d (%#x). Using default:  sgid_index = 0\n", err, err));\r
+                       gid_index = 0;\r
+               }\r
+               else if (port_num != ah_attr_p->port_num) {\r
+                       HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_SHIM ,("ib_find_cached_gid returned wrong port_num %u (Expected - %u). Using the expected.\n", \r
+                               (u32)port_num, (u32)ah_attr_p->port_num));\r
+               }\r
+               ah_attr_p->grh.sgid_index = (u8)gid_index;\r
+               RtlCopyMemory(ah_attr_p->grh.dgid.raw, ibal_av_p->grh.dest_gid.raw, sizeof(ah_attr_p->grh.dgid));\r
+       }\r
+\r
+       return err;\r
+}\r
+\r
+int\r
+mlnx_conv_mthca_av(\r
+       IN              const   struct ib_ah *ib_ah_p,\r
+       OUT                     ib_av_attr_t                            *ibal_av_p)\r
+{\r
+       int err = 0;\r
+       struct ib_ud_header header;\r
+       struct mthca_ah *ah_p = (struct mthca_ah *)ib_ah_p;\r
+       struct ib_device *ib_dev_p = ib_ah_p->pd->device;\r
+       struct mthca_dev *dev_p = (struct mthca_dev *)ib_dev_p;\r
+\r
+       err = mthca_read_ah( dev_p, ah_p, &header);\r
+       if (err)\r
+               goto err_read_ah;\r
+\r
+       // common part\r
+       ibal_av_p->sl                   = header.lrh.service_level;\r
+       mthca_get_av_params(ah_p, &ibal_av_p->port_num,\r
+               &ibal_av_p->dlid, &ibal_av_p->static_rate, &ibal_av_p->path_bits );\r
+\r
+       // GRH\r
+       ibal_av_p->grh_valid = header.grh_present;\r
+       if (ibal_av_p->grh_valid) {\r
+               ibal_av_p->grh.ver_class_flow = ib_grh_set_ver_class_flow(\r
+                       header.grh.ip_version, header.grh.traffic_class, header.grh.flow_label );\r
+               ibal_av_p->grh.hop_limit = header.grh.hop_limit;\r
+               RtlCopyMemory(ibal_av_p->grh.src_gid.raw, \r
+                       header.grh.source_gid.raw, sizeof(ibal_av_p->grh.src_gid));\r
+               RtlCopyMemory(ibal_av_p->grh.dest_gid.raw, \r
+                       header.grh.destination_gid.raw, sizeof(ibal_av_p->grh.dest_gid));\r
+       }\r
+\r
+       //TODO: unclear how to fill 'conn'; note that the previous version did not fill it either.\r
+\r
+err_read_ah:\r
+               return err;\r
+}\r
+\r
+void\r
+mlnx_modify_ah(\r
+       IN              const   struct ib_ah *ib_ah_p,\r
+       IN      const   struct ib_ah_attr *ah_attr_p)\r
+{\r
+       struct ib_device *ib_dev_p = ib_ah_p->pd->device;\r
+       struct mthca_dev *dev_p = (struct mthca_dev *)ib_dev_p;\r
+       \r
+       mthca_set_av_params(dev_p, (struct mthca_ah *)ib_ah_p, (struct ib_ah_attr *)ah_attr_p );\r
+}\r
+\r
diff --git a/trunk/hw/mthca/kernel/hca_data.h b/trunk/hw/mthca/kernel/hca_data.h
new file mode 100644 (file)
index 0000000..47575b9
--- /dev/null
@@ -0,0 +1,380 @@
+/*\r
+ * Copyright (c) 2005 SilverStorm Technologies.  All rights reserved.\r
+ * Copyright (c) 2004-2005 Mellanox Technologies, Inc. All rights reserved. \r
+ *\r
+ * This software is available to you under the OpenIB.org BSD license\r
+ * below:\r
+ *\r
+ *     Redistribution and use in source and binary forms, with or\r
+ *     without modification, are permitted provided that the following\r
+ *     conditions are met:\r
+ *\r
+ *      - Redistributions of source code must retain the above\r
+ *        copyright notice, this list of conditions and the following\r
+ *        disclaimer.\r
+ *\r
+ *      - Redistributions in binary form must reproduce the above\r
+ *        copyright notice, this list of conditions and the following\r
+ *        disclaimer in the documentation and/or other materials\r
+ *        provided with the distribution.\r
+ *\r
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,\r
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\r
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND\r
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS\r
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN\r
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN\r
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\r
+ * SOFTWARE.\r
+ *\r
+ * $Id: hca_data.h 148 2005-07-12 07:48:46Z sleybo $\r
+ */\r
+\r
+#ifndef __HCA_DATA_H__\r
+#define __HCA_DATA_H__\r
+\r
+\r
+#include <iba/ib_ci.h>\r
+#include <complib/comp_lib.h>\r
+#include <mt_l2w.h>\r
+#include <mthca_provider.h>\r
+\r
+extern uint32_t                        g_sqp_max_avs;\r
+extern char                            mlnx_uvp_lib_name[];\r
+\r
+\r
+#define MLNX_MAX_HCA   4\r
+#define MLNX_NUM_HOBKL MLNX_MAX_HCA\r
+#define MLNX_NUM_HOBUL MLNX_MAX_HCA\r
+#define MLNX_NUM_CB_THR     1\r
+#define MLNX_SIZE_CB_POOL 256\r
+#define MLNX_UAL_ALLOC_HCA_UL_RES 1\r
+#define MLNX_UAL_FREE_HCA_UL_RES 2\r
+\r
+\r
+// Defines for QP ops\r
+#define        MLNX_MAX_NUM_SGE 8\r
+#define        MLNX_MAX_WRS_PER_CHAIN 4\r
+\r
+#define MLNX_NUM_RESERVED_QPS 16\r
+\r
+/*\r
+ * Completion model.\r
+ *     0: No DPC processor assignment\r
+ *     1: DPCs per-CQ, processor affinity set at CQ initialization time.\r
+ *     2: DPCs per-CQ, processor affinity set at runtime.\r
+ *     3: DPCs per-CQ, no processor affinity set.\r
+ */\r
+#define MLNX_COMP_MODEL                3\r
+\r
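+// VALIDATE_INDEX (checked builds only): if 'index' >= 'limit', set 'status' to\r
+// 'error', log the file/line and jump to 'label'; it compiles away in free builds.\r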
+#ifdef DBG\r
+#define VALIDATE_INDEX(index, limit, error, label) \\r
+       {                  \\r
+               if (index >= limit) \\r
+               {                   \\r
+                       status = error;   \\r
+                       HCA_PRINT(TRACE_LEVEL_ERROR  , g_mlnx_dbg_lvl  ,("file %s line %d\n", __FILE__, __LINE__));\\r
+                       goto label;       \\r
+               }                   \\r
+       }\r
+#else\r
+#define VALIDATE_INDEX(index, limit, error, label)\r
+#endif\r
+\r
+\r
+\r
+// Typedefs\r
+\r
+typedef enum {\r
+       E_EV_CA=1,\r
+       E_EV_QP,\r
+       E_EV_CQ,\r
+       E_EV_LAST\r
+} ENUM_EVENT_CLASS;\r
+\r
+typedef enum {\r
+       E_MARK_CA=1, // Channel Adaptor\r
+       E_MARK_PD, // Protection Domain\r
+       E_MARK_CQ, // Completion Queue\r
+       E_MARK_QP, // Queue Pair\r
+       E_MARK_AV, // Address Vector (UD)\r
+       E_MARK_MG, // Multicast Group\r
+       E_MARK_MR, // Memory Region\r
+       E_MARK_MW, // Memory Windows\r
+       E_MARK_INVALID,\r
+} ENUM_MARK;\r
+\r
+typedef enum {\r
+       E_MR_PHYS=1,\r
+       E_MR_SHARED,\r
+       E_MR_ANY,\r
+       E_MR_INVALID\r
+} ENUM_MR_TYPE;\r
+\r
+/*\r
+ * Attribute cache for port info saved to expedite local MAD processing.\r
+ * Note that the cache accounts for the worst case GID and PKEY table size\r
+ * but is allocated from paged pool, so it's nothing to worry about.\r
+ */\r
+\r
+typedef struct _guid_block\r
+{\r
+       boolean_t                               valid;\r
+       ib_guid_info_t                  tbl;\r
+\r
+}      mlnx_guid_block_t;\r
+\r
+typedef struct _port_info_cache\r
+{\r
+       boolean_t                               valid;\r
+       ib_port_info_t                  info;\r
+\r
+}      mlnx_port_info_cache_t;\r
+\r
+typedef struct _pkey_block\r
+{\r
+       boolean_t                               valid;\r
+       ib_pkey_table_info_t    tbl;\r
+\r
+}      mlnx_pkey_block_t;\r
+\r
+typedef struct _sl_vl_cache\r
+{\r
+       boolean_t                               valid;\r
+       ib_slvl_table_t                 tbl;\r
+\r
+}      mlnx_sl_vl_cache_t;\r
+\r
+typedef struct _vl_arb_block\r
+{\r
+       boolean_t                               valid;\r
+       ib_vl_arb_table_t               tbl;\r
+\r
+}      mlnx_vl_arb_block_t;\r
+\r
+typedef struct _attr_cache\r
+{\r
+       mlnx_guid_block_t               guid_block[32];\r
+       mlnx_port_info_cache_t  port_info;\r
+       mlnx_pkey_block_t               pkey_tbl[2048];\r
+       mlnx_sl_vl_cache_t              sl_vl;\r
+       mlnx_vl_arb_block_t             vl_arb[4];\r
+\r
+}      mlnx_cache_t;\r
+\r
+typedef struct _ib_ca {\r
+       ENUM_MARK           mark;\r
+       ci_completion_cb_t  comp_cb_p;\r
+       ci_async_event_cb_t async_cb_p;\r
+       const void          *ca_context;\r
+       void                *cl_device_h;\r
+       uint32_t           index;\r
+       cl_async_proc_t     *async_proc_mgr_p;\r
+       mlnx_cache_t            *cache; // Cached port attributes.\r
+       const void * __ptr64    p_dev_obj; // store underlying device object\r
+} mlnx_hob_t;\r
+\r
+typedef struct HOBUL_t {\r
+       int dummy;\r
+#ifdef WIN_TO_BE_REMOVED       \r
+       pd_info_t         *pd_info_tbl;\r
+       HH_hca_hndl_t     hh_hndl;                /* For HH direct access */\r
+       HHUL_hca_hndl_t   hhul_hndl;              /* user level HCA resources handle for HH */\r
+       uint32_t         cq_idx_mask;            /*                                                */\r
+       uint32_t         qp_idx_mask;            /*                                                */\r
+       uint32_t         vendor_id;              /* \                                              */\r
+       uint32_t         device_id;              /*  >  3 items needed for initializing user level */\r
+       void              *hca_ul_resources_p;    /* /                                              */\r
+       MT_size_t         cq_ul_resources_sz;     /* Needed for allocating user resources for CQs  */\r
+       MT_size_t         qp_ul_resources_sz;     /* Needed for allocating user resources for QPs  */\r
+       MT_size_t         pd_ul_resources_sz;     /* Needed for allocating user resources for PDs  */\r
+       uint32_t         max_cq;                 /* Max num. of CQs - size of following table */\r
+       cq_info_t         *cq_info_tbl;\r
+       uint32_t         max_qp;                 /* Max num. of QPs - size of following table */\r
+       qp_info_t         *qp_info_tbl;\r
+       uint32_t         max_pd;                 /* Max num. of PDs - size of following table */\r
+       uint32_t         log2_mpt_size;\r
+       atomic32_t        count;\r
+#endif \r
+} mlnx_hobul_t, *mlnx_hobul_hndl_t;\r
+\r
+typedef struct _ib_mcast {\r
+       ib_gid_t         mcast_gid;\r
+       struct ib_qp *ib_qp_p;\r
+       uint16_t                        mcast_lid;\r
+} mlnx_mcast_t;\r
+\r
+typedef struct _mlnx_hca_t {\r
+       cl_list_item_t  list_item;                      // to include in the HCA chain\r
+       net64_t                 guid;                                   // HCA node Guid\r
+       struct mthca_dev *mdev;         // VP Driver device\r
+       uint32_t                        hw_ver;                         // HCA HW version\r
+       mlnx_hob_t              hob;                                    // HOB - IBAL-related HCA resources\r
+       mlnx_hobul_t    hobul;                                  // HOBUL - IBAL-related kernel client resources\r
+\r
+#ifdef WIN_TO_BE_REMOVED \r
+       // removed as it is found in p_ext->cl_ext.p_pdo\r
+       const void* __ptr64     p_dev_obj;              // Driver PDO\r
+#endif \r
+} mlnx_hca_t;\r
+\r
+\r
+typedef mlnx_hob_t *mlnx_hca_h;\r
+\r
+// Global Variables\r
+//extern mlnx_hca_t       mlnx_hca_array[];\r
+//extern uint32_t         mlnx_num_hca;\r
+\r
+extern mlnx_hob_t   mlnx_hob_array[];\r
+extern mlnx_hobul_t *mlnx_hobul_array[];\r
+\r
+// Functions\r
+void\r
+setup_ci_interface(\r
+       IN              const   ib_net64_t                                      ca_guid,\r
+               OUT                     ci_interface_t                          *p_interface );\r
+\r
+void\r
+mlnx_hca_insert(\r
+       IN                              mlnx_hca_t                                      *p_hca );\r
+\r
+void\r
+mlnx_hca_remove(\r
+       IN                              mlnx_hca_t                                      *p_hca );\r
+\r
+mlnx_hca_t*\r
+mlnx_hca_from_guid(\r
+       IN                              ib_net64_t                                      guid );\r
+\r
+/*\r
+void\r
+mlnx_names_from_guid(\r
+       IN                              ib_net64_t                                      guid,\r
+               OUT                     char                                            **hca_name_p,\r
+               OUT                     char                                            **dev_name_p);\r
+*/\r
+\r
+cl_status_t\r
+mlnx_hcas_init( void );\r
+\r
+cl_status_t\r
+mlnx_hobs_init( void );\r
+\r
+ib_api_status_t\r
+mlnx_hobs_insert(\r
+       IN                              mlnx_hca_t                                      *p_hca,\r
+               OUT                     mlnx_hob_t                                      **hob_p);\r
+\r
+\r
+ib_api_status_t\r
+mlnx_hobs_set_cb(\r
+       IN                              mlnx_hob_t                                      *hob_p, \r
+       IN                              ci_completion_cb_t                      comp_cb_p,\r
+       IN                              ci_async_event_cb_t                     async_cb_p,\r
+       IN              const   void* const                                     ib_context);\r
+\r
+ib_api_status_t\r
+mlnx_hobs_get_context(\r
+       IN                              mlnx_hob_t                                      *hob_p,\r
+               OUT                     void                                            **context_p);\r
+\r
+ib_api_status_t\r
+mlnx_hobs_create_device(\r
+       IN                              mlnx_hob_t                                      *hob_p,\r
+               OUT                     char*                                           dev_name);\r
+\r
+void\r
+mlnx_hobs_remove(\r
+       IN                              mlnx_hob_t                                      *hob_p);\r
+\r
+mlnx_hobul_t *\r
+mlnx_hobs_get_hobul(\r
+       IN                              mlnx_hob_t                                      *hob_p);\r
+\r
+void\r
+mlnx_hobul_get(\r
+       IN                              mlnx_hob_t                                      *hob_p,\r
+               OUT                     void                                            **resources_p );\r
+\r
+void\r
+mlnx_hobul_delete(\r
+       IN                              mlnx_hob_t                                      *hob_p);\r
+\r
+void\r
+mlnx_conv_hca_cap(\r
+       IN                              struct ib_device *ib_dev,\r
+       IN                              struct ib_device_attr *hca_info_p,\r
+       IN                              struct ib_port_attr  *hca_ports,\r
+       OUT                     ib_ca_attr_t                            *ca_attr_p);\r
+\r
+ib_api_status_t\r
+mlnx_local_mad (\r
+       IN              const   ib_ca_handle_t                          h_ca,\r
+       IN              const   uint8_t                                         port_num,\r
+       IN              const   ib_mad_t                                        *p_mad_in,\r
+               OUT                     ib_mad_t                                        *p_mad_out );\r
+\r
+void\r
+mlnx_memory_if(\r
+       IN      OUT                     ci_interface_t                          *p_interface );\r
+\r
+void\r
+mlnx_ecc_if(\r
+       IN      OUT                     ci_interface_t                          *p_interface );\r
+\r
+void\r
+mlnx_direct_if(\r
+       IN      OUT                     ci_interface_t                          *p_interface );\r
+\r
+void\r
+mlnx_mcast_if(\r
+       IN      OUT                     ci_interface_t                          *p_interface );\r
+\r
+ib_api_status_t\r
+fw_access_ctrl(\r
+       IN              const   void* __ptr64                           context,\r
+       IN              const   void* __ptr64* const            handle_array    OPTIONAL,\r
+       IN                              uint32_t                                        num_handles,\r
+       IN                              ib_ci_op_t* const                       p_ci_op,\r
+       IN      OUT                     ci_umv_buf_t                            *p_umv_buf              OPTIONAL);\r
+\r
+\r
+void cq_comp_handler(struct ib_cq *cq, void *context);\r
+\r
+void ca_event_handler(struct ib_event *ev, void *context);\r
+\r
+void qp_event_handler(struct ib_event *ev, void *context);\r
+\r
+void cq_event_handler(struct ib_event *ev, void *context);\r
+\r
+ib_qp_state_t mlnx_qps_to_ibal(enum ib_qp_state qps);\r
+\r
+enum ib_qp_state mlnx_qps_from_ibal(ib_qp_state_t ib_qps);\r
+\r
+ib_api_status_t\r
+mlnx_conv_qp_modify_attr(\r
+       IN       const  struct ib_qp *ib_qp_p,\r
+       IN                              ib_qp_type_t    qp_type,\r
+       IN       const  ib_qp_mod_t *modify_attr_p,             \r
+       OUT     struct ib_qp_attr *qp_attr_p,\r
+       OUT     int *qp_attr_mask_p\r
+       );\r
+\r
+int\r
+mlnx_conv_ibal_av(\r
+       IN              const   struct ib_device *ib_dev_p,\r
+       IN              const   ib_av_attr_t                            *ibal_av_p,\r
+       OUT                     struct ib_ah_attr       *ah_attr_p);\r
+\r
+int\r
+mlnx_conv_mthca_av(\r
+       IN              const   struct ib_ah *ib_ah_p,\r
+       OUT                     ib_av_attr_t                            *ibal_av_p);\r
+\r
+void\r
+mlnx_modify_ah(\r
+       IN              const   struct ib_ah *ib_ah_p,\r
+       IN      const   struct ib_ah_attr *ah_attr_p);\r
+\r
+\r
+#endif\r
diff --git a/trunk/hw/mthca/kernel/hca_debug.h b/trunk/hw/mthca/kernel/hca_debug.h
new file mode 100644 (file)
index 0000000..a60b1b1
--- /dev/null
@@ -0,0 +1,177 @@
+/*\r
+ * Copyright (c) 2005 SilverStorm Technologies.  All rights reserved.\r
+ *\r
+ * This software is available to you under the OpenIB.org BSD license\r
+ * below:\r
+ *\r
+ *     Redistribution and use in source and binary forms, with or\r
+ *     without modification, are permitted provided that the following\r
+ *     conditions are met:\r
+ *\r
+ *      - Redistributions of source code must retain the above\r
+ *        copyright notice, this list of conditions and the following\r
+ *        disclaimer.\r
+ *\r
+ *      - Redistributions in binary form must reproduce the above\r
+ *        copyright notice, this list of conditions and the following\r
+ *        disclaimer in the documentation and/or other materials\r
+ *        provided with the distribution.\r
+ *\r
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,\r
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\r
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND\r
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS\r
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN\r
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN\r
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\r
+ * SOFTWARE.\r
+ *\r
+ * $Id: hca_debug.h 46 2005-05-30 17:55:53Z sleybo $\r
+ */\r
+\r
+\r
+#ifndef  _HCA_DEBUG_H_\r
+#define _HCA_DEBUG_H_\r
+\r
+\r
+extern uint32_t                g_mthca_dbg_level;\r
+extern uint32_t                g_mthca_dbg_flags;\r
+#define MAX_LOG_BUF_LEN                512\r
+extern WCHAR g_wlog_buf[ MAX_LOG_BUF_LEN ]; \r
+extern UCHAR g_slog_buf[ MAX_LOG_BUF_LEN ];  \r
+\r
+\r
+#if defined(EVENT_TRACING)\r
+//\r
+// Software Tracing Definitions \r
+//\r
+\r
+#define WPP_CONTROL_GUIDS \\r
+       WPP_DEFINE_CONTROL_GUID(HCACtlGuid,(8BF1F640,63FE,4743,B9EF,FA38C695BFDE),  \\r
+       WPP_DEFINE_BIT( HCA_DBG_DEV) \\r
+       WPP_DEFINE_BIT( HCA_DBG_INIT) \\r
+       WPP_DEFINE_BIT( HCA_DBG_PNP) \\r
+       WPP_DEFINE_BIT( HCA_DBG_MAD) \\r
+       WPP_DEFINE_BIT( HCA_DBG_PO) \\r
+       WPP_DEFINE_BIT( HCA_DBG_CQ) \\r
+       WPP_DEFINE_BIT( HCA_DBG_QP) \\r
+       WPP_DEFINE_BIT( HCA_DBG_MEMORY) \\r
+       WPP_DEFINE_BIT( HCA_DBG_AV) \\r
+       WPP_DEFINE_BIT( HCA_DBG_LOW) \\r
+       WPP_DEFINE_BIT( HCA_DBG_SHIM))\r
+\r
+#define WPP_GLOBALLOGGER\r
+\r
+\r
+#define WPP_LEVEL_FLAGS_ENABLED(lvl, flags) (WPP_LEVEL_ENABLED(flags) && WPP_CONTROL(WPP_BIT_ ## flags).Level  >= lvl)\r
+#define WPP_LEVEL_FLAGS_LOGGER(lvl,flags) WPP_LEVEL_LOGGER(flags)\r
+#define WPP_FLAG_ENABLED(flags)(WPP_LEVEL_ENABLED(flags) && WPP_CONTROL(WPP_BIT_ ## flags).Level  >= TRACE_LEVEL_VERBOSE)\r
+#define WPP_FLAG_LOGGER(flags) WPP_LEVEL_LOGGER(flags)\r
+\r
+\r
+// begin_wpp config\r
+// HCA_ENTER(FLAG);\r
+// HCA_EXIT(FLAG);\r
+// USEPREFIX(HCA_PRINT, "%!STDPREFIX! %!FUNC!() :");\r
+// USESUFFIX(HCA_ENTER, " %!FUNC!()===>");\r
+// USESUFFIX(HCA_EXIT, " %!FUNC!()<===");\r
+// end_wpp\r
+\r
+\r
+\r
+#else\r
+\r
+\r
+#include <evntrace.h>\r
+\r
+/*\r
+ * Debug macros\r
+ */\r
+\r
+\r
+#define HCA_DBG_DEV    (1 << 0)\r
+#define HCA_DBG_INIT   (1<<1)\r
+#define HCA_DBG_PNP    (1 << 2)\r
+#define HCA_DBG_MAD    (1 << 3)\r
+#define HCA_DBG_PO     (1 << 4)\r
+#define HCA_DBG_QP     (1 << 5)\r
+#define HCA_DBG_CQ     (1 << 6)\r
+#define HCA_DBG_MEMORY (1 << 7)\r
+#define HCA_DBG_AV     (1<<8)\r
+#define HCA_DBG_LOW    (1 << 9)\r
+#define HCA_DBG_SHIM   (1 << 10)\r
+\r
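+// _build_str: format the message into the global ANSI buffer g_slog_buf and widen\r
+// it into g_wlog_buf for use by the event-log reporting macros below.\r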
+static void _build_str( const char *   format, ... )\r
+{\r
+       va_list p_arg;\r
+       va_start(p_arg, format);\r
+       vsprintf((char *)g_slog_buf , format , p_arg);\r
+       swprintf(g_wlog_buf, L"%S", g_slog_buf);\r
+       va_end(p_arg);\r
+}\r
+\r
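+// HCA_PRINT_TO_EVENT_LOG: map the trace level to an event id (error/warning/info),\r
+// render the message via _build_str and write it to the system event log for _obj_.\r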
+#define HCA_PRINT_TO_EVENT_LOG(_obj_,_level_,_flag_,_msg_)  \\r
+       { \\r
+               NTSTATUS event_id; \\r
+               switch (_level_) { \\r
+                       case TRACE_LEVEL_FATAL: case TRACE_LEVEL_ERROR: event_id = EVENT_MTHCA_ANY_ERROR; break; \\r
+                       case TRACE_LEVEL_WARNING: event_id = EVENT_MTHCA_ANY_WARN; break; \\r
+                       default: event_id = EVENT_MTHCA_ANY_INFO; break; \\r
+               } \\r
+               _build_str _msg_; \\r
+               WriteEventLogEntryStr( _obj_, (ULONG)event_id, 0, 0, g_wlog_buf, 0, 0 ); \\r
+       }\r
+\r
+#define HCA_PRINT_EV_MDEV(_level_,_flag_,_msg_)  \\r
+       HCA_PRINT_TO_EVENT_LOG(mdev->ext->cl_ext.p_self_do,_level_,_flag_,_msg_)\r
+\r
+\r
+#if DBG\r
+\r
+// assignment of _level_ is needed to overcome warning C4127\r
+#define HCA_PRINT(_level_,_flag_,_msg_)  \\r
+       { \\r
+               int __lvl = _level_; \\r
+               if (g_mthca_dbg_level >= (_level_) && \\r
+                       (g_mthca_dbg_flags & (_flag_))) { \\r
+                               DbgPrint ("[MTHCA] %s() :", __FUNCTION__); \\r
+                               if(__lvl == TRACE_LEVEL_ERROR) DbgPrint ("***ERROR***  "); \\r
+                               DbgPrint _msg_; \\r
+               } \\r
+       }\r
+\r
+#else\r
+\r
+#define HCA_PRINT(lvl ,flags, msg) \r
+\r
+#endif\r
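+\r
+// Typical usage (arguments are illustrative):\r
+//     HCA_PRINT(TRACE_LEVEL_ERROR, HCA_DBG_QP, ("post_send failed (%d)\n", err));\r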
+\r
+#define HCA_PRINT_EV(_level_,_flag_,_msg_)  \\r
+    { \\r
+           HCA_PRINT(_level_,_flag_,_msg_) \\r
+           HCA_PRINT_EV_MDEV(_level_,_flag_,_msg_) \\r
+       }\r
+\r
+#define HCA_ENTER(flags)\\r
+       HCA_PRINT(TRACE_LEVEL_VERBOSE, flags,("===>\n"));\r
+\r
+#define HCA_EXIT(flags)\\r
+       HCA_PRINT(TRACE_LEVEL_VERBOSE, flags, ("<===\n" ));\r
+\r
+\r
+#define HCA_PRINT_EXIT(_level_,_flag_,_msg_)   \\r
+       {\\r
+               if (status != IB_SUCCESS) {\\r
+                       HCA_PRINT(_level_,_flag_,_msg_);\\r
+               }\\r
+               HCA_EXIT(_flag_);\\r
+       }\r
+\r
+#endif //EVENT_TRACING\r
+\r
+\r
+\r
+\r
+#endif /*_HCA_DEBUG_H_ */\r
+\r
+\r
diff --git a/trunk/hw/mthca/kernel/hca_direct.c b/trunk/hw/mthca/kernel/hca_direct.c
new file mode 100644 (file)
index 0000000..cdece3e
--- /dev/null
@@ -0,0 +1,246 @@
+/*\r
+ * Copyright (c) 2005 SilverStorm Technologies.  All rights reserved.\r
+ * Copyright (c) 2004-2005 Mellanox Technologies, Inc. All rights reserved. \r
+ *\r
+ * This software is available to you under the OpenIB.org BSD license\r
+ * below:\r
+ *\r
+ *     Redistribution and use in source and binary forms, with or\r
+ *     without modification, are permitted provided that the following\r
+ *     conditions are met:\r
+ *\r
+ *      - Redistributions of source code must retain the above\r
+ *        copyright notice, this list of conditions and the following\r
+ *        disclaimer.\r
+ *\r
+ *      - Redistributions in binary form must reproduce the above\r
+ *        copyright notice, this list of conditions and the following\r
+ *        disclaimer in the documentation and/or other materials\r
+ *        provided with the distribution.\r
+ *\r
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,\r
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\r
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND\r
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS\r
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN\r
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN\r
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\r
+ * SOFTWARE.\r
+ *\r
+ * $Id: hca_direct.c 148 2005-07-12 07:48:46Z sleybo $\r
+ */\r
+\r
+\r
+#include "hca_driver.h"\r
+#include "hca_debug.h"\r
+\r
+#if defined(EVENT_TRACING)\r
+#ifdef offsetof\r
+#undef offsetof\r
+#endif\r
+#include "hca_direct.tmh"\r
+#endif\r
+#include "mthca_dev.h"\r
+\r
+\r
+/* Controls whether to use the VAPI entrypoints in THH, or the IBAL native ones. */\r
+#define MLNX_SEND_NATIVE       1\r
+#define MLNX_RECV_NATIVE       1\r
+#define MLNX_POLL_NATIVE       1\r
+\r
+\r
+/*\r
+* Work Request Processing Verbs.\r
+*/\r
+ib_api_status_t\r
+mlnx_post_send (\r
+       IN      const   ib_qp_handle_t                                  h_qp,\r
+       IN                      ib_send_wr_t                                    *p_send_wr,\r
+               OUT             ib_send_wr_t                                    **pp_failed )\r
+{\r
+       int err;\r
+       ib_api_status_t         status;\r
+       struct ib_qp *ib_qp_p = (struct ib_qp *)h_qp;\r
+       struct ib_device *ib_dev_p = ib_qp_p->device;\r
+       \r
+       HCA_ENTER(HCA_DBG_QP);\r
+       \r
+       // sanity checks\r
+\r
+       // post the send work request list\r
+       err = ib_dev_p->post_send(ib_qp_p, p_send_wr, pp_failed );\r
+       if (err) {\r
+               HCA_PRINT(TRACE_LEVEL_ERROR  , HCA_DBG_QP  ,("post_send failed (%d)\n", err));\r
+               if (err == -ENOMEM)\r
+                       status = IB_INSUFFICIENT_RESOURCES;\r
+               else\r
+                       status = errno_to_iberr(err);\r
+               goto err_post_send;\r
+       }\r
+\r
+       status = IB_SUCCESS;\r
+               \r
+err_post_send: \r
+       HCA_PRINT_EXIT(TRACE_LEVEL_ERROR  , HCA_DBG_QP  ,("completes with ERROR status %s\n", ib_get_err_str(status)));\r
+       return status;\r
+                                                                                                                                                       \r
+}\r
+\r
+\r
+ib_api_status_t \r
+mlnx_post_recv (\r
+       IN              const   ib_qp_handle_t                          h_qp,\r
+       IN                              ib_recv_wr_t                            *p_recv_wr,\r
+               OUT                     ib_recv_wr_t                            **pp_failed OPTIONAL )\r
+{\r
+       int err;\r
+       ib_api_status_t         status;\r
+       struct ib_qp *ib_qp_p = (struct ib_qp *)h_qp;\r
+       struct ib_device *ib_dev_p = ib_qp_p->device;\r
+       \r
+       HCA_ENTER(HCA_DBG_QP);\r
+\r
+       // sanity checks\r
+       \r
+       // post the receive work request list\r
+       err = ib_dev_p->post_recv(ib_qp_p, p_recv_wr, pp_failed );\r
+       if (err) {\r
+               HCA_PRINT(TRACE_LEVEL_ERROR  ,HCA_DBG_QP  ,("post_recv failed (%d)\n", err));\r
+               if (err == -ENOMEM)\r
+                       status = IB_INSUFFICIENT_RESOURCES;\r
+               else\r
+                       status = errno_to_iberr(err);\r
+               goto err_post_recv;\r
+       }\r
+\r
+       status = IB_SUCCESS;\r
+               \r
+err_post_recv: \r
+       HCA_PRINT_EXIT(TRACE_LEVEL_ERROR  , HCA_DBG_QP  ,("completes with ERROR status %s\n", ib_get_err_str(status)));\r
+       return status;\r
+                                                                                                                                                       \r
+}\r
+\r
+/*\r
+* Completion Processing and Completion Notification Request Verbs.\r
+*/\r
+\r
+ib_api_status_t\r
+mlnx_peek_cq(\r
+       IN              const   ib_cq_handle_t                          h_cq,\r
+       OUT                             uint32_t* const                         p_n_cqes )\r
+{\r
+       UNREFERENCED_PARAMETER(h_cq);\r
+       UNREFERENCED_PARAMETER(p_n_cqes);\r
+       HCA_PRINT(TRACE_LEVEL_ERROR  , HCA_DBG_SHIM  ,("mlnx_peek_cq not implemented\n"));\r
+       return IB_INVALID_CA_HANDLE;\r
+}\r
+\r
+ib_api_status_t\r
+mlnx_poll_cq (\r
+       IN              const   ib_cq_handle_t                          h_cq,\r
+       IN      OUT                     ib_wc_t** const                         pp_free_wclist,\r
+               OUT                     ib_wc_t** const                         pp_done_wclist )\r
+{\r
+       int err;\r
+       ib_api_status_t         status = IB_SUCCESS;\r
+       struct ib_cq *ib_cq_p = (struct ib_cq *)h_cq;\r
+       \r
+       HCA_ENTER(HCA_DBG_CQ);\r
+\r
+       // sanity checks\r
+       if (!pp_free_wclist || !pp_done_wclist || !*pp_free_wclist) {\r
+               status = IB_INVALID_PARAMETER;\r
+               goto err_invalid_params;\r
+       }\r
+\r
+       // poll CQ\r
+       err = mthca_poll_cq_list(ib_cq_p, pp_free_wclist, pp_done_wclist );\r
+       if (err) {\r
+               HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_CQ ,("mthca_poll_cq_list failed (%d)\n", err));\r
+               status = errno_to_iberr(err);\r
+       }else if (!*pp_done_wclist)\r
+               status = IB_NOT_FOUND;\r
+               \r
+err_invalid_params:    \r
+       if (status != IB_NOT_FOUND){\r
+               HCA_PRINT_EXIT(TRACE_LEVEL_ERROR  , HCA_DBG_CQ  ,("completes with ERROR status %s\n", ib_get_err_str(status)));\r
+       }else\r
+               HCA_EXIT(HCA_DBG_CQ);\r
+       return status;\r
+                                                                                                                                                       \r
+}\r
+\r
+ib_api_status_t\r
+mlnx_enable_cq_notify (\r
+       IN              const   ib_cq_handle_t                          h_cq,\r
+       IN              const   boolean_t                                       solicited )\r
+{\r
+       int err;\r
+       ib_api_status_t         status = IB_SUCCESS;\r
+       struct ib_cq *ib_cq_p = (struct ib_cq *)h_cq;\r
+       \r
+       HCA_ENTER(HCA_DBG_SHIM);\r
+\r
+       // REARM CQ\r
+       err = ib_req_notify_cq(ib_cq_p, (solicited) ? IB_CQ_SOLICITED : IB_CQ_NEXT_COMP );\r
+       if (err) {\r
+               HCA_PRINT(TRACE_LEVEL_ERROR   ,HCA_DBG_SHIM   ,("ib_req_notify_cq failed (%d)\n", err));\r
+               status = errno_to_iberr(err);\r
+       }\r
+               \r
+       HCA_PRINT_EXIT(TRACE_LEVEL_ERROR  , HCA_DBG_SHIM  ,("completes with ERROR status %s\n", ib_get_err_str(status)));\r
+       return status;\r
+}\r
+\r
+ib_api_status_t\r
+mlnx_enable_ncomp_cq_notify (\r
+       IN              const   ib_cq_handle_t                          h_cq,\r
+       IN              const   uint32_t                                        n_cqes )\r
+{\r
+       int err;\r
+       ib_api_status_t         status = IB_SUCCESS;\r
+       struct ib_cq *ib_cq_p = (struct ib_cq *)h_cq;\r
+\r
+       HCA_ENTER(HCA_DBG_SHIM);\r
+\r
+       err = ib_req_ncomp_notif(ib_cq_p, n_cqes );\r
+       if (err) {\r
+               HCA_PRINT(TRACE_LEVEL_ERROR   ,HCA_DBG_SHIM   ,("ib_req_ncomp_notif failed (%d)\n", err));\r
+               status = errno_to_iberr(err);\r
+       }\r
+               \r
+       HCA_PRINT_EXIT(TRACE_LEVEL_ERROR  , HCA_DBG_SHIM  ,("completes with ERROR status %s\n", ib_get_err_str(status)));\r
+       return status;\r
+}\r
+\r
+ib_api_status_t\r
+mlnx_bind_mw (\r
+       IN              const   ib_mw_handle_t                          h_mw,\r
+       IN              const   ib_qp_handle_t                          h_qp,\r
+       IN                              ib_bind_wr_t* const                     p_mw_bind,\r
+               OUT                     net32_t* const                          p_rkey )\r
+{\r
+       UNREFERENCED_PARAMETER(h_mw);\r
+       UNREFERENCED_PARAMETER(h_qp);\r
+       UNREFERENCED_PARAMETER(p_mw_bind);\r
+       UNREFERENCED_PARAMETER(p_rkey);\r
+       HCA_PRINT(TRACE_LEVEL_ERROR  , HCA_DBG_SHIM  ,("mlnx_bind_mw not implemented\n"));\r
+       return IB_INVALID_CA_HANDLE;\r
+}\r
+\r
+\r
+void\r
+mlnx_direct_if(\r
+       IN      OUT                     ci_interface_t                          *p_interface )\r
+{\r
+       p_interface->post_send = mlnx_post_send;\r
+       p_interface->post_recv = mlnx_post_recv;\r
+\r
+       p_interface->enable_ncomp_cq_notify = mlnx_enable_ncomp_cq_notify;\r
+       p_interface->peek_cq =  NULL; /* mlnx_peek_cq: Not implemented */\r
+       p_interface->poll_cq = mlnx_poll_cq;\r
+       p_interface->enable_cq_notify = mlnx_enable_cq_notify;\r
+\r
+       p_interface->bind_mw = mlnx_bind_mw;\r
+}\r
diff --git a/trunk/hw/mthca/kernel/hca_driver.c b/trunk/hw/mthca/kernel/hca_driver.c
new file mode 100644 (file)
index 0000000..49b88eb
--- /dev/null
@@ -0,0 +1,885 @@
+/*\r
+ * Copyright (c) 2005 SilverStorm Technologies.  All rights reserved.\r
+ *\r
+ * This software is available to you under the OpenIB.org BSD license\r
+ * below:\r
+ *\r
+ *     Redistribution and use in source and binary forms, with or\r
+ *     without modification, are permitted provided that the following\r
+ *     conditions are met:\r
+ *\r
+ *      - Redistributions of source code must retain the above\r
+ *        copyright notice, this list of conditions and the following\r
+ *        disclaimer.\r
+ *\r
+ *      - Redistributions in binary form must reproduce the above\r
+ *        copyright notice, this list of conditions and the following\r
+ *        disclaimer in the documentation and/or other materials\r
+ *        provided with the distribution.\r
+ *\r
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,\r
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\r
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND\r
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS\r
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN\r
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN\r
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\r
+ * SOFTWARE.\r
+ *\r
+ * $Id: hca_driver.c 46 2005-05-30 17:55:53Z sleybo $\r
+ */\r
+\r
+\r
+/*\r
+ * Provides the driver entry points for the Tavor VPD.\r
+ */\r
+\r
+#include "hca_driver.h"\r
+#include "hca_debug.h"\r
+\r
+#include "mthca_log.h"\r
+#if defined(EVENT_TRACING)\r
+#ifdef offsetof\r
+#undef offsetof\r
+#endif\r
+#include "hca_driver.tmh"\r
+#endif\r
+#include "mthca_dev.h"\r
+#include <wdmguid.h>\r
+#include <initguid.h>\r
+#pragma warning( push, 3 )\r
+//#include "MdCard.h"\r
+#pragma warning( pop )\r
+#include <iba/ib_ci_ifc.h>\r
+\r
+/* from \inc\platform\evntrace.h\r
+#define TRACE_LEVEL_NONE        0   // Tracing is not on\r
+#define TRACE_LEVEL_FATAL       1   // Abnormal exit or termination\r
+#define TRACE_LEVEL_ERROR       2   // Severe errors that need logging\r
+#define TRACE_LEVEL_WARNING     3   // Warnings such as allocation failure\r
+#define TRACE_LEVEL_INFORMATION 4   // Includes non-error cases(e.g.,Entry-Exit)\r
+#define TRACE_LEVEL_VERBOSE     5   // Detailed traces from intermediate steps\r
+*/\r
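+// The defaults below may be overridden at load time by the Parameters\DebugLevel and\r
+// Parameters\DebugFlags registry values (see __read_registry).\r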
+uint32_t g_mthca_dbg_level = TRACE_LEVEL_INFORMATION;\r
+uint32_t g_mthca_dbg_flags= 0xffff;\r
+WCHAR g_wlog_buf[ MAX_LOG_BUF_LEN ]; \r
+UCHAR g_slog_buf[ MAX_LOG_BUF_LEN ];  \r
+\r
+/*\r
+ * UVP name does not include file extension.  For debug builds, UAL\r
+ * will append "d.dll".  For release builds, UAL will append ".dll"\r
+ */\r
+char                   mlnx_uvp_lib_name[MAX_LIB_NAME] = {"mthcau"};\r
+\r
+\r
+NTSTATUS\r
+DriverEntry(\r
+       IN                              PDRIVER_OBJECT                          p_driver_obj,\r
+       IN                              PUNICODE_STRING                         p_registry_path );\r
+\r
+static NTSTATUS\r
+__read_registry(\r
+       IN                              UNICODE_STRING* const           p_Param_Path );\r
+\r
+static void\r
+hca_drv_unload(\r
+       IN                              PDRIVER_OBJECT                          p_driver_obj );\r
+\r
+static NTSTATUS\r
+hca_sysctl(\r
+       IN                              PDEVICE_OBJECT                          p_dev_obj,\r
+       IN                              PIRP                                            p_irp );\r
+\r
+static NTSTATUS\r
+__pnp_notify_target(\r
+       IN                              TARGET_DEVICE_REMOVAL_NOTIFICATION      *p_notify,\r
+       IN                              void                                            *context );\r
+\r
+static NTSTATUS\r
+__pnp_notify_ifc(\r
+       IN                              DEVICE_INTERFACE_CHANGE_NOTIFICATION    *p_notify,\r
+       IN                              void                                            *context );\r
+\r
+static NTSTATUS\r
+fw_access_pciconf (\r
+               IN              BUS_INTERFACE_STANDARD                  *p_BusInterface,\r
+               IN              ULONG                                                   op_flag,\r
+               IN              PVOID                                                   p_buffer,\r
+               IN              ULONG                                                   offset,\r
+               IN              ULONG POINTER_ALIGNMENT                 length );\r
+\r
+static NTSTATUS\r
+fw_get_pci_bus_interface(\r
+       IN              DEVICE_OBJECT                           *p_dev_obj,\r
+       OUT             BUS_INTERFACE_STANDARD          *p_BusInterface );\r
+\r
+static NTSTATUS\r
+fw_flash_write_data (\r
+               IN              BUS_INTERFACE_STANDARD                  *p_BusInterface,\r
+               IN              PVOID                                                   p_buffer,\r
+               IN              ULONG                                                   offset,\r
+               IN              ULONG POINTER_ALIGNMENT                 length );\r
+\r
+static NTSTATUS\r
+fw_flash_read_data (\r
+               IN              BUS_INTERFACE_STANDARD                  *p_BusInterface,\r
+               IN              PVOID                                                   p_buffer,\r
+               IN              ULONG                                                   offset,\r
+               IN              ULONG POINTER_ALIGNMENT                 length );\r
+\r
+static NTSTATUS\r
+fw_flash_get_ca_guid(\r
+       IN              DEVICE_OBJECT           *p_dev_obj,\r
+       OUT             uint64_t                        *ca_guid );\r
+\r
+static NTSTATUS\r
+fw_flash_read4( \r
+       IN                      BUS_INTERFACE_STANDARD  *p_BusInterface,\r
+       IN                      uint32_t                                addr, \r
+       IN      OUT             uint32_t                                *p_data);\r
+\r
+static NTSTATUS\r
+fw_flash_readbuf(\r
+       IN              BUS_INTERFACE_STANDARD  *p_BusInterface,\r
+       IN              uint32_t                                offset,\r
+       IN OUT  void                                    *p_data,\r
+       IN              uint32_t                                len);\r
+static NTSTATUS\r
+fw_set_bank(\r
+       IN              BUS_INTERFACE_STANDARD  *p_BusInterface,\r
+       IN              uint32_t                                bank );\r
+\r
+static NTSTATUS\r
+fw_flash_init(\r
+               IN              BUS_INTERFACE_STANDARD                  *p_BusInterface  );\r
+\r
+static NTSTATUS\r
+fw_flash_deinit(\r
+               IN              BUS_INTERFACE_STANDARD                  *p_BusInterface  );\r
+\r
+#ifdef ALLOC_PRAGMA\r
+#pragma alloc_text (INIT, DriverEntry)\r
+#pragma alloc_text (INIT, __read_registry)\r
+#pragma alloc_text (PAGE, hca_drv_unload)\r
+#pragma alloc_text (PAGE, hca_sysctl)\r
+#pragma alloc_text (PAGE, fw_flash_get_ca_guid)\r
+#endif\r
+\r
+NTSTATUS\r
+DriverEntry(\r
+       IN                              PDRIVER_OBJECT                  p_driver_obj,\r
+       IN                              PUNICODE_STRING                 p_registry_path )\r
+{\r
+       NTSTATUS                        status;\r
+       cl_status_t                     cl_status;\r
+#if defined(EVENT_TRACING)\r
+       WPP_INIT_TRACING(p_driver_obj ,p_registry_path);\r
+#endif\r
+       HCA_ENTER( HCA_DBG_DEV );\r
+\r
+       status = __read_registry( p_registry_path );\r
+       if( !NT_SUCCESS( status ) )\r
+       {\r
+               HCA_PRINT( TRACE_LEVEL_ERROR, HCA_DBG_INIT, \r
+                       ("__read_registry_path returned 0x%X.\n", status));\r
+               return status;\r
+       }\r
+\r
+       /* Initialize Adapter DB */\r
+       cl_status = mlnx_hcas_init();\r
+       if( cl_status != CL_SUCCESS )\r
+       {\r
+               HCA_PRINT( TRACE_LEVEL_ERROR ,HCA_DBG_INIT ,\r
+                       ("mlnx_hcas_init returned %s.\n", cl_status_text[cl_status]));\r
+               return cl_to_ntstatus( cl_status );\r
+       }\r
+//     cl_memclr( mlnx_hca_array, MLNX_MAX_HCA * sizeof(ci_interface_t) );\r
+\r
+       /*leo:  init function table */\r
+       hca_init_vfptr();\r
+\r
+       /*leo: calibrate CPU */\r
+       MT_time_calibrate();\r
+       \r
+       p_driver_obj->MajorFunction[IRP_MJ_PNP] = cl_pnp;\r
+       p_driver_obj->MajorFunction[IRP_MJ_POWER] = cl_power;\r
+       p_driver_obj->MajorFunction[IRP_MJ_SYSTEM_CONTROL] = hca_sysctl;\r
+       p_driver_obj->DriverUnload = hca_drv_unload;\r
+       p_driver_obj->DriverExtension->AddDevice = hca_add_device;\r
+\r
+       /* init core */\r
+       if (ib_core_init()) {\r
+               HCA_PRINT( TRACE_LEVEL_ERROR ,HCA_DBG_INIT ,("Failed to init core, aborting.\n"));\r
+               return STATUS_UNSUCCESSFUL;\r
+       }\r
+\r
+       /* init uverbs module */\r
+       if (ib_uverbs_init()) {\r
+               HCA_PRINT( TRACE_LEVEL_ERROR ,HCA_DBG_INIT ,("Failed ib_uverbs_init, aborting.\n"));\r
+               return STATUS_UNSUCCESSFUL;\r
+       }\r
+       HCA_EXIT( HCA_DBG_DEV );\r
+       return STATUS_SUCCESS;\r
+}\r
+\r
+\r
+static NTSTATUS\r
+__read_registry(\r
+       IN                              UNICODE_STRING* const   p_registry_path )\r
+{\r
+       NTSTATUS                                        status;\r
+       /* Remember the terminating entry in the table below. */\r
+       RTL_QUERY_REGISTRY_TABLE        table[3];\r
+       UNICODE_STRING                          param_path;\r
+\r
+       HCA_ENTER( HCA_DBG_DEV );\r
+\r
+       RtlInitUnicodeString( &param_path, NULL );\r
+       param_path.MaximumLength = p_registry_path->Length + \r
+               sizeof(L"\\Parameters");\r
+       param_path.Buffer = cl_zalloc( param_path.MaximumLength );\r
+       if( !param_path.Buffer )\r
+       {\r
+               HCA_PRINT( TRACE_LEVEL_ERROR, HCA_DBG_DEV, \r
+                       ("Failed to allocate parameters path buffer.\n"));\r
+               return STATUS_INSUFFICIENT_RESOURCES;\r
+       }\r
+\r
+       RtlAppendUnicodeStringToString( &param_path, p_registry_path );\r
+       RtlAppendUnicodeToString( &param_path, L"\\Parameters" );\r
+\r
+       /*\r
+        * Clear the table.  This clears all the query callback pointers,\r
+        * and sets up the terminating table entry.\r
+        */\r
+       cl_memclr( table, sizeof(table) );\r
+\r
+       /* Setup the table entries. */\r
+       table[0].Flags = RTL_QUERY_REGISTRY_DIRECT;\r
+       table[0].Name = L"DebugLevel";\r
+       table[0].EntryContext = &g_mthca_dbg_level;\r
+       table[0].DefaultType = REG_DWORD;\r
+       table[0].DefaultData = &g_mthca_dbg_level;\r
+       table[0].DefaultLength = sizeof(ULONG);\r
+\r
+       \r
+       table[1].Flags = RTL_QUERY_REGISTRY_DIRECT;\r
+       table[1].Name = L"DebugFlags";\r
+       table[1].EntryContext = &g_mthca_dbg_flags;\r
+       table[1].DefaultType = REG_DWORD;\r
+       table[1].DefaultData = &g_mthca_dbg_flags;\r
+       table[1].DefaultLength = sizeof(ULONG);\r
+\r
+       /* Have at it! */\r
+       status = RtlQueryRegistryValues( RTL_REGISTRY_ABSOLUTE, \r
+               param_path.Buffer, table, NULL, NULL );\r
+\r
+       HCA_PRINT( TRACE_LEVEL_INFORMATION, HCA_DBG_INIT, \r
+                       ("debug level  %d debug flags  0x%.8x\n",\r
+                       g_mthca_dbg_level ,\r
+                       g_mthca_dbg_flags));\r
+\r
+\r
+       cl_free( param_path.Buffer );\r
+       HCA_EXIT( HCA_DBG_DEV );\r
+       return status;\r
+}\r
+\r
+\r
+static void\r
+hca_drv_unload(\r
+       IN                              PDRIVER_OBJECT                  p_driver_obj )\r
+{\r
+       HCA_ENTER( HCA_DBG_DEV );\r
+\r
+       UNUSED_PARAM( p_driver_obj );\r
+\r
+       ib_uverbs_cleanup();\r
+       ib_core_cleanup();\r
+       \r
+       HCA_EXIT( HCA_DBG_DEV );\r
+#if defined(EVENT_TRACING)\r
+       WPP_CLEANUP(p_driver_obj);\r
+#endif\r
+\r
+}\r
+\r
+\r
+static NTSTATUS\r
+hca_sysctl(\r
+       IN                              PDEVICE_OBJECT                          p_dev_obj,\r
+       IN                              PIRP                                            p_irp )\r
+{\r
+       NTSTATUS                status;\r
+       hca_dev_ext_t   *p_ext;\r
+\r
+       HCA_ENTER( HCA_DBG_DEV );\r
+\r
+       p_ext = p_dev_obj->DeviceExtension;\r
+\r
+       IoSkipCurrentIrpStackLocation( p_irp );\r
+       status = IoCallDriver( p_ext->cl_ext.p_next_do, p_irp );\r
+\r
+       HCA_EXIT( HCA_DBG_DEV );\r
+       return status;\r
+}\r
+\r
+typedef struct Primary_Sector{\r
+       uint32_t fi_addr;\r
+       uint32_t fi_size;\r
+       uint32_t signature;\r
+       uint32_t fw_reserved[5];\r
+       uint32_t vsd[56];\r
+       uint32_t branch_to;\r
+       uint32_t crc016;\r
+} primary_sector_t;\r
+\r
+static uint32_t old_dir;\r
+static uint32_t old_pol;\r
+static uint32_t old_mod;\r
+static uint32_t old_dat;\r
+\r
+static NTSTATUS\r
+fw_access_pciconf (\r
+               IN              BUS_INTERFACE_STANDARD                  *p_BusInterface,\r
+               IN              ULONG                                                   op_flag,\r
+               IN              PVOID                                                   p_buffer,\r
+               IN              ULONG                                                   offset,\r
+               IN              ULONG POINTER_ALIGNMENT                 length )\r
+{\r
+\r
+       ULONG                           bytes;  \r
+       NTSTATUS                        status = STATUS_SUCCESS;\r
+\r
+       PAGED_CODE();\r
+\r
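+       /* Indirect access through the PCI config-space gateway: write the target\r
+        * offset to PCI_CONF_ADDR, then transfer 'length' bytes through PCI_CONF_DATA;\r
+        * op_flag == 0 performs a read, any other value a write. */\r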
+       if (p_BusInterface)\r
+       {\r
+\r
+               bytes = p_BusInterface->SetBusData(\r
+                                               p_BusInterface->Context,\r
+                                               PCI_WHICHSPACE_CONFIG,\r
+                                               (PVOID)&offset,\r
+                                               PCI_CONF_ADDR,\r
+                                               sizeof(ULONG) );\r
+\r
+               if( op_flag == 0 )\r
+               {\r
+                       if ( bytes )\r
+                               bytes = p_BusInterface->GetBusData(\r
+                                                       p_BusInterface->Context,\r
+                                                       PCI_WHICHSPACE_CONFIG,\r
+                                                       p_buffer,\r
+                                                       PCI_CONF_DATA,\r
+                                                       length );\r
+                       if ( !bytes )\r
+                               status = STATUS_NOT_SUPPORTED;\r
+               }\r
+\r
+               else\r
+               {\r
+                       if ( bytes )\r
+                               bytes = p_BusInterface->SetBusData(\r
+                                                       p_BusInterface->Context,\r
+                                                       PCI_WHICHSPACE_CONFIG,\r
+                                                       p_buffer,\r
+                                                       PCI_CONF_DATA,\r
+                                                       length);\r
+\r
+                       if ( !bytes )\r
+                               status = STATUS_NOT_SUPPORTED;\r
+               }\r
+       }\r
+       return status;\r
+}\r
+\r
+static NTSTATUS\r
+fw_get_pci_bus_interface(\r
+       IN              DEVICE_OBJECT                           *p_dev_obj,\r
+       OUT             BUS_INTERFACE_STANDARD          *p_BusInterface )\r
+{\r
+       KEVENT event;\r
+       NTSTATUS status;\r
+       PIRP p_irp;\r
+       IO_STATUS_BLOCK ioStatus;\r
+       PIO_STACK_LOCATION p_irpStack;\r
+       PDEVICE_OBJECT p_target_obj;\r
+\r
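+       /* Query the bus driver for BUS_INTERFACE_STANDARD by sending a synchronous\r
+        * IRP_MN_QUERY_INTERFACE to the top of the attached device stack. */\r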
+       KeInitializeEvent( &event, NotificationEvent, FALSE );\r
+\r
+       p_target_obj = IoGetAttachedDeviceReference( p_dev_obj );\r
+\r
+       p_irp = IoBuildSynchronousFsdRequest( IRP_MJ_PNP,\r
+                                                                               p_target_obj,\r
+                                                                               NULL,\r
+                                                                               0,\r
+                                                                               NULL,\r
+                                                                               &event,\r
+                                                                               &ioStatus );\r
+       if (p_irp == NULL) {\r
+               status = STATUS_INSUFFICIENT_RESOURCES;\r
+               goto End;\r
+       }\r
+       p_irpStack = IoGetNextIrpStackLocation( p_irp );\r
+       p_irpStack->MinorFunction = IRP_MN_QUERY_INTERFACE;\r
+       p_irpStack->Parameters.QueryInterface.InterfaceType = (LPGUID) &GUID_BUS_INTERFACE_STANDARD;\r
+       p_irpStack->Parameters.QueryInterface.Size = sizeof(BUS_INTERFACE_STANDARD);\r
+       p_irpStack->Parameters.QueryInterface.Version = 1;\r
+       p_irpStack->Parameters.QueryInterface.Interface = (PINTERFACE) p_BusInterface;\r
+       p_irpStack->Parameters.QueryInterface.InterfaceSpecificData = NULL;\r
+\r
+       p_irp->IoStatus.Status = STATUS_NOT_SUPPORTED;\r
+       \r
+       status = IoCallDriver( p_target_obj, p_irp );\r
+\r
+       if ( status == STATUS_PENDING )\r
+       {\r
+               KeWaitForSingleObject( &event, Executive, KernelMode, FALSE, NULL );\r
+               status = ioStatus.Status;\r
+       }\r
+End:\r
+       // Done with reference\r
+       ObDereferenceObject( p_target_obj );\r
+       return status;\r
+}\r
+\r
+ib_api_status_t\r
+fw_access_ctrl(\r
+       IN              const   void* __ptr64                           p_context,\r
+       IN              const   void* __ptr64* const            handle_array    OPTIONAL,\r
+       IN                              uint32_t                                        num_handles,\r
+       IN                              ib_ci_op_t* const                       p_ci_op,\r
+       IN      OUT                     ci_umv_buf_t                            *p_umv_buf )\r
+{\r
+       DEVICE_OBJECT                           *p_dev_obj;\r
+       static BUS_INTERFACE_STANDARD    BusInterface;\r
+       static uint32_t                         if_ready;\r
+       NTSTATUS                                        status;\r
+       PVOID                                           p_data;\r
+       ULONG                                           offset;\r
+       ULONG POINTER_ALIGNMENT         length;\r
+       ib_ci_op_t                                      *p_ci;\r
+       mlnx_hob_t                                      *p_hob;\r
+\r
+       UNREFERENCED_PARAMETER(handle_array);\r
+       UNREFERENCED_PARAMETER(num_handles);\r
+       UNREFERENCED_PARAMETER(p_umv_buf);\r
+\r
+       status =  STATUS_SUCCESS;\r
+       p_hob = (mlnx_hob_t *)(const void *)p_context;\r
+\r
+       p_dev_obj = (DEVICE_OBJECT *)(const void *)p_hob->p_dev_obj;\r
+       p_ci =  p_ci_op;\r
+\r
+       if ( !p_ci )\r
+               return STATUS_INVALID_DEVICE_REQUEST;\r
+       if ( !p_ci->buf_size )\r
+               return STATUS_INVALID_DEVICE_REQUEST;\r
+\r
+       length = p_ci->buf_size;\r
+       offset = p_ci->buf_info;\r
+       p_data = p_ci->p_buf;\r
+\r
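+       /* FW_OPEN_IF caches the PCI bus interface in the static BusInterface/if_ready\r
+        * pair; the read/write commands require a prior FW_OPEN_IF, and FW_CLOSE_IF\r
+        * (or any failure) dereferences the interface. */\r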
+       switch ( p_ci->command )\r
+       {\r
+               case    FW_READ: // read data from flash\r
+                               if ( if_ready )\r
+                               {\r
+                                       status = fw_flash_read_data(&BusInterface, p_data, offset, length);\r
+                               }\r
+                               break;\r
+               case    FW_WRITE: // write data to flash\r
+                               if ( if_ready )\r
+                               {\r
+\r
+                                       status = fw_flash_write_data(&BusInterface, p_data, offset, length);\r
+                               }\r
+                               break;\r
+               case    FW_READ_CMD:\r
+                               if ( if_ready )\r
+                               {\r
+                                       status = fw_access_pciconf(&BusInterface, 0 , p_data, offset, 4);\r
+                               }\r
+                               break;\r
+               case    FW_WRITE_CMD:\r
+                               if ( if_ready )\r
+                               {\r
+                                       status = fw_access_pciconf(&BusInterface, 1 , p_data, offset, 4);\r
+                               }\r
+                               break;\r
+               case    FW_CLOSE_IF: // close BusInterface\r
+                               if (if_ready )\r
+                               {\r
+                                       if_ready = 0;\r
+                                       BusInterface.InterfaceDereference((PVOID)BusInterface.Context);\r
+                               }\r
+                               return status;\r
+               case    FW_OPEN_IF: // open BusInterface\r
+                               if ( !if_ready )\r
+                               {\r
+                                       status = fw_get_pci_bus_interface(p_dev_obj, &BusInterface);\r
+                               \r
+                                       if ( NT_SUCCESS( status ) )\r
+                                       {\r
+                                               if_ready = 1;\r
+                                               status = STATUS_SUCCESS;\r
+                                       }\r
+                               }\r
+                               return status;\r
+               default:\r
+                               status = STATUS_NOT_SUPPORTED;\r
+       }\r
+\r
+       if ( status != STATUS_SUCCESS )\r
+       {\r
+               if ( if_ready )\r
+               {\r
+                       if_ready = 0;\r
+                       BusInterface.InterfaceDereference((PVOID)BusInterface.Context);\r
+               }\r
+               HCA_PRINT( TRACE_LEVEL_ERROR, HCA_DBG_INIT, \r
+                       ("fw_access_ctrl failed returns %08x.\n", status));\r
+       }\r
+       return status;\r
+}\r
+\r
+static NTSTATUS\r
+fw_flash_write_data (\r
+               IN              BUS_INTERFACE_STANDARD                  *p_BusInterface,\r
+               IN              PVOID                                                   p_buffer,\r
+               IN              ULONG                                                   offset,\r
+               IN              ULONG POINTER_ALIGNMENT                 length )\r
+{\r
+       NTSTATUS                status;\r
+       uint32_t                cnt = 0;\r
+       uint32_t                lcl_data;\r
+\r
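+       /* Flash gateway write (as coded here): shift the payload byte into the top of\r
+        * the data register at FLASH_OFFSET+4, latch the address with WRITE_BIT into the\r
+        * command register at FLASH_OFFSET, then poll until the command bits clear. */\r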
+       lcl_data = (*((uint32_t*)p_buffer) << 24);\r
+\r
+       status = fw_access_pciconf(p_BusInterface, FW_WRITE , &lcl_data, FLASH_OFFSET+4, length );\r
+       if ( status != STATUS_SUCCESS )\r
+               return status;\r
+       lcl_data = ( WRITE_BIT | (offset & ADDR_MSK));\r
+               \r
+       status = fw_access_pciconf(p_BusInterface, FW_WRITE , &lcl_data, FLASH_OFFSET, 4 );\r
+       if ( status != STATUS_SUCCESS )\r
+               return status;\r
+\r
+       lcl_data = 0;\r
+       \r
+       do\r
+       {\r
+               if (++cnt > 5000)\r
+               {\r
+                       return STATUS_DEVICE_NOT_READY;\r
+               }\r
+\r
+               status = fw_access_pciconf(p_BusInterface, FW_READ , &lcl_data, FLASH_OFFSET, 4 );\r
+               if ( status != STATUS_SUCCESS )\r
+                       return status;\r
+\r
+       } while(lcl_data & CMD_MASK);\r
+\r
+       return status;\r
+}\r
+\r
+static NTSTATUS\r
+fw_flash_read_data (\r
+               IN              BUS_INTERFACE_STANDARD                  *p_BusInterface,\r
+               IN              PVOID                                                   p_buffer,\r
+               IN              ULONG                                                   offset,\r
+               IN              ULONG POINTER_ALIGNMENT                 length )\r
+{\r
+       NTSTATUS        status = STATUS_SUCCESS;\r
+       uint32_t        cnt = 0;\r
+       uint32_t        lcl_data = ( READ_BIT | (offset & ADDR_MSK));\r
+       \r
+       status = fw_access_pciconf(p_BusInterface, FW_WRITE, &lcl_data, FLASH_OFFSET, 4 );\r
+       if ( status != STATUS_SUCCESS )\r
+               return status;\r
+\r
+       lcl_data = 0;\r
+       do\r
+       {\r
+               // Timeout checks\r
+               if (++cnt > 5000 )\r
+               {\r
+                       return STATUS_DEVICE_NOT_READY;\r
+               }\r
+\r
+               status = fw_access_pciconf(p_BusInterface, FW_READ, &lcl_data, FLASH_OFFSET, 4 );\r
+       \r
+               if ( status != STATUS_SUCCESS )\r
+                       return status;\r
+\r
+       } while(lcl_data & CMD_MASK);\r
+\r
+       status = fw_access_pciconf(p_BusInterface, FW_READ, p_buffer, FLASH_OFFSET+4, length );\r
+       return status;\r
+}\r
+\r
+static NTSTATUS\r
+fw_flash_get_ca_guid(\r
+               IN              DEVICE_OBJECT           *p_dev_obj,\r
+               OUT             net64_t                 *ca_guid )\r
+{\r
+       NTSTATUS                status = STATUS_SUCCESS;\r
+       BUS_INTERFACE_STANDARD          BusInterface;\r
+\r
+    uint32_t NODE_GUIDH, NODE_GUIDL;\r
+       uint32_t prim_ptr = 0;\r
+    uint32_t signature;\r
+\r
+       primary_sector_t        ps;\r
+       cl_memset( &ps, 0, sizeof(primary_sector_t));\r
+\r
+       status = fw_get_pci_bus_interface(p_dev_obj, &BusInterface);\r
+\r
+       if ( !NT_SUCCESS( status ) )\r
+               return status;\r
+       \r
+       status = fw_flash_init (&BusInterface);\r
+       if (status != STATUS_SUCCESS )\r
+               return status;\r
+    status = fw_flash_read_data(&BusInterface, &signature, 0x24, 4); \r
+       if (status != STATUS_SUCCESS )\r
+               return status;\r
+    //signature = cl_ntoh32(signature);\r
+\r
+    if (signature == FW_SIGNATURE)\r
+    {\r
+       //Fail Safe image\r
+        \r
+        // Assume flash has been verified, and both images have the same guids, therefore,\r
+        // we only need to read the primary image's guids\r
+        status = fw_flash_readbuf(&BusInterface, FW_SECT_SIZE, &ps, sizeof(ps));\r
+               if ( status == STATUS_SUCCESS )\r
+               {\r
+                       status = fw_flash_read_data(&BusInterface, &prim_ptr, ps.fi_addr+0x24, 4);\r
+                       if (status == STATUS_SUCCESS )\r
+                               prim_ptr = prim_ptr + ps.fi_addr;\r
+               }\r
+    }\r
+    else\r
+    {\r
+        // Short image\r
+        prim_ptr = signature;       \r
+    }\r
+\r
+    if ( signature == FW_SIGNATURE || prim_ptr < MAX_FLASH_SIZE )\r
+    {\r
+               /* Now we can read the CA GUID.\r
+                * Since we read it in host mode, fw_flash_read4()\r
+                * swaps it back to BE - the order in which it is stored in FW.\r
+                */\r
+        if (( status = fw_flash_read4(&BusInterface, prim_ptr, &NODE_GUIDL)) == STATUS_SUCCESS )\r
+                       if (( status = fw_flash_read4(&BusInterface, prim_ptr+4, &NODE_GUIDH)) == STATUS_SUCCESS )\r
+                       {\r
+                               *ca_guid = NODE_GUIDH;\r
+                               *ca_guid = (*ca_guid << 32) | NODE_GUIDL;\r
+                       }\r
+       }\r
+       else \r
+    {\r
+        // invalid GUID pointer - fall through to deinit the flash and release the interface\r
+        status = STATUS_NO_SUCH_DEVICE;\r
+    }\r
+       fw_flash_deinit(&BusInterface);\r
+       BusInterface.InterfaceDereference((PVOID)BusInterface.Context);\r
+    return status;\r
+}\r
+\r
+static NTSTATUS\r
+fw_flash_read4( \r
+       IN                      BUS_INTERFACE_STANDARD  *p_BusInterface,\r
+       IN                      uint32_t                                addr, \r
+       IN      OUT             uint32_t                                *p_data)\r
+{\r
+       NTSTATUS        status = STATUS_SUCCESS;\r
+       uint32_t lcl_data = 0;\r
+       uint32_t bank;\r
+       static uint32_t curr_bank =     0xffffffff;\r
+\r
+       if (addr & 0x3)\r
+               return STATUS_INVALID_PARAMETER;\r
+\r
+       bank = addr & BANK_MASK;\r
+       if (bank !=  curr_bank)\r
+       {\r
+               curr_bank = bank;\r
+               if ((status = fw_set_bank(p_BusInterface, bank)) != STATUS_SUCCESS )\r
+                       return STATUS_INVALID_PARAMETER;\r
+       }\r
+       status = fw_flash_read_data(p_BusInterface, &lcl_data, addr, 4);\r
+       if ( status != STATUS_SUCCESS )\r
+               return status;\r
+       *p_data = cl_ntoh32(lcl_data);\r
+       return STATUS_SUCCESS;\r
+}\r
+\r
+static NTSTATUS\r
+fw_flash_readbuf(\r
+               IN              BUS_INTERFACE_STANDARD  *p_BusInterface,\r
+               IN              uint32_t                                offset,\r
+               IN OUT  void                                    *p_data,\r
+               IN              uint32_t                                len)\r
+{\r
+       NTSTATUS        status = STATUS_SUCCESS;\r
+       uint32_t *p_lcl_data;\r
+       uint32_t        i;\r
+\r
+    if (offset & 0x3)\r
+    {\r
+        //Address should be 4-bytes aligned\r
+        return STATUS_INVALID_PARAMETER;\r
+    }\r
+    if (len & 0x3)\r
+    {\r
+        //Length should be 4-bytes aligned\r
+        return STATUS_INVALID_PARAMETER;\r
+    }\r
+    p_lcl_data = (uint32_t *)p_data;\r
+    \r
+       for ( i=0; i < (len >> 2); i++)\r
+    {                                  \r
+        if ( (status = fw_flash_read_data( p_BusInterface, p_lcl_data, offset, sizeof(uint32_t) )) != STATUS_SUCCESS )\r
+            return status;\r
+        offset += 4;\r
+               p_lcl_data++;\r
+    }\r
+    return STATUS_SUCCESS;\r
+} // Flash::flash_read\r
+\r
+static NTSTATUS\r
+fw_flash_writebuf(\r
+               IN              BUS_INTERFACE_STANDARD                  *p_BusInterface,\r
+               IN              PVOID                                                   p_buffer,\r
+               IN              ULONG                                                   offset,\r
+               IN              ULONG POINTER_ALIGNMENT                 length )\r
+{\r
+       NTSTATUS status = STATUS_SUCCESS;\r
+       uint32_t        i;\r
+       uint8_t *p_data = (uint8_t *)p_buffer;\r
+\r
+       for ( i = 0; i < length;  i++ )\r
+       {\r
+               status = fw_flash_write_data (p_BusInterface, p_data, offset, 1 );\r
+               if (status != STATUS_SUCCESS )\r
+                       return status;\r
+               p_data++;\r
+               offset++;\r
+       }\r
+       return status;\r
+}\r
+\r
+static NTSTATUS\r
+fw_flash_init(\r
+               IN              BUS_INTERFACE_STANDARD                  *p_BusInterface  )\r
+{\r
+       uint32_t dir;\r
+    uint32_t pol;\r
+    uint32_t mod;\r
+\r
+    uint32_t cnt=0;\r
+    uint32_t data;\r
+       NTSTATUS status = STATUS_SUCCESS;\r
+       uint32_t        semaphore = 0;\r
+    \r
+       while ( !semaphore )\r
+       {\r
+               status = fw_access_pciconf(p_BusInterface, FW_READ , &data, SEMAP63, 4);\r
+               if ( status != STATUS_SUCCESS )\r
+                       break;\r
+               if( !data )\r
+               {\r
+                       semaphore = 1;\r
+                       break;\r
+               }\r
+        if (++cnt > 5000 )\r
+        {\r
+            break;\r
+        }\r
+    } \r
+\r
+       if ( !semaphore )\r
+       {\r
+               return STATUS_NOT_SUPPORTED;\r
+       }\r
+\r
+    // Save old values\r
+    \r
+       status = fw_access_pciconf(p_BusInterface, FW_READ , &old_dir,GPIO_DIR_L , 4);\r
+       if ( status == STATUS_SUCCESS )\r
+               status = fw_access_pciconf(p_BusInterface, FW_READ , &old_pol,GPIO_POL_L , 4);\r
+       if ( status == STATUS_SUCCESS )\r
+               status = fw_access_pciconf(p_BusInterface, FW_READ , &old_mod,GPIO_MOD_L , 4);\r
+       if ( status == STATUS_SUCCESS )\r
+               status = fw_access_pciconf(p_BusInterface, FW_READ , &old_dat,GPIO_DAT_L , 4);\r
+\r
+   // Set Direction=1, Polarity=0, Mode=0 for 3 GPIO lower bits\r
+    dir = old_dir | 0x70;\r
+    pol = old_pol & ~0x70;\r
+    mod = old_mod & ~0x70;\r
+\r
+       status = fw_access_pciconf(p_BusInterface, FW_WRITE , &dir,GPIO_DIR_L , 4);\r
+       if ( status == STATUS_SUCCESS )\r
+               status = fw_access_pciconf(p_BusInterface, FW_WRITE , &pol,GPIO_POL_L , 4);\r
+       if ( status == STATUS_SUCCESS )\r
+               status = fw_access_pciconf(p_BusInterface, FW_WRITE , &mod,GPIO_MOD_L , 4);\r
+       if ( status == STATUS_SUCCESS )\r
+               // Set CPUMODE\r
+               status = fw_access_pciconf(p_BusInterface, FW_READ , &data, CPUMODE, 4);\r
+    if ( status == STATUS_SUCCESS )\r
+       {\r
+               data &= ~CPUMODE_MSK;\r
+               data |= 1 << CPUMODE_SHIFT;\r
+               status = fw_access_pciconf(p_BusInterface, FW_WRITE , &data, CPUMODE, 4);\r
+       }\r
+       if ( status == STATUS_SUCCESS )\r
+       {\r
+               // Reset flash\r
+               data = 0xf0;\r
+               status = fw_flash_write_data(p_BusInterface, &data, 0x0, 4);\r
+       }\r
+       return status;\r
+}\r
+\r
+static NTSTATUS\r
+fw_flash_deinit(\r
+       IN              BUS_INTERFACE_STANDARD  *p_BusInterface )\r
+{\r
+       uint32_t data = 0;\r
+       NTSTATUS status = STATUS_SUCCESS;\r
+    \r
+       status = fw_set_bank(p_BusInterface, 0);\r
+       if ( status == STATUS_SUCCESS )\r
+               // Restore original values\r
+               status = fw_access_pciconf(p_BusInterface, FW_WRITE , &old_dir,GPIO_DIR_L , 4);\r
+       if ( status == STATUS_SUCCESS )\r
+               status = fw_access_pciconf(p_BusInterface, FW_WRITE , &old_pol,GPIO_POL_L , 4);\r
+       if ( status == STATUS_SUCCESS )\r
+               status = fw_access_pciconf(p_BusInterface, FW_WRITE , &old_mod,GPIO_MOD_L , 4);\r
+       if ( status == STATUS_SUCCESS )\r
+               status = fw_access_pciconf(p_BusInterface, FW_WRITE , &old_dat,GPIO_DAT_L , 4);\r
+       if ( status == STATUS_SUCCESS )\r
+               // Free GPIO Semaphore\r
+               status = fw_access_pciconf(p_BusInterface, FW_WRITE , &data, SEMAP63, 4);\r
+       return status;\r
+}\r
+\r
+static NTSTATUS\r
+fw_set_bank(\r
+       IN              BUS_INTERFACE_STANDARD  *p_BusInterface,\r
+       IN               uint32_t bank )\r
+{\r
+       NTSTATUS  status = STATUS_SUCCESS;\r
+       uint32_t        data = ( (uint32_t)0x70 << 24 );\r
+       uint32_t        mask = ((bank >> (BANK_SHIFT-4)) << 24 );\r
+\r
+       status = fw_access_pciconf(p_BusInterface, FW_WRITE , &data, GPIO_DATACLEAR_L, 4);\r
+       if (status == STATUS_SUCCESS)\r
+       {\r
+               // A1\r
+               data &= mask;\r
+               //data |= mask; // for A0\r
+               status = fw_access_pciconf(p_BusInterface, FW_WRITE , &data, GPIO_DATASET_L, 4);\r
+       }\r
+       return status;\r
+}\r
diff --git a/trunk/hw/mthca/kernel/hca_driver.h b/trunk/hw/mthca/kernel/hca_driver.h
new file mode 100644 (file)
index 0000000..3099c4f
--- /dev/null
@@ -0,0 +1,227 @@
+/*\r
+ * Copyright (c) 2005 SilverStorm Technologies.  All rights reserved.\r
+ *\r
+ * This software is available to you under the OpenIB.org BSD license\r
+ * below:\r
+ *\r
+ *     Redistribution and use in source and binary forms, with or\r
+ *     without modification, are permitted provided that the following\r
+ *     conditions are met:\r
+ *\r
+ *      - Redistributions of source code must retain the above\r
+ *        copyright notice, this list of conditions and the following\r
+ *        disclaimer.\r
+ *\r
+ *      - Redistributions in binary form must reproduce the above\r
+ *        copyright notice, this list of conditions and the following\r
+ *        disclaimer in the documentation and/or other materials\r
+ *        provided with the distribution.\r
+ *\r
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,\r
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\r
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND\r
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS\r
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN\r
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN\r
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\r
+ * SOFTWARE.\r
+ *\r
+ * $Id: hca_driver.h 46 2005-05-30 17:55:53Z sleybo $\r
+ */\r
+\r
+\r
+#if !defined( _HCA_DRIVER_H_ )\r
+#define _HCA_DRIVER_H_\r
+\r
+\r
+#include <complib/cl_types.h>\r
+#include <complib/cl_pnp_po.h>\r
+#include <complib/cl_mutex.h>\r
+#include <iba/ib_ci_ifc.h>\r
+#include "hca_data.h"\r
+#include "mt_l2w.h"\r
+#include "hca_debug.h"\r
+\r
+\r
+#include "hca_pnp.h"\r
+#include "hca_pci.h"\r
+\r
+#if !defined(FILE_DEVICE_INFINIBAND) // Not defined in WXP DDK\r
+#define FILE_DEVICE_INFINIBAND          0x0000003B\r
+#endif\r
+\r
+/****s* HCA/hca_reg_state_t\r
+* NAME\r
+*      hca_reg_state_t\r
+*\r
+* DESCRIPTION\r
+*      State for tracking registration with AL.  This state is independent of the\r
+*      device PnP state, and both are used to properly register with AL.\r
+*\r
+* SYNOPSIS\r
+*/\r
+typedef enum _hca_reg_state\r
+{\r
+       HCA_SHUTDOWN,\r
+       HCA_ADDED,\r
+       HCA_STARTED,\r
+       HCA_REGISTERED\r
+\r
+}      hca_reg_state_t;\r
+/*\r
+* VALUES\r
+*      HCA_SHUTDOWN\r
+*              Cleaning up.\r
+*\r
+*      HCA_ADDED\r
+*              AddDevice was called and successfully registered for interface\r
+*              notifications.\r
+*\r
+*      HCA_STARTED\r
+*              IRP_MN_START_DEVICE was called.  The HCA is fully functional.\r
+*\r
+*      HCA_REGISTERED\r
+*              Fully functional and registered with the bus root.\r
+*********/\r
+\r
+\r
+typedef enum _hca_bar_type\r
+{\r
+       HCA_BAR_TYPE_HCR,\r
+       HCA_BAR_TYPE_UAR,\r
+       HCA_BAR_TYPE_DDR,\r
+       HCA_BAR_TYPE_MAX\r
+\r
+}      hca_bar_type_t;\r
+\r
+\r
+typedef struct _hca_bar\r
+{\r
+       uint64_t                        phys;\r
+       void                            *virt;\r
+       SIZE_T                          size;\r
+\r
+}      hca_bar_t;\r
+\r
+\r
+typedef struct _hca_dev_ext\r
+{\r
+       /* -------------------------------------------------\r
+       *               PNP DATA         \r
+       * ------------------------------------------------ */\r
+       cl_pnp_po_ext_t cl_ext;                                         /* COMPLIB PnP object */\r
+       void                                    *       pnp_ifc_entry;                  /* Notification entry for PnP interface events. */\r
+       void                                    *       pnp_target_entry;       /* Notification entry for PnP target events. */\r
+       PNP_DEVICE_STATE                        pnpState; /* state for PnP Manager */\r
+\r
+       /* -------------------------------------------------\r
+       *               POWER MANAGER DATA       \r
+       * ------------------------------------------------ */\r
+       /* Cache of the system to device power states. */\r
+       DEVICE_POWER_STATE              DevicePower[PowerSystemMaximum];\r
+       DEVICE_POWER_STATE              PowerState;                     /* state for Power Manager */\r
+       PIO_WORKITEM                                    pPoWorkItem;\r
+\r
+       /* -------------------------------------------------\r
+       *               IB_AL DATA       \r
+       * ------------------------------------------------ */\r
+       ib_ci_ifc_t                                                     ci_ifc;                         /* Interface for the lower edge of the IB_AL device. */\r
+       hca_reg_state_t                                 state;                          /* State for tracking registration with AL */\r
+       DEVICE_OBJECT                           *       p_al_dev;               /* IB_AL FDO */\r
+       FILE_OBJECT                                     *       p_al_file_obj;  /* IB_AL file object */\r
+\r
+       /* -------------------------------------------------\r
+       *               LOW LEVEL DRIVER'S DATA\r
+       * ------------------------------------------------ */\r
+       mlnx_hca_t                                                      hca;\r
+\r
+       /* -------------------------------------------------\r
+       *               OS DATA          \r
+       * ------------------------------------------------ */\r
+       hca_bar_t                                                       bar[HCA_BAR_TYPE_MAX];          /* HCA memory bars */\r
+       CM_PARTIAL_RESOURCE_DESCRIPTOR  interruptInfo;  /* HCA interrupt resources */\r
+       PKINTERRUPT                                             int_obj;                                                                                /* HCA interrupt object */\r
+       spinlock_t                                                      isr_lock;                                                                       /* lock for the ISR */\r
+       ULONG                                                                   bus_number;                                                     /* HCA's bus number */\r
+\r
+       /* -------------------------------------------------\r
+       *               VARIABLES        \r
+       * ------------------------------------------------ */\r
+       DMA_ADAPTER                             *       p_dma_adapter;          /* HCA adapter object */\r
+       ULONG                                                                   n_map_regs;                     /* num of allocated adapter map registers */\r
+       PCI_COMMON_CONFIG               hcaConfig;                              /* saved HCA PCI configuration header */\r
+       int                                                                             hca_hidden;                     /* flag: when set - no attached DDR memory */\r
+       \r
+}      hca_dev_ext_t;\r
+\r
+#define EXT_FROM_HOB(hob_p)                    (container_of(hob_p,  hca_dev_ext_t, hca.hob))\r
+#define IBDEV_FROM_HOB(hob_p)          (&EXT_FROM_HOB(hob_p)->hca.mdev->ib_dev)\r
+#define HOBUL_FROM_HOB(hob_p)          (&EXT_FROM_HOB(hob_p)->hca.hobul)\r
+#define HOB_FROM_IBDEV(dev_p)          ((mlnx_hob_t *)&dev_p->mdev->ext->hca.hob)\r
+\r
+\r
+\r
+\r
+/***********************************\r
+Firmware Update definitions\r
+***********************************/\r
+#define PCI_CONF_ADDR  (0x00000058)\r
+#define PCI_CONF_DATA  (0x0000005c)\r
+#define FLASH_OFFSET   (0x000f01a4)\r
+#define READ_BIT               (1<<29)\r
+#define WRITE_BIT              (2<<29)\r
+#define ADDR_MSK               (0x0007ffff)\r
+#define CMD_MASK               (0xe0000000)\r
+#define BANK_SHIFT             (19)\r
+#define BANK_MASK              (0xfff80000)\r
+#define MAX_FLASH_SIZE (0x80000) // 512K\r
+\r
+#define SEMAP63                                (0xf03fc)\r
+#define GPIO_DIR_L                     (0xf008c)\r
+#define GPIO_POL_L                     (0xf0094)\r
+#define GPIO_MOD_L                     (0xf009c)\r
+#define GPIO_DAT_L                     (0xf0084)\r
+#define GPIO_DATACLEAR_L       (0xf00d4)\r
+#define GPIO_DATASET_L         (0xf00dc)\r
+\r
+#define CPUMODE                                (0xf0150)\r
+#define CPUMODE_MSK                    (0xc0000000UL)\r
+#define CPUMODE_SHIFT          (30)\r
+\r
+/* Definitions intended to become shared with UM. Later... */\r
+#define FW_READ                        0x00\r
+#define FW_WRITE               0x01\r
+#define FW_READ_CMD            0x08\r
+#define FW_WRITE_CMD   0x09\r
+#define FW_OPEN_IF             0xe7\r
+#define FW_CLOSE_IF            0x7e\r
+\r
+#define FW_SIGNATURE           (0x5a445a44)\r
+#define FW_SECT_SIZE           (0x10000)\r
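+\r
+/*\r
+ * Minimal illustrative sketch (not referenced by the driver): how the flash\r
+ * definitions above compose the command word that fw_flash_read_data() and\r
+ * fw_flash_write_data() write to FLASH_OFFSET before polling CMD_MASK.\r
+ * The helper name below is hypothetical and exists only for illustration.\r
+ */\r
+static inline uint32_t fw_flash_make_cmd( uint32_t cmd_bit, uint32_t addr )\r
+{\r
+       /* cmd_bit is READ_BIT or WRITE_BIT; only the ADDR_MSK part of addr is\r
+        * used here - the bank bits (BANK_MASK) are selected via fw_set_bank(). */\r
+       return ( cmd_bit | (addr & ADDR_MSK) );\r
+}\r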
+\r
+static inline ib_api_status_t errno_to_iberr(int err)\r
+{\r
+#define MAP_ERR(err,ibstatus)  case err: ib_status = ibstatus; break\r
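+       /* For example, MAP_ERR( ENOENT, IB_NOT_FOUND ) expands to:\r
+        *     case ENOENT: ib_status = IB_NOT_FOUND; break;\r
+        */\r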
+       ib_api_status_t ib_status = IB_UNKNOWN_ERROR;\r
+       if (err < 0)\r
+               err = -err;\r
+       switch (err) {\r
+               MAP_ERR( ENOENT, IB_NOT_FOUND );\r
+               MAP_ERR( EINTR, IB_INTERRUPTED );\r
+               MAP_ERR( EAGAIN, IB_RESOURCE_BUSY );\r
+               MAP_ERR( ENOMEM, IB_INSUFFICIENT_MEMORY );\r
+               MAP_ERR( EACCES, IB_INVALID_PERMISSION );\r
+               MAP_ERR( EFAULT, IB_ERROR );\r
+               MAP_ERR( EBUSY, IB_RESOURCE_BUSY );\r
+               MAP_ERR( ENODEV, IB_UNSUPPORTED );\r
+               MAP_ERR( EINVAL, IB_INVALID_PARAMETER );\r
+               MAP_ERR( ENOSYS, IB_UNSUPPORTED );\r
+               default:\r
+                       //HCA_PRINT(TRACE_LEVEL_ERROR, HCA_DBG_SHIM,\r
+                       //      "Unmapped errno (%d)\n", err);\r
+                       break;\r
+       }\r
+       return ib_status;\r
+}\r
+\r
+#endif /* !defined( _HCA_DRIVER_H_ ) */\r
diff --git a/trunk/hw/mthca/kernel/hca_mcast.c b/trunk/hw/mthca/kernel/hca_mcast.c
new file mode 100644 (file)
index 0000000..5813b21
--- /dev/null
@@ -0,0 +1,170 @@
+/*\r
+ * Copyright (c) 2005 SilverStorm Technologies.  All rights reserved.\r
+ * Copyright (c) 2004-2005 Mellanox Technologies, Inc. All rights reserved. \r
+ *\r
+ * This software is available to you under the OpenIB.org BSD license\r
+ * below:\r
+ *\r
+ *     Redistribution and use in source and binary forms, with or\r
+ *     without modification, are permitted provided that the following\r
+ *     conditions are met:\r
+ *\r
+ *      - Redistributions of source code must retain the above\r
+ *        copyright notice, this list of conditions and the following\r
+ *        disclaimer.\r
+ *\r
+ *      - Redistributions in binary form must reproduce the above\r
+ *        copyright notice, this list of conditions and the following\r
+ *        disclaimer in the documentation and/or other materials\r
+ *        provided with the distribution.\r
+ *\r
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,\r
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\r
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND\r
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS\r
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN\r
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN\r
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\r
+ * SOFTWARE.\r
+ *\r
+ * $Id: hca_mcast.c 46 2005-05-30 17:55:53Z sleybo $\r
+ */\r
+\r
+\r
+#include <iba/ib_ci.h>\r
+#include <complib/comp_lib.h>\r
+\r
+#include "hca_driver.h"\r
+\r
+#if defined(EVENT_TRACING)\r
+#ifdef offsetof\r
+#undef offsetof\r
+#endif\r
+#include "hca_mcast.tmh"\r
+#endif\r
+#include "mthca_dev.h"\r
+\r
+/*\r
+*      Multicast Support Verbs.\r
+*/\r
+ib_api_status_t\r
+mlnx_attach_mcast (\r
+       IN              const   ib_qp_handle_t                          h_qp,\r
+       IN              const   ib_gid_t                                        *p_mcast_gid,\r
+       IN              const   uint16_t                                        mcast_lid,\r
+               OUT                     ib_mcast_handle_t                       *ph_mcast,\r
+       IN      OUT                     ci_umv_buf_t                            *p_umv_buf )\r
+{\r
+       int err;\r
+       ib_api_status_t         status;\r
+       struct ib_qp *ib_qp_p = (struct ib_qp *)h_qp;\r
+       mlnx_mcast_t *mcast_p;\r
+\r
+       HCA_ENTER(HCA_DBG_SHIM);\r
+\r
+       // sanity checks\r
+       if( p_umv_buf && p_umv_buf->command ) {\r
+               HCA_PRINT(TRACE_LEVEL_ERROR, HCA_DBG_SHIM, ("User mode is not supported yet\n"));\r
+               status = IB_UNSUPPORTED;\r
+               goto err_user_unsupported;\r
+       }\r
+       if (!p_mcast_gid || !ph_mcast) {\r
+               status = IB_INVALID_PARAMETER;\r
+               goto err_invalid_param;\r
+       }\r
+\r
+       // allocate structure\r
+       mcast_p = (mlnx_mcast_t*)kmalloc(sizeof *mcast_p, GFP_ATOMIC );\r
+       if (mcast_p == NULL) {\r
+               status = IB_INSUFFICIENT_MEMORY;\r
+               goto err_no_mem;\r
+       }\r
+       \r
+       // attach to mcast group\r
+       if( p_umv_buf && p_umv_buf->command ) {\r
+               //TODO: call uverbs\r
+       }\r
+       else {\r
+               err = ibv_attach_mcast(ib_qp_p, (union ib_gid *)p_mcast_gid, (u16)mcast_lid);\r
+               if (err) {\r
+                       HCA_PRINT(TRACE_LEVEL_ERROR  ,HCA_DBG_SHIM  ,("ibv_attach_mcast failed (%d)\n", err));\r
+                       status = errno_to_iberr(err);\r
+                       goto err_attach;\r
+               }\r
+       }\r
+\r
+       // fill the structure\r
+       mcast_p->ib_qp_p = ib_qp_p;\r
+       mcast_p->mcast_lid = mcast_lid;\r
+       RtlCopyMemory(mcast_p->mcast_gid.raw, p_mcast_gid->raw, sizeof *p_mcast_gid);\r
+       HCA_PRINT(TRACE_LEVEL_WARNING, HCA_DBG_SHIM, ("mcasth %p, qp_p %p, mlid %hx, mgid %I64x`%I64x\n", \r
+               mcast_p, mcast_p->ib_qp_p, mcast_p->mcast_lid,\r
+               *(uint64_t*)&mcast_p->mcast_gid.raw[0],\r
+               *(uint64_t*)&mcast_p->mcast_gid.raw[8] ));\r
+       \r
+       // return the result\r
+       if (ph_mcast) *ph_mcast = (ib_mcast_handle_t)mcast_p;\r
+\r
+       status = IB_SUCCESS;\r
+       goto end;\r
+               \r
+err_attach: \r
+       kfree(mcast_p);\r
+err_no_mem:    \r
+err_invalid_param:\r
+err_user_unsupported:\r
+end:           \r
+       HCA_PRINT_EXIT(TRACE_LEVEL_ERROR , HCA_DBG_SHIM ,("completes with ERROR status %s\n", ib_get_err_str(status)));\r
+       return status;\r
+}\r
+\r
+ib_api_status_t\r
+mlnx_detach_mcast (\r
+       IN              const   ib_mcast_handle_t                       h_mcast)\r
+{\r
+       ib_api_status_t         status;\r
+       int err;\r
+       mlnx_mcast_t *mcast_p = (mlnx_mcast_t*)h_mcast;\r
+\r
+       // sanity check\r
+       if (!mcast_p || !mcast_p->ib_qp_p) {\r
+               status = IB_INVALID_PARAMETER;\r
+               goto err_invalid_param;\r
+       }\r
+       HCA_PRINT(TRACE_LEVEL_WARNING, HCA_DBG_SHIM,("mcasth %p, qp_p %p, mlid %hx, mgid %I64x`%I64x\n", \r
+               mcast_p, mcast_p->ib_qp_p, mcast_p->mcast_lid,\r
+               *(uint64_t*)&mcast_p->mcast_gid.raw[0],\r
+               *(uint64_t*)&mcast_p->mcast_gid.raw[8] ));\r
+       \r
+       // detach\r
+       if( mcast_p->ib_qp_p->ucontext) {\r
+               //TODO: call uverbs\r
+       }\r
+       else {\r
+               err = ibv_detach_mcast( mcast_p->ib_qp_p, \r
+                       (union ib_gid *)&mcast_p->mcast_gid, mcast_p->mcast_lid );\r
+               if (err) {\r
+                       HCA_PRINT(TRACE_LEVEL_ERROR  ,HCA_DBG_SHIM  ,("ibv_detach_mcast failed (%d)\n", err));\r
+                       status = errno_to_iberr(err);\r
+                       goto err_detach_mcast;\r
+               }\r
+       }\r
+\r
+       status = IB_SUCCESS;\r
+\r
+err_detach_mcast:\r
+       kfree(mcast_p);\r
+err_invalid_param:     \r
+       HCA_PRINT_EXIT(TRACE_LEVEL_ERROR , HCA_DBG_SHIM ,("completes with ERROR status %s\n", ib_get_err_str(status)));\r
+       return status;\r
+}\r
+\r
+\r
+void\r
+mlnx_mcast_if(\r
+       IN      OUT                     ci_interface_t                          *p_interface )\r
+{\r
+       p_interface->attach_mcast = mlnx_attach_mcast;\r
+       p_interface->detach_mcast = mlnx_detach_mcast;\r
+}\r
diff --git a/trunk/hw/mthca/kernel/hca_memory.c b/trunk/hw/mthca/kernel/hca_memory.c
new file mode 100644 (file)
index 0000000..96fbad0
--- /dev/null
@@ -0,0 +1,396 @@
+/*\r
+ * Copyright (c) 2005 SilverStorm Technologies.  All rights reserved.\r
+ * Copyright (c) 2004-2005 Mellanox Technologies, Inc. All rights reserved. \r
+ *\r
+ * This software is available to you under the OpenIB.org BSD license\r
+ * below:\r
+ *\r
+ *     Redistribution and use in source and binary forms, with or\r
+ *     without modification, are permitted provided that the following\r
+ *     conditions are met:\r
+ *\r
+ *      - Redistributions of source code must retain the above\r
+ *        copyright notice, this list of conditions and the following\r
+ *        disclaimer.\r
+ *\r
+ *      - Redistributions in binary form must reproduce the above\r
+ *        copyright notice, this list of conditions and the following\r
+ *        disclaimer in the documentation and/or other materials\r
+ *        provided with the distribution.\r
+ *\r
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,\r
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\r
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND\r
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS\r
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN\r
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN\r
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\r
+ * SOFTWARE.\r
+ *\r
+ * $Id: hca_memory.c 46 2005-05-30 17:55:53Z sleybo $\r
+ */\r
+\r
+\r
+#include "hca_driver.h"\r
+#include "hca_utils.h"\r
+\r
+#if defined(EVENT_TRACING)\r
+#ifdef offsetof\r
+#undef offsetof\r
+#endif\r
+#include "hca_memory.tmh"\r
+#endif\r
+\r
+/*\r
+ *     Memory Management Verbs.\r
+ */\r
+\r
+ib_api_status_t\r
+mlnx_register_mr (\r
+       IN              const   ib_pd_handle_t                          h_pd,\r
+       IN              const   ib_mr_create_t                          *p_mr_create,\r
+       OUT                     net32_t* const                          p_lkey,\r
+       OUT                     net32_t* const                          p_rkey,\r
+       OUT                     ib_mr_handle_t                          *ph_mr,\r
+       IN                              boolean_t                                       um_call )\r
+{\r
+       ib_api_status_t         status;\r
+       int err;\r
+       struct ib_mr *mr_p;\r
+       struct mthca_mr *mro_p;\r
+       struct ib_pd *ib_pd_p = (struct ib_pd *)h_pd;\r
+       \r
+       HCA_ENTER(HCA_DBG_MEMORY);\r
+\r
+       // sanity checks\r
+       if( !cl_is_blockable() ) {\r
+               status = IB_UNSUPPORTED;\r
+               goto err_unsupported;\r
+       } \r
+       if (!p_mr_create || 0 == p_mr_create->length) {\r
+               HCA_PRINT(TRACE_LEVEL_WARNING ,HCA_DBG_MEMORY ,("invalid attributes"));\r
+               status = IB_INVALID_PARAMETER;\r
+               goto err_invalid_parm; \r
+       }\r
+       /*\r
+        * Local write permission is required if remote write or\r
+        * remote atomic permission is also requested.\r
+        */\r
+       if (p_mr_create->access_ctrl & (IB_AC_RDMA_WRITE | IB_AC_ATOMIC) &&\r
+           !(p_mr_create->access_ctrl & IB_AC_LOCAL_WRITE)) {\r
+               HCA_PRINT(TRACE_LEVEL_WARNING ,HCA_DBG_MEMORY ,("invalid access rights"));\r
+               status = IB_INVALID_PERMISSION;\r
+               goto err_invalid_access; \r
+       }               \r
+\r
+#ifdef WIN_TO_BE_REMOVED\r
+       // lock buffer for user\r
+       if (um_call) {\r
+               err = iobuf_register(\r
+                       (UINT_PTR)p_mr_create->vaddr,\r
+                       p_mr_create->length,\r
+                       um_call,\r
+                       (int)p_mr_create->access_ctrl,\r
+                       &iobuf );\r
+               if (err) {\r
+                       HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_MEMORY ,("iobuf_register failed(%d)",err));\r
+                       status = errno_to_iberr(err);\r
+                       goto err_lock;\r
+               }\r
+       }\r
+       \r
+       // prepare parameters\r
+       RtlZeroMemory(&region, sizeof(region));\r
+       RtlZeroMemory(&umv_buf, sizeof(umv_buf));\r
+       region.user_base = (u64)p_mr_create->vaddr;\r
+       region.virt_base = (u64)p_mr_create->vaddr;\r
+       region.page_size = PAGE_SIZE;\r
+       region.length = p_mr_create->length;\r
+       region.offset = p_mr_create->vaddr & (PAGE_SIZE - 1);\r
+       //TODO: end filling region (add list of chunks)\r
+       //TODO: fill umv_buf\r
+#endif \r
+\r
+       // register mr \r
+       mr_p = ibv_reg_mr(ib_pd_p, map_qp_ibal_acl(p_mr_create->access_ctrl), \r
+               p_mr_create->vaddr, p_mr_create->length, \r
+               (uint64_t)(ULONG_PTR)(void*)p_mr_create->vaddr, um_call );\r
+       if (IS_ERR(mr_p)) {\r
+               err = PTR_ERR(mr_p);\r
+               HCA_PRINT(TRACE_LEVEL_ERROR, HCA_DBG_MEMORY,\r
+                       ("ibv_reg_mr failed (%d)\n", err));\r
+               status = errno_to_iberr(err);\r
+               goto err_reg_mr;\r
+       }\r
+\r
+       // results\r
+       mro_p = (struct mthca_mr *)mr_p;\r
+#ifdef WIN_TO_BE_REMOVED\r
+       mro_p->iobuf = iobuf;\r
+#endif\r
+       *p_lkey = mr_p->lkey;\r
+       *p_rkey = mr_p->rkey;\r
+       if (ph_mr)      *ph_mr = (ib_mr_handle_t)mr_p;\r
+       status = IB_SUCCESS;\r
+\r
+err_reg_mr:\r
+#ifdef WIN_TO_BE_REMOVED\r
+       if (um_call) \r
+               iobuf_deregister(&iobuf );\r
+err_lock:\r
+#endif \r
+err_invalid_access:    \r
+err_invalid_parm:\r
+err_unsupported:\r
+       HCA_PRINT_EXIT(TRACE_LEVEL_ERROR ,HCA_DBG_MEMORY ,("completes with ERROR status %s\n", ib_get_err_str(status)));\r
+       return status;\r
+}\r
+\r
+ib_api_status_t\r
+mlnx_register_pmr (\r
+       IN              const   ib_pd_handle_t                          h_pd,\r
+       IN              const   ib_phys_create_t* const         p_pmr_create,\r
+       IN      OUT                     uint64_t* const                         p_vaddr,\r
+               OUT                     net32_t* const                          p_lkey,\r
+               OUT                     net32_t* const                          p_rkey,\r
+               OUT                     ib_mr_handle_t* const           ph_mr,\r
+       IN                              boolean_t                                       um_call )\r
+{\r
+       ib_api_status_t         status;\r
+       int err;\r
+       struct ib_mr *mr_p;\r
+       struct ib_phys_buf *buffer_list;\r
+       struct ib_pd *ib_pd_p = (struct ib_pd *)h_pd;\r
+       \r
+       UNUSED_PARAM( um_call );\r
+\r
+       HCA_ENTER(HCA_DBG_SHIM);\r
+\r
+       // sanity checks\r
+       if( !cl_is_blockable() ) {\r
+               status = IB_UNSUPPORTED;\r
+               goto err_unsupported;\r
+       }       \r
+       if (!p_vaddr || !p_pmr_create ||\r
+               0 == p_pmr_create->length ) {\r
+               status = IB_INVALID_PARAMETER;\r
+               goto err_invalid_parm; \r
+       }\r
+\r
+       // prepare parameters\r
+       buffer_list = (void*)p_pmr_create->range_array;\r
+       //NB: p_pmr_create->buf_offset is not used, i.e. the region is assumed to be page-aligned\r
+       //NB: p_pmr_create->hca_page_size is not used, i.e. it is assumed to always be the same\r
+       \r
+       // register pmr \r
+       if (p_pmr_create->length == (uint64_t)-1LL) \r
+               mr_p = ibv_get_dma_mr(ib_pd_p,  map_qp_ibal_acl(p_pmr_create->access_ctrl) );\r
+       else\r
+               mr_p = ibv_reg_phys_mr(ib_pd_p, buffer_list, p_pmr_create->num_ranges, \r
+                       map_qp_ibal_acl(p_pmr_create->access_ctrl), p_vaddr );\r
+       if (IS_ERR(mr_p)) {\r
+               err = PTR_ERR(mr_p);\r
+               HCA_PRINT(TRACE_LEVEL_ERROR, HCA_DBG_MEMORY,\r
+                       ("ibv_reg_phys_mr failed (%d)\n", err));\r
+               status = errno_to_iberr(err);\r
+               goto err_reg_phys_mr;\r
+       }\r
+\r
+       // results\r
+       if (ph_mr)      *ph_mr = (ib_mr_handle_t)mr_p;\r
+       *p_lkey = mr_p->lkey;\r
+       *p_rkey = mr_p->rkey;\r
+       //NB:  p_vaddr was not changed\r
+       status = IB_SUCCESS;\r
+\r
+err_reg_phys_mr:\r
+err_invalid_parm:\r
+err_unsupported:\r
+       HCA_PRINT_EXIT(TRACE_LEVEL_ERROR  , HCA_DBG_MEMORY  ,("completes with ERROR status %s\n", ib_get_err_str(status)));\r
+       return status;\r
+       \r
+}\r
+\r
+ib_api_status_t\r
+mlnx_query_mr (\r
+       IN              const   ib_mr_handle_t                          h_mr,\r
+               OUT                     ib_mr_attr_t                            *p_mr_query )\r
+{\r
+       UNREFERENCED_PARAMETER(h_mr);\r
+       UNREFERENCED_PARAMETER(p_mr_query);\r
+       HCA_PRINT(TRACE_LEVEL_ERROR  , HCA_DBG_MEMORY  ,("mlnx_query_mr not implemented\n"));\r
+       return IB_UNSUPPORTED;\r
+}\r
+\r
+\r
+ib_api_status_t\r
+mlnx_modify_mr (\r
+       IN              const   ib_mr_handle_t                          h_mr,\r
+       IN              const   ib_mr_mod_t                                     mem_modify_req,\r
+       IN              const   ib_mr_create_t                          *p_mr_create,\r
+               OUT                     uint32_t                                        *p_lkey,\r
+               OUT                     uint32_t                                        *p_rkey,\r
+       IN              const   ib_pd_handle_t                          h_pd OPTIONAL,\r
+       IN                              boolean_t                                       um_call )\r
+{\r
+       UNREFERENCED_PARAMETER(h_mr);\r
+       UNREFERENCED_PARAMETER(mem_modify_req);\r
+       UNREFERENCED_PARAMETER(p_mr_create);\r
+       UNREFERENCED_PARAMETER(p_lkey);\r
+       UNREFERENCED_PARAMETER(p_rkey);\r
+       UNREFERENCED_PARAMETER(h_pd);\r
+       UNREFERENCED_PARAMETER(um_call);\r
+       HCA_PRINT(TRACE_LEVEL_ERROR  , HCA_DBG_MEMORY  ,("mlnx_modify_mr not implemented\n"));\r
+       return IB_UNSUPPORTED;\r
+}\r
+\r
+\r
+ib_api_status_t\r
+mlnx_modify_pmr (\r
+       IN              const   ib_mr_handle_t                          h_mr,\r
+       IN              const   ib_mr_mod_t                                     mem_modify_req,\r
+       IN              const   ib_phys_create_t* const         p_pmr_create,\r
+       IN      OUT                     uint64_t* const                         p_vaddr,\r
+               OUT                     uint32_t* const                         p_lkey,\r
+               OUT                     uint32_t* const                         p_rkey,\r
+       IN              const   ib_pd_handle_t                          h_pd OPTIONAL,\r
+       IN                              boolean_t                                       um_call )\r
+{\r
+       UNREFERENCED_PARAMETER(h_mr);\r
+       UNREFERENCED_PARAMETER(mem_modify_req);\r
+       UNREFERENCED_PARAMETER(p_pmr_create);\r
+       UNREFERENCED_PARAMETER(p_vaddr);\r
+       UNREFERENCED_PARAMETER(p_lkey);\r
+       UNREFERENCED_PARAMETER(p_rkey);\r
+       UNREFERENCED_PARAMETER(h_pd);\r
+       UNREFERENCED_PARAMETER(um_call);\r
+       HCA_PRINT(TRACE_LEVEL_ERROR  , HCA_DBG_MEMORY  ,("mlnx_modify_pmr not implemented\n"));\r
+       return IB_UNSUPPORTED;\r
+}\r
+\r
+ib_api_status_t\r
+mlnx_register_smr (\r
+       IN              const   ib_mr_handle_t                          h_mr,\r
+       IN              const   ib_pd_handle_t                          h_pd,\r
+       IN              const   ib_access_t                                     access_ctrl,\r
+       IN      OUT                     uint64_t* const                         p_vaddr,\r
+               OUT                     net32_t* const                          p_lkey,\r
+               OUT                     net32_t* const                          p_rkey,\r
+               OUT                     ib_mr_handle_t* const           ph_mr,\r
+       IN                              boolean_t                                       um_call )\r
+{\r
+       UNREFERENCED_PARAMETER(h_mr);\r
+       UNREFERENCED_PARAMETER(h_pd);\r
+       UNREFERENCED_PARAMETER(access_ctrl);\r
+       UNREFERENCED_PARAMETER(p_vaddr);\r
+       UNREFERENCED_PARAMETER(p_lkey);\r
+       UNREFERENCED_PARAMETER(p_rkey);\r
+       UNREFERENCED_PARAMETER(ph_mr);\r
+       UNREFERENCED_PARAMETER(um_call);\r
+       HCA_PRINT(TRACE_LEVEL_ERROR  , HCA_DBG_MEMORY  ,("mlnx_register_smr not implemented\n"));\r
+       return IB_UNSUPPORTED;\r
+}\r
+\r
+ib_api_status_t\r
+mlnx_deregister_mr (\r
+       IN              const   ib_mr_handle_t                          h_mr)\r
+{\r
+       ib_api_status_t         status;\r
+       int err;\r
+\r
+       HCA_ENTER(HCA_DBG_SHIM);\r
+\r
+       // sanity checks\r
+       if( !cl_is_blockable() ) {\r
+                       status = IB_UNSUPPORTED;\r
+                       goto err_unsupported;\r
+       } \r
+\r
+#ifdef WIN_TO_BE_REMOVED       \r
+       // unlock user buffer\r
+       {\r
+               struct mthca_mr *mro_p = (struct mthca_mr *)h_mr;\r
+               if (mro_p->ibmr.uobject) \r
+                       iobuf_deregister( &mro_p->iobuf );\r
+       }\r
+#endif \r
+\r
+       // deregister   \r
+       err = ibv_dereg_mr((struct ib_mr *)h_mr);\r
+       if (err) {\r
+               status = errno_to_iberr(err);\r
+               HCA_PRINT(TRACE_LEVEL_ERROR,HCA_DBG_MEMORY, \r
+                       ("ibv_dereg_mr failed (%d)\n", status));\r
+               goto err_dereg_mr;\r
+       }\r
+\r
+       status = IB_SUCCESS;\r
+       \r
+err_dereg_mr:\r
+err_unsupported:\r
+       HCA_PRINT_EXIT(TRACE_LEVEL_ERROR  , HCA_DBG_MEMORY  ,("completes with ERROR status %s\n", ib_get_err_str(status)));\r
+       return status;\r
+               \r
+}\r
+\r
+/*\r
+*      Memory Window Verbs.\r
+*/\r
+\r
+ib_api_status_t\r
+mlnx_create_mw (\r
+       IN              const   ib_pd_handle_t                          h_pd,\r
+               OUT                     net32_t* const                          p_rkey,\r
+               OUT                     ib_mw_handle_t                          *ph_mw,\r
+       IN      OUT                     ci_umv_buf_t                            *p_umv_buf )\r
+{\r
+       UNREFERENCED_PARAMETER(h_pd);\r
+       UNREFERENCED_PARAMETER(p_rkey);\r
+       UNREFERENCED_PARAMETER(ph_mw);\r
+       UNREFERENCED_PARAMETER(p_umv_buf);\r
+       HCA_PRINT(TRACE_LEVEL_ERROR  , HCA_DBG_MEMORY  ,("mlnx_create_mw not implemented\n"));\r
+       return IB_UNSUPPORTED;\r
+}\r
+\r
+ib_api_status_t\r
+mlnx_query_mw (\r
+       IN              const   ib_mw_handle_t                          h_mw,\r
+               OUT                     ib_pd_handle_t                          *ph_pd,\r
+               OUT                     net32_t* const                          p_rkey,\r
+       IN      OUT                     ci_umv_buf_t                            *p_umv_buf )\r
+{\r
+       UNREFERENCED_PARAMETER(h_mw);\r
+       UNREFERENCED_PARAMETER(ph_pd);\r
+       UNREFERENCED_PARAMETER(p_rkey);\r
+       UNREFERENCED_PARAMETER(p_umv_buf);\r
+       HCA_PRINT(TRACE_LEVEL_ERROR  , HCA_DBG_MEMORY  ,("mlnx_query_mw not implemented\n"));\r
+       return IB_UNSUPPORTED;\r
+}\r
+\r
+ib_api_status_t\r
+mlnx_destroy_mw (\r
+       IN              const   ib_mw_handle_t                          h_mw)\r
+{\r
+       UNREFERENCED_PARAMETER(h_mw);\r
+       HCA_PRINT(TRACE_LEVEL_ERROR  , HCA_DBG_MEMORY  ,("mlnx_destroy_mw not implemented\n"));\r
+       return IB_UNSUPPORTED;\r
+}\r
+\r
+\r
+void\r
+mlnx_memory_if(\r
+       IN      OUT                     ci_interface_t                          *p_interface )\r
+{\r
+       p_interface->register_mr = mlnx_register_mr;\r
+       p_interface->register_pmr = mlnx_register_pmr;\r
+       p_interface->query_mr = mlnx_query_mr;\r
+       p_interface->modify_mr = mlnx_modify_mr;\r
+       p_interface->modify_pmr = mlnx_modify_pmr;\r
+       p_interface->register_smr = mlnx_register_smr;\r
+       p_interface->deregister_mr = mlnx_deregister_mr;\r
+\r
+       p_interface->create_mw = mlnx_create_mw;\r
+       p_interface->query_mw = mlnx_query_mw;\r
+       p_interface->destroy_mw = mlnx_destroy_mw;\r
+}\r
+\r
diff --git a/trunk/hw/mthca/kernel/hca_pci.c b/trunk/hw/mthca/kernel/hca_pci.c
new file mode 100644 (file)
index 0000000..40342e4
--- /dev/null
@@ -0,0 +1,760 @@
+\r
+#include "hca_driver.h"\r
+#if defined(EVENT_TRACING)\r
+#ifdef offsetof\r
+#undef offsetof\r
+#endif\r
+#include "hca_pci.tmh"\r
+#endif\r
+#include <complib/cl_thread.h>\r
+#include <initguid.h>\r
+#include <wdmguid.h>\r
+#ifdef WIN_TO_BE_CHANGED\r
+#include <iba/hca_br_ifc.h>\r
+#endif\r
+\r
+#define HCA_RESET_HCR_OFFSET                           0x000F0010\r
+#define HCA_RESET_TOKEN                                                CL_HTON32(0x00000001)\r
+\r
+#define PCI_CAPABILITY_ID_VPD                          0x03\r
+#define PCI_CAPABILITY_ID_PCIX                         0x07\r
+#define PCI_CAPABILITY_ID_PCIEXP                       0x10\r
+\r
+boolean_t\r
+FindBridgeIf(\r
+       IN hca_dev_ext_t                *pi_ext,\r
+       IN      PBUS_INTERFACE_STANDARD pi_pInterface\r
+       );\r
+\r
+\r
+/*\r
+ * Vital Product Data Capability\r
+ */\r
+typedef struct _PCI_VPD_CAPABILITY {\r
+\r
+       PCI_CAPABILITIES_HEADER Header;\r
+\r
+       USHORT          Flags;\r
+       ULONG                   Data;\r
+\r
+} PCI_VPD_CAPABILITY, *PPCI_VPD_CAPABILITY;\r
+\r
+\r
+/*\r
+ * PCI-X Capability\r
+ */\r
+typedef struct _PCI_PCIX_CAPABILITY {\r
+\r
+       PCI_CAPABILITIES_HEADER Header;\r
+\r
+       USHORT          Command;\r
+       ULONG                   Status;\r
+\r
+/* for Command: */\r
+} PCI_PCIX_CAPABILITY, *PPCI_PCIX_CAPABILITY;\r
+\r
+#define  PCI_X_CMD_MAX_READ     0x000c  /* Max Memory Read Byte Count */\r
+\r
+/*\r
+ * PCI-Express Capability\r
+ */\r
+typedef struct _PCI_PCIEXP_CAPABILITY {\r
+\r
+       PCI_CAPABILITIES_HEADER Header;\r
+\r
+       USHORT          Flags;\r
+       ULONG                   DevCapabilities;\r
+       USHORT          DevControl;\r
+       USHORT          DevStatus;\r
+       ULONG                   LinkCapabilities;\r
+       USHORT          LinkControl;\r
+       USHORT          LinkStatus;\r
+       ULONG                   SlotCapabilities;\r
+       USHORT          SlotControl;\r
+       USHORT          SlotStatus;\r
+       USHORT          RootControl;\r
+       USHORT          RootCapabilities;\r
+       USHORT          RootStatus;\r
+} PCI_PCIEXP_CAPABILITY, *PPCI_PCIEXP_CAPABILITY;\r
+\r
+/* for DevControl: */\r
+#define  PCI_EXP_DEVCTL_READRQ  0x7000  /* Max_Read_Request_Size */\r
+\r
+static NTSTATUS\r
+__get_bus_ifc(\r
+       IN                              DEVICE_OBJECT* const            pDevObj,\r
+       IN              const   GUID* const                                     pGuid,\r
+               OUT                     BUS_INTERFACE_STANDARD          *pBusIfc );\r
+\r
+static void\r
+__fixup_pci_capabilities(\r
+       IN                              PCI_COMMON_CONFIG* const        pConfig );\r
+\r
+static NTSTATUS\r
+__save_pci_config(\r
+       IN                              BUS_INTERFACE_STANDARD          *pBusIfc,\r
+               OUT                     PCI_COMMON_CONFIG* const        pConfig );\r
+\r
+static NTSTATUS\r
+__restore_pci_config(\r
+       IN                              BUS_INTERFACE_STANDARD          *pBusIfc,\r
+       IN                              PCI_COMMON_CONFIG* const        pConfig );\r
+\r
+\r
+#ifdef ALLOC_PRAGMA\r
+#pragma alloc_text (PAGE, __get_bus_ifc)\r
+#pragma alloc_text (PAGE, __fixup_pci_capabilities)\r
+#pragma alloc_text (PAGE, __save_pci_config)\r
+#pragma alloc_text (PAGE, __restore_pci_config)\r
+#endif\r
+\r
+/* Forwards the request to the HCA's PDO. */\r
+static NTSTATUS\r
+__get_bus_ifc(\r
+       IN                              DEVICE_OBJECT* const            pDevObj,\r
+       IN              const   GUID* const                                     pGuid,\r
+               OUT                     BUS_INTERFACE_STANDARD          *pBusIfc )\r
+{\r
+       NTSTATUS                        status;\r
+       IRP                                     *pIrp;\r
+       IO_STATUS_BLOCK         ioStatus;\r
+       IO_STACK_LOCATION       *pIoStack;\r
+       DEVICE_OBJECT           *pDev;\r
+       KEVENT                          event;\r
+\r
+       HCA_ENTER( HCA_DBG_PNP );\r
+\r
+       CL_ASSERT( KeGetCurrentIrql() < DISPATCH_LEVEL );\r
+\r
+       pDev = IoGetAttachedDeviceReference( pDevObj );\r
+\r
+       KeInitializeEvent( &event, NotificationEvent, FALSE );\r
+\r
+       /* Build the IRP for the HCA. */\r
+       pIrp = IoBuildSynchronousFsdRequest( IRP_MJ_PNP, pDev,\r
+               NULL, 0, NULL, &event, &ioStatus );\r
+       if( !pIrp )\r
+       {\r
+               ObDereferenceObject( pDev );\r
+               HCA_PRINT( TRACE_LEVEL_ERROR,HCA_DBG_PNP, \r
+                       ("IoBuildSynchronousFsdRequest failed.\n"));\r
+               return STATUS_INSUFFICIENT_RESOURCES;\r
+       }\r
+\r
+       /* Copy the request query parameters. */\r
+       pIoStack = IoGetNextIrpStackLocation( pIrp );\r
+       pIoStack->MinorFunction = IRP_MN_QUERY_INTERFACE;\r
+       pIoStack->Parameters.QueryInterface.Size = sizeof(BUS_INTERFACE_STANDARD);\r
+       pIoStack->Parameters.QueryInterface.Version = 1;\r
+       pIoStack->Parameters.QueryInterface.InterfaceType = pGuid;\r
+       pIoStack->Parameters.QueryInterface.Interface = (INTERFACE*)pBusIfc;\r
+       pIoStack->Parameters.QueryInterface.InterfaceSpecificData = NULL;\r
+\r
+       pIrp->IoStatus.Status = STATUS_NOT_SUPPORTED;\r
+\r
+       /* Send the IRP. */\r
+       status = IoCallDriver( pDev, pIrp );\r
+       if( status == STATUS_PENDING )\r
+       {\r
+               KeWaitForSingleObject( &event, Executive, KernelMode,\r
+                       FALSE, NULL );\r
+\r
+               status = ioStatus.Status;\r
+       }\r
+       ObDereferenceObject( pDev );\r
+\r
+       HCA_EXIT( HCA_DBG_PNP );\r
+       return status;\r
+}\r
+\r
+\r
+/*\r
+ * Reads and saves the PCI configuration of the device accessible\r
+ * through the provided bus interface.  Does not read registers 22 or 23\r
+ * as directed in Tavor PRM 1.0.1, Appendix A. InfiniHost Software Reset.\r
+ */\r
+static NTSTATUS\r
+__save_pci_config(\r
+       IN                              BUS_INTERFACE_STANDARD          *pBusIfc,\r
+               OUT                     PCI_COMMON_CONFIG* const        pConfig )\r
+{\r
+       ULONG                                   len;\r
+       UINT32                                  *pBuf;\r
+\r
+       HCA_ENTER( HCA_DBG_PNP );\r
+       \r
+       pBuf = (UINT32*)pConfig;\r
+\r
+       /*\r
+        * Read the lower portion of the configuration, up to but excluding\r
+        * register 22.\r
+        */\r
+       len = pBusIfc->GetBusData(\r
+               pBusIfc->Context, PCI_WHICHSPACE_CONFIG, &pBuf[0], 0, 88 );\r
+       if( len != 88 )\r
+       {\r
+               HCA_PRINT( TRACE_LEVEL_ERROR  , HCA_DBG_PNP  ,("Failed to read HCA config.\n"));\r
+               return STATUS_DEVICE_NOT_READY;\r
+       }\r
+\r
+       /* Read the upper portion of the configuration, from register 24. */\r
+       len = pBusIfc->GetBusData(\r
+               pBusIfc->Context, PCI_WHICHSPACE_CONFIG, &pBuf[24], 96, 160 );\r
+       if( len != 160 )\r
+       {\r
+               HCA_PRINT( TRACE_LEVEL_ERROR  ,HCA_DBG_PNP  ,("Failed to read HCA config.\n"));\r
+               return STATUS_DEVICE_NOT_READY;\r
+       }\r
+\r
+       HCA_EXIT( HCA_DBG_PNP );\r
+       return STATUS_SUCCESS;\r
+}\r
+\r
+\r
+static void\r
+__fixup_pci_capabilities(\r
+       IN                              PCI_COMMON_CONFIG* const        pConfig )\r
+{\r
+       UCHAR                                           *pBuf;\r
+       PCI_CAPABILITIES_HEADER         *pHdr, *pNextHdr;\r
+\r
+       HCA_ENTER( HCA_DBG_PNP );\r
+\r
+       pBuf = (UCHAR*)pConfig;\r
+\r
+       if( pConfig->HeaderType == PCI_DEVICE_TYPE )\r
+       {\r
+               if( pConfig->u.type0.CapabilitiesPtr )\r
+               {\r
+                       pNextHdr = (PCI_CAPABILITIES_HEADER*)\r
+                               (pBuf + pConfig->u.type0.CapabilitiesPtr);\r
+               }\r
+               else\r
+               {\r
+                       pNextHdr = NULL;\r
+               }\r
+       }\r
+       else\r
+       {\r
+               ASSERT( pConfig->HeaderType == PCI_BRIDGE_TYPE );\r
+               if( pConfig->u.type1.CapabilitiesPtr )\r
+               {\r
+                       pNextHdr = (PCI_CAPABILITIES_HEADER*)\r
+                               (pBuf + pConfig->u.type1.CapabilitiesPtr);\r
+               }\r
+               else\r
+               {\r
+                       pNextHdr = NULL;\r
+               }\r
+       }\r
+\r
+       /*\r
+        * Fix up any fields that might cause changes to the\r
+        * device - like writing VPD data.\r
+        */\r
+       while( pNextHdr )\r
+       {\r
+               pHdr = pNextHdr;\r
+               if( pNextHdr->Next )\r
+                       pNextHdr = (PCI_CAPABILITIES_HEADER*)(pBuf + pHdr->Next);\r
+               else\r
+                       pNextHdr = NULL;\r
+\r
+               switch( pHdr->CapabilityID )\r
+               {\r
+               case PCI_CAPABILITY_ID_VPD:\r
+                       /* Clear the flags field so we don't cause a write. */\r
+                       ((PCI_VPD_CAPABILITY*)pHdr)->Flags = 0;\r
+                       break;\r
+\r
+               default:\r
+                       break;\r
+               }\r
+       }\r
+\r
+       HCA_EXIT( HCA_DBG_PNP );\r
+}\r
+\r
+\r
+#define PCI_CONFIG_OFFSET( field )                             \\r
+       offsetof( PCI_COMMON_CONFIG, field )\r
+\r
+#define PCI_CONFIG_LEN( fromField, toField )   \\r
+       offsetof( PCI_COMMON_CONFIG, toField ) -        \\r
+       offsetof( PCI_COMMON_CONFIG, fromField ) +      \\r
+       sizeof( ((PCI_COMMON_CONFIG*)NULL)->##toField )\r
+\r
+#define PCI_CONFIG_WRITE( fromField, toField )         \\r
+       pBusIfc->SetBusData( pBusIfc->Context, PCI_WHICHSPACE_CONFIG,           \\r
+       &pConfig->##fromField, PCI_CONFIG_OFFSET( fromField ),                  \\r
+       PCI_CONFIG_LEN( fromField, toField ) )\r
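+\r
+/*\r
+ * Illustrative expansion: PCI_CONFIG_WRITE( VendorID, DeviceID ) calls\r
+ * pBusIfc->SetBusData() at the config-space offset of VendorID with a length\r
+ * that spans VendorID through DeviceID inclusive (4 bytes), as used by\r
+ * __restore_pci_config() below to restore the vendor/device IDs.\r
+ */\r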
+\r
+/*\r
+ * Restore saved PCI configuration, skipping registers 22 and 23, as well\r
+ * as any registers where writing will have side effects such as the flags\r
+ * field of the VPD and vendor specific capabilities.  The function also delays\r
+ * writing the command register, bridge control register (if applicable), and\r
+ * PCIX command register (if present).\r
+ */\r
+static NTSTATUS\r
+__restore_pci_config(\r
+       IN                              BUS_INTERFACE_STANDARD          *pBusIfc,\r
+       IN                              PCI_COMMON_CONFIG* const        pConfig )\r
+{\r
+       ULONG                                           len;\r
+       UCHAR                                           *pBuf;\r
+\r
+       HCA_ENTER( HCA_DBG_PNP );\r
+\r
+       pBuf = (UCHAR*)pConfig;\r
+\r
+       /* Fixup the capabilities as needed. */\r
+       __fixup_pci_capabilities( pConfig );\r
+\r
+       /* Restore the vendor/device IDs */\r
+       len = PCI_CONFIG_WRITE( VendorID, DeviceID );\r
+       if( len != PCI_CONFIG_LEN( VendorID, DeviceID ) )\r
+       {\r
+               HCA_PRINT( TRACE_LEVEL_ERROR ,HCA_DBG_PNP ,("Failed to write vendor/device IDs.\n"));\r
+               return STATUS_DEVICE_NOT_READY;\r
+       }\r
+\r
+       /*\r
+        * Skip the command register and write the rest (except the bridge\r
+        * control if this is a bridge).\r
+        */\r
+       if( pConfig->HeaderType == PCI_DEVICE_TYPE )\r
+       {\r
+               len = PCI_CONFIG_WRITE( Status, u.type0.MaximumLatency );\r
+               if( len != PCI_CONFIG_LEN( Status, u.type0.MaximumLatency ) )\r
+               {\r
+                       HCA_PRINT( TRACE_LEVEL_ERROR  ,HCA_DBG_PNP  ,("Failed to write type 0 common header.\n"));\r
+                       return STATUS_DEVICE_NOT_READY;\r
+               }\r
+       }\r
+       else\r
+       {\r
+               ASSERT( pConfig->HeaderType == PCI_BRIDGE_TYPE );\r
+               len = PCI_CONFIG_WRITE( Status, u.type1.InterruptPin );\r
+               if( len != PCI_CONFIG_LEN( Status, u.type1.InterruptPin ) )\r
+               {\r
+                       HCA_PRINT( TRACE_LEVEL_ERROR  ,HCA_DBG_PNP  ,("Failed to write type 1 common header.\n"));\r
+                       return STATUS_DEVICE_NOT_READY;\r
+               }\r
+       }\r
+\r
+       /* Write the capabilities back. */\r
+       len = pBusIfc->SetBusData( pBusIfc->Context, PCI_WHICHSPACE_CONFIG,\r
+               pConfig->DeviceSpecific, PCI_CONFIG_OFFSET( DeviceSpecific ), 192 );\r
+       if( len != 192 )\r
+       {\r
+               HCA_PRINT( TRACE_LEVEL_ERROR ,HCA_DBG_PNP ,("Failed to write capabilities.\n"));\r
+               return STATUS_DEVICE_NOT_READY;\r
+       }\r
+\r
+       /* Write the command register. */\r
+       len = PCI_CONFIG_WRITE( Command, Command );\r
+       if( len != PCI_CONFIG_LEN( Command, Command ) )\r
+       {\r
+               HCA_PRINT( TRACE_LEVEL_ERROR  ,HCA_DBG_SHIM  ,("Failed to write command register.\n"));\r
+               return STATUS_DEVICE_NOT_READY;\r
+       }\r
+\r
+       /* Write the bridge control register if a bridge. */\r
+       if( pConfig->HeaderType == PCI_BRIDGE_TYPE )\r
+       {\r
+               len =\r
+                       PCI_CONFIG_WRITE( u.type1.BridgeControl, u.type1.BridgeControl );\r
+               if( len !=\r
+                       PCI_CONFIG_LEN( u.type1.BridgeControl, u.type1.BridgeControl ) )\r
+               {\r
+                       HCA_PRINT( TRACE_LEVEL_ERROR, HCA_DBG_PNP, \r
+                               ("Failed to write bridge control register.\n"));\r
+                       return STATUS_DEVICE_NOT_READY;\r
+               }\r
+       }\r
+\r
+       HCA_EXIT( HCA_DBG_PNP );\r
+       return STATUS_SUCCESS;\r
+}\r
+\r
+\r
+NTSTATUS\r
+hca_reset( DEVICE_OBJECT* const                pDevObj, int is_tavor )\r
+{\r
+       NTSTATUS                                status = STATUS_SUCCESS;\r
+       PCI_COMMON_CONFIG               hcaConfig, brConfig;\r
+       BUS_INTERFACE_STANDARD  hcaBusIfc;\r
+       BUS_INTERFACE_STANDARD  brBusIfc = {0}; // to bypass C4701\r
+       hca_dev_ext_t                   *pExt = (hca_dev_ext_t*)pDevObj->DeviceExtension;\r
+       ULONG                                   data, i;\r
+       PULONG  reset_p;\r
+       PHYSICAL_ADDRESS  pa;\r
+       static int skip = 1;\r
+\r
+       HCA_ENTER( HCA_DBG_PNP );\r
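+       /* NOTE: 'skip' is statically initialized to 1, so in this alpha version the\r
+        * reset sequence below is effectively disabled and the routine returns at once. */\r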
+       if (skip) goto resetErr1;\r
+\r
+       /* Get the HCA's bus interface. */\r
+       status = __get_bus_ifc( pDevObj, &GUID_BUS_INTERFACE_STANDARD, &hcaBusIfc );\r
+       if( !NT_SUCCESS( status ) )\r
+       {\r
+               HCA_PRINT( TRACE_LEVEL_ERROR  ,HCA_DBG_SHIM  ,("Failed to get HCA bus interface.\n"));\r
+               goto resetErr1;\r
+       }\r
+\r
+       if (is_tavor) {\r
+#if 0          \r
+               /* Get the HCA Bridge's bus interface. */\r
+               status = __get_bus_ifc( pDevObj, &GUID_HCA_BRIDGE_INTERFACE, &brBusIfc );\r
+               if( !NT_SUCCESS( status ) )\r
+               {\r
+                       HCA_PRINT( TRACE_LEVEL_ERROR, HCA_DBG_PNP, \r
+                               ("Failed to get HCA bridge bus interface.\n"));\r
+                       goto resetErr2;\r
+               }\r
+#else\r
+               if (!FindBridgeIf( pExt, &brBusIfc )) {\r
+                       status = STATUS_UNSUCCESSFUL;\r
+                       goto resetErr2;\r
+               }\r
+#endif\r
+       }\r
+\r
+       /* Save the HCA's configuration. */\r
+       status = __save_pci_config( &hcaBusIfc, &hcaConfig );\r
+       if( !NT_SUCCESS( status ) )\r
+       {\r
+               HCA_PRINT( TRACE_LEVEL_ERROR, HCA_DBG_PNP,\r
+                       ("Failed to save HCA config.\n"));\r
+               goto resetErr3;\r
+       }\r
+\r
+       if (is_tavor) {\r
+               /* Save the HCA bridge's configuration. */\r
+               status = __save_pci_config( &brBusIfc, &brConfig );\r
+               if( !NT_SUCCESS( status ) )\r
+               {\r
+                       HCA_PRINT( TRACE_LEVEL_ERROR, HCA_DBG_PNP,\r
+                               ("Failed to save bridge config.\n"));\r
+                       goto resetErr3;\r
+               }\r
+       }\r
+       \r
+       /* map reset register */\r
+       pa.QuadPart = pExt->bar[HCA_BAR_TYPE_HCR].phys + HCA_RESET_HCR_OFFSET;\r
+       HCA_PRINT( TRACE_LEVEL_INFORMATION ,HCA_DBG_PNP  ,("Mapping reset register with address 0x%I64x\n", pa.QuadPart));\r
+       reset_p = MmMapIoSpace( pa,     4, MmNonCached );\r
+       if( !reset_p )\r
+       {\r
+               HCA_PRINT( TRACE_LEVEL_ERROR  ,HCA_DBG_PNP  ,("Failed to map reset register with address 0x%I64x\n", pa.QuadPart));\r
+               status = STATUS_UNSUCCESSFUL;\r
+               goto resetErr3;\r
+       }\r
+       \r
+       /* Issue the reset. */\r
+       HCA_PRINT( TRACE_LEVEL_INFORMATION ,HCA_DBG_PNP  ,("Resetting  the chip ...\n"));\r
+       WRITE_REGISTER_ULONG( reset_p, HCA_RESET_TOKEN );\r
+\r
+       /* Wait a second. */\r
+       cl_thread_suspend( 1000 );\r
+\r
+       /* unmap the reset register */\r
+       HCA_PRINT( TRACE_LEVEL_INFORMATION ,HCA_DBG_PNP  ,("Unmapping reset register \n"));\r
+       MmUnmapIoSpace( reset_p, 4 );\r
+\r
+       \r
+       if (is_tavor) {\r
+               /*\r
+                * Now read the bridge's configuration register until it doesn't\r
+                * return 0xFFFFFFFF.  Give it 10 seconds for good measure.\r
+                */\r
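+               /* A read of all 1s from offset 0 (the Vendor/Device ID dword) means the\r
+                * bridge is not yet completing configuration cycles after the reset. */\r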
+               HCA_PRINT( TRACE_LEVEL_INFORMATION  ,HCA_DBG_PNP  ,("Read the Bridge's configuration register \n"));\r
+               for( i = 0; i < 10; i++ )\r
+               {\r
+                       if( brBusIfc.GetBusData( brBusIfc.Context, PCI_WHICHSPACE_CONFIG,\r
+                               &data, 0, sizeof(ULONG) ) != sizeof(ULONG) )\r
+                       {\r
+                               HCA_PRINT( TRACE_LEVEL_ERROR, HCA_DBG_PNP, \r
+                                       ("Failed to read bridge configuration data.\n"));\r
+                               status = STATUS_UNSUCCESSFUL;\r
+                               goto resetErr3;\r
+                       }\r
+                       /* See if we got valid data. */\r
+                       if( data != 0xFFFFFFFF )\r
+                               break;\r
+\r
+                       cl_thread_suspend( 1000 );\r
+               }       \r
+               if( i == 10 )\r
+               {\r
+                       /* Darn, timed out. :( */\r
+                       HCA_PRINT( TRACE_LEVEL_ERROR, HCA_DBG_PNP, \r
+                               ("Doh! HCA Bridge never came back from reset!\n"));\r
+                       status = STATUS_UNSUCCESSFUL;\r
+                       goto resetErr3;\r
+               }\r
+       }       \r
+\r
+       else {\r
+               /*\r
+                * Now read the HCA's configuration register until it doesn't\r
+                * return 0xFFFFFFFF.  Give it 10 seconds for good measure.\r
+                */\r
+               HCA_PRINT( TRACE_LEVEL_INFORMATION  ,HCA_DBG_PNP  ,("Read the HCA's configuration register \n"));\r
+               for( i = 0; i < 100; i++ )\r
+               {\r
+                       if( hcaBusIfc.GetBusData( hcaBusIfc.Context, PCI_WHICHSPACE_CONFIG,\r
+                               &data, 0, sizeof(ULONG) ) != sizeof(ULONG) )\r
+                       {\r
+                               HCA_PRINT( TRACE_LEVEL_ERROR, HCA_DBG_PNP, \r
+                                       ("Failed to read HCA configuration data.\n"));\r
+                               status = STATUS_UNSUCCESSFUL;\r
+                               goto resetErr3;\r
+                       }\r
+                       /* See if we got valid data. */\r
+                       if( data != 0xFFFFFFFF )\r
+                               break;\r
+\r
+                       cl_thread_suspend( 100 );\r
+               } \r
+               if( i >= 100 )\r
+               {\r
+                       /* Darn, timed out. :( */\r
+                       HCA_PRINT( TRACE_LEVEL_ERROR, HCA_DBG_PNP, \r
+                               ("Doh! HCA never came back from reset!\n"));\r
+                       status = STATUS_UNSUCCESSFUL;\r
+                       goto resetErr3;\r
+               }\r
+       }\r
+       \r
+       if (is_tavor) {\r
+               /* Restore the HCA's bridge configuration. */\r
+               HCA_PRINT( TRACE_LEVEL_INFORMATION  ,HCA_DBG_PNP  ,("Restoring bridge PCI configuration \n"));\r
+               status = __restore_pci_config( &brBusIfc, &brConfig );\r
+               if( !NT_SUCCESS( status ) )\r
+               {\r
+                       HCA_PRINT( TRACE_LEVEL_ERROR, HCA_DBG_PNP, \r
+                               ("Failed to restore bridge config.\n"));\r
+                       goto resetErr3;\r
+               }\r
+       }\r
+       \r
+       /* Restore the HCA's configuration. */\r
+       HCA_PRINT( TRACE_LEVEL_INFORMATION  ,HCA_DBG_PNP  ,("Restoring HCA PCI configuration \n"));\r
+       status = __restore_pci_config( &hcaBusIfc, &hcaConfig );\r
+       if( !NT_SUCCESS( status ) )\r
+       {\r
+               HCA_PRINT( TRACE_LEVEL_ERROR, HCA_DBG_PNP, \r
+                       ("Failed to restore HCA config.\n"));\r
+       }\r
+\r
+resetErr3:\r
+       if (is_tavor) \r
+               brBusIfc.InterfaceDereference( brBusIfc.Context );\r
+\r
+resetErr2:\r
+       hcaBusIfc.InterfaceDereference( hcaBusIfc.Context );\r
+\r
+resetErr1:\r
+       HCA_EXIT( HCA_DBG_PNP );\r
+       return status;\r
+}\r
+\r
+\r
+/*\r
+ * Returns the offset in configuration space of the capability with the given\r
+ * ID, or 0 if the capability is not present.\r
+ */\r
+static ULONG\r
+__FindCapability(\r
+       IN                              PCI_COMMON_CONFIG* const        pConfig,  \r
+       IN                              char cap_id\r
+       )\r
+{\r
+       ULONG                                           offset = 0;\r
+       UCHAR                                           *pBuf;\r
+       PCI_CAPABILITIES_HEADER         *pHdr;\r
+\r
+       HCA_ENTER( HCA_DBG_PNP );\r
+\r
+       pBuf = (UCHAR*)pConfig;\r
+\r
+       ASSERT( pConfig->HeaderType == PCI_DEVICE_TYPE );\r
+\r
+       if( pConfig->u.type0.CapabilitiesPtr )\r
+       {\r
+               pHdr = (PCI_CAPABILITIES_HEADER*)\r
+                       (pBuf + pConfig->u.type0.CapabilitiesPtr);\r
+       }\r
+       else\r
+       {\r
+               pHdr = NULL;\r
+       }\r
+\r
+       /*\r
+        * Walk the capability list looking for the requested capability ID.\r
+        */\r
+       while( pHdr )\r
+       {\r
+               if( pHdr->CapabilityID == cap_id )\r
+               {\r
+                       offset = (UCHAR)(((ULONG_PTR)pHdr) - ((ULONG_PTR)pConfig));\r
+                       break;\r
+               }\r
+\r
+               if( pHdr->Next )\r
+                       pHdr = (PCI_CAPABILITIES_HEADER*)(pBuf + pHdr->Next);\r
+               else\r
+                       pHdr = NULL;\r
+       }\r
+\r
+       HCA_EXIT( HCA_DBG_PNP );\r
+       return offset;\r
+}\r
+\r
+\r
+/*\r
+ * Tunes PCI configuration as described in 13.3.2 in the Tavor PRM.\r
+ */\r
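+/*\r
+ * Two adjustments are made below: the PCI-X command register's maximum memory\r
+ * read byte count field (bits 3:2) is raised to its largest encoding (3 = 4096\r
+ * bytes), and the PCI Express Device Control Max_Read_Request_Size field\r
+ * (bits 14:12) is raised to its largest encoding (5 = 4096 bytes).\r
+ */\r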
+NTSTATUS\r
+hca_tune_pci(\r
+       IN                              DEVICE_OBJECT* const            pDevObj )\r
+{\r
+       NTSTATUS                                status;\r
+       PCI_COMMON_CONFIG               hcaConfig;\r
+       BUS_INTERFACE_STANDARD  hcaBusIfc;\r
+       ULONG                                   len;\r
+       ULONG                                   capOffset;\r
+       PCI_PCIX_CAPABILITY             *pPciXCap;\r
+       PCI_PCIEXP_CAPABILITY   *pPciExpCap;\r
+\r
+       HCA_ENTER( HCA_DBG_PNP );\r
+\r
+       /* Get the HCA's bus interface. */\r
+       status = __get_bus_ifc( pDevObj, &GUID_BUS_INTERFACE_STANDARD, &hcaBusIfc );\r
+       if( !NT_SUCCESS( status ) )\r
+       {\r
+               HCA_PRINT( TRACE_LEVEL_ERROR ,HCA_DBG_PNP ,("Failed to get HCA bus interface.\n"));\r
+               return status;\r
+       }\r
+\r
+       /* Save the HCA's configuration. */\r
+       status = __save_pci_config( &hcaBusIfc, &hcaConfig );\r
+       if( !NT_SUCCESS( status ) )\r
+       {\r
+               HCA_PRINT( TRACE_LEVEL_ERROR, HCA_DBG_PNP, \r
+                       ("Failed to save HCA config.\n"));\r
+               status = STATUS_UNSUCCESSFUL;\r
+               goto tweakErr;\r
+       }\r
+       status = STATUS_SUCCESS;\r
+\r
+       /*\r
+       *               PCIX Capability\r
+       */\r
+       capOffset = __FindCapability( &hcaConfig, PCI_CAPABILITY_ID_PCIX );\r
+       if( capOffset )\r
+       {\r
+               pPciXCap = (PCI_PCIX_CAPABILITY*)(((UCHAR*)&hcaConfig) + capOffset);\r
+               /* Update the command field to the maximum read byte count if needed. */\r
+               if( (pPciXCap->Command & 0x000C) != 0x000C )\r
+               {\r
+                       HCA_PRINT( TRACE_LEVEL_WARNING, HCA_DBG_PNP,\r
+                               ("Updating max recv byte count of PCI-X capability.\n"));\r
+                       pPciXCap->Command = (pPciXCap->Command & ~PCI_X_CMD_MAX_READ) | (3 << 2);\r
+                       len = hcaBusIfc.SetBusData( hcaBusIfc.Context, PCI_WHICHSPACE_CONFIG,\r
+                               &pPciXCap->Command,\r
+                               capOffset + offsetof( PCI_PCIX_CAPABILITY, Command),\r
+                               sizeof( pPciXCap->Command ) );\r
+                       if( len != sizeof( pPciXCap->Command ) )\r
+                       {\r
+                               HCA_PRINT( TRACE_LEVEL_ERROR, HCA_DBG_PNP, \r
+                                       ("Failed to update PCI-X maximum read byte count.\n"));\r
+                               status = STATUS_UNSUCCESSFUL;\r
+                               goto tweakErr;\r
+                       }\r
+               }\r
+       }\r
+\r
+\r
+       /*\r
+       *       PCI Express Capability\r
+       */\r
+       capOffset = __FindCapability( &hcaConfig, PCI_CAPABILITY_ID_PCIEXP );\r
+       if( capOffset )\r
+       {\r
+               pPciExpCap = (PCI_PCIEXP_CAPABILITY*)(((UCHAR*)&hcaConfig) + capOffset);\r
+               \r
+               /* Update Max_Read_Request_Size. */\r
+               HCA_PRINT( TRACE_LEVEL_WARNING ,HCA_DBG_PNP,\r
+                       ("Updating max read request size of the PCI Express capability.\n"));\r
+               pPciExpCap->DevControl = (pPciExpCap->DevControl & ~PCI_EXP_DEVCTL_READRQ) | (5 << 12);\r
+               len = hcaBusIfc.SetBusData( hcaBusIfc.Context, PCI_WHICHSPACE_CONFIG,\r
+                       &pPciExpCap->DevControl,\r
+                       capOffset + offsetof( PCI_PCIEXP_CAPABILITY, DevControl),\r
+                       sizeof( pPciExpCap->DevControl ) );\r
+               if( len != sizeof( pPciExpCap->DevControl ) )\r
+               {\r
+                       HCA_PRINT( TRACE_LEVEL_ERROR, HCA_DBG_PNP, \r
+                               ("Failed to update PCI-Exp maximum read byte count.\n"));\r
+                       status = STATUS_UNSUCCESSFUL;\r
+                       goto tweakErr;\r
+               }\r
+       }\r
+\r
+\r
+tweakErr:\r
+       hcaBusIfc.InterfaceDereference( hcaBusIfc.Context );\r
+\r
+       HCA_EXIT( HCA_DBG_PNP );\r
+       return status;\r
+}\r
+\r
+\r
+/* leo */\r
+\r
+NTSTATUS\r
+hca_enable_pci(\r
+       IN                              DEVICE_OBJECT* const            pDevObj,\r
+       OUT                     PCI_COMMON_CONFIG*      pHcaConfig\r
+       )\r
+{\r
+               NTSTATUS                                status;\r
+               BUS_INTERFACE_STANDARD  hcaBusIfc;\r
+               ULONG                           len;\r
+       \r
+               HCA_ENTER( HCA_DBG_PNP );\r
+       \r
+               /* Get the HCA's bus interface. */\r
+               status = __get_bus_ifc( pDevObj, &GUID_BUS_INTERFACE_STANDARD, &hcaBusIfc );\r
+               if( !NT_SUCCESS( status ) )\r
+               {\r
+                       HCA_PRINT( TRACE_LEVEL_ERROR  , HCA_DBG_PNP  ,("Failed to get HCA bus interface.\n"));\r
+                       return STATUS_DEVICE_NOT_READY;\r
+               }\r
+       \r
+               /* Save the HCA's configuration. */\r
+               status = __save_pci_config( &hcaBusIfc, pHcaConfig );\r
+               if( !NT_SUCCESS( status ) )\r
+               {\r
+                       HCA_PRINT( TRACE_LEVEL_ERROR, HCA_DBG_PNP,\r
+                               ("Failed to save HCA config.\n"));\r
+                       goto pciErr;\r
+               }\r
+\r
+               /* Fix the command register: enable I/O space, memory space and bus mastering. */\r
+               // NOTE: this also changes the saved value of the command register\r
+               pHcaConfig->Command |= 7;\r
+               len = hcaBusIfc.SetBusData( hcaBusIfc.Context, PCI_WHICHSPACE_CONFIG,\r
+                       (PVOID)&pHcaConfig->Command, 4, sizeof(ULONG) );\r
+               if( len != sizeof(ULONG) )\r
+               {\r
+                       HCA_PRINT( TRACE_LEVEL_ERROR  ,HCA_DBG_PNP  ,("Failed to write command register.\n"));\r
+                       status = STATUS_DEVICE_NOT_READY;\r
+                       goto pciErr;\r
+               }\r
+\r
+       pciErr:\r
+               hcaBusIfc.InterfaceDereference( hcaBusIfc.Context );\r
+       \r
+               HCA_EXIT( HCA_DBG_PNP );\r
+               return status;\r
+}\r
diff --git a/trunk/hw/mthca/kernel/hca_pci.h b/trunk/hw/mthca/kernel/hca_pci.h
new file mode 100644 (file)
index 0000000..6393414
--- /dev/null
@@ -0,0 +1,19 @@
+#ifndef HCA_PCI_H
+#define HCA_PCI_H
+
+
+NTSTATUS
+hca_reset(
+       IN                              DEVICE_OBJECT* const            pDevObj, int is_tavor );
+
+NTSTATUS
+hca_enable_pci(
+       IN                              DEVICE_OBJECT* const            pDevObj,
+       OUT                     PCI_COMMON_CONFIG*      pHcaConfig
+       );
+
+NTSTATUS
+hca_tune_pci(
+       IN                              DEVICE_OBJECT* const            pDevObj );
+
+#endif
diff --git a/trunk/hw/mthca/kernel/hca_pnp.c b/trunk/hw/mthca/kernel/hca_pnp.c
new file mode 100644 (file)
index 0000000..216add0
--- /dev/null
@@ -0,0 +1,1543 @@
+/* BEGIN_ICS_COPYRIGHT ****************************************\r
+** END_ICS_COPYRIGHT   ****************************************/\r
+\r
+/*\r
+       $Revision: 1.1 $\r
+*/\r
+\r
+\r
+/*\r
+ * Provides the PnP and Power Management entry points for the Tavor HCA driver.\r
+ */\r
+\r
+#include "hca_driver.h"\r
+#include "mthca_dev.h"\r
+\r
+#if defined(EVENT_TRACING)\r
+#ifdef offsetof\r
+#undef offsetof\r
+#endif\r
+#include "hca_pnp.tmh"\r
+#endif\r
+#include "mthca.h"\r
+#include <initguid.h>\r
+#include <wdmguid.h>\r
+\r
+extern const char *mthca_version;\r
+\r
+\r
+static NTSTATUS\r
+hca_start(\r
+       IN                              DEVICE_OBJECT* const            p_dev_obj,\r
+       IN                              IRP* const                                      p_irp, \r
+               OUT                     cl_irp_action_t* const          p_action );\r
+\r
+static NTSTATUS\r
+hca_query_stop(\r
+       IN                              DEVICE_OBJECT* const            p_dev_obj,\r
+       IN                              IRP* const                                      p_irp, \r
+               OUT                     cl_irp_action_t* const          p_action );\r
+\r
+static NTSTATUS\r
+hca_stop(\r
+       IN                              DEVICE_OBJECT* const            p_dev_obj,\r
+       IN                              IRP* const                                      p_irp, \r
+               OUT                     cl_irp_action_t* const          p_action );\r
+\r
+static NTSTATUS\r
+hca_cancel_stop(\r
+       IN                              DEVICE_OBJECT* const            p_dev_obj,\r
+       IN                              IRP* const                                      p_irp, \r
+               OUT                     cl_irp_action_t* const          p_action );\r
+\r
+static NTSTATUS\r
+hca_query_remove(\r
+       IN                              DEVICE_OBJECT* const            p_dev_obj,\r
+       IN                              IRP* const                                      p_irp, \r
+               OUT                     cl_irp_action_t* const          p_action );\r
+\r
+static void\r
+hca_release_resources(\r
+       IN                              DEVICE_OBJECT* const            p_dev_obj );\r
+\r
+static NTSTATUS\r
+hca_cancel_remove(\r
+       IN                              DEVICE_OBJECT* const            p_dev_obj,\r
+       IN                              IRP* const                                      p_irp, \r
+               OUT                     cl_irp_action_t* const          p_action );\r
+\r
+static NTSTATUS\r
+hca_surprise_remove(\r
+       IN                              DEVICE_OBJECT* const            p_dev_obj,\r
+       IN                              IRP* const                                      p_irp, \r
+               OUT                     cl_irp_action_t* const          p_action );\r
+\r
+static NTSTATUS\r
+hca_query_capabilities(\r
+       IN                              DEVICE_OBJECT* const            p_dev_obj,\r
+       IN                              IRP* const                                      p_irp, \r
+               OUT                     cl_irp_action_t* const          p_action );\r
+\r
+static NTSTATUS\r
+hca_query_pnp_state(\r
+       IN                              DEVICE_OBJECT* const            p_dev_obj,\r
+       IN                              IRP* const                                      p_irp, \r
+               OUT                     cl_irp_action_t* const          p_action );\r
+\r
+static NTSTATUS\r
+hca_query_bus_relations(\r
+       IN                              DEVICE_OBJECT* const            p_dev_obj,\r
+       IN                              IRP* const                                      p_irp, \r
+               OUT                     cl_irp_action_t* const          p_action );\r
+\r
+static NTSTATUS\r
+hca_query_removal_relations(\r
+       IN                              DEVICE_OBJECT* const            p_dev_obj,\r
+       IN                              IRP* const                                      p_irp, \r
+               OUT                     cl_irp_action_t* const          p_action );\r
+\r
+static NTSTATUS\r
+hca_query_power(\r
+       IN                              DEVICE_OBJECT* const            p_dev_obj,\r
+       IN                              IRP* const                                      p_irp,\r
+               OUT                     cl_irp_action_t* const          p_action );\r
+\r
+static NTSTATUS\r
+hca_set_power(\r
+       IN                              DEVICE_OBJECT* const            p_dev_obj,\r
+       IN                              IRP* const                                      p_irp, \r
+               OUT                     cl_irp_action_t* const          p_action );\r
+\r
+static ci_interface_t*\r
+__alloc_hca_ifc(\r
+       IN                              hca_dev_ext_t* const            p_ext );\r
+\r
+static NTSTATUS\r
+__get_ci_interface(\r
+       IN                              DEVICE_OBJECT* const            p_dev_obj );\r
+\r
+static NTSTATUS\r
+__hca_register(\r
+       IN                              DEVICE_OBJECT                           *p_dev_obj );\r
+\r
+static NTSTATUS\r
+__pnp_notify_target(\r
+       IN                              void                                            *pNotifyStruct,\r
+       IN                              void                                            *context );\r
+\r
+static NTSTATUS\r
+__pnp_notify_ifc(\r
+       IN                              void                                            *pNotifyStruct,\r
+       IN                              void                                            *context );\r
+\r
+\r
+#ifdef ALLOC_PRAGMA\r
+#pragma alloc_text (PAGE, hca_add_device)\r
+#pragma alloc_text (PAGE, hca_start)\r
+#pragma alloc_text (PAGE, hca_query_stop)\r
+#pragma alloc_text (PAGE, hca_stop)\r
+#pragma alloc_text (PAGE, hca_cancel_stop)\r
+#pragma alloc_text (PAGE, hca_query_remove)\r
+#pragma alloc_text (PAGE, hca_release_resources)\r
+#pragma alloc_text (PAGE, hca_cancel_remove)\r
+#pragma alloc_text (PAGE, hca_surprise_remove)\r
+#pragma alloc_text (PAGE, hca_query_capabilities)\r
+#pragma alloc_text (PAGE, hca_query_pnp_state)\r
+#pragma alloc_text (PAGE, hca_query_bus_relations)\r
+#pragma alloc_text (PAGE, hca_query_removal_relations)\r
+#pragma alloc_text (PAGE, hca_set_power)\r
+#pragma alloc_text (PAGE, __alloc_hca_ifc)\r
+#pragma alloc_text (PAGE, __get_ci_interface)\r
+#pragma alloc_text (PAGE, __hca_register)\r
+#pragma alloc_text (PAGE, __pnp_notify_target)\r
+#pragma alloc_text (PAGE, __pnp_notify_ifc)\r
+#endif\r
+\r
+\r
+static cl_vfptr_pnp_po_t       vfptrHcaPnp;\r
+\r
+\r
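+/*\r
+ * vfptrHcaPnp is the complib PnP/power dispatch table: hca_init_vfptr() fills\r
+ * it in, and hca_add_device() passes it to cl_init_pnp_po_ext() so that complib\r
+ * routes PnP and power IRPs to the handlers below.\r
+ */\r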
+void\r
+hca_init_vfptr( void )\r
+{\r
+       vfptrHcaPnp.identity = "HCA driver";\r
+       vfptrHcaPnp.pfn_start = hca_start;\r
+       vfptrHcaPnp.pfn_query_stop = hca_query_stop;\r
+       vfptrHcaPnp.pfn_stop = hca_stop;\r
+       vfptrHcaPnp.pfn_cancel_stop = hca_cancel_stop;\r
+       vfptrHcaPnp.pfn_query_remove = hca_query_remove;\r
+       vfptrHcaPnp.pfn_release_resources = hca_release_resources;\r
+       vfptrHcaPnp.pfn_remove = cl_do_remove;\r
+       vfptrHcaPnp.pfn_cancel_remove = hca_cancel_remove;\r
+       vfptrHcaPnp.pfn_surprise_remove = hca_surprise_remove;\r
+       vfptrHcaPnp.pfn_query_capabilities = hca_query_capabilities;\r
+       vfptrHcaPnp.pfn_query_pnp_state = hca_query_pnp_state;\r
+       vfptrHcaPnp.pfn_filter_res_req = cl_irp_skip;\r
+       vfptrHcaPnp.pfn_dev_usage_notification = cl_do_sync_pnp;\r
+       vfptrHcaPnp.pfn_query_bus_relations = hca_query_bus_relations;\r
+       vfptrHcaPnp.pfn_query_ejection_relations = cl_irp_ignore;\r
+       vfptrHcaPnp.pfn_query_removal_relations = hca_query_removal_relations;\r
+       vfptrHcaPnp.pfn_query_target_relations = cl_irp_ignore;\r
+       vfptrHcaPnp.pfn_unknown = cl_irp_ignore;\r
+       vfptrHcaPnp.pfn_query_resources = cl_irp_ignore;\r
+       vfptrHcaPnp.pfn_query_res_req = cl_irp_ignore;\r
+       vfptrHcaPnp.pfn_query_bus_info = cl_irp_ignore;\r
+       vfptrHcaPnp.pfn_query_interface = cl_irp_ignore;\r
+       vfptrHcaPnp.pfn_read_config = cl_irp_ignore;\r
+       vfptrHcaPnp.pfn_write_config = cl_irp_ignore;\r
+       vfptrHcaPnp.pfn_eject = cl_irp_ignore;\r
+       vfptrHcaPnp.pfn_set_lock = cl_irp_ignore;\r
+       vfptrHcaPnp.pfn_query_power = hca_query_power;\r
+       vfptrHcaPnp.pfn_set_power = hca_set_power;\r
+       vfptrHcaPnp.pfn_power_sequence = cl_irp_ignore;\r
+       vfptrHcaPnp.pfn_wait_wake = cl_irp_ignore;\r
+}\r
+\r
+\r
+NTSTATUS\r
+hca_add_device(\r
+       IN                              PDRIVER_OBJECT                          pDriverObj,\r
+       IN                              PDEVICE_OBJECT                          pPdo )\r
+{\r
+       NTSTATUS                        status;\r
+       DEVICE_OBJECT           *p_dev_obj, *pNextDevObj;\r
+       hca_dev_ext_t           *p_ext;\r
+\r
+       HCA_ENTER(HCA_DBG_PNP);\r
+\r
+       /*\r
+        * Create the device so that we have a device extension to store stuff in.\r
+        */\r
+       status = IoCreateDevice( pDriverObj, sizeof(hca_dev_ext_t),\r
+               NULL, FILE_DEVICE_INFINIBAND, FILE_DEVICE_SECURE_OPEN,\r
+               FALSE, &p_dev_obj );\r
+       if( !NT_SUCCESS( status ) )\r
+       {\r
+               HCA_PRINT( TRACE_LEVEL_ERROR, HCA_DBG_PNP, \r
+                       ("IoCreateDevice returned 0x%08X.\n", status));\r
+               return status;\r
+       }\r
+\r
+       p_ext = (hca_dev_ext_t*)p_dev_obj->DeviceExtension;\r
+       cl_memclr( p_ext, sizeof(hca_dev_ext_t) );\r
+\r
+       /* Attach to the device stack. */\r
+       pNextDevObj = IoAttachDeviceToDeviceStack( p_dev_obj, pPdo );\r
+       if( !pNextDevObj )\r
+       {\r
+               //cl_event_destroy( &p_ext->mutex );\r
+               IoDeleteDevice( p_dev_obj );\r
+               HCA_PRINT( TRACE_LEVEL_ERROR, HCA_DBG_PNP, \r
+                       ("IoAttachDeviceToDeviceStack failed.\n"));\r
+               return STATUS_NO_SUCH_DEVICE;\r
+       }\r
+\r
+       /* Initialize the complib extension. */\r
+       cl_init_pnp_po_ext( p_dev_obj, pNextDevObj, pPdo, 0,\r
+               &vfptrHcaPnp, NULL );\r
+\r
+       p_ext->state = HCA_ADDED;\r
+\r
+       HCA_EXIT(HCA_DBG_PNP);\r
+       return status;\r
+}\r
+\r
+\r
+static NTSTATUS\r
+__get_ci_interface(\r
+       IN                                      DEVICE_OBJECT* const    p_dev_obj )\r
+{\r
+       NTSTATUS                        status;\r
+       IRP                                     *p_irp;\r
+       hca_dev_ext_t           *p_ext;\r
+       IO_STATUS_BLOCK         ioStatus;\r
+       IO_STACK_LOCATION       *pIoStack;\r
+       KEVENT                          event;\r
+\r
+       HCA_ENTER( HCA_DBG_PNP );\r
+\r
+       p_ext = (hca_dev_ext_t*)p_dev_obj->DeviceExtension;\r
+\r
+       KeInitializeEvent( &event, NotificationEvent, FALSE );\r
+\r
+       /* Query for the verbs interface. */\r
+       p_irp = IoBuildSynchronousFsdRequest( IRP_MJ_PNP, p_ext->p_al_dev,\r
+               NULL, 0, NULL, &event, &ioStatus );\r
+       if( !p_irp )\r
+       {\r
+               HCA_PRINT( TRACE_LEVEL_ERROR, HCA_DBG_PNP, \r
+                       ("IoBuildSynchronousFsdRequest failed.\n"));\r
+               return STATUS_INSUFFICIENT_RESOURCES;\r
+       }\r
+\r
+       /* Format the IRP. */\r
+       pIoStack = IoGetNextIrpStackLocation( p_irp );\r
+       pIoStack->MinorFunction = IRP_MN_QUERY_INTERFACE;\r
+       pIoStack->Parameters.QueryInterface.Version = IB_CI_INTERFACE_VERSION;\r
+       pIoStack->Parameters.QueryInterface.Size = sizeof(ib_ci_ifc_t);\r
+       pIoStack->Parameters.QueryInterface.Interface = \r
+               (INTERFACE*)&p_ext->ci_ifc;\r
+       pIoStack->Parameters.QueryInterface.InterfaceSpecificData = NULL;\r
+       pIoStack->Parameters.QueryInterface.InterfaceType = \r
+               &GUID_IB_CI_INTERFACE;\r
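+       /* Per WDM convention, a PnP IRP starts out as STATUS_NOT_SUPPORTED so the\r
+        * status is meaningful if no driver below handles the query. */\r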
+       p_irp->IoStatus.Status = STATUS_NOT_SUPPORTED;\r
+\r
+       /* Send the IRP. */\r
+       status = IoCallDriver( p_ext->p_al_dev, p_irp );\r
+       if( status == STATUS_PENDING )\r
+       {\r
+               KeWaitForSingleObject( &event, Executive, KernelMode, \r
+                       FALSE, NULL );\r
+\r
+               status = ioStatus.Status;\r
+       }\r
+\r
+       if( !NT_SUCCESS( status ) )\r
+       {\r
+               HCA_PRINT( TRACE_LEVEL_ERROR,HCA_DBG_PNP, \r
+                       ("Query interface for verbs returned %08x.\n", status));\r
+               return status;\r
+       }\r
+\r
+       HCA_EXIT( HCA_DBG_PNP );\r
+       return status;\r
+}\r
+\r
+\r
+static NTSTATUS\r
+__pnp_notify_target(\r
+       IN                              void                                            *pNotifyStruct,\r
+       IN                              void                                            *context )\r
+{\r
+       NTSTATUS                                                        status = STATUS_SUCCESS;\r
+       DEVICE_OBJECT                                           *p_dev_obj;\r
+       hca_dev_ext_t                                           *p_ext;\r
+       TARGET_DEVICE_REMOVAL_NOTIFICATION      *pNotify;\r
+\r
+       HCA_ENTER( HCA_DBG_PNP );\r
+\r
+       pNotify = (TARGET_DEVICE_REMOVAL_NOTIFICATION*)pNotifyStruct;\r
+       p_dev_obj = (DEVICE_OBJECT*)context;\r
+       p_ext = (hca_dev_ext_t*)p_dev_obj->DeviceExtension;\r
+\r
+       if( IsEqualGUID( &pNotify->Event, &GUID_TARGET_DEVICE_QUERY_REMOVE ) )\r
+       {\r
+               if( p_ext->state == HCA_REGISTERED )\r
+               {\r
+                       /* Release AL's CI interface. */\r
+                       p_ext->ci_ifc.wdm.InterfaceDereference( p_ext->ci_ifc.wdm.Context );\r
+                       p_ext->state = HCA_STARTED;\r
+               }\r
+\r
+               /* Release AL's file object so that it can unload. */\r
+               CL_ASSERT( p_ext->p_al_file_obj );\r
+               CL_ASSERT( p_ext->p_al_file_obj == pNotify->FileObject );\r
+               ObDereferenceObject( p_ext->p_al_file_obj );\r
+               p_ext->p_al_file_obj = NULL;\r
+               p_ext->p_al_dev = NULL;\r
+       }\r
+       else if( IsEqualGUID( &pNotify->Event, \r
+               &GUID_TARGET_DEVICE_REMOVE_COMPLETE ) )\r
+       {\r
+               if( p_ext->state == HCA_REGISTERED )\r
+               {\r
+                       /* Release AL's CI interface. */\r
+                       p_ext->ci_ifc.wdm.InterfaceDereference( p_ext->ci_ifc.wdm.Context );\r
+                       p_ext->state = HCA_STARTED;\r
+               }\r
+\r
+               /* Release AL's file object so that it can unload. */\r
+               if( p_ext->p_al_file_obj )\r
+               {\r
+                       ObDereferenceObject( p_ext->p_al_file_obj );\r
+                       p_ext->p_al_file_obj = NULL;\r
+                       p_ext->p_al_dev = NULL;\r
+               }\r
+\r
+               /* Cancel our target device change registration. */\r
+               IoUnregisterPlugPlayNotification( p_ext->pnp_target_entry );\r
+               p_ext->pnp_target_entry = NULL;\r
+       }\r
+       else if( IsEqualGUID( &pNotify->Event, \r
+               &GUID_TARGET_DEVICE_REMOVE_CANCELLED ) )\r
+       {\r
+               /* Cancel our target device change registration. */\r
+               IoUnregisterPlugPlayNotification( p_ext->pnp_target_entry );\r
+               p_ext->pnp_target_entry = NULL;\r
+\r
+               /* Get the device object pointer for the AL. */\r
+               CL_ASSERT( !p_ext->p_al_file_obj );\r
+               CL_ASSERT( !p_ext->p_al_dev );\r
+               p_ext->p_al_file_obj = pNotify->FileObject;\r
+               p_ext->p_al_dev = IoGetRelatedDeviceObject( p_ext->p_al_file_obj );\r
+\r
+               status = IoRegisterPlugPlayNotification( \r
+                       EventCategoryTargetDeviceChange, 0, p_ext->p_al_file_obj, \r
+                       p_dev_obj->DriverObject, __pnp_notify_target, p_dev_obj, \r
+                       &p_ext->pnp_target_entry );\r
+               if( !NT_SUCCESS( status ) )\r
+               {\r
+                       HCA_PRINT( TRACE_LEVEL_ERROR, HCA_DBG_PNP, \r
+                               ("IoRegisterPlugPlayNotification returned %08x.\n", status));\r
+                       return status;\r
+               }\r
+\r
+               __hca_register( p_dev_obj );\r
+       }\r
+\r
+       HCA_EXIT( HCA_DBG_PNP );\r
+       return status;\r
+}\r
+\r
+\r
+static ci_interface_t*\r
+__alloc_hca_ifc(\r
+       IN                              hca_dev_ext_t* const            p_ext )\r
+{\r
+       ci_interface_t  *pIfc;\r
+\r
+       HCA_ENTER( HCA_DBG_PNP );\r
+\r
+       pIfc =\r
+               (ci_interface_t*)ExAllocatePool( PagedPool, sizeof(ci_interface_t) );\r
+       if( !pIfc )\r
+       {\r
+               HCA_PRINT( TRACE_LEVEL_ERROR,HCA_DBG_PNP, \r
+                       ("Failed to allocate ci_interface_t (%d bytes).\n",\r
+                       sizeof(ci_interface_t)));\r
+               return NULL;\r
+       }\r
+\r
+       setup_ci_interface( p_ext->hca.guid, pIfc );\r
+\r
+       pIfc->p_hca_dev = p_ext->cl_ext.p_pdo;\r
+       pIfc->vend_id = (uint32_t)p_ext->hcaConfig.VendorID;\r
+       pIfc->dev_id = (uint16_t)p_ext->hcaConfig.DeviceID;\r
+       pIfc->dev_revision = (uint16_t)p_ext->hca.hw_ver;\r
+\r
+       HCA_EXIT( HCA_DBG_PNP );\r
+       return pIfc;\r
+}\r
+\r
+\r
+static NTSTATUS\r
+__hca_register(\r
+       IN                              DEVICE_OBJECT                           *p_dev_obj )\r
+{\r
+       hca_dev_ext_t                   *p_ext;\r
+       NTSTATUS                                status;\r
+       ib_api_status_t                 ib_status;\r
+       ci_interface_t                  *p_hca_ifc;\r
+\r
+       HCA_ENTER( HCA_DBG_PNP );\r
+       \r
+       p_ext = (hca_dev_ext_t*)p_dev_obj->DeviceExtension;\r
+\r
+       ASSERT( p_ext->state == HCA_STARTED );\r
+       ASSERT( p_ext->p_al_dev );\r
+\r
+       /* Get the AL's lower interface. */\r
+       status = __get_ci_interface( p_dev_obj );\r
+       if( !NT_SUCCESS( status ) )\r
+       {\r
+               HCA_PRINT( TRACE_LEVEL_ERROR,HCA_DBG_SHIM, \r
+                       ("__get_ci_interface returned %08x.\n", status));\r
+               return status;\r
+       }\r
+\r
+       /* Allocate and populate our HCA interface structure. */\r
+       p_hca_ifc = __alloc_hca_ifc( p_ext );\r
+       if( !p_hca_ifc )\r
+       {\r
+               HCA_PRINT( TRACE_LEVEL_ERROR  ,HCA_DBG_SHIM  ,("__alloc_hca_ifc failed.\n"));\r
+               return STATUS_NO_MEMORY;\r
+       }\r
+\r
+       /* Notify AL that we're available... */\r
+       ib_status = p_ext->ci_ifc.register_ca( p_hca_ifc );\r
+       ExFreePool( p_hca_ifc );\r
+       if( ib_status != IB_SUCCESS )\r
+       {\r
+               p_ext->ci_ifc.wdm.InterfaceDereference( p_ext->ci_ifc.wdm.Context );\r
+               return STATUS_INSUFFICIENT_RESOURCES;\r
+       }\r
+\r
+       p_ext->state = HCA_REGISTERED;\r
+       return STATUS_SUCCESS;\r
+}\r
+\r
+\r
+static NTSTATUS\r
+__pnp_notify_ifc(\r
+       IN                              void                                            *pNotifyStruct,\r
+       IN                              void                                            *context )\r
+{\r
+       NTSTATUS                                                                status;\r
+       DEVICE_OBJECT                                                   *p_dev_obj;\r
+       hca_dev_ext_t                                                   *p_ext;\r
+       DEVICE_INTERFACE_CHANGE_NOTIFICATION    *pNotify;\r
+\r
+       HCA_ENTER( HCA_DBG_PNP );\r
+\r
+       pNotify = (DEVICE_INTERFACE_CHANGE_NOTIFICATION*)pNotifyStruct;\r
+       p_dev_obj = (DEVICE_OBJECT*)context;\r
+       p_ext = (hca_dev_ext_t*)p_dev_obj->DeviceExtension;\r
+\r
+       if( !IsEqualGUID( &pNotify->Event, &GUID_DEVICE_INTERFACE_ARRIVAL ) )\r
+       {\r
+               HCA_EXIT( HCA_DBG_PNP );\r
+               return STATUS_SUCCESS;\r
+       }\r
+\r
+       /*\r
+        * Sanity check.  We should only be getting notifications of the \r
+        * CI interface exported by AL.\r
+        */\r
+       ASSERT( \r
+               IsEqualGUID( &pNotify->InterfaceClassGuid, &GUID_IB_CI_INTERFACE ) );\r
+\r
+       if( p_ext->state != HCA_STARTED )\r
+       {\r
+               HCA_PRINT( TRACE_LEVEL_ERROR  ,HCA_DBG_SHIM  ,("Invalid state: %d\n", p_ext->state));\r
+               return STATUS_SUCCESS;\r
+       }\r
+\r
+       ASSERT( !p_ext->p_al_dev );\r
+       ASSERT( !p_ext->p_al_file_obj );\r
+\r
+       /* Get the AL device object. */\r
+       HCA_PRINT( TRACE_LEVEL_INFORMATION  ,HCA_DBG_SHIM  ,("Calling IoGetDeviceObjectPointer.\n"));\r
+       status = IoGetDeviceObjectPointer( pNotify->SymbolicLinkName,\r
+               FILE_ALL_ACCESS, &p_ext->p_al_file_obj, &p_ext->p_al_dev );\r
+       if( !NT_SUCCESS( status ) )\r
+       {\r
+               HCA_PRINT( TRACE_LEVEL_ERROR, HCA_DBG_SHIM, \r
+                       ("IoGetDeviceObjectPointer returned %08x.\n", status ));\r
+               return STATUS_SUCCESS;\r
+       }\r
+\r
+       /* Register for removal notification of the IB Fabric root device. */\r
+       HCA_PRINT( TRACE_LEVEL_INFORMATION, HCA_DBG_PNP, \r
+               ("Registering for target notifications.\n"));\r
+       status = IoRegisterPlugPlayNotification( \r
+               EventCategoryTargetDeviceChange, 0, p_ext->p_al_file_obj, \r
+               p_dev_obj->DriverObject, __pnp_notify_target, p_dev_obj, \r
+               &p_ext->pnp_target_entry );\r
+       if( !NT_SUCCESS( status ) )\r
+       {\r
+               ObDereferenceObject( p_ext->p_al_file_obj );\r
+               p_ext->p_al_file_obj = NULL;\r
+               p_ext->p_al_dev = NULL;\r
+               HCA_PRINT( TRACE_LEVEL_ERROR, HCA_DBG_PNP, \r
+                       ("IoRegisterPlugPlayNotification returned %08x.\n", status));\r
+               return STATUS_SUCCESS;\r
+       }\r
+\r
+       status = __hca_register( p_dev_obj );\r
+       if( !NT_SUCCESS( status ) )\r
+       {\r
+               IoUnregisterPlugPlayNotification( p_ext->pnp_target_entry );\r
+               p_ext->pnp_target_entry = NULL;\r
+               ObDereferenceObject( p_ext->p_al_file_obj );\r
+               p_ext->p_al_file_obj = NULL;\r
+               p_ext->p_al_dev = NULL;\r
+               HCA_PRINT( TRACE_LEVEL_ERROR, HCA_DBG_SHIM, \r
+                       ("__hca_register returned %08x.\n", status));\r
+               return STATUS_SUCCESS;\r
+       }\r
+\r
+       HCA_EXIT( HCA_DBG_PNP );\r
+       return STATUS_SUCCESS;\r
+}\r
+\r
+\r
+/*\r
+ * Walk the resource lists and store the information.  The write-only\r
+ * flag is not set for the UAR region, so it is indistinguishable from the\r
+ * DDR region since both are prefetchable.  The code here assumes that the\r
+ * resources get handed in order - HCR, UAR, DDR.\r
+ *     - Configuration Space: not prefetchable, read/write\r
+ *     - UAR space: prefetchable, write only.\r
+ *     - DDR: prefetchable, read/write.\r
+ */\r
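+/*\r
+ * The loop below also relies on hca_bar_type_t enumerating HCR, UAR and DDR in\r
+ * that order (terminated by HCA_BAR_TYPE_MAX), so that incrementing 'type'\r
+ * tracks the memory BARs as they are encountered.\r
+ */\r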
+static NTSTATUS\r
+__SetupHcaResources(\r
+       IN                              DEVICE_OBJECT* const            p_dev_obj,\r
+       IN                              CM_RESOURCE_LIST* const         pHcaResList,\r
+       IN                              CM_RESOURCE_LIST* const         pHostResList )\r
+{\r
+       NTSTATUS                                                status = STATUS_SUCCESS;\r
+       hca_dev_ext_t                                   *p_ext;\r
+       USHORT                                                  i;\r
+       hca_bar_type_t                                  type = HCA_BAR_TYPE_HCR;\r
+\r
+       CM_PARTIAL_RESOURCE_DESCRIPTOR  *pHcaRes, *pHostRes;\r
+\r
+       HCA_ENTER( HCA_DBG_PNP );\r
+\r
+       p_ext = (hca_dev_ext_t*)p_dev_obj->DeviceExtension;\r
+\r
+       ASSERT( pHostResList->List[0].PartialResourceList.Version == 1 );\r
+       ASSERT( pHostResList->List[0].PartialResourceList.Revision == 1 );\r
+\r
+       // store the bus number for reset of Tavor\r
+       p_ext->bus_number = pHostResList->List[0].BusNumber;\r
+       \r
+       for( i = 0; i < pHostResList->List[0].PartialResourceList.Count; i++ )\r
+       {\r
+               pHcaRes =\r
+                       &pHcaResList->List[0].PartialResourceList.PartialDescriptors[i];\r
+               pHostRes = \r
+                       &pHostResList->List[0].PartialResourceList.PartialDescriptors[i];\r
+\r
+\r
+               /*\r
+                * Save the interrupt information so that we can power the device\r
+                * up and down.  Since the device will lose state when powered down\r
+                * we have to fully disable it.  Note that we can leave memory mapped\r
+                * resources in place when powered down as the resource assignments\r
+                * won't change.  However, we must disconnect our interrupt, and\r
+                * reconnect it when powering up.\r
+                */\r
+               if( pHcaRes->Type == CmResourceTypeInterrupt )\r
+               {\r
+                       p_ext->interruptInfo = *pHostRes;\r
+                       continue;\r
+               }\r
+               \r
+               if( pHcaRes->Type != CmResourceTypeMemory )\r
+                       continue;\r
+\r
+               /*\r
+                * Sanity check that our assumption on how resources\r
+                * are reported hold.\r
+                */\r
+               if( type == HCA_BAR_TYPE_HCR &&\r
+                       (pHcaRes->Flags & CM_RESOURCE_MEMORY_PREFETCHABLE) )\r
+               {\r
+                       HCA_PRINT( TRACE_LEVEL_ERROR, HCA_DBG_PNP, \r
+                               ("First memory resource is prefetchable - expected HCR.\n"));\r
+                       status = STATUS_UNSUCCESSFUL;\r
+                       break;\r
+               }\r
+\r
+               p_ext->bar[type].phys = pHcaRes->u.Memory.Start.QuadPart;\r
+               p_ext->bar[type].size = pHcaRes->u.Memory.Length;\r
+#ifdef MAP_ALL_HCA_MEMORY              \r
+               /*leo: no need to map all the resources */\r
+               p_ext->bar[type].virt = MmMapIoSpace( pHostRes->u.Memory.Start,\r
+                       pHostRes->u.Memory.Length, MmNonCached );\r
+               if( !p_ext->bar[type].virt )\r
+               {\r
+                       HCA_PRINT( TRACE_LEVEL_ERROR, HCA_DBG_PNP, \r
+                               ("Failed to map memory resource type %d\n", type));\r
+                       status = STATUS_UNSUCCESSFUL;\r
+                       break;\r
+               }\r
+#else          \r
+               p_ext->bar[type].virt = NULL;\r
+#endif         \r
+\r
+               type++;\r
+       }\r
+\r
+       if( type == HCA_BAR_TYPE_DDR )\r
+       {\r
+               p_ext->hca_hidden = 1;\r
+       }\r
+       else if( type != HCA_BAR_TYPE_MAX )\r
+       {\r
+               HCA_PRINT( TRACE_LEVEL_ERROR  ,HCA_DBG_SHIM  ,("Failed to map all memory resources.\n"));\r
+               status = STATUS_UNSUCCESSFUL;\r
+       }\r
+\r
+       if( p_ext->interruptInfo.Type != CmResourceTypeInterrupt )\r
+       {\r
+               HCA_PRINT( TRACE_LEVEL_ERROR  ,HCA_DBG_SHIM  ,("No interrupt resource.\n"));\r
+               status = STATUS_UNSUCCESSFUL;\r
+       }\r
+\r
+       HCA_EXIT( HCA_DBG_PNP );\r
+       return status;\r
+}\r
+\r
+\r
+static void\r
+__UnmapHcaMemoryResources(\r
+       IN                              DEVICE_OBJECT* const            p_dev_obj )\r
+{\r
+       hca_dev_ext_t           *p_ext;\r
+       USHORT                          i;\r
+\r
+       HCA_ENTER( HCA_DBG_PNP );\r
+\r
+       p_ext = (hca_dev_ext_t*)p_dev_obj->DeviceExtension;\r
+\r
+       for( i = 0; i < HCA_BAR_TYPE_MAX; i++ )\r
+       {\r
+               if( p_ext->bar[i].virt )\r
+               {\r
+                       MmUnmapIoSpace( p_ext->bar[i].virt, p_ext->bar[i].size );\r
+                       cl_memclr( &p_ext->bar[i], sizeof(hca_bar_t) );\r
+               }\r
+       }\r
+\r
+       HCA_EXIT( HCA_DBG_PNP );\r
+}\r
+\r
+\r
+static NTSTATUS\r
+hca_start(\r
+       IN                              DEVICE_OBJECT* const            p_dev_obj,\r
+       IN                              IRP* const                                      p_irp, \r
+               OUT                     cl_irp_action_t* const          p_action )\r
+{\r
+       NTSTATUS                        status;\r
+       hca_dev_ext_t           *p_ext;\r
+       IO_STACK_LOCATION       *pIoStack;\r
+       POWER_STATE                     powerState;\r
+       DEVICE_DESCRIPTION      devDesc;\r
+\r
+       HCA_ENTER( HCA_DBG_PNP );\r
+\r
+       p_ext = (hca_dev_ext_t*)p_dev_obj->DeviceExtension;\r
+\r
+       /* Handled on the way up. */\r
+       status = cl_do_sync_pnp( p_dev_obj, p_irp, p_action );\r
+       if( !NT_SUCCESS( status ) )\r
+       {\r
+               HCA_PRINT( TRACE_LEVEL_ERROR, HCA_DBG_PNP, \r
+                       ("Lower drivers failed IRP_MN_START_DEVICE.\n"));\r
+               return status;\r
+       }\r
+\r
+       pIoStack = IoGetCurrentIrpStackLocation( p_irp );\r
+\r
+       /*\r
+        * Walk the resource lists and store the information.  The write-only\r
+        * flag is not set for the UAR region, so it is indistinguishable from the\r
+        * DDR region since both are prefetchable.  The code here assumes that the\r
+        * resources get handed in order - HCR, UAR, DDR.\r
+        *      - Configuration Space: not prefetchable, read/write\r
+        *      - UAR space: prefetchable, write only.\r
+        *      - DDR: prefetchable, read/write.\r
+        */\r
+       status = __SetupHcaResources( p_dev_obj,\r
+               pIoStack->Parameters.StartDevice.AllocatedResources,\r
+               pIoStack->Parameters.StartDevice.AllocatedResourcesTranslated );\r
+       if( !NT_SUCCESS( status ) )\r
+       {\r
+               HCA_PRINT( TRACE_LEVEL_ERROR, HCA_DBG_PNP, \r
+                       ("__SetupHcaResources returned %08X.\n", status));\r
+               return status;\r
+       }\r
+       \r
+       /* save PCI configuration info and enable device */\r
+       hca_enable_pci( p_dev_obj, &p_ext->hcaConfig );\r
+\r
+       /*\r
+        * Get the DMA adapter representing the HCA so we can\r
+        * allocate common buffers.\r
+        */\r
+       RtlZeroMemory( &devDesc, sizeof(devDesc) );\r
+       devDesc.Version = DEVICE_DESCRIPTION_VERSION2;\r
+       devDesc.Master = TRUE;\r
+       devDesc.ScatterGather = TRUE;\r
+       devDesc.Dma32BitAddresses = TRUE;\r
+       devDesc.Dma64BitAddresses = TRUE;\r
+       devDesc.InterfaceType = PCIBus;\r
+\r
+       // Get the adapter object.\r
+       // 0x80000000 acts as a threshold value, hence the -1.\r
+       devDesc.MaximumLength = 0x80000000 - 1;\r
+       p_ext->p_dma_adapter = IoGetDmaAdapter(\r
+               p_ext->cl_ext.p_pdo, &devDesc, &p_ext->n_map_regs );\r
+       if( !p_ext->p_dma_adapter )\r
+       {\r
+               HCA_PRINT( TRACE_LEVEL_ERROR, HCA_DBG_PNP, \r
+                       ("Failed to get DMA_ADAPTER for HCA.\n"));\r
+               return STATUS_INSUFFICIENT_RESOURCES;\r
+       }\r
+\r
+       /* Initialize the HCA now. */\r
+       status = mthca_init_one( p_ext );\r
+       if( !NT_SUCCESS( status ) )\r
+       {\r
+               //TODO: no cleanup on error\r
+               HCA_PRINT( TRACE_LEVEL_ERROR, HCA_DBG_PNP, \r
+                       ("mthca_init_one returned %08X\n", status));\r
+               return status;\r
+       }\r
+\r
+       /*\r
+        * Change the state now, since the PnP interface-arrival callback can fire\r
+        * before the registration call below returns.\r
+        */\r
+       p_ext->state = HCA_STARTED;\r
+\r
+       /*leo: get node GUID */\r
+       {\r
+               int err =       mthca_get_dev_info( p_ext->hca.mdev, &p_ext->hca.guid, &p_ext->hca.hw_ver );\r
+               if (err) {\r
+                       //TODO: no cleanup on error\r
+                       HCA_PRINT( TRACE_LEVEL_ERROR,HCA_DBG_PNP, \r
+                               ("can't get guid - mthca_get_dev_info() failed\n"));\r
+                       return STATUS_INSUFFICIENT_RESOURCES;\r
+               }\r
+       }\r
+\r
+       /* queue HCA  */\r
+       mlnx_hca_insert( &p_ext->hca );\r
+\r
+       /* Register for interface arrival of the IB_AL device. */\r
+       status = IoRegisterPlugPlayNotification(\r
+               EventCategoryDeviceInterfaceChange,\r
+               PNPNOTIFY_DEVICE_INTERFACE_INCLUDE_EXISTING_INTERFACES,\r
+               (void*)&GUID_IB_CI_INTERFACE, p_dev_obj->DriverObject,\r
+               __pnp_notify_ifc, p_dev_obj, &p_ext->pnp_ifc_entry );\r
+       if( !NT_SUCCESS( status ) )\r
+       {\r
+               p_ext->state = HCA_ADDED;\r
+               HCA_PRINT( TRACE_LEVEL_ERROR, HCA_DBG_PNP,\r
+                       ("IoRegisterPlugPlayNotification returned %08x.\n", status));\r
+       }\r
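+       /* A notification-registration failure is not treated as fatal here: the state\r
+        * is reverted to HCA_ADDED and start continues, but the HCA will not get\r
+        * registered with AL through interface-arrival notification. */\r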
+\r
+       /* We get started fully powered. */\r
+       p_ext->PowerState = PowerDeviceD0;\r
+       powerState.DeviceState = PowerDeviceD0;\r
+       PoSetPowerState ( p_ext->cl_ext.p_self_do, DevicePowerState, powerState );\r
+\r
+       {\r
+               struct mthca_dev *mdev = p_ext->hca.mdev;\r
+               HCA_PRINT_EV(TRACE_LEVEL_INFORMATION ,HCA_DBG_LOW ,\r
+                       ("Ven %d Dev %d Hw %x Fw %d.%d.%d Drv %s (%s) Flg %s%s%s\n", \r
+                       (unsigned)p_ext->hcaConfig.VendorID, (unsigned)p_ext->hcaConfig.DeviceID,\r
+                       p_ext->hca.hw_ver,      (int) (mdev->fw_ver >> 32),\r
+                       (int) (mdev->fw_ver >> 16) & 0xffff, (int) (mdev->fw_ver & 0xffff),\r
+                       DRV_VERSION, DRV_RELDATE,\r
+                       (mdev->mthca_flags & MTHCA_FLAG_MEMFREE) ? "M:" : "",\r
+                       (mdev->mthca_flags & MTHCA_FLAG_PCIE) ? "E:" : "",\r
+                       (mdev->mthca_flags & MTHCA_FLAG_DDR_HIDDEN) ? "H" : ""\r
+                       ));\r
+       }\r
+\r
+       HCA_EXIT( HCA_DBG_PNP );\r
+       return status;\r
+}\r
+\r
+\r
+static void\r
+hca_release_resources(\r
+       IN                              DEVICE_OBJECT* const            p_dev_obj )\r
+{\r
+       hca_dev_ext_t           *p_ext;\r
+       POWER_STATE                     powerState;\r
+\r
+       HCA_ENTER( HCA_DBG_PNP );\r
+\r
+       p_ext = (hca_dev_ext_t*)p_dev_obj->DeviceExtension;\r
+\r
+       if( p_ext->state == HCA_REGISTERED )\r
+       {\r
+               CL_ASSERT( p_ext->ci_ifc.deregister_ca );\r
+               CL_ASSERT( p_ext->p_al_dev );\r
+               CL_ASSERT( p_ext->p_al_file_obj );\r
+               /* Notify AL that the CA is being removed. */\r
+               p_ext->ci_ifc.deregister_ca( p_ext->hca.guid );\r
+               /* Release AL's CI interface. */\r
+               p_ext->ci_ifc.wdm.InterfaceDereference( p_ext->ci_ifc.wdm.Context );\r
+       }\r
+\r
+       if( p_ext->pnp_target_entry )\r
+       {\r
+               ASSERT( p_ext->pnp_ifc_entry );\r
+               IoUnregisterPlugPlayNotification( p_ext->pnp_target_entry );\r
+       }\r
+\r
+       if( p_ext->pnp_ifc_entry )\r
+               IoUnregisterPlugPlayNotification( p_ext->pnp_ifc_entry );\r
+\r
+       if( p_ext->p_al_file_obj )\r
+               ObDereferenceObject( p_ext->p_al_file_obj );\r
+\r
+       mthca_remove_one( p_ext );\r
+\r
+       if( p_ext->p_dma_adapter )\r
+               p_ext->p_dma_adapter->DmaOperations->PutDmaAdapter( p_ext->p_dma_adapter );\r
+\r
+       //cl_event_destroy( &p_ext->mutex );\r
+       __UnmapHcaMemoryResources( p_dev_obj );\r
+\r
+       /* Notify the power manager that the device is powered down. */\r
+       powerState.DeviceState = PowerDeviceD3;\r
+       PoSetPowerState ( p_ext->cl_ext.p_self_do, DevicePowerState, powerState );\r
+\r
+       /* Clear the PnP state in case we get restarted. */\r
+       p_ext->pnpState = 0;\r
+\r
+       HCA_EXIT( HCA_DBG_PNP );\r
+}\r
+\r
+\r
+static NTSTATUS\r
+hca_query_removal_relations(\r
+       IN                                      DEVICE_OBJECT* const    p_dev_obj,\r
+       IN                                      IRP* const                              p_irp, \r
+               OUT                             cl_irp_action_t* const  p_action )\r
+{\r
+       NTSTATUS        status;\r
+       hca_dev_ext_t   *p_ext;\r
+\r
+       HCA_ENTER( HCA_DBG_PNP );\r
+\r
+       p_ext = (hca_dev_ext_t*)p_dev_obj->DeviceExtension;\r
+\r
+       if( p_ext->state == HCA_REGISTERED )\r
+       {\r
+               status = p_ext->ci_ifc.get_relations( p_ext->hca.guid, p_irp );\r
+               if( !NT_SUCCESS( status ) )\r
+               {\r
+                       *p_action = IrpComplete;\r
+                       HCA_PRINT( TRACE_LEVEL_ERROR, HCA_DBG_PNP, \r
+                               ("AL get_relations returned %08x.\n", status));\r
+                       return status;\r
+               }\r
+       }\r
+\r
+       *p_action = IrpPassDown;\r
+       HCA_EXIT( HCA_DBG_PNP );\r
+       return STATUS_SUCCESS;\r
+}\r
+\r
+\r
+static NTSTATUS\r
+hca_query_bus_relations(\r
+       IN                                      DEVICE_OBJECT* const    p_dev_obj,\r
+       IN                                      IRP* const                              p_irp, \r
+               OUT                             cl_irp_action_t* const  p_action )\r
+{\r
+#ifndef WIN_TO_BE_CHANGED\r
+\r
+       NTSTATUS                        status;\r
+       DEVICE_RELATIONS        *p_rel;\r
+       hca_dev_ext_t   *p_ext;\r
+\r
+       HCA_ENTER( HCA_DBG_PNP );\r
+\r
+       p_ext = p_dev_obj->DeviceExtension;\r
+\r
+       //cl_event_wait_on( &p_ext->mutex, EVENT_NO_TIMEOUT, FALSE );\r
+       if( p_ext->state == HCA_REGISTERED )\r
+       {\r
+               status = p_ext->ci_ifc.get_relations( p_ext->hca.guid, p_irp );\r
+               if( !NT_SUCCESS( status ) )\r
+               {\r
+                       //cl_event_signal( &p_ext->mutex );\r
+                       *p_action = IrpComplete;\r
+                       HCA_PRINT( TRACE_LEVEL_ERROR, HCA_DBG_PNP, \r
+                               ("AL get_relations returned %08x.\n", status));\r
+                       return status;\r
+               }\r
+       }\r
+       else\r
+       {\r
+               status = cl_alloc_relations( p_irp, 1 );\r
+               if( !NT_SUCCESS( status ) )\r
+               {\r
+                       HCA_PRINT( TRACE_LEVEL_ERROR, HCA_DBG_PNP, \r
+                               ("cl_alloc_relations returned %08x.\n", status));\r
+                       return status;\r
+               }\r
+\r
+               p_rel = (DEVICE_RELATIONS*)p_irp->IoStatus.Information;\r
+               p_rel->Count = 0;\r
+               p_rel->Objects[0] = NULL;\r
+       }\r
+\r
+       //cl_event_signal( &p_ext->mutex );\r
+\r
+       *p_action = IrpPassDown;\r
+       HCA_EXIT( HCA_DBG_PNP );\r
+       return STATUS_SUCCESS;\r
+\r
+#else\r
+       return cl_irp_skip( p_dev_obj, p_irp, p_action );\r
+       //NTSTATUS      status;\r
+       //hca_dev_ext_t *p_ext;\r
+\r
+       //HCA_ENTER( HCA_DBG_PNP );\r
+\r
+       //p_ext = p_dev_obj->DeviceExtension;\r
+\r
+       //if( p_ext->state == HCA_REGISTERED )\r
+       //{\r
+       //      status = p_ext->ci_ifc.get_relations( p_ext->hca.guid, p_irp );\r
+       //      if( !NT_SUCCESS( status ) )\r
+       //      {\r
+       //              *p_action = IrpComplete;\r
+       //              HCA_PRINT( TRACE_LEVEL_ERROR,\r
+       //                      "AL get_relations returned %08x.\n", status);\r
+       //              return status;\r
+       //      }\r
+       //}\r
+\r
+       //*p_action = IrpPassDown;\r
+       //HCA_EXIT( HCA_DBG_PNP );\r
+       //return STATUS_SUCCESS;\r
+#endif \r
+}\r
+\r
+\r
+static NTSTATUS\r
+hca_query_stop(\r
+       IN                              DEVICE_OBJECT* const            p_dev_obj,\r
+       IN                              IRP* const                                      p_irp, \r
+               OUT                     cl_irp_action_t* const          p_action )\r
+{\r
+       /* All kernel clients will get notified through the device hierarchy. */\r
+\r
+       /* TODO: set a flag to fail creation of any new IB resources. */\r
+       return cl_irp_skip( p_dev_obj, p_irp, p_action );\r
+}\r
+\r
+\r
+static NTSTATUS\r
+hca_stop(\r
+       IN                              DEVICE_OBJECT* const            p_dev_obj,\r
+       IN                              IRP* const                                      p_irp, \r
+               OUT                     cl_irp_action_t* const          p_action )\r
+{\r
+       /*\r
+        * Must disable everything.  Complib framework will\r
+        * call ReleaseResources handler.\r
+        */\r
+       return cl_irp_skip( p_dev_obj, p_irp, p_action );\r
+}\r
+\r
+\r
+static NTSTATUS\r
+hca_cancel_stop(\r
+       IN                              DEVICE_OBJECT* const            p_dev_obj,\r
+       IN                              IRP* const                                      p_irp, \r
+               OUT                     cl_irp_action_t* const          p_action )\r
+{\r
+       /* Handled on the way up. */\r
+       return cl_do_sync_pnp( p_dev_obj, p_irp, p_action );\r
+}\r
+\r
+\r
+static NTSTATUS\r
+hca_query_remove(\r
+       IN                              DEVICE_OBJECT* const            p_dev_obj,\r
+       IN                              IRP* const                                      p_irp, \r
+               OUT                     cl_irp_action_t* const          p_action )\r
+{\r
+       /* Query remove always succeeds. */\r
+       /* TODO: set a flag to fail creation of any new IB resources. */\r
+       return cl_irp_skip( p_dev_obj, p_irp, p_action );\r
+}\r
+\r
+\r
+static NTSTATUS\r
+hca_cancel_remove(\r
+       IN                              DEVICE_OBJECT* const            p_dev_obj,\r
+       IN                              IRP* const                                      p_irp, \r
+               OUT                     cl_irp_action_t* const          p_action )\r
+{\r
+       /* Handled on the way up. */\r
+       return cl_do_sync_pnp( p_dev_obj, p_irp, p_action );\r
+}\r
+\r
+\r
+static NTSTATUS\r
+hca_surprise_remove(\r
+       IN                              DEVICE_OBJECT* const            p_dev_obj,\r
+       IN                              IRP* const                                      p_irp, \r
+               OUT                     cl_irp_action_t* const          p_action )\r
+{\r
+       /*\r
+        * TODO: Set state so that all further requests\r
+        * automatically succeed/fail as needed.\r
+        */\r
+       return cl_irp_skip( p_dev_obj, p_irp, p_action );\r
+}\r
+\r
+\r
+static NTSTATUS\r
+hca_query_capabilities(\r
+       IN                              DEVICE_OBJECT* const            p_dev_obj,\r
+       IN                              IRP* const                                      p_irp, \r
+               OUT                     cl_irp_action_t* const          p_action )\r
+{\r
+       NTSTATUS                        status;\r
+       hca_dev_ext_t           *p_ext;\r
+       IO_STACK_LOCATION       *pIoStack;\r
+       DEVICE_CAPABILITIES     *pCaps;\r
+\r
+       HCA_ENTER( HCA_DBG_PNP );\r
+\r
+       p_ext = (hca_dev_ext_t*)p_dev_obj->DeviceExtension;\r
+\r
+       /* Process on the way up. */\r
+       status = cl_do_sync_pnp( p_dev_obj, p_irp, p_action );\r
+       if( !NT_SUCCESS( status ) )\r
+       {\r
+               HCA_PRINT( TRACE_LEVEL_ERROR, HCA_DBG_PNP, \r
+                       ("cl_do_sync_pnp returned %08X.\n", status));\r
+               return status;\r
+       }\r
+\r
+       pIoStack = IoGetCurrentIrpStackLocation( p_irp );\r
+       pCaps = pIoStack->Parameters.DeviceCapabilities.Capabilities;\r
+\r
+       /*\r
+        * Store the device power mapping into our extension since we're\r
+        * the power policy owner.  The mapping is used when handling\r
+        * IRP_MN_SET_POWER IRPs.\r
+        */\r
+       cl_memcpy(\r
+               p_ext->DevicePower, pCaps->DeviceState, sizeof(p_ext->DevicePower) );\r
+\r
+       if( pCaps->DeviceD1 )\r
+       {\r
+               HCA_PRINT( TRACE_LEVEL_WARNING ,HCA_DBG_PNP,\r
+                       ("WARNING: Device reports support for DeviceD1 power state.\n"));\r
+               pCaps->DeviceD1 = FALSE;\r
+       }\r
+\r
+       if( pCaps->DeviceD2 )\r
+       {\r
+               HCA_PRINT( TRACE_LEVEL_WARNING,HCA_DBG_PNP,\r
+                       ("WARNING: Device reports support for DeviceD2 power state.\n"));\r
+               pCaps->DeviceD2 = FALSE;\r
+       }\r
+\r
+       if( pCaps->SystemWake != PowerSystemUnspecified )\r
+       {\r
+               HCA_PRINT( TRACE_LEVEL_WARNING ,HCA_DBG_PNP,\r
+                       ("WARNING: Device reports support for system wake.\n"));\r
+               pCaps->SystemWake = PowerSystemUnspecified;\r
+       }\r
+\r
+       if( pCaps->DeviceWake != PowerDeviceUnspecified )\r
+       {\r
+               HCA_PRINT( TRACE_LEVEL_WARNING, HCA_DBG_PNP,\r
+                       ("WARNING: Device reports support for device wake.\n"));\r
+               pCaps->DeviceWake = PowerDeviceUnspecified;\r
+       }\r
+\r
+       HCA_EXIT( HCA_DBG_PNP );\r
+       return status;\r
+}\r
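+\r
+/*\r
+ * Illustrative sketch (compiled out, not part of the driver): the\r
+ * DevicePower[] mapping saved above is indexed by the requested system\r
+ * state when an IRP_MN_SET_POWER arrives, as __SystemPowerCompletion does\r
+ * below.  The helper name is hypothetical.\r
+ */\r
+#if 0\r
+static DEVICE_POWER_STATE\r
+__map_system_to_device_state(\r
+       IN                              hca_dev_ext_t* const            p_ext,\r
+       IN                              SYSTEM_POWER_STATE                      sysState )\r
+{\r
+       /* DevicePower[] was copied from DEVICE_CAPABILITIES.DeviceState. */\r
+       return p_ext->DevicePower[sysState];\r
+}\r
+#endif\r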
+\r
+\r
+static NTSTATUS\r
+hca_query_pnp_state(\r
+       IN                              DEVICE_OBJECT* const            p_dev_obj,\r
+       IN                              IRP* const                                      p_irp, \r
+               OUT                     cl_irp_action_t* const          p_action )\r
+{\r
+       hca_dev_ext_t           *p_ext;\r
+\r
+       HCA_ENTER( HCA_DBG_PNP );\r
+\r
+       p_ext = (hca_dev_ext_t*)p_dev_obj->DeviceExtension;\r
+\r
+       p_irp->IoStatus.Information |= p_ext->pnpState;\r
+\r
+       *p_action = IrpSkip;\r
+\r
+       HCA_EXIT( HCA_DBG_PNP );\r
+       return STATUS_SUCCESS;\r
+}\r
+\r
+\r
+static NTSTATUS\r
+hca_query_power(\r
+       IN                              DEVICE_OBJECT* const            p_dev_obj,\r
+       IN                              IRP* const                                      p_irp,\r
+               OUT                     cl_irp_action_t* const          p_action )\r
+{\r
+       NTSTATUS                        status = STATUS_SUCCESS;\r
+       IO_STACK_LOCATION       *pIoStack;\r
+\r
+       HCA_ENTER(HCA_DBG_PO);\r
+\r
+       UNUSED_PARAM( p_dev_obj );\r
+\r
+       pIoStack = IoGetCurrentIrpStackLocation( p_irp );\r
+\r
+       switch( pIoStack->Parameters.Power.Type )\r
+       {\r
+       case SystemPowerState:\r
+               /* Fail any requests to hibernate or sleep the system. */\r
+               switch( pIoStack->Parameters.Power.State.SystemState )\r
+               {\r
+                       case PowerSystemWorking:\r
+                       case PowerSystemShutdown:\r
+                               /* We only support fully working and shutdown system states. */\r
+                               break;\r
+\r
+                       default:\r
+                               status = STATUS_NOT_SUPPORTED;\r
+               }\r
+               break;\r
+\r
+       case DevicePowerState:\r
+               /* Fail any query for low power states. */\r
+               switch( pIoStack->Parameters.Power.State.DeviceState )\r
+               {\r
+               case PowerDeviceD0:\r
+               case PowerDeviceD3:\r
+                       /* We only support fully powered or off power states. */\r
+                       break;\r
+\r
+               default:\r
+                       status = STATUS_NOT_SUPPORTED;\r
+               }\r
+               break;\r
+       }\r
+\r
+       if( status == STATUS_NOT_SUPPORTED )\r
+               *p_action = IrpComplete;\r
+       else\r
+               *p_action = IrpSkip;\r
+\r
+       HCA_EXIT( HCA_DBG_PO );\r
+       return status;\r
+}\r
+\r
+\r
+static void\r
+__RequestPowerCompletion(\r
+       IN                              DEVICE_OBJECT                           *p_dev_obj,\r
+       IN                              UCHAR                                           minorFunction,\r
+       IN                              POWER_STATE                                     powerState,\r
+       IN                              void                                            *context,\r
+       IN                              IO_STATUS_BLOCK                         *pIoStatus )\r
+{\r
+       IRP                                     *p_irp;\r
+       cl_pnp_po_ext_t         *p_ext;\r
+\r
+       HCA_ENTER( HCA_DBG_PO );\r
+\r
+       UNUSED_PARAM( minorFunction );\r
+       UNUSED_PARAM( powerState );\r
+\r
+       p_irp = (IRP*)context;\r
+       p_ext = (cl_pnp_po_ext_t*)p_dev_obj->DeviceExtension;\r
+\r
+       /* Propagate the device IRP status to the system IRP status. */\r
+       p_irp->IoStatus.Status = pIoStatus->Status;\r
+\r
+       /* Continue Power IRP processing. */\r
+       PoStartNextPowerIrp( p_irp );\r
+       IoCompleteRequest( p_irp, IO_NO_INCREMENT );\r
+       IoReleaseRemoveLock( &p_ext->remove_lock, p_irp );\r
+       HCA_EXIT( HCA_DBG_PO );\r
+}\r
+\r
+\r
+/*NOTE: Completion routines must NEVER be pageable. */\r
+static NTSTATUS\r
+__SystemPowerCompletion(\r
+       IN                              DEVICE_OBJECT                           *p_dev_obj,\r
+       IN                              IRP                                                     *p_irp,\r
+       IN                              void                                            *context )\r
+{\r
+       NTSTATUS                        status;\r
+       POWER_STATE                     state;\r
+       hca_dev_ext_t           *p_ext;\r
+       IO_STACK_LOCATION       *pIoStack;\r
+\r
+       HCA_ENTER( HCA_DBG_PNP );\r
+\r
+       UNUSED_PARAM( context );\r
+\r
+       p_ext = (hca_dev_ext_t*)p_dev_obj->DeviceExtension;\r
+       pIoStack = IoGetCurrentIrpStackLocation( p_irp );\r
+\r
+       if( !NT_SUCCESS( p_irp->IoStatus.Status ) )\r
+       {\r
+               PoStartNextPowerIrp( p_irp );\r
+               IoReleaseRemoveLock( &p_ext->cl_ext.remove_lock, p_irp );\r
+               HCA_PRINT( TRACE_LEVEL_ERROR, HCA_DBG_PNP, \r
+                       ("IRP_MN_SET_POWER for system failed by lower driver with %08x.\n",\r
+                       p_irp->IoStatus.Status));\r
+               return STATUS_SUCCESS;\r
+       }\r
+\r
+       state.DeviceState = \r
+               p_ext->DevicePower[pIoStack->Parameters.Power.State.SystemState];\r
+\r
+       /*\r
+        * Send a device power IRP to our devnode.  Using our device object will\r
+        * only work on Win2K and other NT-based systems.\r
+        */\r
+       status = PoRequestPowerIrp( p_dev_obj, IRP_MN_SET_POWER, state,\r
+               __RequestPowerCompletion, p_irp, NULL );\r
+\r
+       if( !NT_SUCCESS( status ) )\r
+       {\r
+               PoStartNextPowerIrp( p_irp );\r
+               /* Propagate the failure. */\r
+               p_irp->IoStatus.Status = status;\r
+               IoCompleteRequest( p_irp, IO_NO_INCREMENT );\r
+               IoReleaseRemoveLock( &p_ext->cl_ext.remove_lock, p_irp );\r
+               HCA_PRINT( TRACE_LEVEL_ERROR, HCA_DBG_PNP,\r
+                       ("PoRequestPowerIrp returned %08x.\n", status));\r
+       }\r
+\r
+       HCA_EXIT( HCA_DBG_PNP );\r
+       return STATUS_MORE_PROCESSING_REQUIRED;\r
+}\r
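+\r
+/*\r
+ * NOTE: returning STATUS_MORE_PROCESSING_REQUIRED above keeps the system\r
+ * power IRP pending; __RequestPowerCompletion completes it once the device\r
+ * power IRP requested through PoRequestPowerIrp has finished.\r
+ */\r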
+\r
+\r
+/* Work item callback to handle DevicePowerD0 IRPs at passive level. */\r
+static void\r
+__PowerUpCb(\r
+       IN                              DEVICE_OBJECT*                          p_dev_obj,\r
+       IN                              void*                                           context )\r
+{\r
+       NTSTATUS                        status;\r
+       IO_STACK_LOCATION       *pIoStack;\r
+       hca_dev_ext_t           *p_ext;\r
+       IRP                                     *p_irp;\r
+\r
+       HCA_ENTER( HCA_DBG_PO );\r
+\r
+       p_ext = (hca_dev_ext_t*)p_dev_obj->DeviceExtension;\r
+       p_irp = (IRP*)context;\r
+       pIoStack = IoGetCurrentIrpStackLocation( p_irp );\r
+\r
+       IoFreeWorkItem( p_ext->pPoWorkItem );\r
+       p_ext->pPoWorkItem = NULL;\r
+\r
+       status = mthca_init_one( p_ext );\r
+       if( !NT_SUCCESS( status ) )\r
+               goto done;\r
+\r
+       if( p_ext->p_al_dev )\r
+               status = __hca_register( p_dev_obj );\r
+\r
+done:\r
+       if( !NT_SUCCESS( status ) )\r
+       {\r
+               /* Flag device as having failed. */\r
+               p_ext->pnpState |= PNP_DEVICE_FAILED;\r
+               IoInvalidateDeviceState( p_ext->cl_ext.p_pdo );\r
+       }\r
+\r
+       PoStartNextPowerIrp( p_irp );\r
+       IoCompleteRequest( p_irp, IO_NO_INCREMENT );\r
+       IoReleaseRemoveLock( &p_ext->cl_ext.remove_lock, p_irp );\r
+\r
+       HCA_EXIT( HCA_DBG_PO );\r
+}\r
+\r
+\r
+/*NOTE: Completion routines must NEVER be pageable. */\r
+static NTSTATUS\r
+__DevicePowerCompletion(\r
+       IN                              DEVICE_OBJECT                           *p_dev_obj,\r
+       IN                              IRP                                                     *p_irp,\r
+       IN                              void                                            *context )\r
+{\r
+       hca_dev_ext_t           *p_ext;\r
+       IO_STACK_LOCATION       *pIoStack;\r
+\r
+       HCA_ENTER( HCA_DBG_PO );\r
+\r
+       UNUSED_PARAM( context );\r
+\r
+       p_ext = (hca_dev_ext_t*)p_dev_obj->DeviceExtension;\r
+       pIoStack = IoGetCurrentIrpStackLocation( p_irp );\r
+\r
+       if( !NT_SUCCESS( p_irp->IoStatus.Status ) )\r
+       {\r
+               PoStartNextPowerIrp( p_irp );\r
+               IoReleaseRemoveLock( &p_ext->cl_ext.remove_lock, p_irp );\r
+               HCA_PRINT( TRACE_LEVEL_ERROR, HCA_DBG_PNP, \r
+                       ("IRP_MN_SET_POWER for device failed by lower driver with %08x.\n",\r
+                       p_irp->IoStatus.Status));\r
+               return STATUS_SUCCESS;\r
+       }\r
+\r
+       p_ext->PowerState = pIoStack->Parameters.Power.State.DeviceState;\r
+       PoSetPowerState( p_dev_obj, DevicePowerState,\r
+               pIoStack->Parameters.Power.State );\r
+\r
+       /* Process in a work item - mthca_init_one blocks. */\r
+       ASSERT( !p_ext->pPoWorkItem );\r
+       p_ext->pPoWorkItem = IoAllocateWorkItem( p_dev_obj );\r
+       if( !p_ext->pPoWorkItem )\r
+       {\r
+               IoInvalidateDeviceState( p_ext->cl_ext.p_pdo );\r
+\r
+               PoStartNextPowerIrp( p_irp );\r
+               IoReleaseRemoveLock( &p_ext->cl_ext.remove_lock, p_irp );\r
+\r
+               return STATUS_SUCCESS;\r
+       }\r
+\r
+       /* Process in work item callback. */\r
+       IoMarkIrpPending( p_irp );\r
+       IoQueueWorkItem( p_ext->pPoWorkItem, __PowerUpCb, DelayedWorkQueue, p_irp );\r
+       /*\r
+        * The HCA start (mthca_init_one and __hca_register) is performed by\r
+        * __PowerUpCb, which also calls PoStartNextPowerIrp, completes the\r
+        * IRP and releases the remove lock; repeating any of that here would\r
+        * process the IRP twice.\r
+        */\r
+\r
+       HCA_EXIT( HCA_DBG_PO );\r
+       return STATUS_MORE_PROCESSING_REQUIRED;\r
+}\r
+\r
+\r
+/* Work item callback to handle DevicePowerD3 IRPs at passive level. */\r
+static void\r
+__PowerDownCb(\r
+       IN                              DEVICE_OBJECT*                          p_dev_obj,\r
+       IN                              void*                                           context )\r
+{\r
+       IO_STACK_LOCATION       *pIoStack;\r
+       hca_dev_ext_t           *p_ext;\r
+       IRP                                     *p_irp;\r
+\r
+       HCA_ENTER( HCA_DBG_PO );\r
+\r
+       p_ext = (hca_dev_ext_t*)p_dev_obj->DeviceExtension;\r
+       p_irp = (IRP*)context;\r
+       pIoStack = IoGetCurrentIrpStackLocation( p_irp );\r
+\r
+       IoFreeWorkItem( p_ext->pPoWorkItem );\r
+       p_ext->pPoWorkItem = NULL;\r
+\r
+       PoSetPowerState( p_dev_obj, DevicePowerState,\r
+               pIoStack->Parameters.Power.State );\r
+       if( p_ext->state == HCA_REGISTERED )\r
+       {\r
+               /* Notify AL that the CA is being removed. */\r
+               p_ext->ci_ifc.deregister_ca( p_ext->hca.guid );\r
+               /* Release AL's CI interface. */\r
+               p_ext->ci_ifc.wdm.InterfaceDereference( p_ext->ci_ifc.wdm.Context );\r
+\r
+               p_ext->state = HCA_STARTED;\r
+       }\r
+\r
+       mthca_remove_one( p_ext );\r
+\r
+       IoSkipCurrentIrpStackLocation( p_irp );\r
+       PoStartNextPowerIrp( p_irp );\r
+       PoCallDriver( p_ext->cl_ext.p_next_do, p_irp );\r
+       IoReleaseRemoveLock( &p_ext->cl_ext.remove_lock, p_irp );\r
+\r
+       HCA_EXIT( HCA_DBG_PO );\r
+}\r
+\r
+\r
+static NTSTATUS\r
+hca_set_power(\r
+       IN                              DEVICE_OBJECT* const            p_dev_obj,\r
+       IN                              IRP* const                                      p_irp,\r
+               OUT                     cl_irp_action_t* const          p_action )\r
+{\r
+       NTSTATUS                        status;\r
+       IO_STACK_LOCATION       *pIoStack;\r
+       hca_dev_ext_t           *p_ext;\r
+\r
+       HCA_ENTER( HCA_DBG_PO );\r
+\r
+       p_ext = (hca_dev_ext_t*)p_dev_obj->DeviceExtension;\r
+       pIoStack = IoGetCurrentIrpStackLocation( p_irp );\r
+\r
+       switch( pIoStack->Parameters.Power.Type )\r
+       {\r
+       case SystemPowerState:\r
+               /*\r
+                * Process on the way up the stack.  We cannot block since the \r
+                * power dispatch function can be called at elevated IRQL if the\r
+                * device is in a paging/hibernation/crash dump path.\r
+                */\r
+               IoMarkIrpPending( p_irp );\r
+               IoCopyCurrentIrpStackLocationToNext( p_irp );\r
+#pragma warning( push, 3 )\r
+               IoSetCompletionRoutine( p_irp, __SystemPowerCompletion, NULL, \r
+                       TRUE, TRUE, TRUE );\r
+#pragma warning( pop )\r
+               PoCallDriver( p_ext->cl_ext.p_next_do, p_irp );\r
+\r
+               *p_action = IrpDoNothing;\r
+               status = STATUS_PENDING;\r
+               break;\r
+\r
+       case DevicePowerState:\r
+               IoMarkIrpPending( p_irp );\r
+               if( pIoStack->Parameters.Power.State.DeviceState == PowerDeviceD0 )\r
+               {\r
+                       /* If we're already powered up, just pass down. */\r
+                       if( p_ext->PowerState == PowerDeviceD0 )\r
+                       {\r
+                               status = STATUS_SUCCESS;\r
+                               *p_action = IrpIgnore;\r
+                               break;\r
+                       }\r
+\r
+                       /* Process in I/O completion callback. */\r
+                       IoCopyCurrentIrpStackLocationToNext( p_irp );\r
+#pragma warning( push, 3 )\r
+                       IoSetCompletionRoutine( p_irp, __DevicePowerCompletion, NULL, \r
+                               TRUE, TRUE, TRUE );\r
+#pragma warning( pop )\r
+                       PoCallDriver( p_ext->cl_ext.p_next_do, p_irp );\r
+               }\r
+               else\r
+               {\r
+                       /* Process in a work item - deregister_ca and mthca_remove_one block. */\r
+                       ASSERT( !p_ext->pPoWorkItem );\r
+                       p_ext->pPoWorkItem = IoAllocateWorkItem( p_dev_obj );\r
+                       if( !p_ext->pPoWorkItem )\r
+                       {\r
+                               status = STATUS_INSUFFICIENT_RESOURCES;\r
+                               break;\r
+                       }\r
+\r
+                       /* Process in work item callback. */\r
+                       IoMarkIrpPending( p_irp );\r
+                       IoQueueWorkItem(\r
+                               p_ext->pPoWorkItem, __PowerDownCb, DelayedWorkQueue, p_irp );\r
+               }\r
+               *p_action = IrpDoNothing;\r
+               status = STATUS_PENDING;\r
+               break;\r
+\r
+       default:\r
+               /* Pass down and let the PDO driver handle it. */\r
+               *p_action = IrpIgnore;\r
+               status = STATUS_SUCCESS;\r
+               break;\r
+       }\r
+\r
+       if( !NT_SUCCESS( status ) )\r
+               *p_action = IrpComplete;\r
+\r
+       HCA_EXIT( HCA_DBG_PNP );\r
+       return status;\r
+}\r
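+\r
+/*\r
+ * Summary of the set-power flow above:\r
+ *   an S-IRP is passed down with __SystemPowerCompletion attached; on\r
+ *   success, PoRequestPowerIrp issues a D-IRP mapped through DevicePower[];\r
+ *   D0 is handled by __DevicePowerCompletion queueing __PowerUpCb, lower\r
+ *   states by hca_set_power queueing __PowerDownCb; finally\r
+ *   __RequestPowerCompletion completes the original S-IRP.\r
+ */\r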
diff --git a/trunk/hw/mthca/kernel/hca_pnp.h b/trunk/hw/mthca/kernel/hca_pnp.h
new file mode 100644 (file)
index 0000000..fba554f
--- /dev/null
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2005 SilverStorm Technologies.  All rights reserved.
+ *
+ * This software is available to you under the OpenIB.org BSD license
+ * below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id: hca_driver.h 46 2005-05-30 17:55:53Z sleybo $
+ */
+
+
+#ifndef        _HCA_PNP_H_
+#define _HCA_PNP_H_
+
+void hca_init_vfptr( void );
+
+NTSTATUS
+hca_add_device(
+       IN                              PDRIVER_OBJECT                          pDriverObj,
+       IN                              PDEVICE_OBJECT                          pPdo );
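+
+/*
+ * Illustrative wiring only (the actual driver entry lives in hca_driver.c;
+ * p_drv_obj is a placeholder name):
+ *
+ *	hca_init_vfptr();
+ *	p_drv_obj->DriverExtension->AddDevice = hca_add_device;
+ */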
+
+
+#endif
+
+
diff --git a/trunk/hw/mthca/kernel/hca_smp.c b/trunk/hw/mthca/kernel/hca_smp.c
new file mode 100644 (file)
index 0000000..84159d5
--- /dev/null
@@ -0,0 +1,657 @@
+/*\r
+ * Copyright (c) 2005 SilverStorm Technologies.  All rights reserved.\r
+ *\r
+ * This software is available to you under the OpenIB.org BSD license\r
+ * below:\r
+ *\r
+ *     Redistribution and use in source and binary forms, with or\r
+ *     without modification, are permitted provided that the following\r
+ *     conditions are met:\r
+ *\r
+ *      - Redistributions of source code must retain the above\r
+ *        copyright notice, this list of conditions and the following\r
+ *        disclaimer.\r
+ *\r
+ *      - Redistributions in binary form must reproduce the above\r
+ *        copyright notice, this list of conditions and the following\r
+ *        disclaimer in the documentation and/or other materials\r
+ *        provided with the distribution.\r
+ *\r
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,\r
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\r
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND\r
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS\r
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN\r
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN\r
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\r
+ * SOFTWARE.\r
+ *\r
+ * $Id: hca_smp.c 46 2005-05-30 17:55:53Z sleybo $\r
+ */\r
+\r
+\r
+/*\r
+ * SMP handling of IB Access Layer VPD for Mellanox MT23108 HCA\r
+ */\r
+\r
+\r
+#include "hca_driver.h"\r
+#if defined(EVENT_TRACING)\r
+#ifdef offsetof\r
+#undef offsetof\r
+#endif\r
+#include "hca_smp.tmh"\r
+#endif\r
+#include "mthca_dev.h"\r
+#include "ib_mad.h"\r
+\r
+\r
+boolean_t\r
+mlnx_cachable_guid_info(\r
+       IN              const   mlnx_cache_t* const                     p_cache,\r
+       IN              const   ib_mad_t                                        *p_mad_in,\r
+               OUT                     ib_mad_t                                        *p_mad_out )\r
+{\r
+       uint32_t                        idx;\r
+\r
+       /* Get the table selector from the attribute */\r
+       idx = cl_ntoh32( p_mad_in->attr_mod );\r
+\r
+       /*\r
+        * TODO: Setup the response to fail the MAD instead of sending\r
+        * it down to the HCA.\r
+        */\r
+       if( idx > 31 )\r
+               return FALSE;\r
+\r
+       if( !p_cache->guid_block[idx].valid )\r
+               return FALSE;\r
+\r
+       /*\r
+        * If a SET, see if the set is identical to the cache,\r
+        * in which case it's a no-op.\r
+        */\r
+       if( p_mad_in->method == IB_MAD_METHOD_SET )\r
+       {\r
+               if( cl_memcmp( ib_smp_get_payload_ptr( (ib_smp_t*)p_mad_in ),\r
+                       &p_cache->guid_block[idx].tbl, sizeof(ib_guid_info_t) ) )\r
+               {\r
+                       /* The set is requesting a change. */\r
+                       return FALSE;\r
+               }\r
+       }\r
+       CL_ASSERT( p_mad_in->method == IB_MAD_METHOD_GET );\r
+\r
+       /* Setup the response mad. */\r
+       cl_memcpy( p_mad_out, p_mad_in, MAD_BLOCK_SIZE );\r
+       p_mad_out->method |= IB_MAD_METHOD_RESP_MASK;\r
+       if( p_mad_out->mgmt_class == IB_MCLASS_SUBN_DIR )\r
+               p_mad_out->status = IB_SMP_DIRECTION;\r
+       else\r
+               p_mad_out->status = 0;\r
+\r
+       /* Copy the cached data. */\r
+       cl_memcpy( ib_smp_get_payload_ptr( (ib_smp_t*)p_mad_out ),\r
+               &p_cache->guid_block[idx].tbl, sizeof(ib_guid_info_t) );\r
+\r
+       return TRUE;\r
+}\r
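+\r
+/*\r
+ * NOTE: the mlnx_cachable_* helpers below follow the same pattern as\r
+ * mlnx_cachable_guid_info above: return FALSE (let the MAD reach the\r
+ * hardware) when the cache entry is not valid or when a SET would change\r
+ * the cached table; otherwise copy the cached table into the response MAD\r
+ * with the RESP bit and, for directed-route SMPs, the direction bit set.\r
+ */\r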
+\r
+\r
+boolean_t\r
+mlnx_cachable_pkey_table(\r
+       IN              const   mlnx_cache_t* const                     p_cache,\r
+       IN              const   ib_mad_t                                        *p_mad_in,\r
+               OUT                     ib_mad_t                                        *p_mad_out )\r
+{\r
+       uint16_t                        idx;\r
+\r
+       /* Get the table selector from the attribute */\r
+       idx = ((uint16_t)cl_ntoh32( p_mad_in->attr_mod ));\r
+\r
+       /*\r
+        * TODO: Setup the response to fail the MAD instead of sending\r
+        * it down to the HCA.\r
+        */\r
+       if( idx > 2047 )\r
+               return FALSE;\r
+\r
+       if( !p_cache->pkey_tbl[idx].valid )\r
+               return FALSE;\r
+\r
+       /*\r
+        * If a SET, see if the set is identical to the cache,\r
+        * in which case it's a no-op.\r
+        */\r
+       if( p_mad_in->method == IB_MAD_METHOD_SET )\r
+       {\r
+               if( cl_memcmp( ib_smp_get_payload_ptr( (ib_smp_t*)p_mad_in ),\r
+                       &p_cache->pkey_tbl[idx].tbl, sizeof(ib_pkey_table_info_t) ) )\r
+               {\r
+                       /* The set is requesting a change. */\r
+                       return FALSE;\r
+               }\r
+       }\r
+       CL_ASSERT( p_mad_in->method == IB_MAD_METHOD_GET );\r
+\r
+       /* Setup the response mad. */\r
+       cl_memcpy( p_mad_out, p_mad_in, MAD_BLOCK_SIZE );\r
+       p_mad_out->method |= IB_MAD_METHOD_RESP_MASK;\r
+       if( p_mad_out->mgmt_class == IB_MCLASS_SUBN_DIR )\r
+               p_mad_out->status = IB_SMP_DIRECTION;\r
+       else\r
+               p_mad_out->status = 0;\r
+\r
+       /* Copy the cached data. */\r
+       cl_memcpy( ib_smp_get_payload_ptr( (ib_smp_t*)p_mad_out ),\r
+               &p_cache->pkey_tbl[idx].tbl, sizeof(ib_pkey_table_info_t) );\r
+\r
+       return TRUE;\r
+}\r
+\r
+\r
+boolean_t\r
+mlnx_cachable_sl_vl_table(\r
+       IN              const   mlnx_cache_t* const                     p_cache,\r
+       IN              const   ib_mad_t                                        *p_mad_in,\r
+               OUT                     ib_mad_t                                        *p_mad_out )\r
+{\r
+       if( !p_cache->sl_vl.valid )\r
+               return FALSE;\r
+\r
+       /*\r
+        * If a SET, see if the set is identical to the cache,\r
+        * in which case it's a no-op.\r
+        */\r
+       if( p_mad_in->method == IB_MAD_METHOD_SET )\r
+       {\r
+               if( cl_memcmp( ib_smp_get_payload_ptr( (ib_smp_t*)p_mad_in ),\r
+                       &p_cache->sl_vl.tbl, sizeof(ib_slvl_table_t) ) )\r
+               {\r
+                       /* The set is requesting a change. */\r
+                       return FALSE;\r
+               }\r
+       }\r
+       CL_ASSERT( p_mad_in->method == IB_MAD_METHOD_GET );\r
+\r
+       /* Setup the response mad. */\r
+       cl_memcpy( p_mad_out, p_mad_in, MAD_BLOCK_SIZE );\r
+       p_mad_out->method |= IB_MAD_METHOD_RESP_MASK;\r
+       if( p_mad_out->mgmt_class == IB_MCLASS_SUBN_DIR )\r
+               p_mad_out->status = IB_SMP_DIRECTION;\r
+       else\r
+               p_mad_out->status = 0;\r
+\r
+       /* Copy the cached data. */\r
+       cl_memcpy( ib_smp_get_payload_ptr( (ib_smp_t*)p_mad_out ),\r
+               &p_cache->sl_vl.tbl, sizeof(ib_slvl_table_t) );\r
+\r
+       return TRUE;\r
+}\r
+\r
+\r
+boolean_t\r
+mlnx_cachable_vl_arb_table(\r
+       IN              const   mlnx_cache_t* const                     p_cache,\r
+       IN              const   ib_mad_t                                        *p_mad_in,\r
+               OUT                     ib_mad_t                                        *p_mad_out )\r
+{\r
+       uint16_t                        idx;\r
+\r
+       /* Get the table selector from the attribute */\r
+       idx = ((uint16_t)(cl_ntoh32( p_mad_in->attr_mod ) >> 16)) - 1;\r
+\r
+       /*\r
+        * TODO: Setup the response to fail the MAD instead of sending\r
+        * it down to the HCA.\r
+        */\r
+       if( idx > 3 )\r
+               return FALSE;\r
+\r
+       if( !p_cache->vl_arb[idx].valid )\r
+               return FALSE;\r
+\r
+       /*\r
+        * If a SET, see if the set is identical to the cache,\r
+        * in which case it's a no-op.\r
+        */\r
+       if( p_mad_in->method == IB_MAD_METHOD_SET )\r
+       {\r
+               if( cl_memcmp( ib_smp_get_payload_ptr( (ib_smp_t*)p_mad_in ),\r
+                       &p_cache->vl_arb[idx].tbl, sizeof(ib_vl_arb_table_t) ) )\r
+               {\r
+                       /* The set is requesting a change. */\r
+                       return FALSE;\r
+               }\r
+       }\r
+       CL_ASSERT( p_mad_in->method == IB_MAD_METHOD_GET );\r
+\r
+       /* Setup the response mad. */\r
+       cl_memcpy( p_mad_out, p_mad_in, MAD_BLOCK_SIZE );\r
+       p_mad_out->method |= IB_MAD_METHOD_RESP_MASK;\r
+       if( p_mad_out->mgmt_class == IB_MCLASS_SUBN_DIR )\r
+               p_mad_out->status = IB_SMP_DIRECTION;\r
+       else\r
+               p_mad_out->status = 0;\r
+\r
+       /* Copy the cached data. */\r
+       cl_memcpy( ib_smp_get_payload_ptr( (ib_smp_t*)p_mad_out ),\r
+               &p_cache->vl_arb[idx].tbl, sizeof(ib_vl_arb_table_t) );\r
+\r
+       return TRUE;\r
+}\r
+\r
+\r
+boolean_t\r
+mlnx_cachable_port_info(\r
+       IN              const   mlnx_cache_t* const                     p_cache,\r
+       IN              const   uint8_t                                         port_num,\r
+       IN              const   ib_mad_t                                        *p_mad_in,\r
+               OUT                     ib_mad_t                                        *p_mad_out )\r
+{\r
+       ib_port_info_t          *p_port_info;\r
+\r
+       UNUSED_PARAM( p_mad_out );\r
+\r
+       if( !p_cache->port_info.valid )\r
+               return FALSE;\r
+\r
+       if( p_mad_in->method == IB_MAD_METHOD_GET )\r
+               return FALSE;\r
+\r
+       /*\r
+        * NOTE: Even though the input MAD is const, we modify it to change\r
+        * some parameters to no-ops to compensate for problems in the HCA chip.\r
+        */\r
+       p_port_info =\r
+               (ib_port_info_t*)ib_smp_get_payload_ptr( (ib_smp_t*)p_mad_in );\r
+\r
+       /* We can only cache requests for the same port that the SMP came in on. */\r
+       if( p_mad_in->attr_mod != 0 &&\r
+               cl_ntoh32( p_mad_in->attr_mod ) != port_num )\r
+       {\r
+               return FALSE;\r
+       }\r
+\r
+       /*\r
+        * to avoid unnecessary glitches in port state, we translate these\r
+        * fields to NOP when there is no change.  Note these fields cannot\r
+        * change within the hardware without a Set going through here.\r
+        */\r
+       if( p_port_info->link_width_enabled ==\r
+               p_cache->port_info.info.link_width_enabled )\r
+       {\r
+               p_port_info->link_width_enabled = 0;\r
+       }\r
+       if( (p_port_info->state_info2 & 0x0F) ==\r
+               (p_cache->port_info.info.state_info2 & 0x0F) )\r
+       {\r
+               p_port_info->state_info2 &= 0xF0;\r
+       }\r
+       if( (p_port_info->link_speed & 0x0F) ==\r
+               (p_cache->port_info.info.link_speed & 0x0F) )\r
+       {\r
+               p_port_info->link_speed &= 0xF0;\r
+       }\r
+       if( (p_port_info->vl_enforce & 0xF0) ==\r
+               (p_cache->port_info.info.vl_enforce & 0xF0) )\r
+       {\r
+               p_port_info->vl_enforce &= 0x0F;\r
+       }\r
+\r
+       /*\r
+        * We modified the input MAD to change things to no-ops, but\r
+        * we can't actually fulfill the MAD with cached data.\r
+        */\r
+       return FALSE;\r
+}\r
+\r
+\r
+boolean_t\r
+mlnx_cachable_mad(\r
+       IN              const   ib_ca_handle_t                          h_ca,\r
+       IN              const   uint8_t                                         port_num,\r
+       IN              const   ib_mad_t                                        *p_mad_in,\r
+               OUT                     ib_mad_t                                        *p_mad_out )\r
+{\r
+       if( p_mad_in->mgmt_class != IB_MCLASS_SUBN_DIR &&\r
+               p_mad_in->mgmt_class != IB_MCLASS_SUBN_LID )\r
+       {\r
+               return FALSE;\r
+       }\r
+\r
+       switch( p_mad_in->attr_id )\r
+       {\r
+       case IB_MAD_ATTR_GUID_INFO:\r
+               return mlnx_cachable_guid_info(\r
+                       &h_ca->cache[port_num-1], p_mad_in, p_mad_out );\r
+\r
+       case IB_MAD_ATTR_P_KEY_TABLE:\r
+               return mlnx_cachable_pkey_table(\r
+                       &h_ca->cache[port_num-1], p_mad_in, p_mad_out );\r
+\r
+       case IB_MAD_ATTR_SLVL_TABLE:\r
+               return mlnx_cachable_sl_vl_table(\r
+                       &h_ca->cache[port_num-1], p_mad_in, p_mad_out );\r
+\r
+       case IB_MAD_ATTR_VL_ARBITRATION:\r
+               return mlnx_cachable_vl_arb_table(\r
+                       &h_ca->cache[port_num-1], p_mad_in, p_mad_out );\r
+\r
+       case IB_MAD_ATTR_PORT_INFO:\r
+               return mlnx_cachable_port_info(\r
+                       &h_ca->cache[port_num-1], port_num, p_mad_in, p_mad_out );\r
+\r
+       default:\r
+               break;\r
+       }\r
+       return FALSE;\r
+}\r
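+\r
+/*\r
+ * NOTE: mlnx_cachable_mad is the read side of the cache; the mlnx_update_*\r
+ * routines below are the write side, driven from mlnx_update_cache after a\r
+ * MAD completes successfully in hardware (see mlnx_local_mad at the end of\r
+ * this file for the full sequence).\r
+ */\r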
+\r
+\r
+void\r
+mlnx_update_guid_info(\r
+       IN                              mlnx_cache_t* const                     p_cache,\r
+       IN              const   ib_mad_t* const                         p_mad_out )\r
+{\r
+       uint32_t                        idx;\r
+\r
+       /* Get the table selector from the attribute */\r
+       idx = cl_ntoh32( p_mad_out->attr_mod );\r
+\r
+       /*\r
+        * We only get successful MADs here, so invalid settings\r
+        * shouldn't happen.\r
+        */\r
+       CL_ASSERT( idx <= 31 );\r
+\r
+       cl_memcpy( &p_cache->guid_block[idx].tbl,\r
+               ib_smp_get_payload_ptr( (ib_smp_t*)p_mad_out ),\r
+               sizeof(ib_guid_info_t) );\r
+       p_cache->guid_block[idx].valid = TRUE;\r
+}\r
+\r
+\r
+void\r
+mlnx_update_pkey_table(\r
+       IN                              mlnx_cache_t* const                     p_cache,\r
+       IN              const   ib_mad_t* const                         p_mad_out )\r
+{\r
+       uint16_t                        idx;\r
+\r
+       /* Get the table selector from the attribute */\r
+       idx = ((uint16_t)cl_ntoh32( p_mad_out->attr_mod ));\r
+\r
+       ASSERT( idx <= 2047 );\r
+\r
+       cl_memcpy( &p_cache->pkey_tbl[idx].tbl,\r
+               ib_smp_get_payload_ptr( (ib_smp_t*)p_mad_out ),\r
+               sizeof(ib_pkey_table_info_t) );\r
+       p_cache->pkey_tbl[idx].valid = TRUE;\r
+}\r
+\r
+\r
+void\r
+mlnx_update_sl_vl_table(\r
+       IN                              mlnx_cache_t* const                     p_cache,\r
+       IN              const   ib_mad_t* const                         p_mad_out )\r
+{\r
+       cl_memcpy( &p_cache->sl_vl.tbl,\r
+               ib_smp_get_payload_ptr( (ib_smp_t*)p_mad_out ),\r
+               sizeof(ib_slvl_table_t) );\r
+       p_cache->sl_vl.valid = TRUE;\r
+}\r
+\r
+\r
+void\r
+mlnx_update_vl_arb_table(\r
+       IN                              mlnx_cache_t* const                     p_cache,\r
+       IN              const   ib_mad_t* const                         p_mad_out )\r
+{\r
+       uint16_t                        idx;\r
+\r
+       /* Get the table selector from the attribute */\r
+       idx = ((uint16_t)(cl_ntoh32( p_mad_out->attr_mod ) >> 16)) - 1;\r
+\r
+       CL_ASSERT( idx <= 3 );\r
+\r
+       cl_memcpy( &p_cache->vl_arb[idx].tbl,\r
+               ib_smp_get_payload_ptr( (ib_smp_t*)p_mad_out ),\r
+               sizeof(ib_vl_arb_table_t) );\r
+       p_cache->vl_arb[idx].valid = TRUE;\r
+}\r
+\r
+\r
+void\r
+mlnx_update_port_info(\r
+       IN              const   mlnx_cache_t* const                     p_cache,\r
+       IN              const   uint8_t                                         port_num,\r
+       IN              const   ib_mad_t* const                         p_mad_out )\r
+{\r
+       UNUSED_PARAM( p_cache );\r
+\r
+       /* We can only cache requests for the same port that the SMP came in on. */\r
+       /* TODO: Add synchronization to support getting data from other ports. */\r
+       if( p_mad_out->attr_mod != 0 &&\r
+               cl_ntoh32( p_mad_out->attr_mod ) != port_num )\r
+       {\r
+               return;\r
+       }\r
+\r
+       /* TODO: Set up the capabilities mask properly. */\r
+}\r
+\r
+\r
+void\r
+mlnx_update_cache(\r
+       IN              const   ib_ca_handle_t                          h_ca,\r
+       IN              const   uint8_t                                         port_num,\r
+       IN              const   ib_mad_t                                        *p_mad_out )\r
+{\r
+       if( p_mad_out->mgmt_class != IB_MCLASS_SUBN_DIR &&\r
+               p_mad_out->mgmt_class != IB_MCLASS_SUBN_LID )\r
+       {\r
+               return;\r
+       }\r
+\r
+       /* Any successful response updates the cache. */\r
+       if( p_mad_out->status )\r
+               return;\r
+\r
+\r
+       switch( p_mad_out->attr_id )\r
+       {\r
+       case IB_MAD_ATTR_GUID_INFO:\r
+               mlnx_update_guid_info(\r
+                       &h_ca->cache[port_num-1], p_mad_out );\r
+               break;\r
+\r
+       case IB_MAD_ATTR_P_KEY_TABLE:\r
+               mlnx_update_pkey_table(\r
+                       &h_ca->cache[port_num-1], p_mad_out );\r
+               break;\r
+\r
+       case IB_MAD_ATTR_SLVL_TABLE:\r
+               mlnx_update_sl_vl_table(\r
+                       &h_ca->cache[port_num-1], p_mad_out );\r
+               break;\r
+\r
+       case IB_MAD_ATTR_VL_ARBITRATION:\r
+               mlnx_update_vl_arb_table(\r
+                       &h_ca->cache[port_num-1], p_mad_out );\r
+               break;\r
+\r
+       case IB_MAD_ATTR_PORT_INFO:\r
+               mlnx_update_port_info(\r
+                       &h_ca->cache[port_num-1], port_num, p_mad_out );\r
+               break;\r
+\r
+       default:\r
+               break;\r
+       }\r
+\r
+}\r
+\r
+\r
+/*\r
+ * Local MAD Support Verbs. For CAs that do not support\r
+ * agents in HW.\r
+ */\r
+\r
+#ifdef WIN_TO_BE_REMOVED\r
+//TODO: this does not appear to be needed\r
+static void build_smp_wc(u64 wr_id, u16 slid, u16 pkey_index, \r
+       u8 port_num, struct _ib_wc *wc)\r
+{\r
+       RtlZeroMemory(wc, sizeof *wc);\r
+       wc->wr_id = wr_id;\r
+       wc->status = IB_WC_SUCCESS;\r
+       wc->wc_type = IB_WC_RECV;\r
+       wc->length = sizeof(struct ib_mad) + sizeof(struct ib_grh);\r
+       wc->qp_num = IB_QP0;\r
+       wc->port_num = port_num;\r
+       wc->recv.ud.pkey_index = pkey_index;\r
+       wc->recv.ud.remote_qp = IB_QP0;\r
+       wc->recv.ud.remote_lid = slid;\r
+       wc->recv.ud.remote_sl = 0;\r
+       wc->recv.ud.path_bits = 0;\r
+}\r
+#endif\r
+\r
+ib_api_status_t\r
+mlnx_local_mad (\r
+       IN              const   ib_ca_handle_t                          h_ca,\r
+       IN              const   uint8_t                                         port_num,\r
+       IN              const   ib_mad_t                                        *p_mad_in,\r
+               OUT                     ib_mad_t                                        *p_mad_out )\r
+{\r
+#ifndef WIN_TO_BE_CHANGED\r
+\r
+       int err;\r
+       ib_api_status_t         status = IB_SUCCESS;\r
+       mlnx_hob_t                      *hob_p = (mlnx_hob_t *)h_ca;\r
+       struct ib_device *ib_dev_p = IBDEV_FROM_HOB( hob_p );\r
+       //TODO: do we need to use the flags (IB_MAD_IGNORE_MKEY, IB_MAD_IGNORE_BKEY)?\r
+       int mad_flags = 0;  \r
+       struct _ib_wc *wc_p;\r
+       //TODO: do we need to use a GRH?\r
+       struct ib_grh *grh_p = NULL;\r
+\r
+       HCA_ENTER(HCA_DBG_MAD);\r
+\r
+       // sanity checks\r
+       if (port_num > 2) {\r
+               status = IB_INVALID_PARAMETER;\r
+               goto err_port_num;\r
+       }\r
+\r
+#ifdef WIN_TO_BE_REMOVED\r
+       //TODO: this does not appear to be needed\r
+       //TODO: wr_id and pkey_index values are unknown here\r
+       if (p_mad_in->mgmt_class == IB_MCLASS_SUBN_DIR) {\r
+               ib_smp_t *smp = (ib_smp_t *)p_mad_in;\r
+               u64 wr_id = 0;\r
+               u16 pkey_index = 0;\r
+               build_smp_wc(wr_id, cl_ntoh16(smp->dr_slid), pkey_index, port_num,  &wc);\r
+               wc_p = &wc;\r
+       }\r
+#else\r
+       wc_p = NULL;\r
+#endif\r
+       \r
+       // debug print\r
+       {\r
+               HCA_PRINT( TRACE_LEVEL_WARNING, HCA_DBG_MAD, \r
+                       ("MAD: Class %02x, Method %02x, Attr %02x, HopPtr %d, HopCnt %d, \n",\r
+                       (uint32_t)((ib_smp_t *)p_mad_in)->mgmt_class, \r
+                       (uint32_t)((ib_smp_t *)p_mad_in)->method, \r
+                       (uint32_t)((ib_smp_t *)p_mad_in)->attr_id, \r
+                       (uint32_t)((ib_smp_t *)p_mad_in)->hop_ptr,\r
+                       (uint32_t)((ib_smp_t *)p_mad_in)->hop_count));\r
+       }\r
+       \r
+       // process mad\r
+       if( !mlnx_cachable_mad( h_ca, port_num, p_mad_in, p_mad_out ) )\r
+       {\r
+               err = mthca_process_mad(ib_dev_p, mad_flags, (uint8_t)port_num, \r
+                       wc_p, grh_p, (struct ib_mad*)p_mad_in, (struct ib_mad*)p_mad_out);\r
+               if (!err) {\r
+                       HCA_PRINT( TRACE_LEVEL_ERROR, HCA_DBG_MAD, \r
+                               ("MAD failed:\n\tClass 0x%x\n\tMethod 0x%x\n\tAttr 0x%x",\r
+                               p_mad_in->mgmt_class, p_mad_in->method, p_mad_in->attr_id ));\r
+                       status = IB_ERROR;\r
+                       goto err_process_mad;\r
+               }\r
+               mlnx_update_cache( h_ca, port_num, p_mad_out );\r
+       }\r
+\r
+       /* Modify direction for Direct MAD */\r
+       if ( p_mad_in->mgmt_class == IB_MCLASS_SUBN_DIR )\r
+               p_mad_out->status |= IB_SMP_DIRECTION;\r
+\r
+err_process_mad:\r
+err_port_num:  \r
+       HCA_PRINT_EXIT(TRACE_LEVEL_ERROR  , HCA_DBG_MAD  ,("completes with ERROR status %s\n", ib_get_err_str(status)));\r
+       return status;\r
+       \r
+#else\r
+       ib_api_status_t         status;\r
+\r
+       mlnx_hob_t                      *hob_p = (mlnx_hob_t *)h_ca;\r
+       uint32_t                        hca_idx;\r
+       mlnx_hobul_t            *hobul_p;\r
+       HH_hca_dev_t            *hca_ul_info;\r
+\r
+       HCA_ENTER(HCA_DBG_MAD);\r
+\r
+       if (port_num > 2) {\r
+               status = IB_INVALID_PARAMETER;\r
+               goto cleanup;\r
+       }\r
+\r
+       if (!hob_p || E_MARK_CA != hob_p->mark) {\r
+               status = IB_INVALID_CA_HANDLE;\r
+               goto cleanup;\r
+       }\r
+\r
+       hca_idx = hob_p->index;\r
+       hobul_p = mlnx_hobul_array[hca_idx];\r
+       if (NULL == hobul_p) {\r
+               status = IB_INVALID_CA_HANDLE;\r
+               goto cleanup;\r
+       }\r
+\r
+       hca_ul_info = (HH_hca_dev_t *)hobul_p->hh_hndl;\r
+       if (NULL == hca_ul_info) {\r
+               status = IB_INVALID_CA_HANDLE;\r
+               goto cleanup;\r
+       }\r
+\r
+       if( !mlnx_cachable_mad( h_ca, port_num, p_mad_in, p_mad_out ) )\r
+       {\r
+               if( HH_OK != THH_hob_process_local_mad( hobul_p->hh_hndl, port_num,\r
+                       0x0, 0, (void *)p_mad_in, p_mad_out ) )\r
+               {\r
+                       HCA_PRINT( TRACE_LEVEL_ERROR, HCA_DBG_MAD, \r
+                               ("MAD failed:\n\tClass 0x%x\n\tMethod 0x%x\n\tAttr 0x%x",\r
+                               p_mad_in->mgmt_class, p_mad_in->method, p_mad_in->attr_id ));\r
+                       status = IB_ERROR;\r
+                       goto cleanup;\r
+               }\r
+\r
+               mlnx_update_cache( h_ca, port_num, p_mad_out );\r
+       }\r
+\r
+       /* Modify direction for Direct MAD */\r
+       if ( p_mad_in->mgmt_class == IB_MCLASS_SUBN_DIR )\r
+               p_mad_out->status |= IB_SMP_DIRECTION;\r
+\r
+       HCA_EXIT(HCA_DBG_MAD);\r
+       return IB_SUCCESS;\r
+\r
+cleanup:\r
+       HCA_PRINT(TRACE_LEVEL_ERROR, HCA_DBG_MAD, ("status %d\n", status));\r
+       HCA_EXIT(HCA_DBG_MAD);\r
+       return status;\r
+#endif \r
+}\r
diff --git a/trunk/hw/mthca/kernel/hca_verbs.c b/trunk/hw/mthca/kernel/hca_verbs.c
new file mode 100644 (file)
index 0000000..0a6100a
--- /dev/null
@@ -0,0 +1,3236 @@
+/*\r
+ * Copyright (c) 2005 SilverStorm Technologies.  All rights reserved.\r
+ * Copyright (c) 2004-2005 Mellanox Technologies, Inc. All rights reserved. \r
+ *\r
+ * This software is available to you under the OpenIB.org BSD license\r
+ * below:\r
+ *\r
+ *     Redistribution and use in source and binary forms, with or\r
+ *     without modification, are permitted provided that the following\r
+ *     conditions are met:\r
+ *\r
+ *      - Redistributions of source code must retain the above\r
+ *        copyright notice, this list of conditions and the following\r
+ *        disclaimer.\r
+ *\r
+ *      - Redistributions in binary form must reproduce the above\r
+ *        copyright notice, this list of conditions and the following\r
+ *        disclaimer in the documentation and/or other materials\r
+ *        provided with the distribution.\r
+ *\r
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,\r
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\r
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND\r
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS\r
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN\r
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN\r
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\r
+ * SOFTWARE.\r
+ *\r
+ * $Id: hca_verbs.c 148 2005-07-12 07:48:46Z sleybo $\r
+ */\r
+\r
+\r
+#include "hca_driver.h"\r
+#if defined(EVENT_TRACING)\r
+#ifdef offsetof\r
+#undef offsetof\r
+#endif\r
+#include "hca_verbs.tmh"\r
+#endif\r
+#include "mthca_dev.h"\r
+#include "ib_cache.h"\r
+#include "mx_abi.h"\r
+\r
+#define PTR_ALIGN(size)        (((size) + sizeof(void*) - 1) & ~(sizeof(void*) - 1))\r
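+\r
+/*\r
+ * Example: with 8-byte pointers, PTR_ALIGN(13) == 16 and PTR_ALIGN(16) == 16.\r
+ * mlnx_query_ca below uses it to keep each variable-length sub-array of\r
+ * ib_ca_attr_t pointer-aligned inside one contiguous allocation.\r
+ */\r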
+\r
+\r
+/* Matches definition in IbAccess for MaxSMPsWatermark */\r
+uint32_t       g_sqp_max_avs = ((4096/sizeof(ib_mad_t))*32*5);\r
+\r
+\r
+// Local declarations\r
+ib_api_status_t\r
+mlnx_query_qp (\r
+       IN              const   ib_qp_handle_t                          h_qp,\r
+               OUT                     ib_qp_attr_t                            *p_qp_attr,\r
+       IN      OUT                     ci_umv_buf_t                            *p_umv_buf );\r
+\r
+/* \r
+* CA Access Verbs\r
+*/\r
+ib_api_status_t\r
+mlnx_open_ca (\r
+       IN              const   ib_net64_t                                      ca_guid, // IN  const char *                ca_name,\r
+       IN              const   ci_completion_cb_t                      pfn_completion_cb,\r
+       IN              const   ci_async_event_cb_t                     pfn_async_event_cb,\r
+       IN              const   void*const                                      ca_context,\r
+               OUT                     ib_ca_handle_t                          *ph_ca)\r
+{\r
+#ifndef WIN_TO_BE_CHANGED\r
+\r
+       mlnx_hca_t                              *p_hca;\r
+       ib_api_status_t status;\r
+       mlnx_cache_t    *p_cache;\r
+\r
+       HCA_ENTER(HCA_DBG_SHIM);\r
+       HCA_PRINT(TRACE_LEVEL_INFORMATION  ,HCA_DBG_SHIM  ,("context 0x%p\n", ca_context));\r
+\r
+       // find CA object\r
+       p_hca = mlnx_hca_from_guid( ca_guid );\r
+       if( !p_hca ) {\r
+               status = IB_NOT_FOUND;\r
+               goto err_hca_from_guid;\r
+       }\r
+\r
+       HCA_PRINT(TRACE_LEVEL_INFORMATION  ,HCA_DBG_SHIM  ,("context 0x%p\n", ca_context));\r
+       status = mlnx_hobs_set_cb(&p_hca->hob,\r
+               pfn_completion_cb,\r
+               pfn_async_event_cb,\r
+               ca_context);\r
+       if (IB_SUCCESS != status) {\r
+               goto err_set_cb;\r
+       }\r
+\r
+       // MAD cache\r
+       p_cache = (mlnx_cache_t*)cl_pzalloc( sizeof(mlnx_cache_t) * 2 );\r
+       if( !p_cache ) {\r
+               status = IB_INSUFFICIENT_MEMORY;\r
+               goto err_mad_cache;\r
+       }\r
+       p_hca->hob.cache = p_cache;\r
+\r
+       \r
+       //TODO: do we need something for kernel users ?\r
+\r
+       // Return pointer to HOB object\r
+       if (ph_ca) *ph_ca = &p_hca->hob;\r
+       status =  IB_SUCCESS;\r
+\r
+err_mad_cache:\r
+err_set_cb:\r
+err_hca_from_guid:     \r
+       HCA_PRINT_EXIT(TRACE_LEVEL_ERROR  , HCA_DBG_SHIM  ,("completes with ERROR status %s\n", ib_get_err_str(status)));\r
+       return status;\r
+\r
+#else\r
+       mlnx_hca_t                              *p_hca;\r
+       HH_hca_dev_t *                  hca_ul_info;\r
+       void *                                  hca_ul_resources_p = NULL; // (THH_hca_ul_resources_t *)\r
+       ib_api_status_t                 status;\r
+       mlnx_hob_t                              *new_ca = NULL;\r
+       MOSAL_protection_ctx_t  prot_ctx;\r
+\r
+       HCA_ENTER( HCA_DBG_SHIM);\r
+       HCA_PRINT(TRACE_LEVEL_INFORMATION, HCA_DBG_SHIM, ("context 0x%p\n", ca_context));\r
+\r
+       // find CA object\r
+       p_hca = mlnx_hca_from_guid( ca_guid );\r
+       if( !p_hca ) {\r
+               HCA_EXIT( TRACE_LEVEL_VERBOSE );\r
+               return IB_NOT_FOUND;\r
+       }\r
+\r
+       hca_ul_info = p_hca->hh_hndl;\r
+\r
+       {\r
+               // We are opening the HCA in kernel mode.\r
+               // if a HOBKL exists for this device (i.e. it is open) - return E_BUSY\r
+               if (IB_SUCCESS == mlnx_hobs_lookup(p_hca->hh_hndl, &new_ca)) {\r
+                       if (ph_ca) *ph_ca = (ib_ca_handle_t)new_ca;\r
+                       HCA_EXIT( HCA_DBG_SHIM);\r
+                       return IB_RESOURCE_BUSY;\r
+               }\r
+\r
+               // Create a mapping from hca index to hh_hndl\r
+               status = mlnx_hobs_insert(p_hca, &new_ca);\r
+               if (IB_SUCCESS != status) {\r
+                       HCA_EXIT( HCA_DBG_SHIM);\r
+                       return status;\r
+               }\r
+\r
+               /* save copy of HCA device object */\r
+               new_ca->p_dev_obj = p_hca->p_dev_obj;\r
+\r
+               // Initialize the device driver\r
+               if (HH_OK != THH_hob_open_hca(p_hca->hh_hndl, NULL, NULL)) {\r
+                       status = IB_ERROR;\r
+                       goto cleanup;\r
+               }\r
+               \r
+               HCA_PRINT(TRACE_LEVEL_INFORMATION, HCA_DBG_SHIM, ("context 0x%p\n", ca_context));\r
+               status = mlnx_hobs_set_cb(new_ca,\r
+                       pfn_completion_cb,\r
+                       pfn_async_event_cb,\r
+                       ca_context);\r
+               if (IB_SUCCESS != status) {\r
+                       goto cleanup;\r
+               }\r
+\r
+               HCA_PRINT(TRACE_LEVEL_INFORMATION, HCA_DBG_SHIM ,("ul_resource sizes: hca %d pd %d\n",\r
+                       hca_ul_info->hca_ul_resources_sz,\r
+                       hca_ul_info->pd_ul_resources_sz));\r
+\r
+               hca_ul_resources_p = cl_zalloc( hca_ul_info->hca_ul_resources_sz);\r
+\r
+               /* get the kernel protection context */ \r
+               prot_ctx = MOSAL_get_kernel_prot_ctx();\r
+       }\r
+\r
+       if (!hca_ul_resources_p) {\r
+               status = IB_INSUFFICIENT_MEMORY;\r
+               goto cleanup;\r
+       }\r
+\r
+       if (HH_OK != THH_hob_alloc_ul_res(p_hca->hh_hndl, prot_ctx, hca_ul_resources_p)) {\r
+               status = IB_ERROR;\r
+               goto cleanup;\r
+       }\r
+\r
+       // TBD: !!! in user mode (kernel hobul_idx != hob_idx)\r
+       status = mlnx_hobul_new(new_ca, p_hca->hh_hndl, hca_ul_resources_p);\r
+       if (IB_SUCCESS != status) {\r
+               goto cleanup;\r
+       }\r
+\r
+       // Return the HOBUL index\r
+       if (ph_ca) *ph_ca = new_ca;\r
+\r
+       HCA_EXIT( HCA_DBG_SHIM);\r
+       return IB_SUCCESS;\r
+\r
+cleanup:\r
+       if (hca_ul_resources_p)\r
+               cl_free( hca_ul_resources_p);\r
+       THH_hob_close_hca(p_hca->hh_hndl);\r
+       mlnx_hobs_remove(new_ca);\r
+\r
+       // For user mode call - return status to user mode\r
+       HCA_PRINT(TRACE_LEVEL_ERROR, HCA_DBG_SHIM, ("status %d \n", status));\r
+       HCA_EXIT( HCA_DBG_SHIM);\r
+       return status;\r
+#endif \r
+}\r
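+\r
+/*\r
+ * NOTE: as in mlnx_open_ca above, errors in this file unwind through goto\r
+ * labels named err_<failed step>, listed in reverse order of the steps, so\r
+ * each failure path skips the work that never ran before reaching the\r
+ * common exit trace.\r
+ */\r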
+\r
+ib_api_status_t\r
+mlnx_query_ca (\r
+       IN              const   ib_ca_handle_t                          h_ca,\r
+               OUT                     ib_ca_attr_t                            *p_ca_attr,\r
+       IN      OUT                     uint32_t                                        *p_byte_count,\r
+       IN      OUT                     ci_umv_buf_t                            *p_umv_buf )\r
+{\r
+#ifndef WIN_TO_BE_CHANGED\r
+\r
+       ib_api_status_t         status;\r
+       uint32_t                        size, required_size;\r
+       uint8_t                 port_num, num_ports;\r
+       uint32_t                        num_gids, num_pkeys;\r
+       uint32_t                        num_page_sizes = 1; // TBD: what is actually supported\r
+       uint8_t                         *last_p;\r
+       struct ib_device_attr props;\r
+       struct ib_port_attr  *hca_ports = NULL;\r
+       int i;\r
+       \r
+       mlnx_hob_t                      *hob_p = (mlnx_hob_t *)h_ca;\r
+       struct ib_device *ib_dev = IBDEV_FROM_HOB( hob_p );\r
+       int err;\r
+       \r
+       HCA_ENTER(HCA_DBG_SHIM);\r
+\r
+       // sanity checks\r
+       if( p_umv_buf && p_umv_buf->command ) {\r
+                       HCA_PRINT (TRACE_LEVEL_ERROR, HCA_DBG_SHIM ,("User mode is not supported yet\n"));\r
+                       p_umv_buf->status = status = IB_UNSUPPORTED;\r
+                       goto err_user_unsupported;\r
+       }\r
+       if (NULL == p_byte_count) {\r
+               status = IB_INVALID_PARAMETER;\r
+               goto err_byte_count;\r
+       }\r
+\r
+       // query the device\r
+       err = mthca_query_device(ib_dev, &props );\r
+       if (err) {\r
+               HCA_PRINT (TRACE_LEVEL_ERROR, HCA_DBG_SHIM, \r
+                       ("ib_query_device failed (%d)\n",err));\r
+               status = errno_to_iberr(err);\r
+               goto err_query_device;\r
+       }\r
+       \r
+       // allocate array for port properties\r
+       num_ports = ib_dev->phys_port_cnt;   /* Number of physical ports of the HCA */             \r
+       if (NULL == (hca_ports = cl_zalloc( num_ports * sizeof *hca_ports))) {\r
+               HCA_PRINT (TRACE_LEVEL_ERROR, HCA_DBG_SHIM, ("Failed to cl_zalloc ports array\n"));\r
+               status = IB_INSUFFICIENT_MEMORY;\r
+               goto err_alloc_ports;\r
+       }\r
+\r
+       // start calculation of ib_ca_attr_t full size\r
+       num_gids = 0;\r
+       num_pkeys = 0;\r
+       required_size = PTR_ALIGN(sizeof(ib_ca_attr_t)) +\r
+               PTR_ALIGN(sizeof(uint32_t) * num_page_sizes) +\r
+               PTR_ALIGN(sizeof(ib_port_attr_t) * num_ports);\r
+\r
+       // get port properties\r
+       for (port_num = 0; port_num <= end_port(ib_dev) - start_port(ib_dev); ++port_num) {\r
+               // request\r
+               err = mthca_query_port(ib_dev, port_num + start_port(ib_dev), &hca_ports[port_num]);\r
+               if (err) {\r
+                       HCA_PRINT (TRACE_LEVEL_ERROR, HCA_DBG_SHIM, ("ib_query_port failed(%d) for port %d\n",err, port_num));\r
+                       status = errno_to_iberr(err);\r
+                       goto err_query_port;\r
+               }\r
+\r
+               // calculate GID table size\r
+               num_gids  = hca_ports[port_num].gid_tbl_len;\r
+               size = PTR_ALIGN(sizeof(ib_gid_t)  * num_gids);\r
+               required_size += size;\r
+\r
+               // calculate pkeys table size\r
+               num_pkeys = hca_ports[port_num].pkey_tbl_len;\r
+               size = PTR_ALIGN(sizeof(uint16_t) * num_pkeys);\r
+               required_size += size;\r
+       }\r
+\r
+       // resource sufficiency check\r
+       if (NULL == p_ca_attr || *p_byte_count < required_size) {\r
+               *p_byte_count = required_size;\r
+               status = IB_INSUFFICIENT_MEMORY;\r
+               if ( p_ca_attr != NULL) {\r
+                       HCA_PRINT (TRACE_LEVEL_ERROR,HCA_DBG_SHIM, \r
+                               ("Failed *p_byte_count (%d) < required_size (%d)\n", *p_byte_count, required_size ));\r
+               }\r
+               goto err_insuff_mem;\r
+       }\r
+\r
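+       /*\r
+        * Layout of the single output buffer (each piece PTR_ALIGNed):\r
+        * ib_ca_attr_t, page-size array, per-port ib_port_attr_t array,\r
+        * then for each port its GID table followed by its PKEY table.\r
+        * last_p walks this layout and must end exactly required_size bytes\r
+        * past p_ca_attr (checked by the CL_ASSERT below).\r
+        */\r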
+       // Space is sufficient - setup table pointers\r
+       last_p = (uint8_t*)p_ca_attr;\r
+       last_p += PTR_ALIGN(sizeof(*p_ca_attr));\r
+\r
+       p_ca_attr->p_page_size = (uint32_t*)last_p;\r
+       last_p += PTR_ALIGN(num_page_sizes * sizeof(uint32_t));\r
+\r
+       p_ca_attr->p_port_attr = (ib_port_attr_t *)last_p;\r
+       last_p += PTR_ALIGN(num_ports * sizeof(ib_port_attr_t));\r
+\r
+       for (port_num = 0; port_num < num_ports; port_num++) {\r
+               p_ca_attr->p_port_attr[port_num].p_gid_table = (ib_gid_t *)last_p;\r
+               size = PTR_ALIGN(sizeof(ib_gid_t) * hca_ports[port_num].gid_tbl_len);\r
+               last_p += size;\r
+\r
+               p_ca_attr->p_port_attr[port_num].p_pkey_table = (uint16_t *)last_p;\r
+               size = PTR_ALIGN(sizeof(uint16_t) * hca_ports[port_num].pkey_tbl_len);\r
+               last_p += size;\r
+       }\r
+\r
+       // Separate the loops to ensure that table pointers are always setup\r
+       for (port_num = 0; port_num < num_ports; port_num++) {\r
+\r
+               // get pkeys, using cache\r
+               for (i=0; i < hca_ports[port_num].pkey_tbl_len; ++i) {\r
+                       err = ib_get_cached_pkey( ib_dev, port_num + start_port(ib_dev), i,\r
+                               &p_ca_attr->p_port_attr[port_num].p_pkey_table[i] );\r
+                       if (err) {\r
+                               status = errno_to_iberr(err);\r
+                               HCA_PRINT (TRACE_LEVEL_ERROR,HCA_DBG_SHIM, \r
+                                       ("ib_get_cached_pkey failed (%d) for port_num %d, index %d\n",\r
+                                       err, port_num + start_port(ib_dev), i));\r
+                               goto err_get_pkey;\r
+                       }\r
+               }\r
+               \r
+               // get gids, using cache\r
+               for (i=0; i < hca_ports[port_num].gid_tbl_len; ++i) {\r
+                       union ib_gid * __ptr64  gid = (union ib_gid     *)&p_ca_attr->p_port_attr[port_num].p_gid_table[i];\r
+                       err = ib_get_cached_gid( ib_dev, port_num + start_port(ib_dev), i, (union ib_gid *)gid );\r
+                       //TODO: do we need to convert gids to little endian\r
+                       if (err) {\r
+                               status = errno_to_iberr(err);\r
+                               HCA_PRINT (TRACE_LEVEL_ERROR, HCA_DBG_SHIM, \r
+                                       ("ib_get_cached_gid failed (%d) for port_num %d, index %d\n",\r
+                                       err, port_num + start_port(ib_dev), i));\r
+                               goto err_get_gid;\r
+                       }\r
+               }\r
+\r
+#if 0\r
+               HCA_PRINT(TRACE_LEVEL_INFORMATION, HCA_DBG_SHIM,("port %d gid0:", port_num));\r
+               for (i = 0; i < 16; i++)\r
+                       HCA_PRINT(TRACE_LEVEL_INFORMATION, HCA_DBG_SHIM,(" 0x%x", p_ca_attr->p_port_attr[port_num].p_gid_table[0].raw[i]));\r
+               HCA_PRINT(TRACE_LEVEL_INFORMATION, HCA_DBG_SHIM,("\n"));\r
+#endif\r
+       }\r
+\r
+       // set result size\r
+       p_ca_attr->size = required_size;\r
+       CL_ASSERT( required_size == (((uintn_t)last_p) - ((uintn_t)p_ca_attr)) );\r
+#if 0\r
+               HCA_PRINT(TRACE_LEVEL_INFORMATION, HCA_DBG_SHIM ,("Space required %d used %d\n",\r
+                       required_size, ((uintn_t)last_p) - ((uintn_t)p_ca_attr)));\r
+#endif\r
+       \r
+       // !!! GID/PKEY tables must be queried before this call !!!\r
+       mlnx_conv_hca_cap(ib_dev, &props, hca_ports, p_ca_attr);\r
+\r
+       status = IB_SUCCESS;\r
+\r
+err_get_gid:\r
+err_get_pkey:\r
+err_insuff_mem:\r
+err_query_port:\r
+       cl_free(hca_ports);\r
+err_alloc_ports:\r
+err_query_device:\r
+err_byte_count:        \r
+err_user_unsupported:\r
+       if( status != IB_INSUFFICIENT_MEMORY && status != IB_SUCCESS )\r
+               HCA_PRINT(TRACE_LEVEL_ERROR     , HCA_DBG_SHIM  ,("completes with ERROR status %s\n", ib_get_err_str(status)));\r
+       HCA_EXIT(HCA_DBG_SHIM);\r
+       return status;\r
+\r
+\r
+\r
+#else\r
+       ib_api_status_t         status;\r
+\r
+       mlnx_hob_t                      *hob_p = (mlnx_hob_t *)h_ca;\r
+       HH_hca_hndl_t           hh_hndl = NULL;\r
+       HH_hca_dev_t            *hca_ul_info;\r
+       VAPI_hca_cap_t          hca_cap;\r
+       VAPI_hca_port_t         *hca_ports = NULL;\r
+       uint32_t                        size, required_size;\r
+       uint8_t                 port_num, num_ports;\r
+       uint32_t                        num_gids, num_pkeys;\r
+       uint32_t                        num_page_sizes = 1; // TBD: what is actually supported\r
+       uint8_t                         *last_p;\r
+       void                            *hca_ul_resources_p = NULL;\r
+       uint32_t                        priv_op;\r
+\r
+       HCA_ENTER( HCA_DBG_SHIM);\r
+\r
+       if (NULL == p_byte_count) {\r
+               status = IB_INVALID_PARAMETER;\r
+               goto cleanup;\r
+       }\r
+\r
+       mlnx_hobs_get_handle(hob_p, &hh_hndl);\r
+       if (NULL == hh_hndl) {\r
+               HCA_PRINT(TRACE_LEVEL_INFORMATION  , HCA_DBG_SHIM  ,("returning E_NODEV dev\n"));\r
+               status = IB_INVALID_CA_HANDLE;\r
+               goto cleanup;\r
+       }\r
+\r
+       hca_ul_info = (HH_hca_dev_t *)hh_hndl;\r
+\r
+       if (HH_OK != THH_hob_query(hh_hndl, &hca_cap)) {\r
+               status = IB_ERROR;\r
+               goto cleanup;\r
+       }\r
+\r
+       num_ports = hca_cap.phys_port_num;   /* Number of physical ports of the HCA */             \r
+\r
+       if (NULL == (hca_ports = cl_zalloc( num_ports * sizeof(VAPI_hca_port_t)))) {\r
+               HCA_PRINT (TRACE_LEVEL_ERROR, HCA_DBG_SHIM,\r
+                       ("Failed to cl_zalloc ports array\n"));\r
+               status = IB_INSUFFICIENT_MEMORY;\r
+               goto cleanup;\r
+       }\r
+\r
+       // Loop on ports and get their properties\r
+       num_gids = 0;\r
+       num_pkeys = 0;\r
+       required_size = PTR_ALIGN(sizeof(ib_ca_attr_t)) +\r
+               PTR_ALIGN(sizeof(uint32_t) * num_page_sizes) +\r
+               PTR_ALIGN(sizeof(ib_port_attr_t) * num_ports);\r
+       for (port_num = 0; port_num < num_ports; port_num++) {\r
+               if (HH_OK != THH_hob_query_port_prop(hh_hndl, port_num+1, &hca_ports[port_num])) {\r
+                       status = IB_ERROR;\r
+                       goto cleanup;\r
+               }\r
+\r
+               num_gids  = hca_ports[port_num].gid_tbl_len;\r
+               size = PTR_ALIGN(sizeof(ib_gid_t)  * num_gids);\r
+               required_size += size;\r
+\r
+               num_pkeys = hca_ports[port_num].pkey_tbl_len;\r
+               size = PTR_ALIGN(sizeof(uint16_t) * num_pkeys);\r
+               required_size += size;\r
+       }\r
+\r
+       if (NULL == p_ca_attr || *p_byte_count < required_size) {\r
+               *p_byte_count = required_size;\r
+               status = IB_INSUFFICIENT_MEMORY;\r
+               if ( p_ca_attr != NULL) {\r
+                       HCA_PRINT (TRACE_LEVEL_ERROR, HCA_DBG_SHIM,\r
+                               ("Failed *p_byte_count < required_size\n"));\r
+               }\r
+               goto cleanup;\r
+       }\r
+\r
+       // Space is sufficient - setup table pointers\r
+       last_p = (uint8_t*)p_ca_attr;\r
+       last_p += PTR_ALIGN(sizeof(*p_ca_attr));\r
+\r
+       p_ca_attr->p_page_size = (uint32_t*)last_p;\r
+       last_p += PTR_ALIGN(num_page_sizes * sizeof(uint32_t));\r
+\r
+       p_ca_attr->p_port_attr = (ib_port_attr_t *)last_p;\r
+       last_p += PTR_ALIGN(num_ports * sizeof(ib_port_attr_t));\r
+\r
+       for (port_num = 0; port_num < num_ports; port_num++) {\r
+               p_ca_attr->p_port_attr[port_num].p_gid_table = (ib_gid_t *)last_p;\r
+               size = PTR_ALIGN(sizeof(ib_gid_t) * hca_ports[port_num].gid_tbl_len);\r
+               last_p += size;\r
+\r
+               p_ca_attr->p_port_attr[port_num].p_pkey_table = (uint16_t *)last_p;\r
+               size = PTR_ALIGN(sizeof(uint16_t) * hca_ports[port_num].pkey_tbl_len);\r
+               last_p += size;\r
+       }\r
+\r
+       // Separate the loops to ensure that table pointers are always setup\r
+       for (port_num = 0; port_num < num_ports; port_num++) {\r
+               status = mlnx_get_hca_pkey_tbl(hh_hndl, port_num+1,\r
+                       hca_ports[port_num].pkey_tbl_len,\r
+                       p_ca_attr->p_port_attr[port_num].p_pkey_table);\r
+               if (IB_SUCCESS != status) {\r
+                       HCA_PRINT (TRACE_LEVEL_ERROR, HCA_DBG_SHIM,\r
+                               ("Failed to mlnx_get_hca_pkey_tbl for port_num:%d\n",port_num));\r
+                       goto cleanup;\r
+               }\r
+\r
+               status = mlnx_get_hca_gid_tbl(hh_hndl, port_num+1,\r
+                       hca_ports[port_num].gid_tbl_len,\r
+                       &p_ca_attr->p_port_attr[port_num].p_gid_table[0].raw);\r
+               if (IB_SUCCESS != status) {\r
+                       HCA_PRINT (TRACE_LEVEL_ERROR, HCA_DBG_SHIM,\r
+                               ("Failed to mlnx_get_hca_gid_tbl for port_num:%d\n",port_num));\r
+                       goto cleanup;\r
+               }\r
+\r
+#if 0\r
+               {\r
+                       int i;\r
+\r
+                       HCA_PRINT(TRACE_LEVEL_INFORMATION, HCA_DBG_SHIM,("port %d gid0:", port_num));\r
+                       for (i = 0; i < 16; i++)\r
+                               HCA_PRINT(TRACE_LEVEL_INFORMATION, HCA_DBG_SHIM,(" 0x%x", p_ca_attr->p_port_attr[port_num].p_gid_table[0].raw[i]));\r
+                       HCA_PRINT(TRACE_LEVEL_INFORMATION, HCA_DBG_SHIM,("\n"));\r
+               }\r
+#endif\r
+       }\r
+\r
+       // Convert query result into IBAL structure (no cl_memset())\r
+       p_ca_attr->size = required_size;\r
+\r
+       // !!! GID/PKEY tables must be queried before this call !!!\r
+       mlnx_conv_vapi_hca_cap(hca_ul_info, &hca_cap, hca_ports, p_ca_attr);\r
+\r
+       // verify: required space == used space\r
+       CL_ASSERT( required_size == (((uintn_t)last_p) - ((uintn_t)p_ca_attr)) );\r
+\r
+#if 0\r
+       HCA_PRINT(TRACE_LEVEL_INFORMATION, HCA_DBG_SHIM ,("Space required %d used %d\n",\r
+               required_size,\r
+               ((uintn_t)last_p) - ((uintn_t)p_ca_attr)));\r
+#endif\r
+\r
+       if (hca_ul_resources_p) cl_free (hca_ul_resources_p);\r
+       if (hca_ports) cl_free( hca_ports );\r
+       HCA_EXIT( HCA_DBG_SHIM);\r
+       return IB_SUCCESS;\r
+\r
+cleanup:\r
+       if (hca_ul_resources_p) cl_free (hca_ul_resources_p);\r
+       if (hca_ports) cl_free( hca_ports);\r
+       if( p_ca_attr != NULL || status != IB_INSUFFICIENT_MEMORY )\r
+               HCA_PRINT(TRACE_LEVEL_ERROR  ,HCA_DBG_SHIM  ,("status %d\n", status));\r
+       HCA_EXIT( HCA_DBG_SHIM);\r
+       return status;\r
+#endif \r
+}\r
+\r
+ib_api_status_t\r
+mlnx_modify_ca (\r
+       IN              const   ib_ca_handle_t                          h_ca,\r
+       IN              const   uint8_t                                         port_num,\r
+       IN              const   ib_ca_mod_t                                     modca_cmd,\r
+       IN              const   ib_port_attr_mod_t                      *p_port_attr)\r
+{\r
+#ifndef WIN_TO_BE_CHANGED\r
+\r
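+       /*\r
+        * SET_CAP_MOD translates one IBAL ib_ca_mod_t flag into the matching\r
+        * set/clr bit of struct ib_port_modify: when the flag is present in\r
+        * modca_cmd, the port capability named by 'ib' is either set or cleared\r
+        * according to the boolean field 'al_fld' in p_port_attr->cap.\r
+        */\r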
+#define SET_CAP_MOD(al_mask, al_fld, ib)               \\r
+               if (modca_cmd & al_mask) {      \\r
+                       if (p_port_attr->cap.##al_fld)          \\r
+                               props.set_port_cap_mask |= ib;  \\r
+                       else            \\r
+                               props.clr_port_cap_mask |= ib;  \\r
+               }\r
+\r
+       ib_api_status_t status;\r
+       int err;\r
+       struct ib_port_modify props;\r
+       int port_modify_mask = 0;\r
+       mlnx_hob_t                      *hob_p = (mlnx_hob_t *)h_ca;\r
+       struct ib_device *ib_dev = IBDEV_FROM_HOB( hob_p );\r
+\r
+       HCA_ENTER(HCA_DBG_SHIM);\r
+\r
+       // prepare parameters\r
+       RtlZeroMemory(&props, sizeof(props));\r
+       SET_CAP_MOD(IB_CA_MOD_IS_SM, sm, IB_PORT_SM);\r
+       SET_CAP_MOD(IB_CA_MOD_IS_SNMP_SUPPORTED, snmp, IB_PORT_SNMP_TUNNEL_SUP);\r
+       SET_CAP_MOD(IB_CA_MOD_IS_DEV_MGMT_SUPPORTED, dev_mgmt, IB_PORT_DEVICE_MGMT_SUP);\r
+       SET_CAP_MOD(IB_CA_MOD_IS_VEND_SUPPORTED, vend, IB_PORT_VENDOR_CLASS_SUP);\r
+       if ((modca_cmd & IB_CA_MOD_QKEY_CTR) && (p_port_attr->qkey_ctr == 0)) \r
+               port_modify_mask |= IB_PORT_RESET_QKEY_CNTR;\r
+       \r
+       // modify port\r
+       err = mthca_modify_port(ib_dev, port_num, port_modify_mask, &props );\r
+       if (err) {\r
+               status = errno_to_iberr(err);\r
+               HCA_PRINT(TRACE_LEVEL_ERROR  , HCA_DBG_SHIM  ,("mthca_modify_port failed (%d) \n",err));\r
+               goto err_modify_port;\r
+       }\r
+       \r
+       status =        IB_SUCCESS;\r
+\r
+err_modify_port:\r
+       HCA_PRINT_EXIT(TRACE_LEVEL_ERROR  , HCA_DBG_SHIM  ,("completes with ERROR status %s\n", ib_get_err_str(status)));\r
+       return status;\r
+\r
+#else\r
+       ib_api_status_t                 status;\r
+\r
+       mlnx_hob_t                              *hob_p = (mlnx_hob_t *)h_ca;\r
+       HH_hca_hndl_t                   hh_hndl = NULL;\r
+\r
+       VAPI_hca_attr_t                 hca_attr;\r
+       VAPI_hca_attr_mask_t    hca_attr_mask = 0;\r
+\r
+       HCA_ENTER( HCA_DBG_SHIM);\r
+\r
+       mlnx_hobs_get_handle(hob_p, &hh_hndl);\r
+       if (NULL == hh_hndl) {\r
+               status = IB_INVALID_CA_HANDLE;\r
+               goto cleanup;\r
+       }\r
+\r
+       cl_memclr(&hca_attr, sizeof(hca_attr));\r
+       if (modca_cmd & IB_CA_MOD_IS_SM) {\r
+               hca_attr_mask |= HCA_ATTR_IS_SM;\r
+               hca_attr.is_sm = (MT_bool)p_port_attr->cap.sm;\r
+       }\r
+       if (modca_cmd & IB_CA_MOD_IS_SNMP_SUPPORTED) {\r
+               hca_attr_mask |= HCA_ATTR_IS_SNMP_TUN_SUP;\r
+               hca_attr.is_snmp_tun_sup = (MT_bool)p_port_attr->cap.snmp;\r
+       }\r
+       if (modca_cmd & IB_CA_MOD_IS_DEV_MGMT_SUPPORTED) {\r
+               hca_attr_mask |= HCA_ATTR_IS_DEV_MGT_SUP;\r
+               hca_attr.is_dev_mgt_sup = (MT_bool)p_port_attr->cap.dev_mgmt;\r
+       }\r
+       if (modca_cmd & IB_CA_MOD_IS_VEND_SUPPORTED) {\r
+               hca_attr_mask |= HCA_ATTR_IS_VENDOR_CLS_SUP;\r
+               hca_attr.is_vendor_cls_sup = (MT_bool)p_port_attr->cap.vend;\r
+       }\r
+       if (modca_cmd & IB_CA_MOD_QKEY_CTR) {\r
+               if (p_port_attr->qkey_ctr == 0)\r
+                       hca_attr.reset_qkey_counter = TRUE;\r
+       }\r
+\r
+       if (0 != hca_attr_mask) {\r
+               if (HH_OK != THH_hob_modify( hh_hndl, port_num, &hca_attr, &hca_attr_mask))\r
+               {\r
+                       status = IB_ERROR;\r
+                       goto cleanup;\r
+               }\r
+       }\r
+\r
+       HCA_EXIT( HCA_DBG_SHIM);\r
+       return IB_SUCCESS;\r
+\r
+cleanup:\r
+       HCA_PRINT(TRACE_LEVEL_ERROR  ,HCA_DBG_SHIM  ,("status %d\n",status));\r
+       HCA_EXIT( HCA_DBG_SHIM);\r
+       return status;\r
+#endif \r
+}\r
+\r
+ib_api_status_t\r
+mlnx_close_ca (\r
+       IN                              ib_ca_handle_t                          h_ca)\r
+{\r
+#ifndef WIN_TO_BE_CHANGED\r
+\r
+       HCA_ENTER(HCA_DBG_SHIM);\r
+\r
+       // release HOB resources\r
+       mlnx_hobs_remove(h_ca);\r
+\r
+       //TODO: release HOBUL resources\r
+\r
+       HCA_EXIT(HCA_DBG_SHIM);\r
+       \r
+       return IB_SUCCESS;\r
+\r
+#else\r
+       ib_api_status_t status;\r
+\r
+       HH_hca_hndl_t   hh_hndl = NULL;\r
+       mlnx_hob_t              *hob_p   = (mlnx_hob_t *)h_ca;\r
+       HH_hca_dev_t    *hca_ul_info;\r
+       void                    *hca_ul_resources_p = NULL;\r
+       mlnx_hobul_t    *hobul_p;\r
+\r
+       HCA_ENTER( HCA_DBG_SHIM);\r
+\r
+       hobul_p = mlnx_hobul_array[hob_p->index];\r
+       if( !hobul_p ) {\r
+               status = IB_INVALID_CA_HANDLE;\r
+               goto cleanup;\r
+       }\r
+\r
+       if( hobul_p->count ) {\r
+               status = IB_RESOURCE_BUSY;\r
+               goto cleanup;\r
+       }\r
+\r
+       mlnx_hobs_get_handle(hob_p, &hh_hndl);\r
+       if (NULL == hh_hndl) {\r
+               status = IB_INVALID_CA_HANDLE;\r
+               goto cleanup;\r
+       }\r
+\r
+       hca_ul_info = (HH_hca_dev_t *)hh_hndl;\r
+       mlnx_hobul_get(hob_p, &hca_ul_resources_p);\r
+\r
+       if (hca_ul_resources_p) {\r
+               THH_hob_free_ul_res(hh_hndl, hca_ul_resources_p);\r
+               cl_free( hca_ul_resources_p);\r
+       }\r
+       mlnx_hobul_delete(hob_p);\r
+       THH_hob_close_hca(hh_hndl);\r
+       mlnx_hobs_remove(hob_p);\r
+\r
+       HCA_EXIT( HCA_DBG_SHIM);\r
+       return IB_SUCCESS;\r
+\r
+cleanup:\r
+       HCA_PRINT(TRACE_LEVEL_ERROR  ,HCA_DBG_SHIM  ,("status %d\n",status));\r
+       HCA_EXIT( HCA_DBG_SHIM);\r
+       return status;\r
+#endif\r
+}\r
+\r
+\r
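+/*\r
+ * mlnx_um_open is the kernel-side half of a user-mode "open CA": it creates\r
+ * an ib_ucontext for the calling process, allocates a PD on its behalf\r
+ * (ibv_alloc_pd with a non-NULL ucontext, so no MR is created), and copies\r
+ * the UAR address, PD handle/number and PCI vendor/device IDs back to user\r
+ * space through the UMV buffer.\r
+ */\r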
+static ib_api_status_t\r
+mlnx_um_open(\r
+       IN              const   ib_ca_handle_t                          h_ca,\r
+       IN      OUT                     ci_umv_buf_t* const                     p_umv_buf,\r
+               OUT                     ib_ca_handle_t* const           ph_um_ca )\r
+{\r
+#ifndef WIN_TO_BE_CHANGED\r
+\r
+       int err;\r
+       ib_api_status_t         status;\r
+       mlnx_hob_t                      *hob_p = (mlnx_hob_t *)h_ca;\r
+       hca_dev_ext_t *ext_p = EXT_FROM_HOB( hob_p );\r
+       struct ib_device *ib_dev = IBDEV_FROM_HOB( hob_p );\r
+       struct ib_ucontext *context_p;\r
+       struct mthca_alloc_ucontext_resp *uresp_p;\r
+       struct ibv_alloc_pd_resp resp;\r
+       ci_umv_buf_t umv_buf;\r
+\r
+       HCA_ENTER(HCA_DBG_SHIM);\r
+\r
+       // sanity check\r
+       if( !p_umv_buf || !p_umv_buf->command || !p_umv_buf->p_inout_buf ||\r
+               (p_umv_buf->output_size < sizeof *uresp_p) ) {\r
+               status = IB_INVALID_PARAMETER;\r
+               goto mlnx_um_open_err;\r
+       }\r
+\r
+       // create user context in kernel\r
+       context_p = mthca_alloc_ucontext(ib_dev, p_umv_buf);\r
+       if (IS_ERR(context_p)) {\r
+               err = PTR_ERR(context_p);\r
+               HCA_PRINT(TRACE_LEVEL_ERROR  ,HCA_DBG_SHIM  ,("mthca_alloc_ucontext failed (%d)\n", err));\r
+               status = errno_to_iberr(err);\r
+               goto err_alloc_ucontext;\r
+       }\r
+\r
+       /* allocate pd */\r
+       umv_buf.output_size = sizeof(struct ibv_alloc_pd_resp);\r
+       umv_buf.p_inout_buf = &resp;\r
+       //NB: Pay attention ! Ucontext parameter is important here:\r
+       // when it is present (i.e. - for user space) - mthca_alloc_pd won't create MR\r
+       context_p->pd = ibv_alloc_pd(ib_dev, context_p, &umv_buf);\r
+       if (IS_ERR(context_p->pd)) {\r
+               err = PTR_ERR(context_p->pd);\r
+               HCA_PRINT(TRACE_LEVEL_ERROR  , HCA_DBG_SHIM  ,("ibv_alloc_pd failed (%d)\n", err));\r
+               status = errno_to_iberr(err);\r
+               goto err_alloc_pd;\r
+       }\r
+       \r
+       // fill more parameters for user (sanity checks are in mthca_alloc_ucontext)\r
+       uresp_p = (struct mthca_alloc_ucontext_resp *)(void*)p_umv_buf->p_inout_buf;\r
+       uresp_p->uar_addr = (uint64_t)(UINT_PTR)context_p->user_uar;\r
+       uresp_p->pd_handle = resp.pd_handle;\r
+       uresp_p->pdn = resp.pdn;\r
+       uresp_p->vend_id = (uint32_t)ext_p->hcaConfig.VendorID;\r
+       uresp_p->dev_id = (uint16_t)ext_p->hcaConfig.DeviceID;\r
+       \r
+       // return the result\r
+       if (ph_um_ca) *ph_um_ca = (ib_ca_handle_t)context_p;\r
+\r
+       status = IB_SUCCESS;\r
+       goto end;\r
+       \r
+err_alloc_pd:\r
+       mthca_dealloc_ucontext(context_p);\r
+err_alloc_ucontext: \r
+mlnx_um_open_err:      \r
+end:\r
+       if (p_umv_buf && p_umv_buf->command) \r
+               p_umv_buf->status = status;\r
+       HCA_PRINT_EXIT(TRACE_LEVEL_ERROR  , HCA_DBG_SHIM  ,("completes with ERROR status %s\n", ib_get_err_str(status)));\r
+       return status;\r
+       \r
+#else\r
+       ib_api_status_t         status;\r
+\r
+       mlnx_hob_t                              *hob_p = (mlnx_hob_t *)h_ca;\r
+       HH_hca_hndl_t                   hh_hndl = NULL;\r
+       HH_hca_dev_t                    *hca_ul_info;\r
+       mlnx_um_ca_t                    *p_um_ca;\r
+       MOSAL_protection_ctx_t  prot_ctx;\r
+\r
+       HCA_ENTER( TRACE_LEVEL_VERBOSE );\r
+\r
+       mlnx_hobs_get_handle( hob_p, &hh_hndl );\r
+       if( !hh_hndl )\r
+       {\r
+               HCA_PRINT(TRACE_LEVEL_INFORMATION  ,HCA_DBG_SHIM  ,("returning E_NODEV dev\n"));\r
+               status = IB_INVALID_CA_HANDLE;\r
+               goto mlnx_um_open_err1;\r
+       }\r
+\r
+       hca_ul_info = (HH_hca_dev_t *)hh_hndl;\r
+\r
+       if( !p_umv_buf->command )\r
+       {\r
+               p_umv_buf->status = IB_SUCCESS;\r
+               goto mlnx_um_open_err1;\r
+       }\r
+\r
+       /*\r
+        * Prepare the buffer with the size including hca_ul_resources_sz\r
+        * NO ALIGNMENT for this size \r
+        */\r
+       if( !p_umv_buf->p_inout_buf ||\r
+               p_umv_buf->output_size < sizeof(void*) )\r
+       {\r
+               p_umv_buf->status = IB_INVALID_PARAMETER;\r
+               goto mlnx_um_open_err1;\r
+       }\r
+\r
+       HCA_PRINT( TRACE_LEVEL_VERBOSE  ,HCA_DBG_SHIM  ,("priv_op = %d\n", p_umv_buf->command ));\r
+\r
+       /* Yes, UVP request for hca_ul_info. */\r
+       p_um_ca = (mlnx_um_ca_t*)cl_zalloc(\r
+               sizeof(mlnx_um_ca_t) + hca_ul_info->hca_ul_resources_sz - 1 );\r
+       if( !p_um_ca )\r
+       {\r
+               p_umv_buf->status = IB_INSUFFICIENT_MEMORY;\r
+               goto mlnx_um_open_err1;\r
+       }\r
+\r
+       p_um_ca->p_mdl = IoAllocateMdl( &p_um_ca->dev_info,\r
+               (ULONG)(sizeof(HH_hca_dev_t) + hca_ul_info->hca_ul_resources_sz),\r
+               FALSE, TRUE, NULL );\r
+       if( !p_um_ca->p_mdl )\r
+       {\r
+               p_umv_buf->status = IB_ERROR;\r
+               goto mlnx_um_open_err2;\r
+       }\r
+       /* Build the page list... */\r
+       MmBuildMdlForNonPagedPool( p_um_ca->p_mdl );\r
+\r
+       /* Map the memory into the calling process's address space. */\r
+       __try\r
+       {\r
+               p_um_ca->p_mapped_addr =\r
+                       MmMapLockedPagesSpecifyCache( p_um_ca->p_mdl,\r
+                       UserMode, MmCached, NULL, FALSE, NormalPagePriority );\r
+       }\r
+       __except(EXCEPTION_EXECUTE_HANDLER)\r
+       {\r
+               p_umv_buf->status = IB_ERROR;\r
+               goto mlnx_um_open_err3;\r
+       }\r
+\r
+       /* Register with THH (attach to the HCA). */\r
+       prot_ctx = MOSAL_get_current_prot_ctx();\r
+       if( THH_hob_alloc_ul_res(hh_hndl, prot_ctx, p_um_ca->ul_hca_res) != HH_OK )\r
+       {\r
+               HCA_PRINT( TRACE_LEVEL_ERROR  ,HCA_DBG_SHIM  ,("Failed to get ul_res\n"));\r
+               p_umv_buf->status = IB_ERROR;\r
+       }\r
+\r
+       if( p_umv_buf->status == IB_SUCCESS )\r
+       {\r
+               /* Copy the dev info. */\r
+               p_um_ca->dev_info = *hca_ul_info;\r
+               *ph_um_ca = (ib_ca_handle_t)p_um_ca;\r
+               (*(void** __ptr64)p_umv_buf->p_inout_buf) = p_um_ca->p_mapped_addr;\r
+               p_umv_buf->status = IB_SUCCESS;\r
+       }\r
+       else\r
+       {\r
+               MmUnmapLockedPages( p_um_ca->p_mapped_addr, p_um_ca->p_mdl );\r
+mlnx_um_open_err3:\r
+               IoFreeMdl( p_um_ca->p_mdl );\r
+mlnx_um_open_err2:\r
+               cl_free( p_um_ca );\r
+mlnx_um_open_err1:\r
+               *ph_um_ca = NULL;\r
+       }\r
+\r
+       //*ph_um_ca = NULL;\r
+       p_umv_buf->output_size = sizeof(void*);\r
+       HCA_EXIT( TRACE_LEVEL_VERBOSE );\r
+       return p_umv_buf->status;\r
+#endif \r
+}\r
+\r
+static void\r
+mlnx_um_close(\r
+       IN                              ib_ca_handle_t                          h_ca,\r
+       IN                              ib_ca_handle_t                          h_um_ca )\r
+{\r
+#ifndef WIN_TO_BE_CHANGED\r
+\r
+       UNREFERENCED_PARAMETER(h_ca);\r
+       ibv_um_close((struct ib_ucontext *)h_um_ca);\r
+       return;\r
+       \r
+#else\r
+       mlnx_hob_t                      *hob_p = (mlnx_hob_t *)h_ca;\r
+       HH_hca_hndl_t           hh_hndl = NULL;\r
+       mlnx_um_ca_t            *p_um_ca = (mlnx_um_ca_t*)h_um_ca;\r
+\r
+       HCA_ENTER( TRACE_LEVEL_VERBOSE );\r
+\r
+       mlnx_hobs_get_handle( hob_p, &hh_hndl );\r
+       if( !p_um_ca )\r
+               return;\r
+\r
+       if( !hh_hndl )\r
+               goto mlnx_um_close_cleanup;\r
+\r
+       THH_hob_free_ul_res( hh_hndl, p_um_ca->ul_hca_res );\r
+\r
+mlnx_um_close_cleanup:\r
+       MmUnmapLockedPages( p_um_ca->p_mapped_addr, p_um_ca->p_mdl );\r
+       IoFreeMdl( p_um_ca->p_mdl );\r
+       cl_free( p_um_ca );\r
+\r
+       HCA_EXIT( TRACE_LEVEL_VERBOSE );\r
+#endif \r
+}\r
+\r
+\r
+/*\r
+*    Protection Domain and Reliable Datagram Domain Verbs\r
+*/\r
+\r
+ib_api_status_t\r
+mlnx_allocate_pd (\r
+       IN              const   ib_ca_handle_t                          h_ca,\r
+       IN              const   ib_pd_type_t                            type,\r
+               OUT                     ib_pd_handle_t                          *ph_pd,\r
+       IN      OUT                     ci_umv_buf_t                            *p_umv_buf )\r
+{\r
+#ifndef WIN_TO_BE_CHANGED\r
+\r
+       ib_api_status_t         status;\r
+       struct ib_device *ib_dev;\r
+       struct ib_ucontext *context_p;\r
+       struct ib_pd *ib_pd_p;\r
+       int err;\r
+\r
+       //TODO: how do we use it?\r
+       UNREFERENCED_PARAMETER(type);\r
+       \r
+       HCA_ENTER(HCA_DBG_SHIM);\r
+\r
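+       // A non-empty p_umv_buf->command marks a user-mode call: h_ca is then\r
+       // the process ucontext (as returned by mlnx_um_open); otherwise h_ca is\r
+       // the kernel HOB and the PD is created without a user context.\r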
+       if( p_umv_buf && p_umv_buf->command ) {\r
+               context_p = (struct ib_ucontext *)h_ca;\r
+               ib_dev = context_p->device;\r
+       }\r
+       else {\r
+               mlnx_hob_t                      *hob_p = (mlnx_hob_t *)h_ca;\r
+               context_p = NULL;\r
+               ib_dev = IBDEV_FROM_HOB( hob_p );\r
+       }\r
+       \r
+       // create PD\r
+       ib_pd_p = ibv_alloc_pd(ib_dev, context_p, p_umv_buf);\r
+       if (IS_ERR(ib_pd_p)) {\r
+               err = PTR_ERR(ib_pd_p);\r
+               HCA_PRINT(TRACE_LEVEL_ERROR  , HCA_DBG_SHIM  ,("ibv_alloc_pd failed (%d)\n", err));\r
+               status = errno_to_iberr(err);\r
+               goto err_alloc_pd;\r
+       }\r
+\r
+       // return the result\r
+       if (ph_pd) *ph_pd = (ib_pd_handle_t)ib_pd_p;\r
+\r
+       status = IB_SUCCESS;\r
+       \r
+err_alloc_pd:  \r
+       if (p_umv_buf && p_umv_buf->command) \r
+               p_umv_buf->status = status;\r
+       HCA_PRINT_EXIT(TRACE_LEVEL_ERROR  , HCA_DBG_SHIM  ,("completes with ERROR status %s\n", ib_get_err_str(status)));\r
+       return status;\r
+       \r
+#else\r
+       mlnx_hob_t                              *hob_p = (mlnx_hob_t *)h_ca;\r
+       mlnx_hobul_t                    *hobul_p;\r
+       HH_hca_dev_t                    *hca_ul_info;\r
+       HHUL_pd_hndl_t                  hhul_pd_hndl = 0;\r
+       void                                    *pd_ul_resources_p = NULL;\r
+       uint32_t                                pd_idx;\r
+       ib_api_status_t                 status;\r
+       MOSAL_protection_ctx_t  prot_ctx;\r
+\r
+       HCA_ENTER( HCA_DBG_SHIM);\r
+\r
+       hobul_p = mlnx_hobs_get_hobul(hob_p);\r
+       if (NULL == hobul_p) {\r
+               status = IB_INVALID_CA_HANDLE;\r
+               goto cleanup;\r
+       }\r
+\r
+       hca_ul_info = (HH_hca_dev_t *)hobul_p->hh_hndl;\r
+       if (NULL == hca_ul_info) {\r
+               status = IB_INVALID_CA_HANDLE;\r
+               goto cleanup;\r
+       }\r
+\r
+       if( p_umv_buf && p_umv_buf->command )\r
+       {\r
+               // For user mode calls - obtain and verify the vendor information\r
+               if ((p_umv_buf->input_size - sizeof (uint32_t))  != \r
+                       hca_ul_info->pd_ul_resources_sz ||\r
+                       NULL == p_umv_buf->p_inout_buf) {\r
+                       status = IB_INVALID_PARAMETER;\r
+                       goto cleanup;\r
+               }\r
+               pd_ul_resources_p = (void *)p_umv_buf->p_inout_buf;\r
+\r
+               /* get the current protection context */ \r
+               prot_ctx = MOSAL_get_current_prot_ctx();\r
+       }\r
+       else\r
+       {\r
+               // for kernel mode calls - allocate app resources. Use prep->call->done sequence\r
+               pd_ul_resources_p = cl_zalloc( hca_ul_info->pd_ul_resources_sz);\r
+               if (NULL == pd_ul_resources_p) {\r
+                       status = IB_INSUFFICIENT_MEMORY;\r
+                       goto cleanup;\r
+               }\r
+\r
+               switch( type )\r
+               {\r
+               case IB_PDT_SQP:\r
+                       if (HH_OK != THHUL_pdm_alloc_pd_avs_prep(hobul_p->hhul_hndl,\r
+                               g_sqp_max_avs, PD_FOR_SQP, &hhul_pd_hndl, pd_ul_resources_p))\r
+                       {\r
+                               status = IB_ERROR;\r
+                               goto cleanup;\r
+                       }\r
+                       break;\r
+\r
+               case IB_PDT_UD:\r
+                       if (HH_OK != THHUL_pdm_alloc_pd_avs_prep(hobul_p->hhul_hndl,\r
+                               g_sqp_max_avs, PD_NO_FLAGS, &hhul_pd_hndl, pd_ul_resources_p))\r
+                       {\r
+                               status = IB_ERROR;\r
+                               goto cleanup;\r
+                       }\r
+                       break;\r
+\r
+               default:\r
+                       if (HH_OK != THHUL_pdm_alloc_pd_prep(hobul_p->hhul_hndl, &hhul_pd_hndl, pd_ul_resources_p)) {\r
+                               status = IB_ERROR;\r
+                               goto cleanup;\r
+                       }\r
+               }\r
+               /* get the current protection context */ \r
+               prot_ctx = MOSAL_get_kernel_prot_ctx();\r
+       }\r
+\r
+       // Allocate the PD (cmdif)\r
+       if (HH_OK != THH_hob_alloc_pd(hobul_p->hh_hndl, prot_ctx, pd_ul_resources_p, &pd_idx)) {\r
+               status = IB_INSUFFICIENT_RESOURCES;\r
+               goto cleanup_pd;\r
+       }\r
+\r
+       if( !(p_umv_buf && p_umv_buf->command) )\r
+       {\r
+               // Manage user level resources\r
+               if (HH_OK != THHUL_pdm_alloc_pd_done(hobul_p->hhul_hndl, hhul_pd_hndl, pd_idx, pd_ul_resources_p)) {\r
+                       THH_hob_free_pd(hobul_p->hh_hndl, pd_idx);\r
+                       status = IB_ERROR;\r
+                       goto cleanup_pd;\r
+               }\r
+       }\r
+\r
+       VALIDATE_INDEX(pd_idx, hobul_p->max_pd, IB_ERROR, cleanup_pd);\r
+\r
+       // Save data refs for future use\r
+       cl_mutex_acquire(&hobul_p->pd_info_tbl[pd_idx].mutex);\r
+       hobul_p->pd_info_tbl[pd_idx].pd_num = pd_idx;\r
+       hobul_p->pd_info_tbl[pd_idx].hca_idx = hob_p->index;\r
+       hobul_p->pd_info_tbl[pd_idx].hhul_pd_hndl = hhul_pd_hndl;\r
+       hobul_p->pd_info_tbl[pd_idx].pd_ul_resources_p = pd_ul_resources_p;\r
+       hobul_p->pd_info_tbl[pd_idx].count = 0;\r
+       hobul_p->pd_info_tbl[pd_idx].kernel_mode = !(p_umv_buf && p_umv_buf->command);\r
+       hobul_p->pd_info_tbl[pd_idx].mark = E_MARK_PD;\r
+       cl_mutex_release(&hobul_p->pd_info_tbl[pd_idx].mutex);\r
+\r
+       cl_atomic_inc( &hobul_p->count );\r
+\r
+       if (ph_pd) *ph_pd = (ib_pd_handle_t)PD_HNDL_FROM_PD(pd_idx);\r
+       HCA_PRINT(TRACE_LEVEL_INFORMATION  , HCA_DBG_SHIM  ,("hca_idx 0x%x pd_idx 0x%x returned 0x%p\n", hob_p->index, pd_idx, *ph_pd));\r
+\r
+       if( p_umv_buf && p_umv_buf->command )\r
+       {\r
+               p_umv_buf->output_size = p_umv_buf->input_size;\r
+               /* \r
+               * Copy the pd_idx back to user\r
+               */\r
+               cl_memcpy (((uint8_t* __ptr64)p_umv_buf->p_inout_buf + hca_ul_info->pd_ul_resources_sz),\r
+                       &pd_idx, sizeof (pd_idx));\r
+               p_umv_buf->status = IB_SUCCESS;\r
+       }\r
+       HCA_EXIT( HCA_DBG_SHIM);\r
+       return IB_SUCCESS;\r
+\r
+cleanup_pd:\r
+       THHUL_pdm_free_pd_prep(hobul_p->hhul_hndl, hhul_pd_hndl, FALSE);\r
+       THHUL_pdm_free_pd_done(hobul_p->hhul_hndl, hhul_pd_hndl);\r
+\r
+cleanup:\r
+       if( !(p_umv_buf && p_umv_buf->command) && pd_ul_resources_p )\r
+               cl_free( pd_ul_resources_p);\r
+       if( p_umv_buf && p_umv_buf->command )\r
+       {\r
+               p_umv_buf->status = status;\r
+       }\r
+       HCA_PRINT(TRACE_LEVEL_ERROR  ,HCA_DBG_SHIM   ,("status %d \n", status));\r
+       HCA_EXIT( HCA_DBG_SHIM);\r
+       return status;\r
+#endif \r
+}\r
+\r
+ib_api_status_t\r
+mlnx_deallocate_pd (\r
+       IN                              ib_pd_handle_t                          h_pd)\r
+{\r
+#ifndef WIN_TO_BE_CHANGED\r
+\r
+       ib_api_status_t         status;\r
+       int err;\r
+       struct ib_pd *ib_pd_p = (struct ib_pd *)h_pd;\r
+\r
+       HCA_ENTER( HCA_DBG_QP);\r
+\r
+       HCA_PRINT(TRACE_LEVEL_INFORMATION       ,HCA_DBG_SHIM  ,\r
+               ("pcs %p\n", PsGetCurrentProcess()));\r
+       \r
+       // dealloc pd\r
+       err = ibv_dealloc_pd( ib_pd_p );\r
+       if (err) {\r
+               HCA_PRINT(TRACE_LEVEL_ERROR  ,HCA_DBG_SHIM  ,("ibv_dealloc_pd failed (%d)\n", err));\r
+               status = errno_to_iberr(err);\r
+               goto err_dealloc_pd;\r
+       }\r
+       status = IB_SUCCESS;\r
+\r
+err_dealloc_pd:\r
+       HCA_PRINT_EXIT(TRACE_LEVEL_ERROR  , HCA_DBG_SHIM  ,("completes with ERROR status %s\n", ib_get_err_str(status)));\r
+       return status;\r
+\r
+#else\r
+       uint32_t                        hca_idx = PD_HCA_FROM_HNDL(h_pd);\r
+       uint32_t                        pd_idx  = PD_NUM_FROM_HNDL(h_pd);\r
+       mlnx_hobul_t            *hobul_p;\r
+       HHUL_pd_hndl_t          hhul_pd_hndl;\r
+       ib_api_status_t         status;\r
+\r
+       HCA_ENTER( HCA_DBG_SHIM);\r
+\r
+       VALIDATE_INDEX(hca_idx, MLNX_MAX_HCA, IB_INVALID_CA_HANDLE, cleanup);\r
+       hobul_p = mlnx_hobul_array[hca_idx];\r
+       if (NULL == hobul_p) {\r
+               status =  IB_INVALID_PD_HANDLE;\r
+               goto cleanup;\r
+       }\r
+       VALIDATE_INDEX(pd_idx, hobul_p->max_pd, IB_INVALID_PD_HANDLE, cleanup);\r
+       if (E_MARK_PD != hobul_p->pd_info_tbl[pd_idx].mark) {\r
+               status =  IB_INVALID_PD_HANDLE;\r
+               goto cleanup;\r
+       }\r
+\r
+       cl_mutex_acquire(&hobul_p->pd_info_tbl[pd_idx].mutex);\r
+\r
+       HCA_PRINT(TRACE_LEVEL_INFORMATION, HCA_DBG_SHIM,("pd %d count %d k_mod %d\n", pd_idx, \r
+               hobul_p->pd_info_tbl[pd_idx].count, hobul_p->pd_info_tbl[pd_idx].kernel_mode));\r
+\r
+       if (0 != hobul_p->pd_info_tbl[pd_idx].count) {\r
+               HCA_PRINT(TRACE_LEVEL_INFORMATION, HCA_DBG_SHIM,("pd %d count %d\n", \r
+                       pd_idx, hobul_p->pd_info_tbl[pd_idx].count));\r
+               status = IB_RESOURCE_BUSY;\r
+               goto cleanup_locked;\r
+       }\r
+\r
+       hhul_pd_hndl = hobul_p->pd_info_tbl[pd_idx].hhul_pd_hndl;\r
+\r
+       // PREP:\r
+       if (hobul_p->pd_info_tbl[pd_idx].kernel_mode) {\r
+               if (HH_OK != THHUL_pdm_free_pd_prep(hobul_p->hhul_hndl, hhul_pd_hndl, FALSE)) {\r
+                       status = IB_ERROR;\r
+                       goto cleanup_locked;\r
+               }\r
+       }\r
+\r
+       HCA_PRINT(TRACE_LEVEL_INFORMATION, HCA_DBG_SHIM ,("pd %d before free_pd hh_hndl %p\n", \r
+               pd_idx, hobul_p->hh_hndl));\r
+\r
+       if (HH_OK != THH_hob_free_pd(hobul_p->hh_hndl, pd_idx)) {\r
+               status = IB_ERROR;\r
+               goto cleanup_locked;\r
+       }\r
+\r
+       HCA_PRINT(TRACE_LEVEL_INFORMATION, HCA_DBG_SHIM,("pd %d after free_pd\n", pd_idx));\r
+\r
+       if (hobul_p->pd_info_tbl[pd_idx].kernel_mode) {\r
+               if (HH_OK != THHUL_pdm_free_pd_done(hobul_p->hhul_hndl, hhul_pd_hndl)) {\r
+                       status = IB_ERROR;\r
+                       goto cleanup_locked;\r
+               }\r
+               if (hobul_p->pd_info_tbl[pd_idx].pd_ul_resources_p)\r
+                       cl_free( hobul_p->pd_info_tbl[pd_idx].pd_ul_resources_p);\r
+       }\r
+\r
+       hobul_p->pd_info_tbl[pd_idx].mark = E_MARK_INVALID;\r
+       hobul_p->pd_info_tbl[pd_idx].pd_ul_resources_p = NULL;\r
+\r
+       cl_mutex_release(&hobul_p->pd_info_tbl[pd_idx].mutex);\r
+\r
+       cl_atomic_dec( &hobul_p->count );\r
+       HCA_EXIT( HCA_DBG_SHIM);\r
+       return IB_SUCCESS;\r
+\r
+cleanup_locked:\r
+       cl_mutex_release(&hobul_p->pd_info_tbl[pd_idx].mutex);\r
+\r
+cleanup:\r
+       HCA_PRINT(TRACE_LEVEL_ERROR  ,HCA_DBG_SHIM  ,("status %d\n", status));\r
+       HCA_EXIT( HCA_DBG_SHIM);\r
+       return status;\r
+#endif \r
+}\r
+\r
+/* \r
+* Address Vector Management Verbs\r
+*/\r
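+/*\r
+ * In this driver an IBAL address vector handle maps directly to a\r
+ * struct ib_ah: mlnx_conv_ibal_av translates the ib_av_attr_t fields into\r
+ * struct ib_ah_attr before ibv_create_ah is called, and query/modify/destroy\r
+ * simply cast the handle back to struct ib_ah.\r
+ */\r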
+ib_api_status_t\r
+mlnx_create_av (\r
+       IN              const   ib_pd_handle_t                          h_pd,\r
+       IN              const   ib_av_attr_t                            *p_addr_vector,\r
+               OUT                     ib_av_handle_t                          *ph_av,\r
+       IN      OUT                     ci_umv_buf_t                            *p_umv_buf )\r
+{\r
+#ifndef WIN_TO_BE_CHANGED\r
+\r
+       int err = 0;\r
+       ib_api_status_t         status = IB_SUCCESS;\r
+       struct ib_pd *ib_pd_p = (struct ib_pd *)h_pd;\r
+       struct ib_device *ib_dev_p = ib_pd_p->device;\r
+       struct ib_ah *ib_av_p;\r
+       struct ib_ah_attr ah_attr;\r
+       struct ib_ucontext *context_p = NULL;\r
+\r
+       HCA_ENTER(HCA_DBG_QP);\r
+\r
+       if( p_umv_buf && p_umv_buf->command ) {\r
+               // sanity checks \r
+               if (p_umv_buf->input_size < sizeof(struct ibv_create_ah) ||\r
+                       p_umv_buf->output_size < sizeof(struct ibv_create_ah_resp) ||\r
+                       !p_umv_buf->p_inout_buf) {\r
+                       status = IB_INVALID_PARAMETER;\r
+                       goto err_inval_params;\r
+               }\r
+               context_p = ib_pd_p->ucontext;\r
+       }\r
+       else \r
+               context_p = NULL;\r
+\r
+       // fill parameters \r
+       RtlZeroMemory(&ah_attr, sizeof(ah_attr));\r
+       mlnx_conv_ibal_av( ib_dev_p, p_addr_vector,  &ah_attr );\r
+\r
+       ib_av_p = ibv_create_ah(ib_pd_p, &ah_attr, context_p, p_umv_buf);\r
+       if (IS_ERR(ib_av_p)) {\r
+               err = PTR_ERR(ib_av_p);\r
+               HCA_PRINT(TRACE_LEVEL_ERROR   ,HCA_DBG_QP  ,("ibv_create_ah failed (%d)\n", err));\r
+               status = errno_to_iberr(err);\r
+               goto err_alloc_av;\r
+       }\r
+\r
+       // return the result\r
+       if (ph_av) *ph_av = (ib_av_handle_t)ib_av_p;\r
+\r
+       if( context_p )\r
+       {\r
+               struct ibv_create_ah_resp *create_ah_resp = (struct ibv_create_ah_resp *)(void*)p_umv_buf->p_inout_buf;\r
+               cl_memcpy( &create_ah_resp->av_attr, p_addr_vector, sizeof(create_ah_resp->av_attr) );\r
+               p_umv_buf->status = IB_SUCCESS;\r
+       }\r
+\r
+       status = IB_SUCCESS;\r
+       \r
+err_alloc_av:  \r
+err_inval_params:\r
+       HCA_PRINT_EXIT(TRACE_LEVEL_ERROR ,HCA_DBG_QP ,("completes with ERROR status %s\n", ib_get_err_str(status)));\r
+       return status;\r
+       \r
+#else\r
+       uint32_t                        hca_idx = PD_HCA_FROM_HNDL(h_pd);\r
+       uint32_t                        pd_idx  = PD_NUM_FROM_HNDL(h_pd);\r
+       HHUL_ud_av_hndl_t       av_h;\r
+       mlnx_hobul_t            *hobul_p;\r
+       mlnx_avo_t                      *avo_p = NULL;\r
+       HHUL_pd_hndl_t          hhul_pd_hndl;\r
+       ib_api_status_t         status;\r
+\r
+       VAPI_ud_av_t            av;\r
+\r
+       HCA_ENTER( HCA_DBG_SHIM);\r
+\r
+       VALIDATE_INDEX(hca_idx, MLNX_MAX_HCA, IB_INVALID_CA_HANDLE, cleanup);\r
+       hobul_p = mlnx_hobul_array[hca_idx];\r
+       if (NULL == hobul_p) {\r
+               status =  IB_INVALID_PD_HANDLE;\r
+               goto cleanup;\r
+       }\r
+       VALIDATE_INDEX(pd_idx, hobul_p->max_pd, IB_INVALID_PD_HANDLE, cleanup);\r
+       if (E_MARK_PD != hobul_p->pd_info_tbl[pd_idx].mark) {\r
+               status =  IB_INVALID_PD_HANDLE;\r
+               goto cleanup;\r
+       }\r
+       hhul_pd_hndl = hobul_p->pd_info_tbl[pd_idx].hhul_pd_hndl;\r
+\r
+       if (NULL == (avo_p = cl_zalloc( sizeof(mlnx_avo_t)))) {\r
+               status = IB_INSUFFICIENT_MEMORY;\r
+               goto cleanup;\r
+       }\r
+\r
+       cl_memclr(&av, sizeof(av));\r
+       mlnx_conv_ibal_av(hobul_p->hh_hndl, p_addr_vector, &av);\r
+       // This creates a non-privileged ud_av.\r
+       // To create a privileged ud_av call THH_hob_create_ud_av()\r
+       if (HH_OK != THHUL_pdm_create_ud_av(hobul_p->hhul_hndl, hhul_pd_hndl, &av, &av_h)) {\r
+               status = IB_INSUFFICIENT_RESOURCES;\r
+               goto cleanup;\r
+       }\r
+\r
+       // update PD object count\r
+       cl_atomic_inc(&hobul_p->pd_info_tbl[pd_idx].count);\r
+       HCA_PRINT(TRACE_LEVEL_INFORMATION, HCA_DBG_SHIM,("pd %d count %d\n", pd_idx, hobul_p->pd_info_tbl[pd_idx].count));\r
+\r
+\r
+       avo_p->mark    = E_MARK_AV;\r
+       avo_p->hca_idx = hca_idx;\r
+       avo_p->pd_idx  = pd_idx;\r
+       avo_p->h_av    = av_h;\r
+\r
+       if (ph_av) *ph_av = (ib_av_handle_t)avo_p;\r
+\r
+       if( p_umv_buf && p_umv_buf->command )\r
+       {\r
+               p_umv_buf->status = IB_SUCCESS;\r
+       }\r
+       status = IB_SUCCESS;\r
+       goto end;\r
+\r
+cleanup:\r
+       if (avo_p) {\r
+               avo_p->mark = E_MARK_INVALID;\r
+               cl_free( avo_p);\r
+       }\r
+       if( p_umv_buf && p_umv_buf->command )\r
+       {\r
+               p_umv_buf->status = status;\r
+       }\r
+end:\r
+       HCA_PRINT_EXIT(TRACE_LEVEL_ERROR  ,HCA_DBG_SHIM  ,("status %d \n", status));\r
+       return status;\r
+#endif \r
+}\r
+\r
+ib_api_status_t\r
+mlnx_query_av (\r
+       IN              const   ib_av_handle_t                          h_av,\r
+               OUT                     ib_av_attr_t                            *p_addr_vector,\r
+               OUT                     ib_pd_handle_t                          *ph_pd,\r
+       IN      OUT                     ci_umv_buf_t                            *p_umv_buf )\r
+{\r
+#ifndef WIN_TO_BE_CHANGED\r
+                                                                                                                                                               \r
+       int err;\r
+       ib_api_status_t         status = IB_SUCCESS;\r
+       struct ib_ah *ib_ah_p = (struct ib_ah *)h_av;\r
+\r
+       HCA_ENTER(HCA_DBG_SHIM);\r
+\r
+       // sanity checks\r
+       if( p_umv_buf && p_umv_buf->command ) {\r
+                       HCA_PRINT (TRACE_LEVEL_ERROR, HCA_DBG_SHIM,("User mode is not supported yet\n"));\r
+                       status = IB_UNSUPPORTED;\r
+                       goto err_user_unsupported;\r
+       }\r
+\r
+       // query AV\r
+#if 0\r
+       //TODO: not implemented in low-level driver\r
+       err = ibv_query_ah(ib_ah_p, &ah_attr);\r
+       if (err) {\r
+               HCA_PRINT(TRACE_LEVEL_ERROR, HCA_DBG_SHIM, ("ibv_query_ah failed (%d)\n", err));\r
+               status = errno_to_iberr(err);\r
+               goto err_query_ah;\r
+       }\r
+       // convert to IBAL structure: something like that\r
+       mlnx_conv_mthca_av( p_addr_vector,  &ah_attr );\r
+#else\r
+\r
+       err = mlnx_conv_mthca_av( ib_ah_p, p_addr_vector );\r
+       if (err) {\r
+               HCA_PRINT (TRACE_LEVEL_ERROR, HCA_DBG_SHIM,("mlnx_conv_mthca_av failed (%d)\n", err));\r
+               status = errno_to_iberr(err);\r
+               goto err_conv_mthca_av;\r
+       }\r
+#endif\r
+\r
+       // results\r
+       *ph_pd = (ib_pd_handle_t)ib_ah_p->pd;\r
+       \r
+err_conv_mthca_av:\r
+err_user_unsupported:\r
+       HCA_PRINT_EXIT(TRACE_LEVEL_ERROR  , HCA_DBG_SHIM  ,("completes with ERROR status %s\n", ib_get_err_str(status)));\r
+       return status;\r
+\r
+#else\r
+       mlnx_avo_t                      *avo_p = (mlnx_avo_t *)h_av;\r
+       mlnx_hobul_t            *hobul_p;\r
+       ib_api_status_t         status;\r
+\r
+       VAPI_ud_av_t            av;\r
+\r
+       HCA_ENTER( HCA_DBG_SHIM);\r
+       if (!avo_p || avo_p->mark != E_MARK_AV) {\r
+               status = IB_INVALID_AV_HANDLE;\r
+               goto cleanup;\r
+       }\r
+\r
+       VALIDATE_INDEX(avo_p->hca_idx, MLNX_MAX_HCA, IB_INVALID_CA_HANDLE, cleanup);\r
+       hobul_p = mlnx_hobul_array[avo_p->hca_idx];\r
+       if (NULL == hobul_p) {\r
+               status =  IB_INVALID_AV_HANDLE;\r
+               goto cleanup;\r
+       }\r
+       VALIDATE_INDEX(avo_p->pd_idx, hobul_p->max_pd, IB_INVALID_PD_HANDLE, cleanup);\r
+       if (E_MARK_PD != hobul_p->pd_info_tbl[avo_p->pd_idx].mark) {\r
+               status =  IB_INVALID_PD_HANDLE;\r
+               goto cleanup;\r
+       }\r
+\r
+       if (p_addr_vector) {\r
+               if (HH_OK != THHUL_pdm_query_ud_av(hobul_p->hhul_hndl, avo_p->h_av, &av)) {\r
+                       status = IB_ERROR;\r
+                       goto cleanup;\r
+               }\r
+               mlnx_conv_vapi_av(hobul_p->hh_hndl, &av, p_addr_vector);\r
+       }\r
+\r
+       if (ph_pd) *ph_pd = (ib_pd_handle_t)PD_HNDL_FROM_PD(avo_p->pd_idx);\r
+\r
+       if( p_umv_buf && p_umv_buf->command )\r
+       {\r
+               p_umv_buf->output_size = 0;\r
+               p_umv_buf->status = IB_SUCCESS;\r
+       }\r
+       HCA_EXIT( HCA_DBG_SHIM);\r
+       return IB_SUCCESS;\r
+\r
+cleanup:\r
+       if( p_umv_buf && p_umv_buf->command )\r
+       {\r
+               p_umv_buf->status = status;\r
+       }\r
+       HCA_PRINT(TRACE_LEVEL_ERROR  ,HCA_DBG_SHIM  ,("status %d \n", status));\r
+       HCA_EXIT( HCA_DBG_SHIM);\r
+       return status;\r
+#endif \r
+}\r
+\r
+ib_api_status_t\r
+mlnx_modify_av (\r
+       IN              const   ib_av_handle_t                          h_av,\r
+       IN              const   ib_av_attr_t                            *p_addr_vector,\r
+       IN      OUT                     ci_umv_buf_t                            *p_umv_buf )\r
+{\r
+#ifndef WIN_TO_BE_CHANGED\r
+\r
+       struct ib_ah_attr ah_attr;\r
+       ib_api_status_t         status = IB_SUCCESS;\r
+       struct ib_ah *ib_ah_p = (struct ib_ah *)h_av;\r
+       struct ib_device *ib_dev_p = ib_ah_p->pd->device;\r
+\r
+       HCA_ENTER(HCA_DBG_SHIM);\r
+\r
+       // sanity checks\r
+       if( p_umv_buf && p_umv_buf->command ) {\r
+                       HCA_PRINT(TRACE_LEVEL_ERROR   ,HCA_DBG_SHIM  ,("User mode is not supported yet\n"));\r
+                       status = IB_UNSUPPORTED;\r
+                       goto err_user_unsupported;\r
+       }\r
+\r
+       // fill parameters \r
+       mlnx_conv_ibal_av( ib_dev_p, p_addr_vector,  &ah_attr );\r
+\r
+       // modify AH\r
+#if 0\r
+       //TODO: not implemented in low-level driver\r
+       err = ibv_modify_ah(ib_ah_p, &ah_attr);\r
+       if (err) {\r
+               HCA_PRINT (TRACE_LEVEL_ERROR, HCA_DBG_SHIM,("ibv_modify_ah failed (%d)\n", err));\r
+               status = errno_to_iberr(err);\r
+               goto err_query_ah;\r
+       }\r
+#else\r
+\r
+       mlnx_modify_ah( ib_ah_p, &ah_attr );\r
+#endif\r
+\r
+err_user_unsupported:\r
+       HCA_PRINT_EXIT(TRACE_LEVEL_ERROR  , HCA_DBG_SHIM  ,("completes with ERROR status %s\n", ib_get_err_str(status)));\r
+       return status;\r
+\r
+#else\r
+       mlnx_avo_t                      *avo_p = (mlnx_avo_t *)h_av;\r
+       mlnx_hobul_t            *hobul_p;\r
+       ib_api_status_t         status;\r
+\r
+       VAPI_ud_av_t            av;\r
+\r
+       HCA_ENTER( HCA_DBG_SHIM);\r
+       if (!avo_p || avo_p->mark != E_MARK_AV) {\r
+               status = IB_INVALID_AV_HANDLE;\r
+               goto cleanup;\r
+       }\r
+\r
+       VALIDATE_INDEX(avo_p->hca_idx, MLNX_MAX_HCA, IB_INVALID_CA_HANDLE, cleanup);\r
+       hobul_p = mlnx_hobul_array[avo_p->hca_idx];\r
+       if (NULL == hobul_p) {\r
+               status =  IB_INVALID_AV_HANDLE;\r
+               goto cleanup;\r
+       }\r
+\r
+       cl_memclr(&av, sizeof(av));\r
+       mlnx_conv_ibal_av(hobul_p->hh_hndl, p_addr_vector, &av);\r
+       if (HH_OK != THHUL_pdm_modify_ud_av(hobul_p->hhul_hndl, avo_p->h_av, &av)) {\r
+               status = IB_ERROR;\r
+               goto cleanup;\r
+       }\r
+\r
+       if( p_umv_buf && p_umv_buf->command )\r
+       {\r
+               p_umv_buf->output_size = 0;\r
+               p_umv_buf->status = IB_SUCCESS;\r
+       }\r
+       HCA_EXIT( HCA_DBG_SHIM);\r
+       return IB_SUCCESS;\r
+\r
+cleanup:\r
+       if( p_umv_buf && p_umv_buf->command )\r
+       {\r
+               p_umv_buf->status = status;\r
+       }\r
+       HCA_PRINT(TRACE_LEVEL_ERROR  ,HCA_DBG_SHIM  ,("status %d\n",status));\r
+       HCA_EXIT( HCA_DBG_SHIM);\r
+       return status;\r
+#endif \r
+}\r
+\r
+ib_api_status_t\r
+mlnx_destroy_av (\r
+       IN              const   ib_av_handle_t                          h_av)\r
+{\r
+#ifndef WIN_TO_BE_CHANGED\r
+                                                                                                                                                               \r
+       int err;\r
+       ib_api_status_t         status = IB_SUCCESS;\r
+       struct ib_ah *ib_ah_p = (struct ib_ah *)h_av;\r
+\r
+       HCA_ENTER(HCA_DBG_SHIM);\r
+\r
+       // destroy AV\r
+       err = ibv_destroy_ah( ib_ah_p );\r
+       if (err) {\r
+               HCA_PRINT (TRACE_LEVEL_ERROR ,HCA_DBG_SHIM,("ibv_destroy_ah failed (%d)\n", err));\r
+               status = errno_to_iberr(err);\r
+               goto err_destroy_ah;\r
+       }\r
+\r
+err_destroy_ah:\r
+       HCA_PRINT_EXIT(TRACE_LEVEL_ERROR  , HCA_DBG_SHIM  ,("completes with ERROR status %s\n", ib_get_err_str(status)));\r
+       return status;\r
+\r
+#else\r
+       mlnx_avo_t                      *avo_p = (mlnx_avo_t *)h_av;\r
+       mlnx_hobul_t            *hobul_p;\r
+       ib_api_status_t         status;\r
+\r
+       HCA_ENTER( HCA_DBG_SHIM);\r
+       if (!avo_p || avo_p->mark != E_MARK_AV) {\r
+               status = IB_INVALID_AV_HANDLE;\r
+               goto cleanup;\r
+       }\r
+\r
+       VALIDATE_INDEX(avo_p->hca_idx, MLNX_MAX_HCA, IB_INVALID_CA_HANDLE, cleanup);\r
+       hobul_p = mlnx_hobul_array[avo_p->hca_idx];\r
+       if (NULL == hobul_p) {\r
+               status =  IB_INVALID_AV_HANDLE;\r
+               goto cleanup;\r
+       }\r
+       VALIDATE_INDEX(avo_p->pd_idx, hobul_p->max_pd, IB_INVALID_PD_HANDLE, cleanup);\r
+       if (E_MARK_PD != hobul_p->pd_info_tbl[avo_p->pd_idx].mark) {\r
+               status =  IB_INVALID_PD_HANDLE;\r
+               goto cleanup;\r
+       }\r
+\r
+       // This destroys a non-privileged ud_av.\r
+       // To destroy a privileged ud_av call THH_hob_destroy_ud_av()\r
+       if (HH_OK != THHUL_pdm_destroy_ud_av(hobul_p->hhul_hndl, avo_p->h_av)) {\r
+               status = IB_ERROR;\r
+               goto cleanup;\r
+       }\r
+\r
+       // update PD object count\r
+       cl_atomic_dec(&hobul_p->pd_info_tbl[avo_p->pd_idx].count);\r
+       HCA_PRINT(TRACE_LEVEL_INFORMATION, HCA_DBG_SHIM,("pd %d count %d\n", avo_p->pd_idx, hobul_p->pd_info_tbl[avo_p->pd_idx].count));\r
+\r
+       avo_p->mark = E_MARK_INVALID;\r
+       cl_free( avo_p);\r
+       HCA_EXIT( HCA_DBG_SHIM);\r
+       return IB_SUCCESS;\r
+\r
+cleanup:\r
+       if (avo_p) {\r
+               avo_p->mark = E_MARK_INVALID;\r
+               cl_free( avo_p);\r
+       }\r
+       HCA_PRINT(TRACE_LEVEL_ERROR  ,HCA_DBG_SHIM  ,("status %d\n", status));\r
+       HCA_EXIT( HCA_DBG_SHIM);\r
+       return status;\r
+#endif \r
+}\r
+\r
+/*\r
+*      Queue Pair Management Verbs\r
+*/\r
+\r
+\r
+static ib_api_status_t\r
+_create_qp (\r
+       IN              const   ib_pd_handle_t                          h_pd,\r
+       IN              const   uint8_t                                         port_num,\r
+       IN              const   void                                            *qp_context,\r
+       IN              const   ib_qp_create_t                          *p_create_attr,\r
+               OUT                     ib_qp_attr_t                            *p_qp_attr,\r
+               OUT                     ib_qp_handle_t                          *ph_qp,\r
+       IN      OUT                     ci_umv_buf_t                            *p_umv_buf )\r
+{\r
+               int err;\r
+               ib_api_status_t         status;\r
+               struct ib_qp * ib_qp_p;\r
+               struct mthca_qp *qp_p;\r
+               struct ib_qp_init_attr qp_init_attr;\r
+               struct ib_ucontext *context_p = NULL;\r
+               struct ib_pd *ib_pd_p = (struct ib_pd *)h_pd;\r
+               struct ib_device *ib_dev = ib_pd_p->device;\r
+               mlnx_hob_t       *hob_p = HOB_FROM_IBDEV(ib_dev);\r
+               \r
+               HCA_ENTER(HCA_DBG_QP);\r
+\r
+       \r
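+               // p_umv_buf->command is set only for user-mode (uverbs) calls; in that\r
+               // case the request/response travel via p_inout_buf and the QP is\r
+               // created in the caller's ucontext, otherwise the call is kernel-originated\r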
+               if( p_umv_buf && p_umv_buf->command ) {\r
+                       // sanity checks \r
+                       if (p_umv_buf->input_size < sizeof(struct ibv_create_qp) ||\r
+                               p_umv_buf->output_size < sizeof(struct ibv_create_qp_resp) ||\r
+                               !p_umv_buf->p_inout_buf) {\r
+                               status = IB_INVALID_PARAMETER;\r
+                               goto err_inval_params;\r
+                       }\r
+                       context_p = ib_pd_p->ucontext;\r
+               }\r
+               else \r
+                       context_p = NULL;\r
+\r
+               // prepare the parameters\r
+               RtlZeroMemory(&qp_init_attr, sizeof(qp_init_attr));\r
+               qp_init_attr.qp_type = p_create_attr->qp_type;\r
+               qp_init_attr.event_handler = qp_event_handler;\r
+               qp_init_attr.qp_context = hob_p;\r
+               qp_init_attr.recv_cq = (struct ib_cq *)p_create_attr->h_rq_cq;\r
+               qp_init_attr.send_cq = (struct ib_cq *)p_create_attr->h_sq_cq;\r
+               qp_init_attr.cap.max_recv_sge = p_create_attr->rq_sge;\r
+               qp_init_attr.cap.max_send_sge = p_create_attr->sq_sge;\r
+               qp_init_attr.cap.max_recv_wr = p_create_attr->rq_depth;\r
+               qp_init_attr.cap.max_send_wr = p_create_attr->sq_depth;\r
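+               // sq_signaled selects whether every send WR generates a completion\r
+               // (IB_SIGNAL_ALL_WR) or only those explicitly requested (IB_SIGNAL_REQ_WR)\r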
+               qp_init_attr.sq_sig_type = (p_create_attr->sq_signaled) ? IB_SIGNAL_ALL_WR : IB_SIGNAL_REQ_WR;\r
+               qp_init_attr.port_num = port_num;\r
+\r
+\r
+               // create qp            \r
+               ib_qp_p = ibv_create_qp( ib_pd_p, &qp_init_attr, context_p, p_umv_buf );\r
+               if (IS_ERR(ib_qp_p)) {\r
+                       err = PTR_ERR(ib_qp_p);\r
+                       HCA_PRINT(TRACE_LEVEL_ERROR  , HCA_DBG_QP  ,("ibv_create_qp failed (%d)\n", err));\r
+                       status = errno_to_iberr(err);\r
+                       goto err_create_qp;\r
+               }\r
+       \r
+               // fill the object\r
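+               // the init attributes are cached in the mthca_qp object so that\r
+               // mlnx_query_qp() can report them until ibv_query_qp is supported\r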
+               qp_p = (struct mthca_qp *)ib_qp_p;\r
+               qp_p->qp_context = (void*)qp_context;\r
+               qp_p->qp_init_attr = qp_init_attr;\r
+       \r
+               // Query QP to obtain requested attributes\r
+               if (p_qp_attr) {\r
+                       status = mlnx_query_qp ((ib_qp_handle_t)ib_qp_p, p_qp_attr, p_umv_buf);\r
+                       if (status != IB_SUCCESS)\r
+                                       goto err_query_qp;\r
+               }\r
+               \r
+               // return the results\r
+               if (ph_qp) *ph_qp = (ib_qp_handle_t)ib_qp_p;\r
+       \r
+               status = IB_SUCCESS;\r
+               goto end;\r
+       \r
+       err_query_qp:\r
+               ibv_destroy_qp( ib_qp_p );\r
+       err_create_qp:\r
+       err_inval_params:\r
+       end:\r
+               if (p_umv_buf && p_umv_buf->command) \r
+                       p_umv_buf->status = status;\r
+               HCA_PRINT_EXIT(TRACE_LEVEL_ERROR ,HCA_DBG_QP ,("completes with ERROR status %s\n", ib_get_err_str(status)));\r
+               return status;\r
+}\r
+\r
+ib_api_status_t\r
+mlnx_create_spl_qp (\r
+       IN              const   ib_pd_handle_t                          h_pd,\r
+       IN              const   uint8_t                                         port_num,\r
+       IN              const   void                                            *qp_context,\r
+       IN              const   ib_qp_create_t                          *p_create_attr,\r
+               OUT                     ib_qp_attr_t                            *p_qp_attr,\r
+               OUT                     ib_qp_handle_t                          *ph_qp )\r
+{\r
+#ifndef WIN_TO_BE_CHANGED\r
+\r
+       ib_api_status_t         status;\r
+\r
+       HCA_ENTER(HCA_DBG_SHIM);\r
+\r
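+       // special QPs are created from kernel mode only, so no umv buffer is passed down\r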
+       status =        _create_qp( h_pd, port_num,\r
+               qp_context, p_create_attr, p_qp_attr, ph_qp, NULL );\r
+               \r
+       HCA_PRINT_EXIT(TRACE_LEVEL_ERROR  , HCA_DBG_SHIM  ,("completes with ERROR status %s\n", ib_get_err_str(status)));\r
+       return status;\r
+                                                                                                                                               \r
+#else\r
+       ib_api_status_t                 status;\r
+       ib_qp_handle_t                  h_qp;\r
+       ci_umv_buf_t                    *p_umv_buf = NULL;\r
+\r
+       uint32_t                                hca_idx = PD_HCA_FROM_HNDL(h_pd);\r
+       uint32_t                                pd_idx  = PD_NUM_FROM_HNDL(h_pd);\r
+       uint32_t                                qp_num;\r
+       uint32_t                                qp_idx;\r
+       uint32_t                                send_cq_num;\r
+       uint32_t                                send_cq_idx;\r
+       uint32_t                                recv_cq_num;\r
+       uint32_t                                recv_cq_idx;\r
+       mlnx_hobul_t                    *hobul_p;\r
+       HH_hca_dev_t                    *hca_ul_info;\r
+       HH_qp_init_attr_t               hh_qp_init_attr;\r
+       HHUL_qp_init_attr_t             ul_qp_init_attr;\r
+       HHUL_qp_hndl_t                  hhul_qp_hndl = NULL;\r
+       VAPI_special_qp_t               vapi_qp_type;\r
+       VAPI_qp_cap_t                   hh_qp_cap;\r
+       void                                    *qp_ul_resources_p = NULL;\r
+       VAPI_sg_lst_entry_t             *send_sge_p = NULL;\r
+       VAPI_sg_lst_entry_t             *recv_sge_p = NULL;\r
+       uint32_t                                num_sge;\r
+\r
+       HCA_ENTER( HCA_DBG_SHIM);\r
+\r
+       VALIDATE_INDEX(hca_idx, MLNX_MAX_HCA, IB_INVALID_CA_HANDLE, cleanup);\r
+       hobul_p = mlnx_hobul_array[hca_idx];\r
+       if (NULL == hobul_p) {\r
+               status = IB_INVALID_PD_HANDLE;\r
+               goto cleanup;\r
+       }\r
+       VALIDATE_INDEX(pd_idx, hobul_p->max_pd, IB_INVALID_PD_HANDLE, cleanup);\r
+       if (E_MARK_PD != hobul_p->pd_info_tbl[pd_idx].mark) {\r
+               status =  IB_INVALID_PD_HANDLE;\r
+               goto cleanup;\r
+       }\r
+\r
+       hca_ul_info = (HH_hca_dev_t *)hobul_p->hh_hndl;\r
+       if (NULL == hca_ul_info) {\r
+               status =  IB_INVALID_PD_HANDLE;\r
+               goto cleanup;\r
+       }\r
+\r
+       // The create attributes must be provided\r
+       if (!p_create_attr) {\r
+               status =  IB_INVALID_PARAMETER;\r
+               goto cleanup;\r
+       }\r
+\r
+       // convert input parameters\r
+       cl_memclr(&ul_qp_init_attr, sizeof(ul_qp_init_attr));\r
+       mlnx_conv_qp_create_attr(p_create_attr, &ul_qp_init_attr, &vapi_qp_type);\r
+       send_cq_num = CQ_NUM_FROM_HNDL(p_create_attr->h_sq_cq);\r
+       recv_cq_num = CQ_NUM_FROM_HNDL(p_create_attr->h_rq_cq);\r
+       send_cq_idx = send_cq_num & hobul_p->cq_idx_mask;\r
+       recv_cq_idx = recv_cq_num & hobul_p->cq_idx_mask;\r
+       VALIDATE_INDEX(send_cq_idx, hobul_p->max_cq, IB_INVALID_CQ_HANDLE, cleanup);\r
+       if ( E_MARK_CQ != hobul_p->cq_info_tbl[send_cq_idx].mark) {\r
+               status =  IB_INVALID_CQ_HANDLE;\r
+               goto cleanup;\r
+       }\r
+       VALIDATE_INDEX(recv_cq_idx, hobul_p->max_cq, IB_INVALID_CQ_HANDLE, cleanup);\r
+       if ( E_MARK_CQ != hobul_p->cq_info_tbl[recv_cq_idx].mark) {\r
+               status =  IB_INVALID_CQ_HANDLE;\r
+               goto cleanup;\r
+       }\r
+\r
+       ul_qp_init_attr.pd    = hobul_p->pd_info_tbl[pd_idx].hhul_pd_hndl;\r
+       ul_qp_init_attr.sq_cq = hobul_p->cq_info_tbl[send_cq_idx].hhul_cq_hndl;\r
+       ul_qp_init_attr.rq_cq = hobul_p->cq_info_tbl[recv_cq_idx].hhul_cq_hndl;\r
+\r
+       if( p_umv_buf && p_umv_buf->command )\r
+       {\r
+               // For user mode calls - obtain and verify the vendor information\r
+               if (p_umv_buf->input_size != hca_ul_info->qp_ul_resources_sz ||\r
+                       NULL == p_umv_buf->p_inout_buf) {\r
+                               status = IB_INVALID_PARAMETER;\r
+                               goto cleanup;\r
+                       }\r
+                       qp_ul_resources_p = (void *)p_umv_buf->p_inout_buf;\r
+\r
+       } else {\r
+               // For kernel mode calls - allocate app resources. Use prep->call->done sequence\r
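+               // prep allocates the user-level (HHUL) QP context, the HH call below\r
+               // creates the QP through the command interface, and create_qp_done\r
+               // finalizes the HHUL state once the QP number is known\r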
+               qp_ul_resources_p = cl_zalloc( hca_ul_info->qp_ul_resources_sz);\r
+               if (!qp_ul_resources_p) {\r
+                       status = IB_INSUFFICIENT_MEMORY;\r
+                       goto cleanup;\r
+               }\r
+\r
+               if (HH_OK != THHUL_qpm_special_qp_prep(hobul_p->hhul_hndl,\r
+                       vapi_qp_type,\r
+                       port_num, \r
+                       &ul_qp_init_attr,\r
+                       &hhul_qp_hndl,\r
+                       &hh_qp_cap,\r
+                       qp_ul_resources_p)) {\r
+                               status = IB_ERROR;\r
+                               goto cleanup;\r
+                       }\r
+                       // TBD: if not the same, report an error to IBAL\r
+                       ul_qp_init_attr.qp_cap = hh_qp_cap;  // struct assign\r
+       }\r
+\r
+       // Convert HHUL to HH structure (for HH create_qp)\r
+       hh_qp_init_attr.pd = pd_idx;\r
+       hh_qp_init_attr.rdd = 0; // TBD: RDD\r
+       if( ul_qp_init_attr.srq != HHUL_INVAL_SRQ_HNDL )\r
+       {\r
+               // TBD: HH handle from HHUL handle.\r
+               CL_ASSERT( ul_qp_init_attr.srq == HHUL_INVAL_SRQ_HNDL );\r
+       }\r
+       else\r
+       {\r
+               hh_qp_init_attr.srq = HH_INVAL_SRQ_HNDL;\r
+       }\r
+       hh_qp_init_attr.sq_cq = send_cq_num;\r
+       hh_qp_init_attr.rq_cq = recv_cq_num;\r
+       hh_qp_init_attr.sq_sig_type = ul_qp_init_attr.sq_sig_type;\r
+       hh_qp_init_attr.rq_sig_type = ul_qp_init_attr.rq_sig_type;\r
+       hh_qp_init_attr.ts_type = VAPI_TS_UD;\r
+       hh_qp_init_attr.qp_cap  = ul_qp_init_attr.qp_cap; // struct assign\r
+\r
+       // Allocate the QP (cmdif)\r
+       if (HH_OK != THH_hob_get_special_qp( hobul_p->hh_hndl,\r
+               vapi_qp_type,\r
+               port_num,\r
+               &hh_qp_init_attr,\r
+               qp_ul_resources_p,\r
+               &qp_num))\r
+       {\r
+               status = IB_ERROR;\r
+               goto cleanup_qp;\r
+       }\r
+\r
+       if( !(p_umv_buf && p_umv_buf->command) )\r
+       {\r
+               // Manage user level resources\r
+               if (HH_OK != THHUL_qpm_create_qp_done(hobul_p->hhul_hndl, hhul_qp_hndl, qp_num, qp_ul_resources_p)) {\r
+                       THH_hob_destroy_qp(hobul_p->hh_hndl, qp_num);\r
+                       status = IB_ERROR;\r
+                       goto cleanup_qp;\r
+               }\r
+\r
+               // Create SQ and RQ iov\r
+               num_sge = ul_qp_init_attr.qp_cap.max_sg_size_sq;\r
+               send_sge_p = cl_zalloc( num_sge * sizeof(VAPI_sg_lst_entry_t));\r
+               if (!send_sge_p) {\r
+                       status = IB_INSUFFICIENT_MEMORY;\r
+                       goto cleanup_qp;\r
+               }\r
+\r
+               num_sge = ul_qp_init_attr.qp_cap.max_sg_size_rq;\r
+               recv_sge_p = cl_zalloc( num_sge * sizeof(VAPI_sg_lst_entry_t));\r
+               if (!recv_sge_p) {\r
+                       status = IB_INSUFFICIENT_MEMORY;\r
+                       goto cleanup_qp;\r
+               }\r
+       }\r
+\r
+       // Save data refs for future use\r
+       qp_idx = qp_num & hobul_p->qp_idx_mask;\r
+       VALIDATE_INDEX(qp_idx, hobul_p->max_qp, IB_ERROR, cleanup_qp);\r
+\r
+       h_qp = (ib_qp_handle_t)QP_HNDL_FROM_QP(qp_idx);\r
+       cl_mutex_acquire(&h_qp->mutex);\r
+       h_qp->pd_num                    = pd_idx;\r
+       h_qp->hhul_qp_hndl              = hhul_qp_hndl;\r
+       h_qp->qp_type                   = p_create_attr->qp_type;\r
+       h_qp->sq_signaled               = p_create_attr->sq_signaled;\r
+       h_qp->qp_context                = qp_context;\r
+       h_qp->qp_ul_resources_p = qp_ul_resources_p;\r
+       h_qp->sq_size                   = ul_qp_init_attr.qp_cap.max_sg_size_sq;\r
+       h_qp->rq_size                   = ul_qp_init_attr.qp_cap.max_sg_size_rq;\r
+       h_qp->send_sge_p                = send_sge_p;\r
+       h_qp->recv_sge_p                = recv_sge_p;\r
+       h_qp->qp_num                    = qp_num;\r
+       h_qp->h_sq_cq                   = &hobul_p->cq_info_tbl[send_cq_idx];\r
+       h_qp->h_rq_cq                   = &hobul_p->cq_info_tbl[recv_cq_idx];\r
+       h_qp->kernel_mode               = !(p_umv_buf && p_umv_buf->command);\r
+       h_qp->mark                              = E_MARK_QP;\r
+       HCA_PRINT(TRACE_LEVEL_INFORMATION, HCA_DBG_SHIM,("qp num 0x%x idx 0x%x cq_s 0x%x cq_r 0x%x\n",\r
+               qp_num, qp_idx, send_cq_idx, recv_cq_idx));\r
+       cl_mutex_release(&h_qp->mutex);\r
+\r
+       /* Mark the CQ's associated with this special QP as being high priority. */\r
+       cl_atomic_inc( &h_qp->h_sq_cq->spl_qp_cnt );\r
+       KeSetImportanceDpc( &h_qp->h_sq_cq->dpc, HighImportance );\r
+       cl_atomic_inc( &h_qp->h_rq_cq->spl_qp_cnt );\r
+       KeSetImportanceDpc( &h_qp->h_rq_cq->dpc, HighImportance );\r
+\r
+       // Update PD object count\r
+       cl_atomic_inc(&hobul_p->pd_info_tbl[pd_idx].count);\r
+       HCA_PRINT(TRACE_LEVEL_INFORMATION  , HCA_DBG_SHIM  ,("pd %d count %d\n", pd_idx, hobul_p->pd_info_tbl[pd_idx].count));\r
+\r
+       // Query QP to obtain requested attributes\r
+       if (p_qp_attr) {\r
+               if (IB_SUCCESS != (status = mlnx_query_qp (h_qp, p_qp_attr, p_umv_buf))) {\r
+                       goto cleanup;\r
+               }\r
+       }\r
+\r
+       if (ph_qp) *ph_qp = h_qp;\r
+       if( p_umv_buf && p_umv_buf->command )\r
+       {\r
+               p_umv_buf->output_size = p_umv_buf->input_size;\r
+               p_umv_buf->status = IB_SUCCESS;\r
+       }\r
+       HCA_EXIT( HCA_DBG_SHIM);\r
+       return IB_SUCCESS;\r
+\r
+cleanup_qp:\r
+       if (send_sge_p) cl_free( send_sge_p);\r
+       if (recv_sge_p) cl_free( recv_sge_p);\r
+       if( !(p_umv_buf && p_umv_buf->command) )\r
+               THHUL_qpm_destroy_qp_done(hobul_p->hhul_hndl, hhul_qp_hndl);\r
+\r
+cleanup:\r
+       if( !(p_umv_buf && p_umv_buf->command) && qp_ul_resources_p )\r
+               cl_free( qp_ul_resources_p);\r
+       if( p_umv_buf && p_umv_buf->command )\r
+       {\r
+               p_umv_buf->status = status;\r
+       }\r
+       HCA_PRINT(TRACE_LEVEL_ERROR  ,HCA_DBG_SHIM  ,("status %d\n", status));\r
+       HCA_EXIT( HCA_DBG_SHIM);\r
+       return status;\r
+#endif \r
+}\r
+\r
+ib_api_status_t\r
+mlnx_create_qp (\r
+       IN              const   ib_pd_handle_t                          h_pd,\r
+       IN              const   void                                            *qp_context,\r
+       IN              const   ib_qp_create_t                          *p_create_attr,\r
+               OUT                     ib_qp_attr_t                            *p_qp_attr,\r
+               OUT                     ib_qp_handle_t                          *ph_qp,\r
+       IN      OUT                     ci_umv_buf_t                            *p_umv_buf )\r
+{\r
+#ifndef WIN_TO_BE_CHANGED\r
+\r
+       ib_api_status_t         status;\r
+       //NB: the algorithm of mthca_alloc_sqp() requires port_num.\r
+       // The PRM states that special QPs are created in pairs, so\r
+       // it looks like we can always use port_num = 1 here\r
+       uint8_t port_num = 1;\r
+\r
+       HCA_ENTER(HCA_DBG_QP);\r
+\r
+       status = _create_qp( h_pd, port_num,\r
+               qp_context, p_create_attr, p_qp_attr, ph_qp, p_umv_buf );\r
+               \r
+       HCA_PRINT_EXIT(TRACE_LEVEL_ERROR ,HCA_DBG_QP ,("completes with ERROR status %s\n", ib_get_err_str(status)));\r
+       return status;\r
+                                                                                                                                               \r
+#else\r
+       ib_api_status_t                 status;\r
+       ib_qp_handle_t                  h_qp;\r
+\r
+       uint32_t                                hca_idx = PD_HCA_FROM_HNDL(h_pd);\r
+       uint32_t                                pd_idx  = PD_NUM_FROM_HNDL(h_pd);\r
+       uint32_t                                qp_num;\r
+       uint32_t                                qp_idx;\r
+       uint32_t                                send_cq_num;\r
+       uint32_t                                send_cq_idx;\r
+       uint32_t                                recv_cq_num;\r
+       uint32_t                                recv_cq_idx;\r
+       mlnx_hobul_t                    *hobul_p;\r
+       HH_hca_dev_t                    *hca_ul_info;\r
+       HH_qp_init_attr_t               hh_qp_init_attr;\r
+       HHUL_qp_init_attr_t             ul_qp_init_attr;\r
+       HHUL_qp_hndl_t                  hhul_qp_hndl = NULL;\r
+       VAPI_qp_cap_t                   hh_qp_cap;\r
+       void                                    *qp_ul_resources_p = NULL;\r
+       VAPI_sg_lst_entry_t             *send_sge_p = NULL;\r
+       VAPI_sg_lst_entry_t             *recv_sge_p = NULL;\r
+       uint32_t                                num_sge;\r
+\r
+       HCA_ENTER( HCA_DBG_QP);\r
+\r
+       VALIDATE_INDEX(hca_idx, MLNX_MAX_HCA, IB_INVALID_CA_HANDLE, cleanup);\r
+       hobul_p = mlnx_hobul_array[hca_idx];\r
+       if (NULL == hobul_p) {\r
+               status = IB_INVALID_PD_HANDLE;\r
+               goto cleanup;\r
+       }\r
+       VALIDATE_INDEX(pd_idx, hobul_p->max_pd, IB_INVALID_PD_HANDLE, cleanup);\r
+       if (E_MARK_PD != hobul_p->pd_info_tbl[pd_idx].mark) {\r
+               status =  IB_INVALID_PD_HANDLE;\r
+               goto cleanup;\r
+       }\r
+\r
+       hca_ul_info = (HH_hca_dev_t *)hobul_p->hh_hndl;\r
+       if (NULL == hca_ul_info) {\r
+               status =  IB_INVALID_PD_HANDLE;\r
+               goto cleanup;\r
+       }\r
+\r
+       // The create attributes must be provided\r
+       if (!p_create_attr) {\r
+               status =  IB_INVALID_PARAMETER;\r
+               goto cleanup;\r
+       }\r
+\r
+       // convert input parameters\r
+       cl_memclr(&ul_qp_init_attr, sizeof(ul_qp_init_attr));\r
+       mlnx_conv_qp_create_attr(p_create_attr, &ul_qp_init_attr, NULL);\r
+       send_cq_num = CQ_NUM_FROM_HNDL(p_create_attr->h_sq_cq);\r
+       recv_cq_num = CQ_NUM_FROM_HNDL(p_create_attr->h_rq_cq);\r
+       send_cq_idx = send_cq_num & hobul_p->cq_idx_mask;\r
+       recv_cq_idx = recv_cq_num & hobul_p->cq_idx_mask;\r
+       VALIDATE_INDEX(send_cq_idx, hobul_p->max_cq, IB_INVALID_CQ_HANDLE, cleanup);\r
+       if ( E_MARK_CQ != hobul_p->cq_info_tbl[send_cq_idx].mark) {\r
+               status =  IB_INVALID_CQ_HANDLE;\r
+               goto cleanup;\r
+       }\r
+       VALIDATE_INDEX(recv_cq_idx, hobul_p->max_cq, IB_INVALID_CQ_HANDLE, cleanup);\r
+       if ( E_MARK_CQ != hobul_p->cq_info_tbl[recv_cq_idx].mark) {\r
+               status =  IB_INVALID_CQ_HANDLE;\r
+               goto cleanup;\r
+       }\r
+\r
+       ul_qp_init_attr.pd    = hobul_p->pd_info_tbl[pd_idx].hhul_pd_hndl;\r
+       ul_qp_init_attr.sq_cq = hobul_p->cq_info_tbl[send_cq_idx].hhul_cq_hndl;\r
+       ul_qp_init_attr.rq_cq = hobul_p->cq_info_tbl[recv_cq_idx].hhul_cq_hndl;\r
+\r
+       if( p_umv_buf && p_umv_buf->command )\r
+       {\r
+               // For user mode calls - obtain and verify the vendor information\r
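+               // the extra uint32_t in input_size carries the qp_num that is copied\r
+               // back to the user buffer on success (see the copy-back below)\r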
+               if ((p_umv_buf->input_size - sizeof (uint32_t)) != \r
+                       hca_ul_info->qp_ul_resources_sz ||\r
+                       NULL == p_umv_buf->p_inout_buf) {\r
+                               status = IB_INVALID_PARAMETER;\r
+                               goto cleanup;\r
+                       }\r
+                       qp_ul_resources_p = (void *)p_umv_buf->p_inout_buf;\r
+\r
+       } else {\r
+               // for kernel mode calls - allocate app resources. Use prep->call->done sequence\r
+               qp_ul_resources_p = cl_zalloc( hca_ul_info->qp_ul_resources_sz);\r
+               if (!qp_ul_resources_p) {\r
+                       status = IB_INSUFFICIENT_MEMORY;\r
+                       goto cleanup;\r
+               }\r
+\r
+               if (HH_OK != THHUL_qpm_create_qp_prep(hobul_p->hhul_hndl, &ul_qp_init_attr, &hhul_qp_hndl, &hh_qp_cap, qp_ul_resources_p)) {\r
+                       status = IB_ERROR;\r
+                       goto cleanup;\r
+               }\r
+               // TBD: if not the same, report an error to IBAL\r
+               ul_qp_init_attr.qp_cap = hh_qp_cap;  // struct assign\r
+       }\r
+\r
+       // Convert HHUL to HH structure (for HH create_qp)\r
+       hh_qp_init_attr.pd = pd_idx;\r
+       hh_qp_init_attr.rdd = 0; // TBD: RDD\r
+       if( ul_qp_init_attr.srq != HHUL_INVAL_SRQ_HNDL )\r
+       {\r
+               // TBD: HH handle from HHUL handle.\r
+               CL_ASSERT( ul_qp_init_attr.srq == HHUL_INVAL_SRQ_HNDL );\r
+       }\r
+       else\r
+       {\r
+               hh_qp_init_attr.srq = HH_INVAL_SRQ_HNDL;\r
+       }\r
+       hh_qp_init_attr.sq_cq = send_cq_num;\r
+       hh_qp_init_attr.rq_cq = recv_cq_num;\r
+       hh_qp_init_attr.sq_sig_type = ul_qp_init_attr.sq_sig_type;\r
+       hh_qp_init_attr.rq_sig_type = ul_qp_init_attr.rq_sig_type;\r
+       hh_qp_init_attr.ts_type = ul_qp_init_attr.ts_type;\r
+       hh_qp_init_attr.qp_cap  = ul_qp_init_attr.qp_cap; // struct assign\r
+\r
+       // Allocate the QP (cmdif)\r
+       if (HH_OK != THH_hob_create_qp(hobul_p->hh_hndl, &hh_qp_init_attr, qp_ul_resources_p, &qp_num)) {\r
+               status = IB_INSUFFICIENT_RESOURCES;\r
+               goto cleanup_qp;\r
+       }\r
+\r
+       if( !(p_umv_buf && p_umv_buf->command) )\r
+       {\r
+               // Manage user level resources\r
+               if (HH_OK != THHUL_qpm_create_qp_done(hobul_p->hhul_hndl, hhul_qp_hndl, qp_num, qp_ul_resources_p)) {\r
+                       THH_hob_destroy_qp(hobul_p->hh_hndl, qp_num);\r
+                       status = IB_ERROR;\r
+                       goto cleanup_qp;\r
+               }\r
+\r
+               // Create SQ and RQ iov\r
+               num_sge = ul_qp_init_attr.qp_cap.max_sg_size_sq;\r
+               send_sge_p = cl_zalloc( num_sge * sizeof(VAPI_sg_lst_entry_t));\r
+               if (!send_sge_p) {\r
+                       status = IB_INSUFFICIENT_MEMORY;\r
+                       goto cleanup_qp;\r
+               }\r
+\r
+               num_sge = ul_qp_init_attr.qp_cap.max_sg_size_rq;\r
+               recv_sge_p = cl_zalloc( num_sge * sizeof(VAPI_sg_lst_entry_t));\r
+               if (!recv_sge_p) {\r
+                       status = IB_INSUFFICIENT_MEMORY;\r
+                       goto cleanup_qp;\r
+               }\r
+       }\r
+\r
+       // Save data refs for future use\r
+       qp_idx = qp_num & hobul_p->qp_idx_mask;\r
+       VALIDATE_INDEX(qp_idx, hobul_p->max_qp, IB_ERROR, cleanup_qp);\r
+       HCA_PRINT(TRACE_LEVEL_INFORMATION, HCA_DBG_QP,("hobul_p 0x%p mask 0x%x qp_idx 0x%x qp_num 0x%x\n",\r
+               hobul_p, hobul_p->qp_idx_mask, qp_idx, qp_num));\r
+\r
+       h_qp = (ib_qp_handle_t)QP_HNDL_FROM_QP(qp_idx);\r
+       cl_mutex_acquire(&h_qp->mutex);\r
+       h_qp->pd_num                    = pd_idx;\r
+       h_qp->hhul_qp_hndl              = hhul_qp_hndl;\r
+       h_qp->qp_type                   = p_create_attr->qp_type;\r
+       h_qp->sq_signaled               = p_create_attr->sq_signaled;\r
+       h_qp->qp_context                = qp_context;\r
+       h_qp->qp_ul_resources_p = qp_ul_resources_p;\r
+       h_qp->sq_size                   = ul_qp_init_attr.qp_cap.max_sg_size_sq;\r
+       h_qp->rq_size                   = ul_qp_init_attr.qp_cap.max_sg_size_rq;\r
+       h_qp->send_sge_p                = send_sge_p;\r
+       h_qp->recv_sge_p                = recv_sge_p;\r
+       h_qp->qp_num                    = qp_num;\r
+       h_qp->h_sq_cq                   = &hobul_p->cq_info_tbl[send_cq_idx];\r
+       h_qp->h_rq_cq                   = &hobul_p->cq_info_tbl[recv_cq_idx];\r
+       h_qp->kernel_mode               = !(p_umv_buf && p_umv_buf->command);\r
+       h_qp->mark                              = E_MARK_QP;\r
+       HCA_PRINT(TRACE_LEVEL_INFORMATION, HCA_DBG_QP,("qp num 0x%x idx 0x%x cq_s 0x%x cq_r 0x%x\n",\r
+               qp_num, qp_idx, send_cq_idx, recv_cq_idx));\r
+       cl_mutex_release(&h_qp->mutex);\r
+       // Update PD object count\r
+       cl_atomic_inc(&hobul_p->pd_info_tbl[pd_idx].count);\r
+       HCA_PRINT(TRACE_LEVEL_INFORMATION, HCA_DBG_QP,("pd %d count %d\n", pd_idx, hobul_p->pd_info_tbl[pd_idx].count));\r
+\r
+       // Query QP to obtain requested attributes\r
+       if (p_qp_attr) {\r
+               if (IB_SUCCESS != (status = mlnx_query_qp (h_qp, p_qp_attr, p_umv_buf)))\r
+               {\r
+                       if( !(p_umv_buf && p_umv_buf->command) )\r
+                               goto cleanup_qp;\r
+                       else\r
+                               goto cleanup;\r
+               }\r
+       }\r
+\r
+       if (ph_qp) *ph_qp = h_qp;\r
+       if( p_umv_buf && p_umv_buf->command )\r
+       {\r
+               p_umv_buf->output_size = p_umv_buf->input_size;\r
+               p_umv_buf->status = IB_SUCCESS;\r
+               /* \r
+               * Copy the qp_num back to the user\r
+               */\r
+               cl_memcpy (((uint8_t* __ptr64)p_umv_buf->p_inout_buf + hca_ul_info->qp_ul_resources_sz),\r
+                       &qp_num, sizeof (qp_num));\r
+       }\r
+       HCA_EXIT( HCA_DBG_QP);\r
+       return IB_SUCCESS;\r
+\r
+cleanup_qp:\r
+       if (send_sge_p) cl_free( send_sge_p);\r
+       if (recv_sge_p) cl_free( recv_sge_p);\r
+       if( !(p_umv_buf && p_umv_buf->command) )\r
+               THHUL_qpm_destroy_qp_done(hobul_p->hhul_hndl, hhul_qp_hndl);\r
+\r
+cleanup:\r
+       if( !(p_umv_buf && p_umv_buf->command) && qp_ul_resources_p)\r
+               cl_free( qp_ul_resources_p);\r
+       if( p_umv_buf && p_umv_buf->command )\r
+       {\r
+               p_umv_buf->status = status;\r
+       }\r
+       HCA_PRINT_EXIT(TRACE_LEVEL_ERROR ,HCA_DBG_QP ,("status %d\n", status));\r
+       return status;\r
+#endif \r
+}\r
+\r
+ib_api_status_t\r
+mlnx_modify_qp (\r
+       IN              const   ib_qp_handle_t                          h_qp,\r
+       IN              const   ib_qp_mod_t                                     *p_modify_attr,\r
+               OUT                     ib_qp_attr_t                            *p_qp_attr OPTIONAL,\r
+       IN      OUT                     ci_umv_buf_t                            *p_umv_buf OPTIONAL )\r
+{\r
+#ifndef WIN_TO_BE_CHANGED\r
+\r
+       ib_api_status_t         status;\r
+       int err;\r
+       struct ib_qp_attr qp_attr;\r
+       int qp_attr_mask;\r
+       struct ib_qp *ib_qp_p = (struct ib_qp *)h_qp;\r
+\r
+       HCA_ENTER(HCA_DBG_QP);\r
+\r
+       // sanity checks\r
+       if( p_umv_buf && p_umv_buf->command ) {\r
+               // sanity checks \r
+               if (p_umv_buf->output_size < sizeof(struct ibv_modify_qp_resp) ||\r
+                       !p_umv_buf->p_inout_buf) {\r
+                       status = IB_INVALID_PARAMETER;\r
+                       goto err_inval_params;\r
+               }\r
+       }\r
+       \r
+       // fill parameters \r
+       status = mlnx_conv_qp_modify_attr( ib_qp_p, ib_qp_p->qp_type, \r
+               p_modify_attr,  &qp_attr, &qp_attr_mask );\r
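+       // IB_NOT_DONE from the conversion means there is nothing to modify:\r
+       // skip ibv_modify_qp and fall through to the optional query below\r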
+       if (status == IB_NOT_DONE)\r
+               goto query_qp;\r
+       if (status != IB_SUCCESS ) \r
+               goto err_mode_unsupported;\r
+\r
+       // modify QP\r
+       err = ibv_modify_qp(ib_qp_p, &qp_attr, qp_attr_mask);\r
+       if (err) {\r
+               HCA_PRINT(TRACE_LEVEL_ERROR  , HCA_DBG_QP ,("ibv_modify_qp failed (%d)\n", err));\r
+               status = errno_to_iberr(err);\r
+               goto err_modify_qp;\r
+       }\r
+\r
+       // Query QP to obtain requested attributes\r
+query_qp:      \r
+       if (p_qp_attr) {\r
+               status = mlnx_query_qp ((ib_qp_handle_t)ib_qp_p, p_qp_attr, p_umv_buf);\r
+               if (status != IB_SUCCESS)\r
+                               goto err_query_qp;\r
+       }\r
+       \r
+       if( p_umv_buf && p_umv_buf->command )\r
+       {\r
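+               // return the resulting QP state and the applied attribute mask to user mode\r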
+               struct ibv_modify_qp_resp resp;\r
+               resp.attr_mask = qp_attr_mask;\r
+               resp.qp_state = qp_attr.qp_state;\r
+               err = ib_copy_to_umv_buf(p_umv_buf, &resp, sizeof(struct ibv_modify_qp_resp));\r
+               if (err) {\r
+                       HCA_PRINT(TRACE_LEVEL_ERROR  , HCA_DBG_SHIM  ,("ib_copy_to_umv_buf failed (%d)\n", err));\r
+                       status = errno_to_iberr(err);\r
+                       goto err_copy;\r
+               }\r
+       }\r
+\r
+       status = IB_SUCCESS;\r
+\r
+err_copy:      \r
+err_query_qp:\r
+err_modify_qp: \r
+err_mode_unsupported:\r
+err_inval_params:\r
+       if (p_umv_buf && p_umv_buf->command) \r
+               p_umv_buf->status = status;\r
+       HCA_PRINT_EXIT(TRACE_LEVEL_ERROR  , HCA_DBG_SHIM  ,("completes with ERROR status %s\n", ib_get_err_str(status)));\r
+       return status;\r
+\r
+#else\r
+       ib_api_status_t         status;\r
+\r
+       uint32_t                        hca_idx = QP_HCA_FROM_HNDL(h_qp);\r
+       uint32_t                        qp_num  = QP_NUM_FROM_HNDL(h_qp);\r
+       uint32_t                        qp_idx  = 0;\r
+       mlnx_hobul_t            *hobul_p;\r
+       HHUL_qp_hndl_t          hhul_qp_hndl;\r
+       VAPI_qp_attr_mask_t     hh_qp_attr_mask;\r
+       VAPI_qp_attr_t          hh_qp_attr;\r
+       VAPI_qp_state_t         hh_qp_state;\r
+\r
+       HCA_ENTER( HCA_DBG_QP);\r
+\r
+       VALIDATE_INDEX(hca_idx, MLNX_MAX_HCA, IB_INVALID_CA_HANDLE, cleanup);\r
+       hobul_p = mlnx_hobul_array[hca_idx];\r
+       if (NULL == hobul_p) {\r
+               status = IB_INVALID_QP_HANDLE;\r
+               goto cleanup;\r
+       }\r
+\r
+       qp_idx = qp_num & hobul_p->qp_idx_mask;\r
+       VALIDATE_INDEX(qp_idx, hobul_p->max_qp, IB_INVALID_QP_HANDLE, cleanup);\r
+       if ( E_MARK_QP != hobul_p->qp_info_tbl[qp_idx].mark) {\r
+               status =  IB_INVALID_QP_HANDLE;\r
+               goto cleanup;\r
+       }\r
+\r
+       HCA_PRINT(TRACE_LEVEL_INFORMATION, HCA_DBG_QP, \r
+               ("Before acquire mutex to modify qp_idx 0x%x\n",\r
+               qp_idx));\r
+\r
+       cl_mutex_acquire(&hobul_p->qp_info_tbl[qp_idx].mutex);\r
+\r
+       hhul_qp_hndl = hobul_p->qp_info_tbl[qp_idx].hhul_qp_hndl;\r
+\r
+       // Obtain current state of QP\r
+       if (HH_OK != THH_hob_query_qp(hobul_p->hh_hndl, hobul_p->qp_info_tbl[qp_idx].qp_num, &hh_qp_attr))\r
+       {\r
+               status = IB_ERROR;\r
+               goto cleanup_locked;\r
+       }\r
+       hh_qp_state = hh_qp_attr.qp_state; // The current (pre-modify) state\r
+\r
+       // Convert the input parameters. Use query result as default (no cl_memset())\r
+       // cl_memclr(&hh_qp_attr, sizeof(hh_qp_attr));\r
+       status = mlnx_conv_qp_modify_attr(hobul_p->hh_hndl,\r
+               hobul_p->qp_info_tbl[qp_idx].qp_type,\r
+               p_modify_attr, &hh_qp_attr, &hh_qp_attr_mask);\r
+       if( status != IB_SUCCESS )\r
+               goto cleanup_locked;\r
+\r
+       if (HH_OK != THH_hob_modify_qp(hobul_p->hh_hndl,\r
+               hobul_p->qp_info_tbl[qp_idx].qp_num,\r
+               hh_qp_state, &hh_qp_attr, &hh_qp_attr_mask))\r
+       {\r
+               status = IB_ERROR;\r
+               goto cleanup_locked;\r
+       }\r
+\r
+       HCA_PRINT(TRACE_LEVEL_INFORMATION, HCA_DBG_QP, \r
+               ("After hob_modify_qp qp_idx 0x%x k_mod %d\n", \r
+               qp_idx, hobul_p->qp_info_tbl[qp_idx].kernel_mode));\r
+\r
+       // Notify HHUL of the new (post-modify) state. This is done for kernel-mode calls only\r
+       if (hobul_p->qp_info_tbl[qp_idx].kernel_mode) {\r
+               if (HH_OK != THHUL_qpm_modify_qp_done(hobul_p->hhul_hndl, hhul_qp_hndl, hh_qp_attr.qp_state))\r
+               {\r
+                       status = IB_ERROR;\r
+                       goto cleanup_locked;\r
+               } \r
+       } \r
+       cl_mutex_release(&hobul_p->qp_info_tbl[qp_idx].mutex);\r
+\r
+       if ((p_qp_attr) && !(p_umv_buf && p_umv_buf->command)) {\r
+               if (IB_SUCCESS != (status = mlnx_query_qp (h_qp, p_qp_attr, p_umv_buf))) {\r
+                       goto cleanup;\r
+               }\r
+       }\r
+\r
+       if ( p_umv_buf && p_umv_buf->command && (! hobul_p->qp_info_tbl[qp_idx].kernel_mode)) {\r
+               HCA_PRINT(TRACE_LEVEL_INFORMATION, HCA_DBG_QP, \r
+                       ("mod_qp qp_idx %d umv_buf %p inout_buf %p\n", \r
+                       qp_idx, p_umv_buf, p_umv_buf->p_inout_buf));\r
+               if (p_umv_buf->p_inout_buf) {\r
+                       p_umv_buf->output_size = sizeof (VAPI_qp_state_t);\r
+                       cl_memcpy (p_umv_buf->p_inout_buf, &(hh_qp_attr.qp_state), \r
+                               (size_t)p_umv_buf->output_size);\r
+                       p_umv_buf->status = IB_SUCCESS;\r
+               }\r
+       }\r
+       HCA_EXIT( HCA_DBG_QP);\r
+       return IB_SUCCESS;\r
+\r
+\r
+cleanup_locked:\r
+       cl_mutex_release(&hobul_p->qp_info_tbl[qp_idx].mutex);\r
+\r
+cleanup:\r
+       if( p_umv_buf && p_umv_buf->command )\r
+       {\r
+               p_umv_buf->status = status;\r
+       }\r
+       HCA_PRINT_EXIT(TRACE_LEVEL_ERROR  ,HCA_DBG_QP  ,("status %d\n", status));\r
+       return status;\r
+#endif \r
+}\r
+\r
+ib_api_status_t\r
+mlnx_query_qp (\r
+       IN              const   ib_qp_handle_t                          h_qp,\r
+               OUT                     ib_qp_attr_t                            *p_qp_attr,\r
+       IN      OUT                     ci_umv_buf_t                            *p_umv_buf )\r
+{\r
+#ifndef WIN_TO_BE_CHANGED\r
+\r
+       ib_api_status_t         status = IB_SUCCESS;\r
+       struct ib_qp *ib_qp_p = (struct ib_qp *)h_qp;\r
+       struct mthca_qp *qp_p = (struct mthca_qp *)ib_qp_p;\r
+\r
+       UNREFERENCED_PARAMETER(p_umv_buf);\r
+       \r
+       HCA_ENTER( HCA_DBG_QP);\r
+       // sanity checks\r
+\r
+       // clean the structure\r
+       RtlZeroMemory( p_qp_attr, sizeof *p_qp_attr );\r
+       \r
+       // fill the structure\r
+       //TODO: this function should be implemented via ibv_query_qp, which is not yet supported\r
+       p_qp_attr->h_pd                                         = (ib_pd_handle_t)qp_p->ibqp.pd;\r
+       p_qp_attr->qp_type                              = qp_p->ibqp.qp_type;\r
+       p_qp_attr->sq_max_inline                = qp_p->qp_init_attr.cap.max_inline_data;\r
+       p_qp_attr->sq_depth                             = qp_p->qp_init_attr.cap.max_send_wr;\r
+       p_qp_attr->rq_depth                             = qp_p->qp_init_attr.cap.max_recv_wr;\r
+       p_qp_attr->sq_sge                                       = qp_p->qp_init_attr.cap.max_send_sge;\r
+       p_qp_attr->rq_sge                                       = qp_p->qp_init_attr.cap.max_recv_sge;\r
+       p_qp_attr->resp_res                             = qp_p->resp_depth;\r
+       p_qp_attr->h_sq_cq                              = (ib_cq_handle_t)qp_p->ibqp.send_cq;\r
+       p_qp_attr->h_rq_cq                              = (ib_cq_handle_t)qp_p->ibqp.recv_cq;\r
+       p_qp_attr->sq_signaled                  = qp_p->sq_policy == IB_SIGNAL_ALL_WR;\r
+       p_qp_attr->state                                                = mlnx_qps_to_ibal( qp_p->state );\r
+       p_qp_attr->num                                          = cl_hton32(qp_p->ibqp.qp_num);\r
+\r
+#if 0          \r
+//TODO: the following fields cannot be filled without query_qp support in MTHCA\r
+       p_qp_attr->access_ctrl                  = qp_p->\r
+       p_qp_attr->pkey_index                   = qp_p->\r
+       p_qp_attr->dest_num                             = qp_p-\r
+       p_qp_attr->init_depth                   = qp_p-\r
+       p_qp_attr->qkey                                         = qp_p-\r
+       p_qp_attr->sq_psn                                       = qp_p-\r
+       p_qp_attr->rq_psn                                       = qp_p-\r
+       p_qp_attr->primary_port         = qp_p-\r
+       p_qp_attr->alternate_port               = qp_p-\r
+       p_qp_attr->primary_av                   = qp_p-\r
+       p_qp_attr->alternate_av                 = qp_p-\r
+       p_qp_attr->apm_state                    = qp_p-\r
+#endif         \r
+\r
+       status = IB_SUCCESS;\r
+\r
+       HCA_PRINT_EXIT(TRACE_LEVEL_ERROR , HCA_DBG_QP ,("completes with ERROR status %s\n", ib_get_err_str(status)));\r
+       return status;\r
+\r
+\r
+#else\r
+       ib_api_status_t         status;\r
+\r
+       uint32_t                        hca_idx = QP_HCA_FROM_HNDL(h_qp);\r
+       uint32_t                        qp_num  = QP_NUM_FROM_HNDL(h_qp);\r
+       uint32_t                        qp_idx  = 0;\r
+       mlnx_hobul_t            *hobul_p;\r
+       VAPI_qp_attr_t          hh_qp_attr;\r
+\r
+       HCA_ENTER( HCA_DBG_QP);\r
+\r
+       VALIDATE_INDEX(hca_idx, MLNX_MAX_HCA, IB_INVALID_CA_HANDLE, cleanup);\r
+       hobul_p = mlnx_hobul_array[hca_idx];\r
+       if (NULL == hobul_p) {\r
+               status = IB_INVALID_QP_HANDLE;\r
+               goto cleanup;\r
+       }\r
+\r
+       qp_idx = qp_num & hobul_p->qp_idx_mask;\r
+       VALIDATE_INDEX(qp_idx, hobul_p->max_qp, IB_INVALID_QP_HANDLE, cleanup);\r
+       if ( E_MARK_QP != hobul_p->qp_info_tbl[qp_idx].mark) {\r
+               status =  IB_INVALID_QP_HANDLE;\r
+               goto cleanup;\r
+       }\r
+\r
+       cl_mutex_acquire(&h_qp->mutex);\r
+\r
+       if (HH_OK != THH_hob_query_qp(hobul_p->hh_hndl, h_qp->qp_num, &hh_qp_attr)) {\r
+               status = IB_ERROR;\r
+               goto cleanup_locked;\r
+       }\r
+\r
+       // Convert query result into IBAL structure (no cl_memset())\r
+       mlnx_conv_vapi_qp_attr(hobul_p->hh_hndl, &hh_qp_attr, p_qp_attr);\r
+       p_qp_attr->qp_type = h_qp->qp_type;\r
+       p_qp_attr->h_pd    = (ib_pd_handle_t)PD_HNDL_FROM_PD(h_qp->pd_num);\r
+       p_qp_attr->h_sq_cq = h_qp->h_sq_cq;\r
+       p_qp_attr->h_rq_cq = h_qp->h_rq_cq;\r
+       p_qp_attr->sq_signaled = h_qp->sq_signaled;\r
+\r
+       cl_mutex_release(&h_qp->mutex);\r
+\r
+       if( p_umv_buf && p_umv_buf->command )\r
+       {\r
+               p_umv_buf->output_size = 0;\r
+               p_umv_buf->status = IB_SUCCESS;\r
+       }\r
+       HCA_EXIT( HCA_DBG_QP);\r
+       return IB_SUCCESS;\r
+\r
+cleanup_locked:\r
+       cl_mutex_release(&hobul_p->qp_info_tbl[qp_idx].mutex);\r
+cleanup:\r
+       if( p_umv_buf && p_umv_buf->command )\r
+       {\r
+               p_umv_buf->output_size = 0;\r
+               p_umv_buf->status = status;\r
+       }\r
+       \r
+       HCA_PRINT_EXIT(TRACE_LEVEL_ERROR  ,HCA_DBG_QP  ,("status %d\n", status));\r
+       return status;\r
+#endif \r
+}\r
+\r
+ib_api_status_t\r
+mlnx_destroy_qp (\r
+       IN              const   ib_qp_handle_t                          h_qp,\r
+       IN              const   uint64_t                                        timewait )\r
+{\r
+#ifndef WIN_TO_BE_CHANGED\r
+\r
+       ib_api_status_t         status;\r
+       int err;\r
+       struct ib_qp *ib_qp_p = (struct ib_qp *)h_qp;\r
+               \r
+       UNUSED_PARAM( timewait );\r
+\r
+       HCA_ENTER( HCA_DBG_QP);\r
+\r
+       HCA_PRINT(TRACE_LEVEL_INFORMATION       ,HCA_DBG_SHIM  ,\r
+               ("qpnum %#x, pcs %p\n", ib_qp_p->qp_num, PsGetCurrentProcess()) );\r
+\r
+       err = ibv_destroy_qp( ib_qp_p );\r
+       if (err) {\r
+               HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_QP ,("ibv_destroy_qp failed (%d)\n", err));\r
+               status = errno_to_iberr(err);\r
+               goto err_destroy_qp;\r
+       }\r
+\r
+       status = IB_SUCCESS;\r
+\r
+err_destroy_qp:\r
+       HCA_PRINT_EXIT(TRACE_LEVEL_ERROR ,HCA_DBG_QP ,("completes with ERROR status %s\n", ib_get_err_str(status)));\r
+       return status;\r
+\r
+#else\r
+\r
+       ib_api_status_t         status;\r
+\r
+       uint32_t                        hca_idx = QP_HCA_FROM_HNDL(h_qp);\r
+       uint32_t                        qp_num  = QP_NUM_FROM_HNDL(h_qp);\r
+       uint32_t                        pd_idx  = 0;\r
+       uint32_t                        qp_idx  = 0;\r
+       mlnx_hobul_t            *hobul_p;\r
+       HHUL_qp_hndl_t          hhul_qp_hndl;\r
+\r
+       UNUSED_PARAM( timewait );\r
+\r
+       HCA_ENTER( HCA_DBG_QP);\r
+       HCA_PRINT(TRACE_LEVEL_INFORMATION, HCA_DBG_QP,("hca %d qp 0x%x\n", hca_idx, qp_num));\r
+\r
+       VALIDATE_INDEX(hca_idx, MLNX_MAX_HCA, IB_INVALID_CA_HANDLE, cleanup);\r
+       hobul_p = mlnx_hobul_array[hca_idx];\r
+       if (NULL == hobul_p) {\r
+               status = IB_INVALID_QP_HANDLE;\r
+               goto cleanup;\r
+       }\r
+\r
+       qp_idx = qp_num & hobul_p->qp_idx_mask;\r
+       HCA_PRINT(TRACE_LEVEL_INFORMATION, HCA_DBG_QP ,("hobul_p 0x%p mask 0x%x qp_idx 0x%x mark %d\n",\r
+               hobul_p, hobul_p->qp_idx_mask, qp_idx, hobul_p->qp_info_tbl[qp_idx].mark));\r
+\r
+       VALIDATE_INDEX(qp_idx, hobul_p->max_qp, IB_INVALID_QP_HANDLE, cleanup);\r
+       if ( E_MARK_QP != hobul_p->qp_info_tbl[qp_idx].mark) {\r
+               if (E_MARK_INVALID == hobul_p->qp_info_tbl[qp_idx].mark) {\r
+                       HCA_PRINT(TRACE_LEVEL_ERROR,HCA_DBG_SHIM,\r
+                               ("completes with ERROR status %s\n", ib_get_err_str(IB_INVALID_QP_HANDLE)));\r
+                       return IB_SUCCESS; // Already freed\r
+               }\r
+               status = IB_INVALID_QP_HANDLE;\r
+               goto cleanup;\r
+       }\r
+\r
+       cl_mutex_acquire(&hobul_p->qp_info_tbl[qp_idx].mutex);\r
+\r
+       hhul_qp_hndl = hobul_p->qp_info_tbl[qp_idx].hhul_qp_hndl;\r
+       pd_idx       = hobul_p->qp_info_tbl[qp_idx].pd_num;\r
+       VALIDATE_INDEX(pd_idx, hobul_p->max_pd, IB_ERROR, cleanup_locked);\r
+\r
+       if (E_MARK_PD != hobul_p->pd_info_tbl[pd_idx].mark) {\r
+               HCA_PRINT(TRACE_LEVEL_INFORMATION, HCA_DBG_QP,("file %s line %d\n", __FILE__, __LINE__));\r
+               HCA_PRINT(TRACE_LEVEL_INFORMATION, HCA_DBG_QP,("pd_idx 0x%x mark %d\n", pd_idx, hobul_p->pd_info_tbl[pd_idx].mark));\r
+               status =  IB_INVALID_PD_HANDLE;\r
+               goto cleanup_locked;\r
+       }\r
+\r
+       HCA_PRINT(TRACE_LEVEL_INFORMATION, HCA_DBG_QP, \r
+               ("Before THH_destroy qp_idx 0x%x k_mod %d pd_idx 0x%x\n",\r
+               qp_idx, hobul_p->qp_info_tbl[qp_idx].kernel_mode, pd_idx));\r
+\r
+       // PREP: no PREP required for destroy_qp\r
+       if (HH_OK != THH_hob_destroy_qp(hobul_p->hh_hndl, qp_num)) {\r
+               status = IB_ERROR;\r
+               goto cleanup_locked;\r
+       }\r
+\r
+       HCA_PRINT(TRACE_LEVEL_INFORMATION, HCA_DBG_QP, \r
+               ("After THH_destroy qp_idx 0x%x k_mod %d pd_idx 0x%x\n",\r
+               qp_idx, hobul_p->qp_info_tbl[qp_idx].kernel_mode, pd_idx));\r
+\r
+       if (hobul_p->qp_info_tbl[qp_idx].kernel_mode) {\r
+               if (HH_OK != THHUL_qpm_destroy_qp_done(hobul_p->hhul_hndl, hhul_qp_hndl)) {\r
+                       status = IB_ERROR;\r
+                       goto cleanup_locked;\r
+               }\r
+               if (hobul_p->qp_info_tbl[qp_idx].qp_ul_resources_p)\r
+                       cl_free( hobul_p->qp_info_tbl[qp_idx].qp_ul_resources_p);\r
+               if (hobul_p->qp_info_tbl[qp_idx].send_sge_p)\r
+                       cl_free( hobul_p->qp_info_tbl[qp_idx].send_sge_p);\r
+               if (hobul_p->qp_info_tbl[qp_idx].recv_sge_p)\r
+                       cl_free( hobul_p->qp_info_tbl[qp_idx].recv_sge_p);\r
+       }\r
+\r
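+       // special (QP0/QP1) QPs raised their CQs' DPC importance at creation;\r
+       // restore MediumImportance once the last special QP on the CQ is destroyed\r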
+       if( h_qp->qp_type == IB_QPT_QP0 || h_qp->qp_type == IB_QPT_QP1 )\r
+       {\r
+               if( !cl_atomic_dec( &h_qp->h_sq_cq->spl_qp_cnt ) )\r
+                       KeSetImportanceDpc( &h_qp->h_sq_cq->dpc, MediumImportance );\r
+               if( !cl_atomic_dec( &h_qp->h_rq_cq->spl_qp_cnt ) )\r
+                       KeSetImportanceDpc( &h_qp->h_rq_cq->dpc, MediumImportance );\r
+       }\r
+\r
+       hobul_p->qp_info_tbl[qp_idx].mark = E_MARK_INVALID;\r
+       hobul_p->qp_info_tbl[qp_idx].qp_ul_resources_p = NULL;\r
+       cl_mutex_release(&hobul_p->qp_info_tbl[qp_idx].mutex);\r
+\r
+       // Update PD object count\r
+       cl_atomic_dec(&hobul_p->pd_info_tbl[pd_idx].count);\r
+       HCA_PRINT(TRACE_LEVEL_INFORMATION, HCA_DBG_QP,("pd %d count %d\n", pd_idx, hobul_p->pd_info_tbl[pd_idx].count));\r
+\r
+       HCA_EXIT( HCA_DBG_QP);\r
+       return IB_SUCCESS;\r
+\r
+cleanup_locked:\r
+       cl_mutex_release(&hobul_p->qp_info_tbl[qp_idx].mutex);\r
+cleanup:\r
+       HCA_PRINT(TRACE_LEVEL_ERROR  ,HCA_DBG_QP  ,("status %d\n",status));\r
+       HCA_EXIT( HCA_DBG_SHIM);\r
+       return status;\r
+#endif \r
+}\r
+\r
+/*\r
+* Completion Queue Management Verbs.\r
+*/\r
+\r
+ib_api_status_t\r
+mlnx_create_cq (\r
+       IN              const   ib_ca_handle_t                          h_ca,\r
+       IN              const   void                                            *cq_context,\r
+       IN      OUT                     uint32_t                                        *p_size,\r
+               OUT                     ib_cq_handle_t                          *ph_cq,\r
+       IN      OUT                     ci_umv_buf_t                            *p_umv_buf )\r
+{\r
+#ifndef WIN_TO_BE_CHANGED\r
+\r
+       int err;\r
+       ib_api_status_t         status;\r
+       struct ib_cq *ib_cq_p;\r
+       struct mthca_cq *cq_p;\r
+       mlnx_hob_t                      *hob_p;\r
+       struct ib_device *ib_dev;\r
+       struct ib_ucontext *context_p;\r
+       \r
+       HCA_ENTER(HCA_DBG_CQ);\r
+\r
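+       // for user-mode (uverbs) calls h_ca actually carries the caller's ib_ucontext;\r
+       // for kernel calls it is the mlnx_hob_t of the HCA\r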
+       if( p_umv_buf && p_umv_buf->command ) {\r
+\r
+               // sanity checks \r
+               if (p_umv_buf->input_size < sizeof(struct ibv_create_cq) ||\r
+                       p_umv_buf->output_size < sizeof(struct ibv_create_cq_resp) ||\r
+                       !p_umv_buf->p_inout_buf) {\r
+                       status = IB_INVALID_PARAMETER;\r
+                       goto err_inval_params;\r
+               }\r
+\r
+               context_p = (struct ib_ucontext *)h_ca;\r
+               hob_p = HOB_FROM_IBDEV(context_p->device);\r
+               ib_dev = context_p->device;\r
+       }\r
+       else {\r
+               hob_p = (mlnx_hob_t *)h_ca;\r
+               context_p = NULL;\r
+               ib_dev = IBDEV_FROM_HOB( hob_p );\r
+       }\r
+\r
+       // allocate cq  \r
+       ib_cq_p = ibv_create_cq(ib_dev, \r
+               cq_comp_handler, cq_event_handler,\r
+               hob_p, *p_size, context_p, p_umv_buf );\r
+       if (IS_ERR(ib_cq_p)) {\r
+               err = PTR_ERR(ib_cq_p);\r
+               HCA_PRINT (TRACE_LEVEL_ERROR ,HCA_DBG_CQ, ("ibv_create_cq failed (%d)\n", err));\r
+               status = errno_to_iberr(err);\r
+               goto err_create_cq;\r
+       }\r
+\r
+       // fill the object\r
+       cq_p = (struct mthca_cq *)ib_cq_p;\r
+       cq_p->cq_context = (void*)cq_context;\r
+       \r
+       // return the result\r
+//     *p_size = *p_size;      // return the same value\r
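+       // return the actual number of CQEs allocated; the provider may have\r
+       // rounded the requested size up\r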
+       *p_size = ib_cq_p->cqe;\r
+\r
+       if (ph_cq) *ph_cq = (ib_cq_handle_t)cq_p;\r
+\r
+       status = IB_SUCCESS;\r
+       \r
+err_create_cq:\r
+err_inval_params:\r
+       if (p_umv_buf && p_umv_buf->command) \r
+               p_umv_buf->status = status;\r
+       HCA_PRINT_EXIT(TRACE_LEVEL_ERROR ,HCA_DBG_CQ ,("completes with ERROR status %s\n", ib_get_err_str(status)));\r
+       return status;\r
+                                                                                                                                               \r
+#else\r
+       ib_api_status_t                 status;\r
+\r
+       mlnx_hob_t                              *hob_p = (mlnx_hob_t *)h_ca;\r
+       uint32_t                                cq_idx;\r
+       uint32_t                                cq_num;\r
+       uint32_t                                cq_size = 0;\r
+       mlnx_hobul_t                    *hobul_p;\r
+       HH_hca_dev_t                    *hca_ul_info;\r
+       HHUL_cq_hndl_t                  hhul_cq_hndl = NULL;\r
+       void                                    *cq_ul_resources_p = NULL;\r
+       MOSAL_protection_ctx_t  prot_ctx;\r
+\r
+       HCA_ENTER( HCA_DBG_CQ);\r
+\r
+       hobul_p = mlnx_hobs_get_hobul(hob_p);\r
+       if (NULL == hobul_p) {\r
+               status = IB_INVALID_CA_HANDLE;\r
+               goto cleanup;\r
+       }\r
+\r
+       hca_ul_info = (HH_hca_dev_t *)hobul_p->hh_hndl;\r
+       if (NULL == hca_ul_info) {\r
+               status =  IB_INVALID_PD_HANDLE;\r
+               goto cleanup;\r
+       }\r
+\r
+       // The size must be provided\r
+       if (!p_size) {\r
+               status =  IB_INVALID_PARAMETER;\r
+               goto cleanup;\r
+       }\r
+       // TBD: verify that the requested number does not exceed the maximum allowed\r
+\r
+       if( p_umv_buf && p_umv_buf->command )\r
+       {\r
+               // For user mode calls - obtain and verify the vendor information\r
+               if ((p_umv_buf->input_size - sizeof (uint32_t))  != \r
+                       hca_ul_info->cq_ul_resources_sz ||\r
+                       NULL == p_umv_buf->p_inout_buf) {\r
+                               status = IB_INVALID_PARAMETER;\r
+                               goto cleanup;\r
+                       }\r
+                       cq_ul_resources_p = (void *)p_umv_buf->p_inout_buf;\r
+\r
+                       /* get the current protection context */ \r
+                       prot_ctx = MOSAL_get_current_prot_ctx();\r
+       } else {\r
+               // for kernel mode calls - allocate app resources. Use prep->call->done sequence\r
+               cq_ul_resources_p = cl_zalloc( hca_ul_info->cq_ul_resources_sz);\r
+               if (!cq_ul_resources_p) {\r
+                       status = IB_INSUFFICIENT_MEMORY;\r
+                       goto cleanup;\r
+               }\r
+               if (HH_OK != THHUL_cqm_create_cq_prep(hobul_p->hhul_hndl, *p_size, &hhul_cq_hndl, &cq_size, cq_ul_resources_p)) {\r
+                       status = IB_ERROR;\r
+                       goto cleanup;\r
+               }\r
+               /* get the current protection context */ \r
+               prot_ctx = MOSAL_get_kernel_prot_ctx();\r
+       }\r
+\r
+       // Allocate the CQ (cmdif)\r
+       if (HH_OK != THH_hob_create_cq(hobul_p->hh_hndl, prot_ctx, cq_ul_resources_p, &cq_num)) {\r
+               status = IB_INSUFFICIENT_RESOURCES;\r
+               goto cleanup_cq;\r
+       }\r
+\r
+       if( !(p_umv_buf && p_umv_buf->command) )\r
+       {\r
+               // Manage user level resources\r
+               if (HH_OK != THHUL_cqm_create_cq_done(hobul_p->hhul_hndl, hhul_cq_hndl, cq_num, cq_ul_resources_p)) {\r
+                       THH_hob_destroy_cq(hobul_p->hh_hndl, cq_num);\r
+                       status = IB_ERROR;\r
+                       goto cleanup_cq;\r
+               }\r
+       }\r
+\r
+       // Save data refs for future use\r
+       cq_idx = cq_num & hobul_p->cq_idx_mask;\r
+       VALIDATE_INDEX(cq_idx, hobul_p->max_cq, IB_ERROR, cleanup_cq);\r
+       cl_mutex_acquire(&hobul_p->cq_info_tbl[cq_idx].mutex);\r
+       hobul_p->cq_info_tbl[cq_idx].hca_idx = hob_p->index;\r
+       hobul_p->cq_info_tbl[cq_idx].cq_num = cq_num;\r
+//     hobul_p->cq_info_tbl[cq_idx].pd_num = pd_idx;\r
+       hobul_p->cq_info_tbl[cq_idx].hhul_cq_hndl = hhul_cq_hndl;\r
+       hobul_p->cq_info_tbl[cq_idx].cq_context = cq_context;\r
+       hobul_p->cq_info_tbl[cq_idx].cq_ul_resources_p = cq_ul_resources_p;\r
+       hobul_p->cq_info_tbl[cq_idx].kernel_mode = !(p_umv_buf && p_umv_buf->command);\r
+       hobul_p->cq_info_tbl[cq_idx].mark = E_MARK_CQ;\r
+       cl_mutex_release(&hobul_p->cq_info_tbl[cq_idx].mutex);\r
+\r
+       // Update CA object count\r
+       cl_atomic_inc(&hobul_p->count);\r
+       HCA_PRINT(TRACE_LEVEL_INFORMATION, HCA_DBG_CQ,("HCA %d count %d\n", h_ca->index, hobul_p->count));\r
+\r
+       *p_size = cq_size;\r
+       if (ph_cq) *ph_cq = (ib_cq_handle_t)CQ_HNDL_FROM_CQ(cq_idx);\r
+\r
+       if( p_umv_buf && p_umv_buf->command )\r
+       {\r
+               p_umv_buf->output_size = p_umv_buf->input_size;\r
+               p_umv_buf->status = IB_SUCCESS;\r
+               /* \r
+               * Copy the cq_num back to the user\r
+               */\r
+               cl_memcpy (((uint8_t* __ptr64)p_umv_buf->p_inout_buf + hca_ul_info->cq_ul_resources_sz),\r
+                       &cq_num, sizeof (cq_num));\r
+       }\r
+       HCA_EXIT( HCA_DBG_SHIM);\r
+       return IB_SUCCESS;\r
+\r
+cleanup_cq:\r
+       THHUL_cqm_destroy_cq_done(hobul_p->hhul_hndl, hhul_cq_hndl);\r
+\r
+cleanup:\r
+       if( !(p_umv_buf && p_umv_buf->command) && cq_ul_resources_p )\r
+               cl_free( cq_ul_resources_p);\r
+       if( p_umv_buf && p_umv_buf->command )\r
+       {\r
+               p_umv_buf->status = status;\r
+       }\r
+\r
+       HCA_PRINT_EXIT(TRACE_LEVEL_ERROR  ,HCA_DBG_CQ  ,("status %d\n",status));\r
+       return status;\r
+#endif \r
+}\r
+\r
+ib_api_status_t\r
+mlnx_resize_cq (\r
+       IN              const   ib_cq_handle_t                          h_cq,\r
+       IN      OUT                     uint32_t                                        *p_size,\r
+       IN      OUT                     ci_umv_buf_t                            *p_umv_buf )\r
+{\r
+#ifndef WIN_TO_BE_CHANGED\r
+       UNREFERENCED_PARAMETER(h_cq);\r
+       UNREFERENCED_PARAMETER(p_size);\r
+       if (p_umv_buf && p_umv_buf->command) {\r
+               p_umv_buf->status = IB_UNSUPPORTED;\r
+       }\r
+       HCA_PRINT(TRACE_LEVEL_ERROR  , HCA_DBG_SHIM  ,("mlnx_resize_cq not implemented\n"));\r
+       return IB_UNSUPPORTED;\r
+#else\r
+       ib_api_status_t         status;\r
+\r
+       uint32_t                        hca_idx = CQ_HCA_FROM_HNDL(h_cq);\r
+       uint32_t                        cq_num  = CQ_NUM_FROM_HNDL(h_cq);\r
+       uint32_t                        cq_idx;\r
+       mlnx_hobul_t            *hobul_p;\r
+\r
+       HHUL_cq_hndl_t          hhul_cq_hndl;\r
+       void                            *cq_ul_resources_p = NULL;\r
+\r
+       HCA_ENTER( HCA_DBG_SHIM);\r
+\r
+       if (!p_size) {\r
+               status = IB_INVALID_PARAMETER;\r
+               goto cleanup;\r
+       }\r
+       VALIDATE_INDEX(hca_idx,   MLNX_MAX_HCA, IB_INVALID_CQ_HANDLE, cleanup);\r
+       hobul_p = mlnx_hobul_array[hca_idx];\r
+       if (NULL == hobul_p) {\r
+               status = IB_INVALID_CQ_HANDLE;\r
+               goto cleanup;\r
+       }\r
+\r
+       cq_idx = cq_num & hobul_p->cq_idx_mask;\r
+       VALIDATE_INDEX(cq_idx, hobul_p->max_cq, IB_INVALID_CQ_HANDLE, cleanup);\r
+       if ( E_MARK_CQ != hobul_p->cq_info_tbl[cq_idx].mark) {\r
+               status =  IB_INVALID_CQ_HANDLE;\r
+               goto cleanup;\r
+       }\r
+\r
+       cl_mutex_acquire(&hobul_p->cq_info_tbl[cq_idx].mutex);\r
+\r
+       hhul_cq_hndl = hobul_p->cq_info_tbl[cq_idx].hhul_cq_hndl;\r
+\r
+       if( p_umv_buf && p_umv_buf->command )\r
+       {\r
+               // For user mode calls - obtain and verify the vendor information\r
+               if( p_umv_buf->input_size != hobul_p->cq_ul_resources_sz ||\r
+                       NULL == p_umv_buf->p_inout_buf )\r
+               {\r
+                       status = IB_INVALID_PARAMETER;\r
+                       goto cleanup_locked;\r
+               }\r
+               cq_ul_resources_p = (void *)p_umv_buf->p_inout_buf;\r
+\r
+       } else {\r
+               // for kernel mode calls - obtain the saved app resources. Use prep->call->done sequence\r
+               cq_ul_resources_p = hobul_p->cq_info_tbl[cq_idx].cq_ul_resources_p;\r
+\r
+               if (HH_OK != THHUL_cqm_resize_cq_prep(\r
+                       hobul_p->hhul_hndl, hhul_cq_hndl,\r
+                       *p_size, p_size, cq_ul_resources_p))\r
+               {\r
+                       status = IB_ERROR;\r
+                       goto cleanup_locked;\r
+               }\r
+       }\r
+\r
+       if (HH_OK != THH_hob_resize_cq(hobul_p->hh_hndl, cq_num, cq_ul_resources_p)) {\r
+               status = IB_ERROR;\r
+               goto cleanup_locked;\r
+       }\r
+\r
+       // DONE: when called on behalf of kernel module\r
+       if (hobul_p->cq_info_tbl[cq_idx].kernel_mode) {\r
+               if (HH_OK != THHUL_cqm_resize_cq_done( hobul_p->hhul_hndl, hhul_cq_hndl, cq_ul_resources_p))\r
+               {\r
+                       status = IB_ERROR;\r
+                       goto cleanup_locked;\r
+               }\r
+       }\r
+\r
+       cl_mutex_release(&hobul_p->cq_info_tbl[cq_idx].mutex);\r
+\r
+       if( p_umv_buf && p_umv_buf->command )\r
+       {\r
+               p_umv_buf->output_size = p_umv_buf->input_size;\r
+               p_umv_buf->status = IB_SUCCESS;\r
+       }\r
+       HCA_EXIT( HCA_DBG_SHIM);\r
+       return IB_SUCCESS;\r
+\r
+cleanup_locked:\r
+       cl_mutex_release(&hobul_p->cq_info_tbl[cq_idx].mutex);\r
+\r
+cleanup:\r
+       if( p_umv_buf && p_umv_buf->command )\r
+       {\r
+               p_umv_buf->status = status;\r
+       }\r
+       HCA_PRINT(TRACE_LEVEL_ERROR  ,HCA_DBG_SHIM ,("status %d\n", status));\r
+       HCA_EXIT( HCA_DBG_SHIM);\r
+       return status;\r
+#endif \r
+}\r
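
The kernel-mode branch above follows the driver's prep -> call -> done convention noted in its comment: THHUL first prepares the new user-level CQ buffer in software, THH then performs the privileged resize against the HCA, and THHUL commits the new buffer only once the hardware call has succeeded (for user-mode callers the done step runs in the UVP instead). A condensed sketch of just that sequence, reusing the calls from the function above; the wrapper name is illustrative, and locking, validation and the user-mode branch are omitted:

static ib_api_status_t resize_cq_kernel_sketch(
        IN      mlnx_hobul_t            *hobul_p,
        IN      HHUL_cq_hndl_t          hhul_cq_hndl,
        IN      uint32_t                cq_num,
        IN OUT  uint32_t                *p_size,
        IN      void                    *cq_ul_resources_p )
{
        /* 1. prep: compute the new user-level CQ buffer layout (software only) */
        if (HH_OK != THHUL_cqm_resize_cq_prep( hobul_p->hhul_hndl, hhul_cq_hndl,
                *p_size, p_size, cq_ul_resources_p))
                return IB_ERROR;

        /* 2. call: privileged resize against the HCA */
        if (HH_OK != THH_hob_resize_cq( hobul_p->hh_hndl, cq_num, cq_ul_resources_p))
                return IB_ERROR;

        /* 3. done: switch over to the new buffer */
        if (HH_OK != THHUL_cqm_resize_cq_done( hobul_p->hhul_hndl, hhul_cq_hndl,
                cq_ul_resources_p))
                return IB_ERROR;

        return IB_SUCCESS;
}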
+\r
+ib_api_status_t\r
+mlnx_query_cq (\r
+       IN              const   ib_cq_handle_t                          h_cq,\r
+               OUT                     uint32_t                                        *p_size,\r
+       IN      OUT                     ci_umv_buf_t                            *p_umv_buf )\r
+{\r
+#ifndef WIN_TO_BE_CHANGED\r
+       UNREFERENCED_PARAMETER(h_cq);\r
+       UNREFERENCED_PARAMETER(p_size);\r
+       if (p_umv_buf && p_umv_buf->command) {\r
+               p_umv_buf->status = IB_UNSUPPORTED;\r
+       }\r
+       HCA_PRINT(TRACE_LEVEL_ERROR  , HCA_DBG_SHIM  ,("mlnx_query_cq not implemented\n"));\r
+       return IB_UNSUPPORTED;\r
+#else\r
+       ib_api_status_t         status;\r
+\r
+       uint32_t                        hca_idx = CQ_HCA_FROM_HNDL(h_cq);\r
+       uint32_t                        cq_num  = CQ_NUM_FROM_HNDL(h_cq);\r
+       uint32_t                        cq_idx;\r
+       mlnx_hobul_t            *hobul_p;\r
+       HHUL_cq_hndl_t          hhul_cq_hndl;\r
+\r
+       HCA_ENTER( HCA_DBG_SHIM);\r
+\r
+       if (!p_size) {\r
+               status = IB_INVALID_PARAMETER;\r
+               goto cleanup;\r
+       }\r
+       VALIDATE_INDEX(hca_idx,   MLNX_MAX_HCA, IB_INVALID_CQ_HANDLE, cleanup);\r
+       hobul_p = mlnx_hobul_array[hca_idx];\r
+       if (NULL == hobul_p) {\r
+               status = IB_INVALID_CQ_HANDLE;\r
+               goto cleanup;\r
+       }\r
+\r
+       cq_idx = cq_num & hobul_p->cq_idx_mask;\r
+       VALIDATE_INDEX(cq_idx, hobul_p->max_cq, IB_INVALID_CQ_HANDLE, cleanup);\r
+       if ( E_MARK_CQ != hobul_p->cq_info_tbl[cq_idx].mark) {\r
+               status =  IB_INVALID_CQ_HANDLE;\r
+               goto cleanup;\r
+       }\r
+\r
+       cl_mutex_acquire(&hobul_p->cq_info_tbl[cq_idx].mutex);\r
+\r
+       hhul_cq_hndl = hobul_p->cq_info_tbl[cq_idx].hhul_cq_hndl;\r
+\r
+       if (HH_OK != THH_hob_query_cq(hobul_p->hh_hndl, cq_num, p_size)) {\r
+               status = IB_ERROR;\r
+               goto cleanup_locked;\r
+       }\r
+\r
+       cl_mutex_release(&hobul_p->cq_info_tbl[cq_idx].mutex);\r
+\r
+       if( p_umv_buf && p_umv_buf->command )\r
+       {\r
+               p_umv_buf->output_size = 0;\r
+               p_umv_buf->status = IB_SUCCESS;\r
+       }\r
+       HCA_EXIT( HCA_DBG_SHIM);\r
+       return IB_SUCCESS;\r
+\r
+cleanup_locked:\r
+       cl_mutex_release(&hobul_p->cq_info_tbl[cq_idx].mutex);\r
+\r
+cleanup:\r
+       if( p_umv_buf && p_umv_buf->command )\r
+       {\r
+               p_umv_buf->status = status;\r
+       }\r
+       HCA_PRINT(TRACE_LEVEL_ERROR  ,HCA_DBG_SHIM  ,("status %d\n", status));\r
+       HCA_EXIT( HCA_DBG_SHIM);\r
+       return status;\r
+#endif \r
+}\r
+\r
+ib_api_status_t\r
+mlnx_destroy_cq (\r
+       IN              const   ib_cq_handle_t                          h_cq)\r
+{\r
+#ifndef WIN_TO_BE_CHANGED\r
+                                                                                                                                                               \r
+       ib_api_status_t         status;\r
+       int err;\r
+       struct ib_cq *ib_cq_p = (struct ib_cq *)h_cq;\r
+\r
+       HCA_ENTER( HCA_DBG_QP);\r
+\r
+       HCA_PRINT(TRACE_LEVEL_INFORMATION       ,HCA_DBG_SHIM  ,\r
+               ("cqn %#x, pcs %p\n", ((struct mthca_cq*)ib_cq_p)->cqn, PsGetCurrentProcess()) );\r
+\r
+       // destroy CQ\r
+       err = ibv_destroy_cq( ib_cq_p );\r
+       if (err) {\r
+               HCA_PRINT (TRACE_LEVEL_ERROR ,HCA_DBG_SHIM,("ibv_destroy_cq failed (%d)\n", err));\r
+               status = errno_to_iberr(err);\r
+               goto err_destroy_cq;\r
+       }\r
+\r
+       status = IB_SUCCESS;\r
+\r
+err_destroy_cq:\r
+       HCA_PRINT_EXIT(TRACE_LEVEL_ERROR  , HCA_DBG_SHIM  ,("completes with ERROR status %s\n", ib_get_err_str(status)));\r
+       return status;\r
+\r
+#else\r
+       ib_api_status_t status;\r
+\r
+       uint32_t        hca_idx = CQ_HCA_FROM_HNDL(h_cq);\r
+       uint32_t        cq_num  = CQ_NUM_FROM_HNDL(h_cq);\r
+       uint32_t                cq_idx;\r
+//     uint32_t        pd_idx = 0;\r
+       mlnx_hobul_t     *hobul_p;\r
+       HHUL_cq_hndl_t   hhul_cq_hndl;\r
+\r
+       HCA_ENTER( HCA_DBG_SHIM);\r
+\r
+       VALIDATE_INDEX(hca_idx,   MLNX_MAX_HCA, IB_INVALID_CQ_HANDLE, cleanup);\r
+       hobul_p = mlnx_hobul_array[hca_idx];\r
+       if (NULL == hobul_p) {\r
+               status = IB_INVALID_CQ_HANDLE;\r
+               goto cleanup;\r
+       }\r
+\r
+       cq_idx = cq_num & hobul_p->cq_idx_mask;\r
+       VALIDATE_INDEX(cq_idx, hobul_p->max_cq, IB_INVALID_CQ_HANDLE, cleanup);\r
+       if ( E_MARK_CQ != hobul_p->cq_info_tbl[cq_idx].mark) {\r
+               status =  IB_INVALID_CQ_HANDLE;\r
+               goto cleanup;\r
+       }\r
+\r
+       cl_mutex_acquire(&hobul_p->cq_info_tbl[cq_idx].mutex);\r
+\r
+       hhul_cq_hndl = hobul_p->cq_info_tbl[cq_idx].hhul_cq_hndl;\r
+//     pd_idx       = hobul_p->cq_info_tbl[cq_idx].pd_num;\r
+//     VALIDATE_INDEX(pd_idx, hobul_p->max_pd, IB_ERROR, cleanup);\r
+//     if (E_MARK_PD != hobul_p->pd_info_tbl[pd_idx].mark) {\r
+//             status =  IB_INVALID_PD_HANDLE;\r
+//             goto cleanup_locked;\r
+//     }\r
+\r
+       // PREP: no PREP required for destroy_cq\r
+       if (HH_OK != THH_hob_destroy_cq(hobul_p->hh_hndl, cq_num)) {\r
+               status = IB_ERROR;\r
+               goto cleanup_locked;\r
+       }\r
+\r
+       if (hobul_p->cq_info_tbl[cq_idx].kernel_mode) {\r
+               if (HH_OK != THHUL_cqm_destroy_cq_done(hobul_p->hhul_hndl, hhul_cq_hndl)) {\r
+                       status = IB_ERROR;\r
+                       goto cleanup_locked;\r
+               }\r
+               if (hobul_p->cq_info_tbl[cq_idx].cq_ul_resources_p)\r
+                       cl_free( hobul_p->cq_info_tbl[cq_idx].cq_ul_resources_p);\r
+       }\r
+\r
+       hobul_p->cq_info_tbl[cq_idx].mark = E_MARK_INVALID;\r
+       hobul_p->cq_info_tbl[cq_idx].cq_ul_resources_p = NULL;\r
+       cl_mutex_release(&hobul_p->cq_info_tbl[cq_idx].mutex);\r
+\r
+       // Update CA object count\r
+       cl_atomic_dec(&hobul_p->count);\r
+       HCA_PRINT(TRACE_LEVEL_INFORMATION  , HCA_DBG_SHIM  ,("CA %d count %d\n", hca_idx, hobul_p->count));\r
+\r
+\r
+       HCA_EXIT( HCA_DBG_SHIM);\r
+       return IB_SUCCESS;\r
+\r
+cleanup_locked:\r
+       cl_mutex_release(&hobul_p->cq_info_tbl[cq_idx].mutex);\r
+\r
+cleanup:\r
+       HCA_PRINT(TRACE_LEVEL_ERROR  ,HCA_DBG_SHIM  ,("status %d\n", status));\r
+       HCA_EXIT( HCA_DBG_SHIM);\r
+       return status;\r
+#endif \r
+}\r
+\r
+\r
+void\r
+setup_ci_interface(\r
+       IN              const   ib_net64_t                                      ca_guid,\r
+       IN      OUT                     ci_interface_t                          *p_interface )\r
+{\r
+       cl_memclr(p_interface, sizeof(*p_interface));\r
+\r
+       /* Guid of the CA. */\r
+       p_interface->guid = ca_guid;\r
+\r
+       /* Version of this interface. */\r
+       p_interface->version = VERBS_VERSION;\r
+\r
+       /* UVP name */\r
+       cl_memcpy( p_interface->libname, mlnx_uvp_lib_name, MAX_LIB_NAME);\r
+\r
+       HCA_PRINT(TRACE_LEVEL_VERBOSE  , HCA_DBG_SHIM  ,("UVP filename %s\n", p_interface->libname));\r
+\r
+       /* The real interface. */\r
+       p_interface->open_ca = mlnx_open_ca;\r
+       p_interface->query_ca = mlnx_query_ca;\r
+       p_interface->modify_ca = mlnx_modify_ca; \r
+       p_interface->close_ca = mlnx_close_ca;\r
+       p_interface->um_open_ca = mlnx_um_open;\r
+       p_interface->um_close_ca = mlnx_um_close;\r
+\r
+       p_interface->allocate_pd = mlnx_allocate_pd;\r
+       p_interface->deallocate_pd = mlnx_deallocate_pd;\r
+\r
+       p_interface->create_av = mlnx_create_av;\r
+       p_interface->query_av = mlnx_query_av;\r
+       p_interface->modify_av = mlnx_modify_av;\r
+       p_interface->destroy_av = mlnx_destroy_av;\r
+\r
+       p_interface->create_qp = mlnx_create_qp;\r
+       p_interface->create_spl_qp = mlnx_create_spl_qp;\r
+       p_interface->modify_qp = mlnx_modify_qp;\r
+       p_interface->query_qp = mlnx_query_qp;\r
+       p_interface->destroy_qp = mlnx_destroy_qp;\r
+\r
+       p_interface->create_cq = mlnx_create_cq;\r
+       p_interface->resize_cq = mlnx_resize_cq;\r
+       p_interface->query_cq = mlnx_query_cq;\r
+       p_interface->destroy_cq = mlnx_destroy_cq;\r
+\r
+       p_interface->local_mad = mlnx_local_mad;\r
+       \r
+       p_interface->vendor_call = fw_access_ctrl;\r
+\r
+       mlnx_memory_if(p_interface);\r
+       mlnx_direct_if(p_interface);\r
+       mlnx_mcast_if(p_interface);\r
+\r
+\r
+       return;\r
+}\r
+\r
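
setup_ci_interface() is the glue between this driver and the IB Access Layer: every verb is published as a function pointer in IBAL's ci_interface_t, and IBAL thereafter calls the driver only through that table. A minimal consumer-side sketch follows; the wrapper name is illustrative, and the ci_interface_t member signatures come from iba/ib_ci.h rather than from this commit:

/* Sketch only: how an access-layer-style consumer binds to the table built above. */
static void example_bind_ca(
        IN      const   ib_net64_t              ca_guid,
        IN OUT          ci_interface_t          *p_ifc )
{
        setup_ci_interface( ca_guid, p_ifc );
        /* From here on the access layer dispatches through the table:
         * p_ifc->create_cq, p_ifc->resize_cq, p_ifc->query_cq and p_ifc->destroy_cq
         * resolve to the mlnx_* implementations defined earlier in this file. */
}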
diff --git a/trunk/hw/mthca/kernel/ib_cache.h b/trunk/hw/mthca/kernel/ib_cache.h
new file mode 100644 (file)
index 0000000..be3ca4f
--- /dev/null
@@ -0,0 +1,109 @@
+/*
+ * Copyright (c) 2004 Topspin Communications.  All rights reserved.
+ * Copyright (c) 2005 Intel Corporation. All rights reserved.
+ * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id: ib_cache.h 2730 2005-06-28 16:43:03Z sean.hefty $
+ */
+
+#ifndef _IB_CACHE_H
+#define _IB_CACHE_H
+
+#include <ib_verbs.h>
+
+/**
+ * ib_get_cached_gid - Returns a cached GID table entry
+ * @device: The device to query.
+ * @port_num: The port number of the device to query.
+ * @index: The index into the cached GID table to query.
+ * @gid: The GID value found at the specified index.
+ *
+ * ib_get_cached_gid() fetches the specified GID table entry stored in
+ * the local software cache.
+ */
+int ib_get_cached_gid(struct ib_device    *device,
+                     u8                   port_num,
+                     int                  index,
+                     union ib_gid        *gid);
+
+/**
+ * ib_find_cached_gid - Returns the port number and GID table index where
+ *   a specified GID value occurs.
+ * @device: The device to query.
+ * @gid: The GID value to search for.
+ * @port_num: The port number of the device where the GID value was found.
+ * @index: The index into the cached GID table where the GID was found.  This
+ *   parameter may be NULL.
+ *
+ * ib_find_cached_gid() searches for the specified GID value in
+ * the local software cache.
+ */
+int ib_find_cached_gid(struct ib_device *device,
+                      union ib_gid     *gid,
+                      u8               *port_num,
+                      u16              *index);
+
+/**
+ * ib_get_cached_pkey - Returns a cached PKey table entry
+ * @device: The device to query.
+ * @port_num: The port number of the device to query.
+ * @index: The index into the cached PKey table to query.
+ * @pkey: The PKey value found at the specified index.
+ *
+ * ib_get_cached_pkey() fetches the specified PKey table entry stored in
+ * the local software cache.
+ */
+int ib_get_cached_pkey(struct ib_device    *device_handle,
+                      u8                   port_num,
+                      int                  index,
+                      u16                 *pkey);
+
+/**
+ * ib_find_cached_pkey - Returns the PKey table index where a specified
+ *   PKey value occurs.
+ * @device: The device to query.
+ * @port_num: The port number of the device to search for the PKey.
+ * @pkey: The PKey value to search for.
+ * @index: The index into the cached PKey table where the PKey was found.
+ *
+ * ib_find_cached_pkey() searches the specified PKey table in
+ * the local software cache.
+ */
+int ib_find_cached_pkey(struct ib_device    *device,
+                       u8                   port_num,
+                       u16                  pkey,
+                       u16                 *index);
+
+
+int  ib_cache_setup(void);
+void ib_cache_cleanup(void);
+
+#endif /* _IB_CACHE_H */
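
The accessors above read the software GID/PKey caches rather than issuing MADs to the hardware. A short usage sketch, assuming the cache has been populated via ib_cache_setup(); the function name is illustrative and the 0-on-success return convention follows the Linux code this header is ported from:

/* Illustrative only: fetch GID index 0 and PKey index 0 of port 1 from the cache. */
static int example_query_cache(struct ib_device *device)
{
        union ib_gid gid;
        u16 pkey;
        int ret;

        ret = ib_get_cached_gid(device, 1 /* port */, 0 /* index */, &gid);
        if (ret)
                return ret;

        ret = ib_get_cached_pkey(device, 1 /* port */, 0 /* index */, &pkey);
        if (ret)
                return ret;

        /* gid and pkey can now be used, e.g. to build address information */
        return 0;
}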
diff --git a/trunk/hw/mthca/kernel/ib_mad.h b/trunk/hw/mthca/kernel/ib_mad.h
new file mode 100644 (file)
index 0000000..d799536
--- /dev/null
@@ -0,0 +1,579 @@
+/*
+ * Copyright (c) 2004 Mellanox Technologies Ltd.  All rights reserved.
+ * Copyright (c) 2004 Infinicon Corporation.  All rights reserved.
+ * Copyright (c) 2004 Intel Corporation.  All rights reserved.
+ * Copyright (c) 2004 Topspin Corporation.  All rights reserved.
+ * Copyright (c) 2004 Voltaire Corporation.  All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id: ib_mad.h 2928 2005-07-28 18:45:56Z sean.hefty $
+ */
+
+#if !defined( IB_MAD_H )
+#define IB_MAD_H
+
+#include <ib_verbs.h>
+
+/* Management base version */
+#define IB_MGMT_BASE_VERSION                   1
+
+/* Management classes */
+#define IB_MGMT_CLASS_SUBN_LID_ROUTED          0x01
+#define IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE      0x81
+#define IB_MGMT_CLASS_SUBN_ADM                 0x03
+#define IB_MGMT_CLASS_PERF_MGMT                        0x04
+#define IB_MGMT_CLASS_BM                       0x05
+#define IB_MGMT_CLASS_DEVICE_MGMT              0x06
+#define IB_MGMT_CLASS_CM                       0x07
+#define IB_MGMT_CLASS_SNMP                     0x08
+#define IB_MGMT_CLASS_VENDOR_RANGE2_START      0x30
+#define IB_MGMT_CLASS_VENDOR_RANGE2_END                0x4F
+
+#define        IB_OPENIB_OUI                           (0x001405)
+
+/* Management methods */
+#define IB_MGMT_METHOD_GET                     0x01
+#define IB_MGMT_METHOD_SET                     0x02
+#define IB_MGMT_METHOD_GET_RESP                        0x81
+#define IB_MGMT_METHOD_SEND                    0x03
+#define IB_MGMT_METHOD_TRAP                    0x05
+#define IB_MGMT_METHOD_REPORT                  0x06
+#define IB_MGMT_METHOD_REPORT_RESP             0x86
+#define IB_MGMT_METHOD_TRAP_REPRESS            0x07
+
+#define IB_MGMT_METHOD_RESP                    0x80
+
+#define IB_MGMT_MAX_METHODS                    128
+
+/* RMPP information */
+#define IB_MGMT_RMPP_VERSION                   1
+
+#define IB_MGMT_RMPP_TYPE_DATA                 1
+#define IB_MGMT_RMPP_TYPE_ACK                  2
+#define IB_MGMT_RMPP_TYPE_STOP                 3
+#define IB_MGMT_RMPP_TYPE_ABORT                        4
+
+#define IB_MGMT_RMPP_FLAG_ACTIVE               1
+#define IB_MGMT_RMPP_FLAG_FIRST                        (1<<1)
+#define IB_MGMT_RMPP_FLAG_LAST                 (1<<2)
+
+#define IB_MGMT_RMPP_NO_RESPTIME               0x1F
+
+#define        IB_MGMT_RMPP_STATUS_SUCCESS             0
+#define        IB_MGMT_RMPP_STATUS_RESX                1
+#define        IB_MGMT_RMPP_STATUS_ABORT_MIN           118
+#define        IB_MGMT_RMPP_STATUS_T2L                 118
+#define        IB_MGMT_RMPP_STATUS_BAD_LEN             119
+#define        IB_MGMT_RMPP_STATUS_BAD_SEG             120
+#define        IB_MGMT_RMPP_STATUS_BADT                121
+#define        IB_MGMT_RMPP_STATUS_W2S                 122
+#define        IB_MGMT_RMPP_STATUS_S2B                 123
+#define        IB_MGMT_RMPP_STATUS_BAD_STATUS          124
+#define        IB_MGMT_RMPP_STATUS_UNV                 125
+#define        IB_MGMT_RMPP_STATUS_TMR                 126
+#define        IB_MGMT_RMPP_STATUS_UNSPEC              127
+#define        IB_MGMT_RMPP_STATUS_ABORT_MAX           127
+
+#define IB_QP1_QKEY    0x00000180                      /* big endian */
+#define IB_QP_SET_QKEY 0x00000080      /* big endian */
+
+struct ib_mad_hdr {
+       u8      base_version;
+       u8      mgmt_class;
+       u8      class_version;
+       u8      method;
+       __be16  status;
+       __be16  class_specific;
+       __be64  tid;
+       __be16  attr_id;
+       __be16  resv;
+       __be32  attr_mod;
+};
+
+struct ib_rmpp_hdr {
+       u8      rmpp_version;
+       u8      rmpp_type;
+       u8      rmpp_rtime_flags;
+       u8      rmpp_status;
+       __be32  seg_num;
+       __be32  paylen_newwin;
+};
+
+typedef u64 ib_sa_comp_mask;
+
+#define IB_SA_COMP_MASK(n) ((ib_sa_comp_mask) cl_hton64(1ull << (n)))
+
+/*
+ * ib_sa_hdr and ib_sa_mad structures must be packed because they have
+ * 64-bit fields that are only 32-bit aligned. 64-bit architectures will
+ * lay them out wrong otherwise.  (And unfortunately they are sent on
+ * the wire so we can't change the layout)
+ */
+#pragma pack(push,1)
+struct ib_sa_hdr {
+       __be64                  sm_key;
+       __be16                  attr_offset;
+       __be16                  reserved;
+       ib_sa_comp_mask         comp_mask;
+};
+#pragma pack(pop)
+
+struct ib_mad {
+       struct ib_mad_hdr       mad_hdr;
+       u8                      data[232];
+};
+
+struct ib_rmpp_mad {
+       struct ib_mad_hdr       mad_hdr;
+       struct ib_rmpp_hdr      rmpp_hdr;
+       u8                      data[220];
+};
+
+#pragma pack(push,1)
+struct ib_sa_mad {
+       struct ib_mad_hdr       mad_hdr;
+       struct ib_rmpp_hdr      rmpp_hdr;
+       struct ib_sa_hdr        sa_hdr;
+       u8                      data[200];
+};
+#pragma pack(pop)
+
+struct ib_vendor_mad {
+       struct ib_mad_hdr       mad_hdr;
+       struct ib_rmpp_hdr      rmpp_hdr;
+       u8                      reserved;
+       u8                      oui[3];
+       u8                      data[216];
+};
+
+/**
+ * ib_mad_send_buf - MAD data buffer and work request for sends.
+ * @mad: References an allocated MAD data buffer.  The size of the data
+ *   buffer is specified in the @send_wr.length field.
+ * @mapping: DMA mapping information.
+ * @mad_agent: MAD agent that allocated the buffer.
+ * @context: User-controlled context fields.
+ * @send_wr: An initialized work request structure used when sending the MAD.
+ *   The wr_id field of the work request is initialized to reference this
+ *   data structure.
+ * @sge: A scatter-gather list referenced by the work request.
+ *
+ * Users are responsible for initializing the MAD buffer itself, with the
+ * exception of specifying the payload length field in any RMPP MAD.
+ */
+struct ib_mad_send_buf {
+       struct ib_mad           *mad;
+       dma_addr_t                              mapping;
+       struct ib_mad_agent     *mad_agent;
+       void                    *context[2];
+       struct _ib_send_wr      send_wr;
+       struct ib_sge           sge;
+};
+
+/**
+ * ib_get_rmpp_resptime - Returns the RMPP response time.
+ * @rmpp_hdr: An RMPP header.
+ */
+static inline u8 ib_get_rmpp_resptime(struct ib_rmpp_hdr *rmpp_hdr)
+{
+       return rmpp_hdr->rmpp_rtime_flags >> 3;
+}
+
+/**
+ * ib_get_rmpp_flags - Returns the RMPP flags.
+ * @rmpp_hdr: An RMPP header.
+ */
+static inline u8 ib_get_rmpp_flags(struct ib_rmpp_hdr *rmpp_hdr)
+{
+       return rmpp_hdr->rmpp_rtime_flags & 0x7;
+}
+
+/**
+ * ib_set_rmpp_resptime - Sets the response time in an RMPP header.
+ * @rmpp_hdr: An RMPP header.
+ * @rtime: The response time to set.
+ */
+static inline void ib_set_rmpp_resptime(struct ib_rmpp_hdr *rmpp_hdr, u8 rtime)
+{
+       rmpp_hdr->rmpp_rtime_flags = ib_get_rmpp_flags(rmpp_hdr) | (rtime << 3);
+}
+
+/**
+ * ib_set_rmpp_flags - Sets the flags in an RMPP header.
+ * @rmpp_hdr: An RMPP header.
+ * @flags: The flags to set.
+ */
+static inline void ib_set_rmpp_flags(struct ib_rmpp_hdr *rmpp_hdr, u8 flags)
+{
+       rmpp_hdr->rmpp_rtime_flags = (rmpp_hdr->rmpp_rtime_flags & 0xF8) |
+                                    (flags & 0x7);
+}
+
+struct ib_mad_agent;
+struct ib_mad_send_wc;
+struct ib_mad_recv_wc;
+
+/**
+ * ib_mad_send_handler - callback handler for a sent MAD.
+ * @mad_agent: MAD agent that sent the MAD.
+ * @mad_send_wc: Send work completion information on the sent MAD.
+ */
+typedef void (*ib_mad_send_handler)(struct ib_mad_agent *mad_agent,
+                                   struct ib_mad_send_wc *mad_send_wc);
+
+/**
+ * ib_mad_snoop_handler - Callback handler for snooping sent MADs.
+ * @mad_agent: MAD agent that snooped the MAD.
+ * @send_wr: Work request information on the sent MAD.
+ * @mad_send_wc: Work completion information on the sent MAD.  Valid
+ *   only for snooping that occurs on a send completion.
+ *
+ * Clients snooping MADs should not modify data referenced by the @send_wr
+ * or @mad_send_wc.
+ */
+typedef void (*ib_mad_snoop_handler)(struct ib_mad_agent *mad_agent,
+                                    struct _ib_send_wr *send_wr,
+                                    struct ib_mad_send_wc *mad_send_wc);
+
+/**
+ * ib_mad_recv_handler - callback handler for a received MAD.
+ * @mad_agent: MAD agent requesting the received MAD.
+ * @mad_recv_wc: Received work completion information on the received MAD.
+ *
+ * MADs received in response to a send request operation will be handed to
+ * the user after the send operation completes.  All data buffers given
+ * to registered agents through this routine are owned by the receiving
+ * client, except for snooping agents.  Clients snooping MADs should not
+ * modify the data referenced by @mad_recv_wc.
+ */
+typedef void (*ib_mad_recv_handler)(struct ib_mad_agent *mad_agent,
+                                   struct ib_mad_recv_wc *mad_recv_wc);
+
+/**
+ * ib_mad_agent - Used to track MAD registration with the access layer.
+ * @device: Reference to device registration is on.
+ * @qp: Reference to QP used for sending and receiving MADs.
+ * @mr: Memory region for system memory usable for DMA.
+ * @recv_handler: Callback handler for a received MAD.
+ * @send_handler: Callback handler for a sent MAD.
+ * @snoop_handler: Callback handler for snooped sent MADs.
+ * @context: User-specified context associated with this registration.
+ * @hi_tid: Access layer assigned transaction ID for this client.
+ *   Unsolicited MADs sent by this client will have the upper 32-bits
+ *   of their TID set to this value.
+ * @port_num: Port number on which QP is registered
+ * @rmpp_version: If set, indicates the RMPP version used by this agent.
+ */
+struct ib_mad_agent {
+       struct ib_device        *device;
+       struct ib_qp            *qp;
+       struct ib_mr            *mr;
+       ib_mad_recv_handler     recv_handler;
+       ib_mad_send_handler     send_handler;
+       ib_mad_snoop_handler    snoop_handler;
+       void                    *context;
+       u32                     hi_tid;
+       u8                      port_num;
+       u8                      rmpp_version;
+};
+
+/**
+ * ib_mad_send_wc - MAD send completion information.
+ * @wr_id: Work request identifier associated with the send MAD request.
+ * @status: Completion status.
+ * @vendor_err: Optional vendor error information returned with a failed
+ *   request.
+ */
+struct ib_mad_send_wc {
+       u64                     wr_id;
+       enum ib_wc_status       status;
+       u32                     vendor_err;
+};
+
+/**
+ * ib_mad_recv_buf - received MAD buffer information.
+ * @list: Reference to next data buffer for a received RMPP MAD.
+ * @grh: References a data buffer containing the global route header.
+ *   The data referenced by this buffer is only valid if the GRH is
+ *   valid.
+ * @mad: References the start of the received MAD.
+ */
+struct ib_mad_recv_buf {
+       struct list_head        list;
+       struct ib_grh           *grh;
+       struct ib_mad           *mad;
+};
+
+/**
+ * ib_mad_recv_wc - received MAD information.
+ * @wc: Completion information for the received data.
+ * @recv_buf: Specifies the location of the received data buffer(s).
+ * @rmpp_list: Specifies a list of RMPP reassembled received MAD buffers.
+ * @mad_len: The length of the received MAD, without duplicated headers.
+ *
+ * For a received response, the wr_id field of the wc is set to the wr_id
+ *   for the corresponding send request.
+ */
+struct ib_mad_recv_wc {
+       struct _ib_wc           *wc;
+       struct ib_mad_recv_buf  recv_buf;
+       struct list_head        rmpp_list;
+       int                     mad_len;
+};
+
+/**
+ * ib_mad_reg_req - MAD registration request
+ * @mgmt_class: Indicates which management class of MADs should be received
+ *   by the caller.  This field is only required if the user wishes to
+ *   receive unsolicited MADs, otherwise it should be 0.
+ * @mgmt_class_version: Indicates which version of MADs for the given
+ *   management class to receive.
+ * @oui: Indicates IEEE OUI when mgmt_class is a vendor class
+ *   in the range from 0x30 to 0x4f. Otherwise not used.
+ * @method_mask: The caller will receive unsolicited MADs for any method
+ *   where @method_mask = 1.
+ */
+struct ib_mad_reg_req {
+       u8      mgmt_class;
+       u8      mgmt_class_version;
+       u8      oui[3];
+       DECLARE_BITMAP(method_mask, IB_MGMT_MAX_METHODS);
+};
+
+/**
+ * ib_register_mad_agent - Register to send/receive MADs.
+ * @device: The device to register with.
+ * @port_num: The port on the specified device to use.
+ * @qp_type: Specifies which QP to access.  Must be either
+ *   IB_QPT_QP0 or IB_QPT_QP1.
+ * @mad_reg_req: Specifies which unsolicited MADs should be received
+ *   by the caller.  This parameter may be NULL if the caller only
+ *   wishes to receive solicited responses.
+ * @rmpp_version: If set, indicates that the client will send
+ *   and receive MADs that contain the RMPP header for the given version.
+ *   If set to 0, indicates that RMPP is not used by this client.
+ * @send_handler: The completion callback routine invoked after a send
+ *   request has completed.
+ * @recv_handler: The completion callback routine invoked for a received
+ *   MAD.
+ * @context: User specified context associated with the registration.
+ */
+struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
+                                          u8 port_num,
+                                          enum ib_qp_type_t qp_type,
+                                          struct ib_mad_reg_req *mad_reg_req,
+                                          u8 rmpp_version,
+                                          ib_mad_send_handler send_handler,
+                                          ib_mad_recv_handler recv_handler,
+                                          void *context);
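
A small usage sketch of the registration call above: a client that only sends requests and consumes their solicited responses passes a NULL mad_reg_req, as described in the comment. The example_* names and handler bodies are placeholders, not part of this commit:

/* Illustrative only: register a QP1 (GSI) agent for solicited traffic. */
static void example_send_done(struct ib_mad_agent *agent,
                              struct ib_mad_send_wc *wc)
{
        /* the request completed (or timed out); wc->status tells which */
}

static void example_recv_done(struct ib_mad_agent *agent,
                              struct ib_mad_recv_wc *wc)
{
        /* ... inspect wc->recv_buf.mad ... */
        ib_free_recv_mad(wc);   /* buffers must go back to the access layer */
}

static struct ib_mad_agent *example_register(struct ib_device *device, u8 port)
{
        return ib_register_mad_agent(device, port, IB_QPT_QP1,
                                     NULL       /* solicited responses only */,
                                     0          /* RMPP not used */,
                                     example_send_done, example_recv_done,
                                     NULL       /* context */);
}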
+
+enum ib_mad_snoop_flags {
+       /*IB_MAD_SNOOP_POSTED_SENDS        = 1,*/
+       /*IB_MAD_SNOOP_RMPP_SENDS          = (1<<1),*/
+       IB_MAD_SNOOP_SEND_COMPLETIONS      = (1<<2),
+       /*IB_MAD_SNOOP_RMPP_SEND_COMPLETIONS = (1<<3),*/
+       IB_MAD_SNOOP_RECVS                 = (1<<4)
+       /*IB_MAD_SNOOP_RMPP_RECVS          = (1<<5),*/
+       /*IB_MAD_SNOOP_REDIRECTED_QPS      = (1<<6)*/
+};
+
+/**
+ * ib_register_mad_snoop - Register to snoop sent and received MADs.
+ * @device: The device to register with.
+ * @port_num: The port on the specified device to use.
+ * @qp_type: Specifies which QP traffic to snoop.  Must be either
+ *   IB_QPT_QP0 or IB_QPT_QP1.
+ * @mad_snoop_flags: Specifies the points at which snooping occurs.
+ * @send_handler: The callback routine invoked for a snooped send.
+ * @recv_handler: The callback routine invoked for a snooped receive.
+ * @context: User specified context associated with the registration.
+ */
+struct ib_mad_agent *ib_register_mad_snoop(struct ib_device *device,
+                                          u8 port_num,
+                                          enum ib_qp_type_t qp_type,
+                                          int mad_snoop_flags,
+                                          ib_mad_snoop_handler snoop_handler,
+                                          ib_mad_recv_handler recv_handler,
+                                          void *context);
+
+/**
+ * ib_unregister_mad_agent - Unregisters a client from using MAD services.
+ * @mad_agent: Corresponding MAD registration request to deregister.
+ *
+ * After invoking this routine, MAD services are no longer usable by the
+ * client on the associated QP.
+ */
+int ib_unregister_mad_agent(struct ib_mad_agent *mad_agent);
+
+/**
+ * ib_post_send_mad - Posts MAD(s) to the send queue of the QP associated
+ *   with the registered client.
+ * @mad_agent: Specifies the associated registration to post the send to.
+ * @send_wr: Specifies the information needed to send the MAD(s).
+ * @bad_send_wr: Specifies the MAD on which an error was encountered.
+ *
+ * Sent MADs are not guaranteed to complete in the order that they were posted.
+ *
+ * If the MAD requires RMPP, the data buffer should contain a single copy
+ * of the common MAD, RMPP, and class specific headers, followed by the class
+ * defined data.  If the class defined data would not divide evenly into
+ * RMPP segments, then space must be allocated at the end of the referenced
+ * buffer for any required padding.  To indicate the amount of class defined
+ * data being transferred, the paylen_newwin field in the RMPP header should
+ * be set to the size of the class specific header plus the amount of class
+ * defined data being transferred.  The paylen_newwin field should be
+ * specified in network-byte order.
+ */
+int ib_post_send_mad(struct ib_mad_agent *mad_agent,
+                    struct _ib_send_wr *send_wr,
+                    struct _ib_send_wr **bad_send_wr);
+
+/**
+ * ib_coalesce_recv_mad - Coalesces received MAD data into a single buffer.
+ * @mad_recv_wc: Work completion information for a received MAD.
+ * @buf: User-provided data buffer to receive the coalesced buffers.  The
+ *   referenced buffer should be at least the size of the mad_len specified
+ *   by @mad_recv_wc.
+ *
+ * This call copies a chain of received MAD segments into a single data buffer,
+ * removing duplicated headers.
+ */
+void ib_coalesce_recv_mad(struct ib_mad_recv_wc *mad_recv_wc, void *buf);
+
+/**
+ * ib_free_recv_mad - Returns data buffers used to receive a MAD.
+ * @mad_recv_wc: Work completion information for a received MAD.
+ *
+ * Clients receiving MADs through their ib_mad_recv_handler must call this
+ * routine to return the work completion buffers to the access layer.
+ */
+void ib_free_recv_mad(struct ib_mad_recv_wc *mad_recv_wc);
+
+/**
+ * ib_cancel_mad - Cancels an outstanding send MAD operation.
+ * @mad_agent: Specifies the registration associated with sent MAD.
+ * @wr_id: Indicates the work request identifier of the MAD to cancel.
+ *
+ * MADs will be returned to the user through the corresponding
+ * ib_mad_send_handler.
+ */
+void ib_cancel_mad(struct ib_mad_agent *mad_agent, u64 wr_id);
+
+/**
+ * ib_modify_mad - Modifies an outstanding send MAD operation.
+ * @mad_agent: Specifies the registration associated with sent MAD.
+ * @wr_id: Indicates the work request identifier of the MAD to modify.
+ * @timeout_ms: New timeout value for sent MAD.
+ *
+ * This call will reset the timeout value for a sent MAD to the specified
+ * value.
+ */
+int ib_modify_mad(struct ib_mad_agent *mad_agent, u64 wr_id, u32 timeout_ms);
+
+/**
+ * ib_redirect_mad_qp - Registers a QP for MAD services.
+ * @qp: Reference to a QP that requires MAD services.
+ * @rmpp_version: If set, indicates that the client will send
+ *   and receive MADs that contain the RMPP header for the given version.
+ *   If set to 0, indicates that RMPP is not used by this client.
+ * @send_handler: The completion callback routine invoked after a send
+ *   request has completed.
+ * @recv_handler: The completion callback routine invoked for a received
+ *   MAD.
+ * @context: User specified context associated with the registration.
+ *
+ * Use of this call allows clients to use MAD services, such as RMPP,
+ * on user-owned QPs.  After calling this routine, users may send
+ * MADs on the specified QP by calling ib_post_send_mad.
+ */
+struct ib_mad_agent *ib_redirect_mad_qp(struct ib_qp *qp,
+                                       u8 rmpp_version,
+                                       ib_mad_send_handler send_handler,
+                                       ib_mad_recv_handler recv_handler,
+                                       void *context);
+
+/**
+ * ib_process_mad_wc - Processes a work completion associated with a
+ *   MAD sent or received on a redirected QP.
+ * @mad_agent: Specifies the registered MAD service using the redirected QP.
+ * @wc: References a work completion associated with a sent or received
+ *   MAD segment.
+ *
+ * This routine is used to complete or continue processing on a MAD request.
+ * If the work completion is associated with a send operation, calling
+ * this routine is required to continue an RMPP transfer or to wait for a
+ * corresponding response, if it is a request.  If the work completion is
+ * associated with a receive operation, calling this routine is required to
+ * process an inbound or outbound RMPP transfer, or to match a response MAD
+ * with its corresponding request.
+ */
+int ib_process_mad_wc(struct ib_mad_agent *mad_agent,
+                     struct _ib_wc *wc);
+
+/**
+ * ib_create_send_mad - Allocate and initialize a data buffer and work request
+ *   for sending a MAD.
+ * @mad_agent: Specifies the registered MAD service to associate with the MAD.
+ * @remote_qpn: Specifies the QPN of the receiving node.
+ * @pkey_index: Specifies which PKey the MAD will be sent using.  This field
+ *   is valid only if the remote_qpn is QP 1.
+ * @ah: References the address handle used to transfer to the remote node.
+ * @rmpp_active: Indicates if the send will enable RMPP.
+ * @hdr_len: Indicates the size of the data header of the MAD.  This length
+ *   should include the common MAD header, RMPP header, plus any class
+ *   specific header.
+ * @data_len: Indicates the size of any user-transferred data.  The call will
+ *   automatically adjust the allocated buffer size to account for any
+ *   additional padding that may be necessary.
+ * @gfp_mask: GFP mask used for the memory allocation.
+ *
+ * This is a helper routine that may be used to allocate a MAD.  Users are
+ * not required to allocate outbound MADs using this call.  The returned
+ * MAD send buffer will reference a data buffer usable for sending a MAD, along
+ * with an initialized work request structure.  Users may modify the returned
+ * MAD data buffer or work request before posting the send.
+ *
+ * The returned data buffer will be cleared.  Users are responsible for
+ * initializing the common MAD and any class specific headers.  If @rmpp_active
+ * is set, the RMPP header will be initialized for sending.
+ */
+struct ib_mad_send_buf * ib_create_send_mad(struct ib_mad_agent *mad_agent,
+                                           u32 remote_qpn, u16 pkey_index,
+                                           struct ib_ah *ah, int rmpp_active,
+                                           int hdr_len, int data_len,
+                                           unsigned int gfp_mask);
+
+/**
+ * ib_free_send_mad - Returns data buffers used to send a MAD.
+ * @send_buf: Previously allocated send data buffer.
+ */
+void ib_free_send_mad(struct ib_mad_send_buf *send_buf);
+
+#endif /* IB_MAD_H */
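
Putting the send-side declarations above together: a request MAD is allocated with ib_create_send_mad(), its common header filled in, and the embedded work request posted with ib_post_send_mad(); on success the buffer is released from the send handler, not here. The example_* name, the header field values and the use of IS_ERR() (assumed to come from the port's Linux-compat layer in mt_l2w.h) are illustrative, not taken from this commit:

/* Illustrative only: allocate, initialize and post a simple GET request. */
static int example_send_get(struct ib_mad_agent *agent, struct ib_ah *ah,
                            u32 remote_qpn, u16 pkey_index)
{
        struct ib_mad_send_buf *buf;
        struct _ib_send_wr *bad_wr;
        int ret;

        buf = ib_create_send_mad(agent, remote_qpn, pkey_index, ah,
                                 0 /* rmpp_active */,
                                 sizeof(struct ib_mad_hdr) /* hdr_len */,
                                 232 /* data_len: rest of the 256-byte MAD */,
                                 0 /* gfp_mask placeholder */);
        if (IS_ERR(buf))
                return -1;

        buf->mad->mad_hdr.base_version  = IB_MGMT_BASE_VERSION;
        buf->mad->mad_hdr.mgmt_class    = IB_MGMT_CLASS_PERF_MGMT;
        buf->mad->mad_hdr.class_version = 1;
        buf->mad->mad_hdr.method        = IB_MGMT_METHOD_GET;

        ret = ib_post_send_mad(agent, &buf->send_wr, &bad_wr);
        if (ret)
                ib_free_send_mad(buf);  /* freed here only on failure; otherwise
                                         * released after the send completes */
        return ret;
}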
diff --git a/trunk/hw/mthca/kernel/ib_pack.h b/trunk/hw/mthca/kernel/ib_pack.h
new file mode 100644 (file)
index 0000000..3f7f79f
--- /dev/null
@@ -0,0 +1,245 @@
+/*
+ * Copyright (c) 2004 Topspin Corporation.  All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id: ib_pack.h 1349 2004-12-16 21:09:43Z roland $
+ */
+
+#ifndef IB_PACK_H
+#define IB_PACK_H
+
+#include <ib_verbs.h>
+
+enum {
+       IB_LRH_BYTES  = 8,
+       IB_GRH_BYTES  = 40,
+       IB_BTH_BYTES  = 12,
+       IB_DETH_BYTES = 8
+};
+
+struct ib_field {
+       int    struct_offset_bytes;
+       int    struct_size_bytes;
+       int    offset_words;
+       int    offset_bits;
+       int    size_bits;
+       char  *field_name;
+};
+
+#define RESERVED \
+       .field_name          = "reserved"
+
+/*
+ * This macro cleans up the definitions of constants for BTH opcodes.
+ * It is used to define constants such as IB_OPCODE_UD_SEND_ONLY,
+ * which becomes IB_OPCODE_UD + IB_OPCODE_SEND_ONLY, and this gives
+ * the correct value.
+ *
+ * In short, user code should use the constants defined using the
+ * macro rather than worrying about adding together other constants.
+*/
+#define IB_OPCODE(transport, op) \
+       IB_OPCODE_ ## transport ## _ ## op = \
+               IB_OPCODE_ ## transport + IB_OPCODE_ ## op
+
+enum {
+       /* transport types -- just used to define real constants */
+       IB_OPCODE_RC                                = 0x00,
+       IB_OPCODE_UC                                = 0x20,
+       IB_OPCODE_RD                                = 0x40,
+       IB_OPCODE_UD                                = 0x60,
+
+       /* operations -- just used to define real constants */
+       IB_OPCODE_SEND_FIRST                        = 0x00,
+       IB_OPCODE_SEND_MIDDLE                       = 0x01,
+       IB_OPCODE_SEND_LAST                         = 0x02,
+       IB_OPCODE_SEND_LAST_WITH_IMMEDIATE          = 0x03,
+       IB_OPCODE_SEND_ONLY                         = 0x04,
+       IB_OPCODE_SEND_ONLY_WITH_IMMEDIATE          = 0x05,
+       IB_OPCODE_RDMA_WRITE_FIRST                  = 0x06,
+       IB_OPCODE_RDMA_WRITE_MIDDLE                 = 0x07,
+       IB_OPCODE_RDMA_WRITE_LAST                   = 0x08,
+       IB_OPCODE_RDMA_WRITE_LAST_WITH_IMMEDIATE    = 0x09,
+       IB_OPCODE_RDMA_WRITE_ONLY                   = 0x0a,
+       IB_OPCODE_RDMA_WRITE_ONLY_WITH_IMMEDIATE    = 0x0b,
+       IB_OPCODE_RDMA_READ_REQUEST                 = 0x0c,
+       IB_OPCODE_RDMA_READ_RESPONSE_FIRST          = 0x0d,
+       IB_OPCODE_RDMA_READ_RESPONSE_MIDDLE         = 0x0e,
+       IB_OPCODE_RDMA_READ_RESPONSE_LAST           = 0x0f,
+       IB_OPCODE_RDMA_READ_RESPONSE_ONLY           = 0x10,
+       IB_OPCODE_ACKNOWLEDGE                       = 0x11,
+       IB_OPCODE_ATOMIC_ACKNOWLEDGE                = 0x12,
+       IB_OPCODE_COMPARE_SWAP                      = 0x13,
+       IB_OPCODE_FETCH_ADD                         = 0x14,
+
+       /* real constants follow -- see comment about above IB_OPCODE()
+          macro for more details */
+
+       /* RC */
+       IB_OPCODE(RC, SEND_FIRST),
+       IB_OPCODE(RC, SEND_MIDDLE),
+       IB_OPCODE(RC, SEND_LAST),
+       IB_OPCODE(RC, SEND_LAST_WITH_IMMEDIATE),
+       IB_OPCODE(RC, SEND_ONLY),
+       IB_OPCODE(RC, SEND_ONLY_WITH_IMMEDIATE),
+       IB_OPCODE(RC, RDMA_WRITE_FIRST),
+       IB_OPCODE(RC, RDMA_WRITE_MIDDLE),
+       IB_OPCODE(RC, RDMA_WRITE_LAST),
+       IB_OPCODE(RC, RDMA_WRITE_LAST_WITH_IMMEDIATE),
+       IB_OPCODE(RC, RDMA_WRITE_ONLY),
+       IB_OPCODE(RC, RDMA_WRITE_ONLY_WITH_IMMEDIATE),
+       IB_OPCODE(RC, RDMA_READ_REQUEST),
+       IB_OPCODE(RC, RDMA_READ_RESPONSE_FIRST),
+       IB_OPCODE(RC, RDMA_READ_RESPONSE_MIDDLE),
+       IB_OPCODE(RC, RDMA_READ_RESPONSE_LAST),
+       IB_OPCODE(RC, RDMA_READ_RESPONSE_ONLY),
+       IB_OPCODE(RC, ACKNOWLEDGE),
+       IB_OPCODE(RC, ATOMIC_ACKNOWLEDGE),
+       IB_OPCODE(RC, COMPARE_SWAP),
+       IB_OPCODE(RC, FETCH_ADD),
+
+       /* UC */
+       IB_OPCODE(UC, SEND_FIRST),
+       IB_OPCODE(UC, SEND_MIDDLE),
+       IB_OPCODE(UC, SEND_LAST),
+       IB_OPCODE(UC, SEND_LAST_WITH_IMMEDIATE),
+       IB_OPCODE(UC, SEND_ONLY),
+       IB_OPCODE(UC, SEND_ONLY_WITH_IMMEDIATE),
+       IB_OPCODE(UC, RDMA_WRITE_FIRST),
+       IB_OPCODE(UC, RDMA_WRITE_MIDDLE),
+       IB_OPCODE(UC, RDMA_WRITE_LAST),
+       IB_OPCODE(UC, RDMA_WRITE_LAST_WITH_IMMEDIATE),
+       IB_OPCODE(UC, RDMA_WRITE_ONLY),
+       IB_OPCODE(UC, RDMA_WRITE_ONLY_WITH_IMMEDIATE),
+
+       /* RD */
+       IB_OPCODE(RD, SEND_FIRST),
+       IB_OPCODE(RD, SEND_MIDDLE),
+       IB_OPCODE(RD, SEND_LAST),
+       IB_OPCODE(RD, SEND_LAST_WITH_IMMEDIATE),
+       IB_OPCODE(RD, SEND_ONLY),
+       IB_OPCODE(RD, SEND_ONLY_WITH_IMMEDIATE),
+       IB_OPCODE(RD, RDMA_WRITE_FIRST),
+       IB_OPCODE(RD, RDMA_WRITE_MIDDLE),
+       IB_OPCODE(RD, RDMA_WRITE_LAST),
+       IB_OPCODE(RD, RDMA_WRITE_LAST_WITH_IMMEDIATE),
+       IB_OPCODE(RD, RDMA_WRITE_ONLY),
+       IB_OPCODE(RD, RDMA_WRITE_ONLY_WITH_IMMEDIATE),
+       IB_OPCODE(RD, RDMA_READ_REQUEST),
+       IB_OPCODE(RD, RDMA_READ_RESPONSE_FIRST),
+       IB_OPCODE(RD, RDMA_READ_RESPONSE_MIDDLE),
+       IB_OPCODE(RD, RDMA_READ_RESPONSE_LAST),
+       IB_OPCODE(RD, RDMA_READ_RESPONSE_ONLY),
+       IB_OPCODE(RD, ACKNOWLEDGE),
+       IB_OPCODE(RD, ATOMIC_ACKNOWLEDGE),
+       IB_OPCODE(RD, COMPARE_SWAP),
+       IB_OPCODE(RD, FETCH_ADD),
+
+       /* UD */
+       IB_OPCODE(UD, SEND_ONLY),
+       IB_OPCODE(UD, SEND_ONLY_WITH_IMMEDIATE)
+};
+
+enum {
+       IB_LNH_RAW        = 0,
+       IB_LNH_IP         = 1,
+       IB_LNH_IBA_LOCAL  = 2,
+       IB_LNH_IBA_GLOBAL = 3
+};
+
+struct ib_unpacked_lrh {
+       u8        virtual_lane;
+       u8        link_version;
+       u8        service_level;
+       u8        link_next_header;
+       __be16    destination_lid;
+       __be16    packet_length;
+       __be16    source_lid;
+};
+
+struct ib_unpacked_grh {
+       u8           ip_version;
+       u8           traffic_class;
+       __be32       flow_label;
+       __be16       payload_length;
+       u8           next_header;
+       u8           hop_limit;
+       union ib_gid source_gid;
+       union ib_gid destination_gid;
+};
+
+struct ib_unpacked_bth {
+       u8           opcode;
+       u8           solicited_event;
+       u8           mig_req;
+       u8           pad_count;
+       u8           transport_header_version;
+       __be16       pkey;
+       __be32       destination_qpn;
+       u8           ack_req;
+       __be32       psn;
+};
+
+struct ib_unpacked_deth {
+       __be32       qkey;
+       __be32       source_qpn;
+};
+
+struct ib_ud_header {
+       struct ib_unpacked_lrh  lrh;
+       int                     grh_present;
+       struct ib_unpacked_grh  grh;
+       struct ib_unpacked_bth  bth;
+       struct ib_unpacked_deth deth;
+       int                     immediate_present;
+       __be32                  immediate_data;
+};
+
+void ib_pack(const struct ib_field        *desc,
+            int                           desc_len,
+            void                         *structure,
+            void                         *buf);
+
+void ib_unpack(const struct ib_field        *desc,
+              int                           desc_len,
+              void                         *buf,
+              void                         *structure);
+
+void ib_ud_header_init(int                        payload_bytes,
+                      int                 grh_present,
+                      struct ib_ud_header *header);
+
+int ib_ud_header_pack(struct ib_ud_header *header,
+                     void                *buf);
+
+int ib_ud_header_unpack(void                *buf,
+                       struct ib_ud_header *header);
+
+#endif /* IB_PACK_H */
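
Two notes on the declarations above. First, the IB_OPCODE() macro just adds the transport base to the operation code, so IB_OPCODE(UD, SEND_ONLY) defines IB_OPCODE_UD_SEND_ONLY = 0x60 + 0x04 = 0x64. Second, ib_ud_header_init()/ib_ud_header_pack() build the wire-format LRH/BTH/DETH for a UD send; a hedged sketch follows, where the function name and field values are placeholders (the real caller is the driver's UD path) and IB_QP1_QKEY comes from ib_mad.h:

/* Illustrative only: build and pack a UD header (no GRH) into a caller buffer
 * of at least IB_LRH_BYTES + IB_BTH_BYTES + IB_DETH_BYTES bytes. */
static int example_pack_ud_header(u8 *buf)
{
        struct ib_ud_header hdr;

        ib_ud_header_init(0 /* payload bytes */, 0 /* no GRH */, &hdr);
        hdr.lrh.destination_lid = cl_hton16(0x0001);            /* placeholder DLID */
        hdr.bth.opcode          = IB_OPCODE_UD_SEND_ONLY;       /* 0x64, see above  */
        hdr.deth.qkey           = IB_QP1_QKEY;                  /* from ib_mad.h    */

        return ib_ud_header_pack(&hdr, buf);    /* returns the packed length */
}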
diff --git a/trunk/hw/mthca/kernel/ib_smi.h b/trunk/hw/mthca/kernel/ib_smi.h
new file mode 100644 (file)
index 0000000..f47e132
--- /dev/null
@@ -0,0 +1,95 @@
+/*
+ * Copyright (c) 2004 Mellanox Technologies Ltd.  All rights reserved.
+ * Copyright (c) 2004 Infinicon Corporation.  All rights reserved.
+ * Copyright (c) 2004 Intel Corporation.  All rights reserved.
+ * Copyright (c) 2004 Topspin Corporation.  All rights reserved.
+ * Copyright (c) 2004 Voltaire Corporation.  All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id: ib_smi.h 2928 2005-07-28 18:45:56Z sean.hefty $
+ */
+
+#if !defined( IB_SMI_H )
+#define IB_SMI_H
+
+#include <ib_mad.h>
+
+#define IB_SMP_DATA_SIZE                       64
+#define IB_SMP_MAX_PATH_HOPS                   64
+
+#pragma pack(push,1)
+struct ib_smp {
+       u8      base_version;
+       u8      mgmt_class;
+       u8      class_version;
+       u8      method;
+       __be16  status;
+       u8      hop_ptr;
+       u8      hop_cnt;
+       __be64  tid;
+       __be16  attr_id;
+       __be16  resv;
+       __be32  attr_mod;
+       __be64  mkey;
+       __be16  dr_slid;
+       __be16  dr_dlid;
+       u8      reserved[28];
+       u8      data[IB_SMP_DATA_SIZE];
+       u8      initial_path[IB_SMP_MAX_PATH_HOPS];
+       u8      return_path[IB_SMP_MAX_PATH_HOPS];
+};
+#pragma pack(pop)
+
+
+/* Subnet management attributes */
+#define IB_SMP_ATTR_NOTICE                     cl_hton16(0x0002)
+#define IB_SMP_ATTR_NODE_DESC                  cl_hton16(0x0010)
+#define IB_SMP_ATTR_NODE_INFO                  cl_hton16(0x0011)
+#define IB_SMP_ATTR_SWITCH_INFO                        cl_hton16(0x0012)
+#define IB_SMP_ATTR_GUID_INFO                  cl_hton16(0x0014)
+#define IB_SMP_ATTR_PORT_INFO                  cl_hton16(0x0015)
+#define IB_SMP_ATTR_PKEY_TABLE                 cl_hton16(0x0016)
+#define IB_SMP_ATTR_SL_TO_VL_TABLE             cl_hton16(0x0017)
+#define IB_SMP_ATTR_VL_ARB_TABLE               cl_hton16(0x0018)
+#define IB_SMP_ATTR_LINEAR_FORWARD_TABLE       cl_hton16(0x0019)
+#define IB_SMP_ATTR_RANDOM_FORWARD_TABLE       cl_hton16(0x001A)
+#define IB_SMP_ATTR_MCAST_FORWARD_TABLE                cl_hton16(0x001B)
+#define IB_SMP_ATTR_SM_INFO                    cl_hton16(0x0020)
+#define IB_SMP_ATTR_VENDOR_DIAG                        cl_hton16(0x0030)
+#define IB_SMP_ATTR_LED_INFO                   cl_hton16(0x0031)
+#define IB_SMP_ATTR_VENDOR_MASK                        cl_hton16(0xFF00)
+
+static inline u8
+ib_get_smp_direction(struct ib_smp *smp)
+{
+       return (u8)((smp->status & IB_SMP_DIRECTION) == IB_SMP_DIRECTION);
+}
+
+#endif /* IB_SMI_H */
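
ib_get_smp_direction() above reports the direction (D) bit of a directed-route SMP; the IB_SMP_DIRECTION mask itself is defined in the common IB headers, not in this file. A small sketch of the typical classification done when an SMP arrives; the function name is illustrative:

/* Illustrative only: classify an incoming SMP using the declarations above. */
static void example_handle_smp(struct ib_smp *smp)
{
        if (smp->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE &&
            ib_get_smp_direction(smp)) {
                /* returning directed-route SMP: travelling back toward the
                 * requester along return_path[] */
        } else if (smp->attr_id == IB_SMP_ATTR_PORT_INFO) {
                /* LID-routed PortInfo request; the attribute lives in smp->data */
        }
}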
diff --git a/trunk/hw/mthca/kernel/ib_verbs.h b/trunk/hw/mthca/kernel/ib_verbs.h
new file mode 100644 (file)
index 0000000..58e73aa
--- /dev/null
@@ -0,0 +1,1350 @@
+/*
+ * Copyright (c) 2004 Mellanox Technologies Ltd.  All rights reserved.
+ * Copyright (c) 2004 Infinicon Corporation.  All rights reserved.
+ * Copyright (c) 2004 Intel Corporation.  All rights reserved.
+ * Copyright (c) 2004 Topspin Corporation.  All rights reserved.
+ * Copyright (c) 2004 Voltaire Corporation.  All rights reserved.
+ * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
+ * Copyright (c) 2005 Cisco Systems.  All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id: ib_verbs.h 2975 2005-08-04 04:59:37Z roland $
+ */
+
+#if !defined(IB_VERBS_H)
+#define IB_VERBS_H
+
+#include <iba/ib_types.h>
+#include <iba/ib_ci.h>
+#include <mt_l2w.h>
+
+union ib_gid {
+       u8      raw[16];
+       struct {
+               __be64  subnet_prefix;
+               __be64  interface_id;
+       } global;
+};
+
+enum ib_node_type {
+       IB_NODE_CA      = 1,
+       IB_NODE_SWITCH,
+       IB_NODE_ROUTER
+};
+
+enum ib_device_cap_flags {
+       IB_DEVICE_RESIZE_MAX_WR         = 1,
+       IB_DEVICE_BAD_PKEY_CNTR         = (1<<1),
+       IB_DEVICE_BAD_QKEY_CNTR         = (1<<2),
+       IB_DEVICE_RAW_MULTI             = (1<<3),
+       IB_DEVICE_AUTO_PATH_MIG         = (1<<4),
+       IB_DEVICE_CHANGE_PHY_PORT       = (1<<5),
+       IB_DEVICE_UD_AV_PORT_ENFORCE    = (1<<6),
+       IB_DEVICE_CURR_QP_STATE_MOD     = (1<<7),
+       IB_DEVICE_SHUTDOWN_PORT         = (1<<8),
+       IB_DEVICE_INIT_TYPE             = (1<<9),
+       IB_DEVICE_PORT_ACTIVE_EVENT     = (1<<10),
+       IB_DEVICE_SYS_IMAGE_GUID        = (1<<11),
+       IB_DEVICE_RC_RNR_NAK_GEN        = (1<<12),
+       IB_DEVICE_SRQ_RESIZE            = (1<<13),
+       IB_DEVICE_N_NOTIFY_CQ           = (1<<14),
+};
+
+struct ib_device_attr {
+       u64                     fw_ver;
+       __be64                  sys_image_guid;
+       u64                     max_mr_size;
+       u64                     page_size_cap;
+       u32                     vendor_id;
+       u32                     vendor_part_id;
+       u32                     hw_ver;
+       int                     max_qp;
+       int                     max_qp_wr;
+       int                     device_cap_flags;
+       int                     max_sge;
+       int                     max_sge_rd;
+       int                     max_cq;
+       int                     max_cqe;
+       int                     max_mr;
+       int                     max_pd;
+       int                     max_qp_rd_atom;
+       int                     max_ee_rd_atom;
+       int                     max_res_rd_atom;
+       int                     max_qp_init_rd_atom;
+       int                     max_ee_init_rd_atom;
+       enum ib_atomic_cap      atomic_cap;
+       int                     max_ee;
+       int                     max_rdd;
+       int                     max_mw;
+       int                     max_raw_ipv6_qp;
+       int                     max_raw_ethy_qp;
+       int                     max_mcast_grp;
+       int                     max_mcast_qp_attach;
+       int                     max_total_mcast_qp_attach;
+       int                     max_ah;
+       int                     max_fmr;
+       int                     max_map_per_fmr;
+       int                     max_srq;
+       int                     max_srq_wr;
+       int                     max_srq_sge;
+       u16                     max_pkeys;
+       u8                      local_ca_ack_delay;
+};
+
+static inline int ib_mtu_enum_to_int(int mtu)
+{
+       switch (mtu) {
+       case IB_MTU_256:  return  256;
+       case IB_MTU_512:  return  512;
+       case IB_MTU_1024: return 1024;
+       case IB_MTU_2048: return 2048;
+       case IB_MTU_4096: return 4096;
+       default:          return -1;
+       }
+}
+
+enum ib_port_state {
+       IB_PORT_NOP             = 0,
+       IB_PORT_DOWN            = 1,
+       IB_PORT_INIT            = 2,
+       IB_PORT_ARMED           = 3,
+       IB_PORT_ACTIVE          = 4,
+       IB_PORT_ACTIVE_DEFER    = 5
+};
+
+enum ib_port_cap_flags {
+       IB_PORT_SM                              = 1 <<  1,
+       IB_PORT_NOTICE_SUP                      = 1 <<  2,
+       IB_PORT_TRAP_SUP                        = 1 <<  3,
+       IB_PORT_OPT_IPD_SUP                     = 1 <<  4,
+       IB_PORT_AUTO_MIGR_SUP                   = 1 <<  5,
+       IB_PORT_SL_MAP_SUP                      = 1 <<  6,
+       IB_PORT_MKEY_NVRAM                      = 1 <<  7,
+       IB_PORT_PKEY_NVRAM                      = 1 <<  8,
+       IB_PORT_LED_INFO_SUP                    = 1 <<  9,
+       IB_PORT_SM_DISABLED                     = 1 << 10,
+       IB_PORT_SYS_IMAGE_GUID_SUP              = 1 << 11,
+       IB_PORT_PKEY_SW_EXT_PORT_TRAP_SUP       = 1 << 12,
+       IB_PORT_CM_SUP                          = 1 << 16,
+       IB_PORT_SNMP_TUNNEL_SUP                 = 1 << 17,
+       IB_PORT_REINIT_SUP                      = 1 << 18,
+       IB_PORT_DEVICE_MGMT_SUP                 = 1 << 19,
+       IB_PORT_VENDOR_CLASS_SUP                = 1 << 20,
+       IB_PORT_DR_NOTICE_SUP                   = 1 << 21,
+       IB_PORT_CAP_MASK_NOTICE_SUP             = 1 << 22,
+       IB_PORT_BOOT_MGMT_SUP                   = 1 << 23,
+       IB_PORT_LINK_LATENCY_SUP                = 1 << 24,
+       IB_PORT_CLIENT_REG_SUP                  = 1 << 25
+};
+
+enum ib_port_width {
+       IB_WIDTH_1X     = 1,
+       IB_WIDTH_4X     = 2,
+       IB_WIDTH_8X     = 4,
+       IB_WIDTH_12X    = 8
+};
+
+static inline int ib_width_enum_to_int(enum ib_port_width width)
+{
+       switch (width) {
+       case IB_WIDTH_1X:  return  1;
+       case IB_WIDTH_4X:  return  4;
+       case IB_WIDTH_8X:  return  8;
+       case IB_WIDTH_12X: return 12;
+       default:          return -1;
+       }
+}
+
+struct ib_port_attr {
+       enum ib_port_state      state;
+       enum ib_mtu             max_mtu;
+       enum ib_mtu             active_mtu;
+       int                     gid_tbl_len;
+       u32                     port_cap_flags;
+       u32                     max_msg_sz;
+       u32                     bad_pkey_cntr;
+       u32                     qkey_viol_cntr;
+       u16                     pkey_tbl_len;
+       u16                     lid;
+       u16                     sm_lid;
+       u8                      lmc;
+       u8                      max_vl_num;
+       u8                      sm_sl;
+       u8                      subnet_timeout;
+       u8                      init_type_reply;
+       u8                      active_width;
+       u8                      active_speed;
+       u8                      phys_state;
+};
+
+enum ib_device_modify_flags {
+       IB_DEVICE_MODIFY_SYS_IMAGE_GUID = 1
+};
+
+struct ib_device_modify {
+       u64     sys_image_guid;
+};
+
+enum ib_port_modify_flags {
+       IB_PORT_SHUTDOWN                = 1,
+       IB_PORT_INIT_TYPE               = (1<<2),
+       IB_PORT_RESET_QKEY_CNTR         = (1<<3)
+};
+
+struct ib_port_modify {
+       u32     set_port_cap_mask;
+       u32     clr_port_cap_mask;
+       u8      init_type;
+};
+
+enum ib_event_type {
+       IB_EVENT_CQ_ERR                 = IB_AE_CQ_ERROR,
+       IB_EVENT_QP_FATAL               = IB_AE_QP_FATAL,
+       IB_EVENT_QP_REQ_ERR             = IB_AE_WQ_REQ_ERROR,
+       IB_EVENT_QP_ACCESS_ERR          = IB_AE_WQ_ACCESS_ERROR,
+       IB_EVENT_COMM_EST               = IB_AE_QP_COMM,
+       IB_EVENT_SQ_DRAINED             = IB_AE_SQ_DRAINED,
+       IB_EVENT_PATH_MIG               = IB_AE_QP_APM,
+       IB_EVENT_PATH_MIG_ERR           = IB_AE_QP_APM_ERROR,
+       IB_EVENT_DEVICE_FATAL           = IB_AE_LOCAL_FATAL,
+       IB_EVENT_PORT_ACTIVE            = IB_AE_PORT_ACTIVE,
+       IB_EVENT_PORT_ERR               = IB_AE_PORT_DOWN,
+       IB_EVENT_LID_CHANGE             = IB_AE_UNKNOWN + 1,
+       IB_EVENT_PKEY_CHANGE,
+       IB_EVENT_SM_CHANGE,
+       IB_EVENT_SRQ_ERR,
+       IB_EVENT_SRQ_LIMIT_REACHED,
+       IB_EVENT_QP_LAST_WQE_REACHED
+};
+
+struct ib_event {
+       struct ib_device        *device;
+       union {
+               struct ib_cq    *cq;
+               struct ib_qp    *qp;
+               struct ib_srq   *srq;
+               u8              port_num;
+       } element;
+       enum ib_event_type      event;
+};
+
+struct ib_event_handler {
+       struct ib_device *device;
+       void            (*handler)(struct ib_event_handler *, struct ib_event *);
+       struct list_head  list;
+};
+
+#define INIT_IB_EVENT_HANDLER(_ptr, _device, _handler)         \
+       (_ptr)->device  = _device;                      \
+       (_ptr)->handler = _handler;                     \
+       INIT_LIST_HEAD(&(_ptr)->list)                   
+
+struct ib_global_route {
+       union ib_gid    dgid;
+       u32             flow_label;
+       u8              sgid_index;
+       u8              hop_limit;
+       u8              traffic_class;
+};
+
+struct ib_grh {
+       __be32          version_tclass_flow;
+       __be16          paylen;
+       u8              next_hdr;
+       u8              hop_limit;
+       union ib_gid    sgid;
+       union ib_gid    dgid;
+};
+
+enum {
+       IB_MULTICAST_QPN = 0xffffff
+};
+
+enum ib_ah_flags {
+       IB_AH_GRH       = 1
+};
+
+struct ib_ah_attr {
+       struct ib_global_route  grh;
+       u16                     dlid;
+       u8                      sl;
+       u8                      src_path_bits;
+       u8                      static_rate;
+       u8                      ah_flags;
+       u8                      port_num;
+};
+
+#ifdef WIN_TO_BE_REMOVE
+// defined in ib_types.h
+enum ib_wc_status {
+       IB_WC_SUCCESS,
+       IB_WC_LOC_LEN_ERR,
+       IB_WC_LOC_QP_OP_ERR,
+       IB_WC_LOC_EEC_OP_ERR,
+       IB_WC_LOC_PROT_ERR,
+       IB_WC_WR_FLUSH_ERR,
+       IB_WC_MW_BIND_ERR,
+       IB_WC_BAD_RESP_ERR,
+       IB_WC_LOC_ACCESS_ERR,
+       IB_WC_REM_INV_REQ_ERR,
+       IB_WC_REM_ACCESS_ERR,
+       IB_WC_REM_OP_ERR,
+       IB_WC_RETRY_EXC_ERR,
+       IB_WC_RNR_RETRY_EXC_ERR,
+       IB_WC_LOC_RDD_VIOL_ERR,
+       IB_WC_REM_INV_RD_REQ_ERR,
+       IB_WC_REM_ABORT_ERR,
+       IB_WC_INV_EECN_ERR,
+       IB_WC_INV_EEC_STATE_ERR,
+       IB_WC_FATAL_ERR,
+       IB_WC_RESP_TIMEOUT_ERR,
+       IB_WC_GENERAL_ERR
+};
+#endif
+
+enum ib_cq_notify {
+       IB_CQ_SOLICITED,
+       IB_CQ_NEXT_COMP
+};
+
+enum ib_srq_attr_mask {
+       IB_SRQ_MAX_WR   = 1 << 0,
+       IB_SRQ_LIMIT    = 1 << 1,
+};
+
+struct ib_srq_attr {
+       u32     max_wr;
+       u32     max_sge;
+       u32     srq_limit;
+};
+
+struct ib_srq_init_attr {
+       void                  (*event_handler)(struct ib_event *, void *);
+       void                   *srq_context;
+       struct ib_srq_attr      attr;
+};
+
+struct ib_qp_cap {
+       u32     max_send_wr;
+       u32     max_recv_wr;
+       u32     max_send_sge;
+       u32     max_recv_sge;
+       u32     max_inline_data;
+};
+
+enum ib_sig_type {
+       IB_SIGNAL_ALL_WR,
+       IB_SIGNAL_REQ_WR
+};
+
+struct ib_qp_init_attr {
+       void                  (*event_handler)(struct ib_event *, void *);
+       void                   *qp_context;
+       struct ib_cq           *send_cq;
+       struct ib_cq           *recv_cq;
+       struct ib_srq          *srq;
+       struct ib_qp_cap        cap;
+       enum ib_sig_type        sq_sig_type;
+       enum ib_qp_type_t               qp_type;
+       u8                      port_num; /* special QP types only */
+};
+
+enum ib_rnr_timeout {
+       IB_RNR_TIMER_655_36 =  0,
+       IB_RNR_TIMER_000_01 =  1,
+       IB_RNR_TIMER_000_02 =  2,
+       IB_RNR_TIMER_000_03 =  3,
+       IB_RNR_TIMER_000_04 =  4,
+       IB_RNR_TIMER_000_06 =  5,
+       IB_RNR_TIMER_000_08 =  6,
+       IB_RNR_TIMER_000_12 =  7,
+       IB_RNR_TIMER_000_16 =  8,
+       IB_RNR_TIMER_000_24 =  9,
+       IB_RNR_TIMER_000_32 = 10,
+       IB_RNR_TIMER_000_48 = 11,
+       IB_RNR_TIMER_000_64 = 12,
+       IB_RNR_TIMER_000_96 = 13,
+       IB_RNR_TIMER_001_28 = 14,
+       IB_RNR_TIMER_001_92 = 15,
+       IB_RNR_TIMER_002_56 = 16,
+       IB_RNR_TIMER_003_84 = 17,
+       IB_RNR_TIMER_005_12 = 18,
+       IB_RNR_TIMER_007_68 = 19,
+       IB_RNR_TIMER_010_24 = 20,
+       IB_RNR_TIMER_015_36 = 21,
+       IB_RNR_TIMER_020_48 = 22,
+       IB_RNR_TIMER_030_72 = 23,
+       IB_RNR_TIMER_040_96 = 24,
+       IB_RNR_TIMER_061_44 = 25,
+       IB_RNR_TIMER_081_92 = 26,
+       IB_RNR_TIMER_122_88 = 27,
+       IB_RNR_TIMER_163_84 = 28,
+       IB_RNR_TIMER_245_76 = 29,
+       IB_RNR_TIMER_327_68 = 30,
+       IB_RNR_TIMER_491_52 = 31
+};
+
+enum ib_qp_attr_mask {
+       IB_QP_STATE                     = 1,
+       IB_QP_CUR_STATE                 = (1<<1),
+       IB_QP_EN_SQD_ASYNC_NOTIFY       = (1<<2),
+       IB_QP_ACCESS_FLAGS              = (1<<3),
+       IB_QP_PKEY_INDEX                = (1<<4),
+       IB_QP_PORT                      = (1<<5),
+       IB_QP_QKEY                      = (1<<6),
+       IB_QP_AV                        = (1<<7),
+       IB_QP_PATH_MTU                  = (1<<8),
+       IB_QP_TIMEOUT                   = (1<<9),
+       IB_QP_RETRY_CNT                 = (1<<10),
+       IB_QP_RNR_RETRY                 = (1<<11),
+       IB_QP_RQ_PSN                    = (1<<12),
+       IB_QP_MAX_QP_RD_ATOMIC          = (1<<13),
+       IB_QP_ALT_PATH                  = (1<<14),
+       IB_QP_MIN_RNR_TIMER             = (1<<15),
+       IB_QP_SQ_PSN                    = (1<<16),
+       IB_QP_MAX_DEST_RD_ATOMIC        = (1<<17),
+       IB_QP_PATH_MIG_STATE            = (1<<18),
+       IB_QP_CAP                       = (1<<19),
+       IB_QP_DEST_QPN                  = (1<<20)
+};
+
+//TODO: these literals are also defined in ib_types.h, but with DIFFERENT values!
+enum ib_qp_state {
+       IBQPS_RESET,
+       IBQPS_INIT,
+       IBQPS_RTR,
+       IBQPS_RTS,
+       IBQPS_SQD,
+       IBQPS_SQE,
+       IBQPS_ERR
+};
+
+
+struct ib_qp_attr {
+       enum ib_qp_state        qp_state;
+       enum ib_qp_state        cur_qp_state;
+       enum ib_mtu             path_mtu;
+       ib_apm_state_t  path_mig_state;
+       u32                     qkey;
+       u32                     rq_psn;
+       u32                     sq_psn;
+       u32                     dest_qp_num;
+       int                     qp_access_flags;
+       struct ib_qp_cap        cap;
+       struct ib_ah_attr       ah_attr;
+       struct ib_ah_attr       alt_ah_attr;
+       u16                     pkey_index;
+       u16                     alt_pkey_index;
+       u8                      en_sqd_async_notify;
+       u8                      sq_draining;
+       u8                      max_rd_atomic;
+       u8                      max_dest_rd_atomic;
+       u8                      min_rnr_timer;
+       u8                      port_num;
+       u8                      timeout;
+       u8                      retry_cnt;
+       u8                      rnr_retry;
+       u8                      alt_port_num;
+       u8                      alt_timeout;
+};
+
+struct ib_sge {
+       u64     addr;
+       u32     length;
+       u32     lkey;
+};
+
+
+typedef enum MTHCA_QP_ACCESS_FLAGS {
+       MTHCA_ACCESS_LOCAL_WRITE        = 1,
+       MTHCA_ACCESS_REMOTE_WRITE       = (1<<1),
+       MTHCA_ACCESS_REMOTE_READ        = (1<<2),
+       MTHCA_ACCESS_REMOTE_ATOMIC      = (1<<3),
+       MTHCA_ACCESS_MW_BIND    = (1<<4)
+} mthca_qp_access_t;
+
+struct ib_phys_buf {
+       u64      addr;
+       u64      size;
+};
+
+struct ib_mr_attr {
+       struct ib_pd    *pd;
+       u64             device_virt_addr;
+       u64             size;
+       mthca_qp_access_t               mr_access_flags;
+       u32             lkey;
+       u32             rkey;
+};
+
+enum ib_mr_rereg_flags {
+       IB_MR_REREG_TRANS       = 1,
+       IB_MR_REREG_PD          = (1<<1),
+       IB_MR_REREG_ACCESS      = (1<<2)
+};
+
+struct ib_mw_bind {
+       struct ib_mr   *mr;
+       u64             wr_id;
+       u64             addr;
+       u32             length;
+       int             send_flags;
+       int             mw_access_flags;
+};
+
+struct ib_fmr_attr {
+       int     max_pages;
+       int     max_maps;
+       u8      page_size;
+};
+
+struct ib_ucontext {
+       struct ib_device  *device;
+       PVOID   user_uar;
+       struct ib_pd *pd;
+       atomic_t                usecnt; /* count all resources */
+       ULONG           is_removing;
+};
+
+struct ib_uobject {
+       u64                     user_handle;    /* handle given to us by userspace */
+       struct ib_ucontext     *context;        /* associated user context */
+       struct list_head        list;           /* link to context's list */
+       u32                     id;             /* index into kernel idr */
+};
+
+struct ib_umem {
+       u64             user_base;
+       u64             virt_base;
+       u64                     length;
+       int                     offset;
+       int                     page_size;
+       int                     writable;
+       struct list_head        chunk_list;
+};
+
+#pragma warning( disable : 4200 )
+struct ib_umem_chunk {
+       struct list_head        list;
+       int                     nents;
+       int                     nmap;
+       struct scatterlist      page_list[0];
+};
+#pragma warning( default : 4200 )
+
+struct ib_udata {
+       void *inbuf;
+       void *outbuf;
+       size_t       inlen;
+       size_t       outlen;
+};
+
+#define IB_UMEM_MAX_PAGE_CHUNK                                         \
+       ((PAGE_SIZE - offsetof(struct ib_umem_chunk, page_list)) /      \
+        ((char *) &((struct ib_umem_chunk *) 0)->page_list[1] -        \
+         (char *) &((struct ib_umem_chunk *) 0)->page_list[0]))
+
+struct ib_umem_object {
+       struct ib_uobject       uobject;
+       struct ib_umem          umem;
+};
+
+struct ib_pd {
+       struct ib_device       *device;
+       struct ib_ucontext      *ucontext;
+       atomic_t                usecnt; /* count all resources */
+};
+
+struct ib_ah {
+       struct ib_device        *device;
+       struct ib_pd            *pd;
+       struct ib_ucontext      *ucontext;
+       struct ib_mr *ib_mr;
+};
+
+typedef void (*ib_comp_handler)(struct ib_cq *cq, void *cq_context);
+
+struct ib_cq {
+       struct ib_device       *device;
+       struct ib_ucontext      *ucontext;
+       struct ib_mr *ib_mr;
+       ib_comp_handler         comp_handler;
+       void                  (*event_handler)(struct ib_event *, void *);
+       void *                  cq_context;
+       int                     cqe;
+       atomic_t                usecnt; /* count number of work queues */
+};
+
+struct ib_srq {
+       struct ib_device       *device;
+       struct ib_pd           *pd;
+       struct ib_uobject      *uobject;
+       void                  (*event_handler)(struct ib_event *, void *);
+       void                   *srq_context;
+       atomic_t                usecnt;
+};
+
+struct ib_qp {
+       struct ib_device       *device;
+       struct ib_pd           *pd;
+       struct ib_cq           *send_cq;
+       struct ib_cq           *recv_cq;
+       struct ib_srq          *srq;
+       struct ib_ucontext      *ucontext;
+       struct ib_mr *ib_mr;
+       void                  (*event_handler)(struct ib_event *, void *);
+       void                   *qp_context;
+       u32                     qp_num;
+       enum ib_qp_type_t               qp_type;
+};
+
+struct ib_mr {
+       struct ib_device  *device;
+       struct ib_pd      *pd;
+       u32                lkey;
+       u32                rkey;
+       atomic_t           usecnt; /* count number of MWs */
+};
+
+struct ib_mw {
+       struct ib_device        *device;
+       struct ib_pd            *pd;
+       u32                     rkey;
+};
+
+struct ib_fmr {
+       struct ib_device        *device;
+       struct ib_pd            *pd;
+       struct list_head        list;
+       u32                     lkey;
+       u32                     rkey;
+};
+
+struct ib_mad;
+struct ib_grh;
+
+enum ib_process_mad_flags {
+       IB_MAD_IGNORE_MKEY      = 1,
+       IB_MAD_IGNORE_BKEY      = 2,
+       IB_MAD_IGNORE_ALL       = IB_MAD_IGNORE_MKEY | IB_MAD_IGNORE_BKEY
+};
+
+enum ib_mad_result {
+       IB_MAD_RESULT_FAILURE  = 0,      /* (!SUCCESS is the important flag) */
+       IB_MAD_RESULT_SUCCESS  = 1 << 0, /* MAD was successfully processed   */
+       IB_MAD_RESULT_REPLY    = 1 << 1, /* Reply packet needs to be sent    */
+       IB_MAD_RESULT_CONSUMED = 1 << 2  /* Packet consumed: stop processing */
+};
+
+#define IB_DEVICE_NAME_MAX 64
+
+struct ib_cache {
+       rwlock_t                lock;
+       struct ib_event_handler event_handler;
+       struct ib_pkey_cache  **pkey_cache;
+       struct ib_gid_cache   **gid_cache;
+};
+
+struct mthca_dev;
+
+struct ib_device {
+       struct mthca_dev                *mdev;
+
+       char                          name[IB_DEVICE_NAME_MAX];
+
+       struct list_head              event_handler_list;
+       spinlock_t                    event_handler_lock;
+
+       struct list_head              core_list;
+       struct list_head              client_data_list;
+       spinlock_t                    client_data_lock;
+
+       struct ib_cache               cache;
+
+       u32                           flags;
+
+       int                        (*query_device)(struct ib_device *device,
+                                                  struct ib_device_attr *device_attr);
+       int                        (*query_port)(struct ib_device *device,
+                                                u8 port_num,
+                                                struct ib_port_attr *port_attr);
+       int                        (*query_gid)(struct ib_device *device,
+                                               u8 port_num, int index,
+                                               union ib_gid *gid);
+       int                        (*query_pkey)(struct ib_device *device,
+                                                u8 port_num, u16 index, u16 *pkey);
+       int                        (*modify_device)(struct ib_device *device,
+                                                   int device_modify_mask,
+                                                   struct ib_device_modify *device_modify);
+       int                        (*modify_port)(struct ib_device *device,
+                                                 u8 port_num, int port_modify_mask,
+                                                 struct ib_port_modify *port_modify);
+       struct ib_ucontext *       (*alloc_ucontext)(struct ib_device *device,
+                                                    ci_umv_buf_t* const        p_umv_buf);
+       int                        (*dealloc_ucontext)(struct ib_ucontext *context);
+       struct ib_pd *             (*alloc_pd)(struct ib_device *device,
+                                              struct ib_ucontext *context,
+                                              ci_umv_buf_t* const      p_umv_buf);
+       int                        (*dealloc_pd)(struct ib_pd *pd);
+       struct ib_ah *             (*create_ah)(struct ib_pd *pd,
+                                               struct ib_ah_attr *ah_attr);
+       int                        (*modify_ah)(struct ib_ah *ah,
+                                               struct ib_ah_attr *ah_attr);
+       int                        (*query_ah)(struct ib_ah *ah,
+                                              struct ib_ah_attr *ah_attr);
+       int                        (*destroy_ah)(struct ib_ah *ah);
+       struct ib_srq *            (*create_srq)(struct ib_pd *pd,
+                                                struct ib_srq_init_attr *srq_init_attr,
+                                                ci_umv_buf_t* const    p_umv_buf);
+       int                        (*modify_srq)(struct ib_srq *srq,
+                                                struct ib_srq_attr *srq_attr,
+                                                enum ib_srq_attr_mask srq_attr_mask);
+       int                        (*query_srq)(struct ib_srq *srq,
+                                               struct ib_srq_attr *srq_attr);
+       int                        (*destroy_srq)(struct ib_srq *srq);
+       int                        (*post_srq_recv)(struct ib_srq *srq,
+                                                   struct _ib_recv_wr *recv_wr,
+                                                   struct _ib_recv_wr **bad_recv_wr);
+       struct ib_qp *             (*create_qp)(struct ib_pd *pd,
+                                               struct ib_qp_init_attr *qp_init_attr,
+                                               ci_umv_buf_t* const     p_umv_buf);
+       int                        (*modify_qp)(struct ib_qp *qp,
+                                               struct ib_qp_attr *qp_attr,
+                                               int qp_attr_mask);
+       int                        (*query_qp)(struct ib_qp *qp,
+                                              struct ib_qp_attr *qp_attr,
+                                              int qp_attr_mask,
+                                              struct ib_qp_init_attr *qp_init_attr);
+       int                        (*destroy_qp)(struct ib_qp *qp);
+       int                        (*post_send)(struct ib_qp *qp,
+                                               struct _ib_send_wr *send_wr,
+                                               struct _ib_send_wr **bad_send_wr);
+       int                        (*post_recv)(struct ib_qp *qp,
+                                               struct _ib_recv_wr *recv_wr,
+                                               struct _ib_recv_wr **bad_recv_wr);
+       struct ib_cq *             (*create_cq)(struct ib_device *device, int cqe,
+                                               struct ib_ucontext *context,
+                                               ci_umv_buf_t* const     p_umv_buf);
+       int                        (*destroy_cq)(struct ib_cq *cq);
+       int                        (*resize_cq)(struct ib_cq *cq, int *cqe);
+       int                        (*poll_cq)(struct ib_cq *cq, int num_entries,
+                                             struct _ib_wc *wc);
+       int                        (*peek_cq)(struct ib_cq *cq, int wc_cnt);
+       int                        (*req_notify_cq)(struct ib_cq *cq,
+                                                   enum ib_cq_notify cq_notify);
+       int                        (*req_ncomp_notif)(struct ib_cq *cq,
+                                                     int wc_cnt);
+       struct ib_mr *             (*get_dma_mr)(struct ib_pd *pd,
+                                                mthca_qp_access_t mr_access_flags);
+       struct ib_mr *             (*reg_phys_mr)(struct ib_pd *pd,
+                                                 struct ib_phys_buf *phys_buf_array,
+                                                 int num_phys_buf,
+                                                 mthca_qp_access_t mr_access_flags,
+                                                 u64 *iova_start);
+       struct ib_mr *             (*reg_user_mr)(struct ib_pd *pd, 
+                                               void* __ptr64   vaddr, uint64_t length, uint64_t hca_va, mthca_qp_access_t acc);
+       int                        (*query_mr)(struct ib_mr *mr,
+                                              struct ib_mr_attr *mr_attr);
+       int                        (*dereg_mr)(struct ib_mr *mr);
+       int                        (*rereg_phys_mr)(struct ib_mr *mr,
+                                                   int mr_rereg_mask,
+                                                   struct ib_pd *pd,
+                                                   struct ib_phys_buf *phys_buf_array,
+                                                   int num_phys_buf,
+                                                   mthca_qp_access_t mr_access_flags,
+                                                   u64 *iova_start);
+       struct ib_mw *             (*alloc_mw)(struct ib_pd *pd);
+       int                        (*bind_mw)(struct ib_qp *qp,
+                                             struct ib_mw *mw,
+                                             struct ib_mw_bind *mw_bind);
+       int                        (*dealloc_mw)(struct ib_mw *mw);
+       struct ib_fmr *            (*alloc_fmr)(struct ib_pd *pd,
+                                               mthca_qp_access_t mr_access_flags,
+                                               struct ib_fmr_attr *fmr_attr);
+       int                        (*map_phys_fmr)(struct ib_fmr *fmr,
+                                                  u64 *page_list, int list_len,
+                                                  u64 iova);
+       int                        (*unmap_fmr)(struct list_head *fmr_list);
+       int                        (*dealloc_fmr)(struct ib_fmr *fmr);
+       int                        (*attach_mcast)(struct ib_qp *qp,
+                                                  union ib_gid *gid,
+                                                  u16 lid);
+       int                        (*detach_mcast)(struct ib_qp *qp,
+                                                  union ib_gid *gid,
+                                                  u16 lid);
+       int                        (*process_mad)(struct ib_device *device,
+                                                 int process_mad_flags,
+                                                 u8 port_num,
+                                                 struct _ib_wc *in_wc,
+                                                 struct ib_grh *in_grh,
+                                                 struct ib_mad *in_mad,
+                                                 struct ib_mad *out_mad);
+
+       struct list_head             port_list;
+
+       u64                           uverbs_cmd_mask;
+       __be64                        node_guid;
+       u8                            node_type;
+       u8                            phys_port_cnt;
+};
+
+struct ib_client {
+       char  *name;
+       void (*add)   (struct ib_device *);
+       void (*remove)(struct ib_device *);
+
+       struct list_head list;
+};
+
+struct ib_device *ib_alloc_device(size_t size);
+void ib_dealloc_device(struct ib_device *device);
+
+int ib_register_device   (struct ib_device *device);
+void ib_unregister_device(struct ib_device *device);
+
+int ib_register_client   (struct ib_client *client);
+void ib_unregister_client(struct ib_client *client);
+
+void *ib_get_client_data(struct ib_device *device, struct ib_client *client);
+void  ib_set_client_data(struct ib_device *device, struct ib_client *client,
+                        void *data);
+
+int ib_core_init(void);
+
+void ib_core_cleanup(void);
+
+int ib_register_event_handler  (struct ib_event_handler *event_handler);
+int ib_unregister_event_handler(struct ib_event_handler *event_handler);
+void ib_dispatch_event(struct ib_event *event);
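Usage sketch (illustrative, not part of this header): a kernel consumer can hook asynchronous events with the declarations above; the my_* names are hypothetical.

        static void my_async_event_cb(struct ib_event_handler *handler,
                                      struct ib_event *event)
        {
                UNREFERENCED_PARAMETER(handler);
                if (event->event == IB_EVENT_PORT_ACTIVE) {
                        /* e.g. refresh cached port data for event->element.port_num */
                }
        }

        static struct ib_event_handler my_handler;

        static void my_hook_events(struct ib_device *device)
        {
                INIT_IB_EVENT_HANDLER(&my_handler, device, my_async_event_cb);
                ib_register_event_handler(&my_handler);
        }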
+
+int ib_query_device(struct ib_device *device,
+                   struct ib_device_attr *device_attr);
+
+int ib_query_port(struct ib_device *device,
+                 u8 port_num, struct ib_port_attr *port_attr);
+
+int ib_query_gid(struct ib_device *device,
+                u8 port_num, int index, union ib_gid *gid);
+
+int ib_query_pkey(struct ib_device *device,
+                 u8 port_num, u16 index, u16 *pkey);
+
+int ib_modify_device(struct ib_device *device,
+                    int device_modify_mask,
+                    struct ib_device_modify *device_modify);
+
+int ib_modify_port(struct ib_device *device,
+                  u8 port_num, int port_modify_mask,
+                  struct ib_port_modify *port_modify);
+
+/**
+ * ibv_alloc_pd - Allocates an unused protection domain.
+ * @device: The device on which to allocate the protection domain.
+ * @context: user process context (for application calls only)
+ * @p_umv_buf: parameters structure (for application calls only)
+ *
+ * A protection domain object provides an association between QPs, shared
+ * receive queues, address handles, memory regions, and memory windows.
+ */
+struct ib_pd *ibv_alloc_pd(struct ib_device *device,
+       struct ib_ucontext *context, ci_umv_buf_t* const p_umv_buf);
+
+/**
+ * ibv_dealloc_pd - Deallocates a protection domain.
+ * @pd: The protection domain to deallocate.
+ */
+int ibv_dealloc_pd(struct ib_pd *pd);
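Usage sketch (illustrative only): a minimal protection-domain lifetime for a kernel caller, assuming NULL may be passed for the user-mode-only context and p_umv_buf parameters; the exact failure convention of ibv_alloc_pd() is not shown in this header, so the check below is only indicative.

        static void my_pd_example(struct ib_device *device)
        {
                struct ib_pd *pd;

                pd = ibv_alloc_pd(device, NULL, NULL);  /* kernel caller: no user context */
                if (pd == NULL)
                        return;         /* failure convention assumed; see the implementation */

                /* ... create CQs, QPs and MRs against pd here ... */

                ibv_dealloc_pd(pd);
        }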
+
+/**
+ * ibv_create_ah - Creates an address handle for the given address vector.
+ * @pd: The protection domain associated with the address handle.
+ * @ah_attr: The attributes of the address vector.
+ * @context: user process context (for application calls only)
+ * @p_umv_buf: parameters structure (for application calls only)
+ *
+ * The address handle is used to reference a local or global destination
+ * in all UD QP post sends.
+ */
+struct ib_ah *ibv_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr,
+       struct ib_ucontext *context, ci_umv_buf_t* const p_umv_buf);
+
+/**
+ * ibv_create_ah_from_wc - Creates an address handle associated with the
+ *   sender of the specified work completion.
+ * @pd: The protection domain associated with the address handle.
+ * @wc: Work completion information associated with a received message.
+ * @grh: References the received global route header.  This parameter is
+ *   ignored unless the work completion indicates that the GRH is valid.
+ * @port_num: The outbound port number to associate with the address.
+ *
+ * The address handle is used to reference a local or global destination
+ * in all UD QP post sends.
+ */
+struct ib_ah *ibv_create_ah_from_wc(struct ib_pd *pd, struct _ib_wc *wc,
+                                  struct ib_grh *grh, u8 port_num);
+
+/**
+ * ibv_modify_ah - Modifies the address vector associated with an address
+ *   handle.
+ * @ah: The address handle to modify.
+ * @ah_attr: The new address vector attributes to associate with the
+ *   address handle.
+ */
+int ibv_modify_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr);
+
+/**
+ * ibv_query_ah - Queries the address vector associated with an address
+ *   handle.
+ * @ah: The address handle to query.
+ * @ah_attr: The address vector attributes associated with the address
+ *   handle.
+ */
+int ibv_query_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr);
+
+/**
+ * ibv_destroy_ah - Destroys an address handle.
+ * @ah: The address handle to destroy.
+ */
+int ibv_destroy_ah(struct ib_ah *ah);
+
+/**
+ * ibv_create_srq - Creates a SRQ associated with the specified protection
+ *   domain.
+ * @pd: The protection domain associated with the SRQ.
+ * @srq_init_attr: A list of initial attributes required to create the SRQ.
+ *
+ * srq_attr->max_wr and srq_attr->max_sge are read to determine the
+ * requested size of the SRQ, and set to the actual values allocated
+ * on return.  If ibv_create_srq() succeeds, then max_wr and max_sge
+ * will always be at least as large as the requested values.
+ */
+struct ib_srq *ibv_create_srq(struct ib_pd *pd,
+                            struct ib_srq_init_attr *srq_init_attr);
+
+/**
+ * ibv_modify_srq - Modifies the attributes for the specified SRQ.
+ * @srq: The SRQ to modify.
+ * @srq_attr: On input, specifies the SRQ attributes to modify.  On output,
+ *   the current values of selected SRQ attributes are returned.
+ * @srq_attr_mask: A bit-mask used to specify which attributes of the SRQ
+ *   are being modified.
+ *
+ * The mask may contain IB_SRQ_MAX_WR to resize the SRQ and/or
+ * IB_SRQ_LIMIT to set the SRQ's limit and request notification when
+ * the number of receives queued drops below the limit.
+ */
+int ibv_modify_srq(struct ib_srq *srq,
+                 struct ib_srq_attr *srq_attr,
+                 enum ib_srq_attr_mask srq_attr_mask);
+
+/**
+ * ibv_query_srq - Returns the attribute list and current values for the
+ *   specified SRQ.
+ * @srq: The SRQ to query.
+ * @srq_attr: The attributes of the specified SRQ.
+ */
+int ibv_query_srq(struct ib_srq *srq,
+                struct ib_srq_attr *srq_attr);
+
+/**
+ * ibv_destroy_srq - Destroys the specified SRQ.
+ * @srq: The SRQ to destroy.
+ */
+int ibv_destroy_srq(struct ib_srq *srq);
+
+/**
+ * ibv_post_srq_recv - Posts a list of work requests to the specified SRQ.
+ * @srq: The SRQ to post the work request on.
+ * @recv_wr: A list of work requests to post on the receive queue.
+ * @bad_recv_wr: On an immediate failure, this parameter will reference
+ *   the work request that failed to be posted on the SRQ.
+ */
+static inline int ibv_post_srq_recv(struct ib_srq *srq,
+                                  struct _ib_recv_wr *recv_wr,
+                                  struct _ib_recv_wr **bad_recv_wr)
+{
+       return srq->device->post_srq_recv(srq, recv_wr, bad_recv_wr);
+}
+
+/**
+ * ibv_create_qp - Creates a QP associated with the specified protection
+ *   domain.
+ * @pd: The protection domain associated with the QP.
+ * @qp_init_attr: A list of initial attributes required to create the QP.
+ * @context: user process context (for application calls only)
+ * @p_umv_buf: parameters structure (for application calls only)
+ */
+struct ib_qp *ibv_create_qp(struct ib_pd *pd,
+       struct ib_qp_init_attr *qp_init_attr,
+       struct ib_ucontext *context, ci_umv_buf_t* const p_umv_buf);
+
+/**
+ * ibv_modify_qp - Modifies the attributes for the specified QP and then
+ *   transitions the QP to the given state.
+ * @qp: The QP to modify.
+ * @qp_attr: On input, specifies the QP attributes to modify.  On output,
+ *   the current values of selected QP attributes are returned.
+ * @qp_attr_mask: A bit-mask used to specify which attributes of the QP
+ *   are being modified.
+ */
+int ibv_modify_qp(struct ib_qp *qp,
+                struct ib_qp_attr *qp_attr,
+                int qp_attr_mask);
+
+/**
+ * ibv_query_qp - Returns the attribute list and current values for the
+ *   specified QP.
+ * @qp: The QP to query.
+ * @qp_attr: The attributes of the specified QP.
+ * @qp_attr_mask: A bit-mask used to select specific attributes to query.
+ * @qp_init_attr: Additional attributes of the selected QP.
+ *
+ * The qp_attr_mask may be used to limit the query to gathering only the
+ * selected attributes.
+ */
+int ibv_query_qp(struct ib_qp *qp,
+               struct ib_qp_attr *qp_attr,
+               int qp_attr_mask,
+               struct ib_qp_init_attr *qp_init_attr);
+
+/**
+ * ibv_destroy_qp - Destroys the specified QP.
+ * @qp: The QP to destroy.
+ */
+int ibv_destroy_qp(struct ib_qp *qp);
+
+/**
+ * ib_post_send - Posts a list of work requests to the send queue of
+ *   the specified QP.
+ * @qp: The QP to post the work request on.
+ * @send_wr: A list of work requests to post on the send queue.
+ * @bad_send_wr: On an immediate failure, this parameter will reference
+ *   the work request that failed to be posted on the QP.
+ */
+static inline int ib_post_send(struct ib_qp *qp,
+                              struct _ib_send_wr *send_wr,
+                              struct _ib_send_wr **bad_send_wr)
+{
+       return qp->device->post_send(qp, send_wr, bad_send_wr);
+}
+
+/**
+ * ib_post_recv - Posts a list of work requests to the receive queue of
+ *   the specified QP.
+ * @qp: The QP to post the work request on.
+ * @recv_wr: A list of work requests to post on the receive queue.
+ * @bad_recv_wr: On an immediate failure, this parameter will reference
+ *   the work request that failed to be posted on the QP.
+ */
+static inline int ib_post_recv(struct ib_qp *qp,
+                              struct _ib_recv_wr *recv_wr,
+                              struct _ib_recv_wr **bad_recv_wr)
+{
+       return qp->device->post_recv(qp, recv_wr, bad_recv_wr);
+}
+
+/**
+ * ibv_create_cq - Creates a CQ on the specified device.
+ * @device: The device on which to create the CQ.
+ * @comp_handler: A user-specified callback that is invoked when a
+ *   completion event occurs on the CQ.
+ * @event_handler: A user-specified callback that is invoked when an
+ *   asynchronous event not associated with a completion occurs on the CQ.
+ * @cq_context: Context associated with the CQ returned to the user via
+ *   the associated completion and event handlers.
+ * @cqe: The minimum size of the CQ.
+ * @context: user process context (for application calls only)
+ * @p_umv_buf: parameters structure (for application calls only)
+ *
+ * Users can examine the cq structure to determine the actual CQ size.
+ */
+struct ib_cq *ibv_create_cq(struct ib_device *device,
+                          ib_comp_handler comp_handler,
+                          void (*event_handler)(struct ib_event *, void *),
+                          void *cq_context, int cqe, 
+                          struct ib_ucontext *context, ci_umv_buf_t* const p_umv_buf);
+
+/**
+ * ibv_resize_cq - Modifies the capacity of the CQ.
+ * @cq: The CQ to resize.
+ * @cqe: The minimum size of the CQ.
+ *
+ * Users can examine the cq structure to determine the actual CQ size.
+ */
+int ibv_resize_cq(struct ib_cq *cq, int cqe);
+
+/**
+ * ibv_destroy_cq - Destroys the specified CQ.
+ * @cq: The CQ to destroy.
+ */
+int ibv_destroy_cq(struct ib_cq *cq);
+
+/**
+ * ib_poll_cq - poll a CQ for completion(s)
+ * @cq:the CQ being polled
+ * @num_entries:maximum number of completions to return
+ * @wc:array of at least @num_entries &struct _ib_wc where completions
+ *   will be returned
+ *
+ * Poll a CQ for (possibly multiple) completions.  If the return value
+ * is < 0, an error occurred.  If the return value is >= 0, it is the
+ * number of completions returned.  If the return value is
+ * non-negative and < num_entries, then the CQ was emptied.
+ */
+static inline int ib_poll_cq(struct ib_cq *cq, int num_entries,
+                            struct _ib_wc *wc)
+{
+       return cq->device->poll_cq(cq, num_entries, wc);
+}
+
+/**
+ * ib_peek_cq - Returns the number of unreaped completions currently
+ *   on the specified CQ.
+ * @cq: The CQ to peek.
+ * @wc_cnt: A minimum number of unreaped completions to check for.
+ *
+ * If the number of unreaped completions is greater than or equal to wc_cnt,
+ * this function returns wc_cnt, otherwise, it returns the actual number of
+ * unreaped completions.
+ */
+int ib_peek_cq(struct ib_cq *cq, int wc_cnt);
+
+/**
+ * ib_req_notify_cq - Request completion notification on a CQ.
+ * @cq: The CQ to generate an event for.
+ * @cq_notify: If set to %IB_CQ_SOLICITED, completion notification will
+ *   occur on the next solicited event. If set to %IB_CQ_NEXT_COMP,
+ *   notification will occur on the next completion.
+ */
+static inline int ib_req_notify_cq(struct ib_cq *cq,
+                                  enum ib_cq_notify cq_notify)
+{
+       return cq->device->req_notify_cq(cq, cq_notify);
+}
+
+/**
+ * ib_req_ncomp_notif - Request completion notification when there are
+ *   at least the specified number of unreaped completions on the CQ.
+ * @cq: The CQ to generate an event for.
+ * @wc_cnt: The number of unreaped completions that should be on the
+ *   CQ before an event is generated.
+ */
+static inline int ib_req_ncomp_notif(struct ib_cq *cq, int wc_cnt)
+{
+       return cq->device->req_ncomp_notif ?
+               cq->device->req_ncomp_notif(cq, wc_cnt) :
+               -ENOSYS;
+}
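Usage sketch (illustrative only; MY_WC_BATCH and my_comp_handler are hypothetical): the usual arm-then-drain pattern built from ib_req_notify_cq() and ib_poll_cq(). A handler with this signature is what ibv_create_cq() takes as its comp_handler argument.

        #define MY_WC_BATCH 8

        static void my_comp_handler(struct ib_cq *cq, void *cq_context)
        {
                struct _ib_wc wc[MY_WC_BATCH];
                int n;

                UNREFERENCED_PARAMETER(cq_context);

                /* re-arm first, then drain, so a completion arriving after the
                   last poll still generates a new callback */
                ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
                do {
                        n = ib_poll_cq(cq, MY_WC_BATCH, wc);
                        /* process the n completions in wc[] here */
                } while (n == MY_WC_BATCH);
        }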
+
+/**
+ * ibv_reg_mr - Prepares a virtually addressed memory region for use
+ *   by an HCA.
+ * @pd: The protection domain assigned to the registered region.
+ * @vaddr: virtual address of the region
+ * @length: Specifies the size of the region.
+ * @hca_va: virtual address in HCA
+ * @mr_access_flags: Specifies the memory access rights.
+ * @um_call: TRUE when the call originates from user mode.
+ */
+struct ib_mr *ibv_reg_mr(struct ib_pd *pd, 
+       mthca_qp_access_t mr_access_flags,
+       void* __ptr64                   vaddr,
+       uint64_t                                length,
+       uint64_t                                hca_va,
+       boolean_t                       um_call
+       );
+
+/**
+ * ibv_get_dma_mr - Returns a memory region for system memory that is
+ *   usable for DMA.
+ * @pd: The protection domain associated with the memory region.
+ * @mr_access_flags: Specifies the memory access rights.
+ */
+struct ib_mr *ibv_get_dma_mr(struct ib_pd *pd, mthca_qp_access_t mr_access_flags);
+
+/**
+ * ibv_reg_phys_mr - Prepares a virtually addressed memory region for use
+ *   by an HCA.
+ * @pd: The protection domain assigned to the registered region.
+ * @phys_buf_array: Specifies a list of physical buffers to use in the
+ *   memory region.
+ * @num_phys_buf: Specifies the size of the phys_buf_array.
+ * @mr_access_flags: Specifies the memory access rights.
+ * @iova_start: The offset of the region's starting I/O virtual address.
+ */
+struct ib_mr *ibv_reg_phys_mr(struct ib_pd *pd,
+                            struct ib_phys_buf *phys_buf_array,
+                            int num_phys_buf,
+                            mthca_qp_access_t mr_access_flags,
+                            u64 *iova_start);
+
+/**
+ * ibv_rereg_phys_mr - Modifies the attributes of an existing memory region.
+ *   Conceptually, this call performs the functions deregister memory region
+ *   followed by register physical memory region.  Where possible,
+ *   resources are reused instead of deallocated and reallocated.
+ * @mr: The memory region to modify.
+ * @mr_rereg_mask: A bit-mask used to indicate which of the following
+ *   properties of the memory region are being modified.
+ * @pd: If %IB_MR_REREG_PD is set in mr_rereg_mask, this field specifies
+ *   the new protection domain to associated with the memory region,
+ *   otherwise, this parameter is ignored.
+ * @phys_buf_array: If %IB_MR_REREG_TRANS is set in mr_rereg_mask, this
+ *   field specifies a list of physical buffers to use in the new
+ *   translation, otherwise, this parameter is ignored.
+ * @num_phys_buf: If %IB_MR_REREG_TRANS is set in mr_rereg_mask, this
+ *   field specifies the size of the phys_buf_array, otherwise, this
+ *   parameter is ignored.
+ * @mr_access_flags: If %IB_MR_REREG_ACCESS is set in mr_rereg_mask, this
+ *   field specifies the new memory access rights, otherwise, this
+ *   parameter is ignored.
+ * @iova_start: The offset of the region's starting I/O virtual address.
+ */
+int ibv_rereg_phys_mr(struct ib_mr *mr,
+                    int mr_rereg_mask,
+                    struct ib_pd *pd,
+                    struct ib_phys_buf *phys_buf_array,
+                    int num_phys_buf,
+                    mthca_qp_access_t mr_access_flags,
+                    u64 *iova_start);
+
+/**
+ * ibv_query_mr - Retrieves information about a specific memory region.
+ * @mr: The memory region to retrieve information about.
+ * @mr_attr: The attributes of the specified memory region.
+ */
+int ibv_query_mr(struct ib_mr *mr, struct ib_mr_attr *mr_attr);
+
+/**
+ * ibv_dereg_mr - Deregisters a memory region and removes it from the
+ *   HCA translation table.
+ * @mr: The memory region to deregister.
+ */
+int ibv_dereg_mr(struct ib_mr *mr);
+
+/**
+ * ibv_alloc_mw - Allocates a memory window.
+ * @pd: The protection domain associated with the memory window.
+ */
+struct ib_mw *ibv_alloc_mw(struct ib_pd *pd);
+
+/**
+ * ib_bind_mw - Posts a work request to the send queue of the specified
+ *   QP, which binds the memory window to the given address range and
+ *   remote access attributes.
+ * @qp: QP to post the bind work request on.
+ * @mw: The memory window to bind.
+ * @mw_bind: Specifies information about the memory window, including
+ *   its address range, remote access rights, and associated memory region.
+ */
+static inline int ib_bind_mw(struct ib_qp *qp,
+                            struct ib_mw *mw,
+                            struct ib_mw_bind *mw_bind)
+{
+       /* XXX reference counting in corresponding MR? */
+       return mw->device->bind_mw ?
+               mw->device->bind_mw(qp, mw, mw_bind) :
+               -ENOSYS;
+}
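Usage sketch (illustrative only): filling the struct ib_mw_bind defined earlier in this header and posting the bind through ib_bind_mw(); the values are arbitrary.

        static int my_bind_window(struct ib_qp *qp, struct ib_mw *mw,
                                  struct ib_mr *mr, u64 addr, u32 length)
        {
                struct ib_mw_bind bind;

                bind.mr              = mr;
                bind.wr_id           = 1;       /* arbitrary completion cookie */
                bind.addr            = addr;
                bind.length          = length;
                bind.send_flags      = 0;
                bind.mw_access_flags = MTHCA_ACCESS_REMOTE_READ | MTHCA_ACCESS_REMOTE_WRITE;

                return ib_bind_mw(qp, mw, &bind);
        }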
+
+/**
+ * ibv_dealloc_mw - Deallocates a memory window.
+ * @mw: The memory window to deallocate.
+ */
+int ibv_dealloc_mw(struct ib_mw *mw);
+
+/**
+ * ibv_alloc_fmr - Allocates an unmapped fast memory region.
+ * @pd: The protection domain associated with the unmapped region.
+ * @mr_access_flags: Specifies the memory access rights.
+ * @fmr_attr: Attributes of the unmapped region.
+ *
+ * A fast memory region must be mapped before it can be used as part of
+ * a work request.
+ */
+struct ib_fmr *ibv_alloc_fmr(struct ib_pd *pd,
+                           mthca_qp_access_t mr_access_flags,
+                           struct ib_fmr_attr *fmr_attr);
+
+/**
+ * ib_map_phys_fmr - Maps a list of physical pages to a fast memory region.
+ * @fmr: The fast memory region to associate with the pages.
+ * @page_list: An array of physical pages to map to the fast memory region.
+ * @list_len: The number of pages in page_list.
+ * @iova: The I/O virtual address to use with the mapped region.
+ */
+static inline int ib_map_phys_fmr(struct ib_fmr *fmr,
+                                 u64 *page_list, int list_len,
+                                 u64 iova)
+{
+       return fmr->device->map_phys_fmr(fmr, page_list, list_len, iova);
+}
+
+/**
+ * ibv_unmap_fmr - Removes the mapping from a list of fast memory regions.
+ * @fmr_list: A linked list of fast memory regions to unmap.
+ */
+int ibv_unmap_fmr(struct list_head *fmr_list);
+
+/**
+ * ibv_dealloc_fmr - Deallocates a fast memory region.
+ * @fmr: The fast memory region to deallocate.
+ */
+int ibv_dealloc_fmr(struct ib_fmr *fmr);
+
+/**
+ * ibv_attach_mcast - Attaches the specified QP to a multicast group.
+ * @qp: QP to attach to the multicast group.  The QP must be type
+ *   IB_QPT_UNRELIABLE_DGRM.
+ * @gid: Multicast group GID.
+ * @lid: Multicast group LID in host byte order.
+ *
+ * In order to send and receive multicast packets, subnet
+ * administration must have created the multicast group and configured
+ * the fabric appropriately.  The port associated with the specified
+ * QP must also be a member of the multicast group.
+ */
+int ibv_attach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid);
+
+/**
+ * ibv_detach_mcast - Detaches the specified QP from a multicast group.
+ * @qp: QP to detach from the multicast group.
+ * @gid: Multicast group GID.
+ * @lid: Multicast group LID in host byte order.
+ */
+int ibv_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid);
+
+/**
+ * ibv_um_close - Releases an application's user context.
+ * @h_um_ca: the application (user) context to release
+ */
+void ibv_um_close(struct ib_ucontext * h_um_ca);
+
+#endif /* IB_VERBS_H */
diff --git a/trunk/hw/mthca/kernel/mt_atomic.h b/trunk/hw/mthca/kernel/mt_atomic.h
new file mode 100644 (file)
index 0000000..4dcf5f3
--- /dev/null
@@ -0,0 +1,58 @@
+#ifndef MT_ATOMIC_H
+#define MT_ATOMIC_H
+
+// atomic
+typedef LONG atomic_t;
+
+static inline void atomic_inc(atomic_t *pval)
+{
+       InterlockedIncrement(pval);     
+}
+
+static inline void atomic_dec(atomic_t *pval)
+{
+       InterlockedDecrement(pval);     
+}
+
+static inline atomic_t atomic_read(atomic_t *pval)
+{
+       return (atomic_t)InterlockedOr (pval,0);        
+}
+
+static inline void atomic_set(atomic_t *pval, long val)
+{
+       InterlockedExchange(pval, val); 
+}
+
+/**
+* atomic_inc_and_test - increment and test
+* pval: pointer of type atomic_t
+* 
+* Atomically increments pval by 1 and
+* returns true if the result is 0, or false for all other
+* cases.
+*/ 
+static inline int
+atomic_inc_and_test(atomic_t *pval)
+{ 
+       LONG val = InterlockedIncrement(pval);
+       return (val == 0);
+}
+
+/**
+* atomic_dec_and_test - decrement and test
+* pval: pointer of type atomic_t
+* 
+* Atomically decrements pval by 1 and
+* returns true if the result is 0, or false for all other
+* cases.
+*/ 
+static inline int
+atomic_dec_and_test(atomic_t *pval)
+{ 
+       LONG  val = InterlockedDecrement(pval);
+       return (val == 0);
+}
+
+
+#endif
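Usage sketch (illustrative, not part of this header): the wrappers above map the Linux-style atomic API onto Win32 Interlocked* primitives, which supports the reference-count idiom used by the usecnt fields in ib_verbs.h; my_object is hypothetical.

        typedef struct my_object {
                atomic_t refcnt;
        } my_object_t;

        static inline void my_object_init(my_object_t *obj)
        {
                atomic_set(&obj->refcnt, 1);    /* creator holds one reference */
        }

        static inline void my_object_get(my_object_t *obj)
        {
                atomic_inc(&obj->refcnt);
        }

        /* returns nonzero when the last reference has just been dropped */
        static inline int my_object_put(my_object_t *obj)
        {
                return atomic_dec_and_test(&obj->refcnt);
        }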
diff --git a/trunk/hw/mthca/kernel/mt_bitmap.h b/trunk/hw/mthca/kernel/mt_bitmap.h
new file mode 100644 (file)
index 0000000..22940ff
--- /dev/null
@@ -0,0 +1,260 @@
+#ifndef MT_BITMAP_H
+#define MT_BITMAP_H
+
+// DECLARE_BITMAP
+#define BITS_PER_LONG          32
+#define BITS_TO_LONGS(bits) \
+        (((bits)+BITS_PER_LONG-1)/BITS_PER_LONG)
+#define DECLARE_BITMAP(name,bits) \
+    unsigned long name[BITS_TO_LONGS(bits)]
+
+/**
+* atomic_set_bit - Atomically set a bit in memory
+* @nr: the bit to set
+* @addr: the address to start counting from
+*
+* This function is atomic and may not be reordered.  See __set_bit()
+* if you do not require the atomic guarantees.
+*
+* Note: there are no guarantees that this function will not be reordered
+* on non-x86 architectures, so if you are writing portable code,
+* make sure not to rely on its reordering guarantees.
+*
+* Note that @nr may be almost arbitrarily large; this function is not
+* restricted to acting on a single-word quantity.
+*/
+static inline unsigned long atomic_set_bit(int nr, volatile long * addr)
+{
+               return InterlockedOr( addr, (1 << nr) );
+}
+
+/**
+* atomic_clear_bit - Clears a bit in memory
+* @nr: Bit to clear
+* @addr: Address to start counting from
+*
+* clear_bit() is atomic and may not be reordered.  However, it does
+* not contain a memory barrier, so if it is used for locking purposes,
+* you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
+* in order to ensure changes are visible on other processors.
+*/
+static inline unsigned long atomic_clear_bit(int nr, volatile long * addr)
+{
+       return InterlockedAnd( addr, ~(1 << nr) );
+}
+
+static inline  int set_bit(int nr,long * addr)
+{
+    addr += nr >> 5;
+       return atomic_set_bit( nr & 0x1f, (volatile long *)addr );       
+}
+
+static inline  int clear_bit(int nr, long * addr)
+{
+       addr += nr >> 5;
+       return atomic_clear_bit( nr & 0x1f, (volatile long *)addr );  
+}
+
+static inline  int test_bit(int nr, const unsigned long * addr)
+{
+       int     mask;
+
+       addr += nr >> 5;
+       mask = 1 << (nr & 0x1f);
+       return ((mask & *addr) != 0);
+}
+
+
+/**
+* bitmap_zero - clear the bitmap
+* @dst: the bitmap address
+* @nbits: the bitmap size in bits
+*
+*/
+static inline void bitmap_zero(unsigned long *dst, int nbits)
+{
+       if (nbits <= BITS_PER_LONG)
+               *dst = 0UL;
+       else {
+               int len = BITS_TO_LONGS(nbits) * sizeof(unsigned long);
+               RtlZeroMemory(dst, len);
+       }
+}
+
+#define BITMAP_LAST_WORD_MASK(nbits)    \
+       ( ((nbits) % BITS_PER_LONG) ? (1UL<<((nbits) % BITS_PER_LONG))-1 : ~0UL )
+
+int __bitmap_full(const unsigned long *bitmap, int bits);
+
+static inline int bitmap_full(const unsigned long *src, int nbits)
+{
+       if (nbits <= BITS_PER_LONG)
+               return ! (~(*src) & BITMAP_LAST_WORD_MASK(nbits));
+       else
+               return __bitmap_full(src, nbits);
+}
+
+int __bitmap_empty(const unsigned long *bitmap, int bits);
+
+static inline int bitmap_empty(const unsigned long *src, int nbits)
+{
+        if (nbits <= BITS_PER_LONG)
+                return ! (*src & BITMAP_LAST_WORD_MASK(nbits));
+        else
+                return __bitmap_empty(src, nbits);
+}
+
+/*
+* fls - find the last (most significant) set bit
+* returns: 0 if no bit is set, otherwise N+1 where N is the index of the highest set bit
+*/
+
+static inline int fls(int x)
+{
+       int r = 32;
+
+       if (!x)
+               return 0;
+       if (!(x & 0xffff0000u)) {
+               x <<= 16;
+               r -= 16;
+       }
+       if (!(x & 0xff000000u)) {
+               x <<= 8;
+               r -= 8;
+       }
+       if (!(x & 0xf0000000u)) {
+               x <<= 4;
+               r -= 4;
+       }
+       if (!(x & 0xc0000000u)) {
+               x <<= 2;
+               r -= 2;
+       }
+       if (!(x & 0x80000000u)) {
+               x <<= 1;
+               r -= 1;
+       }
+       return r;
+}
+
+
+/**
+* _ffs - find the first set bit in a word
+* @addr: The address to start the search at
+* @offset: The bit number to start searching at
+*
+* returns: 0 if not found, otherwise N+1 where N is the index of the first set bit
+*/
+static inline int _ffs(const unsigned long *addr, int offset)
+{
+       //TODO: not efficient code - better implemented in assembler
+       int mask = 1 << offset;
+       int rbc = BITS_PER_LONG - offset;
+       int ix;
+       for (ix=0; ix<rbc; ix++, mask<<=1) {
+               if (*addr & mask)
+                       return offset + ix + 1;
+       }
+       return 0;
+}
+
+#define ffs(val)       _ffs((const unsigned long *)&val,0)
+
+/**
+* _ffz - find the first zero bit in a word
+* @addr: The address to start the search at
+* @offset: The bit number to start searching at
+*
+* returns: 0 if not found, otherwise N+1 where N is the index of the first zero bit
+*/
+static inline int _ffz(const unsigned long *addr, int offset)
+{
+       //TODO: not efficient code - better implemented in assembler
+       int mask = 1 << offset;
+       int rbc = BITS_PER_LONG - offset;
+       int ix;
+       for (ix=0; ix<rbc; ix++, mask<<=1) {
+               if (!(*addr & mask))
+                       return offset + ix + 1;
+       }
+       return 0;
+}
+
+#define ffz(val)       _ffz(&val,0)
+
+/**
+* find_next_zero_bit - find the first zero bit in a memory region
+* @addr: The address to base the search on
+* @bits_size: The maximum size to search (in bits)
+* @offset: The bit number to start searching at
+*
+* Returns the bit-number of the first zero bit, not the number of the byte
+* containing a bit. If not found - returns 'bits_size'.
+*/
+static inline int find_next_zero_bit(const unsigned long *addr, int bits_size, int offset)
+{      
+       int len = BITS_TO_LONGS(bits_size);
+       int ix = offset % BITS_PER_LONG;
+       int w_offset = offset / BITS_PER_LONG;
+
+       MT_ASSERT(w_offset < len);
+       
+       // search in the first word
+       ix = _ffz(addr + w_offset,ix);
+       if (ix)
+               return ix - 1;
+       
+       // look in the rest
+       for (; ++w_offset < len; ) {
+               ix = _ffz(addr + w_offset,0);
+               if (ix)
+                       return (w_offset * BITS_PER_LONG) + ix - 1;
+       }
+
+       return bits_size;
+       
+}
+
+/**
+* find_first_zero_bit - find the first zero bit in a memory region
+* @addr: The address to start the search at
+* @bits_size: The maximum size to search
+*
+* Returns the bit-number of the first zero bit, not the number of the byte
+* containing a bit. If not found - returns 'bits_size'.
+*/
+static inline int find_first_zero_bit(const unsigned long *addr, unsigned bits_size)
+{
+       int len = BITS_TO_LONGS(bits_size);
+       int i, ix;
+       for (i=0; i<len; i++,addr++) {
+               ix = _ffz(addr,0);
+               if (ix)
+                       return (i * BITS_PER_LONG) + ix - 1;
+       }
+       return bits_size;
+}
+
+/**
+* find_first_bit - find the first set bit in a memory region
+* @addr: The address to start the search at
+* @bits_size: The maximum size to search (in bits)
+*
+* Returns the bit-number of the first set bit, not the number of the byte
+* containing a bit. Returns 'bits_size', if not found
+*/
+static inline int find_first_bit(const unsigned long *addr, unsigned bits_size)
+{
+       int len = BITS_TO_LONGS(bits_size);
+       int i, ix;
+       for (i=0; i<len; i++,addr++) {
+               ix = _ffs(addr,0);
+               if (ix)
+                       return (i * BITS_PER_LONG) + ix - 1;
+       }
+       return bits_size;
+}
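+
+/*
+* A small sketch of how the search helpers above are meant to be used,
+* assuming the usual 32-bit 'unsigned long' of the Windows compilers.
+* Kept out of the build:
+*/
+#if 0
+static void bitmap_search_example(void)
+{
+       /* bits 0..31 and 32..35 are set, everything else is clear */
+       unsigned long map[2] = { 0xffffffff, 0x0000000f };
+
+       MT_ASSERT( find_first_bit(map, 64) == 0 );
+       MT_ASSERT( find_first_zero_bit(map, 64) == 36 );
+       MT_ASSERT( find_next_zero_bit(map, 64, 4) == 36 );
+       MT_ASSERT( find_first_zero_bit(map, 32) == 32 );        /* none found -> returns bits_size */
+}
+#endif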
+
+
+#endif
diff --git a/trunk/hw/mthca/kernel/mt_cache.c b/trunk/hw/mthca/kernel/mt_cache.c
new file mode 100644 (file)
index 0000000..cffc64e
--- /dev/null
@@ -0,0 +1,407 @@
+/*
+ * Copyright (c) 2004 Topspin Communications.  All rights reserved.
+ * Copyright (c) 2005 Intel Corporation. All rights reserved.
+ * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
+ * Copyright (c) 2005 Voltaire, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id: cache.c 2918 2005-07-27 21:04:40Z roland $
+ */
+
+#include <mt_l2w.h>
+#include "mthca_dev.h"
+#if defined(EVENT_TRACING)
+#ifdef offsetof
+#undef offsetof
+#endif
+#include "mt_cache.tmh"
+#endif
+#include <ib_cache.h>
+
+#include "ib_cache.h"
+
+#ifdef ALLOC_PRAGMA
+#pragma alloc_text (PAGE, ib_cache_setup)
+#pragma alloc_text (PAGE, ib_cache_cleanup)
+#endif
+
+
+#pragma warning( disable : 4200)
+struct ib_pkey_cache {
+       int             table_len;
+       u16             table[0];
+};
+
+struct ib_gid_cache {
+       int             table_len;
+       union ib_gid    table[0];
+};
+#pragma warning( default  : 4200)
+
+struct ib_update_work {
+       PIO_WORKITEM work_item;
+       struct ib_device  *device;
+       u8                 port_num;
+};
+
+int ib_get_cached_gid(struct ib_device *device,
+                     u8                port_num,
+                     int               index,
+                     union ib_gid     *gid)
+{
+       struct ib_gid_cache *cache;
+       int ret = 0;
+       SPIN_LOCK_PREP(lh);
+
+       // sanity checks
+       if (port_num < start_port(device) || port_num > end_port(device))
+               return -EINVAL;
+       if (!device->cache.gid_cache)
+               return -EFAULT;
+
+       read_lock_irqsave(&device->cache.lock, &lh);
+
+       cache = device->cache.gid_cache[port_num - start_port(device)];
+
+       if (index < 0 || index >= cache->table_len)
+               ret = -EINVAL;
+       else
+               *gid = cache->table[index];
+
+       read_unlock_irqrestore(&lh);
+
+       return ret;
+}
+
+int ib_find_cached_gid(struct ib_device *device,
+                      union ib_gid     *gid,
+                      u8               *port_num,
+                      u16              *index)
+{
+       struct ib_gid_cache *cache;
+       int i;
+       u8 p;
+       int ret = -ENOENT;
+       SPIN_LOCK_PREP(lh);
+
+       *port_num = (u8)-1;
+       if (index)
+               *index = (u16)-1;
+
+       read_lock_irqsave(&device->cache.lock, &lh);
+
+       for (p = 0; p <= end_port(device) - start_port(device); ++p) {
+               cache = device->cache.gid_cache[p];
+               for (i = 0; i < cache->table_len; ++i) {
+                       if (!memcmp(gid, &cache->table[i], sizeof *gid)) {
+                               *port_num = p + start_port(device);
+                               if (index)
+                                       *index = (u16)i;
+                               ret = 0;
+                               goto found;
+                       }
+               }
+       }
+found:
+       read_unlock_irqrestore(&lh);
+
+       return ret;
+}
+
+int ib_get_cached_pkey(struct ib_device *device,
+                      u8                port_num,
+                      int               index,
+                      u16              *pkey)
+{
+       struct ib_pkey_cache *cache;
+       int ret = 0;
+       SPIN_LOCK_PREP(lh);
+
+       // sanity checks
+       if (port_num < start_port(device) || port_num > end_port(device))
+               return -EINVAL;
+       if (!device->cache.pkey_cache)
+               return -EFAULT;
+
+       read_lock_irqsave(&device->cache.lock, &lh);
+
+       cache = device->cache.pkey_cache[port_num - start_port(device)];
+
+       if (index < 0 || index >= cache->table_len)
+               ret = -EINVAL;
+       else
+               *pkey = cache->table[index];
+
+       read_unlock_irqrestore(&lh);
+
+       return ret;
+}
+
+int ib_find_cached_pkey(struct ib_device *device,
+                       u8                port_num,
+                       u16               pkey,
+                       u16              *index)
+{
+       struct ib_pkey_cache *cache;
+       int i;
+       int ret = -ENOENT;
+       SPIN_LOCK_PREP(lh);
+
+       if (port_num < start_port(device) || port_num > end_port(device))
+               return -EINVAL;
+
+       read_lock_irqsave(&device->cache.lock, &lh);
+
+       cache = device->cache.pkey_cache[port_num - start_port(device)];
+
+       *index = (u16)-1;
+
+       for (i = 0; i < cache->table_len; ++i)
+               if ((cache->table[i] & 0x7fff) == (pkey & 0x7fff)) {
+                       *index = (u16)i;
+                       ret = 0;
+                       break;
+               }
+
+       read_unlock_irqrestore(&lh);
+
+       return ret;
+}
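+
+/*
+* A minimal sketch of the intended use of the cached tables above: look up
+* an index once with ib_find_cached_pkey(), then read by index. Names are
+* illustrative and the block is kept out of the build:
+*/
+#if 0
+static int pkey_cache_example(struct ib_device *device, u8 port_num)
+{
+       u16 index, pkey;
+       int err;
+
+       /* find the index of the default P_Key (0xFFFF) in the cached table */
+       err = ib_find_cached_pkey(device, port_num, 0xFFFF, &index);
+       if (err)
+               return err;
+
+       /* read the same entry back by its index */
+       return ib_get_cached_pkey(device, port_num, index, &pkey);
+}
+#endif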
+
+static void ib_cache_update(struct ib_device *device,
+                           u8                port)
+{
+       struct ib_port_attr       *tprops = NULL;
+       struct ib_pkey_cache      *pkey_cache = NULL, *old_pkey_cache;
+       struct ib_gid_cache       *gid_cache = NULL, *old_gid_cache;
+       int                        i;
+       int                        ret;
+       SPIN_LOCK_PREP(lh);
+
+       tprops = kmalloc(sizeof *tprops, GFP_KERNEL);
+       if (!tprops)
+               return;
+
+       ret = ib_query_port(device, port, tprops);
+       if (ret) {
+               HCA_PRINT(TRACE_LEVEL_WARNING,HCA_DBG_LOW,("ib_query_port failed (%d) for %s, port %d\n",
+                      ret, device->name, port));
+               goto err;
+       }
+
+       pkey_cache = kmalloc(sizeof *pkey_cache + tprops->pkey_tbl_len *
+                            sizeof *pkey_cache->table, GFP_KERNEL);
+       if (!pkey_cache)
+               goto err;
+
+       pkey_cache->table_len = tprops->pkey_tbl_len;
+
+       gid_cache = kmalloc(sizeof *gid_cache + tprops->gid_tbl_len *
+                           sizeof *gid_cache->table, GFP_KERNEL);
+       if (!gid_cache)
+               goto err;
+
+       gid_cache->table_len = tprops->gid_tbl_len;
+
+       for (i = 0; i < pkey_cache->table_len; ++i) {
+               ret = ib_query_pkey(device, port, (u16)i, pkey_cache->table + i);
+               if (ret) {
+                       HCA_PRINT(TRACE_LEVEL_WARNING ,HCA_DBG_LOW,("ib_query_pkey failed (%d) for %s (index %d)\n",
+                              ret, device->name, i));
+                       goto err;
+               }
+       }
+
+       for (i = 0; i < gid_cache->table_len; ++i) {
+               ret = ib_query_gid(device, port, i, gid_cache->table + i);
+               if (ret) {
+                       HCA_PRINT(TRACE_LEVEL_WARNING ,HCA_DBG_LOW,("ib_query_gid failed (%d) for %s (index %d)\n",
+                              ret, device->name, i));
+                       goto err;
+               }
+       }
+
+       write_lock_irq(&device->cache.lock, &lh);
+
+       old_pkey_cache = device->cache.pkey_cache[port - start_port(device)];
+       old_gid_cache  = device->cache.gid_cache [port - start_port(device)];
+
+       device->cache.pkey_cache[port - start_port(device)] = pkey_cache;
+       device->cache.gid_cache [port - start_port(device)] = gid_cache;
+
+       write_unlock_irq(&lh);
+
+       kfree(old_pkey_cache);
+       kfree(old_gid_cache);
+       kfree(tprops);
+       return;
+
+err:
+       kfree(pkey_cache);
+       kfree(gid_cache);
+       kfree(tprops);
+}
+
+static void ib_cache_task(void *work_ptr)
+{
+       struct ib_update_work *work = work_ptr;
+
+       ib_cache_update(work->device, work->port_num);
+}
+
+/* leo: wrapper for Linux work_item callback */
+VOID
+  ib_work_item (
+    IN PDEVICE_OBJECT  DeviceObject,
+    IN PVOID  Context 
+    )
+{
+       struct ib_update_work *work = (struct ib_update_work *)Context;
+       UNREFERENCED_PARAMETER(DeviceObject);
+       ib_cache_task(Context);
+       IoFreeWorkItem(work->work_item);
+       kfree(Context);
+}
+
+static void ib_cache_event(struct ib_event_handler *handler,
+                          struct ib_event *event)
+{
+       struct ib_update_work *work;
+       static int temp_skip = 10;
+
+       if (temp_skip-- <= 0)
+               return;
+
+       if (event->event == IB_EVENT_PORT_ERR    ||
+           event->event == IB_EVENT_PORT_ACTIVE ||
+           event->event == IB_EVENT_LID_CHANGE  ||
+           event->event == IB_EVENT_PKEY_CHANGE ||
+           event->event == IB_EVENT_SM_CHANGE) {
+               work = kmalloc(sizeof *work, GFP_ATOMIC);
+               //TODO: on allocation failure the event is silently dropped - handle it ?
+               if (work) {
+                       work->device   = event->device;
+                       work->port_num = event->element.port_num;
+
+                       { // schedule a work item to update the cache
+                               // get PDO
+                               PDEVICE_OBJECT pdo = handler->device->mdev->ext->cl_ext.p_self_do;
+
+                               // allocate work item
+                               work->work_item = IoAllocateWorkItem(pdo);
+                               if (work->work_item == NULL) {
+                                       //TODO: at least log the error; a return code is needed, but the function is void
+                               }
+                               else { // schedule the work
+                                       IoQueueWorkItem(
+                                                       work->work_item,
+                                                       ib_work_item,
+                                                       DelayedWorkQueue,
+                                                       work
+                                                       );
+                               }
+                       }
+                       
+               }
+       }
+}
+
+static void ib_cache_setup_one(struct ib_device *device)
+{
+       u8 p;
+
+       rwlock_init(&device->cache.lock);
+
+       device->cache.pkey_cache =
+               kmalloc(sizeof *device->cache.pkey_cache *
+                       (end_port(device) - start_port(device) + 1), GFP_KERNEL);
+       device->cache.gid_cache =
+               kmalloc(sizeof *device->cache.gid_cache *
+                       (end_port(device) - start_port(device) + 1), GFP_KERNEL);
+
+       if (!device->cache.pkey_cache || !device->cache.gid_cache) {
+               HCA_PRINT(TRACE_LEVEL_WARNING ,HCA_DBG_LOW,("Couldn't allocate cache "
+                      "for %s\n", device->name));
+               goto err;
+       }
+
+       for (p = 0; p <= end_port(device) - start_port(device); ++p) {
+               device->cache.pkey_cache[p] = NULL;
+               device->cache.gid_cache [p] = NULL;
+               ib_cache_update(device, p + start_port(device));
+       }
+
+       INIT_IB_EVENT_HANDLER(&device->cache.event_handler,
+                             device, ib_cache_event);
+       if (ib_register_event_handler(&device->cache.event_handler))
+               goto err_cache;
+
+       return;
+
+err_cache:
+       for (p = 0; p <= end_port(device) - start_port(device); ++p) {
+               kfree(device->cache.pkey_cache[p]);
+               kfree(device->cache.gid_cache[p]);
+       }
+
+err:
+       kfree(device->cache.pkey_cache);
+       kfree(device->cache.gid_cache);
+}
+
+static void ib_cache_cleanup_one(struct ib_device *device)
+{
+       int p;
+
+       ib_unregister_event_handler(&device->cache.event_handler);
+       //TODO: flush any cache-update work items still in flight
+       // (Linux calls flush_scheduled_work() here)
+
+       for (p = 0; p <= end_port(device) - start_port(device); ++p) {
+               kfree(device->cache.pkey_cache[p]);
+               kfree(device->cache.gid_cache[p]);
+       }
+
+       kfree(device->cache.pkey_cache);
+       kfree(device->cache.gid_cache);
+}
+
+static struct ib_client cache_client = { "cache", ib_cache_setup_one, ib_cache_cleanup_one };
+
+int ib_cache_setup(void)
+{
+       return ib_register_client(&cache_client);
+}
+
+void ib_cache_cleanup(void)
+{
+       ib_unregister_client(&cache_client);
+}
+
diff --git a/trunk/hw/mthca/kernel/mt_device.c b/trunk/hw/mthca/kernel/mt_device.c
new file mode 100644 (file)
index 0000000..a8d4d0c
--- /dev/null
@@ -0,0 +1,565 @@
+/*
+ * Copyright (c) 2004 Topspin Communications.  All rights reserved.
+ * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id: device.c 2730 2005-06-28 16:43:03Z sean.hefty $
+ */
+
+#include "hca_driver.h"
+#if defined(EVENT_TRACING)
+#ifdef offsetof
+#undef offsetof
+#endif
+#include "mt_device.tmh"
+#endif
+#include "ib_verbs.h"
+#include "ib_cache.h"
+
+struct ib_client_data {
+       struct list_head  list;
+       struct ib_client *client;
+       void *            data;
+};
+
+static LIST_HEAD(device_list);
+static LIST_HEAD(client_list);
+
+/*
+ * device_mutex protects access to both device_list and client_list.
+ * There's no real point to using multiple locks or something fancier
+ * like an rwsem: we always access both lists, and we're always
+ * modifying one list or the other list.  In any case this is not a
+ * hot path so there's no point in trying to optimize.
+ */
+KMUTEX device_mutex;
+
+static int ib_device_check_mandatory(struct ib_device *device)
+{
+#define IB_MANDATORY_FUNC(x) { offsetof(struct ib_device, x), #x }
+       static const struct {
+               size_t offset;
+               char  *name;
+       } mandatory_table[] = {
+               IB_MANDATORY_FUNC(query_device),
+               IB_MANDATORY_FUNC(query_port),
+               IB_MANDATORY_FUNC(query_pkey),
+               IB_MANDATORY_FUNC(query_gid),
+               IB_MANDATORY_FUNC(alloc_pd),
+               IB_MANDATORY_FUNC(dealloc_pd),
+               IB_MANDATORY_FUNC(create_ah),
+               IB_MANDATORY_FUNC(destroy_ah),
+               IB_MANDATORY_FUNC(create_qp),
+               IB_MANDATORY_FUNC(modify_qp),
+               IB_MANDATORY_FUNC(destroy_qp),
+               IB_MANDATORY_FUNC(post_send),
+               IB_MANDATORY_FUNC(post_recv),
+               IB_MANDATORY_FUNC(create_cq),
+               IB_MANDATORY_FUNC(destroy_cq),
+               IB_MANDATORY_FUNC(poll_cq),
+               IB_MANDATORY_FUNC(req_notify_cq),
+               IB_MANDATORY_FUNC(get_dma_mr),
+               IB_MANDATORY_FUNC(dereg_mr)
+       };
+       int i;
+
+       for (i = 0; i < sizeof mandatory_table / sizeof mandatory_table[0]; ++i) {
+               if (!*(void **) ((u8 *) device + mandatory_table[i].offset)) {
+                       HCA_PRINT(TRACE_LEVEL_WARNING ,HCA_DBG_LOW,("Device %s is missing mandatory function %s\n",
+                              device->name, mandatory_table[i].name));
+                       return -EINVAL;
+               }
+       }
+
+       return 0;
+}
+
+static struct ib_device *__ib_device_get_by_name(const char *name)
+{
+       struct ib_device *device;
+
+       list_for_each_entry(device, &device_list, core_list,struct ib_device)
+               if (!strncmp(name, device->name, IB_DEVICE_NAME_MAX))
+                       return device;
+
+       return NULL;
+}
+
+static int __extract_number(char *dest_str, const char *format, int *num)
+{
+       char *ptr;
+       UNREFERENCED_PARAMETER(format);
+       for (ptr = dest_str; *ptr; ptr++) {
+               if (*ptr >= '0' && *ptr <= '9') {
+                       *num = atoi(ptr);
+                       return 1;
+               }
+       }
+       return 0;
+}
+static int alloc_name(char *name)
+{
+       long *inuse;
+       char buf[IB_DEVICE_NAME_MAX];
+       struct ib_device *device;
+       int i;
+
+       inuse = (long *) get_zeroed_page(GFP_KERNEL);
+       if (!inuse)
+               return -ENOMEM;
+
+       list_for_each_entry(device, &device_list, core_list,struct ib_device) {
+               if (!__extract_number(device->name, name, &i))
+                       continue;
+               if (i < 0 || i >= PAGE_SIZE * 8)
+                       continue;
+               snprintf(buf, sizeof buf, name, i);
+               if (!strncmp(buf, device->name, IB_DEVICE_NAME_MAX))
+                       set_bit(i, inuse);
+       }
+
+       i = find_first_zero_bit((const unsigned long *)inuse, PAGE_SIZE * 8);
+       free_page(inuse);
+       snprintf(buf, sizeof buf, name, i);
+
+       if (__ib_device_get_by_name(buf))
+               return -ENFILE;
+
+       strlcpy(name, buf, IB_DEVICE_NAME_MAX);
+       return 0;
+}
+
+static int add_client_context(struct ib_device *device, struct ib_client *client)
+{
+       struct ib_client_data *context;
+       SPIN_LOCK_PREP(lh);
+
+       context = kmalloc(sizeof *context, GFP_KERNEL);
+       if (!context) {
+               HCA_PRINT(TRACE_LEVEL_WARNING ,HCA_DBG_LOW,("Couldn't allocate client context for %s/%s\n",
+                      device->name, client->name));
+               return -ENOMEM;
+       }
+
+       context->client = client;
+       context->data   = NULL;
+
+       spin_lock_irqsave(&device->client_data_lock, &lh);
+       list_add(&context->list, &device->client_data_list);
+       spin_unlock_irqrestore(&lh);
+
+       return 0;
+}
+
+/**
+ * ib_register_device - Register an IB device with IB core
+ * @device:Device to register
+ *
+ * Low-level drivers use ib_register_device() to register their
+ * devices with the IB core.  All registered clients will receive a
+ * callback for each device that is added. @device must be allocated
+ * with ib_alloc_device().
+ */
+int ib_register_device(struct ib_device *device)
+{
+       int ret = 0;
+
+       down(&device_mutex);
+
+       if (strchr(device->name, '%')) {
+               ret = alloc_name(device->name);
+               if (ret)
+                       goto out;
+       }
+
+       if (ib_device_check_mandatory(device)) {
+               ret = -EINVAL;
+               goto out;
+       }
+
+       INIT_LIST_HEAD(&device->event_handler_list);
+       INIT_LIST_HEAD(&device->client_data_list);
+       spin_lock_init(&device->event_handler_lock);
+       spin_lock_init(&device->client_data_lock);
+
+       list_add_tail(&device->core_list, &device_list);
+
+       {
+               struct ib_client *client;
+
+               list_for_each_entry(client, &client_list, list,struct ib_client)
+                       if (client->add && !add_client_context(device, client))
+                               client->add(device);
+       }
+
+ out:
+       up(&device_mutex);
+       return ret;
+}
+
+
+/**
+ * ib_unregister_device - Unregister an IB device
+ * @device:Device to unregister
+ *
+ * Unregister an IB device.  All clients will receive a remove callback.
+ */
+void ib_unregister_device(struct ib_device *device)
+{
+       struct ib_client *client;
+       struct ib_client_data *context, *tmp;
+       SPIN_LOCK_PREP(lh);
+
+       down(&device_mutex);
+
+       list_for_each_entry_reverse(client, &client_list, list,struct ib_client)
+               if (client->remove)
+                       client->remove(device);
+
+       list_del(&device->core_list);
+
+       up(&device_mutex);
+
+       spin_lock_irqsave(&device->client_data_lock, &lh);
+       list_for_each_entry_safe(context, tmp, &device->client_data_list, list,struct ib_client_data,struct ib_client_data)
+               kfree(context);
+       spin_unlock_irqrestore(&lh);
+
+}
+
+
+/**
+ * ib_register_client - Register an IB client
+ * @client:Client to register
+ *
+ * Upper level users of the IB drivers can use ib_register_client() to
+ * register callbacks for IB device addition and removal.  When an IB
+ * device is added, each registered client's add method will be called
+ * (in the order the clients were registered), and when a device is
+ * removed, each client's remove method will be called (in the reverse
+ * order that clients were registered).  In addition, when
+ * ib_register_client() is called, the client will receive an add
+ * callback for all devices already registered.
+ */
+int ib_register_client(struct ib_client *client)
+{
+       struct ib_device *device;
+
+       down(&device_mutex);
+
+       list_add_tail(&client->list, &client_list);
+       list_for_each_entry(device, &device_list, core_list,struct ib_device)
+               if (client->add && !add_client_context(device, client))
+                       client->add(device);
+
+       up(&device_mutex);
+
+       return 0;
+}
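+
+/*
+* A minimal sketch of the client registration pattern (add/remove callbacks
+* plus per-device context via ib_set_client_data/ib_get_client_data, defined
+* below). Names are illustrative; the block is kept out of the build:
+*/
+#if 0
+static struct ib_client example_client;
+
+static void example_add_one(struct ib_device *device)
+{
+       void *ctx = kmalloc(sizeof(int), GFP_KERNEL);
+       if (ctx)
+               ib_set_client_data(device, &example_client, ctx);
+}
+
+static void example_remove_one(struct ib_device *device)
+{
+       void *ctx = ib_get_client_data(device, &example_client);
+       if (ctx)
+               kfree(ctx);
+}
+
+static struct ib_client example_client = { "example", example_add_one, example_remove_one };
+
+/* at driver init:    ib_register_client(&example_client);   */
+/* at driver cleanup: ib_unregister_client(&example_client); */
+#endif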
+
+
+/**
+ * ib_unregister_client - Unregister an IB client
+ * @client:Client to unregister
+ *
+ * Upper level users use ib_unregister_client() to remove their client
+ * registration.  When ib_unregister_client() is called, the client
+ * will receive a remove callback for each IB device still registered.
+ */
+void ib_unregister_client(struct ib_client *client)
+{
+       struct ib_client_data *context, *tmp;
+       struct ib_device *device;
+       SPIN_LOCK_PREP(lh);
+
+       down(&device_mutex);
+
+       list_for_each_entry(device, &device_list, core_list,struct ib_device) {
+               if (client->remove)
+                       client->remove(device);
+
+               spin_lock_irqsave(&device->client_data_lock, &lh);
+               list_for_each_entry_safe(context, tmp, &device->client_data_list, list,struct ib_client_data,struct ib_client_data)
+                       if (context->client == client) {
+                               list_del(&context->list);
+                               kfree(context);
+                       }
+               spin_unlock_irqrestore(&lh);
+       }
+       list_del(&client->list);
+
+       up(&device_mutex);
+}
+
+
+/**
+ * ib_get_client_data - Get IB client context
+ * @device:Device to get context for
+ * @client:Client to get context for
+ *
+ * ib_get_client_data() returns client context set with
+ * ib_set_client_data().
+ */
+void *ib_get_client_data(struct ib_device *device, struct ib_client *client)
+{
+       struct ib_client_data *context;
+       void *ret = NULL;
+       SPIN_LOCK_PREP(lh);
+
+       spin_lock_irqsave(&device->client_data_lock, &lh);
+       list_for_each_entry(context, &device->client_data_list, list,struct ib_client_data)
+               if (context->client == client) {
+                       ret = context->data;
+                       break;
+               }
+       spin_unlock_irqrestore(&lh);
+
+       return ret;
+}
+
+
+/**
+ * ib_set_client_data - Set IB client context
+ * @device:Device to set context for
+ * @client:Client to set context for
+ * @data:Context to set
+ *
+ * ib_set_client_data() sets client context that can be retrieved with
+ * ib_get_client_data().
+ */
+void ib_set_client_data(struct ib_device *device, struct ib_client *client,
+                       void *data)
+{
+       struct ib_client_data *context;
+       SPIN_LOCK_PREP(lh);
+
+       spin_lock_irqsave(&device->client_data_lock, &lh);
+       list_for_each_entry(context, &device->client_data_list, list,struct ib_client_data)
+               if (context->client == client) {
+                       context->data = data;
+                       goto out;
+               }
+
+       HCA_PRINT(TRACE_LEVEL_WARNING ,HCA_DBG_LOW ,("No client context found for %s/%s\n",
+              device->name, client->name));
+
+out:
+       spin_unlock_irqrestore(&lh);
+}
+
+
+/**
+ * ib_register_event_handler - Register an IB event handler
+ * @event_handler:Handler to register
+ *
+ * ib_register_event_handler() registers an event handler that will be
+ * called back when asynchronous IB events occur (as defined in
+ * chapter 11 of the InfiniBand Architecture Specification).  This
+ * callback may occur in interrupt context.
+ */
+int ib_register_event_handler  (struct ib_event_handler *event_handler)
+{
+       SPIN_LOCK_PREP(lh);
+
+       spin_lock_irqsave(&event_handler->device->event_handler_lock, &lh);
+       list_add_tail(&event_handler->list,
+                     &event_handler->device->event_handler_list);
+       spin_unlock_irqrestore(&lh);
+
+       return 0;
+}
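+
+/*
+* A minimal sketch of registering for asynchronous events. The same pattern
+* (INIT_IB_EVENT_HANDLER + ib_register_event_handler) is used by the cache
+* code in mt_cache.c. Names are illustrative; kept out of the build:
+*/
+#if 0
+static void example_event_cb(struct ib_event_handler *handler, struct ib_event *event)
+{
+       UNREFERENCED_PARAMETER(handler);
+       if (event->event == IB_EVENT_PORT_ACTIVE) {
+               /* react to the port coming up; may be called at raised IRQL */
+       }
+}
+
+static struct ib_event_handler example_handler;
+
+static int example_watch_events(struct ib_device *device)
+{
+       INIT_IB_EVENT_HANDLER(&example_handler, device, example_event_cb);
+       return ib_register_event_handler(&example_handler);
+}
+#endif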
+
+
+/**
+ * ib_unregister_event_handler - Unregister an event handler
+ * @event_handler:Handler to unregister
+ *
+ * Unregister an event handler registered with
+ * ib_register_event_handler().
+ */
+int ib_unregister_event_handler(struct ib_event_handler *event_handler)
+{
+       SPIN_LOCK_PREP(lh);
+       spin_lock_irqsave(&event_handler->device->event_handler_lock, &lh);
+       list_del(&event_handler->list);
+       spin_unlock_irqrestore(&lh);
+
+       return 0;
+}
+
+
+/**
+ * ib_dispatch_event - Dispatch an asynchronous event
+ * @event:Event to dispatch
+ *
+ * Low-level drivers must call ib_dispatch_event() to dispatch the
+ * event to all registered event handlers when an asynchronous event
+ * occurs.
+ */
+void ib_dispatch_event(struct ib_event *event)
+{
+       struct ib_event_handler *handler;
+       SPIN_LOCK_PREP(lh);
+
+       spin_lock_irqsave(&event->device->event_handler_lock, &lh);
+
+       list_for_each_entry(handler, &event->device->event_handler_list, list,struct ib_event_handler)
+               handler->handler(handler, event);
+
+       spin_unlock_irqrestore(&lh);
+}
+
+
+/**
+ * ib_query_device - Query IB device attributes
+ * @device:Device to query
+ * @device_attr:Device attributes
+ *
+ * ib_query_device() returns the attributes of a device through the
+ * @device_attr pointer.
+ */
+int ib_query_device(struct ib_device *device,
+                   struct ib_device_attr *device_attr)
+{
+       return device->query_device(device, device_attr);
+}
+
+
+/**
+ * ib_query_port - Query IB port attributes
+ * @device:Device to query
+ * @port_num:Port number to query
+ * @port_attr:Port attributes
+ *
+ * ib_query_port() returns the attributes of a port through the
+ * @port_attr pointer.
+ */
+int ib_query_port(struct ib_device *device,
+                 u8 port_num,
+                 struct ib_port_attr *port_attr)
+{
+       if (port_num < start_port(device) || port_num > end_port(device))
+               return -EINVAL;
+       return device->query_port(device, port_num, port_attr);
+}
+
+
+/**
+ * ib_query_gid - Get GID table entry
+ * @device:Device to query
+ * @port_num:Port number to query
+ * @index:GID table index to query
+ * @gid:Returned GID
+ *
+ * ib_query_gid() fetches the specified GID table entry.
+ */
+int ib_query_gid(struct ib_device *device,
+                u8 port_num, int index, union ib_gid *gid)
+{
+       return device->query_gid(device, port_num, index, gid);
+}
+
+
+/**
+ * ib_query_pkey - Get P_Key table entry
+ * @device:Device to query
+ * @port_num:Port number to query
+ * @index:P_Key table index to query
+ * @pkey:Returned P_Key
+ *
+ * ib_query_pkey() fetches the specified P_Key table entry.
+ */
+int ib_query_pkey(struct ib_device *device,
+                 u8 port_num, u16 index, u16 *pkey)
+{
+       return device->query_pkey(device, port_num, index, pkey);
+}
+
+
+/**
+ * ib_modify_device - Change IB device attributes
+ * @device:Device to modify
+ * @device_modify_mask:Mask of attributes to change
+ * @device_modify:New attribute values
+ *
+ * ib_modify_device() changes a device's attributes as specified by
+ * the @device_modify_mask and @device_modify structure.
+ */
+int ib_modify_device(struct ib_device *device,
+                    int device_modify_mask,
+                    struct ib_device_modify *device_modify)
+{
+       return device->modify_device(device, device_modify_mask,
+                                    device_modify);
+}
+
+
+/**
+ * ib_modify_port - Modifies the attributes for the specified port.
+ * @device: The device to modify.
+ * @port_num: The number of the port to modify.
+ * @port_modify_mask: Mask used to specify which attributes of the port
+ *   to change.
+ * @port_modify: New attribute values for the port.
+ *
+ * ib_modify_port() changes a port's attributes as specified by the
+ * @port_modify_mask and @port_modify structure.
+ */
+int ib_modify_port(struct ib_device *device,
+                  u8 port_num, int port_modify_mask,
+                  struct ib_port_modify *port_modify)
+{
+       if (port_num < start_port(device) || port_num > end_port(device))
+               return -EINVAL;
+
+       return device->modify_port(device, port_num, port_modify_mask,
+                                  port_modify);
+}
+
+int ib_core_init(void)
+{
+       int ret;
+
+       /* leo: added because Windows has no static initializer for the mutex (a Linux semaphore) */
+       KeInitializeMutex(&device_mutex,0);
+       
+       ret = ib_cache_setup();
+       if (ret) {
+               HCA_PRINT(TRACE_LEVEL_WARNING   ,HCA_DBG_LOW   ,("Couldn't set up InfiniBand P_Key/GID cache\n"));
+       }
+
+       return ret;
+}
+
+void ib_core_cleanup(void)
+{
+       ib_cache_cleanup();
+}
+
diff --git a/trunk/hw/mthca/kernel/mt_l2w.c b/trunk/hw/mthca/kernel/mt_l2w.c
new file mode 100644 (file)
index 0000000..bfd32b1
--- /dev/null
@@ -0,0 +1,151 @@
+#include <mt_l2w.h>
+
+u64 mt_ticks_per_sec;
+
+void MT_time_calibrate()
+{
+#if defined(_WIN64) && (defined(IA64) || defined(_IA64_))
+       /* Itanium */
+       mt_ticks_per_sec = 10000000;    
+       
+#elif defined(_WIN64) && (defined(AMD64) || defined(_AMD64_))
+       /* x64 */
+       
+       LARGE_INTEGER a;
+       KeQueryPerformanceCounter( &a );
+       mt_ticks_per_sec = a.QuadPart;
+       
+#elif defined(_WIN32) && (defined(i386) || defined(_x86_))
+       /* x86 */
+       
+       LARGE_INTEGER a;
+       KeQueryPerformanceCounter( &a );
+       mt_ticks_per_sec = a.QuadPart;
+       
+#else
+       #error Unsupported platform
+#endif
+
+}
+
+pci_pool_t *
+pci_pool_create (const char *name, struct mthca_dev *mdev,
+        size_t size, size_t align, size_t allocation)
+{
+       pci_pool_t *pool;
+       UNREFERENCED_PARAMETER(align);
+       UNREFERENCED_PARAMETER(allocation);
+
+       MT_ASSERT(KeGetCurrentIrql() <= DISPATCH_LEVEL);
+       
+       // allocation parameter is not handled yet
+       ASSERT(allocation == 0);
+
+       // allocate object
+       pool = (pci_pool_t *)ExAllocatePoolWithTag( NonPagedPool, sizeof(pci_pool_t), MT_TAG_PCIPOOL );
+       if (pool == NULL) 
+               return NULL;
+
+       //TODO: not strictly correct: Linux's pci_pool_alloc returns physically
+       // contiguous memory, while the default allocator (ExAllocatePoolWithTag) does not.
+       // For now the pool is only used for elements of size <= PAGE_SIZE.
+       // Anyway - a sanity check:
+       ASSERT(size <= PAGE_SIZE);
+       if (size > PAGE_SIZE)
+               return NULL;
+
+       //TODO: not very efficient; custom alloc/free routines could be supplied here
+       ExInitializeNPagedLookasideList( &pool->pool_hdr, NULL, NULL, 0, size, MT_TAG_PCIPOOL, 0 );
+       
+       // fill the object
+       pool->mdev = mdev;
+       pool->size = size;
+       strncpy( pool->name, name, sizeof pool->name );
+
+       return pool;            
+}
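+
+/*
+* A minimal sketch of creating a pool of small, fixed-size DMA-able objects
+* with the wrapper above. The matching alloc/free/destroy helpers live in
+* mt_pcipool.h (not shown here), so only the creation step is illustrated:
+*/
+#if 0
+static pci_pool_t *example_make_pool(struct mthca_dev *mdev)
+{
+       /* 64-byte elements; the align/allocation hints are not used by this port */
+       return pci_pool_create("example_pool", mdev, 64, 64, 0);
+}
+#endif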
+
+// from lib/string.c
+/**
+* strlcpy - Copy a %NUL terminated string into a sized buffer
+* @dest: Where to copy the string to
+* @src: Where to copy the string from
+* @size: size of destination buffer
+*
+* Compatible with *BSD: the result is always a valid
+* NUL-terminated string that fits in the buffer (unless,
+* of course, the buffer size is zero). It does not pad
+* out the result like strncpy() does.
+*/
+SIZE_T strlcpy(char *dest, const char *src, SIZE_T size)
+{
+        SIZE_T ret = strlen(src);
+
+        if (size) {
+                SIZE_T len = (ret >= size) ? size-1 : ret;
+                memcpy(dest, src, len);
+                dest[len] = '\0';
+        }
+        return ret;
+}
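+
+/*
+* A short illustration of the strlcpy() contract: the return value is the
+* length of the source string, so truncation can be detected by comparing it
+* with the destination size. Kept out of the build:
+*/
+#if 0
+static void strlcpy_example(void)
+{
+       char buf[8];
+       if (strlcpy(buf, "a longer string", sizeof buf) >= sizeof buf) {
+               /* source did not fit; buf now holds "a longe" (NUL-terminated) */
+       }
+}
+#endif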
+
+
+int __bitmap_full(const unsigned long *bitmap, int bits)
+{
+       int k, lim = bits/BITS_PER_LONG;
+       for (k = 0; k < lim; ++k)
+               if (~bitmap[k])
+                       return 0;
+
+       if (bits % BITS_PER_LONG)
+               if (~bitmap[k] & BITMAP_LAST_WORD_MASK(bits))
+                       return 0;
+
+       return 1;
+}
+
+int __bitmap_empty(const unsigned long *bitmap, int bits)
+{
+       int k, lim = bits/BITS_PER_LONG;
+       for (k = 0; k < lim; ++k)
+               if (bitmap[k])
+                       return 0;
+
+       if (bits % BITS_PER_LONG)
+               if (bitmap[k] & BITMAP_LAST_WORD_MASK(bits))
+                       return 0;
+
+       return 1;
+}
+
+int request_irq(
+       IN      CM_PARTIAL_RESOURCE_DESCRIPTOR  *int_info,      /* interrupt resources */
+       IN              KSPIN_LOCK      *isr_lock,              /* spin lock for ISR */                 
+       IN              PKSERVICE_ROUTINE isr,          /* ISR */
+       IN              void *isr_ctx,                                          /* ISR context */
+       OUT     PKINTERRUPT *int_obj                    /* interrupt object */
+       )
+{
+       NTSTATUS                status;
+
+       status = IoConnectInterrupt(
+               int_obj,                                                                                                                /* InterruptObject */
+               isr,                                                                                                                            /* ISR */ 
+               isr_ctx,                                                                                                                /* ISR context */
+               isr_lock,                                                                                                       /* spinlock */
+               int_info->u.Interrupt.Vector,                                   /* interrupt vector */
+               (KIRQL)int_info->u.Interrupt.Level,             /* IRQL */
+               (KIRQL)int_info->u.Interrupt.Level,             /* Synchronize IRQL */
+               (BOOLEAN)((int_info->Flags == CM_RESOURCE_INTERRUPT_LATCHED) ? 
+               Latched : LevelSensitive),                                                      /* interrupt type: LATCHED or LEVEL */
+               (BOOLEAN)(int_info->ShareDisposition == CmResourceShareShared),         /* vector shared or not */
+               (KAFFINITY)int_info->u.Interrupt.Affinity,      /* interrupt affinity */ 
+               FALSE                                                                                                                   /* whether to save Float registers */
+               );
+
+       if (!NT_SUCCESS(status))
+               return -EFAULT;         /* failed to connect interrupt */
+       else
+               return 0;
+}
+
diff --git a/trunk/hw/mthca/kernel/mt_l2w.h b/trunk/hw/mthca/kernel/mt_l2w.h
new file mode 100644 (file)
index 0000000..ccf40d1
--- /dev/null
@@ -0,0 +1,94 @@
+#ifndef MT_L2W_H
+#define MT_L2W_H
+
+// ===========================================
+// INCLUDES
+// ===========================================
+
+// OS
+#include <stdio.h>
+#include <stdarg.h>
+#include <stdlib.h>
+#include <errno.h>
+
+// ours - the order is important
+#include <mt_types.h>
+#include <mt_bitmap.h>
+#include <mt_memory.h>
+#include <mt_list.h>
+#include <mt_spinlock.h>
+#include <mt_atomic.h>
+#include <mt_sync.h>
+#include <mt_pci.h>
+#include <mt_pcipool.h>
+//#include <mt_byteorder.h>
+#include <mt_time.h>
+#include <hca_debug.h>
+
+
+// ===========================================
+// SUBSTITUTIONS
+// ===========================================
+
+#define BUG_ON(exp)            ASSERT(!(exp)) /* in Linux follows here panic() !*/ 
+#define WARN_ON(exp)           ASSERT(!(exp)) /* in Linux follows here panic() !*/ 
+#define snprintf       _snprintf
+
+// memory barriers
+#define wmb KeMemoryBarrier
+#define rmb KeMemoryBarrier
+#define mb KeMemoryBarrier
+
+// ===========================================
+// LITERALS
+// ===========================================
+
+
+
+
+// ===========================================
+// TYPES
+// ===========================================
+
+// rw_lock
+typedef spinlock_t             rwlock_t;
+
+// dummy function
+typedef void (*MT_EMPTY_FUNC)();
+
+// ===========================================
+// MACROS
+// ===========================================
+
+// nullifying macros
+#define might_sleep()                          
+
+// ARRAY_SIZE
+#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
+
+// ALIGN
+#define ALIGN(x,a) (((x)+(a)-1)&~((a)-1))
+
+// There is a bug in the Microsoft compiler: when _byteswap_uint64() is given
+// an expression, it evaluates the expression but does not swap the dwords.
+// Hence the workaround below.
+#ifdef BYTESWAP_UINT64_BUG_FIXED
+#define CPU_2_BE64_PREP                
+#define CPU_2_BE64(x)                  cl_hton64(x)
+#else
+#define CPU_2_BE64_PREP        unsigned __int64 __tmp__;       
+#define CPU_2_BE64(x)                  ( __tmp__ = x, cl_hton64(__tmp__) )
+#endif
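+
+// Because of the workaround, a function that uses CPU_2_BE64 on an expression
+// must place CPU_2_BE64_PREP among its declarations. A minimal sketch,
+// kept out of the build:
+#if 0
+static unsigned __int64 example_to_be64(unsigned __int64 base)
+{
+       unsigned __int64 be;
+       CPU_2_BE64_PREP                         /* declares the temporary used by the macro */
+       be = CPU_2_BE64(base + 1);              /* the expression is evaluated exactly once */
+       return be;
+}
+#endif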
+
+
+SIZE_T strlcpy(char *dest, const char *src, SIZE_T size);
+void MT_time_calibrate();
+
+#define ERR_PTR(error)         ((void*)(LONG_PTR)(error))
+#define PTR_ERR(ptr)                   ((long)(LONG_PTR)(void*)(ptr))
+//TODO: there are 2 assumptions here:
+// - a valid pointer never lies in the top ~1000 bytes of the address space
+// - an error code is never larger than 1000
+#define IS_ERR(ptr)                            ((ULONG_PTR)ptr > (ULONG_PTR)-1000L)
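+
+// A minimal sketch of the Linux-style error-pointer convention these macros
+// emulate: a function returns either a valid pointer or ERR_PTR(-errno), and
+// the caller distinguishes the two with IS_ERR()/PTR_ERR(). Kept out of the build:
+#if 0
+static void *example_create(void)
+{
+       void *obj = kmalloc(64, GFP_KERNEL);
+       if (!obj)
+               return ERR_PTR(-ENOMEM);
+       return obj;
+}
+
+static int example_use(void)
+{
+       void *obj = example_create();
+       if (IS_ERR(obj))
+               return PTR_ERR(obj);    /* e.g. -ENOMEM */
+       kfree(obj);
+       return 0;
+}
+#endif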
+
+#endif
diff --git a/trunk/hw/mthca/kernel/mt_list.h b/trunk/hw/mthca/kernel/mt_list.h
new file mode 100644 (file)
index 0000000..9fa96d8
--- /dev/null
@@ -0,0 +1,168 @@
+#ifndef MT_LIST_H
+#define MT_LIST_H
+
+// taken from list.h
+
+/*
+ * These are non-NULL pointers that will result in page faults
+ * under normal circumstances, used to verify that nobody uses
+ * non-initialized list entries.
+ */
+#define LIST_POISON1  ((void *) 0x00100100)
+#define LIST_POISON2  ((void *) 0x00200200)
+
+/*
+* Simple doubly linked list implementation.
+*
+* Some of the internal functions ("__xxx") are useful when
+* manipulating whole lists rather than single entries, as
+* sometimes we already know the next/prev entries and we can
+* generate better code by using them directly rather than
+* using the generic single-entry routines.
+*/
+
+struct list_head {
+        struct list_head *next, *prev;
+};
+
+#define LIST_HEAD_INIT(name) { &(name), &(name) }
+
+#define LIST_HEAD(name) \
+       struct list_head name = LIST_HEAD_INIT(name)
+
+#define INIT_LIST_HEAD(ptr) do { \
+        (ptr)->next = (ptr); (ptr)->prev = (ptr); \
+} while (0)
+
+
+/*
+* Insert a new entry between two known consecutive entries.
+*
+* This is only for internal list manipulation where we know
+* the prev/next entries already!
+*/
+static inline void __list_add(struct list_head *new,
+                             struct list_head *prev,
+                             struct list_head *next)
+{
+       next->prev = new;
+       new->next = next;
+       new->prev = prev;
+       prev->next = new;
+}
+
+/**
+* list_add - add a new entry
+* @new: new entry to be added
+* @head: list head to add it after
+*
+* Insert a new entry after the specified head.
+* This is good for implementing stacks.
+*/
+static inline void list_add(struct list_head *new, struct list_head *head)
+{
+       __list_add(new, head, head->next);
+}
+
+/**
+* list_add_tail - add a new entry
+* @new: new entry to be added
+* @head: list head to add it before
+*
+* Insert a new entry before the specified head.
+* This is useful for implementing queues.
+*/
+static inline void list_add_tail(struct list_head *new, struct list_head *head)
+{
+       __list_add(new, head->prev, head);
+}
+
+/*
+* Delete a list entry by making the prev/next entries
+* point to each other.
+*
+* This is only for internal list manipulation where we know
+* the prev/next entries already!
+*/
+static inline void __list_del(struct list_head * prev, struct list_head * next)
+{
+       next->prev = prev;
+       prev->next = next;
+}
+
+/**
+* list_del - deletes entry from list.
+* @entry: the element to delete from the list.
+* Note: list_empty on entry does not return true after this, the entry is
+* in an undefined state.
+*/
+static inline void list_del(struct list_head *entry)
+{
+       __list_del(entry->prev, entry->next);
+       entry->next = LIST_POISON1;
+       entry->prev = LIST_POISON2;
+}
+
+/**
+* list_empty - tests whether a list is empty
+* @head: the list to test.
+*/
+static inline int list_empty(const struct list_head *head)
+{
+       return head->next == head;
+}
+
+ /**
+ * list_entry - get the struct for this entry
+ * @ptr:        the &struct list_head pointer.
+ * @type:       the type of the struct this is embedded in.
+ * @member:     the name of the list_struct within the struct.
+ */
+#define list_entry(ptr, type, member) \
+        container_of(ptr, type, member)
+
+//leo: macro changed to avoid the non-portable typeof operator
+/**
+* list_for_each_entry  -       iterate over list of given type
+* @pos:        the type * to use as a loop counter.
+* @head:       the head for your list.
+* @member:     the name of the list_struct within the struct.
+* @type:                       typeof(*pos)
+*/
+#define list_for_each_entry(pos, head, member,type)          \
+        for (pos = list_entry((head)->next, type, member);      \
+             &pos->member != (head);        \
+             pos = list_entry(pos->member.next, type, member))
+
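+// A minimal usage sketch of the typed iteration macro above (the element type
+// is passed explicitly because typeof is unavailable). Kept out of the build:
+#if 0
+struct example_item {
+       struct list_head link;
+       int value;
+};
+
+static int example_sum(void)
+{
+       struct list_head head;
+       struct example_item a, b, *pos;
+       int sum = 0;
+
+       INIT_LIST_HEAD(&head);
+       a.value = 1;
+       b.value = 2;
+       list_add_tail(&a.link, &head);
+       list_add_tail(&b.link, &head);
+
+       list_for_each_entry(pos, &head, link, struct example_item)
+               sum += pos->value;
+
+       return sum;     /* 3 */
+}
+#endif
+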
+
+//leo: macro changed to avoid the non-portable typeof operator
+/**
+* list_for_each_entry_reverse - iterate backwards over list of given type.
+* @pos:                                the type * to use as a loop counter.
+* @head:                       the head for your list.
+* @member:             the name of the list_struct within the struct.
+* @type:                               typeof(*pos)
+*/
+#define list_for_each_entry_reverse(pos, head, member,type)                                                                    \
+                       for (pos = list_entry((head)->prev, type, member);                      \
+                                        &pos->member != (head);                                \
+                                        pos = list_entry(pos->member.prev, type, member))
+
+
+//leo: macro changed to avoid the non-portable typeof operator
+/**
+* list_for_each_entry_safe - iterate over list of given type safe against removal of list entry
+* @pos:                                the type * to use as a loop counter.
+* @n:                                  another type * to use as temporary storage
+* @head:                       the head for your list.
+* @member:             the name of the list_struct within the struct.
+* @type:                               typeof(*pos)
+* @type_n:                     typeof(*n)
+*/
+#define list_for_each_entry_safe(pos, n, head, member,type,type_n)                                                                     \
+                               for (pos = list_entry((head)->next, type, member),                      \
+                                                       n = list_entry(pos->member.next, type, member); \
+                                                &pos->member != (head);                                                                                                                                                \
+                                                pos = n, n = list_entry(n->member.next, type_n, member))
+
+
+#endif
diff --git a/trunk/hw/mthca/kernel/mt_memory.c b/trunk/hw/mthca/kernel/mt_memory.c
new file mode 100644 (file)
index 0000000..4c19376
--- /dev/null
@@ -0,0 +1,515 @@
+/*
+ * Copyright (c) 2004 Topspin Corporation.  All rights reserved.
+ * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
+ * Copyright (c) 2005 Mellanox Technologies.  All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id: packer.c 2730 2005-06-28 16:43:03Z sean.hefty $
+ */
+#include "hca_driver.h"
+#include "mthca_dev.h"
+#if defined (EVENT_TRACING)
+#ifdef offsetof
+#undef offsetof
+#endif
+#include "mt_memory.tmh"
+#endif 
+
+
+/*
+*      Function: map a user buffer into kernel space and lock it
+*
+*      Return: 0 on success, negative error code on failure
+*/
+int get_user_pages(
+       IN              struct mthca_dev *dev,  /* device */
+       IN              u64 start,                                                      /* address in user space */
+       IN              int npages,                                             /* size in pages */
+       IN              int write_access,                               /* access rights */
+       OUT     struct scatterlist *sg                  /* s/g list */
+       )
+{
+       PMDL mdl_p;
+       int size = npages << PAGE_SHIFT;
+       int access = (write_access) ? IoWriteAccess : IoReadAccess;
+       int err;
+       void * kva;     /* kernel virtual address */
+
+       UNREFERENCED_PARAMETER(dev);
+       
+       HCA_ENTER(HCA_DBG_MEMORY);
+       ASSERT(KeGetCurrentIrql() < DISPATCH_LEVEL);
+       
+       /* allocate MDL */
+       mdl_p = IoAllocateMdl( (PVOID)(ULONG_PTR)start, (ULONG)size, 
+               FALSE,
+               FALSE,          /* not charge quota */
+               NULL);
+       if (mdl_p == NULL) {
+               err = -ENOMEM;  
+               goto err0;
+       }
+
+       /* lock memory */
+       __try   {       
+               MmProbeAndLockPages( mdl_p, UserMode,   access ); 
+       } 
+       __except (EXCEPTION_EXECUTE_HANDLER)
+       {
+               NTSTATUS Status = GetExceptionCode();
+               HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_MEMORY ,("Exception 0x%x on MmProbeAndLockPages(), addr 0x%I64x, size %d\n", Status, start, size));
+               switch(Status){
+                       case STATUS_WORKING_SET_QUOTA:
+                               err = -ENOMEM;break;
+                       case STATUS_ACCESS_VIOLATION:
+                               err = -EACCES;break;
+                       default :
+                               err = -EINVAL;
+                       }
+
+               goto err1;
+       }
+
+       /* map it to kernel */
+       kva = MmMapLockedPagesSpecifyCache( mdl_p, 
+               KernelMode, MmNonCached, 
+               NULL, FALSE, NormalPagePriority );
+       if (kva == NULL) {
+               HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_MEMORY ,("MmMapLockedPagesSpecifyCache failed\n"));
+               err = -EFAULT;
+               goto err2;
+       }
+
+       sg->page = kva;
+       sg->length = size;
+       sg->offset = (unsigned int)(start & ~PAGE_MASK);
+       sg->p_mdl = mdl_p;      
+       sg->dma_address = MmGetPhysicalAddress(kva).QuadPart;
+       return 0;       
+       
+err2:  
+       MmUnlockPages(mdl_p);
+err1:          
+    IoFreeMdl(mdl_p);
+err0:
+       HCA_EXIT(HCA_DBG_MEMORY);
+       return err;
+               
+ }
+
+void put_page(struct scatterlist *sg)
+{
+       if (sg->p_mdl) {
+               MmUnmapLockedPages( sg->page, sg->p_mdl );
+               MmUnlockPages(sg->p_mdl);
+               IoFreeMdl(sg->p_mdl);
+       }
+}
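+
+/*
+* A minimal sketch of the intended pairing: lock and map a user buffer with
+* get_user_pages(), use the kernel VA / bus address from the scatterlist,
+* then release it with put_page(). Kept out of the build:
+*/
+#if 0
+static int example_with_user_buffer(struct mthca_dev *dev, u64 user_va, int npages)
+{
+       struct scatterlist sg;
+       int err;
+
+       err = get_user_pages(dev, user_va, npages, 1 /* write access */, &sg);
+       if (err)
+               return err;
+
+       /* ... access the buffer through sg.page (kernel VA) or sg.dma_address ... */
+
+       put_page(&sg);
+       return 0;
+}
+#endif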
+
+VOID
+  AdapterListControl(
+    IN PDEVICE_OBJECT  DeviceObject,
+    IN PIRP  Irp,
+    IN PSCATTER_GATHER_LIST  ScatterGather,
+    IN PVOID  Context
+    )
+{
+       struct scatterlist *p_sg = (struct scatterlist *)Context;
+
+       UNREFERENCED_PARAMETER(DeviceObject);
+       UNREFERENCED_PARAMETER(Irp);
+
+       // sanity checks
+       if (!ScatterGather || !Context) {
+               HCA_PRINT(TRACE_LEVEL_ERROR   ,HCA_DBG_LOW   ,("AdapterListControl failed: invalid parameters\n"));
+               return;
+       }
+       if (ScatterGather->NumberOfElements > 1) {
+               HCA_PRINT(TRACE_LEVEL_WARNING ,HCA_DBG_LOW ,("AdapterListControl failed: unexpected sg size; %d elements \n",
+                       ScatterGather->NumberOfElements ));
+       }
+       if (ScatterGather->Elements[0].Length != p_sg->length) {
+               HCA_PRINT(TRACE_LEVEL_WARNING ,HCA_DBG_LOW ,("AdapterListControl failed: unexpected buffer size %#x (expected %#x) \n",
+                       ScatterGather->Elements[0].Length, p_sg->length ));
+       }
+
+       // results      
+       p_sg->dma_address = ScatterGather->Elements[0].Address.QuadPart;        // get logical address
+       p_sg->p_os_sg = ScatterGather;          // store sg list address for releasing
+       //NB: we do not flush the buffers by FlushAdapterBuffers(), because we don't really transfer data
+}
+
+/* Returns: the number of mapped sg elements */
+int pci_map_sg(struct mthca_dev *dev, 
+       struct scatterlist *sg,         int nents, int direction)
+{
+#ifndef USE_GET_SG_LIST
+
+       UNREFERENCED_PARAMETER(dev);
+       UNREFERENCED_PARAMETER(sg);
+       UNREFERENCED_PARAMETER(direction);
+
+       // mapping was performed in alloc_dma_mem
+       return nents;
+
+#else
+
+       int i;
+       NTSTATUS status;
+       hca_dev_ext_t *p_ext = dev->ext;
+       struct scatterlist *p_sg = sg;
+       KIRQL irql = KeRaiseIrqlToDpcLevel();
+
+       for (i=0; i<nents; ++i, ++p_sg) {
+               status =        p_ext->p_dma_adapter->DmaOperations->GetScatterGatherList( 
+                       p_ext->p_dma_adapter, p_ext->cl_ext.p_self_do, p_sg->p_mdl, p_sg->page, 
+                       p_sg->length, AdapterListControl, sg, (BOOLEAN)direction );
+               if (!NT_SUCCESS(status)) {
+                       HCA_PRINT(TRACE_LEVEL_ERROR   ,HCA_DBG_LOW   ,("GetScatterGatherList failed %#x\n", status));
+                       break;
+               }
+       }
+       KeLowerIrql(irql);
+       return i; /* the number of entries actually mapped */
+
+#endif 
+}
+
+/* Returns: the number of unmapped sg elements */
+int pci_unmap_sg(struct mthca_dev *dev, 
+       struct scatterlist *sg,         int nents, int direction)
+{
+#ifndef USE_GET_SG_LIST
+       
+               UNREFERENCED_PARAMETER(dev);
+               UNREFERENCED_PARAMETER(sg);
+               UNREFERENCED_PARAMETER(direction);
+               // mapping was performed in alloc_dma_mem
+               return nents;
+       
+#else
+
+       int i;
+       hca_dev_ext_t *p_ext = dev->ext;
+       struct scatterlist *p_sg = sg;
+       KIRQL irql = KeRaiseIrqlToDpcLevel();
+
+       for (i=0; i<nents; ++i, ++p_sg) {
+               void *p_os_sg = p_sg->p_os_sg;
+               if (p_os_sg) {
+                       p_sg->p_os_sg = NULL;
+                       p_ext->p_dma_adapter->DmaOperations->PutScatterGatherList( 
+                               p_ext->p_dma_adapter, p_os_sg, (BOOLEAN)direction );
+               }
+       }
+       KeLowerIrql(irql);
+       return i; /* the number of entries actually unmapped */
+
+#endif 
+}
+
+void *alloc_dma_mem(
+       IN              struct mthca_dev *dev, 
+       IN              unsigned long size,
+       OUT     struct scatterlist *p_sg)
+{
+       void *va;
+       DMA_ADAPTER *p_dma = dev->ext->p_dma_adapter;
+
+
+#ifndef USE_GET_SG_LIST
+
+       PHYSICAL_ADDRESS  pa;
+       ASSERT(KeGetCurrentIrql() == PASSIVE_LEVEL);
+
+       RtlZeroMemory(p_sg,sizeof *p_sg);
+       p_sg->length    = size;
+       va  = p_dma->DmaOperations->AllocateCommonBuffer(
+               p_dma, size, &pa, FALSE );
+       p_sg->dma_address = pa.QuadPart;
+
+#else
+
+       int err;
+       PHYSICAL_ADDRESS la = {0}, ba = {0}, ha = {(u64)(-1I64)};
+
+       ASSERT(KeGetCurrentIrql() <= DISPATCH_LEVEL);
+
+       RtlZeroMemory(p_sg,sizeof *p_sg);
+       p_sg->length    = size;
+
+       // allocate memory
+       va = MmAllocateContiguousMemorySpecifyCache(
+               size, la, ha, ba, MmNonCached );
+       if (!va) {
+               HCA_PRINT(TRACE_LEVEL_ERROR   ,HCA_DBG_LOW   ,("MmAllocateContiguousMemorySpecifyCache failed on %#x size\n", size ));
+               goto err_alloc;
+       }
+
+       // allocate MDL 
+       p_sg->p_mdl = IoAllocateMdl( va, size, FALSE, FALSE, NULL );
+       if (!p_sg->p_mdl) {
+               HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_LOW ,("IoAllocateMdl failed on %#x size\n", size ));
+               goto err_mdl;
+       }
+       MmBuildMdlForNonPagedPool( p_sg->p_mdl );
+       goto end;
+
+err_mdl:
+       MmFreeContiguousMemory(va);
+       va = NULL;
+err_alloc:
+end:
+
+#endif
+
+       p_sg->page = va;
+       return va;
+}
+
+void free_dma_mem(
+       IN              struct mthca_dev *dev, 
+       IN              struct scatterlist *p_sg)
+{
+#ifndef USE_GET_SG_LIST
+
+       PHYSICAL_ADDRESS  pa;
+       DMA_ADAPTER *p_dma = dev->ext->p_dma_adapter;
+
+       ASSERT(KeGetCurrentIrql() == PASSIVE_LEVEL);
+
+       pa.QuadPart = p_sg->dma_address;
+       p_dma->DmaOperations->FreeCommonBuffer( 
+               p_dma, p_sg->length, pa, 
+               p_sg->page, FALSE );
+
+#else
+
+       PMDL p_mdl = p_sg->p_mdl;
+       PVOID page = p_sg->page;
+
+       ASSERT(KeGetCurrentIrql() == PASSIVE_LEVEL);
+       if (p_mdl) {
+               p_sg->p_mdl = NULL;
+               IoFreeMdl( p_mdl );
+       }
+       if (page) {
+               p_sg->page = NULL;
+               MmFreeContiguousMemory(page);   
+       }
+
+#endif
+}
+
+
+typedef struct _mt_iobuf_seg {
+  LIST_ENTRY   link;
+  PMDL   mdl_p;
+  u64 va;  /* virtual address of the buffer */
+  u64 size;     /* size in bytes of the buffer */
+  u32 nr_pages;
+  int  is_user;
+} mt_iobuf_seg_t;
+
+static int register_segment(
+       IN              u64 va,
+       IN              u64 size,
+       IN              int is_user,
+       IN              ib_access_t acc,
+    IN OUT mt_iobuf_t * iobuf_p)
+{
+       PMDL mdl_p;
+       int rc;
+       KPROCESSOR_MODE mode;  
+       mt_iobuf_seg_t * new_iobuf;
+       static ULONG cnt=0;
+       LOCK_OPERATION Operation;
+  
+       // set Operation
+       if (acc & IB_AC_LOCAL_WRITE)
+               Operation = IoModifyAccess;
+       else
+               Operation = IoReadAccess;
+       
+       // allocate IOBUF segment object
+       new_iobuf = (mt_iobuf_seg_t *)kmalloc(sizeof(mt_iobuf_seg_t), GFP_KERNEL );
+       if (new_iobuf == NULL) {
+               rc = -ENOMEM;
+               goto err_nomem;
+       }
+   
+       // allocate MDL 
+       mdl_p = IoAllocateMdl( (PVOID)(ULONG_PTR)va, (ULONG)size, FALSE,FALSE,NULL);
+       if (mdl_p == NULL) {
+               rc = -ENOMEM;
+               goto err_alloc_mdl;
+       }
+
+       // make context-dependent things
+       if (is_user) {
+               ASSERT(KeGetCurrentIrql() < DISPATCH_LEVEL);
+       mode = UserMode;
+       }
+       else {  /* Mapping to kernel virtual address */
+               //    MmBuildMdlForNonPagedPool(mdl_p);   // fill MDL ??? - should we do that really ?
+       mode = KernelMode;
+       }
+
+       __try { /* try */
+       MmProbeAndLockPages( mdl_p, mode, Operation );   /* lock memory */
+       } /* try */
+               
+       __except (EXCEPTION_EXECUTE_HANDLER)    {
+               HCA_PRINT(TRACE_LEVEL_ERROR, HCA_DBG_MEMORY, 
+                       ("register_segment: Exception 0x%x on MmProbeAndLockPages(), va %I64d, sz %I64d\n", 
+                       GetExceptionCode(), va, size));
+               rc = -EACCES;
+               goto err_probe;
+       }
+       
+       // fill IOBUF object
+       new_iobuf->va = va;
+       new_iobuf->size= size;
+       new_iobuf->nr_pages = ADDRESS_AND_SIZE_TO_SPAN_PAGES( va, size );
+       new_iobuf->mdl_p = mdl_p;
+       new_iobuf->is_user = is_user;
+       InsertTailList( &iobuf_p->seg_que, &new_iobuf->link );
+       return 0;
+
+err_probe:
+  IoFreeMdl(mdl_p);
+err_alloc_mdl:  
+  ExFreePool((PVOID)new_iobuf);
+err_nomem:  
+  return rc;
+}
+
+int iobuf_register(
+       IN              u64 va,
+       IN              u64 size,
+       IN              int is_user,
+       IN              ib_access_t acc,
+       IN OUT  mt_iobuf_t *iobuf_p)
+{
+  int rc=0;
+  u64 seg_va = va;     // current segment start
+  u64 seg_size = size; // current segment size
+  u64 rdc = size;                      // remaining data counter - how much is still left to lock
+  u64 delta;                           // the size of the last, partially used page of the first segment
+  unsigned page_size = PAGE_SIZE;
+  
+// 32 - a safety margin, just in case
+#define PFNS_IN_PAGE_SIZE_MDL          ((PAGE_SIZE - sizeof(struct _MDL) - 32) / sizeof(long))
+#define MIN_IOBUF_SEGMENT_SIZE (PAGE_SIZE * PFNS_IN_PAGE_SIZE_MDL)     // 4MB  
+
+       ASSERT(KeGetCurrentIrql() <= DISPATCH_LEVEL);
+
+       // init IOBUF object
+       InitializeListHead( &iobuf_p->seg_que );
+       iobuf_p->seg_num = 0;
+       if (rdc <= 0)
+               return -EINVAL;
+               
+       // allocate segments
+       while (rdc > 0) {
+               // map a segment
+               rc = register_segment(seg_va, seg_size, is_user, acc, iobuf_p );
+
+               // success - move to another segment
+               if (!rc) {
+                       rdc -= seg_size;
+                       seg_va += seg_size;
+                       iobuf_p->seg_num++;
+                       if (seg_size > rdc)
+                               seg_size = rdc;
+                       continue;
+               }
+
+               // failure - the buffer is too large: halve it and try again
+               if (rc == -ENOMEM) {
+                       // nothing left to shrink - the system is really out of memory
+                       if (seg_size <= MIN_IOBUF_SEGMENT_SIZE)
+                               break;
+                       // halve the size
+                       seg_size >>= 1;
+                       // round the segment size to the page boundary (only for the first segment)
+                       if (iobuf_p->seg_num == 0) {
+                               delta = (seg_va + seg_size) & (page_size - 1);
+                               seg_size -= delta;
+                               seg_size += page_size;
+                               if (seg_size > rdc)
+                                       seg_size = rdc;
+                       }
+                       continue;
+               }
+
+               // got unrecoverable error
+               break;
+       }
+
+       // on failure - release all the segments that were already locked
+       if (rc) 
+               iobuf_deregister( iobuf_p );
+       else     {
+               // fill IOBUF object
+               iobuf_p->va = va;
+               iobuf_p->size= size;
+               iobuf_p->nr_pages = ADDRESS_AND_SIZE_TO_SPAN_PAGES( va, size );
+               iobuf_p->is_user = is_user;
+       }
+
+       return rc;
+}
+
+
+static void deregister_segment(mt_iobuf_seg_t * iobuf_seg_p)
+{
+  MmUnlockPages( iobuf_seg_p->mdl_p );    // unlock the buffer 
+  IoFreeMdl( iobuf_seg_p->mdl_p );        // free MDL
+  ExFreePool(iobuf_seg_p);
+}
+
+void iobuf_deregister(mt_iobuf_t *iobuf_p)
+{
+  mt_iobuf_seg_t *iobuf_seg_p;         // pointer to current segment object
+  
+  ASSERT(KeGetCurrentIrql() <= DISPATCH_LEVEL);
+
+  // release segments
+  while (!IsListEmpty( &iobuf_p->seg_que )) {
+       iobuf_seg_p = (mt_iobuf_seg_t *)(PVOID)RemoveTailList( &iobuf_p->seg_que );
+       deregister_segment(iobuf_seg_p);
+       iobuf_p->seg_num--;
+  }
+  ASSERT(iobuf_p->seg_num == 0);
+}
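A minimal usage sketch for the iobuf helpers above (not part of the patch; the buffer, its length and the access flags are illustrative): iobuf_register() locks the pages in as many MDL segments as needed, halving the segment size on -ENOMEM, and iobuf_deregister() releases everything.

    static int example_lock_user_buffer( u64 user_va, u64 len )
    {
        mt_iobuf_t iobuf;
        /* lock the user buffer for local write access */
        int rc = iobuf_register( user_va, len, 1 /* is_user */, IB_AC_LOCAL_WRITE, &iobuf );
        if (rc)
            return rc;                  /* nothing stays locked on failure */
        /* ... hand the MDLs queued on iobuf.seg_que to the HCA ... */
        iobuf_deregister( &iobuf );     /* unlock and free all segments */
        return 0;
    }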
+
+
+
+
diff --git a/trunk/hw/mthca/kernel/mt_memory.h b/trunk/hw/mthca/kernel/mt_memory.h
new file mode 100644 (file)
index 0000000..89685a9
--- /dev/null
@@ -0,0 +1,277 @@
+#ifndef MT_MEMORY_H
+#define MT_MEMORY_H
+
+#include "iba/ib_types.h"
+
+// ===========================================
+// CONSTANTS
+// ===========================================
+
+#define MT_TAG_ATOMIC          'MOTA'
+#define MT_TAG_KERNEL          'LNRK'
+#define MT_TAG_HIGH                    'HGIH'
+#define MT_TAG_PCIPOOL         'PICP'
+#define MT_TAG_IOMAP                   'PAMI'
+
+// ===========================================
+// SUBSTITUTIONS
+// ===========================================
+
+#define memcpy_toio            memcpy
+
+// ===========================================
+// MACROS
+// ===========================================
+
+#define PAGE_MASK                              (~(PAGE_SIZE-1))
+#define NEXT_PAGE_ALIGN(addr)  (((addr)+PAGE_SIZE-1)&PAGE_MASK)
+
+
+// ===========================================
+// SYSTEM MEMORY
+// ===========================================
+
+// memory
+#define __GFP_NOWARN           0       /* Suppress page allocation failure warning */
+#define __GFP_HIGHMEM  0
+
+#define GFP_ATOMIC                     1               /* can't wait (i.e. DPC or higher) */
+#define GFP_KERNEL                     2               /* can wait (npaged) */
+#define GFP_HIGHUSER           4               /* GFP_KERNEL, that can be in HIGH memory */
+
+
+#define SLAB_ATOMIC            GFP_ATOMIC
+#define SLAB_KERNEL            GFP_KERNEL
+
+#if 1
+static inline void * kmalloc( SIZE_T bsize, unsigned int gfp_mask)
+{
+       void *ptr;
+       MT_ASSERT( KeGetCurrentIrql() <= DISPATCH_LEVEL );
+       switch (gfp_mask) {
+               case GFP_ATOMIC:
+                       ptr = ExAllocatePoolWithTag( NonPagedPool, bsize, MT_TAG_ATOMIC );
+                       break;
+               case GFP_KERNEL:
+                       ptr = ExAllocatePoolWithTag( NonPagedPool, bsize, MT_TAG_KERNEL );
+                       break;
+               case GFP_HIGHUSER:
+                       ptr = ExAllocatePoolWithTag( NonPagedPool, bsize, MT_TAG_HIGH );
+                       break;
+               default:
+                       DbgPrint("kmalloc: unsupported flag %d\n", gfp_mask);
+                       ptr = NULL;
+                       break;
+       }
+       return ptr;
+}
+#else
+#define kmalloc(bsize,flags)   ExAllocatePoolWithTag( NonPagedPool, bsize, MT_TAG_KERNEL ) 
+#endif
+
+static inline void * kzalloc( SIZE_T bsize, unsigned int gfp_mask)
+{
+       void* va = kmalloc(bsize, gfp_mask);
+       if (va)
+               RtlZeroMemory(va, bsize);
+       return va;
+}
+
+static inline void kfree (const void *pobj)
+{
+       MT_ASSERT( KeGetCurrentIrql() <= DISPATCH_LEVEL );
+       if (pobj)
+               ExFreePool((void *)pobj);
+}
+
+#define get_zeroed_page(mask)                          kzalloc(PAGE_SIZE, mask)
+#define free_page(ptr)                                                                 kfree(ptr)
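A small sketch of how the Linux-style allocators above are meant to be used (illustrative only; the structure and function names are invented): GFP_KERNEL and GFP_ATOMIC both end up in NonPagedPool and differ only in the pool tag.

    struct foo { int a; int b; };

    static struct foo *alloc_foo(void)
    {
        /* zeroed, non-paged allocation; returns NULL on failure */
        return (struct foo *)kzalloc( sizeof(struct foo), GFP_KERNEL );
    }

    static void free_foo(struct foo *p)
    {
        kfree( p );     /* NULL-safe */
    }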
+
+
+// ===========================================
+// IO SPACE <==> SYSTEM MEMORY
+// ===========================================
+
+
+/**
+* ioremap     -   map bus memory into CPU space
+* @offset:    bus address of the memory
+* @size:      size of the resource to map
+*
+* ioremap performs a platform specific sequence of operations to
+* make bus memory CPU accessible via the readb/readw/readl/writeb/
+* writew/writel functions and the other mmio helpers. The returned
+* address is not guaranteed to be usable directly as a virtual
+* address. 
+*/
+static inline  void *ioremap(io_addr_t addr, SIZE_T size, SIZE_T* psize)
+{
+       PHYSICAL_ADDRESS pa;
+       void *va;
+       
+       MT_ASSERT( KeGetCurrentIrql() <= DISPATCH_LEVEL );
+       pa.QuadPart = addr;
+       va = MmMapIoSpace( pa, size, MmNonCached ); 
+       *psize = size;
+       return va;
+}
+
+static inline void iounmap(void *va, SIZE_T size)
+{
+       MmUnmapIoSpace( va, size);
+}
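An illustrative sketch of mapping a device BAR with the wrappers above (the BAR index is hypothetical; the bar[] fields follow the pci_resource_start()/pci_resource_len() accessors in mt_pci.h):

    static void example_map_bar0( struct mthca_dev *dev )
    {
        SIZE_T mapped_size;
        void *regs = ioremap( dev->ext->bar[0].phys,
                (SIZE_T)dev->ext->bar[0].size, &mapped_size );
        if (regs) {
            /* ... access registers through readl()/writel() from mt_pci.h ... */
            iounmap( regs, mapped_size );
        }
    }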
+
+ // ===========================================
+ // DMA SUPPORT
+ // ===========================================
+
+#define PCI_DMA_BIDIRECTIONAL   0
+#define PCI_DMA_TODEVICE                        1
+#define PCI_DMA_FROMDEVICE              2
+#define DMA_TO_DEVICE                                  PCI_DMA_TODEVICE
+
+ struct scatterlist {
+               dma_addr_t              dma_address;    /* logical (device) address */
+               void *                          page;                           /* kernel virtual address */
+               PMDL                            p_mdl;                                  /* MDL, if any (used for user space buffers) */
+               PSCATTER_GATHER_LIST p_os_sg;   /* adapter scatter-gather list */
+               unsigned int            offset;                         /* offset in the first page */
+               unsigned int            length;                         /* buffer length */
+       };
+
+ #define sg_dma_address(sg)     ((sg)->dma_address)
+ #define sg_dma_len(sg)                        ((sg)->length)
+
+ struct mthca_dev;
+
+ int pci_map_sg(struct mthca_dev *dev, 
+               struct scatterlist *sg,         int nents, int direction);
+ int pci_unmap_sg(struct mthca_dev *dev, 
+               struct scatterlist *sg,         int nents, int direction);
+
+ void free_dma_mem(
+               IN              struct mthca_dev *dev, 
+               IN              struct scatterlist *p_sg);
+ void *alloc_dma_mem(
+        IN      struct mthca_dev *dev, 
+        IN      unsigned long size,
+        OUT struct scatterlist *p_sg);
+
+static inline void *alloc_dma_zmem(
+        IN      struct mthca_dev *dev, 
+        IN      unsigned long size,
+        OUT struct scatterlist *p_sg)
+{
+       void *va = alloc_dma_mem( dev, size, p_sg );
+       if (va)
+               RtlZeroMemory(va, size);
+       return va;
+}
+
+static inline void *alloc_dma_zmem_map(
+       IN              struct mthca_dev *dev, 
+       IN              unsigned long size,
+       IN              int direction,
+       OUT struct scatterlist *p_sg)
+{
+       void *va = alloc_dma_zmem( dev, size, p_sg );
+       if (va) {
+               RtlZeroMemory(va, size);
+               if (!pci_map_sg( dev, p_sg, 1, direction )) {
+                       free_dma_mem( dev, p_sg );
+                       va = NULL;
+               }
+       }
+       return va;
+}
+        
+static inline void free_dma_mem_map(
+        IN      struct mthca_dev *dev, 
+        IN      struct scatterlist *p_sg,
+        IN    int direction )
+{
+       pci_unmap_sg( dev, p_sg, 1,  direction );
+       free_dma_mem( dev, p_sg );
+}
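A short usage sketch for the DMA helpers above (the size and direction are arbitrary): the CPU uses the returned virtual address while the device is given sg_dma_address().

    static int example_alloc_buffer( struct mthca_dev *dev, struct scatterlist *sg )
    {
        void *buf = alloc_dma_zmem_map( dev, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL, sg );
        if (!buf)
            return -ENOMEM;
        /* ... program the HCA with sg_dma_address(sg), fill 'buf' from the CPU ... */
        free_dma_mem_map( dev, sg, PCI_DMA_BIDIRECTIONAL );
        return 0;
    }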
+
+ static inline dma_addr_t pci_mape_page(struct mthca_dev *dev, 
+        void *va,      unsigned long offset,  SIZE_T size, int direction)
+ {
+        UNREFERENCED_PARAMETER(dev);
+        UNREFERENCED_PARAMETER(va);
+        UNREFERENCED_PARAMETER(offset);
+        UNREFERENCED_PARAMETER(size);
+        UNREFERENCED_PARAMETER(direction);
+        /* assume that pages were already translated to DMA space */
+        return 0; /* i.e., no additional mapping is performed here */ 
+ }
+
+ // ===========================================
+ // HELPERS
+ // ===========================================
+ static inline int get_order(unsigned long size)
+{
+        int order;
+
+        size = (size-1) >> (PAGE_SHIFT-1);
+        order = -1;
+        do {
+                size >>= 1;
+                order++;
+        } while (size);
+        return order;
+}
+
+static inline int long_log2(unsigned long x)
+{
+        int r = 0;
+        for (x >>= 1; x > 0; x >>= 1)
+                r++;
+        return r;
+}
+
+static inline unsigned long roundup_pow_of_two(unsigned long x)
+{
+        return (1UL << fls(x - 1));
+}
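Worked values for the helpers above, assuming PAGE_SHIFT == 12 (4 KB pages):

    /*
     *   get_order(4096)          == 0      - one page
     *   get_order(8192)          == 1      - two pages
     *   long_log2(4096)          == 12
     *   roundup_pow_of_two(3000) == 4096
     */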
+
+// ===========================================
+// PROTOTYPES
+// ===========================================
+
+void put_page(struct scatterlist *sg);
+int get_user_pages(
+       IN              struct mthca_dev *dev,  /* device */
+       IN              u64 start,                                                      /* address in user space */
+       IN              int npages,                                             /* size in pages */
+       IN              int write_access,                               /* access rights */
+       OUT     struct scatterlist *sg                  /* s/g list */
+       );
+
+typedef struct _mt_iobuf {
+  u64 va;  /* virtual address of the buffer */
+  u64 size;     /* size in bytes of the buffer */
+  LIST_ENTRY           seg_que;
+  u32 nr_pages;
+  int  is_user;
+  int                          seg_num;
+} mt_iobuf_t;
+
+
+void iobuf_deregister(mt_iobuf_t *iobuf_p);
+int iobuf_register(
+       IN              u64 va,
+       IN              u64 size,
+       IN              int is_user,
+       IN              ib_access_t acc,
+       IN OUT  mt_iobuf_t *iobuf_p);
+
+
+unsigned long copy_from_user(void *to, const void *from, unsigned long n);
+unsigned long copy_to_user(void *to, const void *from, unsigned long n);
+
+
+#endif 
diff --git a/trunk/hw/mthca/kernel/mt_packer.c b/trunk/hw/mthca/kernel/mt_packer.c
new file mode 100644 (file)
index 0000000..afba3a6
--- /dev/null
@@ -0,0 +1,205 @@
+/*
+ * Copyright (c) 2004 Topspin Corporation.  All rights reserved.
+ * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id: packer.c 2730 2005-06-28 16:43:03Z sean.hefty $
+ */
+
+#include <ib_pack.h>
+#if defined(EVENT_TRACING)
+#ifdef offsetof
+#undef offsetof
+#endif
+#include "mt_packer.tmh"
+#endif
+
+static u64 value_read(int offset, int size, u8 *structure)
+{
+       switch (size) {
+       case 1: return                *(u8  *) (structure + offset);
+       case 2: return cl_ntoh16(*(__be16 *) (structure + offset));
+       case 4: return cl_ntoh32(*(__be32 *) (structure + offset));
+       case 8: return cl_ntoh64(*(__be64 *) (structure + offset));
+       default:
+               HCA_PRINT(TRACE_LEVEL_WARNING ,HCA_DBG_LOW ,("Field size %d bits not handled\n", size * 8));
+               return 0;
+       }
+}
+
+/**
+ * ib_pack - Pack a structure into a buffer
+ * @desc:Array of structure field descriptions
+ * @desc_len:Number of entries in @desc
+ * @structure:Structure to pack from
+ * @buf:Buffer to pack into
+ *
+ * ib_pack() packs a list of structure fields into a buffer,
+ * controlled by the array of fields in @desc.
+ */
+void ib_pack(const struct ib_field        *desc,
+            int                           desc_len,
+            u8                         *structure,
+            u8                         *buf)
+{
+       int i;
+       CPU_2_BE64_PREP;
+
+       for (i = 0; i < desc_len; ++i) {
+               if (desc[i].size_bits <= 32) {
+                       int shift;
+                       u32 val;
+                       __be32 mask;
+                       __be32 *addr;
+
+                       shift = 32 - desc[i].offset_bits - desc[i].size_bits;
+                       if (desc[i].struct_size_bytes)
+                               val = (u32)value_read(desc[i].struct_offset_bytes,
+                                                desc[i].struct_size_bytes,
+                                                structure) << shift;
+                       else
+                               val = 0;
+
+                       mask = cl_hton32(((1ull << desc[i].size_bits) - 1) << shift);
+                       addr = (__be32 *) buf + desc[i].offset_words;
+                       *addr = (*addr & ~mask) | (cl_hton32(val) & mask);
+               } else if (desc[i].size_bits <= 64) {
+                       int shift;
+                       u64 val;
+                       __be64 mask;
+                       __be64 *addr;
+
+                       shift = 64 - desc[i].offset_bits - desc[i].size_bits;
+                       if (desc[i].struct_size_bytes)
+                               val = value_read(desc[i].struct_offset_bytes,
+                                                desc[i].struct_size_bytes,
+                                                structure) << shift;
+                       else
+                               val = 0;
+
+                       mask = CPU_2_BE64((~0ull >> (64 - desc[i].size_bits)) << shift);
+                       addr = (__be64 *) ((__be32 *) buf + desc[i].offset_words);
+                       *addr = (*addr & ~mask) | (cl_hton64(val) & mask);
+               } else {
+                       if (desc[i].offset_bits % 8 ||
+                           desc[i].size_bits   % 8) {
+                               HCA_PRINT(TRACE_LEVEL_WARNING ,HCA_DBG_LOW ,("Structure field %s of size %d "
+                                      "bits is not byte-aligned\n",
+                                      desc[i].field_name, desc[i].size_bits));
+                       }
+
+                       if (desc[i].struct_size_bytes)
+                               memcpy(buf + desc[i].offset_words * 4 +
+                                      desc[i].offset_bits / 8,
+                                      structure + desc[i].struct_offset_bytes,
+                                      desc[i].size_bits / 8);
+                       else
+                               RtlZeroMemory(buf + desc[i].offset_words * 4 + desc[i].offset_bits / 8,
+                              desc[i].size_bits / 8);
+               }
+       }
+}
+
+static void value_write(int offset, int size, u64 val, u8 *structure)
+{
+       switch (size * 8) {
+       case 8:  *(    u8 *) (structure + offset) = (u8)val; break;
+       case 16: *(__be16 *) (structure + offset) = cl_hton16((u16)val); break;
+       case 32: *(__be32 *) (structure + offset) = cl_hton32((u32)val); break;
+       case 64: *(__be64 *) (structure + offset) = cl_hton64(val); break;
+       default:
+               HCA_PRINT(TRACE_LEVEL_WARNING   ,HCA_DBG_LOW   ,("Field size %d bits not handled\n", size * 8));
+       }
+}
+
+/**
+ * ib_unpack - Unpack a buffer into a structure
+ * @desc:Array of structure field descriptions
+ * @desc_len:Number of entries in @desc
+ * @buf:Buffer to unpack from
+ * @structure:Structure to unpack into
+ *
+ * ib_unpack() unpacks a list of structure fields from a buffer,
+ * controlled by the array of fields in @desc.
+ */
+void ib_unpack(const struct ib_field        *desc,
+              int                           desc_len,
+              u8                         *buf,
+              u8                         *structure)
+{
+       int i;
+
+       for (i = 0; i < desc_len; ++i) {
+               if (!desc[i].struct_size_bytes)
+                       continue;
+
+               if (desc[i].size_bits <= 32) {
+                       int shift;
+                       u32  val;
+                       u32  mask;
+                       __be32 *addr;
+
+                       shift = 32 - desc[i].offset_bits - desc[i].size_bits;
+                       mask = ((1ull << desc[i].size_bits) - 1) << shift;
+                       addr = (__be32 *) buf + desc[i].offset_words;
+                       val = (cl_ntoh32(*addr) & mask) >> shift;
+                       value_write(desc[i].struct_offset_bytes,
+                                   desc[i].struct_size_bytes,
+                                   val,
+                                   structure);
+               } else if (desc[i].size_bits <= 64) {
+                       int shift;
+                       u64  val;
+                       u64  mask;
+                       __be64 *addr;
+
+                       shift = 64 - desc[i].offset_bits - desc[i].size_bits;
+                       mask = (~0ull >> (64 - desc[i].size_bits)) << shift;
+                       addr = (__be64 *) buf + desc[i].offset_words;
+                       val = (cl_ntoh64(*addr) & mask) >> shift;
+                       value_write(desc[i].struct_offset_bytes,
+                                   desc[i].struct_size_bytes,
+                                   val,
+                                   structure);
+               } else {
+                       if (desc[i].offset_bits % 8 ||
+                           desc[i].size_bits   % 8) {
+                               HCA_PRINT(TRACE_LEVEL_WARNING ,HCA_DBG_LOW ,("Structure field %s of size %d "
+                                      "bits is not byte-aligned\n",
+                                      desc[i].field_name, desc[i].size_bits));
+                       }
+
+                       memcpy(structure + desc[i].struct_offset_bytes,
+                              buf + desc[i].offset_words * 4 +
+                              desc[i].offset_bits / 8,
+                              desc[i].size_bits / 8);
+               }
+       }
+}
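An illustrative descriptor and round trip through ib_pack()/ib_unpack() (not from the patch; the structure, field name and offsets are invented, and designated initializers are used only for readability). Note that value_read()/value_write() treat multi-byte structure fields as big-endian, so the host structure keeps the field in network byte order.

    struct my_hdr {
        u32     reserved;
        __be16  lid;            /* stored big-endian, as value_read() expects */
    };

    static const struct ib_field my_desc[] = {
        { .struct_offset_bytes = 4,     /* offset of 'lid' in struct my_hdr */
          .struct_size_bytes   = 2,
          .offset_words        = 0,
          .offset_bits         = 16,    /* bits 16..31 of the first wire word */
          .size_bits           = 16,
          .field_name          = "lid" },
    };

    static void example_pack_roundtrip(void)
    {
        u8 wire[4] = { 0 };
        struct my_hdr hdr = { 0, 0 };

        hdr.lid = cl_hton16( 0x1234 );
        ib_pack( my_desc, 1, (u8 *)&hdr, wire );    /* structure -> wire buffer */
        ib_unpack( my_desc, 1, wire, (u8 *)&hdr );  /* wire buffer -> structure */
    }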
diff --git a/trunk/hw/mthca/kernel/mt_pci.h b/trunk/hw/mthca/kernel/mt_pci.h
new file mode 100644 (file)
index 0000000..83947ef
--- /dev/null
@@ -0,0 +1,115 @@
+#ifndef MT_PCI_H
+#define MT_PCI_H
+
+// ===========================================
+// LITERALS
+// ===========================================
+
+#ifndef PCI_VENDOR_ID_MELLANOX
+#define PCI_VENDOR_ID_MELLANOX                                                                 0x15b3
+#endif
+
+#ifndef PCI_DEVICE_ID_MELLANOX_TAVOR
+#define PCI_DEVICE_ID_MELLANOX_TAVOR                                           0x5a44
+#endif
+
+#ifndef PCI_DEVICE_ID_MELLANOX_ARBEL_COMPAT
+#define PCI_DEVICE_ID_MELLANOX_ARBEL_COMPAT            0x6278
+#endif
+
+#ifndef PCI_DEVICE_ID_MELLANOX_ARBEL
+#define PCI_DEVICE_ID_MELLANOX_ARBEL                                           0x6282
+#endif
+
+#ifndef PCI_DEVICE_ID_MELLANOX_SINAI_OLD
+#define PCI_DEVICE_ID_MELLANOX_SINAI_OLD                               0x5e8c
+#endif
+
+#ifndef PCI_DEVICE_ID_MELLANOX_SINAI
+#define PCI_DEVICE_ID_MELLANOX_SINAI                                           0x6274
+#endif
+
+#ifndef PCI_VENDOR_ID_TOPSPIN
+#define PCI_VENDOR_ID_TOPSPIN                                                                          0x1867
+#endif
+
+
+// ===========================================
+// TYPES
+// ===========================================
+
+
+// ===========================================
+// MACROS/FUNCTIONS
+// ===========================================
+
+// get bar boundaries
+#if 1
+#define pci_resource_start(dev,bar_num)        ((dev)->ext->bar[bar_num].phys)
+#define pci_resource_len(dev,bar_num)  ((dev)->ext->bar[bar_num].size)
+#else
+static inline  uint64_t pci_resource_start(struct mthca_dev *dev, int bar_num) 
+{
+       return dev->ext->bar[bar_num].phys;
+}
+#endif
+
+
+// i/o to registers
+
+static inline u64 readq(const volatile void __iomem *addr)
+{
+       //TODO: write atomic implementation of _IO_READ_QWORD and change mthca_doorbell.h
+       u64 val;
+       READ_REGISTER_BUFFER_ULONG((PULONG)(addr), (PULONG)&val, 2 );
+       return val;
+}
+
+static inline u32 readl(const volatile void __iomem *addr)
+{
+       return READ_REGISTER_ULONG((PULONG)(addr));
+}
+
+static inline u16 reads(const volatile void __iomem *addr)
+{
+       return READ_REGISTER_USHORT((PUSHORT)(addr));
+}
+
+static inline u8 readb(const volatile void __iomem *addr)
+{
+       return READ_REGISTER_UCHAR((PUCHAR)(addr));
+}
+
+#define __raw_readq            readq
+#define __raw_readl            readl
+#define __raw_reads            reads
+#define __raw_readb            readb
+
+static inline void writeq(unsigned __int64 val, volatile void __iomem *addr)
+{
+       //TODO: write atomic implementation of _IO_WRITE_QWORD and change mthca_doorbell.h
+       WRITE_REGISTER_BUFFER_ULONG( (PULONG)(addr), (PULONG)&val, 2 );
+}
+
+static inline void writel(unsigned int val, volatile void __iomem *addr)
+{
+       WRITE_REGISTER_ULONG((PULONG)(addr),val);
+}
+
+static inline void writes(unsigned short val, volatile void __iomem *addr)
+{
+       WRITE_REGISTER_USHORT((PUSHORT)(addr),val);
+}
+
+static inline void writeb(unsigned char val, volatile void __iomem *addr)
+{
+       WRITE_REGISTER_UCHAR((PUCHAR)(addr),val);
+}
+
+#define __raw_writeq           writeq
+#define __raw_writel           writel
+#define __raw_writes           writes
+#define __raw_writeb           writeb
+
+#endif
+
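A small sketch of register access through the wrappers above (the register offset and the read-modify-write are purely illustrative; 'regs' is assumed to come from ioremap() in mt_memory.h):

    static u32 example_read_modify_write( void __iomem *regs )
    {
        u32 val = readl( (u8 __iomem *)regs + 0x18 );   /* hypothetical offset */
        writel( val | 1, (u8 __iomem *)regs + 0x18 );
        return val;
    }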
diff --git a/trunk/hw/mthca/kernel/mt_pcipool.h b/trunk/hw/mthca/kernel/mt_pcipool.h
new file mode 100644 (file)
index 0000000..996cb11
--- /dev/null
@@ -0,0 +1,103 @@
+#ifndef MT_PCIPOOL_H
+#define MT_PCIPOOL_H
+
+typedef struct pci_pool {
+       size_t                                                          size;
+       struct mthca_dev                *mdev;
+       char                                                                    name [32];
+       NPAGED_LOOKASIDE_LIST  pool_hdr;
+} pci_pool_t;
+       
+// taken from dmapool.c
+
+/**
+* pci_pool_create - Creates a pool of consistent memory blocks, for dma.
+* @name: name of pool, for diagnostics
+* @mdev: device that will be doing the DMA
+* @size: size of the blocks in this pool.
+* @align: alignment requirement for blocks; must be a power of two
+* @allocation: returned blocks won't cross this boundary (or zero)
+* Context: !in_interrupt()
+*
+* Returns a dma allocation pool with the requested characteristics, or
+* null if one can't be created.  Given one of these pools, dma_pool_alloc()
+* may be used to allocate memory.  Such memory will all have "consistent"
+* DMA mappings, accessible by the device and its driver without using
+* cache flushing primitives.  The actual size of blocks allocated may be
+* larger than requested because of alignment.
+*
+* If allocation is nonzero, objects returned from dma_pool_alloc() won't
+ * cross that size boundary.  This is useful for devices which have
+ * addressing restrictions on individual DMA transfers, such as not crossing
+ * boundaries of 4KBytes.
+ */
+pci_pool_t *
+pci_pool_create (const char *name, struct mthca_dev *mdev,
+        size_t size, size_t align, size_t allocation);
+
+/**
+ * dma_pool_alloc - get a block of consistent memory
+ * @pool: dma pool that will produce the block
+ * @mem_flags: GFP_* bitmask
+ * @handle: pointer to dma address of block
+ *
+ * This returns the kernel virtual address of a currently unused block,
+ * and reports its dma address through the handle.
+ * If such a memory block can't be allocated, null is returned.
+ */
+static inline void * 
+pci_pool_alloc (pci_pool_t *pool, int mem_flags, dma_addr_t *handle)
+{
+       PHYSICAL_ADDRESS pa;
+       void * ptr;
+       UNREFERENCED_PARAMETER(mem_flags);
+
+       MT_ASSERT( KeGetCurrentIrql() <= DISPATCH_LEVEL );
+
+       ptr = ExAllocateFromNPagedLookasideList( &pool->pool_hdr );
+       if (ptr != NULL) {
+               pa = MmGetPhysicalAddress( ptr );
+               *handle = pa.QuadPart;
+       }
+       return ptr; 
+}
+       
+       
+/**
+* dma_pool_free - put block back into dma pool
+* @pool: the dma pool holding the block
+* @vaddr: virtual address of block
+* @dma: dma address of block
+*
+* Caller promises neither device nor driver will again touch this block
+* unless it is first re-allocated.
+*/
+static inline  void
+pci_pool_free (pci_pool_t *pool, void *vaddr, dma_addr_t dma)
+{
+       UNREFERENCED_PARAMETER(dma);
+       MT_ASSERT( KeGetCurrentIrql() <= DISPATCH_LEVEL );
+       ExFreeToNPagedLookasideList( &pool->pool_hdr, vaddr );
+}
+       
+       
+
+/**
+ * pci_pool_destroy - destroys a pool of dma memory blocks.
+ * @pool: dma pool that will be destroyed
+ * Context: !in_interrupt()
+ *
+ * Caller guarantees that no more memory from the pool is in use,
+ * and that nothing will try to use the pool after this call.
+ */
+static inline  void
+pci_pool_destroy (pci_pool_t *pool)
+{
+       ExDeleteNPagedLookasideList( &pool->pool_hdr );
+       ExFreePool( pool);
+}
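A sketch of the pool life cycle documented above (the pool name and block size are illustrative):

    static void example_pool_usage( struct mthca_dev *mdev )
    {
        dma_addr_t dma;
        void *box;
        pci_pool_t *pool = pci_pool_create( "mailbox", mdev, 256, 256, 0 );
        if (!pool)
            return;
        box = pci_pool_alloc( pool, GFP_KERNEL, &dma );
        if (box) {
            /* ... 'box' for the CPU, 'dma' for the device ... */
            pci_pool_free( pool, box, dma );
        }
        pci_pool_destroy( pool );
    }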
+
+
+
+#endif
diff --git a/trunk/hw/mthca/kernel/mt_reset_tavor.c b/trunk/hw/mthca/kernel/mt_reset_tavor.c
new file mode 100644 (file)
index 0000000..a8367df
--- /dev/null
@@ -0,0 +1,642 @@
+#include <initguid.h>
+#include <wdmguid.h>
+#include "hca_driver.h"
+#include "mthca.h"
+#include "hca_debug.h"
+#include "Mt_l2w.h"
+#if defined(EVENT_TRACING)
+#ifdef offsetof
+#undef offsetof
+#endif
+#include "mt_reset_tavor.tmh"
+#endif
+
+
+#pragma warning(disable : 4996)
+
+/* limitations */
+#define N_BUSES                                16              /* max number of PCI buses */
+#define N_DEVICES                      32              /* max number of devices on one bus */
+#define N_FUNCTIONS            8                       /* max number of functions on one device */
+#define N_CARDS                                8                       /* max number of HCA cards */
+
+/*----------------------------------------------------------------*/
+
+PWCHAR 
+WcharFindChar(
+       IN      PWCHAR          pi_BufStart,
+       IN      PWCHAR          pi_BufEnd,
+       IN      WCHAR           pi_FromPattern,
+       IN      WCHAR           pi_ToPattern
+       )
+/*++
+
+Routine Description:
+    Finds the first wide character in the buffer that falls within the given pattern range
+
+Arguments:
+
+       pi_BufStart.......... start of the source string
+       pi_BufEnd............ end of the source string
+       pi_FromPattern....... start of pattern range to find
+       pi_ToPattern......... end of pattern range to find
+
+Return Value:
+
+       pointer to the first pattern found or NULL (when reached the end)
+
+--*/
+{ /* WcharFindChar */
+
+       PWCHAR  l_pResult       = pi_BufStart;
+
+       while (l_pResult < pi_BufEnd )
+       {
+               if (*l_pResult >= pi_FromPattern && *l_pResult <= pi_ToPattern)
+                       return l_pResult;
+               l_pResult++;
+       }
+
+       return NULL;
+
+} /* WcharFindChar */
+
+/*----------------------------------------------------------------*/
+
+/*
+ * Function: PciFindDeviceByBusAndId
+ *
+ * Parameters:
+ *             IN                      pi_Bus                          - a bus, to start the scan
+ *             IN                      pi_DevId                        - Device Id to search
+ *             INOUT   po_pDevFunc     - pointer to dev/func, from which to start the search
+ * 
+ * Returns:
+ *             FALSE   - device not found
+ *     TRUE    - a device was found;  *po_pDevFunc contains its location
+ *
+ * Description:
+ *     The function is intended for iterative search on one bus.
+ *             It looks for the device of pi_DevId id, starting from device with  
+ *             *po_pDevFunc location. When it finds the next device of that id, it updates  
+ *             *po_pDevFunc with the found device' location
+ *             *po_pDevFunc with the location of the device found
+ */
+BOOLEAN PciFindDeviceByBusAndId( 
+       IN      ULONG           pi_Bus, 
+       IN      ULONG           pi_DevId, 
+       IN OUT PULONG   po_pDevFunc )
+{
+       ULONG   l_DevId;
+       ULONG   l_Bytes;
+       ULONG   l_Device;
+       ULONG l_Function; 
+
+       // calculate, where to start the search
+       l_Device = *po_pDevFunc & 0x01f;
+       l_Function = (*po_pDevFunc >> 5) & 7;
+       for (; l_Device < N_DEVICES; l_Device++, l_Function = 0 ) {
+               for (; l_Function < N_FUNCTIONS; l_Function++ ) {
+               l_Bytes = HalGetBusDataByOffset(
+                       PCIConfiguration,
+                       pi_Bus,
+                               l_Device |(l_Function<<5),
+                       (PVOID)&l_DevId,
+                       0,
+                       sizeof(ULONG)
+                       );
+               if (l_Bytes != sizeof(ULONG)) 
+                               continue;       /* as if - "not found" */
+                       if (l_DevId == pi_DevId)
+                               goto ExitFound;
+               }
+       }
+       return FALSE;
+       
+ExitFound:             
+       *po_pDevFunc = l_Device |(l_Function<<5);
+       return TRUE;
+}
+
+/*----------------------------------------------------------------*/
+
+/*
+ * Function: PciFindDeviceById
+ *
+ * Parameters:
+ *             IN                      pi_DevId                        - Device Id to search
+ *             INOUT   po_pBus                 - pointer to bus number, from which to start the search
+ *             INOUT   po_pDevFunc     - pointer to dev/func, from which to start the search
+ * 
+ * Returns:
+ *             FALSE   - device was not found
+ *     TRUE    - a device was found;  *po_pBus/*po_pDevFunc contain its location
+ *
+ * Description:
+ *     The function is intended for an iterative search.
+ *             It looks for the device of pi_DevId id, starting from device with *po_pBus and 
+ *             *po_pDevFunc location. When it finds the next device of that id, updates *po_pBus 
+ *             and *po_pDevFunc with the location of the device found
+ *
+ */
+static 
+BOOLEAN PciFindDeviceById( 
+       IN      ULONG           pi_DevId, 
+       IN OUT PULONG           po_pBus, 
+       IN OUT PULONG           po_pDevFunc )
+{
+       ULONG l_Bus;
+       ULONG l_DevFunc = *po_pDevFunc;
+       
+       for (l_Bus= *po_pBus; l_Bus < N_BUSES; l_Bus++, l_DevFunc=0) {
+               if (PciFindDeviceByBusAndId(l_Bus, pi_DevId, &l_DevFunc))
+                       break;
+       }
+       if (l_Bus >= N_BUSES)
+               return FALSE;
+       
+       // found
+       *po_pBus = l_Bus;
+       *po_pDevFunc = l_DevFunc;
+       return TRUE;
+}
+
+/*----------------------------------------------------------------*/
+
+/*
+ * Function: PciFindBridgeByBus
+ *
+ * Parameters:
+ *             IN              pi_SecBus               - bus number of an HCA in question
+ *             OUT     po_pBus                 - pointer to bus number of the bridge of the HCA, if found
+ *             OUT     po_pDevFunc     - pointer to dev/func of the bridge of the HCA, if found
+ * 
+ * Returns:
+ *             FALSE   - the bridge not found
+ *     TRUE    - a device was found;  *po_pBus/*po_pDevFunc contain its location
+ *
+ * Description:
+ *             The function scans all the buses to find the Bridge of an HCA device, found on bus pi_SecBus. 
+ *             The appropriate bridge must have its PrimaryBus field in the PCI cfg header equal to pi_SecBus.
+ *                     
+ */
+static BOOLEAN PciFindBridgeByBus( 
+       IN ULONG                pi_SecBus, 
+       OUT PULONG              po_pBus, 
+       OUT PULONG              po_pDevFunc )
+{
+       ULONG   l_DevFunc=0, l_Bus=0;
+       ULONG l_DevId = ((int)(23110) << 16) | PCI_VENDOR_ID_MELLANOX;          /* 23110 == 0x5a46 - the Tavor PCI bridge device id */
+       ULONG l_SecBus, l_tmp, l_Bytes;
+       ULONG   l_Device;
+       ULONG l_Function; 
+       int searching =1;
+
+       while (searching) {
+               /* look for a bridge */
+               if (!PciFindDeviceById(l_DevId, &l_Bus, &l_DevFunc)) 
+                       return FALSE;   /* bridge not found */
+               
+               /* found a bridge  -check, whether it is ours */
+       l_Bytes = HalGetBusDataByOffset(
+               PCIConfiguration,
+               l_Bus,
+               l_DevFunc,
+               (PVOID)&l_tmp,
+               24,     /* 24 - PrimaryBus, 25 - SecondaryBus, 26 - SubordinateBus */
+               sizeof(ULONG)
+               );
+       if (l_Bytes != sizeof(ULONG)) 
+                       goto NextDevice;        /* as if - "not found" */
+                        
+               l_SecBus = (l_tmp >> 16) & 255;
+               if ( l_SecBus == pi_SecBus )
+                       break; /* found !!! */
+               
+NextDevice:            
+               // calculate, where to continue the search
+               l_Device = l_DevFunc & 0x01f;
+               l_Function = (l_DevFunc >> 5) & 7;
+               l_Function++;
+               if (l_Function >= N_FUNCTIONS) {
+                       l_Function = 0;
+                       l_Device++;
+                       if (l_Device >= N_DEVICES) {
+                               l_Device = 0;
+                               l_Bus++;
+                       }
+                       if (l_Bus >= N_BUSES)
+                               return FALSE;
+               }
+               l_DevFunc = l_Device |(l_Function<<5);
+       }
+       
+       *po_pBus = l_Bus;
+       *po_pDevFunc = l_DevFunc;
+       return TRUE;
+}
+
+/*----------------------------------------------------------------*/
+
+/*
+ * Function: MdGetDevLocation
+ *
+ * Parameters:
+ *             IN              pi_pPdo                 - PDO of a device in question
+ *             OUT     po_pBus                 - pointer to the bus number of the device in question
+ *             OUT     po_pDevFunc     - pointer to dev/func of the device, if found
+ * 
+ * Returns:
+ *             not STATUS_SUCCESS      - the device location was not found
+ *     STATUS_SUCCESS          - the device location was found and returned in OUT parameters
+ *
+ * Description:
+ *             The function uses IoGetDeviceProperty to get the location of a device with given PDO
+ *                     
+ */
+static NTSTATUS 
+MdGetDevLocation(
+       IN      PDEVICE_OBJECT  pi_pPdo,
+       OUT     ULONG *                 po_pBus,
+       OUT ULONG        *                      po_pDevFunc 
+       )
+{
+       ULONG   l_BusNumber, l_DevNumber, l_Function, l_ResultLength = 0;
+       WCHAR   l_Buffer[40], *l_pEnd, *l_pBuf = l_Buffer, *l_pBufEnd = l_Buffer + sizeof(l_Buffer);
+       NTSTATUS        l_Status;
+       UNICODE_STRING  l_UnicodeNumber;
+
+       /* prepare */
+       l_ResultLength = 0;
+       RtlZeroMemory( l_Buffer, sizeof(l_Buffer) );
+
+       /* Get the device number  */
+       l_Status = IoGetDeviceProperty(pi_pPdo,
+               DevicePropertyLocationInformation, sizeof(l_Buffer), &l_Buffer, &l_ResultLength);
+
+       /* Verify if the function was successful */
+       if ( !NT_SUCCESS(l_Status) || !l_ResultLength ) {
+               HCA_PRINT( TRACE_LEVEL_ERROR  ,HCA_DBG_SHIM  ,("(MdGetDevLocation) Unable to get device number: Status 0x%x, ResultSize %d \n", 
+                       l_Status, l_ResultLength  ));
+               goto exit;      
+       }
+
+       // ALL THE BELOW CRAP WE DO INSTEAD OF 
+       // sscanf(l_Buffer, "PCI bus %d, device %d, function %d", &l_BusNumber, &l_DevNumber, &l_Function );
+
+       /* take bus number */
+       l_pBuf  = WcharFindChar( l_pBuf, l_pBufEnd, L'0', L'9' );
+       if (l_pBuf == NULL) goto err;
+       l_pEnd  = WcharFindChar( l_pBuf, l_pBufEnd, L',', L',' );
+       if (l_pEnd == NULL) goto err;
+       l_UnicodeNumber.Length = l_UnicodeNumber.MaximumLength = (USHORT)((PCHAR)l_pEnd - (PCHAR)l_pBuf);
+       l_UnicodeNumber.Buffer = l_pBuf; l_pBuf = l_pEnd;
+       RtlUnicodeStringToInteger( &l_UnicodeNumber, 10, &l_BusNumber);
+
+       /* take slot number */
+       l_pBuf  = WcharFindChar( l_pBuf, l_pBufEnd, L'0', L'9' );
+       if (l_pBuf == NULL) goto err;
+       l_pEnd  = WcharFindChar( l_pBuf, l_pBufEnd, L',', L',' );
+       if (l_pEnd == NULL) goto err;
+       l_UnicodeNumber.Length = l_UnicodeNumber.MaximumLength = (USHORT)((PCHAR)l_pEnd - (PCHAR)l_pBuf);
+       l_UnicodeNumber.Buffer = l_pBuf; l_pBuf = l_pEnd;
+       RtlUnicodeStringToInteger( &l_UnicodeNumber, 10, &l_DevNumber);
+
+       /* take function number */
+       *(l_Buffer + (l_ResultLength>>1)) = 0;  /* set end of string */
+       l_pBuf  = WcharFindChar( l_pBuf, l_pBufEnd, L'0', L'9' );
+       if (l_pBuf == NULL) goto err;
+       l_pEnd  = WcharFindChar( l_pBuf, l_pBufEnd, 0, 0 );
+       if (l_pEnd == NULL) goto err;
+       l_UnicodeNumber.Length = l_UnicodeNumber.MaximumLength = (USHORT)((PCHAR)l_pEnd - (PCHAR)l_pBuf);
+       l_UnicodeNumber.Buffer = l_pBuf; l_pBuf = l_pEnd;
+       RtlUnicodeStringToInteger( &l_UnicodeNumber, 10, &l_Function);
+
+       /* return the results */
+       *po_pBus                = l_BusNumber;
+       *po_pDevFunc = (l_DevNumber & 0x01f) | ((l_Function & 7) << 5);
+
+       goto exit;
+
+err:
+       l_Status = STATUS_UNSUCCESSFUL;
+exit:
+       return l_Status;
+}
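For reference, the location string returned by DevicePropertyLocationInformation has the form quoted in the sscanf() comment above; a worked example of the parsing result:

    /*
     *  "PCI bus 2, device 4, function 0"
     *      --> *po_pBus     = 2
     *          *po_pDevFunc = (4 & 0x1f) | (0 << 5) = 4
     */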
+
+
+/*------------------------------------------------------------------------------------------------------*/
+
+/*
+ * Function: PciFindPdoByPdoAndLocation
+ *
+ * Parameters:
+ *             IN              pi_pPdo                 - PDO of HCA's bus device
+ *             IN      pi_Bus, pi_DevFunc      - bridge location
+ *             OUT     po_pPdo                 - pointer to PDO of the bridge, when found
+ * 
+ * Returns:
+ *             FALSE   - the bridge was not found
+ *     TRUE    - a device was found;  *po_pPdo contains its PDO
+ *
+ * Description:
+ *             The function finds PDO of a Tavor bridge device by scanning through all the 
+ *             devices of the PCI.SYS driver   
+ *
+ *     Note:
+ *             It is a "hack" algorithm. It uses some fields of system structures and some
+ *             optimistic assumptions - see more below
+ */
+static BOOLEAN PciFindPdoByPdoAndLocation( 
+       IN PDEVICE_OBJECT pi_pPdo,
+       IN ULONG                pi_Bus, 
+       IN ULONG                pi_DevFunc,
+       OUT PDEVICE_OBJECT * po_pPdo )
+{
+       PDRIVER_OBJECT l_pDrv;
+       PDEVICE_OBJECT l_pPdo;
+       NTSTATUS l_Status;
+       ULONG   l_Bus, l_DevFunc;
+       // suppose that there is no more than N_PCI_DEVICES, belonging to PCI.SYS
+       #define N_PCI_DEVICES   256
+       // suppose that the PDO objects, once created, never get moved
+       PDEVICE_OBJECT pdo[N_PCI_DEVICES];
+       int i, n_pdos = 0;
+       
+       // suppose, that PDOs are added only at PASSIVE_LEVEL
+       KIRQL irql = KeRaiseIrqlToDpcLevel();
+               
+       // get to the PCI.SYS driver
+       l_pDrv = pi_pPdo->DriverObject;
+
+       // find and store all bus PDOs (because the bridge is a bus-enumerated device)
+       for ( l_pPdo = l_pDrv->DeviceObject; l_pPdo; l_pPdo = l_pPdo->NextDevice ) {
+               if ( l_pPdo->Flags & DO_BUS_ENUMERATED_DEVICE ) {
+                       pdo[n_pdos] = l_pPdo;
+                       if (++n_pdos >= N_PCI_DEVICES)
+                               break;
+               }
+       }
+
+       // return to previous level
+       KeLowerIrql(irql);
+       
+       //  loop over all the PCI driver devices
+       l_pPdo = NULL;  /* mark, that we didn't find PDO */
+       for ( i = 0; i < n_pdos; ++i ) {
+               // get the location of the device of that PDO
+               l_Status = MdGetDevLocation( pdo[i], &l_Bus, &l_DevFunc );
+               if (l_Status != STATUS_SUCCESS)
+                       continue;
+               // check, whether it's our device
+               if (l_Bus == pi_Bus && l_DevFunc == pi_DevFunc) {
+                       l_pPdo = pdo[i];
+                       break;
+               }
+       }
+
+       // check whether we found the PDO
+       if (!l_pPdo)
+               return FALSE;
+       *po_pPdo = l_pPdo;
+       return TRUE;    
+}
+
+/*----------------------------------------------------------------*/
+
+/* Function: SendAwaitIrpCompletion
+ *             
+ *  Parameters:
+ *
+ *  Description:
+ *             IRP completion routine 
+ *
+ *  Returns:
+ *             STATUS_MORE_PROCESSING_REQUIRED - the IRP stays owned by SendAwaitIrp()
+ *
+*/ 
+static
+NTSTATUS
+SendAwaitIrpCompletion (
+    IN PDEVICE_OBJECT   DeviceObject,
+    IN PIRP             Irp,
+    IN PVOID            Context
+    )
+{
+    UNREFERENCED_PARAMETER (DeviceObject);    
+    UNREFERENCED_PARAMETER (Irp);    
+    KeSetEvent ((PKEVENT) Context, IO_NO_INCREMENT, FALSE);
+    return STATUS_MORE_PROCESSING_REQUIRED; // Keep this IRP
+}
+
+/*------------------------------------------------------------------------------------------------------*/
+
+/*
+ *  Function: SendAwaitIrp
+ *
+ *  Description:
+ *             Create and send an IRP down the device stack and wait for its completion (blocking mode)
+ *
+ *  Parameters:
+ *             pi_pFdo................ our functional device object
+ *             pi_pLdo................ lower device object the IRP is sent to
+ *             pi_MajorCode........... IRP major code
+ *             pi_MinorCode........... IRP minor code
+ *             pi_pBuffer............. parameter buffer
+ *             pi_nSize............... size of the buffer
+ *    po_pInfo.............. returned field Information from IoStatus block
+ *
+ *  Returns:
+ *             standard NTSTATUS return codes
+ *
+*/
+static 
+NTSTATUS 
+SendAwaitIrp(
+       IN  PDEVICE_OBJECT              pi_pFdo,
+       IN  PDEVICE_OBJECT              pi_pLdo,
+       IN  ULONG                               pi_MajorCode,
+       IN  ULONG                               pi_MinorCode,
+       IN      PVOID                           pi_pBuffer,
+       IN      int                                     pi_nSize,
+       OUT     PVOID           *               po_pInfo
+   )
+/*++
+
+ Routine Description: 
+
+       Create and send an IRP down the device stack and wait for its completion (blocking mode)
+
+ Arguments: 
+       pi_pFdo................ our device
+       pi_pLdo................ lower device
+       pi_MajorCode........... IRP major code
+       pi_MinorCode........... IRP minor code
+       pi_pBuffer............. parameter buffer
+       pi_nSize............... size of the buffer
+
+ Returns: 
+       standard NTSTATUS return codes.
+
+ Notes:
+
+--*/
+{ /* SendAwaitIrp */
+       // Event
+       KEVENT                          l_hEvent;
+       // Pointer to IRP
+       PIRP                            l_pIrp;
+       // Stack location
+       PIO_STACK_LOCATION      l_pStackLocation;
+       // Returned status
+       NTSTATUS                        l_Status;
+       // when to invoke
+       BOOLEAN InvokeAlways = TRUE;
+
+       // call validation
+       if(KeGetCurrentIrql() != PASSIVE_LEVEL)
+               return STATUS_SUCCESS;
+
+       // create event
+       KeInitializeEvent(&l_hEvent, NotificationEvent, FALSE);
+
+       // build IRP request to USBD driver
+       l_pIrp = IoAllocateIrp( pi_pFdo->StackSize, FALSE );
+
+       // validate request
+       if (!l_pIrp)
+       {
+           //MdKdPrint( DBGLVL_MAXIMUM, ("(SendAwaitIrp) Unable to allocate IRP !\n"));
+               return STATUS_INSUFFICIENT_RESOURCES;
+       }
+
+       // fill IRP
+       l_pIrp->IoStatus.Status = STATUS_NOT_SUPPORTED;
+
+       // set completion routine
+    IoSetCompletionRoutine(l_pIrp,SendAwaitIrpCompletion, &l_hEvent, InvokeAlways, InvokeAlways, InvokeAlways);
+
+       // fill stack location
+    l_pStackLocation = IoGetNextIrpStackLocation(l_pIrp);
+    l_pStackLocation->MajorFunction= (UCHAR)pi_MajorCode;
+    l_pStackLocation->MinorFunction= (UCHAR)pi_MinorCode;
+       RtlCopyMemory( &l_pStackLocation->Parameters, pi_pBuffer, pi_nSize );
+
+       // Call lower driver perform request
+       l_Status = IoCallDriver( pi_pLdo, l_pIrp ); 
+
+       // if the request not performed --> wait
+       if (l_Status == STATUS_PENDING)
+       {
+               // Wait until the IRP  will be complete
+               KeWaitForSingleObject(
+                       &l_hEvent,                                                              // event to wait for
+                       Executive,                                                              // thread type (to wait into its context)
+                       KernelMode,                                                     // mode of work
+                       FALSE,                                                                  // alertable
+                       NULL                                                                    // timeout
+               );
+               l_Status = l_pIrp->IoStatus.Status;
+       }
+
+       if (po_pInfo)
+               *po_pInfo = (PVOID)l_pIrp->IoStatus.Information;
+
+    IoFreeIrp(l_pIrp);
+       return l_Status;
+
+} /* SendAwaitIrp */
+
+/*------------------------------------------------------------------------------------------------------*/
+
+/*
+ * Function: FindBridgeIf
+ *
+ * Parameters:
+ *             IN              pi_ext                          - device extension
+ *             OUT     pi_pInterface   - bus interface to work with the bridge
+ * 
+ * Returns:
+ *             FALSE   - the bridge was not found
+ *     TRUE    - a device was found;  *po_pPdo contains its PDO
+ *
+ * Description:
+ *             The function finds PDO of the bridge by HCA's bus number
+ *                     
+ */
+int
+FindBridgeIf(
+       IN hca_dev_ext_t                *pi_ext,
+       IN      PBUS_INTERFACE_STANDARD pi_pInterface
+       )
+{
+       NTSTATUS rc;
+       IO_STACK_LOCATION l_Iosl;
+       PDEVICE_RELATIONS l_pDr;
+       PDEVICE_OBJECT                  l_pPdo;
+       ULONG l_DevFunc, l_Bus;
+       // parameter buffer for the request
+       IO_STACK_LOCATION l_Stack;
+
+       // find bridge location
+       if (!PciFindBridgeByBus( pi_ext->bus_number,  &l_Bus, &l_DevFunc ))
+               return FALSE;
+       
+       // find PDO of our bus driver (bypassing possible low filter drivers)
+       RtlZeroMemory( &l_Iosl, sizeof(l_Iosl) );
+       l_Iosl.Parameters.QueryDeviceRelations.Type = TargetDeviceRelation;
+       rc = SendAwaitIrp(
+               pi_ext->cl_ext.p_self_do,
+               pi_ext->cl_ext.p_next_do,
+               IRP_MJ_PNP,
+               IRP_MN_QUERY_DEVICE_RELATIONS,
+               &l_Iosl.Parameters,
+               sizeof(l_Iosl.Parameters.QueryDeviceRelations),
+               &l_pDr
+                );
+       
+       if (!NT_SUCCESS (rc)) {
+               HCA_PRINT( TRACE_LEVEL_ERROR ,HCA_DBG_SHIM ,("IRP_MN_QUERY_DEVICE_RELATIONS failed (%#x);: Fdo %p, Ldo %p \n",
+                       rc, pi_ext->cl_ext.p_self_do, pi_ext->cl_ext.p_next_do ));
+               return FALSE;
+       }
+               
+       HCA_PRINT(TRACE_LEVEL_INFORMATION ,HCA_DBG_SHIM ,("IRP_MN_QUERY_DEVICE_RELATIONS for Fdo %p, Ldo %p: num_of_PDOs %d, PDO %p \n",
+               pi_ext->cl_ext.p_self_do, pi_ext->cl_ext.p_next_do, l_pDr->Count, l_pDr->Objects[0] ));
+       
+       /* get the PDO of Bridge */
+       if (!PciFindPdoByPdoAndLocation( l_pDr->Objects[0], 
+               l_Bus, l_DevFunc, &l_pPdo )) {
+               HCA_PRINT( TRACE_LEVEL_ERROR  ,HCA_DBG_SHIM  ,("Not found bridge's (bus %d, dev/func %x. pdo %p) PDO - can't restore the PCI header \n",
+                               l_Bus, l_DevFunc, l_pDr->Objects[0] ));
+               return FALSE;
+       } 
+       HCA_PRINT( TRACE_LEVEL_INFORMATION ,HCA_DBG_SHIM ,("Found bridge's PDO %p (bus %d, dev/func %x. pdo %p) \n",
+               l_pPdo, l_Bus, l_DevFunc, l_pDr->Objects[0] ));
+               
+       // clean interface data
+       RtlZeroMemory( (PCHAR)pi_pInterface, sizeof(BUS_INTERFACE_STANDARD) );
+       
+       // fill request parameters
+       l_Stack.Parameters.QueryInterface.InterfaceType                 = (LPGUID) &GUID_BUS_INTERFACE_STANDARD;
+       l_Stack.Parameters.QueryInterface.Size                                  = sizeof(BUS_INTERFACE_STANDARD);
+       l_Stack.Parameters.QueryInterface.Version                               = 1;
+       l_Stack.Parameters.QueryInterface.Interface                     = (PINTERFACE)pi_pInterface;
+       l_Stack.Parameters.QueryInterface.InterfaceSpecificData = NULL;
+       
+       rc =SendAwaitIrp( pi_ext->cl_ext.p_self_do, l_pPdo, IRP_MJ_PNP, 
+               IRP_MN_QUERY_INTERFACE, &l_Stack.Parameters, sizeof(l_Stack.Parameters), NULL);
+       if (!NT_SUCCESS (rc)) 
+               return FALSE;
+
+       return TRUE;
+}
+
+
+/*----------------------------------------------------------------*/
+
+
diff --git a/trunk/hw/mthca/kernel/mt_spinlock.h b/trunk/hw/mthca/kernel/mt_spinlock.h
new file mode 100644 (file)
index 0000000..9227365
--- /dev/null
@@ -0,0 +1,127 @@
+#ifndef MT_SPINLOCK_H
+#define MT_SPINLOCK_H
+
+typedef struct spinlock {
+       KSPIN_LOCK              lock;
+#ifdef SUPPORT_SPINLOCK_IRQ    
+       PKINTERRUPT     p_int_obj;
+       KIRQL                           irql;
+#endif
+} spinlock_t;
+
+#ifdef SUPPORT_SPINLOCK_IRQ    
+
+static inline void
+spin_lock_setint( 
+       IN              spinlock_t* const       l, 
+       IN PKINTERRUPT  p_int_obj )
+{
+       MT_ASSERT( l );
+       l->p_int_obj = p_int_obj;
+}
+
+static inline void spin_lock_irq_init(
+       IN              spinlock_t* const l,
+       IN      PKINTERRUPT int_obj
+       )
+{ 
+       KeInitializeSpinLock( &l->lock ); 
+       l->p_int_obj = int_obj; 
+}
+
+static inline unsigned long
+spin_lock_irq( 
+       IN              spinlock_t* const       l)
+{
+       MT_ASSERT( l );
+       MT_ASSERT( l->p_int_obj );
+       return (unsigned long)(l->irql = KeAcquireInterruptSpinLock ( l->p_int_obj ));
+}
+
+static inline void
+spin_unlock_irq( 
+       IN              spinlock_t* const p_spinlock ) 
+{
+       MT_ASSERT( p_spinlock );
+       MT_ASSERT( p_spinlock->p_int_obj );
+       KeReleaseInterruptSpinLock ( p_spinlock->p_int_obj, p_spinlock->irql );
+}
+
+#endif
+
+#define SPIN_LOCK_PREP(lh)             KLOCK_QUEUE_HANDLE lh
+
+static inline void spin_lock_init(
+       IN              spinlock_t* const p_spinlock )
+{ 
+       KeInitializeSpinLock( &p_spinlock->lock ); 
+}
+
+static inline void
+spin_lock( 
+       IN              spinlock_t* const       l,
+       IN              PKLOCK_QUEUE_HANDLE lockh)
+{
+       MT_ASSERT( l && lockh );
+       ASSERT(KeGetCurrentIrql() <= DISPATCH_LEVEL);
+       KeAcquireInStackQueuedSpinLock ( &l->lock, lockh );
+}
+
+static inline void
+spin_unlock(
+       IN              PKLOCK_QUEUE_HANDLE lockh)
+{
+       MT_ASSERT( lockh );
+       ASSERT(KeGetCurrentIrql() == DISPATCH_LEVEL);
+       KeReleaseInStackQueuedSpinLock( lockh );
+}
+
+static inline void
+spin_lock_sync( 
+       IN              spinlock_t* const       l )
+{
+       KLOCK_QUEUE_HANDLE lockh;
+       MT_ASSERT( l );
+       ASSERT(KeGetCurrentIrql() <= DISPATCH_LEVEL);
+       KeAcquireInStackQueuedSpinLock ( &l->lock, &lockh );
+       KeReleaseInStackQueuedSpinLock( &lockh );
+}
+
+/* to be used only at DPC level */
+static inline void
+spin_lock_dpc( 
+       IN              spinlock_t* const       l,
+       IN              PKLOCK_QUEUE_HANDLE lockh)
+{
+       MT_ASSERT( l && lockh );
+       ASSERT(KeGetCurrentIrql() == DISPATCH_LEVEL);
+       KeAcquireInStackQueuedSpinLockAtDpcLevel( &l->lock, lockh );
+}
+
+/* to be used only at DPC level */
+static inline void
+spin_unlock_dpc(
+       IN              PKLOCK_QUEUE_HANDLE lockh)
+{
+       ASSERT(KeGetCurrentIrql() == DISPATCH_LEVEL);
+       KeReleaseInStackQueuedSpinLockFromDpcLevel( lockh );
+}
+
+
+/* we are working at DPC level, so we can use the usual spinlocks */
+#define spin_lock_irq                                                  spin_lock
+#define spin_unlock_irq                                        spin_unlock
+
+/* no diff in Windows */
+#define spin_lock_irqsave                              spin_lock_irq
+#define spin_unlock_irqrestore                 spin_unlock_irq
+
+/* Windows doesn't provide reader/writer spinlocks so far, so map them to regular spinlocks for now */
+#define rwlock_init                                                            spin_lock_init
+#define read_lock_irqsave                              spin_lock_irqsave
+#define read_unlock_irqrestore         spin_unlock_irqrestore
+#define write_lock_irq                                         spin_lock_irq
+#define write_unlock_irq                                       spin_unlock_irq
+
+#endif
+
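
A minimal usage sketch for the wrappers above (the counter and its lock are illustrative only). SPIN_LOCK_PREP declares the in-stack queue handle that spin_lock/spin_unlock operate on:

/* sketch only: protect a counter with the spinlock_t wrapper */
static spinlock_t g_cnt_lock;
static int g_cnt;

static void cnt_init(void)
{
        spin_lock_init( &g_cnt_lock );
}

static void cnt_inc(void)
{
        SPIN_LOCK_PREP(lh);             /* KLOCK_QUEUE_HANDLE on the stack */

        spin_lock( &g_cnt_lock, &lh );  /* raises IRQL to DISPATCH_LEVEL */
        ++g_cnt;
        spin_unlock( &lh );             /* restores the previous IRQL */
}
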
diff --git a/trunk/hw/mthca/kernel/mt_sync.h b/trunk/hw/mthca/kernel/mt_sync.h
new file mode 100644 (file)
index 0000000..90d3f38
--- /dev/null
@@ -0,0 +1,109 @@
+#ifndef MT_SYNC_H
+#define MT_SYNC_H
+
+// literals
+#ifndef LONG_MAX
+#define LONG_MAX      2147483647L   /* maximum (signed) long value */
+#endif
+
+
+// mutex wrapper
+
+// suitable both for mutexes and semaphores
+static inline void down(PRKMUTEX  p_mutex)
+{
+       NTSTATUS                status;
+       int need_to_wait = 1;
+       
+       ASSERT(KeGetCurrentIrql() < DISPATCH_LEVEL);
+       while (need_to_wait) {
+               status = KeWaitForSingleObject( p_mutex, Executive, KernelMode, FALSE,  NULL );
+               if (status == STATUS_SUCCESS)
+                       break;
+       }
+}
+
+// suitable both for mutexes and semaphores
+static inline int down_interruptible(PRKMUTEX  p_mutex)
+{
+       NTSTATUS                status;
+       
+       ASSERT(KeGetCurrentIrql() < DISPATCH_LEVEL);
+       status = KeWaitForSingleObject( p_mutex, Executive, KernelMode, TRUE,  NULL );
+       if (status == STATUS_SUCCESS)
+               return 0;
+       return -EINTR;
+}
+
+#define sem_down(ptr)                                                          down((PRKMUTEX)(ptr))
+#define sem_down_interruptible(ptr)            down_interruptible((PRKMUTEX)(ptr))
+
+static inline void up(PRKMUTEX  p_mutex)
+{
+       ASSERT(KeGetCurrentIrql() <= DISPATCH_LEVEL);
+       KeReleaseMutex( p_mutex, FALSE );
+}
+
+static inline void sem_up(PRKSEMAPHORE  p_sem)
+{
+       ASSERT(KeGetCurrentIrql() <= DISPATCH_LEVEL);
+       KeReleaseSemaphore( p_sem, 0, 1, FALSE );
+}
+
+static inline void sem_init(
+       IN PRKSEMAPHORE  p_sem,
+       IN LONG  cnt,
+       IN LONG  limit)
+{
+       ASSERT(KeGetCurrentIrql() == PASSIVE_LEVEL);
+       KeInitializeSemaphore( p_sem, cnt, limit );
+}
+
+
+typedef struct wait_queue_head {
+       KEVENT          event;  
+} wait_queue_head_t;
+
+static inline void wait_event(wait_queue_head_t *obj_p, int condition)
+{
+       NTSTATUS                status;
+       int need_to_wait = 1;
+       MT_ASSERT(KeGetCurrentIrql() < DISPATCH_LEVEL);
+       if (condition) 
+               return;
+       while (need_to_wait) {
+               status = KeWaitForSingleObject( &obj_p->event, Executive, KernelMode, FALSE,  NULL );
+               if (status == STATUS_SUCCESS)
+                       break;
+       }
+}
+
+static inline void wake_up(wait_queue_head_t *obj_p)
+{
+       MT_ASSERT(KeGetCurrentIrql() <= DISPATCH_LEVEL);
+       KeSetEvent( &obj_p->event, 0, FALSE );
+}
+
+static inline void init_waitqueue_head(wait_queue_head_t *obj_p)
+{
+       //TODO: the ASSERT is temporarily commented out, because the use of fast mutexes
+       // in CompLib causes this to be called at APC_LEVEL
+       //ASSERT(KeGetCurrentIrql() == PASSIVE_LEVEL);   
+       KeInitializeEvent(  &obj_p->event, NotificationEvent , FALSE );
+}
+
+static inline void free_irq(PKINTERRUPT int_obj)
+{
+       IoDisconnectInterrupt( int_obj );
+}
+
+int request_irq(
+       IN      CM_PARTIAL_RESOURCE_DESCRIPTOR  *int_info,      /* interrupt resources */
+       IN              KSPIN_LOCK      *isr_lock,              /* spin lock for ISR */
+       IN              PKSERVICE_ROUTINE isr,          /* ISR */
+       IN              void *isr_ctx,                                          /* ISR context */
+       OUT     PKINTERRUPT *int_obj                    /* interrupt object */
+       );
+
+               
+#endif
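
A minimal sketch of how the wrappers above fit together (the context structure and routine names are illustrative only):

/* sketch only: wait for a completion flag using the wait-queue wrappers */
typedef struct {
        wait_queue_head_t       done_event;
        int                     done;
} my_ctx_t;

static void my_ctx_init(my_ctx_t *ctx)
{
        ctx->done = 0;
        init_waitqueue_head( &ctx->done_event );        /* KeInitializeEvent under the hood */
}

static void my_wait_for_done(my_ctx_t *ctx)
{
        wait_event( &ctx->done_event, ctx->done );      /* returns at once if ctx->done is already set */
}

static void my_signal_done(my_ctx_t *ctx)
{
        ctx->done = 1;
        wake_up( &ctx->done_event );                    /* KeSetEvent on the notification event */
}
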
diff --git a/trunk/hw/mthca/kernel/mt_time.h b/trunk/hw/mthca/kernel/mt_time.h
new file mode 100644 (file)
index 0000000..3738ecf
--- /dev/null
@@ -0,0 +1,54 @@
+#ifndef MT_TIME_H
+#define MT_TIME_H
+
+
+/* get time stamp */
+static inline volatile u64 MT_time_get_stamp(void)
+{
+       volatile u64 tsc;
+       
+#if defined(_WIN64) && (defined(IA64) || defined(_IA64_))
+       /* Itanium */
+
+       /* returns a value in units of 100 nsecs */
+       tsc = KeQueryInterruptTime();           
+
+#elif defined(_WIN64) && (defined(AMD64) || defined(_AMD64_))
+       /* x64 */
+
+       /* returns a value in units of Time-Stamp Counter (usually, clocks) */
+       tsc = __rdtsc();                
+
+#elif defined(_WIN32) && (defined(i386) || defined(_x86_))
+       /* x86 */
+
+       /* returns a value in units of Time-Stamp Counter (usually, clocks) */
+       __asm {                                          
+               lea ebx,tsc
+               rdtsc                            
+               mov  [ebx],eax           
+               mov  [ebx+4],edx                 
+       }
+#else
+       #error Unsupported platform
+#endif
+
+       return tsc;
+}
+
+extern u64 mt_ticks_per_sec;
+
+
+/* CONVERSIONS */
+#define MT_USECS_TO_TICKS(to)          ((mt_ticks_per_sec * (to)) / 1000000 )
+#define MT_MSECS_TO_TICKS(to)  MT_USECS_TO_TICKS(1000 * (to))
+
+/* comparison */
+#define time_after(a,b)                                                ((INT64)(b) - (INT64)(a) < 0)
+#define time_before(a,b)                               time_after(b,a)
+#define time_after_eq(a,b)                             ((INT64)(a) - (INT64)(b) >= 0)
+#define time_before_eq(a,b)                    time_after_eq(b,a)
+
+
+#endif
+
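
A short sketch of the conversion and comparison macros above in use, assuming mt_ticks_per_sec has already been calibrated; the 10 ms timeout is chosen only for illustration:

/* sketch only: poll with a 10 ms timeout expressed in time-stamp ticks */
static void sample_poll_with_timeout(void)
{
        u64 start    = MT_time_get_stamp();
        u64 deadline = start + MT_MSECS_TO_TICKS(10);   /* 10 ms in time-stamp ticks */

        while (!time_after( MT_time_get_stamp(), deadline )) {
                /* ... poll the hardware here ... */
        }
}
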
diff --git a/trunk/hw/mthca/kernel/mt_types.h b/trunk/hw/mthca/kernel/mt_types.h
new file mode 100644 (file)
index 0000000..efe9a85
--- /dev/null
@@ -0,0 +1,60 @@
+#ifndef MT_TYPES_H
+#define MT_TYPES_H
+
+//#include <complib/comp_lib.h>
+#pragma warning( push )
+#include <wdmwarn4.h>
+ #include <ntddk.h>
+#pragma warning( pop )
+
+// ===========================================
+// SUBSTITUTES
+// ===========================================
+
+// gcc compiler attributes
+#define __iomem
+#define likely(x)                      (x)
+#define unlikely(x)                    (x)
+
+// container_of
+#define container_of           CONTAINING_RECORD
+
+// inline 
+#define inline __inline
+
+// ===========================================
+// TYPES
+// ===========================================
+
+// basic types
+typedef unsigned char                  u8, __u8;
+typedef unsigned short int     u16, __u16;
+typedef unsigned int                           u32, __u32;
+typedef unsigned __int64               u64, __u64;
+typedef char                   s8, __s8;
+typedef short int      s16, __s16;
+typedef int                            s32, __s32;
+typedef __int64                s64, __s64;
+
+// inherited
+typedef u16  __le16;
+typedef u16  __be16;
+typedef u32  __le32;
+typedef u32  __be32;
+typedef u64  __le64;
+typedef u64  __be64;
+typedef u64 dma_addr_t;
+typedef u64 io_addr_t;
+
+// ===========================================
+// MACROS
+// ===========================================
+
+// assert
+#ifdef _DEBUG_
+#define MT_ASSERT( exp )       (void)(!(exp)?DbgPrint("Assertion Failed:" #exp "\n"),DbgBreakPoint(),FALSE:TRUE)
+#else
+#define MT_ASSERT( exp )
+#endif /* _DEBUG_ */
+
+#endif
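
A small sketch of the container_of substitute in use (the wrapper structure below is illustrative): since container_of maps to CONTAINING_RECORD, Linux-style code that recovers an enclosing structure from an embedded member keeps working unchanged:

/* sketch only: recover the enclosing structure from an embedded LIST_ENTRY */
struct my_item {
        int             value;
        LIST_ENTRY      link;
};

static inline struct my_item *link_to_item(LIST_ENTRY *p_link)
{
        return container_of( p_link, struct my_item, link );
}
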
diff --git a/trunk/hw/mthca/kernel/mt_ud_header.c b/trunk/hw/mthca/kernel/mt_ud_header.c
new file mode 100644 (file)
index 0000000..81799f2
--- /dev/null
@@ -0,0 +1,280 @@
+/*
+ * Copyright (c) 2004 Topspin Corporation.  All rights reserved.
+ * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id: ud_header.c 2928 2005-07-28 18:45:56Z sean.hefty $
+ */
+
+#include <hca_driver.h>
+#if defined(EVENT_TRACING)
+#ifdef offsetof
+#undef offsetof
+#endif
+#include "mt_ud_header.tmh"
+#endif
+#include <ib_pack.h>
+
+#define STRUCT_FIELD_INIT(header, field,ow,ob,sb) \
+       offsetof(struct ib_unpacked_ ## header, field),      \
+       sizeof ((struct ib_unpacked_ ## header *) 0)->field, \
+       ow,ob,sb, \
+       #header ":" #field
+
+#define STRUCT_FIELD_INITR(ow,ob,sb) \
+               0, 0, ow, ob, sb, "reserved"
+
+static const struct ib_field lrh_table[]  = {
+       { STRUCT_FIELD_INIT(lrh, virtual_lane, 0, 0, 4) },
+       { STRUCT_FIELD_INIT(lrh, link_version, 0, 4, 4) },
+       { STRUCT_FIELD_INIT(lrh, service_level, 0, 8, 4) },
+       { STRUCT_FIELD_INITR(0,12,2) },
+       { STRUCT_FIELD_INIT(lrh, link_next_header, 0, 14, 2) },
+       { STRUCT_FIELD_INIT(lrh, destination_lid, 0, 16, 16) },
+       { STRUCT_FIELD_INITR(1,0,5) },
+       { STRUCT_FIELD_INIT(lrh, packet_length, 1, 5, 11) },
+       { STRUCT_FIELD_INIT(lrh, source_lid, 1, 16, 16) }
+};
+
+static const struct ib_field grh_table[]  = {
+       { STRUCT_FIELD_INIT(grh, ip_version, 0, 0, 4) },
+       { STRUCT_FIELD_INIT(grh, traffic_class, 0, 4, 8) },
+       { STRUCT_FIELD_INIT(grh, flow_label, 0, 12, 20) },
+       { STRUCT_FIELD_INIT(grh, payload_length, 1, 0, 16) },
+       { STRUCT_FIELD_INIT(grh, next_header, 1, 16, 8) },
+       { STRUCT_FIELD_INIT(grh, hop_limit, 1, 24, 8) },
+       { STRUCT_FIELD_INIT(grh, source_gid, 2, 0, 128) },
+       { STRUCT_FIELD_INIT(grh, destination_gid, 6, 0, 128) }
+};
+
+static const struct ib_field bth_table[]  = {
+       { STRUCT_FIELD_INIT(bth, opcode, 0, 0, 8) },
+       { STRUCT_FIELD_INIT(bth, solicited_event, 0, 8, 1) },
+       { STRUCT_FIELD_INIT(bth, mig_req, 0, 9, 1) },
+       { STRUCT_FIELD_INIT(bth, pad_count, 0, 10, 2) },
+       { STRUCT_FIELD_INIT(bth, transport_header_version, 0, 12, 4) },
+       { STRUCT_FIELD_INIT(bth, pkey, 0, 16, 16) },
+       { STRUCT_FIELD_INITR(1,0,8) },
+       { STRUCT_FIELD_INIT(bth, destination_qpn, 1, 8, 24) },
+       { STRUCT_FIELD_INIT(bth, ack_req, 2, 0, 1) },
+       { STRUCT_FIELD_INITR(2,1,7) },
+       { STRUCT_FIELD_INIT(bth, psn, 2, 8, 24) }
+};
+
+static const struct ib_field deth_table[] = {
+       { STRUCT_FIELD_INIT(deth, qkey, 0, 0, 32) },
+       { STRUCT_FIELD_INITR(1,0,8) },
+       { STRUCT_FIELD_INIT(deth, source_qpn, 1, 8, 24) }
+};
+
+
+/**
+ * ib_ud_header_init - Initialize UD header structure
+ * @payload_bytes:Length of packet payload
+ * @grh_present:GRH flag (if non-zero, GRH will be included)
+ * @header:Structure to initialize
+ *
+ * ib_ud_header_init() initializes the lrh.link_version, lrh.link_next_header,
+ * lrh.packet_length, grh.ip_version, grh.payload_length,
+ * grh.next_header, bth.opcode, bth.pad_count and
+ * bth.transport_header_version fields of a &struct ib_ud_header given
+ * the payload length and whether a GRH will be included.
+ */
+void ib_ud_header_init(int                         payload_bytes,
+                      int                  grh_present,
+                      struct ib_ud_header *header)
+{
+       int header_len;
+       u16 packet_length;
+
+       RtlZeroMemory(header, sizeof *header);
+
+       header_len =
+               IB_LRH_BYTES  +
+               IB_BTH_BYTES  +
+               IB_DETH_BYTES;
+       if (grh_present) {
+               header_len += IB_GRH_BYTES;
+       }
+
+       header->lrh.link_version     = 0;
+       header->lrh.link_next_header =
+               (u8)(grh_present ? IB_LNH_IBA_GLOBAL : IB_LNH_IBA_LOCAL);
+       packet_length                = (u16)((IB_LRH_BYTES     +
+                                       IB_BTH_BYTES     +
+                                       IB_DETH_BYTES    +
+                                       payload_bytes    +
+                                       4                + /* ICRC     */
+                                       3) / 4);            /* round up */
+
+       header->grh_present          = grh_present;
+       if (grh_present) {
+               packet_length              += IB_GRH_BYTES / 4;
+               header->grh.ip_version      = 6;
+               header->grh.payload_length  =
+                       cl_hton16((u16)((IB_BTH_BYTES     +
+                                    IB_DETH_BYTES    +
+                                    payload_bytes    +
+                                    4                + /* ICRC     */
+                                    3) & ~3));          /* round up */
+               header->grh.next_header     = 0x1b;
+       }
+
+       header->lrh.packet_length = cl_hton16(packet_length);
+
+       if (header->immediate_present)
+               header->bth.opcode           = IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE;
+       else
+               header->bth.opcode           = IB_OPCODE_UD_SEND_ONLY;
+       header->bth.pad_count                = (u8)((4 - payload_bytes) & 3);
+       header->bth.transport_header_version = 0;
+}
+
+/**
+ * ib_ud_header_pack - Pack UD header struct into wire format
+ * @header:UD header struct
+ * @buf:Buffer to pack into
+ *
+ * ib_ud_header_pack() packs the UD header structure @header into wire
+ * format in the buffer @buf.
+ */
+int ib_ud_header_pack(struct ib_ud_header *header,
+                     u8                *buf)
+{
+       int len = 0;
+
+       ib_pack(lrh_table, ARRAY_SIZE(lrh_table),
+               &header->lrh, buf);
+       len += IB_LRH_BYTES;
+
+       if (header->grh_present) {
+               ib_pack(grh_table, ARRAY_SIZE(grh_table),
+                       &header->grh, buf + len);
+               len += IB_GRH_BYTES;
+       }
+
+       ib_pack(bth_table, ARRAY_SIZE(bth_table),
+               &header->bth, buf + len);
+       len += IB_BTH_BYTES;
+
+       ib_pack(deth_table, ARRAY_SIZE(deth_table),
+               &header->deth, buf + len);
+       len += IB_DETH_BYTES;
+
+       if (header->immediate_present) {
+               memcpy(buf + len, &header->immediate_data, sizeof header->immediate_data);
+               len += sizeof header->immediate_data;
+       }
+
+       return len;
+}
+
+/**
+ * ib_ud_header_unpack - Unpack UD header struct from wire format
+ * @header:UD header struct
+ * @buf:Buffer to unpack from
+ *
+ * ib_ud_header_unpack() unpacks the UD header structure @header from wire
+ * format in the buffer @buf.
+ */
+int ib_ud_header_unpack(u8                *buf,
+                       struct ib_ud_header *header)
+{
+       ib_unpack(lrh_table, ARRAY_SIZE(lrh_table),
+                 buf, &header->lrh);
+       buf += IB_LRH_BYTES;
+
+       if (header->lrh.link_version != 0) {
+               HCA_PRINT(TRACE_LEVEL_WARNING ,HCA_DBG_LOW ,("Invalid LRH.link_version %d\n",
+                      header->lrh.link_version));
+               return -EINVAL;
+       }
+
+       switch (header->lrh.link_next_header) {
+       case IB_LNH_IBA_LOCAL:
+               header->grh_present = 0;
+               break;
+
+       case IB_LNH_IBA_GLOBAL:
+               header->grh_present = 1;
+               ib_unpack(grh_table, ARRAY_SIZE(grh_table),
+                         buf, &header->grh);
+               buf += IB_GRH_BYTES;
+
+               if (header->grh.ip_version != 6) {
+                       HCA_PRINT(TRACE_LEVEL_WARNING ,HCA_DBG_LOW ,("Invalid GRH.ip_version %d\n",
+                              header->grh.ip_version));
+                       return -EINVAL;
+               }
+               if (header->grh.next_header != 0x1b) {
+                       HCA_PRINT(TRACE_LEVEL_WARNING ,HCA_DBG_LOW ,("Invalid GRH.next_header 0x%02x\n",
+                              header->grh.next_header));
+                       return -EINVAL;
+               }
+               break;
+
+       default:
+               HCA_PRINT(TRACE_LEVEL_WARNING ,HCA_DBG_LOW ,("Invalid LRH.link_next_header %d\n",
+                      header->lrh.link_next_header));
+               return -EINVAL;
+       }
+
+       ib_unpack(bth_table, ARRAY_SIZE(bth_table),
+                 buf, &header->bth);
+       buf += IB_BTH_BYTES;
+
+       switch (header->bth.opcode) {
+       case IB_OPCODE_UD_SEND_ONLY:
+               header->immediate_present = 0;
+               break;
+       case IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE:
+               header->immediate_present = 1;
+               break;
+       default:
+               HCA_PRINT(TRACE_LEVEL_WARNING ,HCA_DBG_LOW ,("Invalid BTH.opcode 0x%02x\n",
+                      header->bth.opcode));
+               return -EINVAL;
+       }
+
+       if (header->bth.transport_header_version != 0) {
+               HCA_PRINT(TRACE_LEVEL_WARNING ,HCA_DBG_LOW ,("Invalid BTH.transport_header_version %d\n",
+                      header->bth.transport_header_version));
+               return -EINVAL;
+       }
+
+       ib_unpack(deth_table, ARRAY_SIZE(deth_table),
+                 buf, &header->deth);
+       buf += IB_DETH_BYTES;
+
+       if (header->immediate_present)
+               memcpy(&header->immediate_data, buf, sizeof header->immediate_data);
+
+       return 0;
+}
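
As a worked example of the sizing logic in ib_ud_header_init() (the payload size is chosen only for illustration): for a 256-byte payload without a GRH, lrh.packet_length becomes (8 + 12 + 8 + 256 + 4 + 3) / 4 = 72 four-byte words and bth.pad_count is (4 - 256) & 3 = 0; a GRH would add IB_GRH_BYTES / 4 = 10 more words. A minimal init-plus-pack sketch, assuming the caller's buffer is at least 28 bytes:

/* sketch only: build and pack a UD send header for a 256-byte payload, no GRH */
static int sample_build_ud_header(u8 *buf)
{
        struct ib_ud_header hdr;

        ib_ud_header_init( 256, 0, &hdr );      /* fills length/opcode/version fields */
        /* the caller still fills addressing fields (LIDs, QPN, Q_Key) before packing */
        return ib_ud_header_pack( &hdr, buf );  /* 8 + 12 + 8 = 28 bytes, no GRH/immediate */
}
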
diff --git a/trunk/hw/mthca/kernel/mt_uverbs.c b/trunk/hw/mthca/kernel/mt_uverbs.c
new file mode 100644 (file)
index 0000000..d5b307c
--- /dev/null
@@ -0,0 +1,101 @@
+/*
+ * Copyright (c) 2005 Topspin Communications.  All rights reserved.
+ * Copyright (c) 2005 Cisco Systems.  All rights reserved.
+ * Copyright (c) 2005 PathScale, Inc.  All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id: uverbs_cmd.c 4227 2005-11-30 00:58:50Z roland $
+ */
+
+#include <ib_verbs.h>
+#include <mx_abi.h>
+
+#if defined(EVENT_TRACING)
+#ifdef offsetof
+#undef offsetof
+#endif
+#include "mt_uverbs.tmh"
+#endif
+
+
+//TODO: this whole module is to be eliminated !!
+
+
+static void ib_uverbs_add_one(struct ib_device *device);
+static void ib_uverbs_remove_one(struct ib_device *device);
+
+static struct ib_client uverbs_client = {
+       "uverbs",
+       ib_uverbs_add_one,
+       ib_uverbs_remove_one
+};
+
+struct ib_uverbs_device {
+       struct ib_device                       *ib_dev;
+};
+
+static void ib_uverbs_add_one(struct ib_device *device)
+{
+       struct ib_uverbs_device *uverbs_dev;
+
+       if (!device->alloc_ucontext)
+               return;
+
+       uverbs_dev = kzalloc(sizeof *uverbs_dev, GFP_KERNEL);
+       if (!uverbs_dev)
+               return;
+
+       ib_set_client_data(device, &uverbs_client, uverbs_dev);
+}
+
+static void ib_uverbs_remove_one(struct ib_device *device)
+{
+       struct ib_uverbs_device *uverbs_dev = ib_get_client_data(device, &uverbs_client);
+
+       if (uverbs_dev)
+               kfree(uverbs_dev);
+}
+
+int ib_uverbs_init(void)
+{
+       int ret;
+
+       ret = ib_register_client(&uverbs_client);
+       if (ret) 
+               HCA_PRINT(TRACE_LEVEL_ERROR   ,HCA_DBG_LOW   ,("user_verbs: couldn't register client\n"));
+
+       return ret;
+}
+
+void ib_uverbs_cleanup(void)
+{
+       ib_unregister_client(&uverbs_client);
+}
+
diff --git a/trunk/hw/mthca/kernel/mt_uverbs.h b/trunk/hw/mthca/kernel/mt_uverbs.h
new file mode 100644 (file)
index 0000000..4a3ded7
--- /dev/null
@@ -0,0 +1,168 @@
+/*
+ * Copyright (c) 2005 Topspin Communications.  All rights reserved.
+ * Copyright (c) 2005 Cisco Systems.  All rights reserved.
+ * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2005 Voltaire, Inc. All rights reserved.
+ * Copyright (c) 2005 PathScale, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id: uverbs.h 4227 2005-11-30 00:58:50Z roland $
+ */
+
+#ifndef UVERBS_H
+#define UVERBS_H
+
+#include <ib_verbs.h>
+#include <ib_user_verbs.h>
+
+/*
+ * Our lifetime rules for these structs are the following:
+ *
+ * struct ib_uverbs_device: One reference is held by the module and
+ * released in ib_uverbs_remove_one().  Another reference is taken by
+ * ib_uverbs_open() each time the character special file is opened,
+ * and released in ib_uverbs_release_file() when the file is released.
+ *
+ * struct ib_uverbs_file: One reference is held by the VFS and
+ * released when the file is closed.  Another reference is taken when
+ * an asynchronous event queue file is created and released when the
+ * event file is closed.
+ *
+ * struct ib_uverbs_event_file: One reference is held by the VFS and
+ * released when the file is closed.  For asynchronous event files,
+ * another reference is held by the corresponding main context file
+ * and released when that file is closed.  For completion event files,
+ * a reference is taken when a CQ is created that uses the file, and
+ * released when the CQ is destroyed.
+ */
+
+struct ib_uverbs_device {
+       struct ib_device                       *ib_dev;
+};
+
+struct ib_uverbs_mcast_entry {
+       struct list_head        list;
+       union ib_gid            gid;
+       u16                     lid;
+};
+
+struct ib_uevent_object {
+       struct ib_uobject       uobject;
+       struct list_head        event_list;
+       u32                     events_reported;
+};
+
+struct ib_uqp_object {
+       struct ib_uevent_object uevent;
+       struct list_head        mcast_list;
+};
+
+struct ib_ucq_object {
+       struct ib_uobject       uobject;
+       struct ib_uverbs_file  *uverbs_file;
+       struct list_head        comp_list;
+       struct list_head        async_list;
+       u32                     comp_events_reported;
+       u32                     async_events_reported;
+};
+
+#ifdef LIN_TO_BE_CHANGED
+extern struct semaphore ib_uverbs_idr_mutex;
+extern struct idr ib_uverbs_pd_idr;
+extern struct idr ib_uverbs_mr_idr;
+extern struct idr ib_uverbs_mw_idr;
+extern struct idr ib_uverbs_ah_idr;
+extern struct idr ib_uverbs_cq_idr;
+extern struct idr ib_uverbs_qp_idr;
+extern struct idr ib_uverbs_srq_idr;
+#endif
+
+struct file *ib_uverbs_alloc_event_file(struct ib_uverbs_file *uverbs_file,
+                                       int is_async, int *fd);
+void ib_uverbs_release_event_file(struct kref *ref);
+struct ib_uverbs_event_file *ib_uverbs_lookup_comp_file(int fd);
+
+void ib_uverbs_release_ucq(struct ib_uverbs_file *file,
+                          struct ib_uverbs_event_file *ev_file,
+                          struct ib_ucq_object *uobj);
+void ib_uverbs_release_uevent(struct ib_uverbs_file *file,
+                             struct ib_uevent_object *uobj);
+
+void ib_uverbs_comp_handler(struct ib_cq *cq, void *cq_context);
+void ib_uverbs_cq_event_handler(struct ib_event *event, void *context_ptr);
+void ib_uverbs_qp_event_handler(struct ib_event *event, void *context_ptr);
+void ib_uverbs_srq_event_handler(struct ib_event *event, void *context_ptr);
+void ib_uverbs_event_handler(struct ib_event_handler *handler,
+                            struct ib_event *event);
+
+int ib_umem_get(struct ib_device *dev, struct ib_umem *mem,
+               void *addr, size_t size, int write);
+void ib_umem_release(struct ib_device *dev, struct ib_umem *umem);
+void ib_umem_release_on_close(struct ib_device *dev, struct ib_umem *umem);
+
+#ifdef LIN_TO_BE_CHANGED
+#define IB_UVERBS_DECLARE_CMD(name)                                    \
+       ssize_t ib_uverbs_##name(struct ib_uverbs_file *file,           \
+                                const char *buf, int in_len,   \
+                                int out_len)
+#endif                          
+
+IB_UVERBS_DECLARE_CMD(get_context);
+IB_UVERBS_DECLARE_CMD(query_device);
+IB_UVERBS_DECLARE_CMD(query_port);
+IB_UVERBS_DECLARE_CMD(alloc_pd);
+IB_UVERBS_DECLARE_CMD(dealloc_pd);
+IB_UVERBS_DECLARE_CMD(reg_mr);
+IB_UVERBS_DECLARE_CMD(dereg_mr);
+IB_UVERBS_DECLARE_CMD(create_comp_channel);
+IB_UVERBS_DECLARE_CMD(create_cq);
+IB_UVERBS_DECLARE_CMD(poll_cq);
+IB_UVERBS_DECLARE_CMD(req_notify_cq);
+IB_UVERBS_DECLARE_CMD(destroy_cq);
+IB_UVERBS_DECLARE_CMD(create_qp);
+IB_UVERBS_DECLARE_CMD(modify_qp);
+IB_UVERBS_DECLARE_CMD(destroy_qp);
+IB_UVERBS_DECLARE_CMD(post_send);
+IB_UVERBS_DECLARE_CMD(post_recv);
+IB_UVERBS_DECLARE_CMD(post_srq_recv);
+IB_UVERBS_DECLARE_CMD(create_ah);
+IB_UVERBS_DECLARE_CMD(destroy_ah);
+IB_UVERBS_DECLARE_CMD(attach_mcast);
+IB_UVERBS_DECLARE_CMD(detach_mcast);
+IB_UVERBS_DECLARE_CMD(create_srq);
+IB_UVERBS_DECLARE_CMD(modify_srq);
+IB_UVERBS_DECLARE_CMD(destroy_srq);
+
+struct ib_pd *ib_uverbs_alloc_pd(struct ib_device *device,
+       struct ib_ucontext *context, ci_umv_buf_t* const p_umv_buf);
+
+int ib_uverbs_dealloc_pd(struct ib_pd *pd);
+
+#endif /* UVERBS_H */
diff --git a/trunk/hw/mthca/kernel/mt_uverbsmem.c b/trunk/hw/mthca/kernel/mt_uverbsmem.c
new file mode 100644 (file)
index 0000000..d67f3d6
--- /dev/null
@@ -0,0 +1,130 @@
+/*
+ * Copyright (c) 2005 Topspin Communications.  All rights reserved.
+ * Copyright (c) 2005 Cisco Systems.  All rights reserved.
+ * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id: uverbs_mem.c 2783 2005-07-05 02:21:08Z roland $
+ */
+
+#include "ib_verbs.h"
+
+void ibv_umem_release(struct ib_device *dev, struct ib_umem *umem)
+{
+       struct ib_umem_chunk *chunk, *tmp;
+       int i;
+
+       list_for_each_entry_safe(chunk, tmp, &umem->chunk_list, list,struct ib_umem_chunk,struct ib_umem_chunk) {
+               pci_unmap_sg((struct mthca_dev *)dev, chunk->page_list,
+                            chunk->nents, PCI_DMA_BIDIRECTIONAL);
+               for (i = 0; i < chunk->nents; ++i) {
+                       put_page(&chunk->page_list[i]);
+               }
+               kfree(chunk);
+       }
+}
+
+int ibv_umem_get(struct ib_device *dev, struct ib_umem *mem,
+               void *addr, size_t size, int write)
+{
+       struct ib_umem_chunk *chunk = NULL, *last_chunk;
+       u64 cur_base;
+       unsigned long npages;
+       int ret = -ENOMEM;
+       int i;
+
+       HCA_ENTER(HCA_DBG_MEMORY);
+       /* fill mem */
+       mem->user_base = (u64)(UINT_PTR)addr;
+       mem->length    = size;
+       mem->offset    = (int)(((u64)(UINT_PTR) addr) & ~PAGE_MASK);
+       mem->page_size = PAGE_SIZE;
+       mem->writable  = write;
+       INIT_LIST_HEAD(&mem->chunk_list);
+
+       /* build sg list */
+       npages = (unsigned long)(NEXT_PAGE_ALIGN(size + mem->offset) >> PAGE_SHIFT);
+       cur_base = (u64)(UINT_PTR)addr & PAGE_MASK;
+       while (npages) {
+               /* allocate a maximum-size chunk (its total size is <= PAGE_SIZE) */
+               chunk = kzalloc(sizeof *chunk + sizeof (struct scatterlist) *
+                               IB_UMEM_MAX_PAGE_CHUNK, GFP_KERNEL);
+               if (!chunk) {
+                       ret = -ENOMEM;
+                       goto out;       /* release the chunks built so far */
+               }
+               list_add_tail(&chunk->list, &mem->chunk_list);
+
+               /* fill the chunk */
+               for (i=0; i < IB_UMEM_MAX_PAGE_CHUNK; i++) {
+
+                       /* map one page */
+                       ret = get_user_pages((struct mthca_dev *)dev, cur_base,
+                               1, write, &chunk->page_list[i] );
+                       if (ret < 0)
+                               goto out;                                            
+
+                       /* update the chunk */
+                       chunk->nents++; /* number of sg elements */
+
+                       /* calculate the rest of the buffer to handle */
+                       cur_base += PAGE_SIZE;
+                       if (!--npages)
+                               break;
+               }
+
+               /* map all chunk pages */
+               chunk->nmap = pci_map_sg((struct mthca_dev *)dev,
+                       chunk->page_list, chunk->nents, PCI_DMA_BIDIRECTIONAL);
+               if (chunk->nmap <= 0) {
+                       ret = -ENOMEM;
+                       goto out;
+               }
+
+       }
+
+       /* shorten the last chunk: reallocate it with just enough scatterlist
+        * entries; if that allocation fails, simply keep the full-size chunk */
+       ret = 0; /* if we get here - all is OK */
+       last_chunk = chunk;
+       chunk = kzalloc(sizeof *chunk + sizeof (struct scatterlist) *
+                       last_chunk->nents, GFP_KERNEL);
+       if (!chunk)
+               goto exit;
+       memcpy( chunk, last_chunk, sizeof *last_chunk + sizeof (struct scatterlist) *
+               last_chunk->nents);
+       list_del(&last_chunk->list);
+       list_add_tail(&chunk->list, &mem->chunk_list);
+       kfree(last_chunk);
+       goto exit;
+       
+out:
+       ibv_umem_release(dev, mem);
+exit:
+       return ret;
+}
+
+
diff --git a/trunk/hw/mthca/kernel/mt_verbs.c b/trunk/hw/mthca/kernel/mt_verbs.c
new file mode 100644 (file)
index 0000000..9d2b566
--- /dev/null
@@ -0,0 +1,838 @@
+/*
+ * Copyright (c) 2004 Mellanox Technologies Ltd.  All rights reserved.
+ * Copyright (c) 2004 Infinicon Corporation.  All rights reserved.
+ * Copyright (c) 2004 Intel Corporation.  All rights reserved.
+ * Copyright (c) 2004 Topspin Corporation.  All rights reserved.
+ * Copyright (c) 2004 Voltaire Corporation.  All rights reserved.
+ * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
+ * Copyright (c) 2005 Cisco Systems.  All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id: verbs.c 2934 2005-07-29 17:31:49Z roland $
+ */
+
+#include <ib_verbs.h>
+#include <ib_cache.h>
+#include "mthca_dev.h"
+#include "mx_abi.h"
+
+#if defined(EVENT_TRACING)
+#ifdef offsetof
+#undef offsetof
+#endif
+#include "mt_verbs.tmh"
+#endif
+
+
+void ibv_um_close(     struct ib_ucontext * h_um_ca )
+{
+       int err;
+       ib_api_status_t         status;
+       struct ib_ucontext *context_p = (struct ib_ucontext *)h_um_ca;
+
+       HCA_ENTER(HCA_DBG_SHIM);
+
+       context_p->is_removing = TRUE;
+
+       if (atomic_read(&context_p->usecnt)) {
+               HCA_PRINT(TRACE_LEVEL_ERROR  ,HCA_DBG_SHIM  ,("resources are not released (cnt %d)\n", context_p->usecnt));
+               status = IB_RESOURCE_BUSY;
+               goto err_usage;
+       }
+       
+       err = ibv_dealloc_pd( context_p->pd );
+       if (err) {
+               HCA_PRINT(TRACE_LEVEL_ERROR  ,HCA_DBG_SHIM  ,("ibv_dealloc_pd failed (%d)\n", err));
+               status = errno_to_iberr(err);
+       }
+
+       err = mthca_dealloc_ucontext(context_p);
+       if (err) {
+               HCA_PRINT(TRACE_LEVEL_ERROR   ,HCA_DBG_SHIM  ,("mthca_dealloc_ucontext failed (%d)\n", err));
+               status = errno_to_iberr(err);
+               goto err_dealloc_ucontext;
+       }
+
+       HCA_PRINT(TRACE_LEVEL_INFORMATION       ,HCA_DBG_SHIM  , ("pcs %p\n", PsGetCurrentProcess()) );
+       status = IB_SUCCESS;
+       goto end;
+       
+err_dealloc_ucontext: 
+err_usage:
+end:
+       HCA_PRINT_EXIT(TRACE_LEVEL_ERROR  , HCA_DBG_SHIM  ,("completes with ERROR status %s\n", ib_get_err_str(status)));
+       return;
+}
+
+/* Protection domains */
+
+struct ib_pd *ibv_alloc_pd(struct ib_device *device,
+       struct ib_ucontext *context, ci_umv_buf_t* const p_umv_buf)
+{
+       struct ib_pd *pd;
+
+       pd = device->alloc_pd(device, context, p_umv_buf);
+
+       if (!IS_ERR(pd)) {
+               pd->device  = device;
+               pd->ucontext = context;
+               atomic_set(&pd->usecnt, 0);
+               HCA_PRINT(TRACE_LEVEL_INFORMATION  ,HCA_DBG_SHIM        ,("PD%d use cnt %d, pd_handle %p, ctx %p \n", 
+                       ((struct mthca_pd*)pd)->pd_num, pd->usecnt, pd, pd->ucontext));
+       }
+
+       return pd;
+}
+
+int ibv_dealloc_pd(struct ib_pd *pd)
+{
+       if (atomic_read(&pd->usecnt)) {
+               HCA_PRINT(TRACE_LEVEL_ERROR  ,HCA_DBG_SHIM      ,("resources are not released (cnt %d)\n", pd->usecnt));
+               return -EBUSY;
+       }               
+
+       HCA_PRINT(TRACE_LEVEL_INFORMATION  ,HCA_DBG_SHIM        ,("PD%d use cnt %d, pd_handle %p, ctx %p \n", 
+               ((struct mthca_pd*)pd)->pd_num, pd->usecnt, pd, pd->ucontext));
+       return pd->device->dealloc_pd(pd);
+}
+
+/* Address handles */
+
+struct ib_ah *ibv_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr,
+       struct ib_ucontext *context, ci_umv_buf_t* const p_umv_buf)
+{
+       int err;
+       struct ib_ah *ah;
+       struct ib_mr *ib_mr = NULL;
+       u64 start = 0;
+
+       // for a user-mode call we also need to allocate an MR
+       if (context && p_umv_buf && p_umv_buf->p_inout_buf) {
+               struct ibv_create_ah *create_ah = (struct ibv_create_ah *)(void*)p_umv_buf->p_inout_buf;
+               
+               // create region
+               ib_mr   = ibv_reg_mr( 
+                       pd, 
+                       create_ah->mr.access_flags, 
+                       (void*)(ULONG_PTR)create_ah->mr.start,
+                       create_ah->mr.length, create_ah->mr.hca_va, TRUE );
+               if (IS_ERR(ib_mr)) {
+                       err = PTR_ERR(ib_mr);
+                       HCA_PRINT(TRACE_LEVEL_ERROR,HCA_DBG_AV  ,("ibv_reg_mr failed (%d)\n", err));
+                       goto err_alloc_mr;
+               }
+
+               start = create_ah->mr.start;
+       }
+
+       ah = pd->device->create_ah(pd, ah_attr);
+
+       if (IS_ERR(ah)) {
+               err = PTR_ERR(ah);
+               HCA_PRINT(TRACE_LEVEL_ERROR,HCA_DBG_AV ,("create_ah failed (%d)\n", err));
+               goto err_create_ah;
+       }
+
+       // fill results
+       ah->device  = pd->device;
+       ah->pd      = pd;
+       ah->ucontext = context;
+       atomic_inc(&pd->usecnt);
+       HCA_PRINT(TRACE_LEVEL_INFORMATION  ,HCA_DBG_AV  ,("PD%d use cnt %d, pd_handle %p, ctx %p \n", 
+               ((struct mthca_pd*)pd)->pd_num, pd->usecnt, pd, pd->ucontext));
+       if (context)
+               atomic_inc(&context->usecnt);
+
+       // fill results for user
+       if (context && p_umv_buf && p_umv_buf->p_inout_buf) {
+               struct ibv_create_ah_resp *create_ah_resp = (struct ibv_create_ah_resp *)(void*)p_umv_buf->p_inout_buf;
+               ah->ib_mr = ib_mr;
+               create_ah_resp->start = start;
+               create_ah_resp->mr.lkey = ib_mr->lkey;
+               create_ah_resp->mr.rkey = ib_mr->rkey;
+               create_ah_resp->mr.mr_handle = (u64)(ULONG_PTR)ib_mr;
+               p_umv_buf->output_size = sizeof(struct ibv_create_ah_resp);
+       }
+
+       return ah;
+       
+err_create_ah:
+       if (ib_mr)
+               ibv_dereg_mr(ib_mr);
+err_alloc_mr:
+       if( p_umv_buf && p_umv_buf->command ) 
+               p_umv_buf->status = IB_ERROR;
+       return ERR_PTR(err);
+}
+
+struct ib_ah *ibv_create_ah_from_wc(struct ib_pd *pd, struct _ib_wc *wc,
+                                  struct ib_grh *grh, u8 port_num)
+{
+       struct ib_ah_attr ah_attr;
+       u32 flow_class;
+       u16 gid_index;
+       int ret;
+
+       memset(&ah_attr, 0, sizeof ah_attr);
+       ah_attr.dlid = wc->recv.ud.remote_lid;
+       ah_attr.sl = wc->recv.ud.remote_sl;
+       ah_attr.src_path_bits = wc->recv.ud.path_bits;
+       ah_attr.port_num = port_num;
+
+       if (wc->recv.ud.recv_opt & IB_RECV_OPT_GRH_VALID) {
+               ah_attr.ah_flags = IB_AH_GRH;
+               ah_attr.grh.dgid = grh->dgid;
+
+               ret = ib_find_cached_gid(pd->device, &grh->sgid, &port_num,
+                                        &gid_index);
+               if (ret)
+                       return ERR_PTR(ret);
+
+               ah_attr.grh.sgid_index = (u8) gid_index;
+               flow_class = cl_ntoh32(grh->version_tclass_flow);
+               ah_attr.grh.flow_label = flow_class & 0xFFFFF;
+               ah_attr.grh.traffic_class = (u8)((flow_class >> 20) & 0xFF);
+               ah_attr.grh.hop_limit = grh->hop_limit;
+       }
+
+       return ibv_create_ah(pd, &ah_attr, NULL, NULL);
+}
+
+int ibv_modify_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr)
+{
+       return ah->device->modify_ah ?
+               ah->device->modify_ah(ah, ah_attr) :
+               -ENOSYS;
+}
+
+int ibv_query_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr)
+{
+       return ah->device->query_ah ?
+               ah->device->query_ah(ah, ah_attr) :
+               -ENOSYS;
+}
+
+
+static void release_user_cq_qp_resources(
+       struct ib_ucontext      *ucontext,
+       struct ib_mr * ib_mr)
+{
+       if (ucontext) {
+               ibv_dereg_mr( ib_mr );
+               atomic_dec(&ucontext->usecnt);
+               if (!atomic_read(&ucontext->usecnt) && ucontext->is_removing) {
+                       HCA_PRINT(TRACE_LEVEL_ERROR  ,HCA_DBG_SHIM      ,("User resources are released. Removing context\n"));
+                       ibv_um_close(ucontext);
+               }
+       }
+}
+
+int ibv_destroy_ah(struct ib_ah *ah)
+{
+       struct ib_pd *pd;
+       int ret;
+       struct ib_ucontext      *ucontext;
+       struct ib_mr * ib_mr;
+
+       pd = ah->pd;
+       ucontext = ah->ucontext;
+       ib_mr = ah->ib_mr;
+
+       ret = ah->device->destroy_ah(ah);
+       if (!ret) {
+               atomic_dec(&pd->usecnt);
+               HCA_PRINT(TRACE_LEVEL_INFORMATION  ,HCA_DBG_AV  ,("PD%d use cnt %d, pd_handle %p, ctx %p \n", 
+                       ((struct mthca_pd*)pd)->pd_num, pd->usecnt, pd, pd->ucontext));
+       }
+       release_user_cq_qp_resources(ucontext, ib_mr);
+
+       return ret;
+}
+
+/* Shared receive queues */
+
+struct ib_srq *ibv_create_srq(struct ib_pd *pd,
+                            struct ib_srq_init_attr *srq_init_attr)
+{
+       struct ib_srq *srq;
+
+       if (!pd->device->create_srq)
+               return ERR_PTR(-ENOSYS);
+
+       srq = pd->device->create_srq(pd, srq_init_attr, NULL);
+
+       if (!IS_ERR(srq)) {
+               srq->device        = pd->device;
+               srq->pd            = pd;
+               srq->uobject       = NULL;
+               srq->event_handler = srq_init_attr->event_handler;
+               srq->srq_context   = srq_init_attr->srq_context;
+               atomic_inc(&pd->usecnt);
+               atomic_set(&srq->usecnt, 0);
+               HCA_PRINT(TRACE_LEVEL_INFORMATION ,HCA_DBG_QP ,("PD%d use cnt %d \n", 
+                       ((struct mthca_pd*)pd)->pd_num, pd->usecnt));
+       }
+
+       return srq;
+}
+
+int ibv_modify_srq(struct ib_srq *srq,
+                 struct ib_srq_attr *srq_attr,
+                 enum ib_srq_attr_mask srq_attr_mask)
+{
+       return srq->device->modify_srq(srq, srq_attr, srq_attr_mask);
+}
+
+int ibv_query_srq(struct ib_srq *srq,
+                struct ib_srq_attr *srq_attr)
+{
+       return srq->device->query_srq ?
+               srq->device->query_srq(srq, srq_attr) : -ENOSYS;
+}
+
+int ibv_destroy_srq(struct ib_srq *srq)
+{
+       struct ib_pd *pd;
+       int ret;
+
+       if (atomic_read(&srq->usecnt))
+               return -EBUSY;
+
+       pd = srq->pd;
+
+       ret = srq->device->destroy_srq(srq);
+       if (!ret) {
+               atomic_dec(&pd->usecnt);
+               HCA_PRINT(TRACE_LEVEL_INFORMATION  ,HCA_DBG_SHIM        ,("PD%d use cnt %d \n", 
+                       ((struct mthca_pd*)pd)->pd_num, pd->usecnt));
+       }
+
+       return ret;
+}
+
+/* Queue pairs */
+
+struct ib_qp *ibv_create_qp(struct ib_pd *pd,
+       struct ib_qp_init_attr *qp_init_attr,
+       struct ib_ucontext *context, ci_umv_buf_t* const p_umv_buf)
+{
+       int err;
+       struct ib_qp *ib_qp;
+       struct ib_mr *ib_mr = NULL;
+       u64 user_handle = 0;
+
+       HCA_ENTER(HCA_DBG_QP);
+
+       // for a user-mode call we also need to allocate an MR
+       if (context && p_umv_buf && p_umv_buf->p_inout_buf) {
+               struct ibv_create_qp *create_qp = (struct ibv_create_qp *)(void*)p_umv_buf->p_inout_buf;
+               
+               // create region
+               ib_mr   = ibv_reg_mr( 
+                       (struct ib_pd *)(ULONG_PTR)create_qp->mr.pd_handle, 
+                       create_qp->mr.access_flags, 
+                       (void*)(ULONG_PTR)create_qp->mr.start,
+                       create_qp->mr.length, create_qp->mr.hca_va, TRUE );
+               if (IS_ERR(ib_mr)) {
+                       err = PTR_ERR(ib_mr);
+                       HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_QP ,("ibv_reg_mr failed (%d)\n", err));
+                       goto err_alloc_mr;
+               }
+               create_qp->lkey = ib_mr->lkey;
+               user_handle = create_qp->user_handle;
+       }
+
+       ib_qp = pd->device->create_qp(pd, qp_init_attr, p_umv_buf);
+
+       if (IS_ERR(ib_qp)) {
+               err = PTR_ERR(ib_qp);
+               HCA_PRINT(TRACE_LEVEL_ERROR,HCA_DBG_QP ,("create_qp failed (%d)\n", err));
+               goto err_create_qp;
+       }
+
+       // fill results
+       ib_qp->device                           = pd->device;
+       ib_qp->pd                                       = pd;
+       ib_qp->send_cq                          = qp_init_attr->send_cq;
+       ib_qp->recv_cq                          = qp_init_attr->recv_cq;
+       ib_qp->srq                              = qp_init_attr->srq;
+       ib_qp->ucontext                         = context;
+       ib_qp->event_handler    = qp_init_attr->event_handler;
+       ib_qp->qp_context       = qp_init_attr->qp_context;
+       ib_qp->qp_type                          = qp_init_attr->qp_type;
+       atomic_inc(&pd->usecnt);
+       atomic_inc(&qp_init_attr->send_cq->usecnt);
+       atomic_inc(&qp_init_attr->recv_cq->usecnt);
+       if (qp_init_attr->srq)
+               atomic_inc(&qp_init_attr->srq->usecnt);
+       if (context)
+               atomic_inc(&context->usecnt);
+       HCA_PRINT(TRACE_LEVEL_INFORMATION ,HCA_DBG_QP ,("PD%d use cnt %d, pd_handle %p, ctx %p \n", 
+               ((struct mthca_pd*)pd)->pd_num, pd->usecnt, pd, pd->ucontext));
+
+       HCA_PRINT(TRACE_LEVEL_INFORMATION, HCA_DBG_QP ,
+               ("uctx %p, qhndl %p, qnum %#x, q_num  %#x, scq %#x:%#x, rcq %#x:%#x \n",
+               pd->ucontext, ib_qp, ((struct mthca_qp*)ib_qp)->qpn, ib_qp->qp_num,
+               ((struct mthca_cq*)ib_qp->send_cq)->cqn, ib_qp->send_cq->cqe,
+               ((struct mthca_cq*)ib_qp->recv_cq)->cqn, ib_qp->recv_cq->cqe ) );
+
+       // fill results for user
+       if (context && p_umv_buf && p_umv_buf->p_inout_buf) {
+               struct mthca_qp *qp = (struct mthca_qp *)ib_qp;
+               struct ibv_create_qp_resp *create_qp_resp = (struct ibv_create_qp_resp *)(void*)p_umv_buf->p_inout_buf;
+               ib_qp->ib_mr = ib_mr;
+               create_qp_resp->qpn = ib_qp->qp_num;
+               create_qp_resp->user_handle = user_handle;
+               create_qp_resp->mr.lkey = ib_mr->lkey;
+               create_qp_resp->mr.rkey = ib_mr->rkey;
+               create_qp_resp->mr.mr_handle = (u64)(ULONG_PTR)ib_mr;
+               create_qp_resp->qp_handle = (__u64)(ULONG_PTR)qp;
+               create_qp_resp->max_send_wr = qp->sq.max;
+               create_qp_resp->max_recv_wr = qp->rq.max;
+               create_qp_resp->max_send_sge = qp->sq.max_gs;
+               create_qp_resp->max_recv_sge = qp->rq.max_gs;
+               create_qp_resp->max_inline_data = qp->max_inline_data;
+               p_umv_buf->output_size = sizeof(struct ibv_create_qp_resp);
+       }
+
+       return ib_qp;
+
+err_create_qp:
+       if (ib_mr)
+               ibv_dereg_mr(ib_mr);
+err_alloc_mr:
+       if( p_umv_buf && p_umv_buf->command ) 
+               p_umv_buf->status = IB_ERROR;
+       HCA_EXIT(HCA_DBG_QP);
+       return ERR_PTR(err);
+}
+
+int ibv_modify_qp(struct ib_qp *qp,
+                struct ib_qp_attr *qp_attr,
+                int qp_attr_mask)
+{
+       return qp->device->modify_qp(qp, qp_attr, qp_attr_mask);
+}
+
+int ibv_query_qp(struct ib_qp *qp,
+               struct ib_qp_attr *qp_attr,
+               int qp_attr_mask,
+               struct ib_qp_init_attr *qp_init_attr)
+{
+       return qp->device->query_qp ?
+               qp->device->query_qp(qp, qp_attr, qp_attr_mask, qp_init_attr) :
+               -ENOSYS;
+}
+       
+int ibv_destroy_qp(struct ib_qp *qp)
+{
+       struct ib_pd *pd;
+       struct ib_cq *scq, *rcq;
+       struct ib_srq *srq;
+       int ret;
+       struct ib_ucontext      *ucontext;
+       struct ib_mr * ib_mr;
+
+       pd  = qp->pd;
+       scq = qp->send_cq;
+       rcq = qp->recv_cq;
+       srq = qp->srq;
+       ucontext = pd->ucontext;
+       ib_mr = qp->ib_mr;
+
+       ret = qp->device->destroy_qp(qp);
+       if (!ret) {
+               atomic_dec(&pd->usecnt);
+               atomic_dec(&scq->usecnt);
+               atomic_dec(&rcq->usecnt);
+               HCA_PRINT(TRACE_LEVEL_INFORMATION  ,HCA_DBG_SHIM        ,("PD%d use cnt %d, pd_handle %p, ctx %p \n", 
+                       ((struct mthca_pd*)pd)->pd_num, pd->usecnt, pd, pd->ucontext));
+               if (srq)
+                       atomic_dec(&srq->usecnt);
+               release_user_cq_qp_resources(ucontext, ib_mr);
+       }
+
+       return ret;
+}
+
+/* Completion queues */
+
+struct ib_cq *ibv_create_cq(struct ib_device *device,
+                          ib_comp_handler comp_handler,
+                          void (*event_handler)(struct ib_event *, void *),
+                          void *cq_context, int cqe, 
+                          struct ib_ucontext *context, ci_umv_buf_t* const p_umv_buf)
+{
+       int err;
+       struct ib_cq *cq;
+       struct ib_mr *ib_mr = NULL;
+       u64 user_handle = 0;
+
+       // for a user-mode call we also need to allocate an MR
+       if (context && p_umv_buf && p_umv_buf->p_inout_buf) {
+               struct ibv_create_cq *create_cq = (struct ibv_create_cq *)(void*)p_umv_buf->p_inout_buf;
+               
+               // create region
+               ib_mr   = ibv_reg_mr( 
+                       (struct ib_pd *)(ULONG_PTR)create_cq->mr.pd_handle, 
+                       create_cq->mr.access_flags, 
+                       (void*)(ULONG_PTR)create_cq->mr.start,
+                       create_cq->mr.length, create_cq->mr.hca_va, TRUE );
+               if (IS_ERR(ib_mr)) {
+                       err = PTR_ERR(ib_mr);
+                       HCA_PRINT(TRACE_LEVEL_ERROR,HCA_DBG_LOW ,("ibv_reg_mr failed (%d)\n", err));
+                       goto err_alloc_mr;
+               }
+               user_handle = create_cq->user_handle;
+               create_cq->lkey = ib_mr->lkey;
+               cqe = create_cq->cqe;
+       }
+       
+       // create cq
+       cq = device->create_cq(device, cqe, context, p_umv_buf);
+       if (IS_ERR(cq)) {
+               err = PTR_ERR(cq);
+               HCA_PRINT(TRACE_LEVEL_ERROR,HCA_DBG_LOW ,("create_cq failed (%d)\n", err));
+               goto err_create_cq;
+       }
+
+       cq->device        = device;
+       cq->ucontext = context;
+       cq->comp_handler  = comp_handler;
+       cq->event_handler = event_handler;
+       cq->cq_context    = cq_context;
+       atomic_set(&cq->usecnt, 0);
+       if (context)
+               atomic_inc(&context->usecnt);
+
+       // fill results
+       if (context && p_umv_buf && p_umv_buf->p_inout_buf) {
+               struct ibv_create_cq_resp *create_cq_resp = (struct ibv_create_cq_resp *)(void*)p_umv_buf->p_inout_buf;
+               cq->ib_mr = ib_mr;
+               create_cq_resp->user_handle = user_handle;
+               create_cq_resp->mr.lkey = ib_mr->lkey;
+               create_cq_resp->mr.rkey = ib_mr->rkey;
+               create_cq_resp->mr.mr_handle = (u64)(ULONG_PTR)ib_mr;
+               create_cq_resp->cq_handle = (u64)(ULONG_PTR)cq;
+               create_cq_resp->cqe = cq->cqe;
+               p_umv_buf->output_size = sizeof(struct ibv_create_cq_resp);
+       }
+       
+       return cq;
+
+err_create_cq:
+       if (ib_mr)
+               ibv_dereg_mr(ib_mr);
+err_alloc_mr:
+       if( p_umv_buf && p_umv_buf->command ) 
+               p_umv_buf->status = IB_ERROR;
+       return ERR_PTR(err);
+}
+
+int ibv_destroy_cq(struct ib_cq *cq)
+{
+       int ret;
+       struct ib_ucontext      *ucontext = cq->ucontext;
+       struct ib_mr * ib_mr = cq->ib_mr;
+       
+       if (atomic_read(&cq->usecnt))
+               return -EBUSY;
+
+       ret = cq->device->destroy_cq(cq);
+
+       release_user_cq_qp_resources(ucontext, ib_mr);
+       
+       return ret;
+}
+
+int ibv_resize_cq(struct ib_cq *cq,
+                 int           cqe)
+{
+       int ret;
+
+       if (!cq->device->resize_cq)
+               return -ENOSYS;
+
+       ret = cq->device->resize_cq(cq, &cqe);
+       if (!ret)
+               cq->cqe = cqe;
+
+       return ret;
+}
+
+/* Memory regions */
+
+struct ib_mr *ibv_reg_mr(struct ib_pd *pd, 
+       mthca_qp_access_t mr_access_flags,
+       void* __ptr64                   vaddr,
+       uint64_t                                length,
+       uint64_t                                hca_va,
+       boolean_t                       um_call
+       )
+{
+       struct ib_mr *ib_mr;
+       int                          err;
+       HCA_ENTER(HCA_DBG_MEMORY);
+       /* sanity check */
+       if (!um_call) {
+               err = -ENOSYS;
+               HCA_PRINT(TRACE_LEVEL_ERROR,HCA_DBG_MEMORY ,("ibv_reg_mr for kernel mode is not supported (%d)\n", err));
+               goto err_not_supported;
+       }
+
+       ib_mr = pd->device->reg_user_mr(pd, vaddr, length, hca_va, mr_access_flags);
+       if (IS_ERR(ib_mr)) {
+               err = PTR_ERR(ib_mr);
+               HCA_PRINT(TRACE_LEVEL_ERROR,HCA_DBG_MEMORY ,("mthca_reg_user_mr failed (%d)\n", err));
+               goto err_reg_user_mr;
+       }
+
+       ib_mr->device  = pd->device;
+       ib_mr->pd      = pd;
+       atomic_inc(&pd->usecnt);
+       atomic_set(&ib_mr->usecnt, 0);
+       HCA_PRINT(TRACE_LEVEL_INFORMATION ,HCA_DBG_MEMORY ,("PD%d use cnt %d, pd_handle %p, ctx %p \n", 
+               ((struct mthca_pd*)pd)->pd_num, pd->usecnt, pd, pd->ucontext));
+       HCA_EXIT(HCA_DBG_MEMORY);
+       return ib_mr;
+
+err_reg_user_mr:
+err_not_supported:
+       HCA_EXIT(HCA_DBG_MEMORY);
+       return ERR_PTR(err);
+}
+
+struct ib_mr *ibv_get_dma_mr(struct ib_pd *pd, mthca_qp_access_t mr_access_flags)
+{
+       struct ib_mr *mr;
+
+       mr = pd->device->get_dma_mr(pd, mr_access_flags);
+
+       if (!IS_ERR(mr)) {
+               mr->device  = pd->device;
+               mr->pd      = pd;
+               atomic_inc(&pd->usecnt);
+               atomic_set(&mr->usecnt, 0);
+               HCA_PRINT(TRACE_LEVEL_INFORMATION ,HCA_DBG_MEMORY ,("PD%d use cnt %d \n", 
+                       ((struct mthca_pd*)pd)->pd_num, pd->usecnt));
+       }
+
+       return mr;
+}
+
+struct ib_mr *ibv_reg_phys_mr(struct ib_pd *pd,
+                            struct ib_phys_buf *phys_buf_array,
+                            int num_phys_buf,
+                            mthca_qp_access_t mr_access_flags,
+                            u64 *iova_start)
+{
+       struct ib_mr *mr;
+
+       mr = pd->device->reg_phys_mr(pd, phys_buf_array, num_phys_buf,
+                                    mr_access_flags, iova_start);
+
+       if (!IS_ERR(mr)) {
+               mr->device  = pd->device;
+               mr->pd      = pd;
+               atomic_inc(&pd->usecnt);
+               atomic_set(&mr->usecnt, 0);
+               HCA_PRINT(TRACE_LEVEL_INFORMATION  ,HCA_DBG_SHIM        ,("PD%d use cnt %d \n", 
+                       ((struct mthca_pd*)pd)->pd_num, pd->usecnt));
+       }
+
+       return mr;
+}
+
+int ibv_rereg_phys_mr(struct ib_mr *mr,
+                    int mr_rereg_mask,
+                    struct ib_pd *pd,
+                    struct ib_phys_buf *phys_buf_array,
+                    int num_phys_buf,
+                    mthca_qp_access_t mr_access_flags,
+                    u64 *iova_start)
+{
+       struct ib_pd *old_pd;
+       int ret;
+
+       if (!mr->device->rereg_phys_mr)
+               return -ENOSYS;
+
+       if (atomic_read(&mr->usecnt))
+               return -EBUSY;
+
+       old_pd = mr->pd;
+
+       ret = mr->device->rereg_phys_mr(mr, mr_rereg_mask, pd,
+                                       phys_buf_array, num_phys_buf,
+                                       mr_access_flags, iova_start);
+
+       if (!ret && (mr_rereg_mask & IB_MR_REREG_PD)) {
+               atomic_dec(&old_pd->usecnt);
+               atomic_inc(&pd->usecnt);
+               HCA_PRINT(TRACE_LEVEL_INFORMATION  ,HCA_DBG_SHIM        ,("PD%d use cnt %d \n", 
+                       ((struct mthca_pd*)pd)->pd_num, pd->usecnt));
+       }
+
+       return ret;
+}
+
+int ibv_query_mr(struct ib_mr *mr, struct ib_mr_attr *mr_attr)
+{
+       return mr->device->query_mr ?
+               mr->device->query_mr(mr, mr_attr) : -ENOSYS;
+}
+
+int ibv_dereg_mr(struct ib_mr *mr)
+{
+       int ret;
+       struct ib_pd *pd;
+
+       if (atomic_read(&mr->usecnt))
+               return -EBUSY;
+
+       pd = mr->pd;
+       ret = mr->device->dereg_mr(mr);
+       if (!ret) {
+               atomic_dec(&pd->usecnt);
+               HCA_PRINT(TRACE_LEVEL_INFORMATION  ,HCA_DBG_SHIM        ,("PD%d use cnt %d, pd_handle %p, ctx %p \n", 
+                       ((struct mthca_pd*)pd)->pd_num, pd->usecnt, pd, pd->ucontext));
+       }
+
+       return ret;
+}
+
+/* Memory windows */
+
+struct ib_mw *ibv_alloc_mw(struct ib_pd *pd)
+{
+       struct ib_mw *mw;
+
+       if (!pd->device->alloc_mw)
+               return ERR_PTR(-ENOSYS);
+
+       mw = pd->device->alloc_mw(pd);
+       if (!IS_ERR(mw)) {
+               mw->device  = pd->device;
+               mw->pd      = pd;
+               atomic_inc(&pd->usecnt);
+               HCA_PRINT(TRACE_LEVEL_INFORMATION  ,HCA_DBG_SHIM        ,("PD%d use cnt %d \n", 
+                       ((struct mthca_pd*)pd)->pd_num, pd->usecnt));
+       }
+
+       return mw;
+}
+
+int ibv_dealloc_mw(struct ib_mw *mw)
+{
+       struct ib_pd *pd;
+       int ret;
+
+       pd = mw->pd;
+       ret = mw->device->dealloc_mw(mw);
+       if (!ret) {
+               atomic_dec(&pd->usecnt);
+               HCA_PRINT(TRACE_LEVEL_INFORMATION  ,HCA_DBG_SHIM        ,("PD%d use cnt %d \n", 
+                       ((struct mthca_pd*)pd)->pd_num, pd->usecnt));
+       }
+
+       return ret;
+}
+
+/* "Fast" memory regions */
+
+struct ib_fmr *ibv_alloc_fmr(struct ib_pd *pd,
+                           mthca_qp_access_t mr_access_flags,
+                           struct ib_fmr_attr *fmr_attr)
+{
+       struct ib_fmr *fmr;
+
+       if (!pd->device->alloc_fmr)
+               return ERR_PTR(-ENOSYS);
+
+       fmr = pd->device->alloc_fmr(pd, mr_access_flags, fmr_attr);
+       if (!IS_ERR(fmr)) {
+               fmr->device = pd->device;
+               fmr->pd     = pd;
+               atomic_inc(&pd->usecnt);
+               HCA_PRINT(TRACE_LEVEL_INFORMATION  ,HCA_DBG_SHIM        ,("PD%d use cnt %d \n", 
+                       ((struct mthca_pd*)pd)->pd_num, pd->usecnt));
+       }
+
+       return fmr;
+}
+
+int ibv_unmap_fmr(struct list_head *fmr_list)
+{
+       struct ib_fmr *fmr;
+
+       if (list_empty(fmr_list))
+               return 0;
+
+       fmr = list_entry(fmr_list->next, struct ib_fmr, list);
+       return fmr->device->unmap_fmr(fmr_list);
+}
+
+int ibv_dealloc_fmr(struct ib_fmr *fmr)
+{
+       struct ib_pd *pd;
+       int ret;
+
+       pd = fmr->pd;
+       ret = fmr->device->dealloc_fmr(fmr);
+       if (!ret) {
+               atomic_dec(&pd->usecnt);
+               HCA_PRINT(TRACE_LEVEL_INFORMATION  ,HCA_DBG_SHIM        ,("PD%d use cnt %d \n", 
+                       ((struct mthca_pd*)pd)->pd_num, pd->usecnt));
+       }
+
+       return ret;
+}
+
+/* Multicast groups */
+
+int ibv_attach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid)
+{
+       if (!qp->device->attach_mcast)
+               return -ENOSYS;
+       if (gid->raw[0] != 0xff || qp->qp_type != IB_QPT_UNRELIABLE_DGRM)
+               return -EINVAL;
+
+       return qp->device->attach_mcast(qp, gid, lid);
+}
+
+int ibv_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid)
+{
+       if (!qp->device->detach_mcast)
+               return -ENOSYS;
+       if (gid->raw[0] != 0xff || qp->qp_type != IB_QPT_UNRELIABLE_DGRM)
+               return -EINVAL;
+
+       return qp->device->detach_mcast(qp, gid, lid);
+}
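The verb wrappers above follow the Linux-style pointer-encoded error convention: a failed call returns ERR_PTR(-errno), so callers test the result with IS_ERR() and decode it with PTR_ERR(). For illustration, a minimal kernel-mode caller sketch (hypothetical function; only the declarations above are assumed):

        /* Hypothetical caller: create a CQ with no user context and no handlers. */
        static int example_create_cq(struct ib_device *device, struct ib_cq **p_cq)
        {
                struct ib_cq *cq;

                cq = ibv_create_cq(device, NULL, NULL, NULL, 256, NULL, NULL);
                if (IS_ERR(cq))
                        return PTR_ERR(cq);     /* negative errno encoded in the pointer */

                *p_cq = cq;
                /* ... later, tear down with ibv_destroy_cq(cq) ... */
                return 0;
        }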
diff --git a/trunk/hw/mthca/kernel/mthca.h b/trunk/hw/mthca/kernel/mthca.h
new file mode 100644 (file)
index 0000000..9570421
--- /dev/null
@@ -0,0 +1,9 @@
+#ifndef MTHCA_H
+#define MTHCA_H
+
+NTSTATUS mthca_init_one(hca_dev_ext_t *ext);
+void mthca_remove_one(hca_dev_ext_t *ext);
+int mthca_get_dev_info(struct mthca_dev *mdev, __be64 *node_guid, u32 *hw_id);
+
+#endif
+
diff --git a/trunk/hw/mthca/kernel/mthca.inf b/trunk/hw/mthca/kernel/mthca.inf
new file mode 100644 (file)
index 0000000..5a58a25
--- /dev/null
@@ -0,0 +1,201 @@
+; Mellanox Technologies InfiniBand HCAs.\r
+; Copyright 2005 Mellanox Technologies all Rights Reserved.\r
+\r
+[Version]\r
+Signature="$Windows NT$"\r
+Class=InfiniBandHca\r
+ClassGUID={58517E00-D3CF-40c9-A679-CEE5752F4491}\r
+Provider=%MTL%\r
+CatalogFile=mthca.cat\r
+; must be synchronized with MTHCA_DEV.H\r
+DriverVer=03/01/2006,1.0.4\r
+\r
+; ================= Destination directory section =====================\r
+\r
+[DestinationDirs]\r
+DefaultDestDir=%DIRID_DRIVERS%\r
+ClassCopyFiles=%DIRID_SYSTEM%\r
+MTHCA.UMCopyFiles=%DIRID_SYSTEM%\r
+MTHCA.WOW64CopyFiles=%DIRID_WINDOWS%\SysWOW64\r
+\r
+; ================= Class Install section =====================\r
+\r
+[ClassInstall32]\r
+CopyFiles=ClassCopyFiles\r
+AddReg=ClassAddReg\r
+\r
+[ClassCopyFiles]\r
+IbInstaller.dll\r
+\r
+[ClassAddReg]\r
+HKR,,,,"InfiniBand Host Channel Adapters"\r
+HKR,,Icon,,-5\r
+HKLM,"System\CurrentControlSet\Control\CoDeviceInstallers", \\r
+ %HcaClassGuid%,%REG_MULTI_SZ_APPEND%, "IbInstaller.dll,IbCoInstaller"\r
+\r
+; ================= Device Install section =====================\r
+\r
+[SourceDisksNames.x86]\r
+1=%DiskId%,,,\x86\r
+\r
+[SourceDisksNames.amd64]\r
+1=%DiskId%,,,\amd64\r
+\r
+[SourceDisksNames.ia64]\r
+1=%DiskId%,,,\ia64\r
+\r
+[SourceDisksFiles]\r
+IbInstaller.dll=1\r
+ibal.sys=1\r
+mthca.sys=1\r
+mthcau.dll=1\r
+mthcaud.dll=1\r
+\r
+[SourceDisksFiles.amd64]\r
+IbInstaller.dll=1\r
+ibal.sys=1\r
+mthca.sys=1\r
+mthcau.dll=1\r
+mthcaud.dll=1\r
+;uvpd32.dll=1\r
+;uvpd32d.dll=1\r
+\r
+[SourceDisksFiles.ia64]\r
+IbInstaller.dll=1\r
+ibal.sys=1\r
+mthca.sys=1\r
+mthcau.dll=1\r
+mthcaud.dll=1\r
+;uvpd32.dll=1\r
+;uvpd32d.dll=1\r
+\r
+[Manufacturer]\r
+%MTL% = HCA.DeviceSection,ntx86,ntamd64,ntia64\r
+\r
+[HCA.DeviceSection]\r
+; empty since we don't support W9x/Me\r
+\r
+[HCA.DeviceSection.ntx86]\r
+%MT23108.DeviceDesc% = MTHCA.DDInstall, PCI\VEN_15B3&DEV_5A44\r
+%MT25208.DeviceDesc% = MTHCA.DDInstall, PCI\VEN_15B3&DEV_6278\r
+%MT25218.DeviceDesc% = MTHCA.DDInstall, PCI\VEN_15B3&DEV_6282\r
+%MT24204.DeviceDesc% = MTHCA.DDInstall, PCI\VEN_15B3&DEV_5E8C\r
+%MT25204.DeviceDesc% = MTHCA.DDInstall, PCI\VEN_15B3&DEV_6274\r
+\r
+\r
+[HCA.DeviceSection.ntamd64]\r
+%MT23108.DeviceDesc% = MTHCA.DDInstall, PCI\VEN_15B3&DEV_5A44\r
+%MT25208.DeviceDesc% = MTHCA.DDInstall, PCI\VEN_15B3&DEV_6278\r
+%MT25218.DeviceDesc% = MTHCA.DDInstall, PCI\VEN_15B3&DEV_6282\r
+%MT24204.DeviceDesc% = MTHCA.DDInstall, PCI\VEN_15B3&DEV_5E8C\r
+%MT25204.DeviceDesc% = MTHCA.DDInstall, PCI\VEN_15B3&DEV_6274\r
+\r
+[HCA.DeviceSection.ntia64]\r
+%MT23108.DeviceDesc% = MTHCA.DDInstall, PCI\VEN_15B3&DEV_5A44\r
+%MT25208.DeviceDesc% = MTHCA.DDInstall, PCI\VEN_15B3&DEV_6278\r
+%MT25218.DeviceDesc% = MTHCA.DDInstall, PCI\VEN_15B3&DEV_6282\r
+%MT24204.DeviceDesc% = MTHCA.DDInstall, PCI\VEN_15B3&DEV_5E8C\r
+%MT25204.DeviceDesc% = MTHCA.DDInstall, PCI\VEN_15B3&DEV_6274\r
+\r
+[MTHCA.DDInstall.ntx86]\r
+CopyFiles = MTHCA.CopyFiles\r
+CopyFiles = MTHCA.UMCopyFiles\r
+\r
+[MTHCA.DDInstall.ntamd64]\r
+CopyFiles = MTHCA.CopyFiles\r
+CopyFiles = MTHCA.UMCopyFiles\r
+CopyFiles = MTHCA.WOW64CopyFiles\r
+\r
+[MTHCA.DDInstall.ntia64]\r
+CopyFiles = MTHCA.CopyFiles\r
+CopyFiles = MTHCA.UMCopyFiles\r
+CopyFiles = MTHCA.WOW64CopyFiles\r
+\r
+[MTHCA.DDInstall.ntx86.Services]\r
+AddService = mthca,%SPSVCINST_ASSOCSERVICE%,MTHCA.ServiceInstall,MTHCA.EventLog\r
+AddService = ibal,%SPSVCINST_NULL%,Ibal.ServiceInstall\r
+\r
+[MTHCA.DDInstall.ntamd64.Services]\r
+AddService = mthca,%SPSVCINST_ASSOCSERVICE%,MTHCA.ServiceInstall,MTHCA.EventLog\r
+AddService = ibal,%SPSVCINST_NULL%,Ibal.ServiceInstall\r
+\r
+[MTHCA.DDInstall.ntia64.Services]\r
+AddService = mthca,%SPSVCINST_ASSOCSERVICE%,MTHCA.ServiceInstall,MTHCA.EventLog\r
+AddService = ibal,%SPSVCINST_NULL%,Ibal.ServiceInstall\r
+\r
+[MTHCA.CopyFiles]\r
+ibal.sys\r
+mthca.sys\r
+\r
+[MTHCA.UMCopyFiles]\r
+; 2 lines excluded temporarily\r
+mthcau.dll,,,2\r
+mthcaud.dll,,,2\r
+\r
+[MTHCA.WOW64CopyFiles]\r
+mthcau.dll,uvpd32.dll,,2\r
+mthcaud.dll,uvpd32d.dll,,2\r
+\r
+;\r
+; ============= Service Install section ==============\r
+;\r
+\r
+[MTHCA.ServiceInstall]\r
+DisplayName     = %MTHCA.ServiceDesc%\r
+ServiceType     = %SERVICE_KERNEL_DRIVER%\r
+StartType       = %SERVICE_DEMAND_START%\r
+ErrorControl    = %SERVICE_ERROR_NORMAL%\r
+ServiceBinary   = %12%\mthca.sys\r
+LoadOrderGroup  = extended base\r
+AddReg          = MTHCA.ParamsReg\r
+\r
+[Ibal.ServiceInstall]\r
+DisplayName     = %Ibal.ServiceDesc%\r
+ServiceType     = %SERVICE_KERNEL_DRIVER%\r
+StartType       = %SERVICE_DEMAND_START%\r
+ErrorControl    = %SERVICE_ERROR_NORMAL%\r
+ServiceBinary   = %12%\ibal.sys\r
+AddReg          = Ibal.ParamsReg\r
+\r
+\r
+[MTHCA.EventLog]\r
+AddReg = MTHCA.AddEventLogReg\r
+\r
+[MTHCA.AddEventLogReg]\r
+HKR, , EventMessageFile, 0x00020000, "%%SystemRoot%%\System32\IoLogMsg.dll;%%SystemRoot%%\System32\drivers\mthca.sys"\r
+HKR, , TypesSupported,   0x00010001, 7\r
+\r
+[MTHCA.ParamsReg]\r
+HKR,"Parameters","DebugLevel",%REG_DWORD%,0x00000003\r
+HKR,"Parameters","DebugFlags",%REG_DWORD%,0x0000ffff\r
+HKLM,"System\CurrentControlSet\Control\WMI\GlobalLogger\8bf1f640-63fe-4743-b9ef-fa38c695bfde","Flags",%REG_DWORD%,0xffff\r
+HKLM,"System\CurrentControlSet\Control\WMI\GlobalLogger\8bf1f640-63fe-4743-b9ef-fa38c695bfde","Level",%REG_DWORD%,0x3\r
+\r
+[Ibal.ParamsReg]\r
+HKR,"Parameters","DebugFlags",%REG_DWORD_NO_CLOBBER%,0x80000000\r
+HKR,"Parameters","SmiPollInterval",%REG_DWORD_NO_CLOBBER%,20000\r
+HKR,"Parameters","IocQueryTimeout",%REG_DWORD_NO_CLOBBER%,250\r
+HKR,"Parameters","IocQueryRetries",%REG_DWORD_NO_CLOBBER%,4\r
+HKR,"Parameters","IocPollInterval",%REG_DWORD_NO_CLOBBER%,30000\r
+\r
+[Strings]\r
+HcaClassGuid = "{58517E00-D3CF-40c9-A679-CEE5752F4491}"\r
+MTL="Mellanox Technologies Ltd."\r
+Ibal.ServiceDesc = "Mellanox InfiniBand Access Layer"\r
+MTHCA.ServiceDesc = "Driver for Mellanox InfiniHost Devices"\r
+MT23108.DeviceDesc="InfiniHost (MT23108) - Mellanox InfiniBand HCA"\r
+MT25208.DeviceDesc="InfiniHost (MT25208) - Mellanox InfiniBand HCA for PCI Express"\r
+MT25218.DeviceDesc="InfiniHost III Ex (MT25218) - Mellanox InfiniBand HCA for PCI Express"\r
+MT24204.DeviceDesc="InfiniHost III Lx (MT24204) - Mellanox InfiniBand HCA for PCI Express"\r
+MT25204.DeviceDesc="InfiniHost III Lx (MT25204) - Mellanox InfiniBand HCA for PCI Express"\r
+DiskId = "Mellanox InfiniBand HCA installation disk"\r
+SPSVCINST_NULL = 0x0\r
+SPSVCINST_ASSOCSERVICE = 0x00000002\r
+SERVICE_KERNEL_DRIVER  = 1\r
+SERVICE_DEMAND_START   = 3\r
+SERVICE_ERROR_NORMAL   = 1\r
+REG_DWORD              = 0x00010001\r
+REG_DWORD_NO_CLOBBER   = 0x00010003\r
+REG_MULTI_SZ_APPEND    = 0x00010008\r
+DIRID_SYSTEM           = 11\r
+DIRID_DRIVERS          = 12\r
diff --git a/trunk/hw/mthca/kernel/mthca_allocator.c b/trunk/hw/mthca/kernel/mthca_allocator.c
new file mode 100644 (file)
index 0000000..84b1aa9
--- /dev/null
@@ -0,0 +1,290 @@
+/*
+ * Copyright (c) 2004 Topspin Communications.  All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id: mthca_allocator.c 3047 2005-08-10 03:59:35Z roland $
+ */
+
+#include "mthca_dev.h"
+#if defined(EVENT_TRACING)
+#ifdef offsetof
+#undef offsetof
+#endif
+#include "mthca_allocator.tmh"
+#endif
+
+/* Trivial bitmap-based allocator */
+u32 mthca_alloc(struct mthca_alloc *alloc)
+{
+       u32 obj;
+       SPIN_LOCK_PREP(lh);
+
+       spin_lock(&alloc->lock, &lh);
+       obj = find_next_zero_bit(alloc->table, alloc->max, alloc->last);
+       if (obj >= alloc->max) {
+               alloc->top = (alloc->top + alloc->max) & alloc->mask;
+               obj = find_first_zero_bit(alloc->table, alloc->max);
+       }
+
+       if (obj < alloc->max) {
+               set_bit(obj, (long*)alloc->table);
+               obj |= alloc->top;
+       } else
+               obj = (u32)-1;
+
+       spin_unlock(&lh);
+
+       return obj;
+}
+
+void mthca_free(struct mthca_alloc *alloc, u32 obj)
+{
+       SPIN_LOCK_PREP(lh);
+       
+       obj &= alloc->max - 1;
+       spin_lock(&alloc->lock, &lh);
+       clear_bit(obj, (long *)alloc->table);
+       alloc->last = MIN(alloc->last, obj);
+       alloc->top = (alloc->top + alloc->max) & alloc->mask;
+       spin_unlock(&lh);
+}
+
+int mthca_alloc_init(struct mthca_alloc *alloc, u32 num, u32 mask,
+                    u32 reserved)
+{
+       int i;
+       HCA_ENTER(HCA_DBG_INIT);
+       /* num must be a power of 2 */
+       if ((int)num != 1 << (ffs(num) - 1))
+               return -EINVAL;
+
+       alloc->last = 0;
+       alloc->top  = 0;
+       alloc->max  = num;
+       alloc->mask = mask;
+       spin_lock_init(&alloc->lock);
+       alloc->table = kmalloc(BITS_TO_LONGS(num) * sizeof (long),
+                              GFP_KERNEL);
+       if (!alloc->table)
+               return -ENOMEM;
+
+       bitmap_zero(alloc->table, num);
+       for (i = 0; i < (int)reserved; ++i)
+               set_bit(i, (long *)alloc->table);
+
+       return 0;
+}
+
+void mthca_alloc_cleanup(struct mthca_alloc *alloc)
+{
+       kfree(alloc->table);
+}
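For illustration, the allocator's lifecycle end to end (a minimal sketch; the sizes are made up, num must be a power of two, and mask is typically num - 1, as in mthca_init_av_table later in this commit):

        /* Illustrative only: 256 objects, first 8 reserved. */
        static int example_alloc_usage(void)
        {
                struct mthca_alloc alloc;
                u32 obj;
                int err;

                err = mthca_alloc_init(&alloc, 256, 255, 8);
                if (err)
                        return err;

                obj = mthca_alloc(&alloc);      /* (u32)-1 means the table is full */
                if (obj != (u32)-1)
                        mthca_free(&alloc, obj);

                mthca_alloc_cleanup(&alloc);
                return 0;
        }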
+
+/*
+ * Array of pointers with lazy allocation of leaf pages.  Callers of
+ * _get, _set and _clear methods must use a lock or otherwise
+ * serialize access to the array.
+ */
+
+void *mthca_array_get(struct mthca_array *array, int index)
+{
+       int p = (index * sizeof (void *)) >> PAGE_SHIFT;
+
+       if (array->page_list[p].page) {
+               int i = index & (PAGE_SIZE / sizeof (void *) - 1);
+               return array->page_list[p].page[i];
+       } else
+               return NULL;
+}
+
+int mthca_array_set(struct mthca_array *array, int index, void *value)
+{
+       int p = (index * sizeof (void *)) >> PAGE_SHIFT;
+
+       /* Allocate with GFP_ATOMIC because we'll be called with locks held. */
+       if (!array->page_list[p].page)
+               array->page_list[p].page = (void **) get_zeroed_page(GFP_ATOMIC);
+
+       if (!array->page_list[p].page)
+               return -ENOMEM;
+
+       array->page_list[p].page[index & (PAGE_SIZE / sizeof (void *) - 1)] =
+               value;
+       ++array->page_list[p].used;
+
+       return 0;
+}
+
+void mthca_array_clear(struct mthca_array *array, int index)
+{
+       int p = (index * sizeof (void *)) >> PAGE_SHIFT;
+
+       if (--array->page_list[p].used == 0) {
+               free_page((void*) array->page_list[p].page);
+               array->page_list[p].page = NULL;
+       }
+
+       if (array->page_list[p].used < 0)
+               HCA_PRINT(TRACE_LEVEL_INFORMATION, HCA_DBG_LOW,("Array %p index %d page %d with ref count %d < 0\n",
+                        array, index, p, array->page_list[p].used));
+}
+
+int mthca_array_init(struct mthca_array *array, int nent)
+{
+       int npage = (nent * sizeof (void *) + PAGE_SIZE - 1) / PAGE_SIZE;
+       int i;
+
+       array->page_list = kmalloc(npage * sizeof *array->page_list, GFP_KERNEL);
+       if (!array->page_list)
+               return -ENOMEM;
+
+       for (i = 0; i < npage; ++i) {
+               array->page_list[i].page = NULL;
+               array->page_list[i].used = 0;
+       }
+
+       return 0;
+}
+
+void mthca_array_cleanup(struct mthca_array *array, int nent)
+{
+       int i;
+
+       for (i = 0; i < (int)((nent * sizeof (void *) + PAGE_SIZE - 1) / PAGE_SIZE); ++i)
+               free_page((void*) array->page_list[i].page);
+
+       kfree(array->page_list);
+}
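For illustration, typical use of the array (a minimal sketch; the caller is assumed to hold whatever lock serializes _get, _set and _clear, per the comment above, and the index is made up):

        /* Illustrative only: store and look up one pointer. */
        static int example_array_usage(void *value)
        {
                struct mthca_array array;
                int err;

                err = mthca_array_init(&array, 1024);
                if (err)
                        return err;

                err = mthca_array_set(&array, 42, value);        /* leaf page allocated on demand */
                if (!err) {
                        if (mthca_array_get(&array, 42) != value)
                                err = -EINVAL;                   /* should not happen */
                        mthca_array_clear(&array, 42);           /* frees the leaf page once unused */
                }

                mthca_array_cleanup(&array, 1024);
                return err;
        }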
+
+/*
+ * Handling for queue buffers -- we allocate a bunch of memory and
+ * register it in a memory region at HCA virtual address 0.  If the
+ * requested size is > max_direct, we split the allocation into
+ * multiple pages, so we don't require too much contiguous memory.
+ */
+
+int mthca_buf_alloc(struct mthca_dev *dev, int size, int max_direct,
+                   union mthca_buf *buf, int *is_direct, struct mthca_pd *pd,
+                   int hca_write, struct mthca_mr *mr)
+{
+       int err = -ENOMEM;
+       int npages, shift;
+       u64 *dma_list = NULL;
+       dma_addr_t t;
+       int i;
+
+       HCA_ENTER(HCA_DBG_MEMORY);
+       if (size <= max_direct) {
+               *is_direct = 1;
+               npages     = 1;
+               shift      = get_order(size) + PAGE_SHIFT;
+
+               alloc_dma_zmem_map(dev, size, PCI_DMA_BIDIRECTIONAL, &buf->direct);
+               if (!buf->direct.page)
+                       return -ENOMEM;
+               t = buf->direct.dma_address;            /* shorten the code below */
+
+               while (t & ((1 << shift) - 1)) {
+                       --shift;
+                       npages *= 2;
+               }
+
+               dma_list = kmalloc(npages * sizeof *dma_list, GFP_KERNEL);
+               if (!dma_list)
+                       goto err_free;
+
+               for (i = 0; i < npages; ++i)
+                       dma_list[i] = t + i * (1 << shift);
+       } else {
+               *is_direct = 0;
+               npages     = (size + PAGE_SIZE - 1) / PAGE_SIZE;
+               shift      = PAGE_SHIFT;
+
+               dma_list = kmalloc(npages * sizeof *dma_list, GFP_KERNEL);
+               if (!dma_list)
+                       return -ENOMEM;
+
+               buf->page_list = kmalloc(npages * sizeof *buf->page_list,
+                                        GFP_KERNEL);
+               if (!buf->page_list)
+                       goto err_out;
+
+               for (i = 0; i < npages; ++i)
+                       buf->page_list[i].page = NULL;
+
+               for (i = 0; i < npages; ++i) {
+                       alloc_dma_zmem_map(dev, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL, &buf->page_list[i]);
+                       if (!buf->page_list[i].page)
+                               goto err_free;
+                       dma_list[i] = buf->page_list[i].dma_address;
+               }
+       }
+
+       err = mthca_mr_alloc_phys(dev, pd->pd_num,
+                                 dma_list, shift, npages,
+                                 0, size,
+                                 MTHCA_MPT_FLAG_LOCAL_READ |
+                                 (hca_write ? MTHCA_MPT_FLAG_LOCAL_WRITE : 0),
+                                 mr);
+       if (err)
+               goto err_free;
+
+       kfree(dma_list);
+       
+       HCA_EXIT(HCA_DBG_MEMORY);
+       return 0;
+
+err_free:
+       mthca_buf_free(dev, size, buf, *is_direct, NULL);
+
+err_out:
+       kfree(dma_list);
+
+       return err;
+}
+
+void mthca_buf_free(struct mthca_dev *dev, int size, union mthca_buf *buf,
+                   int is_direct, struct mthca_mr *mr)
+{
+       int i;
+
+       if (mr)
+               mthca_free_mr(dev, mr);
+
+       if (is_direct) {
+               free_dma_mem_map(dev, &buf->direct, PCI_DMA_BIDIRECTIONAL);
+       }
+       else {
+               for (i = 0; i < (size + PAGE_SIZE - 1) / PAGE_SIZE; ++i) {
+                       free_dma_mem_map(dev, &buf->page_list[i], PCI_DMA_BIDIRECTIONAL);
+               }
+               kfree(buf->page_list);
+       }
+}
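For illustration, the two allocation shapes described above in one call sequence (a minimal sketch; dev and pd are assumed to come from the surrounding driver, and the sizes are made up):

        /* Illustrative only: sizes above max_direct fall back to per-page allocation. */
        static int example_buf_usage(struct mthca_dev *dev, struct mthca_pd *pd)
        {
                union mthca_buf buf;
                struct mthca_mr mr;
                int is_direct, err;

                err = mthca_buf_alloc(dev, 16384, 8192, &buf, &is_direct,
                                      pd, 1 /* hca_write */, &mr);
                if (err)
                        return err;

                /* ... use the buffer; it is registered at HCA virtual address 0 ... */

                mthca_buf_free(dev, 16384, &buf, is_direct, &mr);
                return 0;
        }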
diff --git a/trunk/hw/mthca/kernel/mthca_av.c b/trunk/hw/mthca/kernel/mthca_av.c
new file mode 100644 (file)
index 0000000..837bb3d
--- /dev/null
@@ -0,0 +1,297 @@
+/*
+ * Copyright (c) 2004 Topspin Communications.  All rights reserved.
+ * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id: mthca_av.c 2928 2005-07-28 18:45:56Z sean.hefty $
+ */
+
+#include <ib_verbs.h>
+#include <ib_cache.h>
+#include "mthca_dev.h"
+#if defined(EVENT_TRACING)
+#ifdef offsetof
+#undef offsetof
+#endif
+#include "mthca_av.tmh"
+#endif
+
+#ifdef ALLOC_PRAGMA
+#pragma alloc_text (PAGE, mthca_init_av_table)
+#pragma alloc_text (PAGE, mthca_cleanup_av_table)
+#endif
+       
+
+struct mthca_av {
+       __be32 port_pd;
+       u8     reserved1;
+       u8     g_slid;
+       __be16 dlid;
+       u8     reserved2;
+       u8     gid_index;
+       u8     msg_sr;
+       u8     hop_limit;
+       __be32 sl_tclass_flowlabel;
+       __be32 dgid[4];
+};
+
+int mthca_create_ah(struct mthca_dev *dev,
+                   struct mthca_pd *pd,
+                   struct ib_ah_attr *ah_attr,
+                   struct mthca_ah *ah)
+{
+       u32 index = (u32)-1;
+       struct mthca_av *av = NULL;
+
+       ah->type = MTHCA_AH_PCI_POOL;
+
+       if (mthca_is_memfree(dev)) {
+               ah->av   = kmalloc(sizeof *ah->av, GFP_ATOMIC);
+               if (!ah->av)
+                       return -ENOMEM;
+
+               ah->type = MTHCA_AH_KMALLOC;
+               av       = ah->av;
+       } else if (!atomic_read(&pd->sqp_count) &&
+                !(dev->mthca_flags & MTHCA_FLAG_DDR_HIDDEN)) {
+               index = mthca_alloc(&dev->av_table.alloc);
+
+               /* fall back to allocating in host memory */
+               if (index == -1)
+                       goto on_hca_fail;
+
+               av = kmalloc(sizeof *av, GFP_ATOMIC);
+               if (!av)
+                       goto on_hca_fail;
+
+               ah->type = MTHCA_AH_ON_HCA;
+               ah->avdma  = dev->av_table.ddr_av_base +
+                       index * MTHCA_AV_SIZE;
+       }
+
+on_hca_fail:
+       if (ah->type == MTHCA_AH_PCI_POOL) {
+               ah->av = pci_pool_alloc(dev->av_table.pool,
+                                       SLAB_ATOMIC, &ah->avdma);
+               if (!ah->av)
+                       return -ENOMEM;
+
+               av = ah->av;
+       }
+
+       ah->key = pd->ntmr.ibmr.lkey;
+
+       RtlZeroMemory(av, MTHCA_AV_SIZE);
+
+       av->port_pd = cl_hton32(pd->pd_num | (ah_attr->port_num << 24));
+       av->g_slid  = ah_attr->src_path_bits;
+       av->dlid    = cl_hton16(ah_attr->dlid);
+       av->msg_sr  = (3 << 4) | /* 2K message */
+               ah_attr->static_rate;
+       av->sl_tclass_flowlabel = cl_hton32(ah_attr->sl << 28);
+       if (ah_attr->ah_flags & IB_AH_GRH) {
+               av->g_slid |= 0x80;
+               av->gid_index = (u8)((ah_attr->port_num - 1) * dev->limits.gid_table_len +
+                       ah_attr->grh.sgid_index);
+               av->hop_limit = ah_attr->grh.hop_limit;
+               av->sl_tclass_flowlabel |=
+                       cl_hton32((ah_attr->grh.traffic_class << 20) |
+                                   ah_attr->grh.flow_label);
+               memcpy(av->dgid, ah_attr->grh.dgid.raw, 16);
+       } else {
+               /* Arbel workaround -- low byte of GID must be 2 */
+               av->dgid[3] = cl_hton32(2);
+       }
+
+#if 0
+       {
+               int j;
+
+               HCA_PRINT(TRACE_LEVEL_VERBOSE,HCA_DBG_LOW,("Created UDAV at %p/%08lx:\n",
+                         av, (unsigned long) ah->avdma));
+               for (j = 0; j < 8; ++j)
+                       HCA_PRINT(TRACE_LEVEL_VERBOSE ,HCA_DBG_LOW ,("  [%2x] %08x\n",
+                              j * 4, cl_ntoh32(((__be32 *) av)[j])));
+       }
+#endif
+
+       if (ah->type == MTHCA_AH_ON_HCA) {
+               memcpy_toio((u8*)dev->av_table.av_map + index * MTHCA_AV_SIZE,
+                           av, MTHCA_AV_SIZE);
+               kfree(av);
+       }
+
+       return 0;
+}
+
+int mthca_destroy_ah(struct mthca_dev *dev, struct mthca_ah *ah)
+{
+       switch (ah->type) {
+       case MTHCA_AH_ON_HCA:
+               mthca_free(&dev->av_table.alloc,
+                       (u32)( (ah->avdma - dev->av_table.ddr_av_base) /MTHCA_AV_SIZE));
+               break;
+
+       case MTHCA_AH_PCI_POOL:
+               pci_pool_free(dev->av_table.pool, ah->av, ah->avdma);
+               break;
+
+       case MTHCA_AH_KMALLOC:
+               kfree(ah->av);
+               break;
+       }
+
+       return 0;
+}
+
+int mthca_ah_grh_present(struct mthca_ah *ah)
+{
+       return !!(ah->av->g_slid & 0x80);
+}
+
+int mthca_read_ah(struct mthca_dev *dev, struct mthca_ah *ah,
+                 struct ib_ud_header *header)
+{
+       if (ah->type == MTHCA_AH_ON_HCA)
+               return -EINVAL;
+
+       header->lrh.service_level   = (u8)(cl_ntoh32(ah->av->sl_tclass_flowlabel) >> 28);
+       header->lrh.destination_lid = ah->av->dlid;
+       header->lrh.source_lid      = cl_hton16(ah->av->g_slid & 0x7f);
+       header->grh_present = mthca_ah_grh_present(ah);
+       if (header->grh_present) {
+               header->grh.traffic_class =
+                       (u8)((cl_ntoh32(ah->av->sl_tclass_flowlabel) >> 20) & 0xff);
+               header->grh.flow_label    =
+                       (u8)(ah->av->sl_tclass_flowlabel & cl_hton32(0xfffff));
+               ib_get_cached_gid(&dev->ib_dev,
+                                 (u8) (cl_ntoh32(ah->av->port_pd) >> 24),
+                       ah->av->gid_index % dev->limits.gid_table_len,
+                                 &header->grh.source_gid);
+               memcpy(header->grh.destination_gid.raw,
+                      ah->av->dgid, 16);
+       }
+
+       return 0;
+}
+
+int mthca_init_av_table(struct mthca_dev *dev)
+{
+       int err;
+
+       if (mthca_is_memfree(dev))
+               return 0;
+
+       err = mthca_alloc_init(&dev->av_table.alloc,
+                              dev->av_table.num_ddr_avs,
+                              dev->av_table.num_ddr_avs - 1,
+                              0);
+       if (err)
+               return err;
+
+       dev->av_table.pool = pci_pool_create("mthca_av", dev,
+                                            MTHCA_AV_SIZE,
+                                            MTHCA_AV_SIZE, 0);
+       if (!dev->av_table.pool)
+               goto out_free_alloc;
+
+       if (!(dev->mthca_flags & MTHCA_FLAG_DDR_HIDDEN)) {
+               dev->av_table.av_map = ioremap(pci_resource_start(dev, HCA_BAR_TYPE_DDR) +
+                                              dev->av_table.ddr_av_base -
+                                              dev->ddr_start,
+                                              dev->av_table.num_ddr_avs *
+                                              MTHCA_AV_SIZE,
+                                              &dev->av_table.av_map_size);
+               if (!dev->av_table.av_map)
+                       goto out_free_pool;
+       } else
+               dev->av_table.av_map = NULL;
+
+       return 0;
+
+ out_free_pool:
+       pci_pool_destroy(dev->av_table.pool);
+
+ out_free_alloc:
+       mthca_alloc_cleanup(&dev->av_table.alloc);
+       return -ENOMEM;
+}
+
+void mthca_cleanup_av_table(struct mthca_dev *dev)
+{
+       if (mthca_is_memfree(dev))
+               return;
+
+       if (dev->av_table.av_map)
+               iounmap(dev->av_table.av_map, dev->av_table.av_map_size);
+       pci_pool_destroy(dev->av_table.pool);
+       mthca_alloc_cleanup(&dev->av_table.alloc);
+}
+
+// NB: temporary, to support query_qp
+void mthca_get_av_params(      struct mthca_ah *ah_p, u8 *port_num, __be16 *dlid, u8 *sr, u8 *path_bits )
+{
+       struct mthca_av *av_p = ah_p->av;
+       *port_num  = (u8) (cl_ntoh32(av_p->port_pd) >> 24);
+       *dlid      = av_p->dlid;
+       *sr        = av_p->msg_sr & 0x0f;
+       *path_bits = av_p->g_slid & 0x7f;
+}
+
+// NB: temporary, to support modify_qp
+void mthca_set_av_params(      struct mthca_dev *dev, struct mthca_ah *ah_p, struct ib_ah_attr *ah_attr )
+{
+       struct mthca_av *av      = ah_p->av;
+       struct ib_ah *ib_ah_p = (struct ib_ah *)ah_p;
+       struct mthca_pd *pd = (struct mthca_pd *)ib_ah_p->pd;
+
+       // taken from mthca_create_av
+       av->port_pd = cl_hton32(pd->pd_num | (ah_attr->port_num << 24));
+       av->g_slid      = ah_attr->src_path_bits;
+       av->dlid                = cl_hton16(ah_attr->dlid);
+       av->msg_sr      = (3 << 4) | /* 2K message */
+               ah_attr->static_rate;
+       av->sl_tclass_flowlabel = cl_hton32(ah_attr->sl << 28);
+       if (ah_attr->ah_flags & IB_AH_GRH) {
+               av->g_slid |= 0x80;
+               av->gid_index = (u8)((ah_attr->port_num - 1) * dev->limits.gid_table_len +
+                       ah_attr->grh.sgid_index);
+               av->hop_limit = ah_attr->grh.hop_limit;
+               av->sl_tclass_flowlabel |=
+                       cl_hton32((ah_attr->grh.traffic_class << 20) |
+                                               ah_attr->grh.flow_label);
+               memcpy(av->dgid, ah_attr->grh.dgid.raw, 16);
+       } else {
+               /* Arbel workaround -- low byte of GID must be 2 */
+               av->dgid[3] = cl_hton32(2);
+       }
+}
+       
+
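The port_pd word of the address vector packs the PD number into its low 24 bits and the port number into the top byte (see mthca_create_ah and mthca_get_av_params above); a minimal decoding helper, for illustration only:

        /* Illustrative only: unpack the port/PD word of a UD address vector. */
        static void example_decode_port_pd(__be32 port_pd, u8 *port_num, u32 *pd_num)
        {
                u32 v = cl_ntoh32(port_pd);

                *port_num = (u8)(v >> 24);      /* top byte: port number */
                *pd_num   = v & 0x00ffffff;     /* low 24 bits: protection domain number */
        }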
diff --git a/trunk/hw/mthca/kernel/mthca_catas.c b/trunk/hw/mthca/kernel/mthca_catas.c
new file mode 100644 (file)
index 0000000..fc7e90c
--- /dev/null
@@ -0,0 +1,165 @@
+/*
+ * Copyright (c) 2005 Cisco Systems.  All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id$
+ */
+
+#include "mthca_dev.h"
+#if defined(EVENT_TRACING)
+#ifdef offsetof
+#undef offsetof
+#endif
+#include "mthca_catas.tmh"
+#endif
+
+enum {
+       MTHCA_CATAS_POLL_INTERVAL       = 5 * HZ,
+
+       MTHCA_CATAS_TYPE_INTERNAL       = 0,
+       MTHCA_CATAS_TYPE_UPLINK         = 3,
+       MTHCA_CATAS_TYPE_DDR            = 4,
+       MTHCA_CATAS_TYPE_PARITY         = 5,
+};
+
+static spinlock_t catas_lock;
+
+static void handle_catas(struct mthca_dev *dev)
+{
+       struct ib_event event;
+       const char *type;
+       int i;
+
+       event.device = &dev->ib_dev;
+       event.event  = IB_EVENT_DEVICE_FATAL;
+       event.element.port_num = 0;
+
+       ib_dispatch_event(&event);
+
+       switch (_byteswap_ulong(readl(dev->catas_err.map)) >> 24) {
+       case MTHCA_CATAS_TYPE_INTERNAL:
+               type = "internal error";
+               break;
+       case MTHCA_CATAS_TYPE_UPLINK:
+               type = "uplink bus error";
+               break;
+       case MTHCA_CATAS_TYPE_DDR:
+               type = "DDR data error";
+               break;
+       case MTHCA_CATAS_TYPE_PARITY:
+               type = "internal parity error";
+               break;
+       default:
+               type = "unknown error";
+               break;
+       }
+
+       HCA_PRINT(TRACE_LEVEL_ERROR  ,HCA_DBG_LOW  ,("Catastrophic error detected: %s\n", type));
+       for (i = 0; i < (int)dev->catas_err.size; ++i)
+               HCA_PRINT(TRACE_LEVEL_ERROR,HCA_DBG_LOW,("  buf[%02x]: %08x\n",
+                         i, _byteswap_ulong(readl(dev->catas_err.map + i))));
+}
+
+static void poll_catas(struct mthca_dev *dev)
+{
+       int i;
+       SPIN_LOCK_PREP(lh);
+
+       for (i = 0; i < (int)dev->catas_err.size; ++i)
+               if (readl(dev->catas_err.map + i)) {
+                       handle_catas(dev);
+                       return;
+               }
+
+       spin_lock_dpc(&catas_lock, &lh);
+       if (!dev->catas_err.stop) {
+               KeSetTimerEx( &dev->catas_err.timer, dev->catas_err.interval, 
+                       0, &dev->catas_err.timer_dpc );
+       }
+       spin_unlock_dpc(&lh);
+
+       return;
+}
+
+static void  timer_dpc(
+    IN struct _KDPC  *Dpc,
+    IN PVOID  DeferredContext,
+    IN PVOID  SystemArgument1,
+    IN PVOID  SystemArgument2
+    )
+{
+       struct mthca_dev *dev = (struct mthca_dev *)DeferredContext;
+       UNREFERENCED_PARAMETER(Dpc);
+       UNREFERENCED_PARAMETER(SystemArgument1);
+       UNREFERENCED_PARAMETER(SystemArgument2);
+       poll_catas( dev );
+}
+
+
+void mthca_start_catas_poll(struct mthca_dev *dev)
+{
+       u64 addr;
+
+       dev->catas_err.stop = 0;
+       dev->catas_err.map  = NULL;
+
+       addr = pci_resource_start(dev, HCA_BAR_TYPE_HCR) +
+               ((pci_resource_len(dev, HCA_BAR_TYPE_HCR) - 1) &
+                dev->catas_err.addr);
+
+       dev->catas_err.map = ioremap(addr, dev->catas_err.size * 4, &dev->catas_err.map_size );
+       if (!dev->catas_err.map) {
+               HCA_PRINT(TRACE_LEVEL_WARNING,HCA_DBG_LOW, ("couldn't map catastrophic error region "
+                          "at 0x%I64x/0x%x\n", addr, dev->catas_err.size * 4));
+               return;
+       }
+
+       spin_lock_init( &catas_lock );
+       KeInitializeDpc(  &dev->catas_err.timer_dpc, timer_dpc, dev );
+       KeInitializeTimer( &dev->catas_err.timer );
+       dev->catas_err.interval.QuadPart  = (-10)* (__int64)MTHCA_CATAS_POLL_INTERVAL;
+       KeSetTimerEx( &dev->catas_err.timer, dev->catas_err.interval, 
+               0, &dev->catas_err.timer_dpc );
+}
+
+void mthca_stop_catas_poll(struct mthca_dev *dev)
+{
+       SPIN_LOCK_PREP(lh);
+       
+       spin_lock_irq(&catas_lock, &lh);
+       dev->catas_err.stop = 1;
+       spin_unlock_irq(&lh);
+
+       KeCancelTimer(&dev->catas_err.timer);
+
+       if (dev->catas_err.map) {
+               iounmap(dev->catas_err.map, dev->catas_err.map_size);
+       }
+}
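The poll loop above rearms itself with a relative kernel timer: KeSetTimerEx interprets a negative QuadPart as a due time relative to now, in 100-nanosecond units, which is what the (-10) * interval conversion produces from a microsecond count (assuming the intervals here are expressed in microseconds; the same conversion appears later in wait_go_bit and mthca_cmd_wait). A minimal sketch of that conversion, for illustration:

        /* Illustrative only: microseconds -> relative KTIMER/KeWait due time. */
        static LARGE_INTEGER example_relative_due_time(u64 usecs)
        {
                LARGE_INTEGER due;

                due.QuadPart = (-10) * (__int64)usecs;  /* negative = relative, 100ns units */
                return due;
        }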
diff --git a/trunk/hw/mthca/kernel/mthca_cmd.c b/trunk/hw/mthca/kernel/mthca_cmd.c
new file mode 100644 (file)
index 0000000..0820e97
--- /dev/null
@@ -0,0 +1,1809 @@
+/*
+ * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
+ * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id: mthca_cmd.c 3047 2005-08-10 03:59:35Z roland $
+ */
+
+#include <ib_mad.h>
+
+#include "mthca_dev.h"
+#if defined(EVENT_TRACING)
+#ifdef offsetof
+#undef offsetof
+#endif
+#include "mthca_cmd.tmh"
+#endif
+#include "mthca_config_reg.h"
+#include "mthca_cmd.h"
+#include "mthca_memfree.h"
+
+#define CMD_POLL_TOKEN 0xffff
+
+enum {
+       HCR_IN_PARAM_OFFSET    = 0x00,
+       HCR_IN_MODIFIER_OFFSET = 0x08,
+       HCR_OUT_PARAM_OFFSET   = 0x0c,
+       HCR_TOKEN_OFFSET       = 0x14,
+       HCR_STATUS_OFFSET      = 0x18,
+
+       HCR_OPMOD_SHIFT        = 12,
+       HCA_E_BIT              = 22,
+       HCR_GO_BIT             = 23
+};
+
+enum {
+       /* initialization and general commands */
+       CMD_SYS_EN          = 0x1,
+       CMD_SYS_DIS         = 0x2,
+       CMD_MAP_FA          = 0xfff,
+       CMD_UNMAP_FA        = 0xffe,
+       CMD_RUN_FW          = 0xff6,
+       CMD_MOD_STAT_CFG    = 0x34,
+       CMD_QUERY_DEV_LIM   = 0x3,
+       CMD_QUERY_FW        = 0x4,
+       CMD_ENABLE_LAM      = 0xff8,
+       CMD_DISABLE_LAM     = 0xff7,
+       CMD_QUERY_DDR       = 0x5,
+       CMD_QUERY_ADAPTER   = 0x6,
+       CMD_INIT_HCA        = 0x7,
+       CMD_CLOSE_HCA       = 0x8,
+       CMD_INIT_IB         = 0x9,
+       CMD_CLOSE_IB        = 0xa,
+       CMD_QUERY_HCA       = 0xb,
+       CMD_SET_IB          = 0xc,
+       CMD_ACCESS_DDR      = 0x2e,
+       CMD_MAP_ICM         = 0xffa,
+       CMD_UNMAP_ICM       = 0xff9,
+       CMD_MAP_ICM_AUX     = 0xffc,
+       CMD_UNMAP_ICM_AUX   = 0xffb,
+       CMD_SET_ICM_SIZE    = 0xffd,
+
+       /* TPT commands */
+       CMD_SW2HW_MPT       = 0xd,
+       CMD_QUERY_MPT       = 0xe,
+       CMD_HW2SW_MPT       = 0xf,
+       CMD_READ_MTT        = 0x10,
+       CMD_WRITE_MTT       = 0x11,
+       CMD_SYNC_TPT        = 0x2f,
+
+       /* EQ commands */
+       CMD_MAP_EQ          = 0x12,
+       CMD_SW2HW_EQ        = 0x13,
+       CMD_HW2SW_EQ        = 0x14,
+       CMD_QUERY_EQ        = 0x15,
+
+       /* CQ commands */
+       CMD_SW2HW_CQ        = 0x16,
+       CMD_HW2SW_CQ        = 0x17,
+       CMD_QUERY_CQ        = 0x18,
+       CMD_RESIZE_CQ       = 0x2c,
+
+       /* SRQ commands */
+       CMD_SW2HW_SRQ       = 0x35,
+       CMD_HW2SW_SRQ       = 0x36,
+       CMD_QUERY_SRQ       = 0x37,
+       CMD_ARM_SRQ         = 0x40,
+
+       /* QP/EE commands */
+       CMD_RST2INIT_QPEE   = 0x19,
+       CMD_INIT2RTR_QPEE   = 0x1a,
+       CMD_RTR2RTS_QPEE    = 0x1b,
+       CMD_RTS2RTS_QPEE    = 0x1c,
+       CMD_SQERR2RTS_QPEE  = 0x1d,
+       CMD_2ERR_QPEE       = 0x1e,
+       CMD_RTS2SQD_QPEE    = 0x1f,
+       CMD_SQD2SQD_QPEE    = 0x38,
+       CMD_SQD2RTS_QPEE    = 0x20,
+       CMD_ERR2RST_QPEE    = 0x21,
+       CMD_QUERY_QPEE      = 0x22,
+       CMD_INIT2INIT_QPEE  = 0x2d,
+       CMD_SUSPEND_QPEE    = 0x32,
+       CMD_UNSUSPEND_QPEE  = 0x33,
+       /* special QPs and management commands */
+       CMD_CONF_SPECIAL_QP = 0x23,
+       CMD_MAD_IFC         = 0x24,
+
+       /* multicast commands */
+       CMD_READ_MGM        = 0x25,
+       CMD_WRITE_MGM       = 0x26,
+       CMD_MGID_HASH       = 0x27,
+
+       /* miscellaneous commands */
+       CMD_DIAG_RPRT       = 0x30,
+       CMD_NOP             = 0x31,
+
+       /* debug commands */
+       CMD_QUERY_DEBUG_MSG = 0x2a,
+       CMD_SET_DEBUG_MSG   = 0x2b,
+};
+
+/*
+ * According to Mellanox code, FW may be starved and never complete
+ * commands.  So we can't use strict timeouts described in PRM -- we
+ * just arbitrarily select 60 seconds for now.
+ */
+#define CMD_POLL_N_TRIES               60
+
+enum {
+       CMD_TIME_CLASS_A = 60 * HZ,
+       CMD_TIME_CLASS_B = 60 * HZ,
+       CMD_TIME_CLASS_C = 60 * HZ
+};
+
+enum {
+       GO_BIT_TIMEOUT = 10 * HZ
+};
+
+#define GO_BIT_N_TRIES         5
+#define GO_BIT_STALL_TIMEOUT           ((GO_BIT_TIMEOUT/HZ)/GO_BIT_N_TRIES)            /* usecs */
+
+struct mthca_cmd_context {
+       KEVENT  event;
+       int               result;
+       int               next;
+       u64               out_param;
+       u16               token;
+       u8                status;
+};
+
+static inline int go_bit(struct mthca_dev *dev)
+{
+       return readl(dev->hcr + HCR_STATUS_OFFSET) &
+               _byteswap_ulong(1 << HCR_GO_BIT);
+}
+
+/* 
+*      Function: performs a busy-wait loop while polling the GO bit
+*      Return: 0 if the GO bit was cleared in time
+*/
+static int poll_go_bit(struct mthca_dev *dev)
+{
+       int i=0; /* init must be here !*/
+       
+       if (!go_bit(dev)) 
+               return 0;
+
+       for (; i<GO_BIT_N_TRIES; i++) {
+               /* Nope, stall for a little bit and try again. */
+               KeStallExecutionProcessor( GO_BIT_STALL_TIMEOUT );
+               if (!go_bit(dev))
+                       return 0;
+       }               
+       
+       return 1;
+}
+
+/* 
+* Function: puts the thread on hold while polling the GO bit
+* Return: 0 if the GO bit was cleared in time
+* Note: the function makes about CMD_POLL_N_TRIES polls
+*/
+static int wait_go_bit(struct mthca_dev *dev, unsigned long timeout_usecs)
+{
+       u64 start, end;
+       LARGE_INTEGER  interval;
+
+       if (!go_bit(dev))       return 0;
+
+       interval.QuadPart = -(__int64)(((u64)(timeout_usecs) * 10) /    CMD_POLL_N_TRIES);
+       start = MT_time_get_stamp();
+       end = start + MT_USECS_TO_TICKS(timeout_usecs);
+       while (go_bit(dev) && time_before( MT_time_get_stamp(), end )) {
+               KeDelayExecutionThread( KernelMode, FALSE, &interval );
+       }
+
+       if (!go_bit(dev))       return 0;
+       return 1;       
+}
+
+
+static int mthca_cmd_post(struct mthca_dev *dev,
+                         u64 in_param,
+                         u64 out_param,
+                         u32 in_modifier,
+                         u8 op_modifier,
+                         u16 op,
+                         u16 token,
+                         int event)
+{
+       int err = 0;
+
+       if (down_interruptible(&dev->cmd.hcr_mutex))
+               return -EINTR;
+
+       if (event && wait_go_bit(dev,GO_BIT_TIMEOUT)) {
+               err = -EAGAIN;
+               goto out;
+       }
+
+       /*
+        * We use writel (instead of something like memcpy_toio)
+        * because writes of less than 32 bits to the HCR don't work
+        * (and some architectures such as ia64 implement memcpy_toio
+        * in terms of writeb).
+        */
+       __raw_writel((u32) cl_hton32((u32)(in_param >> 32)),           (u8 *)dev->hcr + 0 * 4);
+       __raw_writel((u32) cl_hton32((u32)(in_param & 0xfffffffful)), (u8 *) dev->hcr + 1 * 4);
+       __raw_writel((u32) cl_hton32(in_modifier),              (u8 *)dev->hcr + 2 * 4);
+       __raw_writel((u32) cl_hton32((u32)(out_param >> 32)),          (u8 *)dev->hcr + 3 * 4);
+       __raw_writel((u32) cl_hton32((u32)(out_param & 0xfffffffful)), (u8 *)dev->hcr + 4 * 4);
+       __raw_writel((u32) cl_hton32(token << 16),              (u8 *)dev->hcr + 5 * 4);
+
+       /* __raw_writel may not order writes. */
+       wmb();
+
+       __raw_writel((u32) cl_hton32((1 << HCR_GO_BIT)                |
+                                              (event ? (1 << HCA_E_BIT) : 0)   |
+                                              (op_modifier << HCR_OPMOD_SHIFT) |
+                                              op),                       (u8 *)dev->hcr + 6 * 4);
+
+out:
+       up(&dev->cmd.hcr_mutex);
+       return err;
+}
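The seven 32-bit writes above populate the HCA Command Register block according to the HCR_*_OFFSET constants defined earlier; for reference, the same layout written out as a struct (documentation only, not used by the driver):

        /* Illustrative only: byte layout of the HCR block as written by mthca_cmd_post. */
        struct example_hcr_layout {
                __be32 in_param_h;      /* 0x00  input parameter, high dword                   */
                __be32 in_param_l;      /* 0x04  input parameter, low dword                    */
                __be32 in_modifier;     /* 0x08  HCR_IN_MODIFIER_OFFSET                        */
                __be32 out_param_h;     /* 0x0c  output parameter, high dword                  */
                __be32 out_param_l;     /* 0x10  output parameter, low dword                   */
                __be32 token;           /* 0x14  HCR_TOKEN_OFFSET, token << 16                 */
                __be32 status_go_op;    /* 0x18  go bit, event bit, opcode modifier, opcode    */
        };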
+
+
+static int mthca_cmd_poll(struct mthca_dev *dev,
+                         u64 in_param,
+                         u64 *out_param,
+                         int out_is_imm,
+                         u32 in_modifier,
+                         u8 op_modifier,
+                         u16 op,
+                         unsigned long timeout,
+                         u8 *status)
+{
+       int err = 0;
+
+       if (sem_down_interruptible(&dev->cmd.poll_sem))
+               return -EINTR;
+
+       err = mthca_cmd_post(dev, in_param,
+                            out_param ? *out_param : 0,
+                            in_modifier, op_modifier,
+                            op, CMD_POLL_TOKEN, 0);
+       if (err)
+               goto out;
+
+       if (wait_go_bit(dev,timeout)) {
+               err = -EBUSY;
+               goto out;
+       }
+       
+       if (out_is_imm)
+               *out_param = 
+                       (u64) cl_ntoh32((__be32)
+                                         __raw_readl(dev->hcr + HCR_OUT_PARAM_OFFSET)) << 32 |
+                       (u64) cl_ntoh32((__be32)
+                                         __raw_readl(dev->hcr + HCR_OUT_PARAM_OFFSET + 4));
+
+       *status = (u8)(cl_ntoh32((__be32) __raw_readl(dev->hcr + HCR_STATUS_OFFSET)) >> 24);
+       if (*status)
+               HCA_PRINT(TRACE_LEVEL_VERBOSE,HCA_DBG_LOW,("mthca_cmd_wait: Command %02x completed with status %02x\n",
+                         op, *status));
+
+out:
+       sem_up(&dev->cmd.poll_sem);
+       return err;
+}
+
+void mthca_cmd_event(struct mthca_dev *dev,
+                    u16 token,
+                    u8  status,
+                    u64 out_param)
+{
+       struct mthca_cmd_context *context =
+               &dev->cmd.context[token & dev->cmd.token_mask];
+
+       /* previously timed out command completing at long last */
+       if (token != context->token)
+               return;
+
+       context->result    = 0;
+       context->status    = status;
+       context->out_param = out_param;
+
+       context->token += dev->cmd.token_mask + 1;
+
+       ASSERT(KeGetCurrentIrql() <= DISPATCH_LEVEL);
+       KeSetEvent( &context->event, 0, FALSE );
+}
+
+static int mthca_cmd_wait(struct mthca_dev *dev,
+                         u64 in_param,
+                         u64 *out_param,
+                         int out_is_imm,
+                         u32 in_modifier,
+                         u8 op_modifier,
+                         u16 op,
+                         unsigned long timeout,
+                         u8 *status)
+{
+       int err = 0;
+       struct mthca_cmd_context *context;
+       SPIN_LOCK_PREP(lh);
+
+       if (sem_down_interruptible(&dev->cmd.event_sem))
+               return -EINTR;
+
+       spin_lock( &dev->cmd.context_lock, &lh );
+       BUG_ON(dev->cmd.free_head < 0);
+       context = &dev->cmd.context[dev->cmd.free_head];
+       dev->cmd.free_head = context->next;
+       spin_unlock( &lh );
+
+       KeClearEvent(   &context->event );
+       err = mthca_cmd_post(dev, in_param,
+                            out_param ? *out_param : 0,
+                            in_modifier, op_modifier,
+                            op, context->token, 1);
+       if (err)
+               goto out;
+
+       {
+               // TODO: open questions:
+               // Could this ever be done on behalf of a user request, which would require UserRequest and UserMode?
+               // Could the wait be alertable?
+               NTSTATUS res;
+               LARGE_INTEGER  interval;
+               interval.QuadPart = (-10)* (__int64)timeout;
+               res = KeWaitForSingleObject( &context->event, Executive, KernelMode, FALSE,  &interval );
+               if (res != STATUS_SUCCESS) {
+                       err = -EBUSY;
+                       goto out;
+               }
+       }
+
+       *status = context->status;
+       if (*status)
+               HCA_PRINT(TRACE_LEVEL_VERBOSE,HCA_DBG_LOW,("mthca_cmd_wait: Command %02x completed with status %02x\n",
+                         op, *status));
+
+       if (out_is_imm)
+               *out_param = context->out_param;
+
+out:
+       spin_lock(&dev->cmd.context_lock, &lh);
+       context->next = dev->cmd.free_head;
+       dev->cmd.free_head = (int)(context - dev->cmd.context);
+       spin_unlock(&lh);
+
+       sem_up( &dev->cmd.event_sem );
+
+       return err;
+}
+
+/* Invoke a command with an output mailbox */
+static int mthca_cmd_box(struct mthca_dev *dev,
+                        u64 in_param,
+                        u64 out_param,
+                        u32 in_modifier,
+                        u8 op_modifier,
+                        u16 op,
+                        unsigned long timeout,
+                        u8 *status)
+{
+       if (dev->cmd.use_events)
+               return mthca_cmd_wait(dev, in_param, &out_param, 0,
+                                     in_modifier, op_modifier, op,
+                                     timeout, status);
+       else
+               return mthca_cmd_poll(dev, in_param, &out_param, 0,
+                                     in_modifier, op_modifier, op,
+                                     timeout, status);
+}
+
+/* Invoke a command with no output parameter */
+static int mthca_cmd(struct mthca_dev *dev,
+                    u64 in_param,
+                    u32 in_modifier,
+                    u8 op_modifier,
+                    u16 op,
+                    unsigned long timeout,
+                    u8 *status)
+{
+       return mthca_cmd_box(dev, in_param, 0, in_modifier,
+                            op_modifier, op, timeout, status);
+}
+
+/*
+ * Invoke a command with an immediate output parameter (and copy the
+ * output into the caller's out_param pointer after the command
+ * executes).
+ */
+static int mthca_cmd_imm(struct mthca_dev *dev,
+                        u64 in_param,
+                        u64 *out_param,
+                        u32 in_modifier,
+                        u8 op_modifier,
+                        u16 op,
+                        unsigned long timeout,
+                        u8 *status)
+{
+       if (dev->cmd.use_events)
+               return mthca_cmd_wait(dev, in_param, out_param, 1,
+                                     in_modifier, op_modifier, op,
+                                     timeout, status);
+       else
+               return mthca_cmd_poll(dev, in_param, out_param, 1,
+                                     in_modifier, op_modifier, op,
+                                     timeout, status);
+}
+
+int mthca_cmd_init(struct mthca_dev *dev)
+{
+       KeInitializeMutex(&dev->cmd.hcr_mutex, 0);
+       sem_init(&dev->cmd.poll_sem, 1, 1);
+       dev->cmd.use_events = 0;
+
+       dev->hcr = ioremap(pci_resource_start(dev, HCA_BAR_TYPE_HCR) + MTHCA_HCR_BASE,
+                          MTHCA_HCR_SIZE, &dev->hcr_size);
+       if (!dev->hcr) {
+               HCA_PRINT(TRACE_LEVEL_ERROR  ,HCA_DBG_LOW  ,("Couldn't map command register."));
+               return -ENOMEM;
+       }
+
+       dev->cmd.pool = pci_pool_create("mthca_cmd", dev,
+                                       MTHCA_MAILBOX_SIZE,
+                                       MTHCA_MAILBOX_SIZE, 0);
+       if (!dev->cmd.pool) {
+               iounmap(dev->hcr, dev->hcr_size);
+               return -ENOMEM;
+       }
+
+       return 0;
+}
+
+void mthca_cmd_cleanup(struct mthca_dev *dev)
+{
+       pci_pool_destroy(dev->cmd.pool);
+       iounmap(dev->hcr, dev->hcr_size);
+}
+
+/*
+ * Switch to using events to issue FW commands (should be called after
+ * event queue to command events has been initialized).
+ */
+int mthca_cmd_use_events(struct mthca_dev *dev)
+{
+       int i;
+
+       dev->cmd.context = kmalloc(dev->cmd.max_cmds *
+                                  sizeof (struct mthca_cmd_context),
+                                  GFP_KERNEL);
+       if (!dev->cmd.context)
+               return -ENOMEM;
+
+       for (i = 0; i < dev->cmd.max_cmds; ++i) {
+               dev->cmd.context[i].token = (u16)i;
+               dev->cmd.context[i].next = i + 1;
+               KeInitializeEvent( &dev->cmd.context[i].event, NotificationEvent, FALSE );
+       }
+
+       dev->cmd.context[dev->cmd.max_cmds - 1].next = -1;
+       dev->cmd.free_head = 0;
+
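+       /* event_sem counts free command contexts; callers take it before grabbing a context. */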
+       sem_init(&dev->cmd.event_sem, dev->cmd.max_cmds, LONG_MAX);
+       spin_lock_init(&dev->cmd.context_lock);
+
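+       /* token_mask = (smallest power of two >= max_cmds) - 1, so a command token can be mapped back to its context. */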
+       for (dev->cmd.token_mask = 1;
+            dev->cmd.token_mask < dev->cmd.max_cmds;
+            dev->cmd.token_mask <<= 1)
+               ; /* nothing */
+       --dev->cmd.token_mask;
+
+       dev->cmd.use_events = 1;
+       sem_down(&dev->cmd.poll_sem);
+
+       return 0;
+}
+
+/*
+ * Switch back to polling (used when shutting down the device)
+ */
+void mthca_cmd_use_polling(struct mthca_dev *dev)
+{
+       int i;
+
+       dev->cmd.use_events = 0;
+
+       for (i = 0; i < dev->cmd.max_cmds; ++i)
+               sem_down(&dev->cmd.event_sem);
+
+       kfree(dev->cmd.context);
+
+       sem_up(&dev->cmd.poll_sem);
+}
+
+struct mthca_mailbox *mthca_alloc_mailbox(struct mthca_dev *dev,
+                                         unsigned int gfp_mask)
+{
+       struct mthca_mailbox *mailbox;
+
+       mailbox = kmalloc(sizeof *mailbox, gfp_mask);
+       if (!mailbox)
+               return ERR_PTR(-ENOMEM);
+
+       mailbox->buf = pci_pool_alloc(dev->cmd.pool, gfp_mask, &mailbox->dma);
+       if (!mailbox->buf) {
+               kfree(mailbox);
+               return ERR_PTR(-ENOMEM);
+       }
+
+       return mailbox;
+}
+
+void mthca_free_mailbox(struct mthca_dev *dev, struct mthca_mailbox *mailbox)
+{
+       if (!mailbox)
+               return;
+
+       pci_pool_free(dev->cmd.pool, mailbox->buf, mailbox->dma);
+       kfree(mailbox);
+}
+
+int mthca_SYS_EN(struct mthca_dev *dev, u8 *status)
+{
+       u64 out;
+       int ret;
+
+       ret = mthca_cmd_imm(dev, 0, &out, 0, 0, CMD_SYS_EN, HZ, status);
+
+       if (*status == MTHCA_CMD_STAT_DDR_MEM_ERR)
+               HCA_PRINT(TRACE_LEVEL_WARNING,HCA_DBG_LOW,("SYS_EN DDR error: syn=%x, sock=%d, "
+                          "sladdr=%d, SPD source=%s\n",
+                          (int) (out >> 6) & 0xf, (int) (out >> 4) & 3,
+                          (int) (out >> 1) & 7, (int) out & 1 ? "NVMEM" : "DIMM"));
+
+       return ret;
+}
+
+int mthca_SYS_DIS(struct mthca_dev *dev, u8 *status)
+{
+       return mthca_cmd(dev, 0, 0, 0, CMD_SYS_DIS, HZ, status);
+}
+
+static int mthca_map_cmd(struct mthca_dev *dev, u16 op, struct mthca_icm *icm,
+                        u64 virt, u8 *status)
+{
+       struct mthca_mailbox *mailbox;
+       struct mthca_icm_iter iter;
+       __be64 *pages;
+       int lg;
+       int nent = 0;
+       unsigned long i;
+       int err = 0;
+       int ts = 0, tc = 0;
+       CPU_2_BE64_PREP;
+
+       mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
+       if (IS_ERR(mailbox))
+               return PTR_ERR(mailbox);
+       RtlZeroMemory(mailbox->buf, MTHCA_MAILBOX_SIZE);
+       pages = mailbox->buf;
+
+       for (mthca_icm_first(icm, &iter);
+            !mthca_icm_last(&iter);
+            mthca_icm_next(&iter)) {
+               /*
+                * We have to pass pages that are aligned to their
+                * size, so find the least significant 1 in the
+                * address or size and use that as our log2 size.
+                */
+               i = (u32)mthca_icm_addr(&iter) | mthca_icm_size(&iter);
+               lg = ffs(i) - 1;
+               if (lg < 12) {
+                       HCA_PRINT(TRACE_LEVEL_WARNING ,HCA_DBG_LOW ,("Got FW area not aligned to 4K (%I64x/%lx).\n",
+                                  (unsigned long long) mthca_icm_addr(&iter),
+                                  mthca_icm_size(&iter)));
+                       err = -EINVAL;
+                       goto out;
+               }
+               for (i = 0; i < mthca_icm_size(&iter) >> lg; ++i) {
+                       if (virt != -1) {
+                               pages[nent * 2] = cl_hton64(virt);
+                               virt += 1ULL << lg;
+                       }
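+                       /* Low bits of each physical address encode log2(chunk size) - 12. */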
+                       pages[nent * 2 + 1] = CPU_2_BE64((mthca_icm_addr(&iter) +
+                                                          (i << lg)) | (lg - 12));
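+                       /* ts accumulates the total size mapped in KB, tc counts chunks (for the trace below). */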
+                       ts += 1 << (lg - 10);
+                       ++tc;
+
+                       if (++nent == MTHCA_MAILBOX_SIZE / 16) {
+                               err = mthca_cmd(dev, mailbox->dma, nent, 0, op,
+                                               CMD_TIME_CLASS_B, status);
+                               if (err || *status)
+                                       goto out;
+                               nent = 0;
+                       }
+               }
+       }
+
+       if (nent)
+               err = mthca_cmd(dev, mailbox->dma, nent, 0, op,
+                               CMD_TIME_CLASS_B, status);
+
+       switch (op) {
+       case CMD_MAP_FA:
+               HCA_PRINT(TRACE_LEVEL_VERBOSE  ,HCA_DBG_LOW  ,("Mapped %d chunks/%d KB for FW.\n", tc, ts));
+               break;
+       case CMD_MAP_ICM_AUX:
+               HCA_PRINT(TRACE_LEVEL_VERBOSE  ,HCA_DBG_LOW  ,("Mapped %d chunks/%d KB for ICM aux.\n", tc, ts));
+               break;
+       case CMD_MAP_ICM:
+               HCA_PRINT(TRACE_LEVEL_VERBOSE,HCA_DBG_LOW,("Mapped %d chunks/%d KB at %I64x for ICM.\n",
+                         tc, ts, (unsigned long long) virt - (ts << 10)));
+               break;
+       }
+
+out:
+       mthca_free_mailbox(dev, mailbox);
+       return err;
+}
+
+int mthca_MAP_FA(struct mthca_dev *dev, struct mthca_icm *icm, u8 *status)
+{
+       return mthca_map_cmd(dev, CMD_MAP_FA, icm, (u64)-1, status);
+}
+
+int mthca_UNMAP_FA(struct mthca_dev *dev, u8 *status)
+{
+       return mthca_cmd(dev, 0, 0, 0, CMD_UNMAP_FA, CMD_TIME_CLASS_B, status);
+}
+
+int mthca_RUN_FW(struct mthca_dev *dev, u8 *status)
+{
+       return mthca_cmd(dev, 0, 0, 0, CMD_RUN_FW, CMD_TIME_CLASS_A, status);
+}
+
+int mthca_QUERY_FW(struct mthca_dev *dev, u8 *status)
+{
+       struct mthca_mailbox *mailbox;
+       u32 *outbox;
+       int err = 0;
+       u8 lg;
+
+#define QUERY_FW_OUT_SIZE             0x100
+#define QUERY_FW_VER_OFFSET            0x00
+#define QUERY_FW_MAX_CMD_OFFSET        0x0f
+#define QUERY_FW_ERR_START_OFFSET      0x30
+#define QUERY_FW_ERR_SIZE_OFFSET       0x38
+
+#define QUERY_FW_START_OFFSET          0x20
+#define QUERY_FW_END_OFFSET            0x28
+
+#define QUERY_FW_SIZE_OFFSET           0x00
+#define QUERY_FW_CLR_INT_BASE_OFFSET   0x20
+#define QUERY_FW_EQ_ARM_BASE_OFFSET    0x40
+#define QUERY_FW_EQ_SET_CI_BASE_OFFSET 0x48
+
+       mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
+       if (IS_ERR(mailbox))
+               return PTR_ERR(mailbox);
+       outbox = mailbox->buf;
+
+       err = mthca_cmd_box(dev, 0, mailbox->dma, 0, 0, CMD_QUERY_FW,
+                           CMD_TIME_CLASS_A, status);
+
+       if (err)
+               goto out;
+
+       MTHCA_GET(dev->fw_ver,   outbox, QUERY_FW_VER_OFFSET);
+       /*
+        * FW subminor version is at more significant bits than minor
+        * version, so swap here.
+        */
+       dev->fw_ver = (dev->fw_ver & 0xffff00000000ull) |
+               ((dev->fw_ver & 0xffff0000ull) >> 16) |
+               ((dev->fw_ver & 0x0000ffffull) << 16);
+
+       MTHCA_GET(lg, outbox, QUERY_FW_MAX_CMD_OFFSET);
+       dev->cmd.max_cmds = 1 << lg;
+       MTHCA_GET(dev->catas_err.addr, outbox, QUERY_FW_ERR_START_OFFSET);      
+       MTHCA_GET(dev->catas_err.size, outbox, QUERY_FW_ERR_SIZE_OFFSET);
+
+       HCA_PRINT(TRACE_LEVEL_VERBOSE,HCA_DBG_LOW,("FW version %012I64x, max commands %d\n",
+                 (unsigned long long) dev->fw_ver, dev->cmd.max_cmds));
+       HCA_PRINT(TRACE_LEVEL_VERBOSE,HCA_DBG_LOW,("Catastrophic error buffer at 0x%I64x, size 0x%x\n",
+               (unsigned long long) dev->catas_err.addr, dev->catas_err.size));
+
+
+       if (mthca_is_memfree(dev)) {
+               MTHCA_GET(dev->fw.arbel.fw_pages,       outbox, QUERY_FW_SIZE_OFFSET);
+               MTHCA_GET(dev->fw.arbel.clr_int_base,   outbox, QUERY_FW_CLR_INT_BASE_OFFSET);
+               MTHCA_GET(dev->fw.arbel.eq_arm_base,    outbox, QUERY_FW_EQ_ARM_BASE_OFFSET);
+               MTHCA_GET(dev->fw.arbel.eq_set_ci_base, outbox, QUERY_FW_EQ_SET_CI_BASE_OFFSET);
+               HCA_PRINT(TRACE_LEVEL_VERBOSE  ,HCA_DBG_LOW  ,("FW size %d KB\n", dev->fw.arbel.fw_pages << 2));
+
+               /*
+                * Arbel page size is always 4 KB; round up number of
+                * system pages needed.
+                */
+               dev->fw.arbel.fw_pages =
+                       ALIGN(dev->fw.arbel.fw_pages, PAGE_SIZE >> 12) >>
+                               (PAGE_SHIFT - 12);
+
+               HCA_PRINT(TRACE_LEVEL_VERBOSE,HCA_DBG_LOW,("Clear int @ %I64x, EQ arm @ %I64x, EQ set CI @ %I64x\n",
+                         (unsigned long long) dev->fw.arbel.clr_int_base,
+                         (unsigned long long) dev->fw.arbel.eq_arm_base,
+                         (unsigned long long) dev->fw.arbel.eq_set_ci_base));
+       } else {
+               MTHCA_GET(dev->fw.tavor.fw_start, outbox, QUERY_FW_START_OFFSET);
+               MTHCA_GET(dev->fw.tavor.fw_end,   outbox, QUERY_FW_END_OFFSET);
+
+               HCA_PRINT(TRACE_LEVEL_VERBOSE,HCA_DBG_LOW,("FW size %d KB (start %I64x, end %I64x)\n",
+                         (int) ((dev->fw.tavor.fw_end - dev->fw.tavor.fw_start) >> 10),
+                         (unsigned long long) dev->fw.tavor.fw_start,
+                         (unsigned long long) dev->fw.tavor.fw_end));
+       }
+
+out:
+       mthca_free_mailbox(dev, mailbox);
+       return err;
+}
+
+int mthca_ENABLE_LAM(struct mthca_dev *dev, u8 *status)
+{
+       struct mthca_mailbox *mailbox;
+       u8 info;
+       u32 *outbox;
+       int err = 0;
+
+#define ENABLE_LAM_OUT_SIZE         0x100
+#define ENABLE_LAM_START_OFFSET     0x00
+#define ENABLE_LAM_END_OFFSET       0x08
+#define ENABLE_LAM_INFO_OFFSET      0x13
+
+#define ENABLE_LAM_INFO_HIDDEN_FLAG (1 << 4)
+#define ENABLE_LAM_INFO_ECC_MASK    0x3
+
+       mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
+       if (IS_ERR(mailbox))
+               return PTR_ERR(mailbox);
+       outbox = mailbox->buf;
+
+       err = mthca_cmd_box(dev, 0, mailbox->dma, 0, 0, CMD_ENABLE_LAM,
+                           CMD_TIME_CLASS_C, status);
+
+       if (err)
+               goto out;
+
+       if (*status == MTHCA_CMD_STAT_LAM_NOT_PRE)
+               goto out;
+
+       MTHCA_GET(dev->ddr_start, outbox, ENABLE_LAM_START_OFFSET);
+       MTHCA_GET(dev->ddr_end,   outbox, ENABLE_LAM_END_OFFSET);
+       MTHCA_GET(info,           outbox, ENABLE_LAM_INFO_OFFSET);
+
+       if (!!(info & ENABLE_LAM_INFO_HIDDEN_FLAG) !=
+           !!(dev->mthca_flags & MTHCA_FLAG_DDR_HIDDEN)) {
+               HCA_PRINT(TRACE_LEVEL_INFORMATION ,HCA_DBG_LOW ,("FW reports that HCA-attached memory "
+                          "is %s hidden; does not match PCI config\n",
+                          (info & ENABLE_LAM_INFO_HIDDEN_FLAG)?
+                          "" : "not"));
+       }
+       if (info & ENABLE_LAM_INFO_HIDDEN_FLAG)
+               HCA_PRINT(TRACE_LEVEL_VERBOSE  ,HCA_DBG_LOW  ,("HCA-attached memory is hidden.\n"));
+
+       HCA_PRINT(TRACE_LEVEL_VERBOSE,HCA_DBG_LOW,("HCA memory size %d KB (start %I64x, end %I64x)\n",
+                 (int) ((dev->ddr_end - dev->ddr_start) >> 10),
+                 (unsigned long long) dev->ddr_start,
+                 (unsigned long long) dev->ddr_end));
+
+out:
+       mthca_free_mailbox(dev, mailbox);
+       return err;
+}
+
+int mthca_DISABLE_LAM(struct mthca_dev *dev, u8 *status)
+{
+       return mthca_cmd(dev, 0, 0, 0, CMD_SYS_DIS, CMD_TIME_CLASS_C, status);
+}
+
+int mthca_QUERY_DDR(struct mthca_dev *dev, u8 *status)
+{
+       struct mthca_mailbox *mailbox;
+       u8 info;
+       u32 *outbox;
+       int err = 0;
+
+#define QUERY_DDR_OUT_SIZE         0x100
+#define QUERY_DDR_START_OFFSET     0x00
+#define QUERY_DDR_END_OFFSET       0x08
+#define QUERY_DDR_INFO_OFFSET      0x13
+
+#define QUERY_DDR_INFO_HIDDEN_FLAG (1 << 4)
+#define QUERY_DDR_INFO_ECC_MASK    0x3
+
+       mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
+       if (IS_ERR(mailbox))
+               return PTR_ERR(mailbox);
+       outbox = mailbox->buf;
+
+       err = mthca_cmd_box(dev, 0, mailbox->dma, 0, 0, CMD_QUERY_DDR,
+                           CMD_TIME_CLASS_A, status);
+
+       if (err)
+               goto out;
+
+       MTHCA_GET(dev->ddr_start, outbox, QUERY_DDR_START_OFFSET);
+       MTHCA_GET(dev->ddr_end,   outbox, QUERY_DDR_END_OFFSET);
+       MTHCA_GET(info,           outbox, QUERY_DDR_INFO_OFFSET);
+
+       if (!!(info & QUERY_DDR_INFO_HIDDEN_FLAG) !=
+           !!(dev->mthca_flags & MTHCA_FLAG_DDR_HIDDEN)) {
+
+               HCA_PRINT(TRACE_LEVEL_INFORMATION ,HCA_DBG_LOW ,("FW reports that HCA-attached memory "
+                          "is %s hidden; does not match PCI config\n",
+                          (info & QUERY_DDR_INFO_HIDDEN_FLAG) ?
+                          "" : "not"));
+       }
+       if (info & QUERY_DDR_INFO_HIDDEN_FLAG)
+               HCA_PRINT(TRACE_LEVEL_VERBOSE  ,HCA_DBG_LOW  ,("HCA-attached memory is hidden.\n"));
+
+       HCA_PRINT(TRACE_LEVEL_VERBOSE,HCA_DBG_LOW,("HCA memory size %d KB (start %I64x, end %I64x)\n",
+                 (int) ((dev->ddr_end - dev->ddr_start) >> 10),
+                 (unsigned long long) dev->ddr_start,
+                 (unsigned long long) dev->ddr_end));
+
+out:
+       mthca_free_mailbox(dev, mailbox);
+       return err;
+}
+
+int mthca_QUERY_DEV_LIM(struct mthca_dev *dev,
+                       struct mthca_dev_lim *dev_lim, u8 *status)
+{
+       struct mthca_mailbox *mailbox;
+       u32 *outbox;
+       u8 field;
+       u16 size;
+       int err;
+
+#define QUERY_DEV_LIM_OUT_SIZE             0x100
+#define QUERY_DEV_LIM_MAX_SRQ_SZ_OFFSET     0x10
+#define QUERY_DEV_LIM_MAX_QP_SZ_OFFSET      0x11
+#define QUERY_DEV_LIM_RSVD_QP_OFFSET        0x12
+#define QUERY_DEV_LIM_MAX_QP_OFFSET         0x13
+#define QUERY_DEV_LIM_RSVD_SRQ_OFFSET       0x14
+#define QUERY_DEV_LIM_MAX_SRQ_OFFSET        0x15
+#define QUERY_DEV_LIM_RSVD_EEC_OFFSET       0x16
+#define QUERY_DEV_LIM_MAX_EEC_OFFSET        0x17
+#define QUERY_DEV_LIM_MAX_CQ_SZ_OFFSET      0x19
+#define QUERY_DEV_LIM_RSVD_CQ_OFFSET        0x1a
+#define QUERY_DEV_LIM_MAX_CQ_OFFSET         0x1b
+#define QUERY_DEV_LIM_MAX_MPT_OFFSET        0x1d
+#define QUERY_DEV_LIM_RSVD_EQ_OFFSET        0x1e
+#define QUERY_DEV_LIM_MAX_EQ_OFFSET         0x1f
+#define QUERY_DEV_LIM_RSVD_MTT_OFFSET       0x20
+#define QUERY_DEV_LIM_MAX_MRW_SZ_OFFSET     0x21
+#define QUERY_DEV_LIM_RSVD_MRW_OFFSET       0x22
+#define QUERY_DEV_LIM_MAX_MTT_SEG_OFFSET    0x23
+#define QUERY_DEV_LIM_MAX_AV_OFFSET         0x27
+#define QUERY_DEV_LIM_MAX_REQ_QP_OFFSET     0x29
+#define QUERY_DEV_LIM_MAX_RES_QP_OFFSET     0x2b
+#define QUERY_DEV_LIM_MAX_RDMA_OFFSET       0x2f
+#define QUERY_DEV_LIM_RSZ_SRQ_OFFSET        0x33
+#define QUERY_DEV_LIM_ACK_DELAY_OFFSET      0x35
+#define QUERY_DEV_LIM_MTU_WIDTH_OFFSET      0x36
+#define QUERY_DEV_LIM_VL_PORT_OFFSET        0x37
+#define QUERY_DEV_LIM_MAX_GID_OFFSET        0x3b
+#define QUERY_DEV_LIM_MAX_PKEY_OFFSET       0x3f
+#define QUERY_DEV_LIM_FLAGS_OFFSET          0x44
+#define QUERY_DEV_LIM_RSVD_UAR_OFFSET       0x48
+#define QUERY_DEV_LIM_UAR_SZ_OFFSET         0x49
+#define QUERY_DEV_LIM_PAGE_SZ_OFFSET        0x4b
+#define QUERY_DEV_LIM_MAX_SG_OFFSET         0x51
+#define QUERY_DEV_LIM_MAX_DESC_SZ_OFFSET    0x52
+#define QUERY_DEV_LIM_MAX_SG_RQ_OFFSET      0x55
+#define QUERY_DEV_LIM_MAX_DESC_SZ_RQ_OFFSET 0x56
+#define QUERY_DEV_LIM_MAX_QP_MCG_OFFSET     0x61
+#define QUERY_DEV_LIM_RSVD_MCG_OFFSET       0x62
+#define QUERY_DEV_LIM_MAX_MCG_OFFSET        0x63
+#define QUERY_DEV_LIM_RSVD_PD_OFFSET        0x64
+#define QUERY_DEV_LIM_MAX_PD_OFFSET         0x65
+#define QUERY_DEV_LIM_RSVD_RDD_OFFSET       0x66
+#define QUERY_DEV_LIM_MAX_RDD_OFFSET        0x67
+#define QUERY_DEV_LIM_EEC_ENTRY_SZ_OFFSET   0x80
+#define QUERY_DEV_LIM_QPC_ENTRY_SZ_OFFSET   0x82
+#define QUERY_DEV_LIM_EEEC_ENTRY_SZ_OFFSET  0x84
+#define QUERY_DEV_LIM_EQPC_ENTRY_SZ_OFFSET  0x86
+#define QUERY_DEV_LIM_EQC_ENTRY_SZ_OFFSET   0x88
+#define QUERY_DEV_LIM_CQC_ENTRY_SZ_OFFSET   0x8a
+#define QUERY_DEV_LIM_SRQ_ENTRY_SZ_OFFSET   0x8c
+#define QUERY_DEV_LIM_UAR_ENTRY_SZ_OFFSET   0x8e
+#define QUERY_DEV_LIM_MTT_ENTRY_SZ_OFFSET   0x90
+#define QUERY_DEV_LIM_MPT_ENTRY_SZ_OFFSET   0x92
+#define QUERY_DEV_LIM_PBL_SZ_OFFSET         0x96
+#define QUERY_DEV_LIM_BMME_FLAGS_OFFSET     0x97
+#define QUERY_DEV_LIM_RSVD_LKEY_OFFSET      0x98
+#define QUERY_DEV_LIM_LAMR_OFFSET           0x9f
+#define QUERY_DEV_LIM_MAX_ICM_SZ_OFFSET     0xa0
+
+       mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
+       if (IS_ERR(mailbox))
+               return PTR_ERR(mailbox);
+       outbox = mailbox->buf;
+
+       err = mthca_cmd_box(dev, 0, mailbox->dma, 0, 0, CMD_QUERY_DEV_LIM,
+                           CMD_TIME_CLASS_A, status);
+
+       if (err)
+               goto out;
+
+       MTHCA_GET(field, outbox, QUERY_DEV_LIM_RSVD_QP_OFFSET);
+       dev_lim->reserved_qps = 1 << (field & 0xf);
+       MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_QP_OFFSET);
+       dev_lim->max_qps = 1 << (field & 0x1f);
+       MTHCA_GET(field, outbox, QUERY_DEV_LIM_RSVD_SRQ_OFFSET);
+       dev_lim->reserved_srqs = 1 << (field >> 4);
+       MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_SRQ_OFFSET);
+       dev_lim->max_srqs = 1 << (field & 0x1f);
+       MTHCA_GET(field, outbox, QUERY_DEV_LIM_RSVD_EEC_OFFSET);
+       dev_lim->reserved_eecs = 1 << (field & 0xf);
+       MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_EEC_OFFSET);
+       dev_lim->max_eecs = 1 << (field & 0x1f);
+       MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_CQ_SZ_OFFSET);
+       dev_lim->max_cq_sz = 1 << field;
+       MTHCA_GET(field, outbox, QUERY_DEV_LIM_RSVD_CQ_OFFSET);
+       dev_lim->reserved_cqs = 1 << (field & 0xf);
+       MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_CQ_OFFSET);
+       dev_lim->max_cqs = 1 << (field & 0x1f);
+       MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_MPT_OFFSET);
+       dev_lim->max_mpts = 1 << (field & 0x3f);
+       MTHCA_GET(field, outbox, QUERY_DEV_LIM_RSVD_EQ_OFFSET);
+       dev_lim->reserved_eqs = 1 << (field & 0xf);
+       MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_EQ_OFFSET);
+       dev_lim->max_eqs = 1 << (field & 0x7);
+       MTHCA_GET(field, outbox, QUERY_DEV_LIM_RSVD_MTT_OFFSET);
+       dev_lim->reserved_mtts = 1 << (field >> 4);
+       MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_MRW_SZ_OFFSET);
+       dev_lim->max_mrw_sz = 1 << field;
+       MTHCA_GET(field, outbox, QUERY_DEV_LIM_RSVD_MRW_OFFSET);
+       dev_lim->reserved_mrws = 1 << (field & 0xf);
+       MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_MTT_SEG_OFFSET);
+       dev_lim->max_mtt_seg = 1 << (field & 0x3f);
+       MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_REQ_QP_OFFSET);
+       dev_lim->max_requester_per_qp = 1 << (field & 0x3f);
+       MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_RES_QP_OFFSET);
+       dev_lim->max_responder_per_qp = 1 << (field & 0x3f);
+       MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_RDMA_OFFSET);
+       dev_lim->max_rdma_global = 1 << (field & 0x3f);
+       MTHCA_GET(field, outbox, QUERY_DEV_LIM_ACK_DELAY_OFFSET);
+       dev_lim->local_ca_ack_delay = field & 0x1f;
+       MTHCA_GET(field, outbox, QUERY_DEV_LIM_MTU_WIDTH_OFFSET);
+       dev_lim->max_mtu        = field >> 4;
+       dev_lim->max_port_width = field & 0xf;
+       MTHCA_GET(field, outbox, QUERY_DEV_LIM_VL_PORT_OFFSET);
+       dev_lim->max_vl    = field >> 4;
+       dev_lim->num_ports = field & 0xf;
+       MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_GID_OFFSET);
+       dev_lim->max_gids = 1 << (field & 0xf);
+       MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_PKEY_OFFSET);
+       dev_lim->max_pkeys = 1 << (field & 0xf);
+       MTHCA_GET(dev_lim->flags, outbox, QUERY_DEV_LIM_FLAGS_OFFSET);
+       MTHCA_GET(field, outbox, QUERY_DEV_LIM_RSVD_UAR_OFFSET);
+       dev_lim->reserved_uars = field >> 4;
+       MTHCA_GET(field, outbox, QUERY_DEV_LIM_UAR_SZ_OFFSET);
+       dev_lim->uar_size = 1 << ((field & 0x3f) + 20);
+       MTHCA_GET(field, outbox, QUERY_DEV_LIM_PAGE_SZ_OFFSET);
+       dev_lim->min_page_sz = 1 << field;
+       MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_SG_OFFSET);
+       dev_lim->max_sg = field;
+
+       MTHCA_GET(size, outbox, QUERY_DEV_LIM_MAX_DESC_SZ_OFFSET);
+       dev_lim->max_desc_sz = size;
+
+       MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_QP_MCG_OFFSET);
+       dev_lim->max_qp_per_mcg = 1 << field;
+       MTHCA_GET(field, outbox, QUERY_DEV_LIM_RSVD_MCG_OFFSET);
+       dev_lim->reserved_mgms = field & 0xf;
+       MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_MCG_OFFSET);
+       dev_lim->max_mcgs = 1 << field;
+       MTHCA_GET(field, outbox, QUERY_DEV_LIM_RSVD_PD_OFFSET);
+       dev_lim->reserved_pds = field >> 4;
+       MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_PD_OFFSET);
+       dev_lim->max_pds = 1 << (field & 0x3f);
+       MTHCA_GET(field, outbox, QUERY_DEV_LIM_RSVD_RDD_OFFSET);
+       dev_lim->reserved_rdds = field >> 4;
+       MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_RDD_OFFSET);
+       dev_lim->max_rdds = 1 << (field & 0x3f);
+
+       MTHCA_GET(size, outbox, QUERY_DEV_LIM_EEC_ENTRY_SZ_OFFSET);
+       dev_lim->eec_entry_sz = size;
+       MTHCA_GET(size, outbox, QUERY_DEV_LIM_QPC_ENTRY_SZ_OFFSET);
+       dev_lim->qpc_entry_sz = size;
+       MTHCA_GET(size, outbox, QUERY_DEV_LIM_EEEC_ENTRY_SZ_OFFSET);
+       dev_lim->eeec_entry_sz = size;
+       MTHCA_GET(size, outbox, QUERY_DEV_LIM_EQPC_ENTRY_SZ_OFFSET);
+       dev_lim->eqpc_entry_sz = size;
+       MTHCA_GET(size, outbox, QUERY_DEV_LIM_EQC_ENTRY_SZ_OFFSET);
+       dev_lim->eqc_entry_sz = size;
+       MTHCA_GET(size, outbox, QUERY_DEV_LIM_CQC_ENTRY_SZ_OFFSET);
+       dev_lim->cqc_entry_sz = size;
+       MTHCA_GET(size, outbox, QUERY_DEV_LIM_SRQ_ENTRY_SZ_OFFSET);
+       dev_lim->srq_entry_sz = size;
+       MTHCA_GET(size, outbox, QUERY_DEV_LIM_UAR_ENTRY_SZ_OFFSET);
+       dev_lim->uar_scratch_entry_sz = size;
+
+       HCA_PRINT(TRACE_LEVEL_VERBOSE,HCA_DBG_LOW,("Max QPs: %d, reserved QPs: %d, entry size: %d\n",
+                 dev_lim->max_qps, dev_lim->reserved_qps, dev_lim->qpc_entry_sz));
+       HCA_PRINT(TRACE_LEVEL_VERBOSE,HCA_DBG_LOW,("Max SRQs: %d, reserved SRQs: %d, entry size: %d\n",
+                 dev_lim->max_srqs, dev_lim->reserved_srqs, dev_lim->srq_entry_sz));
+       HCA_PRINT(TRACE_LEVEL_VERBOSE,HCA_DBG_LOW,("Max CQs: %d, reserved CQs: %d, entry size: %d\n",
+                 dev_lim->max_cqs, dev_lim->reserved_cqs, dev_lim->cqc_entry_sz));
+       HCA_PRINT(TRACE_LEVEL_VERBOSE,HCA_DBG_LOW,("Max EQs: %d, reserved EQs: %d, entry size: %d\n",
+                 dev_lim->max_eqs, dev_lim->reserved_eqs, dev_lim->eqc_entry_sz));
+       HCA_PRINT(TRACE_LEVEL_VERBOSE,HCA_DBG_LOW,("reserved MPTs: %d, reserved MTTs: %d\n",
+                 dev_lim->reserved_mrws, dev_lim->reserved_mtts));
+       HCA_PRINT(TRACE_LEVEL_VERBOSE,HCA_DBG_LOW,("Max PDs: %d, reserved PDs: %d, reserved UARs: %d\n",
+                 dev_lim->max_pds, dev_lim->reserved_pds, dev_lim->reserved_uars));
+       HCA_PRINT(TRACE_LEVEL_VERBOSE,HCA_DBG_LOW,("Max QP/MCG: %d, reserved MGMs: %d\n",
+                 dev_lim->max_qp_per_mcg, dev_lim->reserved_mgms));
+       HCA_PRINT(TRACE_LEVEL_VERBOSE,HCA_DBG_LOW,("Max CQEs: %d, max WQEs: %d, max SRQ WQEs: %d\n",
+                 dev_lim->max_cq_sz, dev_lim->max_qp_sz, dev_lim->max_srq_sz));
+
+       HCA_PRINT(TRACE_LEVEL_VERBOSE  ,HCA_DBG_LOW  ,("Flags: %08x\n", dev_lim->flags));
+
+       if (mthca_is_memfree(dev)) {
+               MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_SRQ_SZ_OFFSET);
+               dev_lim->max_srq_sz = 1 << field;
+               MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_QP_SZ_OFFSET);
+               dev_lim->max_qp_sz = 1 << field;
+               MTHCA_GET(field, outbox, QUERY_DEV_LIM_RSZ_SRQ_OFFSET);
+               dev_lim->hca.arbel.resize_srq = field & 1;
+               MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_SG_RQ_OFFSET);
+               dev_lim->max_sg = min(field, dev_lim->max_sg);
+               MTHCA_GET(size, outbox, QUERY_DEV_LIM_MAX_DESC_SZ_RQ_OFFSET);
+               dev_lim->max_desc_sz = min((int)size, dev_lim->max_desc_sz);            
+               MTHCA_GET(size, outbox, QUERY_DEV_LIM_MPT_ENTRY_SZ_OFFSET);
+               dev_lim->mpt_entry_sz = size;
+               MTHCA_GET(field, outbox, QUERY_DEV_LIM_PBL_SZ_OFFSET);
+               dev_lim->hca.arbel.max_pbl_sz = 1 << (field & 0x3f);
+               MTHCA_GET(dev_lim->hca.arbel.bmme_flags, outbox,
+                         QUERY_DEV_LIM_BMME_FLAGS_OFFSET);
+               MTHCA_GET(dev_lim->hca.arbel.reserved_lkey, outbox,
+                         QUERY_DEV_LIM_RSVD_LKEY_OFFSET);
+               MTHCA_GET(field, outbox, QUERY_DEV_LIM_LAMR_OFFSET);
+               dev_lim->hca.arbel.lam_required = field & 1;
+               MTHCA_GET(dev_lim->hca.arbel.max_icm_sz, outbox,
+                         QUERY_DEV_LIM_MAX_ICM_SZ_OFFSET);
+
+               if (dev_lim->hca.arbel.bmme_flags & 1){
+                       HCA_PRINT(TRACE_LEVEL_VERBOSE,HCA_DBG_LOW,("Base MM extensions: yes "
+                                 "(flags %d, max PBL %d, rsvd L_Key %08x)\n",
+                                 dev_lim->hca.arbel.bmme_flags,
+                                 dev_lim->hca.arbel.max_pbl_sz,
+                                 dev_lim->hca.arbel.reserved_lkey));
+               }else{
+                       HCA_PRINT(TRACE_LEVEL_VERBOSE  ,HCA_DBG_LOW  ,("Base MM extensions: no\n"));
+               }
+
+               HCA_PRINT(TRACE_LEVEL_VERBOSE,HCA_DBG_LOW,("Max ICM size %I64d MB\n",
+                         (unsigned long long) dev_lim->hca.arbel.max_icm_sz >> 20));
+       } else {
+               MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_SRQ_SZ_OFFSET);
+               dev_lim->max_srq_sz = (1 << field) - 1;
+               MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_QP_SZ_OFFSET);
+               dev_lim->max_qp_sz = (1 << field) - 1;
+               MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_AV_OFFSET);
+               dev_lim->hca.tavor.max_avs = 1 << (field & 0x3f);
+               dev_lim->mpt_entry_sz = MTHCA_MPT_ENTRY_SIZE;
+       }
+
+out:
+       mthca_free_mailbox(dev, mailbox);
+       return err;
+}
+
+static void get_board_id(u8 *vsd, char *board_id)
+{
+       int i;
+
+#define VSD_OFFSET_SIG1                0x00
+#define VSD_OFFSET_SIG2                0xde
+#define VSD_OFFSET_MLX_BOARD_ID        0xd0
+#define VSD_OFFSET_TS_BOARD_ID         0x20
+
+#define VSD_SIGNATURE_TOPSPIN          0x5ad
+
+       RtlZeroMemory(board_id, MTHCA_BOARD_ID_LEN);
+
+       if (cl_ntoh16(*(u16*)(vsd + VSD_OFFSET_SIG1)) == VSD_SIGNATURE_TOPSPIN &&
+           cl_ntoh16(*(u16*)(vsd + VSD_OFFSET_SIG2)) == VSD_SIGNATURE_TOPSPIN) {
+               strlcpy(board_id, (const char *)(vsd + VSD_OFFSET_TS_BOARD_ID), MTHCA_BOARD_ID_LEN);
+       } else {
+               /*
+                * The board ID is a string but the firmware byte
+                * swaps each 4-byte word before passing it back to
+                * us.  Therefore we need to swab it before printing.
+                */
+               for (i = 0; i < 4; ++i)
+                       ((u32 *) board_id)[i] =
+                               _byteswap_ulong(*(u32 *) (vsd + VSD_OFFSET_MLX_BOARD_ID + i * 4));
+       }
+}
+
+int mthca_QUERY_ADAPTER(struct mthca_dev *dev,
+                       struct mthca_adapter *adapter, u8 *status)
+{
+       struct mthca_mailbox *mailbox;
+       u32 *outbox;
+       int err;
+
+#define QUERY_ADAPTER_OUT_SIZE             0x100
+#define QUERY_ADAPTER_VENDOR_ID_OFFSET     0x00
+#define QUERY_ADAPTER_DEVICE_ID_OFFSET     0x04
+#define QUERY_ADAPTER_REVISION_ID_OFFSET   0x08
+#define QUERY_ADAPTER_INTA_PIN_OFFSET      0x10
+#define QUERY_ADAPTER_VSD_OFFSET           0x20
+
+       mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
+       if (IS_ERR(mailbox))
+               return PTR_ERR(mailbox);
+       outbox = mailbox->buf;
+
+       err = mthca_cmd_box(dev, 0, mailbox->dma, 0, 0, CMD_QUERY_ADAPTER,
+                           CMD_TIME_CLASS_A, status);
+
+       if (err)
+               goto out;
+
+       MTHCA_GET(adapter->vendor_id, outbox,   QUERY_ADAPTER_VENDOR_ID_OFFSET);
+       MTHCA_GET(adapter->device_id, outbox,   QUERY_ADAPTER_DEVICE_ID_OFFSET);
+       MTHCA_GET(adapter->revision_id, outbox, QUERY_ADAPTER_REVISION_ID_OFFSET);
+       MTHCA_GET(adapter->inta_pin, outbox,    QUERY_ADAPTER_INTA_PIN_OFFSET);
+
+       get_board_id((u8*)outbox + QUERY_ADAPTER_VSD_OFFSET,
+                    adapter->board_id);
+
+out:
+       mthca_free_mailbox(dev, mailbox);
+       return err;
+}
+
+int mthca_INIT_HCA(struct mthca_dev *dev,
+                  struct mthca_init_hca_param *param,
+                  u8 *status)
+{
+       struct mthca_mailbox *mailbox;
+       __be32 *inbox;
+       int err;
+
+#define INIT_HCA_IN_SIZE                0x200
+#define INIT_HCA_FLAGS_OFFSET           0x014
+#define INIT_HCA_QPC_OFFSET             0x020
+#define  INIT_HCA_QPC_BASE_OFFSET       (INIT_HCA_QPC_OFFSET + 0x10)
+#define  INIT_HCA_LOG_QP_OFFSET         (INIT_HCA_QPC_OFFSET + 0x17)
+#define  INIT_HCA_EEC_BASE_OFFSET       (INIT_HCA_QPC_OFFSET + 0x20)
+#define  INIT_HCA_LOG_EEC_OFFSET        (INIT_HCA_QPC_OFFSET + 0x27)
+#define  INIT_HCA_SRQC_BASE_OFFSET      (INIT_HCA_QPC_OFFSET + 0x28)
+#define  INIT_HCA_LOG_SRQ_OFFSET        (INIT_HCA_QPC_OFFSET + 0x2f)
+#define  INIT_HCA_CQC_BASE_OFFSET       (INIT_HCA_QPC_OFFSET + 0x30)
+#define  INIT_HCA_LOG_CQ_OFFSET         (INIT_HCA_QPC_OFFSET + 0x37)
+#define  INIT_HCA_EQPC_BASE_OFFSET      (INIT_HCA_QPC_OFFSET + 0x40)
+#define  INIT_HCA_EEEC_BASE_OFFSET      (INIT_HCA_QPC_OFFSET + 0x50)
+#define  INIT_HCA_EQC_BASE_OFFSET       (INIT_HCA_QPC_OFFSET + 0x60)
+#define  INIT_HCA_LOG_EQ_OFFSET         (INIT_HCA_QPC_OFFSET + 0x67)
+#define  INIT_HCA_RDB_BASE_OFFSET       (INIT_HCA_QPC_OFFSET + 0x70)
+#define INIT_HCA_UDAV_OFFSET            0x0b0
+#define  INIT_HCA_UDAV_LKEY_OFFSET      (INIT_HCA_UDAV_OFFSET + 0x0)
+#define  INIT_HCA_UDAV_PD_OFFSET        (INIT_HCA_UDAV_OFFSET + 0x4)
+#define INIT_HCA_MCAST_OFFSET           0x0c0
+#define  INIT_HCA_MC_BASE_OFFSET         (INIT_HCA_MCAST_OFFSET + 0x00)
+#define  INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET (INIT_HCA_MCAST_OFFSET + 0x12)
+#define  INIT_HCA_MC_HASH_SZ_OFFSET      (INIT_HCA_MCAST_OFFSET + 0x16)
+#define  INIT_HCA_LOG_MC_TABLE_SZ_OFFSET (INIT_HCA_MCAST_OFFSET + 0x1b)
+#define INIT_HCA_TPT_OFFSET              0x0f0
+#define  INIT_HCA_MPT_BASE_OFFSET        (INIT_HCA_TPT_OFFSET + 0x00)
+#define  INIT_HCA_MTT_SEG_SZ_OFFSET      (INIT_HCA_TPT_OFFSET + 0x09)
+#define  INIT_HCA_LOG_MPT_SZ_OFFSET      (INIT_HCA_TPT_OFFSET + 0x0b)
+#define  INIT_HCA_MTT_BASE_OFFSET        (INIT_HCA_TPT_OFFSET + 0x10)
+#define INIT_HCA_UAR_OFFSET              0x120
+#define  INIT_HCA_UAR_BASE_OFFSET        (INIT_HCA_UAR_OFFSET + 0x00)
+#define  INIT_HCA_UARC_SZ_OFFSET         (INIT_HCA_UAR_OFFSET + 0x09)
+#define  INIT_HCA_LOG_UAR_SZ_OFFSET      (INIT_HCA_UAR_OFFSET + 0x0a)
+#define  INIT_HCA_UAR_PAGE_SZ_OFFSET     (INIT_HCA_UAR_OFFSET + 0x0b)
+#define  INIT_HCA_UAR_SCATCH_BASE_OFFSET (INIT_HCA_UAR_OFFSET + 0x10)
+#define  INIT_HCA_UAR_CTX_BASE_OFFSET    (INIT_HCA_UAR_OFFSET + 0x18)
+
+       mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
+       if (IS_ERR(mailbox))
+               return PTR_ERR(mailbox);
+       inbox = mailbox->buf;
+
+       RtlZeroMemory(inbox, INIT_HCA_IN_SIZE);
+
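+       /* Flags word, bit 1: cleared on little-endian hosts, set on big-endian ones
+        * (presumably telling the HCA which byte order the host uses). */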
+#if defined(__LITTLE_ENDIAN)
+       *(inbox + INIT_HCA_FLAGS_OFFSET / 4) &= ~cl_hton32(1 << 1);
+#elif defined(__BIG_ENDIAN)
+       *(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cl_hton32(1 << 1);
+#else
+#error Host endianness not defined
+#endif
+       /* Check port for UD address vector: */
+       *(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cl_hton32(1);
+
+       /* We leave wqe_quota, responder_exu, etc as 0 (default) */
+
+       /* QPC/EEC/CQC/EQC/RDB attributes */
+
+       MTHCA_PUT(inbox, param->qpc_base,     INIT_HCA_QPC_BASE_OFFSET);
+       MTHCA_PUT(inbox, param->log_num_qps,  INIT_HCA_LOG_QP_OFFSET);
+       MTHCA_PUT(inbox, param->eec_base,     INIT_HCA_EEC_BASE_OFFSET);
+       MTHCA_PUT(inbox, param->log_num_eecs, INIT_HCA_LOG_EEC_OFFSET);
+       MTHCA_PUT(inbox, param->srqc_base,    INIT_HCA_SRQC_BASE_OFFSET);
+       MTHCA_PUT(inbox, param->log_num_srqs, INIT_HCA_LOG_SRQ_OFFSET);
+       MTHCA_PUT(inbox, param->cqc_base,     INIT_HCA_CQC_BASE_OFFSET);
+       MTHCA_PUT(inbox, param->log_num_cqs,  INIT_HCA_LOG_CQ_OFFSET);
+       MTHCA_PUT(inbox, param->eqpc_base,    INIT_HCA_EQPC_BASE_OFFSET);
+       MTHCA_PUT(inbox, param->eeec_base,    INIT_HCA_EEEC_BASE_OFFSET);
+       MTHCA_PUT(inbox, param->eqc_base,     INIT_HCA_EQC_BASE_OFFSET);
+       MTHCA_PUT(inbox, param->log_num_eqs,  INIT_HCA_LOG_EQ_OFFSET);
+       MTHCA_PUT(inbox, param->rdb_base,     INIT_HCA_RDB_BASE_OFFSET);
+
+       /* UD AV attributes */
+
+       /* multicast attributes */
+
+       MTHCA_PUT(inbox, param->mc_base,         INIT_HCA_MC_BASE_OFFSET);
+       MTHCA_PUT(inbox, param->log_mc_entry_sz, INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET);
+       MTHCA_PUT(inbox, param->mc_hash_sz,      INIT_HCA_MC_HASH_SZ_OFFSET);
+       MTHCA_PUT(inbox, param->log_mc_table_sz, INIT_HCA_LOG_MC_TABLE_SZ_OFFSET);
+
+       /* TPT attributes */
+
+       MTHCA_PUT(inbox, param->mpt_base,   INIT_HCA_MPT_BASE_OFFSET);
+       if (!mthca_is_memfree(dev))
+               MTHCA_PUT(inbox, param->mtt_seg_sz, INIT_HCA_MTT_SEG_SZ_OFFSET);
+       MTHCA_PUT(inbox, param->log_mpt_sz, INIT_HCA_LOG_MPT_SZ_OFFSET);
+       MTHCA_PUT(inbox, param->mtt_base,   INIT_HCA_MTT_BASE_OFFSET);
+
+       /* UAR attributes */
+       {
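+               /* UAR page size is encoded as log2(page size) - 12, so 0 means 4 KB pages. */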
+               u8 uar_page_sz = PAGE_SHIFT - 12;
+               MTHCA_PUT(inbox, uar_page_sz, INIT_HCA_UAR_PAGE_SZ_OFFSET);
+       }
+
+       MTHCA_PUT(inbox, param->uar_scratch_base, INIT_HCA_UAR_SCATCH_BASE_OFFSET);
+
+       if (mthca_is_memfree(dev)) {
+               MTHCA_PUT(inbox, param->log_uarc_sz, INIT_HCA_UARC_SZ_OFFSET);
+               MTHCA_PUT(inbox, param->log_uar_sz,  INIT_HCA_LOG_UAR_SZ_OFFSET);
+               MTHCA_PUT(inbox, param->uarc_base,   INIT_HCA_UAR_CTX_BASE_OFFSET);
+       }
+
+       err = mthca_cmd(dev, mailbox->dma, 0, 0, CMD_INIT_HCA, HZ, status);
+
+       mthca_free_mailbox(dev, mailbox);
+       return err;
+}
+
+int mthca_INIT_IB(struct mthca_dev *dev,
+                 struct mthca_init_ib_param *param,
+                 int port, u8 *status)
+{
+       struct mthca_mailbox *mailbox;
+       u32 *inbox;
+       int err;
+       u32 flags;
+
+#define INIT_IB_IN_SIZE          56
+#define INIT_IB_FLAGS_OFFSET     0x00
+#define INIT_IB_FLAG_SIG         (1 << 18)
+#define INIT_IB_FLAG_NG          (1 << 17)
+#define INIT_IB_FLAG_G0          (1 << 16)
+#define INIT_IB_VL_SHIFT         4
+#define INIT_IB_PORT_WIDTH_SHIFT 8
+#define INIT_IB_MTU_SHIFT        12
+#define INIT_IB_MAX_GID_OFFSET   0x06
+#define INIT_IB_MAX_PKEY_OFFSET  0x0a
+#define INIT_IB_GUID0_OFFSET     0x10
+#define INIT_IB_NODE_GUID_OFFSET 0x18
+#define INIT_IB_SI_GUID_OFFSET   0x20
+
+       mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
+       if (IS_ERR(mailbox))
+               return PTR_ERR(mailbox);
+       inbox = mailbox->buf;
+
+       RtlZeroMemory(inbox, INIT_IB_IN_SIZE);
+
+       flags = 0;
+       flags |= param->set_guid0     ? INIT_IB_FLAG_G0  : 0;
+       flags |= param->set_node_guid ? INIT_IB_FLAG_NG  : 0;
+       flags |= param->set_si_guid   ? INIT_IB_FLAG_SIG : 0;
+       flags |= param->vl_cap << INIT_IB_VL_SHIFT;
+       flags |= param->port_width << INIT_IB_PORT_WIDTH_SHIFT;
+       flags |= param->mtu_cap << INIT_IB_MTU_SHIFT;
+       MTHCA_PUT(inbox, flags, INIT_IB_FLAGS_OFFSET);
+
+       MTHCA_PUT(inbox, param->gid_cap,   INIT_IB_MAX_GID_OFFSET);
+       MTHCA_PUT(inbox, param->pkey_cap,  INIT_IB_MAX_PKEY_OFFSET);
+       MTHCA_PUT(inbox, param->guid0,     INIT_IB_GUID0_OFFSET);
+       MTHCA_PUT(inbox, param->node_guid, INIT_IB_NODE_GUID_OFFSET);
+       MTHCA_PUT(inbox, param->si_guid,   INIT_IB_SI_GUID_OFFSET);
+
+       err = mthca_cmd(dev, mailbox->dma, port, 0, CMD_INIT_IB,
+                       CMD_TIME_CLASS_A, status);
+
+       mthca_free_mailbox(dev, mailbox);
+       return err;
+}
+
+int mthca_CLOSE_IB(struct mthca_dev *dev, int port, u8 *status)
+{
+       return mthca_cmd(dev, 0, port, 0, CMD_CLOSE_IB, HZ, status);
+}
+
+int mthca_CLOSE_HCA(struct mthca_dev *dev, int panic, u8 *status)
+{
+       return mthca_cmd(dev, 0, 0, (u8)panic, CMD_CLOSE_HCA, HZ, status);
+}
+
+int mthca_SET_IB(struct mthca_dev *dev, struct mthca_set_ib_param *param,
+                int port, u8 *status)
+{
+       struct mthca_mailbox *mailbox;
+       u32 *inbox;
+       int err;
+       u32 flags = 0;
+
+#define SET_IB_IN_SIZE         0x40
+#define SET_IB_FLAGS_OFFSET    0x00
+#define SET_IB_FLAG_SIG        (1 << 18)
+#define SET_IB_FLAG_RQK        (1 <<  0)
+#define SET_IB_CAP_MASK_OFFSET 0x04
+#define SET_IB_SI_GUID_OFFSET  0x08
+
+       mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
+       if (IS_ERR(mailbox))
+               return PTR_ERR(mailbox);
+       inbox = mailbox->buf;
+
+       RtlZeroMemory(inbox, SET_IB_IN_SIZE);
+
+       flags |= param->set_si_guid     ? SET_IB_FLAG_SIG : 0;
+       flags |= param->reset_qkey_viol ? SET_IB_FLAG_RQK : 0;
+       MTHCA_PUT(inbox, flags, SET_IB_FLAGS_OFFSET);
+
+       MTHCA_PUT(inbox, param->cap_mask, SET_IB_CAP_MASK_OFFSET);
+       MTHCA_PUT(inbox, param->si_guid,  SET_IB_SI_GUID_OFFSET);
+
+       err = mthca_cmd(dev, mailbox->dma, port, 0, CMD_SET_IB,
+                       CMD_TIME_CLASS_B, status);
+
+       mthca_free_mailbox(dev, mailbox);
+       return err;
+}
+
+int mthca_MAP_ICM(struct mthca_dev *dev, struct mthca_icm *icm, u64 virt, u8 *status)
+{
+       return mthca_map_cmd(dev, CMD_MAP_ICM, icm, virt, status);
+}
+
+int mthca_MAP_ICM_page(struct mthca_dev *dev, u64 dma_addr, u64 virt, u8 *status)
+{
+       struct mthca_mailbox *mailbox;
+       __be64 *inbox;
+       int err;
+
+       mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
+       if (IS_ERR(mailbox))
+               return PTR_ERR(mailbox);
+       inbox = mailbox->buf;
+
+       inbox[0] = cl_hton64(virt);
+       inbox[1] = cl_hton64(dma_addr);
+
+       err = mthca_cmd(dev, mailbox->dma, 1, 0, CMD_MAP_ICM,
+                       CMD_TIME_CLASS_B, status);
+
+       mthca_free_mailbox(dev, mailbox);
+
+       if (!err)
+               HCA_PRINT(TRACE_LEVEL_VERBOSE,HCA_DBG_LOW,("Mapped page at %I64x to %I64x for ICM.\n",
+                         (unsigned long long) dma_addr, (unsigned long long) virt));
+
+       return err;
+}
+
+int mthca_UNMAP_ICM(struct mthca_dev *dev, u64 virt, u32 page_count, u8 *status)
+{
+       HCA_PRINT(TRACE_LEVEL_VERBOSE,HCA_DBG_LOW,("Unmapping %d pages at %I64x from ICM.\n",
+                 page_count, (unsigned long long) virt));
+
+       return mthca_cmd(dev, virt, page_count, 0, CMD_UNMAP_ICM, CMD_TIME_CLASS_B, status);
+}
+
+int mthca_MAP_ICM_AUX(struct mthca_dev *dev, struct mthca_icm *icm, u8 *status)
+{
+       return mthca_map_cmd(dev, CMD_MAP_ICM_AUX, icm, (u64)-1, status);
+}
+
+int mthca_UNMAP_ICM_AUX(struct mthca_dev *dev, u8 *status)
+{
+       return mthca_cmd(dev, 0, 0, 0, CMD_UNMAP_ICM_AUX, CMD_TIME_CLASS_B, status);
+}
+
+int mthca_SET_ICM_SIZE(struct mthca_dev *dev, u64 icm_size, u64 *aux_pages,
+                      u8 *status)
+{
+       int ret = mthca_cmd_imm(dev, icm_size, aux_pages, 0, 0, CMD_SET_ICM_SIZE,
+                               CMD_TIME_CLASS_A, status);
+
+       if (ret || *status)
+               return ret;
+
+       /*
+        * Arbel page size is always 4 KB; round up number of system
+        * pages needed.
+        */
+       *aux_pages = ALIGN(*aux_pages, PAGE_SIZE >> 12) >> (PAGE_SHIFT - 12);
+
+       return 0;
+}
+
+int mthca_SW2HW_MPT(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
+                   int mpt_index, u8 *status)
+{
+       return mthca_cmd(dev, mailbox->dma, mpt_index, 0, CMD_SW2HW_MPT,
+                        CMD_TIME_CLASS_B, status);
+}
+
+int mthca_HW2SW_MPT(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
+                   int mpt_index, u8 *status)
+{
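+       /* If no mailbox is supplied, op_modifier 1 presumably tells the FW not to return the MPT contents. */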
+       return mthca_cmd_box(dev, 0, mailbox ? mailbox->dma : 0, mpt_index,
+                            (u8)!mailbox, CMD_HW2SW_MPT,
+                            CMD_TIME_CLASS_B, status);
+}
+
+int mthca_WRITE_MTT(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
+                   int num_mtt, u8 *status)
+{
+       return mthca_cmd(dev, mailbox->dma, num_mtt, 0, CMD_WRITE_MTT,
+                        CMD_TIME_CLASS_B, status);
+}
+
+int mthca_SYNC_TPT(struct mthca_dev *dev, u8 *status)
+{
+       return mthca_cmd(dev, 0, 0, 0, CMD_SYNC_TPT, CMD_TIME_CLASS_B, status);
+}
+
+int mthca_MAP_EQ(struct mthca_dev *dev, u64 event_mask, int unmap,
+                int eq_num, u8 *status)
+{
+       HCA_PRINT(TRACE_LEVEL_VERBOSE,HCA_DBG_LOW,("%s mask %016I64x for eqn %d\n",
+                 unmap ? "Clearing" : "Setting",
+                 (unsigned long long) event_mask, eq_num));
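+       /* Bit 31 of the input modifier selects unmap; the low bits carry the EQ number. */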
+       return mthca_cmd(dev, event_mask, (unmap << 31) | eq_num,
+                        0, CMD_MAP_EQ, CMD_TIME_CLASS_B, status);
+}
+
+int mthca_SW2HW_EQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
+                  int eq_num, u8 *status)
+{
+       return mthca_cmd(dev, mailbox->dma, eq_num, 0, CMD_SW2HW_EQ,
+                        CMD_TIME_CLASS_A, status);
+}
+
+int mthca_HW2SW_EQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
+                  int eq_num, u8 *status)
+{
+       return mthca_cmd_box(dev, 0, mailbox->dma, eq_num, 0,
+                            CMD_HW2SW_EQ,
+                            CMD_TIME_CLASS_A, status);
+}
+
+int mthca_SW2HW_CQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
+                  int cq_num, u8 *status)
+{
+       return mthca_cmd(dev, mailbox->dma, cq_num, 0, CMD_SW2HW_CQ,
+                       CMD_TIME_CLASS_A, status);
+}
+
+int mthca_HW2SW_CQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
+                  int cq_num, u8 *status)
+{
+       return mthca_cmd_box(dev, 0, mailbox->dma, cq_num, 0,
+                            CMD_HW2SW_CQ,
+                            CMD_TIME_CLASS_A, status);
+}
+
+int mthca_SW2HW_SRQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
+                   int srq_num, u8 *status)
+{
+       return mthca_cmd(dev, mailbox->dma, srq_num, 0, CMD_SW2HW_SRQ,
+                       CMD_TIME_CLASS_A, status);
+}
+
+int mthca_HW2SW_SRQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
+                   int srq_num, u8 *status)
+{
+       return mthca_cmd_box(dev, 0, mailbox->dma, srq_num, 0,
+                            CMD_HW2SW_SRQ,
+                            CMD_TIME_CLASS_A, status);
+}
+
+int mthca_ARM_SRQ(struct mthca_dev *dev, int srq_num, int limit, u8 *status)
+{
+       return mthca_cmd(dev, limit, srq_num, 0, CMD_ARM_SRQ,
+                        CMD_TIME_CLASS_B, status);
+}
+
+int mthca_MODIFY_QP(struct mthca_dev *dev, int trans, u32 num,
+                   int is_ee, struct mthca_mailbox *mailbox, u32 optmask,
+                   u8 *status)
+{
+       enum {
+               MTHCA_TRANS_INVALID = 0,
+               MTHCA_TRANS_RST2INIT,
+               MTHCA_TRANS_INIT2INIT,
+               MTHCA_TRANS_INIT2RTR,
+               MTHCA_TRANS_RTR2RTS,
+               MTHCA_TRANS_RTS2RTS,
+               MTHCA_TRANS_SQERR2RTS,
+               MTHCA_TRANS_ANY2ERR,
+               MTHCA_TRANS_RTS2SQD,
+               MTHCA_TRANS_SQD2SQD,
+               MTHCA_TRANS_SQD2RTS,
+               MTHCA_TRANS_ANY2RST,
+       };
+       static const u16 op[] = {
+               0,                      /* MTHCA_TRANS_INVALID   */
+               CMD_RST2INIT_QPEE,      /* MTHCA_TRANS_RST2INIT  */
+               CMD_INIT2INIT_QPEE,     /* MTHCA_TRANS_INIT2INIT */
+               CMD_INIT2RTR_QPEE,      /* MTHCA_TRANS_INIT2RTR  */
+               CMD_RTR2RTS_QPEE,       /* MTHCA_TRANS_RTR2RTS   */
+               CMD_RTS2RTS_QPEE,       /* MTHCA_TRANS_RTS2RTS   */
+               CMD_SQERR2RTS_QPEE,     /* MTHCA_TRANS_SQERR2RTS */
+               CMD_2ERR_QPEE,          /* MTHCA_TRANS_ANY2ERR   */
+               CMD_RTS2SQD_QPEE,       /* MTHCA_TRANS_RTS2SQD   */
+               CMD_SQD2SQD_QPEE,       /* MTHCA_TRANS_SQD2SQD   */
+               CMD_SQD2RTS_QPEE,       /* MTHCA_TRANS_SQD2RTS   */
+               CMD_ERR2RST_QPEE        /* MTHCA_TRANS_ANY2RST   */
+       };
+       u8 op_mod = 0;
+       int my_mailbox = 0;
+       int err;
+
+       UNREFERENCED_PARAMETER(optmask);
+
+       if (trans < 0 || trans >= ARRAY_SIZE(op))
+               return -EINVAL;
+
+       if (trans == MTHCA_TRANS_ANY2RST) {
+               op_mod = 3;     /* don't write outbox, any->reset */
+
+               /* For debugging */
+               if (!mailbox) {
+                       mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
+                       if (!IS_ERR(mailbox)) {
+                               my_mailbox = 1;
+                               op_mod     = 2; /* write outbox, any->reset */
+                       } else
+                               mailbox = NULL;
+               }
+       } else {
+       #if 0
+               {
+                       int i;
+                       HCA_PRINT(TRACE_LEVEL_VERBOSE  ,HCA_DBG_LOW  ,("Dumping QP context:\n"));
+                       HCA_PRINT(TRACE_LEVEL_INFORMATION   ,HCA_DBG_LOW   ,("  opt param mask: %08x\n", cl_ntoh32(*(__be32 *)mailbox->buf)));
+                       for (i = 0; i < 0x100 / 4; ++i) {
+                               if (i % 8 == 0)
+                                       HCA_PRINT(TRACE_LEVEL_INFORMATION ,HCA_DBG_LOW ,("  [%02x] ", i * 4));
+                               HCA_PRINT(TRACE_LEVEL_INFORMATION ,HCA_DBG_LOW ,(" %08x",
+                                      cl_ntoh32(((__be32 *) mailbox->buf)[i + 2])));
+                               if ((i + 1) % 8 == 0)
+                                       HCA_PRINT(TRACE_LEVEL_INFORMATION   ,HCA_DBG_LOW   ,("\n"));
+                       }
+               }
+       #endif
+       }
+
+       if (trans == MTHCA_TRANS_ANY2RST) {
+               err = mthca_cmd_box(dev, 0, mailbox ? mailbox->dma : 0,
+                                   (!!is_ee << 24) | num, op_mod,
+                                   op[trans], CMD_TIME_CLASS_C, status);
+
+       #if 0
+               if (mailbox) {
+                       int i;
+                       HCA_PRINT(TRACE_LEVEL_VERBOSE  ,HCA_DBG_LOW  ,("Dumping QP context:\n"));
+                       HCA_PRINT(TRACE_LEVEL_INFORMATION   ,HCA_DBG_LOW   ,(" %08x\n", cl_ntoh32(*(__be32 *)mailbox->buf)));
+                       for (i = 0; i < 0x100 / 4; ++i) {
+                               if (i % 8 == 0)
+                                       HCA_PRINT(TRACE_LEVEL_INFORMATION   ,HCA_DBG_LOW   ,("[%02x] ", i * 4));
+                               HCA_PRINT(TRACE_LEVEL_INFORMATION ,HCA_DBG_LOW ,(" %08x",
+                                      cl_ntoh32(((__be32 *) mailbox->buf)[i + 2])));
+                               if ((i + 1) % 8 == 0)
+                                       HCA_PRINT(TRACE_LEVEL_INFORMATION   ,HCA_DBG_LOW   ,("\n"));
+                       }
+               }
+       #endif
+       } else
+               err = mthca_cmd(dev, mailbox->dma, (!!is_ee << 24) | num,
+                               op_mod, op[trans], CMD_TIME_CLASS_C, status);
+
+       if (my_mailbox)
+               mthca_free_mailbox(dev, mailbox);
+
+       return err;
+}
+
+int mthca_QUERY_QP(struct mthca_dev *dev, u32 num, int is_ee,
+                  struct mthca_mailbox *mailbox, u8 *status)
+{
+       return mthca_cmd_box(dev, 0, mailbox->dma, (!!is_ee << 24) | num, 0,
+                            CMD_QUERY_QPEE, CMD_TIME_CLASS_A, status);
+}
+
+int mthca_CONF_SPECIAL_QP(struct mthca_dev *dev, int type, u32 qpn,
+                         u8 *status)
+{
+       u8 op_mod;
+
+       switch (type) {
+       case IB_QPT_QP0:
+               op_mod = 0;
+               break;
+       case IB_QPT_QP1:
+               op_mod = 1;
+               break;
+       case IB_QPT_RAW_IPV6:
+               op_mod = 2;
+               break;
+       case IB_QPT_RAW_ETHER:
+               op_mod = 3;
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       return mthca_cmd(dev, 0, qpn, op_mod, CMD_CONF_SPECIAL_QP,
+                        CMD_TIME_CLASS_B, status);
+}
+
+int mthca_MAD_IFC(struct mthca_dev *dev, int ignore_mkey, int ignore_bkey,
+                 int port, struct _ib_wc *in_wc, struct ib_grh *in_grh,
+                 void *in_mad, void *response_mad, u8 *status)
+{
+       struct mthca_mailbox *inmailbox, *outmailbox;
+       u8 *inbox;
+       int err;
+       u32 in_modifier = port;
+       u8 op_modifier = 0;
+
+#define MAD_IFC_BOX_SIZE      0x400
+#define MAD_IFC_MY_QPN_OFFSET 0x100
+#define MAD_IFC_RQPN_OFFSET   0x104
+#define MAD_IFC_SL_OFFSET     0x108
+#define MAD_IFC_G_PATH_OFFSET 0x109
+#define MAD_IFC_RLID_OFFSET   0x10a
+#define MAD_IFC_PKEY_OFFSET   0x10e
+#define MAD_IFC_GRH_OFFSET    0x140
+
+       inmailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
+       if (IS_ERR(inmailbox))
+               return PTR_ERR(inmailbox);
+       inbox = inmailbox->buf;
+
+       outmailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
+       if (IS_ERR(outmailbox)) {
+               mthca_free_mailbox(dev, inmailbox);
+               return PTR_ERR(outmailbox);
+       }
+
+       memcpy(inbox, in_mad, 256);
+
+       /*
+        * Key check traps can't be generated unless we have in_wc to
+        * tell us where to send the trap.
+        */
+       if (ignore_mkey || !in_wc)
+               op_modifier |= 0x1;
+       if (ignore_bkey || !in_wc)
+               op_modifier |= 0x2;
+
+       if (in_wc) {
+               u8 val;
+
+               RtlZeroMemory(inbox + 256, 256);
+
+               MTHCA_PUT(inbox, in_wc->qp_num,     MAD_IFC_MY_QPN_OFFSET);
+               MTHCA_PUT(inbox, in_wc->recv.ud.remote_qp,     MAD_IFC_RQPN_OFFSET);
+
+               val = in_wc->recv.ud.remote_sl << 4;
+               MTHCA_PUT(inbox, val,               MAD_IFC_SL_OFFSET);
+
+               val = in_wc->recv.ud.path_bits |
+                       (in_wc->recv.ud.recv_opt & IB_RECV_OPT_GRH_VALID ? 0x80 : 0);
+               MTHCA_PUT(inbox, val,               MAD_IFC_G_PATH_OFFSET);
+
+               MTHCA_PUT(inbox, in_wc->recv.ud.remote_lid,       MAD_IFC_RLID_OFFSET);
+               MTHCA_PUT(inbox, in_wc->recv.ud.pkey_index, MAD_IFC_PKEY_OFFSET);
+
+               if (in_grh)
+                       memcpy(inbox + MAD_IFC_GRH_OFFSET, in_grh, 40);
+
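+               /* Bit 4 of the opcode modifier presumably marks the WC/GRH fields above as valid. */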
+               op_modifier |= 0x10;
+
+               in_modifier |= in_wc->recv.ud.remote_lid << 16;
+       }
+
+       err = mthca_cmd_box(dev, inmailbox->dma, outmailbox->dma,
+                           in_modifier, op_modifier,
+                           CMD_MAD_IFC, CMD_TIME_CLASS_C, status);
+
+       if (!err && !*status)
+               memcpy(response_mad, outmailbox->buf, 256);
+
+       mthca_free_mailbox(dev, inmailbox);
+       mthca_free_mailbox(dev, outmailbox);
+       return err;
+}
+
+int mthca_READ_MGM(struct mthca_dev *dev, int index,
+                  struct mthca_mailbox *mailbox, u8 *status)
+{
+       return mthca_cmd_box(dev, 0, mailbox->dma, index, 0,
+                            CMD_READ_MGM, CMD_TIME_CLASS_A, status);
+}
+
+int mthca_WRITE_MGM(struct mthca_dev *dev, int index,
+                   struct mthca_mailbox *mailbox, u8 *status)
+{
+       return mthca_cmd(dev, mailbox->dma, index, 0, CMD_WRITE_MGM,
+                        CMD_TIME_CLASS_A, status);
+}
+
+int mthca_MGID_HASH(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
+                   u16 *hash, u8 *status)
+{
+       u64 imm;
+       int err;
+
+       err = mthca_cmd_imm(dev, mailbox->dma, &imm, 0, 0, CMD_MGID_HASH,
+                           CMD_TIME_CLASS_A, status);
+
+       *hash = (u16)imm;
+       return err;
+}
+
+int mthca_NOP(struct mthca_dev *dev, u8 *status)
+{
+       return mthca_cmd(dev, 0, 0x1f, 0, CMD_NOP, 100000, status);     /* 100 msecs */
+}
diff --git a/trunk/hw/mthca/kernel/mthca_cmd.h b/trunk/hw/mthca/kernel/mthca_cmd.h
new file mode 100644 (file)
index 0000000..8f8d811
--- /dev/null
@@ -0,0 +1,324 @@
+/*
+ * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
+ * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id: mthca_cmd.h 3047 2005-08-10 03:59:35Z roland $
+ */
+
+#ifndef MTHCA_CMD_H
+#define MTHCA_CMD_H
+
+#include <ib_verbs.h>
+
+#define MTHCA_MAILBOX_SIZE 4096
+
+enum {
+       /* command completed successfully: */
+       MTHCA_CMD_STAT_OK             = 0x00,
+       /* Internal error (such as a bus error) occurred while processing command: */
+       MTHCA_CMD_STAT_INTERNAL_ERR   = 0x01,
+       /* Operation/command not supported or opcode modifier not supported: */
+       MTHCA_CMD_STAT_BAD_OP         = 0x02,
+       /* Parameter not supported or parameter out of range: */
+       MTHCA_CMD_STAT_BAD_PARAM      = 0x03,
+       /* System not enabled or bad system state: */
+       MTHCA_CMD_STAT_BAD_SYS_STATE  = 0x04,
+       /* Attempt to access reserved or unallocated resource: */
+       MTHCA_CMD_STAT_BAD_RESOURCE   = 0x05,
+       /* Requested resource is currently executing a command, or is otherwise busy: */
+       MTHCA_CMD_STAT_RESOURCE_BUSY  = 0x06,
+       /* Memory error: */
+       MTHCA_CMD_STAT_DDR_MEM_ERR    = 0x07,
+       /* Required capability exceeds device limits: */
+       MTHCA_CMD_STAT_EXCEED_LIM     = 0x08,
+       /* Resource is not in the appropriate state or ownership: */
+       MTHCA_CMD_STAT_BAD_RES_STATE  = 0x09,
+       /* Index out of range: */
+       MTHCA_CMD_STAT_BAD_INDEX      = 0x0a,
+       /* FW image corrupted: */
+       MTHCA_CMD_STAT_BAD_NVMEM      = 0x0b,
+       /* Attempt to modify a QP/EE which is not in the presumed state: */
+       MTHCA_CMD_STAT_BAD_QPEE_STATE = 0x10,
+       /* Bad segment parameters (Address/Size): */
+       MTHCA_CMD_STAT_BAD_SEG_PARAM  = 0x20,
+       /* Memory Region has Memory Windows bound to it: */
+       MTHCA_CMD_STAT_REG_BOUND      = 0x21,
+       /* HCA local attached memory not present: */
+       MTHCA_CMD_STAT_LAM_NOT_PRE    = 0x22,
+        /* Bad management packet (silently discarded): */
+       MTHCA_CMD_STAT_BAD_PKT        = 0x30,
+        /* More outstanding CQEs in CQ than new CQ size: */
+       MTHCA_CMD_STAT_BAD_SIZE       = 0x40
+};
+
+enum {
+       MTHCA_TRANS_INVALID = 0,
+       MTHCA_TRANS_RST2INIT,
+       MTHCA_TRANS_INIT2INIT,
+       MTHCA_TRANS_INIT2RTR,
+       MTHCA_TRANS_RTR2RTS,
+       MTHCA_TRANS_RTS2RTS,
+       MTHCA_TRANS_SQERR2RTS,
+       MTHCA_TRANS_ANY2ERR,
+       MTHCA_TRANS_RTS2SQD,
+       MTHCA_TRANS_SQD2SQD,
+       MTHCA_TRANS_SQD2RTS,
+       MTHCA_TRANS_ANY2RST,
+};
+
+enum {
+       DEV_LIM_FLAG_RC                 = 1 << 0,
+       DEV_LIM_FLAG_UC                 = 1 << 1,
+       DEV_LIM_FLAG_UD                 = 1 << 2,
+       DEV_LIM_FLAG_RD                 = 1 << 3,
+       DEV_LIM_FLAG_RAW_IPV6           = 1 << 4,
+       DEV_LIM_FLAG_RAW_ETHER          = 1 << 5,
+       DEV_LIM_FLAG_SRQ                = 1 << 6,
+       DEV_LIM_FLAG_BAD_PKEY_CNTR      = 1 << 8,
+       DEV_LIM_FLAG_BAD_QKEY_CNTR      = 1 << 9,
+       DEV_LIM_FLAG_MW                 = 1 << 16,
+       DEV_LIM_FLAG_AUTO_PATH_MIG      = 1 << 17,
+       DEV_LIM_FLAG_ATOMIC             = 1 << 18,
+       DEV_LIM_FLAG_RAW_MULTI          = 1 << 19,
+       DEV_LIM_FLAG_UD_AV_PORT_ENFORCE = 1 << 20,
+       DEV_LIM_FLAG_UD_MULTI           = 1 << 21,
+};
+
+struct mthca_mailbox {
+       dma_addr_t dma;
+       void      *buf;
+};
+
+struct mthca_dev_lim {
+       int max_srq_sz;
+       int max_qp_sz;
+       int reserved_qps;
+       int max_qps;
+       int reserved_srqs;
+       int max_srqs;
+       int reserved_eecs;
+       int max_eecs;
+       int max_cq_sz;
+       int reserved_cqs;
+       int max_cqs;
+       int max_mpts;
+       int reserved_eqs;
+       int max_eqs;
+       int reserved_mtts;
+       int max_mrw_sz;
+       int reserved_mrws;
+       int max_mtt_seg;
+       int max_requester_per_qp;
+       int max_responder_per_qp;
+       int max_rdma_global;
+       int local_ca_ack_delay;
+       int max_mtu;
+       int max_port_width;
+       int max_vl;
+       int num_ports;
+       int max_gids;
+       int max_pkeys;
+       u32 flags;
+       int reserved_uars;
+       int uar_size;
+       int min_page_sz;
+       int max_sg;
+       int max_desc_sz;
+       int max_qp_per_mcg;
+       int reserved_mgms;
+       int max_mcgs;
+       int reserved_pds;
+       int max_pds;
+       int reserved_rdds;
+       int max_rdds;
+       int eec_entry_sz;
+       int qpc_entry_sz;
+       int eeec_entry_sz;
+       int eqpc_entry_sz;
+       int eqc_entry_sz;
+       int cqc_entry_sz;
+       int srq_entry_sz;
+       int uar_scratch_entry_sz;
+       int mpt_entry_sz;
+       union {
+               struct {
+                       int max_avs;
+               } tavor;
+               struct {
+                       int resize_srq;
+                       int max_pbl_sz;
+                       u8  bmme_flags;
+                       u32 reserved_lkey;
+                       int lam_required;
+                       u64 max_icm_sz;
+               } arbel;
+       } hca;
+};
+
+struct mthca_adapter {
+       u32  vendor_id;
+       u32  device_id;
+       u32  revision_id;
+       char board_id[MTHCA_BOARD_ID_LEN];
+       u8   inta_pin;
+};
+
+struct mthca_init_hca_param {
+       u64 qpc_base;
+       u64 eec_base;
+       u64 srqc_base;
+       u64 cqc_base;
+       u64 eqpc_base;
+       u64 eeec_base;
+       u64 eqc_base;
+       u64 rdb_base;
+       u64 mc_base;
+       u64 mpt_base;
+       u64 mtt_base;
+       u64 uar_scratch_base;
+       u64 uarc_base;
+       u16 log_mc_entry_sz;
+       u16 mc_hash_sz;
+       u8  log_num_qps;
+       u8  log_num_eecs;
+       u8  log_num_srqs;
+       u8  log_num_cqs;
+       u8  log_num_eqs;
+       u8  log_mc_table_sz;
+       u8  mtt_seg_sz;
+       u8  log_mpt_sz;
+       u8  log_uar_sz;
+       u8  log_uarc_sz;
+};
+
+struct mthca_init_ib_param {
+       int port_width;
+       int vl_cap;
+       int mtu_cap;
+       u16 gid_cap;
+       u16 pkey_cap;
+       int set_guid0;
+       u64 guid0;
+       int set_node_guid;
+       u64 node_guid;
+       int set_si_guid;
+       u64 si_guid;
+};
+
+struct mthca_set_ib_param {
+       int set_si_guid;
+       int reset_qkey_viol;
+       u64 si_guid;
+       u32 cap_mask;
+};
+
+int mthca_cmd_init(struct mthca_dev *dev);
+void mthca_cmd_cleanup(struct mthca_dev *dev);
+int mthca_cmd_use_events(struct mthca_dev *dev);
+void mthca_cmd_use_polling(struct mthca_dev *dev);
+void mthca_cmd_event(struct mthca_dev *dev, u16 token,
+                    u8  status, u64 out_param);
+
+struct mthca_mailbox *mthca_alloc_mailbox(struct mthca_dev *dev,
+                                         unsigned int gfp_mask);
+void mthca_free_mailbox(struct mthca_dev *dev, struct mthca_mailbox *mailbox);
+
+int mthca_SYS_EN(struct mthca_dev *dev, u8 *status);
+int mthca_SYS_DIS(struct mthca_dev *dev, u8 *status);
+int mthca_MAP_FA(struct mthca_dev *dev, struct mthca_icm *icm, u8 *status);
+int mthca_UNMAP_FA(struct mthca_dev *dev, u8 *status);
+int mthca_RUN_FW(struct mthca_dev *dev, u8 *status);
+int mthca_QUERY_FW(struct mthca_dev *dev, u8 *status);
+int mthca_ENABLE_LAM(struct mthca_dev *dev, u8 *status);
+int mthca_DISABLE_LAM(struct mthca_dev *dev, u8 *status);
+int mthca_QUERY_DDR(struct mthca_dev *dev, u8 *status);
+int mthca_QUERY_DEV_LIM(struct mthca_dev *dev,
+                       struct mthca_dev_lim *dev_lim, u8 *status);
+int mthca_QUERY_ADAPTER(struct mthca_dev *dev,
+                       struct mthca_adapter *adapter, u8 *status);
+int mthca_INIT_HCA(struct mthca_dev *dev,
+                  struct mthca_init_hca_param *param,
+                  u8 *status);
+int mthca_INIT_IB(struct mthca_dev *dev,
+                 struct mthca_init_ib_param *param,
+                 int port, u8 *status);
+int mthca_CLOSE_IB(struct mthca_dev *dev, int port, u8 *status);
+int mthca_CLOSE_HCA(struct mthca_dev *dev, int panic, u8 *status);
+int mthca_SET_IB(struct mthca_dev *dev, struct mthca_set_ib_param *param,
+                int port, u8 *status);
+int mthca_MAP_ICM(struct mthca_dev *dev, struct mthca_icm *icm, u64 virt, u8 *status);
+int mthca_MAP_ICM_page(struct mthca_dev *dev, u64 dma_addr, u64 virt, u8 *status);
+int mthca_UNMAP_ICM(struct mthca_dev *dev, u64 virt, u32 page_count, u8 *status);
+int mthca_MAP_ICM_AUX(struct mthca_dev *dev, struct mthca_icm *icm, u8 *status);
+int mthca_UNMAP_ICM_AUX(struct mthca_dev *dev, u8 *status);
+int mthca_SET_ICM_SIZE(struct mthca_dev *dev, u64 icm_size, u64 *aux_pages,
+                      u8 *status);
+int mthca_SW2HW_MPT(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
+                   int mpt_index, u8 *status);
+int mthca_HW2SW_MPT(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
+                   int mpt_index, u8 *status);
+int mthca_WRITE_MTT(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
+                   int num_mtt, u8 *status);
+int mthca_SYNC_TPT(struct mthca_dev *dev, u8 *status);
+int mthca_MAP_EQ(struct mthca_dev *dev, u64 event_mask, int unmap,
+                int eq_num, u8 *status);
+int mthca_SW2HW_EQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
+                  int eq_num, u8 *status);
+int mthca_HW2SW_EQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
+                  int eq_num, u8 *status);
+int mthca_SW2HW_CQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
+                  int cq_num, u8 *status);
+int mthca_HW2SW_CQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
+                  int cq_num, u8 *status);
+int mthca_SW2HW_SRQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
+                   int srq_num, u8 *status);
+int mthca_HW2SW_SRQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
+                   int srq_num, u8 *status);
+int mthca_ARM_SRQ(struct mthca_dev *dev, int srq_num, int limit, u8 *status);
+int mthca_MODIFY_QP(struct mthca_dev *dev, int trans, u32 num,
+                   int is_ee, struct mthca_mailbox *mailbox, u32 optmask,
+                   u8 *status);
+int mthca_QUERY_QP(struct mthca_dev *dev, u32 num, int is_ee,
+                  struct mthca_mailbox *mailbox, u8 *status);
+int mthca_CONF_SPECIAL_QP(struct mthca_dev *dev, int type, u32 qpn,
+                         u8 *status);
+int mthca_MAD_IFC(struct mthca_dev *dev, int ignore_mkey, int ignore_bkey,
+                 int port, struct _ib_wc *in_wc, struct ib_grh *in_grh,
+                 void *in_mad, void *response_mad, u8 *status);
+int mthca_READ_MGM(struct mthca_dev *dev, int index,
+                  struct mthca_mailbox *mailbox, u8 *status);
+int mthca_WRITE_MGM(struct mthca_dev *dev, int index,
+                   struct mthca_mailbox *mailbox, u8 *status);
+int mthca_MGID_HASH(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
+                   u16 *hash, u8 *status);
+int mthca_NOP(struct mthca_dev *dev, u8 *status);
+
+#endif /* MTHCA_CMD_H */
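
The MTHCA_CMD_STAT_* values declared above are the raw status bytes firmware returns through the *status argument of every command wrapper; callers treat any non-zero value as failure (mthca_init_cq in mthca_cq.c, for example, just logs the byte and returns -EINVAL). A hypothetical translation helper, restricted to errno values already used elsewhere in this driver, might look like:

    static int mthca_status_to_errno(u8 status)
    {
            switch (status) {
            case MTHCA_CMD_STAT_OK:            return 0;
            case MTHCA_CMD_STAT_RESOURCE_BUSY: return -EAGAIN;
            case MTHCA_CMD_STAT_EXCEED_LIM:    return -ENOMEM;
            default:                           return -EINVAL;
            }
    }
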
diff --git a/trunk/hw/mthca/kernel/mthca_config_reg.h b/trunk/hw/mthca/kernel/mthca_config_reg.h
new file mode 100644 (file)
index 0000000..d12084a
--- /dev/null
@@ -0,0 +1,50 @@
+/*
+ * Copyright (c) 2004 Topspin Communications.  All rights reserved.
+ * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id: mthca_config_reg.h 2803 2005-07-05 15:58:55Z roland $
+ */
+
+#ifndef MTHCA_CONFIG_REG_H
+#define MTHCA_CONFIG_REG_H
+
+#define MTHCA_HCR_BASE         0x80680
+#define MTHCA_HCR_SIZE         0x0001c
+#define MTHCA_ECR_BASE         0x80700
+#define MTHCA_ECR_SIZE         0x00008
+#define MTHCA_ECR_CLR_BASE     0x80708
+#define MTHCA_ECR_CLR_SIZE     0x00008
+#define MTHCA_MAP_ECR_SIZE     (MTHCA_ECR_SIZE + MTHCA_ECR_CLR_SIZE)
+#define MTHCA_CLR_INT_BASE     0xf00d8
+#define MTHCA_CLR_INT_SIZE     0x00008
+#define MTHCA_EQ_SET_CI_SIZE   (8 * 32)
+
+#endif /* MTHCA_CONFIG_REG_H */
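
These offsets locate the command (HCR), event-cause (ECR) and interrupt-clear registers inside the HCA's first PCI BAR. Assuming bar0 holds the kernel virtual address that BAR 0 was mapped to (an illustrative local, not a name from this patch), the registers in struct mthca_dev would be reached as:

    dev->hcr           = (u8 __iomem *) bar0 + MTHCA_HCR_BASE;      /* command register block   */
    dev->hcr_size      = MTHCA_HCR_SIZE;
    dev->clr_base      = (u8 __iomem *) bar0 + MTHCA_CLR_INT_BASE;  /* interrupt clear register */
    dev->clr_base_size = MTHCA_CLR_INT_SIZE;
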
diff --git a/trunk/hw/mthca/kernel/mthca_cq.c b/trunk/hw/mthca/kernel/mthca_cq.c
new file mode 100644 (file)
index 0000000..6f01ac1
--- /dev/null
@@ -0,0 +1,983 @@
+/*
+ * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
+ * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
+ * Copyright (c) 2005 Cisco Systems, Inc. All rights reserved.
+ * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2004 Voltaire, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id: mthca_cq.c 3047 2005-08-10 03:59:35Z roland $
+ */
+
+#include <ib_pack.h>
+
+#include "mthca_dev.h"
+#if defined(EVENT_TRACING)
+#ifdef offsetof
+#undef offsetof
+#endif
+#include "mthca_cq.tmh"
+#endif
+#include "mthca_cmd.h"
+#include "mthca_memfree.h"
+
+
+#ifdef ALLOC_PRAGMA
+#pragma alloc_text (PAGE, mthca_init_cq_table)
+#pragma alloc_text (PAGE, mthca_cleanup_cq_table)
+#endif
+
+enum {
+       MTHCA_MAX_DIRECT_CQ_SIZE = 4 * PAGE_SIZE
+};
+
+/*
+ * Must be packed because start is 64 bits but only aligned to 32 bits.
+ */
+#pragma pack(push,1)
+struct mthca_cq_context {
+       __be32 flags;
+       __be64 start;
+       __be32 logsize_usrpage;
+       __be32 error_eqn;       /* Tavor only */
+       __be32 comp_eqn;
+       __be32 pd;
+       __be32 lkey;
+       __be32 last_notified_index;
+       __be32 solicit_producer_index;
+       __be32 consumer_index;
+       __be32 producer_index;
+       __be32 cqn;
+       __be32 ci_db;           /* Arbel only */
+       __be32 state_db;        /* Arbel only */
+       u32    reserved;
+};
+#pragma pack(pop)
+
+#define MTHCA_CQ_STATUS_OK          ( 0 << 28)
+#define MTHCA_CQ_STATUS_OVERFLOW    ( 9 << 28)
+#define MTHCA_CQ_STATUS_WRITE_FAIL  (10 << 28)
+#define MTHCA_CQ_FLAG_TR            ( 1 << 18)
+#define MTHCA_CQ_FLAG_OI            ( 1 << 17)
+#define MTHCA_CQ_STATE_DISARMED     ( 0 <<  8)
+#define MTHCA_CQ_STATE_ARMED        ( 1 <<  8)
+#define MTHCA_CQ_STATE_ARMED_SOL    ( 4 <<  8)
+#define MTHCA_EQ_STATE_FIRED        (10 <<  8)
+
+enum {
+       MTHCA_ERROR_CQE_OPCODE_MASK = 0xfe
+};
+
+enum {
+       SYNDROME_LOCAL_LENGTH_ERR        = 0x01,
+       SYNDROME_LOCAL_QP_OP_ERR         = 0x02,
+       SYNDROME_LOCAL_EEC_OP_ERR        = 0x03,
+       SYNDROME_LOCAL_PROT_ERR          = 0x04,
+       SYNDROME_WR_FLUSH_ERR            = 0x05,
+       SYNDROME_MW_BIND_ERR             = 0x06,
+       SYNDROME_BAD_RESP_ERR            = 0x10,
+       SYNDROME_LOCAL_ACCESS_ERR        = 0x11,
+       SYNDROME_REMOTE_INVAL_REQ_ERR    = 0x12,
+       SYNDROME_REMOTE_ACCESS_ERR       = 0x13,
+       SYNDROME_REMOTE_OP_ERR           = 0x14,
+       SYNDROME_RETRY_EXC_ERR           = 0x15,
+       SYNDROME_RNR_RETRY_EXC_ERR       = 0x16,
+       SYNDROME_LOCAL_RDD_VIOL_ERR      = 0x20,
+       SYNDROME_REMOTE_INVAL_RD_REQ_ERR = 0x21,
+       SYNDROME_REMOTE_ABORTED_ERR      = 0x22,
+       SYNDROME_INVAL_EECN_ERR          = 0x23,
+       SYNDROME_INVAL_EEC_STATE_ERR     = 0x24
+};
+
+struct mthca_cqe {
+       __be32 my_qpn;
+       __be32 my_ee;
+       __be32 rqpn;
+       __be16 sl_g_mlpath;
+       __be16 rlid;
+       __be32 imm_etype_pkey_eec;
+       __be32 byte_cnt;
+       __be32 wqe;
+       u8     opcode;
+       u8     is_send;
+       u8     reserved;
+       u8     owner;
+};
+
+struct mthca_err_cqe {
+       __be32 my_qpn;
+       u32    reserved1[3];
+       u8     syndrome;
+       u8     vendor_err;
+       __be16 db_cnt;
+       u32    reserved2;
+       __be32 wqe;
+       u8     opcode;
+       u8     reserved3[2];
+       u8     owner;
+};
+
+#define MTHCA_CQ_ENTRY_OWNER_SW      (0 << 7)
+#define MTHCA_CQ_ENTRY_OWNER_HW      (1 << 7)
+
+#define MTHCA_TAVOR_CQ_DB_INC_CI       (1 << 24)
+#define MTHCA_TAVOR_CQ_DB_REQ_NOT      (2 << 24)
+#define MTHCA_TAVOR_CQ_DB_REQ_NOT_SOL  (3 << 24)
+#define MTHCA_TAVOR_CQ_DB_SET_CI       (4 << 24)
+#define MTHCA_TAVOR_CQ_DB_REQ_NOT_MULT (5 << 24)
+
+#define MTHCA_ARBEL_CQ_DB_REQ_NOT_SOL  (1 << 24)
+#define MTHCA_ARBEL_CQ_DB_REQ_NOT      (2 << 24)
+#define MTHCA_ARBEL_CQ_DB_REQ_NOT_MULT (3 << 24)
+
+static inline struct mthca_cqe *get_cqe(struct mthca_cq *cq, int entry)
+{
+       if (cq->is_direct)
+               return (struct mthca_cqe *)((u8*)cq->queue.direct.page + (entry * MTHCA_CQ_ENTRY_SIZE));
+       else
+               return (struct mthca_cqe *)((u8*)cq->queue.page_list[entry * MTHCA_CQ_ENTRY_SIZE / PAGE_SIZE].page
+                       + (entry * MTHCA_CQ_ENTRY_SIZE) % PAGE_SIZE);
+}
+
+static inline struct mthca_cqe *cqe_sw(struct mthca_cq *cq, int i)
+{
+       struct mthca_cqe *cqe = get_cqe(cq, i);
+       return MTHCA_CQ_ENTRY_OWNER_HW & cqe->owner ? NULL : cqe;
+}
+
+static inline struct mthca_cqe *next_cqe_sw(struct mthca_cq *cq)
+{
+       return cqe_sw(cq, cq->cons_index & cq->ibcq.cqe);
+}
+
+static inline void set_cqe_hw(struct mthca_cqe *cqe)
+{
+       cqe->owner = MTHCA_CQ_ENTRY_OWNER_HW;
+}
+
+static void dump_cqe(struct mthca_dev *dev, void *cqe_ptr)
+{
+       __be32 *cqe = cqe_ptr;
+       UNREFERENCED_PARAMETER(dev);
+
+       (void) cqe;     /* avoid warning if mthca_dbg compiled away... */
+       HCA_PRINT(TRACE_LEVEL_INFORMATION,HCA_DBG_CQ,("CQE contents \n"));
+       HCA_PRINT(TRACE_LEVEL_INFORMATION,HCA_DBG_CQ,("\t[%2x] %08x %08x %08x %08x\n",0,
+               cl_ntoh32(cqe[0]), cl_ntoh32(cqe[1]), cl_ntoh32(cqe[2]), cl_ntoh32(cqe[3])));
+       HCA_PRINT(TRACE_LEVEL_INFORMATION,HCA_DBG_CQ,("\t[%2x] %08x %08x %08x %08x \n",16,
+               cl_ntoh32(cqe[4]), cl_ntoh32(cqe[5]), cl_ntoh32(cqe[6]), cl_ntoh32(cqe[7])));
+}
+
+/*
+ * incr is ignored in native Arbel (mem-free) mode, so cq->cons_index
+ * should be correct before calling update_cons_index().
+ */
+static inline void update_cons_index(struct mthca_dev *dev, struct mthca_cq *cq,
+                                    int incr)
+{
+       __be32 doorbell[2];
+
+       if (mthca_is_memfree(dev)) {
+               *cq->set_ci_db = cl_hton32(cq->cons_index);
+               wmb();
+       } else {
+               doorbell[0] = cl_hton32(MTHCA_TAVOR_CQ_DB_INC_CI | cq->cqn);
+               doorbell[1] = cl_hton32(incr - 1);
+
+               mthca_write64(doorbell,
+                             dev->kar + MTHCA_CQ_DOORBELL,
+                             MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
+       }
+}
+
+void mthca_cq_completion(struct mthca_dev *dev, u32 cqn)
+{
+       struct mthca_cq *cq;
+
+       cq = mthca_array_get(&dev->cq_table.cq, cqn & (dev->limits.num_cqs - 1));
+
+       if (!cq) {
+               HCA_PRINT(TRACE_LEVEL_WARNING  ,HCA_DBG_LOW  ,("Completion event for bogus CQ %08x\n", cqn));
+               return;
+       }
+
+       ++cq->arm_sn;
+
+       cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);
+}
+
+void mthca_cq_event(struct mthca_dev *dev, u32 cqn,
+                   enum ib_event_type event_type)
+{
+       struct mthca_cq *cq;
+       struct ib_event event;
+       SPIN_LOCK_PREP(lh);
+
+       spin_lock(&dev->cq_table.lock, &lh);
+
+       cq = mthca_array_get(&dev->cq_table.cq, cqn & (dev->limits.num_cqs - 1));
+
+       if (cq)
+               atomic_inc(&cq->refcount);
+       spin_unlock(&lh);
+
+       if (!cq) {
+               HCA_PRINT(TRACE_LEVEL_WARNING  ,HCA_DBG_LOW  ,("Async event for bogus CQ %08x\n", cqn));
+               return;
+       }
+
+       event.device      = &dev->ib_dev;
+       event.event       = event_type;
+       event.element.cq  = &cq->ibcq;
+       if (cq->ibcq.event_handler)
+               cq->ibcq.event_handler(&event, cq->ibcq.cq_context);
+
+       if (atomic_dec_and_test(&cq->refcount))
+               wake_up(&cq->wait);
+}
+
+void mthca_cq_clean(struct mthca_dev *dev, u32 cqn, u32 qpn,
+                   struct mthca_srq *srq)
+{
+       struct mthca_cq *cq;
+       struct mthca_cqe *cqe;
+       u32 prod_index;
+       int nfreed = 0;
+       SPIN_LOCK_PREP(lht);
+       SPIN_LOCK_PREP(lh);
+
+       spin_lock_irq(&dev->cq_table.lock, &lht);
+       cq = mthca_array_get(&dev->cq_table.cq, cqn & (dev->limits.num_cqs - 1));
+       if (cq)
+               atomic_inc(&cq->refcount);
+       spin_unlock_irq(&lht);
+
+       if (!cq)
+               return;
+
+       spin_lock_irq(&cq->lock, &lh);
+
+       /*
+        * First we need to find the current producer index, so we
+        * know where to start cleaning from.  It doesn't matter if HW
+        * adds new entries after this loop -- the QP we're worried
+        * about is already in RESET, so the new entries won't come
+        * from our QP and therefore don't need to be checked.
+        */
+       for (prod_index = cq->cons_index;
+            cqe_sw(cq, prod_index & cq->ibcq.cqe);
+            ++prod_index)
+               if (prod_index == cq->cons_index + cq->ibcq.cqe)
+                       break;
+
+       #if 0
+               HCA_PRINT(TRACE_LEVEL_VERBOSE,HCA_DBG_LOW,("Cleaning QPN %06x from CQN %06x; ci %d, pi %d\n",
+                         qpn, cqn, cq->cons_index, prod_index));
+       #endif
+
+       /*
+        * Now sweep backwards through the CQ, removing CQ entries
+        * that match our QP by copying older entries on top of them.
+        */
+       while ((int) --prod_index - (int) cq->cons_index >= 0) {
+               cqe = get_cqe(cq, prod_index & cq->ibcq.cqe);
+               if (cqe->my_qpn == cl_hton32(qpn)) {
+                       if (srq)
+                               mthca_free_srq_wqe(srq, cl_ntoh32(cqe->wqe));
+                       ++nfreed;
+               } else if (nfreed) {
+                       memcpy(get_cqe(cq, (prod_index + nfreed) & cq->ibcq.cqe),
+                               cqe, MTHCA_CQ_ENTRY_SIZE);
+               }
+       }
+
+       if (nfreed) {
+               wmb();
+               cq->cons_index += nfreed;
+               update_cons_index(dev, cq, nfreed);
+       }
+
+       spin_unlock_irq(&lh);
+       if (atomic_dec_and_test(&cq->refcount))
+               wake_up(&cq->wait);
+}
+
+static int handle_error_cqe(struct mthca_dev *dev, struct mthca_cq *cq,
+                           struct mthca_qp *qp, int wqe_index, int is_send,
+                           struct mthca_err_cqe *cqe,
+                           struct _ib_wc *entry, int *free_cqe)
+{
+       int err;
+       int dbd;
+       __be32 new_wqe;
+
+       UNREFERENCED_PARAMETER(cq);
+       
+       if (cqe->syndrome != SYNDROME_WR_FLUSH_ERR) {
+               HCA_PRINT(TRACE_LEVEL_INFORMATION ,HCA_DBG_CQ ,("Completion with error "
+                         "(QPN %06x, WQE @ %08x, CQN %06x, index %d)\n",
+                         cl_ntoh32(cqe->my_qpn), cl_ntoh32(cqe->wqe),
+                         cq->cqn, cq->cons_index));
+               dump_cqe(dev, cqe);
+       }
+
+
+       /*
+        * For completions in error, only work request ID, status, vendor error
+        * (and freed resource count for RD) have to be set.
+        */
+       switch (cqe->syndrome) {
+       case SYNDROME_LOCAL_LENGTH_ERR:
+               entry->status = IB_WCS_LOCAL_LEN_ERR;
+               break;
+       case SYNDROME_LOCAL_QP_OP_ERR:
+               entry->status = IB_WCS_LOCAL_OP_ERR;
+               break;
+       case SYNDROME_LOCAL_EEC_OP_ERR:
+               entry->status = IB_WCS_LOCAL_EEC_OP_ERR;
+               break;
+       case SYNDROME_LOCAL_PROT_ERR:
+               entry->status = IB_WCS_LOCAL_PROTECTION_ERR;
+               break;
+       case SYNDROME_WR_FLUSH_ERR:
+               entry->status = IB_WCS_WR_FLUSHED_ERR;
+               break;
+       case SYNDROME_MW_BIND_ERR:
+               entry->status = IB_WCS_MEM_WINDOW_BIND_ERR;
+               break;
+       case SYNDROME_BAD_RESP_ERR:
+               entry->status = IB_WCS_BAD_RESP_ERR;
+               break;
+       case SYNDROME_LOCAL_ACCESS_ERR:
+               entry->status = IB_WCS_LOCAL_ACCESS_ERR;
+               break;
+       case SYNDROME_REMOTE_INVAL_REQ_ERR:
+               entry->status = IB_WCS_REM_INV_REQ_ERR;
+               break;
+       case SYNDROME_REMOTE_ACCESS_ERR:
+               entry->status = IB_WCS_REM_ACCESS_ERR;
+               break;
+       case SYNDROME_REMOTE_OP_ERR:
+               entry->status = IB_WCS_REM_OP_ERR;
+               break;
+       case SYNDROME_RETRY_EXC_ERR:
+               entry->status = IB_WCS_TIMEOUT_RETRY_ERR;
+               break;
+       case SYNDROME_RNR_RETRY_EXC_ERR:
+               entry->status = IB_WCS_RNR_RETRY_ERR;
+               break;
+       case SYNDROME_LOCAL_RDD_VIOL_ERR:
+               entry->status = IB_WCS_LOCAL_RDD_VIOL_ERR;
+               break;
+       case SYNDROME_REMOTE_INVAL_RD_REQ_ERR:
+               entry->status = IB_WCS_REM_INVALID_REQ_ERR;
+               break;
+       case SYNDROME_REMOTE_ABORTED_ERR:
+               entry->status = IB_WCS_REM_ABORT_ERR;
+               break;
+       case SYNDROME_INVAL_EECN_ERR:
+               entry->status = IB_WCS_INV_EECN_ERR;
+               break;
+       case SYNDROME_INVAL_EEC_STATE_ERR:
+               entry->status = IB_WCS_INV_EEC_STATE_ERR;
+               break;
+       default:
+               entry->status = IB_WCS_GENERAL_ERR;
+               break;
+       }
+
+       entry->vendor_specific = cqe->vendor_err;
+       
+       /*
+        * Mem-free HCAs always generate one CQE per WQE, even in the
+        * error case, so we don't have to check the doorbell count, etc.
+        */
+       if (mthca_is_memfree(dev))
+               return 0;
+
+       err = mthca_free_err_wqe(dev, qp, is_send, wqe_index, &dbd, &new_wqe);
+       if (err)
+               return err;
+
+       /*
+        * If we're at the end of the WQE chain, or we've used up our
+        * doorbell count, free the CQE.  Otherwise just update it for
+        * the next poll operation.
+        */
+       if (!(new_wqe & cl_hton32(0x3f)) || (!cqe->db_cnt && dbd))
+               return 0;
+
+       cqe->db_cnt   = cl_hton16(cl_ntoh16((u16)(cqe->db_cnt - dbd)));
+       cqe->wqe      = new_wqe;
+       cqe->syndrome = SYNDROME_WR_FLUSH_ERR;
+
+       *free_cqe = 0;
+
+       return 0;
+}
+
+static inline int mthca_poll_one(struct mthca_dev *dev,
+                                struct mthca_cq *cq,
+                                struct mthca_qp **cur_qp,
+                                int *freed,
+                                struct _ib_wc *entry)
+{
+       struct mthca_wq *wq;
+       struct mthca_cqe *cqe;
+       unsigned  wqe_index;
+       int is_error;
+       int is_send;
+       int free_cqe = 1;
+       int err = 0;
+
+       HCA_ENTER(HCA_DBG_CQ);
+       cqe = next_cqe_sw(cq);
+       if (!cqe)
+               return -EAGAIN;
+
+       /*
+        * Make sure we read CQ entry contents after we've checked the
+        * ownership bit.
+        */
+       rmb();
+
+       #if 0
+       {
+               HCA_PRINT(TRACE_LEVEL_VERBOSE,HCA_DBG_CQ,("%x/%d: CQE -> QPN %06x, WQE @ %08x\n",
+                         cq->cqn, cq->cons_index, cl_ntoh32(cqe->my_qpn),
+                         cl_ntoh32(cqe->wqe)));
+               dump_cqe(dev, cqe);
+       }
+       #endif
+
+       is_error = (cqe->opcode & MTHCA_ERROR_CQE_OPCODE_MASK) ==
+               MTHCA_ERROR_CQE_OPCODE_MASK;
+       is_send  = is_error ? cqe->opcode & 0x01 : cqe->is_send & 0x80;
+
+       if (!*cur_qp || cl_ntoh32(cqe->my_qpn) != (*cur_qp)->qpn) {
+               /*
+                * We do not have to take the QP table lock here,
+                * because CQs will be locked while QPs are removed
+                * from the table.
+                */
+               *cur_qp = mthca_array_get(&dev->qp_table.qp,
+                                         cl_ntoh32(cqe->my_qpn) &
+                                         (dev->limits.num_qps - 1));
+               if (!*cur_qp) {
+                       HCA_PRINT(TRACE_LEVEL_WARNING,HCA_DBG_CQ, ("CQ entry for unknown QP %06x\n",
+                                  cl_ntoh32(cqe->my_qpn) & 0xffffff));
+                       err = -EINVAL;
+                       goto out;
+               }
+       }
+
+       entry->qp_num = (*cur_qp)->qpn;
+
+       if (is_send) {
+               wq = &(*cur_qp)->sq;
+               wqe_index = ((cl_ntoh32(cqe->wqe) - (*cur_qp)->send_wqe_offset)
+                            >> wq->wqe_shift);
+               entry->wr_id = (*cur_qp)->wrid[wqe_index +
+                                              (*cur_qp)->rq.max];
+       } else if ((*cur_qp)->ibqp.srq) {
+               struct mthca_srq *srq = to_msrq((*cur_qp)->ibqp.srq);
+               u32 wqe = cl_ntoh32(cqe->wqe);
+               wq = NULL;
+               wqe_index = wqe >> srq->wqe_shift;
+               entry->wr_id = srq->wrid[wqe_index];
+               mthca_free_srq_wqe(srq, wqe);
+       } else {
+               wq = &(*cur_qp)->rq;
+               wqe_index = cl_ntoh32(cqe->wqe) >> wq->wqe_shift;
+               entry->wr_id = (*cur_qp)->wrid[wqe_index];
+       }
+
+       if (wq) {
+               if (wq->last_comp < wqe_index)
+                       wq->tail += wqe_index - wq->last_comp;
+               else
+                       wq->tail += wqe_index + wq->max - wq->last_comp;
+
+               wq->last_comp = wqe_index;
+       }
+
+       if (is_send) {
+               entry->recv.ud.recv_opt = 0;
+               switch (cqe->opcode) {
+               case MTHCA_OPCODE_RDMA_WRITE:
+                       entry->wc_type    = IB_WC_RDMA_WRITE;
+                       break;
+               case MTHCA_OPCODE_RDMA_WRITE_IMM:
+                       entry->wc_type    = IB_WC_RDMA_WRITE;
+                       entry->recv.ud.recv_opt |= IB_RECV_OPT_IMMEDIATE;
+                       break;
+               case MTHCA_OPCODE_SEND:
+                       entry->wc_type    = IB_WC_SEND;
+                       break;
+               case MTHCA_OPCODE_SEND_IMM:
+                       entry->wc_type    = IB_WC_SEND;
+                       entry->recv.ud.recv_opt |= IB_RECV_OPT_IMMEDIATE;
+                       break;
+               case MTHCA_OPCODE_RDMA_READ:
+                       entry->wc_type    = IB_WC_RDMA_READ;
+                       entry->length  = cl_ntoh32(cqe->byte_cnt);
+                       break;
+               case MTHCA_OPCODE_ATOMIC_CS:
+                       entry->wc_type    = IB_WC_COMPARE_SWAP;
+                       entry->length  = cl_ntoh32(cqe->byte_cnt);
+                       break;
+               case MTHCA_OPCODE_ATOMIC_FA:
+                       entry->wc_type    = IB_WC_FETCH_ADD;
+                       entry->length  = cl_ntoh32(cqe->byte_cnt);
+                       break;
+               case MTHCA_OPCODE_BIND_MW:
+                       entry->wc_type    = IB_WC_MW_BIND;
+                       break;
+               default:
+                       entry->wc_type    = MTHCA_OPCODE_INVALID;
+                       break;
+               }
+       } else {
+               entry->length = cl_ntoh32(cqe->byte_cnt);
+               switch (cqe->opcode & 0x1f) {
+               case IB_OPCODE_SEND_LAST_WITH_IMMEDIATE:
+               case IB_OPCODE_SEND_ONLY_WITH_IMMEDIATE:
+                       entry->recv.ud.recv_opt = IB_RECV_OPT_IMMEDIATE;
+                       entry->recv.ud.immediate_data = cqe->imm_etype_pkey_eec;
+                       entry->wc_type = IB_WC_RECV;
+                       break;
+               case IB_OPCODE_RDMA_WRITE_LAST_WITH_IMMEDIATE:
+               case IB_OPCODE_RDMA_WRITE_ONLY_WITH_IMMEDIATE:
+                       entry->recv.ud.recv_opt = IB_RECV_OPT_IMMEDIATE;
+                       entry->recv.ud.immediate_data = cqe->imm_etype_pkey_eec;
+                       entry->wc_type = IB_WC_RECV_RDMA_WRITE;
+                       break;
+               default:
+                       entry->recv.ud.recv_opt = 0;
+                       entry->wc_type = IB_WC_RECV;
+                       break;
+               }
+               entry->recv.ud.remote_lid          = cqe->rlid;
+               entry->recv.ud.remote_qp           = cqe->rqpn & 0xffffff00;
+               entry->recv.ud.pkey_index  = (u16)(cl_ntoh32(cqe->imm_etype_pkey_eec) >> 16);
+               entry->recv.ud.remote_sl           = (uint8_t)(cl_ntoh16(cqe->sl_g_mlpath) >> 12);
+               entry->recv.ud.path_bits = (uint8_t)(cl_ntoh16(cqe->sl_g_mlpath) & 0x7f);
+               entry->recv.ud.recv_opt   |= cl_ntoh16(cqe->sl_g_mlpath) & 0x80 ?
+                                       IB_RECV_OPT_GRH_VALID : 0;
+       }
+
+       if (is_error) {
+               err = handle_error_cqe(dev, cq, *cur_qp, wqe_index, is_send,
+                                      (struct mthca_err_cqe *) cqe,
+                                      entry, &free_cqe);
+       }
+       else
+               entry->status = IB_WCS_SUCCESS;
+
+ out:
+       if (likely(free_cqe)) {
+               set_cqe_hw(cqe);
+               ++(*freed);
+               ++cq->cons_index;
+       }
+       HCA_EXIT(HCA_DBG_CQ);
+       return err;
+}
+
+int mthca_poll_cq(struct ib_cq *ibcq, int num_entries,
+                 struct _ib_wc *entry)
+{
+       struct mthca_dev *dev = to_mdev(ibcq->device);
+       struct mthca_cq *cq = to_mcq(ibcq);
+       struct mthca_qp *qp = NULL;
+       int err = 0;
+       int freed = 0;
+       int npolled;
+       SPIN_LOCK_PREP(lh);
+
+       spin_lock_irqsave(&cq->lock, &lh);
+
+       for (npolled = 0; npolled < num_entries; ++npolled) {
+               err = mthca_poll_one(dev, cq, &qp,
+                                    &freed, entry + npolled);
+               if (err)
+                       break;
+       }
+
+       if (freed) {
+               wmb();
+               update_cons_index(dev, cq, freed);
+       }
+
+       spin_unlock_irqrestore(&lh);
+
+       return (err == 0 || err == -EAGAIN) ? npolled : err;
+}
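
A consumer of the verbs layer drains a CQ by calling the poll routine until it returns fewer entries than requested; a short usage sketch (the array size and process_wc() are placeholders, not names from this patch):

    struct _ib_wc wc[8];
    int i, n;

    n = mthca_poll_cq(ibcq, 8, wc);     /* number of completions reaped, or a negative error */
    for (i = 0; i < n; ++i)
            process_wc(&wc[i]);         /* placeholder for the caller's completion handling */
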
+
+int mthca_poll_cq_list(
+       IN              struct ib_cq *ibcq, 
+       IN      OUT                     ib_wc_t** const                         pp_free_wclist,
+               OUT                     ib_wc_t** const                         pp_done_wclist )
+{
+       struct mthca_dev *dev = to_mdev(ibcq->device);
+       struct mthca_cq *cq = to_mcq(ibcq);
+       struct mthca_qp *qp = NULL;
+       int err = 0;
+       int freed = 0;
+       ib_wc_t         *wc_p, **next_pp;
+       SPIN_LOCK_PREP(lh);
+
+       HCA_ENTER(HCA_DBG_CQ);
+
+       spin_lock_irqsave(&cq->lock, &lh);
+
+       // loop through CQ
+       next_pp = pp_done_wclist;
+       wc_p = *pp_free_wclist;
+       while( wc_p ) {
+               // poll one CQE
+               err = mthca_poll_one(dev, cq, &qp, &freed, wc_p);
+               if (err)
+                       break;
+
+               // prepare for the next loop
+               *next_pp = wc_p;
+               next_pp = &wc_p->p_next;
+               wc_p = wc_p->p_next;
+       }
+
+       // prepare the results
+       *pp_free_wclist = wc_p;         /* Set the head of the free list. */
+       *next_pp = NULL;                                                /* Clear the tail of the done list. */
+
+       // update consumer index
+       if (freed) {
+               wmb();
+               update_cons_index(dev, cq, freed);
+       }
+
+       spin_unlock_irqrestore(&lh);
+       HCA_EXIT(HCA_DBG_CQ);
+       return (err == 0 || err == -EAGAIN)? 0 : err;
+}
+
+
+int mthca_tavor_arm_cq(struct ib_cq *cq, enum ib_cq_notify notify)
+{
+       __be32 doorbell[2];
+
+       doorbell[0] = cl_hton32((notify == IB_CQ_SOLICITED ?
+                                  MTHCA_TAVOR_CQ_DB_REQ_NOT_SOL :
+                                  MTHCA_TAVOR_CQ_DB_REQ_NOT)      |
+                                 to_mcq(cq)->cqn);
+       doorbell[1] = (__be32) 0xffffffff;
+
+       mthca_write64(doorbell,
+                     to_mdev(cq->device)->kar + MTHCA_CQ_DOORBELL,
+                     MTHCA_GET_DOORBELL_LOCK(&to_mdev(cq->device)->doorbell_lock));
+
+       return 0;
+}
+
+int mthca_arbel_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify notify)
+{
+       struct mthca_cq *cq = to_mcq(ibcq);
+       __be32 doorbell[2];
+       u32 sn;
+       __be32 ci;
+
+       sn = cq->arm_sn & 3;
+       ci = cl_hton32(cq->cons_index);
+
+       doorbell[0] = ci;
+       doorbell[1] = cl_hton32((cq->cqn << 8) | (2 << 5) | (sn << 3) |
+                                 (notify == IB_CQ_SOLICITED ? 1 : 2));
+
+       mthca_write_db_rec(doorbell, cq->arm_db);
+
+       /*
+        * Make sure that the doorbell record in host memory is
+        * written before ringing the doorbell via PCI MMIO.
+        */
+       wmb();
+
+       doorbell[0] = cl_hton32((sn << 28)                       |
+                                 (notify == IB_CQ_SOLICITED ?
+                                  MTHCA_ARBEL_CQ_DB_REQ_NOT_SOL :
+                                  MTHCA_ARBEL_CQ_DB_REQ_NOT)      |
+                                 cq->cqn);
+       doorbell[1] = ci;
+
+       mthca_write64(doorbell,
+                     to_mdev(ibcq->device)->kar + MTHCA_CQ_DOORBELL,
+                     MTHCA_GET_DOORBELL_LOCK(&to_mdev(ibcq->device)->doorbell_lock));
+
+       return 0;
+}
+
+static void mthca_free_cq_buf(struct mthca_dev *dev, struct mthca_cq *cq)
+{
+       mthca_buf_free(dev, (cq->ibcq.cqe + 1) * MTHCA_CQ_ENTRY_SIZE,
+                      &cq->queue, cq->is_direct, &cq->mr);
+}
+
+int mthca_init_cq(struct mthca_dev *dev, int nent,
+                 struct mthca_ucontext *ctx, u32 pdn,
+                 struct mthca_cq *cq)
+{
+       int size = nent * MTHCA_CQ_ENTRY_SIZE;
+       struct mthca_mailbox *mailbox;
+       struct mthca_cq_context *cq_context;
+       int err = -ENOMEM;
+       u8 status;
+       int i;
+       SPIN_LOCK_PREP(lh);
+
+       might_sleep();
+
+       cq->ibcq.cqe  = nent - 1;
+       cq->is_kernel = !ctx;
+
+       cq->cqn = mthca_alloc(&dev->cq_table.alloc);
+       if (cq->cqn == -1)
+               return -ENOMEM;
+
+       if (mthca_is_memfree(dev)) {
+               err = mthca_table_get(dev, dev->cq_table.table, cq->cqn);
+               if (err)
+                       goto err_out;
+
+               if (cq->is_kernel) {
+                       cq->arm_sn = 1;
+
+                       err = -ENOMEM;
+
+                       cq->set_ci_db_index = mthca_alloc_db(dev, MTHCA_DB_TYPE_CQ_SET_CI,
+                                                            cq->cqn, &cq->set_ci_db);
+                       if (cq->set_ci_db_index < 0)
+                               goto err_out_icm;
+
+                       cq->arm_db_index = mthca_alloc_db(dev, MTHCA_DB_TYPE_CQ_ARM,
+                                                         cq->cqn, &cq->arm_db);
+                       if (cq->arm_db_index < 0)
+                               goto err_out_ci;
+               }
+       }
+
+       mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
+       if (IS_ERR(mailbox))
+               goto err_out_arm;
+
+       cq_context = mailbox->buf;
+
+       if (cq->is_kernel) {
+               err = mthca_buf_alloc(dev, size, MTHCA_MAX_DIRECT_CQ_SIZE,
+                                     &cq->queue, &cq->is_direct,
+                                     &dev->driver_pd, 1, &cq->mr);
+               if (err)
+                       goto err_out_mailbox;
+
+               for (i = 0; i < nent; ++i)
+                       set_cqe_hw(get_cqe(cq, i));
+       }
+
+       spin_lock_init(&cq->lock);
+       atomic_set(&cq->refcount, 1);
+       init_waitqueue_head(&cq->wait);
+
+       RtlZeroMemory(cq_context, sizeof *cq_context);
+       cq_context->flags           = cl_hton32(MTHCA_CQ_STATUS_OK      |
+                                                 MTHCA_CQ_STATE_DISARMED |
+                                                 MTHCA_CQ_FLAG_TR);
+       cq_context->logsize_usrpage = cl_hton32((ffs(nent) - 1) << 24);
+       if (ctx)
+               cq_context->logsize_usrpage |= cl_hton32(ctx->uar.index);
+       else
+               cq_context->logsize_usrpage |= cl_hton32(dev->driver_uar.index);
+       cq_context->error_eqn       = cl_hton32(dev->eq_table.eq[MTHCA_EQ_ASYNC].eqn);
+       cq_context->comp_eqn        = cl_hton32(dev->eq_table.eq[MTHCA_EQ_COMP].eqn);
+       cq_context->pd              = cl_hton32(pdn);
+       cq_context->lkey            = cl_hton32(cq->mr.ibmr.lkey);
+       cq_context->cqn             = cl_hton32(cq->cqn);
+
+       if (mthca_is_memfree(dev)) {
+               cq_context->ci_db    = cl_hton32(cq->set_ci_db_index);
+               cq_context->state_db = cl_hton32(cq->arm_db_index);
+       }
+
+       err = mthca_SW2HW_CQ(dev, mailbox, cq->cqn, &status);
+       if (err) {
+               HCA_PRINT(TRACE_LEVEL_WARNING  ,HCA_DBG_LOW  ,("SW2HW_CQ failed (%d)\n", err));
+               goto err_out_free_mr;
+       }
+
+       if (status) {
+               HCA_PRINT(TRACE_LEVEL_WARNING,HCA_DBG_LOW,("SW2HW_CQ returned status 0x%02x\n",
+                          status));
+               err = -EINVAL;
+               goto err_out_free_mr;
+       }
+
+       spin_lock_irq(&dev->cq_table.lock, &lh);
+       if (mthca_array_set(&dev->cq_table.cq,
+                           cq->cqn & (dev->limits.num_cqs - 1),
+                           cq)) {
+               spin_unlock_irq(&lh);
+               goto err_out_free_mr;
+       }
+       spin_unlock_irq(&lh);
+
+       cq->cons_index = 0;
+
+       mthca_free_mailbox(dev, mailbox);
+
+       return 0;
+
+err_out_free_mr:
+       if (cq->is_kernel)
+               mthca_free_cq_buf(dev, cq);
+
+err_out_mailbox:
+       mthca_free_mailbox(dev, mailbox);
+
+err_out_arm:
+       if (cq->is_kernel && mthca_is_memfree(dev))
+               mthca_free_db(dev, MTHCA_DB_TYPE_CQ_ARM, cq->arm_db_index);
+
+err_out_ci:
+       if (cq->is_kernel && mthca_is_memfree(dev))
+               mthca_free_db(dev, MTHCA_DB_TYPE_CQ_SET_CI, cq->set_ci_db_index);
+
+err_out_icm:
+       mthca_table_put(dev, dev->cq_table.table, cq->cqn);
+
+err_out:
+       mthca_free(&dev->cq_table.alloc, cq->cqn);
+
+       return err;
+}
+
+void mthca_free_cq(struct mthca_dev *dev,
+                  struct mthca_cq *cq)
+{
+       struct mthca_mailbox *mailbox;
+       int err;
+       u8 status;
+       SPIN_LOCK_PREP(lh);
+
+       might_sleep();
+
+       mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
+       if (IS_ERR(mailbox)) {
+               HCA_PRINT(TRACE_LEVEL_WARNING  ,HCA_DBG_LOW  ,("No memory for mailbox to free CQ.\n"));
+               return;
+       }
+
+       err = mthca_HW2SW_CQ(dev, mailbox, cq->cqn, &status);
+       if (err){
+               HCA_PRINT(TRACE_LEVEL_WARNING  ,HCA_DBG_LOW  ,("HW2SW_CQ failed (%d)\n", err));
+       }
+       else if (status){
+               HCA_PRINT(TRACE_LEVEL_WARNING  ,HCA_DBG_LOW  ,("HW2SW_CQ returned status 0x%02x\n", status));
+       }
+
+       #if 0
+       {
+               __be32 *ctx = mailbox->buf;
+               int j;
+
+               HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_LOW ,("context for CQN %x (cons index %x, next sw %d)\n",
+                      cq->cqn, cq->cons_index,
+                      cq->is_kernel ? !!next_cqe_sw(cq) : 0));
+               for (j = 0; j < 16; ++j)
+                       HCA_PRINT(TRACE_LEVEL_ERROR   ,HCA_DBG_LOW   ,("[%2x] %08x\n", j * 4, cl_ntoh32(ctx[j])));
+       }
+       #endif
+
+       spin_lock_irq(&dev->cq_table.lock, &lh);
+       mthca_array_clear(&dev->cq_table.cq,
+                         cq->cqn & (dev->limits.num_cqs - 1));
+       spin_unlock_irq(&lh);
+
+       /* wait for all RUNNING DPCs on these EQs to complete */
+       {
+               ASSERT(KeGetCurrentIrql() <= DISPATCH_LEVEL);
+               // wait for DPCs that use the completion EQ to finish
+               spin_lock_sync( &dev->eq_table.eq[MTHCA_EQ_COMP].lock);
+               // TODO: is syncing the async EQ really needed here?
+               spin_lock_sync( &dev->eq_table.eq[MTHCA_EQ_ASYNC].lock );
+       }
+
+       atomic_dec(&cq->refcount);
+       wait_event(&cq->wait, !atomic_read(&cq->refcount));
+
+       if (cq->is_kernel) {
+               mthca_free_cq_buf(dev, cq);
+               if (mthca_is_memfree(dev)) {
+                       mthca_free_db(dev, MTHCA_DB_TYPE_CQ_ARM,    cq->arm_db_index);
+                       mthca_free_db(dev, MTHCA_DB_TYPE_CQ_SET_CI, cq->set_ci_db_index);
+               }
+       }
+
+       mthca_table_put(dev, dev->cq_table.table, cq->cqn);
+       mthca_free(&dev->cq_table.alloc, cq->cqn);
+       mthca_free_mailbox(dev, mailbox);
+}
+
+int mthca_init_cq_table(struct mthca_dev *dev)
+{
+       int err;
+
+       spin_lock_init(&dev->cq_table.lock);
+
+       err = mthca_alloc_init(&dev->cq_table.alloc,
+                              dev->limits.num_cqs,
+                              (1 << 24) - 1,
+                              dev->limits.reserved_cqs);
+       if (err)
+               return err;
+
+       err = mthca_array_init(&dev->cq_table.cq,
+                              dev->limits.num_cqs);
+       if (err)
+               mthca_alloc_cleanup(&dev->cq_table.alloc);
+
+       return err;
+}
+
+void mthca_cleanup_cq_table(struct mthca_dev *dev)
+{
+       mthca_array_cleanup(&dev->cq_table.cq, dev->limits.num_cqs);
+       mthca_alloc_cleanup(&dev->cq_table.alloc);
+}
+
+
diff --git a/trunk/hw/mthca/kernel/mthca_dev.h b/trunk/hw/mthca/kernel/mthca_dev.h
new file mode 100644 (file)
index 0000000..954c753
--- /dev/null
@@ -0,0 +1,590 @@
+/*
+ * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
+ * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
+ * Copyright (c) 2005 Cisco Systems.  All rights reserved.
+ * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2004 Voltaire, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id: mthca_dev.h 3047 2005-08-10 03:59:35Z roland $
+ */
+
+#ifndef MTHCA_DEV_H
+#define MTHCA_DEV_H
+
+#include "hca_driver.h"
+#include "mthca_log.h"
+#include "mthca_provider.h"
+#include "mthca_doorbell.h"
+
+// must be synchronized with MTHCA.INF
+#define DRV_NAME       "mthca"
+#define PFX            DRV_NAME ": "
+#define DRV_VERSION    "1.0.4"
+#define DRV_RELDATE    "03/01/2006"
+
+#define HZ             1000000 /* 1 sec in usecs */
+
+enum {
+       MTHCA_FLAG_DDR_HIDDEN = 1 << 1,
+       MTHCA_FLAG_SRQ        = 1 << 2,
+       MTHCA_FLAG_MSI        = 1 << 3,
+       MTHCA_FLAG_MSI_X      = 1 << 4,
+       MTHCA_FLAG_NO_LAM     = 1 << 5,
+       MTHCA_FLAG_FMR        = 1 << 6,
+       MTHCA_FLAG_MEMFREE    = 1 << 7,
+       MTHCA_FLAG_PCIE       = 1 << 8
+};
+
+enum {
+       MTHCA_MAX_PORTS = 2
+};
+
+enum {
+       MTHCA_BOARD_ID_LEN = 64
+};
+
+enum {
+       MTHCA_EQ_CONTEXT_SIZE =  0x40,
+       MTHCA_CQ_CONTEXT_SIZE =  0x40,
+       MTHCA_QP_CONTEXT_SIZE = 0x200,
+       MTHCA_RDB_ENTRY_SIZE  =  0x20,
+       MTHCA_AV_SIZE         =  0x20,
+       MTHCA_MGM_ENTRY_SIZE  =  0x40,
+
+       /* Arbel FW gives us these, but we need them for Tavor */
+       MTHCA_MPT_ENTRY_SIZE  =  0x40,
+       MTHCA_MTT_SEG_SIZE    =  0x40,
+
+       MTHCA_QP_PER_MGM      = 4 * (MTHCA_MGM_ENTRY_SIZE / 16 - 2)
+};
+
+enum {
+       MTHCA_EQ_CMD,
+       MTHCA_EQ_ASYNC,
+       MTHCA_EQ_COMP,
+       MTHCA_NUM_EQ
+};
+
+enum mthca_wr_opcode{
+       MTHCA_OPCODE_NOP            = 0x00,
+       MTHCA_OPCODE_RDMA_WRITE     = 0x08,
+       MTHCA_OPCODE_RDMA_WRITE_IMM = 0x09,
+       MTHCA_OPCODE_SEND           = 0x0a,
+       MTHCA_OPCODE_SEND_IMM       = 0x0b,
+       MTHCA_OPCODE_RDMA_READ      = 0x10,
+       MTHCA_OPCODE_ATOMIC_CS      = 0x11,
+       MTHCA_OPCODE_ATOMIC_FA      = 0x12,
+       MTHCA_OPCODE_BIND_MW        = 0x18,
+       MTHCA_OPCODE_INVALID        = 0xff
+};
+
+struct mthca_cmd {
+       struct pci_pool          *pool;
+       int                       use_events;
+       KMUTEX            hcr_mutex;
+       KSEMAPHORE              poll_sem;
+       KSEMAPHORE        event_sem;
+       int                       max_cmds;
+       spinlock_t                context_lock;
+       int                       free_head;
+       struct mthca_cmd_context *context;
+       u16                       token_mask;
+};
+
+struct mthca_limits {
+       int      num_ports;
+       int      vl_cap;
+       int      mtu_cap;
+       int      gid_table_len;
+       int      pkey_table_len;
+       int      local_ca_ack_delay;
+       int      num_uars;
+       int      max_sg;
+       int      num_qps;
+       int      max_wqes;
+       int      max_desc_sz;
+       int      max_qp_init_rdma;
+       int      reserved_qps;
+       int      num_srqs;
+       int      reserved_srqs;
+       int      max_srq_wqes;
+       int      num_eecs;
+       int      reserved_eecs;
+       int      num_cqs;
+       int      max_cqes;
+       int      reserved_cqs;
+       int      num_eqs;
+       int      reserved_eqs;
+       int      num_mpts;
+       int      num_mtt_segs;
+       int      fmr_reserved_mtts;
+       int      reserved_mtts;
+       int      reserved_mrws;
+       int      reserved_uars;
+       int      num_mgms;
+       int      num_amgms;
+       int      reserved_mcgs;
+       int      num_pds;
+       int      reserved_pds;
+       u32     page_size_cap;
+       u32      flags;
+       u8      port_width_cap;
+};
+
+struct mthca_alloc {
+       u32            last;
+       u32            top;
+       u32            max;
+       u32            mask;
+       spinlock_t     lock;
+       unsigned long *table;
+};
+
+struct mthca_array {
+       struct {
+               void    **page;
+               int       used;
+       } *page_list;
+};
+
+struct mthca_uar_table {
+       struct mthca_alloc alloc;
+       u64                uarc_base;
+       int                uarc_size;
+};
+
+struct mthca_pd_table {
+       struct mthca_alloc alloc;
+};
+
+struct mthca_buddy {
+       unsigned long **bits;
+       int             max_order;
+       spinlock_t      lock;
+};
+
+struct mthca_mr_table {
+       struct mthca_alloc      mpt_alloc;
+       struct mthca_buddy      mtt_buddy;
+       struct mthca_buddy     *fmr_mtt_buddy;
+       u64                     mtt_base;
+       u64                     mpt_base;
+       struct mthca_icm_table *mtt_table;
+       struct mthca_icm_table *mpt_table;
+       struct {
+               void __iomem   *mpt_base;
+               SIZE_T mpt_base_size;
+               void __iomem   *mtt_base;
+               SIZE_T mtt_base_size;
+               struct mthca_buddy mtt_buddy;
+       } tavor_fmr;
+};
+
+struct mthca_eq_table {
+       struct mthca_alloc alloc;
+       void __iomem      *clr_int;
+       u32                clr_mask;
+       u32                arm_mask;
+       struct mthca_eq    eq[MTHCA_NUM_EQ];
+       u64                icm_virt;
+       struct scatterlist sg;
+       int                have_irq;
+       u8                 inta_pin;
+       KLOCK_QUEUE_HANDLE  lockh;
+};
+
+struct mthca_cq_table {
+       struct mthca_alloc      alloc;
+       spinlock_t              lock;
+       struct mthca_array      cq;
+       struct mthca_icm_table *table;
+};
+
+struct mthca_srq_table {
+       struct mthca_alloc      alloc;
+       spinlock_t              lock;
+       struct mthca_array      srq;
+       struct mthca_icm_table *table;
+};
+
+struct mthca_qp_table {
+       struct mthca_alloc      alloc;
+       u32                     rdb_base;
+       int                     rdb_shift;
+       int                     sqp_start;
+       spinlock_t              lock;
+       struct mthca_array      qp;
+       struct mthca_icm_table *qp_table;
+       struct mthca_icm_table *eqp_table;
+       struct mthca_icm_table *rdb_table;
+};
+
+struct mthca_av_table {
+       struct pci_pool   *pool;
+       int                num_ddr_avs;
+       u64                ddr_av_base;
+       void __iomem      *av_map;
+       SIZE_T  av_map_size;
+       struct mthca_alloc alloc;
+};
+
+struct mthca_mcg_table {
+       KMUTEX          mutex;
+       struct mthca_alloc      alloc;
+       struct mthca_icm_table *table;
+};
+
+struct mthca_catas_err {
+       u64                     addr;
+       u32 __iomem            *map;
+       SIZE_T          map_size;
+       unsigned long           stop;
+       u32                     size;
+       KTIMER  timer;
+       KDPC  timer_dpc;
+       LARGE_INTEGER  interval;
+};
+
+struct mthca_dev {
+       struct ib_device  ib_dev;
+       hca_dev_ext_t *ext;
+
+       int              hca_type;
+       unsigned long    mthca_flags;
+       unsigned long    device_cap_flags;
+
+       u32              rev_id;
+       char             board_id[MTHCA_BOARD_ID_LEN];
+
+       /* firmware info */
+       u64              fw_ver;
+       union {
+               struct {
+                       u64 fw_start;
+                       u64 fw_end;
+               }        tavor;
+               struct {
+                       u64 clr_int_base;
+                       u64 eq_arm_base;
+                       u64 eq_set_ci_base;
+                       struct mthca_icm *fw_icm;
+                       struct mthca_icm *aux_icm;
+                       u16 fw_pages;
+               }        arbel;
+       }                fw;
+
+       u64              ddr_start;
+       u64              ddr_end;
+
+       MTHCA_DECLARE_DOORBELL_LOCK(doorbell_lock)
+       KMUTEX cap_mask_mutex;
+
+       u8 __iomem    *hcr;
+       SIZE_T          hcr_size;
+       u8 __iomem    *kar;
+       SIZE_T          kar_size;
+       u8 __iomem    *clr_base;
+       SIZE_T          clr_base_size;
+       union {
+               struct {
+                       void __iomem *ecr_base;
+                       SIZE_T ecr_base_size;
+               } tavor;
+               struct {
+                       void __iomem *eq_arm;
+                       SIZE_T eq_arm_size;
+                       void __iomem *eq_set_ci_base;
+                       SIZE_T eq_set_ci_base_size;
+               } arbel;
+       } eq_regs;
+
+       struct mthca_cmd    cmd;
+       struct mthca_limits limits;
+
+       struct mthca_uar_table uar_table;
+       struct mthca_pd_table  pd_table;
+       struct mthca_mr_table  mr_table;
+       struct mthca_eq_table  eq_table;
+       struct mthca_cq_table  cq_table;
+       struct mthca_srq_table srq_table;
+       struct mthca_qp_table  qp_table;
+       struct mthca_av_table  av_table;
+       struct mthca_mcg_table mcg_table;
+       struct mthca_catas_err catas_err;
+       struct mthca_uar       driver_uar;
+       struct mthca_db_table *db_tab;
+       struct mthca_pd        driver_pd;
+       struct mthca_mr        driver_mr;
+
+       struct ib_mad_agent  *send_agent[MTHCA_MAX_PORTS][2];
+       struct ib_ah         *sm_ah[MTHCA_MAX_PORTS];
+       spinlock_t            sm_lock;
+       u32     state;
+};
+
+// mthca_dev states
+enum {
+       MTHCA_DEV_UNINITIALIZED,
+       MTHCA_DEV_INITIALIZED,
+       MTHCA_DEV_FAILED
+};     
+
+enum {
+       MTHCA_CQ_ENTRY_SIZE = 0x20
+};
+
+               
+
+#define MTHCA_GET(dest, source, offset)                               \
+       {                                                          \
+               void *__p = (char *) (source) + (offset);             \
+               void *__q = &(dest);            \
+               switch (sizeof (dest)) {                              \
+                       case 1: *(u8 *)__q = *(u8 *) __p;       break;    \
+                       case 2: *(u16 *)__q = (u16)cl_ntoh16(*(u16 *)__p); break;    \
+                       case 4: *(u32 *)__q = (u32)cl_ntoh32(*(u32 *)__p); break;    \
+                       case 8: *(u64 *)__q = (u64)cl_ntoh64(*(u64 *)__p); break;    \
+                       default: ASSERT(0);          \
+               }                                                     \
+       } 
+
+
+#define MTHCA_PUT(dest, source, offset)                               \
+       {                                                          \
+               void *__d = ((char *) (dest) + (offset));             \
+               switch (sizeof(source)) {                             \
+               case 1: *(u8 *) __d = (u8)(source);                break; \
+               case 2: *(__be16 *) __d = cl_hton16((u16)source); break; \
+               case 4: *(__be32 *) __d = cl_hton32((u32)source); break; \
+               case 8: *(__be64 *) __d = cl_hton64((u64)source); break; \
+               default: ASSERT(0);          \
+               }                                                     \
+       } 
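+
+/*
+ * Illustrative sketch of how the accessors above are meant to be used (not a
+ * command path taken from this commit): MTHCA_GET pulls a big-endian field
+ * out of a firmware mailbox into a host-order variable, switching on the size
+ * of the destination, and MTHCA_PUT writes one back.  The offsets below are
+ * hypothetical; real offsets come from the firmware command layouts used
+ * elsewhere in the driver.
+ */
+static inline void example_parse_mailbox(struct mthca_dev *dev, void *outbox)
+{
+       u64 fw_ver;
+
+       MTHCA_GET(fw_ver, outbox, 0x00);        /* 8-byte dest: uses cl_ntoh64 */
+       dev->fw_ver = fw_ver;
+
+       MTHCA_PUT(outbox, dev->rev_id, 0x08);   /* 4-byte source: uses cl_hton32 */
+}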
+
+NTSTATUS mthca_reset(struct mthca_dev *mdev);
+
+u32 mthca_alloc(struct mthca_alloc *alloc);
+void mthca_free(struct mthca_alloc *alloc, u32 obj);
+int mthca_alloc_init(struct mthca_alloc *alloc, u32 num, u32 mask,
+                    u32 reserved);
+void mthca_alloc_cleanup(struct mthca_alloc *alloc);
+void *mthca_array_get(struct mthca_array *array, int index);
+int mthca_array_set(struct mthca_array *array, int index, void *value);
+void mthca_array_clear(struct mthca_array *array, int index);
+int mthca_array_init(struct mthca_array *array, int nent);
+void mthca_array_cleanup(struct mthca_array *array, int nent);
+int mthca_buf_alloc(struct mthca_dev *dev, int size, int max_direct,
+                   union mthca_buf *buf, int *is_direct, struct mthca_pd *pd,
+                   int hca_write, struct mthca_mr *mr);
+void mthca_buf_free(struct mthca_dev *dev, int size, union mthca_buf *buf,
+                   int is_direct, struct mthca_mr *mr);
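+
+/*
+ * Illustrative lifecycle sketch for the bitmap allocator declared above,
+ * mirroring how the EQ/CQ/QP tables use it: num must be a power of two,
+ * mask is num - 1, and the first "reserved" indices are kept for firmware.
+ * The sizes here are made up for the example.
+ */
+static inline int example_alloc_lifecycle(void)
+{
+       struct mthca_alloc alloc;
+       u32 obj;
+       int err;
+
+       err = mthca_alloc_init(&alloc, 64, 64 - 1, 2);  /* 64 objects, 2 reserved */
+       if (err)
+               return err;
+
+       obj = mthca_alloc(&alloc);      /* a free index, or (u32)-1 when exhausted */
+       if (obj != (u32)-1)
+               mthca_free(&alloc, obj);
+
+       mthca_alloc_cleanup(&alloc);
+       return 0;
+}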
+
+int mthca_init_uar_table(struct mthca_dev *dev);
+int mthca_init_pd_table(struct mthca_dev *dev);
+int mthca_init_mr_table(struct mthca_dev *dev);
+int mthca_init_eq_table(struct mthca_dev *dev);
+int mthca_init_cq_table(struct mthca_dev *dev);
+int mthca_init_srq_table(struct mthca_dev *dev);
+int mthca_init_qp_table(struct mthca_dev *dev);
+int mthca_init_av_table(struct mthca_dev *dev);
+int mthca_init_mcg_table(struct mthca_dev *dev);
+
+void mthca_cleanup_uar_table(struct mthca_dev *dev);
+void mthca_cleanup_pd_table(struct mthca_dev *dev);
+void mthca_cleanup_mr_table(struct mthca_dev *dev);
+void mthca_cleanup_eq_table(struct mthca_dev *dev);
+void mthca_cleanup_cq_table(struct mthca_dev *dev);
+void mthca_cleanup_srq_table(struct mthca_dev *dev);
+void mthca_cleanup_qp_table(struct mthca_dev *dev);
+void mthca_cleanup_av_table(struct mthca_dev *dev);
+void mthca_cleanup_mcg_table(struct mthca_dev *dev);
+
+int mthca_register_device(struct mthca_dev *dev);
+void mthca_unregister_device(struct mthca_dev *dev);
+
+void mthca_start_catas_poll(struct mthca_dev *dev);
+void mthca_stop_catas_poll(struct mthca_dev *dev);
+
+int mthca_uar_alloc(struct mthca_dev *dev, struct mthca_uar *uar);
+void mthca_uar_free(struct mthca_dev *dev, struct mthca_uar *uar);
+
+int mthca_pd_alloc(struct mthca_dev *dev, int privileged, struct mthca_pd *pd);
+void mthca_pd_free(struct mthca_dev *dev, struct mthca_pd *pd);
+
+struct mthca_mtt *mthca_alloc_mtt(struct mthca_dev *dev, int size);
+void mthca_free_mtt(struct mthca_dev *dev, struct mthca_mtt *mtt);
+int mthca_write_mtt(struct mthca_dev *dev, struct mthca_mtt *mtt,
+                   int start_index, u64 *buffer_list, int list_len);
+int mthca_mr_alloc(struct mthca_dev *dev, u32 pd, int buffer_size_shift,
+                  u64 iova, u64 total_size, mthca_mpt_access_t access, struct mthca_mr *mr);
+int mthca_mr_alloc_notrans(struct mthca_dev *dev, u32 pd,
+                          mthca_mpt_access_t access, struct mthca_mr *mr);
+int mthca_mr_alloc_phys(struct mthca_dev *dev, u32 pd,
+                       u64 *buffer_list, int buffer_size_shift,
+                       int list_len, u64 iova, u64 total_size,
+                       mthca_mpt_access_t access, struct mthca_mr *mr);
+void mthca_free_mr(struct mthca_dev *dev,  struct mthca_mr *mr);
+
+int mthca_fmr_alloc(struct mthca_dev *dev, u32 pd,
+                   mthca_mpt_access_t access, struct mthca_fmr *fmr);
+int mthca_tavor_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list,
+                            int list_len, u64 iova);
+void mthca_tavor_fmr_unmap(struct mthca_dev *dev, struct mthca_fmr *fmr);
+int mthca_arbel_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list,
+                            int list_len, u64 iova);
+void mthca_arbel_fmr_unmap(struct mthca_dev *dev, struct mthca_fmr *fmr);
+int mthca_free_fmr(struct mthca_dev *dev,  struct mthca_fmr *fmr);
+
+int mthca_map_eq_icm(struct mthca_dev *dev, u64 icm_virt);
+void mthca_unmap_eq_icm(struct mthca_dev *dev);
+
+int mthca_poll_cq(struct ib_cq *ibcq, int num_entries,
+                 struct _ib_wc *entry);
+int mthca_tavor_arm_cq(struct ib_cq *cq, enum ib_cq_notify notify);
+int mthca_arbel_arm_cq(struct ib_cq *cq, enum ib_cq_notify notify);
+int mthca_init_cq(struct mthca_dev *dev, int nent,
+                 struct mthca_ucontext *ctx, u32 pdn,
+                 struct mthca_cq *cq);
+void mthca_free_cq(struct mthca_dev *dev,
+                  struct mthca_cq *cq);
+void mthca_cq_completion(struct mthca_dev *dev, u32 cqn);
+void mthca_cq_event(struct mthca_dev *dev, u32 cqn,
+       enum ib_event_type event_type);
+void mthca_cq_clean(struct mthca_dev *dev, u32 cqn, u32 qpn,
+           struct mthca_srq *srq);
+
+int mthca_alloc_srq(struct mthca_dev *dev, struct mthca_pd *pd,
+                   struct ib_srq_attr *attr, struct mthca_srq *srq);
+void mthca_free_srq(struct mthca_dev *dev, struct mthca_srq *srq);
+int mthca_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
+       enum ib_srq_attr_mask attr_mask);
+void mthca_srq_event(struct mthca_dev *dev, u32 srqn,
+                    enum ib_event_type event_type);
+void mthca_free_srq_wqe(struct mthca_srq *srq, u32 wqe_addr);
+int mthca_tavor_post_srq_recv(struct ib_srq *srq, struct _ib_recv_wr *wr,
+                             struct _ib_recv_wr **bad_wr);
+int mthca_arbel_post_srq_recv(struct ib_srq *srq, struct _ib_recv_wr *wr,
+                             struct _ib_recv_wr **bad_wr);
+
+void mthca_qp_event(struct mthca_dev *dev, u32 qpn,
+                   enum ib_event_type event_type);
+int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask);
+int mthca_tavor_post_send(struct ib_qp *ibqp, struct _ib_send_wr *wr,
+                         struct _ib_send_wr **bad_wr);
+int mthca_tavor_post_receive(struct ib_qp *ibqp, struct _ib_recv_wr *wr,
+                            struct _ib_recv_wr **bad_wr);
+int mthca_arbel_post_send(struct ib_qp *ibqp, struct _ib_send_wr *wr,
+                         struct _ib_send_wr **bad_wr);
+int mthca_arbel_post_receive(struct ib_qp *ibqp, struct _ib_recv_wr *wr,
+                            struct _ib_recv_wr **bad_wr);
+int mthca_free_err_wqe(struct mthca_dev *dev, struct mthca_qp *qp, int is_send,
+                      int index, int *dbd, __be32 *new_wqe);
+int mthca_alloc_qp(struct mthca_dev *dev,
+                  struct mthca_pd *pd,
+                  struct mthca_cq *send_cq,
+                  struct mthca_cq *recv_cq,
+                  enum ib_qp_type_t type,
+                  enum ib_sig_type send_policy,
+                  struct ib_qp_cap *cap,
+                  struct mthca_qp *qp);
+int mthca_alloc_sqp(struct mthca_dev *dev,
+                   struct mthca_pd *pd,
+                   struct mthca_cq *send_cq,
+                   struct mthca_cq *recv_cq,
+                   enum ib_sig_type send_policy,
+                   struct ib_qp_cap *cap,
+                   int qpn,
+                   int port,
+                   struct mthca_sqp *sqp);
+void mthca_free_qp(struct mthca_dev *dev, struct mthca_qp *qp);
+int mthca_create_ah(struct mthca_dev *dev,
+                   struct mthca_pd *pd,
+                   struct ib_ah_attr *ah_attr,
+                   struct mthca_ah *ah);
+int mthca_destroy_ah(struct mthca_dev *dev, struct mthca_ah *ah);
+int mthca_read_ah(struct mthca_dev *dev, struct mthca_ah *ah,
+                 struct ib_ud_header *header);
+
+int mthca_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid);
+int mthca_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid);
+
+int mthca_process_mad(struct ib_device *ibdev,
+                     int mad_flags,
+                     u8 port_num,
+                     struct _ib_wc *in_wc,
+                     struct ib_grh *in_grh,
+                     struct ib_mad *in_mad,
+                     struct ib_mad *out_mad);
+
+static inline struct mthca_dev *to_mdev(struct ib_device *ibdev)
+{
+       return container_of(ibdev, struct mthca_dev, ib_dev);
+}
+
+static inline int mthca_is_memfree(struct mthca_dev *dev)
+{
+       return dev->mthca_flags & MTHCA_FLAG_MEMFREE;
+}
+
+void mthca_get_av_params( struct mthca_ah *ah_p, u8 *port_num, __be16 *dlid, u8 *sr, u8 *path_bits );
+
+void mthca_set_av_params( struct mthca_dev *dev, struct mthca_ah *ah_p, struct ib_ah_attr *ah_attr );
+
+int ib_uverbs_init(void);
+void ib_uverbs_cleanup(void);
+int mthca_ah_grh_present(struct mthca_ah *ah);
+
+
+
+
+
+VOID
+WriteEventLogEntry(
+       PVOID   pi_pIoObject,
+       ULONG   pi_ErrorCode,
+       ULONG   pi_UniqueErrorCode,
+       ULONG   pi_FinalStatus,
+       ULONG   pi_nDataItems,
+       ...
+       );
+
+VOID
+WriteEventLogEntryStr(
+       PVOID   pi_pIoObject,
+       ULONG   pi_ErrorCode,
+       ULONG   pi_UniqueErrorCode,
+       ULONG   pi_FinalStatus,
+       PWCHAR pi_InsertionStr,
+       ULONG   pi_nDataItems,
+       ...
+       );
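+
+/*
+ * Hedged usage sketch for the event-log helpers above: pi_pIoObject is the
+ * driver or device object, pi_ErrorCode an event code from the driver's
+ * message file, and the trailing varargs are assumed here to be ULONG dump
+ * words (pi_nDataItems of them).  EXAMPLE_EVENT_CODE and p_dev_obj are
+ * placeholders, not names defined by this commit:
+ *
+ *     WriteEventLogEntry( p_dev_obj, EXAMPLE_EVENT_CODE, 0, status, 1, (ULONG)eqn );
+ *     WriteEventLogEntryStr( p_dev_obj, EXAMPLE_EVENT_CODE, 0, status, L"EQ0", 0 );
+ */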
+
+#endif /* MTHCA_DEV_H */
diff --git a/trunk/hw/mthca/kernel/mthca_doorbell.h b/trunk/hw/mthca/kernel/mthca_doorbell.h
new file mode 100644 (file)
index 0000000..e7a83f3
--- /dev/null
@@ -0,0 +1,105 @@
+/*
+ * Copyright (c) 2004 Topspin Communications.  All rights reserved.
+ * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
+ * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id: mthca_doorbell.h 2905 2005-07-25 18:26:52Z roland $
+ */
+
+#define MTHCA_RD_DOORBELL      0x00
+#define MTHCA_SEND_DOORBELL    0x10
+#define MTHCA_RECEIVE_DOORBELL 0x18
+#define MTHCA_CQ_DOORBELL      0x20
+#define MTHCA_EQ_DOORBELL      0x28
+
+#if BITS_PER_LONG == 64
+/*
+ * Assume that we can just write a 64-bit doorbell atomically.  s390
+ * actually doesn't have writeq() but S/390 systems don't even have
+ * PCI so we won't worry about it.
+ */
+
+#define MTHCA_DECLARE_DOORBELL_LOCK(name)
+#define MTHCA_INIT_DOORBELL_LOCK(ptr)    do { } while (0)
+#define MTHCA_GET_DOORBELL_LOCK(ptr)      (NULL)
+
+static inline void mthca_write64_raw(__be64 val, void __iomem *dest)
+{
+       __raw_writeq((u64) val, dest);
+}
+
+static inline void mthca_write64(__be32 val[2], void __iomem *dest,
+                                spinlock_t *doorbell_lock)
+{
+       __raw_writeq(*(u64 *) val, dest);
+}
+
+static inline void mthca_write_db_rec(__be32 val[2], __be32 *db)
+{
+       *(u64 *) db = *(u64 *) val;
+}
+
+#else
+
+/*
+ * Just fall back to a spinlock to protect the doorbell if
+ * BITS_PER_LONG is 32 -- there's no portable way to do atomic 64-bit
+ * MMIO writes.
+ */
+
+#define MTHCA_DECLARE_DOORBELL_LOCK(name) spinlock_t name;
+#define MTHCA_INIT_DOORBELL_LOCK(ptr)     spin_lock_init(ptr)
+#define MTHCA_GET_DOORBELL_LOCK(ptr)      (ptr)
+
+static inline void mthca_write64_raw(__be64 val, void __iomem *dest)
+{
+       __raw_writel(((u32 *) &val)[0], dest);
+       __raw_writel(((u32 *) &val)[1], (u8*)dest + 4);
+}
+
+static inline void mthca_write64(__be32 val[2], void __iomem *dest,
+                                spinlock_t *doorbell_lock)
+{
+       SPIN_LOCK_PREP(lh);
+       spin_lock_irqsave(doorbell_lock, &lh);
+       __raw_writel((u32) val[0], dest);
+       __raw_writel((u32) val[1], (u8*)dest + 4);
+       spin_unlock_irqrestore(&lh);
+}
+
+static inline void mthca_write_db_rec(__be32 val[2], __be32 *db)
+{
+       db[0] = val[0];
+       wmb();
+       db[1] = val[1];
+}
+
+#endif
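+
+/*
+ * Illustrative calling pattern (a sketch, not a register definition from this
+ * commit): callers build a doorbell as two big-endian 32-bit words and pass
+ * the per-device doorbell lock, which is only a real spinlock in the 32-bit
+ * build.  The command and parameter below stand in for whatever doorbell is
+ * being rung.
+ */
+static inline void example_ring_doorbell(void __iomem *db_reg,
+                                        spinlock_t *db_lock,
+                                        u32 cmd, u32 param)
+{
+       __be32 doorbell[2];
+
+       doorbell[0] = cl_hton32(cmd);   /* e.g. MTHCA_EQ_DB_SET_CI | eqn */
+       doorbell[1] = cl_hton32(param); /* e.g. the new consumer index */
+
+       mthca_write64(doorbell, db_reg, db_lock);
+}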
diff --git a/trunk/hw/mthca/kernel/mthca_eq.c b/trunk/hw/mthca/kernel/mthca_eq.c
new file mode 100644 (file)
index 0000000..fcfb448
--- /dev/null
@@ -0,0 +1,1075 @@
+/*
+ * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
+ * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id: mthca_eq.c 2905 2005-07-25 18:26:52Z roland $
+ */
+
+#include "mthca_dev.h"
+#if defined(EVENT_TRACING)
+#ifdef offsetof
+#undef offsetof
+#endif
+#include "mthca_eq.tmh"
+#endif
+#include "mthca_cmd.h"
+#include "mthca_config_reg.h"
+
+static int mthca_map_reg(struct mthca_dev *dev,
+                                  u64 offset, unsigned long size,
+                                  void __iomem **map, SIZE_T *map_size);
+static int mthca_map_eq_regs(struct mthca_dev *dev);
+static void mthca_unmap_eq_regs(struct mthca_dev *dev);
+static int mthca_create_eq(struct mthca_dev *dev,
+                                    int nent,
+                                    u8 intr,
+                                    struct mthca_eq *eq);
+
+
+
+#ifdef ALLOC_PRAGMA
+#pragma alloc_text (PAGE, mthca_map_reg)
+#pragma alloc_text (PAGE, mthca_map_eq_regs)
+#pragma alloc_text (PAGE, mthca_init_eq_table)
+#pragma alloc_text (PAGE, mthca_unmap_eq_regs)
+#pragma alloc_text (PAGE, mthca_map_eq_icm)
+#pragma alloc_text (PAGE, mthca_unmap_eq_icm)
+#pragma alloc_text (PAGE, mthca_create_eq)
+#pragma alloc_text (PAGE, mthca_cleanup_eq_table)
+#endif
+
+enum {
+       MTHCA_NUM_ASYNC_EQE = 0x80,
+       MTHCA_NUM_CMD_EQE   = 0x80,
+       MTHCA_NUM_SPARE_EQE = 0x80,
+       MTHCA_EQ_ENTRY_SIZE = 0x20
+};
+
+/*
+ * Must be packed because start is 64 bits but only aligned to 32 bits.
+ */
+#pragma pack(push,1)
+struct mthca_eq_context {
+       __be32 flags;
+       __be64 start;
+       __be32 logsize_usrpage;
+       __be32 tavor_pd;        /* reserved for Arbel */
+       u8     reserved1[3];
+       u8     intr;
+       __be32 arbel_pd;        /* lost_count for Tavor */
+       __be32 lkey;
+       u32    reserved2[2];
+       __be32 consumer_index;
+       __be32 producer_index;
+       u32    reserved3[4];
+};
+#pragma pack(pop)
+
+#define MTHCA_EQ_STATUS_OK          ( 0 << 28)
+#define MTHCA_EQ_STATUS_OVERFLOW    ( 9 << 28)
+#define MTHCA_EQ_STATUS_WRITE_FAIL  (10 << 28)
+#define MTHCA_EQ_OWNER_SW           ( 0 << 24)
+#define MTHCA_EQ_OWNER_HW           ( 1 << 24)
+#define MTHCA_EQ_FLAG_TR            ( 1 << 18)
+#define MTHCA_EQ_FLAG_OI            ( 1 << 17)
+#define MTHCA_EQ_STATE_ARMED        ( 1 <<  8)
+#define MTHCA_EQ_STATE_FIRED        ( 2 <<  8)
+#define MTHCA_EQ_STATE_ALWAYS_ARMED ( 3 <<  8)
+#define MTHCA_EQ_STATE_ARBEL        ( 8 <<  8)
+
+enum {
+       MTHCA_EVENT_TYPE_COMP               = 0x00,
+       MTHCA_EVENT_TYPE_PATH_MIG           = 0x01,
+       MTHCA_EVENT_TYPE_COMM_EST           = 0x02,
+       MTHCA_EVENT_TYPE_SQ_DRAINED         = 0x03,
+       MTHCA_EVENT_TYPE_CQ_ERROR           = 0x04,
+       MTHCA_EVENT_TYPE_WQ_CATAS_ERROR     = 0x05,
+       MTHCA_EVENT_TYPE_EEC_CATAS_ERROR    = 0x06,
+       MTHCA_EVENT_TYPE_PATH_MIG_FAILED    = 0x07,
+       MTHCA_EVENT_TYPE_LOCAL_CATAS_ERROR  = 0x08,
+       MTHCA_EVENT_TYPE_PORT_CHANGE        = 0x09,
+       MTHCA_EVENT_TYPE_CMD                = 0x0a,
+       MTHCA_EVENT_TYPE_WQ_INVAL_REQ_ERROR = 0x10,
+       MTHCA_EVENT_TYPE_ECC_DETECT         = 0x0e,
+       MTHCA_EVENT_TYPE_EQ_OVERFLOW        = 0x0f,
+       MTHCA_EVENT_TYPE_WQ_ACCESS_ERROR    = 0x11,
+       MTHCA_EVENT_TYPE_SRQ_CATAS_ERROR    = 0x12,
+       MTHCA_EVENT_TYPE_SRQ_QP_LAST_WQE    = 0x13,
+       MTHCA_EVENT_TYPE_SRQ_LIMIT          = 0x14
+};
+
+#define MTHCA_ASYNC_EVENT_MASK ((1ULL << MTHCA_EVENT_TYPE_PATH_MIG)           | \
+                               (1ULL << MTHCA_EVENT_TYPE_COMM_EST)           | \
+                               (1ULL << MTHCA_EVENT_TYPE_SQ_DRAINED)         | \
+                               (1ULL << MTHCA_EVENT_TYPE_CQ_ERROR)           | \
+                               (1ULL << MTHCA_EVENT_TYPE_WQ_CATAS_ERROR)     | \
+                               (1ULL << MTHCA_EVENT_TYPE_EEC_CATAS_ERROR)    | \
+                               (1ULL << MTHCA_EVENT_TYPE_PATH_MIG_FAILED)    | \
+                               (1ULL << MTHCA_EVENT_TYPE_WQ_INVAL_REQ_ERROR) | \
+                               (1ULL << MTHCA_EVENT_TYPE_WQ_ACCESS_ERROR)    | \
+                               (1ULL << MTHCA_EVENT_TYPE_LOCAL_CATAS_ERROR)  | \
+                               (1ULL << MTHCA_EVENT_TYPE_PORT_CHANGE)        | \
+                               (1ULL << MTHCA_EVENT_TYPE_ECC_DETECT))
+#define MTHCA_SRQ_EVENT_MASK   ((1ULL << MTHCA_EVENT_TYPE_SRQ_CATAS_ERROR)    | \
+                                       (1ULL << MTHCA_EVENT_TYPE_SRQ_QP_LAST_WQE)              | \
+                                       (1ULL << MTHCA_EVENT_TYPE_SRQ_LIMIT))
+
+#define MTHCA_CMD_EVENT_MASK    (1ULL << MTHCA_EVENT_TYPE_CMD)
+
+#define MTHCA_EQ_DB_INC_CI     (1 << 24)
+#define MTHCA_EQ_DB_REQ_NOT    (2 << 24)
+#define MTHCA_EQ_DB_DISARM_CQ  (3 << 24)
+#define MTHCA_EQ_DB_SET_CI     (4 << 24)
+#define MTHCA_EQ_DB_ALWAYS_ARM (5 << 24)
+
+#pragma pack(push,1)
+struct mthca_eqe {
+       u8 reserved1;
+       u8 type;
+       u8 reserved2;
+       u8 subtype;
+       union {
+               u32 raw[6];
+               struct {
+                       __be32 cqn;
+               } comp;
+               struct {
+                       u16    reserved1;
+                       __be16 token;
+                       u32    reserved2;
+                       u8     reserved3[3];
+                       u8     status;
+                       __be64 out_param;
+               } cmd;
+               struct {
+                       __be32 qpn;
+               } qp;
+               struct {
+                       __be32 srqn;
+               } srq;
+               struct {
+                       __be32 cqn;
+                       u32    reserved1;
+                       u8     reserved2[3];
+                       u8     syndrome;
+               } cq_err;
+               struct {
+                       u32    reserved1[2];
+                       __be32 port;
+               } port_change;
+       } event;
+       u8 reserved3[3];
+       u8 owner;
+};
+#pragma pack(pop)
+
+#define  MTHCA_EQ_ENTRY_OWNER_SW      (0 << 7)
+#define  MTHCA_EQ_ENTRY_OWNER_HW      (1 << 7)
+
+static inline u64 async_mask(struct mthca_dev *dev)
+{
+       return dev->mthca_flags & MTHCA_FLAG_SRQ ?
+               MTHCA_ASYNC_EVENT_MASK | MTHCA_SRQ_EVENT_MASK :
+               MTHCA_ASYNC_EVENT_MASK;
+}
+
+static inline void tavor_set_eq_ci(struct mthca_dev *dev, struct mthca_eq *eq, u32 ci)
+{
+       __be32 doorbell[2];
+
+       doorbell[0] = cl_hton32(MTHCA_EQ_DB_SET_CI | eq->eqn);
+       doorbell[1] = cl_hton32(ci & (eq->nent - 1));
+
+       /*
+        * This barrier makes sure that all updates to ownership bits
+        * done by set_eqe_hw() hit memory before the consumer index
+        * is updated.  set_eq_ci() allows the HCA to possibly write
+        * more EQ entries, and we want to avoid the exceedingly
+        * unlikely possibility of the HCA writing an entry and then
+        * having set_eqe_hw() overwrite the owner field.
+        */
+       wmb();
+       mthca_write64(doorbell,
+                     dev->kar + MTHCA_EQ_DOORBELL,
+                     MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
+}
+
+static inline void arbel_set_eq_ci(struct mthca_dev *dev, struct mthca_eq *eq, u32 ci)
+{
+       /* See comment in tavor_set_eq_ci() above. */
+       wmb();
+       __raw_writel((u32) cl_hton32(ci),
+               (u8*)dev->eq_regs.arbel.eq_set_ci_base + eq->eqn * 8);
+       /* We still want ordering, just not swabbing, so add a barrier */
+       mb();
+}
+
+static inline void set_eq_ci(struct mthca_dev *dev, struct mthca_eq *eq, u32 ci)
+{
+       if (mthca_is_memfree(dev))
+               arbel_set_eq_ci(dev, eq, ci);
+       else
+               tavor_set_eq_ci(dev, eq, ci);
+}
+
+static inline void tavor_eq_req_not(struct mthca_dev *dev, int eqn)
+{
+       __be32 doorbell[2];
+
+       doorbell[0] = cl_hton32(MTHCA_EQ_DB_REQ_NOT | eqn);
+       doorbell[1] = 0;
+
+       mthca_write64(doorbell,
+                     dev->kar + MTHCA_EQ_DOORBELL,
+                     MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
+}
+
+static inline void arbel_eq_req_not(struct mthca_dev *dev, u32 eqn_mask)
+{
+       writel(eqn_mask, dev->eq_regs.arbel.eq_arm);
+}
+
+static inline void disarm_cq(struct mthca_dev *dev, int eqn, int cqn)
+{
+       if (!mthca_is_memfree(dev)) {
+               __be32 doorbell[2];
+
+               doorbell[0] = cl_hton32(MTHCA_EQ_DB_DISARM_CQ | eqn);
+               doorbell[1] = cl_hton32(cqn);
+
+               mthca_write64(doorbell,
+                             dev->kar + MTHCA_EQ_DOORBELL,
+                             MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
+       }
+}
+
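+/*
+ * EQ entries are passed back and forth through the owner byte: set_eqe_hw()
+ * marks an entry hardware-owned once software has consumed it, the HCA clears
+ * that bit when it writes a new event into the entry, and next_eqe_sw()
+ * returns the entry at the consumer index only while the hardware-ownership
+ * bit is clear.
+ */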
+static inline struct mthca_eqe *get_eqe(struct mthca_eq *eq, u32 entry)
+{
+       unsigned long off = (entry & (eq->nent - 1)) * MTHCA_EQ_ENTRY_SIZE;
+       return (struct mthca_eqe *)((u8*)eq->page_list[off / PAGE_SIZE].page + off % PAGE_SIZE);
+}
+
+static inline struct mthca_eqe* next_eqe_sw(struct mthca_eq *eq)
+{
+       struct mthca_eqe* eqe;
+       eqe = get_eqe(eq, eq->cons_index);
+       return (MTHCA_EQ_ENTRY_OWNER_HW & eqe->owner) ? NULL : eqe;
+}
+
+static inline void set_eqe_hw(struct mthca_eqe *eqe)
+{
+       eqe->owner =  MTHCA_EQ_ENTRY_OWNER_HW;
+}
+
+static void port_change(struct mthca_dev *dev, int port, int active)
+{
+       struct ib_event record;
+
+       HCA_PRINT(TRACE_LEVEL_VERBOSE,HCA_DBG_LOW,("Port change to %s for port %d\n",
+                 active ? "active" : "down", port));
+
+       record.device = &dev->ib_dev;
+       record.event  = active ? IB_EVENT_PORT_ACTIVE : IB_EVENT_PORT_ERR;
+       record.element.port_num = (u8)port;
+       // Gen2 ib_core mechanism
+       ib_dispatch_event(&record);
+       // our callback
+       ca_event_handler( &record, &dev->ext->hca.hob );
+}
+
+static int mthca_eq_int(struct mthca_dev *dev, struct mthca_eq *eq)
+{
+       int disarm_cqn;
+       int  eqes_found = 0;
+       int set_ci = 0;
+       struct mthca_eqe *eqe = next_eqe_sw(eq);
+
+       while (eqe) {
+
+               /*
+                * Make sure we read EQ entry contents after we've
+                * checked the ownership bit.
+                */
+               rmb();
+
+               switch (eqe->type) {
+               case MTHCA_EVENT_TYPE_COMP:
+                       disarm_cqn = cl_ntoh32(eqe->event.comp.cqn) & 0xffffff;
+                       disarm_cq(dev, eq->eqn, disarm_cqn);
+                       mthca_cq_completion(dev, disarm_cqn);
+                       break;
+
+               case MTHCA_EVENT_TYPE_PATH_MIG:
+                       mthca_qp_event(dev, cl_ntoh32(eqe->event.qp.qpn) & 0xffffff,
+                                      IB_EVENT_PATH_MIG);
+                       break;
+
+               case MTHCA_EVENT_TYPE_COMM_EST:
+                       mthca_qp_event(dev, cl_ntoh32(eqe->event.qp.qpn) & 0xffffff,
+                                      IB_EVENT_COMM_EST);
+                       break;
+
+               case MTHCA_EVENT_TYPE_SQ_DRAINED:
+                       mthca_qp_event(dev, cl_ntoh32(eqe->event.qp.qpn) & 0xffffff,
+                                      IB_EVENT_SQ_DRAINED);
+                       break;
+
+               case MTHCA_EVENT_TYPE_SRQ_QP_LAST_WQE:
+                       mthca_qp_event(dev, cl_ntoh32(eqe->event.qp.qpn) & 0xffffff,
+                                      IB_EVENT_QP_LAST_WQE_REACHED);
+                       break;
+
+               case MTHCA_EVENT_TYPE_SRQ_LIMIT:
+                       mthca_srq_event(dev, cl_ntoh32(eqe->event.srq.srqn) & 0xffffff,
+                                               IB_EVENT_SRQ_LIMIT_REACHED);
+                       break;
+
+               case MTHCA_EVENT_TYPE_WQ_CATAS_ERROR:
+                       mthca_qp_event(dev, cl_ntoh32(eqe->event.qp.qpn) & 0xffffff,
+                                      IB_EVENT_QP_FATAL);
+                       break;
+
+               case MTHCA_EVENT_TYPE_PATH_MIG_FAILED:
+                       mthca_qp_event(dev, cl_ntoh32(eqe->event.qp.qpn) & 0xffffff,
+                                      IB_EVENT_PATH_MIG_ERR);
+                       break;
+
+               case MTHCA_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
+                       mthca_qp_event(dev, cl_ntoh32(eqe->event.qp.qpn) & 0xffffff,
+                                      IB_EVENT_QP_REQ_ERR);
+                       break;
+
+               case MTHCA_EVENT_TYPE_WQ_ACCESS_ERROR:
+                       mthca_qp_event(dev, cl_ntoh32(eqe->event.qp.qpn) & 0xffffff,
+                                      IB_EVENT_QP_ACCESS_ERR);
+                       break;
+
+               case MTHCA_EVENT_TYPE_CMD:
+                       mthca_cmd_event(dev,
+                                       cl_ntoh16(eqe->event.cmd.token),
+                                       eqe->event.cmd.status,
+                                       cl_ntoh64(eqe->event.cmd.out_param));
+                       break;
+
+               case MTHCA_EVENT_TYPE_PORT_CHANGE:
+                       port_change(dev,
+                                   (cl_ntoh32(eqe->event.port_change.port) >> 28) & 3,
+                                   eqe->subtype == 0x4);
+                       break;
+
+               case MTHCA_EVENT_TYPE_CQ_ERROR:
+                       HCA_PRINT(TRACE_LEVEL_WARNING,HCA_DBG_LOW, ("CQ %s on CQN %06x (syndrome %d)\n",
+                                  eqe->event.cq_err.syndrome == 1 ?
+                                  "overrun" : "access violation",
+                                  cl_ntoh32(eqe->event.cq_err.cqn) & 0xffffff, eqe->event.cq_err.syndrome));
+                       mthca_cq_event(dev, cl_ntoh32(eqe->event.cq_err.cqn),
+                               IB_EVENT_CQ_ERR);
+                       break;
+
+               case MTHCA_EVENT_TYPE_EQ_OVERFLOW:
+                       HCA_PRINT(TRACE_LEVEL_WARNING  ,HCA_DBG_LOW  ,("EQ overrun on EQN %d\n", eq->eqn));
+                       break;
+
+               case MTHCA_EVENT_TYPE_EEC_CATAS_ERROR:
+               case MTHCA_EVENT_TYPE_SRQ_CATAS_ERROR:
+               case MTHCA_EVENT_TYPE_LOCAL_CATAS_ERROR:
+               case MTHCA_EVENT_TYPE_ECC_DETECT:
+               default:
+                       HCA_PRINT(TRACE_LEVEL_WARNING,HCA_DBG_LOW, ("Unhandled event %02x(%02x) on EQ %d\n",
+                                  eqe->type, eqe->subtype, eq->eqn));
+                       break;
+               };
+
+               set_eqe_hw(eqe);
+               ++eq->cons_index;
+               eqes_found = 1;
+               ++set_ci;
+
+               /*
+                * The HCA will think the queue has overflowed if we
+                * don't tell it we've been processing events.  We
+                * create our EQs with MTHCA_NUM_SPARE_EQE extra
+                * entries, so we must update our consumer index at
+                * least that often.
+                */
+               if (unlikely(set_ci >= MTHCA_NUM_SPARE_EQE)) {
+                       /*
+                        * Conditional on hca_type is OK here because
+                        * this is a rare case, not the fast path.
+                        */
+                       set_eq_ci(dev, eq, eq->cons_index);
+                       set_ci = 0;
+               }
+               eqe = next_eqe_sw(eq);
+       }
+
+       /*
+        * Rely on caller to set consumer index so that we don't have
+        * to test hca_type in our interrupt handling fast path.
+        */
+       return eqes_found;
+}
+
+static void mthca_tavor_dpc( PRKDPC dpc, 
+       PVOID ctx, PVOID arg1, PVOID arg2 )
+{
+       struct mthca_eq  *eq  = ctx;
+       struct mthca_dev *dev = eq->dev;
+       SPIN_LOCK_PREP(lh);
+
+       UNREFERENCED_PARAMETER(dpc);
+       UNREFERENCED_PARAMETER(arg1);
+       UNREFERENCED_PARAMETER(arg2);
+
+       spin_lock_dpc(&eq->lock, &lh);
+
+       /* the 'if' guards against the case where two DPCs were queued for the same EQ */
+       if (mthca_eq_int(dev, eq)) {
+               tavor_set_eq_ci(dev, eq, eq->cons_index);
+               tavor_eq_req_not(dev, eq->eqn);
+       }
+
+       spin_unlock_dpc(&lh);
+}
+
+static BOOLEAN mthca_tavor_interrupt(
+       PKINTERRUPT     int_obj, 
+       PVOID                           ctx
+       )
+{
+       struct mthca_dev *dev = ctx;
+       u32 ecr;
+       int i;
+
+       UNREFERENCED_PARAMETER(int_obj);
+
+       if (dev->eq_table.clr_mask)
+               writel(dev->eq_table.clr_mask, dev->eq_table.clr_int);
+
+       ecr = readl((u8*)dev->eq_regs.tavor.ecr_base + 4);
+       if (!ecr)
+               return FALSE;
+
+       writel(ecr, (u8*)dev->eq_regs.tavor.ecr_base +
+              MTHCA_ECR_CLR_BASE - MTHCA_ECR_BASE + 4);
+
+       for (i = 0; i < MTHCA_NUM_EQ; ++i) {
+               if (ecr & dev->eq_table.eq[i].eqn_mask &&
+                   next_eqe_sw(&dev->eq_table.eq[i])) {
+                       KeInsertQueueDpc(&dev->eq_table.eq[i].dpc, NULL, NULL);
+               }
+       }
+
+       return TRUE;
+}
+
+#ifdef MSI_SUPPORT
+static irqreturn_t mthca_tavor_msi_x_interrupt(int irq, void *eq_ptr,
+                                        struct pt_regs *regs)
+{
+       struct mthca_eq  *eq  = eq_ptr;
+       struct mthca_dev *dev = eq->dev;
+
+       mthca_eq_int(dev, eq);
+       tavor_set_eq_ci(dev, eq, eq->cons_index);
+       tavor_eq_req_not(dev, eq->eqn);
+
+       /* MSI-X vectors always belong to us */
+       return IRQ_HANDLED;
+}
+#endif
+
+static void mthca_arbel_dpc( PRKDPC dpc, 
+       PVOID ctx, PVOID arg1, PVOID arg2 )
+{
+       struct mthca_eq  *eq  = ctx;
+       struct mthca_dev *dev = eq->dev;
+       SPIN_LOCK_PREP(lh);
+
+       UNREFERENCED_PARAMETER(dpc);
+       UNREFERENCED_PARAMETER(arg1);
+       UNREFERENCED_PARAMETER(arg2);
+
+       spin_lock_dpc(&eq->lock, &lh);
+
+       /* the 'if' guards against the case where two DPCs were queued for the same EQ */
+       if (mthca_eq_int(dev, eq))
+               arbel_set_eq_ci(dev, eq, eq->cons_index);
+       arbel_eq_req_not(dev, eq->eqn_mask);
+
+       spin_unlock_dpc(&lh);
+}
+
+static BOOLEAN mthca_arbel_interrupt(
+       PKINTERRUPT     int_obj, 
+       PVOID                           ctx
+       )
+{
+       struct mthca_dev *dev = ctx;
+       int work = 0;
+       int i;
+
+       UNREFERENCED_PARAMETER(int_obj);
+
+       if (dev->eq_table.clr_mask)
+               writel(dev->eq_table.clr_mask, dev->eq_table.clr_int);
+
+       for (i = 0; i < MTHCA_NUM_EQ; ++i) {
+               if (next_eqe_sw( &dev->eq_table.eq[i]) ) {
+                       work = 1;
+                       KeInsertQueueDpc(&dev->eq_table.eq[i].dpc, NULL, NULL);
+               }
+       }
+
+       return (BOOLEAN)work;
+}
+
+#ifdef MSI_SUPPORT
+static irqreturn_t mthca_arbel_msi_x_interrupt(int irq, void *eq_ptr,
+                                              struct pt_regs *regs)
+{
+       struct mthca_eq  *eq  = eq_ptr;
+       struct mthca_dev *dev = eq->dev;
+
+       mthca_eq_int(dev, eq);
+       arbel_set_eq_ci(dev, eq, eq->cons_index);
+       arbel_eq_req_not(dev, eq->eqn_mask);
+
+       /* MSI-X vectors always belong to us */
+       return IRQ_HANDLED;
+}
+#endif
+
+static int mthca_create_eq(struct mthca_dev *dev,
+                                    int nent,
+                                    u8 intr,
+                                    struct mthca_eq *eq)
+{
+       int npages;
+       u64 *dma_list = NULL;
+       struct mthca_mailbox *mailbox;
+       struct mthca_eq_context *eq_context;
+       int err = -ENOMEM;
+       int i;
+       u8 status;
+       
+       HCA_ENTER(HCA_DBG_INIT);
+       eq->dev  = dev; 
+       eq->nent = roundup_pow_of_two(max(nent, 2));
+       npages = ALIGN(eq->nent * MTHCA_EQ_ENTRY_SIZE, PAGE_SIZE) / PAGE_SIZE;
+
+       eq->page_list = kmalloc(npages * sizeof *eq->page_list,
+                               GFP_KERNEL);
+       if (!eq->page_list)
+               goto err_out;
+
+       for (i = 0; i < npages; ++i)
+               eq->page_list[i].page = NULL;
+
+       dma_list = kmalloc(npages * sizeof *dma_list, GFP_KERNEL);
+       if (!dma_list)
+               goto err_out_free;
+
+       mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
+       if (IS_ERR(mailbox))
+               goto err_out_free;
+       eq_context = mailbox->buf;
+
+       for (i = 0; i < npages; ++i) {
+               alloc_dma_zmem_map(dev, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL, &eq->page_list[i]);
+               if (!eq->page_list[i].page)
+                       goto err_out_free_pages;
+               dma_list[i] = eq->page_list[i].dma_address;
+       }
+
+       for (i = 0; i < eq->nent; ++i)
+               set_eqe_hw(get_eqe(eq, i));
+
+       eq->eqn = mthca_alloc(&dev->eq_table.alloc);
+       if (eq->eqn == -1)
+               goto err_out_free_pages;
+
+       err = mthca_mr_alloc_phys(dev, dev->driver_pd.pd_num,
+                                 dma_list, PAGE_SHIFT, npages,
+                                 0, npages * PAGE_SIZE,
+                                 MTHCA_MPT_FLAG_LOCAL_WRITE |
+                                 MTHCA_MPT_FLAG_LOCAL_READ,
+                                 &eq->mr);
+       if (err)
+               goto err_out_free_eq;
+
+       RtlZeroMemory(eq_context, sizeof *eq_context);
+       eq_context->flags           = cl_hton32(MTHCA_EQ_STATUS_OK   |
+                                                 MTHCA_EQ_OWNER_HW    |
+                                                 MTHCA_EQ_STATE_ARMED |
+                                                 MTHCA_EQ_FLAG_TR);
+       if (mthca_is_memfree(dev))
+               eq_context->flags  |= cl_hton32(MTHCA_EQ_STATE_ARBEL);
+
+       eq_context->logsize_usrpage = cl_hton32((ffs(eq->nent) - 1) << 24);
+       if (mthca_is_memfree(dev)) {
+               eq_context->arbel_pd = cl_hton32(dev->driver_pd.pd_num);
+       } else {
+               eq_context->logsize_usrpage |= cl_hton32(dev->driver_uar.index);
+               eq_context->tavor_pd         = cl_hton32(dev->driver_pd.pd_num);
+       }
+       eq_context->intr            = intr;
+       eq_context->lkey            = cl_hton32(eq->mr.ibmr.lkey);
+
+       err = mthca_SW2HW_EQ(dev, mailbox, eq->eqn, &status);
+       if (err) {
+               HCA_PRINT(TRACE_LEVEL_WARNING ,HCA_DBG_LOW ,("SW2HW_EQ failed (%d)\n", err));
+               goto err_out_free_mr;
+       }
+       if (status) {
+               HCA_PRINT(TRACE_LEVEL_WARNING,HCA_DBG_LOW,("SW2HW_EQ returned status 0x%02x\n",
+                          status));
+               err = -EINVAL;
+               goto err_out_free_mr;
+       }
+
+       kfree(dma_list);
+       mthca_free_mailbox(dev, mailbox);
+
+       eq->eqn_mask   = _byteswap_ulong(1 << eq->eqn);
+       eq->cons_index = 0;
+
+       dev->eq_table.arm_mask |= eq->eqn_mask;
+
+       HCA_PRINT(TRACE_LEVEL_INFORMATION ,HCA_DBG_INIT ,("Allocated EQ %d with %d entries\n",
+                 eq->eqn, eq->nent));
+
+       HCA_EXIT(HCA_DBG_INIT);
+       return err;
+
+ err_out_free_mr:
+       mthca_free_mr(dev, &eq->mr);
+
+ err_out_free_eq:
+       mthca_free(&dev->eq_table.alloc, eq->eqn);
+
+ err_out_free_pages:
+       for (i = 0; i < npages; ++i) {
+               if (eq->page_list[i].page) {
+                       free_dma_mem_map(dev, &eq->page_list[i], PCI_DMA_BIDIRECTIONAL);
+               }
+       }
+       mthca_free_mailbox(dev, mailbox);
+
+ err_out_free:
+       kfree(eq->page_list);
+       kfree(dma_list);
+
+ err_out:
+       HCA_EXIT(HCA_DBG_INIT);
+       return err;
+}
+
+static void mthca_free_eq(struct mthca_dev *dev,
+                         struct mthca_eq *eq)
+{
+       struct mthca_mailbox *mailbox;
+       int err;
+       u8 status;
+       int npages = (eq->nent * MTHCA_EQ_ENTRY_SIZE + PAGE_SIZE - 1) /
+               PAGE_SIZE;
+       int i;
+
+       mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
+       if (IS_ERR(mailbox))
+               return;
+
+       err = mthca_HW2SW_EQ(dev, mailbox, eq->eqn, &status);
+       if (err)
+               HCA_PRINT(TRACE_LEVEL_WARNING  ,HCA_DBG_LOW  ,("HW2SW_EQ failed (%d)\n", err));
+       if (status)
+               HCA_PRINT(TRACE_LEVEL_WARNING  ,HCA_DBG_LOW  ,("HW2SW_EQ returned status 0x%02x\n", status));
+
+       dev->eq_table.arm_mask &= ~eq->eqn_mask;
+
+       #if 0
+       {
+               HCA_PRINT(TRACE_LEVEL_VERBOSE  ,HCA_DBG_LOW  ,("Dumping EQ context %02x:\n", eq->eqn));
+               for (i = 0; i < sizeof (struct mthca_eq_context) / 4; ++i) {
+                       if (i % 4 == 0)
+                               HCA_PRINT(TRACE_LEVEL_INFORMATION   ,HCA_DBG_LOW   ,("[%02x] ", i * 4));
+                       HCA_PRINT(TRACE_LEVEL_INFORMATION   ,HCA_DBG_LOW   ,(" %08x", cl_ntoh32(*(u32*)((u8*)mailbox->buf + i * 4))));
+                       if ((i + 1) % 4 == 0)
+                               HCA_PRINT(TRACE_LEVEL_INFORMATION   ,HCA_DBG_LOW   ,("\n"));
+               }
+       }
+       #endif
+
+       mthca_free_mr(dev, &eq->mr);
+       for (i = 0; i < npages; ++i) {
+               free_dma_mem_map(dev, &eq->page_list[i], PCI_DMA_BIDIRECTIONAL);
+       }
+
+       kfree(eq->page_list);
+       mthca_free_mailbox(dev, mailbox);
+}
+
+static void mthca_free_irqs(struct mthca_dev *dev)
+{
+#ifdef MSI_SUPPORT
+       int i;
+#endif
+       if (dev->eq_table.have_irq)
+               free_irq(dev->ext->int_obj);
+#ifdef MSI_SUPPORT     
+       for (i = 0; i < MTHCA_NUM_EQ; ++i)
+               if (dev->eq_table.eq[i].have_irq)
+                       free_irq(dev->eq_table.eq[i].msi_x_vector,
+                                dev->eq_table.eq + i);
+#endif         
+}
+
+static int mthca_map_reg(struct mthca_dev *dev,
+                                  u64 offset, unsigned long size,
+                                  void __iomem **map, SIZE_T *map_size)
+{
+       u64 base = pci_resource_start(dev, HCA_BAR_TYPE_HCR);
+       *map = ioremap(base + offset, size, map_size);
+       if (!*map) 
+               return -ENOMEM;
+       return 0;
+}
+
+static void mthca_unmap_reg(struct mthca_dev *dev, u64 offset,
+                           unsigned long size, void __iomem *map, SIZE_T map_size)
+{
+       UNREFERENCED_PARAMETER(dev);
+       UNREFERENCED_PARAMETER(size);
+       UNREFERENCED_PARAMETER(offset);
+       iounmap(map, map_size);
+}
+
+static int mthca_map_eq_regs(struct mthca_dev *dev)
+{
+       u64 mthca_base;
+
+       mthca_base = pci_resource_start(dev, HCA_BAR_TYPE_HCR);
+
+       if (mthca_is_memfree(dev)) {
+               /*
+                * We assume that the EQ arm and EQ set CI registers
+                * fall within the first BAR.  We can't trust the
+                * values firmware gives us, since those addresses are
+                * valid on the HCA's side of the PCI bus but not
+                * necessarily the host side.
+                */
+               if (mthca_map_reg(dev, (pci_resource_len(dev, 0) - 1) &
+                                 dev->fw.arbel.clr_int_base, MTHCA_CLR_INT_SIZE,
+                                 &dev->clr_base, &dev->clr_base_size)) {
+                       HCA_PRINT(TRACE_LEVEL_ERROR,HCA_DBG_LOW,("Couldn't map interrupt clear register, "
+                                 "aborting.\n"));
+                       return -ENOMEM;
+               }
+
+               /*
+                * Add 4 because we limit ourselves to EQs 0 ... 31,
+                * so we only need the low word of the register.
+                */
+               if (mthca_map_reg(dev, ((pci_resource_len(dev, 0) - 1) &
+                                       dev->fw.arbel.eq_arm_base) + 4, 4,
+                                 &dev->eq_regs.arbel.eq_arm, &dev->eq_regs.arbel.eq_arm_size)) {
+                       HCA_PRINT(TRACE_LEVEL_ERROR  ,HCA_DBG_LOW  ,("Couldn't map EQ arm register, aborting.\n"));
+                       mthca_unmap_reg(dev, (pci_resource_len(dev, 0) - 1) &
+                                       dev->fw.arbel.clr_int_base, MTHCA_CLR_INT_SIZE,
+                                       dev->clr_base, dev->clr_base_size);
+                       return -ENOMEM;
+               }
+
+               if (mthca_map_reg(dev, (pci_resource_len(dev, 0) - 1) &
+                                 dev->fw.arbel.eq_set_ci_base,
+                                 MTHCA_EQ_SET_CI_SIZE,
+                                 &dev->eq_regs.arbel.eq_set_ci_base,
+                                 &dev->eq_regs.arbel.eq_set_ci_base_size
+                                 )) {
+                       HCA_PRINT(TRACE_LEVEL_ERROR  ,HCA_DBG_LOW  ,("Couldn't map EQ CI register, aborting.\n"));
+                       mthca_unmap_reg(dev, ((pci_resource_len(dev, 0) - 1) &
+                                             dev->fw.arbel.eq_arm_base) + 4, 4,
+                                       dev->eq_regs.arbel.eq_arm, dev->eq_regs.arbel.eq_arm_size);
+                       mthca_unmap_reg(dev, (pci_resource_len(dev, 0) - 1) &
+                                       dev->fw.arbel.clr_int_base, MTHCA_CLR_INT_SIZE,
+                                       dev->clr_base, dev->clr_base_size);
+                       return -ENOMEM;
+               }
+       } else {
+               if (mthca_map_reg(dev, MTHCA_CLR_INT_BASE, MTHCA_CLR_INT_SIZE,
+                                 &dev->clr_base, &dev->clr_base_size)) {
+                       HCA_PRINT(TRACE_LEVEL_ERROR,HCA_DBG_LOW,("Couldn't map interrupt clear register, "
+                                 "aborting.\n"));
+                       return -ENOMEM;
+               }
+
+               if (mthca_map_reg(dev, MTHCA_ECR_BASE,
+                                 MTHCA_ECR_SIZE + MTHCA_ECR_CLR_SIZE,
+                                 &dev->eq_regs.tavor.ecr_base,  &dev->eq_regs.tavor.ecr_base_size)) {
+                       HCA_PRINT(TRACE_LEVEL_ERROR,HCA_DBG_LOW,("Couldn't map ecr register, "
+                                 "aborting.\n"));
+                       mthca_unmap_reg(dev, MTHCA_CLR_INT_BASE, MTHCA_CLR_INT_SIZE,
+                                       dev->clr_base, dev->clr_base_size);
+                       return -ENOMEM;
+               }
+       }
+
+       return 0;
+
+}
+
+static void mthca_unmap_eq_regs(struct mthca_dev *dev)
+{
+       if (mthca_is_memfree(dev)) {
+               mthca_unmap_reg(dev, (pci_resource_len(dev, 0) - 1) &
+                               dev->fw.arbel.eq_set_ci_base,
+                               MTHCA_EQ_SET_CI_SIZE,
+                               dev->eq_regs.arbel.eq_set_ci_base, 
+                               dev->eq_regs.arbel.eq_set_ci_base_size);
+               mthca_unmap_reg(dev, ((pci_resource_len(dev, 0) - 1) &
+                               dev->fw.arbel.eq_arm_base) + 4, 4,
+                       dev->eq_regs.arbel.eq_arm,
+                       dev->eq_regs.arbel.eq_arm_size);
+               mthca_unmap_reg(dev, (pci_resource_len(dev, 0) - 1) &
+                               dev->fw.arbel.clr_int_base, MTHCA_CLR_INT_SIZE,
+                               dev->clr_base, dev->clr_base_size);
+       } else {
+               mthca_unmap_reg(dev, MTHCA_ECR_BASE,
+                               MTHCA_ECR_SIZE + MTHCA_ECR_CLR_SIZE,
+                               dev->eq_regs.tavor.ecr_base, 
+                               dev->eq_regs.tavor.ecr_base_size);
+               mthca_unmap_reg(dev, MTHCA_CLR_INT_BASE, MTHCA_CLR_INT_SIZE,
+                               dev->clr_base, dev->clr_base_size);
+       }
+}
+
+int mthca_map_eq_icm(struct mthca_dev *dev, u64 icm_virt)
+{
+       int ret;
+       u8 status;
+
+       /*
+        * We assume that mapping one page is enough for the whole EQ
+        * context table.  This is fine with all current HCAs, because
+        * we only use 32 EQs and each EQ uses 32 bytes of context
+        * memory, or 1 KB total.
+        */
+       dev->eq_table.icm_virt = icm_virt;
+       alloc_dma_zmem_map(dev,PAGE_SIZE, PCI_DMA_BIDIRECTIONAL, &dev->eq_table.sg);
+       if (!dev->eq_table.sg.page)
+               return -ENOMEM;
+
+       ret = mthca_MAP_ICM_page(dev, dev->eq_table.sg.dma_address, icm_virt, &status);
+       if (!ret && status)
+               ret = -EINVAL;
+       if (ret) 
+               free_dma_mem_map(dev, &dev->eq_table.sg, PCI_DMA_BIDIRECTIONAL );
+
+       return ret;
+}
+
+void mthca_unmap_eq_icm(struct mthca_dev *dev)
+{
+       u8 status;
+
+       mthca_UNMAP_ICM(dev, dev->eq_table.icm_virt, PAGE_SIZE / 4096, &status);
+       free_dma_mem_map(dev, &dev->eq_table.sg, PCI_DMA_BIDIRECTIONAL );
+}
+
+int mthca_init_eq_table(struct mthca_dev *dev)
+{
+       int err;
+       u8 status;
+       u8 intr;
+       int i;
+       
+       HCA_ENTER(HCA_DBG_INIT);
+       err = mthca_alloc_init(&dev->eq_table.alloc,
+                              dev->limits.num_eqs,
+                              dev->limits.num_eqs - 1,
+                              dev->limits.reserved_eqs);
+       if (err)
+               return err;
+
+       err = mthca_map_eq_regs(dev);
+       if (err)
+               goto err_out_free;
+
+#ifdef MSI_SUPPORT
+       if (dev->mthca_flags & MTHCA_FLAG_MSI ||
+           dev->mthca_flags & MTHCA_FLAG_MSI_X) {
+               dev->eq_table.clr_mask = 0;
+       } else
+#endif 
+       {
+               dev->eq_table.clr_mask =
+                       _byteswap_ulong(1 << (dev->eq_table.inta_pin & 31));
+               dev->eq_table.clr_int  = dev->clr_base +
+                       (dev->eq_table.inta_pin < 32 ? 4 : 0);
+       }
+
+       dev->eq_table.arm_mask = 0;
+
+       intr = (dev->mthca_flags & MTHCA_FLAG_MSI) ?
+               128 : dev->eq_table.inta_pin;
+
+       err = mthca_create_eq(dev, dev->limits.num_cqs + MTHCA_NUM_SPARE_EQE,
+                             (dev->mthca_flags & MTHCA_FLAG_MSI_X) ? 128 : intr,
+                             &dev->eq_table.eq[MTHCA_EQ_COMP]);
+       if (err)
+               goto err_out_unmap;
+
+       err = mthca_create_eq(dev, MTHCA_NUM_ASYNC_EQE + MTHCA_NUM_SPARE_EQE,
+                             (dev->mthca_flags & MTHCA_FLAG_MSI_X) ? 129 : intr,
+                             &dev->eq_table.eq[MTHCA_EQ_ASYNC]);
+       if (err)
+               goto err_out_comp;
+
+       err = mthca_create_eq(dev, MTHCA_NUM_CMD_EQE + MTHCA_NUM_SPARE_EQE,
+                             (dev->mthca_flags & MTHCA_FLAG_MSI_X) ? 130 : intr,
+                             &dev->eq_table.eq[MTHCA_EQ_CMD]);
+       if (err)
+               goto err_out_async;
+
+#ifdef MSI_SUPPORT
+       if (dev->mthca_flags & MTHCA_FLAG_MSI_X) {
+               static const char *eq_name[] = {
+                       [MTHCA_EQ_COMP]  = DRV_NAME " (comp)",
+                       [MTHCA_EQ_ASYNC] = DRV_NAME " (async)",
+                       [MTHCA_EQ_CMD]   = DRV_NAME " (cmd)"
+               };
+
+               for (i = 0; i < MTHCA_NUM_EQ; ++i) {
+                       err = request_irq(dev->eq_table.eq[i].msi_x_vector,
+                                         mthca_is_memfree(dev) ?
+                                         mthca_arbel_msi_x_interrupt :
+                                         mthca_tavor_msi_x_interrupt,
+                                         0, eq_name[i], dev->eq_table.eq + i);
+                       if (err)
+                               goto err_out_cmd;
+                       dev->eq_table.eq[i].have_irq = 1;
+                       /* initialize the per-EQ lock and DPC */
+                       spin_lock_init( &dev->eq_table.eq[i].lock );    
+                       KeInitializeDpc(
+                               &dev->eq_table.eq[i].dpc,
+                               mthca_is_memfree(dev) ?
+                                       mthca_arbel_msi_x_dpc :
+                                       mthca_tavor_msi_x_dpc,
+                               dev->eq_table.eq + i);
+               }
+       } else 
+#endif 
+       {
+               spin_lock_init( &dev->ext->isr_lock );  
+               err = request_irq(
+                       &dev->ext->interruptInfo,
+                       &dev->ext->isr_lock.lock        ,
+                       mthca_is_memfree(dev) ? mthca_arbel_interrupt : mthca_tavor_interrupt,
+                       dev,
+                       &dev->ext->int_obj
+                 );
+               if (err)
+                       goto err_out_cmd;
+               dev->eq_table.have_irq = 1;
+
+               /* initialize the per-EQ locks and DPCs */
+               for (i = 0; i < MTHCA_NUM_EQ; ++i) {
+                       spin_lock_init( &dev->eq_table.eq[i].lock );    
+                       KeInitializeDpc(
+                               &dev->eq_table.eq[i].dpc,
+                               mthca_is_memfree(dev) ?
+                                       mthca_arbel_dpc :
+                                       mthca_tavor_dpc,
+                               dev->eq_table.eq + i);
+               }
+       }
+
+       err = mthca_MAP_EQ(dev, async_mask(dev),
+                          0, dev->eq_table.eq[MTHCA_EQ_ASYNC].eqn, &status);
+       if (err)
+               HCA_PRINT(TRACE_LEVEL_WARNING,HCA_DBG_INIT,("MAP_EQ for async EQ %d failed (%d)\n",
+                          dev->eq_table.eq[MTHCA_EQ_ASYNC].eqn, err));
+       if (status)
+               HCA_PRINT(TRACE_LEVEL_WARNING,HCA_DBG_INIT, ("MAP_EQ for async EQ %d returned status 0x%02x\n",
+                          dev->eq_table.eq[MTHCA_EQ_ASYNC].eqn, status));
+       err = mthca_MAP_EQ(dev, MTHCA_CMD_EVENT_MASK,
+                          0, dev->eq_table.eq[MTHCA_EQ_CMD].eqn, &status);
+       if (err)
+               HCA_PRINT(TRACE_LEVEL_WARNING,HCA_DBG_INIT, ("MAP_EQ for cmd EQ %d failed (%d)\n",
+                          dev->eq_table.eq[MTHCA_EQ_CMD].eqn, err));
+       if (status)
+               HCA_PRINT(TRACE_LEVEL_WARNING,HCA_DBG_INIT,("MAP_EQ for cmd EQ %d returned status 0x%02x\n",
+                          dev->eq_table.eq[MTHCA_EQ_CMD].eqn, status));
+
+       for (i = 0; i < MTHCA_EQ_CMD; ++i)
+               if (mthca_is_memfree(dev))
+                       arbel_eq_req_not(dev, dev->eq_table.eq[i].eqn_mask);
+               else
+                       tavor_eq_req_not(dev, dev->eq_table.eq[i].eqn);
+
+       return 0;
+
+err_out_cmd:
+       mthca_free_eq(dev, &dev->eq_table.eq[MTHCA_EQ_CMD]);
+
+err_out_async:
+       mthca_free_eq(dev, &dev->eq_table.eq[MTHCA_EQ_ASYNC]);
+
+err_out_comp:
+       mthca_free_eq(dev, &dev->eq_table.eq[MTHCA_EQ_COMP]);
+
+err_out_unmap:
+       mthca_unmap_eq_regs(dev);
+
+err_out_free:
+       mthca_alloc_cleanup(&dev->eq_table.alloc);
+       HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_INIT ,("mthca_init_eq failed %d\n", err));
+       return err;
+}
+
+void mthca_cleanup_eq_table(struct mthca_dev *dev)
+{
+       u8 status;
+       int i;
+
+       mthca_free_irqs(dev);
+
+       mthca_MAP_EQ(dev, async_mask(dev),
+                    1, dev->eq_table.eq[MTHCA_EQ_ASYNC].eqn, &status);
+       mthca_MAP_EQ(dev, MTHCA_CMD_EVENT_MASK,
+                    1, dev->eq_table.eq[MTHCA_EQ_CMD].eqn, &status);
+
+       for (i = 0; i < MTHCA_NUM_EQ; ++i)
+               mthca_free_eq(dev, &dev->eq_table.eq[i]);
+
+       mthca_unmap_eq_regs(dev);
+
+       mthca_alloc_cleanup(&dev->eq_table.alloc);
+}
+
+
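(For orientation -- not part of this change set: the EQ setup above registers an interrupt and a
per-EQ DPC via KeInitializeDpc(), following the usual WDM split where the ISR only defers work to
DISPATCH_LEVEL. A minimal, hypothetical sketch of that pattern follows; names such as example_eq_isr
are placeholders, and the real ISR/DPC routines are the mthca_*_interrupt / mthca_*_dpc functions
referenced above.)

	static BOOLEAN example_eq_isr(PKINTERRUPT p_interrupt, PVOID p_context)
	{
		PRKDPC p_dpc = (PRKDPC)p_context;	/* e.g. &dev->eq_table.eq[i].dpc */
		UNREFERENCED_PARAMETER(p_interrupt);
		/* a real ISR first checks and clears the device's interrupt cause;
		   only the deferral step is shown here */
		KeInsertQueueDpc(p_dpc, NULL, NULL);	/* repeat requests while the DPC is pending are coalesced */
		return TRUE;
	}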
diff --git a/trunk/hw/mthca/kernel/mthca_log.c b/trunk/hw/mthca/kernel/mthca_log.c
new file mode 100644 (file)
index 0000000..46825ff
--- /dev/null
@@ -0,0 +1,230 @@
+/*\r
+ * Copyright (c) 2005 Mellanox Technologies LTD.  All rights reserved.\r
+ *\r
+ * This software is available to you under the OpenIB.org BSD license\r
+ * below:\r
+ *\r
+ *     Redistribution and use in source and binary forms, with or\r
+ *     without modification, are permitted provided that the following\r
+ *     conditions are met:\r
+ *\r
+ *      - Redistributions of source code must retain the above\r
+ *        copyright notice, this list of conditions and the following\r
+ *        disclaimer.\r
+ *\r
+ *      - Redistributions in binary form must reproduce the above\r
+ *        copyright notice, this list of conditions and the following\r
+ *        disclaimer in the documentation and/or other materials\r
+ *        provided with the distribution.\r
+ *\r
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,\r
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\r
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND\r
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS\r
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN\r
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN\r
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\r
+ * SOFTWARE.\r
+ *\r
+ */\r
+\r
+// Author: Yossi Leybovich \r
+\r
+#include "hca_driver.h"\r
+\r
+\r
+VOID
+WriteEventLogEntry(
+       PVOID   pi_pIoObject,
+       ULONG   pi_ErrorCode,
+       ULONG   pi_UniqueErrorCode,
+       ULONG   pi_FinalStatus,
+       ULONG   pi_nDataItems,
+       ...
+       )
+/*++
+
+Routine Description:
+    Writes an event log entry to the event log.
+
+Arguments:
+
+       pi_pIoObject......... The IO object (driver object or device object).
+       pi_ErrorCode......... The error code.
+       pi_UniqueErrorCode... A specific error code.
+       pi_FinalStatus....... The final status.
+       pi_nDataItems........ Number of data items.
+       .
+       . data items values
+       .
+
+Return Value:
+
+       None.
+
+--*/
+{ /* WriteEventLogEntry */
+
+       /* Variable argument list */    
+       va_list                                 l_Argptr;
+       /* Pointer to an error log entry */
+       PIO_ERROR_LOG_PACKET    l_pErrorLogEntry; 
+
+       /* Init the variable argument list */   
+       va_start(l_Argptr, pi_nDataItems);
+
+       /* Allocate an error log entry */ 
+    l_pErrorLogEntry = 
+       (PIO_ERROR_LOG_PACKET)IoAllocateErrorLogEntry(
+                                                               pi_pIoObject,
+                                                               (UCHAR)(sizeof(IO_ERROR_LOG_PACKET)+pi_nDataItems*sizeof(ULONG))
+                                                               ); 
+       /* Check allocation */
+    if ( l_pErrorLogEntry != NULL) 
+       { /* OK */
+
+               /* Data item index */
+               USHORT l_nDataItem ;
+
+        /* Set the error log entry header */
+               l_pErrorLogEntry->ErrorCode                     = pi_ErrorCode; 
+        l_pErrorLogEntry->DumpDataSize         = (USHORT) (pi_nDataItems*sizeof(ULONG)); 
+        l_pErrorLogEntry->SequenceNumber       = 0; 
+        l_pErrorLogEntry->MajorFunctionCode = 0; 
+        l_pErrorLogEntry->IoControlCode                = 0; 
+        l_pErrorLogEntry->RetryCount           = 0; 
+        l_pErrorLogEntry->UniqueErrorValue     = pi_UniqueErrorCode; 
+        l_pErrorLogEntry->FinalStatus          = pi_FinalStatus; 
+
+        /* Insert the data items */
+               for (l_nDataItem = 0; l_nDataItem < pi_nDataItems; l_nDataItem++) 
+               { /* Insert a data item */
+
+                       /* Current data item */
+                       int l_CurDataItem ;
+                               
+                       /* Get next data item */
+                       l_CurDataItem = va_arg( l_Argptr, int);
+
+            /* Put it into the data array */
+                       l_pErrorLogEntry->DumpData[l_nDataItem] = l_CurDataItem ;
+
+               } /* Insert a data item */
+
+        /* Write the packet */
+               IoWriteErrorLogEntry(l_pErrorLogEntry);
+
+    } /* OK */
+
+       /* Term the variable argument list */   
+       va_end(l_Argptr);
+
+} /* WriteEventLogEntry */
+
+/*------------------------------------------------------------------------------------------------------*/
+
+VOID
+WriteEventLogEntryStr(
+       PVOID   pi_pIoObject,
+       ULONG   pi_ErrorCode,
+       ULONG   pi_UniqueErrorCode,
+       ULONG   pi_FinalStatus,
+       PWCHAR pi_InsertionStr,
+       ULONG   pi_nDataItems,
+       ...
+       )
+/*++
+
+Routine Description:
+    Writes an event log entry with an insertion string to the event log.
+
+Arguments:
+
+       pi_pIoObject......... The IO object (driver object or device object).
+       pi_ErrorCode......... The error code.
+       pi_UniqueErrorCode... A specific error code.
+       pi_FinalStatus....... The final status.
+       pi_InsertionStr...... The wide-character insertion string.
+       pi_nDataItems........ Number of data items.
+       .
+       . data items values
+       .
+
+Return Value:
+
+       None.
+
+--*/
+{ /* WriteEventLogEntryStr */
+
+       /* Variable argument list */    
+       va_list                                 l_Argptr;
+       /* Pointer to an error log entry */
+       PIO_ERROR_LOG_PACKET    l_pErrorLogEntry; 
+       /* sizeof insertion string */
+       int     l_Size = (int)((pi_InsertionStr) ? ((wcslen(pi_InsertionStr) + 1) * sizeof( WCHAR )) : 0);\r
+
+       /* Init the variable argument list */   
+       va_start(l_Argptr, pi_nDataItems);
+
+       /* Allocate an error log entry */ 
+    l_pErrorLogEntry = 
+       (PIO_ERROR_LOG_PACKET)IoAllocateErrorLogEntry(
+                                                               pi_pIoObject,
+                                                               (UCHAR)(sizeof(IO_ERROR_LOG_PACKET)+pi_nDataItems*sizeof(ULONG)+l_Size)
+                                                               ); 
+       /* Check allocation */
+    if ( l_pErrorLogEntry != NULL) 
+       { /* OK */
+
+               /* Data item index */
+               USHORT l_nDataItem ;
+
+        /* Set the error log entry header */
+               l_pErrorLogEntry->ErrorCode                     = pi_ErrorCode; 
+        l_pErrorLogEntry->DumpDataSize         = (USHORT) (pi_nDataItems*sizeof(ULONG)); 
+        l_pErrorLogEntry->SequenceNumber       = 0; 
+        l_pErrorLogEntry->MajorFunctionCode = 0; 
+        l_pErrorLogEntry->IoControlCode                = 0; 
+        l_pErrorLogEntry->RetryCount           = 0; 
+        l_pErrorLogEntry->UniqueErrorValue     = pi_UniqueErrorCode; 
+        l_pErrorLogEntry->FinalStatus          = pi_FinalStatus; 
+
+        /* Insert the data items */
+               for (l_nDataItem = 0; l_nDataItem < pi_nDataItems; l_nDataItem++) 
+               { /* Insert a data item */
+
+                       /* Current data item */
+                       int l_CurDataItem ;
+                               
+                       /* Get next data item */
+                       l_CurDataItem = va_arg( l_Argptr, int);
+
+            /* Put it into the data array */
+                       l_pErrorLogEntry->DumpData[l_nDataItem] = l_CurDataItem ;
+
+               } /* Insert a data item */
+
+               /* add insertion string */
+               if (pi_InsertionStr) {
+                       char *ptr; 
+                       l_pErrorLogEntry->NumberOfStrings = 1;
+                       l_pErrorLogEntry->StringOffset = sizeof(IO_ERROR_LOG_PACKET) + l_pErrorLogEntry->DumpDataSize;
+                       ptr = (char*)l_pErrorLogEntry + l_pErrorLogEntry->StringOffset;
+                       memcpy( ptr, pi_InsertionStr, l_Size );
+               }
+               
+        /* Write the packet */
+               IoWriteErrorLogEntry(l_pErrorLogEntry);
+
+    } /* OK */
+
+       /* Term the variable argument list */   
+       va_end(l_Argptr);
+
+} /* WriteEventLogEntryStr */
+\r
+\r
+\r
+\r
+\r
+\r
diff --git a/trunk/hw/mthca/kernel/mthca_log.mc b/trunk/hw/mthca/kernel/mthca_log.mc
new file mode 100644 (file)
index 0000000..08cbdda
--- /dev/null
@@ -0,0 +1,56 @@
+;/*++\r
+;=============================================================================\r
+;Copyright (c) 2001 Mellanox Technologies\r
+;\r
+;Module Name:\r
+;\r
+;    mthcalog.mc\r
+;\r
+;Abstract:\r
+;\r
+;    MTHCA Driver event log messages\r
+;\r
+;Authors:\r
+;\r
+;    Yossi Leybovich\r
+;\r
+;Environment:\r
+;\r
+;   Kernel Mode .\r
+;\r
+;=============================================================================\r
+;--*/\r
+;\r
+MessageIdTypedef = NTSTATUS\r
+\r
+SeverityNames = (\r
+       Success                 = 0x0:STATUS_SEVERITY_SUCCESS\r
+       Informational   = 0x1:STATUS_SEVERITY_INFORMATIONAL\r
+       Warning                 = 0x2:STATUS_SEVERITY_WARNING\r
+       Error                   = 0x3:STATUS_SEVERITY_ERROR\r
+       )\r
+\r
+FacilityNames = (\r
+       System                  = 0x0\r
+       RpcRuntime              = 0x2:FACILITY_RPC_RUNTIME\r
+       RpcStubs                = 0x3:FACILITY_RPC_STUBS\r
+       Io                              = 0x4:FACILITY_IO_ERROR_CODE\r
+       MTHCA                   = 0x7:FACILITY_MTHCA_ERROR_CODE\r
+       )\r
+\r
+\r
+MessageId=0x0001 Facility=MTHCA Severity=Informational SymbolicName=EVENT_MTHCA_ANY_INFO\r
+Language=English\r
+%2\r
+.\r
+\r
+MessageId=0x0002 Facility=MTHCA Severity=Warning SymbolicName=EVENT_MTHCA_ANY_WARN\r
+Language=English\r
+%2\r
+.\r
+\r
+MessageId=0x0003 Facility=MTHCA Severity=Error SymbolicName=EVENT_MTHCA_ANY_ERROR\r
+Language=English\r
+%2\r
+.\r
+\r
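(For orientation -- not part of this change set: a hedged usage sketch combining the logging helper
above with the message catalog from mthca_log.mc. EVENT_MTHCA_ANY_ERROR is generated from that .mc
file; its %2 placeholder is filled by the insertion string, since %1 is conventionally the device
name. The device-object variable and status value below are placeholders.)

	WriteEventLogEntryStr(
		p_dev_obj,			/* IO object (hypothetical variable) */
		EVENT_MTHCA_ANY_ERROR,		/* NTSTATUS code generated from mthca_log.mc */
		0,				/* unique error code */
		STATUS_UNSUCCESSFUL,		/* final status */
		L"INIT_HCA command failed",	/* insertion string shown as %2 */
		0				/* no extra dump data items */
		);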
diff --git a/trunk/hw/mthca/kernel/mthca_log.rc b/trunk/hw/mthca/kernel/mthca_log.rc
new file mode 100644 (file)
index 0000000..116522b
--- /dev/null
@@ -0,0 +1,2 @@
+LANGUAGE 0x9,0x1\r
+1 11 MSG00001.bin\r
diff --git a/trunk/hw/mthca/kernel/mthca_mad.c b/trunk/hw/mthca/kernel/mthca_mad.c
new file mode 100644 (file)
index 0000000..3425d68
--- /dev/null
@@ -0,0 +1,305 @@
+/*
+ * Copyright (c) 2004 Topspin Communications.  All rights reserved.
+ * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2004 Voltaire, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id: mthca_mad.c 2928 2005-07-28 18:45:56Z sean.hefty $
+ */
+
+#include <ib_verbs.h>
+#include <ib_mad.h>
+#include <ib_smi.h>
+
+#include "mthca_dev.h"
+#if defined(EVENT_TRACING)
+#ifdef offsetof
+#undef offsetof
+#endif
+#include "mthca_mad.tmh"
+#endif
+#include "mthca_cmd.h"
+
+enum {
+       MTHCA_VENDOR_CLASS1 = 0x9,
+       MTHCA_VENDOR_CLASS2 = 0xa
+};
+
+struct mthca_trap_mad {
+       struct scatterlist sg;
+};
+
+static void update_sm_ah(struct mthca_dev *dev,
+                        u8 port_num, u16 lid, u8 sl)
+{
+       struct ib_ah *new_ah;
+       struct ib_ah_attr ah_attr;
+       SPIN_LOCK_PREP(lh);
+
+       if (!dev->send_agent[port_num - 1][0])
+               return;
+
+       RtlZeroMemory(&ah_attr, sizeof ah_attr);
+       ah_attr.dlid     = lid;
+       ah_attr.sl       = sl;
+       ah_attr.port_num = port_num;
+
+       new_ah = ibv_create_ah(dev->send_agent[port_num - 1][0]->qp->pd,
+                             &ah_attr, NULL, NULL);
+       if (IS_ERR(new_ah))
+               return;
+
+       spin_lock_irqsave(&dev->sm_lock, &lh);
+       if (dev->sm_ah[port_num - 1]) {
+               ibv_destroy_ah(dev->sm_ah[port_num - 1]);
+       }
+       dev->sm_ah[port_num - 1] = new_ah;
+       spin_unlock_irqrestore(&lh);
+}
+
+/*
+ * Snoop SM MADs for port info and P_Key table sets, so we can
+ * synthesize LID change and P_Key change events.
+ */
+static void smp_snoop(struct ib_device *ibdev,
+                     u8 port_num,
+                     struct ib_mad *mad)
+{
+       struct ib_event event;
+
+       if ((mad->mad_hdr.mgmt_class  == IB_MGMT_CLASS_SUBN_LID_ROUTED ||
+            mad->mad_hdr.mgmt_class  == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) &&
+           mad->mad_hdr.method     == IB_MGMT_METHOD_SET) {
+               if (mad->mad_hdr.attr_id == IB_SMP_ATTR_PORT_INFO) {
+                       update_sm_ah(to_mdev(ibdev), port_num,
+                                    cl_ntoh16(*(__be16 *) (mad->data + 58)),
+                                    (*(u8 *) (mad->data + 76)) & 0xf);
+
+                       event.device           = ibdev;
+                       event.event            = IB_EVENT_LID_CHANGE;
+                       event.element.port_num = port_num;
+                       ib_dispatch_event(&event);
+               }
+
+               if (mad->mad_hdr.attr_id == IB_SMP_ATTR_PKEY_TABLE) {
+                       event.device           = ibdev;
+                       event.event            = IB_EVENT_PKEY_CHANGE;
+                       event.element.port_num = port_num;
+                       ib_dispatch_event(&event);
+               }
+       }
+}
+
+static void forward_trap(struct mthca_dev *dev,
+                        u8 port_num,
+                        struct ib_mad *mad)
+{
+       int qpn = mad->mad_hdr.mgmt_class != IB_MGMT_CLASS_SUBN_LID_ROUTED;
+       struct mthca_trap_mad *tmad;
+       struct ib_sge      gather_list;
+       struct _ib_send_wr wr;
+       struct ib_mad_agent *agent = dev->send_agent[port_num - 1][qpn];
+       int ret;
+       SPIN_LOCK_PREP(lh);
+
+       /* fill the template */
+       wr.ds_array = (ib_local_ds_t* __ptr64)(void*)&gather_list;
+       wr.num_ds = 1;
+       wr.wr_type = WR_SEND;
+       wr.send_opt = IB_SEND_OPT_SIGNALED;
+       wr.dgrm.ud.remote_qp = cl_hton32(qpn);
+       wr.dgrm.ud.remote_qkey = qpn ? IB_QP1_QKEY : 0;
+       
+       if (agent) {
+               tmad = kmalloc(sizeof *tmad, GFP_KERNEL);
+               if (!tmad)
+                       return;
+
+               alloc_dma_zmem(dev, sizeof *mad, &tmad->sg);
+               if (!tmad->sg.page) {
+                       kfree(tmad);
+                       return;
+               }
+
+               memcpy(tmad->sg.page, mad, sizeof *mad);
+
+               wr.dgrm.ud.rsvd = (void* __ptr64)&((struct ib_mad *)tmad->sg.page)->mad_hdr;
+               wr.wr_id         = (u64)(ULONG_PTR)tmad;
+               gather_list.addr   = tmad->sg.dma_address;
+               gather_list.length = tmad->sg.length;
+               gather_list.lkey   = to_mpd(agent->qp->pd)->ntmr.ibmr.lkey;
+
+               /*
+                * We rely here on the fact that MLX QPs don't use the
+                * address handle after the send is posted (this is
+                * wrong following the IB spec strictly, but we know
+                * it's OK for our devices).
+                */
+               spin_lock_irqsave(&dev->sm_lock, &lh);
+               wr.dgrm.ud.h_av      = (ib_av_handle_t)dev->sm_ah[port_num - 1];
+               if (wr.dgrm.ud.h_av) {
+                               HCA_PRINT( TRACE_LEVEL_ERROR   ,HCA_DBG_LOW   ,(" forward_trap: ib_post_send_mad not ported \n" ));
+                               ret = -EINVAL;
+               }
+               else
+                       ret = -EINVAL;
+               spin_unlock_irqrestore(&lh);
+
+               if (ret) {
+                       free_dma_mem_map(dev, &tmad->sg, PCI_DMA_BIDIRECTIONAL );
+                       kfree(tmad);
+               }
+       }
+}
+
+int mthca_process_mad(struct ib_device *ibdev,
+                     int mad_flags,
+                     u8 port_num,
+                     struct _ib_wc *in_wc,
+                     struct ib_grh *in_grh,
+                     struct ib_mad *in_mad,
+                     struct ib_mad *out_mad)
+{
+       int err;
+       u8 status;
+       u16 slid = in_wc ? in_wc->recv.ud.remote_lid : cl_ntoh16(IB_LID_PERMISSIVE);
+
+
+#if 0
+       HCA_PRINT( TRACE_LEVEL_VERBOSE ,HCA_DBG_LOW,("mthca_process_mad: \n\tin: Class %02x, Method %02x, AttrId %x, AttrMod %x, ClSpec %x, Tid %I64x\n",
+               (u32)in_mad->mad_hdr.mgmt_class, (u32)in_mad->mad_hdr.method, 
+               (u32)in_mad->mad_hdr.attr_id, in_mad->mad_hdr.attr_mod, 
+               (u32)in_mad->mad_hdr.class_specific, in_mad->mad_hdr.tid ));
+#endif
+
+       /* Forward locally generated traps to the SM */
+       if (in_mad->mad_hdr.method == IB_MGMT_METHOD_TRAP &&
+           slid == 0) {
+               forward_trap(to_mdev(ibdev), port_num, in_mad);
+               HCA_PRINT( TRACE_LEVEL_VERBOSE   ,HCA_DBG_LOW   ,("mthca_process_mad: Not sent, but locally forwarded\n"));
+               return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;
+       }
+
+       /*
+        * Only handle SM gets, sets and trap represses for SM class
+        *
+        * Only handle PMA and Mellanox vendor-specific class gets and
+        * sets for other classes.
+        */
+       if (in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED ||
+           in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
+
+               if (in_mad->mad_hdr.method   != IB_MGMT_METHOD_GET &&
+                   in_mad->mad_hdr.method   != IB_MGMT_METHOD_SET &&
+                   in_mad->mad_hdr.method   != IB_MGMT_METHOD_TRAP_REPRESS) {
+                       HCA_PRINT( TRACE_LEVEL_VERBOSE   ,HCA_DBG_LOW   ,("mthca_process_mad: Skip some methods. Nothing done!\n"));
+                       return IB_MAD_RESULT_SUCCESS;
+               }
+
+               /*
+                * Don't process SMInfo queries or vendor-specific
+                * MADs -- the SMA can't handle them.
+                */
+               if (in_mad->mad_hdr.attr_id == IB_SMP_ATTR_SM_INFO ||
+                   ((in_mad->mad_hdr.attr_id & IB_SMP_ATTR_VENDOR_MASK) ==
+                    IB_SMP_ATTR_VENDOR_MASK)) {
+                       HCA_PRINT( TRACE_LEVEL_VERBOSE   ,HCA_DBG_LOW   ,("mthca_process_mad: Skip SMInfo queries or vendor-specific MADs. Nothing done!\n"));
+                       return IB_MAD_RESULT_SUCCESS;
+               }
+       } 
+       else {
+               if (in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_PERF_MGMT ||
+                  in_mad->mad_hdr.mgmt_class == MTHCA_VENDOR_CLASS1     ||
+                  in_mad->mad_hdr.mgmt_class == MTHCA_VENDOR_CLASS2) {
+
+                       if (in_mad->mad_hdr.method  != IB_MGMT_METHOD_GET &&
+                           in_mad->mad_hdr.method  != IB_MGMT_METHOD_SET) {
+                               HCA_PRINT( TRACE_LEVEL_VERBOSE   ,HCA_DBG_LOW  ,("mthca_process_mad: Skip some management methods. Nothing done!\n"));
+                               return IB_MAD_RESULT_SUCCESS;
+                       }
+               } 
+               else {
+                       HCA_PRINT( TRACE_LEVEL_VERBOSE   ,HCA_DBG_LOW  ,("mthca_process_mad: Skip unhandled management class. Nothing done!\n"));
+                       return IB_MAD_RESULT_SUCCESS;
+               }       
+       }
+
+       // send MAD
+       err = mthca_MAD_IFC(to_mdev(ibdev),
+                           mad_flags & IB_MAD_IGNORE_MKEY,
+                           mad_flags & IB_MAD_IGNORE_BKEY,
+                           port_num, in_wc, in_grh, in_mad, out_mad,
+                           &status);
+       if (err) {
+               HCA_PRINT(TRACE_LEVEL_ERROR   ,HCA_DBG_LOW   ,("MAD_IFC failed\n"));
+               return IB_MAD_RESULT_FAILURE;
+       }
+       if (status == MTHCA_CMD_STAT_BAD_PKT)
+               return IB_MAD_RESULT_SUCCESS;
+       if (status) {
+               HCA_PRINT(TRACE_LEVEL_ERROR   ,HCA_DBG_LOW   ,("mthca_process_mad: MAD_IFC returned status %02x\n", status));
+               return IB_MAD_RESULT_FAILURE;
+       }
+
+       if (!out_mad->mad_hdr.status)
+               smp_snoop(ibdev, port_num, in_mad);
+
+#if 0
+       //NB: excluded, because it is set in the shim. Setting it here prevents the shim MAD cache from working.
+       /* set return bit in status of directed route responses */
+       if (in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
+               out_mad->mad_hdr.status |= cl_hton16(1 << 15);
+#endif 
+
+#if 0
+       HCA_PRINT( TRACE_LEVEL_VERBOSE ,HCA_DBG_LOW,("mthca_process_mad: \n\tout: Class %02x, Method %02x, AttrId %x, AttrMod %x, ClSpec %x, Tid %I64x, Status %x\n",
+               (u32)in_mad->mad_hdr.mgmt_class, (u32)in_mad->mad_hdr.method, 
+               (u32)in_mad->mad_hdr.attr_id, in_mad->mad_hdr.attr_mod, 
+               (u32)in_mad->mad_hdr.class_specific, in_mad->mad_hdr.tid,
+               (u32)in_mad->mad_hdr.status ));
+#endif
+
+       if (in_mad->mad_hdr.method == IB_MGMT_METHOD_TRAP_REPRESS) {
+               /* no response for trap repress */
+               return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;
+       }
+
+       return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
+}
+
+static void send_handler(struct ib_mad_agent *agent,
+                        struct ib_mad_send_wc *mad_send_wc)
+{
+       struct mthca_trap_mad *tmad =
+               (void *) (ULONG_PTR) mad_send_wc->wr_id;
+
+       free_dma_mem_map(agent->device->mdev, &tmad->sg, PCI_DMA_BIDIRECTIONAL );
+       kfree(tmad);
+}
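(For orientation -- not part of this change set: a minimal sketch of how a MAD dispatcher might act
on the result bits returned by mthca_process_mad() above, assuming the usual IB_MAD_RESULT_*
encoding in which SUCCESS, REPLY and CONSUMED are independent flag bits. The surrounding dispatch
code is hypothetical.)

	int result = mthca_process_mad(ibdev, mad_flags, port_num,
				       in_wc, in_grh, in_mad, out_mad);
	if (!(result & IB_MAD_RESULT_SUCCESS)) {
		/* MAD_IFC failed or returned a bad status -- drop the MAD */
	} else if (result & IB_MAD_RESULT_REPLY) {
		/* out_mad now holds a response to send back to the requester */
	} else if (result & IB_MAD_RESULT_CONSUMED) {
		/* nothing to send: trap repress or a locally forwarded trap */
	}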
diff --git a/trunk/hw/mthca/kernel/mthca_main.c b/trunk/hw/mthca/kernel/mthca_main.c
new file mode 100644 (file)
index 0000000..e231e09
--- /dev/null
@@ -0,0 +1,1052 @@
+/*
+ * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
+ * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
+ * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id: mthca_main.c 3056 2005-08-11 04:27:10Z roland $
+ */
+
+#include "mthca_dev.h"
+#if defined(EVENT_TRACING)
+#ifdef offsetof
+#undef offsetof
+#endif
+#include "mthca_main.tmh"
+#endif
+#include "mthca_config_reg.h"
+#include "mthca_cmd.h"
+#include "mthca_profile.h"
+#include "mthca_memfree.h"
+
+static const char mthca_version[] =
+       DRV_NAME ": HCA Driver v"
+       DRV_VERSION " (" DRV_RELDATE ")";
+
+static struct mthca_profile default_profile = {
+       1 << 16,                // num_qp
+       4,                                      // rdb_per_qp
+       0,                              // num_srq
+       1 << 16,                // num_cq
+       1 << 13,                // num_mcg
+       1 << 17,                // num_mpt
+       1 << 20,                // num_mtt
+       1 << 15,                // num_udav (Tavor only)
+       0,                                      // num_uar
+       1 << 18,                // uarc_size (Arbel only)
+       1 << 18,                // fmr_reserved_mtts (Tavor only)
+};
+
+/* Types of supported HCA */
+enum __hca_type {
+       TAVOR,                  /* MT23108                        */
+       ARBEL_COMPAT,           /* MT25208 in Tavor compat mode   */
+       ARBEL_NATIVE,           /* MT25208 with extended features */
+       SINAI                   /* MT25204 */
+};
+
+#define MTHCA_FW_VER(major, minor, subminor) \
+       (((u64) (major) << 32) | ((u64) (minor) << 16) | (u64) (subminor))
+
+static struct {
+       u64 max_unsupported_fw;
+       u64 min_supported_fw;
+       int is_memfree;
+       int is_pcie;
+} mthca_hca_table[] = {
+       { MTHCA_FW_VER(3, 2, 0), MTHCA_FW_VER(3, 3, 3), 0, 0 },
+       { MTHCA_FW_VER(4, 6, 0), MTHCA_FW_VER(4, 7, 0), 0, 1 },
+       { MTHCA_FW_VER(5, 0, 0), MTHCA_FW_VER(5, 1, 300), 1, 1 },
+       { MTHCA_FW_VER(1, 0, 0), MTHCA_FW_VER(1, 0, 1), 1, 1 }
+};
+
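(For orientation -- not part of this change set: a worked example of the MTHCA_FW_VER() packing used
in the table above.)

	/* MTHCA_FW_VER(3, 3, 3)
	 *   = ((u64)3 << 32) | ((u64)3 << 16) | 3
	 *   = 0x0000000300030003
	 * mthca_check_fw() below unpacks it with >> 32, (>> 16) & 0xffff and & 0xffff. */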
+
+#define HCA(v, d, t) \
+       { PCI_VENDOR_ID_##v,    PCI_DEVICE_ID_MELLANOX_##d, t }
+
+static struct pci_device_id {
+       unsigned                vendor;
+       unsigned                device;
+       enum __hca_type driver_data;
+} mthca_pci_table[] = {
+       HCA(MELLANOX, TAVOR,        TAVOR),
+       HCA(MELLANOX, ARBEL_COMPAT, ARBEL_COMPAT),
+       HCA(MELLANOX, ARBEL,        ARBEL_NATIVE),
+       HCA(MELLANOX, SINAI_OLD,    SINAI),
+       HCA(MELLANOX, SINAI,        SINAI),
+       HCA(TOPSPIN,  TAVOR,        TAVOR),
+       HCA(TOPSPIN,  ARBEL_COMPAT, TAVOR),
+       HCA(TOPSPIN,  ARBEL,        ARBEL_NATIVE),
+       HCA(TOPSPIN,  SINAI_OLD,    SINAI),
+       HCA(TOPSPIN,  SINAI,        SINAI),
+};
+#define MTHCA_PCI_TABLE_SIZE (sizeof(mthca_pci_table)/sizeof(struct pci_device_id))
+
+// wrapper to driver's hca_tune_pci
+static NTSTATUS mthca_tune_pci(struct mthca_dev *mdev)
+{
+       PDEVICE_OBJECT pdo = mdev->ext->cl_ext.p_self_do;
+       return hca_tune_pci(pdo);
+}
+
+int mthca_get_dev_info(struct mthca_dev *mdev, __be64 *node_guid, u32 *hw_id)
+{
+       struct ib_device_attr props;
+       struct ib_device *ib_dev = &mdev->ib_dev;
+       int err = (ib_dev->query_device )(ib_dev, &props );
+
+       if (err) {
+               HCA_PRINT_EV(TRACE_LEVEL_ERROR   ,HCA_DBG_LOW   ,("can't get guid - query_device() failed (%08X)\n", err ));
+               return err;
+       }
+
+       //TODO: do we need to convert GUID to LE by cl_ntoh64(x) ?
+       *node_guid = ib_dev->node_guid;
+       *hw_id = props.hw_ver;
+       return 0;
+}
+
+static struct pci_device_id * mthca_find_pci_dev(unsigned ven_id, unsigned dev_id)
+{
+       struct pci_device_id *p_id = mthca_pci_table;
+       int i;
+
+       // find p_id (appropriate line in mthca_pci_table)
+       for (i = 0; i < MTHCA_PCI_TABLE_SIZE; ++i, ++p_id) {
+               if (p_id->device == dev_id && p_id->vendor ==  ven_id)
+                       return p_id;
+       }
+       return NULL;
+}
+
+
+static int  mthca_dev_lim(struct mthca_dev *mdev, struct mthca_dev_lim *dev_lim)
+{
+       int err;
+       u8 status;
+
+       err = mthca_QUERY_DEV_LIM(mdev, dev_lim, &status);
+       if (err) {
+               HCA_PRINT_EV(TRACE_LEVEL_ERROR   ,HCA_DBG_LOW   ,("QUERY_DEV_LIM command failed, aborting.\n"));
+               return err;
+       }
+       if (status) {
+               HCA_PRINT_EV(TRACE_LEVEL_ERROR ,HCA_DBG_LOW ,("QUERY_DEV_LIM returned status 0x%02x, "
+                         "aborting.\n", status));
+               return -EINVAL;
+       }
+       if (dev_lim->min_page_sz > PAGE_SIZE) {
+               HCA_PRINT_EV(TRACE_LEVEL_ERROR ,HCA_DBG_LOW ,("HCA minimum page size of %d bigger than "
+                         "kernel PAGE_SIZE of %ld, aborting.\n",
+                         dev_lim->min_page_sz, PAGE_SIZE));
+               return -ENODEV;
+       }
+       if (dev_lim->num_ports > MTHCA_MAX_PORTS) {
+               HCA_PRINT_EV(TRACE_LEVEL_ERROR ,HCA_DBG_LOW ,("HCA has %d ports, but we only support %d, "
+                         "aborting.\n",
+                         dev_lim->num_ports, MTHCA_MAX_PORTS));
+               return -ENODEV;
+       }
+
+       mdev->limits.num_ports          = dev_lim->num_ports;
+       mdev->limits.vl_cap             = dev_lim->max_vl;
+       mdev->limits.mtu_cap            = dev_lim->max_mtu;
+       mdev->limits.gid_table_len      = dev_lim->max_gids;
+       mdev->limits.pkey_table_len     = dev_lim->max_pkeys;
+       mdev->limits.local_ca_ack_delay = dev_lim->local_ca_ack_delay;
+       mdev->limits.max_sg             = dev_lim->max_sg;
+       mdev->limits.max_wqes           = dev_lim->max_qp_sz;
+       mdev->limits.max_qp_init_rdma   = dev_lim->max_requester_per_qp;
+       mdev->limits.reserved_qps       = dev_lim->reserved_qps;
+       mdev->limits.max_srq_wqes       = dev_lim->max_srq_sz;
+       mdev->limits.reserved_srqs      = dev_lim->reserved_srqs;
+       mdev->limits.reserved_eecs      = dev_lim->reserved_eecs;
+       mdev->limits.max_desc_sz      = dev_lim->max_desc_sz;
+       /*
+        * Subtract 1 from the limit because we need to allocate a
+        * spare CQE so the HCA HW can tell the difference between an
+        * empty CQ and a full CQ.
+        */
+       mdev->limits.max_cqes           = dev_lim->max_cq_sz - 1;
+       mdev->limits.reserved_cqs       = dev_lim->reserved_cqs;
+       mdev->limits.reserved_eqs       = dev_lim->reserved_eqs;
+       mdev->limits.reserved_mtts      = dev_lim->reserved_mtts;
+       mdev->limits.reserved_mrws      = dev_lim->reserved_mrws;
+       mdev->limits.reserved_uars      = dev_lim->reserved_uars;
+       mdev->limits.reserved_pds       = dev_lim->reserved_pds;
+       mdev->limits.port_width_cap     = (u8)dev_lim->max_port_width;
+       mdev->limits.page_size_cap      = ~(u32)(dev_lim->min_page_sz - 1);
+       mdev->limits.flags                              = dev_lim->flags;
+
+       /* IB_DEVICE_RESIZE_MAX_WR not supported by driver.
+          May be doable since hardware supports it for SRQ.
+
+          IB_DEVICE_N_NOTIFY_CQ is supported by hardware but not by driver.
+
+          IB_DEVICE_SRQ_RESIZE is supported by hardware but SRQ is not
+          supported by driver. */
+       mdev->device_cap_flags = IB_DEVICE_CHANGE_PHY_PORT |
+               IB_DEVICE_PORT_ACTIVE_EVENT |
+               IB_DEVICE_SYS_IMAGE_GUID |
+               IB_DEVICE_RC_RNR_NAK_GEN;
+
+       if (dev_lim->flags & DEV_LIM_FLAG_BAD_PKEY_CNTR)
+               mdev->device_cap_flags |= IB_DEVICE_BAD_PKEY_CNTR;
+
+       if (dev_lim->flags & DEV_LIM_FLAG_BAD_QKEY_CNTR)
+               mdev->device_cap_flags |= IB_DEVICE_BAD_QKEY_CNTR;
+
+       if (dev_lim->flags & DEV_LIM_FLAG_RAW_MULTI)
+               mdev->device_cap_flags |= IB_DEVICE_RAW_MULTI;
+
+       if (dev_lim->flags & DEV_LIM_FLAG_AUTO_PATH_MIG)
+               mdev->device_cap_flags |= IB_DEVICE_AUTO_PATH_MIG;
+
+       if (dev_lim->flags & DEV_LIM_FLAG_UD_AV_PORT_ENFORCE)
+               mdev->device_cap_flags |= IB_DEVICE_UD_AV_PORT_ENFORCE;
+
+       if (dev_lim->flags & DEV_LIM_FLAG_SRQ)
+               mdev->mthca_flags |= MTHCA_FLAG_SRQ;
+
+       return 0;
+}
+
+static int  mthca_init_tavor(struct mthca_dev *mdev)
+{
+       u8 status;
+       int err;
+       struct mthca_dev_lim        dev_lim;
+       struct mthca_profile        profile;
+       struct mthca_init_hca_param init_hca;
+
+       err = mthca_SYS_EN(mdev, &status);
+       if (err) {
+               HCA_PRINT_EV(TRACE_LEVEL_ERROR ,HCA_DBG_LOW ,("SYS_EN command failed, aborting.\n"));
+               return err;
+       }
+       if (status) {
+               HCA_PRINT_EV(TRACE_LEVEL_ERROR ,HCA_DBG_LOW ,("SYS_EN returned status 0x%02x, "
+                         "aborting.\n", status));
+               return -EINVAL;
+       }
+
+       err = mthca_QUERY_FW(mdev, &status);
+       if (err) {
+               HCA_PRINT_EV(TRACE_LEVEL_ERROR   ,HCA_DBG_LOW   ,("QUERY_FW command failed, aborting.\n"));
+               goto err_disable;
+       }
+       if (status) {
+               HCA_PRINT_EV(TRACE_LEVEL_ERROR ,HCA_DBG_LOW ,("QUERY_FW returned status 0x%02x, "
+                         "aborting.\n", status));
+               err = -EINVAL;
+               goto err_disable;
+       }
+       err = mthca_QUERY_DDR(mdev, &status);
+       if (err) {
+               HCA_PRINT_EV(TRACE_LEVEL_ERROR   ,HCA_DBG_LOW   ,("QUERY_DDR command failed, aborting.\n"));
+               goto err_disable;
+       }
+       if (status) {
+               HCA_PRINT_EV(TRACE_LEVEL_ERROR ,HCA_DBG_LOW ,( "QUERY_DDR returned status 0x%02x, "
+                         "aborting.\n", status));
+               err = -EINVAL;
+               goto err_disable;
+       }
+
+       err = mthca_dev_lim(mdev, &dev_lim);
+       if (err) {
+               HCA_PRINT_EV(TRACE_LEVEL_ERROR ,HCA_DBG_LOW ,( "QUERY_DEV_LIM command failed, aborting.\n"));
+               goto err_disable;
+       }
+
+       profile = default_profile;
+       profile.num_uar   = dev_lim.uar_size / PAGE_SIZE;
+       profile.uarc_size = 0;
+       if (mdev->mthca_flags & MTHCA_FLAG_SRQ)
+               profile.num_srq = dev_lim.max_srqs;
+
+       err = (int)mthca_make_profile(mdev, &profile, &dev_lim, &init_hca);
+       if (err < 0)
+               goto err_disable;
+
+       err = (int)mthca_INIT_HCA(mdev, &init_hca, &status);
+       if (err) {
+               HCA_PRINT_EV(TRACE_LEVEL_ERROR   ,HCA_DBG_LOW   ,("INIT_HCA command failed, aborting.\n"));
+               goto err_disable;
+       }
+       if (status) {
+               HCA_PRINT_EV(TRACE_LEVEL_ERROR ,HCA_DBG_LOW ,("INIT_HCA returned status 0x%02x, "
+                         "aborting.\n", status));
+               err = -EINVAL;
+               goto err_disable;
+       }
+
+       return 0;
+
+err_disable:
+       mthca_SYS_DIS(mdev, &status);
+
+       return err;
+}
+
+static int  mthca_load_fw(struct mthca_dev *mdev)
+{
+       u8 status;
+       int err;
+
+       /* FIXME: use HCA-attached memory for FW if present */
+
+       mdev->fw.arbel.fw_icm =
+               mthca_alloc_icm(mdev, mdev->fw.arbel.fw_pages,
+                               GFP_HIGHUSER | __GFP_NOWARN);
+       if (!mdev->fw.arbel.fw_icm) {
+               HCA_PRINT_EV(TRACE_LEVEL_ERROR   ,HCA_DBG_LOW   ,("Couldn't allocate FW area, aborting.\n"));
+               return -ENOMEM;
+       }
+
+       err = mthca_MAP_FA(mdev, mdev->fw.arbel.fw_icm, &status);
+       if (err) {
+               HCA_PRINT_EV(TRACE_LEVEL_ERROR   ,HCA_DBG_LOW   ,("MAP_FA command failed, aborting.\n"));
+               goto err_free;
+       }
+       if (status) {
+               HCA_PRINT_EV(TRACE_LEVEL_ERROR   ,HCA_DBG_LOW   ,("MAP_FA returned status 0x%02x, aborting.\n", status));
+               err = -EINVAL;
+               goto err_free;
+       }
+       err = mthca_RUN_FW(mdev, &status);
+       if (err) {
+               HCA_PRINT_EV(TRACE_LEVEL_ERROR   ,HCA_DBG_LOW   ,("RUN_FW command failed, aborting.\n"));
+               goto err_unmap_fa;
+       }
+       if (status) {
+               HCA_PRINT_EV(TRACE_LEVEL_ERROR   ,HCA_DBG_LOW   ,("RUN_FW returned status 0x%02x, aborting.\n", status));
+               err = -EINVAL;
+               goto err_unmap_fa;
+       }
+
+       return 0;
+
+err_unmap_fa:
+       mthca_UNMAP_FA(mdev, &status);
+
+err_free:
+       mthca_free_icm(mdev, mdev->fw.arbel.fw_icm);
+       return err;
+}
+
+static int  mthca_init_icm(struct mthca_dev *mdev,
+                                   struct mthca_dev_lim *dev_lim,
+                                   struct mthca_init_hca_param *init_hca,
+                                   u64 icm_size)
+{
+       u64 aux_pages;
+       u8 status;
+       int err;
+
+       err = mthca_SET_ICM_SIZE(mdev, icm_size, &aux_pages, &status);
+       if (err) {
+               HCA_PRINT_EV(TRACE_LEVEL_ERROR   ,HCA_DBG_LOW   ,("SET_ICM_SIZE command failed, aborting.\n"));
+               return err;
+       }
+       if (status) {
+               HCA_PRINT_EV(TRACE_LEVEL_ERROR ,HCA_DBG_LOW ,("SET_ICM_SIZE returned status 0x%02x, "
+                         "aborting.\n", status));
+               return -EINVAL;
+       }
+
+       HCA_PRINT(TRACE_LEVEL_INFORMATION ,HCA_DBG_LOW , ("%I64d KB of HCA context requires %I64d KB aux memory.\n",
+                 (unsigned long long) icm_size >> 10,
+                 (unsigned long long) aux_pages << 2));
+
+       mdev->fw.arbel.aux_icm = mthca_alloc_icm(mdev, (int)aux_pages,
+                                                GFP_HIGHUSER | __GFP_NOWARN);
+       if (!mdev->fw.arbel.aux_icm) {
+               HCA_PRINT_EV(TRACE_LEVEL_ERROR   ,HCA_DBG_LOW   ,("Couldn't allocate aux memory, aborting.\n"));
+               return -ENOMEM;
+       }
+
+       err = mthca_MAP_ICM_AUX(mdev, mdev->fw.arbel.aux_icm, &status);
+       if (err) {
+               HCA_PRINT_EV(TRACE_LEVEL_ERROR   ,HCA_DBG_LOW   ,("MAP_ICM_AUX command failed, aborting.\n"));
+               goto err_free_aux;
+       }
+       if (status) {
+               HCA_PRINT_EV(TRACE_LEVEL_ERROR   ,HCA_DBG_LOW   ,("MAP_ICM_AUX returned status 0x%02x, aborting.\n", status));
+               err = -EINVAL;
+               goto err_free_aux;
+       }
+
+       err = mthca_map_eq_icm(mdev, init_hca->eqc_base);
+       if (err) {
+               HCA_PRINT_EV(TRACE_LEVEL_ERROR   ,HCA_DBG_LOW   ,("Failed to map EQ context memory, aborting.\n"));
+               goto err_unmap_aux;
+       }
+
+       mdev->mr_table.mtt_table = mthca_alloc_icm_table(mdev, init_hca->mtt_base,
+                                                        MTHCA_MTT_SEG_SIZE,
+                                                        mdev->limits.num_mtt_segs,
+                                                        mdev->limits.reserved_mtts, 1);
+       if (!mdev->mr_table.mtt_table) {
+               HCA_PRINT_EV(TRACE_LEVEL_ERROR   ,HCA_DBG_LOW   ,("Failed to map MTT context memory, aborting.\n"));
+               err = -ENOMEM;
+               goto err_unmap_eq;
+       }
+
+       mdev->mr_table.mpt_table = mthca_alloc_icm_table(mdev, init_hca->mpt_base,
+                                                        dev_lim->mpt_entry_sz,
+                                                        mdev->limits.num_mpts,
+                                                        mdev->limits.reserved_mrws, 1);
+       if (!mdev->mr_table.mpt_table) {
+               HCA_PRINT_EV(TRACE_LEVEL_ERROR   ,HCA_DBG_LOW   ,("Failed to map MPT context memory, aborting.\n"));
+               err = -ENOMEM;
+               goto err_unmap_mtt;
+       }
+
+       mdev->qp_table.qp_table = mthca_alloc_icm_table(mdev, init_hca->qpc_base,
+                                                       dev_lim->qpc_entry_sz,
+                                                       mdev->limits.num_qps,
+                                                       mdev->limits.reserved_qps, 0);
+       if (!mdev->qp_table.qp_table) {
+               HCA_PRINT_EV(TRACE_LEVEL_ERROR   ,HCA_DBG_LOW   ,("Failed to map QP context memory, aborting.\n"));
+               err = -ENOMEM;
+               goto err_unmap_mpt;
+       }
+
+       mdev->qp_table.eqp_table = mthca_alloc_icm_table(mdev, init_hca->eqpc_base,
+                                                        dev_lim->eqpc_entry_sz,
+                                                        mdev->limits.num_qps,
+                                                        mdev->limits.reserved_qps, 0);
+       if (!mdev->qp_table.eqp_table) {
+               HCA_PRINT_EV(TRACE_LEVEL_ERROR   ,HCA_DBG_LOW   ,("Failed to map EQP context memory, aborting.\n"));
+               err = -ENOMEM;
+               goto err_unmap_qp;
+       }
+
+       mdev->qp_table.rdb_table = mthca_alloc_icm_table(mdev, init_hca->rdb_base,
+                                                        MTHCA_RDB_ENTRY_SIZE,
+                                                        mdev->limits.num_qps <<
+                                                        mdev->qp_table.rdb_shift,
+                                                        0, 0);
+       if (!mdev->qp_table.rdb_table) {
+               HCA_PRINT_EV(TRACE_LEVEL_ERROR   ,HCA_DBG_LOW   ,("Failed to map RDB context memory, aborting\n"));
+               err = -ENOMEM;
+               goto err_unmap_eqp;
+       }
+
+       mdev->cq_table.table = mthca_alloc_icm_table(mdev, init_hca->cqc_base,
+                                                   dev_lim->cqc_entry_sz,
+                                                   mdev->limits.num_cqs,
+                                                   mdev->limits.reserved_cqs, 0);
+       if (!mdev->cq_table.table) {
+               HCA_PRINT_EV(TRACE_LEVEL_ERROR   ,HCA_DBG_LOW   ,("Failed to map CQ context memory, aborting.\n"));
+               err = -ENOMEM;
+               goto err_unmap_rdb;
+       }
+
+       if (mdev->mthca_flags & MTHCA_FLAG_SRQ) {
+               mdev->srq_table.table =
+                       mthca_alloc_icm_table(mdev, init_hca->srqc_base,
+                                             dev_lim->srq_entry_sz,
+                                             mdev->limits.num_srqs,
+                                             mdev->limits.reserved_srqs, 0);
+               if (!mdev->srq_table.table) {
+                       HCA_PRINT_EV(TRACE_LEVEL_ERROR ,HCA_DBG_LOW ,("Failed to map SRQ context memory, "
+                                 "aborting.\n"));
+                       err = -ENOMEM;
+                       goto err_unmap_cq;
+               }
+       }
+
+       /*
+        * It's not strictly required, but for simplicity just map the
+        * whole multicast group table now.  The table isn't very big
+        * and it's a lot easier than trying to track ref counts.
+        */
+       mdev->mcg_table.table = mthca_alloc_icm_table(mdev, init_hca->mc_base,
+                                                     MTHCA_MGM_ENTRY_SIZE,
+                                                     mdev->limits.num_mgms +
+                                                     mdev->limits.num_amgms,
+                                                     mdev->limits.num_mgms +
+                                                     mdev->limits.num_amgms,
+                                                     0);
+       if (!mdev->mcg_table.table) {
+               HCA_PRINT_EV(TRACE_LEVEL_ERROR   ,HCA_DBG_LOW   ,("Failed to map MCG context memory, aborting.\n"));
+               err = -ENOMEM;
+               goto err_unmap_srq;
+       }
+
+       return 0;
+
+err_unmap_srq:
+       if (mdev->mthca_flags & MTHCA_FLAG_SRQ)
+               mthca_free_icm_table(mdev, mdev->srq_table.table);
+
+err_unmap_cq:
+       mthca_free_icm_table(mdev, mdev->cq_table.table);
+
+err_unmap_rdb:
+       mthca_free_icm_table(mdev, mdev->qp_table.rdb_table);
+
+err_unmap_eqp:
+       mthca_free_icm_table(mdev, mdev->qp_table.eqp_table);
+
+err_unmap_qp:
+       mthca_free_icm_table(mdev, mdev->qp_table.qp_table);
+
+err_unmap_mpt:
+       mthca_free_icm_table(mdev, mdev->mr_table.mpt_table);
+
+err_unmap_mtt:
+       mthca_free_icm_table(mdev, mdev->mr_table.mtt_table);
+
+err_unmap_eq:
+       mthca_unmap_eq_icm(mdev);
+
+err_unmap_aux:
+       mthca_UNMAP_ICM_AUX(mdev, &status);
+
+err_free_aux:
+       mthca_free_icm(mdev, mdev->fw.arbel.aux_icm);
+
+       return err;
+}
+
+static int  mthca_init_arbel(struct mthca_dev *mdev)
+{
+       struct mthca_dev_lim        dev_lim;
+       struct mthca_profile        profile;
+       struct mthca_init_hca_param init_hca;
+       u64 icm_size;
+       u8 status;
+       int err;
+
+       err = mthca_QUERY_FW(mdev, &status);
+       if (err) {
+               HCA_PRINT_EV(TRACE_LEVEL_ERROR   ,HCA_DBG_LOW   ,("QUERY_FW command failed, aborting.\n"));
+               return err;
+       }
+       if (status) {
+               HCA_PRINT_EV(TRACE_LEVEL_ERROR ,HCA_DBG_LOW ,("QUERY_FW returned status 0x%02x, "
+                         "aborting.\n", status));
+               return -EINVAL;
+       }
+
+       err = mthca_ENABLE_LAM(mdev, &status);
+       if (err) {
+               HCA_PRINT_EV(TRACE_LEVEL_ERROR   ,HCA_DBG_LOW   ,("ENABLE_LAM command failed, aborting.\n"));
+               return err;
+       }
+       if (status == MTHCA_CMD_STAT_LAM_NOT_PRE) {
+               HCA_PRINT(TRACE_LEVEL_INFORMATION   ,HCA_DBG_LOW   ,("No HCA-attached memory (running in MemFree mode)\n"));
+               mdev->mthca_flags |= MTHCA_FLAG_NO_LAM;
+       } else if (status) {
+               HCA_PRINT_EV(TRACE_LEVEL_ERROR ,HCA_DBG_LOW ,("ENABLE_LAM returned status 0x%02x, "
+                         "aborting.\n", status));
+               return -EINVAL;
+       }
+
+       err = mthca_load_fw(mdev);
+       if (err) {
+               HCA_PRINT_EV(TRACE_LEVEL_ERROR   ,HCA_DBG_LOW   ,("Failed to start FW, aborting.\n"));
+               goto err_disable;
+       }
+
+       err = mthca_dev_lim(mdev, &dev_lim);
+       if (err) {
+               HCA_PRINT_EV(TRACE_LEVEL_ERROR   ,HCA_DBG_LOW   ,("QUERY_DEV_LIM command failed, aborting.\n"));
+               goto err_stop_fw;
+       }
+
+       profile = default_profile;
+       profile.num_uar  = dev_lim.uar_size / PAGE_SIZE;
+       profile.num_udav = 0;
+       if (mdev->mthca_flags & MTHCA_FLAG_SRQ)
+               profile.num_srq = dev_lim.max_srqs;
+
+       icm_size = mthca_make_profile(mdev, &profile, &dev_lim, &init_hca);
+       if ((int) icm_size < 0) {
+               err = (int)icm_size;
+               goto err_stop_fw;
+       }
+
+       err = mthca_init_icm(mdev, &dev_lim, &init_hca, icm_size);
+       if (err)
+               goto err_stop_fw;
+
+       err = mthca_INIT_HCA(mdev, &init_hca, &status);
+       if (err) {
+               HCA_PRINT_EV(TRACE_LEVEL_ERROR   ,HCA_DBG_LOW   ,("INIT_HCA command failed, aborting.\n"));
+               goto err_free_icm;
+       }
+       if (status) {
+               HCA_PRINT_EV(TRACE_LEVEL_ERROR ,HCA_DBG_LOW ,("INIT_HCA returned status 0x%02x, "
+                         "aborting.\n", status));
+               err = -EINVAL;
+               goto err_free_icm;
+       }
+
+       return 0;
+
+err_free_icm:
+       if (mdev->mthca_flags & MTHCA_FLAG_SRQ)
+               mthca_free_icm_table(mdev, mdev->srq_table.table);
+       mthca_free_icm_table(mdev, mdev->cq_table.table);
+       mthca_free_icm_table(mdev, mdev->qp_table.rdb_table);
+       mthca_free_icm_table(mdev, mdev->qp_table.eqp_table);
+       mthca_free_icm_table(mdev, mdev->qp_table.qp_table);
+       mthca_free_icm_table(mdev, mdev->mr_table.mpt_table);
+       mthca_free_icm_table(mdev, mdev->mr_table.mtt_table);
+       mthca_unmap_eq_icm(mdev);
+
+       mthca_UNMAP_ICM_AUX(mdev, &status);
+       mthca_free_icm(mdev, mdev->fw.arbel.aux_icm);
+
+err_stop_fw:
+       mthca_UNMAP_FA(mdev, &status);
+       mthca_free_icm(mdev, mdev->fw.arbel.fw_icm);
+
+err_disable:
+       if (!(mdev->mthca_flags & MTHCA_FLAG_NO_LAM))
+               mthca_DISABLE_LAM(mdev, &status);
+
+       return err;
+}
+
+static void mthca_close_hca(struct mthca_dev *mdev)
+{
+       u8 status;
+
+       mthca_CLOSE_HCA(mdev, 0, &status);
+
+       if (mthca_is_memfree(mdev)) {
+               if (mdev->mthca_flags & MTHCA_FLAG_SRQ)
+                       mthca_free_icm_table(mdev, mdev->srq_table.table);
+               mthca_free_icm_table(mdev, mdev->cq_table.table);
+               mthca_free_icm_table(mdev, mdev->qp_table.rdb_table);
+               mthca_free_icm_table(mdev, mdev->qp_table.eqp_table);
+               mthca_free_icm_table(mdev, mdev->qp_table.qp_table);
+               mthca_free_icm_table(mdev, mdev->mr_table.mpt_table);
+               mthca_free_icm_table(mdev, mdev->mr_table.mtt_table);
+               mthca_free_icm_table(mdev, mdev->mcg_table.table);
+               mthca_unmap_eq_icm(mdev);
+
+               mthca_UNMAP_ICM_AUX(mdev, &status);
+               mthca_free_icm(mdev, mdev->fw.arbel.aux_icm);
+
+               mthca_UNMAP_FA(mdev, &status);
+               mthca_free_icm(mdev, mdev->fw.arbel.fw_icm);
+
+               if (!(mdev->mthca_flags & MTHCA_FLAG_NO_LAM))
+                       mthca_DISABLE_LAM(mdev, &status);
+       } else
+               mthca_SYS_DIS(mdev, &status);
+}
+
+static int  mthca_init_hca(struct mthca_dev *mdev)
+{
+       u8 status;
+       int err;
+       struct mthca_adapter adapter;
+
+       if (mthca_is_memfree(mdev))
+               err = mthca_init_arbel(mdev);
+       else
+               err = mthca_init_tavor(mdev);
+
+       if (err)
+               return err;
+
+       err = mthca_QUERY_ADAPTER(mdev, &adapter, &status);
+       if (err) {
+               HCA_PRINT_EV(TRACE_LEVEL_ERROR   ,HCA_DBG_LOW   ,("QUERY_ADAPTER command failed, aborting.\n"));
+               goto err_close;
+       }
+       if (status) {
+               HCA_PRINT_EV(TRACE_LEVEL_ERROR ,HCA_DBG_LOW ,("QUERY_ADAPTER returned status 0x%02x, "
+                         "aborting.\n", status));
+               err = -EINVAL;
+               goto err_close;
+       }
+
+       mdev->eq_table.inta_pin = adapter.inta_pin;
+       mdev->rev_id            = adapter.revision_id;
+       memcpy(mdev->board_id, adapter.board_id, sizeof mdev->board_id);
+
+       return 0;
+
+err_close:
+       mthca_close_hca(mdev);
+       return err;
+}
+
+static int  mthca_setup_hca(struct mthca_dev *mdev)
+{
+       int err;
+       u8 status;
+
+       MTHCA_INIT_DOORBELL_LOCK(&mdev->doorbell_lock);
+
+       err = mthca_init_uar_table(mdev);
+       if (err) {
+               HCA_PRINT_EV(TRACE_LEVEL_ERROR,HCA_DBG_LOW,("Failed to initialize "
+                         "user access region table, aborting.\n"));
+               return err;
+       }
+
+       err = mthca_uar_alloc(mdev, &mdev->driver_uar);
+       if (err) {
+               HCA_PRINT_EV(TRACE_LEVEL_ERROR,HCA_DBG_LOW,("Failed to allocate driver access region, "
+                         "aborting.\n"));
+               goto err_uar_table_free;
+       }
+
+       mdev->kar = ioremap(mdev->driver_uar.pfn << PAGE_SHIFT, PAGE_SIZE,&mdev->kar_size);
+       if (!mdev->kar) {
+               HCA_PRINT_EV(TRACE_LEVEL_ERROR,HCA_DBG_LOW,("Couldn't map kernel access region, "
+                         "aborting.\n"));
+               err = -ENOMEM;
+               goto err_uar_free;
+       }
+
+       err = mthca_init_pd_table(mdev);
+       if (err) {
+               HCA_PRINT_EV(TRACE_LEVEL_ERROR,HCA_DBG_LOW,("Failed to initialize "
+                         "protection domain table, aborting.\n"));
+               goto err_kar_unmap;
+       }
+
+       err = mthca_init_mr_table(mdev);
+       if (err) {
+               HCA_PRINT_EV(TRACE_LEVEL_ERROR,HCA_DBG_LOW,("Failed to initialize "
+                         "memory region table, aborting.\n"));
+               goto err_pd_table_free;
+       }
+
+       err = mthca_pd_alloc(mdev, 1, &mdev->driver_pd);
+       if (err) {
+               HCA_PRINT_EV(TRACE_LEVEL_ERROR,HCA_DBG_LOW,("Failed to create driver PD, "
+                         "aborting.\n"));
+               goto err_mr_table_free;
+       }
+
+       err = mthca_init_eq_table(mdev);
+       if (err) {
+               HCA_PRINT_EV(TRACE_LEVEL_ERROR,HCA_DBG_LOW, ("Failed to initialize "
+                         "event queue table, aborting.\n"));
+               goto err_pd_free;
+       }
+
+       err = mthca_cmd_use_events(mdev);
+       if (err) {
+               HCA_PRINT_EV(TRACE_LEVEL_ERROR,HCA_DBG_LOW,("Failed to switch to event-driven "
+                         "firmware commands, aborting.\n"));
+               goto err_eq_table_free;
+       }
+
+       err = mthca_NOP(mdev, &status);
+       if (err || status) {
+               HCA_PRINT_EV(TRACE_LEVEL_ERROR  ,HCA_DBG_LOW  ,("NOP command failed to generate interrupt, aborting.\n"));
+               if (mdev->mthca_flags & (MTHCA_FLAG_MSI | MTHCA_FLAG_MSI_X)){
+                       HCA_PRINT_EV(TRACE_LEVEL_ERROR  ,HCA_DBG_LOW  ,("Try again with MSI/MSI-X disabled.\n"));
+               }else{
+                       HCA_PRINT_EV(TRACE_LEVEL_ERROR  ,HCA_DBG_LOW  ,("BIOS or ACPI interrupt routing problem?\n"));
+               }
+
+               goto err_cmd_poll;
+       }
+
+       HCA_PRINT(TRACE_LEVEL_VERBOSE  ,HCA_DBG_LOW  ,("NOP command IRQ test passed\n"));
+
+       err = mthca_init_cq_table(mdev);
+       if (err) {
+               HCA_PRINT_EV(TRACE_LEVEL_ERROR,HCA_DBG_LOW,("Failed to initialize "
+                         "completion queue table, aborting.\n"));
+               goto err_cmd_poll;
+       }
+
+       err = mthca_init_srq_table(mdev);
+       if (err) {
+               HCA_PRINT_EV(TRACE_LEVEL_ERROR,HCA_DBG_LOW,("Failed to initialize "
+                         "shared receive queue table, aborting.\n"));
+               goto err_cq_table_free;
+       }
+
+       err = mthca_init_qp_table(mdev);
+       if (err) {
+               HCA_PRINT_EV(TRACE_LEVEL_ERROR,HCA_DBG_LOW, ("Failed to initialize "
+                         "queue pair table, aborting.\n"));
+               goto err_srq_table_free;
+       }
+
+       err = mthca_init_av_table(mdev);
+       if (err) {
+               HCA_PRINT_EV(TRACE_LEVEL_ERROR,HCA_DBG_LOW,("Failed to initialize "
+                         "address vector table, aborting.\n"));
+               goto err_qp_table_free;
+       }
+
+       err = mthca_init_mcg_table(mdev);
+       if (err) {
+               HCA_PRINT_EV(TRACE_LEVEL_ERROR,HCA_DBG_LOW,("Failed to initialize "
+                         "multicast group table, aborting.\n"));
+               goto err_av_table_free;
+       }
+
+       return 0;
+
+err_av_table_free:
+       mthca_cleanup_av_table(mdev);
+
+err_qp_table_free:
+       mthca_cleanup_qp_table(mdev);
+
+err_srq_table_free:
+       mthca_cleanup_srq_table(mdev);
+
+err_cq_table_free:
+       mthca_cleanup_cq_table(mdev);
+
+err_cmd_poll:
+       mthca_cmd_use_polling(mdev);
+
+err_eq_table_free:
+       mthca_cleanup_eq_table(mdev);
+
+err_pd_free:
+       mthca_pd_free(mdev, &mdev->driver_pd);
+
+err_mr_table_free:
+       mthca_cleanup_mr_table(mdev);
+
+err_pd_table_free:
+       mthca_cleanup_pd_table(mdev);
+
+err_kar_unmap:
+       iounmap(mdev->kar, mdev->kar_size);
+
+err_uar_free:
+       mthca_uar_free(mdev, &mdev->driver_uar);
+
+err_uar_table_free:
+       mthca_cleanup_uar_table(mdev);
+       return err;
+}
+
+
+static int     mthca_check_fw(struct mthca_dev *mdev, struct pci_device_id *p_id)
+{
+       int err = 0;
+       
+       if (mdev->fw_ver <= mthca_hca_table[p_id->driver_data].max_unsupported_fw) {
+               HCA_PRINT_EV(TRACE_LEVEL_ERROR ,HCA_DBG_LOW ,("HCA FW version %d.%d.%d is not supported. Use %d.%d.%d or higher.\n",
+                          (int) (mdev->fw_ver >> 32), (int) (mdev->fw_ver >> 16) & 0xffff,
+                          (int) (mdev->fw_ver & 0xffff),
+                          (int) (mthca_hca_table[p_id->driver_data].min_supported_fw >> 32),
+                          (int) (mthca_hca_table[p_id->driver_data].min_supported_fw >> 16) & 0xffff,
+                          (int) (mthca_hca_table[p_id->driver_data].min_supported_fw & 0xffff)));
+               err = -EINVAL;
+       }
+       else 
+       if (mdev->fw_ver < mthca_hca_table[p_id->driver_data].min_supported_fw) {
+               HCA_PRINT_EV(TRACE_LEVEL_WARNING ,HCA_DBG_LOW ,("HCA FW version %d.%d.%d is too old. Use %d.%d.%d or higher.\nIf you have problems, try updating your HCA FW.\n",
+                                (int) (mdev->fw_ver >> 32), (int) (mdev->fw_ver >> 16) & 0xffff,
+                                (int) (mdev->fw_ver & 0xffff),
+                                (int) (mthca_hca_table[p_id->driver_data].min_supported_fw >> 32),
+                                (int) (mthca_hca_table[p_id->driver_data].min_supported_fw >> 16) & 0xffff,
+                                (int) (mthca_hca_table[p_id->driver_data].min_supported_fw & 0xffff)));
+       }
+       else {
+               HCA_PRINT(TRACE_LEVEL_INFORMATION ,HCA_DBG_LOW ,("Current HCA FW version is %d.%d.%d. \n",
+                                (int) (mdev->fw_ver >> 32), (int) (mdev->fw_ver >> 16) & 0xffff,
+                                (int) (mdev->fw_ver & 0xffff)));
+       }
+
+       return err;
+}
+
+NTSTATUS mthca_init_one(hca_dev_ext_t *ext)
+{
+       static int mthca_version_printed = 0;
+       int err;
+       NTSTATUS status;
+       struct mthca_dev *mdev;
+       struct pci_device_id *p_id;
+
+       /* print version */
+       if (!mthca_version_printed) {
+               HCA_PRINT(TRACE_LEVEL_INFORMATION ,HCA_DBG_LOW ,("%s\n", mthca_version));
+               ++mthca_version_printed;
+       }
+
+       /* find the type of device */
+       p_id = mthca_find_pci_dev(
+               (unsigned)ext->hcaConfig.VendorID,
+               (unsigned)ext->hcaConfig.DeviceID);
+       if (p_id == NULL) {
+               status = STATUS_NO_SUCH_DEVICE;
+               goto end;
+       }
+
+       /* allocate mdev structure */
+       mdev = kmalloc(sizeof *mdev, GFP_KERNEL);
+       if (!mdev) {
+               HCA_PRINT_EV(TRACE_LEVEL_ERROR ,HCA_DBG_LOW ,("Device struct alloc failed, "
+                       "aborting.\n"));
+               status = STATUS_INSUFFICIENT_RESOURCES;
+               goto end;
+       }
+        
+       /* set some fields */
+       RtlZeroMemory(mdev, sizeof *mdev);
+       mdev->ext = ext;                /* pointer to DEVICE OBJECT extension */
+       if (ext->hca_hidden)
+               mdev->mthca_flags |= MTHCA_FLAG_DDR_HIDDEN;
+       if (mthca_hca_table[p_id->driver_data].is_memfree)
+               mdev->mthca_flags |= MTHCA_FLAG_MEMFREE;
+       if (mthca_hca_table[p_id->driver_data].is_pcie)
+               mdev->mthca_flags |= MTHCA_FLAG_PCIE;
+
+//TODO: once we have FW capable of performing the reset,
+// write a routine that only triggers it
+
+       /*
+        * Now reset the HCA before we touch the PCI capabilities or
+        * attempt a firmware command, since a boot ROM may have left
+        * the HCA in an undefined state.
+        */
+       status = hca_reset( mdev->ext->cl_ext.p_self_do, p_id->driver_data == TAVOR );
+       if ( !NT_SUCCESS( status ) ) {
+               HCA_PRINT_EV(TRACE_LEVEL_ERROR   ,HCA_DBG_LOW   ,("Failed to reset HCA, aborting.\n"));
+               goto err_free_dev;
+       }
+
+       if (mthca_cmd_init(mdev)) {
+               HCA_PRINT_EV(TRACE_LEVEL_ERROR   ,HCA_DBG_LOW   ,("Failed to init command interface, aborting.\n"));
+               status = STATUS_DEVICE_DATA_ERROR;
+               goto err_free_dev;
+       }
+
+       status = mthca_tune_pci(mdev);
+       if ( !NT_SUCCESS( status ) ) {
+               goto err_cmd;
+       }
+
+       err = mthca_init_hca(mdev); 
+       if (err) {
+               status = STATUS_UNSUCCESSFUL;
+               goto err_cmd;
+       }
+
+       err = mthca_check_fw(mdev, p_id);
+       if (err) {
+               status = STATUS_UNSUCCESSFUL;
+               goto err_close;
+       }
+
+       err = mthca_setup_hca(mdev);
+       if (err) {
+               status = STATUS_UNSUCCESSFUL;
+               goto err_close;
+       }
+
+       err = mthca_register_device(mdev);
+       if (err) {
+               status = STATUS_UNSUCCESSFUL;
+               goto err_cleanup;
+       }
+
+       ext->hca.mdev = mdev;
+       mdev->state = MTHCA_DEV_INITIALIZED;
+       return 0;
+
+err_cleanup:
+       mthca_cleanup_mcg_table(mdev);
+       mthca_cleanup_av_table(mdev);
+       mthca_cleanup_qp_table(mdev);
+       mthca_cleanup_srq_table(mdev);
+       mthca_cleanup_cq_table(mdev);
+       mthca_cmd_use_polling(mdev);
+       mthca_cleanup_eq_table(mdev);
+
+       mthca_pd_free(mdev, &mdev->driver_pd);
+
+       mthca_cleanup_mr_table(mdev);
+       mthca_cleanup_pd_table(mdev);
+       mthca_cleanup_uar_table(mdev);
+
+err_close:
+       mthca_close_hca(mdev);
+
+err_cmd:
+       mthca_cmd_cleanup(mdev);
+
+err_free_dev:
+       kfree(mdev);
+       
+end:
+       return status;
+}
+
+void mthca_remove_one(hca_dev_ext_t *ext)
+{
+       struct mthca_dev *mdev = ext->hca.mdev;
+       u8 status;
+       int p;
+
+       if (mdev) {
+               mthca_unregister_device(mdev);
+
+               for (p = 1; p <= mdev->limits.num_ports; ++p)
+                       mthca_CLOSE_IB(mdev, p, &status);
+
+               mthca_cleanup_mcg_table(mdev);
+               mthca_cleanup_av_table(mdev);
+               mthca_cleanup_qp_table(mdev);
+               mthca_cleanup_srq_table(mdev);
+               mthca_cleanup_cq_table(mdev);
+               mthca_cmd_use_polling(mdev);
+               mthca_cleanup_eq_table(mdev);
+               mthca_pd_free(mdev, &mdev->driver_pd);
+               mthca_cleanup_mr_table(mdev);
+               mthca_cleanup_pd_table(mdev);
+               iounmap(mdev->kar, mdev->kar_size);
+               mthca_uar_free(mdev, &mdev->driver_uar);
+               mthca_cleanup_uar_table(mdev);
+               mthca_close_hca(mdev);
+               mthca_cmd_cleanup(mdev);
+
+               kfree(mdev);
+               ext->hca.mdev = NULL;
+       }
+}
+
+
+
diff --git a/trunk/hw/mthca/kernel/mthca_mcg.c b/trunk/hw/mthca/kernel/mthca_mcg.c
new file mode 100644 (file)
index 0000000..4000a4f
--- /dev/null
@@ -0,0 +1,418 @@
+/*
+ * Copyright (c) 2004 Topspin Communications.  All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id: mthca_mcg.c 2905 2005-07-25 18:26:52Z roland $
+ */
+
+#include "mthca_dev.h"
+#if defined(EVENT_TRACING)
+#ifdef offsetof
+#undef offsetof
+#endif
+#include "mthca_mcg.tmh"
+#endif
+#include "mthca_cmd.h"
+
+
+#ifdef ALLOC_PRAGMA
+#pragma alloc_text (PAGE, mthca_init_mcg_table)
+#pragma alloc_text (PAGE, mthca_cleanup_mcg_table)
+#endif
+
+struct mthca_mgm {
+       __be32 next_gid_index;
+       u32    reserved[3];
+       u8     gid[16];
+       __be32 qp[MTHCA_QP_PER_MGM];
+};
+
+static const u8 zero_gid[16] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0};
+
+/*
+ * Caller must hold MCG table semaphore.  gid and mgm parameters must
+ * be properly aligned for command interface.
+ *
+ * Returns 0 on success, or a negative error code if mailbox allocation
+ * or a firmware command fails.
+ *
+ * If GID is found in MGM or MGM is empty, *index = *hash, *prev = -1
+ * and *mgm holds MGM entry.
+ *
+ * If GID is found in AMGM, *index = index in AMGM, *prev = index of
+ * previous entry in hash chain and *mgm holds AMGM entry.
+ *
+ * If no AMGM exists for the given GID, *index = -1, *prev = index of last
+ * entry in hash chain and *mgm holds end of hash chain.
+ */
+static int find_mgm(struct mthca_dev *dev,
+                   u8 *gid, struct mthca_mailbox *mgm_mailbox,
+                   u16 *hash, int *prev, int *index)
+{
+       struct mthca_mailbox *mailbox;
+       struct mthca_mgm *mgm = mgm_mailbox->buf;
+       u8 *mgid;
+       int err;
+       u8 status;
+
+       mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
+       if (IS_ERR(mailbox))
+               return -ENOMEM;
+       mgid = mailbox->buf;
+
+       memcpy(mgid, gid, 16);
+
+       err = mthca_MGID_HASH(dev, mailbox, hash, &status);
+       if (err)
+               goto out;
+       if (status) {
+               HCA_PRINT(TRACE_LEVEL_ERROR  ,HCA_DBG_LOW  ,("MGID_HASH returned status %02x\n", status));
+               err = -EINVAL;
+               goto out;
+       }
+
+       #if 0
+               HCA_PRINT(TRACE_LEVEL_VERBOSE,HCA_DBG_LOW,("Hash for %04x:%04x:%04x:%04x:"
+                         "%04x:%04x:%04x:%04x is %04x\n",
+                         cl_ntoh16(((__be16 *) gid)[0]),
+                         cl_ntoh16(((__be16 *) gid)[1]),
+                         cl_ntoh16(((__be16 *) gid)[2]),
+                         cl_ntoh16(((__be16 *) gid)[3]),
+                         cl_ntoh16(((__be16 *) gid)[4]),
+                         cl_ntoh16(((__be16 *) gid)[5]),
+                         cl_ntoh16(((__be16 *) gid)[6]),
+                         cl_ntoh16(((__be16 *) gid)[7]),
+                         *hash));
+       #endif
+
+       *index = *hash;
+       *prev  = -1;
+
+       do {
+               err = mthca_READ_MGM(dev, *index, mgm_mailbox, &status);
+               if (err)
+                       goto out;
+               if (status) {
+                       HCA_PRINT(TRACE_LEVEL_ERROR  ,HCA_DBG_LOW  ,("READ_MGM returned status %02x\n", status));
+                       err =  -EINVAL;
+                       goto out;
+               }
+
+               if (!memcmp(mgm->gid, zero_gid, 16)) {
+                       if (*index != *hash) {
+                               HCA_PRINT(TRACE_LEVEL_ERROR  ,HCA_DBG_LOW  ,("Found zero MGID in AMGM.\n"));
+                               err = -EINVAL;
+                       }
+                       goto out;
+               }
+
+               if (!memcmp(mgm->gid, gid, 16))
+                       goto out;
+
+               *prev = *index;
+               *index = cl_ntoh32(mgm->next_gid_index) >> 6;
+       } while (*index);
+
+       *index = -1;
+
+ out:
+       mthca_free_mailbox(dev, mailbox);
+       return err;
+}
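+
+/*
+ * A minimal caller-side sketch of how find_mgm()'s out-parameters are
+ * interpreted (hypothetical fragment; the real usage is in
+ * mthca_multicast_attach/detach below):
+ *
+ *     u16 hash; int prev, index;
+ *     err = find_mgm(dev, gid->raw, mailbox, &hash, &prev, &index);
+ *     if (index == -1)        /* GID not present; append after 'prev'        */
+ *             ...;
+ *     else if (prev == -1)    /* GID (or empty slot) found in MGM at 'hash'  */
+ *             ...;
+ *     else                    /* GID found in AMGM at 'index', linked from 'prev' */
+ *             ...;
+ */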
+
+int mthca_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
+{
+       struct mthca_dev *dev = to_mdev(ibqp->device);
+       struct mthca_mailbox *mailbox;
+       struct mthca_mgm *mgm;
+       u16 hash;
+       int index, prev;
+       int link = 0;
+       int i;
+       int err;
+       u8 status;
+
+       UNREFERENCED_PARAMETER(lid);
+       
+       mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
+       if (IS_ERR(mailbox))
+               return PTR_ERR(mailbox);
+       mgm = mailbox->buf;
+
+       if (down_interruptible(&dev->mcg_table.mutex)) {
+               err = -EINTR;
+               goto err_sem;
+       }
+
+       err = find_mgm(dev, gid->raw, mailbox, &hash, &prev, &index);
+       if (err)
+               goto out;
+
+       if (index != -1) {
+               if (!memcmp(mgm->gid, zero_gid, 16))
+                       memcpy(mgm->gid, gid->raw, 16);
+       } else {
+               link = 1;
+
+               index = mthca_alloc(&dev->mcg_table.alloc);
+               if (index == -1) {
+                       HCA_PRINT(TRACE_LEVEL_ERROR  ,HCA_DBG_LOW  ,("No AMGM entries left\n"));
+                       err = -ENOMEM;
+                       goto out;
+               }
+
+               err = mthca_READ_MGM(dev, index, mailbox, &status);
+               if (err)
+                       goto out;
+               if (status) {
+                       HCA_PRINT(TRACE_LEVEL_ERROR  ,HCA_DBG_LOW  ,("READ_MGM returned status %02x\n", status));
+                       err = -EINVAL;
+                       goto out;
+               }
+
+               memset(mgm, 0, sizeof *mgm);
+               memcpy(mgm->gid, gid->raw, 16);
+               mgm->next_gid_index = 0;
+       }
+
+       for (i = 0; i < MTHCA_QP_PER_MGM; ++i)
+               if (mgm->qp[i] == cl_hton32(ibqp->qp_num | (1 << 31))) {
+                       HCA_PRINT(TRACE_LEVEL_VERBOSE,HCA_DBG_LOW,("QP %06x already a member of MGM\n", 
+                                 ibqp->qp_num));
+                       err = 0;
+                       goto out;
+               } else if (!(mgm->qp[i] & cl_hton32(1UL << 31))) {
+                       mgm->qp[i] = cl_hton32(ibqp->qp_num | (1 << 31));
+                       break;
+               }
+
+       if (i == MTHCA_QP_PER_MGM) {
+               HCA_PRINT(TRACE_LEVEL_ERROR  ,HCA_DBG_LOW  ,("MGM at index %x is full.\n", index));
+               err = -ENOMEM;
+               goto out;
+       }
+
+       err = mthca_WRITE_MGM(dev, index, mailbox, &status);
+       if (err)
+               goto out;
+       if (status) {
+               HCA_PRINT(TRACE_LEVEL_ERROR  ,HCA_DBG_LOW  ,("WRITE_MGM returned status %02x\n", status));
+               err = -EINVAL;
+               goto out;
+       }
+
+       if (!link)
+               goto out;
+
+       err = mthca_READ_MGM(dev, prev, mailbox, &status);
+       if (err)
+               goto out;
+       if (status) {
+               HCA_PRINT(TRACE_LEVEL_ERROR  ,HCA_DBG_LOW  ,("READ_MGM returned status %02x\n", status));
+               err = -EINVAL;
+               goto out;
+       }
+
+       mgm->next_gid_index = cl_hton32(index << 6);
+
+       err = mthca_WRITE_MGM(dev, prev, mailbox, &status);
+       if (err)
+               goto out;
+       if (status) {
+               HCA_PRINT(TRACE_LEVEL_ERROR  ,HCA_DBG_LOW  ,("WRITE_MGM returned status %02x\n", status));
+               err = -EINVAL;
+       }
+
+out:
+       if (err && link && index != -1) {
+               BUG_ON(index < dev->limits.num_mgms);
+               mthca_free(&dev->mcg_table.alloc, index);
+       }
+       KeReleaseMutex(&dev->mcg_table.mutex,FALSE);
+err_sem:
+       mthca_free_mailbox(dev, mailbox);
+       return err;
+}
+
+int mthca_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
+{
+       struct mthca_dev *dev = to_mdev(ibqp->device);
+       struct mthca_mailbox *mailbox;
+       struct mthca_mgm *mgm;
+       u16 hash;
+       int prev, index;
+       int i, loc;
+       int err;
+       u8 status;
+
+       UNREFERENCED_PARAMETER(lid);
+       
+       mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
+       if (IS_ERR(mailbox))
+               return PTR_ERR(mailbox);
+       mgm = mailbox->buf;
+
+       if (down_interruptible(&dev->mcg_table.mutex)) {
+               err = -EINTR;
+               goto err_sem;
+       }
+
+       err = find_mgm(dev, gid->raw, mailbox, &hash, &prev, &index);
+       if (err)
+               goto out;
+
+       if (index == -1) {
+               HCA_PRINT(TRACE_LEVEL_ERROR,HCA_DBG_LOW, ("MGID %04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x "
+                         "not found\n",
+                         cl_ntoh16(((__be16 *) gid->raw)[0]),
+                         cl_ntoh16(((__be16 *) gid->raw)[1]),
+                         cl_ntoh16(((__be16 *) gid->raw)[2]),
+                         cl_ntoh16(((__be16 *) gid->raw)[3]),
+                         cl_ntoh16(((__be16 *) gid->raw)[4]),
+                         cl_ntoh16(((__be16 *) gid->raw)[5]),
+                         cl_ntoh16(((__be16 *) gid->raw)[6]),
+                         cl_ntoh16(((__be16 *) gid->raw)[7])));
+               err = -EINVAL;
+               goto out;
+       }
+
+       for (loc = -1, i = 0; i < MTHCA_QP_PER_MGM; ++i) {
+               if (mgm->qp[i] == cl_hton32(ibqp->qp_num | (1 << 31)))
+                       loc = i;
+               if (!(mgm->qp[i] & cl_hton32(1UL << 31)))
+                       break;
+       }
+
+       if (loc == -1) {
+               HCA_PRINT(TRACE_LEVEL_ERROR  ,HCA_DBG_LOW  ,("QP %06x not found in MGM\n", ibqp->qp_num));
+               err = -EINVAL;
+               goto out;
+       }
+
+       mgm->qp[loc]   = mgm->qp[i - 1];
+       mgm->qp[i - 1] = 0;
+
+       err = mthca_WRITE_MGM(dev, index, mailbox, &status);
+       if (err)
+               goto out;
+       if (status) {
+               HCA_PRINT(TRACE_LEVEL_ERROR  ,HCA_DBG_LOW  ,("WRITE_MGM returned status %02x\n", status));
+               err = -EINVAL;
+               goto out;
+       }
+
+       if (i != 1)
+               goto out;
+
+       if (prev == -1) {
+               /* Remove entry from MGM */
+               int amgm_index_to_free = cl_ntoh32(mgm->next_gid_index) >> 6;
+               if (amgm_index_to_free) {
+                       err = mthca_READ_MGM(dev, amgm_index_to_free,
+                                            mailbox, &status);
+                       if (err)
+                               goto out;
+                       if (status) {
+                               HCA_PRINT(TRACE_LEVEL_ERROR,HCA_DBG_LOW,("READ_MGM returned status %02x\n",
+                                         status));
+                               err = -EINVAL;
+                               goto out;
+                       }
+               } else
+                       RtlZeroMemory(mgm->gid, 16);
+
+               err = mthca_WRITE_MGM(dev, index, mailbox, &status);
+               if (err)
+                       goto out;
+               if (status) {
+                       HCA_PRINT(TRACE_LEVEL_ERROR  ,HCA_DBG_LOW  ,("WRITE_MGM returned status %02x\n", status));
+                       err = -EINVAL;
+                       goto out;
+               }
+               if (amgm_index_to_free) {
+                       BUG_ON(amgm_index_to_free < dev->limits.num_mgms);
+                       mthca_free(&dev->mcg_table.alloc, amgm_index_to_free);
+               }
+       } else {
+               /* Remove entry from AMGM */
+               int curr_next_index = cl_ntoh32(mgm->next_gid_index) >> 6;
+               err = mthca_READ_MGM(dev, prev, mailbox, &status);
+               if (err)
+                       goto out;
+               if (status) {
+                       HCA_PRINT(TRACE_LEVEL_ERROR  ,HCA_DBG_LOW  ,("READ_MGM returned status %02x\n", status));
+                       err = -EINVAL;
+                       goto out;
+               }
+
+               mgm->next_gid_index = cl_hton32(curr_next_index << 6);
+
+               err = mthca_WRITE_MGM(dev, prev, mailbox, &status);
+               if (err)
+                       goto out;
+               if (status) {
+                       HCA_PRINT(TRACE_LEVEL_ERROR  ,HCA_DBG_LOW  ,("WRITE_MGM returned status %02x\n", status));
+                       err = -EINVAL;
+                       goto out;
+               }
+               BUG_ON(index < dev->limits.num_mgms);
+               mthca_free(&dev->mcg_table.alloc, index);
+       }
+
+ out:
+       KeReleaseMutex(&dev->mcg_table.mutex, FALSE);
+err_sem:       
+       mthca_free_mailbox(dev, mailbox);
+       return err;
+}
+
+int mthca_init_mcg_table(struct mthca_dev *dev)
+{
+       int err;
+       int table_size = dev->limits.num_mgms + dev->limits.num_amgms;
+
+       err = mthca_alloc_init(&dev->mcg_table.alloc,
+               table_size,
+               table_size - 1,
+               dev->limits.num_mgms);
+
+       if (err)
+               return err;
+
+       KeInitializeMutex(&dev->mcg_table.mutex,0);
+
+       return 0;
+}
+
+void mthca_cleanup_mcg_table(struct mthca_dev *dev)
+{
+       mthca_alloc_cleanup(&dev->mcg_table.alloc);
+}
+
+
diff --git a/trunk/hw/mthca/kernel/mthca_memfree.c b/trunk/hw/mthca/kernel/mthca_memfree.c
new file mode 100644 (file)
index 0000000..e44a18f
--- /dev/null
@@ -0,0 +1,719 @@
+/*
+ * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
+ * Copyright (c) 2005 Cisco Systems.  All rights reserved.
+ * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id: mthca_memfree.c 3042 2005-08-09 20:56:58Z roland $
+ */
+
+#include "hca_driver.h"
+#include "mthca_memfree.h"
+#include "mthca_dev.h"
+#if defined(EVENT_TRACING)
+#ifdef offsetof
+#undef offsetof
+#endif
+#include "mthca_memfree.tmh"
+#endif
+#include "mthca_cmd.h"
+
+/*
+ * We allocate in as big chunks as we can, up to a maximum of 256 KB
+ * per chunk.
+ */
+enum {
+       MTHCA_ICM_ALLOC_SIZE   = 1 << 18,
+       MTHCA_TABLE_CHUNK_SIZE = 1 << 18
+};
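+
+/*
+ * A quick sanity check of the constants above, assuming 4 KB pages
+ * (PAGE_SHIFT == 12): 1 << 18 = 256 KB, i.e.
+ *
+ *     MTHCA_TABLE_CHUNK_SIZE >> PAGE_SHIFT == 64 pages per table chunk
+ */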
+
+#pragma warning( disable : 4200)
+struct mthca_user_db_table {
+       KMUTEX   mutex;
+       struct {
+               u64                uvirt;
+               struct scatterlist mem;
+               int                refcount;
+       }                page[0];
+};
+#pragma warning( default  : 4200)
+
+void mthca_free_icm(struct mthca_dev *dev, struct mthca_icm *icm)
+{
+       struct mthca_icm_chunk *chunk, *tmp;
+       int i;
+
+       if (!icm)
+               return;
+
+       list_for_each_entry_safe(chunk, tmp, &icm->chunk_list, list,struct mthca_icm_chunk,struct mthca_icm_chunk) {
+               if (chunk->nsg > 0)
+                       pci_unmap_sg(dev, chunk->mem, chunk->npages,
+                                    PCI_DMA_BIDIRECTIONAL);
+
+               for (i = 0; i < chunk->npages; ++i)
+                       free_dma_mem_map(dev, &chunk->mem[i], PCI_DMA_BIDIRECTIONAL );
+
+               kfree(chunk);
+       }
+
+       kfree(icm);
+}
+
+/* Allocate device memory of 'npages' pages as a list of chunks, each containing an array of
+ contiguous buffers. Physical pages are allocated first and then mapped to bus space. */
+struct mthca_icm *mthca_alloc_icm(struct mthca_dev *dev, int npages,
+                                 unsigned int gfp_mask)
+{
+       struct mthca_icm *icm;
+       struct mthca_icm_chunk *chunk = NULL;
+       int cur_order;
+
+       icm = kmalloc(sizeof *icm, gfp_mask & ~(__GFP_HIGHMEM | __GFP_NOWARN));
+       if (!icm)
+               return icm;
+
+       icm->refcount = 0;
+       INIT_LIST_HEAD(&icm->chunk_list);
+
+       cur_order = get_order(MTHCA_ICM_ALLOC_SIZE);
+
+       while (npages > 0) {
+               /* allocate a new chunk */
+               if (!chunk) {
+                       chunk = kmalloc(sizeof *chunk,
+                                       gfp_mask & ~(__GFP_HIGHMEM | __GFP_NOWARN));
+                       if (!chunk)
+                               goto fail;
+
+                       RtlZeroMemory( chunk, sizeof *chunk );
+                       list_add_tail(&chunk->list, &icm->chunk_list);
+               }
+
+               /* fill the chunk with consistent areas of an integer number of pages each */
+               while (1 << cur_order > npages)
+                       /* take at most the required number of pages */
+                       --cur_order;
+
+               /* try to allocate a contiguous PHYSICAL buffer */
+               alloc_dma_zmem( dev, PAGE_SIZE << cur_order,
+                       &chunk->mem[chunk->npages] );
+
+               /* if it succeeded - proceed with handling */
+               if (chunk->mem[chunk->npages].page) {
+
+                       /* check whether the chunk is full */
+                       if (++chunk->npages == MTHCA_ICM_CHUNK_LEN) {
+                               /* it's full --> map physical addresses to bus ones */
+                               chunk->nsg = pci_map_sg(dev, chunk->mem,
+                                       chunk->npages, PCI_DMA_BIDIRECTIONAL );
+
+                               if (chunk->nsg <= 0)
+                                       goto fail;
+
+                               chunk = NULL;
+                       }
+
+                       /* calculate the remaining memory to be allocated */
+                       npages -= 1 << cur_order;
+               }
+               /* failed to allocate - decrement the buffer size and try once more */
+               else {
+                       --cur_order;
+                       if (cur_order < 0)
+                               goto fail;
+               }
+       }
+
+       /* last, not full chunk: map physical addresses to bus ones */ 
+       if (chunk) {
+               chunk->nsg = pci_map_sg(dev, chunk->mem,
+                                       chunk->npages,
+                                       PCI_DMA_BIDIRECTIONAL);
+
+               if (chunk->nsg <= 0)
+                       goto fail;
+       }
+
+       return icm;
+
+fail:
+       mthca_free_icm(dev, icm);
+       return NULL;
+}
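+
+/*
+ * A minimal allocation/release sketch, assuming a caller that needs one
+ * table chunk of ICM and 4 KB pages (mapping the chunk to the HCA is a
+ * separate step, e.g. mthca_MAP_ICM() as done by mthca_table_get() below):
+ *
+ *     struct mthca_icm *icm;
+ *
+ *     icm = mthca_alloc_icm(dev, MTHCA_TABLE_CHUNK_SIZE >> PAGE_SHIFT,
+ *                           GFP_KERNEL | __GFP_NOWARN);
+ *     if (!icm)
+ *             return -ENOMEM;
+ *     ...
+ *     mthca_free_icm(dev, icm);
+ */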
+
+int mthca_table_get(struct mthca_dev *dev, struct mthca_icm_table *table, int obj)
+{
+       int i = (obj & (table->num_obj - 1)) * table->obj_size / MTHCA_TABLE_CHUNK_SIZE;
+       int ret = 0;
+       u8 status;
+
+       down(&table->mutex);
+
+       if (table->icm[i]) {
+               ++table->icm[i]->refcount;
+               goto out;
+       }
+
+       table->icm[i] = mthca_alloc_icm(dev, MTHCA_TABLE_CHUNK_SIZE >> PAGE_SHIFT,
+                                       (table->lowmem ? GFP_KERNEL : GFP_HIGHUSER) |
+                                       __GFP_NOWARN);
+       if (!table->icm[i]) {
+               ret = -ENOMEM;
+               goto out;
+       }
+
+       if (mthca_MAP_ICM(dev, table->icm[i], table->virt + i * MTHCA_TABLE_CHUNK_SIZE,
+                         &status) || status) {
+               mthca_free_icm(dev, table->icm[i]);
+               table->icm[i] = NULL;
+               ret = -ENOMEM;
+               goto out;
+       }
+
+       ++table->icm[i]->refcount;
+
+out:
+       up(&table->mutex);
+       return ret;
+}
+
+void mthca_table_put(struct mthca_dev *dev, struct mthca_icm_table *table, int obj)
+{
+       int i;
+       u8 status;
+
+       if (!mthca_is_memfree(dev))
+               return;
+
+       i = (obj & (table->num_obj - 1)) * table->obj_size / MTHCA_TABLE_CHUNK_SIZE;
+
+       down(&table->mutex);
+
+       if (--table->icm[i]->refcount == 0) {
+               mthca_UNMAP_ICM(dev, table->virt + i * MTHCA_TABLE_CHUNK_SIZE,
+                               MTHCA_TABLE_CHUNK_SIZE >> 12, &status);
+               mthca_free_icm(dev, table->icm[i]);
+               table->icm[i] = NULL;
+       }
+
+       up(&table->mutex);
+}
+
+void *mthca_table_find(struct mthca_icm_table *table, int obj)
+{
+       int idx, offset, i;
+       struct mthca_icm_chunk *chunk;
+       struct mthca_icm *icm;
+       struct page *page = NULL;
+
+       if (!table->lowmem)
+               return NULL;
+
+       down(&table->mutex);
+
+       idx = (obj & (table->num_obj - 1)) * table->obj_size;
+       icm = table->icm[idx / MTHCA_TABLE_CHUNK_SIZE];
+       offset = idx % MTHCA_TABLE_CHUNK_SIZE;
+
+       if (!icm)
+               goto out;
+
+       list_for_each_entry(chunk, &icm->chunk_list, list,struct mthca_icm_chunk) {
+               for (i = 0; i < chunk->npages; ++i) {
+                       if ((int)chunk->mem[i].length >= offset) {
+                               page = chunk->mem[i].page;
+                               goto out;
+                       }
+                       offset -= chunk->mem[i].length;
+               }
+       }
+
+out:
+       up(&table->mutex);
+       return page ? (char*)page + offset : NULL;
+}
+
+int mthca_table_get_range(struct mthca_dev *dev, struct mthca_icm_table *table,
+                         int start, int end)
+{
+       int inc = MTHCA_TABLE_CHUNK_SIZE / table->obj_size;
+       int i, err;
+
+       for (i = start; i <= end; i += inc) {
+               err = mthca_table_get(dev, table, i);
+               if (err)
+                       goto fail;
+       }
+
+       return 0;
+
+fail:
+       while (i > start) {
+               i -= inc;
+               mthca_table_put(dev, table, i);
+       }
+
+       return err;
+}
+
+void mthca_table_put_range(struct mthca_dev *dev, struct mthca_icm_table *table,
+                          int start, int end)
+{
+       int i;
+
+       if (!mthca_is_memfree(dev))
+               return;
+
+       for (i = start; i <= end; i += MTHCA_TABLE_CHUNK_SIZE / table->obj_size)
+               mthca_table_put(dev, table, i);
+}
+
+struct mthca_icm_table *mthca_alloc_icm_table(struct mthca_dev *dev,
+                                             u64 virt, int obj_size,
+                                             int nobj, int reserved,
+                                             int use_lowmem)
+{
+       struct mthca_icm_table *table;
+       int num_icm;
+       unsigned chunk_size;
+       int i;
+       u8 status;
+
+       num_icm = (obj_size * nobj + MTHCA_TABLE_CHUNK_SIZE -1) / MTHCA_TABLE_CHUNK_SIZE;
+
+       table = kmalloc(sizeof *table + num_icm * sizeof *table->icm, GFP_KERNEL);
+       if (!table)
+               return NULL;
+
+       table->virt     = virt;
+       table->num_icm  = num_icm;
+       table->num_obj  = nobj;
+       table->obj_size = obj_size;
+       table->lowmem   = use_lowmem;
+       KeInitializeMutex( &table->mutex, 0 );
+
+       for (i = 0; i < num_icm; ++i)
+               table->icm[i] = NULL;
+
+       for (i = 0; i * MTHCA_TABLE_CHUNK_SIZE < reserved * obj_size; ++i) {
+               chunk_size = MTHCA_TABLE_CHUNK_SIZE;
+               if ((i + 1) * MTHCA_TABLE_CHUNK_SIZE > nobj * obj_size)
+                       chunk_size = nobj * obj_size - i * MTHCA_TABLE_CHUNK_SIZE;
+
+               table->icm[i] = mthca_alloc_icm(dev, chunk_size >> PAGE_SHIFT,
+                                               (use_lowmem ? GFP_KERNEL : GFP_HIGHUSER) |
+                                               __GFP_NOWARN);
+               if (!table->icm[i])
+                       goto err;
+               if (mthca_MAP_ICM(dev, table->icm[i], virt + i * MTHCA_TABLE_CHUNK_SIZE,
+                                 &status) || status) {
+                       mthca_free_icm(dev, table->icm[i]);
+                       table->icm[i] = NULL;
+                       goto err;
+               }
+
+               /*
+                * Add a reference to this ICM chunk so that it never
+                * gets freed (since it contains reserved firmware objects).
+                */
+               ++table->icm[i]->refcount;
+       }
+
+#if 0
+       HCA_PRINT(TRACE_LEVEL_VERBOSE,HCA_DBG_LOW,("Allocated/max chunks %d:%d, reserved/max objects %#x:%#x, one/total size %#x:%#x at %lx \n",
+                 i, num_icm, reserved, nobj, obj_size, nobj * obj_size, (unsigned long long) virt));
+#endif
+
+       return table;
+
+err:
+       for (i = 0; i < num_icm; ++i)
+               if (table->icm[i]) {
+                       mthca_UNMAP_ICM(dev, virt + i * MTHCA_TABLE_CHUNK_SIZE,
+                                       MTHCA_TABLE_CHUNK_SIZE >> 12, &status);
+                       mthca_free_icm(dev, table->icm[i]);
+               }
+
+       kfree(table);
+
+       return NULL;
+}
+
+void mthca_free_icm_table(struct mthca_dev *dev, struct mthca_icm_table *table)
+{
+       int i;
+       u8 status;
+
+       for (i = 0; i < table->num_icm; ++i)
+               if (table->icm[i]) {
+                       mthca_UNMAP_ICM(dev, table->virt + i * MTHCA_TABLE_CHUNK_SIZE,
+                                       MTHCA_TABLE_CHUNK_SIZE >> 12, &status);
+                       mthca_free_icm(dev, table->icm[i]);
+               }
+
+#if 0
+       HCA_PRINT(TRACE_LEVEL_VERBOSE,HCA_DBG_LOW, ( "Released chunks %d, objects %#x, one/total size %#x:%#x at %lx \n",
+                 table->num_icm, table->num_obj, table->obj_size, table->num_obj * table->obj_size, (unsigned long long) table->virt));
+#endif
+       kfree(table);
+}
+
+static u64 mthca_uarc_virt(struct mthca_dev *dev, struct mthca_uar *uar, int page)
+{
+       return dev->uar_table.uarc_base +
+               uar->index * dev->uar_table.uarc_size +
+               page * 4096;
+}
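+
+/*
+ * A worked example of the layout above with hypothetical numbers:
+ * uarc_base = 0x10000000, uarc_size = 0x2000 (two 4 KB pages per UAR),
+ * uar->index = 3 and page = 1 give
+ *
+ *     0x10000000 + 3 * 0x2000 + 1 * 4096 = 0x10007000
+ */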
+
+int mthca_map_user_db(struct mthca_dev *dev, struct mthca_uar *uar,
+                     struct mthca_user_db_table *db_tab, int index, u64 uaddr)
+{
+       int ret = 0;
+       u8 status;
+       int i;
+
+       if (!mthca_is_memfree(dev))
+               return 0;
+
+       if (index < 0 || index > dev->uar_table.uarc_size / 8)
+               return -EINVAL;
+
+       down(&db_tab->mutex);
+
+       i = index / MTHCA_DB_REC_PER_PAGE;
+
+       if ((db_tab->page[i].refcount >= MTHCA_DB_REC_PER_PAGE)       ||
+           (db_tab->page[i].uvirt && db_tab->page[i].uvirt != uaddr) ||
+           (uaddr & 4095)) {
+               ret = -EINVAL;
+               goto out;
+       }
+
+       if (db_tab->page[i].refcount) {
+               ++db_tab->page[i].refcount;
+               goto out;
+       }
+
+       ret = get_user_pages(dev, uaddr & PAGE_MASK, 1, 1,
+               &db_tab->page[i].mem);
+       if (ret < 0)
+               goto out;
+
+       db_tab->page[i].mem.length = 4096;
+       db_tab->page[i].mem.offset = (unsigned)(uaddr & ~PAGE_MASK);
+
+       ret = pci_map_sg(dev, &db_tab->page[i].mem, 1, PCI_DMA_TODEVICE);
+       if (ret <= 0) {
+               put_page(&db_tab->page[i].mem);
+               goto out;
+       }
+
+       ret = mthca_MAP_ICM_page(dev, sg_dma_address(&db_tab->page[i].mem),
+                                mthca_uarc_virt(dev, uar, i), &status);
+       if (!ret && status)
+               ret = -EINVAL;
+       if (ret) {
+               pci_unmap_sg(dev, &db_tab->page[i].mem, 1, PCI_DMA_TODEVICE);
+               put_page(&db_tab->page[i].mem);
+               goto out;
+       }
+
+       db_tab->page[i].uvirt    = uaddr;
+       db_tab->page[i].refcount = 1;
+
+out:
+       up(&db_tab->mutex);
+       return ret;
+}
+
+void mthca_unmap_user_db(struct mthca_dev *dev, struct mthca_uar *uar,
+                        struct mthca_user_db_table *db_tab, int index)
+{
+       UNREFERENCED_PARAMETER(uar);
+       
+       if (!mthca_is_memfree(dev))
+               return;
+
+       /*
+        * To make our bookkeeping simpler, we don't unmap DB
+        * pages until we clean up the whole db table.
+        */
+
+       down(&db_tab->mutex);
+
+       --db_tab->page[index / MTHCA_DB_REC_PER_PAGE].refcount;
+
+       up(&db_tab->mutex);
+}
+
+struct mthca_user_db_table *mthca_init_user_db_tab(struct mthca_dev *dev)
+{
+       struct mthca_user_db_table *db_tab;
+       int npages;
+       int i;
+
+       if (!mthca_is_memfree(dev))
+               return NULL;
+
+       npages = dev->uar_table.uarc_size / 4096;
+       db_tab = kmalloc(sizeof *db_tab + npages * sizeof *db_tab->page, GFP_KERNEL);
+       if (!db_tab)
+               return ERR_PTR(-ENOMEM);
+
+       KeInitializeMutex(&db_tab->mutex,0);
+       for (i = 0; i < npages; ++i) {
+               db_tab->page[i].refcount = 0;
+               db_tab->page[i].uvirt    = 0;
+       }
+
+       return db_tab;
+}
+
+void mthca_cleanup_user_db_tab(struct mthca_dev *dev, struct mthca_uar *uar,
+                              struct mthca_user_db_table *db_tab)
+{
+       int i;
+       u8 status;
+
+       if (!mthca_is_memfree(dev))
+               return;
+
+       for (i = 0; i < dev->uar_table.uarc_size / 4096; ++i) {
+               if (db_tab->page[i].uvirt) {
+                       mthca_UNMAP_ICM(dev, mthca_uarc_virt(dev, uar, i), 1, &status);
+                       pci_unmap_sg(dev, &db_tab->page[i].mem, 1, PCI_DMA_TODEVICE);
+                       put_page(&db_tab->page[i].mem);
+               }
+       }
+
+       kfree(db_tab);
+}
+
+int mthca_alloc_db(struct mthca_dev *dev, enum mthca_db_type type, u32 qn, __be32 **db)
+{
+       int group;
+       int start, end, dir;
+       int i, j;
+       struct mthca_db_page *page;
+       int ret = 0;
+       u8 status;
+       CPU_2_BE64_PREP;
+
+       down(&dev->db_tab->mutex);
+       switch (type) {
+       case MTHCA_DB_TYPE_CQ_ARM:
+       case MTHCA_DB_TYPE_SQ:
+               group = 0;
+               start = 0;
+               end   = dev->db_tab->max_group1;
+               dir   = 1;
+               break;
+
+       case MTHCA_DB_TYPE_CQ_SET_CI:
+       case MTHCA_DB_TYPE_RQ:
+       case MTHCA_DB_TYPE_SRQ:
+               group = 1;
+               start = dev->db_tab->npages - 1;
+               end   = dev->db_tab->min_group2;
+               dir   = -1;
+               break;
+
+       default:
+               ret = -EINVAL;
+               goto out;
+       }
+
+       /* first, look for an already allocated page with a free doorbell record */
+       for (i = start; i != end; i += dir)
+               if (dev->db_tab->page[i].db_rec &&
+                   !bitmap_full(dev->db_tab->page[i].used,
+                                MTHCA_DB_REC_PER_PAGE)) {
+                       page = dev->db_tab->page + i;
+                       goto found;
+               }
+
+       for (i = start; i != end; i += dir) {
+               if (!dev->db_tab->page[i].db_rec) {
+                       page = dev->db_tab->page + i;
+                       goto alloc;
+               }
+       }
+
+       /* if there is no more room for DBs - get out */
+       if (dev->db_tab->max_group1 >= dev->db_tab->min_group2 - 1) {
+               ret = -ENOMEM;
+               goto out;
+       }
+
+       /* fix the group limit indices */
+       if (group == 0)
+               ++dev->db_tab->max_group1;
+       else
+               --dev->db_tab->min_group2;
+       
+       /* allocate page */
+       page = dev->db_tab->page + end;
+
+alloc:
+       alloc_dma_zmem_map(dev, 4096, PCI_DMA_BIDIRECTIONAL, &page->sg);
+       if (!page->sg.page) {
+               ret = -ENOMEM;
+               goto out;
+       }
+       page->db_rec = (__be64*)page->sg.page;
+
+       ret = mthca_MAP_ICM_page(dev, page->sg.dma_address,
+                                mthca_uarc_virt(dev, &dev->driver_uar, i), &status);
+       if (!ret && status)
+               ret = -EINVAL;
+       if (ret) {
+               free_dma_mem_map(dev, &page->sg, PCI_DMA_BIDIRECTIONAL);
+               goto out;
+       }
+
+       bitmap_zero(page->used, MTHCA_DB_REC_PER_PAGE);
+
+found:
+       j = find_first_zero_bit(page->used, MTHCA_DB_REC_PER_PAGE);
+       set_bit(j, (long*)page->used);
+
+       if (group == 1)
+               j = MTHCA_DB_REC_PER_PAGE - 1 - j;
+
+       ret = i * MTHCA_DB_REC_PER_PAGE + j;
+
+       page->db_rec[j] = CPU_2_BE64((((ULONGLONG)qn << 8) | (type << 5)));
+
+       *db = (__be32 *) &page->db_rec[j];
+out:
+       up(&dev->db_tab->mutex);
+
+       return ret;
+}
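+
+/*
+ * A small sketch of the index layout produced above, assuming a fresh table
+ * with npages = 8 and 4 KB pages: group-0 records (CQ_ARM/SQ) fill pages
+ * 0, 1, 2, ... while group-1 records (CQ_SET_CI/RQ/SRQ) fill pages
+ * 7, 6, 5, ... until max_group1 and min_group2 meet. Within a group-1 page
+ * the slots are also filled from the top, hence the
+ * "MTHCA_DB_REC_PER_PAGE - 1 - j" flip, e.g. the first record allocated in
+ * page 7 gets
+ *
+ *     db_index = 7 * 512 + 511 = 4095
+ */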
+
+void mthca_free_db(struct mthca_dev *dev, int type, int db_index)
+{
+       int i, j;
+       struct mthca_db_page *page;
+       u8 status;
+
+       UNREFERENCED_PARAMETER(type);
+
+       i = db_index / MTHCA_DB_REC_PER_PAGE;
+       j = db_index % MTHCA_DB_REC_PER_PAGE;
+
+       page = dev->db_tab->page + i;
+
+       down(&dev->db_tab->mutex);
+
+       page->db_rec[j] = 0;
+       if (i >= dev->db_tab->min_group2)
+               j = MTHCA_DB_REC_PER_PAGE - 1 - j;
+       clear_bit(j, (long*)page->used);
+
+       if (bitmap_empty(page->used, MTHCA_DB_REC_PER_PAGE) &&
+           i >= dev->db_tab->max_group1 - 1) {
+               mthca_UNMAP_ICM(dev, mthca_uarc_virt(dev, &dev->driver_uar, i), 1, &status);
+
+               free_dma_mem_map(dev, &page->sg, PCI_DMA_BIDIRECTIONAL);
+               page->db_rec = NULL;
+
+               if (i == dev->db_tab->max_group1) {
+                       --dev->db_tab->max_group1;
+                       /* XXX may be able to unmap more pages now */
+               }
+               if (i == dev->db_tab->min_group2)
+                       ++dev->db_tab->min_group2;
+       }
+
+       up(&dev->db_tab->mutex);
+}
+
+int mthca_init_db_tab(struct mthca_dev *dev)
+{
+       int i;
+
+       if (!mthca_is_memfree(dev))
+               return 0;
+
+       dev->db_tab = kmalloc(sizeof *dev->db_tab, GFP_KERNEL);
+       if (!dev->db_tab)
+               return -ENOMEM;
+
+       KeInitializeMutex(&dev->db_tab->mutex, 0);
+       /* number of pages needed for the UAR context table */
+       dev->db_tab->npages     = dev->uar_table.uarc_size / 4096;
+       dev->db_tab->max_group1 = 0;
+       dev->db_tab->min_group2 = dev->db_tab->npages - 1;
+       /* allocate the array of structures containing descriptors of UARC pages */
+       dev->db_tab->page = kmalloc(dev->db_tab->npages *
+                                   sizeof *dev->db_tab->page,
+                                   GFP_KERNEL);
+       if (!dev->db_tab->page) {
+               kfree(dev->db_tab);
+               return -ENOMEM;
+       }
+
+       for (i = 0; i < dev->db_tab->npages; ++i)
+               dev->db_tab->page[i].db_rec = NULL;
+
+       return 0;
+}
+
+void mthca_cleanup_db_tab(struct mthca_dev *dev)
+{
+       int i;
+       u8 status;
+
+       if (!mthca_is_memfree(dev))
+               return;
+
+       /*
+        * Because, to keep mthca_free_db() simpler, we don't always free
+        * our UARC pages when they become empty, we need to sweep through
+        * the doorbell pages and free any leftover pages now.
+        */
+       for (i = 0; i < dev->db_tab->npages; ++i) {
+               if (!dev->db_tab->page[i].db_rec)
+                       continue;
+
+               if (!bitmap_empty(dev->db_tab->page[i].used, MTHCA_DB_REC_PER_PAGE))
+                       HCA_PRINT(TRACE_LEVEL_WARNING  ,HCA_DBG_LOW  ,("Kernel UARC page %d not empty\n", i));
+
+               mthca_UNMAP_ICM(dev, mthca_uarc_virt(dev, &dev->driver_uar, i), 1, &status);
+
+               free_dma_mem_map(dev, &dev->db_tab->page[i].sg, PCI_DMA_BIDIRECTIONAL);
+       }
+
+       kfree(dev->db_tab->page);
+       kfree(dev->db_tab);
+}
diff --git a/trunk/hw/mthca/kernel/mthca_memfree.h b/trunk/hw/mthca/kernel/mthca_memfree.h
new file mode 100644 (file)
index 0000000..9b3d945
--- /dev/null
@@ -0,0 +1,177 @@
+/*
+ * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
+ * Copyright (c) 2005 Cisco Systems.  All rights reserved.
+ * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id: mthca_memfree.h 2905 2005-07-25 18:26:52Z roland $
+ */
+
+#ifndef MTHCA_MEMFREE_H
+#define MTHCA_MEMFREE_H
+
+
+#define MTHCA_ICM_CHUNK_LEN \
+       ((256 - sizeof (struct list_head) - 2 * sizeof (int)) /         \
+        (sizeof (struct scatterlist)))
+
+struct mthca_icm_chunk {
+       struct list_head   list;
+       int                npages;
+       int                nsg;
+       struct scatterlist mem[MTHCA_ICM_CHUNK_LEN];
+};
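+
+/*
+ * Note on the sizing above: MTHCA_ICM_CHUNK_LEN is derived so that one
+ * struct mthca_icm_chunk fits in roughly 256 bytes -- the space left after
+ * the list head and the two ints, divided by sizeof(struct scatterlist)
+ * (so the exact entry count depends on this port's scatterlist layout).
+ */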
+
+struct mthca_icm {
+       struct list_head chunk_list;
+       int              refcount;
+};
+
+#pragma warning( disable : 4200)
+struct mthca_icm_table {
+       u64               virt;
+       int               num_icm;
+       int               num_obj;
+       int               obj_size;
+       int               lowmem;
+       KMUTEX                  mutex;
+       struct mthca_icm *icm[0];
+};
+#pragma warning( default  : 4200)
+
+struct mthca_icm_iter {
+       struct mthca_icm       *icm;
+       struct mthca_icm_chunk *chunk;
+       int                     page_idx;
+};
+
+struct mthca_dev;
+
+struct mthca_icm *mthca_alloc_icm(struct mthca_dev *dev, int npages,
+                                 unsigned int gfp_mask);
+void mthca_free_icm(struct mthca_dev *dev, struct mthca_icm *icm);
+
+struct mthca_icm_table *mthca_alloc_icm_table(struct mthca_dev *dev,
+                                             u64 virt, int obj_size,
+                                             int nobj, int reserved,
+                                             int use_lowmem);
+void mthca_free_icm_table(struct mthca_dev *dev, struct mthca_icm_table *table);
+int mthca_table_get(struct mthca_dev *dev, struct mthca_icm_table *table, int obj);
+void mthca_table_put(struct mthca_dev *dev, struct mthca_icm_table *table, int obj);
+void *mthca_table_find(struct mthca_icm_table *table, int obj);
+int mthca_table_get_range(struct mthca_dev *dev, struct mthca_icm_table *table,
+                         int start, int end);
+void mthca_table_put_range(struct mthca_dev *dev, struct mthca_icm_table *table,
+                          int start, int end);
+
+static inline void mthca_icm_first(struct mthca_icm *icm,
+                                  struct mthca_icm_iter *iter)
+{
+       iter->icm      = icm;
+       iter->chunk    = list_empty(&icm->chunk_list) ?
+               NULL : list_entry(icm->chunk_list.next,
+                                 struct mthca_icm_chunk, list);
+       iter->page_idx = 0;
+}
+
+static inline int mthca_icm_last(struct mthca_icm_iter *iter)
+{
+       return !iter->chunk;
+}
+
+static inline void mthca_icm_next(struct mthca_icm_iter *iter)
+{
+       if (++iter->page_idx >= iter->chunk->nsg) {
+               if (iter->chunk->list.next == &iter->icm->chunk_list) {
+                       iter->chunk = NULL;
+                       return;
+               }
+
+               iter->chunk = list_entry(iter->chunk->list.next,
+                                        struct mthca_icm_chunk, list);
+               iter->page_idx = 0;
+       }
+}
+
+static inline dma_addr_t mthca_icm_addr(struct mthca_icm_iter *iter)
+{
+       return sg_dma_address(&iter->chunk->mem[iter->page_idx]);
+}
+
+static inline unsigned long mthca_icm_size(struct mthca_icm_iter *iter)
+{
+       return sg_dma_len(&iter->chunk->mem[iter->page_idx]);
+}
+
+enum {
+       MTHCA_DB_REC_PER_PAGE = 4096 / 8
+};
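+
+/*
+ * Each doorbell record is one __be64, so a 4 KB UARC page holds
+ * 4096 / 8 = 512 records; db_index = page * MTHCA_DB_REC_PER_PAGE + slot
+ * is the encoding used by mthca_alloc_db() and mthca_free_db().
+ */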
+
+struct mthca_db_page {
+       DECLARE_BITMAP(used, MTHCA_DB_REC_PER_PAGE);
+       __be64    *db_rec;
+       struct scatterlist sg;
+};
+
+struct mthca_db_table {
+       int                   npages;
+       int                   max_group1;
+       int                   min_group2;
+       struct mthca_db_page *page;
+       KMUTEX          mutex;
+};
+
+enum mthca_db_type {
+       MTHCA_DB_TYPE_INVALID   = 0x0,
+       MTHCA_DB_TYPE_CQ_SET_CI = 0x1,
+       MTHCA_DB_TYPE_CQ_ARM    = 0x2,
+       MTHCA_DB_TYPE_SQ        = 0x3,
+       MTHCA_DB_TYPE_RQ        = 0x4,
+       MTHCA_DB_TYPE_SRQ       = 0x5,
+       MTHCA_DB_TYPE_GROUP_SEP = 0x7
+};
+
+struct mthca_user_db_table;
+struct mthca_uar;
+
+int mthca_map_user_db(struct mthca_dev *dev, struct mthca_uar *uar,
+                     struct mthca_user_db_table *db_tab, int index, u64 uaddr);
+void mthca_unmap_user_db(struct mthca_dev *dev, struct mthca_uar *uar,
+                        struct mthca_user_db_table *db_tab, int index);
+struct mthca_user_db_table *mthca_init_user_db_tab(struct mthca_dev *dev);
+void mthca_cleanup_user_db_tab(struct mthca_dev *dev, struct mthca_uar *uar,
+                              struct mthca_user_db_table *db_tab);
+
+int mthca_init_db_tab(struct mthca_dev *dev);
+void mthca_cleanup_db_tab(struct mthca_dev *dev);
+int mthca_alloc_db(struct mthca_dev *dev, enum mthca_db_type type, u32 qn, __be32 **db);
+void mthca_free_db(struct mthca_dev *dev, int type, int db_index);
+
+#endif /* MTHCA_MEMFREE_H */
diff --git a/trunk/hw/mthca/kernel/mthca_mr.c b/trunk/hw/mthca/kernel/mthca_mr.c
new file mode 100644 (file)
index 0000000..b9766da
--- /dev/null
@@ -0,0 +1,951 @@
+/*
+ * Copyright (c) 2004 Topspin Communications.  All rights reserved.
+ * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id: mthca_mr.c 2905 2005-07-25 18:26:52Z roland $
+ */
+
+#include "mthca_dev.h"
+#if defined(EVENT_TRACING)
+#ifdef offsetof
+#undef offsetof
+#endif
+#include "mthca_mr.tmh"
+#endif
+#include "mthca_cmd.h"
+#include "mthca_memfree.h"
+
+static int mthca_buddy_init(struct mthca_buddy *buddy, int max_order);
+static void mthca_buddy_cleanup(struct mthca_buddy *buddy);
+
+#ifdef ALLOC_PRAGMA
+#pragma alloc_text (PAGE, mthca_buddy_init)
+#pragma alloc_text (PAGE, mthca_buddy_cleanup)
+#pragma alloc_text (PAGE, mthca_init_mr_table)
+#pragma alloc_text (PAGE, mthca_cleanup_mr_table)
+#endif
+
+struct mthca_mtt {
+       struct mthca_buddy *buddy;
+       int                 order;
+       u32                 first_seg;
+};
+
+/*
+ * Must be packed because mtt_seg is 64 bits but only aligned to 32 bits.
+ */
+#pragma pack(push,1)
+struct mthca_mpt_entry {
+       __be32 flags;
+       __be32 page_size;
+       __be32 key;
+       __be32 pd;
+       __be64 start;
+       __be64 length;
+       __be32 lkey;
+       __be32 window_count;
+       __be32 window_count_limit;
+       __be64 mtt_seg;
+       __be32 mtt_sz;          /* Arbel only */
+       u32    reserved[2];
+} ;
+#pragma pack(pop)
+
+#define MTHCA_MPT_FLAG_SW_OWNS       (0xfUL << 28)
+#define MTHCA_MPT_FLAG_MIO           (1 << 17)
+#define MTHCA_MPT_FLAG_BIND_ENABLE   (1 << 15)
+#define MTHCA_MPT_FLAG_PHYSICAL      (1 <<  9)
+#define MTHCA_MPT_FLAG_REGION        (1 <<  8)
+
+#define MTHCA_MTT_FLAG_PRESENT       1
+
+#define MTHCA_MPT_STATUS_SW 0xF0
+#define MTHCA_MPT_STATUS_HW 0x00
+
+
+static void dump_mtt(__be64 *mtt_entry ,int list_len)
+{
+       int i;
+       UNREFERENCED_PARAMETER(mtt_entry);              // for release version
+       HCA_PRINT(TRACE_LEVEL_INFORMATION ,HCA_DBG_MEMORY ,("Dumping MTT entry len %d :\n",list_len));
+       for (i = 0; i < list_len && i < MTHCA_MAILBOX_SIZE / 8 - 2; i=i+4) {
+               HCA_PRINT(TRACE_LEVEL_INFORMATION ,HCA_DBG_MEMORY ,("[%02x]  %016I64x %016I64x %016I64x %016I64x\n",i,
+                       cl_ntoh64(mtt_entry[i]),
+                       cl_ntoh64(mtt_entry[i+1]),
+                       cl_ntoh64(mtt_entry[i+2]),
+                       cl_ntoh64(mtt_entry[i+3])));
+       }
+}
+
+
+static void dump_mpt(struct mthca_mpt_entry *mpt_entry )
+{
+       int i;
+       UNREFERENCED_PARAMETER(mpt_entry);              // for release version
+       HCA_PRINT(TRACE_LEVEL_INFORMATION ,HCA_DBG_MEMORY ,("Dumping MPT entry %08x :\n", mpt_entry->key));
+       for (i = 0; i < sizeof (struct mthca_mpt_entry) / 4; i=i+4) {
+       HCA_PRINT(TRACE_LEVEL_INFORMATION ,HCA_DBG_MEMORY ,("[%02x]  %08x %08x %08x %08x \n",i,
+                       cl_ntoh32(((__be32 *) mpt_entry)[i]),
+                       cl_ntoh32(((__be32 *) mpt_entry)[i+1]),
+                       cl_ntoh32(((__be32 *) mpt_entry)[i+2]),
+                       cl_ntoh32(((__be32 *) mpt_entry)[i+3])));
+       }
+}
+
+
+
+
+
+
+
+
+/*
+ * Buddy allocator for MTT segments (currently not very efficient
+ * since it doesn't keep a free list and just searches linearly
+ * through the bitmaps)
+ */
+
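+/*
+ * Find the lowest order >= 'order' with a free block, claim it, and split
+ * it down to the requested order, marking the buddy of each split half
+ * free.  Returns the first segment of the block, or (u32)-1 if none is free.
+ */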
+static u32 mthca_buddy_alloc(struct mthca_buddy *buddy, int order)
+{
+       int o;
+       u32 m;
+       u32 seg;
+       SPIN_LOCK_PREP(lh);
+
+       spin_lock(&buddy->lock, &lh);
+
+       for (o = order; o <= buddy->max_order; ++o) {
+               m = 1 << (buddy->max_order - o);
+               seg = find_first_bit(buddy->bits[o], m);
+               if (seg < m)
+                       goto found;
+       }
+
+       spin_unlock(&lh);
+       return (u32)-1;
+
+ found:
+       clear_bit(seg, (long*)buddy->bits[o]);
+
+       while (o > order) {
+               --o;
+               seg <<= 1;
+               set_bit(seg ^ 1, (long*)buddy->bits[o]);
+       }
+
+       spin_unlock(&lh);
+
+       seg <<= order;
+
+       return seg;
+}
+
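+/*
+ * Free a block: as long as its buddy is also free, merge the two and move
+ * up one order, then mark the resulting block free.
+ */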
+static void mthca_buddy_free(struct mthca_buddy *buddy, u32 seg, int order)
+{
+       SPIN_LOCK_PREP(lh);
+
+       seg >>= order;
+
+       spin_lock(&buddy->lock, &lh);
+
+       while (test_bit(seg ^ 1, buddy->bits[order])) {
+               clear_bit(seg ^ 1, (long*)buddy->bits[order]);
+               seg >>= 1;
+               ++order;
+       }
+
+       set_bit(seg, (long*)buddy->bits[order]);
+
+       spin_unlock(&lh);
+}
+
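+/* Allocate one bitmap per order; initially only the single top-order block is free. */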
+static int mthca_buddy_init(struct mthca_buddy *buddy, int max_order)
+{
+       int i, s;
+
+       buddy->max_order = max_order;
+       spin_lock_init(&buddy->lock);
+
+       buddy->bits = kmalloc((buddy->max_order + 1) * sizeof (long *),
+                             GFP_KERNEL);
+       if (!buddy->bits)
+               goto err_out;
+
+       RtlZeroMemory(buddy->bits, (buddy->max_order + 1) * sizeof (long *));
+
+       for (i = 0; i <= buddy->max_order; ++i) {
+               s = BITS_TO_LONGS(1 << (buddy->max_order - i));
+               buddy->bits[i] = kmalloc(s * sizeof (long), GFP_KERNEL);
+               if (!buddy->bits[i])
+                       goto err_out_free;
+               bitmap_zero(buddy->bits[i],
+                           1 << (buddy->max_order - i));
+       }
+
+       set_bit(0, (long*)buddy->bits[buddy->max_order]);
+
+       return 0;
+
+err_out_free:
+       for (i = 0; i <= buddy->max_order; ++i)
+               kfree(buddy->bits[i]);
+
+       kfree(buddy->bits);
+
+err_out:
+       return -ENOMEM;
+}
+
+static void mthca_buddy_cleanup(struct mthca_buddy *buddy)
+{
+       int i;
+
+       for (i = 0; i <= buddy->max_order; ++i)
+               kfree(buddy->bits[i]);
+
+       kfree(buddy->bits);
+}
+
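+/*
+ * Allocate 2^order MTT segments; on mem-free HCAs also map the backing
+ * ICM pages (and undo the buddy allocation if that fails).
+ */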
+static u32 mthca_alloc_mtt_range(struct mthca_dev *dev, int order,
+                                struct mthca_buddy *buddy)
+{
+       u32 seg = mthca_buddy_alloc(buddy, order);
+
+       if (seg == -1)
+               return (u32)-1;
+
+       if (mthca_is_memfree(dev))
+               if (mthca_table_get_range(dev, dev->mr_table.mtt_table, seg,
+                                         seg + (1 << order) - 1)) {
+                       mthca_buddy_free(buddy, seg, order);
+                       seg = (u32)-1;
+               }
+
+       return seg;
+}
+
+static struct mthca_mtt *__mthca_alloc_mtt(struct mthca_dev *dev, int size,
+                                          struct mthca_buddy *buddy)
+{
+       struct mthca_mtt *mtt;
+       int i;
+       HCA_ENTER(HCA_DBG_MEMORY);
+       if (size <= 0)
+               return ERR_PTR(-EINVAL);
+
+       mtt = kmalloc(sizeof *mtt, GFP_KERNEL);
+       if (!mtt)
+               return ERR_PTR(-ENOMEM);
+
+       mtt->buddy = buddy;
+       mtt->order = 0;
+       for (i = MTHCA_MTT_SEG_SIZE / 8; i < size; i <<= 1)
+               ++mtt->order;
+
+       mtt->first_seg = mthca_alloc_mtt_range(dev, mtt->order, buddy);
+       if (mtt->first_seg == -1) {
+               kfree(mtt);
+               return ERR_PTR(-ENOMEM);
+       }
+       HCA_EXIT(HCA_DBG_MEMORY);
+       return mtt;
+}
+
+struct mthca_mtt *mthca_alloc_mtt(struct mthca_dev *dev, int size)
+{
+       return __mthca_alloc_mtt(dev, size, &dev->mr_table.mtt_buddy);
+}
+
+void mthca_free_mtt(struct mthca_dev *dev, struct mthca_mtt *mtt)
+{
+       if (!mtt)
+               return;
+
+       mthca_buddy_free(mtt->buddy, mtt->first_seg, mtt->order);
+
+       mthca_table_put_range(dev, dev->mr_table.mtt_table,
+                             mtt->first_seg,
+                             mtt->first_seg + (1 << mtt->order) - 1);
+
+       kfree(mtt);
+}
+
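+/*
+ * Write MTT entries through the WRITE_MTT firmware command, one mailbox at
+ * a time: entry 0 holds the target MTT address, entry 1 is reserved, and up
+ * to MTHCA_MAILBOX_SIZE / 8 - 2 page addresses (tagged PRESENT) follow.
+ */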
+int mthca_write_mtt(struct mthca_dev *dev, struct mthca_mtt *mtt,
+                   int start_index, u64 *buffer_list, int list_len)
+{
+       struct mthca_mailbox *mailbox;
+       __be64 *mtt_entry;
+       int err = 0;
+       u8 status;
+       int i;
+       u64 val = 1;
+
+       mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
+       if (IS_ERR(mailbox))
+               return PTR_ERR(mailbox);
+       mtt_entry = mailbox->buf;
+
+       while (list_len > 0) {
+               val = dev->mr_table.mtt_base +
+                       mtt->first_seg * MTHCA_MTT_SEG_SIZE + start_index * 8;
+               // TODO: workaround for a bug in _byteswap_uint64: in the release build
+               // the optimizer folds the expression above into the function call and
+               // generates incorrect code, so use the CL_HTON64 macro here instead.
+               mtt_entry[0] = CL_HTON64(val); 
+               mtt_entry[1] = 0;
+               for (i = 0; i < list_len && i < MTHCA_MAILBOX_SIZE / 8 - 2; ++i) {
+                       val = buffer_list[i];
+                       // Compiler bug: it cannot OR into a u64 directly, so OR the flag into the low dword.
+                       *(PULONG)&val |= MTHCA_MTT_FLAG_PRESENT;
+                       mtt_entry[i + 2] = cl_hton64(val);
+               }
+
+               /*
+                * If we have an odd number of entries to write, add
+                * one more dummy entry for firmware efficiency.
+                */
+               if (i & 1)
+                       mtt_entry[i + 2] = 0;
+               
+               #if 0
+                       dump_mtt(mtt_entry ,i);
+               #endif
+               
+               err = mthca_WRITE_MTT(dev, mailbox, (i + 1) & ~1, &status);
+               if (err) {
+                       HCA_PRINT(TRACE_LEVEL_WARNING  ,HCA_DBG_MEMORY  ,("WRITE_MTT failed (%d)\n", err));
+                       goto out;
+               }
+               if (status) {
+                       HCA_PRINT(TRACE_LEVEL_WARNING,HCA_DBG_MEMORY,("WRITE_MTT returned status 0x%02x\n",
+                                  status));
+                       err = -EINVAL;
+                       goto out;
+               }
+
+               list_len    -= i;
+               start_index += i;
+               buffer_list += i;
+       }
+
+out:
+       mthca_free_mailbox(dev, mailbox);
+       return err;
+}
+
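+/*
+ * Key <-> MPT index conversion: Tavor uses the index directly as the key;
+ * Arbel (mem-free) rotates the index left by 8 bits, and back.
+ */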
+static inline u32 tavor_hw_index_to_key(u32 ind)
+{
+       return ind;
+}
+
+static inline u32 tavor_key_to_hw_index(u32 key)
+{
+       return key;
+}
+
+static inline u32 arbel_hw_index_to_key(u32 ind)
+{
+       return (ind >> 24) | (ind << 8);
+}
+
+static inline u32 arbel_key_to_hw_index(u32 key)
+{
+       return (key << 24) | (key >> 8);
+}
+
+static inline u32 hw_index_to_key(struct mthca_dev *dev, u32 ind)
+{
+       if (mthca_is_memfree(dev))
+               return arbel_hw_index_to_key(ind);
+       else
+               return tavor_hw_index_to_key(ind);
+}
+
+static inline u32 key_to_hw_index(struct mthca_dev *dev, u32 key)
+{
+       if (mthca_is_memfree(dev))
+               return arbel_key_to_hw_index(key);
+       else
+               return tavor_key_to_hw_index(key);
+}
+
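+/*
+ * Allocate an MPT index (mapping its ICM page on mem-free HCAs), build the
+ * MPT entry in a mailbox and hand ownership to firmware with SW2HW_MPT.
+ */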
+int mthca_mr_alloc(struct mthca_dev *dev, u32 pd, int buffer_size_shift,
+                  u64 iova, u64 total_size, mthca_mpt_access_t access, struct mthca_mr *mr)
+{
+       struct mthca_mailbox *mailbox;
+       struct mthca_mpt_entry *mpt_entry;
+       u32 key;
+       int err;
+       u8 status;
+       CPU_2_BE64_PREP;
+
+       might_sleep();
+
+       WARN_ON(buffer_size_shift >= 32);
+
+       key = mthca_alloc(&dev->mr_table.mpt_alloc);
+       if (key == -1)
+               return -ENOMEM;
+       mr->ibmr.rkey = mr->ibmr.lkey = hw_index_to_key(dev, key);
+
+       if (mthca_is_memfree(dev)) {
+               err = mthca_table_get(dev, dev->mr_table.mpt_table, key);
+               if (err)
+                       goto err_out_mpt_free;
+       }
+
+       mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
+       if (IS_ERR(mailbox)) {
+               err = PTR_ERR(mailbox);
+               goto err_out_table;
+       }
+       mpt_entry = mailbox->buf;
+
+       mpt_entry->flags = cl_hton32(MTHCA_MPT_FLAG_SW_OWNS     |
+                                      MTHCA_MPT_FLAG_MIO         |
+                                      MTHCA_MPT_FLAG_REGION      |
+                                      access);
+       if (!mr->mtt)
+               mpt_entry->flags |= cl_hton32(MTHCA_MPT_FLAG_PHYSICAL);
+
+       mpt_entry->page_size = cl_hton32(buffer_size_shift - 12);
+       mpt_entry->key       = cl_hton32(key);
+       mpt_entry->pd        = cl_hton32(pd);
+       mpt_entry->start     = cl_hton64(iova);
+       mpt_entry->length    = cl_hton64(total_size);
+
+       RtlZeroMemory(&mpt_entry->lkey, 
+               sizeof *mpt_entry - offsetof(struct mthca_mpt_entry, lkey));
+
+       if (mr->mtt)
+               mpt_entry->mtt_seg =
+                       CPU_2_BE64(dev->mr_table.mtt_base +
+                                   mr->mtt->first_seg * MTHCA_MTT_SEG_SIZE);
+
+       {
+               dump_mpt(mpt_entry);
+       }
+
+       err = mthca_SW2HW_MPT(dev, mailbox,
+                             key & (dev->limits.num_mpts - 1),
+                             &status);
+       if (err) {
+               HCA_PRINT(TRACE_LEVEL_WARNING  ,HCA_DBG_MEMORY  ,("SW2HW_MPT failed (%d)\n", err));
+               goto err_out_mailbox;
+       } else if (status) {
+               HCA_PRINT(TRACE_LEVEL_WARNING,HCA_DBG_MEMORY,("SW2HW_MPT returned status 0x%02x\n",
+                          status));
+               err = -EINVAL;
+               goto err_out_mailbox;
+       }
+
+       mthca_free_mailbox(dev, mailbox);
+       return err;
+
+err_out_mailbox:
+       mthca_free_mailbox(dev, mailbox);
+
+err_out_table:
+       mthca_table_put(dev, dev->mr_table.mpt_table, key);
+
+err_out_mpt_free:
+       mthca_free(&dev->mr_table.mpt_alloc, key);
+       return err;
+}
+
+int mthca_mr_alloc_notrans(struct mthca_dev *dev, u32 pd,
+                          mthca_mpt_access_t access, struct mthca_mr *mr)
+{
+       mr->mtt = NULL;
+       return mthca_mr_alloc(dev, pd, 12, 0, ~0ULL, access, mr);
+}
+
+int mthca_mr_alloc_phys(struct mthca_dev *dev, u32 pd,
+                       u64 *buffer_list, int buffer_size_shift,
+                       int list_len, u64 iova, u64 total_size,
+                       mthca_mpt_access_t access, struct mthca_mr *mr)
+{
+       int err;
+       HCA_ENTER(HCA_DBG_MEMORY);
+       mr->mtt = mthca_alloc_mtt(dev, list_len);
+       if (IS_ERR(mr->mtt)) {
+               err = PTR_ERR(mr->mtt);
+               goto out;
+       }
+
+       err = mthca_write_mtt(dev, mr->mtt, 0, buffer_list, list_len);
+       if (err) {
+               mthca_free_mtt(dev, mr->mtt);
+               goto out;
+       }
+
+       err = mthca_mr_alloc(dev, pd, buffer_size_shift, iova,
+                            total_size, access, mr);
+       if (err)
+               mthca_free_mtt(dev, mr->mtt);
+
+out:
+       HCA_EXIT(HCA_DBG_MEMORY);
+       return err;
+}
+
+/* Free mr or fmr */
+static void mthca_free_region(struct mthca_dev *dev, u32 lkey)
+{
+       mthca_table_put(dev, dev->mr_table.mpt_table,   key_to_hw_index(dev, lkey));
+       mthca_free(&dev->mr_table.mpt_alloc, key_to_hw_index(dev, lkey));
+}
+
+void mthca_free_mr(struct mthca_dev *dev, struct mthca_mr *mr)
+{
+       int err;
+       u8 status;
+
+       might_sleep();
+
+       err = mthca_HW2SW_MPT(dev, NULL,
+                             key_to_hw_index(dev, mr->ibmr.lkey) &
+                             (dev->limits.num_mpts - 1),
+                             &status);
+       if (err){
+               HCA_PRINT(TRACE_LEVEL_WARNING  ,HCA_DBG_MEMORY  ,("HW2SW_MPT failed (%d)\n", err));
+       }else if (status){
+               HCA_PRINT(TRACE_LEVEL_WARNING,HCA_DBG_MEMORY,("HW2SW_MPT returned status 0x%02x\n",
+                          status));
+       }
+
+       mthca_free_region(dev, mr->ibmr.lkey);
+       mthca_free_mtt(dev, mr->mtt);
+}
+
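+/*
+ * FMRs keep direct pointers to their MPT and MTT entries (ICM on mem-free
+ * HCAs, ioremapped device memory on Tavor) so that later map/unmap calls
+ * can bypass firmware commands.
+ */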
+int mthca_fmr_alloc(struct mthca_dev *dev, u32 pd,
+                   mthca_mpt_access_t access, struct mthca_fmr *mr)
+{
+       struct mthca_mpt_entry *mpt_entry;
+       struct mthca_mailbox *mailbox;
+       u64 mtt_seg;
+       u32 key, idx;
+       u8 status;
+       int list_len = mr->attr.max_pages;
+       int err = -ENOMEM;
+       int i;
+       CPU_2_BE64_PREP;
+
+       might_sleep();
+
+       if (mr->attr.page_size < 12 || mr->attr.page_size >= 32)
+               return -EINVAL;
+
+       /* For Arbel, all MTTs must fit in the same page. */
+       if (mthca_is_memfree(dev) &&
+           mr->attr.max_pages * sizeof *mr->mem.arbel.mtts > PAGE_SIZE)
+               return -EINVAL;
+
+       mr->maps = 0;
+
+       key = mthca_alloc(&dev->mr_table.mpt_alloc);
+       if (key == -1)
+               return -ENOMEM;
+
+       idx = key & (dev->limits.num_mpts - 1);
+       mr->ibmr.rkey = mr->ibmr.lkey = hw_index_to_key(dev, key);
+
+       if (mthca_is_memfree(dev)) {
+               err = mthca_table_get(dev, dev->mr_table.mpt_table, key);
+               if (err)
+                       goto err_out_mpt_free;
+
+               mr->mem.arbel.mpt = mthca_table_find(dev->mr_table.mpt_table, key);
+               BUG_ON(!mr->mem.arbel.mpt);
+       } else
+               mr->mem.tavor.mpt = (struct mthca_mpt_entry*)((u8*)dev->mr_table.tavor_fmr.mpt_base +
+                       sizeof *(mr->mem.tavor.mpt) * idx);
+
+       mr->mtt = __mthca_alloc_mtt(dev, list_len, dev->mr_table.fmr_mtt_buddy);
+       if (IS_ERR(mr->mtt))
+               goto err_out_table;
+
+       mtt_seg = mr->mtt->first_seg * MTHCA_MTT_SEG_SIZE;
+
+       if (mthca_is_memfree(dev)) {
+               mr->mem.arbel.mtts = mthca_table_find(dev->mr_table.mtt_table,
+                                                     mr->mtt->first_seg);
+               BUG_ON(!mr->mem.arbel.mtts);
+       } else
+               mr->mem.tavor.mtts = (u64*)((u8*)dev->mr_table.tavor_fmr.mtt_base + mtt_seg);
+
+       mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
+       if (IS_ERR(mailbox))
+               goto err_out_free_mtt;
+
+       mpt_entry = mailbox->buf;
+
+       mpt_entry->flags = cl_hton32(MTHCA_MPT_FLAG_SW_OWNS     |
+                                      MTHCA_MPT_FLAG_MIO         |
+                                      MTHCA_MPT_FLAG_REGION      |
+                                      access);
+
+       mpt_entry->page_size = cl_hton32(mr->attr.page_size - 12);
+       mpt_entry->key       = cl_hton32(key);
+       mpt_entry->pd        = cl_hton32(pd);
+       RtlZeroMemory(&mpt_entry->start, 
+               sizeof *mpt_entry - offsetof(struct mthca_mpt_entry, start));
+       mpt_entry->mtt_seg   = CPU_2_BE64(dev->mr_table.mtt_base + mtt_seg);
+
+       {
+               HCA_PRINT(TRACE_LEVEL_INFORMATION  ,HCA_DBG_MEMORY  ,("Dumping MPT entry %08x:\n", mr->ibmr.lkey));
+               for (i = 0; i < sizeof (struct mthca_mpt_entry) / 4; i=i+4) {
+                               HCA_PRINT(TRACE_LEVEL_INFORMATION   ,HCA_DBG_MEMORY   ,("[%02x]  %08x %08x %08x %08x \n",i,
+                                       cl_ntoh32(((__be32 *) mpt_entry)[i]),
+                                       cl_ntoh32(((__be32 *) mpt_entry)[i+1]),
+                                       cl_ntoh32(((__be32 *) mpt_entry)[i+2]),
+                                       cl_ntoh32(((__be32 *) mpt_entry)[i+3])));
+               }
+       }
+
+       err = mthca_SW2HW_MPT(dev, mailbox,
+                             key & (dev->limits.num_mpts - 1),
+                             &status);
+       if (err) {
+               HCA_PRINT(TRACE_LEVEL_WARNING  ,HCA_DBG_MEMORY  ,("SW2HW_MPT failed (%d)\n", err));
+               goto err_out_mailbox_free;
+       }
+       if (status) {
+               HCA_PRINT(TRACE_LEVEL_WARNING,HCA_DBG_MEMORY,("SW2HW_MPT returned status 0x%02x\n",
+                          status));
+               err = -EINVAL;
+               goto err_out_mailbox_free;
+       }
+
+       mthca_free_mailbox(dev, mailbox);
+       return 0;
+
+err_out_mailbox_free:
+       mthca_free_mailbox(dev, mailbox);
+
+err_out_free_mtt:
+       mthca_free_mtt(dev, mr->mtt);
+
+err_out_table:
+       mthca_table_put(dev, dev->mr_table.mpt_table, key);
+
+err_out_mpt_free:
+       mthca_free(&dev->mr_table.mpt_alloc, key);      // free the allocated index, not the (rotated) lkey
+       return err;
+}
+
+int mthca_free_fmr(struct mthca_dev *dev, struct mthca_fmr *fmr)
+{
+       if (fmr->maps)
+               return -EBUSY;
+
+       mthca_free_region(dev, fmr->ibmr.lkey);
+       mthca_free_mtt(dev, fmr->mtt);
+
+       return 0;
+}
+
+static inline int mthca_check_fmr(struct mthca_fmr *fmr, u64 *page_list,
+                                 int list_len, u64 iova)
+{
+       int page_mask;
+       UNREFERENCED_PARAMETER(page_list);
+
+       if (list_len > fmr->attr.max_pages)
+               return -EINVAL;
+
+       page_mask = (1 << fmr->attr.page_size) - 1;
+
+       /* We are getting page lists, so va must be page aligned. */
+       if (iova & page_mask)
+               return -EINVAL;
+
+       /* Trust the user not to pass misaligned data in page_list */
+       #if 0
+               for (i = 0; i < list_len; ++i) {
+                       if (page_list[i] & ~page_mask)
+                               return -EINVAL;
+               }
+       #endif  
+
+       if (fmr->maps >= fmr->attr.max_maps)
+               return -EINVAL;
+
+       return 0;
+}
+
+
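+/*
+ * Remap an FMR on Tavor: bump the key, put the MPT into SW ownership,
+ * write the new MTT entries and MPT fields directly over the PCI mapping,
+ * then flip the MPT back to HW ownership.
+ */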
+int mthca_tavor_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list,
+                            int list_len, u64 iova)
+{
+       struct mthca_fmr *fmr = to_mfmr(ibfmr);
+       struct mthca_dev *dev = to_mdev(ibfmr->device);
+       struct mthca_mpt_entry mpt_entry;
+       u32 key;
+       int i, err;
+       CPU_2_BE64_PREP;
+
+       err = mthca_check_fmr(fmr, page_list, list_len, iova);
+       if (err)
+               return err;
+
+       ++fmr->maps;
+
+       key = tavor_key_to_hw_index(fmr->ibmr.lkey);
+       key += dev->limits.num_mpts;
+       fmr->ibmr.lkey = fmr->ibmr.rkey = tavor_hw_index_to_key(key);
+
+       writeb(MTHCA_MPT_STATUS_SW, fmr->mem.tavor.mpt);
+
+       for (i = 0; i < list_len; ++i) {
+               u64 val = page_list[i];
+               __be64 mtt_entry;
+               // Compiler bug: it cannot OR into a u64 directly, so OR the flag into the
+               // low dword before converting to big-endian.
+               *(PULONG)&val |= MTHCA_MTT_FLAG_PRESENT;
+               mtt_entry = cl_hton64(val);
+               mthca_write64_raw(mtt_entry, fmr->mem.tavor.mtts + i);
+       }
+
+       mpt_entry.lkey   = cl_hton32(key);
+       mpt_entry.length = CPU_2_BE64(list_len * (1ull << fmr->attr.page_size));
+       mpt_entry.start  = cl_hton64(iova);
+
+       __raw_writel((u32) mpt_entry.lkey, &fmr->mem.tavor.mpt->key);
+       memcpy_toio(&fmr->mem.tavor.mpt->start, &mpt_entry.start,
+                   offsetof(struct mthca_mpt_entry, window_count) -
+                   offsetof(struct mthca_mpt_entry, start));
+
+       writeb(MTHCA_MPT_STATUS_HW, fmr->mem.tavor.mpt);
+
+       return 0;
+}
+
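+/*
+ * Same as above for Arbel, but the MPT/MTT live in host ICM memory, so
+ * plain stores separated by memory barriers are sufficient.
+ */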
+int mthca_arbel_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list,
+                            int list_len, u64 iova)
+{
+       struct mthca_fmr *fmr = to_mfmr(ibfmr);
+       struct mthca_dev *dev = to_mdev(ibfmr->device);
+       u32 key;
+       int i, err;
+       CPU_2_BE64_PREP;
+
+       err = mthca_check_fmr(fmr, page_list, list_len, iova);
+       if (err)
+               return err;
+
+       ++fmr->maps;
+
+       key = arbel_key_to_hw_index(fmr->ibmr.lkey);
+       key += dev->limits.num_mpts;
+       fmr->ibmr.lkey = fmr->ibmr.rkey = arbel_hw_index_to_key(key);
+
+       *(u8 *) fmr->mem.arbel.mpt = MTHCA_MPT_STATUS_SW;
+
+       wmb();
+
+       for (i = 0; i < list_len; ++i) {
+               // Compiler bug: it cannot OR into a u64 directly, so OR the flag into the low dword.
+               u64 val = page_list[i];
+               *(PULONG)&val |= MTHCA_MTT_FLAG_PRESENT;
+               fmr->mem.arbel.mtts[i] = cl_hton64(val);
+       }
+
+       fmr->mem.arbel.mpt->key    = cl_hton32(key);
+       fmr->mem.arbel.mpt->lkey   = cl_hton32(key);
+       fmr->mem.arbel.mpt->length = CPU_2_BE64(list_len * (1ull << fmr->attr.page_size));
+       fmr->mem.arbel.mpt->start  = cl_hton64(iova);
+
+       wmb();
+
+       *(u8 *) fmr->mem.arbel.mpt = MTHCA_MPT_STATUS_HW;
+
+       wmb();
+
+       return 0;
+}
+
+void mthca_tavor_fmr_unmap(struct mthca_dev *dev, struct mthca_fmr *fmr)
+{
+       u32 key;
+
+       if (!fmr->maps)
+               return;
+
+       key = tavor_key_to_hw_index(fmr->ibmr.lkey);
+       key &= dev->limits.num_mpts - 1;
+       fmr->ibmr.lkey = fmr->ibmr.rkey = tavor_hw_index_to_key(key);
+
+       fmr->maps = 0;
+
+       writeb(MTHCA_MPT_STATUS_SW, fmr->mem.tavor.mpt);
+}
+
+void mthca_arbel_fmr_unmap(struct mthca_dev *dev, struct mthca_fmr *fmr)
+{
+       u32 key;
+
+       if (!fmr->maps)
+               return;
+
+       key = arbel_key_to_hw_index(fmr->ibmr.lkey);
+       key &= dev->limits.num_mpts - 1;
+       fmr->ibmr.lkey = fmr->ibmr.rkey = arbel_hw_index_to_key(key);
+
+       fmr->maps = 0;
+
+       *(u8 *) fmr->mem.arbel.mpt = MTHCA_MPT_STATUS_SW;
+}
+
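+/*
+ * Set up the MPT allocator and the MTT buddy allocator.  On Tavor, ioremap
+ * the FMR portion of the MPT/MTT tables and give FMRs their own buddy so
+ * regular MRs never get FMR keys; finally take the reserved MTTs out of
+ * the pool.
+ */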
+int mthca_init_mr_table(struct mthca_dev *dev)
+{
+       int err, i;
+
+       err = mthca_alloc_init(&dev->mr_table.mpt_alloc,
+                              (u32)dev->limits.num_mpts,
+                              (u32)~0, (u32)dev->limits.reserved_mrws);
+       if (err)
+               return err;
+
+       if (!mthca_is_memfree(dev) &&
+           (dev->mthca_flags & MTHCA_FLAG_DDR_HIDDEN))
+               dev->limits.fmr_reserved_mtts = 0;
+       else
+               dev->mthca_flags |= MTHCA_FLAG_FMR;
+
+       err = mthca_buddy_init(&dev->mr_table.mtt_buddy,
+                              fls(dev->limits.num_mtt_segs - 1));
+
+       if (err)
+               goto err_mtt_buddy;
+
+       dev->mr_table.tavor_fmr.mpt_base = NULL;
+       dev->mr_table.tavor_fmr.mtt_base = NULL;
+
+       if (dev->limits.fmr_reserved_mtts) {
+               i = fls(dev->limits.fmr_reserved_mtts - 1);
+
+               if (i >= 31) {
+                       HCA_PRINT(TRACE_LEVEL_WARNING  ,HCA_DBG_MEMORY  ,("Unable to reserve 2^31 FMR MTTs.\n"));
+                       err = -EINVAL;
+                       goto err_fmr_mpt;
+               }
+
+               dev->mr_table.tavor_fmr.mpt_base =
+                       ioremap(dev->mr_table.mpt_base,
+                               (1 << i) * sizeof (struct mthca_mpt_entry), 
+                               &dev->mr_table.tavor_fmr.mpt_base_size);
+
+               if (!dev->mr_table.tavor_fmr.mpt_base) {
+                       HCA_PRINT(TRACE_LEVEL_WARNING  ,HCA_DBG_MEMORY  ,("MPT ioremap for FMR failed.\n"));
+                       err = -ENOMEM;
+                       goto err_fmr_mpt;
+               }
+
+               dev->mr_table.tavor_fmr.mtt_base =
+                       ioremap(dev->mr_table.mtt_base,
+                               (1 << i) * MTHCA_MTT_SEG_SIZE,
+                               &dev->mr_table.tavor_fmr.mtt_base_size );
+               if (!dev->mr_table.tavor_fmr.mtt_base) {
+                       HCA_PRINT(TRACE_LEVEL_WARNING  ,HCA_DBG_MEMORY  ,("MTT ioremap for FMR failed.\n"));
+                       err = -ENOMEM;
+                       goto err_fmr_mtt;
+               }
+
+               err = mthca_buddy_init(&dev->mr_table.tavor_fmr.mtt_buddy, i);
+               if (err)
+                       goto err_fmr_mtt_buddy;
+
+               /* Prevent regular MRs from using FMR keys */
+               err = mthca_buddy_alloc(&dev->mr_table.mtt_buddy, i);
+               if (err)
+                       goto err_reserve_fmr;
+
+               dev->mr_table.fmr_mtt_buddy =
+                       &dev->mr_table.tavor_fmr.mtt_buddy;
+       } else
+               dev->mr_table.fmr_mtt_buddy = &dev->mr_table.mtt_buddy;
+
+       /* FMR table is always the first, take reserved MTTs out of there */
+       if (dev->limits.reserved_mtts) {
+               i = fls(dev->limits.reserved_mtts - 1);
+
+               if (mthca_alloc_mtt_range(dev, i,
+                                         dev->mr_table.fmr_mtt_buddy) == -1) {
+                       HCA_PRINT(TRACE_LEVEL_WARNING,HCA_DBG_MEMORY,("MTT table of order %d is too small.\n",
+                                 dev->mr_table.fmr_mtt_buddy->max_order));
+                       err = -ENOMEM;
+                       goto err_reserve_mtts;
+               }
+       }
+
+       return 0;
+
+err_reserve_mtts:
+err_reserve_fmr:
+       if (dev->limits.fmr_reserved_mtts)
+               mthca_buddy_cleanup(&dev->mr_table.tavor_fmr.mtt_buddy);
+
+err_fmr_mtt_buddy:
+       if (dev->mr_table.tavor_fmr.mtt_base)
+               iounmap(dev->mr_table.tavor_fmr.mtt_base,
+                       dev->mr_table.tavor_fmr.mtt_base_size);
+
+err_fmr_mtt:
+       if (dev->mr_table.tavor_fmr.mpt_base)
+               iounmap(dev->mr_table.tavor_fmr.mpt_base,
+                       dev->mr_table.tavor_fmr.mpt_base_size);
+
+err_fmr_mpt:
+       mthca_buddy_cleanup(&dev->mr_table.mtt_buddy);
+
+err_mtt_buddy:
+       mthca_alloc_cleanup(&dev->mr_table.mpt_alloc);
+
+       return err;
+}
+
+void mthca_cleanup_mr_table(struct mthca_dev *dev)
+{
+       /* XXX check if any MRs are still allocated? */
+       if (dev->limits.fmr_reserved_mtts)
+               mthca_buddy_cleanup(&dev->mr_table.tavor_fmr.mtt_buddy);
+
+       mthca_buddy_cleanup(&dev->mr_table.mtt_buddy);
+
+       if (dev->mr_table.tavor_fmr.mtt_base)
+               iounmap(dev->mr_table.tavor_fmr.mtt_base,
+                       dev->mr_table.tavor_fmr.mtt_base_size);
+       if (dev->mr_table.tavor_fmr.mpt_base)
+               iounmap(dev->mr_table.tavor_fmr.mpt_base,
+                       dev->mr_table.tavor_fmr.mpt_base_size);
+
+       mthca_alloc_cleanup(&dev->mr_table.mpt_alloc);
+}
+
diff --git a/trunk/hw/mthca/kernel/mthca_pd.c b/trunk/hw/mthca/kernel/mthca_pd.c
new file mode 100644 (file)
index 0000000..c42105c
--- /dev/null
@@ -0,0 +1,91 @@
+/*
+ * Copyright (c) 2004 Topspin Communications.  All rights reserved.
+ * Copyright (c) 2005 Cisco Systems.  All rights reserved.
+ * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id: mthca_pd.c 2803 2005-07-05 15:58:55Z roland $
+ */
+
+#include "mthca_dev.h"
+
+
+#ifdef ALLOC_PRAGMA
+#pragma alloc_text (PAGE, mthca_init_pd_table)
+#pragma alloc_text (PAGE, mthca_cleanup_pd_table)
+#endif
+
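+/*
+ * A privileged (kernel) PD also gets a "no translation" memory region
+ * covering the whole address space (see mthca_mr_alloc_notrans).
+ */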
+int mthca_pd_alloc(struct mthca_dev *dev, int privileged, struct mthca_pd *pd)
+{
+       int err = 0;
+
+       might_sleep();
+
+       pd->privileged = privileged;
+
+       atomic_set(&pd->sqp_count, 0);
+       pd->pd_num = mthca_alloc(&dev->pd_table.alloc);
+       if (pd->pd_num == -1)
+               return -ENOMEM;
+
+       if (privileged) {
+               err = mthca_mr_alloc_notrans(dev, pd->pd_num,
+                                            MTHCA_MPT_FLAG_LOCAL_READ |
+                                            MTHCA_MPT_FLAG_LOCAL_WRITE,
+                                            &pd->ntmr);
+               if (err)
+                       mthca_free(&dev->pd_table.alloc, pd->pd_num);
+       }
+
+       return err;
+}
+
+void mthca_pd_free(struct mthca_dev *dev, struct mthca_pd *pd)
+{
+       might_sleep();
+       if (pd->privileged)
+               mthca_free_mr(dev, &pd->ntmr);
+       mthca_free(&dev->pd_table.alloc, pd->pd_num);
+}
+
+int mthca_init_pd_table(struct mthca_dev *dev)
+{
+       return mthca_alloc_init(&dev->pd_table.alloc,
+                               dev->limits.num_pds,
+                               (1 << 24) - 1,
+                               dev->limits.reserved_pds);
+}
+
+void mthca_cleanup_pd_table(struct mthca_dev *dev)
+{
+       /* XXX check if any PDs are still allocated? */
+       mthca_alloc_cleanup(&dev->pd_table.alloc);
+}
+
diff --git a/trunk/hw/mthca/kernel/mthca_profile.c b/trunk/hw/mthca/kernel/mthca_profile.c
new file mode 100644 (file)
index 0000000..e936b15
--- /dev/null
@@ -0,0 +1,286 @@
+/*
+ * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
+ * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id: mthca_profile.c 3047 2005-08-10 03:59:35Z roland $
+ */
+
+
+#include "mthca_profile.h"
+#if defined(EVENT_TRACING)
+#ifdef offsetof
+#undef offsetof
+#endif
+#include "mthca_profile.tmh"
+#endif
+
+enum {
+       MTHCA_RES_QP,
+       MTHCA_RES_EEC,
+       MTHCA_RES_SRQ,
+       MTHCA_RES_CQ,
+       MTHCA_RES_EQP,
+       MTHCA_RES_EEEC,
+       MTHCA_RES_EQ,
+       MTHCA_RES_RDB,
+       MTHCA_RES_MCG,
+       MTHCA_RES_MPT,
+       MTHCA_RES_MTT,
+       MTHCA_RES_UAR,
+       MTHCA_RES_UDAV,
+       MTHCA_RES_UARC,
+       MTHCA_RES_NUM
+};
+
+enum {
+       MTHCA_NUM_EQS = 32,
+       MTHCA_NUM_PDS = 1 << 15
+};
+
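+/*
+ * Lay out the firmware context tables in HCA memory (ICM for mem-free
+ * devices, DDR for Tavor): sizes are rounded up to powers of two and the
+ * resources are packed in decreasing order of size so each stays naturally
+ * aligned.  Returns the total size, or -ENOMEM cast to u64 on failure.
+ */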
+u64 mthca_make_profile(struct mthca_dev *dev,
+                      struct mthca_profile *request,
+                      struct mthca_dev_lim *dev_lim,
+                      struct mthca_init_hca_param *init_hca)
+{
+       struct mthca_resource {
+               u64 size;
+               u64 start;
+               int type;
+               int num;
+               int log_num;
+       };
+
+       u64 mem_base, mem_avail;
+       u64 total_size = 0;
+       struct mthca_resource *profile;
+       struct mthca_resource tmp;
+       int i, j;
+
+       profile = kmalloc(MTHCA_RES_NUM * sizeof *profile, GFP_KERNEL);
+       if (!profile)
+               return (u64)-ENOMEM;
+
+       RtlZeroMemory(profile, MTHCA_RES_NUM * sizeof *profile);
+
+       profile[MTHCA_RES_QP].size   = dev_lim->qpc_entry_sz;
+       profile[MTHCA_RES_EEC].size  = dev_lim->eec_entry_sz;
+       profile[MTHCA_RES_SRQ].size  = dev_lim->srq_entry_sz;
+       profile[MTHCA_RES_CQ].size   = dev_lim->cqc_entry_sz;
+       profile[MTHCA_RES_EQP].size  = dev_lim->eqpc_entry_sz;
+       profile[MTHCA_RES_EEEC].size = dev_lim->eeec_entry_sz;
+       profile[MTHCA_RES_EQ].size   = dev_lim->eqc_entry_sz;
+       profile[MTHCA_RES_RDB].size  = MTHCA_RDB_ENTRY_SIZE;
+       profile[MTHCA_RES_MCG].size  = MTHCA_MGM_ENTRY_SIZE;
+       profile[MTHCA_RES_MPT].size  = dev_lim->mpt_entry_sz;
+       profile[MTHCA_RES_MTT].size  = MTHCA_MTT_SEG_SIZE;
+       profile[MTHCA_RES_UAR].size  = dev_lim->uar_scratch_entry_sz;
+       profile[MTHCA_RES_UDAV].size = MTHCA_AV_SIZE;
+       profile[MTHCA_RES_UARC].size = request->uarc_size;
+
+       profile[MTHCA_RES_QP].num    = request->num_qp;
+       profile[MTHCA_RES_SRQ].num   = request->num_srq;
+       profile[MTHCA_RES_EQP].num   = request->num_qp;
+       profile[MTHCA_RES_RDB].num   = request->num_qp * request->rdb_per_qp;
+       profile[MTHCA_RES_CQ].num    = request->num_cq;
+       profile[MTHCA_RES_EQ].num    = MTHCA_NUM_EQS;
+       profile[MTHCA_RES_MCG].num   = request->num_mcg;
+       profile[MTHCA_RES_MPT].num   = request->num_mpt;
+       profile[MTHCA_RES_MTT].num   = request->num_mtt;
+       profile[MTHCA_RES_UAR].num   = request->num_uar;
+       profile[MTHCA_RES_UARC].num  = request->num_uar;
+       profile[MTHCA_RES_UDAV].num  = request->num_udav;
+
+       for (i = 0; i < MTHCA_RES_NUM; ++i) {
+               profile[i].type     = i;
+               profile[i].log_num  = max(ffs(profile[i].num) - 1, 0);
+               profile[i].size    *= profile[i].num;
+               if (mthca_is_memfree(dev))
+                       profile[i].size = max(profile[i].size, (u64) PAGE_SIZE);
+       }
+
+       if (mthca_is_memfree(dev)) {
+               mem_base  = 0;
+               mem_avail = dev_lim->hca.arbel.max_icm_sz;
+       } else {
+               mem_base  = dev->ddr_start;
+               mem_avail = dev->fw.tavor.fw_start - dev->ddr_start;
+       }
+
+       /*
+        * Sort the resources in decreasing order of size.  Since they
+        * all have sizes that are powers of 2, we'll be able to keep
+        * resources aligned to their size and pack them without gaps
+        * using the sorted order.
+        */
+       for (i = MTHCA_RES_NUM; i > 0; --i)
+               for (j = 1; j < i; ++j) {
+                       if (profile[j].size > profile[j - 1].size) {
+                               tmp            = profile[j];
+                               profile[j]     = profile[j - 1];
+                               profile[j - 1] = tmp;
+                       }
+               }
+
+       for (i = 0; i < MTHCA_RES_NUM; ++i) {
+               if (profile[i].size) {
+                       profile[i].start = mem_base + total_size;
+                       total_size      += profile[i].size;
+               }
+               if (total_size > mem_avail) {
+                       HCA_PRINT(TRACE_LEVEL_ERROR,HCA_DBG_LOW,("Profile requires 0x%I64x bytes; "
+                                 "won't fit in 0x%I64x bytes of context memory.\n",
+                                 (unsigned long long) total_size,
+                                 (unsigned long long) mem_avail));
+                       kfree(profile);
+                       return (u64)-ENOMEM;
+               }
+
+               if (profile[i].size)
+                       HCA_PRINT(TRACE_LEVEL_VERBOSE,HCA_DBG_LOW,("profile[%2d]--%2d/%2d @ 0x%16I64x "
+                                 "(size 0x%8I64x)\n",
+                                 i, profile[i].type, profile[i].log_num,
+                                 (unsigned long long) profile[i].start,
+                                 (unsigned long long) profile[i].size));
+       }
+
+       if (mthca_is_memfree(dev)){
+               HCA_PRINT(TRACE_LEVEL_VERBOSE,HCA_DBG_LOW,("HCA context memory: reserving %d KB\n",
+                         (int) (total_size >> 10)));
+       }else{
+               HCA_PRINT(TRACE_LEVEL_VERBOSE,HCA_DBG_LOW,("HCA memory: allocated %d KB/%d KB (%d KB free)\n",
+                         (int) (total_size >> 10), (int) (mem_avail >> 10),
+                         (int) ((mem_avail - total_size) >> 10)));
+       }
+       for (i = 0; i < MTHCA_RES_NUM; ++i) {
+               int mc_entry_sz = MTHCA_MGM_ENTRY_SIZE;
+               int mtt_seg_sz = MTHCA_MTT_SEG_SIZE;
+               
+               switch (profile[i].type) {
+               case MTHCA_RES_QP:
+                       dev->limits.num_qps   = profile[i].num;
+                       init_hca->qpc_base    = profile[i].start;
+                       init_hca->log_num_qps = (u8)profile[i].log_num;
+                       break;
+               case MTHCA_RES_EEC:
+                       dev->limits.num_eecs   = profile[i].num;
+                       init_hca->eec_base     = profile[i].start;
+                       init_hca->log_num_eecs = (u8)profile[i].log_num;
+                       break;
+               case MTHCA_RES_SRQ:
+                       dev->limits.num_srqs   = profile[i].num;
+                       init_hca->srqc_base    = profile[i].start;
+                       init_hca->log_num_srqs = (u8)profile[i].log_num;
+                       break;
+               case MTHCA_RES_CQ:
+                       dev->limits.num_cqs   = profile[i].num;
+                       init_hca->cqc_base    = profile[i].start;
+                       init_hca->log_num_cqs = (u8)profile[i].log_num;
+                       break;
+               case MTHCA_RES_EQP:
+                       init_hca->eqpc_base = profile[i].start;
+                       break;
+               case MTHCA_RES_EEEC:
+                       init_hca->eeec_base = profile[i].start;
+                       break;
+               case MTHCA_RES_EQ:
+                       dev->limits.num_eqs   = profile[i].num;
+                       init_hca->eqc_base    = profile[i].start;
+                       init_hca->log_num_eqs = (u8)profile[i].log_num;
+                       break;
+               case MTHCA_RES_RDB:
+                       for (dev->qp_table.rdb_shift = 0;
+                            request->num_qp << dev->qp_table.rdb_shift < profile[i].num;
+                            ++dev->qp_table.rdb_shift)
+                               ; /* nothing */
+                       dev->qp_table.rdb_base    = (u32) profile[i].start;
+                       init_hca->rdb_base        = profile[i].start;
+                       break;
+               case MTHCA_RES_MCG:
+                       dev->limits.num_mgms      = profile[i].num >> 1;
+                       dev->limits.num_amgms     = profile[i].num >> 1;
+                       init_hca->mc_base         = profile[i].start;
+                       init_hca->log_mc_entry_sz = (u16)(ffs(mc_entry_sz) - 1);
+                       init_hca->log_mc_table_sz = (u8)profile[i].log_num;
+                       init_hca->mc_hash_sz      = (u16)(1 << (profile[i].log_num - 1));
+                       break;
+               case MTHCA_RES_MPT:
+                       dev->limits.num_mpts   = profile[i].num;
+                       dev->mr_table.mpt_base = profile[i].start;
+                       init_hca->mpt_base     = profile[i].start;
+                       init_hca->log_mpt_sz   = (u8)profile[i].log_num;
+                       break;
+               case MTHCA_RES_MTT:
+                       dev->limits.num_mtt_segs = profile[i].num;
+                       dev->mr_table.mtt_base   = profile[i].start;
+                       init_hca->mtt_base       = profile[i].start;
+                       init_hca->mtt_seg_sz     = (u8)(ffs(mtt_seg_sz) - 7);
+                       break;
+               case MTHCA_RES_UAR:
+                       dev->limits.num_uars       = profile[i].num;
+                       init_hca->uar_scratch_base = profile[i].start;
+                       break;
+               case MTHCA_RES_UDAV:
+                       dev->av_table.ddr_av_base = profile[i].start;
+                       dev->av_table.num_ddr_avs = profile[i].num;
+                       break;
+               case MTHCA_RES_UARC:
+                       dev->uar_table.uarc_size = request->uarc_size;
+                       dev->uar_table.uarc_base = profile[i].start;
+                       init_hca->uarc_base      = profile[i].start;
+                       init_hca->log_uarc_sz    = (u8)(ffs(request->uarc_size) - 13);
+                       init_hca->log_uar_sz     = (u8)(ffs(request->num_uar) - 1);
+                       break;
+               default:
+                       break;
+               }
+       }
+
+       /*
+        * PDs don't take any HCA memory, but we assign them as part
+        * of the HCA profile anyway.
+        */
+       dev->limits.num_pds = MTHCA_NUM_PDS;
+
+       /*
+        * For Tavor, FMRs use ioremapped PCI memory. For 32 bit
+        * systems it may use too much vmalloc space to map all MTT
+        * memory, so we reserve some MTTs for FMR access, taking them
+        * out of the MR pool. They don't use additional memory, but
+        * we assign them as part of the HCA profile anyway.
+        */
+       if (mthca_is_memfree(dev))
+               dev->limits.fmr_reserved_mtts = 0;
+       else
+               dev->limits.fmr_reserved_mtts = request->fmr_reserved_mtts;
+
+       kfree(profile);
+       return total_size;
+}
diff --git a/trunk/hw/mthca/kernel/mthca_profile.h b/trunk/hw/mthca/kernel/mthca_profile.h
new file mode 100644 (file)
index 0000000..940fd76
--- /dev/null
@@ -0,0 +1,61 @@
+/*
+ * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
+ * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id: mthca_profile.h 3047 2005-08-10 03:59:35Z roland $
+ */
+
+#ifndef MTHCA_PROFILE_H
+#define MTHCA_PROFILE_H
+
+#include "mthca_dev.h"
+#include "mthca_cmd.h"
+
+struct mthca_profile {
+       int num_qp;
+       int rdb_per_qp;
+       int num_srq;
+       int num_cq;
+       int num_mcg;
+       int num_mpt;
+       int num_mtt;
+       int num_udav;
+       int num_uar;
+       int uarc_size;
+       int fmr_reserved_mtts;
+};
+
+u64 mthca_make_profile(struct mthca_dev *mdev,
+                      struct mthca_profile *request,
+                      struct mthca_dev_lim *dev_lim,
+                      struct mthca_init_hca_param *init_hca);
+
+#endif /* MTHCA_PROFILE_H */
diff --git a/trunk/hw/mthca/kernel/mthca_provider.c b/trunk/hw/mthca/kernel/mthca_provider.c
new file mode 100644 (file)
index 0000000..76f86cc
--- /dev/null
@@ -0,0 +1,1242 @@
+/* 
+ * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
+ * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
+ * Copyright (c) 2005 Cisco Systems. All rights reserved.
+ * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2004 Voltaire, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id: mthca_provider.c 3047 2005-08-10 03:59:35Z roland $
+ */
+
+#include <ib_smi.h>
+
+#include "mx_abi.h"
+#include "mthca_dev.h"
+
+#if defined(EVENT_TRACING)
+#ifdef offsetof
+#undef offsetof
+#endif
+#include "mthca_provider.tmh"
+#endif
+#include "mthca_cmd.h"
+#include "mthca_memfree.h"
+
+ void ibv_umem_release(struct ib_device *dev, struct ib_umem *umem);
+ int ibv_umem_get(struct ib_device *dev, struct ib_umem *mem,
+                void *addr, size_t size, int write);
+ static void init_query_mad(struct ib_smp *mad)
+ {
+        mad->base_version  = 1;
+        mad->mgmt_class    = IB_MGMT_CLASS_SUBN_LID_ROUTED;
+        mad->class_version = 1;
+        mad->method        = IB_MGMT_METHOD_GET;
+ }
+
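+/*
+ * Query device attributes: fetch NodeInfo via a MAD_IFC command and fill
+ * ib_device_attr from it together with the cached device limits.
+ */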
+ int mthca_query_device(struct ib_device *ibdev,
+                             struct ib_device_attr *props)
+{
+       struct ib_smp *in_mad  = NULL;
+       struct ib_smp *out_mad = NULL;
+       int err = -ENOMEM;
+       struct mthca_dev* mdev = to_mdev(ibdev);
+
+       u8 status;
+
+       in_mad  = kzalloc(sizeof *in_mad, GFP_KERNEL);
+       out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
+       if (!in_mad || !out_mad)
+               goto out;
+
+       init_query_mad(in_mad);
+       in_mad->attr_id = IB_SMP_ATTR_NODE_INFO;
+
+       err = mthca_MAD_IFC(mdev, 1, 1,
+           1, NULL, NULL, in_mad, out_mad, &status);
+       if (err)
+               goto out;
+       if (status) {
+               err = -EINVAL;
+               goto out;
+       }
+
+       RtlZeroMemory(props, sizeof *props);
+       props->fw_ver              = mdev->fw_ver;
+       props->device_cap_flags    = mdev->device_cap_flags;
+       props->vendor_id           = cl_ntoh32(*(__be32 *) (out_mad->data + 36)) &
+               0xffffff;
+       props->vendor_part_id      = cl_ntoh16(*(__be16 *) (out_mad->data + 30));
+       props->hw_ver              = cl_ntoh32(*(__be32 *) (out_mad->data + 32));
+       memcpy(&props->sys_image_guid, out_mad->data +  4, 8);
+
+       props->max_mr_size         = ~0ull;
+       props->page_size_cap       = mdev->limits.page_size_cap;
+       props->max_qp              = mdev->limits.num_qps - mdev->limits.reserved_qps;
+       props->max_qp_wr           = mdev->limits.max_wqes;
+       props->max_sge             = mdev->limits.max_sg;
+       props->max_cq              = mdev->limits.num_cqs - mdev->limits.reserved_cqs;
+       props->max_cqe             = mdev->limits.max_cqes;
+       props->max_mr              = mdev->limits.num_mpts - mdev->limits.reserved_mrws;
+       props->max_pd              = mdev->limits.num_pds - mdev->limits.reserved_pds;
+       props->max_qp_rd_atom      = 1 << mdev->qp_table.rdb_shift;
+       props->max_qp_init_rd_atom = mdev->limits.max_qp_init_rdma;
+       props->max_res_rd_atom     = props->max_qp_rd_atom * props->max_qp;
+       props->max_srq             = mdev->limits.num_srqs - mdev->limits.reserved_srqs;
+       props->max_srq_wr          = mdev->limits.max_srq_wqes;
+       props->max_srq_sge         = mdev->limits.max_sg;
+       props->local_ca_ack_delay  = (u8)mdev->limits.local_ca_ack_delay;
+       props->atomic_cap          = mdev->limits.flags & DEV_LIM_FLAG_ATOMIC ? 
+                                       IB_ATOMIC_LOCAL : IB_ATOMIC_NONE;
+       props->max_pkeys           = (u16)mdev->limits.pkey_table_len;
+       props->max_mcast_grp       = mdev->limits.num_mgms + mdev->limits.num_amgms;
+       props->max_mcast_qp_attach = MTHCA_QP_PER_MGM;
+       props->max_total_mcast_qp_attach = props->max_mcast_qp_attach * 
+                                          props->max_mcast_grp;
+
+       err = 0;
+ out:
+       kfree(in_mad);
+       kfree(out_mad);
+       return err;
+}
+
+int mthca_query_port(struct ib_device *ibdev,
+                           u8 port, struct ib_port_attr *props)
+{
+       struct ib_smp *in_mad  = NULL;
+       struct ib_smp *out_mad = NULL;
+       int err = -ENOMEM;
+       u8 status;
+
+       in_mad  = kzalloc(sizeof *in_mad, GFP_KERNEL);
+       out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
+       if (!in_mad || !out_mad)
+               goto out;
+
+       init_query_mad(in_mad);
+       in_mad->attr_id  = IB_SMP_ATTR_PORT_INFO;
+       in_mad->attr_mod = cl_hton32(port);
+
+       err = mthca_MAD_IFC(to_mdev(ibdev), 1, 1,
+                           port, NULL, NULL, in_mad, out_mad,
+                           &status);
+       if (err)
+               goto out;
+       if (status) {
+               err = -EINVAL;
+               goto out;
+       }
+
+       RtlZeroMemory(props, sizeof *props);
+       props->lid               = cl_ntoh16(*(__be16 *) (out_mad->data + 16));
+       props->lmc               = out_mad->data[34] & 0x7;
+       props->sm_lid            = cl_ntoh16(*(__be16 *) (out_mad->data + 18));
+       props->sm_sl             = out_mad->data[36] & 0xf;
+       props->state             = out_mad->data[32] & 0xf;
+       props->phys_state        = out_mad->data[33] >> 4;
+       props->port_cap_flags    = cl_ntoh32(*(__be32 *) (out_mad->data + 20));
+       props->gid_tbl_len       = to_mdev(ibdev)->limits.gid_table_len;
+       props->max_msg_sz        = 0x80000000;
+       props->pkey_tbl_len      = (u16)to_mdev(ibdev)->limits.pkey_table_len;
+       props->bad_pkey_cntr     = cl_ntoh16(*(__be16 *) (out_mad->data + 46));
+       props->qkey_viol_cntr    = cl_ntoh16(*(__be16 *) (out_mad->data + 48));
+       props->active_width      = out_mad->data[31] & 0xf;
+       props->active_speed      = out_mad->data[35] >> 4;
+       props->max_mtu           = out_mad->data[41] & 0xf;
+       props->active_mtu        = out_mad->data[36] >> 4;
+       props->subnet_timeout    = out_mad->data[51] & 0x1f;
+
+ out:
+       kfree(in_mad);
+       kfree(out_mad);
+       return err;
+}
+
+int mthca_modify_port(struct ib_device *ibdev,
+                            u8 port, int port_modify_mask,
+                            struct ib_port_modify *props)
+{
+       struct mthca_set_ib_param set_ib;
+       struct ib_port_attr attr;
+       int err;
+       u8 status;
+
+       if (down_interruptible(&to_mdev(ibdev)->cap_mask_mutex))
+               return -EFAULT;
+
+       err = mthca_query_port(ibdev, port, &attr);
+       if (err)
+               goto out;
+
+       set_ib.set_si_guid     = 0;
+       set_ib.reset_qkey_viol = !!(port_modify_mask & IB_PORT_RESET_QKEY_CNTR);
+
+       set_ib.cap_mask = (attr.port_cap_flags | props->set_port_cap_mask) &
+               ~props->clr_port_cap_mask;
+
+       err = mthca_SET_IB(to_mdev(ibdev), &set_ib, port, &status);
+       if (err)
+               goto out;
+       if (status) {
+               err = -EINVAL;
+               goto out;
+       }
+
+out:
+       up(&to_mdev(ibdev)->cap_mask_mutex);
+       return err;
+}
+
+int mthca_query_pkey(struct ib_device *ibdev,
+                           u8 port, u16 index, u16 *pkey)
+{
+       struct ib_smp *in_mad  = NULL;
+       struct ib_smp *out_mad = NULL;
+       int err = -ENOMEM;
+       u8 status;
+
+       in_mad  = kzalloc(sizeof *in_mad, GFP_KERNEL);
+       out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
+       if (!in_mad || !out_mad)
+               goto out;
+
+       init_query_mad(in_mad);
+       in_mad->attr_id  = IB_SMP_ATTR_PKEY_TABLE;
+       in_mad->attr_mod = cl_hton32(index / 32);
+
+       err = mthca_MAD_IFC(to_mdev(ibdev), 1, 1,
+                           port, NULL, NULL, in_mad, out_mad,
+                           &status);
+       if (err)
+               goto out;
+       if (status) {
+               err = -EINVAL;
+               goto out;
+       }
+
+       *pkey = cl_ntoh16(((__be16 *) out_mad->data)[index % 32]);
+
+ out:
+       kfree(in_mad);
+       kfree(out_mad);
+       return err;
+}
+
+int mthca_query_gid(struct ib_device *ibdev, u8 port,
+                          int index, union ib_gid *gid)
+{
+       struct ib_smp *in_mad  = NULL;
+       struct ib_smp *out_mad = NULL;
+       int err = -ENOMEM;
+       u8 status;
+
+       in_mad  = kzalloc(sizeof *in_mad, GFP_KERNEL);
+       out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
+       if (!in_mad || !out_mad)
+               goto out;
+
+       init_query_mad(in_mad);
+       in_mad->attr_id  = IB_SMP_ATTR_PORT_INFO;
+       in_mad->attr_mod = cl_hton32(port);
+
+       err = mthca_MAD_IFC(to_mdev(ibdev), 1, 1,
+                           port, NULL, NULL, in_mad, out_mad,
+                           &status);
+       if (err)
+               goto out;
+       if (status) {
+               err = -EINVAL;
+               goto out;
+       }
+
+       memcpy(gid->raw, out_mad->data + 8, 8);
+
+       init_query_mad(in_mad);
+       in_mad->attr_id  = IB_SMP_ATTR_GUID_INFO;
+       in_mad->attr_mod = cl_hton32(index / 8);
+
+       err = mthca_MAD_IFC(to_mdev(ibdev), 1, 1,
+                           port, NULL, NULL, in_mad, out_mad,
+                           &status);
+       if (err)
+               goto out;
+       if (status) {
+               err = -EINVAL;
+               goto out;
+       }
+
+       memcpy(gid->raw + 8, out_mad->data + (index % 8) * 16, 8);
+
+ out:
+       kfree(in_mad);
+       kfree(out_mad);
+       return err;
+}
+
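+/*
+ * Per-process context: allocate a UAR, map it into the kernel, wrap it in
+ * an MDL and map it into the caller's address space, then set up the user
+ * doorbell table.
+ */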
+struct ib_ucontext *mthca_alloc_ucontext(struct ib_device *ibdev,
+                                               ci_umv_buf_t* const     p_umv_buf)
+{
+       struct mthca_alloc_ucontext_resp uresp;
+       struct mthca_ucontext           *context;
+       int                              err;
+
+       RtlZeroMemory(&uresp, sizeof uresp);
+
+       uresp.qp_tab_size = to_mdev(ibdev)->limits.num_qps;
+       if (mthca_is_memfree(to_mdev(ibdev)))
+               uresp.uarc_size = to_mdev(ibdev)->uar_table.uarc_size;
+       else
+               uresp.uarc_size = 0;
+
+       context = kzalloc(sizeof *context, GFP_KERNEL);
+       if (!context) {
+               err = -ENOMEM;
+               goto err_nomem;
+       }
+       
+       err = mthca_uar_alloc(to_mdev(ibdev), &context->uar);
+       if (err) 
+               goto err_uar_alloc;
+
+       /*
+        * Map the UAR into the calling process: ioremap it into kernel
+        * space, build an MDL for it, then map the MDL into user space.
+        */
+       context->kva = ioremap(context->uar.pfn << PAGE_SHIFT, PAGE_SIZE,&context->uar_size);
+       if (!context->kva) {
+               HCA_PRINT( TRACE_LEVEL_ERROR, HCA_DBG_LOW ,("Couldn't map kernel access region, aborting.\n") );
+               err = -ENOMEM;
+               goto err_ioremap;
+       }
+
+       /* build MDL */
+       context->mdl = IoAllocateMdl( context->kva, (ULONG)context->uar_size,
+               FALSE, TRUE, NULL );
+       if( !context->mdl ) {
+               err = -ENOMEM;
+               goto err_alloc_mdl;
+       }
+       MmBuildMdlForNonPagedPool( context->mdl );
+
+       /* Map the memory into the calling process's address space. */
+       __try   {
+               context->ibucontext.user_uar = MmMapLockedPagesSpecifyCache( context->mdl,
+                       UserMode, MmNonCached, NULL, FALSE, NormalPagePriority );
+       }
+       __except(EXCEPTION_EXECUTE_HANDLER) {
+               err = -EACCES;
+               goto err_map;
+       }
+
+       /* user_db_tab */
+       context->db_tab = mthca_init_user_db_tab(to_mdev(ibdev));
+       if (IS_ERR(context->db_tab)) {
+               err = PTR_ERR(context->db_tab);
+               goto err_init_user;
+       }
+
+       err = ib_copy_to_umv_buf(p_umv_buf, &uresp, sizeof uresp);
+       if (err) 
+               goto err_copy_to_umv_buf;
+
+       context->ibucontext.device = ibdev;
+       
+       atomic_set(&context->ibucontext.usecnt, 0);
+       return &context->ibucontext;
+
+err_copy_to_umv_buf:
+       mthca_cleanup_user_db_tab(to_mdev(ibdev), &context->uar,
+               context->db_tab);
+err_init_user: 
+       MmUnmapLockedPages( context->ibucontext.user_uar, context->mdl );
+err_map:
+       IoFreeMdl(context->mdl);
+err_alloc_mdl: 
+       iounmap(context->kva, PAGE_SIZE);
+err_ioremap:   
+       mthca_uar_free(to_mdev(ibdev), &context->uar);
+err_uar_alloc:
+       kfree(context);
+err_nomem:     
+       return ERR_PTR(err);
+}
+
+int mthca_dealloc_ucontext(struct ib_ucontext *context)
+{
+       struct mthca_ucontext *mucontext = to_mucontext(context);
+
+       mthca_cleanup_user_db_tab(to_mdev(context->device), &mucontext->uar,
+                                 mucontext->db_tab);
+       MmUnmapLockedPages( mucontext->ibucontext.user_uar, mucontext->mdl );
+       IoFreeMdl(mucontext->mdl);
+       iounmap(mucontext->kva, PAGE_SIZE);
+       mthca_uar_free(to_mdev(context->device), &mucontext->uar);
+       kfree(mucontext);
+       
+       return 0;
+}
+
+struct ib_pd *mthca_alloc_pd(struct ib_device *ibdev,
+                                   struct ib_ucontext *context,
+                                   ci_umv_buf_t* const                 p_umv_buf)
+{
+       int err;
+       struct mthca_pd *pd;
+       struct ibv_alloc_pd_resp resp;
+
+       /* sanity check */
+       if (p_umv_buf && p_umv_buf->command) {
+               if (p_umv_buf->output_size < sizeof(struct ibv_alloc_pd_resp)) {
+                       err = -EINVAL;
+                       goto err_param;
+               }
+       }
+       
+       pd = kmalloc(sizeof *pd, GFP_KERNEL);
+       if (!pd) {
+               err = -ENOMEM;
+               goto err_mem;
+       }
+
+       err = mthca_pd_alloc(to_mdev(ibdev), !context, pd);
+       if (err) {
+               goto err_pd_alloc;
+       }
+
+       if (p_umv_buf) {
+               resp.pd_handle = (u64)(UINT_PTR)pd;
+               resp.pdn = pd->pd_num;
+               if (ib_copy_to_umv_buf(p_umv_buf, &resp, sizeof(struct ibv_alloc_pd_resp))) {
+                       err = -EFAULT;
+                       goto err_copy;
+               }
+       }
+
+       return &pd->ibpd;
+
+err_copy:      
+       mthca_pd_free(to_mdev(ibdev), pd);
+err_pd_alloc:
+       kfree(pd);
+err_mem:
+err_param:
+       return ERR_PTR(err);
+}
+
+int mthca_dealloc_pd(struct ib_pd *pd)
+{
+       mthca_pd_free(to_mdev(pd->device), to_mpd(pd));
+       kfree(pd);
+
+       return 0;
+}
+
+struct ib_ah *mthca_ah_create(struct ib_pd *pd,
+                                    struct ib_ah_attr *ah_attr)
+{
+       int err;
+       struct mthca_ah *ah;
+
+       ah = kmalloc(sizeof *ah, GFP_ATOMIC);
+       if (!ah)
+               return ERR_PTR(-ENOMEM);
+
+       err = mthca_create_ah(to_mdev(pd->device), to_mpd(pd), ah_attr, ah);
+       if (err) {
+               kfree(ah);
+               return ERR_PTR(err);
+       }
+
+       return &ah->ibah;
+}
+
+int mthca_ah_destroy(struct ib_ah *ah)
+{
+       mthca_destroy_ah(to_mdev(ah->device), to_mah(ah));
+       kfree(ah);
+
+       return 0;
+}
+
+struct ib_srq *mthca_create_srq(struct ib_pd *pd,
+                                      struct ib_srq_init_attr *init_attr,
+                                      ci_umv_buf_t* const                      p_umv_buf)
+{
+#ifdef WIN_TO_BE_CHANGED
+       struct mthca_create_srq ucmd;
+       struct mthca_ucontext *context = NULL;
+       struct mthca_srq *srq;
+       int err;
+
+       srq = kmalloc(sizeof *srq, GFP_KERNEL);
+       if (!srq)
+               return ERR_PTR(-ENOMEM);
+
+       if (pd->ucontext) {
+               context = to_mucontext(pd->ucontext);
+
+               if (ib_copy_from_umv_buf(&ucmd, p_umv_buf, sizeof ucmd)) {
+                       err = -EFAULT;
+                       goto err_free;
+               }
+               err = mthca_map_user_db(to_mdev(pd->device), &context->uar,
+                                       context->db_tab, ucmd.db_index,
+                                       ucmd.db_page);
+
+               if (err)
+                       goto err_free;
+
+               srq->mr.ibmr.lkey = ucmd.lkey;
+               srq->db_index     = ucmd.db_index;
+       }
+
+       err = mthca_alloc_srq(to_mdev(pd->device), to_mpd(pd),
+                             &init_attr->attr, srq);
+
+       if (err && pd->ucontext)
+               mthca_unmap_user_db(to_mdev(pd->device), &context->uar,
+                                   context->db_tab, ucmd.db_index);
+
+       if (err)
+               goto err_free;
+
+       if (context && ib_copy_to_umv_buf(p_umv_buf, &srq->srqn, sizeof (u32))) {
+               mthca_free_srq(to_mdev(pd->device), srq);
+               err = -EFAULT;
+               goto err_free;
+       }
+
+       return &srq->ibsrq;
+
+err_free:
+       kfree(srq);
+
+       return ERR_PTR(err);
+#else
+       UNREFERENCED_PARAMETER(p_umv_buf);
+       UNREFERENCED_PARAMETER(init_attr);
+       UNREFERENCED_PARAMETER(pd);
+       return NULL;
+#endif
+}
+
+int mthca_destroy_srq(struct ib_srq *srq)
+{
+       struct mthca_ucontext *context;
+
+       if (srq->uobject) {
+               context = to_mucontext(srq->uobject->context);
+
+               mthca_unmap_user_db(to_mdev(srq->device), &context->uar,
+                                   context->db_tab, to_msrq(srq)->db_index);
+       }
+
+       mthca_free_srq(to_mdev(srq->device), to_msrq(srq));
+       kfree(srq);
+
+       return 0;
+}
+
+struct ib_qp *mthca_create_qp(struct ib_pd *pd,
+                                    struct ib_qp_init_attr *init_attr,
+                                     ci_umv_buf_t* const                       p_umv_buf)
+{
+       struct ibv_create_qp ucmd = {0};
+       struct mthca_qp *qp = NULL;
+       struct mthca_ucontext *context = NULL;
+       int err;
+
+       switch (init_attr->qp_type) {
+       case IB_QPT_RELIABLE_CONN:
+       case IB_QPT_UNRELIABLE_CONN:
+       case IB_QPT_UNRELIABLE_DGRM:
+       {
+
+               qp = kmalloc(sizeof *qp, GFP_KERNEL);
+               if (!qp) {
+                       err = -ENOMEM;
+                       goto err_mem;
+               }
+
+               if (pd->ucontext) {
+                       context = to_mucontext(pd->ucontext);
+
+                       if (ib_copy_from_umv_buf(&ucmd, p_umv_buf, sizeof ucmd)) {
+                               err = -EFAULT;
+                               goto err_copy;
+                       }
+
+                       err = mthca_map_user_db(to_mdev(pd->device), &context->uar,
+                                               context->db_tab,
+                                               ucmd.sq_db_index, ucmd.sq_db_page);
+                       if (err) 
+                               goto err_map1;
+
+                       err = mthca_map_user_db(to_mdev(pd->device), &context->uar,
+                                               context->db_tab,
+                                               ucmd.rq_db_index, ucmd.rq_db_page);
+                       if (err) 
+                               goto err_map2;
+
+                       qp->mr.ibmr.lkey = ucmd.lkey;
+                       qp->sq.db_index  = ucmd.sq_db_index;
+                       qp->rq.db_index  = ucmd.rq_db_index;
+               }
+
+               err = mthca_alloc_qp(to_mdev(pd->device), to_mpd(pd),
+                                    to_mcq(init_attr->send_cq),
+                                    to_mcq(init_attr->recv_cq),
+                                    init_attr->qp_type, init_attr->sq_sig_type,
+                                    &init_attr->cap, qp);
+
+               if (err) {
+                       if (pd->ucontext)
+                               goto err_alloc_qp_user;
+                       else
+                               goto err_copy;
+               }
+
+               qp->ibqp.qp_num = qp->qpn;
+               break;
+       }
+       case IB_QPT_QP0:
+       case IB_QPT_QP1:
+       {
+               /* Don't allow userspace to create special QPs */
+               if (pd->ucontext) {
+                       err = -EINVAL;
+                       goto err_inval;
+               }
+
+               qp = kmalloc(sizeof (struct mthca_sqp), GFP_KERNEL);
+               if (!qp) {
+                       err = -ENOMEM;
+                       goto err_mem;
+               }
+
+               qp->ibqp.qp_num = init_attr->qp_type == IB_QPT_QP0 ? 0 : 1;
+
+               err = mthca_alloc_sqp(to_mdev(pd->device), to_mpd(pd),
+                                     to_mcq(init_attr->send_cq),
+                                     to_mcq(init_attr->recv_cq),
+                                     init_attr->sq_sig_type, &init_attr->cap,
+                                     qp->ibqp.qp_num, init_attr->port_num,
+                                     to_msqp(qp));
+               if (err)
+                       goto err_alloc_sqp;
+               
+               break;
+       }
+       default:
+               /* Don't support raw QPs */
+               err = -ENOSYS;
+               goto err_unsupported;
+       }
+
+       init_attr->cap.max_send_wr     = qp->sq.max;
+       init_attr->cap.max_recv_wr     = qp->rq.max;
+       init_attr->cap.max_send_sge    = qp->sq.max_gs;
+       init_attr->cap.max_recv_sge    = qp->rq.max_gs;
+       init_attr->cap.max_inline_data    = qp->max_inline_data;
+
+       return &qp->ibqp;
+
+               
+err_alloc_qp_user:
+       if (pd->ucontext) 
+               mthca_unmap_user_db(to_mdev(pd->device),
+                       &context->uar, context->db_tab, ucmd.rq_db_index);
+err_map2:
+       if (pd->ucontext) 
+               mthca_unmap_user_db(to_mdev(pd->device),
+                       &context->uar, context->db_tab, ucmd.sq_db_index);
+err_map1:
+err_copy:
+err_alloc_sqp:
+       if (qp)
+               kfree(qp);
+err_mem:
+err_inval:
+err_unsupported:
+       return ERR_PTR(err);
+}
+
+int mthca_destroy_qp(struct ib_qp *qp)
+{
+       if (qp->ucontext) {
+               mthca_unmap_user_db(to_mdev(qp->device),
+                                   &to_mucontext(qp->ucontext)->uar,
+                                   to_mucontext(qp->ucontext)->db_tab,
+                                   to_mqp(qp)->sq.db_index);
+               mthca_unmap_user_db(to_mdev(qp->device),
+                                   &to_mucontext(qp->ucontext)->uar,
+                                   to_mucontext(qp->ucontext)->db_tab,
+                                   to_mqp(qp)->rq.db_index);
+       }
+       mthca_free_qp(to_mdev(qp->device), to_mqp(qp));
+       kfree(qp);
+       return 0;
+}
+
+struct ib_cq *mthca_create_cq(struct ib_device *ibdev, int entries,
+                                    struct ib_ucontext *context,
+                                    ci_umv_buf_t* const                        p_umv_buf)
+{
+       struct ibv_create_cq ucmd = {0};
+       struct mthca_cq *cq;
+       int nent;
+       int err;
+
+       if (entries < 1 || entries > to_mdev(ibdev)->limits.max_cqes)   
+               return ERR_PTR(-EINVAL);
+
+       if (context) {
+               if (ib_copy_from_umv_buf(&ucmd, p_umv_buf, sizeof ucmd))
+                       return ERR_PTR(-EFAULT);
+
+               err = mthca_map_user_db(to_mdev(ibdev), &to_mucontext(context)->uar,
+                                       to_mucontext(context)->db_tab,
+                                       ucmd.set_db_index, ucmd.set_db_page);
+               if (err)
+                       return ERR_PTR(err);
+
+               err = mthca_map_user_db(to_mdev(ibdev), &to_mucontext(context)->uar,
+                                       to_mucontext(context)->db_tab,
+                                       ucmd.arm_db_index, ucmd.arm_db_page);
+               if (err)
+                       goto err_unmap_set;
+       }
+
+       cq = kmalloc(sizeof *cq, GFP_KERNEL);
+       if (!cq) {
+               err = -ENOMEM;
+               goto err_unmap_arm;
+       }
+
+       if (context) {
+               cq->mr.ibmr.lkey = ucmd.lkey;
+               cq->set_ci_db_index = ucmd.set_db_index;
+               cq->arm_db_index    = ucmd.arm_db_index;
+       }
+
+       for (nent = 1; nent <= entries; nent <<= 1)
+               ; /* nothing */
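+       /* nent is now the smallest power of two strictly greater than 'entries' */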
+
+       err = mthca_init_cq(to_mdev(ibdev), nent, 
+                           context ? to_mucontext(context) : NULL,
+                           context ? ucmd.mr.pdn : to_mdev(ibdev)->driver_pd.pd_num,
+                           cq);
+       if (err)
+               goto err_free;
+
+       if (context) {
+               struct ibv_create_cq_resp *create_cq_resp = (struct ibv_create_cq_resp *)(void*)p_umv_buf->p_inout_buf;
+               create_cq_resp->cqn = cq->cqn;
+       }
+
+       HCA_PRINT( TRACE_LEVEL_INFORMATION, HCA_DBG_LOW ,
+               ("uctx %p, cq_hndl %p, cq_num %#x, cqe  %#x\n",
+               context, &cq->ibcq, cq->cqn, cq->ibcq.cqe ) );
+       
+       return &cq->ibcq;
+
+err_free:
+       kfree(cq);
+
+err_unmap_arm:
+       if (context)
+               mthca_unmap_user_db(to_mdev(ibdev), &to_mucontext(context)->uar,
+                                   to_mucontext(context)->db_tab, ucmd.arm_db_index);
+
+err_unmap_set:
+       if (context)
+               mthca_unmap_user_db(to_mdev(ibdev), &to_mucontext(context)->uar,
+                                   to_mucontext(context)->db_tab, ucmd.set_db_index);
+
+       return ERR_PTR(err);
+}
+
+int mthca_destroy_cq(struct ib_cq *cq)
+{
+       if (cq->ucontext) {
+               mthca_unmap_user_db(to_mdev(cq->device),
+                                   &to_mucontext(cq->ucontext)->uar,
+                                   to_mucontext(cq->ucontext)->db_tab,
+                                   to_mcq(cq)->arm_db_index);
+               mthca_unmap_user_db(to_mdev(cq->device),
+                                   &to_mucontext(cq->ucontext)->uar,
+                                   to_mucontext(cq->ucontext)->db_tab,
+                                   to_mcq(cq)->set_ci_db_index);
+       }
+       mthca_free_cq(to_mdev(cq->device), to_mcq(cq));
+       kfree(cq);
+
+       return 0;
+}
+
+static
+mthca_mpt_access_t
+map_qp_mpt(
+       IN                              mthca_qp_access_t                               qp_acl)
+{
+#define ACL_MTHCA(mfl,ifl) if (qp_acl & mfl)   mpt_acl |= ifl
+       mthca_mpt_access_t mpt_acl = 0;
+
+       ACL_MTHCA(MTHCA_ACCESS_REMOTE_READ,MTHCA_MPT_FLAG_REMOTE_READ);
+       ACL_MTHCA(MTHCA_ACCESS_REMOTE_WRITE,MTHCA_MPT_FLAG_REMOTE_WRITE);
+       ACL_MTHCA(MTHCA_ACCESS_REMOTE_ATOMIC,MTHCA_MPT_FLAG_ATOMIC);
+       ACL_MTHCA(MTHCA_ACCESS_LOCAL_WRITE,MTHCA_MPT_FLAG_LOCAL_WRITE);
+
+       return (mpt_acl | MTHCA_MPT_FLAG_LOCAL_READ);
+}
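+/* For example, a QP ACL of MTHCA_ACCESS_LOCAL_WRITE | MTHCA_ACCESS_REMOTE_READ
+ * maps to MTHCA_MPT_FLAG_LOCAL_WRITE | MTHCA_MPT_FLAG_REMOTE_READ |
+ * MTHCA_MPT_FLAG_LOCAL_READ (local read access is always granted). */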
+
+struct ib_mr *mthca_get_dma_mr(struct ib_pd *pd, mthca_qp_access_t acc)
+{
+       struct mthca_mr *mr;
+       int err;
+
+       mr = kmalloc(sizeof *mr, GFP_KERNEL);
+       if (!mr)
+               return ERR_PTR(-ENOMEM);
+       RtlZeroMemory(mr, sizeof *mr);
+
+       err = mthca_mr_alloc_notrans(to_mdev(pd->device),
+                                    to_mpd(pd)->pd_num,
+                                    map_qp_mpt(acc), mr);
+
+       if (err) {
+               kfree(mr);
+               return ERR_PTR(err);
+       }
+
+       return &mr->ibmr;
+}
+
+struct ib_mr *mthca_reg_phys_mr(struct ib_pd       *pd,
+                                      struct ib_phys_buf *buffer_list,
+                                      int                 num_phys_buf,
+                                      mthca_qp_access_t                 acc,
+                                      u64                *iova_start)
+{
+       struct mthca_mr *mr;
+       u64 *page_list;
+       u64 total_size;
+       u64 mask;
+       int shift;
+       int npages;
+       int err;
+       int i, j, n;
+
+       /* First check that we have enough alignment */
+       if ((*iova_start & ~PAGE_MASK) != (buffer_list[0].addr & ~PAGE_MASK))
+               return ERR_PTR(-EINVAL);
+
+       if (num_phys_buf > 1 &&
+           ((buffer_list[0].addr + buffer_list[0].size) & ~PAGE_MASK))
+               return ERR_PTR(-EINVAL);
+
+       mask = 0;
+       total_size = 0;
+       for (i = 0; i < num_phys_buf; ++i) {
+               if (i != 0)
+                       mask |= buffer_list[i].addr;
+               if (i != num_phys_buf - 1)
+                       mask |= buffer_list[i].addr + buffer_list[i].size;
+
+               total_size += buffer_list[i].size;
+       }
+
+       if (mask & ~PAGE_MASK)
+               return ERR_PTR(-EINVAL);
+
+       /* Find largest page shift we can use to cover buffers */
+       for (shift = PAGE_SHIFT; shift < 31; ++shift)
+               if (num_phys_buf > 1) {
+                       if ((1ULL << shift) & mask)
+                               break;
+               } else {
+                       if (1ULL << shift >=
+                           buffer_list[0].size +
+                           (buffer_list[0].addr & ((1ULL << shift) - 1)))
+                               break;
+               }
+
+       buffer_list[0].size += buffer_list[0].addr & ((1ULL << shift) - 1);
+       buffer_list[0].addr &= ~0ull << shift;
+
+       mr = kmalloc(sizeof *mr, GFP_KERNEL);
+       if (!mr)
+               return ERR_PTR(-ENOMEM);
+       RtlZeroMemory(mr, sizeof *mr);
+
+       npages = 0;
+       for (i = 0; i < num_phys_buf; ++i)
+               npages += (int)((buffer_list[i].size + (1ULL << shift) - 1) >> shift);
+
+       if (!npages)
+               return &mr->ibmr;
+
+       page_list = kmalloc(npages * sizeof *page_list, GFP_KERNEL);
+       if (!page_list) {
+               kfree(mr);
+               return ERR_PTR(-ENOMEM);
+       }
+
+       n = 0;
+       for (i = 0; i < num_phys_buf; ++i)
+               for (j = 0;
+                    j < (buffer_list[i].size + (1ULL << shift) - 1) >> shift;
+                    ++j)
+                       page_list[n++] = buffer_list[i].addr + ((u64) j << shift);
+
+       HCA_PRINT( TRACE_LEVEL_VERBOSE ,HCA_DBG_LOW ,("Registering memory at %I64x (iova %I64x) "
+                 "in PD %x; shift %d, npages %d.\n",
+                 (unsigned long long) buffer_list[0].addr,
+                 (unsigned long long) *iova_start,
+                 to_mpd(pd)->pd_num,
+                 shift, npages));
+
+       err = mthca_mr_alloc_phys(to_mdev(pd->device),
+                                 to_mpd(pd)->pd_num,
+                                 page_list, shift, npages,
+                                 *iova_start, total_size,
+                                 map_qp_mpt(acc), mr);
+
+       if (err) {
+               kfree(page_list);
+               kfree(mr);
+               return ERR_PTR(err);
+       }
+
+       kfree(page_list);
+       return &mr->ibmr;
+}
+
+struct ib_mr *mthca_reg_user_mr(struct ib_pd *pd, 
+       void* __ptr64   vaddr, uint64_t length, uint64_t hca_va, mthca_qp_access_t acc)
+{
+       struct mthca_dev *dev = to_mdev(pd->device);
+       struct ib_umem_chunk *chunk;
+       struct mthca_mr *mr;
+       struct ib_umem *region;
+       u64 *pages;
+       int shift, n, len;
+       int i, j, k;
+       int err = 0;
+
+       HCA_ENTER(HCA_DBG_MEMORY);
+       mr = kzalloc(sizeof *mr, GFP_KERNEL);
+       if (!mr) {
+               err = -ENOMEM;
+               goto err_nomem;
+       }
+       region = &mr->umem;
+
+       /*
+        * We ask for writable memory if any access flags other than
+        * "remote read" are set.  "Local write" and "remote write"
+        * obviously require write access.  "Remote atomic" can do
+        * things like fetch and add, which will modify memory, and
+        * "MW bind" can change permissions by binding a window.
+        */
+       err = ibv_umem_get(pd->device, region,
+                         (void *)vaddr, (size_t)length,
+                         !!(acc & ~MTHCA_ACCESS_REMOTE_READ));
+       if (err)
+               goto err_umem_get;
+
+       region->virt_base = hca_va;     /* va in HCA */
+
+       n = 0;
+       shift = ffs(region->page_size) - 1;
+       list_for_each_entry(chunk, &region->chunk_list, list,struct ib_umem_chunk)
+               n += chunk->nents;
+
+       mr->mtt = mthca_alloc_mtt(dev, n);
+       if (IS_ERR(mr->mtt)) {
+               err = PTR_ERR(mr->mtt);
+               goto err_alloc_mtt;
+       }
+
+       pages = (u64 *) kmalloc(PAGE_SIZE,GFP_KERNEL);
+       if (!pages) {
+               err = -ENOMEM;
+               goto err_pages;
+       }
+
+       i = n = 0;
+
+       list_for_each_entry(chunk, &region->chunk_list, list,struct ib_umem_chunk)
+               for (j = 0; j < chunk->nmap; ++j) {
+                       len = sg_dma_len(&chunk->page_list[j]) >> shift;
+                       for (k = 0; k < len; ++k) {
+                               pages[i++] = sg_dma_address(&chunk->page_list[j]) +
+                                       region->page_size * k;
+                               /*
+                                * Be friendly to WRITE_MTT command
+                                * and leave two empty slots for the
+                                * index and reserved fields of the
+                                * mailbox.
+                                */
+                               if (i == PAGE_SIZE / sizeof (u64) - 2) {
+                                       err = mthca_write_mtt(dev, mr->mtt,
+                                                             n, pages, i);
+                                       if (err)
+                                               goto err_write_mtt;
+                                       n += i;
+                                       i = 0;
+                               }
+                       }
+               }
+
+       if (i) {
+               err = mthca_write_mtt(dev, mr->mtt, n, pages, i);
+               if (err)
+                       goto err_write_mtt;
+       }       
+
+       err = mthca_mr_alloc(dev, to_mpd(pd)->pd_num, shift, region->virt_base,
+                            region->length, map_qp_mpt(acc), mr);
+       if (err)
+               goto err_mt_alloc;
+
+       free_page((void*) pages);
+       HCA_EXIT(HCA_DBG_MEMORY);
+       return &mr->ibmr;
+
+err_mt_alloc:
+err_write_mtt:
+       free_page((void*) pages);
+err_pages:
+       mthca_free_mtt(dev, mr->mtt);
+err_alloc_mtt:
+       ibv_umem_release(pd->device, region);
+err_umem_get:  
+       kfree(mr);
+err_nomem:     
+       HCA_EXIT(HCA_DBG_MEMORY);
+       return ERR_PTR(err);
+}
+
+int mthca_dereg_mr(struct ib_mr *mr)
+{
+       struct mthca_mr *mmr = to_mmr(mr);
+       mthca_free_mr(to_mdev(mr->device), mmr);
+       if (mr->pd->ucontext)
+               ibv_umem_release(mr->pd->device, &mmr->umem);
+       kfree(mmr);
+       return 0;
+}
+
+struct ib_fmr *mthca_alloc_fmr(struct ib_pd *pd, mthca_qp_access_t acc,
+                                     struct ib_fmr_attr *fmr_attr)
+{
+       struct mthca_fmr *fmr;
+       int err;
+
+       fmr = kmalloc(sizeof *fmr, GFP_KERNEL);
+       if (!fmr)
+               return ERR_PTR(-ENOMEM);
+
+       memcpy(&fmr->attr, fmr_attr, sizeof *fmr_attr);
+       err = mthca_fmr_alloc(to_mdev(pd->device), to_mpd(pd)->pd_num,
+                            map_qp_mpt(acc), fmr);
+
+       if (err) {
+               kfree(fmr);
+               return ERR_PTR(err);
+       }
+
+       return &fmr->ibmr;
+}
+
+int mthca_dealloc_fmr(struct ib_fmr *fmr)
+{
+       struct mthca_fmr *mfmr = to_mfmr(fmr);
+       int err;
+
+       err = mthca_free_fmr(to_mdev(fmr->device), mfmr);
+       if (err)
+               return err;
+
+       kfree(mfmr);
+       return 0;
+}
+
+int mthca_unmap_fmr(struct list_head *fmr_list)
+{
+       struct ib_fmr *fmr;
+       int err;
+       u8 status;
+       struct mthca_dev *mdev = NULL;
+
+       list_for_each_entry(fmr, fmr_list, list,struct ib_fmr) {
+               if (mdev && to_mdev(fmr->device) != mdev)
+                       return -EINVAL;
+               mdev = to_mdev(fmr->device);
+       }
+
+       if (!mdev)
+               return 0;
+
+       if (mthca_is_memfree(mdev)) {
+               list_for_each_entry(fmr, fmr_list, list,struct ib_fmr)
+                       mthca_arbel_fmr_unmap(mdev, to_mfmr(fmr));
+
+               wmb();
+       } else
+               list_for_each_entry(fmr, fmr_list, list,struct ib_fmr)
+                       mthca_tavor_fmr_unmap(mdev, to_mfmr(fmr));
+
+       err = mthca_SYNC_TPT(mdev, &status);
+       if (err)
+               return err;
+       if (status)
+               return -EINVAL;
+       return 0;
+}
+
+static int mthca_init_node_data(struct mthca_dev *dev)
+{
+       struct ib_smp *in_mad  = NULL;
+       struct ib_smp *out_mad = NULL;
+       int err = -ENOMEM;
+       u8 status;
+
+       in_mad  = kzalloc(sizeof *in_mad, GFP_KERNEL);
+       out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
+       if (!in_mad || !out_mad)
+               goto out;
+
+       init_query_mad(in_mad);
+       in_mad->attr_id = IB_SMP_ATTR_NODE_INFO;
+
+       err = mthca_MAD_IFC(dev, 1, 1,
+                           1, NULL, NULL, in_mad, out_mad,
+                           &status);
+       if (err)
+               goto out;
+       if (status) {
+               err = -EINVAL;
+               goto out;
+       }
+
+       memcpy(&dev->ib_dev.node_guid, out_mad->data + 12, 8);
+
+out:
+       kfree(in_mad);
+       kfree(out_mad);
+       return err;
+}
+
+int mthca_register_device(struct mthca_dev *dev)
+{
+       int ret;
+
+       ret = mthca_init_node_data(dev);        
+       if (ret)
+               return ret;
+
+       strlcpy(dev->ib_dev.name, "mthca%d", IB_DEVICE_NAME_MAX);
+       dev->ib_dev.node_type            = IB_NODE_CA;
+       dev->ib_dev.phys_port_cnt        = (u8)dev->limits.num_ports;
+       dev->ib_dev.mdev                                = dev;
+       dev->ib_dev.query_device         = mthca_query_device;
+       dev->ib_dev.query_port           = mthca_query_port;
+       dev->ib_dev.modify_port          = mthca_modify_port;
+       dev->ib_dev.query_pkey           = mthca_query_pkey;
+       dev->ib_dev.query_gid            = mthca_query_gid;
+       dev->ib_dev.alloc_ucontext       = mthca_alloc_ucontext;
+       dev->ib_dev.dealloc_ucontext     = mthca_dealloc_ucontext;
+       dev->ib_dev.alloc_pd             = mthca_alloc_pd;
+       dev->ib_dev.dealloc_pd           = mthca_dealloc_pd;
+       dev->ib_dev.create_ah            = mthca_ah_create;
+       dev->ib_dev.destroy_ah           = mthca_ah_destroy;
+
+       if (dev->mthca_flags & MTHCA_FLAG_SRQ) {
+               dev->ib_dev.create_srq           = mthca_create_srq;
+               dev->ib_dev.modify_srq           = mthca_modify_srq;
+               dev->ib_dev.destroy_srq          = mthca_destroy_srq;
+
+               if (mthca_is_memfree(dev))
+                       dev->ib_dev.post_srq_recv = mthca_arbel_post_srq_recv;
+               else
+                       dev->ib_dev.post_srq_recv = mthca_tavor_post_srq_recv;
+       }
+
+       dev->ib_dev.create_qp            = mthca_create_qp;
+       dev->ib_dev.modify_qp            = mthca_modify_qp;
+       dev->ib_dev.destroy_qp           = mthca_destroy_qp;
+       dev->ib_dev.create_cq            = mthca_create_cq;
+       dev->ib_dev.destroy_cq           = mthca_destroy_cq;
+       dev->ib_dev.poll_cq              = mthca_poll_cq;
+       dev->ib_dev.get_dma_mr           = mthca_get_dma_mr;
+       dev->ib_dev.reg_phys_mr          = mthca_reg_phys_mr;
+       dev->ib_dev.reg_user_mr          = mthca_reg_user_mr;
+       dev->ib_dev.dereg_mr             = mthca_dereg_mr;
+
+       if (dev->mthca_flags & MTHCA_FLAG_FMR) {
+               dev->ib_dev.alloc_fmr            = mthca_alloc_fmr;
+               dev->ib_dev.unmap_fmr            = mthca_unmap_fmr;
+               dev->ib_dev.dealloc_fmr          = mthca_dealloc_fmr;
+               if (mthca_is_memfree(dev))
+                       dev->ib_dev.map_phys_fmr = mthca_arbel_map_phys_fmr;
+               else
+                       dev->ib_dev.map_phys_fmr = mthca_tavor_map_phys_fmr;
+       }
+
+       dev->ib_dev.attach_mcast         = mthca_multicast_attach;
+       dev->ib_dev.detach_mcast         = mthca_multicast_detach;
+       dev->ib_dev.process_mad          = mthca_process_mad;
+
+       if (mthca_is_memfree(dev)) {
+               dev->ib_dev.req_notify_cq = mthca_arbel_arm_cq;
+               dev->ib_dev.post_send     = mthca_arbel_post_send;
+               dev->ib_dev.post_recv     = mthca_arbel_post_receive;
+       } else {
+               dev->ib_dev.req_notify_cq = mthca_tavor_arm_cq;
+               dev->ib_dev.post_send     = mthca_tavor_post_send;
+               dev->ib_dev.post_recv     = mthca_tavor_post_receive;
+       }
+
+       KeInitializeMutex(&dev->cap_mask_mutex, 0);
+
+       ret = ib_register_device(&dev->ib_dev);
+       if (ret)
+               return ret;
+
+       mthca_start_catas_poll(dev);
+
+       return 0;
+}
+
+void mthca_unregister_device(struct mthca_dev *dev)
+{
+       mthca_stop_catas_poll(dev);
+       ib_unregister_device(&dev->ib_dev);
+}
diff --git a/trunk/hw/mthca/kernel/mthca_provider.h b/trunk/hw/mthca/kernel/mthca_provider.h
new file mode 100644 (file)
index 0000000..2e71579
--- /dev/null
@@ -0,0 +1,441 @@
+/*
+ * Copyright (c) 2004 Topspin Communications.  All rights reserved.
+ * Copyright (c) 2005 Cisco Systems.  All rights reserved.
+ * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id: mthca_provider.h 3047 2005-08-10 03:59:35Z roland $
+ */
+
+#ifndef MTHCA_PROVIDER_H
+#define MTHCA_PROVIDER_H
+
+#include <ib_verbs.h>
+#include <ib_pack.h>
+#include <iba/ib_ci.h>
+
+typedef uint32_t mthca_mpt_access_t;
+#define MTHCA_MPT_FLAG_ATOMIC        (1 << 14)
+#define MTHCA_MPT_FLAG_REMOTE_WRITE  (1 << 13)
+#define MTHCA_MPT_FLAG_REMOTE_READ   (1 << 12)
+#define MTHCA_MPT_FLAG_LOCAL_WRITE   (1 << 11)
+#define MTHCA_MPT_FLAG_LOCAL_READ    (1 << 10)
+
+union mthca_buf {
+       struct scatterlist direct;
+       struct scatterlist *page_list;
+};
+
+struct mthca_uar {
+       PFN_NUMBER pfn;
+       int           index;
+};
+
+struct mthca_user_db_table;
+
+struct mthca_ucontext {
+       struct ib_ucontext          ibucontext;
+       struct mthca_uar            uar;
+       struct mthca_user_db_table *db_tab;
+       // for user UAR 
+       PMDL    mdl;
+       PVOID   kva;
+       SIZE_T uar_size;        
+};
+
+struct mthca_mtt;
+
+struct mthca_mr {
+       //NB: the start of this structure must be laid out identically to mlnx_mro_t !
+       //NB: mlnx_mro_t itself is not embedded here, so as not to mix driver and provider structures
+       struct ib_mr      ibmr;
+#ifdef WIN_TO_BE_REMOVED
+       mt_iobuf_t              iobuf;
+#endif
+       struct mthca_mtt *mtt;
+       struct ib_umem umem;
+};
+
+struct mthca_fmr {
+       struct ib_fmr      ibmr;
+       struct ib_fmr_attr attr;
+       struct mthca_mtt  *mtt;
+       int                maps;
+       union {
+               struct {
+                       struct mthca_mpt_entry __iomem *mpt;
+                       u64 __iomem *mtts;
+               } tavor;
+               struct {
+                       struct mthca_mpt_entry *mpt;
+                       __be64 *mtts;
+               } arbel;
+       } mem;
+};
+
+struct mthca_pd {
+       struct ib_pd    ibpd;
+       u32             pd_num;
+       atomic_t        sqp_count;
+       struct mthca_mr ntmr;
+       int             privileged;
+};
+
+struct mthca_eq {
+       struct mthca_dev      *dev;
+       int                    eqn;
+       u32                    eqn_mask;
+       u32                    cons_index;
+       u16                    msi_x_vector;
+       u16                    msi_x_entry;
+       int                    have_irq;
+       int                    nent;
+       struct scatterlist *page_list;
+       struct mthca_mr        mr;
+       KDPC                            dpc;                    /* DPC for MSI-X interrupts */
+       spinlock_t  lock;                       /* spinlock for simult DPCs */
+};
+
+struct mthca_av;
+
+enum mthca_ah_type {
+       MTHCA_AH_ON_HCA,
+       MTHCA_AH_PCI_POOL,
+       MTHCA_AH_KMALLOC
+};
+
+struct mthca_ah {
+       struct ib_ah       ibah;
+       enum mthca_ah_type type;
+       u32                key;
+       struct mthca_av   *av;
+       dma_addr_t         avdma;
+};
+
+/*
+ * Quick description of our CQ/QP locking scheme:
+ *
+ * We have one global lock that protects dev->cq/qp_table.  Each
+ * struct mthca_cq/qp also has its own lock.  An individual qp lock
+ * may be taken inside of an individual cq lock.  Both cqs attached to
+ * a qp may be locked, with the send cq locked first.  No other
+ * nesting should be done.
+ *
+ * Each struct mthca_cq/qp also has an atomic_t ref count.  The
+ * pointer from the cq/qp_table to the struct counts as one reference.
+ * This reference also is good for access through the consumer API, so
+ * modifying the CQ/QP etc doesn't need to take another reference.
+ * Access because of a completion being polled does need a reference.
+ *
+ * Finally, each struct mthca_cq/qp has a wait_queue_head_t for the
+ * destroy function to sleep on.
+ *
+ * This means that access from the consumer API requires nothing but
+ * taking the struct's lock.
+ *
+ * Access because of a completion event should go as follows:
+ * - lock cq/qp_table and look up struct
+ * - increment ref count in struct
+ * - drop cq/qp_table lock
+ * - lock struct, do your thing, and unlock struct
+ * - decrement ref count; if zero, wake up waiters
+ *
+ * To destroy a CQ/QP, we can do the following:
+ * - lock cq/qp_table, remove pointer, unlock cq/qp_table lock
+ * - decrement ref count
+ * - wait_event until ref count is zero
+ *
+ * It is the consumer's responsibility to make sure that no QP
+ * operations (WQE posting or state modification) are pending when the
+ * QP is destroyed.  Also, the consumer must make sure that calls to
+ * qp_modify are serialized.
+ *
+ * Possible optimizations (wait for profile data to see if/where we
+ * have locks bouncing between CPUs):
+ * - split cq/qp table lock into n separate (cache-aligned) locks,
+ *   indexed (say) by the page in the table
+ * - split QP struct lock into three (one for common info, one for the
+ *   send queue and one for the receive queue)
+ */
+//TODO: check correctness of the above requirement: "It is the consumer's responsibility to make sure that no QP
+// operations (WQE posting or state modification) are pending when the QP is destroyed"
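+/*
+ * Illustrative sketch of the completion-event path described above. The names
+ * (dev->cq_table, mthca_array_get, cq->refcount, cq->wait) follow the Linux
+ * mthca driver and are assumptions here, not necessarily the exact code used
+ * in this port:
+ *
+ *     spin_lock(&dev->cq_table.lock);
+ *     cq = mthca_array_get(&dev->cq_table.cq, cqn & (dev->limits.num_cqs - 1));
+ *     if (cq)
+ *             atomic_inc(&cq->refcount);
+ *     spin_unlock(&dev->cq_table.lock);
+ *     if (!cq)
+ *             return;
+ *     spin_lock(&cq->lock);
+ *     ... handle the completion event ...
+ *     spin_unlock(&cq->lock);
+ *     if (atomic_dec_and_test(&cq->refcount))
+ *             wake_up(&cq->wait);
+ */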
+
+struct mthca_cq {
+       struct ib_cq           ibcq;
+       void                                            *cq_context;    // leo: for IBAL shim
+       spinlock_t             lock;
+       atomic_t               refcount;
+       int                    cqn;
+       u32                    cons_index;
+       int                    is_direct;
+       int                    is_kernel;
+
+       /* Next fields are Arbel only */
+       int                    set_ci_db_index;
+       __be32                *set_ci_db;
+       int                    arm_db_index;
+       __be32                *arm_db;
+       int                    arm_sn;
+
+       union mthca_buf        queue;
+       struct mthca_mr        mr;
+       wait_queue_head_t      wait;
+};
+
+struct mthca_srq {
+       struct ib_srq           ibsrq;
+       spinlock_t              lock;
+       atomic_t                refcount;
+       int                     srqn;
+       int                     max;
+       int                     max_gs;
+       int                     wqe_shift;
+       int                     first_free;
+       int                     last_free;
+       u16                     counter;  /* Arbel only */
+       int                     db_index; /* Arbel only */
+       __be32                 *db;       /* Arbel only */
+       void                   *last;
+
+       int                     is_direct;
+       u64                    *wrid;
+       union mthca_buf         queue;
+       struct mthca_mr         mr;
+
+       wait_queue_head_t       wait;
+};
+
+struct mthca_wq {
+       spinlock_t lock;
+       int        max;
+       unsigned   next_ind;
+       unsigned   last_comp;
+       unsigned   head;
+       unsigned   tail;
+       void      *last;
+       int        max_gs;
+       int        wqe_shift;
+
+       int        db_index;    /* Arbel only */
+       __be32    *db;
+};
+
+struct mthca_qp {
+       struct ib_qp           ibqp;
+       void                                            *qp_context;    // leo: for IBAL shim
+       //TODO: added only because ibv_query_qp is absent;
+       // later it may be worth replacing this with struct ib_qp_attr qp_attr;
+       struct ib_qp_init_attr qp_init_attr;    // leo: for query_qp
+       atomic_t               refcount;
+       u32                    qpn;
+       int                    is_direct;
+       u8                     transport;
+       u8                     state;
+       u8                     atomic_rd_en;
+       u8                     resp_depth;
+
+       struct mthca_mr        mr;
+
+       struct mthca_wq        rq;
+       struct mthca_wq        sq;
+       enum ib_sig_type       sq_policy;
+       int                    send_wqe_offset;
+       int                    max_inline_data;
+
+       u64                   *wrid;
+       union mthca_buf        queue;
+
+       wait_queue_head_t      wait;
+};
+
+struct mthca_sqp {
+       struct mthca_qp qp;
+       int             port;
+       int             pkey_index;
+       u32             qkey;
+       u32             send_psn;
+       struct ib_ud_header ud_header;
+       struct scatterlist sg;
+};
+
+static inline struct mthca_ucontext *to_mucontext(struct ib_ucontext *ibucontext)
+{
+       return container_of(ibucontext, struct mthca_ucontext, ibucontext);
+}
+
+static inline struct mthca_fmr *to_mfmr(struct ib_fmr *ibmr)
+{
+       return container_of(ibmr, struct mthca_fmr, ibmr);
+}
+
+static inline struct mthca_mr *to_mmr(struct ib_mr *ibmr)
+{
+       return container_of(ibmr, struct mthca_mr, ibmr);
+}
+
+static inline struct mthca_pd *to_mpd(struct ib_pd *ibpd)
+{
+       return container_of(ibpd, struct mthca_pd, ibpd);
+}
+
+static inline struct mthca_ah *to_mah(struct ib_ah *ibah)
+{
+       return container_of(ibah, struct mthca_ah, ibah);
+}
+
+static inline struct mthca_cq *to_mcq(struct ib_cq *ibcq)
+{
+       return container_of(ibcq, struct mthca_cq, ibcq);
+}
+
+static inline struct mthca_srq *to_msrq(struct ib_srq *ibsrq)
+{
+       return container_of(ibsrq, struct mthca_srq, ibsrq);
+}
+
+static inline struct mthca_qp *to_mqp(struct ib_qp *ibqp)
+{
+       return container_of(ibqp, struct mthca_qp, ibqp);
+}
+
+static inline struct mthca_sqp *to_msqp(struct mthca_qp *qp)
+{
+       return container_of(qp, struct mthca_sqp, qp);
+}
+
+static inline uint8_t start_port(struct ib_device *device)
+{
+       return device->node_type == IB_NODE_SWITCH ? 0 : 1;
+}
+
+static inline uint8_t end_port(struct ib_device *device)
+{
+       return device->node_type == IB_NODE_SWITCH ? 0 : device->phys_port_cnt;
+}
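+/* Per the IB spec, a switch exposes a single management port numbered 0, while
+ * a CA numbers its physical ports 1..phys_port_cnt; the two helpers above
+ * return that range. */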
+
+static inline int ib_copy_from_umv_buf(void *dest, ci_umv_buf_t* const p_umv_buf, size_t len)
+{
+       RtlCopyMemory(dest, p_umv_buf->p_inout_buf,  len);
+       return 0;
+}
+
+static inline int ib_copy_to_umv_buf(ci_umv_buf_t* const p_umv_buf, void *src, size_t len)
+{
+       if (p_umv_buf->output_size < len) {
+               p_umv_buf->status = IB_INSUFFICIENT_MEMORY;
+               p_umv_buf->output_size = 0;
+               return -EFAULT;
+       }
+       RtlCopyMemory(p_umv_buf->p_inout_buf,  src, len);
+       p_umv_buf->status = IB_SUCCESS;
+       p_umv_buf->output_size = (uint32_t)len;
+       return 0;
+}
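+/*
+ * Note: these two helpers play the role that ib_copy_from_udata/ib_copy_to_udata
+ * play in the Linux driver, marshalling verbs requests and responses through
+ * IBAL's ci_umv_buf_t instead of struct ib_udata.
+ */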
+
+
+
+// API
+int mthca_query_device(struct ib_device *ibdev,
+                                        struct ib_device_attr *props);
+
+int mthca_query_port(struct ib_device *ibdev,
+                           u8 port, struct ib_port_attr *props);
+
+int mthca_modify_port(struct ib_device *ibdev,
+                            u8 port, int port_modify_mask,
+                            struct ib_port_modify *props);
+
+int mthca_query_pkey(struct ib_device *ibdev,
+                           u8 port, u16 index, u16 *pkey);
+
+int mthca_query_gid(struct ib_device *ibdev, u8 port,
+                          int index, union ib_gid *gid);
+
+struct ib_ucontext *mthca_alloc_ucontext(struct ib_device *ibdev,
+                                               ci_umv_buf_t* const                     p_umv_buf);
+
+int mthca_dealloc_ucontext(struct ib_ucontext *context);
+
+struct ib_pd *mthca_alloc_pd(struct ib_device *ibdev,
+                                   struct ib_ucontext *context,
+                                   ci_umv_buf_t* const                 p_umv_buf);
+
+int mthca_dealloc_pd(struct ib_pd *pd);
+
+struct ib_ah *mthca_ah_create(struct ib_pd *pd,
+                                    struct ib_ah_attr *ah_attr);
+
+int mthca_ah_destroy(struct ib_ah *ah);
+
+struct ib_srq *mthca_create_srq(struct ib_pd *pd,
+                                      struct ib_srq_init_attr *init_attr,
+                                      ci_umv_buf_t* const                      p_umv_buf);
+
+int mthca_destroy_srq(struct ib_srq *srq);
+
+struct ib_qp *mthca_create_qp(struct ib_pd *pd,
+                                    struct ib_qp_init_attr *init_attr,
+                                    ci_umv_buf_t* const                        p_umv_buf);
+
+int mthca_destroy_qp(struct ib_qp *qp);
+
+struct ib_cq *mthca_create_cq(struct ib_device *ibdev, int entries,
+                                    struct ib_ucontext *context,
+                                    ci_umv_buf_t* const                        p_umv_buf);
+
+int mthca_destroy_cq(struct ib_cq *cq);
+
+struct ib_mr *mthca_get_dma_mr(struct ib_pd *pd, mthca_qp_access_t acc);
+
+struct ib_mr *mthca_reg_phys_mr(struct ib_pd       *pd,
+                                      struct ib_phys_buf *buffer_list,
+                                      int                 num_phys_buf,
+                                      mthca_qp_access_t                 acc,
+                                      u64                *iova_start);
+
+struct ib_mr *mthca_reg_user_mr(struct ib_pd *pd, 
+        void* __ptr64  vaddr, uint64_t length, uint64_t hca_va, mthca_qp_access_t acc);
+
+int mthca_dereg_mr(struct ib_mr *mr);
+
+struct ib_fmr *mthca_alloc_fmr(struct ib_pd *pd, mthca_qp_access_t acc,
+                                     struct ib_fmr_attr *fmr_attr);
+
+int mthca_dealloc_fmr(struct ib_fmr *fmr);
+
+int mthca_unmap_fmr(struct list_head *fmr_list);
+
+int mthca_poll_cq_list(
+       IN              struct ib_cq *ibcq, 
+       IN      OUT                     ib_wc_t** const                         pp_free_wclist,
+               OUT                     ib_wc_t** const                         pp_done_wclist );
+
+
+#endif /* MTHCA_PROVIDER_H */
diff --git a/trunk/hw/mthca/kernel/mthca_qp.c b/trunk/hw/mthca/kernel/mthca_qp.c
new file mode 100644 (file)
index 0000000..01c53cd
--- /dev/null
@@ -0,0 +1,2252 @@
+/*
+ * Copyright (c) 2004 Topspin Communications.  All rights reserved.
+ * Copyright (c) 2005 Cisco Systems. All rights reserved.
+ * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2004 Voltaire, Inc. All rights reserved. 
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id: mthca_qp.c 3047 2005-08-10 03:59:35Z roland $
+ */
+
+#include <ib_verbs.h>
+#include <ib_cache.h>
+#include <ib_pack.h>
+
+#include "mthca_dev.h"
+#if defined(EVENT_TRACING)
+#ifdef offsetof
+#undef offsetof
+#endif
+#include "mthca_qp.tmh"
+#endif
+#include "mthca_cmd.h"
+#include "mthca_memfree.h"
+#include "mthca_wqe.h"
+
+
+#ifdef ALLOC_PRAGMA
+#pragma alloc_text (PAGE, mthca_init_qp_table)
+#pragma alloc_text (PAGE, mthca_cleanup_qp_table)
+#endif
+
+enum {
+       MTHCA_MAX_DIRECT_QP_SIZE = 4 * PAGE_SIZE,
+       MTHCA_ACK_REQ_FREQ       = 10,
+       MTHCA_FLIGHT_LIMIT       = 9,
+       MTHCA_UD_HEADER_SIZE     = 72, /* largest UD header possible */
+       MTHCA_INLINE_HEADER_SIZE = 4,  /* data segment overhead for inline */
+       MTHCA_INLINE_CHUNK_SIZE  = 16  /* inline data segment chunk */
+};
+
+enum {
+       MTHCA_QP_STATE_RST  = 0,
+       MTHCA_QP_STATE_INIT = 1,
+       MTHCA_QP_STATE_RTR  = 2,
+       MTHCA_QP_STATE_RTS  = 3,
+       MTHCA_QP_STATE_SQE  = 4,
+       MTHCA_QP_STATE_SQD  = 5,
+       MTHCA_QP_STATE_ERR  = 6,
+       MTHCA_QP_STATE_DRAINING = 7
+};
+
+enum {
+       MTHCA_QP_ST_RC  = 0x0,
+       MTHCA_QP_ST_UC  = 0x1,
+       MTHCA_QP_ST_RD  = 0x2,
+       MTHCA_QP_ST_UD  = 0x3,
+       MTHCA_QP_ST_MLX = 0x7
+};
+
+enum {
+       MTHCA_QP_PM_MIGRATED = 0x3,
+       MTHCA_QP_PM_ARMED    = 0x0,
+       MTHCA_QP_PM_REARM    = 0x1
+};
+
+enum {
+       /* qp_context flags */
+       MTHCA_QP_BIT_DE  = 1 <<  8,
+       /* params1 */
+       MTHCA_QP_BIT_SRE = 1 << 15,
+       MTHCA_QP_BIT_SWE = 1 << 14,
+       MTHCA_QP_BIT_SAE = 1 << 13,
+       MTHCA_QP_BIT_SIC = 1 <<  4,
+       MTHCA_QP_BIT_SSC = 1 <<  3,
+       /* params2 */
+       MTHCA_QP_BIT_RRE = 1 << 15,
+       MTHCA_QP_BIT_RWE = 1 << 14,
+       MTHCA_QP_BIT_RAE = 1 << 13,
+       MTHCA_QP_BIT_RIC = 1 <<  4,
+       MTHCA_QP_BIT_RSC = 1 <<  3
+};
+
+#pragma pack(push,1)
+struct mthca_qp_path {
+       __be32 port_pkey;
+       u8     rnr_retry;
+       u8     g_mylmc;
+       __be16 rlid;
+       u8     ackto;
+       u8     mgid_index;
+       u8     static_rate;
+       u8     hop_limit;
+       __be32 sl_tclass_flowlabel;
+       u8     rgid[16];
+} ;
+
+struct mthca_qp_context {
+       __be32 flags;
+       __be32 tavor_sched_queue; /* Reserved on Arbel */
+       u8     mtu_msgmax;
+       u8     rq_size_stride;  /* Reserved on Tavor */
+       u8     sq_size_stride;  /* Reserved on Tavor */
+       u8     rlkey_arbel_sched_queue; /* Reserved on Tavor */
+       __be32 usr_page;
+       __be32 local_qpn;
+       __be32 remote_qpn;
+       u32    reserved1[2];
+       struct mthca_qp_path pri_path;
+       struct mthca_qp_path alt_path;
+       __be32 rdd;
+       __be32 pd;
+       __be32 wqe_base;
+       __be32 wqe_lkey;
+       __be32 params1;
+       __be32 reserved2;
+       __be32 next_send_psn;
+       __be32 cqn_snd;
+       __be32 snd_wqe_base_l;  /* Next send WQE on Tavor */
+       __be32 snd_db_index;    /* (debugging only entries) */
+       __be32 last_acked_psn;
+       __be32 ssn;
+       __be32 params2;
+       __be32 rnr_nextrecvpsn;
+       __be32 ra_buff_indx;
+       __be32 cqn_rcv;
+       __be32 rcv_wqe_base_l;  /* Next recv WQE on Tavor */
+       __be32 rcv_db_index;    /* (debugging only entries) */
+       __be32 qkey;
+       __be32 srqn;
+       __be32 rmsn;
+       __be16 rq_wqe_counter;  /* reserved on Tavor */
+       __be16 sq_wqe_counter;  /* reserved on Tavor */
+       u32    reserved3[18];
+} ;
+
+struct mthca_qp_param {
+       __be32 opt_param_mask;
+       u32    reserved1;
+       struct mthca_qp_context context;
+       u32    reserved2[62];
+} ;
+#pragma pack(pop)
+
+enum {
+       MTHCA_QP_OPTPAR_ALT_ADDR_PATH     = 1 << 0,
+       MTHCA_QP_OPTPAR_RRE               = 1 << 1,
+       MTHCA_QP_OPTPAR_RAE               = 1 << 2,
+       MTHCA_QP_OPTPAR_RWE               = 1 << 3,
+       MTHCA_QP_OPTPAR_PKEY_INDEX        = 1 << 4,
+       MTHCA_QP_OPTPAR_Q_KEY             = 1 << 5,
+       MTHCA_QP_OPTPAR_RNR_TIMEOUT       = 1 << 6,
+       MTHCA_QP_OPTPAR_PRIMARY_ADDR_PATH = 1 << 7,
+       MTHCA_QP_OPTPAR_SRA_MAX           = 1 << 8,
+       MTHCA_QP_OPTPAR_RRA_MAX           = 1 << 9,
+       MTHCA_QP_OPTPAR_PM_STATE          = 1 << 10,
+       MTHCA_QP_OPTPAR_PORT_NUM          = 1 << 11,
+       MTHCA_QP_OPTPAR_RETRY_COUNT       = 1 << 12,
+       MTHCA_QP_OPTPAR_ALT_RNR_RETRY     = 1 << 13,
+       MTHCA_QP_OPTPAR_ACK_TIMEOUT       = 1 << 14,
+       MTHCA_QP_OPTPAR_RNR_RETRY         = 1 << 15,
+       MTHCA_QP_OPTPAR_SCHED_QUEUE       = 1 << 16
+};
+
+static const u8 mthca_opcode[] = {
+       MTHCA_OPCODE_RDMA_WRITE,
+       MTHCA_OPCODE_RDMA_WRITE_IMM,
+       MTHCA_OPCODE_SEND,
+       MTHCA_OPCODE_SEND_IMM,
+       MTHCA_OPCODE_RDMA_READ,
+       MTHCA_OPCODE_ATOMIC_CS,
+       MTHCA_OPCODE_ATOMIC_FA
+};
+
+
+enum { RC, UC, UD, RD, RDEE, MLX, NUM_TRANS };
+
+static struct _state_table {
+       int trans;
+       u32 req_param[NUM_TRANS];
+       u32 opt_param[NUM_TRANS];
+} state_table[IBQPS_ERR + 1][IBQPS_ERR + 1]= {0};
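+
+/*
+ * state_table[current_state][next_state] holds the MTHCA_TRANS_* code for the
+ * transition plus, for each transport (RC/UC/UD/MLX), the attribute masks that
+ * are required or optional for it; fill_state_table() below populates the table.
+ */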
+
+static void fill_state_table()
+{
+       struct _state_table *t;
+       RtlZeroMemory( state_table, sizeof(state_table) );
+
+       /* IBQPS_RESET */       
+       t = &state_table[IBQPS_RESET][0];
+       t[IBQPS_RESET].trans                                    = MTHCA_TRANS_ANY2RST;
+       t[IBQPS_ERR].trans                                              = MTHCA_TRANS_ANY2ERR;
+
+       t[IBQPS_INIT].trans                                             = MTHCA_TRANS_RST2INIT;
+       t[IBQPS_INIT].req_param[UD]     = IB_QP_PKEY_INDEX |IB_QP_PORT |IB_QP_QKEY;
+       t[IBQPS_INIT].req_param[UC]     = IB_QP_PKEY_INDEX |IB_QP_PORT |IB_QP_ACCESS_FLAGS;
+       t[IBQPS_INIT].req_param[RC]     = IB_QP_PKEY_INDEX |IB_QP_PORT |IB_QP_ACCESS_FLAGS;
+       t[IBQPS_INIT].req_param[MLX]    = IB_QP_PKEY_INDEX |IB_QP_QKEY;
+       t[IBQPS_INIT].opt_param[MLX]    = IB_QP_PORT;
+
+       /* IBQPS_INIT */        
+       t = &state_table[IBQPS_INIT][0];
+       t[IBQPS_RESET].trans                                    = MTHCA_TRANS_ANY2RST;
+       t[IBQPS_ERR].trans                                              = MTHCA_TRANS_ANY2ERR;
+
+       t[IBQPS_INIT].trans                                             = MTHCA_TRANS_INIT2INIT;
+       t[IBQPS_INIT].opt_param[UD]     = IB_QP_PKEY_INDEX |IB_QP_PORT |IB_QP_QKEY;
+       t[IBQPS_INIT].opt_param[UC]     = IB_QP_PKEY_INDEX |IB_QP_PORT |IB_QP_ACCESS_FLAGS;
+       t[IBQPS_INIT].opt_param[RC]     = IB_QP_PKEY_INDEX |IB_QP_PORT |IB_QP_ACCESS_FLAGS;
+       t[IBQPS_INIT].opt_param[MLX]    = IB_QP_PKEY_INDEX |IB_QP_QKEY;
+
+       t[IBQPS_RTR].trans                                              = MTHCA_TRANS_INIT2RTR;
+       t[IBQPS_RTR].req_param[UC]      = 
+               IB_QP_AV |IB_QP_PATH_MTU |IB_QP_DEST_QPN |IB_QP_RQ_PSN;
+       t[IBQPS_RTR].req_param[RC]      = 
+               IB_QP_AV |IB_QP_PATH_MTU |IB_QP_DEST_QPN |IB_QP_RQ_PSN |IB_QP_MAX_DEST_RD_ATOMIC |IB_QP_MIN_RNR_TIMER;
+       t[IBQPS_RTR].opt_param[UD]      = IB_QP_PKEY_INDEX |IB_QP_QKEY;
+       t[IBQPS_RTR].opt_param[UC]      = IB_QP_PKEY_INDEX |IB_QP_ALT_PATH |IB_QP_ACCESS_FLAGS;
+       t[IBQPS_RTR].opt_param[RC]      = IB_QP_PKEY_INDEX |IB_QP_ALT_PATH |IB_QP_ACCESS_FLAGS;
+       t[IBQPS_RTR].opt_param[MLX]     = IB_QP_PKEY_INDEX |IB_QP_QKEY;
+
+/* IBQPS_RTR */        
+       t = &state_table[IBQPS_RTR][0];
+       t[IBQPS_RESET].trans                                    = MTHCA_TRANS_ANY2RST;
+       t[IBQPS_ERR].trans                                              = MTHCA_TRANS_ANY2ERR;
+
+       t[IBQPS_RTS].trans                                              = MTHCA_TRANS_RTR2RTS;
+       t[IBQPS_RTS].req_param[UD]      = IB_QP_SQ_PSN;
+       t[IBQPS_RTS].req_param[UC]      = IB_QP_SQ_PSN;
+       t[IBQPS_RTS].req_param[RC]      = 
+               IB_QP_TIMEOUT |IB_QP_RETRY_CNT |IB_QP_RNR_RETRY |IB_QP_SQ_PSN |IB_QP_MAX_QP_RD_ATOMIC;
+       t[IBQPS_RTS].req_param[MLX]     = IB_QP_SQ_PSN;
+       t[IBQPS_RTS].opt_param[UD]      = IB_QP_CUR_STATE |IB_QP_QKEY;
+       t[IBQPS_RTS].opt_param[UC]      = 
+               IB_QP_CUR_STATE |IB_QP_ALT_PATH |IB_QP_ACCESS_FLAGS |IB_QP_PATH_MIG_STATE;
+       t[IBQPS_RTS].opt_param[RC]      =       IB_QP_CUR_STATE |IB_QP_ALT_PATH |
+               IB_QP_ACCESS_FLAGS |IB_QP_MIN_RNR_TIMER |IB_QP_PATH_MIG_STATE;
+       t[IBQPS_RTS].opt_param[MLX]     = IB_QP_CUR_STATE |IB_QP_QKEY;
+
+       /* IBQPS_RTS */ 
+       t = &state_table[IBQPS_RTS][0];
+       t[IBQPS_RESET].trans                                    = MTHCA_TRANS_ANY2RST;
+       t[IBQPS_ERR].trans                                              = MTHCA_TRANS_ANY2ERR;
+
+       t[IBQPS_RTS].trans                                              = MTHCA_TRANS_RTS2RTS;
+       t[IBQPS_RTS].opt_param[UD]      = IB_QP_CUR_STATE |IB_QP_QKEY;
+       t[IBQPS_RTS].opt_param[UC]      = IB_QP_ACCESS_FLAGS |IB_QP_ALT_PATH |IB_QP_PATH_MIG_STATE;
+       t[IBQPS_RTS].opt_param[RC]      =       IB_QP_ACCESS_FLAGS |
+               IB_QP_ALT_PATH |IB_QP_PATH_MIG_STATE |IB_QP_MIN_RNR_TIMER;
+       t[IBQPS_RTS].opt_param[MLX]     = IB_QP_CUR_STATE |IB_QP_QKEY;
+
+       t[IBQPS_SQD].trans                                              = MTHCA_TRANS_RTS2SQD;
+
+       /* IBQPS_SQD */ 
+       t = &state_table[IBQPS_SQD][0];
+       t[IBQPS_RESET].trans                                    = MTHCA_TRANS_ANY2RST;
+       t[IBQPS_ERR].trans                                              = MTHCA_TRANS_ANY2ERR;
+
+       t[IBQPS_RTS].trans                                              = MTHCA_TRANS_SQD2RTS;
+       t[IBQPS_RTS].opt_param[UD]      = IB_QP_CUR_STATE |IB_QP_QKEY;
+       t[IBQPS_RTS].opt_param[UC]      = IB_QP_CUR_STATE |
+               IB_QP_ALT_PATH |IB_QP_ACCESS_FLAGS |IB_QP_PATH_MIG_STATE;
+       t[IBQPS_RTS].opt_param[RC]      =       IB_QP_CUR_STATE |IB_QP_ALT_PATH |
+               IB_QP_ACCESS_FLAGS |IB_QP_MIN_RNR_TIMER |IB_QP_PATH_MIG_STATE;
+       t[IBQPS_RTS].opt_param[MLX]     = IB_QP_CUR_STATE |IB_QP_QKEY;
+
+       t[IBQPS_SQD].trans                                              = MTHCA_TRANS_SQD2SQD;
+       t[IBQPS_SQD].opt_param[UD]      = IB_QP_PKEY_INDEX |IB_QP_QKEY;
+       t[IBQPS_SQD].opt_param[UC]      = IB_QP_AV |    IB_QP_CUR_STATE |
+               IB_QP_ALT_PATH |IB_QP_ACCESS_FLAGS |IB_QP_PKEY_INDEX |IB_QP_PATH_MIG_STATE;
+       t[IBQPS_SQD].opt_param[RC]      =       IB_QP_AV |IB_QP_TIMEOUT |IB_QP_RETRY_CNT |IB_QP_RNR_RETRY |
+               IB_QP_MAX_QP_RD_ATOMIC |IB_QP_MAX_DEST_RD_ATOMIC |IB_QP_CUR_STATE |IB_QP_ALT_PATH |
+               IB_QP_ACCESS_FLAGS |IB_QP_PKEY_INDEX |IB_QP_MIN_RNR_TIMER |IB_QP_PATH_MIG_STATE;
+       t[IBQPS_SQD].opt_param[MLX]     = IB_QP_PKEY_INDEX |IB_QP_QKEY;
+
+       /* IBQPS_SQE */ 
+       t = &state_table[IBQPS_SQE][0];
+       t[IBQPS_RESET].trans                                    = MTHCA_TRANS_ANY2RST;
+       t[IBQPS_ERR].trans                                              = MTHCA_TRANS_ANY2ERR;
+
+       t[IBQPS_RTS].trans                                              = MTHCA_TRANS_SQERR2RTS;
+       t[IBQPS_RTS].opt_param[UD]      = IB_QP_CUR_STATE |IB_QP_QKEY;
+       t[IBQPS_RTS].opt_param[UC]      = IB_QP_CUR_STATE | IB_QP_ACCESS_FLAGS;
+//     t[IBQPS_RTS].opt_param[RC]      =       IB_QP_CUR_STATE |IB_QP_MIN_RNR_TIMER;
+       t[IBQPS_RTS].opt_param[MLX]     = IB_QP_CUR_STATE |IB_QP_QKEY;
+
+       /* IBQPS_ERR */ 
+       t = &state_table[IBQPS_ERR][0];
+       t[IBQPS_RESET].trans                                    = MTHCA_TRANS_ANY2RST;
+       t[IBQPS_ERR].trans                                              = MTHCA_TRANS_ANY2ERR;
+
+}
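+
+/*
+ * state_table[cur][new] describes each legal QP state transition: the
+ * firmware opcode to issue (trans) plus, per transport, the attribute
+ * masks that are mandatory (req_param) and optional (opt_param).
+ * mthca_modify_qp() validates the caller's attr_mask against these
+ * entries before building the MODIFY_QP mailbox.
+ */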
+
+
+static int is_sqp(struct mthca_dev *dev, struct mthca_qp *qp)
+{
+       return qp->qpn >= (u32)dev->qp_table.sqp_start &&
+               qp->qpn <= (u32)dev->qp_table.sqp_start + 3;
+}
+
+static int is_qp0(struct mthca_dev *dev, struct mthca_qp *qp)
+{
+       return qp->qpn >= (u32)dev->qp_table.sqp_start &&
+               qp->qpn <= (u32)(dev->qp_table.sqp_start + 1);
+}
+
+
+static void dump_wqe(u32 *wqe_ptr , struct mthca_qp *qp_ptr)
+{
+       __be32 *wqe = wqe_ptr;
+
+       UNREFERENCED_PARAMETER(qp_ptr);
+
+       (void) wqe;     /* avoid warning if HCA_PRINT is compiled away... */
+       HCA_PRINT(TRACE_LEVEL_INFORMATION,HCA_DBG_QP,("WQE contents  QPN 0x%06x \n",qp_ptr->qpn));
+       HCA_PRINT(TRACE_LEVEL_INFORMATION,HCA_DBG_QP,("WQE contents [%02x] %08x %08x %08x %08x \n",0
+               , cl_ntoh32(wqe[0]), cl_ntoh32(wqe[1]), cl_ntoh32(wqe[2]), cl_ntoh32(wqe[3])));
+       HCA_PRINT(TRACE_LEVEL_INFORMATION,HCA_DBG_QP,("WQE contents [%02x] %08x %08x %08x %08x \n",4
+               , cl_ntoh32(wqe[4]), cl_ntoh32(wqe[5]), cl_ntoh32(wqe[6]), cl_ntoh32(wqe[7])));
+       HCA_PRINT(TRACE_LEVEL_INFORMATION,HCA_DBG_QP,("WQE contents [%02x] %08x %08x %08x %08x \n",8
+               , cl_ntoh32(wqe[8]), cl_ntoh32(wqe[9]), cl_ntoh32(wqe[10]), cl_ntoh32(wqe[11])));
+       HCA_PRINT(TRACE_LEVEL_INFORMATION,HCA_DBG_QP,("WQE contents [%02x] %08x %08x %08x %08x \n",12
+               , cl_ntoh32(wqe[12]), cl_ntoh32(wqe[13]), cl_ntoh32(wqe[14]), cl_ntoh32(wqe[15])));
+
+}
+
+
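+/*
+ * WQEs live either in one physically contiguous ("direct") buffer or in a
+ * list of pages; in both cases WQE n of a work queue starts at byte offset
+ * (n << wqe_shift), with the send queue additionally displaced by
+ * send_wqe_offset.
+ */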
+static void *get_recv_wqe(struct mthca_qp *qp, int n)
+{
+       if (qp->is_direct)
+               return (u8*)qp->queue.direct.page + (n << qp->rq.wqe_shift);
+       else
+               return (u8*)qp->queue.page_list[(n << qp->rq.wqe_shift) >> PAGE_SHIFT].page +
+                       ((n << qp->rq.wqe_shift) & (PAGE_SIZE - 1));
+}
+
+static void *get_send_wqe(struct mthca_qp *qp, int n)
+{
+       if (qp->is_direct)
+               return (u8*)qp->queue.direct.page + qp->send_wqe_offset +
+                       (n << qp->sq.wqe_shift);
+       else
+               return (u8*)qp->queue.page_list[(qp->send_wqe_offset +
+                                           (n << qp->sq.wqe_shift)) >>
+                                          PAGE_SHIFT].page +
+                       ((qp->send_wqe_offset + (n << qp->sq.wqe_shift)) &
+                        (PAGE_SIZE - 1));
+}
+
+static void mthca_wq_init(struct mthca_wq *wq)
+{      
+       spin_lock_init(&wq->lock);      
+       wq->next_ind  = 0;      
+       wq->last_comp = wq->max - 1;    
+       wq->head      = 0;      
+       wq->tail      = 0;      
+}
+
+void mthca_qp_event(struct mthca_dev *dev, u32 qpn,
+                   enum ib_event_type event_type)
+{
+       struct mthca_qp *qp;
+       struct ib_event event;
+       SPIN_LOCK_PREP(lh);
+
+       spin_lock(&dev->qp_table.lock, &lh);
+       qp = mthca_array_get(&dev->qp_table.qp, qpn & (dev->limits.num_qps - 1));
+       if (qp)
+               atomic_inc(&qp->refcount);
+       spin_unlock(&lh);
+
+       if (!qp) {
+               HCA_PRINT(TRACE_LEVEL_WARNING  ,HCA_DBG_QP  ,("Async event for bogus QP %06x\n", qpn));
+               return;
+       }
+
+       event.device      = &dev->ib_dev;
+       event.event       = event_type;
+       event.element.qp  = &qp->ibqp;
+       if (qp->ibqp.event_handler)
+               qp->ibqp.event_handler(&event, qp->ibqp.qp_context);
+
+       if (atomic_dec_and_test(&qp->refcount))
+               wake_up(&qp->wait);
+}
+
+static int to_mthca_state(enum ib_qp_state ib_state)
+{
+       switch (ib_state) {
+       case IBQPS_RESET: return MTHCA_QP_STATE_RST;
+       case IBQPS_INIT:  return MTHCA_QP_STATE_INIT;
+       case IBQPS_RTR:   return MTHCA_QP_STATE_RTR;
+       case IBQPS_RTS:   return MTHCA_QP_STATE_RTS;
+       case IBQPS_SQD:   return MTHCA_QP_STATE_SQD;
+       case IBQPS_SQE:   return MTHCA_QP_STATE_SQE;
+       case IBQPS_ERR:   return MTHCA_QP_STATE_ERR;
+       default:                return -1;
+       }
+}
+
+static int to_mthca_st(int transport)
+{
+       switch (transport) {
+       case RC:  return MTHCA_QP_ST_RC;
+       case UC:  return MTHCA_QP_ST_UC;
+       case UD:  return MTHCA_QP_ST_UD;
+       case RD:  return MTHCA_QP_ST_RD;
+       case MLX: return MTHCA_QP_ST_MLX;
+       default:  return -1;
+       }
+}
+
+static void store_attrs(struct mthca_sqp *sqp, struct ib_qp_attr *attr,
+                       int attr_mask)
+{
+       if (attr_mask & IB_QP_PKEY_INDEX)
+               sqp->pkey_index = attr->pkey_index;
+       if (attr_mask & IB_QP_QKEY)
+               sqp->qkey = attr->qkey;
+       if (attr_mask & IB_QP_SQ_PSN)
+               sqp->send_psn = attr->sq_psn;
+}
+
+static void init_port(struct mthca_dev *dev, int port)
+{
+       int err;
+       u8 status;
+       struct mthca_init_ib_param param;
+
+       RtlZeroMemory(&param, sizeof param);
+
+       param.port_width    = dev->limits.port_width_cap;
+       param.vl_cap    = dev->limits.vl_cap;
+       param.mtu_cap   = dev->limits.mtu_cap;
+       param.gid_cap   = (u16)dev->limits.gid_table_len;
+       param.pkey_cap  = (u16)dev->limits.pkey_table_len;
+
+       err = mthca_INIT_IB(dev, &param, port, &status);
+       if (err)
+               HCA_PRINT(TRACE_LEVEL_ERROR  ,HCA_DBG_QP  ,("INIT_IB failed, return code %d.\n", err));
+       if (status)
+               HCA_PRINT(TRACE_LEVEL_ERROR  ,HCA_DBG_QP  ,("INIT_IB returned status %02x.\n", status));
+}
+
+
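+/*
+ * Translate the effective IB access flags into the RRE/RAE/RWE bits of the
+ * QP context.  If the responder depth is zero, only remote writes may stay
+ * enabled, since RDMA reads and atomics require responder resources.
+ */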
+static __be32 get_hw_access_flags(struct mthca_qp *qp, struct ib_qp_attr *attr,
+                                 int attr_mask)
+{
+       u8 dest_rd_atomic;
+       u32 access_flags;
+       u32 hw_access_flags = 0;
+
+       if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
+               dest_rd_atomic = attr->max_dest_rd_atomic;
+       else
+               dest_rd_atomic = qp->resp_depth;
+
+       if (attr_mask & IB_QP_ACCESS_FLAGS)
+               access_flags = attr->qp_access_flags;
+       else
+               access_flags = qp->atomic_rd_en;
+
+       if (!dest_rd_atomic)
+               access_flags &= MTHCA_ACCESS_REMOTE_WRITE;
+
+       if (access_flags & MTHCA_ACCESS_REMOTE_READ)
+               hw_access_flags |= MTHCA_QP_BIT_RRE;
+       if (access_flags & MTHCA_ACCESS_REMOTE_ATOMIC)
+               hw_access_flags |= MTHCA_QP_BIT_RAE;
+       if (access_flags & MTHCA_ACCESS_REMOTE_WRITE)
+               hw_access_flags |= MTHCA_QP_BIT_RWE;
+
+       return cl_hton32(hw_access_flags);
+}
+
+int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask)
+{
+       struct mthca_dev *dev = to_mdev(ibqp->device);
+       struct mthca_qp *qp = to_mqp(ibqp);
+       enum ib_qp_state cur_state, new_state;
+       struct mthca_mailbox *mailbox;
+       struct mthca_qp_param *qp_param;
+       struct mthca_qp_context *qp_context;
+       u32 req_param, opt_param;
+       u8 status;
+       int err;
+       SPIN_LOCK_PREP(lhs);
+       SPIN_LOCK_PREP(lhr);
+
+       if (attr_mask & IB_QP_CUR_STATE) {
+               if (attr->cur_qp_state != IBQPS_RTR &&
+                   attr->cur_qp_state != IBQPS_RTS &&
+                   attr->cur_qp_state != IBQPS_SQD &&
+                   attr->cur_qp_state != IBQPS_SQE)
+                       return -EINVAL;
+               else
+                       cur_state = attr->cur_qp_state;
+       } else {
+               spin_lock_irq(&qp->sq.lock, &lhs);
+               spin_lock(&qp->rq.lock, &lhr);
+               cur_state = qp->state;
+               spin_unlock(&lhr);
+               spin_unlock_irq(&lhs);
+       }
+
+       if (attr_mask & IB_QP_STATE) {
+               if (attr->qp_state < 0 || attr->qp_state > IBQPS_ERR)
+                       return -EINVAL;
+               new_state = attr->qp_state;
+       } else
+               new_state = cur_state;
+
+       if (state_table[cur_state][new_state].trans == MTHCA_TRANS_INVALID) {
+               HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_QP,("Illegal QP transition "
+                         "%d->%d\n", cur_state, new_state));
+               return -EINVAL;
+       }
+
+       req_param = state_table[cur_state][new_state].req_param[qp->transport];
+       opt_param = state_table[cur_state][new_state].opt_param[qp->transport];
+
+       if ((req_param & attr_mask) != req_param) {
+               HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_QP,("QP transition "
+                         "%d->%d missing req attr 0x%08x\n",
+                         cur_state, new_state,
+                         req_param & ~attr_mask));
+               //NB: IBAL does not pass all the fields, so some mandatory flags may legitimately be missing here
+               return -EINVAL;
+       }
+
+       if (attr_mask & ~(req_param | opt_param | IB_QP_STATE)) {
+               HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_QP,("QP transition (transport %d) "
+                         "%d->%d has extra attr 0x%08x\n",
+                         qp->transport,
+                         cur_state, new_state,
+                         attr_mask & ~(req_param | opt_param |
+                                                IB_QP_STATE)));
+               //NB: the legacy code sometimes passes flags that this table does not list as optional for the transition
+               return -EINVAL;
+       }
+
+       if ((attr_mask & IB_QP_PKEY_INDEX) && 
+            attr->pkey_index >= dev->limits.pkey_table_len) {
+               HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_QP,("PKey index (%u) too large. max is %d\n",
+                         attr->pkey_index,dev->limits.pkey_table_len-1)); 
+               return -EINVAL;
+       }
+
+       if ((attr_mask & IB_QP_PORT) &&
+           (attr->port_num == 0 || attr->port_num > dev->limits.num_ports)) {
+               HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_QP,("Port number (%u) is invalid\n", attr->port_num));
+               return -EINVAL;
+       }
+
+       if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC &&
+           attr->max_rd_atomic > dev->limits.max_qp_init_rdma) {
+               HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_QP,("Max rdma_atomic as initiator %u too large (max is %d)\n",
+                         attr->max_rd_atomic, dev->limits.max_qp_init_rdma));
+               return -EINVAL;
+       }
+
+       if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC &&
+           attr->max_dest_rd_atomic > 1 << dev->qp_table.rdb_shift) {
+               HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_QP,("Max rdma_atomic as responder %u too large (max %d)\n",
+                         attr->max_dest_rd_atomic, 1 << dev->qp_table.rdb_shift));
+               return -EINVAL;
+       }
+
+       mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
+       if (IS_ERR(mailbox))
+               return PTR_ERR(mailbox);
+       qp_param = mailbox->buf;
+       qp_context = &qp_param->context;
+       RtlZeroMemory(qp_param, sizeof *qp_param);
+
+       qp_context->flags      = cl_hton32((to_mthca_state(new_state) << 28) |
+                                            (to_mthca_st(qp->transport) << 16));
+       qp_context->flags     |= cl_hton32(MTHCA_QP_BIT_DE);
+       if (!(attr_mask & IB_QP_PATH_MIG_STATE))
+               qp_context->flags |= cl_hton32(MTHCA_QP_PM_MIGRATED << 11);
+       else {
+               qp_param->opt_param_mask |= cl_hton32(MTHCA_QP_OPTPAR_PM_STATE);
+               switch (attr->path_mig_state) {
+               case IB_APM_MIGRATED:
+                       qp_context->flags |= cl_hton32(MTHCA_QP_PM_MIGRATED << 11);
+                       break;
+               case IB_APM_REARM:
+                       qp_context->flags |= cl_hton32(MTHCA_QP_PM_REARM << 11);
+                       break;
+               case IB_APM_ARMED:
+                       qp_context->flags |= cl_hton32(MTHCA_QP_PM_ARMED << 11);
+                       break;
+               }
+       }
+
+       /* leave tavor_sched_queue as 0 */
+
+       if (qp->transport == MLX || qp->transport == UD)
+               qp_context->mtu_msgmax = (IB_MTU_2048 << 5) | 11;
+       else if (attr_mask & IB_QP_PATH_MTU)
+               qp_context->mtu_msgmax = (u8)((attr->path_mtu << 5) | 31);
+
+       if (mthca_is_memfree(dev)) {
+               if (qp->rq.max)
+                       qp_context->rq_size_stride = (u8)(long_log2(qp->rq.max) << 3);
+               qp_context->rq_size_stride |= qp->rq.wqe_shift - 4;
+
+               if (qp->sq.max)
+                       qp_context->sq_size_stride = (u8)(long_log2(qp->sq.max) << 3);
+               qp_context->sq_size_stride |= qp->sq.wqe_shift - 4;
+       }
+
+       /* leave arbel_sched_queue as 0 */
+
+       if (qp->ibqp.ucontext)
+               qp_context->usr_page =
+                       cl_hton32(to_mucontext(qp->ibqp.ucontext)->uar.index);
+       else
+               qp_context->usr_page = cl_hton32(dev->driver_uar.index);
+       qp_context->local_qpn  = cl_hton32(qp->qpn);
+       if (attr_mask & IB_QP_DEST_QPN) {
+               qp_context->remote_qpn = cl_hton32(attr->dest_qp_num);
+       }
+
+       if (qp->transport == MLX)
+               qp_context->pri_path.port_pkey |=
+                       cl_hton32(to_msqp(qp)->port << 24);
+       else {
+               if (attr_mask & IB_QP_PORT) {
+                       qp_context->pri_path.port_pkey |=
+                               cl_hton32(attr->port_num << 24);
+                       qp_param->opt_param_mask |= cl_hton32(MTHCA_QP_OPTPAR_PORT_NUM);
+               }
+       }
+
+       if (attr_mask & IB_QP_PKEY_INDEX) {
+               qp_context->pri_path.port_pkey |=
+                       cl_hton32(attr->pkey_index);
+               qp_param->opt_param_mask |= cl_hton32(MTHCA_QP_OPTPAR_PKEY_INDEX);
+       }
+
+       if (attr_mask & IB_QP_RNR_RETRY) {
+               qp_context->pri_path.rnr_retry = attr->rnr_retry << 5;
+               qp_param->opt_param_mask |= cl_hton32(MTHCA_QP_OPTPAR_RNR_RETRY);
+       }
+
+       if (attr_mask & IB_QP_AV) {
+               qp_context->pri_path.g_mylmc     = attr->ah_attr.src_path_bits & 0x7f;
+               qp_context->pri_path.rlid        = cl_hton16(attr->ah_attr.dlid);
+               qp_context->pri_path.static_rate = (u8)!!attr->ah_attr.static_rate;
+               if (attr->ah_attr.ah_flags & IB_AH_GRH) {
+                       qp_context->pri_path.g_mylmc |= 1 << 7;
+                       qp_context->pri_path.mgid_index = attr->ah_attr.grh.sgid_index;
+                       qp_context->pri_path.hop_limit = attr->ah_attr.grh.hop_limit;
+                       qp_context->pri_path.sl_tclass_flowlabel =
+                               cl_hton32((attr->ah_attr.sl << 28)                |
+                                           (attr->ah_attr.grh.traffic_class << 20) |
+                                           (attr->ah_attr.grh.flow_label));
+                       memcpy(qp_context->pri_path.rgid,
+                              attr->ah_attr.grh.dgid.raw, 16);
+               } else {
+                       qp_context->pri_path.sl_tclass_flowlabel =
+                               cl_hton32(attr->ah_attr.sl << 28);
+               }
+               qp_param->opt_param_mask |= cl_hton32(MTHCA_QP_OPTPAR_PRIMARY_ADDR_PATH);
+       }
+
+       if (attr_mask & IB_QP_TIMEOUT) {
+               qp_context->pri_path.ackto = attr->timeout << 3;
+               qp_param->opt_param_mask |= cl_hton32(MTHCA_QP_OPTPAR_ACK_TIMEOUT);
+       }
+
+       /* XXX alt_path */
+
+       /* leave rdd as 0 */
+       qp_context->pd         = cl_hton32(to_mpd(ibqp->pd)->pd_num);
+       /* leave wqe_base as 0 (we always create an MR based at 0 for WQs) */
+       qp_context->wqe_lkey   = cl_hton32(qp->mr.ibmr.lkey);
+       qp_context->params1    = cl_hton32((unsigned long)(
+               (MTHCA_ACK_REQ_FREQ << 28) |
+               (MTHCA_FLIGHT_LIMIT << 24) |
+               MTHCA_QP_BIT_SWE));
+       if (qp->sq_policy == IB_SIGNAL_ALL_WR)
+               qp_context->params1 |= cl_hton32(MTHCA_QP_BIT_SSC);
+       if (attr_mask & IB_QP_RETRY_CNT) {
+               qp_context->params1 |= cl_hton32(attr->retry_cnt << 16);
+               qp_param->opt_param_mask |= cl_hton32(MTHCA_QP_OPTPAR_RETRY_COUNT);
+       }
+
+       if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
+               if (attr->max_rd_atomic) {
+                       qp_context->params1 |=
+                               cl_hton32(MTHCA_QP_BIT_SRE |
+                                           MTHCA_QP_BIT_SAE);
+                       qp_context->params1 |=
+                               cl_hton32(fls(attr->max_rd_atomic - 1) << 21);
+               }
+               qp_param->opt_param_mask |= cl_hton32(MTHCA_QP_OPTPAR_SRA_MAX);
+       }
+
+       if (attr_mask & IB_QP_SQ_PSN)
+               qp_context->next_send_psn = cl_hton32(attr->sq_psn);
+       qp_context->cqn_snd = cl_hton32(to_mcq(ibqp->send_cq)->cqn);
+
+       if (mthca_is_memfree(dev)) {
+               qp_context->snd_wqe_base_l = cl_hton32(qp->send_wqe_offset);
+               qp_context->snd_db_index   = cl_hton32(qp->sq.db_index);
+       }
+
+       if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
+
+               if (attr->max_dest_rd_atomic)
+                       qp_context->params2 |=
+                               cl_hton32(fls(attr->max_dest_rd_atomic - 1) << 21);
+
+               qp_param->opt_param_mask |= cl_hton32(MTHCA_QP_OPTPAR_RRA_MAX);
+
+       }
+
+       if (attr_mask & (IB_QP_ACCESS_FLAGS | IB_QP_MAX_DEST_RD_ATOMIC)) {
+               qp_context->params2      |= get_hw_access_flags(qp, attr, attr_mask);
+               qp_param->opt_param_mask |= cl_hton32(MTHCA_QP_OPTPAR_RWE |
+                                                       MTHCA_QP_OPTPAR_RRE |
+                                                       MTHCA_QP_OPTPAR_RAE);
+       }
+
+       qp_context->params2 |= cl_hton32(MTHCA_QP_BIT_RSC);
+
+       if (ibqp->srq)
+               qp_context->params2 |= cl_hton32(MTHCA_QP_BIT_RIC);
+
+       if (attr_mask & IB_QP_MIN_RNR_TIMER) {
+               qp_context->rnr_nextrecvpsn |= cl_hton32(attr->min_rnr_timer << 24);
+               qp_param->opt_param_mask |= cl_hton32(MTHCA_QP_OPTPAR_RNR_TIMEOUT);
+       }
+       if (attr_mask & IB_QP_RQ_PSN)
+               qp_context->rnr_nextrecvpsn |= cl_hton32(attr->rq_psn);
+
+       qp_context->ra_buff_indx =
+               cl_hton32(dev->qp_table.rdb_base +
+                           ((qp->qpn & (dev->limits.num_qps - 1)) * MTHCA_RDB_ENTRY_SIZE <<
+                            dev->qp_table.rdb_shift));
+
+       qp_context->cqn_rcv = cl_hton32(to_mcq(ibqp->recv_cq)->cqn);
+
+       if (mthca_is_memfree(dev))
+               qp_context->rcv_db_index   = cl_hton32(qp->rq.db_index);
+
+       if (attr_mask & IB_QP_QKEY) {
+               qp_context->qkey = cl_hton32(attr->qkey);
+               qp_param->opt_param_mask |= cl_hton32(MTHCA_QP_OPTPAR_Q_KEY);
+       }
+
+       if (ibqp->srq)
+               qp_context->srqn = cl_hton32(1 << 24 |
+                                              to_msrq(ibqp->srq)->srqn);
+
+       err = mthca_MODIFY_QP(dev, state_table[cur_state][new_state].trans,
+                             qp->qpn, 0, mailbox, 0, &status);
+       if (status) {
+               HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_QP,("modify QP %d returned status %02x.\n",
+                          state_table[cur_state][new_state].trans, status));
+               err = -EINVAL;
+       }
+
+       if (!err) {
+               qp->state = new_state;
+               if (attr_mask & IB_QP_ACCESS_FLAGS)
+                       qp->atomic_rd_en = (u8)attr->qp_access_flags;
+               if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
+                       qp->resp_depth = attr->max_dest_rd_atomic;
+       }
+
+       mthca_free_mailbox(dev, mailbox);
+
+       if (is_sqp(dev, qp))
+               store_attrs(to_msqp(qp), attr, attr_mask);
+
+       /*
+        * If we moved QP0 to RTR, bring the IB link up; if we moved
+        * QP0 to RESET or ERROR, bring the link back down.
+        */
+       if (is_qp0(dev, qp)) {
+               if (cur_state != IBQPS_RTR &&
+                   new_state == IBQPS_RTR)
+                       init_port(dev, to_msqp(qp)->port);
+
+               if (cur_state != IBQPS_RESET &&
+                   cur_state != IBQPS_ERR &&
+                   (new_state == IBQPS_RESET ||
+                    new_state == IBQPS_ERR))
+                       mthca_CLOSE_IB(dev, to_msqp(qp)->port, &status);
+       }
+
+       /*
+        * If we moved a kernel QP to RESET, clean up all old CQ
+        * entries and reinitialize the QP.
+        */
+       if (!err && new_state == IBQPS_RESET && !qp->ibqp.ucontext) {
+               mthca_cq_clean(dev, to_mcq(qp->ibqp.send_cq)->cqn, qp->qpn,
+                              qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL);
+               if (qp->ibqp.send_cq != qp->ibqp.recv_cq)
+                       mthca_cq_clean(dev, to_mcq(qp->ibqp.recv_cq)->cqn, qp->qpn,
+                                      qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL);
+
+               mthca_wq_init(&qp->sq);
+               qp->sq.last = get_send_wqe(qp, qp->sq.max - 1);
+               mthca_wq_init(&qp->rq);
+               qp->rq.last = get_recv_wqe(qp, qp->rq.max - 1);
+
+               if (mthca_is_memfree(dev)) {
+                       *qp->sq.db = 0;
+                       *qp->rq.db = 0;
+               }
+       }
+
+       return err;
+}
+
+static int mthca_max_data_size(struct mthca_dev *dev, struct mthca_qp *qp, int desc_sz)
+{
+
+       /*
+        * Calculate the maximum size of WQE s/g segments, excluding
+        * the next segment and other non-data segments.
+        */
+       int max_data_size = desc_sz - sizeof (struct mthca_next_seg);
+
+       switch (qp->transport) {
+       case MLX:
+               max_data_size -= 2 * sizeof (struct mthca_data_seg);
+               break;
+
+       case UD:
+               if (mthca_is_memfree(dev))
+                       max_data_size -= sizeof (struct mthca_arbel_ud_seg);
+               else
+                       max_data_size -= sizeof (struct mthca_tavor_ud_seg);
+               break;
+
+       default:
+               max_data_size -= sizeof (struct mthca_raddr_seg);
+               break;
+       }
+       return max_data_size;
+}
+
+static inline int mthca_max_inline_data(struct mthca_pd *pd, int max_data_size)
+{
+       /* We don't support inline data for kernel QPs (yet). */
+       return pd->ibpd.ucontext ? max_data_size - MTHCA_INLINE_HEADER_SIZE : 0;
+}
+
+static void mthca_adjust_qp_caps(struct mthca_dev *dev,
+                                struct mthca_pd *pd,
+                                struct mthca_qp *qp)
+{
+       int max_data_size = mthca_max_data_size(dev, qp,
+               min(dev->limits.max_desc_sz, 1 << qp->sq.wqe_shift));
+
+       qp->max_inline_data = mthca_max_inline_data(pd, max_data_size);
+
+       qp->sq.max_gs = min(dev->limits.max_sg,
+               (int)(max_data_size / sizeof (struct mthca_data_seg)));
+       qp->rq.max_gs = min(dev->limits.max_sg,
+               (int)((min(dev->limits.max_desc_sz, 1 << qp->rq.wqe_shift) -
+               sizeof (struct mthca_next_seg)) / sizeof (struct mthca_data_seg)));     
+}
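+
+/*
+ * Illustrative sizing example for the two helpers above (numbers are
+ * hypothetical, not taken from any particular HCA): with a 256-byte
+ * descriptor limit, an RC send WQE loses 16 bytes for the next segment and
+ * 16 bytes for the remote-address segment, leaving 224 bytes, i.e.
+ * 224 / sizeof(struct mthca_data_seg) = 14 scatter/gather entries if a data
+ * segment is 16 bytes.
+ */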
+
+/*
+ * Allocate and register buffer for WQEs.  qp->rq.max, sq.max,
+ * rq.max_gs and sq.max_gs must all be assigned.
+ * mthca_alloc_wqe_buf will calculate rq.wqe_shift and
+ * sq.wqe_shift (as well as send_wqe_offset, is_direct, and
+ * queue)
+ */
+static int mthca_alloc_wqe_buf(struct mthca_dev *dev,
+                              struct mthca_pd *pd,
+                              struct mthca_qp *qp)
+{
+       int size;
+       int err = -ENOMEM;
+       
+       HCA_ENTER(HCA_DBG_QP);
+       size = sizeof (struct mthca_next_seg) +
+               qp->rq.max_gs * sizeof (struct mthca_data_seg);
+
+       if (size > dev->limits.max_desc_sz)
+               return -EINVAL;
+
+       for (qp->rq.wqe_shift = 6; 1 << qp->rq.wqe_shift < size;
+            qp->rq.wqe_shift++)
+               ; /* nothing */
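+       /* rq.wqe_shift now holds ceil(log2(size)), with a 64-byte (1 << 6) minimum */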
+
+       size = qp->sq.max_gs * sizeof (struct mthca_data_seg);
+       switch (qp->transport) {
+               case MLX:
+                       size += 2 * sizeof (struct mthca_data_seg);
+                       break;
+
+               case UD:
+                       size += mthca_is_memfree(dev) ?
+                               sizeof (struct mthca_arbel_ud_seg) :
+                               sizeof (struct mthca_tavor_ud_seg);
+                       break;
+               
+               case UC:
+                       size += sizeof (struct mthca_raddr_seg);
+                       break;
+               
+               case RC:
+                       size += sizeof (struct mthca_raddr_seg);
+                       /*
+                        * An atomic op will require an atomic segment, a
+                        * remote address segment and one scatter entry.
+                        */
+                       size = max(size,
+                                sizeof (struct mthca_atomic_seg) +
+                                sizeof (struct mthca_raddr_seg) +
+                                sizeof (struct mthca_data_seg));
+                       break;
+                       
+               default:
+                       break;
+       }
+               
+       /* Make sure that we have enough space for a bind request */
+       size = max(size, sizeof (struct mthca_bind_seg));
+       
+       size += sizeof (struct mthca_next_seg);
+       
+       if (size > dev->limits.max_desc_sz)
+               return -EINVAL;
+
+       for (qp->sq.wqe_shift = 6; 1 << qp->sq.wqe_shift < size;
+            qp->sq.wqe_shift++)
+               ; /* nothing */
+
+       qp->send_wqe_offset = ALIGN(qp->rq.max << qp->rq.wqe_shift,
+                                   1 << qp->sq.wqe_shift);
+
+       /*
+        * If this is a userspace QP, we don't actually have to
+        * allocate anything.  All we need is to calculate the WQE
+        * sizes and the send_wqe_offset, so we're done now.
+        */
+       if (pd->ibpd.ucontext)
+               return 0;
+
+       size = (int)(LONG_PTR)NEXT_PAGE_ALIGN(qp->send_wqe_offset +
+                         (qp->sq.max << qp->sq.wqe_shift));
+
+       qp->wrid = kmalloc((qp->rq.max + qp->sq.max) * sizeof (u64),
+                          GFP_KERNEL);
+       if (!qp->wrid)
+               goto err_out;
+
+       err = mthca_buf_alloc(dev, size, MTHCA_MAX_DIRECT_QP_SIZE,
+                             &qp->queue, &qp->is_direct, pd, 0, &qp->mr);
+       if (err)
+               goto err_out;
+       
+       HCA_EXIT(HCA_DBG_QP);
+       return 0;
+
+err_out:
+       kfree(qp->wrid);
+       return err;
+}
+
+static void mthca_free_wqe_buf(struct mthca_dev *dev,
+                              struct mthca_qp *qp)
+{
+       mthca_buf_free(dev, (int)(LONG_PTR)NEXT_PAGE_ALIGN(qp->send_wqe_offset +
+                                      (qp->sq.max << qp->sq.wqe_shift)),
+                      &qp->queue, qp->is_direct, &qp->mr);
+       kfree(qp->wrid);
+}
+
+static int mthca_map_memfree(struct mthca_dev *dev,
+                            struct mthca_qp *qp)
+{
+       int ret;
+
+       if (mthca_is_memfree(dev)) {
+               ret = mthca_table_get(dev, dev->qp_table.qp_table, qp->qpn);
+               if (ret)
+                       return ret;
+
+               ret = mthca_table_get(dev, dev->qp_table.eqp_table, qp->qpn);
+               if (ret)
+                       goto err_qpc;
+
+               ret = mthca_table_get(dev, dev->qp_table.rdb_table,
+                                     qp->qpn << dev->qp_table.rdb_shift);
+               if (ret)
+                       goto err_eqpc;
+
+       }
+
+       return 0;
+
+err_eqpc:
+       mthca_table_put(dev, dev->qp_table.eqp_table, qp->qpn);
+
+err_qpc:
+       mthca_table_put(dev, dev->qp_table.qp_table, qp->qpn);
+
+       return ret;
+}
+
+static void mthca_unmap_memfree(struct mthca_dev *dev,
+                               struct mthca_qp *qp)
+{
+       mthca_table_put(dev, dev->qp_table.rdb_table,
+                       qp->qpn << dev->qp_table.rdb_shift);
+       mthca_table_put(dev, dev->qp_table.eqp_table, qp->qpn);
+       mthca_table_put(dev, dev->qp_table.qp_table, qp->qpn);
+}
+
+static int mthca_alloc_memfree(struct mthca_dev *dev,
+                              struct mthca_qp *qp)
+{
+       int ret = 0;
+
+       if (mthca_is_memfree(dev)) {
+               qp->rq.db_index = mthca_alloc_db(dev, MTHCA_DB_TYPE_RQ,
+                                                qp->qpn, &qp->rq.db);
+               if (qp->rq.db_index < 0)
+                       return -ENOMEM;
+
+               qp->sq.db_index = mthca_alloc_db(dev, MTHCA_DB_TYPE_SQ,
+                                                qp->qpn, &qp->sq.db);
+               if (qp->sq.db_index < 0) {
+                       mthca_free_db(dev, MTHCA_DB_TYPE_RQ, qp->rq.db_index);
+                       return -ENOMEM;
+               }
+       }
+
+       return ret;
+}
+
+static void mthca_free_memfree(struct mthca_dev *dev,
+                              struct mthca_qp *qp)
+{
+       if (mthca_is_memfree(dev)) {
+               mthca_free_db(dev, MTHCA_DB_TYPE_SQ, qp->sq.db_index);
+               mthca_free_db(dev, MTHCA_DB_TYPE_RQ, qp->rq.db_index);
+       }
+}
+
+static int mthca_alloc_qp_common(struct mthca_dev *dev,
+                                struct mthca_pd *pd,
+                                struct mthca_cq *send_cq,
+                                struct mthca_cq *recv_cq,
+                                enum ib_sig_type send_policy,
+                                struct mthca_qp *qp)
+{
+       int ret;
+       int i;
+
+       atomic_set(&qp->refcount, 1);
+       init_waitqueue_head(&qp->wait);
+       qp->state        = IBQPS_RESET;
+       qp->atomic_rd_en = 0;
+       qp->resp_depth   = 0;
+       qp->sq_policy    = send_policy;
+       mthca_wq_init(&qp->sq);
+       mthca_wq_init(&qp->rq);
+
+       UNREFERENCED_PARAMETER(send_cq);
+       UNREFERENCED_PARAMETER(recv_cq);
+       
+       ret = mthca_map_memfree(dev, qp);
+       if (ret)
+               return ret;
+
+       ret = mthca_alloc_wqe_buf(dev, pd, qp);
+       if (ret) {
+               mthca_unmap_memfree(dev, qp);
+               return ret;
+       }
+
+       mthca_adjust_qp_caps(dev, pd, qp);
+
+       /*
+        * If this is a userspace QP, we're done now.  The doorbells
+        * will be allocated and buffers will be initialized in
+        * userspace.
+        */
+       if (pd->ibpd.ucontext)
+               return 0;
+
+       ret = mthca_alloc_memfree(dev, qp);
+       if (ret) {
+               mthca_free_wqe_buf(dev, qp);
+               mthca_unmap_memfree(dev, qp);
+               return ret;
+       }
+
+       if (mthca_is_memfree(dev)) {
+               struct mthca_next_seg *next;
+               struct mthca_data_seg *scatter;
+               int size = (sizeof (struct mthca_next_seg) +
+                           qp->rq.max_gs * sizeof (struct mthca_data_seg)) / 16;
+
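+               /*
+                * Pre-link each receive WQE to the next one in the ring and
+                * mark every scatter entry with the invalid lkey so unused
+                * entries are never interpreted as real data segments.
+                */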
+               for (i = 0; i < qp->rq.max; ++i) {
+                       next = get_recv_wqe(qp, i);
+                       next->nda_op = cl_hton32(((i + 1) & (qp->rq.max - 1)) <<
+                                                  qp->rq.wqe_shift);
+                       next->ee_nds = cl_hton32(size);
+
+                       for (scatter = (void *) (next + 1);
+                            (void *) scatter < (void *) ((u8*)next + (1 << qp->rq.wqe_shift));
+                            ++scatter)
+                               scatter->lkey = cl_hton32(MTHCA_INVAL_LKEY);
+               }
+
+               for (i = 0; i < qp->sq.max; ++i) {
+                       next = get_send_wqe(qp, i);
+                       next->nda_op = cl_hton32((((i + 1) & (qp->sq.max - 1)) <<
+                                                   qp->sq.wqe_shift) +
+                                                  qp->send_wqe_offset);
+               }
+       }
+
+       qp->sq.last = get_send_wqe(qp, qp->sq.max - 1);
+       qp->rq.last = get_recv_wqe(qp, qp->rq.max - 1);
+
+       return 0;
+}
+
+static int mthca_set_qp_size(struct mthca_dev *dev, struct ib_qp_cap *cap,
+                                        struct mthca_pd *pd, struct mthca_qp *qp)
+{
+       int max_data_size = mthca_max_data_size(dev, qp, dev->limits.max_desc_sz);
+
+       /* Sanity check QP size before proceeding */
+       if (cap->max_send_wr     > (u32)dev->limits.max_wqes ||
+           cap->max_recv_wr     > (u32)dev->limits.max_wqes ||
+           cap->max_send_sge    > (u32)dev->limits.max_sg   ||
+           cap->max_recv_sge    > (u32)dev->limits.max_sg   ||
+           cap->max_inline_data > (u32)mthca_max_inline_data(pd, max_data_size))
+               return -EINVAL;
+
+       /*
+        * For MLX transport we need 2 extra S/G entries:
+        * one for the header and one for the checksum at the end
+        */
+       if (qp->transport == MLX && cap->max_recv_sge + 2 > (u32)dev->limits.max_sg)
+               return -EINVAL;
+
+       if (mthca_is_memfree(dev)) {
+               qp->rq.max = cap->max_recv_wr ?
+                       roundup_pow_of_two(cap->max_recv_wr) : 0;
+               qp->sq.max = cap->max_send_wr ?
+                       roundup_pow_of_two(cap->max_send_wr) : 0;
+       } else {
+               qp->rq.max = cap->max_recv_wr;
+               qp->sq.max = cap->max_send_wr;
+       }
+
+       qp->rq.max_gs = cap->max_recv_sge;
+       qp->sq.max_gs = MAX(cap->max_send_sge,
+                             ALIGN(cap->max_inline_data + MTHCA_INLINE_HEADER_SIZE,
+                                   MTHCA_INLINE_CHUNK_SIZE) /
+                             (int)sizeof (struct mthca_data_seg));
+
+       return 0;
+}
+
+int mthca_alloc_qp(struct mthca_dev *dev,
+                  struct mthca_pd *pd,
+                  struct mthca_cq *send_cq,
+                  struct mthca_cq *recv_cq,
+                  enum ib_qp_type_t type,
+                  enum ib_sig_type send_policy,
+                  struct ib_qp_cap *cap,
+                  struct mthca_qp *qp)
+{
+       int err;
+       SPIN_LOCK_PREP(lh);
+
+       err = mthca_set_qp_size(dev, cap, pd, qp);
+       if (err)
+               return err;
+
+       switch (type) {
+       case IB_QPT_RELIABLE_CONN: qp->transport = RC; break;
+       case IB_QPT_UNRELIABLE_CONN: qp->transport = UC; break;
+       case IB_QPT_UNRELIABLE_DGRM: qp->transport = UD; break;
+       default: return -EINVAL;
+       }
+
+       qp->qpn = mthca_alloc(&dev->qp_table.alloc);
+       if (qp->qpn == -1)
+               return -ENOMEM;
+
+       err = mthca_alloc_qp_common(dev, pd, send_cq, recv_cq,
+                                   send_policy, qp);
+       if (err) {
+               mthca_free(&dev->qp_table.alloc, qp->qpn);
+               return err;
+       }
+
+       spin_lock_irq(&dev->qp_table.lock, &lh);
+       mthca_array_set(&dev->qp_table.qp,
+                       qp->qpn & (dev->limits.num_qps - 1), qp);
+       spin_unlock_irq(&lh);
+
+       return 0;
+}
+
+int mthca_alloc_sqp(struct mthca_dev *dev,
+                   struct mthca_pd *pd,
+                   struct mthca_cq *send_cq,
+                   struct mthca_cq *recv_cq,
+                   enum ib_sig_type send_policy,
+                   struct ib_qp_cap *cap,
+                   int qpn,
+                   int port,
+                   struct mthca_sqp *sqp)
+{
+       u32 mqpn = qpn * 2 + dev->qp_table.sqp_start + port - 1;
+       int err;
+       SPIN_LOCK_PREP(lhs);
+       SPIN_LOCK_PREP(lhr);
+       SPIN_LOCK_PREP(lht);
+
+       err = mthca_set_qp_size(dev, cap, pd, &sqp->qp);
+       if (err)
+               return err;
+
+       alloc_dma_zmem_map(dev, 
+               sqp->qp.sq.max * MTHCA_UD_HEADER_SIZE, 
+               PCI_DMA_BIDIRECTIONAL,
+               &sqp->sg);
+       if (!sqp->sg.page)
+               return -ENOMEM;
+
+       spin_lock_irq(&dev->qp_table.lock, &lht);
+       if (mthca_array_get(&dev->qp_table.qp, mqpn))
+               err = -EBUSY;
+       else
+               mthca_array_set(&dev->qp_table.qp, mqpn, sqp);
+       spin_unlock_irq(&lht);
+
+       if (err)
+               goto err_out;
+
+       sqp->port = port;
+       sqp->qp.qpn       = mqpn;
+       sqp->qp.transport = MLX;
+
+       err = mthca_alloc_qp_common(dev, pd, send_cq, recv_cq,
+                                   send_policy, &sqp->qp);
+       if (err)
+               goto err_out_free;
+
+       atomic_inc(&pd->sqp_count);
+
+       return 0;
+
+ err_out_free:
+       /*
+        * Lock CQs here, so that CQ polling code can do QP lookup
+        * without taking a lock.
+        */
+       spin_lock_irq(&send_cq->lock, &lhs);
+       if (send_cq != recv_cq)
+               spin_lock(&recv_cq->lock, &lhr);
+
+       spin_lock(&dev->qp_table.lock, &lht);
+       mthca_array_clear(&dev->qp_table.qp, mqpn);
+       spin_unlock(&lht);
+
+       if (send_cq != recv_cq)
+               spin_unlock(&lhr);
+       spin_unlock_irq(&lhs);
+
+ err_out:
+       free_dma_mem_map(dev, &sqp->sg, PCI_DMA_BIDIRECTIONAL);
+
+       return err;
+}
+
+void mthca_free_qp(struct mthca_dev *dev,
+                  struct mthca_qp *qp)
+{
+       u8 status;
+       struct mthca_cq *send_cq;
+       struct mthca_cq *recv_cq;
+       SPIN_LOCK_PREP(lhs);
+       SPIN_LOCK_PREP(lhr);
+       SPIN_LOCK_PREP(lht);
+
+       send_cq = to_mcq(qp->ibqp.send_cq);
+       recv_cq = to_mcq(qp->ibqp.recv_cq);
+
+       /*
+        * Lock CQs here, so that CQ polling code can do QP lookup
+        * without taking a lock.
+        */
+       spin_lock_irq(&send_cq->lock, &lhs);
+       if (send_cq != recv_cq)
+               spin_lock(&recv_cq->lock, &lhr);
+
+       spin_lock(&dev->qp_table.lock, &lht);
+       mthca_array_clear(&dev->qp_table.qp,
+                         qp->qpn & (dev->limits.num_qps - 1));
+       spin_unlock(&lht);
+
+       if (send_cq != recv_cq)
+               spin_unlock(&lhr);
+       spin_unlock_irq(&lhs);
+
+       atomic_dec(&qp->refcount);
+       wait_event(&qp->wait, !atomic_read(&qp->refcount));
+
+       if (qp->state != IBQPS_RESET)
+               mthca_MODIFY_QP(dev, MTHCA_TRANS_ANY2RST, qp->qpn, 0, NULL, 0, &status);
+
+       /*
+        * If this is a userspace QP, the buffers, MR, CQs and so on
+        * will be cleaned up in userspace, so all we have to do is
+        * unref the mem-free tables and free the QPN in our table.
+        */
+       if (!qp->ibqp.ucontext) {
+               mthca_cq_clean(dev, to_mcq(qp->ibqp.send_cq)->cqn, qp->qpn,
+                              qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL);
+               if (qp->ibqp.send_cq != qp->ibqp.recv_cq)
+                       mthca_cq_clean(dev, to_mcq(qp->ibqp.recv_cq)->cqn, qp->qpn,
+                                      qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL);
+
+               mthca_free_memfree(dev, qp);
+               mthca_free_wqe_buf(dev, qp);
+       }
+
+       mthca_unmap_memfree(dev, qp);
+
+       if (is_sqp(dev, qp)) {
+               atomic_dec(&(to_mpd(qp->ibqp.pd)->sqp_count));
+               free_dma_mem_map(dev, &to_msqp(qp)->sg, PCI_DMA_BIDIRECTIONAL);
+       } else
+               mthca_free(&dev->qp_table.alloc, qp->qpn);
+}
+
+static enum mthca_wr_opcode conv_ibal_wr_opcode(struct _ib_send_wr *wr)
+{
+
+       enum mthca_wr_opcode opcode;
+
+       switch (wr->wr_type) {
+               case WR_SEND: 
+                       opcode = (wr->send_opt & IB_SEND_OPT_IMMEDIATE) ? MTHCA_OPCODE_SEND_IMM : MTHCA_OPCODE_SEND;
+                       break;
+               case WR_RDMA_WRITE:     
+                       opcode = (wr->send_opt & IB_SEND_OPT_IMMEDIATE) ? MTHCA_OPCODE_RDMA_WRITE_IMM : MTHCA_OPCODE_RDMA_WRITE;
+                       break;
+               case WR_RDMA_READ:              opcode = MTHCA_OPCODE_RDMA_READ; break;
+               case WR_COMPARE_SWAP:           opcode = MTHCA_OPCODE_ATOMIC_CS; break;
+               case WR_FETCH_ADD:                      opcode = MTHCA_OPCODE_ATOMIC_FA; break;
+               default:                                                opcode = MTHCA_OPCODE_INVALID;break;
+       }
+       return opcode;
+}
+
+/* Create UD header for an MLX send and build a data segment for it */
+static int build_mlx_header(struct mthca_dev *dev, struct mthca_sqp *sqp,
+                           int ind, struct _ib_send_wr *wr,
+                           struct mthca_mlx_seg *mlx,
+                           struct mthca_data_seg *data)
+{
+       enum mthca_wr_opcode opcode = conv_ibal_wr_opcode(wr);
+       int header_size;
+       int err;
+       u16 pkey;
+       CPU_2_BE64_PREP;
+
+       ib_ud_header_init(256, /* assume a MAD */
+               mthca_ah_grh_present(to_mah((struct ib_ah *)wr->dgrm.ud.h_av)),
+               &sqp->ud_header);
+
+       err = mthca_read_ah(dev, to_mah((struct ib_ah *)wr->dgrm.ud.h_av), &sqp->ud_header);
+       if (err)
+               return err;
+       mlx->flags &= ~cl_hton32(MTHCA_NEXT_SOLICIT | 1);
+       mlx->flags |= cl_hton32((!sqp->qp.ibqp.qp_num ? MTHCA_MLX_VL15 : 0) |
+                                 (sqp->ud_header.lrh.destination_lid ==
+                                  IB_LID_PERMISSIVE ? MTHCA_MLX_SLR : 0) |
+                                 (sqp->ud_header.lrh.service_level << 8));
+       mlx->rlid = sqp->ud_header.lrh.destination_lid;
+       mlx->vcrc = 0;
+
+       switch (opcode) {
+       case MTHCA_OPCODE_SEND:
+               sqp->ud_header.bth.opcode = IB_OPCODE_UD_SEND_ONLY;
+               sqp->ud_header.immediate_present = 0;
+               break;
+       case MTHCA_OPCODE_SEND_IMM:
+               sqp->ud_header.bth.opcode = IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE;
+               sqp->ud_header.immediate_present = 1;
+               sqp->ud_header.immediate_data = wr->immediate_data;
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       sqp->ud_header.lrh.virtual_lane    = !sqp->qp.ibqp.qp_num ? 15 : 0;
+       if (sqp->ud_header.lrh.destination_lid == IB_LID_PERMISSIVE)
+               sqp->ud_header.lrh.source_lid = IB_LID_PERMISSIVE;
+       sqp->ud_header.bth.solicited_event = (u8)!!(wr->send_opt & IB_SEND_OPT_SOLICITED);
+       if (!sqp->qp.ibqp.qp_num)
+               ib_get_cached_pkey(&dev->ib_dev, (u8)sqp->port,
+                                  sqp->pkey_index, &pkey);
+       else
+               ib_get_cached_pkey(&dev->ib_dev, (u8)sqp->port,
+                                  wr->dgrm.ud.pkey_index, &pkey);
+       sqp->ud_header.bth.pkey = cl_hton16(pkey);
+       sqp->ud_header.bth.destination_qpn = wr->dgrm.ud.remote_qp;
+       sqp->ud_header.bth.psn = cl_hton32((sqp->send_psn++) & ((1 << 24) - 1));
+       sqp->ud_header.deth.qkey = wr->dgrm.ud.remote_qkey & 0x00000080 ?
+                                              cl_hton32(sqp->qkey) : wr->dgrm.ud.remote_qkey;
+       sqp->ud_header.deth.source_qpn = cl_hton32(sqp->qp.ibqp.qp_num);
+
+       header_size = ib_ud_header_pack(&sqp->ud_header,
+                                       (u8*)sqp->sg.page +
+                                       ind * MTHCA_UD_HEADER_SIZE);
+
+       data->byte_count = cl_hton32(header_size);
+       data->lkey       = cl_hton32(to_mpd(sqp->qp.ibqp.pd)->ntmr.ibmr.lkey);
+       data->addr       = CPU_2_BE64(sqp->sg.dma_address +
+                                      ind * MTHCA_UD_HEADER_SIZE);
+
+       return 0;
+}
+
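+/*
+ * Check for work queue overflow: try a lock-free head/tail comparison first,
+ * and only if the queue looks full re-read the indices under the CQ lock,
+ * since completion processing, which advances the tail, runs under that lock.
+ */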
+static inline int mthca_wq_overflow(struct mthca_wq *wq, int nreq,
+                                   struct ib_cq *ib_cq)
+{
+       unsigned cur;
+       struct mthca_cq *cq;
+       SPIN_LOCK_PREP(lh);
+
+       cur = wq->head - wq->tail;
+       if (likely((int)cur + nreq < wq->max))
+               return 0;
+
+       cq = to_mcq(ib_cq);
+       spin_lock(&cq->lock, &lh);
+       cur = wq->head - wq->tail;
+       spin_unlock(&lh);
+
+       return (int)cur + nreq >= wq->max;
+}
+
+int mthca_tavor_post_send(struct ib_qp *ibqp, struct _ib_send_wr *wr,
+                         struct _ib_send_wr **bad_wr)
+{
+       struct mthca_dev *dev = to_mdev(ibqp->device);
+       struct mthca_qp *qp = to_mqp(ibqp);
+       u8 *wqe;
+       u8 *prev_wqe;
+       int err = 0;
+       int nreq;
+       int i;
+       int size;
+       int size0 = 0;
+       u32 f0 = 0;
+       int ind;
+       u8 op0 = 0;
+       enum mthca_wr_opcode opcode;
+       SPIN_LOCK_PREP(lh);
+
+       spin_lock_irqsave(&qp->sq.lock, &lh);
+
+       /* XXX check that state is OK to post send */
+
+       ind = qp->sq.next_ind;
+
+       for (nreq = 0; wr; ++nreq, wr = wr->p_next) {
+               if (mthca_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) {
+                       HCA_PRINT(TRACE_LEVEL_ERROR,HCA_DBG_QP,("SQ %06x full (%u head, %u tail,"
+                                       " %d max, %d nreq)\n", qp->qpn,
+                                       qp->sq.head, qp->sq.tail,
+                                       qp->sq.max, nreq));
+                       err = -ENOMEM;
+                       *bad_wr = wr;
+                       goto out;
+               }
+
+               wqe = get_send_wqe(qp, ind);
+               prev_wqe = qp->sq.last;
+               qp->sq.last = wqe;
+               opcode = conv_ibal_wr_opcode(wr);
+
+               ((struct mthca_next_seg *) wqe)->nda_op = 0;
+               ((struct mthca_next_seg *) wqe)->ee_nds = 0;
+               ((struct mthca_next_seg *) wqe)->flags =
+                       ((wr->send_opt & IB_SEND_OPT_SIGNALED) ?
+                        cl_hton32(MTHCA_NEXT_CQ_UPDATE) : 0) |
+                       ((wr->send_opt & IB_SEND_OPT_SOLICITED) ?
+                        cl_hton32(MTHCA_NEXT_SOLICIT) : 0)   |
+                       cl_hton32(1);
+               if (opcode == MTHCA_OPCODE_SEND_IMM||
+                   opcode == MTHCA_OPCODE_RDMA_WRITE_IMM)
+                       ((struct mthca_next_seg *) wqe)->imm = wr->immediate_data;
+
+               wqe += sizeof (struct mthca_next_seg);
+               size = sizeof (struct mthca_next_seg) / 16;
+
+               switch (qp->transport) {
+               case RC:
+                       switch (opcode) {
+                       case MTHCA_OPCODE_ATOMIC_CS:
+                       case MTHCA_OPCODE_ATOMIC_FA:
+                               ((struct mthca_raddr_seg *) wqe)->raddr =
+                                       cl_hton64(wr->remote_ops.vaddr);
+                               ((struct mthca_raddr_seg *) wqe)->rkey =
+                                       cl_hton32(wr->remote_ops.rkey);
+                               ((struct mthca_raddr_seg *) wqe)->reserved = 0;
+
+                               wqe += sizeof (struct mthca_raddr_seg);
+
+                               if (opcode == MTHCA_OPCODE_ATOMIC_CS) {
+                                       ((struct mthca_atomic_seg *) wqe)->swap_add =
+                                               cl_hton64(wr->remote_ops.atomic2);
+                                       ((struct mthca_atomic_seg *) wqe)->compare =
+                                               cl_hton64(wr->remote_ops.atomic1);
+                               } else {
+                                       ((struct mthca_atomic_seg *) wqe)->swap_add =
+                                               cl_hton64(wr->remote_ops.atomic1);
+                                       ((struct mthca_atomic_seg *) wqe)->compare = 0;
+                               }
+
+                               wqe += sizeof (struct mthca_atomic_seg);
+                               size += (sizeof (struct mthca_raddr_seg) +
+                                       sizeof (struct mthca_atomic_seg)) / 16 ;
+                               break;
+
+                       case MTHCA_OPCODE_RDMA_READ:
+                       case MTHCA_OPCODE_RDMA_WRITE:
+                       case MTHCA_OPCODE_RDMA_WRITE_IMM:
+                               ((struct mthca_raddr_seg *) wqe)->raddr =
+                                       cl_hton64(wr->remote_ops.vaddr);
+                               ((struct mthca_raddr_seg *) wqe)->rkey =
+                                       cl_hton32(wr->remote_ops.rkey);
+                               ((struct mthca_raddr_seg *) wqe)->reserved = 0;
+                               wqe += sizeof (struct mthca_raddr_seg);
+                               size += sizeof (struct mthca_raddr_seg) / 16;
+                               break;
+
+                       default:
+                               /* No extra segments required for sends */
+                               break;
+                       }
+
+                       break;
+
+               case UC:
+                       switch (opcode) {
+                       case MTHCA_OPCODE_RDMA_WRITE:
+                       case MTHCA_OPCODE_RDMA_WRITE_IMM:
+                               ((struct mthca_raddr_seg *) wqe)->raddr =
+                                       cl_hton64(wr->remote_ops.vaddr);
+                               ((struct mthca_raddr_seg *) wqe)->rkey =
+                                       cl_hton32(wr->remote_ops.rkey);
+                               ((struct mthca_raddr_seg *) wqe)->reserved = 0;
+                               wqe += sizeof (struct mthca_raddr_seg);
+                               size += sizeof (struct mthca_raddr_seg) / 16;
+                               break;
+
+                       default:
+                               /* No extra segments required for sends */
+                               break;
+                       }
+
+                       break;
+
+               case UD:
+                       ((struct mthca_tavor_ud_seg *) wqe)->lkey =
+                               cl_hton32(to_mah((struct ib_ah *)wr->dgrm.ud.h_av)->key);
+                       ((struct mthca_tavor_ud_seg *) wqe)->av_addr =
+                               cl_hton64(to_mah((struct ib_ah *)wr->dgrm.ud.h_av)->avdma);
+                       ((struct mthca_tavor_ud_seg *) wqe)->dqpn = wr->dgrm.ud.remote_qp;
+                       ((struct mthca_tavor_ud_seg *) wqe)->qkey = wr->dgrm.ud.remote_qkey;
+
+                       wqe += sizeof (struct mthca_tavor_ud_seg);
+                       size += sizeof (struct mthca_tavor_ud_seg) / 16;
+                       break;
+
+               case MLX:
+                       err = build_mlx_header(dev, to_msqp(qp), ind, wr,
+                                              (void*)(wqe - sizeof (struct mthca_next_seg)),
+                                              (void*)wqe);
+                       if (err) {
+                               *bad_wr = wr;
+                               goto out;
+                       }
+                       wqe += sizeof (struct mthca_data_seg);
+                       size += sizeof (struct mthca_data_seg) / 16;
+                       break;
+               }
+
+               if ((int)wr->num_ds > qp->sq.max_gs) {
+                       HCA_PRINT(TRACE_LEVEL_ERROR  ,HCA_DBG_QP ,("SQ %06x too many gathers\n",qp->qpn));
+                       err = -EINVAL;
+                       *bad_wr = wr;
+                       goto out;
+               }
+
+               for (i = 0; i < (int)wr->num_ds; ++i) {
+                       ((struct mthca_data_seg *) wqe)->byte_count =
+                               cl_hton32(wr->ds_array[i].length);
+                       ((struct mthca_data_seg *) wqe)->lkey =
+                               cl_hton32(wr->ds_array[i].lkey);
+                       ((struct mthca_data_seg *) wqe)->addr =
+                               cl_hton64(wr->ds_array[i].vaddr);
+                       wqe += sizeof (struct mthca_data_seg);
+                       size += sizeof (struct mthca_data_seg) / 16;
+                       HCA_PRINT(TRACE_LEVEL_VERBOSE ,HCA_DBG_QP ,("SQ %06x [%02x]  lkey 0x%08x vaddr 0x%I64x 0x%x\n",qp->qpn,i,
+                               (wr->ds_array[i].lkey),(wr->ds_array[i].vaddr),wr->ds_array[i].length));
+               }
+
+               /* Add one more inline data segment for ICRC */
+               if (qp->transport == MLX) {
+                       ((struct mthca_data_seg *) wqe)->byte_count =
+                               cl_hton32((unsigned long)((1 << 31) | 4));
+                       ((u32 *) wqe)[1] = 0;
+                       wqe += sizeof (struct mthca_data_seg);
+                       size += sizeof (struct mthca_data_seg) / 16;
+               }
+
+               qp->wrid[ind + qp->rq.max] = wr->wr_id;
+
+               if (opcode == MTHCA_OPCODE_INVALID) {
+                       HCA_PRINT(TRACE_LEVEL_ERROR  ,HCA_DBG_QP ,("SQ %06x opcode invalid\n",qp->qpn));
+                       err = -EINVAL;
+                       *bad_wr = wr;
+                       goto out;
+               }
+
+               ((struct mthca_next_seg *) prev_wqe)->nda_op =
+                       cl_hton32(((ind << qp->sq.wqe_shift) +  
+                       qp->send_wqe_offset) |opcode);
+               wmb();
+               ((struct mthca_next_seg *) prev_wqe)->ee_nds =
+                       cl_hton32((size0 ? 0 : MTHCA_NEXT_DBD) |size);
+
+               if (!size0) {
+                       size0 = size;
+                       op0   = opcode;
+               }
+
+               #if 0
+                       dump_wqe( (u32*)qp->sq.last,qp);
+               #endif
+
+               ++ind;
+               if (unlikely(ind >= qp->sq.max))
+                       ind -= qp->sq.max;
+       }
+
+out:
+       if (likely(nreq)) {
+               __be32 doorbell[2];
+
+               doorbell[0] = cl_hton32(((qp->sq.next_ind << qp->sq.wqe_shift) +
+                                          qp->send_wqe_offset) | f0 | op0);
+               doorbell[1] = cl_hton32((qp->qpn << 8) | size0);
+
+               wmb();
+
+               mthca_write64(doorbell,
+                             dev->kar + MTHCA_SEND_DOORBELL,
+                             MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
+       }
+
+       qp->sq.next_ind = ind;
+       qp->sq.head    += nreq;
+
+       spin_unlock_irqrestore(&lh);
+       return err;
+}
+
+int mthca_tavor_post_receive(struct ib_qp *ibqp, struct _ib_recv_wr *wr,
+                            struct _ib_recv_wr **bad_wr)
+{
+       struct mthca_dev *dev = to_mdev(ibqp->device);
+       struct mthca_qp *qp = to_mqp(ibqp);
+       __be32 doorbell[2];
+       int err = 0;
+       int nreq;
+       int i;
+       int size;
+       int size0 = 0;
+       int ind;
+       u8 *wqe;
+       u8 *prev_wqe;
+       SPIN_LOCK_PREP(lh);
+
+       spin_lock_irqsave(&qp->rq.lock, &lh);
+
+       /* XXX check that state is OK to post receive */
+
+       ind = qp->rq.next_ind;
+
+       for (nreq = 0; wr; ++nreq, wr = wr->p_next) {
+               if (unlikely(nreq == MTHCA_TAVOR_MAX_WQES_PER_RECV_DB)) {
+                       nreq = 0;
+
+                       doorbell[0] = cl_hton32((qp->rq.next_ind << qp->rq.wqe_shift) | size0);
+                       doorbell[1] = cl_hton32(qp->qpn << 8);
+
+                       wmb();
+
+                       mthca_write64(doorbell, dev->kar + MTHCA_RECEIVE_DOORBELL,
+                                     MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
+
+                       qp->rq.head += MTHCA_TAVOR_MAX_WQES_PER_RECV_DB;
+                       size0 = 0;
+               }
+               if (mthca_wq_overflow(&qp->rq, nreq, qp->ibqp.recv_cq)) {
+                       HCA_PRINT(TRACE_LEVEL_ERROR,HCA_DBG_QP,("RQ %06x full (%u head, %u tail,"
+                                       " %d max, %d nreq)\n", qp->qpn,
+                                       qp->rq.head, qp->rq.tail,
+                                       qp->rq.max, nreq));
+                       err = -ENOMEM;
+                       *bad_wr = wr;
+                       goto out;
+               }
+
+               wqe = get_recv_wqe(qp, ind);
+               prev_wqe = qp->rq.last;
+               qp->rq.last = wqe;
+
+               ((struct mthca_next_seg *) wqe)->nda_op = 0;
+               ((struct mthca_next_seg *) wqe)->ee_nds =
+                       cl_hton32(MTHCA_NEXT_DBD);
+               ((struct mthca_next_seg *) wqe)->flags = 0;
+
+               wqe += sizeof (struct mthca_next_seg);
+               size = sizeof (struct mthca_next_seg) / 16;
+
+               if (unlikely((int)wr->num_ds > qp->rq.max_gs)) {
+                       HCA_PRINT(TRACE_LEVEL_ERROR  ,HCA_DBG_QP ,("RQ %06x too many gathers\n",qp->qpn));
+                       err = -EINVAL;
+                       *bad_wr = wr;
+                       goto out;
+               }
+
+               for (i = 0; i < (int)wr->num_ds; ++i) {
+                       ((struct mthca_data_seg *) wqe)->byte_count =
+                               cl_hton32(wr->ds_array[i].length);
+                       ((struct mthca_data_seg *) wqe)->lkey =
+                               cl_hton32(wr->ds_array[i].lkey);
+                       ((struct mthca_data_seg *) wqe)->addr =
+                               cl_hton64(wr->ds_array[i].vaddr);
+                       wqe += sizeof (struct mthca_data_seg);
+                       size += sizeof (struct mthca_data_seg) / 16;
+//                     HCA_PRINT(TRACE_LEVEL_ERROR  ,HCA_DBG_QP ,("RQ %06x [%02x]  lkey 0x%08x vaddr 0x%I64x 0x%x 0x%08x\n",qp->qpn,i,
+//                             (wr->ds_array[i].lkey),(wr->ds_array[i].vaddr),wr->ds_array[i].length, wr->wr_id));
+               }
+
+               qp->wrid[ind] = wr->wr_id;
+
+               ((struct mthca_next_seg *) prev_wqe)->nda_op =
+                       cl_hton32((ind << qp->rq.wqe_shift) | 1);
+               wmb();
+               ((struct mthca_next_seg *) prev_wqe)->ee_nds =
+                       cl_hton32(MTHCA_NEXT_DBD | size);
+
+               if (!size0)
+                       size0 = size;
+
+               #if 0
+                       dump_wqe( (u32*)wqe ,qp);
+               #endif
+               
+               ++ind;
+               if (unlikely(ind >= qp->rq.max))
+                       ind -= qp->rq.max;
+       }
+
+out:
+       if (likely(nreq)) {
+               doorbell[0] = cl_hton32((qp->rq.next_ind << qp->rq.wqe_shift) | size0);
+               doorbell[1] = cl_hton32((qp->qpn << 8) | nreq);
+
+               wmb();
+
+               mthca_write64(doorbell, dev->kar + MTHCA_RECEIVE_DOORBELL,
+                             MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
+       }
+
+       qp->rq.next_ind = ind;
+       qp->rq.head    += nreq;
+
+       spin_unlock_irqrestore(&lh);
+       return err;
+}
+
+int mthca_arbel_post_send(struct ib_qp *ibqp, struct _ib_send_wr *wr,
+                         struct _ib_send_wr **bad_wr)
+{
+       struct mthca_dev *dev = to_mdev(ibqp->device);
+       struct mthca_qp *qp = to_mqp(ibqp);
+       __be32 doorbell[2];
+       u8 *wqe;
+       u8 *prev_wqe;
+       int err = 0;
+       int nreq;
+       int i;
+       int size;
+       int size0 = 0;
+       u32 f0 = 0;
+       int ind;
+       u8 op0 = 0;
+       enum ib_wr_opcode opcode;
+       SPIN_LOCK_PREP(lh);
+
+       spin_lock_irqsave(&qp->sq.lock, &lh);
+
+       /* XXX check that state is OK to post send */
+
+       ind = qp->sq.head & (qp->sq.max - 1);
+
+       for (nreq = 0; wr; ++nreq, wr = wr->p_next) {
+               if (unlikely(nreq == MTHCA_ARBEL_MAX_WQES_PER_SEND_DB)) {
+                       nreq = 0;
+                       doorbell[0] = cl_hton32((MTHCA_ARBEL_MAX_WQES_PER_SEND_DB << 24) |
+                               ((qp->sq.head & 0xffff) << 8) |f0 | op0);
+                       doorbell[1] = cl_hton32((qp->qpn << 8) | size0);
+                       qp->sq.head += MTHCA_ARBEL_MAX_WQES_PER_SEND_DB;
+                       size0 = 0;
+
+                       /*
+                        * Make sure that descriptors are written before
+                        * doorbell record.
+                        */
+                       wmb();
+                       *qp->sq.db = cl_hton32(qp->sq.head & 0xffff);
+
+                       /*
+                        * Make sure doorbell record is written before we
+                        * write MMIO send doorbell.
+                        */
+                       wmb();
+                       mthca_write64(doorbell, dev->kar + MTHCA_SEND_DOORBELL,
+                               MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
+               }
+
+               if (mthca_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) {
+                       HCA_PRINT(TRACE_LEVEL_ERROR,HCA_DBG_QP,("SQ %06x full (%u head, %u tail,"
+                                       " %d max, %d nreq)\n", qp->qpn,
+                                       qp->sq.head, qp->sq.tail,
+                                       qp->sq.max, nreq));
+                       err = -ENOMEM;
+                       *bad_wr = wr;
+                       goto out;
+               }
+
+               wqe = get_send_wqe(qp, ind);
+               prev_wqe = qp->sq.last;
+               qp->sq.last = wqe;
+               opcode = conv_ibal_wr_opcode(wr);
+
+               ((struct mthca_next_seg *) wqe)->flags =
+                       ((wr->send_opt & IB_SEND_OPT_SIGNALED) ?
+                        cl_hton32(MTHCA_NEXT_CQ_UPDATE) : 0) |
+                       ((wr->send_opt & IB_SEND_OPT_SOLICITED) ?
+                        cl_hton32(MTHCA_NEXT_SOLICIT) : 0)   |
+                       cl_hton32(1);
+               if (opcode == MTHCA_OPCODE_SEND_IMM||
+                       opcode == MTHCA_OPCODE_RDMA_WRITE_IMM)
+                       ((struct mthca_next_seg *) wqe)->imm = wr->immediate_data;
+
+               wqe += sizeof (struct mthca_next_seg);
+               size = sizeof (struct mthca_next_seg) / 16;
+
+               switch (qp->transport) {
+               case RC:
+                       switch (opcode) {
+                       case MTHCA_OPCODE_ATOMIC_CS:
+                       case MTHCA_OPCODE_ATOMIC_FA:
+                               ((struct mthca_raddr_seg *) wqe)->raddr =
+                                       cl_hton64(wr->remote_ops.vaddr);
+                               ((struct mthca_raddr_seg *) wqe)->rkey =
+                                       cl_hton32(wr->remote_ops.rkey);
+                               ((struct mthca_raddr_seg *) wqe)->reserved = 0;
+
+                               wqe += sizeof (struct mthca_raddr_seg);
+
+                               if (opcode == MTHCA_OPCODE_ATOMIC_FA) {
+                                       ((struct mthca_atomic_seg *) wqe)->swap_add =
+                                               cl_hton64(wr->remote_ops.atomic2);
+                                       ((struct mthca_atomic_seg *) wqe)->compare =
+                                               cl_hton64(wr->remote_ops.atomic1);
+                               } else {
+                                       ((struct mthca_atomic_seg *) wqe)->swap_add =
+                                               cl_hton64(wr->remote_ops.atomic1);
+                                       ((struct mthca_atomic_seg *) wqe)->compare = 0;
+                               }
+
+                               wqe += sizeof (struct mthca_atomic_seg);
+                               size += (sizeof (struct mthca_raddr_seg) +
+                                       sizeof (struct mthca_atomic_seg)) / 16 ;
+                               break;
+
+                       case MTHCA_OPCODE_RDMA_READ:
+                       case MTHCA_OPCODE_RDMA_WRITE:
+                       case MTHCA_OPCODE_RDMA_WRITE_IMM:
+                               ((struct mthca_raddr_seg *) wqe)->raddr =
+                                       cl_hton64(wr->remote_ops.vaddr);
+                               ((struct mthca_raddr_seg *) wqe)->rkey =
+                                       cl_hton32(wr->remote_ops.rkey);
+                               ((struct mthca_raddr_seg *) wqe)->reserved = 0;
+                               wqe += sizeof (struct mthca_raddr_seg);
+                               size += sizeof (struct mthca_raddr_seg) / 16;
+                               break;
+
+                       default:
+                               /* No extra segments required for sends */
+                               break;
+                       }
+
+                       break;
+
+               case UC:
+                       switch (opcode) {
+                       case MTHCA_OPCODE_RDMA_WRITE:
+                       case MTHCA_OPCODE_RDMA_WRITE_IMM:
+                               ((struct mthca_raddr_seg *) wqe)->raddr =
+                                       cl_hton64(wr->remote_ops.vaddr);
+                               ((struct mthca_raddr_seg *) wqe)->rkey =
+                                       cl_hton32(wr->remote_ops.rkey);
+                               ((struct mthca_raddr_seg *) wqe)->reserved = 0;
+                               wqe += sizeof (struct mthca_raddr_seg);
+                               size += sizeof (struct mthca_raddr_seg) / 16;
+                               break;
+
+                       default:
+                               /* No extra segments required for sends */
+                               break;
+                       }
+
+                       break;
+
+               case UD:
+                       memcpy(((struct mthca_arbel_ud_seg *) wqe)->av,
+                              to_mah((struct ib_ah *)wr->dgrm.ud.h_av)->av, MTHCA_AV_SIZE);
+                       ((struct mthca_arbel_ud_seg *) wqe)->dqpn = wr->dgrm.ud.remote_qp;
+                       ((struct mthca_arbel_ud_seg *) wqe)->qkey = wr->dgrm.ud.remote_qkey;
+
+                       wqe += sizeof (struct mthca_arbel_ud_seg);
+                       size += sizeof (struct mthca_arbel_ud_seg) / 16;
+                       break;
+
+               case MLX:
+                       err = build_mlx_header(dev, to_msqp(qp), ind, wr,
+                                              (void*)(wqe - sizeof (struct mthca_next_seg)),
+                                              (void*)wqe);
+                       if (err) {
+                               *bad_wr = wr;
+                               goto out;
+                       }
+                       wqe += sizeof (struct mthca_data_seg);
+                       size += sizeof (struct mthca_data_seg) / 16;
+                       break;
+               }
+
+               if ((int)wr->num_ds > qp->sq.max_gs) {
+                       HCA_PRINT(TRACE_LEVEL_ERROR  ,HCA_DBG_QP ,("SQ %06x too many gathers\n",qp->qpn));
+                       err = -EINVAL;
+                       *bad_wr = wr;
+                       goto out;
+               }
+
+               for (i = 0; i < (int)wr->num_ds; ++i) {
+                       ((struct mthca_data_seg *) wqe)->byte_count =
+                               cl_hton32(wr->ds_array[i].length);
+                       ((struct mthca_data_seg *) wqe)->lkey =
+                               cl_hton32(wr->ds_array[i].lkey);
+                       ((struct mthca_data_seg *) wqe)->addr =
+                               cl_hton64(wr->ds_array[i].vaddr);
+                       wqe += sizeof (struct mthca_data_seg);
+                       size += sizeof (struct mthca_data_seg) / 16;
+               }
+
+               /* Add one more inline data segment for ICRC */
+               if (qp->transport == MLX) {
+                       ((struct mthca_data_seg *) wqe)->byte_count =
+                               cl_hton32((unsigned long)((1 << 31) | 4));
+                       ((u32 *) wqe)[1] = 0;
+                       wqe += sizeof (struct mthca_data_seg);
+                       size += sizeof (struct mthca_data_seg) / 16;
+               }
+
+               qp->wrid[ind + qp->rq.max] = wr->wr_id;
+
+               if (opcode == MTHCA_OPCODE_INVALID) {
+                       HCA_PRINT(TRACE_LEVEL_ERROR  ,HCA_DBG_QP ,("SQ %06x opcode invalid\n",qp->qpn));
+                       err = -EINVAL;
+                       *bad_wr = wr;
+                       goto out;
+               }
+
+               ((struct mthca_next_seg *) prev_wqe)->nda_op =
+                       cl_hton32(((ind << qp->sq.wqe_shift) +
+                       qp->send_wqe_offset) |opcode);
+               wmb();
+               ((struct mthca_next_seg *) prev_wqe)->ee_nds =
+                       cl_hton32(MTHCA_NEXT_DBD | size);
+               
+               if (!size0) {
+                       size0 = size;
+                       op0   = opcode;
+               }
+
+               ++ind;
+               if (unlikely(ind >= qp->sq.max))
+                       ind -= qp->sq.max;
+       }
+
+out:
+       if (likely(nreq)) {
+               doorbell[0] = cl_hton32((nreq << 24) |
+                       ((qp->sq.head & 0xffff) << 8) |f0 | op0);
+               doorbell[1] = cl_hton32((qp->qpn << 8) | size0);
+               qp->sq.head += nreq;
+
+               /*
+                * Make sure that descriptors are written before
+                * doorbell record.
+                */
+               wmb();
+               *qp->sq.db = cl_hton32(qp->sq.head & 0xffff);
+
+               /*
+                * Make sure doorbell record is written before we
+                * write MMIO send doorbell.
+                */
+               wmb();
+               mthca_write64(doorbell,
+                             dev->kar + MTHCA_SEND_DOORBELL,
+                             MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
+       }
+
+       spin_unlock_irqrestore(&lh);
+       return err;
+}
+
+int mthca_arbel_post_receive(struct ib_qp *ibqp, struct _ib_recv_wr *wr,
+                            struct _ib_recv_wr **bad_wr)
+{
+       struct mthca_qp *qp = to_mqp(ibqp);
+       int err = 0;
+       int nreq;
+       int ind;
+       int i;
+       u8 *wqe;
+       SPIN_LOCK_PREP(lh);
+
+       spin_lock_irqsave(&qp->rq.lock, &lh);
+
+       /* XXX check that state is OK to post receive */
+
+       ind = qp->rq.head & (qp->rq.max - 1);
+
+       for (nreq = 0; wr; ++nreq, wr = wr->p_next) {
+               if (mthca_wq_overflow(&qp->rq, nreq, qp->ibqp.recv_cq)) {
+                       HCA_PRINT(TRACE_LEVEL_ERROR,HCA_DBG_QP,("RQ %06x full (%u head, %u tail,"
+                                       " %d max, %d nreq)\n", qp->qpn,
+                                       qp->rq.head, qp->rq.tail,
+                                       qp->rq.max, nreq));
+                       err = -ENOMEM;
+                       *bad_wr = wr;
+                       goto out;
+               }
+
+               wqe = get_recv_wqe(qp, ind);
+
+               ((struct mthca_next_seg *) wqe)->flags = 0;
+
+               wqe += sizeof (struct mthca_next_seg);
+
+               if (unlikely((int)wr->num_ds > qp->rq.max_gs)) {
+                       HCA_PRINT(TRACE_LEVEL_ERROR  ,HCA_DBG_QP ,("RQ %06x too many scatter entries\n",qp->qpn));
+                       err = -EINVAL;
+                       *bad_wr = wr;
+                       goto out;
+               }
+
+               for (i = 0; i < (int)wr->num_ds; ++i) {
+                       ((struct mthca_data_seg *) wqe)->byte_count =
+                               cl_hton32(wr->ds_array[i].length);
+                       ((struct mthca_data_seg *) wqe)->lkey =
+                               cl_hton32(wr->ds_array[i].lkey);
+                       ((struct mthca_data_seg *) wqe)->addr =
+                               cl_hton64(wr->ds_array[i].vaddr);
+                       wqe += sizeof (struct mthca_data_seg);
+               }
+
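+               /* Terminate a partially filled scatter list with a zero-length
+                * sentry entry (MTHCA_INVAL_LKEY), marking the end of the
+                * list for the HCA. */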
+               if (i < qp->rq.max_gs) {
+                       ((struct mthca_data_seg *) wqe)->byte_count = 0;
+                       ((struct mthca_data_seg *) wqe)->lkey = cl_hton32(MTHCA_INVAL_LKEY);
+                       ((struct mthca_data_seg *) wqe)->addr = 0;
+               }
+
+               qp->wrid[ind] = wr->wr_id;
+
+               ++ind;
+               if (unlikely(ind >= qp->rq.max))
+                       ind -= qp->rq.max;
+       }
+out:
+       if (likely(nreq)) {
+               qp->rq.head += nreq;
+
+               /*
+                * Make sure that descriptors are written before
+                * doorbell record.
+                */
+               wmb();
+               *qp->rq.db = cl_hton32(qp->rq.head & 0xffff);
+       }
+
+       spin_unlock_irqrestore(&lh);
+       return err;
+}
+
+int mthca_free_err_wqe(struct mthca_dev *dev, struct mthca_qp *qp, int is_send,
+                      int index, int *dbd, __be32 *new_wqe)
+{
+       struct mthca_next_seg *next;
+
+       UNREFERENCED_PARAMETER(dev);
+       
+       /*
+        * For SRQs, all WQEs generate a CQE, so we're always at the
+        * end of the doorbell chain.
+        */
+       if (qp->ibqp.srq) {
+               *new_wqe = 0;
+               return 0;
+       }
+
+       if (is_send)
+               next = get_send_wqe(qp, index);
+       else
+               next = get_recv_wqe(qp, index);
+
+       *dbd = !!(next->ee_nds & cl_hton32(MTHCA_NEXT_DBD));
+       if (next->ee_nds & cl_hton32(0x3f))
+               *new_wqe = (next->nda_op & cl_hton32((unsigned long)~0x3f)) |
+                       (next->ee_nds & cl_hton32(0x3f));
+       else
+               *new_wqe = 0;
+
+       return 0;
+}
+
+int mthca_init_qp_table(struct mthca_dev *dev)
+{
+       int err;
+       u8 status;
+       int i;
+
+       spin_lock_init(&dev->qp_table.lock);
+       fill_state_table();
+
+       /*
+        * We reserve 2 extra QPs per port for the special QPs.  The
+        * special QP for port 1 has to be even, so round up.
+        */
+       dev->qp_table.sqp_start = (dev->limits.reserved_qps + 1) & ~1UL;
+       err = mthca_alloc_init(&dev->qp_table.alloc,
+                              dev->limits.num_qps,
+                              (1 << 24) - 1,
+                              dev->qp_table.sqp_start +
+                              MTHCA_MAX_PORTS * 2);
+       if (err)
+               return err;
+
+       err = mthca_array_init(&dev->qp_table.qp,
+                              dev->limits.num_qps);
+       if (err) {
+               mthca_alloc_cleanup(&dev->qp_table.alloc);
+               return err;
+       }
+
+       for (i = 0; i < 2; ++i) {
+               err = mthca_CONF_SPECIAL_QP(dev, i ? IB_QPT_QP1 : IB_QPT_QP0,
+                                           dev->qp_table.sqp_start + i * 2,
+                                           &status);
+               if (err)
+                       goto err_out;
+               if (status) {
+                       HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_QP,("CONF_SPECIAL_QP returned "
+                                  "status %02x, aborting.\n",
+                                  status));
+                       err = -EINVAL;
+                       goto err_out;
+               }
+       }
+       return 0;
+
+ err_out:
+       mthca_CONF_SPECIAL_QP(dev, IB_QPT_QP1, 0, &status);
+       mthca_CONF_SPECIAL_QP(dev, IB_QPT_QP0, 0, &status);
+
+       mthca_array_cleanup(&dev->qp_table.qp, dev->limits.num_qps);
+       mthca_alloc_cleanup(&dev->qp_table.alloc);
+
+       return err;
+}
+
+void mthca_cleanup_qp_table(struct mthca_dev *dev)
+{
+       u8 status;
+
+       mthca_CONF_SPECIAL_QP(dev, IB_QPT_QP1, 0, &status);
+       mthca_CONF_SPECIAL_QP(dev, IB_QPT_QP0, 0, &status);
+
+       mthca_array_cleanup(&dev->qp_table.qp, dev->limits.num_qps);
+       mthca_alloc_cleanup(&dev->qp_table.alloc);
+}
+
diff --git a/trunk/hw/mthca/kernel/mthca_srq.c b/trunk/hw/mthca/kernel/mthca_srq.c
new file mode 100644 (file)
index 0000000..ca467b5
--- /dev/null
@@ -0,0 +1,671 @@
+/*
+ * Copyright (c) 2005 Cisco Systems. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id: mthca_srq.c 3047 2005-08-10 03:59:35Z roland $
+ */
+
+#include "mt_l2w.h"
+#include "mthca_dev.h"
+#if defined(EVENT_TRACING)
+#ifdef offsetof
+#undef offsetof
+#endif
+#include "mthca_srq.tmh"
+#endif
+#include "mthca_cmd.h"
+#include "mthca_memfree.h"
+#include "mthca_wqe.h"
+
+
+#ifdef ALLOC_PRAGMA
+#pragma alloc_text (PAGE, mthca_init_srq_table)
+#pragma alloc_text (PAGE, mthca_cleanup_srq_table)
+#endif
+
+enum {
+       MTHCA_MAX_DIRECT_SRQ_SIZE = 4 * PAGE_SIZE
+};
+
+struct mthca_tavor_srq_context {
+       __be64 wqe_base_ds;     /* low 6 bits is descriptor size */
+       __be32 state_pd;
+       __be32 lkey;
+       __be32 uar;
+       __be32 wqe_cnt;
+       u32    reserved[2];
+};
+
+struct mthca_arbel_srq_context {
+       __be32 state_logsize_srqn;
+       __be32 lkey;
+       __be32 db_index;
+       __be32 logstride_usrpage;
+       __be64 wqe_base;
+       __be32 eq_pd;
+       __be16 limit_watermark;
+       __be16 wqe_cnt;
+       u16    reserved1;
+       __be16 wqe_counter;
+       u32    reserved2[3];
+};
+
+static void *get_wqe(struct mthca_srq *srq, int n)
+{
+       if (srq->is_direct)
+               return (u8*)srq->queue.direct.page + (n << srq->wqe_shift);
+       else
+               return (u8*)srq->queue.page_list[(n << srq->wqe_shift) >> PAGE_SHIFT].page +
+                       ((n << srq->wqe_shift) & (PAGE_SIZE - 1));
+}
+
+/*
+ * Return a pointer to the location within a WQE that we're using as a
+ * link when the WQE is in the free list.  We use the imm field
+ * because in the Tavor case, posting a WQE may overwrite the next
+ * segment of the previous WQE, but a receive WQE will never touch the
+ * imm field.  This avoids corrupting our free list if the previous
+ * WQE has already completed and been put on the free list when we
+ * post the next WQE.
+ */
+static inline int *wqe_to_link(void *wqe)
+{
+       return (int *) ((u8*)wqe + offsetof(struct mthca_next_seg, imm));
+}
+
+static void mthca_tavor_init_srq_context(struct mthca_dev *dev,
+                                        struct mthca_pd *pd,
+                                        struct mthca_srq *srq,
+                                        struct mthca_tavor_srq_context *context)
+{
+       CPU_2_BE64_PREP;
+
+       RtlZeroMemory(context, sizeof *context);
+
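+       /* Low 6 bits of wqe_base_ds hold the descriptor size in 16-byte
+        * units; (1 << (wqe_shift - 4)) encodes the SRQ WQE stride. */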
+       context->wqe_base_ds = CPU_2_BE64(1ULL  << (srq->wqe_shift - 4));
+       context->state_pd    = cl_hton32(pd->pd_num);
+       context->lkey        = cl_hton32(srq->mr.ibmr.lkey);
+
+       if (pd->ibpd.ucontext)
+               context->uar =
+                       cl_hton32(to_mucontext(pd->ibpd.ucontext)->uar.index);
+       else
+               context->uar = cl_hton32(dev->driver_uar.index);
+}
+
+static void mthca_arbel_init_srq_context(struct mthca_dev *dev,
+                                        struct mthca_pd *pd,
+                                        struct mthca_srq *srq,
+                                        struct mthca_arbel_srq_context *context)
+{
+       int logsize;
+
+       RtlZeroMemory(context, sizeof *context);
+
+       logsize = long_log2(srq->max) + srq->wqe_shift;
+       context->state_logsize_srqn = cl_hton32(logsize << 24 | srq->srqn);
+       context->lkey = cl_hton32(srq->mr.ibmr.lkey);
+       context->db_index = cl_hton32(srq->db_index);
+       context->logstride_usrpage = cl_hton32((srq->wqe_shift - 4) << 29);
+       if (pd->ibpd.ucontext)
+               context->logstride_usrpage |=
+                       cl_hton32(to_mucontext(pd->ibpd.ucontext)->uar.index);
+       else
+               context->logstride_usrpage |= cl_hton32(dev->driver_uar.index);
+       context->eq_pd = cl_hton32(MTHCA_EQ_ASYNC << 24 | pd->pd_num);
+}
+
+static void mthca_free_srq_buf(struct mthca_dev *dev, struct mthca_srq *srq)
+{
+       mthca_buf_free(dev, srq->max << srq->wqe_shift, &srq->queue,
+                      srq->is_direct, &srq->mr);
+       kfree(srq->wrid);
+}
+
+static int mthca_alloc_srq_buf(struct mthca_dev *dev, struct mthca_pd *pd,
+                              struct mthca_srq *srq)
+{
+       struct mthca_data_seg *scatter;
+       u8 *wqe;
+       int err;
+       int i;
+
+       if (pd->ibpd.ucontext)
+               return 0;
+
+       srq->wrid = kmalloc(srq->max * sizeof (u64), GFP_KERNEL);
+       if (!srq->wrid)
+               return -ENOMEM;
+
+       err = mthca_buf_alloc(dev, srq->max << srq->wqe_shift,
+                             MTHCA_MAX_DIRECT_SRQ_SIZE,
+                             &srq->queue, &srq->is_direct, pd, 1, &srq->mr);
+       if (err) {
+               kfree(srq->wrid);
+               return err;
+       }
+
+       /*
+        * Now initialize the SRQ buffer so that all of the WQEs are
+        * linked into the list of free WQEs.  In addition, set the
+        * scatter list L_Keys to the sentry value of 0x100.
+        */
+       for (i = 0; i < srq->max; ++i) {
+               wqe = get_wqe(srq, i);
+
+               *wqe_to_link(wqe) = i < srq->max - 1 ? i + 1 : -1;
+
+               for (scatter = (struct mthca_data_seg *)(wqe + sizeof (struct mthca_next_seg));
+                    (void *) scatter < (void*)(wqe + (1 << srq->wqe_shift));
+                    ++scatter)
+                       scatter->lkey = cl_hton32(MTHCA_INVAL_LKEY);
+       }
+
+       srq->last = get_wqe(srq, srq->max - 1);
+
+       return 0;
+}
+
+int mthca_alloc_srq(struct mthca_dev *dev, struct mthca_pd *pd,
+                   struct ib_srq_attr *attr, struct mthca_srq *srq)
+{
+       struct mthca_mailbox *mailbox;
+       u8 status;
+       int ds;
+       int err;
+       SPIN_LOCK_PREP(lh);
+
+       /* Sanity check SRQ size before proceeding */
+       if ((int)attr->max_wr  > dev->limits.max_srq_wqes ||
+               (int)attr->max_sge > dev->limits.max_sg)
+               return -EINVAL;
+
+       srq->max      = attr->max_wr;
+       srq->max_gs   = attr->max_sge;
+       srq->counter  = 0;
+
+       if (mthca_is_memfree(dev))
+               srq->max = roundup_pow_of_two(srq->max + 1);
+
+       ds = max(64UL,
+                roundup_pow_of_two(sizeof (struct mthca_next_seg) +
+                                   srq->max_gs * sizeof (struct mthca_data_seg)));
+       srq->wqe_shift = long_log2(ds);
+
+       srq->srqn = mthca_alloc(&dev->srq_table.alloc);
+       if (srq->srqn == -1)
+               return -ENOMEM;
+
+       if (mthca_is_memfree(dev)) {
+               err = mthca_table_get(dev, dev->srq_table.table, srq->srqn);
+               if (err)
+                       goto err_out;
+
+               if (!pd->ibpd.ucontext) {
+                       srq->db_index = mthca_alloc_db(dev, MTHCA_DB_TYPE_SRQ,
+                                                      srq->srqn, &srq->db);
+                       if (srq->db_index < 0) {
+                               err = -ENOMEM;
+                               goto err_out_icm;
+                       }
+               }
+       }
+
+       mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
+       if (IS_ERR(mailbox)) {
+               err = PTR_ERR(mailbox);
+               goto err_out_db;
+       }
+
+       err = mthca_alloc_srq_buf(dev, pd, srq);
+       if (err)
+               goto err_out_mailbox;
+
+       spin_lock_init(&srq->lock);
+       atomic_set(&srq->refcount, 1);
+       init_waitqueue_head(&srq->wait);
+
+       if (mthca_is_memfree(dev))
+               mthca_arbel_init_srq_context(dev, pd, srq, mailbox->buf);
+       else
+               mthca_tavor_init_srq_context(dev, pd, srq, mailbox->buf);
+
+       err = mthca_SW2HW_SRQ(dev, mailbox, srq->srqn, &status);
+
+       if (err) {
+               HCA_PRINT(TRACE_LEVEL_WARNING  ,HCA_DBG_LOW  ,("SW2HW_SRQ failed (%d)\n", err));
+               goto err_out_free_buf;
+       }
+       if (status) {
+               HCA_PRINT(TRACE_LEVEL_WARNING,HCA_DBG_LOW,("SW2HW_SRQ returned status 0x%02x\n",
+                          status));
+               err = -EINVAL;
+               goto err_out_free_buf;
+       }
+
+       spin_lock_irq(&dev->srq_table.lock, &lh);
+       if (mthca_array_set(&dev->srq_table.srq,
+                           srq->srqn & (dev->limits.num_srqs - 1),
+                           srq)) {
+               spin_unlock_irq(&lh);
+               goto err_out_free_srq;
+       }
+       spin_unlock_irq(&lh);
+
+       mthca_free_mailbox(dev, mailbox);
+
+       srq->first_free = 0;
+       srq->last_free  = srq->max - 1;
+
+       return 0;
+
+err_out_free_srq:
+       err = mthca_HW2SW_SRQ(dev, mailbox, srq->srqn, &status);
+       if (err){
+               HCA_PRINT(TRACE_LEVEL_WARNING  ,HCA_DBG_LOW  ,("HW2SW_SRQ failed (%d)\n", err));
+       }else if (status){
+               HCA_PRINT(TRACE_LEVEL_WARNING  ,HCA_DBG_LOW  ,("HW2SW_SRQ returned status 0x%02x\n", status));
+       }
+
+err_out_free_buf:
+       if (!pd->ibpd.ucontext)
+               mthca_free_srq_buf(dev, srq);
+
+err_out_mailbox:
+       mthca_free_mailbox(dev, mailbox);
+
+err_out_db:
+       if (!pd->ibpd.ucontext && mthca_is_memfree(dev))
+               mthca_free_db(dev, MTHCA_DB_TYPE_SRQ, srq->db_index);
+
+err_out_icm:
+       mthca_table_put(dev, dev->srq_table.table, srq->srqn);
+
+err_out:
+       mthca_free(&dev->srq_table.alloc, srq->srqn);
+
+       return err;
+}
+
+void mthca_free_srq(struct mthca_dev *dev, struct mthca_srq *srq)
+{
+       struct mthca_mailbox *mailbox;
+       int err;
+       u8 status;
+       SPIN_LOCK_PREP(lh);
+
+       mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
+       if (IS_ERR(mailbox)) {
+               HCA_PRINT(TRACE_LEVEL_WARNING  ,HCA_DBG_LOW  ,("No memory for mailbox to free SRQ.\n"));
+               return;
+       }
+
+       err = mthca_HW2SW_SRQ(dev, mailbox, srq->srqn, &status);
+       if (err){
+               HCA_PRINT(TRACE_LEVEL_WARNING  ,HCA_DBG_LOW  ,("HW2SW_SRQ failed (%d)\n", err));
+       }else if (status){
+               HCA_PRINT(TRACE_LEVEL_WARNING  ,HCA_DBG_LOW  ,("HW2SW_SRQ returned status 0x%02x\n", status));
+       }
+
+       spin_lock_irq(&dev->srq_table.lock, &lh);
+       mthca_array_clear(&dev->srq_table.srq,
+                         srq->srqn & (dev->limits.num_srqs - 1));
+       spin_unlock_irq(&lh);
+
+       atomic_dec(&srq->refcount);
+       wait_event(&srq->wait, !atomic_read(&srq->refcount));
+
+       if (!srq->ibsrq.uobject) {
+               mthca_free_srq_buf(dev, srq);
+               if (mthca_is_memfree(dev))
+                       mthca_free_db(dev, MTHCA_DB_TYPE_SRQ, srq->db_index);
+       }
+
+       mthca_table_put(dev, dev->srq_table.table, srq->srqn);
+       mthca_free(&dev->srq_table.alloc, srq->srqn);
+       mthca_free_mailbox(dev, mailbox);
+}
+
+int mthca_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
+                    enum ib_srq_attr_mask attr_mask)
+{      
+       struct mthca_dev *dev = to_mdev(ibsrq->device);
+       struct mthca_srq *srq = to_msrq(ibsrq);
+       int ret;
+       u8 status;
+
+       /* We don't support resizing SRQs (yet?) */
+       if (attr_mask & IB_SRQ_MAX_WR)
+               return -EINVAL;
+
+       if (attr_mask & IB_SRQ_LIMIT) {
+               ret = mthca_ARM_SRQ(dev, srq->srqn, attr->srq_limit, &status);
+               if (ret)
+                       return ret;
+               if (status)
+                       return -EINVAL;
+       }
+
+       return 0;
+}
+
+void mthca_srq_event(struct mthca_dev *dev, u32 srqn,
+                    enum ib_event_type event_type)
+{
+       struct mthca_srq *srq;
+       struct ib_event event;
+       SPIN_LOCK_PREP(lh);
+
+       spin_lock(&dev->srq_table.lock, &lh);
+       srq = mthca_array_get(&dev->srq_table.srq, srqn & (dev->limits.num_srqs - 1));
+       if (srq)
+               atomic_inc(&srq->refcount);
+       spin_unlock(&lh);
+
+       if (!srq) {
+               HCA_PRINT(TRACE_LEVEL_WARNING  ,HCA_DBG_LOW  ,("Async event for bogus SRQ %08x\n", srqn));
+               return;
+       }
+
+       if (!srq->ibsrq.event_handler)
+               goto out;
+
+       event.device      = &dev->ib_dev;
+       event.event       = event_type;
+       event.element.srq  = &srq->ibsrq;
+       srq->ibsrq.event_handler(&event, srq->ibsrq.srq_context);
+
+out:
+       if (atomic_dec_and_test(&srq->refcount))
+               wake_up(&srq->wait);
+}
+
+/*
+ * This function must be called with IRQs disabled.
+ */
+void mthca_free_srq_wqe(struct mthca_srq *srq, u32 wqe_addr)
+{
+       int ind;
+       SPIN_LOCK_PREP(lh);
+
+       ind = wqe_addr >> srq->wqe_shift;
+
+       spin_lock(&srq->lock, &lh);
+
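+       /* Append the freed WQE to the tail of the free list; if the list
+        * was empty, it also becomes the new head. */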
+       if (likely(srq->first_free >= 0))
+               *wqe_to_link(get_wqe(srq, srq->last_free)) = ind;
+       else
+               srq->first_free = ind;
+
+       *wqe_to_link(get_wqe(srq, ind)) = -1;
+       srq->last_free = ind;
+
+       spin_unlock(&lh);
+}
+
+//TODO: is this code correct at all ?
+int mthca_tavor_post_srq_recv(struct ib_srq *ibsrq, struct _ib_recv_wr *wr,
+                             struct _ib_recv_wr **bad_wr)
+{
+       struct mthca_dev *dev = to_mdev(ibsrq->device);
+       struct mthca_srq *srq = to_msrq(ibsrq);
+       __be32 doorbell[2];     
+       int err = 0;
+       int first_ind;
+       int ind;
+       int next_ind;
+       int nreq;
+       int i;
+       u8 *wqe;
+       u8 *prev_wqe;
+       SPIN_LOCK_PREP(lh);
+
+       spin_lock_irqsave(&srq->lock, &lh);
+
+       first_ind = srq->first_free;
+
+       for (nreq = 0; wr; ++nreq, wr = wr->p_next) {
+               if (unlikely(nreq == MTHCA_TAVOR_MAX_WQES_PER_RECV_DB)) {
+                       nreq = 0;
+
+                       doorbell[0] = cl_hton32(first_ind << srq->wqe_shift);
+                       doorbell[1] = cl_hton32(srq->srqn << 8);
+
+                       /*
+                        * Make sure that descriptors are written
+                        * before doorbell is rung.
+                        */
+                       wmb();
+
+                       mthca_write64(doorbell,
+                                     dev->kar + MTHCA_RECEIVE_DOORBELL,
+                                     MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
+
+                       first_ind = srq->first_free;
+               }
+
+               ind = srq->first_free;
+
+               if (ind < 0) {
+                       HCA_PRINT(TRACE_LEVEL_ERROR  ,HCA_DBG_QP ,("SRQ %06x full\n", srq->srqn));
+                       err = -ENOMEM;
+                       *bad_wr = wr;
+                       goto out;
+               }
+
+               wqe       = get_wqe(srq, ind);
+               next_ind  = *wqe_to_link(wqe);
+
+               if (next_ind < 0) {
+                       HCA_PRINT(TRACE_LEVEL_ERROR  ,HCA_DBG_QP  ,("SRQ %06x full\n", srq->srqn));
+                       err = -ENOMEM;
+                       *bad_wr = wr;
+                       break;
+               }
+
+               prev_wqe  = srq->last;
+               srq->last = wqe;
+
+               ((struct mthca_next_seg *) wqe)->nda_op = 0;
+               ((struct mthca_next_seg *) wqe)->ee_nds = 0;
+               /* flags field will always remain 0 */
+
+               wqe += sizeof (struct mthca_next_seg);
+
+               if (unlikely((int)wr->num_ds > srq->max_gs)) {
+                       err = -EINVAL;
+                       *bad_wr = wr;
+                       srq->last = prev_wqe;
+                       goto out;
+               }
+
+               for (i = 0; i < (int)wr->num_ds; ++i) {
+                       ((struct mthca_data_seg *) wqe)->byte_count =
+                               cl_hton32(wr->ds_array[i].length);
+                       ((struct mthca_data_seg *) wqe)->lkey =
+                               cl_hton32(wr->ds_array[i].lkey);
+                       ((struct mthca_data_seg *) wqe)->addr =
+                               cl_hton64(wr->ds_array[i].vaddr);
+                       wqe += sizeof (struct mthca_data_seg);
+               }
+
+               if (i < srq->max_gs) {
+                       ((struct mthca_data_seg *) wqe)->byte_count = 0;
+                       ((struct mthca_data_seg *) wqe)->lkey = cl_hton32(MTHCA_INVAL_LKEY);
+                       ((struct mthca_data_seg *) wqe)->addr = 0;
+               }
+
+               ((struct mthca_next_seg *) prev_wqe)->nda_op =
+                       cl_hton32((ind << srq->wqe_shift) | 1);
+               wmb();
+               ((struct mthca_next_seg *) prev_wqe)->ee_nds =
+                       cl_hton32(MTHCA_NEXT_DBD);
+
+               srq->wrid[ind]  = wr->wr_id;
+               srq->first_free = next_ind;
+       }
+
+out:
+       if (likely(nreq)) {
+               doorbell[0] = cl_hton32(first_ind << srq->wqe_shift);
+               doorbell[1] = cl_hton32((srq->srqn << 8) | nreq);
+
+               /*
+                * Make sure that descriptors are written before
+                * doorbell is rung.
+                */
+               wmb();
+
+               mthca_write64(doorbell,
+                             dev->kar + MTHCA_RECEIVE_DOORBELL,
+                             MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
+       }
+
+       spin_unlock_irqrestore(&lh);
+       return err;
+}
+
+//TODO: is this code correct at all ?
+int mthca_arbel_post_srq_recv(struct ib_srq *ibsrq, struct _ib_recv_wr *wr,
+                             struct _ib_recv_wr **bad_wr)
+{
+       struct mthca_srq *srq = to_msrq(ibsrq);
+       int err = 0;
+       int ind;
+       int next_ind;
+       int nreq;
+       int i;
+       u8 *wqe;
+       SPIN_LOCK_PREP(lh);
+
+       spin_lock_irqsave(&srq->lock, &lh);
+
+       for (nreq = 0; wr; ++nreq, wr = wr->p_next) {
+               ind = srq->first_free;
+
+               if (ind < 0) {
+                       HCA_PRINT(TRACE_LEVEL_ERROR  ,HCA_DBG_QP ,("SRQ %06x full\n", srq->srqn));
+                       err = -ENOMEM;
+                       *bad_wr = wr;
+                       goto out;
+               }
+
+               wqe       = get_wqe(srq, ind);
+               next_ind  = *wqe_to_link(wqe);
+
+               if (next_ind < 0) {
+                       HCA_PRINT(TRACE_LEVEL_ERROR  ,HCA_DBG_LOW  ,("SRQ %06x full\n", srq->srqn));
+                       err = -ENOMEM;
+                       *bad_wr = wr;
+                       break;
+               }
+
+               ((struct mthca_next_seg *) wqe)->nda_op = 
+                       cl_hton32((next_ind << srq->wqe_shift) | 1);
+               ((struct mthca_next_seg *) wqe)->ee_nds = 0;
+               /* flags field will always remain 0 */
+
+               wqe += sizeof (struct mthca_next_seg);
+
+               if (unlikely((int)wr->num_ds > srq->max_gs)) {
+                       err = -EINVAL;
+                       *bad_wr = wr;
+                       goto out;
+               }
+
+               for (i = 0; i < (int)wr->num_ds; ++i) {
+                       ((struct mthca_data_seg *) wqe)->byte_count =
+                               cl_hton32(wr->ds_array[i].length);
+                       ((struct mthca_data_seg *) wqe)->lkey =
+                               cl_hton32(wr->ds_array[i].lkey);
+                       ((struct mthca_data_seg *) wqe)->addr =
+                               cl_hton64(wr->ds_array[i].vaddr);
+                       wqe += sizeof (struct mthca_data_seg);
+               }
+
+               if (i < srq->max_gs) {
+                       ((struct mthca_data_seg *) wqe)->byte_count = 0;
+                       ((struct mthca_data_seg *) wqe)->lkey = cl_hton32(MTHCA_INVAL_LKEY);
+                       ((struct mthca_data_seg *) wqe)->addr = 0;
+               }
+
+               srq->wrid[ind]  = wr->wr_id;
+               srq->first_free = next_ind;
+       }
+
+out:   
+       if (likely(nreq)) {
+               srq->counter = srq->counter + (u16)nreq;
+
+               /*
+                * Make sure that descriptors are written before
+                * we write doorbell record.
+                */
+               wmb();
+               *srq->db = cl_hton32(srq->counter);
+       }
+
+       spin_unlock_irqrestore(&lh);
+       return err;
+}
+
+int mthca_init_srq_table(struct mthca_dev *dev)
+{
+       int err;
+
+       if (!(dev->mthca_flags & MTHCA_FLAG_SRQ))
+               return 0;
+
+       spin_lock_init(&dev->srq_table.lock);
+
+       err = mthca_alloc_init(&dev->srq_table.alloc,
+                              dev->limits.num_srqs,
+                              dev->limits.num_srqs - 1,
+                              dev->limits.reserved_srqs);
+       if (err)
+               return err;
+
+       err = mthca_array_init(&dev->srq_table.srq,
+                              dev->limits.num_srqs);
+       if (err)
+               mthca_alloc_cleanup(&dev->srq_table.alloc);
+
+       return err;
+}
+
+void mthca_cleanup_srq_table(struct mthca_dev *dev)
+{
+       if (!(dev->mthca_flags & MTHCA_FLAG_SRQ))
+               return;
+
+       mthca_array_cleanup(&dev->srq_table.srq, dev->limits.num_srqs);
+       mthca_alloc_cleanup(&dev->srq_table.alloc);
+}
+
diff --git a/trunk/hw/mthca/kernel/mthca_uar.c b/trunk/hw/mthca/kernel/mthca_uar.c
new file mode 100644 (file)
index 0000000..a68644e
--- /dev/null
@@ -0,0 +1,78 @@
+/*
+ * Copyright (c) 2005 Topspin Communications.  All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id: mthca_uar.c 2643 2005-06-16 22:48:17Z roland $
+ */
+
+#include "mthca_dev.h"
+#include "mthca_memfree.h"
+
+int mthca_uar_alloc(struct mthca_dev *dev, struct mthca_uar *uar)
+{
+       uar->index = mthca_alloc(&dev->uar_table.alloc);
+       if (uar->index == -1)
+               return -ENOMEM;
+
+       uar->pfn = (unsigned long)(pci_resource_start(dev, HCA_BAR_TYPE_UAR) >> PAGE_SHIFT) + uar->index;
+
+       return 0;
+}
+
+void mthca_uar_free(struct mthca_dev *dev, struct mthca_uar *uar)
+{
+       mthca_free(&dev->uar_table.alloc, uar->index);
+}
+
+int mthca_init_uar_table(struct mthca_dev *dev)
+{
+       int ret;
+
+       ret = mthca_alloc_init(&dev->uar_table.alloc,
+                              dev->limits.num_uars,
+                              dev->limits.num_uars - 1,
+                              dev->limits.reserved_uars);
+       if (ret)
+               return ret;
+
+       ret = mthca_init_db_tab(dev);
+       if (ret)
+               mthca_alloc_cleanup(&dev->uar_table.alloc);
+
+       return ret;
+}
+
+void mthca_cleanup_uar_table(struct mthca_dev *dev)
+{
+       mthca_cleanup_db_tab(dev);
+
+       /* XXX check if any UARs are still allocated? */
+       mthca_alloc_cleanup(&dev->uar_table.alloc);
+}
diff --git a/trunk/hw/mthca/kernel/mthca_user.h b/trunk/hw/mthca/kernel/mthca_user.h
new file mode 100644 (file)
index 0000000..e603f47
--- /dev/null
@@ -0,0 +1,67 @@
+/*
+ * Copyright (c) 2005 Topspin Communications.  All rights reserved.
+ * Copyright (c) 2005 Cisco Systems.  All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#ifndef MTHCA_USER_H
+#define MTHCA_USER_H
+
+/*
+ * Make sure that all structs defined in this file remain laid out so
+ * that they pack the same way on 32-bit and 64-bit architectures (to
+ * avoid incompatibility between 32-bit userspace and 64-bit kernels).
+ * In particular do not use pointer types -- pass pointers in u64
+ * instead.
+ */
+
+struct mthca_alloc_ucontext_resp {
+       uint64_t uar_addr;
+       u64 pd_handle;
+       u32 pdn;
+       u32 qp_tab_size;
+       u32 uarc_size;
+       u32 vend_id;
+       u16 dev_id;
+};
+
+struct mthca_create_srq {
+       u32 lkey;
+       u32 db_index;
+       u64 db_page;
+};
+
+struct mthca_create_srq_resp {
+       u32 srqn;
+       u32 reserved;
+};
+
+#endif /* MTHCA_USER_H */
diff --git a/trunk/hw/mthca/kernel/mthca_wqe.h b/trunk/hw/mthca/kernel/mthca_wqe.h
new file mode 100644 (file)
index 0000000..7f162ce
--- /dev/null
@@ -0,0 +1,114 @@
+/*
+ * Copyright (c) 2005 Cisco Systems. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id: mthca_wqe.h 3047 2005-08-10 03:59:35Z roland $
+ */
+
+#ifndef MTHCA_WQE_H
+#define MTHCA_WQE_H
+
+enum {
+       MTHCA_NEXT_DBD       = 1 << 7,
+       MTHCA_NEXT_FENCE     = 1 << 6,
+       MTHCA_NEXT_CQ_UPDATE = 1 << 3,
+       MTHCA_NEXT_EVENT_GEN = 1 << 2,
+       MTHCA_NEXT_SOLICIT   = 1 << 1,
+
+       MTHCA_MLX_VL15       = 1 << 17,
+       MTHCA_MLX_SLR        = 1 << 16
+};
+
+enum {
+       MTHCA_INVAL_LKEY = 0x100,
+       MTHCA_TAVOR_MAX_WQES_PER_RECV_DB = 256,
+       MTHCA_ARBEL_MAX_WQES_PER_SEND_DB = 255
+};
+
+struct mthca_next_seg {
+       __be32 nda_op;          /* [31:6] next WQE [4:0] next opcode */
+       __be32 ee_nds;          /* [31:8] next EE  [7] DBD [6] F [5:0] next WQE size */
+       __be32 flags;           /* [3] CQ [2] Event [1] Solicit */
+       __be32 imm;             /* immediate data */
+};
+
+struct mthca_tavor_ud_seg {
+       u32    reserved1;
+       __be32 lkey;
+       __be64 av_addr;
+       u32    reserved2[4];
+       __be32 dqpn;
+       __be32 qkey;
+       u32    reserved3[2];
+};
+
+struct mthca_arbel_ud_seg {
+       __be32 av[8];
+       __be32 dqpn;
+       __be32 qkey;
+       u32    reserved[2];
+};
+
+struct mthca_bind_seg {
+       __be32 flags;           /* [31] Atomic [30] rem write [29] rem read */
+       u32    reserved;
+       __be32 new_rkey;
+       __be32 lkey;
+       __be64 addr;
+       __be64 length;
+};
+
+struct mthca_raddr_seg {
+       __be64 raddr;
+       __be32 rkey;
+       u32    reserved;
+};
+
+struct mthca_atomic_seg {
+       __be64 swap_add;
+       __be64 compare;
+};
+
+struct mthca_data_seg {
+       __be32 byte_count;
+       __be32 lkey;
+       __be64 addr;
+};
+
+struct mthca_mlx_seg {
+       __be32 nda_op;
+       __be32 nds;
+       __be32 flags;           /* [17] VL15 [16] SLR [14:12] static rate
+                                  [11:8] SL [3] C [2] E */
+       __be16 rlid;
+       __be16 vcrc;
+};
+
+#endif /* MTHCA_WQE_H */
diff --git a/trunk/hw/mthca/mx_abi.h b/trunk/hw/mthca/mx_abi.h
new file mode 100644 (file)
index 0000000..66b2bc7
--- /dev/null
@@ -0,0 +1,166 @@
+/*
+ * Copyright (c) 2005 Topspin Communications.  All rights reserved.
+ * Copyright (c) 2005 Cisco Systems.  All rights reserved.
+ * Copyright (c) 2005 PathScale, Inc.  All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id: kern-abi.h 4019 2005-11-11 00:33:09Z sean.hefty $
+ */
+
+#ifndef MX_ABI_H
+#define MX_ABI_H
+
+#include <cl_types_osd.h>
+
+/*
+ * Make sure that all structs defined in this file remain laid out so
+ * that they pack the same way on 32-bit and 64-bit architectures (to
+ * avoid incompatibility between 32-bit userspace and 64-bit kernels).
+ * Specifically:
+ *  - Do not use pointer types -- pass pointers in uint64_t instead.
+ *  - Make sure that any structure larger than 4 bytes is padded to a
+ *    multiple of 8 bytes.  Otherwise the structure size will be
+ *    different between 32-bit and 64-bit architectures.
+ */
+
+struct mthca_alloc_ucontext_resp {
+       uint64_t uar_addr;
+       uint64_t pd_handle;
+       uint32_t pdn;
+       uint32_t qp_tab_size;
+       uint32_t uarc_size;
+       uint32_t vend_id;
+       uint16_t dev_id;
+       uint16_t reserved[3];
+};
+
+struct ibv_get_context_resp {
+       uint64_t uar_addr;
+       uint64_t pd_handle;
+       uint32_t pdn;
+       uint32_t qp_tab_size;
+       uint32_t uarc_size;
+       uint32_t vend_id;
+       uint16_t dev_id;
+};
+
+struct ibv_alloc_pd_resp {
+       uint64_t pd_handle;
+       uint32_t pdn;
+       uint32_t reserved;
+};
+
+struct ibv_reg_mr {
+       uint64_t start;
+       uint64_t length;
+       uint64_t hca_va;
+       uint32_t access_flags;
+       uint32_t pdn;
+       uint64_t pd_handle;
+};
+
+struct ibv_reg_mr_resp {
+       uint64_t mr_handle;
+       uint32_t lkey;
+       uint32_t rkey;
+};
+
+struct ibv_create_cq {
+       struct ibv_reg_mr mr;   
+       uint64_t arm_db_page;
+       uint64_t set_db_page;
+       uint32_t arm_db_index;
+       uint32_t set_db_index;
+       uint64_t user_handle;
+       uint32_t cqe;
+       uint32_t lkey;          /* used only by kernel */
+};
+
+struct ibv_create_cq_resp {
+       uint64_t user_handle;
+       uint64_t cq_handle;
+       struct ibv_reg_mr_resp mr;
+       uint32_t cqe;
+       uint32_t cqn;
+};
+
+struct ibv_create_qp {
+       uint64_t sq_db_page;
+       uint64_t rq_db_page;
+       uint32_t sq_db_index;
+       uint32_t rq_db_index;
+       struct ibv_reg_mr mr;
+       uint64_t user_handle;
+       uint64_t send_cq_handle;
+       uint64_t recv_cq_handle;
+       uint64_t srq_handle;
+       uint32_t max_send_wr;
+       uint32_t max_recv_wr;
+       uint32_t max_send_sge;
+       uint32_t max_recv_sge;
+       uint32_t max_inline_data;
+       uint32_t lkey;  /* used only in kernel */
+       uint8_t  sq_sig_all;
+       uint8_t  qp_type;
+       uint8_t  is_srq;
+       uint8_t  reserved[5];
+};
+
+struct ibv_create_qp_resp {
+       struct ibv_reg_mr_resp mr;
+       uint64_t user_handle;
+       uint64_t qp_handle;
+       uint32_t qpn;
+       uint32_t max_send_wr;
+       uint32_t max_recv_wr;
+       uint32_t max_send_sge;
+       uint32_t max_recv_sge;
+       uint32_t max_inline_data;
+};
+
+struct ibv_modify_qp_resp {
+       enum ibv_qp_attr_mask attr_mask;
+       uint8_t qp_state;
+       uint8_t reserved[3];
+};
+
+struct ibv_create_ah {
+       struct ibv_reg_mr mr;   
+};
+
+struct ibv_create_ah_resp {
+       uint64_t start;
+       struct ibv_reg_mr_resp mr;
+       ib_av_attr_t            av_attr;
+};
+
+
+#endif /* MX_ABI_H */
+
diff --git a/trunk/hw/mthca/user/Makefile b/trunk/hw/mthca/user/Makefile
new file mode 100644 (file)
index 0000000..bffacaa
--- /dev/null
@@ -0,0 +1,7 @@
+#\r
+# DO NOT EDIT THIS FILE!!!  Edit .\sources. if you want to add a new source\r
+# file to this component.  This file merely indirects to the real make file\r
+# that is shared by all the driver components of the OpenIB Windows project.\r
+#\r
+\r
+!INCLUDE ..\..\..\inc\openib.def\r
diff --git a/trunk/hw/mthca/user/SOURCES b/trunk/hw/mthca/user/SOURCES
new file mode 100644 (file)
index 0000000..48d292e
--- /dev/null
@@ -0,0 +1,69 @@
+TRUNK=..\..\..\r
+\r
+!if $(FREEBUILD)\r
+TARGETNAME=mthcau\r
+!else\r
+TARGETNAME=mthcaud\r
+!endif\r
+\r
+TARGETPATH=$(TRUNK)\bin\user\obj$(BUILD_ALT_DIR)\r
+TARGETTYPE=DYNLINK\r
+DLLDEF=$(O)\mlnx_uvp.def\r
+USE_MSVCRT=1\r
+DLLENTRY=DllMain\r
+\r
+#ENABLE_EVENT_TRACING=1\r
+\r
+SOURCES= \\r
+       mlnx_uvp.rc \\r
+       mlnx_ual_av.c \\r
+       mlnx_ual_ca.c \\r
+       mlnx_ual_cq.c \\r
+       mlnx_ual_main.c \\r
+       mlnx_ual_mcast.c \\r
+       mlnx_ual_mrw.c \\r
+       mlnx_ual_osbypass.c \\r
+       mlnx_ual_pd.c \\r
+       mlnx_ual_qp.c   \\r
+                               \\r
+       mlnx_uvp_debug.c \\r
+       mlnx_uvp.c \\r
+       mlnx_uvp_ah.c \\r
+       mlnx_uvp_cq.c \\r
+       mlnx_uvp_memfree.c \\r
+       mlnx_uvp_qp.c \\r
+       mlnx_uvp_srq.c \\r
+       mlnx_uvp_verbs.c \r
+\r
+INCLUDES= \\r
+       ..; \\r
+       $(TRUNK)\inc\user; \\r
+       $(TRUNK)\inc\complib; \\r
+       $(TRUNK)\inc\user\complib; \\r
+       $(TRUNK)\inc;   \\r
+\r
+USER_C_FLAGS=$(USER_C_FLAGS) /DCL_NO_TRACK_MEM\r
+\r
+TARGETLIBS=\\r
+       $(SDK_LIB_PATH)\user32.lib \\r
+       $(SDK_LIB_PATH)\kernel32.lib \\r
+       $(SDK_LIB_PATH)\Advapi32.lib \\r
+!if $(FREEBUILD)\r
+       $(TARGETPATH)\*\complib.lib \\r
+       $(TARGETPATH)\*\ibal.lib\r
+!else\r
+       $(TARGETPATH)\*\complibd.lib \\r
+       $(TARGETPATH)\*\ibald.lib\r
+!endif\r
+\r
+\r
+!IFDEF ENABLE_EVENT_TRACING\r
+\r
+C_DEFINES = $(C_DEFINES) -DEVENT_TRACING\r
+\r
+RUN_WPP= -ext:.c.h $(SOURCES) \\r
+       -scan:mlnx_uvp_debug.h \\r
+       -func:UVP_PRINT(LEVEL,FLAGS,(MSG,...)) \r
+!ENDIF\r
+\r
+MSC_WARNING_LEVEL= /W3\r
diff --git a/trunk/hw/mthca/user/arch.h b/trunk/hw/mthca/user/arch.h
new file mode 100644 (file)
index 0000000..ed0e611
--- /dev/null
@@ -0,0 +1,53 @@
+/*
+ * Copyright (c) 2005 Topspin Communications.  All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id: arch.h 3340 2005-09-07 22:17:35Z roland $
+ */
+
+#ifndef INFINIBAND_ARCH_H
+#define INFINIBAND_ARCH_H
+
+#define htonll cl_hton64
+#define ntohll cl_ntoh64
+/*
+ * Architecture-specific defines.  Currently, an architecture is
+ * required to implement the following operations:
+ *
+ * mb() - memory barrier.  No loads or stores may be reordered across
+ *     this macro by either the compiler or the CPU.
+ */
+
+#define mb                     MemoryBarrier
+#define wmb                    MemoryBarrier
+#define rmb                    MemoryBarrier
+
+#endif /* INFINIBAND_ARCH_H */
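All three barrier macros map to the Win32 MemoryBarrier() full fence. A minimal sketch of the ordering they exist to enforce; the helper below and its parameters are hypothetical, used only to illustrate why a write barrier sits between completing a descriptor and ringing a doorbell:

    /* Illustrative only: the HCA must never fetch a half-written descriptor,
     * so the descriptor stores must be globally visible before the MMIO
     * doorbell write that tells the hardware to go read it. */
    static void ring_doorbell_sketch(volatile uint32_t *doorbell_reg,
                                     uint32_t *wqe_last_word,
                                     uint32_t last_word, uint32_t db_val)
    {
        *wqe_last_word = last_word;  /* complete the WQE in memory           */
        wmb();                       /* order the WQE stores before the MMIO */
        *doorbell_reg = db_val;      /* hardware may fetch the WQE from now  */
    }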
diff --git a/trunk/hw/mthca/user/driver.h b/trunk/hw/mthca/user/driver.h
new file mode 100644 (file)
index 0000000..1432497
--- /dev/null
@@ -0,0 +1,128 @@
+/*
+ * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
+ * Copyright (c) 2005 Cisco Systems.  All rights reserved.
+ * Copyright (c) 2005 PathScale, Inc.  All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id: driver.h 3825 2005-10-19 21:25:34Z roland $
+ */
+
+#ifndef INFINIBAND_DRIVER_H
+#define INFINIBAND_DRIVER_H
+
+#include <iba/ib_types.h>
+#include <verbs.h>
+#include <kern-abi.h>
+
+#ifdef __cplusplus
+#  define BEGIN_C_DECLS extern "C" {
+#  define END_C_DECLS   }
+#else /* !__cplusplus */
+#  define BEGIN_C_DECLS
+#  define END_C_DECLS
+#endif /* __cplusplus */
+
+/*
+ * Device-specific drivers should declare their device init function
+ * as below (the name must be "openib_driver_init"):
+ *
+ * struct ibv_device *openib_driver_init(struct sysfs_class_device *);
+ *
+ * libibverbs will call each driver's openib_driver_init() function
+ * once for each InfiniBand device.  If the device is one that the
+ * driver can support, it should return a struct ibv_device * with the
+ * ops member filled in.  If the driver does not support the device,
+ * it should return NULL from openib_driver_init().
+ */
+typedef struct ibv_device *(*ibv_driver_init_func)(struct sysfs_class_device *);
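A minimal sketch of a driver init function that follows the contract described above; struct my_device, my_ops (a struct ibv_device_ops value) and device_is_supported() are hypothetical names used only for illustration, and stdlib.h is assumed for calloc():

    /* Hypothetical sketch, not part of this header or of the mthca driver. */
    struct my_device {
        struct ibv_device ibv_dev;   /* must come first so the cast back works */
        int               hca_index;
    };

    struct ibv_device *openib_driver_init(struct sysfs_class_device *sysdev)
    {
        struct my_device *dev;

        if (!device_is_supported(sysdev))   /* e.g. match PCI vendor/device IDs */
            return NULL;                    /* not ours; let another driver claim it */

        dev = calloc(1, sizeof *dev);
        if (!dev)
            return NULL;

        dev->ibv_dev.ops = my_ops;          /* fill in the verbs entry points */
        return &dev->ibv_dev;
    }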
+
+extern int ibv_cmd_get_context(struct ibv_context *context, struct ibv_get_context *cmd,
+                              size_t cmd_size, struct ibv_get_context_resp *resp,
+                              size_t resp_size);
+extern int ibv_cmd_query_device(struct ibv_context *context,
+                               struct ibv_device_attr *device_attr,
+                               uint64_t *raw_fw_ver,
+                               struct ibv_query_device *cmd, size_t cmd_size);
+extern int ibv_cmd_query_port(struct ibv_context *context, uint8_t port_num,
+                             struct ibv_port_attr *port_attr,
+                             struct ibv_query_port *cmd, size_t cmd_size);
+extern int ibv_cmd_query_gid(struct ibv_context *context, uint8_t port_num,
+                            int index, union ibv_gid *gid);
+extern int ibv_cmd_query_pkey(struct ibv_context *context, uint8_t port_num,
+                             int index, uint16_t *pkey);
+extern int ibv_cmd_alloc_pd(struct ibv_context *context, struct ibv_pd *pd,
+                           struct ibv_alloc_pd *cmd, size_t cmd_size,
+                           struct ibv_alloc_pd_resp *resp, size_t resp_size);
+extern int ibv_cmd_dealloc_pd(struct ibv_pd *pd);
+extern int ibv_cmd_reg_mr(struct ibv_pd *pd, void *addr, size_t length,
+                         uint64_t hca_va, enum ibv_access_flags access,
+                         struct ibv_mr *mr, struct ibv_reg_mr *cmd,
+                         size_t cmd_size);
+extern int ibv_cmd_dereg_mr(struct ibv_mr *mr);
+extern int ibv_cmd_create_cq(struct ibv_context *context, int cqe,
+                            struct ibv_comp_channel *channel,
+                            int comp_vector, struct ibv_cq *cq,
+                            struct ibv_create_cq *cmd, size_t cmd_size,
+                            struct ibv_create_cq_resp *resp, size_t resp_size);
+extern int ibv_cmd_poll_cq(struct ibv_cq *cq, int ne, struct ibv_wc *wc);
+extern int ibv_cmd_req_notify_cq(struct ibv_cq *cq, int solicited_only);
+extern int ibv_cmd_destroy_cq(struct ibv_cq *cq);
+
+extern int ibv_cmd_create_srq(struct ibv_pd *pd,
+                             struct ibv_srq *srq, struct ibv_srq_init_attr *attr,
+                             struct ibv_create_srq *cmd, size_t cmd_size,
+                             struct ibv_create_srq_resp *resp, size_t resp_size);
+extern int ibv_cmd_modify_srq(struct ibv_srq *srq,
+                             struct ibv_srq_attr *srq_attr,
+                             enum ibv_srq_attr_mask srq_attr_mask,
+                             struct ibv_modify_srq *cmd, size_t cmd_size);
+extern int ibv_cmd_destroy_srq(struct ibv_srq *srq);
+
+extern int ibv_cmd_create_qp(struct ibv_pd *pd,
+                            struct ibv_qp *qp, struct ibv_qp_init_attr *attr,
+                            struct ibv_create_qp *cmd, size_t cmd_size);
+extern int ibv_cmd_modify_qp(struct ibv_qp *qp, struct ibv_qp_attr *attr,
+                            enum ibv_qp_attr_mask attr_mask,
+                            struct ibv_modify_qp *cmd, size_t cmd_size);
+extern int ibv_cmd_destroy_qp(struct ibv_qp *qp);
+extern int ibv_cmd_post_send(struct ibv_qp *ibqp, struct ibv_send_wr *wr,
+                            struct ibv_send_wr **bad_wr);
+extern int ibv_cmd_post_recv(struct ibv_qp *ibqp, struct ibv_recv_wr *wr,
+                            struct ibv_recv_wr **bad_wr);
+extern int ibv_cmd_post_srq_recv(struct ibv_srq *srq, struct ibv_recv_wr *wr,
+                                struct ibv_recv_wr **bad_wr);
+extern int ibv_cmd_create_ah(struct ibv_pd *pd, struct ibv_ah *ah,
+                            struct ibv_ah_attr *attr);
+extern int ibv_cmd_destroy_ah(struct ibv_ah *ah);
+extern int ibv_cmd_attach_mcast(struct ibv_qp *qp, union ibv_gid *gid, uint16_t lid);
+extern int ibv_cmd_detach_mcast(struct ibv_qp *qp, union ibv_gid *gid, uint16_t lid);
+
+#endif /* INFINIBAND_DRIVER_H */
diff --git a/trunk/hw/mthca/user/mlnx_ual_av.c b/trunk/hw/mthca/user/mlnx_ual_av.c
new file mode 100644 (file)
index 0000000..1a7243c
--- /dev/null
@@ -0,0 +1,605 @@
+/*\r
+ * Copyright (c) 2005 SilverStorm Technologies.  All rights reserved.\r
+ * Copyright (c) 2004-2005 Mellanox Technologies, Inc. All rights reserved. \r
+ *\r
+ * This software is available to you under the OpenIB.org BSD license\r
+ * below:\r
+ *\r
+ *     Redistribution and use in source and binary forms, with or\r
+ *     without modification, are permitted provided that the following\r
+ *     conditions are met:\r
+ *\r
+ *      - Redistributions of source code must retain the above\r
+ *        copyright notice, this list of conditions and the following\r
+ *        disclaimer.\r
+ *\r
+ *      - Redistributions in binary form must reproduce the above\r
+ *        copyright notice, this list of conditions and the following\r
+ *        disclaimer in the documentation and/or other materials\r
+ *        provided with the distribution.\r
+ *\r
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,\r
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\r
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND\r
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS\r
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN\r
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN\r
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\r
+ * SOFTWARE.\r
+ *\r
+ * $Id: mlnx_ual_av.c 256 2005-08-07 13:23:31Z sleybo $\r
+ */\r
+\r
+#include "mt_l2w.h"\r
+#include "mlnx_uvp.h"\r
+#include "mx_abi.h"\r
+\r
+#include "mlnx_ual_main.h"\r
+#if defined(EVENT_TRACING)\r
+#include "mlnx_ual_av.tmh"\r
+#endif\r
+\r
+void\r
+mlnx_get_av_interface (\r
+    IN OUT     uvp_interface_t         *p_uvp )\r
+{\r
+\r
+    CL_ASSERT(p_uvp);\r
+\r
+    /*\r
+     * Address Vector Management Verbs\r
+     */\r
+    p_uvp->pre_create_av  = mlnx_pre_create_av;\r
+    p_uvp->post_create_av = mlnx_post_create_av;\r
+    p_uvp->pre_query_av   = mlnx_pre_query_av;\r
+    p_uvp->post_query_av  = mlnx_post_query_av;\r
+    p_uvp->pre_modify_av   = mlnx_pre_modify_av;\r
+    p_uvp->post_modify_av  = mlnx_post_modify_av;\r
+    p_uvp->pre_destroy_av  = mlnx_pre_destroy_av;\r
+    p_uvp->post_destroy_av = mlnx_post_destroy_av;\r
+\r
+}\r
+\r
+\r
+uint8_t\r
+gid_to_index_lookup (\r
+    IN         ib_ca_attr_t    *p_ca_attr,\r
+    IN         uint8_t         port_num,\r
+    IN         uint8_t         *raw_gid)\r
+{\r
+    ib_gid_t *p_gid_table = NULL;\r
+    uint8_t i, index = 0;\r
+    uint16_t num_gids;\r
+\r
+    p_gid_table = p_ca_attr->p_port_attr[port_num].p_gid_table;\r
+    CL_ASSERT (p_gid_table);\r
+\r
+    num_gids = p_ca_attr->p_port_attr[port_num].num_gids;\r
+    UVP_PRINT(TRACE_LEVEL_INFORMATION, UVP_DBG_AV, \r
+              ("Port %d has %d gids\n", port_num, num_gids));\r
+\r
+    for (i = 0; i < num_gids; i++)\r
+    {\r
+        if (!cl_memcmp (raw_gid, p_gid_table[i].raw, sizeof (ib_gid_t)))
+        {\r
+            UVP_PRINT(TRACE_LEVEL_INFORMATION ,UVP_DBG_AV ,\r
+                      ("found GID at index %d\n", i));\r
+            index = i;\r
+            break;\r
+        }\r
+    }\r
+    return index;\r
+}\r
+\r
+#ifndef WIN_TO_BE_CHANGED\r
+\r
+ib_api_status_t\r
+map_itom_av_attr (\r
+    IN         ib_ca_attr_t            *p_ca_attr,\r
+    IN         const ib_av_attr_t      *p_av_attr,\r
+    OUT                struct ibv_ah_attr              *p_attr)\r
+{\r
+\r
+\r
+       ib_api_status_t status = IB_SUCCESS;\r
+       if (p_av_attr->port_num == 0 || \r
+               p_av_attr->port_num > p_ca_attr->num_ports) {\r
+               UVP_PRINT(TRACE_LEVEL_WARNING ,UVP_DBG_AV ,\r
+                       (" invalid port number specified (%d)\n",p_av_attr->port_num));\r
+               return IB_INVALID_PORT;\r
+       }\r
+\r
+       p_attr->sl = p_av_attr->sl;\r
+       p_attr->port_num = p_av_attr->port_num;\r
+       p_attr->dlid = CL_NTOH16 (p_av_attr->dlid);\r
+       p_attr->src_path_bits = p_av_attr->path_bits; // PATH:\r
+\r
+       //TODO: how is static_rate encoded?\r
+       p_attr->static_rate   =\r
+               (p_av_attr->static_rate == IB_PATH_RECORD_RATE_10_GBS ? 0 : 3);\r
+               \r
+       /* For global destination or Multicast address:*/\r
+       if (p_av_attr->grh_valid) {\r
+               p_attr->is_global = TRUE;\r
+               p_attr->grh.hop_limit            = p_av_attr->grh.hop_limit;\r
+               ib_grh_get_ver_class_flow( p_av_attr->grh.ver_class_flow, NULL,\r
+                       &p_attr->grh.traffic_class, &p_attr->grh.flow_label );\r
+               p_attr->grh.sgid_index = gid_to_index_lookup (p_ca_attr, \r
+                       p_av_attr->port_num, (uint8_t *) p_av_attr->grh.src_gid.raw); \r
+               cl_memcpy (p_attr->grh.dgid.raw, p_av_attr->grh.dest_gid.raw, \r
+                       sizeof (IB_gid_t));\r
+       }else{\r
+               p_attr->is_global = FALSE;\r
+       }\r
+\r
+       return status;\r
+} \r
+\r
+#else\r
+void\r
+map_itom_av_attr (\r
+    IN         ib_ca_attr_t            *p_ca_attr,\r
+    IN         const ib_av_attr_t      *p_av_attr,\r
+    OUT                VAPI_ud_av_t            *p_hhul_av)\r
+{\r
+    uint8_t ver;\r
+    uint8_t tclass;\r
+    uint32_t flow_lbl;\r
+\r
+    p_hhul_av->sl            = p_av_attr->sl;\r
+    p_hhul_av->port          = p_av_attr->port_num;\r
+    p_hhul_av->dlid          = CL_NTOH16 (p_av_attr->dlid);\r
+       /*\r
+        * VAPI encodes the static rate as an inter-packet delay (IPD):\r
+        * 0 means the link rates match; 3 throttles a 4x link down to 1x.\r
+        */\r
+       p_hhul_av->static_rate   =\r
+               (p_av_attr->static_rate == IB_PATH_RECORD_RATE_10_GBS? 0 : 3);\r
+\r
+    p_hhul_av->src_path_bits = 0;\r
+    /* p_hhul_av->src_path_bits = p_av_attr->path_bits; */\r
+    UVP_PRINT(TRACE_LEVEL_INFORMATION ,UVP_DBG_AV ,\r
+              ("ib_av_attr->path_bits %d\n", p_av_attr->path_bits));\r
+    p_hhul_av->grh_flag      = (MT_bool)p_av_attr->grh_valid;\r
+\r
+    if (p_av_attr->grh_valid)\r
+    {\r
+        ib_grh_get_ver_class_flow (p_av_attr->grh.ver_class_flow,\r
+                                   &ver, &tclass, &flow_lbl);\r
+\r
+        p_hhul_av->hop_limit  = p_av_attr->grh.hop_limit;\r
+        p_hhul_av->sgid_index = \r
+            gid_to_index_lookup (p_ca_attr, \r
+                                 p_av_attr->port_num,\r
+                                 (uint8_t *) p_av_attr->grh.src_gid.raw); \r
+\r
+        cl_memcpy (p_hhul_av->dgid, p_av_attr->grh.dest_gid.raw, \r
+                   sizeof (IB_gid_t));\r
+        \r
+        p_hhul_av->traffic_class = tclass;\r
+        p_hhul_av->flow_label    = flow_lbl;\r
+    }\r
+} \r
+#endif\r
+\r
+\r
+ib_api_status_t\r
+mlnx_pre_create_av (\r
+    IN         const ib_pd_handle_t            h_uvp_pd,\r
+    IN         const ib_av_attr_t              *p_av_attr,\r
+    IN OUT     ci_umv_buf_t                    *p_umv_buf)\r
+{\r
+       int err;\r
+       struct ibv_create_ah *p_create_av;\r
+       ib_api_status_t status = IB_SUCCESS;\r
+       size_t size = max( sizeof(struct ibv_create_ah), sizeof(struct ibv_create_ah_resp) );\r
+       mlnx_ual_pd_info_t *p_pd = (mlnx_ual_pd_info_t *)h_uvp_pd;\r
+       mlnx_ual_hobul_t *p_hobul = p_pd->p_hobul;\r
+\r
+       UVP_ENTER(UVP_DBG_AV);\r
+\r
+       CL_ASSERT(p_umv_buf);\r
+\r
+       if( !p_umv_buf->p_inout_buf )\r
+       {\r
+               p_umv_buf->p_inout_buf = cl_zalloc( size );\r
+               if( !p_umv_buf->p_inout_buf )\r
+               {\r
+                       status = IB_INSUFFICIENT_MEMORY;\r
+                       goto err_memory;\r
+               }\r
+       }\r
+\r
+       if (p_av_attr->port_num == 0 || p_av_attr->port_num > p_hobul->p_hca_attr->num_ports) {\r
+               UVP_PRINT(TRACE_LEVEL_WARNING ,UVP_DBG_AV ,\r
+                       (" invalid port number specified (%d)\n",p_av_attr->port_num));\r
+               return IB_INVALID_PORT;\r
+       }\r
+\r
+       p_umv_buf->input_size = sizeof(struct ibv_create_ah);\r
+       p_umv_buf->output_size = sizeof(struct ibv_create_ah_resp);\r
+       p_umv_buf->command = TRUE;\r
+\r
+       /* allocate ibv_ah */\r
+       p_create_av = (struct ibv_create_ah *)p_umv_buf->p_inout_buf;\r
+       err = p_hobul->ibv_ctx->ops.create_ah_pre(p_pd->ibv_pd, p_create_av);\r
+       if (err) {\r
+               UVP_PRINT(TRACE_LEVEL_ERROR ,UVP_DBG_AV , ("mthca_alloc_av_pre failed (%d)\n", err));\r
+               status = errno_to_iberr(err);\r
+               goto err_alloc_av;\r
+       }\r
+\r
+       goto end;\r
+               \r
+err_alloc_av:\r
+       cl_free(p_umv_buf->p_inout_buf);\r
+err_memory:\r
+end:\r
+               UVP_EXIT(UVP_DBG_AV);\r
+               return status;\r
+}\r
+\r
+\r
+#ifndef WIN_TO_BE_CHANGED\r
+void\r
+mlnx_post_create_av (\r
+    IN         const ib_pd_handle_t            h_uvp_pd,\r
+    IN         ib_api_status_t                 ioctl_status,\r
+    OUT                ib_av_handle_t                  *ph_uvp_av,\r
+    IN OUT     ci_umv_buf_t                    *p_umv_buf)\r
+{\r
+       int err;\r
+       struct ibv_ah_attr attr;\r
+       struct ibv_ah *ibv_ah;\r
+       struct ibv_create_ah_resp *p_resp;\r
+       mlnx_ual_av_info_t *av_info;\r
+       ib_api_status_t status = IB_SUCCESS;\r
+       mlnx_ual_pd_info_t *p_pd = (mlnx_ual_pd_info_t *)h_uvp_pd;\r
+       mlnx_ual_hobul_t *p_hobul = p_pd->p_hobul;\r
+\r
+       UVP_ENTER(UVP_DBG_AV);\r
+\r
+       CL_ASSERT(p_hobul);\r
+       CL_ASSERT(p_umv_buf);\r
+\r
+       p_resp = (struct ibv_create_ah_resp *)p_umv_buf->p_inout_buf;\r
+\r
+       if (IB_SUCCESS == ioctl_status) {\r
+\r
+               /* convert parameters */\r
+               cl_memset( &attr, 0, sizeof(attr));\r
+               status = map_itom_av_attr (p_hobul->p_hca_attr, &p_resp->av_attr, &attr);\r
+               if(status != IB_SUCCESS ) \r
+                       goto err_map_itom;\r
+               /* allocate ibv_av */\r
+               ibv_ah = p_hobul->ibv_ctx->ops.create_ah_post(p_pd->ibv_pd, &attr, p_resp);\r
+               if (IS_ERR(ibv_ah)) {\r
+                       err = PTR_ERR(ibv_ah);\r
+                       UVP_PRINT(TRACE_LEVEL_ERROR ,UVP_DBG_AV , ("mthca_create_av_post failed (%d)\n", err));\r
+                       status = errno_to_iberr(err);\r
+                       goto err_create_ah;\r
+               }\r
+\r
+               /* allocate av */\r
+               av_info = (mlnx_ual_av_info_t *)cl_zalloc( sizeof(mlnx_ual_av_info_t) );\r
+               if( !av_info ) {\r
+                       status = IB_INSUFFICIENT_MEMORY;\r
+                       goto err_alloc_av_info;\r
+               }\r
+\r
+               /* return results */\r
+               cl_memcpy( &av_info->av_attr, &p_resp->av_attr, sizeof(av_info->av_attr) );\r
+               av_info->h_uvp_pd = p_pd;\r
+               av_info->ibv_ah = ibv_ah;\r
+               *ph_uvp_av = (ib_av_handle_t)av_info;\r
+\r
+       }\r
+       goto end;\r
+       \r
+err_alloc_av_info:     \r
+       p_hobul->ibv_ctx->ops.destroy_ah(ibv_ah);\r
+err_map_itom:  \r
+err_create_ah:\r
+end:   \r
+       if (p_resp)\r
+               cl_free( p_resp );\r
+       UVP_EXIT(UVP_DBG_AV);\r
+       return;\r
+}\r
+\r
+#else\r
+void\r
+mlnx_post_create_av (\r
+    IN         const ib_pd_handle_t            h_uvp_pd,\r
+    IN         ib_api_status_t                 ioctl_status,\r
+    OUT                ib_av_handle_t                  *ph_uvp_av,\r
+    IN OUT     ci_umv_buf_t                    *p_umv_buf)\r
+{\r
+    ib_api_status_t status;\r
+    mlnx_ual_hobul_t *p_hobul;\r
+    mlnx_ual_pd_info_t *p_pd_info = (mlnx_ual_pd_info_t *)((void*)h_uvp_pd);\r
+    ib_av_attr_t *p_av_attr;\r
+#ifdef WIN_TO_BE_CHANGED\r
+    VAPI_ud_av_t hhul_av;\r
+#endif\r
+    mlnx_ual_av_info_t *p_new_av = NULL;\r
+\r
+    UVP_ENTER(UVP_DBG_AV);\r
+    CL_ASSERT (p_pd_info);\r
+    CL_ASSERT (p_umv_buf);\r
+\r
+    p_hobul = p_pd_info->p_hobul;\r
+    CL_ASSERT (p_hobul);\r
+\r
+    /* \r
+     * Set initial value for handle \r
+     */\r
+    *ph_uvp_av = NULL;\r
+  \r
+    status = ioctl_status;\r
+\r
+    if (IB_SUCCESS == status)\r
+    {\r
+       if (sizeof (ib_av_attr_t) != p_umv_buf->output_size) \r
+       {\r
+           UVP_PRINT(TRACE_LEVEL_ERROR ,UVP_DBG_AV ,\r
+               ("Priv buffer has different size\n"));\r
+           status = IB_ERROR;\r
+           goto cleanup;\r
+       }\r
+       p_av_attr = (ib_av_attr_t *) p_umv_buf->p_inout_buf;\r
+       CL_ASSERT (p_av_attr);\r
+\r
+       p_new_av = cl_zalloc (sizeof (mlnx_ual_av_info_t));\r
+\r
+#ifdef WIN_TO_BE_CHANGED\r
+       map_itom_av_attr (p_hobul->p_hca_attr, p_av_attr, &hhul_av);\r
+\r
+       if (HH_OK != \r
+           THHUL_pdm_create_ud_av (p_hobul->hhul_hca_hndl,\r
+           p_pd_info->hhul_pd_hndl,\r
+           &hhul_av,\r
+           &p_new_av->h_av))\r
+       {\r
+           UVP_PRINT(TRACE_LEVEL_ERROR ,UVP_DBG_AV ,\r
+               ("FAILED to create usermode UD AV\n"));\r
+           status = IB_ERROR;\r
+           goto cleanup;\r
+       }\r
+#endif\r
+\r
+       p_new_av->p_i_av_attr = p_av_attr;\r
+       p_new_av->h_uvp_pd = h_uvp_pd;\r
+       *ph_uvp_av = p_new_av;\r
+       p_umv_buf->p_inout_buf = NULL;\r
+    }\r
+    /* \r
+     * clean_up if required\r
+     */\r
+cleanup:\r
+    if ((IB_SUCCESS != status) && (IB_SUCCESS == ioctl_status))\r
+    {\r
+        if (p_new_av) \r
+        {\r
+            if (p_new_av->p_i_av_attr)\r
+            {\r
+                cl_free (p_new_av->p_i_av_attr);\r
+            }\r
+            cl_free (p_new_av);\r
+        }\r
+    }\r
+\r
+    UVP_EXIT(UVP_DBG_AV);\r
+    return;\r
+}\r
+\r
+#endif\r
+\r
+ib_api_status_t\r
+mlnx_pre_query_av (\r
+       IN      const   ib_av_handle_t          h_uvp_av,\r
+       IN OUT          ci_umv_buf_t            *p_umv_buf )\r
+{\r
+       UNREFERENCED_PARAMETER(h_uvp_av);\r
+       UNREFERENCED_PARAMETER(p_umv_buf);\r
+    UVP_ENTER(UVP_DBG_AV);\r
+    UVP_EXIT(UVP_DBG_AV);\r
+    return IB_VERBS_PROCESSING_DONE;\r
+}\r
+\r
+\r
+void\r
+mlnx_post_query_av (\r
+       IN              const   ib_av_handle_t                          h_uvp_av,\r
+       IN                              ib_api_status_t                         ioctl_status,\r
+       IN      OUT                     ib_av_attr_t                            *p_addr_vector,\r
+       IN      OUT                     ib_pd_handle_t                          *ph_pd,\r
+       IN      OUT                     ci_umv_buf_t                            *p_umv_buf)\r
+{\r
+       mlnx_ual_av_info_t *av_info = (mlnx_ual_av_info_t *)h_uvp_av;\r
+       UNREFERENCED_PARAMETER(p_umv_buf);\r
+\r
+    UVP_ENTER(UVP_DBG_AV);\r
+    CL_ASSERT(p_umv_buf);\r
+    CL_ASSERT(p_addr_vector);\r
+\r
+    if (ioctl_status == IB_SUCCESS)\r
+    {\r
+        cl_memcpy (p_addr_vector, &av_info->av_attr, sizeof (ib_av_attr_t));\r
+               if (ph_pd)\r
+                       *ph_pd = (ib_pd_handle_t)av_info->h_uvp_pd;\r
+    }\r
+    \r
+    UVP_EXIT(UVP_DBG_AV);\r
+}\r
+\r
+#ifndef WIN_TO_BE_CHANGED\r
+\r
+void mthca_set_av_params(      struct mthca_ah *ah_p, struct ibv_ah_attr *ah_attr );\r
+\r
+ib_api_status_t\r
+mlnx_pre_modify_av (\r
+    IN         const ib_av_handle_t            h_uvp_av,\r
+    IN         const ib_av_attr_t              *p_addr_vector,\r
+    IN OUT     ci_umv_buf_t                    *p_umv_buf)\r
+{\r
+       ib_api_status_t status ;\r
+       struct ibv_ah *ibv_ah = (struct ibv_ah *)h_uvp_av->ibv_ah;\r
+       struct mthca_ah *mthca_ah = (struct mthca_ah *)ibv_ah;\r
+       mlnx_ual_pd_info_t *p_pd_info;\r
+       mlnx_ual_hobul_t *p_hobul;\r
+       struct ibv_ah_attr attr;\r
+       int err;\r
+\r
+       UNREFERENCED_PARAMETER(p_umv_buf);\r
+       \r
+       UVP_ENTER(UVP_DBG_AV);\r
+       \r
+       CL_ASSERT(p_umv_buf);\r
+               \r
+       p_pd_info = h_uvp_av->h_uvp_pd;\r
+       CL_ASSERT (p_pd_info);\r
+\r
+       p_hobul = p_pd_info->p_hobul;\r
+       CL_ASSERT (p_hobul);\r
+\r
+       status = map_itom_av_attr (p_hobul->p_hca_attr, p_addr_vector, &attr);\r
+       if(status != IB_SUCCESS)        return status;\r
+       \r
+       mthca_set_av_params( mthca_ah, &attr);\r
+       cl_memcpy (&h_uvp_av->av_attr, p_addr_vector, sizeof(ib_av_attr_t));\r
+       \r
+       UVP_EXIT(UVP_DBG_AV);\r
+\r
+       return IB_VERBS_PROCESSING_DONE;\r
+}\r
+\r
+#else\r
+ib_api_status_t\r
+mlnx_pre_modify_av (\r
+    IN         const ib_av_handle_t            h_uvp_av,\r
+    IN         const ib_av_attr_t              *p_addr_vector,\r
+    IN OUT     ci_umv_buf_t                    *p_umv_buf)\r
+{\r
+    ib_api_status_t status = IB_VERBS_PROCESSING_DONE;\r
+\r
+    mlnx_ual_av_info_t *p_av_info = (mlnx_ual_av_info_t *)((void*) h_uvp_av);\r
+    mlnx_ual_pd_info_t *p_pd_info;\r
+    mlnx_ual_hobul_t *p_hobul;\r
+#ifdef WIN_TO_BE_CHANGED\r
+    VAPI_ud_av_t hhul_av;\r
+#endif\r
+\r
+    UVP_ENTER(UVP_DBG_AV);\r
+    CL_ASSERT (p_umv_buf);\r
+\r
+    p_pd_info = (mlnx_ual_pd_info_t *)((void*) p_av_info->h_uvp_pd);\r
+    CL_ASSERT (p_pd_info);\r
+\r
+    p_hobul = p_pd_info->p_hobul;\r
+    CL_ASSERT (p_hobul);\r
+\r
+#ifdef WIN_TO_BE_CHANGED\r
+    map_itom_av_attr (p_hobul->p_hca_attr, p_addr_vector, &hhul_av);\r
+\r
+       if (HH_OK !=\r
+        THHUL_pdm_modify_ud_av (p_hobul->hhul_hca_hndl, \r
+                                p_av_info->h_av,\r
+                                &hhul_av))\r
+    {\r
+        UVP_PRINT(TRACE_LEVEL_ERROR ,UVP_DBG_AV ,\r
+                  ("Failed to modify AV\n"));\r
+        status = IB_ERROR;\r
+    }\r
+    else\r
+#endif                 \r
+    {\r
+        cl_memcpy (p_av_info->p_i_av_attr, p_addr_vector, sizeof (ib_av_attr_t));\r
+    }\r
+\r
+    UVP_EXIT(UVP_DBG_AV);\r
+\r
+    return status;\r
+}\r
+#endif\r
+\r
+void\r
+mlnx_post_modify_av (\r
+    IN         const ib_av_handle_t    h_uvp_av,\r
+    IN         ib_api_status_t         ioctl_status,\r
+    IN OUT     ci_umv_buf_t            *p_umv_buf)\r
+{\r
+    UVP_ENTER(UVP_DBG_AV);\r
+    UVP_EXIT(UVP_DBG_AV);\r
+    return;\r
+}\r
+\r
+\r
+ib_api_status_t\r
+mlnx_pre_destroy_av (\r
+    IN         const ib_av_handle_t            h_uvp_av)\r
+{\r
+    UVP_ENTER(UVP_DBG_AV);\r
+    UVP_EXIT(UVP_DBG_AV);\r
+    return IB_VERBS_PROCESSING_DONE;\r
+}\r
+\r
+#ifndef WIN_TO_BE_CHANGED\r
+\r
+void\r
+mlnx_post_destroy_av (\r
+    IN         const ib_av_handle_t            h_uvp_av,\r
+    IN         ib_api_status_t                 ioctl_status)\r
+{\r
+       UNREFERENCED_PARAMETER(ioctl_status);\r
+\r
+    UVP_ENTER(UVP_DBG_AV);\r
+    CL_ASSERT (h_uvp_av && h_uvp_av->ibv_ah);\r
+       h_uvp_av->ibv_ah->pd->context->ops.destroy_ah(h_uvp_av->ibv_ah);\r
+    UVP_EXIT(UVP_DBG_AV);\r
+    return;\r
+}\r
+\r
+#else\r
+void\r
+mlnx_post_destroy_av (\r
+    IN         const ib_av_handle_t            h_uvp_av,\r
+    IN         ib_api_status_t                 ioctl_status)\r
+{\r
+    mlnx_ual_hobul_t *p_hobul;\r
+    mlnx_ual_pd_info_t *p_pd_info;\r
+    mlnx_ual_av_info_t *p_av_info = (mlnx_ual_av_info_t *)((void*) h_uvp_av);\r
+\r
+    UVP_ENTER(UVP_DBG_AV);\r
+    CL_ASSERT (p_av_info);\r
+\r
+    p_pd_info = (mlnx_ual_pd_info_t *)((void*) p_av_info->h_uvp_pd);\r
+    CL_ASSERT (p_pd_info);\r
+\r
+    p_hobul = p_pd_info->p_hobul;\r
+    CL_ASSERT (p_hobul);\r
+\r
+#ifdef WIN_TO_BE_CHANGED\r
+    if (HH_OK !=\r
+        THHUL_pdm_destroy_ud_av (p_hobul->hhul_hca_hndl,\r
+                                 p_av_info->h_av))\r
+    {\r
+        UVP_PRINT(TRACE_LEVEL_ERROR ,UVP_DBG_AV ,\r
+                  ("Failed to destroy av\n"));\r
+    }\r
+#endif         \r
+   \r
+    /*\r
+        * We still have to clean up resources even if THHUL failed\r
+        */     \r
+    if (p_av_info->p_i_av_attr)\r
+    {\r
+        cl_free (p_av_info->p_i_av_attr);\r
+    }\r
+    cl_free (p_av_info);\r
+\r
+    UVP_EXIT(UVP_DBG_AV);\r
+    return;\r
+}\r
+\r
+#endif\r
diff --git a/trunk/hw/mthca/user/mlnx_ual_ca.c b/trunk/hw/mthca/user/mlnx_ual_ca.c
new file mode 100644 (file)
index 0000000..64ca42a
--- /dev/null
@@ -0,0 +1,279 @@
+/*\r
+ * Copyright (c) 2005 SilverStorm Technologies.  All rights reserved.\r
+ * Copyright (c) 2004-2005 Mellanox Technologies, Inc. All rights reserved. \r
+ *\r
+ * This software is available to you under the OpenIB.org BSD license\r
+ * below:\r
+ *\r
+ *     Redistribution and use in source and binary forms, with or\r
+ *     without modification, are permitted provided that the following\r
+ *     conditions are met:\r
+ *\r
+ *      - Redistributions of source code must retain the above\r
+ *        copyright notice, this list of conditions and the following\r
+ *        disclaimer.\r
+ *\r
+ *      - Redistributions in binary form must reproduce the above\r
+ *        copyright notice, this list of conditions and the following\r
+ *        disclaimer in the documentation and/or other materials\r
+ *        provided with the distribution.\r
+ *\r
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,\r
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\r
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND\r
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS\r
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN\r
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN\r
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\r
+ * SOFTWARE.\r
+ *\r
+ * $Id: mlnx_ual_ca.c 701 2005-11-03 07:14:23Z sleybo $\r
+ */\r
+\r
+#include "mlnx_ual_main.h"\r
+#include "mt_l2w.h"\r
+#include "mlnx_uvp.h"\r
+#include "mlnx_uvp_verbs.h"\r
+#include "mx_abi.h"\r
+\r
+#if defined(EVENT_TRACING)\r
+#include "mlnx_ual_ca.tmh"\r
+#endif\r
+\r
+extern uint32_t        mlnx_dbg_lvl;\r
+\r
+void\r
+mlnx_get_ca_interface (\r
+    IN OUT     uvp_interface_t         *p_uvp )\r
+{\r
+    CL_ASSERT(p_uvp);\r
+\r
+    /*\r
+     * HCA Access Verbs\r
+     */\r
+    p_uvp->pre_open_ca  = mlnx_pre_open_ca;\r
+    p_uvp->post_open_ca = mlnx_post_open_ca;\r
+\r
+  \r
+    p_uvp->pre_query_ca  = mlnx_pre_query_ca;\r
+    p_uvp->post_query_ca = mlnx_post_query_ca;\r
+\r
+    p_uvp->pre_modify_ca  = NULL;\r
+    p_uvp->post_modify_ca = NULL;\r
+\r
+    p_uvp->pre_close_ca  = mlnx_pre_close_ca;\r
+    p_uvp->post_close_ca = mlnx_post_close_ca;\r
+\r
+}\r
+\r
+\r
+\r
+ib_api_status_t\r
+mlnx_pre_open_ca (\r
+       IN              const   ib_net64_t                                      ca_guid,\r
+       IN      OUT                     ci_umv_buf_t                            *p_umv_buf )\r
+{\r
+       ib_api_status_t  status = IB_SUCCESS;\r
+\r
+       UVP_ENTER(UVP_DBG_SHIM);\r
+       if( p_umv_buf )\r
+       {\r
+               if( !p_umv_buf->p_inout_buf )\r
+               {\r
+                       p_umv_buf->p_inout_buf = cl_zalloc( sizeof(struct ibv_get_context_resp) );\r
+                       if( !p_umv_buf->p_inout_buf )\r
+                       {\r
+                               status = IB_INSUFFICIENT_MEMORY;\r
+                               goto err_memory;\r
+                       }\r
+               }\r
+               p_umv_buf->input_size = p_umv_buf->output_size = sizeof(struct ibv_get_context_resp);\r
+               p_umv_buf->command = TRUE;\r
+       }\r
+err_memory:    \r
+       UVP_EXIT(UVP_DBG_SHIM);\r
+       return status;\r
+}\r
+\r
+\r
+ib_api_status_t\r
+mlnx_post_open_ca (\r
+       IN                              const ib_net64_t                        ca_guid,\r
+       IN                              ib_api_status_t                         ioctl_status,\r
+               OUT                     ib_ca_handle_t                          *ph_uvp_ca,\r
+       IN                              ci_umv_buf_t                            *p_umv_buf )\r
+{\r
+       ib_api_status_t  status = ioctl_status;\r
+       mlnx_ual_hobul_t *new_ca;\r
+       struct ibv_get_context_resp *resp_p = NULL;\r
+       struct ibv_context * ibvcontext;\r
+       int err;\r
+\r
+       UVP_ENTER(UVP_DBG_SHIM);\r
+\r
+       if (IB_SUCCESS == status) {\r
+               /* allocate ibv context */\r
+               resp_p = (struct ibv_get_context_resp *)p_umv_buf->p_inout_buf;\r
+               ibvcontext = mthca_alloc_context(resp_p);\r
+               if (IS_ERR(ibvcontext)) {\r
+                       err = PTR_ERR(ibvcontext);\r
+                       UVP_PRINT(TRACE_LEVEL_ERROR ,UVP_DBG_SHIM ,("mthca_alloc_context failed (%d)\n", err));\r
+                       status = errno_to_iberr(err);\r
+                       goto err_alloc_context;\r
+               }\r
+\r
+               /* allocate mthca context */\r
+               new_ca = (mlnx_ual_hobul_t *)cl_zalloc( sizeof(mlnx_ual_hobul_t) );\r
+               if( !new_ca ) {\r
+                       status = IB_INSUFFICIENT_MEMORY;\r
+                       goto err_memory;\r
+               }\r
+\r
+               /* return results */\r
+               new_ca->ibv_ctx = ibvcontext;\r
+               new_ca->p_hca_attr = NULL;\r
+               *ph_uvp_ca = (ib_ca_handle_t)new_ca;\r
+       }\r
+\r
+err_memory:    \r
+err_alloc_context:\r
+       if (resp_p) cl_free( resp_p );\r
+       UVP_EXIT(UVP_DBG_SHIM);\r
+       return status;\r
+}\r
+\r
+ib_api_status_t\r
+mlnx_pre_query_ca (\r
+       IN                              ib_ca_handle_t                          h_uvp_ca,\r
+       IN                              ib_ca_attr_t                            *p_ca_attr,\r
+       IN                              size_t                                          byte_count,\r
+       IN                              ci_umv_buf_t                            *p_umv_buf )\r
+{\r
+       ib_api_status_t status = IB_SUCCESS;\r
+\r
+       UVP_ENTER(UVP_DBG_SHIM);\r
+\r
+       CL_ASSERT(h_uvp_ca);\r
+\r
+       /*\r
+        * The first time query_ca is called, populate our internal cached attributes\r
+        * so we can access the GID table.  Note that query_ca calls *always*\r
+        * get their attributes from the kernel.\r
+        */\r
+       if ( !h_uvp_ca->p_hca_attr )\r
+       {\r
+               /*\r
+                * Assume that if the user buffer is valid then byte_count is valid too,\r
+                * so we can preallocate the CA attr buffer to save the post-ioctl data.\r
+                *\r
+                * Note that we squirrel the buffer away into the umv_buf and only\r
+                * set it into the HCA if the query is successful.\r
+                */\r
+               if ( p_ca_attr != NULL )\r
+               {\r
+                       p_umv_buf->p_inout_buf = cl_zalloc(byte_count);\r
+                       if ( !p_umv_buf->p_inout_buf )\r
+                       {\r
+                               UVP_PRINT(TRACE_LEVEL_ERROR ,UVP_DBG_SHIM ,\r
+                                       ("Failed to alloc CA attr buffer\n"));\r
+                               status = IB_INSUFFICIENT_RESOURCES;\r
+                               return status;\r
+                       }\r
+               }\r
+               p_umv_buf->input_size = p_umv_buf->output_size = 0;\r
+       }\r
+\r
+       UVP_EXIT(UVP_DBG_SHIM);\r
+       return status;\r
+}\r
+\r
+\r
+void\r
+mlnx_post_query_ca (\r
+       IN                              ib_ca_handle_t                          h_uvp_ca,\r
+       IN                              ib_api_status_t                         ioctl_status,\r
+       IN                              ib_ca_attr_t                            *p_ca_attr,\r
+       IN                              size_t                                          byte_count,\r
+       IN                              ci_umv_buf_t                            *p_umv_buf )\r
+{\r
+       UVP_ENTER(UVP_DBG_SHIM);\r
+\r
+       CL_ASSERT(h_uvp_ca);\r
+       CL_ASSERT(p_umv_buf);\r
+\r
+       if ( ioctl_status == IB_SUCCESS && p_ca_attr &&\r
+               byte_count && !h_uvp_ca->p_hca_attr )\r
+       {\r
+               CL_ASSERT( byte_count >= p_ca_attr->size );\r
+               h_uvp_ca->p_hca_attr = p_umv_buf->p_inout_buf;\r
+               ib_copy_ca_attr( h_uvp_ca->p_hca_attr, p_ca_attr );\r
+       }\r
+       else if (p_umv_buf->p_inout_buf) \r
+       {\r
+               cl_free (p_umv_buf->p_inout_buf);\r
+       }\r
+\r
+       UVP_EXIT(UVP_DBG_SHIM);\r
+       return;\r
+}\r
+\r
+\r
+ib_api_status_t\r
+mlnx_pre_modify_ca (\r
+    IN         ib_ca_handle_t                          h_uvp_ca,\r
+    IN         uint8_t                                         port_num,\r
+    IN         ib_ca_mod_t                                     ca_mod,\r
+    IN         const ib_port_attr_mod_t*       p_port_attr_mod)\r
+{\r
+    UVP_ENTER(UVP_DBG_SHIM);\r
+    UVP_EXIT(UVP_DBG_SHIM);\r
+    return IB_SUCCESS;\r
+}\r
+\r
+\r
+void\r
+mlnx_post_modify_ca (\r
+    IN         ib_ca_handle_t                  h_uvp_ca,\r
+    IN         ib_api_status_t                 ioctl_status)\r
+{\r
+    UVP_ENTER(UVP_DBG_SHIM);\r
+    UVP_EXIT(UVP_DBG_SHIM);\r
+    return;\r
+}\r
+\r
+\r
+ib_api_status_t\r
+mlnx_pre_close_ca (\r
+    IN         ib_ca_handle_t          h_uvp_ca)\r
+{\r
+    UVP_ENTER(UVP_DBG_SHIM);\r
+    UVP_EXIT(UVP_DBG_SHIM);\r
+    return IB_SUCCESS;\r
+}\r
+\r
+\r
+ib_api_status_t\r
+mlnx_post_close_ca (\r
+    IN         ib_ca_handle_t          h_uvp_ca,\r
+    IN         ib_api_status_t         ioctl_status )\r
+{\r
+    mlnx_ual_hobul_t *p_hobul = (mlnx_ual_hobul_t *)((void*)h_uvp_ca);\r
+\r
+    UVP_ENTER(UVP_DBG_SHIM);\r
+\r
+    CL_ASSERT(p_hobul);\r
+\r
+       if (p_hobul->ibv_ctx) {\r
+               mthca_free_context(p_hobul->ibv_ctx);\r
+               p_hobul->ibv_ctx = NULL;\r
+       }\r
+    if (p_hobul->p_hca_attr) {\r
+               cl_free( p_hobul->p_hca_attr);\r
+               p_hobul->p_hca_attr = NULL;\r
+    }\r
+    cl_free(p_hobul);\r
+    \r
+    UVP_EXIT(UVP_DBG_SHIM);\r
+    return IB_SUCCESS;\r
+}\r
+\r
diff --git a/trunk/hw/mthca/user/mlnx_ual_cq.c b/trunk/hw/mthca/user/mlnx_ual_cq.c
new file mode 100644 (file)
index 0000000..4a916d4
--- /dev/null
@@ -0,0 +1,227 @@
+/*\r
+ * Copyright (c) 2005 SilverStorm Technologies.  All rights reserved.\r
+ * Copyright (c) 2004-2005 Mellanox Technologies, Inc. All rights reserved. \r
+ *\r
+ * This software is available to you under the OpenIB.org BSD license\r
+ * below:\r
+ *\r
+ *     Redistribution and use in source and binary forms, with or\r
+ *     without modification, are permitted provided that the following\r
+ *     conditions are met:\r
+ *\r
+ *      - Redistributions of source code must retain the above\r
+ *        copyright notice, this list of conditions and the following\r
+ *        disclaimer.\r
+ *\r
+ *      - Redistributions in binary form must reproduce the above\r
+ *        copyright notice, this list of conditions and the following\r
+ *        disclaimer in the documentation and/or other materials\r
+ *        provided with the distribution.\r
+ *\r
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,\r
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\r
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND\r
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS\r
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN\r
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN\r
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\r
+ * SOFTWARE.\r
+ *\r
+ * $Id: mlnx_ual_cq.c 783 2005-11-30 07:38:36Z sleybo $\r
+ */\r
+\r
+#include "mt_l2w.h"\r
+#include "mlnx_ual_main.h"\r
+#include "mlnx_uvp.h"\r
+#include "mx_abi.h"\r
+\r
+#if defined(EVENT_TRACING)\r
+#include "mlnx_ual_cq.tmh"\r
+#endif\r
+\r
+\r
+extern uint32_t        mlnx_dbg_lvl;\r
+\r
+void\r
+mlnx_get_cq_interface (\r
+    IN OUT     uvp_interface_t         *p_uvp )\r
+{\r
+    UVP_ENTER(UVP_DBG_SHIM);\r
+\r
+    CL_ASSERT(p_uvp);\r
+\r
+    /*\r
+     * Completion Queue Management Verbs\r
+     */\r
+    p_uvp->pre_create_cq  = mlnx_pre_create_cq;\r
+    p_uvp->post_create_cq = mlnx_post_create_cq;\r
+  \r
+    p_uvp->pre_query_cq  = mlnx_pre_query_cq;\r
+    p_uvp->post_query_cq = NULL;\r
+\r
+    p_uvp->pre_resize_cq  = NULL; /* mlnx_pre_resize_cq: not supported in kernel */\r
+    p_uvp->post_resize_cq = NULL;      /* mlnx_post_resize_cq:not supported in kernel */ \r
+\r
+    p_uvp->pre_destroy_cq  = mlnx_pre_destroy_cq;\r
+    p_uvp->post_destroy_cq = mlnx_post_destroy_cq;\r
+\r
+    UVP_EXIT(UVP_DBG_SHIM);\r
+}\r
+\r
+ib_api_status_t\r
+       mlnx_pre_create_cq (\r
+               IN              const ib_ca_handle_t                            h_uvp_ca,\r
+               IN      OUT             uint32_t*               const p_size,\r
+               IN      OUT             ci_umv_buf_t                            *p_umv_buf )\r
+{\r
+       struct ibv_cq *ibv_cq;\r
+       ib_api_status_t status = IB_SUCCESS;\r
+       size_t size = max( sizeof(struct ibv_create_cq), sizeof(struct ibv_create_cq_resp) );\r
+       mlnx_ual_hobul_t *p_hobul = (mlnx_ual_hobul_t *)((void *)h_uvp_ca);\r
+       struct ibv_create_cq *p_create_cq;\r
+       int err;\r
+\r
+       UVP_ENTER(UVP_DBG_SHIM);\r
+\r
+       CL_ASSERT(p_umv_buf);\r
+\r
+       if( !p_umv_buf->p_inout_buf )\r
+       {\r
+               p_umv_buf->p_inout_buf = cl_zalloc( size );\r
+               if( !p_umv_buf->p_inout_buf )\r
+               {\r
+                       status = IB_INSUFFICIENT_MEMORY;\r
+                       goto err_memory;\r
+               }\r
+       }\r
+       p_umv_buf->input_size = sizeof(struct ibv_create_cq);\r
+       p_umv_buf->output_size = sizeof(struct ibv_create_cq_resp);\r
+       p_umv_buf->command = TRUE;\r
+\r
+       /* allocate ibv_cq */\r
+       p_create_cq = (struct ibv_create_cq *)p_umv_buf->p_inout_buf;\r
+       ibv_cq = p_hobul->ibv_ctx->ops.create_cq_pre(p_hobul->ibv_ctx, p_size, p_create_cq);\r
+       if (IS_ERR(ibv_cq)) {\r
+               err = PTR_ERR(ibv_cq);\r
+               UVP_PRINT(TRACE_LEVEL_ERROR ,UVP_DBG_SHIM , ("mthca_alloc_cq_pre failed (%d)\n", err));\r
+               status = errno_to_iberr(err);\r
+               goto err_alloc_cq;\r
+       }\r
+\r
+       goto end;\r
+               \r
+err_alloc_cq:\r
+       cl_free(p_umv_buf->p_inout_buf);\r
+err_memory:\r
+end:\r
+               UVP_EXIT(UVP_DBG_SHIM);\r
+               return status;\r
+}\r
+\r
+\r
+void\r
+mlnx_post_create_cq (\r
+       IN              const   ib_ca_handle_t                          h_uvp_ca,\r
+       IN                              ib_api_status_t                         ioctl_status,\r
+       IN              const   uint32_t                                        size,\r
+               OUT                     ib_cq_handle_t                          *ph_uvp_cq,\r
+       IN                              ci_umv_buf_t                            *p_umv_buf )\r
+{\r
+       int err;\r
+       ib_api_status_t status = IB_SUCCESS;\r
+       struct ibv_create_cq_resp *p_resp;\r
+       struct ibv_cq *ibv_cq;\r
+       mlnx_ual_hobul_t *p_hobul = (mlnx_ual_hobul_t *)((void *)h_uvp_ca);\r
+       mlnx_ual_cq_info_t *p_new_cq = NULL;\r
+\r
+       UVP_ENTER(UVP_DBG_CQ);\r
+\r
+       CL_ASSERT(p_hobul);\r
+       CL_ASSERT(p_umv_buf);\r
+       p_resp = (struct ibv_create_cq_resp *)p_umv_buf->p_inout_buf;\r
+\r
+       if (IB_SUCCESS == ioctl_status) {\r
+\r
+               /* allocate ibv_cq */\r
+               ibv_cq = p_hobul->ibv_ctx->ops.create_cq_post(p_hobul->ibv_ctx, p_resp);\r
+               if (IS_ERR(ibv_cq)) {\r
+                       err = PTR_ERR(ibv_cq);\r
+                       UVP_PRINT(TRACE_LEVEL_ERROR ,UVP_DBG_CQ , ("mthca_create_cq failed (%d)\n", err));\r
+                       status = errno_to_iberr(err);\r
+                       goto err_create_cq;\r
+               }\r
+\r
+               /* allocate cq */\r
+               p_new_cq = (mlnx_ual_cq_info_t *)cl_zalloc( sizeof(mlnx_ual_cq_info_t) );\r
+               if( !p_new_cq ) {\r
+                       status = IB_INSUFFICIENT_MEMORY;\r
+                       goto err_memory;\r
+               }\r
+\r
+               /* return results */\r
+               p_new_cq->ibv_cq = ibv_cq;\r
+               p_new_cq->p_hobul = p_hobul;\r
+               p_new_cq->cq_size = size;\r
+               *ph_uvp_cq = (ib_cq_handle_t)p_new_cq;\r
+       }\r
+       goto end;\r
+       \r
+err_memory: \r
+       p_hobul->ibv_ctx->ops.destroy_cq(ibv_cq);\r
+err_create_cq:\r
+end:   \r
+       if (p_resp)\r
+               cl_free( p_resp );\r
+       UVP_EXIT(UVP_DBG_CQ);\r
+       return;\r
+}\r
+\r
+\r
+ib_api_status_t\r
+mlnx_pre_query_cq (\r
+       IN              const   ib_cq_handle_t          h_uvp_cq,\r
+               OUT                     uint32_t* const         p_size,\r
+       IN      OUT                     ci_umv_buf_t            *p_umv_buf)\r
+{\r
+       mlnx_ual_cq_info_t *p_cq_info = (mlnx_ual_cq_info_t *)((void*) h_uvp_cq);\r
+\r
+       UVP_ENTER(UVP_DBG_SHIM);\r
+\r
+       *p_size = p_cq_info->cq_size;\r
+\r
+       UVP_EXIT(UVP_DBG_SHIM);\r
+       return IB_VERBS_PROCESSING_DONE;\r
+}\r
+\r
+\r
+ib_api_status_t\r
+mlnx_pre_destroy_cq (\r
+    IN         const ib_cq_handle_t                    h_uvp_cq)\r
+{\r
+    UVP_ENTER(UVP_DBG_SHIM);\r
+    UVP_EXIT(UVP_DBG_SHIM);\r
+    return IB_SUCCESS;\r
+}\r
+\r
+void\r
+mlnx_post_destroy_cq (\r
+    IN         const ib_cq_handle_t            h_uvp_cq,\r
+    IN         ib_api_status_t                 ioctl_status)\r
+{\r
+       int err;\r
+       mlnx_ual_cq_info_t *p_cq_info = (mlnx_ual_cq_info_t *) ((void*)h_uvp_cq);\r
+    UNREFERENCED_PARAMETER(ioctl_status);\r
+\r
+    UVP_ENTER(UVP_DBG_SHIM);\r
+\r
+    CL_ASSERT(p_cq_info && p_cq_info->ibv_cq);\r
+\r
+       err = p_cq_info->p_hobul->ibv_ctx->ops.destroy_cq( p_cq_info->ibv_cq );\r
+       if (err) \r
+               UVP_PRINT(TRACE_LEVEL_ERROR ,UVP_DBG_SHIM , ("mthca_destroy_cq failed (%d)\n", err));\r
+\r
+    cl_free (p_cq_info);\r
+    UVP_EXIT(UVP_DBG_SHIM);\r
+    return;\r
+}\r
+\r
diff --git a/trunk/hw/mthca/user/mlnx_ual_data.h b/trunk/hw/mthca/user/mlnx_ual_data.h
new file mode 100644 (file)
index 0000000..e79b86b
--- /dev/null
@@ -0,0 +1,81 @@
+/*\r
+ * Copyright (c) 2005 SilverStorm Technologies.  All rights reserved.\r
+ *\r
+ * This software is available to you under the OpenIB.org BSD license\r
+ * below:\r
+ *\r
+ *     Redistribution and use in source and binary forms, with or\r
+ *     without modification, are permitted provided that the following\r
+ *     conditions are met:\r
+ *\r
+ *      - Redistributions of source code must retain the above\r
+ *        copyright notice, this list of conditions and the following\r
+ *        disclaimer.\r
+ *\r
+ *      - Redistributions in binary form must reproduce the above\r
+ *        copyright notice, this list of conditions and the following\r
+ *        disclaimer in the documentation and/or other materials\r
+ *        provided with the distribution.\r
+ *\r
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,\r
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\r
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND\r
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS\r
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN\r
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN\r
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\r
+ * SOFTWARE.\r
+ *\r
+ * $Id: hca_data.h 336 2005-09-04 07:15:18Z sleybo $\r
+ */\r
+#include <iba/ib_types.h>\r
+#include <iba/ib_uvp.h>\r
+\r
+// taken from ib_defs.h\r
+typedef uint32_t IB_wqpn_t;  /* Work QP number: Only 24 LSbits */\r
+typedef uint8_t  IB_port_t;\r
+typedef uint8_t IB_gid_t[16]; /* GID (aka IPv6) H-to-L (big) (network) endianess */\r
+typedef uint32_t IB_ts_t;\r
+\r
+typedef struct _ib_ca\r
+{\r
+       struct ibv_context *ibv_ctx;\r
+       ib_ca_attr_t            *p_hca_attr;\r
+} mlnx_ual_hobul_t;\r
+\r
+\r
+typedef struct _ib_pd\r
+{\r
+       struct ibv_pd *ibv_pd;\r
+       mlnx_ual_hobul_t        *p_hobul;\r
+} mlnx_ual_pd_info_t;\r
+\r
+\r
+typedef struct _ib_cq\r
+{\r
+       struct ibv_cq *ibv_cq;\r
+       mlnx_ual_hobul_t        *p_hobul; \r
+       uint32_t                        cq_size;\r
+} mlnx_ual_cq_info_t;\r
+\r
+\r
+typedef struct _ib_qp\r
+{\r
+       struct ibv_qp *ibv_qp;\r
+       mlnx_ual_pd_info_t *h_uvp_pd; \r
+} mlnx_ual_qp_info_t;\r
+\r
+\r
+typedef struct _ib_mw\r
+{\r
+       ib_pd_handle_t          h_uvp_pd; \r
+       uint32_t                        rkey;\r
+} mlnx_ual_mw_info_t;\r
+\r
+\r
+typedef struct _ib_av\r
+{\r
+       struct ibv_ah *ibv_ah;\r
+       ib_pd_handle_t          h_uvp_pd; \r
+       ib_av_attr_t            av_attr;\r
+} mlnx_ual_av_info_t;\r
diff --git a/trunk/hw/mthca/user/mlnx_ual_main.c b/trunk/hw/mthca/user/mlnx_ual_main.c
new file mode 100644 (file)
index 0000000..fd65af0
--- /dev/null
@@ -0,0 +1,199 @@
+/*\r
+ * Copyright (c) 2005 SilverStorm Technologies.  All rights reserved.\r
+ * Copyright (c) 2004-2005 Mellanox Technologies, Inc. All rights reserved. \r
+ *\r
+ * This software is available to you under the OpenIB.org BSD license\r
+ * below:\r
+ *\r
+ *     Redistribution and use in source and binary forms, with or\r
+ *     without modification, are permitted provided that the following\r
+ *     conditions are met:\r
+ *\r
+ *      - Redistributions of source code must retain the above\r
+ *        copyright notice, this list of conditions and the following\r
+ *        disclaimer.\r
+ *\r
+ *      - Redistributions in binary form must reproduce the above\r
+ *        copyright notice, this list of conditions and the following\r
+ *        disclaimer in the documentation and/or other materials\r
+ *        provided with the distribution.\r
+ *\r
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,\r
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\r
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND\r
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS\r
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN\r
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN\r
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\r
+ * SOFTWARE.\r
+ *\r
+ * $Id: mlnx_ual_main.c 256 2005-08-07 13:23:31Z sleybo $\r
+ */\r
+\r
+#include <tchar.h>\r
+#include <stdlib.h>\r
+#include "mlnx_ual_main.h"\r
+\r
+#if defined(EVENT_TRACING)\r
+#include "mlnx_ual_main.tmh"\r
+#endif\r
+\r
+\r
+uint32_t       mlnx_dbg_lvl = 0; // MLNX_TRACE_LVL_8;\r
+\r
+\r
+static void uvp_init();\r
+\r
+\r
+extern BOOL APIENTRY\r
+_DllMainCRTStartupForGS(\r
+       IN                              HINSTANCE                                       h_module,\r
+       IN                              DWORD                                           ul_reason_for_call, \r
+       IN                              LPVOID                                          lp_reserved );\r
+\r
+\r
+BOOL APIENTRY\r
+DllMain(\r
+       IN                              HINSTANCE                                       h_module,\r
+       IN                              DWORD                                           ul_reason_for_call, \r
+       IN                              LPVOID                                          lp_reserved )\r
+{\r
+       switch( ul_reason_for_call )\r
+       {\r
+       case DLL_PROCESS_ATTACH:\r
+#if defined(EVENT_TRACING)\r
+#if DBG\r
+               WPP_INIT_TRACING(L"mthcaud.dll");\r
+#else\r
+               WPP_INIT_TRACING(L"mthcau.dll");\r
+#endif\r
+#endif\r
+               if( !_DllMainCRTStartupForGS(\r
+                       h_module, ul_reason_for_call, lp_reserved ) )\r
+               {\r
+                       return FALSE;\r
+               }\r
+\r
+               //uvp_init();\r
+               break;\r
+\r
+        case DLL_PROCESS_DETACH:\r
+               // The calling process is detaching\r
+               // the DLL from its address space.\r
+               //\r
+               // Note that lpvReserved will be NULL if the detach is due to\r
+               // a FreeLibrary() call, and non-NULL if the detach is due to\r
+               // process cleanup.\r
+               //\r
+#if defined(EVENT_TRACING)\r
+               WPP_CLEANUP();\r
+#endif\r
+\r
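+               // fall through - process detach also runs the CRT handler below\r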
+       default:\r
+               return _DllMainCRTStartupForGS(\r
+                       h_module, ul_reason_for_call, lp_reserved );\r
+       }\r
+       return TRUE;\r
+}\r
+\r
+\r
+/*\r
+ *     UVP Shared Library Init routine\r
+*/\r
+\r
+static void\r
+uvp_init()\r
+{\r
+\r
+#if !defined(EVENT_TRACING)\r
+#if DBG \r
+#define ENV_BUFSIZE 16\r
+       TCHAR  dbg_lvl_str[ENV_BUFSIZE];\r
+       DWORD   i;\r
+\r
+\r
+       i = GetEnvironmentVariable( "UVP_DBG_LEVEL", dbg_lvl_str, ENV_BUFSIZE );\r
+       if( i && i <= ENV_BUFSIZE )\r
+       {\r
+               g_mlnx_dbg_level = _tcstoul( dbg_lvl_str, NULL, 16 );\r
+       }\r
+\r
+       i = GetEnvironmentVariable( "UVP_DBG_FLAGS", dbg_lvl_str, ENV_BUFSIZE );\r
+       if( i && i <= ENV_BUFSIZE )\r
+       {\r
+               g_mlnx_dbg_flags = _tcstoul( dbg_lvl_str, NULL, 16 );\r
+       }\r
+\r
+\r
+       UVP_PRINT(TRACE_LEVEL_INFORMATION ,UVP_DBG_DEV ,\r
+               ("Given UVP_DBG debug level:%d  debug flags 0x%x\n",\r
+               g_mlnx_dbg_level ,g_mlnx_dbg_flags) );\r
+\r
+#endif\r
+#endif\r
+}\r
+\r
+__declspec(dllexport) ib_api_status_t\r
+uvp_get_interface (\r
+    IN OUT     uvp_interface_t         *p_uvp )\r
+{\r
+    UVP_ENTER(UVP_DBG_SHIM);\r
+\r
+    CL_ASSERT(p_uvp);\r
+    /*\r
+     * Version of the header file this interface export can handle\r
+     */\r
+    p_uvp->version = 0x100;\r
+    p_uvp->guid    = 0x12345678;\r
+\r
+    /*\r
+     * CA Management\r
+     */\r
+    mlnx_get_ca_interface (p_uvp);\r
+    \r
+    /*\r
+     * Protection Domain\r
+     */\r
+    mlnx_get_pd_interface (p_uvp);\r
+\r
+    /*\r
+     * QP Management Verbs\r
+     */\r
+    mlnx_get_qp_interface (p_uvp);\r
+\r
+    /*\r
+     * Completion Queue Management Verbs\r
+     */\r
+    mlnx_get_cq_interface (p_uvp);\r
+\r
+    /*\r
+     * AV Management\r
+     */\r
+    mlnx_get_av_interface(p_uvp);\r
+\r
+    /*\r
+     * Memory Region / Window Management Verbs\r
+     */\r
+    mlnx_get_mrw_interface (p_uvp);\r
+\r
+    /*\r
+     * Multicast Support Verbs\r
+     */\r
+    mlnx_get_mcast_interface (p_uvp);\r
+\r
+    /*\r
+     * OS bypass (send, receive, poll/notify cq)\r
+     */\r
+    mlnx_get_osbypass_interface(p_uvp);\r
+\r
+    \r
+    /*\r
+     * Local MAD support, for HCA's that do not support\r
+     * Agents in the HW.\r
+     * ??? Do we need this for user-mode ???\r
+     */\r
+\r
+    UVP_EXIT(UVP_DBG_SHIM);\r
+    return IB_SUCCESS;\r
+}\r
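+\r
+/*\r
+ * A minimal sketch of how a consumer (e.g. the access layer) might obtain\r
+ * this verb table.  The DLL name matches the tracing name used above; the\r
+ * uvp_get_interface_t typedef and the verbs picked for the example are\r
+ * assumptions, shown only to illustrate the export:\r
+ *\r
+ *     uvp_interface_t uvp;\r
+ *     HMODULE h_lib = LoadLibrary( TEXT("mthcau.dll") );\r
+ *     uvp_get_interface_t pfn_get =\r
+ *             (uvp_get_interface_t)GetProcAddress( h_lib, "uvp_get_interface" );\r
+ *     if( pfn_get && IB_SUCCESS == pfn_get( &uvp ) )\r
+ *             status = uvp.pre_allocate_pd( h_ca, &umv_buf );\r
+ */\r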
+\r
diff --git a/trunk/hw/mthca/user/mlnx_ual_main.h b/trunk/hw/mthca/user/mlnx_ual_main.h
new file mode 100644 (file)
index 0000000..203b12f
--- /dev/null
@@ -0,0 +1,517 @@
+/*\r
+ * Copyright (c) 2005 SilverStorm Technologies.  All rights reserved.\r
+ * Copyright (c) 2004-2005 Mellanox Technologies, Inc. All rights reserved. \r
+ *\r
+ * This software is available to you under the OpenIB.org BSD license\r
+ * below:\r
+ *\r
+ *     Redistribution and use in source and binary forms, with or\r
+ *     without modification, are permitted provided that the following\r
+ *     conditions are met:\r
+ *\r
+ *      - Redistributions of source code must retain the above\r
+ *        copyright notice, this list of conditions and the following\r
+ *        disclaimer.\r
+ *\r
+ *      - Redistributions in binary form must reproduce the above\r
+ *        copyright notice, this list of conditions and the following\r
+ *        disclaimer in the documentation and/or other materials\r
+ *        provided with the distribution.\r
+ *\r
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,\r
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\r
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND\r
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS\r
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN\r
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN\r
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\r
+ * SOFTWARE.\r
+ *\r
+ * $Id: mlnx_ual_main.h 355 2005-09-11 08:25:00Z sleybo $\r
+ */\r
+\r
+#ifndef __UAL_MAIN_H__\r
+#define __UAL_MAIN_H__\r
+\r
+#include <stdio.h>\r
+#include <stdlib.h>\r
+#include <errno.h>\r
+\r
+//#include <iba/ib_ci.h>\r
+#include "mlnx_ual_data.h"\r
+#include "mlnx_uvp_debug.h"\r
+#include <complib/cl_byteswap.h>\r
+#include <complib/cl_memory.h>\r
+//#include <complib/cl_device.h>\r
+\r
+\r
+#define                MAX_WRS_PER_CHAIN               16\r
+#define                MAX_NUM_SGE                             32\r
+\r
+#define                MLNX_SGE_SIZE                           16\r
+#define                MLNX_UAL_ALLOC_HCA_UL_RES       1\r
+#define                MLNX_UAL_FREE_HCA_UL_RES        2\r
+\r
+typedef         unsigned __int3264            cl_dev_handle_t;\r
+\r
+extern uint32_t mlnx_dbg_lvl;\r
+static inline ib_api_status_t errno_to_iberr(int err)\r
+{\r
+#define MAP_ERR(err,ibstatus)  case err: ib_status = ibstatus; break\r
+       ib_api_status_t ib_status = IB_UNKNOWN_ERROR;\r
+       if (err < 0)\r
+               err = -err;\r
+       switch (err) {\r
+               MAP_ERR( ENOENT, IB_NOT_FOUND );\r
+               MAP_ERR( EINTR, IB_INTERRUPTED );\r
+               MAP_ERR( EAGAIN, IB_RESOURCE_BUSY );\r
+               MAP_ERR( ENOMEM, IB_INSUFFICIENT_MEMORY );\r
+               MAP_ERR( EACCES, IB_INVALID_PERMISSION );\r
+               MAP_ERR( EFAULT, IB_ERROR );\r
+               MAP_ERR( EBUSY, IB_RESOURCE_BUSY );\r
+               MAP_ERR( ENODEV, IB_UNSUPPORTED );\r
+               MAP_ERR( EINVAL, IB_INVALID_PARAMETER );\r
+               MAP_ERR( ENOSYS, IB_UNSUPPORTED );\r
+               default:\r
+                       CL_TRACE (CL_DBG_ERROR, mlnx_dbg_lvl, ("Unmapped errno (%d)\n", err));\r
+                       break;\r
+       }\r
+       return ib_status;\r
+}\r
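+\r
+/*\r
+ * Intended use: the libibverbs-style ops below return negative errno values,\r
+ * which callers fold into an ib_api_status_t via this helper, e.g.\r
+ * (hypothetical value shown):\r
+ *\r
+ *     int err = -ENOMEM;                              // e.g. from ops.post_send\r
+ *     ib_api_status_t st = errno_to_iberr( err );     // -> IB_INSUFFICIENT_MEMORY\r
+ */\r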
+\r
+\r
+\r
+\r
+/*\r
+ * PROTOTYPES\r
+ */\r
+\r
+/************* CA operations *************************/\r
+void  \r
+mlnx_get_ca_interface (\r
+    IN OUT     uvp_interface_t                         *p_uvp );\r
+\r
+\r
+ib_api_status_t  \r
+mlnx_pre_open_ca (\r
+    IN         const ib_net64_t                        ca_guid,\r
+    IN OUT      ci_umv_buf_t                           *p_umv_buf);\r
+   \r
+    \r
+ib_api_status_t\r
+mlnx_post_open_ca (\r
+       IN                              const ib_net64_t                        ca_guid,\r
+       IN                              ib_api_status_t                         ioctl_status,\r
+               OUT                     ib_ca_handle_t                          *ph_uvp_ca,\r
+       IN                              ci_umv_buf_t                            *p_umv_buf );\r
+\r
+\r
+ib_api_status_t  \r
+mlnx_pre_query_ca (\r
+       IN                              ib_ca_handle_t                          h_uvp_ca,\r
+       IN                              ib_ca_attr_t                            *p_ca_attr,\r
+       IN                              size_t                                          byte_count,\r
+       IN                              ci_umv_buf_t                            *p_umv_buf );\r
+\r
+void  \r
+mlnx_post_query_ca (\r
+       IN                              ib_ca_handle_t                          h_uvp_ca,\r
+       IN                              ib_api_status_t                         ioctl_status,\r
+       IN                              ib_ca_attr_t                            *p_ca_attr,\r
+       IN                              size_t                                          byte_count,\r
+       IN                              ci_umv_buf_t                            *p_umv_buf );\r
+\r
+ib_api_status_t  \r
+mlnx_pre_modify_ca (\r
+    IN         ib_ca_handle_t                          h_uvp_ca,\r
+    IN         uint8_t                                 port_num,\r
+    IN         ib_ca_mod_t                             modca_cmd,\r
+    IN         const ib_port_attr_mod_t*       p_port_attr_mod );\r
+\r
+void  \r
+mlnx_post_modify_ca (\r
+    IN         ib_ca_handle_t                          h_uvp_ca,\r
+    IN         ib_api_status_t                         ioctl_status);\r
+\r
+ib_api_status_t  \r
+mlnx_pre_close_ca (\r
+    IN         ib_ca_handle_t                          h_uvp_ca );\r
+\r
+ib_api_status_t\r
+mlnx_post_close_ca (\r
+    IN         ib_ca_handle_t                          h_uvp_ca,\r
+    IN         ib_api_status_t                         ioctl_status );\r
+\r
+\r
+/************* PD Management *************************/\r
+void  \r
+mlnx_get_pd_interface (\r
+    IN OUT     uvp_interface_t                         *p_uvp );\r
+    \r
+ib_api_status_t  \r
+mlnx_pre_allocate_pd (\r
+    IN         const ib_ca_handle_t            h_uvp_ca,\r
+    IN OUT     ci_umv_buf_t                            *p_umv_buf);\r
+\r
+void  \r
+mlnx_post_allocate_pd (\r
+       IN                              ib_ca_handle_t                          h_uvp_ca,\r
+       IN                              ib_api_status_t                         ioctl_status,\r
+               OUT                     ib_pd_handle_t                          *ph_uvp_pd,\r
+       IN                              ci_umv_buf_t                            *p_umv_buf );\r
+\r
+ib_api_status_t  \r
+mlnx_pre_deallocate_pd (\r
+    IN         const ib_pd_handle_t            h_uvp_pd);\r
+\r
+void  \r
+mlnx_post_deallocate_pd (\r
+    IN         const ib_pd_handle_t            h_uvp_pd,\r
+    IN         ib_api_status_t                 ioctl_status );\r
+\r
+\r
+/************* AV Management *************************/\r
+void\r
+mlnx_get_av_interface (\r
+    IN OUT     uvp_interface_t                         *p_uvp );\r
+\r
+ib_api_status_t  \r
+mlnx_pre_create_av (\r
+    IN         const ib_pd_handle_t            h_uvp_pd,\r
+    IN         const ib_av_attr_t                      *p_addr_vector,\r
+    IN OUT     ci_umv_buf_t                            *p_umv_buf);\r
+\r
+\r
+void\r
+mlnx_post_create_av (\r
+       IN              const   ib_pd_handle_t                          h_uvp_pd,\r
+       IN                              ib_api_status_t                         ioctl_status,\r
+               OUT                     ib_av_handle_t                          *ph_uvp_av,\r
+       IN      OUT                     ci_umv_buf_t                            *p_umv_buf);\r
+\r
+ib_api_status_t  \r
+mlnx_pre_query_av (\r
+       IN      const           ib_av_handle_t          h_uvp_av,\r
+       IN OUT                  ci_umv_buf_t            *p_umv_buf );\r
+\r
+void  \r
+mlnx_post_query_av (\r
+       IN              const   ib_av_handle_t                          h_uvp_av,\r
+       IN                              ib_api_status_t                         ioctl_status,\r
+       IN      OUT                     ib_av_attr_t                            *p_addr_vector,\r
+       IN      OUT                     ib_pd_handle_t                          *ph_pd,\r
+       IN      OUT                     ci_umv_buf_t                            *p_umv_buf);\r
+\r
+ib_api_status_t  \r
+mlnx_pre_modify_av (\r
+    IN         const ib_av_handle_t            h_uvp_av,\r
+    IN         const ib_av_attr_t                      *p_addr_vector,\r
+    IN OUT     ci_umv_buf_t                            *p_umv_buf);\r
+\r
+void  \r
+mlnx_post_modify_av (\r
+    IN         const ib_av_handle_t            h_uvp_av,\r
+    IN         ib_api_status_t                         ioctl_status,\r
+    IN OUT     ci_umv_buf_t                            *p_umv_buf);\r
+\r
+ib_api_status_t  \r
+mlnx_pre_destroy_av (\r
+    IN         const ib_av_handle_t            h_uvp_av);\r
+\r
+void  \r
+mlnx_post_destroy_av (\r
+    IN         const ib_av_handle_t            h_uvp_av,\r
+    IN         ib_api_status_t                 ioctl_status);\r
+\r
+\r
+/************* CQ Management *************************/\r
+void  \r
+mlnx_get_cq_interface (\r
+    IN OUT     uvp_interface_t                         *p_uvp );\r
+\r
+ib_api_status_t  \r
+mlnx_pre_create_cq (\r
+       IN              const   ib_ca_handle_t                          h_uvp_ca,\r
+       IN      OUT                     uint32_t*                       const   p_size,\r
+       IN      OUT                     ci_umv_buf_t                            *p_umv_buf );\r
+\r
+void  \r
+mlnx_post_create_cq (\r
+       IN              const   ib_ca_handle_t                          h_uvp_ca,\r
+       IN                              ib_api_status_t                         ioctl_status,\r
+       IN              const   uint32_t                                        size,\r
+               OUT                     ib_cq_handle_t                          *ph_uvp_cq,\r
+       IN                              ci_umv_buf_t                            *p_umv_buf );\r
+\r
+ib_api_status_t  \r
+mlnx_pre_resize_cq (\r
+       IN              const   ib_cq_handle_t                          h_uvp_cq,\r
+       IN      OUT                     uint32_t*                       const   p_size,\r
+       IN      OUT                     ci_umv_buf_t                            *p_umv_buf );\r
+\r
+void  \r
+mlnx_post_resize_cq (\r
+    IN         const ib_cq_handle_t            h_uvp_cq,\r
+    IN         ib_api_status_t                         ioctl_status,\r
+    IN         const uint32_t                          size,\r
+    IN OUT     ci_umv_buf_t                            *p_umv_buf);\r
+\r
+ib_api_status_t  \r
+mlnx_pre_query_cq (\r
+       IN              const   ib_cq_handle_t          h_uvp_cq,\r
+               OUT                     uint32_t* const         p_size,\r
+       IN      OUT                     ci_umv_buf_t            *p_umv_buf);\r
+\r
+ib_api_status_t  \r
+mlnx_pre_destroy_cq (\r
+    IN         const ib_cq_handle_t            h_uvp_cq);\r
+\r
+void  \r
+mlnx_post_destroy_cq (\r
+    IN         const ib_cq_handle_t            h_uvp_cq,\r
+    IN         ib_api_status_t                 ioctl_status);\r
+\r
+/************* QP Management *************************/\r
+void  \r
+mlnx_get_qp_interface (\r
+    IN OUT     uvp_interface_t                         *p_uvp );\r
+\r
+ib_api_status_t  \r
+mlnx_pre_create_qp (\r
+    IN         const   ib_pd_handle_t          h_uvp_pd,// Fix me: if needed\r
+    IN         const   ib_qp_create_t          *p_create_attr,\r
+    IN OUT     ci_umv_buf_t                            *p_umv_buf);\r
+\r
+void  \r
+mlnx_post_create_qp (\r
+       IN              const   ib_pd_handle_t                          h_uvp_pd,\r
+       IN                              ib_api_status_t                         ioctl_status,\r
+               OUT                     ib_qp_handle_t                          *ph_uvp_qp,\r
+       IN                              ci_umv_buf_t                            *p_umv_buf );\r
+\r
+ib_api_status_t  \r
+mlnx_pre_modify_qp (\r
+    IN         const ib_qp_handle_t            h_uvp_qp,\r
+    IN         const ib_qp_mod_t                       *p_modify_attr, // Fixme\r
+    IN OUT     ci_umv_buf_t                            *p_umv_buf);\r
+\r
+void  \r
+mlnx_post_modify_qp (\r
+    IN         const ib_qp_handle_t            h_uvp_qp,\r
+    IN         ib_api_status_t                         ioctl_status,\r
+    IN OUT     ci_umv_buf_t                            *p_umv_buf);\r
+\r
+ib_api_status_t  \r
+mlnx_pre_query_qp (\r
+    IN         ib_qp_handle_t                          h_uvp_qp,\r
+    IN OUT     ci_umv_buf_t                            *p_umv_buf);\r
+\r
+void  \r
+mlnx_post_query_qp (\r
+    IN         ib_qp_handle_t                          h_uvp_qp,\r
+    IN         ib_api_status_t                         ioctl_status,\r
+    IN         ib_qp_attr_t                            *p_query_attr,\r
+    IN OUT     ci_umv_buf_t                            *p_umv_buf);\r
+\r
+ib_api_status_t  \r
+mlnx_pre_destroy_qp (\r
+    IN         const ib_qp_handle_t            h_uvp_qp);\r
+\r
+void  \r
+mlnx_post_destroy_qp (\r
+    IN         const ib_qp_handle_t            h_uvp_qp,\r
+    IN         ib_api_status_t                 ioctl_status );\r
+\r
+/************* MR/MW Management *************************/\r
+void  \r
+mlnx_get_mrw_interface (\r
+    IN OUT     uvp_interface_t                         *p_uvp );\r
+\r
+ib_api_status_t  \r
+mlnx_pre_register_mr (\r
+    IN         const ib_pd_handle_t            h_uvp_pd,\r
+    IN         const ib_mr_create_t            *p_mr_create,\r
+    IN OUT     ci_umv_buf_t                            *p_umv_buf);\r
+\r
+void  \r
+mlnx_post_register_mr (\r
+    IN         const ib_pd_handle_t            h_uvp_pd,\r
+    IN         ib_api_status_t                         ioctl_status,\r
+    IN         const uint32_t                          *p_lkey,\r
+    IN         const uint32_t                          *p_rkey,\r
+    OUT                const ib_mr_handle_t            *ph_uvp_mr,\r
+    IN OUT     ci_umv_buf_t                            *p_umv_buf);\r
+\r
+ib_api_status_t  \r
+mlnx_pre_query_mr (\r
+    IN         const ib_mr_handle_t            h_uvp_mr,\r
+    IN OUT     ci_umv_buf_t                            *p_umv_buf);\r
+\r
+void  \r
+mlnx_post_query_mr (\r
+    IN         const ib_mr_handle_t            h_uvp_mr,\r
+    IN         ib_api_status_t                         ioctl_status,\r
+    IN         const ib_mr_attr_t                      *p_mr_query,\r
+    IN OUT     ci_umv_buf_t                            *p_umv_buf);\r
+\r
+ib_api_status_t  \r
+mlnx_pre_modify_mr (\r
+    IN         const ib_mr_handle_t            h_uvp_mr,\r
+    IN         const ib_pd_handle_t            h_uvp_pd                OPTIONAL,\r
+    IN         const ib_mr_mod_t                       mr_mod_mask,\r
+    IN         const ib_mr_create_t            *p_mr_create    OPTIONAL,\r
+    IN OUT     ci_umv_buf_t                            *p_umv_buf);\r
+\r
+void  \r
+mlnx_post_modify_mr (\r
+    IN         const ib_mr_handle_t            h_uvp_mr,\r
+    IN         const ib_pd_handle_t            h_uvp_pd        OPTIONAL,\r
+    IN         ib_api_status_t                         ioctl_status,\r
+    IN         const uint32_t                          *p_lkey,\r
+    IN         const uint32_t                          *p_rkey,\r
+    IN OUT     ci_umv_buf_t                            *p_umv_buf);\r
+\r
+ib_api_status_t  \r
+mlnx_pre_register_smr (\r
+    IN         const ib_pd_handle_t            h_uvp_pd,\r
+    IN         const ib_mr_handle_t            h_uvp_mr,\r
+    IN         const ib_access_t                       access_ctrl,\r
+    IN         void                                            *p_vaddr,\r
+    IN OUT     ci_umv_buf_t                            *p_umv_buf);\r
+\r
+void  \r
+mlnx_post_register_smr (\r
+    IN         const ib_pd_handle_t            h_uvp_pd,\r
+    IN         const ib_mr_handle_t            h_uvp_mr,\r
+    IN         ib_api_status_t                         ioctl_status,\r
+    IN         const void                                      *p_vaddr,\r
+    IN         const uint32_t                          *p_lkey,\r
+    IN         const uint32_t                          *p_rkey,\r
+    OUT                const ib_mr_handle_t            *ph_uvp_smr,\r
+    IN OUT     ci_umv_buf_t                            *p_umv_buf);\r
+\r
+ib_api_status_t  \r
+mlnx_pre_deregister_mr (\r
+    IN         const ib_mr_handle_t            h_uvp_mr,\r
+    IN OUT     ci_umv_buf_t                            *p_umv_buf);\r
+\r
+void  \r
+mlnx_post_deregister_mr (\r
+    IN         const ib_mr_handle_t            h_uvp_mr,\r
+    IN OUT     ci_umv_buf_t                            *p_umv_buf);\r
+\r
+ib_api_status_t  \r
+mlnx_pre_create_mw (\r
+    IN         const ib_pd_handle_t            h_uvp_pd,\r
+    IN OUT     ci_umv_buf_t                            *p_umv_buf);\r
+\r
+void  \r
+mlnx_post_create_mw (\r
+       IN              const   ib_pd_handle_t                          h_uvp_pd,\r
+       IN                              ib_api_status_t                         ioctl_status,\r
+       IN                              net32_t                                         rkey,\r
+               OUT                     ib_mw_handle_t                          *ph_uvp_mw,\r
+       IN                              ci_umv_buf_t                            *p_umv_buf );\r
+\r
+ib_api_status_t  \r
+mlnx_pre_query_mw (\r
+    IN         const ib_mw_handle_t            h_uvp_mw,\r
+    IN OUT     ci_umv_buf_t                            *p_umv_buf);\r
+\r
+void  \r
+mlnx_post_query_mw (\r
+       IN              const   ib_mw_handle_t                          h_uvp_mw,\r
+       IN                              ib_api_status_t                         ioctl_status,\r
+       IN                              net32_t                                         rkey,\r
+               OUT                     ib_pd_handle_t                          *ph_pd,\r
+       IN                              ci_umv_buf_t                            *p_umv_buf );\r
+\r
+ib_api_status_t  \r
+mlnx_pre_destroy_mw (\r
+    IN         const ib_mw_handle_t            h_uvp_mw);\r
+    // IN OUT  ci_umv_buf_t                            *p_umv_buf);\r
+\r
+void  \r
+mlnx_post_destroy_mw (\r
+    IN         const ib_mw_handle_t            h_uvp_mw,\r
+    IN         ib_api_status_t                 ioctl_status);\r
+\r
+\r
+/************* MCAST Management *************************/\r
+void  \r
+mlnx_get_mcast_interface (\r
+    IN OUT     uvp_interface_t                         *p_uvp );\r
+\r
+\r
+ib_api_status_t  \r
+mlnx_pre_attach_mcast (\r
+    IN         const ib_qp_handle_t            h_uvp_qp,\r
+    IN         const ib_gid_t                          *p_mcast_gid,\r
+    IN         const uint16_t                          mcast_lid,\r
+    IN OUT     ci_umv_buf_t                            *p_umv_buf);\r
+\r
+void  \r
+mlnx_post_attach_mcast (\r
+    IN         const ib_qp_handle_t            h_uvp_qp,\r
+    IN         ib_api_status_t                         ioctl_status,\r
+    OUT                ib_mcast_handle_t                       *ph_mcast,\r
+    IN OUT     ci_umv_buf_t                            *p_umv_buf);\r
+\r
+ib_api_status_t  \r
+mlnx_pre_detach_mcast (\r
+    IN         ib_mcast_handle_t                       h_uvp_mcast,\r
+    IN OUT     ci_umv_buf_t                            *p_umv_buf);\r
+\r
+void  \r
+mlnx_post_detach_mcast (\r
+    IN         ib_mcast_handle_t                       h_uvp_mcast,\r
+    IN         ib_api_status_t                         ioctl_status,\r
+    IN OUT     ci_umv_buf_t                            *p_umv_buf);\r
+\r
+\r
+/************* OS BYPASS Management *************************/\r
+void  \r
+mlnx_get_osbypass_interface (\r
+    IN OUT     uvp_interface_t                         *p_uvp );\r
+\r
+ib_api_status_t  \r
+mlnx_post_send (\r
+       IN              const   void*           __ptr64                 h_qp,\r
+       IN                              ib_send_wr_t*   const           p_send_wr,\r
+               OUT                     ib_send_wr_t**                          pp_send_failure );\r
+\r
+ib_api_status_t   \r
+mlnx_post_recv (\r
+       IN              const   void* __ptr64                           h_qp,\r
+       IN                              ib_recv_wr_t*   const           p_recv_wr,\r
+               OUT                     ib_recv_wr_t**                          pp_recv_failure );\r
+\r
+ib_api_status_t  \r
+mlnx_bind_mw (\r
+       IN              const   ib_mw_handle_t                          h_uvp_mw,\r
+       IN              const   ib_qp_handle_t                          h_uvp_qp,\r
+       IN                              ib_bind_wr_t                            *p_mw_bind,\r
+               OUT                     net32_t* const                          p_rkey );\r
+\r
+ib_api_status_t  \r
+mlnx_poll_cq (\r
+       IN              const   void*           __ptr64                 h_cq,\r
+       IN      OUT                     ib_wc_t**       const                   pp_free_wclist,\r
+               OUT                     ib_wc_t**       const                   pp_done_wclist );\r
+\r
+ib_api_status_t  \r
+mlnx_enable_cq_notify (\r
+       IN              const   void*           __ptr64                 h_cq,\r
+       IN              const   boolean_t                                       solicited );\r
+\r
+ib_api_status_t  \r
+mlnx_enable_ncomp_cq_notify (\r
+       IN              const   void*           __ptr64                 h_cq,\r
+       IN              const   uint32_t                                        n_cqes );\r
+\r
+ib_api_status_t\r
+mlnx_peek_cq (\r
+       IN              const   void*           __ptr64                 h_cq,\r
+               OUT                     uint32_t* const                         p_n_cqes );\r
+\r
+#endif\r
diff --git a/trunk/hw/mthca/user/mlnx_ual_mcast.c b/trunk/hw/mthca/user/mlnx_ual_mcast.c
new file mode 100644 (file)
index 0000000..423d4a3
--- /dev/null
@@ -0,0 +1,123 @@
+/*\r
+ * Copyright (c) 2005 SilverStorm Technologies.  All rights reserved.\r
+ * Copyright (c) 2004-2005 Mellanox Technologies, Inc. All rights reserved. \r
+ *\r
+ * This software is available to you under the OpenIB.org BSD license\r
+ * below:\r
+ *\r
+ *     Redistribution and use in source and binary forms, with or\r
+ *     without modification, are permitted provided that the following\r
+ *     conditions are met:\r
+ *\r
+ *      - Redistributions of source code must retain the above\r
+ *        copyright notice, this list of conditions and the following\r
+ *        disclaimer.\r
+ *\r
+ *      - Redistributions in binary form must reproduce the above\r
+ *        copyright notice, this list of conditions and the following\r
+ *        disclaimer in the documentation and/or other materials\r
+ *        provided with the distribution.\r
+ *\r
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,\r
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\r
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND\r
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS\r
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN\r
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN\r
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\r
+ * SOFTWARE.\r
+ *\r
+ * $Id: mlnx_ual_mcast.c 256 2005-08-07 13:23:31Z sleybo $\r
+ */\r
+\r
+#include "mlnx_ual_main.h"\r
+\r
+#if defined(EVENT_TRACING)\r
+#include "mlnx_ual_mcast.tmh"\r
+#endif\r
+\r
+void\r
+mlnx_get_mcast_interface (\r
+    IN OUT     uvp_interface_t         *p_uvp )\r
+{\r
+    UVP_ENTER(UVP_DBG_SHIM);\r
+\r
+    CL_ASSERT(p_uvp);\r
+\r
+    /*\r
+     * Multicast Support Verbs\r
+     */\r
+    p_uvp->pre_attach_mcast  = NULL;\r
+    p_uvp->post_attach_mcast = NULL;\r
+    p_uvp->pre_detach_mcast  = NULL;\r
+    p_uvp->post_detach_mcast = NULL;\r
+\r
+    UVP_EXIT(UVP_DBG_SHIM);\r
+}\r
+\r
+\r
+\r
+ib_api_status_t\r
+mlnx_pre_attach_mcast (\r
+    IN const   ib_qp_handle_t          h_uvp_qp,\r
+    IN const   ib_gid_t                        *p_mcast_gid,\r
+    IN const   uint16_t                        mcast_lid,\r
+    IN OUT             ci_umv_buf_t            *p_umv_buf)\r
+{\r
+    UVP_ENTER(UVP_DBG_SHIM);\r
+#if 1\r
+    CL_ASSERT(p_umv_buf);\r
+    p_umv_buf->p_inout_buf = NULL;\r
+    p_umv_buf->input_size = 0;\r
+    p_umv_buf->output_size = 0;\r
+    p_umv_buf->command = TRUE;\r
+#endif\r
+\r
+    UVP_EXIT(UVP_DBG_SHIM);\r
+    return IB_SUCCESS;\r
+}\r
+\r
+\r
+\r
+void\r
+mlnx_post_attach_mcast (\r
+    IN         const ib_qp_handle_t    h_uvp_qp,\r
+    IN         ib_api_status_t                 ioctl_status,\r
+    OUT                ib_mcast_handle_t               *ph_mcast,\r
+    IN OUT     ci_umv_buf_t                    *p_umv_buf)\r
+{\r
+    UVP_ENTER(UVP_DBG_SHIM);\r
+    UVP_EXIT(UVP_DBG_SHIM);\r
+    return;\r
+}\r
+\r
+\r
+\r
+ib_api_status_t\r
+mlnx_pre_detach_mcast (\r
+    IN         ib_mcast_handle_t       h_uvp_mcast,\r
+    IN OUT     ci_umv_buf_t            *p_umv_buf)\r
+{\r
+    UVP_ENTER(UVP_DBG_SHIM);\r
+#if 1\r
+    CL_ASSERT(p_umv_buf);\r
+    p_umv_buf->p_inout_buf = NULL;\r
+    p_umv_buf->input_size = 0;\r
+    p_umv_buf->output_size = 0;\r
+#endif\r
+\r
+    UVP_EXIT(UVP_DBG_SHIM);\r
+    return IB_SUCCESS;\r
+}\r
+\r
+\r
+void\r
+mlnx_post_detach_mcast (\r
+    IN         ib_mcast_handle_t       h_uvp_mcast,\r
+    IN         ib_api_status_t         ioctl_status,\r
+    IN OUT     ci_umv_buf_t            *p_umv_buf)\r
+{\r
+    UVP_ENTER(UVP_DBG_SHIM);\r
+    UVP_EXIT(UVP_DBG_SHIM);\r
+    return;\r
+}\r
diff --git a/trunk/hw/mthca/user/mlnx_ual_mrw.c b/trunk/hw/mthca/user/mlnx_ual_mrw.c
new file mode 100644 (file)
index 0000000..b74415c
--- /dev/null
@@ -0,0 +1,445 @@
+/*\r
+ * Copyright (c) 2005 SilverStorm Technologies.  All rights reserved.\r
+ * Copyright (c) 2004-2005 Mellanox Technologies, Inc. All rights reserved. \r
+ *\r
+ * This software is available to you under the OpenIB.org BSD license\r
+ * below:\r
+ *\r
+ *     Redistribution and use in source and binary forms, with or\r
+ *     without modification, are permitted provided that the following\r
+ *     conditions are met:\r
+ *\r
+ *      - Redistributions of source code must retain the above\r
+ *        copyright notice, this list of conditions and the following\r
+ *        disclaimer.\r
+ *\r
+ *      - Redistributions in binary form must reproduce the above\r
+ *        copyright notice, this list of conditions and the following\r
+ *        disclaimer in the documentation and/or other materials\r
+ *        provided with the distribution.\r
+ *\r
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,\r
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\r
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND\r
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS\r
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN\r
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN\r
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\r
+ * SOFTWARE.\r
+ *\r
+ * $Id: mlnx_ual_mrw.c 256 2005-08-07 13:23:31Z sleybo $\r
+ */\r
+\r
+#include "mlnx_ual_main.h"\r
+#if defined(EVENT_TRACING)\r
+#include "mlnx_ual_mrw.tmh"\r
+#endif\r
+\r
+void\r
+mlnx_get_mrw_interface (\r
+    IN OUT     uvp_interface_t         *p_uvp )\r
+{\r
+    UVP_ENTER(UVP_DBG_SHIM);\r
+\r
+    CL_ASSERT(p_uvp);\r
+\r
+    /*\r
+     * Memory Management Verbs\r
+     */\r
+//    p_uvp->pre_register_mr    = NULL;\r
+//    p_uvp->post_register_mr   = NULL;\r
+//    p_uvp->pre_query_mr       = NULL;\r
+//    p_uvp->post_query_mr      = NULL;\r
+//    p_uvp->pre_deregister_mr  = NULL;\r
+//    p_uvp->post_deregister_mr = NULL;\r
+//    p_uvp->pre_modify_mr      = NULL;\r
+//    p_uvp->post_modify_mr     = NULL;\r
+//    p_uvp->pre_register_smr   = NULL;\r
+//    p_uvp->post_register_smr  = NULL;\r
+\r
+    /*\r
+     * Memory Window Verbs\r
+     */\r
+#ifndef WIN_TO_BE_CHANGED\r
+       p_uvp->pre_create_mw    = NULL;\r
+       p_uvp->post_create_mw = NULL;\r
+       p_uvp->pre_query_mw     = NULL;\r
+       p_uvp->post_query_mw    = NULL;\r
+       p_uvp->pre_destroy_mw = NULL;\r
+       p_uvp->post_destroy_mw = NULL;\r
+#else\r
+    p_uvp->pre_create_mw  = mlnx_pre_create_mw;\r
+    p_uvp->post_create_mw = mlnx_post_create_mw;\r
+    p_uvp->pre_query_mw   = mlnx_pre_query_mw;\r
+    p_uvp->post_query_mw  = mlnx_post_query_mw;\r
+    p_uvp->pre_destroy_mw = mlnx_pre_destroy_mw;\r
+    p_uvp->post_destroy_mw = mlnx_post_destroy_mw;\r
+#endif\r
+    /* register_pmr is not supported in user-mode */\r
+\r
+    UVP_EXIT(UVP_DBG_SHIM);\r
+}\r
+\r
+\r
+\r
+ib_api_status_t\r
+mlnx_pre_register_mr (\r
+    IN         const ib_pd_handle_t            h_uvp_pd,\r
+    IN         const ib_mr_create_t            *p_mr_create,\r
+    IN OUT     ci_umv_buf_t                    *p_umv_buf)\r
+{\r
+    UVP_ENTER(UVP_DBG_SHIM);\r
+    CL_ASSERT(p_umv_buf);\r
+    p_umv_buf->p_inout_buf = NULL;\r
+    p_umv_buf->input_size = 0;\r
+    p_umv_buf->output_size = 0;\r
+\r
+    UVP_EXIT(UVP_DBG_SHIM);\r
+    return IB_SUCCESS;\r
+}\r
+\r
+\r
+void\r
+mlnx_post_register_mr (\r
+    IN         const ib_pd_handle_t            h_uvp_pd,\r
+    IN         ib_api_status_t                 ioctl_status,\r
+    IN         const uint32_t                  *p_lkey,\r
+    IN         const uint32_t                  *p_rkey,\r
+    OUT                const ib_mr_handle_t            *ph_uvp_mr,\r
+    IN OUT     ci_umv_buf_t                    *p_umv_buf)\r
+{\r
+    UVP_ENTER(UVP_DBG_SHIM);\r
+    UVP_EXIT(UVP_DBG_SHIM);\r
+    return;\r
+}\r
+\r
+\r
+ib_api_status_t\r
+mlnx_pre_query_mr (\r
+    IN         const ib_mr_handle_t            h_uvp_mr,\r
+    IN OUT     ci_umv_buf_t                    *p_umv_buf)\r
+{\r
+    UVP_ENTER(UVP_DBG_SHIM);\r
+    CL_ASSERT(p_umv_buf);\r
+    p_umv_buf->p_inout_buf = NULL;\r
+    p_umv_buf->input_size = 0;\r
+    p_umv_buf->output_size = 0;\r
+\r
+    UVP_EXIT(UVP_DBG_SHIM);\r
+    return IB_SUCCESS;\r
+}\r
+\r
+\r
+void\r
+mlnx_post_query_mr (\r
+    IN         const ib_mr_handle_t            h_uvp_mr,\r
+    IN         ib_api_status_t                 ioctl_status,\r
+    IN         const ib_mr_attr_t              *p_mr_query,\r
+    IN OUT     ci_umv_buf_t                    *p_umv_buf)\r
+{\r
+    UVP_ENTER(UVP_DBG_SHIM);\r
+    UVP_EXIT(UVP_DBG_SHIM);\r
+    return;\r
+}\r
+\r
+\r
+ib_api_status_t\r
+mlnx_pre_modify_mr (\r
+    IN         const ib_mr_handle_t            h_uvp_mr,\r
+    IN         const ib_pd_handle_t            h_uvp_pd        OPTIONAL,\r
+    IN         const ib_mr_mod_t               mr_mod_mask,\r
+    IN         const ib_mr_create_t            *p_mr_create    OPTIONAL,\r
+    IN OUT     ci_umv_buf_t                    *p_umv_buf)\r
+{\r
+    UVP_ENTER(UVP_DBG_SHIM);\r
+    CL_ASSERT(p_umv_buf);\r
+    p_umv_buf->p_inout_buf = NULL;\r
+    p_umv_buf->input_size = 0;\r
+    p_umv_buf->output_size = 0;\r
+\r
+    UVP_EXIT(UVP_DBG_SHIM);\r
+    return IB_SUCCESS;\r
+}\r
+\r
+\r
+void\r
+mlnx_post_modify_mr (\r
+    IN         const ib_mr_handle_t            h_uvp_mr,\r
+    IN         const ib_pd_handle_t            h_uvp_pd        OPTIONAL,\r
+    IN         ib_api_status_t                 ioctl_status,\r
+    IN         const uint32_t                  *p_lkey,\r
+    IN         const uint32_t                  *p_rkey,\r
+    IN OUT     ci_umv_buf_t                    *p_umv_buf)\r
+{\r
+    UVP_ENTER(UVP_DBG_SHIM);\r
+    UVP_EXIT(UVP_DBG_SHIM);\r
+    return;\r
+}\r
+\r
+\r
+ib_api_status_t\r
+mlnx_pre_register_smr (\r
+    IN         const ib_pd_handle_t            h_uvp_pd,\r
+    IN         const ib_mr_handle_t            h_uvp_mr,\r
+    IN         const ib_access_t               access_ctrl,\r
+    IN         void                            *p_vaddr,\r
+    IN OUT     ci_umv_buf_t                    *p_umv_buf)\r
+{\r
+    UVP_ENTER(UVP_DBG_SHIM);\r
+    CL_ASSERT(p_umv_buf);\r
+    p_umv_buf->p_inout_buf = NULL;\r
+    p_umv_buf->input_size = 0;\r
+    p_umv_buf->output_size = 0;\r
+\r
+    UVP_EXIT(UVP_DBG_SHIM);\r
+    return IB_SUCCESS;\r
+}\r
+\r
+\r
+void\r
+mlnx_post_register_smr (\r
+    IN         const ib_pd_handle_t            h_uvp_pd,\r
+    IN         const ib_mr_handle_t            h_uvp_mr,\r
+    IN         ib_api_status_t                 ioctl_status,\r
+    IN         const void                      *p_vaddr,\r
+    IN         const uint32_t                  *p_lkey,\r
+    IN         const uint32_t                  *p_rkey,\r
+    OUT                const ib_mr_handle_t            *ph_uvp_smr,\r
+    IN OUT     ci_umv_buf_t                    *p_umv_buf)\r
+{\r
+    UVP_ENTER(UVP_DBG_SHIM);\r
+    UVP_EXIT(UVP_DBG_SHIM);\r
+    return;\r
+}\r
+\r
+\r
+ib_api_status_t\r
+mlnx_pre_deregister_mr (\r
+    IN         const ib_mr_handle_t            h_uvp_mr,\r
+    IN OUT     ci_umv_buf_t                    *p_umv_buf)\r
+{\r
+    UVP_ENTER(UVP_DBG_SHIM);\r
+    UVP_EXIT(UVP_DBG_SHIM);\r
+    return IB_SUCCESS;\r
+}\r
+\r
+\r
+void\r
+mlnx_post_deregister_mr (\r
+    IN         const ib_mr_handle_t            h_uvp_mr,\r
+    IN OUT     ci_umv_buf_t                    *p_umv_buf)\r
+{\r
+    UVP_ENTER(UVP_DBG_SHIM);\r
+    UVP_EXIT(UVP_DBG_SHIM);\r
+    return;\r
+}\r
+\r
\r
+#ifdef WIN_TO_BE_CHANGED\r
+\r
+ib_api_status_t\r
+mlnx_pre_create_mw (\r
+    IN         const ib_pd_handle_t            h_uvp_pd,\r
+    IN OUT     ci_umv_buf_t                    *p_umv_buf)\r
+{\r
+    ib_api_status_t status = IB_SUCCESS;\r
+    mlnx_ual_pd_info_t *p_pd_info = (mlnx_ual_pd_info_t *)((void*) h_uvp_pd);\r
+    mlnx_ual_hobul_t *p_hobul;\r
+    mlnx_ual_mw_info_t *p_new_mw;\r
+\r
+    UVP_ENTER(UVP_DBG_SHIM);\r
+    CL_ASSERT(p_umv_buf);\r
+    CL_ASSERT(p_pd_info);\r
+\r
+    p_hobul = p_pd_info->p_hobul;\r
+    CL_ASSERT(p_hobul);\r
+\r
+    p_new_mw = cl_zalloc (sizeof (mlnx_ual_mw_info_t));\r
+    if (p_new_mw == NULL)\r
+    {\r
+       UVP_PRINT(TRACE_LEVEL_ERROR ,UVP_DBG_SHIM ,\r
+           ("Failed to alloc memory\n"));\r
+       status = IB_INSUFFICIENT_MEMORY;\r
+       goto cleanup;\r
+    }\r
+    p_new_mw->h_uvp_pd = h_uvp_pd;\r
+\r
+    p_umv_buf->input_size = p_umv_buf->output_size = \r
+       sizeof (mlnx_ual_mw_info_t *);\r
+\r
+    p_umv_buf->p_inout_buf = cl_zalloc (p_umv_buf->input_size);\r
+    if (p_umv_buf->p_inout_buf == NULL)\r
+    {\r
+       UVP_PRINT(TRACE_LEVEL_ERROR ,UVP_DBG_SHIM ,\r
+           ("Failed to alloc memory for priv buffer\n"));\r
+       status = IB_INSUFFICIENT_MEMORY;\r
+       goto cleanup;\r
+    }\r
+    p_umv_buf->status = IB_SUCCESS;\r
+    p_umv_buf->command = TRUE;\r
+\r
+    cl_memcpy (p_umv_buf->p_inout_buf, &p_new_mw, p_umv_buf->input_size);\r
+\r
+cleanup:\r
+    if (IB_SUCCESS != status)\r
+    {\r
+        if (p_new_mw)\r
+        {\r
+            cl_free (p_new_mw);\r
+        }\r
+    }\r
+\r
+    UVP_EXIT(UVP_DBG_SHIM);\r
+    return status;\r
+}\r
+\r
+\r
+void\r
+mlnx_post_create_mw (\r
+       IN              const   ib_pd_handle_t                          h_uvp_pd,\r
+       IN                              ib_api_status_t                         ioctl_status,\r
+       IN                              net32_t                                         rkey,\r
+               OUT                     ib_mw_handle_t                          *ph_uvp_mw,\r
+       IN                              ci_umv_buf_t                            *p_umv_buf )\r
+{\r
+    ib_api_status_t status;\r
+    mlnx_ual_pd_info_t *p_pd_info = (mlnx_ual_pd_info_t *)((void*) h_uvp_pd);\r
+    mlnx_ual_hobul_t *p_hobul;\r
+    mlnx_ual_mw_info_t *p_new_mw;\r
+\r
+    UVP_ENTER(UVP_DBG_SHIM);\r
+    CL_ASSERT(p_umv_buf);\r
+    CL_ASSERT(p_pd_info);\r
+\r
+    p_hobul = p_pd_info->p_hobul;\r
+    CL_ASSERT(p_hobul);\r
+    \r
+\r
+    status = ioctl_status;\r
+\r
+    CL_ASSERT (p_umv_buf->p_inout_buf);\r
+    cl_memcpy (&p_new_mw, p_umv_buf->p_inout_buf, p_umv_buf->input_size);\r
+    \r
+\r
+    *ph_uvp_mw = (ib_mw_handle_t) p_new_mw;\r
+\r
+    if (IB_SUCCESS == status)\r
+    {\r
+       if (IB_SUCCESS != p_umv_buf->status) \r
+       {\r
+           UVP_PRINT(TRACE_LEVEL_ERROR ,UVP_DBG_SHIM ,\r
+               ("Bad status %ld\n", p_umv_buf->status));\r
+           status = p_umv_buf->status;\r
+           goto cleanup;\r
+       }\r
+\r
+       p_new_mw->rkey = rkey;\r
+\r
+#ifdef WIN_TO_BE_CHANGED\r
+       if (HH_OK !=\r
+           THHUL_mwm_alloc_mw (p_hobul->hhul_hca_hndl,\r
+           rkey,\r
+           &p_new_mw->hhul_mw_hndl))\r
+       {\r
+           UVP_PRINT(TRACE_LEVEL_ERROR ,UVP_DBG_SHIM ,\r
+               ("thhul_alloc_mw failed\n"));\r
+           status = IB_ERROR;\r
+           goto cleanup;\r
+       }\r
+#endif \r
+    }\r
+    else\r
+    {\r
+       cl_free (p_new_mw);\r
+    }\r
+\r
+cleanup:\r
+    cl_free (p_umv_buf->p_inout_buf);\r
+    p_umv_buf->p_inout_buf = NULL;\r
+\r
+    UVP_EXIT(UVP_DBG_SHIM);\r
+    return;\r
+}\r
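+\r
+/*\r
+ * The pre/post pair above shows the umv_buf convention used by this shim:\r
+ * pre_* allocates the user-mode tracking struct and passes its pointer\r
+ * through p_umv_buf->p_inout_buf across the kernel IOCTL; post_* copies the\r
+ * pointer back, stores the returned rkey and hands the struct out as the\r
+ * UVP handle.  A condensed, hypothetical round trip (error handling and the\r
+ * actual IOCTL call omitted):\r
+ *\r
+ *     ci_umv_buf_t umv_buf = {0};\r
+ *     mlnx_pre_create_mw( h_pd, &umv_buf );\r
+ *     // ... access layer performs the create-MW IOCTL, kernel returns rkey ...\r
+ *     mlnx_post_create_mw( h_pd, IB_SUCCESS, rkey, &h_mw, &umv_buf );\r
+ */\r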
+\r
+\r
+ib_api_status_t\r
+mlnx_pre_query_mw (\r
+    IN         const ib_mw_handle_t            h_uvp_mw,\r
+    IN OUT     ci_umv_buf_t                    *p_umv_buf)\r
+{\r
+    UVP_ENTER(UVP_DBG_SHIM);\r
+    CL_ASSERT(p_umv_buf);\r
+    p_umv_buf->p_inout_buf = NULL;\r
+    p_umv_buf->input_size = 0;\r
+    p_umv_buf->output_size = 0;\r
+    p_umv_buf->status = IB_SUCCESS;\r
+    p_umv_buf->command = TRUE;\r
+    UVP_EXIT(UVP_DBG_SHIM);\r
+    return IB_SUCCESS;\r
+}\r
+\r
+\r
+void\r
+mlnx_post_query_mw (\r
+       IN              const   ib_mw_handle_t                          h_uvp_mw,\r
+       IN                              ib_api_status_t                         ioctl_status,\r
+       IN                              net32_t                                         rkey,\r
+               OUT                     ib_pd_handle_t                          *ph_pd,\r
+       IN                              ci_umv_buf_t                            *p_umv_buf )\r
+{\r
+    UVP_ENTER(UVP_DBG_SHIM);\r
+    *ph_pd = ((mlnx_ual_mw_info_t *)((void*)h_uvp_mw))->h_uvp_pd;\r
+\r
+    UVP_EXIT(UVP_DBG_SHIM);\r
+    return;\r
+}\r
+\r
+\r
+ib_api_status_t\r
+mlnx_pre_destroy_mw (\r
+    IN         const ib_mw_handle_t            h_uvp_mw)\r
+{\r
+    UVP_ENTER(UVP_DBG_SHIM);\r
+    UVP_EXIT(UVP_DBG_SHIM);\r
+    return IB_SUCCESS;\r
+}\r
+\r
+\r
+void\r
+mlnx_post_destroy_mw (\r
+    IN         const ib_mw_handle_t            h_uvp_mw,\r
+    IN         ib_api_status_t                 ioctl_status)\r
+{\r
+    ib_api_status_t status = IB_SUCCESS;\r
+    mlnx_ual_pd_info_t *p_pd_info;\r
+    mlnx_ual_hobul_t *p_hobul;\r
+    mlnx_ual_mw_info_t *p_mw_info = (mlnx_ual_mw_info_t *)((void*) h_uvp_mw);\r
+\r
+    UVP_ENTER(UVP_DBG_SHIM);\r
+    CL_ASSERT(p_mw_info);\r
+\r
+    p_pd_info = (mlnx_ual_pd_info_t *)((void*) p_mw_info->h_uvp_pd);\r
+    CL_ASSERT(p_pd_info);\r
+\r
+    p_hobul = p_pd_info->p_hobul;\r
+    CL_ASSERT(p_hobul);\r
+\r
+#ifdef WIN_TO_BE_CHANGED\r
+    if (HH_OK !=\r
+        THHUL_mwm_free_mw (p_hobul->hhul_hca_hndl, p_mw_info->hhul_mw_hndl))\r
+    {\r
+        UVP_PRINT(TRACE_LEVEL_ERROR ,UVP_DBG_SHIM ,\r
+                  ("thhul_free_mw failed\n"));\r
+        status = IB_ERROR;\r
+    }\r
+#endif\r
+    if (status == IB_SUCCESS)\r
+    {\r
+        cl_free (p_mw_info);\r
+    }\r
+\r
+    UVP_EXIT(UVP_DBG_SHIM);\r
+    return;\r
+}\r
+\r
+#endif\r
diff --git a/trunk/hw/mthca/user/mlnx_ual_osbypass.c b/trunk/hw/mthca/user/mlnx_ual_osbypass.c
new file mode 100644 (file)
index 0000000..b09ce3f
--- /dev/null
@@ -0,0 +1,232 @@
+/*\r
+ * Copyright (c) 2005 SilverStorm Technologies.  All rights reserved.\r
+ * Copyright (c) 2004-2005 Mellanox Technologies, Inc. All rights reserved. \r
+ *\r
+ * This software is available to you under the OpenIB.org BSD license\r
+ * below:\r
+ *\r
+ *     Redistribution and use in source and binary forms, with or\r
+ *     without modification, are permitted provided that the following\r
+ *     conditions are met:\r
+ *\r
+ *      - Redistributions of source code must retain the above\r
+ *        copyright notice, this list of conditions and the following\r
+ *        disclaimer.\r
+ *\r
+ *      - Redistributions in binary form must reproduce the above\r
+ *        copyright notice, this list of conditions and the following\r
+ *        disclaimer in the documentation and/or other materials\r
+ *        provided with the distribution.\r
+ *\r
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,\r
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\r
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND\r
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS\r
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN\r
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN\r
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\r
+ * SOFTWARE.\r
+ *\r
+ * $Id: mlnx_ual_osbypass.c 256 2005-08-07 13:23:31Z sleybo $\r
+ */\r
+\r
+#include "mt_l2w.h"\r
+#include "mlnx_uvp.h"\r
+#include "mx_abi.h"\r
+\r
+#include "mlnx_ual_main.h"\r
+#if defined(EVENT_TRACING)\r
+#include "mlnx_ual_osbypass.tmh"\r
+#endif\r
+\r
+\r
+void\r
+mlnx_get_osbypass_interface (\r
+    IN OUT     uvp_interface_t         *p_uvp )\r
+{\r
+\r
+    CL_ASSERT(p_uvp);\r
+\r
+    /*\r
+     * Work Request Processing Verbs\r
+     * Should the types be same as Verbs?\r
+     */\r
+    p_uvp->post_send = mlnx_post_send;\r
+    p_uvp->post_recv = mlnx_post_recv;\r
+\r
+    /*\r
+     * Completion Processing and \r
+     * Completion Notification Request Verbs.\r
+     * Should the types be same as Verbs?\r
+     */\r
+    p_uvp->poll_cq  = mlnx_poll_cq;\r
+    p_uvp->rearm_cq = mlnx_enable_cq_notify;\r
+    p_uvp->rearm_n_cq = NULL; /* mlnx_enable_ncomp_cq_notify: Not implemented */\r
+    p_uvp->peek_cq  = NULL; /* mlnx_peek_cq: Not implemented */\r
+\r
+    /* Memory window bind */\r
+    p_uvp->bind_mw = NULL; /* mlnx_bind_mw: Not implemented */\r
+}\r
+\r
+\r
+ib_api_status_t\r
+mlnx_post_send (\r
+       IN              const   void*           __ptr64                 h_qp,\r
+       IN                              ib_send_wr_t*   const           p_send_wr,\r
+               OUT                     ib_send_wr_t**                          pp_send_failure )\r
+{\r
+       int err;\r
+       ib_api_status_t status = IB_SUCCESS;\r
+    mlnx_ual_qp_info_t *p_qp_info = (mlnx_ual_qp_info_t *)((void*) h_qp);\r
+       mlnx_ual_hobul_t *p_hobul = p_qp_info->h_uvp_pd->p_hobul;\r
+\r
+    UVP_ENTER(UVP_DBG_QP);\r
+\r
+    CL_ASSERT (p_qp_info && p_qp_info->h_uvp_pd && p_qp_info->h_uvp_pd->p_hobul);\r
+    p_hobul = p_qp_info->h_uvp_pd->p_hobul;\r
+\r
+       CL_ASSERT( p_send_wr );\r
+\r
+       err = p_hobul->ibv_ctx->ops.post_send(p_qp_info->ibv_qp, p_send_wr, pp_send_failure );\r
+       if (err) {\r
+               UVP_PRINT(TRACE_LEVEL_ERROR ,UVP_DBG_QP , ("mthca_post_send failed (%d)\n", err));\r
+               if (err == -ENOMEM)\r
+                       status = IB_INSUFFICIENT_RESOURCES;\r
+               else if (err == -EINVAL) \r
+                       status = IB_INVALID_WR_TYPE;\r
+               else if (err == -ERANGE)\r
+                       status = IB_INVALID_MAX_SGE;\r
+               else if (err == -EBUSY)\r
+                       status = IB_INVALID_QP_STATE;\r
+               else\r
+                       status = errno_to_iberr(err);\r
+       }\r
+\r
+    UVP_EXIT(UVP_DBG_QP);\r
+    return status;\r
+}\r
+\r
+\r
+ib_api_status_t\r
+mlnx_post_recv (\r
+       IN              const   void*           __ptr64                 h_qp,\r
+       IN                              ib_recv_wr_t*   const           p_recv_wr,\r
+               OUT                     ib_recv_wr_t**                          pp_recv_failure )\r
+{\r
+       int err;\r
+       ib_api_status_t status = IB_SUCCESS;\r
+       mlnx_ual_qp_info_t *p_qp_info = (mlnx_ual_qp_info_t *)((void*) h_qp);\r
+       mlnx_ual_hobul_t *p_hobul = p_qp_info->h_uvp_pd->p_hobul;\r
+\r
+    UVP_ENTER(UVP_DBG_QP);\r
+\r
+    CL_ASSERT (p_qp_info && p_qp_info->h_uvp_pd && p_qp_info->h_uvp_pd->p_hobul);\r
+    p_hobul = p_qp_info->h_uvp_pd->p_hobul;\r
+\r
+       CL_ASSERT( p_recv_wr );\r
+\r
+       err = p_hobul->ibv_ctx->ops.post_recv(p_qp_info->ibv_qp, p_recv_wr, pp_recv_failure );\r
+       if (err) {\r
+               UVP_PRINT(TRACE_LEVEL_ERROR ,UVP_DBG_QP, ("mthca_post_recv failed (%d)\n", err));\r
+               if (err == -ENOMEM)\r
+                       status = IB_INSUFFICIENT_RESOURCES;\r
+               else if (err == -EINVAL) \r
+                       status = IB_INVALID_WR_TYPE;\r
+               else if (err == -ERANGE)\r
+                       status = IB_INVALID_MAX_SGE;\r
+               else if (err == -EBUSY)\r
+                       status = IB_INVALID_QP_STATE;\r
+               else\r
+                       status = errno_to_iberr(err);\r
+       }\r
+\r
+    UVP_EXIT(UVP_DBG_QP);\r
+    return status;\r
+}\r
+\r
+\r
+ib_api_status_t\r
+mlnx_poll_cq (\r
+       IN              const   void*           __ptr64                 h_cq,\r
+       IN      OUT                     ib_wc_t**       const                   pp_free_wclist,\r
+               OUT                     ib_wc_t**       const                   pp_done_wclist )\r
+{\r
+       int err;\r
+    ib_api_status_t status = IB_SUCCESS;\r
+    mlnx_ual_hobul_t *p_hobul;\r
+    mlnx_ual_cq_info_t *p_cq_info = (mlnx_ual_cq_info_t *)((void*) h_cq);\r
+\r
+    UVP_ENTER(UVP_DBG_CQ);\r
+    CL_ASSERT (p_cq_info);\r
+\r
+    p_hobul = (mlnx_ual_hobul_t *) p_cq_info->p_hobul;\r
+    CL_ASSERT (p_hobul);\r
+\r
+    if (!pp_free_wclist || !*pp_free_wclist || !pp_done_wclist)\r
+    {\r
+        UVP_PRINT(TRACE_LEVEL_ERROR ,UVP_DBG_CQ ,("Passed in bad params\n")); \r
+        status = IB_INVALID_PARAMETER;\r
+               goto err_invalid_params;\r
+    }\r
+\r
+       err = p_hobul->ibv_ctx->ops.poll_cq_list(p_cq_info->ibv_cq, pp_free_wclist, pp_done_wclist );\r
+       if (err) {\r
+               UVP_PRINT(TRACE_LEVEL_ERROR ,UVP_DBG_CQ , ("mthca_poll_cq failed (%d)\n", err));\r
+                       status = errno_to_iberr(err);\r
+       }else if (!*pp_done_wclist)\r
+                       status = IB_NOT_FOUND;\r
+       \r
+\r
+err_invalid_params:\r
+\r
+       if (status != IB_SUCCESS && status != IB_NOT_FOUND){\r
+               UVP_PRINT_EXIT(TRACE_LEVEL_ERROR ,UVP_DBG_CQ ,("completes with ERROR status %s\n", ib_get_err_str(status)));\r
+       }else\r
+               UVP_EXIT(UVP_DBG_CQ);\r
+\r
+    return status;\r
+}\r
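+\r
+/*\r
+ * Typical polling loop against the verb above.  The free-list/done-list\r
+ * convention and the ib_wc_t p_next chaining are assumed from ib_types.h;\r
+ * the consumer function is hypothetical:\r
+ *\r
+ *     ib_api_status_t status;\r
+ *     ib_wc_t wc, *p_free, *p_done;\r
+ *     do {\r
+ *             wc.p_next = NULL;  p_free = &wc;  p_done = NULL;\r
+ *             status = mlnx_poll_cq( h_cq, &p_free, &p_done );\r
+ *             if( status == IB_SUCCESS )\r
+ *                     process_completions( p_done );          // hypothetical consumer\r
+ *     } while( status == IB_SUCCESS );\r
+ */\r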
+\r
+\r
+ib_api_status_t\r
+mlnx_enable_cq_notify (\r
+       IN              const   void*           __ptr64                 h_cq,\r
+       IN              const   boolean_t                                       solicited )\r
+{\r
+       int err;\r
+       ib_api_status_t status = IB_SUCCESS;\r
+       mlnx_ual_hobul_t *p_hobul;\r
+       mlnx_ual_cq_info_t *p_cq_info = (mlnx_ual_cq_info_t *)((void*) h_cq);\r
+\r
+       UVP_ENTER(UVP_DBG_CQ);\r
+       CL_ASSERT (p_cq_info);\r
+\r
+       p_hobul = (mlnx_ual_hobul_t *) p_cq_info->p_hobul;\r
+       CL_ASSERT (p_hobul);\r
+\r
+       err = p_hobul->ibv_ctx->ops.req_notify_cq(p_cq_info->ibv_cq, (solicited) ? IB_CQ_SOLICITED : IB_CQ_NEXT_COMP );\r
+       if (err) {\r
+               UVP_PRINT(TRACE_LEVEL_ERROR ,UVP_DBG_SHIM , ("mthca_enable_cq_notify failed (%d)\n", err));\r
+               status = errno_to_iberr(err);\r
+               goto exit;\r
+       }\r
+\r
+exit:\r
+               UVP_EXIT(UVP_DBG_CQ);\r
+               return status;\r
+}\r
+\r
+\r
+ib_api_status_t\r
+mlnx_enable_ncomp_cq_notify (\r
+       IN              const   void*           __ptr64                 h_cq,\r
+       IN              const   uint32_t                                        n_cqes )\r
+{\r
+       // Not yet implemented\r
+    ib_api_status_t status = IB_UNSUPPORTED;\r
+    UVP_ENTER(UVP_DBG_SHIM);\r
+       UVP_PRINT(TRACE_LEVEL_ERROR ,UVP_DBG_SHIM , ("mlnx_enable_ncomp_cq_notify is not implemented yet\n"));\r
+       UVP_EXIT(UVP_DBG_SHIM);\r
+    return status;\r
+}\r
+\r
diff --git a/trunk/hw/mthca/user/mlnx_ual_pd.c b/trunk/hw/mthca/user/mlnx_ual_pd.c
new file mode 100644 (file)
index 0000000..01cad30
--- /dev/null
@@ -0,0 +1,179 @@
+/*\r
+ * Copyright (c) 2005 SilverStorm Technologies.  All rights reserved.\r
+ * Copyright (c) 2004-2005 Mellanox Technologies, Inc. All rights reserved. \r
+ *\r
+ * This software is available to you under the OpenIB.org BSD license\r
+ * below:\r
+ *\r
+ *     Redistribution and use in source and binary forms, with or\r
+ *     without modification, are permitted provided that the following\r
+ *     conditions are met:\r
+ *\r
+ *      - Redistributions of source code must retain the above\r
+ *        copyright notice, this list of conditions and the following\r
+ *        disclaimer.\r
+ *\r
+ *      - Redistributions in binary form must reproduce the above\r
+ *        copyright notice, this list of conditions and the following\r
+ *        disclaimer in the documentation and/or other materials\r
+ *        provided with the distribution.\r
+ *\r
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,\r
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\r
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND\r
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS\r
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN\r
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN\r
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\r
+ * SOFTWARE.\r
+ *\r
+ * $Id: mlnx_ual_pd.c 256 2005-08-07 13:23:31Z sleybo $\r
+ */\r
+\r
+\r
+#include "mt_l2w.h"\r
+#include "mlnx_ual_main.h"\r
+#include "mlnx_uvp.h"\r
+#include "mx_abi.h"\r
+\r
+#if defined(EVENT_TRACING)\r
+#include "mlnx_ual_pd.tmh"\r
+#endif\r
+\r
+void\r
+mlnx_get_pd_interface (\r
+    IN OUT     uvp_interface_t         *p_uvp )\r
+{\r
+    UVP_ENTER(UVP_DBG_SHIM);\r
+\r
+    CL_ASSERT(p_uvp);\r
+\r
+    /*\r
+     * Protection Domain\r
+     */\r
+    p_uvp->pre_allocate_pd    = mlnx_pre_allocate_pd;\r
+    p_uvp->post_allocate_pd   = mlnx_post_allocate_pd;\r
+    p_uvp->pre_deallocate_pd  = mlnx_pre_deallocate_pd;\r
+    p_uvp->post_deallocate_pd = mlnx_post_deallocate_pd;\r
+\r
+    UVP_EXIT(UVP_DBG_SHIM);\r
+}\r
+\r
+ib_api_status_t\r
+mlnx_pre_allocate_pd (\r
+    IN         const ib_ca_handle_t            h_uvp_ca,\r
+    IN OUT     ci_umv_buf_t                    *p_umv_buf)\r
+{\r
+       ib_api_status_t status = IB_SUCCESS;\r
+\r
+       UVP_ENTER(UVP_DBG_SHIM);\r
+\r
+       CL_ASSERT(p_umv_buf);\r
+\r
+       if( !p_umv_buf->p_inout_buf )\r
+       {\r
+               p_umv_buf->p_inout_buf = cl_zalloc( sizeof(struct ibv_alloc_pd_resp) );\r
+               if( !p_umv_buf->p_inout_buf )\r
+               {\r
+                       status = IB_INSUFFICIENT_MEMORY;\r
+                       goto err_memory;\r
+               }\r
+       }\r
+       p_umv_buf->input_size = p_umv_buf->output_size = sizeof(struct ibv_alloc_pd_resp);\r
+       p_umv_buf->command = TRUE;\r
+       \r
+err_memory:\r
+               UVP_EXIT(UVP_DBG_SHIM);\r
+               return status;\r
+}\r
+\r
+\r
+void\r
+mlnx_post_allocate_pd (\r
+       IN                              ib_ca_handle_t                          h_uvp_ca,\r
+       IN                              ib_api_status_t                         ioctl_status,\r
+               OUT                     ib_pd_handle_t                          *ph_uvp_pd,\r
+       IN                              ci_umv_buf_t                            *p_umv_buf )\r
+{\r
+       int err;\r
+       ib_api_status_t status = IB_SUCCESS;\r
+       struct ibv_alloc_pd_resp *p_resp;\r
+       struct ibv_pd *ibv_pd;\r
+       mlnx_ual_hobul_t *p_hobul = (mlnx_ual_hobul_t *)((void *)h_uvp_ca);\r
+       mlnx_ual_pd_info_t *p_new_pd;\r
+\r
+       UVP_ENTER(UVP_DBG_SHIM);\r
+\r
+       CL_ASSERT(p_hobul);\r
+       CL_ASSERT(p_umv_buf);\r
+       p_resp = (struct ibv_alloc_pd_resp *)p_umv_buf->p_inout_buf;\r
+\r
+       if (IB_SUCCESS == ioctl_status) {\r
+\r
+               /* allocate ibv_pd */\r
+               ibv_pd = p_hobul->ibv_ctx->ops.alloc_pd(p_hobul->ibv_ctx, p_resp);\r
+               if (IS_ERR(ibv_pd)) {\r
+                       err = PTR_ERR(ibv_pd);\r
+                       UVP_PRINT(TRACE_LEVEL_ERROR ,UVP_DBG_SHIM , ("mthca_alloc_pd failed (%d)\n", err));\r
+                       status = errno_to_iberr(err);\r
+                       goto err_alloc_pd;\r
+               }\r
+\r
+               /* allocate pd */\r
+               p_new_pd = (mlnx_ual_pd_info_t *)cl_zalloc( sizeof(mlnx_ual_pd_info_t) );\r
+               if( !p_new_pd ) {\r
+                       status = IB_INSUFFICIENT_MEMORY;\r
+                       goto err_memory;\r
+               }\r
+\r
+               /* return results */\r
+               p_new_pd->ibv_pd = ibv_pd;\r
+               p_new_pd->p_hobul = p_hobul;\r
+               *ph_uvp_pd = (ib_pd_handle_t)p_new_pd;\r
+       }\r
+       goto end;\r
+       \r
+err_memory: \r
+       p_hobul->ibv_ctx->ops.dealloc_pd(ibv_pd);\r
+err_alloc_pd:\r
+end:   \r
+       if (p_resp)\r
+               cl_free( p_resp );\r
+       UVP_EXIT(UVP_DBG_SHIM);\r
+       return;\r
+}\r
+\r
+\r
+ib_api_status_t\r
+mlnx_pre_deallocate_pd (\r
+    IN         const ib_pd_handle_t            h_uvp_pd)\r
+{\r
+       mlnx_ual_pd_info_t *p_pd_info = (mlnx_ual_pd_info_t *)((void *)h_uvp_pd);\r
+    UVP_ENTER(UVP_DBG_SHIM);\r
+    CL_ASSERT(p_pd_info);\r
+    UVP_EXIT(UVP_DBG_SHIM);\r
+    return IB_SUCCESS;\r
+}\r
+\r
+\r
+void\r
+mlnx_post_deallocate_pd (\r
+    IN         const ib_pd_handle_t            h_uvp_pd,\r
+    IN         ib_api_status_t                 ioctl_status )\r
+{\r
+       int err;\r
+    mlnx_ual_pd_info_t *p_pd_info = (mlnx_ual_pd_info_t *)((void *)h_uvp_pd);\r
+    UNREFERENCED_PARAMETER(ioctl_status);\r
+\r
+    UVP_ENTER(UVP_DBG_SHIM);\r
+\r
+    CL_ASSERT(p_pd_info && p_pd_info->ibv_pd);\r
+\r
+       err = p_pd_info->p_hobul->ibv_ctx->ops.dealloc_pd( p_pd_info->ibv_pd );\r
+       if (err) \r
+               UVP_PRINT(TRACE_LEVEL_ERROR ,UVP_DBG_SHIM , ("mthca_free_pd failed (%d)\n", err));\r
+\r
+    cl_free (p_pd_info);\r
+    UVP_EXIT(UVP_DBG_SHIM);\r
+    return;\r
+}\r
diff --git a/trunk/hw/mthca/user/mlnx_ual_qp.c b/trunk/hw/mthca/user/mlnx_ual_qp.c
new file mode 100644 (file)
index 0000000..ca3e41b
--- /dev/null
@@ -0,0 +1,682 @@
+/*\r
+ * Copyright (c) 2005 SilverStorm Technologies.  All rights reserved.\r
+ * Copyright (c) 2004-2005 Mellanox Technologies, Inc. All rights reserved. \r
+ *\r
+ * This software is available to you under the OpenIB.org BSD license\r
+ * below:\r
+ *\r
+ *     Redistribution and use in source and binary forms, with or\r
+ *     without modification, are permitted provided that the following\r
+ *     conditions are met:\r
+ *\r
+ *      - Redistributions of source code must retain the above\r
+ *        copyright notice, this list of conditions and the following\r
+ *        disclaimer.\r
+ *\r
+ *      - Redistributions in binary form must reproduce the above\r
+ *        copyright notice, this list of conditions and the following\r
+ *        disclaimer in the documentation and/or other materials\r
+ *        provided with the distribution.\r
+ *\r
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,\r
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\r
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND\r
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS\r
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN\r
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN\r
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\r
+ * SOFTWARE.\r
+ *\r
+ * $Id: mlnx_ual_qp.c 256 2005-08-07 13:23:31Z sleybo $\r
+ */\r
+\r
+#include "mt_l2w.h"\r
+#include "mlnx_uvp.h"\r
+#include "mx_abi.h"\r
+\r
+#include "mlnx_ual_main.h"\r
+#if defined(EVENT_TRACING)\r
+#include "mlnx_ual_qp.tmh"\r
+#endif\r
+\r
+\r
+void\r
+mlnx_get_qp_interface (\r
+    IN OUT     uvp_interface_t         *p_uvp )\r
+{\r
+    UVP_ENTER(UVP_DBG_SHIM);\r
+\r
+    CL_ASSERT(p_uvp);\r
+\r
+    /*\r
+     * QP Management Verbs\r
+     */\r
+    p_uvp->pre_create_qp   = mlnx_pre_create_qp;\r
+    p_uvp->post_create_qp  = mlnx_post_create_qp;\r
+\r
+    // !!! none for create_spl_qp, UAL will return error !!!\r
+\r
+    p_uvp->pre_modify_qp   = mlnx_pre_modify_qp;\r
+    p_uvp->post_modify_qp  = mlnx_post_modify_qp;\r
+    p_uvp->pre_query_qp    = NULL;\r
+    p_uvp->post_query_qp   = mlnx_post_query_qp;\r
+    p_uvp->pre_destroy_qp  = mlnx_pre_destroy_qp;\r
+    p_uvp->post_destroy_qp = mlnx_post_destroy_qp;\r
+\r
+    UVP_EXIT(UVP_DBG_SHIM);\r
+}\r
+\r
+\r
+#ifdef WIN_TO_BE_CHANGED\r
+IB_ts_t\r
+map_ibal_qp_type (ib_qp_type_t ibal_qp_type)\r
+{\r
+    if      (ibal_qp_type == IB_QPT_RELIABLE_CONN) return IB_TS_RC;\r
+    else if (ibal_qp_type == IB_QPT_UNRELIABLE_CONN) return IB_TS_UC;\r
+  //  else if (ibal_qp_type == IB_QPT_RELIABLE_DGRM) return IB_TS_RD;\r
+    else if (ibal_qp_type == IB_QPT_UNRELIABLE_DGRM) return IB_TS_UD;\r
+    else if (ibal_qp_type == IB_QPT_RAW_IPV6) return IB_TS_RAW;\r
+    else if (ibal_qp_type == IB_QPT_RAW_ETHER) return IB_TS_RAW;\r
+    else return IB_TS_UD;\r
+}\r
+#endif\r
+\r
+#ifndef WIN_TO_BE_CHANGED\r
+\r
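+/*\r
+ * Pre-ioctl step of QP creation: allocate one buffer big enough for both\r
+ * the ibv_create_qp request and the ibv_create_qp_resp reply, translate\r
+ * the IBAL attributes to ibv_qp_init_attr and let the provider's\r
+ * create_qp_pre fill in the request before it is sent to the kernel.\r
+ */\r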
+ib_api_status_t\r
+       mlnx_pre_create_qp (\r
+       IN              const ib_pd_handle_t            h_uvp_pd,\r
+       IN              const ib_qp_create_t            *p_create_attr,\r
+       IN OUT  ci_umv_buf_t                    *p_umv_buf)\r
+{\r
+       int err;\r
+       struct ibv_qp *ibv_qp;\r
+       struct ibv_qp_init_attr attr;\r
+       struct ibv_create_qp *p_create_qp;\r
+       ib_api_status_t status = IB_SUCCESS;\r
+       size_t size = max( sizeof(struct ibv_create_qp), sizeof(struct ibv_create_qp_resp) );\r
+       mlnx_ual_pd_info_t *p_pd = (mlnx_ual_pd_info_t *)h_uvp_pd;\r
+       mlnx_ual_hobul_t *p_hobul = p_pd->p_hobul;\r
+\r
+       UVP_ENTER(UVP_DBG_QP);\r
+\r
+       CL_ASSERT(p_umv_buf);\r
+\r
+       if( !p_umv_buf->p_inout_buf )\r
+       {\r
+               p_umv_buf->p_inout_buf = cl_zalloc( size );\r
+               if( !p_umv_buf->p_inout_buf )\r
+               {\r
+                       status = IB_INSUFFICIENT_MEMORY;\r
+                       goto err_memory;\r
+               }\r
+       }\r
+       p_umv_buf->input_size = sizeof(struct ibv_create_qp);\r
+       p_umv_buf->output_size = sizeof(struct ibv_create_qp_resp);\r
+       p_umv_buf->command = TRUE;\r
+\r
+       /* convert attributes */\r
+       attr.send_cq                                            = p_create_attr->h_sq_cq->ibv_cq;\r
+       attr.recv_cq                                                    = p_create_attr->h_rq_cq->ibv_cq;\r
+       attr.srq                                                                = NULL; /* absent in IBAL */\r
+       attr.cap.max_send_wr            = p_create_attr->sq_depth;\r
+       attr.cap.max_recv_wr            = p_create_attr->rq_depth;\r
+       attr.cap.max_send_sge           = p_create_attr->sq_sge;\r
+       attr.cap.max_recv_sge           = p_create_attr->rq_sge;\r
+       attr.cap.max_inline_data        = 0;                    /* absent in IBAL */\r
+       attr.qp_type                                                    = p_create_attr->qp_type;\r
+       attr.sq_sig_all                                         = p_create_attr->sq_signaled;\r
+       \r
+       /* allocate ibv_qp */\r
+       p_create_qp = (struct ibv_create_qp *)p_umv_buf->p_inout_buf;\r
+       ibv_qp = p_hobul->ibv_ctx->ops.create_qp_pre(p_pd->ibv_pd, &attr, p_create_qp);\r
+       if (IS_ERR(ibv_qp)) {\r
+               err = PTR_ERR(ibv_qp);\r
+               UVP_PRINT(TRACE_LEVEL_ERROR ,UVP_DBG_QP ,("mthca_create_qp_pre failed (%d)\n", err));\r
+               //fix return values\r
+               if (err == -EINVAL && (attr.cap.max_send_wr > 65536 || attr.cap.max_recv_wr > 65536))\r
+                       status = IB_INVALID_MAX_WRS;\r
+               else if (err == -EINVAL && (attr.cap.max_send_sge > 64 || attr.cap.max_recv_sge > 64))\r
+                       status = IB_INVALID_MAX_SGE;\r
+               else if (err == -ENOMEM && (attr.cap.max_send_sge == 0 || attr.cap.max_recv_sge == 0 ||\r
+                       attr.cap.max_send_wr == 0 || attr.cap.max_recv_wr == 0))\r
+                       status = IB_INVALID_SETTING;\r
+               else\r
+                       status = errno_to_iberr(err);\r
+\r
+               goto err_alloc_qp;\r
+       }\r
+\r
+       goto end;\r
+               \r
+err_alloc_qp:\r
+       cl_free(p_umv_buf->p_inout_buf);\r
+err_memory:\r
+end:\r
+               UVP_EXIT(UVP_DBG_QP);\r
+               return status;\r
+}\r
+\r
+#else\r
+\r
+ib_api_status_t\r
+mlnx_pre_create_qp (\r
+    IN         const   ib_pd_handle_t          h_uvp_pd,\r
+    IN         const   ib_qp_create_t          *p_create_attr,\r
+    IN OUT     ci_umv_buf_t                    *p_umv_buf)\r
+{\r
+    ib_api_status_t status = IB_SUCCESS;\r
+    mlnx_ual_pd_info_t *p_pd_info = (mlnx_ual_pd_info_t *)((void *)h_uvp_pd);\r
+    mlnx_ual_hobul_t *p_hobul;\r
+    mlnx_ual_qp_info_t *p_new_qp = NULL;\r
+#ifdef WIN_TO_BE_CHANGED\r
+    HHUL_qp_init_attr_t ul_qp_init_attr;\r
+#endif\r
+    size_t size=0;\r
+\r
+    UVP_ENTER(UVP_DBG_SHIM);\r
+    CL_ASSERT(p_pd_info);\r
+    CL_ASSERT(p_umv_buf);\r
+    CL_ASSERT(p_create_attr);\r
+\r
+    p_hobul = p_pd_info->p_hobul;\r
+    CL_ASSERT(p_hobul);\r
+\r
+    do \r
+    {\r
+        /* CA should be initialized */\r
+#ifdef WIN_TO_BE_CHANGED\r
+        if (!p_hobul->p_hca_ul_info) \r
+        {\r
+            UVP_PRINT(TRACE_LEVEL_ERROR ,UVP_DBG_SHIM ,\r
+                      ("INVALID hca_ul_info buffer\n"));\r
+            status = IB_INVALID_CA_HANDLE;\r
+            break;\r
+        }\r
+        \r
+        if (!p_hobul->p_hca_ul_resources) \r
+        {\r
+            UVP_PRINT(TRACE_LEVEL_ERROR ,UVP_DBG_SHIM ,\r
+                      ("INVALID hca_ul_resources buffer\n"));\r
+            status = IB_RESOURCE_BUSY;\r
+            break;\r
+        }\r
+        if (!p_pd_info->p_pd_ul_resources) \r
+        {\r
+            UVP_PRINT(TRACE_LEVEL_ERROR ,UVP_DBG_SHIM ,\r
+                      ("INVALID pd_ul_resources buffer\n"));\r
+            status = IB_RESOURCE_BUSY;\r
+            break;\r
+        }\r
+#endif\r
+\r
+        p_new_qp = cl_zalloc (sizeof(mlnx_ual_qp_info_t));\r
+        if (!p_new_qp) \r
+        {\r
+            UVP_PRINT(TRACE_LEVEL_ERROR ,UVP_DBG_SHIM ,\r
+                      ("Failed alloc new QP\n"));\r
+            status = IB_INSUFFICIENT_MEMORY;\r
+            break;\r
+        }\r
+\r
+#ifdef WIN_TO_BE_CHANGED\r
+        p_new_qp->p_qp_ul_resources = \r
+                cl_zalloc(p_hobul->p_hca_ul_info->qp_ul_resources_sz);\r
+        if (!p_new_qp->p_qp_ul_resources) \r
+        {\r
+            UVP_PRINT(TRACE_LEVEL_ERROR ,UVP_DBG_SHIM ,\r
+                      ("Failed alloc new QP UL resources\n"));\r
+            status = IB_INSUFFICIENT_MEMORY;\r
+            break;\r
+        }\r
+        cl_memclr (&ul_qp_init_attr, sizeof (HHUL_qp_init_attr_t));\r
+\r
+        ul_qp_init_attr.qp_cap.max_oust_wr_sq = p_create_attr->sq_depth;\r
+        ul_qp_init_attr.qp_cap.max_oust_wr_rq = p_create_attr->rq_depth;\r
+        ul_qp_init_attr.qp_cap.max_sg_size_sq = p_create_attr->sq_sge;\r
+        ul_qp_init_attr.qp_cap.max_sg_size_rq = p_create_attr->rq_sge;\r
+        ul_qp_init_attr.ts_type = map_ibal_qp_type (p_create_attr->qp_type);\r
+        ul_qp_init_attr.srq = HHUL_INVAL_SRQ_HNDL;\r
+        /*\r
+         * save the qp_type to qp_info to use later on\r
+         */\r
+        p_new_qp->type = ul_qp_init_attr.ts_type;\r
+        ul_qp_init_attr.sq_sig_type =\r
+           (p_create_attr->sq_signaled) ? VAPI_SIGNAL_ALL_WR:VAPI_SIGNAL_REQ_WR;\r
+        ul_qp_init_attr.rq_sig_type = VAPI_SIGNAL_ALL_WR;\r
+        ul_qp_init_attr.pd    = p_pd_info->hhul_pd_hndl; \r
+        ul_qp_init_attr.sq_cq = \r
+            ((mlnx_ual_cq_info_t *)(p_create_attr->h_sq_cq))->hhul_cq_hndl; \r
+        ul_qp_init_attr.rq_cq = \r
+            ((mlnx_ual_cq_info_t *)(p_create_attr->h_rq_cq))->hhul_cq_hndl; \r
+\r
+        if (HH_OK != \r
+            THHUL_qpm_create_qp_prep (p_hobul->hhul_hca_hndl,\r
+                                      &ul_qp_init_attr,\r
+                                      &p_new_qp->hhul_qp_hndl,\r
+                                      &p_new_qp->ul_qp_cap,\r
+                                      p_new_qp->p_qp_ul_resources))\r
+        {\r
+            UVP_PRINT(TRACE_LEVEL_ERROR ,UVP_DBG_SHIM ,\r
+                      ("Calling THHUL_qpm_create_qp_prep Failed\n"));\r
+            status = IB_RESOURCE_BUSY;\r
+            break;\r
+        }\r
+\r
+        /* \r
+         * Store the parent PD of this QP\r
+         */    \r
+        p_new_qp->h_uvp_pd = h_uvp_pd;\r
+           \r
+        size = p_hobul->p_hca_ul_info->qp_ul_resources_sz + \r
+               sizeof (uint32_t) + sizeof (mlnx_ual_qp_info_t *);\r
+#endif\r
+        p_umv_buf->p_inout_buf = cl_zalloc(size);\r
+        if (!p_umv_buf->p_inout_buf)\r
+        {\r
+            UVP_PRINT(TRACE_LEVEL_ERROR ,UVP_DBG_SHIM ,\r
+                      ("Failed alloc user private buffer\n"));\r
+            status = IB_INSUFFICIENT_MEMORY;\r
+            break;\r
+        }\r
+            \r
+        /* \r
+         * We only set the input_size up to qp_ul_resources_sz + sizeof (qp_idx)\r
+         * The rest of the buffer we store the pointer to our allocated\r
+         * qp_info struct in order to retrieve it later in the post.\r
+         */\r
+        p_umv_buf->input_size = p_umv_buf->output_size = \r
+               (uint32_t)size - sizeof (mlnx_ual_qp_info_t *);\r
+\r
+#ifdef WIN_TO_BE_CHANGED\r
+        cl_memcpy (p_umv_buf->p_inout_buf,\r
+                   p_new_qp->p_qp_ul_resources,\r
+                   p_hobul->p_hca_ul_info->qp_ul_resources_sz);\r
+#endif\r
+        /* \r
+         * Store the pointer of our qp_info struct to inout_buf and retrieve\r
+         * it later in the post\r
+         */\r
+        cl_memcpy ( ( (uint8_t *)p_umv_buf->p_inout_buf + size - \r
+                     sizeof (mlnx_ual_qp_info_t *)),\r
+                   &p_new_qp,\r
+                   sizeof (mlnx_ual_qp_info_t *));\r
+       p_umv_buf->command = TRUE;\r
+        \r
+    } while (0);\r
+\r
+    /* \r
+     * clean_up if required \r
+     */\r
+    if (IB_SUCCESS != status) \r
+    {\r
+        if (p_new_qp) \r
+        {\r
+#ifdef WIN_TO_BE_CHANGED\r
+            if (p_new_qp->hhul_qp_hndl)\r
+            {\r
+               THHUL_qpm_destroy_qp_done (p_hobul->hhul_hca_hndl, \r
+                                                  p_new_qp->hhul_qp_hndl);\r
+            }\r
+#endif                                         \r
+            if (p_new_qp->p_qp_ul_resources)\r
+            {\r
+                cl_free (p_new_qp->p_qp_ul_resources);\r
+            }\r
+            cl_free (p_new_qp);\r
+        }\r
+       if (p_umv_buf->p_inout_buf)\r
+       {\r
+           cl_free ( p_umv_buf->p_inout_buf );\r
+       }\r
+    }\r
+\r
+    UVP_EXIT(UVP_DBG_SHIM);\r
+    return status;\r
+}\r
+\r
+#endif\r
+\r
+#ifndef WIN_TO_BE_CHANGED\r
+\r
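+/*\r
+ * Post-ioctl step of QP creation: let the provider's create_qp_post finish\r
+ * the user-mode QP from the kernel's ibv_create_qp_resp, then wrap it in a\r
+ * mlnx_ual_qp_info_t that is returned to IBAL as the QP handle.\r
+ */\r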
+void\r
+       mlnx_post_create_qp (\r
+               IN              const ib_pd_handle_t                            h_uvp_pd,\r
+               IN                              ib_api_status_t                         ioctl_status,\r
+                       OUT             ib_qp_handle_t                          *ph_uvp_qp,\r
+               IN                              ci_umv_buf_t                            *p_umv_buf )\r
+{\r
+       int err;\r
+       struct ibv_qp *ibv_qp;\r
+       struct ibv_create_qp_resp *p_resp;\r
+       struct ibv_create_qp *p_create_qp;\r
+       ib_api_status_t status = IB_SUCCESS;\r
+       mlnx_ual_pd_info_t *p_pd = (mlnx_ual_pd_info_t *)h_uvp_pd;\r
+       mlnx_ual_hobul_t *p_hobul = p_pd->p_hobul;\r
+       mlnx_ual_qp_info_t *p_new_qp = NULL;\r
+\r
+       UVP_ENTER(UVP_DBG_QP);\r
+\r
+       CL_ASSERT(p_hobul);\r
+       CL_ASSERT(p_umv_buf);\r
+       p_resp = (struct ibv_create_qp_resp *)p_umv_buf->p_inout_buf;\r
+\r
+       if (IB_SUCCESS == ioctl_status) {\r
+\r
+               /* allocate ibv_qp */\r
+               ibv_qp = p_hobul->ibv_ctx->ops.create_qp_post(p_pd->ibv_pd, p_resp);\r
+               if (IS_ERR(ibv_qp)) {\r
+                       err = PTR_ERR(ibv_qp);\r
+                       UVP_PRINT(TRACE_LEVEL_ERROR ,UVP_DBG_QP , ("mthca_create_qp_post failed (%d)\n", err));\r
+                       status = errno_to_iberr(err);\r
+                       goto err_create_qp;\r
+               }\r
+\r
+               /* allocate qp */\r
+               p_new_qp = (mlnx_ual_qp_info_t *)cl_zalloc( sizeof(mlnx_ual_qp_info_t) );\r
+               if( !p_new_qp ) {\r
+                       status = IB_INSUFFICIENT_MEMORY;\r
+                       goto err_memory;\r
+               }\r
+\r
+               /* return results */\r
+               p_new_qp->h_uvp_pd = p_pd;\r
+               p_new_qp->ibv_qp = ibv_qp;\r
+               *ph_uvp_qp = (ib_qp_handle_t)p_new_qp;\r
+       }\r
+       goto end;\r
+       \r
+err_memory: \r
+       p_hobul->ibv_ctx->ops.destroy_qp(ibv_qp);\r
+err_create_qp:\r
+end:   \r
+       if (p_resp)\r
+               cl_free( p_resp );\r
+       UVP_EXIT(UVP_DBG_QP);\r
+       return;\r
+}\r
+\r
+#else\r
+\r
+void\r
+mlnx_post_create_qp (\r
+       IN              const   ib_pd_handle_t                          h_uvp_pd,\r
+       IN                              ib_api_status_t                         ioctl_status,\r
+               OUT                     ib_qp_handle_t                          *ph_uvp_qp,\r
+       IN                              ci_umv_buf_t                            *p_umv_buf )\r
+{\r
+       mlnx_ual_pd_info_t *p_pd_info = (mlnx_ual_pd_info_t *)((void*)h_uvp_pd);\r
+       mlnx_ual_hobul_t *p_hobul;\r
+       mlnx_ual_qp_info_t *p_new_qp;\r
+       size_t buf_size=0;\r
+\r
+       UVP_ENTER(UVP_DBG_QP);\r
+       CL_ASSERT(p_pd_info);\r
+       CL_ASSERT(p_umv_buf);\r
+\r
+       p_hobul = p_pd_info->p_hobul;\r
+       CL_ASSERT(p_hobul);\r
+\r
+#ifdef WIN_TO_BE_CHANGED\r
+       buf_size = p_hobul->p_hca_ul_info->qp_ul_resources_sz + \r
+               sizeof (uint32_t) + sizeof (mlnx_ual_qp_info_t *);\r
+#endif\r
+\r
+       /* Retrieve our qp_info back from priv buffer */\r
+       cl_memcpy (&p_new_qp, ((uint8_t *)p_umv_buf->p_inout_buf + buf_size -\r
+               sizeof (mlnx_ual_qp_info_t *)), sizeof (mlnx_ual_qp_info_t *));\r
+       CL_ASSERT(p_new_qp);\r
+\r
+       *ph_uvp_qp = (ib_qp_handle_t) p_new_qp;\r
+\r
+       if ( ioctl_status == IB_SUCCESS )\r
+       {\r
+               if (IB_SUCCESS != p_umv_buf->status) \r
+               {\r
+                       UVP_PRINT(TRACE_LEVEL_ERROR ,UVP_DBG_QP ,\r
+                               ("Bad status %ld\n", p_umv_buf->status));\r
+                       goto err;\r
+               }\r
+               else if ((buf_size - sizeof (mlnx_ual_qp_info_t *)) != \r
+                       p_umv_buf->output_size) \r
+               {\r
+                       UVP_PRINT(TRACE_LEVEL_ERROR ,UVP_DBG_QP ,\r
+                               ("Bad user priv buffer size exp = %d, res = %ld\n",\r
+                               buf_size, p_umv_buf->output_size));\r
+                       goto err;\r
+               }\r
+\r
+#ifdef WIN_TO_BE_CHANGED\r
+               cl_memcpy (p_new_qp->p_qp_ul_resources,\r
+                       p_umv_buf->p_inout_buf,\r
+                       p_hobul->p_hca_ul_info->qp_ul_resources_sz);\r
+\r
+               cl_memcpy (&p_new_qp->qp_idx,\r
+                       ((uint8_t *)p_umv_buf->p_inout_buf + \r
+                       p_hobul->p_hca_ul_info->qp_ul_resources_sz),\r
+                       sizeof (uint32_t));\r
+\r
+               if (HH_OK !=\r
+                       THHUL_qpm_create_qp_done (p_hobul->hhul_hca_hndl,\r
+                       p_new_qp->hhul_qp_hndl,\r
+                       p_new_qp->qp_idx,\r
+                       p_new_qp->p_qp_ul_resources))\r
+               {\r
+                       UVP_PRINT(TRACE_LEVEL_ERROR ,UVP_DBG_QP ,\r
+                               ("Call THHUL_qpm_create_qp_done Failed\n"));\r
+                       goto err;\r
+               }\r
+#endif\r
+               UVP_PRINT(TRACE_LEVEL_INFORMATION ,UVP_DBG_QP ,\r
+                       ("Newly created QP qp_idx 0x%x\n",p_new_qp->qp_idx)); \r
+       }\r
+       else\r
+       {\r
+err:\r
+               if (p_new_qp->p_qp_ul_resources)\r
+                       cl_free (p_new_qp->p_qp_ul_resources);\r
+\r
+               cl_free (p_new_qp);\r
+               *ph_uvp_qp = NULL;\r
+       }\r
+\r
+       cl_free (p_umv_buf->p_inout_buf);\r
+       p_umv_buf->p_inout_buf = NULL;\r
+\r
+       UVP_EXIT(UVP_DBG_QP);\r
+       return;\r
+}\r
+\r
+#endif\r
+\r
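+/*\r
+ * Pre-ioctl step of modify_qp: nothing is marshalled down from here\r
+ * (input_size stays 0); only a buffer for ibv_modify_qp_resp is prepared.\r
+ */\r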
+ib_api_status_t\r
+mlnx_pre_modify_qp (\r
+    IN         const ib_qp_handle_t            h_uvp_qp,\r
+    IN         const ib_qp_mod_t               *p_modify_attr,\r
+    IN OUT     ci_umv_buf_t                    *p_umv_buf)\r
+{\r
+       ib_api_status_t status = IB_SUCCESS;\r
+    UNREFERENCED_PARAMETER(h_uvp_qp);\r
+    UNREFERENCED_PARAMETER(p_modify_attr);\r
+\r
+       UVP_ENTER(UVP_DBG_SHIM);\r
+\r
+       CL_ASSERT(p_umv_buf);\r
+\r
+       if( !p_umv_buf->p_inout_buf )\r
+       {\r
+               p_umv_buf->p_inout_buf = cl_zalloc( sizeof(struct ibv_modify_qp_resp) );\r
+               if( !p_umv_buf->p_inout_buf )\r
+               {\r
+                       status = IB_INSUFFICIENT_MEMORY;\r
+                       goto err_memory;\r
+               }\r
+       }\r
+       p_umv_buf->input_size = 0;\r
+       p_umv_buf->output_size = sizeof(struct ibv_modify_qp_resp);\r
+       p_umv_buf->command = TRUE;\r
+       \r
+err_memory:\r
+       UVP_EXIT(UVP_DBG_SHIM);\r
+       return status;\r
+}\r
+\r
+\r
+void\r
+mlnx_post_modify_qp (\r
+    IN         const ib_qp_handle_t            h_uvp_qp,\r
+    IN         ib_api_status_t                 ioctl_status,\r
+    IN OUT     ci_umv_buf_t                    *p_umv_buf)\r
+{\r
+       int err = 0;\r
+    ib_api_status_t status;\r
+       struct ibv_modify_qp_resp *p_resp; \r
+       struct ibv_qp_attr attr;\r
+       mlnx_ual_qp_info_t *p_qp_info = (mlnx_ual_qp_info_t *)((void *) h_uvp_qp);\r
+\r
+    UVP_ENTER(UVP_DBG_SHIM);\r
+    CL_ASSERT(p_umv_buf);\r
+\r
+       p_resp = (struct ibv_modify_qp_resp *)p_umv_buf->p_inout_buf;\r
+\r
+    if (IB_SUCCESS == ioctl_status) \r
+    {\r
+               memset( &attr, 0, sizeof(attr));\r
+               attr.qp_state = p_resp->qp_state;\r
+               if (p_qp_info->ibv_qp)\r
+                       err = p_qp_info->h_uvp_pd->p_hobul->ibv_ctx->ops.modify_qp(\r
+                               p_qp_info->ibv_qp, &attr, p_resp->attr_mask);\r
+               if (err) {\r
+                       UVP_PRINT(TRACE_LEVEL_ERROR ,UVP_DBG_SHIM , ("mthca_modify_qp failed (%d)\n", err));\r
+                       status = errno_to_iberr(err);\r
+                       goto err_modify_qp;\r
+               }\r
+               UVP_PRINT(TRACE_LEVEL_INFORMATION ,UVP_DBG_SHIM ,\r
+                       ("Committed to modify QP to state %d\n", p_resp->qp_state));\r
+    }\r
+\r
+\r
+err_modify_qp:\r
+    if (p_resp)\r
+       cl_free (p_resp);\r
+    UVP_EXIT(UVP_DBG_SHIM);\r
+    return;\r
+}\r
+\r
+\r
+ib_api_status_t\r
+mlnx_pre_query_qp (\r
+    IN         ib_qp_handle_t                          h_uvp_qp,\r
+    IN OUT     ci_umv_buf_t                            *p_umv_buf)\r
+{\r
+    UNREFERENCED_PARAMETER(h_uvp_qp);\r
+    UVP_ENTER(UVP_DBG_SHIM);\r
+       p_umv_buf->input_size = p_umv_buf->output_size = 0;\r
+       p_umv_buf->command = FALSE;\r
+       p_umv_buf->status = IB_SUCCESS;\r
+    UVP_EXIT(UVP_DBG_SHIM);\r
+    return IB_SUCCESS;\r
+}\r
+\r
+\r
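+/*\r
+ * Post-ioctl step of query_qp: the capacity fields reported to IBAL are\r
+ * read from the cached user-mode struct mthca_qp, not from a kernel\r
+ * response buffer.\r
+ */\r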
+void\r
+mlnx_post_query_qp (\r
+       IN                              ib_qp_handle_t                          h_uvp_qp,\r
+       IN                              ib_api_status_t                         ioctl_status,\r
+       IN      OUT                     ib_qp_attr_t                            *p_query_attr,\r
+       IN      OUT                     ci_umv_buf_t                            *p_umv_buf)\r
+{\r
+       struct mthca_qp *p_qp_info = (struct mthca_qp *)h_uvp_qp->ibv_qp;\r
+       UVP_ENTER(UVP_DBG_SHIM);\r
+\r
+       UNREFERENCED_PARAMETER(p_umv_buf);\r
+       if(IB_SUCCESS == ioctl_status)\r
+       {\r
+               p_query_attr->sq_max_inline = p_qp_info->max_inline_data;\r
+               p_query_attr->sq_sge = p_qp_info->sq.max_gs;\r
+               p_query_attr->sq_depth = p_qp_info->sq.max;\r
+               p_query_attr->rq_sge = p_qp_info->rq.max_gs;\r
+               p_query_attr->rq_depth = p_qp_info->rq.max;\r
+       }\r
+       UVP_EXIT(UVP_DBG_SHIM);\r
+}\r
+\r
+\r
+ib_api_status_t\r
+mlnx_pre_destroy_qp (\r
+    IN         const ib_qp_handle_t            h_uvp_qp)\r
+{\r
+    UNREFERENCED_PARAMETER(h_uvp_qp);\r
+    UVP_ENTER(UVP_DBG_SHIM);\r
+    UVP_EXIT(UVP_DBG_SHIM);\r
+    return IB_SUCCESS;\r
+}\r
+\r
+#ifndef WIN_TO_BE_CHANGED\r
+\r
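+/*\r
+ * Post-ioctl step of destroy_qp: tear down the user-mode QP through the\r
+ * provider's destroy_qp and free the IBAL wrapper (mlnx_ual_qp_info_t).\r
+ */\r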
+void\r
+mlnx_post_destroy_qp (\r
+       IN              const ib_qp_handle_t            h_uvp_qp,\r
+    IN         ib_api_status_t                 ioctl_status)\r
+{\r
+       int err;\r
+       mlnx_ual_qp_info_t *p_qp_info = (mlnx_ual_qp_info_t *)((void *) h_uvp_qp);\r
+    UNREFERENCED_PARAMETER(ioctl_status);\r
+\r
+    UVP_ENTER(UVP_DBG_SHIM);\r
+\r
+    CL_ASSERT(p_qp_info && p_qp_info->ibv_qp);\r
+\r
+       err = p_qp_info->h_uvp_pd->p_hobul->ibv_ctx->ops.destroy_qp( p_qp_info->ibv_qp );\r
+       if (err) \r
+               UVP_PRINT(TRACE_LEVEL_ERROR ,UVP_DBG_SHIM , ("mthca_destroy_qp failed (%d)\n", err));\r
+\r
+    cl_free (p_qp_info);\r
+    UVP_EXIT(UVP_DBG_SHIM);\r
+    return;\r
+}\r
+\r
+#else\r
+\r
+void\r
+mlnx_post_destroy_qp (\r
+    IN         const ib_qp_handle_t            h_uvp_qp,\r
+    IN         ib_api_status_t                 ioctl_status)\r
+{\r
+    mlnx_ual_qp_info_t *p_qp_info = (mlnx_ual_qp_info_t *)((void *) h_uvp_qp);\r
+    mlnx_ual_pd_info_t *p_pd_info;\r
+    mlnx_ual_hobul_t *p_hobul;\r
+\r
+    UNREFERENCED_PARAMETER(ioctl_status);\r
+\r
+    UVP_ENTER(UVP_DBG_SHIM);\r
+    CL_ASSERT(p_qp_info);\r
+\r
+    p_pd_info = (mlnx_ual_pd_info_t *)((void *)p_qp_info->h_uvp_pd);\r
+    CL_ASSERT(p_pd_info);\r
+\r
+    p_hobul = p_pd_info->p_hobul;      \r
+    CL_ASSERT(p_hobul);\r
+\r
+#ifdef WIN_TO_BE_CHANGED\r
+    if (HH_OK !=\r
+        THHUL_qpm_destroy_qp_done (p_hobul->hhul_hca_hndl, \r
+                                   p_qp_info->hhul_qp_hndl))\r
+    {\r
+        UVP_PRINT(TRACE_LEVEL_ERROR ,UVP_DBG_SHIM ,\r
+                  ("THHUL_destroy_qp_done failed\n"));\r
+    }\r
+#endif\r
+    if (p_qp_info->p_qp_ul_resources)\r
+    {\r
+        cl_free (p_qp_info->p_qp_ul_resources);\r
+        p_qp_info->p_qp_ul_resources = NULL; \r
+    }\r
+\r
+    cl_free (p_qp_info);\r
+\r
+    UVP_EXIT(UVP_DBG_SHIM);\r
+    return;\r
+}\r
+\r
+#endif\r
diff --git a/trunk/hw/mthca/user/mlnx_uvp.c b/trunk/hw/mthca/user/mlnx_uvp.c
new file mode 100644 (file)
index 0000000..af4fd47
--- /dev/null
@@ -0,0 +1,217 @@
+/*
+ * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id: mthca.c 3670 2005-10-05 19:51:57Z roland $
+ */
+
+#include "mt_l2w.h"
+#include "mlnx_uvp.h"
+
+#if defined(EVENT_TRACING)
+#include "mlnx_uvp.tmh"
+#endif
+
+#include "mx_abi.h"
+
+size_t g_page_size = 0;
+
+#ifndef PCI_VENDOR_ID_MELLANOX
+#define PCI_VENDOR_ID_MELLANOX                 0x15b3
+#endif
+
+#ifndef PCI_DEVICE_ID_MELLANOX_TAVOR
+#define PCI_DEVICE_ID_MELLANOX_TAVOR           0x5a44
+#endif
+
+#ifndef PCI_DEVICE_ID_MELLANOX_ARBEL_COMPAT
+#define PCI_DEVICE_ID_MELLANOX_ARBEL_COMPAT    0x6278
+#endif
+
+#ifndef PCI_DEVICE_ID_MELLANOX_ARBEL
+#define PCI_DEVICE_ID_MELLANOX_ARBEL           0x6282
+#endif
+
+#ifndef PCI_DEVICE_ID_MELLANOX_SINAI_OLD
+#define PCI_DEVICE_ID_MELLANOX_SINAI_OLD       0x5e8c
+#endif
+
+#ifndef PCI_DEVICE_ID_MELLANOX_SINAI
+#define PCI_DEVICE_ID_MELLANOX_SINAI           0x6274
+#endif
+
+#ifndef PCI_VENDOR_ID_TOPSPIN
+#define PCI_VENDOR_ID_TOPSPIN                  0x1867
+#endif
+
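+/*
+ * Known PCI vendor/device IDs and the HCA mode (Tavor-style or mem-free
+ * Arbel-style) that the library should use for each of them.
+ */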
+#define HCA(v, d, t) \
+       { PCI_VENDOR_ID_##v,    PCI_DEVICE_ID_MELLANOX_##d, MTHCA_##t }
+
+static struct pci_device_id {
+       unsigned                vendor;
+       unsigned                device;
+       enum mthca_hca_type     type;
+} mthca_pci_table[] = {
+       HCA( MELLANOX,  TAVOR,                          TAVOR),
+       HCA( MELLANOX,  ARBEL_COMPAT,   TAVOR),
+       HCA( MELLANOX,  ARBEL,                                  ARBEL),
+       HCA( MELLANOX,  SINAI_OLD,              ARBEL),
+       HCA( MELLANOX,  SINAI,                                  ARBEL),
+       HCA( TOPSPIN,           TAVOR,                          TAVOR),
+       HCA( TOPSPIN,           ARBEL_COMPAT,   TAVOR),
+       HCA( TOPSPIN,           ARBEL,                                  ARBEL),
+       HCA( TOPSPIN,           SINAI_OLD,                      ARBEL),
+       HCA( TOPSPIN,           SINAI,                                  ARBEL),
+};
+
+static struct ibv_context_ops mthca_ctx_ops = {
+       NULL,   // mthca_query_device,
+       NULL,   // mthca_query_port,
+       mthca_alloc_pd,
+       mthca_free_pd,
+       mthca_reg_mr,
+       mthca_dereg_mr,
+       mthca_create_cq_pre,
+       mthca_create_cq_post,
+       mthca_poll_cq,
+       mthca_poll_cq_list,
+       NULL,   /* req_notify_cq */
+       NULL,  /* cq_event */
+       mthca_destroy_cq,
+       NULL,   // mthca_create_srq,
+       NULL,   // mthca_modify_srq,
+       NULL,   // mthca_destroy_srq,
+       NULL,   /* post_srq_recv */
+       mthca_create_qp_pre,
+       mthca_create_qp_post,
+       mthca_modify_qp,
+       mthca_destroy_qp,
+       NULL,   /* post_send */
+       NULL,   /* post_recv */
+       mthca_create_ah_pre,
+       mthca_create_ah_post,
+       mthca_destroy_ah,
+       mthca_attach_mcast,
+       mthca_detach_mcast
+};
+
+struct ibv_context *mthca_alloc_context(struct ibv_get_context_resp *resp_p)
+{
+       struct mthca_context    *       context;
+       struct ibv_alloc_pd_resp        pd_resp;
+       int                                             i;
+
+       /* allocate context */
+       context = cl_malloc(sizeof *context);
+       if (!context)
+               return NULL;
+
+       /* find page size  */
+       if (!g_page_size) {
+               SYSTEM_INFO sys_info;
+               GetSystemInfo(&sys_info);
+               g_page_size     = sys_info.dwPageSize;
+       }
+
+       /* calculate device type */
+       for (i = 0; i < sizeof mthca_pci_table / sizeof mthca_pci_table[0]; ++i) 
+               if (resp_p->vend_id == mthca_pci_table[i].vendor &&
+                       resp_p->dev_id == mthca_pci_table[i].device) 
+                       goto found;
+       goto err_dev_type;
+
+found:
+       context->hca_type = mthca_pci_table[i].type;
+       context->uar = (void*)(UINT_PTR)resp_p->uar_addr;
+       context->num_qps        = resp_p->qp_tab_size;
+       context->qp_table_shift = ffs(context->num_qps) - 1 - MTHCA_QP_TABLE_BITS;
+       context->qp_table_mask  = (1 << context->qp_table_shift) - 1;
+
+       if (mthca_is_memfree(&context->ibv_ctx)) {
+               context->db_tab = mthca_alloc_db_tab(resp_p->uarc_size);
+               if (!context->db_tab)
+                       goto err_alloc_db_tab;
+       } else
+               context->db_tab = NULL;
+
+       context->qp_table_mutex = CreateMutex( NULL, FALSE, NULL );
+       if (!context->qp_table_mutex)
+               goto err_mutex;
+       for (i = 0; i < MTHCA_QP_TABLE_SIZE; ++i)
+               context->qp_table[i].refcnt = 0;
+
+       cl_spinlock_construct(&context->uar_lock);
+       if (cl_spinlock_init(&context->uar_lock))
+               goto err_spinlock;
+
+       pd_resp.pd_handle = resp_p->pd_handle;
+       pd_resp.pdn = resp_p->pdn;
+       context->pd = mthca_alloc_pd(&context->ibv_ctx, &pd_resp);
+       if (!context->pd)
+               goto err_unmap;
+
+       context->ibv_ctx.ops = mthca_ctx_ops;
+
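+	/*
+	 * Data-path verbs differ between Tavor-mode and mem-free (Arbel-mode)
+	 * HCAs, so override those entries after copying the default ops table.
+	 */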
+       if (mthca_is_memfree(&context->ibv_ctx)) {
+               context->ibv_ctx.ops.req_notify_cq = mthca_arbel_arm_cq;
+               context->ibv_ctx.ops.cq_event      = mthca_arbel_cq_event;
+               context->ibv_ctx.ops.post_send     = mthca_arbel_post_send;
+               context->ibv_ctx.ops.post_recv     = mthca_arbel_post_recv;
+               context->ibv_ctx.ops.post_srq_recv = mthca_arbel_post_srq_recv;
+       } else {
+               context->ibv_ctx.ops.req_notify_cq = mthca_tavor_arm_cq;
+               context->ibv_ctx.ops.cq_event      = NULL;
+               context->ibv_ctx.ops.post_send     = mthca_tavor_post_send;
+               context->ibv_ctx.ops.post_recv     = mthca_tavor_post_recv;
+               context->ibv_ctx.ops.post_srq_recv = mthca_tavor_post_srq_recv;
+       }
+
+       return &context->ibv_ctx;
+
+err_unmap:
+err_spinlock:
+err_mutex:
+       mthca_free_db_tab(context->db_tab);
+
+err_alloc_db_tab:
+err_dev_type:
+       cl_free(context);
+       return NULL;
+}
+
+void mthca_free_context(struct ibv_context *ibctx)
+{
+       struct mthca_context *context = to_mctx(ibctx);
+
+       cl_spinlock_destroy(&context->uar_lock);
+       mthca_free_pd(context->pd);
+       mthca_free_db_tab(context->db_tab);
+       cl_free(context);
+}
diff --git a/trunk/hw/mthca/user/mlnx_uvp.def b/trunk/hw/mthca/user/mlnx_uvp.def
new file mode 100644 (file)
index 0000000..55f9753
--- /dev/null
@@ -0,0 +1,10 @@
+#if DBG\r
+LIBRARY mthcaud.dll\r
+#else\r
+LIBRARY mthcau.dll\r
+#endif\r
+\r
+#ifndef _WIN64\r
+EXPORTS\r
+uvp_get_interface\r
+#endif\r
diff --git a/trunk/hw/mthca/user/mlnx_uvp.h b/trunk/hw/mthca/user/mlnx_uvp.h
new file mode 100644 (file)
index 0000000..1c1c78a
--- /dev/null
@@ -0,0 +1,339 @@
+/*
+ * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
+ * Copyright (c) 2005 Cisco Systems.  All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id: mthca.h 4182 2005-11-28 21:14:30Z roland $
+ */
+
+#ifndef MTHCA_H
+#define MTHCA_H
+
+#include <cl_spinlock.h>
+#include <mlnx_uvp_verbs.h>
+#include <arch.h>
+#include "mlnx_uvp_debug.h"
+
+#define PFX            "mthca: "
+
+enum mthca_hca_type {
+       MTHCA_TAVOR,
+       MTHCA_ARBEL
+};
+
+enum {
+       MTHCA_CQ_ENTRY_SIZE = 0x20
+};
+
+enum {
+       MTHCA_QP_TABLE_BITS = 8,
+       MTHCA_QP_TABLE_SIZE = 1 << MTHCA_QP_TABLE_BITS,
+       MTHCA_QP_TABLE_MASK = MTHCA_QP_TABLE_SIZE - 1
+};
+
+enum {
+       MTHCA_DB_REC_PAGE_SIZE = 4096,
+       MTHCA_DB_REC_PER_PAGE  = MTHCA_DB_REC_PAGE_SIZE / 8
+};
+
+enum mthca_db_type {
+       MTHCA_DB_TYPE_INVALID   = 0x0,
+       MTHCA_DB_TYPE_CQ_SET_CI = 0x1,
+       MTHCA_DB_TYPE_CQ_ARM    = 0x2,
+       MTHCA_DB_TYPE_SQ        = 0x3,
+       MTHCA_DB_TYPE_RQ        = 0x4,
+       MTHCA_DB_TYPE_SRQ       = 0x5,
+       MTHCA_DB_TYPE_GROUP_SEP = 0x7
+};
+
+enum {
+       MTHCA_OPCODE_NOP            = 0x00,
+       MTHCA_OPCODE_RDMA_WRITE     = 0x08,
+       MTHCA_OPCODE_RDMA_WRITE_IMM = 0x09,
+       MTHCA_OPCODE_SEND           = 0x0a,
+       MTHCA_OPCODE_SEND_IMM       = 0x0b,
+       MTHCA_OPCODE_RDMA_READ      = 0x10,
+       MTHCA_OPCODE_ATOMIC_CS      = 0x11,
+       MTHCA_OPCODE_ATOMIC_FA      = 0x12,
+       MTHCA_OPCODE_BIND_MW        = 0x18,
+       MTHCA_OPCODE_INVALID        = 0xff
+};
+
+struct mthca_ah_page;
+
+struct mthca_db_table;
+
+struct mthca_context {
+       struct ibv_context     ibv_ctx;
+       void                  *uar;
+       cl_spinlock_t     uar_lock;
+       struct mthca_db_table *db_tab;
+       struct ibv_pd         *pd;
+       struct {
+               struct mthca_qp **table;
+               int               refcnt;
+       }                      qp_table[MTHCA_QP_TABLE_SIZE];
+       HANDLE        qp_table_mutex;
+       int                    num_qps;
+       int                    qp_table_shift;
+       int                    qp_table_mask;
+       enum mthca_hca_type hca_type;
+};
+
+struct mthca_pd {
+       struct ibv_pd         ibv_pd;
+       struct mthca_ah_page *ah_list;
+       HANDLE       ah_mutex;
+       uint32_t              pdn;
+};
+
+struct mthca_cq {
+       struct ibv_cq      ibv_cq;
+       void              *buf;
+       cl_spinlock_t lock;
+       struct ibv_mr     mr;
+       uint32_t           cqn;
+       uint32_t           cons_index;
+
+       /* Next fields are mem-free only */
+       int                set_ci_db_index;
+       uint32_t          *set_ci_db;
+       int                arm_db_index;
+       uint32_t          *arm_db;
+       int                arm_sn;
+};
+
+struct mthca_srq {
+       struct ibv_srq     ibv_srq;
+       void              *buf;
+       void              *last;
+       cl_spinlock_t lock;
+       struct ibv_mr     *mr;
+       uint64_t          *wrid;
+       uint32_t           srqn;
+       int                max;
+       int                max_gs;
+       int                wqe_shift;
+       int                first_free;
+       int                last_free;
+       int                buf_size;
+
+       /* Next fields are mem-free only */
+       int                db_index;
+       uint32_t          *db;
+       uint16_t           counter;
+};
+
+struct mthca_wq {
+       cl_spinlock_t lock;
+       int                max;
+       unsigned           next_ind;
+       unsigned           last_comp;
+       unsigned           head;
+       unsigned           tail;
+       void              *last;
+       int                max_gs;
+       int                wqe_shift;
+
+       /* Next fields are mem-free only */
+       int                db_index;
+       uint32_t          *db;
+};
+
+struct mthca_qp {
+       struct ibv_qp    ibv_qp;
+       uint8_t            *buf;
+       uint64_t        *wrid;
+       int              send_wqe_offset;
+       int              max_inline_data;
+       int              buf_size;
+       struct mthca_wq  sq;
+       struct mthca_wq  rq;
+       struct ibv_mr   mr;
+       int              sq_sig_all;
+};
+
+struct mthca_av {
+       uint32_t port_pd;
+       uint8_t  reserved1;
+       uint8_t  g_slid;
+       uint16_t dlid;
+       uint8_t  reserved2;
+       uint8_t  gid_index;
+       uint8_t  msg_sr;
+       uint8_t  hop_limit;
+       uint32_t sl_tclass_flowlabel;
+       uint32_t dgid[4];
+};
+
+struct mthca_ah {
+       struct ibv_ah         ibv_ah;
+       struct mthca_av      *av;
+       struct mthca_ah_page *page;
+       uint32_t              key;
+};
+
+static inline unsigned long align(unsigned long val, unsigned long align)
+{
+       return (val + align - 1) & ~(align - 1);
+}
+
+static inline uintptr_t db_align(uint32_t *db)
+{
+       return (uintptr_t) db & ~((uintptr_t) MTHCA_DB_REC_PAGE_SIZE - 1);
+}
+
+#define to_mxxx(xxx, type)                                             \
+       ((struct mthca_##type *)                                        \
+        ((uint8_t *) ib##xxx - offsetof(struct mthca_##type, ibv_##xxx)))
+
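+/*
+ * container_of-style downcasts: recover the provider's private structure
+ * from the embedded ibv_* member handed around by the generic verbs layer.
+ */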
+static inline struct mthca_context *to_mctx(struct ibv_context *ibctx)
+{
+       return to_mxxx(ctx, context);
+}
+
+static inline struct mthca_pd *to_mpd(struct ibv_pd *ibpd)
+{
+       return to_mxxx(pd, pd);
+}
+
+static inline struct mthca_cq *to_mcq(struct ibv_cq *ibcq)
+{
+       return to_mxxx(cq, cq);
+}
+
+static inline struct mthca_srq *to_msrq(struct ibv_srq *ibsrq)
+{
+       return to_mxxx(srq, srq);
+}
+
+static inline struct mthca_qp *to_mqp(struct ibv_qp *ibqp)
+{
+       return to_mxxx(qp, qp);
+}
+
+static inline struct mthca_ah *to_mah(struct ibv_ah *ibah)
+{
+       return to_mxxx(ah, ah);
+}
+
+static inline int mthca_is_memfree(struct ibv_context *ibctx)
+{
+       return to_mctx(ibctx)->hca_type == MTHCA_ARBEL;
+}
+
+extern int mthca_alloc_db(struct mthca_db_table *db_tab, enum mthca_db_type type,
+                         uint32_t **db);
+extern void mthca_set_db_qn(uint32_t *db, enum mthca_db_type type, uint32_t qn);
+extern void mthca_free_db(struct mthca_db_table *db_tab, enum mthca_db_type type, int db_index);
+extern struct mthca_db_table *mthca_alloc_db_tab(int uarc_size);
+extern void mthca_free_db_tab(struct mthca_db_table *db_tab);
+
+extern int mthca_query_device(struct ibv_context *context,
+                             struct ibv_device_attr *attr);
+extern int mthca_query_port(struct ibv_context *context, uint8_t port,
+                           struct ibv_port_attr *attr);
+
+extern         struct ibv_pd *mthca_alloc_pd(struct ibv_context *context, 
+       struct ibv_alloc_pd_resp *resp_p);
+
+extern int mthca_free_pd(struct ibv_pd *pd);
+
+extern struct ibv_mr *mthca_reg_mr(struct ibv_pd *pd, void *addr,
+                                  size_t length, enum ibv_access_flags access);
+extern int mthca_dereg_mr(struct ibv_mr *mr);
+
+extern struct ibv_cq *mthca_create_cq_pre(struct ibv_context *context, int *cqe,
+                                struct ibv_create_cq *req);
+extern struct ibv_cq *mthca_create_cq_post(struct ibv_context *context, 
+                                struct ibv_create_cq_resp *resp);
+extern int mthca_destroy_cq(struct ibv_cq *cq);
+extern int mthca_poll_cq(struct ibv_cq *cq, int ne, struct _ib_wc *wc);
+extern int mthca_poll_cq_list(struct ibv_cq *ibcq, 
+       struct _ib_wc** const pp_free_wclist,
+       struct _ib_wc** const pp_done_wclist );
+extern int mthca_tavor_arm_cq(struct ibv_cq *cq, int solicited);
+extern int mthca_arbel_arm_cq(struct ibv_cq *cq, int solicited);
+extern void mthca_arbel_cq_event(struct ibv_cq *cq);
+extern void mthca_cq_clean(struct mthca_cq *cq, uint32_t qpn,
+                          struct mthca_srq *srq);
+extern void mthca_init_cq_buf(struct mthca_cq *cq, int nent);
+
+extern struct ibv_srq *mthca_create_srq(struct ibv_pd *pd,
+                                       struct ibv_srq_init_attr *attr);
+extern int mthca_modify_srq(struct ibv_srq *srq,
+                           struct ibv_srq_attr *attr,
+                           enum ibv_srq_attr_mask mask);
+extern int mthca_destroy_srq(struct ibv_srq *srq);
+extern int mthca_alloc_srq_buf(struct ibv_pd *pd, struct ibv_srq_attr *attr,
+                              struct mthca_srq *srq);
+extern void mthca_free_srq_wqe(struct mthca_srq *srq, int ind);
+extern int mthca_tavor_post_srq_recv(struct ibv_srq *ibsrq,
+                                    struct _ib_recv_wr *wr,
+                                    struct _ib_recv_wr **bad_wr);
+extern int mthca_arbel_post_srq_recv(struct ibv_srq *ibsrq,
+                                    struct _ib_recv_wr *wr,
+                                    struct _ib_recv_wr **bad_wr);
+extern struct ibv_qp *mthca_create_qp_pre(struct ibv_pd *pd, 
+       struct ibv_qp_init_attr *attr, struct ibv_create_qp *req);
+extern struct ibv_qp *mthca_create_qp_post(struct ibv_pd *pd, 
+       struct ibv_create_qp_resp *resp);
+extern int mthca_modify_qp(struct ibv_qp *qp, struct ibv_qp_attr *attr,
+                          enum ibv_qp_attr_mask attr_mask);
+extern int mthca_destroy_qp(struct ibv_qp *qp);
+extern void mthca_init_qp_indices(struct mthca_qp *qp);
+extern int mthca_tavor_post_send(struct ibv_qp *ibqp, struct _ib_send_wr *wr,
+                                struct _ib_send_wr **bad_wr);
+extern int mthca_tavor_post_recv(struct ibv_qp *ibqp, struct _ib_recv_wr *wr,
+                                struct _ib_recv_wr **bad_wr);
+extern int mthca_arbel_post_send(struct ibv_qp *ibqp, struct _ib_send_wr *wr,
+                                struct _ib_send_wr **bad_wr);
+extern int mthca_arbel_post_recv(struct ibv_qp *ibqp, struct _ib_recv_wr *wr,
+                                struct _ib_recv_wr **bad_wr);
+extern int mthca_alloc_qp_buf(struct ibv_pd *pd, struct ibv_qp_cap *cap,
+                             ib_qp_type_t type, struct mthca_qp *qp);
+extern struct mthca_qp *mthca_find_qp(struct mthca_context *ctx, uint32_t qpn);
+extern int mthca_store_qp(struct mthca_context *ctx, uint32_t qpn, struct mthca_qp *qp);
+extern void mthca_clear_qp(struct mthca_context *ctx, uint32_t qpn);
+extern int mthca_free_err_wqe(struct mthca_qp *qp, int is_send,
+                             int index, int *dbd, uint32_t *new_wqe);
+extern int mthca_create_ah_pre(struct ibv_pd *pd, struct ibv_create_ah *req);
+extern         struct ibv_ah *mthca_create_ah_post(struct ibv_pd *pd, 
+               struct ibv_ah_attr *attr, struct ibv_create_ah_resp *resp);
+extern int mthca_destroy_ah(struct ibv_ah *ah);
+extern int mthca_alloc_av(struct mthca_pd *pd, struct ibv_ah_attr *attr,
+                         struct mthca_ah *ah, struct ibv_create_ah_resp *resp);
+extern void mthca_free_av(struct mthca_ah *ah);
+extern int mthca_attach_mcast(struct ibv_qp *qp, union ibv_gid *gid, uint16_t lid);
+extern int mthca_detach_mcast(struct ibv_qp *qp, union ibv_gid *gid, uint16_t lid);
+struct ibv_context *mthca_alloc_context(struct ibv_get_context_resp *resp_p);
+void mthca_free_context(struct ibv_context *ibctx);
+
+#endif /* MTHCA_H */
diff --git a/trunk/hw/mthca/user/mlnx_uvp.rc b/trunk/hw/mthca/user/mlnx_uvp.rc
new file mode 100644 (file)
index 0000000..f3d2e34
--- /dev/null
@@ -0,0 +1,48 @@
+/*\r
+ * Copyright (c) 2005 SilverStorm Technologies.  All rights reserved.\r
+ *\r
+ * This software is available to you under the OpenIB.org BSD license\r
+ * below:\r
+ *\r
+ *     Redistribution and use in source and binary forms, with or\r
+ *     without modification, are permitted provided that the following\r
+ *     conditions are met:\r
+ *\r
+ *      - Redistributions of source code must retain the above\r
+ *        copyright notice, this list of conditions and the following\r
+ *        disclaimer.\r
+ *\r
+ *      - Redistributions in binary form must reproduce the above\r
+ *        copyright notice, this list of conditions and the following\r
+ *        disclaimer in the documentation and/or other materials\r
+ *        provided with the distribution.\r
+ *\r
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,\r
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\r
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND\r
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS\r
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN\r
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN\r
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\r
+ * SOFTWARE.\r
+ *\r
+ * $Id$\r
+ */\r
+\r
+\r
+#include <oib_ver.h>\r
+\r
+#define VER_FILETYPE                           VFT_DLL\r
+#define VER_FILESUBTYPE                                VFT2_UNKNOWN\r
+\r
+#ifdef DBG\r
+#define VER_FILEDESCRIPTION_STR     "HCA User Mode Verb Provider (checked)"\r
+#define VER_INTERNALNAME_STR           "mthcaud.dll"\r
+#define VER_ORIGINALFILENAME_STR       "mthcaud.dll"\r
+#else\r
+#define VER_FILEDESCRIPTION_STR     "HCA User Mode Verb Provider"\r
+#define VER_INTERNALNAME_STR           "mthcau.dll"\r
+#define VER_ORIGINALFILENAME_STR       "mthcau.dll"\r
+#endif\r
+\r
+#include <common.ver>\r
diff --git a/trunk/hw/mthca/user/mlnx_uvp_abi.h b/trunk/hw/mthca/user/mlnx_uvp_abi.h
new file mode 100644 (file)
index 0000000..319e943
--- /dev/null
@@ -0,0 +1,61 @@
+/*
+ * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id: mthca-abi.h 3048 2005-08-10 04:00:24Z roland $
+ */
+
+#ifndef MTHCA_UVP_ABI_H
+#define MTHCA_UVP_ABI_H
+
+#include "mlnx_uvp_kern_abi.h"
+
+struct mthca_alloc_ucontext_resp {
+       struct ibv_get_context_resp     ibv_resp;
+};
+
+struct mthca_create_srq {
+       uint32_t                                lkey;
+       uint32_t                                db_index;
+       uint64_t                                db_page;
+       struct ibv_create_srq           ibv_cmd;
+};
+
+struct mthca_create_srq_resp {
+       struct ibv_create_srq_resp      ibv_resp;
+       uint32_t                                srqn;
+       uint32_t                                reserved;
+};
+
+struct ibv_context *mthca_alloc_context(struct ibv_get_context_resp *resp_p);
+void mthca_free_context(struct ibv_context *ibctx);
+
+
+#endif /* MTHCA_UVP_ABI_H */
diff --git a/trunk/hw/mthca/user/mlnx_uvp_ah.c b/trunk/hw/mthca/user/mlnx_uvp_ah.c
new file mode 100644 (file)
index 0000000..52ee7a9
--- /dev/null
@@ -0,0 +1,217 @@
+/*
+ * Copyright (c) 2005 Topspin Communications.  All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id: ah.c 4001 2005-11-09 18:26:28Z roland $
+ */
+
+#include "mt_l2w.h"
+#include "mlnx_uvp.h"
+#include "mx_abi.h"
+
+struct mthca_ah_page {
+       struct mthca_ah_page *prev, *next;
+       void                 *buf;
+       struct ibv_mr        mr;
+       int                   use_cnt;
+       unsigned              free[0];
+};
+
+static struct mthca_ah_page *__add_page(struct mthca_pd *pd, int per_page)
+{
+       struct mthca_ah_page *page;
+       int i;
+
+       page = cl_malloc(sizeof *page + per_page * sizeof (int));
+       if (!page)
+               return NULL;
+
+       page->use_cnt = 0;
+       for (i = 0; i < per_page; ++i)
+               page->free[i] = ~0;
+
+       page->prev = NULL;
+       page->next = pd->ah_list;
+       pd->ah_list = page;
+       if (page->next)
+               page->next->prev = page;
+
+       return page;
+}
+
+int mthca_alloc_av(struct mthca_pd *pd, struct ibv_ah_attr *attr,
+                  struct mthca_ah *ah, struct ibv_create_ah_resp *resp)
+{
+       if (mthca_is_memfree(pd->ibv_pd.context)) {
+               ah->av = cl_malloc(sizeof *ah->av);
+               if (!ah->av)
+                       return -ENOMEM;
+       } else {
+               struct mthca_ah_page *page;
+               int ps;
+               int pp;
+               int i, j;
+
+               ps = g_page_size;
+               pp = ps / (sizeof *ah->av * 8 * sizeof (int));
+
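+		/*
+		 * Tavor mode: AVs are carved out of registered pages; each page
+		 * keeps a bitmap ('free') with one bit per AV slot, so scan the
+		 * PD's page list for a free slot before adding a new page.
+		 */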
+               WaitForSingleObject( pd->ah_mutex, INFINITE );
+               for (page = pd->ah_list; page; page = page->next)
+                       if (page->use_cnt < ps / (int)(sizeof *ah->av))
+                               for (i = 0; i < pp; ++i)
+                                       if (page->free[i])
+                                               goto found;
+
+               page = __add_page(pd, pp);
+               if (!page) {
+                       ReleaseMutex( pd->ah_mutex );
+                       return -ENOMEM;
+               }
+
+       found:
+
+               // fill mr parameters
+               page->buf = (void*)(ULONG_PTR)resp->start;
+               page->mr.handle = resp->mr.mr_handle;
+               page->mr.lkey = resp->mr.lkey;
+               page->mr.rkey = resp->mr.rkey;
+               page->mr.pd = (struct ibv_pd*)pd;
+               page->mr.context = pd->ibv_pd.context;
+               
+               ++page->use_cnt;
+
+               for (i = 0, j = -1; i < pp; ++i)
+                       if (page->free[i]) {
+                               j = ffs(page->free[i]);
+                               page->free[i] &= ~(1 << (j - 1));
+                               ah->av = (struct mthca_av *)((uint8_t*)page->buf +
+                                       (i * 8 * sizeof (int) + (j - 1)) * sizeof *ah->av);
+                               break;
+                       }
+
+               ah->key  = page->mr.lkey;
+               ah->page = page;
+
+               ReleaseMutex( pd->ah_mutex );
+       }
+
+       memset(ah->av, 0, sizeof *ah->av);
+
+       ah->av->port_pd = cl_hton32(pd->pdn | (attr->port_num << 24));
+       ah->av->g_slid  = attr->src_path_bits;
+       ah->av->dlid    = cl_hton16(attr->dlid);
+       ah->av->msg_sr  = (3 << 4) | /* 2K message */
+               attr->static_rate;
+       ah->av->sl_tclass_flowlabel = cl_hton32(attr->sl << 28);
+       if (attr->is_global) {
+               ah->av->g_slid |= 0x80;
+               /* XXX get gid_table length */
+               ah->av->gid_index = (attr->port_num - 1) * 32 +
+                       attr->grh.sgid_index;
+               ah->av->hop_limit = attr->grh.hop_limit;
+               ah->av->sl_tclass_flowlabel |=
+                       cl_hton32((attr->grh.traffic_class << 20) |
+                                   attr->grh.flow_label);
+               memcpy(ah->av->dgid, attr->grh.dgid.raw, 16);
+       } else {
+               /* Arbel workaround -- low byte of GID must be 2 */
+               ah->av->dgid[3] = cl_hton32(2);
+       }
+
+       return 0;
+}
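
The free-bitmap arithmetic in the Tavor branch above is easier to follow with concrete numbers. A minimal sketch, assuming a 4096-byte page and a 32-byte struct mthca_av (both sizes are illustrative, not taken from this commit):

/* Illustrative only: how a set bit in page->free[] maps to an AV slot,
 * with assumed sizes (4096-byte page, 32-byte struct mthca_av). */
enum { AV_SIZE = 32, SLOTS_PER_WORD = 8 * sizeof(int) };

static void *example_av_from_bit(void *page_buf, int word, int bit)
{
	/* bit is 0-based, i.e. the (j - 1) computed from ffs() above    */
	int slot = word * SLOTS_PER_WORD + bit;   /* word 2, bit 5 -> 69 */
	return (uint8_t *)page_buf + slot * AV_SIZE;
}
/* A 4096-byte page then holds 4096 / 32 == 128 AVs, tracked by
 * 4096 / (32 * SLOTS_PER_WORD) == 4 bitmap words, matching pp above. */
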
+
+void mthca_free_av(struct mthca_ah *ah)
+{
+       if (mthca_is_memfree(ah->ibv_ah.pd->context)) {
+               cl_free(ah->av);
+       } else {
+               struct mthca_pd *pd = to_mpd(ah->ibv_ah.pd);
+               struct mthca_ah_page *page;
+               int i;
+
+               WaitForSingleObject( pd->ah_mutex, INFINITE );
+               page = ah->page;
+               i = ((uint8_t *)ah->av - (uint8_t *)page->buf) / sizeof *ah->av;
+               page->free[i / (8 * sizeof (int))] |= 1 << (i % (8 * sizeof (int)));
+
+               if (!--page->use_cnt) {
+                       if (page->prev)
+                               page->prev->next = page->next;
+                       else
+                               pd->ah_list = page->next;
+                       if (page->next)
+                               page->next->prev = page->prev;
+
+#ifdef NOT_USE_VIRTUAL_ALLOC   
+                       cl_free(page->buf);
+#else
+                       VirtualFree( page->buf, 0, MEM_RELEASE);
+#endif
+                       
+                       cl_free(page);
+               }
+
+               ReleaseMutex( pd->ah_mutex );
+       }
+}
+
+// NB: temporary, to support modify_qp
+void mthca_set_av_params( struct mthca_ah *ah_p, struct ibv_ah_attr *ah_attr )
+{
+       struct mthca_av *av      = ah_p->av;
+       struct ibv_ah *ib_ah_p = (struct ibv_ah *)ah_p;
+       struct mthca_pd *pd = (struct mthca_pd *)ib_ah_p->pd;
+
+       // taken from mthca_alloc_av
+       //TODO: why cl_hton32 ?
+       av->port_pd = cl_hton32(pd->pdn | (ah_attr->port_num << 24));
+       av->g_slid      = ah_attr->src_path_bits;
+       //TODO: why cl_hton16 ?
+       av->dlid                = cl_hton16(ah_attr->dlid);
+       av->msg_sr      = (3 << 4) | /* 2K message */
+               ah_attr->static_rate;
+       //TODO: why cl_hton32 ?
+       av->sl_tclass_flowlabel = cl_hton32(ah_attr->sl << 28);
+       if (ah_attr->is_global) {
+               av->g_slid |= 0x80;
+               av->gid_index = (ah_attr->port_num - 1) * 32 +
+                       ah_attr->grh.sgid_index;
+               av->hop_limit = ah_attr->grh.hop_limit;
+               av->sl_tclass_flowlabel |= cl_hton32((ah_attr->grh.traffic_class << 20) |
+                       ah_attr->grh.flow_label);
+               memcpy(av->dgid, ah_attr->grh.dgid.raw, 16);
+       } else {
+               /* Arbel workaround -- low byte of GID must be 2 */
+               //TODO: why cl_hton32 ?
+               av->dgid[3] = cl_hton32(2);
+       }
+}
+
diff --git a/trunk/hw/mthca/user/mlnx_uvp_cq.c b/trunk/hw/mthca/user/mlnx_uvp_cq.c
new file mode 100644 (file)
index 0000000..d1b80d4
--- /dev/null
@@ -0,0 +1,647 @@
+/*
+ * Copyright (c) 2005 Topspin Communications.  All rights reserved.
+ * Copyright (c) 2005 Mellanox Technologies Ltd.  All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id: cq.c 4005 2005-11-09 20:17:19Z roland $
+ */
+
+#include <mt_l2w.h>
+
+#include <opcode.h>
+
+#include "mlnx_uvp.h"
+#include "mlnx_uvp_debug.h"
+#if defined(EVENT_TRACING)
+#include "mlnx_uvp_cq.tmh"
+#endif
+
+#include "mlnx_uvp_doorbell.h"
+
+enum {
+       MTHCA_CQ_DOORBELL       = 0x20
+};
+
+enum {
+       CQ_OK           =  0,
+       CQ_EMPTY        = -1,
+       CQ_POLL_ERR     = -2
+};
+
+#define MTHCA_TAVOR_CQ_DB_INC_CI       (1 << 24)
+#define MTHCA_TAVOR_CQ_DB_REQ_NOT      (2 << 24)
+#define MTHCA_TAVOR_CQ_DB_REQ_NOT_SOL  (3 << 24)
+#define MTHCA_TAVOR_CQ_DB_SET_CI       (4 << 24)
+#define MTHCA_TAVOR_CQ_DB_REQ_NOT_MULT (5 << 24)
+
+#define MTHCA_ARBEL_CQ_DB_REQ_NOT_SOL  (1 << 24)
+#define MTHCA_ARBEL_CQ_DB_REQ_NOT      (2 << 24)
+#define MTHCA_ARBEL_CQ_DB_REQ_NOT_MULT (3 << 24)
+
+enum {
+       MTHCA_CQ_ENTRY_OWNER_SW     = 0x00,
+       MTHCA_CQ_ENTRY_OWNER_HW     = 0x80,
+       MTHCA_ERROR_CQE_OPCODE_MASK = 0xfe
+};
+
+enum {
+       SYNDROME_LOCAL_LENGTH_ERR        = 0x01,
+       SYNDROME_LOCAL_QP_OP_ERR         = 0x02,
+       SYNDROME_LOCAL_EEC_OP_ERR        = 0x03,
+       SYNDROME_LOCAL_PROT_ERR          = 0x04,
+       SYNDROME_WR_FLUSH_ERR            = 0x05,
+       SYNDROME_MW_BIND_ERR             = 0x06,
+       SYNDROME_BAD_RESP_ERR            = 0x10,
+       SYNDROME_LOCAL_ACCESS_ERR        = 0x11,
+       SYNDROME_REMOTE_INVAL_REQ_ERR    = 0x12,
+       SYNDROME_REMOTE_ACCESS_ERR       = 0x13,
+       SYNDROME_REMOTE_OP_ERR           = 0x14,
+       SYNDROME_RETRY_EXC_ERR           = 0x15,
+       SYNDROME_RNR_RETRY_EXC_ERR       = 0x16,
+       SYNDROME_LOCAL_RDD_VIOL_ERR      = 0x20,
+       SYNDROME_REMOTE_INVAL_RD_REQ_ERR = 0x21,
+       SYNDROME_REMOTE_ABORTED_ERR      = 0x22,
+       SYNDROME_INVAL_EECN_ERR          = 0x23,
+       SYNDROME_INVAL_EEC_STATE_ERR     = 0x24
+};
+
+struct mthca_cqe {
+       uint32_t        my_qpn;
+       uint32_t        my_ee;
+       uint32_t        rqpn;
+       uint16_t        sl_g_mlpath;
+       uint16_t        rlid;
+       uint32_t        imm_etype_pkey_eec;
+       uint32_t        byte_cnt;
+       uint32_t        wqe;
+       uint8_t         opcode;
+       uint8_t         is_send;
+       uint8_t         reserved;
+       uint8_t         owner;
+};
+
+struct mthca_err_cqe {
+       uint32_t        my_qpn;
+       uint32_t        reserved1[3];
+       uint8_t         syndrome;
+       uint8_t         vendor_err;
+       uint16_t        db_cnt;
+       uint32_t        reserved2;
+       uint32_t        wqe;
+       uint8_t         opcode;
+       uint8_t         reserved3[2];
+       uint8_t         owner;
+};
+
+static inline struct mthca_cqe *get_cqe(struct mthca_cq *cq, int entry)
+{
+       return (struct mthca_cqe *)((uint8_t*)cq->buf + entry * MTHCA_CQ_ENTRY_SIZE);
+}
+
+static inline struct mthca_cqe *cqe_sw(struct mthca_cq *cq, int i)
+{
+       struct mthca_cqe *cqe = get_cqe(cq, i);
+       return MTHCA_CQ_ENTRY_OWNER_HW & cqe->owner ? NULL : cqe;
+}
+
+static inline struct mthca_cqe *next_cqe_sw(struct mthca_cq *cq)
+{
+       return cqe_sw(cq, cq->cons_index & cq->ibv_cq.cqe);
+}
+
+static inline void set_cqe_hw(struct mthca_cqe *cqe)
+{
+       cqe->owner = MTHCA_CQ_ENTRY_OWNER_HW;
+}
+
+/*
+ * incr is ignored in native Arbel (mem-free) mode, so cq->cons_index
+ * should be correct before calling update_cons_index().
+ */
+static inline void update_cons_index(struct mthca_cq *cq, int incr)
+{
+       uint32_t doorbell[2];
+
+       if (mthca_is_memfree(cq->ibv_cq.context)) {
+               *cq->set_ci_db = cl_hton32(cq->cons_index);
+               mb();
+       } else {
+               doorbell[0] = cl_hton32(MTHCA_TAVOR_CQ_DB_INC_CI | cq->cqn);
+               doorbell[1] = cl_hton32(incr - 1);
+
+               mthca_write64(doorbell, to_mctx(cq->ibv_cq.context), MTHCA_CQ_DOORBELL);
+       }
+}
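
On the Tavor path the doorbell is just two 32-bit words. A small worked example of what the branch above writes when three CQEs have been freed on CQ number 0x21 (the numbers are illustrative; MTHCA_TAVOR_CQ_DB_INC_CI is the (1 << 24) command defined earlier in this file):

/* Illustrative only: Tavor "increment CI" doorbell for cqn = 0x21 after
 * freeing 3 CQEs. */
static void example_inc_ci(struct mthca_cq *cq)
{
	uint32_t doorbell[2];

	doorbell[0] = cl_hton32(MTHCA_TAVOR_CQ_DB_INC_CI | 0x21);   /* command | cqn */
	doorbell[1] = cl_hton32(3 - 1);                              /* incr - 1      */
	mthca_write64(doorbell, to_mctx(cq->ibv_cq.context), MTHCA_CQ_DOORBELL);
}
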
+
+
+static void dump_cqe(void *cqe_ptr)
+{
+       uint32_t *cqe = cqe_ptr;
+       int i;
+       (void) cqe;     /* avoid an unused-variable warning if UVP_PRINT is compiled away */
+
+       UVP_PRINT(TRACE_LEVEL_INFORMATION,UVP_DBG_CQ,("CQE content \n "));
+       UVP_PRINT(TRACE_LEVEL_INFORMATION,UVP_DBG_CQ,(" [%2x] %08x %08x %08x %08x \n",0
+               , cl_ntoh32(cqe[0]), cl_ntoh32(cqe[1]), cl_ntoh32(cqe[2]), cl_ntoh32(cqe[3])));
+       UVP_PRINT(TRACE_LEVEL_INFORMATION,UVP_DBG_CQ,(" [%2x] %08x %08x %08x %08x\n",16
+               , cl_ntoh32(cqe[4]), cl_ntoh32(cqe[5]), cl_ntoh32(cqe[6]), cl_ntoh32(cqe[7])));
+       
+}
+
+static int handle_error_cqe(struct mthca_cq *cq,
+                           struct mthca_qp *qp, int wqe_index, int is_send,
+                           struct mthca_err_cqe *cqe,
+                           struct _ib_wc *entry, int *free_cqe)
+{
+       int err;
+       int dbd;
+       uint32_t new_wqe;
+
+       if (cqe->syndrome == SYNDROME_LOCAL_QP_OP_ERR) {
+               UVP_PRINT(TRACE_LEVEL_ERROR , UVP_DBG_CQ,("local QP operation err "
+                      "(QPN %06x, WQE @ %08x, CQN %06x, index %d)\n",
+                      cl_ntoh32(cqe->my_qpn), cl_ntoh32(cqe->wqe),
+                      cq->cqn, cq->cons_index));
+               dump_cqe(cqe);
+       }
+
+       /*
+        * For completions in error, only work request ID, status, vendor error
+        * (and freed resource count for RD) have to be set.
+        */
+       switch (cqe->syndrome) {
+       case SYNDROME_LOCAL_LENGTH_ERR:
+               entry->status = IB_WCS_LOCAL_LEN_ERR;
+               break;
+       case SYNDROME_LOCAL_QP_OP_ERR:
+               entry->status = IB_WCS_LOCAL_OP_ERR;
+               break;
+       case SYNDROME_LOCAL_EEC_OP_ERR:
+               entry->status = IB_WCS_LOCAL_EEC_OP_ERR;
+               break;
+       case SYNDROME_LOCAL_PROT_ERR:
+               entry->status = IB_WCS_LOCAL_PROTECTION_ERR;
+               break;
+       case SYNDROME_WR_FLUSH_ERR:
+               entry->status = IB_WCS_WR_FLUSHED_ERR;
+               break;
+       case SYNDROME_MW_BIND_ERR:
+               entry->status = IB_WCS_MEM_WINDOW_BIND_ERR;
+               break;
+       case SYNDROME_BAD_RESP_ERR:
+               entry->status = IB_WCS_BAD_RESP_ERR;
+               break;
+       case SYNDROME_LOCAL_ACCESS_ERR:
+               entry->status = IB_WCS_LOCAL_ACCESS_ERR;
+               break;
+       case SYNDROME_REMOTE_INVAL_REQ_ERR:
+               entry->status = IB_WCS_REM_INV_REQ_ERR;
+               break;
+       case SYNDROME_REMOTE_ACCESS_ERR:
+               entry->status = IB_WCS_REM_ACCESS_ERR;
+               break;
+       case SYNDROME_REMOTE_OP_ERR:
+               entry->status = IB_WCS_REM_OP_ERR;
+               break;
+       case SYNDROME_RETRY_EXC_ERR:
+               entry->status = IB_WCS_TIMEOUT_RETRY_ERR;
+               break;
+       case SYNDROME_RNR_RETRY_EXC_ERR:
+               entry->status = IB_WCS_RNR_RETRY_ERR;
+               break;
+       case SYNDROME_LOCAL_RDD_VIOL_ERR:
+               entry->status = IB_WCS_LOCAL_RDD_VIOL_ERR;
+               break;
+       case SYNDROME_REMOTE_INVAL_RD_REQ_ERR:
+               entry->status = IB_WCS_REM_INVALID_REQ_ERR;
+               break;
+       case SYNDROME_REMOTE_ABORTED_ERR:
+               entry->status = IB_WCS_REM_ABORT_ERR;
+               break;
+       case SYNDROME_INVAL_EECN_ERR:
+               entry->status = IB_WCS_INV_EECN_ERR;
+               break;
+       case SYNDROME_INVAL_EEC_STATE_ERR:
+               entry->status = IB_WCS_INV_EEC_STATE_ERR;
+               break;
+       default:
+               entry->status = IB_WCS_GENERAL_ERR;
+               break;
+       }
+
+       entry->vendor_specific = cqe->vendor_err;
+       
+       /*
+        * Mem-free HCAs always generate one CQE per WQE, even in the
+        * error case, so we don't have to check the doorbell count, etc.
+        */
+       if (mthca_is_memfree(cq->ibv_cq.context))
+               return 0;
+
+       err = mthca_free_err_wqe(qp, is_send, wqe_index, &dbd, &new_wqe);
+       if (err)
+               return err;
+
+       /*
+        * If we're at the end of the WQE chain, or we've used up our
+        * doorbell count, free the CQE.  Otherwise just update it for
+        * the next poll operation.
+        * 
+        * This doesn't apply to mem-free HCAs, which never use the
+        * doorbell count field.  In that case we always free the CQE.
+        */
+       if (mthca_is_memfree(cq->ibv_cq.context) ||
+           !(new_wqe & cl_hton32(0x3f)) || (!cqe->db_cnt && dbd))
+               return 0;
+
+       cqe->db_cnt   = cl_hton16(cl_ntoh16(cqe->db_cnt) - dbd);
+       cqe->wqe      = new_wqe;
+       cqe->syndrome = SYNDROME_WR_FLUSH_ERR;
+
+       *free_cqe = 0;
+
+       return 0;
+}
+
+static inline int mthca_poll_one(struct mthca_cq *cq,
+                                struct mthca_qp **cur_qp,
+                                int *freed,
+                                struct _ib_wc *entry)
+{
+       struct mthca_wq *wq;
+       struct mthca_cqe *cqe;
+       uint32_t qpn;
+       int wqe_index;
+       int is_error;
+       int is_send;
+       int free_cqe = 1;
+       int err = 0;
+
+       UVP_ENTER(UVP_DBG_CQ);
+       
+       cqe = next_cqe_sw(cq);
+       if (!cqe)
+               return -EAGAIN;
+
+       /*
+        * Make sure we read CQ entry contents after we've checked the
+        * ownership bit.
+        */
+       rmb();
+
+       if(0){
+               UVP_PRINT(TRACE_LEVEL_VERBOSE,UVP_DBG_CQ,("%x/%d: CQE -> QPN %06x, WQE @ %08x\n",
+                         cq->cqn, cq->cons_index, cl_ntoh32(cqe->my_qpn),
+                         cl_ntoh32(cqe->wqe)));
+               dump_cqe(cqe);
+       }
+       
+       qpn = cl_ntoh32(cqe->my_qpn);
+
+       is_error = (cqe->opcode & MTHCA_ERROR_CQE_OPCODE_MASK) ==
+               MTHCA_ERROR_CQE_OPCODE_MASK;
+       is_send  = is_error ? cqe->opcode & 0x01 : cqe->is_send & 0x80;
+
+       if (!*cur_qp || cl_ntoh32(cqe->my_qpn) != (*cur_qp)->ibv_qp.qp_num) {
+               /*
+                * We do not have to take the QP table lock here,
+                * because CQs will be locked while QPs are removed
+                * from the table.
+                */
+               *cur_qp = mthca_find_qp(to_mctx(cq->ibv_cq.context), cl_ntoh32(cqe->my_qpn));
+               if (!*cur_qp) {
+                       UVP_PRINT(TRACE_LEVEL_WARNING,UVP_DBG_CQ, ("CQ entry for unknown QP %06x\n",
+                                  cl_ntoh32(cqe->my_qpn) & 0xffffff));
+                       err = -EINVAL;
+                       goto out;
+               }
+       }
+
+       entry->qp_num = (*cur_qp)->ibv_qp.qp_num;
+
+       if (is_send) {
+               wq = &(*cur_qp)->sq;
+               wqe_index = ((cl_ntoh32(cqe->wqe) - (*cur_qp)->send_wqe_offset) >> wq->wqe_shift);
+               entry->wr_id = (*cur_qp)->wrid[wqe_index + (*cur_qp)->rq.max];
+       } else if ((*cur_qp)->ibv_qp.srq) {
+               struct mthca_srq * srq = to_msrq((*cur_qp)->ibv_qp.srq);
+               uint32_t wqe = cl_hton32(cqe->wqe);
+               wq = NULL;
+               wqe_index = wqe >> srq->wqe_shift;
+               entry->wr_id = srq->wrid[wqe_index];
+               mthca_free_srq_wqe(srq, wqe_index);
+       } else {
+               wq = &(*cur_qp)->rq;
+               wqe_index = cl_ntoh32(cqe->wqe) >> wq->wqe_shift;
+               entry->wr_id = (*cur_qp)->wrid[wqe_index];
+       }
+
+       if (wq) {
+               if ((int)wq->last_comp < wqe_index)
+                       wq->tail += wqe_index - wq->last_comp;
+               else
+                       wq->tail += wqe_index + wq->max - wq->last_comp;
+
+               wq->last_comp = wqe_index;
+       }
+
+       if (is_error) {
+               err = handle_error_cqe(cq, *cur_qp, wqe_index, is_send,
+                                      (struct mthca_err_cqe *) cqe,
+                                      entry, &free_cqe);
+               goto out;
+       }
+
+       if (is_send) {
+               entry->recv.ud.recv_opt = 0;
+               switch (cqe->opcode) {
+               case MTHCA_OPCODE_RDMA_WRITE:
+                       entry->wc_type    = IB_WC_RDMA_WRITE;
+                       break;
+               case MTHCA_OPCODE_RDMA_WRITE_IMM:
+                       entry->wc_type    = IB_WC_RDMA_WRITE;
+                       entry->recv.ud.recv_opt |= IB_RECV_OPT_IMMEDIATE;
+                       break;
+               case MTHCA_OPCODE_SEND:
+                       entry->wc_type    = IB_WC_SEND;
+                       break;
+               case MTHCA_OPCODE_SEND_IMM:
+                       entry->wc_type    = IB_WC_SEND;
+                       entry->recv.ud.recv_opt |= IB_RECV_OPT_IMMEDIATE;
+                       break;
+               case MTHCA_OPCODE_RDMA_READ:
+                       entry->wc_type    = IB_WC_RDMA_READ;
+                       entry->length  = cl_ntoh32(cqe->byte_cnt);
+                       break;
+               case MTHCA_OPCODE_ATOMIC_CS:
+                       entry->wc_type    = IB_WC_COMPARE_SWAP;
+                       entry->length  = cl_ntoh32(cqe->byte_cnt);
+                       break;
+               case MTHCA_OPCODE_ATOMIC_FA:
+                       entry->wc_type    = IB_WC_FETCH_ADD;
+                       entry->length  = cl_ntoh32(cqe->byte_cnt);
+                       break;
+               case MTHCA_OPCODE_BIND_MW:
+                       entry->wc_type    = IB_WC_MW_BIND;
+                       break;
+               default:
+                       /* assume it's a send completion */
+                       entry->wc_type    = IB_WC_SEND;
+                       break;
+               }
+       } else {
+               entry->length = cl_ntoh32(cqe->byte_cnt);
+               switch (cqe->opcode & 0x1f) {
+               case IBV_OPCODE_SEND_LAST_WITH_IMMEDIATE:
+               case IBV_OPCODE_SEND_ONLY_WITH_IMMEDIATE:
+                       entry->recv.ud.recv_opt  = IB_RECV_OPT_IMMEDIATE;
+                       entry->recv.ud.immediate_data = cqe->imm_etype_pkey_eec;
+                       entry->wc_type = IB_WC_RECV;
+                       break;
+               case IBV_OPCODE_RDMA_WRITE_LAST_WITH_IMMEDIATE:
+               case IBV_OPCODE_RDMA_WRITE_ONLY_WITH_IMMEDIATE:
+                       entry->recv.ud.recv_opt  = IB_RECV_OPT_IMMEDIATE;
+                       entry->recv.ud.immediate_data = cqe->imm_etype_pkey_eec;
+                       entry->wc_type = IB_WC_RECV;
+                       break;
+               default:
+                       entry->recv.ud.recv_opt  = 0;
+                       entry->wc_type = IB_WC_RECV;
+                       break;
+               }
+               entry->recv.ud.remote_lid = cqe->rlid;
+               entry->recv.ud.remote_qp = cqe->rqpn & 0xffffff00;
+               entry->recv.ud.pkey_index     = (uint16_t)(cl_ntoh32(cqe->imm_etype_pkey_eec) >> 16);
+               entry->recv.ud.remote_sl           = cl_ntoh16(cqe->sl_g_mlpath) >> 12;
+               entry->recv.ud.path_bits = cl_ntoh16(cqe->sl_g_mlpath) & 0x7f;
+               entry->recv.ud.recv_opt      |= cl_ntoh16(cqe->sl_g_mlpath) & 0x80 ?
+                       IB_RECV_OPT_GRH_VALID : 0;
+       }
+
+       entry->status = IB_WCS_SUCCESS;
+
+out:
+       if (likely(free_cqe)) {
+               set_cqe_hw(cqe);
+               ++(*freed);
+               ++cq->cons_index;
+       }
+
+       UVP_EXIT(UVP_DBG_CQ);
+       return err;
+}
+
+int mthca_poll_cq(struct ibv_cq *ibcq, int num_entries, struct _ib_wc *entry)
+{
+       struct mthca_cq *cq = to_mcq(ibcq);
+       struct mthca_qp *qp = NULL;
+       int err = CQ_OK;
+       int freed = 0;
+       int npolled;
+       
+       cl_spinlock_acquire(&cq->lock);
+
+       for (npolled = 0; npolled < num_entries; ++npolled) {
+               err = mthca_poll_one(cq, &qp, &freed, entry + npolled);
+               if (err)
+                       break;
+       }
+
+       if (freed) {
+               wmb();
+               update_cons_index(cq, freed);
+       }
+
+       cl_spinlock_release(&cq->lock);
+
+       return (err == 0 || err == -EAGAIN) ? npolled : err;
+}
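
A minimal caller-side sketch of the array variant above. In the real stack this entry point is reached through the UVP dispatch table, and struct _ib_wc comes from the IBAL headers, so treat the snippet as illustrative only:

/* Illustrative only: drain up to 8 completions from a CQ. */
static void example_poll(struct ibv_cq *ibcq)
{
	struct _ib_wc wc[8];
	int i, n;

	n = mthca_poll_cq(ibcq, 8, wc);
	for (i = 0; i < n; ++i) {
		if (wc[i].status != IB_WCS_SUCCESS) {
			/* the failed request is identified by wc[i].wr_id */
		}
	}
}
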
+
+int mthca_poll_cq_list(
+       IN              struct ibv_cq *ibcq, 
+       IN      OUT                     struct _ib_wc** const                           pp_free_wclist,
+               OUT                     struct _ib_wc** const                           pp_done_wclist )
+{
+       struct mthca_cq *cq = to_mcq(ibcq);
+       struct mthca_qp *qp = NULL;
+       int err = CQ_OK;
+       int freed = 0;
+       ib_wc_t         *wc_p, **next_pp;
+       uint32_t        wc_cnt = 0;
+
+       cl_spinlock_acquire(&cq->lock);
+
+       // loop through CQ
+       next_pp = pp_done_wclist;
+       wc_p = *pp_free_wclist;
+       while( wc_p ) {
+               // poll one CQE
+               err = mthca_poll_one(cq, &qp, &freed, wc_p);
+               if (err)
+                       break;
+
+               // prepare for the next loop
+               *next_pp = wc_p;
+               next_pp = &wc_p->p_next;
+               wc_p = wc_p->p_next;
+       }
+
+       // prepare the results
+       *pp_free_wclist = wc_p;         /* Set the head of the free list. */
+       *next_pp = NULL;                                                /* Clear the tail of the done list. */
+
+       // update consumer index
+       if (freed) {
+               wmb();
+               update_cons_index(cq, freed);
+       }
+
+       cl_spinlock_release(&cq->lock);
+       return (err == 0 || err == -EAGAIN)? 0 : err; 
+}
+
+int mthca_tavor_arm_cq(struct ibv_cq *cq, enum ib_cq_notify notify)
+{
+       uint32_t doorbell[2];
+
+       doorbell[0] = cl_hton32((notify == IB_CQ_SOLICITED ?
+                            MTHCA_TAVOR_CQ_DB_REQ_NOT_SOL :
+                            MTHCA_TAVOR_CQ_DB_REQ_NOT)      |
+                           to_mcq(cq)->cqn);
+       doorbell[1] = 0xffffffff;
+
+       mthca_write64(doorbell, to_mctx(cq->context), MTHCA_CQ_DOORBELL);
+
+       return 0;
+}
+
+int mthca_arbel_arm_cq(struct ibv_cq *ibvcq, enum ib_cq_notify notify)
+{
+       struct mthca_cq *cq = to_mcq(ibvcq);
+       uint32_t doorbell[2];
+       uint32_t sn;
+       uint32_t ci;
+
+       sn = cq->arm_sn & 3;
+       ci = cl_hton32(cq->cons_index);
+
+       doorbell[0] = ci;
+       doorbell[1] = cl_hton32((cq->cqn << 8) | (2 << 5) | (sn << 3) |
+                           (notify == IB_CQ_SOLICITED ? 1 : 2));
+
+       mthca_write_db_rec(doorbell, cq->arm_db);
+
+       /*
+        * Make sure that the doorbell record in host memory is
+        * written before ringing the doorbell via PCI MMIO.
+        */
+       wmb();
+
+       doorbell[0] = cl_hton32((sn << 28)                       |
+                           (notify == IB_CQ_SOLICITED ?
+                            MTHCA_ARBEL_CQ_DB_REQ_NOT_SOL :
+                            MTHCA_ARBEL_CQ_DB_REQ_NOT)      |
+                           cq->cqn);
+       doorbell[1] = ci;
+
+       mthca_write64(doorbell, to_mctx(ibvcq->context), MTHCA_CQ_DOORBELL);
+
+       return 0;
+}
+
+void mthca_arbel_cq_event(struct ibv_cq *cq)
+{
+       to_mcq(cq)->arm_sn++;
+}
+
+static inline int is_recv_cqe(struct mthca_cqe *cqe)
+{
+       if ((cqe->opcode & MTHCA_ERROR_CQE_OPCODE_MASK) ==
+           MTHCA_ERROR_CQE_OPCODE_MASK)
+               return !(cqe->opcode & 0x01);
+       else
+               return !(cqe->is_send & 0x80);
+}
+
+void mthca_cq_clean(struct mthca_cq *cq, uint32_t qpn, struct mthca_srq *srq)
+{
+       struct mthca_cqe *cqe;
+       uint32_t prod_index;
+       int nfreed = 0;
+
+       cl_spinlock_acquire(&cq->lock);
+
+       /*
+        * First we need to find the current producer index, so we
+        * know where to start cleaning from.  It doesn't matter if HW
+        * adds new entries after this loop -- the QP we're worried
+        * about is already in RESET, so the new entries won't come
+        * from our QP and therefore don't need to be checked.
+        */
+       for (prod_index = cq->cons_index;
+            cqe_sw(cq, prod_index & cq->ibv_cq.cqe);
+            ++prod_index)
+               if (prod_index == cq->cons_index + cq->ibv_cq.cqe)
+                       break;
+
+       /*
+        * Now sweep backwards through the CQ, removing CQ entries
+        * that match our QP by copying older entries on top of them.
+        */
+       while ((int) --prod_index - (int) cq->cons_index >= 0) {
+               cqe = get_cqe(cq, prod_index & cq->ibv_cq.cqe);
+               if (cqe->my_qpn == cl_hton32(qpn)) {
+                       if (srq && is_recv_cqe(cqe))
+                               mthca_free_srq_wqe(srq,
+                                                  cl_ntoh32(cqe->wqe) >> srq->wqe_shift);
+                       ++nfreed;
+               } else if (nfreed)
+                       memcpy(get_cqe(cq, (prod_index + nfreed) & cq->ibv_cq.cqe),
+                              cqe, MTHCA_CQ_ENTRY_SIZE);
+       }
+
+       if (nfreed) {
+               mb();
+               cq->cons_index += nfreed;
+               update_cons_index(cq, nfreed);
+       }
+
+       cl_spinlock_release(&cq->lock);
+}
+
+void mthca_init_cq_buf(struct mthca_cq *cq, int nent)
+{
+       int i;
+
+       for (i = 0; i < nent; ++i)
+               set_cqe_hw(get_cqe(cq, i));
+
+       cq->cons_index = 0;
+}
diff --git a/trunk/hw/mthca/user/mlnx_uvp_debug.c b/trunk/hw/mthca/user/mlnx_uvp_debug.c
new file mode 100644 (file)
index 0000000..3fc7113
--- /dev/null
@@ -0,0 +1,85 @@
+/*
+ * Copyright (c) 2005 Mellanox Technologies LTD.  All rights reserved.
+ *
+ * This software is available to you under the OpenIB.org BSD license
+ * below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+// Author: Yossi Leybovich
+
+#include "mlnx_uvp_debug.h"
+#include <stdio.h>
+#include <stdarg.h>
+#include <strsafe.h>
+
+#if !defined(EVENT_TRACING)
+
+
+#if DBG
+uint32_t g_mlnx_dbg_level = TRACE_LEVEL_WARNING;
+uint32_t g_mlnx_dbg_flags = UVP_DBG_QP | UVP_DBG_CQ | UVP_DBG_MEMORY;
+#endif
+
+VOID
+_UVP_PRINT(
+    IN char*   msg,
+    ...
+    )
+
+{
+#if DBG
+#define     TEMP_BUFFER_SIZE        1024
+    va_list    list;
+    UCHAR      debugMessageBuffer[TEMP_BUFFER_SIZE];
+    HRESULT result;
+
+    va_start(list, msg);
+
+    if (msg) {
+
+        //
+        // Using new safe string functions instead of _vsnprintf. This function takes
+        // care of NULL terminating if the message is longer than the buffer.
+        //
+
+        result = StringCbVPrintfA (debugMessageBuffer, sizeof(debugMessageBuffer),
+                                    msg, list);
+        if(((HRESULT)(result) < 0)) {
+
+            OutputDebugString (": StringCbVPrintfA failed \n");
+            return;
+        }
+        OutputDebugString ( debugMessageBuffer);
+
+    }
+    va_end(list);
+
+    return;
+#endif //DBG
+}
+
+#endif //EVENT_TRACING
+
diff --git a/trunk/hw/mthca/user/mlnx_uvp_debug.h b/trunk/hw/mthca/user/mlnx_uvp_debug.h
new file mode 100644 (file)
index 0000000..58e2b47
--- /dev/null
@@ -0,0 +1,145 @@
+/*
+ * Copyright (c) 2005 Mellanox Technologies.  All rights reserved.
+ *
+ * This software is available to you under the OpenIB.org BSD license
+ * below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id: mlnx_uvp_debug.h 46 2005-05-30 17:55:53Z sleybo $
+ */
+
+
+#ifndef _MLNX_UVP_DEBUG_H_
+#define _MLNX_UVP_DEBUG_H_
+
+#include <complib/cl_debug.h>
+
+extern uint32_t                g_mlnx_dbg_level;
+extern uint32_t                g_mlnx_dbg_flags;
+
+
+#if defined(EVENT_TRACING)
+//
+// Software Tracing Definitions
+//
+//
+
+#define WPP_CONTROL_GUIDS \
+       WPP_DEFINE_CONTROL_GUID(HCACtlGuid,(2C718E52,0D36,4bda,9E58,0FC601818D8F),  \
+       WPP_DEFINE_BIT( UVP_DBG_DEV) \
+       WPP_DEFINE_BIT( UVP_DBG_PNP) \
+       WPP_DEFINE_BIT( UVP_DBG_MAD) \
+       WPP_DEFINE_BIT( UVP_DBG_PO) \
+       WPP_DEFINE_BIT( UVP_DBG_CQ) \
+       WPP_DEFINE_BIT( UVP_DBG_QP) \
+       WPP_DEFINE_BIT( UVP_DBG_MEMORY) \
+       WPP_DEFINE_BIT( UVP_DBG_AV) \
+       WPP_DEFINE_BIT( UVP_DBG_SEND) \
+       WPP_DEFINE_BIT( UVP_DBG_RECV) \
+       WPP_DEFINE_BIT( UVP_DBG_LOW) \
+       WPP_DEFINE_BIT( UVP_DBG_SHIM))
+
+
+#define WPP_LEVEL_FLAGS_ENABLED(lvl, flags) (WPP_LEVEL_ENABLED(flags) && WPP_CONTROL(WPP_BIT_ ## flags).Level >= lvl)
+#define WPP_LEVEL_FLAGS_LOGGER(lvl, flags) WPP_LEVEL_LOGGER(flags)
+#define WPP_FLAG_ENABLED(flags) (WPP_LEVEL_ENABLED(flags) && WPP_CONTROL(WPP_BIT_ ## flags).Level >= TRACE_LEVEL_VERBOSE)
+#define WPP_FLAG_LOGGER(flags) WPP_LEVEL_LOGGER(flags)
+
+
+// begin_wpp config
+// UVP_ENTER(FLAG);
+// UVP_EXIT(FLAG);
+// USEPREFIX(UVP_PRINT, "%!FUNC!()  ");
+// USESUFFIX(UVP_ENTER, "%!FUNC!===>");
+// USESUFFIX(UVP_EXIT, "%!FUNC!<===");
+// end_wpp
+
+
+
+#else
+
+#include <wmistr.h>
+#include <evntrace.h>
+
+/*
+ * Debug macros
+ */
+
+
+#define UVP_DBG_DEV    (1 << 0)
+#define UVP_DBG_PNP    (1 << 1)
+#define UVP_DBG_MAD    (1 << 2)
+#define UVP_DBG_PO     (1 << 3)
+#define UVP_DBG_QP     (1 << 4)
+#define UVP_DBG_CQ     (1 << 5)
+#define UVP_DBG_MEMORY (1 << 6)
+#define UVP_DBG_AV     (1 << 7)
+#define UVP_DBG_SEND   (1 << 8)
+#define UVP_DBG_RECV   (1 << 9)
+#define UVP_DBG_LOW    (1 << 10)
+#define UVP_DBG_SHIM   (1 << 11)
+
+
+VOID
+       _UVP_PRINT(
+       IN char* msg,
+       ...);
+
+#if DBG
+
+#define UVP_PRINT(_level_,_flags_,_msg_)  \
+       if (_level_ <= g_mlnx_dbg_level && \
+                ((_flags_ & g_mlnx_dbg_flags) == _flags_)){\
+                _UVP_PRINT("[UVP] %s():",__FUNCTION__);\
+                if(_level_ == TRACE_LEVEL_ERROR) _UVP_PRINT ("***ERROR***  ");\
+               _UVP_PRINT _msg_  ;     \
+       }
+
+
+//
+#else
+
+#define UVP_PRINT(lvl, flags, msg)
+
+#endif
+
+
+#define UVP_ENTER(flags)\
+       UVP_PRINT(TRACE_LEVEL_VERBOSE, flags,("===>\n"));
+
+#define UVP_EXIT(flags)\
+       UVP_PRINT(TRACE_LEVEL_VERBOSE, flags,("<===\n"));
+
+#define UVP_PRINT_EXIT(_level_,_flag_,_msg_)   \
+       {\
+               if (status != IB_SUCCESS) {\
+                       UVP_PRINT(_level_,_flag_,_msg_);\
+               }\
+               UVP_EXIT(_flag_);\
+       }
+
+#endif //EVENT_TRACING
+
+#endif /*_MLNX_UVP_DEBUG_H_ */
+
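
Every call site in the cq/qp sources wraps the format string in an extra pair of parentheses, so a short usage sketch of the non-WPP macros above may help; the level and flag values here are just examples:

/* Illustrative only: _msg_ is pasted verbatim as the argument list of
 * _UVP_PRINT(), which is why the format string and its arguments need
 * their own parentheses. */
static void example(int cqn)
{
	UVP_ENTER(UVP_DBG_CQ);
	UVP_PRINT(TRACE_LEVEL_ERROR, UVP_DBG_CQ, ("bad CQE on CQ %06x\n", cqn));
	UVP_EXIT(UVP_DBG_CQ);
}
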
diff --git a/trunk/hw/mthca/user/mlnx_uvp_doorbell.h b/trunk/hw/mthca/user/mlnx_uvp_doorbell.h
new file mode 100644 (file)
index 0000000..0882ee1
--- /dev/null
@@ -0,0 +1,106 @@
+/*
+ * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id: doorbell.h 2166 2005-04-12 16:37:00Z roland $
+ */
+
+#ifndef DOORBELL_H
+#define DOORBELL_H
+
+#if defined (_WIN64)
+
+
+#ifdef __WRITE_QWORD_ATOMIC__
+
+#if __BYTE_ORDER == __LITTLE_ENDIAN
+#  define MTHCA_PAIR_TO_64(val) ((uint64_t) val[1] << 32 | val[0])
+#elif __BYTE_ORDER == __BIG_ENDIAN
+#  define MTHCA_PAIR_TO_64(val) ((uint64_t) val[0] << 32 | val[1])
+#else
+#  error __BYTE_ORDER not defined
+#endif
+
+static inline void mthca_write64(uint32_t val[2], struct mthca_context *ctx, int offset)
+{
+       *(volatile uint64_t *) (ctx->uar + offset) = MTHCA_PAIR_TO_64(val);
+}
+
+static inline void mthca_write_db_rec(uint32_t val[2], uint32_t *db)
+{
+       *(volatile uint64_t *) db = MTHCA_PAIR_TO_64(val);
+}
+
+#else
+
+static inline void mthca_write64(uint32_t val[2], struct mthca_context *ctx, int offset)
+{
+       cl_spinlock_acquire(&ctx->uar_lock);
+       *(volatile uint32_t *) ((uint8_t*)ctx->uar + offset)     = val[0];
+       *(volatile uint32_t *) ((uint8_t*)ctx->uar + offset + 4) = val[1];
+       cl_spinlock_release(&ctx->uar_lock);
+}
+
+static inline void mthca_write_db_rec(uint32_t val[2], uint32_t *db)
+{
+       *(volatile uint32_t *) db       = val[0];
+       mb();
+       *(volatile uint32_t *) (db + 1) = val[1];
+}
+
+#endif
+
+#elif defined(_WIN32)
+
+static inline void mthca_write64(uint32_t val[2], struct mthca_context *ctx, int offset)
+{
+       volatile uint64_t *target_p = (volatile uint64_t*)((uint8_t*)ctx->uar + offset);
+
+       cl_spinlock_acquire(&ctx->uar_lock);
+       *(volatile uint32_t *) ((uint8_t*)ctx->uar + offset)     = val[0];
+       *(volatile uint32_t *) ((uint8_t*)ctx->uar + offset + 4) = val[1];
+       cl_spinlock_release(&ctx->uar_lock);
+
+       //TODO: can we save mm0 and avoid using emms, as Linux does?
+       //__asm movq mm0,val
+       //__asm movq target_p,mm0
+       //__asm emms
+}
+
+static inline void mthca_write_db_rec(uint32_t val[2], uint32_t *db)
+{
+       //TODO: can we save mm0 and avoid using emms, as Linux does?
+       // val and db are pointers here, so they must be dereferenced:
+       __asm mov ecx, val
+       __asm movq mm0, [ecx]
+       __asm mov ecx, db
+       __asm movq [ecx], mm0
+       __asm emms
+}
+#endif
+
+#endif /* DOORBELL_H */
diff --git a/trunk/hw/mthca/user/mlnx_uvp_kern_abi.h b/trunk/hw/mthca/user/mlnx_uvp_kern_abi.h
new file mode 100644 (file)
index 0000000..165af57
--- /dev/null
@@ -0,0 +1,644 @@
+/*
+ * Copyright (c) 2005 Topspin Communications.  All rights reserved.
+ * Copyright (c) 2005 Cisco Systems.  All rights reserved.
+ * Copyright (c) 2005 PathScale, Inc.  All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id: kern-abi.h 4019 2005-11-11 00:33:09Z sean.hefty $
+ */
+
+#ifndef KERN_ABI_H
+#define KERN_ABI_H
+
+/*
+ * This file must be kept in sync with the kernel's version of
+ * drivers/infiniband/include/ib_user_verbs.h
+ */
+
+/*
+ * Make sure that all structs defined in this file remain laid out so
+ * that they pack the same way on 32-bit and 64-bit architectures (to
+ * avoid incompatibility between 32-bit userspace and 64-bit kernels).
+ * Specifically:
+ *  - Do not use pointer types -- pass pointers in uint64_t instead.
+ *  - Make sure that any structure larger than 4 bytes is padded to a
+ *    multiple of 8 bytes.  Otherwise the structure size will be
+ *    different between 32-bit and 64-bit architectures.
+ */
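
The two layout rules above are easiest to see on a concrete struct. A sketch with an invented name, not part of this ABI:

/* Illustrative only: follows the rules stated above. */
struct ibv_example_cmd {
	uint64_t response;      /* a user pointer, carried as uint64_t           */
	uint32_t handle;
	uint32_t reserved;      /* explicit pad so the size (16 bytes) is the
	                           same multiple of 8 on 32- and 64-bit builds   */
};
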
+
+struct ibv_kern_async_event {
+       uint64_t element;
+       uint32_t event_type;
+       uint32_t reserved;
+};
+
+struct ibv_comp_event {
+       uint64_t cq_handle;
+};
+
+/*
+ * All commands from userspace should start with a uint32_t command field
+ * followed by uint16_t in_words and out_words fields (which give the
+ * length of the command block and response buffer if any in 32-bit
+ * words).  The kernel driver will read these fields first and read
+ * the rest of the command struct based on these value.
+ */
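
As a concrete illustration of that header convention, this is roughly how a caller fills the common fields before issuing a query-device request; the opcode constant and the exact submission path are assumptions borrowed from the Linux libibverbs flow, not from this port:

/* Illustrative only: filling the common command header. */
static void example_fill_header(void)
{
	struct ibv_query_device cmd;
	struct ibv_query_device_resp resp;

	cmd.command   = IB_USER_VERBS_CMD_QUERY_DEVICE;   /* assumed opcode name    */
	cmd.in_words  = sizeof cmd  / 4;                  /* request, 32-bit words  */
	cmd.out_words = sizeof resp / 4;                  /* response, 32-bit words */
	cmd.response  = (uintptr_t) &resp;                /* buffer passed as u64   */
}
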
+
+struct ibv_query_params {
+       uint32_t command;
+       uint16_t in_words;
+       uint16_t out_words;
+       uint64_t response;
+};
+
+struct ibv_query_params_resp {
+       uint32_t num_cq_events;
+};
+
+struct ibv_get_context_resp {
+       uint64_t uar_addr;
+       uint64_t pd_handle;
+       uint32_t pdn;
+       uint32_t qp_tab_size;
+       uint32_t uarc_size;
+       uint32_t vend_id;
+       uint16_t dev_id;
+};
+
+struct ibv_query_device {
+       uint32_t command;
+       uint16_t in_words;
+       uint16_t out_words;
+       uint64_t response;
+       uint64_t driver_data[0];
+};
+
+struct ibv_query_device_resp {
+       uint64_t fw_ver;
+       uint64_t node_guid;
+       uint64_t sys_image_guid;
+       uint64_t max_mr_size;
+       uint64_t page_size_cap;
+       uint32_t vendor_id;
+       uint32_t vendor_part_id;
+       uint32_t hw_ver;
+       uint32_t max_qp;
+       uint32_t max_qp_wr;
+       uint32_t device_cap_flags;
+       uint32_t max_sge;
+       uint32_t max_sge_rd;
+       uint32_t max_cq;
+       uint32_t max_cqe;
+       uint32_t max_mr;
+       uint32_t max_pd;
+       uint32_t max_qp_rd_atom;
+       uint32_t max_ee_rd_atom;
+       uint32_t max_res_rd_atom;
+       uint32_t max_qp_init_rd_atom;
+       uint32_t max_ee_init_rd_atom;
+       uint32_t atomic_cap;
+       uint32_t max_ee;
+       uint32_t max_rdd;
+       uint32_t max_mw;
+       uint32_t max_raw_ipv6_qp;
+       uint32_t max_raw_ethy_qp;
+       uint32_t max_mcast_grp;
+       uint32_t max_mcast_qp_attach;
+       uint32_t max_total_mcast_qp_attach;
+       uint32_t max_ah;
+       uint32_t max_fmr;
+       uint32_t max_map_per_fmr;
+       uint32_t max_srq;
+       uint32_t max_srq_wr;
+       uint32_t max_srq_sge;
+       uint16_t max_pkeys;
+       uint8_t  local_ca_ack_delay;
+       uint8_t  phys_port_cnt;
+       uint8_t  reserved[4];
+};
+
+struct ibv_query_port {
+       uint32_t command;
+       uint16_t in_words;
+       uint16_t out_words;
+       uint64_t response;
+       uint8_t  port_num;
+       uint8_t  reserved[7];
+       uint64_t driver_data[0];
+};
+
+struct ibv_query_port_resp {
+       uint32_t port_cap_flags;
+       uint32_t max_msg_sz;
+       uint32_t bad_pkey_cntr;
+       uint32_t qkey_viol_cntr;
+       uint32_t gid_tbl_len;
+       uint16_t pkey_tbl_len;
+       uint16_t lid;
+       uint16_t sm_lid;
+       uint8_t  state;
+       uint8_t  max_mtu;
+       uint8_t  active_mtu;
+       uint8_t  lmc;
+       uint8_t  max_vl_num;
+       uint8_t  sm_sl;
+       uint8_t  subnet_timeout;
+       uint8_t  init_type_reply;
+       uint8_t  active_width;
+       uint8_t  active_speed;
+       uint8_t  phys_state;
+       uint8_t  reserved[3];
+};
+
+struct ibv_alloc_pd_resp {
+       uint64_t pd_handle;
+       uint32_t pdn;
+       uint32_t reserved;
+};
+
+struct ibv_reg_mr {
+       uint64_t start;
+       uint64_t length;
+       uint64_t hca_va;
+       uint32_t access_flags;
+       uint32_t pdn;
+       uint64_t pd_handle;
+};
+
+struct ibv_reg_mr_resp {
+       uint64_t mr_handle;
+       uint32_t lkey;
+       uint32_t rkey;
+};
+
+struct ibv_create_cq {
+       struct ibv_reg_mr mr;   
+       uint64_t arm_db_page;
+       uint64_t set_db_page;
+       uint32_t arm_db_index;
+       uint32_t set_db_index;
+       uint64_t user_handle;
+       uint32_t cqe;
+       uint32_t lkey;          /* used only by kernel */
+};
+struct ibv_create_cq_resp {
+       uint64_t user_handle;
+       uint64_t cq_handle;
+       struct ibv_reg_mr_resp mr;
+       uint32_t cqe;
+       uint32_t cqn;
+};
+
+struct ibv_kern_wc {
+       uint64_t  wr_id;
+       uint32_t  status;
+       uint32_t  opcode;
+       uint32_t  vendor_err;
+       uint32_t  byte_len;
+       uint32_t  imm_data;
+       uint32_t  qp_num;
+       uint32_t  src_qp;
+       uint32_t  wc_flags;
+       uint16_t  pkey_index;
+       uint16_t  slid;
+       uint8_t   sl;
+       uint8_t   dlid_path_bits;
+       uint8_t   port_num;
+       uint8_t   reserved;
+};
+
+struct ibv_poll_cq {
+       uint32_t command;
+       uint16_t in_words;
+       uint16_t out_words;
+       uint64_t response;
+       uint32_t cq_handle;
+       uint32_t ne;
+};
+
+struct ibv_poll_cq_resp {
+       uint32_t count;
+       uint32_t reserved;
+       struct ibv_kern_wc wc[];
+};
+
+struct ibv_req_notify_cq {
+       uint32_t command;
+       uint16_t in_words;
+       uint16_t out_words;
+       uint32_t cq_handle;
+       uint32_t solicited;
+};
+
+struct ibv_destroy_cq {
+       uint32_t command;
+       uint16_t in_words;
+       uint16_t out_words;
+       uint64_t response;
+       uint32_t cq_handle;
+       uint32_t reserved;
+};
+
+struct ibv_destroy_cq_resp {
+       uint32_t comp_events_reported;
+       uint32_t async_events_reported;
+};
+
+struct ibv_kern_global_route {
+       uint8_t  dgid[16];
+       uint32_t flow_label;
+       uint8_t  sgid_index;
+       uint8_t  hop_limit;
+       uint8_t  traffic_class;
+       uint8_t  reserved;
+};
+
+struct ibv_kern_ah_attr {
+       struct ibv_kern_global_route grh;
+       uint16_t dlid;
+       uint8_t  sl;
+       uint8_t  src_path_bits;
+       uint8_t  static_rate;
+       uint8_t  is_global;
+       uint8_t  port_num;
+       uint8_t  reserved;
+};
+
+struct ibv_kern_qp_attr {
+       uint32_t        qp_attr_mask;
+       uint32_t        qp_state;
+       uint32_t        cur_qp_state;
+       uint32_t        path_mtu;
+       uint32_t        path_mig_state;
+       uint32_t        qkey;
+       uint32_t        rq_psn;
+       uint32_t        sq_psn;
+       uint32_t        dest_qp_num;
+       uint32_t        qp_access_flags;
+
+       struct ibv_kern_ah_attr ah_attr;
+       struct ibv_kern_ah_attr alt_ah_attr;
+
+       /* ib_qp_cap */
+       uint32_t        max_send_wr;
+       uint32_t        max_recv_wr;
+       uint32_t        max_send_sge;
+       uint32_t        max_recv_sge;
+       uint32_t        max_inline_data;
+
+       uint16_t        pkey_index;
+       uint16_t        alt_pkey_index;
+       uint8_t en_sqd_async_notify;
+       uint8_t sq_draining;
+       uint8_t max_rd_atomic;
+       uint8_t max_dest_rd_atomic;
+       uint8_t min_rnr_timer;
+       uint8_t port_num;
+       uint8_t timeout;
+       uint8_t retry_cnt;
+       uint8_t rnr_retry;
+       uint8_t alt_port_num;
+       uint8_t alt_timeout;
+       uint8_t reserved[5];
+};
+
+struct ibv_create_qp {
+       uint64_t sq_db_page;
+       uint64_t rq_db_page;
+       uint32_t sq_db_index;
+       uint32_t rq_db_index;
+       struct ibv_reg_mr mr;
+       uint64_t user_handle;
+       uint64_t send_cq_handle;
+       uint64_t recv_cq_handle;
+       uint64_t srq_handle;
+       uint32_t max_send_wr;
+       uint32_t max_recv_wr;
+       uint32_t max_send_sge;
+       uint32_t max_recv_sge;
+       uint32_t max_inline_data;
+       uint32_t lkey;  /* used only in kernel */
+       uint8_t  sq_sig_all;
+       uint8_t  qp_type;
+       uint8_t  is_srq;
+       uint8_t  reserved[5];
+};
+
+struct ibv_create_qp_resp {
+       struct ibv_reg_mr_resp mr;
+       uint64_t user_handle;
+       uint64_t qp_handle;
+       uint32_t qpn;
+       uint32_t max_send_wr;
+       uint32_t max_recv_wr;
+       uint32_t max_send_sge;
+       uint32_t max_recv_sge;
+       uint32_t max_inline_data;
+};
+
+struct ibv_modify_qp_resp {
+       enum ibv_qp_attr_mask attr_mask;
+       uint8_t qp_state;
+       uint8_t reserved[3];
+};
+
+struct ibv_kern_send_wr {
+       uint64_t wr_id;
+       uint32_t num_sge;
+       uint32_t opcode;
+       uint32_t send_flags;
+       uint32_t imm_data;
+       union {
+               struct {
+                       uint64_t remote_addr;
+                       uint32_t rkey;
+                       uint32_t reserved;
+               } rdma;
+               struct {
+                       uint64_t remote_addr;
+                       uint64_t compare_add;
+                       uint64_t swap;
+                       uint32_t rkey;
+                       uint32_t reserved;
+               } atomic;
+               struct {
+                       uint32_t ah;
+                       uint32_t remote_qpn;
+                       uint32_t remote_qkey;
+                       uint32_t reserved;
+               } ud;
+       } wr;
+};
+
+struct ibv_post_send {
+       uint32_t command;
+       uint16_t in_words;
+       uint16_t out_words;
+       uint64_t response;
+       uint32_t qp_handle;
+       uint32_t wr_count;
+       uint32_t sge_count;
+       uint32_t wqe_size;
+       struct ibv_kern_send_wr send_wr[];
+};
+
+struct ibv_post_send_resp {
+       uint32_t bad_wr;
+};
+
+struct ibv_kern_recv_wr {
+       uint64_t wr_id;
+       uint32_t num_sge;
+       uint32_t reserved;
+};
+
+struct ibv_post_recv {
+       uint32_t command;
+       uint16_t in_words;
+       uint16_t out_words;
+       uint64_t response;
+       uint32_t qp_handle;
+       uint32_t wr_count;
+       uint32_t sge_count;
+       uint32_t wqe_size;
+       struct ibv_kern_recv_wr recv_wr[];
+};
+
+struct ibv_post_recv_resp {
+       uint32_t bad_wr;
+};
+
+struct ibv_post_srq_recv {
+       uint32_t command;
+       uint16_t in_words;
+       uint16_t out_words;
+       uint64_t response;
+       uint32_t srq_handle;
+       uint32_t wr_count;
+       uint32_t sge_count;
+       uint32_t wqe_size;
+       struct ibv_kern_recv_wr recv_wr[];
+};
+
+struct ibv_post_srq_recv_resp {
+       uint32_t bad_wr;
+};
+
+struct ibv_create_ah {
+       struct ibv_reg_mr mr;   
+};
+
+struct ibv_create_ah_resp {
+       uint64_t start;
+       struct ibv_reg_mr_resp mr;
+       ib_av_attr_t            av_attr;
+};
+
+struct ibv_destroy_ah {
+       uint32_t command;
+       uint16_t in_words;
+       uint16_t out_words;
+       uint32_t ah_handle;
+};
+
+struct ibv_attach_mcast {
+       uint32_t command;
+       uint16_t in_words;
+       uint16_t out_words;
+       uint8_t  gid[16];
+       uint32_t qp_handle;
+       uint16_t mlid;
+       uint16_t reserved;
+       uint64_t driver_data[];
+};
+
+struct ibv_detach_mcast {
+       uint32_t command;
+       uint16_t in_words;
+       uint16_t out_words;
+       uint8_t  gid[16];
+       uint32_t qp_handle;
+       uint16_t mlid;
+       uint16_t reserved;
+       uint64_t driver_data[];
+};
+
+struct ibv_create_srq {
+       uint32_t command;
+       uint16_t in_words;
+       uint16_t out_words;
+       uint64_t response;
+       uint64_t user_handle;
+       uint32_t pd_handle;
+       uint32_t max_wr;
+       uint32_t max_sge;
+       uint32_t srq_limit;
+       uint64_t driver_data[];
+};
+
+struct ibv_create_srq_resp {
+       uint32_t srq_handle;
+};
+
+struct ibv_modify_srq {
+       uint32_t command;
+       uint16_t in_words;
+       uint16_t out_words;
+       uint32_t srq_handle;
+       uint32_t attr_mask;
+       uint32_t max_wr;
+       uint32_t srq_limit;
+       uint64_t driver_data[];
+};
+
+struct ibv_destroy_srq {
+       uint32_t command;
+       uint16_t in_words;
+       uint16_t out_words;
+       uint64_t response;
+       uint32_t srq_handle;
+       uint32_t reserved;
+};
+
+struct ibv_destroy_srq_resp {
+       uint32_t events_reported;
+};
+
+/*
+ * Compatibility with older ABI versions
+ */
+
+enum {
+       IB_USER_VERBS_CMD_QUERY_PARAMS_V2,
+       IB_USER_VERBS_CMD_GET_CONTEXT_V2,
+       IB_USER_VERBS_CMD_QUERY_DEVICE_V2,
+       IB_USER_VERBS_CMD_QUERY_PORT_V2,
+       IB_USER_VERBS_CMD_QUERY_GID_V2,
+       IB_USER_VERBS_CMD_QUERY_PKEY_V2,
+       IB_USER_VERBS_CMD_ALLOC_PD_V2,
+       IB_USER_VERBS_CMD_DEALLOC_PD_V2,
+       IB_USER_VERBS_CMD_CREATE_AH_V2,
+       IB_USER_VERBS_CMD_MODIFY_AH_V2,
+       IB_USER_VERBS_CMD_QUERY_AH_V2,
+       IB_USER_VERBS_CMD_DESTROY_AH_V2,
+       IB_USER_VERBS_CMD_REG_MR_V2,
+       IB_USER_VERBS_CMD_REG_SMR_V2,
+       IB_USER_VERBS_CMD_REREG_MR_V2,
+       IB_USER_VERBS_CMD_QUERY_MR_V2,
+       IB_USER_VERBS_CMD_DEREG_MR_V2,
+       IB_USER_VERBS_CMD_ALLOC_MW_V2,
+       IB_USER_VERBS_CMD_BIND_MW_V2,
+       IB_USER_VERBS_CMD_DEALLOC_MW_V2,
+       IB_USER_VERBS_CMD_CREATE_CQ_V2,
+       IB_USER_VERBS_CMD_RESIZE_CQ_V2,
+       IB_USER_VERBS_CMD_DESTROY_CQ_V2,
+       IB_USER_VERBS_CMD_POLL_CQ_V2,
+       IB_USER_VERBS_CMD_PEEK_CQ_V2,
+       IB_USER_VERBS_CMD_REQ_NOTIFY_CQ_V2,
+       IB_USER_VERBS_CMD_CREATE_QP_V2,
+       IB_USER_VERBS_CMD_QUERY_QP_V2,
+       IB_USER_VERBS_CMD_MODIFY_QP_V2,
+       IB_USER_VERBS_CMD_DESTROY_QP_V2,
+       IB_USER_VERBS_CMD_POST_SEND_V2,
+       IB_USER_VERBS_CMD_POST_RECV_V2,
+       IB_USER_VERBS_CMD_ATTACH_MCAST_V2,
+       IB_USER_VERBS_CMD_DETACH_MCAST_V2,
+       IB_USER_VERBS_CMD_CREATE_SRQ_V2,
+       IB_USER_VERBS_CMD_MODIFY_SRQ_V2,
+       IB_USER_VERBS_CMD_QUERY_SRQ_V2,
+       IB_USER_VERBS_CMD_DESTROY_SRQ_V2,
+       IB_USER_VERBS_CMD_POST_SRQ_RECV_V2,
+       /*
+        * Set commands that didn't exist to -1 so our compile-time
+        * trick opcodes in IBV_INIT_CMD() doesn't break.
+        */
+       IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL_V2 = -1,
+};
+
+struct ibv_destroy_cq_v1 {
+       uint32_t command;
+       uint16_t in_words;
+       uint16_t out_words;
+       uint32_t cq_handle;
+};
+
+struct ibv_destroy_qp_v1 {
+       uint32_t command;
+       uint16_t in_words;
+       uint16_t out_words;
+       uint32_t qp_handle;
+};
+
+struct ibv_destroy_srq_v1 {
+       uint32_t command;
+       uint16_t in_words;
+       uint16_t out_words;
+       uint32_t srq_handle;
+};
+
+struct ibv_get_context_v2 {
+       uint32_t command;
+       uint16_t in_words;
+       uint16_t out_words;
+       uint64_t response;
+       uint64_t cq_fd_tab;
+       uint64_t driver_data[];
+};
+
+struct ibv_create_cq_v2 {
+       uint32_t command;
+       uint16_t in_words;
+       uint16_t out_words;
+       uint64_t response;
+       uint64_t user_handle;
+       uint32_t cqe;
+       uint32_t event_handler;
+       uint64_t driver_data[];
+};
+
+struct ibv_modify_srq_v3 {
+       uint32_t command;
+       uint16_t in_words;
+       uint16_t out_words;
+       uint32_t srq_handle;
+       uint32_t attr_mask;
+       uint32_t max_wr;
+       uint32_t max_sge;
+       uint32_t srq_limit;
+       uint32_t reserved;
+       uint64_t driver_data[];
+};
+
+struct ibv_create_qp_resp_v3 {
+       uint32_t qp_handle;
+       uint32_t qpn;
+};
+
+#endif /* KERN_ABI_H */
diff --git a/trunk/hw/mthca/user/mlnx_uvp_memfree.c b/trunk/hw/mthca/user/mlnx_uvp_memfree.c
new file mode 100644 (file)
index 0000000..32e6750
--- /dev/null
@@ -0,0 +1,211 @@
+/*
+ * Copyright (c) 2005 Topspin Communications.  All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id: memfree.c 3283 2005-08-31 23:06:55Z roland $
+ */
+
+#include <mt_l2w.h>
+
+#include "mlnx_uvp.h"
+
+#define MTHCA_FREE_MAP_SIZE (MTHCA_DB_REC_PER_PAGE / BITS_PER_LONG)
+
+struct mthca_db_page {
+       unsigned long free[MTHCA_FREE_MAP_SIZE];
+       uint64_t     *db_rec;
+};
+
+struct mthca_db_table {
+       int                  npages;
+       int                  max_group1;
+       int                  min_group2;
+       HANDLE      mutex;
+       struct mthca_db_page page[];
+};
+
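+/*
+ * Doorbell records are kept in pages tracked by this table.  Group 0
+ * records (CQ arm and SQ) are allocated from page 0 upward, tracked by
+ * max_group1; group 1 records (CQ set_ci, RQ and SRQ) are allocated
+ * from the last page downward, tracked by min_group2.  The table is
+ * full once the two groups would meet.
+ */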
+int mthca_alloc_db(struct mthca_db_table *db_tab, enum mthca_db_type type,
+                  uint32_t **db)
+{
+       int i, j, k;
+       int group, start, end, dir;
+       int ret = 0;
+
+       WaitForSingleObject( db_tab->mutex, INFINITE );
+
+       switch (type) {
+       case MTHCA_DB_TYPE_CQ_ARM:
+       case MTHCA_DB_TYPE_SQ:
+               group = 0;
+               start = 0;
+               end   = db_tab->max_group1;
+               dir   = 1;
+               break;
+
+       case MTHCA_DB_TYPE_CQ_SET_CI:
+       case MTHCA_DB_TYPE_RQ:
+       case MTHCA_DB_TYPE_SRQ:
+               group = 1;
+               start = db_tab->npages - 1;
+               end   = db_tab->min_group2;
+               dir   = -1;
+               break;
+
+       default:
+               ret = -1;
+               goto out;
+       }
+
+       for (i = start; i != end; i += dir)
+               if (db_tab->page[i].db_rec)
+                       for (j = 0; j < MTHCA_FREE_MAP_SIZE; ++j)
+                               if (db_tab->page[i].free[j]) 
+                                       goto found;
+
+       if (db_tab->max_group1 >= db_tab->min_group2 - 1) {
+               ret = -1;
+               goto out;
+       }
+
+       if (posix_memalign((void **) &db_tab->page[i].db_rec, MTHCA_DB_REC_PAGE_SIZE,
+                          MTHCA_DB_REC_PAGE_SIZE)) {
+               ret = -1;
+               goto out;
+       }
+
+       memset(db_tab->page[i].db_rec, 0, MTHCA_DB_REC_PAGE_SIZE);
+       memset(db_tab->page[i].free, 0xff, sizeof db_tab->page[i].free);
+
+       if (group == 0)
+               ++db_tab->max_group1;
+       else
+               --db_tab->min_group2;
+
+found:
+       for (j = 0; j < MTHCA_FREE_MAP_SIZE; ++j) {
+               k = ffsl(db_tab->page[i].free[j]);
+               if (k)
+                       break;
+       }
+
+       if (!k) {
+               ret = -1;
+               goto out;
+       }
+
+       --k;
+       db_tab->page[i].free[j] &= ~(1UL << k);
+
+       j = j * BITS_PER_LONG + k;
+       if (group == 1)
+               j = MTHCA_DB_REC_PER_PAGE - 1 - j;
+
+       ret = i * MTHCA_DB_REC_PER_PAGE + j;
+       *db = (uint32_t *) &db_tab->page[i].db_rec[j];
+       
+out:
+       ReleaseMutex( db_tab->mutex );
+       return ret;
+}
+
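+/*
+ * Each doorbell record is two 32-bit words: word 0 is the counter the
+ * post paths update (e.g. *qp->sq.db); word 1, written here, identifies
+ * the owning queue number and the record type.
+ */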
+void mthca_set_db_qn(uint32_t *db, enum mthca_db_type type, uint32_t qn)
+{
+       db[1] = cl_hton32((qn << 8) | (type << 5));
+}
+
+void mthca_free_db(struct mthca_db_table *db_tab, enum mthca_db_type type, int db_index)
+{
+       int i, j;
+       struct mthca_db_page *page;
+
+       i = db_index / MTHCA_DB_REC_PER_PAGE;
+       j = db_index % MTHCA_DB_REC_PER_PAGE;
+
+       page = db_tab->page + i;
+
+       WaitForSingleObject( db_tab->mutex, INFINITE );
+       page->db_rec[j] = 0;
+
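+       /* group-2 pages are filled from the end of the page, so mirror the index */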
+       if (i >= db_tab->min_group2)
+               j = MTHCA_DB_REC_PER_PAGE - 1 - j;
+
+       page->free[j / BITS_PER_LONG] |= 1UL << (j % BITS_PER_LONG);
+
+       ReleaseMutex( db_tab->mutex );
+}
+
+struct mthca_db_table *mthca_alloc_db_tab(int uarc_size)
+{
+       struct mthca_db_table *db_tab;
+       int npages;
+       int i;
+
+       npages = uarc_size / MTHCA_DB_REC_PAGE_SIZE;
+       db_tab = cl_malloc(sizeof (struct mthca_db_table) +
+                       npages * sizeof (struct mthca_db_page));
+       if (!db_tab)
+               goto err_malloc;
+
+       db_tab->mutex = CreateMutex( NULL, FALSE, NULL );
+       if (!db_tab->mutex)
+               goto err_mutex;
+       db_tab->npages     = npages;
+       db_tab->max_group1 = 0;
+       db_tab->min_group2 = npages - 1;
+
+       for (i = 0; i < npages; ++i)
+               db_tab->page[i].db_rec = NULL;
+
+       goto end;
+
+err_mutex:
+       cl_free(db_tab);
+       db_tab = NULL;  /* don't hand a freed pointer back to the caller */
+err_malloc:
+end:
+       return db_tab;
+}
+
+void mthca_free_db_tab(struct mthca_db_table *db_tab)
+{
+       int i;
+
+       if (!db_tab)
+               return;
+
+       for (i = 0; i < db_tab->npages; ++i)
+               if (db_tab->page[i].db_rec)
+#ifdef NOT_USE_VIRTUAL_ALLOC   
+                       cl_free(db_tab->page[i].db_rec);
+#else
+                       VirtualFree( db_tab->page[i].db_rec, 0, MEM_RELEASE);
+#endif
+
+       cl_free(db_tab);
+}
diff --git a/trunk/hw/mthca/user/mlnx_uvp_qp.c b/trunk/hw/mthca/user/mlnx_uvp_qp.c
new file mode 100644 (file)
index 0000000..ed2b9a2
--- /dev/null
@@ -0,0 +1,1046 @@
+/*
+ * Copyright (c) 2005 Topspin Communications.  All rights reserved.
+ * Copyright (c) 2005 Mellanox Technologies Ltd.  All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id: qp.c 4214 2005-11-29 17:43:08Z roland $
+ */
+
+#include <mt_l2w.h>
+#include "mlnx_uvp.h"
+#include "mlnx_uvp_doorbell.h"
+#include "mlnx_uvp_wqe.h"
+#include "mlnx_ual_data.h"
+
+#if defined(EVENT_TRACING)
+#include "mlnx_uvp_qp.tmh"
+#endif
+
+static const uint8_t mthca_opcode[] = {
+       MTHCA_OPCODE_RDMA_WRITE,
+       MTHCA_OPCODE_RDMA_WRITE_IMM,
+       MTHCA_OPCODE_SEND,
+       MTHCA_OPCODE_SEND_IMM,
+       MTHCA_OPCODE_RDMA_READ,
+       MTHCA_OPCODE_ATOMIC_CS,
+       MTHCA_OPCODE_ATOMIC_FA
+};
+
+static enum mthca_wr_opcode conv_ibal_wr_opcode(struct _ib_send_wr *wr)
+{
+       enum mthca_wr_opcode opcode;
+
+       switch (wr->wr_type) {
+       case WR_SEND:
+               opcode = (wr->send_opt & IB_SEND_OPT_IMMEDIATE) ?
+                       MTHCA_OPCODE_SEND_IMM : MTHCA_OPCODE_SEND;
+               break;
+       case WR_RDMA_WRITE:
+               opcode = (wr->send_opt & IB_SEND_OPT_IMMEDIATE) ?
+                       MTHCA_OPCODE_RDMA_WRITE_IMM : MTHCA_OPCODE_RDMA_WRITE;
+               break;
+       case WR_RDMA_READ:
+               opcode = MTHCA_OPCODE_RDMA_READ;
+               break;
+       case WR_COMPARE_SWAP:
+               opcode = MTHCA_OPCODE_ATOMIC_CS;
+               break;
+       case WR_FETCH_ADD:
+               opcode = MTHCA_OPCODE_ATOMIC_FA;
+               break;
+       default:
+               opcode = MTHCA_OPCODE_INVALID;
+               break;
+       }
+       return opcode;
+}
+
+
+static void dump_wqe(uint32_t *wqe_ptr , struct mthca_qp *qp_ptr)
+{
+       net32_t *wqe = wqe_ptr;
+
+       (void) wqe;     /* avoid warning if mthca_dbg compiled away... */
+       UVP_PRINT(TRACE_LEVEL_INFORMATION,UVP_DBG_QP,("WQE contents  QPN 0x%06x \n",qp_ptr->ibv_qp.qp_num));
+       UVP_PRINT(TRACE_LEVEL_INFORMATION,UVP_DBG_QP,("WQE contents [%02x] %08x %08x %08x %08x \n",0
+               , cl_ntoh32(wqe[0]), cl_ntoh32(wqe[1]), cl_ntoh32(wqe[2]), cl_ntoh32(wqe[3])));
+       UVP_PRINT(TRACE_LEVEL_INFORMATION,UVP_DBG_QP,("WQE contents [%02x] %08x %08x %08x %08x \n",4
+               , cl_ntoh32(wqe[4]), cl_ntoh32(wqe[5]), cl_ntoh32(wqe[6]), cl_ntoh32(wqe[7])));
+       UVP_PRINT(TRACE_LEVEL_INFORMATION,UVP_DBG_QP,("WQE contents [%02x] %08x %08x %08x %08x \n",8
+               , cl_ntoh32(wqe[8]), cl_ntoh32(wqe[9]), cl_ntoh32(wqe[10]), cl_ntoh32(wqe[11])));
+       UVP_PRINT(TRACE_LEVEL_INFORMATION,UVP_DBG_QP,("WQE contents [%02x] %08x %08x %08x %08x \n",12
+               , cl_ntoh32(wqe[12]), cl_ntoh32(wqe[13]), cl_ntoh32(wqe[14]), cl_ntoh32(wqe[15])));
+}
+
+static void *get_recv_wqe(struct mthca_qp *qp, int n)
+{
+       return qp->buf + (n << qp->rq.wqe_shift);
+}
+
+static void *get_send_wqe(struct mthca_qp *qp, int n)
+{
+       void *wqe_addr = qp->buf + qp->send_wqe_offset + (n << qp->sq.wqe_shift);
+       UVP_PRINT(TRACE_LEVEL_INFORMATION,UVP_DBG_QP,
+               ("wqe %p, qp_buf %p, offset %#x,  index %d, shift %d \n",
+                wqe_addr, qp->buf, qp->send_wqe_offset, n, 
+               qp->sq.wqe_shift));
+       
+       return wqe_addr;
+}
+
+void mthca_init_qp_indices(struct mthca_qp *qp)
+{
+       qp->sq.next_ind  = 0;
+       qp->sq.last_comp = qp->sq.max - 1;
+       qp->sq.head      = 0;
+       qp->sq.tail      = 0;
+       qp->sq.last      = get_send_wqe(qp, qp->sq.max - 1);
+
+       qp->rq.next_ind  = 0;
+       qp->rq.last_comp = qp->rq.max - 1;
+       qp->rq.head      = 0;
+       qp->rq.tail      = 0;
+       qp->rq.last      = get_recv_wqe(qp, qp->rq.max - 1);
+}
+
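+/*
+ * Cheap work-queue overflow check: first look at head - tail without
+ * taking any lock; only if the queue appears full re-read it under the
+ * CQ lock, since completion processing (which advances the tail) runs
+ * with that lock held.
+ */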
+static inline int mthca_wq_overflow(struct mthca_wq *wq, int nreq, struct mthca_cq *cq)
+{
+       unsigned cur;
+
+       cur = wq->head - wq->tail;
+       if ((int)(cur + nreq) < wq->max)
+               return 0;
+
+       cl_spinlock_acquire(&cq->lock);
+       cur = wq->head - wq->tail;
+       cl_spinlock_release(&cq->lock);
+
+       return (int)(cur + nreq) >= wq->max;
+}
+
+
+int mthca_tavor_post_send(struct ibv_qp *ibqp, struct _ib_send_wr *wr,
+                         struct _ib_send_wr **bad_wr)
+{
+       struct mthca_qp *qp = to_mqp(ibqp);
+       uint8_t *wqe;
+       uint8_t *prev_wqe;
+       int ret = 0;
+       int nreq;
+       int i;
+       int size;
+       int size0 = 0;
+       uint32_t f0 = 0;
+       int ind;
+       int op0 = 0;
+       enum ib_wr_opcode opcode;
+       
+       UVP_ENTER(UVP_DBG_QP);
+       cl_spinlock_acquire(&qp->sq.lock);
+
+       /* XXX check that state is OK to post send */
+
+       ind = qp->sq.next_ind;
+
+       if (ibqp->state == IBV_QPS_RESET) {
+               /* nothing was posted; drop the SQ lock taken above before returning */
+               cl_spinlock_release(&qp->sq.lock);
+               UVP_EXIT(UVP_DBG_QP);
+               return -EBUSY;
+       }
+       for (nreq = 0; wr; ++nreq, wr = wr->p_next) {
+
+               if (mthca_wq_overflow(&qp->sq, nreq, to_mcq(qp->ibv_qp.send_cq))) {
+                       UVP_PRINT(TRACE_LEVEL_ERROR ,UVP_DBG_QP ,("SQ %06x full (%u head, %u tail,"
+                                       " %d max, %d nreq)\n", ibqp->qp_num,
+                                       qp->sq.head, qp->sq.tail,
+                                       qp->sq.max, nreq));
+                       ret = -ENOMEM;
+                       *bad_wr = wr;
+                       goto out;
+               }
+
+               wqe = get_send_wqe(qp, ind);
+               prev_wqe = qp->sq.last;
+               qp->sq.last = wqe;
+               opcode = conv_ibal_wr_opcode(wr);
+
+               ((struct mthca_next_seg *) wqe)->nda_op = 0;
+               ((struct mthca_next_seg *) wqe)->ee_nds = 0;
+               ((struct mthca_next_seg *) wqe)->flags =
+                       ((wr->send_opt & IB_SEND_OPT_SIGNALED) ?
+                        cl_hton32(MTHCA_NEXT_CQ_UPDATE) : 0) |
+                       ((wr->send_opt & IB_SEND_OPT_SOLICITED) ?
+                        cl_hton32(MTHCA_NEXT_SOLICIT) : 0)   |
+                       cl_hton32(1);
+               if (opcode == MTHCA_OPCODE_SEND_IMM||
+                   opcode == MTHCA_OPCODE_RDMA_WRITE_IMM)
+                       ((struct mthca_next_seg *) wqe)->imm = wr->immediate_data;
+
+               wqe += sizeof (struct mthca_next_seg);
+               size = sizeof (struct mthca_next_seg) / 16;
+
+
+               switch (ibqp->qp_type) {
+               case IB_QPT_RELIABLE_CONN:
+                       switch (opcode) {
+                       case MTHCA_OPCODE_ATOMIC_CS:
+                       case MTHCA_OPCODE_ATOMIC_FA:
+                               ((struct mthca_raddr_seg *) wqe)->raddr =
+                                       cl_hton64(wr->remote_ops.vaddr);
+                               ((struct mthca_raddr_seg *) wqe)->rkey =
+                                       cl_hton32(wr->remote_ops.rkey);
+                               ((struct mthca_raddr_seg *) wqe)->reserved = 0;
+
+                               wqe += sizeof (struct mthca_raddr_seg);
+
+                               if (opcode == MTHCA_OPCODE_ATOMIC_CS) {
+                                       ((struct mthca_atomic_seg *) wqe)->swap_add =
+                                               cl_hton64(wr->remote_ops.atomic2);
+                                       ((struct mthca_atomic_seg *) wqe)->compare =
+                                               cl_hton64(wr->remote_ops.atomic1);
+                               } else {
+                                       ((struct mthca_atomic_seg *) wqe)->swap_add =
+                                               cl_hton64(wr->remote_ops.atomic1);
+                                       ((struct mthca_atomic_seg *) wqe)->compare = 0;
+                               }
+
+                               wqe += sizeof (struct mthca_atomic_seg);
+                               size += (sizeof (struct mthca_raddr_seg) +
+                                        sizeof (struct mthca_atomic_seg)) / 16;
+                               break;
+
+                       case MTHCA_OPCODE_RDMA_WRITE:
+                       case MTHCA_OPCODE_RDMA_WRITE_IMM:
+                       case MTHCA_OPCODE_RDMA_READ:
+                               ((struct mthca_raddr_seg *) wqe)->raddr =
+                                       cl_hton64(wr->remote_ops.vaddr);
+                               ((struct mthca_raddr_seg *) wqe)->rkey =
+                                       cl_hton32(wr->remote_ops.rkey);
+                               ((struct mthca_raddr_seg *) wqe)->reserved = 0;
+                               wqe += sizeof (struct mthca_raddr_seg);
+                               size += sizeof (struct mthca_raddr_seg) / 16;
+                               break;
+
+                       default:
+                               /* No extra segments required for sends */
+                               break;
+                       }
+
+                       break;
+
+               case IB_QPT_UNRELIABLE_CONN:
+                       switch (opcode) {
+                       case MTHCA_OPCODE_RDMA_WRITE:
+                       case MTHCA_OPCODE_RDMA_WRITE_IMM:
+                               ((struct mthca_raddr_seg *) wqe)->raddr =
+                                       cl_hton64(wr->remote_ops.vaddr);
+                               ((struct mthca_raddr_seg *) wqe)->rkey =
+                                       cl_hton32(wr->remote_ops.rkey);
+                               ((struct mthca_raddr_seg *) wqe)->reserved = 0;
+                               wqe += sizeof (struct mthca_raddr_seg);
+                               size += sizeof (struct mthca_raddr_seg) / 16;
+                               break;
+
+                       default:
+                               /* No extra segments required for sends */
+                               break;
+                       }
+
+                       break;
+
+               case IB_QPT_UNRELIABLE_DGRM:
+                       {
+                               struct ibv_ah *ibv_ah = ((mlnx_ual_av_info_t*)wr->dgrm.ud.h_av)->ibv_ah;
+                               ((struct mthca_tavor_ud_seg *) wqe)->lkey =
+                                       cl_hton32(to_mah(ibv_ah)->key);
+                               ((struct mthca_tavor_ud_seg *) wqe)->av_addr =
+                                       cl_hton64((uint64_t)to_mah(ibv_ah)->av);
+                               ((struct mthca_tavor_ud_seg *) wqe)->dqpn = wr->dgrm.ud.remote_qp;
+                               ((struct mthca_tavor_ud_seg *) wqe)->qkey = wr->dgrm.ud.remote_qkey;
+
+                               wqe += sizeof (struct mthca_tavor_ud_seg);
+                               size += sizeof (struct mthca_tavor_ud_seg) / 16;
+                               break;
+                       }
+
+               default:
+                       break;
+               }
+
+               if ((int)wr->num_ds > qp->sq.max_gs) {
+                       UVP_PRINT(TRACE_LEVEL_ERROR  ,UVP_DBG_QP ,("SQ %06x too many gathers\n",ibqp->qp_num));
+                       ret = -ERANGE;
+                       *bad_wr = wr;
+                       goto out;
+               }
+//TODO sleybo:
+               if (wr->send_opt & IB_SEND_OPT_INLINE) {
+                       if (wr->num_ds) {
+                               struct mthca_inline_seg *seg = (struct mthca_inline_seg *)wqe;
+                               int s = 0;
+
+                               wqe += sizeof *seg;
+                               for (i = 0; i < (int)wr->num_ds; ++i) {
+                                       struct _ib_local_ds *sge = &wr->ds_array[i];
+
+                                       s += sge->length;
+
+                                       if (s > qp->max_inline_data) {
+                                               ret = -1;
+                                               *bad_wr = wr;
+                                               goto out;
+                                       }
+
+                                       memcpy(wqe, (void *) (ULONG_PTR) sge->vaddr,
+                                              sge->length);
+                                       wqe += sge->length;
+                               }
+
+                               seg->byte_count = cl_hton32(MTHCA_INLINE_SEG | s);
+                               size += align(s + sizeof *seg, 16) / 16;
+                       }
+               } else {
+                       for (i = 0; i < (int)wr->num_ds; ++i) {
+                               ((struct mthca_data_seg *) wqe)->byte_count =
+                                       cl_hton32(wr->ds_array[i].length);
+                               ((struct mthca_data_seg *) wqe)->lkey =
+                                       cl_hton32(wr->ds_array[i].lkey);
+                               ((struct mthca_data_seg *) wqe)->addr =
+                                       cl_hton64(wr->ds_array[i].vaddr);
+                               wqe += sizeof (struct mthca_data_seg);
+                               size += sizeof (struct mthca_data_seg) / 16;
+                       }
+               }
+
+               qp->wrid[ind + qp->rq.max] = wr->wr_id;
+
+               if (opcode == MTHCA_OPCODE_INVALID) {
+                       UVP_PRINT(TRACE_LEVEL_ERROR  ,UVP_DBG_QP ,("SQ %06x opcode invalid\n",ibqp->qp_num));
+                       ret = -EINVAL;
+                       *bad_wr = wr;
+                       goto out;
+               }
+
+               ((struct mthca_next_seg *) prev_wqe)->nda_op =
+                       cl_hton32(((ind << qp->sq.wqe_shift) +
+                       qp->send_wqe_offset) |opcode);
+               
+               wmb();
+               
+               ((struct mthca_next_seg *) prev_wqe)->ee_nds =
+                       cl_hton32((size0 ? 0 : MTHCA_NEXT_DBD) | size);
+
+               if (!size0) {
+                       size0 = size;
+                       op0   = opcode;
+               }
+               
+               if(0)
+                       dump_wqe( (uint32_t*)qp->sq.last,qp);
+               
+               ++ind;
+               if (unlikely(ind >= qp->sq.max))
+                       ind -= qp->sq.max;
+
+       }
+
+out:
+       if (likely(nreq)) {
+               uint32_t doorbell[2];
+
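+               /*
+                * Word 0 of the send doorbell combines the offset of the first
+                * new WQE with f0 and the first opcode; word 1 carries the QP
+                * number and size0, the first WQE's size in 16-byte chunks.
+                */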
+               doorbell[0] = cl_hton32(((qp->sq.next_ind << qp->sq.wqe_shift) +
+                                    qp->send_wqe_offset) | f0 | op0);
+               doorbell[1] = cl_hton32((ibqp->qp_num << 8) | size0);
+
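+               /*
+                * Make sure that descriptors are written before
+                * doorbell is rung.
+                */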
+               wmb();
+
+               mthca_write64(doorbell, to_mctx(ibqp->pd->context), MTHCA_SEND_DOORBELL);
+       }
+
+       qp->sq.next_ind = ind;
+       qp->sq.head    += nreq;
+
+       cl_spinlock_release(&qp->sq.lock);
+       
+       UVP_EXIT(UVP_DBG_QP);
+       return ret;
+}
+
+
+int mthca_tavor_post_recv(struct ibv_qp *ibqp, struct _ib_recv_wr *wr,
+                         struct _ib_recv_wr **bad_wr)
+{
+       struct mthca_qp *qp = to_mqp(ibqp);
+       uint32_t doorbell[2];
+       int ret = 0;
+       int nreq;
+       int i;
+       int size;
+       int size0 = 0;
+       int ind;
+       uint8_t *wqe;
+       uint8_t *prev_wqe;
+       
+       UVP_ENTER(UVP_DBG_QP);
+       
+       cl_spinlock_acquire(&qp->rq.lock);
+
+       /* XXX check that state is OK to post receive */
+       
+       ind = qp->rq.next_ind;
+       if (ibqp->state == IBV_QPS_RESET) {
+               /* nothing was posted; drop the RQ lock taken above before returning */
+               cl_spinlock_release(&qp->rq.lock);
+               UVP_EXIT(UVP_DBG_QP);
+               return -EBUSY;
+       }
+       for (nreq = 0; wr; ++nreq, wr = wr->p_next) {
+               if (unlikely(nreq == MTHCA_TAVOR_MAX_WQES_PER_RECV_DB)) {
+                       nreq = 0;
+
+                       doorbell[0] = cl_hton32((qp->rq.next_ind << qp->rq.wqe_shift) | size0);
+                       doorbell[1] = cl_hton32(ibqp->qp_num << 8); //TODO sleybo: add qpn to qp struct 
+
+                       /*
+                        * Make sure that descriptors are written
+                        * before doorbell is rung.
+                        */
+                       mb();
+
+                       mthca_write64(doorbell, to_mctx(ibqp->pd->context), MTHCA_RECV_DOORBELL);
+
+                       qp->rq.head += MTHCA_TAVOR_MAX_WQES_PER_RECV_DB;
+                       size0 = 0;
+               }
+
+               if (mthca_wq_overflow(&qp->rq, nreq, to_mcq(qp->ibv_qp.send_cq))) {
+                       UVP_PRINT(TRACE_LEVEL_ERROR,UVP_DBG_QP,("RQ %06x full (%u head, %u tail,"
+                                       " %d max, %d nreq)\n", ibqp->qp_num,
+                                       qp->rq.head, qp->rq.tail,
+                                       qp->rq.max, nreq));
+                       ret = -ENOMEM;
+                       *bad_wr = wr;
+                       goto out;
+               }
+
+               wqe = get_recv_wqe(qp, ind);
+               prev_wqe = qp->rq.last;
+               qp->rq.last = wqe;
+
+               ((struct mthca_next_seg *) wqe)->nda_op = 0;
+               ((struct mthca_next_seg *) wqe)->ee_nds =
+                       cl_hton32(MTHCA_NEXT_DBD);
+               ((struct mthca_next_seg *) wqe)->flags =
+                       cl_hton32(MTHCA_NEXT_CQ_UPDATE);
+
+               wqe += sizeof (struct mthca_next_seg);
+               size = sizeof (struct mthca_next_seg) / 16;
+
+               if (unlikely((int)wr->num_ds  > qp->rq.max_gs)) {
+                       UVP_PRINT(TRACE_LEVEL_ERROR  ,UVP_DBG_QP ,("RQ %06x too many gathers\n",ibqp->qp_num));
+                       ret = -ERANGE;
+                       *bad_wr = wr;
+                       goto out;
+               }
+
+               for (i = 0; i < (int)wr->num_ds; ++i) {
+                       ((struct mthca_data_seg *) wqe)->byte_count =
+                               cl_hton32(wr->ds_array[i].length);
+                       ((struct mthca_data_seg *) wqe)->lkey =
+                               cl_hton32(wr->ds_array[i].lkey);
+                       ((struct mthca_data_seg *) wqe)->addr =
+                               cl_hton64(wr->ds_array[i].vaddr);
+                       wqe += sizeof (struct mthca_data_seg);
+                       size += sizeof (struct mthca_data_seg) / 16;
+               }
+
+               qp->wrid[ind] = wr->wr_id;
+
+               ((struct mthca_next_seg *) prev_wqe)->nda_op =
+                       cl_hton32((ind << qp->rq.wqe_shift) | 1);
+               ((struct mthca_next_seg *) prev_wqe)->ee_nds =
+                       cl_hton32(MTHCA_NEXT_DBD | size);
+
+               if (!size0)
+                       size0 = size;
+
+               ++ind;
+               if (unlikely(ind >= qp->rq.max))
+                       ind -= qp->rq.max;
+       }
+
+out:
+       if (likely(nreq)) {
+               doorbell[0] = cl_hton32((qp->rq.next_ind << qp->rq.wqe_shift) | size0);
+               doorbell[1] = cl_hton32((ibqp->qp_num << 8) | nreq);
+
+               /*
+                * Make sure that descriptors are written before
+                * doorbell is rung.
+                */
+               mb();
+
+               mthca_write64(doorbell, to_mctx(ibqp->pd->context), MTHCA_RECV_DOORBELL);
+       }
+
+       qp->rq.next_ind = ind;
+       qp->rq.head    += nreq;
+
+       cl_spinlock_release(&qp->rq.lock);
+       UVP_EXIT(UVP_DBG_QP);
+       return ret;
+}
+
+int mthca_arbel_post_send(struct ibv_qp *ibqp, struct _ib_send_wr *wr,
+                         struct _ib_send_wr **bad_wr)
+{
+       struct mthca_qp *qp = to_mqp(ibqp);
+       uint32_t doorbell[2];
+       uint8_t *wqe;
+       uint8_t *prev_wqe;
+       int ret = 0;
+       int nreq;       
+       int i;
+       int size;
+       int size0 = 0;
+       uint32_t f0 = 0;
+       int ind;
+       uint8_t op0 = 0;
+       enum ib_wr_opcode opcode;
+       
+       UVP_ENTER(UVP_DBG_QP);
+       
+       cl_spinlock_acquire(&qp->sq.lock);
+
+       /* XXX check that state is OK to post send */
+
+       ind = qp->sq.head & (qp->sq.max - 1);
+       if (ibqp->state == IBV_QPS_RESET) {
+               /* nothing was posted; drop the SQ lock taken above before returning */
+               cl_spinlock_release(&qp->sq.lock);
+               UVP_EXIT(UVP_DBG_QP);
+               return -EBUSY;
+       }
+       for (nreq = 0; wr; ++nreq, wr = wr->p_next) {
+               if (unlikely(nreq == MTHCA_ARBEL_MAX_WQES_PER_SEND_DB)) {
+                       nreq = 0;
+
+                       doorbell[0] = cl_hton32((MTHCA_ARBEL_MAX_WQES_PER_SEND_DB << 24) |
+                                           ((qp->sq.head & 0xffff) << 8) | f0 | op0);
+                       doorbell[1] = cl_hton32((ibqp->qp_num << 8) | size0);
+                       qp->sq.head += MTHCA_ARBEL_MAX_WQES_PER_SEND_DB;
+                       size0 = 0;
+
+                       /*
+                        * Make sure that descriptors are written before
+                        * doorbell record.
+                        */
+                       wmb();
+                       *qp->sq.db = cl_hton32(qp->sq.head & 0xffff);
+
+                       /*
+                        * Make sure doorbell record is written before we
+                        * write MMIO send doorbell.
+                        */
+                       wmb();
+                       mthca_write64(doorbell, to_mctx(ibqp->pd->context), MTHCA_SEND_DOORBELL);
+
+               }
+
+               if (mthca_wq_overflow(&qp->sq, nreq, to_mcq(qp->ibv_qp.send_cq))) {
+                       UVP_PRINT(TRACE_LEVEL_ERROR,UVP_DBG_QP,("SQ %06x full (%u head, %u tail,"
+                                       " %d max, %d nreq)\n", ibqp->qp_num,
+                                       qp->sq.head, qp->sq.tail,
+                                       qp->sq.max, nreq));                     
+                       ret = -ENOMEM;
+                       *bad_wr = wr;
+                       goto out;
+               }
+
+               wqe = get_send_wqe(qp, ind);
+               prev_wqe = qp->sq.last;
+               qp->sq.last = wqe;
+               opcode = conv_ibal_wr_opcode(wr);
+
+               ((struct mthca_next_seg *) wqe)->flags =
+                       ((wr->send_opt & IB_SEND_OPT_SIGNALED) ?
+                        cl_hton32(MTHCA_NEXT_CQ_UPDATE) : 0) |
+                       ((wr->send_opt & IB_SEND_OPT_SOLICITED) ?
+                        cl_hton32(MTHCA_NEXT_SOLICIT) : 0)   |
+                       cl_hton32(1);
+               if (opcode == MTHCA_OPCODE_SEND_IMM||
+                       opcode == MTHCA_OPCODE_RDMA_WRITE_IMM)
+                       ((struct mthca_next_seg *) wqe)->imm = wr->immediate_data;
+
+               wqe += sizeof (struct mthca_next_seg);
+               size = sizeof (struct mthca_next_seg) / 16;
+
+               switch (ibqp->qp_type) {
+               case IB_QPT_RELIABLE_CONN:
+                       switch (opcode) {
+                       case MTHCA_OPCODE_ATOMIC_CS:
+                       case MTHCA_OPCODE_ATOMIC_FA:
+                               ((struct mthca_raddr_seg *) wqe)->raddr =
+                                       cl_hton64(wr->remote_ops.vaddr);
+                               ((struct mthca_raddr_seg *) wqe)->rkey =
+                                       cl_hton32((wr->remote_ops.rkey));
+                               ((struct mthca_raddr_seg *) wqe)->reserved = 0;
+
+                               wqe += sizeof (struct mthca_raddr_seg);
+
+                               if (opcode == MTHCA_OPCODE_ATOMIC_CS) {
+                                       ((struct mthca_atomic_seg *) wqe)->swap_add =
+                                               cl_hton64(wr->remote_ops.atomic2);
+                                       ((struct mthca_atomic_seg *) wqe)->compare =
+                                               cl_hton64(wr->remote_ops.atomic1);
+                               } else {
+                                       ((struct mthca_atomic_seg *) wqe)->swap_add =
+                                               cl_hton64(wr->remote_ops.atomic1);
+                                       ((struct mthca_atomic_seg *) wqe)->compare = 0;
+                               }
+
+                               wqe += sizeof (struct mthca_atomic_seg);
+                               size += (sizeof (struct mthca_raddr_seg) +
+                                        sizeof (struct mthca_atomic_seg)) / 16;
+                               break;
+
+                       case MTHCA_OPCODE_RDMA_READ:
+                       case MTHCA_OPCODE_RDMA_WRITE:
+                       case MTHCA_OPCODE_RDMA_WRITE_IMM:
+                               ((struct mthca_raddr_seg *) wqe)->raddr =
+                                       cl_hton64(wr->remote_ops.vaddr);
+                               ((struct mthca_raddr_seg *) wqe)->rkey =
+                                       cl_hton32(wr->remote_ops.rkey);
+                               ((struct mthca_raddr_seg *) wqe)->reserved = 0;
+                               wqe += sizeof (struct mthca_raddr_seg);
+                               size += sizeof (struct mthca_raddr_seg) / 16;
+                               break;
+
+                       default:
+                               /* No extra segments required for sends */
+                               break;
+                       }
+
+                       break;
+
+               case IB_QPT_UNRELIABLE_CONN:
+                       switch (opcode) {
+                       case MTHCA_OPCODE_RDMA_WRITE:
+                       case MTHCA_OPCODE_RDMA_WRITE_IMM:
+                               ((struct mthca_raddr_seg *) wqe)->raddr =
+                                       cl_hton64(wr->remote_ops.vaddr);
+                               ((struct mthca_raddr_seg *) wqe)->rkey =
+                                       cl_hton32(wr->remote_ops.rkey);
+                               ((struct mthca_raddr_seg *) wqe)->reserved = 0;
+                               wqe += sizeof (struct mthca_raddr_seg);
+                               size += sizeof (struct mthca_raddr_seg) / 16;
+                               break;
+
+                       default:
+                               /* No extra segments required for sends */
+                               break;
+                       }
+
+                       break;
+
+               case IB_QPT_UNRELIABLE_DGRM:
+                       {
+                               struct ibv_ah *ibv_ah = ((mlnx_ual_av_info_t*)wr->dgrm.ud.h_av)->ibv_ah;
+                               memcpy(((struct mthca_arbel_ud_seg *) wqe)->av,
+                                      to_mah(ibv_ah)->av, sizeof ( struct mthca_av));
+                               ((struct mthca_arbel_ud_seg *) wqe)->dqpn = wr->dgrm.ud.remote_qp;
+                               ((struct mthca_arbel_ud_seg *) wqe)->qkey = wr->dgrm.ud.remote_qkey;
+
+
+                               wqe += sizeof (struct mthca_arbel_ud_seg);
+                               size += sizeof (struct mthca_arbel_ud_seg) / 16;
+                               break;
+                       }
+
+               default:
+                       break;
+               }
+
+               if ((int)wr->num_ds > qp->sq.max_gs) {
+                       UVP_PRINT(TRACE_LEVEL_ERROR  ,UVP_DBG_QP ,("SQ %06x too many gathers\n",ibqp->qp_num));
+                       ret = -ERANGE;
+                       *bad_wr = wr;
+                       goto out;
+               }
+
+               if (wr->send_opt & IB_SEND_OPT_INLINE) {
+                       if (wr->num_ds) {
+                               struct mthca_inline_seg *seg = (struct mthca_inline_seg *)wqe;
+                               int s = 0;
+
+                               wqe += sizeof *seg;
+                               for (i = 0; i < (int)wr->num_ds; ++i) {
+                                       struct _ib_local_ds *sge = &wr->ds_array[i];
+
+                                       s += sge->length;
+
+                                       if (s > qp->max_inline_data) {
+                                               ret = -1;
+                                               *bad_wr = wr;
+                                               goto out;
+                                       }
+
+                                       memcpy(wqe, (void *) (uintptr_t) sge->vaddr,
+                                              sge->length);
+                                       wqe += sge->length;
+                               }
+
+                               seg->byte_count = cl_hton32(MTHCA_INLINE_SEG | s);
+                               size += align(s + sizeof *seg, 16) / 16;
+                       }
+               } else {
+
+                       for (i = 0; i < (int)wr->num_ds; ++i) {
+                               ((struct mthca_data_seg *) wqe)->byte_count =
+                                       cl_hton32(wr->ds_array[i].length);
+                               ((struct mthca_data_seg *) wqe)->lkey =
+                                       cl_hton32(wr->ds_array[i].lkey);
+                               ((struct mthca_data_seg *) wqe)->addr =
+                                       cl_hton64(wr->ds_array[i].vaddr);
+                               wqe += sizeof (struct mthca_data_seg);
+                               size += sizeof (struct mthca_data_seg) / 16;
+                       }
+//TODO do this also in kernel
+//                     size += wr->num_ds * (sizeof *seg / 16);
+               }
+
+               qp->wrid[ind + qp->rq.max] = wr->wr_id;
+
+               if (opcode == MTHCA_OPCODE_INVALID) {
+                       UVP_PRINT(TRACE_LEVEL_ERROR  ,UVP_DBG_QP ,("SQ %06x opcode invalid\n",ibqp->qp_num));
+                       ret = -EINVAL;
+                       *bad_wr = wr;
+                       goto out;
+               }
+
+               ((struct mthca_next_seg *) prev_wqe)->nda_op =
+                       cl_hton32(((ind << qp->sq.wqe_shift) +
+                              qp->send_wqe_offset) |
+                             opcode);
+               wmb();
+               ((struct mthca_next_seg *) prev_wqe)->ee_nds =
+                       cl_hton32(MTHCA_NEXT_DBD | size);
+
+               if (!size0) {
+                       size0 = size;
+                       op0   = opcode;
+               }
+
+               ++ind;
+               if (unlikely(ind >= qp->sq.max))
+                       ind -= qp->sq.max;
+       }
+
+out:
+       if (likely(nreq)) {
+               doorbell[0] = cl_hton32((nreq << 24) |
+                                   ((qp->sq.head & 0xffff) << 8) | f0 | op0);
+               doorbell[1] = cl_hton32((ibqp->qp_num << 8) | size0);
+
+               qp->sq.head += nreq;
+
+               /*
+                * Make sure that descriptors are written before
+                * doorbell record.
+                */
+               wmb();
+               *qp->sq.db = cl_hton32(qp->sq.head & 0xffff);
+
+               /*
+                * Make sure doorbell record is written before we
+                * write MMIO send doorbell.
+                */
+               wmb();
+               mthca_write64(doorbell, to_mctx(ibqp->pd->context), MTHCA_SEND_DOORBELL);
+       }
+
+       cl_spinlock_release(&qp->sq.lock);
+
+       UVP_EXIT(UVP_DBG_QP);
+       
+       return ret;
+}
+
+int mthca_arbel_post_recv(struct ibv_qp *ibqp, struct _ib_recv_wr *wr,
+                         struct _ib_recv_wr **bad_wr)
+{
+       struct mthca_qp *qp = to_mqp(ibqp);
+       int ret = 0;
+       int nreq;
+       int ind;
+       int i;
+       uint8_t *wqe;
+       
+       UVP_ENTER(UVP_DBG_QP);
+       
+       cl_spinlock_acquire(&qp->rq.lock);
+
+       /* XXX check that state is OK to post receive */
+
+       ind = qp->rq.head & (qp->rq.max - 1);
+       if (ibqp->state == IBV_QPS_RESET) {
+               /* nothing was posted; drop the RQ lock taken above before returning */
+               cl_spinlock_release(&qp->rq.lock);
+               UVP_EXIT(UVP_DBG_QP);
+               return -EBUSY;
+       }
+       for (nreq = 0; wr; ++nreq, wr = wr->p_next) {
+               if (mthca_wq_overflow(&qp->rq, nreq, to_mcq(qp->ibv_qp.send_cq))) {//TODO sleybo: check the cq
+                       UVP_PRINT(TRACE_LEVEL_ERROR ,UVP_DBG_QP ,("RQ %06x full (%u head, %u tail,"
+                                       " %d max, %d nreq)\n", ibqp->qp_num,
+                                       qp->rq.head, qp->rq.tail,
+                                       qp->rq.max, nreq));
+                       ret = -ENOMEM;
+                       *bad_wr = wr;
+                       goto out;
+               }
+
+               wqe = get_recv_wqe(qp, ind);
+
+               ((struct mthca_next_seg *) wqe)->flags = 0;
+
+               wqe += sizeof (struct mthca_next_seg);
+
+               if (unlikely((int)wr->num_ds > qp->rq.max_gs)) {
+                       UVP_PRINT(TRACE_LEVEL_ERROR ,UVP_DBG_QP ,("RQ %06x too many scatter entries\n",ibqp->qp_num));
+                       ret = -ERANGE;
+                       *bad_wr = wr;
+                       goto out;
+               }
+
+               for (i = 0; i < (int)wr->num_ds; ++i) {
+                       ((struct mthca_data_seg *) wqe)->byte_count =
+                               cl_hton32(wr->ds_array[i].length);
+                       ((struct mthca_data_seg *) wqe)->lkey =
+                               cl_hton32(wr->ds_array[i].lkey);
+                       ((struct mthca_data_seg *) wqe)->addr =
+                               cl_hton64(wr->ds_array[i].vaddr);
+                       wqe += sizeof (struct mthca_data_seg);
+               }
+
+               if (i < qp->rq.max_gs) {
+                       ((struct mthca_data_seg *) wqe)->byte_count = 0;
+                       ((struct mthca_data_seg *) wqe)->lkey = cl_hton32(MTHCA_INVAL_LKEY);
+                       ((struct mthca_data_seg *) wqe)->addr = 0;
+               }
+
+               qp->wrid[ind] = wr->wr_id;
+
+               ++ind;
+               if (unlikely(ind >= qp->rq.max))
+                       ind -= qp->rq.max;
+       }
+out:
+       if (likely(nreq)) {
+               qp->rq.head += nreq;
+
+               /*
+                * Make sure that descriptors are written before
+                * doorbell record.
+                */
+               mb();
+               *qp->rq.db = cl_hton32(qp->rq.head & 0xffff);
+       }
+
+       cl_spinlock_release(&qp->rq.lock);
+       
+       UVP_EXIT(UVP_DBG_QP);
+       
+       return ret;
+}
+
+int mthca_alloc_qp_buf(struct ibv_pd *pd, struct ibv_qp_cap *cap,
+                      ib_qp_type_t type, struct mthca_qp *qp)
+{
+       int size;
+       int max_sq_sge;
+
+       qp->rq.max_gs    = cap->max_recv_sge;
+       qp->sq.max_gs    = cap->max_send_sge;
+       max_sq_sge       = align(cap->max_inline_data + sizeof (struct mthca_inline_seg),
+                                sizeof (struct mthca_data_seg)) / sizeof (struct mthca_data_seg);
+       if (max_sq_sge < (int)cap->max_send_sge)
+               max_sq_sge = cap->max_send_sge;
+
+       qp->wrid = cl_malloc((qp->rq.max + qp->sq.max) * sizeof (uint64_t));
+       if (!qp->wrid)
+               return -1;
+
+       size = sizeof (struct mthca_next_seg) +
+               qp->rq.max_gs * sizeof (struct mthca_data_seg);
+
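+       /* round the receive WQE size up to a power of two (at least 64 bytes) */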
+       for (qp->rq.wqe_shift = 6; 1 << qp->rq.wqe_shift < size;
+            qp->rq.wqe_shift++)
+               ; /* nothing */
+
+       size = max_sq_sge * sizeof (struct mthca_data_seg);
+       switch (type) {
+       case IB_QPT_UNRELIABLE_DGRM:
+               size += mthca_is_memfree(pd->context) ?
+                       sizeof (struct mthca_arbel_ud_seg) :
+                       sizeof (struct mthca_tavor_ud_seg);
+               break;
+
+       case IB_QPT_UNRELIABLE_CONN:
+               size += sizeof (struct mthca_raddr_seg);
+               break;
+
+       case IB_QPT_RELIABLE_CONN:
+               size += sizeof (struct mthca_raddr_seg);
+               /*
+                * An atomic op will require an atomic segment, a
+                * remote address segment and one scatter entry.
+                */
+               if (size < (sizeof (struct mthca_atomic_seg) +
+                           sizeof (struct mthca_raddr_seg) +
+                           sizeof (struct mthca_data_seg)))
+                       size = (sizeof (struct mthca_atomic_seg) +
+                               sizeof (struct mthca_raddr_seg) +
+                               sizeof (struct mthca_data_seg));
+               break;
+
+       default:
+               break;
+       }
+
+       /* Make sure that we have enough space for a bind request */
+       if (size < sizeof (struct mthca_bind_seg))
+               size = sizeof (struct mthca_bind_seg);
+
+       size += sizeof (struct mthca_next_seg);
+
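+       /* round the send WQE size up to a power of two (at least 64 bytes) */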
+       for (qp->sq.wqe_shift = 6; 1 << qp->sq.wqe_shift < size;
+            qp->sq.wqe_shift++)
+               ; /* nothing */
+
+       qp->send_wqe_offset = align(qp->rq.max << qp->rq.wqe_shift,
+                                   1 << qp->sq.wqe_shift);
+
+       qp->buf_size = qp->send_wqe_offset + (qp->sq.max << qp->sq.wqe_shift);
+
+       if (posix_memalign(&qp->buf, g_page_size,
+                          align(qp->buf_size, g_page_size))) {
+               cl_free(qp->wrid);
+               return -1;
+       }
+
+       memset(qp->buf, 0, qp->buf_size);
+
+       if (mthca_is_memfree(pd->context)) {
+               struct mthca_next_seg *next;
+               struct mthca_data_seg *scatter;
+               int i;
+               uint32_t sz;
+
+               sz = cl_hton32((sizeof (struct mthca_next_seg) +
+                           qp->rq.max_gs * sizeof (struct mthca_data_seg)) / 16);
+
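+               /*
+                * For mem-free HCAs the receive ring is pre-initialized:
+                * link every WQE to the next one, publish its size in
+                * ee_nds and mark all scatter entries with the invalid
+                * L_Key so unused entries are ignored by the HCA.
+                */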
+               for (i = 0; i < qp->rq.max; ++i) {
+                       next = get_recv_wqe(qp, i);
+                       next->nda_op = cl_hton32(((i + 1) & (qp->rq.max - 1)) <<
+                                            qp->rq.wqe_shift);
+                       next->ee_nds = sz;
+
+                       for (scatter = (void *) (next + 1);
+                            (void *) scatter < (void *) (next + (1 << qp->rq.wqe_shift));
+                            ++scatter)
+                               scatter->lkey = cl_hton32(MTHCA_INVAL_LKEY);
+               }
+
+               for (i = 0; i < qp->sq.max; ++i) {
+                       next = get_send_wqe(qp, i);
+                       next->nda_op = cl_hton32((((i + 1) & (qp->sq.max - 1)) <<
+                                             qp->sq.wqe_shift) +
+                                            qp->send_wqe_offset);
+               }
+       }
+
+       qp->sq.last = get_send_wqe(qp, qp->sq.max - 1);
+       qp->rq.last = get_recv_wqe(qp, qp->rq.max - 1);
+
+       return 0;
+}
+
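+/*
+ * Userspace QP-number lookup table: the top-level index is taken from
+ * the low QPN bits shifted by qp_table_shift; each top-level slot
+ * lazily allocates a second-level array of qp_table_mask + 1 QP
+ * pointers and keeps a reference count of the QPs stored in it.
+ */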
+struct mthca_qp *mthca_find_qp(struct mthca_context *ctx, uint32_t qpn)
+{
+       int tind = (qpn & (ctx->num_qps - 1)) >> ctx->qp_table_shift;
+
+       if (ctx->qp_table[tind].refcnt)
+               return ctx->qp_table[tind].table[qpn & ctx->qp_table_mask];
+       else
+               return NULL;
+}
+
+int mthca_store_qp(struct mthca_context *ctx, uint32_t qpn, struct mthca_qp *qp)
+{
+       int tind = (qpn & (ctx->num_qps - 1)) >> ctx->qp_table_shift;
+       int ret = 0;
+
+       WaitForSingleObject( ctx->qp_table_mutex, INFINITE );
+
+       if (!ctx->qp_table[tind].refcnt) {
+               ctx->qp_table[tind].table = cl_malloc(
+                       (ctx->qp_table_mask + 1) * sizeof (struct mthca_qp *));
+               if (!ctx->qp_table[tind].table) {
+                       ret = -1;
+                       goto out;
+               }
+       }
+       ++ctx->qp_table[tind].refcnt;
+       ctx->qp_table[tind].table[qpn & ctx->qp_table_mask] = qp;
+
+out:
+       ReleaseMutex( ctx->qp_table_mutex );
+       return ret;
+}
+
+void mthca_clear_qp(struct mthca_context *ctx, uint32_t qpn)
+{
+       int tind = (qpn & (ctx->num_qps - 1)) >> ctx->qp_table_shift;
+
+       WaitForSingleObject( ctx->qp_table_mutex, INFINITE );
+
+       if (!--ctx->qp_table[tind].refcnt)
+               cl_free(ctx->qp_table[tind].table);
+       else
+               ctx->qp_table[tind].table[qpn & ctx->qp_table_mask] = NULL;
+       
+       ReleaseMutex( ctx->qp_table_mutex );
+}
+
+int mthca_free_err_wqe(struct mthca_qp *qp, int is_send,
+                      int index, int *dbd, uint32_t *new_wqe)
+{
+       struct mthca_next_seg *next;
+
+       /*
+        * For SRQs, all WQEs generate a CQE, so we're always at the
+        * end of the doorbell chain.
+        */
+       if (qp->ibv_qp.srq) {
+               *new_wqe = 0;
+               return 0;
+       }
+
+       if (is_send)
+               next = get_send_wqe(qp, index);
+       else
+               next = get_recv_wqe(qp, index);
+
+       *dbd = !!(next->ee_nds & cl_hton32(MTHCA_NEXT_DBD));
+       if (next->ee_nds & cl_hton32(0x3f))
+               *new_wqe = (next->nda_op & cl_hton32(~0x3f)) |
+                       (next->ee_nds & cl_hton32(0x3f));
+       else
+               *new_wqe = 0;
+
+       return 0;
+}
+
diff --git a/trunk/hw/mthca/user/mlnx_uvp_srq.c b/trunk/hw/mthca/user/mlnx_uvp_srq.c
new file mode 100644 (file)
index 0000000..ab5e8bd
--- /dev/null
@@ -0,0 +1,326 @@
+/*
+ * Copyright (c) 2005 Cisco Systems.  All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id: srq.c 4017 2005-11-10 17:20:24Z roland $
+ */
+
+#include <mt_l2w.h>
+
+#include "mlnx_uvp.h"
+#include "mlnx_uvp_doorbell.h"
+#include "mlnx_uvp_wqe.h"
+
+#if defined(EVENT_TRACING)
+#include "mlnx_uvp_srq.tmh"
+#endif
+
+static void *get_wqe(struct mthca_srq *srq, int n)
+{
+       return (uint8_t*)srq->buf + (n << srq->wqe_shift);
+}
+
+/*
+ * Return a pointer to the location within a WQE that we're using as a
+ * link when the WQE is in the free list.  We use the imm field at an
+ * offset of 12 bytes because in the Tavor case, posting a WQE may
+ * overwrite the next segment of the previous WQE, but a receive WQE
+ * will never touch the imm field.  This avoids corrupting our free
+ * list if the previous WQE has already completed and been put on the
+ * free list when we post the next WQE.
+ */
+static inline int *wqe_to_link(void *wqe)
+{
+       return (int *) ((uint8_t*)wqe + 12);
+}
+
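+/* Return a WQE to the SRQ free list by linking it after last_free. */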
+void mthca_free_srq_wqe(struct mthca_srq *srq, int ind)
+{
+       cl_spinlock_acquire(&srq->lock);
+
+       if (srq->first_free >= 0)
+               *wqe_to_link(get_wqe(srq, srq->last_free)) = ind;
+       else
+               srq->first_free = ind;
+
+       *wqe_to_link(get_wqe(srq, ind)) = -1;
+       srq->last_free = ind;
+
+       cl_spinlock_release(&srq->lock);
+}
+
+int mthca_tavor_post_srq_recv(struct ibv_srq *ibsrq,
+                             struct _ib_recv_wr *wr,
+                             struct _ib_recv_wr **bad_wr)
+{
+       struct mthca_srq *srq = to_msrq(ibsrq);
+       uint32_t doorbell[2];
+       int err = 0;
+       int first_ind;
+       int ind;
+       int next_ind;
+       int nreq;
+       int i;
+       uint8_t *wqe;
+       uint8_t *prev_wqe;
+
+       cl_spinlock_acquire(&srq->lock);
+
+       first_ind = srq->first_free;
+
+       for (nreq = 0; wr; ++nreq, wr = wr->p_next) {
+               if (unlikely(nreq == MTHCA_TAVOR_MAX_WQES_PER_RECV_DB)) {
+                       nreq = 0;
+
+                       doorbell[0] = cl_hton32(first_ind << srq->wqe_shift);
+                       doorbell[1] = cl_hton32(srq->srqn << 8);
+
+                       /*
+                        * Make sure that descriptors are written
+                        * before doorbell is rung.
+                        */
+                       wmb();
+
+                       mthca_write64(doorbell, to_mctx(ibsrq->context), MTHCA_RECV_DOORBELL);
+
+                       first_ind = srq->first_free;
+               }
+
+               ind = srq->first_free;
+
+               if (ind < 0) {
+                       UVP_PRINT(TRACE_LEVEL_ERROR  ,UVP_DBG_QP ,("SRQ %06x full\n", srq->srqn));
+                       err = -1;
+                       *bad_wr = wr;
+                       break;
+               }
+
+               wqe       = get_wqe(srq, ind);
+               next_ind  = *wqe_to_link(wqe);
+
+               if (next_ind < 0) {
+                       UVP_PRINT(TRACE_LEVEL_ERROR  ,UVP_DBG_QP  ,("SRQ %06x full\n", srq->srqn));
+                       err = -ENOMEM;
+                       *bad_wr = wr;
+                       break;
+               }
+
+               prev_wqe  = srq->last;
+               srq->last = wqe;
+
+               ((struct mthca_next_seg *) wqe)->nda_op = 0;
+               ((struct mthca_next_seg *) wqe)->ee_nds = 0;
+               /* flags field will always remain 0 */
+
+               wqe += sizeof (struct mthca_next_seg);
+
+               if (unlikely((int)wr->num_ds > srq->max_gs)) {
+                       err = -1;
+                       *bad_wr = wr;
+                       srq->last = prev_wqe;
+                       break;
+               }
+
+               for (i = 0; i < (int)wr->num_ds; ++i) {
+                       ((struct mthca_data_seg *) wqe)->byte_count =
+                               cl_hton32(wr->ds_array[i].length);
+                       ((struct mthca_data_seg *) wqe)->lkey =
+                               cl_hton32(wr->ds_array[i].lkey);
+                       ((struct mthca_data_seg *) wqe)->addr =
+                               htonll(wr->ds_array[i].vaddr);
+                       wqe += sizeof (struct mthca_data_seg);
+               }
+
+               if (i < srq->max_gs) {
+                       ((struct mthca_data_seg *) wqe)->byte_count = 0;
+                       ((struct mthca_data_seg *) wqe)->lkey = cl_hton32(MTHCA_INVAL_LKEY);
+                       ((struct mthca_data_seg *) wqe)->addr = 0;
+               }
+
+               ((struct mthca_next_seg *) prev_wqe)->nda_op =
+                       cl_hton32((ind << srq->wqe_shift) | 1);
+               mb();
+               ((struct mthca_next_seg *) prev_wqe)->ee_nds =
+                       cl_hton32(MTHCA_NEXT_DBD);
+
+               srq->wrid[ind]  = wr->wr_id;
+               srq->first_free = next_ind;
+       }
+
+       if (nreq) {
+               doorbell[0] = cl_hton32(first_ind << srq->wqe_shift);
+               doorbell[1] = cl_hton32((srq->srqn << 8) | nreq);
+
+               /*
+                * Make sure that descriptors are written before
+                * doorbell is rung.
+                */
+               wmb();
+
+               mthca_write64(doorbell, to_mctx(ibsrq->context), MTHCA_RECV_DOORBELL);
+       }
+
+       cl_spinlock_release(&srq->lock);
+       return err;
+}
+
+int mthca_arbel_post_srq_recv(struct ibv_srq *ibsrq,
+                             struct _ib_recv_wr *wr,
+                             struct _ib_recv_wr **bad_wr)
+{
+       struct mthca_srq *srq = to_msrq(ibsrq);
+       int err = 0;
+       int ind;
+       int next_ind;
+       int nreq;
+       int i;
+       uint8_t *wqe;
+
+       cl_spinlock_acquire(&srq->lock);
+
+       for (nreq = 0; wr; ++nreq, wr = wr->p_next) {
+               ind = srq->first_free;
+
+               if (ind < 0) {
+                       UVP_PRINT(TRACE_LEVEL_ERROR  ,UVP_DBG_QP ,("SRQ %06x full\n", srq->srqn));
+                       err = -ENOMEM;
+                       *bad_wr = wr;
+                       break;
+               }
+
+               wqe       = get_wqe(srq, ind);
+               next_ind  = *wqe_to_link(wqe);
+
+               if (next_ind < 0) {
+                       UVP_PRINT(TRACE_LEVEL_ERROR  ,UVP_DBG_LOW  ,("SRQ %06x full\n", srq->srqn));
+                       err = -ENOMEM;
+                       *bad_wr = wr;
+                       break;
+               }
+
+               ((struct mthca_next_seg *) wqe)->nda_op =
+                       cl_hton32((next_ind << srq->wqe_shift) | 1);
+               ((struct mthca_next_seg *) wqe)->ee_nds = 0;
+               /* flags field will always remain 0 */
+
+               wqe += sizeof (struct mthca_next_seg);
+
+               if (unlikely((int)wr->num_ds > srq->max_gs)) {
+                       err = -1;
+                       *bad_wr = wr;
+                       break;
+               }
+
+               for (i = 0; i < (int)wr->num_ds; ++i) {
+                       ((struct mthca_data_seg *) wqe)->byte_count =
+                               cl_hton32(wr->ds_array[i].length);
+                       ((struct mthca_data_seg *) wqe)->lkey =
+                               cl_hton32(wr->ds_array[i].lkey);
+                       ((struct mthca_data_seg *) wqe)->addr =
+                               htonll(wr->ds_array[i].vaddr);
+                       wqe += sizeof (struct mthca_data_seg);
+               }
+
+               if (i < srq->max_gs) {
+                       ((struct mthca_data_seg *) wqe)->byte_count = 0;
+                       ((struct mthca_data_seg *) wqe)->lkey = cl_hton32(MTHCA_INVAL_LKEY);
+                       ((struct mthca_data_seg *) wqe)->addr = 0;
+               }
+
+               srq->wrid[ind]  = wr->wr_id;
+               srq->first_free = next_ind;
+       }
+
+       if (likely(nreq)) {
+               srq->counter += (uint16_t)nreq;
+
+               /*
+                * Make sure that descriptors are written before
+                * we write doorbell record.
+                */
+               wmb();
+               *srq->db = cl_hton32(srq->counter);
+       }
+
+       cl_spinlock_release(&srq->lock);
+       return err;
+}
+
+int mthca_alloc_srq_buf(struct ibv_pd *pd, struct ibv_srq_attr *attr,
+                      struct mthca_srq *srq)
+{
+       struct mthca_data_seg *scatter;
+       uint8_t *wqe;
+       int size;
+       int i;
+
+       srq->wrid = cl_malloc(srq->max * sizeof (uint64_t));
+       if (!srq->wrid)
+               return -1;
+
+       size = sizeof (struct mthca_next_seg) +
+               srq->max_gs * sizeof (struct mthca_data_seg);
+
+       for (srq->wqe_shift = 6; 1 << srq->wqe_shift < size; ++srq->wqe_shift)
+               ; /* nothing */
+
+       srq->buf_size = srq->max << srq->wqe_shift;
+
+       if (posix_memalign(&srq->buf, g_page_size,
+                          align(srq->buf_size, g_page_size))) {
+               cl_free(srq->wrid);
+               return -1;
+       }
+
+       memset(srq->buf, 0, srq->buf_size);
+
+       /*
+        * Now initialize the SRQ buffer so that all of the WQEs are
+        * linked into the list of free WQEs.  In addition, set the
+        * scatter list L_Keys to the sentinel value of 0x100.
+        */
+
+       for (i = 0; i < srq->max; ++i) {
+               wqe = get_wqe(srq, i);
+
+               *wqe_to_link(wqe) = i < srq->max - 1 ? i + 1 : -1;
+
+               for (scatter = (struct mthca_data_seg *)(wqe + sizeof (struct mthca_next_seg));
+                    (void *) scatter < (void*)(wqe + (1 << srq->wqe_shift));
+                    ++scatter)
+                       scatter->lkey = cl_hton32(MTHCA_INVAL_LKEY);
+       }
+
+       srq->first_free = 0;
+       srq->last_free  = srq->max - 1;
+       srq->last       = get_wqe(srq, srq->max - 1);
+
+       return 0;
+}
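A short worked example of the stride computation above may help (the numbers are illustrative, not taken from the driver; the struct sizes come from mlnx_uvp_wqe.h later in this patch, where both mthca_next_seg and mthca_data_seg are 16 bytes):

    /* Illustrative sketch of the wqe_shift rounding in mthca_alloc_srq_buf. */
    int max_gs = 4;                               /* hypothetical SRQ max_sge   */
    int size = 16 + max_gs * 16;                  /* 80 bytes of WQE payload    */
    int wqe_shift;
    for (wqe_shift = 6; 1 << wqe_shift < size; ++wqe_shift)
            ;                                     /* stops at wqe_shift == 7    */
    /* Each WQE occupies 1 << 7 = 128 bytes; an SRQ with srq->max == 64
     * entries therefore needs buf_size = 64 << 7 = 8192 bytes. */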
diff --git a/trunk/hw/mthca/user/mlnx_uvp_verbs.c b/trunk/hw/mthca/user/mlnx_uvp_verbs.c
new file mode 100644 (file)
index 0000000..c155995
--- /dev/null
@@ -0,0 +1,668 @@
+/*
+ * Copyright (c) 2005 Topspin Communications.  All rights reserved.
+ * Copyright (c) 2005 Cisco Systems.  All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id: verbs.c 4182 2005-11-28 21:14:30Z roland $
+ */
+
+#include <mt_l2w.h>
+
+#include "mlnx_uvp.h"
+#include "mx_abi.h"
+
+struct ibv_pd *mthca_alloc_pd(struct ibv_context *context, struct ibv_alloc_pd_resp *resp)
+{
+       struct mthca_pd           *pd;
+
+       pd = cl_malloc(sizeof *pd);
+       if (!pd)
+               goto err_malloc;
+
+       if (!mthca_is_memfree(context)) {
+               pd->ah_list = NULL;
+               pd->ah_mutex = CreateMutex( NULL, FALSE, NULL );
+               if (!pd->ah_mutex) 
+                       goto err_mutex;
+       }
+
+       /* fill response fields */
+       pd->ibv_pd.context = context;   
+       pd->ibv_pd.handle = resp->pd_handle;
+       pd->pdn = resp->pdn;
+
+       return &pd->ibv_pd;
+
+err_mutex:
+       cl_free(pd);
+err_malloc:
+       return NULL;
+}
+
+int mthca_free_pd(struct ibv_pd *ibv_pd)
+{
+       struct mthca_pd *pd = to_mpd(ibv_pd);
+       if (!mthca_is_memfree(ibv_pd->context)) 
+               CloseHandle(pd->ah_mutex);
+       cl_free(pd);
+       return 0;
+}
+
+static struct ibv_mr *__mthca_reg_mr(struct ibv_pd *pd, void *addr,
+                                    size_t length, uint64_t hca_va,
+                                    enum ibv_access_flags access)
+{
+       struct ibv_mr *mr;
+       struct ibv_reg_mr cmd;
+
+       mr = cl_malloc(sizeof *mr);
+       if (!mr)
+               return NULL;
+
+#ifdef WIN_TO_BE_CHANGED
+       if (ibv_cmd_reg_mr(pd, addr, length, hca_va,
+                          access, mr, &cmd, sizeof cmd)) {
+               cl_free(mr);
+               return NULL;
+       }
+#endif
+       return mr;
+}
+
+struct ibv_mr *mthca_reg_mr(struct ibv_pd *pd, void *addr,
+                           size_t length, enum ibv_access_flags access)
+{
+       return __mthca_reg_mr(pd, addr, length, (uintptr_t) addr, access);
+}
+
+int mthca_dereg_mr(struct ibv_mr *mr)
+{
+       int ret;
+
+#ifdef WIN_TO_BE_CHANGED
+       ret = ibv_cmd_dereg_mr(mr);
+       if (ret)
+               return ret;
+#endif
+
+       cl_free(mr);
+       return 0;
+}
+
+/* allocate create_cq infrastructure and fill its request parameters structure */
+struct ibv_cq *mthca_create_cq_pre(struct ibv_context *context, int *p_cqe,
+                              struct ibv_create_cq *req)
+{
+       struct mthca_cq            *cq;
+       int                         nent;
+       int                         ret;
+
+       cq = cl_malloc(sizeof *cq);
+       if (!cq)
+               goto exit;
+
+       cl_spinlock_construct(&cq->lock);
+       if (cl_spinlock_init(&cq->lock))
+               goto err;
+
+       for (nent = 1; nent <= *p_cqe; nent <<= 1)
+               ; /* nothing */
+
+       if (posix_memalign(&cq->buf, g_page_size,
+                          align(nent * MTHCA_CQ_ENTRY_SIZE, g_page_size)))
+               goto err;
+
+       mthca_init_cq_buf(cq, nent);
+
+       if (mthca_is_memfree(context)) {
+               cq->arm_sn          = 1;
+               cq->set_ci_db_index = mthca_alloc_db(to_mctx(context)->db_tab,
+                                                    MTHCA_DB_TYPE_CQ_SET_CI,
+                                                    &cq->set_ci_db);
+               if (cq->set_ci_db_index < 0)
+                       goto err_unreg;
+
+               cq->arm_db_index    = mthca_alloc_db(to_mctx(context)->db_tab,
+                                                    MTHCA_DB_TYPE_CQ_ARM,
+                                                    &cq->arm_db);
+               if (cq->arm_db_index < 0)
+                       goto err_set_db;
+
+               req->arm_db_page  = db_align(cq->arm_db);
+               req->set_db_page  = db_align(cq->set_ci_db);
+               req->arm_db_index = cq->arm_db_index;
+               req->set_db_index = cq->set_ci_db_index;
+       }
+
+       req->mr.start = (uint64_t)(ULONG_PTR)cq->buf;
+       req->mr.length = nent * MTHCA_CQ_ENTRY_SIZE;
+       req->mr.hca_va = 0;
+       req->mr.pd_handle    = to_mctx(context)->pd->handle;
+       req->mr.pdn = to_mpd(to_mctx(context)->pd)->pdn;
+       req->mr.access_flags = MTHCA_ACCESS_LOCAL_WRITE;
+       req->user_handle = (uint64_t)(ULONG_PTR)cq;
+       req->cqe = *p_cqe;
+       *p_cqe = nent-1;
+//     *p_cqe = *p_cqe;        // return the same value
+//     cq->ibv_cq.cqe = nent -1;
+       return &cq->ibv_cq;
+
+err_set_db:
+       if (mthca_is_memfree(context))
+               mthca_free_db(to_mctx(context)->db_tab, MTHCA_DB_TYPE_CQ_SET_CI,
+                             cq->set_ci_db_index);
+
+err_unreg:
+       cl_free(cq->buf);
+
+err:
+       cl_free(cq);
+exit:
+       return ERR_PTR(-ENOMEM);
+}
+
+struct ibv_cq *mthca_create_cq_post(struct ibv_context *context, 
+                              struct ibv_create_cq_resp *resp)
+{
+       struct mthca_cq   *cq;
+       int                         ret;
+
+       cq = (struct mthca_cq *)(ULONG_PTR)resp->user_handle;
+
+       cq->cqn = resp->cqn;
+       cq->mr.handle = resp->mr.mr_handle;
+       cq->mr.lkey = resp->mr.lkey;
+       cq->mr.rkey = resp->mr.rkey;
+       cq->mr.pd = to_mctx(context)->pd;
+       cq->mr.context = context;
+       cq->ibv_cq.cqe = resp->cqe;
+       cq->ibv_cq.handle = resp->cq_handle;
+       cq->ibv_cq.context = context;
+
+       if (mthca_is_memfree(context)) {
+               mthca_set_db_qn(cq->set_ci_db, MTHCA_DB_TYPE_CQ_SET_CI, cq->cqn);
+               mthca_set_db_qn(cq->arm_db,    MTHCA_DB_TYPE_CQ_ARM,    cq->cqn);
+       }
+
+       return &cq->ibv_cq;
+
+}
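CQ creation is split into a _pre call that fills the ibv_create_cq request and a _post call that consumes the kernel's ibv_create_cq_resp. A minimal sketch of how a caller presumably strings the two together; the step in the middle stands for whatever kernel round-trip the UVP library performs and is not a real function here:

    /* Hypothetical calling sequence for the two-phase CQ creation. */
    struct ibv_create_cq      req;
    struct ibv_create_cq_resp resp;
    int cqe = 256;
    struct ibv_cq *cq;

    cq = mthca_create_cq_pre(context, &cqe, &req);   /* allocates buffer and doorbells */
    if (IS_ERR(cq))
            return (int)PTR_ERR(cq);
    /* ... hand req to the kernel driver and receive resp back ... */
    cq = mthca_create_cq_post(context, &resp);       /* fills cqn, lkey, handle        */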
+
+int mthca_destroy_cq(struct ibv_cq *cq)
+{
+       int ret;
+
+       if (mthca_is_memfree(cq->context)) {
+               mthca_free_db(to_mctx(cq->context)->db_tab, MTHCA_DB_TYPE_CQ_SET_CI,
+                             to_mcq(cq)->set_ci_db_index);
+               mthca_free_db(to_mctx(cq->context)->db_tab, MTHCA_DB_TYPE_CQ_ARM,
+                             to_mcq(cq)->arm_db_index);
+       }
+
+#ifdef NOT_USE_VIRTUAL_ALLOC   
+       cl_free(to_mcq(cq)->buf);
+#else
+       VirtualFree( to_mcq(cq)->buf, 0, MEM_RELEASE);
+#endif
+
+       
+       cl_spinlock_destroy(&((struct mthca_cq *)cq)->lock);
+       cl_free(to_mcq(cq));
+
+       return 0;
+}
+
+static int align_queue_size(struct ibv_context *context, int size, int spare)
+{
+       int ret;
+
+       /*
+        * If someone asks for a 0-sized queue, presumably they're not
+        * going to use it.  So don't mess with their size.
+        */
+       if (!size)
+               return 0;
+
+       if (mthca_is_memfree(context)) {
+               for (ret = 1; ret < size + spare; ret <<= 1)
+                       ; /* nothing */
+
+               return ret;
+       } else
+               return size + spare;
+}
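For memfree (Arbel) devices the helper above rounds size + spare up to the next power of two, while on Tavor it simply adds the spare entry. For example (illustrative numbers):

    /* Sketch: requesting room for 100 WQEs plus one spare entry. */
    int n = align_queue_size(pd->context, 100, 1);   /* 128 on memfree HCAs, 101 on Tavor */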
+
+#ifdef WIN_TO_BE_CHANGED
+struct ibv_srq *mthca_create_srq(struct ibv_pd *pd,
+                                struct ibv_srq_init_attr *attr)
+{
+       struct mthca_create_srq      cmd;
+       struct mthca_create_srq_resp resp;
+       struct mthca_srq            *srq;
+       int                          ret;
+
+       /* Sanity check SRQ size before proceeding */
+       if (attr->attr.max_wr > 16 << 20 || attr->attr.max_sge > 64)
+               return NULL;
+
+       srq = cl_malloc(sizeof *srq);
+       if (!srq)
+               return NULL;
+
+       cl_spinlock_construct(&srq->lock);
+       if (cl_spinlock_init(&srq->lock))
+               goto err;
+
+       srq->max     = align_queue_size(pd->context, attr->attr.max_wr, 1);
+       srq->max_gs  = attr->attr.max_sge;
+       srq->counter = 0;
+
+       if (mthca_alloc_srq_buf(pd, &attr->attr, srq))
+               goto err;
+
+       srq->mr = __mthca_reg_mr(pd, srq->buf, srq->buf_size, 0, 0);
+       if (!srq->mr)
+               goto err_free;
+
+       srq->mr->context = pd->context;
+
+       if (mthca_is_memfree(pd->context)) {
+               srq->db_index = mthca_alloc_db(to_mctx(pd->context)->db_tab,
+                                              MTHCA_DB_TYPE_SRQ, &srq->db);
+               if (srq->db_index < 0)
+                       goto err_unreg;
+
+               cmd.db_page  = db_align(srq->db);
+               cmd.db_index = srq->db_index;
+       }
+
+       cmd.lkey = srq->mr->lkey;
+
+#ifdef WIN_TO_BE_CHANGED
+       ret = ibv_cmd_create_srq(pd, &srq->ibv_srq, attr,
+                                &cmd.ibv_cmd, sizeof cmd,
+                                &resp.ibv_resp, sizeof resp);
+       if (ret)
+               goto err_db;
+
+       srq->srqn = resp.srqn;
+
+       if (mthca_is_memfree(pd->context))
+               mthca_set_db_qn(srq->db, MTHCA_DB_TYPE_SRQ, srq->srqn);
+
+       return &srq->ibv_srq;
+
+err_db:
+#endif
+       if (mthca_is_memfree(pd->context))
+               mthca_free_db(to_mctx(pd->context)->db_tab, MTHCA_DB_TYPE_SRQ,
+                             srq->db_index);
+
+err_unreg:
+       mthca_dereg_mr(srq->mr);
+
+err_free:
+       cl_free(srq->wrid);
+#ifdef NOT_USE_VIRTUAL_ALLOC   
+       cl_free(srq->buf);
+#else
+       VirtualFree( srq->buf, 0, MEM_RELEASE);
+#endif
+
+err:
+       cl_free(srq);
+
+       return NULL;
+}
+
+int mthca_modify_srq(struct ibv_srq *srq,
+                    struct ibv_srq_attr *attr,
+                    enum ibv_srq_attr_mask attr_mask)
+{
+       struct ibv_modify_srq cmd;
+
+#ifdef WIN_TO_BE_CHANGED
+       return ibv_cmd_modify_srq(srq, attr, attr_mask, &cmd, sizeof cmd);
+#else
+       return -ENOSYS;
+#endif
+}
+
+int mthca_destroy_srq(struct ibv_srq *srq)
+{
+       int ret;
+
+#ifdef WIN_TO_BE_CHANGED
+       ret = ibv_cmd_destroy_srq(srq);
+       if (ret)
+               return ret;
+#endif
+
+       if (mthca_is_memfree(srq->context))
+               mthca_free_db(to_mctx(srq->context)->db_tab, MTHCA_DB_TYPE_SRQ,
+                             to_msrq(srq)->db_index);
+
+       mthca_dereg_mr(to_msrq(srq)->mr);
+
+       cl_spinlock_destroy(&((struct mthca_srq *)srq)->lock);
+
+#ifdef NOT_USE_VIRTUAL_ALLOC   
+       cl_free(to_msrq(srq)->buf);
+#else
+       VirtualFree( to_msrq(srq)->buf, 0, MEM_RELEASE);
+#endif
+       cl_free(to_msrq(srq)->wrid);
+       cl_free(to_msrq(srq));
+
+       return 0;
+}
+#endif
+
+struct ibv_qp *mthca_create_qp_pre(struct ibv_pd *pd, 
+       struct ibv_qp_init_attr *attr, struct ibv_create_qp *req)
+{
+       struct mthca_qp       *qp;
+       struct ibv_context *context = pd->context;
+       int                    ret;
+
+       UVP_ENTER(UVP_DBG_QP);
+       /* Sanity check QP size before proceeding */
+       if (attr->cap.max_send_wr     > 65536 ||
+           attr->cap.max_recv_wr     > 65536 ||
+           attr->cap.max_send_sge    > 64    ||
+           attr->cap.max_recv_sge    > 64    ||
+           attr->cap.max_inline_data > 1024) {
+               ret = -EINVAL;
+               UVP_PRINT(TRACE_LEVEL_ERROR ,UVP_DBG_QP ,("sanity checks  failed (%d)\n",ret));
+               goto exit;
+       }
+
+       qp = cl_malloc(sizeof *qp);
+       if (!qp) {
+               ret = -ENOMEM;
+               UVP_PRINT(TRACE_LEVEL_ERROR ,UVP_DBG_QP ,("cl_malloc  failed (%d)\n",ret));
+               goto err_nomem;
+       }       
+
+       qp->sq.max = align_queue_size(context, attr->cap.max_send_wr, 0);
+       qp->rq.max = align_queue_size(context, attr->cap.max_recv_wr, 0);
+
+       if (mthca_alloc_qp_buf(pd, &attr->cap, attr->qp_type, qp)) {
+               ret = -ENOMEM;
+               UVP_PRINT(TRACE_LEVEL_ERROR ,UVP_DBG_QP ,("mthca_alloc_qp_buf  failed (%d)\n",ret));
+               goto err_nomem;
+       } 
+
+       mthca_init_qp_indices(qp);
+
+       cl_spinlock_construct(&qp->sq.lock);
+       cl_spinlock_construct(&qp->rq.lock);
+       if (cl_spinlock_init(&qp->sq.lock) || cl_spinlock_init(&qp->rq.lock)) {
+               ret = -EFAULT;
+               UVP_PRINT(TRACE_LEVEL_ERROR ,UVP_DBG_QP ,("cl_spinlock_init failed (%d)\n",ret));
+               goto err_spinlock;
+       }
+
+       if (mthca_is_memfree(context)) {
+               qp->sq.db_index = mthca_alloc_db(to_mctx(context)->db_tab,
+                                                MTHCA_DB_TYPE_SQ,
+                                                &qp->sq.db);
+               if (qp->sq.db_index < 0)
+                       goto err_spinlock;
+
+               qp->rq.db_index = mthca_alloc_db(to_mctx(context)->db_tab,
+                                                MTHCA_DB_TYPE_RQ,
+                                                &qp->rq.db);
+               if (qp->rq.db_index < 0)
+                       goto err_sq_db;
+
+               req->sq_db_page  = db_align(qp->sq.db);
+               req->rq_db_page  = db_align(qp->rq.db);
+               req->sq_db_index = qp->sq.db_index;
+               req->rq_db_index = qp->rq.db_index;
+       }
+
+       // fill the rest of the QP fields
+       qp->ibv_qp.pd = pd;
+       qp->ibv_qp.send_cq = attr->send_cq;
+       qp->ibv_qp.recv_cq = attr->recv_cq;
+       qp->ibv_qp.srq = attr->srq;
+       qp->ibv_qp.state = IBV_QPS_RESET;
+       qp->ibv_qp.qp_type = attr->qp_type;
+
+       // fill the rest of the request fields
+       req->mr.start = (uint64_t)(ULONG_PTR)qp->buf;
+       req->mr.length = qp->buf_size;
+       req->mr.hca_va = 0;
+       req->mr.pd_handle    = pd->handle;
+       req->mr.pdn = to_mpd(pd)->pdn;
+       req->mr.access_flags = 0;       //local read
+       req->user_handle = (uint64_t)(ULONG_PTR)qp;
+       req->send_cq_handle = attr->send_cq->handle;
+       req->recv_cq_handle = attr->recv_cq->handle;
+       req->srq_handle = (attr->srq) ? attr->srq->handle : 0;
+       req->max_send_wr = attr->cap.max_send_wr;
+       req->max_recv_wr = attr->cap.max_recv_wr;
+       req->max_send_sge = attr->cap.max_send_sge;
+       req->max_recv_sge = attr->cap.max_recv_sge;
+       req->max_inline_data = attr->cap.max_inline_data;
+       req->sq_sig_all = (uint8_t)attr->sq_sig_all;
+       req->qp_type = attr->qp_type;
+       req->is_srq = !!attr->srq;
+
+
+       UVP_EXIT(UVP_DBG_QP);
+       return &qp->ibv_qp;
+
+err_sq_db:
+       if (mthca_is_memfree(context))
+               mthca_free_db(to_mctx(context)->db_tab, 
+                       MTHCA_DB_TYPE_SQ, qp->sq.db_index);
+
+err_spinlock:
+       cl_free(qp->wrid);
+#ifdef NOT_USE_VIRTUAL_ALLOC   
+       cl_free(qp->buf);
+#else
+       VirtualFree( qp->buf, 0, MEM_RELEASE);
+#endif
+
+err_nomem:
+       cl_free(qp);
+
+exit:
+       
+       UVP_EXIT(UVP_DBG_QP);
+       return ERR_PTR(ret);
+}
+
+struct ibv_qp *mthca_create_qp_post(struct ibv_pd *pd, 
+       struct ibv_create_qp_resp *resp)
+{
+       struct mthca_qp       *qp;
+       int                    ret;
+       UVP_ENTER(UVP_DBG_QP);
+       qp = (struct mthca_qp *)(ULONG_PTR)resp->user_handle;
+
+       qp->ibv_qp.handle                       = resp->qp_handle;
+       qp->ibv_qp.qp_num               = resp->qpn;
+       qp->sq.max                              = resp->max_send_wr;
+       qp->rq.max                              = resp->max_recv_wr;
+       qp->sq.max_gs                   = resp->max_send_sge;
+       qp->rq.max_gs                   = resp->max_recv_sge;
+       qp->max_inline_data     = resp->max_inline_data;
+       qp->mr.handle = resp->mr.mr_handle;
+       qp->mr.lkey = resp->mr.lkey;
+       qp->mr.rkey = resp->mr.rkey;
+       qp->mr.pd = pd;
+       qp->mr.context = pd->context;
+
+       if (mthca_is_memfree(pd->context)) {
+               mthca_set_db_qn(qp->sq.db, MTHCA_DB_TYPE_SQ, qp->ibv_qp.qp_num);
+               mthca_set_db_qn(qp->rq.db, MTHCA_DB_TYPE_RQ, qp->ibv_qp.qp_num);
+       }
+
+       ret = mthca_store_qp(to_mctx(pd->context), qp->ibv_qp.qp_num, qp);
+       if (ret)
+               goto err_store_qp;
+
+       UVP_EXIT(UVP_DBG_QP);
+       return &qp->ibv_qp;
+
+err_store_qp:
+       UVP_EXIT(UVP_DBG_QP);
+       return ERR_PTR(ret);
+}
+
+
+int mthca_modify_qp(struct ibv_qp *qp, struct ibv_qp_attr *attr,
+                   enum ibv_qp_attr_mask attr_mask)
+{
+       int ret = 0;
+
+       if (attr_mask & IBV_QP_STATE)
+               qp->state = attr->qp_state;
+
+       if ((attr_mask & IBV_QP_STATE) &&
+           (attr->qp_state == IBV_QPS_RESET)) {
+               mthca_cq_clean(to_mcq(qp->recv_cq), qp->qp_num,
+                              qp->srq ? to_msrq(qp->srq) : NULL);
+               if (qp->send_cq != qp->recv_cq)
+                       mthca_cq_clean(to_mcq(qp->send_cq), qp->qp_num, NULL);
+
+               mthca_init_qp_indices(to_mqp(qp));
+
+               if (mthca_is_memfree(qp->pd->context)) {
+                       *to_mqp(qp)->sq.db = 0;
+                       *to_mqp(qp)->rq.db = 0;
+               }
+       }
+
+       return ret;
+}
+
+int mthca_destroy_qp(struct ibv_qp *qp)
+{
+       int ret;
+
+       mthca_cq_clean(to_mcq(qp->recv_cq), qp->qp_num,
+                      qp->srq ? to_msrq(qp->srq) : NULL);
+       if (qp->send_cq != qp->recv_cq)
+               mthca_cq_clean(to_mcq(qp->send_cq), qp->qp_num, NULL);
+
+       cl_spinlock_acquire(&to_mcq(qp->send_cq)->lock);
+       if (qp->send_cq != qp->recv_cq)
+               cl_spinlock_acquire(&to_mcq(qp->recv_cq)->lock);
+       mthca_clear_qp(to_mctx(qp->pd->context), qp->qp_num);
+       if (qp->send_cq != qp->recv_cq)
+               cl_spinlock_release(&to_mcq(qp->recv_cq)->lock);
+       cl_spinlock_release(&to_mcq(qp->send_cq)->lock);
+
+       if (mthca_is_memfree(qp->pd->context)) {
+               mthca_free_db(to_mctx(qp->pd->context)->db_tab, MTHCA_DB_TYPE_RQ,
+                             to_mqp(qp)->rq.db_index);
+               mthca_free_db(to_mctx(qp->pd->context)->db_tab, MTHCA_DB_TYPE_SQ,
+                             to_mqp(qp)->sq.db_index);
+       }
+
+       cl_spinlock_destroy(&((struct mthca_qp *)qp)->sq.lock);
+       cl_spinlock_destroy(&((struct mthca_qp *)qp)->rq.lock);
+
+#ifdef NOT_USE_VIRTUAL_ALLOC   
+       cl_free(to_mqp(qp)->buf);
+#else
+       VirtualFree( to_mqp(qp)->buf, 0, MEM_RELEASE);
+#endif
+       cl_free(to_mqp(qp)->wrid);
+       cl_free(to_mqp(qp));
+
+       return 0;
+}
+
+
+int mthca_create_ah_pre(struct ibv_pd *pd, struct ibv_create_ah *req)
+{
+       void *buf;
+
+       if (posix_memalign(&buf, g_page_size, g_page_size)) 
+               return -ENOMEM;
+       
+       req->mr.start = (uint64_t)(ULONG_PTR)buf;
+       req->mr.length = g_page_size;
+       req->mr.hca_va = (uint64_t)(ULONG_PTR)buf;
+       req->mr.pd_handle    = pd->handle;
+       req->mr.pdn = to_mpd(pd)->pdn;
+       req->mr.access_flags = 0;       //local read
+       return 0;
+}
+
+struct ibv_ah *mthca_create_ah_post(struct ibv_pd *pd, 
+       struct ibv_ah_attr *attr, struct ibv_create_ah_resp *resp)
+{
+       struct mthca_ah *ah;
+
+       ah = cl_malloc(sizeof *ah);
+       if (!ah)
+               return NULL;
+
+       if (mthca_alloc_av(to_mpd(pd), attr, ah, resp)) {
+               cl_free(ah);
+               return NULL;
+       }
+       ah->ibv_ah.pd = pd;
+
+       return &ah->ibv_ah;
+}
+
+int mthca_destroy_ah(struct ibv_ah *ah)
+{
+       mthca_free_av(to_mah(ah));
+       cl_free(to_mah(ah));
+
+       return 0;
+}
+
+int mthca_attach_mcast(struct ibv_qp *qp, union ibv_gid *gid, uint16_t lid)
+{
+#ifdef WIN_TO_BE_CHANGED
+       return ibv_cmd_attach_mcast(qp, gid, lid);
+#else
+       return -ENOSYS;
+#endif
+}
+
+int mthca_detach_mcast(struct ibv_qp *qp, union ibv_gid *gid, uint16_t lid)
+{
+#ifdef WIN_TO_BE_CHANGED
+       return ibv_cmd_detach_mcast(qp, gid, lid);
+#else
+       return -ENOSYS;
+#endif
+}
diff --git a/trunk/hw/mthca/user/mlnx_uvp_verbs.h b/trunk/hw/mthca/user/mlnx_uvp_verbs.h
new file mode 100644 (file)
index 0000000..e1dee1a
--- /dev/null
@@ -0,0 +1,507 @@
+/*
+ * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
+ * Copyright (c) 2004 Intel Corporation.  All rights reserved.
+ * Copyright (c) 2005 Cisco Systems.  All rights reserved.
+ * Copyright (c) 2005 PathScale, Inc.  All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id: verbs.h 3799 2005-10-17 22:40:49Z roland $
+ */
+
+#ifndef MLNX_UVP_VERBS_H
+#define MLNX_UVP_VERBS_H
+
+#include <iba/ib_types.h>
+
+#ifdef __cplusplus
+#  define BEGIN_C_DECLS extern "C" {
+#  define END_C_DECLS   }
+#else /* !__cplusplus */
+#  define BEGIN_C_DECLS
+#  define END_C_DECLS
+#endif /* __cplusplus */
+
+BEGIN_C_DECLS
+
+union ibv_gid {
+       uint8_t                 raw[16];
+       struct {
+               uint64_t        subnet_prefix;
+               uint64_t        interface_id;
+       } global;
+};
+
+enum ibv_node_type {
+       IBV_NODE_CA     = 1,
+       IBV_NODE_SWITCH,
+       IBV_NODE_ROUTER
+};
+
+enum ibv_device_cap_flags {
+       IBV_DEVICE_RESIZE_MAX_WR        = 1,
+       IBV_DEVICE_BAD_PKEY_CNTR        = 1 <<  1,
+       IBV_DEVICE_BAD_QKEY_CNTR        = 1 <<  2,
+       IBV_DEVICE_RAW_MULTI            = 1 <<  3,
+       IBV_DEVICE_AUTO_PATH_MIG        = 1 <<  4,
+       IBV_DEVICE_CHANGE_PHY_PORT      = 1 <<  5,
+       IBV_DEVICE_UD_AV_PORT_ENFORCE   = 1 <<  6,
+       IBV_DEVICE_CURR_QP_STATE_MOD    = 1 <<  7,
+       IBV_DEVICE_SHUTDOWN_PORT        = 1 <<  8,
+       IBV_DEVICE_INIT_TYPE            = 1 <<  9,
+       IBV_DEVICE_PORT_ACTIVE_EVENT    = 1 << 10,
+       IBV_DEVICE_SYS_IMAGE_GUID       = 1 << 11,
+       IBV_DEVICE_RC_RNR_NAK_GEN       = 1 << 12,
+       IBV_DEVICE_SRQ_RESIZE           = 1 << 13,
+       IBV_DEVICE_N_NOTIFY_CQ          = 1 << 14,
+};
+
+enum ibv_atomic_cap {
+       IBV_ATOMIC_NONE,
+       IBV_ATOMIC_HCA,
+       IBV_ATOMIC_GLOB
+};
+
+struct ibv_device_attr {
+       char                    fw_ver[64];
+       uint64_t                node_guid;
+       uint64_t                sys_image_guid;
+       uint64_t                max_mr_size;
+       uint64_t                page_size_cap;
+       uint32_t                vendor_id;
+       uint32_t                vendor_part_id;
+       uint32_t                hw_ver;
+       int                     max_qp;
+       int                     max_qp_wr;
+       int                     device_cap_flags;
+       int                     max_sge;
+       int                     max_sge_rd;
+       int                     max_cq;
+       int                     max_cqe;
+       int                     max_mr;
+       int                     max_pd;
+       int                     max_qp_rd_atom;
+       int                     max_ee_rd_atom;
+       int                     max_res_rd_atom;
+       int                     max_qp_init_rd_atom;
+       int                     max_ee_init_rd_atom;
+       enum ibv_atomic_cap     atomic_cap;
+       int                     max_ee;
+       int                     max_rdd;
+       int                     max_mw;
+       int                     max_raw_ipv6_qp;
+       int                     max_raw_ethy_qp;
+       int                     max_mcast_grp;
+       int                     max_mcast_qp_attach;
+       int                     max_total_mcast_qp_attach;
+       int                     max_ah;
+       int                     max_fmr;
+       int                     max_map_per_fmr;
+       int                     max_srq;
+       int                     max_srq_wr;
+       int                     max_srq_sge;
+       uint16_t                max_pkeys;
+       uint8_t                 local_ca_ack_delay;
+       uint8_t                 phys_port_cnt;
+};
+
+enum ibv_mtu {
+       IBV_MTU_256  = 1,
+       IBV_MTU_512  = 2,
+       IBV_MTU_1024 = 3,
+       IBV_MTU_2048 = 4,
+       IBV_MTU_4096 = 5
+};
+
+enum ibv_port_state {
+       IBV_PORT_NOP            = 0,
+       IBV_PORT_DOWN           = 1,
+       IBV_PORT_INIT           = 2,
+       IBV_PORT_ARMED          = 3,
+       IBV_PORT_ACTIVE         = 4,
+       IBV_PORT_ACTIVE_DEFER   = 5
+};
+
+struct ibv_port_attr {
+       enum ibv_port_state     state;
+       enum ibv_mtu            max_mtu;
+       enum ibv_mtu            active_mtu;
+       int                     gid_tbl_len;
+       uint32_t                port_cap_flags;
+       uint32_t                max_msg_sz;
+       uint32_t                bad_pkey_cntr;
+       uint32_t                qkey_viol_cntr;
+       uint16_t                pkey_tbl_len;
+       uint16_t                lid;
+       uint16_t                sm_lid;
+       uint8_t                 lmc;
+       uint8_t                 max_vl_num;
+       uint8_t                 sm_sl;
+       uint8_t                 subnet_timeout;
+       uint8_t                 init_type_reply;
+       uint8_t                 active_width;
+       uint8_t                 active_speed;
+       uint8_t                 phys_state;
+};
+
+enum ibv_event_type {
+       IBV_EVENT_CQ_ERR,
+       IBV_EVENT_QP_FATAL,
+       IBV_EVENT_QP_REQ_ERR,
+       IBV_EVENT_QP_ACCESS_ERR,
+       IBV_EVENT_COMM_EST,
+       IBV_EVENT_SQ_DRAINED,
+       IBV_EVENT_PATH_MIG,
+       IBV_EVENT_PATH_MIG_ERR,
+       IBV_EVENT_DEVICE_FATAL,
+       IBV_EVENT_PORT_ACTIVE,
+       IBV_EVENT_PORT_ERR,
+       IBV_EVENT_LID_CHANGE,
+       IBV_EVENT_PKEY_CHANGE,
+       IBV_EVENT_SM_CHANGE,
+       IBV_EVENT_SRQ_ERR,
+       IBV_EVENT_SRQ_LIMIT_REACHED,
+       IBV_EVENT_QP_LAST_WQE_REACHED
+};
+
+struct ibv_async_event {
+       union {
+               struct ibv_cq  *cq;
+               struct ibv_qp  *qp;
+               struct ibv_srq *srq;
+               int             port_num;
+       } element;
+       enum ibv_event_type     event_type;
+};
+
+enum ibv_access_flags {
+       IBV_ACCESS_LOCAL_WRITE          = 1,
+       IBV_ACCESS_REMOTE_WRITE         = (1<<1),
+       IBV_ACCESS_REMOTE_READ          = (1<<2),
+       IBV_ACCESS_REMOTE_ATOMIC        = (1<<3),
+       IBV_ACCESS_MW_BIND              = (1<<4)
+};
+
+struct ibv_pd {
+       struct ibv_context     *context;
+       uint64_t                handle;
+};
+
+struct ibv_mr {
+       struct ibv_context     *context;
+       struct ibv_pd          *pd;
+       uint64_t                handle;
+       uint32_t                lkey;
+       uint32_t                rkey;
+};
+
+struct ibv_global_route {
+       ib_gid_t                dgid;
+       uint32_t                flow_label;
+       uint8_t                 sgid_index;
+       uint8_t                 hop_limit;
+       uint8_t                 traffic_class;
+};
+
+struct ibv_ah_attr {
+       struct ibv_global_route grh;
+       uint16_t                dlid;
+       uint8_t                 sl;
+       uint8_t                 src_path_bits;
+       uint8_t                 static_rate;
+       uint8_t                 is_global;
+       uint8_t                 port_num;
+};
+
+
+enum ib_cq_notify {
+       IB_CQ_SOLICITED,
+       IB_CQ_NEXT_COMP
+};
+
+enum ibv_srq_attr_mask {
+       IBV_SRQ_MAX_WR  = 1 << 0,
+       IBV_SRQ_LIMIT   = 1 << 1,
+};
+
+struct ibv_srq_attr {
+       uint32_t                max_wr;
+       uint32_t                max_sge;
+       uint32_t                srq_limit;
+};
+
+struct ibv_srq_init_attr {
+       void                   *srq_context;
+       struct ibv_srq_attr     attr;
+};
+
+struct ibv_qp_cap {
+       uint32_t                max_send_wr;
+       uint32_t                max_recv_wr;
+       uint32_t                max_send_sge;
+       uint32_t                max_recv_sge;
+       uint32_t                max_inline_data;
+};
+
+struct ibv_qp_init_attr {
+       void                   *qp_context;
+       struct ibv_cq          *send_cq;
+       struct ibv_cq          *recv_cq;
+       struct ibv_srq         *srq;
+       struct ibv_qp_cap       cap;
+       ib_qp_type_t    qp_type;
+       int                     sq_sig_all;
+};
+
+enum ibv_qp_attr_mask {
+       IBV_QP_STATE                    = 1 <<  0,
+       IBV_QP_CUR_STATE                = 1 <<  1,
+       IBV_QP_EN_SQD_ASYNC_NOTIFY      = 1 <<  2,
+       IBV_QP_ACCESS_FLAGS             = 1 <<  3,
+       IBV_QP_PKEY_INDEX               = 1 <<  4,
+       IBV_QP_PORT                     = 1 <<  5,
+       IBV_QP_QKEY                     = 1 <<  6,
+       IBV_QP_AV                       = 1 <<  7,
+       IBV_QP_PATH_MTU                 = 1 <<  8,
+       IBV_QP_TIMEOUT                  = 1 <<  9,
+       IBV_QP_RETRY_CNT                = 1 << 10,
+       IBV_QP_RNR_RETRY                = 1 << 11,
+       IBV_QP_RQ_PSN                   = 1 << 12,
+       IBV_QP_MAX_QP_RD_ATOMIC         = 1 << 13,
+       IBV_QP_ALT_PATH                 = 1 << 14,
+       IBV_QP_MIN_RNR_TIMER            = 1 << 15,
+       IBV_QP_SQ_PSN                   = 1 << 16,
+       IBV_QP_MAX_DEST_RD_ATOMIC       = 1 << 17,
+       IBV_QP_PATH_MIG_STATE           = 1 << 18,
+       IBV_QP_CAP                      = 1 << 19,
+       IBV_QP_DEST_QPN                 = 1 << 20
+};
+
+enum ibv_qp_state {
+       IBV_QPS_RESET,
+       IBV_QPS_INIT,
+       IBV_QPS_RTR,
+       IBV_QPS_RTS,
+       IBV_QPS_SQD,
+       IBV_QPS_SQE,
+       IBV_QPS_ERR
+};
+
+enum ibv_mig_state {
+       IBV_MIG_MIGRATED,
+       IBV_MIG_REARM,
+       IBV_MIG_ARMED
+};
+
+struct ibv_qp_attr {
+       enum ibv_qp_state       qp_state;
+       enum ibv_qp_state       cur_qp_state;
+       enum ibv_mtu            path_mtu;
+       enum ibv_mig_state      path_mig_state;
+       uint32_t                qkey;
+       uint32_t                rq_psn;
+       uint32_t                sq_psn;
+       uint32_t                dest_qp_num;
+       int                     qp_access_flags;
+       struct ibv_qp_cap       cap;
+       struct ibv_ah_attr      ah_attr;
+       struct ibv_ah_attr      alt_ah_attr;
+       uint16_t                pkey_index;
+       uint16_t                alt_pkey_index;
+       uint8_t                 en_sqd_async_notify;
+       uint8_t                 sq_draining;
+       uint8_t                 max_rd_atomic;
+       uint8_t                 max_dest_rd_atomic;
+       uint8_t                 min_rnr_timer;
+       uint8_t                 port_num;
+       uint8_t                 timeout;
+       uint8_t                 retry_cnt;
+       uint8_t                 rnr_retry;
+       uint8_t                 alt_port_num;
+       uint8_t                 alt_timeout;
+};
+
+
+enum ibv_send_flags {
+       IBV_SEND_FENCE          = 1 << 0,
+       IBV_SEND_SIGNALED       = 1 << 1,
+       IBV_SEND_SOLICITED      = 1 << 2,
+       IBV_SEND_INLINE         = 1 << 3
+};
+
+struct ibv_sge {
+       uint64_t                addr;
+       uint32_t                length;
+       uint32_t                lkey;
+};
+
+struct ibv_send_wr {
+       struct ibv_send_wr     *next;
+       uint64_t                wr_id;
+       struct ibv_sge         *sg_list;
+       int                     num_sge;
+       enum ibv_wr_opcode      opcode;
+       enum ibv_send_flags     send_flags;
+       uint32_t                imm_data;               /* in network byte order */
+       union {
+               struct {
+                       uint64_t        remote_addr;
+                       uint32_t        rkey;
+               } rdma;
+               struct {
+                       uint64_t        remote_addr;
+                       uint64_t        compare_add;
+                       uint64_t        swap;
+                       uint32_t        rkey;
+               } atomic;
+               struct {
+                       struct ibv_ah  *ah;
+                       uint32_t        remote_qpn;
+                       uint32_t        remote_qkey;
+               } ud;
+       } wr;
+};
+
+struct ibv_recv_wr {
+       struct ibv_recv_wr     *next;
+       uint64_t                wr_id;
+       struct ibv_sge         *sg_list;
+       int                     num_sge;
+};
+
+typedef enum MTHCA_QP_ACCESS_FLAGS {
+       MTHCA_ACCESS_LOCAL_WRITE        = 1,
+       MTHCA_ACCESS_REMOTE_WRITE       = (1<<1),
+       MTHCA_ACCESS_REMOTE_READ        = (1<<2),
+       MTHCA_ACCESS_REMOTE_ATOMIC      = (1<<3),
+       MTHCA_ACCESS_MW_BIND    = (1<<4)
+} mthca_qp_access_t;
+
+
+struct ibv_srq {
+       struct ibv_context     *context;
+       void                   *srq_context;
+       struct ibv_pd          *pd; 
+       uint64_t                handle;
+       HANDLE          mutex;
+
+#ifdef WIN_TO_BE_CHANGED       
+       pthread_cond_t          cond;
+       uint32_t                events_completed;
+#endif
+};
+
+struct ibv_qp {
+       struct ibv_pd          *pd; 
+       struct ibv_cq          *send_cq;
+       struct ibv_cq          *recv_cq;
+       struct ibv_srq         *srq;
+       uint64_t                handle;
+       uint32_t                qp_num;
+       enum ibv_qp_state       state;
+       ib_qp_type_t    qp_type;
+
+#ifdef WIN_TO_BE_CHANGED       
+       struct ibv_context     *context;
+       HANDLE          mutex;
+       void                   *qp_context;
+       pthread_cond_t          cond;
+       uint32_t                events_completed;
+#endif
+};
+
+struct ibv_cq {
+       uint64_t                handle;
+       int                     cqe;
+       struct ibv_context               *context;
+};
+
+struct ibv_ah {
+       struct ibv_pd *pd;
+};
+
+struct ibv_context_ops {
+       int                     (*query_device)(struct ibv_context *context,
+                                             struct ibv_device_attr *device_attr);
+       int                     (*query_port)(struct ibv_context *context, uint8_t port_num,
+                                             struct ibv_port_attr *port_attr);
+       struct ibv_pd *         (*alloc_pd)(struct ibv_context *context, struct ibv_alloc_pd_resp *resp_p);
+       int                     (*dealloc_pd)(struct ibv_pd *pd);
+       struct ibv_mr *         (*reg_mr)(struct ibv_pd *pd, void *addr, size_t length,
+                                         enum ibv_access_flags access);
+       int                     (*dereg_mr)(struct ibv_mr *mr);
+       struct ibv_cq * (*create_cq_pre)(struct ibv_context *context, int *cqe,
+                              struct ibv_create_cq *req);
+       struct ibv_cq * (*create_cq_post)(struct ibv_context *context, 
+                              struct ibv_create_cq_resp *resp);
+       int                     (*poll_cq)(struct ibv_cq *cq, int num_entries, struct _ib_wc *wc);
+       int                     (*poll_cq_list)( struct ibv_cq *ibcq, 
+               struct _ib_wc** const                   pp_free_wclist,
+               struct _ib_wc** const                   pp_done_wclist );
+       int                     (*req_notify_cq)(struct ibv_cq *cq, int solicited_only);
+       void                    (*cq_event)(struct ibv_cq *cq);
+       int                     (*destroy_cq)(struct ibv_cq *cq);
+       struct ibv_srq *        (*create_srq)(struct ibv_pd *pd,
+                                             struct ibv_srq_init_attr *srq_init_attr);
+       int                     (*modify_srq)(struct ibv_srq *srq,
+                                             struct ibv_srq_attr *srq_attr,
+                                             enum ibv_srq_attr_mask srq_attr_mask);
+       int                     (*destroy_srq)(struct ibv_srq *srq);
+       int                     (*post_srq_recv)(struct ibv_srq *srq,
+                                                struct _ib_recv_wr *recv_wr,
+                                                struct _ib_recv_wr **bad_recv_wr);
+       struct ibv_qp *(*create_qp_pre)(struct ibv_pd *pd, 
+               struct ibv_qp_init_attr *attr, struct ibv_create_qp *req);
+       struct ibv_qp *(*create_qp_post)(struct ibv_pd *pd, 
+               struct ibv_create_qp_resp *resp);
+       int                     (*modify_qp)(struct ibv_qp *qp, struct ibv_qp_attr *attr,
+                                            enum ibv_qp_attr_mask attr_mask);
+       int                     (*destroy_qp)(struct ibv_qp *qp);
+       int                     (*post_send)(struct ibv_qp *qp, struct _ib_send_wr *wr,
+                                            struct _ib_send_wr **bad_wr);
+       int                     (*post_recv)(struct ibv_qp *qp, struct _ib_recv_wr *wr,
+                                            struct _ib_recv_wr **bad_wr);
+       int (*create_ah_pre)(struct ibv_pd *pd, struct ibv_create_ah *req);
+       struct ibv_ah *(*create_ah_post)(struct ibv_pd *pd, 
+               struct ibv_ah_attr *attr, struct ibv_create_ah_resp *resp);
+       int                     (*destroy_ah)(struct ibv_ah *ah);
+       int                     (*attach_mcast)(struct ibv_qp *qp, union ibv_gid *gid,
+                                               uint16_t lid);
+       int                     (*detach_mcast)(struct ibv_qp *qp, union ibv_gid *gid,
+                                               uint16_t lid);
+};
+
+struct ibv_context {
+       struct ibv_context_ops     ops;
+       void                      *abi_compat;
+};
+
+END_C_DECLS
+
+#endif /* MLNX_UVP_VERBS_H */
diff --git a/trunk/hw/mthca/user/mlnx_uvp_wqe.h b/trunk/hw/mthca/user/mlnx_uvp_wqe.h
new file mode 100644 (file)
index 0000000..1d9900d
--- /dev/null
@@ -0,0 +1,116 @@
+/*
+ * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
+ * Copyright (c) 2005 Cisco Systems.  All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id: wqe.h 4214 2005-11-29 17:43:08Z roland $
+ */
+
+#ifndef WQE_H
+#define WQE_H
+
+enum {
+       MTHCA_SEND_DOORBELL     = 0x10,
+       MTHCA_RECV_DOORBELL     = 0x18
+};
+
+enum {
+       MTHCA_NEXT_DBD       = 1 << 7,
+       MTHCA_NEXT_FENCE     = 1 << 6,
+       MTHCA_NEXT_CQ_UPDATE = 1 << 3,
+       MTHCA_NEXT_EVENT_GEN = 1 << 2,
+       MTHCA_NEXT_SOLICIT   = 1 << 1,
+};
+
+enum {
+       MTHCA_INLINE_SEG = 1 << 31
+};
+
+enum {
+       MTHCA_INVAL_LKEY                        = 0x100,
+       MTHCA_TAVOR_MAX_WQES_PER_RECV_DB        = 256,
+       MTHCA_ARBEL_MAX_WQES_PER_SEND_DB        = 255
+};
+
+struct mthca_next_seg {
+       uint32_t        nda_op; /* [31:6] next WQE [4:0] next opcode */
+       uint32_t        ee_nds; /* [31:8] next EE  [7] DBD [6] F [5:0] next WQE size */
+       uint32_t        flags;  /* [3] CQ [2] Event [1] Solicit */
+       uint32_t        imm;    /* immediate data */
+};
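The bit-layout comments above match how the Tavor SRQ receive path earlier in this patch fills the control segment; condensed from mthca_tavor_post_srq_recv (prev_wqe, ind and srq stand for the locals used there):

    /* Link the previous WQE to the one at index 'ind' and set the DBD bit. */
    ((struct mthca_next_seg *) prev_wqe)->nda_op =
            cl_hton32((ind << srq->wqe_shift) | 1);     /* [31:6] next WQE address  */
    ((struct mthca_next_seg *) prev_wqe)->ee_nds =
            cl_hton32(MTHCA_NEXT_DBD);                  /* [7] doorbell-count (DBD) */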
+
+struct mthca_tavor_ud_seg {
+       uint32_t        reserved1;
+       uint32_t        lkey;
+       uint64_t        av_addr;
+       uint32_t        reserved2[4];
+       uint32_t        dqpn;
+       uint32_t        qkey;
+       uint32_t        reserved3[2];
+};
+
+struct mthca_arbel_ud_seg {
+       uint32_t        av[8];
+       uint32_t        dqpn;
+       uint32_t        qkey;
+       uint32_t        reserved[2];
+};
+
+struct mthca_bind_seg {
+       uint32_t        flags;  /* [31] Atomic [30] rem write [29] rem read */
+       uint32_t        reserved;
+       uint32_t        new_rkey;
+       uint32_t        lkey;
+       uint64_t        addr;
+       uint64_t        length;
+};
+
+struct mthca_raddr_seg {
+       uint64_t        raddr;
+       uint32_t        rkey;
+       uint32_t        reserved;
+};
+
+struct mthca_atomic_seg {
+       uint64_t        swap_add;
+       uint64_t        compare;
+};
+
+struct mthca_data_seg {
+       uint32_t        byte_count;
+       uint32_t        lkey;
+       uint64_t        addr;
+};
+
+struct mthca_inline_seg {
+       uint32_t        byte_count;
+};
+
+#endif /* WQE_H */
diff --git a/trunk/hw/mthca/user/mt_l2w.h b/trunk/hw/mthca/user/mt_l2w.h
new file mode 100644 (file)
index 0000000..e9a572a
--- /dev/null
@@ -0,0 +1,114 @@
+#ifndef UMT_L2W_H
+#define UMT_L2W_H
+
+// ===========================================
+// INCLUDES
+// ===========================================
+
+// OS
+#include <stdio.h>
+#include <stdarg.h>
+#include <stdlib.h>
+//#include <stddef.h>
+#include <errno.h>
+#include <complib/cl_memory.h>
+//#include <malloc.h>
+
+
+// ===========================================
+// SUBSTITUTIONS
+// ===========================================
+
+#define inline __inline
+#define likely(x)                      (x)
+#define unlikely(x)                    (x)
+
+// ===========================================
+// LITERALS
+// ===========================================
+
+
+
+// ===========================================
+// TYPES
+// ===========================================
+
+
+// ===========================================
+// MACROS
+// ===========================================
+
+// nullifying macros
+
+#define ERR_PTR(error)         ((void*)(LONG_PTR)(error))
+#define PTR_ERR(ptr)                   ((long)(LONG_PTR)(void*)(ptr))
+//TODO: there are 2 assumptions here:
+// - a valid pointer is never within 1000 of the top of the address space
+// - error codes are never larger than 1000
+#define IS_ERR(ptr)                            ((ULONG_PTR)ptr > (ULONG_PTR)-1000L)
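These follow the Linux kernel idiom of encoding a small negative errno inside a pointer value; mthca_create_cq_pre and mthca_create_qp_pre in mlnx_uvp_verbs.c return such pointers. A tiny sketch of the convention (stdio.h is already included above):

    /* Sketch: a negative errno smuggled through a pointer. */
    void *p = ERR_PTR(-ENOMEM);
    if (IS_ERR(p))
            printf("failed: %ld\n", PTR_ERR(p));   /* prints -12 with the usual errno values */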
+
+//-------------------------------------------------------
+// from mt_bitmap.h
+
+#define BITS_PER_LONG          32
+#define BITS_TO_LONGS(bits) \
+       (((bits)+BITS_PER_LONG-1)/BITS_PER_LONG)
+
+/**
+* _ffs - find the first set bit in a word
+* @addr: the address of the word to search
+* @offset: the bit number to start searching at
+*
+* returns: 0 if no set bit is found, otherwise N+1, where N is the
+* index of the first set bit at or above @offset
+*/
+static inline int _ffs(const unsigned long *addr, int offset)
+{
+       //TODO: not efficient code - an assembler version would be faster
+       int mask = 1 << offset;
+       int rbc = BITS_PER_LONG - offset;
+       int ix;
+       for (ix=0; ix<rbc; ix++, mask<<=1) {
+               if (*addr & mask)
+                       return offset + ix + 1;
+       }
+       return 0;
+}
+
+#define ffs(val)               _ffs(&val,0)
+#define ffsl(val)      ffs(val)
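Note the off-by-one return convention (N+1 rather than N), which mirrors the classic ffs(). A quick sketch of what the shim returns:

    unsigned long word = 0x18;       /* bits 3 and 4 set                             */
    int bit  = ffs(word);            /* 4: first set bit is bit 3, reported as 3 + 1 */
    int none = _ffs(&word, 5);       /* 0: no set bit at offset 5 or above           */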
+
+extern size_t g_page_size;
+
+static inline int posix_memalign(void **memptr, size_t alignment, size_t size)
+{
+#ifdef NOT_USE_VIRTUAL_ALLOC   
+       // sanity checks
+       if (alignment % sizeof(void*))
+               return EINVAL;
+       if (alignment < g_page_size) {
+               fprintf(stderr, "mthca: Fatal (posix_memalign): alignment too small - %d \n", (int)alignment );
+               return EINVAL;
+       }
+
+       // allocation
+       *memptr = cl_malloc(size);
+       if (*memptr) 
+               return 0;
+       else    
+               return ENOMEM;
+#else
+       *memptr = VirtualAlloc( NULL, size, MEM_COMMIT | MEM_RESERVE,  PAGE_READWRITE );
+       if (*memptr) 
+               return 0;
+       else    
+               return ENOMEM;
+#endif
+}
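This shim is what the allocation sites earlier in the patch use; in the default (VirtualAlloc) build the result is page-aligned by construction. A sketch modelled on mthca_alloc_srq_buf, where align() and buf_size are whatever the caller already has:

    void *buf;
    if (posix_memalign(&buf, g_page_size, align(buf_size, g_page_size)))
            return -1;               /* shim returned ENOMEM (or EINVAL) */
    memset(buf, 0, buf_size);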
+
+// ===========================================
+// FUNCTIONS
+// ===========================================
+
+
+#endif
+
diff --git a/trunk/hw/mthca/user/opcode.h b/trunk/hw/mthca/user/opcode.h
new file mode 100644 (file)
index 0000000..50dad70
--- /dev/null
@@ -0,0 +1,149 @@
+/*
+ * Copyright (c) 2005 Topspin Communications.  All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id: opcode.h 1989 2005-03-14 20:25:13Z roland $
+ */
+
+#ifndef INFINIBAND_OPCODE_H
+#define INFINIBAND_OPCODE_H
+
+/*
+ * This macro cleans up the definitions of constants for BTH opcodes.
+ * It is used to define constants such as IBV_OPCODE_UD_SEND_ONLY,
+ * which becomes IBV_OPCODE_UD + IBV_OPCODE_SEND_ONLY, and this gives
+ * the correct value.
+ *
+ * In short, user code should use the constants defined using the
+ * macro rather than worrying about adding together other constants.
+*/
+#define IBV_OPCODE(transport, op) \
+       IBV_OPCODE_ ## transport ## _ ## op = \
+               IBV_OPCODE_ ## transport + IBV_OPCODE_ ## op
+
+enum {
+       /* transport types -- just used to define real constants */
+       IBV_OPCODE_RC                                = 0x00,
+       IBV_OPCODE_UC                                = 0x20,
+       IBV_OPCODE_RD                                = 0x40,
+       IBV_OPCODE_UD                                = 0x60,
+
+       /* operations -- just used to define real constants */
+       IBV_OPCODE_SEND_FIRST                        = 0x00,
+       IBV_OPCODE_SEND_MIDDLE                       = 0x01,
+       IBV_OPCODE_SEND_LAST                         = 0x02,
+       IBV_OPCODE_SEND_LAST_WITH_IMMEDIATE          = 0x03,
+       IBV_OPCODE_SEND_ONLY                         = 0x04,
+       IBV_OPCODE_SEND_ONLY_WITH_IMMEDIATE          = 0x05,
+       IBV_OPCODE_RDMA_WRITE_FIRST                  = 0x06,
+       IBV_OPCODE_RDMA_WRITE_MIDDLE                 = 0x07,
+       IBV_OPCODE_RDMA_WRITE_LAST                   = 0x08,
+       IBV_OPCODE_RDMA_WRITE_LAST_WITH_IMMEDIATE    = 0x09,
+       IBV_OPCODE_RDMA_WRITE_ONLY                   = 0x0a,
+       IBV_OPCODE_RDMA_WRITE_ONLY_WITH_IMMEDIATE    = 0x0b,
+       IBV_OPCODE_RDMA_READ_REQUEST                 = 0x0c,
+       IBV_OPCODE_RDMA_READ_RESPONSE_FIRST          = 0x0d,
+       IBV_OPCODE_RDMA_READ_RESPONSE_MIDDLE         = 0x0e,
+       IBV_OPCODE_RDMA_READ_RESPONSE_LAST           = 0x0f,
+       IBV_OPCODE_RDMA_READ_RESPONSE_ONLY           = 0x10,
+       IBV_OPCODE_ACKNOWLEDGE                       = 0x11,
+       IBV_OPCODE_ATOMIC_ACKNOWLEDGE                = 0x12,
+       IBV_OPCODE_COMPARE_SWAP                      = 0x13,
+       IBV_OPCODE_FETCH_ADD                         = 0x14,
+
+       /* real constants follow -- see the comment above about the IBV_OPCODE()
+          macro for more details */
+
+       /* RC */
+       IBV_OPCODE(RC, SEND_FIRST),
+       IBV_OPCODE(RC, SEND_MIDDLE),
+       IBV_OPCODE(RC, SEND_LAST),
+       IBV_OPCODE(RC, SEND_LAST_WITH_IMMEDIATE),
+       IBV_OPCODE(RC, SEND_ONLY),
+       IBV_OPCODE(RC, SEND_ONLY_WITH_IMMEDIATE),
+       IBV_OPCODE(RC, RDMA_WRITE_FIRST),
+       IBV_OPCODE(RC, RDMA_WRITE_MIDDLE),
+       IBV_OPCODE(RC, RDMA_WRITE_LAST),
+       IBV_OPCODE(RC, RDMA_WRITE_LAST_WITH_IMMEDIATE),
+       IBV_OPCODE(RC, RDMA_WRITE_ONLY),
+       IBV_OPCODE(RC, RDMA_WRITE_ONLY_WITH_IMMEDIATE),
+       IBV_OPCODE(RC, RDMA_READ_REQUEST),
+       IBV_OPCODE(RC, RDMA_READ_RESPONSE_FIRST),
+       IBV_OPCODE(RC, RDMA_READ_RESPONSE_MIDDLE),
+       IBV_OPCODE(RC, RDMA_READ_RESPONSE_LAST),
+       IBV_OPCODE(RC, RDMA_READ_RESPONSE_ONLY),
+       IBV_OPCODE(RC, ACKNOWLEDGE),
+       IBV_OPCODE(RC, ATOMIC_ACKNOWLEDGE),
+       IBV_OPCODE(RC, COMPARE_SWAP),
+       IBV_OPCODE(RC, FETCH_ADD),
+
+       /* UC */
+       IBV_OPCODE(UC, SEND_FIRST),
+       IBV_OPCODE(UC, SEND_MIDDLE),
+       IBV_OPCODE(UC, SEND_LAST),
+       IBV_OPCODE(UC, SEND_LAST_WITH_IMMEDIATE),
+       IBV_OPCODE(UC, SEND_ONLY),
+       IBV_OPCODE(UC, SEND_ONLY_WITH_IMMEDIATE),
+       IBV_OPCODE(UC, RDMA_WRITE_FIRST),
+       IBV_OPCODE(UC, RDMA_WRITE_MIDDLE),
+       IBV_OPCODE(UC, RDMA_WRITE_LAST),
+       IBV_OPCODE(UC, RDMA_WRITE_LAST_WITH_IMMEDIATE),
+       IBV_OPCODE(UC, RDMA_WRITE_ONLY),
+       IBV_OPCODE(UC, RDMA_WRITE_ONLY_WITH_IMMEDIATE),
+
+       /* RD */
+       IBV_OPCODE(RD, SEND_FIRST),
+       IBV_OPCODE(RD, SEND_MIDDLE),
+       IBV_OPCODE(RD, SEND_LAST),
+       IBV_OPCODE(RD, SEND_LAST_WITH_IMMEDIATE),
+       IBV_OPCODE(RD, SEND_ONLY),
+       IBV_OPCODE(RD, SEND_ONLY_WITH_IMMEDIATE),
+       IBV_OPCODE(RD, RDMA_WRITE_FIRST),
+       IBV_OPCODE(RD, RDMA_WRITE_MIDDLE),
+       IBV_OPCODE(RD, RDMA_WRITE_LAST),
+       IBV_OPCODE(RD, RDMA_WRITE_LAST_WITH_IMMEDIATE),
+       IBV_OPCODE(RD, RDMA_WRITE_ONLY),
+       IBV_OPCODE(RD, RDMA_WRITE_ONLY_WITH_IMMEDIATE),
+       IBV_OPCODE(RD, RDMA_READ_REQUEST),
+       IBV_OPCODE(RD, RDMA_READ_RESPONSE_FIRST),
+       IBV_OPCODE(RD, RDMA_READ_RESPONSE_MIDDLE),
+       IBV_OPCODE(RD, RDMA_READ_RESPONSE_LAST),
+       IBV_OPCODE(RD, RDMA_READ_RESPONSE_ONLY),
+       IBV_OPCODE(RD, ACKNOWLEDGE),
+       IBV_OPCODE(RD, ATOMIC_ACKNOWLEDGE),
+       IBV_OPCODE(RD, COMPARE_SWAP),
+       IBV_OPCODE(RD, FETCH_ADD),
+
+       /* UD */
+       IBV_OPCODE(UD, SEND_ONLY),
+       IBV_OPCODE(UD, SEND_ONLY_WITH_IMMEDIATE)
+};
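As the comment above explains, each generated constant is simply the transport base plus the operation code; for instance:

    /* IBV_OPCODE(UD, SEND_ONLY) expands to:
     *   IBV_OPCODE_UD_SEND_ONLY = IBV_OPCODE_UD + IBV_OPCODE_SEND_ONLY
     *                           = 0x60          + 0x04
     *                           = 0x64
     * which is the on-the-wire BTH opcode for a UD SEND-only packet. */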
+
+#endif /* INFINIBAND_OPCODE_H */