git.openfabrics.org - ~shefty/rdma-win.git/commitdiff
[MTHCA] Implementation of fairness between eqs - each eq is limited to 10 ms.
author    tzachid <tzachid@ad392aa1-c5ef-ae45-8dd8-e69d62a5ef86>
          Wed, 4 Oct 2006 12:57:02 +0000 (12:57 +0000)
committer tzachid <tzachid@ad392aa1-c5ef-ae45-8dd8-e69d62a5ef86>
          Wed, 4 Oct 2006 12:57:02 +0000 (12:57 +0000)
git-svn-id: svn://openib.tc.cornell.edu/gen1@514 ad392aa1-c5ef-ae45-8dd8-e69d62a5ef86
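The hunks below add two registry tunables - ProcessorAffinity and MaxDpcTimeUs
(default 10000 us = 10 ms) - and wire them into parameter reading and interrupt
connection. The mthca_eq.c part of the diff is truncated before the code that
actually enforces the per-EQ budget; as a rough sketch only, assuming
mthca_eq_int() is reworked to return after a bounded batch of EQEs (the helper
names are reused from the hunks below, not taken from the committed logic),
a time-limited Tavor EQ DPC could look like this:

    static void mthca_tavor_dpc( PRKDPC dpc, PVOID ctx, PVOID arg1, PVOID arg2 )
    {
        struct mthca_eq  *eq  = ctx;
        struct mthca_dev *dev = eq->dev;
        LARGE_INTEGER freq, start, now;
        u64 max_ticks;
        SPIN_LOCK_PREP(lh);

        UNREFERENCED_PARAMETER(dpc);
        UNREFERENCED_PARAMETER(arg1);
        UNREFERENCED_PARAMETER(arg2);

        /* convert the microsecond budget into performance-counter ticks */
        start = KeQueryPerformanceCounter(&freq);
        max_ticks = (u64)freq.QuadPart * g_max_DPC_time_us / 1000000;

        spin_lock_dpc(&eq->lock, &lh);
        /* assumption: each mthca_eq_int() call processes one bounded batch */
        while (mthca_eq_int(dev, eq)) {
            tavor_set_eq_ci(dev, eq, eq->cons_index);
            now = KeQueryPerformanceCounter(NULL);
            if ((u64)(now.QuadPart - start.QuadPart) >= max_ticks) {
                /* budget spent: requeue ourselves so the DPCs of other
                 * EQs get processor time - this is the fairness part */
                KeInsertQueueDpc(&eq->dpc, NULL, NULL);
                spin_unlock_dpc(&lh);
                return;
            }
        }
        tavor_eq_req_not(dev, eq->eqn);         /* done: re-arm the EQ */
        spin_unlock_dpc(&lh);
    }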

trunk/hw/mthca/kernel/hca_data.h
trunk/hw/mthca/kernel/hca_driver.c
trunk/hw/mthca/kernel/mt_l2w.c
trunk/hw/mthca/kernel/mthca.inf
trunk/hw/mthca/kernel/mthca_eq.c
trunk/hw/mthca/kernel/mthca_provider.h

index e3188609e6c1a4d6fdfa26649bd46663b710cd49..66681ce29b1c360cd66bcdd0a49d2c51b416e198 100644 (file)
@@ -44,6 +44,8 @@ extern char                           mlnx_uvp_lib_name[];
 extern uint32_t                        g_skip_tavor_reset;\r
 extern uint32_t                        g_disable_tavor_reset;\r
 extern uint32_t                        g_tune_pci;\r
+extern uint32_t         g_processor_affinity;\r
+extern uint32_t         g_max_DPC_time_us;\r
 \r
 \r
 #define MLNX_MAX_HCA   4\r
index 38586cd3ed8eaf0bd5f3dbd272afe7b8bd5647ed..9fee6e99e827885baff6f3163144fb9b796be50f 100644 (file)
@@ -69,6 +69,9 @@ UCHAR g_slog_buf[ MAX_LOG_BUF_LEN ];
 uint32_t g_skip_tavor_reset=0;         /* skip reset for Tavor cards */\r
 uint32_t g_disable_tavor_reset=1;              /* disable Tavor reset for the next driver load */\r
 uint32_t g_tune_pci=0;                         /* 0 - skip tuning PCI configuration space of HCAs */\r
+uint32_t g_processor_affinity = 0;            /* 0 - use the bus-assigned interrupt affinity */\r
+uint32_t g_max_DPC_time_us = 10000;           /* max run time of one EQ DPC, in microseconds (10 ms) */\r
+\r
 UNICODE_STRING                         g_param_path;\r
 \r
 \r
@@ -236,7 +239,7 @@ __read_registry(
 {\r
        NTSTATUS                                        status;\r
        /* Remember the terminating entry in the table below. */\r
-       RTL_QUERY_REGISTRY_TABLE        table[6];\r
+       RTL_QUERY_REGISTRY_TABLE        table[8];\r
 \r
        HCA_ENTER( HCA_DBG_DEV );\r
 \r
@@ -297,15 +300,30 @@ __read_registry(
        table[4].DefaultData = &g_tune_pci;\r
        table[4].DefaultLength = sizeof(ULONG);\r
 \r
+       table[5].Flags = RTL_QUERY_REGISTRY_DIRECT;\r
+       table[5].Name = L"ProcessorAffinity";\r
+       table[5].EntryContext = &g_processor_affinity;\r
+       table[5].DefaultType = REG_DWORD;\r
+       table[5].DefaultData = &g_processor_affinity;\r
+       table[5].DefaultLength = sizeof(ULONG);\r
+\r
+       table[6].Flags = RTL_QUERY_REGISTRY_DIRECT;\r
+       table[6].Name = L"MaxDpcTimeUs";\r
+       table[6].EntryContext = &g_max_DPC_time_us;\r
+       table[6].DefaultType = REG_DWORD;\r
+       table[6].DefaultData = &g_max_DPC_time_us;\r
+       table[6].DefaultLength = sizeof(ULONG);\r
+\r
        /* Have at it! */\r
        status = RtlQueryRegistryValues( RTL_REGISTRY_ABSOLUTE, \r
                g_param_path.Buffer, table, NULL, NULL );\r
 \r
        HCA_PRINT( TRACE_LEVEL_INFORMATION, HCA_DBG_INIT, \r
-               ("debug level  %d debug flags  0x%.8x SkipTavorReset %d DisableTavorReset %d TunePci %d\n",\r
+               ("debug level  %d debug flags  0x%.8x SkipTavorReset %d DisableTavorReset %d TunePci %d "\r
+               "g_processor_affinity %d g_max_DPC_time_us %d\n",\r
                g_mthca_dbg_level ,     g_mthca_dbg_flags,\r
                g_skip_tavor_reset, g_disable_tavor_reset,\r
-               g_tune_pci ));\r
+               g_tune_pci, g_processor_affinity, g_max_DPC_time_us ));\r
 \r
        HCA_EXIT( HCA_DBG_DEV );\r
        return status;\r
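Background for the table[8] sizing above (an illustration, not part of this
commit): RtlQueryRegistryValues walks the table until it reaches an entry whose
QueryRoutine and Name are both NULL, so seven populated entries need an eighth,
zeroed terminator:

    RTL_QUERY_REGISTRY_TABLE        table[8];   /* entries 0..6 + terminator */

    RtlZeroMemory( table, sizeof(table) );      /* leaves table[7] zeroed */
    /* ... fill table[0] through table[6] as in the hunk above ... */
    status = RtlQueryRegistryValues( RTL_REGISTRY_ABSOLUTE,
        g_param_path.Buffer, table, NULL, NULL );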
index e1d9929174d310549b3c0bdcffff2174d61f41cc..43928791df462a28c5f9505ce0767b34b18968f5 100644 (file)
-#include <mt_l2w.h>
-
-pci_pool_t *
-pci_pool_create (const char *name, struct mthca_dev *mdev,
-        size_t size, size_t align, size_t allocation)
-{
-       pci_pool_t *pool;
-       UNREFERENCED_PARAMETER(align);
-       UNREFERENCED_PARAMETER(allocation);
-
-       MT_ASSERT(KeGetCurrentIrql() <= DISPATCH_LEVEL);
-       
-       // allocation parameter is not handled yet
-       ASSERT(allocation == 0);
-
-       // allocate object
-       pool = (pci_pool_t *)ExAllocatePoolWithTag( NonPagedPool, sizeof(pci_pool_t), MT_TAG_PCIPOOL );
-       if (pool == NULL) 
-               return NULL;
-
-       //TODO: not strictly correct: Linux's pci_pool_alloc provides physically contiguous memory,
-       // while the default allocator - ExAllocatePoolWithTag - doesn't.
-       // For now the pool is used only for elements of size <= PAGE_SIZE,
-       // hence the sanity check:
-       ASSERT(size <= PAGE_SIZE);
-       if (size > PAGE_SIZE)
-               return NULL;
-
-       //TODO: not very efficient: one could provide its own alloc/free functions
-       ExInitializeNPagedLookasideList( &pool->pool_hdr, NULL, NULL, 0, size, MT_TAG_PCIPOOL, 0 );
-       
-       // fill the object
-       pool->mdev = mdev;
-       pool->size = size;
-       strncpy( pool->name, name, sizeof pool->name );
-
-       return pool;            
-}
-
-// from lib/string.c
-/**
-* strlcpy - Copy a %NUL terminated string into a sized buffer
-* @dest: Where to copy the string to
-* @src: Where to copy the string from
-* @size: size of destination buffer
-*
-* Compatible with *BSD: the result is always a valid
-* NUL-terminated string that fits in the buffer (unless,
-* of course, the buffer size is zero). It does not pad
-* out the result like strncpy() does.
-*/
-SIZE_T strlcpy(char *dest, const char *src, SIZE_T size)
-{
-        SIZE_T ret = strlen(src);
-
-        if (size) {
-                SIZE_T len = (ret >= size) ? size-1 : ret;
-                memcpy(dest, src, len);
-                dest[len] = '\0';
-        }
-        return ret;
-}
-
-
-int __bitmap_full(const unsigned long *bitmap, int bits)
-{
-       int k, lim = bits/BITS_PER_LONG;
-       for (k = 0; k < lim; ++k)
-               if (~bitmap[k])
-                       return 0;
-
-       if (bits % BITS_PER_LONG)
-               if (~bitmap[k] & BITMAP_LAST_WORD_MASK(bits))
-                       return 0;
-
-       return 1;
-}
-
-int __bitmap_empty(const unsigned long *bitmap, int bits)
-{
-       int k, lim = bits/BITS_PER_LONG;
-       for (k = 0; k < lim; ++k)
-               if (bitmap[k])
-                       return 0;
-
-       if (bits % BITS_PER_LONG)
-               if (bitmap[k] & BITMAP_LAST_WORD_MASK(bits))
-                       return 0;
-
-       return 1;
-}
-
-int request_irq(
-       IN      CM_PARTIAL_RESOURCE_DESCRIPTOR  *int_info,      /* interrupt resources */
-       IN              KSPIN_LOCK      *isr_lock,              /* spin lock for ISR */                 
-       IN              PKSERVICE_ROUTINE isr,          /* ISR */
-       IN              void *isr_ctx,                                          /* ISR context */
-       OUT     PKINTERRUPT *int_obj                    /* interrupt object */
-       )
-{
-       NTSTATUS                status;
-
-       status = IoConnectInterrupt(
-               int_obj,                                                                                                                /* InterruptObject */
-               isr,                                                                                                                            /* ISR */ 
-               isr_ctx,                                                                                                                /* ISR context */
-               isr_lock,                                                                                                       /* spinlock */
-               int_info->u.Interrupt.Vector,                                   /* interrupt vector */
-               (KIRQL)int_info->u.Interrupt.Level,             /* IRQL */
-               (KIRQL)int_info->u.Interrupt.Level,             /* Synchronize IRQL */
-               (BOOLEAN)((int_info->Flags == CM_RESOURCE_INTERRUPT_LATCHED) ? 
-               Latched : LevelSensitive),                                                      /* interrupt type: LATCHED or LEVEL */
-               (BOOLEAN)(int_info->ShareDisposition == CmResourceShareShared),         /* vector shared or not */
-               (KAFFINITY)int_info->u.Interrupt.Affinity,      /* interrupt affinity */ 
-               FALSE                                                                                                                   /* whether to save Float registers */
-               );
-
-       if (!NT_SUCCESS(status))
-               return -EFAULT;         /* failed to connect interrupt */
-       else
-               return 0;
-}
-
+#include <mt_l2w.h>\r
+#include <hca_data.h>\r
+#if defined(EVENT_TRACING)\r
+#ifdef offsetof\r
+#undef offsetof\r
+#endif\r
+#include "mt_l2w.tmh"\r
+#endif\r
+\r
+pci_pool_t *\r
+pci_pool_create (const char *name, struct mthca_dev *mdev,\r
+        size_t size, size_t align, size_t allocation)\r
+{\r
+       pci_pool_t *pool;\r
+       UNREFERENCED_PARAMETER(align);\r
+       UNREFERENCED_PARAMETER(allocation);\r
+\r
+       MT_ASSERT(KeGetCurrentIrql() <= DISPATCH_LEVEL);\r
+       \r
+       // allocation parameter is not handled yet\r
+       ASSERT(allocation == 0);\r
+\r
+       // allocate object\r
+       pool = (pci_pool_t *)ExAllocatePoolWithTag( NonPagedPool, sizeof(pci_pool_t), MT_TAG_PCIPOOL );\r
+       if (pool == NULL) \r
+               return NULL;\r
+\r
+       //TODO: not strictly correct: Linux's pci_pool_alloc provides physically contiguous memory,\r
+       // while the default allocator - ExAllocatePoolWithTag - doesn't.\r
+       // For now the pool is used only for elements of size <= PAGE_SIZE,\r
+       // hence the sanity check:\r
+       ASSERT(size <= PAGE_SIZE);\r
+       if (size > PAGE_SIZE)\r
+               return NULL;\r
+\r
+       //TODO: not very efficient: one could provide its own alloc/free functions\r
+       ExInitializeNPagedLookasideList( &pool->pool_hdr, NULL, NULL, 0, size, MT_TAG_PCIPOOL, 0 );\r
+       \r
+       // fill the object\r
+       pool->mdev = mdev;\r
+       pool->size = size;\r
+       strncpy( pool->name, name, sizeof pool->name );\r
+\r
+       return pool;            \r
+}\r
+\r
+// from lib/string.c\r
+/**\r
+* strlcpy - Copy a %NUL terminated string into a sized buffer\r
+* @dest: Where to copy the string to\r
+* @src: Where to copy the string from\r
+* @size: size of destination buffer\r
+*\r
+* Compatible with *BSD: the result is always a valid\r
+* NUL-terminated string that fits in the buffer (unless,\r
+* of course, the buffer size is zero). It does not pad\r
+* out the result like strncpy() does.\r
+*/\r
+SIZE_T strlcpy(char *dest, const char *src, SIZE_T size)\r
+{\r
+        SIZE_T ret = strlen(src);\r
+\r
+        if (size) {\r
+                SIZE_T len = (ret >= size) ? size-1 : ret;\r
+                memcpy(dest, src, len);\r
+                dest[len] = '\0';\r
+        }\r
+        return ret;\r
+}\r
+\r
+\r
+int __bitmap_full(const unsigned long *bitmap, int bits)\r
+{\r
+       int k, lim = bits/BITS_PER_LONG;\r
+       for (k = 0; k < lim; ++k)\r
+               if (~bitmap[k])\r
+                       return 0;\r
+\r
+       if (bits % BITS_PER_LONG)\r
+               if (~bitmap[k] & BITMAP_LAST_WORD_MASK(bits))\r
+                       return 0;\r
+\r
+       return 1;\r
+}\r
+\r
+int __bitmap_empty(const unsigned long *bitmap, int bits)\r
+{\r
+       int k, lim = bits/BITS_PER_LONG;\r
+       for (k = 0; k < lim; ++k)\r
+               if (bitmap[k])\r
+                       return 0;\r
+\r
+       if (bits % BITS_PER_LONG)\r
+               if (bitmap[k] & BITMAP_LAST_WORD_MASK(bits))\r
+                       return 0;\r
+\r
+       return 1;\r
+}\r
+\r
+int request_irq(\r
+       IN      CM_PARTIAL_RESOURCE_DESCRIPTOR  *int_info,      /* interrupt resources */\r
+       IN              KSPIN_LOCK      *isr_lock,              /* spin lock for ISR */                 \r
+       IN              PKSERVICE_ROUTINE isr,          /* ISR */\r
+       IN              void *isr_ctx,                                          /* ISR context */\r
+       OUT     PKINTERRUPT *int_obj                    /* interrupt object */\r
+       )\r
+{\r
+       NTSTATUS                status;\r
+\r
+       status = IoConnectInterrupt(\r
+               int_obj,                                                                                                                /* InterruptObject */\r
+               isr,                                                                                                                            /* ISR */ \r
+               isr_ctx,                                                                                                                /* ISR context */\r
+               isr_lock,                                                                                                       /* spinlock */\r
+               int_info->u.Interrupt.Vector,                                   /* interrupt vector */\r
+               (KIRQL)int_info->u.Interrupt.Level,             /* IRQL */\r
+               (KIRQL)int_info->u.Interrupt.Level,             /* Synchronize IRQL */\r
+               (BOOLEAN)((int_info->Flags == CM_RESOURCE_INTERRUPT_LATCHED) ? \r
+               Latched : LevelSensitive),                                                      /* interrupt type: LATCHED or LEVEL */\r
+               (BOOLEAN)(int_info->ShareDisposition == CmResourceShareShared),         /* vector shared or not */\r
+               g_processor_affinity ? g_processor_affinity : (KAFFINITY)int_info->u.Interrupt.Affinity,        /* interrupt affinity */\r
+               FALSE                                                                                                                   /* whether to save Float registers */\r
+               );\r
+\r
+       if (!NT_SUCCESS(status)) {\r
+               HCA_PRINT(TRACE_LEVEL_ERROR, HCA_DBG_INIT, ("IoConnectInterrupt failed with status %d (was ProcessorAffinity set to an invalid mask?)\n", status));\r
+               return -EFAULT;         /* failed to connect interrupt */\r
+       }\r
+       else\r
+               return 0;\r
+}\r
+\r
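The request_irq() change above uses the registry value only when it is
non-zero and otherwise keeps the affinity from the interrupt resource. For
illustration (example values, not from this commit), KAFFINITY is a bit mask
in which bit n selects processor n:

    /* ProcessorAffinity = 0x1 -> CPU 0 only
     * ProcessorAffinity = 0x3 -> CPUs 0 and 1
     * ProcessorAffinity = 0   -> keep int_info->u.Interrupt.Affinity */
    KAFFINITY affinity = g_processor_affinity ?
        (KAFFINITY)g_processor_affinity :
        (KAFFINITY)int_info->u.Interrupt.Affinity;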
index 96198535be93a1b3137caf8d38f659503401b0a0..2d7c3c51cd28abf7d5d6a7dfe6de3aef05982324 100644 (file)
@@ -166,6 +166,8 @@ HKR,"Parameters","DebugFlags",%REG_DWORD%,0x0000ffff
 HKR,"Parameters","SkipTavorReset",%REG_DWORD%,0\r
 HKR,"Parameters","DisableTavorResetOnFailure",%REG_DWORD%,1\r
 HKR,"Parameters","TunePci",%REG_DWORD%,0\r
+HKR,"Parameters","ProcessorAffinity",%REG_DWORD%,0\r
+HKR,"Parameters","MaxDpcTimeUs",%REG_DWORD%,10000\r
 HKLM,"System\CurrentControlSet\Control\WMI\GlobalLogger\8bf1f640-63fe-4743-b9ef-fa38c695bfde","Flags",%REG_DWORD%,0xffff\r
 HKLM,"System\CurrentControlSet\Control\WMI\GlobalLogger\8bf1f640-63fe-4743-b9ef-fa38c695bfde","Level",%REG_DWORD%,0x3\r
 \r
index db51afcb225eef33073e37c27c970d79e982611c..45c312eabb2578a0e32ff518cff4aa8abc93d2d7 100644 (file)
-/*
- * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
- * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
- * $Id$
- */
-
-#include "mthca_dev.h"
-#if defined(EVENT_TRACING)
-#ifdef offsetof
-#undef offsetof
-#endif
-#include "mthca_eq.tmh"
-#endif
-#include "mthca_cmd.h"
-#include "mthca_config_reg.h"
-
-static int mthca_map_reg(struct mthca_dev *dev,
-                                  u64 offset, unsigned long size,
-                                  void __iomem **map, SIZE_T *map_size);
-static int mthca_map_eq_regs(struct mthca_dev *dev);
-static void mthca_unmap_eq_regs(struct mthca_dev *dev);
-static int mthca_create_eq(struct mthca_dev *dev,
-                                    int nent,
-                                    u8 intr,
-                                    struct mthca_eq *eq);
-
-
-
-#ifdef ALLOC_PRAGMA
-#pragma alloc_text (PAGE, mthca_map_reg)
-#pragma alloc_text (PAGE, mthca_map_eq_regs)
-#pragma alloc_text (PAGE, mthca_init_eq_table)
-#pragma alloc_text (PAGE, mthca_unmap_eq_regs)
-#pragma alloc_text (PAGE, mthca_map_eq_icm)
-#pragma alloc_text (PAGE, mthca_unmap_eq_icm)
-#pragma alloc_text (PAGE, mthca_create_eq)
-#pragma alloc_text (PAGE, mthca_cleanup_eq_table)
-#endif
-
-enum {
-       MTHCA_NUM_ASYNC_EQE = 0x80,
-       MTHCA_NUM_CMD_EQE   = 0x80,
-       MTHCA_NUM_SPARE_EQE = 0x80,
-       MTHCA_EQ_ENTRY_SIZE = 0x20
-};
-
-/*
- * Must be packed because start is 64 bits but only aligned to 32 bits.
- */
-#pragma pack(push,1)
-struct mthca_eq_context {
-       __be32 flags;
-       __be64 start;
-       __be32 logsize_usrpage;
-       __be32 tavor_pd;        /* reserved for Arbel */
-       u8     reserved1[3];
-       u8     intr;
-       __be32 arbel_pd;        /* lost_count for Tavor */
-       __be32 lkey;
-       u32    reserved2[2];
-       __be32 consumer_index;
-       __be32 producer_index;
-       u32    reserved3[4];
-};
-#pragma pack(pop)
-
-#define MTHCA_EQ_STATUS_OK          ( 0 << 28)
-#define MTHCA_EQ_STATUS_OVERFLOW    ( 9 << 28)
-#define MTHCA_EQ_STATUS_WRITE_FAIL  (10 << 28)
-#define MTHCA_EQ_OWNER_SW           ( 0 << 24)
-#define MTHCA_EQ_OWNER_HW           ( 1 << 24)
-#define MTHCA_EQ_FLAG_TR            ( 1 << 18)
-#define MTHCA_EQ_FLAG_OI            ( 1 << 17)
-#define MTHCA_EQ_STATE_ARMED        ( 1 <<  8)
-#define MTHCA_EQ_STATE_FIRED        ( 2 <<  8)
-#define MTHCA_EQ_STATE_ALWAYS_ARMED ( 3 <<  8)
-#define MTHCA_EQ_STATE_ARBEL        ( 8 <<  8)
-
-enum {
-       MTHCA_EVENT_TYPE_COMP                                                   = 0x00,
-       MTHCA_EVENT_TYPE_PATH_MIG                                       = 0x01,
-       MTHCA_EVENT_TYPE_COMM_EST                                       = 0x02,
-       MTHCA_EVENT_TYPE_SQ_DRAINED                             = 0x03,
-       MTHCA_EVENT_TYPE_CQ_ERROR                                               = 0x04,
-       MTHCA_EVENT_TYPE_WQ_CATAS_ERROR                         = 0x05,
-       MTHCA_EVENT_TYPE_EEC_CATAS_ERROR                        = 0x06,
-       MTHCA_EVENT_TYPE_PATH_MIG_FAILED                        = 0x07,
-       MTHCA_EVENT_TYPE_LOCAL_CATAS_ERROR      = 0x08,
-       MTHCA_EVENT_TYPE_PORT_CHANGE                                    = 0x09,
-       MTHCA_EVENT_TYPE_CMD                                                                    = 0x0a,
-       MTHCA_EVENT_TYPE_WQ_INVAL_REQ_ERROR = 0x10,
-       MTHCA_EVENT_TYPE_ECC_DETECT                                     = 0x0e,
-       MTHCA_EVENT_TYPE_EQ_OVERFLOW                                    = 0x0f,
-       MTHCA_EVENT_TYPE_WQ_ACCESS_ERROR                = 0x11,
-       MTHCA_EVENT_TYPE_SRQ_CATAS_ERROR                        = 0x12,
-       MTHCA_EVENT_TYPE_SRQ_QP_LAST_WQE        = 0x13, 
-       MTHCA_EVENT_TYPE_SRQ_LIMIT                                      = 0x14  
-};
-
-#define MTHCA_ASYNC_EVENT_MASK ((1Ui64 << MTHCA_EVENT_TYPE_PATH_MIG)           | \
-                               (1Ui64 << MTHCA_EVENT_TYPE_COMM_EST)           | \
-                               (1Ui64 << MTHCA_EVENT_TYPE_SQ_DRAINED)         | \
-                               (1Ui64 << MTHCA_EVENT_TYPE_CQ_ERROR)           | \
-                               (1Ui64 << MTHCA_EVENT_TYPE_WQ_CATAS_ERROR)     | \
-                               (1Ui64 << MTHCA_EVENT_TYPE_EEC_CATAS_ERROR)    | \
-                               (1Ui64 << MTHCA_EVENT_TYPE_PATH_MIG_FAILED)    | \
-                               (1Ui64 << MTHCA_EVENT_TYPE_WQ_INVAL_REQ_ERROR) | \
-                               (1Ui64 << MTHCA_EVENT_TYPE_WQ_ACCESS_ERROR)    | \
-                               (1Ui64 << MTHCA_EVENT_TYPE_LOCAL_CATAS_ERROR)  | \
-                               (1Ui64 << MTHCA_EVENT_TYPE_PORT_CHANGE)        | \
-                               (1Ui64 << MTHCA_EVENT_TYPE_ECC_DETECT))
-#define MTHCA_SRQ_EVENT_MASK   ((1Ui64 << MTHCA_EVENT_TYPE_SRQ_CATAS_ERROR)    | \
-                                       (1Ui64 << MTHCA_EVENT_TYPE_SRQ_QP_LAST_WQE)             | \
-                                       (1Ui64 << MTHCA_EVENT_TYPE_SRQ_LIMIT))
-
-#define MTHCA_CMD_EVENT_MASK    (1Ui64 << MTHCA_EVENT_TYPE_CMD)
-
-#define MTHCA_EQ_DB_INC_CI     (1 << 24)
-#define MTHCA_EQ_DB_REQ_NOT    (2 << 24)
-#define MTHCA_EQ_DB_DISARM_CQ  (3 << 24)
-#define MTHCA_EQ_DB_SET_CI     (4 << 24)
-#define MTHCA_EQ_DB_ALWAYS_ARM (5 << 24)
-
-#pragma pack(push,1)
-struct mthca_eqe {
-       u8 reserved1;
-       u8 type;
-       u8 reserved2;
-       u8 subtype;
-       union {
-               u32 raw[6];
-               struct {
-                       __be32 cqn;
-               } comp;
-               struct {
-                       u16    reserved1;
-                       __be16 token;
-                       u32    reserved2;
-                       u8     reserved3[3];
-                       u8     status;
-                       __be64 out_param;
-               } cmd;
-               struct {
-                       __be32 qpn;
-               } qp;
-               struct {                        
-                       __be32 srqn;            
-               }       srq;
-               struct {
-                       __be32 cqn;
-                       u32    reserved1;
-                       u8     reserved2[3];
-                       u8     syndrome;
-               } cq_err;
-               struct {
-                       u32    reserved1[2];
-                       __be32 port;
-               } port_change;
-       } event;
-       u8 reserved3[3];
-       u8 owner;
-} ;
-#pragma pack(pop)
-
-#define  MTHCA_EQ_ENTRY_OWNER_SW      (0 << 7)
-#define  MTHCA_EQ_ENTRY_OWNER_HW      (1 << 7)
-
-static inline u64 async_mask(struct mthca_dev *dev)
-{
-       return dev->mthca_flags & MTHCA_FLAG_SRQ ?
-               MTHCA_ASYNC_EVENT_MASK | MTHCA_SRQ_EVENT_MASK :
-               MTHCA_ASYNC_EVENT_MASK;
-}
-
-static inline void tavor_set_eq_ci(struct mthca_dev *dev, struct mthca_eq *eq, u32 ci)
-{
-       __be32 doorbell[2];
-
-       doorbell[0] = cl_hton32(MTHCA_EQ_DB_SET_CI | eq->eqn);
-       doorbell[1] = cl_hton32(ci & (eq->nent - 1));
-
-       /*
-        * This barrier makes sure that all updates to ownership bits
-        * done by set_eqe_hw() hit memory before the consumer index
-        * is updated.  set_eq_ci() allows the HCA to possibly write
-        * more EQ entries, and we want to avoid the exceedingly
-        * unlikely possibility of the HCA writing an entry and then
-        * having set_eqe_hw() overwrite the owner field.
-        */
-       wmb();
-       mthca_write64(doorbell,
-                     dev->kar + MTHCA_EQ_DOORBELL,
-                     MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
-}
-
-static inline void arbel_set_eq_ci(struct mthca_dev *dev, struct mthca_eq *eq, u32 ci)
-{
-       /* See comment in tavor_set_eq_ci() above. */
-       wmb();
-       __raw_writel((u32) cl_hton32(ci),
-               (u8*)dev->eq_regs.arbel.eq_set_ci_base + eq->eqn * 8);
-       /* We still want ordering, just not swabbing, so add a barrier */
-       mb();
-}
-
-static inline void set_eq_ci(struct mthca_dev *dev, struct mthca_eq *eq, u32 ci)
-{
-       if (mthca_is_memfree(dev))
-               arbel_set_eq_ci(dev, eq, ci);
-       else
-               tavor_set_eq_ci(dev, eq, ci);
-}
-
-static inline void tavor_eq_req_not(struct mthca_dev *dev, int eqn)
-{
-       __be32 doorbell[2];
-
-       doorbell[0] = cl_hton32(MTHCA_EQ_DB_REQ_NOT | eqn);
-       doorbell[1] = 0;
-
-       mthca_write64(doorbell,
-                     dev->kar + MTHCA_EQ_DOORBELL,
-                     MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
-}
-
-static inline void arbel_eq_req_not(struct mthca_dev *dev, u32 eqn_mask)
-{
-       writel(eqn_mask, dev->eq_regs.arbel.eq_arm);
-}
-
-static inline void disarm_cq(struct mthca_dev *dev, int eqn, int cqn)
-{
-       if (!mthca_is_memfree(dev)) {
-               __be32 doorbell[2];
-
-               doorbell[0] = cl_hton32(MTHCA_EQ_DB_DISARM_CQ | eqn);
-               doorbell[1] = cl_hton32(cqn);
-
-               mthca_write64(doorbell,
-                             dev->kar + MTHCA_EQ_DOORBELL,
-                             MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
-       }
-}
-
-static inline struct mthca_eqe *get_eqe(struct mthca_eq *eq, u32 entry)
-{
-       unsigned long off = (entry & (eq->nent - 1)) * MTHCA_EQ_ENTRY_SIZE;
-       return (struct mthca_eqe *)((u8*)eq->page_list[off / PAGE_SIZE].page + off % PAGE_SIZE);
-}
-
-static inline struct mthca_eqe* next_eqe_sw(struct mthca_eq *eq)
-{
-       struct mthca_eqe* eqe;
-       eqe = get_eqe(eq, eq->cons_index);
-       return (MTHCA_EQ_ENTRY_OWNER_HW & eqe->owner) ? NULL : eqe;
-}
-
-static inline void set_eqe_hw(struct mthca_eqe *eqe)
-{
-       eqe->owner =  MTHCA_EQ_ENTRY_OWNER_HW;
-}
-
-static void port_change(struct mthca_dev *dev, int port, int active)
-{
-       struct ib_event record;
-
-       HCA_PRINT(TRACE_LEVEL_VERBOSE,HCA_DBG_LOW,("Port change to %s for port %d\n",
-                 active ? "active" : "down", port));
-
-       record.device = &dev->ib_dev;
-       record.event  = active ? IB_EVENT_PORT_ACTIVE : IB_EVENT_PORT_ERR;
-       record.element.port_num = (u8)port;
-       // Gen2 ib_core mechanism
-       ib_dispatch_event(&record);
-       // our callback
-       ca_event_handler( &record, &dev->ext->hca.hob );
-}
-
-static int mthca_eq_int(struct mthca_dev *dev, struct mthca_eq *eq)
-{
-       int disarm_cqn;
-       int  eqes_found = 0;
-       int set_ci = 0;
-       struct mthca_eqe *eqe = next_eqe_sw(eq);
-
-       while (eqe) {
-
-               /*
-                * Make sure we read EQ entry contents after we've
-                * checked the ownership bit.
-                */
-               rmb();
-
-               switch (eqe->type) {
-               case MTHCA_EVENT_TYPE_COMP:
-                       disarm_cqn = cl_ntoh32(eqe->event.comp.cqn) & 0xffffff;
-                       disarm_cq(dev, eq->eqn, disarm_cqn);
-                       mthca_cq_completion(dev, disarm_cqn);
-                       break;
-
-               case MTHCA_EVENT_TYPE_PATH_MIG:
-                       mthca_qp_event(dev, cl_ntoh32(eqe->event.qp.qpn) & 0xffffff,
-                                      IB_EVENT_PATH_MIG);
-                       break;
-
-               case MTHCA_EVENT_TYPE_COMM_EST:
-                       mthca_qp_event(dev, cl_ntoh32(eqe->event.qp.qpn) & 0xffffff,
-                                      IB_EVENT_COMM_EST);
-                       break;
-
-               case MTHCA_EVENT_TYPE_SQ_DRAINED:
-                       mthca_qp_event(dev, cl_ntoh32(eqe->event.qp.qpn) & 0xffffff,
-                                      IB_EVENT_SQ_DRAINED);
-                       break;
-
-               case MTHCA_EVENT_TYPE_SRQ_QP_LAST_WQE:
-                       mthca_qp_event(dev, cl_ntoh32(eqe->event.qp.qpn) & 0xffffff,
-                                      IB_EVENT_QP_LAST_WQE_REACHED);
-                       break;
-
-               case MTHCA_EVENT_TYPE_SRQ_LIMIT:
-                       mthca_srq_event(dev, cl_ntoh32(eqe->event.srq.srqn) & 0xffffff,
-                                               IB_EVENT_SRQ_LIMIT_REACHED);
-                       break;
-
-               case MTHCA_EVENT_TYPE_WQ_CATAS_ERROR:
-                       mthca_qp_event(dev, cl_ntoh32(eqe->event.qp.qpn) & 0xffffff,
-                                      IB_EVENT_QP_FATAL);
-                       break;
-
-               case MTHCA_EVENT_TYPE_PATH_MIG_FAILED:
-                       mthca_qp_event(dev, cl_ntoh32(eqe->event.qp.qpn) & 0xffffff,
-                                      IB_EVENT_PATH_MIG_ERR);
-                       break;
-
-               case MTHCA_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
-                       mthca_qp_event(dev, cl_ntoh32(eqe->event.qp.qpn) & 0xffffff,
-                                      IB_EVENT_QP_REQ_ERR);
-                       break;
-
-               case MTHCA_EVENT_TYPE_WQ_ACCESS_ERROR:
-                       mthca_qp_event(dev, cl_ntoh32(eqe->event.qp.qpn) & 0xffffff,
-                                      IB_EVENT_QP_ACCESS_ERR);
-                       break;
-
-               case MTHCA_EVENT_TYPE_CMD:
-                       mthca_cmd_event(dev,
-                                       cl_ntoh16(eqe->event.cmd.token),
-                                       eqe->event.cmd.status,
-                                       cl_ntoh64(eqe->event.cmd.out_param));
-                       break;
-
-               case MTHCA_EVENT_TYPE_PORT_CHANGE:
-                       port_change(dev,
-                                   (cl_ntoh32(eqe->event.port_change.port) >> 28) & 3,
-                                   eqe->subtype == 0x4);
-                       break;
-
-               case MTHCA_EVENT_TYPE_CQ_ERROR:
-                       HCA_PRINT(TRACE_LEVEL_WARNING,HCA_DBG_LOW, ("CQ %s on CQN %06x (syndrome %d)\n",
-                                  eqe->event.cq_err.syndrome == 1 ?
-                                  "overrun" : "access violation",
-                                  cl_ntoh32(eqe->event.cq_err.cqn) & 0xffffff, eqe->event.cq_err.syndrome));
-                       mthca_cq_event(dev, cl_ntoh32(eqe->event.cq_err.cqn),
-                               IB_EVENT_CQ_ERR);
-                       break;
-
-               case MTHCA_EVENT_TYPE_EQ_OVERFLOW:
-                       HCA_PRINT(TRACE_LEVEL_WARNING  ,HCA_DBG_LOW  ,("EQ overrun on EQN %d\n", eq->eqn));
-                       break;
-
-               case MTHCA_EVENT_TYPE_EEC_CATAS_ERROR:
-               case MTHCA_EVENT_TYPE_SRQ_CATAS_ERROR:
-               case MTHCA_EVENT_TYPE_LOCAL_CATAS_ERROR:
-               case MTHCA_EVENT_TYPE_ECC_DETECT:
-               default:
-                       HCA_PRINT(TRACE_LEVEL_WARNING,HCA_DBG_LOW, ("Unhandled event %02x(%02x) on EQ %d\n",
-                                  eqe->type, eqe->subtype, eq->eqn));
-                       break;
-               };
-
-               set_eqe_hw(eqe);
-               ++eq->cons_index;
-               eqes_found += 1;
-               ++set_ci;
-
-               /*
-                * The HCA will think the queue has overflowed if we
-                * don't tell it we've been processing events.  We
-                * create our EQs with MTHCA_NUM_SPARE_EQE extra
-                * entries, so we must update our consumer index at
-                * least that often.
-                */
-               if (unlikely(set_ci >= MTHCA_NUM_SPARE_EQE)) {
-                       /*
-                        * Conditional on hca_type is OK here because
-                        * this is a rare case, not the fast path.
-                        */
-                       set_eq_ci(dev, eq, eq->cons_index);
-                       set_ci = 0;
-               }
-               eqe = next_eqe_sw(eq);
-       }
-
-       /*
-        * Rely on caller to set consumer index so that we don't have
-        * to test hca_type in our interrupt handling fast path.
-        */
-       return eqes_found;
-}
-
-static void mthca_tavor_dpc( PRKDPC dpc, 
-       PVOID ctx, PVOID arg1, PVOID arg2 )
-{
-       struct mthca_eq  *eq  = ctx;
-       struct mthca_dev *dev = eq->dev;
-       SPIN_LOCK_PREP(lh);
-
-       UNREFERENCED_PARAMETER(dpc);
-       UNREFERENCED_PARAMETER(arg1);
-       UNREFERENCED_PARAMETER(arg2);
-
-       spin_lock_dpc(&eq->lock, &lh);
-
-       /* the 'if' handles the case where two DPCs were queued for the same EQ */
-       if (mthca_eq_int(dev, eq)) {
-               tavor_set_eq_ci(dev, eq, eq->cons_index);
-               tavor_eq_req_not(dev, eq->eqn);
-       }
-
-       spin_unlock_dpc(&lh);
-}
-
-static BOOLEAN mthca_tavor_interrupt(
-       PKINTERRUPT     int_obj, 
-       PVOID                           ctx
-       )
-{
-       struct mthca_dev *dev = ctx;
-       u32 ecr;
-       int i;
-
-       UNREFERENCED_PARAMETER(int_obj);
-
-       if (dev->eq_table.clr_mask)
-               writel(dev->eq_table.clr_mask, dev->eq_table.clr_int);
-
-       ecr = readl((u8*)dev->eq_regs.tavor.ecr_base + 4);
-       if (!ecr)
-               return FALSE;
-
-       writel(ecr, (u8*)dev->eq_regs.tavor.ecr_base +
-              MTHCA_ECR_CLR_BASE - MTHCA_ECR_BASE + 4);
-
-       for (i = 0; i < MTHCA_NUM_EQ; ++i) {
-               if (ecr & dev->eq_table.eq[i].eqn_mask &&
-                   next_eqe_sw(&dev->eq_table.eq[i])) {
-                       KeInsertQueueDpc(&dev->eq_table.eq[i].dpc, NULL, NULL);
-               }
-       }
-
-       return TRUE;
-}
-
-#ifdef MSI_SUPPORT
-static irqreturn_t mthca_tavor_msi_x_interrupt(int irq, void *eq_ptr,
-                                        struct pt_regs *regs)
-{
-       struct mthca_eq  *eq  = eq_ptr;
-       struct mthca_dev *dev = eq->dev;
-
-       mthca_eq_int(dev, eq);
-       tavor_set_eq_ci(dev, eq, eq->cons_index);
-       tavor_eq_req_not(dev, eq->eqn);
-
-       /* MSI-X vectors always belong to us */
-       return IRQ_HANDLED;
-}
-#endif
-
-static void mthca_arbel_dpc( PRKDPC dpc, 
-       PVOID ctx, PVOID arg1, PVOID arg2 )
-{
-       struct mthca_eq  *eq  = ctx;
-       struct mthca_dev *dev = eq->dev;
-       SPIN_LOCK_PREP(lh);
-
-       UNREFERENCED_PARAMETER(dpc);
-       UNREFERENCED_PARAMETER(arg1);
-       UNREFERENCED_PARAMETER(arg2);
-
-       spin_lock_dpc(&eq->lock, &lh);
-
-       /* the 'if' handles the case where two DPCs were queued for the same EQ */
-       if (mthca_eq_int(dev, eq))
-               arbel_set_eq_ci(dev, eq, eq->cons_index);
-       arbel_eq_req_not(dev, eq->eqn_mask);
-
-       spin_unlock_dpc(&lh);
-}
-
-static BOOLEAN mthca_arbel_interrupt(
-       PKINTERRUPT     int_obj, 
-       PVOID                           ctx
-       )
-{
-       struct mthca_dev *dev = ctx;
-       int work = 0;
-       int i;
-
-       UNREFERENCED_PARAMETER(int_obj);
-
-       if (dev->eq_table.clr_mask)
-               writel(dev->eq_table.clr_mask, dev->eq_table.clr_int);
-
-       for (i = 0; i < MTHCA_NUM_EQ; ++i) {
-               if (next_eqe_sw( &dev->eq_table.eq[i]) ) {
-                       work = 1;
-                       while(InterlockedCompareExchange(&dev->dpc_lock, 1, 0));
-                       
-                       KeInsertQueueDpc(&dev->eq_table.eq[i].dpc, NULL, NULL);
-                       InterlockedCompareExchange(&dev->dpc_lock, 0, 1);
-               } else {
-                       arbel_eq_req_not(dev, dev->eq_table.eq[i].eqn_mask);
-               }
-       }
-
-       return (BOOLEAN)work;
-}
-
-#ifdef MSI_SUPPORT
-static irqreturn_t mthca_arbel_msi_x_interrupt(int irq, void *eq_ptr,
-                                              struct pt_regs *regs)
-{
-       struct mthca_eq  *eq  = eq_ptr;
-       struct mthca_dev *dev = eq->dev;
-
-       mthca_eq_int(dev, eq);
-       arbel_set_eq_ci(dev, eq, eq->cons_index);
-       arbel_eq_req_not(dev, eq->eqn_mask);
-
-       /* MSI-X vectors always belong to us */
-       return IRQ_HANDLED;
-}
-#endif
-
-static int mthca_create_eq(struct mthca_dev *dev,
-                                    int nent,
-                                    u8 intr,
-                                    struct mthca_eq *eq)
-{
-       int npages;
-       u64 *dma_list = NULL;
-       struct mthca_mailbox *mailbox;
-       struct mthca_eq_context *eq_context;
-       int err = -ENOMEM;
-       int i;
-       u8 status;
-       
-       HCA_ENTER(HCA_DBG_INIT);
-       eq->dev  = dev; 
-       eq->nent = roundup_pow_of_two(max(nent, 2));
-       npages = ALIGN(eq->nent * MTHCA_EQ_ENTRY_SIZE, PAGE_SIZE) / PAGE_SIZE;
-
-       eq->page_list = kmalloc(npages * sizeof *eq->page_list,
-                               GFP_KERNEL);
-       if (!eq->page_list)
-               goto err_out;
-
-       for (i = 0; i < npages; ++i)
-               eq->page_list[i].page = NULL;
-
-       dma_list = kmalloc(npages * sizeof *dma_list, GFP_KERNEL);
-       if (!dma_list)
-               goto err_out_free;
-
-       mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
-       if (IS_ERR(mailbox))
-               goto err_out_free;
-       eq_context = mailbox->buf;
-
-       for (i = 0; i < npages; ++i) {
-               alloc_dma_zmem_map(dev, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL, &eq->page_list[i]);
-               if (!eq->page_list[i].page)
-                       goto err_out_free_pages;
-               dma_list[i] = eq->page_list[i].dma_address;
-       }
-
-       for (i = 0; i < eq->nent; ++i)
-               set_eqe_hw(get_eqe(eq, i));
-
-       eq->eqn = mthca_alloc(&dev->eq_table.alloc);
-       if (eq->eqn == -1)
-               goto err_out_free_pages;
-
-       err = mthca_mr_alloc_phys(dev, dev->driver_pd.pd_num,
-                                 dma_list, PAGE_SHIFT, npages,
-                                 0, npages * PAGE_SIZE,
-                                 MTHCA_MPT_FLAG_LOCAL_WRITE |
-                                 MTHCA_MPT_FLAG_LOCAL_READ,
-                                 &eq->mr);
-       if (err)
-               goto err_out_free_eq;
-
-       RtlZeroMemory(eq_context, sizeof *eq_context);
-       eq_context->flags           = cl_hton32(MTHCA_EQ_STATUS_OK   |
-                                                 MTHCA_EQ_OWNER_HW    |
-                                                 MTHCA_EQ_STATE_ARMED |
-                                                 MTHCA_EQ_FLAG_TR);
-       if (mthca_is_memfree(dev))
-               eq_context->flags  |= cl_hton32(MTHCA_EQ_STATE_ARBEL);
-
-       eq_context->logsize_usrpage = cl_hton32((ffs(eq->nent) - 1) << 24);
-       if (mthca_is_memfree(dev)) {
-               eq_context->arbel_pd = cl_hton32(dev->driver_pd.pd_num);
-       } else {
-               eq_context->logsize_usrpage |= cl_hton32(dev->driver_uar.index);
-               eq_context->tavor_pd         = cl_hton32(dev->driver_pd.pd_num);
-       }
-       eq_context->intr            = intr;
-       eq_context->lkey            = cl_hton32(eq->mr.ibmr.lkey);
-
-       err = mthca_SW2HW_EQ(dev, mailbox, eq->eqn, &status);
-       if (err) {
-               HCA_PRINT(TRACE_LEVEL_WARNING ,HCA_DBG_LOW ,("SW2HW_EQ failed (%d)\n", err));
-               goto err_out_free_mr;
-       }
-       if (status) {
-               HCA_PRINT(TRACE_LEVEL_WARNING,HCA_DBG_LOW,("SW2HW_EQ returned status 0x%02x\n",
-                          status));
-               err = -EINVAL;
-               goto err_out_free_mr;
-       }
-
-       kfree(dma_list);
-       mthca_free_mailbox(dev, mailbox);
-
-       eq->eqn_mask   = _byteswap_ulong(1 << eq->eqn);
-       eq->cons_index = 0;
-
-       dev->eq_table.arm_mask |= eq->eqn_mask;
-
-       HCA_PRINT(TRACE_LEVEL_INFORMATION ,HCA_DBG_INIT ,("Allocated EQ %d with %d entries\n",
-                 eq->eqn, eq->nent));
-
-       HCA_EXIT(HCA_DBG_INIT);
-       return err;
-
- err_out_free_mr:
-       mthca_free_mr(dev, &eq->mr);
-
- err_out_free_eq:
-       mthca_free(&dev->eq_table.alloc, eq->eqn);
-
- err_out_free_pages:
-       for (i = 0; i < npages; ++i) {
-               if (eq->page_list[i].page) {
-                       free_dma_mem_map(dev, &eq->page_list[i], PCI_DMA_BIDIRECTIONAL);
-               }
-       }
-       mthca_free_mailbox(dev, mailbox);
-
- err_out_free:
-       kfree(eq->page_list);
-       kfree(dma_list);
-
- err_out:
-       HCA_EXIT(HCA_DBG_INIT);
-       return err;
-}
-
-static void mthca_free_eq(struct mthca_dev *dev,
-                         struct mthca_eq *eq)
-{
-       struct mthca_mailbox *mailbox;
-       int err;
-       u8 status;
-       int npages = (eq->nent * MTHCA_EQ_ENTRY_SIZE + PAGE_SIZE - 1) /
-               PAGE_SIZE;
-       int i;
-
-       mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
-       if (IS_ERR(mailbox))
-               return;
-
-       err = mthca_HW2SW_EQ(dev, mailbox, eq->eqn, &status);
-       if (err)
-               HCA_PRINT(TRACE_LEVEL_WARNING  ,HCA_DBG_LOW  ,("HW2SW_EQ failed (%d)\n", err));
-       if (status)
-               HCA_PRINT(TRACE_LEVEL_WARNING  ,HCA_DBG_LOW  ,("HW2SW_EQ returned status 0x%02x\n", status));
-
-       dev->eq_table.arm_mask &= ~eq->eqn_mask;
-
-       { // debug print
-               HCA_PRINT(TRACE_LEVEL_VERBOSE  ,HCA_DBG_LOW  ,("Dumping EQ context %02x:\n", eq->eqn));
-               for (i = 0; i < sizeof (struct mthca_eq_context) / 4; i=i+4) {
-                       HCA_PRINT(TRACE_LEVEL_VERBOSE ,HCA_DBG_LOW ,("[%02x] %08x %08x %08x %08x\n", i,
-                                       cl_ntoh32(*(u32*)((u8*)mailbox->buf + i * 4)),
-                                       cl_ntoh32(*(u32*)((u8*)mailbox->buf + (i+1)*4)),
-                                       cl_ntoh32(*(u32*)((u8*)mailbox->buf + (i+2)*4)),
-                                       cl_ntoh32(*(u32*)((u8*)mailbox->buf + (i+3)*4))));
-                                       
-               }
-       }
-
-       mthca_free_mr(dev, &eq->mr);
-       for (i = 0; i < npages; ++i) {
-               free_dma_mem_map(dev, &eq->page_list[i], PCI_DMA_BIDIRECTIONAL);
-       }
-
-       kfree(eq->page_list);
-       mthca_free_mailbox(dev, mailbox);
-}
-
-static void mthca_free_irqs(struct mthca_dev *dev)
-{
-       if (dev->eq_table.have_irq)
-               free_irq(dev->ext->int_obj);
-#ifdef MSI_SUPPORT     
-       for (i = 0; i < MTHCA_NUM_EQ; ++i)
-               if (dev->eq_table.eq[i].have_irq)
-                       free_irq(dev->eq_table.eq[i].msi_x_vector,
-                                dev->eq_table.eq + i);
-#endif         
-}
-
-static int mthca_map_reg(struct mthca_dev *dev,
-                                  u64 offset, unsigned long size,
-                                  void __iomem **map, SIZE_T *map_size)
-{
-       u64 base = pci_resource_start(dev, HCA_BAR_TYPE_HCR);
-       *map = ioremap(base + offset, size, map_size);
-       if (!*map) 
-               return -ENOMEM;
-       return 0;
-}
-
-static void mthca_unmap_reg(struct mthca_dev *dev, u64 offset,
-                           unsigned long size, void __iomem *map, SIZE_T map_size)
-{
-       UNREFERENCED_PARAMETER(dev);
-       UNREFERENCED_PARAMETER(size);
-       UNREFERENCED_PARAMETER(offset);
-       iounmap(map, map_size);
-}
-
-static int mthca_map_eq_regs(struct mthca_dev *dev)
-{
-       u64 mthca_base;
-
-       mthca_base = pci_resource_start(dev, HCA_BAR_TYPE_HCR);
-
-       if (mthca_is_memfree(dev)) {
-               /*
-                * We assume that the EQ arm and EQ set CI registers
-                * fall within the first BAR.  We can't trust the
-                * values firmware gives us, since those addresses are
-                * valid on the HCA's side of the PCI bus but not
-                * necessarily the host side.
-                */
-               if (mthca_map_reg(dev, (pci_resource_len(dev, 0) - 1) &
-                                 dev->fw.arbel.clr_int_base, MTHCA_CLR_INT_SIZE,
-                                 &dev->clr_base, &dev->clr_base_size)) {
-                       HCA_PRINT(TRACE_LEVEL_ERROR,HCA_DBG_LOW,("Couldn't map interrupt clear register, "
-                                 "aborting.\n"));
-                       return -ENOMEM;
-               }
-
-               /*
-                * Add 4 because we limit ourselves to EQs 0 ... 31,
-                * so we only need the low word of the register.
-                */
-               if (mthca_map_reg(dev, ((pci_resource_len(dev, 0) - 1) &
-                                       dev->fw.arbel.eq_arm_base) + 4, 4,
-                                 &dev->eq_regs.arbel.eq_arm, &dev->eq_regs.arbel.eq_arm_size)) {
-                       HCA_PRINT(TRACE_LEVEL_ERROR  ,HCA_DBG_LOW  ,("Couldn't map EQ arm register, aborting.\n"));
-                       mthca_unmap_reg(dev, (pci_resource_len(dev, 0) - 1) &
-                                       dev->fw.arbel.clr_int_base, MTHCA_CLR_INT_SIZE,
-                                       dev->clr_base, dev->clr_base_size);
-                       return -ENOMEM;
-               }
-
-               if (mthca_map_reg(dev, (pci_resource_len(dev, 0) - 1) &
-                                 dev->fw.arbel.eq_set_ci_base,
-                                 MTHCA_EQ_SET_CI_SIZE,
-                                 &dev->eq_regs.arbel.eq_set_ci_base,
-                                 &dev->eq_regs.arbel.eq_set_ci_base_size
-                                 )) {
-                       HCA_PRINT(TRACE_LEVEL_ERROR  ,HCA_DBG_LOW  ,("Couldn't map EQ CI register, aborting.\n"));
-                       mthca_unmap_reg(dev, ((pci_resource_len(dev, 0) - 1) &
-                                             dev->fw.arbel.eq_arm_base) + 4, 4,
-                                       dev->eq_regs.arbel.eq_arm, dev->eq_regs.arbel.eq_arm_size);
-                       mthca_unmap_reg(dev, (pci_resource_len(dev, 0) - 1) &
-                                       dev->fw.arbel.clr_int_base, MTHCA_CLR_INT_SIZE,
-                                       dev->clr_base, dev->clr_base_size);
-                       return -ENOMEM;
-               }
-       } else {
-               if (mthca_map_reg(dev, MTHCA_CLR_INT_BASE, MTHCA_CLR_INT_SIZE,
-                                 &dev->clr_base, &dev->clr_base_size)) {
-                       HCA_PRINT(TRACE_LEVEL_ERROR,HCA_DBG_LOW,("Couldn't map interrupt clear register, "
-                                 "aborting.\n"));
-                       return -ENOMEM;
-               }
-
-               if (mthca_map_reg(dev, MTHCA_ECR_BASE,
-                                 MTHCA_ECR_SIZE + MTHCA_ECR_CLR_SIZE,
-                                 &dev->eq_regs.tavor.ecr_base,  &dev->eq_regs.tavor.ecr_base_size)) {
-                       HCA_PRINT(TRACE_LEVEL_ERROR,HCA_DBG_LOW,("Couldn't map ecr register, "
-                                 "aborting.\n"));
-                       mthca_unmap_reg(dev, MTHCA_CLR_INT_BASE, MTHCA_CLR_INT_SIZE,
-                                       dev->clr_base, dev->clr_base_size);
-                       return -ENOMEM;
-               }
-       }
-
-       return 0;
-
-}
-
-static void mthca_unmap_eq_regs(struct mthca_dev *dev)
-{
-       if (mthca_is_memfree(dev)) {
-               mthca_unmap_reg(dev, (pci_resource_len(dev, 0) - 1) &
-                               dev->fw.arbel.eq_set_ci_base,
-                               MTHCA_EQ_SET_CI_SIZE,
-                               dev->eq_regs.arbel.eq_set_ci_base, 
-                               dev->eq_regs.arbel.eq_set_ci_base_size);
-               mthca_unmap_reg(dev, ((pci_resource_len(dev, 0) - 1) &
-                               dev->fw.arbel.eq_arm_base) + 4, 4,
-                       dev->eq_regs.arbel.eq_arm,
-                       dev->eq_regs.arbel.eq_arm_size);
-               mthca_unmap_reg(dev, (pci_resource_len(dev, 0) - 1) &
-                               dev->fw.arbel.clr_int_base, MTHCA_CLR_INT_SIZE,
-                               dev->clr_base, dev->clr_base_size);
-       } else {
-               mthca_unmap_reg(dev, MTHCA_ECR_BASE,
-                               MTHCA_ECR_SIZE + MTHCA_ECR_CLR_SIZE,
-                               dev->eq_regs.tavor.ecr_base, 
-                               dev->eq_regs.tavor.ecr_base_size);
-               mthca_unmap_reg(dev, MTHCA_CLR_INT_BASE, MTHCA_CLR_INT_SIZE,
-                               dev->clr_base, dev->clr_base_size);
-       }
-}
-
-int mthca_map_eq_icm(struct mthca_dev *dev, u64 icm_virt)
-{
-       int ret;
-       u8 status;
-
-       /*
-        * We assume that mapping one page is enough for the whole EQ
-        * context table.  This is fine with all current HCAs, because
-        * we only use 32 EQs and each EQ uses 32 bytes of context
-        * memory, or 1 KB total.
-        */
-       dev->eq_table.icm_virt = icm_virt;
-       alloc_dma_zmem_map(dev,PAGE_SIZE, PCI_DMA_BIDIRECTIONAL, &dev->eq_table.sg);
-       if (!dev->eq_table.sg.page)
-               return -ENOMEM;
-
-       ret = mthca_MAP_ICM_page(dev, dev->eq_table.sg.dma_address, icm_virt, &status);
-       if (!ret && status)
-               ret = -EINVAL;
-       if (ret) 
-               free_dma_mem_map(dev, &dev->eq_table.sg, PCI_DMA_BIDIRECTIONAL );
-
-       return ret;
-}
-
-void mthca_unmap_eq_icm(struct mthca_dev *dev)
-{
-       u8 status;
-
-       mthca_UNMAP_ICM(dev, dev->eq_table.icm_virt, PAGE_SIZE / 4096, &status);
-       free_dma_mem_map(dev, &dev->eq_table.sg, PCI_DMA_BIDIRECTIONAL );
-}
-
-int mthca_init_eq_table(struct mthca_dev *dev)
-{
-       int err;
-       u8 status;
-       u8 intr;
-       int i;
-       
-       HCA_ENTER(HCA_DBG_INIT);
-       err = mthca_alloc_init(&dev->eq_table.alloc,
-                              dev->limits.num_eqs,
-                              dev->limits.num_eqs - 1,
-                              dev->limits.reserved_eqs);
-       if (err)
-               return err;
-
-       err = mthca_map_eq_regs(dev);
-       if (err)
-               goto err_out_free;
-
-#ifdef MSI_SUPPORT
-       if (dev->mthca_flags & MTHCA_FLAG_MSI ||
-           dev->mthca_flags & MTHCA_FLAG_MSI_X) {
-               dev->eq_table.clr_mask = 0;
-       } else
-#endif 
-       {
-               dev->eq_table.clr_mask =
-                       _byteswap_ulong(1 << (dev->eq_table.inta_pin & 31));
-               dev->eq_table.clr_int  = dev->clr_base +
-                       (dev->eq_table.inta_pin < 32 ? 4 : 0);
-       }
-
-       dev->eq_table.arm_mask = 0;
-
-       intr = (dev->mthca_flags & MTHCA_FLAG_MSI) ?
-               128 : dev->eq_table.inta_pin;
-
-       err = mthca_create_eq(dev, dev->limits.num_cqs + MTHCA_NUM_SPARE_EQE,
-                             (dev->mthca_flags & MTHCA_FLAG_MSI_X) ? 128 : intr,
-                             &dev->eq_table.eq[MTHCA_EQ_COMP]);
-       if (err)
-               goto err_out_unmap;
-
-       err = mthca_create_eq(dev, MTHCA_NUM_ASYNC_EQE + MTHCA_NUM_SPARE_EQE,
-                             (dev->mthca_flags & MTHCA_FLAG_MSI_X) ? 129 : intr,
-                             &dev->eq_table.eq[MTHCA_EQ_ASYNC]);
-       if (err)
-               goto err_out_comp;
-
-       err = mthca_create_eq(dev, MTHCA_NUM_CMD_EQE + MTHCA_NUM_SPARE_EQE,
-                             (dev->mthca_flags & MTHCA_FLAG_MSI_X) ? 130 : intr,
-                             &dev->eq_table.eq[MTHCA_EQ_CMD]);
-       if (err)
-               goto err_out_async;
-
-#ifdef MSI_SUPPORT
-       if (dev->mthca_flags & MTHCA_FLAG_MSI_X) {
-               static const char *eq_name[] = {
-                       [MTHCA_EQ_COMP]  = DRV_NAME " (comp)",
-                       [MTHCA_EQ_ASYNC] = DRV_NAME " (async)",
-                       [MTHCA_EQ_CMD]   = DRV_NAME " (cmd)"
-               };
-
-               for (i = 0; i < MTHCA_NUM_EQ; ++i) {
-                       err = request_irq(dev->eq_table.eq[i].msi_x_vector,
-                                         mthca_is_memfree(dev) ?
-                                         mthca_arbel_msi_x_interrupt :
-                                         mthca_tavor_msi_x_interrupt,
-                                         0, eq_name[i], dev->eq_table.eq + i);
-                       if (err)
-                               goto err_out_cmd;
-                       dev->eq_table.eq[i].have_irq = 1;
-                       /* init DPC stuff something like that */
-                       spin_lock_init( &dev->eq_table.eq[i].lock );    
-                       dev->dpc_lock = 0;
-                       KeInitializeDpc(
-                               &dev->eq_table.eq[i].dpc,
-                               mthca_is_memfree(dev) ?
-                                       mthca_arbel_msi_x_dpc :
-                                       mthca_tavor_msi_x_dpc,
-                               dev->eq_table.eq + i);
-               }
-       } else 
-#endif 
-       {
-               spin_lock_init( &dev->ext->isr_lock );  
-               err = request_irq(
-                       &dev->ext->interruptInfo,
-                       &dev->ext->isr_lock.lock        ,
-                       mthca_is_memfree(dev) ? mthca_arbel_interrupt : mthca_tavor_interrupt,
-                       dev,
-                       &dev->ext->int_obj
-                 );
-               if (err)
-                       goto err_out_cmd;
-               dev->eq_table.have_irq = 1;
-
-               /* init DPC stuff */
-               for (i = 0; i < MTHCA_NUM_EQ; ++i) {
-                       spin_lock_init( &dev->eq_table.eq[i].lock );    
-                       KeInitializeDpc(
-                               &dev->eq_table.eq[i].dpc,
-                               mthca_is_memfree(dev) ?
-                                       mthca_arbel_dpc :
-                                       mthca_tavor_dpc,
-                               dev->eq_table.eq + i);
-               }
-       }
-
-       err = mthca_MAP_EQ(dev, async_mask(dev),
-                          0, dev->eq_table.eq[MTHCA_EQ_ASYNC].eqn, &status);
-       if (err)
-               HCA_PRINT(TRACE_LEVEL_WARNING,HCA_DBG_INIT,("MAP_EQ for async EQ %d failed (%d)\n",
-                          dev->eq_table.eq[MTHCA_EQ_ASYNC].eqn, err));
-       if (status)
-               HCA_PRINT(TRACE_LEVEL_WARNING,HCA_DBG_INIT, ("MAP_EQ for async EQ %d returned status 0x%02x\n",
-                          dev->eq_table.eq[MTHCA_EQ_ASYNC].eqn, status));
-       err = mthca_MAP_EQ(dev, MTHCA_CMD_EVENT_MASK,
-                          0, dev->eq_table.eq[MTHCA_EQ_CMD].eqn, &status);
-       if (err)
-               HCA_PRINT(TRACE_LEVEL_WARNING,HCA_DBG_INIT, ("MAP_EQ for cmd EQ %d failed (%d)\n",
-                          dev->eq_table.eq[MTHCA_EQ_CMD].eqn, err));
-       if (status)
-               HCA_PRINT(TRACE_LEVEL_WARNING,HCA_DBG_INIT,("MAP_EQ for cmd EQ %d returned status 0x%02x\n",
-                          dev->eq_table.eq[MTHCA_EQ_CMD].eqn, status));
-
-       for (i = 0; i < MTHCA_NUM_EQ; ++i)
-               if (mthca_is_memfree(dev))
-                       arbel_eq_req_not(dev, dev->eq_table.eq[i].eqn_mask);
-               else
-                       tavor_eq_req_not(dev, dev->eq_table.eq[i].eqn);
-
-       return 0;
-
-err_out_cmd:
-       mthca_free_eq(dev, &dev->eq_table.eq[MTHCA_EQ_CMD]);
-
-err_out_async:
-       mthca_free_eq(dev, &dev->eq_table.eq[MTHCA_EQ_ASYNC]);
-
-err_out_comp:
-       mthca_free_eq(dev, &dev->eq_table.eq[MTHCA_EQ_COMP]);
-
-err_out_unmap:
-       mthca_unmap_eq_regs(dev);
-
-err_out_free:
-       mthca_alloc_cleanup(&dev->eq_table.alloc);
-       HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_INIT ,("mthca_init_eq  failed %d",err));
-       return err;
-}
-
-void mthca_cleanup_eq_table(struct mthca_dev *dev)
-{
-       u8 status;
-       int i;
-
-       mthca_free_irqs(dev);
-
-       mthca_MAP_EQ(dev, async_mask(dev),
-                    1, dev->eq_table.eq[MTHCA_EQ_ASYNC].eqn, &status);
-       mthca_MAP_EQ(dev, MTHCA_CMD_EVENT_MASK,
-                    1, dev->eq_table.eq[MTHCA_EQ_CMD].eqn, &status);
-
-       for (i = 0; i < MTHCA_NUM_EQ; ++i)
-               mthca_free_eq(dev, &dev->eq_table.eq[i]);
-
-       mthca_unmap_eq_regs(dev);
-
-       mthca_alloc_cleanup(&dev->eq_table.alloc);
-}
-
-
+/*\r
+ * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.\r
+ * Copyright (c) 2005 Mellanox Technologies. All rights reserved.\r
+ *\r
+ * This software is available to you under a choice of one of two\r
+ * licenses.  You may choose to be licensed under the terms of the GNU\r
+ * General Public License (GPL) Version 2, available from the file\r
+ * COPYING in the main directory of this source tree, or the\r
+ * OpenIB.org BSD license below:\r
+ *\r
+ *     Redistribution and use in source and binary forms, with or\r
+ *     without modification, are permitted provided that the following\r
+ *     conditions are met:\r
+ *\r
+ *      - Redistributions of source code must retain the above\r
+ *        copyright notice, this list of conditions and the following\r
+ *        disclaimer.\r
+ *\r
+ *      - Redistributions in binary form must reproduce the above\r
+ *        copyright notice, this list of conditions and the following\r
+ *        disclaimer in the documentation and/or other materials\r
+ *        provided with the distribution.\r
+ *\r
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,\r
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\r
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND\r
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS\r
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN\r
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN\r
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\r
+ * SOFTWARE.\r
+ *\r
+ * $Id$\r
+ */\r
+\r
+#include "mthca_dev.h"\r
+#if defined(EVENT_TRACING)\r
+#ifdef offsetof\r
+#undef offsetof\r
+#endif\r
+#include "mthca_eq.tmh"\r
+#endif\r
+#include "mthca_cmd.h"\r
+#include "mthca_config_reg.h"\r
+\r
+static int mthca_map_reg(struct mthca_dev *dev,\r
+                                  u64 offset, unsigned long size,\r
+                                  void __iomem **map, SIZE_T *map_size);\r
+static int mthca_map_eq_regs(struct mthca_dev *dev);\r
+static void mthca_unmap_eq_regs(struct mthca_dev *dev);\r
+static int mthca_create_eq(struct mthca_dev *dev,\r
+                                    int nent,\r
+                                    u8 intr,\r
+                                    struct mthca_eq *eq);\r
+\r
+\r
+\r
+#ifdef ALLOC_PRAGMA\r
+#pragma alloc_text (PAGE, mthca_map_reg)\r
+#pragma alloc_text (PAGE, mthca_map_eq_regs)\r
+#pragma alloc_text (PAGE, mthca_init_eq_table)\r
+#pragma alloc_text (PAGE, mthca_unmap_eq_regs)\r
+#pragma alloc_text (PAGE, mthca_map_eq_icm)\r
+#pragma alloc_text (PAGE, mthca_unmap_eq_icm)\r
+#pragma alloc_text (PAGE, mthca_create_eq)\r
+#pragma alloc_text (PAGE, mthca_cleanup_eq_table)\r
+#endif\r
+\r
+enum {\r
+       MTHCA_NUM_ASYNC_EQE = 0x80,\r
+       MTHCA_NUM_CMD_EQE   = 0x80,\r
+       MTHCA_NUM_SPARE_EQE = 0x80,\r
+       MTHCA_EQ_ENTRY_SIZE = 0x20\r
+};\r
+\r
+/*\r
+ * Must be packed because start is 64 bits but only aligned to 32 bits.\r
+ */\r
+#pragma pack(push,1)\r
+struct mthca_eq_context {\r
+       __be32 flags;\r
+       __be64 start;\r
+       __be32 logsize_usrpage;\r
+       __be32 tavor_pd;        /* reserved for Arbel */\r
+       u8     reserved1[3];\r
+       u8     intr;\r
+       __be32 arbel_pd;        /* lost_count for Tavor */\r
+       __be32 lkey;\r
+       u32    reserved2[2];\r
+       __be32 consumer_index;\r
+       __be32 producer_index;\r
+       u32    reserved3[4];\r
+};\r
+#pragma pack(pop)\r
+\r
+#define MTHCA_EQ_STATUS_OK          ( 0 << 28)\r
+#define MTHCA_EQ_STATUS_OVERFLOW    ( 9 << 28)\r
+#define MTHCA_EQ_STATUS_WRITE_FAIL  (10 << 28)\r
+#define MTHCA_EQ_OWNER_SW           ( 0 << 24)\r
+#define MTHCA_EQ_OWNER_HW           ( 1 << 24)\r
+#define MTHCA_EQ_FLAG_TR            ( 1 << 18)\r
+#define MTHCA_EQ_FLAG_OI            ( 1 << 17)\r
+#define MTHCA_EQ_STATE_ARMED        ( 1 <<  8)\r
+#define MTHCA_EQ_STATE_FIRED        ( 2 <<  8)\r
+#define MTHCA_EQ_STATE_ALWAYS_ARMED ( 3 <<  8)\r
+#define MTHCA_EQ_STATE_ARBEL        ( 8 <<  8)\r
+\r
+enum {\r
+       MTHCA_EVENT_TYPE_COMP               = 0x00,\r
+       MTHCA_EVENT_TYPE_PATH_MIG           = 0x01,\r
+       MTHCA_EVENT_TYPE_COMM_EST           = 0x02,\r
+       MTHCA_EVENT_TYPE_SQ_DRAINED         = 0x03,\r
+       MTHCA_EVENT_TYPE_CQ_ERROR           = 0x04,\r
+       MTHCA_EVENT_TYPE_WQ_CATAS_ERROR     = 0x05,\r
+       MTHCA_EVENT_TYPE_EEC_CATAS_ERROR    = 0x06,\r
+       MTHCA_EVENT_TYPE_PATH_MIG_FAILED    = 0x07,\r
+       MTHCA_EVENT_TYPE_LOCAL_CATAS_ERROR  = 0x08,\r
+       MTHCA_EVENT_TYPE_PORT_CHANGE        = 0x09,\r
+       MTHCA_EVENT_TYPE_CMD                = 0x0a,\r
+       MTHCA_EVENT_TYPE_WQ_INVAL_REQ_ERROR = 0x10,\r
+       MTHCA_EVENT_TYPE_ECC_DETECT         = 0x0e,\r
+       MTHCA_EVENT_TYPE_EQ_OVERFLOW        = 0x0f,\r
+       MTHCA_EVENT_TYPE_WQ_ACCESS_ERROR    = 0x11,\r
+       MTHCA_EVENT_TYPE_SRQ_CATAS_ERROR    = 0x12,\r
+       MTHCA_EVENT_TYPE_SRQ_QP_LAST_WQE    = 0x13,\r
+       MTHCA_EVENT_TYPE_SRQ_LIMIT          = 0x14\r
+};\r
+\r
+#define MTHCA_ASYNC_EVENT_MASK ((1Ui64 << MTHCA_EVENT_TYPE_PATH_MIG)           | \\r
+                               (1Ui64 << MTHCA_EVENT_TYPE_COMM_EST)           | \\r
+                               (1Ui64 << MTHCA_EVENT_TYPE_SQ_DRAINED)         | \\r
+                               (1Ui64 << MTHCA_EVENT_TYPE_CQ_ERROR)           | \\r
+                               (1Ui64 << MTHCA_EVENT_TYPE_WQ_CATAS_ERROR)     | \\r
+                               (1Ui64 << MTHCA_EVENT_TYPE_EEC_CATAS_ERROR)    | \\r
+                               (1Ui64 << MTHCA_EVENT_TYPE_PATH_MIG_FAILED)    | \\r
+                               (1Ui64 << MTHCA_EVENT_TYPE_WQ_INVAL_REQ_ERROR) | \\r
+                               (1Ui64 << MTHCA_EVENT_TYPE_WQ_ACCESS_ERROR)    | \\r
+                               (1Ui64 << MTHCA_EVENT_TYPE_LOCAL_CATAS_ERROR)  | \\r
+                               (1Ui64 << MTHCA_EVENT_TYPE_PORT_CHANGE)        | \\r
+                               (1Ui64 << MTHCA_EVENT_TYPE_ECC_DETECT))\r
+#define MTHCA_SRQ_EVENT_MASK   ((1Ui64 << MTHCA_EVENT_TYPE_SRQ_CATAS_ERROR)    | \\r
+                                       (1Ui64 << MTHCA_EVENT_TYPE_SRQ_QP_LAST_WQE)             | \\r
+                                       (1Ui64 << MTHCA_EVENT_TYPE_SRQ_LIMIT))\r
+\r
+#define MTHCA_CMD_EVENT_MASK    (1Ui64 << MTHCA_EVENT_TYPE_CMD)\r
+\r
+#define MTHCA_EQ_DB_INC_CI     (1 << 24)\r
+#define MTHCA_EQ_DB_REQ_NOT    (2 << 24)\r
+#define MTHCA_EQ_DB_DISARM_CQ  (3 << 24)\r
+#define MTHCA_EQ_DB_SET_CI     (4 << 24)\r
+#define MTHCA_EQ_DB_ALWAYS_ARM (5 << 24)\r
+\r
+#pragma pack(push,1)\r
+struct mthca_eqe {\r
+       u8 reserved1;\r
+       u8 type;\r
+       u8 reserved2;\r
+       u8 subtype;\r
+       union {\r
+               u32 raw[6];\r
+               struct {\r
+                       __be32 cqn;\r
+               } comp;\r
+               struct {\r
+                       u16    reserved1;\r
+                       __be16 token;\r
+                       u32    reserved2;\r
+                       u8     reserved3[3];\r
+                       u8     status;\r
+                       __be64 out_param;\r
+               } cmd;\r
+               struct {\r
+                       __be32 qpn;\r
+               } qp;\r
+               struct {\r
+                       __be32 srqn;\r
+               } srq;\r
+               struct {\r
+                       __be32 cqn;\r
+                       u32    reserved1;\r
+                       u8     reserved2[3];\r
+                       u8     syndrome;\r
+               } cq_err;\r
+               struct {\r
+                       u32    reserved1[2];\r
+                       __be32 port;\r
+               } port_change;\r
+       } event;\r
+       u8 reserved3[3];\r
+       u8 owner;\r
+};\r
+#pragma pack(pop)\r
+\r
+#define  MTHCA_EQ_ENTRY_OWNER_SW      (0 << 7)\r
+#define  MTHCA_EQ_ENTRY_OWNER_HW      (1 << 7)\r
+\r
+static inline u64 async_mask(struct mthca_dev *dev)\r
+{\r
+       return dev->mthca_flags & MTHCA_FLAG_SRQ ?\r
+               MTHCA_ASYNC_EVENT_MASK | MTHCA_SRQ_EVENT_MASK :\r
+               MTHCA_ASYNC_EVENT_MASK;\r
+}\r
+\r
+static inline void tavor_set_eq_ci(struct mthca_dev *dev, struct mthca_eq *eq, u32 ci)\r
+{\r
+       __be32 doorbell[2];\r
+\r
+       doorbell[0] = cl_hton32(MTHCA_EQ_DB_SET_CI | eq->eqn);\r
+       doorbell[1] = cl_hton32(ci & (eq->nent - 1));\r
+\r
+       /*\r
+        * This barrier makes sure that all updates to ownership bits\r
+        * done by set_eqe_hw() hit memory before the consumer index\r
+        * is updated.  set_eq_ci() allows the HCA to possibly write\r
+        * more EQ entries, and we want to avoid the exceedingly\r
+        * unlikely possibility of the HCA writing an entry and then\r
+        * having set_eqe_hw() overwrite the owner field.\r
+        */\r
+       wmb();\r
+       mthca_write64(doorbell,\r
+                     dev->kar + MTHCA_EQ_DOORBELL,\r
+                     MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));\r
+}\r
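+\r
+/*\r
+ * Worked example for the doorbell format above, using the\r
+ * MTHCA_EQ_DB_* values (shown before cl_hton32 swabbing): for\r
+ * eqn = 2, ci = 300 and nent = 256,\r
+ *   doorbell[0] = MTHCA_EQ_DB_SET_CI | 2 = (4 << 24) | 2 = 0x04000002\r
+ *   doorbell[1] = 300 & (256 - 1)        = 44            = 0x0000002c\r
+ * i.e. the command sits in the top byte of the first word and the\r
+ * wrapped consumer index in the second.\r
+ */\r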
+\r
+static inline void arbel_set_eq_ci(struct mthca_dev *dev, struct mthca_eq *eq, u32 ci)\r
+{\r
+       /* See comment in tavor_set_eq_ci() above. */\r
+       wmb();\r
+       __raw_writel((u32) cl_hton32(ci),\r
+               (u8*)dev->eq_regs.arbel.eq_set_ci_base + eq->eqn * 8);\r
+       /* We still want ordering, just not swabbing, so add a barrier */\r
+       mb();\r
+}\r
+\r
+static inline void set_eq_ci(struct mthca_dev *dev, struct mthca_eq *eq, u32 ci)\r
+{\r
+       if (mthca_is_memfree(dev))\r
+               arbel_set_eq_ci(dev, eq, ci);\r
+       else\r
+               tavor_set_eq_ci(dev, eq, ci);\r
+}\r
+\r
+static inline void tavor_eq_req_not(struct mthca_dev *dev, int eqn)\r
+{\r
+       __be32 doorbell[2];\r
+\r
+       doorbell[0] = cl_hton32(MTHCA_EQ_DB_REQ_NOT | eqn);\r
+       doorbell[1] = 0;\r
+\r
+       mthca_write64(doorbell,\r
+                     dev->kar + MTHCA_EQ_DOORBELL,\r
+                     MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));\r
+}\r
+\r
+static inline void arbel_eq_req_not(struct mthca_dev *dev, u32 eqn_mask)\r
+{\r
+       writel(eqn_mask, dev->eq_regs.arbel.eq_arm);\r
+}\r
+\r
+static inline void disarm_cq(struct mthca_dev *dev, int eqn, int cqn)\r
+{\r
+       if (!mthca_is_memfree(dev)) {\r
+               __be32 doorbell[2];\r
+\r
+               doorbell[0] = cl_hton32(MTHCA_EQ_DB_DISARM_CQ | eqn);\r
+               doorbell[1] = cl_hton32(cqn);\r
+\r
+               mthca_write64(doorbell,\r
+                             dev->kar + MTHCA_EQ_DOORBELL,\r
+                             MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));\r
+       }\r
+}\r
+\r
+static inline struct mthca_eqe *get_eqe(struct mthca_eq *eq, u32 entry)\r
+{\r
+       unsigned long off = (entry & (eq->nent - 1)) * MTHCA_EQ_ENTRY_SIZE;\r
+       return (struct mthca_eqe *)((u8*)eq->page_list[off / PAGE_SIZE].page + off % PAGE_SIZE);\r
+}\r
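+\r
+/*\r
+ * Example of the index arithmetic above, assuming a 4 KB PAGE_SIZE:\r
+ * one page holds 4096 / MTHCA_EQ_ENTRY_SIZE = 128 EQEs, so with\r
+ * nent = 256 and entry = 300 the slot is 300 & 255 = 44 and\r
+ * off = 44 * 32 = 1408, i.e. page_list[0] at byte offset 1408.\r
+ */\r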
+\r
+static inline struct mthca_eqe* next_eqe_sw(struct mthca_eq *eq)\r
+{\r
+       struct mthca_eqe* eqe;\r
+       eqe = get_eqe(eq, eq->cons_index);\r
+       return (MTHCA_EQ_ENTRY_OWNER_HW & eqe->owner) ? NULL : eqe;\r
+}\r
+\r
+static inline void set_eqe_hw(struct mthca_eqe *eqe)\r
+{\r
+       eqe->owner =  MTHCA_EQ_ENTRY_OWNER_HW;\r
+}\r
+\r
+static void port_change(struct mthca_dev *dev, int port, int active)\r
+{\r
+       struct ib_event record;\r
+\r
+       HCA_PRINT(TRACE_LEVEL_VERBOSE,HCA_DBG_LOW,("Port change to %s for port %d\n",\r
+                 active ? "active" : "down", port));\r
+\r
+       record.device = &dev->ib_dev;\r
+       record.event  = active ? IB_EVENT_PORT_ACTIVE : IB_EVENT_PORT_ERR;\r
+       record.element.port_num = (u8)port;\r
+       // Gen2 ib_core mechanism\r
+       ib_dispatch_event(&record);\r
+       // our callback\r
+       ca_event_handler( &record, &dev->ext->hca.hob );\r
+}\r
+\r
+static int mthca_eq_int(struct mthca_dev *dev, struct mthca_eq *eq)\r
+{\r
+       int disarm_cqn;\r
+       int eqes_found = 0;\r
+       int set_ci = 0;\r
+       struct mthca_eqe *eqe = next_eqe_sw(eq);\r
+       uint64_t start = cl_get_time_stamp();\r
+       int loops = 0;\r
+\r
+       while (eqe) {\r
+\r
+               /*\r
+                * Make sure we read EQ entry contents after we've\r
+                * checked the ownership bit.\r
+                */\r
+               rmb();\r
+\r
+               switch (eqe->type) {\r
+               case MTHCA_EVENT_TYPE_COMP:\r
+                       disarm_cqn = cl_ntoh32(eqe->event.comp.cqn) & 0xffffff;\r
+                       disarm_cq(dev, eq->eqn, disarm_cqn);\r
+                       mthca_cq_completion(dev, disarm_cqn);\r
+                       break;\r
+\r
+               case MTHCA_EVENT_TYPE_PATH_MIG:\r
+                       mthca_qp_event(dev, cl_ntoh32(eqe->event.qp.qpn) & 0xffffff,\r
+                                      IB_EVENT_PATH_MIG);\r
+                       break;\r
+\r
+               case MTHCA_EVENT_TYPE_COMM_EST:\r
+                       mthca_qp_event(dev, cl_ntoh32(eqe->event.qp.qpn) & 0xffffff,\r
+                                      IB_EVENT_COMM_EST);\r
+                       break;\r
+\r
+               case MTHCA_EVENT_TYPE_SQ_DRAINED:\r
+                       mthca_qp_event(dev, cl_ntoh32(eqe->event.qp.qpn) & 0xffffff,\r
+                                      IB_EVENT_SQ_DRAINED);\r
+                       break;\r
+\r
+               case MTHCA_EVENT_TYPE_SRQ_QP_LAST_WQE:\r
+                       mthca_qp_event(dev, cl_ntoh32(eqe->event.qp.qpn) & 0xffffff,\r
+                                      IB_EVENT_QP_LAST_WQE_REACHED);\r
+                       break;\r
+\r
+               case MTHCA_EVENT_TYPE_SRQ_LIMIT:\r
+                       mthca_srq_event(dev, cl_ntoh32(eqe->event.srq.srqn) & 0xffffff,\r
+                                               IB_EVENT_SRQ_LIMIT_REACHED);\r
+                       break;\r
+\r
+               case MTHCA_EVENT_TYPE_WQ_CATAS_ERROR:\r
+                       mthca_qp_event(dev, cl_ntoh32(eqe->event.qp.qpn) & 0xffffff,\r
+                                      IB_EVENT_QP_FATAL);\r
+                       break;\r
+\r
+               case MTHCA_EVENT_TYPE_PATH_MIG_FAILED:\r
+                       mthca_qp_event(dev, cl_ntoh32(eqe->event.qp.qpn) & 0xffffff,\r
+                                      IB_EVENT_PATH_MIG_ERR);\r
+                       break;\r
+\r
+               case MTHCA_EVENT_TYPE_WQ_INVAL_REQ_ERROR:\r
+                       mthca_qp_event(dev, cl_ntoh32(eqe->event.qp.qpn) & 0xffffff,\r
+                                      IB_EVENT_QP_REQ_ERR);\r
+                       break;\r
+\r
+               case MTHCA_EVENT_TYPE_WQ_ACCESS_ERROR:\r
+                       mthca_qp_event(dev, cl_ntoh32(eqe->event.qp.qpn) & 0xffffff,\r
+                                      IB_EVENT_QP_ACCESS_ERR);\r
+                       break;\r
+\r
+               case MTHCA_EVENT_TYPE_CMD:\r
+                       mthca_cmd_event(dev,\r
+                                       cl_ntoh16(eqe->event.cmd.token),\r
+                                       eqe->event.cmd.status,\r
+                                       cl_ntoh64(eqe->event.cmd.out_param));\r
+                       break;\r
+\r
+               case MTHCA_EVENT_TYPE_PORT_CHANGE:\r
+                       port_change(dev,\r
+                                   (cl_ntoh32(eqe->event.port_change.port) >> 28) & 3,\r
+                                   eqe->subtype == 0x4);\r
+                       break;\r
+\r
+               case MTHCA_EVENT_TYPE_CQ_ERROR:\r
+                       HCA_PRINT(TRACE_LEVEL_WARNING,HCA_DBG_LOW, ("CQ %s on CQN %06x (syndrome %d)\n",\r
+                                  eqe->event.cq_err.syndrome == 1 ?\r
+                                  "overrun" : "access violation",\r
+                                  cl_ntoh32(eqe->event.cq_err.cqn) & 0xffffff, eqe->event.cq_err.syndrome));\r
+                       mthca_cq_event(dev, cl_ntoh32(eqe->event.cq_err.cqn),\r
+                               IB_EVENT_CQ_ERR);\r
+                       break;\r
+\r
+               case MTHCA_EVENT_TYPE_EQ_OVERFLOW:\r
+                       HCA_PRINT(TRACE_LEVEL_WARNING  ,HCA_DBG_LOW  ,("EQ overrun on EQN %d\n", eq->eqn));\r
+                       break;\r
+\r
+               case MTHCA_EVENT_TYPE_EEC_CATAS_ERROR:\r
+               case MTHCA_EVENT_TYPE_SRQ_CATAS_ERROR:\r
+               case MTHCA_EVENT_TYPE_LOCAL_CATAS_ERROR:\r
+               case MTHCA_EVENT_TYPE_ECC_DETECT:\r
+               default:\r
+                       HCA_PRINT(TRACE_LEVEL_WARNING,HCA_DBG_LOW, ("Unhandled event %02x(%02x) on EQ %d\n",\r
+                                  eqe->type, eqe->subtype, eq->eqn));\r
+                       break;\r
+               }\r
+\r
+               set_eqe_hw(eqe);\r
+               ++eq->cons_index;\r
+               eqes_found += 1;\r
+               ++set_ci;\r
+\r
+               /*\r
+                * The HCA will think the queue has overflowed if we\r
+                * don't tell it we've been processing events.  We\r
+                * create our EQs with MTHCA_NUM_SPARE_EQE extra\r
+                * entries, so we must update our consumer index at\r
+                * least that often.\r
+                */\r
+               if (unlikely(set_ci >= MTHCA_NUM_SPARE_EQE)) {\r
+                       /*\r
+                        * Conditional on hca_type is OK here because\r
+                        * this is a rare case, not the fast path.\r
+                        */\r
+                       set_eq_ci(dev, eq, eq->cons_index);\r
+                       set_ci = 0;\r
+               }\r
+               loops++;\r
+               if (cl_get_time_stamp() - start > g_max_DPC_time_us) {\r
+                       HCA_PRINT(TRACE_LEVEL_WARNING ,HCA_DBG_LOW ,("Handling of EQ stopped and a new DPC was queued after %d loops\n", loops));\r
+                       KeInsertQueueDpc(&dev->eq_table.eq[eq->eq_num].dpc, NULL, NULL);\r
+                       break;\r
+               }\r
+               eqe = next_eqe_sw(eq);\r
+       }\r
+\r
+       /*\r
+        * Rely on caller to set consumer index so that we don't have\r
+        * to test hca_type in our interrupt handling fast path.\r
+        */\r
+       return eqes_found;\r
+}\r
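+\r
+/*\r
+ * Fairness note: g_max_DPC_time_us bounds how long one DPC may keep\r
+ * polling a single EQ (the registry default of 10000 us gives each\r
+ * EQ a 10 ms budget).  On overrun, the handler requeues its own DPC\r
+ * and returns, so DPCs of the other EQs get serviced instead of one\r
+ * busy EQ monopolizing the processor.\r
+ */\r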
+\r
+static void mthca_tavor_dpc( PRKDPC dpc, \r
+       PVOID ctx, PVOID arg1, PVOID arg2 )\r
+{\r
+       struct mthca_eq  *eq  = ctx;\r
+       struct mthca_dev *dev = eq->dev;\r
+       SPIN_LOCK_PREP(lh);\r
+\r
+       UNREFERENCED_PARAMETER(dpc);\r
+       UNREFERENCED_PARAMETER(arg1);\r
+       UNREFERENCED_PARAMETER(arg2);\r
+\r
+       spin_lock_dpc(&eq->lock, &lh);\r
+\r
+       /* the 'if' is needed in case two DPCs were scheduled for the same EQ */\r
+       if (mthca_eq_int(dev, eq)) {\r
+               tavor_set_eq_ci(dev, eq, eq->cons_index);\r
+               tavor_eq_req_not(dev, eq->eqn);\r
+       }\r
+\r
+       spin_unlock_dpc(&lh);\r
+}\r
+\r
+static BOOLEAN mthca_tavor_interrupt(\r
+       PKINTERRUPT     int_obj, \r
+       PVOID                           ctx\r
+       )\r
+{\r
+       struct mthca_dev *dev = ctx;\r
+       u32 ecr;\r
+       int i;\r
+\r
+       UNREFERENCED_PARAMETER(int_obj);\r
+\r
+       if (dev->eq_table.clr_mask)\r
+               writel(dev->eq_table.clr_mask, dev->eq_table.clr_int);\r
+\r
+       ecr = readl((u8*)dev->eq_regs.tavor.ecr_base + 4);\r
+       if (!ecr)\r
+               return FALSE;\r
+\r
+       writel(ecr, (u8*)dev->eq_regs.tavor.ecr_base +\r
+              MTHCA_ECR_CLR_BASE - MTHCA_ECR_BASE + 4);\r
+\r
+       for (i = 0; i < MTHCA_NUM_EQ; ++i) {\r
+               if (ecr & dev->eq_table.eq[i].eqn_mask &&\r
+                   next_eqe_sw(&dev->eq_table.eq[i])) {\r
+                       KeInsertQueueDpc(&dev->eq_table.eq[i].dpc, NULL, NULL);\r
+               }\r
+       }\r
+\r
+       return TRUE;\r
+}\r
+\r
+#ifdef MSI_SUPPORT\r
+static irqreturn_t mthca_tavor_msi_x_interrupt(int irq, void *eq_ptr,\r
+                                        struct pt_regs *regs)\r
+{\r
+       struct mthca_eq  *eq  = eq_ptr;\r
+       struct mthca_dev *dev = eq->dev;\r
+\r
+       mthca_eq_int(dev, eq);\r
+       tavor_set_eq_ci(dev, eq, eq->cons_index);\r
+       tavor_eq_req_not(dev, eq->eqn);\r
+\r
+       /* MSI-X vectors always belong to us */\r
+       return IRQ_HANDLED;\r
+}\r
+#endif\r
+\r
+static void mthca_arbel_dpc( PRKDPC dpc, \r
+       PVOID ctx, PVOID arg1, PVOID arg2 )\r
+{\r
+       struct mthca_eq  *eq  = ctx;\r
+       struct mthca_dev *dev = eq->dev;\r
+       SPIN_LOCK_PREP(lh);\r
+\r
+       UNREFERENCED_PARAMETER(dpc);\r
+       UNREFERENCED_PARAMETER(arg1);\r
+       UNREFERENCED_PARAMETER(arg2);\r
+\r
+       spin_lock_dpc(&eq->lock, &lh);\r
+\r
+       /* the 'if' is needed in case two DPCs were scheduled for the same EQ */\r
+       if (mthca_eq_int(dev, eq))\r
+               arbel_set_eq_ci(dev, eq, eq->cons_index);\r
+       arbel_eq_req_not(dev, eq->eqn_mask);\r
+\r
+       spin_unlock_dpc(&lh);\r
+}\r
+\r
+static BOOLEAN mthca_arbel_interrupt(\r
+       PKINTERRUPT     int_obj, \r
+       PVOID                           ctx\r
+       )\r
+{\r
+       struct mthca_dev *dev = ctx;\r
+       int work = 0;\r
+       int i;\r
+\r
+       UNREFERENCED_PARAMETER(int_obj);\r
+\r
+       if (dev->eq_table.clr_mask)\r
+               writel(dev->eq_table.clr_mask, dev->eq_table.clr_int);\r
+\r
+       for (i = 0; i < MTHCA_NUM_EQ; ++i) {\r
+               if (next_eqe_sw( &dev->eq_table.eq[i]) ) {\r
+                       work = 1;\r
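+                       /* serialize KeInsertQueueDpc calls with a spin on dpc_lock */\r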
+                       while(InterlockedCompareExchange(&dev->dpc_lock, 1, 0));\r
+                       \r
+                       KeInsertQueueDpc(&dev->eq_table.eq[i].dpc, NULL, NULL);\r
+                       InterlockedCompareExchange(&dev->dpc_lock, 0, 1);\r
+               } else {\r
+                       arbel_eq_req_not(dev, dev->eq_table.eq[i].eqn_mask);\r
+               }\r
+       }\r
+\r
+       return (BOOLEAN)work;\r
+}\r
+\r
+#ifdef MSI_SUPPORT\r
+static irqreturn_t mthca_arbel_msi_x_interrupt(int irq, void *eq_ptr,\r
+                                              struct pt_regs *regs)\r
+{\r
+       struct mthca_eq  *eq  = eq_ptr;\r
+       struct mthca_dev *dev = eq->dev;\r
+\r
+       mthca_eq_int(dev, eq);\r
+       arbel_set_eq_ci(dev, eq, eq->cons_index);\r
+       arbel_eq_req_not(dev, eq->eqn_mask);\r
+\r
+       /* MSI-X vectors always belong to us */\r
+       return IRQ_HANDLED;\r
+}\r
+#endif\r
+\r
+static int mthca_create_eq(struct mthca_dev *dev,\r
+                                    int nent,\r
+                                    u8 intr,\r
+                                    struct mthca_eq *eq)\r
+{\r
+       int npages;\r
+       u64 *dma_list = NULL;\r
+       struct mthca_mailbox *mailbox;\r
+       struct mthca_eq_context *eq_context;\r
+       int err = -ENOMEM;\r
+       int i;\r
+       u8 status;\r
+       \r
+       HCA_ENTER(HCA_DBG_INIT);\r
+       eq->dev  = dev; \r
+       eq->nent = roundup_pow_of_two(max(nent, 2));\r
+       npages = ALIGN(eq->nent * MTHCA_EQ_ENTRY_SIZE, PAGE_SIZE) / PAGE_SIZE;\r
+\r
+       eq->page_list = kmalloc(npages * sizeof *eq->page_list,\r
+                               GFP_KERNEL);\r
+       if (!eq->page_list)\r
+               goto err_out;\r
+\r
+       for (i = 0; i < npages; ++i)\r
+               eq->page_list[i].page = NULL;\r
+\r
+       dma_list = kmalloc(npages * sizeof *dma_list, GFP_KERNEL);\r
+       if (!dma_list)\r
+               goto err_out_free;\r
+\r
+       mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);\r
+       if (IS_ERR(mailbox))\r
+               goto err_out_free;\r
+       eq_context = mailbox->buf;\r
+\r
+       for (i = 0; i < npages; ++i) {\r
+               alloc_dma_zmem_map(dev, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL, &eq->page_list[i]);\r
+               if (!eq->page_list[i].page)\r
+                       goto err_out_free_pages;\r
+               dma_list[i] = eq->page_list[i].dma_address;\r
+       }\r
+\r
+       for (i = 0; i < eq->nent; ++i)\r
+               set_eqe_hw(get_eqe(eq, i));\r
+\r
+       eq->eqn = mthca_alloc(&dev->eq_table.alloc);\r
+       if (eq->eqn == -1)\r
+               goto err_out_free_pages;\r
+\r
+       err = mthca_mr_alloc_phys(dev, dev->driver_pd.pd_num,\r
+                                 dma_list, PAGE_SHIFT, npages,\r
+                                 0, npages * PAGE_SIZE,\r
+                                 MTHCA_MPT_FLAG_LOCAL_WRITE |\r
+                                 MTHCA_MPT_FLAG_LOCAL_READ,\r
+                                 &eq->mr);\r
+       if (err)\r
+               goto err_out_free_eq;\r
+\r
+       RtlZeroMemory(eq_context, sizeof *eq_context);\r
+       eq_context->flags           = cl_hton32(MTHCA_EQ_STATUS_OK   |\r
+                                                 MTHCA_EQ_OWNER_HW    |\r
+                                                 MTHCA_EQ_STATE_ARMED |\r
+                                                 MTHCA_EQ_FLAG_TR);\r
+       if (mthca_is_memfree(dev))\r
+               eq_context->flags  |= cl_hton32(MTHCA_EQ_STATE_ARBEL);\r
+\r
+       eq_context->logsize_usrpage = cl_hton32((ffs(eq->nent) - 1) << 24);\r
+       if (mthca_is_memfree(dev)) {\r
+               eq_context->arbel_pd = cl_hton32(dev->driver_pd.pd_num);\r
+       } else {\r
+               eq_context->logsize_usrpage |= cl_hton32(dev->driver_uar.index);\r
+               eq_context->tavor_pd         = cl_hton32(dev->driver_pd.pd_num);\r
+       }\r
+       eq_context->intr            = intr;\r
+       eq_context->lkey            = cl_hton32(eq->mr.ibmr.lkey);\r
+\r
+       err = mthca_SW2HW_EQ(dev, mailbox, eq->eqn, &status);\r
+       if (err) {\r
+               HCA_PRINT(TRACE_LEVEL_WARNING ,HCA_DBG_LOW ,("SW2HW_EQ failed (%d)\n", err));\r
+               goto err_out_free_mr;\r
+       }\r
+       if (status) {\r
+               HCA_PRINT(TRACE_LEVEL_WARNING,HCA_DBG_LOW,("SW2HW_EQ returned status 0x%02x\n",\r
+                          status));\r
+               err = -EINVAL;\r
+               goto err_out_free_mr;\r
+       }\r
+\r
+       kfree(dma_list);\r
+       mthca_free_mailbox(dev, mailbox);\r
+\r
+       eq->eqn_mask   = _byteswap_ulong(1 << eq->eqn);\r
+       eq->cons_index = 0;\r
+\r
+       dev->eq_table.arm_mask |= eq->eqn_mask;\r
+\r
+       HCA_PRINT(TRACE_LEVEL_INFORMATION ,HCA_DBG_INIT ,("Allocated EQ %d with %d entries\n",\r
+                 eq->eqn, eq->nent));\r
+\r
+       HCA_EXIT(HCA_DBG_INIT);\r
+       return err;\r
+\r
+ err_out_free_mr:\r
+       mthca_free_mr(dev, &eq->mr);\r
+\r
+ err_out_free_eq:\r
+       mthca_free(&dev->eq_table.alloc, eq->eqn);\r
+\r
+ err_out_free_pages:\r
+       for (i = 0; i < npages; ++i) {\r
+               if (eq->page_list[i].page) {\r
+                       free_dma_mem_map(dev, &eq->page_list[i], PCI_DMA_BIDIRECTIONAL);\r
+               }\r
+       }\r
+       mthca_free_mailbox(dev, mailbox);\r
+\r
+ err_out_free:\r
+       kfree(eq->page_list);\r
+       kfree(dma_list);\r
+\r
+ err_out:\r
+       HCA_EXIT(HCA_DBG_INIT);\r
+       return err;\r
+}\r
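+\r
+/*\r
+ * Sizing example for the allocation above, assuming 4 KB pages: the\r
+ * command EQ requests MTHCA_NUM_CMD_EQE + MTHCA_NUM_SPARE_EQE =\r
+ * 0x80 + 0x80 = 256 entries (already a power of two), so\r
+ * npages = ALIGN(256 * 32, 4096) / 4096 = 2 DMA pages.\r
+ */\r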
+\r
+static void mthca_free_eq(struct mthca_dev *dev,\r
+                         struct mthca_eq *eq)\r
+{\r
+       struct mthca_mailbox *mailbox;\r
+       int err;\r
+       u8 status;\r
+       int npages = (eq->nent * MTHCA_EQ_ENTRY_SIZE + PAGE_SIZE - 1) /\r
+               PAGE_SIZE;\r
+       int i;\r
+\r
+       mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);\r
+       if (IS_ERR(mailbox))\r
+               return;\r
+\r
+       err = mthca_HW2SW_EQ(dev, mailbox, eq->eqn, &status);\r
+       if (err)\r
+               HCA_PRINT(TRACE_LEVEL_WARNING  ,HCA_DBG_LOW  ,("HW2SW_EQ failed (%d)\n", err));\r
+       if (status)\r
+               HCA_PRINT(TRACE_LEVEL_WARNING  ,HCA_DBG_LOW  ,("HW2SW_EQ returned status 0x%02x\n", status));\r
+\r
+       dev->eq_table.arm_mask &= ~eq->eqn_mask;\r
+\r
+       { // debug print\r
+               HCA_PRINT(TRACE_LEVEL_VERBOSE  ,HCA_DBG_LOW  ,("Dumping EQ context %02x:\n", eq->eqn));\r
+               for (i = 0; i < sizeof (struct mthca_eq_context) / 4; i=i+4) {\r
+                       HCA_PRINT(TRACE_LEVEL_VERBOSE ,HCA_DBG_LOW ,("[%02x] %08x %08x %08x %08x\n", i,\r
+                                       cl_ntoh32(*(u32*)((u8*)mailbox->buf + i * 4)),\r
+                                       cl_ntoh32(*(u32*)((u8*)mailbox->buf + (i+1)*4)),\r
+                                       cl_ntoh32(*(u32*)((u8*)mailbox->buf + (i+2)*4)),\r
+                                       cl_ntoh32(*(u32*)((u8*)mailbox->buf + (i+3)*4))));\r
+               }\r
+       }\r
+\r
+       mthca_free_mr(dev, &eq->mr);\r
+       for (i = 0; i < npages; ++i) {\r
+               free_dma_mem_map(dev, &eq->page_list[i], PCI_DMA_BIDIRECTIONAL);\r
+       }\r
+\r
+       kfree(eq->page_list);\r
+       mthca_free_mailbox(dev, mailbox);\r
+}\r
+\r
+static void mthca_free_irqs(struct mthca_dev *dev)\r
+{\r
+#ifdef MSI_SUPPORT\r
+       int i;\r
+#endif\r
+       if (dev->eq_table.have_irq)\r
+               free_irq(dev->ext->int_obj);\r
+#ifdef MSI_SUPPORT     \r
+       for (i = 0; i < MTHCA_NUM_EQ; ++i)\r
+               if (dev->eq_table.eq[i].have_irq)\r
+                       free_irq(dev->eq_table.eq[i].msi_x_vector,\r
+                                dev->eq_table.eq + i);\r
+#endif         \r
+}\r
+\r
+static int mthca_map_reg(struct mthca_dev *dev,\r
+                                  u64 offset, unsigned long size,\r
+                                  void __iomem **map, SIZE_T *map_size)\r
+{\r
+       u64 base = pci_resource_start(dev, HCA_BAR_TYPE_HCR);\r
+       *map = ioremap(base + offset, size, map_size);\r
+       if (!*map) \r
+               return -ENOMEM;\r
+       return 0;\r
+}\r
+\r
+static void mthca_unmap_reg(struct mthca_dev *dev, u64 offset,\r
+                           unsigned long size, void __iomem *map, SIZE_T map_size)\r
+{\r
+       UNREFERENCED_PARAMETER(dev);\r
+       UNREFERENCED_PARAMETER(size);\r
+       UNREFERENCED_PARAMETER(offset);\r
+       iounmap(map, map_size);\r
+}\r
+\r
+static int mthca_map_eq_regs(struct mthca_dev *dev)\r
+{\r
+       u64 mthca_base;\r
+\r
+       mthca_base = pci_resource_start(dev, HCA_BAR_TYPE_HCR);\r
+\r
+       if (mthca_is_memfree(dev)) {\r
+               /*\r
+                * We assume that the EQ arm and EQ set CI registers\r
+                * fall within the first BAR.  We can't trust the\r
+                * values firmware gives us, since those addresses are\r
+                * valid on the HCA's side of the PCI bus but not\r
+                * necessarily the host side.\r
+                */\r
+               if (mthca_map_reg(dev, (pci_resource_len(dev, 0) - 1) &\r
+                                 dev->fw.arbel.clr_int_base, MTHCA_CLR_INT_SIZE,\r
+                                 &dev->clr_base, &dev->clr_base_size)) {\r
+                       HCA_PRINT(TRACE_LEVEL_ERROR,HCA_DBG_LOW,("Couldn't map interrupt clear register, "\r
+                                 "aborting.\n"));\r
+                       return -ENOMEM;\r
+               }\r
+\r
+               /*\r
+                * Add 4 because we limit ourselves to EQs 0 ... 31,\r
+                * so we only need the low word of the register.\r
+                */\r
+               if (mthca_map_reg(dev, ((pci_resource_len(dev, 0) - 1) &\r
+                                       dev->fw.arbel.eq_arm_base) + 4, 4,\r
+                                 &dev->eq_regs.arbel.eq_arm, &dev->eq_regs.arbel.eq_arm_size)) {\r
+                       HCA_PRINT(TRACE_LEVEL_ERROR  ,HCA_DBG_LOW  ,("Couldn't map EQ arm register, aborting.\n"));\r
+                       mthca_unmap_reg(dev, (pci_resource_len(dev, 0) - 1) &\r
+                                       dev->fw.arbel.clr_int_base, MTHCA_CLR_INT_SIZE,\r
+                                       dev->clr_base, dev->clr_base_size);\r
+                       return -ENOMEM;\r
+               }\r
+\r
+               if (mthca_map_reg(dev, (pci_resource_len(dev, 0) - 1) &\r
+                                 dev->fw.arbel.eq_set_ci_base,\r
+                                 MTHCA_EQ_SET_CI_SIZE,\r
+                                 &dev->eq_regs.arbel.eq_set_ci_base,\r
+                                 &dev->eq_regs.arbel.eq_set_ci_base_size\r
+                                 )) {\r
+                       HCA_PRINT(TRACE_LEVEL_ERROR  ,HCA_DBG_LOW  ,("Couldn't map EQ CI register, aborting.\n"));\r
+                       mthca_unmap_reg(dev, ((pci_resource_len(dev, 0) - 1) &\r
+                                             dev->fw.arbel.eq_arm_base) + 4, 4,\r
+                                       dev->eq_regs.arbel.eq_arm, dev->eq_regs.arbel.eq_arm_size);\r
+                       mthca_unmap_reg(dev, (pci_resource_len(dev, 0) - 1) &\r
+                                       dev->fw.arbel.clr_int_base, MTHCA_CLR_INT_SIZE,\r
+                                       dev->clr_base, dev->clr_base_size);\r
+                       return -ENOMEM;\r
+               }\r
+       } else {\r
+               if (mthca_map_reg(dev, MTHCA_CLR_INT_BASE, MTHCA_CLR_INT_SIZE,\r
+                                 &dev->clr_base, &dev->clr_base_size)) {\r
+                       HCA_PRINT(TRACE_LEVEL_ERROR,HCA_DBG_LOW,("Couldn't map interrupt clear register, "\r
+                                 "aborting.\n"));\r
+                       return -ENOMEM;\r
+               }\r
+\r
+               if (mthca_map_reg(dev, MTHCA_ECR_BASE,\r
+                                 MTHCA_ECR_SIZE + MTHCA_ECR_CLR_SIZE,\r
+                                 &dev->eq_regs.tavor.ecr_base,  &dev->eq_regs.tavor.ecr_base_size)) {\r
+                       HCA_PRINT(TRACE_LEVEL_ERROR,HCA_DBG_LOW,("Couldn't map ecr register, "\r
+                                 "aborting.\n"));\r
+                       mthca_unmap_reg(dev, MTHCA_CLR_INT_BASE, MTHCA_CLR_INT_SIZE,\r
+                                       dev->clr_base, dev->clr_base_size);\r
+                       return -ENOMEM;\r
+               }\r
+       }\r
+\r
+       return 0;\r
+\r
+}\r
+\r
+static void mthca_unmap_eq_regs(struct mthca_dev *dev)\r
+{\r
+       if (mthca_is_memfree(dev)) {\r
+               mthca_unmap_reg(dev, (pci_resource_len(dev, 0) - 1) &\r
+                               dev->fw.arbel.eq_set_ci_base,\r
+                               MTHCA_EQ_SET_CI_SIZE,\r
+                               dev->eq_regs.arbel.eq_set_ci_base, \r
+                               dev->eq_regs.arbel.eq_set_ci_base_size);\r
+               mthca_unmap_reg(dev, ((pci_resource_len(dev, 0) - 1) &\r
+                               dev->fw.arbel.eq_arm_base) + 4, 4,\r
+                       dev->eq_regs.arbel.eq_arm,\r
+                       dev->eq_regs.arbel.eq_arm_size);\r
+               mthca_unmap_reg(dev, (pci_resource_len(dev, 0) - 1) &\r
+                               dev->fw.arbel.clr_int_base, MTHCA_CLR_INT_SIZE,\r
+                               dev->clr_base, dev->clr_base_size);\r
+       } else {\r
+               mthca_unmap_reg(dev, MTHCA_ECR_BASE,\r
+                               MTHCA_ECR_SIZE + MTHCA_ECR_CLR_SIZE,\r
+                               dev->eq_regs.tavor.ecr_base, \r
+                               dev->eq_regs.tavor.ecr_base_size);\r
+               mthca_unmap_reg(dev, MTHCA_CLR_INT_BASE, MTHCA_CLR_INT_SIZE,\r
+                               dev->clr_base, dev->clr_base_size);\r
+       }\r
+}\r
+\r
+int mthca_map_eq_icm(struct mthca_dev *dev, u64 icm_virt)\r
+{\r
+       int ret;\r
+       u8 status;\r
+\r
+       /*\r
+        * We assume that mapping one page is enough for the whole EQ\r
+        * context table.  This is fine with all current HCAs, because\r
+        * we only use 32 EQs and each EQ uses 32 bytes of context\r
+        * memory, or 1 KB total.\r
+        */\r
+       dev->eq_table.icm_virt = icm_virt;\r
+       alloc_dma_zmem_map(dev,PAGE_SIZE, PCI_DMA_BIDIRECTIONAL, &dev->eq_table.sg);\r
+       if (!dev->eq_table.sg.page)\r
+               return -ENOMEM;\r
+\r
+       ret = mthca_MAP_ICM_page(dev, dev->eq_table.sg.dma_address, icm_virt, &status);\r
+       if (!ret && status)\r
+               ret = -EINVAL;\r
+       if (ret) \r
+               free_dma_mem_map(dev, &dev->eq_table.sg, PCI_DMA_BIDIRECTIONAL );\r
+\r
+       return ret;\r
+}\r
+\r
+void mthca_unmap_eq_icm(struct mthca_dev *dev)\r
+{\r
+       u8 status;\r
+\r
+       mthca_UNMAP_ICM(dev, dev->eq_table.icm_virt, PAGE_SIZE / 4096, &status);\r
+       free_dma_mem_map(dev, &dev->eq_table.sg, PCI_DMA_BIDIRECTIONAL );\r
+}\r
+\r
+int mthca_init_eq_table(struct mthca_dev *dev)\r
+{\r
+       int err;\r
+       u8 status;\r
+       u8 intr;\r
+       int i;\r
+       \r
+       HCA_ENTER(HCA_DBG_INIT);\r
+       err = mthca_alloc_init(&dev->eq_table.alloc,\r
+                              dev->limits.num_eqs,\r
+                              dev->limits.num_eqs - 1,\r
+                              dev->limits.reserved_eqs);\r
+       if (err)\r
+               return err;\r
+\r
+       err = mthca_map_eq_regs(dev);\r
+       if (err)\r
+               goto err_out_free;\r
+\r
+#ifdef MSI_SUPPORT\r
+       if (dev->mthca_flags & MTHCA_FLAG_MSI ||\r
+           dev->mthca_flags & MTHCA_FLAG_MSI_X) {\r
+               dev->eq_table.clr_mask = 0;\r
+       } else\r
+#endif \r
+       {\r
+               dev->eq_table.clr_mask =\r
+                       _byteswap_ulong(1 << (dev->eq_table.inta_pin & 31));\r
+               dev->eq_table.clr_int  = dev->clr_base +\r
+                       (dev->eq_table.inta_pin < 32 ? 4 : 0);\r
+       }\r
+\r
+       dev->eq_table.arm_mask = 0;\r
+\r
+       intr = (dev->mthca_flags & MTHCA_FLAG_MSI) ?\r
+               128 : dev->eq_table.inta_pin;\r
+\r
+       err = mthca_create_eq(dev, dev->limits.num_cqs + MTHCA_NUM_SPARE_EQE,\r
+                             (dev->mthca_flags & MTHCA_FLAG_MSI_X) ? 128 : intr,\r
+                             &dev->eq_table.eq[MTHCA_EQ_COMP]);\r
+       if (err)\r
+               goto err_out_unmap;\r
+\r
+       err = mthca_create_eq(dev, MTHCA_NUM_ASYNC_EQE + MTHCA_NUM_SPARE_EQE,\r
+                             (dev->mthca_flags & MTHCA_FLAG_MSI_X) ? 129 : intr,\r
+                             &dev->eq_table.eq[MTHCA_EQ_ASYNC]);\r
+       if (err)\r
+               goto err_out_comp;\r
+\r
+       err = mthca_create_eq(dev, MTHCA_NUM_CMD_EQE + MTHCA_NUM_SPARE_EQE,\r
+                             (dev->mthca_flags & MTHCA_FLAG_MSI_X) ? 130 : intr,\r
+                             &dev->eq_table.eq[MTHCA_EQ_CMD]);\r
+       if (err)\r
+               goto err_out_async;\r
+\r
+#ifdef MSI_SUPPORT\r
+       if (dev->mthca_flags & MTHCA_FLAG_MSI_X) {\r
+               static const char *eq_name[] = {\r
+                       [MTHCA_EQ_COMP]  = DRV_NAME " (comp)",\r
+                       [MTHCA_EQ_ASYNC] = DRV_NAME " (async)",\r
+                       [MTHCA_EQ_CMD]   = DRV_NAME " (cmd)"\r
+               };\r
+\r
+               for (i = 0; i < MTHCA_NUM_EQ; ++i) {\r
+                       err = request_irq(dev->eq_table.eq[i].msi_x_vector,\r
+                                         mthca_is_memfree(dev) ?\r
+                                         mthca_arbel_msi_x_interrupt :\r
+                                         mthca_tavor_msi_x_interrupt,\r
+                                         0, eq_name[i], dev->eq_table.eq + i);\r
+                       if (err)\r
+                               goto err_out_cmd;\r
+                       dev->eq_table.eq[i].have_irq = 1;\r
+                       /* init the DPC machinery, as in the non-MSI path below */\r
+                       spin_lock_init( &dev->eq_table.eq[i].lock );    \r
+                       dev->dpc_lock = 0;\r
+                       KeInitializeDpc(\r
+                               &dev->eq_table.eq[i].dpc,\r
+                               mthca_is_memfree(dev) ?\r
+                                       mthca_arbel_msi_x_dpc :\r
+                                       mthca_tavor_msi_x_dpc,\r
+                               dev->eq_table.eq + i);\r
+               }\r
+       } else \r
+#endif \r
+       {\r
+               spin_lock_init( &dev->ext->isr_lock );  \r
+               err = request_irq(\r
+                       &dev->ext->interruptInfo,\r
+                       &dev->ext->isr_lock.lock        ,\r
+                       mthca_is_memfree(dev) ? mthca_arbel_interrupt : mthca_tavor_interrupt,\r
+                       dev,\r
+                       &dev->ext->int_obj\r
+                 );\r
+               if (err)\r
+                       goto err_out_cmd;\r
+               dev->eq_table.have_irq = 1;\r
+\r
+               /* init DPC stuff */\r
+               for (i = 0; i < MTHCA_NUM_EQ; ++i) {\r
+                       spin_lock_init( &dev->eq_table.eq[i].lock );    \r
+                       KeInitializeDpc(\r
+                               &dev->eq_table.eq[i].dpc,\r
+                               mthca_is_memfree(dev) ?\r
+                                       mthca_arbel_dpc :\r
+                                       mthca_tavor_dpc,\r
+                               dev->eq_table.eq + i);\r
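+                       /* record our index so mthca_eq_int() can requeue this EQ's DPC */\r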
+                       dev->eq_table.eq[i].eq_num = i;\r
+               }\r
+       }\r
+\r
+       err = mthca_MAP_EQ(dev, async_mask(dev),\r
+                          0, dev->eq_table.eq[MTHCA_EQ_ASYNC].eqn, &status);\r
+       if (err)\r
+               HCA_PRINT(TRACE_LEVEL_WARNING,HCA_DBG_INIT,("MAP_EQ for async EQ %d failed (%d)\n",\r
+                          dev->eq_table.eq[MTHCA_EQ_ASYNC].eqn, err));\r
+       if (status)\r
+               HCA_PRINT(TRACE_LEVEL_WARNING,HCA_DBG_INIT, ("MAP_EQ for async EQ %d returned status 0x%02x\n",\r
+                          dev->eq_table.eq[MTHCA_EQ_ASYNC].eqn, status));\r
+       err = mthca_MAP_EQ(dev, MTHCA_CMD_EVENT_MASK,\r
+                          0, dev->eq_table.eq[MTHCA_EQ_CMD].eqn, &status);\r
+       if (err)\r
+               HCA_PRINT(TRACE_LEVEL_WARNING,HCA_DBG_INIT, ("MAP_EQ for cmd EQ %d failed (%d)\n",\r
+                          dev->eq_table.eq[MTHCA_EQ_CMD].eqn, err));\r
+       if (status)\r
+               HCA_PRINT(TRACE_LEVEL_WARNING,HCA_DBG_INIT,("MAP_EQ for cmd EQ %d returned status 0x%02x\n",\r
+                          dev->eq_table.eq[MTHCA_EQ_CMD].eqn, status));\r
+\r
+       for (i = 0; i < MTHCA_NUM_EQ; ++i)\r
+               if (mthca_is_memfree(dev))\r
+                       arbel_eq_req_not(dev, dev->eq_table.eq[i].eqn_mask);\r
+               else\r
+                       tavor_eq_req_not(dev, dev->eq_table.eq[i].eqn);\r
+\r
+       return 0;\r
+\r
+err_out_cmd:\r
+       mthca_free_eq(dev, &dev->eq_table.eq[MTHCA_EQ_CMD]);\r
+\r
+err_out_async:\r
+       mthca_free_eq(dev, &dev->eq_table.eq[MTHCA_EQ_ASYNC]);\r
+\r
+err_out_comp:\r
+       mthca_free_eq(dev, &dev->eq_table.eq[MTHCA_EQ_COMP]);\r
+\r
+err_out_unmap:\r
+       mthca_unmap_eq_regs(dev);\r
+\r
+err_out_free:\r
+       mthca_alloc_cleanup(&dev->eq_table.alloc);\r
+       HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_INIT ,("mthca_init_eq_table failed %d\n", err));\r
+       return err;\r
+}\r
+\r
+void mthca_cleanup_eq_table(struct mthca_dev *dev)\r
+{\r
+       u8 status;\r
+       int i;\r
+\r
+       mthca_free_irqs(dev);\r
+\r
+       mthca_MAP_EQ(dev, async_mask(dev),\r
+                    1, dev->eq_table.eq[MTHCA_EQ_ASYNC].eqn, &status);\r
+       mthca_MAP_EQ(dev, MTHCA_CMD_EVENT_MASK,\r
+                    1, dev->eq_table.eq[MTHCA_EQ_CMD].eqn, &status);\r
+\r
+       for (i = 0; i < MTHCA_NUM_EQ; ++i)\r
+               mthca_free_eq(dev, &dev->eq_table.eq[i]);\r
+\r
+       mthca_unmap_eq_regs(dev);\r
+\r
+       mthca_alloc_cleanup(&dev->eq_table.alloc);\r
+}\r
+\r
+\r
index 136f6cb95ad9e8e5643226d12211c87b5965265a..b321a7e88745e2932eea3eef969a36822d762f15 100644 (file)
-/*
- * Copyright (c) 2004 Topspin Communications.  All rights reserved.
- * Copyright (c) 2005 Cisco Systems.  All rights reserved.
- * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
- * $Id$
- */
-
-#ifndef MTHCA_PROVIDER_H
-#define MTHCA_PROVIDER_H
-
-#include <ib_verbs.h>
-#include <ib_pack.h>
-#include <iba/ib_ci.h>
-
-typedef uint32_t mthca_mpt_access_t;
-#define MTHCA_MPT_FLAG_ATOMIC        (1 << 14)
-#define MTHCA_MPT_FLAG_REMOTE_WRITE  (1 << 13)
-#define MTHCA_MPT_FLAG_REMOTE_READ   (1 << 12)
-#define MTHCA_MPT_FLAG_LOCAL_WRITE   (1 << 11)
-#define MTHCA_MPT_FLAG_LOCAL_READ    (1 << 10)
-
-union mthca_buf {
-       struct scatterlist direct;
-       struct scatterlist *page_list;
-};
-
-struct mthca_uar {
-       PFN_NUMBER pfn;
-       int           index;
-};
-
-struct mthca_user_db_table;
-
-struct mthca_ucontext {
-       struct ib_ucontext          ibucontext;
-       struct mthca_uar            uar;
-       struct mthca_user_db_table *db_tab;
-       // for user UAR 
-       PMDL    mdl;
-       PVOID   kva;
-       SIZE_T uar_size;        
-};
-
-struct mthca_mtt;
-
-struct mthca_mr {
-       //NB: the start of this structure is to be equal to mlnx_mro_t !
-       //NB: the structure was not inserted here for not to mix driver and provider structures
-       struct ib_mr      ibmr;
-       struct mthca_mtt *mtt;
-       int                     iobuf_used;
-       mt_iobuf_t      iobuf;
-       void *secure_handle;
-};
-
-struct mthca_fmr {
-       struct ib_fmr      ibmr;
-       struct ib_fmr_attr attr;
-       struct mthca_mtt  *mtt;
-       int                maps;
-       union {
-               struct {
-                       struct mthca_mpt_entry __iomem *mpt;
-                       u64 __iomem *mtts;
-               } tavor;
-               struct {
-                       struct mthca_mpt_entry *mpt;
-                       __be64 *mtts;
-               } arbel;
-       } mem;
-};
-
-struct mthca_pd {
-       struct ib_pd    ibpd;
-       u32             pd_num;
-       atomic_t        sqp_count;
-       struct mthca_mr ntmr;
-       int             privileged;
-};
-
-struct mthca_eq {
-       struct mthca_dev      *dev;
-       int                    eqn;
-       u32                    eqn_mask;
-       u32                    cons_index;
-       u16                    msi_x_vector;
-       u16                    msi_x_entry;
-       int                    have_irq;
-       int                    nent;
-       struct scatterlist *page_list;
-       struct mthca_mr        mr;
-       KDPC                            dpc;                    /* DPC for MSI-X interrupts */
-       spinlock_t  lock;                       /* spinlock for simult DPCs */
-};
-
-struct mthca_av;
-
-enum mthca_ah_type {
-       MTHCA_AH_ON_HCA,
-       MTHCA_AH_PCI_POOL,
-       MTHCA_AH_KMALLOC
-};
-
-struct mthca_ah {
-       struct ib_ah       ibah;
-       enum mthca_ah_type type;
-       u32                key;
-       struct mthca_av   *av;
-       dma_addr_t         avdma;
-};
-
-/*
- * Quick description of our CQ/QP locking scheme:
- *
- * We have one global lock that protects dev->cq/qp_table.  Each
- * struct mthca_cq/qp also has its own lock.  An individual qp lock
- * may be taken inside of an individual cq lock.  Both cqs attached to
- * a qp may be locked, with the send cq locked first.  No other
- * nesting should be done.
- *
- * Each struct mthca_cq/qp also has an atomic_t ref count.  The
- * pointer from the cq/qp_table to the struct counts as one reference.
- * This reference also is good for access through the consumer API, so
- * modifying the CQ/QP etc doesn't need to take another reference.
- * Access because of a completion being polled does need a reference.
- *
- * Finally, each struct mthca_cq/qp has a wait_queue_head_t for the
- * destroy function to sleep on.
- *
- * This means that access from the consumer API requires nothing but
- * taking the struct's lock.
- *
- * Access because of a completion event should go as follows:
- * - lock cq/qp_table and look up struct
- * - increment ref count in struct
- * - drop cq/qp_table lock
- * - lock struct, do your thing, and unlock struct
- * - decrement ref count; if zero, wake up waiters
- *
- * To destroy a CQ/QP, we can do the following:
- * - lock cq/qp_table, remove pointer, unlock cq/qp_table lock
- * - decrement ref count
- * - wait_event until ref count is zero
- *
- * It is the consumer's responsibilty to make sure that no QP
- * operations (WQE posting or state modification) are pending when the
- * QP is destroyed.  Also, the consumer must make sure that calls to
- * qp_modify are serialized.
- *
- * Possible optimizations (wait for profile data to see if/where we
- * have locks bouncing between CPUs):
- * - split cq/qp table lock into n separate (cache-aligned) locks,
- *   indexed (say) by the page in the table
- * - split QP struct lock into three (one for common info, one for the
- *   send queue and one for the receive queue)
- */
-//TODO: check correctness of the above requirement: "It is the consumer's responsibilty to make sure that no QP
-// operations (WQE posting or state modification) are pending when the QP is destroyed"
-
-struct mthca_cq {
-       struct ib_cq           ibcq;
-       void                                            *cq_context;    // leo: for IBAL shim
-       spinlock_t             lock;
-       atomic_t               refcount;
-       int                    cqn;
-       u32                    cons_index;
-       int                    is_direct;
-       int                    is_kernel;
-
-       /* Next fields are Arbel only */
-       int                    set_ci_db_index;
-       __be32                *set_ci_db;
-       int                    arm_db_index;
-       __be32                *arm_db;
-       int                    arm_sn;
-       int                    u_arm_db_index;
-       int                *p_u_arm_sn;
-
-       union mthca_buf        queue;
-       struct mthca_mr        mr;
-       wait_queue_head_t      wait;
-       KMUTEX                      mutex;
-};
-
-struct mthca_srq {
-       struct ib_srq           ibsrq;
-       spinlock_t              lock;
-       atomic_t                refcount;
-       int                     srqn;
-       int                     max;
-       int                     max_gs;
-       int                     wqe_shift;
-       int                     first_free;
-       int                     last_free;
-       u16                     counter;  /* Arbel only */
-       int                     db_index; /* Arbel only */
-       __be32                 *db;       /* Arbel only */
-       void                   *last;
-
-       int                     is_direct;
-       u64                    *wrid;
-       union mthca_buf         queue;
-       struct mthca_mr         mr;
-
-       wait_queue_head_t       wait;
-       KMUTEX                      mutex;
-};
-
-struct mthca_wq {
-       spinlock_t lock;
-       int        max;
-       unsigned   next_ind;
-       unsigned   last_comp;
-       unsigned   head;
-       unsigned   tail;
-       void      *last;
-       int        max_gs;
-       int        wqe_shift;
-
-       int        db_index;    /* Arbel only */
-       __be32    *db;
-};
-
-struct mthca_qp {
-       struct ib_qp           ibqp;
-       void                                            *qp_context;    // leo: for IBAL shim
-       //TODO: added just because absense of ibv_query_qp
-       // thereafter it may be worth to be replaced by struct ib_qp_attr qp_attr;
-       struct ib_qp_init_attr qp_init_attr;    // leo: for query_qp
-       atomic_t               refcount;
-       u32                    qpn;
-       int                    is_direct;
-       u8                     transport;
-       u8                     state;
-       u8                     atomic_rd_en;
-       u8                     resp_depth;
-
-       struct mthca_mr        mr;
-
-       struct mthca_wq        rq;
-       struct mthca_wq        sq;
-       enum ib_sig_type       sq_policy;
-       int                    send_wqe_offset;
-       int                    max_inline_data;
-
-       u64                   *wrid;
-       union mthca_buf        queue;
-
-       wait_queue_head_t      wait;
-       KMUTEX                      mutex;
-};
-
-struct mthca_sqp {
-       struct mthca_qp qp;
-       int             port;
-       int             pkey_index;
-       u32             qkey;
-       u32             send_psn;
-       struct ib_ud_header ud_header;
-       struct scatterlist sg;
-};
-
-static inline struct mthca_ucontext *to_mucontext(struct ib_ucontext *ibucontext)
-{
-       return container_of(ibucontext, struct mthca_ucontext, ibucontext);
-}
-
-static inline struct mthca_fmr *to_mfmr(struct ib_fmr *ibmr)
-{
-       return container_of(ibmr, struct mthca_fmr, ibmr);
-}
-
-static inline struct mthca_mr *to_mmr(struct ib_mr *ibmr)
-{
-       return container_of(ibmr, struct mthca_mr, ibmr);
-}
-
-static inline struct mthca_pd *to_mpd(struct ib_pd *ibpd)
-{
-       return container_of(ibpd, struct mthca_pd, ibpd);
-}
-
-static inline struct mthca_ah *to_mah(struct ib_ah *ibah)
-{
-       return container_of(ibah, struct mthca_ah, ibah);
-}
-
-static inline struct mthca_cq *to_mcq(struct ib_cq *ibcq)
-{
-       return container_of(ibcq, struct mthca_cq, ibcq);
-}
-
-static inline struct mthca_srq *to_msrq(struct ib_srq *ibsrq)
-{
-       return container_of(ibsrq, struct mthca_srq, ibsrq);
-}
-
-static inline struct mthca_qp *to_mqp(struct ib_qp *ibqp)
-{
-       return container_of(ibqp, struct mthca_qp, ibqp);
-}
-
-static inline struct mthca_sqp *to_msqp(struct mthca_qp *qp)
-{
-       return container_of(qp, struct mthca_sqp, qp);
-}
-
-static inline uint8_t start_port(struct ib_device *device)
-{
-       return device->node_type == IB_NODE_SWITCH ? 0 : 1;
-}
-
-static inline uint8_t end_port(struct ib_device *device)
-{
-       return device->node_type == IB_NODE_SWITCH ? 0 : device->phys_port_cnt;
-}
-
-static inline int ib_copy_from_umv_buf(void *dest, ci_umv_buf_t* const p_umv_buf, size_t len)
-{
-       RtlCopyMemory(dest, p_umv_buf->p_inout_buf,  len);
-       return 0;
-}
-
-static inline int ib_copy_to_umv_buf(ci_umv_buf_t* const p_umv_buf, void *src, size_t len)
-{
-       if (p_umv_buf->output_size < len) {
-               p_umv_buf->status = IB_INSUFFICIENT_MEMORY;
-               p_umv_buf->output_size = 0;
-               return -EFAULT;
-       }
-       RtlCopyMemory(p_umv_buf->p_inout_buf,  src, len);
-       p_umv_buf->status = IB_SUCCESS;
-       p_umv_buf->output_size = (uint32_t)len;
-       return 0;
-}
-
-
-
-// API
-int mthca_query_device(struct ib_device *ibdev,
-                                        struct ib_device_attr *props);
-
-int mthca_query_port(struct ib_device *ibdev,
-                           u8 port, struct ib_port_attr *props);
-
-int mthca_modify_port(struct ib_device *ibdev,
-                            u8 port, int port_modify_mask,
-                            struct ib_port_modify *props);
-
-int mthca_query_pkey_chunk(struct ib_device *ibdev,
-                           u8 port, u16 index, u16 pkey[32]);
-
-int mthca_query_gid_chunk(struct ib_device *ibdev, u8 port,
-                          int index, union ib_gid gid[8]);
-
-struct ib_ucontext *mthca_alloc_ucontext(struct ib_device *ibdev,
-                                               ci_umv_buf_t* const                     p_umv_buf);
-
-int mthca_dealloc_ucontext(struct ib_ucontext *context);
-
-struct ib_pd *mthca_alloc_pd(struct ib_device *ibdev,
-                                   struct ib_ucontext *context,
-                                   ci_umv_buf_t* const                 p_umv_buf);
-
-int mthca_dealloc_pd(struct ib_pd *pd);
-
-struct ib_ah *mthca_ah_create(struct ib_pd *pd,
-                                    struct ib_ah_attr *ah_attr);
-
-int mthca_ah_destroy(struct ib_ah *ah);
-
-struct ib_srq *mthca_create_srq(struct ib_pd *pd,
-                                      struct ib_srq_init_attr *init_attr,
-                                      ci_umv_buf_t* const                      p_umv_buf);
-
-int mthca_destroy_srq(struct ib_srq *srq);
-
-struct ib_qp *mthca_create_qp(struct ib_pd *pd,
-                                    struct ib_qp_init_attr *init_attr,
-                                    ci_umv_buf_t* const                        p_umv_buf);
-
-int mthca_destroy_qp(struct ib_qp *qp);
-
-struct ib_cq *mthca_create_cq(struct ib_device *ibdev, int entries,
-                                    struct ib_ucontext *context,
-                                    ci_umv_buf_t* const                        p_umv_buf);
-
-int mthca_destroy_cq(struct ib_cq *cq);
-
-struct ib_mr *mthca_get_dma_mr(struct ib_pd *pd, mthca_qp_access_t acc);
-
-struct ib_mr *mthca_reg_phys_mr(struct ib_pd       *pd,
-                                      struct ib_phys_buf *buffer_list,
-                                      int                 num_phys_buf,
-                                      mthca_qp_access_t                 acc,
-                                      u64                *iova_start);
-
-struct ib_mr *mthca_reg_virt_mr(struct ib_pd *pd, 
-        void* __ptr64  vaddr, uint64_t length, uint64_t hca_va,
-        mthca_qp_access_t acc, boolean_t um_call);
-
-int mthca_dereg_mr(struct ib_mr *mr);
-
-struct ib_fmr *mthca_alloc_fmr(struct ib_pd *pd, mthca_qp_access_t acc,
-                                     struct ib_fmr_attr *fmr_attr);
-
-int mthca_dealloc_fmr(struct ib_fmr *fmr);
-
-int mthca_unmap_fmr(struct list_head *fmr_list);
-
-int mthca_poll_cq_list(
-       IN              struct ib_cq *ibcq, 
-       IN      OUT                     ib_wc_t** const                         pp_free_wclist,
-               OUT                     ib_wc_t** const                         pp_done_wclist );
-
-
-#endif /* MTHCA_PROVIDER_H */
+/*\r
+ * Copyright (c) 2004 Topspin Communications.  All rights reserved.\r
+ * Copyright (c) 2005 Cisco Systems.  All rights reserved.\r
+ * Copyright (c) 2005 Mellanox Technologies. All rights reserved.\r
+ *\r
+ * This software is available to you under a choice of one of two\r
+ * licenses.  You may choose to be licensed under the terms of the GNU\r
+ * General Public License (GPL) Version 2, available from the file\r
+ * COPYING in the main directory of this source tree, or the\r
+ * OpenIB.org BSD license below:\r
+ *\r
+ *     Redistribution and use in source and binary forms, with or\r
+ *     without modification, are permitted provided that the following\r
+ *     conditions are met:\r
+ *\r
+ *      - Redistributions of source code must retain the above\r
+ *        copyright notice, this list of conditions and the following\r
+ *        disclaimer.\r
+ *\r
+ *      - Redistributions in binary form must reproduce the above\r
+ *        copyright notice, this list of conditions and the following\r
+ *        disclaimer in the documentation and/or other materials\r
+ *        provided with the distribution.\r
+ *\r
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,\r
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\r
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND\r
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS\r
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN\r
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN\r
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\r
+ * SOFTWARE.\r
+ *\r
+ * $Id$\r
+ */\r
+\r
+#ifndef MTHCA_PROVIDER_H\r
+#define MTHCA_PROVIDER_H\r
+\r
+#include <ib_verbs.h>\r
+#include <ib_pack.h>\r
+#include <iba/ib_ci.h>\r
+\r
+typedef uint32_t mthca_mpt_access_t;\r
+#define MTHCA_MPT_FLAG_ATOMIC        (1 << 14)\r
+#define MTHCA_MPT_FLAG_REMOTE_WRITE  (1 << 13)\r
+#define MTHCA_MPT_FLAG_REMOTE_READ   (1 << 12)\r
+#define MTHCA_MPT_FLAG_LOCAL_WRITE   (1 << 11)\r
+#define MTHCA_MPT_FLAG_LOCAL_READ    (1 << 10)\r
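+\r
+/*\r
+ * The MPT flags above are single bits and combine with bitwise OR into an\r
+ * mthca_mpt_access_t mask, e.g. (the particular combination below is only\r
+ * an illustration, not a required default):\r
+ *\r
+ *     mthca_mpt_access_t acc = MTHCA_MPT_FLAG_LOCAL_READ |\r
+ *                              MTHCA_MPT_FLAG_LOCAL_WRITE |\r
+ *                              MTHCA_MPT_FLAG_REMOTE_READ;\r
+ */\r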
+\r
+union mthca_buf {\r
+       struct scatterlist direct;\r
+       struct scatterlist *page_list;\r
+};\r
+\r
+struct mthca_uar {\r
+       PFN_NUMBER pfn;\r
+       int           index;\r
+};\r
+\r
+struct mthca_user_db_table;\r
+\r
+struct mthca_ucontext {\r
+       struct ib_ucontext          ibucontext;\r
+       struct mthca_uar            uar;\r
+       struct mthca_user_db_table *db_tab;\r
+       // for mapping the UAR into user space\r
+       PMDL    mdl;\r
+       PVOID   kva;\r
+       SIZE_T uar_size;        \r
+};\r
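+\r
+/*\r
+ * The mdl/kva/uar_size members above carry the state needed to expose the\r
+ * UAR to user space.  A minimal sketch of building such a mapping with the\r
+ * standard kernel APIs follows; mthca_map_user_uar() is a hypothetical\r
+ * helper, not part of this header, and a production version would wrap the\r
+ * user-mode mapping in __try/__except:\r
+ */\r
+#if 0\r
+static NTSTATUS mthca_map_user_uar(struct mthca_ucontext *context,\r
+       PHYSICAL_ADDRESS uar_pa, SIZE_T uar_size)\r
+{\r
+       PVOID uva;\r
+\r
+       /* map the UAR registers into kernel space, uncached */\r
+       context->kva = MmMapIoSpace(uar_pa, uar_size, MmNonCached);\r
+       if (!context->kva)\r
+               return STATUS_INSUFFICIENT_RESOURCES;\r
+\r
+       /* describe the kernel mapping with an MDL ... */\r
+       context->mdl = IoAllocateMdl(context->kva, (ULONG)uar_size,\r
+               FALSE, FALSE, NULL);\r
+       if (!context->mdl) {\r
+               MmUnmapIoSpace(context->kva, uar_size);\r
+               return STATUS_INSUFFICIENT_RESOURCES;\r
+       }\r
+       MmBuildMdlForNonPagedPool(context->mdl);\r
+\r
+       /* ... and re-map it into the calling user-mode process;\r
+        * uva would be handed back to the user-mode library */\r
+       uva = MmMapLockedPagesSpecifyCache(context->mdl, UserMode,\r
+               MmNonCached, NULL, FALSE, NormalPagePriority);\r
+       if (!uva) {\r
+               IoFreeMdl(context->mdl);\r
+               MmUnmapIoSpace(context->kva, uar_size);\r
+               return STATUS_INSUFFICIENT_RESOURCES;\r
+       }\r
+\r
+       context->uar_size = uar_size;\r
+       return STATUS_SUCCESS;\r
+}\r
+#endif\r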
+\r
+struct mthca_mtt;\r
+\r
+struct mthca_mr {\r
+       //NB: the start of this structure must be identical to mlnx_mro_t !\r
+       //NB: that structure is not embedded here so as not to mix driver and provider structures\r
+       struct ib_mr      ibmr;\r
+       struct mthca_mtt *mtt;\r
+       int                     iobuf_used;\r
+       mt_iobuf_t      iobuf;\r
+       void *secure_handle;\r
+};\r
+\r
+struct mthca_fmr {\r
+       struct ib_fmr      ibmr;\r
+       struct ib_fmr_attr attr;\r
+       struct mthca_mtt  *mtt;\r
+       int                maps;\r
+       union {\r
+               struct {\r
+                       struct mthca_mpt_entry __iomem *mpt;\r
+                       u64 __iomem *mtts;\r
+               } tavor;\r
+               struct {\r
+                       struct mthca_mpt_entry *mpt;\r
+                       __be64 *mtts;\r
+               } arbel;\r
+       } mem;\r
+};\r
+\r
+struct mthca_pd {\r
+       struct ib_pd    ibpd;\r
+       u32             pd_num;\r
+       atomic_t        sqp_count;\r
+       struct mthca_mr ntmr;\r
+       int             privileged;\r
+};\r
+\r
+struct mthca_eq {\r
+       struct mthca_dev      *dev;\r
+       int                    eqn;\r
+       int                    eq_num;\r
+       u32                    eqn_mask;\r
+       u32                    cons_index;\r
+       u16                    msi_x_vector;\r
+       u16                    msi_x_entry;\r
+       int                    have_irq;\r
+       int                    nent;\r
+       struct scatterlist *page_list;\r
+       struct mthca_mr        mr;\r
+       KDPC                            dpc;                    /* DPC for MSI-X interrupts */\r
+       spinlock_t  lock;                       /* spinlock to serialize simultaneous DPCs */\r
+};\r
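+\r
+/*\r
+ * The dpc/lock pair above lets each EQ be served by its own DPC while the\r
+ * driver bounds how long any one EQ may monopolize a CPU (the driver-global\r
+ * g_max_DPC_time_us tunable).  A minimal sketch of such a time budget;\r
+ * mthca_eq_poll_one() is a hypothetical "consume one EQE" helper, and\r
+ * locking of eq->lock is elided:\r
+ */\r
+#if 0\r
+static void mthca_eq_dpc_sketch(PRKDPC dpc, PVOID ctx, PVOID arg1, PVOID arg2)\r
+{\r
+       struct mthca_eq *eq = (struct mthca_eq *)ctx;\r
+       LARGE_INTEGER freq, start, now;\r
+       LONGLONG budget;\r
+\r
+       UNREFERENCED_PARAMETER(dpc);\r
+       UNREFERENCED_PARAMETER(arg1);\r
+       UNREFERENCED_PARAMETER(arg2);\r
+\r
+       /* convert the microsecond budget into performance-counter ticks */\r
+       start = KeQueryPerformanceCounter(&freq);\r
+       budget = freq.QuadPart * g_max_DPC_time_us / 1000000;\r
+\r
+       while (mthca_eq_poll_one(eq)) {\r
+               now = KeQueryPerformanceCounter(NULL);\r
+               if (now.QuadPart - start.QuadPart >= budget) {\r
+                       /* budget exhausted: requeue ourselves so that the\r
+                        * DPCs of the other EQs get a chance to run */\r
+                       KeInsertQueueDpc(&eq->dpc, NULL, NULL);\r
+                       break;\r
+               }\r
+       }\r
+}\r
+#endif\r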
+\r
+struct mthca_av;\r
+\r
+enum mthca_ah_type {\r
+       MTHCA_AH_ON_HCA,\r
+       MTHCA_AH_PCI_POOL,\r
+       MTHCA_AH_KMALLOC\r
+};\r
+\r
+struct mthca_ah {\r
+       struct ib_ah       ibah;\r
+       enum mthca_ah_type type;\r
+       u32                key;\r
+       struct mthca_av   *av;\r
+       dma_addr_t         avdma;\r
+};\r
+\r
+/*\r
+ * Quick description of our CQ/QP locking scheme:\r
+ *\r
+ * We have one global lock that protects dev->cq/qp_table.  Each\r
+ * struct mthca_cq/qp also has its own lock.  An individual qp lock\r
+ * may be taken inside of an individual cq lock.  Both cqs attached to\r
+ * a qp may be locked, with the send cq locked first.  No other\r
+ * nesting should be done.\r
+ *\r
+ * Each struct mthca_cq/qp also has an atomic_t ref count.  The\r
+ * pointer from the cq/qp_table to the struct counts as one reference.\r
+ * This reference is also good for access through the consumer API, so\r
+ * modifying the CQ/QP etc doesn't need to take another reference.\r
+ * Access because of a completion being polled does need a reference.\r
+ *\r
+ * Finally, each struct mthca_cq/qp has a wait_queue_head_t for the\r
+ * destroy function to sleep on.\r
+ *\r
+ * This means that access from the consumer API requires nothing but\r
+ * taking the struct's lock.\r
+ *\r
+ * Access because of a completion event should go as follows:\r
+ * - lock cq/qp_table and look up struct\r
+ * - increment ref count in struct\r
+ * - drop cq/qp_table lock\r
+ * - lock struct, do your thing, and unlock struct\r
+ * - decrement ref count; if zero, wake up waiters\r
+ *\r
+ * To destroy a CQ/QP, we can do the following:\r
+ * - lock cq/qp_table, remove pointer, unlock cq/qp_table lock\r
+ * - decrement ref count\r
+ * - wait_event until ref count is zero\r
+ *\r
+ * It is the consumer's responsibility to make sure that no QP\r
+ * operations (WQE posting or state modification) are pending when the\r
+ * QP is destroyed.  Also, the consumer must make sure that calls to\r
+ * qp_modify are serialized.\r
+ *\r
+ * Possible optimizations (wait for profile data to see if/where we\r
+ * have locks bouncing between CPUs):\r
+ * - split cq/qp table lock into n separate (cache-aligned) locks,\r
+ *   indexed (say) by the page in the table\r
+ * - split QP struct lock into three (one for common info, one for the\r
+ *   send queue and one for the receive queue)\r
+ */\r
+//TODO: check correctness of the above requirement: "It is the consumer's responsibility to make sure\r
+// that no QP operations (WQE posting or state modification) are pending when the QP is destroyed"\r
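+\r
+/*\r
+ * Sketch of the completion-event access pattern described above; the helper\r
+ * name and cq_table spellings are illustrative, and Linux-style primitives\r
+ * (spin_lock, atomic_inc) are used for brevity where the port's shim\r
+ * spellings differ slightly:\r
+ */\r
+#if 0\r
+static void mthca_cq_event_sketch(struct mthca_dev *dev, u32 cqn)\r
+{\r
+       struct mthca_cq *cq;\r
+\r
+       /* lock cq_table, look the struct up, take a reference */\r
+       spin_lock(&dev->cq_table.lock);\r
+       cq = mthca_array_get(&dev->cq_table.cq, cqn);\r
+       if (cq)\r
+               atomic_inc(&cq->refcount);\r
+       spin_unlock(&dev->cq_table.lock);\r
+\r
+       if (!cq)\r
+               return;\r
+\r
+       /* lock the struct, do the work, unlock the struct */\r
+       spin_lock(&cq->lock);\r
+       /* ... handle the event ... */\r
+       spin_unlock(&cq->lock);\r
+\r
+       /* drop the reference; the last one out wakes the destroyer */\r
+       if (atomic_dec_and_test(&cq->refcount))\r
+               wake_up(&cq->wait);\r
+}\r
+#endif\r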
+\r
+struct mthca_cq {\r
+       struct ib_cq           ibcq;\r
+       void                                            *cq_context;    // leo: for IBAL shim\r
+       spinlock_t             lock;\r
+       atomic_t               refcount;\r
+       int                    cqn;\r
+       u32                    cons_index;\r
+       int                    is_direct;\r
+       int                    is_kernel;\r
+\r
+       /* Next fields are Arbel only */\r
+       int                    set_ci_db_index;\r
+       __be32                *set_ci_db;\r
+       int                    arm_db_index;\r
+       __be32                *arm_db;\r
+       int                    arm_sn;\r
+       int                    u_arm_db_index;\r
+       int                *p_u_arm_sn;\r
+\r
+       union mthca_buf        queue;\r
+       struct mthca_mr        mr;\r
+       wait_queue_head_t      wait;\r
+       KMUTEX                      mutex;\r
+};\r
+\r
+struct mthca_srq {\r
+       struct ib_srq           ibsrq;\r
+       spinlock_t              lock;\r
+       atomic_t                refcount;\r
+       int                     srqn;\r
+       int                     max;\r
+       int                     max_gs;\r
+       int                     wqe_shift;\r
+       int                     first_free;\r
+       int                     last_free;\r
+       u16                     counter;  /* Arbel only */\r
+       int                     db_index; /* Arbel only */\r
+       __be32                 *db;       /* Arbel only */\r
+       void                   *last;\r
+\r
+       int                     is_direct;\r
+       u64                    *wrid;\r
+       union mthca_buf         queue;\r
+       struct mthca_mr         mr;\r
+\r
+       wait_queue_head_t       wait;\r
+       KMUTEX                      mutex;\r
+};\r
+\r
+struct mthca_wq {\r
+       spinlock_t lock;\r
+       int        max;\r
+       unsigned   next_ind;\r
+       unsigned   last_comp;\r
+       unsigned   head;\r
+       unsigned   tail;\r
+       void      *last;\r
+       int        max_gs;\r
+       int        wqe_shift;\r
+\r
+       int        db_index;    /* Arbel only */\r
+       __be32    *db;\r
+};\r
+\r
+struct mthca_qp {\r
+       struct ib_qp           ibqp;\r
+       void                                            *qp_context;    // leo: for IBAL shim\r
+       //TODO: added only because ibv_query_qp is absent;\r
+       // later it may be worth replacing with struct ib_qp_attr qp_attr;\r
+       struct ib_qp_init_attr qp_init_attr;    // leo: for query_qp\r
+       atomic_t               refcount;\r
+       u32                    qpn;\r
+       int                    is_direct;\r
+       u8                     transport;\r
+       u8                     state;\r
+       u8                     atomic_rd_en;\r
+       u8                     resp_depth;\r
+\r
+       struct mthca_mr        mr;\r
+\r
+       struct mthca_wq        rq;\r
+       struct mthca_wq        sq;\r
+       enum ib_sig_type       sq_policy;\r
+       int                    send_wqe_offset;\r
+       int                    max_inline_data;\r
+\r
+       u64                   *wrid;\r
+       union mthca_buf        queue;\r
+\r
+       wait_queue_head_t      wait;\r
+       KMUTEX                      mutex;\r
+};\r
+\r
+struct mthca_sqp {\r
+       struct mthca_qp qp;\r
+       int             port;\r
+       int             pkey_index;\r
+       u32             qkey;\r
+       u32             send_psn;\r
+       struct ib_ud_header ud_header;\r
+       struct scatterlist sg;\r
+};\r
+\r
+static inline struct mthca_ucontext *to_mucontext(struct ib_ucontext *ibucontext)\r
+{\r
+       return container_of(ibucontext, struct mthca_ucontext, ibucontext);\r
+}\r
+\r
+static inline struct mthca_fmr *to_mfmr(struct ib_fmr *ibmr)\r
+{\r
+       return container_of(ibmr, struct mthca_fmr, ibmr);\r
+}\r
+\r
+static inline struct mthca_mr *to_mmr(struct ib_mr *ibmr)\r
+{\r
+       return container_of(ibmr, struct mthca_mr, ibmr);\r
+}\r
+\r
+static inline struct mthca_pd *to_mpd(struct ib_pd *ibpd)\r
+{\r
+       return container_of(ibpd, struct mthca_pd, ibpd);\r
+}\r
+\r
+static inline struct mthca_ah *to_mah(struct ib_ah *ibah)\r
+{\r
+       return container_of(ibah, struct mthca_ah, ibah);\r
+}\r
+\r
+static inline struct mthca_cq *to_mcq(struct ib_cq *ibcq)\r
+{\r
+       return container_of(ibcq, struct mthca_cq, ibcq);\r
+}\r
+\r
+static inline struct mthca_srq *to_msrq(struct ib_srq *ibsrq)\r
+{\r
+       return container_of(ibsrq, struct mthca_srq, ibsrq);\r
+}\r
+\r
+static inline struct mthca_qp *to_mqp(struct ib_qp *ibqp)\r
+{\r
+       return container_of(ibqp, struct mthca_qp, ibqp);\r
+}\r
+\r
+static inline struct mthca_sqp *to_msqp(struct mthca_qp *qp)\r
+{\r
+       return container_of(qp, struct mthca_sqp, qp);\r
+}\r
+\r
+static inline uint8_t start_port(struct ib_device *device)\r
+{\r
+       return device->node_type == IB_NODE_SWITCH ? 0 : 1;\r
+}\r
+\r
+static inline uint8_t end_port(struct ib_device *device)\r
+{\r
+       return device->node_type == IB_NODE_SWITCH ? 0 : device->phys_port_cnt;\r
+}\r
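+\r
+/*\r
+ * start_port()/end_port() fold the switch-vs-CA port numbering difference\r
+ * into a single idiom for walking every physical port, e.g.:\r
+ *\r
+ *     for (port = start_port(ibdev); port <= end_port(ibdev); ++port)\r
+ *             ... query or configure the port ...\r
+ */\r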
+\r
+static inline int ib_copy_from_umv_buf(void *dest, ci_umv_buf_t* const p_umv_buf, size_t len)\r
+{\r
+       RtlCopyMemory(dest, p_umv_buf->p_inout_buf,  len);\r
+       return 0;\r
+}\r
+\r
+static inline int ib_copy_to_umv_buf(ci_umv_buf_t* const p_umv_buf, void *src, size_t len)\r
+{\r
+       if (p_umv_buf->output_size < len) {\r
+               p_umv_buf->status = IB_INSUFFICIENT_MEMORY;\r
+               p_umv_buf->output_size = 0;\r
+               return -EFAULT;\r
+       }\r
+       RtlCopyMemory(p_umv_buf->p_inout_buf,  src, len);\r
+       p_umv_buf->status = IB_SUCCESS;\r
+       p_umv_buf->output_size = (uint32_t)len;\r
+       return 0;\r
+}\r
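+\r
+/*\r
+ * Typical use of ib_copy_to_umv_buf() from a verb: marshal a small response\r
+ * struct back to the user-mode library.  The response layout and the label\r
+ * below are illustrative only:\r
+ */\r
+#if 0\r
+       struct { u64 pd_handle; u32 pdn; } resp;        /* illustrative layout */\r
+\r
+       resp.pd_handle = (u64)(ULONG_PTR)pd;\r
+       resp.pdn = pd->pd_num;\r
+       if (ib_copy_to_umv_buf(p_umv_buf, &resp, sizeof resp))\r
+               goto err_copy;  /* user buffer too small; status already set */\r
+#endif\r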
+\r
+\r
+\r
+// API\r
+int mthca_query_device(struct ib_device *ibdev,\r
+                                        struct ib_device_attr *props);\r
+\r
+int mthca_query_port(struct ib_device *ibdev,\r
+                           u8 port, struct ib_port_attr *props);\r
+\r
+int mthca_modify_port(struct ib_device *ibdev,\r
+                            u8 port, int port_modify_mask,\r
+                            struct ib_port_modify *props);\r
+\r
+int mthca_query_pkey_chunk(struct ib_device *ibdev,\r
+                           u8 port, u16 index, u16 pkey[32]);\r
+\r
+int mthca_query_gid_chunk(struct ib_device *ibdev, u8 port,\r
+                          int index, union ib_gid gid[8]);\r
+\r
+struct ib_ucontext *mthca_alloc_ucontext(struct ib_device *ibdev,\r
+                                               ci_umv_buf_t* const                     p_umv_buf);\r
+\r
+int mthca_dealloc_ucontext(struct ib_ucontext *context);\r
+\r
+struct ib_pd *mthca_alloc_pd(struct ib_device *ibdev,\r
+                                   struct ib_ucontext *context,\r
+                                   ci_umv_buf_t* const                 p_umv_buf);\r
+\r
+int mthca_dealloc_pd(struct ib_pd *pd);\r
+\r
+struct ib_ah *mthca_ah_create(struct ib_pd *pd,\r
+                                    struct ib_ah_attr *ah_attr);\r
+\r
+int mthca_ah_destroy(struct ib_ah *ah);\r
+\r
+struct ib_srq *mthca_create_srq(struct ib_pd *pd,\r
+                                      struct ib_srq_init_attr *init_attr,\r
+                                      ci_umv_buf_t* const                      p_umv_buf);\r
+\r
+int mthca_destroy_srq(struct ib_srq *srq);\r
+\r
+struct ib_qp *mthca_create_qp(struct ib_pd *pd,\r
+                                    struct ib_qp_init_attr *init_attr,\r
+                                    ci_umv_buf_t* const                        p_umv_buf);\r
+\r
+int mthca_destroy_qp(struct ib_qp *qp);\r
+\r
+struct ib_cq *mthca_create_cq(struct ib_device *ibdev, int entries,\r
+                                    struct ib_ucontext *context,\r
+                                    ci_umv_buf_t* const                        p_umv_buf);\r
+\r
+int mthca_destroy_cq(struct ib_cq *cq);\r
+\r
+struct ib_mr *mthca_get_dma_mr(struct ib_pd *pd, mthca_qp_access_t acc);\r
+\r
+struct ib_mr *mthca_reg_phys_mr(struct ib_pd       *pd,\r
+                                      struct ib_phys_buf *buffer_list,\r
+                                      int                 num_phys_buf,\r
+                                      mthca_qp_access_t                 acc,\r
+                                      u64                *iova_start);\r
+\r
+struct ib_mr *mthca_reg_virt_mr(struct ib_pd *pd, \r
+        void* __ptr64  vaddr, uint64_t length, uint64_t hca_va,\r
+        mthca_qp_access_t acc, boolean_t um_call);\r
+\r
+int mthca_dereg_mr(struct ib_mr *mr);\r
+\r
+struct ib_fmr *mthca_alloc_fmr(struct ib_pd *pd, mthca_qp_access_t acc,\r
+                                     struct ib_fmr_attr *fmr_attr);\r
+\r
+int mthca_dealloc_fmr(struct ib_fmr *fmr);\r
+\r
+int mthca_unmap_fmr(struct list_head *fmr_list);\r
+\r
+int mthca_poll_cq_list(\r
+       IN              struct ib_cq *ibcq, \r
+       IN      OUT                     ib_wc_t** const                         pp_free_wclist,\r
+               OUT                     ib_wc_t** const                         pp_done_wclist );\r
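+\r
+/*\r
+ * Sketch of the polling convention assumed by mthca_poll_cq_list(): the\r
+ * caller passes in a chain of free work completions and receives back the\r
+ * chain of completed ones.  The chain length and the p_next spelling follow\r
+ * the IBAL ib_wc_t convention; treat the details as illustrative:\r
+ */\r
+#if 0\r
+       ib_wc_t wc[4], *p_free, *p_done = NULL;\r
+       int i, err;\r
+\r
+       /* chain the free entries through p_next */\r
+       for (i = 0; i < 3; ++i)\r
+               wc[i].p_next = &wc[i + 1];\r
+       wc[3].p_next = NULL;\r
+       p_free = &wc[0];\r
+\r
+       err = mthca_poll_cq_list(ibcq, &p_free, &p_done);\r
+       while (!err && p_done) {\r
+               /* ... consume one completed WC ... */\r
+               p_done = p_done->p_next;\r
+       }\r
+#endif\r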
+\r
+\r
+#endif /* MTHCA_PROVIDER_H */\r