git.openfabrics.org - ~shefty/rdma-win.git/commitdiff
update to trunk svn 1458
author shefty <shefty@ad392aa1-c5ef-ae45-8dd8-e69d62a5ef86>
Mon, 4 Aug 2008 06:14:41 +0000 (06:14 +0000)
committer shefty <shefty@ad392aa1-c5ef-ae45-8dd8-e69d62a5ef86>
Mon, 4 Aug 2008 06:14:41 +0000 (06:14 +0000)
git-svn-id: svn://openib.tc.cornell.edu/gen1@1459 ad392aa1-c5ef-ae45-8dd8-e69d62a5ef86

27 files changed:
branches/winverbs/BuildRelease.bat
branches/winverbs/hw/mlx4/kernel/bus/drv/makefile.inc
branches/winverbs/hw/mlx4/kernel/bus/drv/mlx4_bus.inf [deleted file]
branches/winverbs/hw/mlx4/kernel/bus/drv/mlx4_bus.inx [new file with mode: 0644]
branches/winverbs/hw/mlx4/kernel/bus/drv/sources
branches/winverbs/hw/mlx4/kernel/bus/inc/ib_verbs.h
branches/winverbs/hw/mlx4/kernel/bus/inc/ib_verbs_ex.h
branches/winverbs/hw/mlx4/kernel/hca/SOURCES
branches/winverbs/hw/mlx4/kernel/hca/cq.c
branches/winverbs/hw/mlx4/kernel/hca/makefile.inc
branches/winverbs/hw/mlx4/kernel/hca/mlx4_hca.inf [deleted file]
branches/winverbs/hw/mlx4/kernel/hca/mlx4_hca.inx [new file with mode: 0644]
branches/winverbs/hw/mlx4/kernel/hca/qp.c
branches/winverbs/hw/mlx4/kernel/hca/srq.c
branches/winverbs/hw/mthca/kernel/hca_verbs.c
branches/winverbs/hw/mthca/kernel/mthca_cq.c
branches/winverbs/hw/mthca/kernel/mthca_provider.h
branches/winverbs/inc/kernel/ip_packet.h
branches/winverbs/ulp/ipoib/kernel/ipoib_adapter.c
branches/winverbs/ulp/ipoib/kernel/ipoib_adapter.h
branches/winverbs/ulp/ipoib/kernel/ipoib_driver.c
branches/winverbs/ulp/ipoib/kernel/ipoib_endpoint.h
branches/winverbs/ulp/ipoib/kernel/ipoib_port.c
branches/winverbs/ulp/ipoib/kernel/ipoib_port.h
branches/winverbs/ulp/ipoib/kernel/netipoib.inx
branches/winverbs/ulp/opensm/user/include/iba/ib_types.h
branches/winverbs/ulp/opensm/user/include/iba/ib_types_extended.h

index 320f52a3869d4a179cace839760622197d1c5a35..798ee9202cf106bdaf2e5f779854c89a3050d6b0 100644 (file)
@@ -97,7 +97,7 @@ if "%1" == "msi" (
 \r
 rem Use this WDK\r
 \r
-set _DDK=C:\WinDDK\6001.18001\r
+set _DDK=%SystemDrive%\WinDDK\6001.18001\r
 \r
 if NOT EXIST %_DDK% (\r
     echo Missing WDK @ %_DDK%\r
@@ -106,7 +106,7 @@ if NOT EXIST %_DDK% (
 echo %0 - Building with WDK @ %_DDK%\r
 \r
 rem Platform SDK path - watchout for missing LoadPerf.h (installsp.c)\r
-set _PSDK=C:\progra~1\mi2578~1\r
+set _PSDK=%SystemDrive%\progra~1\mi2578~1\r
 \r
 rem setup value for OPENIB_REV assignment.\r
 \r
index 6f8b1a66a7d84366c8b3ec565a7633c3fb940dbd..f2e12253d1eca10a6ae21348164b4d072f81201c 100644 (file)
@@ -5,6 +5,20 @@ mlx4_bus.bmf: bus.mof
         wmimofck $(OBJ_PATH)\$O\mlx4_bus.bmf\r
 \r
 \r
+# Transform .inx file to .inf file adding date + major,min & svn.version stamp\r
+# Output .inf file is copied to the $(INF_TARGET) folder (commonly where .sys file resides).\r
 \r
+_LNG=$(LANGUAGE)\r
 \r
+!IF !DEFINED(_INX)\r
+_INX=.\r
+!ENDIF\r
+\r
+STAMP=stampinf -a $(_BUILDARCH)\r
+\r
+!INCLUDE mod_ver.def\r
+\r
+$(INF_TARGET) : $(_INX)\$(INF_NAME).inx\r
+    copy $(_INX)\$(@B).inx $@\r
+    $(STAMP) -f $@ -d * -v $(IB_MAJORVERSION).$(IB_MINORVERSION).$(IB_BUILDVERSION).$(OPENIB_REV)\r
 \r
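The rule added above copies the checked-in .inx template to $(INF_TARGET) and then runs stampinf on the copy: -a selects the build architecture, -d * stamps the current date into the INF's DriverVer directive, and -v supplies the version assembled from the mod_ver.def macros plus OPENIB_REV. As a rough, hypothetical expansion (the path and version digits below are illustrative only, not taken from this tree):

    copy .\mlx4_bus.inx ..\bin\kernel\objchk_wnet_x86\i386\mlx4_bus.inf
    stampinf -a x86 -f ..\bin\kernel\objchk_wnet_x86\i386\mlx4_bus.inf -d * -v 2.0.0.1459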
diff --git a/branches/winverbs/hw/mlx4/kernel/bus/drv/mlx4_bus.inf b/branches/winverbs/hw/mlx4/kernel/bus/drv/mlx4_bus.inf
deleted file mode 100644 (file)
index be5495d..0000000
+++ /dev/null
@@ -1,222 +0,0 @@
-; Mellanox Technologies InfiniBand HCAs.\r
-; Copyright 2008 Mellanox Technologies all Rights Reserved.\r
-\r
-[Version]\r
-Signature="$WINDOWS NT$"\r
-Class=Mlx4Bus\r
-ClassGUID={714995B2-CD65-4a47-BCFE-95AC73A0D780}\r
-Provider=%MTL%\r
-; must be synchronized with bus\drv.c\r
-DriverVer=02/01/2008,1.0.0.0\r
-CatalogFile=mlx4_bus.cat\r
-\r
-\r
-;*****************************************\r
-; Destination directory section\r
-;*****************************************\r
-\r
-[DestinationDirs]\r
-DefaultDestDir = 12\r
-Wdf_CoInstaller_CopyFiles = 11\r
-\r
-\r
-;*****************************************\r
-; Class Install section\r
-;*****************************************\r
-\r
-[ClassInstall32]\r
-AddReg=ClassAddReg\r
-\r
-[ClassAddReg]\r
-HKR,,,,"Mellanox ConnectX Adapters"\r
-HKR,,Icon,,-5\r
-HKR,,SilentInstall,,1\r
-\r
-\r
-;*****************************************\r
-; Device Install section\r
-;*****************************************\r
-\r
-[SourceDisksNames.x86]\r
-1=%DiskId%,,,""\r
-\r
-[SourceDisksNames.amd64]\r
-1=%DiskId%,,,""\r
-\r
-[SourceDisksNames.ia64]\r
-1=%DiskId%,,,""\r
-\r
-[SourceDisksFiles.x86]\r
-mlx4_bus.sys = 1,,\r
-wdfcoinstaller01007.dll = 1,,\r
-\r
-[SourceDisksFiles.amd64]\r
-mlx4_bus.sys = 1,,\r
-wdfcoinstaller01007.dll = 1,,\r
-\r
-[SourceDisksFiles.ia64]\r
-mlx4_bus.sys = 1,,\r
-wdfcoinstaller01007.dll = 1,,\r
-\r
-;*****************************************\r
-; Mlx4Bus  Install Section\r
-;*****************************************\r
-\r
-[Manufacturer]\r
-%MTL% = MLX4BUS.DeviceSection,ntx86,ntamd64,ntia64\r
-\r
-[MLX4BUS.DeviceSection]\r
-; empty since we don't support W9x/Me\r
-\r
-[MLX4BUS.DeviceSection.ntx86]\r
-%MT25408.DeviceDesc%=MLX4BUS.DDInstall, PCI\VEN_15B3&DEV_6340\r
-%MT25418.DeviceDesc%=MLX4BUS.DDInstall, PCI\VEN_15B3&DEV_634A\r
-%MT25428.DeviceDesc%=MLX4BUS.DDInstall, PCI\VEN_15B3&DEV_6354\r
-%MT25448.DeviceDesc%=MLX4BUS.DDInstall, PCI\VEN_15B3&DEV_6368\r
-%MT26418.DeviceDesc%=MLX4BUS.DDInstall, PCI\VEN_15B3&DEV_6732\r
-%MT26428.DeviceDesc%=MLX4BUS.DDInstall, PCI\VEN_15B3&DEV_673c\r
-%MT00401.DeviceDesc%=MLX4BUS.DDInstall, PCI\VEN_15B3&DEV_0191\r
-\r
-[MLX4BUS.DeviceSection.ntamd64]\r
-%MT25408.DeviceDesc%=MLX4BUS.DDInstall, PCI\VEN_15B3&DEV_6340\r
-%MT25418.DeviceDesc%=MLX4BUS.DDInstall, PCI\VEN_15B3&DEV_634A\r
-%MT25428.DeviceDesc%=MLX4BUS.DDInstall, PCI\VEN_15B3&DEV_6354\r
-%MT26418.DeviceDesc%=MLX4BUS.DDInstall, PCI\VEN_15B3&DEV_6732\r
-%MT25448.DeviceDesc%=MLX4BUS.DDInstall, PCI\VEN_15B3&DEV_6368\r
-%MT26428.DeviceDesc%=MLX4BUS.DDInstall, PCI\VEN_15B3&DEV_673c\r
-%MT00401.DeviceDesc%=MLX4BUS.DDInstall, PCI\VEN_15B3&DEV_0191\r
-\r
-[MLX4BUS.DeviceSection.ntia64]\r
-%MT25408.DeviceDesc%=MLX4BUS.DDInstall, PCI\VEN_15B3&DEV_6340\r
-%MT25418.DeviceDesc%=MLX4BUS.DDInstall, PCI\VEN_15B3&DEV_634A\r
-%MT25428.DeviceDesc%=MLX4BUS.DDInstall, PCI\VEN_15B3&DEV_6354\r
-%MT25448.DeviceDesc%=MLX4BUS.DDInstall, PCI\VEN_15B3&DEV_6368\r
-%MT26418.DeviceDesc%=MLX4BUS.DDInstall, PCI\VEN_15B3&DEV_6732\r
-%MT26428.DeviceDesc%=MLX4BUS.DDInstall, PCI\VEN_15B3&DEV_673c\r
-%MT00401.DeviceDesc%=MLX4BUS.DDInstall, PCI\VEN_15B3&DEV_0191\r
-\r
-[MLX4BUS.DDInstall.ntx86]\r
-CopyFiles = MLX4BUS.CopyFiles\r
-\r
-[MLX4BUS.DDInstall.ntamd64]\r
-CopyFiles = MLX4BUS.CopyFiles\r
-\r
-[MLX4BUS.DDInstall.ntia64]\r
-CopyFiles = MLX4BUS.CopyFiles\r
-\r
-[MLX4BUS.DDInstall.ntx86.Services]\r
-AddService = mlx4_bus,%SPSVCINST_ASSOCSERVICE%,MLX4BUS.ServiceInstall,MLX4BUS.EventLog\r
-\r
-[MLX4BUS.DDInstall.ntamd64.Services]\r
-AddService = mlx4_bus,%SPSVCINST_ASSOCSERVICE%,MLX4BUS.ServiceInstall,MLX4BUS.EventLog\r
-\r
-[MLX4BUS.DDInstall.ntia64.Services]\r
-AddService = mlx4_bus,%SPSVCINST_ASSOCSERVICE%,MLX4BUS.ServiceInstall,MLX4BUS.EventLog\r
-\r
-[MLX4BUS.CopyFiles]\r
-mlx4_bus.sys\r
-\r
-\r
-;*****************************************\r
-; Service Install section\r
-;*****************************************\r
-\r
-[MLX4BUS.ServiceInstall]\r
-DisplayName     = %MLX4BUS.ServiceDesc%\r
-ServiceType     = %SERVICE_KERNEL_DRIVER%\r
-StartType       = %SERVICE_DEMAND_START%\r
-ErrorControl    = %SERVICE_ERROR_NORMAL%\r
-ServiceBinary   = %12%\mlx4_bus.sys\r
-LoadOrderGroup  = extended base\r
-AddReg          = MLX4BUS.ParamsReg\r
-\r
-[MLX4BUS.EventLog]\r
-AddReg = MLX4BUS.AddEventLogReg\r
-\r
-[MLX4BUS.AddEventLogReg]\r
-HKR, , EventMessageFile, 0x00020000, "%%SystemRoot%%\System32\IoLogMsg.dll;%%SystemRoot%%\System32\drivers\mlx4_bus.sys"\r
-HKR, , TypesSupported,   0x00010001, 7\r
-\r
-[MLX4BUS.ParamsReg]\r
-HKR,,DeviceCharacteristics,0x10001,0x0100         ; Use same security checks on relative opens\r
-HKR,,Security,,"D:P(A;;GA;;;BA)(A;;GA;;;SY)"      ; Allow generic-all access to Built-in administrators and Local system \r
-HKR,"Parameters","DebugLevel",%REG_DWORD%,0x00000003\r
-HKR,"Parameters","DebugFlags",%REG_DWORD%,0x0000ffff\r
-HKR,"Parameters","LogNumQp",%REG_DWORD%,0x00000011\r
-HKR,"Parameters","LogNumRdmaRc",%REG_DWORD%,0x00000004\r
-HKR,"Parameters","LogNumSrq",%REG_DWORD%,0x00000010\r
-HKR,"Parameters","LogNumCq",%REG_DWORD%,0x00000010\r
-HKR,"Parameters","LogNumMcg",%REG_DWORD%,0x0000000D\r
-HKR,"Parameters","LogNumMpt",%REG_DWORD%,0x00000012\r
-HKR,"Parameters","LogNumMtt",%REG_DWORD%,0x00000014\r
-HKR,"Parameters","EnableQoS",%REG_DWORD%,0x00000001\r
-HKR,"Parameters","BlockMcastLoopBack",%REG_DWORD%,0x00000000\r
-HKR,"Parameters","InterruptFromFirstPacket",%REG_DWORD%,0x00000001\r
-\r
-HKR,"Parameters","NumMac",%REG_DWORD%,0x00000001\r
-HKR,"Parameters","NumVlan",%REG_DWORD%,0x00000000\r
-HKR,"Parameters","UsePrio",%REG_DWORD%,0x00000000\r
-HKR,"Parameters","PortType",%REG_SZ%,"ib,ib"\r
-\r
-\r
-HKLM,"System\CurrentControlSet\Control\WMI\GlobalLogger\E51BB6E2-914A-4e21-93C0-192F4801BBFF","Flags",%REG_DWORD%,0xffff\r
-HKLM,"System\CurrentControlSet\Control\WMI\GlobalLogger\E51BB6E2-914A-4e21-93C0-192F4801BBFF","Level",%REG_DWORD%,0x3\r
-\r
-;*****************************************\r
-; WDF Coinstaller installation section\r
-;*****************************************\r
-\r
-[MLX4BUS.DDInstall.ntx86.CoInstallers]\r
-AddReg=Wdf_CoInstaller_AddReg\r
-CopyFiles=Wdf_CoInstaller_CopyFiles\r
-\r
-[MLX4BUS.DDInstall.ntamd64.CoInstallers]\r
-AddReg=Wdf_CoInstaller_AddReg\r
-CopyFiles=Wdf_CoInstaller_CopyFiles\r
-\r
-[MLX4BUS.DDInstall.ntia64.CoInstallers]\r
-AddReg=Wdf_CoInstaller_AddReg\r
-CopyFiles=Wdf_CoInstaller_CopyFiles\r
-\r
-[Wdf_CoInstaller_AddReg]\r
-HKR,,CoInstallers32,0x00010000, "wdfcoinstaller01007.dll,WdfCoInstaller"\r
-\r
-[Wdf_CoInstaller_CopyFiles]\r
-wdfcoinstaller01007.dll\r
-\r
-[MLX4BUS.DDInstall.ntx86.Wdf]\r
-KmdfService = mlx4_bus, mlx4_bus_wdfsect\r
-\r
-[MLX4BUS.DDInstall.ntamd64.Wdf]\r
-KmdfService = mlx4_bus, mlx4_bus_wdfsect\r
-\r
-[MLX4BUS.DDInstall.ntia64.Wdf]\r
-KmdfService = mlx4_bus, mlx4_bus_wdfsect\r
-\r
-[mlx4_bus_wdfsect]\r
-KmdfLibraryVersion = 1.7\r
-\r
-\r
-;*****************************************\r
-; Strings\r
-;*****************************************\r
-\r
-[Strings]\r
-MTL="Mellanox Technologies Ltd."\r
-MLX4BUS.ServiceDesc = "Mellanox ConnectX Bus Enumerator"\r
-MT25408.DeviceDesc="ConnectX (MT25408) - Mellanox ConnectX SDR Channel Adapter"\r
-MT25418.DeviceDesc="ConnectX (MT25418) - Mellanox ConnectX DDR Channel Adapter"\r
-MT25428.DeviceDesc="ConnectX (MT25428) - Mellanox ConnectX QDR Channel Adapter"\r
-MT25448.DeviceDesc="ConnectX (MT25448) - Mellanox ConnectX Condor Ethernet Adapter"\r
-MT26418.DeviceDesc="ConnectX (MT26418) - Mellanox ConnectX DDR_G2 Channel Adapter"\r
-MT26428.DeviceDesc="ConnectX (MT26428) - Mellanox ConnectX QDR_G2 Channel Adapter"\r
-MT00401.DeviceDesc="ConnectX (MT00401) - Mellanox ConnectX Channel Adapter in Burning Mode"\r
-DiskId = "Mellanox Mlx4 Bus installation disk"\r
-SPSVCINST_NULL = 0x0\r
-SPSVCINST_ASSOCSERVICE = 0x00000002\r
-SERVICE_KERNEL_DRIVER  = 1\r
-SERVICE_DEMAND_START   = 3\r
-SERVICE_ERROR_NORMAL   = 1\r
-REG_DWORD              = 0x00010001\r
-REG_MULTI_SZ_APPEND    = 0x00010008\r
-REG_SZ                 = 0x00000000\r
diff --git a/branches/winverbs/hw/mlx4/kernel/bus/drv/mlx4_bus.inx b/branches/winverbs/hw/mlx4/kernel/bus/drv/mlx4_bus.inx
new file mode 100644 (file)
index 0000000..fa4fbca
--- /dev/null
@@ -0,0 +1,256 @@
+; Mellanox Technologies InfiniBand HCAs.\r
+; Copyright 2008 Mellanox Technologies all Rights Reserved.\r
+\r
+[Version]\r
+Signature="$WINDOWS NT$"\r
+Class=Mlx4Bus\r
+ClassGUID={714995B2-CD65-4a47-BCFE-95AC73A0D780}\r
+Provider=%MTL%\r
+; must be synchronized with bus\drv.c\r
+DriverVer=02/01/2008,1.0.0.0\r
+CatalogFile=mlx4_bus.cat\r
+\r
+\r
+;*****************************************\r
+; Destination directory section\r
+;*****************************************\r
+\r
+[DestinationDirs]\r
+DefaultDestDir = 12\r
+Wdf_CoInstaller_CopyFiles = 11\r
+\r
+\r
+;*****************************************\r
+; Class Install section\r
+;*****************************************\r
+\r
+[ClassInstall32]\r
+AddReg=ClassAddReg\r
+\r
+[ClassAddReg]\r
+HKR,,,,"Mellanox ConnectX Adapters"\r
+HKR,,Icon,,-5\r
+HKR,,SilentInstall,,1\r
+\r
+\r
+;*****************************************\r
+; Device Install section\r
+;*****************************************\r
+\r
+[SourceDisksNames.x86]\r
+1=%DiskId%,,,""\r
+\r
+[SourceDisksNames.amd64]\r
+1=%DiskId%,,,""\r
+\r
+[SourceDisksNames.ia64]\r
+1=%DiskId%,,,""\r
+\r
+[SourceDisksFiles.x86]\r
+mlx4_bus.sys = 1,,\r
+wdfcoinstaller01007.dll = 1,,\r
+\r
+[SourceDisksFiles.amd64]\r
+mlx4_bus.sys = 1,,\r
+wdfcoinstaller01007.dll = 1,,\r
+\r
+[SourceDisksFiles.ia64]\r
+mlx4_bus.sys = 1,,\r
+wdfcoinstaller01007.dll = 1,,\r
+\r
+;*****************************************\r
+; Mlx4Bus  Install Section\r
+;*****************************************\r
+\r
+[Manufacturer]\r
+%MTL% = MLX4BUS.DeviceSection,ntx86,ntamd64,ntia64\r
+\r
+[MLX4BUS.DeviceSection]\r
+; empty since we don't support W9x/Me\r
+\r
+[MLX4BUS.DeviceSection.ntx86]\r
+%MT25408.DeviceDesc%=MLX4BUS.DDInstall, PCI\VEN_15B3&DEV_6340\r
+%MT25418.DeviceDesc%=MLX4BUS.DDInstall, PCI\VEN_15B3&DEV_634A\r
+%MT25428.DeviceDesc%=MLX4BUS.DDInstall, PCI\VEN_15B3&DEV_6354\r
+%MT25448.DeviceDesc%=MLX4BUS.DDInstall, PCI\VEN_15B3&DEV_6368\r
+%MT26418.DeviceDesc%=MLX4BUS.DDInstall, PCI\VEN_15B3&DEV_6732\r
+%MT26428.DeviceDesc%=MLX4BUS.DDInstall, PCI\VEN_15B3&DEV_673c\r
+%MT00401.DeviceDesc%=MLX4BUS.DDInstall, PCI\VEN_15B3&DEV_0191\r
+\r
+[MLX4BUS.DeviceSection.ntamd64]\r
+%MT25408.DeviceDesc%=MLX4BUS.DDInstall, PCI\VEN_15B3&DEV_6340\r
+%MT25418.DeviceDesc%=MLX4BUS.DDInstall, PCI\VEN_15B3&DEV_634A\r
+%MT25428.DeviceDesc%=MLX4BUS.DDInstall, PCI\VEN_15B3&DEV_6354\r
+%MT26418.DeviceDesc%=MLX4BUS.DDInstall, PCI\VEN_15B3&DEV_6732\r
+%MT25448.DeviceDesc%=MLX4BUS.DDInstall, PCI\VEN_15B3&DEV_6368\r
+%MT26428.DeviceDesc%=MLX4BUS.DDInstall, PCI\VEN_15B3&DEV_673c\r
+%MT00401.DeviceDesc%=MLX4BUS.DDInstall, PCI\VEN_15B3&DEV_0191\r
+\r
+[MLX4BUS.DeviceSection.ntia64]\r
+%MT25408.DeviceDesc%=MLX4BUS.DDInstall, PCI\VEN_15B3&DEV_6340\r
+%MT25418.DeviceDesc%=MLX4BUS.DDInstall, PCI\VEN_15B3&DEV_634A\r
+%MT25428.DeviceDesc%=MLX4BUS.DDInstall, PCI\VEN_15B3&DEV_6354\r
+%MT25448.DeviceDesc%=MLX4BUS.DDInstall, PCI\VEN_15B3&DEV_6368\r
+%MT26418.DeviceDesc%=MLX4BUS.DDInstall, PCI\VEN_15B3&DEV_6732\r
+%MT26428.DeviceDesc%=MLX4BUS.DDInstall, PCI\VEN_15B3&DEV_673c\r
+%MT00401.DeviceDesc%=MLX4BUS.DDInstall, PCI\VEN_15B3&DEV_0191\r
+\r
+[MLX4BUS.DDInstall.ntx86.hw]\r
+AddReg = MLX4BUS.HwReg\r
+\r
+[MLX4BUS.DDInstall.ntamd64.hw]\r
+AddReg = MLX4BUS.HwReg\r
+\r
+[MLX4BUS.DDInstall.ntia64.hw]\r
+AddReg = MLX4BUS.HwReg\r
+\r
+[MLX4BUS.HwReg]\r
+HKR,"Interrupt Management", 0x00000010\r
+HKR,"Interrupt Management\MessageSignaledInterruptProperties",0x00000010\r
+\r
+; MSI/MSI-X support\r
+HKR,"Interrupt Management\MessageSignaledInterruptProperties",MSISupported,0x00010001,0\r
+HKR,"Interrupt Management\MessageSignaledInterruptProperties",MessageNumberLimit,0x00010001,8\r
+HKR,"Interrupt Management\Affinity Policy",0x00000010\r
+\r
+; AssignmentSetOverride - processors KAFFINITY mask  \r
+HKR,"Interrupt Management\Affinity Policy",AssignmentSetOverride,0x00000001,0x0\r
+\r
+; IrqPolicyMachineDefault (0) - use default policy for the computer\r
+; IrqPolicyAllCloseProcessors (1) - connect interrupts to all processors of the near NUMA node\r
+; IrqPolicyOneCloseProcessor (2) - connect interrupts to one processor\r
+; IrqPolicyAllProcessorsInMachine (3) - connect interrupts to all processors in the machine\r
+; IrqPolicySpecifiedProcessors (4) - connects interrupts according to AssignmentSetOverride\r
+HKR,"Interrupt Management\Affinity Policy",DevicePolicy,0x00010001,0x0\r
+\r
+; IrqArbPriorityUndefined (0) - no interrupt priority policy. \r
+; IrqArbPriorityLow (1) - device can tolerate low IRQL\r
+; IrqArbPriorityNormal (2) - device expects normal interrupt latencies\r
+; IrqArbPriorityHigh (3) - device requires the lowest possible interrupt latency\r
+HKR,"Interrupt Management\Affinity Policy",DevicePriority,0x00010001,0x0\r
+\r
+[MLX4BUS.DDInstall.ntx86]\r
+CopyFiles = MLX4BUS.CopyFiles\r
+\r
+[MLX4BUS.DDInstall.ntamd64]\r
+CopyFiles = MLX4BUS.CopyFiles\r
+\r
+[MLX4BUS.DDInstall.ntia64]\r
+CopyFiles = MLX4BUS.CopyFiles\r
+\r
+[MLX4BUS.DDInstall.ntx86.Services]\r
+AddService = mlx4_bus,%SPSVCINST_ASSOCSERVICE%,MLX4BUS.ServiceInstall,MLX4BUS.EventLog\r
+\r
+[MLX4BUS.DDInstall.ntamd64.Services]\r
+AddService = mlx4_bus,%SPSVCINST_ASSOCSERVICE%,MLX4BUS.ServiceInstall,MLX4BUS.EventLog\r
+\r
+[MLX4BUS.DDInstall.ntia64.Services]\r
+AddService = mlx4_bus,%SPSVCINST_ASSOCSERVICE%,MLX4BUS.ServiceInstall,MLX4BUS.EventLog\r
+\r
+[MLX4BUS.CopyFiles]\r
+mlx4_bus.sys\r
+\r
+\r
+;*****************************************\r
+; Service Install section\r
+;*****************************************\r
+\r
+[MLX4BUS.ServiceInstall]\r
+DisplayName     = %MLX4BUS.ServiceDesc%\r
+ServiceType     = %SERVICE_KERNEL_DRIVER%\r
+StartType       = %SERVICE_DEMAND_START%\r
+ErrorControl    = %SERVICE_ERROR_NORMAL%\r
+ServiceBinary   = %12%\mlx4_bus.sys\r
+LoadOrderGroup  = extended base\r
+AddReg          = MLX4BUS.ParamsReg\r
+\r
+[MLX4BUS.EventLog]\r
+AddReg = MLX4BUS.AddEventLogReg\r
+\r
+[MLX4BUS.AddEventLogReg]\r
+HKR, , EventMessageFile, 0x00020000, "%%SystemRoot%%\System32\IoLogMsg.dll;%%SystemRoot%%\System32\drivers\mlx4_bus.sys"\r
+HKR, , TypesSupported,   0x00010001, 7\r
+\r
+[MLX4BUS.ParamsReg]\r
+HKR,,DeviceCharacteristics,0x10001,0x0100         ; Use same security checks on relative opens\r
+HKR,,Security,,"D:P(A;;GA;;;BA)(A;;GA;;;SY)"      ; Allow generic-all access to Built-in administrators and Local system \r
+HKR,"Parameters","DebugLevel",%REG_DWORD%,0x00000003\r
+HKR,"Parameters","DebugFlags",%REG_DWORD%,0x0000ffff\r
+HKR,"Parameters","LogNumQp",%REG_DWORD%,0x00000011\r
+HKR,"Parameters","LogNumRdmaRc",%REG_DWORD%,0x00000004\r
+HKR,"Parameters","LogNumSrq",%REG_DWORD%,0x00000010\r
+HKR,"Parameters","LogNumCq",%REG_DWORD%,0x00000010\r
+HKR,"Parameters","LogNumMcg",%REG_DWORD%,0x0000000D\r
+HKR,"Parameters","LogNumMpt",%REG_DWORD%,0x00000012\r
+HKR,"Parameters","LogNumMtt",%REG_DWORD%,0x00000014\r
+HKR,"Parameters","EnableQoS",%REG_DWORD%,0x00000001\r
+HKR,"Parameters","BlockMcastLoopBack",%REG_DWORD%,0x00000000\r
+HKR,"Parameters","InterruptFromFirstPacket",%REG_DWORD%,0x00000001\r
+\r
+HKR,"Parameters","NumMac",%REG_DWORD%,0x00000001\r
+HKR,"Parameters","NumVlan",%REG_DWORD%,0x00000000\r
+HKR,"Parameters","UsePrio",%REG_DWORD%,0x00000000\r
+HKR,"Parameters","PortType",%REG_SZ%,"ib,ib"\r
+\r
+\r
+HKLM,"System\CurrentControlSet\Control\WMI\GlobalLogger\E51BB6E2-914A-4e21-93C0-192F4801BBFF","Flags",%REG_DWORD%,0xffff\r
+HKLM,"System\CurrentControlSet\Control\WMI\GlobalLogger\E51BB6E2-914A-4e21-93C0-192F4801BBFF","Level",%REG_DWORD%,0x3\r
+\r
+;*****************************************\r
+; WDF Coinstaller installation section\r
+;*****************************************\r
+\r
+[MLX4BUS.DDInstall.ntx86.CoInstallers]\r
+AddReg=Wdf_CoInstaller_AddReg\r
+CopyFiles=Wdf_CoInstaller_CopyFiles\r
+\r
+[MLX4BUS.DDInstall.ntamd64.CoInstallers]\r
+AddReg=Wdf_CoInstaller_AddReg\r
+CopyFiles=Wdf_CoInstaller_CopyFiles\r
+\r
+[MLX4BUS.DDInstall.ntia64.CoInstallers]\r
+AddReg=Wdf_CoInstaller_AddReg\r
+CopyFiles=Wdf_CoInstaller_CopyFiles\r
+\r
+[Wdf_CoInstaller_AddReg]\r
+HKR,,CoInstallers32,0x00010000, "wdfcoinstaller01007.dll,WdfCoInstaller"\r
+\r
+[Wdf_CoInstaller_CopyFiles]\r
+wdfcoinstaller01007.dll\r
+\r
+[MLX4BUS.DDInstall.ntx86.Wdf]\r
+KmdfService = mlx4_bus, mlx4_bus_wdfsect\r
+\r
+[MLX4BUS.DDInstall.ntamd64.Wdf]\r
+KmdfService = mlx4_bus, mlx4_bus_wdfsect\r
+\r
+[MLX4BUS.DDInstall.ntia64.Wdf]\r
+KmdfService = mlx4_bus, mlx4_bus_wdfsect\r
+\r
+[mlx4_bus_wdfsect]\r
+KmdfLibraryVersion = 1.7\r
+\r
+\r
+;*****************************************\r
+; Strings\r
+;*****************************************\r
+\r
+[Strings]\r
+MTL="Mellanox Technologies Ltd."\r
+MLX4BUS.ServiceDesc = "Mellanox ConnectX Bus Enumerator"\r
+MT25408.DeviceDesc="ConnectX (MT25408) - Mellanox ConnectX SDR Channel Adapter"\r
+MT25418.DeviceDesc="ConnectX (MT25418) - Mellanox ConnectX DDR Channel Adapter"\r
+MT25428.DeviceDesc="ConnectX (MT25428) - Mellanox ConnectX QDR Channel Adapter"\r
+MT25448.DeviceDesc="ConnectX (MT25448) - Mellanox ConnectX Condor Ethernet Adapter"\r
+MT26418.DeviceDesc="ConnectX (MT26418) - Mellanox ConnectX DDR_G2 Channel Adapter"\r
+MT26428.DeviceDesc="ConnectX (MT26428) - Mellanox ConnectX QDR_G2 Channel Adapter"\r
+MT00401.DeviceDesc="ConnectX (MT00401) - Mellanox ConnectX Channel Adapter in Burning Mode"\r
+DiskId = "Mellanox Mlx4 Bus installation disk"\r
+SPSVCINST_NULL = 0x0\r
+SPSVCINST_ASSOCSERVICE = 0x00000002\r
+SERVICE_KERNEL_DRIVER  = 1\r
+SERVICE_DEMAND_START   = 3\r
+SERVICE_ERROR_NORMAL   = 1\r
+REG_DWORD              = 0x00010001\r
+REG_MULTI_SZ_APPEND    = 0x00010008\r
+REG_SZ                 = 0x00000000\r
index 16cfa29978f32a4768816c3bcbe9b0c5b49a7b93..54d498205e957cd05fb59f0cce6f5edece398ba1 100644 (file)
@@ -2,6 +2,14 @@ TARGETNAME=mlx4_bus
 TARGETPATH=..\..\..\..\..\bin\kernel\obj$(BUILD_ALT_DIR)\r
 TARGETTYPE=DRIVER\r
 \r
+!if $(_NT_TOOLS_VERSION) != 0x700\r
+# WDK build only - transform .inx --> .inf adding date & version stamp.\r
+# see .\makefile.inc\r
+INF_NAME=$(TARGETNAME)\r
+INF_TARGET=..\..\..\..\..\bin\kernel\$(O)\$(INF_NAME).inf\r
+NTTARGETFILES=$(INF_TARGET)\r
+!endif\r
+\r
 !if $(FREEBUILD)\r
 #ENABLE_EVENT_TRACING=1\r
 !else\r
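NTTARGETFILES is the hook that makes the transform run: the WDK's makefile.def builds every target listed there using the rules from the directory's makefile.inc, so the .inf is regenerated from the .inx on each build rather than being kept in the tree. A stripped-down sources/makefile.inc pairing using the same mechanism (all names hypothetical):

    # sources
    INF_NAME=mydriver
    INF_TARGET=$(OBJ_PATH)\$(O)\$(INF_NAME).inf
    NTTARGETFILES=$(INF_TARGET)

    # makefile.inc
    $(INF_TARGET): $(INF_NAME).inx
        copy $(INF_NAME).inx $@
        stampinf -f $@ -d * -v 1.0.0.0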
index e7eb8455c61609e8faec2143f6677045f1277fc0..7e40518fee0193d6ac978c93750b33aa5c6944d2 100644 (file)
-/*
- * Copyright (c) 2004 Mellanox Technologies Ltd.  All rights reserved.
- * Copyright (c) 2004 Infinicon Corporation.  All rights reserved.
- * Copyright (c) 2004 Intel Corporation.  All rights reserved.
- * Copyright (c) 2004 Topspin Corporation.  All rights reserved.
- * Copyright (c) 2004 Voltaire Corporation.  All rights reserved.
- * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
- * Copyright (c) 2005, 2006, 2007 Cisco Systems.  All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
- * $Id: ib_verbs.h 1349 2004-12-16 21:09:43Z roland $
- */
-
-#if !defined(IB_VERBS_H)
-#define IB_VERBS_H
-
-#include <iba\ib_ci.h>
-
-union ib_gid {
-       u8      raw[16];
-       struct {
-               __be64  subnet_prefix;
-               __be64  interface_id;
-       } global;
-};
-
-#include "ib_verbs_ex.h"
-
-enum rdma_node_type {
-       /* IB values map to NodeInfo:NodeType. */
-       RDMA_NODE_IB_CA         = 1,
-       RDMA_NODE_IB_SWITCH,
-       RDMA_NODE_IB_ROUTER,
-       RDMA_NODE_RNIC
-};
-
-enum rdma_transport_type {
-       RDMA_TRANSPORT_IB,
-       RDMA_TRANSPORT_IWARP
-};
-
-enum rdma_transport_type
-rdma_node_get_transport(enum rdma_node_type node_type) __attribute_const__;
-
-enum ib_device_cap_flags {
-       IB_DEVICE_RESIZE_MAX_WR         = 1,
-       IB_DEVICE_BAD_PKEY_CNTR         = (1<<1),
-       IB_DEVICE_BAD_QKEY_CNTR         = (1<<2),
-       IB_DEVICE_RAW_MULTI             = (1<<3),
-       IB_DEVICE_AUTO_PATH_MIG         = (1<<4),
-       IB_DEVICE_CHANGE_PHY_PORT       = (1<<5),
-       IB_DEVICE_UD_AV_PORT_ENFORCE    = (1<<6),
-       IB_DEVICE_CURR_QP_STATE_MOD     = (1<<7),
-       IB_DEVICE_SHUTDOWN_PORT         = (1<<8),
-       IB_DEVICE_INIT_TYPE             = (1<<9),
-       IB_DEVICE_PORT_ACTIVE_EVENT     = (1<<10),
-       IB_DEVICE_SYS_IMAGE_GUID        = (1<<11),
-       IB_DEVICE_RC_RNR_NAK_GEN        = (1<<12),
-       IB_DEVICE_SRQ_RESIZE            = (1<<13),
-       IB_DEVICE_N_NOTIFY_CQ           = (1<<14),
-       IB_DEVICE_ZERO_STAG                     = (1<<15),
-       IB_DEVICE_SEND_W_INV            = (1<<16),
-       IB_DEVICE_MEM_WINDOW            = (1<<17),
-       IB_DEVICE_IPOIB_CSUM            = (1<<18)
-};
-
-enum ib_atomic_cap {
-       IB_ATOMIC_NON,
-       IB_ATOMIC_HCA,
-       IB_ATOMIC_GLOB
-};
-
-struct ib_device_attr {
-       u64                     fw_ver;
-       __be64                  sys_image_guid;
-       u64                     max_mr_size;
-       u64                     page_size_cap;
-       u32                     vendor_id;
-       u32                     vendor_part_id;
-       u32                     hw_ver;
-       int                     max_qp;
-       int                     max_qp_wr;
-       int                     device_cap_flags;
-       int                     max_sge;
-       int                     max_sge_rd;
-       int                     max_cq;
-       int                     max_cqe;
-       int                     max_mr;
-       int                     max_pd;
-       int                     max_qp_rd_atom;
-       int                     max_ee_rd_atom;
-       int                     max_res_rd_atom;
-       int                     max_qp_init_rd_atom;
-       int                     max_ee_init_rd_atom;
-       enum ib_atomic_cap      atomic_cap;
-       int                     max_ee;
-       int                     max_rdd;
-       int                     max_mw;
-       int                     max_raw_ipv6_qp;
-       int                     max_raw_ethy_qp;
-       int                     max_mcast_grp;
-       int                     max_mcast_qp_attach;
-       int                     max_total_mcast_qp_attach;
-       u64                     max_ah;
-       int                     max_fmr;
-       int                     max_map_per_fmr;
-       int                     max_srq;
-       int                     max_srq_wr;
-       int                     max_srq_sge;
-       u16                     max_pkeys;
-       u8                      local_ca_ack_delay;
-};
-
-enum ib_mtu {
-       IB_MTU_256  = 1,
-       IB_MTU_512  = 2,
-       IB_MTU_1024 = 3,
-       IB_MTU_2048 = 4,
-       IB_MTU_4096 = 5
-};
-
-static inline int ib_mtu_enum_to_int(enum ib_mtu mtu)
-{
-       switch (mtu) {
-       case IB_MTU_256:  return  256;
-       case IB_MTU_512:  return  512;
-       case IB_MTU_1024: return 1024;
-       case IB_MTU_2048: return 2048;
-       case IB_MTU_4096: return 4096;
-       default:          return -1;
-       }
-}
-
-enum ib_port_state {
-       IB_PORT_NOP             = 0,
-       IB_PORT_DOWN            = 1,
-       IB_PORT_INIT            = 2,
-       IB_PORT_ARMED           = 3,
-       IB_PORT_ACTIVE          = 4,
-       IB_PORT_ACTIVE_DEFER    = 5
-};
-
-enum ib_port_cap_flags {
-       IB_PORT_SM                              = 1 <<  1,
-       IB_PORT_NOTICE_SUP                      = 1 <<  2,
-       IB_PORT_TRAP_SUP                        = 1 <<  3,
-       IB_PORT_OPT_IPD_SUP                     = 1 <<  4,
-       IB_PORT_AUTO_MIGR_SUP                   = 1 <<  5,
-       IB_PORT_SL_MAP_SUP                      = 1 <<  6,
-       IB_PORT_MKEY_NVRAM                      = 1 <<  7,
-       IB_PORT_PKEY_NVRAM                      = 1 <<  8,
-       IB_PORT_LED_INFO_SUP                    = 1 <<  9,
-       IB_PORT_SM_DISABLED                     = 1 << 10,
-       IB_PORT_SYS_IMAGE_GUID_SUP              = 1 << 11,
-       IB_PORT_PKEY_SW_EXT_PORT_TRAP_SUP       = 1 << 12,
-       IB_PORT_CM_SUP                          = 1 << 16,
-       IB_PORT_SNMP_TUNNEL_SUP                 = 1 << 17,
-       IB_PORT_REINIT_SUP                      = 1 << 18,
-       IB_PORT_DEVICE_MGMT_SUP                 = 1 << 19,
-       IB_PORT_VENDOR_CLASS_SUP                = 1 << 20,
-       IB_PORT_DR_NOTICE_SUP                   = 1 << 21,
-       IB_PORT_CAP_MASK_NOTICE_SUP             = 1 << 22,
-       IB_PORT_BOOT_MGMT_SUP                   = 1 << 23,
-       IB_PORT_LINK_LATENCY_SUP                = 1 << 24,
-       IB_PORT_CLIENT_REG_SUP                  = 1 << 25
-};
-
-enum ib_port_width {
-       IB_WIDTH_1X     = 1,
-       IB_WIDTH_4X     = 2,
-       IB_WIDTH_8X     = 4,
-       IB_WIDTH_12X    = 8
-};
-
-static inline int ib_width_enum_to_int(enum ib_port_width width)
-{
-       switch (width) {
-       case IB_WIDTH_1X:  return  1;
-       case IB_WIDTH_4X:  return  4;
-       case IB_WIDTH_8X:  return  8;
-       case IB_WIDTH_12X: return 12;
-       default:          return -1;
-       }
-}
-
-struct ib_port_attr {
-       enum ib_port_state      state;
-       enum ib_mtu             max_mtu;
-       enum ib_mtu             active_mtu;
-       int                     gid_tbl_len;
-       u32                     port_cap_flags;
-       u32                     max_msg_sz;
-       u32                     bad_pkey_cntr;
-       u32                     qkey_viol_cntr;
-       u16                     pkey_tbl_len;
-       u16                     lid;
-       u16                     sm_lid;
-       u8                      lmc;
-       u8                      max_vl_num;
-       u8                      sm_sl;
-       u8                      subnet_timeout;
-       u8                      init_type_reply;
-       u8                      active_width;
-       u8                      active_speed;
-       u8                      phys_state;
-};
-
-enum ib_device_modify_flags {
-       IB_DEVICE_MODIFY_SYS_IMAGE_GUID = 1 << 0,
-       IB_DEVICE_MODIFY_NODE_DESC      = 1 << 1
-};
-
-struct ib_device_modify {
-       u64     sys_image_guid;
-       char    node_desc[64];
-};
-
-enum ib_port_modify_flags {
-       IB_PORT_SHUTDOWN                = 1,
-       IB_PORT_INIT_TYPE               = (1<<2),
-       IB_PORT_RESET_QKEY_CNTR         = (1<<3)
-};
-
-struct ib_port_modify {
-       u32     set_port_cap_mask;
-       u32     clr_port_cap_mask;
-       u8      init_type;
-};
-
-enum ib_event_type {
-       IB_EVENT_CQ_ERR                                                                 = IB_AE_CQ_ERROR,
-       IB_EVENT_QP_FATAL                                                               = IB_AE_QP_FATAL,
-       IB_EVENT_QP_REQ_ERR                                                     = IB_AE_WQ_REQ_ERROR,
-       IB_EVENT_QP_ACCESS_ERR                                  = IB_AE_WQ_ACCESS_ERROR,
-       IB_EVENT_COMM_EST                                                       = IB_AE_QP_COMM,
-       IB_EVENT_SQ_DRAINED                                             = IB_AE_SQ_DRAINED,
-       IB_EVENT_PATH_MIG                                                               = IB_AE_QP_APM,
-       IB_EVENT_PATH_MIG_ERR                                   = IB_AE_QP_APM_ERROR,
-       IB_EVENT_DEVICE_FATAL                                           = IB_AE_LOCAL_FATAL,
-       IB_EVENT_PORT_ACTIVE                                            = IB_AE_PORT_ACTIVE,
-       IB_EVENT_PORT_ERR                                                               = IB_AE_PORT_DOWN,
-       IB_EVENT_SRQ_LIMIT_REACHED                              = IB_AE_SRQ_LIMIT_REACHED,
-       IB_EVENT_SRQ_ERR                                                = IB_AE_SRQ_CATAS_ERROR,
-       IB_EVENT_QP_LAST_WQE_REACHED                    = IB_AE_SRQ_QP_LAST_WQE_REACHED,
-       IB_EVENT_LID_CHANGE                                                     = IB_AE_UNKNOWN + 1,
-       IB_EVENT_PKEY_CHANGE,
-       IB_EVENT_SM_CHANGE,
-       IB_EVENT_CLIENT_REREGISTER
-};
-
-struct ib_event {
-       struct ib_device        *device;
-       union {
-               struct ib_cq    *cq;
-               struct ib_qp    *qp;
-               struct ib_srq   *srq;
-               u8              port_num;
-       } element;
-       enum ib_event_type      event;
-       struct ib_event_ex      x;
-       };
-
-struct ib_event_handler {
-       struct ib_device *device;
-       void            (*handler)(struct ib_event_handler *, struct ib_event *);
-       void *            ctx;
-       struct list_head  list;
-};
-
-#define INIT_IB_EVENT_HANDLER(_ptr, _device, _handler, _ctx)           \
-       {                                                       \
-               (_ptr)->device  = _device;                      \
-               (_ptr)->handler = _handler;             \
-               (_ptr)->ctx = _ctx;             \
-               INIT_LIST_HEAD(&(_ptr)->list);                  \
-       }
-
-struct ib_global_route {
-       union ib_gid    dgid;
-       u32             flow_label;
-       u8              sgid_index;
-       u8              hop_limit;
-       u8              traffic_class;
-};
-
-struct ib_grh {
-       __be32          version_tclass_flow;
-       __be16          paylen;
-       u8              next_hdr;
-       u8              hop_limit;
-       union ib_gid    sgid;
-       union ib_gid    dgid;
-};
-
-enum {
-       IB_MULTICAST_QPN = 0xffffff
-};
-
-#define XIB_LID_PERMISSIVE     __constant_htons(0xFFFF)
-
-enum ib_ah_flags {
-       IB_AH_GRH       = 1
-};
-
-enum ib_rate {
-       IB_RATE_PORT_CURRENT = 0,
-       IB_RATE_2_5_GBPS = 2,
-       IB_RATE_5_GBPS   = 5,
-       IB_RATE_10_GBPS  = 3,
-       IB_RATE_20_GBPS  = 6,
-       IB_RATE_30_GBPS  = 4,
-       IB_RATE_40_GBPS  = 7,
-       IB_RATE_60_GBPS  = 8,
-       IB_RATE_80_GBPS  = 9,
-       IB_RATE_120_GBPS = 10
-};
-
-/**
- * ib_rate_to_mult - Convert the IB rate enum to a multiple of the
- * base rate of 2.5 Gbit/sec.  For example, IB_RATE_5_GBPS will be
- * converted to 2, since 5 Gbit/sec is 2 * 2.5 Gbit/sec.
- * @rate: rate to convert.
- */
-int ib_rate_to_mult(enum ib_rate rate) __attribute_const__;
-
-/**
- * mult_to_ib_rate - Convert a multiple of 2.5 Gbit/sec to an IB rate
- * enum.
- * @mult: multiple to convert.
- */
-enum ib_rate mult_to_ib_rate(int mult) __attribute_const__;
-
-struct ib_ah_attr {
-       struct ib_global_route  grh;
-       u16                     dlid;
-       u8                      sl;
-       u8                      src_path_bits;
-       u8                      static_rate;
-       u8                      ah_flags;
-       u8                      port_num;
-};
-
-enum ib_wc_status {
-       IB_WC_SUCCESS,
-       IB_WC_LOC_LEN_ERR,
-       IB_WC_LOC_QP_OP_ERR,
-       IB_WC_LOC_EEC_OP_ERR,
-       IB_WC_LOC_PROT_ERR,
-       IB_WC_WR_FLUSH_ERR,
-       IB_WC_MW_BIND_ERR,
-       IB_WC_BAD_RESP_ERR,
-       IB_WC_LOC_ACCESS_ERR,
-       IB_WC_REM_INV_REQ_ERR,
-       IB_WC_REM_ACCESS_ERR,
-       IB_WC_REM_OP_ERR,
-       IB_WC_RETRY_EXC_ERR,
-       IB_WC_RNR_RETRY_EXC_ERR,
-       IB_WC_LOC_RDD_VIOL_ERR,
-       IB_WC_REM_INV_RD_REQ_ERR,
-       IB_WC_REM_ABORT_ERR,
-       IB_WC_INV_EECN_ERR,
-       IB_WC_INV_EEC_STATE_ERR,
-       IB_WC_FATAL_ERR,
-       IB_WC_RESP_TIMEOUT_ERR,
-       IB_WC_GENERAL_ERR
-};
-
-enum ib_wc_opcode {
-       XIB_WC_SEND,
-       XIB_WC_RDMA_WRITE,
-       XIB_WC_RDMA_READ,
-       XIB_WC_COMP_SWAP,
-       XIB_WC_FETCH_ADD,
-       XIB_WC_BIND_MW,
-/*
- * Set value of XIB_WC_RECV so consumers can test if a completion is a
- * receive by testing (opcode & XIB_WC_RECV).
- */
-       XIB_WC_RECV                     = 1 << 7,
-       XIB_WC_RECV_RDMA_WITH_IMM
-};
-
-enum ib_wc_flags {
-       IB_WC_GRH               = 1,
-       IB_WC_WITH_IMM          = (1<<1),
-       IB_WC_FORWARD           = (1<<2)
-};
-
-struct ib_wc {
-       u64                     wr_id;
-       enum ib_wc_status       status;
-       enum ib_wc_opcode       opcode;
-       u32                     vendor_err;
-       u32                     byte_len;
-       struct ib_qp           *qp;
-       __be32                  imm_data;
-       u32                     src_qp;
-       int                     wc_flags;
-       u16                     pkey_index;
-       u16                     slid;
-       u8                      sl;
-       u8                      dlid_path_bits;
-       u8                      port_num;       /* valid only for DR SMPs on switches */
-};
-
-enum ib_cq_notify_flags {
-       IB_CQ_SOLICITED                 = 1 << 0,
-       IB_CQ_NEXT_COMP                 = 1 << 1,
-       IB_CQ_SOLICITED_MASK            = IB_CQ_SOLICITED | IB_CQ_NEXT_COMP,
-       IB_CQ_REPORT_MISSED_EVENTS      = 1 << 2,
-};
-
-enum ib_srq_attr_mask {
-       XIB_SRQ_MAX_WR  = 1 << 0,
-       XIB_SRQ_LIMIT   = 1 << 1,
-};
-
-struct ib_srq_attr {
-       u32     max_wr;
-       u32     max_sge;
-       u32     srq_limit;
-};
-
-struct ib_srq_init_attr {
-       void                  (*event_handler)(ib_event_rec_t *);
-       void                   *srq_context;
-       struct ib_srq_attr      attr;
-};
-
-struct ib_qp_cap {
-       u32     max_send_wr;
-       u32     max_recv_wr;
-       u32     max_send_sge;
-       u32     max_recv_sge;
-       u32     max_inline_data;
-};
-
-enum ib_sig_type {
-       IB_SIGNAL_ALL_WR,
-       IB_SIGNAL_REQ_WR
-};
-
-enum ib_qp_type {
-       /*
-        * IB_QPT_SMI and IB_QPT_GSI have to be the first two entries
-        * here (and in that order) since the MAD layer uses them as
-        * indices into a 2-entry table.
-        */
-       IB_QPT_SMI,
-       IB_QPT_GSI,
-
-       IB_QPT_RC,
-       IB_QPT_UC,
-       IB_QPT_UD,
-       IB_QPT_RAW_IP_V6,
-       IB_QPT_RAW_ETY
-};
-
-struct ib_qp_init_attr {
-       void                  (*event_handler)(ib_event_rec_t *);
-       void                   *qp_context;
-       struct ib_cq           *send_cq;
-       struct ib_cq           *recv_cq;
-       struct ib_srq          *srq;
-       struct ib_qp_cap        cap;
-       enum ib_sig_type        sq_sig_type;
-       enum ib_qp_type         qp_type;
-       u8                      port_num; /* special QP types only */
-};
-
-enum ib_rnr_timeout {
-       IB_RNR_TIMER_655_36 =  0,
-       IB_RNR_TIMER_000_01 =  1,
-       IB_RNR_TIMER_000_02 =  2,
-       IB_RNR_TIMER_000_03 =  3,
-       IB_RNR_TIMER_000_04 =  4,
-       IB_RNR_TIMER_000_06 =  5,
-       IB_RNR_TIMER_000_08 =  6,
-       IB_RNR_TIMER_000_12 =  7,
-       IB_RNR_TIMER_000_16 =  8,
-       IB_RNR_TIMER_000_24 =  9,
-       IB_RNR_TIMER_000_32 = 10,
-       IB_RNR_TIMER_000_48 = 11,
-       IB_RNR_TIMER_000_64 = 12,
-       IB_RNR_TIMER_000_96 = 13,
-       IB_RNR_TIMER_001_28 = 14,
-       IB_RNR_TIMER_001_92 = 15,
-       IB_RNR_TIMER_002_56 = 16,
-       IB_RNR_TIMER_003_84 = 17,
-       IB_RNR_TIMER_005_12 = 18,
-       IB_RNR_TIMER_007_68 = 19,
-       IB_RNR_TIMER_010_24 = 20,
-       IB_RNR_TIMER_015_36 = 21,
-       IB_RNR_TIMER_020_48 = 22,
-       IB_RNR_TIMER_030_72 = 23,
-       IB_RNR_TIMER_040_96 = 24,
-       IB_RNR_TIMER_061_44 = 25,
-       IB_RNR_TIMER_081_92 = 26,
-       IB_RNR_TIMER_122_88 = 27,
-       IB_RNR_TIMER_163_84 = 28,
-       IB_RNR_TIMER_245_76 = 29,
-       IB_RNR_TIMER_327_68 = 30,
-       IB_RNR_TIMER_491_52 = 31
-};
-       
-enum ib_qp_attr_mask {
-       IB_QP_STATE                     = 1,
-       IB_QP_CUR_STATE                 = (1<<1),
-       IB_QP_EN_SQD_ASYNC_NOTIFY       = (1<<2),
-       IB_QP_ACCESS_FLAGS              = (1<<3),
-       IB_QP_PKEY_INDEX                = (1<<4),
-       IB_QP_PORT                      = (1<<5),
-       IB_QP_QKEY                      = (1<<6),
-       IB_QP_AV                        = (1<<7),
-       IB_QP_PATH_MTU                  = (1<<8),
-       IB_QP_TIMEOUT                   = (1<<9),
-       IB_QP_RETRY_CNT                 = (1<<10),
-       IB_QP_RNR_RETRY                 = (1<<11),
-       IB_QP_RQ_PSN                    = (1<<12),
-       IB_QP_MAX_QP_RD_ATOMIC          = (1<<13),
-       IB_QP_ALT_PATH                  = (1<<14),
-       IB_QP_MIN_RNR_TIMER             = (1<<15),
-       IB_QP_SQ_PSN                    = (1<<16),
-       IB_QP_MAX_DEST_RD_ATOMIC        = (1<<17),
-       IB_QP_PATH_MIG_STATE            = (1<<18),
-       IB_QP_CAP                       = (1<<19),
-       IB_QP_DEST_QPN                  = (1<<20)
-};
-
-enum ib_qp_state {
-       XIB_QPS_RESET,
-       XIB_QPS_INIT,
-       XIB_QPS_RTR,
-       XIB_QPS_RTS,
-       XIB_QPS_SQD,
-       XIB_QPS_SQE,
-       XIB_QPS_ERR
-};
-
-enum ib_mig_state {
-       IB_MIG_MIGRATED,
-       IB_MIG_REARM,
-       IB_MIG_ARMED
-};
-
-struct ib_qp_attr {
-       enum ib_qp_state        qp_state;
-       enum ib_qp_state        cur_qp_state;
-       enum ib_mtu             path_mtu;
-       enum ib_mig_state       path_mig_state;
-       u32                     qkey;
-       u32                     rq_psn;
-       u32                     sq_psn;
-       u32                     dest_qp_num;
-       int                     qp_access_flags;
-       struct ib_qp_cap        cap;
-       struct ib_ah_attr       ah_attr;
-       struct ib_ah_attr       alt_ah_attr;
-       u16                     pkey_index;
-       u16                     alt_pkey_index;
-       u8                      en_sqd_async_notify;
-       u8                      sq_draining;
-       u8                      max_rd_atomic;
-       u8                      max_dest_rd_atomic;
-       u8                      min_rnr_timer;
-       u8                      port_num;
-       u8                      timeout;
-       u8                      retry_cnt;
-       u8                      rnr_retry;
-       u8                      alt_port_num;
-       u8                      alt_timeout;
-};
-
-enum ib_wr_opcode {
-       IB_WR_RDMA_WRITE,
-       IB_WR_RDMA_WRITE_WITH_IMM,
-       IB_WR_SEND,
-       IB_WR_SEND_WITH_IMM,
-       IB_WR_RDMA_READ,
-       IB_WR_ATOMIC_CMP_AND_SWP,
-       IB_WR_ATOMIC_FETCH_AND_ADD
-};
-
-enum ib_send_flags {
-       IB_SEND_FENCE           = 1,
-       IB_SEND_SIGNALED        = (1<<1),
-       IB_SEND_SOLICITED       = (1<<2),
-       IB_SEND_INLINE          = (1<<3)
-};
-
-struct ib_sge {
-       u64     addr;
-       u32     length;
-       u32     lkey;
-};
-
-struct ib_send_wr {
-       struct ib_send_wr      *next;
-       u64                     wr_id;
-       struct ib_sge          *sg_list;
-       int                     num_sge;
-       enum ib_wr_opcode       opcode;
-       int                     send_flags;
-       __be32                  imm_data;
-       union {
-               struct {
-                       u64     remote_addr;
-                       u32     rkey;
-               } rdma;
-               struct {
-                       u64     remote_addr;
-                       u64     compare_add;
-                       u64     swap;
-                       u32     rkey;
-               } atomic;
-               struct {
-                       struct ib_ah *ah;
-                       u32     remote_qpn;
-                       u32     remote_qkey;
-                       u16     pkey_index; /* valid for GSI only */
-                       u8      port_num;   /* valid for DR SMPs on switch only */
-               } ud;
-       } wr;
-};
-
-struct ib_recv_wr {
-       struct ib_recv_wr      *next;
-       u64                     wr_id;
-       struct ib_sge          *sg_list;
-       int                     num_sge;
-};
-
-enum ib_access_flags {
-       IB_ACCESS_LOCAL_WRITE   = 1,
-       IB_ACCESS_REMOTE_WRITE  = (1<<1),
-       IB_ACCESS_REMOTE_READ   = (1<<2),
-       IB_ACCESS_REMOTE_ATOMIC = (1<<3),
-       IB_ACCESS_MW_BIND       = (1<<4)
-};
-
-struct ib_phys_buf {
-       u64      addr;
-       u64      size;
-};
-
-struct ib_mr_attr {
-       struct ib_pd    *pd;
-       u64             device_virt_addr;
-       u64             size;
-       int             mr_access_flags;
-       u32             lkey;
-       u32             rkey;
-};
-
-enum ib_mr_rereg_flags {
-       IB_MR_REREG_TRANS       = 1,
-       IB_MR_REREG_PD          = (1<<1),
-       IB_MR_REREG_ACCESS      = (1<<2)
-};
-
-struct ib_mw_bind {
-       struct ib_mr   *mr;
-       u64             wr_id;
-       u64             addr;
-       u32             length;
-       int             send_flags;
-       int             mw_access_flags;
-};
-
-struct ib_fmr_attr {
-       int     max_pages;
-       int     max_maps;
-       u8      page_shift;
-};
-struct ib_ucontext {
-       struct ib_device       *device;
-       int                     closing;
-       struct ib_ucontext_ex   x;
-};
-
-struct ib_udata {
-       void        *inbuf;
-       void        *outbuf;
-       size_t       inlen;
-       size_t       outlen;
-};
-
-#define INIT_UDATA(udata, ibuf, obuf, ilen, olen)                      \
-       {                                                               \
-               (udata)->inbuf  = (void *) (ibuf);              \
-               (udata)->outbuf = (void *) (obuf);              \
-               (udata)->inlen  = (ilen);                               \
-               (udata)->outlen = (olen);                               \
-       }
-
-struct ib_pd {
-       struct ib_device       *device;
-       struct ib_ucontext     *p_uctx;
-       atomic_t                usecnt; /* count all resources */
-};
-
-struct ib_ah {
-       struct ib_device        *device;
-       struct ib_pd            *pd;
-       struct ib_ucontext      *p_uctx;
-};
-
-typedef void (*ib_comp_handler)(void *cq_context);
-
-struct ib_cq {
-       struct ib_device       *device;
-       struct ib_ucontext     *p_uctx;
-       ib_comp_handler         comp_handler;
-       void                  (*event_handler)(ib_event_rec_t *);
-       void *                  cq_context;
-       int                     cqe;
-       atomic_t                usecnt; /* count number of work queues */
-       struct ib_cq_ex         x;
-};
-
-struct ib_srq {
-       struct ib_device       *device;
-       struct ib_pd           *pd;
-       struct ib_ucontext     *p_uctx;
-       void                  (*event_handler)(ib_event_rec_t *);
-       void                   *srq_context;
-       atomic_t                usecnt;
-       struct ib_srq_ex        x;
-};
-
-struct ib_qp {
-       struct ib_device       *device;
-       struct ib_pd           *pd;
-       struct ib_cq           *send_cq;
-       struct ib_cq           *recv_cq;
-       struct ib_srq          *srq;
-       struct ib_ucontext     *p_uctx;
-       void                  (*event_handler)(ib_event_rec_t *);
-       void                   *qp_context;
-       u32                     qp_num;
-       enum ib_qp_type         qp_type;
-       struct ib_qp_ex         x;
-};
-
-struct ib_mr {
-       struct ib_device  *device;
-       struct ib_pd      *pd;
-       struct ib_ucontext *p_uctx;
-       u32                lkey;
-       u32                rkey;
-       atomic_t           usecnt; /* count number of MWs */
-};
-
-struct ib_mw {
-       struct ib_device        *device;
-       struct ib_pd            *pd;
-       struct ib_ucontext  *p_uctx;
-       u32                     rkey;
-};
-
-struct ib_fmr {
-       struct ib_device        *device;
-       struct ib_pd            *pd;
-       struct list_head        list;
-       u32                     lkey;
-       u32                     rkey;
-};
-
-struct ib_mad;
-struct ib_grh;
-
-enum ib_process_mad_flags {
-       IB_MAD_IGNORE_MKEY      = 1,
-       IB_MAD_IGNORE_BKEY      = 2,
-       IB_MAD_IGNORE_ALL       = IB_MAD_IGNORE_MKEY | IB_MAD_IGNORE_BKEY
-};
-
-enum ib_mad_result {
-       IB_MAD_RESULT_FAILURE  = 0,      /* (!SUCCESS is the important flag) */
-       IB_MAD_RESULT_SUCCESS  = 1 << 0, /* MAD was successfully processed   */
-       IB_MAD_RESULT_REPLY    = 1 << 1, /* Reply packet needs to be sent    */
-       IB_MAD_RESULT_CONSUMED = 1 << 2  /* Packet consumed: stop processing */
-};
-
-#define IB_DEVICE_NAME_MAX 64
-
-struct ib_cache {
-       rwlock_t                lock;
-       struct ib_event_handler event_handler;
-       struct ib_pkey_cache  **pkey_cache;
-       struct ib_gid_cache   **gid_cache;
-       u8                     *lmc_cache;
-       struct ib_cache_ex      x;
-};
-
-struct ib_dma_mapping_ops {
-       int             (*mapping_error)(struct ib_device *dev,
-                                        u64 dma_addr);
-       u64             (*map_single)(struct ib_device *dev,
-                                     void *ptr, size_t size,
-                                     enum dma_data_direction direction);
-       void            (*unmap_single)(struct ib_device *dev,
-                                       u64 addr, size_t size,
-                                       enum dma_data_direction direction);
-       u64             (*map_page)(struct ib_device *dev,
-                                   dma_addr_t page, unsigned long offset,
-                                   size_t size,
-                                   enum dma_data_direction direction);
-       void            (*unmap_page)(struct ib_device *dev,
-                                     u64 addr, size_t size,
-                                     enum dma_data_direction direction);
-       int             (*map_sg)(struct ib_device *dev,
-                                 struct scatterlist *sg, int nents,
-                                 enum dma_data_direction direction);
-       void            (*unmap_sg)(struct ib_device *dev,
-                                   struct scatterlist *sg, int nents,
-                                   enum dma_data_direction direction);
-       u64             (*dma_address)(struct ib_device *dev,
-                                      struct scatterlist *sg);
-       unsigned int    (*dma_len)(struct ib_device *dev,
-                                  struct scatterlist *sg);
-       void            (*sync_single_for_cpu)(struct ib_device *dev,
-                                              u64 dma_handle,
-                                              size_t size,
-                                              enum dma_data_direction dir);
-       void            (*sync_single_for_device)(struct ib_device *dev,
-                                                 u64 dma_handle,
-                                                 size_t size,
-                                                 enum dma_data_direction dir);
-       void            *(*alloc_coherent)(struct ib_device *dev,
-                                          size_t size,
-                                          u64 *dma_handle,
-                                          gfp_t flag);
-       void            (*free_coherent)(struct ib_device *dev,
-                                        size_t size, void *cpu_addr,
-                                        u64 dma_handle);
-};
-
-struct iw_cm_verbs;
-
-struct ib_device {
-       struct mlx4_dev                *dma_device;
-
-       char                          name[IB_DEVICE_NAME_MAX];
-
-       struct list_head              event_handler_list;
-       spinlock_t                    event_handler_lock;
-
-       struct list_head              core_list;
-       struct list_head              client_data_list;
-       spinlock_t                    client_data_lock;
-
-       struct ib_cache               cache;
-       int                          *pkey_tbl_len;
-       int                          *gid_tbl_len;
-
-       u32                           flags;
-
-       int                           num_comp_vectors;
-
-       struct iw_cm_verbs           *iwcm;
-
-       int                        (*query_device)(struct ib_device *device,
-                                                  struct ib_device_attr *device_attr);
-       int                        (*query_port)(struct ib_device *device,
-                                                u8 port_num,
-                                                struct ib_port_attr *port_attr);
-       int                        (*query_gid_chunk)(struct ib_device *device,
-                                               u8 port_num, int index,
-                                               union ib_gid gid[8], int size);
-       int                        (*query_pkey_chunk)(struct ib_device *device,
-                                                u8 port_num, u16 index, __be16 pkey[32], int size);
-       int                        (*modify_device)(struct ib_device *device,
-                                                   int device_modify_mask,
-                                                   struct ib_device_modify *device_modify);
-       int                        (*modify_port)(struct ib_device *device,
-                                                 u8 port_num, int port_modify_mask,
-                                                 struct ib_port_modify *port_modify);
-       struct ib_ucontext *       (*alloc_ucontext)(struct ib_device *device,
-                                                    struct ib_udata *udata);
-       int                        (*dealloc_ucontext)(struct ib_ucontext *context);
-       int                        (*mmap)(struct ib_ucontext *context,
-                                          struct vm_area_struct *vma);
-       struct ib_pd *             (*alloc_pd)(struct ib_device *device,
-                                              struct ib_ucontext *context,
-                                              struct ib_udata *udata);
-       int                        (*dealloc_pd)(struct ib_pd *pd);
-       struct ib_ah *             (*create_ah)(struct ib_pd *pd,
-                                               struct ib_ah_attr *ah_attr);
-       int                        (*modify_ah)(struct ib_ah *ah,
-                                               struct ib_ah_attr *ah_attr);
-       int                        (*query_ah)(struct ib_ah *ah,
-                                              struct ib_ah_attr *ah_attr);
-       int                        (*destroy_ah)(struct ib_ah *ah);
-       struct ib_srq *            (*create_srq)(struct ib_pd *pd,
-                                                struct ib_srq_init_attr *srq_init_attr,
-                                                struct ib_udata *udata);
-       int                        (*modify_srq)(struct ib_srq *srq,
-                                                struct ib_srq_attr *srq_attr,
-                                                enum ib_srq_attr_mask srq_attr_mask,
-                                                struct ib_udata *udata);
-       int                        (*query_srq)(struct ib_srq *srq,
-                                               struct ib_srq_attr *srq_attr);
-       int                        (*destroy_srq)(struct ib_srq *srq);
-       int                        (*post_srq_recv)(struct ib_srq *srq,
-                                                   ib_recv_wr_t *recv_wr,
-                                                   ib_recv_wr_t **bad_recv_wr);
-       struct ib_qp *             (*create_qp)(struct ib_pd *pd,
-                                               struct ib_qp_init_attr *qp_init_attr,
-                                               struct ib_udata *udata);
-       int                        (*modify_qp)(struct ib_qp *qp,
-                                               struct ib_qp_attr *qp_attr,
-                                               int qp_attr_mask,
-                                               struct ib_udata *udata);
-       int                        (*query_qp)(struct ib_qp *qp,
-                                              struct ib_qp_attr *qp_attr,
-                                              int qp_attr_mask,
-                                              struct ib_qp_init_attr *qp_init_attr);
-       int                        (*destroy_qp)(struct ib_qp *qp);
-       int                        (*post_send)(struct ib_qp *qp,
-                                               ib_send_wr_t *send_wr,
-                                               ib_send_wr_t **bad_send_wr);
-       int                        (*post_recv)(struct ib_qp *qp,
-                                               ib_recv_wr_t *recv_wr,
-                                               ib_recv_wr_t **bad_recv_wr);
-       struct ib_cq *             (*create_cq)(struct ib_device *device, int cqe,
-                                               int comp_vector,
-                                               struct ib_ucontext *context,
-                                               struct ib_udata *udata);
-       int                        (*modify_cq)(struct ib_cq *cq, u16 cq_count,
-                                               u16 cq_period);
-       int                        (*destroy_cq)(struct ib_cq *cq);
-       int                        (*resize_cq)(struct ib_cq *cq, int cqe,
-                                               struct ib_udata *udata);
-       int                        (*poll_cq)(struct ib_cq *ibcq,
-                                             ib_wc_t** const pp_free_wclist,
-                                             ib_wc_t** const pp_done_wclist);
-       int                        (*peek_cq)(struct ib_cq *cq, int wc_cnt);
-       int                        (*req_notify_cq)(struct ib_cq *cq,
-                                                   enum ib_cq_notify_flags flags);
-       int                        (*req_ncomp_notif)(struct ib_cq *cq,
-                                                     int wc_cnt);
-       struct ib_mr *             (*get_dma_mr)(struct ib_pd *pd,
-                                                int mr_access_flags);
-       struct ib_mr *             (*reg_phys_mr)(struct ib_pd *pd,
-                                                 struct ib_phys_buf *phys_buf_array,
-                                                 int num_phys_buf,
-                                                 int mr_access_flags,
-                                                 u64 *iova_start);
-       struct ib_mr *             (*reg_user_mr)(struct ib_pd *pd,
-                                                 u64 start, u64 length,
-                                                 u64 virt_addr,
-                                                 int mr_access_flags,
-                                                 struct ib_udata *udata);
-       int                        (*query_mr)(struct ib_mr *mr,
-                                              struct ib_mr_attr *mr_attr);
-       int                        (*dereg_mr)(struct ib_mr *mr);
-       int                        (*rereg_phys_mr)(struct ib_mr *mr,
-                                                   int mr_rereg_mask,
-                                                   struct ib_pd *pd,
-                                                   struct ib_phys_buf *phys_buf_array,
-                                                   int num_phys_buf,
-                                                   int mr_access_flags,
-                                                   u64 *iova_start);
-       struct ib_mw *             (*alloc_mw)(struct ib_pd *pd);
-       int                        (*bind_mw)(struct ib_qp *qp,
-                                             struct ib_mw *mw,
-                                             struct ib_mw_bind *mw_bind);
-       int                        (*dealloc_mw)(struct ib_mw *mw);
-       struct ib_fmr *            (*alloc_fmr)(struct ib_pd *pd,
-                                               int mr_access_flags,
-                                               struct ib_fmr_attr *fmr_attr);
-       int                        (*map_phys_fmr)(struct ib_fmr *fmr,
-                                                  u64 *page_list, int list_len,
-                                                  u64 iova);
-       int                        (*unmap_fmr)(struct list_head *fmr_list);
-       int                        (*dealloc_fmr)(struct ib_fmr *fmr);
-       int                        (*attach_mcast)(struct ib_qp *qp,
-                                                  union ib_gid *gid,
-                                                  u16 lid);
-       int                        (*detach_mcast)(struct ib_qp *qp,
-                                                  union ib_gid *gid,
-                                                  u16 lid);
-       int                        (*process_mad)(struct ib_device *device,
-                                                 int process_mad_flags,
-                                                 u8 port_num,
-                                                 ib_wc_t *in_wc,
-                                                 struct ib_grh *in_grh,
-                                                 struct ib_mad *in_mad,
-                                                 struct ib_mad *out_mad);
-
-       struct ib_dma_mapping_ops   *dma_ops;
-       struct list_head             port_list;
-
-       enum {
-               IB_DEV_UNINITIALIZED,
-               IB_DEV_REGISTERED,
-               IB_DEV_UNREGISTERED
-       }                            reg_state;
-
-       u64                          uverbs_cmd_mask;
-       int                          uverbs_abi_ver;
-
-       char                         node_desc[64];
-       __be64                       node_guid;
-       u8                           node_type;
-       u8                           phys_port_cnt;
-       struct ib_device_ex          x;
-};
-
-struct ib_client {
-       char  *name;
-       void (*add)   (struct ib_device *);
-       void (*remove)(struct ib_device *);
-
-       struct list_head list;
-};
-
-struct ib_device *ib_alloc_device(size_t size);
-void ib_dealloc_device(struct ib_device *device);
-
-int ib_register_device   (struct ib_device *device);
-void ib_unregister_device(struct ib_device *device);
-
-int ib_register_client   (struct ib_client *client);
-void ib_unregister_client(struct ib_client *client);
-
-void *ib_get_client_data(struct ib_device *device, struct ib_client *client);
-void  ib_set_client_data(struct ib_device *device, struct ib_client *client,
-                        void *data);
-
-static inline int ib_copy_from_udata(void *dest, struct ib_udata *udata, size_t len)
-{
-       if (len > udata->inlen)
-               return -EFAULT;
-       memcpy(dest, udata->inbuf, len);
-       return 0;
-}
-
-static inline int ib_copy_to_udata(struct ib_udata *udata, void *src, size_t len)
-{
-       if (len > udata->outlen)
-               return -EFAULT;
-       memcpy(udata->outbuf, src, len);
-       return 0;
-}
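These two helpers are the whole user/kernel marshalling story, so a minimal driver-side sketch may help; the response struct, function name, and values are illustrative, not part of this header:

/* Hypothetical private response a driver might return from its
 * alloc_ucontext method (names and sizes are illustrative only). */
struct my_ucontext_resp {
	u32 qp_tab_size;
	u32 uar_size;
};

static int my_fill_ucontext_resp(struct ib_udata *udata)
{
	struct my_ucontext_resp resp;

	resp.qp_tab_size = 256;
	resp.uar_size    = 4096;

	/* returns -EFAULT if the consumer supplied too small a buffer */
	return ib_copy_to_udata(udata, &resp, sizeof resp);
}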
-
-/**
- * ib_modify_qp_is_ok - Check that the supplied attribute mask
- * contains all required attributes and no attributes not allowed for
- * the given QP state transition.
- * @cur_state: Current QP state
- * @next_state: Next QP state
- * @type: QP type
- * @mask: Mask of supplied QP attributes
- *
- * This function is a helper function that a low-level driver's
- * modify_qp method can use to validate the consumer's input.  It
- * checks that cur_state and next_state are valid QP states, that a
- * transition from cur_state to next_state is allowed by the IB spec,
- * and that the attribute mask supplied is allowed for the transition.
- */
-int ib_modify_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state next_state,
-                      enum ib_qp_type type, enum ib_qp_attr_mask mask);
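A sketch of the validation step inside a driver's modify_qp method, assuming the upstream convention that a non-zero return from ib_modify_qp_is_ok means the transition is legal; qp->qp_type and the RESET fallback are assumptions:

static int my_modify_qp(struct ib_qp *qp, struct ib_qp_attr *attr,
			int attr_mask, struct ib_udata *udata)
{
	enum ib_qp_state cur_state, new_state;

	UNREFERENCED_PARAMETER(udata);

	/* trust the caller-supplied current state if given (assumption:
	 * a real driver would fall back to its tracked state, not RESET) */
	cur_state = (attr_mask & IB_QP_CUR_STATE) ? attr->cur_qp_state
						  : XIB_QPS_RESET;
	new_state = (attr_mask & IB_QP_STATE) ? attr->qp_state : cur_state;

	if (!ib_modify_qp_is_ok(cur_state, new_state, qp->qp_type,
				(enum ib_qp_attr_mask)attr_mask))
		return -EINVAL;

	/* ... program the hardware with the validated attributes ... */
	return 0;
}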
-
-int ib_register_event_handler  (struct ib_event_handler *event_handler);
-int ib_unregister_event_handler(struct ib_event_handler *event_handler);
-void ib_dispatch_event(struct ib_event *event);
-
-int ib_query_device(struct ib_device *device,
-                   struct ib_device_attr *device_attr);
-
-int ib_query_port(struct ib_device *device,
-                 u8 port_num, struct ib_port_attr *port_attr);
-
-int ib_query_gid_chunk(struct ib_device *device,
-                u8 port_num, int index, union ib_gid gid[8], int size);
-
-int ib_query_pkey_chunk(struct ib_device *device,
-                 u8 port_num, u16 index, __be16 pkey[32], int size);
-
-int ib_modify_device(struct ib_device *device,
-                    int device_modify_mask,
-                    struct ib_device_modify *device_modify);
-
-int ib_modify_port(struct ib_device *device,
-                  u8 port_num, int port_modify_mask,
-                  struct ib_port_modify *port_modify);
-
-int ib_find_gid(struct ib_device *device, union ib_gid *gid,
-               u8 *port_num, u16 *index);
-
-int ib_find_pkey(struct ib_device *device,
-                u8 port_num, __be16 pkey, u16 *index);
-
-/**
- * ib_alloc_pd - Allocates an unused protection domain.
- * @device: The device on which to allocate the protection domain.
- *
- * A protection domain object provides an association between QPs, shared
- * receive queues, address handles, memory regions, and memory windows.
- */
-struct ib_pd *ib_alloc_pd(struct ib_device *device);
-
-/**
- * ib_dealloc_pd - Deallocates a protection domain.
- * @pd: The protection domain to deallocate.
- */
-int ib_dealloc_pd(struct ib_pd *pd);
-
-/**
- * ib_create_ah - Creates an address handle for the given address vector.
- * @pd: The protection domain associated with the address handle.
- * @ah_attr: The attributes of the address vector.
- *
- * The address handle is used to reference a local or global destination
- * in all UD QP post sends.
- */
-struct ib_ah *ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr);
-
-/**
- * ib_init_ah_from_wc - Initializes address handle attributes from a
- *   work completion.
- * @device: Device on which the received message arrived.
- * @port_num: Port on which the received message arrived.
- * @wc: Work completion associated with the received message.
- * @grh: References the received global route header.  This parameter is
- *   ignored unless the work completion indicates that the GRH is valid.
- * @ah_attr: Returned attributes that can be used when creating an address
- *   handle for replying to the message.
- */
-int ib_init_ah_from_wc(struct ib_device *device, u8 port_num, ib_wc_t *wc,
-                      struct ib_grh *grh, struct ib_ah_attr *ah_attr);
-
-/**
- * ib_create_ah_from_wc - Creates an address handle associated with the
- *   sender of the specified work completion.
- * @pd: The protection domain associated with the address handle.
- * @wc: Work completion information associated with a received message.
- * @grh: References the received global route header.  This parameter is
- *   ignored unless the work completion indicates that the GRH is valid.
- * @port_num: The outbound port number to associate with the address.
- *
- * The address handle is used to reference a local or global destination
- * in all UD QP post sends.
- */
-struct ib_ah *ib_create_ah_from_wc(struct ib_pd *pd, ib_wc_t *wc,
-                                  struct ib_grh *grh, u8 port_num);
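A sketch of the UD reply path these declarations enable; p_grh is assumed to point at the GRH prefixing the receive buffer when the completion reports that the GRH is valid:

static struct ib_ah *reply_ah_sketch(struct ib_device *device,
				     struct ib_pd *pd, u8 port_num,
				     ib_wc_t *p_wc, struct ib_grh *p_grh)
{
	struct ib_ah_attr ah_attr;

	/* extract the reverse path from the completion (and GRH, if any) */
	if (ib_init_ah_from_wc(device, port_num, p_wc, p_grh, &ah_attr))
		return NULL;
	return ib_create_ah(pd, &ah_attr);
}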
-
-/**
- * ib_modify_ah - Modifies the address vector associated with an address
- *   handle.
- * @ah: The address handle to modify.
- * @ah_attr: The new address vector attributes to associate with the
- *   address handle.
- */
-int ib_modify_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr);
-
-/**
- * ib_query_ah - Queries the address vector associated with an address
- *   handle.
- * @ah: The address handle to query.
- * @ah_attr: The address vector attributes associated with the address
- *   handle.
- */
-int ib_query_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr);
-
-/**
- * ib_destroy_ah - Destroys an address handle.
- * @ah: The address handle to destroy.
- */
-int ib_destroy_ah(struct ib_ah *ah);
-
-/**
- * ib_create_srq - Creates a SRQ associated with the specified protection
- *   domain.
- * @pd: The protection domain associated with the SRQ.
- * @srq_init_attr: A list of initial attributes required to create the
- *   SRQ.  If SRQ creation succeeds, then the attributes are updated to
- *   the actual capabilities of the created SRQ.
- *
- * srq_attr->max_wr and srq_attr->max_sge are read to determine the
- * requested size of the SRQ, and set to the actual values allocated
- * on return.  If ib_create_srq() succeeds, then max_wr and max_sge
- * will always be at least as large as the requested values.
- */
-struct ib_srq *ib_create_srq(struct ib_pd *pd,
-                            struct ib_srq_init_attr *srq_init_attr);
-
-/**
- * ib_modify_srq - Modifies the attributes for the specified SRQ.
- * @srq: The SRQ to modify.
- * @srq_attr: On input, specifies the SRQ attributes to modify.  On output,
- *   the current values of selected SRQ attributes are returned.
- * @srq_attr_mask: A bit-mask used to specify which attributes of the SRQ
- *   are being modified.
- *
- * The mask may contain XIB_SRQ_MAX_WR to resize the SRQ and/or
- * XIB_SRQ_LIMIT to set the SRQ's limit and request notification when
- * the number of receives queued drops below the limit.
- */
-int ib_modify_srq(struct ib_srq *srq,
-                 struct ib_srq_attr *srq_attr,
-                 enum ib_srq_attr_mask srq_attr_mask);
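A minimal sketch of arming the SRQ limit event; the threshold value is illustrative:

static int arm_srq_limit_sketch(struct ib_srq *srq)
{
	struct ib_srq_attr attr = { 0 };

	/* request IB_EVENT_SRQ_LIMIT_REACHED once fewer than 16
	 * receives remain queued (16 is an illustrative threshold) */
	attr.srq_limit = 16;
	return ib_modify_srq(srq, &attr, XIB_SRQ_LIMIT);
}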
-
-/**
- * ib_query_srq - Returns the attribute list and current values for the
- *   specified SRQ.
- * @srq: The SRQ to query.
- * @srq_attr: The attributes of the specified SRQ.
- */
-int ib_query_srq(struct ib_srq *srq,
-                struct ib_srq_attr *srq_attr);
-
-/**
- * ib_destroy_srq - Destroys the specified SRQ.
- * @srq: The SRQ to destroy.
- */
-int ib_destroy_srq(struct ib_srq *srq);
-
-/**
- * ib_post_srq_recv - Posts a list of work requests to the specified SRQ.
- * @srq: The SRQ to post the work request on.
- * @recv_wr: A list of work requests to post on the receive queue.
- * @bad_recv_wr: On an immediate failure, this parameter will reference
- *   the work request that failed to be posted on the SRQ.
- */
-static inline int ib_post_srq_recv(struct ib_srq *srq,
-                                  ib_recv_wr_t *recv_wr,
-                                  ib_recv_wr_t **bad_recv_wr)
-{
-       return srq->device->post_srq_recv(srq, recv_wr, bad_recv_wr);
-}
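A sketch of posting a single receive, assuming the IBAL ib_recv_wr_t (p_next, wr_id, num_ds, ds_array) and ib_local_ds_t (vaddr, length, lkey) layouts from <iba/ib_types.h>; verify the field names against that header:

static int post_one_srq_recv_sketch(struct ib_srq *srq, u64 vaddr,
				    u32 length, u32 lkey)
{
	ib_local_ds_t ds;
	ib_recv_wr_t wr, *p_bad_wr;

	ds.vaddr  = vaddr;
	ds.length = length;
	ds.lkey   = lkey;

	wr.p_next   = NULL;
	wr.wr_id    = vaddr;	/* anything that identifies the buffer */
	wr.num_ds   = 1;
	wr.ds_array = &ds;

	return ib_post_srq_recv(srq, &wr, &p_bad_wr);
}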
-
-/**
- * ib_create_qp - Creates a QP associated with the specified protection
- *   domain.
- * @pd: The protection domain associated with the QP.
- * @qp_init_attr: A list of initial attributes required to create the
- *   QP.  If QP creation succeeds, then the attributes are updated to
- *   the actual capabilities of the created QP.
- */
-struct ib_qp *ib_create_qp(struct ib_pd *pd,
-                          struct ib_qp_init_attr *qp_init_attr);
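A sketch of a typical call, using only fields of the ib_qp_init_attr defined later in this header; the queue depths are illustrative:

static struct ib_qp *create_rc_qp_sketch(struct ib_pd *pd,
					 struct ib_cq *send_cq,
					 struct ib_cq *recv_cq)
{
	struct ib_qp_init_attr init_attr = { 0 };

	init_attr.send_cq          = send_cq;
	init_attr.recv_cq          = recv_cq;
	init_attr.cap.max_send_wr  = 64;	/* illustrative sizes */
	init_attr.cap.max_recv_wr  = 64;
	init_attr.cap.max_send_sge = 1;
	init_attr.cap.max_recv_sge = 1;
	init_attr.sq_sig_type      = IB_SIGNAL_ALL_WR;
	init_attr.qp_type          = IB_QPT_RC;

	return ib_create_qp(pd, &init_attr);
}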
-
-/**
- * ib_modify_qp - Modifies the attributes for the specified QP and then
- *   transitions the QP to the given state.
- * @qp: The QP to modify.
- * @qp_attr: On input, specifies the QP attributes to modify.  On output,
- *   the current values of selected QP attributes are returned.
- * @qp_attr_mask: A bit-mask used to specify which attributes of the QP
- *   are being modified.
- */
-int ib_modify_qp(struct ib_qp *qp,
-                struct ib_qp_attr *qp_attr,
-                int qp_attr_mask);
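A sketch of the usual RESET-to-INIT step, built from the XIB_QPS_* states and IB_QP_* mask bits defined later in this header:

static int qp_to_init_sketch(struct ib_qp *qp, u8 port_num, u16 pkey_index)
{
	struct ib_qp_attr attr = { 0 };

	attr.qp_state        = XIB_QPS_INIT;
	attr.pkey_index      = pkey_index;
	attr.port_num        = port_num;
	attr.qp_access_flags = 0;	/* no remote access */

	return ib_modify_qp(qp, &attr,
			    IB_QP_STATE | IB_QP_PKEY_INDEX |
			    IB_QP_PORT | IB_QP_ACCESS_FLAGS);
}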
-
-/**
- * ib_query_qp - Returns the attribute list and current values for the
- *   specified QP.
- * @qp: The QP to query.
- * @qp_attr: The attributes of the specified QP.
- * @qp_attr_mask: A bit-mask used to select specific attributes to query.
- * @qp_init_attr: Additional attributes of the selected QP.
- *
- * The qp_attr_mask may be used to limit the query to gathering only the
- * selected attributes.
- */
-int ib_query_qp(struct ib_qp *qp,
-               struct ib_qp_attr *qp_attr,
-               int qp_attr_mask,
-               struct ib_qp_init_attr *qp_init_attr);
-
-/**
- * ib_modify_cq - Modifies the moderation parameters of the CQ
- * @cq: The CQ to modify.
- * @cq_count: number of CQEs that will trigger an event
- * @cq_period: max period of time before triggering an event
- */
-int ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period);
-
-/**
- * ib_destroy_qp - Destroys the specified QP.
- * @qp: The QP to destroy.
- */
-int ib_destroy_qp(struct ib_qp *qp);
-
-/**
- * ib_post_send - Posts a list of work requests to the send queue of
- *   the specified QP.
- * @qp: The QP to post the work request on.
- * @send_wr: A list of work requests to post on the send queue.
- * @bad_send_wr: On an immediate failure, this parameter will reference
- *   the work request that failed to be posted on the QP.
- */
-static inline int ib_post_send(struct ib_qp *qp,
-                              ib_send_wr_t *send_wr,
-                              ib_send_wr_t **bad_send_wr)
-{
-       return qp->device->post_send(qp, send_wr, bad_send_wr);
-}
-
-/**
- * ib_post_recv - Posts a list of work requests to the receive queue of
- *   the specified QP.
- * @qp: The QP to post the work request on.
- * @recv_wr: A list of work requests to post on the receive queue.
- * @bad_recv_wr: On an immediate failure, this parameter will reference
- *   the work request that failed to be posted on the QP.
- */
-static inline int ib_post_recv(struct ib_qp *qp,
-                              ib_recv_wr_t *recv_wr,
-                              ib_recv_wr_t **bad_recv_wr)
-{
-       return qp->device->post_recv(qp, recv_wr, bad_recv_wr);
-}
-
-/**
- * ib_create_cq - Creates a CQ on the specified device.
- * @device: The device on which to create the CQ.
- * @comp_handler: A user-specified callback that is invoked when a
- *   completion event occurs on the CQ.
- * @event_handler: A user-specified callback that is invoked when an
- *   asynchronous event not associated with a completion occurs on the CQ.
- * @cq_context: Context associated with the CQ returned to the user via
- *   the associated completion and event handlers.
- * @cqe: The minimum size of the CQ.
- * @comp_vector: Completion vector used to signal completion events.
- *     Must be >= 0 and < context->num_comp_vectors.
- *
- * Users can examine the cq structure to determine the actual CQ size.
- */
-struct ib_cq *ib_create_cq(struct ib_device *device,
-                          ib_comp_handler comp_handler,
-                          void (*event_handler)(ib_event_rec_t *),
-                          void *cq_context, int cqe, int comp_vector);
-
-/**
- * ib_resize_cq - Modifies the capacity of the CQ.
- * @cq: The CQ to resize.
- * @cqe: The minimum size of the CQ.
- *
- * Users can examine the cq structure to determine the actual CQ size.
- */
-int ib_resize_cq(struct ib_cq *cq, int cqe);
-
-/**
- * ib_destroy_cq - Destroys the specified CQ.
- * @cq: The CQ to destroy.
- */
-int ib_destroy_cq(struct ib_cq *cq);
-
-/**
- * ib_poll_cq - poll a CQ for completion(s)
- * @cq:the CQ being polled
- * @pp_free_wclist:
- *             On input, a list of work completion structures provided by
- *             the client.  These are used to report completed work requests through
- *             the pp_done_wclist.
- *
- *             On output, this contains the list of work completion structures
- *             for which no completion was found.
- * @pp_done_wclist:A list of work completions retrieved from the completion queue.
- *
- * Poll a CQ for (possibly multiple) completions.  If the return value
- * is < 0, an error occurred.  If the return value is >= 0, it is the
- * number of completions returned.  If the return value is non-negative
- * and entries remain on pp_free_wclist, then the CQ was emptied.
- */
-static inline int ib_poll_cq(struct ib_cq *cq, ib_wc_t** const pp_free_wclist,
-                            ib_wc_t** const pp_done_wclist)
-{
-       return cq->device->poll_cq(cq, pp_free_wclist, pp_done_wclist);
-}
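A sketch of the free-list/done-list protocol, assuming ib_wc_t chains through an IBAL-style p_next member; verify the field names against <iba/ib_types.h>:

static void drain_cq_sketch(struct ib_cq *cq)
{
	ib_wc_t wc[8], *p_free, *p_done = NULL, *p_wc;
	int i, n;

	/* chain the on-stack array into a free list */
	for (i = 0; i < 7; i++)
		wc[i].p_next = &wc[i + 1];
	wc[7].p_next = NULL;
	p_free = &wc[0];

	n = ib_poll_cq(cq, &p_free, &p_done);
	if (n < 0)
		return;		/* polling error */

	for (p_wc = p_done; p_wc != NULL; p_wc = p_wc->p_next) {
		/* consume p_wc->wr_id, p_wc->status, ... */
	}
}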
-
-/**
- * ib_peek_cq - Returns the number of unreaped completions currently
- *   on the specified CQ.
- * @cq: The CQ to peek.
- * @wc_cnt: A minimum number of unreaped completions to check for.
- *
- * If the number of unreaped completions is greater than or equal to wc_cnt,
- * this function returns wc_cnt, otherwise, it returns the actual number of
- * unreaped completions.
- */
-int ib_peek_cq(struct ib_cq *cq, int wc_cnt);
-
-/**
- * ib_req_notify_cq - Request completion notification on a CQ.
- * @cq: The CQ to generate an event for.
- * @flags:
- *   Must contain exactly one of %IB_CQ_SOLICITED or %IB_CQ_NEXT_COMP
- *   to request an event on the next solicited event or next work
- *   completion of any type, respectively. %IB_CQ_REPORT_MISSED_EVENTS
- *   may also be |ed in to request a hint about missed events, as
- *   described below.
- *
- * Return Value:
- *    < 0 means an error occurred while requesting notification
- *   == 0 means notification was requested successfully, and if
- *        IB_CQ_REPORT_MISSED_EVENTS was passed in, then no events
- *        were missed and it is safe to wait for another event.  In
- *        this case it is guaranteed that any work completions added
- *        to the CQ since the last CQ poll will trigger a completion
- *        notification event.
- *    > 0 is only returned if IB_CQ_REPORT_MISSED_EVENTS was passed
- *        in.  It means that the consumer must poll the CQ again to
- *        make sure it is empty to avoid missing an event because of a
- *        race between requesting notification and an entry being
- *        added to the CQ.  This return value means it is possible
- *        (but not guaranteed) that a work completion has been added
- *        to the CQ since the last poll without triggering a
- *        completion notification event.
- */
-static inline int ib_req_notify_cq(struct ib_cq *cq,
-                                  enum ib_cq_notify_flags flags)
-{
-       return cq->device->req_notify_cq(cq, flags);
-}
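A sketch of the race-free rearm loop this contract implies; draining the CQ between iterations is consumer-specific (see the polling sketch above):

static void rearm_cq_sketch(struct ib_cq *cq)
{
	int ret;

	do {
		/* 1. poll the CQ until it is empty (consumer-specific) */
		/* 2. request the next notification, asking for the hint */
		ret = ib_req_notify_cq(cq, IB_CQ_NEXT_COMP |
					   IB_CQ_REPORT_MISSED_EVENTS);
		/* > 0: an entry may have slipped in; poll again */
	} while (ret > 0);
}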
-
-/**
- * ib_req_ncomp_notif - Request completion notification when there are
- *   at least the specified number of unreaped completions on the CQ.
- * @cq: The CQ to generate an event for.
- * @wc_cnt: The number of unreaped completions that should be on the
- *   CQ before an event is generated.
- */
-static inline int ib_req_ncomp_notif(struct ib_cq *cq, int wc_cnt)
-{
-       return cq->device->req_ncomp_notif ?
-               cq->device->req_ncomp_notif(cq, wc_cnt) :
-               -ENOSYS;
-}
-
-/**
- * ib_get_dma_mr - Returns a memory region for system memory that is
- *   usable for DMA.
- * @pd: The protection domain associated with the memory region.
- * @mr_access_flags: Specifies the memory access rights.
- *
- * Note that the ib_dma_*() functions defined below must be used
- * to create/destroy addresses used with the Lkey or Rkey returned
- * by ib_get_dma_mr().
- */
-struct ib_mr *ib_get_dma_mr(struct ib_pd *pd, int mr_access_flags);
-
-#if 0
-// TODO: do we need that
-/**
- * ib_dma_mapping_error - check a DMA addr for error
- * @dev: The device for which the dma_addr was created
- * @dma_addr: The DMA address to check
- */
-static inline int ib_dma_mapping_error(struct ib_device *dev, u64 dma_addr)
-{
-       if (dev->dma_ops)
-               return dev->dma_ops->mapping_error(dev, dma_addr);
-       return dma_mapping_error(dma_addr);
-}
-
-/**
- * ib_dma_map_single - Map a kernel virtual address to DMA address
- * @dev: The device for which the dma_addr is to be created
- * @cpu_addr: The kernel virtual address
- * @size: The size of the region in bytes
- * @direction: The direction of the DMA
- */
-static inline u64 ib_dma_map_single(struct ib_device *dev,
-                                   void *cpu_addr, size_t size,
-                                   enum dma_data_direction direction)
-{
-       if (dev->dma_ops)
-               return dev->dma_ops->map_single(dev, cpu_addr, size, direction);
-       return dma_map_single(dev->dma_device, cpu_addr, size, direction);
-}
-
-/**
- * ib_dma_unmap_single - Destroy a mapping created by ib_dma_map_single()
- * @dev: The device for which the DMA address was created
- * @addr: The DMA address
- * @size: The size of the region in bytes
- * @direction: The direction of the DMA
- */
-static inline void ib_dma_unmap_single(struct ib_device *dev,
-                                      u64 addr, size_t size,
-                                      enum dma_data_direction direction)
-{
-       if (dev->dma_ops)
-               dev->dma_ops->unmap_single(dev, addr, size, direction);
-       else
-               dma_unmap_single(dev->dma_device, addr, size, direction);
-}
-
-/**
- * ib_dma_map_page - Map a physical page to DMA address
- * @dev: The device for which the dma_addr is to be created
- * @page: The page to be mapped
- * @offset: The offset within the page
- * @size: The size of the region in bytes
- * @direction: The direction of the DMA
- */
-static inline u64 ib_dma_map_page(struct ib_device *dev,
-                                 struct page *page,
-                                 unsigned long offset,
-                                 size_t size,
-                                        enum dma_data_direction direction)
-{
-       if (dev->dma_ops)
-               return dev->dma_ops->map_page(dev, page, offset, size, direction);
-       return dma_map_page(dev->dma_device, page, offset, size, direction);
-}
-
-/**
- * ib_dma_unmap_page - Destroy a mapping created by ib_dma_map_page()
- * @dev: The device for which the DMA address was created
- * @addr: The DMA address
- * @size: The size of the region in bytes
- * @direction: The direction of the DMA
- */
-static inline void ib_dma_unmap_page(struct ib_device *dev,
-                                    u64 addr, size_t size,
-                                    enum dma_data_direction direction)
-{
-       if (dev->dma_ops)
-               dev->dma_ops->unmap_page(dev, addr, size, direction);
-       else
-               dma_unmap_page(dev->dma_device, addr, size, direction);
-}
-
-/**
- * ib_dma_map_sg - Map a scatter/gather list to DMA addresses
- * @dev: The device for which the DMA addresses are to be created
- * @sg: The array of scatter/gather entries
- * @nents: The number of scatter/gather entries
- * @direction: The direction of the DMA
- */
-static inline int ib_dma_map_sg(struct ib_device *dev,
-                               struct scatterlist *sg, int nents,
-                               enum dma_data_direction direction)
-{
-       if (dev->dma_ops)
-               return dev->dma_ops->map_sg(dev, sg, nents, direction);
-       return dma_map_sg(dev->dma_device, sg, nents, direction);
-}
-
-/**
- * ib_dma_unmap_sg - Unmap a scatter/gather list of DMA addresses
- * @dev: The device for which the DMA addresses were created
- * @sg: The array of scatter/gather entries
- * @nents: The number of scatter/gather entries
- * @direction: The direction of the DMA
- */
-static inline void ib_dma_unmap_sg(struct ib_device *dev,
-                                  struct scatterlist *sg, int nents,
-                                  enum dma_data_direction direction)
-{
-       if (dev->dma_ops)
-               dev->dma_ops->unmap_sg(dev, sg, nents, direction);
-       else
-               dma_unmap_sg(dev->dma_device, sg, nents, direction);
-}
-
-/**
- * ib_sg_dma_address - Return the DMA address from a scatter/gather entry
- * @dev: The device for which the DMA addresses were created
- * @sg: The scatter/gather entry
- */
-static inline u64 ib_sg_dma_address(struct ib_device *dev,
-                                   struct scatterlist *sg)
-{
-       if (dev->dma_ops)
-               return dev->dma_ops->dma_address(dev, sg);
-       return sg_dma_address(sg);
-}
-
-/**
- * ib_sg_dma_len - Return the DMA length from a scatter/gather entry
- * @dev: The device for which the DMA addresses were created
- * @sg: The scatter/gather entry
- */
-static inline unsigned int ib_sg_dma_len(struct ib_device *dev,
-                                        struct scatterlist *sg)
-{
-       if (dev->dma_ops)
-               return dev->dma_ops->dma_len(dev, sg);
-       return sg_dma_len(sg);
-}
-
-/**
- * ib_dma_sync_single_for_cpu - Prepare DMA region to be accessed by CPU
- * @dev: The device for which the DMA address was created
- * @addr: The DMA address
- * @size: The size of the region in bytes
- * @dir: The direction of the DMA
- */
-static inline void ib_dma_sync_single_for_cpu(struct ib_device *dev,
-                                             u64 addr,
-                                             size_t size,
-                                             enum dma_data_direction dir)
-{
-       if (dev->dma_ops)
-               dev->dma_ops->sync_single_for_cpu(dev, addr, size, dir);
-       else
-               dma_sync_single_for_cpu(dev->dma_device, addr, size, dir);
-}
-
-/**
- * ib_dma_sync_single_for_device - Prepare DMA region to be accessed by device
- * @dev: The device for which the DMA address was created
- * @addr: The DMA address
- * @size: The size of the region in bytes
- * @dir: The direction of the DMA
- */
-static inline void ib_dma_sync_single_for_device(struct ib_device *dev,
-                                                u64 addr,
-                                                size_t size,
-                                                enum dma_data_direction dir)
-{
-       if (dev->dma_ops)
-               dev->dma_ops->sync_single_for_device(dev, addr, size, dir);
-       else
-               dma_sync_single_for_device(dev->dma_device, addr, size, dir);
-}
-
-/**
- * ib_dma_alloc_coherent - Allocate memory and map it for DMA
- * @dev: The device for which the DMA address is requested
- * @size: The size of the region to allocate in bytes
- * @dma_handle: A pointer for returning the DMA address of the region
- * @flag: memory allocator flags
- */
-static inline void *ib_dma_alloc_coherent(struct ib_device *dev,
-                                          size_t size,
-                                          u64 *dma_handle,
-                                          gfp_t flag)
-{
-       if (dev->dma_ops)
-               return dev->dma_ops->alloc_coherent(dev, size, dma_handle, flag);
-       else {
-               dma_addr_t handle;
-               void *ret;
-
-               ret = dma_alloc_coherent(dev->dma_device, size, &handle, flag);
-               *dma_handle = handle;
-               return ret;
-       }
-}
-
-/**
- * ib_dma_free_coherent - Free memory allocated by ib_dma_alloc_coherent()
- * @dev: The device for which the DMA addresses were allocated
- * @size: The size of the region
- * @cpu_addr: the address returned by ib_dma_alloc_coherent()
- * @dma_handle: the DMA address returned by ib_dma_alloc_coherent()
- */
-static inline void ib_dma_free_coherent(struct ib_device *dev,
-                                       size_t size, void *cpu_addr,
-                                       u64 dma_handle)
-{
-       if (dev->dma_ops)
-               dev->dma_ops->free_coherent(dev, size, cpu_addr, dma_handle);
-       else
-               dma_free_coherent(dev->dma_device, size, cpu_addr, dma_handle);
-}
-
-#endif
-
-/**
- * ib_reg_phys_mr - Prepares a virtually addressed memory region for use
- *   by an HCA.
- * @pd: The protection domain assigned to the registered region.
- * @phys_buf_array: Specifies a list of physical buffers to use in the
- *   memory region.
- * @num_phys_buf: Specifies the size of the phys_buf_array.
- * @mr_access_flags: Specifies the memory access rights.
- * @iova_start: The offset of the region's starting I/O virtual address.
- */
-struct ib_mr *ib_reg_phys_mr(struct ib_pd *pd,
-                            struct ib_phys_buf *phys_buf_array,
-                            int num_phys_buf,
-                            int mr_access_flags,
-                            u64 *iova_start);
-
-/**
- * ib_rereg_phys_mr - Modifies the attributes of an existing memory region.
- *   Conceptually, this call performs a deregister-memory-region operation
- *   followed by a register-physical-memory-region operation.  Where possible,
- *   resources are reused instead of deallocated and reallocated.
- * @mr: The memory region to modify.
- * @mr_rereg_mask: A bit-mask used to indicate which of the following
- *   properties of the memory region are being modified.
- * @pd: If %IB_MR_REREG_PD is set in mr_rereg_mask, this field specifies
- *   the new protection domain to associated with the memory region,
- *   otherwise, this parameter is ignored.
- * @phys_buf_array: If %IB_MR_REREG_TRANS is set in mr_rereg_mask, this
- *   field specifies a list of physical buffers to use in the new
- *   translation, otherwise, this parameter is ignored.
- * @num_phys_buf: If %IB_MR_REREG_TRANS is set in mr_rereg_mask, this
- *   field specifies the size of the phys_buf_array, otherwise, this
- *   parameter is ignored.
- * @mr_access_flags: If %IB_MR_REREG_ACCESS is set in mr_rereg_mask, this
- *   field specifies the new memory access rights, otherwise, this
- *   parameter is ignored.
- * @iova_start: The offset of the region's starting I/O virtual address.
- */
-int ib_rereg_phys_mr(struct ib_mr *mr,
-                    int mr_rereg_mask,
-                    struct ib_pd *pd,
-                    struct ib_phys_buf *phys_buf_array,
-                    int num_phys_buf,
-                    int mr_access_flags,
-                    u64 *iova_start);
-
-/**
- * ib_query_mr - Retrieves information about a specific memory region.
- * @mr: The memory region to retrieve information about.
- * @mr_attr: The attributes of the specified memory region.
- */
-int ib_query_mr(struct ib_mr *mr, struct ib_mr_attr *mr_attr);
-
-/**
- * ib_dereg_mr - Deregisters a memory region and removes it from the
- *   HCA translation table.
- * @mr: The memory region to deregister.
- */
-int ib_dereg_mr(struct ib_mr *mr);
-
-/**
- * ib_alloc_mw - Allocates a memory window.
- * @pd: The protection domain associated with the memory window.
- */
-struct ib_mw *ib_alloc_mw(struct ib_pd *pd);
-
-/**
- * ib_bind_mw - Posts a work request to the send queue of the specified
- *   QP, which binds the memory window to the given address range and
- *   remote access attributes.
- * @qp: QP to post the bind work request on.
- * @mw: The memory window to bind.
- * @mw_bind: Specifies information about the memory window, including
- *   its address range, remote access rights, and associated memory region.
- */
-static inline int ib_bind_mw(struct ib_qp *qp,
-                            struct ib_mw *mw,
-                            struct ib_mw_bind *mw_bind)
-{
-       /* XXX reference counting in corresponding MR? */
-       return mw->device->bind_mw ?
-               mw->device->bind_mw(qp, mw, mw_bind) :
-               -ENOSYS;
-}
-
-/**
- * ib_dealloc_mw - Deallocates a memory window.
- * @mw: The memory window to deallocate.
- */
-int ib_dealloc_mw(struct ib_mw *mw);
-
-/**
- * ib_alloc_fmr - Allocates an unmapped fast memory region.
- * @pd: The protection domain associated with the unmapped region.
- * @mr_access_flags: Specifies the memory access rights.
- * @fmr_attr: Attributes of the unmapped region.
- *
- * A fast memory region must be mapped before it can be used as part of
- * a work request.
- */
-struct ib_fmr *ib_alloc_fmr(struct ib_pd *pd,
-                           int mr_access_flags,
-                           struct ib_fmr_attr *fmr_attr);
-
-/**
- * ib_map_phys_fmr - Maps a list of physical pages to a fast memory region.
- * @fmr: The fast memory region to associate with the pages.
- * @page_list: An array of physical pages to map to the fast memory region.
- * @list_len: The number of pages in page_list.
- * @iova: The I/O virtual address to use with the mapped region.
- */
-static inline int ib_map_phys_fmr(struct ib_fmr *fmr,
-                                 u64 *page_list, int list_len,
-                                 u64 iova)
-{
-       return fmr->device->map_phys_fmr(fmr, page_list, list_len, iova);
-}
-
-/**
- * ib_unmap_fmr - Removes the mapping from a list of fast memory regions.
- * @fmr_list: A linked list of fast memory regions to unmap.
- */
-int ib_unmap_fmr(struct list_head *fmr_list);
-
-/**
- * ib_dealloc_fmr - Deallocates a fast memory region.
- * @fmr: The fast memory region to deallocate.
- */
-int ib_dealloc_fmr(struct ib_fmr *fmr);
-
-/**
- * ib_attach_mcast - Attaches the specified QP to a multicast group.
- * @qp: QP to attach to the multicast group.  The QP must be type
- *   IB_QPT_UD.
- * @gid: Multicast group GID.
- * @lid: Multicast group LID in host byte order.
- *
- * In order to send and receive multicast packets, subnet
- * administration must have created the multicast group and configured
- * the fabric appropriately.  The port associated with the specified
- * QP must also be a member of the multicast group.
- */
-int ib_attach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid);
-
-/**
- * ib_detach_mcast - Detaches the specified QP from a multicast group.
- * @qp: QP to detach from the multicast group.
- * @gid: Multicast group GID.
- * @lid: Multicast group LID in host byte order.
- */
-int ib_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid);
-
-#endif /* IB_VERBS_H */
+/*\r
+ * Copyright (c) 2004 Mellanox Technologies Ltd.  All rights reserved.\r
+ * Copyright (c) 2004 Infinicon Corporation.  All rights reserved.\r
+ * Copyright (c) 2004 Intel Corporation.  All rights reserved.\r
+ * Copyright (c) 2004 Topspin Corporation.  All rights reserved.\r
+ * Copyright (c) 2004 Voltaire Corporation.  All rights reserved.\r
+ * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.\r
+ * Copyright (c) 2005, 2006, 2007 Cisco Systems.  All rights reserved.\r
+ *\r
+ * This software is available to you under a choice of one of two\r
+ * licenses.  You may choose to be licensed under the terms of the GNU\r
+ * General Public License (GPL) Version 2, available from the file\r
+ * COPYING in the main directory of this source tree, or the\r
+ * OpenIB.org BSD license below:\r
+ *\r
+ *     Redistribution and use in source and binary forms, with or\r
+ *     without modification, are permitted provided that the following\r
+ *     conditions are met:\r
+ *\r
+ *      - Redistributions of source code must retain the above\r
+ *        copyright notice, this list of conditions and the following\r
+ *        disclaimer.\r
+ *\r
+ *      - Redistributions in binary form must reproduce the above\r
+ *        copyright notice, this list of conditions and the following\r
+ *        disclaimer in the documentation and/or other materials\r
+ *        provided with the distribution.\r
+ *\r
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,\r
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\r
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND\r
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS\r
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN\r
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN\r
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\r
+ * SOFTWARE.\r
+ *\r
+ * $Id: ib_verbs.h 1349 2004-12-16 21:09:43Z roland $\r
+ */\r
+\r
+#if !defined(IB_VERBS_H)\r
+#define IB_VERBS_H\r
+\r
+#include <iba\ib_ci.h>\r
+\r
+union ib_gid {\r
+       u8      raw[16];\r
+       struct {\r
+               __be64  subnet_prefix;\r
+               __be64  interface_id;\r
+       } global;\r
+};\r
+\r
+#include "ib_verbs_ex.h"\r
+\r
+enum rdma_node_type {\r
+       /* IB values map to NodeInfo:NodeType. */\r
+       RDMA_NODE_IB_CA         = 1,\r
+       RDMA_NODE_IB_SWITCH,\r
+       RDMA_NODE_IB_ROUTER,\r
+       RDMA_NODE_RNIC\r
+};\r
+\r
+enum rdma_transport_type {\r
+       RDMA_TRANSPORT_IB,\r
+       RDMA_TRANSPORT_IWARP\r
+};\r
+\r
+enum rdma_transport_type\r
+rdma_node_get_transport(enum rdma_node_type node_type) __attribute_const__;\r
+\r
+enum ib_device_cap_flags {\r
+       IB_DEVICE_RESIZE_MAX_WR         = 1,\r
+       IB_DEVICE_BAD_PKEY_CNTR         = (1<<1),\r
+       IB_DEVICE_BAD_QKEY_CNTR         = (1<<2),\r
+       IB_DEVICE_RAW_MULTI             = (1<<3),\r
+       IB_DEVICE_AUTO_PATH_MIG         = (1<<4),\r
+       IB_DEVICE_CHANGE_PHY_PORT       = (1<<5),\r
+       IB_DEVICE_UD_AV_PORT_ENFORCE    = (1<<6),\r
+       IB_DEVICE_CURR_QP_STATE_MOD     = (1<<7),\r
+       IB_DEVICE_SHUTDOWN_PORT         = (1<<8),\r
+       IB_DEVICE_INIT_TYPE             = (1<<9),\r
+       IB_DEVICE_PORT_ACTIVE_EVENT     = (1<<10),\r
+       IB_DEVICE_SYS_IMAGE_GUID        = (1<<11),\r
+       IB_DEVICE_RC_RNR_NAK_GEN        = (1<<12),\r
+       IB_DEVICE_SRQ_RESIZE            = (1<<13),\r
+       IB_DEVICE_N_NOTIFY_CQ           = (1<<14),\r
+       IB_DEVICE_ZERO_STAG                     = (1<<15),\r
+       IB_DEVICE_SEND_W_INV            = (1<<16),\r
+       IB_DEVICE_MEM_WINDOW            = (1<<17),\r
+       IB_DEVICE_IPOIB_CSUM            = (1<<18)\r
+};\r
+\r
+enum ib_atomic_cap {\r
+       IB_ATOMIC_NON,\r
+       IB_ATOMIC_HCA,\r
+       IB_ATOMIC_GLOB\r
+};\r
+\r
+struct ib_device_attr {\r
+       u64                     fw_ver;\r
+       __be64                  sys_image_guid;\r
+       u64                     max_mr_size;\r
+       u64                     page_size_cap;\r
+       u32                     vendor_id;\r
+       u32                     vendor_part_id;\r
+       u32                     hw_ver;\r
+       int                     max_qp;\r
+       int                     max_qp_wr;\r
+       int                     device_cap_flags;\r
+       int                     max_sge;\r
+       int                     max_sge_rd;\r
+       int                     max_cq;\r
+       int                     max_cqe;\r
+       int                     max_mr;\r
+       int                     max_pd;\r
+       int                     max_qp_rd_atom;\r
+       int                     max_ee_rd_atom;\r
+       int                     max_res_rd_atom;\r
+       int                     max_qp_init_rd_atom;\r
+       int                     max_ee_init_rd_atom;\r
+       enum ib_atomic_cap      atomic_cap;\r
+       int                     max_ee;\r
+       int                     max_rdd;\r
+       int                     max_mw;\r
+       int                     max_raw_ipv6_qp;\r
+       int                     max_raw_ethy_qp;\r
+       int                     max_mcast_grp;\r
+       int                     max_mcast_qp_attach;\r
+       int                     max_total_mcast_qp_attach;\r
+       u64                     max_ah;\r
+       int                     max_fmr;\r
+       int                     max_map_per_fmr;\r
+       int                     max_srq;\r
+       int                     max_srq_wr;\r
+       int                     max_srq_sge;\r
+       u16                     max_pkeys;\r
+       u8                      local_ca_ack_delay;\r
+};\r
+\r
+enum ib_mtu {\r
+       IB_MTU_256  = 1,\r
+       IB_MTU_512  = 2,\r
+       IB_MTU_1024 = 3,\r
+       IB_MTU_2048 = 4,\r
+       IB_MTU_4096 = 5\r
+};\r
+\r
+static inline int ib_mtu_enum_to_int(enum ib_mtu mtu)\r
+{\r
+       switch (mtu) {\r
+       case IB_MTU_256:  return  256;\r
+       case IB_MTU_512:  return  512;\r
+       case IB_MTU_1024: return 1024;\r
+       case IB_MTU_2048: return 2048;\r
+       case IB_MTU_4096: return 4096;\r
+       default:          return -1;\r
+       }\r
+}\r
+\r
+enum ib_port_state {\r
+       IB_PORT_NOP             = 0,\r
+       IB_PORT_DOWN            = 1,\r
+       IB_PORT_INIT            = 2,\r
+       IB_PORT_ARMED           = 3,\r
+       IB_PORT_ACTIVE          = 4,\r
+       IB_PORT_ACTIVE_DEFER    = 5\r
+};\r
+\r
+enum ib_port_cap_flags {\r
+       IB_PORT_SM                              = 1 <<  1,\r
+       IB_PORT_NOTICE_SUP                      = 1 <<  2,\r
+       IB_PORT_TRAP_SUP                        = 1 <<  3,\r
+       IB_PORT_OPT_IPD_SUP                     = 1 <<  4,\r
+       IB_PORT_AUTO_MIGR_SUP                   = 1 <<  5,\r
+       IB_PORT_SL_MAP_SUP                      = 1 <<  6,\r
+       IB_PORT_MKEY_NVRAM                      = 1 <<  7,\r
+       IB_PORT_PKEY_NVRAM                      = 1 <<  8,\r
+       IB_PORT_LED_INFO_SUP                    = 1 <<  9,\r
+       IB_PORT_SM_DISABLED                     = 1 << 10,\r
+       IB_PORT_SYS_IMAGE_GUID_SUP              = 1 << 11,\r
+       IB_PORT_PKEY_SW_EXT_PORT_TRAP_SUP       = 1 << 12,\r
+       IB_PORT_CM_SUP                          = 1 << 16,\r
+       IB_PORT_SNMP_TUNNEL_SUP                 = 1 << 17,\r
+       IB_PORT_REINIT_SUP                      = 1 << 18,\r
+       IB_PORT_DEVICE_MGMT_SUP                 = 1 << 19,\r
+       IB_PORT_VENDOR_CLASS_SUP                = 1 << 20,\r
+       IB_PORT_DR_NOTICE_SUP                   = 1 << 21,\r
+       IB_PORT_CAP_MASK_NOTICE_SUP             = 1 << 22,\r
+       IB_PORT_BOOT_MGMT_SUP                   = 1 << 23,\r
+       IB_PORT_LINK_LATENCY_SUP                = 1 << 24,\r
+       IB_PORT_CLIENT_REG_SUP                  = 1 << 25\r
+};\r
+\r
+enum ib_port_width {\r
+       IB_WIDTH_1X     = 1,\r
+       IB_WIDTH_4X     = 2,\r
+       IB_WIDTH_8X     = 4,\r
+       IB_WIDTH_12X    = 8\r
+};\r
+\r
+static inline int ib_width_enum_to_int(enum ib_port_width width)\r
+{\r
+       switch (width) {\r
+       case IB_WIDTH_1X:  return  1;\r
+       case IB_WIDTH_4X:  return  4;\r
+       case IB_WIDTH_8X:  return  8;\r
+       case IB_WIDTH_12X: return 12;\r
+       default:          return -1;\r
+       }\r
+}\r
+\r
+struct ib_port_attr {\r
+       enum ib_port_state      state;\r
+       enum ib_mtu             max_mtu;\r
+       enum ib_mtu             active_mtu;\r
+       int                     gid_tbl_len;\r
+       u32                     port_cap_flags;\r
+       u32                     max_msg_sz;\r
+       u32                     bad_pkey_cntr;\r
+       u32                     qkey_viol_cntr;\r
+       u16                     pkey_tbl_len;\r
+       u16                     lid;\r
+       u16                     sm_lid;\r
+       u8                      lmc;\r
+       u8                      max_vl_num;\r
+       u8                      sm_sl;\r
+       u8                      subnet_timeout;\r
+       u8                      init_type_reply;\r
+       u8                      active_width;\r
+       u8                      active_speed;\r
+       u8                      phys_state;\r
+};\r
+\r
+enum ib_device_modify_flags {\r
+       IB_DEVICE_MODIFY_SYS_IMAGE_GUID = 1 << 0,\r
+       IB_DEVICE_MODIFY_NODE_DESC      = 1 << 1\r
+};\r
+\r
+struct ib_device_modify {\r
+       u64     sys_image_guid;\r
+       char    node_desc[64];\r
+};\r
+\r
+enum ib_port_modify_flags {\r
+       IB_PORT_SHUTDOWN                = 1,\r
+       IB_PORT_INIT_TYPE               = (1<<2),\r
+       IB_PORT_RESET_QKEY_CNTR         = (1<<3)\r
+};\r
+\r
+struct ib_port_modify {\r
+       u32     set_port_cap_mask;\r
+       u32     clr_port_cap_mask;\r
+       u8      init_type;\r
+};\r
+\r
+enum ib_event_type {\r
+       IB_EVENT_CQ_ERR                 = IB_AE_CQ_ERROR,\r
+       IB_EVENT_QP_FATAL               = IB_AE_QP_FATAL,\r
+       IB_EVENT_QP_REQ_ERR             = IB_AE_WQ_REQ_ERROR,\r
+       IB_EVENT_QP_ACCESS_ERR          = IB_AE_WQ_ACCESS_ERROR,\r
+       IB_EVENT_COMM_EST               = IB_AE_QP_COMM,\r
+       IB_EVENT_SQ_DRAINED             = IB_AE_SQ_DRAINED,\r
+       IB_EVENT_PATH_MIG               = IB_AE_QP_APM,\r
+       IB_EVENT_PATH_MIG_ERR           = IB_AE_QP_APM_ERROR,\r
+       IB_EVENT_DEVICE_FATAL           = IB_AE_LOCAL_FATAL,\r
+       IB_EVENT_PORT_ACTIVE            = IB_AE_PORT_ACTIVE,\r
+       IB_EVENT_PORT_ERR               = IB_AE_PORT_DOWN,\r
+       IB_EVENT_SRQ_LIMIT_REACHED      = IB_AE_SRQ_LIMIT_REACHED,\r
+       IB_EVENT_SRQ_ERR                = IB_AE_SRQ_CATAS_ERROR,\r
+       IB_EVENT_QP_LAST_WQE_REACHED    = IB_AE_SRQ_QP_LAST_WQE_REACHED,\r
+       IB_EVENT_LID_CHANGE             = IB_AE_UNKNOWN + 1,\r
+       IB_EVENT_PKEY_CHANGE,\r
+       IB_EVENT_SM_CHANGE,\r
+       IB_EVENT_CLIENT_REREGISTER\r
+};\r
+\r
+struct ib_event {\r
+       struct ib_device        *device;\r
+       union {\r
+               struct ib_cq    *cq;\r
+               struct ib_qp    *qp;\r
+               struct ib_srq   *srq;\r
+               u8              port_num;\r
+       } element;\r
+       enum ib_event_type      event;\r
+       struct ib_event_ex      x;\r
+};\r
+\r
+struct ib_event_handler {\r
+       struct ib_device *device;\r
+       void            (*handler)(struct ib_event_handler *, struct ib_event *);\r
+       void *            ctx;\r
+       struct list_head  list;\r
+};\r
+\r
+#define INIT_IB_EVENT_HANDLER(_ptr, _device, _handler, _ctx)           \\r
+       {                                                       \\r
+               (_ptr)->device  = _device;                      \\r
+               (_ptr)->handler = _handler;                     \\r
+               (_ptr)->ctx     = _ctx;                         \\r
+               INIT_LIST_HEAD(&(_ptr)->list);                  \\r
+       }\r
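A sketch of declaring and registering an asynchronous event handler with the macro above; the names are illustrative:

static void my_event_handler(struct ib_event_handler *p_handler,
			     struct ib_event *p_event)
{
	if (p_event->event == IB_EVENT_PORT_ACTIVE) {
		/* react to p_event->element.port_num coming up */
	}
}

static struct ib_event_handler my_handler;

static void register_handler_sketch(struct ib_device *device, void *ctx)
{
	INIT_IB_EVENT_HANDLER(&my_handler, device, my_event_handler, ctx);
	ib_register_event_handler(&my_handler);
}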
+\r
+struct ib_global_route {\r
+       union ib_gid    dgid;\r
+       u32             flow_label;\r
+       u8              sgid_index;\r
+       u8              hop_limit;\r
+       u8              traffic_class;\r
+};\r
+\r
+struct ib_grh {\r
+       __be32          version_tclass_flow;\r
+       __be16          paylen;\r
+       u8              next_hdr;\r
+       u8              hop_limit;\r
+       union ib_gid    sgid;\r
+       union ib_gid    dgid;\r
+};\r
+\r
+enum {\r
+       IB_MULTICAST_QPN = 0xffffff\r
+};\r
+\r
+#define XIB_LID_PERMISSIVE     __constant_htons(0xFFFF)\r
+\r
+enum ib_ah_flags {\r
+       IB_AH_GRH       = 1\r
+};\r
+\r
+enum ib_rate {\r
+       IB_RATE_PORT_CURRENT = 0,\r
+       IB_RATE_2_5_GBPS = 2,\r
+       IB_RATE_5_GBPS   = 5,\r
+       IB_RATE_10_GBPS  = 3,\r
+       IB_RATE_20_GBPS  = 6,\r
+       IB_RATE_30_GBPS  = 4,\r
+       IB_RATE_40_GBPS  = 7,\r
+       IB_RATE_60_GBPS  = 8,\r
+       IB_RATE_80_GBPS  = 9,\r
+       IB_RATE_120_GBPS = 10\r
+};\r
+\r
+/**\r
+ * ib_rate_to_mult - Convert the IB rate enum to a multiple of the\r
+ * base rate of 2.5 Gbit/sec.  For example, IB_RATE_5_GBPS will be\r
+ * converted to 2, since 5 Gbit/sec is 2 * 2.5 Gbit/sec.\r
+ * @rate: rate to convert.\r
+ */\r
+int ib_rate_to_mult(enum ib_rate rate) __attribute_const__;\r
+\r
+/**\r
+ * mult_to_ib_rate - Convert a multiple of 2.5 Gbit/sec to an IB rate\r
+ * enum.\r
+ * @mult: multiple to convert.\r
+ */\r
+enum ib_rate mult_to_ib_rate(int mult) __attribute_const__;\r
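A plausible implementation sketch mirroring the upstream Linux helper; treat the exact table as an assumption to check against the implementation:

static inline int ib_rate_to_mult_sketch(enum ib_rate rate)
{
	switch (rate) {
	case IB_RATE_PORT_CURRENT: return  0;
	case IB_RATE_2_5_GBPS:     return  1;
	case IB_RATE_5_GBPS:       return  2;
	case IB_RATE_10_GBPS:      return  4;
	case IB_RATE_20_GBPS:      return  8;
	case IB_RATE_30_GBPS:      return 12;
	case IB_RATE_40_GBPS:      return 16;
	case IB_RATE_60_GBPS:      return 24;
	case IB_RATE_80_GBPS:      return 32;
	case IB_RATE_120_GBPS:     return 48;
	default:                   return -1;	/* unknown encoding */
	}
}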
+\r
+struct ib_ah_attr {\r
+       struct ib_global_route  grh;\r
+       u16                     dlid;\r
+       u8                      sl;\r
+       u8                      src_path_bits;\r
+       u8                      static_rate;\r
+       u8                      ah_flags;\r
+       u8                      port_num;\r
+};\r
+\r
+enum ib_wc_status {\r
+       IB_WC_SUCCESS,\r
+       IB_WC_LOC_LEN_ERR,\r
+       IB_WC_LOC_QP_OP_ERR,\r
+       IB_WC_LOC_EEC_OP_ERR,\r
+       IB_WC_LOC_PROT_ERR,\r
+       IB_WC_WR_FLUSH_ERR,\r
+       IB_WC_MW_BIND_ERR,\r
+       IB_WC_BAD_RESP_ERR,\r
+       IB_WC_LOC_ACCESS_ERR,\r
+       IB_WC_REM_INV_REQ_ERR,\r
+       IB_WC_REM_ACCESS_ERR,\r
+       IB_WC_REM_OP_ERR,\r
+       IB_WC_RETRY_EXC_ERR,\r
+       IB_WC_RNR_RETRY_EXC_ERR,\r
+       IB_WC_LOC_RDD_VIOL_ERR,\r
+       IB_WC_REM_INV_RD_REQ_ERR,\r
+       IB_WC_REM_ABORT_ERR,\r
+       IB_WC_INV_EECN_ERR,\r
+       IB_WC_INV_EEC_STATE_ERR,\r
+       IB_WC_FATAL_ERR,\r
+       IB_WC_RESP_TIMEOUT_ERR,\r
+       IB_WC_GENERAL_ERR\r
+};\r
+\r
+enum ib_wc_opcode {\r
+       XIB_WC_SEND,\r
+       XIB_WC_RDMA_WRITE,\r
+       XIB_WC_RDMA_READ,\r
+       XIB_WC_COMP_SWAP,\r
+       XIB_WC_FETCH_ADD,\r
+       XIB_WC_BIND_MW,\r
+/*\r
+ * Set value of XIB_WC_RECV so consumers can test if a completion is a\r
+ * receive by testing (opcode & XIB_WC_RECV).\r
+ */\r
+       XIB_WC_RECV                     = 1 << 7,\r
+       XIB_WC_RECV_RDMA_WITH_IMM\r
+};\r
+\r
+enum ib_wc_flags {\r
+       IB_WC_GRH               = 1,\r
+       IB_WC_WITH_IMM          = (1<<1),\r
+       IB_WC_FORWARD           = (1<<2)\r
+};\r
+\r
+struct ib_wc {\r
+       u64                     wr_id;\r
+       enum ib_wc_status       status;\r
+       enum ib_wc_opcode       opcode;\r
+       u32                     vendor_err;\r
+       u32                     byte_len;\r
+       struct ib_qp           *qp;\r
+       __be32                  imm_data;\r
+       u32                     src_qp;\r
+       int                     wc_flags;\r
+       u16                     pkey_index;\r
+       u16                     slid;\r
+       u8                      sl;\r
+       u8                      dlid_path_bits;\r
+       u8                      port_num;       /* valid only for DR SMPs on switches */\r
+};\r
+\r
+enum ib_cq_notify_flags {\r
+       IB_CQ_SOLICITED                 = 1 << 0,\r
+       IB_CQ_NEXT_COMP                 = 1 << 1,\r
+       IB_CQ_SOLICITED_MASK            = IB_CQ_SOLICITED | IB_CQ_NEXT_COMP,\r
+       IB_CQ_REPORT_MISSED_EVENTS      = 1 << 2,\r
+};\r
+\r
+enum ib_srq_attr_mask {\r
+       XIB_SRQ_MAX_WR  = 1 << 0,\r
+       XIB_SRQ_LIMIT   = 1 << 1,\r
+};\r
+\r
+struct ib_srq_attr {\r
+       u32     max_wr;\r
+       u32     max_sge;\r
+       u32     srq_limit;\r
+};\r
+\r
+struct ib_srq_init_attr {\r
+       void                  (*event_handler)(ib_event_rec_t *);\r
+       void                   *srq_context;\r
+       struct ib_srq_attr      attr;\r
+};\r
+\r
+struct ib_qp_cap {\r
+       u32     max_send_wr;\r
+       u32     max_recv_wr;\r
+       u32     max_send_sge;\r
+       u32     max_recv_sge;\r
+       u32     max_inline_data;\r
+};\r
+\r
+enum ib_sig_type {\r
+       IB_SIGNAL_ALL_WR,\r
+       IB_SIGNAL_REQ_WR\r
+};\r
+\r
+enum ib_qp_type {\r
+       /*\r
+        * IB_QPT_SMI and IB_QPT_GSI have to be the first two entries\r
+        * here (and in that order) since the MAD layer uses them as\r
+        * indices into a 2-entry table.\r
+        */\r
+       IB_QPT_SMI,\r
+       IB_QPT_GSI,\r
+\r
+       IB_QPT_RC,\r
+       IB_QPT_UC,\r
+       IB_QPT_UD,\r
+       IB_QPT_RAW_IP_V6,\r
+       IB_QPT_RAW_ETY\r
+};\r
+\r
+struct ib_qp_init_attr {\r
+       void                  (*event_handler)(ib_event_rec_t *);\r
+       void                   *qp_context;\r
+       struct ib_cq           *send_cq;\r
+       struct ib_cq           *recv_cq;\r
+       struct ib_srq          *srq;\r
+       struct ib_qp_cap        cap;\r
+       enum ib_sig_type        sq_sig_type;\r
+       enum ib_qp_type         qp_type;\r
+       u8                      port_num; /* special QP types only */\r
+};\r
+\r
+enum ib_rnr_timeout {\r
+       IB_RNR_TIMER_655_36 =  0,\r
+       IB_RNR_TIMER_000_01 =  1,\r
+       IB_RNR_TIMER_000_02 =  2,\r
+       IB_RNR_TIMER_000_03 =  3,\r
+       IB_RNR_TIMER_000_04 =  4,\r
+       IB_RNR_TIMER_000_06 =  5,\r
+       IB_RNR_TIMER_000_08 =  6,\r
+       IB_RNR_TIMER_000_12 =  7,\r
+       IB_RNR_TIMER_000_16 =  8,\r
+       IB_RNR_TIMER_000_24 =  9,\r
+       IB_RNR_TIMER_000_32 = 10,\r
+       IB_RNR_TIMER_000_48 = 11,\r
+       IB_RNR_TIMER_000_64 = 12,\r
+       IB_RNR_TIMER_000_96 = 13,\r
+       IB_RNR_TIMER_001_28 = 14,\r
+       IB_RNR_TIMER_001_92 = 15,\r
+       IB_RNR_TIMER_002_56 = 16,\r
+       IB_RNR_TIMER_003_84 = 17,\r
+       IB_RNR_TIMER_005_12 = 18,\r
+       IB_RNR_TIMER_007_68 = 19,\r
+       IB_RNR_TIMER_010_24 = 20,\r
+       IB_RNR_TIMER_015_36 = 21,\r
+       IB_RNR_TIMER_020_48 = 22,\r
+       IB_RNR_TIMER_030_72 = 23,\r
+       IB_RNR_TIMER_040_96 = 24,\r
+       IB_RNR_TIMER_061_44 = 25,\r
+       IB_RNR_TIMER_081_92 = 26,\r
+       IB_RNR_TIMER_122_88 = 27,\r
+       IB_RNR_TIMER_163_84 = 28,\r
+       IB_RNR_TIMER_245_76 = 29,\r
+       IB_RNR_TIMER_327_68 = 30,\r
+       IB_RNR_TIMER_491_52 = 31\r
+};\r
+       \r
+enum ib_qp_attr_mask {\r
+       IB_QP_STATE                     = 1,\r
+       IB_QP_CUR_STATE                 = (1<<1),\r
+       IB_QP_EN_SQD_ASYNC_NOTIFY       = (1<<2),\r
+       IB_QP_ACCESS_FLAGS              = (1<<3),\r
+       IB_QP_PKEY_INDEX                = (1<<4),\r
+       IB_QP_PORT                      = (1<<5),\r
+       IB_QP_QKEY                      = (1<<6),\r
+       IB_QP_AV                        = (1<<7),\r
+       IB_QP_PATH_MTU                  = (1<<8),\r
+       IB_QP_TIMEOUT                   = (1<<9),\r
+       IB_QP_RETRY_CNT                 = (1<<10),\r
+       IB_QP_RNR_RETRY                 = (1<<11),\r
+       IB_QP_RQ_PSN                    = (1<<12),\r
+       IB_QP_MAX_QP_RD_ATOMIC          = (1<<13),\r
+       IB_QP_ALT_PATH                  = (1<<14),\r
+       IB_QP_MIN_RNR_TIMER             = (1<<15),\r
+       IB_QP_SQ_PSN                    = (1<<16),\r
+       IB_QP_MAX_DEST_RD_ATOMIC        = (1<<17),\r
+       IB_QP_PATH_MIG_STATE            = (1<<18),\r
+       IB_QP_CAP                       = (1<<19),\r
+       IB_QP_DEST_QPN                  = (1<<20)\r
+};\r
+\r
+enum ib_qp_state {\r
+       XIB_QPS_RESET,\r
+       XIB_QPS_INIT,\r
+       XIB_QPS_RTR,\r
+       XIB_QPS_RTS,\r
+       XIB_QPS_SQD,\r
+       XIB_QPS_SQE,\r
+       XIB_QPS_ERR\r
+};\r
+\r
+enum ib_mig_state {\r
+       IB_MIG_MIGRATED,\r
+       IB_MIG_REARM,\r
+       IB_MIG_ARMED\r
+};\r
+\r
+struct ib_qp_attr {\r
+       enum ib_qp_state        qp_state;\r
+       enum ib_qp_state        cur_qp_state;\r
+       enum ib_mtu             path_mtu;\r
+       enum ib_mig_state       path_mig_state;\r
+       u32                     qkey;\r
+       u32                     rq_psn;\r
+       u32                     sq_psn;\r
+       u32                     dest_qp_num;\r
+       int                     qp_access_flags;\r
+       struct ib_qp_cap        cap;\r
+       struct ib_ah_attr       ah_attr;\r
+       struct ib_ah_attr       alt_ah_attr;\r
+       u16                     pkey_index;\r
+       u16                     alt_pkey_index;\r
+       u8                      en_sqd_async_notify;\r
+       u8                      sq_draining;\r
+       u8                      max_rd_atomic;\r
+       u8                      max_dest_rd_atomic;\r
+       u8                      min_rnr_timer;\r
+       u8                      port_num;\r
+       u8                      timeout;\r
+       u8                      retry_cnt;\r
+       u8                      rnr_retry;\r
+       u8                      alt_port_num;\r
+       u8                      alt_timeout;\r
+};\r
+\r
+enum ib_wr_opcode {\r
+       IB_WR_RDMA_WRITE,\r
+       IB_WR_RDMA_WRITE_WITH_IMM,\r
+       IB_WR_SEND,\r
+       IB_WR_SEND_WITH_IMM,\r
+       IB_WR_RDMA_READ,\r
+       IB_WR_ATOMIC_CMP_AND_SWP,\r
+       IB_WR_ATOMIC_FETCH_AND_ADD\r
+};\r
+\r
+enum ib_send_flags {\r
+       IB_SEND_FENCE           = 1,\r
+       IB_SEND_SIGNALED        = (1<<1),\r
+       IB_SEND_SOLICITED       = (1<<2),\r
+       IB_SEND_INLINE          = (1<<3)\r
+};\r
+\r
+struct ib_sge {\r
+       u64     addr;\r
+       u32     length;\r
+       u32     lkey;\r
+};\r
+\r
+struct ib_send_wr {\r
+       struct ib_send_wr      *next;\r
+       u64                     wr_id;\r
+       struct ib_sge          *sg_list;\r
+       int                     num_sge;\r
+       enum ib_wr_opcode       opcode;\r
+       int                     send_flags;\r
+       __be32                  imm_data;\r
+       union {\r
+               struct {\r
+                       u64     remote_addr;\r
+                       u32     rkey;\r
+               } rdma;\r
+               struct {\r
+                       u64     remote_addr;\r
+                       u64     compare_add;\r
+                       u64     swap;\r
+                       u32     rkey;\r
+               } atomic;\r
+               struct {\r
+                       struct ib_ah *ah;\r
+                       u32     remote_qpn;\r
+                       u32     remote_qkey;\r
+                       u16     pkey_index; /* valid for GSI only */\r
+                       u8      port_num;   /* valid for DR SMPs on switch only */\r
+               } ud;\r
+       } wr;\r
+};\r
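+/*\r
+ * Example (illustrative sketch; the local/remote addresses, length and\r
+ * keys are hypothetical): posting a single-SGE signaled RDMA write is a\r
+ * matter of filling the common fields plus the rdma member of the union\r
+ * above.\r
+ *\r
+ *     struct ib_sge sge;\r
+ *     struct ib_send_wr wr;\r
+ *\r
+ *     sge.addr   = local_dma_addr;\r
+ *     sge.length = len;\r
+ *     sge.lkey   = mr->lkey;\r
+ *\r
+ *     memset(&wr, 0, sizeof wr);\r
+ *     wr.wr_id      = my_wr_id;\r
+ *     wr.sg_list    = &sge;\r
+ *     wr.num_sge    = 1;\r
+ *     wr.opcode     = IB_WR_RDMA_WRITE;\r
+ *     wr.send_flags = IB_SEND_SIGNALED;\r
+ *     wr.wr.rdma.remote_addr = remote_addr;\r
+ *     wr.wr.rdma.rkey        = rkey;\r
+ */\r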
+\r
+struct ib_recv_wr {\r
+       struct ib_recv_wr      *next;\r
+       u64                     wr_id;\r
+       struct ib_sge          *sg_list;\r
+       int                     num_sge;\r
+};\r
+\r
+enum ib_access_flags {\r
+       IB_ACCESS_LOCAL_WRITE   = 1,\r
+       IB_ACCESS_REMOTE_WRITE  = (1<<1),\r
+       IB_ACCESS_REMOTE_READ   = (1<<2),\r
+       IB_ACCESS_REMOTE_ATOMIC = (1<<3),\r
+       IB_ACCESS_MW_BIND       = (1<<4)\r
+};\r
+\r
+struct ib_phys_buf {\r
+       u64      addr;\r
+       u64      size;\r
+};\r
+\r
+struct ib_mr_attr {\r
+       struct ib_pd    *pd;\r
+       u64             device_virt_addr;\r
+       u64             size;\r
+       int             mr_access_flags;\r
+       u32             lkey;\r
+       u32             rkey;\r
+};\r
+\r
+enum ib_mr_rereg_flags {\r
+       IB_MR_REREG_TRANS       = 1,\r
+       IB_MR_REREG_PD          = (1<<1),\r
+       IB_MR_REREG_ACCESS      = (1<<2)\r
+};\r
+\r
+struct ib_mw_bind {\r
+       struct ib_mr   *mr;\r
+       u64             wr_id;\r
+       u64             addr;\r
+       u32             length;\r
+       int             send_flags;\r
+       int             mw_access_flags;\r
+};\r
+\r
+struct ib_fmr_attr {\r
+       int     max_pages;\r
+       int     max_maps;\r
+       u8      page_shift;\r
+};\r
+\r
+struct ib_ucontext {\r
+       struct ib_device       *device;\r
+       int                     closing;\r
+       struct ib_ucontext_ex   x;\r
+};\r
+\r
+struct ib_udata {\r
+       void        *inbuf;\r
+       void        *outbuf;\r
+       size_t       inlen;\r
+       size_t       outlen;\r
+};\r
+\r
+#define INIT_UDATA(udata, ibuf, obuf, ilen, olen)                      \\r
+       {                                                               \\r
+               (udata)->inbuf  = (void *) (ibuf);              \\r
+               (udata)->outbuf = (void *) (obuf);              \\r
+               (udata)->inlen  = (ilen);                               \\r
+               (udata)->outlen = (olen);                               \\r
+       }\r
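+/*\r
+ * Example (illustrative sketch, modeled on how the kernel proxy paths in\r
+ * this tree wrap a ci_umv_buf_t): describe the caller's in/out blobs\r
+ * before handing them to a verb that takes a struct ib_udata.\r
+ *\r
+ *     struct ib_udata udata;\r
+ *     INIT_UDATA(&udata, p_umv_buf->p_inout_buf, p_umv_buf->p_inout_buf,\r
+ *                p_umv_buf->input_size, p_umv_buf->output_size);\r
+ */\r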
+\r
+struct ib_pd {\r
+       struct ib_device       *device;\r
+       struct ib_ucontext     *p_uctx;\r
+       atomic_t                usecnt; /* count all resources */\r
+};\r
+\r
+struct ib_ah {\r
+       struct ib_device        *device;\r
+       struct ib_pd            *pd;\r
+       struct ib_ucontext      *p_uctx;\r
+};\r
+\r
+typedef void (*ib_comp_handler)(void *cq_context);\r
+\r
+struct ib_cq {\r
+       struct ib_device       *device;\r
+       struct ib_ucontext     *p_uctx;\r
+       ib_comp_handler         comp_handler;\r
+       void                  (*event_handler)(ib_event_rec_t *);\r
+       void *                  cq_context;\r
+       int                     cqe;\r
+       atomic_t                usecnt; /* count number of work queues */\r
+};\r
+\r
+struct ib_srq {\r
+       struct ib_device       *device;\r
+       struct ib_pd           *pd;\r
+       struct ib_ucontext     *p_uctx;\r
+       void                  (*event_handler)(ib_event_rec_t *);\r
+       void                   *srq_context;\r
+       atomic_t                usecnt;\r
+};\r
+\r
+struct ib_qp {\r
+       struct ib_device       *device;\r
+       struct ib_pd           *pd;\r
+       struct ib_cq           *send_cq;\r
+       struct ib_cq           *recv_cq;\r
+       struct ib_srq          *srq;\r
+       struct ib_ucontext     *p_uctx;\r
+       void                  (*event_handler)(ib_event_rec_t *);\r
+       void                   *qp_context;\r
+       u32                     qp_num;\r
+       enum ib_qp_type         qp_type;\r
+};\r
+\r
+struct ib_mr {\r
+       struct ib_device  *device;\r
+       struct ib_pd      *pd;\r
+       struct ib_ucontext *p_uctx;\r
+       u32                lkey;\r
+       u32                rkey;\r
+       atomic_t           usecnt; /* count number of MWs */\r
+};\r
+\r
+struct ib_mw {\r
+       struct ib_device        *device;\r
+       struct ib_pd            *pd;\r
+       struct ib_ucontext  *p_uctx;\r
+       u32                     rkey;\r
+};\r
+\r
+struct ib_fmr {\r
+       struct ib_device        *device;\r
+       struct ib_pd            *pd;\r
+       struct list_head        list;\r
+       u32                     lkey;\r
+       u32                     rkey;\r
+};\r
+\r
+struct ib_mad;\r
+struct ib_grh;\r
+\r
+enum ib_process_mad_flags {\r
+       IB_MAD_IGNORE_MKEY      = 1,\r
+       IB_MAD_IGNORE_BKEY      = 2,\r
+       IB_MAD_IGNORE_ALL       = IB_MAD_IGNORE_MKEY | IB_MAD_IGNORE_BKEY\r
+};\r
+\r
+enum ib_mad_result {\r
+       IB_MAD_RESULT_FAILURE  = 0,      /* (!SUCCESS is the important flag) */\r
+       IB_MAD_RESULT_SUCCESS  = 1 << 0, /* MAD was successfully processed   */\r
+       IB_MAD_RESULT_REPLY    = 1 << 1, /* Reply packet needs to be sent    */\r
+       IB_MAD_RESULT_CONSUMED = 1 << 2  /* Packet consumed: stop processing */\r
+};\r
+\r
+#define IB_DEVICE_NAME_MAX 64\r
+\r
+struct ib_cache {\r
+       rwlock_t                lock;\r
+       struct ib_event_handler event_handler;\r
+       struct ib_pkey_cache  **pkey_cache;\r
+       struct ib_gid_cache   **gid_cache;\r
+       u8                     *lmc_cache;\r
+       struct ib_cache_ex      x;\r
+};\r
+\r
+struct ib_dma_mapping_ops {\r
+       int             (*mapping_error)(struct ib_device *dev,\r
+                                        u64 dma_addr);\r
+       u64             (*map_single)(struct ib_device *dev,\r
+                                     void *ptr, size_t size,\r
+                                     enum dma_data_direction direction);\r
+       void            (*unmap_single)(struct ib_device *dev,\r
+                                       u64 addr, size_t size,\r
+                                       enum dma_data_direction direction);\r
+       u64             (*map_page)(struct ib_device *dev,\r
+                                   dma_addr_t page, unsigned long offset,\r
+                                   size_t size,\r
+                                   enum dma_data_direction direction);\r
+       void            (*unmap_page)(struct ib_device *dev,\r
+                                     u64 addr, size_t size,\r
+                                     enum dma_data_direction direction);\r
+       int             (*map_sg)(struct ib_device *dev,\r
+                                 struct scatterlist *sg, int nents,\r
+                                 enum dma_data_direction direction);\r
+       void            (*unmap_sg)(struct ib_device *dev,\r
+                                   struct scatterlist *sg, int nents,\r
+                                   enum dma_data_direction direction);\r
+       u64             (*dma_address)(struct ib_device *dev,\r
+                                      struct scatterlist *sg);\r
+       unsigned int    (*dma_len)(struct ib_device *dev,\r
+                                  struct scatterlist *sg);\r
+       void            (*sync_single_for_cpu)(struct ib_device *dev,\r
+                                              u64 dma_handle,\r
+                                              size_t size,\r
+                                              enum dma_data_direction dir);\r
+       void            (*sync_single_for_device)(struct ib_device *dev,\r
+                                                 u64 dma_handle,\r
+                                                 size_t size,\r
+                                                 enum dma_data_direction dir);\r
+       void            *(*alloc_coherent)(struct ib_device *dev,\r
+                                          size_t size,\r
+                                          u64 *dma_handle,\r
+                                          gfp_t flag);\r
+       void            (*free_coherent)(struct ib_device *dev,\r
+                                        size_t size, void *cpu_addr,\r
+                                        u64 dma_handle);\r
+};\r
+\r
+struct iw_cm_verbs;\r
+\r
+struct ib_device {\r
+       struct mlx4_dev                *dma_device;\r
+\r
+       char                          name[IB_DEVICE_NAME_MAX];\r
+\r
+       struct list_head              event_handler_list;\r
+       spinlock_t                    event_handler_lock;\r
+\r
+       struct list_head              core_list;\r
+       struct list_head              client_data_list;\r
+       spinlock_t                    client_data_lock;\r
+\r
+       struct ib_cache               cache;\r
+       int                          *pkey_tbl_len;\r
+       int                          *gid_tbl_len;\r
+\r
+       u32                           flags;\r
+\r
+       int                           num_comp_vectors;\r
+\r
+       struct iw_cm_verbs           *iwcm;\r
+\r
+       int                        (*query_device)(struct ib_device *device,\r
+                                                  struct ib_device_attr *device_attr);\r
+       int                        (*query_port)(struct ib_device *device,\r
+                                                u8 port_num,\r
+                                                struct ib_port_attr *port_attr);\r
+       int                        (*query_gid_chunk)(struct ib_device *device,\r
+                                               u8 port_num, int index,\r
+                                               union ib_gid gid[8], int size);\r
+       int                        (*query_pkey_chunk)(struct ib_device *device,\r
+                                                u8 port_num, u16 index, __be16 pkey[32], int size);\r
+       int                        (*modify_device)(struct ib_device *device,\r
+                                                   int device_modify_mask,\r
+                                                   struct ib_device_modify *device_modify);\r
+       int                        (*modify_port)(struct ib_device *device,\r
+                                                 u8 port_num, int port_modify_mask,\r
+                                                 struct ib_port_modify *port_modify);\r
+       struct ib_ucontext *       (*alloc_ucontext)(struct ib_device *device,\r
+                                                    struct ib_udata *udata);\r
+       int                        (*dealloc_ucontext)(struct ib_ucontext *context);\r
+       int                        (*mmap)(struct ib_ucontext *context,\r
+                                          struct vm_area_struct *vma);\r
+       struct ib_pd *             (*alloc_pd)(struct ib_device *device,\r
+                                              struct ib_ucontext *context,\r
+                                              struct ib_udata *udata);\r
+       int                        (*dealloc_pd)(struct ib_pd *pd);\r
+       struct ib_ah *             (*create_ah)(struct ib_pd *pd,\r
+                                               struct ib_ah_attr *ah_attr);\r
+       int                        (*modify_ah)(struct ib_ah *ah,\r
+                                               struct ib_ah_attr *ah_attr);\r
+       int                        (*query_ah)(struct ib_ah *ah,\r
+                                              struct ib_ah_attr *ah_attr);\r
+       int                        (*destroy_ah)(struct ib_ah *ah);\r
+       struct ib_srq *            (*create_srq)(struct ib_pd *pd,\r
+                                                struct ib_srq_init_attr *srq_init_attr,\r
+                                                struct ib_udata *udata);\r
+       int                        (*modify_srq)(struct ib_srq *srq,\r
+                                                struct ib_srq_attr *srq_attr,\r
+                                                enum ib_srq_attr_mask srq_attr_mask,\r
+                                                struct ib_udata *udata);\r
+       int                        (*query_srq)(struct ib_srq *srq,\r
+                                               struct ib_srq_attr *srq_attr);\r
+       int                        (*destroy_srq)(struct ib_srq *srq);\r
+       int                        (*post_srq_recv)(struct ib_srq *srq,\r
+                                                   ib_recv_wr_t *recv_wr,\r
+                                                   ib_recv_wr_t **bad_recv_wr);\r
+       struct ib_qp *             (*create_qp)(struct ib_pd *pd,\r
+                                               struct ib_qp_init_attr *qp_init_attr,\r
+                                               struct ib_udata *udata);\r
+       int                        (*modify_qp)(struct ib_qp *qp,\r
+                                               struct ib_qp_attr *qp_attr,\r
+                                               int qp_attr_mask,\r
+                                               struct ib_udata *udata);\r
+       int                        (*query_qp)(struct ib_qp *qp,\r
+                                              struct ib_qp_attr *qp_attr,\r
+                                              int qp_attr_mask,\r
+                                              struct ib_qp_init_attr *qp_init_attr);\r
+       int                        (*destroy_qp)(struct ib_qp *qp);\r
+       int                        (*post_send)(struct ib_qp *qp,\r
+                                               ib_send_wr_t *send_wr,\r
+                                               ib_send_wr_t **bad_send_wr);\r
+       int                        (*post_recv)(struct ib_qp *qp,\r
+                                               ib_recv_wr_t *recv_wr,\r
+                                               ib_recv_wr_t **bad_recv_wr);\r
+       struct ib_cq *             (*create_cq)(struct ib_device *device, int cqe,\r
+                                               int comp_vector,\r
+                                               struct ib_ucontext *context,\r
+                                               struct ib_udata *udata);\r
+       int                        (*modify_cq)(struct ib_cq *cq, u16 cq_count,\r
+                                               u16 cq_period);\r
+       int                        (*destroy_cq)(struct ib_cq *cq);\r
+       int                        (*resize_cq)(struct ib_cq *cq, int cqe,\r
+                                               struct ib_udata *udata);\r
+       int                        (*poll_cq)(struct ib_cq *ibcq,\r
+                                             ib_wc_t** const pp_free_wclist,\r
+                                             ib_wc_t** const pp_done_wclist);\r
+       int                        (*peek_cq)(struct ib_cq *cq, int wc_cnt);\r
+       int                        (*req_notify_cq)(struct ib_cq *cq,\r
+                                                   enum ib_cq_notify_flags flags);\r
+       int                        (*req_ncomp_notif)(struct ib_cq *cq,\r
+                                                     int wc_cnt);\r
+       struct ib_mr *             (*get_dma_mr)(struct ib_pd *pd,\r
+                                                int mr_access_flags);\r
+       struct ib_mr *             (*reg_phys_mr)(struct ib_pd *pd,\r
+                                                 struct ib_phys_buf *phys_buf_array,\r
+                                                 int num_phys_buf,\r
+                                                 int mr_access_flags,\r
+                                                 u64 *iova_start);\r
+       struct ib_mr *             (*reg_user_mr)(struct ib_pd *pd,\r
+                                                 u64 start, u64 length,\r
+                                                 u64 virt_addr,\r
+                                                 int mr_access_flags,\r
+                                                 struct ib_udata *udata);\r
+       int                        (*query_mr)(struct ib_mr *mr,\r
+                                              struct ib_mr_attr *mr_attr);\r
+       int                        (*dereg_mr)(struct ib_mr *mr);\r
+       int                        (*rereg_phys_mr)(struct ib_mr *mr,\r
+                                                   int mr_rereg_mask,\r
+                                                   struct ib_pd *pd,\r
+                                                   struct ib_phys_buf *phys_buf_array,\r
+                                                   int num_phys_buf,\r
+                                                   int mr_access_flags,\r
+                                                   u64 *iova_start);\r
+       struct ib_mw *             (*alloc_mw)(struct ib_pd *pd);\r
+       int                        (*bind_mw)(struct ib_qp *qp,\r
+                                             struct ib_mw *mw,\r
+                                             struct ib_mw_bind *mw_bind);\r
+       int                        (*dealloc_mw)(struct ib_mw *mw);\r
+       struct ib_fmr *            (*alloc_fmr)(struct ib_pd *pd,\r
+                                               int mr_access_flags,\r
+                                               struct ib_fmr_attr *fmr_attr);\r
+       int                        (*map_phys_fmr)(struct ib_fmr *fmr,\r
+                                                  u64 *page_list, int list_len,\r
+                                                  u64 iova);\r
+       int                        (*unmap_fmr)(struct list_head *fmr_list);\r
+       int                        (*dealloc_fmr)(struct ib_fmr *fmr);\r
+       int                        (*attach_mcast)(struct ib_qp *qp,\r
+                                                  union ib_gid *gid,\r
+                                                  u16 lid);\r
+       int                        (*detach_mcast)(struct ib_qp *qp,\r
+                                                  union ib_gid *gid,\r
+                                                  u16 lid);\r
+       int                        (*process_mad)(struct ib_device *device,\r
+                                                 int process_mad_flags,\r
+                                                 u8 port_num,\r
+                                                 ib_wc_t *in_wc,\r
+                                                 struct ib_grh *in_grh,\r
+                                                 struct ib_mad *in_mad,\r
+                                                 struct ib_mad *out_mad);\r
+\r
+       struct ib_dma_mapping_ops   *dma_ops;\r
+       struct list_head             port_list;\r
+\r
+       enum {\r
+               IB_DEV_UNINITIALIZED,\r
+               IB_DEV_REGISTERED,\r
+               IB_DEV_UNREGISTERED\r
+       }                            reg_state;\r
+\r
+       u64                          uverbs_cmd_mask;\r
+       int                          uverbs_abi_ver;\r
+\r
+       char                         node_desc[64];\r
+       __be64                       node_guid;\r
+       u8                           node_type;\r
+       u8                           phys_port_cnt;\r
+       struct ib_device_ex          x;\r
+};\r
+\r
+struct ib_client {\r
+       char  *name;\r
+       void (*add)   (struct ib_device *);\r
+       void (*remove)(struct ib_device *);\r
+\r
+       struct list_head list;\r
+};\r
+\r
+struct ib_device *ib_alloc_device(size_t size);\r
+void ib_dealloc_device(struct ib_device *device);\r
+\r
+int ib_register_device   (struct ib_device *device);\r
+void ib_unregister_device(struct ib_device *device);\r
+\r
+int ib_register_client   (struct ib_client *client);\r
+void ib_unregister_client(struct ib_client *client);\r
+\r
+void *ib_get_client_data(struct ib_device *device, struct ib_client *client);\r
+void  ib_set_client_data(struct ib_device *device, struct ib_client *client,\r
+                        void *data);\r
+\r
+static inline int ib_copy_from_udata(void *dest, struct ib_udata *udata, size_t len)\r
+{\r
+       if (len > udata->inlen)\r
+               return -EFAULT;\r
+       memcpy(dest, udata->inbuf, len);\r
+       return 0;\r
+}\r
+\r
+static inline int ib_copy_to_udata(struct ib_udata *udata, void *src, size_t len)\r
+{\r
+       if (len > udata->outlen)\r
+               return -EFAULT;\r
+       memcpy(udata->outbuf, src, len);\r
+       return 0;\r
+}\r
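+/*\r
+ * Example (illustrative sketch; struct my_resp, the cqn value and the\r
+ * error label are hypothetical): a verb implementation returns its\r
+ * response to the caller, failing the whole call if the output buffer\r
+ * is too small.\r
+ *\r
+ *     struct my_resp resp;\r
+ *     resp.cqn = cqn;\r
+ *     if (udata && ib_copy_to_udata(udata, &resp, sizeof resp))\r
+ *             goto err_copy;  // unwind the partially created object\r
+ */\r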
+\r
+/**\r
+ * ib_modify_qp_is_ok - Check that the supplied attribute mask\r
+ * contains all required attributes and no attributes not allowed for\r
+ * the given QP state transition.\r
+ * @cur_state: Current QP state\r
+ * @next_state: Next QP state\r
+ * @type: QP type\r
+ * @mask: Mask of supplied QP attributes\r
+ *\r
+ * This function is a helper function that a low-level driver's\r
+ * modify_qp method can use to validate the consumer's input.  It\r
+ * checks that cur_state and next_state are valid QP states, that a\r
+ * transition from cur_state to next_state is allowed by the IB spec,\r
+ * and that the attribute mask supplied is allowed for the transition.\r
+ */\r
+int ib_modify_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state next_state,\r
+                      enum ib_qp_type type, enum ib_qp_attr_mask mask);\r
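+/*\r
+ * Example (illustrative sketch; qp->state is a hypothetical field of the\r
+ * driver's private QP): a modify_qp method resolves the effective states\r
+ * and rejects an illegal transition or mask up front.\r
+ *\r
+ *     cur_state = attr_mask & IB_QP_CUR_STATE ? attr->cur_qp_state\r
+ *                                             : qp->state;\r
+ *     new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;\r
+ *     if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type,\r
+ *                             attr_mask))\r
+ *             return -EINVAL;\r
+ */\r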
+\r
+int ib_register_event_handler  (struct ib_event_handler *event_handler);\r
+int ib_unregister_event_handler(struct ib_event_handler *event_handler);\r
+void ib_dispatch_event(struct ib_event *event);\r
+\r
+int ib_query_device(struct ib_device *device,\r
+                   struct ib_device_attr *device_attr);\r
+\r
+int ib_query_port(struct ib_device *device,\r
+                 u8 port_num, struct ib_port_attr *port_attr);\r
+\r
+int ib_query_gid_chunk(struct ib_device *device,\r
+                u8 port_num, int index, union ib_gid gid[8], int size);\r
+\r
+int ib_query_pkey_chunk(struct ib_device *device,\r
+                 u8 port_num, u16 index, __be16 pkey[32], int size);\r
+\r
+int ib_modify_device(struct ib_device *device,\r
+                    int device_modify_mask,\r
+                    struct ib_device_modify *device_modify);\r
+\r
+int ib_modify_port(struct ib_device *device,\r
+                  u8 port_num, int port_modify_mask,\r
+                  struct ib_port_modify *port_modify);\r
+\r
+int ib_find_gid(struct ib_device *device, union ib_gid *gid,\r
+               u8 *port_num, u16 *index);\r
+\r
+int ib_find_pkey(struct ib_device *device,\r
+                u8 port_num, __be16 pkey, u16 *index);\r
+\r
+/**\r
+ * ib_alloc_pd - Allocates an unused protection domain.\r
+ * @device: The device on which to allocate the protection domain.\r
+ *\r
+ * A protection domain object provides an association between QPs, shared\r
+ * receive queues, address handles, memory regions, and memory windows.\r
+ */\r
+struct ib_pd *ib_alloc_pd(struct ib_device *device);\r
+\r
+/**\r
+ * ib_dealloc_pd - Deallocates a protection domain.\r
+ * @pd: The protection domain to deallocate.\r
+ */\r
+int ib_dealloc_pd(struct ib_pd *pd);\r
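+/*\r
+ * Example (illustrative sketch, assuming the Linux-style IS_ERR()/\r
+ * PTR_ERR() helpers this port borrows): a PD brackets the lifetime of\r
+ * the resources created against it.\r
+ *\r
+ *     struct ib_pd *pd = ib_alloc_pd(device);\r
+ *     if (IS_ERR(pd))\r
+ *             return PTR_ERR(pd);\r
+ *     // ... create CQs, QPs and MRs against pd ...\r
+ *     ib_dealloc_pd(pd);      // expected to fail while pd->usecnt != 0\r
+ */\r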
+\r
+/**\r
+ * ib_create_ah - Creates an address handle for the given address vector.\r
+ * @pd: The protection domain associated with the address handle.\r
+ * @ah_attr: The attributes of the address vector.\r
+ *\r
+ * The address handle is used to reference a local or global destination\r
+ * in all UD QP post sends.\r
+ */\r
+struct ib_ah *ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr);\r
+\r
+/**\r
+ * ib_init_ah_from_wc - Initializes address handle attributes from a\r
+ *   work completion.\r
+ * @device: Device on which the received message arrived.\r
+ * @port_num: Port on which the received message arrived.\r
+ * @wc: Work completion associated with the received message.\r
+ * @grh: References the received global route header.  This parameter is\r
+ *   ignored unless the work completion indicates that the GRH is valid.\r
+ * @ah_attr: Returned attributes that can be used when creating an address\r
+ *   handle for replying to the message.\r
+ */\r
+int ib_init_ah_from_wc(struct ib_device *device, u8 port_num, ib_wc_t *wc,\r
+                      struct ib_grh *grh, struct ib_ah_attr *ah_attr);\r
+\r
+/**\r
+ * ib_create_ah_from_wc - Creates an address handle associated with the\r
+ *   sender of the specified work completion.\r
+ * @pd: The protection domain associated with the address handle.\r
+ * @wc: Work completion information associated with a received message.\r
+ * @grh: References the received global route header.  This parameter is\r
+ *   ignored unless the work completion indicates that the GRH is valid.\r
+ * @port_num: The outbound port number to associate with the address.\r
+ *\r
+ * The address handle is used to reference a local or global destination\r
+ * in all UD QP post sends.\r
+ */\r
+struct ib_ah *ib_create_ah_from_wc(struct ib_pd *pd, ib_wc_t *wc,\r
+                                  struct ib_grh *grh, u8 port_num);\r
+\r
+/**\r
+ * ib_modify_ah - Modifies the address vector associated with an address\r
+ *   handle.\r
+ * @ah: The address handle to modify.\r
+ * @ah_attr: The new address vector attributes to associate with the\r
+ *   address handle.\r
+ */\r
+int ib_modify_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr);\r
+\r
+/**\r
+ * ib_query_ah - Queries the address vector associated with an address\r
+ *   handle.\r
+ * @ah: The address handle to query.\r
+ * @ah_attr: The address vector attributes associated with the address\r
+ *   handle.\r
+ */\r
+int ib_query_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr);\r
+\r
+/**\r
+ * ib_destroy_ah - Destroys an address handle.\r
+ * @ah: The address handle to destroy.\r
+ */\r
+int ib_destroy_ah(struct ib_ah *ah);\r
+\r
+/**\r
+ * ib_create_srq - Creates a SRQ associated with the specified protection\r
+ *   domain.\r
+ * @pd: The protection domain associated with the SRQ.\r
+ * @srq_init_attr: A list of initial attributes required to create the\r
+ *   SRQ.  If SRQ creation succeeds, then the attributes are updated to\r
+ *   the actual capabilities of the created SRQ.\r
+ *\r
+ * srq_attr->max_wr and srq_attr->max_sge are read to determine the\r
+ * requested size of the SRQ, and set to the actual values allocated\r
+ * on return.  If ib_create_srq() succeeds, then max_wr and max_sge\r
+ * will always be at least as large as the requested values.\r
+ */\r
+struct ib_srq *ib_create_srq(struct ib_pd *pd,\r
+                            struct ib_srq_init_attr *srq_init_attr);\r
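+/*\r
+ * Example (illustrative sketch; the handler, context and sizes are\r
+ * hypothetical): attr.max_wr/max_sge go in as requests and come back as\r
+ * the actual capabilities.\r
+ *\r
+ *     struct ib_srq_init_attr init_attr;\r
+ *\r
+ *     memset(&init_attr, 0, sizeof init_attr);\r
+ *     init_attr.event_handler = my_srq_event_handler;\r
+ *     init_attr.srq_context   = ctx;\r
+ *     init_attr.attr.max_wr   = 256;\r
+ *     init_attr.attr.max_sge  = 1;\r
+ *     srq = ib_create_srq(pd, &init_attr);\r
+ */\r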
+\r
+/**\r
+ * ib_modify_srq - Modifies the attributes for the specified SRQ.\r
+ * @srq: The SRQ to modify.\r
+ * @srq_attr: On input, specifies the SRQ attributes to modify.  On output,\r
+ *   the current values of selected SRQ attributes are returned.\r
+ * @srq_attr_mask: A bit-mask used to specify which attributes of the SRQ\r
+ *   are being modified.\r
+ *\r
+ * The mask may contain XIB_SRQ_MAX_WR to resize the SRQ and/or\r
+ * XIB_SRQ_LIMIT to set the SRQ's limit and request notification when\r
+ * the number of receives queued drops below the limit.\r
+ */\r
+int ib_modify_srq(struct ib_srq *srq,\r
+                 struct ib_srq_attr *srq_attr,\r
+                 enum ib_srq_attr_mask srq_attr_mask);\r
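+/*\r
+ * Example (illustrative sketch): arm the SRQ limit event so that a\r
+ * notification fires once fewer than 16 receives remain queued.\r
+ *\r
+ *     struct ib_srq_attr attr;\r
+ *\r
+ *     memset(&attr, 0, sizeof attr);\r
+ *     attr.srq_limit = 16;\r
+ *     ib_modify_srq(srq, &attr, XIB_SRQ_LIMIT);\r
+ */\r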
+\r
+/**\r
+ * ib_query_srq - Returns the attribute list and current values for the\r
+ *   specified SRQ.\r
+ * @srq: The SRQ to query.\r
+ * @srq_attr: The attributes of the specified SRQ.\r
+ */\r
+int ib_query_srq(struct ib_srq *srq,\r
+                struct ib_srq_attr *srq_attr);\r
+\r
+/**\r
+ * ib_destroy_srq - Destroys the specified SRQ.\r
+ * @srq: The SRQ to destroy.\r
+ */\r
+int ib_destroy_srq(struct ib_srq *srq);\r
+\r
+/**\r
+ * ib_post_srq_recv - Posts a list of work requests to the specified SRQ.\r
+ * @srq: The SRQ to post the work request on.\r
+ * @recv_wr: A list of work requests to post on the receive queue.\r
+ * @bad_recv_wr: On an immediate failure, this parameter will reference\r
+ *   the work request that failed to be posted on the QP.\r
+ */\r
+static inline int ib_post_srq_recv(struct ib_srq *srq,\r
+                                  ib_recv_wr_t *recv_wr,\r
+                                  ib_recv_wr_t **bad_recv_wr)\r
+{\r
+       return srq->device->post_srq_recv(srq, recv_wr, bad_recv_wr);\r
+}\r
+\r
+/**\r
+ * ib_create_qp - Creates a QP associated with the specified protection\r
+ *   domain.\r
+ * @pd: The protection domain associated with the QP.\r
+ * @qp_init_attr: A list of initial attributes required to create the\r
+ *   QP.  If QP creation succeeds, then the attributes are updated to\r
+ *   the actual capabilities of the created QP.\r
+ */\r
+struct ib_qp *ib_create_qp(struct ib_pd *pd,\r
+                          struct ib_qp_init_attr *qp_init_attr);\r
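+/*\r
+ * Example (illustrative sketch; the handler, context and sizes are\r
+ * hypothetical): creating an RC QP over existing CQs.\r
+ *\r
+ *     struct ib_qp_init_attr init_attr;\r
+ *\r
+ *     memset(&init_attr, 0, sizeof init_attr);\r
+ *     init_attr.event_handler    = my_qp_event_handler;\r
+ *     init_attr.qp_context       = ctx;\r
+ *     init_attr.send_cq          = send_cq;\r
+ *     init_attr.recv_cq          = recv_cq;\r
+ *     init_attr.cap.max_send_wr  = 64;\r
+ *     init_attr.cap.max_recv_wr  = 64;\r
+ *     init_attr.cap.max_send_sge = 1;\r
+ *     init_attr.cap.max_recv_sge = 1;\r
+ *     init_attr.sq_sig_type      = IB_SIGNAL_REQ_WR;\r
+ *     init_attr.qp_type          = IB_QPT_RC;\r
+ *     qp = ib_create_qp(pd, &init_attr);\r
+ */\r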
+\r
+/**\r
+ * ib_modify_qp - Modifies the attributes for the specified QP and then\r
+ *   transitions the QP to the given state.\r
+ * @qp: The QP to modify.\r
+ * @qp_attr: On input, specifies the QP attributes to modify.  On output,\r
+ *   the current values of selected QP attributes are returned.\r
+ * @qp_attr_mask: A bit-mask used to specify which attributes of the QP\r
+ *   are being modified.\r
+ */\r
+int ib_modify_qp(struct ib_qp *qp,\r
+                struct ib_qp_attr *qp_attr,\r
+                int qp_attr_mask);\r
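+/*\r
+ * Example (illustrative sketch): the RESET->INIT transition for an RC\r
+ * QP, with the attribute mask that transition requires.\r
+ *\r
+ *     struct ib_qp_attr attr;\r
+ *\r
+ *     memset(&attr, 0, sizeof attr);\r
+ *     attr.qp_state        = XIB_QPS_INIT;\r
+ *     attr.pkey_index      = 0;\r
+ *     attr.port_num        = 1;\r
+ *     attr.qp_access_flags = IB_ACCESS_REMOTE_READ |\r
+ *                            IB_ACCESS_REMOTE_WRITE;\r
+ *     ib_modify_qp(qp, &attr,\r
+ *                  IB_QP_STATE | IB_QP_PKEY_INDEX | IB_QP_PORT |\r
+ *                  IB_QP_ACCESS_FLAGS);\r
+ */\r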
+\r
+/**\r
+ * ib_query_qp - Returns the attribute list and current values for the\r
+ *   specified QP.\r
+ * @qp: The QP to query.\r
+ * @qp_attr: The attributes of the specified QP.\r
+ * @qp_attr_mask: A bit-mask used to select specific attributes to query.\r
+ * @qp_init_attr: Additional attributes of the selected QP.\r
+ *\r
+ * The qp_attr_mask may be used to limit the query to gathering only the\r
+ * selected attributes.\r
+ */\r
+int ib_query_qp(struct ib_qp *qp,\r
+               struct ib_qp_attr *qp_attr,\r
+               int qp_attr_mask,\r
+               struct ib_qp_init_attr *qp_init_attr);\r
+\r
+/**\r
+ * ib_modify_cq - Modifies moderation params of the CQ\r
+ * @cq: The CQ to modify.\r
+ * @cq_count: number of CQEs that will trigger an event\r
+ * @cq_period: max period of time in usec before triggering an event\r
+ */\r
+int ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period);\r
+\r
+/**\r
+ * ib_destroy_qp - Destroys the specified QP.\r
+ * @qp: The QP to destroy.\r
+ */\r
+int ib_destroy_qp(struct ib_qp *qp);\r
+\r
+/**\r
+ * ib_post_send - Posts a list of work requests to the send queue of\r
+ *   the specified QP.\r
+ * @qp: The QP to post the work request on.\r
+ * @send_wr: A list of work requests to post on the send queue.\r
+ * @bad_send_wr: On an immediate failure, this parameter will reference\r
+ *   the work request that failed to be posted on the QP.\r
+ */\r
+static inline int ib_post_send(struct ib_qp *qp,\r
+                              ib_send_wr_t *send_wr,\r
+                              ib_send_wr_t **bad_send_wr)\r
+{\r
+       return qp->device->post_send(qp, send_wr, bad_send_wr);\r
+}\r
+\r
+/**\r
+ * ib_post_recv - Posts a list of work requests to the receive queue of\r
+ *   the specified QP.\r
+ * @qp: The QP to post the work request on.\r
+ * @recv_wr: A list of work requests to post on the receive queue.\r
+ * @bad_recv_wr: On an immediate failure, this parameter will reference\r
+ *   the work request that failed to be posted on the QP.\r
+ */\r
+static inline int ib_post_recv(struct ib_qp *qp,\r
+                              ib_recv_wr_t *recv_wr,\r
+                              ib_recv_wr_t **bad_recv_wr)\r
+{\r
+       return qp->device->post_recv(qp, recv_wr, bad_recv_wr);\r
+}\r
+\r
+/**\r
+ * ib_create_cq - Creates a CQ on the specified device.\r
+ * @device: The device on which to create the CQ.\r
+ * @comp_handler: A user-specified callback that is invoked when a\r
+ *   completion event occurs on the CQ.\r
+ * @event_handler: A user-specified callback that is invoked when an\r
+ *   asynchronous event not associated with a completion occurs on the CQ.\r
+ * @cq_context: Context associated with the CQ returned to the user via\r
+ *   the associated completion and event handlers.\r
+ * @cqe: The minimum size of the CQ.\r
+ * @comp_vector: Completion vector used to signal completion events.\r
+ *     Must be >= 0 and < device->num_comp_vectors.\r
+ *\r
+ * Users can examine the cq structure to determine the actual CQ size.\r
+ */\r
+struct ib_cq *ib_create_cq(struct ib_device *device,\r
+                          ib_comp_handler comp_handler,\r
+                          void (*event_handler)(ib_event_rec_t *),\r
+                          void *cq_context, int cqe, int comp_vector);\r
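+/*\r
+ * Example (illustrative sketch; the handlers and context are\r
+ * hypothetical): ask for at least 256 entries on completion vector 0.\r
+ *\r
+ *     cq = ib_create_cq(device, my_comp_handler, my_event_handler,\r
+ *                       ctx, 256, 0);\r
+ */\r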
+\r
+/**\r
+ * ib_resize_cq - Modifies the capacity of the CQ.\r
+ * @cq: The CQ to resize.\r
+ * @cqe: The minimum size of the CQ.\r
+ *\r
+ * Users can examine the cq structure to determine the actual CQ size.\r
+ */\r
+int ib_resize_cq(struct ib_cq *cq, int cqe);\r
+\r
+/**\r
+ * ib_destroy_cq - Destroys the specified CQ.\r
+ * @cq: The CQ to destroy.\r
+ */\r
+int ib_destroy_cq(struct ib_cq *cq);\r
+\r
+/**\r
+ * ib_poll_cq - poll a CQ for completion(s)\r
+ * @cq: the CQ being polled\r
+ * @pp_free_wclist:\r
+ *             On input, a list of work completion structures provided by\r
+ *             the client.  These are used to report completed work requests through\r
+ *             the pp_done_wclist.\r
+ *\r
+ *             On output, this contains the list of work completion structures for\r
+ *             which no work completion was found.\r
+ * @pp_done_wclist: A list of work completions retrieved from the completion queue.\r
+ *\r
+ * Poll a CQ for (possibly multiple) completions.  If the return value\r
+ * is < 0, an error occurred.  If the return value is >= 0, it is the\r
+ * number of completions returned.  If the return value is\r
+ * non-negative and less than the number of work completion structures\r
+ * supplied in pp_free_wclist, then the CQ was emptied.\r
+ */\r
+static inline int ib_poll_cq(struct ib_cq *cq, ib_wc_t** const pp_free_wclist,\r
+                            ib_wc_t** const pp_done_wclist)\r
+{\r
+       return cq->device->poll_cq(cq, pp_free_wclist, pp_done_wclist);\r
+}\r
+\r
+/**\r
+ * ib_peek_cq - Returns the number of unreaped completions currently\r
+ *   on the specified CQ.\r
+ * @cq: The CQ to peek.\r
+ * @wc_cnt: A minimum number of unreaped completions to check for.\r
+ *\r
+ * If the number of unreaped completions is greater than or equal to wc_cnt,\r
+ * this function returns wc_cnt, otherwise, it returns the actual number of\r
+ * unreaped completions.\r
+ */\r
+int ib_peek_cq(struct ib_cq *cq, int wc_cnt);\r
+\r
+/**\r
+ * ib_req_notify_cq - Request completion notification on a CQ.\r
+ * @cq: The CQ to generate an event for.\r
+ * @flags:\r
+ *   Must contain exactly one of %IB_CQ_SOLICITED or %IB_CQ_NEXT_COMP\r
+ *   to request an event on the next solicited event or next work\r
+ *   completion of any type, respectively. %IB_CQ_REPORT_MISSED_EVENTS\r
+ *   may also be |ed in to request a hint about missed events, as\r
+ *   described below.\r
+ *\r
+ * Return Value:\r
+ *    < 0 means an error occurred while requesting notification\r
+ *   == 0 means notification was requested successfully, and if\r
+ *        IB_CQ_REPORT_MISSED_EVENTS was passed in, then no events\r
+ *        were missed and it is safe to wait for another event.  In\r
+ *        this case it is guaranteed that any work completions added\r
+ *        to the CQ since the last CQ poll will trigger a completion\r
+ *        notification event.\r
+ *    > 0 is only returned if IB_CQ_REPORT_MISSED_EVENTS was passed\r
+ *        in.  It means that the consumer must poll the CQ again to\r
+ *        make sure it is empty to avoid missing an event because of a\r
+ *        race between requesting notification and an entry being\r
+ *        added to the CQ.  This return value means it is possible\r
+ *        (but not guaranteed) that a work completion has been added\r
+ *        to the CQ since the last poll without triggering a\r
+ *        completion notification event.\r
+ */\r
+static inline int ib_req_notify_cq(struct ib_cq *cq,\r
+                                  enum ib_cq_notify_flags flags)\r
+{\r
+       return cq->device->req_notify_cq(cq, flags);\r
+}\r
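+/*\r
+ * Example (illustrative sketch): the re-arm pattern implied by the\r
+ * return values above - drain the CQ, request notification, and poll\r
+ * again whenever events may have been missed.\r
+ *\r
+ *     do {\r
+ *             // ... ib_poll_cq() until pp_done_wclist comes back\r
+ *             //     empty, reaping each completion ...\r
+ *     } while (ib_req_notify_cq(cq, IB_CQ_NEXT_COMP |\r
+ *                                   IB_CQ_REPORT_MISSED_EVENTS) > 0);\r
+ */\r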
+\r
+/**\r
+ * ib_req_ncomp_notif - Request completion notification when there are\r
+ *   at least the specified number of unreaped completions on the CQ.\r
+ * @cq: The CQ to generate an event for.\r
+ * @wc_cnt: The number of unreaped completions that should be on the\r
+ *   CQ before an event is generated.\r
+ */\r
+static inline int ib_req_ncomp_notif(struct ib_cq *cq, int wc_cnt)\r
+{\r
+       return cq->device->req_ncomp_notif ?\r
+               cq->device->req_ncomp_notif(cq, wc_cnt) :\r
+               -ENOSYS;\r
+}\r
+\r
+/**\r
+ * ib_get_dma_mr - Returns a memory region for system memory that is\r
+ *   usable for DMA.\r
+ * @pd: The protection domain associated with the memory region.\r
+ * @mr_access_flags: Specifies the memory access rights.\r
+ *\r
+ * Note that the ib_dma_*() functions defined below must be used\r
+ * to create/destroy addresses used with the Lkey or Rkey returned\r
+ * by ib_get_dma_mr().\r
+ */\r
+struct ib_mr *ib_get_dma_mr(struct ib_pd *pd, int mr_access_flags);\r
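+/*\r
+ * Example (illustrative sketch): a kernel ULP that only DMAs into and\r
+ * out of local memory needs just local write access; the returned\r
+ * mr->lkey is what goes into ib_sge entries.\r
+ *\r
+ *     mr = ib_get_dma_mr(pd, IB_ACCESS_LOCAL_WRITE);\r
+ */\r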
+\r
+#if 0\r
+// TODO: do we need this?\r
+/**\r
+ * ib_dma_mapping_error - check a DMA addr for error\r
+ * @dev: The device for which the dma_addr was created\r
+ * @dma_addr: The DMA address to check\r
+ */\r
+static inline int ib_dma_mapping_error(struct ib_device *dev, u64 dma_addr)\r
+{\r
+       if (dev->dma_ops)\r
+               return dev->dma_ops->mapping_error(dev, dma_addr);\r
+       return dma_mapping_error(dma_addr);\r
+}\r
+\r
+/**\r
+ * ib_dma_map_single - Map a kernel virtual address to DMA address\r
+ * @dev: The device for which the dma_addr is to be created\r
+ * @cpu_addr: The kernel virtual address\r
+ * @size: The size of the region in bytes\r
+ * @direction: The direction of the DMA\r
+ */\r
+static inline u64 ib_dma_map_single(struct ib_device *dev,\r
+                                   void *cpu_addr, size_t size,\r
+                                   enum dma_data_direction direction)\r
+{\r
+       if (dev->dma_ops)\r
+               return dev->dma_ops->map_single(dev, cpu_addr, size, direction);\r
+       return dma_map_single(dev->dma_device, cpu_addr, size, direction);\r
+}\r
+\r
+/**\r
+ * ib_dma_unmap_single - Destroy a mapping created by ib_dma_map_single()\r
+ * @dev: The device for which the DMA address was created\r
+ * @addr: The DMA address\r
+ * @size: The size of the region in bytes\r
+ * @direction: The direction of the DMA\r
+ */\r
+static inline void ib_dma_unmap_single(struct ib_device *dev,\r
+                                      u64 addr, size_t size,\r
+                                      enum dma_data_direction direction)\r
+{\r
+       if (dev->dma_ops)\r
+               dev->dma_ops->unmap_single(dev, addr, size, direction);\r
+       else\r
+               dma_unmap_single(dev->dma_device, addr, size, direction);\r
+}\r
+\r
+/**\r
+ * ib_dma_map_page - Map a physical page to DMA address\r
+ * @dev: The device for which the dma_addr is to be created\r
+ * @page: The page to be mapped\r
+ * @offset: The offset within the page\r
+ * @size: The size of the region in bytes\r
+ * @direction: The direction of the DMA\r
+ */\r
+static inline u64 ib_dma_map_page(struct ib_device *dev,\r
+                                 struct page *page,\r
+                                 unsigned long offset,\r
+                                 size_t size,\r
+                                        enum dma_data_direction direction)\r
+{\r
+       if (dev->dma_ops)\r
+               return dev->dma_ops->map_page(dev, page, offset, size, direction);\r
+       return dma_map_page(dev->dma_device, page, offset, size, direction);\r
+}\r
+\r
+/**\r
+ * ib_dma_unmap_page - Destroy a mapping created by ib_dma_map_page()\r
+ * @dev: The device for which the DMA address was created\r
+ * @addr: The DMA address\r
+ * @size: The size of the region in bytes\r
+ * @direction: The direction of the DMA\r
+ */\r
+static inline void ib_dma_unmap_page(struct ib_device *dev,\r
+                                    u64 addr, size_t size,\r
+                                    enum dma_data_direction direction)\r
+{\r
+       if (dev->dma_ops)\r
+               dev->dma_ops->unmap_page(dev, addr, size, direction);\r
+       else\r
+               dma_unmap_page(dev->dma_device, addr, size, direction);\r
+}\r
+\r
+/**\r
+ * ib_dma_map_sg - Map a scatter/gather list to DMA addresses\r
+ * @dev: The device for which the DMA addresses are to be created\r
+ * @sg: The array of scatter/gather entries\r
+ * @nents: The number of scatter/gather entries\r
+ * @direction: The direction of the DMA\r
+ */\r
+static inline int ib_dma_map_sg(struct ib_device *dev,\r
+                               struct scatterlist *sg, int nents,\r
+                               enum dma_data_direction direction)\r
+{\r
+       if (dev->dma_ops)\r
+               return dev->dma_ops->map_sg(dev, sg, nents, direction);\r
+       return dma_map_sg(dev->dma_device, sg, nents, direction);\r
+}\r
+\r
+/**\r
+ * ib_dma_unmap_sg - Unmap a scatter/gather list of DMA addresses\r
+ * @dev: The device for which the DMA addresses were created\r
+ * @sg: The array of scatter/gather entries\r
+ * @nents: The number of scatter/gather entries\r
+ * @direction: The direction of the DMA\r
+ */\r
+static inline void ib_dma_unmap_sg(struct ib_device *dev,\r
+                                  struct scatterlist *sg, int nents,\r
+                                  enum dma_data_direction direction)\r
+{\r
+       if (dev->dma_ops)\r
+               dev->dma_ops->unmap_sg(dev, sg, nents, direction);\r
+       else\r
+               dma_unmap_sg(dev->dma_device, sg, nents, direction);\r
+}\r
+\r
+/**\r
+ * ib_sg_dma_address - Return the DMA address from a scatter/gather entry\r
+ * @dev: The device for which the DMA addresses were created\r
+ * @sg: The scatter/gather entry\r
+ */\r
+static inline u64 ib_sg_dma_address(struct ib_device *dev,\r
+                                   struct scatterlist *sg)\r
+{\r
+       if (dev->dma_ops)\r
+               return dev->dma_ops->dma_address(dev, sg);\r
+       return sg_dma_address(sg);\r
+}\r
+\r
+/**\r
+ * ib_sg_dma_len - Return the DMA length from a scatter/gather entry\r
+ * @dev: The device for which the DMA addresses were created\r
+ * @sg: The scatter/gather entry\r
+ */\r
+static inline unsigned int ib_sg_dma_len(struct ib_device *dev,\r
+                                        struct scatterlist *sg)\r
+{\r
+       if (dev->dma_ops)\r
+               return dev->dma_ops->dma_len(dev, sg);\r
+       return sg_dma_len(sg);\r
+}\r
+\r
+/**\r
+ * ib_dma_sync_single_for_cpu - Prepare DMA region to be accessed by CPU\r
+ * @dev: The device for which the DMA address was created\r
+ * @addr: The DMA address\r
+ * @size: The size of the region in bytes\r
+ * @dir: The direction of the DMA\r
+ */\r
+static inline void ib_dma_sync_single_for_cpu(struct ib_device *dev,\r
+                                             u64 addr,\r
+                                             size_t size,\r
+                                             enum dma_data_direction dir)\r
+{\r
+       if (dev->dma_ops)\r
+               dev->dma_ops->sync_single_for_cpu(dev, addr, size, dir);\r
+       else\r
+               dma_sync_single_for_cpu(dev->dma_device, addr, size, dir);\r
+}\r
+\r
+/**\r
+ * ib_dma_sync_single_for_device - Prepare DMA region to be accessed by device\r
+ * @dev: The device for which the DMA address was created\r
+ * @addr: The DMA address\r
+ * @size: The size of the region in bytes\r
+ * @dir: The direction of the DMA\r
+ */\r
+static inline void ib_dma_sync_single_for_device(struct ib_device *dev,\r
+                                                u64 addr,\r
+                                                size_t size,\r
+                                                enum dma_data_direction dir)\r
+{\r
+       if (dev->dma_ops)\r
+               dev->dma_ops->sync_single_for_device(dev, addr, size, dir);\r
+       else\r
+               dma_sync_single_for_device(dev->dma_device, addr, size, dir);\r
+}\r
+\r
+/**\r
+ * ib_dma_alloc_coherent - Allocate memory and map it for DMA\r
+ * @dev: The device for which the DMA address is requested\r
+ * @size: The size of the region to allocate in bytes\r
+ * @dma_handle: A pointer for returning the DMA address of the region\r
+ * @flag: memory allocator flags\r
+ */\r
+static inline void *ib_dma_alloc_coherent(struct ib_device *dev,\r
+                                          size_t size,\r
+                                          u64 *dma_handle,\r
+                                          gfp_t flag)\r
+{\r
+       if (dev->dma_ops)\r
+               return dev->dma_ops->alloc_coherent(dev, size, dma_handle, flag);\r
+       else {\r
+               dma_addr_t handle;\r
+               void *ret;\r
+\r
+               ret = dma_alloc_coherent(dev->dma_device, size, &handle, flag);\r
+               *dma_handle = handle;\r
+               return ret;\r
+       }\r
+}\r
+\r
+/**\r
+ * ib_dma_free_coherent - Free memory allocated by ib_dma_alloc_coherent()\r
+ * @dev: The device for which the DMA addresses were allocated\r
+ * @size: The size of the region\r
+ * @cpu_addr: the address returned by ib_dma_alloc_coherent()\r
+ * @dma_handle: the DMA address returned by ib_dma_alloc_coherent()\r
+ */\r
+static inline void ib_dma_free_coherent(struct ib_device *dev,\r
+                                       size_t size, void *cpu_addr,\r
+                                       u64 dma_handle)\r
+{\r
+       if (dev->dma_ops)\r
+               dev->dma_ops->free_coherent(dev, size, cpu_addr, dma_handle);\r
+       else\r
+               dma_free_coherent(dev->dma_device, size, cpu_addr, dma_handle);\r
+}\r
+\r
+#endif\r
+\r
+/**\r
+ * ib_reg_phys_mr - Prepares a virtually addressed memory region for use\r
+ *   by an HCA.\r
+ * @pd: The protection domain assigned to the registered region.\r
+ * @phys_buf_array: Specifies a list of physical buffers to use in the\r
+ *   memory region.\r
+ * @num_phys_buf: Specifies the size of the phys_buf_array.\r
+ * @mr_access_flags: Specifies the memory access rights.\r
+ * @iova_start: The offset of the region's starting I/O virtual address.\r
+ */\r
+struct ib_mr *ib_reg_phys_mr(struct ib_pd *pd,\r
+                            struct ib_phys_buf *phys_buf_array,\r
+                            int num_phys_buf,\r
+                            int mr_access_flags,\r
+                            u64 *iova_start);\r
+\r
+/**\r
+ * ib_rereg_phys_mr - Modifies the attributes of an existing memory region.\r
+ *   Conceptually, this call deregisters the memory region and then\r
+ *   registers the physical memory region in its place.  Where possible,\r
+ *   resources are reused instead of deallocated and reallocated.\r
+ * @mr: The memory region to modify.\r
+ * @mr_rereg_mask: A bit-mask used to indicate which of the following\r
+ *   properties of the memory region are being modified.\r
+ * @pd: If %IB_MR_REREG_PD is set in mr_rereg_mask, this field specifies\r
+ *   the new protection domain to associate with the memory region,\r
+ *   otherwise, this parameter is ignored.\r
+ * @phys_buf_array: If %IB_MR_REREG_TRANS is set in mr_rereg_mask, this\r
+ *   field specifies a list of physical buffers to use in the new\r
+ *   translation, otherwise, this parameter is ignored.\r
+ * @num_phys_buf: If %IB_MR_REREG_TRANS is set in mr_rereg_mask, this\r
+ *   field specifies the size of the phys_buf_array, otherwise, this\r
+ *   parameter is ignored.\r
+ * @mr_access_flags: If %IB_MR_REREG_ACCESS is set in mr_rereg_mask, this\r
+ *   field specifies the new memory access rights, otherwise, this\r
+ *   parameter is ignored.\r
+ * @iova_start: The offset of the region's starting I/O virtual address.\r
+ */\r
+int ib_rereg_phys_mr(struct ib_mr *mr,\r
+                    int mr_rereg_mask,\r
+                    struct ib_pd *pd,\r
+                    struct ib_phys_buf *phys_buf_array,\r
+                    int num_phys_buf,\r
+                    int mr_access_flags,\r
+                    u64 *iova_start);\r
+\r
+/**\r
+ * ib_query_mr - Retrieves information about a specific memory region.\r
+ * @mr: The memory region to retrieve information about.\r
+ * @mr_attr: The attributes of the specified memory region.\r
+ */\r
+int ib_query_mr(struct ib_mr *mr, struct ib_mr_attr *mr_attr);\r
+\r
+/**\r
+ * ib_dereg_mr - Deregisters a memory region and removes it from the\r
+ *   HCA translation table.\r
+ * @mr: The memory region to deregister.\r
+ */\r
+int ib_dereg_mr(struct ib_mr *mr);\r
+\r
+/**\r
+ * ib_alloc_mw - Allocates a memory window.\r
+ * @pd: The protection domain associated with the memory window.\r
+ */\r
+struct ib_mw *ib_alloc_mw(struct ib_pd *pd);\r
+\r
+/**\r
+ * ib_bind_mw - Posts a work request to the send queue of the specified\r
+ *   QP, which binds the memory window to the given address range and\r
+ *   remote access attributes.\r
+ * @qp: QP to post the bind work request on.\r
+ * @mw: The memory window to bind.\r
+ * @mw_bind: Specifies information about the memory window, including\r
+ *   its address range, remote access rights, and associated memory region.\r
+ */\r
+static inline int ib_bind_mw(struct ib_qp *qp,\r
+                            struct ib_mw *mw,\r
+                            struct ib_mw_bind *mw_bind)\r
+{\r
+       /* XXX reference counting in corresponding MR? */\r
+       return mw->device->bind_mw ?\r
+               mw->device->bind_mw(qp, mw, mw_bind) :\r
+               -ENOSYS;\r
+}\r
+\r
+/**\r
+ * ib_dealloc_mw - Deallocates a memory window.\r
+ * @mw: The memory window to deallocate.\r
+ */\r
+int ib_dealloc_mw(struct ib_mw *mw);\r
+\r
+/**\r
+ * ib_alloc_fmr - Allocates an unmapped fast memory region.\r
+ * @pd: The protection domain associated with the unmapped region.\r
+ * @mr_access_flags: Specifies the memory access rights.\r
+ * @fmr_attr: Attributes of the unmapped region.\r
+ *\r
+ * A fast memory region must be mapped before it can be used as part of\r
+ * a work request.\r
+ */\r
+struct ib_fmr *ib_alloc_fmr(struct ib_pd *pd,\r
+                           int mr_access_flags,\r
+                           struct ib_fmr_attr *fmr_attr);\r
+\r
+/**\r
+ * ib_map_phys_fmr - Maps a list of physical pages to a fast memory region.\r
+ * @fmr: The fast memory region to associate with the pages.\r
+ * @page_list: An array of physical pages to map to the fast memory region.\r
+ * @list_len: The number of pages in page_list.\r
+ * @iova: The I/O virtual address to use with the mapped region.\r
+ */\r
+static inline int ib_map_phys_fmr(struct ib_fmr *fmr,\r
+                                 u64 *page_list, int list_len,\r
+                                 u64 iova)\r
+{\r
+       return fmr->device->map_phys_fmr(fmr, page_list, list_len, iova);\r
+}\r
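+/*\r
+ * Example (illustrative sketch, assuming the Linux-style list helpers\r
+ * this port provides; the page addresses and iova are hypothetical):\r
+ * map two pages, then unmap the FMR later by putting it on a list\r
+ * handed to ib_unmap_fmr().\r
+ *\r
+ *     u64 pages[2] = { page0_dma, page1_dma };\r
+ *\r
+ *     ib_map_phys_fmr(fmr, pages, 2, iova);\r
+ *     // ... use fmr->lkey / fmr->rkey in work requests ...\r
+ *     list_add_tail(&fmr->list, &fmr_list);\r
+ *     ib_unmap_fmr(&fmr_list);\r
+ */\r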
+\r
+/**\r
+ * ib_unmap_fmr - Removes the mapping from a list of fast memory regions.\r
+ * @fmr_list: A linked list of fast memory regions to unmap.\r
+ */\r
+int ib_unmap_fmr(struct list_head *fmr_list);\r
+\r
+/**\r
+ * ib_dealloc_fmr - Deallocates a fast memory region.\r
+ * @fmr: The fast memory region to deallocate.\r
+ */\r
+int ib_dealloc_fmr(struct ib_fmr *fmr);\r
+\r
+/**\r
+ * ib_attach_mcast - Attaches the specified QP to a multicast group.\r
+ * @qp: QP to attach to the multicast group.  The QP must be type\r
+ *   IB_QPT_UD.\r
+ * @gid: Multicast group GID.\r
+ * @lid: Multicast group LID in host byte order.\r
+ *\r
+ * In order to send and receive multicast packets, subnet\r
+ * administration must have created the multicast group and configured\r
+ * the fabric appropriately.  The port associated with the specified\r
+ * QP must also be a member of the multicast group.\r
+ */\r
+int ib_attach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid);\r
+\r
+/**\r
+ * ib_detach_mcast - Detaches the specified QP from a multicast group.\r
+ * @qp: QP to detach from the multicast group.\r
+ * @gid: Multicast group GID.\r
+ * @lid: Multicast group LID in host byte order.\r
+ */\r
+int ib_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid);\r
+\r
+#endif /* IB_VERBS_H */\r
index 4184d7f139fcbc3a456487121f88a66af784acbe..49692cc402e795a724a3b85d0f664abe1baf58c5 100644 (file)
@@ -73,24 +73,6 @@ struct ib_ucontext_ex
        int                             fw_if_open;
 };
 
-/* extension for ib_cq */
-struct ib_cq_ex 
-{
-       void *                          ctx;            /* IBAL CQ context */
-};
-
-/* extension for ib_qp */
-struct ib_qp_ex 
-{
-       void *                          ctx;            /* IBAL QP context */
-};
-
-/* extension for ib_srq */
-struct ib_srq_ex 
-{
-       void *                          ctx;            /* IBAL SRQ context */
-};
-
 /* extension for ib_event */
 struct ib_event_ex 
 {
index 575cdb71f9b193b181465f022be31be5e0ed0ee1..96817641542f2d46d262bc32cb3fe7f1f81e8640 100644 (file)
@@ -2,6 +2,14 @@ TARGETNAME=mlx4_hca
 TARGETPATH=..\..\..\..\bin\kernel\obj$(BUILD_ALT_DIR)\r
 TARGETTYPE=DRIVER\r
 \r
+!if $(_NT_TOOLS_VERSION) != 0x700\r
+# WDK build only - transform .inx --> .inf adding date & version stamp.\r
+# see .\makefile.inc\r
+INF_NAME=$(TARGETNAME)\r
+INF_TARGET=..\..\..\..\bin\kernel\$(O)\$(INF_NAME).inf\r
+NTTARGETFILES=$(INF_TARGET)\r
+!endif\r
+\r
 !if $(FREEBUILD)\r
 ENABLE_EVENT_TRACING=1\r
 !else\r
index 4e3c630e8c7aeee87a36f6c26703b15f46621aca..dbd8cfa45b48fc9ace5b6ecece016069b8c7c69e 100644 (file)
-/*
- * Copyright (c) 2005 SilverStorm Technologies.  All rights reserved.
- * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. 
- *
- * This software is available to you under the OpenIB.org BSD license
- * below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
- * $Id: al.c 1611 2006-08-20 14:48:55Z sleybo $
- */
-
-#include "precomp.h"
-
-#if defined(EVENT_TRACING)
-#ifdef offsetof
-#undef offsetof
-#endif
-#include "cq.tmh"
-#endif
-
-ib_api_status_t
-mlnx_create_cq (
-       IN              const   ib_ca_handle_t                          h_ca,
-       IN              const   void                                            *cq_context,
-       IN                              ci_async_event_cb_t                     event_handler,
-       IN                              ci_completion_cb_t                      cq_comp_handler,
-       IN      OUT                     uint32_t                                        *p_size,
-               OUT                     ib_cq_handle_t                          *ph_cq,
-       IN      OUT                     ci_umv_buf_t                            *p_umv_buf )
-{
-       int err;
-       ib_api_status_t         status;
-       struct ib_cq *p_ib_cq;
-       mlnx_hca_t *p_hca;
-       struct ib_device *p_ibdev;
-       struct ib_ucontext *p_uctx;
-
-       HCA_ENTER(HCA_DBG_CQ);
-
-       if( p_umv_buf ) {
-
-               p_uctx = (struct ib_ucontext *)h_ca;
-               p_ibdev = p_uctx->device;
-               p_hca = ibdev2hca(p_ibdev);
-
-               if( p_umv_buf  && p_umv_buf->command) {
-                       // sanity checks 
-                       if (p_umv_buf->input_size < sizeof(struct ibv_create_cq) ||
-                               p_umv_buf->output_size < sizeof(struct ibv_create_cq_resp) ||
-                               !p_umv_buf->p_inout_buf) {
-                               status = IB_INVALID_PARAMETER;
-                               goto err_inval_params;
-                       }
-               }
-       }
-       else {
-               p_uctx = NULL;
-               p_hca = (mlnx_hca_t *)h_ca;
-               p_ibdev = hca2ibdev(p_hca);
-       }
-
-       /* sanity check */
-       if (!*p_size || *p_size > (uint32_t)hca2mdev(p_hca)->caps.max_cqes) {
-               status = IB_INVALID_CQ_SIZE;
-               goto err_cqe;
-       }
-
-       // allocate cq  
-       p_ib_cq = ibv_create_cq(p_ibdev, 
-               cq_comp_handler, event_handler,
-               p_hca, *p_size, p_uctx, p_umv_buf );
-       if (IS_ERR(p_ib_cq)) {
-               err = PTR_ERR(p_ib_cq);
-               HCA_PRINT (TRACE_LEVEL_ERROR ,HCA_DBG_CQ, ("ibv_create_cq failed (%d)\n", err));
-               status = errno_to_iberr(err);
-               goto err_create_cq;
-       }
-
-       // fill the object
-       p_ib_cq->x.ctx = (void*)cq_context;
-       
-       // return the result
-       *p_size = p_ib_cq->cqe;
-
-       if (ph_cq) *ph_cq = (ib_cq_handle_t)p_ib_cq;
-
-       status = IB_SUCCESS;
-       
-err_create_cq:
-err_inval_params:
-err_cqe:
-       if (p_umv_buf && p_umv_buf->command) 
-               p_umv_buf->status = status;
-       if (status != IB_SUCCESS)
-       {
-               HCA_PRINT(TRACE_LEVEL_ERROR,HCA_DBG_CQ,
-                       ("completes with ERROR status %x\n", status));
-       }
-       HCA_EXIT(HCA_DBG_CQ);
-       return status;
-}
-
-ib_api_status_t
-mlnx_resize_cq (
-       IN              const   ib_cq_handle_t                          h_cq,
-       IN      OUT                     uint32_t                                        *p_size,
-       IN      OUT                     ci_umv_buf_t                            *p_umv_buf )
-{
-       int err;
-       ib_api_status_t status = IB_SUCCESS;
-       struct ib_cq *p_ib_cq = (struct ib_cq *)h_cq;
-       struct ib_device *p_ibdev = p_ib_cq->device;
-
-       UNUSED_PARAM(p_umv_buf);
-       
-       HCA_ENTER(HCA_DBG_CQ);
-
-       if (p_ibdev->resize_cq) {
-               err = p_ibdev->resize_cq(p_ib_cq, *p_size, NULL);
-               if (err) {
-                       HCA_PRINT(TRACE_LEVEL_ERROR  , HCA_DBG_SHIM,
-                               ("ib_resize_cq failed (%d)\n", err));
-                       status = errno_to_iberr(err);
-               }
-       }
-       else
-               status = IB_UNSUPPORTED;
-       
-       HCA_EXIT(HCA_DBG_CQ);
-       return status;
-}
-
-ib_api_status_t
-mlnx_query_cq (
-       IN              const   ib_cq_handle_t                          h_cq,
-               OUT                     uint32_t                                        *p_size,
-       IN      OUT                     ci_umv_buf_t                            *p_umv_buf )
-{
-       UNREFERENCED_PARAMETER(h_cq);
-       UNREFERENCED_PARAMETER(p_size);
-       if (p_umv_buf && p_umv_buf->command) {
-               p_umv_buf->status = IB_UNSUPPORTED;
-       }
-       HCA_PRINT(TRACE_LEVEL_ERROR,HCA_DBG_CQ,("mlnx_query_cq not supported\n"));
-       return IB_UNSUPPORTED;
-}
-
-ib_api_status_t
-mlnx_destroy_cq (
-       IN              const   ib_cq_handle_t                          h_cq)
-{
-                                                                                                                                                               
-       ib_api_status_t         status;
-       int err;
-       struct ib_cq *p_ib_cq = (struct ib_cq *)h_cq;
-
-       HCA_ENTER( HCA_DBG_QP);
-
-       HCA_PRINT(TRACE_LEVEL_INFORMATION,HCA_DBG_CQ,
-               ("cqn %#x, pcs %p\n", ((struct mlx4_ib_cq*)p_ib_cq)->mcq.cqn, PsGetCurrentProcess()) );
-
-       // destroy CQ
-       err = ib_destroy_cq( p_ib_cq );
-       if (err) {
-               HCA_PRINT (TRACE_LEVEL_ERROR ,HCA_DBG_SHIM,
-                       ("ibv_destroy_cq failed (%d)\n", err));
-               status = errno_to_iberr(err);
-               goto err_destroy_cq;
-       }
-
-       status = IB_SUCCESS;
-
-err_destroy_cq:
-       if (status != IB_SUCCESS)
-               HCA_PRINT(TRACE_LEVEL_ERROR,HCA_DBG_CQ,
-                       ("completes with ERROR status %x\n", status));
-       HCA_EXIT(HCA_DBG_CQ);
-       return status;
-}
-
-
-       
-
-void
-mlnx_cq_if(
-       IN      OUT                     ci_interface_t                          *p_interface )
-{
-       p_interface->create_cq = mlnx_create_cq;
-       p_interface->resize_cq = mlnx_resize_cq;
-       p_interface->query_cq = mlnx_query_cq;
-       p_interface->destroy_cq = mlnx_destroy_cq;
-}
-
+/*\r
+ * Copyright (c) 2005 SilverStorm Technologies.  All rights reserved.\r
+ * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. \r
+ *\r
+ * This software is available to you under the OpenIB.org BSD license\r
+ * below:\r
+ *\r
+ *     Redistribution and use in source and binary forms, with or\r
+ *     without modification, are permitted provided that the following\r
+ *     conditions are met:\r
+ *\r
+ *      - Redistributions of source code must retain the above\r
+ *        copyright notice, this list of conditions and the following\r
+ *        disclaimer.\r
+ *\r
+ *      - Redistributions in binary form must reproduce the above\r
+ *        copyright notice, this list of conditions and the following\r
+ *        disclaimer in the documentation and/or other materials\r
+ *        provided with the distribution.\r
+ *\r
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,\r
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\r
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND\r
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS\r
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN\r
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN\r
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\r
+ * SOFTWARE.\r
+ *\r
+ * $Id: al.c 1611 2006-08-20 14:48:55Z sleybo $\r
+ */\r
+\r
+#include "precomp.h"\r
+\r
+#if defined(EVENT_TRACING)\r
+#ifdef offsetof\r
+#undef offsetof\r
+#endif\r
+#include "cq.tmh"\r
+#endif\r
+\r
+ib_api_status_t\r
+mlnx_create_cq (\r
+       IN              const   ib_ca_handle_t                          h_ca,\r
+       IN              const   void                                            *cq_context,\r
+       IN                              ci_async_event_cb_t                     event_handler,\r
+       IN                              ci_completion_cb_t                      cq_comp_handler,\r
+       IN      OUT                     uint32_t                                        *p_size,\r
+               OUT                     ib_cq_handle_t                          *ph_cq,\r
+       IN      OUT                     ci_umv_buf_t                            *p_umv_buf )\r
+{\r
+       int err;\r
+       ib_api_status_t         status;\r
+       struct ib_cq *p_ib_cq;\r
+       mlnx_hca_t *p_hca;\r
+       struct ib_device *p_ibdev;\r
+       struct ib_ucontext *p_uctx;\r
+\r
+       HCA_ENTER(HCA_DBG_CQ);\r
+\r
+       if( p_umv_buf ) {\r
+\r
+               p_uctx = (struct ib_ucontext *)h_ca;\r
+               p_ibdev = p_uctx->device;\r
+               p_hca = ibdev2hca(p_ibdev);\r
+\r
+               if (p_umv_buf->command) {\r
+                       // sanity checks \r
+                       if (p_umv_buf->input_size < sizeof(struct ibv_create_cq) ||\r
+                               p_umv_buf->output_size < sizeof(struct ibv_create_cq_resp) ||\r
+                               !p_umv_buf->p_inout_buf) {\r
+                               status = IB_INVALID_PARAMETER;\r
+                               goto err_inval_params;\r
+                       }\r
+               }\r
+       }\r
+       else {\r
+               p_uctx = NULL;\r
+               p_hca = (mlnx_hca_t *)h_ca;\r
+               p_ibdev = hca2ibdev(p_hca);\r
+       }\r
+\r
+       /* sanity check */\r
+       if (!*p_size || *p_size > (uint32_t)hca2mdev(p_hca)->caps.max_cqes) {\r
+               status = IB_INVALID_CQ_SIZE;\r
+               goto err_cqe;\r
+       }\r
+\r
+       // allocate cq  \r
+       p_ib_cq = ibv_create_cq(p_ibdev, \r
+               cq_comp_handler, event_handler,\r
+               (void*)cq_context, *p_size, p_uctx, p_umv_buf );\r
+       if (IS_ERR(p_ib_cq)) {\r
+               err = PTR_ERR(p_ib_cq);\r
+               HCA_PRINT (TRACE_LEVEL_ERROR ,HCA_DBG_CQ, ("ibv_create_cq failed (%d)\n", err));\r
+               status = errno_to_iberr(err);\r
+               goto err_create_cq;\r
+       }\r
+\r
+       // return the result\r
+       *p_size = p_ib_cq->cqe;\r
+\r
+       if (ph_cq) *ph_cq = (ib_cq_handle_t)p_ib_cq;\r
+\r
+       status = IB_SUCCESS;\r
+       \r
+err_create_cq:\r
+err_inval_params:\r
+err_cqe:\r
+       if (p_umv_buf && p_umv_buf->command) \r
+               p_umv_buf->status = status;\r
+       if (status != IB_SUCCESS)\r
+       {\r
+               HCA_PRINT(TRACE_LEVEL_ERROR,HCA_DBG_CQ,\r
+                       ("completes with ERROR status %x\n", status));\r
+       }\r
+       HCA_EXIT(HCA_DBG_CQ);\r
+       return status;\r
+}\r
+\r
+ib_api_status_t\r
+mlnx_resize_cq (\r
+       IN              const   ib_cq_handle_t                          h_cq,\r
+       IN      OUT                     uint32_t                                        *p_size,\r
+       IN      OUT                     ci_umv_buf_t                            *p_umv_buf )\r
+{\r
+       int err;\r
+       ib_api_status_t status = IB_SUCCESS;\r
+       struct ib_cq *p_ib_cq = (struct ib_cq *)h_cq;\r
+       struct ib_device *p_ibdev = p_ib_cq->device;\r
+\r
+       UNUSED_PARAM(p_umv_buf);\r
+       \r
+       HCA_ENTER(HCA_DBG_CQ);\r
+\r
+       if (p_ibdev->resize_cq) {\r
+               err = p_ibdev->resize_cq(p_ib_cq, *p_size, NULL);\r
+               if (err) {\r
+                       HCA_PRINT(TRACE_LEVEL_ERROR  , HCA_DBG_SHIM,\r
+                               ("ib_resize_cq failed (%d)\n", err));\r
+                       status = errno_to_iberr(err);\r
+               }\r
+       }\r
+       else\r
+               status = IB_UNSUPPORTED;\r
+       \r
+       HCA_EXIT(HCA_DBG_CQ);\r
+       return status;\r
+}\r
+\r
+ib_api_status_t\r
+mlnx_query_cq (\r
+       IN              const   ib_cq_handle_t                          h_cq,\r
+               OUT                     uint32_t                                        *p_size,\r
+       IN      OUT                     ci_umv_buf_t                            *p_umv_buf )\r
+{\r
+       UNREFERENCED_PARAMETER(h_cq);\r
+       UNREFERENCED_PARAMETER(p_size);\r
+       if (p_umv_buf && p_umv_buf->command) {\r
+               p_umv_buf->status = IB_UNSUPPORTED;\r
+       }\r
+       HCA_PRINT(TRACE_LEVEL_ERROR,HCA_DBG_CQ,("mlnx_query_cq not supported\n"));\r
+       return IB_UNSUPPORTED;\r
+}\r
+\r
+ib_api_status_t\r
+mlnx_destroy_cq (\r
+       IN              const   ib_cq_handle_t                          h_cq)\r
+{\r
+                                                                                                                                                               \r
+       ib_api_status_t         status;\r
+       int err;\r
+       struct ib_cq *p_ib_cq = (struct ib_cq *)h_cq;\r
+\r
+       HCA_ENTER( HCA_DBG_QP);\r
+\r
+       HCA_PRINT(TRACE_LEVEL_INFORMATION,HCA_DBG_CQ,\r
+               ("cqn %#x, pcs %p\n", ((struct mlx4_ib_cq*)p_ib_cq)->mcq.cqn, PsGetCurrentProcess()) );\r
+\r
+       // destroy CQ\r
+       err = ib_destroy_cq( p_ib_cq );\r
+       if (err) {\r
+               HCA_PRINT (TRACE_LEVEL_ERROR ,HCA_DBG_SHIM,\r
+                       ("ibv_destroy_cq failed (%d)\n", err));\r
+               status = errno_to_iberr(err);\r
+               goto err_destroy_cq;\r
+       }\r
+\r
+       status = IB_SUCCESS;\r
+\r
+err_destroy_cq:\r
+       if (status != IB_SUCCESS)\r
+               HCA_PRINT(TRACE_LEVEL_ERROR,HCA_DBG_CQ,\r
+                       ("completes with ERROR status %x\n", status));\r
+       HCA_EXIT(HCA_DBG_CQ);\r
+       return status;\r
+}\r
+\r
+\r
+       \r
+\r
+void\r
+mlnx_cq_if(\r
+       IN      OUT                     ci_interface_t                          *p_interface )\r
+{\r
+       p_interface->create_cq = mlnx_create_cq;\r
+       p_interface->resize_cq = mlnx_resize_cq;\r
+       p_interface->query_cq = mlnx_query_cq;\r
+       p_interface->destroy_cq = mlnx_destroy_cq;\r
+}\r
+\r
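
The functional change buried in this otherwise mechanical rewrite (the rest is a CRLF line-ending conversion) is the ibv_create_cq() call: the IBAL CQ context, which the old code parked in the ib_cq_ex extension removed in the ib_verbs_ex.h hunk above, now travels through the create call itself. Abridged from the two versions above:

    /* before: context stashed in a driver-side extension after creation */
    p_ib_cq = ibv_create_cq(p_ibdev, cq_comp_handler, event_handler,
            p_hca, *p_size, p_uctx, p_umv_buf);
    p_ib_cq->x.ctx = (void*)cq_context;

    /* after: the consumer context is the context argument itself, so the
     * completion path can hand it straight back to IBAL and the ib_cq_ex,
     * ib_qp_ex and ib_srq_ex extension structs can be deleted            */
    p_ib_cq = ibv_create_cq(p_ibdev, cq_comp_handler, event_handler,
            (void*)cq_context, *p_size, p_uctx, p_umv_buf);

The qp.c, srq.c and mthca hunks below make the same substitution for qp_context and srq_context.
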
index b2b1f9b5c4999bf0109249b93b00dcdaf66b6503..b926e9590bbc5961fcf06c95bdad96c9afe78269 100644 (file)
@@ -5,6 +5,20 @@ mlx4_hca.bmf: hca.mof
         wmimofck $(OBJ_PATH)\$O\mlx4_hca.bmf\r
 \r
 \r
+# Transform the .inx file into a .inf file, stamping in the build date and the major.minor.build.svn version.\r
+# The output .inf file is copied to the $(INF_TARGET) folder (commonly where the .sys file resides).\r
 \r
+_LNG=$(LANGUAGE)\r
 \r
+!IF !DEFINED(_INX)\r
+_INX=.\r
+!ENDIF\r
+\r
+STAMP=stampinf -a $(_BUILDARCH)\r
+\r
+!INCLUDE mod_ver.def\r
+\r
+$(INF_TARGET) : $(_INX)\$(INF_NAME).inx\r
+    copy $(_INX)\$(@B).inx $@\r
+    $(STAMP) -f $@ -d * -v $(IB_MAJORVERSION).$(IB_MINORVERSION).$(IB_BUILDVERSION).$(OPENIB_REV)\r
 \r
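
Concretely, with IB_MAJORVERSION=1, IB_MINORVERSION=0, IB_BUILDVERSION=0 and OPENIB_REV=1459 (example values, not read from mod_ver.def), the rule above expands to something like:

    copy .\mlx4_hca.inx ..\..\..\..\bin\kernel\objfre_wlh_x86\i386\mlx4_hca.inf
    stampinf -a x86 -f ..\..\..\..\bin\kernel\objfre_wlh_x86\i386\mlx4_hca.inf -d * -v 1.0.0.1459

stampinf's "-d *" stamps the current build date and "-v" rewrites the DriverVer version, replacing the placeholder DriverVer line carried in the .inx (the output path shown assumes a free x86 Vista build).
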
diff --git a/branches/winverbs/hw/mlx4/kernel/hca/mlx4_hca.inf b/branches/winverbs/hw/mlx4/kernel/hca/mlx4_hca.inf
deleted file mode 100644 (file)
index 9cd1530..0000000
+++ /dev/null
@@ -1,225 +0,0 @@
-; Mellanox Technologies InfiniBand HCAs.\r
-; Copyright 2008 Mellanox Technologies all Rights Reserved.\r
-\r
-[Version]\r
-Signature="$WINDOWS NT$"\r
-Class=Mlx4Hca\r
-ClassGUID={31B0B28A-26FF-4dca-A6FA-E767C7DFBA20}\r
-Provider=%MTL%\r
-; must be synchronized with hca\drv.c\r
-DriverVer=02/01/2008,1.0.0.0\r
-CatalogFile=mlx4_hca.cat\r
-\r
-\r
-;*****************************************\r
-; Destination directory section\r
-;*****************************************\r
-\r
-[DestinationDirs]\r
-DefaultDestDir = %DIRID_DRIVERS%\r
-ClassCopyFiles = %DIRID_SYSTEM%\r
-MLX4HCA.UMCopyFiles = %DIRID_SYSTEM%\r
-MLX4HCA.WOW64CopyFiles = %DIRID_SYSTEM_X86%\r
-;Wdf_CoInstaller_CopyFiles = %DIRID_SYSTEM%\r
-\r
-;*****************************************\r
-; Class Install section\r
-;*****************************************\r
-\r
-[ClassInstall32]\r
-CopyFiles=ClassCopyFiles\r
-AddReg=ClassAddReg\r
-\r
-[ClassCopyFiles]\r
-IbInstaller.dll\r
-\r
-[ClassAddReg]\r
-HKR,,,,"Mellanox ConnectX Virtual InfiniBand Adapters"\r
-HKR,,Icon,,-5\r
-HKR,,SilentInstall,,1\r
-HKLM,"System\CurrentControlSet\Control\CoDeviceInstallers", \\r
- %Mlx4HcaClassGuid%,%REG_MULTI_SZ_APPEND%, "IbInstaller.dll,IbCoInstaller"\r
-\r
-;*****************************************\r
-; Device Install section\r
-;*****************************************\r
-\r
-[SourceDisksNames.x86]\r
-1=%DiskId%,,,""\r
-\r
-[SourceDisksNames.amd64]\r
-1=%DiskId%,,,""\r
-\r
-[SourceDisksNames.ia64]\r
-1=%DiskId%,,,""\r
-\r
-[SourceDisksFiles.x86]\r
-IbInstaller.dll = 1,,\r
-mlx4_hca.sys = 1,,\r
-mlx4u.dll = 1,,\r
-mlx4ud.dll = 1,,\r
-;wdfcoinstaller01005.dll = 1,,\r
-\r
-[SourceDisksFiles.amd64]\r
-IbInstaller.dll = 1,,\r
-mlx4_hca.sys = 1,,\r
-mlx4u.dll = 1,,\r
-mlx4ud.dll = 1,,\r
-mlx4u32.dll = 1,,\r
-mlx4u32d.dll = 1,,\r
-;wdfcoinstaller01005.dll = 1,,\r
-\r
-[SourceDisksFiles.ia64]\r
-IbInstaller.dll = 1,,\r
-mlx4_hca.sys = 1,,\r
-mlx4u.dll = 1,,\r
-mlx4ud.dll = 1,,\r
-mlx4u32.dll = 1,,\r
-mlx4u32d.dll = 1,,\r
-;wdfcoinstaller01005.dll = 1,,\r
-\r
-;*****************************************\r
-; Mlx4Bus  Install Section\r
-;*****************************************\r
-\r
-[Manufacturer]\r
-%MTL% = MLX4HCA.DeviceSection,ntx86,ntamd64,ntia64\r
-\r
-[MLX4HCA.DeviceSection]\r
-; empty since we don't support W9x/Me\r
-\r
-[MLX4HCA.DeviceSection.ntx86]\r
-%Mlx4_Hca.DeviceDesc%=MLX4HCA.DDInstall, MLX4\ConnectX_Hca\r
-\r
-[MLX4HCA.DeviceSection.ntamd64]\r
-%Mlx4_Hca.DeviceDesc%=MLX4HCA.DDInstall, MLX4\ConnectX_Hca\r
-\r
-[MLX4HCA.DeviceSection.ntia64]\r
-%Mlx4_Hca.DeviceDesc%=MLX4HCA.DDInstall, MLX4\ConnectX_Hca\r
-\r
-[MLX4HCA.DDInstall.ntx86]\r
-CopyFiles = MLX4HCA.CopyFiles\r
-CopyFiles = MLX4HCA.UMCopyFiles\r
-CopyINF=ib_bus.inf\r
-\r
-[MLX4HCA.DDInstall.ntamd64]\r
-CopyFiles = MLX4HCA.CopyFiles\r
-CopyFiles = MLX4HCA.UMCopyFiles\r
-CopyFiles = MLX4HCA.WOW64CopyFiles\r
-CopyINF=ib_bus.inf\r
-\r
-[MLX4HCA.DDInstall.ntia64]\r
-CopyFiles = MLX4HCA.CopyFiles\r
-CopyFiles = MLX4HCA.UMCopyFiles\r
-CopyFiles = MLX4HCA.WOW64CopyFiles\r
-CopyINF=ib_bus.inf\r
-\r
-[MLX4HCA.DDInstall.ntx86.Services]\r
-AddService = mlx4_hca,%SPSVCINST_ASSOCSERVICE%,MLX4HCA.ServiceInstall,MLX4HCA.EventLog\r
-\r
-[MLX4HCA.DDInstall.ntamd64.Services]\r
-AddService = mlx4_hca,%SPSVCINST_ASSOCSERVICE%,MLX4HCA.ServiceInstall,MLX4HCA.EventLog\r
-\r
-[MLX4HCA.DDInstall.ntia64.Services]\r
-AddService = mlx4_hca,%SPSVCINST_ASSOCSERVICE%,MLX4HCA.ServiceInstall,MLX4HCA.EventLog\r
-\r
-[MLX4HCA.CopyFiles]\r
-mlx4_hca.sys\r
-\r
-[MLX4HCA.UMCopyFiles]\r
-mlx4u.dll,,,2\r
-mlx4ud.dll,,,2\r
-\r
-[MLX4HCA.WOW64CopyFiles]\r
-mlx4u.dll,mlx4u32.dll,,2\r
-mlx4ud.dll,mlx4u32d.dll,,2\r
-\r
-\r
-;*****************************************\r
-; Service Install section\r
-;*****************************************\r
-\r
-[MLX4HCA.ServiceInstall]\r
-DisplayName     = %MLX4HCA.ServiceDesc%\r
-ServiceType     = %SERVICE_KERNEL_DRIVER%\r
-StartType       = %SERVICE_DEMAND_START%\r
-ErrorControl    = %SERVICE_ERROR_NORMAL%\r
-ServiceBinary   = %12%\mlx4_hca.sys\r
-LoadOrderGroup  = extended base\r
-AddReg          = MLX4HCA.ParamsReg\r
-\r
-[MLX4HCA.EventLog]\r
-AddReg = MLX4HCA.AddEventLogReg\r
-\r
-[MLX4HCA.AddEventLogReg]\r
-HKR, , EventMessageFile, 0x00020000, "%%SystemRoot%%\System32\IoLogMsg.dll;%%SystemRoot%%\System32\drivers\mlx4_hca.sys"\r
-HKR, , TypesSupported,   0x00010001, 7\r
-\r
-[MLX4HCA.ParamsReg]\r
-HKR,,DeviceCharacteristics,0x10001,0x0100         ; Use same security checks on relative opens\r
-HKR,,Security,,"D:P(A;;GA;;;BA)(A;;GA;;;SY)"      ; Allow generic-all access to Built-in administrators and Local system \r
-HKR,"Parameters","DebugLevel",%REG_DWORD%,0x00000003\r
-HKR,"Parameters","DebugFlags",%REG_DWORD%,0x0000ffff\r
-HKLM,"System\CurrentControlSet\Control\WMI\GlobalLogger\F8C96A49-AE22-41e9-8025-D7E416884D89","Flags",%REG_DWORD%,0xffff\r
-HKLM,"System\CurrentControlSet\Control\WMI\GlobalLogger\F8C96A49-AE22-41e9-8025-D7E416884D89","Level",%REG_DWORD%,0x3\r
-\r
-;\r
-; The below section is temporarily disabled.\r
-; It should be uncommented after returning MLX4_HCA to WDF model.\r
-;\r
-\r
-;*****************************************\r
-; WDF Coinstaller installation section\r
-;*****************************************\r
-\r
-;[MLX4HCA.DDInstall.ntx86.CoInstallers]\r
-;AddReg=Wdf_CoInstaller_AddReg\r
-;CopyFiles=Wdf_CoInstaller_CopyFiles\r
-\r
-;[MLX4HCA.DDInstall.ntamd64.CoInstallers]\r
-;AddReg=Wdf_CoInstaller_AddReg\r
-;CopyFiles=Wdf_CoInstaller_CopyFiles\r
-\r
-;[MLX4HCA.DDInstall.ntia64.CoInstallers]\r
-;AddReg=Wdf_CoInstaller_AddReg\r
-;CopyFiles=Wdf_CoInstaller_CopyFiles\r
-\r
-;[Wdf_CoInstaller_AddReg]\r
-;HKR,,CoInstallers32,0x00010000, "wdfcoinstaller01005.dll,WdfCoInstaller"\r
-\r
-;[Wdf_CoInstaller_CopyFiles]\r
-;wdfcoinstaller01005.dll\r
-\r
-;[MLX4HCA.DDInstall.ntx86.Wdf]\r
-;KmdfService = mlx4_hca, mlx4_hca_wdfsect\r
-\r
-;[MLX4HCA.DDInstall.ntamd64.Wdf]\r
-;KmdfService = mlx4_hca, mlx4_hca_wdfsect\r
-\r
-;[MLX4HCA.DDInstall.ntia64.Wdf]\r
-;KmdfService = mlx4_hca, mlx4_hca_wdfsect\r
-\r
-;[mlx4_hca_wdfsect]\r
-;KmdfLibraryVersion = 1.5\r
-\r
-\r
-;*****************************************\r
-; Strings\r
-;*****************************************\r
-\r
-[Strings]\r
-Mlx4HcaClassGuid = "{31B0B28A-26FF-4dca-A6FA-E767C7DFBA20}"\r
-MTL="Mellanox Technologies Ltd."\r
-MLX4HCA.ServiceDesc = "Mellanox ConnectX Virtual Infiband Driver"\r
-Mlx4_Hca.DeviceDesc="Mellanox ConnectX Virtual Channel Adapter"\r
-DiskId = "Mellanox Mlx4 Bus installation disk"\r
-SPSVCINST_NULL = 0x0\r
-SPSVCINST_ASSOCSERVICE = 0x00000002\r
-SERVICE_KERNEL_DRIVER  = 1\r
-SERVICE_DEMAND_START   = 3\r
-SERVICE_ERROR_NORMAL   = 1\r
-REG_DWORD              = 0x00010001\r
-REG_MULTI_SZ_APPEND    = 0x00010008\r
-DIRID_SYSTEM           = 11\r
-DIRID_DRIVERS          = 12\r
-DIRID_SYSTEM_X86       = 16425\r
diff --git a/branches/winverbs/hw/mlx4/kernel/hca/mlx4_hca.inx b/branches/winverbs/hw/mlx4/kernel/hca/mlx4_hca.inx
new file mode 100644 (file)
index 0000000..9cd1530
--- /dev/null
@@ -0,0 +1,225 @@
+; Mellanox Technologies InfiniBand HCAs.\r
+; Copyright 2008 Mellanox Technologies all Rights Reserved.\r
+\r
+[Version]\r
+Signature="$WINDOWS NT$"\r
+Class=Mlx4Hca\r
+ClassGUID={31B0B28A-26FF-4dca-A6FA-E767C7DFBA20}\r
+Provider=%MTL%\r
+; must be synchronized with hca\drv.c\r
+DriverVer=02/01/2008,1.0.0.0\r
+CatalogFile=mlx4_hca.cat\r
+\r
+\r
+;*****************************************\r
+; Destination directory section\r
+;*****************************************\r
+\r
+[DestinationDirs]\r
+DefaultDestDir = %DIRID_DRIVERS%\r
+ClassCopyFiles = %DIRID_SYSTEM%\r
+MLX4HCA.UMCopyFiles = %DIRID_SYSTEM%\r
+MLX4HCA.WOW64CopyFiles = %DIRID_SYSTEM_X86%\r
+;Wdf_CoInstaller_CopyFiles = %DIRID_SYSTEM%\r
+\r
+;*****************************************\r
+; Class Install section\r
+;*****************************************\r
+\r
+[ClassInstall32]\r
+CopyFiles=ClassCopyFiles\r
+AddReg=ClassAddReg\r
+\r
+[ClassCopyFiles]\r
+IbInstaller.dll\r
+\r
+[ClassAddReg]\r
+HKR,,,,"Mellanox ConnectX Virtual InfiniBand Adapters"\r
+HKR,,Icon,,-5\r
+HKR,,SilentInstall,,1\r
+HKLM,"System\CurrentControlSet\Control\CoDeviceInstallers", \\r
+ %Mlx4HcaClassGuid%,%REG_MULTI_SZ_APPEND%, "IbInstaller.dll,IbCoInstaller"\r
+\r
+;*****************************************\r
+; Device Install section\r
+;*****************************************\r
+\r
+[SourceDisksNames.x86]\r
+1=%DiskId%,,,""\r
+\r
+[SourceDisksNames.amd64]\r
+1=%DiskId%,,,""\r
+\r
+[SourceDisksNames.ia64]\r
+1=%DiskId%,,,""\r
+\r
+[SourceDisksFiles.x86]\r
+IbInstaller.dll = 1,,\r
+mlx4_hca.sys = 1,,\r
+mlx4u.dll = 1,,\r
+mlx4ud.dll = 1,,\r
+;wdfcoinstaller01005.dll = 1,,\r
+\r
+[SourceDisksFiles.amd64]\r
+IbInstaller.dll = 1,,\r
+mlx4_hca.sys = 1,,\r
+mlx4u.dll = 1,,\r
+mlx4ud.dll = 1,,\r
+mlx4u32.dll = 1,,\r
+mlx4u32d.dll = 1,,\r
+;wdfcoinstaller01005.dll = 1,,\r
+\r
+[SourceDisksFiles.ia64]\r
+IbInstaller.dll = 1,,\r
+mlx4_hca.sys = 1,,\r
+mlx4u.dll = 1,,\r
+mlx4ud.dll = 1,,\r
+mlx4u32.dll = 1,,\r
+mlx4u32d.dll = 1,,\r
+;wdfcoinstaller01005.dll = 1,,\r
+\r
+;*****************************************\r
+; Mlx4Hca  Install Section\r
+;*****************************************\r
+\r
+[Manufacturer]\r
+%MTL% = MLX4HCA.DeviceSection,ntx86,ntamd64,ntia64\r
+\r
+[MLX4HCA.DeviceSection]\r
+; empty since we don't support W9x/Me\r
+\r
+[MLX4HCA.DeviceSection.ntx86]\r
+%Mlx4_Hca.DeviceDesc%=MLX4HCA.DDInstall, MLX4\ConnectX_Hca\r
+\r
+[MLX4HCA.DeviceSection.ntamd64]\r
+%Mlx4_Hca.DeviceDesc%=MLX4HCA.DDInstall, MLX4\ConnectX_Hca\r
+\r
+[MLX4HCA.DeviceSection.ntia64]\r
+%Mlx4_Hca.DeviceDesc%=MLX4HCA.DDInstall, MLX4\ConnectX_Hca\r
+\r
+[MLX4HCA.DDInstall.ntx86]\r
+CopyFiles = MLX4HCA.CopyFiles\r
+CopyFiles = MLX4HCA.UMCopyFiles\r
+CopyINF=ib_bus.inf\r
+\r
+[MLX4HCA.DDInstall.ntamd64]\r
+CopyFiles = MLX4HCA.CopyFiles\r
+CopyFiles = MLX4HCA.UMCopyFiles\r
+CopyFiles = MLX4HCA.WOW64CopyFiles\r
+CopyINF=ib_bus.inf\r
+\r
+[MLX4HCA.DDInstall.ntia64]\r
+CopyFiles = MLX4HCA.CopyFiles\r
+CopyFiles = MLX4HCA.UMCopyFiles\r
+CopyFiles = MLX4HCA.WOW64CopyFiles\r
+CopyINF=ib_bus.inf\r
+\r
+[MLX4HCA.DDInstall.ntx86.Services]\r
+AddService = mlx4_hca,%SPSVCINST_ASSOCSERVICE%,MLX4HCA.ServiceInstall,MLX4HCA.EventLog\r
+\r
+[MLX4HCA.DDInstall.ntamd64.Services]\r
+AddService = mlx4_hca,%SPSVCINST_ASSOCSERVICE%,MLX4HCA.ServiceInstall,MLX4HCA.EventLog\r
+\r
+[MLX4HCA.DDInstall.ntia64.Services]\r
+AddService = mlx4_hca,%SPSVCINST_ASSOCSERVICE%,MLX4HCA.ServiceInstall,MLX4HCA.EventLog\r
+\r
+[MLX4HCA.CopyFiles]\r
+mlx4_hca.sys\r
+\r
+[MLX4HCA.UMCopyFiles]\r
+mlx4u.dll,,,2\r
+mlx4ud.dll,,,2\r
+\r
+[MLX4HCA.WOW64CopyFiles]\r
+mlx4u.dll,mlx4u32.dll,,2\r
+mlx4ud.dll,mlx4u32d.dll,,2\r
+\r
+\r
+;*****************************************\r
+; Service Install section\r
+;*****************************************\r
+\r
+[MLX4HCA.ServiceInstall]\r
+DisplayName     = %MLX4HCA.ServiceDesc%\r
+ServiceType     = %SERVICE_KERNEL_DRIVER%\r
+StartType       = %SERVICE_DEMAND_START%\r
+ErrorControl    = %SERVICE_ERROR_NORMAL%\r
+ServiceBinary   = %12%\mlx4_hca.sys\r
+LoadOrderGroup  = extended base\r
+AddReg          = MLX4HCA.ParamsReg\r
+\r
+[MLX4HCA.EventLog]\r
+AddReg = MLX4HCA.AddEventLogReg\r
+\r
+[MLX4HCA.AddEventLogReg]\r
+HKR, , EventMessageFile, 0x00020000, "%%SystemRoot%%\System32\IoLogMsg.dll;%%SystemRoot%%\System32\drivers\mlx4_hca.sys"\r
+HKR, , TypesSupported,   0x00010001, 7\r
+\r
+[MLX4HCA.ParamsReg]\r
+HKR,,DeviceCharacteristics,0x10001,0x0100         ; Use same security checks on relative opens\r
+HKR,,Security,,"D:P(A;;GA;;;BA)(A;;GA;;;SY)"      ; Allow generic-all access to Built-in administrators and Local system \r
+HKR,"Parameters","DebugLevel",%REG_DWORD%,0x00000003\r
+HKR,"Parameters","DebugFlags",%REG_DWORD%,0x0000ffff\r
+HKLM,"System\CurrentControlSet\Control\WMI\GlobalLogger\F8C96A49-AE22-41e9-8025-D7E416884D89","Flags",%REG_DWORD%,0xffff\r
+HKLM,"System\CurrentControlSet\Control\WMI\GlobalLogger\F8C96A49-AE22-41e9-8025-D7E416884D89","Level",%REG_DWORD%,0x3\r
+\r
+;\r
+; The below section is temporarily disabled.\r
+; It should be uncommented after returning MLX4_HCA to WDF model.\r
+;\r
+\r
+;*****************************************\r
+; WDF Coinstaller installation section\r
+;*****************************************\r
+\r
+;[MLX4HCA.DDInstall.ntx86.CoInstallers]\r
+;AddReg=Wdf_CoInstaller_AddReg\r
+;CopyFiles=Wdf_CoInstaller_CopyFiles\r
+\r
+;[MLX4HCA.DDInstall.ntamd64.CoInstallers]\r
+;AddReg=Wdf_CoInstaller_AddReg\r
+;CopyFiles=Wdf_CoInstaller_CopyFiles\r
+\r
+;[MLX4HCA.DDInstall.ntia64.CoInstallers]\r
+;AddReg=Wdf_CoInstaller_AddReg\r
+;CopyFiles=Wdf_CoInstaller_CopyFiles\r
+\r
+;[Wdf_CoInstaller_AddReg]\r
+;HKR,,CoInstallers32,0x00010000, "wdfcoinstaller01005.dll,WdfCoInstaller"\r
+\r
+;[Wdf_CoInstaller_CopyFiles]\r
+;wdfcoinstaller01005.dll\r
+\r
+;[MLX4HCA.DDInstall.ntx86.Wdf]\r
+;KmdfService = mlx4_hca, mlx4_hca_wdfsect\r
+\r
+;[MLX4HCA.DDInstall.ntamd64.Wdf]\r
+;KmdfService = mlx4_hca, mlx4_hca_wdfsect\r
+\r
+;[MLX4HCA.DDInstall.ntia64.Wdf]\r
+;KmdfService = mlx4_hca, mlx4_hca_wdfsect\r
+\r
+;[mlx4_hca_wdfsect]\r
+;KmdfLibraryVersion = 1.5\r
+\r
+\r
+;*****************************************\r
+; Strings\r
+;*****************************************\r
+\r
+[Strings]\r
+Mlx4HcaClassGuid = "{31B0B28A-26FF-4dca-A6FA-E767C7DFBA20}"\r
+MTL="Mellanox Technologies Ltd."\r
+MLX4HCA.ServiceDesc = "Mellanox ConnectX Virtual InfiniBand Driver"\r
+Mlx4_Hca.DeviceDesc="Mellanox ConnectX Virtual Channel Adapter"\r
+DiskId = "Mellanox Mlx4 HCA installation disk"\r
+SPSVCINST_NULL = 0x0\r
+SPSVCINST_ASSOCSERVICE = 0x00000002\r
+SERVICE_KERNEL_DRIVER  = 1\r
+SERVICE_DEMAND_START   = 3\r
+SERVICE_ERROR_NORMAL   = 1\r
+REG_DWORD              = 0x00010001\r
+REG_MULTI_SZ_APPEND    = 0x00010008\r
+DIRID_SYSTEM           = 11\r
+DIRID_DRIVERS          = 12\r
+DIRID_SYSTEM_X86       = 16425\r
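
Note that the DriverVer=02/01/2008,1.0.0.0 line above is only a placeholder in the .inx: the stampinf rule in makefile.inc rewrites it at build time, so the shipped .inf carries the build date and svn-derived version instead, e.g. (illustrative values only):

    DriverVer=08/04/2008,1.0.0.1459
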
index 7fe73dc1445b257952480f56e2c9feb7aef460ad..1a91125ea78cf58ac02e7958ed63d8ec434d896b 100644 (file)
@@ -100,8 +100,6 @@ __create_qp (
        struct ib_qp_init_attr qp_init_attr;\r
        struct ib_ucontext *p_uctx = NULL;\r
        struct ib_pd *p_ib_pd = (struct ib_pd *)h_pd;\r
-       struct ib_device *p_ib_dev = p_ib_pd->device;\r
-       mlnx_hca_t *p_hca = ibdev2hca(p_ib_dev);\r
        struct ibv_create_qp *p_req = NULL;\r
        \r
        HCA_ENTER(HCA_DBG_QP);\r
@@ -121,7 +119,7 @@ __create_qp (
        // prepare the parameters\r
        RtlZeroMemory(&qp_init_attr, sizeof(qp_init_attr));\r
        qp_init_attr.event_handler = event_handler;\r
-       qp_init_attr.qp_context = p_hca;\r
+       qp_init_attr.qp_context = (void*)qp_uctx;\r
        qp_init_attr.send_cq = (struct ib_cq *)p_create_attr->h_sq_cq;\r
        qp_init_attr.recv_cq = (struct ib_cq *)p_create_attr->h_rq_cq;\r
        qp_init_attr.srq = (struct ib_srq *)p_create_attr->h_srq;\r
@@ -153,9 +151,6 @@ __create_qp (
                goto err_create_qp;\r
        }\r
 \r
-       // fill the object\r
-       p_ib_qp->x.ctx = (void*)qp_uctx;\r
-\r
        // Query QP to obtain requested attributes\r
        if (p_qp_attr) {\r
                status = mlnx_query_qp((ib_qp_handle_t)p_ib_qp, p_qp_attr, p_umv_buf);\r
index cedccea2c4b70d8e791b125481449f0a0d4a70bc..971b8358822346e274a5badd8fca868994abff70 100644 (file)
-/*
- * Copyright (c) 2005 SilverStorm Technologies.  All rights reserved.
- * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. 
- *
- * This software is available to you under the OpenIB.org BSD license
- * below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
- * $Id: al.c 1611 2006-08-20 14:48:55Z sleybo $
- */
-
-#include "precomp.h"
-
-#if defined(EVENT_TRACING)
-#ifdef offsetof
-#undef offsetof
-#endif
-#include "srq.tmh"
-#endif
-
-
-ib_api_status_t
-mlnx_create_srq (
-       IN              const   ib_pd_handle_t                  h_pd,
-       IN              const   void                                            *srq_context,
-       IN                              ci_async_event_cb_t                     event_handler,
-       IN              const   ib_srq_attr_t * const           p_srq_attr,
-               OUT                     ib_srq_handle_t                 *ph_srq,
-       IN      OUT                     ci_umv_buf_t                            *p_umv_buf )
-{
-       int err;
-       ib_api_status_t         status;
-       struct ib_srq *p_ib_srq;
-       struct ib_srq_init_attr srq_init_attr;
-       struct ib_ucontext *p_uctx = NULL;
-       struct ib_pd *p_ib_pd = (struct ib_pd *)h_pd;
-       struct ib_device *p_ib_dev = p_ib_pd->device;
-       mlnx_hca_t *p_hca = ibdev2hca(p_ib_dev);
-
-       HCA_ENTER(HCA_DBG_SRQ);
-
-       if( p_umv_buf  && p_umv_buf->command) {
-
-               // sanity checks 
-               if (p_umv_buf->input_size < sizeof(struct ibv_create_srq) ||
-                       p_umv_buf->output_size < sizeof(struct ibv_create_srq_resp) ||
-                       !p_umv_buf->p_inout_buf) {
-                       status = IB_INVALID_PARAMETER;
-                       goto err_inval_params;
-               }
-               p_uctx = p_ib_pd->p_uctx;
-       }
-
-       // prepare the parameters
-       RtlZeroMemory(&srq_init_attr, sizeof(srq_init_attr));
-       srq_init_attr.event_handler = event_handler;
-       srq_init_attr.srq_context = p_hca;
-       srq_init_attr.attr.max_wr = p_srq_attr->max_wr;
-       srq_init_attr.attr.max_sge = p_srq_attr->max_sge;
-       srq_init_attr.attr.srq_limit = p_srq_attr->srq_limit;
-
-       // allocate srq 
-       p_ib_srq = ibv_create_srq(p_ib_pd, &srq_init_attr, p_uctx, p_umv_buf );
-       if (IS_ERR(p_ib_srq)) {
-               err = PTR_ERR(p_ib_srq);
-               HCA_PRINT (TRACE_LEVEL_ERROR ,HCA_DBG_SRQ, ("ibv_create_srq failed (%d)\n", err));
-               status = errno_to_iberr(err);
-               goto err_create_srq;
-       }
-       p_ib_srq->x.ctx = (void*)srq_context;
-
-       // return the result
-       if (ph_srq) *ph_srq = (ib_srq_handle_t)p_ib_srq;
-
-       status = IB_SUCCESS;
-       
-err_create_srq:
-err_inval_params:
-       if (p_umv_buf && p_umv_buf->command) 
-               p_umv_buf->status = status;
-       if (status != IB_SUCCESS)
-               HCA_PRINT(TRACE_LEVEL_ERROR,HCA_DBG_SRQ,
-                       ("completes with ERROR status %x\n", status));
-       HCA_EXIT(HCA_DBG_SRQ);
-       return status;
-}
-
-
-ib_api_status_t
-mlnx_modify_srq (
-               IN              const   ib_srq_handle_t                         h_srq,
-               IN              const   ib_srq_attr_t* const                    p_srq_attr,
-               IN              const   ib_srq_attr_mask_t                      srq_attr_mask,
-               IN      OUT             ci_umv_buf_t                            *p_umv_buf OPTIONAL )
-{
-       int err;
-       ib_api_status_t         status = IB_SUCCESS;
-       struct ib_srq *p_ib_srq = (struct ib_srq *)h_srq;
-       UNUSED_PARAM(p_umv_buf);
-
-       HCA_ENTER(HCA_DBG_SRQ);
-
-       err = p_ib_srq->device->modify_srq(p_ib_srq, (void*)p_srq_attr, srq_attr_mask, NULL);
-       status = errno_to_iberr(err);
-
-       if (status != IB_SUCCESS)
-               HCA_PRINT(TRACE_LEVEL_ERROR,HCA_DBG_SRQ,
-                       ("completes with ERROR status %x\n", status));
-       HCA_EXIT(HCA_DBG_SRQ);
-       return status;
-}
-
-ib_api_status_t
-mlnx_query_srq (
-       IN              const   ib_srq_handle_t                         h_srq,
-               OUT                     ib_srq_attr_t* const                    p_srq_attr,
-       IN      OUT                     ci_umv_buf_t                            *p_umv_buf OPTIONAL )
-{
-       int err;
-       ib_api_status_t         status = IB_SUCCESS;
-       struct ib_srq *p_ib_srq = (struct ib_srq *)h_srq;
-       UNUSED_PARAM(p_umv_buf);
-
-       HCA_ENTER(HCA_DBG_SRQ);
-
-       err = p_ib_srq->device->query_srq(p_ib_srq, (void*)p_srq_attr);
-       status = errno_to_iberr(err);
-
-       if (status != IB_SUCCESS)
-               HCA_PRINT(TRACE_LEVEL_ERROR,HCA_DBG_SRQ,
-                       ("completes with ERROR status %x\n", status));
-       HCA_EXIT(HCA_DBG_SRQ);
-       return status;
-}
-
-ib_api_status_t
-mlnx_destroy_srq (
-       IN      const   ib_srq_handle_t         h_srq )
-{
-       int err;
-       ib_api_status_t         status = IB_SUCCESS;
-       struct ib_srq *p_ib_srq = (struct ib_srq *)h_srq;
-
-       HCA_ENTER(HCA_DBG_SRQ);
-
-       err = ib_destroy_srq(p_ib_srq);
-       status = errno_to_iberr(err);
-
-       if (status != IB_SUCCESS)
-               HCA_PRINT(TRACE_LEVEL_ERROR,HCA_DBG_SRQ,
-                       ("completes with ERROR status %x\n", status));
-       HCA_EXIT(HCA_DBG_SRQ);
-       return status;
-}
-
-void
-mlnx_srq_if(
-       IN      OUT                     ci_interface_t                          *p_interface )
-{
-       p_interface->create_srq = mlnx_create_srq;
-       p_interface->modify_srq = mlnx_modify_srq;
-       p_interface->query_srq = mlnx_query_srq;
-       p_interface->destroy_srq = mlnx_destroy_srq;
-}
-
+/*\r
+ * Copyright (c) 2005 SilverStorm Technologies.  All rights reserved.\r
+ * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. \r
+ *\r
+ * This software is available to you under the OpenIB.org BSD license\r
+ * below:\r
+ *\r
+ *     Redistribution and use in source and binary forms, with or\r
+ *     without modification, are permitted provided that the following\r
+ *     conditions are met:\r
+ *\r
+ *      - Redistributions of source code must retain the above\r
+ *        copyright notice, this list of conditions and the following\r
+ *        disclaimer.\r
+ *\r
+ *      - Redistributions in binary form must reproduce the above\r
+ *        copyright notice, this list of conditions and the following\r
+ *        disclaimer in the documentation and/or other materials\r
+ *        provided with the distribution.\r
+ *\r
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,\r
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\r
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND\r
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS\r
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN\r
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN\r
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\r
+ * SOFTWARE.\r
+ *\r
+ * $Id: al.c 1611 2006-08-20 14:48:55Z sleybo $\r
+ */\r
+\r
+#include "precomp.h"\r
+\r
+#if defined(EVENT_TRACING)\r
+#ifdef offsetof\r
+#undef offsetof\r
+#endif\r
+#include "srq.tmh"\r
+#endif\r
+\r
+\r
+ib_api_status_t\r
+mlnx_create_srq (\r
+       IN              const   ib_pd_handle_t                  h_pd,\r
+       IN              const   void                                            *srq_context,\r
+       IN                              ci_async_event_cb_t                     event_handler,\r
+       IN              const   ib_srq_attr_t * const           p_srq_attr,\r
+               OUT                     ib_srq_handle_t                 *ph_srq,\r
+       IN      OUT                     ci_umv_buf_t                            *p_umv_buf )\r
+{\r
+       int err;\r
+       ib_api_status_t         status;\r
+       struct ib_srq *p_ib_srq;\r
+       struct ib_srq_init_attr srq_init_attr;\r
+       struct ib_ucontext *p_uctx = NULL;\r
+       struct ib_pd *p_ib_pd = (struct ib_pd *)h_pd;\r
+\r
+       HCA_ENTER(HCA_DBG_SRQ);\r
+\r
+       if( p_umv_buf  && p_umv_buf->command) {\r
+\r
+               // sanity checks \r
+               if (p_umv_buf->input_size < sizeof(struct ibv_create_srq) ||\r
+                       p_umv_buf->output_size < sizeof(struct ibv_create_srq_resp) ||\r
+                       !p_umv_buf->p_inout_buf) {\r
+                       status = IB_INVALID_PARAMETER;\r
+                       goto err_inval_params;\r
+               }\r
+               p_uctx = p_ib_pd->p_uctx;\r
+       }\r
+\r
+       // prepare the parameters\r
+       RtlZeroMemory(&srq_init_attr, sizeof(srq_init_attr));\r
+       srq_init_attr.event_handler = event_handler;\r
+       srq_init_attr.srq_context = (void*)srq_context;\r
+       srq_init_attr.attr.max_wr = p_srq_attr->max_wr;\r
+       srq_init_attr.attr.max_sge = p_srq_attr->max_sge;\r
+       srq_init_attr.attr.srq_limit = p_srq_attr->srq_limit;\r
+\r
+       // allocate srq \r
+       p_ib_srq = ibv_create_srq(p_ib_pd, &srq_init_attr, p_uctx, p_umv_buf );\r
+       if (IS_ERR(p_ib_srq)) {\r
+               err = PTR_ERR(p_ib_srq);\r
+               HCA_PRINT (TRACE_LEVEL_ERROR ,HCA_DBG_SRQ, ("ibv_create_srq failed (%d)\n", err));\r
+               status = errno_to_iberr(err);\r
+               goto err_create_srq;\r
+       }\r
+\r
+       // return the result\r
+       if (ph_srq) *ph_srq = (ib_srq_handle_t)p_ib_srq;\r
+\r
+       status = IB_SUCCESS;\r
+       \r
+err_create_srq:\r
+err_inval_params:\r
+       if (p_umv_buf && p_umv_buf->command) \r
+               p_umv_buf->status = status;\r
+       if (status != IB_SUCCESS)\r
+               HCA_PRINT(TRACE_LEVEL_ERROR,HCA_DBG_SRQ,\r
+                       ("completes with ERROR status %x\n", status));\r
+       HCA_EXIT(HCA_DBG_SRQ);\r
+       return status;\r
+}\r
+\r
+\r
+ib_api_status_t\r
+mlnx_modify_srq (\r
+               IN              const   ib_srq_handle_t                         h_srq,\r
+               IN              const   ib_srq_attr_t* const                    p_srq_attr,\r
+               IN              const   ib_srq_attr_mask_t                      srq_attr_mask,\r
+               IN      OUT             ci_umv_buf_t                            *p_umv_buf OPTIONAL )\r
+{\r
+       int err;\r
+       ib_api_status_t         status = IB_SUCCESS;\r
+       struct ib_srq *p_ib_srq = (struct ib_srq *)h_srq;\r
+       UNUSED_PARAM(p_umv_buf);\r
+\r
+       HCA_ENTER(HCA_DBG_SRQ);\r
+\r
+       err = p_ib_srq->device->modify_srq(p_ib_srq, (void*)p_srq_attr, srq_attr_mask, NULL);\r
+       status = errno_to_iberr(err);\r
+\r
+       if (status != IB_SUCCESS)\r
+               HCA_PRINT(TRACE_LEVEL_ERROR,HCA_DBG_SRQ,\r
+                       ("completes with ERROR status %x\n", status));\r
+       HCA_EXIT(HCA_DBG_SRQ);\r
+       return status;\r
+}\r
+\r
+ib_api_status_t\r
+mlnx_query_srq (\r
+       IN              const   ib_srq_handle_t                         h_srq,\r
+               OUT                     ib_srq_attr_t* const                    p_srq_attr,\r
+       IN      OUT                     ci_umv_buf_t                            *p_umv_buf OPTIONAL )\r
+{\r
+       int err;\r
+       ib_api_status_t         status = IB_SUCCESS;\r
+       struct ib_srq *p_ib_srq = (struct ib_srq *)h_srq;\r
+       UNUSED_PARAM(p_umv_buf);\r
+\r
+       HCA_ENTER(HCA_DBG_SRQ);\r
+\r
+       err = p_ib_srq->device->query_srq(p_ib_srq, (void*)p_srq_attr);\r
+       status = errno_to_iberr(err);\r
+\r
+       if (status != IB_SUCCESS)\r
+               HCA_PRINT(TRACE_LEVEL_ERROR,HCA_DBG_SRQ,\r
+                       ("completes with ERROR status %x\n", status));\r
+       HCA_EXIT(HCA_DBG_SRQ);\r
+       return status;\r
+}\r
+\r
+ib_api_status_t\r
+mlnx_destroy_srq (\r
+       IN      const   ib_srq_handle_t         h_srq )\r
+{\r
+       int err;\r
+       ib_api_status_t         status = IB_SUCCESS;\r
+       struct ib_srq *p_ib_srq = (struct ib_srq *)h_srq;\r
+\r
+       HCA_ENTER(HCA_DBG_SRQ);\r
+\r
+       err = ib_destroy_srq(p_ib_srq);\r
+       status = errno_to_iberr(err);\r
+\r
+       if (status != IB_SUCCESS)\r
+               HCA_PRINT(TRACE_LEVEL_ERROR,HCA_DBG_SRQ,\r
+                       ("completes with ERROR status %x\n", status));\r
+       HCA_EXIT(HCA_DBG_SRQ);\r
+       return status;\r
+}\r
+\r
+void\r
+mlnx_srq_if(\r
+       IN      OUT                     ci_interface_t                          *p_interface )\r
+{\r
+       p_interface->create_srq = mlnx_create_srq;\r
+       p_interface->modify_srq = mlnx_modify_srq;\r
+       p_interface->query_srq = mlnx_query_srq;\r
+       p_interface->destroy_srq = mlnx_destroy_srq;\r
+}\r
+\r
index 38c7ff2bee94c6c8ff93730b8f32e382a399e050..87c5586e0e625afb8f24540bd14056686e1994ef 100644 (file)
@@ -870,12 +870,10 @@ mlnx_create_srq (
        int err;\r
        ib_api_status_t         status;\r
        struct ib_srq *ib_srq_p;\r
-       struct mthca_srq *srq_p;\r
        struct ib_srq_init_attr srq_init_attr;\r
        struct ib_ucontext *p_context = NULL;\r
        struct ib_pd *ib_pd_p = (struct ib_pd *)h_pd;\r
        struct ib_device *ib_dev = ib_pd_p->device;\r
-       mlnx_hob_t       *hob_p = HOB_FROM_IBDEV(ib_dev);\r
 \r
        HCA_ENTER(HCA_DBG_SRQ);\r
 \r
@@ -894,7 +892,7 @@ mlnx_create_srq (
        // prepare the parameters\r
        RtlZeroMemory(&srq_init_attr, sizeof(srq_init_attr));\r
        srq_init_attr.event_handler = event_handler;\r
-       srq_init_attr.srq_context = hob_p;\r
+       srq_init_attr.srq_context = (void*)srq_context;\r
        srq_init_attr.attr = *p_srq_attr;\r
 \r
        // allocate srq \r
@@ -906,12 +904,8 @@ mlnx_create_srq (
                goto err_create_srq;\r
        }\r
 \r
-       // fill the object\r
-       srq_p = (struct mthca_srq *)ib_srq_p;\r
-       srq_p->srq_context = (void*)srq_context;\r
-       \r
        // return the result\r
-       if (ph_srq) *ph_srq = (ib_srq_handle_t)srq_p;\r
+       if (ph_srq) *ph_srq = (ib_srq_handle_t)ib_srq_p;\r
 \r
        status = IB_SUCCESS;\r
        \r
@@ -1044,7 +1038,6 @@ _create_qp (
        struct ib_ucontext *p_context = NULL;\r
        struct ib_pd *ib_pd_p = (struct ib_pd *)h_pd;\r
        struct ib_device *ib_dev = ib_pd_p->device;\r
-       mlnx_hob_t       *hob_p = HOB_FROM_IBDEV(ib_dev);\r
        \r
        HCA_ENTER(HCA_DBG_QP);\r
 \r
@@ -1063,7 +1056,7 @@ _create_qp (
        RtlZeroMemory(&qp_init_attr, sizeof(qp_init_attr));\r
        qp_init_attr.qp_type = p_create_attr->qp_type;\r
        qp_init_attr.event_handler = event_handler;\r
-       qp_init_attr.qp_context = hob_p;\r
+       qp_init_attr.qp_context = (void*)qp_context;\r
        qp_init_attr.recv_cq = (struct ib_cq *)p_create_attr->h_rq_cq;\r
        qp_init_attr.send_cq = (struct ib_cq *)p_create_attr->h_sq_cq;\r
        qp_init_attr.srq = (struct ib_srq *)p_create_attr->h_srq;\r
@@ -1087,7 +1080,6 @@ _create_qp (
 \r
        // fill the object\r
        qp_p = (struct mthca_qp *)ib_qp_p;\r
-       qp_p->qp_context = (void*)qp_context;\r
        qp_p->qp_init_attr = qp_init_attr;\r
 \r
        // Query QP to obtain requested attributes\r
@@ -1401,7 +1393,6 @@ mlnx_create_cq (
        int err;\r
        ib_api_status_t         status;\r
        struct ib_cq *ib_cq_p;\r
-       struct mthca_cq *cq_p;\r
        mlnx_hob_t                      *hob_p;\r
        struct ib_device *ib_dev;\r
        struct ib_ucontext *p_context;\r
@@ -1437,7 +1428,7 @@ mlnx_create_cq (
        // allocate cq  \r
        ib_cq_p = ibv_create_cq(ib_dev, \r
                cq_comp_handler, event_handler,\r
-               hob_p, *p_size, p_context, p_umv_buf );\r
+               (void*)cq_context, *p_size, p_context, p_umv_buf );\r
        if (IS_ERR(ib_cq_p)) {\r
                err = PTR_ERR(ib_cq_p);\r
                HCA_PRINT (TRACE_LEVEL_ERROR ,HCA_DBG_CQ, ("ibv_create_cq failed (%d)\n", err));\r
@@ -1445,15 +1436,11 @@ mlnx_create_cq (
                goto err_create_cq;\r
        }\r
 \r
-       // fill the object\r
-       cq_p = (struct mthca_cq *)ib_cq_p;\r
-       cq_p->cq_context = (void*)cq_context;\r
-       \r
        // return the result\r
 //     *p_size = *p_size;      // return the same value\r
        *p_size = ib_cq_p->cqe;\r
 \r
-       if (ph_cq) *ph_cq = (ib_cq_handle_t)cq_p;\r
+       if (ph_cq) *ph_cq = (ib_cq_handle_t)ib_cq_p;\r
 \r
        status = IB_SUCCESS;\r
        \r
index f0f5ea0f1e4bbe2aacc6e5ce270460ea0013a16c..d937695ce1a378283802dcf438d6e55f56d2a7d6 100644 (file)
-/*
- * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
- * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
- * Copyright (c) 2005 Cisco Systems, Inc. All rights reserved.
- * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
- * Copyright (c) 2004 Voltaire, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
- * $Id$
- */
-
-#include <ib_pack.h>
-
-#include "mthca_dev.h"
-#if defined(EVENT_TRACING)
-#ifdef offsetof
-#undef offsetof
-#endif
-#include "mthca_cq.tmh"
-#endif
-#include "mthca_cmd.h"
-#include "mthca_memfree.h"
-#include "mthca_wqe.h"
-
-
-#ifdef ALLOC_PRAGMA
-#pragma alloc_text (PAGE, mthca_init_cq_table)
-#pragma alloc_text (PAGE, mthca_cleanup_cq_table)
-#endif
-
-enum {
-       MTHCA_MAX_DIRECT_CQ_SIZE = 4 * PAGE_SIZE
-};
-
-/*
- * Must be packed because start is 64 bits but only aligned to 32 bits.
- */
-#pragma pack(push,1)
-struct mthca_cq_context {
-       __be32 flags;
-       __be64 start;
-       __be32 logsize_usrpage;
-       __be32 error_eqn;       /* Tavor only */
-       __be32 comp_eqn;
-       __be32 pd;
-       __be32 lkey;
-       __be32 last_notified_index;
-       __be32 solicit_producer_index;
-       __be32 consumer_index;
-       __be32 producer_index;
-       __be32 cqn;
-       __be32 ci_db;           /* Arbel only */
-       __be32 state_db;        /* Arbel only */
-       u32    reserved;
-};
-#pragma pack(pop)
-
-#define MTHCA_CQ_STATUS_OK          ( 0 << 28)
-#define MTHCA_CQ_STATUS_OVERFLOW    ( 9 << 28)
-#define MTHCA_CQ_STATUS_WRITE_FAIL  (10 << 28)
-#define MTHCA_CQ_FLAG_TR            ( 1 << 18)
-#define MTHCA_CQ_FLAG_OI            ( 1 << 17)
-#define MTHCA_CQ_STATE_DISARMED     ( 0 <<  8)
-#define MTHCA_CQ_STATE_ARMED        ( 1 <<  8)
-#define MTHCA_CQ_STATE_ARMED_SOL    ( 4 <<  8)
-#define MTHCA_EQ_STATE_FIRED        (10 <<  8)
-
-enum {
-       MTHCA_ERROR_CQE_OPCODE_MASK = 0xfe
-};
-
-enum {
-       SYNDROME_LOCAL_LENGTH_ERR        = 0x01,
-       SYNDROME_LOCAL_QP_OP_ERR         = 0x02,
-       SYNDROME_LOCAL_EEC_OP_ERR        = 0x03,
-       SYNDROME_LOCAL_PROT_ERR          = 0x04,
-       SYNDROME_WR_FLUSH_ERR            = 0x05,
-       SYNDROME_MW_BIND_ERR             = 0x06,
-       SYNDROME_BAD_RESP_ERR            = 0x10,
-       SYNDROME_LOCAL_ACCESS_ERR        = 0x11,
-       SYNDROME_REMOTE_INVAL_REQ_ERR    = 0x12,
-       SYNDROME_REMOTE_ACCESS_ERR       = 0x13,
-       SYNDROME_REMOTE_OP_ERR           = 0x14,
-       SYNDROME_RETRY_EXC_ERR           = 0x15,
-       SYNDROME_RNR_RETRY_EXC_ERR       = 0x16,
-       SYNDROME_LOCAL_RDD_VIOL_ERR      = 0x20,
-       SYNDROME_REMOTE_INVAL_RD_REQ_ERR = 0x21,
-       SYNDROME_REMOTE_ABORTED_ERR      = 0x22,
-       SYNDROME_INVAL_EECN_ERR          = 0x23,
-       SYNDROME_INVAL_EEC_STATE_ERR     = 0x24
-};
-
-struct mthca_cqe {
-       __be32 my_qpn;
-       __be32 my_ee;
-       __be32 rqpn;
-       __be16 sl_g_mlpath;
-       __be16 rlid;
-       __be32 imm_etype_pkey_eec;
-       __be32 byte_cnt;
-       __be32 wqe;
-       u8     opcode;
-       u8     is_send;
-       u8     reserved;
-       u8     owner;
-};
-
-struct mthca_err_cqe {
-       __be32 my_qpn;
-       u32    reserved1[3];
-       u8     syndrome;
-       u8     vendor_err;
-       __be16 db_cnt;
-       u32    reserved2;
-       __be32 wqe;
-       u8     opcode;
-       u8     reserved3[2];
-       u8     owner;
-};
-
-#define MTHCA_CQ_ENTRY_OWNER_SW      (0 << 7)
-#define MTHCA_CQ_ENTRY_OWNER_HW      (1 << 7)
-
-#define MTHCA_TAVOR_CQ_DB_INC_CI       (1 << 24)
-#define MTHCA_TAVOR_CQ_DB_REQ_NOT      (2 << 24)
-#define MTHCA_TAVOR_CQ_DB_REQ_NOT_SOL  (3 << 24)
-#define MTHCA_TAVOR_CQ_DB_SET_CI       (4 << 24)
-#define MTHCA_TAVOR_CQ_DB_REQ_NOT_MULT (5 << 24)
-
-#define MTHCA_ARBEL_CQ_DB_REQ_NOT_SOL  (1 << 24)
-#define MTHCA_ARBEL_CQ_DB_REQ_NOT      (2 << 24)
-#define MTHCA_ARBEL_CQ_DB_REQ_NOT_MULT (3 << 24)
-
-static inline struct mthca_cqe *get_cqe(struct mthca_cq *cq, int entry)
-{
-       if (cq->is_direct)
-               return (struct mthca_cqe *)((u8*)cq->queue.direct.page + (entry * MTHCA_CQ_ENTRY_SIZE));
-       else
-               return (struct mthca_cqe *)((u8*)cq->queue.page_list[entry * MTHCA_CQ_ENTRY_SIZE / PAGE_SIZE].page
-                       + (entry * MTHCA_CQ_ENTRY_SIZE) % PAGE_SIZE);
-}
-
-static inline struct mthca_cqe *cqe_sw(struct mthca_cq *cq, int i)
-{
-       struct mthca_cqe *cqe = get_cqe(cq, i);
-       return MTHCA_CQ_ENTRY_OWNER_HW & cqe->owner ? NULL : cqe;
-}
-
-static inline struct mthca_cqe *next_cqe_sw(struct mthca_cq *cq)
-{
-       return cqe_sw(cq, cq->cons_index & cq->ibcq.cqe);
-}
-
-static inline void set_cqe_hw(struct mthca_cqe *cqe)
-{
-       cqe->owner = MTHCA_CQ_ENTRY_OWNER_HW;
-}
-
-static void dump_cqe(u32 print_lvl, struct mthca_dev *dev, void *cqe_ptr)
-{
-       __be32 *cqe = cqe_ptr;
-       UNREFERENCED_PARAMETER(dev);
-    UNUSED_PARAM_WOWPP(print_lvl);
-
-       (void) cqe;     /* avoid warning if mthca_dbg compiled away... */
-       HCA_PRINT(print_lvl,HCA_DBG_CQ,("CQE contents \n"));
-       HCA_PRINT(print_lvl,HCA_DBG_CQ,("\t[%2x] %08x %08x %08x %08x\n",0,
-               cl_ntoh32(cqe[0]), cl_ntoh32(cqe[1]), cl_ntoh32(cqe[2]), cl_ntoh32(cqe[3])));
-       HCA_PRINT(print_lvl,HCA_DBG_CQ,("\t[%2x] %08x %08x %08x %08x \n",16,
-               cl_ntoh32(cqe[4]), cl_ntoh32(cqe[5]), cl_ntoh32(cqe[6]), cl_ntoh32(cqe[7])));
-}
-
-/*
- * incr is ignored in native Arbel (mem-free) mode, so cq->cons_index
- * should be correct before calling update_cons_index().
- */
-static inline void update_cons_index(struct mthca_dev *dev, struct mthca_cq *cq,
-                                    int incr)
-{
-       __be32 doorbell[2];
-
-       if (mthca_is_memfree(dev)) {
-               *cq->set_ci_db = cl_hton32(cq->cons_index);
-               wmb();
-       } else {
-               doorbell[0] = cl_hton32(MTHCA_TAVOR_CQ_DB_INC_CI | cq->cqn);
-               doorbell[1] = cl_hton32(incr - 1);
-
-               mthca_write64(doorbell,
-                             dev->kar + MTHCA_CQ_DOORBELL,
-                             MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
-       }
-}
-
-void mthca_cq_completion(struct mthca_dev *dev, u32 cqn)
-{
-       struct mthca_cq *cq;
-
-       cq = mthca_array_get(&dev->cq_table.cq, cqn & (dev->limits.num_cqs - 1));
-
-       if (!cq) {
-               HCA_PRINT(TRACE_LEVEL_WARNING  ,HCA_DBG_LOW  ,("Completion event for bogus CQ %08x\n", cqn));
-               return;
-       }
-
-       if (mthca_is_memfree(dev)) {
-               if (cq->ibcq.ucontext)
-                       ++*cq->p_u_arm_sn;
-               else
-                       ++cq->arm_sn;
-       }
-
-       cq->ibcq.comp_handler(cq->cq_context);
-}
-
-void mthca_cq_event(struct mthca_dev *dev, u32 cqn,
-                   enum ib_event_type event_type)
-{
-       struct mthca_cq *cq;
-       ib_event_rec_t event;
-       SPIN_LOCK_PREP(lh);
-
-       spin_lock(&dev->cq_table.lock, &lh);
-
-       cq = mthca_array_get(&dev->cq_table.cq, cqn & (dev->limits.num_cqs - 1));
-
-       if (cq)
-               atomic_inc(&cq->refcount);
-       spin_unlock(&lh);
-
-       if (!cq) {
-               HCA_PRINT(TRACE_LEVEL_WARNING  ,HCA_DBG_LOW  ,("Async event for bogus CQ %08x\n", cqn));
-               return;
-       }
-
-       event.type = event_type;
-       event.context = cq->ibcq.cq_context;
-       event.vendor_specific = 0;
-       cq->ibcq.event_handler(&event);
-
-       if (atomic_dec_and_test(&cq->refcount))
-               wake_up(&cq->wait);
-}
-
-void mthca_cq_clean(struct mthca_dev *dev, u32 cqn, u32 qpn,
-                   struct mthca_srq *srq)
-{
-       struct mthca_cq *cq;
-       struct mthca_cqe *cqe;
-       u32 prod_index;
-       int nfreed = 0;
-       SPIN_LOCK_PREP(lht);
-       SPIN_LOCK_PREP(lh);
-
-       spin_lock_irq(&dev->cq_table.lock, &lht);
-       cq = mthca_array_get(&dev->cq_table.cq, cqn & (dev->limits.num_cqs - 1));
-       if (cq)
-               atomic_inc(&cq->refcount);
-       spin_unlock_irq(&lht);
-
-       if (!cq)
-               return;
-
-       spin_lock_irq(&cq->lock, &lh);
-
-       /*
-        * First we need to find the current producer index, so we
-        * know where to start cleaning from.  It doesn't matter if HW
-        * adds new entries after this loop -- the QP we're worried
-        * about is already in RESET, so the new entries won't come
-        * from our QP and therefore don't need to be checked.
-        */
-       for (prod_index = cq->cons_index;
-            cqe_sw(cq, prod_index & cq->ibcq.cqe);
-            ++prod_index) {
-               if (prod_index == cq->cons_index + cq->ibcq.cqe)
-                       break;
-       }
-
-       HCA_PRINT(TRACE_LEVEL_VERBOSE,HCA_DBG_LOW,("Cleaning QPN %06x from CQN %06x; ci %d, pi %d\n",
-                 qpn, cqn, cq->cons_index, prod_index));
-
-       /*
-        * Now sweep backwards through the CQ, removing CQ entries
-        * that match our QP by copying older entries on top of them.
-        */
-       while ((int) --prod_index - (int) cq->cons_index >= 0) {
-               cqe = get_cqe(cq, prod_index & cq->ibcq.cqe);
-               if (cqe->my_qpn == cl_hton32(qpn)) {
-                       if (srq)
-                               mthca_free_srq_wqe(srq, cl_ntoh32(cqe->wqe));
-                       ++nfreed;
-               } else if (nfreed) {
-                       memcpy(get_cqe(cq, (prod_index + nfreed) & cq->ibcq.cqe),
-                               cqe, MTHCA_CQ_ENTRY_SIZE);
-               }
-       }
-
-       if (nfreed) {
-               wmb();
-               cq->cons_index += nfreed;
-               update_cons_index(dev, cq, nfreed);
-       }
-
-       spin_unlock_irq(&lh);
-       if (atomic_dec_and_test(&cq->refcount))
-               wake_up(&cq->wait);
-}
-
-static void handle_error_cqe(struct mthca_dev *dev, struct mthca_cq *cq,
-                           struct mthca_qp *qp, int wqe_index, int is_send,
-                           struct mthca_err_cqe *cqe,
-                           struct _ib_wc *entry, int *free_cqe)
-{
-       int dbd;
-       __be32 new_wqe;
-
-       UNREFERENCED_PARAMETER(cq);
-       
-       if (cqe->syndrome != SYNDROME_WR_FLUSH_ERR) {
-               HCA_PRINT(TRACE_LEVEL_INFORMATION ,HCA_DBG_CQ ,("Completion with error "
-                         "(QPN %06x, WQE @ %08x, CQN %06x, index %d)\n",
-                         cl_ntoh32(cqe->my_qpn), cl_ntoh32(cqe->wqe),
-                         cq->cqn, cq->cons_index));
-               dump_cqe(TRACE_LEVEL_INFORMATION, dev, cqe);
-       }
-
-
-       /*
-        * For completions in error, only work request ID, status, vendor error
-        * (and freed resource count for RD) have to be set.
-        */
-       switch (cqe->syndrome) {
-       case SYNDROME_LOCAL_LENGTH_ERR:
-               entry->status = IB_WCS_LOCAL_LEN_ERR;
-               break;
-       case SYNDROME_LOCAL_QP_OP_ERR:
-               entry->status = IB_WCS_LOCAL_OP_ERR;
-               break;
-       case SYNDROME_LOCAL_PROT_ERR:
-               entry->status = IB_WCS_LOCAL_PROTECTION_ERR;
-               break;
-       case SYNDROME_WR_FLUSH_ERR:
-               entry->status = IB_WCS_WR_FLUSHED_ERR;
-               break;
-       case SYNDROME_MW_BIND_ERR:
-               entry->status = IB_WCS_MEM_WINDOW_BIND_ERR;
-               break;
-       case SYNDROME_BAD_RESP_ERR:
-               entry->status = IB_WCS_BAD_RESP_ERR;
-               break;
-       case SYNDROME_LOCAL_ACCESS_ERR:
-               entry->status = IB_WCS_LOCAL_ACCESS_ERR;
-               break;
-       case SYNDROME_REMOTE_INVAL_REQ_ERR:
-               entry->status = IB_WCS_REM_INVALID_REQ_ERR;
-               break;
-       case SYNDROME_REMOTE_ACCESS_ERR:
-               entry->status = IB_WCS_REM_ACCESS_ERR;
-               break;
-       case SYNDROME_REMOTE_OP_ERR:
-               entry->status = IB_WCS_REM_OP_ERR;
-               break;
-       case SYNDROME_RETRY_EXC_ERR:
-               entry->status = IB_WCS_TIMEOUT_RETRY_ERR;
-               break;
-       case SYNDROME_RNR_RETRY_EXC_ERR:
-               entry->status = IB_WCS_RNR_RETRY_ERR;
-               break;
-       case SYNDROME_REMOTE_INVAL_RD_REQ_ERR:
-               entry->status = IB_WCS_REM_INVALID_REQ_ERR;
-               break;
-       case SYNDROME_REMOTE_ABORTED_ERR:
-       case SYNDROME_LOCAL_EEC_OP_ERR:
-       case SYNDROME_LOCAL_RDD_VIOL_ERR:
-       case SYNDROME_INVAL_EECN_ERR:
-       case SYNDROME_INVAL_EEC_STATE_ERR:
-       default:
-               entry->status = IB_WCS_GENERAL_ERR;
-               break;
-       }
-
-       entry->vendor_specific = cqe->vendor_err;
-       
-       /*
-        * Mem-free HCAs always generate one CQE per WQE, even in the
-        * error case, so we don't have to check the doorbell count, etc.
-        */
-       if (mthca_is_memfree(dev))
-               return;
-
-       mthca_free_err_wqe(dev, qp, is_send, wqe_index, &dbd, &new_wqe);
-
-       /*
-        * If we're at the end of the WQE chain, or we've used up our
-        * doorbell count, free the CQE.  Otherwise just update it for
-        * the next poll operation.
-        */
-       if (!(new_wqe & cl_hton32(0x3f)) || (!cqe->db_cnt && dbd))
-               return;
-
-       cqe->db_cnt   = cl_hton16(cl_ntoh16(cqe->db_cnt) - (u16)dbd);
-       cqe->wqe      = new_wqe;
-       cqe->syndrome = SYNDROME_WR_FLUSH_ERR;
-
-       *free_cqe = 0;
-}
-
-static inline int mthca_poll_one(struct mthca_dev *dev,
-                                struct mthca_cq *cq,
-                                struct mthca_qp **cur_qp,
-                                int *freed,
-                                struct _ib_wc *entry)
-{
-       struct mthca_wq *wq;
-       struct mthca_cqe *cqe;
-       unsigned  wqe_index;
-       int is_error;
-       int is_send;
-       int free_cqe = 1;
-       int err = 0;
-
-       HCA_ENTER(HCA_DBG_CQ);
-       cqe = next_cqe_sw(cq);
-       if (!cqe)
-               return -EAGAIN;
-
-       /*
-        * Make sure we read CQ entry contents after we've checked the
-        * ownership bit.
-        */
-       rmb();
-
-       { // debug print
-               HCA_PRINT(TRACE_LEVEL_VERBOSE,HCA_DBG_CQ,("CQ: 0x%06x/%d: CQE -> QPN 0x%06x, WQE @ 0x%08x\n",
-                         cq->cqn, cq->cons_index, cl_ntoh32(cqe->my_qpn),
-                         cl_ntoh32(cqe->wqe)));
-               dump_cqe(TRACE_LEVEL_VERBOSE, dev, cqe);
-       }
-
-       is_error = (cqe->opcode & MTHCA_ERROR_CQE_OPCODE_MASK) ==
-               MTHCA_ERROR_CQE_OPCODE_MASK;
-       is_send  = is_error ? cqe->opcode & 0x01 : cqe->is_send & 0x80;
-
-       if (!*cur_qp || cl_ntoh32(cqe->my_qpn) != (*cur_qp)->qpn) {
-               /*
-                * We do not have to take the QP table lock here,
-                * because CQs will be locked while QPs are removed
-                * from the table.
-                */
-               *cur_qp = mthca_array_get(&dev->qp_table.qp,
-                                         cl_ntoh32(cqe->my_qpn) &
-                                         (dev->limits.num_qps - 1));
-               if (!*cur_qp) {
-                       HCA_PRINT(TRACE_LEVEL_WARNING,HCA_DBG_CQ, ("CQ entry for unknown QP %06x\n",
-                                  cl_ntoh32(cqe->my_qpn) & 0xffffff));
-                       err = -EINVAL;
-                       goto out;
-               }
-       }
-
-       if (is_send) {
-               wq = &(*cur_qp)->sq;
-               wqe_index = ((cl_ntoh32(cqe->wqe) - (*cur_qp)->send_wqe_offset)
-                            >> wq->wqe_shift);
-               entry->wr_id = (*cur_qp)->wrid[wqe_index +
-                                              (*cur_qp)->rq.max];
-       } else if ((*cur_qp)->ibqp.srq) {
-               struct mthca_srq *srq = to_msrq((*cur_qp)->ibqp.srq);
-               u32 wqe = cl_ntoh32(cqe->wqe);
-               wq = NULL;
-               wqe_index = wqe >> srq->wqe_shift;
-               entry->wr_id = srq->wrid[wqe_index];
-               mthca_free_srq_wqe(srq, wqe);
-       } else {
-               wq = &(*cur_qp)->rq;
-               wqe_index = cl_ntoh32(cqe->wqe) >> wq->wqe_shift;
-               entry->wr_id = (*cur_qp)->wrid[wqe_index];
-       }
-
-       if (wq) {
-               if (wq->last_comp < wqe_index)
-                       wq->tail += wqe_index - wq->last_comp;
-               else
-                       wq->tail += wqe_index + wq->max - wq->last_comp;
-
-               wq->last_comp = wqe_index;
-       }
-
-       if (is_send) {
-               entry->recv.ud.recv_opt = 0;
-               switch (cqe->opcode) {
-               case MTHCA_OPCODE_RDMA_WRITE:
-                       entry->wc_type    = IB_WC_RDMA_WRITE;
-                       break;
-               case MTHCA_OPCODE_RDMA_WRITE_IMM:
-                       entry->wc_type    = IB_WC_RDMA_WRITE;
-                       entry->recv.ud.recv_opt |= IB_RECV_OPT_IMMEDIATE;
-                       break;
-               case MTHCA_OPCODE_SEND:
-                       entry->wc_type    = IB_WC_SEND;
-                       break;
-               case MTHCA_OPCODE_SEND_IMM:
-                       entry->wc_type    = IB_WC_SEND;
-                       entry->recv.ud.recv_opt |= IB_RECV_OPT_IMMEDIATE;
-                       break;
-               case MTHCA_OPCODE_RDMA_READ:
-                       entry->wc_type    = IB_WC_RDMA_READ;
-                       entry->length  = cl_ntoh32(cqe->byte_cnt);
-                       break;
-               case MTHCA_OPCODE_ATOMIC_CS:
-                       entry->wc_type    = IB_WC_COMPARE_SWAP;
-                       entry->length = MTHCA_BYTES_PER_ATOMIC_COMPL;
-                       break;
-               case MTHCA_OPCODE_ATOMIC_FA:
-                       entry->wc_type    = IB_WC_FETCH_ADD;
-                       entry->length = MTHCA_BYTES_PER_ATOMIC_COMPL;
-                       break;
-               case MTHCA_OPCODE_BIND_MW:
-                       entry->wc_type    = IB_WC_MW_BIND;
-                       break;
-               default:
-                       entry->wc_type    = IB_WC_SEND;
-                       break;
-               }
-       } else {
-               entry->length = cl_ntoh32(cqe->byte_cnt);
-               switch (cqe->opcode & 0x1f) {
-               case IB_OPCODE_SEND_LAST_WITH_IMMEDIATE:
-               case IB_OPCODE_SEND_ONLY_WITH_IMMEDIATE:
-                       entry->recv.ud.recv_opt = IB_RECV_OPT_IMMEDIATE;
-                       entry->recv.ud.immediate_data = cqe->imm_etype_pkey_eec;
-                       entry->wc_type = IB_WC_RECV;
-                       break;
-               case IB_OPCODE_RDMA_WRITE_LAST_WITH_IMMEDIATE:
-               case IB_OPCODE_RDMA_WRITE_ONLY_WITH_IMMEDIATE:
-                       entry->recv.ud.recv_opt = IB_RECV_OPT_IMMEDIATE;
-                       entry->recv.ud.immediate_data = cqe->imm_etype_pkey_eec;
-                       entry->wc_type = IB_WC_RECV_RDMA_WRITE;
-                       break;
-               default:
-                       entry->recv.ud.recv_opt = 0;
-                       entry->wc_type = IB_WC_RECV;
-                       break;
-               }
-               entry->recv.ud.remote_lid          = cqe->rlid;
-               entry->recv.ud.remote_qp           = cqe->rqpn & 0xffffff00;
-               entry->recv.ud.pkey_index  = (u16)(cl_ntoh32(cqe->imm_etype_pkey_eec) >> 16);
-               entry->recv.ud.remote_sl           = (uint8_t)(cl_ntoh16(cqe->sl_g_mlpath) >> 12);
-               entry->recv.ud.path_bits = (uint8_t)(cl_ntoh16(cqe->sl_g_mlpath) & 0x7f);
-               entry->recv.ud.recv_opt   |= cl_ntoh16(cqe->sl_g_mlpath) & 0x80 ?
-                                       IB_RECV_OPT_GRH_VALID : 0;
-       }
-       if (!is_send && cqe->rlid == 0){
-               HCA_PRINT(TRACE_LEVEL_INFORMATION,HCA_DBG_CQ,("found rlid == 0\n"));
-               entry->recv.ud.recv_opt   |= IB_RECV_OPT_FORWARD;
-       }
-       if (is_error) {
-               handle_error_cqe(dev, cq, *cur_qp, wqe_index, is_send,
-                       (struct mthca_err_cqe *) cqe, entry, &free_cqe);
-       }
-       else
-               entry->status = IB_WCS_SUCCESS;
-
- out:
-       if (likely(free_cqe)) {
-               set_cqe_hw(cqe);
-               ++(*freed);
-               ++cq->cons_index;
-       }
-       HCA_EXIT(HCA_DBG_CQ);
-       return err;
-}
-
-int mthca_poll_cq(struct ib_cq *ibcq, int num_entries,
-                 struct _ib_wc *entry)
-{
-       struct mthca_dev *dev = to_mdev(ibcq->device);
-       struct mthca_cq *cq = to_mcq(ibcq);
-       struct mthca_qp *qp = NULL;
-       int err = 0;
-       int freed = 0;
-       int npolled;
-       SPIN_LOCK_PREP(lh);
-
-       spin_lock_irqsave(&cq->lock, &lh);
-
-       for (npolled = 0; npolled < num_entries; ++npolled) {
-               err = mthca_poll_one(dev, cq, &qp,
-                                    &freed, entry + npolled);
-               if (err)
-                       break;
-       }
-
-       if (freed) {
-               wmb();
-               update_cons_index(dev, cq, freed);
-       }
-
-       spin_unlock_irqrestore(&lh);
-
-       return (err == 0 || err == -EAGAIN) ? npolled : err;
-}
-
-int mthca_poll_cq_list(
-       IN              struct ib_cq *ibcq, 
-       IN      OUT                     ib_wc_t** const                         pp_free_wclist,
-               OUT                     ib_wc_t** const                         pp_done_wclist )
-{
-       struct mthca_dev *dev = to_mdev(ibcq->device);
-       struct mthca_cq *cq = to_mcq(ibcq);
-       struct mthca_qp *qp = NULL;
-       int err = 0;
-       int freed = 0;
-       ib_wc_t         *wc_p, **next_pp;
-       SPIN_LOCK_PREP(lh);
-
-       HCA_ENTER(HCA_DBG_CQ);
-
-       spin_lock_irqsave(&cq->lock, &lh);
-
-       // loop through CQ
-       next_pp = pp_done_wclist;
-       wc_p = *pp_free_wclist;
-       while( wc_p ) {
-               // poll one CQE
-               err = mthca_poll_one(dev, cq, &qp, &freed, wc_p);
-               if (err)
-                       break;
-
-               // prepare for the next loop
-               *next_pp = wc_p;
-               next_pp = &wc_p->p_next;
-               wc_p = wc_p->p_next;
-       }
-
-       // prepare the results
-       *pp_free_wclist = wc_p;         /* Set the head of the free list. */
-       *next_pp = NULL;                                                /* Clear the tail of the done list. */
-
-       // update consumer index
-       if (freed) {
-               wmb();
-               update_cons_index(dev, cq, freed);
-       }
-
-       spin_unlock_irqrestore(&lh);
-       HCA_EXIT(HCA_DBG_CQ);
-       return (err == 0 || err == -EAGAIN)? 0 : err;
-}
-
-
-int mthca_tavor_arm_cq(struct ib_cq *cq, enum ib_cq_notify notify)
-{
-       __be32 doorbell[2];
-
-       doorbell[0] = cl_hton32((notify == IB_CQ_SOLICITED ?
-                                  MTHCA_TAVOR_CQ_DB_REQ_NOT_SOL :
-                                  MTHCA_TAVOR_CQ_DB_REQ_NOT)      |
-                                 to_mcq(cq)->cqn);
-       doorbell[1] = (__be32) 0xffffffff;
-
-       mthca_write64(doorbell,
-                     to_mdev(cq->device)->kar + MTHCA_CQ_DOORBELL,
-                     MTHCA_GET_DOORBELL_LOCK(&to_mdev(cq->device)->doorbell_lock));
-
-       return 0;
-}
-
-int mthca_arbel_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify notify)
-{
-       struct mthca_cq *cq = to_mcq(ibcq);
-       __be32 doorbell[2];
-       u32 sn;
-       __be32 ci;
-
-       sn = cq->arm_sn & 3;
-       ci = cl_hton32(cq->cons_index);
-
-       doorbell[0] = ci;
-       doorbell[1] = cl_hton32((cq->cqn << 8) | (2 << 5) | (sn << 3) |
-                                 (notify == IB_CQ_SOLICITED ? 1 : 2));
-
-       mthca_write_db_rec(doorbell, cq->arm_db);
-
-       /*
-        * Make sure that the doorbell record in host memory is
-        * written before ringing the doorbell via PCI MMIO.
-        */
-       wmb();
-
-       doorbell[0] = cl_hton32((sn << 28)                       |
-                                 (notify == IB_CQ_SOLICITED ?
-                                  MTHCA_ARBEL_CQ_DB_REQ_NOT_SOL :
-                                  MTHCA_ARBEL_CQ_DB_REQ_NOT)      |
-                                 cq->cqn);
-       doorbell[1] = ci;
-
-       mthca_write64(doorbell,
-                     to_mdev(ibcq->device)->kar + MTHCA_CQ_DOORBELL,
-                     MTHCA_GET_DOORBELL_LOCK(&to_mdev(ibcq->device)->doorbell_lock));
-
-       return 0;
-}
-
-static void mthca_free_cq_buf(struct mthca_dev *dev, struct mthca_cq *cq)
-{
-       mthca_buf_free(dev, (cq->ibcq.cqe + 1) * MTHCA_CQ_ENTRY_SIZE,
-                      &cq->queue, cq->is_direct, &cq->mr);
-}
-
-int mthca_init_cq(struct mthca_dev *dev, int nent,
-                 struct mthca_ucontext *ctx, u32 pdn,
-                 struct mthca_cq *cq)
-{
-       int size = NEXT_PAGE_ALIGN(nent * MTHCA_CQ_ENTRY_SIZE );
-       struct mthca_mailbox *mailbox;
-       struct mthca_cq_context *cq_context;
-       int err = -ENOMEM;
-       u8 status;
-       int i;
-       SPIN_LOCK_PREP(lh);
-
-       cq->ibcq.cqe  = nent - 1;
-       cq->is_kernel = !ctx;
-
-       cq->cqn = mthca_alloc(&dev->cq_table.alloc);
-       if (cq->cqn == -1)
-               return -ENOMEM;
-
-       if (mthca_is_memfree(dev)) {
-               err = mthca_table_get(dev, dev->cq_table.table, cq->cqn);
-               if (err)
-                       goto err_out;
-
-               if (cq->is_kernel) {
-                       cq->arm_sn = 1;
-
-                       err = -ENOMEM;
-
-                       cq->set_ci_db_index = mthca_alloc_db(dev, MTHCA_DB_TYPE_CQ_SET_CI,
-                                                            cq->cqn, &cq->set_ci_db);
-                       if (cq->set_ci_db_index < 0)
-                               goto err_out_icm;
-
-                       cq->arm_db_index = mthca_alloc_db(dev, MTHCA_DB_TYPE_CQ_ARM,
-                                                         cq->cqn, &cq->arm_db);
-                       if (cq->arm_db_index < 0)
-                               goto err_out_ci;
-               }
-       }
-
-       mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
-       if (IS_ERR(mailbox))
-               goto err_out_arm;
-
-       cq_context = mailbox->buf;
-
-       if (cq->is_kernel) {
-               err = mthca_buf_alloc(dev, size, MTHCA_MAX_DIRECT_CQ_SIZE,
-                                     &cq->queue, &cq->is_direct,
-                                     &dev->driver_pd, 1, &cq->mr);
-               if (err)
-                       goto err_out_mailbox;
-
-               for (i = 0; i < nent; ++i)
-                       set_cqe_hw(get_cqe(cq, i));
-       }
-
-       spin_lock_init(&cq->lock);
-       atomic_set(&cq->refcount, 1);
-       init_waitqueue_head(&cq->wait);
-       KeInitializeMutex(&cq->mutex, 0);
-
-       RtlZeroMemory(cq_context, sizeof *cq_context);
-       cq_context->flags           = cl_hton32(MTHCA_CQ_STATUS_OK      |
-                                                 MTHCA_CQ_STATE_DISARMED |
-                                                 MTHCA_CQ_FLAG_TR);
-       cq_context->logsize_usrpage = cl_hton32((ffs(nent) - 1) << 24);
-       if (ctx)
-               cq_context->logsize_usrpage |= cl_hton32(ctx->uar.index);
-       else
-               cq_context->logsize_usrpage |= cl_hton32(dev->driver_uar.index);
-       cq_context->error_eqn       = cl_hton32(dev->eq_table.eq[MTHCA_EQ_ASYNC].eqn);
-       cq_context->comp_eqn        = cl_hton32(dev->eq_table.eq[MTHCA_EQ_COMP].eqn);
-       cq_context->pd              = cl_hton32(pdn);
-       cq_context->lkey            = cl_hton32(cq->mr.ibmr.lkey);
-       cq_context->cqn             = cl_hton32(cq->cqn);
-
-       if (mthca_is_memfree(dev)) {
-               cq_context->ci_db    = cl_hton32(cq->set_ci_db_index);
-               cq_context->state_db = cl_hton32(cq->arm_db_index);
-       }
-
-       err = mthca_SW2HW_CQ(dev, mailbox, cq->cqn, &status);
-       if (err) {
-               HCA_PRINT(TRACE_LEVEL_WARNING  ,HCA_DBG_LOW  ,("SW2HW_CQ failed (%d)\n", err));
-               goto err_out_free_mr;
-       }
-
-       if (status) {
-               HCA_PRINT(TRACE_LEVEL_WARNING,HCA_DBG_LOW,("SW2HW_CQ returned status 0x%02x\n",
-                          status));
-               err = -EINVAL;
-               goto err_out_free_mr;
-       }
-
-       spin_lock_irq(&dev->cq_table.lock, &lh);
-       if (mthca_array_set(&dev->cq_table.cq,
-                           cq->cqn & (dev->limits.num_cqs - 1),
-                           cq)) {
-               spin_unlock_irq(&lh);
-               goto err_out_free_mr;
-       }
-       spin_unlock_irq(&lh);
-
-       cq->cons_index = 0;
-
-       mthca_free_mailbox(dev, mailbox);
-
-       return 0;
-
-err_out_free_mr:
-       if (cq->is_kernel)
-               mthca_free_cq_buf(dev, cq);
-
-err_out_mailbox:
-       mthca_free_mailbox(dev, mailbox);
-
-err_out_arm:
-       if (cq->is_kernel && mthca_is_memfree(dev))
-               mthca_free_db(dev, MTHCA_DB_TYPE_CQ_ARM, cq->arm_db_index);
-
-err_out_ci:
-       if (cq->is_kernel && mthca_is_memfree(dev))
-               mthca_free_db(dev, MTHCA_DB_TYPE_CQ_SET_CI, cq->set_ci_db_index);
-
-err_out_icm:
-       mthca_table_put(dev, dev->cq_table.table, cq->cqn);
-
-err_out:
-       mthca_free(&dev->cq_table.alloc, cq->cqn);
-
-       return err;
-}
-
-void mthca_free_cq(struct mthca_dev *dev,
-                  struct mthca_cq *cq)
-{
-       struct mthca_mailbox *mailbox;
-       int err;
-       u8 status;
-       SPIN_LOCK_PREP(lh);
-
-       mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
-       if (IS_ERR(mailbox)) {
-               HCA_PRINT(TRACE_LEVEL_WARNING  ,HCA_DBG_LOW  ,("No memory for mailbox to free CQ.\n"));
-               return;
-       }
-
-       err = mthca_HW2SW_CQ(dev, mailbox, cq->cqn, &status);
-       if (err){
-               HCA_PRINT(TRACE_LEVEL_WARNING  ,HCA_DBG_LOW  ,("HW2SW_CQ failed (%d)\n", err));
-       }
-       else if (status){
-               HCA_PRINT(TRACE_LEVEL_WARNING  ,HCA_DBG_LOW  ,("HW2SW_CQ returned status 0x%02x\n", status));
-       }
-       { // debug print
-               __be32 *ctx = mailbox->buf;
-               int j;
-               UNUSED_PARAM_WOWPP(ctx);
-               UNUSED_PARAM_WOWPP(j);
-
-               HCA_PRINT(TRACE_LEVEL_VERBOSE ,HCA_DBG_LOW ,("context for CQN %x (cons index %x, next sw %d)\n",
-                      cq->cqn, cq->cons_index,
-                      cq->is_kernel ? !!next_cqe_sw(cq) : 0));
-               for (j = 0; j < 16; ++j)
-                       HCA_PRINT(TRACE_LEVEL_VERBOSE   ,HCA_DBG_LOW   ,("[%2x] %08x\n", j * 4, cl_ntoh32(ctx[j])));
-       }
-       spin_lock_irq(&dev->cq_table.lock, &lh);
-       mthca_array_clear(&dev->cq_table.cq,
-                         cq->cqn & (dev->limits.num_cqs - 1));
-       spin_unlock_irq(&lh);
-
-       /* wait for all RUNNING DPCs on that EQ to complete */
-       ASSERT(KeGetCurrentIrql() == PASSIVE_LEVEL);
-       KeFlushQueuedDpcs();
-
-       atomic_dec(&cq->refcount);
-       wait_event(&cq->wait, !atomic_read(&cq->refcount));
-
-       if (cq->is_kernel) {
-               mthca_free_cq_buf(dev, cq);
-               if (mthca_is_memfree(dev)) {
-                       mthca_free_db(dev, MTHCA_DB_TYPE_CQ_ARM,    cq->arm_db_index);
-                       mthca_free_db(dev, MTHCA_DB_TYPE_CQ_SET_CI, cq->set_ci_db_index);
-               }
-       }
-
-       mthca_table_put(dev, dev->cq_table.table, cq->cqn);
-       mthca_free(&dev->cq_table.alloc, cq->cqn);
-       mthca_free_mailbox(dev, mailbox);
-}
-
-int mthca_init_cq_table(struct mthca_dev *dev)
-{
-       int err;
-
-       spin_lock_init(&dev->cq_table.lock);
-
-       err = mthca_alloc_init(&dev->cq_table.alloc,
-                              dev->limits.num_cqs,
-                              (1 << 24) - 1,
-                              dev->limits.reserved_cqs);
-       if (err)
-               return err;
-
-       err = mthca_array_init(&dev->cq_table.cq,
-                              dev->limits.num_cqs);
-       if (err)
-               mthca_alloc_cleanup(&dev->cq_table.alloc);
-
-       return err;
-}
-
-void mthca_cleanup_cq_table(struct mthca_dev *dev)
-{
-       mthca_array_cleanup(&dev->cq_table.cq, dev->limits.num_cqs);
-       mthca_alloc_cleanup(&dev->cq_table.alloc);
-}
-
-
+/*\r
+ * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.\r
+ * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.\r
+ * Copyright (c) 2005 Cisco Systems, Inc. All rights reserved.\r
+ * Copyright (c) 2005 Mellanox Technologies. All rights reserved.\r
+ * Copyright (c) 2004 Voltaire, Inc. All rights reserved.\r
+ *\r
+ * This software is available to you under a choice of one of two\r
+ * licenses.  You may choose to be licensed under the terms of the GNU\r
+ * General Public License (GPL) Version 2, available from the file\r
+ * COPYING in the main directory of this source tree, or the\r
+ * OpenIB.org BSD license below:\r
+ *\r
+ *     Redistribution and use in source and binary forms, with or\r
+ *     without modification, are permitted provided that the following\r
+ *     conditions are met:\r
+ *\r
+ *      - Redistributions of source code must retain the above\r
+ *        copyright notice, this list of conditions and the following\r
+ *        disclaimer.\r
+ *\r
+ *      - Redistributions in binary form must reproduce the above\r
+ *        copyright notice, this list of conditions and the following\r
+ *        disclaimer in the documentation and/or other materials\r
+ *        provided with the distribution.\r
+ *\r
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,\r
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\r
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND\r
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS\r
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN\r
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN\r
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\r
+ * SOFTWARE.\r
+ *\r
+ * $Id$\r
+ */\r
+\r
+#include <ib_pack.h>\r
+\r
+#include "mthca_dev.h"\r
+#if defined(EVENT_TRACING)\r
+#ifdef offsetof\r
+#undef offsetof\r
+#endif\r
+#include "mthca_cq.tmh"\r
+#endif\r
+#include "mthca_cmd.h"\r
+#include "mthca_memfree.h"\r
+#include "mthca_wqe.h"\r
+\r
+\r
+#ifdef ALLOC_PRAGMA\r
+#pragma alloc_text (PAGE, mthca_init_cq_table)\r
+#pragma alloc_text (PAGE, mthca_cleanup_cq_table)\r
+#endif\r
+\r
+enum {\r
+       MTHCA_MAX_DIRECT_CQ_SIZE = 4 * PAGE_SIZE\r
+};\r
+\r
+/*\r
+ * Must be packed because start is 64 bits but only aligned to 32 bits.\r
+ */\r
+#pragma pack(push,1)\r
+struct mthca_cq_context {\r
+       __be32 flags;\r
+       __be64 start;\r
+       __be32 logsize_usrpage;\r
+       __be32 error_eqn;       /* Tavor only */\r
+       __be32 comp_eqn;\r
+       __be32 pd;\r
+       __be32 lkey;\r
+       __be32 last_notified_index;\r
+       __be32 solicit_producer_index;\r
+       __be32 consumer_index;\r
+       __be32 producer_index;\r
+       __be32 cqn;\r
+       __be32 ci_db;           /* Arbel only */\r
+       __be32 state_db;        /* Arbel only */\r
+       u32    reserved;\r
+};\r
+#pragma pack(pop)\r
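+\r
+/*\r
+ * Layout sketch (illustrative note, not in the original source): with the\r
+ * pack pragma above, sizeof(struct mthca_cq_context) is 64 bytes and the\r
+ * 64-bit start field sits at offset 4.  A compile-time guard such as\r
+ *     C_ASSERT( sizeof(struct mthca_cq_context) == 64 );\r
+ * would catch accidental padding regressions.\r
+ */\r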
+\r
+#define MTHCA_CQ_STATUS_OK          ( 0 << 28)\r
+#define MTHCA_CQ_STATUS_OVERFLOW    ( 9 << 28)\r
+#define MTHCA_CQ_STATUS_WRITE_FAIL  (10 << 28)\r
+#define MTHCA_CQ_FLAG_TR            ( 1 << 18)\r
+#define MTHCA_CQ_FLAG_OI            ( 1 << 17)\r
+#define MTHCA_CQ_STATE_DISARMED     ( 0 <<  8)\r
+#define MTHCA_CQ_STATE_ARMED        ( 1 <<  8)\r
+#define MTHCA_CQ_STATE_ARMED_SOL    ( 4 <<  8)\r
+#define MTHCA_EQ_STATE_FIRED        (10 <<  8)\r
+\r
+enum {\r
+       MTHCA_ERROR_CQE_OPCODE_MASK = 0xfe\r
+};\r
+\r
+enum {\r
+       SYNDROME_LOCAL_LENGTH_ERR        = 0x01,\r
+       SYNDROME_LOCAL_QP_OP_ERR         = 0x02,\r
+       SYNDROME_LOCAL_EEC_OP_ERR        = 0x03,\r
+       SYNDROME_LOCAL_PROT_ERR          = 0x04,\r
+       SYNDROME_WR_FLUSH_ERR            = 0x05,\r
+       SYNDROME_MW_BIND_ERR             = 0x06,\r
+       SYNDROME_BAD_RESP_ERR            = 0x10,\r
+       SYNDROME_LOCAL_ACCESS_ERR        = 0x11,\r
+       SYNDROME_REMOTE_INVAL_REQ_ERR    = 0x12,\r
+       SYNDROME_REMOTE_ACCESS_ERR       = 0x13,\r
+       SYNDROME_REMOTE_OP_ERR           = 0x14,\r
+       SYNDROME_RETRY_EXC_ERR           = 0x15,\r
+       SYNDROME_RNR_RETRY_EXC_ERR       = 0x16,\r
+       SYNDROME_LOCAL_RDD_VIOL_ERR      = 0x20,\r
+       SYNDROME_REMOTE_INVAL_RD_REQ_ERR = 0x21,\r
+       SYNDROME_REMOTE_ABORTED_ERR      = 0x22,\r
+       SYNDROME_INVAL_EECN_ERR          = 0x23,\r
+       SYNDROME_INVAL_EEC_STATE_ERR     = 0x24\r
+};\r
+\r
+struct mthca_cqe {\r
+       __be32 my_qpn;\r
+       __be32 my_ee;\r
+       __be32 rqpn;\r
+       __be16 sl_g_mlpath;\r
+       __be16 rlid;\r
+       __be32 imm_etype_pkey_eec;\r
+       __be32 byte_cnt;\r
+       __be32 wqe;\r
+       u8     opcode;\r
+       u8     is_send;\r
+       u8     reserved;\r
+       u8     owner;\r
+};\r
+\r
+struct mthca_err_cqe {\r
+       __be32 my_qpn;\r
+       u32    reserved1[3];\r
+       u8     syndrome;\r
+       u8     vendor_err;\r
+       __be16 db_cnt;\r
+       u32    reserved2;\r
+       __be32 wqe;\r
+       u8     opcode;\r
+       u8     reserved3[2];\r
+       u8     owner;\r
+};\r
+\r
+#define MTHCA_CQ_ENTRY_OWNER_SW      (0 << 7)\r
+#define MTHCA_CQ_ENTRY_OWNER_HW      (1 << 7)\r
+\r
+#define MTHCA_TAVOR_CQ_DB_INC_CI       (1 << 24)\r
+#define MTHCA_TAVOR_CQ_DB_REQ_NOT      (2 << 24)\r
+#define MTHCA_TAVOR_CQ_DB_REQ_NOT_SOL  (3 << 24)\r
+#define MTHCA_TAVOR_CQ_DB_SET_CI       (4 << 24)\r
+#define MTHCA_TAVOR_CQ_DB_REQ_NOT_MULT (5 << 24)\r
+\r
+#define MTHCA_ARBEL_CQ_DB_REQ_NOT_SOL  (1 << 24)\r
+#define MTHCA_ARBEL_CQ_DB_REQ_NOT      (2 << 24)\r
+#define MTHCA_ARBEL_CQ_DB_REQ_NOT_MULT (3 << 24)\r
+\r
+static inline struct mthca_cqe *get_cqe(struct mthca_cq *cq, int entry)\r
+{\r
+       if (cq->is_direct)\r
+               return (struct mthca_cqe *)((u8*)cq->queue.direct.page + (entry * MTHCA_CQ_ENTRY_SIZE));\r
+       else\r
+               return (struct mthca_cqe *)((u8*)cq->queue.page_list[entry * MTHCA_CQ_ENTRY_SIZE / PAGE_SIZE].page\r
+                       + (entry * MTHCA_CQ_ENTRY_SIZE) % PAGE_SIZE);\r
+}\r
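+\r
+/*\r
+ * Worked example of the indexing above (illustrative; assumes the usual\r
+ * 32-byte MTHCA_CQ_ENTRY_SIZE and a 4K PAGE_SIZE): entry 200 maps to byte\r
+ * offset 6400, i.e. page_list[1] at offset 2304 in the indirect case.\r
+ */\r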
+\r
+static inline struct mthca_cqe *cqe_sw(struct mthca_cq *cq, int i)\r
+{\r
+       struct mthca_cqe *cqe = get_cqe(cq, i);\r
+       return MTHCA_CQ_ENTRY_OWNER_HW & cqe->owner ? NULL : cqe;\r
+}\r
+\r
+static inline struct mthca_cqe *next_cqe_sw(struct mthca_cq *cq)\r
+{\r
+       return cqe_sw(cq, cq->cons_index & cq->ibcq.cqe);\r
+}\r
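+\r
+/*\r
+ * Note: cq->ibcq.cqe holds nent - 1 (see mthca_init_cq() below) with nent\r
+ * a power of two, so the mask above implements the ring-buffer wrap of\r
+ * cons_index without a modulo.\r
+ */\r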
+\r
+static inline void set_cqe_hw(struct mthca_cqe *cqe)\r
+{\r
+       cqe->owner = MTHCA_CQ_ENTRY_OWNER_HW;\r
+}\r
+\r
+static void dump_cqe(u32 print_lvl, struct mthca_dev *dev, void *cqe_ptr)\r
+{\r
+       __be32 *cqe = cqe_ptr;\r
+       UNREFERENCED_PARAMETER(dev);\r
+       UNUSED_PARAM_WOWPP(print_lvl);\r
+\r
+       (void) cqe;     /* avoid warning if mthca_dbg compiled away... */\r
+       HCA_PRINT(print_lvl,HCA_DBG_CQ,("CQE contents \n"));\r
+       HCA_PRINT(print_lvl,HCA_DBG_CQ,("\t[%2x] %08x %08x %08x %08x\n",0,\r
+               cl_ntoh32(cqe[0]), cl_ntoh32(cqe[1]), cl_ntoh32(cqe[2]), cl_ntoh32(cqe[3])));\r
+       HCA_PRINT(print_lvl,HCA_DBG_CQ,("\t[%2x] %08x %08x %08x %08x \n",16,\r
+               cl_ntoh32(cqe[4]), cl_ntoh32(cqe[5]), cl_ntoh32(cqe[6]), cl_ntoh32(cqe[7])));\r
+}\r
+\r
+/*\r
+ * incr is ignored in native Arbel (mem-free) mode, so cq->cons_index\r
+ * should be correct before calling update_cons_index().\r
+ */\r
+static inline void update_cons_index(struct mthca_dev *dev, struct mthca_cq *cq,\r
+                                    int incr)\r
+{\r
+       __be32 doorbell[2];\r
+\r
+       if (mthca_is_memfree(dev)) {\r
+               *cq->set_ci_db = cl_hton32(cq->cons_index);\r
+               wmb();\r
+       } else {\r
+               doorbell[0] = cl_hton32(MTHCA_TAVOR_CQ_DB_INC_CI | cq->cqn);\r
+               doorbell[1] = cl_hton32(incr - 1);\r
+\r
+               mthca_write64(doorbell,\r
+                             dev->kar + MTHCA_CQ_DOORBELL,\r
+                             MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));\r
+       }\r
+}\r
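+\r
+/*\r
+ * Tavor example (illustrative): for CQN 0x42 after freeing 3 CQEs, the\r
+ * doorbell words are (MTHCA_TAVOR_CQ_DB_INC_CI | 0x42) and (3 - 1), i.e.\r
+ * 0x01000042 / 0x00000002, byte-swapped to big-endian by cl_hton32().\r
+ */\r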
+\r
+void mthca_cq_completion(struct mthca_dev *dev, u32 cqn)\r
+{\r
+       struct mthca_cq *cq;\r
+\r
+       cq = mthca_array_get(&dev->cq_table.cq, cqn & (dev->limits.num_cqs - 1));\r
+\r
+       if (!cq) {\r
+               HCA_PRINT(TRACE_LEVEL_WARNING  ,HCA_DBG_LOW  ,("Completion event for bogus CQ %08x\n", cqn));\r
+               return;\r
+       }\r
+\r
+       if (mthca_is_memfree(dev)) {\r
+               if (cq->ibcq.ucontext)\r
+                       ++*cq->p_u_arm_sn;\r
+               else\r
+                       ++cq->arm_sn;\r
+       }\r
+\r
+       cq->ibcq.comp_handler(cq->ibcq.cq_context);\r
+}\r
+\r
+void mthca_cq_event(struct mthca_dev *dev, u32 cqn,\r
+                   enum ib_event_type event_type)\r
+{\r
+       struct mthca_cq *cq;\r
+       ib_event_rec_t event;\r
+       SPIN_LOCK_PREP(lh);\r
+\r
+       spin_lock(&dev->cq_table.lock, &lh);\r
+\r
+       cq = mthca_array_get(&dev->cq_table.cq, cqn & (dev->limits.num_cqs - 1));\r
+\r
+       if (cq)\r
+               atomic_inc(&cq->refcount);\r
+       spin_unlock(&lh);\r
+\r
+       if (!cq) {\r
+               HCA_PRINT(TRACE_LEVEL_WARNING  ,HCA_DBG_LOW  ,("Async event for bogus CQ %08x\n", cqn));\r
+               return;\r
+       }\r
+\r
+       event.type = event_type;\r
+       event.context = cq->ibcq.cq_context;\r
+       event.vendor_specific = 0;\r
+       cq->ibcq.event_handler(&event);\r
+\r
+       if (atomic_dec_and_test(&cq->refcount))\r
+               wake_up(&cq->wait);\r
+}\r
+\r
+void mthca_cq_clean(struct mthca_dev *dev, u32 cqn, u32 qpn,\r
+                   struct mthca_srq *srq)\r
+{\r
+       struct mthca_cq *cq;\r
+       struct mthca_cqe *cqe;\r
+       u32 prod_index;\r
+       int nfreed = 0;\r
+       SPIN_LOCK_PREP(lht);\r
+       SPIN_LOCK_PREP(lh);\r
+\r
+       spin_lock_irq(&dev->cq_table.lock, &lht);\r
+       cq = mthca_array_get(&dev->cq_table.cq, cqn & (dev->limits.num_cqs - 1));\r
+       if (cq)\r
+               atomic_inc(&cq->refcount);\r
+       spin_unlock_irq(&lht);\r
+\r
+       if (!cq)\r
+               return;\r
+\r
+       spin_lock_irq(&cq->lock, &lh);\r
+\r
+       /*\r
+        * First we need to find the current producer index, so we\r
+        * know where to start cleaning from.  It doesn't matter if HW\r
+        * adds new entries after this loop -- the QP we're worried\r
+        * about is already in RESET, so the new entries won't come\r
+        * from our QP and therefore don't need to be checked.\r
+        */\r
+       for (prod_index = cq->cons_index;\r
+            cqe_sw(cq, prod_index & cq->ibcq.cqe);\r
+            ++prod_index) {\r
+               if (prod_index == cq->cons_index + cq->ibcq.cqe)\r
+                       break;\r
+       }\r
+\r
+       HCA_PRINT(TRACE_LEVEL_VERBOSE,HCA_DBG_LOW,("Cleaning QPN %06x from CQN %06x; ci %d, pi %d\n",\r
+                 qpn, cqn, cq->cons_index, prod_index));\r
+\r
+       /*\r
+        * Now sweep backwards through the CQ, removing CQ entries\r
+        * that match our QP by copying older entries on top of them.\r
+        */\r
+       while ((int) --prod_index - (int) cq->cons_index >= 0) {\r
+               cqe = get_cqe(cq, prod_index & cq->ibcq.cqe);\r
+               if (cqe->my_qpn == cl_hton32(qpn)) {\r
+                       if (srq)\r
+                               mthca_free_srq_wqe(srq, cl_ntoh32(cqe->wqe));\r
+                       ++nfreed;\r
+               } else if (nfreed) {\r
+                       memcpy(get_cqe(cq, (prod_index + nfreed) & cq->ibcq.cqe),\r
+                               cqe, MTHCA_CQ_ENTRY_SIZE);\r
+               }\r
+       }\r
+\r
+       if (nfreed) {\r
+               wmb();\r
+               cq->cons_index += nfreed;\r
+               update_cons_index(dev, cq, nfreed);\r
+       }\r
+\r
+       spin_unlock_irq(&lh);\r
+       if (atomic_dec_and_test(&cq->refcount))\r
+               wake_up(&cq->wait);\r
+}\r
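+\r
+/*\r
+ * Sweep example (illustrative): with cons_index = 0 and four valid CQEs\r
+ * Q X Q Y, where Q belongs to the QP being cleaned, the backward pass\r
+ * copies X over the higher Q, leaving . . X Y with nfreed == 2; the\r
+ * consumer index then advances past the two reclaimed slots.\r
+ */\r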
+\r
+static void handle_error_cqe(struct mthca_dev *dev, struct mthca_cq *cq,\r
+                           struct mthca_qp *qp, int wqe_index, int is_send,\r
+                           struct mthca_err_cqe *cqe,\r
+                           struct _ib_wc *entry, int *free_cqe)\r
+{\r
+       int dbd;\r
+       __be32 new_wqe;\r
+\r
+       UNREFERENCED_PARAMETER(cq);\r
+       \r
+       if (cqe->syndrome != SYNDROME_WR_FLUSH_ERR) {\r
+               HCA_PRINT(TRACE_LEVEL_INFORMATION ,HCA_DBG_CQ ,("Completion with error "\r
+                         "(QPN %06x, WQE @ %08x, CQN %06x, index %d)\n",\r
+                         cl_ntoh32(cqe->my_qpn), cl_ntoh32(cqe->wqe),\r
+                         cq->cqn, cq->cons_index));\r
+               dump_cqe(TRACE_LEVEL_INFORMATION, dev, cqe);\r
+       }\r
+\r
+\r
+       /*\r
+        * For completions in error, only work request ID, status, vendor error\r
+        * (and freed resource count for RD) have to be set.\r
+        */\r
+       switch (cqe->syndrome) {\r
+       case SYNDROME_LOCAL_LENGTH_ERR:\r
+               entry->status = IB_WCS_LOCAL_LEN_ERR;\r
+               break;\r
+       case SYNDROME_LOCAL_QP_OP_ERR:\r
+               entry->status = IB_WCS_LOCAL_OP_ERR;\r
+               break;\r
+       case SYNDROME_LOCAL_PROT_ERR:\r
+               entry->status = IB_WCS_LOCAL_PROTECTION_ERR;\r
+               break;\r
+       case SYNDROME_WR_FLUSH_ERR:\r
+               entry->status = IB_WCS_WR_FLUSHED_ERR;\r
+               break;\r
+       case SYNDROME_MW_BIND_ERR:\r
+               entry->status = IB_WCS_MEM_WINDOW_BIND_ERR;\r
+               break;\r
+       case SYNDROME_BAD_RESP_ERR:\r
+               entry->status = IB_WCS_BAD_RESP_ERR;\r
+               break;\r
+       case SYNDROME_LOCAL_ACCESS_ERR:\r
+               entry->status = IB_WCS_LOCAL_ACCESS_ERR;\r
+               break;\r
+       case SYNDROME_REMOTE_INVAL_REQ_ERR:\r
+               entry->status = IB_WCS_REM_INVALID_REQ_ERR;\r
+               break;\r
+       case SYNDROME_REMOTE_ACCESS_ERR:\r
+               entry->status = IB_WCS_REM_ACCESS_ERR;\r
+               break;\r
+       case SYNDROME_REMOTE_OP_ERR:\r
+               entry->status = IB_WCS_REM_OP_ERR;\r
+               break;\r
+       case SYNDROME_RETRY_EXC_ERR:\r
+               entry->status = IB_WCS_TIMEOUT_RETRY_ERR;\r
+               break;\r
+       case SYNDROME_RNR_RETRY_EXC_ERR:\r
+               entry->status = IB_WCS_RNR_RETRY_ERR;\r
+               break;\r
+       case SYNDROME_REMOTE_INVAL_RD_REQ_ERR:\r
+               entry->status = IB_WCS_REM_INVALID_REQ_ERR;\r
+               break;\r
+       case SYNDROME_REMOTE_ABORTED_ERR:\r
+       case SYNDROME_LOCAL_EEC_OP_ERR:\r
+       case SYNDROME_LOCAL_RDD_VIOL_ERR:\r
+       case SYNDROME_INVAL_EECN_ERR:\r
+       case SYNDROME_INVAL_EEC_STATE_ERR:\r
+       default:\r
+               entry->status = IB_WCS_GENERAL_ERR;\r
+               break;\r
+       }\r
+\r
+       entry->vendor_specific = cqe->vendor_err;\r
+       \r
+       /*\r
+        * Mem-free HCAs always generate one CQE per WQE, even in the\r
+        * error case, so we don't have to check the doorbell count, etc.\r
+        */\r
+       if (mthca_is_memfree(dev))\r
+               return;\r
+\r
+       mthca_free_err_wqe(dev, qp, is_send, wqe_index, &dbd, &new_wqe);\r
+\r
+       /*\r
+        * If we're at the end of the WQE chain, or we've used up our\r
+        * doorbell count, free the CQE.  Otherwise just update it for\r
+        * the next poll operation.\r
+        */\r
+       if (!(new_wqe & cl_hton32(0x3f)) || (!cqe->db_cnt && dbd))\r
+               return;\r
+\r
+       cqe->db_cnt   = cl_hton16(cl_ntoh16(cqe->db_cnt) - (u16)dbd);\r
+       cqe->wqe      = new_wqe;\r
+       cqe->syndrome = SYNDROME_WR_FLUSH_ERR;\r
+\r
+       *free_cqe = 0;\r
+}\r
+\r
+static inline int mthca_poll_one(struct mthca_dev *dev,\r
+                                struct mthca_cq *cq,\r
+                                struct mthca_qp **cur_qp,\r
+                                int *freed,\r
+                                struct _ib_wc *entry)\r
+{\r
+       struct mthca_wq *wq;\r
+       struct mthca_cqe *cqe;\r
+       unsigned  wqe_index;\r
+       int is_error;\r
+       int is_send;\r
+       int free_cqe = 1;\r
+       int err = 0;\r
+\r
+       HCA_ENTER(HCA_DBG_CQ);\r
+       cqe = next_cqe_sw(cq);\r
+       if (!cqe)\r
+               return -EAGAIN;\r
+\r
+       /*\r
+        * Make sure we read CQ entry contents after we've checked the\r
+        * ownership bit.\r
+        */\r
+       rmb();\r
+\r
+       { // debug print\r
+               HCA_PRINT(TRACE_LEVEL_VERBOSE,HCA_DBG_CQ,("CQ: 0x%06x/%d: CQE -> QPN 0x%06x, WQE @ 0x%08x\n",\r
+                         cq->cqn, cq->cons_index, cl_ntoh32(cqe->my_qpn),\r
+                         cl_ntoh32(cqe->wqe)));\r
+               dump_cqe(TRACE_LEVEL_VERBOSE, dev, cqe);\r
+       }\r
+\r
+       is_error = (cqe->opcode & MTHCA_ERROR_CQE_OPCODE_MASK) ==\r
+               MTHCA_ERROR_CQE_OPCODE_MASK;\r
+       is_send  = is_error ? cqe->opcode & 0x01 : cqe->is_send & 0x80;\r
+\r
+       if (!*cur_qp || cl_ntoh32(cqe->my_qpn) != (*cur_qp)->qpn) {\r
+               /*\r
+                * We do not have to take the QP table lock here,\r
+                * because CQs will be locked while QPs are removed\r
+                * from the table.\r
+                */\r
+               *cur_qp = mthca_array_get(&dev->qp_table.qp,\r
+                                         cl_ntoh32(cqe->my_qpn) &\r
+                                         (dev->limits.num_qps - 1));\r
+               if (!*cur_qp) {\r
+                       HCA_PRINT(TRACE_LEVEL_WARNING,HCA_DBG_CQ, ("CQ entry for unknown QP %06x\n",\r
+                                  cl_ntoh32(cqe->my_qpn) & 0xffffff));\r
+                       err = -EINVAL;\r
+                       goto out;\r
+               }\r
+       }\r
+\r
+       if (is_send) {\r
+               wq = &(*cur_qp)->sq;\r
+               wqe_index = ((cl_ntoh32(cqe->wqe) - (*cur_qp)->send_wqe_offset)\r
+                            >> wq->wqe_shift);\r
+               entry->wr_id = (*cur_qp)->wrid[wqe_index +\r
+                                              (*cur_qp)->rq.max];\r
+       } else if ((*cur_qp)->ibqp.srq) {\r
+               struct mthca_srq *srq = to_msrq((*cur_qp)->ibqp.srq);\r
+               u32 wqe = cl_ntoh32(cqe->wqe);\r
+               wq = NULL;\r
+               wqe_index = wqe >> srq->wqe_shift;\r
+               entry->wr_id = srq->wrid[wqe_index];\r
+               mthca_free_srq_wqe(srq, wqe);\r
+       } else {\r
+               wq = &(*cur_qp)->rq;\r
+               wqe_index = cl_ntoh32(cqe->wqe) >> wq->wqe_shift;\r
+               entry->wr_id = (*cur_qp)->wrid[wqe_index];\r
+       }\r
+\r
+       if (wq) {\r
+               if (wq->last_comp < wqe_index)\r
+                       wq->tail += wqe_index - wq->last_comp;\r
+               else\r
+                       wq->tail += wqe_index + wq->max - wq->last_comp;\r
+\r
+               wq->last_comp = wqe_index;\r
+       }\r
+\r
+       if (is_send) {\r
+               entry->recv.ud.recv_opt = 0;\r
+               switch (cqe->opcode) {\r
+               case MTHCA_OPCODE_RDMA_WRITE:\r
+                       entry->wc_type    = IB_WC_RDMA_WRITE;\r
+                       break;\r
+               case MTHCA_OPCODE_RDMA_WRITE_IMM:\r
+                       entry->wc_type    = IB_WC_RDMA_WRITE;\r
+                       entry->recv.ud.recv_opt |= IB_RECV_OPT_IMMEDIATE;\r
+                       break;\r
+               case MTHCA_OPCODE_SEND:\r
+                       entry->wc_type    = IB_WC_SEND;\r
+                       break;\r
+               case MTHCA_OPCODE_SEND_IMM:\r
+                       entry->wc_type    = IB_WC_SEND;\r
+                       entry->recv.ud.recv_opt |= IB_RECV_OPT_IMMEDIATE;\r
+                       break;\r
+               case MTHCA_OPCODE_RDMA_READ:\r
+                       entry->wc_type    = IB_WC_RDMA_READ;\r
+                       entry->length  = cl_ntoh32(cqe->byte_cnt);\r
+                       break;\r
+               case MTHCA_OPCODE_ATOMIC_CS:\r
+                       entry->wc_type    = IB_WC_COMPARE_SWAP;\r
+                       entry->length = MTHCA_BYTES_PER_ATOMIC_COMPL;\r
+                       break;\r
+               case MTHCA_OPCODE_ATOMIC_FA:\r
+                       entry->wc_type    = IB_WC_FETCH_ADD;\r
+                       entry->length = MTHCA_BYTES_PER_ATOMIC_COMPL;\r
+                       break;\r
+               case MTHCA_OPCODE_BIND_MW:\r
+                       entry->wc_type    = IB_WC_MW_BIND;\r
+                       break;\r
+               default:\r
+                       entry->wc_type    = IB_WC_SEND;\r
+                       break;\r
+               }\r
+       } else {\r
+               entry->length = cl_ntoh32(cqe->byte_cnt);\r
+               switch (cqe->opcode & 0x1f) {\r
+               case IB_OPCODE_SEND_LAST_WITH_IMMEDIATE:\r
+               case IB_OPCODE_SEND_ONLY_WITH_IMMEDIATE:\r
+                       entry->recv.ud.recv_opt = IB_RECV_OPT_IMMEDIATE;\r
+                       entry->recv.ud.immediate_data = cqe->imm_etype_pkey_eec;\r
+                       entry->wc_type = IB_WC_RECV;\r
+                       break;\r
+               case IB_OPCODE_RDMA_WRITE_LAST_WITH_IMMEDIATE:\r
+               case IB_OPCODE_RDMA_WRITE_ONLY_WITH_IMMEDIATE:\r
+                       entry->recv.ud.recv_opt = IB_RECV_OPT_IMMEDIATE;\r
+                       entry->recv.ud.immediate_data = cqe->imm_etype_pkey_eec;\r
+                       entry->wc_type = IB_WC_RECV_RDMA_WRITE;\r
+                       break;\r
+               default:\r
+                       entry->recv.ud.recv_opt = 0;\r
+                       entry->wc_type = IB_WC_RECV;\r
+                       break;\r
+               }\r
+               entry->recv.ud.remote_lid          = cqe->rlid;\r
+               entry->recv.ud.remote_qp           = cqe->rqpn & 0xffffff00;\r
+               entry->recv.ud.pkey_index  = (u16)(cl_ntoh32(cqe->imm_etype_pkey_eec) >> 16);\r
+               entry->recv.ud.remote_sl           = (uint8_t)(cl_ntoh16(cqe->sl_g_mlpath) >> 12);\r
+               entry->recv.ud.path_bits = (uint8_t)(cl_ntoh16(cqe->sl_g_mlpath) & 0x7f);\r
+               entry->recv.ud.recv_opt   |= cl_ntoh16(cqe->sl_g_mlpath) & 0x80 ?\r
+                                       IB_RECV_OPT_GRH_VALID : 0;\r
+       }\r
+       if (!is_send && cqe->rlid == 0){\r
+               HCA_PRINT(TRACE_LEVEL_INFORMATION,HCA_DBG_CQ,("found rlid == 0\n"));\r
+               entry->recv.ud.recv_opt   |= IB_RECV_OPT_FORWARD;\r
+       }\r
+       if (is_error) {\r
+               handle_error_cqe(dev, cq, *cur_qp, wqe_index, is_send,\r
+                       (struct mthca_err_cqe *) cqe, entry, &free_cqe);\r
+       }\r
+       else\r
+               entry->status = IB_WCS_SUCCESS;\r
+\r
+ out:\r
+       if (likely(free_cqe)) {\r
+               set_cqe_hw(cqe);\r
+               ++(*freed);\r
+               ++cq->cons_index;\r
+       }\r
+       HCA_EXIT(HCA_DBG_CQ);\r
+       return err;\r
+}\r
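+\r
+/*\r
+ * Note: -EAGAIN from mthca_poll_one() simply means the CQ is empty; the\r
+ * callers below treat it as success and report how many entries were\r
+ * polled before the queue ran dry.\r
+ */\r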
+\r
+int mthca_poll_cq(struct ib_cq *ibcq, int num_entries,\r
+                 struct _ib_wc *entry)\r
+{\r
+       struct mthca_dev *dev = to_mdev(ibcq->device);\r
+       struct mthca_cq *cq = to_mcq(ibcq);\r
+       struct mthca_qp *qp = NULL;\r
+       int err = 0;\r
+       int freed = 0;\r
+       int npolled;\r
+       SPIN_LOCK_PREP(lh);\r
+\r
+       spin_lock_irqsave(&cq->lock, &lh);\r
+\r
+       for (npolled = 0; npolled < num_entries; ++npolled) {\r
+               err = mthca_poll_one(dev, cq, &qp,\r
+                                    &freed, entry + npolled);\r
+               if (err)\r
+                       break;\r
+       }\r
+\r
+       if (freed) {\r
+               wmb();\r
+               update_cons_index(dev, cq, freed);\r
+       }\r
+\r
+       spin_unlock_irqrestore(&lh);\r
+\r
+       return (err == 0 || err == -EAGAIN) ? npolled : err;\r
+}\r
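+\r
+/*\r
+ * Typical consumer pattern (illustrative sketch only, not part of this\r
+ * driver; ibcq stands in for the caller's CQ handle):\r
+ *\r
+ *     struct _ib_wc wc[8];\r
+ *     int n;\r
+ *     do {\r
+ *             n = mthca_poll_cq(ibcq, 8, wc);\r
+ *             // process wc[0 .. n-1]; a negative n is a hard poll error\r
+ *     } while (n == 8);\r
+ */\r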
+\r
+int mthca_poll_cq_list(\r
+       IN              struct ib_cq *ibcq, \r
+       IN      OUT                     ib_wc_t** const                         pp_free_wclist,\r
+               OUT                     ib_wc_t** const                         pp_done_wclist )\r
+{\r
+       struct mthca_dev *dev = to_mdev(ibcq->device);\r
+       struct mthca_cq *cq = to_mcq(ibcq);\r
+       struct mthca_qp *qp = NULL;\r
+       int err = 0;\r
+       int freed = 0;\r
+       ib_wc_t         *wc_p, **next_pp;\r
+       SPIN_LOCK_PREP(lh);\r
+\r
+       HCA_ENTER(HCA_DBG_CQ);\r
+\r
+       spin_lock_irqsave(&cq->lock, &lh);\r
+\r
+       // loop through CQ\r
+       next_pp = pp_done_wclist;\r
+       wc_p = *pp_free_wclist;\r
+       while( wc_p ) {\r
+               // poll one CQE\r
+               err = mthca_poll_one(dev, cq, &qp, &freed, wc_p);\r
+               if (err)\r
+                       break;\r
+\r
+               // prepare for the next loop\r
+               *next_pp = wc_p;\r
+               next_pp = &wc_p->p_next;\r
+               wc_p = wc_p->p_next;\r
+       }\r
+\r
+       // prepare the results\r
+       *pp_free_wclist = wc_p;         /* Set the head of the free list. */\r
+       *next_pp = NULL;                                                /* Clear the tail of the done list. */\r
+\r
+       // update consumer index\r
+       if (freed) {\r
+               wmb();\r
+               update_cons_index(dev, cq, freed);\r
+       }\r
+\r
+       spin_unlock_irqrestore(&lh);\r
+       HCA_EXIT(HCA_DBG_CQ);\r
+       return (err == 0 || err == -EAGAIN)? 0 : err;\r
+}\r
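+\r
+/*\r
+ * Note on the list variant above: work completions are filled from the\r
+ * head of *pp_free_wclist in order, so the done list preserves completion\r
+ * order and the caller gets back the unused tail of its free list.\r
+ */\r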
+\r
+\r
+int mthca_tavor_arm_cq(struct ib_cq *cq, enum ib_cq_notify notify)\r
+{\r
+       __be32 doorbell[2];\r
+\r
+       doorbell[0] = cl_hton32((notify == IB_CQ_SOLICITED ?\r
+                                  MTHCA_TAVOR_CQ_DB_REQ_NOT_SOL :\r
+                                  MTHCA_TAVOR_CQ_DB_REQ_NOT)      |\r
+                                 to_mcq(cq)->cqn);\r
+       doorbell[1] = (__be32) 0xffffffff;\r
+\r
+       mthca_write64(doorbell,\r
+                     to_mdev(cq->device)->kar + MTHCA_CQ_DOORBELL,\r
+                     MTHCA_GET_DOORBELL_LOCK(&to_mdev(cq->device)->doorbell_lock));\r
+\r
+       return 0;\r
+}\r
+\r
+int mthca_arbel_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify notify)\r
+{\r
+       struct mthca_cq *cq = to_mcq(ibcq);\r
+       __be32 doorbell[2];\r
+       u32 sn;\r
+       __be32 ci;\r
+\r
+       sn = cq->arm_sn & 3;\r
+       ci = cl_hton32(cq->cons_index);\r
+\r
+       doorbell[0] = ci;\r
+       doorbell[1] = cl_hton32((cq->cqn << 8) | (2 << 5) | (sn << 3) |\r
+                                 (notify == IB_CQ_SOLICITED ? 1 : 2));\r
+\r
+       mthca_write_db_rec(doorbell, cq->arm_db);\r
+\r
+       /*\r
+        * Make sure that the doorbell record in host memory is\r
+        * written before ringing the doorbell via PCI MMIO.\r
+        */\r
+       wmb();\r
+\r
+       doorbell[0] = cl_hton32((sn << 28)                       |\r
+                                 (notify == IB_CQ_SOLICITED ?\r
+                                  MTHCA_ARBEL_CQ_DB_REQ_NOT_SOL :\r
+                                  MTHCA_ARBEL_CQ_DB_REQ_NOT)      |\r
+                                 cq->cqn);\r
+       doorbell[1] = ci;\r
+\r
+       mthca_write64(doorbell,\r
+                     to_mdev(ibcq->device)->kar + MTHCA_CQ_DOORBELL,\r
+                     MTHCA_GET_DOORBELL_LOCK(&to_mdev(ibcq->device)->doorbell_lock));\r
+\r
+       return 0;\r
+}\r
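+\r
+/*\r
+ * The 2-bit sn taken from cq->arm_sn above guards against stale arm\r
+ * requests: mthca_cq_completion() bumps arm_sn on every completion event,\r
+ * so an arm doorbell carrying an old sequence number is not honored.\r
+ */\r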
+\r
+static void mthca_free_cq_buf(struct mthca_dev *dev, struct mthca_cq *cq)\r
+{\r
+       mthca_buf_free(dev, (cq->ibcq.cqe + 1) * MTHCA_CQ_ENTRY_SIZE,\r
+                      &cq->queue, cq->is_direct, &cq->mr);\r
+}\r
+\r
+int mthca_init_cq(struct mthca_dev *dev, int nent,\r
+                 struct mthca_ucontext *ctx, u32 pdn,\r
+                 struct mthca_cq *cq)\r
+{\r
+       int size = NEXT_PAGE_ALIGN(nent * MTHCA_CQ_ENTRY_SIZE );\r
+       struct mthca_mailbox *mailbox;\r
+       struct mthca_cq_context *cq_context;\r
+       int err = -ENOMEM;\r
+       u8 status;\r
+       int i;\r
+       SPIN_LOCK_PREP(lh);\r
+\r
+       cq->ibcq.cqe  = nent - 1;\r
+       cq->is_kernel = !ctx;\r
+\r
+       cq->cqn = mthca_alloc(&dev->cq_table.alloc);\r
+       if (cq->cqn == -1)\r
+               return -ENOMEM;\r
+\r
+       if (mthca_is_memfree(dev)) {\r
+               err = mthca_table_get(dev, dev->cq_table.table, cq->cqn);\r
+               if (err)\r
+                       goto err_out;\r
+\r
+               if (cq->is_kernel) {\r
+                       cq->arm_sn = 1;\r
+\r
+                       err = -ENOMEM;\r
+\r
+                       cq->set_ci_db_index = mthca_alloc_db(dev, MTHCA_DB_TYPE_CQ_SET_CI,\r
+                                                            cq->cqn, &cq->set_ci_db);\r
+                       if (cq->set_ci_db_index < 0)\r
+                               goto err_out_icm;\r
+\r
+                       cq->arm_db_index = mthca_alloc_db(dev, MTHCA_DB_TYPE_CQ_ARM,\r
+                                                         cq->cqn, &cq->arm_db);\r
+                       if (cq->arm_db_index < 0)\r
+                               goto err_out_ci;\r
+               }\r
+       }\r
+\r
+       mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);\r
+       if (IS_ERR(mailbox))\r
+               goto err_out_arm;\r
+\r
+       cq_context = mailbox->buf;\r
+\r
+       if (cq->is_kernel) {\r
+               err = mthca_buf_alloc(dev, size, MTHCA_MAX_DIRECT_CQ_SIZE,\r
+                                     &cq->queue, &cq->is_direct,\r
+                                     &dev->driver_pd, 1, &cq->mr);\r
+               if (err)\r
+                       goto err_out_mailbox;\r
+\r
+               for (i = 0; i < nent; ++i)\r
+                       set_cqe_hw(get_cqe(cq, i));\r
+       }\r
+\r
+       spin_lock_init(&cq->lock);\r
+       atomic_set(&cq->refcount, 1);\r
+       init_waitqueue_head(&cq->wait);\r
+       KeInitializeMutex(&cq->mutex, 0);\r
+\r
+       RtlZeroMemory(cq_context, sizeof *cq_context);\r
+       cq_context->flags           = cl_hton32(MTHCA_CQ_STATUS_OK      |\r
+                                                 MTHCA_CQ_STATE_DISARMED |\r
+                                                 MTHCA_CQ_FLAG_TR);\r
+       cq_context->logsize_usrpage = cl_hton32((ffs(nent) - 1) << 24);\r
+       if (ctx)\r
+               cq_context->logsize_usrpage |= cl_hton32(ctx->uar.index);\r
+       else\r
+               cq_context->logsize_usrpage |= cl_hton32(dev->driver_uar.index);\r
+       cq_context->error_eqn       = cl_hton32(dev->eq_table.eq[MTHCA_EQ_ASYNC].eqn);\r
+       cq_context->comp_eqn        = cl_hton32(dev->eq_table.eq[MTHCA_EQ_COMP].eqn);\r
+       cq_context->pd              = cl_hton32(pdn);\r
+       cq_context->lkey            = cl_hton32(cq->mr.ibmr.lkey);\r
+       cq_context->cqn             = cl_hton32(cq->cqn);\r
+\r
+       if (mthca_is_memfree(dev)) {\r
+               cq_context->ci_db    = cl_hton32(cq->set_ci_db_index);\r
+               cq_context->state_db = cl_hton32(cq->arm_db_index);\r
+       }\r
+\r
+       err = mthca_SW2HW_CQ(dev, mailbox, cq->cqn, &status);\r
+       if (err) {\r
+               HCA_PRINT(TRACE_LEVEL_WARNING  ,HCA_DBG_LOW  ,("SW2HW_CQ failed (%d)\n", err));\r
+               goto err_out_free_mr;\r
+       }\r
+\r
+       if (status) {\r
+               HCA_PRINT(TRACE_LEVEL_WARNING,HCA_DBG_LOW,("SW2HW_CQ returned status 0x%02x\n",\r
+                          status));\r
+               err = -EINVAL;\r
+               goto err_out_free_mr;\r
+       }\r
+\r
+       spin_lock_irq(&dev->cq_table.lock, &lh);\r
+       if (mthca_array_set(&dev->cq_table.cq,\r
+                           cq->cqn & (dev->limits.num_cqs - 1),\r
+                           cq)) {\r
+               spin_unlock_irq(&lh);\r
+               goto err_out_free_mr;\r
+       }\r
+       spin_unlock_irq(&lh);\r
+\r
+       cq->cons_index = 0;\r
+\r
+       mthca_free_mailbox(dev, mailbox);\r
+\r
+       return 0;\r
+\r
+err_out_free_mr:\r
+       if (cq->is_kernel)\r
+               mthca_free_cq_buf(dev, cq);\r
+\r
+err_out_mailbox:\r
+       mthca_free_mailbox(dev, mailbox);\r
+\r
+err_out_arm:\r
+       if (cq->is_kernel && mthca_is_memfree(dev))\r
+               mthca_free_db(dev, MTHCA_DB_TYPE_CQ_ARM, cq->arm_db_index);\r
+\r
+err_out_ci:\r
+       if (cq->is_kernel && mthca_is_memfree(dev))\r
+               mthca_free_db(dev, MTHCA_DB_TYPE_CQ_SET_CI, cq->set_ci_db_index);\r
+\r
+err_out_icm:\r
+       mthca_table_put(dev, dev->cq_table.table, cq->cqn);\r
+\r
+err_out:\r
+       mthca_free(&dev->cq_table.alloc, cq->cqn);\r
+\r
+       return err;\r
+}\r
+\r
+void mthca_free_cq(struct mthca_dev *dev,\r
+                  struct mthca_cq *cq)\r
+{\r
+       struct mthca_mailbox *mailbox;\r
+       int err;\r
+       u8 status;\r
+       SPIN_LOCK_PREP(lh);\r
+\r
+       mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);\r
+       if (IS_ERR(mailbox)) {\r
+               HCA_PRINT(TRACE_LEVEL_WARNING  ,HCA_DBG_LOW  ,("No memory for mailbox to free CQ.\n"));\r
+               return;\r
+       }\r
+\r
+       err = mthca_HW2SW_CQ(dev, mailbox, cq->cqn, &status);\r
+       if (err){\r
+               HCA_PRINT(TRACE_LEVEL_WARNING  ,HCA_DBG_LOW  ,("HW2SW_CQ failed (%d)\n", err));\r
+       }\r
+       else if (status){\r
+               HCA_PRINT(TRACE_LEVEL_WARNING  ,HCA_DBG_LOW  ,("HW2SW_CQ returned status 0x%02x\n", status));\r
+       }\r
+       { // debug print\r
+               __be32 *ctx = mailbox->buf;\r
+               int j;\r
+               UNUSED_PARAM_WOWPP(ctx);\r
+               UNUSED_PARAM_WOWPP(j);\r
+\r
+               HCA_PRINT(TRACE_LEVEL_VERBOSE ,HCA_DBG_LOW ,("context for CQN %x (cons index %x, next sw %d)\n",\r
+                      cq->cqn, cq->cons_index,\r
+                      cq->is_kernel ? !!next_cqe_sw(cq) : 0));\r
+               for (j = 0; j < 16; ++j)\r
+                       HCA_PRINT(TRACE_LEVEL_VERBOSE   ,HCA_DBG_LOW   ,("[%2x] %08x\n", j * 4, cl_ntoh32(ctx[j])));\r
+       }\r
+       spin_lock_irq(&dev->cq_table.lock, &lh);\r
+       mthca_array_clear(&dev->cq_table.cq,\r
+                         cq->cqn & (dev->limits.num_cqs - 1));\r
+       spin_unlock_irq(&lh);\r
+\r
+       /* wait for all RUNNING DPCs on that EQ to complete */\r
+       ASSERT(KeGetCurrentIrql() == PASSIVE_LEVEL);\r
+       KeFlushQueuedDpcs();\r
+\r
+       atomic_dec(&cq->refcount);\r
+       wait_event(&cq->wait, !atomic_read(&cq->refcount));\r
+\r
+       if (cq->is_kernel) {\r
+               mthca_free_cq_buf(dev, cq);\r
+               if (mthca_is_memfree(dev)) {\r
+                       mthca_free_db(dev, MTHCA_DB_TYPE_CQ_ARM,    cq->arm_db_index);\r
+                       mthca_free_db(dev, MTHCA_DB_TYPE_CQ_SET_CI, cq->set_ci_db_index);\r
+               }\r
+       }\r
+\r
+       mthca_table_put(dev, dev->cq_table.table, cq->cqn);\r
+       mthca_free(&dev->cq_table.alloc, cq->cqn);\r
+       mthca_free_mailbox(dev, mailbox);\r
+}\r
+\r
+int mthca_init_cq_table(struct mthca_dev *dev)\r
+{\r
+       int err;\r
+\r
+       spin_lock_init(&dev->cq_table.lock);\r
+\r
+       err = mthca_alloc_init(&dev->cq_table.alloc,\r
+                              dev->limits.num_cqs,\r
+                              (1 << 24) - 1,\r
+                              dev->limits.reserved_cqs);\r
+       if (err)\r
+               return err;\r
+\r
+       err = mthca_array_init(&dev->cq_table.cq,\r
+                              dev->limits.num_cqs);\r
+       if (err)\r
+               mthca_alloc_cleanup(&dev->cq_table.alloc);\r
+\r
+       return err;\r
+}\r
+\r
+void mthca_cleanup_cq_table(struct mthca_dev *dev)\r
+{\r
+       mthca_array_cleanup(&dev->cq_table.cq, dev->limits.num_cqs);\r
+       mthca_alloc_cleanup(&dev->cq_table.alloc);\r
+}\r
+\r
+\r
index 0917bbbc534525aa14da08775f8d8824e7dabd14..ca5db21ff00fc399f8b6d715b2489daa17f86969 100644 (file)
@@ -189,7 +189,6 @@ struct mthca_ah {
 \r
 struct mthca_cq {\r
        struct ib_cq           ibcq;\r
-       void                                            *cq_context;    // leo: for IBAL shim\r
        spinlock_t             lock;\r
        atomic_t               refcount;\r
        int                    cqn;\r
@@ -234,7 +233,6 @@ struct mthca_srq {
 \r
        wait_queue_head_t       wait;\r
        KMUTEX                  mutex;\r
-       void                            *srq_context;   \r
 };\r
 \r
 struct mthca_wq {\r
@@ -254,7 +252,6 @@ struct mthca_wq {
 \r
 struct mthca_qp {\r
        struct ib_qp           ibqp;\r
-       void                                            *qp_context;    // leo: for IBAL shim\r
        //TODO: added just because absense of ibv_query_qp\r
        // thereafter it may be worth to be replaced by struct ib_qp_attr qp_attr;\r
        struct ib_qp_init_attr qp_init_attr;    // leo: for query_qp\r
index 481cb9c1354074491a1e590ff2705faf60a30955..cb887b7a780736cc2e0ae063d0fccf20d5e57d68 100644 (file)
@@ -196,6 +196,7 @@ typedef struct _arp_pkt
 #define IP_PROT_IP                     4\r
 #define IP_PROT_TCP                    6\r
 #define IP_PROT_UDP                    17\r
+#define IP_PROT_IGMP           2\r
 \r
 \r
 #include <complib/cl_packon.h>\r
@@ -355,6 +356,55 @@ typedef struct _udp_hdr
 *********/\r
 #include <complib/cl_packoff.h>\r
 \r
+#define IGMP_V2_MEMBERSHIP_QUERY       0x11\r
+#define IGMP_V2_MEMBERSHIP_REPORT      0x16\r
+#define IGMP_V1_MEMBERSHIP_REPORT      0x12    // for backward compatibility with IGMPv1\r
+#define IGMP_V2_LEAVE_GROUP                    0x17\r
+#include <complib/cl_packon.h>\r
+/****s* IB Network Drivers/igmp_v2_hdr_t\r
+* NAME\r
+*      igmp_v2_hdr_t\r
+*\r
+* DESCRIPTION\r
+*      Defines the IGMPv2 header for IP packets.\r
+*\r
+* SYNOPSIS\r
+*/\r
+typedef struct _igmp_v2_hdr\r
+{\r
+       uint8_t         type;\r
+       uint8_t         max_resp_time;\r
+       net16_t         chksum;\r
+       net32_t         group_address;\r
+}      PACK_SUFFIX igmp_v2_hdr_t;\r
+/*\r
+* FIELDS\r
+*      type\r
+*              type of IGMPv2 message: query/report/leave\r
+*\r
+*      max_resp_time\r
+*              The Max Response Time field is meaningful only in Membership Query\r
+*              messages, and specifies the maximum allowed time before sending a\r
+*              responding report in units of 1/10 second.  In all other messages, it\r
+*              is set to zero by the sender and ignored by receivers.\r
+*\r
+*      chksum\r
+*              The checksum is the 16-bit one's complement of the one's complement\r
+*              sum of the whole IGMP message (the entire IP payload).\r
+*\r
+*      group_address\r
+*              In a Membership Query message, the group address field is set to zero\r
+*              when sending a General Query, and set to the group address being\r
+*              queried when sending a Group-Specific Query.\r
+*\r
+*              In a Membership Report or Leave Group message, the group address\r
+*              field holds the IP multicast group address of the group being\r
+*              reported or left.\r
+*\r
+* SEE ALSO\r
+*      IB Network Drivers, eth_hdr_t, arp_pkt_t, ip_hdr_t, tcp_hdr_t\r
+*********/\r
+#include <complib/cl_packoff.h>\r
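
The chksum field above is the usual Internet one's-complement checksum over the entire IGMP message. A minimal sketch of how a sender could compute it, following RFC 1071 style folding (igmp_checksum is an illustrative name, not a helper in this tree):

    static uint16_t igmp_checksum(const void *msg, size_t len)
    {
            const uint16_t *p = (const uint16_t*)msg;
            uint32_t sum = 0;

            while (len > 1) {                  /* sum 16-bit words */
                    sum += *p++;
                    len -= 2;
            }
            if (len)                           /* odd trailing byte */
                    sum += *(const uint8_t*)p;
            while (sum >> 16)                  /* fold the carries back in */
                    sum = (sum & 0xffff) + (sum >> 16);
            return (uint16_t)~sum;             /* one's complement of the sum */
    }

The chksum field is zeroed before summing, and the words are summed in network byte order, so the result can be stored without swapping.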
 \r
 #define DHCP_PORT_SERVER               CL_HTON16(67)\r
 #define DHCP_PORT_CLIENT               CL_HTON16(68)\r
index c7d3e35d52092c6b0b71e9c51c51bf4349aaffc2..a563a2742c15bca430ae13edd8989c38bacc8b1b 100644 (file)
@@ -762,8 +762,13 @@ ipoib_refresh_mcast(
 \r
                        if( j != p_adapter->mcast_array_size )\r
                                continue;\r
-\r
-                       ipoib_port_join_mcast( p_port, p_mac_array[i] ,IB_MC_REC_STATE_FULL_MEMBER);\r
+                       if ( ( p_mac_array[i].addr[0] == 1 && p_mac_array[i].addr[1] == 0 && p_mac_array[i].addr[2] == 0x5e &&\r
+                                  p_mac_array[i].addr[3] == 0 && p_mac_array[i].addr[4] == 0 && p_mac_array[i].addr[5] == 1 ) ||\r
+                                 !( p_mac_array[i].addr[0] == 1 && p_mac_array[i].addr[1] == 0 && p_mac_array[i].addr[2] == 0x5e )\r
+                               )\r
+                       {\r
+                               ipoib_port_join_mcast( p_port, p_mac_array[i], IB_MC_REC_STATE_FULL_MEMBER );\r
+                       }\r
                }\r
        }\r
 \r
index b895b5f966d07543b1abd154dcb93b54d19c3d7c..c23a342a3bb56fff7045a0e4adda49bb6514d0a9 100644 (file)
@@ -75,7 +75,7 @@ typedef struct _ipoib_params
        uint32_t        payload_mtu;\r
        uint32_t        xfer_block_size;\r
        mac_addr_t      conf_mac;\r
-\r
+       uint32_t        mc_leave_rescan;\r
 }      ipoib_params_t;\r
 /*\r
 * FIELDS\r
index c8f34310516e3f994fec86748b0279cd87dedb90..3d1056fff933a061de1fce2dd5991b865eec9f98 100644 (file)
@@ -153,7 +153,8 @@ IPOIB_REG_ENTRY HCARegTable[] = {
        {NDIS_STRING_CONST("SaTimeout"),        1, IPOIB_OFFSET(sa_timeout),            IPOIB_SIZE(sa_timeout),         1000,       250,    UINT_MAX},\r
        {NDIS_STRING_CONST("SaRetries"),        1, IPOIB_OFFSET(sa_retry_cnt),          IPOIB_SIZE(sa_retry_cnt),       10,         1,      UINT_MAX},\r
        {NDIS_STRING_CONST("RecvRatio"),        1, IPOIB_OFFSET(recv_pool_ratio),       IPOIB_SIZE(recv_pool_ratio),    1,          1,      10},\r
-       {NDIS_STRING_CONST("PayloadMtu"),       1, IPOIB_OFFSET(payload_mtu),           IPOIB_SIZE(payload_mtu),        2044,         60,   4092}\r
+       {NDIS_STRING_CONST("PayloadMtu"),       1, IPOIB_OFFSET(payload_mtu),           IPOIB_SIZE(payload_mtu),        2044,       60,   4092},\r
+       {NDIS_STRING_CONST("MCLeaveRescan"),    1, IPOIB_OFFSET(mc_leave_rescan),       IPOIB_SIZE(mc_leave_rescan),    260,        1,    3600}\r
 };  \r
 \r
 #define IPOIB_NUM_REG_PARAMS (sizeof (HCARegTable) / sizeof(IPOIB_REG_ENTRY))\r
index 6887fd697f46503420f7a879798f7096d63b872c..30d1edf07c5e675e8f0a606323f44223313897e1 100644 (file)
@@ -62,7 +62,8 @@ typedef struct _ipoib_endpt
        ib_av_handle_t                  h_av;\r
        boolean_t                               expired;\r
        ib_al_ifc_t                             *p_ifc;\r
-\r
+       boolean_t                       is_in_use;\r
+       boolean_t                               is_mcast_listener;\r
 }      ipoib_endpt_t;\r
 /*\r
 * FIELDS\r
index 1e0f94698496f0d8b8236f54f47c3f9f8813e16c..93bb19417e1843103b7b655041810df4135b525c 100644 (file)
@@ -67,7 +67,8 @@ ib_gid_t      bcast_mgid_template = {
 ipoib_port_t   *gp_ipoib_port;\r
 #endif\r
 \r
-\r
+static void __port_mcast_garbage_dpc(KDPC *p_gc_dpc,void *context,void *s_arg1, void *s_arg2);\r
+static void __port_do_mcast_garbage(ipoib_port_t* const        p_port );\r
 /******************************************************************************\r
 *\r
 * Declarations\r
@@ -290,6 +291,14 @@ __send_mgr_filter_ip(
        IN                              size_t                                          buf_len,\r
        IN      OUT                     ipoib_send_desc_t* const        p_desc );\r
 \r
+static NDIS_STATUS\r
+__send_mgr_filter_igmp_v2(\r
+       IN                              ipoib_port_t* const                     p_port,\r
+       IN              const   ip_hdr_t* const                         p_ip_hdr,\r
+       IN                              size_t                                          iph_options_size,\r
+       IN                              NDIS_BUFFER*                            p_buf,\r
+       IN                              size_t                                          buf_len );\r
+\r
 static NDIS_STATUS\r
 __send_mgr_filter_udp(\r
        IN                              ipoib_port_t* const                     p_port,\r
@@ -490,6 +499,13 @@ inline void ipoib_port_deref(ipoib_port_t * p_port, int type)
 #endif\r
 }\r
 \r
+/* Returns a pointer to the payload that follows the IP header,\r
+*  assuming the payload and the IP header are in the same buffer.\r
+*/\r
+static void* GetIpPayloadPtr(const     ip_hdr_t* const p_ip_hdr)\r
+{\r
+       return (void*)((uint8_t*)p_ip_hdr + 4*(p_ip_hdr->ver_hl & 0xf));\r
+}\r
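
For example, a ver_hl byte of 0x45 (IPv4, IHL of five 32-bit words) puts the payload 20 bytes past the start of the header, while 0x46 (one word of options) puts it at 24.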
 \r
 /******************************************************************************\r
 *\r
@@ -653,6 +669,9 @@ __port_init(
                        p_adapter->p_ifc->get_err_str( status )) );\r
                return status;\r
        }\r
+        /* Initialize multicast garbage collector timer and DPC object */\r
+        KeInitializeDpc(&p_port->gc_dpc,(PKDEFERRED_ROUTINE)__port_mcast_garbage_dpc,p_port);\r
+        KeInitializeTimerEx(&p_port->gc_timer,SynchronizationTimer);\r
 \r
        /* We only ever destroy from the PnP callback thread. */\r
        cl_status = cl_obj_init( &p_port->obj, CL_DESTROY_SYNC,\r
@@ -748,6 +767,8 @@ __port_free(
 \r
        p_port = PARENT_STRUCT( p_obj, ipoib_port_t, obj );\r
 \r
+       KeCancelTimer(&p_port->gc_timer);\r
+       KeFlushQueuedDpcs();\r
        __endpt_mgr_destroy( p_port );\r
        __recv_mgr_destroy( p_port );\r
        __send_mgr_destroy( p_port );\r
@@ -2133,6 +2154,9 @@ __recv_gen(
        p_eth->hdr.src = p_src->mac;\r
        p_eth->hdr.dst = p_dst->mac;\r
 \r
+       if (p_dst->h_mcast) {\r
+               p_dst->is_in_use = TRUE;\r
+       }\r
        IPOIB_EXIT( IPOIB_DBG_RECV );\r
        return IB_SUCCESS;\r
 }\r
@@ -3106,6 +3130,26 @@ __send_mgr_filter_ip(
        if( p_ip_hdr->offset ||\r
                p_ip_hdr->prot != IP_PROT_UDP )\r
        {\r
+               /* Check if this packet is IGMP */\r
+               if ( p_ip_hdr->prot == IP_PROT_IGMP ) \r
+               {\r
+                       /*\r
+                           The IP header of an IGMP packet can arrive split across two NDIS_BUFFERs:\r
+                               1. the fixed IP header\r
+                               2. the IP options\r
+                               so to reach the IGMP message we may have to skip the IP-options NDIS_BUFFER.\r
+                       */\r
+                       size_t iph_size_in_bytes = (p_ip_hdr->ver_hl & 0xf) * 4;\r
+                       size_t iph_options_size = iph_size_in_bytes - buf_len;\r
+                       buf_len -= sizeof(ip_hdr_t);    // bytes left in this buffer after the fixed IP header\r
+\r
+                       /*\r
+                           The arriving IGMP packet may be IGMPv1 or IGMPv3 rather than\r
+                               IGMPv2; we pass it to __send_mgr_filter_igmp_v2() regardless.\r
+                       */\r
+                       __send_mgr_filter_igmp_v2(p_port, p_ip_hdr, iph_options_size, p_buf, buf_len);\r
+               }\r
                /* Not a UDP packet. */\r
                cl_perf_start( SendTcp );\r
                status = __send_gen( p_port, p_desc );\r
@@ -3125,6 +3169,133 @@ __send_mgr_filter_ip(
        return status;\r
 }\r
 \r
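As a worked example of the arithmetic above: for a header with IHL 6 (24 bytes) whose first NDIS_BUFFER carries only the fixed 20-byte ip_hdr_t, iph_size_in_bytes is 24, iph_options_size is 24 - 20 = 4, and buf_len drops to 0 -- exactly the case that makes __send_mgr_filter_igmp_v2() below walk to the following buffers.
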
+static NDIS_STATUS\r
+__send_mgr_filter_igmp_v2(\r
+       IN                              ipoib_port_t* const                     p_port,\r
+       IN              const   ip_hdr_t* const                         p_ip_hdr,\r
+       IN                              size_t                                          iph_options_size,\r
+       IN                              NDIS_BUFFER*                            p_buf,\r
+       IN                              size_t                                          buf_len )\r
+{\r
+       igmp_v2_hdr_t           *p_igmp_v2_hdr = NULL;\r
+       NDIS_STATUS                     endpt_status;\r
+       ipoib_endpt_t*          p_endpt = NULL;\r
+       mac_addr_t                      fake_mcast_mac;\r
+\r
+       IPOIB_ENTER( IPOIB_DBG_SEND );\r
+\r
+       IPOIB_PRINT( TRACE_LEVEL_INFORMATION, IPOIB_DBG_MCAST,\r
+                        ("buf_len = %d,iph_options_size = %d\n",(int)buf_len,(int)iph_options_size ) );\r
+\r
+       if( !buf_len )\r
+       {\r
+               // To reach the IGMP header, skip the IP-options NDIS_BUFFER (if present)\r
+               while ( iph_options_size )\r
+               {\r
+                       NdisGetNextBuffer( p_buf, &p_buf );\r
+                       if( !p_buf )\r
+                       {\r
+                               IPOIB_PRINT_EXIT( TRACE_LEVEL_ERROR, IPOIB_DBG_ERROR,\r
+                                       ("Failed to get IGMPv2 header buffer.\n") );\r
+                               return NDIS_STATUS_FAILURE;\r
+                       }\r
+                       NdisQueryBufferSafe( p_buf, &p_igmp_v2_hdr, &buf_len, NormalPagePriority );\r
+                       if( !p_igmp_v2_hdr )\r
+                       {\r
+                               IPOIB_PRINT_EXIT( TRACE_LEVEL_ERROR, IPOIB_DBG_ERROR,\r
+                                       ("Failed to query IGMPv2 header buffer.\n") );\r
+                               return NDIS_STATUS_FAILURE;\r
+                       }\r
+                       iph_options_size-=buf_len;\r
+               }\r
+        \r
+               NdisGetNextBuffer( p_buf, &p_buf );\r
+               if( !p_buf )\r
+               {\r
+                       IPOIB_PRINT_EXIT( TRACE_LEVEL_ERROR, IPOIB_DBG_ERROR,\r
+                               ("Failed to get IGMPv2 header buffer.\n") );\r
+                       return NDIS_STATUS_FAILURE;\r
+               }\r
+               NdisQueryBufferSafe( p_buf, &p_igmp_v2_hdr, &buf_len, NormalPagePriority );\r
+               if( !p_igmp_v2_hdr )\r
+               {\r
+                       IPOIB_PRINT_EXIT( TRACE_LEVEL_ERROR, IPOIB_DBG_ERROR,\r
+                               ("Failed to query IGMPv2 header buffer.\n") );\r
+                       return NDIS_STATUS_FAILURE;\r
+               }\r
+       }\r
+       else\r
+       {\r
+               /* the IP header and its payload are assumed to be in the same buffer */\r
+               p_igmp_v2_hdr = GetIpPayloadPtr(p_ip_hdr);\r
+       }\r
+       /* Get the IGMP header length. */\r
+       if( buf_len < sizeof(igmp_v2_hdr_t) )\r
+       {\r
+               IPOIB_PRINT_EXIT( TRACE_LEVEL_ERROR, IPOIB_DBG_ERROR,\r
+                       ("Buffer not large enough for IGMPv2 packet.\n") );\r
+               return NDIS_STATUS_BUFFER_TOO_SHORT;\r
+       }\r
+\r
+       // build fake mac from igmp packet group address\r
+       fake_mcast_mac.addr[0] = 1;\r
+       fake_mcast_mac.addr[1] = ((unsigned char*)&p_igmp_v2_hdr->group_address)[0] & 0x0f;\r
+       fake_mcast_mac.addr[2] = 0x5E;\r
+       fake_mcast_mac.addr[3] = ((unsigned char*)&p_igmp_v2_hdr->group_address)[1];\r
+       fake_mcast_mac.addr[4] = ((unsigned char*)&p_igmp_v2_hdr->group_address)[2];\r
+       fake_mcast_mac.addr[5] = ((unsigned char*)&p_igmp_v2_hdr->group_address)[3];\r
+\r
+       switch ( p_igmp_v2_hdr->type )\r
+       {\r
+       case IGMP_V2_MEMBERSHIP_REPORT:\r
+               /*\r
+                       Somebody has opened a listener on this group.\r
+                       Mark the mcast endpt as a send/receive endpt so that the mcast\r
+                       garbage collector will not delete it.\r
+               */\r
+               IPOIB_PRINT( TRACE_LEVEL_INFORMATION, IPOIB_DBG_MCAST,\r
+                       ("Caught IGMP_V2_MEMBERSHIP_REPORT message\n") );\r
+               endpt_status = __endpt_mgr_ref( p_port, fake_mcast_mac, &p_endpt );\r
+               if ( p_endpt )\r
+               {\r
+                       cl_obj_lock( &p_port->obj );\r
+                       p_endpt->is_mcast_listener = TRUE;\r
+                       cl_obj_unlock( &p_port->obj );\r
+            ipoib_endpt_deref( p_endpt );\r
+               }\r
+               break;\r
+\r
+       case IGMP_V2_LEAVE_GROUP:\r
+               /*\r
+                       Somebody has closed the listener on this group.\r
+                       Mark the mcast endpt as send-only so that the mcast garbage\r
+                       collector will delete it on its next pass.\r
+               */\r
+               IPOIB_PRINT( TRACE_LEVEL_INFORMATION, IPOIB_DBG_MCAST,\r
+                            ("Caught IGMP_V2_LEAVE_GROUP message\n") );\r
+               endpt_status = __endpt_mgr_ref( p_port, fake_mcast_mac, &p_endpt );\r
+               if ( p_endpt )\r
+               {\r
+                       cl_obj_lock( &p_port->obj );\r
+                       p_endpt->is_mcast_listener = FALSE;\r
+                       p_endpt->is_in_use = FALSE;\r
+                       cl_obj_unlock( &p_port->obj );\r
+                       ipoib_endpt_deref( p_endpt );\r
+               }\r
+\r
+               __port_do_mcast_garbage(p_port);\r
+\r
+               break;\r
+\r
+       default:\r
+               IPOIB_PRINT( TRACE_LEVEL_INFORMATION, IPOIB_DBG_MCAST,\r
+                            ("Sending unknown IGMP message type: 0x%x\n", p_igmp_v2_hdr->type ) );\r
+               break;\r
+       }\r
+\r
+       IPOIB_EXIT( IPOIB_DBG_SEND );\r
+       return NDIS_STATUS_SUCCESS;\r
+}\r
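
The fake_mcast_mac built above is the driver's own 28-bit variant of the IP-to-Ethernet multicast mapping: unlike the standard RFC 1112 23-bit form, byte 1 keeps the low four bits of the first group-address octet so that 28 bits of the group address survive into the MGID later on. A standalone sketch of the same mapping (build_fake_mcast_mac is an illustrative name):

    static void build_fake_mcast_mac(const uint8_t group[4], mac_addr_t *p_mac)
    {
            p_mac->addr[0] = 0x01;              /* 01-xx-5e multicast prefix     */
            p_mac->addr[1] = group[0] & 0x0f;   /* low 4 bits of the first octet */
            p_mac->addr[2] = 0x5e;
            p_mac->addr[3] = group[1];
            p_mac->addr[4] = group[2];
            p_mac->addr[5] = group[3];
    }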
 \r
 static NDIS_STATUS\r
 __send_mgr_filter_udp(\r
@@ -3163,7 +3334,7 @@ __send_mgr_filter_udp(
        }\r
        else\r
        {\r
-               p_udp_hdr = (udp_hdr_t*)(p_ip_hdr + 1);\r
+               p_udp_hdr = (udp_hdr_t*)GetIpPayloadPtr(p_ip_hdr);\r
        }\r
        /* Get the UDP header and check the destination port numbers. */\r
        if( buf_len < sizeof(udp_hdr_t) )\r
@@ -3211,7 +3382,6 @@ __send_mgr_filter_udp(
        return status;\r
 }\r
 \r
-\r
 unsigned short ipchksum(unsigned short *ip, int len)\r
 {\r
     unsigned long sum = 0;\r
@@ -3597,6 +3767,14 @@ __send_mgr_queue(
                        return NDIS_STATUS_PENDING;\r
                }\r
        }\r
+       else if ( status == NDIS_STATUS_SUCCESS && \r
+                         ETH_IS_MULTICAST( p_eth_hdr->dst.addr ) &&  \r
+                         !ETH_IS_BROADCAST( p_eth_hdr->dst.addr ) )\r
+       {\r
+               CL_ASSERT( (*pp_endpt) );\r
+               CL_ASSERT((*pp_endpt)->h_mcast != NULL);\r
+               (*pp_endpt)->is_in_use = TRUE;\r
+       }\r
 \r
        IPOIB_EXIT( IPOIB_DBG_SEND );\r
        return status;\r
@@ -3775,6 +3953,44 @@ ipoib_port_send(
                }\r
 \r
                cl_perf_start( SendMgrQueue );\r
+\r
+               if ( ETH_IS_MULTICAST( p_eth_hdr->dst.addr ) && \r
+                        p_eth_hdr->type == ETH_PROT_TYPE_IP &&\r
+                        !ETH_IS_BROADCAST( p_eth_hdr->dst.addr ) ) \r
+               {\r
+                       ip_hdr_t                        *p_ip_hdr;\r
+                       NDIS_BUFFER                     *p_ip_hdr_buf;\r
+                       UINT                            ip_hdr_buf_len;\r
+\r
+                       // Extract the ip hdr \r
+                       NdisGetNextBuffer( p_buf, &p_ip_hdr_buf );\r
+                       if( !p_ip_hdr_buf )\r
+                       {\r
+                               IPOIB_PRINT_EXIT( TRACE_LEVEL_ERROR, IPOIB_DBG_ERROR,\r
+                                       ("Failed to get IP header buffer.\n") );\r
+                               goto h_end;\r
+                       }\r
+       \r
+                       NdisQueryBufferSafe( p_ip_hdr_buf, &p_ip_hdr, &ip_hdr_buf_len, NormalPagePriority );\r
+                       if( !p_ip_hdr )\r
+                       {\r
+                               IPOIB_PRINT_EXIT( TRACE_LEVEL_ERROR, IPOIB_DBG_ERROR,\r
+                                       ("Failed to query IP header buffer.\n") );\r
+                               goto h_end;\r
+                       }\r
+\r
+                       if( ip_hdr_buf_len < sizeof(ip_hdr_t) )\r
+                       {\r
+                               /* Buffer too small to hold the IP header; skip the multicast MAC fix-up. */\r
+                               IPOIB_PRINT_EXIT( TRACE_LEVEL_ERROR, IPOIB_DBG_ERROR,\r
+                                       ("Buffer too small for IP packet.\n") );\r
+                               goto h_end;\r
+                       }\r
+               \r
+                       p_eth_hdr->dst.addr[1] = ((unsigned char*)&p_ip_hdr->dst_ip)[0] & 0x0f;\r
+                       p_eth_hdr->dst.addr[3] = ((unsigned char*)&p_ip_hdr->dst_ip)[1];\r
+               }\r
+h_end:\r
                status = __send_mgr_queue( p_port, p_eth_hdr, &desc.p_endpt );\r
                cl_perf_stop( &p_port->p_adapter->perf, SendMgrQueue );\r
                if( status == NDIS_STATUS_PENDING )\r
@@ -4600,6 +4816,7 @@ __endpt_mgr_add_bcast(
                return IB_INSUFFICIENT_RESOURCES;\r
        }\r
 	/* set reference to the transport, to be used while the endpoint is not attached to the port */\r
+       p_endpt->is_mcast_listener = TRUE;\r
        p_endpt->p_ifc = p_port->p_adapter->p_ifc;\r
        status = ipoib_endpt_set_mcast( p_endpt, p_port->ib_mgr.h_pd,\r
                p_port->port_num, p_mcast_rec );\r
@@ -5251,6 +5468,10 @@ ipoib_port_down(
        KeWaitForSingleObject(\r
                &p_port->sa_event, Executive, KernelMode, FALSE, NULL );\r
 \r
+       /* garbage collector timer is not needed when link is down */\r
+       KeCancelTimer(&p_port->gc_timer);\r
+       KeFlushQueuedDpcs();\r
+\r
        /*\r
         * Put the QP in the error state.  This removes the need to\r
         * synchronize with send/receive callbacks.\r
@@ -5292,6 +5513,7 @@ __bcast_cb(
 {\r
        ipoib_port_t    *p_port;\r
        ib_api_status_t status;\r
+       LARGE_INTEGER   gc_due_time;\r
 \r
        IPOIB_ENTER( IPOIB_DBG_INIT );\r
 \r
@@ -5403,6 +5625,11 @@ err:
        /* Notify the adapter that we now have an active connection. */\r
        ipoib_set_active( p_port->p_adapter );\r
 \r
+       /* garbage collector timer is needed when link is active */\r
+       gc_due_time.QuadPart = -(int64_t)(((uint64_t)p_port->p_adapter->params.mc_leave_rescan * 2000000) * 10);\r
+       KeSetTimerEx(&p_port->gc_timer,gc_due_time,\r
+                           (LONG)p_port->p_adapter->params.mc_leave_rescan*1000,&p_port->gc_dpc);\r
+\r
        KeSetEvent( &p_port->sa_event, EVENT_INCREMENT, FALSE );\r
        ipoib_port_deref( p_port, ref_join_bcast );\r
        IPOIB_EXIT( IPOIB_DBG_INIT );\r
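
On the KeSetTimerEx call above, the due time is a negative (i.e. relative) count of 100 ns units while the period is in milliseconds, so the first garbage-collector pass fires after 2 * mc_leave_rescan seconds and then repeats every mc_leave_rescan seconds. Spelled out, assuming only the documented KeSetTimerEx semantics:

    LARGE_INTEGER due;
    LONG period_ms = (LONG)(mc_leave_rescan * 1000);     /* seconds -> ms              */
    due.QuadPart   = -10000000LL * 2 * mc_leave_rescan;  /* 2x period, in 100 ns units */
    KeSetTimerEx( &p_port->gc_timer, due, period_ms, &p_port->gc_dpc );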
@@ -5529,15 +5756,15 @@ ipoib_port_join_mcast(
        mcast_req.member_rec.mlid = 0;\r
        ib_member_set_state( &mcast_req.member_rec.scope_state,state);\r
 \r
-       if( mac.addr[0] == 1 && mac.addr[1] == 0 && mac.addr[2] == 0x5E )\r
+       if( (mac.addr[0] == 1) && (mac.addr[2] == 0x5E ))\r
        {\r
                /*\r
                 * Update the address portion of the MGID with the 28 lower bits of the\r
-                * IP address.  Since we're given a MAC address, we end up using only\r
-                * the 24 lower bits of that network-byte-ordered value (assuming MSb\r
-                * is zero).\r
+                * IP address.  Since we're given a MAC address, we use the 24\r
+                * lower bits of that network-byte-ordered value plus the 4 least\r
+                * significant bits of the first byte of the IP address.\r
                 */\r
-               mcast_req.member_rec.mgid.raw[12] = 0;\r
+               mcast_req.member_rec.mgid.raw[12] = mac.addr[1];\r
                mcast_req.member_rec.mgid.raw[13] = mac.addr[3];\r
                mcast_req.member_rec.mgid.raw[14] = mac.addr[4];\r
                mcast_req.member_rec.mgid.raw[15] = mac.addr[5];\r
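
For example, for the group 239.1.2.3 the fake MAC carries mac.addr[1] = 0xEF & 0x0F = 0x0F, so mgid.raw[12..15] become 0F:01:02:03 -- the 28 low bits of the group address, one nibble more than the old 24-bit form preserved.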
@@ -5695,6 +5922,8 @@ __mcast_cb(
                        &p_port->endpt_mgr.lid_endpts, p_endpt->dlid, &p_endpt->lid_item );\r
                CL_ASSERT( p_qitem == &p_endpt->lid_item );\r
        }\r
+       /* mark the endpoint as in use */\r
+       p_endpt->is_in_use = TRUE;\r
        cl_obj_unlock( &p_port->obj );\r
        \r
        /* Try to send all pending sends. */\r
@@ -5751,6 +5980,82 @@ __leave_error_mcast_cb(
        IPOIB_EXIT( IPOIB_DBG_MCAST );\r
 }\r
 \r
+static void __port_do_mcast_garbage(ipoib_port_t* const        p_port)\r
+{\r
+       const mac_addr_t DEFAULT_MCAST_GROUP = {0x01, 0x00, 0x5e, 0x00, 0x00, 0x01};\r
+       /* Do garbage collecting... */\r
 \r
+       cl_map_item_t   *p_item;\r
+       ipoib_endpt_t   *p_endpt;\r
+       cl_qlist_t              destroy_mc_list;\r
+       uint8_t                 cnt;\r
+       static const int GC_MAX_LEAVE_NUM = 80;\r
 \r
+       cl_qlist_init( &destroy_mc_list );\r
+\r
+       cl_obj_lock( &p_port->obj );\r
+       cnt = 0;\r
+       p_item = cl_qmap_head( &p_port->endpt_mgr.mac_endpts );\r
+       while( (p_item != cl_qmap_end( &p_port->endpt_mgr.mac_endpts )) && (cnt < GC_MAX_LEAVE_NUM))\r
+       {\r
+               p_endpt = PARENT_STRUCT( p_item, ipoib_endpt_t, mac_item );\r
+               p_item = cl_qmap_next( p_item );\r
+\r
+               /* Check if the current endpoint is not a multicast listener */\r
+\r
+               if( p_endpt->h_mcast && \r
+                       (!p_endpt->is_mcast_listener) &&\r
+                       ( cl_memcmp( &p_endpt->mac, &DEFAULT_MCAST_GROUP, sizeof(mac_addr_t) ) &&\r
+                        (!p_endpt->is_in_use) ))\r
+               {\r
+                       cl_qmap_remove_item( &p_port->endpt_mgr.mac_endpts,\r
+                               &p_endpt->mac_item );\r
+                       cl_fmap_remove_item( &p_port->endpt_mgr.gid_endpts,\r
+                               &p_endpt->gid_item );\r
+\r
+                       if( p_endpt->dlid )\r
+                       {\r
+                               cl_qmap_remove_item( &p_port->endpt_mgr.lid_endpts,\r
+                                       &p_endpt->lid_item );\r
+                               p_endpt->dlid = 0;\r
+                       }\r
+\r
+                       cl_qlist_insert_tail(\r
+                               &destroy_mc_list, &p_endpt->mac_item.pool_item.list_item );\r
+                       cnt++;\r
+               }\r
+               else\r
+                       p_endpt->is_in_use = FALSE;\r
+       }\r
+       cl_obj_unlock( &p_port->obj );\r
+\r
+       /* Destroy all multicast endpoints now that we have released the lock. */\r
+       while( cl_qlist_count( &destroy_mc_list ) )\r
+       {\r
+               p_endpt = PARENT_STRUCT( cl_qlist_head( &destroy_mc_list ),\r
+                                                                ipoib_endpt_t, mac_item.pool_item.list_item );\r
+               IPOIB_PRINT( TRACE_LEVEL_INFORMATION, IPOIB_DBG_ENDPT,\r
+                       ("mcast garbage collector: destroying endpoint %02x:%02x:%02x:%02x:%02x:%02x \n", \r
+                                p_endpt->mac.addr[0],\r
+                                p_endpt->mac.addr[1],\r
+                                p_endpt->mac.addr[2],\r
+                                p_endpt->mac.addr[3],\r
+                                p_endpt->mac.addr[4],\r
+                                p_endpt->mac.addr[5]) );\r
+\r
+               cl_obj_destroy( &PARENT_STRUCT( cl_qlist_remove_head( &destroy_mc_list ),\r
+                       ipoib_endpt_t, mac_item.pool_item.list_item )->obj );\r
+       }\r
+}\r
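
Note the two-phase shape of the collector: victims are unlinked from the lookup maps while the port lock is held, but cl_obj_destroy() runs only after the lock is released, so endpoint teardown can block or take other locks without deadlocking against p_port->obj.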
+\r
+static void __port_mcast_garbage_dpc(KDPC *p_gc_dpc,void *context,void *s_arg1, void *s_arg2)\r
+{\r
+       ipoib_port_t *p_port = context;\r
+\r
+       UNREFERENCED_PARAMETER(p_gc_dpc);\r
+       UNREFERENCED_PARAMETER(s_arg1);\r
+       UNREFERENCED_PARAMETER(s_arg2);\r
+\r
+       __port_do_mcast_garbage(p_port);\r
+}\r
 \r
index b312210c97ea6a22fd56bea6ea1be7c747195b26..1b7f10769288e61015133359e23fe09a343d84f6 100644 (file)
@@ -509,6 +509,8 @@ typedef struct _ipoib_port
 \r
        atomic32_t                              hdr_idx;\r
        uint16_t                                pkey_index;\r
+       KDPC                                    gc_dpc;\r
+       KTIMER                                  gc_timer;\r
        ipoib_hdr_t                             hdr[1]; /* Must be last! */\r
 \r
 }      ipoib_port_t;\r
index ef377b4a93269b5cf0abeb121622c96ee30b7130..7cf7b31263f95a786735e59fe872abd402d47614 100644 (file)
@@ -58,6 +58,7 @@ AddService = ipoib, 2, IpoibService, IpoibEventLog
 AddService = ipoib, 2, IpoibService, IpoibEventLog\r
 \r
 [IpoibAddReg]\r
+HKR, ,RDMACapable, %REG_DWORD%, 1\r
 HKR, Ndi,                       Service,    0, "ipoib"\r
 HKR, Ndi\Interfaces,            UpperRange, 0, "ndis5"\r
 HKR, Ndi\Interfaces,            LowerRange, 0, "ethernet"\r
@@ -126,6 +127,12 @@ HKR, Ndi\Params\PayloadMtu,                Default,        0, "2044"
 HKR, Ndi\Params\PayloadMtu,            Min,            0, "60"\r
 HKR, Ndi\Params\PayloadMtu,            Max,            0, "4092"\r
 \r
+HKR, Ndi\Params\MCLeaveRescan,         ParamDesc,      0, "MC leave rescan (sec)"\r
+HKR, Ndi\Params\MCLeaveRescan,         Type,           0, "dword"\r
+HKR, Ndi\Params\MCLeaveRescan,         Default,        0, "260"\r
+HKR, Ndi\Params\MCLeaveRescan,         Optional,       0, "0"\r
+HKR, Ndi\Params\MCLeaveRescan,         Min,            0, "1"\r
+HKR, Ndi\Params\MCLeaveRescan,         Max,            0, "3600"\r
 [IpoibService]\r
 DisplayName     = %IpoibServiceDispName%\r
 ServiceType     = 1 ;%SERVICE_KERNEL_DRIVER%\r
@@ -194,4 +201,5 @@ IcsDisk1             = "OpenIB IPoIB Disk #1"
 DIRID_SYSTEM         = 11\r
 DIRID_DRIVERS        = 12\r
 DIRID_SYSTEM_X86     = 16425\r
+REG_DWORD            = 0x00010001\r
 REG_DWORD_NO_CLOBBER = 0x00010003\r
index a2570aeb19c87dee89c9ac65bd57cdb71f193f92..756c10e632da07adead8cf2be6352b7d672ee852 100644 (file)
@@ -6332,7 +6332,7 @@ typedef struct _ib_vl_arb_table_record
 typedef struct _ib_grh\r
 {\r
        ib_net32_t              ver_class_flow;\r
-       ib_net16_t              resv1;\r
+       uint16_t                resv1;\r
        uint8_t                 resv2;\r
        uint8_t                 hop_limit;\r
        ib_gid_t                src_gid;\r
index 464ba955b7a1710bb699619e492427dd30a25e2d..2e600ffc208138f4d77c636f707bc59025525378 100644 (file)
@@ -231,6 +231,7 @@ typedef enum _ib_async_event_t
        IB_AE_PORT_DOWN,\r
        IB_AE_CLIENT_REREGISTER,\r
        IB_AE_SRQ_LIMIT_REACHED,\r
+       IB_AE_SRQ_CATAS_ERROR,\r
        IB_AE_SRQ_QP_LAST_WQE_REACHED,\r
        IB_AE_UNKNOWN           /* ALWAYS LAST ENUM VALUE */\r
 \r
@@ -324,6 +325,9 @@ typedef enum _ib_async_event_t
 *      IB_AE_CLIENT_REREGISTER\r
 *              The SM indicates to the client that it should reregister its SA records.\r
 *\r
+*      IB_AE_SRQ_LIMIT_REACHED\r
+*              The SRQ low watermark has been reached.\r
+*\r
 *      IB_AE_SRQ_CATAS_ERROR\r
 *              An error occurred while processing or accessing the SRQ that prevents\r
 *              dequeuing a WQE from the SRQ and reporting of receive completions.\r
@@ -386,48 +390,12 @@ ib_get_async_event_str(
 */\r
 typedef struct _ib_event_rec\r
 {\r
-       TO_LONG_PTR(void* ,                     context) ;\r
+       TO_LONG_PTR(void*,              context);\r
        ib_async_event_t                type;\r
 \r
        /* HCA vendor specific event information. */\r
        uint64_t                                vendor_specific;\r
-\r
-       /* The following structures are valid only for trap types. */\r
-       union _trap\r
-       {\r
-               struct\r
-               {\r
-                       uint16_t                        lid;\r
-                       ib_net64_t                      port_guid;\r
-                       uint8_t                         port_num;\r
-\r
-                       /*\r
-                        * The following structure is valid only for\r
-                        * P_KEY, Q_KEY, and M_KEY violation traps.\r
-                        */\r
-                       struct\r
-                       {\r
-                               uint8_t                 sl;\r
-                               uint16_t                src_lid;\r
-                               uint16_t                dest_lid;\r
-                               union _key\r
-                               {\r
-                                       uint16_t        pkey;\r
-                                       uint32_t        qkey;\r
-                                       uint64_t        mkey;\r
-                               } key;\r
-                               uint32_t                src_qp;\r
-                               uint32_t                dest_qp;\r
-                               ib_gid_t                src_gid;\r
-                               ib_gid_t                dest_gid;\r
-\r
-                       }       violation;\r
-\r
-               } info;\r
-\r
-               ib_net64_t      sysimg_guid;\r
-\r
-       }       trap;\r
+       uint8_t                                 port_number;\r
 \r
 }      ib_event_rec_t;\r
 /*******/\r
@@ -593,6 +561,8 @@ typedef struct _ib_port_attr
         * timeout = 4.096 microseconds * 2^subnet_timeout\r
         */\r
        uint8_t                                 subnet_timeout;\r
+       uint8_t                                 active_speed;\r
+       uint8_t                                 phys_state;\r
 \r
        ib_port_cap_t                   cap;\r
        uint16_t                                pkey_ctr;\r
@@ -604,8 +574,8 @@ typedef struct _ib_port_attr
         * Pointers at the end of the structure to allow doing a simple\r
         * memory comparison of contents up to the first pointer.\r
         */\r
-       TO_LONG_PTR(ib_gid_t* ,         p_gid_table) ;\r
-       TO_LONG_PTR(ib_net16_t* ,               p_pkey_table) ;\r
+       TO_LONG_PTR(ib_gid_t*,  p_gid_table);\r
+       TO_LONG_PTR(ib_net16_t*,p_pkey_table);\r
 \r
 }      ib_port_attr_t;\r
 /*\r
@@ -698,13 +668,14 @@ typedef struct _ib_ca_attr
        boolean_t                               system_image_guid_support;\r
        boolean_t                               hw_agents;\r
        boolean_t                               ipoib_csum;\r
+       \r
        ib_net64_t                              system_image_guid;\r
 \r
        uint32_t                                num_page_sizes;\r
        uint8_t                                 num_ports;\r
 \r
-       TO_LONG_PTR(uint32_t* ,         p_page_size) ;\r
-       TO_LONG_PTR(ib_port_attr_t* ,   p_port_attr) ;\r
+       TO_LONG_PTR(uint32_t*,  p_page_size);\r
+       TO_LONG_PTR(ib_port_attr_t*, p_port_attr);\r
 \r
 }      ib_ca_attr_t;\r
 /*\r
@@ -721,7 +692,7 @@ typedef struct _ib_ca_attr
 *      revision\r
 *              Revision ID of this adapter\r
 *\r
-*      Fw_ver\r
+*      fw_ver\r
 *              Device Firmware version.\r
 *\r
 *      size\r
@@ -935,7 +906,8 @@ typedef enum _ib_pd_type
 {\r
        IB_PDT_NORMAL,\r
        IB_PDT_ALIAS,\r
-       IB_PDT_SQP\r
+       IB_PDT_SQP,\r
+       IB_PDT_UD\r
 \r
 }      ib_pd_type_t;\r
 /*\r
@@ -948,6 +920,9 @@ typedef enum _ib_pd_type
 *\r
 *      IB_PDT_SQP\r
 *              Protection domain for special queue pair usage.\r
+*\r
+*      IB_PDT_UD\r
+*              Protection domain for UD queue pair usage.\r
 *****/\r
 \r
 \r
@@ -1001,15 +976,16 @@ typedef enum _ib_qp_type
 {\r
        IB_QPT_RELIABLE_CONN    = 0,            /* Matches CM REQ transport type */\r
        IB_QPT_UNRELIABLE_CONN  = 1,            /* Matches CM REQ transport type */\r
-       IB_QPT_UNRELIABLE_DGRM  = 3,            /* Purposefully skip RDD type. */\r
+       IB_QPT_RELIABLE_DGRM    = 2,    /* Matches CM REQ transport type */\r
+       IB_QPT_UNRELIABLE_DGRM,\r
        IB_QPT_QP0,\r
        IB_QPT_QP1,\r
        IB_QPT_RAW_IPV6,\r
        IB_QPT_RAW_ETHER,\r
        IB_QPT_MAD,                                                             /* InfiniBand Access Layer */\r
        IB_QPT_QP0_ALIAS,                                               /* InfiniBand Access Layer */\r
-       IB_QPT_QP1_ALIAS                                                /* InfiniBand Access Layer */\r
-\r
+       IB_QPT_QP1_ALIAS,                                               /* InfiniBand Access Layer */\r
+       IB_QPT_UNKNOWN\r
 }      ib_qp_type_t;\r
 /*\r
 * VALUES\r
@@ -1019,6 +995,9 @@ typedef enum _ib_qp_type
 *      IB_QPT_UNRELIABLE_CONN\r
 *              Unreliable, connected queue pair.\r
 *\r
+*      IB_QPT_RELIABLE_DGRM\r
+*              Reliable, datagram queue pair.\r
+*\r
 *      IB_QPT_UNRELIABLE_DGRM\r
 *              Unreliable, datagram queue pair.\r
 *\r
@@ -1051,6 +1030,34 @@ typedef enum _ib_qp_type
 *****/\r
 \r
 \r
+/****f* IBA Base: Types/ib_get_qp_type_str\r
+* NAME\r
+*      ib_get_qp_type_str\r
+*\r
+* DESCRIPTION\r
+*      Returns a string for the specified QP type\r
+*\r
+* SYNOPSIS\r
+*/\r
+AL_EXPORT const char* AL_API\r
+ib_get_qp_type_str(\r
+       IN                              uint8_t                                         qp_type );\r
+\r
+/*\r
+* PARAMETERS\r
+*      qp_type\r
+*              [in] Encoded QP type as defined in the\r
+QP attribute.\r
+\r
+* RETURN VALUES\r
+*      Pointer to the QP type string.\r
+*\r
+* NOTES\r
+*\r
+* SEE ALSO\r
+* ib_qp_type_t\r
+*********/\r
+\r
 /****d* Access Layer/ib_access_t\r
 * NAME\r
 *      ib_access_t\r
@@ -1178,14 +1185,15 @@ typedef struct _ib_qp_create
 {\r
        ib_qp_type_t                    qp_type;\r
 \r
+       uint32_t                                sq_max_inline;\r
        uint32_t                                sq_depth;\r
        uint32_t                                rq_depth;\r
        uint32_t                                sq_sge;\r
        uint32_t                                rq_sge;\r
 \r
-TO_LONG_PTR(   struct _ib_cq* ,                        h_sq_cq) ; \r
-TO_LONG_PTR(   struct _ib_cq* ,                        h_rq_cq) ; \r
-TO_LONG_PTR(   struct _ib_srq* ,                       h_srq) ; \r
+       TO_LONG_PTR(ib_cq_handle_t, h_sq_cq);\r
+       TO_LONG_PTR(ib_cq_handle_t, h_rq_cq);\r
+       TO_LONG_PTR(ib_srq_handle_t, h_srq);\r
 \r
        boolean_t                               sq_signaled;\r
 \r
@@ -1195,6 +1203,10 @@ TO_LONG_PTR(     struct _ib_srq* ,                       h_srq) ;
 *      type\r
 *              Specifies the type of queue pair to create.\r
 *\r
+*      sq_max_inline\r
+*              Maximum payload that can be inlined directly in a WQE, eliminating\r
+*              protection checks and additional DMA operations.\r
+*\r
 *      sq_depth\r
 *              Indicates the requested maximum number of work requests that may be\r
 *              outstanding on the queue pair's send queue.  This value must be less\r
@@ -1257,7 +1269,7 @@ TO_LONG_PTR(      struct _ib_srq* ,                       h_srq) ;
 */\r
 typedef struct _ib_qp_attr\r
 {\r
-TO_LONG_PTR(   struct _ib_pd* ,                        h_pd) ; \r
+       TO_LONG_PTR(ib_pd_handle_t, h_pd);\r
        ib_qp_type_t                    qp_type;\r
        ib_access_t                             access_ctrl;\r
        uint16_t                                pkey_index;\r
@@ -1270,9 +1282,9 @@ TO_LONG_PTR(      struct _ib_pd* ,                        h_pd) ;
        uint8_t                                 init_depth;\r
        uint8_t                                 resp_res;\r
 \r
-TO_LONG_PTR(   struct ib_cq* ,                 h_sq_cq) ; \r
-TO_LONG_PTR(   struct ib_cq* ,                 h_rq_cq) ; \r
-TO_LONG_PTR(   struct ib_srq* ,                        h_srq) ; \r
+       TO_LONG_PTR(ib_cq_handle_t, h_sq_cq);\r
+       TO_LONG_PTR(ib_cq_handle_t, h_rq_cq);\r
+       TO_LONG_PTR(ib_srq_handle_t,h_srq);\r
 \r
        boolean_t                               sq_signaled;\r
 \r
@@ -1359,7 +1371,6 @@ typedef struct _ib_qp_mod
        {\r
                struct _qp_init\r
                {\r
-                       ib_qp_opts_t            opts;\r
                        uint8_t                         primary_port;\r
                        ib_net32_t                      qkey;\r
                        uint16_t                        pkey_index;\r
@@ -1438,16 +1449,46 @@ typedef struct _ib_qp_mod
 */\r
 typedef enum _ib_wr_type_t\r
 {\r
-       WR_SEND = 1,\r
+       WR_SEND,\r
        WR_RDMA_WRITE,\r
        WR_RDMA_READ,\r
        WR_COMPARE_SWAP,\r
-       WR_FETCH_ADD\r
+       WR_FETCH_ADD,\r
+       WR_UNKNOWN\r
 \r
 }      ib_wr_type_t;\r
 /*****/\r
 \r
 \r
+/****f* IBA Base: Types/ib_get_wr_type_str\r
+* NAME\r
+*      ib_get_wr_type_str\r
+*\r
+* DESCRIPTION\r
+*      Returns a string for the specified work request type\r
+*\r
+* SYNOPSIS\r
+*/\r
+AL_EXPORT const char* AL_API\r
+ib_get_wr_type_str(\r
+       IN                              uint8_t                                         wr_type );\r
+\r
+/*\r
+* PARAMETERS\r
+*      wr_type\r
+*              [in] Encoded work request type as defined in the work request attribute.\r
+*\r
+* RETURN VALUES\r
+*      Pointer to the work request type string.\r
+*\r
+* NOTES\r
+*\r
+* SEE ALSO\r
+* ib_wr_type_t\r
+*********/\r
+\r
+\r
 /****s* Access Layer/ib_local_ds_t\r
 * NAME\r
 *      ib_local_ds_t\r
@@ -1486,6 +1527,9 @@ typedef uint32_t                                  ib_send_opt_t;
 #define IB_SEND_OPT_SOLICITED          0x00000008\r
 #define IB_SEND_OPT_INLINE                     0x00000010\r
 #define IB_SEND_OPT_LOCAL                      0x00000020\r
+#define IB_SEND_OPT_TX_IP_CSUM         0x00000040\r
+#define IB_SEND_OPT_TX_TCP_UDP_CSUM    0x00000080\r
+\r
 #define IB_SEND_OPT_VEND_MASK          0xFFFF0000\r
 /*\r
 * VALUES\r
@@ -1496,8 +1540,8 @@ typedef uint32_t                                  ib_send_opt_t;
 *              Send immediate data with the given request.\r
 *\r
 *      IB_SEND_OPT_FENCE\r
-*              The operation is fenced.  Complete all pending send operations before\r
-*              processing this request.\r
+*              The operation is fenced.  Complete all pending send operations\r
+*              before processing this request.\r
 *\r
 *      IB_SEND_OPT_SIGNALED\r
 *              If the queue pair is configured for signaled completion, then\r
@@ -1514,11 +1558,13 @@ typedef uint32_t                                        ib_send_opt_t;
 *              vendor specific restrictions on the size of send operation that may\r
 *              be performed as inline.\r
 *\r
+*\r
 *      IB_SEND_OPT_LOCAL\r
 *              Indicates that a sent MAD request should be given to the local VPD for\r
 *              processing.  MADs sent using this option are not placed on the wire.\r
 *              This send option is only valid for MAD send operations.\r
 *\r
+*\r
 *      IB_SEND_OPT_VEND_MASK\r
 *              This mask indicates bits reserved in the send options that may be used\r
 *              by the verbs provider to indicate vendor specific options.  Bits set\r
@@ -1540,57 +1586,67 @@ typedef uint32_t                                        ib_send_opt_t;
 */\r
 typedef struct _ib_send_wr\r
 {\r
-       TO_LONG_PTR(struct _ib_send_wr* ,       p_next) ;\r
        uint64_t                                        wr_id;\r
+       struct _ib_send_wr*                     p_next;\r
+       ib_local_ds_t*                          ds_array;\r
+       uint32_t                                        num_ds;\r
        ib_wr_type_t                            wr_type;\r
        ib_send_opt_t                           send_opt;\r
-       uint32_t                                        num_ds;\r
-       TO_LONG_PTR(ib_local_ds_t* ,            ds_array) ;\r
        ib_net32_t                                      immediate_data;\r
 \r
-       union _send_dgrm\r
+       union\r
        {\r
-               struct _send_ud\r
+               union _send_dgrm\r
                {\r
-                       ib_net32_t              remote_qp;\r
-                       ib_net32_t              remote_qkey;\r
-TO_LONG_PTR(                   struct _ib_av* ,        h_av) ; \r
-                       uint16_t                pkey_index;\r
-                       TO_LONG_PTR(void* ,     rsvd) ;\r
-\r
-               }       ud;\r
-\r
-               struct _send_raw_ether\r
-               {\r
-                       ib_net16_t              dest_lid;\r
-                       uint8_t                 path_bits;\r
-                       uint8_t                 sl;\r
-                       uint8_t                 max_static_rate;\r
-                       ib_net16_t              ether_type;\r
-\r
-               }       raw_ether;\r
+                       struct _send_ud\r
+                       {\r
+                               ib_av_handle_t  h_av; \r
+                               ib_net32_t              remote_qp;\r
+                               ib_net32_t              remote_qkey;\r
+                               void*                   rsvd;\r
+                               uint16_t                pkey_index;\r
 \r
-               struct _send_raw_ipv6\r
-               {\r
-                       ib_net16_t              dest_lid;\r
-                       uint8_t                 path_bits;\r
-                       uint8_t                 sl;\r
-                       uint8_t                 max_static_rate;\r
+                       }       ud;\r
 \r
-               }       raw_ipv6;\r
+                       struct _send_rd\r
+                       {\r
+                               ib_net32_t              remote_qp;\r
+                               ib_net32_t              remote_qkey;\r
+                               ib_net32_t              eecn;\r
 \r
-       }       dgrm;\r
+                       }       rd;\r
 \r
-       struct _send_remote_ops\r
-       {\r
-               uint64_t                        vaddr;\r
-               net32_t                         rkey;\r
+                       struct _send_raw_ether\r
+                       {\r
+                               ib_net16_t              dest_lid;\r
+                               uint8_t                 path_bits;\r
+                               uint8_t                 sl;\r
+                               uint8_t                 max_static_rate;\r
+                               ib_net16_t              ether_type;\r
+       \r
+                       }       raw_ether;\r
 \r
-               ib_net64_t                      atomic1;\r
-               ib_net64_t                      atomic2;\r
+                       struct _send_raw_ipv6\r
+                       {\r
+                               ib_net16_t              dest_lid;\r
+                               uint8_t                 path_bits;\r
+                               uint8_t                 sl;\r
+                               uint8_t                 max_static_rate;\r
+       \r
+                       }       raw_ipv6;\r
+       \r
+               }       dgrm;\r
 \r
-       }       remote_ops;\r
+               struct _send_remote_ops\r
+               {\r
+                       uint64_t                        vaddr;\r
+                       net32_t                         rkey;\r
 \r
+                       ib_net64_t                      atomic1;\r
+                       ib_net64_t                      atomic2;\r
+       \r
+               }       remote_ops;\r
+       };\r
 }      ib_send_wr_t;\r
 /*\r
 * FIELDS\r
@@ -1707,10 +1763,10 @@ TO_LONG_PTR(                    struct _ib_av* ,        h_av) ;
 */\r
 typedef struct _ib_recv_wr\r
 {\r
-       TO_LONG_PTR(struct _ib_recv_wr* ,       p_next) ;\r
+       TO_LONG_PTR(struct _ib_recv_wr*, p_next);\r
        uint64_t                                        wr_id;\r
        uint32_t                                        num_ds;\r
-       TO_LONG_PTR(ib_local_ds_t* ,            ds_array) ;\r
+       TO_LONG_PTR(ib_local_ds_t*,     ds_array);\r
 \r
 }      ib_recv_wr_t;\r
 /*\r
@@ -1751,7 +1807,7 @@ typedef struct _ib_bind_wr
        uint64_t                                wr_id;\r
        ib_send_opt_t                   send_opt;\r
 \r
-TO_LONG_PTR(   struct _ib_mr* ,                        h_mr) ; \r
+       TO_LONG_PTR(ib_mr_handle_t, h_mr);\r
        ib_access_t                             access_ctrl;\r
        net32_t                                 current_rkey;\r
 \r
@@ -1808,6 +1864,9 @@ typedef enum _ib_wc_status_t
        IB_WCS_RNR_RETRY_ERR,\r
        IB_WCS_TIMEOUT_RETRY_ERR,\r
        IB_WCS_REM_INVALID_REQ_ERR,\r
+       IB_WCS_BAD_RESP_ERR,\r
+       IB_WCS_LOCAL_ACCESS_ERR,\r
+       IB_WCS_GENERAL_ERR,\r
        IB_WCS_UNMATCHED_RESPONSE,                      /* InfiniBand Access Layer */\r
        IB_WCS_CANCELED,                                        /* InfiniBand Access Layer */\r
        IB_WCS_REM_ABORT_ERR,\r
@@ -1873,6 +1932,23 @@ typedef enum _ib_wc_status_t
 *                      - There was insufficient buffers to receive a new atomic operation.\r
 *                      - An RDMA request was larger than 2^31 bytes.\r
 *\r
+*      IB_WCS_BAD_RESP_ERR,\r
+*              An unexpected transport layer opcode was returned\r
+*              by the responder.\r
+*\r
+*      IB_WCS_LOCAL_ACCESS_ERR,\r
+*              A protection error occurred on a local data buffer\r
+*              during the processing of an RDMA Write with Immediate Data\r
+*              operation sent from the remote node.\r
+*\r
+*      IB_WCS_REM_ABORT_ERR,\r
+*              The operation was aborted (e.g., for UD QPs associated with an SRQ,\r
+*              the responder aborted the operation).\r
+*\r
 *      IB_WCS_UNMATCHED_RESPONSE\r
 *              A response MAD was received for which there was no matching send.  The\r
 *              send operation may have been canceled by the user or may have timed\r
@@ -1880,12 +1956,10 @@ typedef enum _ib_wc_status_t
 *\r
 *      IB_WCS_CANCELED\r
 *              The completed work request was canceled by the user.\r
-*\r
-*      IB_WCS_REM_ABORT_ERR,\r
-*              The operation was aborted (e.g., For UD QPs associated with an SRQ, \r
-*              the responder aborted the operation).\r
-*\r
-\r
+*\r
+*      IB_WCS_GENERAL_ERR,\r
+*              Any other error.\r
+*\r
 *****/\r
 \r
 \r
@@ -1929,13 +2003,14 @@ typedef enum _ib_wc_type_t
 {\r
        IB_WC_SEND,\r
        IB_WC_RDMA_WRITE,\r
-       IB_WC_RECV,\r
        IB_WC_RDMA_READ,\r
-       IB_WC_MW_BIND,\r
-       IB_WC_FETCH_ADD,\r
        IB_WC_COMPARE_SWAP,\r
+       IB_WC_FETCH_ADD,\r
+       IB_WC_MW_BIND,\r
+       IB_WC_UNKNOWN1,\r
+       IB_WC_RECV = (1 << 7),\r
        IB_WC_RECV_RDMA_WRITE,\r
-       IB_WC_UNKNOWN\r
+       IB_WC_UNKNOWN2\r
 \r
 }      ib_wc_type_t;\r
 /*****/\r
@@ -2012,18 +2087,18 @@ typedef uint32_t                                        ib_recv_opt_t;
 */\r
 typedef struct _ib_wc\r
 {\r
-       TO_LONG_PTR(struct _ib_wc* ,    p_next) ;\r
+       TO_LONG_PTR(struct _ib_wc*, p_next);\r
        uint64_t                                wr_id;\r
        ib_wc_type_t                    wc_type;\r
 \r
        uint32_t                                length;\r
-       ib_wc_status_t                  status;\r
        struct {\r
                uint8_t                         vendor_specific;\r
                uint8_t                         csum_ok;\r
                uint16_t                        vendor_specific2;\r
                uint32_t                        vendor_specific3;\r
        };\r
+       ib_wc_status_t                  status;\r
 \r
        union _wc_recv\r
        {\r
@@ -2046,6 +2121,16 @@ typedef struct _ib_wc
 \r
                }       ud;\r
 \r
+               struct _wc_rd\r
+               {\r
+                       ib_net32_t      remote_eecn;\r
+                       ib_net32_t      remote_qp;\r
+                       ib_net16_t      remote_lid;\r
+                       uint8_t         remote_sl;\r
+                       uint32_t        free_cnt;\r
+\r
+               }       rd;\r
+\r
                struct _wc_raw_ipv6\r
                {\r
                        ib_net16_t              remote_lid;\r
@@ -2081,6 +2166,7 @@ typedef struct _ib_wc
 *      wc_type\r
 *              Indicates the type of work completion.\r
 *\r
+*\r
 *      length\r
 *              The total length of the data sent or received with the work request.\r
 *\r
@@ -2112,8 +2198,8 @@ typedef struct _ib_wc
 *              Identifies the source queue pair of a received datagram.\r
 *\r
 *      recv.ud.pkey_index\r
-*              The pkey index of the source queue pair. This is valid only for\r
-*              IB_QPT_QP1 and IB_QPT_QP1_ALIAS QP types.\r
+*              The pkey index for the source queue pair. This is valid only for\r
+*              GSI-type QPs.\r
 *\r
 *      recv.ud.remote_lid\r
 *              The source LID of the received datagram.\r
@@ -2124,6 +2210,23 @@ typedef struct _ib_wc
 *      recv.ud.path_bits\r
 *              The path bits of the source LID on the received datagram.\r
 *\r
+*      recv.rd.remote_eecn\r
+*              The remote end-to-end context number from which the message was received.\r
+*\r
+*      recv.rd.remote_qp\r
+*              Identifies the source queue pair of a received message.\r
+*\r
+*      recv.rd.remote_lid\r
+*              The source LID of the received message.\r
+*\r
+*      recv.rd.remote_sl\r
+*              The service level used by the source of the received message.\r
+*\r
+*      recv.rd.free_cnt\r
+*              The number of available entries in the completion queue.  Reliable\r
+*              datagrams may complete out of order, so this field may be used to\r
+*              determine the number of additional completions that may occur.\r
+*\r
 *      recv.raw_ipv6.remote_lid\r
 *              The source LID of the received message.\r
 *\r
@@ -2169,7 +2272,7 @@ typedef struct _ib_wc
 */\r
 typedef struct _ib_mr_create\r
 {\r
-       TO_LONG_PTR(void* ,                     vaddr) ;\r
+       TO_LONG_PTR(void*,              vaddr);\r
        uint64_t                                length;\r
        ib_access_t                             access_ctrl;\r
 \r
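
For reference, a short registration sketch using this structure; the buffer and h_pd are assumed to exist, and ib_reg_mem is the access-layer call that consumes it:

    ib_mr_create_t  mr_create;
    ib_mr_handle_t  h_mr;
    net32_t         lkey, rkey;
    ib_api_status_t status;

    cl_memclr(&mr_create, sizeof(mr_create));
    mr_create.vaddr       = p_buffer;       /* assumed buffer */
    mr_create.length      = buffer_len;     /* assumed length */
    mr_create.access_ctrl = IB_AC_LOCAL_WRITE | IB_AC_RDMA_WRITE;

    status = ib_reg_mem(h_pd, &mr_create, &lkey, &rkey, &h_mr);
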
@@ -2189,6 +2292,88 @@ typedef struct _ib_mr_create
 *      ib_access_t\r
 *****/\r
 \r
+#ifdef CL_KERNEL\r
+\r
+/****s* Access Layer/mlnx_fmr_create_t\r
+* NAME\r
+*      mlnx_fmr_create_t\r
+*\r
+* DESCRIPTION\r
+*      Information required to create a Mellanox fast memory region.\r
+*\r
+* SYNOPSIS\r
+*/\r
+typedef struct _mlnx_fmr_create\r
+{\r
+       int                                     max_pages;\r
+       int                                     max_maps;\r
+       uint8_t                         page_size;\r
+       ib_access_t                     access_ctrl;\r
+\r
+}      mlnx_fmr_create_t;\r
+/*\r
+* FIELDS\r
+*      max_pages\r
+*              Maximum number of pages in the region.\r
+*\r
+*      max_maps\r
+*              Maximum number of times the region can be mapped before remapping.\r
+*\r
+*      page_size\r
+*              Log2 of the page size (e.g. 12 for 4KB pages).\r
+*\r
+*      access_ctrl\r
+*              Access rights of the registered region.\r
+*\r
+* NOTES\r
+*      This is a Mellanox specific extension to verbs.\r
+*\r
+* SEE ALSO\r
+*      ib_access_t\r
+*****/\r
+\r
+\r
+/****s* Access Layer/mlnx_fmr_pool_create_t\r
+* NAME\r
+*      mlnx_fmr_pool_create_t\r
+*\r
+* DESCRIPTION\r
+*      Information required to create a Mellanox fast memory region pool.\r
+*\r
+* SYNOPSIS\r
+*/\r
+typedef struct _mlnx_fmr_pool_create\r
+{\r
+       int                             max_pages_per_fmr;\r
+       uint8_t                 page_size;      /* actually a page shift: log2 of the page size */\r
+       enum ib_access_flags    access_ctrl;\r
+       int                             pool_size;\r
+       int                             dirty_watermark;\r
+       void                    (*flush_function)(mlnx_fmr_pool_handle_t h_pool, void *arg);\r
+       void                            *flush_arg;\r
+       boolean_t                       cache;\r
+}      mlnx_fmr_pool_create_t;\r
+/*\r
+* FIELDS\r
+*      max_pages_per_fmr\r
+*              Maximum number of pages per fast memory region.\r
+*\r
+*      page_size\r
+*              Log2 of the page size (e.g. 12 for 4KB pages).\r
+*\r
+*      access_ctrl\r
+*              Access rights of the registered regions.\r
+*\r
+*      pool_size\r
+*              Number of FMRs preallocated for the pool.\r
+*\r
+*      dirty_watermark\r
+*              Number of unmapped-but-dirty FMRs that triggers a pool flush.\r
+*\r
+*      flush_function\r
+*              Optional callback invoked when the pool is flushed.\r
+*\r
+*      flush_arg\r
+*              Argument passed to flush_function.\r
+*\r
+*      cache\r
+*              Indicates whether FMR mappings may be cached and reused.\r
+*\r
+* NOTES\r
+*      This is a Mellanox specific extension to verbs.\r
+*\r
+* SEE ALSO\r
+*      ib_access_t\r
+*****/\r
+#endif\r
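
To make the relationship between these two kernel-only structures concrete, a hedged initialization sketch; the flush callback is hypothetical, and IB_ACCESS_LOCAL_WRITE (the Linux-style flag matching the enum ib_access_flags field) is assumed to be visible here:

    /* Hypothetical flush callback: invoked after dirty FMRs are unmapped. */
    static void my_flush_cb(mlnx_fmr_pool_handle_t h_pool, void *arg)
    {
        UNREFERENCED_PARAMETER(h_pool);
        UNREFERENCED_PARAMETER(arg);
    }

    mlnx_fmr_create_t       fmr_create;
    mlnx_fmr_pool_create_t  pool_create;

    cl_memclr(&fmr_create, sizeof(fmr_create));
    fmr_create.max_pages   = 64;
    fmr_create.max_maps    = 32;
    fmr_create.page_size   = 12;            /* a shift: 2^12 = 4KB pages */
    fmr_create.access_ctrl = IB_AC_LOCAL_WRITE;

    cl_memclr(&pool_create, sizeof(pool_create));
    pool_create.max_pages_per_fmr = 64;
    pool_create.page_size         = 12;     /* likewise a shift, not bytes */
    pool_create.access_ctrl       = IB_ACCESS_LOCAL_WRITE;
    pool_create.pool_size         = 256;    /* FMRs preallocated */
    pool_create.dirty_watermark   = 32;     /* flush once this many are dirty */
    pool_create.flush_function    = my_flush_cb;
    pool_create.flush_arg         = NULL;
    pool_create.cache             = TRUE;
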
 \r
 /****s* Access Layer/ib_phys_range_t\r
 * NAME\r
@@ -2234,7 +2419,7 @@ typedef struct _ib_phys_create
 {\r
        uint64_t                                        length;\r
        uint32_t                                        num_ranges;\r
-       TO_LONG_PTR(ib_phys_range_t* ,  range_array) ;\r
+       ib_phys_range_t*                        range_array;\r
        uint32_t                                        buf_offset;\r
        uint32_t                                        hca_page_size;\r
        ib_access_t                                     access_ctrl;\r
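
A sketch of assembling a physical registration through this structure, assuming ib_phys_range_t carries a base_addr/size pair and that the physical addresses come from elsewhere:

    ib_phys_range_t  ranges[2];
    ib_phys_create_t phys_create;

    ranges[0].base_addr = pa0;              /* assumed physical addresses */
    ranges[0].size      = 4096;
    ranges[1].base_addr = pa1;
    ranges[1].size      = 4096;

    cl_memclr(&phys_create, sizeof(phys_create));
    phys_create.length        = 2 * 4096;
    phys_create.num_ranges    = 2;
    phys_create.range_array   = ranges;     /* now a plain pointer, per the change above */
    phys_create.buf_offset    = 0;
    phys_create.hca_page_size = 4096;
    phys_create.access_ctrl   = IB_AC_LOCAL_WRITE;
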
@@ -2278,7 +2463,7 @@ typedef struct _ib_phys_create
 */\r
 typedef struct _ib_mr_attr\r
 {\r
-TO_LONG_PTR(   struct ib_pd* ,                 h_pd) ; \r
+       TO_LONG_PTR(ib_pd_handle_t, h_pd);\r
        uint64_t                                local_lb;\r
        uint64_t                                local_ub;\r
        uint64_t                                remote_lb;\r
@@ -2364,6 +2549,8 @@ typedef uint32_t                                                  ib_ca_mod_t;
 #define IB_CA_MOD_SHUTDOWN_PORT                                0x00100000\r
 #define IB_CA_MOD_INIT_TYPE_VALUE                      0x00200000\r
 #define IB_CA_MOD_SYSTEM_IMAGE_GUID                    0x00400000\r
+#define IB_CA_MOD_IS_CLIENT_REREGISTER_SUPPORTED       0x00800000\r
+#define IB_CA_MOD_RESERVED_MASK                                0xFF000000\r
 /*\r
 * VALUES\r
 *      IB_CA_MOD_IS_CM_SUPPORTED\r
@@ -2374,26 +2561,28 @@ typedef uint32_t                                                        ib_ca_mod_t;
 *              Indicates if there is an SNMP agent accessible through the port.\r
 *\r
 *      IB_CA_MOD_IS_DEV_MGMT_SUPPORTED\r
-*              Indicates if there is a device management agent accessible through\r
-*              the port.\r
+*              Indicates if there is a device management agent accessible\r
+*              through the port.\r
 *\r
 *      IB_CA_MOD_IS_VEND_SUPPORTED\r
-*              Indicates if there is a vendor supported agent accessible through\r
-*              the port.\r
+*              Indicates if there is a vendor supported agent accessible\r
+*              through the port.\r
 *\r
 *      IB_CA_MOD_IS_SM\r
 *              Indicates if there is a subnet manager accessible through\r
 *              the port.\r
 *\r
 *      IB_CA_MOD_IS_SM_DISABLED\r
-*              Indicates if the port has been disabled for configuration by the subnet\r
-*              manager.\r
+*              Indicates if the port has been disabled for configuration by the\r
+*              subnet manager.\r
 *\r
 *      IB_CA_MOD_QKEY_CTR\r
-*              Used to reset the qkey violation counter associated with the port.\r
+*              Used to reset the qkey violation counter associated with the\r
+*              port.\r
 *\r
 *      IB_CA_MOD_PKEY_CTR\r
-*              Used to reset the pkey violation counter associated with the port.\r
+*              Used to reset the pkey violation counter associated with the\r
+*              port.\r
 *\r
 *      IB_CA_MOD_IS_NOTICE_SUPPORTED\r
 *              Indicates that this CA supports ability to generate Notices for\r
@@ -2404,7 +2593,8 @@ typedef uint32_t                                                  ib_ca_mod_t;
 *              trap messages. (only applicable to switches)\r
 *\r
 *      IB_CA_MOD_IS_APM_SUPPORTED\r
-*              Indicates that this port is capable of performing Automatic Migration.\r
+*              Indicates that this port is capable of performing Automatic\r
+*              Path Migration.\r
 *\r
 *      IB_CA_MOD_IS_SLMAP_SUPPORTED\r
 *              Indicates this port supports SLMAP capability.\r
@@ -2442,6 +2632,13 @@ typedef uint32_t                                                 ib_ca_mod_t;
 *\r
 *      IB_CA_MOD_SYSTEM_IMAGE_GUID\r
 *              Used to modify the system image GUID for the port.\r
+*\r
+*      IB_CA_MOD_IS_CLIENT_REREGISTER_SUPPORTED\r
+*              Used to modify the client reregistration support indication\r
+*              for the port.\r
+*\r
+*      IB_CA_MOD_RESERVED_MASK\r
+*              Mask of all the reserved bits.  If any of these bits are set,\r
+*              ib_modify_ca will return IB_INVALID_PARAMETER.\r
 *****/\r
 \r
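
The reserved mask reduces the documented behavior to a single test. A plausible sketch of the check ib_modify_ca would perform (the surrounding function is outside this hunk):

    if (ca_mod & IB_CA_MOD_RESERVED_MASK)
        return IB_INVALID_PARAMETER;    /* reserved bits must be zero */
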
 \r
@@ -2559,7 +2756,7 @@ typedef struct _ib_ci_op
        IN                              uint32_t                                        buf_size;\r
        IN                              uint32_t                                        buf_info;\r
        IN      OUT                     int32_t                                         status;\r
-    IN OUT                     TO_LONG_PTR(void*,p_buf) OPTIONAL; // Do not put it last in the structure, because of memory alignment\r
+       IN      OUT                     TO_LONG_PTR(void*,                      p_buf) OPTIONAL; // Do not put this field last in the structure, due to memory alignment\r
                OUT                     uint32_t                                        num_bytes_ret;\r
        \r
 \r
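
The alignment remark on p_buf can be enforced at compile time. A hedged sketch using the WDK's C_ASSERT and FIELD_OFFSET macros, assuming the usual ib_ci_op_t typedef (the exact offset is an assumption about the final layout):

    /* Keep the 64-bit p_buf naturally aligned on both 32- and 64-bit builds. */
    C_ASSERT((FIELD_OFFSET(ib_ci_op_t, p_buf) & 7) == 0);
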