git.openfabrics.org - ~emulex/for-vlad/old/compat-rdma.git/commitdiff
Add SRP backport and refresh all patches
author: Vladimir Sokolovsky <vlad@mellanox.com>
Tue, 2 Oct 2012 14:05:14 +0000 (16:05 +0200)
committer: Vladimir Sokolovsky <vlad@mellanox.com>
Tue, 2 Oct 2012 14:05:14 +0000 (16:05 +0200)
This patch has been tested on RHEL 6.0, RHEL 6.1, RHEL 6.2, RHEL 6.3
and Ubuntu 10.04.

Signed-off-by: Bart Van Assche <bvanassche@acm.org>
Signed-off-by: Vladimir Sokolovsky <vlad@mellanox.com>
38 files changed:
linux-next-cherry-picks/0001-RDMA-ucma.c-Different-fix-for-ucma-context-uid-0-cau.patch
linux-next-pending/0001-RDMA-nes-Fix-for-TSO-low-nic-throughput-linux-next-p.patch [new file with mode: 0644]
linux-next-pending/0001-RDMA-nes-Fix-for-loopback-MAC-address-Backport-chang.patch [deleted file]
linux-next-pending/0002-RDMA-nes-Fix-for-TSO-low-nic-throughput-linux-next-p.patch [deleted file]
linux-next-pending/0002-RDMA-nes-Loopback-clean-up-linux-next-pending.patch [new file with mode: 0644]
linux-next-pending/0003-Commit-3236b2d4-IB-qib-MADs-with-misset-M_Keys-shoul.patch [new file with mode: 0644]
linux-next-pending/0003-RDMA-nes-Loopback-clean-up-linux-next-pending.patch [deleted file]
linux-next-pending/0004-An-MR-reference-leak-exists-when-handling-UC-RDMA-wr.patch [new file with mode: 0644]
linux-next-pending/0004-RDMA-nes-TSO-is-enabled-again-for-linux-3.5-Backport.patch [deleted file]
linux-next-pending/0005-A-timing-issue-can-occur-where-qib_mr_dereg-can-retu.patch [new file with mode: 0644]
linux-next-pending/0005-IB-qib-portinfo-compliance.patch [deleted file]
linux-next-pending/0006-IB-qib-mr-ebusy.patch [deleted file]
linux-next-pending/0007-IB-qib-uc-refcount-leak.patch [deleted file]
patches/0001-ib_core-backport-dst_fetch_ha.patch
patches/0002-ib-core-Backport-flowi4-and-flowi6.patch
patches/0003-BACKPORT-core-netlink-for-kernels-3.4.patch
patches/0004-ib-core-Backport-pinned_vm-for-kernels-3.2.patch
patches/0005-ib-core-Backport-CLASS_ATTR-for-kernels-2.6.34.patch
patches/0006-Backport-mlx4_ib.patch
patches/0007-BACKPORT-ucma-Revert-sysctl-registrations.patch
patches/0008-RDMA-nes-Backports-for-RHEL-6.2-and-6.3.patch
patches/0009-iw_cxgb3-iw_cxgb4-Enable-header-file-inclusion-with-.patch
patches/0010-IB-qib-backport-qib_fs.c-before-2.6.35.patch
patches/0011-cxgb3-Backports-for-RHEL6.2-RHEL6.3-and-SLES11-SP2.patch
patches/0012-IB-qib-backport-3.2-for-pinned_vm-field.patch
patches/0013-cxgb4-Backports-for-RHEL6.2-RHEL-6.3-and-SLES11-SP2.patch
patches/0014-IB-ipath-backport-qib_fs.c-before-2.6.35.patch
patches/0015-IB-ipath-backport-3.2-for-pinned_vm-field.patch
patches/0016-iw_cxgb3-Backports-for-RHEL6.2-RHEL-6.3-and-SLES11-S.patch
patches/0017-iw_cxgb4-Backports-for-RHEL6.2-RHEL6.3-and-SLES11-SP.patch
patches/0018-IPoIB-Backports-for-RHEL6.2-RHEL6.3-and-SLES11-SP2.patch
patches/0019-mlx4_en-Backports-for-RHEL6.2-RHEL6.3-and-SLES11-SP2.patch
patches/0020-NFSRDMA-RHEL6.3-and-SLES11-SP2-backport.patch
patches/0021-RDMA-nes-Updated-backports.patch
patches/0022-iw_cxgb4-Fix-bug-2369-in-OFED-bugzilla.patch
patches/0023-RDMA-nes-Fix-for-loopback-MAC-address-Backport-chang.patch [new file with mode: 0644]
patches/0024-RDMA-nes-TSO-is-enabled-again-for-linux-3.5-Backport.patch [new file with mode: 0644]
patches/0025-ib_srp-Backport-to-older-kernels.patch [new file with mode: 0644]

index 965b48ce7ef1fdff6870158d75732b0d86b26b90..afd793aeef851deadfc2bdad19969f3d2a373d27 100644 (file)
@@ -1,7 +1,8 @@
-From 63e0d2fdb051ee6beb0990cb4722c9cf6816d80b Mon Sep 17 00:00:00 2001
+From 35d307dfdb251ebfd997fe8d9cdd0a4c64b90595 Mon Sep 17 00:00:00 2001
 From: Tatyana Nikolova <Tatyana.E.Nikolova@intel.com>
 Date: Tue, 7 Aug 2012 17:46:42 -0500
-Subject: [PATCH] RDMA/ucma.c: Different fix for ucma context uid=0, causing iWarp RDMA applications to fail in connection establishment
+Subject: [PATCH] RDMA/ucma.c: Different fix for ucma context uid=0, causing
+ iWarp RDMA applications to fail in connection establishment
 
 Fix for ucma context uid=0, causing iWarp RDMA applications to fail in connection establishment.
 
@@ -12,7 +13,7 @@ Signed-off-by: Tatyana Nikolova <Tatyana.E.Nikolova@intel.com>
 Signed-off-by: Sean Hefty <Sean.Hefty@intel.com>
 ---
  drivers/infiniband/core/ucma.c |    2 +-
- 1 files changed, 1 insertions(+), 1 deletions(-)
+ 1 file changed, 1 insertion(+), 1 deletion(-)
 
 diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c
 index 5861cdb..28bf514 100644
@@ -35,5 +36,5 @@ index 5861cdb..28bf514 100644
                if (!ctx->backlog) {
                        ret = -ENOMEM;
 -- 
-1.7.0.4
+1.7.9.5
 
diff --git a/linux-next-pending/0001-RDMA-nes-Fix-for-TSO-low-nic-throughput-linux-next-p.patch b/linux-next-pending/0001-RDMA-nes-Fix-for-TSO-low-nic-throughput-linux-next-p.patch
new file mode 100644 (file)
index 0000000..81a08c2
--- /dev/null
@@ -0,0 +1,73 @@
+From 669b99baeb7c8d2a887241aa761fe5ee1d144f4f Mon Sep 17 00:00:00 2001
+From: Tatyana Nikolova <Tatyana.E.Nikolova@intel.com>
+Date: Fri, 7 Sep 2012 19:37:55 -0500
+Subject: [PATCH 1/5] RDMA/nes: Fix for TSO low nic throughput -
+ linux-next-pending
+
+Fix for TSO low nic throughput with linux-3.5
+skb_is_gso() is changed to bool and returns 1 instead of MSS.
+The gso_size from skb_shared_info is now used to pass MSS to hardware.
+
+(the patch is linux-next-pending)
+
+Signed-off-by: Tatyana Nikolova <Tatyana.E.Nikolova@intel.com>
+---
+ drivers/infiniband/hw/nes/nes_nic.c |   28 +++++++++++++---------------
+ 1 file changed, 13 insertions(+), 15 deletions(-)
+
+diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c
+index 7674358..4f73965 100644
+--- a/drivers/infiniband/hw/nes/nes_nic.c
++++ b/drivers/infiniband/hw/nes/nes_nic.c
+@@ -388,18 +388,16 @@ static int nes_nic_send(struct sk_buff *skb, struct net_device *netdev)
+       if (skb->ip_summed == CHECKSUM_PARTIAL) {
+               tcph = tcp_hdr(skb);
+-              if (1) {
+-                      if (skb_is_gso(skb)) {
+-                              /* nes_debug(NES_DBG_NIC_TX, "%s: TSO request... seg size = %u\n",
+-                                              netdev->name, skb_is_gso(skb)); */
+-                              wqe_misc |= NES_NIC_SQ_WQE_LSO_ENABLE |
+-                                              NES_NIC_SQ_WQE_COMPLETION | (u16)skb_is_gso(skb);
+-                              set_wqe_32bit_value(nic_sqe->wqe_words, NES_NIC_SQ_WQE_LSO_INFO_IDX,
+-                                              ((u32)tcph->doff) |
+-                                              (((u32)(((unsigned char *)tcph) - skb->data)) << 4));
+-                      } else {
+-                              wqe_misc |= NES_NIC_SQ_WQE_COMPLETION;
+-                      }
++              if (skb_is_gso(skb)) {
++                      nes_debug(NES_DBG_NIC_TX, "%s: TSO request... seg size = %u\n",
++                                      netdev->name, skb_shinfo(skb)->gso_size);
++                      wqe_misc |= NES_NIC_SQ_WQE_LSO_ENABLE |
++                                      NES_NIC_SQ_WQE_COMPLETION | (u16)skb_shinfo(skb)->gso_size;
++                      set_wqe_32bit_value(nic_sqe->wqe_words, NES_NIC_SQ_WQE_LSO_INFO_IDX,
++                                      ((u32)tcph->doff) |
++                                      (((u32)(((unsigned char *)tcph) - skb->data)) << 4));
++              } else {
++                      wqe_misc |= NES_NIC_SQ_WQE_COMPLETION;
+               }
+       } else {        /* CHECKSUM_HW */
+               wqe_misc |= NES_NIC_SQ_WQE_DISABLE_CHKSUM | NES_NIC_SQ_WQE_COMPLETION;
+@@ -600,7 +598,7 @@ tso_sq_no_longer_full:
+                                                       " (%u frags), tso_size=%u\n",
+                                                       netdev->name,
+                                                       skb->len, skb_headlen(skb),
+-                                                      skb_shinfo(skb)->nr_frags, skb_is_gso(skb));
++                                                      skb_shinfo(skb)->nr_frags, skb_shinfo(skb)->gso_size);
+                               }
+                               memcpy(&nesnic->first_frag_vbase[nesnic->sq_head].buffer,
+                                               skb->data, min(((unsigned int)NES_FIRST_FRAG_SIZE),
+@@ -652,8 +650,8 @@ tso_sq_no_longer_full:
+                               } else {
+                                       nesnic->tx_skb[nesnic->sq_head] = NULL;
+                               }
+-                              wqe_misc |= NES_NIC_SQ_WQE_COMPLETION | (u16)skb_is_gso(skb);
+-                              if ((tso_wqe_length + original_first_length) > skb_is_gso(skb)) {
++                              wqe_misc |= NES_NIC_SQ_WQE_COMPLETION | (u16)skb_shinfo(skb)->gso_size;
++                              if ((tso_wqe_length + original_first_length) > skb_shinfo(skb)->gso_size) {
+                                       wqe_misc |= NES_NIC_SQ_WQE_LSO_ENABLE;
+                               } else {
+                                       iph->tot_len = htons(tso_wqe_length + original_first_length - nhoffset);
+-- 
+1.7.9.5
+
diff --git a/linux-next-pending/0001-RDMA-nes-Fix-for-loopback-MAC-address-Backport-chang.patch b/linux-next-pending/0001-RDMA-nes-Fix-for-loopback-MAC-address-Backport-chang.patch
deleted file mode 100644 (file)
index 22ae56a..0000000
+++ /dev/null
@@ -1,33 +0,0 @@
-From 536c7cc5997776dc9e65d9ab8869d535505506a2 Mon Sep 17 00:00:00 2001
-From: Tatyana Nikolova <Tatyana.E.Nikolova@intel.com>
-Date: Fri, 7 Sep 2012 19:41:29 -0500
-Subject: [PATCH 1/4] RDMA/nes: Fix for loopback MAC address - Backport change
-
-RDMA/nes: Fix for resolving correctly the MAC address for loopback connection
-(the patch should be applied on top of previous backports)
-
-Signed-off-by: Tatyana Nikolova <Tatyana.E.Nikolova@intel.com>
----
- drivers/infiniband/hw/nes/nes_cm.c |    5 +----
- 1 file changed, 1 insertion(+), 4 deletions(-)
-
-diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c
-index 0b5b1a9..d9fa245 100644
---- a/drivers/infiniband/hw/nes/nes_cm.c
-+++ b/drivers/infiniband/hw/nes/nes_cm.c
-@@ -1363,11 +1363,8 @@ static int nes_addr_resolve_neigh(struct nes_vnic *nesvnic, u32 dst_ip, int arpi
-       else
-               netdev = nesvnic->netdev;
--#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,1,0))
-       neigh = neigh_lookup(&arp_tbl, &rt->rt_gateway, netdev);
--#else
--      neigh = dst_neigh_lookup(&rt->dst, &dst_ip);
--#endif
-+
-       rcu_read_lock();
-       if (neigh) {
-               if (neigh->nud_state & NUD_VALID) {
--- 
-1.7.9.5
-
diff --git a/linux-next-pending/0002-RDMA-nes-Fix-for-TSO-low-nic-throughput-linux-next-p.patch b/linux-next-pending/0002-RDMA-nes-Fix-for-TSO-low-nic-throughput-linux-next-p.patch
deleted file mode 100644 (file)
index 781c750..0000000
+++ /dev/null
@@ -1,73 +0,0 @@
-From a2eb1d38ce10c7ce8070194b21fabe36fdd40ffd Mon Sep 17 00:00:00 2001
-From: Tatyana Nikolova <Tatyana.E.Nikolova@intel.com>
-Date: Fri, 7 Sep 2012 19:37:55 -0500
-Subject: [PATCH 2/4] RDMA/nes: Fix for TSO low nic throughput -
- linux-next-pending
-
-Fix for TSO low nic throughput with linux-3.5
-skb_is_gso() is changed to bool and returns 1 instead of MSS.
-The gso_size from skb_shared_info is now used to pass MSS to hardware.
-
-(the patch is linux-next-pending)
-
-Signed-off-by: Tatyana Nikolova <Tatyana.E.Nikolova@intel.com>
----
- drivers/infiniband/hw/nes/nes_nic.c |   28 +++++++++++++---------------
- 1 file changed, 13 insertions(+), 15 deletions(-)
-
-diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c
-index 696b80e..5242294 100644
---- a/drivers/infiniband/hw/nes/nes_nic.c
-+++ b/drivers/infiniband/hw/nes/nes_nic.c
-@@ -388,18 +388,16 @@ static int nes_nic_send(struct sk_buff *skb, struct net_device *netdev)
-       if (skb->ip_summed == CHECKSUM_PARTIAL) {
-               tcph = tcp_hdr(skb);
--              if (1) {
--                      if (skb_is_gso(skb)) {
--                              /* nes_debug(NES_DBG_NIC_TX, "%s: TSO request... seg size = %u\n",
--                                              netdev->name, skb_is_gso(skb)); */
--                              wqe_misc |= NES_NIC_SQ_WQE_LSO_ENABLE |
--                                              NES_NIC_SQ_WQE_COMPLETION | (u16)skb_is_gso(skb);
--                              set_wqe_32bit_value(nic_sqe->wqe_words, NES_NIC_SQ_WQE_LSO_INFO_IDX,
--                                              ((u32)tcph->doff) |
--                                              (((u32)(((unsigned char *)tcph) - skb->data)) << 4));
--                      } else {
--                              wqe_misc |= NES_NIC_SQ_WQE_COMPLETION;
--                      }
-+              if (skb_is_gso(skb)) {
-+                      nes_debug(NES_DBG_NIC_TX, "%s: TSO request... seg size = %u\n",
-+                                      netdev->name, skb_shinfo(skb)->gso_size);
-+                      wqe_misc |= NES_NIC_SQ_WQE_LSO_ENABLE |
-+                                      NES_NIC_SQ_WQE_COMPLETION | (u16)skb_shinfo(skb)->gso_size;
-+                      set_wqe_32bit_value(nic_sqe->wqe_words, NES_NIC_SQ_WQE_LSO_INFO_IDX,
-+                                      ((u32)tcph->doff) |
-+                                      (((u32)(((unsigned char *)tcph) - skb->data)) << 4));
-+              } else {
-+                      wqe_misc |= NES_NIC_SQ_WQE_COMPLETION;
-               }
-       } else {        /* CHECKSUM_HW */
-               wqe_misc |= NES_NIC_SQ_WQE_DISABLE_CHKSUM | NES_NIC_SQ_WQE_COMPLETION;
-@@ -600,7 +598,7 @@ tso_sq_no_longer_full:
-                                                       " (%u frags), tso_size=%u\n",
-                                                       netdev->name,
-                                                       skb->len, skb_headlen(skb),
--                                                      skb_shinfo(skb)->nr_frags, skb_is_gso(skb));
-+                                                      skb_shinfo(skb)->nr_frags, skb_shinfo(skb)->gso_size);
-                               }
-                               memcpy(&nesnic->first_frag_vbase[nesnic->sq_head].buffer,
-                                               skb->data, min(((unsigned int)NES_FIRST_FRAG_SIZE),
-@@ -652,8 +650,8 @@ tso_sq_no_longer_full:
-                               } else {
-                                       nesnic->tx_skb[nesnic->sq_head] = NULL;
-                               }
--                              wqe_misc |= NES_NIC_SQ_WQE_COMPLETION | (u16)skb_is_gso(skb);
--                              if ((tso_wqe_length + original_first_length) > skb_is_gso(skb)) {
-+                              wqe_misc |= NES_NIC_SQ_WQE_COMPLETION | (u16)skb_shinfo(skb)->gso_size;
-+                              if ((tso_wqe_length + original_first_length) > skb_shinfo(skb)->gso_size) {
-                                       wqe_misc |= NES_NIC_SQ_WQE_LSO_ENABLE;
-                               } else {
-                                       iph->tot_len = htons(tso_wqe_length + original_first_length - nhoffset);
--- 
-1.7.9.5
-
diff --git a/linux-next-pending/0002-RDMA-nes-Loopback-clean-up-linux-next-pending.patch b/linux-next-pending/0002-RDMA-nes-Loopback-clean-up-linux-next-pending.patch
new file mode 100644 (file)
index 0000000..70c0960
--- /dev/null
@@ -0,0 +1,86 @@
+From b52ebe158b3bb1b21b488f18683a17259d4b5b74 Mon Sep 17 00:00:00 2001
+From: Tatyana Nikolova <Tatyana.E.Nikolova@intel.com>
+Date: Fri, 7 Sep 2012 19:34:28 -0500
+Subject: [PATCH 2/5] RDMA/nes: Loopback clean up - linux-next-pending
+
+Necessary clean up for the loopback code
+
+(the patch is linux-next-pending)
+
+Signed-off-by: Tatyana Nikolova <Tatyana.E.Nikolova@intel.com>
+---
+ drivers/infiniband/hw/nes/nes_cm.c |   30 ++++++------------------------
+ 1 file changed, 6 insertions(+), 24 deletions(-)
+
+diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c
+index d9fa245..8a2c301 100644
+--- a/drivers/infiniband/hw/nes/nes_cm.c
++++ b/drivers/infiniband/hw/nes/nes_cm.c
+@@ -1472,12 +1472,8 @@ static struct nes_cm_node *make_cm_node(struct nes_cm_core *cm_core,
+       cm_node->loopbackpartner = NULL;
+       /* get the mac addr for the remote node */
+-      if (ipv4_is_loopback(htonl(cm_node->rem_addr))) {
+-              arpindex = nes_arp_table(nesdev, ntohl(nesvnic->local_ipaddr), NULL, NES_ARP_RESOLVE);
+-      } else {
+-              oldarpindex = nes_arp_table(nesdev, cm_node->rem_addr, NULL, NES_ARP_RESOLVE);
+-              arpindex = nes_addr_resolve_neigh(nesvnic, cm_info->rem_addr, oldarpindex);
+-      }
++      oldarpindex = nes_arp_table(nesdev, cm_node->rem_addr, NULL, NES_ARP_RESOLVE);
++      arpindex = nes_addr_resolve_neigh(nesvnic, cm_info->rem_addr, oldarpindex);
+       if (arpindex < 0) {
+               kfree(cm_node);
+               return NULL;
+@@ -3160,11 +3156,7 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
+       nesqp->nesqp_context->tcpPorts[1] =
+               cpu_to_le16(ntohs(cm_id->remote_addr.sin_port));
+-      if (ipv4_is_loopback(cm_id->remote_addr.sin_addr.s_addr))
+-              nesqp->nesqp_context->ip0 =
+-                      cpu_to_le32(ntohl(nesvnic->local_ipaddr));
+-      else
+-              nesqp->nesqp_context->ip0 =
++      nesqp->nesqp_context->ip0 =
+                       cpu_to_le32(ntohl(cm_id->remote_addr.sin_addr.s_addr));
+       nesqp->nesqp_context->misc2 |= cpu_to_le32(
+@@ -3189,10 +3181,7 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
+       memset(&nes_quad, 0, sizeof(nes_quad));
+       nes_quad.DstIpAdrIndex =
+               cpu_to_le32((u32)PCI_FUNC(nesdev->pcidev->devfn) << 24);
+-      if (ipv4_is_loopback(cm_id->remote_addr.sin_addr.s_addr))
+-              nes_quad.SrcIpadr = nesvnic->local_ipaddr;
+-      else
+-              nes_quad.SrcIpadr = cm_id->remote_addr.sin_addr.s_addr;
++      nes_quad.SrcIpadr = cm_id->remote_addr.sin_addr.s_addr;
+       nes_quad.TcpPorts[0] = cm_id->remote_addr.sin_port;
+       nes_quad.TcpPorts[1] = cm_id->local_addr.sin_port;
+@@ -3545,11 +3534,7 @@ static void cm_event_connected(struct nes_cm_event *event)
+               cpu_to_le16(ntohs(cm_id->local_addr.sin_port));
+       nesqp->nesqp_context->tcpPorts[1] =
+               cpu_to_le16(ntohs(cm_id->remote_addr.sin_port));
+-      if (ipv4_is_loopback(cm_id->remote_addr.sin_addr.s_addr))
+-              nesqp->nesqp_context->ip0 =
+-                      cpu_to_le32(ntohl(nesvnic->local_ipaddr));
+-      else
+-              nesqp->nesqp_context->ip0 =
++      nesqp->nesqp_context->ip0 =
+                       cpu_to_le32(ntohl(cm_id->remote_addr.sin_addr.s_addr));
+       nesqp->nesqp_context->misc2 |= cpu_to_le32(
+@@ -3578,10 +3563,7 @@ static void cm_event_connected(struct nes_cm_event *event)
+       nes_quad.DstIpAdrIndex =
+               cpu_to_le32((u32)PCI_FUNC(nesdev->pcidev->devfn) << 24);
+-      if (ipv4_is_loopback(cm_id->remote_addr.sin_addr.s_addr))
+-              nes_quad.SrcIpadr = nesvnic->local_ipaddr;
+-      else
+-              nes_quad.SrcIpadr = cm_id->remote_addr.sin_addr.s_addr;
++      nes_quad.SrcIpadr = cm_id->remote_addr.sin_addr.s_addr;
+       nes_quad.TcpPorts[0] = cm_id->remote_addr.sin_port;
+       nes_quad.TcpPorts[1] = cm_id->local_addr.sin_port;
+-- 
+1.7.9.5
+
diff --git a/linux-next-pending/0003-Commit-3236b2d4-IB-qib-MADs-with-misset-M_Keys-shoul.patch b/linux-next-pending/0003-Commit-3236b2d4-IB-qib-MADs-with-misset-M_Keys-shoul.patch
new file mode 100644 (file)
index 0000000..ff31e56
--- /dev/null
@@ -0,0 +1,35 @@
+From f04e4bef8305cb265398672a574e56276a85b28f Mon Sep 17 00:00:00 2001
+From: Mike Marciniszyn <mike.marciniszyn@intel.com>
+Date: Tue, 2 Oct 2012 15:53:02 +0200
+Subject: [PATCH 3/5] Commit 3236b2d4 ("IB/qib: MADs with misset M_Keys should
+ return failure") introduced a return code assignment
+ that unfortunately introduced an unconditional exit for
+ the routine due to the lack of braces.
+
+This patch adds the braces to correct the original patch.
+
+Reviewed-by: Dean Luick <dean.luick@intel.com>
+Signed-off-by: Mike Marciniszyn <mike.marciniszyn@intel.com>
+---
+ drivers/infiniband/hw/qib/qib_mad.c |    3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/infiniband/hw/qib/qib_mad.c b/drivers/infiniband/hw/qib/qib_mad.c
+index 4339021..3e55415 100644
+--- a/drivers/infiniband/hw/qib/qib_mad.c
++++ b/drivers/infiniband/hw/qib/qib_mad.c
+@@ -463,9 +463,10 @@ static int subn_get_portinfo(struct ib_smp *smp, struct ib_device *ibdev,
+               if (port_num != port) {
+                       ibp = to_iport(ibdev, port_num);
+                       ret = check_mkey(ibp, smp, 0);
+-                      if (ret)
++                      if (ret) {
+                               ret = IB_MAD_RESULT_FAILURE;
+                               goto bail;
++                      }
+               }
+       }
+-- 
+1.7.9.5
+
diff --git a/linux-next-pending/0003-RDMA-nes-Loopback-clean-up-linux-next-pending.patch b/linux-next-pending/0003-RDMA-nes-Loopback-clean-up-linux-next-pending.patch
deleted file mode 100644 (file)
index a76a1b0..0000000
+++ /dev/null
@@ -1,86 +0,0 @@
-From 5061a057e8d166edd67d28d9531c0b1b4b8a8724 Mon Sep 17 00:00:00 2001
-From: Tatyana Nikolova <Tatyana.E.Nikolova@intel.com>
-Date: Fri, 7 Sep 2012 19:34:28 -0500
-Subject: [PATCH 3/4] RDMA/nes: Loopback clean up - linux-next-pending
-
-Necessary clean up for the loopback code
-
-(the patch is linux-next-pending)
-
-Signed-off-by: Tatyana Nikolova <Tatyana.E.Nikolova@intel.com>
----
- drivers/infiniband/hw/nes/nes_cm.c |   30 ++++++------------------------
- 1 file changed, 6 insertions(+), 24 deletions(-)
-
-diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c
-index d9fa245..8a2c301 100644
---- a/drivers/infiniband/hw/nes/nes_cm.c
-+++ b/drivers/infiniband/hw/nes/nes_cm.c
-@@ -1472,12 +1472,8 @@ static struct nes_cm_node *make_cm_node(struct nes_cm_core *cm_core,
-       cm_node->loopbackpartner = NULL;
-       /* get the mac addr for the remote node */
--      if (ipv4_is_loopback(htonl(cm_node->rem_addr))) {
--              arpindex = nes_arp_table(nesdev, ntohl(nesvnic->local_ipaddr), NULL, NES_ARP_RESOLVE);
--      } else {
--              oldarpindex = nes_arp_table(nesdev, cm_node->rem_addr, NULL, NES_ARP_RESOLVE);
--              arpindex = nes_addr_resolve_neigh(nesvnic, cm_info->rem_addr, oldarpindex);
--      }
-+      oldarpindex = nes_arp_table(nesdev, cm_node->rem_addr, NULL, NES_ARP_RESOLVE);
-+      arpindex = nes_addr_resolve_neigh(nesvnic, cm_info->rem_addr, oldarpindex);
-       if (arpindex < 0) {
-               kfree(cm_node);
-               return NULL;
-@@ -3160,11 +3156,7 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
-       nesqp->nesqp_context->tcpPorts[1] =
-               cpu_to_le16(ntohs(cm_id->remote_addr.sin_port));
--      if (ipv4_is_loopback(cm_id->remote_addr.sin_addr.s_addr))
--              nesqp->nesqp_context->ip0 =
--                      cpu_to_le32(ntohl(nesvnic->local_ipaddr));
--      else
--              nesqp->nesqp_context->ip0 =
-+      nesqp->nesqp_context->ip0 =
-                       cpu_to_le32(ntohl(cm_id->remote_addr.sin_addr.s_addr));
-       nesqp->nesqp_context->misc2 |= cpu_to_le32(
-@@ -3189,10 +3181,7 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
-       memset(&nes_quad, 0, sizeof(nes_quad));
-       nes_quad.DstIpAdrIndex =
-               cpu_to_le32((u32)PCI_FUNC(nesdev->pcidev->devfn) << 24);
--      if (ipv4_is_loopback(cm_id->remote_addr.sin_addr.s_addr))
--              nes_quad.SrcIpadr = nesvnic->local_ipaddr;
--      else
--              nes_quad.SrcIpadr = cm_id->remote_addr.sin_addr.s_addr;
-+      nes_quad.SrcIpadr = cm_id->remote_addr.sin_addr.s_addr;
-       nes_quad.TcpPorts[0] = cm_id->remote_addr.sin_port;
-       nes_quad.TcpPorts[1] = cm_id->local_addr.sin_port;
-@@ -3545,11 +3534,7 @@ static void cm_event_connected(struct nes_cm_event *event)
-               cpu_to_le16(ntohs(cm_id->local_addr.sin_port));
-       nesqp->nesqp_context->tcpPorts[1] =
-               cpu_to_le16(ntohs(cm_id->remote_addr.sin_port));
--      if (ipv4_is_loopback(cm_id->remote_addr.sin_addr.s_addr))
--              nesqp->nesqp_context->ip0 =
--                      cpu_to_le32(ntohl(nesvnic->local_ipaddr));
--      else
--              nesqp->nesqp_context->ip0 =
-+      nesqp->nesqp_context->ip0 =
-                       cpu_to_le32(ntohl(cm_id->remote_addr.sin_addr.s_addr));
-       nesqp->nesqp_context->misc2 |= cpu_to_le32(
-@@ -3578,10 +3563,7 @@ static void cm_event_connected(struct nes_cm_event *event)
-       nes_quad.DstIpAdrIndex =
-               cpu_to_le32((u32)PCI_FUNC(nesdev->pcidev->devfn) << 24);
--      if (ipv4_is_loopback(cm_id->remote_addr.sin_addr.s_addr))
--              nes_quad.SrcIpadr = nesvnic->local_ipaddr;
--      else
--              nes_quad.SrcIpadr = cm_id->remote_addr.sin_addr.s_addr;
-+      nes_quad.SrcIpadr = cm_id->remote_addr.sin_addr.s_addr;
-       nes_quad.TcpPorts[0] = cm_id->remote_addr.sin_port;
-       nes_quad.TcpPorts[1] = cm_id->local_addr.sin_port;
--- 
-1.7.9.5
-
diff --git a/linux-next-pending/0004-An-MR-reference-leak-exists-when-handling-UC-RDMA-wr.patch b/linux-next-pending/0004-An-MR-reference-leak-exists-when-handling-UC-RDMA-wr.patch
new file mode 100644 (file)
index 0000000..913b339
--- /dev/null
@@ -0,0 +1,54 @@
+From 984a66a622f5a0c4aaceaa48ddb265de5a454392 Mon Sep 17 00:00:00 2001
+From: Mike Marciniszyn <mike.marciniszyn@intel.com>
+Date: Tue, 2 Oct 2012 15:53:02 +0200
+Subject: [PATCH 4/5] An MR reference leak exists when handling UC RDMA writes
+ with immediate data because we manipulate the reference
+ counts as if the operation had been a send.
+
+This patch moves the last_imm label so that the RDMA write operations
+with immediate data converge at the cq building code.  The copy/mr
+deref code is now done correctly prior to the branch to last_imm.
+
+Reviewed-by: Edward Mascarenhas <edward.mascarenhas@intel.com>
+Signed-off-by: Mike Marciniszyn <mike.marciniszyn@intel.com>
+Signed-off-by: Roland Dreier <roland@purestorage.com>
+---
+ drivers/infiniband/hw/qib/qib_uc.c |    8 +++++++-
+ 1 file changed, 7 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/infiniband/hw/qib/qib_uc.c b/drivers/infiniband/hw/qib/qib_uc.c
+index ce7387f..70b4cb7 100644
+--- a/drivers/infiniband/hw/qib/qib_uc.c
++++ b/drivers/infiniband/hw/qib/qib_uc.c
+@@ -403,7 +403,6 @@ send_last:
+               if (unlikely(wc.byte_len > qp->r_len))
+                       goto rewind;
+               wc.opcode = IB_WC_RECV;
+-last_imm:
+               qib_copy_sge(&qp->r_sge, data, tlen, 0);
+               while (qp->s_rdma_read_sge.num_sge) {
+                       atomic_dec(&qp->s_rdma_read_sge.sge.mr->refcount);
+@@ -411,6 +410,7 @@ last_imm:
+                               qp->s_rdma_read_sge.sge =
+                                       *qp->s_rdma_read_sge.sg_list++;
+               }
++last_imm:
+               wc.wr_id = qp->r_wr_id;
+               wc.status = IB_WC_SUCCESS;
+               wc.qp = &qp->ibqp;
+@@ -509,6 +509,12 @@ rdma_last_imm:
+               }
+               wc.byte_len = qp->r_len;
+               wc.opcode = IB_WC_RECV_RDMA_WITH_IMM;
++              qib_copy_sge(&qp->r_sge, data, tlen, 1);
++              while (qp->r_sge.num_sge) {
++                      atomic_dec(&qp->r_sge.sge.mr->refcount);
++                      if (--qp->r_sge.num_sge)
++                              qp->r_sge.sge = *qp->r_sge.sg_list++;
++              }
+               goto last_imm;
+       case OP(RDMA_WRITE_LAST):
+-- 
+1.7.9.5
+
diff --git a/linux-next-pending/0004-RDMA-nes-TSO-is-enabled-again-for-linux-3.5-Backport.patch b/linux-next-pending/0004-RDMA-nes-TSO-is-enabled-again-for-linux-3.5-Backport.patch
deleted file mode 100644 (file)
index b2cbbef..0000000
+++ /dev/null
@@ -1,34 +0,0 @@
-From 927a677bdc854f1a8cbd766560486c5ffdea0ab7 Mon Sep 17 00:00:00 2001
-From: Tatyana Nikolova <Tatyana.E.Nikolova@intel.com>
-Date: Fri, 7 Sep 2012 19:45:21 -0500
-Subject: [PATCH 4/4] RDMA/nes: TSO is enabled again for linux-3.5 - Backport
- change
-
-RDMA/nes: TSO is enabled again for linux-3.5
-(the patch should be applied on top of previous backports)
-
-Signed-off-by: Tatyana Nikolova <Tatyana.E.Nikolova@intel.com>
----
- drivers/infiniband/hw/nes/nes_nic.c |    2 --
- 1 file changed, 2 deletions(-)
-
-diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c
-index 5242294..4f73965 100644
---- a/drivers/infiniband/hw/nes/nes_nic.c
-+++ b/drivers/infiniband/hw/nes/nes_nic.c
-@@ -1758,12 +1758,10 @@ struct net_device *nes_netdev_init(struct nes_device *nesdev,
-       netdev->features |= NETIF_F_HIGHDMA | NETIF_F_HW_VLAN_TX;
-       if ((nesvnic->logical_port < 2) || (nesdev->nesadapter->hw_rev != NE020_REV)) {
--#if (LINUX_VERSION_CODE <= KERNEL_VERSION(3,3,0))
-               netdev->features |= NETIF_F_TSO;
- #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,39))
-               netdev->hw_features |= NETIF_F_TSO;
- #endif
--#endif
-       }
-       nes_debug(NES_DBG_INIT, "nesvnic = %p, reported features = 0x%lX, QPid = %d,"
--- 
-1.7.9.5
-
diff --git a/linux-next-pending/0005-A-timing-issue-can-occur-where-qib_mr_dereg-can-retu.patch b/linux-next-pending/0005-A-timing-issue-can-occur-where-qib_mr_dereg-can-retu.patch
new file mode 100644 (file)
index 0000000..3576c79
--- /dev/null
@@ -0,0 +1,1037 @@
+From f96007a6bc75a3644489b159050fce06d1f19655 Mon Sep 17 00:00:00 2001
+From: Mike Marciniszyn <mike.marciniszyn@intel.com>
+Date: Tue, 2 Oct 2012 15:53:02 +0200
+Subject: [PATCH 5/5] A timing issue can occur where qib_mr_dereg can return
+ -EBUSY if the MR use count is not zero.
+
+This can occur if the MR is de-registered while RDMA read response
+packets are being progressed from the SDMA ring.  The suspicion is
+that the peer sent an RDMA read request, which has already been copied
+across to the peer.  The peer sees the completion of his request and
+then communicates to the responder that the MR is not needed any
+longer.  The responder tries to de-register the MR, catching some
+responses remaining in the SDMA ring holding the MR use count.
+
+The code now uses a get/put paradigm to track MR use counts and
+coordinates with the MR de-registration process using a completion
+when the count has reached zero.  A timeout on the delay is in place
+to catch other EBUSY issues.
+
+The reference count protocol is as follows:
+- The return to the user counts as 1
+- A reference from the lk_table or the qib_ibdev counts as 1.
+- Transient I/O operations increase/decrease as necessary
+
+A lot of code duplication has been folded into the new routines
+init_qib_mregion() and deinit_qib_mregion().  Additionally, explicit
+initialization of fields to zero is now handled by kzalloc().
+
+Also, duplicated code 'while.*num_sge' that decrements reference
+counts have been consolidated in qib_put_ss().
+
+Reviewed-by: Ramkrishna Vepa <ramkrishna.vepa@intel.com>
+Signed-off-by: Mike Marciniszyn <mike.marciniszyn@intel.com>
+Signed-off-by: Roland Dreier <roland@purestorage.com>
+---
+ drivers/infiniband/hw/qib/qib_keys.c  |   84 +++++++-----
+ drivers/infiniband/hw/qib/qib_mr.c    |  242 ++++++++++++++++++---------------
+ drivers/infiniband/hw/qib/qib_qp.c    |   21 +--
+ drivers/infiniband/hw/qib/qib_rc.c    |   24 ++--
+ drivers/infiniband/hw/qib/qib_ruc.c   |   14 +-
+ drivers/infiniband/hw/qib/qib_uc.c    |   33 +----
+ drivers/infiniband/hw/qib/qib_ud.c    |   12 +-
+ drivers/infiniband/hw/qib/qib_verbs.c |   10 +-
+ drivers/infiniband/hw/qib/qib_verbs.h |   28 +++-
+ 9 files changed, 244 insertions(+), 224 deletions(-)
+
+diff --git a/drivers/infiniband/hw/qib/qib_keys.c b/drivers/infiniband/hw/qib/qib_keys.c
+index 8fd19a4..8b5ee3a 100644
+--- a/drivers/infiniband/hw/qib/qib_keys.c
++++ b/drivers/infiniband/hw/qib/qib_keys.c
+@@ -35,21 +35,40 @@
+ /**
+  * qib_alloc_lkey - allocate an lkey
+- * @rkt: lkey table in which to allocate the lkey
+  * @mr: memory region that this lkey protects
++ * @dma_region: 0->normal key, 1->restricted DMA key
++ *
++ * Returns 0 if successful, otherwise returns -errno.
++ *
++ * Increments mr reference count and sets published
++ * as required.
++ *
++ * Sets the lkey field mr for non-dma regions.
+  *
+- * Returns 1 if successful, otherwise returns 0.
+  */
+-int qib_alloc_lkey(struct qib_lkey_table *rkt, struct qib_mregion *mr)
++int qib_alloc_lkey(struct qib_mregion *mr, int dma_region)
+ {
+       unsigned long flags;
+       u32 r;
+       u32 n;
+-      int ret;
++      int ret = 0;
++      struct qib_ibdev *dev = to_idev(mr->pd->device);
++      struct qib_lkey_table *rkt = &dev->lk_table;
+       spin_lock_irqsave(&rkt->lock, flags);
++      /* special case for dma_mr lkey == 0 */
++      if (dma_region) {
++              /* should the dma_mr be relative to the pd? */
++              if (!dev->dma_mr) {
++                      qib_get_mr(mr);
++                      dev->dma_mr = mr;
++                      mr->lkey_published = 1;
++              }
++              goto success;
++      }
++
+       /* Find the next available LKEY */
+       r = rkt->next;
+       n = r;
+@@ -57,11 +76,8 @@ int qib_alloc_lkey(struct qib_lkey_table *rkt, struct qib_mregion *mr)
+               if (rkt->table[r] == NULL)
+                       break;
+               r = (r + 1) & (rkt->max - 1);
+-              if (r == n) {
+-                      spin_unlock_irqrestore(&rkt->lock, flags);
+-                      ret = 0;
++              if (r == n)
+                       goto bail;
+-              }
+       }
+       rkt->next = (r + 1) & (rkt->max - 1);
+       /*
+@@ -76,46 +92,50 @@ int qib_alloc_lkey(struct qib_lkey_table *rkt, struct qib_mregion *mr)
+               mr->lkey |= 1 << 8;
+               rkt->gen++;
+       }
++      qib_get_mr(mr);
+       rkt->table[r] = mr;
++      mr->lkey_published = 1;
++success:
+       spin_unlock_irqrestore(&rkt->lock, flags);
+-
+-      ret = 1;
+-
+-bail:
++out:
+       return ret;
++bail:
++      spin_unlock_irqrestore(&rkt->lock, flags);
++      ret = -ENOMEM;
++      goto out;
+ }
+ /**
+  * qib_free_lkey - free an lkey
+- * @rkt: table from which to free the lkey
+- * @lkey: lkey id to free
++ * @mr: mr to free from tables
+  */
+-int qib_free_lkey(struct qib_ibdev *dev, struct qib_mregion *mr)
++void qib_free_lkey(struct qib_mregion *mr)
+ {
+       unsigned long flags;
+       u32 lkey = mr->lkey;
+       u32 r;
+-      int ret;
++      struct qib_ibdev *dev = to_idev(mr->pd->device);
++      struct qib_lkey_table *rkt = &dev->lk_table;
++
++      spin_lock_irqsave(&rkt->lock, flags);
++      if (!mr->lkey_published)
++              goto out;
++      mr->lkey_published = 0;
++
+       spin_lock_irqsave(&dev->lk_table.lock, flags);
+       if (lkey == 0) {
+               if (dev->dma_mr && dev->dma_mr == mr) {
+-                      ret = atomic_read(&dev->dma_mr->refcount);
+-                      if (!ret)
+-                              dev->dma_mr = NULL;
+-              } else
+-                      ret = 0;
++                      qib_put_mr(dev->dma_mr);
++                      dev->dma_mr = NULL;
++              }
+       } else {
+               r = lkey >> (32 - ib_qib_lkey_table_size);
+-              ret = atomic_read(&dev->lk_table.table[r]->refcount);
+-              if (!ret)
+-                      dev->lk_table.table[r] = NULL;
++              qib_put_mr(dev->dma_mr);
++              rkt->table[r] = NULL;
+       }
++out:
+       spin_unlock_irqrestore(&dev->lk_table.lock, flags);
+-
+-      if (ret)
+-              ret = -EBUSY;
+-      return ret;
+ }
+ /**
+@@ -150,7 +170,7 @@ int qib_lkey_ok(struct qib_lkey_table *rkt, struct qib_pd *pd,
+                       goto bail;
+               if (!dev->dma_mr)
+                       goto bail;
+-              atomic_inc(&dev->dma_mr->refcount);
++              qib_get_mr(dev->dma_mr);
+               spin_unlock_irqrestore(&rkt->lock, flags);
+               isge->mr = dev->dma_mr;
+@@ -171,7 +191,7 @@ int qib_lkey_ok(struct qib_lkey_table *rkt, struct qib_pd *pd,
+                    off + sge->length > mr->length ||
+                    (mr->access_flags & acc) != acc))
+               goto bail;
+-      atomic_inc(&mr->refcount);
++      qib_get_mr(mr);
+       spin_unlock_irqrestore(&rkt->lock, flags);
+       off += mr->offset;
+@@ -245,7 +265,7 @@ int qib_rkey_ok(struct qib_qp *qp, struct qib_sge *sge,
+                       goto bail;
+               if (!dev->dma_mr)
+                       goto bail;
+-              atomic_inc(&dev->dma_mr->refcount);
++              qib_get_mr(dev->dma_mr);
+               spin_unlock_irqrestore(&rkt->lock, flags);
+               sge->mr = dev->dma_mr;
+@@ -265,7 +285,7 @@ int qib_rkey_ok(struct qib_qp *qp, struct qib_sge *sge,
+       if (unlikely(vaddr < mr->iova || off + len > mr->length ||
+                    (mr->access_flags & acc) == 0))
+               goto bail;
+-      atomic_inc(&mr->refcount);
++      qib_get_mr(mr);
+       spin_unlock_irqrestore(&rkt->lock, flags);
+       off += mr->offset;
+diff --git a/drivers/infiniband/hw/qib/qib_mr.c b/drivers/infiniband/hw/qib/qib_mr.c
+index 08944e2..6a2028a 100644
+--- a/drivers/infiniband/hw/qib/qib_mr.c
++++ b/drivers/infiniband/hw/qib/qib_mr.c
+@@ -47,6 +47,43 @@ static inline struct qib_fmr *to_ifmr(struct ib_fmr *ibfmr)
+       return container_of(ibfmr, struct qib_fmr, ibfmr);
+ }
++static int init_qib_mregion(struct qib_mregion *mr, struct ib_pd *pd,
++      int count)
++{
++      int m, i = 0;
++      int rval = 0;
++
++      m = (count + QIB_SEGSZ - 1) / QIB_SEGSZ;
++      for (; i < m; i++) {
++              mr->map[i] = kzalloc(sizeof *mr->map[0], GFP_KERNEL);
++              if (!mr->map[i])
++                      goto bail;
++      }
++      mr->mapsz = m;
++      init_completion(&mr->comp);
++      /* count returning the ptr to user */
++      atomic_set(&mr->refcount, 1);
++      mr->pd = pd;
++      mr->max_segs = count;
++out:
++      return rval;
++bail:
++      while (i)
++              kfree(mr->map[--i]);
++      rval = -ENOMEM;
++      goto out;
++}
++
++static void deinit_qib_mregion(struct qib_mregion *mr)
++{
++      int i = mr->mapsz;
++
++      mr->mapsz = 0;
++      while (i)
++              kfree(mr->map[--i]);
++}
++
++
+ /**
+  * qib_get_dma_mr - get a DMA memory region
+  * @pd: protection domain for this memory region
+@@ -58,10 +95,9 @@ static inline struct qib_fmr *to_ifmr(struct ib_fmr *ibfmr)
+  */
+ struct ib_mr *qib_get_dma_mr(struct ib_pd *pd, int acc)
+ {
+-      struct qib_ibdev *dev = to_idev(pd->device);
+-      struct qib_mr *mr;
++      struct qib_mr *mr = NULL;
+       struct ib_mr *ret;
+-      unsigned long flags;
++      int rval;
+       if (to_ipd(pd)->user) {
+               ret = ERR_PTR(-EPERM);
+@@ -74,61 +110,64 @@ struct ib_mr *qib_get_dma_mr(struct ib_pd *pd, int acc)
+               goto bail;
+       }
+-      mr->mr.access_flags = acc;
+-      atomic_set(&mr->mr.refcount, 0);
++      rval = init_qib_mregion(&mr->mr, pd, 0);
++      if (rval) {
++              ret = ERR_PTR(rval);
++              goto bail;
++      }
+-      spin_lock_irqsave(&dev->lk_table.lock, flags);
+-      if (!dev->dma_mr)
+-              dev->dma_mr = &mr->mr;
+-      spin_unlock_irqrestore(&dev->lk_table.lock, flags);
++      rval = qib_alloc_lkey(&mr->mr, 1);
++      if (rval) {
++              ret = ERR_PTR(rval);
++              goto bail_mregion;
++      }
++
++      mr->mr.access_flags = acc;
+       ret = &mr->ibmr;
++done:
++      return ret;
++bail_mregion:
++      deinit_qib_mregion(&mr->mr);
+ bail:
+-      return ret;
++      kfree(mr);
++      goto done;
+ }
+-static struct qib_mr *alloc_mr(int count, struct qib_lkey_table *lk_table)
++static struct qib_mr *alloc_mr(int count, struct ib_pd *pd)
+ {
+       struct qib_mr *mr;
+-      int m, i = 0;
++      int rval = -ENOMEM;
++      int m;
+       /* Allocate struct plus pointers to first level page tables. */
+       m = (count + QIB_SEGSZ - 1) / QIB_SEGSZ;
+-      mr = kmalloc(sizeof *mr + m * sizeof mr->mr.map[0], GFP_KERNEL);
++      mr = kzalloc(sizeof *mr + m * sizeof mr->mr.map[0], GFP_KERNEL);
+       if (!mr)
+-              goto done;
+-
+-      /* Allocate first level page tables. */
+-      for (; i < m; i++) {
+-              mr->mr.map[i] = kmalloc(sizeof *mr->mr.map[0], GFP_KERNEL);
+-              if (!mr->mr.map[i])
+-                      goto bail;
+-      }
+-      mr->mr.mapsz = m;
+-      mr->mr.page_shift = 0;
+-      mr->mr.max_segs = count;
++              goto bail;
++      rval = init_qib_mregion(&mr->mr, pd, count);
++      if (rval)
++              goto bail;
+       /*
+        * ib_reg_phys_mr() will initialize mr->ibmr except for
+        * lkey and rkey.
+        */
+-      if (!qib_alloc_lkey(lk_table, &mr->mr))
+-              goto bail;
++      rval = qib_alloc_lkey(&mr->mr, 0);
++      if (rval)
++              goto bail_mregion;
+       mr->ibmr.lkey = mr->mr.lkey;
+       mr->ibmr.rkey = mr->mr.lkey;
++done:
++      return mr;
+-      atomic_set(&mr->mr.refcount, 0);
+-      goto done;
+-
++bail_mregion:
++      deinit_qib_mregion(&mr->mr);
+ bail:
+-      while (i)
+-              kfree(mr->mr.map[--i]);
+       kfree(mr);
+-      mr = NULL;
+-
+-done:
+-      return mr;
++      mr = ERR_PTR(rval);
++      goto done;
+ }
+ /**
+@@ -148,19 +187,15 @@ struct ib_mr *qib_reg_phys_mr(struct ib_pd *pd,
+       int n, m, i;
+       struct ib_mr *ret;
+-      mr = alloc_mr(num_phys_buf, &to_idev(pd->device)->lk_table);
+-      if (mr == NULL) {
+-              ret = ERR_PTR(-ENOMEM);
++      mr = alloc_mr(num_phys_buf, pd);
++      if (IS_ERR(mr)) {
++              ret = (struct ib_mr *)mr;
+               goto bail;
+       }
+-      mr->mr.pd = pd;
+       mr->mr.user_base = *iova_start;
+       mr->mr.iova = *iova_start;
+-      mr->mr.length = 0;
+-      mr->mr.offset = 0;
+       mr->mr.access_flags = acc;
+-      mr->umem = NULL;
+       m = 0;
+       n = 0;
+@@ -186,7 +221,6 @@ bail:
+  * @pd: protection domain for this memory region
+  * @start: starting userspace address
+  * @length: length of region to register
+- * @virt_addr: virtual address to use (from HCA's point of view)
+  * @mr_access_flags: access flags for this memory region
+  * @udata: unused by the QLogic_IB driver
+  *
+@@ -216,14 +250,13 @@ struct ib_mr *qib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
+       list_for_each_entry(chunk, &umem->chunk_list, list)
+               n += chunk->nents;
+-      mr = alloc_mr(n, &to_idev(pd->device)->lk_table);
+-      if (!mr) {
+-              ret = ERR_PTR(-ENOMEM);
++      mr = alloc_mr(n, pd);
++      if (IS_ERR(mr)) {
++              ret = (struct ib_mr *)mr;
+               ib_umem_release(umem);
+               goto bail;
+       }
+-      mr->mr.pd = pd;
+       mr->mr.user_base = start;
+       mr->mr.iova = virt_addr;
+       mr->mr.length = length;
+@@ -271,21 +304,25 @@ bail:
+ int qib_dereg_mr(struct ib_mr *ibmr)
+ {
+       struct qib_mr *mr = to_imr(ibmr);
+-      struct qib_ibdev *dev = to_idev(ibmr->device);
+-      int ret;
+-      int i;
+-
+-      ret = qib_free_lkey(dev, &mr->mr);
+-      if (ret)
+-              return ret;
+-
+-      i = mr->mr.mapsz;
+-      while (i)
+-              kfree(mr->mr.map[--i]);
++      int ret = 0;
++      unsigned long timeout;
++
++      qib_free_lkey(&mr->mr);
++
++      qib_put_mr(&mr->mr); /* will set completion if last */
++      timeout = wait_for_completion_timeout(&mr->mr.comp,
++              5 * HZ);
++      if (!timeout) {
++              qib_get_mr(&mr->mr);
++              ret = -EBUSY;
++              goto out;
++      }
++      deinit_qib_mregion(&mr->mr);
+       if (mr->umem)
+               ib_umem_release(mr->umem);
+       kfree(mr);
+-      return 0;
++out:
++      return ret;
+ }
+ /*
+@@ -298,17 +335,9 @@ struct ib_mr *qib_alloc_fast_reg_mr(struct ib_pd *pd, int max_page_list_len)
+ {
+       struct qib_mr *mr;
+-      mr = alloc_mr(max_page_list_len, &to_idev(pd->device)->lk_table);
+-      if (mr == NULL)
+-              return ERR_PTR(-ENOMEM);
+-
+-      mr->mr.pd = pd;
+-      mr->mr.user_base = 0;
+-      mr->mr.iova = 0;
+-      mr->mr.length = 0;
+-      mr->mr.offset = 0;
+-      mr->mr.access_flags = 0;
+-      mr->umem = NULL;
++      mr = alloc_mr(max_page_list_len, pd);
++      if (IS_ERR(mr))
++              return (struct ib_mr *)mr;
+       return &mr->ibmr;
+ }
+@@ -322,11 +351,11 @@ qib_alloc_fast_reg_page_list(struct ib_device *ibdev, int page_list_len)
+       if (size > PAGE_SIZE)
+               return ERR_PTR(-EINVAL);
+-      pl = kmalloc(sizeof *pl, GFP_KERNEL);
++      pl = kzalloc(sizeof *pl, GFP_KERNEL);
+       if (!pl)
+               return ERR_PTR(-ENOMEM);
+-      pl->page_list = kmalloc(size, GFP_KERNEL);
++      pl->page_list = kzalloc(size, GFP_KERNEL);
+       if (!pl->page_list)
+               goto err_free;
+@@ -355,57 +384,47 @@ struct ib_fmr *qib_alloc_fmr(struct ib_pd *pd, int mr_access_flags,
+                            struct ib_fmr_attr *fmr_attr)
+ {
+       struct qib_fmr *fmr;
+-      int m, i = 0;
++      int m;
+       struct ib_fmr *ret;
++      int rval = -ENOMEM;
+       /* Allocate struct plus pointers to first level page tables. */
+       m = (fmr_attr->max_pages + QIB_SEGSZ - 1) / QIB_SEGSZ;
+-      fmr = kmalloc(sizeof *fmr + m * sizeof fmr->mr.map[0], GFP_KERNEL);
++      fmr = kzalloc(sizeof *fmr + m * sizeof fmr->mr.map[0], GFP_KERNEL);
+       if (!fmr)
+               goto bail;
+-      /* Allocate first level page tables. */
+-      for (; i < m; i++) {
+-              fmr->mr.map[i] = kmalloc(sizeof *fmr->mr.map[0],
+-                                       GFP_KERNEL);
+-              if (!fmr->mr.map[i])
+-                      goto bail;
+-      }
+-      fmr->mr.mapsz = m;
++      rval = init_qib_mregion(&fmr->mr, pd, fmr_attr->max_pages);
++      if (rval)
++              goto bail;
+       /*
+        * ib_alloc_fmr() will initialize fmr->ibfmr except for lkey &
+        * rkey.
+        */
+-      if (!qib_alloc_lkey(&to_idev(pd->device)->lk_table, &fmr->mr))
+-              goto bail;
++      rval = qib_alloc_lkey(&fmr->mr, 0);
++      if (rval)
++              goto bail_mregion;
+       fmr->ibfmr.rkey = fmr->mr.lkey;
+       fmr->ibfmr.lkey = fmr->mr.lkey;
+       /*
+        * Resources are allocated but no valid mapping (RKEY can't be
+        * used).
+        */
+-      fmr->mr.pd = pd;
+-      fmr->mr.user_base = 0;
+-      fmr->mr.iova = 0;
+-      fmr->mr.length = 0;
+-      fmr->mr.offset = 0;
+       fmr->mr.access_flags = mr_access_flags;
+       fmr->mr.max_segs = fmr_attr->max_pages;
+       fmr->mr.page_shift = fmr_attr->page_shift;
+-      atomic_set(&fmr->mr.refcount, 0);
+       ret = &fmr->ibfmr;
+-      goto done;
++done:
++      return ret;
++bail_mregion:
++      deinit_qib_mregion(&fmr->mr);
+ bail:
+-      while (i)
+-              kfree(fmr->mr.map[--i]);
+       kfree(fmr);
+-      ret = ERR_PTR(-ENOMEM);
+-
+-done:
+-      return ret;
++      ret = ERR_PTR(rval);
++      goto done;
+ }
+ /**
+@@ -428,7 +447,8 @@ int qib_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list,
+       u32 ps;
+       int ret;
+-      if (atomic_read(&fmr->mr.refcount))
++      i = atomic_read(&fmr->mr.refcount);
++      if (i > 2)
+               return -EBUSY;
+       if (list_len > fmr->mr.max_segs) {
+@@ -490,16 +510,20 @@ int qib_unmap_fmr(struct list_head *fmr_list)
+ int qib_dealloc_fmr(struct ib_fmr *ibfmr)
+ {
+       struct qib_fmr *fmr = to_ifmr(ibfmr);
+-      int ret;
+-      int i;
+-
+-      ret = qib_free_lkey(to_idev(ibfmr->device), &fmr->mr);
+-      if (ret)
+-              return ret;
+-
+-      i = fmr->mr.mapsz;
+-      while (i)
+-              kfree(fmr->mr.map[--i]);
++      int ret = 0;
++      unsigned long timeout;
++
++      qib_free_lkey(&fmr->mr);
++      qib_put_mr(&fmr->mr); /* will set completion if last */
++      timeout = wait_for_completion_timeout(&fmr->mr.comp,
++              5 * HZ);
++      if (!timeout) {
++              qib_get_mr(&fmr->mr);
++              ret = -EBUSY;
++              goto out;
++      }
++      deinit_qib_mregion(&fmr->mr);
+       kfree(fmr);
+-      return 0;
++out:
++      return ret;
+ }
+diff --git a/drivers/infiniband/hw/qib/qib_qp.c b/drivers/infiniband/hw/qib/qib_qp.c
+index 1ce56b5..693041b 100644
+--- a/drivers/infiniband/hw/qib/qib_qp.c
++++ b/drivers/infiniband/hw/qib/qib_qp.c
+@@ -406,18 +406,9 @@ static void clear_mr_refs(struct qib_qp *qp, int clr_sends)
+       unsigned n;
+       if (test_and_clear_bit(QIB_R_REWIND_SGE, &qp->r_aflags))
+-              while (qp->s_rdma_read_sge.num_sge) {
+-                      atomic_dec(&qp->s_rdma_read_sge.sge.mr->refcount);
+-                      if (--qp->s_rdma_read_sge.num_sge)
+-                              qp->s_rdma_read_sge.sge =
+-                                      *qp->s_rdma_read_sge.sg_list++;
+-              }
++              qib_put_ss(&qp->s_rdma_read_sge);
+-      while (qp->r_sge.num_sge) {
+-              atomic_dec(&qp->r_sge.sge.mr->refcount);
+-              if (--qp->r_sge.num_sge)
+-                      qp->r_sge.sge = *qp->r_sge.sg_list++;
+-      }
++      qib_put_ss(&qp->r_sge);
+       if (clr_sends) {
+               while (qp->s_last != qp->s_head) {
+@@ -427,7 +418,7 @@ static void clear_mr_refs(struct qib_qp *qp, int clr_sends)
+                       for (i = 0; i < wqe->wr.num_sge; i++) {
+                               struct qib_sge *sge = &wqe->sg_list[i];
+-                              atomic_dec(&sge->mr->refcount);
++                              qib_put_mr(sge->mr);
+                       }
+                       if (qp->ibqp.qp_type == IB_QPT_UD ||
+                           qp->ibqp.qp_type == IB_QPT_SMI ||
+@@ -437,7 +428,7 @@ static void clear_mr_refs(struct qib_qp *qp, int clr_sends)
+                               qp->s_last = 0;
+               }
+               if (qp->s_rdma_mr) {
+-                      atomic_dec(&qp->s_rdma_mr->refcount);
++                      qib_put_mr(qp->s_rdma_mr);
+                       qp->s_rdma_mr = NULL;
+               }
+       }
+@@ -450,7 +441,7 @@ static void clear_mr_refs(struct qib_qp *qp, int clr_sends)
+               if (e->opcode == IB_OPCODE_RC_RDMA_READ_REQUEST &&
+                   e->rdma_sge.mr) {
+-                      atomic_dec(&e->rdma_sge.mr->refcount);
++                      qib_put_mr(e->rdma_sge.mr);
+                       e->rdma_sge.mr = NULL;
+               }
+       }
+@@ -495,7 +486,7 @@ int qib_error_qp(struct qib_qp *qp, enum ib_wc_status err)
+       if (!(qp->s_flags & QIB_S_BUSY)) {
+               qp->s_hdrwords = 0;
+               if (qp->s_rdma_mr) {
+-                      atomic_dec(&qp->s_rdma_mr->refcount);
++                      qib_put_mr(qp->s_rdma_mr);
+                       qp->s_rdma_mr = NULL;
+               }
+               if (qp->s_tx) {
+diff --git a/drivers/infiniband/hw/qib/qib_rc.c b/drivers/infiniband/hw/qib/qib_rc.c
+index b641416..3ab3413 100644
+--- a/drivers/infiniband/hw/qib/qib_rc.c
++++ b/drivers/infiniband/hw/qib/qib_rc.c
+@@ -95,7 +95,7 @@ static int qib_make_rc_ack(struct qib_ibdev *dev, struct qib_qp *qp,
+       case OP(RDMA_READ_RESPONSE_ONLY):
+               e = &qp->s_ack_queue[qp->s_tail_ack_queue];
+               if (e->rdma_sge.mr) {
+-                      atomic_dec(&e->rdma_sge.mr->refcount);
++                      qib_put_mr(e->rdma_sge.mr);
+                       e->rdma_sge.mr = NULL;
+               }
+               /* FALLTHROUGH */
+@@ -133,7 +133,7 @@ static int qib_make_rc_ack(struct qib_ibdev *dev, struct qib_qp *qp,
+                       /* Copy SGE state in case we need to resend */
+                       qp->s_rdma_mr = e->rdma_sge.mr;
+                       if (qp->s_rdma_mr)
+-                              atomic_inc(&qp->s_rdma_mr->refcount);
++                              qib_get_mr(qp->s_rdma_mr);
+                       qp->s_ack_rdma_sge.sge = e->rdma_sge;
+                       qp->s_ack_rdma_sge.num_sge = 1;
+                       qp->s_cur_sge = &qp->s_ack_rdma_sge;
+@@ -172,7 +172,7 @@ static int qib_make_rc_ack(struct qib_ibdev *dev, struct qib_qp *qp,
+               qp->s_cur_sge = &qp->s_ack_rdma_sge;
+               qp->s_rdma_mr = qp->s_ack_rdma_sge.sge.mr;
+               if (qp->s_rdma_mr)
+-                      atomic_inc(&qp->s_rdma_mr->refcount);
++                      qib_get_mr(qp->s_rdma_mr);
+               len = qp->s_ack_rdma_sge.sge.sge_length;
+               if (len > pmtu)
+                       len = pmtu;
+@@ -1012,7 +1012,7 @@ void qib_rc_send_complete(struct qib_qp *qp, struct qib_ib_header *hdr)
+               for (i = 0; i < wqe->wr.num_sge; i++) {
+                       struct qib_sge *sge = &wqe->sg_list[i];
+-                      atomic_dec(&sge->mr->refcount);
++                      qib_put_mr(sge->mr);
+               }
+               /* Post a send completion queue entry if requested. */
+               if (!(qp->s_flags & QIB_S_SIGNAL_REQ_WR) ||
+@@ -1068,7 +1068,7 @@ static struct qib_swqe *do_rc_completion(struct qib_qp *qp,
+               for (i = 0; i < wqe->wr.num_sge; i++) {
+                       struct qib_sge *sge = &wqe->sg_list[i];
+-                      atomic_dec(&sge->mr->refcount);
++                      qib_put_mr(sge->mr);
+               }
+               /* Post a send completion queue entry if requested. */
+               if (!(qp->s_flags & QIB_S_SIGNAL_REQ_WR) ||
+@@ -1730,7 +1730,7 @@ static int qib_rc_rcv_error(struct qib_other_headers *ohdr,
+               if (unlikely(offset + len != e->rdma_sge.sge_length))
+                       goto unlock_done;
+               if (e->rdma_sge.mr) {
+-                      atomic_dec(&e->rdma_sge.mr->refcount);
++                      qib_put_mr(e->rdma_sge.mr);
+                       e->rdma_sge.mr = NULL;
+               }
+               if (len != 0) {
+@@ -2024,11 +2024,7 @@ send_last:
+               if (unlikely(wc.byte_len > qp->r_len))
+                       goto nack_inv;
+               qib_copy_sge(&qp->r_sge, data, tlen, 1);
+-              while (qp->r_sge.num_sge) {
+-                      atomic_dec(&qp->r_sge.sge.mr->refcount);
+-                      if (--qp->r_sge.num_sge)
+-                              qp->r_sge.sge = *qp->r_sge.sg_list++;
+-              }
++              qib_put_ss(&qp->r_sge);
+               qp->r_msn++;
+               if (!test_and_clear_bit(QIB_R_WRID_VALID, &qp->r_aflags))
+                       break;
+@@ -2116,7 +2112,7 @@ send_last:
+               }
+               e = &qp->s_ack_queue[qp->r_head_ack_queue];
+               if (e->opcode == OP(RDMA_READ_REQUEST) && e->rdma_sge.mr) {
+-                      atomic_dec(&e->rdma_sge.mr->refcount);
++                      qib_put_mr(e->rdma_sge.mr);
+                       e->rdma_sge.mr = NULL;
+               }
+               reth = &ohdr->u.rc.reth;
+@@ -2188,7 +2184,7 @@ send_last:
+               }
+               e = &qp->s_ack_queue[qp->r_head_ack_queue];
+               if (e->opcode == OP(RDMA_READ_REQUEST) && e->rdma_sge.mr) {
+-                      atomic_dec(&e->rdma_sge.mr->refcount);
++                      qib_put_mr(e->rdma_sge.mr);
+                       e->rdma_sge.mr = NULL;
+               }
+               ateth = &ohdr->u.atomic_eth;
+@@ -2210,7 +2206,7 @@ send_last:
+                       (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
+                                     be64_to_cpu(ateth->compare_data),
+                                     sdata);
+-              atomic_dec(&qp->r_sge.sge.mr->refcount);
++              qib_put_mr(qp->r_sge.sge.mr);
+               qp->r_sge.num_sge = 0;
+               e->opcode = opcode;
+               e->sent = 0;
+diff --git a/drivers/infiniband/hw/qib/qib_ruc.c b/drivers/infiniband/hw/qib/qib_ruc.c
+index c0ee7e0..357b6cf 100644
+--- a/drivers/infiniband/hw/qib/qib_ruc.c
++++ b/drivers/infiniband/hw/qib/qib_ruc.c
+@@ -110,7 +110,7 @@ bad_lkey:
+       while (j) {
+               struct qib_sge *sge = --j ? &ss->sg_list[j - 1] : &ss->sge;
+-              atomic_dec(&sge->mr->refcount);
++              qib_put_mr(sge->mr);
+       }
+       ss->num_sge = 0;
+       memset(&wc, 0, sizeof(wc));
+@@ -501,7 +501,7 @@ again:
+                       (u64) atomic64_add_return(sdata, maddr) - sdata :
+                       (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
+                                     sdata, wqe->wr.wr.atomic.swap);
+-              atomic_dec(&qp->r_sge.sge.mr->refcount);
++              qib_put_mr(qp->r_sge.sge.mr);
+               qp->r_sge.num_sge = 0;
+               goto send_comp;
+@@ -525,7 +525,7 @@ again:
+               sge->sge_length -= len;
+               if (sge->sge_length == 0) {
+                       if (!release)
+-                              atomic_dec(&sge->mr->refcount);
++                              qib_put_mr(sge->mr);
+                       if (--sqp->s_sge.num_sge)
+                               *sge = *sqp->s_sge.sg_list++;
+               } else if (sge->length == 0 && sge->mr->lkey) {
+@@ -542,11 +542,7 @@ again:
+               sqp->s_len -= len;
+       }
+       if (release)
+-              while (qp->r_sge.num_sge) {
+-                      atomic_dec(&qp->r_sge.sge.mr->refcount);
+-                      if (--qp->r_sge.num_sge)
+-                              qp->r_sge.sge = *qp->r_sge.sg_list++;
+-              }
++              qib_put_ss(&qp->r_sge);
+       if (!test_and_clear_bit(QIB_R_WRID_VALID, &qp->r_aflags))
+               goto send_comp;
+@@ -782,7 +778,7 @@ void qib_send_complete(struct qib_qp *qp, struct qib_swqe *wqe,
+       for (i = 0; i < wqe->wr.num_sge; i++) {
+               struct qib_sge *sge = &wqe->sg_list[i];
+-              atomic_dec(&sge->mr->refcount);
++              qib_put_mr(sge->mr);
+       }
+       if (qp->ibqp.qp_type == IB_QPT_UD ||
+           qp->ibqp.qp_type == IB_QPT_SMI ||
+diff --git a/drivers/infiniband/hw/qib/qib_uc.c b/drivers/infiniband/hw/qib/qib_uc.c
+index 70b4cb7..aa3a803 100644
+--- a/drivers/infiniband/hw/qib/qib_uc.c
++++ b/drivers/infiniband/hw/qib/qib_uc.c
+@@ -281,11 +281,7 @@ inv:
+                       set_bit(QIB_R_REWIND_SGE, &qp->r_aflags);
+                       qp->r_sge.num_sge = 0;
+               } else
+-                      while (qp->r_sge.num_sge) {
+-                              atomic_dec(&qp->r_sge.sge.mr->refcount);
+-                              if (--qp->r_sge.num_sge)
+-                                      qp->r_sge.sge = *qp->r_sge.sg_list++;
+-                      }
++                      qib_put_ss(&qp->r_sge);
+               qp->r_state = OP(SEND_LAST);
+               switch (opcode) {
+               case OP(SEND_FIRST):
+@@ -404,12 +400,7 @@ send_last:
+                       goto rewind;
+               wc.opcode = IB_WC_RECV;
+               qib_copy_sge(&qp->r_sge, data, tlen, 0);
+-              while (qp->s_rdma_read_sge.num_sge) {
+-                      atomic_dec(&qp->s_rdma_read_sge.sge.mr->refcount);
+-                      if (--qp->s_rdma_read_sge.num_sge)
+-                              qp->s_rdma_read_sge.sge =
+-                                      *qp->s_rdma_read_sge.sg_list++;
+-              }
++              qib_put_ss(&qp->s_rdma_read_sge);
+ last_imm:
+               wc.wr_id = qp->r_wr_id;
+               wc.status = IB_WC_SUCCESS;
+@@ -493,13 +484,7 @@ rdma_last_imm:
+               if (unlikely(tlen + qp->r_rcv_len != qp->r_len))
+                       goto drop;
+               if (test_and_clear_bit(QIB_R_REWIND_SGE, &qp->r_aflags))
+-                      while (qp->s_rdma_read_sge.num_sge) {
+-                              atomic_dec(&qp->s_rdma_read_sge.sge.mr->
+-                                         refcount);
+-                              if (--qp->s_rdma_read_sge.num_sge)
+-                                      qp->s_rdma_read_sge.sge =
+-                                              *qp->s_rdma_read_sge.sg_list++;
+-                      }
++                      qib_put_ss(&qp->s_rdma_read_sge);
+               else {
+                       ret = qib_get_rwqe(qp, 1);
+                       if (ret < 0)
+@@ -510,11 +495,7 @@ rdma_last_imm:
+               wc.byte_len = qp->r_len;
+               wc.opcode = IB_WC_RECV_RDMA_WITH_IMM;
+               qib_copy_sge(&qp->r_sge, data, tlen, 1);
+-              while (qp->r_sge.num_sge) {
+-                      atomic_dec(&qp->r_sge.sge.mr->refcount);
+-                      if (--qp->r_sge.num_sge)
+-                              qp->r_sge.sge = *qp->r_sge.sg_list++;
+-              }
++              qib_put_ss(&qp->r_sge);
+               goto last_imm;
+       case OP(RDMA_WRITE_LAST):
+@@ -530,11 +511,7 @@ rdma_last:
+               if (unlikely(tlen + qp->r_rcv_len != qp->r_len))
+                       goto drop;
+               qib_copy_sge(&qp->r_sge, data, tlen, 1);
+-              while (qp->r_sge.num_sge) {
+-                      atomic_dec(&qp->r_sge.sge.mr->refcount);
+-                      if (--qp->r_sge.num_sge)
+-                              qp->r_sge.sge = *qp->r_sge.sg_list++;
+-              }
++              qib_put_ss(&qp->r_sge);
+               break;
+       default:
+diff --git a/drivers/infiniband/hw/qib/qib_ud.c b/drivers/infiniband/hw/qib/qib_ud.c
+index a468bf2..d6c7fe7 100644
+--- a/drivers/infiniband/hw/qib/qib_ud.c
++++ b/drivers/infiniband/hw/qib/qib_ud.c
+@@ -194,11 +194,7 @@ static void qib_ud_loopback(struct qib_qp *sqp, struct qib_swqe *swqe)
+               }
+               length -= len;
+       }
+-      while (qp->r_sge.num_sge) {
+-              atomic_dec(&qp->r_sge.sge.mr->refcount);
+-              if (--qp->r_sge.num_sge)
+-                      qp->r_sge.sge = *qp->r_sge.sg_list++;
+-      }
++      qib_put_ss(&qp->r_sge);
+       if (!test_and_clear_bit(QIB_R_WRID_VALID, &qp->r_aflags))
+               goto bail_unlock;
+       wc.wr_id = qp->r_wr_id;
+@@ -556,11 +552,7 @@ void qib_ud_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr,
+       } else
+               qib_skip_sge(&qp->r_sge, sizeof(struct ib_grh), 1);
+       qib_copy_sge(&qp->r_sge, data, wc.byte_len - sizeof(struct ib_grh), 1);
+-      while (qp->r_sge.num_sge) {
+-              atomic_dec(&qp->r_sge.sge.mr->refcount);
+-              if (--qp->r_sge.num_sge)
+-                      qp->r_sge.sge = *qp->r_sge.sg_list++;
+-      }
++      qib_put_ss(&qp->r_sge);
+       if (!test_and_clear_bit(QIB_R_WRID_VALID, &qp->r_aflags))
+               return;
+       wc.wr_id = qp->r_wr_id;
+diff --git a/drivers/infiniband/hw/qib/qib_verbs.c b/drivers/infiniband/hw/qib/qib_verbs.c
+index 7b6c3bf..76d7ce8 100644
+--- a/drivers/infiniband/hw/qib/qib_verbs.c
++++ b/drivers/infiniband/hw/qib/qib_verbs.c
+@@ -183,7 +183,7 @@ void qib_copy_sge(struct qib_sge_state *ss, void *data, u32 length, int release)
+               sge->sge_length -= len;
+               if (sge->sge_length == 0) {
+                       if (release)
+-                              atomic_dec(&sge->mr->refcount);
++                              qib_put_mr(sge->mr);
+                       if (--ss->num_sge)
+                               *sge = *ss->sg_list++;
+               } else if (sge->length == 0 && sge->mr->lkey) {
+@@ -224,7 +224,7 @@ void qib_skip_sge(struct qib_sge_state *ss, u32 length, int release)
+               sge->sge_length -= len;
+               if (sge->sge_length == 0) {
+                       if (release)
+-                              atomic_dec(&sge->mr->refcount);
++                              qib_put_mr(sge->mr);
+                       if (--ss->num_sge)
+                               *sge = *ss->sg_list++;
+               } else if (sge->length == 0 && sge->mr->lkey) {
+@@ -435,7 +435,7 @@ bail_inval_free:
+       while (j) {
+               struct qib_sge *sge = &wqe->sg_list[--j];
+-              atomic_dec(&sge->mr->refcount);
++              qib_put_mr(sge->mr);
+       }
+ bail_inval:
+       ret = -EINVAL;
+@@ -978,7 +978,7 @@ void qib_put_txreq(struct qib_verbs_txreq *tx)
+       if (atomic_dec_and_test(&qp->refcount))
+               wake_up(&qp->wait);
+       if (tx->mr) {
+-              atomic_dec(&tx->mr->refcount);
++              qib_put_mr(tx->mr);
+               tx->mr = NULL;
+       }
+       if (tx->txreq.flags & QIB_SDMA_TXREQ_F_FREEBUF) {
+@@ -1336,7 +1336,7 @@ done:
+       }
+       qib_sendbuf_done(dd, pbufn);
+       if (qp->s_rdma_mr) {
+-              atomic_dec(&qp->s_rdma_mr->refcount);
++              qib_put_mr(qp->s_rdma_mr);
+               qp->s_rdma_mr = NULL;
+       }
+       if (qp->s_wqe) {
+diff --git a/drivers/infiniband/hw/qib/qib_verbs.h b/drivers/infiniband/hw/qib/qib_verbs.h
+index 4876060..4a2277b 100644
+--- a/drivers/infiniband/hw/qib/qib_verbs.h
++++ b/drivers/infiniband/hw/qib/qib_verbs.h
+@@ -41,6 +41,7 @@
+ #include <linux/interrupt.h>
+ #include <linux/kref.h>
+ #include <linux/workqueue.h>
++#include <linux/completion.h>
+ #include <rdma/ib_pack.h>
+ #include <rdma/ib_user_verbs.h>
+@@ -302,6 +303,8 @@ struct qib_mregion {
+       u32 max_segs;           /* number of qib_segs in all the arrays */
+       u32 mapsz;              /* size of the map array */
+       u8  page_shift;         /* 0 - non unform/non powerof2 sizes */
++      u8  lkey_published;     /* in global table */
++      struct completion comp; /* complete when refcount goes to zero */
+       atomic_t refcount;
+       struct qib_segarray *map[0];    /* the segments */
+ };
+@@ -944,9 +947,9 @@ int qib_post_ud_send(struct qib_qp *qp, struct ib_send_wr *wr);
+ void qib_ud_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr,
+               int has_grh, void *data, u32 tlen, struct qib_qp *qp);
+-int qib_alloc_lkey(struct qib_lkey_table *rkt, struct qib_mregion *mr);
++int qib_alloc_lkey(struct qib_mregion *mr, int dma_region);
+-int qib_free_lkey(struct qib_ibdev *dev, struct qib_mregion *mr);
++void qib_free_lkey(struct qib_mregion *mr);
+ int qib_lkey_ok(struct qib_lkey_table *rkt, struct qib_pd *pd,
+               struct qib_sge *isge, struct ib_sge *sge, int acc);
+@@ -1014,6 +1017,27 @@ int qib_unmap_fmr(struct list_head *fmr_list);
+ int qib_dealloc_fmr(struct ib_fmr *ibfmr);
++static inline void qib_get_mr(struct qib_mregion *mr)
++{
++      atomic_inc(&mr->refcount);
++}
++
++static inline void qib_put_mr(struct qib_mregion *mr)
++{
++      if (unlikely(atomic_dec_and_test(&mr->refcount)))
++              complete(&mr->comp);
++}
++
++static inline void qib_put_ss(struct qib_sge_state *ss)
++{
++      while (ss->num_sge) {
++              qib_put_mr(ss->sge.mr);
++              if (--ss->num_sge)
++                      ss->sge = *ss->sg_list++;
++      }
++}
++
++
+ void qib_release_mmap_info(struct kref *ref);
+ struct qib_mmap_info *qib_create_mmap_info(struct qib_ibdev *dev, u32 size,
+-- 
+1.7.9.5
+
diff --git a/linux-next-pending/0005-IB-qib-portinfo-compliance.patch b/linux-next-pending/0005-IB-qib-portinfo-compliance.patch
deleted file mode 100644 (file)
index 5da8689..0000000
+++ /dev/null
@@ -1,32 +0,0 @@
-IB/qib: fix compliance test failure C14-024#06_LocalPortNum
-
-From: Mike Marciniszyn <mike.marciniszyn@intel.com>
-
-Commit 3236b2d4 ("IB/qib: MADs with misset M_Keys should return failure")
-introduced a return code assignment that unfortunately introduced
-an unconditional exit for the routine due to the lack of braces.
-
-This patch adds the braces to correct the original patch.
-
-Reviewed-by: Dean Luick <dean.luick@intel.com>
-Signed-off-by: Mike Marciniszyn <mike.marciniszyn@intel.com>
----
- drivers/infiniband/hw/qib/qib_mad.c |    3 ++-
- 1 files changed, 2 insertions(+), 1 deletions(-)
-
-diff --git a/drivers/infiniband/hw/qib/qib_mad.c b/drivers/infiniband/hw/qib/qib_mad.c
-index 4339021..3e55415 100644
---- a/drivers/infiniband/hw/qib/qib_mad.c
-+++ b/drivers/infiniband/hw/qib/qib_mad.c
-@@ -463,9 +463,10 @@ static int subn_get_portinfo(struct ib_smp *smp, struct ib_device *ibdev,
-               if (port_num != port) {
-                       ibp = to_iport(ibdev, port_num);
-                       ret = check_mkey(ibp, smp, 0);
--                      if (ret)
-+                      if (ret) {
-                               ret = IB_MAD_RESULT_FAILURE;
-                               goto bail;
-+                      }
-               }
-       }
diff --git a/linux-next-pending/0006-IB-qib-mr-ebusy.patch b/linux-next-pending/0006-IB-qib-mr-ebusy.patch
deleted file mode 100644 (file)
index 8251564..0000000
+++ /dev/null
@@ -1,52 +0,0 @@
-IB/qib: Fix UC MR refs for immediate operations
-
-From: Mike Marciniszyn <mike.marciniszyn@intel.com>
-
-An MR reference leak exists when handling UC RDMA writes with
-immediate data because we manipulate the reference counts as if the
-operation had been a send.
-
-This patch moves the last_imm label so that the RDMA write operations
-with immediate data converge at the cq building code.  The copy/mr
-deref code is now done correctly prior to the branch to last_imm.
-
-Reviewed-by: Edward Mascarenhas <edward.mascarenhas@intel.com>
-Signed-off-by: Mike Marciniszyn <mike.marciniszyn@intel.com>
-Signed-off-by: Roland Dreier <roland@purestorage.com>
----
- drivers/infiniband/hw/qib/qib_uc.c |    8 +++++++-
- 1 files changed, 7 insertions(+), 1 deletions(-)
-
-diff --git a/drivers/infiniband/hw/qib/qib_uc.c b/drivers/infiniband/hw/qib/qib_uc.c
-index ce7387f..70b4cb7 100644
---- a/drivers/infiniband/hw/qib/qib_uc.c
-+++ b/drivers/infiniband/hw/qib/qib_uc.c
-@@ -403,7 +403,6 @@ send_last:
-               if (unlikely(wc.byte_len > qp->r_len))
-                       goto rewind;
-               wc.opcode = IB_WC_RECV;
--last_imm:
-               qib_copy_sge(&qp->r_sge, data, tlen, 0);
-               while (qp->s_rdma_read_sge.num_sge) {
-                       atomic_dec(&qp->s_rdma_read_sge.sge.mr->refcount);
-@@ -411,6 +410,7 @@ last_imm:
-                               qp->s_rdma_read_sge.sge =
-                                       *qp->s_rdma_read_sge.sg_list++;
-               }
-+last_imm:
-               wc.wr_id = qp->r_wr_id;
-               wc.status = IB_WC_SUCCESS;
-               wc.qp = &qp->ibqp;
-@@ -509,6 +509,12 @@ rdma_last_imm:
-               }
-               wc.byte_len = qp->r_len;
-               wc.opcode = IB_WC_RECV_RDMA_WITH_IMM;
-+              qib_copy_sge(&qp->r_sge, data, tlen, 1);
-+              while (qp->r_sge.num_sge) {
-+                      atomic_dec(&qp->r_sge.sge.mr->refcount);
-+                      if (--qp->r_sge.num_sge)
-+                              qp->r_sge.sge = *qp->r_sge.sg_list++;
-+              }
-               goto last_imm;
-       case OP(RDMA_WRITE_LAST):
diff --git a/linux-next-pending/0007-IB-qib-uc-refcount-leak.patch b/linux-next-pending/0007-IB-qib-uc-refcount-leak.patch
deleted file mode 100644 (file)
index a9248e5..0000000
+++ /dev/null
@@ -1,1035 +0,0 @@
-IB/qib: Avoid returning EBUSY from MR deregister
-
-From: Mike Marciniszyn <mike.marciniszyn@intel.com>
-
-A timing issue can occur where qib_mr_dereg can return -EBUSY if the
-MR use count is not zero.
-
-This can occur if the MR is de-registered while RDMA read response
-packets are being progressed from the SDMA ring.  The suspicion is
-that the peer sent an RDMA read request, which has already been copied
-across to the peer.  The peer sees the completion of his request and
-then communicates to the responder that the MR is not needed any
-longer.  The responder tries to de-register the MR, catching some
-responses remaining in the SDMA ring holding the MR use count.
-
-The code now uses a get/put paradigm to track MR use counts and
-coordinates with the MR de-registration process using a completion
-when the count has reached zero.  A timeout on the delay is in place
-to catch other EBUSY issues.
-
-The reference count protocol is as follows:
-- The return to the user counts as 1
-- A reference from the lk_table or the qib_ibdev counts as 1.
-- Transient I/O operations increase/decrease as necessary
-
-A lot of code duplication has been folded into the new routines
-init_qib_mregion() and deinit_qib_mregion().  Additionally, explicit
-initialization of fields to zero is now handled by kzalloc().
-
-Also, duplicated code 'while.*num_sge' that decrements reference
-counts have been consolidated in qib_put_ss().
-
-Reviewed-by: Ramkrishna Vepa <ramkrishna.vepa@intel.com>
-Signed-off-by: Mike Marciniszyn <mike.marciniszyn@intel.com>
-Signed-off-by: Roland Dreier <roland@purestorage.com>
----
- drivers/infiniband/hw/qib/qib_keys.c  |   84 +++++++----
- drivers/infiniband/hw/qib/qib_mr.c    |  242 ++++++++++++++++++---------------
- drivers/infiniband/hw/qib/qib_qp.c    |   21 +--
- drivers/infiniband/hw/qib/qib_rc.c    |   24 +--
- drivers/infiniband/hw/qib/qib_ruc.c   |   14 +-
- drivers/infiniband/hw/qib/qib_uc.c    |   33 +----
- drivers/infiniband/hw/qib/qib_ud.c    |   12 --
- drivers/infiniband/hw/qib/qib_verbs.c |   10 +
- drivers/infiniband/hw/qib/qib_verbs.h |   28 ++++
- 9 files changed, 244 insertions(+), 224 deletions(-)
-
-diff --git a/drivers/infiniband/hw/qib/qib_keys.c b/drivers/infiniband/hw/qib/qib_keys.c
-index 8fd19a4..8b5ee3a 100644
---- a/drivers/infiniband/hw/qib/qib_keys.c
-+++ b/drivers/infiniband/hw/qib/qib_keys.c
-@@ -35,21 +35,40 @@
- /**
-  * qib_alloc_lkey - allocate an lkey
-- * @rkt: lkey table in which to allocate the lkey
-  * @mr: memory region that this lkey protects
-+ * @dma_region: 0->normal key, 1->restricted DMA key
-+ *
-+ * Returns 0 if successful, otherwise returns -errno.
-+ *
-+ * Increments mr reference count and sets published
-+ * as required.
-+ *
-+ * Sets the lkey field mr for non-dma regions.
-  *
-- * Returns 1 if successful, otherwise returns 0.
-  */
--int qib_alloc_lkey(struct qib_lkey_table *rkt, struct qib_mregion *mr)
-+int qib_alloc_lkey(struct qib_mregion *mr, int dma_region)
- {
-       unsigned long flags;
-       u32 r;
-       u32 n;
--      int ret;
-+      int ret = 0;
-+      struct qib_ibdev *dev = to_idev(mr->pd->device);
-+      struct qib_lkey_table *rkt = &dev->lk_table;
-       spin_lock_irqsave(&rkt->lock, flags);
-+      /* special case for dma_mr lkey == 0 */
-+      if (dma_region) {
-+              /* should the dma_mr be relative to the pd? */
-+              if (!dev->dma_mr) {
-+                      qib_get_mr(mr);
-+                      dev->dma_mr = mr;
-+                      mr->lkey_published = 1;
-+              }
-+              goto success;
-+      }
-+
-       /* Find the next available LKEY */
-       r = rkt->next;
-       n = r;
-@@ -57,11 +76,8 @@ int qib_alloc_lkey(struct qib_lkey_table *rkt, struct qib_mregion *mr)
-               if (rkt->table[r] == NULL)
-                       break;
-               r = (r + 1) & (rkt->max - 1);
--              if (r == n) {
--                      spin_unlock_irqrestore(&rkt->lock, flags);
--                      ret = 0;
-+              if (r == n)
-                       goto bail;
--              }
-       }
-       rkt->next = (r + 1) & (rkt->max - 1);
-       /*
-@@ -76,46 +92,50 @@ int qib_alloc_lkey(struct qib_lkey_table *rkt, struct qib_mregion *mr)
-               mr->lkey |= 1 << 8;
-               rkt->gen++;
-       }
-+      qib_get_mr(mr);
-       rkt->table[r] = mr;
-+      mr->lkey_published = 1;
-+success:
-       spin_unlock_irqrestore(&rkt->lock, flags);
--
--      ret = 1;
--
--bail:
-+out:
-       return ret;
-+bail:
-+      spin_unlock_irqrestore(&rkt->lock, flags);
-+      ret = -ENOMEM;
-+      goto out;
- }
- /**
-  * qib_free_lkey - free an lkey
-- * @rkt: table from which to free the lkey
-- * @lkey: lkey id to free
-+ * @mr: mr to free from tables
-  */
--int qib_free_lkey(struct qib_ibdev *dev, struct qib_mregion *mr)
-+void qib_free_lkey(struct qib_mregion *mr)
- {
-       unsigned long flags;
-       u32 lkey = mr->lkey;
-       u32 r;
--      int ret;
-+      struct qib_ibdev *dev = to_idev(mr->pd->device);
-+      struct qib_lkey_table *rkt = &dev->lk_table;
-+
-+      spin_lock_irqsave(&rkt->lock, flags);
-+      if (!mr->lkey_published)
-+              goto out;
-+      mr->lkey_published = 0;
-+
-       spin_lock_irqsave(&dev->lk_table.lock, flags);
-       if (lkey == 0) {
-               if (dev->dma_mr && dev->dma_mr == mr) {
--                      ret = atomic_read(&dev->dma_mr->refcount);
--                      if (!ret)
--                              dev->dma_mr = NULL;
--              } else
--                      ret = 0;
-+                      qib_put_mr(dev->dma_mr);
-+                      dev->dma_mr = NULL;
-+              }
-       } else {
-               r = lkey >> (32 - ib_qib_lkey_table_size);
--              ret = atomic_read(&dev->lk_table.table[r]->refcount);
--              if (!ret)
--                      dev->lk_table.table[r] = NULL;
-+              qib_put_mr(dev->dma_mr);
-+              rkt->table[r] = NULL;
-       }
-+out:
-       spin_unlock_irqrestore(&dev->lk_table.lock, flags);
--
--      if (ret)
--              ret = -EBUSY;
--      return ret;
- }
- /**
-@@ -150,7 +170,7 @@ int qib_lkey_ok(struct qib_lkey_table *rkt, struct qib_pd *pd,
-                       goto bail;
-               if (!dev->dma_mr)
-                       goto bail;
--              atomic_inc(&dev->dma_mr->refcount);
-+              qib_get_mr(dev->dma_mr);
-               spin_unlock_irqrestore(&rkt->lock, flags);
-               isge->mr = dev->dma_mr;
-@@ -171,7 +191,7 @@ int qib_lkey_ok(struct qib_lkey_table *rkt, struct qib_pd *pd,
-                    off + sge->length > mr->length ||
-                    (mr->access_flags & acc) != acc))
-               goto bail;
--      atomic_inc(&mr->refcount);
-+      qib_get_mr(mr);
-       spin_unlock_irqrestore(&rkt->lock, flags);
-       off += mr->offset;
-@@ -245,7 +265,7 @@ int qib_rkey_ok(struct qib_qp *qp, struct qib_sge *sge,
-                       goto bail;
-               if (!dev->dma_mr)
-                       goto bail;
--              atomic_inc(&dev->dma_mr->refcount);
-+              qib_get_mr(dev->dma_mr);
-               spin_unlock_irqrestore(&rkt->lock, flags);
-               sge->mr = dev->dma_mr;
-@@ -265,7 +285,7 @@ int qib_rkey_ok(struct qib_qp *qp, struct qib_sge *sge,
-       if (unlikely(vaddr < mr->iova || off + len > mr->length ||
-                    (mr->access_flags & acc) == 0))
-               goto bail;
--      atomic_inc(&mr->refcount);
-+      qib_get_mr(mr);
-       spin_unlock_irqrestore(&rkt->lock, flags);
-       off += mr->offset;
-diff --git a/drivers/infiniband/hw/qib/qib_mr.c b/drivers/infiniband/hw/qib/qib_mr.c
-index 08944e2..6a2028a 100644
---- a/drivers/infiniband/hw/qib/qib_mr.c
-+++ b/drivers/infiniband/hw/qib/qib_mr.c
-@@ -47,6 +47,43 @@ static inline struct qib_fmr *to_ifmr(struct ib_fmr *ibfmr)
-       return container_of(ibfmr, struct qib_fmr, ibfmr);
- }
-+static int init_qib_mregion(struct qib_mregion *mr, struct ib_pd *pd,
-+      int count)
-+{
-+      int m, i = 0;
-+      int rval = 0;
-+
-+      m = (count + QIB_SEGSZ - 1) / QIB_SEGSZ;
-+      for (; i < m; i++) {
-+              mr->map[i] = kzalloc(sizeof *mr->map[0], GFP_KERNEL);
-+              if (!mr->map[i])
-+                      goto bail;
-+      }
-+      mr->mapsz = m;
-+      init_completion(&mr->comp);
-+      /* count returning the ptr to user */
-+      atomic_set(&mr->refcount, 1);
-+      mr->pd = pd;
-+      mr->max_segs = count;
-+out:
-+      return rval;
-+bail:
-+      while (i)
-+              kfree(mr->map[--i]);
-+      rval = -ENOMEM;
-+      goto out;
-+}
-+
-+static void deinit_qib_mregion(struct qib_mregion *mr)
-+{
-+      int i = mr->mapsz;
-+
-+      mr->mapsz = 0;
-+      while (i)
-+              kfree(mr->map[--i]);
-+}
-+
-+
- /**
-  * qib_get_dma_mr - get a DMA memory region
-  * @pd: protection domain for this memory region
-@@ -58,10 +95,9 @@ static inline struct qib_fmr *to_ifmr(struct ib_fmr *ibfmr)
-  */
- struct ib_mr *qib_get_dma_mr(struct ib_pd *pd, int acc)
- {
--      struct qib_ibdev *dev = to_idev(pd->device);
--      struct qib_mr *mr;
-+      struct qib_mr *mr = NULL;
-       struct ib_mr *ret;
--      unsigned long flags;
-+      int rval;
-       if (to_ipd(pd)->user) {
-               ret = ERR_PTR(-EPERM);
-@@ -74,61 +110,64 @@ struct ib_mr *qib_get_dma_mr(struct ib_pd *pd, int acc)
-               goto bail;
-       }
--      mr->mr.access_flags = acc;
--      atomic_set(&mr->mr.refcount, 0);
-+      rval = init_qib_mregion(&mr->mr, pd, 0);
-+      if (rval) {
-+              ret = ERR_PTR(rval);
-+              goto bail;
-+      }
--      spin_lock_irqsave(&dev->lk_table.lock, flags);
--      if (!dev->dma_mr)
--              dev->dma_mr = &mr->mr;
--      spin_unlock_irqrestore(&dev->lk_table.lock, flags);
-+      rval = qib_alloc_lkey(&mr->mr, 1);
-+      if (rval) {
-+              ret = ERR_PTR(rval);
-+              goto bail_mregion;
-+      }
-+
-+      mr->mr.access_flags = acc;
-       ret = &mr->ibmr;
-+done:
-+      return ret;
-+bail_mregion:
-+      deinit_qib_mregion(&mr->mr);
- bail:
--      return ret;
-+      kfree(mr);
-+      goto done;
- }
--static struct qib_mr *alloc_mr(int count, struct qib_lkey_table *lk_table)
-+static struct qib_mr *alloc_mr(int count, struct ib_pd *pd)
- {
-       struct qib_mr *mr;
--      int m, i = 0;
-+      int rval = -ENOMEM;
-+      int m;
-       /* Allocate struct plus pointers to first level page tables. */
-       m = (count + QIB_SEGSZ - 1) / QIB_SEGSZ;
--      mr = kmalloc(sizeof *mr + m * sizeof mr->mr.map[0], GFP_KERNEL);
-+      mr = kzalloc(sizeof *mr + m * sizeof mr->mr.map[0], GFP_KERNEL);
-       if (!mr)
--              goto done;
--
--      /* Allocate first level page tables. */
--      for (; i < m; i++) {
--              mr->mr.map[i] = kmalloc(sizeof *mr->mr.map[0], GFP_KERNEL);
--              if (!mr->mr.map[i])
--                      goto bail;
--      }
--      mr->mr.mapsz = m;
--      mr->mr.page_shift = 0;
--      mr->mr.max_segs = count;
-+              goto bail;
-+      rval = init_qib_mregion(&mr->mr, pd, count);
-+      if (rval)
-+              goto bail;
-       /*
-        * ib_reg_phys_mr() will initialize mr->ibmr except for
-        * lkey and rkey.
-        */
--      if (!qib_alloc_lkey(lk_table, &mr->mr))
--              goto bail;
-+      rval = qib_alloc_lkey(&mr->mr, 0);
-+      if (rval)
-+              goto bail_mregion;
-       mr->ibmr.lkey = mr->mr.lkey;
-       mr->ibmr.rkey = mr->mr.lkey;
-+done:
-+      return mr;
--      atomic_set(&mr->mr.refcount, 0);
--      goto done;
--
-+bail_mregion:
-+      deinit_qib_mregion(&mr->mr);
- bail:
--      while (i)
--              kfree(mr->mr.map[--i]);
-       kfree(mr);
--      mr = NULL;
--
--done:
--      return mr;
-+      mr = ERR_PTR(rval);
-+      goto done;
- }
- /**
-@@ -148,19 +187,15 @@ struct ib_mr *qib_reg_phys_mr(struct ib_pd *pd,
-       int n, m, i;
-       struct ib_mr *ret;
--      mr = alloc_mr(num_phys_buf, &to_idev(pd->device)->lk_table);
--      if (mr == NULL) {
--              ret = ERR_PTR(-ENOMEM);
-+      mr = alloc_mr(num_phys_buf, pd);
-+      if (IS_ERR(mr)) {
-+              ret = (struct ib_mr *)mr;
-               goto bail;
-       }
--      mr->mr.pd = pd;
-       mr->mr.user_base = *iova_start;
-       mr->mr.iova = *iova_start;
--      mr->mr.length = 0;
--      mr->mr.offset = 0;
-       mr->mr.access_flags = acc;
--      mr->umem = NULL;
-       m = 0;
-       n = 0;
-@@ -186,7 +221,6 @@ bail:
-  * @pd: protection domain for this memory region
-  * @start: starting userspace address
-  * @length: length of region to register
-- * @virt_addr: virtual address to use (from HCA's point of view)
-  * @mr_access_flags: access flags for this memory region
-  * @udata: unused by the QLogic_IB driver
-  *
-@@ -216,14 +250,13 @@ struct ib_mr *qib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
-       list_for_each_entry(chunk, &umem->chunk_list, list)
-               n += chunk->nents;
--      mr = alloc_mr(n, &to_idev(pd->device)->lk_table);
--      if (!mr) {
--              ret = ERR_PTR(-ENOMEM);
-+      mr = alloc_mr(n, pd);
-+      if (IS_ERR(mr)) {
-+              ret = (struct ib_mr *)mr;
-               ib_umem_release(umem);
-               goto bail;
-       }
--      mr->mr.pd = pd;
-       mr->mr.user_base = start;
-       mr->mr.iova = virt_addr;
-       mr->mr.length = length;
-@@ -271,21 +304,25 @@ bail:
- int qib_dereg_mr(struct ib_mr *ibmr)
- {
-       struct qib_mr *mr = to_imr(ibmr);
--      struct qib_ibdev *dev = to_idev(ibmr->device);
--      int ret;
--      int i;
--
--      ret = qib_free_lkey(dev, &mr->mr);
--      if (ret)
--              return ret;
--
--      i = mr->mr.mapsz;
--      while (i)
--              kfree(mr->mr.map[--i]);
-+      int ret = 0;
-+      unsigned long timeout;
-+
-+      qib_free_lkey(&mr->mr);
-+
-+      qib_put_mr(&mr->mr); /* will set completion if last */
-+      timeout = wait_for_completion_timeout(&mr->mr.comp,
-+              5 * HZ);
-+      if (!timeout) {
-+              qib_get_mr(&mr->mr);
-+              ret = -EBUSY;
-+              goto out;
-+      }
-+      deinit_qib_mregion(&mr->mr);
-       if (mr->umem)
-               ib_umem_release(mr->umem);
-       kfree(mr);
--      return 0;
-+out:
-+      return ret;
- }
- /*
-@@ -298,17 +335,9 @@ struct ib_mr *qib_alloc_fast_reg_mr(struct ib_pd *pd, int max_page_list_len)
- {
-       struct qib_mr *mr;
--      mr = alloc_mr(max_page_list_len, &to_idev(pd->device)->lk_table);
--      if (mr == NULL)
--              return ERR_PTR(-ENOMEM);
--
--      mr->mr.pd = pd;
--      mr->mr.user_base = 0;
--      mr->mr.iova = 0;
--      mr->mr.length = 0;
--      mr->mr.offset = 0;
--      mr->mr.access_flags = 0;
--      mr->umem = NULL;
-+      mr = alloc_mr(max_page_list_len, pd);
-+      if (IS_ERR(mr))
-+              return (struct ib_mr *)mr;
-       return &mr->ibmr;
- }
-@@ -322,11 +351,11 @@ qib_alloc_fast_reg_page_list(struct ib_device *ibdev, int page_list_len)
-       if (size > PAGE_SIZE)
-               return ERR_PTR(-EINVAL);
--      pl = kmalloc(sizeof *pl, GFP_KERNEL);
-+      pl = kzalloc(sizeof *pl, GFP_KERNEL);
-       if (!pl)
-               return ERR_PTR(-ENOMEM);
--      pl->page_list = kmalloc(size, GFP_KERNEL);
-+      pl->page_list = kzalloc(size, GFP_KERNEL);
-       if (!pl->page_list)
-               goto err_free;
-@@ -355,57 +384,47 @@ struct ib_fmr *qib_alloc_fmr(struct ib_pd *pd, int mr_access_flags,
-                            struct ib_fmr_attr *fmr_attr)
- {
-       struct qib_fmr *fmr;
--      int m, i = 0;
-+      int m;
-       struct ib_fmr *ret;
-+      int rval = -ENOMEM;
-       /* Allocate struct plus pointers to first level page tables. */
-       m = (fmr_attr->max_pages + QIB_SEGSZ - 1) / QIB_SEGSZ;
--      fmr = kmalloc(sizeof *fmr + m * sizeof fmr->mr.map[0], GFP_KERNEL);
-+      fmr = kzalloc(sizeof *fmr + m * sizeof fmr->mr.map[0], GFP_KERNEL);
-       if (!fmr)
-               goto bail;
--      /* Allocate first level page tables. */
--      for (; i < m; i++) {
--              fmr->mr.map[i] = kmalloc(sizeof *fmr->mr.map[0],
--                                       GFP_KERNEL);
--              if (!fmr->mr.map[i])
--                      goto bail;
--      }
--      fmr->mr.mapsz = m;
-+      rval = init_qib_mregion(&fmr->mr, pd, fmr_attr->max_pages);
-+      if (rval)
-+              goto bail;
-       /*
-        * ib_alloc_fmr() will initialize fmr->ibfmr except for lkey &
-        * rkey.
-        */
--      if (!qib_alloc_lkey(&to_idev(pd->device)->lk_table, &fmr->mr))
--              goto bail;
-+      rval = qib_alloc_lkey(&fmr->mr, 0);
-+      if (rval)
-+              goto bail_mregion;
-       fmr->ibfmr.rkey = fmr->mr.lkey;
-       fmr->ibfmr.lkey = fmr->mr.lkey;
-       /*
-        * Resources are allocated but no valid mapping (RKEY can't be
-        * used).
-        */
--      fmr->mr.pd = pd;
--      fmr->mr.user_base = 0;
--      fmr->mr.iova = 0;
--      fmr->mr.length = 0;
--      fmr->mr.offset = 0;
-       fmr->mr.access_flags = mr_access_flags;
-       fmr->mr.max_segs = fmr_attr->max_pages;
-       fmr->mr.page_shift = fmr_attr->page_shift;
--      atomic_set(&fmr->mr.refcount, 0);
-       ret = &fmr->ibfmr;
--      goto done;
-+done:
-+      return ret;
-+bail_mregion:
-+      deinit_qib_mregion(&fmr->mr);
- bail:
--      while (i)
--              kfree(fmr->mr.map[--i]);
-       kfree(fmr);
--      ret = ERR_PTR(-ENOMEM);
--
--done:
--      return ret;
-+      ret = ERR_PTR(rval);
-+      goto done;
- }
- /**
-@@ -428,7 +447,8 @@ int qib_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list,
-       u32 ps;
-       int ret;
--      if (atomic_read(&fmr->mr.refcount))
-+      i = atomic_read(&fmr->mr.refcount);
-+      if (i > 2)
-               return -EBUSY;
-       if (list_len > fmr->mr.max_segs) {
-@@ -490,16 +510,20 @@ int qib_unmap_fmr(struct list_head *fmr_list)
- int qib_dealloc_fmr(struct ib_fmr *ibfmr)
- {
-       struct qib_fmr *fmr = to_ifmr(ibfmr);
--      int ret;
--      int i;
--
--      ret = qib_free_lkey(to_idev(ibfmr->device), &fmr->mr);
--      if (ret)
--              return ret;
--
--      i = fmr->mr.mapsz;
--      while (i)
--              kfree(fmr->mr.map[--i]);
-+      int ret = 0;
-+      unsigned long timeout;
-+
-+      qib_free_lkey(&fmr->mr);
-+      qib_put_mr(&fmr->mr); /* will set completion if last */
-+      timeout = wait_for_completion_timeout(&fmr->mr.comp,
-+              5 * HZ);
-+      if (!timeout) {
-+              qib_get_mr(&fmr->mr);
-+              ret = -EBUSY;
-+              goto out;
-+      }
-+      deinit_qib_mregion(&fmr->mr);
-       kfree(fmr);
--      return 0;
-+out:
-+      return ret;
- }
-diff --git a/drivers/infiniband/hw/qib/qib_qp.c b/drivers/infiniband/hw/qib/qib_qp.c
-index 1ce56b5..693041b 100644
---- a/drivers/infiniband/hw/qib/qib_qp.c
-+++ b/drivers/infiniband/hw/qib/qib_qp.c
-@@ -406,18 +406,9 @@ static void clear_mr_refs(struct qib_qp *qp, int clr_sends)
-       unsigned n;
-       if (test_and_clear_bit(QIB_R_REWIND_SGE, &qp->r_aflags))
--              while (qp->s_rdma_read_sge.num_sge) {
--                      atomic_dec(&qp->s_rdma_read_sge.sge.mr->refcount);
--                      if (--qp->s_rdma_read_sge.num_sge)
--                              qp->s_rdma_read_sge.sge =
--                                      *qp->s_rdma_read_sge.sg_list++;
--              }
-+              qib_put_ss(&qp->s_rdma_read_sge);
--      while (qp->r_sge.num_sge) {
--              atomic_dec(&qp->r_sge.sge.mr->refcount);
--              if (--qp->r_sge.num_sge)
--                      qp->r_sge.sge = *qp->r_sge.sg_list++;
--      }
-+      qib_put_ss(&qp->r_sge);
-       if (clr_sends) {
-               while (qp->s_last != qp->s_head) {
-@@ -427,7 +418,7 @@ static void clear_mr_refs(struct qib_qp *qp, int clr_sends)
-                       for (i = 0; i < wqe->wr.num_sge; i++) {
-                               struct qib_sge *sge = &wqe->sg_list[i];
--                              atomic_dec(&sge->mr->refcount);
-+                              qib_put_mr(sge->mr);
-                       }
-                       if (qp->ibqp.qp_type == IB_QPT_UD ||
-                           qp->ibqp.qp_type == IB_QPT_SMI ||
-@@ -437,7 +428,7 @@ static void clear_mr_refs(struct qib_qp *qp, int clr_sends)
-                               qp->s_last = 0;
-               }
-               if (qp->s_rdma_mr) {
--                      atomic_dec(&qp->s_rdma_mr->refcount);
-+                      qib_put_mr(qp->s_rdma_mr);
-                       qp->s_rdma_mr = NULL;
-               }
-       }
-@@ -450,7 +441,7 @@ static void clear_mr_refs(struct qib_qp *qp, int clr_sends)
-               if (e->opcode == IB_OPCODE_RC_RDMA_READ_REQUEST &&
-                   e->rdma_sge.mr) {
--                      atomic_dec(&e->rdma_sge.mr->refcount);
-+                      qib_put_mr(e->rdma_sge.mr);
-                       e->rdma_sge.mr = NULL;
-               }
-       }
-@@ -495,7 +486,7 @@ int qib_error_qp(struct qib_qp *qp, enum ib_wc_status err)
-       if (!(qp->s_flags & QIB_S_BUSY)) {
-               qp->s_hdrwords = 0;
-               if (qp->s_rdma_mr) {
--                      atomic_dec(&qp->s_rdma_mr->refcount);
-+                      qib_put_mr(qp->s_rdma_mr);
-                       qp->s_rdma_mr = NULL;
-               }
-               if (qp->s_tx) {
-diff --git a/drivers/infiniband/hw/qib/qib_rc.c b/drivers/infiniband/hw/qib/qib_rc.c
-index b641416..3ab3413 100644
---- a/drivers/infiniband/hw/qib/qib_rc.c
-+++ b/drivers/infiniband/hw/qib/qib_rc.c
-@@ -95,7 +95,7 @@ static int qib_make_rc_ack(struct qib_ibdev *dev, struct qib_qp *qp,
-       case OP(RDMA_READ_RESPONSE_ONLY):
-               e = &qp->s_ack_queue[qp->s_tail_ack_queue];
-               if (e->rdma_sge.mr) {
--                      atomic_dec(&e->rdma_sge.mr->refcount);
-+                      qib_put_mr(e->rdma_sge.mr);
-                       e->rdma_sge.mr = NULL;
-               }
-               /* FALLTHROUGH */
-@@ -133,7 +133,7 @@ static int qib_make_rc_ack(struct qib_ibdev *dev, struct qib_qp *qp,
-                       /* Copy SGE state in case we need to resend */
-                       qp->s_rdma_mr = e->rdma_sge.mr;
-                       if (qp->s_rdma_mr)
--                              atomic_inc(&qp->s_rdma_mr->refcount);
-+                              qib_get_mr(qp->s_rdma_mr);
-                       qp->s_ack_rdma_sge.sge = e->rdma_sge;
-                       qp->s_ack_rdma_sge.num_sge = 1;
-                       qp->s_cur_sge = &qp->s_ack_rdma_sge;
-@@ -172,7 +172,7 @@ static int qib_make_rc_ack(struct qib_ibdev *dev, struct qib_qp *qp,
-               qp->s_cur_sge = &qp->s_ack_rdma_sge;
-               qp->s_rdma_mr = qp->s_ack_rdma_sge.sge.mr;
-               if (qp->s_rdma_mr)
--                      atomic_inc(&qp->s_rdma_mr->refcount);
-+                      qib_get_mr(qp->s_rdma_mr);
-               len = qp->s_ack_rdma_sge.sge.sge_length;
-               if (len > pmtu)
-                       len = pmtu;
-@@ -1012,7 +1012,7 @@ void qib_rc_send_complete(struct qib_qp *qp, struct qib_ib_header *hdr)
-               for (i = 0; i < wqe->wr.num_sge; i++) {
-                       struct qib_sge *sge = &wqe->sg_list[i];
--                      atomic_dec(&sge->mr->refcount);
-+                      qib_put_mr(sge->mr);
-               }
-               /* Post a send completion queue entry if requested. */
-               if (!(qp->s_flags & QIB_S_SIGNAL_REQ_WR) ||
-@@ -1068,7 +1068,7 @@ static struct qib_swqe *do_rc_completion(struct qib_qp *qp,
-               for (i = 0; i < wqe->wr.num_sge; i++) {
-                       struct qib_sge *sge = &wqe->sg_list[i];
--                      atomic_dec(&sge->mr->refcount);
-+                      qib_put_mr(sge->mr);
-               }
-               /* Post a send completion queue entry if requested. */
-               if (!(qp->s_flags & QIB_S_SIGNAL_REQ_WR) ||
-@@ -1730,7 +1730,7 @@ static int qib_rc_rcv_error(struct qib_other_headers *ohdr,
-               if (unlikely(offset + len != e->rdma_sge.sge_length))
-                       goto unlock_done;
-               if (e->rdma_sge.mr) {
--                      atomic_dec(&e->rdma_sge.mr->refcount);
-+                      qib_put_mr(e->rdma_sge.mr);
-                       e->rdma_sge.mr = NULL;
-               }
-               if (len != 0) {
-@@ -2024,11 +2024,7 @@ send_last:
-               if (unlikely(wc.byte_len > qp->r_len))
-                       goto nack_inv;
-               qib_copy_sge(&qp->r_sge, data, tlen, 1);
--              while (qp->r_sge.num_sge) {
--                      atomic_dec(&qp->r_sge.sge.mr->refcount);
--                      if (--qp->r_sge.num_sge)
--                              qp->r_sge.sge = *qp->r_sge.sg_list++;
--              }
-+              qib_put_ss(&qp->r_sge);
-               qp->r_msn++;
-               if (!test_and_clear_bit(QIB_R_WRID_VALID, &qp->r_aflags))
-                       break;
-@@ -2116,7 +2112,7 @@ send_last:
-               }
-               e = &qp->s_ack_queue[qp->r_head_ack_queue];
-               if (e->opcode == OP(RDMA_READ_REQUEST) && e->rdma_sge.mr) {
--                      atomic_dec(&e->rdma_sge.mr->refcount);
-+                      qib_put_mr(e->rdma_sge.mr);
-                       e->rdma_sge.mr = NULL;
-               }
-               reth = &ohdr->u.rc.reth;
-@@ -2188,7 +2184,7 @@ send_last:
-               }
-               e = &qp->s_ack_queue[qp->r_head_ack_queue];
-               if (e->opcode == OP(RDMA_READ_REQUEST) && e->rdma_sge.mr) {
--                      atomic_dec(&e->rdma_sge.mr->refcount);
-+                      qib_put_mr(e->rdma_sge.mr);
-                       e->rdma_sge.mr = NULL;
-               }
-               ateth = &ohdr->u.atomic_eth;
-@@ -2210,7 +2206,7 @@ send_last:
-                       (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
-                                     be64_to_cpu(ateth->compare_data),
-                                     sdata);
--              atomic_dec(&qp->r_sge.sge.mr->refcount);
-+              qib_put_mr(qp->r_sge.sge.mr);
-               qp->r_sge.num_sge = 0;
-               e->opcode = opcode;
-               e->sent = 0;
-diff --git a/drivers/infiniband/hw/qib/qib_ruc.c b/drivers/infiniband/hw/qib/qib_ruc.c
-index c0ee7e0..357b6cf 100644
---- a/drivers/infiniband/hw/qib/qib_ruc.c
-+++ b/drivers/infiniband/hw/qib/qib_ruc.c
-@@ -110,7 +110,7 @@ bad_lkey:
-       while (j) {
-               struct qib_sge *sge = --j ? &ss->sg_list[j - 1] : &ss->sge;
--              atomic_dec(&sge->mr->refcount);
-+              qib_put_mr(sge->mr);
-       }
-       ss->num_sge = 0;
-       memset(&wc, 0, sizeof(wc));
-@@ -501,7 +501,7 @@ again:
-                       (u64) atomic64_add_return(sdata, maddr) - sdata :
-                       (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
-                                     sdata, wqe->wr.wr.atomic.swap);
--              atomic_dec(&qp->r_sge.sge.mr->refcount);
-+              qib_put_mr(qp->r_sge.sge.mr);
-               qp->r_sge.num_sge = 0;
-               goto send_comp;
-@@ -525,7 +525,7 @@ again:
-               sge->sge_length -= len;
-               if (sge->sge_length == 0) {
-                       if (!release)
--                              atomic_dec(&sge->mr->refcount);
-+                              qib_put_mr(sge->mr);
-                       if (--sqp->s_sge.num_sge)
-                               *sge = *sqp->s_sge.sg_list++;
-               } else if (sge->length == 0 && sge->mr->lkey) {
-@@ -542,11 +542,7 @@ again:
-               sqp->s_len -= len;
-       }
-       if (release)
--              while (qp->r_sge.num_sge) {
--                      atomic_dec(&qp->r_sge.sge.mr->refcount);
--                      if (--qp->r_sge.num_sge)
--                              qp->r_sge.sge = *qp->r_sge.sg_list++;
--              }
-+              qib_put_ss(&qp->r_sge);
-       if (!test_and_clear_bit(QIB_R_WRID_VALID, &qp->r_aflags))
-               goto send_comp;
-@@ -782,7 +778,7 @@ void qib_send_complete(struct qib_qp *qp, struct qib_swqe *wqe,
-       for (i = 0; i < wqe->wr.num_sge; i++) {
-               struct qib_sge *sge = &wqe->sg_list[i];
--              atomic_dec(&sge->mr->refcount);
-+              qib_put_mr(sge->mr);
-       }
-       if (qp->ibqp.qp_type == IB_QPT_UD ||
-           qp->ibqp.qp_type == IB_QPT_SMI ||
-diff --git a/drivers/infiniband/hw/qib/qib_uc.c b/drivers/infiniband/hw/qib/qib_uc.c
-index 70b4cb7..aa3a803 100644
---- a/drivers/infiniband/hw/qib/qib_uc.c
-+++ b/drivers/infiniband/hw/qib/qib_uc.c
-@@ -281,11 +281,7 @@ inv:
-                       set_bit(QIB_R_REWIND_SGE, &qp->r_aflags);
-                       qp->r_sge.num_sge = 0;
-               } else
--                      while (qp->r_sge.num_sge) {
--                              atomic_dec(&qp->r_sge.sge.mr->refcount);
--                              if (--qp->r_sge.num_sge)
--                                      qp->r_sge.sge = *qp->r_sge.sg_list++;
--                      }
-+                      qib_put_ss(&qp->r_sge);
-               qp->r_state = OP(SEND_LAST);
-               switch (opcode) {
-               case OP(SEND_FIRST):
-@@ -404,12 +400,7 @@ send_last:
-                       goto rewind;
-               wc.opcode = IB_WC_RECV;
-               qib_copy_sge(&qp->r_sge, data, tlen, 0);
--              while (qp->s_rdma_read_sge.num_sge) {
--                      atomic_dec(&qp->s_rdma_read_sge.sge.mr->refcount);
--                      if (--qp->s_rdma_read_sge.num_sge)
--                              qp->s_rdma_read_sge.sge =
--                                      *qp->s_rdma_read_sge.sg_list++;
--              }
-+              qib_put_ss(&qp->s_rdma_read_sge);
- last_imm:
-               wc.wr_id = qp->r_wr_id;
-               wc.status = IB_WC_SUCCESS;
-@@ -493,13 +484,7 @@ rdma_last_imm:
-               if (unlikely(tlen + qp->r_rcv_len != qp->r_len))
-                       goto drop;
-               if (test_and_clear_bit(QIB_R_REWIND_SGE, &qp->r_aflags))
--                      while (qp->s_rdma_read_sge.num_sge) {
--                              atomic_dec(&qp->s_rdma_read_sge.sge.mr->
--                                         refcount);
--                              if (--qp->s_rdma_read_sge.num_sge)
--                                      qp->s_rdma_read_sge.sge =
--                                              *qp->s_rdma_read_sge.sg_list++;
--                      }
-+                      qib_put_ss(&qp->s_rdma_read_sge);
-               else {
-                       ret = qib_get_rwqe(qp, 1);
-                       if (ret < 0)
-@@ -510,11 +495,7 @@ rdma_last_imm:
-               wc.byte_len = qp->r_len;
-               wc.opcode = IB_WC_RECV_RDMA_WITH_IMM;
-               qib_copy_sge(&qp->r_sge, data, tlen, 1);
--              while (qp->r_sge.num_sge) {
--                      atomic_dec(&qp->r_sge.sge.mr->refcount);
--                      if (--qp->r_sge.num_sge)
--                              qp->r_sge.sge = *qp->r_sge.sg_list++;
--              }
-+              qib_put_ss(&qp->r_sge);
-               goto last_imm;
-       case OP(RDMA_WRITE_LAST):
-@@ -530,11 +511,7 @@ rdma_last:
-               if (unlikely(tlen + qp->r_rcv_len != qp->r_len))
-                       goto drop;
-               qib_copy_sge(&qp->r_sge, data, tlen, 1);
--              while (qp->r_sge.num_sge) {
--                      atomic_dec(&qp->r_sge.sge.mr->refcount);
--                      if (--qp->r_sge.num_sge)
--                              qp->r_sge.sge = *qp->r_sge.sg_list++;
--              }
-+              qib_put_ss(&qp->r_sge);
-               break;
-       default:
-diff --git a/drivers/infiniband/hw/qib/qib_ud.c b/drivers/infiniband/hw/qib/qib_ud.c
-index a468bf2..d6c7fe7 100644
---- a/drivers/infiniband/hw/qib/qib_ud.c
-+++ b/drivers/infiniband/hw/qib/qib_ud.c
-@@ -194,11 +194,7 @@ static void qib_ud_loopback(struct qib_qp *sqp, struct qib_swqe *swqe)
-               }
-               length -= len;
-       }
--      while (qp->r_sge.num_sge) {
--              atomic_dec(&qp->r_sge.sge.mr->refcount);
--              if (--qp->r_sge.num_sge)
--                      qp->r_sge.sge = *qp->r_sge.sg_list++;
--      }
-+      qib_put_ss(&qp->r_sge);
-       if (!test_and_clear_bit(QIB_R_WRID_VALID, &qp->r_aflags))
-               goto bail_unlock;
-       wc.wr_id = qp->r_wr_id;
-@@ -556,11 +552,7 @@ void qib_ud_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr,
-       } else
-               qib_skip_sge(&qp->r_sge, sizeof(struct ib_grh), 1);
-       qib_copy_sge(&qp->r_sge, data, wc.byte_len - sizeof(struct ib_grh), 1);
--      while (qp->r_sge.num_sge) {
--              atomic_dec(&qp->r_sge.sge.mr->refcount);
--              if (--qp->r_sge.num_sge)
--                      qp->r_sge.sge = *qp->r_sge.sg_list++;
--      }
-+      qib_put_ss(&qp->r_sge);
-       if (!test_and_clear_bit(QIB_R_WRID_VALID, &qp->r_aflags))
-               return;
-       wc.wr_id = qp->r_wr_id;
-diff --git a/drivers/infiniband/hw/qib/qib_verbs.c b/drivers/infiniband/hw/qib/qib_verbs.c
-index 7b6c3bf..76d7ce8 100644
---- a/drivers/infiniband/hw/qib/qib_verbs.c
-+++ b/drivers/infiniband/hw/qib/qib_verbs.c
-@@ -183,7 +183,7 @@ void qib_copy_sge(struct qib_sge_state *ss, void *data, u32 length, int release)
-               sge->sge_length -= len;
-               if (sge->sge_length == 0) {
-                       if (release)
--                              atomic_dec(&sge->mr->refcount);
-+                              qib_put_mr(sge->mr);
-                       if (--ss->num_sge)
-                               *sge = *ss->sg_list++;
-               } else if (sge->length == 0 && sge->mr->lkey) {
-@@ -224,7 +224,7 @@ void qib_skip_sge(struct qib_sge_state *ss, u32 length, int release)
-               sge->sge_length -= len;
-               if (sge->sge_length == 0) {
-                       if (release)
--                              atomic_dec(&sge->mr->refcount);
-+                              qib_put_mr(sge->mr);
-                       if (--ss->num_sge)
-                               *sge = *ss->sg_list++;
-               } else if (sge->length == 0 && sge->mr->lkey) {
-@@ -435,7 +435,7 @@ bail_inval_free:
-       while (j) {
-               struct qib_sge *sge = &wqe->sg_list[--j];
--              atomic_dec(&sge->mr->refcount);
-+              qib_put_mr(sge->mr);
-       }
- bail_inval:
-       ret = -EINVAL;
-@@ -978,7 +978,7 @@ void qib_put_txreq(struct qib_verbs_txreq *tx)
-       if (atomic_dec_and_test(&qp->refcount))
-               wake_up(&qp->wait);
-       if (tx->mr) {
--              atomic_dec(&tx->mr->refcount);
-+              qib_put_mr(tx->mr);
-               tx->mr = NULL;
-       }
-       if (tx->txreq.flags & QIB_SDMA_TXREQ_F_FREEBUF) {
-@@ -1336,7 +1336,7 @@ done:
-       }
-       qib_sendbuf_done(dd, pbufn);
-       if (qp->s_rdma_mr) {
--              atomic_dec(&qp->s_rdma_mr->refcount);
-+              qib_put_mr(qp->s_rdma_mr);
-               qp->s_rdma_mr = NULL;
-       }
-       if (qp->s_wqe) {
-diff --git a/drivers/infiniband/hw/qib/qib_verbs.h b/drivers/infiniband/hw/qib/qib_verbs.h
-index 4876060..4a2277b 100644
---- a/drivers/infiniband/hw/qib/qib_verbs.h
-+++ b/drivers/infiniband/hw/qib/qib_verbs.h
-@@ -41,6 +41,7 @@
- #include <linux/interrupt.h>
- #include <linux/kref.h>
- #include <linux/workqueue.h>
-+#include <linux/completion.h>
- #include <rdma/ib_pack.h>
- #include <rdma/ib_user_verbs.h>
-@@ -302,6 +303,8 @@ struct qib_mregion {
-       u32 max_segs;           /* number of qib_segs in all the arrays */
-       u32 mapsz;              /* size of the map array */
-       u8  page_shift;         /* 0 - non unform/non powerof2 sizes */
-+      u8  lkey_published;     /* in global table */
-+      struct completion comp; /* complete when refcount goes to zero */
-       atomic_t refcount;
-       struct qib_segarray *map[0];    /* the segments */
- };
-@@ -944,9 +947,9 @@ int qib_post_ud_send(struct qib_qp *qp, struct ib_send_wr *wr);
- void qib_ud_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr,
-               int has_grh, void *data, u32 tlen, struct qib_qp *qp);
--int qib_alloc_lkey(struct qib_lkey_table *rkt, struct qib_mregion *mr);
-+int qib_alloc_lkey(struct qib_mregion *mr, int dma_region);
--int qib_free_lkey(struct qib_ibdev *dev, struct qib_mregion *mr);
-+void qib_free_lkey(struct qib_mregion *mr);
- int qib_lkey_ok(struct qib_lkey_table *rkt, struct qib_pd *pd,
-               struct qib_sge *isge, struct ib_sge *sge, int acc);
-@@ -1014,6 +1017,27 @@ int qib_unmap_fmr(struct list_head *fmr_list);
- int qib_dealloc_fmr(struct ib_fmr *ibfmr);
-+static inline void qib_get_mr(struct qib_mregion *mr)
-+{
-+      atomic_inc(&mr->refcount);
-+}
-+
-+static inline void qib_put_mr(struct qib_mregion *mr)
-+{
-+      if (unlikely(atomic_dec_and_test(&mr->refcount)))
-+              complete(&mr->comp);
-+}
-+
-+static inline void qib_put_ss(struct qib_sge_state *ss)
-+{
-+      while (ss->num_sge) {
-+              qib_put_mr(ss->sge.mr);
-+              if (--ss->num_sge)
-+                      ss->sge = *ss->sg_list++;
-+      }
-+}
-+
-+
- void qib_release_mmap_info(struct kref *ref);
- struct qib_mmap_info *qib_create_mmap_info(struct qib_ibdev *dev, u32 size,
index a7703dcb2d84f839a5a3e6e32f0b345d2528fab9..5dc29b9980f8954b64e5564781c8fb335a40393e 100644 (file)
@@ -1,12 +1,12 @@
-From aab849d519467be361c92ecaf2c1c46df1d6466e Mon Sep 17 00:00:00 2001
+From b109b6da8557ef527abaf9c013bba5c636c552e9 Mon Sep 17 00:00:00 2001
 From: Vladimir Sokolovsky <vlad@mellanox.com>
 Date: Tue, 10 Jul 2012 15:57:24 +0300
-Subject: [PATCH 01/21] ib_core: backport dst_fetch_ha
+Subject: [PATCH 01/25] ib_core: backport dst_fetch_ha
 
 Signed-off-by: Vladimir Sokolovsky <vlad@mellanox.com>
 ---
  drivers/infiniband/core/addr.c |   58 ++++++++++++++++++++++++++++++++++++++++
- 1 files changed, 58 insertions(+), 0 deletions(-)
+ 1 file changed, 58 insertions(+)
 
 diff --git a/drivers/infiniband/core/addr.c b/drivers/infiniband/core/addr.c
 index 6ef660c..984f61c 100644
@@ -121,5 +121,5 @@ index 6ef660c..984f61c 100644
        dst_release(dst);
        return ret;
 -- 
-1.7.0.4
+1.7.9.5
 
index 780dc8776b8a404b35b52af3fb225825ba82d70d..c33d282cbc5319497cb63314292a2525098ebc35 100644 (file)
@@ -1,12 +1,12 @@
-From 026cb35caae800ec4823fd05fa4d5fd4b41b5153 Mon Sep 17 00:00:00 2001
+From b42650b2873504c84250c5876e684f8fc12b0d6b Mon Sep 17 00:00:00 2001
 From: Vladimir Sokolovsky <vlad@mellanox.com>
 Date: Tue, 10 Jul 2012 17:21:25 +0300
-Subject: [PATCH 02/21] ib/core: Backport flowi4 and flowi6
+Subject: [PATCH 02/25] ib/core: Backport flowi4 and flowi6
 
 Signed-off-by: Vladimir Sokolovsky <vlad@mellanox.com>
 ---
  drivers/infiniband/core/addr.c |   45 ++++++++++++++++++++++++++++++++++++++++
- 1 files changed, 45 insertions(+), 0 deletions(-)
+ 1 file changed, 45 insertions(+)
 
 diff --git a/drivers/infiniband/core/addr.c b/drivers/infiniband/core/addr.c
 index 984f61c..089b076 100644
@@ -99,5 +99,5 @@ index 984f61c..089b076 100644
        if (dst->dev->flags & IFF_LOOPBACK) {
                ret = rdma_translate_ip((struct sockaddr *) dst_in, addr);
 -- 
-1.7.0.4
+1.7.9.5
 
index 92647db37a2e5b9c76470390585cd6ce2451e72d..21e8e286853a45936af92815e42f04f081b0d7dc 100644 (file)
@@ -1,12 +1,12 @@
-From 5fd491b49a69f933b529aa5d8f7e0a37c06799bb Mon Sep 17 00:00:00 2001
+From 92ea29718f50d89695ed46cf409f789097b82d03 Mon Sep 17 00:00:00 2001
 From: Vladimir Sokolovsky <vlad@mellanox.com>
 Date: Mon, 25 Jun 2012 22:28:24 +0300
-Subject: [PATCH 03/21] BACKPORT: core/netlink for kernels <= 3.4
+Subject: [PATCH 03/25] BACKPORT: core/netlink for kernels <= 3.4
 
 Signed-off-by: Vladimir Sokolovsky <vlad@mellanox.com>
 ---
  drivers/infiniband/core/netlink.c |    6 ++++++
- 1 files changed, 6 insertions(+), 0 deletions(-)
+ 1 file changed, 6 insertions(+)
 
 diff --git a/drivers/infiniband/core/netlink.c b/drivers/infiniband/core/netlink.c
 index e497dfb..2ac867a 100644
@@ -32,5 +32,5 @@ index e497dfb..2ac867a 100644
        }
  
 -- 
-1.7.0.4
+1.7.9.5
 
index 9fb48955801817e5b32f911f37e423694c082c6b..b7ad39450b5da1dd4978265c37b04f36db1ea722 100644 (file)
@@ -1,12 +1,12 @@
-From b4ba8b62569098b2d80a8bdf007a13d762dbd66e Mon Sep 17 00:00:00 2001
+From 0fd581905c953e5c0b3543d70fdd64dc6b7c681a Mon Sep 17 00:00:00 2001
 From: Vladimir Sokolovsky <vlad@mellanox.com>
 Date: Tue, 10 Jul 2012 17:41:31 +0300
-Subject: [PATCH 04/21] ib/core: Backport pinned_vm for kernels <= 3.2
+Subject: [PATCH 04/25] ib/core: Backport pinned_vm for kernels <= 3.2
 
 Signed-off-by: Vladimir Sokolovsky <vlad@mellanox.com>
 ---
  drivers/infiniband/core/umem.c |   16 ++++++++++++++++
- 1 files changed, 16 insertions(+), 0 deletions(-)
+ 1 file changed, 16 insertions(+)
 
 diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c
 index a841123..3b6cb84 100644
@@ -61,5 +61,5 @@ index a841123..3b6cb84 100644
        mmput(mm);
        kfree(umem);
 -- 
-1.7.0.4
+1.7.9.5
 
index 8d0e04525b4d1afd932436c5ab61c123a6c80413..245484d9b7f0c9603c87f0dfe631476d0be18fae 100644 (file)
@@ -1,14 +1,14 @@
-From 114683737901661ad394d2eee81d500f702c4c36 Mon Sep 17 00:00:00 2001
+From c8179b3bb60fe0560f4b5558aca5e523df749f67 Mon Sep 17 00:00:00 2001
 From: Vladimir Sokolovsky <vlad@mellanox.com>
 Date: Tue, 10 Jul 2012 18:06:16 +0300
-Subject: [PATCH 05/21] ib/core: Backport CLASS_ATTR for kernels < 2.6.34
+Subject: [PATCH 05/25] ib/core: Backport CLASS_ATTR for kernels < 2.6.34
 
 Signed-off-by: Vladimir Sokolovsky <vlad@mellanox.com>
 ---
  drivers/infiniband/core/ucm.c         |   20 ++++++++++++++++++++
  drivers/infiniband/core/user_mad.c    |   12 ++++++++++++
  drivers/infiniband/core/uverbs_main.c |   12 ++++++++++++
- 3 files changed, 44 insertions(+), 0 deletions(-)
+ 3 files changed, 44 insertions(+)
 
 diff --git a/drivers/infiniband/core/ucm.c b/drivers/infiniband/core/ucm.c
 index 06f0871..d74c8b3 100644
@@ -134,5 +134,5 @@ index 604556d..4b0d8f3 100644
                printk(KERN_ERR "user_verbs: couldn't create abi_version attribute\n");
                goto out_class;
 -- 
-1.7.0.4
+1.7.9.5
 
index b44a534b7bd881807b431a574aeae0a2f227dda0..84575b39325e9d43e8de7f58ab5eb375afba2812 100644 (file)
@@ -1,12 +1,12 @@
-From 8d863bb705600583bf70868bb5ab0ee41bfb4542 Mon Sep 17 00:00:00 2001
+From c41d75cc97ecf58160b708f328cae6765d933532 Mon Sep 17 00:00:00 2001
 From: Vladimir Sokolovsky <vlad@mellanox.com>
 Date: Thu, 12 Jul 2012 22:31:24 +0300
-Subject: [PATCH 06/21] Backport mlx4_ib
+Subject: [PATCH 06/25] Backport mlx4_ib
 
 Signed-off-by: Vladimir Sokolovsky <vlad@mellanox.com>
 ---
  drivers/infiniband/hw/mlx4/main.c |    8 ++++++++
- 1 files changed, 8 insertions(+), 0 deletions(-)
+ 1 file changed, 8 insertions(+)
 
 diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
 index 3530c41..765bf3b 100644
@@ -37,5 +37,5 @@ index 3530c41..765bf3b 100644
                        dev_put(ndev);
                }
 -- 
-1.7.0.4
+1.7.9.5
 
index 37dcb1ba9ac05f3b0b0897104cec8aee79fc445c..2dd972abc6b711add0d4dd9f203cc5f269201b89 100644 (file)
@@ -1,12 +1,12 @@
-From 1473b7194e0bce6d1dd06b17bf006a7fae99c448 Mon Sep 17 00:00:00 2001
+From 74445f348a5d44dd172f3da3794ad08ecd8dda47 Mon Sep 17 00:00:00 2001
 From: Vladimir Sokolovsky <vlad@mellanox.com>
 Date: Fri, 13 Jul 2012 00:24:56 +0300
-Subject: [PATCH 07/21] BACKPORT ucma: Revert sysctl registrations
+Subject: [PATCH 07/25] BACKPORT ucma: Revert sysctl registrations
 
 Signed-off-by: Vladimir Sokolovsky <vlad@mellanox.com>
 ---
  drivers/infiniband/core/ucma.c |   10 ++++++++--
- 1 files changed, 8 insertions(+), 2 deletions(-)
+ 1 file changed, 8 insertions(+), 2 deletions(-)
 
 diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c
 index 8002ae6..5861cdb 100644
@@ -44,5 +44,5 @@ index 8002ae6..5861cdb 100644
        misc_deregister(&ucma_misc);
        idr_destroy(&ctx_idr);
 -- 
-1.7.0.4
+1.7.9.5
 
index 1bdda5b89e36ccf643a69e8912d53c5c62d6d470..c890fa31d1bf83a31dda5bb182876c2ea8cb8801 100644 (file)
@@ -1,7 +1,7 @@
-From a0b4e02814afe69cd050cda0c51fd52fd4e96cb6 Mon Sep 17 00:00:00 2001
+From 62e36abf5a81320929ecdfd96ec15ae7762843f3 Mon Sep 17 00:00:00 2001
 From: Tatyana Nikolova <Tatyana.E.Nikolova@intel.com>
 Date: Mon, 16 Jul 2012 17:14:31 +0300
-Subject: [PATCH 08/21] RDMA/nes: Backports for RHEL 6.2 and 6.3
+Subject: [PATCH 08/25] RDMA/nes: Backports for RHEL 6.2 and 6.3
 
 Backports for linux-3.5 nes to RHEL-6.2 and RHEL-6.3
 
@@ -127,5 +127,5 @@ index f3a3ecf..d54776f 100644
        nes_debug(NES_DBG_INIT, "nesvnic = %p, reported features = 0x%lX, QPid = %d,"
                        " nic_index = %d, logical_port = %d, mac_index = %d.\n",
 -- 
-1.7.0.4
+1.7.9.5
 
index 9eb638c4d965339c5736657c40931a75c9c2de98..82449ab2e4c4f7426ad79b26f314637d14fee522 100644 (file)
@@ -1,7 +1,8 @@
-From 0ea5b57fe57f435ea4ff638c9054d90564842a0e Mon Sep 17 00:00:00 2001
+From 049c8ba147e556fc1f1ad66a1e070aced80386e9 Mon Sep 17 00:00:00 2001
 From: Vipul Pandya <vipul@chelsio.com>
 Date: Mon, 7 May 2012 12:43:41 +0530
-Subject: [PATCH 09/21] iw_cxgb3/iw_cxgb4: Enable header file inclusion with absolute path
+Subject: [PATCH 09/25] iw_cxgb3/iw_cxgb4: Enable header file inclusion with
+ absolute path
 
 iw_cxgb3/iw_cxgb4 driver includes header files of cxgb3/cxgb4 drivers
 respectively. OFED build environment is not able to find the header files
@@ -34,5 +35,5 @@ index e11cf72..2f9fbf3 100644
  obj-$(CONFIG_INFINIBAND_CXGB4) += iw_cxgb4.o
  
 -- 
-1.7.0.4
+1.7.9.5
 
index f2a38fef8c2cf355641d52e641b70cd492e93c74..a587000fd7ce14c25901615e8d160ca01f8c0bc0 100644 (file)
@@ -1,12 +1,12 @@
-From 4a1c370f23ed0c15fa8a15b7cdd72fc4b8e9590b Mon Sep 17 00:00:00 2001
+From a7e9289daf473d3bcddfea383e9d8f8aef90cf58 Mon Sep 17 00:00:00 2001
 From: Mike Marciniszyn <mike.marciniszyn@intel.com>
 Date: Thu, 2 Aug 2012 18:11:27 +0300
-Subject: [PATCH 10/21] IB/qib: backport qib_fs.c before 2.6.35
+Subject: [PATCH 10/25] IB/qib: backport qib_fs.c before 2.6.35
 
 Signed-off-by: Mike Marciniszyn <mike.marciniszyn@intel.com>
 ---
  drivers/infiniband/hw/qib/qib_fs.c |   29 +++++++++++++++++++++++++++++
- 1 files changed, 29 insertions(+), 0 deletions(-)
+ 1 file changed, 29 insertions(+)
 
 diff --git a/drivers/infiniband/hw/qib/qib_fs.c b/drivers/infiniband/hw/qib/qib_fs.c
 index 05e0f17..4ea3926 100644
@@ -87,5 +87,5 @@ index 05e0f17..4ea3926 100644
  };
  
 -- 
-1.7.0.4
+1.7.9.5
 
index 3961945df578e662552fe265540eb0acafa631b3..500ff4b3562bdc672643bb6cfb3bd472c9f14cb2 100644 (file)
@@ -1,7 +1,7 @@
-From 434d4d19533137c6936b9733743a934fd4946b63 Mon Sep 17 00:00:00 2001
+From d505c64dcda186c1cf7248bf261567284284ffd9 Mon Sep 17 00:00:00 2001
 From: Vipul Pandya <root@silicon.(none)>
 Date: Mon, 30 Jul 2012 19:08:37 +0530
-Subject: [PATCH 11/21] cxgb3: Backports for RHEL6.2 RHEL6.3 and SLES11 SP2
+Subject: [PATCH 11/25] cxgb3: Backports for RHEL6.2 RHEL6.3 and SLES11 SP2
 
 Signed-off-by: Vipul Pandya <vipul@chelsio.com>
 ---
@@ -11,7 +11,7 @@ Signed-off-by: Vipul Pandya <vipul@chelsio.com>
  drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c |   25 +++
  drivers/net/ethernet/chelsio/cxgb3/l2t.c           |    4 +
  drivers/net/ethernet/chelsio/cxgb3/sge.c           |   49 ++++++
- drivers/net/ethernet/chelsio/cxgb3/xgmac.c         |   17 ++-
+ drivers/net/ethernet/chelsio/cxgb3/xgmac.c         |   17 +-
  7 files changed, 280 insertions(+), 2 deletions(-)
 
 diff --git a/drivers/net/ethernet/chelsio/cxgb3/adapter.h b/drivers/net/ethernet/chelsio/cxgb3/adapter.h
@@ -632,5 +632,5 @@ index 3af19a5..b3c99fd 100644
                                        hash_lo |= (1 << hash);
                                else
 -- 
-1.7.0.4
+1.7.9.5
 
index e63b31d9859ffb9c8caa998bfa92c2c104230e6f..282b038e4475378f36b2422fe7d7f627b8728fd2 100644 (file)
@@ -1,12 +1,12 @@
-From 3987484a97b9ab36fc841a3109aebb875dba5c8f Mon Sep 17 00:00:00 2001
+From e95d29e72e1da4a34fe9a99c111755ff7ae46eb9 Mon Sep 17 00:00:00 2001
 From: Mike Marciniszyn <mike.marciniszyn@intel.com>
 Date: Thu, 2 Aug 2012 18:11:27 +0300
-Subject: [PATCH 12/21] IB/qib: backport < 3.2 for pinned_vm field
+Subject: [PATCH 12/25] IB/qib: backport < 3.2 for pinned_vm field
 
 Signed-off-by: Mike Marciniszyn <mike.marciniszyn@intel.com>
 ---
  drivers/infiniband/hw/qib/qib_user_pages.c |    8 ++++++++
- 1 files changed, 8 insertions(+), 0 deletions(-)
+ 1 file changed, 8 insertions(+)
 
 diff --git a/drivers/infiniband/hw/qib/qib_user_pages.c b/drivers/infiniband/hw/qib/qib_user_pages.c
 index 2bc1d2b..80d6ea5 100644
@@ -37,5 +37,5 @@ index 2bc1d2b..80d6ea5 100644
        }
  }
 -- 
-1.7.0.4
+1.7.9.5
 
index dea45c78d59685bc1a4af3ebc6ceb1e173967a03..2fdcebbec7dbcde0b68cbd32ff4fd24bf0e360c7 100644 (file)
@@ -1,13 +1,13 @@
-From 7498293c8d344b70376649277926d72b4250c674 Mon Sep 17 00:00:00 2001
+From 67fc061694dfa829043570007ccec9ff1d655ee3 Mon Sep 17 00:00:00 2001
 From: Vipul Pandya <vipul@chelsio.com>
 Date: Mon, 30 Jul 2012 12:06:12 +0530
-Subject: [PATCH 13/21] cxgb4: Backports for RHEL6.2 RHEL 6.3 and SLES11 SP2
+Subject: [PATCH 13/25] cxgb4: Backports for RHEL6.2 RHEL 6.3 and SLES11 SP2
 
 Signed-off-by: Vipul Pandya <vipul@chelsio.com>
 ---
  drivers/net/ethernet/chelsio/cxgb4/cxgb4.h      |   13 ++
  drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c |  190 ++++++++++++++++++++++-
- drivers/net/ethernet/chelsio/cxgb4/sge.c        |   72 +++++++++-
+ drivers/net/ethernet/chelsio/cxgb4/sge.c        |   72 ++++++++-
  3 files changed, 273 insertions(+), 2 deletions(-)
 
 diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
@@ -594,5 +594,5 @@ index e111d97..b97bea7 100644
                                restore_rx_bufs(&si, &rxq->fl, frags);
                } else if (likely(rsp_type == RSP_TYPE_CPL)) {
 -- 
-1.7.0.4
+1.7.9.5
 
index cef25eb6ea5a277a65c44c5ef00977d14022665d..d3d4a5c3b7e3a5ea34e6a7b777de8160e880d5e9 100644 (file)
@@ -1,12 +1,12 @@
-From 52bf86dbd619044e40448eeb2edfaafbbeb71acd Mon Sep 17 00:00:00 2001
+From 9c65408d4d69a2cbfd1e1c1ea34c35cf7414e89a Mon Sep 17 00:00:00 2001
 From: Mike Marciniszyn <mike.marciniszyn@intel.com>
 Date: Thu, 2 Aug 2012 18:11:27 +0300
-Subject: [PATCH 14/21] IB/ipath: backport qib_fs.c before 2.6.35
+Subject: [PATCH 14/25] IB/ipath: backport qib_fs.c before 2.6.35
 
 Signed-off-by: Mike Marciniszyn <mike.marciniszyn@intel.com>
 ---
  drivers/infiniband/hw/ipath/ipath_fs.c |   26 ++++++++++++++++++++++++++
- 1 files changed, 26 insertions(+), 0 deletions(-)
+ 1 file changed, 26 insertions(+)
 
 diff --git a/drivers/infiniband/hw/ipath/ipath_fs.c b/drivers/infiniband/hw/ipath/ipath_fs.c
 index a4de9d5..5ec9c66 100644
@@ -81,5 +81,5 @@ index a4de9d5..5ec9c66 100644
  };
  
 -- 
-1.7.0.4
+1.7.9.5
 
index d4a01fd353c9bf28d1e62587663d4693a9e3a083..2a499b6c8b7e3bec78fef842f0cf5ebae0279c09 100644 (file)
@@ -1,12 +1,12 @@
-From cbf29472b92bdc58e2c2fdb6d971ad843fe4bad5 Mon Sep 17 00:00:00 2001
+From 1cec339ba2a68302d41192df5103f5d95a33364b Mon Sep 17 00:00:00 2001
 From: Mike Marciniszyn <mike.marciniszyn@intel.com>
 Date: Thu, 2 Aug 2012 18:11:27 +0300
-Subject: [PATCH 15/21] IB/ipath: backport < 3.2 for pinned_vm field
+Subject: [PATCH 15/25] IB/ipath: backport < 3.2 for pinned_vm field
 
 Signed-off-by: Mike Marciniszyn <mike.marciniszyn@intel.com>
 ---
  drivers/infiniband/hw/ipath/ipath_user_pages.c |   12 ++++++++++++
- 1 files changed, 12 insertions(+), 0 deletions(-)
+ 1 file changed, 12 insertions(+)
 
 diff --git a/drivers/infiniband/hw/ipath/ipath_user_pages.c b/drivers/infiniband/hw/ipath/ipath_user_pages.c
 index dc66c45..f2858e6 100644
@@ -49,5 +49,5 @@ index dc66c45..f2858e6 100644
        mmput(work->mm);
        kfree(work);
 -- 
-1.7.0.4
+1.7.9.5
 
index b54f5bbeaac4b8e56a841de61c7ed07847b120d0..22d1e648c566abe6b4333df5319ef047240c085b 100644 (file)
@@ -1,12 +1,13 @@
-From 32509a879b4662c90805662b055521a8b2541cd3 Mon Sep 17 00:00:00 2001
+From b596dfe6f1296cb96361f7f95e49ddc641095be9 Mon Sep 17 00:00:00 2001
 From: Vipul Pandya <vipul@chelsio.com>
 Date: Mon, 30 Jul 2012 12:38:36 +0530
-Subject: [PATCH 16/21] iw_cxgb3: Backports for RHEL6.2 RHEL 6.3 and SLES11 SP2
+Subject: [PATCH 16/25] iw_cxgb3: Backports for RHEL6.2 RHEL 6.3 and SLES11
+ SP2
 
 Signed-off-by: Vipul Pandya <vipul@chelsio.com>
 ---
  drivers/infiniband/hw/cxgb3/iwch_cm.c |   29 +++++++++++++++++++++++++++++
- 1 files changed, 29 insertions(+), 0 deletions(-)
+ 1 file changed, 29 insertions(+)
 
 diff --git a/drivers/infiniband/hw/cxgb3/iwch_cm.c b/drivers/infiniband/hw/cxgb3/iwch_cm.c
 index 740dcc0..c0b492f 100644
@@ -71,5 +72,5 @@ index 740dcc0..c0b492f 100644
        if (!ep->l2t) {
                printk(KERN_ERR MOD "%s - cannot alloc l2e.\n", __func__);
 -- 
-1.7.0.4
+1.7.9.5
 
index 5b84ff0886236e19d177fcea6c1d72076407f2d7..f06d32a9ab99ae77e9dde7eec181c465dc6ffbdd 100644 (file)
@@ -1,17 +1,17 @@
-From a31706eefdd5fa6cbcfd5e9a85ccdef74dba374e Mon Sep 17 00:00:00 2001
+From e5eec669a7c0b85a2a4eae05684b61ade26b077a Mon Sep 17 00:00:00 2001
 From: Vipul Pandya <root@silicon.(none)>
 Date: Mon, 30 Jul 2012 16:59:42 +0530
-Subject: [PATCH 17/21] iw_cxgb4: Backports for RHEL6.2 RHEL6.3 and SLES11 SP2
+Subject: [PATCH 17/25] iw_cxgb4: Backports for RHEL6.2 RHEL6.3 and SLES11 SP2
 
 Signed-off-by: Vipul Pandya <root@silicon.(none)>
 ---
  drivers/infiniband/hw/cxgb4/cm.c       |   45 ++++++++++++++++++++++++++++++++
- drivers/infiniband/hw/cxgb4/cq.c       |   12 ++++++++
+ drivers/infiniband/hw/cxgb4/cq.c       |   12 +++++++++
  drivers/infiniband/hw/cxgb4/iw_cxgb4.h |    4 +++
- drivers/infiniband/hw/cxgb4/mem.c      |    8 +++++
+ drivers/infiniband/hw/cxgb4/mem.c      |    8 ++++++
  drivers/infiniband/hw/cxgb4/qp.c       |   17 +++++++++++-
- drivers/infiniband/hw/cxgb4/t4.h       |   12 ++++++++
- 6 files changed, 97 insertions(+), 1 deletions(-)
+ drivers/infiniband/hw/cxgb4/t4.h       |   12 +++++++++
+ 6 files changed, 97 insertions(+), 1 deletion(-)
 
 diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
 index b18870c..c1805bd 100644
@@ -289,5 +289,5 @@ index 16f26ab..cafdb7c 100644
        void __iomem *gts;
        struct c4iw_rdev *rdev;
 -- 
-1.7.0.4
+1.7.9.5
 
index b5354abe189ec33cff11e7eaf9a7dfd39f3c2bc0..0193ffdba6d629031dce09e2163a2520f001c3b3 100644 (file)
@@ -1,7 +1,7 @@
-From a5eccddfb48b63a8cddec33b39544136aa3378ee Mon Sep 17 00:00:00 2001
+From 23506f2107b334607305e2d34b760f2af6515186 Mon Sep 17 00:00:00 2001
 From: Vladimir Sokolovsky <vlad@mellanox.com>
 Date: Thu, 2 Aug 2012 16:12:39 +0300
-Subject: [PATCH 18/21] IPoIB: Backports for RHEL6.2 RHEL6.3 and SLES11 SP2
+Subject: [PATCH 18/25] IPoIB: Backports for RHEL6.2 RHEL6.3 and SLES11 SP2
 
 Signed-off-by: Vladimir Sokolovsky <vlad@mellanox.com>
 ---
@@ -9,7 +9,7 @@ Signed-off-by: Vladimir Sokolovsky <vlad@mellanox.com>
  drivers/infiniband/ulp/ipoib/ipoib_cm.c        |   21 +++++++++++++++++++++
  drivers/infiniband/ulp/ipoib/ipoib_main.c      |   18 ++++++++++++++++++
  drivers/infiniband/ulp/ipoib/ipoib_multicast.c |   19 +++++++++++++++++++
- 4 files changed, 61 insertions(+), 0 deletions(-)
+ 4 files changed, 61 insertions(+)
 
 diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h
 index 86df632..a876f24 100644
@@ -202,5 +202,5 @@ index 20ebc6f..4a4c64a 100644
                mcast = __ipoib_mcast_find(dev, &mgid);
                if (!mcast || test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags)) {
 -- 
-1.7.0.4
+1.7.9.5
 
index 1e64d20286d8ead4f428f174c2b1daa7cf024876..3763c8a1fb7379c97d96d881299fb951b1fc20c2 100644 (file)
@@ -1,7 +1,7 @@
-From 4c1b99148b5eeb1cdb2e251919a083b03aca35a4 Mon Sep 17 00:00:00 2001
+From 1625ace4f5f1fa23ac03766ad4a201684fc998a6 Mon Sep 17 00:00:00 2001
 From: Vladimir Sokolovsky <vlad@mellanox.com>
 Date: Sun, 5 Aug 2012 22:00:42 +0300
-Subject: [PATCH 19/21] mlx4_en: Backports for RHEL6.2 RHEL6.3 and SLES11 SP2
+Subject: [PATCH 19/25] mlx4_en: Backports for RHEL6.2 RHEL6.3 and SLES11 SP2
 
 Signed-off-by: Vladimir Sokolovsky <vlad@mellanox.com>
 ---
@@ -9,7 +9,7 @@ Signed-off-by: Vladimir Sokolovsky <vlad@mellanox.com>
  drivers/net/ethernet/mellanox/mlx4/en_netdev.c  |   29 ++++++++++++
  drivers/net/ethernet/mellanox/mlx4/en_rx.c      |   56 +++++++++++++++++++++++
  drivers/net/ethernet/mellanox/mlx4/en_tx.c      |    4 ++
- 4 files changed, 93 insertions(+), 0 deletions(-)
+ 4 files changed, 93 insertions(+)
 
 diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
 index 72901ce..332e851 100644
@@ -325,5 +325,5 @@ index 019d856..5a2bddd 100644
  
        if (vlan_tx_tag_present(skb))
 -- 
-1.7.0.4
+1.7.9.5
 
index ea23b5abb0937696ea5fb917f5a74503a831989b..1e542da8296e0aabeb609306f89332e5ea1c0f93 100644 (file)
@@ -1,14 +1,14 @@
-From 281fbad725657ca5c8593a849010529fd78f3988 Mon Sep 17 00:00:00 2001
+From b824c830e756c1466a7a84109b9954d2772586b9 Mon Sep 17 00:00:00 2001
 From: Jeff Becker <jeffrey.c.becker@nasa.gov>
 Date: Fri, 24 Aug 2012 11:18:23 -0700
-Subject: [PATCH 20/21] NFSRDMA: RHEL6.3 and SLES11 SP2 backport
+Subject: [PATCH 20/25] NFSRDMA: RHEL6.3 and SLES11 SP2 backport
 
 Signed-off-by: Jeff Becker <Jeffrey.C.Becker@nasa.gov>
 ---
  net/sunrpc/xprtrdma/rpc_rdma.c           |   16 ++++++++++++++++
  net/sunrpc/xprtrdma/svc_rdma_transport.c |    4 ++++
  net/sunrpc/xprtrdma/transport.c          |   12 ++++++++++++
- 3 files changed, 32 insertions(+), 0 deletions(-)
+ 3 files changed, 32 insertions(+)
 
 diff --git a/net/sunrpc/xprtrdma/rpc_rdma.c b/net/sunrpc/xprtrdma/rpc_rdma.c
 index 558fbab..0a6c1db 100644
@@ -109,5 +109,5 @@ index b446e10..9419f34 100644
  
  /*
 -- 
-1.7.0.4
+1.7.9.5
 
index ca0bbbcbd548e2fb1a43b4e38d89c0d37c273efd..f47d3ad07e1f9dace27e84b14f11d493fa0204a0 100644 (file)
@@ -1,15 +1,15 @@
-From abd0f3494cc9ac2653dc3b451429cddd7c25def9 Mon Sep 17 00:00:00 2001
+From 40378c271c9fbbb445c7c1233444c795004d6232 Mon Sep 17 00:00:00 2001
 From: Tatyana Nikolova <Tatyana.E.Nikolova@intel.com>
 Date: Sat, 25 Aug 2012 20:39:12 -0500
-Subject: [PATCH 21/21] RDMA/nes: Updated backports
+Subject: [PATCH 21/25] RDMA/nes: Updated backports
 
 OFED-3.5/NES: Updated backports to be applied on top of the previously submitted backport patch.
 
 Signed-off-by: Tatyana Nikolova <Tatyana.E.Nikolova@intel.com>
 ---
  drivers/infiniband/hw/nes/nes_hw.c  |   11 ++++++++
- drivers/infiniband/hw/nes/nes_hw.h  |    3 ++
- drivers/infiniband/hw/nes/nes_nic.c |   49 ++++++++++++++++++++++++++++------
+ drivers/infiniband/hw/nes/nes_hw.h  |    3 +++
+ drivers/infiniband/hw/nes/nes_nic.c |   49 ++++++++++++++++++++++++++++-------
  3 files changed, 54 insertions(+), 9 deletions(-)
 
 diff --git a/drivers/infiniband/hw/nes/nes_hw.c b/drivers/infiniband/hw/nes/nes_hw.c
@@ -157,5 +157,5 @@ index d54776f..696b80e 100644
  }
  
 -- 
-1.7.0.4
+1.7.9.5
 
index 3be89ba2c57f6e305babefb50be362dae4c44d11..0995e247c4824567cfd8a2186dbe726b664b910a 100644 (file)
@@ -1,7 +1,7 @@
-From 6086904c02bba993fc4c8da95e2c6ca115469fac Mon Sep 17 00:00:00 2001
+From 9a2a5815ae4484d558d115380256a092ee08554e Mon Sep 17 00:00:00 2001
 From: Vipul Pandya <vipul@chelsio.com>
 Date: Fri, 21 Sep 2012 04:53:36 -0700
-Subject: [PATCH] iw_cxgb4: Fix bug 2369 in OFED bugzilla
+Subject: [PATCH 22/25] iw_cxgb4: Fix bug 2369 in OFED bugzilla
 
 Bug 2369 - [OFED-3.5] Trace observed in dmesg while running IMB-MPI1 using
 openmpi-1.6
@@ -15,7 +15,7 @@ which was resulting in the trace.
 Signed-off-by: Vipul Pandya <vipul@chelsio.com>
 ---
  drivers/infiniband/hw/cxgb4/cm.c |    2 ++
- 1 files changed, 2 insertions(+), 0 deletions(-)
+ 1 file changed, 2 insertions(+)
 
 diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
 index c1805bd..d6fea23 100644
@@ -32,5 +32,5 @@ index c1805bd..d6fea23 100644
        return err;
  }
 -- 
-1.7.0.4
+1.7.9.5
 
diff --git a/patches/0023-RDMA-nes-Fix-for-loopback-MAC-address-Backport-chang.patch b/patches/0023-RDMA-nes-Fix-for-loopback-MAC-address-Backport-chang.patch
new file mode 100644 (file)
index 0000000..31a4aa9
--- /dev/null
@@ -0,0 +1,34 @@
+From 5ebf4547e291d3f94ae4e13956a7c3635896520a Mon Sep 17 00:00:00 2001
+From: Tatyana Nikolova <Tatyana.E.Nikolova@intel.com>
+Date: Fri, 7 Sep 2012 19:41:29 -0500
+Subject: [PATCH 23/25] RDMA/nes: Fix for loopback MAC address - Backport
+ change
+
+RDMA/nes: Fix for resolving correctly the MAC address for loopback connection
+(the patch should be applied on top of previous backports)
+
+Signed-off-by: Tatyana Nikolova <Tatyana.E.Nikolova@intel.com>
+---
+ drivers/infiniband/hw/nes/nes_cm.c |    5 +----
+ 1 file changed, 1 insertion(+), 4 deletions(-)
+
+diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c
+index 0b5b1a9..d9fa245 100644
+--- a/drivers/infiniband/hw/nes/nes_cm.c
++++ b/drivers/infiniband/hw/nes/nes_cm.c
+@@ -1363,11 +1363,8 @@ static int nes_addr_resolve_neigh(struct nes_vnic *nesvnic, u32 dst_ip, int arpi
+       else
+               netdev = nesvnic->netdev;
+-#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,1,0))
+       neigh = neigh_lookup(&arp_tbl, &rt->rt_gateway, netdev);
+-#else
+-      neigh = dst_neigh_lookup(&rt->dst, &dst_ip);
+-#endif
++
+       rcu_read_lock();
+       if (neigh) {
+               if (neigh->nud_state & NUD_VALID) {
+-- 
+1.7.9.5
+
diff --git a/patches/0024-RDMA-nes-TSO-is-enabled-again-for-linux-3.5-Backport.patch b/patches/0024-RDMA-nes-TSO-is-enabled-again-for-linux-3.5-Backport.patch
new file mode 100644 (file)
index 0000000..3f6ef4e
--- /dev/null
@@ -0,0 +1,34 @@
+From 7bbcd56628309225e630e511e612cd3b82762294 Mon Sep 17 00:00:00 2001
+From: Tatyana Nikolova <Tatyana.E.Nikolova@intel.com>
+Date: Fri, 7 Sep 2012 19:45:21 -0500
+Subject: [PATCH 24/25] RDMA/nes: TSO is enabled again for linux-3.5 -
+ Backport change
+
+RDMA/nes: TSO is enabled again for linux-3.5
+(the patch should be applied on top of previous backports)
+
+Signed-off-by: Tatyana Nikolova <Tatyana.E.Nikolova@intel.com>
+---
+ drivers/infiniband/hw/nes/nes_nic.c |    2 --
+ 1 file changed, 2 deletions(-)
+
+diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c
+index 696b80e..7674358 100644
+--- a/drivers/infiniband/hw/nes/nes_nic.c
++++ b/drivers/infiniband/hw/nes/nes_nic.c
+@@ -1760,12 +1760,10 @@ struct net_device *nes_netdev_init(struct nes_device *nesdev,
+       netdev->features |= NETIF_F_HIGHDMA | NETIF_F_HW_VLAN_TX;
+       if ((nesvnic->logical_port < 2) || (nesdev->nesadapter->hw_rev != NE020_REV)) {
+-#if (LINUX_VERSION_CODE <= KERNEL_VERSION(3,3,0))
+               netdev->features |= NETIF_F_TSO;
+ #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,39))
+               netdev->hw_features |= NETIF_F_TSO;
+ #endif
+-#endif
+       }
+       nes_debug(NES_DBG_INIT, "nesvnic = %p, reported features = 0x%lX, QPid = %d,"
+-- 
+1.7.9.5
+
diff --git a/patches/0025-ib_srp-Backport-to-older-kernels.patch b/patches/0025-ib_srp-Backport-to-older-kernels.patch
new file mode 100644 (file)
index 0000000..eb1945a
--- /dev/null
@@ -0,0 +1,186 @@
+From a9832b255f02b99997a13e2d25698f1228292cf4 Mon Sep 17 00:00:00 2001
+From: Bart Van Assche <bvanassche@acm.org>
+Date: Wed, 12 Sep 2012 14:31:04 +0200
+Subject: [PATCH 25/25] ib_srp: Backport to older kernels
+
+This patch has been tested on RHEL 6.0, RHEL 6.1, RHEL 6.2, RHEL 6.3
+and Ubuntu 10.04.
+
+Signed-off-by: Bart Van Assche <bvanassche@acm.org>
+---
+ drivers/infiniband/ulp/srp/ib_srp.c |  111 ++++++++++++++++++++++++++++++++++-
+ 1 file changed, 108 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
+index bcbf22e..fab74e0 100644
+--- a/drivers/infiniband/ulp/srp/ib_srp.c
++++ b/drivers/infiniband/ulp/srp/ib_srp.c
+@@ -32,6 +32,10 @@
+ #define pr_fmt(fmt) PFX fmt
++#define DRV_NAME      "ib_srp"
++#define PFX           DRV_NAME ": "
++
++#include <linux/version.h>
+ #include <linux/module.h>
+ #include <linux/init.h>
+ #include <linux/slab.h>
+@@ -41,7 +45,11 @@
+ #include <linux/random.h>
+ #include <linux/jiffies.h>
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 37)
+ #include <linux/atomic.h>
++#else
++#include <asm/atomic.h>
++#endif
+ #include <scsi/scsi.h>
+ #include <scsi/scsi_device.h>
+@@ -51,11 +59,54 @@
+ #include "ib_srp.h"
+-#define DRV_NAME      "ib_srp"
+-#define PFX           DRV_NAME ": "
+ #define DRV_VERSION   "0.2"
+ #define DRV_RELDATE   "November 1, 2005"
++#ifndef pr_warn
++#define pr_warn pr_warning
++#endif
++
++#if !defined(RHEL_MAJOR) && LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 37) || \
++      RHEL_MAJOR -0 < 6 || RHEL_MAJOR -0 == 6 && RHEL_MINOR -0 == 0
++struct srp_cred_req {
++      u8      opcode;
++      u8      sol_not;
++      u8      reserved[2];
++      __be32  req_lim_delta;
++      u64     tag;
++};
++
++struct srp_cred_rsp {
++      u8      opcode;
++      u8      reserved[7];
++      u64     tag;
++};
++
++/*
++ * The SRP spec defines the fixed portion of the AER_REQ structure to be
++ * 36 bytes, so it needs to be packed to avoid having it padded to 40 bytes
++ * on 64-bit architectures.
++ */
++struct srp_aer_req {
++      u8      opcode;
++      u8      sol_not;
++      u8      reserved[2];
++      __be32  req_lim_delta;
++      u64     tag;
++      u32     reserved2;
++      __be64  lun;
++      __be32  sense_data_len;
++      u32     reserved3;
++      u8      sense_data[0];
++} __attribute__((packed));
++
++struct srp_aer_rsp {
++      u8      opcode;
++      u8      reserved[7];
++      u64     tag;
++};
++#endif
++
+ MODULE_AUTHOR("Roland Dreier");
+ MODULE_DESCRIPTION("InfiniBand SCSI RDMA Protocol initiator "
+                  "v" DRV_VERSION " (" DRV_RELDATE ")");
+@@ -675,7 +726,11 @@ err:
+       if (target->state == SRP_TARGET_CONNECTING) {
+               target->state = SRP_TARGET_DEAD;
+               INIT_WORK(&target->work, srp_remove_work);
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 36)
+               queue_work(ib_wq, &target->work);
++#else
++              schedule_work(&target->work);
++#endif
+       }
+       spin_unlock_irq(&target->lock);
+@@ -1254,7 +1309,50 @@ static void srp_send_completion(struct ib_cq *cq, void *target_ptr)
+       }
+ }
+-static int srp_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scmnd)
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 36)
++/*
++ * Kernel with host lock push-down patch. See also upstream commit
++ * f281233d3eba15fb225d21ae2e228fd4553d824a.
++ */
++#define SRP_QUEUECOMMAND srp_queuecommand
++#elif defined(RHEL_MAJOR) && RHEL_MAJOR -0 == 6 && RHEL_MINOR -0 >= 2
++/*
++ * Kernel with lockless SCSI command dispatching enabled.
++ * See also the RHEL 6.2 release notes (http://access.redhat.com/knowledge/docs/en-US/Red_Hat_Enterprise_Linux/6/html-single/6.2_Release_Notes/index.html).
++ */
++static int srp_queuecommand_wrk(struct Scsi_Host *shost,
++                              struct scsi_cmnd *scmnd);
++static int srp_queuecommand(struct scsi_cmnd *scmnd,
++                          void (*done)(struct scsi_cmnd *))
++{
++      scmnd->scsi_done = done;
++      return srp_queuecommand_wrk(scmnd->device->host, scmnd);
++}
++#define SRP_QUEUECOMMAND srp_queuecommand_wrk
++#else
++/*
++ * Kernel that invokes srp_queuecommand with the SCSI host lock held.
++ */
++static int srp_queuecommand_wrk(struct Scsi_Host *shost,
++                              struct scsi_cmnd *scmnd);
++static int srp_queuecommand(struct scsi_cmnd *scmnd,
++                          void (*done)(struct scsi_cmnd *))
++{
++      struct Scsi_Host *shost = scmnd->device->host;
++      int res;
++
++      spin_unlock_irq(shost->host_lock);
++
++      scmnd->scsi_done = done;
++      res = srp_queuecommand_wrk(shost, scmnd);
++
++      spin_lock_irq(shost->host_lock);
++      return res;
++}
++#define SRP_QUEUECOMMAND srp_queuecommand_wrk
++#endif
++
++static int SRP_QUEUECOMMAND(struct Scsi_Host *shost, struct scsi_cmnd *scmnd)
+ {
+       struct srp_target_port *target = host_to_target(shost);
+       struct srp_request *req;
+@@ -1822,6 +1920,9 @@ static struct scsi_host_template srp_template = {
+       .name                           = "InfiniBand SRP initiator",
+       .proc_name                      = DRV_NAME,
+       .info                           = srp_target_info,
++#if defined(RHEL_MAJOR) && RHEL_MAJOR -0 == 6 && RHEL_MINOR -0 >= 2
++      .lockless                       = true,
++#endif
+       .queuecommand                   = srp_queuecommand,
+       .eh_abort_handler               = srp_abort,
+       .eh_device_reset_handler        = srp_reset_device,
+@@ -2412,7 +2513,11 @@ static void srp_remove_one(struct ib_device *device)
+                * started before we marked our target ports as
+                * removed, and any target port removal tasks.
+                */
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 36)
+               flush_workqueue(ib_wq);
++#else
++              flush_scheduled_work();
++#endif
+               list_for_each_entry_safe(target, tmp_target,
+                                        &host->target_list, list) {
+-- 
+1.7.9.5
+