git.openfabrics.org - ~emulex/compat-rdma.git/commitdiff
linux-next-pending: Pull pending iw_cxgb4 fixes
author     Steve Wise <swise@opengridcomputing.com>
           Thu, 3 Apr 2014 17:47:23 +0000 (12:47 -0500)
committer  Steve Wise <swise@opengridcomputing.com>
           Thu, 3 Apr 2014 17:47:23 +0000 (12:47 -0500)
Pull in these commits from:

git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband.git

branch for-next:

1410e14 RDMA/cxgb4: Disable DSGL use.
13f17b7 RDMA/cxgb4: rx_data() needs to hold the ep mutex
925902f RDMA/cxgb4: Drop RX_DATA packets if the endpoint is gone
9306dcb RDMA/cxgb4: Lock around accept/reject downcalls
9c88aa0 RDMA/cxgb4: Update snd_seq when sending MPA messages
be13b2d RDMA/cxgb4: Connect_request_upcall fixes
70b9c66 RDMA/cxgb4: Ignore read reponse type 1 CQEs
1ce1d47 RDMA/cxgb4: Fix possible memory leak in RX_PKT processing
dbb084c RDMA/cxgb4: Don't leak skb in c4iw_uld_rx_handler()
eda6d1d RDMA/cxgb4: Save the correct map length for fast_reg_page_lists
df2d513 RDMA/cxgb4: Default peer2peer mode to 1
ba32de9 RDMA/cxgb4: Mind the sq_sig_all/sq_sig_type QP attributes
8a9c399 RDMA/cxgb4: Fix incorrect BUG_ON conditions
ebf0006 RDMA/cxgb4: Always release neigh entry
f8e8190 RDMA/cxgb4: Allow loopback connections
ffd4359 RDMA/cxgb4: Cap CQ size at T4_MAX_IQ_SIZE
e24a72a RDMA/cxgb4: Fix four byte info leak in c4iw_create_cq()
ff1706f RDMA/cxgb4: Fix underflows in c4iw_create_qp()

Signed-off-by: Steve Wise <swise@opengridcomputing.com>
18 files changed:
linux-next-pending/0030-RDMA-cxgb4--Fix-underflows-in-c4iw_create_qp().patch [new file with mode: 0644]
linux-next-pending/0031-RDMA-cxgb4--Fix-four-byte-info-leak-in-c4iw_create_cq().patch [new file with mode: 0644]
linux-next-pending/0032-RDMA-cxgb4--Cap-CQ-size-at-T4_MAX_IQ_SIZE.patch [new file with mode: 0644]
linux-next-pending/0033-RDMA-cxgb4--Allow-loopback-connections.patch [new file with mode: 0644]
linux-next-pending/0034-RDMA-cxgb4--Always-release-neigh-entry.patch [new file with mode: 0644]
linux-next-pending/0035-RDMA-cxgb4--Fix-incorrect-BUG_ON-conditions.patch [new file with mode: 0644]
linux-next-pending/0036-RDMA-cxgb4--Mind-the-sq_sig_all-sq_sig_type-QP-attributes.patch [new file with mode: 0644]
linux-next-pending/0037-RDMA-cxgb4--Default-peer2peer-mode-to-1.patch [new file with mode: 0644]
linux-next-pending/0038-RDMA-cxgb4--Save-the-correct-map-length-for-fast_reg_page_lists.patch [new file with mode: 0644]
linux-next-pending/0039-RDMA-cxgb4--Don't-leak-skb-in-c4iw_uld_rx_handler().patch [new file with mode: 0644]
linux-next-pending/0040-RDMA-cxgb4--Fix-possible-memory-leak-in-RX_PKT-processing.patch [new file with mode: 0644]
linux-next-pending/0041-RDMA-cxgb4--Ignore-read-reponse-type-1-CQEs.patch [new file with mode: 0644]
linux-next-pending/0042-RDMA-cxgb4--Connect_request_upcall-fixes.patch [new file with mode: 0644]
linux-next-pending/0043-RDMA-cxgb4--Update-snd_seq-when-sending-MPA-messages.patch [new file with mode: 0644]
linux-next-pending/0044-RDMA-cxgb4--Lock-around-accept-reject-downcalls.patch [new file with mode: 0644]
linux-next-pending/0045-RDMA-cxgb4--Drop-RX_DATA-packets-if-the-endpoint-is-gone.patch [new file with mode: 0644]
linux-next-pending/0046-RDMA-cxgb4--rx_data()-needs-to-hold-the-ep-mutex.patch [new file with mode: 0644]
linux-next-pending/0047-RDMA-cxgb4--Disable-DSGL-use..patch [new file with mode: 0644]

diff --git a/linux-next-pending/0030-RDMA-cxgb4--Fix-underflows-in-c4iw_create_qp().patch b/linux-next-pending/0030-RDMA-cxgb4--Fix-underflows-in-c4iw_create_qp().patch
new file mode 100644 (file)
index 0000000..9a13110
--- /dev/null
@@ -0,0 +1,26 @@
+commit ff1706f4feb8e0e1a2e56a8dd57e17a4b45649b5
+Author: Dan Carpenter <dan.carpenter@oracle.com>
+Date:   Sat Oct 19 12:14:12 2013 +0300
+
+    RDMA/cxgb4: Fix underflows in c4iw_create_qp()
+    
+    These sizes should be unsigned so we don't allow negative values and
+    have underflow bugs.  These can come from the user so there may be
+    security implications, but I have not tested this.
+    
+    Signed-off-by: Dan Carpenter <dan.carpenter@oracle.com>
+    Signed-off-by: Roland Dreier <roland@purestorage.com>
+
+diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c
+index 5829367..72ea152 100644
+--- a/drivers/infiniband/hw/cxgb4/qp.c
++++ b/drivers/infiniband/hw/cxgb4/qp.c
+@@ -1533,7 +1533,7 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
+       struct c4iw_cq *schp;
+       struct c4iw_cq *rchp;
+       struct c4iw_create_qp_resp uresp;
+-      int sqsize, rqsize;
++      unsigned int sqsize, rqsize;
+       struct c4iw_ucontext *ucontext;
+       int ret;
+       struct c4iw_mm_entry *mm1, *mm2, *mm3, *mm4, *mm5 = NULL;
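
A note on the class of bug fixed above: a signed, user-controlled size can pass a bounds check as a negative number and then wrap to a huge value once it meets unsigned arithmetic. A standalone user-space sketch of the hazard (illustrative names, not driver code):

#include <stdio.h>

int main(void)
{
    int sqsize = -1;               /* hostile user-supplied value */
    unsigned int max = 1024;

    /* Signed comparison: -1 < 1024, so the "too big" check passes. */
    if (sqsize > (int)max) {
        puts("rejected");
        return 1;
    }

    /* Converting to size_t wraps -1 to SIZE_MAX, so the multiply
     * below asks for a nonsense allocation size. With sqsize
     * declared unsigned, as in the fix, the check above rejects
     * the wrapped value instead. */
    printf("would allocate %zu bytes\n", (size_t)sqsize * 64);
    return 0;
}
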
diff --git a/linux-next-pending/0031-RDMA-cxgb4--Fix-four-byte-info-leak-in-c4iw_create_cq().patch b/linux-next-pending/0031-RDMA-cxgb4--Fix-four-byte-info-leak-in-c4iw_create_cq().patch
new file mode 100644 (file)
index 0000000..0aced04
--- /dev/null
@@ -0,0 +1,24 @@
+commit e24a72a3302a638d4c6e77f0b40c45cc61c3f089
+Author: Dan Carpenter <dan.carpenter@oracle.com>
+Date:   Sat Oct 19 12:14:35 2013 +0300
+
+    RDMA/cxgb4: Fix four byte info leak in c4iw_create_cq()
+    
+    There is a four byte hole at the end of the "uresp" struct after the
+    ->qid_mask member.
+    
+    Signed-off-by: Dan Carpenter <dan.carpenter@oracle.com>
+    Signed-off-by: Roland Dreier <roland@purestorage.com>
+
+diff --git a/drivers/infiniband/hw/cxgb4/cq.c b/drivers/infiniband/hw/cxgb4/cq.c
+index 88de3aa..e436ead 100644
+--- a/drivers/infiniband/hw/cxgb4/cq.c
++++ b/drivers/infiniband/hw/cxgb4/cq.c
+@@ -930,6 +930,7 @@ struct ib_cq *c4iw_create_cq(struct ib_device *ibdev, int entries,
+               if (!mm2)
+                       goto err4;
++              memset(&uresp, 0, sizeof(uresp));
+               uresp.qid_mask = rhp->rdev.cqmask;
+               uresp.cqid = chp->cq.cqid;
+               uresp.size = chp->cq.size;
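
Background for the memset above (a user-space sketch with an illustrative struct, not the real uresp layout): compilers insert padding to satisfy alignment, and copying a stack struct to user space without zeroing it first leaks whatever stale bytes sit in the hole.

#include <stdio.h>
#include <stdint.h>
#include <string.h>

struct resp {                /* illustrative, not the real uresp */
    uint64_t qid_mask;
    uint32_t cqid;           /* 4 trailing padding bytes on LP64 ABIs */
};

int main(void)
{
    struct resp r;

    /* Without this memset the padding bytes keep stale stack data
     * and would travel to user space along with the struct: a
     * four-byte info leak, which is what the patch prevents. */
    memset(&r, 0, sizeof(r));
    r.qid_mask = 0xff;
    r.cqid = 42;

    printf("named fields: %zu bytes, sizeof(struct): %zu bytes\n",
           sizeof(r.qid_mask) + sizeof(r.cqid), sizeof(r));
    return 0;
}
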
diff --git a/linux-next-pending/0032-RDMA-cxgb4--Cap-CQ-size-at-T4_MAX_IQ_SIZE.patch b/linux-next-pending/0032-RDMA-cxgb4--Cap-CQ-size-at-T4_MAX_IQ_SIZE.patch
new file mode 100644 (file)
index 0000000..67fac89
--- /dev/null
@@ -0,0 +1,22 @@
+commit ffd435924c86de055d33fe59941841819eef9f6a
+Author: Steve Wise <swise@opengridcomputing.com>
+Date:   Wed Mar 19 17:44:38 2014 +0530
+
+    RDMA/cxgb4: Cap CQ size at T4_MAX_IQ_SIZE
+    
+    Signed-off-by: Steve Wise <swise@opengridcomputing.com>
+    Signed-off-by: Roland Dreier <roland@purestorage.com>
+
+diff --git a/drivers/infiniband/hw/cxgb4/cq.c b/drivers/infiniband/hw/cxgb4/cq.c
+index e436ead..906119f 100644
+--- a/drivers/infiniband/hw/cxgb4/cq.c
++++ b/drivers/infiniband/hw/cxgb4/cq.c
+@@ -881,7 +881,7 @@ struct ib_cq *c4iw_create_cq(struct ib_device *ibdev, int entries,
+       /*
+        * Make actual HW queue 2x to avoid cdix_inc overflows.
+        */
+-      hwentries = entries * 2;
++      hwentries = min(entries * 2, T4_MAX_IQ_SIZE);
+       /*
+        * Make HW queue at least 64 entries so GTS updates aren't too
diff --git a/linux-next-pending/0033-RDMA-cxgb4--Allow-loopback-connections.patch b/linux-next-pending/0033-RDMA-cxgb4--Allow-loopback-connections.patch
new file mode 100644 (file)
index 0000000..b6f9588
--- /dev/null
@@ -0,0 +1,25 @@
+commit f8e819081f797df355cffbdedb9301ea50ae76b2
+Author: Steve Wise <swise@opengridcomputing.com>
+Date:   Wed Mar 19 17:44:39 2014 +0530
+
+    RDMA/cxgb4: Allow loopback connections
+    
+    find_route() must treat loopback as a valid egress interface.
+    
+    Signed-off-by: Steve Wise <swise@opengridcomputing.com>
+    Signed-off-by: Roland Dreier <roland@purestorage.com>
+
+diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
+index d286bde..360807e 100644
+--- a/drivers/infiniband/hw/cxgb4/cm.c
++++ b/drivers/infiniband/hw/cxgb4/cm.c
+@@ -400,7 +400,8 @@ static struct dst_entry *find_route(struct c4iw_dev *dev, __be32 local_ip,
+       n = dst_neigh_lookup(&rt->dst, &peer_ip);
+       if (!n)
+               return NULL;
+-      if (!our_interface(dev, n->dev)) {
++      if (!our_interface(dev, n->dev) &&
++          !(n->dev->flags & IFF_LOOPBACK)) {
+               dst_release(&rt->dst);
+               return NULL;
+       }
diff --git a/linux-next-pending/0034-RDMA-cxgb4--Always-release-neigh-entry.patch b/linux-next-pending/0034-RDMA-cxgb4--Always-release-neigh-entry.patch
new file mode 100644 (file)
index 0000000..625021b
--- /dev/null
@@ -0,0 +1,38 @@
+commit ebf00060c33b9d0946384fa6f440df7ea35a569e
+Author: Steve Wise <swise@opengridcomputing.com>
+Date:   Wed Mar 19 17:44:40 2014 +0530
+
+    RDMA/cxgb4: Always release neigh entry
+    
+    Always release the neigh entry in rx_pkt().
+    
+    Based on original work by Santosh Rastapur <santosh@chelsio.com>.
+
+    NOTE: patch modified to apply to OFED backports.
+    
+    Signed-off-by: Steve Wise <swise@opengridcomputing.com>
+    Signed-off-by: Roland Dreier <roland@purestorage.com>
+
+diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
+index 360807e..2b2af96 100644
+--- a/drivers/infiniband/hw/cxgb4/cm.c
++++ b/drivers/infiniband/hw/cxgb4/cm.c
+@@ -3426,15 +3426,15 @@ static int rx_pkt(struct c4iw_dev *dev,
+               pi = (struct port_info *)netdev_priv(pdev);
+               tx_chan = cxgb4_port_chan(pdev);
+       }
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 1, 0)
++      neigh_release(neigh);
++#endif
+       if (!e) {
+               pr_err("%s - failed to allocate l2t entry!\n",
+                      __func__);
+               goto free_dst;
+       }
+-#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 1, 0)
+-      neigh_release(neigh);
+-#endif
+       step = dev->rdev.lldi.nrxq / dev->rdev.lldi.nchan;
+       rss_qid = dev->rdev.lldi.rxq_ids[pi->port_id * step];
+       window = (__force u16) htons((__force u16)tcph->window);
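
The reordering matters because dst_neigh_lookup() returns a referenced entry that every exit path must drop; hoisting neigh_release() above the !e branch closes the leak on the error path. A generic user-space sketch of the idiom (illustrative names, not driver code):

#include <stdio.h>

struct ref { int count; };                 /* stand-in for a neigh entry */

static void ref_put(struct ref *r) { r->count--; }

/* Drop the temporary reference as soon as it is no longer needed,
 * before any error branch; releasing only on the success path is
 * exactly the leak rx_pkt() had. */
static int consume(struct ref *neigh, void *l2t_entry)
{
    ref_put(neigh);
    if (!l2t_entry) {
        fprintf(stderr, "failed to allocate l2t entry\n");
        return -1;                         /* no leak on this path */
    }
    return 0;
}

int main(void)
{
    struct ref neigh = { .count = 1 };
    consume(&neigh, NULL);
    printf("refcount after error path: %d\n", neigh.count);
    return 0;
}
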
diff --git a/linux-next-pending/0035-RDMA-cxgb4--Fix-incorrect-BUG_ON-conditions.patch b/linux-next-pending/0035-RDMA-cxgb4--Fix-incorrect-BUG_ON-conditions.patch
new file mode 100644 (file)
index 0000000..fc87887
--- /dev/null
@@ -0,0 +1,33 @@
+commit 8a9c399eeee8c2d99e22b975f6023001a1fde88f
+Author: Steve Wise <swise@opengridcomputing.com>
+Date:   Wed Mar 19 17:44:42 2014 +0530
+
+    RDMA/cxgb4: Fix incorrect BUG_ON conditions
+    
+    Based on original work from Jay Hernandez <jay@chelsio.com>
+    
+    Signed-off-by: Steve Wise <swise@opengridcomputing.com>
+    Signed-off-by: Roland Dreier <roland@purestorage.com>
+
+diff --git a/drivers/infiniband/hw/cxgb4/cq.c b/drivers/infiniband/hw/cxgb4/cq.c
+index 906119f..d6a7db2 100644
+--- a/drivers/infiniband/hw/cxgb4/cq.c
++++ b/drivers/infiniband/hw/cxgb4/cq.c
+@@ -603,7 +603,7 @@ proc_cqe:
+        */
+       if (SQ_TYPE(hw_cqe)) {
+               int idx = CQE_WRID_SQ_IDX(hw_cqe);
+-              BUG_ON(idx > wq->sq.size);
++              BUG_ON(idx >= wq->sq.size);
+               /*
+               * Account for any unsignaled completions completed by
+@@ -617,7 +617,7 @@ proc_cqe:
+                       wq->sq.in_use -= wq->sq.size + idx - wq->sq.cidx;
+               else
+                       wq->sq.in_use -= idx - wq->sq.cidx;
+-              BUG_ON(wq->sq.in_use < 0 && wq->sq.in_use < wq->sq.size);
++              BUG_ON(wq->sq.in_use <= 0 && wq->sq.in_use >= wq->sq.size);
+               wq->sq.cidx = (uint16_t)idx;
+               PDBG("%s completing sq idx %u\n", __func__, wq->sq.cidx);
diff --git a/linux-next-pending/0036-RDMA-cxgb4--Mind-the-sq_sig_all-sq_sig_type-QP-attributes.patch b/linux-next-pending/0036-RDMA-cxgb4--Mind-the-sq_sig_all-sq_sig_type-QP-attributes.patch
new file mode 100644 (file)
index 0000000..e86a3ea
--- /dev/null
@@ -0,0 +1,52 @@
+commit ba32de9d8d8173a1d6dd1ed608c519d5d0a623bb
+Author: Steve Wise <swise@opengridcomputing.com>
+Date:   Wed Mar 19 17:44:43 2014 +0530
+
+    RDMA/cxgb4: Mind the sq_sig_all/sq_sig_type QP attributes
+    
+    Signed-off-by: Steve Wise <swise@opengridcomputing.com>
+    Signed-off-by: Roland Dreier <roland@purestorage.com>
+
+diff --git a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
+index 23eaeab..b810d2a 100644
+--- a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
++++ b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
+@@ -441,6 +441,7 @@ struct c4iw_qp {
+       atomic_t refcnt;
+       wait_queue_head_t wait;
+       struct timer_list timer;
++      int sq_sig_all;
+ };
+ static inline struct c4iw_qp *to_c4iw_qp(struct ib_qp *ibqp)
+diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c
+index 72ea152..723ad29 100644
+--- a/drivers/infiniband/hw/cxgb4/qp.c
++++ b/drivers/infiniband/hw/cxgb4/qp.c
+@@ -675,7 +675,7 @@ int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
+               fw_flags = 0;
+               if (wr->send_flags & IB_SEND_SOLICITED)
+                       fw_flags |= FW_RI_SOLICITED_EVENT_FLAG;
+-              if (wr->send_flags & IB_SEND_SIGNALED)
++              if (wr->send_flags & IB_SEND_SIGNALED || qhp->sq_sig_all)
+                       fw_flags |= FW_RI_COMPLETION_FLAG;
+               swsqe = &qhp->wq.sq.sw_sq[qhp->wq.sq.pidx];
+               switch (wr->opcode) {
+@@ -736,7 +736,8 @@ int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
+               }
+               swsqe->idx = qhp->wq.sq.pidx;
+               swsqe->complete = 0;
+-              swsqe->signaled = (wr->send_flags & IB_SEND_SIGNALED);
++              swsqe->signaled = (wr->send_flags & IB_SEND_SIGNALED) ||
++                                qhp->sq_sig_all;
+               swsqe->flushed = 0;
+               swsqe->wr_id = wr->wr_id;
+@@ -1605,6 +1606,7 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
+       qhp->attr.enable_bind = 1;
+       qhp->attr.max_ord = 1;
+       qhp->attr.max_ird = 1;
++      qhp->sq_sig_all = attrs->sq_sig_type == IB_SIGNAL_ALL_WR;
+       spin_lock_init(&qhp->lock);
+       mutex_init(&qhp->mutex);
+       init_waitqueue_head(&qhp->wait);
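
For context, this is the consumer-side setting the patch honors: a ULP that creates its QP with sq_sig_type == IB_SIGNAL_ALL_WR expects a completion for every send WR, even ones posted without IB_SEND_SIGNALED. A user-space analogue using libibverbs (a sketch assuming the standard verbs API; error handling omitted, link with -libverbs):

#include <infiniband/verbs.h>

/* Create an RC QP whose send queue is all-signaled. With
 * sq_sig_all set, the provider must generate a CQE for every
 * send WR; the kernel-side equivalent is the qhp->sq_sig_all
 * handling added in the patch above. */
struct ibv_qp *create_all_signaled_qp(struct ibv_pd *pd, struct ibv_cq *cq)
{
    struct ibv_qp_init_attr attr = {
        .send_cq    = cq,
        .recv_cq    = cq,
        .cap        = {
            .max_send_wr  = 64,
            .max_recv_wr  = 64,
            .max_send_sge = 1,
            .max_recv_sge = 1,
        },
        .qp_type    = IBV_QPT_RC,
        .sq_sig_all = 1,
    };
    return ibv_create_qp(pd, &attr);
}
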
diff --git a/linux-next-pending/0037-RDMA-cxgb4--Default-peer2peer-mode-to-1.patch b/linux-next-pending/0037-RDMA-cxgb4--Default-peer2peer-mode-to-1.patch
new file mode 100644 (file)
index 0000000..f7cb05a
--- /dev/null
@@ -0,0 +1,25 @@
+commit df2d5130ece9118591c2f3fbf0ee4a79183b4ccc
+Author: Steve Wise <swise@opengridcomputing.com>
+Date:   Wed Mar 19 17:44:44 2014 +0530
+
+    RDMA/cxgb4: Default peer2peer mode to 1
+    
+    Signed-off-by: Steve Wise <swise@opengridcomputing.com>
+    Signed-off-by: Roland Dreier <roland@purestorage.com>
+
+diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
+index 2b2af96..e1fc5c5 100644
+--- a/drivers/infiniband/hw/cxgb4/cm.c
++++ b/drivers/infiniband/hw/cxgb4/cm.c
+@@ -98,9 +98,9 @@ int c4iw_debug;
+ module_param(c4iw_debug, int, 0644);
+ MODULE_PARM_DESC(c4iw_debug, "Enable debug logging (default=0)");
+-static int peer2peer;
++static int peer2peer = 1;
+ module_param(peer2peer, int, 0644);
+-MODULE_PARM_DESC(peer2peer, "Support peer2peer ULPs (default=0)");
++MODULE_PARM_DESC(peer2peer, "Support peer2peer ULPs (default=1)");
+ static int p2p_type = FW_RI_INIT_P2PTYPE_READ_REQ;
+ module_param(p2p_type, int, 0644);
diff --git a/linux-next-pending/0038-RDMA-cxgb4--Save-the-correct-map-length-for-fast_reg_page_lists.patch b/linux-next-pending/0038-RDMA-cxgb4--Save-the-correct-map-length-for-fast_reg_page_lists.patch
new file mode 100644 (file)
index 0000000..c5bd852
--- /dev/null
@@ -0,0 +1,59 @@
+commit eda6d1d1b7932f90d55583f8f3835dd7d6b32543
+Author: Steve Wise <swise@opengridcomputing.com>
+Date:   Wed Mar 19 17:44:45 2014 +0530
+
+    RDMA/cxgb4: Save the correct map length for fast_reg_page_lists
+    
+    We cannot save the mapped length using the rdma max_page_list_len field
+    of the ib_fast_reg_page_list struct because the core code uses it.  This
+    results in an incorrect unmap of the page list in c4iw_free_fastreg_pbl().
+    
+    I found this with dma mapping debugging enabled in the kernel.  The
+    fix is to save the length in the c4iw_fr_page_list struct.
+    
+    Signed-off-by: Steve Wise <swise@opengridcomputing.com>
+    Signed-off-by: Roland Dreier <roland@purestorage.com>
+
+diff --git a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
+index b810d2a..a1e8f13 100644
+--- a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
++++ b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
+@@ -369,6 +369,7 @@ struct c4iw_fr_page_list {
+       DEFINE_DMA_UNMAP_ADDR(mapping);
+       dma_addr_t dma_addr;
+       struct c4iw_dev *dev;
++      int pll_len;
+ };
+ static inline struct c4iw_fr_page_list *to_c4iw_fr_page_list(
+diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c
+index 41b1195..22a2e3e 100644
+--- a/drivers/infiniband/hw/cxgb4/mem.c
++++ b/drivers/infiniband/hw/cxgb4/mem.c
+@@ -903,7 +903,11 @@ struct ib_fast_reg_page_list *c4iw_alloc_fastreg_pbl(struct ib_device *device,
+       dma_unmap_addr_set(c4pl, mapping, dma_addr);
+       c4pl->dma_addr = dma_addr;
+       c4pl->dev = dev;
+-      c4pl->ibpl.max_page_list_len = pll_len;
++      c4pl->pll_len = pll_len;
++
++      PDBG("%s c4pl %p pll_len %u page_list %p dma_addr %pad\n",
++           __func__, c4pl, c4pl->pll_len, c4pl->ibpl.page_list,
++           &c4pl->dma_addr);
+       return &c4pl->ibpl;
+ }
+@@ -912,8 +916,12 @@ void c4iw_free_fastreg_pbl(struct ib_fast_reg_page_list *ibpl)
+ {
+       struct c4iw_fr_page_list *c4pl = to_c4iw_fr_page_list(ibpl);
++      PDBG("%s c4pl %p pll_len %u page_list %p dma_addr %pad\n",
++           __func__, c4pl, c4pl->pll_len, c4pl->ibpl.page_list,
++           &c4pl->dma_addr);
++
+       dma_free_coherent(&c4pl->dev->rdev.lldi.pdev->dev,
+-                        c4pl->ibpl.max_page_list_len,
++                        c4pl->pll_len,
+                         c4pl->ibpl.page_list, dma_unmap_addr(c4pl, mapping));
+       kfree(c4pl);
+ }
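
The shape of the fix above, in general terms: when a driver embeds a core-owned struct, fields of that struct may be rewritten by the core between allocation and teardown, so any value the driver needs at teardown time must be cached in its own wrapper. A minimal user-space sketch (illustrative types, not kernel code):

#include <stdio.h>
#include <stdlib.h>

struct core_pbl {                 /* stand-in for ib_fast_reg_page_list */
    unsigned int max_page_list_len;  /* core code may rewrite this */
    void *page_list;
};

struct drv_pbl {                  /* stand-in for c4iw_fr_page_list */
    struct core_pbl ibpl;
    size_t pll_len;               /* driver's private copy of the size */
};

static struct drv_pbl *drv_alloc_pbl(size_t len)
{
    struct drv_pbl *p = calloc(1, sizeof(*p));
    if (!p)
        return NULL;
    p->ibpl.page_list = malloc(len);
    p->ibpl.max_page_list_len = len;
    p->pll_len = len;             /* survives core-side rewrites */
    return p;
}

static void drv_free_pbl(struct drv_pbl *p)
{
    /* Free using the private copy; trusting ibpl.max_page_list_len
     * here is exactly the bug the patch fixes. */
    free(p->ibpl.page_list);
    free(p);
}

int main(void)
{
    struct drv_pbl *p = drv_alloc_pbl(4096);
    if (!p)
        return 1;
    p->ibpl.max_page_list_len = 7;   /* simulate the core clobbering it */
    printf("unmap length: %zu (not %u)\n", p->pll_len,
           p->ibpl.max_page_list_len);
    drv_free_pbl(p);
    return 0;
}
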
diff --git a/linux-next-pending/0039-RDMA-cxgb4--Don't-leak-skb-in-c4iw_uld_rx_handler().patch b/linux-next-pending/0039-RDMA-cxgb4--Don't-leak-skb-in-c4iw_uld_rx_handler().patch
new file mode 100644 (file)
index 0000000..c0bf2f9
--- /dev/null
@@ -0,0 +1,29 @@
+commit dbb084cc5f52152f53b5fd22fa76b9bf69904594
+Author: Steve Wise <swise@opengridcomputing.com>
+Date:   Fri Mar 21 20:40:30 2014 +0530
+
+    RDMA/cxgb4: Don't leak skb in c4iw_uld_rx_handler()
+    
+    Signed-off-by: Steve Wise <swise@opengridcomputing.com>
+    Signed-off-by: Roland Dreier <roland@purestorage.com>
+
+diff --git a/drivers/infiniband/hw/cxgb4/device.c b/drivers/infiniband/hw/cxgb4/device.c
+index 4a03385..982f815 100644
+--- a/drivers/infiniband/hw/cxgb4/device.c
++++ b/drivers/infiniband/hw/cxgb4/device.c
+@@ -897,11 +897,13 @@ static int c4iw_uld_rx_handler(void *handle, const __be64 *rsp,
+       }
+       opcode = *(u8 *)rsp;
+-      if (c4iw_handlers[opcode])
++      if (c4iw_handlers[opcode]) {
+               c4iw_handlers[opcode](dev, skb);
+-      else
++      } else {
+               pr_info("%s no handler opcode 0x%x...\n", __func__,
+                      opcode);
++              kfree_skb(skb);
++      }
+       return 0;
+ nomem:
diff --git a/linux-next-pending/0040-RDMA-cxgb4--Fix-possible-memory-leak-in-RX_PKT-processing.patch b/linux-next-pending/0040-RDMA-cxgb4--Fix-possible-memory-leak-in-RX_PKT-processing.patch
new file mode 100644 (file)
index 0000000..a8d08cc
--- /dev/null
@@ -0,0 +1,39 @@
+commit 1ce1d471acb7ad8e8b8e3a2972de9fbb5f2be79a
+Author: Steve Wise <swise@opengridcomputing.com>
+Date:   Fri Mar 21 20:40:31 2014 +0530
+
+    RDMA/cxgb4: Fix possible memory leak in RX_PKT processing
+    
+    If cxgb4_ofld_send() returns < 0, then send_fw_pass_open_req() must
+    free the request skb and the saved skb with the tcp header.
+    
+    Signed-off-by: Steve Wise <swise@opengridcomputing.com>
+    Signed-off-by: Roland Dreier <roland@purestorage.com>
+
+diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
+index e1fc5c5..773d010 100644
+--- a/drivers/infiniband/hw/cxgb4/cm.c
++++ b/drivers/infiniband/hw/cxgb4/cm.c
+@@ -3204,6 +3204,7 @@ static void send_fw_pass_open_req(struct c4iw_dev *dev, struct sk_buff *skb,
+       struct sk_buff *req_skb;
+       struct fw_ofld_connection_wr *req;
+       struct cpl_pass_accept_req *cpl = cplhdr(skb);
++      int ret;
+       req_skb = alloc_skb(sizeof(struct fw_ofld_connection_wr), GFP_KERNEL);
+       req = (struct fw_ofld_connection_wr *)__skb_put(req_skb, sizeof(*req));
+@@ -3240,7 +3241,13 @@ static void send_fw_pass_open_req(struct c4iw_dev *dev, struct sk_buff *skb,
+       req->cookie = (unsigned long)skb;
+       set_wr_txq(req_skb, CPL_PRIORITY_CONTROL, port_id);
+-      cxgb4_ofld_send(dev->rdev.lldi.ports[0], req_skb);
++      ret = cxgb4_ofld_send(dev->rdev.lldi.ports[0], req_skb);
++      if (ret < 0) {
++              pr_err("%s - cxgb4_ofld_send error %d - dropping\n", __func__,
++                     ret);
++              kfree_skb(skb);
++              kfree_skb(req_skb);
++      }
+ }
+ /*
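
The ownership rule the fix enforces, sketched generically (illustrative names, not the cxgb4 API): when a send succeeds the lower layer consumes the buffer, but when it fails ownership stays with the caller, who must free both the request and anything stashed for later delivery (here, the skb saved via req->cookie).

#include <stdio.h>
#include <stdlib.h>

struct buf { char data[64]; };

/* Stand-in for cxgb4_ofld_send(): consumes req on success only. */
static int lower_send(struct buf *req, int fail)
{
    if (fail)
        return -1;     /* ownership stays with the caller */
    free(req);         /* consumed by the lower layer */
    return 0;
}

static void send_request(struct buf *saved, int fail)
{
    struct buf *req = malloc(sizeof(*req));
    if (!req) {
        free(saved);
        return;
    }
    if (lower_send(req, fail) < 0) {
        fprintf(stderr, "send failed - dropping\n");
        free(saved);   /* the skb held via the cookie in the driver */
        free(req);     /* the request itself */
    }
    /* on success, both are freed elsewhere on completion */
}

int main(void)
{
    send_request(malloc(sizeof(struct buf)), 1);
    return 0;
}
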
diff --git a/linux-next-pending/0041-RDMA-cxgb4--Ignore-read-reponse-type-1-CQEs.patch b/linux-next-pending/0041-RDMA-cxgb4--Ignore-read-reponse-type-1-CQEs.patch
new file mode 100644 (file)
index 0000000..9ed94a1
--- /dev/null
@@ -0,0 +1,54 @@
+commit 70b9c66053ecadde421658b8ec808c981f2eef11
+Author: Steve Wise <swise@opengridcomputing.com>
+Date:   Fri Mar 21 20:40:32 2014 +0530
+
+    RDMA/cxgb4: Ignore read reponse type 1 CQEs
+    
+    These are generated by HW in some error cases and need to be
+    silently discarded.
+    
+    Signed-off-by: Steve Wise <swise@opengridcomputing.com>
+    Signed-off-by: Roland Dreier <roland@purestorage.com>
+
+diff --git a/drivers/infiniband/hw/cxgb4/cq.c b/drivers/infiniband/hw/cxgb4/cq.c
+index d6a7db2..ce468e5 100644
+--- a/drivers/infiniband/hw/cxgb4/cq.c
++++ b/drivers/infiniband/hw/cxgb4/cq.c
+@@ -365,8 +365,14 @@ void c4iw_flush_hw_cq(struct c4iw_cq *chp)
+               if (CQE_OPCODE(hw_cqe) == FW_RI_READ_RESP) {
+-                      /*
+-                       * drop peer2peer RTR reads.
++                      /* If we have reached here because of async
++                       * event or other error, and have egress error
++                       * then drop
++                       */
++                      if (CQE_TYPE(hw_cqe) == 1)
++                              goto next_cqe;
++
++                      /* drop peer2peer RTR reads.
+                        */
+                       if (CQE_WRID_STAG(hw_cqe) == 1)
+                               goto next_cqe;
+@@ -511,8 +517,18 @@ static int poll_cq(struct t4_wq *wq, struct t4_cq *cq, struct t4_cqe *cqe,
+        */
+       if (RQ_TYPE(hw_cqe) && (CQE_OPCODE(hw_cqe) == FW_RI_READ_RESP)) {
+-              /*
+-               * If this is an unsolicited read response, then the read
++              /* If we have reached here because of async
++               * event or other error, and have egress error
++               * then drop
++               */
++              if (CQE_TYPE(hw_cqe) == 1) {
++                      if (CQE_STATUS(hw_cqe))
++                              t4_set_wq_in_error(wq);
++                      ret = -EAGAIN;
++                      goto skip_cqe;
++              }
++
++              /* If this is an unsolicited read response, then the read
+                * was generated by the kernel driver as part of peer-2-peer
+                * connection setup.  So ignore the completion.
+                */
diff --git a/linux-next-pending/0042-RDMA-cxgb4--Connect_request_upcall-fixes.patch b/linux-next-pending/0042-RDMA-cxgb4--Connect_request_upcall-fixes.patch
new file mode 100644 (file)
index 0000000..dde488c
--- /dev/null
@@ -0,0 +1,149 @@
+commit be13b2dff8c4e41846477b22cc5c164ea5a6ac2e
+Author: Steve Wise <swise@opengridcomputing.com>
+Date:   Fri Mar 21 20:40:33 2014 +0530
+
+    RDMA/cxgb4: Connect_request_upcall fixes
+    
+    When processing an MPA Start Request, if the listening endpoint is
+    DEAD, then abort the connection.
+    
+    If the IWCM returns an error, then we must abort the connection and
+    release resources.  Also abort_connection() should not post a CLOSE
+    event, so clean that up too.
+    
+    Signed-off-by: Steve Wise <swise@opengridcomputing.com>
+    Signed-off-by: Roland Dreier <roland@purestorage.com>
+
+diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
+index 773d010..6bfef31 100644
+--- a/drivers/infiniband/hw/cxgb4/cm.c
++++ b/drivers/infiniband/hw/cxgb4/cm.c
+@@ -968,13 +968,14 @@ static int act_establish(struct c4iw_dev *dev, struct sk_buff *skb)
+       return 0;
+ }
+-static void close_complete_upcall(struct c4iw_ep *ep)
++static void close_complete_upcall(struct c4iw_ep *ep, int status)
+ {
+       struct iw_cm_event event;
+       PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
+       memset(&event, 0, sizeof(event));
+       event.event = IW_CM_EVENT_CLOSE;
++      event.status = status;
+       if (ep->com.cm_id) {
+               PDBG("close complete delivered ep %p cm_id %p tid %u\n",
+                    ep, ep->com.cm_id, ep->hwtid);
+@@ -988,7 +989,6 @@ static void close_complete_upcall(struct c4iw_ep *ep)
+ static int abort_connection(struct c4iw_ep *ep, struct sk_buff *skb, gfp_t gfp)
+ {
+       PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
+-      close_complete_upcall(ep);
+       state_set(&ep->com, ABORTING);
+       set_bit(ABORT_CONN, &ep->com.history);
+       return send_abort(ep, skb, gfp);
+@@ -1067,9 +1067,10 @@ static void connect_reply_upcall(struct c4iw_ep *ep, int status)
+       }
+ }
+-static void connect_request_upcall(struct c4iw_ep *ep)
++static int connect_request_upcall(struct c4iw_ep *ep)
+ {
+       struct iw_cm_event event;
++      int ret;
+       PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
+       memset(&event, 0, sizeof(event));
+@@ -1094,15 +1095,14 @@ static void connect_request_upcall(struct c4iw_ep *ep)
+               event.private_data_len = ep->plen;
+               event.private_data = ep->mpa_pkt + sizeof(struct mpa_message);
+       }
+-      if (state_read(&ep->parent_ep->com) != DEAD) {
+-              c4iw_get_ep(&ep->com);
+-              ep->parent_ep->com.cm_id->event_handler(
+-                                              ep->parent_ep->com.cm_id,
+-                                              &event);
+-      }
++      c4iw_get_ep(&ep->com);
++      ret = ep->parent_ep->com.cm_id->event_handler(ep->parent_ep->com.cm_id,
++                                                    &event);
++      if (ret)
++              c4iw_put_ep(&ep->com);
+       set_bit(CONNREQ_UPCALL, &ep->com.history);
+       c4iw_put_ep(&ep->parent_ep->com);
+-      ep->parent_ep = NULL;
++      return ret;
+ }
+ static void established_upcall(struct c4iw_ep *ep)
+@@ -1401,7 +1401,6 @@ static void process_mpa_request(struct c4iw_ep *ep, struct sk_buff *skb)
+               return;
+       PDBG("%s enter (%s line %u)\n", __func__, __FILE__, __LINE__);
+-      stop_ep_timer(ep);
+       mpa = (struct mpa_message *) ep->mpa_pkt;
+       /*
+@@ -1494,9 +1493,17 @@ static void process_mpa_request(struct c4iw_ep *ep, struct sk_buff *skb)
+            ep->mpa_attr.p2p_type);
+       state_set(&ep->com, MPA_REQ_RCVD);
++      stop_ep_timer(ep);
+       /* drive upcall */
+-      connect_request_upcall(ep);
++      mutex_lock(&ep->parent_ep->com.mutex);
++      if (ep->parent_ep->com.state != DEAD) {
++              if (connect_request_upcall(ep))
++                      abort_connection(ep, skb, GFP_KERNEL);
++      } else {
++              abort_connection(ep, skb, GFP_KERNEL);
++      }
++      mutex_unlock(&ep->parent_ep->com.mutex);
+       return;
+ }
+@@ -2247,7 +2254,7 @@ static int peer_close(struct c4iw_dev *dev, struct sk_buff *skb)
+                       c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
+                                      C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
+               }
+-              close_complete_upcall(ep);
++              close_complete_upcall(ep, 0);
+               __state_set(&ep->com, DEAD);
+               release = 1;
+               disconnect = 0;
+@@ -2426,7 +2433,7 @@ static int close_con_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
+                                            C4IW_QP_ATTR_NEXT_STATE,
+                                            &attrs, 1);
+               }
+-              close_complete_upcall(ep);
++              close_complete_upcall(ep, 0);
+               __state_set(&ep->com, DEAD);
+               release = 1;
+               break;
+@@ -2981,7 +2988,7 @@ int c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp)
+       rdev = &ep->com.dev->rdev;
+       if (c4iw_fatal_error(rdev)) {
+               fatal = 1;
+-              close_complete_upcall(ep);
++              close_complete_upcall(ep, -EIO);
+               ep->com.state = DEAD;
+       }
+       switch (ep->com.state) {
+@@ -3023,7 +3030,7 @@ int c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp)
+       if (close) {
+               if (abrupt) {
+                       set_bit(EP_DISC_ABORT, &ep->com.history);
+-                      close_complete_upcall(ep);
++                      close_complete_upcall(ep, -ECONNRESET);
+                       ret = send_abort(ep, NULL, gfp);
+               } else {
+                       set_bit(EP_DISC_CLOSE, &ep->com.history);
+@@ -3435,6 +3442,7 @@ static void process_timeout(struct c4iw_ep *ep)
+                                    &attrs, 1);
+               }
+               __state_set(&ep->com, ABORTING);
++              close_complete_upcall(ep, -ETIMEDOUT);
+               break;
+       default:
+               WARN(1, "%s unexpected state ep %p tid %u state %u\n",
diff --git a/linux-next-pending/0043-RDMA-cxgb4--Update-snd_seq-when-sending-MPA-messages.patch b/linux-next-pending/0043-RDMA-cxgb4--Update-snd_seq-when-sending-MPA-messages.patch
new file mode 100644 (file)
index 0000000..9d5a317
--- /dev/null
@@ -0,0 +1,37 @@
+commit 9c88aa003d26e9f1e9ea6e08511768c2ef666654
+Author: Steve Wise <swise@opengridcomputing.com>
+Date:   Fri Mar 21 20:40:34 2014 +0530
+
+    RDMA/cxgb4: Update snd_seq when sending MPA messages
+    
+    Signed-off-by: Steve Wise <swise@opengridcomputing.com>
+    Signed-off-by: Roland Dreier <roland@purestorage.com>
+
+diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
+index 6bfef31..a1bc41d 100644
+--- a/drivers/infiniband/hw/cxgb4/cm.c
++++ b/drivers/infiniband/hw/cxgb4/cm.c
+@@ -762,6 +762,7 @@ static void send_mpa_req(struct c4iw_ep *ep, struct sk_buff *skb,
+       start_ep_timer(ep);
+       state_set(&ep->com, MPA_REQ_SENT);
+       ep->mpa_attr.initiator = 1;
++      ep->snd_seq += mpalen;
+       return;
+ }
+@@ -841,6 +842,7 @@ static int send_mpa_reject(struct c4iw_ep *ep, const void *pdata, u8 plen)
+       t4_set_arp_err_handler(skb, NULL, arp_failure_discard);
+       BUG_ON(ep->mpa_skb);
+       ep->mpa_skb = skb;
++      ep->snd_seq += mpalen;
+       return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
+ }
+@@ -925,6 +927,7 @@ static int send_mpa_reply(struct c4iw_ep *ep, const void *pdata, u8 plen)
+       t4_set_arp_err_handler(skb, NULL, arp_failure_discard);
+       ep->mpa_skb = skb;
+       state_set(&ep->com, MPA_REP_SENT);
++      ep->snd_seq += mpalen;
+       return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
+ }
diff --git a/linux-next-pending/0044-RDMA-cxgb4--Lock-around-accept-reject-downcalls.patch b/linux-next-pending/0044-RDMA-cxgb4--Lock-around-accept-reject-downcalls.patch
new file mode 100644 (file)
index 0000000..9a93455
--- /dev/null
@@ -0,0 +1,120 @@
+commit 9306dcbc96f37e4dd6abe62b620a978792839195
+Author: Steve Wise <swise@opengridcomputing.com>
+Date:   Fri Mar 21 20:40:35 2014 +0530
+
+    RDMA/cxgb4: Lock around accept/reject downcalls
+    
+    There is a race between ULP threads doing an accept/reject, and the
+    ingress processing thread handling close/abort for the same connection.
+    The accept/reject path needs to hold the lock to serialize these paths.
+    
+    Signed-off-by: Steve Wise <swise@opengridcomputing.com>
+    Signed-off-by: Roland Dreier <roland@purestorage.com>
+
+diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
+index a1bc41d..b8f9e75 100644
+--- a/drivers/infiniband/hw/cxgb4/cm.c
++++ b/drivers/infiniband/hw/cxgb4/cm.c
+@@ -760,7 +760,7 @@ static void send_mpa_req(struct c4iw_ep *ep, struct sk_buff *skb,
+       ep->mpa_skb = skb;
+       c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
+       start_ep_timer(ep);
+-      state_set(&ep->com, MPA_REQ_SENT);
++      __state_set(&ep->com, MPA_REQ_SENT);
+       ep->mpa_attr.initiator = 1;
+       ep->snd_seq += mpalen;
+       return;
+@@ -926,7 +926,7 @@ static int send_mpa_reply(struct c4iw_ep *ep, const void *pdata, u8 plen)
+       skb_get(skb);
+       t4_set_arp_err_handler(skb, NULL, arp_failure_discard);
+       ep->mpa_skb = skb;
+-      state_set(&ep->com, MPA_REP_SENT);
++      __state_set(&ep->com, MPA_REP_SENT);
+       ep->snd_seq += mpalen;
+       return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
+ }
+@@ -944,6 +944,7 @@ static int act_establish(struct c4iw_dev *dev, struct sk_buff *skb)
+       PDBG("%s ep %p tid %u snd_isn %u rcv_isn %u\n", __func__, ep, tid,
+            be32_to_cpu(req->snd_isn), be32_to_cpu(req->rcv_isn));
++      mutex_lock(&ep->com.mutex);
+       dst_confirm(ep->dst);
+       /* setup the hwtid for this connection */
+@@ -967,7 +968,7 @@ static int act_establish(struct c4iw_dev *dev, struct sk_buff *skb)
+               send_mpa_req(ep, skb, 1);
+       else
+               send_mpa_req(ep, skb, mpa_rev);
+-
++      mutex_unlock(&ep->com.mutex);
+       return 0;
+ }
+@@ -2511,22 +2512,28 @@ static int fw4_ack(struct c4iw_dev *dev, struct sk_buff *skb)
+ int c4iw_reject_cr(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
+ {
+-      int err;
++      int err = 0;
++      int disconnect = 0;
+       struct c4iw_ep *ep = to_ep(cm_id);
+       PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
+-      if (state_read(&ep->com) == DEAD) {
++
++      mutex_lock(&ep->com.mutex);
++      if (ep->com.state == DEAD) {
+               c4iw_put_ep(&ep->com);
+               return -ECONNRESET;
+       }
+       set_bit(ULP_REJECT, &ep->com.history);
+-      BUG_ON(state_read(&ep->com) != MPA_REQ_RCVD);
++      BUG_ON(ep->com.state != MPA_REQ_RCVD);
+       if (mpa_rev == 0)
+               abort_connection(ep, NULL, GFP_KERNEL);
+       else {
+               err = send_mpa_reject(ep, pdata, pdata_len);
+-              err = c4iw_ep_disconnect(ep, 0, GFP_KERNEL);
++              disconnect = 1;
+       }
++      mutex_unlock(&ep->com.mutex);
++      if (disconnect)
++              err = c4iw_ep_disconnect(ep, 0, GFP_KERNEL);
+       c4iw_put_ep(&ep->com);
+       return 0;
+ }
+@@ -2541,12 +2548,14 @@ int c4iw_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
+       struct c4iw_qp *qp = get_qhp(h, conn_param->qpn);
+       PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
+-      if (state_read(&ep->com) == DEAD) {
++
++      mutex_lock(&ep->com.mutex);
++      if (ep->com.state == DEAD) {
+               err = -ECONNRESET;
+               goto err;
+       }
+-      BUG_ON(state_read(&ep->com) != MPA_REQ_RCVD);
++      BUG_ON(ep->com.state != MPA_REQ_RCVD);
+       BUG_ON(!qp);
+       set_bit(ULP_ACCEPT, &ep->com.history);
+@@ -2615,14 +2624,16 @@ int c4iw_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
+       if (err)
+               goto err1;
+-      state_set(&ep->com, FPDU_MODE);
++      __state_set(&ep->com, FPDU_MODE);
+       established_upcall(ep);
++      mutex_unlock(&ep->com.mutex);
+       c4iw_put_ep(&ep->com);
+       return 0;
+ err1:
+       ep->com.cm_id = NULL;
+       cm_id->rem_ref(cm_id);
+ err:
++      mutex_unlock(&ep->com.mutex);
+       c4iw_put_ep(&ep->com);
+       return err;
+ }
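
The locking idiom these hunks apply, in a standalone sketch (pthreads stand-in with illustrative names): the check of the endpoint state and the transition that follows must sit in one critical section shared with the ingress close/abort path, which is why the unlocked state_read()/state_set() helpers are replaced with mutex-protected __state_set() calls.

#include <pthread.h>
#include <stdio.h>

enum ep_state { MPA_REQ_RCVD, FPDU_MODE, DEAD };

struct endpoint {
    pthread_mutex_t mutex;
    enum ep_state state;
};

/* ULP accept downcall: check-then-act under the same mutex the
 * ingress thread takes when it marks the endpoint DEAD. Without
 * the lock, an abort can land between the check and the set. */
static int ep_accept(struct endpoint *ep)
{
    int err = 0;

    pthread_mutex_lock(&ep->mutex);
    if (ep->state == DEAD) {
        err = -1;                 /* -ECONNRESET in the driver */
    } else {
        ep->state = FPDU_MODE;    /* __state_set(): lock already held */
    }
    pthread_mutex_unlock(&ep->mutex);
    return err;
}

int main(void)
{
    struct endpoint ep = { PTHREAD_MUTEX_INITIALIZER, MPA_REQ_RCVD };
    printf("accept: %d, state: %d\n", ep_accept(&ep), ep.state);
    return 0;
}
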
diff --git a/linux-next-pending/0045-RDMA-cxgb4--Drop-RX_DATA-packets-if-the-endpoint-is-gone.patch b/linux-next-pending/0045-RDMA-cxgb4--Drop-RX_DATA-packets-if-the-endpoint-is-gone.patch
new file mode 100644 (file)
index 0000000..de5bc72
--- /dev/null
@@ -0,0 +1,22 @@
+commit 925902f25dfe1c01581e7e6dfc72303b47c74b7e
+Author: Steve Wise <swise@opengridcomputing.com>
+Date:   Fri Mar 21 20:40:36 2014 +0530
+
+    RDMA/cxgb4: Drop RX_DATA packets if the endpoint is gone
+    
+    Signed-off-by: Steve Wise <swise@opengridcomputing.com>
+    Signed-off-by: Roland Dreier <roland@purestorage.com>
+
+diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
+index b8f9e75..57344a3 100644
+--- a/drivers/infiniband/hw/cxgb4/cm.c
++++ b/drivers/infiniband/hw/cxgb4/cm.c
+@@ -1521,6 +1521,8 @@ static int rx_data(struct c4iw_dev *dev, struct sk_buff *skb)
+       __u8 status = hdr->status;
+       ep = lookup_tid(t, tid);
++      if (!ep)
++              return 0;
+       PDBG("%s ep %p tid %u dlen %u\n", __func__, ep, ep->hwtid, dlen);
+       skb_pull(skb, sizeof(*hdr));
+       skb_trim(skb, dlen);
diff --git a/linux-next-pending/0046-RDMA-cxgb4--rx_data()-needs-to-hold-the-ep-mutex.patch b/linux-next-pending/0046-RDMA-cxgb4--rx_data()-needs-to-hold-the-ep-mutex.patch
new file mode 100644 (file)
index 0000000..ec1aa29
--- /dev/null
@@ -0,0 +1,92 @@
+commit 13f17b7fb4dd7d321f688b8a93b0dbc6d1120ed0
+Author: Steve Wise <swise@opengridcomputing.com>
+Date:   Fri Mar 21 20:40:37 2014 +0530
+
+    RDMA/cxgb4: rx_data() needs to hold the ep mutex
+    
+    To avoid racing with other threads doing close/flush/whatever, rx_data()
+    should hold the endpoint mutex.
+    
+    Signed-off-by: Steve Wise <swise@opengridcomputing.com>
+    Signed-off-by: Roland Dreier <roland@purestorage.com>
+
+diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
+index 57344a3..fe5db3c 100644
+--- a/drivers/infiniband/hw/cxgb4/cm.c
++++ b/drivers/infiniband/hw/cxgb4/cm.c
+@@ -1170,7 +1170,7 @@ static void process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb)
+        * the connection.
+        */
+       stop_ep_timer(ep);
+-      if (state_read(&ep->com) != MPA_REQ_SENT)
++      if (ep->com.state != MPA_REQ_SENT)
+               return;
+       /*
+@@ -1245,7 +1245,7 @@ static void process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb)
+        * start reply message including private data. And
+        * the MPA header is valid.
+        */
+-      state_set(&ep->com, FPDU_MODE);
++      __state_set(&ep->com, FPDU_MODE);
+       ep->mpa_attr.crc_enabled = (mpa->flags & MPA_CRC) | crc_enabled ? 1 : 0;
+       ep->mpa_attr.recv_marker_enabled = markers_enabled;
+       ep->mpa_attr.xmit_marker_enabled = mpa->flags & MPA_MARKERS ? 1 : 0;
+@@ -1360,7 +1360,7 @@ static void process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb)
+       }
+       goto out;
+ err:
+-      state_set(&ep->com, ABORTING);
++      __state_set(&ep->com, ABORTING);
+       send_abort(ep, skb, GFP_KERNEL);
+ out:
+       connect_reply_upcall(ep, err);
+@@ -1375,7 +1375,7 @@ static void process_mpa_request(struct c4iw_ep *ep, struct sk_buff *skb)
+       PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
+-      if (state_read(&ep->com) != MPA_REQ_WAIT)
++      if (ep->com.state != MPA_REQ_WAIT)
+               return;
+       /*
+@@ -1496,7 +1496,7 @@ static void process_mpa_request(struct c4iw_ep *ep, struct sk_buff *skb)
+            ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version,
+            ep->mpa_attr.p2p_type);
+-      state_set(&ep->com, MPA_REQ_RCVD);
++      __state_set(&ep->com, MPA_REQ_RCVD);
+       stop_ep_timer(ep);
+       /* drive upcall */
+@@ -1526,11 +1526,12 @@ static int rx_data(struct c4iw_dev *dev, struct sk_buff *skb)
+       PDBG("%s ep %p tid %u dlen %u\n", __func__, ep, ep->hwtid, dlen);
+       skb_pull(skb, sizeof(*hdr));
+       skb_trim(skb, dlen);
++      mutex_lock(&ep->com.mutex);
+       /* update RX credits */
+       update_rx_credits(ep, dlen);
+-      switch (state_read(&ep->com)) {
++      switch (ep->com.state) {
+       case MPA_REQ_SENT:
+               ep->rcv_seq += dlen;
+               process_mpa_reply(ep, skb);
+@@ -1546,7 +1547,7 @@ static int rx_data(struct c4iw_dev *dev, struct sk_buff *skb)
+                       pr_err("%s Unexpected streaming data." \
+                              " qpid %u ep %p state %d tid %u status %d\n",
+                              __func__, ep->com.qp->wq.sq.qid, ep,
+-                             state_read(&ep->com), ep->hwtid, status);
++                             ep->com.state, ep->hwtid, status);
+               attrs.next_state = C4IW_QP_STATE_TERMINATE;
+               c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
+                              C4IW_QP_ATTR_NEXT_STATE, &attrs, 0);
+@@ -1555,6 +1556,7 @@ static int rx_data(struct c4iw_dev *dev, struct sk_buff *skb)
+       default:
+               break;
+       }
++      mutex_unlock(&ep->com.mutex);
+       return 0;
+ }
diff --git a/linux-next-pending/0047-RDMA-cxgb4--Disable-DSGL-use..patch b/linux-next-pending/0047-RDMA-cxgb4--Disable-DSGL-use..patch
new file mode 100644 (file)
index 0000000..cd9cc84
--- /dev/null
@@ -0,0 +1,26 @@
+commit 1410e14b7394c00d8740affbfb212760744e8b45
+Author: Steve Wise <swise@opengridcomputing.com>
+Date:   Thu Mar 27 12:00:53 2014 -0500
+
+    RDMA/cxgb4: Disable DSGL use.
+    
+    Current hardware doesn't correctly support DSGL.
+    
+    Signed-off-by: Steve Wise <swise@opengridcomputing.com>
+
+diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c
+index 0989871a..bad9268 100644
+--- a/drivers/infiniband/hw/cxgb4/mem.c
++++ b/drivers/infiniband/hw/cxgb4/mem.c
+@@ -37,9 +37,9 @@
+ #include "iw_cxgb4.h"
+-int use_dsgl = 1;
++int use_dsgl = 0;
+ module_param(use_dsgl, int, 0644);
+-MODULE_PARM_DESC(use_dsgl, "Use DSGL for PBL/FastReg (default=1)");
++MODULE_PARM_DESC(use_dsgl, "Use DSGL for PBL/FastReg (default=0)");
+ #define T4_ULPTX_MIN_IO 32
+ #define C4IW_MAX_INLINE_SIZE 96