--- /dev/null
+commit 2b7bbc963da8d076f263574af4138b5df2e1581f
+Author: Chuck Lever <chuck.lever@oracle.com>
+Date: Wed Mar 12 12:51:30 2014 -0400
+
+ SUNRPC: Fix large reads on NFS/RDMA
+
+ After commit a11a2bf4, "SUNRPC: Optimise away unnecessary data moves
+ in xdr_align_pages", Thu Aug 2 13:21:43 2012, READs larger than a
+ few hundred bytes via NFS/RDMA no longer work. This commit exposed
+ a long-standing bug in rpcrdma_inline_fixup().
+
+ I reproduce this with an rsize=4096 mount using the cthon04 basic
+ tests. Test 5 fails with an EIO error.
+
+ For my reproducer, kernel log shows:
+
+ NFS: server cheating in read reply: count 4096 > recvd 0
+
+ rpcrdma_inline_fixup() is zeroing the xdr_stream::page_len field,
+ and xdr_align_pages() is now returning that value to the READ XDR
+ decoder function.
+
+ That field is set up by xdr_inline_pages(), called by the READ XDR
+ encoder function. As far as I can tell, it is supposed to be left alone
+ after that, as it describes the dimensions of the reply xdr_stream,
+ not the contents of that stream.
+
+ Bugzilla: https://bugzilla.kernel.org/show_bug.cgi?id=68391
+ Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
+ Signed-off-by: Trond Myklebust <trond.myklebust@primarydata.com>
+
+diff --git a/net/sunrpc/xprtrdma/rpc_rdma.c b/net/sunrpc/xprtrdma/rpc_rdma.c
+index e03725b..96ead52 100644
+--- a/net/sunrpc/xprtrdma/rpc_rdma.c
++++ b/net/sunrpc/xprtrdma/rpc_rdma.c
+@@ -649,9 +649,7 @@ rpcrdma_inline_fixup(struct rpc_rqst *rqst, char *srcp, int copy_len, int pad)
+ break;
+ page_base = 0;
+ }
+- rqst->rq_rcv_buf.page_len = olen - copy_len;
+- } else
+- rqst->rq_rcv_buf.page_len = 0;
++ }
+
+ if (copy_len && rqst->rq_rcv_buf.tail[0].iov_len) {
+ curlen = copy_len;
--- /dev/null
+Fix regression in NFSRDMA server
+
+From: Tom Tucker <tom@ogc.us>
+
+The server regression was caused by the addition of rq_next_page
+(afc59400d6c65bad66d4ad0b2daf879cbff8e23e). A few places that update
+the rq_respages array were missed and also need to maintain rq_next_page.
+
+NOTE: Patch modified to apply against OFED.
+
+Signed-off-by: Tom Tucker <tom@ogc.us>
+Tested-by: Steve Wise <swise@ogc.us>
+
+---
+
+--- a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c 2014-03-31 15:31:05.214903226 -0500
++++ a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c 2014-03-31 15:34:40.042047141 -0500
+@@ -90,6 +90,9 @@ static void rdma_build_arg_xdr(struct sv
+ sge_no++;
+ }
+ rqstp->rq_respages = &rqstp->rq_pages[sge_no];
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,8,0))
++ rqstp->rq_next_page = rqstp->rq_respages + 1;
++#endif
+
+ /* We should never run out of SGE because the limit is defined to
+ * support the max allowed RPC data length
+@@ -169,6 +172,9 @@ static int map_read_chunks(struct svcxpr
+ */
+ head->arg.pages[page_no] = rqstp->rq_arg.pages[page_no];
+ rqstp->rq_respages = &rqstp->rq_arg.pages[page_no+1];
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,8,0))
++ rqstp->rq_next_page = rqstp->rq_respages + 1;
++#endif
+
+ byte_count -= sge_bytes;
+ ch_bytes -= sge_bytes;
+@@ -276,6 +282,9 @@ static int fast_reg_read_chunks(struct s
+
+ /* rq_respages points one past arg pages */
+ rqstp->rq_respages = &rqstp->rq_arg.pages[page_no];
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,8,0))
++ rqstp->rq_next_page = rqstp->rq_respages + 1;
++#endif
+
+ /* Create the reply and chunk maps */
+ offset = 0;
+@@ -527,9 +536,6 @@ next_sge:
+ #if (LINUX_VERSION_CODE < KERNEL_VERSION(3,8,0))
+ while (rqstp->rq_resused)
+ rqstp->rq_respages[--rqstp->rq_resused] = NULL;
+-#else
+- while (rqstp->rq_next_page != rqstp->rq_respages)
+- *(--rqstp->rq_next_page) = NULL;
+ #endif
+
+ return err;
+@@ -558,7 +564,7 @@ static int rdma_read_complete(struct svc
+ #if (LINUX_VERSION_CODE < KERNEL_VERSION(3,8,0))
+ rqstp->rq_resused = 0;
+ #else
+- rqstp->rq_next_page = &rqstp->rq_arg.pages[page_no];
++ rqstp->rq_next_page = rqstp->rq_respages + 1;
+ #endif
+
+ /* Rebuild rq_arg head and tail. */