git.openfabrics.org - ~aditr/compat-rdma.git/commitdiff
Fix rstream timeouts on 4k_lat test
author tczeszun <tomasz.czeszun@intel.com>
Mon, 19 Jun 2017 10:07:01 +0000 (12:07 +0200)
committer tczeszun <tomasz.czeszun@intel.com>
Mon, 19 Jun 2017 10:07:01 +0000 (12:07 +0200)
linux-next-cherry-picks/0053-IB-ipoib-move-back-IB-LL-address-into-the-hard-header.patch [new file with mode: 0644]
linux-next-cherry-picks/0054-IB-qib-Remove-qpt-mask-global.patch [new file with mode: 0644]
linux-next-cherry-picks/0055-IB-rdmavt-Correct-sparse-annotation.patch [new file with mode: 0644]
linux-next-cherry-picks/0056-IB-rdmavt-rdmavt-can-handle-non-aligned-page-maps.patch [new file with mode: 0644]

diff --git a/linux-next-cherry-picks/0053-IB-ipoib-move-back-IB-LL-address-into-the-hard-header.patch b/linux-next-cherry-picks/0053-IB-ipoib-move-back-IB-LL-address-into-the-hard-header.patch
new file mode 100644 (file)
index 0000000..fa4f899
--- /dev/null
@@ -0,0 +1,360 @@
+From fc791b6335152c5278dc4a4991bcb2d329f806f9 Mon Sep 17 00:00:00 2001
+From: Paolo Abeni <pabeni@redhat.com>
+Date: Thu, 13 Oct 2016 18:26:56 +0200
+Subject: IB/ipoib: move back IB LL address into the hard header
+
+After commit 9207f9d45b0a ("net: preserve IP control block
+during GSO segmentation"), the GSO CB and the IPoIB CB conflict.
+That destroys the IPoIB address information cached there,
+causing a severe performance regression, as described in more detail here:
+
+http://marc.info/?l=linux-kernel&m=146787279825501&w=2
+
+This change moves the data cached by the IPoIB driver from the
+skb control block into the IPoIB hard header, as was done before
+commit 936d7de3d736 ("IPoIB: Stop lying about hard_header_len
+and use skb->cb to stash LL addresses").
+To avoid GRO issues, on packet reception the IPoIB driver stashes
+a dummy pseudo header into the skb, so that received packets
+actually have a hard header matching the declared length.
+To avoid changing the connected mode maximum MTU, the allocated
+head buffer size is increased by the pseudo header length.
+
+After this commit, IPoIB performance is back to its
+pre-regression value.
+
+v2 -> v3: rebased
+v1 -> v2: avoid changing the max mtu, increasing the head buf size
+
+Fixes: 9207f9d45b0a ("net: preserve IP control block during GSO segmentation")
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+---
+ drivers/infiniband/ulp/ipoib/ipoib.h           | 20 +++++++---
+ drivers/infiniband/ulp/ipoib/ipoib_cm.c        | 15 +++----
+ drivers/infiniband/ulp/ipoib/ipoib_ib.c        | 12 +++---
+ drivers/infiniband/ulp/ipoib/ipoib_main.c      | 54 ++++++++++++++++----------
+ drivers/infiniband/ulp/ipoib/ipoib_multicast.c |  6 ++-
+ 5 files changed, 64 insertions(+), 43 deletions(-)
+
+diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h
+index 7b8d2d9..da12717 100644
+--- a/drivers/infiniband/ulp/ipoib/ipoib.h
++++ b/drivers/infiniband/ulp/ipoib/ipoib.h
+@@ -63,6 +63,8 @@ enum ipoib_flush_level {
+ enum {
+       IPOIB_ENCAP_LEN           = 4,
++      IPOIB_PSEUDO_LEN          = 20,
++      IPOIB_HARD_LEN            = IPOIB_ENCAP_LEN + IPOIB_PSEUDO_LEN,
+       IPOIB_UD_HEAD_SIZE        = IB_GRH_BYTES + IPOIB_ENCAP_LEN,
+       IPOIB_UD_RX_SG            = 2, /* max buffer needed for 4K mtu */
+@@ -134,15 +136,21 @@ struct ipoib_header {
+       u16     reserved;
+ };
+-struct ipoib_cb {
+-      struct qdisc_skb_cb     qdisc_cb;
+-      u8                      hwaddr[INFINIBAND_ALEN];
++struct ipoib_pseudo_header {
++      u8      hwaddr[INFINIBAND_ALEN];
+ };
+-static inline struct ipoib_cb *ipoib_skb_cb(const struct sk_buff *skb)
++static inline void skb_add_pseudo_hdr(struct sk_buff *skb)
+ {
+-      BUILD_BUG_ON(sizeof(skb->cb) < sizeof(struct ipoib_cb));
+-      return (struct ipoib_cb *)skb->cb;
++      char *data = skb_push(skb, IPOIB_PSEUDO_LEN);
++
++      /*
++       * only the ipoib header is present now, make room for a dummy
++       * pseudo header and set skb field accordingly
++       */
++      memset(data, 0, IPOIB_PSEUDO_LEN);
++      skb_reset_mac_header(skb);
++      skb_pull(skb, IPOIB_HARD_LEN);
+ }
+ /* Used for all multicast joins (broadcast, IPv4 mcast and IPv6 mcast) */
+diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
+index 4ad297d..339a1ee 100644
+--- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c
++++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
+@@ -63,6 +63,8 @@ MODULE_PARM_DESC(cm_data_debug_level,
+ #define IPOIB_CM_RX_DELAY       (3 * 256 * HZ)
+ #define IPOIB_CM_RX_UPDATE_MASK (0x3)
++#define IPOIB_CM_RX_RESERVE     (ALIGN(IPOIB_HARD_LEN, 16) - IPOIB_ENCAP_LEN)
++
+ static struct ib_qp_attr ipoib_cm_err_attr = {
+       .qp_state = IB_QPS_ERR
+ };
+@@ -146,15 +148,15 @@ static struct sk_buff *ipoib_cm_alloc_rx_skb(struct net_device *dev,
+       struct sk_buff *skb;
+       int i;
+-      skb = dev_alloc_skb(IPOIB_CM_HEAD_SIZE + 12);
++      skb = dev_alloc_skb(ALIGN(IPOIB_CM_HEAD_SIZE + IPOIB_PSEUDO_LEN, 16));
+       if (unlikely(!skb))
+               return NULL;
+       /*
+-       * IPoIB adds a 4 byte header. So we need 12 more bytes to align the
++       * IPoIB adds a IPOIB_ENCAP_LEN byte header, this will align the
+        * IP header to a multiple of 16.
+        */
+-      skb_reserve(skb, 12);
++      skb_reserve(skb, IPOIB_CM_RX_RESERVE);
+       mapping[0] = ib_dma_map_single(priv->ca, skb->data, IPOIB_CM_HEAD_SIZE,
+                                      DMA_FROM_DEVICE);
+@@ -624,9 +626,9 @@ void ipoib_cm_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
+       if (wc->byte_len < IPOIB_CM_COPYBREAK) {
+               int dlen = wc->byte_len;
+-              small_skb = dev_alloc_skb(dlen + 12);
++              small_skb = dev_alloc_skb(dlen + IPOIB_CM_RX_RESERVE);
+               if (small_skb) {
+-                      skb_reserve(small_skb, 12);
++                      skb_reserve(small_skb, IPOIB_CM_RX_RESERVE);
+                       ib_dma_sync_single_for_cpu(priv->ca, rx_ring[wr_id].mapping[0],
+                                                  dlen, DMA_FROM_DEVICE);
+                       skb_copy_from_linear_data(skb, small_skb->data, dlen);
+@@ -663,8 +665,7 @@ void ipoib_cm_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
+ copied:
+       skb->protocol = ((struct ipoib_header *) skb->data)->proto;
+-      skb_reset_mac_header(skb);
+-      skb_pull(skb, IPOIB_ENCAP_LEN);
++      skb_add_pseudo_hdr(skb);
+       ++dev->stats.rx_packets;
+       dev->stats.rx_bytes += skb->len;
+diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+index be11d5d..830fecb 100644
+--- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c
++++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+@@ -128,16 +128,15 @@ static struct sk_buff *ipoib_alloc_rx_skb(struct net_device *dev, int id)
+       buf_size = IPOIB_UD_BUF_SIZE(priv->max_ib_mtu);
+-      skb = dev_alloc_skb(buf_size + IPOIB_ENCAP_LEN);
++      skb = dev_alloc_skb(buf_size + IPOIB_HARD_LEN);
+       if (unlikely(!skb))
+               return NULL;
+       /*
+-       * IB will leave a 40 byte gap for a GRH and IPoIB adds a 4 byte
+-       * header.  So we need 4 more bytes to get to 48 and align the
+-       * IP header to a multiple of 16.
++       * the IP header will be at IPOIP_HARD_LEN + IB_GRH_BYTES, that is
++       * 64 bytes aligned
+        */
+-      skb_reserve(skb, 4);
++      skb_reserve(skb, sizeof(struct ipoib_pseudo_header));
+       mapping = priv->rx_ring[id].mapping;
+       mapping[0] = ib_dma_map_single(priv->ca, skb->data, buf_size,
+@@ -253,8 +252,7 @@ static void ipoib_ib_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
+       skb_pull(skb, IB_GRH_BYTES);
+       skb->protocol = ((struct ipoib_header *) skb->data)->proto;
+-      skb_reset_mac_header(skb);
+-      skb_pull(skb, IPOIB_ENCAP_LEN);
++      skb_add_pseudo_hdr(skb);
+       ++dev->stats.rx_packets;
+       dev->stats.rx_bytes += skb->len;
+diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
+index 5636fc3..b58d9dc 100644
+--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
++++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
+@@ -925,9 +925,12 @@ static void neigh_add_path(struct sk_buff *skb, u8 *daddr,
+                               ipoib_neigh_free(neigh);
+                               goto err_drop;
+                       }
+-                      if (skb_queue_len(&neigh->queue) < IPOIB_MAX_PATH_REC_QUEUE)
++                      if (skb_queue_len(&neigh->queue) <
++                          IPOIB_MAX_PATH_REC_QUEUE) {
++                              /* put pseudoheader back on for next time */
++                              skb_push(skb, IPOIB_PSEUDO_LEN);
+                               __skb_queue_tail(&neigh->queue, skb);
+-                      else {
++                      } else {
+                               ipoib_warn(priv, "queue length limit %d. Packet drop.\n",
+                                          skb_queue_len(&neigh->queue));
+                               goto err_drop;
+@@ -964,7 +967,7 @@ err_drop:
+ }
+ static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev,
+-                           struct ipoib_cb *cb)
++                           struct ipoib_pseudo_header *phdr)
+ {
+       struct ipoib_dev_priv *priv = netdev_priv(dev);
+       struct ipoib_path *path;
+@@ -972,16 +975,18 @@ static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev,
+       spin_lock_irqsave(&priv->lock, flags);
+-      path = __path_find(dev, cb->hwaddr + 4);
++      path = __path_find(dev, phdr->hwaddr + 4);
+       if (!path || !path->valid) {
+               int new_path = 0;
+               if (!path) {
+-                      path = path_rec_create(dev, cb->hwaddr + 4);
++                      path = path_rec_create(dev, phdr->hwaddr + 4);
+                       new_path = 1;
+               }
+               if (path) {
+                       if (skb_queue_len(&path->queue) < IPOIB_MAX_PATH_REC_QUEUE) {
++                              /* put pseudoheader back on for next time */
++                              skb_push(skb, IPOIB_PSEUDO_LEN);
+                               __skb_queue_tail(&path->queue, skb);
+                       } else {
+                               ++dev->stats.tx_dropped;
+@@ -1009,10 +1014,12 @@ static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev,
+                         be16_to_cpu(path->pathrec.dlid));
+               spin_unlock_irqrestore(&priv->lock, flags);
+-              ipoib_send(dev, skb, path->ah, IPOIB_QPN(cb->hwaddr));
++              ipoib_send(dev, skb, path->ah, IPOIB_QPN(phdr->hwaddr));
+               return;
+       } else if ((path->query || !path_rec_start(dev, path)) &&
+                  skb_queue_len(&path->queue) < IPOIB_MAX_PATH_REC_QUEUE) {
++              /* put pseudoheader back on for next time */
++              skb_push(skb, IPOIB_PSEUDO_LEN);
+               __skb_queue_tail(&path->queue, skb);
+       } else {
+               ++dev->stats.tx_dropped;
+@@ -1026,13 +1033,15 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
+ {
+       struct ipoib_dev_priv *priv = netdev_priv(dev);
+       struct ipoib_neigh *neigh;
+-      struct ipoib_cb *cb = ipoib_skb_cb(skb);
++      struct ipoib_pseudo_header *phdr;
+       struct ipoib_header *header;
+       unsigned long flags;
++      phdr = (struct ipoib_pseudo_header *) skb->data;
++      skb_pull(skb, sizeof(*phdr));
+       header = (struct ipoib_header *) skb->data;
+-      if (unlikely(cb->hwaddr[4] == 0xff)) {
++      if (unlikely(phdr->hwaddr[4] == 0xff)) {
+               /* multicast, arrange "if" according to probability */
+               if ((header->proto != htons(ETH_P_IP)) &&
+                   (header->proto != htons(ETH_P_IPV6)) &&
+@@ -1045,13 +1054,13 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
+                       return NETDEV_TX_OK;
+               }
+               /* Add in the P_Key for multicast*/
+-              cb->hwaddr[8] = (priv->pkey >> 8) & 0xff;
+-              cb->hwaddr[9] = priv->pkey & 0xff;
++              phdr->hwaddr[8] = (priv->pkey >> 8) & 0xff;
++              phdr->hwaddr[9] = priv->pkey & 0xff;
+-              neigh = ipoib_neigh_get(dev, cb->hwaddr);
++              neigh = ipoib_neigh_get(dev, phdr->hwaddr);
+               if (likely(neigh))
+                       goto send_using_neigh;
+-              ipoib_mcast_send(dev, cb->hwaddr, skb);
++              ipoib_mcast_send(dev, phdr->hwaddr, skb);
+               return NETDEV_TX_OK;
+       }
+@@ -1060,16 +1069,16 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
+       case htons(ETH_P_IP):
+       case htons(ETH_P_IPV6):
+       case htons(ETH_P_TIPC):
+-              neigh = ipoib_neigh_get(dev, cb->hwaddr);
++              neigh = ipoib_neigh_get(dev, phdr->hwaddr);
+               if (unlikely(!neigh)) {
+-                      neigh_add_path(skb, cb->hwaddr, dev);
++                      neigh_add_path(skb, phdr->hwaddr, dev);
+                       return NETDEV_TX_OK;
+               }
+               break;
+       case htons(ETH_P_ARP):
+       case htons(ETH_P_RARP):
+               /* for unicast ARP and RARP should always perform path find */
+-              unicast_arp_send(skb, dev, cb);
++              unicast_arp_send(skb, dev, phdr);
+               return NETDEV_TX_OK;
+       default:
+               /* ethertype not supported by IPoIB */
+@@ -1086,11 +1095,13 @@ send_using_neigh:
+                       goto unref;
+               }
+       } else if (neigh->ah) {
+-              ipoib_send(dev, skb, neigh->ah, IPOIB_QPN(cb->hwaddr));
++              ipoib_send(dev, skb, neigh->ah, IPOIB_QPN(phdr->hwaddr));
+               goto unref;
+       }
+       if (skb_queue_len(&neigh->queue) < IPOIB_MAX_PATH_REC_QUEUE) {
++              /* put pseudoheader back on for next time */
++              skb_push(skb, sizeof(*phdr));
+               spin_lock_irqsave(&priv->lock, flags);
+               __skb_queue_tail(&neigh->queue, skb);
+               spin_unlock_irqrestore(&priv->lock, flags);
+@@ -1122,8 +1133,8 @@ static int ipoib_hard_header(struct sk_buff *skb,
+                            unsigned short type,
+                            const void *daddr, const void *saddr, unsigned len)
+ {
++      struct ipoib_pseudo_header *phdr;
+       struct ipoib_header *header;
+-      struct ipoib_cb *cb = ipoib_skb_cb(skb);
+       header = (struct ipoib_header *) skb_push(skb, sizeof *header);
+@@ -1132,12 +1143,13 @@ static int ipoib_hard_header(struct sk_buff *skb,
+       /*
+        * we don't rely on dst_entry structure,  always stuff the
+-       * destination address into skb->cb so we can figure out where
++       * destination address into skb hard header so we can figure out where
+        * to send the packet later.
+        */
+-      memcpy(cb->hwaddr, daddr, INFINIBAND_ALEN);
++      phdr = (struct ipoib_pseudo_header *) skb_push(skb, sizeof(*phdr));
++      memcpy(phdr->hwaddr, daddr, INFINIBAND_ALEN);
+-      return sizeof *header;
++      return IPOIB_HARD_LEN;
+ }
+ static void ipoib_set_mcast_list(struct net_device *dev)
+@@ -1759,7 +1771,7 @@ void ipoib_setup(struct net_device *dev)
+       dev->flags              |= IFF_BROADCAST | IFF_MULTICAST;
+-      dev->hard_header_len     = IPOIB_ENCAP_LEN;
++      dev->hard_header_len     = IPOIB_HARD_LEN;
+       dev->addr_len            = INFINIBAND_ALEN;
+       dev->type                = ARPHRD_INFINIBAND;
+       dev->tx_queue_len        = ipoib_sendq_size * 2;
+diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
+index d3394b6..1909dd2 100644
+--- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
++++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
+@@ -796,9 +796,11 @@ void ipoib_mcast_send(struct net_device *dev, u8 *daddr, struct sk_buff *skb)
+                       __ipoib_mcast_add(dev, mcast);
+                       list_add_tail(&mcast->list, &priv->multicast_list);
+               }
+-              if (skb_queue_len(&mcast->pkt_queue) < IPOIB_MAX_MCAST_QUEUE)
++              if (skb_queue_len(&mcast->pkt_queue) < IPOIB_MAX_MCAST_QUEUE) {
++                      /* put pseudoheader back on for next time */
++                      skb_push(skb, sizeof(struct ipoib_pseudo_header));
+                       skb_queue_tail(&mcast->pkt_queue, skb);
+-              else {
++              } else {
+                       ++dev->stats.tx_dropped;
+                       dev_kfree_skb_any(skb);
+               }
+-- 
+cgit v1.1
+
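
For context, a condensed sketch of the pseudo-header scheme this patch introduces (definitions and the RX helper are taken from the patch above; the surrounding kernel context, such as struct sk_buff and the exact header include paths, is assumed):

    #include <linux/skbuff.h>
    #include <linux/string.h>
    #include <linux/if_infiniband.h>        /* INFINIBAND_ALEN == 20 */

    #define IPOIB_ENCAP_LEN         4       /* real on-the-wire IPoIB header */
    #define IPOIB_PSEUDO_LEN        20      /* dummy header carrying the LL address */
    #define IPOIB_HARD_LEN          (IPOIB_ENCAP_LEN + IPOIB_PSEUDO_LEN)

    struct ipoib_pseudo_header {
            u8      hwaddr[INFINIBAND_ALEN];
    };

    /*
     * TX: ipoib_hard_header() pushes the destination LL address in front of
     * the 4-byte encap header (instead of stashing it in skb->cb, where GSO
     * segmentation could overwrite it); ipoib_start_xmit() pulls it back off.
     *
     * RX: only the encap header arrives, so a zeroed pseudo header is pushed
     * first, giving the skb a mac header that matches dev->hard_header_len.
     */
    static inline void skb_add_pseudo_hdr(struct sk_buff *skb)
    {
            char *data = skb_push(skb, IPOIB_PSEUDO_LEN);

            memset(data, 0, IPOIB_PSEUDO_LEN);
            skb_reset_mac_header(skb);
            skb_pull(skb, IPOIB_HARD_LEN);
    }

The on-the-wire format is unchanged; the extra 20 bytes exist only inside the skb, between header construction and transmit on the send side, and between the pseudo-header push and the final skb_pull() on the receive side.
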
diff --git a/linux-next-cherry-picks/0054-IB-qib-Remove-qpt-mask-global.patch b/linux-next-cherry-picks/0054-IB-qib-Remove-qpt-mask-global.patch
new file mode 100644 (file)
index 0000000..68f960e
--- /dev/null
@@ -0,0 +1,92 @@
+From 84b3adc2430eafd2eb703570075c3c141ea0ff13 Mon Sep 17 00:00:00 2001
+From: Dennis Dalessandro <dennis.dalessandro@intel.com>
+Date: Sun, 25 Sep 2016 07:41:05 -0700
+Subject: IB/qib: Remove qpt_mask global
+
+There is no need for a global qpt_mask, as a single global does not
+support the multi-chip model that qib has. Instead, rely on the value
+that already exists in the device data (dd).
+
+Fixes: 898fa52b4ac3 "IB/qib: Remove qpn, qp tables and related variables from qib"
+Reviewed-by: Mike Marciniszyn <mike.marciniszyn@intel.com>
+Signed-off-by: Dennis Dalessandro <dennis.dalessandro@intel.com>
+Signed-off-by: Doug Ledford <dledford@redhat.com>
+---
+ drivers/infiniband/hw/qib/qib.h       |  1 -
+ drivers/infiniband/hw/qib/qib_qp.c    | 13 +++----------
+ drivers/infiniband/hw/qib/qib_verbs.c |  2 --
+ 3 files changed, 3 insertions(+), 13 deletions(-)
+
+diff --git a/drivers/infiniband/hw/qib/qib.h b/drivers/infiniband/hw/qib/qib.h
+index bebc9a5..a3e21a2 100644
+--- a/drivers/infiniband/hw/qib/qib.h
++++ b/drivers/infiniband/hw/qib/qib.h
+@@ -1132,7 +1132,6 @@ extern spinlock_t qib_devs_lock;
+ extern struct qib_devdata *qib_lookup(int unit);
+ extern u32 qib_cpulist_count;
+ extern unsigned long *qib_cpulist;
+-extern u16 qpt_mask;
+ extern unsigned qib_cc_table_size;
+ int qib_init(struct qib_devdata *, int);
+diff --git a/drivers/infiniband/hw/qib/qib_qp.c b/drivers/infiniband/hw/qib/qib_qp.c
+index f9b8cd2..99d31ef 100644
+--- a/drivers/infiniband/hw/qib/qib_qp.c
++++ b/drivers/infiniband/hw/qib/qib_qp.c
+@@ -41,14 +41,6 @@
+ #include "qib.h"
+-/*
+- * mask field which was present in now deleted qib_qpn_table
+- * is not present in rvt_qpn_table. Defining the same field
+- * as qpt_mask here instead of adding the mask field to
+- * rvt_qpn_table.
+- */
+-u16 qpt_mask;
+-
+ static inline unsigned mk_qpn(struct rvt_qpn_table *qpt,
+                             struct rvt_qpn_map *map, unsigned off)
+ {
+@@ -57,7 +49,7 @@ static inline unsigned mk_qpn(struct rvt_qpn_table *qpt,
+ static inline unsigned find_next_offset(struct rvt_qpn_table *qpt,
+                                       struct rvt_qpn_map *map, unsigned off,
+-                                      unsigned n)
++                                      unsigned n, u16 qpt_mask)
+ {
+       if (qpt_mask) {
+               off++;
+@@ -179,6 +171,7 @@ int qib_alloc_qpn(struct rvt_dev_info *rdi, struct rvt_qpn_table *qpt,
+       struct qib_ibdev *verbs_dev = container_of(rdi, struct qib_ibdev, rdi);
+       struct qib_devdata *dd = container_of(verbs_dev, struct qib_devdata,
+                                             verbs_dev);
++      u16 qpt_mask = dd->qpn_mask;
+       if (type == IB_QPT_SMI || type == IB_QPT_GSI) {
+               unsigned n;
+@@ -215,7 +208,7 @@ int qib_alloc_qpn(struct rvt_dev_info *rdi, struct rvt_qpn_table *qpt,
+                               goto bail;
+                       }
+                       offset = find_next_offset(qpt, map, offset,
+-                              dd->n_krcv_queues);
++                              dd->n_krcv_queues, qpt_mask);
+                       qpn = mk_qpn(qpt, map, offset);
+                       /*
+                        * This test differs from alloc_pidmap().
+diff --git a/drivers/infiniband/hw/qib/qib_verbs.c b/drivers/infiniband/hw/qib/qib_verbs.c
+index c12ec8f..876ebb4 100644
+--- a/drivers/infiniband/hw/qib/qib_verbs.c
++++ b/drivers/infiniband/hw/qib/qib_verbs.c
+@@ -1606,8 +1606,6 @@ int qib_register_ib_device(struct qib_devdata *dd)
+       /* Only need to initialize non-zero fields. */
+       setup_timer(&dev->mem_timer, mem_timer, (unsigned long)dev);
+-      qpt_mask = dd->qpn_mask;
+-
+       INIT_LIST_HEAD(&dev->piowait);
+       INIT_LIST_HEAD(&dev->dmawait);
+       INIT_LIST_HEAD(&dev->txwait);
+-- 
+cgit v1.1
+
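
The shape of the fix, as an illustrative sketch (not the qib code itself; the names below are stand-ins and the offset-stepping rule is invented purely for illustration): a chip-specific parameter such as the QPN mask has to travel with the device data and be passed down explicitly, rather than live in a module-global that every chip would share.

    #include <linux/types.h>

    struct example_devdata {                /* stand-in for struct qib_devdata */
            u16 qpn_mask;                   /* chip-specific, set at device init */
    };

    /* the mask is now an explicit parameter ... */
    static unsigned int find_next_offset(unsigned int off, u16 qpt_mask)
    {
            if (qpt_mask)                   /* illustrative stepping rule only */
                    off = (off | qpt_mask) + 1;
            else
                    off++;
            return off;
    }

    /* ... and every caller reads it from its own device data, so two chips
     * with different masks no longer collide on one shared global */
    static unsigned int next_qpn_offset(struct example_devdata *dd,
                                        unsigned int off)
    {
            u16 qpt_mask = dd->qpn_mask;

            return find_next_offset(off, qpt_mask);
    }
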
diff --git a/linux-next-cherry-picks/0055-IB-rdmavt-Correct-sparse-annotation.patch b/linux-next-cherry-picks/0055-IB-rdmavt-Correct-sparse-annotation.patch
new file mode 100644 (file)
index 0000000..7467eaa
--- /dev/null
@@ -0,0 +1,43 @@
+From eefa1d8961584c5b76afded94960ca4344bc638b Mon Sep 17 00:00:00 2001
+From: Mike Marciniszyn <mike.marciniszyn@intel.com>
+Date: Tue, 6 Sep 2016 04:36:33 -0700
+Subject: IB/rdmavt: Correct sparse annotation
+
+The __must_hold() annotation is sufficient to correct the sparse
+context imbalance inside a function.
+
+Per Documentation/sparse.txt:
+__must_hold - The specified lock is held on function entry and exit.
+
+Fixes: Commit c0a67f6ba356 ("IB/rdmavt: Annotate rvt_reset_qp()")
+Reviewed-by: Dennis Dalessandro <dennis.dalessandro@intel.com>
+Signed-off-by: Mike Marciniszyn <mike.marciniszyn@intel.com>
+Signed-off-by: Dennis Dalessandro <dennis.dalessandro@intel.com>
+Signed-off-by: Doug Ledford <dledford@redhat.com>
+---
+ drivers/infiniband/sw/rdmavt/qp.c | 9 +++------
+ 1 file changed, 3 insertions(+), 6 deletions(-)
+
+diff --git a/drivers/infiniband/sw/rdmavt/qp.c b/drivers/infiniband/sw/rdmavt/qp.c
+index da5c8d6..80d2c50 100644
+--- a/drivers/infiniband/sw/rdmavt/qp.c
++++ b/drivers/infiniband/sw/rdmavt/qp.c
+@@ -500,12 +500,9 @@ static void rvt_remove_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp)
+  */
+ static void rvt_reset_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp,
+                 enum ib_qp_type type)
+-      __releases(&qp->s_lock)
+-      __releases(&qp->s_hlock)
+-      __releases(&qp->r_lock)
+-      __acquires(&qp->r_lock)
+-      __acquires(&qp->s_hlock)
+-      __acquires(&qp->s_lock)
++      __must_hold(&qp->r_lock)
++      __must_hold(&qp->s_hlock)
++      __must_hold(&qp->s_lock)
+ {
+       if (qp->state != IB_QPS_RESET) {
+               qp->state = IB_QPS_RESET;
+-- 
+cgit v1.1
+
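
A minimal sketch of the annotation style the patch switches to, using hypothetical helpers (the counter functions below are not from the patch): __must_hold() tells sparse that the lock is already held on entry and is still held on exit, so a function that never changes the lock state needs no __releases()/__acquires() pair.

    #include <linux/spinlock.h>

    /* hypothetical helper: runs entirely under the caller's lock */
    static void update_counter_locked(spinlock_t *lock, unsigned long *counter)
            __must_hold(lock)
    {
            (*counter)++;           /* 'lock' is held for the whole body */
    }

    /* hypothetical caller: takes and releases the lock around the helper */
    static void update_counter(spinlock_t *lock, unsigned long *counter)
    {
            spin_lock(lock);
            update_counter_locked(lock, counter);
            spin_unlock(lock);
    }

In the patch above, rvt_reset_qp() enters with r_lock, s_hlock and s_lock held and returns with all three still held, so the three __must_hold() lines replace the six __releases()/__acquires() lines.
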
diff --git a/linux-next-cherry-picks/0056-IB-rdmavt-rdmavt-can-handle-non-aligned-page-maps.patch b/linux-next-cherry-picks/0056-IB-rdmavt-rdmavt-can-handle-non-aligned-page-maps.patch
new file mode 100644 (file)
index 0000000..ad614df
--- /dev/null
@@ -0,0 +1,41 @@
+From e1fafdcbe0e3e769c6a83317dd845bc99b4fe61d Mon Sep 17 00:00:00 2001
+From: Dennis Dalessandro <dennis.dalessandro@intel.com>
+Date: Mon, 10 Oct 2016 06:14:45 -0700
+Subject: IB/rdmavt: rdmavt can handle non aligned page maps
+
+The initial code for rdmavt carried a restriction, a vestige of the
+qib driver, that a DMA map request had to fit within a single page.
+This is not the case on modern hardware; both qib and hfi1 handle
+unaligned map requests just fine.
+
+This fixes a 4.8 regression whereby an IPoIB transfer of > PAGE_SIZE
+hangs because the dma map page call always fails. This was introduced
+after commit 5faba5469522 ("IB/ipoib: Report SG feature regardless of
+HW UD CSUM capability") added the capability to use SG by default.
+Rather than override this, allow SG, since the hardware supports it.
+
+Cc: Stable <stable@vger.kernel.org> # 4.8
+Reviewed-by: Mike Marciniszyn <mike.marciniszyn@intel.com>
+Signed-off-by: Dennis Dalessandro <dennis.dalessandro@intel.com>
+Signed-off-by: Doug Ledford <dledford@redhat.com>
+---
+ drivers/infiniband/sw/rdmavt/dma.c | 3 ---
+ 1 file changed, 3 deletions(-)
+
+diff --git a/drivers/infiniband/sw/rdmavt/dma.c b/drivers/infiniband/sw/rdmavt/dma.c
+index 01f71ca..f2cefb0 100644
+--- a/drivers/infiniband/sw/rdmavt/dma.c
++++ b/drivers/infiniband/sw/rdmavt/dma.c
+@@ -90,9 +90,6 @@ static u64 rvt_dma_map_page(struct ib_device *dev, struct page *page,
+       if (WARN_ON(!valid_dma_direction(direction)))
+               return BAD_DMA_ADDRESS;
+-      if (offset + size > PAGE_SIZE)
+-              return BAD_DMA_ADDRESS;
+-
+       addr = (u64)page_address(page);
+       if (addr)
+               addr += offset;
+-- 
+cgit v1.1
+
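
For reference, a condensed "after" view of the mapping routine assembled from the hunk above (the BAD_DMA_ADDRESS definition, the include paths, and the trailing return are assumed from rdmavt's dma.c rather than shown in the hunk):

    #include <linux/dma-mapping.h>          /* valid_dma_direction() */
    #include <linux/mm.h>                   /* page_address() */
    #include <rdma/ib_verbs.h>

    #define BAD_DMA_ADDRESS ((u64)0)        /* as in rdmavt's dma.c (assumed) */

    static u64 rvt_dma_map_page(struct ib_device *dev, struct page *page,
                                unsigned long offset, size_t size,
                                enum dma_data_direction direction)
    {
            u64 addr;

            if (WARN_ON(!valid_dma_direction(direction)))
                    return BAD_DMA_ADDRESS;

            /* the "offset + size > PAGE_SIZE" rejection is gone: a request
             * spanning more than one page (e.g. a large IPoIB SG transfer)
             * now maps instead of always failing and hanging the transfer */
            addr = (u64)page_address(page);
            if (addr)
                    addr += offset;

            return addr;
    }
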