]> git.openfabrics.org - ~aditr/compat-rdma.git/commitdiff
Fixes for i40iw which have been included in kernels > 4.8
authorTatyana Nikolova <tatyana.e.nikolova@intel.com>
Wed, 19 Apr 2017 16:32:44 +0000 (09:32 -0700)
committerTatyana Nikolova <tatyana.e.nikolova@intel.com>
Wed, 19 Apr 2017 16:45:03 +0000 (09:45 -0700)
Signed-off-by: Tatyana Nikolova <tatyana.e.nikolova@intel.com>
40 files changed:
linux-next-cherry-picks/0011-i40iw_cm-Remove-deprecated-create_singlethread_workq.patch [new file with mode: 0755]
linux-next-cherry-picks/0012-IB-i40iw-Remove-debug-prints-after-allocation-failur.patch [new file with mode: 0755]
linux-next-cherry-picks/0013-i40iw-Add-Quality-of-Service-support.patch [new file with mode: 0755]
linux-next-cherry-picks/0014-i40iw-Enable-message-packing.patch [new file with mode: 0755]
linux-next-cherry-picks/0015-i40iw-Remove-workaround-for-pre-production-errata.patch [new file with mode: 0755]
linux-next-cherry-picks/0016-i40iw-Set-MAX-IRD-MAX-ORD-size-to-max-supported-valu.patch [new file with mode: 0755]
linux-next-cherry-picks/0017-i40iw-Convert-page_size-to-encoded-value.patch [new file with mode: 0755]
linux-next-cherry-picks/0018-i40iw-Use-vector-when-creating-CQs.patch [new file with mode: 0755]
linux-next-cherry-picks/0019-i40iw-Correct-values-for-max_recv_sge-max_send_sge.patch [new file with mode: 0755]
linux-next-cherry-picks/0020-i40iw-Fix-for-LAN-handler-removal.patch [new file with mode: 0755]
linux-next-cherry-picks/0021-i40iw-Optimize-inline-data-copy.patch [new file with mode: 0755]
linux-next-cherry-picks/0022-i40iw-Query-device-accounts-for-internal-rsrc.patch [new file with mode: 0755]
linux-next-cherry-picks/0023-i40iw-Remove-checks-for-more-than-48-bytes-inline-da.patch [new file with mode: 0755]
linux-next-cherry-picks/0024-i40iw-Remove-NULL-check-for-cm_node-iwdev.patch [new file with mode: 0755]
linux-next-cherry-picks/0025-i40iw-Use-actual-page-size.patch [new file with mode: 0755]
linux-next-cherry-picks/0026-i40iw-Use-runtime-check-for-IS_ENABLED-CONFIG_IPV6.patch [new file with mode: 0755]
linux-next-cherry-picks/0027-i40iw-Remove-check-on-return-from-device_init_pestat.patch [new file with mode: 0755]
linux-next-cherry-picks/0028-i40iw-Remove-variable-flush_code-and-check-to-set-qp.patch [new file with mode: 0755]
linux-next-cherry-picks/0029-i40iw-Fix-incorrect-assignment-of-SQ-head.patch [new file with mode: 0755]
linux-next-cherry-picks/0030-i40iw-Utilize-physically-mapped-memory-regions.patch [new file with mode: 0755]
linux-next-cherry-picks/0031-i40iw-Add-2MB-page-support.patch [new file with mode: 0755]
linux-next-cherry-picks/0032-i40iw-Add-missing-cleanup-on-device-close.patch [new file with mode: 0755]
linux-next-cherry-picks/0033-i40iw-Add-IP-addr-handling-on-netdev-events.patch [new file with mode: 0755]
linux-next-cherry-picks/0034-i40iw-Replace-list_for_each_entry-macro-with-safe-ve.patch [new file with mode: 0755]
linux-next-cherry-picks/0035-i40iw-Add-NULL-check-for-ibqp-event-handler.patch [new file with mode: 0755]
linux-next-cherry-picks/0036-i40iw-Set-TOS-field-in-IP-header.patch [new file with mode: 0755]
linux-next-cherry-picks/0037-i40iw-Fill-in-IRD-value-when-on-connect-request.patch [new file with mode: 0755]
linux-next-cherry-picks/0038-i40iw-Correctly-fail-loopback-connection-if-no-liste.patch [new file with mode: 0755]
linux-next-cherry-picks/0039-i40iw-Code-cleanup-remove-check-of-PBLE-pages.patch [new file with mode: 0755]
linux-next-cherry-picks/0040-i40iw-Add-request-for-reset-on-CQP-timeout.patch [new file with mode: 0755]
linux-next-cherry-picks/0041-i40iw-Remove-macros-I40IW_STAG_KEY_FROM_STAG-and-I40.patch [new file with mode: 0755]
linux-next-cherry-picks/0042-i40iw-Use-correct-src-address-in-memcpy-to-rdma-stat.patch [new file with mode: 0755]
linux-next-cherry-picks/0043-i40iw-Fix-double-free-of-QP.patch [new file with mode: 0755]
linux-next-cherry-picks/0044-i40iw-Fix-QP-flush-to-not-hang-on-empty-queues-or-fa.patch [new file with mode: 0755]
linux-next-cherry-picks/0045-i40iw-Fix-memory-leak-in-CQP-destroy-when-in-reset.patch [new file with mode: 0755]
linux-next-cherry-picks/0046-i40iw-Fix-race-condition-in-terminate-timer-s-handle.patch [new file with mode: 0755]
linux-next-cherry-picks/0047-i40iw-Assign-MSS-only-when-it-is-a-new-MTU.patch [new file with mode: 0755]
linux-next-cherry-picks/0048-i40iw-Fix-incorrect-check-for-error.patch [new file with mode: 0755]
linux-next-cherry-picks/0049-i40iw-Reorganize-structures-to-align-with-HW-capabil.patch [new file with mode: 0755]
linux-next-cherry-picks/0051-i40iw-Set-128B-as-the-only-supported-RQ-WQE-size.patch [new file with mode: 0755]

diff --git a/linux-next-cherry-picks/0011-i40iw_cm-Remove-deprecated-create_singlethread_workq.patch b/linux-next-cherry-picks/0011-i40iw_cm-Remove-deprecated-create_singlethread_workq.patch
new file mode 100755 (executable)
index 0000000..9e234ba
--- /dev/null
@@ -0,0 +1,48 @@
+From 5e9ff9b0bf0a20e87c75506604a1233f6c37f335 Mon Sep 17 00:00:00 2001
+From: Bhaktipriya Shridhar <bhaktipriya96@gmail.com>
+Date: Mon, 15 Aug 2016 23:40:53 +0530
+Subject: [PATCH 11/52] i40iw_cm: Remove deprecated
+ create_singlethread_workqueue
+
+alloc_ordered_workqueue() with WQ_MEM_RECLAIM set, replaces
+deprecated create_singlethread_workqueue(). This is the identity
+conversion.
+
+The workqueue "event_wq" is involved in event handling and queues
+i40iw_cm_event_handler.
+
+The workqueue "disconn_wq" is involved in closing connection and queues
+i40iw_disconnect_worker.
+
+Both workqueues have been identity converted.
+
+WQ_MEM_RECLAIM has been set to ensure forward progress under memory
+pressure.
+
+Signed-off-by: Bhaktipriya Shridhar <bhaktipriya96@gmail.com>
+Signed-off-by: Doug Ledford <dledford@redhat.com>
+---
+ drivers/infiniband/hw/i40iw/i40iw_cm.c | 7 +++++--
+ 1 file changed, 5 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/infiniband/hw/i40iw/i40iw_cm.c b/drivers/infiniband/hw/i40iw/i40iw_cm.c
+index 5026dc7..c490f8d 100644
+--- a/drivers/infiniband/hw/i40iw/i40iw_cm.c
++++ b/drivers/infiniband/hw/i40iw/i40iw_cm.c
+@@ -3166,8 +3166,11 @@ void i40iw_setup_cm_core(struct i40iw_device *iwdev)
+       spin_lock_init(&cm_core->ht_lock);
+       spin_lock_init(&cm_core->listen_list_lock);
+-      cm_core->event_wq = create_singlethread_workqueue("iwewq");
+-      cm_core->disconn_wq = create_singlethread_workqueue("iwdwq");
++      cm_core->event_wq = alloc_ordered_workqueue("iwewq",
++                                                  WQ_MEM_RECLAIM);
++
++      cm_core->disconn_wq = alloc_ordered_workqueue("iwdwq",
++                                                    WQ_MEM_RECLAIM);
+ }
+ /**
+-- 
+1.8.3.1
+
diff --git a/linux-next-cherry-picks/0012-IB-i40iw-Remove-debug-prints-after-allocation-failur.patch b/linux-next-cherry-picks/0012-IB-i40iw-Remove-debug-prints-after-allocation-failur.patch
new file mode 100755 (executable)
index 0000000..a0391de
--- /dev/null
@@ -0,0 +1,69 @@
+From 315b41480bb956b223a04c6556f04c9b7c74c8c2 Mon Sep 17 00:00:00 2001
+From: Leon Romanovsky <leon@kernel.org>
+Date: Thu, 3 Nov 2016 16:44:17 +0200
+Subject: [PATCH 12/52] IB/i40iw: Remove debug prints after allocation failure
+
+The prints after [k|v][m|z|c]alloc() functions are not needed,
+because in case of failure, allocator will print their internal
+error prints anyway.
+
+Signed-off-by: Leon Romanovsky <leon@kernel.org>
+Signed-off-by: Doug Ledford <dledford@redhat.com>
+---
+ drivers/infiniband/hw/i40iw/i40iw_cm.c    | 2 --
+ drivers/infiniband/hw/i40iw/i40iw_main.c  | 5 ++---
+ drivers/infiniband/hw/i40iw/i40iw_verbs.c | 1 -
+ 3 files changed, 2 insertions(+), 6 deletions(-)
+
+diff --git a/drivers/infiniband/hw/i40iw/i40iw_cm.c b/drivers/infiniband/hw/i40iw/i40iw_cm.c
+index 8563769..47d1bbc 100644
+--- a/drivers/infiniband/hw/i40iw/i40iw_cm.c
++++ b/drivers/infiniband/hw/i40iw/i40iw_cm.c
+@@ -1675,7 +1675,6 @@ static enum i40iw_status_code i40iw_add_mqh_6(struct i40iw_device *iwdev,
+                                           "Allocating child listener %p\n",
+                                           child_listen_node);
+                               if (!child_listen_node) {
+-                                      i40iw_pr_err("listener memory allocation\n");
+                                       ret = I40IW_ERR_NO_MEMORY;
+                                       goto exit;
+                               }
+@@ -1751,7 +1750,6 @@ static enum i40iw_status_code i40iw_add_mqh_4(
+                                           "Allocating child listener %p\n",
+                                           child_listen_node);
+                               if (!child_listen_node) {
+-                                      i40iw_pr_err("listener memory allocation\n");
+                                       in_dev_put(idev);
+                                       ret = I40IW_ERR_NO_MEMORY;
+                                       goto exit;
+diff --git a/drivers/infiniband/hw/i40iw/i40iw_main.c b/drivers/infiniband/hw/i40iw/i40iw_main.c
+index ac2f3cd..a6ad913 100644
+--- a/drivers/infiniband/hw/i40iw/i40iw_main.c
++++ b/drivers/infiniband/hw/i40iw/i40iw_main.c
+@@ -1301,10 +1301,9 @@ static enum i40iw_status_code i40iw_initialize_dev(struct i40iw_device *iwdev,
+       size = sizeof(struct i40iw_hmc_pble_rsrc) + sizeof(struct i40iw_hmc_info) +
+                               (sizeof(struct i40iw_hmc_obj_info) * I40IW_HMC_IW_MAX);
+       iwdev->hmc_info_mem = kzalloc(size, GFP_KERNEL);
+-      if (!iwdev->hmc_info_mem) {
+-              i40iw_pr_err("memory alloc fail\n");
++      if (!iwdev->hmc_info_mem)
+               return I40IW_ERR_NO_MEMORY;
+-      }
++
+       iwdev->pble_rsrc = (struct i40iw_hmc_pble_rsrc *)iwdev->hmc_info_mem;
+       dev->hmc_info = &iwdev->hw.hmc;
+       dev->hmc_info->hmc_obj = (struct i40iw_hmc_obj_info *)(iwdev->pble_rsrc + 1);
+diff --git a/drivers/infiniband/hw/i40iw/i40iw_verbs.c b/drivers/infiniband/hw/i40iw/i40iw_verbs.c
+index 6329c97..62e068b 100644
+--- a/drivers/infiniband/hw/i40iw/i40iw_verbs.c
++++ b/drivers/infiniband/hw/i40iw/i40iw_verbs.c
+@@ -2654,7 +2654,6 @@ static struct i40iw_ib_device *i40iw_init_rdma_device(struct i40iw_device *iwdev
+       iwibdev->ibdev.iwcm = kzalloc(sizeof(*iwibdev->ibdev.iwcm), GFP_KERNEL);
+       if (!iwibdev->ibdev.iwcm) {
+               ib_dealloc_device(&iwibdev->ibdev);
+-              i40iw_pr_err("iwcm == NULL\n");
+               return NULL;
+       }
+-- 
+1.8.3.1
+
diff --git a/linux-next-cherry-picks/0013-i40iw-Add-Quality-of-Service-support.patch b/linux-next-cherry-picks/0013-i40iw-Add-Quality-of-Service-support.patch
new file mode 100755 (executable)
index 0000000..cf5c5d3
--- /dev/null
@@ -0,0 +1,805 @@
+From 0fc2dc58896f182daeeb4a7b5fc8d763afec3117 Mon Sep 17 00:00:00 2001
+From: Henry Orosco <henry.orosco@intel.com>
+Date: Mon, 10 Oct 2016 21:12:10 -0500
+Subject: [PATCH 13/52] i40iw: Add Quality of Service support
+
+Add support for QoS on QPs. Upon device initialization,
+a map is created from user priority to queue set
+handles. On QP creation, use ToS to look up the queue
+set handle for use with the QP.
+
+Signed-off-by: Faisal Latif <faisal.latif@intel.com>
+Signed-off-by: Shiraz Saleem <shiraz.saleem@intel.com>
+Signed-off-by: Henry Orosco <henry.orosco@intel.com>
+Signed-off-by: Doug Ledford <dledford@redhat.com>
+---
+ drivers/infiniband/hw/i40iw/i40iw.h       |   9 ++
+ drivers/infiniband/hw/i40iw/i40iw_cm.c    |  30 +++++-
+ drivers/infiniband/hw/i40iw/i40iw_cm.h    |   2 +-
+ drivers/infiniband/hw/i40iw/i40iw_ctrl.c  | 151 +++++++++++++++++++++++++++++-
+ drivers/infiniband/hw/i40iw/i40iw_d.h     |   2 +
+ drivers/infiniband/hw/i40iw/i40iw_hw.c    |  25 ++---
+ drivers/infiniband/hw/i40iw/i40iw_main.c  |  66 +++++++++++--
+ drivers/infiniband/hw/i40iw/i40iw_osdep.h |   2 +
+ drivers/infiniband/hw/i40iw/i40iw_p.h     |   2 +
+ drivers/infiniband/hw/i40iw/i40iw_puda.c  |   3 +-
+ drivers/infiniband/hw/i40iw/i40iw_type.h  |  18 +++-
+ drivers/infiniband/hw/i40iw/i40iw_utils.c |  45 +++++++++
+ drivers/infiniband/hw/i40iw/i40iw_verbs.c |   6 +-
+ 13 files changed, 325 insertions(+), 36 deletions(-)
+
+diff --git a/drivers/infiniband/hw/i40iw/i40iw.h b/drivers/infiniband/hw/i40iw/i40iw.h
+index 8ec09e4..4a0c12b 100644
+--- a/drivers/infiniband/hw/i40iw/i40iw.h
++++ b/drivers/infiniband/hw/i40iw/i40iw.h
+@@ -210,6 +210,12 @@ struct i40iw_msix_vector {
+       u32 ceq_id;
+ };
++struct l2params_work {
++      struct work_struct work;
++      struct i40iw_device *iwdev;
++      struct i40iw_l2params l2params;
++};
++
+ #define I40IW_MSIX_TABLE_SIZE   65
+ struct virtchnl_work {
+@@ -514,6 +520,9 @@ enum i40iw_status_code i40iw_add_mac_addr(struct i40iw_device *iwdev,
+ void i40iw_hw_modify_qp(struct i40iw_device *iwdev, struct i40iw_qp *iwqp,
+                       struct i40iw_modify_qp_info *info, bool wait);
++void i40iw_qp_suspend_resume(struct i40iw_sc_dev *dev,
++                           struct i40iw_sc_qp *qp,
++                           bool suspend);
+ enum i40iw_status_code i40iw_manage_qhash(struct i40iw_device *iwdev,
+                                         struct i40iw_cm_info *cminfo,
+                                         enum i40iw_quad_entry_type etype,
+diff --git a/drivers/infiniband/hw/i40iw/i40iw_cm.c b/drivers/infiniband/hw/i40iw/i40iw_cm.c
+index 8563769..24b22e9 100644
+--- a/drivers/infiniband/hw/i40iw/i40iw_cm.c
++++ b/drivers/infiniband/hw/i40iw/i40iw_cm.c
+@@ -221,6 +221,7 @@ static void i40iw_get_addr_info(struct i40iw_cm_node *cm_node,
+       memcpy(cm_info->rem_addr, cm_node->rem_addr, sizeof(cm_info->rem_addr));
+       cm_info->loc_port = cm_node->loc_port;
+       cm_info->rem_port = cm_node->rem_port;
++      cm_info->user_pri = cm_node->user_pri;
+ }
+ /**
+@@ -396,6 +397,7 @@ static struct i40iw_puda_buf *i40iw_form_cm_frame(struct i40iw_cm_node *cm_node,
+       u32 opts_len = 0;
+       u32 pd_len = 0;
+       u32 hdr_len = 0;
++      u16 vtag;
+       sqbuf = i40iw_puda_get_bufpool(dev->ilq);
+       if (!sqbuf)
+@@ -445,7 +447,8 @@ static struct i40iw_puda_buf *i40iw_form_cm_frame(struct i40iw_cm_node *cm_node,
+               ether_addr_copy(ethh->h_source, cm_node->loc_mac);
+               if (cm_node->vlan_id < VLAN_TAG_PRESENT) {
+                       ((struct vlan_ethhdr *)ethh)->h_vlan_proto = htons(ETH_P_8021Q);
+-                      ((struct vlan_ethhdr *)ethh)->h_vlan_TCI = htons(cm_node->vlan_id);
++                      vtag = (cm_node->user_pri << VLAN_PRIO_SHIFT) | cm_node->vlan_id;
++                      ((struct vlan_ethhdr *)ethh)->h_vlan_TCI = htons(vtag);
+                       ((struct vlan_ethhdr *)ethh)->h_vlan_encapsulated_proto = htons(ETH_P_IP);
+               } else {
+@@ -474,7 +477,8 @@ static struct i40iw_puda_buf *i40iw_form_cm_frame(struct i40iw_cm_node *cm_node,
+               ether_addr_copy(ethh->h_source, cm_node->loc_mac);
+               if (cm_node->vlan_id < VLAN_TAG_PRESENT) {
+                       ((struct vlan_ethhdr *)ethh)->h_vlan_proto = htons(ETH_P_8021Q);
+-                      ((struct vlan_ethhdr *)ethh)->h_vlan_TCI = htons(cm_node->vlan_id);
++                      vtag = (cm_node->user_pri << VLAN_PRIO_SHIFT) | cm_node->vlan_id;
++                      ((struct vlan_ethhdr *)ethh)->h_vlan_TCI = htons(vtag);
+                       ((struct vlan_ethhdr *)ethh)->h_vlan_encapsulated_proto = htons(ETH_P_IPV6);
+               } else {
+                       ethh->h_proto = htons(ETH_P_IPV6);
+@@ -1880,6 +1884,7 @@ static int i40iw_dec_refcnt_listen(struct i40iw_cm_core *cm_core,
+                       nfo.loc_port = listener->loc_port;
+                       nfo.ipv4 = listener->ipv4;
+                       nfo.vlan_id = listener->vlan_id;
++                      nfo.user_pri = listener->user_pri;
+                       if (!list_empty(&listener->child_listen_list)) {
+                               i40iw_del_multiple_qhash(listener->iwdev, &nfo, listener);
+@@ -2138,6 +2143,11 @@ static struct i40iw_cm_node *i40iw_make_cm_node(
+       /* set our node specific transport info */
+       cm_node->ipv4 = cm_info->ipv4;
+       cm_node->vlan_id = cm_info->vlan_id;
++      if ((cm_node->vlan_id == I40IW_NO_VLAN) && iwdev->dcb)
++              cm_node->vlan_id = 0;
++      cm_node->user_pri = cm_info->user_pri;
++      if (listener)
++              cm_node->user_pri = listener->user_pri;
+       memcpy(cm_node->loc_addr, cm_info->loc_addr, sizeof(cm_node->loc_addr));
+       memcpy(cm_node->rem_addr, cm_info->rem_addr, sizeof(cm_node->rem_addr));
+       cm_node->loc_port = cm_info->loc_port;
+@@ -3055,6 +3065,7 @@ void i40iw_receive_ilq(struct i40iw_sc_dev *dev, struct i40iw_puda_buf *rbuf)
+       struct i40iw_device *iwdev = (struct i40iw_device *)dev->back_dev;
+       struct i40iw_cm_core *cm_core = &iwdev->cm_core;
+       struct vlan_ethhdr *ethh;
++      u16 vtag;
+       /* if vlan, then maclen = 18 else 14 */
+       iph = (struct iphdr *)rbuf->iph;
+@@ -3068,7 +3079,9 @@ void i40iw_receive_ilq(struct i40iw_sc_dev *dev, struct i40iw_puda_buf *rbuf)
+       ethh = (struct vlan_ethhdr *)rbuf->mem.va;
+       if (ethh->h_vlan_proto == htons(ETH_P_8021Q)) {
+-              cm_info.vlan_id = ntohs(ethh->h_vlan_TCI) & VLAN_VID_MASK;
++              vtag = ntohs(ethh->h_vlan_TCI);
++              cm_info.user_pri = (vtag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
++              cm_info.vlan_id = vtag & VLAN_VID_MASK;
+               i40iw_debug(cm_core->dev,
+                           I40IW_DEBUG_CM,
+                           "%s vlan_id=%d\n",
+@@ -3309,6 +3322,8 @@ static void i40iw_cm_init_tsa_conn(struct i40iw_qp *iwqp,
+       ctx_info->tcp_info_valid = true;
+       ctx_info->iwarp_info_valid = true;
++      ctx_info->add_to_qoslist = true;
++      ctx_info->user_pri = cm_node->user_pri;
+       i40iw_init_tcp_ctx(cm_node, &tcp_info, iwqp);
+       if (cm_node->snd_mark_en) {
+@@ -3326,6 +3341,7 @@ static void i40iw_cm_init_tsa_conn(struct i40iw_qp *iwqp,
+       /* once tcp_info is set, no need to do it again */
+       ctx_info->tcp_info_valid = false;
+       ctx_info->iwarp_info_valid = false;
++      ctx_info->add_to_qoslist = false;
+ }
+ /**
+@@ -3759,6 +3775,9 @@ int i40iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
+               i40iw_netdev_vlan_ipv6(cm_info.loc_addr, &cm_info.vlan_id, NULL);
+       }
+       cm_info.cm_id = cm_id;
++      cm_info.user_pri = rt_tos2priority(cm_id->tos);
++      i40iw_debug(&iwdev->sc_dev, I40IW_DEBUG_DCB, "%s TOS:[%d] UP:[%d]\n",
++                  __func__, cm_id->tos, cm_info.user_pri);
+       if ((cm_info.ipv4 && (laddr->sin_addr.s_addr != raddr->sin_addr.s_addr)) ||
+           (!cm_info.ipv4 && memcmp(laddr6->sin6_addr.in6_u.u6_addr32,
+                                    raddr6->sin6_addr.in6_u.u6_addr32,
+@@ -3904,6 +3923,11 @@ int i40iw_create_listen(struct iw_cm_id *cm_id, int backlog)
+       cm_id->provider_data = cm_listen_node;
++      cm_listen_node->user_pri = rt_tos2priority(cm_id->tos);
++      cm_info.user_pri = cm_listen_node->user_pri;
++      i40iw_debug(&iwdev->sc_dev, I40IW_DEBUG_DCB, "%s TOS:[%d] UP:[%d]\n",
++                  __func__, cm_id->tos, cm_listen_node->user_pri);
++
+       if (!cm_listen_node->reused_node) {
+               if (wildcard) {
+                       if (cm_info.ipv4)
+diff --git a/drivers/infiniband/hw/i40iw/i40iw_cm.h b/drivers/infiniband/hw/i40iw/i40iw_cm.h
+index e9046d9..945ed26 100644
+--- a/drivers/infiniband/hw/i40iw/i40iw_cm.h
++++ b/drivers/infiniband/hw/i40iw/i40iw_cm.h
+@@ -368,7 +368,7 @@ struct i40iw_cm_info {
+       u32 rem_addr[4];
+       u16 vlan_id;
+       int backlog;
+-      u16 user_pri;
++      u8 user_pri;
+       bool ipv4;
+ };
+diff --git a/drivers/infiniband/hw/i40iw/i40iw_ctrl.c b/drivers/infiniband/hw/i40iw/i40iw_ctrl.c
+index 2c4b4d0..31c4a0c 100644
+--- a/drivers/infiniband/hw/i40iw/i40iw_ctrl.c
++++ b/drivers/infiniband/hw/i40iw/i40iw_ctrl.c
+@@ -223,6 +223,133 @@ static enum i40iw_status_code i40iw_sc_parse_fpm_query_buf(
+ }
+ /**
++ * i40iw_fill_qos_list - Change all unknown qs handles to available ones
++ * @qs_list: list of qs_handles to be fixed with valid qs_handles
++ */
++static void i40iw_fill_qos_list(u16 *qs_list)
++{
++      u16 qshandle = qs_list[0];
++      int i;
++
++      for (i = 0; i < I40IW_MAX_USER_PRIORITY; i++) {
++              if (qs_list[i] == QS_HANDLE_UNKNOWN)
++                      qs_list[i] = qshandle;
++              else
++                      qshandle = qs_list[i];
++      }
++}
++
++/**
++ * i40iw_qp_from_entry - Given entry, get to the qp structure
++ * @entry: Points to list of qp structure
++ */
++static struct i40iw_sc_qp *i40iw_qp_from_entry(struct list_head *entry)
++{
++      if (!entry)
++              return NULL;
++
++      return (struct i40iw_sc_qp *)((char *)entry - offsetof(struct i40iw_sc_qp, list));
++}
++
++/**
++ * i40iw_get_qp - get the next qp from the list given current qp
++ * @head: Listhead of qp's
++ * @qp: current qp
++ */
++static struct i40iw_sc_qp *i40iw_get_qp(struct list_head *head, struct i40iw_sc_qp *qp)
++{
++      struct list_head *entry = NULL;
++      struct list_head *lastentry;
++
++      if (list_empty(head))
++              return NULL;
++
++      if (!qp) {
++              entry = head->next;
++      } else {
++              lastentry = &qp->list;
++              entry = (lastentry != head) ? lastentry->next : NULL;
++      }
++
++      return i40iw_qp_from_entry(entry);
++}
++
++/**
++ * i40iw_change_l2params - given the new l2 parameters, change all qp
++ * @dev: IWARP device pointer
++ * @l2params: New paramaters from l2
++ */
++void i40iw_change_l2params(struct i40iw_sc_dev *dev, struct i40iw_l2params *l2params)
++{
++      struct i40iw_sc_qp *qp = NULL;
++      bool qs_handle_change = false;
++      bool mss_change = false;
++      unsigned long flags;
++      u16 qs_handle;
++      int i;
++
++      if (dev->mss != l2params->mss) {
++              mss_change = true;
++              dev->mss = l2params->mss;
++      }
++
++      i40iw_fill_qos_list(l2params->qs_handle_list);
++      for (i = 0; i < I40IW_MAX_USER_PRIORITY; i++) {
++              qs_handle = l2params->qs_handle_list[i];
++              if (dev->qos[i].qs_handle != qs_handle)
++                      qs_handle_change = true;
++              else if (!mss_change)
++                      continue;       /* no MSS nor qs handle change */
++              spin_lock_irqsave(&dev->qos[i].lock, flags);
++              qp = i40iw_get_qp(&dev->qos[i].qplist, qp);
++              while (qp) {
++                      if (mss_change)
++                              i40iw_qp_mss_modify(dev, qp);
++                      if (qs_handle_change) {
++                              qp->qs_handle = qs_handle;
++                              /* issue cqp suspend command */
++                              i40iw_qp_suspend_resume(dev, qp, true);
++                      }
++                      qp = i40iw_get_qp(&dev->qos[i].qplist, qp);
++              }
++              spin_unlock_irqrestore(&dev->qos[i].lock, flags);
++              dev->qos[i].qs_handle = qs_handle;
++      }
++}
++
++/**
++ * i40iw_qp_rem_qos - remove qp from qos lists during destroy qp
++ * @dev: IWARP device pointer
++ * @qp: qp to be removed from qos
++ */
++static void i40iw_qp_rem_qos(struct i40iw_sc_dev *dev, struct i40iw_sc_qp *qp)
++{
++      unsigned long flags;
++
++      if (!qp->on_qoslist)
++              return;
++      spin_lock_irqsave(&dev->qos[qp->user_pri].lock, flags);
++      list_del(&qp->list);
++      spin_unlock_irqrestore(&dev->qos[qp->user_pri].lock, flags);
++}
++
++/**
++ * i40iw_qp_add_qos - called during setctx fot qp to be added to qos
++ * @dev: IWARP device pointer
++ * @qp: qp to be added to qos
++ */
++void i40iw_qp_add_qos(struct i40iw_sc_dev *dev, struct i40iw_sc_qp *qp)
++{
++      unsigned long flags;
++
++      spin_lock_irqsave(&dev->qos[qp->user_pri].lock, flags);
++      qp->qs_handle = dev->qos[qp->user_pri].qs_handle;
++      list_add(&qp->list, &dev->qos[qp->user_pri].qplist);
++      qp->on_qoslist = true;
++      spin_unlock_irqrestore(&dev->qos[qp->user_pri].lock, flags);
++}
++
++/**
+  * i40iw_sc_pd_init - initialize sc pd struct
+  * @dev: sc device struct
+  * @pd: sc pd ptr
+@@ -1082,7 +1209,7 @@ static enum i40iw_status_code i40iw_sc_manage_qhash_table_entry(
+                             LS_64(info->dest_ip[2], I40IW_CQPSQ_QHASH_ADDR2) |
+                             LS_64(info->dest_ip[3], I40IW_CQPSQ_QHASH_ADDR3));
+       }
+-      qw2 = LS_64(cqp->dev->qs_handle, I40IW_CQPSQ_QHASH_QS_HANDLE);
++      qw2 = LS_64(cqp->dev->qos[info->user_pri].qs_handle, I40IW_CQPSQ_QHASH_QS_HANDLE);
+       if (info->vlan_valid)
+               qw2 |= LS_64(info->vlan_id, I40IW_CQPSQ_QHASH_VLANID);
+       set_64bit_val(wqe, 16, qw2);
+@@ -2151,7 +2278,7 @@ static enum i40iw_status_code i40iw_sc_qp_init(struct i40iw_sc_qp *qp,
+       qp->rq_tph_en = info->rq_tph_en;
+       qp->rcv_tph_en = info->rcv_tph_en;
+       qp->xmit_tph_en = info->xmit_tph_en;
+-      qp->qs_handle = qp->pd->dev->qs_handle;
++      qp->qs_handle = qp->pd->dev->qos[qp->user_pri].qs_handle;
+       qp->exception_lan_queue = qp->pd->dev->exception_lan_queue;
+       return 0;
+@@ -2296,6 +2423,7 @@ static enum i40iw_status_code i40iw_sc_qp_destroy(
+       struct i40iw_sc_cqp *cqp;
+       u64 header;
++      i40iw_qp_rem_qos(qp->pd->dev, qp);
+       cqp = qp->pd->dev->cqp;
+       wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
+       if (!wqe)
+@@ -2447,6 +2575,12 @@ static enum i40iw_status_code i40iw_sc_qp_setctx(
+       iw = info->iwarp_info;
+       tcp = info->tcp_info;
++      if (info->add_to_qoslist) {
++              qp->user_pri = info->user_pri;
++              i40iw_qp_add_qos(qp->pd->dev, qp);
++              i40iw_debug(qp->dev, I40IW_DEBUG_DCB, "%s qp[%d] UP[%d] qset[%d]\n",
++                          __func__, qp->qp_uk.qp_id, qp->user_pri, qp->qs_handle);
++      }
+       qw0 = LS_64(qp->qp_uk.rq_wqe_size, I40IWQPC_RQWQESIZE) |
+             LS_64(info->err_rq_idx_valid, I40IWQPC_ERR_RQ_IDX_VALID) |
+             LS_64(qp->rcv_tph_en, I40IWQPC_RCVTPHEN) |
+@@ -3959,7 +4093,7 @@ enum i40iw_status_code i40iw_process_cqp_cmd(struct i40iw_sc_dev *dev,
+                                            struct cqp_commands_info *pcmdinfo)
+ {
+       enum i40iw_status_code status = 0;
+-      unsigned long   flags;
++      unsigned long flags;
+       spin_lock_irqsave(&dev->cqp_lock, flags);
+       if (list_empty(&dev->cqp_cmd_head) && !i40iw_ring_full(dev->cqp))
+@@ -3978,7 +4112,7 @@ enum i40iw_status_code i40iw_process_bh(struct i40iw_sc_dev *dev)
+ {
+       enum i40iw_status_code status = 0;
+       struct cqp_commands_info *pcmdinfo;
+-      unsigned long   flags;
++      unsigned long flags;
+       spin_lock_irqsave(&dev->cqp_lock, flags);
+       while (!list_empty(&dev->cqp_cmd_head) && !i40iw_ring_full(dev->cqp)) {
+@@ -4742,6 +4876,7 @@ enum i40iw_status_code i40iw_device_init(struct i40iw_sc_dev *dev,
+       u16 hmc_fcn = 0;
+       enum i40iw_status_code ret_code = 0;
+       u8 db_size;
++      int i;
+       spin_lock_init(&dev->cqp_lock);
+       INIT_LIST_HEAD(&dev->cqp_cmd_head);             /* for the cqp commands backlog. */
+@@ -4757,7 +4892,13 @@ enum i40iw_status_code i40iw_device_init(struct i40iw_sc_dev *dev,
+               return ret_code;
+       }
+       dev->hmc_fn_id = info->hmc_fn_id;
+-      dev->qs_handle = info->qs_handle;
++      i40iw_fill_qos_list(info->l2params.qs_handle_list);
++      for (i = 0; i < I40IW_MAX_USER_PRIORITY; i++) {
++              dev->qos[i].qs_handle = info->l2params.qs_handle_list[i];
++              i40iw_debug(dev, I40IW_DEBUG_DCB, "qset[%d]: %d\n", i, dev->qos[i].qs_handle);
++              spin_lock_init(&dev->qos[i].lock);
++              INIT_LIST_HEAD(&dev->qos[i].qplist);
++      }
+       dev->exception_lan_queue = info->exception_lan_queue;
+       dev->is_pf = info->is_pf;
+diff --git a/drivers/infiniband/hw/i40iw/i40iw_d.h b/drivers/infiniband/hw/i40iw/i40iw_d.h
+index 2fac1db..e184c0e 100644
+--- a/drivers/infiniband/hw/i40iw/i40iw_d.h
++++ b/drivers/infiniband/hw/i40iw/i40iw_d.h
+@@ -74,6 +74,8 @@
+ #define RS_32_1(val, bits)      (u32)(val >> bits)
+ #define I40E_HI_DWORD(x)        ((u32)((((x) >> 16) >> 16) & 0xFFFFFFFF))
++#define QS_HANDLE_UNKNOWN       0xffff
++
+ #define LS_64(val, field) (((u64)val << field ## _SHIFT) & (field ## _MASK))
+ #define RS_64(val, field) ((u64)(val & field ## _MASK) >> field ## _SHIFT)
+diff --git a/drivers/infiniband/hw/i40iw/i40iw_hw.c b/drivers/infiniband/hw/i40iw/i40iw_hw.c
+index 0c92a40..b94727f 100644
+--- a/drivers/infiniband/hw/i40iw/i40iw_hw.c
++++ b/drivers/infiniband/hw/i40iw/i40iw_hw.c
+@@ -359,6 +359,9 @@ void i40iw_process_aeq(struct i40iw_device *iwdev)
+                               continue;
+                       i40iw_cm_disconn(iwqp);
+                       break;
++              case I40IW_AE_QP_SUSPEND_COMPLETE:
++                      i40iw_qp_suspend_resume(dev, &iwqp->sc_qp, false);
++                      break;
+               case I40IW_AE_TERMINATE_SENT:
+                       i40iw_terminate_send_fin(qp);
+                       break;
+@@ -404,19 +407,18 @@ void i40iw_process_aeq(struct i40iw_device *iwdev)
+               case I40IW_AE_LCE_CQ_CATASTROPHIC:
+               case I40IW_AE_UDA_XMIT_DGRAM_TOO_LONG:
+               case I40IW_AE_UDA_XMIT_IPADDR_MISMATCH:
+-              case I40IW_AE_QP_SUSPEND_COMPLETE:
+                       ctx_info->err_rq_idx_valid = false;
+               default:
+-                              if (!info->sq && ctx_info->err_rq_idx_valid) {
+-                                      ctx_info->err_rq_idx = info->wqe_idx;
+-                                      ctx_info->tcp_info_valid = false;
+-                                      ctx_info->iwarp_info_valid = false;
+-                                      ret = dev->iw_priv_qp_ops->qp_setctx(&iwqp->sc_qp,
+-                                                                           iwqp->host_ctx.va,
+-                                                                           ctx_info);
+-                              }
+-                              i40iw_terminate_connection(qp, info);
+-                              break;
++                      if (!info->sq && ctx_info->err_rq_idx_valid) {
++                              ctx_info->err_rq_idx = info->wqe_idx;
++                              ctx_info->tcp_info_valid = false;
++                              ctx_info->iwarp_info_valid = false;
++                              ret = dev->iw_priv_qp_ops->qp_setctx(&iwqp->sc_qp,
++                                                                   iwqp->host_ctx.va,
++                                                                   ctx_info);
++                      }
++                      i40iw_terminate_connection(qp, info);
++                      break;
+               }
+               if (info->qp)
+                       i40iw_rem_ref(&iwqp->ibqp);
+@@ -560,6 +562,7 @@ enum i40iw_status_code i40iw_manage_qhash(struct i40iw_device *iwdev,
+       }
+       info->ipv4_valid = cminfo->ipv4;
++      info->user_pri = cminfo->user_pri;
+       ether_addr_copy(info->mac_addr, iwdev->netdev->dev_addr);
+       info->qp_num = cpu_to_le32(dev->ilq->qp_id);
+       info->dest_port = cpu_to_le16(cminfo->loc_port);
+diff --git a/drivers/infiniband/hw/i40iw/i40iw_main.c b/drivers/infiniband/hw/i40iw/i40iw_main.c
+index ac2f3cd..40aac87 100644
+--- a/drivers/infiniband/hw/i40iw/i40iw_main.c
++++ b/drivers/infiniband/hw/i40iw/i40iw_main.c
+@@ -939,7 +939,7 @@ static enum i40iw_status_code i40iw_initialize_ilq(struct i40iw_device *iwdev)
+       info.rq_size = 8192;
+       info.buf_size = 1024;
+       info.tx_buf_cnt = 16384;
+-      info.mss = iwdev->mss;
++      info.mss = iwdev->sc_dev.mss;
+       info.receive = i40iw_receive_ilq;
+       info.xmit_complete = i40iw_free_sqbuf;
+       status = i40iw_puda_create_rsrc(&iwdev->sc_dev, &info);
+@@ -967,7 +967,7 @@ static enum i40iw_status_code i40iw_initialize_ieq(struct i40iw_device *iwdev)
+       info.sq_size = 8192;
+       info.rq_size = 8192;
+       info.buf_size = 2048;
+-      info.mss = iwdev->mss;
++      info.mss = iwdev->sc_dev.mss;
+       info.tx_buf_cnt = 16384;
+       status = i40iw_puda_create_rsrc(&iwdev->sc_dev, &info);
+       if (status)
+@@ -1296,6 +1296,9 @@ static enum i40iw_status_code i40iw_initialize_dev(struct i40iw_device *iwdev,
+       struct i40iw_device_init_info info;
+       struct i40iw_dma_mem mem;
+       u32 size;
++      u16 last_qset = I40IW_NO_QSET;
++      u16 qset;
++      u32 i;
+       memset(&info, 0, sizeof(info));
+       size = sizeof(struct i40iw_hmc_pble_rsrc) + sizeof(struct i40iw_hmc_info) +
+@@ -1325,7 +1328,16 @@ static enum i40iw_status_code i40iw_initialize_dev(struct i40iw_device *iwdev,
+       info.bar0 = ldev->hw_addr;
+       info.hw = &iwdev->hw;
+       info.debug_mask = debug;
+-      info.qs_handle = ldev->params.qos.prio_qos[0].qs_handle;
++      info.l2params.mss =
++              (ldev->params.mtu) ? ldev->params.mtu - I40IW_MTU_TO_MSS : I40IW_DEFAULT_MSS;
++      for (i = 0; i < I40E_CLIENT_MAX_USER_PRIORITY; i++) {
++              qset = ldev->params.qos.prio_qos[i].qs_handle;
++              info.l2params.qs_handle_list[i] = qset;
++              if (last_qset == I40IW_NO_QSET)
++                      last_qset = qset;
++              else if ((qset != last_qset) && (qset != I40IW_NO_QSET))
++                      iwdev->dcb = true;
++      }
+       info.exception_lan_queue = 1;
+       info.vchnl_send = i40iw_virtchnl_send;
+       status = i40iw_device_init(&iwdev->sc_dev, &info);
+@@ -1416,6 +1428,8 @@ static void i40iw_deinit_device(struct i40iw_device *iwdev, bool reset, bool del
+       struct i40iw_sc_dev *dev = &iwdev->sc_dev;
+       i40iw_pr_info("state = %d\n", iwdev->init_state);
++      if (iwdev->param_wq)
++              destroy_workqueue(iwdev->param_wq);
+       switch (iwdev->init_state) {
+       case RDMA_DEV_REGISTERED:
+@@ -1630,6 +1644,9 @@ static int i40iw_open(struct i40e_info *ldev, struct i40e_client *client)
+               iwdev->init_state = RDMA_DEV_REGISTERED;
+               iwdev->iw_status = 1;
+               i40iw_port_ibevent(iwdev);
++              iwdev->param_wq = alloc_ordered_workqueue("l2params", WQ_MEM_RECLAIM);
++              if(iwdev->param_wq == NULL)
++                      break;
+               i40iw_pr_info("i40iw_open completed\n");
+               return 0;
+       } while (0);
+@@ -1640,25 +1657,58 @@ static int i40iw_open(struct i40e_info *ldev, struct i40e_client *client)
+ }
+ /**
+- * i40iw_l2param_change : handle qs handles for qos and mss change
++ * i40iw_l2params_worker - worker for l2 params change
++ * @work: work pointer for l2 params
++ */
++static void i40iw_l2params_worker(struct work_struct *work)
++{
++      struct l2params_work *dwork =
++          container_of(work, struct l2params_work, work);
++      struct i40iw_device *iwdev = dwork->iwdev;
++
++      i40iw_change_l2params(&iwdev->sc_dev, &dwork->l2params);
++      atomic_dec(&iwdev->params_busy);
++      kfree(work);
++}
++
++/**
++ * i40iw_l2param_change - handle qs handles for qos and mss change
+  * @ldev: lan device information
+  * @client: client for paramater change
+  * @params: new parameters from L2
+  */
+-static void i40iw_l2param_change(struct i40e_info *ldev,
+-                               struct i40e_client *client,
++static void i40iw_l2param_change(struct i40e_info *ldev, struct i40e_client *client,
+                                struct i40e_params *params)
+ {
+       struct i40iw_handler *hdl;
++      struct i40iw_l2params *l2params;
++      struct l2params_work *work;
+       struct i40iw_device *iwdev;
++      int i;
+       hdl = i40iw_find_i40e_handler(ldev);
+       if (!hdl)
+               return;
+       iwdev = &hdl->device;
+-      if (params->mtu)
+-              iwdev->mss = params->mtu - I40IW_MTU_TO_MSS;
++
++      if (atomic_read(&iwdev->params_busy))
++              return;
++
++
++      work = kzalloc(sizeof(*work), GFP_ATOMIC);
++      if (!work)
++              return;
++
++      atomic_inc(&iwdev->params_busy);
++
++      work->iwdev = iwdev;
++      l2params = &work->l2params;
++      for (i = 0; i < I40E_CLIENT_MAX_USER_PRIORITY; i++)
++              l2params->qs_handle_list[i] = params->qos.prio_qos[i].qs_handle;
++
++      INIT_WORK(&work->work, i40iw_l2params_worker);
++      queue_work(iwdev->param_wq, &work->work);
+ }
+ /**
+diff --git a/drivers/infiniband/hw/i40iw/i40iw_osdep.h b/drivers/infiniband/hw/i40iw/i40iw_osdep.h
+index 80f422b..a6b18cd 100644
+--- a/drivers/infiniband/hw/i40iw/i40iw_osdep.h
++++ b/drivers/infiniband/hw/i40iw/i40iw_osdep.h
+@@ -198,6 +198,8 @@ enum i40iw_status_code i40iw_cqp_manage_vf_pble_bp(struct i40iw_sc_dev *dev,
+ void i40iw_cqp_spawn_worker(struct i40iw_sc_dev *dev,
+                           struct i40iw_virtchnl_work_info *work_info, u32 iw_vf_idx);
+ void *i40iw_remove_head(struct list_head *list);
++void i40iw_qp_suspend_resume(struct i40iw_sc_dev *dev, struct i40iw_sc_qp *qp, bool suspend);
++void i40iw_qp_mss_modify(struct i40iw_sc_dev *dev, struct i40iw_sc_qp *qp);
+ void i40iw_term_modify_qp(struct i40iw_sc_qp *qp, u8 next_state, u8 term, u8 term_len);
+ void i40iw_terminate_done(struct i40iw_sc_qp *qp, int timeout_occurred);
+diff --git a/drivers/infiniband/hw/i40iw/i40iw_p.h b/drivers/infiniband/hw/i40iw/i40iw_p.h
+index a0b8ca1..c9e8cb8 100644
+--- a/drivers/infiniband/hw/i40iw/i40iw_p.h
++++ b/drivers/infiniband/hw/i40iw/i40iw_p.h
+@@ -65,6 +65,8 @@ enum i40iw_status_code i40iw_pf_init_vfhmc(struct i40iw_sc_dev *dev, u8 vf_hmc_f
+                                          u32 *vf_cnt_array);
+ /* cqp misc functions */
++void i40iw_change_l2params(struct i40iw_sc_dev *dev, struct i40iw_l2params *l2params);
++void i40iw_qp_add_qos(struct i40iw_sc_dev *dev, struct i40iw_sc_qp *qp);
+ void i40iw_terminate_send_fin(struct i40iw_sc_qp *qp);
+diff --git a/drivers/infiniband/hw/i40iw/i40iw_puda.c b/drivers/infiniband/hw/i40iw/i40iw_puda.c
+index c62d354..7541b0d 100644
+--- a/drivers/infiniband/hw/i40iw/i40iw_puda.c
++++ b/drivers/infiniband/hw/i40iw/i40iw_puda.c
+@@ -608,7 +608,8 @@ static enum i40iw_status_code i40iw_puda_qp_create(struct i40iw_puda_rsrc *rsrc)
+               ukqp->wqe_alloc_reg = (u32 __iomem *)(i40iw_get_hw_addr(qp->pd->dev) +
+                                                   I40E_VFPE_WQEALLOC1);
+-      qp->qs_handle = qp->dev->qs_handle;
++      qp->user_pri = 0;
++      i40iw_qp_add_qos(rsrc->dev, qp);
+       i40iw_puda_qp_setctx(rsrc);
+       ret = i40iw_puda_qp_wqe(rsrc);
+       if (ret)
+diff --git a/drivers/infiniband/hw/i40iw/i40iw_type.h b/drivers/infiniband/hw/i40iw/i40iw_type.h
+index 2b1a04e..b6f448a 100644
+--- a/drivers/infiniband/hw/i40iw/i40iw_type.h
++++ b/drivers/infiniband/hw/i40iw/i40iw_type.h
+@@ -397,6 +397,9 @@ struct i40iw_sc_qp {
+       bool virtual_map;
+       bool flush_sq;
+       bool flush_rq;
++      u8 user_pri;
++      struct list_head list;
++      bool on_qoslist;
+       bool sq_flush;
+       enum i40iw_flush_opcode flush_code;
+       enum i40iw_term_eventtypes eventtype;
+@@ -424,6 +427,12 @@ struct i40iw_vchnl_vf_msg_buffer {
+       char parm_buffer[I40IW_VCHNL_MAX_VF_MSG_SIZE - 1];
+ };
++struct i40iw_qos {
++      struct list_head qplist;
++      spinlock_t lock;        /* qos list */
++      u16 qs_handle;
++};
++
+ struct i40iw_vfdev {
+       struct i40iw_sc_dev *pf_dev;
+       u8 *hmc_info_mem;
+@@ -482,7 +491,8 @@ struct i40iw_sc_dev {
+       const struct i40iw_vf_cqp_ops *iw_vf_cqp_ops;
+       struct i40iw_hmc_fpm_misc hmc_fpm_misc;
+-      u16 qs_handle;
++      struct i40iw_qos qos[I40IW_MAX_USER_PRIORITY];
++      u16 mss;
+       u32 debug_mask;
+       u16 exception_lan_queue;
+       u8 hmc_fn_id;
+@@ -564,7 +574,7 @@ struct i40iw_device_init_info {
+       struct i40iw_hw *hw;
+       void __iomem *bar0;
+       enum i40iw_status_code (*vchnl_send)(struct i40iw_sc_dev *, u32, u8 *, u16);
+-      u16 qs_handle;
++      struct i40iw_l2params l2params;
+       u16 exception_lan_queue;
+       u8 hmc_fn_id;
+       bool is_pf;
+@@ -722,6 +732,8 @@ struct i40iw_qp_host_ctx_info {
+       bool iwarp_info_valid;
+       bool err_rq_idx_valid;
+       u16 err_rq_idx;
++      bool add_to_qoslist;
++      u8 user_pri;
+ };
+ struct i40iw_aeqe_info {
+@@ -886,7 +898,7 @@ struct i40iw_qhash_table_info {
+       bool ipv4_valid;
+       u8 mac_addr[6];
+       u16 vlan_id;
+-      u16 qs_handle;
++      u8 user_pri;
+       u32 qp_num;
+       u32 dest_ip[4];
+       u32 src_ip[4];
+diff --git a/drivers/infiniband/hw/i40iw/i40iw_utils.c b/drivers/infiniband/hw/i40iw/i40iw_utils.c
+index 6fd043b..cd98902 100644
+--- a/drivers/infiniband/hw/i40iw/i40iw_utils.c
++++ b/drivers/infiniband/hw/i40iw/i40iw_utils.c
+@@ -712,6 +712,51 @@ enum i40iw_status_code i40iw_cqp_sds_cmd(struct i40iw_sc_dev *dev,
+ }
+ /**
++ * i40iw_qp_suspend_resume - cqp command for suspend/resume
++ * @dev: hardware control device structure
++ * @qp: hardware control qp
++ * @suspend: flag if suspend or resume
++ */
++void i40iw_qp_suspend_resume(struct i40iw_sc_dev *dev, struct i40iw_sc_qp *qp, bool suspend)
++{
++      struct i40iw_device *iwdev = (struct i40iw_device *)dev->back_dev;
++      struct i40iw_cqp_request *cqp_request;
++      struct i40iw_sc_cqp *cqp = dev->cqp;
++      struct cqp_commands_info *cqp_info;
++      enum i40iw_status_code status;
++
++      cqp_request = i40iw_get_cqp_request(&iwdev->cqp, false);
++      if (!cqp_request)
++              return;
++
++      cqp_info = &cqp_request->info;
++      cqp_info->cqp_cmd = (suspend) ? OP_SUSPEND : OP_RESUME;
++      cqp_info->in.u.suspend_resume.cqp = cqp;
++      cqp_info->in.u.suspend_resume.qp = qp;
++      cqp_info->in.u.suspend_resume.scratch = (uintptr_t)cqp_request;
++      status = i40iw_handle_cqp_op(iwdev, cqp_request);
++      if (status)
++              i40iw_pr_err("CQP-OP QP Suspend/Resume fail");
++}
++
++/**
++ * i40iw_qp_mss_modify - modify mss for qp
++ * @dev: hardware control device structure
++ * @qp: hardware control qp
++ */
++void i40iw_qp_mss_modify(struct i40iw_sc_dev *dev, struct i40iw_sc_qp *qp)
++{
++      struct i40iw_device *iwdev = (struct i40iw_device *)dev->back_dev;
++      struct i40iw_qp *iwqp = (struct i40iw_qp *)qp->back_qp;
++      struct i40iw_modify_qp_info info;
++
++      memset(&info, 0, sizeof(info));
++      info.mss_change = true;
++      info.new_mss = dev->mss;
++      i40iw_hw_modify_qp(iwdev, iwqp, &info, false);
++}
++
++/**
+  * i40iw_term_modify_qp - modify qp for term message
+  * @qp: hardware control qp
+  * @next_state: qp's next state
+diff --git a/drivers/infiniband/hw/i40iw/i40iw_verbs.c b/drivers/infiniband/hw/i40iw/i40iw_verbs.c
+index 6329c97..56e1c2c 100644
+--- a/drivers/infiniband/hw/i40iw/i40iw_verbs.c
++++ b/drivers/infiniband/hw/i40iw/i40iw_verbs.c
+@@ -254,7 +254,6 @@ static void i40iw_alloc_push_page(struct i40iw_device *iwdev, struct i40iw_sc_qp
+ {
+       struct i40iw_cqp_request *cqp_request;
+       struct cqp_commands_info *cqp_info;
+-      struct i40iw_sc_dev *dev = &iwdev->sc_dev;
+       enum i40iw_status_code status;
+       if (qp->push_idx != I40IW_INVALID_PUSH_PAGE_INDEX)
+@@ -270,7 +269,7 @@ static void i40iw_alloc_push_page(struct i40iw_device *iwdev, struct i40iw_sc_qp
+       cqp_info->cqp_cmd = OP_MANAGE_PUSH_PAGE;
+       cqp_info->post_sq = 1;
+-      cqp_info->in.u.manage_push_page.info.qs_handle = dev->qs_handle;
++      cqp_info->in.u.manage_push_page.info.qs_handle = qp->qs_handle;
+       cqp_info->in.u.manage_push_page.info.free_page = 0;
+       cqp_info->in.u.manage_push_page.cqp = &iwdev->cqp.sc_cqp;
+       cqp_info->in.u.manage_push_page.scratch = (uintptr_t)cqp_request;
+@@ -292,7 +291,6 @@ static void i40iw_dealloc_push_page(struct i40iw_device *iwdev, struct i40iw_sc_
+ {
+       struct i40iw_cqp_request *cqp_request;
+       struct cqp_commands_info *cqp_info;
+-      struct i40iw_sc_dev *dev = &iwdev->sc_dev;
+       enum i40iw_status_code status;
+       if (qp->push_idx == I40IW_INVALID_PUSH_PAGE_INDEX)
+@@ -307,7 +305,7 @@ static void i40iw_dealloc_push_page(struct i40iw_device *iwdev, struct i40iw_sc_
+       cqp_info->post_sq = 1;
+       cqp_info->in.u.manage_push_page.info.push_idx = qp->push_idx;
+-      cqp_info->in.u.manage_push_page.info.qs_handle = dev->qs_handle;
++      cqp_info->in.u.manage_push_page.info.qs_handle = qp->qs_handle;
+       cqp_info->in.u.manage_push_page.info.free_page = 1;
+       cqp_info->in.u.manage_push_page.cqp = &iwdev->cqp.sc_cqp;
+       cqp_info->in.u.manage_push_page.scratch = (uintptr_t)cqp_request;
+-- 
+1.8.3.1
+
diff --git a/linux-next-cherry-picks/0014-i40iw-Enable-message-packing.patch b/linux-next-cherry-picks/0014-i40iw-Enable-message-packing.patch
new file mode 100755 (executable)
index 0000000..2d59e04
--- /dev/null
@@ -0,0 +1,73 @@
+From d62d563424e3da0c0a1176f38c0d49c7ad91fbc1 Mon Sep 17 00:00:00 2001
+From: Henry Orosco <henry.orosco@intel.com>
+Date: Wed, 19 Oct 2016 15:32:53 -0500
+Subject: [PATCH 14/52] i40iw: Enable message packing
+
+Remove the parameter to disable message packing and
+always enable it.
+
+Signed-off-by: Shiraz Saleem <shiraz.saleem@intel.com>
+Signed-off-by: Henry Orosco <henry.orosco@intel.com>
+Signed-off-by: Doug Ledford <dledford@redhat.com>
+---
+ drivers/infiniband/hw/i40iw/i40iw_ctrl.c | 5 -----
+ drivers/infiniband/hw/i40iw/i40iw_main.c | 2 +-
+ drivers/infiniband/hw/i40iw/i40iw_type.h | 2 +-
+ 3 files changed, 2 insertions(+), 7 deletions(-)
+
+diff --git a/drivers/infiniband/hw/i40iw/i40iw_ctrl.c b/drivers/infiniband/hw/i40iw/i40iw_ctrl.c
+index 31c4a0c..6c6a1ef 100644
+--- a/drivers/infiniband/hw/i40iw/i40iw_ctrl.c
++++ b/drivers/infiniband/hw/i40iw/i40iw_ctrl.c
+@@ -429,12 +429,10 @@ static enum i40iw_status_code i40iw_sc_cqp_init(struct i40iw_sc_cqp *cqp,
+ /**
+  * i40iw_sc_cqp_create - create cqp during bringup
+  * @cqp: struct for cqp hw
+- * @disable_pfpdus: if pfpdu to be disabled
+  * @maj_err: If error, major err number
+  * @min_err: If error, minor err number
+  */
+ static enum i40iw_status_code i40iw_sc_cqp_create(struct i40iw_sc_cqp *cqp,
+-                                                bool disable_pfpdus,
+                                                 u16 *maj_err,
+                                                 u16 *min_err)
+ {
+@@ -453,9 +451,6 @@ static enum i40iw_status_code i40iw_sc_cqp_create(struct i40iw_sc_cqp *cqp,
+       temp = LS_64(cqp->hw_sq_size, I40IW_CQPHC_SQSIZE) |
+              LS_64(cqp->struct_ver, I40IW_CQPHC_SVER);
+-      if (disable_pfpdus)
+-              temp |= LS_64(1, I40IW_CQPHC_DISABLE_PFPDUS);
+-
+       set_64bit_val(cqp->host_ctx, 0, temp);
+       set_64bit_val(cqp->host_ctx, 8, cqp->sq_pa);
+       temp = LS_64(cqp->enabled_vf_count, I40IW_CQPHC_ENABLED_VFS) |
+diff --git a/drivers/infiniband/hw/i40iw/i40iw_main.c b/drivers/infiniband/hw/i40iw/i40iw_main.c
+index 40aac87..e6abdaf 100644
+--- a/drivers/infiniband/hw/i40iw/i40iw_main.c
++++ b/drivers/infiniband/hw/i40iw/i40iw_main.c
+@@ -603,7 +603,7 @@ static enum i40iw_status_code i40iw_create_cqp(struct i40iw_device *iwdev)
+               i40iw_pr_err("cqp init status %d\n", status);
+               goto exit;
+       }
+-      status = dev->cqp_ops->cqp_create(dev->cqp, true, &maj_err, &min_err);
++      status = dev->cqp_ops->cqp_create(dev->cqp, &maj_err, &min_err);
+       if (status) {
+               i40iw_pr_err("cqp create status %d maj_err %d min_err %d\n",
+                            status, maj_err, min_err);
+diff --git a/drivers/infiniband/hw/i40iw/i40iw_type.h b/drivers/infiniband/hw/i40iw/i40iw_type.h
+index b6f448a..d1847e6 100644
+--- a/drivers/infiniband/hw/i40iw/i40iw_type.h
++++ b/drivers/infiniband/hw/i40iw/i40iw_type.h
+@@ -988,7 +988,7 @@ struct i40iw_cqp_query_fpm_values {
+ struct i40iw_cqp_ops {
+       enum i40iw_status_code (*cqp_init)(struct i40iw_sc_cqp *,
+                                          struct i40iw_cqp_init_info *);
+-      enum i40iw_status_code (*cqp_create)(struct i40iw_sc_cqp *, bool, u16 *, u16 *);
++      enum i40iw_status_code (*cqp_create)(struct i40iw_sc_cqp *, u16 *, u16 *);
+       void (*cqp_post_sq)(struct i40iw_sc_cqp *);
+       u64 *(*cqp_get_next_send_wqe)(struct i40iw_sc_cqp *, u64 scratch);
+       enum i40iw_status_code (*cqp_destroy)(struct i40iw_sc_cqp *);
+-- 
+1.8.3.1
+
diff --git a/linux-next-cherry-picks/0015-i40iw-Remove-workaround-for-pre-production-errata.patch b/linux-next-cherry-picks/0015-i40iw-Remove-workaround-for-pre-production-errata.patch
new file mode 100755 (executable)
index 0000000..13e2c0a
--- /dev/null
@@ -0,0 +1,109 @@
+From 7581e96ca4de26da7237d507ac9cf519753e1787 Mon Sep 17 00:00:00 2001
+From: Henry Orosco <henry.orosco@intel.com>
+Date: Wed, 19 Oct 2016 15:33:32 -0500
+Subject: [PATCH 15/52] i40iw: Remove workaround for pre-production errata
+
+Pre-production silicon incorrectly truncates 4 bytes of the MPA
+packet in UDP loopback case. Remove the workaround as it is no
+longer necessary.
+
+Signed-off-by: Shiraz Saleem <shiraz.saleem@intel.com>
+Signed-off-by: Henry Orosco <henry.orosco@intel.com>
+Signed-off-by: Doug Ledford <dledford@redhat.com>
+---
+ drivers/infiniband/hw/i40iw/i40iw_cm.c    | 26 +++-----------------------
+ drivers/infiniband/hw/i40iw/i40iw_cm.h    |  2 --
+ drivers/infiniband/hw/i40iw/i40iw_utils.c |  2 +-
+ 3 files changed, 4 insertions(+), 26 deletions(-)
+
+diff --git a/drivers/infiniband/hw/i40iw/i40iw_cm.c b/drivers/infiniband/hw/i40iw/i40iw_cm.c
+index 24b22e9..9e447b5 100644
+--- a/drivers/infiniband/hw/i40iw/i40iw_cm.c
++++ b/drivers/infiniband/hw/i40iw/i40iw_cm.c
+@@ -361,15 +361,6 @@ static void i40iw_cleanup_retrans_entry(struct i40iw_cm_node *cm_node)
+       spin_unlock_irqrestore(&cm_node->retrans_list_lock, flags);
+ }
+-static bool is_remote_ne020_or_chelsio(struct i40iw_cm_node *cm_node)
+-{
+-      if ((cm_node->rem_mac[0] == 0x0) &&
+-          (((cm_node->rem_mac[1] == 0x12) && (cm_node->rem_mac[2] == 0x55)) ||
+-           ((cm_node->rem_mac[1] == 0x07 && (cm_node->rem_mac[2] == 0x43)))))
+-              return true;
+-      return false;
+-}
+-
+ /**
+  * i40iw_form_cm_frame - get a free packet and build frame
+  * @cm_node: connection's node ionfo to use in frame
+@@ -410,11 +401,8 @@ static struct i40iw_puda_buf *i40iw_form_cm_frame(struct i40iw_cm_node *cm_node,
+       if (hdr)
+               hdr_len = hdr->size;
+-      if (pdata) {
++      if (pdata)
+               pd_len = pdata->size;
+-              if (!is_remote_ne020_or_chelsio(cm_node))
+-                      pd_len += MPA_ZERO_PAD_LEN;
+-      }
+       if (cm_node->vlan_id < VLAN_TAG_PRESENT)
+               eth_hlen += 4;
+@@ -3587,7 +3575,7 @@ int i40iw_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
+       iwqp->cm_node = (void *)cm_node;
+       cm_node->iwqp = iwqp;
+-      buf_len = conn_param->private_data_len + I40IW_MAX_IETF_SIZE + MPA_ZERO_PAD_LEN;
++      buf_len = conn_param->private_data_len + I40IW_MAX_IETF_SIZE;
+       status = i40iw_allocate_dma_mem(dev->hw, &iwqp->ietf_mem, buf_len, 1);
+@@ -3621,18 +3609,10 @@ int i40iw_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
+               iwqp->lsmm_mr = ibmr;
+               if (iwqp->page)
+                       iwqp->sc_qp.qp_uk.sq_base = kmap(iwqp->page);
+-              if (is_remote_ne020_or_chelsio(cm_node))
+-                      dev->iw_priv_qp_ops->qp_send_lsmm(
+-                                                      &iwqp->sc_qp,
++              dev->iw_priv_qp_ops->qp_send_lsmm(&iwqp->sc_qp,
+                                                       iwqp->ietf_mem.va,
+                                                       (accept.size + conn_param->private_data_len),
+                                                       ibmr->lkey);
+-              else
+-                      dev->iw_priv_qp_ops->qp_send_lsmm(
+-                                                      &iwqp->sc_qp,
+-                                                      iwqp->ietf_mem.va,
+-                                                      (accept.size + conn_param->private_data_len + MPA_ZERO_PAD_LEN),
+-                                                      ibmr->lkey);
+       } else {
+               if (iwqp->page)
+diff --git a/drivers/infiniband/hw/i40iw/i40iw_cm.h b/drivers/infiniband/hw/i40iw/i40iw_cm.h
+index 945ed26..24615c2 100644
+--- a/drivers/infiniband/hw/i40iw/i40iw_cm.h
++++ b/drivers/infiniband/hw/i40iw/i40iw_cm.h
+@@ -56,8 +56,6 @@
+ #define I40IW_MAX_IETF_SIZE      32
+-#define MPA_ZERO_PAD_LEN      4
+-
+ /* IETF RTR MSG Fields               */
+ #define IETF_PEER_TO_PEER       0x8000
+ #define IETF_FLPDU_ZERO_LEN     0x4000
+diff --git a/drivers/infiniband/hw/i40iw/i40iw_utils.c b/drivers/infiniband/hw/i40iw/i40iw_utils.c
+index cd98902..4e880e8 100644
+--- a/drivers/infiniband/hw/i40iw/i40iw_utils.c
++++ b/drivers/infiniband/hw/i40iw/i40iw_utils.c
+@@ -1253,7 +1253,7 @@ enum i40iw_status_code i40iw_puda_get_tcpip_info(struct i40iw_puda_completion_in
+       buf->totallen = pkt_len + buf->maclen;
+-      if (info->payload_len < buf->totallen - 4) {
++      if (info->payload_len < buf->totallen) {
+               i40iw_pr_err("payload_len = 0x%x totallen expected0x%x\n",
+                            info->payload_len, buf->totallen);
+               return I40IW_ERR_INVALID_SIZE;
+-- 
+1.8.3.1
+
diff --git a/linux-next-cherry-picks/0016-i40iw-Set-MAX-IRD-MAX-ORD-size-to-max-supported-valu.patch b/linux-next-cherry-picks/0016-i40iw-Set-MAX-IRD-MAX-ORD-size-to-max-supported-valu.patch
new file mode 100755 (executable)
index 0000000..6f335c6
--- /dev/null
@@ -0,0 +1,52 @@
+From 7cba2cc13e12c824ad7e414b3834dc3df05fbf46 Mon Sep 17 00:00:00 2001
+From: Henry Orosco <henry.orosco@intel.com>
+Date: Wed, 19 Oct 2016 15:33:58 -0500
+Subject: [PATCH 16/52] i40iw: Set MAX IRD, MAX ORD size to max supported value
+
+Set the MAX_IRD and MAX_ORD size negotiated to the maximum
+supported values.
+
+Signed-off-by: Shiraz Saleem <shiraz.saleem@intel.com>
+Signed-off-by: Henry Orosco <henry.orosco@intel.com>
+Signed-off-by: Doug Ledford <dledford@redhat.com>
+---
+ drivers/infiniband/hw/i40iw/i40iw_ctrl.c | 5 -----
+ drivers/infiniband/hw/i40iw/i40iw_user.h | 5 ++---
+ 2 files changed, 2 insertions(+), 8 deletions(-)
+
+diff --git a/drivers/infiniband/hw/i40iw/i40iw_ctrl.c b/drivers/infiniband/hw/i40iw/i40iw_ctrl.c
+index 6c6a1ef..6bf2a19 100644
+--- a/drivers/infiniband/hw/i40iw/i40iw_ctrl.c
++++ b/drivers/infiniband/hw/i40iw/i40iw_ctrl.c
+@@ -2621,11 +2621,6 @@ static enum i40iw_status_code i40iw_sc_qp_setctx(
+                             152,
+                             LS_64(iw->last_byte_sent, I40IWQPC_LASTBYTESENT));
+-              /*
+-              * Hard-code IRD_SIZE to hw-limit, 128, in qpctx, i.e matching an
+-              *advertisable IRD of 64
+-              */
+-              iw->ird_size = I40IW_QPCTX_ENCD_MAXIRD;
+               set_64bit_val(qp_ctx,
+                             160,
+                             LS_64(iw->ord_size, I40IWQPC_ORDSIZE) |
+diff --git a/drivers/infiniband/hw/i40iw/i40iw_user.h b/drivers/infiniband/hw/i40iw/i40iw_user.h
+index 276bcef..e65c2baa 100644
+--- a/drivers/infiniband/hw/i40iw/i40iw_user.h
++++ b/drivers/infiniband/hw/i40iw/i40iw_user.h
+@@ -72,10 +72,9 @@ enum i40iw_device_capabilities_const {
+       I40IW_MAX_SQ_PAYLOAD_SIZE =             2145386496,
+       I40IW_MAX_INLINE_DATA_SIZE =            48,
+       I40IW_MAX_PUSHMODE_INLINE_DATA_SIZE =   48,
+-      I40IW_MAX_IRD_SIZE =                    32,
+-      I40IW_QPCTX_ENCD_MAXIRD =               3,
++      I40IW_MAX_IRD_SIZE =                    63,
++      I40IW_MAX_ORD_SIZE =                    127,
+       I40IW_MAX_WQ_ENTRIES =                  2048,
+-      I40IW_MAX_ORD_SIZE =                    32,
+       I40IW_Q2_BUFFER_SIZE =                  (248 + 100),
+       I40IW_QP_CTX_SIZE =                     248
+ };
+-- 
+1.8.3.1
+
diff --git a/linux-next-cherry-picks/0017-i40iw-Convert-page_size-to-encoded-value.patch b/linux-next-cherry-picks/0017-i40iw-Convert-page_size-to-encoded-value.patch
new file mode 100755 (executable)
index 0000000..507beef
--- /dev/null
@@ -0,0 +1,98 @@
+From 68583ca2a183c6368f4c333fa989685fba7cf325 Mon Sep 17 00:00:00 2001
+From: Henry Orosco <henry.orosco@intel.com>
+Date: Sat, 19 Nov 2016 20:26:25 -0600
+Subject: [PATCH 17/52] i40iw: Convert page_size to encoded value
+
+Passed in page_size was used as encoded value for writing
+the WQE and passed in value was usually 4096. This was
+working out since bit 0 was 0 and implies 4KB pages,
+but would not work for other page sizes.
+
+Signed-off-by: Mustafa Ismail <mustafa.ismail@intel.com>
+Signed-off-by: Henry Orosco <henry.orosco@intel.com>
+Signed-off-by: Doug Ledford <dledford@redhat.com>
+---
+ drivers/infiniband/hw/i40iw/i40iw_ctrl.c | 12 +++++++++---
+ drivers/infiniband/hw/i40iw/i40iw_type.h |  5 +++++
+ 2 files changed, 14 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/infiniband/hw/i40iw/i40iw_ctrl.c b/drivers/infiniband/hw/i40iw/i40iw_ctrl.c
+index 6bf2a19..8417452 100644
+--- a/drivers/infiniband/hw/i40iw/i40iw_ctrl.c
++++ b/drivers/infiniband/hw/i40iw/i40iw_ctrl.c
+@@ -2747,7 +2747,9 @@ static enum i40iw_status_code i40iw_sc_alloc_stag(
+       u64 *wqe;
+       struct i40iw_sc_cqp *cqp;
+       u64 header;
++      enum i40iw_page_size page_size;
++      page_size = (info->page_size == 0x200000) ? I40IW_PAGE_SIZE_2M : I40IW_PAGE_SIZE_4K;
+       cqp = dev->cqp;
+       wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
+       if (!wqe)
+@@ -2767,7 +2769,7 @@ static enum i40iw_status_code i40iw_sc_alloc_stag(
+                LS_64(1, I40IW_CQPSQ_STAG_MR) |
+                LS_64(info->access_rights, I40IW_CQPSQ_STAG_ARIGHTS) |
+                LS_64(info->chunk_size, I40IW_CQPSQ_STAG_LPBLSIZE) |
+-               LS_64(info->page_size, I40IW_CQPSQ_STAG_HPAGESIZE) |
++               LS_64(page_size, I40IW_CQPSQ_STAG_HPAGESIZE) |
+                LS_64(info->remote_access, I40IW_CQPSQ_STAG_REMACCENABLED) |
+                LS_64(info->use_hmc_fcn_index, I40IW_CQPSQ_STAG_USEHMCFNIDX) |
+                LS_64(info->use_pf_rid, I40IW_CQPSQ_STAG_USEPFRID) |
+@@ -2803,7 +2805,9 @@ static enum i40iw_status_code i40iw_sc_mr_reg_non_shared(
+       u32 pble_obj_cnt;
+       bool remote_access;
+       u8 addr_type;
++      enum i40iw_page_size page_size;
++      page_size = (info->page_size == 0x200000) ? I40IW_PAGE_SIZE_2M : I40IW_PAGE_SIZE_4K;
+       if (info->access_rights & (I40IW_ACCESS_FLAGS_REMOTEREAD_ONLY |
+                                  I40IW_ACCESS_FLAGS_REMOTEWRITE_ONLY))
+               remote_access = true;
+@@ -2846,7 +2850,7 @@ static enum i40iw_status_code i40iw_sc_mr_reg_non_shared(
+       header = LS_64(I40IW_CQP_OP_REG_MR, I40IW_CQPSQ_OPCODE) |
+                LS_64(1, I40IW_CQPSQ_STAG_MR) |
+                LS_64(info->chunk_size, I40IW_CQPSQ_STAG_LPBLSIZE) |
+-               LS_64(info->page_size, I40IW_CQPSQ_STAG_HPAGESIZE) |
++               LS_64(page_size, I40IW_CQPSQ_STAG_HPAGESIZE) |
+                LS_64(info->access_rights, I40IW_CQPSQ_STAG_ARIGHTS) |
+                LS_64(remote_access, I40IW_CQPSQ_STAG_REMACCENABLED) |
+                LS_64(addr_type, I40IW_CQPSQ_STAG_VABASEDTO) |
+@@ -3061,7 +3065,9 @@ enum i40iw_status_code i40iw_sc_mr_fast_register(
+       u64 temp, header;
+       u64 *wqe;
+       u32 wqe_idx;
++      enum i40iw_page_size page_size;
++      page_size = (info->page_size == 0x200000) ? I40IW_PAGE_SIZE_2M : I40IW_PAGE_SIZE_4K;
+       wqe = i40iw_qp_get_next_send_wqe(&qp->qp_uk, &wqe_idx, I40IW_QP_WQE_MIN_SIZE,
+                                        0, info->wr_id);
+       if (!wqe)
+@@ -3088,7 +3094,7 @@ enum i40iw_status_code i40iw_sc_mr_fast_register(
+                LS_64(info->stag_idx, I40IWQPSQ_STAGINDEX) |
+                LS_64(I40IWQP_OP_FAST_REGISTER, I40IWQPSQ_OPCODE) |
+                LS_64(info->chunk_size, I40IWQPSQ_LPBLSIZE) |
+-               LS_64(info->page_size, I40IWQPSQ_HPAGESIZE) |
++               LS_64(page_size, I40IWQPSQ_HPAGESIZE) |
+                LS_64(info->access_rights, I40IWQPSQ_STAGRIGHTS) |
+                LS_64(info->addr_type, I40IWQPSQ_VABASEDTO) |
+                LS_64(info->read_fence, I40IWQPSQ_READFENCE) |
+diff --git a/drivers/infiniband/hw/i40iw/i40iw_type.h b/drivers/infiniband/hw/i40iw/i40iw_type.h
+index d1847e6..928d91b 100644
+--- a/drivers/infiniband/hw/i40iw/i40iw_type.h
++++ b/drivers/infiniband/hw/i40iw/i40iw_type.h
+@@ -74,6 +74,11 @@ struct i40iw_cq_shadow_area {
+ struct i40iw_priv_cq_ops;
+ struct i40iw_hmc_ops;
++enum i40iw_page_size {
++      I40IW_PAGE_SIZE_4K,
++      I40IW_PAGE_SIZE_2M
++};
++
+ enum i40iw_resource_indicator_type {
+       I40IW_RSRC_INDICATOR_TYPE_ADAPTER = 0,
+       I40IW_RSRC_INDICATOR_TYPE_CQ,
+-- 
+1.8.3.1
+
diff --git a/linux-next-cherry-picks/0018-i40iw-Use-vector-when-creating-CQs.patch b/linux-next-cherry-picks/0018-i40iw-Use-vector-when-creating-CQs.patch
new file mode 100755 (executable)
index 0000000..3325ac3
--- /dev/null
@@ -0,0 +1,88 @@
+From e69c5093617afdbd2ab02c289d0adaac044dff66 Mon Sep 17 00:00:00 2001
+From: Henry Orosco <henry.orosco@intel.com>
+Date: Wed, 9 Nov 2016 21:24:48 -0600
+Subject: [PATCH 18/52] i40iw: Use vector when creating CQs
+
+Assign each CEQ vector to a different CPU when possible, then
+when creating a CQ, use the vector for the CEQ id. This
+allows completion work to be distributed over multiple cores.
+
+Signed-off-by: Mustafa Ismail <mustafa.ismail@intel.com>
+Signed-off-by: Henry Orosco <henry.orosco@intel.com>
+Signed-off-by: Doug Ledford <dledford@redhat.com>
+---
+ drivers/infiniband/hw/i40iw/i40iw_main.c  | 8 +++++++-
+ drivers/infiniband/hw/i40iw/i40iw_verbs.c | 5 +++--
+ 2 files changed, 10 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/infiniband/hw/i40iw/i40iw_main.c b/drivers/infiniband/hw/i40iw/i40iw_main.c
+index e6abdaf..ed24831 100644
+--- a/drivers/infiniband/hw/i40iw/i40iw_main.c
++++ b/drivers/infiniband/hw/i40iw/i40iw_main.c
+@@ -270,6 +270,7 @@ static void i40iw_disable_irq(struct i40iw_sc_dev *dev,
+               i40iw_wr32(dev->hw, I40E_PFINT_DYN_CTLN(msix_vec->idx - 1), 0);
+       else
+               i40iw_wr32(dev->hw, I40E_VFINT_DYN_CTLN1(msix_vec->idx - 1), 0);
++      irq_set_affinity_hint(msix_vec->irq, NULL);
+       free_irq(msix_vec->irq, dev_id);
+ }
+@@ -688,6 +689,7 @@ static enum i40iw_status_code i40iw_configure_ceq_vector(struct i40iw_device *iw
+                                                        struct i40iw_msix_vector *msix_vec)
+ {
+       enum i40iw_status_code status;
++      cpumask_t mask;
+       if (iwdev->msix_shared && !ceq_id) {
+               tasklet_init(&iwdev->dpc_tasklet, i40iw_dpc, (unsigned long)iwdev);
+@@ -697,12 +699,15 @@ static enum i40iw_status_code i40iw_configure_ceq_vector(struct i40iw_device *iw
+               status = request_irq(msix_vec->irq, i40iw_ceq_handler, 0, "CEQ", iwceq);
+       }
++      cpumask_clear(&mask);
++      cpumask_set_cpu(msix_vec->cpu_affinity, &mask);
++      irq_set_affinity_hint(msix_vec->irq, &mask);
++
+       if (status) {
+               i40iw_pr_err("ceq irq config fail\n");
+               return I40IW_ERR_CONFIG;
+       }
+       msix_vec->ceq_id = ceq_id;
+-      msix_vec->cpu_affinity = 0;
+       return 0;
+ }
+@@ -1396,6 +1401,7 @@ static enum i40iw_status_code i40iw_save_msix_info(struct i40iw_device *iwdev,
+       for (i = 0, ceq_idx = 0; i < iwdev->msix_count; i++, iw_qvinfo++) {
+               iwdev->iw_msixtbl[i].idx = ldev->msix_entries[i].entry;
+               iwdev->iw_msixtbl[i].irq = ldev->msix_entries[i].vector;
++              iwdev->iw_msixtbl[i].cpu_affinity = ceq_idx;
+               if (i == 0) {
+                       iw_qvinfo->aeq_idx = 0;
+                       if (iwdev->msix_shared)
+diff --git a/drivers/infiniband/hw/i40iw/i40iw_verbs.c b/drivers/infiniband/hw/i40iw/i40iw_verbs.c
+index 56e1c2c..aacaa0f 100644
+--- a/drivers/infiniband/hw/i40iw/i40iw_verbs.c
++++ b/drivers/infiniband/hw/i40iw/i40iw_verbs.c
+@@ -1135,7 +1135,8 @@ static struct ib_cq *i40iw_create_cq(struct ib_device *ibdev,
+       ukinfo->cq_id = cq_num;
+       iwcq->ibcq.cqe = info.cq_uk_init_info.cq_size;
+       info.ceqe_mask = 0;
+-      info.ceq_id = 0;
++      if (attr->comp_vector < iwdev->ceqs_count)
++              info.ceq_id = attr->comp_vector;
+       info.ceq_id_valid = true;
+       info.ceqe_mask = 1;
+       info.type = I40IW_CQ_TYPE_IWARP;
+@@ -2619,7 +2620,7 @@ static struct i40iw_ib_device *i40iw_init_rdma_device(struct i40iw_device *iwdev
+           (1ull << IB_USER_VERBS_CMD_POST_RECV) |
+           (1ull << IB_USER_VERBS_CMD_POST_SEND);
+       iwibdev->ibdev.phys_port_cnt = 1;
+-      iwibdev->ibdev.num_comp_vectors = 1;
++      iwibdev->ibdev.num_comp_vectors = iwdev->ceqs_count;
+       iwibdev->ibdev.dma_device = &pcidev->dev;
+       iwibdev->ibdev.dev.parent = &pcidev->dev;
+       iwibdev->ibdev.query_port = i40iw_query_port;
+-- 
+1.8.3.1
+
diff --git a/linux-next-cherry-picks/0019-i40iw-Correct-values-for-max_recv_sge-max_send_sge.patch b/linux-next-cherry-picks/0019-i40iw-Correct-values-for-max_recv_sge-max_send_sge.patch
new file mode 100755 (executable)
index 0000000..b50fad2
--- /dev/null
@@ -0,0 +1,47 @@
+From 01d0b36798732d826fbf84de8961a09b3a2fbf3f Mon Sep 17 00:00:00 2001
+From: Henry Orosco <henry.orosco@intel.com>
+Date: Wed, 9 Nov 2016 21:26:39 -0600
+Subject: [PATCH 19/52] i40iw: Correct values for max_recv_sge, max_send_sge
+
+When creating QPs, ensure init_attr->cap.max_recv_sge
+is clipped to MAX_FRAG_COUNT.
+
+Expose MAX_FRAG_COUNT for max_recv_sge and max_send_sge in
+i40iw_query_qp().
+
+Signed-off-by: Shiraz Saleem <shiraz.saleem@intel.com>
+Signed-off-by: Henry Orosco <henry.orosco@intel.com>
+Reviewed-By: Leon Romanovsky <leonro@mellanox.com>
+Signed-off-by: Doug Ledford <dledford@redhat.com>
+---
+ drivers/infiniband/hw/i40iw/i40iw_verbs.c | 6 +++++-
+ 1 file changed, 5 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/infiniband/hw/i40iw/i40iw_verbs.c b/drivers/infiniband/hw/i40iw/i40iw_verbs.c
+index aacaa0f..6b516d6 100644
+--- a/drivers/infiniband/hw/i40iw/i40iw_verbs.c
++++ b/drivers/infiniband/hw/i40iw/i40iw_verbs.c
+@@ -608,6 +608,9 @@ static struct ib_qp *i40iw_create_qp(struct ib_pd *ibpd,
+       if (init_attr->cap.max_send_sge > I40IW_MAX_WQ_FRAGMENT_COUNT)
+               init_attr->cap.max_send_sge = I40IW_MAX_WQ_FRAGMENT_COUNT;
++      if (init_attr->cap.max_recv_sge > I40IW_MAX_WQ_FRAGMENT_COUNT)
++              init_attr->cap.max_recv_sge = I40IW_MAX_WQ_FRAGMENT_COUNT;
++
+       memset(&init_info, 0, sizeof(init_info));
+       sq_size = init_attr->cap.max_send_wr;
+@@ -813,8 +816,9 @@ static int i40iw_query_qp(struct ib_qp *ibqp,
+       attr->qp_access_flags = 0;
+       attr->cap.max_send_wr = qp->qp_uk.sq_size;
+       attr->cap.max_recv_wr = qp->qp_uk.rq_size;
+-      attr->cap.max_recv_sge = 1;
+       attr->cap.max_inline_data = I40IW_MAX_INLINE_DATA_SIZE;
++      attr->cap.max_send_sge = I40IW_MAX_WQ_FRAGMENT_COUNT;
++      attr->cap.max_recv_sge = I40IW_MAX_WQ_FRAGMENT_COUNT;
+       init_attr->event_handler = iwqp->ibqp.event_handler;
+       init_attr->qp_context = iwqp->ibqp.qp_context;
+       init_attr->send_cq = iwqp->ibqp.send_cq;
+-- 
+1.8.3.1
+
diff --git a/linux-next-cherry-picks/0020-i40iw-Fix-for-LAN-handler-removal.patch b/linux-next-cherry-picks/0020-i40iw-Fix-for-LAN-handler-removal.patch
new file mode 100755 (executable)
index 0000000..1de34bd
--- /dev/null
@@ -0,0 +1,65 @@
+From c38d7e0d08421a53cea4e09b76b3453d499fbd67 Mon Sep 17 00:00:00 2001
+From: Henry Orosco <henry.orosco@intel.com>
+Date: Wed, 9 Nov 2016 21:27:02 -0600
+Subject: [PATCH 20/52] i40iw: Fix for LAN handler removal
+
+If i40iw_open() fails for any reason, the LAN handler
+is not being removed. Modify i40iw_deinit_device()
+to always remove the handler.
+
+Signed-off-by: Mustafa Ismail <mustafa.ismail@intel.com>
+Signed-off-by: Henry Orosco <henry.orosco@intel.com>
+Signed-off-by: Doug Ledford <dledford@redhat.com>
+---
+ drivers/infiniband/hw/i40iw/i40iw_main.c | 10 ++++------
+ 1 file changed, 4 insertions(+), 6 deletions(-)
+
+diff --git a/drivers/infiniband/hw/i40iw/i40iw_main.c b/drivers/infiniband/hw/i40iw/i40iw_main.c
+index ed24831..db9fd31 100644
+--- a/drivers/infiniband/hw/i40iw/i40iw_main.c
++++ b/drivers/infiniband/hw/i40iw/i40iw_main.c
+@@ -1422,12 +1422,11 @@ static enum i40iw_status_code i40iw_save_msix_info(struct i40iw_device *iwdev,
+  * i40iw_deinit_device - clean up the device resources
+  * @iwdev: iwarp device
+  * @reset: true if called before reset
+- * @del_hdl: true if delete hdl entry
+  *
+  * Destroy the ib device interface, remove the mac ip entry and ipv4/ipv6 addresses,
+  * destroy the device queues and free the pble and the hmc objects
+  */
+-static void i40iw_deinit_device(struct i40iw_device *iwdev, bool reset, bool del_hdl)
++static void i40iw_deinit_device(struct i40iw_device *iwdev, bool reset)
+ {
+       struct i40e_info *ldev = iwdev->ldev;
+@@ -1492,8 +1491,7 @@ static void i40iw_deinit_device(struct i40iw_device *iwdev, bool reset, bool del
+               break;
+       }
+-      if (del_hdl)
+-              i40iw_del_handler(i40iw_find_i40e_handler(ldev));
++      i40iw_del_handler(i40iw_find_i40e_handler(ldev));
+       kfree(iwdev->hdl);
+ }
+@@ -1658,7 +1656,7 @@ static int i40iw_open(struct i40e_info *ldev, struct i40e_client *client)
+       } while (0);
+       i40iw_pr_err("status = %d last completion = %d\n", status, iwdev->init_state);
+-      i40iw_deinit_device(iwdev, false, false);
++      i40iw_deinit_device(iwdev, false);
+       return -ERESTART;
+ }
+@@ -1736,7 +1734,7 @@ static void i40iw_close(struct i40e_info *ldev, struct i40e_client *client, bool
+       iwdev = &hdl->device;
+       destroy_workqueue(iwdev->virtchnl_wq);
+-      i40iw_deinit_device(iwdev, reset, true);
++      i40iw_deinit_device(iwdev, reset);
+ }
+ /**
+-- 
+1.8.3.1
+
diff --git a/linux-next-cherry-picks/0021-i40iw-Optimize-inline-data-copy.patch b/linux-next-cherry-picks/0021-i40iw-Optimize-inline-data-copy.patch
new file mode 100755 (executable)
index 0000000..f9e40e2
--- /dev/null
@@ -0,0 +1,78 @@
+From e7f9774af591d346990f1d6dfca0ee9caeb52756 Mon Sep 17 00:00:00 2001
+From: Henry Orosco <henry.orosco@intel.com>
+Date: Wed, 9 Nov 2016 21:28:02 -0600
+Subject: [PATCH 21/52] i40iw: Optimize inline data copy
+
+Use memcpy for inline data copy in sends
+and writes instead of byte by byte copy.
+
+Signed-off-by: Mustafa Ismail <mustafa.ismail@intel.com>
+Signed-off-by: Henry Orosco <henry.orosco@intel.com>
+Signed-off-by: Doug Ledford <dledford@redhat.com>
+---
+ drivers/infiniband/hw/i40iw/i40iw_uk.c | 24 ++++++++++--------------
+ 1 file changed, 10 insertions(+), 14 deletions(-)
+
+diff --git a/drivers/infiniband/hw/i40iw/i40iw_uk.c b/drivers/infiniband/hw/i40iw/i40iw_uk.c
+index 4d28c3c..47cb2e0 100644
+--- a/drivers/infiniband/hw/i40iw/i40iw_uk.c
++++ b/drivers/infiniband/hw/i40iw/i40iw_uk.c
+@@ -430,7 +430,7 @@ static enum i40iw_status_code i40iw_inline_rdma_write(struct i40iw_qp_uk *qp,
+       struct i40iw_inline_rdma_write *op_info;
+       u64 *push;
+       u64 header = 0;
+-      u32 i, wqe_idx;
++      u32 wqe_idx;
+       enum i40iw_status_code ret_code;
+       bool read_fence = false;
+       u8 wqe_size;
+@@ -465,14 +465,12 @@ static enum i40iw_status_code i40iw_inline_rdma_write(struct i40iw_qp_uk *qp,
+       src = (u8 *)(op_info->data);
+       if (op_info->len <= 16) {
+-              for (i = 0; i < op_info->len; i++, src++, dest++)
+-                      *dest = *src;
++              memcpy(dest, src, op_info->len);
+       } else {
+-              for (i = 0; i < 16; i++, src++, dest++)
+-                      *dest = *src;
++              memcpy(dest, src, 16);
++              src += 16;
+               dest = (u8 *)wqe + 32;
+-              for (; i < op_info->len; i++, src++, dest++)
+-                      *dest = *src;
++              memcpy(dest, src, op_info->len - 16);
+       }
+       wmb(); /* make sure WQE is populated before valid bit is set */
+@@ -507,7 +505,7 @@ static enum i40iw_status_code i40iw_inline_send(struct i40iw_qp_uk *qp,
+       u8 *dest, *src;
+       struct i40iw_post_inline_send *op_info;
+       u64 header;
+-      u32 wqe_idx, i;
++      u32 wqe_idx;
+       enum i40iw_status_code ret_code;
+       bool read_fence = false;
+       u8 wqe_size;
+@@ -540,14 +538,12 @@ static enum i40iw_status_code i40iw_inline_send(struct i40iw_qp_uk *qp,
+       src = (u8 *)(op_info->data);
+       if (op_info->len <= 16) {
+-              for (i = 0; i < op_info->len; i++, src++, dest++)
+-                      *dest = *src;
++              memcpy(dest, src, op_info->len);
+       } else {
+-              for (i = 0; i < 16; i++, src++, dest++)
+-                      *dest = *src;
++              memcpy(dest, src, 16);
++              src += 16;
+               dest = (u8 *)wqe + 32;
+-              for (; i < op_info->len; i++, src++, dest++)
+-                      *dest = *src;
++              memcpy(dest, src, op_info->len - 16);
+       }
+       wmb(); /* make sure WQE is populated before valid bit is set */
+-- 
+1.8.3.1
+
diff --git a/linux-next-cherry-picks/0022-i40iw-Query-device-accounts-for-internal-rsrc.patch b/linux-next-cherry-picks/0022-i40iw-Query-device-accounts-for-internal-rsrc.patch
new file mode 100755 (executable)
index 0000000..e59f570
--- /dev/null
@@ -0,0 +1,120 @@
+From 85a87c90ee90217da1b05a77bbb47ebe31a2f124 Mon Sep 17 00:00:00 2001
+From: Henry Orosco <henry.orosco@intel.com>
+Date: Wed, 9 Nov 2016 21:30:28 -0600
+Subject: [PATCH 22/52] i40iw: Query device accounts for internal rsrc
+
+Some resources are consumed internally and not available to the user.
+After hw is initialized, figure out how many resources are consumed
+and subtract those numbers from the initial max device capability in
+i40iw_query_device().
+
+Signed-off-by: Henry Orosco <henry.orosco@intel.com>
+Signed-off-by: Doug Ledford <dledford@redhat.com>
+---
+ drivers/infiniband/hw/i40iw/i40iw.h       |  4 ++++
+ drivers/infiniband/hw/i40iw/i40iw_hw.c    |  2 +-
+ drivers/infiniband/hw/i40iw/i40iw_main.c  | 15 +++++++++++++++
+ drivers/infiniband/hw/i40iw/i40iw_user.h  |  3 ++-
+ drivers/infiniband/hw/i40iw/i40iw_verbs.c |  8 ++++----
+ 5 files changed, 26 insertions(+), 6 deletions(-)
+
+diff --git a/drivers/infiniband/hw/i40iw/i40iw.h b/drivers/infiniband/hw/i40iw/i40iw.h
+index 4a0c12b..dac9a6b 100644
+--- a/drivers/infiniband/hw/i40iw/i40iw.h
++++ b/drivers/infiniband/hw/i40iw/i40iw.h
+@@ -303,6 +303,10 @@ struct i40iw_device {
+       u32 mr_stagmask;
+       u32 mpa_version;
+       bool dcb;
++      u32 used_pds;
++      u32 used_cqs;
++      u32 used_mrs;
++      u32 used_qps;
+ };
+ struct i40iw_ib_device {
+diff --git a/drivers/infiniband/hw/i40iw/i40iw_hw.c b/drivers/infiniband/hw/i40iw/i40iw_hw.c
+index b94727f..5e2c16c 100644
+--- a/drivers/infiniband/hw/i40iw/i40iw_hw.c
++++ b/drivers/infiniband/hw/i40iw/i40iw_hw.c
+@@ -62,7 +62,7 @@ u32 i40iw_initialize_hw_resources(struct i40iw_device *iwdev)
+       max_mr = iwdev->sc_dev.hmc_info->hmc_obj[I40IW_HMC_IW_MR].cnt;
+       arp_table_size = iwdev->sc_dev.hmc_info->hmc_obj[I40IW_HMC_IW_ARP].cnt;
+       iwdev->max_cqe = 0xFFFFF;
+-      num_pds = max_qp * 4;
++      num_pds = I40IW_MAX_PDS;
+       resources_size = sizeof(struct i40iw_arp_entry) * arp_table_size;
+       resources_size += sizeof(unsigned long) * BITS_TO_LONGS(max_qp);
+       resources_size += sizeof(unsigned long) * BITS_TO_LONGS(max_mr);
+diff --git a/drivers/infiniband/hw/i40iw/i40iw_main.c b/drivers/infiniband/hw/i40iw/i40iw_main.c
+index db9fd31..9d3b9ee 100644
+--- a/drivers/infiniband/hw/i40iw/i40iw_main.c
++++ b/drivers/infiniband/hw/i40iw/i40iw_main.c
+@@ -1558,6 +1558,20 @@ static enum i40iw_status_code i40iw_setup_init_state(struct i40iw_handler *hdl,
+ }
+ /**
++ * i40iw_get_used_rsrc - determine resources used internally
++ * @iwdev: iwarp device
++ *
++ * Called after internal allocations
++ */
++static void i40iw_get_used_rsrc(struct i40iw_device *iwdev)
++{
++      iwdev->used_pds = find_next_zero_bit(iwdev->allocated_pds, iwdev->max_pd, 0);
++      iwdev->used_qps = find_next_zero_bit(iwdev->allocated_qps, iwdev->max_qp, 0);
++      iwdev->used_cqs = find_next_zero_bit(iwdev->allocated_cqs, iwdev->max_cq, 0);
++      iwdev->used_mrs = find_next_zero_bit(iwdev->allocated_mrs, iwdev->max_mr, 0);
++}
++
++/**
+  * i40iw_open - client interface operation open for iwarp/uda device
+  * @ldev: lan device information
+  * @client: iwarp client information, provided during registration
+@@ -1629,6 +1643,7 @@ static int i40iw_open(struct i40e_info *ldev, struct i40e_client *client)
+               status = i40iw_initialize_hw_resources(iwdev);
+               if (status)
+                       break;
++              i40iw_get_used_rsrc(iwdev);
+               dev->ccq_ops->ccq_arm(dev->ccq);
+               status = i40iw_hmc_init_pble(&iwdev->sc_dev, iwdev->pble_rsrc);
+               if (status)
+diff --git a/drivers/infiniband/hw/i40iw/i40iw_user.h b/drivers/infiniband/hw/i40iw/i40iw_user.h
+index e65c2baa..66263fc 100644
+--- a/drivers/infiniband/hw/i40iw/i40iw_user.h
++++ b/drivers/infiniband/hw/i40iw/i40iw_user.h
+@@ -76,7 +76,8 @@ enum i40iw_device_capabilities_const {
+       I40IW_MAX_ORD_SIZE =                    127,
+       I40IW_MAX_WQ_ENTRIES =                  2048,
+       I40IW_Q2_BUFFER_SIZE =                  (248 + 100),
+-      I40IW_QP_CTX_SIZE =                     248
++      I40IW_QP_CTX_SIZE =                     248,
++      I40IW_MAX_PDS =                         32768
+ };
+ #define i40iw_handle void *
+diff --git a/drivers/infiniband/hw/i40iw/i40iw_verbs.c b/drivers/infiniband/hw/i40iw/i40iw_verbs.c
+index 6b516d6..d20ee11 100644
+--- a/drivers/infiniband/hw/i40iw/i40iw_verbs.c
++++ b/drivers/infiniband/hw/i40iw/i40iw_verbs.c
+@@ -67,13 +67,13 @@ static int i40iw_query_device(struct ib_device *ibdev,
+       props->vendor_part_id = iwdev->ldev->pcidev->device;
+       props->hw_ver = (u32)iwdev->sc_dev.hw_rev;
+       props->max_mr_size = I40IW_MAX_OUTBOUND_MESSAGE_SIZE;
+-      props->max_qp = iwdev->max_qp;
++      props->max_qp = iwdev->max_qp - iwdev->used_qps;
+       props->max_qp_wr = (I40IW_MAX_WQ_ENTRIES >> 2) - 1;
+       props->max_sge = I40IW_MAX_WQ_FRAGMENT_COUNT;
+-      props->max_cq = iwdev->max_cq;
++      props->max_cq = iwdev->max_cq - iwdev->used_cqs;
+       props->max_cqe = iwdev->max_cqe;
+-      props->max_mr = iwdev->max_mr;
+-      props->max_pd = iwdev->max_pd;
++      props->max_mr = iwdev->max_mr - iwdev->used_mrs;
++      props->max_pd = iwdev->max_pd - iwdev->used_pds;
+       props->max_sge_rd = I40IW_MAX_SGE_RD;
+       props->max_qp_rd_atom = I40IW_MAX_IRD_SIZE;
+       props->max_qp_init_rd_atom = props->max_qp_rd_atom;
+-- 
+1.8.3.1
+
diff --git a/linux-next-cherry-picks/0023-i40iw-Remove-checks-for-more-than-48-bytes-inline-da.patch b/linux-next-cherry-picks/0023-i40iw-Remove-checks-for-more-than-48-bytes-inline-da.patch
new file mode 100755 (executable)
index 0000000..78839fd
--- /dev/null
@@ -0,0 +1,39 @@
+From 799749979dbf41a878a00abdae00cf484c21a5b2 Mon Sep 17 00:00:00 2001
+From: Henry Orosco <henry.orosco@intel.com>
+Date: Wed, 9 Nov 2016 21:32:03 -0600
+Subject: [PATCH 23/52] i40iw: Remove checks for more than 48 bytes inline data
+
+Remove dead code, which isn't executed because we
+return error if the data size is greater than 48 bytes.
+
+Inline data size greater than 48 bytes isn't supported
+and the maximum WQE size is 64 bytes.
+
+Signed-off-by: Tatyana Nikolova <tatyana.e.nikolova@intel.com>
+Signed-off-by: Henry Orosco <henry.orosco@intel.com>
+Signed-off-by: Doug Ledford <dledford@redhat.com>
+---
+ drivers/infiniband/hw/i40iw/i40iw_uk.c | 6 +-----
+ 1 file changed, 1 insertion(+), 5 deletions(-)
+
+diff --git a/drivers/infiniband/hw/i40iw/i40iw_uk.c b/drivers/infiniband/hw/i40iw/i40iw_uk.c
+index 47cb2e0..5d9c3bf 100644
+--- a/drivers/infiniband/hw/i40iw/i40iw_uk.c
++++ b/drivers/infiniband/hw/i40iw/i40iw_uk.c
+@@ -1186,12 +1186,8 @@ enum i40iw_status_code i40iw_inline_data_size_to_wqesize(u32 data_size,
+       if (data_size <= 16)
+               *wqe_size = I40IW_QP_WQE_MIN_SIZE;
+-      else if (data_size <= 48)
+-              *wqe_size = 64;
+-      else if (data_size <= 80)
+-              *wqe_size = 96;
+       else
+-              *wqe_size = 128;
++              *wqe_size = 64;
+       return 0;
+ }
+-- 
+1.8.3.1
+
diff --git a/linux-next-cherry-picks/0024-i40iw-Remove-NULL-check-for-cm_node-iwdev.patch b/linux-next-cherry-picks/0024-i40iw-Remove-NULL-check-for-cm_node-iwdev.patch
new file mode 100755 (executable)
index 0000000..bc291ff
--- /dev/null
@@ -0,0 +1,32 @@
+From 1ad19f739f494eda2f8e9611ab6c3056244b70fc Mon Sep 17 00:00:00 2001
+From: Henry Orosco <henry.orosco@intel.com>
+Date: Wed, 9 Nov 2016 21:32:25 -0600
+Subject: [PATCH 24/52] i40iw: Remove NULL check for cm_node->iwdev
+
+It is not necessary to check cm_node->iwdev in
+i40iw_rem_ref_cm_node() as it can never be NULL after
+a successful call out of i40iw_make_cm_node().
+
+Signed-off-by: Chien Tin Tung <chien.tin.tung@intel.com>
+Signed-off-by: Henry Orosco <henry.orosco@intel.com>
+Signed-off-by: Doug Ledford <dledford@redhat.com>
+---
+ drivers/infiniband/hw/i40iw/i40iw_cm.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/infiniband/hw/i40iw/i40iw_cm.c b/drivers/infiniband/hw/i40iw/i40iw_cm.c
+index 9e447b5..cbd77eb 100644
+--- a/drivers/infiniband/hw/i40iw/i40iw_cm.c
++++ b/drivers/infiniband/hw/i40iw/i40iw_cm.c
+@@ -2234,7 +2234,7 @@ static void i40iw_rem_ref_cm_node(struct i40iw_cm_node *cm_node)
+               i40iw_dec_refcnt_listen(cm_core, cm_node->listener, 0, true);
+       } else {
+               if (!i40iw_listen_port_in_use(cm_core, cm_node->loc_port) &&
+-                  cm_node->apbvt_set && cm_node->iwdev) {
++                  cm_node->apbvt_set) {
+                       i40iw_manage_apbvt(cm_node->iwdev,
+                                          cm_node->loc_port,
+                                          I40IW_MANAGE_APBVT_DEL);
+-- 
+1.8.3.1
+
diff --git a/linux-next-cherry-picks/0025-i40iw-Use-actual-page-size.patch b/linux-next-cherry-picks/0025-i40iw-Use-actual-page-size.patch
new file mode 100755 (executable)
index 0000000..3c1c743
--- /dev/null
@@ -0,0 +1,49 @@
+From e67791858e7b8d1389833386cb2dd0ca30d06862 Mon Sep 17 00:00:00 2001
+From: Henry Orosco <henry.orosco@intel.com>
+Date: Wed, 9 Nov 2016 21:33:32 -0600
+Subject: [PATCH 25/52] i40iw: Use actual page size
+
+In i40iw_post_send, use the actual page size instead of
+encoded page size. This is to be consistent with the
+rest of the file.
+
+Signed-off-by: Mustafa Ismail <mustafa.ismail@intel.com>
+Signed-off-by: Henry Orosco <henry.orosco@intel.com>
+Signed-off-by: Doug Ledford <dledford@redhat.com>
+---
+ drivers/infiniband/hw/i40iw/i40iw_verbs.c | 5 +----
+ 1 file changed, 1 insertion(+), 4 deletions(-)
+
+diff --git a/drivers/infiniband/hw/i40iw/i40iw_verbs.c b/drivers/infiniband/hw/i40iw/i40iw_verbs.c
+index d20ee11..dcf08b8 100644
+--- a/drivers/infiniband/hw/i40iw/i40iw_verbs.c
++++ b/drivers/infiniband/hw/i40iw/i40iw_verbs.c
+@@ -2145,7 +2145,6 @@ static int i40iw_post_send(struct ib_qp *ibqp,
+               case IB_WR_REG_MR:
+               {
+                       struct i40iw_mr *iwmr = to_iwmr(reg_wr(ib_wr)->mr);
+-                      int page_shift = ilog2(reg_wr(ib_wr)->mr->page_size);
+                       int flags = reg_wr(ib_wr)->access;
+                       struct i40iw_pble_alloc *palloc = &iwmr->iwpbl.pble_alloc;
+                       struct i40iw_sc_dev *dev = &iwqp->iwdev->sc_dev;
+@@ -2156,6 +2155,7 @@ static int i40iw_post_send(struct ib_qp *ibqp,
+                       info.access_rights |= i40iw_get_user_access(flags);
+                       info.stag_key = reg_wr(ib_wr)->key & 0xff;
+                       info.stag_idx = reg_wr(ib_wr)->key >> 8;
++                      info.page_size = reg_wr(ib_wr)->mr->page_size;
+                       info.wr_id = ib_wr->wr_id;
+                       info.addr_type = I40IW_ADDR_TYPE_VA_BASED;
+@@ -2169,9 +2169,6 @@ static int i40iw_post_send(struct ib_qp *ibqp,
+                       if (iwmr->npages > I40IW_MIN_PAGES_PER_FMR)
+                               info.chunk_size = 1;
+-                      if (page_shift == 21)
+-                              info.page_size = 1; /* 2M page */
+-
+                       ret = dev->iw_priv_qp_ops->iw_mr_fast_register(&iwqp->sc_qp, &info, true);
+                       if (ret)
+                               err = -ENOMEM;
+-- 
+1.8.3.1
+
diff --git a/linux-next-cherry-picks/0026-i40iw-Use-runtime-check-for-IS_ENABLED-CONFIG_IPV6.patch b/linux-next-cherry-picks/0026-i40iw-Use-runtime-check-for-IS_ENABLED-CONFIG_IPV6.patch
new file mode 100755 (executable)
index 0000000..5d630d8
--- /dev/null
@@ -0,0 +1,42 @@
+From 5ebcb0ff54e594668e506583fa7344d101e3d05e Mon Sep 17 00:00:00 2001
+From: Henry Orosco <henry.orosco@intel.com>
+Date: Wed, 9 Nov 2016 21:34:02 -0600
+Subject: [PATCH 26/52] i40iw: Use runtime check for IS_ENABLED(CONFIG_IPV6)
+
+To be consistent, use the runtime check instead of
+conditional compile.
+
+Signed-off-by: Mustafa Ismail <mustafa.ismail@intel.com>
+Signed-off-by: Henry Orosco <henry.orosco@intel.com>
+Signed-off-by: Doug Ledford <dledford@redhat.com>
+---
+ drivers/infiniband/hw/i40iw/i40iw_cm.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/infiniband/hw/i40iw/i40iw_cm.c b/drivers/infiniband/hw/i40iw/i40iw_cm.c
+index cbd77eb..b60e346 100644
+--- a/drivers/infiniband/hw/i40iw/i40iw_cm.c
++++ b/drivers/infiniband/hw/i40iw/i40iw_cm.c
+@@ -1583,9 +1583,10 @@ static enum i40iw_status_code i40iw_del_multiple_qhash(
+ static struct net_device *i40iw_netdev_vlan_ipv6(u32 *addr, u16 *vlan_id, u8 *mac)
+ {
+       struct net_device *ip_dev = NULL;
+-#if IS_ENABLED(CONFIG_IPV6)
+       struct in6_addr laddr6;
++      if (!IS_ENABLED(CONFIG_IPV6))
++              return NULL;
+       i40iw_copy_ip_htonl(laddr6.in6_u.u6_addr32, addr);
+       if (vlan_id)
+               *vlan_id = I40IW_NO_VLAN;
+@@ -1602,7 +1603,6 @@ static struct net_device *i40iw_netdev_vlan_ipv6(u32 *addr, u16 *vlan_id, u8 *ma
+               }
+       }
+       rcu_read_unlock();
+-#endif
+       return ip_dev;
+ }
+-- 
+1.8.3.1
+
diff --git a/linux-next-cherry-picks/0027-i40iw-Remove-check-on-return-from-device_init_pestat.patch b/linux-next-cherry-picks/0027-i40iw-Remove-check-on-return-from-device_init_pestat.patch
new file mode 100755 (executable)
index 0000000..25a6358
--- /dev/null
@@ -0,0 +1,80 @@
+From dfd9c43b3ce97e1b66a6dc1d9dcc95db9a27cc4b Mon Sep 17 00:00:00 2001
+From: Henry Orosco <henry.orosco@intel.com>
+Date: Wed, 9 Nov 2016 21:42:26 -0600
+Subject: [PATCH 27/52] i40iw: Remove check on return from device_init_pestat()
+
+Remove unnecessary check for return code from
+device_init_pestat() and change func to void.
+
+Signed-off-by: Shiraz Saleem <shiraz.saleem@intel.com>
+Signed-off-by: Henry Orosco <henry.orosco@intel.com>
+Signed-off-by: Doug Ledford <dledford@redhat.com>
+---
+ drivers/infiniband/hw/i40iw/i40iw_ctrl.c     | 10 ++--------
+ drivers/infiniband/hw/i40iw/i40iw_p.h        |  2 +-
+ drivers/infiniband/hw/i40iw/i40iw_virtchnl.c |  6 +-----
+ 3 files changed, 4 insertions(+), 14 deletions(-)
+
+diff --git a/drivers/infiniband/hw/i40iw/i40iw_ctrl.c b/drivers/infiniband/hw/i40iw/i40iw_ctrl.c
+index 8417452..5dde358 100644
+--- a/drivers/infiniband/hw/i40iw/i40iw_ctrl.c
++++ b/drivers/infiniband/hw/i40iw/i40iw_ctrl.c
+@@ -4853,10 +4853,9 @@ static void i40iw_hw_stat_refresh_all(struct i40iw_dev_pestat *devstat)
+  * i40iw_device_init_pestat - Initialize the pestat structure
+  * @dev: pestat struct
+  */
+-enum i40iw_status_code i40iw_device_init_pestat(struct i40iw_dev_pestat *devstat)
++void i40iw_device_init_pestat(struct i40iw_dev_pestat *devstat)
+ {
+       devstat->ops = iw_device_pestat_ops;
+-      return 0;
+ }
+ /**
+@@ -4881,12 +4880,7 @@ enum i40iw_status_code i40iw_device_init(struct i40iw_sc_dev *dev,
+       dev->debug_mask = info->debug_mask;
+-      ret_code = i40iw_device_init_pestat(&dev->dev_pestat);
+-      if (ret_code) {
+-              i40iw_debug(dev, I40IW_DEBUG_DEV,
+-                          "%s: i40iw_device_init_pestat failed\n", __func__);
+-              return ret_code;
+-      }
++      i40iw_device_init_pestat(&dev->dev_pestat);
+       dev->hmc_fn_id = info->hmc_fn_id;
+       i40iw_fill_qos_list(info->l2params.qs_handle_list);
+       for (i = 0; i < I40IW_MAX_USER_PRIORITY; i++) {
+diff --git a/drivers/infiniband/hw/i40iw/i40iw_p.h b/drivers/infiniband/hw/i40iw/i40iw_p.h
+index c9e8cb8..2a4bd32 100644
+--- a/drivers/infiniband/hw/i40iw/i40iw_p.h
++++ b/drivers/infiniband/hw/i40iw/i40iw_p.h
+@@ -47,7 +47,7 @@ void i40iw_debug_buf(struct i40iw_sc_dev *dev, enum i40iw_debug_flag mask,
+ enum i40iw_status_code i40iw_device_init(struct i40iw_sc_dev *dev,
+                                        struct i40iw_device_init_info *info);
+-enum i40iw_status_code i40iw_device_init_pestat(struct i40iw_dev_pestat *);
++void i40iw_device_init_pestat(struct i40iw_dev_pestat *devstat);
+ void i40iw_sc_cqp_post_sq(struct i40iw_sc_cqp *cqp);
+diff --git a/drivers/infiniband/hw/i40iw/i40iw_virtchnl.c b/drivers/infiniband/hw/i40iw/i40iw_virtchnl.c
+index 3041003..dbd39c4 100644
+--- a/drivers/infiniband/hw/i40iw/i40iw_virtchnl.c
++++ b/drivers/infiniband/hw/i40iw/i40iw_virtchnl.c
+@@ -496,11 +496,7 @@ enum i40iw_status_code i40iw_vchnl_recv_pf(struct i40iw_sc_dev *dev,
+                               i40iw_debug(dev, I40IW_DEBUG_VIRT,
+                                           "VF%u error CQP HMC Function operation.\n",
+                                           vf_id);
+-                      ret_code = i40iw_device_init_pestat(&vf_dev->dev_pestat);
+-                      if (ret_code)
+-                              i40iw_debug(dev, I40IW_DEBUG_VIRT,
+-                                          "VF%u - i40iw_device_init_pestat failed\n",
+-                                          vf_id);
++                      i40iw_device_init_pestat(&vf_dev->dev_pestat);
+                       vf_dev->dev_pestat.ops.iw_hw_stat_init(&vf_dev->dev_pestat,
+                                                             (u8)vf_dev->pmf_index,
+                                                             dev->hw, false);
+-- 
+1.8.3.1
+
diff --git a/linux-next-cherry-picks/0028-i40iw-Remove-variable-flush_code-and-check-to-set-qp.patch b/linux-next-cherry-picks/0028-i40iw-Remove-variable-flush_code-and-check-to-set-qp.patch
new file mode 100755 (executable)
index 0000000..7b0371e
--- /dev/null
@@ -0,0 +1,43 @@
+From 78e945aace5b9aaf19404799cd29b4d155806053 Mon Sep 17 00:00:00 2001
+From: Henry Orosco <henry.orosco@intel.com>
+Date: Wed, 9 Nov 2016 22:20:31 -0600
+Subject: [PATCH 28/52] i40iw: Remove variable flush_code and check to set
+ qp->sq_flush
+
+The flush_code variable in i40iw_bld_terminate_hdr() is obsolete and
+the check to set qp->sq_flush is unreachable. Currently flush code is
+populated in setup_term_hdr() and both SQ and RQ are flushed always
+as part of the tear down flow.
+
+Signed-off-by: Shiraz Saleem <shiraz.saleem@intel.com>
+Signed-off-by: Henry Orosco <henry.orosco@intel.com>
+Signed-off-by: Doug Ledford <dledford@redhat.com>
+---
+ drivers/infiniband/hw/i40iw/i40iw_ctrl.c | 4 ----
+ 1 file changed, 4 deletions(-)
+
+diff --git a/drivers/infiniband/hw/i40iw/i40iw_ctrl.c b/drivers/infiniband/hw/i40iw/i40iw_ctrl.c
+index 5dde358..a135037 100644
+--- a/drivers/infiniband/hw/i40iw/i40iw_ctrl.c
++++ b/drivers/infiniband/hw/i40iw/i40iw_ctrl.c
+@@ -4185,7 +4185,6 @@ static int i40iw_bld_terminate_hdr(struct i40iw_sc_qp *qp,
+       u16 ddp_seg_len;
+       int copy_len = 0;
+       u8 is_tagged = 0;
+-      enum i40iw_flush_opcode flush_code = FLUSH_INVALID;
+       u32 opcode;
+       struct i40iw_terminate_hdr *termhdr;
+@@ -4358,9 +4357,6 @@ static int i40iw_bld_terminate_hdr(struct i40iw_sc_qp *qp,
+       if (copy_len)
+               memcpy(termhdr + 1, pkt, copy_len);
+-      if (flush_code && !info->in_rdrsp_wr)
+-              qp->sq_flush = (info->sq) ? true : false;
+-
+       return sizeof(struct i40iw_terminate_hdr) + copy_len;
+ }
+-- 
+1.8.3.1
+
diff --git a/linux-next-cherry-picks/0029-i40iw-Fix-incorrect-assignment-of-SQ-head.patch b/linux-next-cherry-picks/0029-i40iw-Fix-incorrect-assignment-of-SQ-head.patch
new file mode 100755 (executable)
index 0000000..ec574dd
--- /dev/null
@@ -0,0 +1,43 @@
+From d4165e3abdf16707602c10f0d678d4d564a87e35 Mon Sep 17 00:00:00 2001
+From: Henry Orosco <henry.orosco@intel.com>
+Date: Tue, 22 Nov 2016 09:44:20 -0600
+Subject: [PATCH 29/52] i40iw: Fix incorrect assignment of SQ head
+
+The SQ head is incorrectly incremented when the number
+of WQEs required is greater than the number available.
+The fix is to use the I40IW_RING_MOV_HEAD_BY_COUNT
+macro. This checks for the SQ full condition first and
+only if SQ has room for the request, then we move the
+head appropriately.
+
+Signed-off-by: Shiraz Saleem <shiraz.saleem@intel.com>
+Signed-off-by: Henry Orosco <henry.orosco@intel.com>
+Signed-off-by: Doug Ledford <dledford@redhat.com>
+---
+ drivers/infiniband/hw/i40iw/i40iw_uk.c | 10 ++++------
+ 1 file changed, 4 insertions(+), 6 deletions(-)
+
+diff --git a/drivers/infiniband/hw/i40iw/i40iw_uk.c b/drivers/infiniband/hw/i40iw/i40iw_uk.c
+index 5d9c3bf..4376cd6 100644
+--- a/drivers/infiniband/hw/i40iw/i40iw_uk.c
++++ b/drivers/infiniband/hw/i40iw/i40iw_uk.c
+@@ -175,12 +175,10 @@ u64 *i40iw_qp_get_next_send_wqe(struct i40iw_qp_uk *qp,
+               if (!*wqe_idx)
+                       qp->swqe_polarity = !qp->swqe_polarity;
+       }
+-
+-      for (i = 0; i < wqe_size / I40IW_QP_WQE_MIN_SIZE; i++) {
+-              I40IW_RING_MOVE_HEAD(qp->sq_ring, ret_code);
+-              if (ret_code)
+-                      return NULL;
+-      }
++      I40IW_RING_MOVE_HEAD_BY_COUNT(qp->sq_ring,
++                                    wqe_size / I40IW_QP_WQE_MIN_SIZE, ret_code);
++      if (ret_code)
++              return NULL;
+       wqe = qp->sq_base[*wqe_idx].elem;
+-- 
+1.8.3.1
+
diff --git a/linux-next-cherry-picks/0030-i40iw-Utilize-physically-mapped-memory-regions.patch b/linux-next-cherry-picks/0030-i40iw-Utilize-physically-mapped-memory-regions.patch
new file mode 100755 (executable)
index 0000000..7f3ef38
--- /dev/null
@@ -0,0 +1,224 @@
+From b6a529da69ce880ee4d0f3730ef46ead7f7cd0d3 Mon Sep 17 00:00:00 2001
+From: Henry Orosco <henry.orosco@intel.com>
+Date: Wed, 30 Nov 2016 14:56:14 -0600
+Subject: [PATCH 30/52] i40iw: Utilize physically mapped memory regions
+
+Add support to use physically mapped WQ's and MR's if determined
+that the OS registered user-memory for the region is physically
+contiguous. This feature will eliminate the need for unnecessarily
+setting up and using PBL's when not required.
+
+Signed-off-by: Shiraz Saleem <shiraz.saleem@intel.com>
+Signed-off-by: Henry Orosco <henry.orosco@intel.com>
+Signed-off-by: Doug Ledford <dledford@redhat.com>
+---
+ drivers/infiniband/hw/i40iw/i40iw_verbs.c | 102 +++++++++++++++++++++++++++---
+ 1 file changed, 93 insertions(+), 9 deletions(-)
+
+diff --git a/drivers/infiniband/hw/i40iw/i40iw_verbs.c b/drivers/infiniband/hw/i40iw/i40iw_verbs.c
+index dcf08b8..43bae5b 100644
+--- a/drivers/infiniband/hw/i40iw/i40iw_verbs.c
++++ b/drivers/infiniband/hw/i40iw/i40iw_verbs.c
+@@ -1356,10 +1356,62 @@ static void i40iw_copy_user_pgaddrs(struct i40iw_mr *iwmr,
+ }
+ /**
++ * i40iw_check_mem_contiguous - check if pbls stored in arr are contiguous
++ * @arr: lvl1 pbl array
++ * @npages: page count
++ * pg_size: page size
++ *
++ */
++static bool i40iw_check_mem_contiguous(u64 *arr, u32 npages, u32 pg_size)
++{
++      u32 pg_idx;
++
++      for (pg_idx = 0; pg_idx < npages; pg_idx++) {
++              if ((*arr + (pg_size * pg_idx)) != arr[pg_idx])
++                      return false;
++      }
++      return true;
++}
++
++/**
++ * i40iw_check_mr_contiguous - check if MR is physically contiguous
++ * @palloc: pbl allocation struct
++ * pg_size: page size
++ */
++static bool i40iw_check_mr_contiguous(struct i40iw_pble_alloc *palloc, u32 pg_size)
++{
++      struct i40iw_pble_level2 *lvl2 = &palloc->level2;
++      struct i40iw_pble_info *leaf = lvl2->leaf;
++      u64 *arr = NULL;
++      u64 *start_addr = NULL;
++      int i;
++      bool ret;
++
++      if (palloc->level == I40IW_LEVEL_1) {
++              arr = (u64 *)palloc->level1.addr;
++              ret = i40iw_check_mem_contiguous(arr, palloc->total_cnt, pg_size);
++              return ret;
++      }
++
++      start_addr = (u64 *)leaf->addr;
++
++      for (i = 0; i < lvl2->leaf_cnt; i++, leaf++) {
++              arr = (u64 *)leaf->addr;
++              if ((*start_addr + (i * pg_size * PBLE_PER_PAGE)) != *arr)
++                      return false;
++              ret = i40iw_check_mem_contiguous(arr, leaf->cnt, pg_size);
++              if (!ret)
++                      return false;
++      }
++
++      return true;
++}
++
++/**
+  * i40iw_setup_pbles - copy user pg address to pble's
+  * @iwdev: iwarp device
+  * @iwmr: mr pointer for this memory registration
+- * @use_pbles: flag if to use pble's or memory (level 0)
++ * @use_pbles: flag if to use pble's
+  */
+ static int i40iw_setup_pbles(struct i40iw_device *iwdev,
+                            struct i40iw_mr *iwmr,
+@@ -1372,9 +1424,6 @@ static int i40iw_setup_pbles(struct i40iw_device *iwdev,
+       enum i40iw_status_code status;
+       enum i40iw_pble_level level = I40IW_LEVEL_1;
+-      if (!use_pbles && (iwmr->page_cnt > MAX_SAVE_PAGE_ADDRS))
+-              return -ENOMEM;
+-
+       if (use_pbles) {
+               mutex_lock(&iwdev->pbl_mutex);
+               status = i40iw_get_pble(&iwdev->sc_dev, iwdev->pble_rsrc, palloc, iwmr->page_cnt);
+@@ -1391,6 +1440,10 @@ static int i40iw_setup_pbles(struct i40iw_device *iwdev,
+       }
+       i40iw_copy_user_pgaddrs(iwmr, pbl, level);
++
++      if (use_pbles)
++              iwmr->pgaddrmem[0] = *pbl;
++
+       return 0;
+ }
+@@ -1412,14 +1465,18 @@ static int i40iw_handle_q_mem(struct i40iw_device *iwdev,
+       struct i40iw_cq_mr *cqmr = &iwpbl->cq_mr;
+       struct i40iw_hmc_pble *hmc_p;
+       u64 *arr = iwmr->pgaddrmem;
++      u32 pg_size;
+       int err;
+       int total;
++      bool ret = true;
+       total = req->sq_pages + req->rq_pages + req->cq_pages;
++      pg_size = iwmr->region->page_size;
+       err = i40iw_setup_pbles(iwdev, iwmr, use_pbles);
+       if (err)
+               return err;
++
+       if (use_pbles && (palloc->level != I40IW_LEVEL_1)) {
+               i40iw_free_pble(iwdev->pble_rsrc, palloc);
+               iwpbl->pbl_allocated = false;
+@@ -1428,26 +1485,44 @@ static int i40iw_handle_q_mem(struct i40iw_device *iwdev,
+       if (use_pbles)
+               arr = (u64 *)palloc->level1.addr;
+-      if (req->reg_type == IW_MEMREG_TYPE_QP) {
++
++      if (iwmr->type == IW_MEMREG_TYPE_QP) {
+               hmc_p = &qpmr->sq_pbl;
+               qpmr->shadow = (dma_addr_t)arr[total];
++
+               if (use_pbles) {
++                      ret = i40iw_check_mem_contiguous(arr, req->sq_pages, pg_size);
++                      if (ret)
++                              ret = i40iw_check_mem_contiguous(&arr[req->sq_pages], req->rq_pages, pg_size);
++              }
++
++              if (!ret) {
+                       hmc_p->idx = palloc->level1.idx;
+                       hmc_p = &qpmr->rq_pbl;
+                       hmc_p->idx = palloc->level1.idx + req->sq_pages;
+               } else {
+                       hmc_p->addr = arr[0];
+                       hmc_p = &qpmr->rq_pbl;
+-                      hmc_p->addr = arr[1];
++                      hmc_p->addr = arr[req->sq_pages];
+               }
+       } else {                /* CQ */
+               hmc_p = &cqmr->cq_pbl;
+               cqmr->shadow = (dma_addr_t)arr[total];
++
+               if (use_pbles)
++                      ret = i40iw_check_mem_contiguous(arr, req->cq_pages, pg_size);
++
++              if (!ret)
+                       hmc_p->idx = palloc->level1.idx;
+               else
+                       hmc_p->addr = arr[0];
+       }
++
++      if (use_pbles && ret) {
++              i40iw_free_pble(iwdev->pble_rsrc, palloc);
++              iwpbl->pbl_allocated = false;
++      }
++
+       return err;
+ }
+@@ -1646,7 +1721,7 @@ static int i40iw_hwreg_mr(struct i40iw_device *iwdev,
+       stag_info->pd_id = iwpd->sc_pd.pd_id;
+       stag_info->addr_type = I40IW_ADDR_TYPE_VA_BASED;
+-      if (iwmr->page_cnt > 1) {
++      if (iwpbl->pbl_allocated) {
+               if (palloc->level == I40IW_LEVEL_1) {
+                       stag_info->first_pm_pbl_index = palloc->level1.idx;
+                       stag_info->chunk_size = 1;
+@@ -1702,6 +1777,7 @@ static struct ib_mr *i40iw_reg_user_mr(struct ib_pd *pd,
+       bool use_pbles = false;
+       unsigned long flags;
+       int err = -ENOSYS;
++      int ret;
+       if (length > I40IW_MAX_MR_SIZE)
+               return ERR_PTR(-EINVAL);
+@@ -1758,13 +1834,21 @@ static struct ib_mr *i40iw_reg_user_mr(struct ib_pd *pd,
+               spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags);
+               break;
+       case IW_MEMREG_TYPE_MEM:
++              use_pbles = (iwmr->page_cnt != 1);
+               access = I40IW_ACCESS_FLAGS_LOCALREAD;
+-              use_pbles = (iwmr->page_cnt != 1);
+               err = i40iw_setup_pbles(iwdev, iwmr, use_pbles);
+               if (err)
+                       goto error;
++              if (use_pbles) {
++                      ret = i40iw_check_mr_contiguous(palloc, region->page_size);
++                      if (ret) {
++                              i40iw_free_pble(iwdev->pble_rsrc, palloc);
++                              iwpbl->pbl_allocated = false;
++                      }
++              }
++
+               access |= i40iw_get_user_access(acc);
+               stag = i40iw_create_stag(iwdev);
+               if (!stag) {
+@@ -1792,7 +1876,7 @@ static struct ib_mr *i40iw_reg_user_mr(struct ib_pd *pd,
+       return &iwmr->ibmr;
+ error:
+-      if (palloc->level != I40IW_LEVEL_0)
++      if (palloc->level != I40IW_LEVEL_0 && iwpbl->pbl_allocated)
+               i40iw_free_pble(iwdev->pble_rsrc, palloc);
+       ib_umem_release(region);
+       kfree(iwmr);
+-- 
+1.8.3.1
+
diff --git a/linux-next-cherry-picks/0031-i40iw-Add-2MB-page-support.patch b/linux-next-cherry-picks/0031-i40iw-Add-2MB-page-support.patch
new file mode 100755 (executable)
index 0000000..021c027
--- /dev/null
@@ -0,0 +1,173 @@
+From f26c7c83395b72f30d111f4e3adb3437c0a30b77 Mon Sep 17 00:00:00 2001
+From: Henry Orosco <henry.orosco@intel.com>
+Date: Wed, 30 Nov 2016 14:57:40 -0600
+Subject: [PATCH 31/52] i40iw: Add 2MB page support
+
+Add support to allow each independent memory region to
+be configured for 2MB page size in addition to 4KB
+page size.
+
+Signed-off-by: Shiraz Saleem <shiraz.saleem@intel.com>
+Signed-off-by: Henry Orosco <henry.orosco@intel.com>
+Signed-off-by: Doug Ledford <dledford@redhat.com>
+---
+ drivers/infiniband/hw/i40iw/i40iw_verbs.c | 59 +++++++++++++++++++++++++------
+ drivers/infiniband/hw/i40iw/i40iw_verbs.h |  2 ++
+ 2 files changed, 51 insertions(+), 10 deletions(-)
+
+diff --git a/drivers/infiniband/hw/i40iw/i40iw_verbs.c b/drivers/infiniband/hw/i40iw/i40iw_verbs.c
+index 43bae5b..1c2f0a1 100644
+--- a/drivers/infiniband/hw/i40iw/i40iw_verbs.c
++++ b/drivers/infiniband/hw/i40iw/i40iw_verbs.c
+@@ -37,6 +37,7 @@
+ #include <linux/random.h>
+ #include <linux/highmem.h>
+ #include <linux/time.h>
++#include <linux/hugetlb.h>
+ #include <asm/byteorder.h>
+ #include <net/ip.h>
+ #include <rdma/ib_verbs.h>
+@@ -1305,13 +1306,11 @@ static u32 i40iw_create_stag(struct i40iw_device *iwdev)
+ /**
+  * i40iw_next_pbl_addr - Get next pbl address
+- * @palloc: Poiner to allocated pbles
+  * @pbl: pointer to a pble
+  * @pinfo: info pointer
+  * @idx: index
+  */
+-static inline u64 *i40iw_next_pbl_addr(struct i40iw_pble_alloc *palloc,
+-                                     u64 *pbl,
++static inline u64 *i40iw_next_pbl_addr(u64 *pbl,
+                                      struct i40iw_pble_info **pinfo,
+                                      u32 *idx)
+ {
+@@ -1339,9 +1338,11 @@ static void i40iw_copy_user_pgaddrs(struct i40iw_mr *iwmr,
+       struct i40iw_pble_alloc *palloc = &iwpbl->pble_alloc;
+       struct i40iw_pble_info *pinfo;
+       struct scatterlist *sg;
++      u64 pg_addr = 0;
+       u32 idx = 0;
+       pinfo = (level == I40IW_LEVEL_1) ? NULL : palloc->level2.leaf;
++
+       pg_shift = ffs(region->page_size) - 1;
+       for_each_sg(region->sg_head.sgl, sg, region->nmap, entry) {
+               chunk_pages = sg_dma_len(sg) >> pg_shift;
+@@ -1349,8 +1350,35 @@ static void i40iw_copy_user_pgaddrs(struct i40iw_mr *iwmr,
+                   !iwpbl->qp_mr.sq_page)
+                       iwpbl->qp_mr.sq_page = sg_page(sg);
+               for (i = 0; i < chunk_pages; i++) {
+-                      *pbl = cpu_to_le64(sg_dma_address(sg) + region->page_size * i);
+-                      pbl = i40iw_next_pbl_addr(palloc, pbl, &pinfo, &idx);
++                      pg_addr = sg_dma_address(sg) + region->page_size * i;
++
++                      if ((entry + i) == 0)
++                              *pbl = cpu_to_le64(pg_addr & iwmr->page_msk);
++                      else if (!(pg_addr & ~iwmr->page_msk))
++                              *pbl = cpu_to_le64(pg_addr);
++                      else
++                              continue;
++                      pbl = i40iw_next_pbl_addr(pbl, &pinfo, &idx);
++              }
++      }
++}
++
++/**
++ * i40iw_set_hugetlb_params - set MR pg size and mask to huge pg values.
++ * @addr: virtual address
++ * @iwmr: mr pointer for this memory registration
++ */
++static void i40iw_set_hugetlb_values(u64 addr, struct i40iw_mr *iwmr)
++{
++      struct vm_area_struct *vma;
++      struct hstate *h;
++
++      vma = find_vma(current->mm, addr);
++      if (vma && is_vm_hugetlb_page(vma)) {
++              h = hstate_vma(vma);
++              if (huge_page_size(h) == 0x200000) {
++                      iwmr->page_size = huge_page_size(h);
++                      iwmr->page_msk = huge_page_mask(h);
+               }
+       }
+ }
+@@ -1471,7 +1499,7 @@ static int i40iw_handle_q_mem(struct i40iw_device *iwdev,
+       bool ret = true;
+       total = req->sq_pages + req->rq_pages + req->cq_pages;
+-      pg_size = iwmr->region->page_size;
++      pg_size = iwmr->page_size;
+       err = i40iw_setup_pbles(iwdev, iwmr, use_pbles);
+       if (err)
+@@ -1720,6 +1748,7 @@ static int i40iw_hwreg_mr(struct i40iw_device *iwdev,
+       stag_info->access_rights = access;
+       stag_info->pd_id = iwpd->sc_pd.pd_id;
+       stag_info->addr_type = I40IW_ADDR_TYPE_VA_BASED;
++      stag_info->page_size = iwmr->page_size;
+       if (iwpbl->pbl_allocated) {
+               if (palloc->level == I40IW_LEVEL_1) {
+@@ -1778,6 +1807,7 @@ static struct ib_mr *i40iw_reg_user_mr(struct ib_pd *pd,
+       unsigned long flags;
+       int err = -ENOSYS;
+       int ret;
++      int pg_shift;
+       if (length > I40IW_MAX_MR_SIZE)
+               return ERR_PTR(-EINVAL);
+@@ -1802,9 +1832,17 @@ static struct ib_mr *i40iw_reg_user_mr(struct ib_pd *pd,
+       iwmr->ibmr.pd = pd;
+       iwmr->ibmr.device = pd->device;
+       ucontext = to_ucontext(pd->uobject->context);
+-      region_length = region->length + (start & 0xfff);
+-      pbl_depth = region_length >> 12;
+-      pbl_depth += (region_length & (4096 - 1)) ? 1 : 0;
++
++      iwmr->page_size = region->page_size;
++      iwmr->page_msk = PAGE_MASK;
++
++      if (region->hugetlb && (req.reg_type == IW_MEMREG_TYPE_MEM))
++              i40iw_set_hugetlb_values(start, iwmr);
++
++      region_length = region->length + (start & (iwmr->page_size - 1));
++      pg_shift = ffs(iwmr->page_size) - 1;
++      pbl_depth = region_length >> pg_shift;
++      pbl_depth += (region_length & (iwmr->page_size - 1)) ? 1 : 0;
+       iwmr->length = region->length;
+       iwpbl->user_base = virt;
+@@ -1842,7 +1880,7 @@ static struct ib_mr *i40iw_reg_user_mr(struct ib_pd *pd,
+                       goto error;
+               if (use_pbles) {
+-                      ret = i40iw_check_mr_contiguous(palloc, region->page_size);
++                      ret = i40iw_check_mr_contiguous(palloc, iwmr->page_size);
+                       if (ret) {
+                               i40iw_free_pble(iwdev->pble_rsrc, palloc);
+                               iwpbl->pbl_allocated = false;
+@@ -1865,6 +1903,7 @@ static struct ib_mr *i40iw_reg_user_mr(struct ib_pd *pd,
+                       i40iw_free_stag(iwdev, stag);
+                       goto error;
+               }
++
+               break;
+       default:
+               goto error;
+diff --git a/drivers/infiniband/hw/i40iw/i40iw_verbs.h b/drivers/infiniband/hw/i40iw/i40iw_verbs.h
+index 0069be8..6549c93 100644
+--- a/drivers/infiniband/hw/i40iw/i40iw_verbs.h
++++ b/drivers/infiniband/hw/i40iw/i40iw_verbs.h
+@@ -92,6 +92,8 @@ struct i40iw_mr {
+       struct ib_umem *region;
+       u16 type;
+       u32 page_cnt;
++      u32 page_size;
++      u64 page_msk;
+       u32 npages;
+       u32 stag;
+       u64 length;
+-- 
+1.8.3.1
+
diff --git a/linux-next-cherry-picks/0032-i40iw-Add-missing-cleanup-on-device-close.patch b/linux-next-cherry-picks/0032-i40iw-Add-missing-cleanup-on-device-close.patch
new file mode 100755 (executable)
index 0000000..a8f053c
--- /dev/null
@@ -0,0 +1,290 @@
+From d59659340c61e777208524f77c268fe6edc6fe37 Mon Sep 17 00:00:00 2001
+From: Mustafa Ismail <mustafa.ismail@intel.com>
+Date: Wed, 30 Nov 2016 14:59:26 -0600
+Subject: [PATCH 32/52] i40iw: Add missing cleanup on device close
+
+On i40iw device close, disconnect all connected QPs by moving
+them to error state; and block further QPs, PDs and CQs from
+being created. Additionally, make sure all resources have been
+freed before deallocating the ibdev as part of the device close.
+
+Signed-off-by: Mustafa Ismail <mustafa.ismail@intel.com>
+Signed-off-by: Shiraz Saleem <shiraz.saleem@intel.com>
+Signed-off-by: Doug Ledford <dledford@redhat.com>
+---
+ drivers/infiniband/hw/i40iw/i40iw.h       |  5 +++++
+ drivers/infiniband/hw/i40iw/i40iw_cm.c    | 31 +++++++++++++++++++++++++++++++
+ drivers/infiniband/hw/i40iw/i40iw_cm.h    |  2 ++
+ drivers/infiniband/hw/i40iw/i40iw_d.h     |  2 ++
+ drivers/infiniband/hw/i40iw/i40iw_main.c  |  4 ++++
+ drivers/infiniband/hw/i40iw/i40iw_utils.c | 21 +++++++++++++++++++++
+ drivers/infiniband/hw/i40iw/i40iw_verbs.c | 25 +++++++++++++++++++++++++
+ 7 files changed, 90 insertions(+)
+
+diff --git a/drivers/infiniband/hw/i40iw/i40iw.h b/drivers/infiniband/hw/i40iw/i40iw.h
+index dac9a6b..c795c61 100644
+--- a/drivers/infiniband/hw/i40iw/i40iw.h
++++ b/drivers/infiniband/hw/i40iw/i40iw.h
+@@ -303,10 +303,13 @@ struct i40iw_device {
+       u32 mr_stagmask;
+       u32 mpa_version;
+       bool dcb;
++      bool closing;
+       u32 used_pds;
+       u32 used_cqs;
+       u32 used_mrs;
+       u32 used_qps;
++      wait_queue_head_t close_wq;
++      atomic64_t use_count;
+ };
+ struct i40iw_ib_device {
+@@ -521,6 +524,8 @@ enum i40iw_status_code i40iw_add_mac_addr(struct i40iw_device *iwdev,
+ void i40iw_rem_pdusecount(struct i40iw_pd *iwpd, struct i40iw_device *iwdev);
+ void i40iw_add_pdusecount(struct i40iw_pd *iwpd);
++void i40iw_rem_devusecount(struct i40iw_device *iwdev);
++void i40iw_add_devusecount(struct i40iw_device *iwdev);
+ void i40iw_hw_modify_qp(struct i40iw_device *iwdev, struct i40iw_qp *iwqp,
+                       struct i40iw_modify_qp_info *info, bool wait);
+diff --git a/drivers/infiniband/hw/i40iw/i40iw_cm.c b/drivers/infiniband/hw/i40iw/i40iw_cm.c
+index b60e346..11ef0b0 100644
+--- a/drivers/infiniband/hw/i40iw/i40iw_cm.c
++++ b/drivers/infiniband/hw/i40iw/i40iw_cm.c
+@@ -4128,3 +4128,34 @@ static void i40iw_cm_post_event(struct i40iw_cm_event *event)
+       queue_work(event->cm_node->cm_core->event_wq, &event->event_work);
+ }
++
++/**
++ * i40iw_cm_disconnect_all - disconnect all connected qp's
++ * @iwdev: device pointer
++ */
++void i40iw_cm_disconnect_all(struct i40iw_device *iwdev)
++{
++      struct i40iw_cm_core *cm_core = &iwdev->cm_core;
++      struct list_head *list_core_temp;
++      struct list_head *list_node;
++      struct i40iw_cm_node *cm_node;
++      unsigned long flags;
++      struct list_head connected_list;
++      struct ib_qp_attr attr;
++
++      INIT_LIST_HEAD(&connected_list);
++      spin_lock_irqsave(&cm_core->ht_lock, flags);
++      list_for_each_safe(list_node, list_core_temp, &cm_core->connected_nodes) {
++              cm_node = container_of(list_node, struct i40iw_cm_node, list);
++              atomic_inc(&cm_node->ref_count);
++              list_add(&cm_node->connected_entry, &connected_list);
++      }
++      spin_unlock_irqrestore(&cm_core->ht_lock, flags);
++
++      list_for_each_safe(list_node, list_core_temp, &connected_list) {
++              cm_node = container_of(list_node, struct i40iw_cm_node, connected_entry);
++              attr.qp_state = IB_QPS_ERR;
++              i40iw_modify_qp(&cm_node->iwqp->ibqp, &attr, IB_QP_STATE, NULL);
++              i40iw_rem_ref_cm_node(cm_node);
++      }
++}
+diff --git a/drivers/infiniband/hw/i40iw/i40iw_cm.h b/drivers/infiniband/hw/i40iw/i40iw_cm.h
+index 24615c2..0381b7f 100644
+--- a/drivers/infiniband/hw/i40iw/i40iw_cm.h
++++ b/drivers/infiniband/hw/i40iw/i40iw_cm.h
+@@ -339,6 +339,7 @@ struct i40iw_cm_node {
+       int accept_pend;
+       struct list_head timer_entry;
+       struct list_head reset_entry;
++      struct list_head connected_entry;
+       atomic_t passive_state;
+       bool qhash_set;
+       u8 user_pri;
+@@ -443,4 +444,5 @@ int i40iw_arp_table(struct i40iw_device *iwdev,
+                   u8 *mac_addr,
+                   u32 action);
++void i40iw_cm_disconnect_all(struct i40iw_device *iwdev);
+ #endif /* I40IW_CM_H */
+diff --git a/drivers/infiniband/hw/i40iw/i40iw_d.h b/drivers/infiniband/hw/i40iw/i40iw_d.h
+index e184c0e..1bd4bad 100644
+--- a/drivers/infiniband/hw/i40iw/i40iw_d.h
++++ b/drivers/infiniband/hw/i40iw/i40iw_d.h
+@@ -35,6 +35,8 @@
+ #ifndef I40IW_D_H
+ #define I40IW_D_H
++#define I40IW_FIRST_USER_QP_ID  2
++
+ #define I40IW_DB_ADDR_OFFSET    (4 * 1024 * 1024 - 64 * 1024)
+ #define I40IW_VF_DB_ADDR_OFFSET (64 * 1024)
+diff --git a/drivers/infiniband/hw/i40iw/i40iw_main.c b/drivers/infiniband/hw/i40iw/i40iw_main.c
+index 9d3b9ee..d86bb6e 100644
+--- a/drivers/infiniband/hw/i40iw/i40iw_main.c
++++ b/drivers/infiniband/hw/i40iw/i40iw_main.c
+@@ -1546,6 +1546,7 @@ static enum i40iw_status_code i40iw_setup_init_state(struct i40iw_handler *hdl,
+       init_waitqueue_head(&iwdev->vchnl_waitq);
+       init_waitqueue_head(&dev->vf_reqs);
++      init_waitqueue_head(&iwdev->close_wq);
+       status = i40iw_initialize_dev(iwdev, ldev);
+ exit:
+@@ -1748,6 +1749,9 @@ static void i40iw_close(struct i40e_info *ldev, struct i40e_client *client, bool
+               return;
+       iwdev = &hdl->device;
++      iwdev->closing = true;
++
++      i40iw_cm_disconnect_all(iwdev);
+       destroy_workqueue(iwdev->virtchnl_wq);
+       i40iw_deinit_device(iwdev, reset);
+ }
+diff --git a/drivers/infiniband/hw/i40iw/i40iw_utils.c b/drivers/infiniband/hw/i40iw/i40iw_utils.c
+index 4e880e8..5815128 100644
+--- a/drivers/infiniband/hw/i40iw/i40iw_utils.c
++++ b/drivers/infiniband/hw/i40iw/i40iw_utils.c
+@@ -392,6 +392,7 @@ static void i40iw_free_qp(struct i40iw_cqp_request *cqp_request, u32 num)
+       i40iw_rem_pdusecount(iwqp->iwpd, iwdev);
+       i40iw_free_qp_resources(iwdev, iwqp, qp_num);
++      i40iw_rem_devusecount(iwdev);
+ }
+ /**
+@@ -459,6 +460,26 @@ enum i40iw_status_code i40iw_handle_cqp_op(struct i40iw_device *iwdev,
+ }
+ /**
++ * i40iw_add_devusecount - add dev refcount
++ * @iwdev: dev for refcount
++ */
++void i40iw_add_devusecount(struct i40iw_device *iwdev)
++{
++      atomic64_inc(&iwdev->use_count);
++}
++
++/**
++ * i40iw_rem_devusecount - decrement refcount for dev
++ * @iwdev: device
++ */
++void i40iw_rem_devusecount(struct i40iw_device *iwdev)
++{
++      if (!atomic64_dec_and_test(&iwdev->use_count))
++              return;
++      wake_up(&iwdev->close_wq);
++}
++
++/**
+  * i40iw_add_pdusecount - add pd refcount
+  * @iwpd: pd for refcount
+  */
+diff --git a/drivers/infiniband/hw/i40iw/i40iw_verbs.c b/drivers/infiniband/hw/i40iw/i40iw_verbs.c
+index 1c2f0a1..bc24086 100644
+--- a/drivers/infiniband/hw/i40iw/i40iw_verbs.c
++++ b/drivers/infiniband/hw/i40iw/i40iw_verbs.c
+@@ -336,6 +336,9 @@ static struct ib_pd *i40iw_alloc_pd(struct ib_device *ibdev,
+       u32 pd_id = 0;
+       int err;
++      if (iwdev->closing)
++              return ERR_PTR(-ENODEV);
++
+       err = i40iw_alloc_resource(iwdev, iwdev->allocated_pds,
+                                  iwdev->max_pd, &pd_id, &iwdev->next_pd);
+       if (err) {
+@@ -601,6 +604,9 @@ static struct ib_qp *i40iw_create_qp(struct ib_pd *ibpd,
+       struct i40iwarp_offload_info *iwarp_info;
+       unsigned long flags;
++      if (iwdev->closing)
++              return ERR_PTR(-ENODEV);
++
+       if (init_attr->create_flags)
+               return ERR_PTR(-EINVAL);
+       if (init_attr->cap.max_inline_data > I40IW_MAX_INLINE_DATA_SIZE)
+@@ -776,6 +782,7 @@ static struct ib_qp *i40iw_create_qp(struct ib_pd *ibpd,
+       iwqp->sig_all = (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR) ? 1 : 0;
+       iwdev->qp_table[qp_num] = iwqp;
+       i40iw_add_pdusecount(iwqp->iwpd);
++      i40iw_add_devusecount(iwdev);
+       if (ibpd->uobject && udata) {
+               memset(&uresp, 0, sizeof(uresp));
+               uresp.actual_sq_size = sq_size;
+@@ -887,6 +894,11 @@ int i40iw_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
+       spin_lock_irqsave(&iwqp->lock, flags);
+       if (attr_mask & IB_QP_STATE) {
++              if (iwdev->closing && attr->qp_state != IB_QPS_ERR) {
++                      err = -EINVAL;
++                      goto exit;
++              }
++
+               switch (attr->qp_state) {
+               case IB_QPS_INIT:
+               case IB_QPS_RTR:
+@@ -1086,6 +1098,7 @@ static int i40iw_destroy_cq(struct ib_cq *ib_cq)
+       cq_wq_destroy(iwdev, cq);
+       cq_free_resources(iwdev, iwcq);
+       kfree(iwcq);
++      i40iw_rem_devusecount(iwdev);
+       return 0;
+ }
+@@ -1116,6 +1129,9 @@ static struct ib_cq *i40iw_create_cq(struct ib_device *ibdev,
+       int err_code;
+       int entries = attr->cqe;
++      if (iwdev->closing)
++              return ERR_PTR(-ENODEV);
++
+       if (entries > iwdev->max_cqe)
+               return ERR_PTR(-EINVAL);
+@@ -1233,6 +1249,7 @@ static struct ib_cq *i40iw_create_cq(struct ib_device *ibdev,
+               }
+       }
++      i40iw_add_devusecount(iwdev);
+       return (struct ib_cq *)iwcq;
+ cq_destroy:
+@@ -1270,6 +1287,7 @@ static void i40iw_free_stag(struct i40iw_device *iwdev, u32 stag)
+       stag_idx = (stag & iwdev->mr_stagmask) >> I40IW_CQPSQ_STAG_IDX_SHIFT;
+       i40iw_free_resource(iwdev, iwdev->allocated_mrs, stag_idx);
++      i40iw_rem_devusecount(iwdev);
+ }
+ /**
+@@ -1300,6 +1318,7 @@ static u32 i40iw_create_stag(struct i40iw_device *iwdev)
+               stag = stag_index << I40IW_CQPSQ_STAG_IDX_SHIFT;
+               stag |= driver_key;
+               stag += (u32)consumer_key;
++              i40iw_add_devusecount(iwdev);
+       }
+       return stag;
+ }
+@@ -1809,6 +1828,9 @@ static struct ib_mr *i40iw_reg_user_mr(struct ib_pd *pd,
+       int ret;
+       int pg_shift;
++      if (iwdev->closing)
++              return ERR_PTR(-ENODEV);
++
+       if (length > I40IW_MAX_MR_SIZE)
+               return ERR_PTR(-EINVAL);
+       region = ib_umem_get(pd->uobject->context, start, length, acc, 0);
+@@ -2842,6 +2864,9 @@ void i40iw_destroy_rdma_device(struct i40iw_ib_device *iwibdev)
+       i40iw_unregister_rdma_device(iwibdev);
+       kfree(iwibdev->ibdev.iwcm);
+       iwibdev->ibdev.iwcm = NULL;
++      wait_event_timeout(iwibdev->iwdev->close_wq,
++                         !atomic64_read(&iwibdev->iwdev->use_count),
++                         I40IW_EVENT_TIMEOUT);
+       ib_dealloc_device(&iwibdev->ibdev);
+ }
+-- 
+1.8.3.1
+
diff --git a/linux-next-cherry-picks/0033-i40iw-Add-IP-addr-handling-on-netdev-events.patch b/linux-next-cherry-picks/0033-i40iw-Add-IP-addr-handling-on-netdev-events.patch
new file mode 100755 (executable)
index 0000000..8c820f0
--- /dev/null
@@ -0,0 +1,314 @@
+From e5e74b61b16503acbd914f673b783fa2a1532a64 Mon Sep 17 00:00:00 2001
+From: Mustafa Ismail <mustafa.ismail@intel.com>
+Date: Wed, 30 Nov 2016 15:07:30 -0600
+Subject: [PATCH 33/52] i40iw: Add IP addr handling on netdev events
+
+Disable listeners and disconnect all connected QPs on
+a netdev interface down event. On an interface up event,
+the listeners are re-enabled.
+
+Signed-off-by: Mustafa Ismail <mustafa.ismail@intel.com>
+Signed-off-by: Shiraz Saleem <shiraz.saleem@intel.com>
+Signed-off-by: Doug Ledford <dledford@redhat.com>
+---
+ drivers/infiniband/hw/i40iw/i40iw_cm.c    | 138 +++++++++++++++++++++++++++++-
+ drivers/infiniband/hw/i40iw/i40iw_cm.h    |   2 +
+ drivers/infiniband/hw/i40iw/i40iw_utils.c |  58 ++++---------
+ 3 files changed, 156 insertions(+), 42 deletions(-)
+
+diff --git a/drivers/infiniband/hw/i40iw/i40iw_cm.c b/drivers/infiniband/hw/i40iw/i40iw_cm.c
+index 11ef0b0..93ae764 100644
+--- a/drivers/infiniband/hw/i40iw/i40iw_cm.c
++++ b/drivers/infiniband/hw/i40iw/i40iw_cm.c
+@@ -1556,9 +1556,15 @@ static enum i40iw_status_code i40iw_del_multiple_qhash(
+               memcpy(cm_info->loc_addr, child_listen_node->loc_addr,
+                      sizeof(cm_info->loc_addr));
+               cm_info->vlan_id = child_listen_node->vlan_id;
+-              ret = i40iw_manage_qhash(iwdev, cm_info,
+-                                       I40IW_QHASH_TYPE_TCP_SYN,
+-                                       I40IW_QHASH_MANAGE_TYPE_DELETE, NULL, false);
++              if (child_listen_node->qhash_set) {
++                      ret = i40iw_manage_qhash(iwdev, cm_info,
++                                               I40IW_QHASH_TYPE_TCP_SYN,
++                                               I40IW_QHASH_MANAGE_TYPE_DELETE,
++                                               NULL, false);
++                      child_listen_node->qhash_set = false;
++              } else {
++                      ret = I40IW_SUCCESS;
++              }
+               i40iw_debug(&iwdev->sc_dev,
+                           I40IW_DEBUG_CM,
+                           "freed pointer = %p\n",
+@@ -1687,6 +1693,7 @@ static enum i40iw_status_code i40iw_add_mqh_6(struct i40iw_device *iwdev,
+                                                        I40IW_QHASH_MANAGE_TYPE_ADD,
+                                                        NULL, true);
+                               if (!ret) {
++                                      child_listen_node->qhash_set = true;
+                                       spin_lock_irqsave(&iwdev->cm_core.listen_list_lock, flags);
+                                       list_add(&child_listen_node->child_listen_list,
+                                                &cm_parent_listen_node->child_listen_list);
+@@ -1765,6 +1772,7 @@ static enum i40iw_status_code i40iw_add_mqh_4(
+                                                        NULL,
+                                                        true);
+                               if (!ret) {
++                                      child_listen_node->qhash_set = true;
+                                       spin_lock_irqsave(&iwdev->cm_core.listen_list_lock, flags);
+                                       list_add(&child_listen_node->child_listen_list,
+                                                &cm_parent_listen_node->child_listen_list);
+@@ -4130,6 +4138,73 @@ static void i40iw_cm_post_event(struct i40iw_cm_event *event)
+ }
+ /**
++ * i40iw_qhash_ctrl - enable/disable qhash for list
++ * @iwdev: device pointer
++ * @parent_listen_node: parent listen node
++ * @nfo: cm info node
++ * @ipaddr: Pointer to IPv4 or IPv6 address
++ * @ipv4: flag indicating IPv4 when true
++ * @ifup: flag indicating interface up when true
++ *
++ * Enables or disables the qhash for the node in the child
++ * listen list that matches ipaddr. If no matching IP was found
++ * it will allocate and add a new child listen node to the
++ * parent listen node. The listen_list_lock is assumed to be
++ * held when called.
++ */
++static void i40iw_qhash_ctrl(struct i40iw_device *iwdev,
++                           struct i40iw_cm_listener *parent_listen_node,
++                           struct i40iw_cm_info *nfo,
++                           u32 *ipaddr, bool ipv4, bool ifup)
++{
++      struct list_head *child_listen_list = &parent_listen_node->child_listen_list;
++      struct i40iw_cm_listener *child_listen_node;
++      struct list_head *pos, *tpos;
++      enum i40iw_status_code ret;
++      bool node_allocated = false;
++      enum i40iw_quad_hash_manage_type op =
++              ifup ? I40IW_QHASH_MANAGE_TYPE_ADD : I40IW_QHASH_MANAGE_TYPE_DELETE;
++
++      list_for_each_safe(pos, tpos, child_listen_list) {
++              child_listen_node =
++                      list_entry(pos,
++                                 struct i40iw_cm_listener,
++                                 child_listen_list);
++              if (!memcmp(child_listen_node->loc_addr, ipaddr, ipv4 ? 4 : 16))
++                      goto set_qhash;
++      }
++
++      /* if not found then add a child listener if interface is going up */
++      if (!ifup)
++              return;
++      child_listen_node = kzalloc(sizeof(*child_listen_node), GFP_ATOMIC);
++      if (!child_listen_node)
++              return;
++      node_allocated = true;
++      memcpy(child_listen_node, parent_listen_node, sizeof(*child_listen_node));
++
++      memcpy(child_listen_node->loc_addr, ipaddr,  ipv4 ? 4 : 16);
++
++set_qhash:
++      memcpy(nfo->loc_addr,
++             child_listen_node->loc_addr,
++             sizeof(nfo->loc_addr));
++      nfo->vlan_id = child_listen_node->vlan_id;
++      ret = i40iw_manage_qhash(iwdev, nfo,
++                               I40IW_QHASH_TYPE_TCP_SYN,
++                               op,
++                               NULL, false);
++      if (!ret) {
++              child_listen_node->qhash_set = ifup;
++              if (node_allocated)
++                      list_add(&child_listen_node->child_listen_list,
++                               &parent_listen_node->child_listen_list);
++      } else if (node_allocated) {
++              kfree(child_listen_node);
++      }
++}
++
++/**
+  * i40iw_cm_disconnect_all - disconnect all connected qp's
+  * @iwdev: device pointer
+  */
+@@ -4159,3 +4234,60 @@ void i40iw_cm_disconnect_all(struct i40iw_device *iwdev)
+               i40iw_rem_ref_cm_node(cm_node);
+       }
+ }
++
++/**
++ * i40iw_ifdown_notify - process an ifdown on an interface
++ * @iwdev: device pointer
++ * @ipaddr: Pointer to IPv4 or IPv6 address
++ * @ipv4: flag indicating IPv4 when true
++ * @ifup: flag indicating interface up when true
++ */
++void i40iw_if_notify(struct i40iw_device *iwdev, struct net_device *netdev,
++                   u32 *ipaddr, bool ipv4, bool ifup)
++{
++      struct i40iw_cm_core *cm_core = &iwdev->cm_core;
++      unsigned long flags;
++      struct i40iw_cm_listener *listen_node;
++      static const u32 ip_zero[4] = { 0, 0, 0, 0 };
++      struct i40iw_cm_info nfo;
++      u16 vlan_id = rdma_vlan_dev_vlan_id(netdev);
++      enum i40iw_status_code ret;
++      enum i40iw_quad_hash_manage_type op =
++              ifup ? I40IW_QHASH_MANAGE_TYPE_ADD : I40IW_QHASH_MANAGE_TYPE_DELETE;
++
++      /* Disable or enable qhash for listeners */
++      spin_lock_irqsave(&cm_core->listen_list_lock, flags);
++      list_for_each_entry(listen_node, &cm_core->listen_nodes, list) {
++              if (vlan_id == listen_node->vlan_id &&
++                  (!memcmp(listen_node->loc_addr, ipaddr, ipv4 ? 4 : 16) ||
++                  !memcmp(listen_node->loc_addr, ip_zero, ipv4 ? 4 : 16))) {
++                      memcpy(nfo.loc_addr, listen_node->loc_addr,
++                             sizeof(nfo.loc_addr));
++                      nfo.loc_port = listen_node->loc_port;
++                      nfo.ipv4 = listen_node->ipv4;
++                      nfo.vlan_id = listen_node->vlan_id;
++                      nfo.user_pri = listen_node->user_pri;
++                      if (!list_empty(&listen_node->child_listen_list)) {
++                              i40iw_qhash_ctrl(iwdev,
++                                               listen_node,
++                                               &nfo,
++                                               ipaddr, ipv4, ifup);
++                      } else if (memcmp(listen_node->loc_addr, ip_zero,
++                                        ipv4 ? 4 : 16)) {
++                              ret = i40iw_manage_qhash(iwdev,
++                                                       &nfo,
++                                                       I40IW_QHASH_TYPE_TCP_SYN,
++                                                       op,
++                                                       NULL,
++                                                       false);
++                              if (!ret)
++                                      listen_node->qhash_set = ifup;
++                      }
++              }
++      }
++      spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
++
++      /* disconnect any connected qp's on ifdown */
++      if (!ifup)
++              i40iw_cm_disconnect_all(iwdev);
++}
+diff --git a/drivers/infiniband/hw/i40iw/i40iw_cm.h b/drivers/infiniband/hw/i40iw/i40iw_cm.h
+index 0381b7f..49ed7a5 100644
+--- a/drivers/infiniband/hw/i40iw/i40iw_cm.h
++++ b/drivers/infiniband/hw/i40iw/i40iw_cm.h
+@@ -444,5 +444,7 @@ int i40iw_arp_table(struct i40iw_device *iwdev,
+                   u8 *mac_addr,
+                   u32 action);
++void i40iw_if_notify(struct i40iw_device *iwdev, struct net_device *netdev,
++                   u32 *ipaddr, bool ipv4, bool ifup);
+ void i40iw_cm_disconnect_all(struct i40iw_device *iwdev);
+ #endif /* I40IW_CM_H */
+diff --git a/drivers/infiniband/hw/i40iw/i40iw_utils.c b/drivers/infiniband/hw/i40iw/i40iw_utils.c
+index 5815128..641f00f 100644
+--- a/drivers/infiniband/hw/i40iw/i40iw_utils.c
++++ b/drivers/infiniband/hw/i40iw/i40iw_utils.c
+@@ -153,6 +153,7 @@ int i40iw_inetaddr_event(struct notifier_block *notifier,
+       struct i40iw_device *iwdev;
+       struct i40iw_handler *hdl;
+       u32 local_ipaddr;
++      u32 action = I40IW_ARP_ADD;
+       hdl = i40iw_find_netdev(event_netdev);
+       if (!hdl)
+@@ -164,44 +165,25 @@ int i40iw_inetaddr_event(struct notifier_block *notifier,
+       if (netdev != event_netdev)
+               return NOTIFY_DONE;
++      if (upper_dev)
++              local_ipaddr = ntohl(
++                      ((struct in_device *)upper_dev->ip_ptr)->ifa_list->ifa_address);
++      else
++              local_ipaddr = ntohl(ifa->ifa_address);
+       switch (event) {
+       case NETDEV_DOWN:
+-              if (upper_dev)
+-                      local_ipaddr = ntohl(
+-                              ((struct in_device *)upper_dev->ip_ptr)->ifa_list->ifa_address);
+-              else
+-                      local_ipaddr = ntohl(ifa->ifa_address);
+-              i40iw_manage_arp_cache(iwdev,
+-                                     netdev->dev_addr,
+-                                     &local_ipaddr,
+-                                     true,
+-                                     I40IW_ARP_DELETE);
+-              return NOTIFY_OK;
++              action = I40IW_ARP_DELETE;
++              /* Fall through */
+       case NETDEV_UP:
+-              if (upper_dev)
+-                      local_ipaddr = ntohl(
+-                              ((struct in_device *)upper_dev->ip_ptr)->ifa_list->ifa_address);
+-              else
+-                      local_ipaddr = ntohl(ifa->ifa_address);
+-              i40iw_manage_arp_cache(iwdev,
+-                                     netdev->dev_addr,
+-                                     &local_ipaddr,
+-                                     true,
+-                                     I40IW_ARP_ADD);
+-              break;
++              /* Fall through */
+       case NETDEV_CHANGEADDR:
+-              /* Add the address to the IP table */
+-              if (upper_dev)
+-                      local_ipaddr = ntohl(
+-                              ((struct in_device *)upper_dev->ip_ptr)->ifa_list->ifa_address);
+-              else
+-                      local_ipaddr = ntohl(ifa->ifa_address);
+-
+               i40iw_manage_arp_cache(iwdev,
+                                      netdev->dev_addr,
+                                      &local_ipaddr,
+                                      true,
+-                                     I40IW_ARP_ADD);
++                                     action);
++              i40iw_if_notify(iwdev, netdev, &local_ipaddr, true,
++                              (action == I40IW_ARP_ADD) ? true : false);
+               break;
+       default:
+               break;
+@@ -225,6 +207,7 @@ int i40iw_inet6addr_event(struct notifier_block *notifier,
+       struct i40iw_device *iwdev;
+       struct i40iw_handler *hdl;
+       u32 local_ipaddr6[4];
++      u32 action = I40IW_ARP_ADD;
+       hdl = i40iw_find_netdev(event_netdev);
+       if (!hdl)
+@@ -235,24 +218,21 @@ int i40iw_inet6addr_event(struct notifier_block *notifier,
+       if (netdev != event_netdev)
+               return NOTIFY_DONE;
++      i40iw_copy_ip_ntohl(local_ipaddr6, ifa->addr.in6_u.u6_addr32);
+       switch (event) {
+       case NETDEV_DOWN:
+-              i40iw_copy_ip_ntohl(local_ipaddr6, ifa->addr.in6_u.u6_addr32);
+-              i40iw_manage_arp_cache(iwdev,
+-                                     netdev->dev_addr,
+-                                     local_ipaddr6,
+-                                     false,
+-                                     I40IW_ARP_DELETE);
+-              return NOTIFY_OK;
++              action = I40IW_ARP_DELETE;
++              /* Fall through */
+       case NETDEV_UP:
+               /* Fall through */
+       case NETDEV_CHANGEADDR:
+-              i40iw_copy_ip_ntohl(local_ipaddr6, ifa->addr.in6_u.u6_addr32);
+               i40iw_manage_arp_cache(iwdev,
+                                      netdev->dev_addr,
+                                      local_ipaddr6,
+                                      false,
+-                                     I40IW_ARP_ADD);
++                                     action);
++              i40iw_if_notify(iwdev, netdev, local_ipaddr6, false,
++                              (action == I40IW_ARP_ADD) ? true : false);
+               break;
+       default:
+               break;
+-- 
+1.8.3.1
+
diff --git a/linux-next-cherry-picks/0034-i40iw-Replace-list_for_each_entry-macro-with-safe-ve.patch b/linux-next-cherry-picks/0034-i40iw-Replace-list_for_each_entry-macro-with-safe-ve.patch
new file mode 100755 (executable)
index 0000000..4fd8e8f
--- /dev/null
@@ -0,0 +1,65 @@
+From a05e15135b67d71f30f70ab45dede4706f988439 Mon Sep 17 00:00:00 2001
+From: Mustafa Ismail <mustafa.ismail@intel.com>
+Date: Wed, 30 Nov 2016 15:08:34 -0600
+Subject: [PATCH 34/52] i40iw: Replace list_for_each_entry macro with safe
+ version
+
+Use list_for_each_entry_safe macro for the IPv6 addr list
+as IPv6 addresses can be deleted while going through the
+list.
+
+Signed-off-by: Mustafa Ismail <mustafa.ismail@intel.com>
+Signed-off-by: Shiraz Saleem <shiraz.saleem@intel.com>
+Signed-off-by: Doug Ledford <dledford@redhat.com>
+---
+ drivers/infiniband/hw/i40iw/i40iw_cm.c   | 4 ++--
+ drivers/infiniband/hw/i40iw/i40iw_main.c | 4 ++--
+ 2 files changed, 4 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/infiniband/hw/i40iw/i40iw_cm.c b/drivers/infiniband/hw/i40iw/i40iw_cm.c
+index 93ae764..9a14880 100644
+--- a/drivers/infiniband/hw/i40iw/i40iw_cm.c
++++ b/drivers/infiniband/hw/i40iw/i40iw_cm.c
+@@ -1644,7 +1644,7 @@ static enum i40iw_status_code i40iw_add_mqh_6(struct i40iw_device *iwdev,
+ {
+       struct net_device *ip_dev;
+       struct inet6_dev *idev;
+-      struct inet6_ifaddr *ifp;
++      struct inet6_ifaddr *ifp, *tmp;
+       enum i40iw_status_code ret = 0;
+       struct i40iw_cm_listener *child_listen_node;
+       unsigned long flags;
+@@ -1659,7 +1659,7 @@ static enum i40iw_status_code i40iw_add_mqh_6(struct i40iw_device *iwdev,
+                               i40iw_pr_err("idev == NULL\n");
+                               break;
+                       }
+-                      list_for_each_entry(ifp, &idev->addr_list, if_list) {
++                      list_for_each_entry_safe(ifp, tmp, &idev->addr_list, if_list) {
+                               i40iw_debug(&iwdev->sc_dev,
+                                           I40IW_DEBUG_CM,
+                                           "IP=%pI6, vlan_id=%d, MAC=%pM\n",
+diff --git a/drivers/infiniband/hw/i40iw/i40iw_main.c b/drivers/infiniband/hw/i40iw/i40iw_main.c
+index d86bb6e..4ce05b8 100644
+--- a/drivers/infiniband/hw/i40iw/i40iw_main.c
++++ b/drivers/infiniband/hw/i40iw/i40iw_main.c
+@@ -1164,7 +1164,7 @@ static void i40iw_add_ipv6_addr(struct i40iw_device *iwdev)
+ {
+       struct net_device *ip_dev;
+       struct inet6_dev *idev;
+-      struct inet6_ifaddr *ifp;
++      struct inet6_ifaddr *ifp, *tmp;
+       u32 local_ipaddr6[4];
+       rcu_read_lock();
+@@ -1177,7 +1177,7 @@ static void i40iw_add_ipv6_addr(struct i40iw_device *iwdev)
+                               i40iw_pr_err("ipv6 inet device not found\n");
+                               break;
+                       }
+-                      list_for_each_entry(ifp, &idev->addr_list, if_list) {
++                      list_for_each_entry_safe(ifp, tmp, &idev->addr_list, if_list) {
+                               i40iw_pr_info("IP=%pI6, vlan_id=%d, MAC=%pM\n", &ifp->addr,
+                                             rdma_vlan_dev_vlan_id(ip_dev), ip_dev->dev_addr);
+                               i40iw_copy_ip_ntohl(local_ipaddr6,
+-- 
+1.8.3.1
+
diff --git a/linux-next-cherry-picks/0035-i40iw-Add-NULL-check-for-ibqp-event-handler.patch b/linux-next-cherry-picks/0035-i40iw-Add-NULL-check-for-ibqp-event-handler.patch
new file mode 100755 (executable)
index 0000000..74f04d9
--- /dev/null
@@ -0,0 +1,31 @@
+From e0b010da87e3aaf7ca9d28ba5d141924a7f8c66d Mon Sep 17 00:00:00 2001
+From: Shiraz Saleem <shiraz.saleem@intel.com>
+Date: Wed, 30 Nov 2016 15:09:07 -0600
+Subject: [PATCH 35/52] i40iw: Add NULL check for ibqp event handler
+
+Add NULL check for ibqp event handler before calling it to report
+QP events, as it might not initialized.
+
+Signed-off-by: Shiraz Saleem <shiraz.saleem@intel.com>
+Signed-off-by: Faisal Latif <faisal.latif@intel.com>
+Signed-off-by: Doug Ledford <dledford@redhat.com>
+---
+ drivers/infiniband/hw/i40iw/i40iw_cm.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/infiniband/hw/i40iw/i40iw_cm.c b/drivers/infiniband/hw/i40iw/i40iw_cm.c
+index 9a14880..13b6dee 100644
+--- a/drivers/infiniband/hw/i40iw/i40iw_cm.c
++++ b/drivers/infiniband/hw/i40iw/i40iw_cm.c
+@@ -3474,7 +3474,7 @@ static void i40iw_cm_disconn_true(struct i40iw_qp *iwqp)
+               /* Flush the queues */
+               i40iw_flush_wqes(iwdev, iwqp);
+-              if (qp->term_flags) {
++              if (qp->term_flags && iwqp->ibqp.event_handler) {
+                       ibevent.device = iwqp->ibqp.device;
+                       ibevent.event = (qp->eventtype == TERM_EVENT_QP_FATAL) ?
+                                       IB_EVENT_QP_FATAL : IB_EVENT_QP_ACCESS_ERR;
+-- 
+1.8.3.1
+
diff --git a/linux-next-cherry-picks/0036-i40iw-Set-TOS-field-in-IP-header.patch b/linux-next-cherry-picks/0036-i40iw-Set-TOS-field-in-IP-header.patch
new file mode 100755 (executable)
index 0000000..bbd7466
--- /dev/null
@@ -0,0 +1,137 @@
+From 7eb2bde7f3900f044ab351e450adc41623ff2f5c Mon Sep 17 00:00:00 2001
+From: Shiraz Saleem <shiraz.saleem@intel.com>
+Date: Wed, 30 Nov 2016 15:09:34 -0600
+Subject: [PATCH 36/52] i40iw: Set TOS field in IP header
+
+Set the TOS field in IP header with the value passed in
+from application. If there is mismatch between the remote
+client's TOS and listener, set the listener Tos to the higher
+of the two values.
+
+Signed-off-by: Shiraz Saleem <shiraz.saleem@intel.com>
+Signed-off-by: Faisal Latif <faisal.latif@intel.com>
+Signed-off-by: Doug Ledford <dledford@redhat.com>
+---
+ drivers/infiniband/hw/i40iw/i40iw_cm.c | 25 +++++++++++++++++++------
+ drivers/infiniband/hw/i40iw/i40iw_cm.h |  3 +++
+ 2 files changed, 22 insertions(+), 6 deletions(-)
+
+diff --git a/drivers/infiniband/hw/i40iw/i40iw_cm.c b/drivers/infiniband/hw/i40iw/i40iw_cm.c
+index 13b6dee..0c92037 100644
+--- a/drivers/infiniband/hw/i40iw/i40iw_cm.c
++++ b/drivers/infiniband/hw/i40iw/i40iw_cm.c
+@@ -445,7 +445,7 @@ static struct i40iw_puda_buf *i40iw_form_cm_frame(struct i40iw_cm_node *cm_node,
+               iph->version = IPVERSION;
+               iph->ihl = 5;   /* 5 * 4Byte words, IP headr len */
+-              iph->tos = 0;
++              iph->tos = cm_node->tos;
+               iph->tot_len = htons(packetsize);
+               iph->id = htons(++cm_node->tcp_cntxt.loc_id);
+@@ -472,7 +472,8 @@ static struct i40iw_puda_buf *i40iw_form_cm_frame(struct i40iw_cm_node *cm_node,
+                       ethh->h_proto = htons(ETH_P_IPV6);
+               }
+               ip6h->version = 6;
+-              ip6h->flow_lbl[0] = 0;
++              ip6h->priority = cm_node->tos >> 4;
++              ip6h->flow_lbl[0] = cm_node->tos << 4;
+               ip6h->flow_lbl[1] = 0;
+               ip6h->flow_lbl[2] = 0;
+               ip6h->payload_len = htons(packetsize - sizeof(*ip6h));
+@@ -2141,9 +2142,18 @@ static struct i40iw_cm_node *i40iw_make_cm_node(
+       cm_node->vlan_id = cm_info->vlan_id;
+       if ((cm_node->vlan_id == I40IW_NO_VLAN) && iwdev->dcb)
+               cm_node->vlan_id = 0;
++      cm_node->tos = cm_info->tos;
+       cm_node->user_pri = cm_info->user_pri;
+-      if (listener)
+-              cm_node->user_pri = listener->user_pri;
++      if (listener) {
++              if (listener->tos != cm_info->tos)
++                      i40iw_debug(&iwdev->sc_dev, I40IW_DEBUG_DCB,
++                                  "application TOS[%d] and remote client TOS[%d] mismatch\n",
++                                   listener->tos, cm_info->tos);
++              cm_node->tos = max(listener->tos, cm_info->tos);
++              cm_node->user_pri = rt_tos2priority(cm_node->tos);
++              i40iw_debug(&iwdev->sc_dev, I40IW_DEBUG_DCB, "listener: TOS:[%d] UP:[%d]\n",
++                          cm_node->tos, cm_node->user_pri);
++      }
+       memcpy(cm_node->loc_addr, cm_info->loc_addr, sizeof(cm_node->loc_addr));
+       memcpy(cm_node->rem_addr, cm_info->rem_addr, sizeof(cm_node->rem_addr));
+       cm_node->loc_port = cm_info->loc_port;
+@@ -3092,6 +3102,7 @@ void i40iw_receive_ilq(struct i40iw_sc_dev *dev, struct i40iw_puda_buf *rbuf)
+               cm_info.loc_addr[0] = ntohl(iph->daddr);
+               cm_info.rem_addr[0] = ntohl(iph->saddr);
+               cm_info.ipv4 = true;
++              cm_info.tos = iph->tos;
+       } else {
+               ip6h = (struct ipv6hdr *)rbuf->iph;
+               i40iw_copy_ip_ntohl(cm_info.loc_addr,
+@@ -3099,6 +3110,7 @@ void i40iw_receive_ilq(struct i40iw_sc_dev *dev, struct i40iw_puda_buf *rbuf)
+               i40iw_copy_ip_ntohl(cm_info.rem_addr,
+                                   ip6h->saddr.in6_u.u6_addr32);
+               cm_info.ipv4 = false;
++              cm_info.tos = (ip6h->priority << 4) | (ip6h->flow_lbl[0] >> 4);
+       }
+       cm_info.loc_port = ntohs(tcph->dest);
+       cm_info.rem_port = ntohs(tcph->source);
+@@ -3331,6 +3343,7 @@ static void i40iw_cm_init_tsa_conn(struct i40iw_qp *iwqp,
+       cm_node->state = I40IW_CM_STATE_OFFLOADED;
+       tcp_info.tcp_state = I40IW_TCP_STATE_ESTABLISHED;
+       tcp_info.src_mac_addr_idx = iwdev->mac_ip_table_idx;
++      tcp_info.tos = cm_node->tos;
+       dev->iw_priv_qp_ops->qp_setctx(&iwqp->sc_qp, (u64 *)(iwqp->host_ctx.va), ctx_info);
+@@ -3763,6 +3776,7 @@ int i40iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
+               i40iw_netdev_vlan_ipv6(cm_info.loc_addr, &cm_info.vlan_id, NULL);
+       }
+       cm_info.cm_id = cm_id;
++      cm_info.tos = cm_id->tos;
+       cm_info.user_pri = rt_tos2priority(cm_id->tos);
+       i40iw_debug(&iwdev->sc_dev, I40IW_DEBUG_DCB, "%s TOS:[%d] UP:[%d]\n",
+                   __func__, cm_id->tos, cm_info.user_pri);
+@@ -3911,10 +3925,9 @@ int i40iw_create_listen(struct iw_cm_id *cm_id, int backlog)
+       cm_id->provider_data = cm_listen_node;
++      cm_listen_node->tos = cm_id->tos;
+       cm_listen_node->user_pri = rt_tos2priority(cm_id->tos);
+       cm_info.user_pri = cm_listen_node->user_pri;
+-      i40iw_debug(&iwdev->sc_dev, I40IW_DEBUG_DCB, "%s TOS:[%d] UP:[%d]\n",
+-                  __func__, cm_id->tos, cm_listen_node->user_pri);
+       if (!cm_listen_node->reused_node) {
+               if (wildcard) {
+diff --git a/drivers/infiniband/hw/i40iw/i40iw_cm.h b/drivers/infiniband/hw/i40iw/i40iw_cm.h
+index 49ed7a5..2e52e38 100644
+--- a/drivers/infiniband/hw/i40iw/i40iw_cm.h
++++ b/drivers/infiniband/hw/i40iw/i40iw_cm.h
+@@ -297,6 +297,7 @@ struct i40iw_cm_listener {
+       enum i40iw_cm_listener_state listener_state;
+       u32 reused_node;
+       u8 user_pri;
++      u8 tos;
+       u16 vlan_id;
+       bool qhash_set;
+       bool ipv4;
+@@ -343,6 +344,7 @@ struct i40iw_cm_node {
+       atomic_t passive_state;
+       bool qhash_set;
+       u8 user_pri;
++      u8 tos;
+       bool ipv4;
+       bool snd_mark_en;
+       u16 lsmm_size;
+@@ -368,6 +370,7 @@ struct i40iw_cm_info {
+       u16 vlan_id;
+       int backlog;
+       u8 user_pri;
++      u8 tos;
+       bool ipv4;
+ };
+-- 
+1.8.3.1
+
diff --git a/linux-next-cherry-picks/0037-i40iw-Fill-in-IRD-value-when-on-connect-request.patch b/linux-next-cherry-picks/0037-i40iw-Fill-in-IRD-value-when-on-connect-request.patch
new file mode 100755 (executable)
index 0000000..4cdd0eb
--- /dev/null
@@ -0,0 +1,31 @@
+From fd4e906b2e2c04056e8c1773b5b6e06d307239e6 Mon Sep 17 00:00:00 2001
+From: Shiraz Saleem <shiraz.saleem@intel.com>
+Date: Wed, 30 Nov 2016 15:12:11 -0600
+Subject: [PATCH 37/52] i40iw: Fill in IRD value when on connect request
+
+IRD is not populated on connect request and application is
+getting 0 for the value. Fill in the correct value on
+connect request.
+
+Signed-off-by: Shiraz Saleem <shiraz.saleem@intel.com>
+Signed-off-by: Faisal Latif <faisal.latif@intel.com>
+Signed-off-by: Doug Ledford <dledford@redhat.com>
+---
+ drivers/infiniband/hw/i40iw/i40iw_cm.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/drivers/infiniband/hw/i40iw/i40iw_cm.c b/drivers/infiniband/hw/i40iw/i40iw_cm.c
+index 0c92037..2f14de7 100644
+--- a/drivers/infiniband/hw/i40iw/i40iw_cm.c
++++ b/drivers/infiniband/hw/i40iw/i40iw_cm.c
+@@ -272,6 +272,7 @@ static int i40iw_send_cm_event(struct i40iw_cm_node *cm_node,
+               event.provider_data = (void *)cm_node;
+               event.private_data = (void *)cm_node->pdata_buf;
+               event.private_data_len = (u8)cm_node->pdata.size;
++              event.ird = cm_node->ird_size;
+               break;
+       case IW_CM_EVENT_CONNECT_REPLY:
+               i40iw_get_cmevent_info(cm_node, cm_id, &event);
+-- 
+1.8.3.1
+
diff --git a/linux-next-cherry-picks/0038-i40iw-Correctly-fail-loopback-connection-if-no-liste.patch b/linux-next-cherry-picks/0038-i40iw-Correctly-fail-loopback-connection-if-no-liste.patch
new file mode 100755 (executable)
index 0000000..65a1437
--- /dev/null
@@ -0,0 +1,143 @@
+From bf69f494c337cf3c43d3358ad66642dbde50fe03 Mon Sep 17 00:00:00 2001
+From: Shiraz Saleem <shiraz.saleem@intel.com>
+Date: Wed, 30 Nov 2016 15:12:35 -0600
+Subject: [PATCH 38/52] i40iw: Correctly fail loopback connection if no
+ listener
+
+Fail the connect and return the proper error code if a client
+is started with local IP address and there is no corresponding
+loopback listener.
+
+Signed-off-by: Shiraz Saleem <shiraz.saleem@intel.com>
+Signed-off-by: Faisal Latif <faisal.latif@intel.com>
+Signed-off-by: Doug Ledford <dledford@redhat.com>
+---
+ drivers/infiniband/hw/i40iw/i40iw_cm.c | 62 +++++++++++++++++++---------------
+ 1 file changed, 35 insertions(+), 27 deletions(-)
+
+diff --git a/drivers/infiniband/hw/i40iw/i40iw_cm.c b/drivers/infiniband/hw/i40iw/i40iw_cm.c
+index 2f14de7..25af89a 100644
+--- a/drivers/infiniband/hw/i40iw/i40iw_cm.c
++++ b/drivers/infiniband/hw/i40iw/i40iw_cm.c
+@@ -2878,7 +2878,7 @@ static struct i40iw_cm_node *i40iw_create_cm_node(
+       /* create a CM connection node */
+       cm_node = i40iw_make_cm_node(cm_core, iwdev, cm_info, NULL);
+       if (!cm_node)
+-              return NULL;
++              return ERR_PTR(-ENOMEM);
+       /* set our node side to client (active) side */
+       cm_node->tcp_cntxt.client = 1;
+       cm_node->tcp_cntxt.rcv_wscale = I40IW_CM_DEFAULT_RCV_WND_SCALE;
+@@ -2891,7 +2891,8 @@ static struct i40iw_cm_node *i40iw_create_cm_node(
+                                               cm_node->vlan_id,
+                                               I40IW_CM_LISTENER_ACTIVE_STATE);
+               if (!loopback_remotelistener) {
+-                      i40iw_create_event(cm_node, I40IW_CM_EVENT_ABORTED);
++                      i40iw_rem_ref_cm_node(cm_node);
++                      return ERR_PTR(-ECONNREFUSED);
+               } else {
+                       loopback_cm_info = *cm_info;
+                       loopback_cm_info.loc_port = cm_info->rem_port;
+@@ -2904,7 +2905,7 @@ static struct i40iw_cm_node *i40iw_create_cm_node(
+                                                                loopback_remotelistener);
+                       if (!loopback_remotenode) {
+                               i40iw_rem_ref_cm_node(cm_node);
+-                              return NULL;
++                              return ERR_PTR(-ENOMEM);
+                       }
+                       cm_core->stats_loopbacks++;
+                       loopback_remotenode->loopbackpartner = cm_node;
+@@ -3732,6 +3733,7 @@ int i40iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
+       struct sockaddr_in6 *raddr6;
+       bool qhash_set = false;
+       int apbvt_set = 0;
++      int err = 0;
+       enum i40iw_status_code status;
+       ibqp = i40iw_get_qp(cm_id->device, conn_param->qpn);
+@@ -3812,8 +3814,11 @@ int i40iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
+                                      conn_param->private_data_len,
+                                      (void *)conn_param->private_data,
+                                      &cm_info);
+-      if (!cm_node)
+-              goto err;
++
++      if (IS_ERR(cm_node)) {
++              err = PTR_ERR(cm_node);
++              goto err_out;
++      }
+       i40iw_record_ird_ord(cm_node, (u16)conn_param->ird, (u16)conn_param->ord);
+       if (cm_node->send_rdma0_op == SEND_RDMA_READ_ZERO &&
+@@ -3827,10 +3832,12 @@ int i40iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
+       iwqp->cm_id = cm_id;
+       i40iw_add_ref(&iwqp->ibqp);
+-      if (cm_node->state == I40IW_CM_STATE_SYN_SENT) {
+-              if (i40iw_send_syn(cm_node, 0)) {
++      if (cm_node->state != I40IW_CM_STATE_OFFLOADED) {
++              cm_node->state = I40IW_CM_STATE_SYN_SENT;
++              err = i40iw_send_syn(cm_node, 0);
++              if (err) {
+                       i40iw_rem_ref_cm_node(cm_node);
+-                      goto err;
++                      goto err_out;
+               }
+       }
+@@ -3842,24 +3849,25 @@ int i40iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
+                   cm_node->cm_id);
+       return 0;
+-err:
+-      if (cm_node) {
+-              if (cm_node->ipv4)
+-                      i40iw_debug(cm_node->dev,
+-                                  I40IW_DEBUG_CM,
+-                                  "Api - connect() FAILED: dest addr=%pI4",
+-                                  cm_node->rem_addr);
+-              else
+-                      i40iw_debug(cm_node->dev, I40IW_DEBUG_CM,
+-                                  "Api - connect() FAILED: dest addr=%pI6",
+-                                  cm_node->rem_addr);
+-      }
+-      i40iw_manage_qhash(iwdev,
+-                         &cm_info,
+-                         I40IW_QHASH_TYPE_TCP_ESTABLISHED,
+-                         I40IW_QHASH_MANAGE_TYPE_DELETE,
+-                         NULL,
+-                         false);
++err_out:
++      if (cm_info.ipv4)
++              i40iw_debug(&iwdev->sc_dev,
++                          I40IW_DEBUG_CM,
++                          "Api - connect() FAILED: dest addr=%pI4",
++                          cm_info.rem_addr);
++      else
++              i40iw_debug(&iwdev->sc_dev,
++                          I40IW_DEBUG_CM,
++                          "Api - connect() FAILED: dest addr=%pI6",
++                          cm_info.rem_addr);
++
++      if (qhash_set)
++              i40iw_manage_qhash(iwdev,
++                                 &cm_info,
++                                 I40IW_QHASH_TYPE_TCP_ESTABLISHED,
++                                 I40IW_QHASH_MANAGE_TYPE_DELETE,
++                                 NULL,
++                                 false);
+       if (apbvt_set && !i40iw_listen_port_in_use(&iwdev->cm_core,
+                                                  cm_info.loc_port))
+@@ -3868,7 +3876,7 @@ int i40iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
+                                  I40IW_MANAGE_APBVT_DEL);
+       cm_id->rem_ref(cm_id);
+       iwdev->cm_core.stats_connect_errs++;
+-      return -ENOMEM;
++      return err;
+ }
+ /**
+-- 
+1.8.3.1
+
diff --git a/linux-next-cherry-picks/0039-i40iw-Code-cleanup-remove-check-of-PBLE-pages.patch b/linux-next-cherry-picks/0039-i40iw-Code-cleanup-remove-check-of-PBLE-pages.patch
new file mode 100755 (executable)
index 0000000..54c517d
--- /dev/null
@@ -0,0 +1,33 @@
+From 1ef936b229c54e73a3cce9f4eb8dd5a146add073 Mon Sep 17 00:00:00 2001
+From: Henry Orosco <henry.orosco@intel.com>
+Date: Wed, 30 Nov 2016 15:13:47 -0600
+Subject: [PATCH 39/52] i40iw: Code cleanup, remove check of PBLE pages
+
+Remove check for zero 'pages' of unallocated pbles calculated in
+add_pble_pool(); as it can never be true.
+
+Signed-off-by: Shiraz Saleem <shiraz.saleem@intel.com>
+Signed-off-by: Henry Orosco <henry.orosco@intel.com>
+Signed-off-by: Doug Ledford <dledford@redhat.com>
+---
+ drivers/infiniband/hw/i40iw/i40iw_pble.c | 4 ----
+ 1 file changed, 4 deletions(-)
+
+diff --git a/drivers/infiniband/hw/i40iw/i40iw_pble.c b/drivers/infiniband/hw/i40iw/i40iw_pble.c
+index 85993dc..c87ba16 100644
+--- a/drivers/infiniband/hw/i40iw/i40iw_pble.c
++++ b/drivers/infiniband/hw/i40iw/i40iw_pble.c
+@@ -353,10 +353,6 @@ static enum i40iw_status_code add_pble_pool(struct i40iw_sc_dev *dev,
+       pages = (idx->rel_pd_idx) ? (I40IW_HMC_PD_CNT_IN_SD -
+                       idx->rel_pd_idx) : I40IW_HMC_PD_CNT_IN_SD;
+       pages = min(pages, pble_rsrc->unallocated_pble >> PBLE_512_SHIFT);
+-      if (!pages) {
+-              ret_code = I40IW_ERR_NO_PBLCHUNKS_AVAILABLE;
+-              goto error;
+-      }
+       info.chunk = chunk;
+       info.hmc_info = hmc_info;
+       info.pages = pages;
+-- 
+1.8.3.1
+
diff --git a/linux-next-cherry-picks/0040-i40iw-Add-request-for-reset-on-CQP-timeout.patch b/linux-next-cherry-picks/0040-i40iw-Add-request-for-reset-on-CQP-timeout.patch
new file mode 100755 (executable)
index 0000000..e4c0824
--- /dev/null
@@ -0,0 +1,58 @@
+From 78300cf8152f87adb20fbe71a600e0d8d72aabe8 Mon Sep 17 00:00:00 2001
+From: Henry Orosco <henry.orosco@intel.com>
+Date: Wed, 30 Nov 2016 15:14:15 -0600
+Subject: [PATCH 40/52] i40iw: Add request for reset on CQP timeout
+
+When CQP times out, send a request to LAN driver for reset.
+
+Signed-off-by: Mustafa Ismail <mustafa.ismail@intel.com>
+Signed-off-by: Henry Orosco <henry.orosco@intel.com>
+Signed-off-by: Doug Ledford <dledford@redhat.com>
+---
+ drivers/infiniband/hw/i40iw/i40iw.h       |  1 +
+ drivers/infiniband/hw/i40iw/i40iw_utils.c | 10 +++++++++-
+ 2 files changed, 10 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/infiniband/hw/i40iw/i40iw.h b/drivers/infiniband/hw/i40iw/i40iw.h
+index c795c61..ef188e6 100644
+--- a/drivers/infiniband/hw/i40iw/i40iw.h
++++ b/drivers/infiniband/hw/i40iw/i40iw.h
+@@ -304,6 +304,7 @@ struct i40iw_device {
+       u32 mpa_version;
+       bool dcb;
+       bool closing;
++      bool reset;
+       u32 used_pds;
+       u32 used_cqs;
+       u32 used_mrs;
+diff --git a/drivers/infiniband/hw/i40iw/i40iw_utils.c b/drivers/infiniband/hw/i40iw/i40iw_utils.c
+index 641f00f..4a08ffb 100644
+--- a/drivers/infiniband/hw/i40iw/i40iw_utils.c
++++ b/drivers/infiniband/hw/i40iw/i40iw_utils.c
+@@ -396,7 +396,10 @@ static int i40iw_wait_event(struct i40iw_device *iwdev,
+               i40iw_pr_err("error cqp command 0x%x timed out ret = %d\n",
+                            info->cqp_cmd, timeout_ret);
+               err_code = -ETIME;
+-              i40iw_request_reset(iwdev);
++              if (!iwdev->reset) {
++                      iwdev->reset = true;
++                      i40iw_request_reset(iwdev);
++              }
+               goto done;
+       }
+       cqp_error = cqp_request->compl_info.error;
+@@ -426,6 +429,11 @@ enum i40iw_status_code i40iw_handle_cqp_op(struct i40iw_device *iwdev,
+       struct cqp_commands_info *info = &cqp_request->info;
+       int err_code = 0;
++      if (iwdev->reset) {
++              i40iw_free_cqp_request(&iwdev->cqp, cqp_request);
++              return I40IW_ERR_CQP_COMPL_ERROR;
++      }
++
+       status = i40iw_process_cqp_cmd(dev, info);
+       if (status) {
+               i40iw_pr_err("error cqp command 0x%x failed\n", info->cqp_cmd);
+-- 
+1.8.3.1
+
diff --git a/linux-next-cherry-picks/0041-i40iw-Remove-macros-I40IW_STAG_KEY_FROM_STAG-and-I40.patch b/linux-next-cherry-picks/0041-i40iw-Remove-macros-I40IW_STAG_KEY_FROM_STAG-and-I40.patch
new file mode 100755 (executable)
index 0000000..4539d95
--- /dev/null
@@ -0,0 +1,41 @@
+From 5e589171225b6aeac5eaca0b0887bd83dc9376d8 Mon Sep 17 00:00:00 2001
+From: Thomas Huth <thuth@redhat.com>
+Date: Wed, 5 Oct 2016 13:55:38 +0200
+Subject: [PATCH 41/52] i40iw: Remove macros I40IW_STAG_KEY_FROM_STAG and
+ I40IW_STAG_INDEX_FROM_STAG
+
+The macros I40IW_STAG_KEY_FROM_STAG and I40IW_STAG_INDEX_FROM_STAG are
+apparently bad - they are using the logical "&&" operation which
+does not make sense here. It should have been a bitwise "&" instead.
+Since the macros seem to be completely unused, let's simply remove
+them so that nobody accidentially uses them in the future. And while
+we're at it, also remove the unused macro I40IW_CREATE_STAG.
+
+Signed-off-by: Thomas Huth <thuth@redhat.com>
+Reviewed-by: Leon Romanovsky <leonro@mellanox.com>
+Acked-by: Faisal Latif <faisal.latif@intel.com>
+Signed-off-by: Doug Ledford <dledford@redhat.com>
+---
+ drivers/infiniband/hw/i40iw/i40iw_user.h | 6 ------
+ 1 file changed, 6 deletions(-)
+
+diff --git a/drivers/infiniband/hw/i40iw/i40iw_user.h b/drivers/infiniband/hw/i40iw/i40iw_user.h
+index 66263fc..80d9f46 100644
+--- a/drivers/infiniband/hw/i40iw/i40iw_user.h
++++ b/drivers/infiniband/hw/i40iw/i40iw_user.h
+@@ -96,12 +96,6 @@ enum i40iw_device_capabilities_const {
+ #define i40iw_physical_fragment u64
+ #define i40iw_address_list u64 *
+-#define I40IW_CREATE_STAG(index, key)       (((index) << 8) + (key))
+-
+-#define I40IW_STAG_KEY_FROM_STAG(stag)      ((stag) && 0x000000FF)
+-
+-#define I40IW_STAG_INDEX_FROM_STAG(stag)    (((stag) && 0xFFFFFF00) >> 8)
+-
+ #define       I40IW_MAX_MR_SIZE       0x10000000000L
+ struct i40iw_qp_uk;
+-- 
+1.8.3.1
+
diff --git a/linux-next-cherry-picks/0042-i40iw-Use-correct-src-address-in-memcpy-to-rdma-stat.patch b/linux-next-cherry-picks/0042-i40iw-Use-correct-src-address-in-memcpy-to-rdma-stat.patch
new file mode 100755 (executable)
index 0000000..f18ac21
--- /dev/null
@@ -0,0 +1,36 @@
+From 91c42b72f8e8b45961ff05a05009b644e6316ca2 Mon Sep 17 00:00:00 2001
+From: Shiraz Saleem <shiraz.saleem@intel.com>
+Date: Fri, 11 Nov 2016 10:55:41 -0600
+Subject: [PATCH 42/52] i40iw: Use correct src address in memcpy to rdma stats
+ counters
+
+hw_stats is a pointer to i40_iw_dev_stats struct in i40iw_get_hw_stats().
+Use hw_stats and not &hw_stats in the memcpy to copy the i40iw device stats
+data into rdma_hw_stats counters.
+
+Fixes: b40f4757daa1 ("IB/core: Make device counter infrastructure dynamic")
+
+Cc: stable@vger.kernel.org # 4.7+
+Signed-off-by: Shiraz Saleem <shiraz.saleem@intel.com>
+Signed-off-by: Faisal Latif <faisal.latif@intel.com>
+Signed-off-by: Doug Ledford <dledford@redhat.com>
+---
+ drivers/infiniband/hw/i40iw/i40iw_verbs.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/infiniband/hw/i40iw/i40iw_verbs.c b/drivers/infiniband/hw/i40iw/i40iw_verbs.c
+index bc24086..206d72b 100644
+--- a/drivers/infiniband/hw/i40iw/i40iw_verbs.c
++++ b/drivers/infiniband/hw/i40iw/i40iw_verbs.c
+@@ -2646,7 +2646,7 @@ static int i40iw_get_hw_stats(struct ib_device *ibdev,
+                       return -ENOSYS;
+       }
+-      memcpy(&stats->value[0], &hw_stats, sizeof(*hw_stats));
++      memcpy(&stats->value[0], hw_stats, sizeof(*hw_stats));
+       return stats->num_counters;
+ }
+-- 
+1.8.3.1
+
diff --git a/linux-next-cherry-picks/0043-i40iw-Fix-double-free-of-QP.patch b/linux-next-cherry-picks/0043-i40iw-Fix-double-free-of-QP.patch
new file mode 100755 (executable)
index 0000000..fa223ce
--- /dev/null
@@ -0,0 +1,94 @@
+From f4a87ca12a1c203913a5cc889ec49b817a1f45fc Mon Sep 17 00:00:00 2001
+From: Mustafa Ismail <mustafa.ismail@intel.com>
+Date: Tue, 6 Dec 2016 15:49:30 -0600
+Subject: [PATCH 43/52] i40iw: Fix double free of QP
+
+A QP can be double freed if i40iw_cm_disconn() is
+called while it is currently being freed by
+i40iw_rem_ref(). The fix in i40iw_cm_disconn() will
+first check if the QP is already freed before
+making another request for the QP to be freed.
+
+Signed-off-by: Mustafa Ismail <mustafa.ismail@intel.com>
+Signed-off-by: Shiraz Saleem <shiraz.saleem@intel.com>
+Signed-off-by: Henry Orosco <henry.orosco@intel.com>
+Signed-off-by: Doug Ledford <dledford@redhat.com>
+---
+ drivers/infiniband/hw/i40iw/i40iw.h    |  2 +-
+ drivers/infiniband/hw/i40iw/i40iw_cm.c | 20 ++++++++++++++++----
+ drivers/infiniband/hw/i40iw/i40iw_hw.c |  4 +++-
+ 3 files changed, 20 insertions(+), 6 deletions(-)
+
+diff --git a/drivers/infiniband/hw/i40iw/i40iw.h b/drivers/infiniband/hw/i40iw/i40iw.h
+index ef188e6..51b8280 100644
+--- a/drivers/infiniband/hw/i40iw/i40iw.h
++++ b/drivers/infiniband/hw/i40iw/i40iw.h
+@@ -512,7 +512,7 @@ static inline struct i40iw_handler *to_iwhdl(struct i40iw_device *iw_dev)
+ int i40iw_register_rdma_device(struct i40iw_device *iwdev);
+ void i40iw_port_ibevent(struct i40iw_device *iwdev);
+-int i40iw_cm_disconn(struct i40iw_qp *);
++void i40iw_cm_disconn(struct i40iw_qp *iwqp);
+ void i40iw_cm_disconn_worker(void *);
+ int mini_cm_recv_pkt(struct i40iw_cm_core *, struct i40iw_device *,
+                    struct sk_buff *);
+diff --git a/drivers/infiniband/hw/i40iw/i40iw_cm.c b/drivers/infiniband/hw/i40iw/i40iw_cm.c
+index 25af89a..ff95fea 100644
+--- a/drivers/infiniband/hw/i40iw/i40iw_cm.c
++++ b/drivers/infiniband/hw/i40iw/i40iw_cm.c
+@@ -3359,21 +3359,33 @@ static void i40iw_cm_init_tsa_conn(struct i40iw_qp *iwqp,
+  * i40iw_cm_disconn - when a connection is being closed
+  * @iwqp: associate qp for the connection
+  */
+-int i40iw_cm_disconn(struct i40iw_qp *iwqp)
++void i40iw_cm_disconn(struct i40iw_qp *iwqp)
+ {
+       struct disconn_work *work;
+       struct i40iw_device *iwdev = iwqp->iwdev;
+       struct i40iw_cm_core *cm_core = &iwdev->cm_core;
++      unsigned long flags;
+       work = kzalloc(sizeof(*work), GFP_ATOMIC);
+       if (!work)
+-              return -ENOMEM; /* Timer will clean up */
+-
++              return; /* Timer will clean up */
++
++      spin_lock_irqsave(&iwdev->qptable_lock, flags);
++      if (!iwdev->qp_table[iwqp->ibqp.qp_num]) {
++              spin_unlock_irqrestore(&iwdev->qptable_lock, flags);
++              i40iw_debug(&iwdev->sc_dev, I40IW_DEBUG_CM,
++                          "%s qp_id %d is already freed\n",
++                           __func__, iwqp->ibqp.qp_num);
++              kfree(work);
++              return;
++      }
+       i40iw_add_ref(&iwqp->ibqp);
++      spin_unlock_irqrestore(&iwdev->qptable_lock, flags);
++
+       work->iwqp = iwqp;
+       INIT_WORK(&work->work, i40iw_disconnect_worker);
+       queue_work(cm_core->disconn_wq, &work->work);
+-      return 0;
++      return;
+ }
+ /**
+diff --git a/drivers/infiniband/hw/i40iw/i40iw_hw.c b/drivers/infiniband/hw/i40iw/i40iw_hw.c
+index 5e2c16c..b2854b1 100644
+--- a/drivers/infiniband/hw/i40iw/i40iw_hw.c
++++ b/drivers/infiniband/hw/i40iw/i40iw_hw.c
+@@ -308,7 +308,9 @@ void i40iw_process_aeq(struct i40iw_device *iwdev)
+                       iwqp = iwdev->qp_table[info->qp_cq_id];
+                       if (!iwqp) {
+                               spin_unlock_irqrestore(&iwdev->qptable_lock, flags);
+-                              i40iw_pr_err("qp_id %d is already freed\n", info->qp_cq_id);
++                              i40iw_debug(dev, I40IW_DEBUG_AEQ,
++                                          "%s qp_id %d is already freed\n",
++                                          __func__, info->qp_cq_id);
+                               continue;
+                       }
+                       i40iw_add_ref(&iwqp->ibqp);
+-- 
+1.8.3.1
+
diff --git a/linux-next-cherry-picks/0044-i40iw-Fix-QP-flush-to-not-hang-on-empty-queues-or-fa.patch b/linux-next-cherry-picks/0044-i40iw-Fix-QP-flush-to-not-hang-on-empty-queues-or-fa.patch
new file mode 100755 (executable)
index 0000000..4616561
--- /dev/null
@@ -0,0 +1,87 @@
+From 1cda28bb5b503bab734072d97a41b2e7eda6b6b9 Mon Sep 17 00:00:00 2001
+From: Shiraz Saleem <shiraz.saleem@intel.com>
+Date: Tue, 6 Dec 2016 15:49:31 -0600
+Subject: [PATCH 44/52] i40iw: Fix QP flush to not hang on empty queues or
+ failure
+
+When flush QP and there are no pending work requests, signal completion
+to unblock i40iw_drain_sq and i40iw_drain_rq which are waiting on
+completion for iwqp->sq_drained and iwqp->sq_drained respectively.
+Also, signal completion if flush QP fails to prevent the drain SQ or RQ
+from being blocked indefintely.
+
+Signed-off-by: Shiraz Saleem <shiraz.saleem@intel.com>
+Signed-off-by: Doug Ledford <dledford@redhat.com>
+---
+ drivers/infiniband/hw/i40iw/i40iw.h    |  9 ++++++---
+ drivers/infiniband/hw/i40iw/i40iw_hw.c | 26 ++++++++++++++++++++++++--
+ 2 files changed, 30 insertions(+), 5 deletions(-)
+
+diff --git a/drivers/infiniband/hw/i40iw/i40iw.h b/drivers/infiniband/hw/i40iw/i40iw.h
+index 51b8280..2aab85b 100644
+--- a/drivers/infiniband/hw/i40iw/i40iw.h
++++ b/drivers/infiniband/hw/i40iw/i40iw.h
+@@ -112,9 +112,12 @@
+ #define I40IW_DRV_OPT_MCAST_LOGPORT_MAP    0x00000800
+ #define IW_HMC_OBJ_TYPE_NUM ARRAY_SIZE(iw_hmc_obj_types)
+-#define IW_CFG_FPM_QP_COUNT           32768
+-#define I40IW_MAX_PAGES_PER_FMR               512
+-#define I40IW_MIN_PAGES_PER_FMR               1
++#define IW_CFG_FPM_QP_COUNT               32768
++#define I40IW_MAX_PAGES_PER_FMR           512
++#define I40IW_MIN_PAGES_PER_FMR           1
++#define I40IW_CQP_COMPL_RQ_WQE_FLUSHED    2
++#define I40IW_CQP_COMPL_SQ_WQE_FLUSHED    3
++#define I40IW_CQP_COMPL_RQ_SQ_WQE_FLUSHED 4
+ #define I40IW_MTU_TO_MSS              40
+ #define I40IW_DEFAULT_MSS             1460
+diff --git a/drivers/infiniband/hw/i40iw/i40iw_hw.c b/drivers/infiniband/hw/i40iw/i40iw_hw.c
+index b2854b1..4394a67 100644
+--- a/drivers/infiniband/hw/i40iw/i40iw_hw.c
++++ b/drivers/infiniband/hw/i40iw/i40iw_hw.c
+@@ -622,6 +622,7 @@ enum i40iw_status_code i40iw_hw_flush_wqes(struct i40iw_device *iwdev,
+       struct i40iw_qp_flush_info *hw_info;
+       struct i40iw_cqp_request *cqp_request;
+       struct cqp_commands_info *cqp_info;
++      struct i40iw_qp *iwqp = (struct i40iw_qp *)qp->back_qp;
+       cqp_request = i40iw_get_cqp_request(&iwdev->cqp, wait);
+       if (!cqp_request)
+@@ -636,9 +637,30 @@ enum i40iw_status_code i40iw_hw_flush_wqes(struct i40iw_device *iwdev,
+       cqp_info->in.u.qp_flush_wqes.qp = qp;
+       cqp_info->in.u.qp_flush_wqes.scratch = (uintptr_t)cqp_request;
+       status = i40iw_handle_cqp_op(iwdev, cqp_request);
+-      if (status)
++      if (status) {
+               i40iw_pr_err("CQP-OP Flush WQE's fail");
+-      return status;
++              complete(&iwqp->sq_drained);
++              complete(&iwqp->rq_drained);
++              return status;
++      }
++      if (!cqp_request->compl_info.maj_err_code) {
++              switch (cqp_request->compl_info.min_err_code) {
++              case I40IW_CQP_COMPL_RQ_WQE_FLUSHED:
++                      complete(&iwqp->sq_drained);
++                      break;
++              case I40IW_CQP_COMPL_SQ_WQE_FLUSHED:
++                      complete(&iwqp->rq_drained);
++                      break;
++              case I40IW_CQP_COMPL_RQ_SQ_WQE_FLUSHED:
++                      break;
++              default:
++                      complete(&iwqp->sq_drained);
++                      complete(&iwqp->rq_drained);
++                      break;
++              }
++      }
++
++      return 0;
+ }
+ /**
+-- 
+1.8.3.1
+
diff --git a/linux-next-cherry-picks/0045-i40iw-Fix-memory-leak-in-CQP-destroy-when-in-reset.patch b/linux-next-cherry-picks/0045-i40iw-Fix-memory-leak-in-CQP-destroy-when-in-reset.patch
new file mode 100755 (executable)
index 0000000..4da0608
--- /dev/null
@@ -0,0 +1,53 @@
+From fd90d4d4c2dc815ef5a5f5d50a9c65c266c68ace Mon Sep 17 00:00:00 2001
+From: Mustafa Ismail <mustafa.ismail@intel.com>
+Date: Tue, 6 Dec 2016 15:49:32 -0600
+Subject: [PATCH 45/52] i40iw: Fix memory leak in CQP destroy when in reset
+
+On a device close, the control QP (CQP) is destroyed by calling
+cqp_destroy which destroys the CQP and frees its SD buffer memory.
+However, if the reset flag is true, cqp_destroy is never called and
+leads to a memory leak on SD buffer memory. Fix this by always calling
+cqp_destroy, on device close, regardless of reset. The exception to this
+when CQP create fails. In this case, the SD buffer memory is already
+freed on an error check and there is no need to call cqp_destroy.
+
+Signed-off-by: Mustafa Ismail <mustafa.ismail@intel.com>
+Signed-off-by: Shiraz Saleem <shiraz.saleem@intel.com>
+Signed-off-by: Doug Ledford <dledford@redhat.com>
+---
+ drivers/infiniband/hw/i40iw/i40iw_main.c | 9 +++------
+ 1 file changed, 3 insertions(+), 6 deletions(-)
+
+diff --git a/drivers/infiniband/hw/i40iw/i40iw_main.c b/drivers/infiniband/hw/i40iw/i40iw_main.c
+index 4ce05b8..85d8fa6 100644
+--- a/drivers/infiniband/hw/i40iw/i40iw_main.c
++++ b/drivers/infiniband/hw/i40iw/i40iw_main.c
+@@ -237,14 +237,11 @@ static irqreturn_t i40iw_irq_handler(int irq, void *data)
+  */
+ static void i40iw_destroy_cqp(struct i40iw_device *iwdev, bool free_hwcqp)
+ {
+-      enum i40iw_status_code status = 0;
+       struct i40iw_sc_dev *dev = &iwdev->sc_dev;
+       struct i40iw_cqp *cqp = &iwdev->cqp;
+-      if (free_hwcqp && dev->cqp_ops->cqp_destroy)
+-              status = dev->cqp_ops->cqp_destroy(dev->cqp);
+-      if (status)
+-              i40iw_pr_err("destroy cqp failed");
++      if (free_hwcqp)
++              dev->cqp_ops->cqp_destroy(dev->cqp);
+       i40iw_free_dma_mem(dev->hw, &cqp->sq);
+       kfree(cqp->scratch_array);
+@@ -1475,7 +1472,7 @@ static void i40iw_deinit_device(struct i40iw_device *iwdev, bool reset)
+               i40iw_del_hmc_objects(dev, dev->hmc_info, true, reset);
+               /* fallthrough */
+       case CQP_CREATED:
+-              i40iw_destroy_cqp(iwdev, !reset);
++              i40iw_destroy_cqp(iwdev, true);
+               /* fallthrough */
+       case INITIAL_STATE:
+               i40iw_cleanup_cm_core(&iwdev->cm_core);
+-- 
+1.8.3.1
+
diff --git a/linux-next-cherry-picks/0046-i40iw-Fix-race-condition-in-terminate-timer-s-handle.patch b/linux-next-cherry-picks/0046-i40iw-Fix-race-condition-in-terminate-timer-s-handle.patch
new file mode 100755 (executable)
index 0000000..53d9e6a
--- /dev/null
@@ -0,0 +1,76 @@
+From d627b506313c887e7159580cde926f5d14797aaa Mon Sep 17 00:00:00 2001
+From: Shiraz Saleem <shiraz.saleem@intel.com>
+Date: Tue, 6 Dec 2016 15:49:33 -0600
+Subject: [PATCH 46/52] i40iw: Fix race condition in terminate timer's handler
+
+Add a QP reference when terminate timer is started to ensure
+the destroy QP doesn't race ahead to free the QP while it is being
+referenced in the terminate timer's handler.
+
+Signed-off-by: Shiraz Saleem <shiraz.saleem@intel.com>
+Signed-off-by: Doug Ledford <dledford@redhat.com>
+---
+ drivers/infiniband/hw/i40iw/i40iw_cm.c    | 2 +-
+ drivers/infiniband/hw/i40iw/i40iw_utils.c | 5 ++++-
+ drivers/infiniband/hw/i40iw/i40iw_verbs.c | 2 +-
+ 3 files changed, 6 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/infiniband/hw/i40iw/i40iw_cm.c b/drivers/infiniband/hw/i40iw/i40iw_cm.c
+index ff95fea..a217d2f 100644
+--- a/drivers/infiniband/hw/i40iw/i40iw_cm.c
++++ b/drivers/infiniband/hw/i40iw/i40iw_cm.c
+@@ -3471,7 +3471,7 @@ static void i40iw_cm_disconn_true(struct i40iw_qp *iwqp)
+                *terminate-handler to issue cm_disconn which can re-free
+                *a QP even after its refcnt=0.
+                */
+-              del_timer(&iwqp->terminate_timer);
++              i40iw_terminate_del_timer(qp);
+               if (!iwqp->flush_issued) {
+                       iwqp->flush_issued = 1;
+                       issue_flush = 1;
+diff --git a/drivers/infiniband/hw/i40iw/i40iw_utils.c b/drivers/infiniband/hw/i40iw/i40iw_utils.c
+index 4a08ffb..7d4af77 100644
+--- a/drivers/infiniband/hw/i40iw/i40iw_utils.c
++++ b/drivers/infiniband/hw/i40iw/i40iw_utils.c
+@@ -823,6 +823,7 @@ static void i40iw_terminate_timeout(unsigned long context)
+       struct i40iw_sc_qp *qp = (struct i40iw_sc_qp *)&iwqp->sc_qp;
+       i40iw_terminate_done(qp, 1);
++      i40iw_rem_ref(&iwqp->ibqp);
+ }
+ /**
+@@ -834,6 +835,7 @@ void i40iw_terminate_start_timer(struct i40iw_sc_qp *qp)
+       struct i40iw_qp *iwqp;
+       iwqp = (struct i40iw_qp *)qp->back_qp;
++      i40iw_add_ref(&iwqp->ibqp);
+       init_timer(&iwqp->terminate_timer);
+       iwqp->terminate_timer.function = i40iw_terminate_timeout;
+       iwqp->terminate_timer.expires = jiffies + HZ;
+@@ -850,7 +852,8 @@ void i40iw_terminate_del_timer(struct i40iw_sc_qp *qp)
+       struct i40iw_qp *iwqp;
+       iwqp = (struct i40iw_qp *)qp->back_qp;
+-      del_timer(&iwqp->terminate_timer);
++      if (del_timer(&iwqp->terminate_timer))
++              i40iw_rem_ref(&iwqp->ibqp);
+ }
+ /**
+diff --git a/drivers/infiniband/hw/i40iw/i40iw_verbs.c b/drivers/infiniband/hw/i40iw/i40iw_verbs.c
+index 206d72b..18526e6 100644
+--- a/drivers/infiniband/hw/i40iw/i40iw_verbs.c
++++ b/drivers/infiniband/hw/i40iw/i40iw_verbs.c
+@@ -959,7 +959,7 @@ int i40iw_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
+                               goto exit;
+                       }
+                       if (iwqp->sc_qp.term_flags)
+-                              del_timer(&iwqp->terminate_timer);
++                              i40iw_terminate_del_timer(&iwqp->sc_qp);
+                       info.next_iwarp_state = I40IW_QP_STATE_ERROR;
+                       if ((iwqp->hw_tcp_state > I40IW_TCP_STATE_CLOSED) &&
+                           iwdev->iw_status &&
+-- 
+1.8.3.1
+
diff --git a/linux-next-cherry-picks/0047-i40iw-Assign-MSS-only-when-it-is-a-new-MTU.patch b/linux-next-cherry-picks/0047-i40iw-Assign-MSS-only-when-it-is-a-new-MTU.patch
new file mode 100755 (executable)
index 0000000..9cd65da
--- /dev/null
@@ -0,0 +1,33 @@
+From 6b0805c25657f9b702607ed4617c2821343158c0 Mon Sep 17 00:00:00 2001
+From: Mustafa Ismail <mustafa.ismail@intel.com>
+Date: Tue, 6 Dec 2016 15:49:34 -0600
+Subject: [PATCH 47/52] i40iw: Assign MSS only when it is a new MTU
+
+Currently we are changing the MSS regardless of whether
+there is a change or not in MTU. Fix to make the
+assignment of MSS dependent on an MTU change.
+
+Signed-off-by: Mustafa Ismail <mustafa.ismail@intel.com>
+Signed-off-by: Henry Orosco <henry.orosco@intel.com>
+Signed-off-by: Shiraz Saleem <shiraz.saleem@intel.com>
+Signed-off-by: Doug Ledford <dledford@redhat.com>
+---
+ drivers/infiniband/hw/i40iw/i40iw_main.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/drivers/infiniband/hw/i40iw/i40iw_main.c b/drivers/infiniband/hw/i40iw/i40iw_main.c
+index 85d8fa6..cf9d288 100644
+--- a/drivers/infiniband/hw/i40iw/i40iw_main.c
++++ b/drivers/infiniband/hw/i40iw/i40iw_main.c
+@@ -1724,6 +1724,8 @@ static void i40iw_l2param_change(struct i40e_info *ldev, struct i40e_client *cli
+       for (i = 0; i < I40E_CLIENT_MAX_USER_PRIORITY; i++)
+               l2params->qs_handle_list[i] = params->qos.prio_qos[i].qs_handle;
++      l2params->mss = (params->mtu) ? params->mtu - I40IW_MTU_TO_MSS : iwdev->mss;
++
+       INIT_WORK(&work->work, i40iw_l2params_worker);
+       queue_work(iwdev->param_wq, &work->work);
+ }
+-- 
+1.8.3.1
+
diff --git a/linux-next-cherry-picks/0048-i40iw-Fix-incorrect-check-for-error.patch b/linux-next-cherry-picks/0048-i40iw-Fix-incorrect-check-for-error.patch
new file mode 100755 (executable)
index 0000000..a72579c
--- /dev/null
@@ -0,0 +1,30 @@
+From 0cc0d851ccf1746466822c1b7ce02c980406d57f Mon Sep 17 00:00:00 2001
+From: Mustafa Ismail <mustafa.ismail@intel.com>
+Date: Tue, 6 Dec 2016 15:49:35 -0600
+Subject: [PATCH 48/52] i40iw: Fix incorrect check for error
+
+In i40iw_ieq_handle_partial() the check for !status is incorrect.
+
+Signed-off-by: Mustafa Ismail <mustafa.ismail@intel.com>
+Signed-off-by: Henry Orosco <henry.orosco@intel.com>
+Signed-off-by: Doug Ledford <dledford@redhat.com>
+---
+ drivers/infiniband/hw/i40iw/i40iw_puda.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/infiniband/hw/i40iw/i40iw_puda.c b/drivers/infiniband/hw/i40iw/i40iw_puda.c
+index 7541b0d..c3d28ba 100644
+--- a/drivers/infiniband/hw/i40iw/i40iw_puda.c
++++ b/drivers/infiniband/hw/i40iw/i40iw_puda.c
+@@ -1132,7 +1132,7 @@ static enum i40iw_status_code i40iw_ieq_handle_partial(struct i40iw_puda_rsrc *i
+       list_add(&buf->list, &pbufl);
+       status = i40iw_ieq_create_pbufl(pfpdu, rxlist, &pbufl, buf, fpdu_len);
+-      if (!status)
++      if (status)
+               goto error;
+       txbuf = i40iw_puda_get_bufpool(ieq);
+-- 
+1.8.3.1
+
diff --git a/linux-next-cherry-picks/0049-i40iw-Reorganize-structures-to-align-with-HW-capabil.patch b/linux-next-cherry-picks/0049-i40iw-Reorganize-structures-to-align-with-HW-capabil.patch
new file mode 100755 (executable)
index 0000000..f0e82c3
--- /dev/null
@@ -0,0 +1,2464 @@
+From d6f7bbcc2e419c8afd4a426af78b3dac44632268 Mon Sep 17 00:00:00 2001
+From: Henry Orosco <henry.orosco@intel.com>
+Date: Tue, 6 Dec 2016 16:16:20 -0600
+Subject: [PATCH 49/52] i40iw: Reorganize structures to align with HW
+ capabilities
+
+Some resources are incorrectly organized and at odds with
+HW capabilities. Specifically, ILQ, IEQ, QPs, MSS, QOS
+and statistics belong in a VSI.
+
+Signed-off-by: Faisal Latif <faisal.latif@intel.com>
+Signed-off-by: Mustafa Ismail <mustafa.ismail@intel.com>
+Signed-off-by: Henry Orosco <henry.orosco@intel.com>
+Signed-off-by: Doug Ledford <dledford@redhat.com>
+---
+ drivers/infiniband/hw/i40iw/i40iw.h          |   7 +-
+ drivers/infiniband/hw/i40iw/i40iw_cm.c       |  36 +-
+ drivers/infiniband/hw/i40iw/i40iw_ctrl.c     | 527 +++++++++++++++------------
+ drivers/infiniband/hw/i40iw/i40iw_d.h        |  21 +-
+ drivers/infiniband/hw/i40iw/i40iw_hw.c       |   4 +-
+ drivers/infiniband/hw/i40iw/i40iw_main.c     |  53 ++-
+ drivers/infiniband/hw/i40iw/i40iw_osdep.h    |   6 +-
+ drivers/infiniband/hw/i40iw/i40iw_p.h        |  23 +-
+ drivers/infiniband/hw/i40iw/i40iw_puda.c     | 268 ++++++++------
+ drivers/infiniband/hw/i40iw/i40iw_puda.h     |  20 +-
+ drivers/infiniband/hw/i40iw/i40iw_type.h     |  79 ++--
+ drivers/infiniband/hw/i40iw/i40iw_utils.c    | 150 +++++++-
+ drivers/infiniband/hw/i40iw/i40iw_verbs.c    |  17 +-
+ drivers/infiniband/hw/i40iw/i40iw_virtchnl.c |  29 +-
+ 14 files changed, 775 insertions(+), 465 deletions(-)
+
+diff --git a/drivers/infiniband/hw/i40iw/i40iw.h b/drivers/infiniband/hw/i40iw/i40iw.h
+index 2aab85b..da2eb5a 100644
+--- a/drivers/infiniband/hw/i40iw/i40iw.h
++++ b/drivers/infiniband/hw/i40iw/i40iw.h
+@@ -236,6 +236,7 @@ struct i40iw_device {
+       struct net_device *netdev;
+       wait_queue_head_t vchnl_waitq;
+       struct i40iw_sc_dev sc_dev;
++      struct i40iw_sc_vsi vsi;
+       struct i40iw_handler *hdl;
+       struct i40e_info *ldev;
+       struct i40e_client *client;
+@@ -289,7 +290,6 @@ struct i40iw_device {
+       u32 sd_type;
+       struct workqueue_struct *param_wq;
+       atomic_t params_busy;
+-      u32 mss;
+       enum init_completion_state init_state;
+       u16 mac_ip_table_idx;
+       atomic_t vchnl_msgs;
+@@ -525,6 +525,7 @@ enum i40iw_status_code i40iw_handle_cqp_op(struct i40iw_device *iwdev,
+ enum i40iw_status_code i40iw_add_mac_addr(struct i40iw_device *iwdev,
+                                         u8 *mac_addr, u8 *mac_index);
+ int i40iw_modify_qp(struct ib_qp *, struct ib_qp_attr *, int, struct ib_udata *);
++void i40iw_cq_wq_destroy(struct i40iw_device *iwdev, struct i40iw_sc_cq *cq);
+ void i40iw_rem_pdusecount(struct i40iw_pd *iwpd, struct i40iw_device *iwdev);
+ void i40iw_add_pdusecount(struct i40iw_pd *iwpd);
+@@ -542,8 +543,8 @@ enum i40iw_status_code i40iw_manage_qhash(struct i40iw_device *iwdev,
+                                         enum i40iw_quad_hash_manage_type mtype,
+                                         void *cmnode,
+                                         bool wait);
+-void i40iw_receive_ilq(struct i40iw_sc_dev *dev, struct i40iw_puda_buf *rbuf);
+-void i40iw_free_sqbuf(struct i40iw_sc_dev *dev, void *bufp);
++void i40iw_receive_ilq(struct i40iw_sc_vsi *vsi, struct i40iw_puda_buf *rbuf);
++void i40iw_free_sqbuf(struct i40iw_sc_vsi *vsi, void *bufp);
+ void i40iw_free_qp_resources(struct i40iw_device *iwdev,
+                            struct i40iw_qp *iwqp,
+                            u32 qp_num);
+diff --git a/drivers/infiniband/hw/i40iw/i40iw_cm.c b/drivers/infiniband/hw/i40iw/i40iw_cm.c
+index a217d2f..e4820be 100644
+--- a/drivers/infiniband/hw/i40iw/i40iw_cm.c
++++ b/drivers/infiniband/hw/i40iw/i40iw_cm.c
+@@ -68,13 +68,13 @@
+ /**
+  * i40iw_free_sqbuf - put back puda buffer if refcount = 0
+- * @dev: FPK device
++ * @vsi: pointer to vsi structure
+  * @buf: puda buffer to free
+  */
+-void i40iw_free_sqbuf(struct i40iw_sc_dev *dev, void *bufp)
++void i40iw_free_sqbuf(struct i40iw_sc_vsi *vsi, void *bufp)
+ {
+       struct i40iw_puda_buf *buf = (struct i40iw_puda_buf *)bufp;
+-      struct i40iw_puda_rsrc *ilq = dev->ilq;
++      struct i40iw_puda_rsrc *ilq = vsi->ilq;
+       if (!atomic_dec_return(&buf->refcount))
+               i40iw_puda_ret_bufpool(ilq, buf);
+@@ -337,13 +337,13 @@ static struct i40iw_cm_event *i40iw_create_event(struct i40iw_cm_node *cm_node,
+  */
+ static void i40iw_free_retrans_entry(struct i40iw_cm_node *cm_node)
+ {
+-      struct i40iw_sc_dev *dev = cm_node->dev;
++      struct i40iw_device *iwdev = cm_node->iwdev;
+       struct i40iw_timer_entry *send_entry;
+       send_entry = cm_node->send_entry;
+       if (send_entry) {
+               cm_node->send_entry = NULL;
+-              i40iw_free_sqbuf(dev, (void *)send_entry->sqbuf);
++              i40iw_free_sqbuf(&iwdev->vsi, (void *)send_entry->sqbuf);
+               kfree(send_entry);
+               atomic_dec(&cm_node->ref_count);
+       }
+@@ -377,7 +377,7 @@ static struct i40iw_puda_buf *i40iw_form_cm_frame(struct i40iw_cm_node *cm_node,
+                                                 u8 flags)
+ {
+       struct i40iw_puda_buf *sqbuf;
+-      struct i40iw_sc_dev *dev = cm_node->dev;
++      struct i40iw_sc_vsi *vsi = &cm_node->iwdev->vsi;
+       u8 *buf;
+       struct tcphdr *tcph;
+@@ -391,7 +391,7 @@ static struct i40iw_puda_buf *i40iw_form_cm_frame(struct i40iw_cm_node *cm_node,
+       u32 hdr_len = 0;
+       u16 vtag;
+-      sqbuf = i40iw_puda_get_bufpool(dev->ilq);
++      sqbuf = i40iw_puda_get_bufpool(vsi->ilq);
+       if (!sqbuf)
+               return NULL;
+       buf = sqbuf->mem.va;
+@@ -1059,7 +1059,7 @@ int i40iw_schedule_cm_timer(struct i40iw_cm_node *cm_node,
+                           int send_retrans,
+                           int close_when_complete)
+ {
+-      struct i40iw_sc_dev *dev = cm_node->dev;
++      struct i40iw_sc_vsi *vsi = &cm_node->iwdev->vsi;
+       struct i40iw_cm_core *cm_core = cm_node->cm_core;
+       struct i40iw_timer_entry *new_send;
+       int ret = 0;
+@@ -1068,7 +1068,7 @@ int i40iw_schedule_cm_timer(struct i40iw_cm_node *cm_node,
+       new_send = kzalloc(sizeof(*new_send), GFP_ATOMIC);
+       if (!new_send) {
+-              i40iw_free_sqbuf(cm_node->dev, (void *)sqbuf);
++              i40iw_free_sqbuf(vsi, (void *)sqbuf);
+               return -ENOMEM;
+       }
+       new_send->retrycount = I40IW_DEFAULT_RETRYS;
+@@ -1083,7 +1083,7 @@ int i40iw_schedule_cm_timer(struct i40iw_cm_node *cm_node,
+               new_send->timetosend += (HZ / 10);
+               if (cm_node->close_entry) {
+                       kfree(new_send);
+-                      i40iw_free_sqbuf(cm_node->dev, (void *)sqbuf);
++                      i40iw_free_sqbuf(vsi, (void *)sqbuf);
+                       i40iw_pr_err("already close entry\n");
+                       return -EINVAL;
+               }
+@@ -1098,7 +1098,7 @@ int i40iw_schedule_cm_timer(struct i40iw_cm_node *cm_node,
+               new_send->timetosend = jiffies + I40IW_RETRY_TIMEOUT;
+               atomic_inc(&sqbuf->refcount);
+-              i40iw_puda_send_buf(dev->ilq, sqbuf);
++              i40iw_puda_send_buf(vsi->ilq, sqbuf);
+               if (!send_retrans) {
+                       i40iw_cleanup_retrans_entry(cm_node);
+                       if (close_when_complete)
+@@ -1195,6 +1195,7 @@ static void i40iw_cm_timer_tick(unsigned long pass)
+       struct i40iw_cm_node *cm_node;
+       struct i40iw_timer_entry *send_entry, *close_entry;
+       struct list_head *list_core_temp;
++      struct i40iw_sc_vsi *vsi;
+       struct list_head *list_node;
+       struct i40iw_cm_core *cm_core = (struct i40iw_cm_core *)pass;
+       u32 settimer = 0;
+@@ -1270,9 +1271,10 @@ static void i40iw_cm_timer_tick(unsigned long pass)
+               cm_node->cm_core->stats_pkt_retrans++;
+               spin_unlock_irqrestore(&cm_node->retrans_list_lock, flags);
++              vsi = &cm_node->iwdev->vsi;
+               dev = cm_node->dev;
+               atomic_inc(&send_entry->sqbuf->refcount);
+-              i40iw_puda_send_buf(dev->ilq, send_entry->sqbuf);
++              i40iw_puda_send_buf(vsi->ilq, send_entry->sqbuf);
+               spin_lock_irqsave(&cm_node->retrans_list_lock, flags);
+               if (send_entry->send_retrans) {
+                       send_entry->retranscount--;
+@@ -1373,10 +1375,11 @@ int i40iw_send_syn(struct i40iw_cm_node *cm_node, u32 sendack)
+ static void i40iw_send_ack(struct i40iw_cm_node *cm_node)
+ {
+       struct i40iw_puda_buf *sqbuf;
++      struct i40iw_sc_vsi *vsi = &cm_node->iwdev->vsi;
+       sqbuf = i40iw_form_cm_frame(cm_node, NULL, NULL, NULL, SET_ACK);
+       if (sqbuf)
+-              i40iw_puda_send_buf(cm_node->dev->ilq, sqbuf);
++              i40iw_puda_send_buf(vsi->ilq, sqbuf);
+       else
+               i40iw_pr_err("no sqbuf\n");
+ }
+@@ -2179,7 +2182,7 @@ static struct i40iw_cm_node *i40iw_make_cm_node(
+                       I40IW_CM_DEFAULT_RCV_WND_SCALED >> I40IW_CM_DEFAULT_RCV_WND_SCALE;
+       ts = current_kernel_time();
+       cm_node->tcp_cntxt.loc_seq_num = ts.tv_nsec;
+-      cm_node->tcp_cntxt.mss = iwdev->mss;
++      cm_node->tcp_cntxt.mss = iwdev->vsi.mss;
+       cm_node->iwdev = iwdev;
+       cm_node->dev = &iwdev->sc_dev;
+@@ -3059,10 +3062,10 @@ static int i40iw_cm_close(struct i40iw_cm_node *cm_node)
+ /**
+  * i40iw_receive_ilq - recv an ETHERNET packet, and process it
+  * through CM
+- * @dev: FPK dev struct
++ * @vsi: pointer to the vsi structure
+  * @rbuf: receive buffer
+  */
+-void i40iw_receive_ilq(struct i40iw_sc_dev *dev, struct i40iw_puda_buf *rbuf)
++void i40iw_receive_ilq(struct i40iw_sc_vsi *vsi, struct i40iw_puda_buf *rbuf)
+ {
+       struct i40iw_cm_node *cm_node;
+       struct i40iw_cm_listener *listener;
+@@ -3070,6 +3073,7 @@ void i40iw_receive_ilq(struct i40iw_sc_dev *dev, struct i40iw_puda_buf *rbuf)
+       struct ipv6hdr *ip6h;
+       struct tcphdr *tcph;
+       struct i40iw_cm_info cm_info;
++      struct i40iw_sc_dev *dev = vsi->dev;
+       struct i40iw_device *iwdev = (struct i40iw_device *)dev->back_dev;
+       struct i40iw_cm_core *cm_core = &iwdev->cm_core;
+       struct vlan_ethhdr *ethh;
+diff --git a/drivers/infiniband/hw/i40iw/i40iw_ctrl.c b/drivers/infiniband/hw/i40iw/i40iw_ctrl.c
+index a135037..392f783 100644
+--- a/drivers/infiniband/hw/i40iw/i40iw_ctrl.c
++++ b/drivers/infiniband/hw/i40iw/i40iw_ctrl.c
+@@ -103,6 +103,7 @@ static enum i40iw_status_code i40iw_cqp_poll_registers(
+               if (newtail != tail) {
+                       /* SUCCESS */
+                       I40IW_RING_MOVE_TAIL(cqp->sq_ring);
++                      cqp->dev->cqp_cmd_stats[OP_COMPLETED_COMMANDS]++;
+                       return 0;
+               }
+               udelay(I40IW_SLEEP_COUNT);
+@@ -276,11 +277,12 @@ static struct i40iw_sc_qp *i40iw_get_qp(struct list_head *head, struct i40iw_sc_
+ /**
+  * i40iw_change_l2params - given the new l2 parameters, change all qp
+- * @dev: IWARP device pointer
++ * @vsi: pointer to the vsi structure
+  * @l2params: New paramaters from l2
+  */
+-void i40iw_change_l2params(struct i40iw_sc_dev *dev, struct i40iw_l2params *l2params)
++void i40iw_change_l2params(struct i40iw_sc_vsi *vsi, struct i40iw_l2params *l2params)
+ {
++      struct i40iw_sc_dev *dev = vsi->dev;
+       struct i40iw_sc_qp *qp = NULL;
+       bool qs_handle_change = false;
+       bool mss_change = false;
+@@ -288,20 +290,20 @@ void i40iw_change_l2params(struct i40iw_sc_dev *dev, struct i40iw_l2params *l2pa
+       u16 qs_handle;
+       int i;
+-      if (dev->mss != l2params->mss) {
++      if (vsi->mss != l2params->mss) {
+               mss_change = true;
+-              dev->mss = l2params->mss;
++              vsi->mss = l2params->mss;
+       }
+       i40iw_fill_qos_list(l2params->qs_handle_list);
+       for (i = 0; i < I40IW_MAX_USER_PRIORITY; i++) {
+               qs_handle = l2params->qs_handle_list[i];
+-              if (dev->qos[i].qs_handle != qs_handle)
++              if (vsi->qos[i].qs_handle != qs_handle)
+                       qs_handle_change = true;
+               else if (!mss_change)
+                       continue;       /* no MSS nor qs handle change */
+-              spin_lock_irqsave(&dev->qos[i].lock, flags);
+-              qp = i40iw_get_qp(&dev->qos[i].qplist, qp);
++              spin_lock_irqsave(&vsi->qos[i].lock, flags);
++              qp = i40iw_get_qp(&vsi->qos[i].qplist, qp);
+               while (qp) {
+                       if (mss_change)
+                               i40iw_qp_mss_modify(dev, qp);
+@@ -310,43 +312,45 @@ void i40iw_change_l2params(struct i40iw_sc_dev *dev, struct i40iw_l2params *l2pa
+                               /* issue cqp suspend command */
+                               i40iw_qp_suspend_resume(dev, qp, true);
+                       }
+-                      qp = i40iw_get_qp(&dev->qos[i].qplist, qp);
++                      qp = i40iw_get_qp(&vsi->qos[i].qplist, qp);
+               }
+-              spin_unlock_irqrestore(&dev->qos[i].lock, flags);
+-              dev->qos[i].qs_handle = qs_handle;
++              spin_unlock_irqrestore(&vsi->qos[i].lock, flags);
++              vsi->qos[i].qs_handle = qs_handle;
+       }
+ }
+ /**
+  * i40iw_qp_rem_qos - remove qp from qos lists during destroy qp
+- * @dev: IWARP device pointer
+  * @qp: qp to be removed from qos
+  */
+-static void i40iw_qp_rem_qos(struct i40iw_sc_dev *dev, struct i40iw_sc_qp *qp)
++static void i40iw_qp_rem_qos(struct i40iw_sc_qp *qp)
+ {
++      struct i40iw_sc_vsi *vsi = qp->vsi;
+       unsigned long flags;
+       if (!qp->on_qoslist)
+               return;
+-      spin_lock_irqsave(&dev->qos[qp->user_pri].lock, flags);
++      spin_lock_irqsave(&vsi->qos[qp->user_pri].lock, flags);
+       list_del(&qp->list);
+-      spin_unlock_irqrestore(&dev->qos[qp->user_pri].lock, flags);
++      spin_unlock_irqrestore(&vsi->qos[qp->user_pri].lock, flags);
+ }
+ /**
+  * i40iw_qp_add_qos - called during setctx fot qp to be added to qos
+- * @dev: IWARP device pointer
+  * @qp: qp to be added to qos
+  */
+-void i40iw_qp_add_qos(struct i40iw_sc_dev *dev, struct i40iw_sc_qp *qp)
++void i40iw_qp_add_qos(struct i40iw_sc_qp *qp)
+ {
++      struct i40iw_sc_vsi *vsi = qp->vsi;
+       unsigned long flags;
+-      spin_lock_irqsave(&dev->qos[qp->user_pri].lock, flags);
+-      qp->qs_handle = dev->qos[qp->user_pri].qs_handle;
+-      list_add(&qp->list, &dev->qos[qp->user_pri].qplist);
++      if (qp->on_qoslist)
++              return;
++      spin_lock_irqsave(&vsi->qos[qp->user_pri].lock, flags);
++      qp->qs_handle = vsi->qos[qp->user_pri].qs_handle;
++      list_add(&qp->list, &vsi->qos[qp->user_pri].qplist);
+       qp->on_qoslist = true;
+-      spin_unlock_irqrestore(&dev->qos[qp->user_pri].lock, flags);
++      spin_unlock_irqrestore(&vsi->qos[qp->user_pri].lock, flags);
+ }
+ /**
+@@ -419,6 +423,9 @@ static enum i40iw_status_code i40iw_sc_cqp_init(struct i40iw_sc_cqp *cqp,
+       info->dev->cqp = cqp;
+       I40IW_RING_INIT(cqp->sq_ring, cqp->sq_size);
++      cqp->dev->cqp_cmd_stats[OP_REQUESTED_COMMANDS] = 0;
++      cqp->dev->cqp_cmd_stats[OP_COMPLETED_COMMANDS] = 0;
++
+       i40iw_debug(cqp->dev, I40IW_DEBUG_WQE,
+                   "%s: sq_size[%04d] hw_sq_size[%04d] sq_base[%p] sq_pa[%llxh] cqp[%p] polarity[x%04X]\n",
+                   __func__, cqp->sq_size, cqp->hw_sq_size,
+@@ -546,6 +553,7 @@ u64 *i40iw_sc_cqp_get_next_send_wqe(struct i40iw_sc_cqp *cqp, u64 scratch)
+               return NULL;
+       }
+       I40IW_ATOMIC_RING_MOVE_HEAD(cqp->sq_ring, wqe_idx, ret_code);
++      cqp->dev->cqp_cmd_stats[OP_REQUESTED_COMMANDS]++;
+       if (ret_code)
+               return NULL;
+       if (!wqe_idx)
+@@ -681,6 +689,8 @@ static enum i40iw_status_code i40iw_sc_ccq_get_cqe_info(
+                     I40IW_RING_GETCURRENT_HEAD(ccq->cq_uk.cq_ring));
+       wmb(); /* write shadow area before tail */
+       I40IW_RING_MOVE_TAIL(cqp->sq_ring);
++      ccq->dev->cqp_cmd_stats[OP_COMPLETED_COMMANDS]++;
++
+       return ret_code;
+ }
+@@ -1173,6 +1183,7 @@ static enum i40iw_status_code i40iw_sc_manage_qhash_table_entry(
+       u64 qw1 = 0;
+       u64 qw2 = 0;
+       u64 temp;
++      struct i40iw_sc_vsi *vsi = info->vsi;
+       wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
+       if (!wqe)
+@@ -1204,7 +1215,7 @@ static enum i40iw_status_code i40iw_sc_manage_qhash_table_entry(
+                             LS_64(info->dest_ip[2], I40IW_CQPSQ_QHASH_ADDR2) |
+                             LS_64(info->dest_ip[3], I40IW_CQPSQ_QHASH_ADDR3));
+       }
+-      qw2 = LS_64(cqp->dev->qos[info->user_pri].qs_handle, I40IW_CQPSQ_QHASH_QS_HANDLE);
++      qw2 = LS_64(vsi->qos[info->user_pri].qs_handle, I40IW_CQPSQ_QHASH_QS_HANDLE);
+       if (info->vlan_valid)
+               qw2 |= LS_64(info->vlan_id, I40IW_CQPSQ_QHASH_VLANID);
+       set_64bit_val(wqe, 16, qw2);
+@@ -2225,6 +2236,7 @@ static enum i40iw_status_code i40iw_sc_qp_init(struct i40iw_sc_qp *qp,
+       u32 offset;
+       qp->dev = info->pd->dev;
++      qp->vsi = info->vsi;
+       qp->sq_pa = info->sq_pa;
+       qp->rq_pa = info->rq_pa;
+       qp->hw_host_ctx_pa = info->host_ctx_pa;
+@@ -2273,7 +2285,7 @@ static enum i40iw_status_code i40iw_sc_qp_init(struct i40iw_sc_qp *qp,
+       qp->rq_tph_en = info->rq_tph_en;
+       qp->rcv_tph_en = info->rcv_tph_en;
+       qp->xmit_tph_en = info->xmit_tph_en;
+-      qp->qs_handle = qp->pd->dev->qos[qp->user_pri].qs_handle;
++      qp->qs_handle = qp->vsi->qos[qp->user_pri].qs_handle;
+       qp->exception_lan_queue = qp->pd->dev->exception_lan_queue;
+       return 0;
+@@ -2418,7 +2430,7 @@ static enum i40iw_status_code i40iw_sc_qp_destroy(
+       struct i40iw_sc_cqp *cqp;
+       u64 header;
+-      i40iw_qp_rem_qos(qp->pd->dev, qp);
++      i40iw_qp_rem_qos(qp);
+       cqp = qp->pd->dev->cqp;
+       wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
+       if (!wqe)
+@@ -2566,13 +2578,17 @@ static enum i40iw_status_code i40iw_sc_qp_setctx(
+ {
+       struct i40iwarp_offload_info *iw;
+       struct i40iw_tcp_offload_info *tcp;
++      struct i40iw_sc_vsi *vsi;
++      struct i40iw_sc_dev *dev;
+       u64 qw0, qw3, qw7 = 0;
+       iw = info->iwarp_info;
+       tcp = info->tcp_info;
++      vsi = qp->vsi;
++      dev = qp->dev;
+       if (info->add_to_qoslist) {
+               qp->user_pri = info->user_pri;
+-              i40iw_qp_add_qos(qp->pd->dev, qp);
++              i40iw_qp_add_qos(qp);
+               i40iw_debug(qp->dev, I40IW_DEBUG_DCB, "%s qp[%d] UP[%d] qset[%d]\n",
+                           __func__, qp->qp_uk.qp_id, qp->user_pri, qp->qs_handle);
+       }
+@@ -2616,7 +2632,10 @@ static enum i40iw_status_code i40iw_sc_qp_setctx(
+                      LS_64(iw->rdmap_ver, I40IWQPC_RDMAP_VER);
+               qw7 |= LS_64(iw->pd_id, I40IWQPC_PDIDX);
+-              set_64bit_val(qp_ctx, 144, qp->q2_pa);
++              set_64bit_val(qp_ctx,
++                            144,
++                            LS_64(qp->q2_pa, I40IWQPC_Q2ADDR) |
++                            LS_64(vsi->fcn_id, I40IWQPC_STAT_INDEX));
+               set_64bit_val(qp_ctx,
+                             152,
+                             LS_64(iw->last_byte_sent, I40IWQPC_LASTBYTESENT));
+@@ -2631,6 +2650,9 @@ static enum i40iw_status_code i40iw_sc_qp_setctx(
+                             LS_64(iw->bind_en, I40IWQPC_BINDEN) |
+                             LS_64(iw->fast_reg_en, I40IWQPC_FASTREGEN) |
+                             LS_64(iw->priv_mode_en, I40IWQPC_PRIVEN) |
++                            LS_64((((vsi->stats_fcn_id_alloc) &&
++                                    (dev->is_pf) && (vsi->fcn_id >= I40IW_FIRST_NON_PF_STAT)) ? 1 : 0),
++                                  I40IWQPC_USESTATSINSTANCE) |
+                             LS_64(1, I40IWQPC_IWARPMODE) |
+                             LS_64(iw->rcv_mark_en, I40IWQPC_RCVMARKERS) |
+                             LS_64(iw->align_hdrs, I40IWQPC_ALIGNHDRS) |
+@@ -4447,286 +4469,370 @@ void i40iw_terminate_received(struct i40iw_sc_qp *qp, struct i40iw_aeqe_info *in
+ }
+ /**
+- * i40iw_hw_stat_init - Initiliaze HW stats table
+- * @devstat: pestat struct
++ * i40iw_sc_vsi_init - Initialize virtual device
++ * @vsi: pointer to the vsi structure
++ * @info: parameters to initialize vsi
++ **/
++void i40iw_sc_vsi_init(struct i40iw_sc_vsi *vsi, struct i40iw_vsi_init_info *info)
++{
++      int i;
++
++      vsi->dev = info->dev;
++      vsi->back_vsi = info->back_vsi;
++      vsi->mss = info->params->mss;
++      i40iw_fill_qos_list(info->params->qs_handle_list);
++
++      for (i = 0; i < I40IW_MAX_USER_PRIORITY; i++) {
++              vsi->qos[i].qs_handle =
++                      info->params->qs_handle_list[i];
++                      i40iw_debug(vsi->dev, I40IW_DEBUG_DCB, "qset[%d]: %d\n", i, vsi->qos[i].qs_handle);
++              spin_lock_init(&vsi->qos[i].lock);
++              INIT_LIST_HEAD(&vsi->qos[i].qplist);
++      }
++}
++
++/**
++ * i40iw_hw_stats_init - Initialize HW stats table
++ * @stats: pestat struct
+  * @fcn_idx: PCI fn id
+- * @hw: PF i40iw_hw structure.
+  * @is_pf: Is it a PF?
+  *
+- * Populate the HW stat table with register offset addr for each
+- * stat. And start the perioidic stats timer.
++ * Populate the HW stats table with register offset addr for each
++ * stats. And start the periodic stats timer.
+  */
+-static void i40iw_hw_stat_init(struct i40iw_dev_pestat *devstat,
+-                             u8 fcn_idx,
+-                             struct i40iw_hw *hw, bool is_pf)
++void i40iw_hw_stats_init(struct i40iw_vsi_pestat *stats, u8 fcn_idx, bool is_pf)
+ {
+-      u32 stat_reg_offset;
+-      u32 stat_index;
+-      struct i40iw_dev_hw_stat_offsets *stat_table =
+-              &devstat->hw_stat_offsets;
+-      struct i40iw_dev_hw_stats *last_rd_stats = &devstat->last_read_hw_stats;
+-
+-      devstat->hw = hw;
++      u32 stats_reg_offset;
++      u32 stats_index;
++      struct i40iw_dev_hw_stats_offsets *stats_table =
++              &stats->hw_stats_offsets;
++      struct i40iw_dev_hw_stats *last_rd_stats = &stats->last_read_hw_stats;
+       if (is_pf) {
+-              stat_table->stat_offset_32[I40IW_HW_STAT_INDEX_IP4RXDISCARD] =
++              stats_table->stats_offset_32[I40IW_HW_STAT_INDEX_IP4RXDISCARD] =
+                               I40E_GLPES_PFIP4RXDISCARD(fcn_idx);
+-              stat_table->stat_offset_32[I40IW_HW_STAT_INDEX_IP4RXTRUNC] =
++              stats_table->stats_offset_32[I40IW_HW_STAT_INDEX_IP4RXTRUNC] =
+                               I40E_GLPES_PFIP4RXTRUNC(fcn_idx);
+-              stat_table->stat_offset_32[I40IW_HW_STAT_INDEX_IP4TXNOROUTE] =
++              stats_table->stats_offset_32[I40IW_HW_STAT_INDEX_IP4TXNOROUTE] =
+                               I40E_GLPES_PFIP4TXNOROUTE(fcn_idx);
+-              stat_table->stat_offset_32[I40IW_HW_STAT_INDEX_IP6RXDISCARD] =
++              stats_table->stats_offset_32[I40IW_HW_STAT_INDEX_IP6RXDISCARD] =
+                               I40E_GLPES_PFIP6RXDISCARD(fcn_idx);
+-              stat_table->stat_offset_32[I40IW_HW_STAT_INDEX_IP6RXTRUNC] =
++              stats_table->stats_offset_32[I40IW_HW_STAT_INDEX_IP6RXTRUNC] =
+                               I40E_GLPES_PFIP6RXTRUNC(fcn_idx);
+-              stat_table->stat_offset_32[I40IW_HW_STAT_INDEX_IP6TXNOROUTE] =
++              stats_table->stats_offset_32[I40IW_HW_STAT_INDEX_IP6TXNOROUTE] =
+                               I40E_GLPES_PFIP6TXNOROUTE(fcn_idx);
+-              stat_table->stat_offset_32[I40IW_HW_STAT_INDEX_TCPRTXSEG] =
++              stats_table->stats_offset_32[I40IW_HW_STAT_INDEX_TCPRTXSEG] =
+                               I40E_GLPES_PFTCPRTXSEG(fcn_idx);
+-              stat_table->stat_offset_32[I40IW_HW_STAT_INDEX_TCPRXOPTERR] =
++              stats_table->stats_offset_32[I40IW_HW_STAT_INDEX_TCPRXOPTERR] =
+                               I40E_GLPES_PFTCPRXOPTERR(fcn_idx);
+-              stat_table->stat_offset_32[I40IW_HW_STAT_INDEX_TCPRXPROTOERR] =
++              stats_table->stats_offset_32[I40IW_HW_STAT_INDEX_TCPRXPROTOERR] =
+                               I40E_GLPES_PFTCPRXPROTOERR(fcn_idx);
+-              stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP4RXOCTS] =
++              stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP4RXOCTS] =
+                               I40E_GLPES_PFIP4RXOCTSLO(fcn_idx);
+-              stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP4RXPKTS] =
++              stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP4RXPKTS] =
+                               I40E_GLPES_PFIP4RXPKTSLO(fcn_idx);
+-              stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP4RXFRAGS] =
++              stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP4RXFRAGS] =
+                               I40E_GLPES_PFIP4RXFRAGSLO(fcn_idx);
+-              stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP4RXMCPKTS] =
++              stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP4RXMCPKTS] =
+                               I40E_GLPES_PFIP4RXMCPKTSLO(fcn_idx);
+-              stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP4TXOCTS] =
++              stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP4TXOCTS] =
+                               I40E_GLPES_PFIP4TXOCTSLO(fcn_idx);
+-              stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP4TXPKTS] =
++              stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP4TXPKTS] =
+                               I40E_GLPES_PFIP4TXPKTSLO(fcn_idx);
+-              stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP4TXFRAGS] =
++              stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP4TXFRAGS] =
+                               I40E_GLPES_PFIP4TXFRAGSLO(fcn_idx);
+-              stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP4TXMCPKTS] =
++              stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP4TXMCPKTS] =
+                               I40E_GLPES_PFIP4TXMCPKTSLO(fcn_idx);
+-              stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP6RXOCTS] =
++              stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP6RXOCTS] =
+                               I40E_GLPES_PFIP6RXOCTSLO(fcn_idx);
+-              stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP6RXPKTS] =
++              stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP6RXPKTS] =
+                               I40E_GLPES_PFIP6RXPKTSLO(fcn_idx);
+-              stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP6RXFRAGS] =
++              stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP6RXFRAGS] =
+                               I40E_GLPES_PFIP6RXFRAGSLO(fcn_idx);
+-              stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP6RXMCPKTS] =
++              stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP6RXMCPKTS] =
+                               I40E_GLPES_PFIP6RXMCPKTSLO(fcn_idx);
+-              stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP6TXOCTS] =
++              stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP6TXOCTS] =
+                               I40E_GLPES_PFIP6TXOCTSLO(fcn_idx);
+-              stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP6TXPKTS] =
++              stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP6TXPKTS] =
+                               I40E_GLPES_PFIP6TXPKTSLO(fcn_idx);
+-              stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP6TXPKTS] =
++              stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP6TXPKTS] =
+                               I40E_GLPES_PFIP6TXPKTSLO(fcn_idx);
+-              stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP6TXFRAGS] =
++              stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP6TXFRAGS] =
+                               I40E_GLPES_PFIP6TXFRAGSLO(fcn_idx);
+-              stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_TCPRXSEGS] =
++              stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_TCPRXSEGS] =
+                               I40E_GLPES_PFTCPRXSEGSLO(fcn_idx);
+-              stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_TCPTXSEG] =
++              stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_TCPTXSEG] =
+                               I40E_GLPES_PFTCPTXSEGLO(fcn_idx);
+-              stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_RDMARXRDS] =
++              stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_RDMARXRDS] =
+                               I40E_GLPES_PFRDMARXRDSLO(fcn_idx);
+-              stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_RDMARXSNDS] =
++              stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_RDMARXSNDS] =
+                               I40E_GLPES_PFRDMARXSNDSLO(fcn_idx);
+-              stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_RDMARXWRS] =
++              stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_RDMARXWRS] =
+                               I40E_GLPES_PFRDMARXWRSLO(fcn_idx);
+-              stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_RDMATXRDS] =
++              stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_RDMATXRDS] =
+                               I40E_GLPES_PFRDMATXRDSLO(fcn_idx);
+-              stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_RDMATXSNDS] =
++              stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_RDMATXSNDS] =
+                               I40E_GLPES_PFRDMATXSNDSLO(fcn_idx);
+-              stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_RDMATXWRS] =
++              stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_RDMATXWRS] =
+                               I40E_GLPES_PFRDMATXWRSLO(fcn_idx);
+-              stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_RDMAVBND] =
++              stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_RDMAVBND] =
+                               I40E_GLPES_PFRDMAVBNDLO(fcn_idx);
+-              stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_RDMAVINV] =
++              stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_RDMAVINV] =
+                               I40E_GLPES_PFRDMAVINVLO(fcn_idx);
+       } else {
+-              stat_table->stat_offset_32[I40IW_HW_STAT_INDEX_IP4RXDISCARD] =
++              stats_table->stats_offset_32[I40IW_HW_STAT_INDEX_IP4RXDISCARD] =
+                               I40E_GLPES_VFIP4RXDISCARD(fcn_idx);
+-              stat_table->stat_offset_32[I40IW_HW_STAT_INDEX_IP4RXTRUNC] =
++              stats_table->stats_offset_32[I40IW_HW_STAT_INDEX_IP4RXTRUNC] =
+                               I40E_GLPES_VFIP4RXTRUNC(fcn_idx);
+-              stat_table->stat_offset_32[I40IW_HW_STAT_INDEX_IP4TXNOROUTE] =
++              stats_table->stats_offset_32[I40IW_HW_STAT_INDEX_IP4TXNOROUTE] =
+                               I40E_GLPES_VFIP4TXNOROUTE(fcn_idx);
+-              stat_table->stat_offset_32[I40IW_HW_STAT_INDEX_IP6RXDISCARD] =
++              stats_table->stats_offset_32[I40IW_HW_STAT_INDEX_IP6RXDISCARD] =
+                               I40E_GLPES_VFIP6RXDISCARD(fcn_idx);
+-              stat_table->stat_offset_32[I40IW_HW_STAT_INDEX_IP6RXTRUNC] =
++              stats_table->stats_offset_32[I40IW_HW_STAT_INDEX_IP6RXTRUNC] =
+                               I40E_GLPES_VFIP6RXTRUNC(fcn_idx);
+-              stat_table->stat_offset_32[I40IW_HW_STAT_INDEX_IP6TXNOROUTE] =
++              stats_table->stats_offset_32[I40IW_HW_STAT_INDEX_IP6TXNOROUTE] =
+                               I40E_GLPES_VFIP6TXNOROUTE(fcn_idx);
+-              stat_table->stat_offset_32[I40IW_HW_STAT_INDEX_TCPRTXSEG] =
++              stats_table->stats_offset_32[I40IW_HW_STAT_INDEX_TCPRTXSEG] =
+                               I40E_GLPES_VFTCPRTXSEG(fcn_idx);
+-              stat_table->stat_offset_32[I40IW_HW_STAT_INDEX_TCPRXOPTERR] =
++              stats_table->stats_offset_32[I40IW_HW_STAT_INDEX_TCPRXOPTERR] =
+                               I40E_GLPES_VFTCPRXOPTERR(fcn_idx);
+-              stat_table->stat_offset_32[I40IW_HW_STAT_INDEX_TCPRXPROTOERR] =
++              stats_table->stats_offset_32[I40IW_HW_STAT_INDEX_TCPRXPROTOERR] =
+                               I40E_GLPES_VFTCPRXPROTOERR(fcn_idx);
+-              stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP4RXOCTS] =
++              stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP4RXOCTS] =
+                               I40E_GLPES_VFIP4RXOCTSLO(fcn_idx);
+-              stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP4RXPKTS] =
++              stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP4RXPKTS] =
+                               I40E_GLPES_VFIP4RXPKTSLO(fcn_idx);
+-              stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP4RXFRAGS] =
++              stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP4RXFRAGS] =
+                               I40E_GLPES_VFIP4RXFRAGSLO(fcn_idx);
+-              stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP4RXMCPKTS] =
++              stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP4RXMCPKTS] =
+                               I40E_GLPES_VFIP4RXMCPKTSLO(fcn_idx);
+-              stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP4TXOCTS] =
++              stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP4TXOCTS] =
+                               I40E_GLPES_VFIP4TXOCTSLO(fcn_idx);
+-              stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP4TXPKTS] =
++              stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP4TXPKTS] =
+                               I40E_GLPES_VFIP4TXPKTSLO(fcn_idx);
+-              stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP4TXFRAGS] =
++              stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP4TXFRAGS] =
+                               I40E_GLPES_VFIP4TXFRAGSLO(fcn_idx);
+-              stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP4TXMCPKTS] =
++              stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP4TXMCPKTS] =
+                               I40E_GLPES_VFIP4TXMCPKTSLO(fcn_idx);
+-              stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP6RXOCTS] =
++              stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP6RXOCTS] =
+                               I40E_GLPES_VFIP6RXOCTSLO(fcn_idx);
+-              stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP6RXPKTS] =
++              stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP6RXPKTS] =
+                               I40E_GLPES_VFIP6RXPKTSLO(fcn_idx);
+-              stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP6RXFRAGS] =
++              stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP6RXFRAGS] =
+                               I40E_GLPES_VFIP6RXFRAGSLO(fcn_idx);
+-              stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP6RXMCPKTS] =
++              stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP6RXMCPKTS] =
+                               I40E_GLPES_VFIP6RXMCPKTSLO(fcn_idx);
+-              stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP6TXOCTS] =
++              stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP6TXOCTS] =
+                               I40E_GLPES_VFIP6TXOCTSLO(fcn_idx);
+-              stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP6TXPKTS] =
++              stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP6TXPKTS] =
+                               I40E_GLPES_VFIP6TXPKTSLO(fcn_idx);
+-              stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP6TXPKTS] =
++              stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP6TXPKTS] =
+                               I40E_GLPES_VFIP6TXPKTSLO(fcn_idx);
+-              stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP6TXFRAGS] =
++              stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP6TXFRAGS] =
+                               I40E_GLPES_VFIP6TXFRAGSLO(fcn_idx);
+-              stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_TCPRXSEGS] =
++              stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_TCPRXSEGS] =
+                               I40E_GLPES_VFTCPRXSEGSLO(fcn_idx);
+-              stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_TCPTXSEG] =
++              stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_TCPTXSEG] =
+                               I40E_GLPES_VFTCPTXSEGLO(fcn_idx);
+-              stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_RDMARXRDS] =
++              stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_RDMARXRDS] =
+                               I40E_GLPES_VFRDMARXRDSLO(fcn_idx);
+-              stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_RDMARXSNDS] =
++              stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_RDMARXSNDS] =
+                               I40E_GLPES_VFRDMARXSNDSLO(fcn_idx);
+-              stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_RDMARXWRS] =
++              stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_RDMARXWRS] =
+                               I40E_GLPES_VFRDMARXWRSLO(fcn_idx);
+-              stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_RDMATXRDS] =
++              stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_RDMATXRDS] =
+                               I40E_GLPES_VFRDMATXRDSLO(fcn_idx);
+-              stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_RDMATXSNDS] =
++              stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_RDMATXSNDS] =
+                               I40E_GLPES_VFRDMATXSNDSLO(fcn_idx);
+-              stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_RDMATXWRS] =
++              stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_RDMATXWRS] =
+                               I40E_GLPES_VFRDMATXWRSLO(fcn_idx);
+-              stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_RDMAVBND] =
++              stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_RDMAVBND] =
+                               I40E_GLPES_VFRDMAVBNDLO(fcn_idx);
+-              stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_RDMAVINV] =
++              stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_RDMAVINV] =
+                               I40E_GLPES_VFRDMAVINVLO(fcn_idx);
+       }
+-      for (stat_index = 0; stat_index < I40IW_HW_STAT_INDEX_MAX_64;
+-           stat_index++) {
+-              stat_reg_offset = stat_table->stat_offset_64[stat_index];
+-              last_rd_stats->stat_value_64[stat_index] =
+-                      readq(devstat->hw->hw_addr + stat_reg_offset);
++      for (stats_index = 0; stats_index < I40IW_HW_STAT_INDEX_MAX_64;
++           stats_index++) {
++              stats_reg_offset = stats_table->stats_offset_64[stats_index];
++              last_rd_stats->stats_value_64[stats_index] =
++                      readq(stats->hw->hw_addr + stats_reg_offset);
+       }
+-      for (stat_index = 0; stat_index < I40IW_HW_STAT_INDEX_MAX_32;
+-           stat_index++) {
+-              stat_reg_offset = stat_table->stat_offset_32[stat_index];
+-              last_rd_stats->stat_value_32[stat_index] =
+-                      i40iw_rd32(devstat->hw, stat_reg_offset);
++      for (stats_index = 0; stats_index < I40IW_HW_STAT_INDEX_MAX_32;
++           stats_index++) {
++              stats_reg_offset = stats_table->stats_offset_32[stats_index];
++              last_rd_stats->stats_value_32[stats_index] =
++                      i40iw_rd32(stats->hw, stats_reg_offset);
+       }
+ }
+ /**
+- * i40iw_hw_stat_read_32 - Read 32-bit HW stat counters and accommodates for roll-overs.
+- * @devstat: pestat struct
+- * @index: index in HW stat table which contains offset reg-addr
+- * @value: hw stat value
++ * i40iw_hw_stats_read_32 - Read 32-bit HW stats counters and accommodates for roll-overs.
++ * @stat: pestat struct
++ * @index: index in HW stats table which contains offset reg-addr
++ * @value: hw stats value
+  */
+-static void i40iw_hw_stat_read_32(struct i40iw_dev_pestat *devstat,
+-                                enum i40iw_hw_stat_index_32b index,
+-                                u64 *value)
++void i40iw_hw_stats_read_32(struct i40iw_vsi_pestat *stats,
++                          enum i40iw_hw_stats_index_32b index,
++                          u64 *value)
+ {
+-      struct i40iw_dev_hw_stat_offsets *stat_table =
+-              &devstat->hw_stat_offsets;
+-      struct i40iw_dev_hw_stats *last_rd_stats = &devstat->last_read_hw_stats;
+-      struct i40iw_dev_hw_stats *hw_stats = &devstat->hw_stats;
+-      u64 new_stat_value = 0;
+-      u32 stat_reg_offset = stat_table->stat_offset_32[index];
+-
+-      new_stat_value = i40iw_rd32(devstat->hw, stat_reg_offset);
++      struct i40iw_dev_hw_stats_offsets *stats_table =
++              &stats->hw_stats_offsets;
++      struct i40iw_dev_hw_stats *last_rd_stats = &stats->last_read_hw_stats;
++      struct i40iw_dev_hw_stats *hw_stats = &stats->hw_stats;
++      u64 new_stats_value = 0;
++      u32 stats_reg_offset = stats_table->stats_offset_32[index];
++
++      new_stats_value = i40iw_rd32(stats->hw, stats_reg_offset);
+       /*roll-over case */
+-      if (new_stat_value < last_rd_stats->stat_value_32[index])
+-              hw_stats->stat_value_32[index] += new_stat_value;
++      if (new_stats_value < last_rd_stats->stats_value_32[index])
++              hw_stats->stats_value_32[index] += new_stats_value;
+       else
+-              hw_stats->stat_value_32[index] +=
+-                      new_stat_value - last_rd_stats->stat_value_32[index];
+-      last_rd_stats->stat_value_32[index] = new_stat_value;
+-      *value = hw_stats->stat_value_32[index];
++              hw_stats->stats_value_32[index] +=
++                      new_stats_value - last_rd_stats->stats_value_32[index];
++      last_rd_stats->stats_value_32[index] = new_stats_value;
++      *value = hw_stats->stats_value_32[index];
+ }
+ /**
+- * i40iw_hw_stat_read_64 - Read HW stat counters (greater than 32-bit) and accommodates for roll-overs.
+- * @devstat: pestat struct
+- * @index: index in HW stat table which contains offset reg-addr
+- * @value: hw stat value
++ * i40iw_hw_stats_read_64 - Read HW stats counters (greater than 32-bit) and accommodates for roll-overs.
++ * @stats: pestat struct
++ * @index: index in HW stats table which contains offset reg-addr
++ * @value: hw stats value
+  */
+-static void i40iw_hw_stat_read_64(struct i40iw_dev_pestat *devstat,
+-                                enum i40iw_hw_stat_index_64b index,
+-                                u64 *value)
++void i40iw_hw_stats_read_64(struct i40iw_vsi_pestat *stats,
++                          enum i40iw_hw_stats_index_64b index,
++                          u64 *value)
+ {
+-      struct i40iw_dev_hw_stat_offsets *stat_table =
+-              &devstat->hw_stat_offsets;
+-      struct i40iw_dev_hw_stats *last_rd_stats = &devstat->last_read_hw_stats;
+-      struct i40iw_dev_hw_stats *hw_stats = &devstat->hw_stats;
+-      u64 new_stat_value = 0;
+-      u32 stat_reg_offset = stat_table->stat_offset_64[index];
+-
+-      new_stat_value = readq(devstat->hw->hw_addr + stat_reg_offset);
++      struct i40iw_dev_hw_stats_offsets *stats_table =
++              &stats->hw_stats_offsets;
++      struct i40iw_dev_hw_stats *last_rd_stats = &stats->last_read_hw_stats;
++      struct i40iw_dev_hw_stats *hw_stats = &stats->hw_stats;
++      u64 new_stats_value = 0;
++      u32 stats_reg_offset = stats_table->stats_offset_64[index];
++
++      new_stats_value = readq(stats->hw->hw_addr + stats_reg_offset);
+       /*roll-over case */
+-      if (new_stat_value < last_rd_stats->stat_value_64[index])
+-              hw_stats->stat_value_64[index] += new_stat_value;
++      if (new_stats_value < last_rd_stats->stats_value_64[index])
++              hw_stats->stats_value_64[index] += new_stats_value;
+       else
+-              hw_stats->stat_value_64[index] +=
+-                      new_stat_value - last_rd_stats->stat_value_64[index];
+-      last_rd_stats->stat_value_64[index] = new_stat_value;
+-      *value = hw_stats->stat_value_64[index];
++              hw_stats->stats_value_64[index] +=
++                      new_stats_value - last_rd_stats->stats_value_64[index];
++      last_rd_stats->stats_value_64[index] = new_stats_value;
++      *value = hw_stats->stats_value_64[index];
+ }
+ /**
+- * i40iw_hw_stat_read_all - read all HW stat counters
+- * @devstat: pestat struct
+- * @stat_values: hw stats structure
++ * i40iw_hw_stats_read_all - read all HW stat counters
++ * @stats: pestat struct
++ * @stats_values: hw stats structure
+  *
+  * Read all the HW stat counters and populates hw_stats structure
+- * of passed-in dev's pestat as well as copy created in stat_values.
++ * of passed-in vsi's pestat as well as copy created in stat_values.
+  */
+-static void i40iw_hw_stat_read_all(struct i40iw_dev_pestat *devstat,
+-                                 struct i40iw_dev_hw_stats *stat_values)
++void i40iw_hw_stats_read_all(struct i40iw_vsi_pestat *stats,
++                           struct i40iw_dev_hw_stats *stats_values)
+ {
+-      u32 stat_index;
+-
+-      for (stat_index = 0; stat_index < I40IW_HW_STAT_INDEX_MAX_32;
+-           stat_index++)
+-              i40iw_hw_stat_read_32(devstat, stat_index,
+-                                    &stat_values->stat_value_32[stat_index]);
+-      for (stat_index = 0; stat_index < I40IW_HW_STAT_INDEX_MAX_64;
+-           stat_index++)
+-              i40iw_hw_stat_read_64(devstat, stat_index,
+-                                    &stat_values->stat_value_64[stat_index]);
++      u32 stats_index;
++      unsigned long flags;
++
++      spin_lock_irqsave(&stats->lock, flags);
++
++      for (stats_index = 0; stats_index < I40IW_HW_STAT_INDEX_MAX_32;
++           stats_index++)
++              i40iw_hw_stats_read_32(stats, stats_index,
++                                     &stats_values->stats_value_32[stats_index]);
++      for (stats_index = 0; stats_index < I40IW_HW_STAT_INDEX_MAX_64;
++           stats_index++)
++              i40iw_hw_stats_read_64(stats, stats_index,
++                                     &stats_values->stats_value_64[stats_index]);
++      spin_unlock_irqrestore(&stats->lock, flags);
+ }
+ /**
+- * i40iw_hw_stat_refresh_all - Update all HW stat structs
+- * @devstat: pestat struct
+- * @stat_values: hw stats structure
++ * i40iw_hw_stats_refresh_all - Update all HW stats structs
++ * @stats: pestat struct
+  *
+- * Read all the HW stat counters to refresh values in hw_stats structure
++ * Read all the HW stats counters to refresh values in hw_stats structure
+  * of passed-in dev's pestat
+  */
+-static void i40iw_hw_stat_refresh_all(struct i40iw_dev_pestat *devstat)
++void i40iw_hw_stats_refresh_all(struct i40iw_vsi_pestat *stats)
++{
++      u64 stats_value;
++      u32 stats_index;
++      unsigned long flags;
++
++      spin_lock_irqsave(&stats->lock, flags);
++
++      for (stats_index = 0; stats_index < I40IW_HW_STAT_INDEX_MAX_32;
++           stats_index++)
++              i40iw_hw_stats_read_32(stats, stats_index, &stats_value);
++      for (stats_index = 0; stats_index < I40IW_HW_STAT_INDEX_MAX_64;
++           stats_index++)
++              i40iw_hw_stats_read_64(stats, stats_index, &stats_value);
++      spin_unlock_irqrestore(&stats->lock, flags);
++}
++
++/**
++ * i40iw_get_fcn_id - Return the function id
++ * @dev: pointer to the device
++ */
++static u8 i40iw_get_fcn_id(struct i40iw_sc_dev *dev)
++{
++      u8 fcn_id = I40IW_INVALID_FCN_ID;
++      u8 i;
++
++      for (i = I40IW_FIRST_NON_PF_STAT; i < I40IW_MAX_STATS_COUNT; i++)
++              if (!dev->fcn_id_array[i]) {
++                      fcn_id = i;
++                      dev->fcn_id_array[i] = true;
++                      break;
++              }
++      return fcn_id;
++}
++
++/**
++ * i40iw_vsi_stats_init - Initialize the vsi statistics
++ * @vsi: pointer to the vsi structure
++ * @info: The info structure used for initialization
++ */
++enum i40iw_status_code i40iw_vsi_stats_init(struct i40iw_sc_vsi *vsi, struct i40iw_vsi_stats_info *info)
+ {
+-      u64 stat_value;
+-      u32 stat_index;
+-
+-      for (stat_index = 0; stat_index < I40IW_HW_STAT_INDEX_MAX_32;
+-           stat_index++)
+-              i40iw_hw_stat_read_32(devstat, stat_index, &stat_value);
+-      for (stat_index = 0; stat_index < I40IW_HW_STAT_INDEX_MAX_64;
+-           stat_index++)
+-              i40iw_hw_stat_read_64(devstat, stat_index, &stat_value);
++      u8 fcn_id = info->fcn_id;
++
++      if (info->alloc_fcn_id)
++              fcn_id = i40iw_get_fcn_id(vsi->dev);
++
++      if (fcn_id == I40IW_INVALID_FCN_ID)
++              return I40IW_ERR_NOT_READY;
++
++      vsi->pestat = info->pestat;
++      vsi->pestat->hw = vsi->dev->hw;
++
++      if (info->stats_initialize) {
++              i40iw_hw_stats_init(vsi->pestat, fcn_id, true);
++              spin_lock_init(&vsi->pestat->lock);
++              i40iw_hw_stats_start_timer(vsi);
++      }
++      vsi->stats_fcn_id_alloc = info->alloc_fcn_id;
++      vsi->fcn_id = fcn_id;
++      return I40IW_SUCCESS;
++}
++
++/**
++ * i40iw_vsi_stats_free - Free the vsi stats
++ * @vsi: pointer to the vsi structure
++ */
++void i40iw_vsi_stats_free(struct i40iw_sc_vsi *vsi)
++{
++      u8 fcn_id = vsi->fcn_id;
++
++      if ((vsi->stats_fcn_id_alloc) && (fcn_id != I40IW_INVALID_FCN_ID))
++              vsi->dev->fcn_id_array[fcn_id] = false;
++      i40iw_hw_stats_stop_timer(vsi);
+ }
+ static struct i40iw_cqp_ops iw_cqp_ops = {
+@@ -4837,23 +4943,6 @@ static void i40iw_hw_stat_refresh_all(struct i40iw_dev_pestat *devstat)
+       NULL
+ };
+-static const struct i40iw_device_pestat_ops iw_device_pestat_ops = {
+-      i40iw_hw_stat_init,
+-      i40iw_hw_stat_read_32,
+-      i40iw_hw_stat_read_64,
+-      i40iw_hw_stat_read_all,
+-      i40iw_hw_stat_refresh_all
+-};
+-
+-/**
+- * i40iw_device_init_pestat - Initialize the pestat structure
+- * @dev: pestat struct
+- */
+-void i40iw_device_init_pestat(struct i40iw_dev_pestat *devstat)
+-{
+-      devstat->ops = iw_device_pestat_ops;
+-}
+-
+ /**
+  * i40iw_device_init - Initialize IWARP device
+  * @dev: IWARP device pointer
+@@ -4867,7 +4956,6 @@ enum i40iw_status_code i40iw_device_init(struct i40iw_sc_dev *dev,
+       u16 hmc_fcn = 0;
+       enum i40iw_status_code ret_code = 0;
+       u8 db_size;
+-      int i;
+       spin_lock_init(&dev->cqp_lock);
+       INIT_LIST_HEAD(&dev->cqp_cmd_head);             /* for the cqp commands backlog. */
+@@ -4876,15 +4964,7 @@ enum i40iw_status_code i40iw_device_init(struct i40iw_sc_dev *dev,
+       dev->debug_mask = info->debug_mask;
+-      i40iw_device_init_pestat(&dev->dev_pestat);
+       dev->hmc_fn_id = info->hmc_fn_id;
+-      i40iw_fill_qos_list(info->l2params.qs_handle_list);
+-      for (i = 0; i < I40IW_MAX_USER_PRIORITY; i++) {
+-              dev->qos[i].qs_handle = info->l2params.qs_handle_list[i];
+-              i40iw_debug(dev, I40IW_DEBUG_DCB, "qset[%d]: %d\n", i, dev->qos[i].qs_handle);
+-              spin_lock_init(&dev->qos[i].lock);
+-              INIT_LIST_HEAD(&dev->qos[i].qplist);
+-      }
+       dev->exception_lan_queue = info->exception_lan_queue;
+       dev->is_pf = info->is_pf;
+@@ -4897,15 +4977,10 @@ enum i40iw_status_code i40iw_device_init(struct i40iw_sc_dev *dev,
+       dev->hw = info->hw;
+       dev->hw->hw_addr = info->bar0;
+-      val = i40iw_rd32(dev->hw, I40E_GLPCI_DREVID);
+-      dev->hw_rev = (u8)RS_32(val, I40E_GLPCI_DREVID_DEFAULT_REVID);
+-
+       if (dev->is_pf) {
+-              dev->dev_pestat.ops.iw_hw_stat_init(&dev->dev_pestat,
+-                      dev->hmc_fn_id, dev->hw, true);
+-              spin_lock_init(&dev->dev_pestat.stats_lock);
+-              /*start the periodic stats_timer */
+-              i40iw_hw_stats_start_timer(dev);
++              val = i40iw_rd32(dev->hw, I40E_GLPCI_DREVID);
++              dev->hw_rev = (u8)RS_32(val, I40E_GLPCI_DREVID_DEFAULT_REVID);
++
+               val = i40iw_rd32(dev->hw, I40E_GLPCI_LBARCTRL);
+               db_size = (u8)RS_32(val, I40E_GLPCI_LBARCTRL_PE_DB_SIZE);
+               if ((db_size != I40IW_PE_DB_SIZE_4M) &&
+diff --git a/drivers/infiniband/hw/i40iw/i40iw_d.h b/drivers/infiniband/hw/i40iw/i40iw_d.h
+index 1bd4bad..a39ac12 100644
+--- a/drivers/infiniband/hw/i40iw/i40iw_d.h
++++ b/drivers/infiniband/hw/i40iw/i40iw_d.h
+@@ -69,6 +69,9 @@
+ #define I40IW_STAG_TYPE_NONSHARED 1
+ #define I40IW_MAX_USER_PRIORITY 8
++#define I40IW_MAX_STATS_COUNT 16
++#define I40IW_FIRST_NON_PF_STAT       4
++
+ #define LS_64_1(val, bits)      ((u64)(uintptr_t)val << bits)
+ #define RS_64_1(val, bits)      ((u64)(uintptr_t)val >> bits)
+@@ -1203,8 +1206,11 @@
+ #define I40IWQPC_RXCQNUM_SHIFT 32
+ #define I40IWQPC_RXCQNUM_MASK (0x1ffffULL << I40IWQPC_RXCQNUM_SHIFT)
+-#define I40IWQPC_Q2ADDR_SHIFT I40IW_CQPHC_QPCTX_SHIFT
+-#define I40IWQPC_Q2ADDR_MASK I40IW_CQPHC_QPCTX_MASK
++#define I40IWQPC_STAT_INDEX_SHIFT 0
++#define I40IWQPC_STAT_INDEX_MASK (0x1fULL << I40IWQPC_STAT_INDEX_SHIFT)
++
++#define I40IWQPC_Q2ADDR_SHIFT 0
++#define I40IWQPC_Q2ADDR_MASK (0xffffffffffffff00ULL << I40IWQPC_Q2ADDR_SHIFT)
+ #define I40IWQPC_LASTBYTESENT_SHIFT 0
+ #define I40IWQPC_LASTBYTESENT_MASK (0xffUL << I40IWQPC_LASTBYTESENT_SHIFT)
+@@ -1236,11 +1242,8 @@
+ #define I40IWQPC_PRIVEN_SHIFT 25
+ #define I40IWQPC_PRIVEN_MASK (1UL << I40IWQPC_PRIVEN_SHIFT)
+-#define I40IWQPC_LSMMPRESENT_SHIFT 26
+-#define I40IWQPC_LSMMPRESENT_MASK (1UL << I40IWQPC_LSMMPRESENT_SHIFT)
+-
+-#define I40IWQPC_ADJUSTFORLSMM_SHIFT 27
+-#define I40IWQPC_ADJUSTFORLSMM_MASK (1UL << I40IWQPC_ADJUSTFORLSMM_SHIFT)
++#define I40IWQPC_USESTATSINSTANCE_SHIFT 26
++#define I40IWQPC_USESTATSINSTANCE_MASK (1UL << I40IWQPC_USESTATSINSTANCE_SHIFT)
+ #define I40IWQPC_IWARPMODE_SHIFT 28
+ #define I40IWQPC_IWARPMODE_MASK (1UL << I40IWQPC_IWARPMODE_SHIFT)
+@@ -1717,6 +1720,8 @@ enum i40iw_alignment {
+ #define OP_MANAGE_VF_PBLE_BP                    28
+ #define OP_QUERY_FPM_VALUES                     29
+ #define OP_COMMIT_FPM_VALUES                    30
+-#define OP_SIZE_CQP_STAT_ARRAY                  31
++#define OP_REQUESTED_COMMANDS                   31
++#define OP_COMPLETED_COMMANDS                   32
++#define OP_SIZE_CQP_STAT_ARRAY                  33
+ #endif
+diff --git a/drivers/infiniband/hw/i40iw/i40iw_hw.c b/drivers/infiniband/hw/i40iw/i40iw_hw.c
+index 4394a67..476867a 100644
+--- a/drivers/infiniband/hw/i40iw/i40iw_hw.c
++++ b/drivers/infiniband/hw/i40iw/i40iw_hw.c
+@@ -542,6 +542,7 @@ enum i40iw_status_code i40iw_manage_qhash(struct i40iw_device *iwdev,
+ {
+       struct i40iw_qhash_table_info *info;
+       struct i40iw_sc_dev *dev = &iwdev->sc_dev;
++      struct i40iw_sc_vsi *vsi = &iwdev->vsi;
+       enum i40iw_status_code status;
+       struct i40iw_cqp *iwcqp = &iwdev->cqp;
+       struct i40iw_cqp_request *cqp_request;
+@@ -554,6 +555,7 @@ enum i40iw_status_code i40iw_manage_qhash(struct i40iw_device *iwdev,
+       info = &cqp_info->in.u.manage_qhash_table_entry.info;
+       memset(info, 0, sizeof(*info));
++      info->vsi = &iwdev->vsi;
+       info->manage = mtype;
+       info->entry_type = etype;
+       if (cminfo->vlan_id != 0xFFFF) {
+@@ -566,7 +568,7 @@ enum i40iw_status_code i40iw_manage_qhash(struct i40iw_device *iwdev,
+       info->ipv4_valid = cminfo->ipv4;
+       info->user_pri = cminfo->user_pri;
+       ether_addr_copy(info->mac_addr, iwdev->netdev->dev_addr);
+-      info->qp_num = cpu_to_le32(dev->ilq->qp_id);
++      info->qp_num = cpu_to_le32(vsi->ilq->qp_id);
+       info->dest_port = cpu_to_le16(cminfo->loc_port);
+       info->dest_ip[0] = cpu_to_le32(cminfo->loc_addr[0]);
+       info->dest_ip[1] = cpu_to_le32(cminfo->loc_addr[1]);
+diff --git a/drivers/infiniband/hw/i40iw/i40iw_main.c b/drivers/infiniband/hw/i40iw/i40iw_main.c
+index cf9d288..2bdb8b0 100644
+--- a/drivers/infiniband/hw/i40iw/i40iw_main.c
++++ b/drivers/infiniband/hw/i40iw/i40iw_main.c
+@@ -932,6 +932,7 @@ static enum i40iw_status_code i40iw_initialize_ilq(struct i40iw_device *iwdev)
+       struct i40iw_puda_rsrc_info info;
+       enum i40iw_status_code status;
++      memset(&info, 0, sizeof(info));
+       info.type = I40IW_PUDA_RSRC_TYPE_ILQ;
+       info.cq_id = 1;
+       info.qp_id = 0;
+@@ -941,10 +942,9 @@ static enum i40iw_status_code i40iw_initialize_ilq(struct i40iw_device *iwdev)
+       info.rq_size = 8192;
+       info.buf_size = 1024;
+       info.tx_buf_cnt = 16384;
+-      info.mss = iwdev->sc_dev.mss;
+       info.receive = i40iw_receive_ilq;
+       info.xmit_complete = i40iw_free_sqbuf;
+-      status = i40iw_puda_create_rsrc(&iwdev->sc_dev, &info);
++      status = i40iw_puda_create_rsrc(&iwdev->vsi, &info);
+       if (status)
+               i40iw_pr_err("ilq create fail\n");
+       return status;
+@@ -961,6 +961,7 @@ static enum i40iw_status_code i40iw_initialize_ieq(struct i40iw_device *iwdev)
+       struct i40iw_puda_rsrc_info info;
+       enum i40iw_status_code status;
++      memset(&info, 0, sizeof(info));
+       info.type = I40IW_PUDA_RSRC_TYPE_IEQ;
+       info.cq_id = 2;
+       info.qp_id = iwdev->sc_dev.exception_lan_queue;
+@@ -969,9 +970,8 @@ static enum i40iw_status_code i40iw_initialize_ieq(struct i40iw_device *iwdev)
+       info.sq_size = 8192;
+       info.rq_size = 8192;
+       info.buf_size = 2048;
+-      info.mss = iwdev->sc_dev.mss;
+       info.tx_buf_cnt = 16384;
+-      status = i40iw_puda_create_rsrc(&iwdev->sc_dev, &info);
++      status = i40iw_puda_create_rsrc(&iwdev->vsi, &info);
+       if (status)
+               i40iw_pr_err("ieq create fail\n");
+       return status;
+@@ -1296,12 +1296,16 @@ static enum i40iw_status_code i40iw_initialize_dev(struct i40iw_device *iwdev,
+       enum i40iw_status_code status;
+       struct i40iw_sc_dev *dev = &iwdev->sc_dev;
+       struct i40iw_device_init_info info;
++      struct i40iw_vsi_init_info vsi_info;
+       struct i40iw_dma_mem mem;
++      struct i40iw_l2params l2params;
+       u32 size;
++      struct i40iw_vsi_stats_info stats_info;
+       u16 last_qset = I40IW_NO_QSET;
+       u16 qset;
+       u32 i;
++      memset(&l2params, 0, sizeof(l2params));
+       memset(&info, 0, sizeof(info));
+       size = sizeof(struct i40iw_hmc_pble_rsrc) + sizeof(struct i40iw_hmc_info) +
+                               (sizeof(struct i40iw_hmc_obj_info) * I40IW_HMC_IW_MAX);
+@@ -1330,16 +1334,17 @@ static enum i40iw_status_code i40iw_initialize_dev(struct i40iw_device *iwdev,
+       info.bar0 = ldev->hw_addr;
+       info.hw = &iwdev->hw;
+       info.debug_mask = debug;
+-      info.l2params.mss =
++      l2params.mss =
+               (ldev->params.mtu) ? ldev->params.mtu - I40IW_MTU_TO_MSS : I40IW_DEFAULT_MSS;
+       for (i = 0; i < I40E_CLIENT_MAX_USER_PRIORITY; i++) {
+               qset = ldev->params.qos.prio_qos[i].qs_handle;
+-              info.l2params.qs_handle_list[i] = qset;
++              l2params.qs_handle_list[i] = qset;
+               if (last_qset == I40IW_NO_QSET)
+                       last_qset = qset;
+               else if ((qset != last_qset) && (qset != I40IW_NO_QSET))
+                       iwdev->dcb = true;
+       }
++      i40iw_pr_info("DCB is set/clear = %d\n", iwdev->dcb);
+       info.exception_lan_queue = 1;
+       info.vchnl_send = i40iw_virtchnl_send;
+       status = i40iw_device_init(&iwdev->sc_dev, &info);
+@@ -1348,6 +1353,20 @@ static enum i40iw_status_code i40iw_initialize_dev(struct i40iw_device *iwdev,
+               kfree(iwdev->hmc_info_mem);
+               iwdev->hmc_info_mem = NULL;
+       }
++      memset(&vsi_info, 0, sizeof(vsi_info));
++      vsi_info.dev = &iwdev->sc_dev;
++      vsi_info.back_vsi = (void *)iwdev;
++      vsi_info.params = &l2params;
++      i40iw_sc_vsi_init(&iwdev->vsi, &vsi_info);
++
++      if (dev->is_pf) {
++              memset(&stats_info, 0, sizeof(stats_info));
++              stats_info.fcn_id = ldev->fid;
++              stats_info.pestat = kzalloc(sizeof(*stats_info.pestat), GFP_KERNEL);
++              stats_info.stats_initialize = true;
++              if (stats_info.pestat)
++                      i40iw_vsi_stats_init(&iwdev->vsi, &stats_info);
++      }
+       return status;
+ }
+@@ -1457,10 +1476,10 @@ static void i40iw_deinit_device(struct i40iw_device *iwdev, bool reset)
+               i40iw_destroy_aeq(iwdev, reset);
+               /* fallthrough */
+       case IEQ_CREATED:
+-              i40iw_puda_dele_resources(dev, I40IW_PUDA_RSRC_TYPE_IEQ, reset);
++              i40iw_puda_dele_resources(&iwdev->vsi, I40IW_PUDA_RSRC_TYPE_IEQ, reset);
+               /* fallthrough */
+       case ILQ_CREATED:
+-              i40iw_puda_dele_resources(dev, I40IW_PUDA_RSRC_TYPE_ILQ, reset);
++              i40iw_puda_dele_resources(&iwdev->vsi, I40IW_PUDA_RSRC_TYPE_ILQ, reset);
+               /* fallthrough */
+       case CCQ_CREATED:
+               i40iw_destroy_ccq(iwdev, reset);
+@@ -1476,9 +1495,10 @@ static void i40iw_deinit_device(struct i40iw_device *iwdev, bool reset)
+               /* fallthrough */
+       case INITIAL_STATE:
+               i40iw_cleanup_cm_core(&iwdev->cm_core);
+-              if (dev->is_pf)
+-                      i40iw_hw_stats_del_timer(dev);
+-
++              if (iwdev->vsi.pestat) {
++                      i40iw_vsi_stats_free(&iwdev->vsi);
++                      kfree(iwdev->vsi.pestat);
++              }
+               i40iw_del_init_mem(iwdev);
+               break;
+       case INVALID_STATE:
+@@ -1523,7 +1543,6 @@ static enum i40iw_status_code i40iw_setup_init_state(struct i40iw_handler *hdl,
+       iwdev->max_enabled_vfs = iwdev->max_rdma_vfs;
+       iwdev->netdev = ldev->netdev;
+       hdl->client = client;
+-      iwdev->mss = (!ldev->params.mtu) ? I40IW_DEFAULT_MSS : ldev->params.mtu - I40IW_MTU_TO_MSS;
+       if (!ldev->ftype)
+               iwdev->db_start = pci_resource_start(ldev->pcidev, 0) + I40IW_DB_ADDR_OFFSET;
+       else
+@@ -1683,7 +1702,7 @@ static void i40iw_l2params_worker(struct work_struct *work)
+           container_of(work, struct l2params_work, work);
+       struct i40iw_device *iwdev = dwork->iwdev;
+-      i40iw_change_l2params(&iwdev->sc_dev, &dwork->l2params);
++      i40iw_change_l2params(&iwdev->vsi, &dwork->l2params);
+       atomic_dec(&iwdev->params_busy);
+       kfree(work);
+ }
+@@ -1724,7 +1743,7 @@ static void i40iw_l2param_change(struct i40e_info *ldev, struct i40e_client *cli
+       for (i = 0; i < I40E_CLIENT_MAX_USER_PRIORITY; i++)
+               l2params->qs_handle_list[i] = params->qos.prio_qos[i].qs_handle;
+-      l2params->mss = (params->mtu) ? params->mtu - I40IW_MTU_TO_MSS : iwdev->mss;
++      l2params->mss = (params->mtu) ? params->mtu - I40IW_MTU_TO_MSS : iwdev->vsi.mss;
+       INIT_WORK(&work->work, i40iw_l2params_worker);
+       queue_work(iwdev->param_wq, &work->work);
+@@ -1773,21 +1792,23 @@ static void i40iw_vf_reset(struct i40e_info *ldev, struct i40e_client *client, u
+       struct i40iw_vfdev *tmp_vfdev;
+       unsigned int i;
+       unsigned long flags;
++      struct i40iw_device *iwdev;
+       hdl = i40iw_find_i40e_handler(ldev);
+       if (!hdl)
+               return;
+       dev = &hdl->device.sc_dev;
++      iwdev = (struct i40iw_device *)dev->back_dev;
+       for (i = 0; i < I40IW_MAX_PE_ENABLED_VF_COUNT; i++) {
+               if (!dev->vf_dev[i] || (dev->vf_dev[i]->vf_id != vf_id))
+                       continue;
+               /* free all resources allocated on behalf of vf */
+               tmp_vfdev = dev->vf_dev[i];
+-              spin_lock_irqsave(&dev->dev_pestat.stats_lock, flags);
++              spin_lock_irqsave(&iwdev->vsi.pestat->lock, flags);
+               dev->vf_dev[i] = NULL;
+-              spin_unlock_irqrestore(&dev->dev_pestat.stats_lock, flags);
++              spin_unlock_irqrestore(&iwdev->vsi.pestat->lock, flags);
+               i40iw_del_hmc_objects(dev, &tmp_vfdev->hmc_info, false, false);
+               /* remove vf hmc function */
+               memset(&hmc_fcn_info, 0, sizeof(hmc_fcn_info));
+diff --git a/drivers/infiniband/hw/i40iw/i40iw_osdep.h b/drivers/infiniband/hw/i40iw/i40iw_osdep.h
+index a6b18cd..aa66c1c 100644
+--- a/drivers/infiniband/hw/i40iw/i40iw_osdep.h
++++ b/drivers/infiniband/hw/i40iw/i40iw_osdep.h
+@@ -209,9 +209,9 @@ void i40iw_cqp_spawn_worker(struct i40iw_sc_dev *dev,
+ enum i40iw_status_code i40iw_hw_manage_vf_pble_bp(struct i40iw_device *iwdev,
+                                                 struct i40iw_manage_vf_pble_info *info,
+                                                 bool wait);
+-struct i40iw_dev_pestat;
+-void i40iw_hw_stats_start_timer(struct i40iw_sc_dev *);
+-void i40iw_hw_stats_del_timer(struct i40iw_sc_dev *);
++struct i40iw_sc_vsi;
++void i40iw_hw_stats_start_timer(struct i40iw_sc_vsi *vsi);
++void i40iw_hw_stats_stop_timer(struct i40iw_sc_vsi *vsi);
+ #define i40iw_mmiowb() mmiowb()
+ void i40iw_wr32(struct i40iw_hw *hw, u32 reg, u32 value);
+ u32  i40iw_rd32(struct i40iw_hw *hw, u32 reg);
+diff --git a/drivers/infiniband/hw/i40iw/i40iw_p.h b/drivers/infiniband/hw/i40iw/i40iw_p.h
+index 2a4bd32..28a92fe 100644
+--- a/drivers/infiniband/hw/i40iw/i40iw_p.h
++++ b/drivers/infiniband/hw/i40iw/i40iw_p.h
+@@ -47,8 +47,6 @@ void i40iw_debug_buf(struct i40iw_sc_dev *dev, enum i40iw_debug_flag mask,
+ enum i40iw_status_code i40iw_device_init(struct i40iw_sc_dev *dev,
+                                        struct i40iw_device_init_info *info);
+-void i40iw_device_init_pestat(struct i40iw_dev_pestat *devstat);
+-
+ void i40iw_sc_cqp_post_sq(struct i40iw_sc_cqp *cqp);
+ u64 *i40iw_sc_cqp_get_next_send_wqe(struct i40iw_sc_cqp *cqp, u64 scratch);
+@@ -64,9 +62,24 @@ enum i40iw_status_code i40iw_sc_init_iw_hmc(struct i40iw_sc_dev *dev,
+ enum i40iw_status_code i40iw_pf_init_vfhmc(struct i40iw_sc_dev *dev, u8 vf_hmc_fn_id,
+                                          u32 *vf_cnt_array);
+-/* cqp misc functions */
+-void i40iw_change_l2params(struct i40iw_sc_dev *dev, struct i40iw_l2params *l2params);
+-void i40iw_qp_add_qos(struct i40iw_sc_dev *dev, struct i40iw_sc_qp *qp);
++/* stats functions */
++void i40iw_hw_stats_refresh_all(struct i40iw_vsi_pestat *stats);
++void i40iw_hw_stats_read_all(struct i40iw_vsi_pestat *stats, struct i40iw_dev_hw_stats *stats_values);
++void i40iw_hw_stats_read_32(struct i40iw_vsi_pestat *stats,
++                          enum i40iw_hw_stats_index_32b index,
++                          u64 *value);
++void i40iw_hw_stats_read_64(struct i40iw_vsi_pestat *stats,
++                          enum i40iw_hw_stats_index_64b index,
++                          u64 *value);
++void i40iw_hw_stats_init(struct i40iw_vsi_pestat *stats, u8 index, bool is_pf);
++
++/* vsi misc functions */
++enum i40iw_status_code i40iw_vsi_stats_init(struct i40iw_sc_vsi *vsi, struct i40iw_vsi_stats_info *info);
++void i40iw_vsi_stats_free(struct i40iw_sc_vsi *vsi);
++void i40iw_sc_vsi_init(struct i40iw_sc_vsi *vsi, struct i40iw_vsi_init_info *info);
++
++void i40iw_change_l2params(struct i40iw_sc_vsi *vsi, struct i40iw_l2params *l2params);
++void i40iw_qp_add_qos(struct i40iw_sc_qp *qp);
+ void i40iw_terminate_send_fin(struct i40iw_sc_qp *qp);
+diff --git a/drivers/infiniband/hw/i40iw/i40iw_puda.c b/drivers/infiniband/hw/i40iw/i40iw_puda.c
+index c3d28ba..449ba8c 100644
+--- a/drivers/infiniband/hw/i40iw/i40iw_puda.c
++++ b/drivers/infiniband/hw/i40iw/i40iw_puda.c
+@@ -42,12 +42,13 @@
+ #include "i40iw_p.h"
+ #include "i40iw_puda.h"
+-static void i40iw_ieq_receive(struct i40iw_sc_dev *dev,
++static void i40iw_ieq_receive(struct i40iw_sc_vsi *vsi,
+                             struct i40iw_puda_buf *buf);
+-static void i40iw_ieq_tx_compl(struct i40iw_sc_dev *dev, void *sqwrid);
++static void i40iw_ieq_tx_compl(struct i40iw_sc_vsi *vsi, void *sqwrid);
+ static void i40iw_ilq_putback_rcvbuf(struct i40iw_sc_qp *qp, u32 wqe_idx);
+ static enum i40iw_status_code i40iw_puda_replenish_rq(struct i40iw_puda_rsrc
+                                                     *rsrc, bool initial);
++static void i40iw_ieq_cleanup_qp(struct i40iw_puda_rsrc *ieq, struct i40iw_sc_qp *qp);
+ /**
+  * i40iw_puda_get_listbuf - get buffer from puda list
+  * @list: list to use for buffers (ILQ or IEQ)
+@@ -292,7 +293,7 @@ enum i40iw_status_code i40iw_puda_poll_completion(struct i40iw_sc_dev *dev,
+       unsigned long   flags;
+       if ((cq_type == I40IW_CQ_TYPE_ILQ) || (cq_type == I40IW_CQ_TYPE_IEQ)) {
+-              rsrc = (cq_type == I40IW_CQ_TYPE_ILQ) ? dev->ilq : dev->ieq;
++              rsrc = (cq_type == I40IW_CQ_TYPE_ILQ) ? cq->vsi->ilq : cq->vsi->ieq;
+       } else {
+               i40iw_debug(dev, I40IW_DEBUG_PUDA, "%s qp_type error\n", __func__);
+               return I40IW_ERR_BAD_PTR;
+@@ -335,7 +336,7 @@ enum i40iw_status_code i40iw_puda_poll_completion(struct i40iw_sc_dev *dev,
+               rsrc->stats_pkt_rcvd++;
+               rsrc->compl_rxwqe_idx = info.wqe_idx;
+               i40iw_debug(dev, I40IW_DEBUG_PUDA, "%s RQ completion\n", __func__);
+-              rsrc->receive(rsrc->dev, buf);
++              rsrc->receive(rsrc->vsi, buf);
+               if (cq_type == I40IW_CQ_TYPE_ILQ)
+                       i40iw_ilq_putback_rcvbuf(&rsrc->qp, info.wqe_idx);
+               else
+@@ -345,12 +346,12 @@ enum i40iw_status_code i40iw_puda_poll_completion(struct i40iw_sc_dev *dev,
+               i40iw_debug(dev, I40IW_DEBUG_PUDA, "%s SQ completion\n", __func__);
+               sqwrid = (void *)(uintptr_t)qp->sq_wrtrk_array[info.wqe_idx].wrid;
+               I40IW_RING_SET_TAIL(qp->sq_ring, info.wqe_idx);
+-              rsrc->xmit_complete(rsrc->dev, sqwrid);
++              rsrc->xmit_complete(rsrc->vsi, sqwrid);
+               spin_lock_irqsave(&rsrc->bufpool_lock, flags);
+               rsrc->tx_wqe_avail_cnt++;
+               spin_unlock_irqrestore(&rsrc->bufpool_lock, flags);
+-              if (!list_empty(&dev->ilq->txpend))
+-                      i40iw_puda_send_buf(dev->ilq, NULL);
++              if (!list_empty(&rsrc->vsi->ilq->txpend))
++                      i40iw_puda_send_buf(rsrc->vsi->ilq, NULL);
+       }
+ done:
+@@ -513,10 +514,8 @@ static void i40iw_puda_qp_setctx(struct i40iw_puda_rsrc *rsrc)
+  * i40iw_puda_qp_wqe - setup wqe for qp create
+  * @rsrc: resource for qp
+  */
+-static enum i40iw_status_code i40iw_puda_qp_wqe(struct i40iw_puda_rsrc *rsrc)
++static enum i40iw_status_code i40iw_puda_qp_wqe(struct i40iw_sc_dev *dev, struct i40iw_sc_qp *qp)
+ {
+-      struct i40iw_sc_qp *qp = &rsrc->qp;
+-      struct i40iw_sc_dev *dev = rsrc->dev;
+       struct i40iw_sc_cqp *cqp;
+       u64 *wqe;
+       u64 header;
+@@ -582,6 +581,7 @@ static enum i40iw_status_code i40iw_puda_qp_create(struct i40iw_puda_rsrc *rsrc)
+       qp->back_qp = (void *)rsrc;
+       qp->sq_pa = mem->pa;
+       qp->rq_pa = qp->sq_pa + sq_size;
++      qp->vsi = rsrc->vsi;
+       ukqp->sq_base = mem->va;
+       ukqp->rq_base = &ukqp->sq_base[rsrc->sq_size];
+       ukqp->shadow_area = ukqp->rq_base[rsrc->rq_size].elem;
+@@ -609,15 +609,62 @@ static enum i40iw_status_code i40iw_puda_qp_create(struct i40iw_puda_rsrc *rsrc)
+                                                   I40E_VFPE_WQEALLOC1);
+       qp->user_pri = 0;
+-      i40iw_qp_add_qos(rsrc->dev, qp);
++      i40iw_qp_add_qos(qp);
+       i40iw_puda_qp_setctx(rsrc);
+-      ret = i40iw_puda_qp_wqe(rsrc);
++      if (rsrc->ceq_valid)
++              ret = i40iw_cqp_qp_create_cmd(rsrc->dev, qp);
++      else
++              ret = i40iw_puda_qp_wqe(rsrc->dev, qp);
+       if (ret)
+               i40iw_free_dma_mem(rsrc->dev->hw, &rsrc->qpmem);
+       return ret;
+ }
+ /**
++ * i40iw_puda_cq_wqe - setup wqe for cq create
++ * @rsrc: resource for cq
++ */
++static enum i40iw_status_code i40iw_puda_cq_wqe(struct i40iw_sc_dev *dev, struct i40iw_sc_cq *cq)
++{
++      u64 *wqe;
++      struct i40iw_sc_cqp *cqp;
++      u64 header;
++      struct i40iw_ccq_cqe_info compl_info;
++      enum i40iw_status_code status = 0;
++
++      cqp = dev->cqp;
++      wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, 0);
++      if (!wqe)
++              return I40IW_ERR_RING_FULL;
++
++      set_64bit_val(wqe, 0, cq->cq_uk.cq_size);
++      set_64bit_val(wqe, 8, RS_64_1(cq, 1));
++      set_64bit_val(wqe, 16,
++                    LS_64(cq->shadow_read_threshold,
++                          I40IW_CQPSQ_CQ_SHADOW_READ_THRESHOLD));
++      set_64bit_val(wqe, 32, cq->cq_pa);
++
++      set_64bit_val(wqe, 40, cq->shadow_area_pa);
++
++      header = cq->cq_uk.cq_id |
++          LS_64(I40IW_CQP_OP_CREATE_CQ, I40IW_CQPSQ_OPCODE) |
++          LS_64(1, I40IW_CQPSQ_CQ_CHKOVERFLOW) |
++          LS_64(1, I40IW_CQPSQ_CQ_ENCEQEMASK) |
++          LS_64(1, I40IW_CQPSQ_CQ_CEQIDVALID) |
++          LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
++      set_64bit_val(wqe, 24, header);
++
++      i40iw_debug_buf(dev, I40IW_DEBUG_PUDA, "PUDA CQE",
++                      wqe, I40IW_CQP_WQE_SIZE * 8);
++
++      i40iw_sc_cqp_post_sq(dev->cqp);
++      status = dev->cqp_ops->poll_for_cqp_op_done(dev->cqp,
++                                               I40IW_CQP_OP_CREATE_CQ,
++                                               &compl_info);
++      return status;
++}
++
++/**
+  * i40iw_puda_cq_create - create cq for resource
+  * @rsrc: resource for which cq to create
+  */
+@@ -625,18 +672,13 @@ static enum i40iw_status_code i40iw_puda_cq_create(struct i40iw_puda_rsrc *rsrc)
+ {
+       struct i40iw_sc_dev *dev = rsrc->dev;
+       struct i40iw_sc_cq *cq = &rsrc->cq;
+-      u64 *wqe;
+-      struct i40iw_sc_cqp *cqp;
+-      u64 header;
+       enum i40iw_status_code ret = 0;
+       u32 tsize, cqsize;
+-      u32 shadow_read_threshold = 128;
+       struct i40iw_dma_mem *mem;
+-      struct i40iw_ccq_cqe_info compl_info;
+       struct i40iw_cq_init_info info;
+       struct i40iw_cq_uk_init_info *init_info = &info.cq_uk_init_info;
+-      cq->back_cq = (void *)rsrc;
++      cq->vsi = rsrc->vsi;
+       cqsize = rsrc->cq_size * (sizeof(struct i40iw_cqe));
+       tsize = cqsize + sizeof(struct i40iw_cq_shadow_area);
+       ret = i40iw_allocate_dma_mem(dev->hw, &rsrc->cqmem, tsize,
+@@ -657,43 +699,84 @@ static enum i40iw_status_code i40iw_puda_cq_create(struct i40iw_puda_rsrc *rsrc)
+       init_info->shadow_area = (u64 *)((u8 *)mem->va + cqsize);
+       init_info->cq_size = rsrc->cq_size;
+       init_info->cq_id = rsrc->cq_id;
++      info.ceqe_mask = true;
++      info.ceq_id_valid = true;
+       ret = dev->iw_priv_cq_ops->cq_init(cq, &info);
+       if (ret)
+               goto error;
+-      cqp = dev->cqp;
+-      wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, 0);
+-      if (!wqe) {
+-              ret = I40IW_ERR_RING_FULL;
+-              goto error;
+-      }
++      if (rsrc->ceq_valid)
++              ret = i40iw_cqp_cq_create_cmd(dev, cq);
++      else
++              ret = i40iw_puda_cq_wqe(dev, cq);
++error:
++      if (ret)
++              i40iw_free_dma_mem(dev->hw, &rsrc->cqmem);
++      return ret;
++}
+-      set_64bit_val(wqe, 0, rsrc->cq_size);
+-      set_64bit_val(wqe, 8, RS_64_1(cq, 1));
+-      set_64bit_val(wqe, 16, LS_64(shadow_read_threshold, I40IW_CQPSQ_CQ_SHADOW_READ_THRESHOLD));
+-      set_64bit_val(wqe, 32, cq->cq_pa);
++/**
++ * i40iw_puda_free_qp - free qp for resource
++ * @rsrc: resource for which qp to free
++ */
++static void i40iw_puda_free_qp(struct i40iw_puda_rsrc *rsrc)
++{
++      enum i40iw_status_code ret;
++      struct i40iw_ccq_cqe_info compl_info;
++      struct i40iw_sc_dev *dev = rsrc->dev;
+-      set_64bit_val(wqe, 40, cq->shadow_area_pa);
++      if (rsrc->ceq_valid) {
++              i40iw_cqp_qp_destroy_cmd(dev, &rsrc->qp);
++              return;
++      }
+-      header = rsrc->cq_id |
+-          LS_64(I40IW_CQP_OP_CREATE_CQ, I40IW_CQPSQ_OPCODE) |
+-          LS_64(1, I40IW_CQPSQ_CQ_CHKOVERFLOW) |
+-          LS_64(1, I40IW_CQPSQ_CQ_ENCEQEMASK) |
+-          LS_64(1, I40IW_CQPSQ_CQ_CEQIDVALID) |
+-          LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
+-      set_64bit_val(wqe, 24, header);
++      ret = dev->iw_priv_qp_ops->qp_destroy(&rsrc->qp,
++                      0, false, true, true);
++      if (ret)
++              i40iw_debug(dev, I40IW_DEBUG_PUDA,
++                          "%s error puda qp destroy wqe\n",
++                          __func__);
+-      i40iw_debug_buf(dev, I40IW_DEBUG_PUDA, "PUDA CQE",
+-                      wqe, I40IW_CQP_WQE_SIZE * 8);
++      if (!ret) {
++              ret = dev->cqp_ops->poll_for_cqp_op_done(dev->cqp,
++                              I40IW_CQP_OP_DESTROY_QP,
++                              &compl_info);
++              if (ret)
++                      i40iw_debug(dev, I40IW_DEBUG_PUDA,
++                                  "%s error puda qp destroy failed\n",
++                                  __func__);
++      }
++}
+-      i40iw_sc_cqp_post_sq(dev->cqp);
+-      ret = dev->cqp_ops->poll_for_cqp_op_done(dev->cqp,
+-                                               I40IW_CQP_OP_CREATE_CQ,
+-                                               &compl_info);
++/**
++ * i40iw_puda_free_cq - free cq for resource
++ * @rsrc: resource for which cq to free
++ */
++static void i40iw_puda_free_cq(struct i40iw_puda_rsrc *rsrc)
++{
++      enum i40iw_status_code ret;
++      struct i40iw_ccq_cqe_info compl_info;
++      struct i40iw_sc_dev *dev = rsrc->dev;
++
++      if (rsrc->ceq_valid) {
++              i40iw_cqp_cq_destroy_cmd(dev, &rsrc->cq);
++              return;
++      }
++      ret = dev->iw_priv_cq_ops->cq_destroy(&rsrc->cq, 0, true);
+-error:
+       if (ret)
+-              i40iw_free_dma_mem(dev->hw, &rsrc->cqmem);
+-      return ret;
++              i40iw_debug(dev, I40IW_DEBUG_PUDA,
++                          "%s error ieq cq destroy\n",
++                          __func__);
++
++      if (!ret) {
++              ret = dev->cqp_ops->poll_for_cqp_op_done(dev->cqp,
++                              I40IW_CQP_OP_DESTROY_CQ,
++                              &compl_info);
++              if (ret)
++                      i40iw_debug(dev, I40IW_DEBUG_PUDA,
++                                  "%s error ieq qp destroy done\n",
++                                  __func__);
++      }
+ }
+ /**
+@@ -702,25 +785,24 @@ static enum i40iw_status_code i40iw_puda_cq_create(struct i40iw_puda_rsrc *rsrc)
+  * @type: type of resource to dele
+  * @reset: true if reset chip
+  */
+-void i40iw_puda_dele_resources(struct i40iw_sc_dev *dev,
++void i40iw_puda_dele_resources(struct i40iw_sc_vsi *vsi,
+                              enum puda_resource_type type,
+                              bool reset)
+ {
+-      struct i40iw_ccq_cqe_info compl_info;
++      struct i40iw_sc_dev *dev = vsi->dev;
+       struct i40iw_puda_rsrc *rsrc;
+       struct i40iw_puda_buf *buf = NULL;
+       struct i40iw_puda_buf *nextbuf = NULL;
+       struct i40iw_virt_mem *vmem;
+-      enum i40iw_status_code ret;
+       switch (type) {
+       case I40IW_PUDA_RSRC_TYPE_ILQ:
+-              rsrc = dev->ilq;
+-              vmem = &dev->ilq_mem;
++              rsrc = vsi->ilq;
++              vmem = &vsi->ilq_mem;
+               break;
+       case I40IW_PUDA_RSRC_TYPE_IEQ:
+-              rsrc = dev->ieq;
+-              vmem = &dev->ieq_mem;
++              rsrc = vsi->ieq;
++              vmem = &vsi->ieq_mem;
+               break;
+       default:
+               i40iw_debug(dev, I40IW_DEBUG_PUDA, "%s: error resource type = 0x%x\n",
+@@ -732,45 +814,14 @@ void i40iw_puda_dele_resources(struct i40iw_sc_dev *dev,
+       case PUDA_HASH_CRC_COMPLETE:
+               i40iw_free_hash_desc(rsrc->hash_desc);
+       case PUDA_QP_CREATED:
+-              do {
+-                      if (reset)
+-                              break;
+-                      ret = dev->iw_priv_qp_ops->qp_destroy(&rsrc->qp,
+-                                                            0, false, true, true);
+-                      if (ret)
+-                              i40iw_debug(rsrc->dev, I40IW_DEBUG_PUDA,
+-                                          "%s error ieq qp destroy\n",
+-                                          __func__);
+-
+-                      ret = dev->cqp_ops->poll_for_cqp_op_done(dev->cqp,
+-                                                               I40IW_CQP_OP_DESTROY_QP,
+-                                                               &compl_info);
+-                      if (ret)
+-                              i40iw_debug(rsrc->dev, I40IW_DEBUG_PUDA,
+-                                          "%s error ieq qp destroy done\n",
+-                                          __func__);
+-              } while (0);
++              if (!reset)
++                      i40iw_puda_free_qp(rsrc);
+               i40iw_free_dma_mem(dev->hw, &rsrc->qpmem);
+               /* fallthrough */
+       case PUDA_CQ_CREATED:
+-              do {
+-                      if (reset)
+-                              break;
+-                      ret = dev->iw_priv_cq_ops->cq_destroy(&rsrc->cq, 0, true);
+-                      if (ret)
+-                              i40iw_debug(rsrc->dev, I40IW_DEBUG_PUDA,
+-                                          "%s error ieq cq destroy\n",
+-                                          __func__);
+-
+-                      ret = dev->cqp_ops->poll_for_cqp_op_done(dev->cqp,
+-                                                               I40IW_CQP_OP_DESTROY_CQ,
+-                                                               &compl_info);
+-                      if (ret)
+-                              i40iw_debug(rsrc->dev, I40IW_DEBUG_PUDA,
+-                                          "%s error ieq qp destroy done\n",
+-                                          __func__);
+-              } while (0);
++              if (!reset)
++                      i40iw_puda_free_cq(rsrc);
+               i40iw_free_dma_mem(dev->hw, &rsrc->cqmem);
+               break;
+@@ -826,9 +877,10 @@ static enum i40iw_status_code i40iw_puda_allocbufs(struct i40iw_puda_rsrc *rsrc,
+  * @dev: iwarp device
+  * @info: resource information
+  */
+-enum i40iw_status_code i40iw_puda_create_rsrc(struct i40iw_sc_dev *dev,
++enum i40iw_status_code i40iw_puda_create_rsrc(struct i40iw_sc_vsi *vsi,
+                                             struct i40iw_puda_rsrc_info *info)
+ {
++      struct i40iw_sc_dev *dev = vsi->dev;
+       enum i40iw_status_code ret = 0;
+       struct i40iw_puda_rsrc *rsrc;
+       u32 pudasize;
+@@ -841,10 +893,10 @@ enum i40iw_status_code i40iw_puda_create_rsrc(struct i40iw_sc_dev *dev,
+       rqwridsize = info->rq_size * 8;
+       switch (info->type) {
+       case I40IW_PUDA_RSRC_TYPE_ILQ:
+-              vmem = &dev->ilq_mem;
++              vmem = &vsi->ilq_mem;
+               break;
+       case I40IW_PUDA_RSRC_TYPE_IEQ:
+-              vmem = &dev->ieq_mem;
++              vmem = &vsi->ieq_mem;
+               break;
+       default:
+               return I40IW_NOT_SUPPORTED;
+@@ -857,22 +909,22 @@ enum i40iw_status_code i40iw_puda_create_rsrc(struct i40iw_sc_dev *dev,
+       rsrc = (struct i40iw_puda_rsrc *)vmem->va;
+       spin_lock_init(&rsrc->bufpool_lock);
+       if (info->type == I40IW_PUDA_RSRC_TYPE_ILQ) {
+-              dev->ilq = (struct i40iw_puda_rsrc *)vmem->va;
+-              dev->ilq_count = info->count;
++              vsi->ilq = (struct i40iw_puda_rsrc *)vmem->va;
++              vsi->ilq_count = info->count;
+               rsrc->receive = info->receive;
+               rsrc->xmit_complete = info->xmit_complete;
+       } else {
+-              vmem = &dev->ieq_mem;
+-              dev->ieq_count = info->count;
+-              dev->ieq = (struct i40iw_puda_rsrc *)vmem->va;
++              vmem = &vsi->ieq_mem;
++              vsi->ieq_count = info->count;
++              vsi->ieq = (struct i40iw_puda_rsrc *)vmem->va;
+               rsrc->receive = i40iw_ieq_receive;
+               rsrc->xmit_complete = i40iw_ieq_tx_compl;
+       }
++      rsrc->ceq_valid = info->ceq_valid;
+       rsrc->type = info->type;
+       rsrc->sq_wrtrk_array = (struct i40iw_sq_uk_wr_trk_info *)((u8 *)vmem->va + pudasize);
+       rsrc->rq_wrid_array = (u64 *)((u8 *)vmem->va + pudasize + sqwridsize);
+-      rsrc->mss = info->mss;
+       /* Initialize all ieq lists */
+       INIT_LIST_HEAD(&rsrc->bufpool);
+       INIT_LIST_HEAD(&rsrc->txpend);
+@@ -886,6 +938,7 @@ enum i40iw_status_code i40iw_puda_create_rsrc(struct i40iw_sc_dev *dev,
+       rsrc->cq_size = info->rq_size + info->sq_size;
+       rsrc->buf_size = info->buf_size;
+       rsrc->dev = dev;
++      rsrc->vsi = vsi;
+       ret = i40iw_puda_cq_create(rsrc);
+       if (!ret) {
+@@ -920,7 +973,7 @@ enum i40iw_status_code i40iw_puda_create_rsrc(struct i40iw_sc_dev *dev,
+       dev->ccq_ops->ccq_arm(&rsrc->cq);
+       return ret;
+  error:
+-      i40iw_puda_dele_resources(dev, info->type, false);
++      i40iw_puda_dele_resources(vsi, info->type, false);
+       return ret;
+ }
+@@ -1333,7 +1386,7 @@ static void i40iw_ieq_handle_exception(struct i40iw_puda_rsrc *ieq,
+       }
+       if (pfpdu->mode && (fps != pfpdu->fps)) {
+               /* clean up qp as it is new partial sequence */
+-              i40iw_ieq_cleanup_qp(ieq->dev, qp);
++              i40iw_ieq_cleanup_qp(ieq, qp);
+               i40iw_debug(ieq->dev, I40IW_DEBUG_IEQ,
+                           "%s: restarting new partial\n", __func__);
+               pfpdu->mode = false;
+@@ -1345,7 +1398,7 @@ static void i40iw_ieq_handle_exception(struct i40iw_puda_rsrc *ieq,
+               pfpdu->rcv_nxt = fps;
+               pfpdu->fps = fps;
+               pfpdu->mode = true;
+-              pfpdu->max_fpdu_data = ieq->mss;
++              pfpdu->max_fpdu_data = ieq->vsi->mss;
+               pfpdu->pmode_count++;
+               INIT_LIST_HEAD(rxlist);
+               i40iw_ieq_check_first_buf(buf, fps);
+@@ -1380,14 +1433,14 @@ static void i40iw_ieq_handle_exception(struct i40iw_puda_rsrc *ieq,
+  * @dev: iwarp device
+  * @buf: exception buffer received
+  */
+-static void i40iw_ieq_receive(struct i40iw_sc_dev *dev,
++static void i40iw_ieq_receive(struct i40iw_sc_vsi *vsi,
+                             struct i40iw_puda_buf *buf)
+ {
+-      struct i40iw_puda_rsrc *ieq = dev->ieq;
++      struct i40iw_puda_rsrc *ieq = vsi->ieq;
+       struct i40iw_sc_qp *qp = NULL;
+       u32 wqe_idx = ieq->compl_rxwqe_idx;
+-      qp = i40iw_ieq_get_qp(dev, buf);
++      qp = i40iw_ieq_get_qp(vsi->dev, buf);
+       if (!qp) {
+               ieq->stats_bad_qp_id++;
+               i40iw_puda_ret_bufpool(ieq, buf);
+@@ -1405,12 +1458,12 @@ static void i40iw_ieq_receive(struct i40iw_sc_dev *dev,
+ /**
+  * i40iw_ieq_tx_compl - put back after sending completed exception buffer
+- * @dev: iwarp device
++ * @vsi: pointer to the vsi structure
+  * @sqwrid: pointer to puda buffer
+  */
+-static void i40iw_ieq_tx_compl(struct i40iw_sc_dev *dev, void *sqwrid)
++static void i40iw_ieq_tx_compl(struct i40iw_sc_vsi *vsi, void *sqwrid)
+ {
+-      struct i40iw_puda_rsrc *ieq = dev->ieq;
++      struct i40iw_puda_rsrc *ieq = vsi->ieq;
+       struct i40iw_puda_buf *buf = (struct i40iw_puda_buf *)sqwrid;
+       i40iw_puda_ret_bufpool(ieq, buf);
+@@ -1422,15 +1475,14 @@ static void i40iw_ieq_tx_compl(struct i40iw_sc_dev *dev, void *sqwrid)
+ /**
+  * i40iw_ieq_cleanup_qp - qp is being destroyed
+- * @dev: iwarp device
++ * @ieq: ieq resource
+  * @qp: all pending fpdu buffers
+  */
+-void i40iw_ieq_cleanup_qp(struct i40iw_sc_dev *dev, struct i40iw_sc_qp *qp)
++static void i40iw_ieq_cleanup_qp(struct i40iw_puda_rsrc *ieq, struct i40iw_sc_qp *qp)
+ {
+       struct i40iw_puda_buf *buf;
+       struct i40iw_pfpdu *pfpdu = &qp->pfpdu;
+       struct list_head *rxlist = &pfpdu->rxlist;
+-      struct i40iw_puda_rsrc *ieq = dev->ieq;
+       if (!pfpdu->mode)
+               return;
+diff --git a/drivers/infiniband/hw/i40iw/i40iw_puda.h b/drivers/infiniband/hw/i40iw/i40iw_puda.h
+index 52bf782..dba05ce 100644
+--- a/drivers/infiniband/hw/i40iw/i40iw_puda.h
++++ b/drivers/infiniband/hw/i40iw/i40iw_puda.h
+@@ -100,6 +100,7 @@ struct i40iw_puda_rsrc_info {
+       enum puda_resource_type type;   /* ILQ or IEQ */
+       u32 count;
+       u16 pd_id;
++      bool ceq_valid;
+       u32 cq_id;
+       u32 qp_id;
+       u32 sq_size;
+@@ -107,8 +108,8 @@ struct i40iw_puda_rsrc_info {
+       u16 buf_size;
+       u16 mss;
+       u32 tx_buf_cnt;         /* total bufs allocated will be rq_size + tx_buf_cnt */
+-      void (*receive)(struct i40iw_sc_dev *, struct i40iw_puda_buf *);
+-      void (*xmit_complete)(struct i40iw_sc_dev *, void *);
++      void (*receive)(struct i40iw_sc_vsi *, struct i40iw_puda_buf *);
++      void (*xmit_complete)(struct i40iw_sc_vsi *, void *);
+ };
+ struct i40iw_puda_rsrc {
+@@ -116,6 +117,7 @@ struct i40iw_puda_rsrc {
+       struct i40iw_sc_qp qp;
+       struct i40iw_sc_pd sc_pd;
+       struct i40iw_sc_dev *dev;
++      struct i40iw_sc_vsi *vsi;
+       struct i40iw_dma_mem cqmem;
+       struct i40iw_dma_mem qpmem;
+       struct i40iw_virt_mem ilq_mem;
+@@ -123,6 +125,7 @@ struct i40iw_puda_rsrc {
+       enum puda_resource_type type;
+       u16 buf_size;           /*buffer must be max datalen + tcpip hdr + mac */
+       u16 mss;
++      bool ceq_valid;
+       u32 cq_id;
+       u32 qp_id;
+       u32 sq_size;
+@@ -142,8 +145,8 @@ struct i40iw_puda_rsrc {
+       u32 avail_buf_count;            /* snapshot of currently available buffers */
+       spinlock_t bufpool_lock;
+       struct i40iw_puda_buf *alloclist;
+-      void (*receive)(struct i40iw_sc_dev *, struct i40iw_puda_buf *);
+-      void (*xmit_complete)(struct i40iw_sc_dev *, void *);
++      void (*receive)(struct i40iw_sc_vsi *, struct i40iw_puda_buf *);
++      void (*xmit_complete)(struct i40iw_sc_vsi *, void *);
+       /* puda stats */
+       u64 stats_buf_alloc_fail;
+       u64 stats_pkt_rcvd;
+@@ -160,14 +163,13 @@ void i40iw_puda_send_buf(struct i40iw_puda_rsrc *rsrc,
+                        struct i40iw_puda_buf *buf);
+ enum i40iw_status_code i40iw_puda_send(struct i40iw_sc_qp *qp,
+                                      struct i40iw_puda_send_info *info);
+-enum i40iw_status_code i40iw_puda_create_rsrc(struct i40iw_sc_dev *dev,
++enum i40iw_status_code i40iw_puda_create_rsrc(struct i40iw_sc_vsi *vsi,
+                                             struct i40iw_puda_rsrc_info *info);
+-void i40iw_puda_dele_resources(struct i40iw_sc_dev *dev,
++void i40iw_puda_dele_resources(struct i40iw_sc_vsi *vsi,
+                              enum puda_resource_type type,
+                              bool reset);
+ enum i40iw_status_code i40iw_puda_poll_completion(struct i40iw_sc_dev *dev,
+                                                 struct i40iw_sc_cq *cq, u32 *compl_err);
+-void i40iw_ieq_cleanup_qp(struct i40iw_sc_dev *dev, struct i40iw_sc_qp *qp);
+ struct i40iw_sc_qp *i40iw_ieq_get_qp(struct i40iw_sc_dev *dev,
+                                    struct i40iw_puda_buf *buf);
+@@ -180,4 +182,8 @@ enum i40iw_status_code i40iw_ieq_check_mpacrc(struct shash_desc *desc,
+ void i40iw_free_hash_desc(struct shash_desc *desc);
+ void i40iw_ieq_update_tcpip_info(struct i40iw_puda_buf *buf, u16 length,
+                                u32 seqnum);
++enum i40iw_status_code i40iw_cqp_qp_create_cmd(struct i40iw_sc_dev *dev, struct i40iw_sc_qp *qp);
++enum i40iw_status_code i40iw_cqp_cq_create_cmd(struct i40iw_sc_dev *dev, struct i40iw_sc_cq *cq);
++void i40iw_cqp_qp_destroy_cmd(struct i40iw_sc_dev *dev, struct i40iw_sc_qp *qp);
++void i40iw_cqp_cq_destroy_cmd(struct i40iw_sc_dev *dev, struct i40iw_sc_cq *cq);
+ #endif
+diff --git a/drivers/infiniband/hw/i40iw/i40iw_type.h b/drivers/infiniband/hw/i40iw/i40iw_type.h
+index 928d91b..f3f8e9c 100644
+--- a/drivers/infiniband/hw/i40iw/i40iw_type.h
++++ b/drivers/infiniband/hw/i40iw/i40iw_type.h
+@@ -61,7 +61,7 @@ struct i40iw_cq_shadow_area {
+ struct i40iw_sc_dev;
+ struct i40iw_hmc_info;
+-struct i40iw_dev_pestat;
++struct i40iw_vsi_pestat;
+ struct i40iw_cqp_ops;
+ struct i40iw_ccq_ops;
+@@ -191,7 +191,7 @@ enum i40iw_debug_flag {
+       I40IW_DEBUG_ALL         = 0xFFFFFFFF
+ };
+-enum i40iw_hw_stat_index_32b {
++enum i40iw_hw_stats_index_32b {
+       I40IW_HW_STAT_INDEX_IP4RXDISCARD = 0,
+       I40IW_HW_STAT_INDEX_IP4RXTRUNC,
+       I40IW_HW_STAT_INDEX_IP4TXNOROUTE,
+@@ -204,7 +204,7 @@ enum i40iw_hw_stat_index_32b {
+       I40IW_HW_STAT_INDEX_MAX_32
+ };
+-enum i40iw_hw_stat_index_64b {
++enum i40iw_hw_stats_index_64b {
+       I40IW_HW_STAT_INDEX_IP4RXOCTS = 0,
+       I40IW_HW_STAT_INDEX_IP4RXPKTS,
+       I40IW_HW_STAT_INDEX_IP4RXFRAGS,
+@@ -234,32 +234,23 @@ enum i40iw_hw_stat_index_64b {
+       I40IW_HW_STAT_INDEX_MAX_64
+ };
+-struct i40iw_dev_hw_stat_offsets {
+-      u32 stat_offset_32[I40IW_HW_STAT_INDEX_MAX_32];
+-      u32 stat_offset_64[I40IW_HW_STAT_INDEX_MAX_64];
++struct i40iw_dev_hw_stats_offsets {
++      u32 stats_offset_32[I40IW_HW_STAT_INDEX_MAX_32];
++      u32 stats_offset_64[I40IW_HW_STAT_INDEX_MAX_64];
+ };
+ struct i40iw_dev_hw_stats {
+-      u64 stat_value_32[I40IW_HW_STAT_INDEX_MAX_32];
+-      u64 stat_value_64[I40IW_HW_STAT_INDEX_MAX_64];
++      u64 stats_value_32[I40IW_HW_STAT_INDEX_MAX_32];
++      u64 stats_value_64[I40IW_HW_STAT_INDEX_MAX_64];
+ };
+-struct i40iw_device_pestat_ops {
+-      void (*iw_hw_stat_init)(struct i40iw_dev_pestat *, u8, struct i40iw_hw *, bool);
+-      void (*iw_hw_stat_read_32)(struct i40iw_dev_pestat *, enum i40iw_hw_stat_index_32b, u64 *);
+-      void (*iw_hw_stat_read_64)(struct i40iw_dev_pestat *, enum i40iw_hw_stat_index_64b, u64 *);
+-      void (*iw_hw_stat_read_all)(struct i40iw_dev_pestat *, struct i40iw_dev_hw_stats *);
+-      void (*iw_hw_stat_refresh_all)(struct i40iw_dev_pestat *);
+-};
+-
+-struct i40iw_dev_pestat {
++struct i40iw_vsi_pestat {
+       struct i40iw_hw *hw;
+-      struct i40iw_device_pestat_ops ops;
+       struct i40iw_dev_hw_stats hw_stats;
+       struct i40iw_dev_hw_stats last_read_hw_stats;
+-      struct i40iw_dev_hw_stat_offsets hw_stat_offsets;
++      struct i40iw_dev_hw_stats_offsets hw_stats_offsets;
+       struct timer_list stats_timer;
+-      spinlock_t stats_lock; /* rdma stats lock */
++      spinlock_t lock; /* rdma stats lock */
+ };
+ struct i40iw_hw {
+@@ -355,6 +346,7 @@ struct i40iw_sc_cq {
+       u64 cq_pa;
+       u64 shadow_area_pa;
+       struct i40iw_sc_dev *dev;
++      struct i40iw_sc_vsi *vsi;
+       void *pbl_list;
+       void *back_cq;
+       u32 ceq_id;
+@@ -378,6 +370,7 @@ struct i40iw_sc_qp {
+       u64 shadow_area_pa;
+       u64 q2_pa;
+       struct i40iw_sc_dev *dev;
++      struct i40iw_sc_vsi *vsi;
+       struct i40iw_sc_pd *pd;
+       u64 *hw_host_ctx;
+       void *llp_stream_handle;
+@@ -441,7 +434,7 @@ struct i40iw_qos {
+ struct i40iw_vfdev {
+       struct i40iw_sc_dev *pf_dev;
+       u8 *hmc_info_mem;
+-      struct i40iw_dev_pestat dev_pestat;
++      struct i40iw_vsi_pestat pestat;
+       struct i40iw_hmc_pble_info *pble_info;
+       struct i40iw_hmc_info hmc_info;
+       struct i40iw_vchnl_vf_msg_buffer vf_msg_buffer;
+@@ -455,11 +448,28 @@ struct i40iw_vfdev {
+       bool stats_initialized;
+ };
++#define I40IW_INVALID_FCN_ID 0xff
++struct i40iw_sc_vsi {
++      struct i40iw_sc_dev *dev;
++      void *back_vsi; /* Owned by OS */
++      u32 ilq_count;
++      struct i40iw_virt_mem ilq_mem;
++      struct i40iw_puda_rsrc *ilq;
++      u32 ieq_count;
++      struct i40iw_virt_mem ieq_mem;
++      struct i40iw_puda_rsrc *ieq;
++      u16 mss;
++      u8 fcn_id;
++      bool stats_fcn_id_alloc;
++      struct i40iw_qos qos[I40IW_MAX_USER_PRIORITY];
++      struct i40iw_vsi_pestat *pestat;
++};
++
+ struct i40iw_sc_dev {
+       struct list_head cqp_cmd_head;  /* head of the CQP command list */
+       spinlock_t cqp_lock; /* cqp list sync */
+       struct i40iw_dev_uk dev_uk;
+-      struct i40iw_dev_pestat dev_pestat;
++      bool fcn_id_array[I40IW_MAX_STATS_COUNT];
+       struct i40iw_dma_mem vf_fpm_query_buf[I40IW_MAX_PE_ENABLED_VF_COUNT];
+       u64 fpm_query_buf_pa;
+       u64 fpm_commit_buf_pa;
+@@ -486,18 +496,9 @@ struct i40iw_sc_dev {
+       struct i40iw_cqp_misc_ops *cqp_misc_ops;
+       struct i40iw_hmc_ops *hmc_ops;
+       struct i40iw_vchnl_if vchnl_if;
+-      u32 ilq_count;
+-      struct i40iw_virt_mem ilq_mem;
+-      struct i40iw_puda_rsrc *ilq;
+-      u32 ieq_count;
+-      struct i40iw_virt_mem ieq_mem;
+-      struct i40iw_puda_rsrc *ieq;
+-
+       const struct i40iw_vf_cqp_ops *iw_vf_cqp_ops;
+       struct i40iw_hmc_fpm_misc hmc_fpm_misc;
+-      struct i40iw_qos qos[I40IW_MAX_USER_PRIORITY];
+-      u16 mss;
+       u32 debug_mask;
+       u16 exception_lan_queue;
+       u8 hmc_fn_id;
+@@ -571,6 +572,19 @@ struct i40iw_l2params {
+       u16 mss;
+ };
++struct i40iw_vsi_init_info {
++      struct i40iw_sc_dev *dev;
++      void  *back_vsi;
++      struct i40iw_l2params *params;
++};
++
++struct i40iw_vsi_stats_info {
++      struct i40iw_vsi_pestat *pestat;
++      u8 fcn_id;
++      bool alloc_fcn_id;
++      bool stats_initialize;
++};
++
+ struct i40iw_device_init_info {
+       u64 fpm_query_buf_pa;
+       u64 fpm_commit_buf_pa;
+@@ -579,7 +593,6 @@ struct i40iw_device_init_info {
+       struct i40iw_hw *hw;
+       void __iomem *bar0;
+       enum i40iw_status_code (*vchnl_send)(struct i40iw_sc_dev *, u32, u8 *, u16);
+-      struct i40iw_l2params l2params;
+       u16 exception_lan_queue;
+       u8 hmc_fn_id;
+       bool is_pf;
+@@ -831,6 +844,7 @@ struct i40iw_register_shared_stag {
+ struct i40iw_qp_init_info {
+       struct i40iw_qp_uk_init_info qp_uk_init_info;
+       struct i40iw_sc_pd *pd;
++      struct i40iw_sc_vsi *vsi;
+       u64 *host_ctx;
+       u8 *q2;
+       u64 sq_pa;
+@@ -897,6 +911,7 @@ enum i40iw_quad_hash_manage_type {
+ };
+ struct i40iw_qhash_table_info {
++      struct i40iw_sc_vsi *vsi;
+       enum i40iw_quad_hash_manage_type manage;
+       enum i40iw_quad_entry_type entry_type;
+       bool vlan_valid;
+diff --git a/drivers/infiniband/hw/i40iw/i40iw_utils.c b/drivers/infiniband/hw/i40iw/i40iw_utils.c
+index 7d4af77..0f5d43d 100644
+--- a/drivers/infiniband/hw/i40iw/i40iw_utils.c
++++ b/drivers/infiniband/hw/i40iw/i40iw_utils.c
+@@ -761,7 +761,7 @@ void i40iw_qp_mss_modify(struct i40iw_sc_dev *dev, struct i40iw_sc_qp *qp)
+       memset(&info, 0, sizeof(info));
+       info.mss_change = true;
+-      info.new_mss = dev->mss;
++      info.new_mss = qp->vsi->mss;
+       i40iw_hw_modify_qp(iwdev, iwqp, &info, false);
+ }
+@@ -1068,6 +1068,116 @@ enum i40iw_status_code i40iw_vf_wait_vchnl_resp(struct i40iw_sc_dev *dev)
+ }
+ /**
++ * i40iw_cqp_cq_create_cmd - create a cq for the cqp
++ * @dev: device pointer
++ * @cq: pointer to created cq
++ */
++enum i40iw_status_code i40iw_cqp_cq_create_cmd(struct i40iw_sc_dev *dev,
++                                             struct i40iw_sc_cq *cq)
++{
++      struct i40iw_device *iwdev = (struct i40iw_device *)dev->back_dev;
++      struct i40iw_cqp *iwcqp = &iwdev->cqp;
++      struct i40iw_cqp_request *cqp_request;
++      struct cqp_commands_info *cqp_info;
++      enum i40iw_status_code status;
++
++      cqp_request = i40iw_get_cqp_request(iwcqp, true);
++      if (!cqp_request)
++              return I40IW_ERR_NO_MEMORY;
++
++      cqp_info = &cqp_request->info;
++      cqp_info->cqp_cmd = OP_CQ_CREATE;
++      cqp_info->post_sq = 1;
++      cqp_info->in.u.cq_create.cq = cq;
++      cqp_info->in.u.cq_create.scratch = (uintptr_t)cqp_request;
++      status = i40iw_handle_cqp_op(iwdev, cqp_request);
++      if (status)
++              i40iw_pr_err("CQP-OP Create QP fail");
++
++      return status;
++}
++
++/**
++ * i40iw_cqp_qp_create_cmd - create a qp for the cqp
++ * @dev: device pointer
++ * @qp: pointer to created qp
++ */
++enum i40iw_status_code i40iw_cqp_qp_create_cmd(struct i40iw_sc_dev *dev,
++                                             struct i40iw_sc_qp *qp)
++{
++      struct i40iw_device *iwdev = (struct i40iw_device *)dev->back_dev;
++      struct i40iw_cqp *iwcqp = &iwdev->cqp;
++      struct i40iw_cqp_request *cqp_request;
++      struct cqp_commands_info *cqp_info;
++      struct i40iw_create_qp_info *qp_info;
++      enum i40iw_status_code status;
++
++      cqp_request = i40iw_get_cqp_request(iwcqp, true);
++      if (!cqp_request)
++              return I40IW_ERR_NO_MEMORY;
++
++      cqp_info = &cqp_request->info;
++      qp_info = &cqp_request->info.in.u.qp_create.info;
++
++      memset(qp_info, 0, sizeof(*qp_info));
++
++      qp_info->cq_num_valid = true;
++      qp_info->next_iwarp_state = I40IW_QP_STATE_RTS;
++
++      cqp_info->cqp_cmd = OP_QP_CREATE;
++      cqp_info->post_sq = 1;
++      cqp_info->in.u.qp_create.qp = qp;
++      cqp_info->in.u.qp_create.scratch = (uintptr_t)cqp_request;
++      status = i40iw_handle_cqp_op(iwdev, cqp_request);
++      if (status)
++              i40iw_pr_err("CQP-OP QP create fail");
++      return status;
++}
++
++/**
++ * i40iw_cqp_cq_destroy_cmd - destroy the cqp cq
++ * @dev: device pointer
++ * @cq: pointer to cq
++ */
++void i40iw_cqp_cq_destroy_cmd(struct i40iw_sc_dev *dev, struct i40iw_sc_cq *cq)
++{
++      struct i40iw_device *iwdev = (struct i40iw_device *)dev->back_dev;
++
++      i40iw_cq_wq_destroy(iwdev, cq);
++}
++
++/**
++ * i40iw_cqp_qp_destroy_cmd - destroy the cqp
++ * @dev: device pointer
++ * @qp: pointer to qp
++ */
++void i40iw_cqp_qp_destroy_cmd(struct i40iw_sc_dev *dev, struct i40iw_sc_qp *qp)
++{
++      struct i40iw_device *iwdev = (struct i40iw_device *)dev->back_dev;
++      struct i40iw_cqp *iwcqp = &iwdev->cqp;
++      struct i40iw_cqp_request *cqp_request;
++      struct cqp_commands_info *cqp_info;
++      enum i40iw_status_code status;
++
++      cqp_request = i40iw_get_cqp_request(iwcqp, true);
++      if (!cqp_request)
++              return;
++
++      cqp_info = &cqp_request->info;
++      memset(cqp_info, 0, sizeof(*cqp_info));
++
++      cqp_info->cqp_cmd = OP_QP_DESTROY;
++      cqp_info->post_sq = 1;
++      cqp_info->in.u.qp_destroy.qp = qp;
++      cqp_info->in.u.qp_destroy.scratch = (uintptr_t)cqp_request;
++      cqp_info->in.u.qp_destroy.remove_hash_idx = true;
++      status = i40iw_handle_cqp_op(iwdev, cqp_request);
++      if (status)
++              i40iw_pr_err("CQP QP_DESTROY fail");
++}
++
++
++/**
+  * i40iw_ieq_mpa_crc_ae - generate AE for crc error
+  * @dev: hardware control device structure
+  * @qp: hardware control qp
+@@ -1281,27 +1391,29 @@ enum i40iw_status_code i40iw_puda_get_tcpip_info(struct i40iw_puda_completion_in
+ /**
+  * i40iw_hw_stats_timeout - Stats timer-handler which updates all HW stats
+- * @dev: hardware control device structure
++ * @vsi: pointer to the vsi structure
+  */
+-static void i40iw_hw_stats_timeout(unsigned long dev)
++static void i40iw_hw_stats_timeout(unsigned long vsi)
+ {
+-      struct i40iw_sc_dev *pf_dev = (struct i40iw_sc_dev *)dev;
+-      struct i40iw_dev_pestat *pf_devstat = &pf_dev->dev_pestat;
+-      struct i40iw_dev_pestat *vf_devstat = NULL;
++      struct i40iw_sc_vsi *sc_vsi =  (struct i40iw_sc_vsi *)vsi;
++      struct i40iw_sc_dev *pf_dev = sc_vsi->dev;
++      struct i40iw_vsi_pestat *pf_devstat = sc_vsi->pestat;
++      struct i40iw_vsi_pestat *vf_devstat = NULL;
+       u16 iw_vf_idx;
+       unsigned long flags;
+       /*PF*/
+-      pf_devstat->ops.iw_hw_stat_read_all(pf_devstat, &pf_devstat->hw_stats);
++      i40iw_hw_stats_read_all(pf_devstat, &pf_devstat->hw_stats);
++
+       for (iw_vf_idx = 0; iw_vf_idx < I40IW_MAX_PE_ENABLED_VF_COUNT; iw_vf_idx++) {
+-              spin_lock_irqsave(&pf_devstat->stats_lock, flags);
++              spin_lock_irqsave(&pf_devstat->lock, flags);
+               if (pf_dev->vf_dev[iw_vf_idx]) {
+                       if (pf_dev->vf_dev[iw_vf_idx]->stats_initialized) {
+-                              vf_devstat = &pf_dev->vf_dev[iw_vf_idx]->dev_pestat;
+-                              vf_devstat->ops.iw_hw_stat_read_all(vf_devstat, &vf_devstat->hw_stats);
++                              vf_devstat = &pf_dev->vf_dev[iw_vf_idx]->pestat;
++                              i40iw_hw_stats_read_all(vf_devstat, &vf_devstat->hw_stats);
+                       }
+               }
+-              spin_unlock_irqrestore(&pf_devstat->stats_lock, flags);
++              spin_unlock_irqrestore(&pf_devstat->lock, flags);
+       }
+       mod_timer(&pf_devstat->stats_timer,
+@@ -1310,26 +1422,26 @@ static void i40iw_hw_stats_timeout(unsigned long dev)
+ /**
+  * i40iw_hw_stats_start_timer - Start periodic stats timer
+- * @dev: hardware control device structure
++ * @vsi: pointer to the vsi structure
+  */
+-void i40iw_hw_stats_start_timer(struct i40iw_sc_dev *dev)
++void i40iw_hw_stats_start_timer(struct i40iw_sc_vsi *vsi)
+ {
+-      struct i40iw_dev_pestat *devstat = &dev->dev_pestat;
++      struct i40iw_vsi_pestat *devstat = vsi->pestat;
+       init_timer(&devstat->stats_timer);
+       devstat->stats_timer.function = i40iw_hw_stats_timeout;
+-      devstat->stats_timer.data = (unsigned long)dev;
++      devstat->stats_timer.data = (unsigned long)vsi;
+       mod_timer(&devstat->stats_timer,
+                 jiffies + msecs_to_jiffies(STATS_TIMER_DELAY));
+ }
+ /**
+- * i40iw_hw_stats_del_timer - Delete periodic stats timer
+- * @dev: hardware control device structure
++ * i40iw_hw_stats_stop_timer - Delete periodic stats timer
++ * @vsi: pointer to the vsi structure
+  */
+-void i40iw_hw_stats_del_timer(struct i40iw_sc_dev *dev)
++void i40iw_hw_stats_stop_timer(struct i40iw_sc_vsi *vsi)
+ {
+-      struct i40iw_dev_pestat *devstat = &dev->dev_pestat;
++      struct i40iw_vsi_pestat *devstat = vsi->pestat;
+       del_timer_sync(&devstat->stats_timer);
+ }
+diff --git a/drivers/infiniband/hw/i40iw/i40iw_verbs.c b/drivers/infiniband/hw/i40iw/i40iw_verbs.c
+index 18526e6..855e499 100644
+--- a/drivers/infiniband/hw/i40iw/i40iw_verbs.c
++++ b/drivers/infiniband/hw/i40iw/i40iw_verbs.c
+@@ -623,6 +623,7 @@ static struct ib_qp *i40iw_create_qp(struct ib_pd *ibpd,
+       sq_size = init_attr->cap.max_send_wr;
+       rq_size = init_attr->cap.max_recv_wr;
++      init_info.vsi = &iwdev->vsi;
+       init_info.qp_uk_init_info.sq_size = sq_size;
+       init_info.qp_uk_init_info.rq_size = rq_size;
+       init_info.qp_uk_init_info.max_sq_frag_cnt = init_attr->cap.max_send_sge;
+@@ -1052,11 +1053,11 @@ static void cq_free_resources(struct i40iw_device *iwdev, struct i40iw_cq *iwcq)
+ }
+ /**
+- * cq_wq_destroy - send cq destroy cqp
++ * i40iw_cq_wq_destroy - send cq destroy cqp
+  * @iwdev: iwarp device
+  * @cq: hardware control cq
+  */
+-static void cq_wq_destroy(struct i40iw_device *iwdev, struct i40iw_sc_cq *cq)
++void i40iw_cq_wq_destroy(struct i40iw_device *iwdev, struct i40iw_sc_cq *cq)
+ {
+       enum i40iw_status_code status;
+       struct i40iw_cqp_request *cqp_request;
+@@ -1095,7 +1096,7 @@ static int i40iw_destroy_cq(struct ib_cq *ib_cq)
+       iwcq = to_iwcq(ib_cq);
+       iwdev = to_iwdev(ib_cq->device);
+       cq = &iwcq->sc_cq;
+-      cq_wq_destroy(iwdev, cq);
++      i40iw_cq_wq_destroy(iwdev, cq);
+       cq_free_resources(iwdev, iwcq);
+       kfree(iwcq);
+       i40iw_rem_devusecount(iwdev);
+@@ -1253,7 +1254,7 @@ static struct ib_cq *i40iw_create_cq(struct ib_device *ibdev,
+       return (struct ib_cq *)iwcq;
+ cq_destroy:
+-      cq_wq_destroy(iwdev, cq);
++      i40iw_cq_wq_destroy(iwdev, cq);
+ cq_free_resources:
+       cq_free_resources(iwdev, iwcq);
+ error:
+@@ -2632,15 +2633,11 @@ static int i40iw_get_hw_stats(struct ib_device *ibdev,
+ {
+       struct i40iw_device *iwdev = to_iwdev(ibdev);
+       struct i40iw_sc_dev *dev = &iwdev->sc_dev;
+-      struct i40iw_dev_pestat *devstat = &dev->dev_pestat;
++      struct i40iw_vsi_pestat *devstat = iwdev->vsi.pestat;
+       struct i40iw_dev_hw_stats *hw_stats = &devstat->hw_stats;
+-      unsigned long flags;
+       if (dev->is_pf) {
+-              spin_lock_irqsave(&devstat->stats_lock, flags);
+-              devstat->ops.iw_hw_stat_read_all(devstat,
+-                      &devstat->hw_stats);
+-              spin_unlock_irqrestore(&devstat->stats_lock, flags);
++              i40iw_hw_stats_read_all(devstat, &devstat->hw_stats);
+       } else {
+               if (i40iw_vchnl_vf_get_pe_stats(dev, &devstat->hw_stats))
+                       return -ENOSYS;
+diff --git a/drivers/infiniband/hw/i40iw/i40iw_virtchnl.c b/drivers/infiniband/hw/i40iw/i40iw_virtchnl.c
+index dbd39c4..f4d1368 100644
+--- a/drivers/infiniband/hw/i40iw/i40iw_virtchnl.c
++++ b/drivers/infiniband/hw/i40iw/i40iw_virtchnl.c
+@@ -403,6 +403,19 @@ static void pf_del_hmc_obj_callback(void *work_vf_dev)
+ }
+ /**
++ * i40iw_vf_init_pestat - Initialize stats for VF
++ * @dev: pointer to the VF Device
++ * @stats: Statistics structure pointer
++ * @index: Stats index
++ */
++static void i40iw_vf_init_pestat(struct i40iw_sc_dev *dev, struct i40iw_vsi_pestat *stats, u16 index)
++{
++      stats->hw = dev->hw;
++      i40iw_hw_stats_init(stats, (u8)index, false);
++      spin_lock_init(&stats->lock);
++}
++
++/**
+  * i40iw_vchnl_recv_pf - Receive PF virtual channel messages
+  * @dev: IWARP device pointer
+  * @vf_id: Virtual function ID associated with the message
+@@ -421,9 +434,8 @@ enum i40iw_status_code i40iw_vchnl_recv_pf(struct i40iw_sc_dev *dev,
+       u16 first_avail_iw_vf = I40IW_MAX_PE_ENABLED_VF_COUNT;
+       struct i40iw_virt_mem vf_dev_mem;
+       struct i40iw_virtchnl_work_info work_info;
+-      struct i40iw_dev_pestat *devstat;
++      struct i40iw_vsi_pestat *stats;
+       enum i40iw_status_code ret_code;
+-      unsigned long flags;
+       if (!dev || !msg || !len)
+               return I40IW_ERR_PARAM;
+@@ -496,10 +508,7 @@ enum i40iw_status_code i40iw_vchnl_recv_pf(struct i40iw_sc_dev *dev,
+                               i40iw_debug(dev, I40IW_DEBUG_VIRT,
+                                           "VF%u error CQP HMC Function operation.\n",
+                                           vf_id);
+-                      i40iw_device_init_pestat(&vf_dev->dev_pestat);
+-                      vf_dev->dev_pestat.ops.iw_hw_stat_init(&vf_dev->dev_pestat,
+-                                                            (u8)vf_dev->pmf_index,
+-                                                            dev->hw, false);
++                      i40iw_vf_init_pestat(dev, &vf_dev->pestat, vf_dev->pmf_index);
+                       vf_dev->stats_initialized = true;
+               } else {
+                       if (vf_dev) {
+@@ -530,12 +539,10 @@ enum i40iw_status_code i40iw_vchnl_recv_pf(struct i40iw_sc_dev *dev,
+       case I40IW_VCHNL_OP_GET_STATS:
+               if (!vf_dev)
+                       return I40IW_ERR_BAD_PTR;
+-              devstat = &vf_dev->dev_pestat;
+-              spin_lock_irqsave(&dev->dev_pestat.stats_lock, flags);
+-              devstat->ops.iw_hw_stat_read_all(devstat, &devstat->hw_stats);
+-              spin_unlock_irqrestore(&dev->dev_pestat.stats_lock, flags);
++              stats = &vf_dev->pestat;
++              i40iw_hw_stats_read_all(stats, &stats->hw_stats);
+               vf_dev->msg_count--;
+-              vchnl_pf_send_get_pe_stats_resp(dev, vf_id, vchnl_msg, &devstat->hw_stats);
++              vchnl_pf_send_get_pe_stats_resp(dev, vf_id, vchnl_msg, &stats->hw_stats);
+               break;
+       default:
+               i40iw_debug(dev, I40IW_DEBUG_VIRT,
+-- 
+1.8.3.1
+
diff --git a/linux-next-cherry-picks/0051-i40iw-Set-128B-as-the-only-supported-RQ-WQE-size.patch b/linux-next-cherry-picks/0051-i40iw-Set-128B-as-the-only-supported-RQ-WQE-size.patch
new file mode 100755 (executable)
index 0000000..7d7e3ab
--- /dev/null
@@ -0,0 +1,303 @@
+From 61f51b7b20f631ef8fe744bc0412d4eb5194b6a9 Mon Sep 17 00:00:00 2001
+From: Chien Tin Tung <chien.tin.tung@intel.com>
+Date: Wed, 21 Dec 2016 08:53:46 -0600
+Subject: [PATCH 51/52] i40iw: Set 128B as the only supported RQ WQE size
+
+RQ WQE size other than 128B is not supported.  Correct
+RQ size calculation to use 128B only.
+
+Since this breaks ABI, add additional code to
+provide compatibility with v4 user provider, libi40iw.
+
+Signed-off-by: Chien Tin Tung <chien.tin.tung@intel.com>
+Signed-off-by: Henry Orosco <henry.orosco@intel.com>
+Signed-off-by: Doug Ledford <dledford@redhat.com>
+---
+ drivers/infiniband/hw/i40iw/i40iw_ctrl.c     | 25 ++++++++++++++++++++-----
+ drivers/infiniband/hw/i40iw/i40iw_puda.c     |  2 +-
+ drivers/infiniband/hw/i40iw/i40iw_type.h     |  4 +++-
+ drivers/infiniband/hw/i40iw/i40iw_ucontext.h |  4 ++--
+ drivers/infiniband/hw/i40iw/i40iw_uk.c       | 17 ++++++++++++-----
+ drivers/infiniband/hw/i40iw/i40iw_user.h     |  4 +++-
+ drivers/infiniband/hw/i40iw/i40iw_verbs.c    | 21 +++++++++++----------
+ drivers/infiniband/hw/i40iw/i40iw_verbs.h    |  1 +
+ 8 files changed, 53 insertions(+), 25 deletions(-)
+
+diff --git a/drivers/infiniband/hw/i40iw/i40iw_ctrl.c b/drivers/infiniband/hw/i40iw/i40iw_ctrl.c
+index 392f783..98923a8 100644
+--- a/drivers/infiniband/hw/i40iw/i40iw_ctrl.c
++++ b/drivers/infiniband/hw/i40iw/i40iw_ctrl.c
+@@ -358,13 +358,16 @@ void i40iw_qp_add_qos(struct i40iw_sc_qp *qp)
+  * @dev: sc device struct
+  * @pd: sc pd ptr
+  * @pd_id: pd_id for allocated pd
++ * @abi_ver: ABI version from user context, -1 if not valid
+  */
+ static void i40iw_sc_pd_init(struct i40iw_sc_dev *dev,
+                            struct i40iw_sc_pd *pd,
+-                           u16 pd_id)
++                           u16 pd_id,
++                           int abi_ver)
+ {
+       pd->size = sizeof(*pd);
+       pd->pd_id = pd_id;
++      pd->abi_ver = abi_ver;
+       pd->dev = dev;
+ }
+@@ -2252,6 +2255,7 @@ static enum i40iw_status_code i40iw_sc_qp_init(struct i40iw_sc_qp *qp,
+                                             offset);
+       info->qp_uk_init_info.wqe_alloc_reg = wqe_alloc_reg;
++      info->qp_uk_init_info.abi_ver = qp->pd->abi_ver;
+       ret_code = i40iw_qp_uk_init(&qp->qp_uk, &info->qp_uk_init_info);
+       if (ret_code)
+               return ret_code;
+@@ -2270,10 +2274,21 @@ static enum i40iw_status_code i40iw_sc_qp_init(struct i40iw_sc_qp *qp,
+                                                   false);
+       i40iw_debug(qp->dev, I40IW_DEBUG_WQE, "%s: hw_sq_size[%04d] sq_ring.size[%04d]\n",
+                   __func__, qp->hw_sq_size, qp->qp_uk.sq_ring.size);
+-      ret_code = i40iw_fragcnt_to_wqesize_rq(qp->qp_uk.max_rq_frag_cnt,
+-                                             &wqe_size);
+-      if (ret_code)
+-              return ret_code;
++
++      switch (qp->pd->abi_ver) {
++      case 4:
++              ret_code = i40iw_fragcnt_to_wqesize_rq(qp->qp_uk.max_rq_frag_cnt,
++                                                     &wqe_size);
++              if (ret_code)
++                      return ret_code;
++              break;
++      case 5: /* fallthrough until next ABI version */
++      default:
++              if (qp->qp_uk.max_rq_frag_cnt > I40IW_MAX_WQ_FRAGMENT_COUNT)
++                      return I40IW_ERR_INVALID_FRAG_COUNT;
++              wqe_size = I40IW_MAX_WQE_SIZE_RQ;
++              break;
++      }
+       qp->hw_rq_size = i40iw_get_encoded_wqe_size(qp->qp_uk.rq_size *
+                               (wqe_size / I40IW_QP_WQE_MIN_SIZE), false);
+       i40iw_debug(qp->dev, I40IW_DEBUG_WQE,
+diff --git a/drivers/infiniband/hw/i40iw/i40iw_puda.c b/drivers/infiniband/hw/i40iw/i40iw_puda.c
+index 449ba8c..db41ab4 100644
+--- a/drivers/infiniband/hw/i40iw/i40iw_puda.c
++++ b/drivers/infiniband/hw/i40iw/i40iw_puda.c
+@@ -930,7 +930,7 @@ enum i40iw_status_code i40iw_puda_create_rsrc(struct i40iw_sc_vsi *vsi,
+       INIT_LIST_HEAD(&rsrc->txpend);
+       rsrc->tx_wqe_avail_cnt = info->sq_size - 1;
+-      dev->iw_pd_ops->pd_init(dev, &rsrc->sc_pd, info->pd_id);
++      dev->iw_pd_ops->pd_init(dev, &rsrc->sc_pd, info->pd_id, -1);
+       rsrc->qp_id = info->qp_id;
+       rsrc->cq_id = info->cq_id;
+       rsrc->sq_size = info->sq_size;
+diff --git a/drivers/infiniband/hw/i40iw/i40iw_type.h b/drivers/infiniband/hw/i40iw/i40iw_type.h
+index f3f8e9c..7b76259 100644
+--- a/drivers/infiniband/hw/i40iw/i40iw_type.h
++++ b/drivers/infiniband/hw/i40iw/i40iw_type.h
+@@ -280,6 +280,7 @@ struct i40iw_sc_pd {
+       u32 size;
+       struct i40iw_sc_dev *dev;
+       u16 pd_id;
++      int abi_ver;
+ };
+ struct i40iw_cqp_quanta {
+@@ -852,6 +853,7 @@ struct i40iw_qp_init_info {
+       u64 host_ctx_pa;
+       u64 q2_pa;
+       u64 shadow_area_pa;
++      int abi_ver;
+       u8 sq_tph_val;
+       u8 rq_tph_val;
+       u8 type;
+@@ -1051,7 +1053,7 @@ struct i40iw_aeq_ops {
+ };
+ struct i40iw_pd_ops {
+-      void (*pd_init)(struct i40iw_sc_dev *, struct i40iw_sc_pd *, u16);
++      void (*pd_init)(struct i40iw_sc_dev *, struct i40iw_sc_pd *, u16, int);
+ };
+ struct i40iw_priv_qp_ops {
+diff --git a/drivers/infiniband/hw/i40iw/i40iw_ucontext.h b/drivers/infiniband/hw/i40iw/i40iw_ucontext.h
+index 12acd68..57d3f1d 100644
+--- a/drivers/infiniband/hw/i40iw/i40iw_ucontext.h
++++ b/drivers/infiniband/hw/i40iw/i40iw_ucontext.h
+@@ -39,8 +39,8 @@
+ #include <linux/types.h>
+-#define I40IW_ABI_USERSPACE_VER 4
+-#define I40IW_ABI_KERNEL_VER    4
++#define I40IW_ABI_VER 5
++
+ struct i40iw_alloc_ucontext_req {
+       __u32 reserved32;
+       __u8 userspace_ver;
+diff --git a/drivers/infiniband/hw/i40iw/i40iw_uk.c b/drivers/infiniband/hw/i40iw/i40iw_uk.c
+index 4376cd6..2800f79 100644
+--- a/drivers/infiniband/hw/i40iw/i40iw_uk.c
++++ b/drivers/infiniband/hw/i40iw/i40iw_uk.c
+@@ -966,10 +966,6 @@ enum i40iw_status_code i40iw_qp_uk_init(struct i40iw_qp_uk *qp,
+       if (ret_code)
+               return ret_code;
+-      ret_code = i40iw_get_wqe_shift(info->rq_size, info->max_rq_frag_cnt, 0, &rqshift);
+-      if (ret_code)
+-              return ret_code;
+-
+       qp->sq_base = info->sq;
+       qp->rq_base = info->rq;
+       qp->shadow_area = info->shadow_area;
+@@ -998,8 +994,19 @@ enum i40iw_status_code i40iw_qp_uk_init(struct i40iw_qp_uk *qp,
+       if (!qp->use_srq) {
+               qp->rq_size = info->rq_size;
+               qp->max_rq_frag_cnt = info->max_rq_frag_cnt;
+-              qp->rq_wqe_size = rqshift;
+               I40IW_RING_INIT(qp->rq_ring, qp->rq_size);
++              switch (info->abi_ver) {
++              case 4:
++                      ret_code = i40iw_get_wqe_shift(info->rq_size, info->max_rq_frag_cnt, 0, &rqshift);
++                      if (ret_code)
++                              return ret_code;
++                      break;
++              case 5: /* fallthrough until next ABI version */
++              default:
++                      rqshift = I40IW_MAX_RQ_WQE_SHIFT;
++                      break;
++              }
++              qp->rq_wqe_size = rqshift;
+               qp->rq_wqe_size_multiplier = 4 << rqshift;
+       }
+       qp->ops = iw_qp_uk_ops;
+diff --git a/drivers/infiniband/hw/i40iw/i40iw_user.h b/drivers/infiniband/hw/i40iw/i40iw_user.h
+index 80d9f46..84be6f1 100644
+--- a/drivers/infiniband/hw/i40iw/i40iw_user.h
++++ b/drivers/infiniband/hw/i40iw/i40iw_user.h
+@@ -76,6 +76,7 @@ enum i40iw_device_capabilities_const {
+       I40IW_MAX_ORD_SIZE =                    127,
+       I40IW_MAX_WQ_ENTRIES =                  2048,
+       I40IW_Q2_BUFFER_SIZE =                  (248 + 100),
++      I40IW_MAX_WQE_SIZE_RQ =                 128,
+       I40IW_QP_CTX_SIZE =                     248,
+       I40IW_MAX_PDS =                         32768
+ };
+@@ -97,6 +98,7 @@ enum i40iw_device_capabilities_const {
+ #define i40iw_address_list u64 *
+ #define       I40IW_MAX_MR_SIZE       0x10000000000L
++#define       I40IW_MAX_RQ_WQE_SHIFT  2
+ struct i40iw_qp_uk;
+ struct i40iw_cq_uk;
+@@ -405,7 +407,7 @@ struct i40iw_qp_uk_init_info {
+       u32 max_sq_frag_cnt;
+       u32 max_rq_frag_cnt;
+       u32 max_inline_data;
+-
++      int abi_ver;
+ };
+ struct i40iw_cq_uk_init_info {
+diff --git a/drivers/infiniband/hw/i40iw/i40iw_verbs.c b/drivers/infiniband/hw/i40iw/i40iw_verbs.c
+index 7368a50..29e97df 100644
+--- a/drivers/infiniband/hw/i40iw/i40iw_verbs.c
++++ b/drivers/infiniband/hw/i40iw/i40iw_verbs.c
+@@ -145,9 +145,8 @@ static struct ib_ucontext *i40iw_alloc_ucontext(struct ib_device *ibdev,
+       if (ib_copy_from_udata(&req, udata, sizeof(req)))
+               return ERR_PTR(-EINVAL);
+-      if (req.userspace_ver != I40IW_ABI_USERSPACE_VER) {
+-              i40iw_pr_err("Invalid userspace driver version detected. Detected version %d, should be %d\n",
+-                           req.userspace_ver, I40IW_ABI_USERSPACE_VER);
++      if (req.userspace_ver < 4 || req.userspace_ver > I40IW_ABI_VER) {
++              i40iw_pr_err("Unsupported provider library version %u.\n", req.userspace_ver);
+               return ERR_PTR(-EINVAL);
+       }
+@@ -155,13 +154,14 @@ static struct ib_ucontext *i40iw_alloc_ucontext(struct ib_device *ibdev,
+       uresp.max_qps = iwdev->max_qp;
+       uresp.max_pds = iwdev->max_pd;
+       uresp.wq_size = iwdev->max_qp_wr * 2;
+-      uresp.kernel_ver = I40IW_ABI_KERNEL_VER;
++      uresp.kernel_ver = req.userspace_ver;
+       ucontext = kzalloc(sizeof(*ucontext), GFP_KERNEL);
+       if (!ucontext)
+               return ERR_PTR(-ENOMEM);
+       ucontext->iwdev = iwdev;
++      ucontext->abi_ver = req.userspace_ver;
+       if (ib_copy_to_udata(udata, &uresp, sizeof(uresp))) {
+               kfree(ucontext);
+@@ -333,6 +333,7 @@ static struct ib_pd *i40iw_alloc_pd(struct ib_device *ibdev,
+       struct i40iw_sc_dev *dev = &iwdev->sc_dev;
+       struct i40iw_alloc_pd_resp uresp;
+       struct i40iw_sc_pd *sc_pd;
++      struct i40iw_ucontext *ucontext;
+       u32 pd_id = 0;
+       int err;
+@@ -353,15 +354,18 @@ static struct ib_pd *i40iw_alloc_pd(struct ib_device *ibdev,
+       }
+       sc_pd = &iwpd->sc_pd;
+-      dev->iw_pd_ops->pd_init(dev, sc_pd, pd_id);
+       if (context) {
++              ucontext = to_ucontext(context);
++              dev->iw_pd_ops->pd_init(dev, sc_pd, pd_id, ucontext->abi_ver);
+               memset(&uresp, 0, sizeof(uresp));
+               uresp.pd_id = pd_id;
+               if (ib_copy_to_udata(udata, &uresp, sizeof(uresp))) {
+                       err = -EFAULT;
+                       goto error;
+               }
++      } else {
++              dev->iw_pd_ops->pd_init(dev, sc_pd, pd_id, -1);
+       }
+       i40iw_add_pdusecount(iwpd);
+@@ -518,7 +522,7 @@ static int i40iw_setup_kmode_qp(struct i40iw_device *iwdev,
+       struct i40iw_dma_mem *mem = &iwqp->kqp.dma_mem;
+       u32 sqdepth, rqdepth;
+       u32 sq_size, rq_size;
+-      u8 sqshift, rqshift;
++      u8 sqshift;
+       u32 size;
+       enum i40iw_status_code status;
+       struct i40iw_qp_uk_init_info *ukinfo = &info->qp_uk_init_info;
+@@ -527,14 +531,11 @@ static int i40iw_setup_kmode_qp(struct i40iw_device *iwdev,
+       rq_size = i40iw_qp_roundup(ukinfo->rq_size + 1);
+       status = i40iw_get_wqe_shift(sq_size, ukinfo->max_sq_frag_cnt, ukinfo->max_inline_data, &sqshift);
+-      if (!status)
+-              status = i40iw_get_wqe_shift(rq_size, ukinfo->max_rq_frag_cnt, 0, &rqshift);
+-
+       if (status)
+               return -ENOMEM;
+       sqdepth = sq_size << sqshift;
+-      rqdepth = rq_size << rqshift;
++      rqdepth = rq_size << I40IW_MAX_RQ_WQE_SHIFT;
+       size = sqdepth * sizeof(struct i40iw_sq_uk_wr_trk_info) + (rqdepth << 3);
+       iwqp->kqp.wrid_mem = kzalloc(size, GFP_KERNEL);
+diff --git a/drivers/infiniband/hw/i40iw/i40iw_verbs.h b/drivers/infiniband/hw/i40iw/i40iw_verbs.h
+index 6549c93..07c3fec 100644
+--- a/drivers/infiniband/hw/i40iw/i40iw_verbs.h
++++ b/drivers/infiniband/hw/i40iw/i40iw_verbs.h
+@@ -42,6 +42,7 @@ struct i40iw_ucontext {
+       spinlock_t cq_reg_mem_list_lock; /* memory list for cq's */
+       struct list_head qp_reg_mem_list;
+       spinlock_t qp_reg_mem_list_lock; /* memory list for qp's */
++      int abi_ver;
+ };
+ struct i40iw_pd {
+-- 
+1.8.3.1
+