--- /dev/null
+From c0c643e16f9b00332cbbf3954556652dfa4ed5a3 Mon Sep 17 00:00:00 2001
+From: Mustafa Ismail <mustafa.ismail@intel.com>
+Date: Wed, 10 May 2017 23:32:14 -0500
+Subject: [PATCH 03583/13040] RDMA/i40iw: Fix device initialization error path
+
+Some error paths in i40iw_initialize_dev are doing
+additional and unnecessary work before exiting.
+Correctly free resources allocated prior to error
+and return with correct status code.
+
+Signed-off-by: Mustafa Ismail <mustafa.ismail@intel.com>
+Signed-off-by: Henry Orosco <henry.orosco@intel.com>
+Signed-off-by: Doug Ledford <dledford@redhat.com>
+---
+ drivers/infiniband/hw/i40iw/i40iw_main.c | 20 +++++++++++++-------
+ 1 file changed, 13 insertions(+), 7 deletions(-)
+
+diff --git a/drivers/infiniband/hw/i40iw/i40iw_main.c b/drivers/infiniband/hw/i40iw/i40iw_main.c
+index 2728af3..a3f18a2 100644
+--- a/drivers/infiniband/hw/i40iw/i40iw_main.c
++++ b/drivers/infiniband/hw/i40iw/i40iw_main.c
+@@ -1319,13 +1319,13 @@ static enum i40iw_status_code i40iw_initialize_dev(struct i40iw_device *iwdev,
+ status = i40iw_obj_aligned_mem(iwdev, &mem, I40IW_QUERY_FPM_BUF_SIZE,
+ I40IW_FPM_QUERY_BUF_ALIGNMENT_MASK);
+ if (status)
+- goto exit;
++ goto error;
+ info.fpm_query_buf_pa = mem.pa;
+ info.fpm_query_buf = mem.va;
+ status = i40iw_obj_aligned_mem(iwdev, &mem, I40IW_COMMIT_FPM_BUF_SIZE,
+ I40IW_FPM_COMMIT_BUF_ALIGNMENT_MASK);
+ if (status)
+- goto exit;
++ goto error;
+ info.fpm_commit_buf_pa = mem.pa;
+ info.fpm_commit_buf = mem.va;
+ info.hmc_fn_id = ldev->fid;
+@@ -1347,11 +1347,9 @@ static enum i40iw_status_code i40iw_initialize_dev(struct i40iw_device *iwdev,
+ info.exception_lan_queue = 1;
+ info.vchnl_send = i40iw_virtchnl_send;
+ status = i40iw_device_init(&iwdev->sc_dev, &info);
+-exit:
+- if (status) {
+- kfree(iwdev->hmc_info_mem);
+- iwdev->hmc_info_mem = NULL;
+- }
++
++ if (status)
++ goto error;
+ memset(&vsi_info, 0, sizeof(vsi_info));
+ vsi_info.dev = &iwdev->sc_dev;
+ vsi_info.back_vsi = (void *)iwdev;
+@@ -1362,11 +1360,19 @@ static enum i40iw_status_code i40iw_initialize_dev(struct i40iw_device *iwdev,
+ memset(&stats_info, 0, sizeof(stats_info));
+ stats_info.fcn_id = ldev->fid;
+ stats_info.pestat = kzalloc(sizeof(*stats_info.pestat), GFP_KERNEL);
++ if (!stats_info.pestat) {
++ status = I40IW_ERR_NO_MEMORY;
++ goto error;
++ }
+ stats_info.stats_initialize = true;
+ if (stats_info.pestat)
+ i40iw_vsi_stats_init(&iwdev->vsi, &stats_info);
+ }
+ return status;
++error:
++ kfree(iwdev->hmc_info_mem);
++ iwdev->hmc_info_mem = NULL;
++ return status;
+ }
+
+ /**
+--
+2.1.3
+
--- /dev/null
+From f300ba2d1ef1cb8411daa5e1ae44acfa7b88236c Mon Sep 17 00:00:00 2001
+From: Shiraz Saleem <shiraz.saleem@intel.com>
+Date: Fri, 19 May 2017 16:14:02 -0500
+Subject: [PATCH 03584/13040] RDMA/i40iw: Remove MSS change support
+
+MSS change on active QPs is not supported. Store new MSS
+value for new QPs only. Remove code to modify MSS on the fly.
+This also resolves a crash on QP modify to QP 0.
+
+BUG: unable to handle kernel NULL pointer dereference at 0000000000000008
+IP: i40iw_sc_qp_modify+0x22/0x280 [i40iw]
+Oops: 0000 [#1] SMP KASAN
+CPU: 2 PID: 1236 Comm: kworker/u16:4 Not tainted 4.12.0-rc1 #5
+Hardware name: Gigabyte Technology Co., Ltd. To be filled by O.E.M./Q87M-D2H,
+BIOS F7 01/17/2014
+Workqueue: l2params i40iw_l2params_worker [i40iw]
+task: ffff88070f5a9b40 task.stack: ffff88070f5a0000
+RIP: 0010:i40iw_sc_qp_modify+0x22/0x280 [i40iw]
+...
+Call Trace:
+i40iw_exec_cqp_cmd+0x2ce/0x410 [i40iw]
+? _raw_spin_lock_irqsave+0x6f/0x80
+? i40iw_process_cqp_cmd+0x1d/0x80 [i40iw]
+i40iw_process_cqp_cmd+0x7c/0x80 [i40iw]
+i40iw_handle_cqp_op+0x2f/0x200 [i40iw]
+? trace_hardirqs_off+0xd/0x10
+? _raw_spin_unlock_irqrestore+0x46/0x50
+i40iw_hw_modify_qp+0x5e/0x90 [i40iw]
+i40iw_qp_mss_modify+0x52/0x60 [i40iw]
+i40iw_change_l2params+0x145/0x160 [i40iw]
+i40iw_l2params_worker+0x1f/0x40 [i40iw]
+process_one_work+0x1f5/0x650
+? process_one_work+0x161/0x650
+worker_thread+0x48/0x3b0
+kthread+0x112/0x150
+? process_one_work+0x650/0x650
+? kthread_create_on_node+0x40/0x40
+ret_from_fork+0x2e/0x40
+Code: 2e 0f 1f 84 00 00 00 00 00 55 48 89 e5 41 56 41 55 41 89 cd 41 54 49 89 fc
+53 48 89 f3 48 89 d6 48 83 ec 08 48 8b 87 10 01 00 00 <48> 8b 40 08 4c 8b b0 40 04
+00 00 4c 89 f7 e8 1b e5 ff ff 48 85
+RIP: i40iw_sc_qp_modify+0x22/0x280 [i40iw] RSP: ffff88070f5a7c28
+CR2: 0000000000000008
+---[ end trace 77a405931e296060 ]---
+
+Reported-by: Stefan Assmann <sassmann@redhat.com>
+Signed-off-by: Shiraz Saleem <shiraz.saleem@intel.com>
+Signed-off-by: Doug Ledford <dledford@redhat.com>
+---
+ drivers/infiniband/hw/i40iw/i40iw_ctrl.c | 12 +-----------
+ drivers/infiniband/hw/i40iw/i40iw_osdep.h | 1 -
+ drivers/infiniband/hw/i40iw/i40iw_type.h | 2 --
+ drivers/infiniband/hw/i40iw/i40iw_utils.c | 17 -----------------
+ 4 files changed, 1 insertion(+), 31 deletions(-)
+
+diff --git a/drivers/infiniband/hw/i40iw/i40iw_ctrl.c b/drivers/infiniband/hw/i40iw/i40iw_ctrl.c
+index f82483b..a027e20 100644
+--- a/drivers/infiniband/hw/i40iw/i40iw_ctrl.c
++++ b/drivers/infiniband/hw/i40iw/i40iw_ctrl.c
+@@ -285,28 +285,20 @@ void i40iw_change_l2params(struct i40iw_sc_vsi *vsi, struct i40iw_l2params *l2pa
+ struct i40iw_sc_dev *dev = vsi->dev;
+ struct i40iw_sc_qp *qp = NULL;
+ bool qs_handle_change = false;
+- bool mss_change = false;
+ unsigned long flags;
+ u16 qs_handle;
+ int i;
+
+- if (vsi->mss != l2params->mss) {
+- mss_change = true;
+- vsi->mss = l2params->mss;
+- }
++ vsi->mss = l2params->mss;
+
+ i40iw_fill_qos_list(l2params->qs_handle_list);
+ for (i = 0; i < I40IW_MAX_USER_PRIORITY; i++) {
+ qs_handle = l2params->qs_handle_list[i];
+ if (vsi->qos[i].qs_handle != qs_handle)
+ qs_handle_change = true;
+- else if (!mss_change)
+- continue; /* no MSS nor qs handle change */
+ spin_lock_irqsave(&vsi->qos[i].lock, flags);
+ qp = i40iw_get_qp(&vsi->qos[i].qplist, qp);
+ while (qp) {
+- if (mss_change)
+- i40iw_qp_mss_modify(dev, qp);
+ if (qs_handle_change) {
+ qp->qs_handle = qs_handle;
+ /* issue cqp suspend command */
+@@ -2395,7 +2387,6 @@ static enum i40iw_status_code i40iw_sc_qp_modify(
+
+ set_64bit_val(wqe,
+ 8,
+- LS_64(info->new_mss, I40IW_CQPSQ_QP_NEWMSS) |
+ LS_64(term_len, I40IW_CQPSQ_QP_TERMLEN));
+
+ set_64bit_val(wqe, 16, qp->hw_host_ctx_pa);
+@@ -2410,7 +2401,6 @@ static enum i40iw_status_code i40iw_sc_qp_modify(
+ LS_64(info->cq_num_valid, I40IW_CQPSQ_QP_CQNUMVALID) |
+ LS_64(info->force_loopback, I40IW_CQPSQ_QP_FORCELOOPBACK) |
+ LS_64(qp->qp_type, I40IW_CQPSQ_QP_QPTYPE) |
+- LS_64(info->mss_change, I40IW_CQPSQ_QP_MSSCHANGE) |
+ LS_64(info->static_rsrc, I40IW_CQPSQ_QP_STATRSRC) |
+ LS_64(info->remove_hash_idx, I40IW_CQPSQ_QP_REMOVEHASHENTRY) |
+ LS_64(term_actions, I40IW_CQPSQ_QP_TERMACT) |
+diff --git a/drivers/infiniband/hw/i40iw/i40iw_osdep.h b/drivers/infiniband/hw/i40iw/i40iw_osdep.h
+index aa66c1c..f27be3e 100644
+--- a/drivers/infiniband/hw/i40iw/i40iw_osdep.h
++++ b/drivers/infiniband/hw/i40iw/i40iw_osdep.h
+@@ -199,7 +199,6 @@ void i40iw_cqp_spawn_worker(struct i40iw_sc_dev *dev,
+ struct i40iw_virtchnl_work_info *work_info, u32 iw_vf_idx);
+ void *i40iw_remove_head(struct list_head *list);
+ void i40iw_qp_suspend_resume(struct i40iw_sc_dev *dev, struct i40iw_sc_qp *qp, bool suspend);
+-void i40iw_qp_mss_modify(struct i40iw_sc_dev *dev, struct i40iw_sc_qp *qp);
+
+ void i40iw_term_modify_qp(struct i40iw_sc_qp *qp, u8 next_state, u8 term, u8 term_len);
+ void i40iw_terminate_done(struct i40iw_sc_qp *qp, int timeout_occurred);
+diff --git a/drivers/infiniband/hw/i40iw/i40iw_type.h b/drivers/infiniband/hw/i40iw/i40iw_type.h
+index 7b76259..959ec81 100644
+--- a/drivers/infiniband/hw/i40iw/i40iw_type.h
++++ b/drivers/infiniband/hw/i40iw/i40iw_type.h
+@@ -541,7 +541,6 @@ struct i40iw_create_qp_info {
+ struct i40iw_modify_qp_info {
+ u64 rx_win0;
+ u64 rx_win1;
+- u16 new_mss;
+ u8 next_iwarp_state;
+ u8 termlen;
+ bool ord_valid;
+@@ -554,7 +553,6 @@ struct i40iw_modify_qp_info {
+ bool dont_send_term;
+ bool dont_send_fin;
+ bool cached_var_valid;
+- bool mss_change;
+ bool force_loopback;
+ };
+
+diff --git a/drivers/infiniband/hw/i40iw/i40iw_utils.c b/drivers/infiniband/hw/i40iw/i40iw_utils.c
+index 409a378..56d9869 100644
+--- a/drivers/infiniband/hw/i40iw/i40iw_utils.c
++++ b/drivers/infiniband/hw/i40iw/i40iw_utils.c
+@@ -757,23 +757,6 @@ void i40iw_qp_suspend_resume(struct i40iw_sc_dev *dev, struct i40iw_sc_qp *qp, b
+ }
+
+ /**
+- * i40iw_qp_mss_modify - modify mss for qp
+- * @dev: hardware control device structure
+- * @qp: hardware control qp
+- */
+-void i40iw_qp_mss_modify(struct i40iw_sc_dev *dev, struct i40iw_sc_qp *qp)
+-{
+- struct i40iw_device *iwdev = (struct i40iw_device *)dev->back_dev;
+- struct i40iw_qp *iwqp = (struct i40iw_qp *)qp->back_qp;
+- struct i40iw_modify_qp_info info;
+-
+- memset(&info, 0, sizeof(info));
+- info.mss_change = true;
+- info.new_mss = qp->vsi->mss;
+- i40iw_hw_modify_qp(iwdev, iwqp, &info, false);
+-}
+-
+-/**
+ * i40iw_term_modify_qp - modify qp for term message
+ * @qp: hardware control qp
+ * @next_state: qp's next state
+--
+2.1.3
+
--- /dev/null
+From e80bd98d1ff011beec872a8ebbb73930507d6a13 Mon Sep 17 00:00:00 2001
+From: "Gustavo A. R. Silva" <garsilva@embeddedor.com>
+Date: Thu, 18 May 2017 13:11:17 -0500
+Subject: [PATCH 03585/13040] RDMA/i40iw: fix duplicated code for different
+ branches
+
+Refactor code to avoid identical code for different branches.
+
+Addresses-Coverity-ID: 1357356
+Reviewed-by: Yuval Shaia <yuval.shaia@oracle.com>
+Signed-off-by: Gustavo A. R. Silva <garsilva@embeddedor.com>
+Acked-by: Shiraz Saleem <shiraz.saleem@intel.com>
+Signed-off-by: Doug Ledford <dledford@redhat.com>
+---
+ drivers/infiniband/hw/i40iw/i40iw_virtchnl.c | 5 +----
+ 1 file changed, 1 insertion(+), 4 deletions(-)
+
+diff --git a/drivers/infiniband/hw/i40iw/i40iw_virtchnl.c b/drivers/infiniband/hw/i40iw/i40iw_virtchnl.c
+index f4d1368..48fd327 100644
+--- a/drivers/infiniband/hw/i40iw/i40iw_virtchnl.c
++++ b/drivers/infiniband/hw/i40iw/i40iw_virtchnl.c
+@@ -443,10 +443,7 @@ enum i40iw_status_code i40iw_vchnl_recv_pf(struct i40iw_sc_dev *dev,
+ if (!dev->vchnl_up)
+ return I40IW_ERR_NOT_READY;
+ if (vchnl_msg->iw_op_code == I40IW_VCHNL_OP_GET_VER) {
+- if (vchnl_msg->iw_op_ver != I40IW_VCHNL_OP_GET_VER_V0)
+- vchnl_pf_send_get_ver_resp(dev, vf_id, vchnl_msg);
+- else
+- vchnl_pf_send_get_ver_resp(dev, vf_id, vchnl_msg);
++ vchnl_pf_send_get_ver_resp(dev, vf_id, vchnl_msg);
+ return I40IW_SUCCESS;
+ }
+ for (iw_vf_idx = 0; iw_vf_idx < I40IW_MAX_PE_ENABLED_VF_COUNT; iw_vf_idx++) {
+--
+2.1.3
+
--- /dev/null
+From 73b976954360b4b37570ab001e85d8dde0c345b7 Mon Sep 17 00:00:00 2001
+From: Bhaktipriya Shridhar <bhaktipriya96@gmail.com>
+Date: Mon, 15 Aug 2016 23:40:09 +0530
+Subject: [PATCH] i40iw_main: Remove deprecated create_singlethread_workqueue
+
+alloc_ordered_workqueue() with WQ_MEM_RECLAIM set, replaces
+deprecated create_singlethread_workqueue(). This is the identity
+conversion.
+
+The workqueue "virtchnl_wq" queues work items i40iw_cqp_generic_worker
+and i40iw_cqp_manage_hmc_fcn_worker. It has been identity converted.
+
+WQ_MEM_RECLAIM has been set to ensure forward progress under memory
+pressure.
+
+Signed-off-by: Bhaktipriya Shridhar <bhaktipriya96@gmail.com>
+Signed-off-by: Doug Ledford <dledford@redhat.com>
+---
+ drivers/infiniband/hw/i40iw/i40iw_main.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/infiniband/hw/i40iw/i40iw_main.c b/drivers/infiniband/hw/i40iw/i40iw_main.c
+index 6e90813..798335f 100644
+--- a/drivers/infiniband/hw/i40iw/i40iw_main.c
++++ b/drivers/infiniband/hw/i40iw/i40iw_main.c
+@@ -1613,7 +1613,7 @@ static int i40iw_open(struct i40e_info *ldev, struct i40e_client *client)
+ status = i40iw_hmc_init_pble(&iwdev->sc_dev, iwdev->pble_rsrc);
+ if (status)
+ break;
+- iwdev->virtchnl_wq = create_singlethread_workqueue("iwvch");
++ iwdev->virtchnl_wq = alloc_ordered_workqueue("iwvch", WQ_MEM_RECLAIM);
+ i40iw_register_notifiers();
+ iwdev->init_state = INET_NOTIFIER;
+ status = i40iw_add_mac_ip(iwdev);
+--
+2.1.3
+
--- /dev/null
+From be8822db62ddda6d316d2dd682679732ed2f0abf Mon Sep 17 00:00:00 2001
+From: Mustafa Ismail <mustafa.ismail@intel.com>
+Date: Fri, 23 Jun 2017 16:03:55 -0500
+Subject: [PATCH 12647/13040] i40iw: Fix order of cleanup in close
+
+The order for calling i40iw_destroy_pble_pool is incorrect.
+Also, add PBLE_CHUNK_MEM init state to track pble pool
+creation and destruction.
+
+Signed-off-by: Mustafa Ismail <mustafa.ismail@intel.com>
+Signed-off-by: Henry Orosco <henry.orosco@intel.com>
+Signed-off-by: Doug Ledford <dledford@redhat.com>
+---
+ drivers/infiniband/hw/i40iw/i40iw_main.c | 7 ++++---
+ 1 file changed, 4 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/infiniband/hw/i40iw/i40iw_main.c b/drivers/infiniband/hw/i40iw/i40iw_main.c
+index e0f47cc..8fc61b3 100644
+--- a/drivers/infiniband/hw/i40iw/i40iw_main.c
++++ b/drivers/infiniband/hw/i40iw/i40iw_main.c
+@@ -1474,6 +1474,9 @@ static void i40iw_deinit_device(struct i40iw_device *iwdev, bool reset)
+ unregister_inet6addr_notifier(&i40iw_inetaddr6_notifier);
+ }
+ /* fallthrough */
++ case PBLE_CHUNK_MEM:
++ i40iw_destroy_pble_pool(dev, iwdev->pble_rsrc);
++ /* fallthrough */
+ case CEQ_CREATED:
+ i40iw_dele_ceqs(iwdev, reset);
+ /* fallthrough */
+@@ -1489,9 +1492,6 @@ static void i40iw_deinit_device(struct i40iw_device *iwdev, bool reset)
+ case CCQ_CREATED:
+ i40iw_destroy_ccq(iwdev, reset);
+ /* fallthrough */
+- case PBLE_CHUNK_MEM:
+- i40iw_destroy_pble_pool(dev, iwdev->pble_rsrc);
+- /* fallthrough */
+ case HMC_OBJS_CREATED:
+ i40iw_del_hmc_objects(dev, dev->hmc_info, true, reset);
+ /* fallthrough */
+@@ -1670,6 +1670,7 @@ static int i40iw_open(struct i40e_info *ldev, struct i40e_client *client)
+ status = i40iw_hmc_init_pble(&iwdev->sc_dev, iwdev->pble_rsrc);
+ if (status)
+ break;
++ iwdev->init_state = PBLE_CHUNK_MEM;
+ iwdev->virtchnl_wq = alloc_ordered_workqueue("iwvch", WQ_MEM_RECLAIM);
+ i40iw_register_notifiers();
+ iwdev->init_state = INET_NOTIFIER;
+--
+2.1.3
+
--- /dev/null
+From 415920aa174666c0ac8c47eee974acc9f49efec4 Mon Sep 17 00:00:00 2001
+From: Mustafa Ismail <mustafa.ismail@intel.com>
+Date: Fri, 23 Jun 2017 16:03:56 -0500
+Subject: [PATCH 12648/13040] i40iw: Do not poll CCQ after it is destroyed
+
+Control Queue Pair (CQP) OPs, in this case - Update SDs,
+cannot poll the Control Completion Queue (CCQ) after CCQ is
+destroyed. Instead, poll via registers.
+
+Signed-off-by: Mustafa Ismail <mustafa.ismail@intel.com>
+Signed-off-by: Henry Orosco <henry.orosco@intel.com>
+Signed-off-by: Doug Ledford <dledford@redhat.com>
+---
+ drivers/infiniband/hw/i40iw/i40iw_ctrl.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/drivers/infiniband/hw/i40iw/i40iw_ctrl.c b/drivers/infiniband/hw/i40iw/i40iw_ctrl.c
+index a027e20..9ec1ae9 100644
+--- a/drivers/infiniband/hw/i40iw/i40iw_ctrl.c
++++ b/drivers/infiniband/hw/i40iw/i40iw_ctrl.c
+@@ -1970,6 +1970,8 @@ static enum i40iw_status_code i40iw_sc_ccq_destroy(struct i40iw_sc_cq *ccq,
+ ret_code = i40iw_cqp_poll_registers(cqp, tail, 1000);
+ }
+
++ cqp->process_cqp_sds = i40iw_update_sds_noccq;
++
+ return ret_code;
+ }
+
+--
+2.1.3
+
--- /dev/null
+From 6c1d94de4e75160d3ea5af3bf51d290341db1d44 Mon Sep 17 00:00:00 2001
+From: Shiraz Saleem <shiraz.saleem@intel.com>
+Date: Fri, 23 Jun 2017 16:03:57 -0500
+Subject: [PATCH 12649/13040] i40iw: Utilize iwdev->reset during PCI function
+ reset
+
+Utilize iwdev->reset on a PCI function reset notification
+instead of passing in reset flag for resource clean-up.
+
+Signed-off-by: Shiraz Saleem <shiraz.saleem@intel.com>
+Signed-off-by: Henry Orosco <henry.orosco@intel.com>
+Signed-off-by: Doug Ledford <dledford@redhat.com>
+---
+ drivers/infiniband/hw/i40iw/i40iw_main.c | 51 +++++++++++++++-----------------
+ 1 file changed, 24 insertions(+), 27 deletions(-)
+
+diff --git a/drivers/infiniband/hw/i40iw/i40iw_main.c b/drivers/infiniband/hw/i40iw/i40iw_main.c
+index 8fc61b3..3bad7d9 100644
+--- a/drivers/infiniband/hw/i40iw/i40iw_main.c
++++ b/drivers/infiniband/hw/i40iw/i40iw_main.c
+@@ -274,13 +274,12 @@ static void i40iw_disable_irq(struct i40iw_sc_dev *dev,
+ /**
+ * i40iw_destroy_aeq - destroy aeq
+ * @iwdev: iwarp device
+- * @reset: true if called before reset
+ *
+ * Issue a destroy aeq request and
+ * free the resources associated with the aeq
+ * The function is called during driver unload
+ */
+-static void i40iw_destroy_aeq(struct i40iw_device *iwdev, bool reset)
++static void i40iw_destroy_aeq(struct i40iw_device *iwdev)
+ {
+ enum i40iw_status_code status = I40IW_ERR_NOT_READY;
+ struct i40iw_sc_dev *dev = &iwdev->sc_dev;
+@@ -288,7 +287,7 @@ static void i40iw_destroy_aeq(struct i40iw_device *iwdev, bool reset)
+
+ if (!iwdev->msix_shared)
+ i40iw_disable_irq(dev, iwdev->iw_msixtbl, (void *)iwdev);
+- if (reset)
++ if (iwdev->reset)
+ goto exit;
+
+ if (!dev->aeq_ops->aeq_destroy(&aeq->sc_aeq, 0, 1))
+@@ -304,19 +303,17 @@ static void i40iw_destroy_aeq(struct i40iw_device *iwdev, bool reset)
+ * i40iw_destroy_ceq - destroy ceq
+ * @iwdev: iwarp device
+ * @iwceq: ceq to be destroyed
+- * @reset: true if called before reset
+ *
+ * Issue a destroy ceq request and
+ * free the resources associated with the ceq
+ */
+ static void i40iw_destroy_ceq(struct i40iw_device *iwdev,
+- struct i40iw_ceq *iwceq,
+- bool reset)
++ struct i40iw_ceq *iwceq)
+ {
+ enum i40iw_status_code status;
+ struct i40iw_sc_dev *dev = &iwdev->sc_dev;
+
+- if (reset)
++ if (iwdev->reset)
+ goto exit;
+
+ status = dev->ceq_ops->ceq_destroy(&iwceq->sc_ceq, 0, 1);
+@@ -335,12 +332,11 @@ static void i40iw_destroy_ceq(struct i40iw_device *iwdev,
+ /**
+ * i40iw_dele_ceqs - destroy all ceq's
+ * @iwdev: iwarp device
+- * @reset: true if called before reset
+ *
+ * Go through all of the device ceq's and for each ceq
+ * disable the ceq interrupt and destroy the ceq
+ */
+-static void i40iw_dele_ceqs(struct i40iw_device *iwdev, bool reset)
++static void i40iw_dele_ceqs(struct i40iw_device *iwdev)
+ {
+ u32 i = 0;
+ struct i40iw_sc_dev *dev = &iwdev->sc_dev;
+@@ -349,32 +345,31 @@ static void i40iw_dele_ceqs(struct i40iw_device *iwdev, bool reset)
+
+ if (iwdev->msix_shared) {
+ i40iw_disable_irq(dev, msix_vec, (void *)iwdev);
+- i40iw_destroy_ceq(iwdev, iwceq, reset);
++ i40iw_destroy_ceq(iwdev, iwceq);
+ iwceq++;
+ i++;
+ }
+
+ for (msix_vec++; i < iwdev->ceqs_count; i++, msix_vec++, iwceq++) {
+ i40iw_disable_irq(dev, msix_vec, (void *)iwceq);
+- i40iw_destroy_ceq(iwdev, iwceq, reset);
++ i40iw_destroy_ceq(iwdev, iwceq);
+ }
+ }
+
+ /**
+ * i40iw_destroy_ccq - destroy control cq
+ * @iwdev: iwarp device
+- * @reset: true if called before reset
+ *
+ * Issue destroy ccq request and
+ * free the resources associated with the ccq
+ */
+-static void i40iw_destroy_ccq(struct i40iw_device *iwdev, bool reset)
++static void i40iw_destroy_ccq(struct i40iw_device *iwdev)
+ {
+ struct i40iw_sc_dev *dev = &iwdev->sc_dev;
+ struct i40iw_ccq *ccq = &iwdev->ccq;
+ enum i40iw_status_code status = 0;
+
+- if (!reset)
++ if (!iwdev->reset)
+ status = dev->ccq_ops->ccq_destroy(dev->ccq, 0, true);
+ if (status)
+ i40iw_pr_err("ccq destroy failed %d\n", status);
+@@ -810,7 +805,7 @@ static enum i40iw_status_code i40iw_setup_ceqs(struct i40iw_device *iwdev,
+ iwceq->msix_idx = msix_vec->idx;
+ status = i40iw_configure_ceq_vector(iwdev, iwceq, ceq_id, msix_vec);
+ if (status) {
+- i40iw_destroy_ceq(iwdev, iwceq, false);
++ i40iw_destroy_ceq(iwdev, iwceq);
+ break;
+ }
+ i40iw_enable_intr(&iwdev->sc_dev, msix_vec->idx);
+@@ -912,7 +907,7 @@ static enum i40iw_status_code i40iw_setup_aeq(struct i40iw_device *iwdev)
+
+ status = i40iw_configure_aeq_vector(iwdev);
+ if (status) {
+- i40iw_destroy_aeq(iwdev, false);
++ i40iw_destroy_aeq(iwdev);
+ return status;
+ }
+
+@@ -1442,12 +1437,11 @@ static enum i40iw_status_code i40iw_save_msix_info(struct i40iw_device *iwdev,
+ /**
+ * i40iw_deinit_device - clean up the device resources
+ * @iwdev: iwarp device
+- * @reset: true if called before reset
+ *
+ * Destroy the ib device interface, remove the mac ip entry and ipv4/ipv6 addresses,
+ * destroy the device queues and free the pble and the hmc objects
+ */
+-static void i40iw_deinit_device(struct i40iw_device *iwdev, bool reset)
++static void i40iw_deinit_device(struct i40iw_device *iwdev)
+ {
+ struct i40e_info *ldev = iwdev->ldev;
+
+@@ -1464,7 +1458,7 @@ static void i40iw_deinit_device(struct i40iw_device *iwdev, bool reset)
+ i40iw_destroy_rdma_device(iwdev->iwibdev);
+ /* fallthrough */
+ case IP_ADDR_REGISTERED:
+- if (!reset)
++ if (!iwdev->reset)
+ i40iw_del_macip_entry(iwdev, (u8)iwdev->mac_ip_table_idx);
+ /* fallthrough */
+ case INET_NOTIFIER:
+@@ -1478,22 +1472,22 @@ static void i40iw_deinit_device(struct i40iw_device *iwdev, bool reset)
+ i40iw_destroy_pble_pool(dev, iwdev->pble_rsrc);
+ /* fallthrough */
+ case CEQ_CREATED:
+- i40iw_dele_ceqs(iwdev, reset);
++ i40iw_dele_ceqs(iwdev);
+ /* fallthrough */
+ case AEQ_CREATED:
+- i40iw_destroy_aeq(iwdev, reset);
++ i40iw_destroy_aeq(iwdev);
+ /* fallthrough */
+ case IEQ_CREATED:
+- i40iw_puda_dele_resources(&iwdev->vsi, I40IW_PUDA_RSRC_TYPE_IEQ, reset);
++ i40iw_puda_dele_resources(&iwdev->vsi, I40IW_PUDA_RSRC_TYPE_IEQ, iwdev->reset);
+ /* fallthrough */
+ case ILQ_CREATED:
+- i40iw_puda_dele_resources(&iwdev->vsi, I40IW_PUDA_RSRC_TYPE_ILQ, reset);
++ i40iw_puda_dele_resources(&iwdev->vsi, I40IW_PUDA_RSRC_TYPE_ILQ, iwdev->reset);
+ /* fallthrough */
+ case CCQ_CREATED:
+- i40iw_destroy_ccq(iwdev, reset);
++ i40iw_destroy_ccq(iwdev);
+ /* fallthrough */
+ case HMC_OBJS_CREATED:
+- i40iw_del_hmc_objects(dev, dev->hmc_info, true, reset);
++ i40iw_del_hmc_objects(dev, dev->hmc_info, true, iwdev->reset);
+ /* fallthrough */
+ case CQP_CREATED:
+ i40iw_destroy_cqp(iwdev, true);
+@@ -1694,7 +1688,7 @@ static int i40iw_open(struct i40e_info *ldev, struct i40e_client *client)
+ } while (0);
+
+ i40iw_pr_err("status = %d last completion = %d\n", status, iwdev->init_state);
+- i40iw_deinit_device(iwdev, false);
++ i40iw_deinit_device(iwdev);
+ return -ERESTART;
+ }
+
+@@ -1775,9 +1769,12 @@ static void i40iw_close(struct i40e_info *ldev, struct i40e_client *client, bool
+ iwdev = &hdl->device;
+ iwdev->closing = true;
+
++ if (reset)
++ iwdev->reset = true;
++
+ i40iw_cm_disconnect_all(iwdev);
+ destroy_workqueue(iwdev->virtchnl_wq);
+- i40iw_deinit_device(iwdev, reset);
++ i40iw_deinit_device(iwdev);
+ }
+
+ /**
+--
+2.1.3
+
--- /dev/null
+From 6327cb09dfda103f7255ef218ac18697b293554a Mon Sep 17 00:00:00 2001
+From: Shiraz Saleem <shiraz.saleem@intel.com>
+Date: Fri, 23 Jun 2017 16:03:58 -0500
+Subject: [PATCH 12650/13040] i40iw: Release cm_id ref on PCI function reset
+
+On PCI function reset, cm_id reference is not released
+which causes an application hang, as it waits on the
+cm_id to be released on rdma_destroy.
+
+To fix this, call i40iw_cm_disconn during a PCI function
+reset to clean-up resources and release cm_id reference.
+
+Signed-off-by: Shiraz Saleem <shiraz.saleem@intel.com>
+Signed-off-by: Henry Orosco <henry.orosco@intel.com>
+Signed-off-by: Doug Ledford <dledford@redhat.com>
+---
+ drivers/infiniband/hw/i40iw/i40iw_cm.c | 5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/infiniband/hw/i40iw/i40iw_cm.c b/drivers/infiniband/hw/i40iw/i40iw_cm.c
+index 6ae98aa..5a2fa74 100644
+--- a/drivers/infiniband/hw/i40iw/i40iw_cm.c
++++ b/drivers/infiniband/hw/i40iw/i40iw_cm.c
+@@ -3487,7 +3487,8 @@ static void i40iw_cm_disconn_true(struct i40iw_qp *iwqp)
+ if (((original_hw_tcp_state == I40IW_TCP_STATE_CLOSED) ||
+ (original_hw_tcp_state == I40IW_TCP_STATE_TIME_WAIT) ||
+ (last_ae == I40IW_AE_RDMAP_ROE_BAD_LLP_CLOSE) ||
+- (last_ae == I40IW_AE_LLP_CONNECTION_RESET))) {
++ (last_ae == I40IW_AE_LLP_CONNECTION_RESET) ||
++ iwdev->reset)) {
+ issue_close = 1;
+ iwqp->cm_id = NULL;
+ if (!iwqp->flush_issued) {
+@@ -4265,6 +4266,8 @@ void i40iw_cm_disconnect_all(struct i40iw_device *iwdev)
+ cm_node = container_of(list_node, struct i40iw_cm_node, connected_entry);
+ attr.qp_state = IB_QPS_ERR;
+ i40iw_modify_qp(&cm_node->iwqp->ibqp, &attr, IB_QP_STATE, NULL);
++ if (iwdev->reset)
++ i40iw_cm_disconn(cm_node->iwqp);
+ i40iw_rem_ref_cm_node(cm_node);
+ }
+ }
+--
+2.1.3
+
--- /dev/null
+From b5e452a04a10f12763f9836d3d3999f3bb1e56fb Mon Sep 17 00:00:00 2001
+From: Shiraz Saleem <shiraz.saleem@intel.com>
+Date: Fri, 23 Jun 2017 16:03:59 -0500
+Subject: [PATCH 12651/13040] i40iw: Free QP resources on CQP destroy QP
+ failure
+
+Current flow leaves software QP structures in memory if
+Control Queue Pair (CQP) destroy QP OP fails. To fix this,
+free QP resources on fail of CQP destroy QP OP.
+
+Signed-off-by: Shiraz Saleem <shiraz.saleem@intel.com>
+Signed-off-by: Henry Orosco <henry.orosco@intel.com>
+Signed-off-by: Doug Ledford <dledford@redhat.com>
+---
+ drivers/infiniband/hw/i40iw/i40iw_utils.c | 8 ++++++--
+ 1 file changed, 6 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/infiniband/hw/i40iw/i40iw_utils.c b/drivers/infiniband/hw/i40iw/i40iw_utils.c
+index 56d9869..ded8e48 100644
+--- a/drivers/infiniband/hw/i40iw/i40iw_utils.c
++++ b/drivers/infiniband/hw/i40iw/i40iw_utils.c
+@@ -546,8 +546,12 @@ void i40iw_rem_ref(struct ib_qp *ibqp)
+ cqp_info->in.u.qp_destroy.scratch = (uintptr_t)cqp_request;
+ cqp_info->in.u.qp_destroy.remove_hash_idx = true;
+ status = i40iw_handle_cqp_op(iwdev, cqp_request);
+- if (status)
+- i40iw_pr_err("CQP-OP Destroy QP fail");
++ if (!status)
++ return;
++
++ i40iw_rem_pdusecount(iwqp->iwpd, iwdev);
++ i40iw_free_qp_resources(iwdev, iwqp, qp_num);
++ i40iw_rem_devusecount(iwdev);
+ }
+
+ /**
+--
+2.1.3
+
--- /dev/null
+From c5c9d27e6c79ab3ab36092fe67fb7f2c6a120171 Mon Sep 17 00:00:00 2001
+From: Henry Orosco <henry.orosco@intel.com>
+Date: Fri, 23 Jun 2017 16:04:00 -0500
+Subject: [PATCH 12652/13040] i40iw: Add missing memory barrier
+
+Add missing write memory barrier before writing the
+header containing valid bit to the WQE in i40iw_puda_send.
+
+Signed-off-by: Henry Orosco <henry.orosco@intel.com>
+Signed-off-by: Doug Ledford <dledford@redhat.com>
+---
+ drivers/infiniband/hw/i40iw/i40iw_puda.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+diff --git a/drivers/infiniband/hw/i40iw/i40iw_puda.c b/drivers/infiniband/hw/i40iw/i40iw_puda.c
+index db41ab4..1bb1681 100644
+--- a/drivers/infiniband/hw/i40iw/i40iw_puda.c
++++ b/drivers/infiniband/hw/i40iw/i40iw_puda.c
+@@ -408,6 +408,9 @@ enum i40iw_status_code i40iw_puda_send(struct i40iw_sc_qp *qp,
+ set_64bit_val(wqe, 0, info->paddr);
+ set_64bit_val(wqe, 8, LS_64(info->len, I40IWQPSQ_FRAG_LEN));
+ set_64bit_val(wqe, 16, header[0]);
++
++ /* Ensure all data is written before writing valid bit */
++ wmb();
+ set_64bit_val(wqe, 24, header[1]);
+
+ i40iw_debug_buf(qp->dev, I40IW_DEBUG_PUDA, "PUDA SEND WQE", wqe, 32);
+--
+2.1.3
+
--- /dev/null
+From c709d7f229a273c7c5664e9dfe5432b031842d0c Mon Sep 17 00:00:00 2001
+From: Henry Orosco <henry.orosco@intel.com>
+Date: Fri, 23 Jun 2017 16:04:01 -0500
+Subject: [PATCH 12653/13040] i40iw: Update list correctly
+
+To avoid infinite loop, in i40iw_ieq_handle_exception, update
+plist inside while loop.
+
+Signed-off-by: Henry Orosco <henry.orosco@intel.com>
+Signed-off-by: Doug Ledford <dledford@redhat.com>
+---
+ drivers/infiniband/hw/i40iw/i40iw_puda.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/infiniband/hw/i40iw/i40iw_puda.c b/drivers/infiniband/hw/i40iw/i40iw_puda.c
+index 1bb1681..71050c5 100644
+--- a/drivers/infiniband/hw/i40iw/i40iw_puda.c
++++ b/drivers/infiniband/hw/i40iw/i40iw_puda.c
+@@ -1414,10 +1414,10 @@ static void i40iw_ieq_handle_exception(struct i40iw_puda_rsrc *ieq,
+
+ if (!list_empty(rxlist)) {
+ tmpbuf = (struct i40iw_puda_buf *)rxlist->next;
+- plist = &tmpbuf->list;
+ while ((struct list_head *)tmpbuf != rxlist) {
+ if ((int)(buf->seqnum - tmpbuf->seqnum) < 0)
+ break;
++ plist = &tmpbuf->list;
+ tmpbuf = (struct i40iw_puda_buf *)plist->next;
+ }
+ /* Insert buf before tmpbuf */
+--
+2.1.3
+
--- /dev/null
+From 44b99f88cdd5b47046c511aa64ae71ad2c9e5b1e Mon Sep 17 00:00:00 2001
+From: Shiraz Saleem <shiraz.saleem@intel.com>
+Date: Fri, 23 Jun 2017 16:04:02 -0500
+Subject: [PATCH 12654/13040] i40iw: Avoid memory leak of CQP request objects
+
+Control Queue Pair (CQP) request objects, which have
+not received a completion upon interface close, remain
+in memory.
+
+To fix this, identify and free all pending CQP request
+objects during destroy CQP OP.
+
+Signed-off-by: Shiraz Saleem <shiraz.saleem@intel.com>
+Signed-off-by: Henry Orosco <henry.orosco@intel.com>
+Signed-off-by: Doug Ledford <dledford@redhat.com>
+---
+ drivers/infiniband/hw/i40iw/i40iw.h | 1 +
+ drivers/infiniband/hw/i40iw/i40iw_main.c | 2 ++
+ drivers/infiniband/hw/i40iw/i40iw_utils.c | 52 +++++++++++++++++++++++++++++++
+ 3 files changed, 55 insertions(+)
+
+diff --git a/drivers/infiniband/hw/i40iw/i40iw.h b/drivers/infiniband/hw/i40iw/i40iw.h
+index da2eb5a..9b15664 100644
+--- a/drivers/infiniband/hw/i40iw/i40iw.h
++++ b/drivers/infiniband/hw/i40iw/i40iw.h
+@@ -527,6 +527,7 @@ enum i40iw_status_code i40iw_add_mac_addr(struct i40iw_device *iwdev,
+ int i40iw_modify_qp(struct ib_qp *, struct ib_qp_attr *, int, struct ib_udata *);
+ void i40iw_cq_wq_destroy(struct i40iw_device *iwdev, struct i40iw_sc_cq *cq);
+
++void i40iw_cleanup_pending_cqp_op(struct i40iw_device *iwdev);
+ void i40iw_rem_pdusecount(struct i40iw_pd *iwpd, struct i40iw_device *iwdev);
+ void i40iw_add_pdusecount(struct i40iw_pd *iwpd);
+ void i40iw_rem_devusecount(struct i40iw_device *iwdev);
+diff --git a/drivers/infiniband/hw/i40iw/i40iw_main.c b/drivers/infiniband/hw/i40iw/i40iw_main.c
+index 3bad7d9..ae8463f 100644
+--- a/drivers/infiniband/hw/i40iw/i40iw_main.c
++++ b/drivers/infiniband/hw/i40iw/i40iw_main.c
+@@ -243,6 +243,8 @@ static void i40iw_destroy_cqp(struct i40iw_device *iwdev, bool free_hwcqp)
+ if (free_hwcqp)
+ dev->cqp_ops->cqp_destroy(dev->cqp);
+
++ i40iw_cleanup_pending_cqp_op(iwdev);
++
+ i40iw_free_dma_mem(dev->hw, &cqp->sq);
+ kfree(cqp->scratch_array);
+ iwdev->cqp.scratch_array = NULL;
+diff --git a/drivers/infiniband/hw/i40iw/i40iw_utils.c b/drivers/infiniband/hw/i40iw/i40iw_utils.c
+index ded8e48..e311ec5 100644
+--- a/drivers/infiniband/hw/i40iw/i40iw_utils.c
++++ b/drivers/infiniband/hw/i40iw/i40iw_utils.c
+@@ -337,6 +337,7 @@ struct i40iw_cqp_request *i40iw_get_cqp_request(struct i40iw_cqp *cqp, bool wait
+ */
+ void i40iw_free_cqp_request(struct i40iw_cqp *cqp, struct i40iw_cqp_request *cqp_request)
+ {
++ struct i40iw_device *iwdev = container_of(cqp, struct i40iw_device, cqp);
+ unsigned long flags;
+
+ if (cqp_request->dynamic) {
+@@ -350,6 +351,7 @@ void i40iw_free_cqp_request(struct i40iw_cqp *cqp, struct i40iw_cqp_request *cqp
+ list_add_tail(&cqp_request->list, &cqp->cqp_avail_reqs);
+ spin_unlock_irqrestore(&cqp->req_lock, flags);
+ }
++ wake_up(&iwdev->close_wq);
+ }
+
+ /**
+@@ -365,6 +367,56 @@ void i40iw_put_cqp_request(struct i40iw_cqp *cqp,
+ }
+
+ /**
++ * i40iw_free_pending_cqp_request -free pending cqp request objs
++ * @cqp: cqp ptr
++ * @cqp_request: to be put back in cqp list
++ */
++static void i40iw_free_pending_cqp_request(struct i40iw_cqp *cqp,
++ struct i40iw_cqp_request *cqp_request)
++{
++ struct i40iw_device *iwdev = container_of(cqp, struct i40iw_device, cqp);
++
++ if (cqp_request->waiting) {
++ cqp_request->compl_info.error = true;
++ cqp_request->request_done = true;
++ wake_up(&cqp_request->waitq);
++ }
++ i40iw_put_cqp_request(cqp, cqp_request);
++ wait_event_timeout(iwdev->close_wq,
++ !atomic_read(&cqp_request->refcount),
++ 1000);
++}
++
++/**
++ * i40iw_cleanup_pending_cqp_op - clean-up cqp with no completions
++ * @iwdev: iwarp device
++ */
++void i40iw_cleanup_pending_cqp_op(struct i40iw_device *iwdev)
++{
++ struct i40iw_sc_dev *dev = &iwdev->sc_dev;
++ struct i40iw_cqp *cqp = &iwdev->cqp;
++ struct i40iw_cqp_request *cqp_request = NULL;
++ struct cqp_commands_info *pcmdinfo = NULL;
++ u32 i, pending_work, wqe_idx;
++
++ pending_work = I40IW_RING_WORK_AVAILABLE(cqp->sc_cqp.sq_ring);
++ wqe_idx = I40IW_RING_GETCURRENT_TAIL(cqp->sc_cqp.sq_ring);
++ for (i = 0; i < pending_work; i++) {
++ cqp_request = (struct i40iw_cqp_request *)(unsigned long)cqp->scratch_array[wqe_idx];
++ if (cqp_request)
++ i40iw_free_pending_cqp_request(cqp, cqp_request);
++ wqe_idx = (wqe_idx + 1) % I40IW_RING_GETSIZE(cqp->sc_cqp.sq_ring);
++ }
++
++ while (!list_empty(&dev->cqp_cmd_head)) {
++ pcmdinfo = (struct cqp_commands_info *)i40iw_remove_head(&dev->cqp_cmd_head);
++ cqp_request = container_of(pcmdinfo, struct i40iw_cqp_request, info);
++ if (cqp_request)
++ i40iw_free_pending_cqp_request(cqp, cqp_request);
++ }
++}
++
++/**
+ * i40iw_free_qp - callback after destroy cqp completes
+ * @cqp_request: cqp request for destroy qp
+ * @num: not used
+--
+2.1.3
+
--- /dev/null
+From af56e53ccd29bda062a1ae75276dc9c0f8eedf47 Mon Sep 17 00:00:00 2001
+From: Tatyana Nikolova <Tatyana.E.Nikolova@intel.com>
+Date: Wed, 5 Jul 2017 21:25:33 -0500
+Subject: [PATCH 12655/13040] i40iw: Free QP PBLEs when the QP is destroyed
+
+If the physical buffer list entries (PBLEs) of a QP are freed
+up at i40iw_dereg_mr, they can be assigned to a newly
+created QP before the previous QP is destroyed. Fix this
+by freeing PBLEs only when the QP is destroyed.
+
+Signed-off-by: Tatyana Nikolova <tatyana.e.nikolova@intel.com>
+Signed-off-by: Faisal Latif <faisal.latif@intel.com>
+Signed-off-by: Doug Ledford <dledford@redhat.com>
+---
+ drivers/infiniband/hw/i40iw/i40iw_verbs.c | 15 +++++++++++----
+ drivers/infiniband/hw/i40iw/i40iw_verbs.h | 2 +-
+ 2 files changed, 12 insertions(+), 5 deletions(-)
+
+diff --git a/drivers/infiniband/hw/i40iw/i40iw_verbs.c b/drivers/infiniband/hw/i40iw/i40iw_verbs.c
+index 4dbe61e..4aa0264c 100644
+--- a/drivers/infiniband/hw/i40iw/i40iw_verbs.c
++++ b/drivers/infiniband/hw/i40iw/i40iw_verbs.c
+@@ -426,9 +426,13 @@ void i40iw_free_qp_resources(struct i40iw_device *iwdev,
+ struct i40iw_qp *iwqp,
+ u32 qp_num)
+ {
++ struct i40iw_pbl *iwpbl = &iwqp->iwpbl;
++
+ i40iw_dealloc_push_page(iwdev, &iwqp->sc_qp);
+ if (qp_num)
+ i40iw_free_resource(iwdev, iwdev->allocated_qps, qp_num);
++ if (iwpbl->pbl_allocated)
++ i40iw_free_pble(iwdev->pble_rsrc, &iwpbl->pble_alloc);
+ i40iw_free_dma_mem(iwdev->sc_dev.hw, &iwqp->q2_ctx_mem);
+ i40iw_free_dma_mem(iwdev->sc_dev.hw, &iwqp->kqp.dma_mem);
+ kfree(iwqp->kqp.wrid_mem);
+@@ -483,7 +487,7 @@ static int i40iw_setup_virt_qp(struct i40iw_device *iwdev,
+ struct i40iw_qp *iwqp,
+ struct i40iw_qp_init_info *init_info)
+ {
+- struct i40iw_pbl *iwpbl = iwqp->iwpbl;
++ struct i40iw_pbl *iwpbl = &iwqp->iwpbl;
+ struct i40iw_qp_mr *qpmr = &iwpbl->qp_mr;
+
+ iwqp->page = qpmr->sq_page;
+@@ -688,19 +692,22 @@ static struct ib_qp *i40iw_create_qp(struct ib_pd *ibpd,
+ ucontext = to_ucontext(ibpd->uobject->context);
+
+ if (req.user_wqe_buffers) {
++ struct i40iw_pbl *iwpbl;
++
+ spin_lock_irqsave(
+ &ucontext->qp_reg_mem_list_lock, flags);
+- iwqp->iwpbl = i40iw_get_pbl(
++ iwpbl = i40iw_get_pbl(
+ (unsigned long)req.user_wqe_buffers,
+ &ucontext->qp_reg_mem_list);
+ spin_unlock_irqrestore(
+ &ucontext->qp_reg_mem_list_lock, flags);
+
+- if (!iwqp->iwpbl) {
++ if (!iwpbl) {
+ err_code = -ENODATA;
+ i40iw_pr_err("no pbl info\n");
+ goto error;
+ }
++ memcpy(&iwqp->iwpbl, iwpbl, sizeof(iwqp->iwpbl));
+ }
+ }
+ err_code = i40iw_setup_virt_qp(iwdev, iwqp, &init_info);
+@@ -2063,7 +2070,7 @@ static int i40iw_dereg_mr(struct ib_mr *ib_mr)
+ ucontext = to_ucontext(ibpd->uobject->context);
+ i40iw_del_memlist(iwmr, ucontext);
+ }
+- if (iwpbl->pbl_allocated)
++ if (iwpbl->pbl_allocated && iwmr->type != IW_MEMREG_TYPE_QP)
+ i40iw_free_pble(iwdev->pble_rsrc, palloc);
+ kfree(iwmr);
+ return 0;
+diff --git a/drivers/infiniband/hw/i40iw/i40iw_verbs.h b/drivers/infiniband/hw/i40iw/i40iw_verbs.h
+index 07c3fec..9067443 100644
+--- a/drivers/infiniband/hw/i40iw/i40iw_verbs.h
++++ b/drivers/infiniband/hw/i40iw/i40iw_verbs.h
+@@ -170,7 +170,7 @@ struct i40iw_qp {
+ struct i40iw_qp_kmode kqp;
+ struct i40iw_dma_mem host_ctx;
+ struct timer_list terminate_timer;
+- struct i40iw_pbl *iwpbl;
++ struct i40iw_pbl iwpbl;
+ struct i40iw_dma_mem q2_ctx_mem;
+ struct i40iw_dma_mem ietf_mem;
+ struct completion sq_drained;
+--
+2.1.3
+
--- /dev/null
+From 6031e079aa4656743298ea235b894ee883f45c71 Mon Sep 17 00:00:00 2001
+From: Dan Carpenter <dan.carpenter@oracle.com>
+Date: Thu, 13 Jul 2017 10:47:22 +0300
+Subject: [PATCH 12658/13040] IB/i40iw: Fix error code in i40iw_create_cq()
+
+We accidentally forgot to set the error code if ib_copy_from_udata()
+fails. It means we return ERR_PTR(0) which is NULL and results in a
+NULL dereference in the callers.
+
+Fixes: d37498417947 ("i40iw: add files for iwarp interface")
+Signed-off-by: Dan Carpenter <dan.carpenter@oracle.com>
+Acked-by: Shiraz Saleem <shiraz.saleem@intel.com>
+Signed-off-by: Doug Ledford <dledford@redhat.com>
+---
+ drivers/infiniband/hw/i40iw/i40iw_verbs.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/infiniband/hw/i40iw/i40iw_verbs.c b/drivers/infiniband/hw/i40iw/i40iw_verbs.c
+index 4aa0264c..02d871d 100644
+--- a/drivers/infiniband/hw/i40iw/i40iw_verbs.c
++++ b/drivers/infiniband/hw/i40iw/i40iw_verbs.c
+@@ -1168,8 +1168,10 @@ static struct ib_cq *i40iw_create_cq(struct ib_device *ibdev,
+ memset(&req, 0, sizeof(req));
+ iwcq->user_mode = true;
+ ucontext = to_ucontext(context);
+- if (ib_copy_from_udata(&req, udata, sizeof(struct i40iw_create_cq_req)))
++ if (ib_copy_from_udata(&req, udata, sizeof(struct i40iw_create_cq_req))) {
++ err_code = -EFAULT;
+ goto cq_free_resources;
++ }
+
+ spin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags);
+ iwpbl = i40iw_get_pbl((unsigned long)req.user_cq_buffer,
+--
+2.1.3
+
--- /dev/null
+From f67ace2d8868d06710ceea1b10b124eead5040da Mon Sep 17 00:00:00 2001
+From: Chien Tin Tung <chien.tin.tung@intel.com>
+Date: Tue, 8 Aug 2017 20:38:43 -0500
+Subject: [PATCH 12860/13040] i40iw: Fix parsing of query/commit FPM buffers
+
+Parsing of commit/query Host Memory Cache Function Private Memory
+is not skipping over reserved fields and incorrectly assigning
+those values into object's base/cnt/max_cnt fields. Skip over
+reserved fields and set correct values. Also correct memory
+alignment requirement for commit/query FPM buffers.
+
+Signed-off-by: Chien Tin Tung <chien.tin.tung@intel.com>
+Signed-off-by: Shiraz Saleem <shiraz.saleem@intel.com>
+Signed-off-by: Christopher N Bednarz <christopher.n.bednarz@intel.com>
+Signed-off-by: Henry Orosco <henry.orosco@intel.com>
+Signed-off-by: Doug Ledford <dledford@redhat.com>
+---
+ drivers/infiniband/hw/i40iw/i40iw_ctrl.c | 121 +++++++++++++++++++++----------
+ drivers/infiniband/hw/i40iw/i40iw_d.h | 4 +-
+ 2 files changed, 83 insertions(+), 42 deletions(-)
+
+diff --git a/drivers/infiniband/hw/i40iw/i40iw_ctrl.c b/drivers/infiniband/hw/i40iw/i40iw_ctrl.c
+index 9ec1ae9..ef4a73c 100644
+--- a/drivers/infiniband/hw/i40iw/i40iw_ctrl.c
++++ b/drivers/infiniband/hw/i40iw/i40iw_ctrl.c
+@@ -130,20 +130,32 @@ static enum i40iw_status_code i40iw_sc_parse_fpm_commit_buf(
+ u64 base = 0;
+ u32 i, j;
+ u32 k = 0;
+- u32 low;
+
+ /* copy base values in obj_info */
+- for (i = I40IW_HMC_IW_QP, j = 0;
+- i <= I40IW_HMC_IW_PBLE; i++, j += 8) {
++ for (i = I40IW_HMC_IW_QP, j = 0; i <= I40IW_HMC_IW_PBLE; i++, j += 8) {
++ if ((i == I40IW_HMC_IW_SRQ) ||
++ (i == I40IW_HMC_IW_FSIMC) ||
++ (i == I40IW_HMC_IW_FSIAV)) {
++ info[i].base = 0;
++ info[i].cnt = 0;
++ continue;
++ }
+ get_64bit_val(buf, j, &temp);
+ info[i].base = RS_64_1(temp, 32) * 512;
+ if (info[i].base > base) {
+ base = info[i].base;
+ k = i;
+ }
+- low = (u32)(temp);
+- if (low)
+- info[i].cnt = low;
++ if (i == I40IW_HMC_IW_APBVT_ENTRY) {
++ info[i].cnt = 1;
++ continue;
++ }
++ if (i == I40IW_HMC_IW_QP)
++ info[i].cnt = (u32)RS_64(temp, I40IW_QUERY_FPM_MAX_QPS);
++ else if (i == I40IW_HMC_IW_CQ)
++ info[i].cnt = (u32)RS_64(temp, I40IW_QUERY_FPM_MAX_CQS);
++ else
++ info[i].cnt = (u32)(temp);
+ }
+ size = info[k].cnt * info[k].size + info[k].base;
+ if (size & 0x1FFFFF)
+@@ -155,6 +167,31 @@ static enum i40iw_status_code i40iw_sc_parse_fpm_commit_buf(
+ }
+
+ /**
++ * i40iw_sc_decode_fpm_query() - Decode a 64 bit value into max count and size
++ * @buf: ptr to fpm query buffer
++ * @buf_idx: index into buf
++ * @obj_info: ptr to i40iw_hmc_obj_info struct
++ * @rsrc_idx: resource index into obj_info
++ *
++ * Decode a 64 bit value from fpm query buffer into max count and size
++ */
++static u64 i40iw_sc_decode_fpm_query(u64 *buf,
++ u32 buf_idx,
++ struct i40iw_hmc_obj_info *obj_info,
++ u32 rsrc_idx)
++{
++ u64 temp;
++ u32 size;
++
++ get_64bit_val(buf, buf_idx, &temp);
++ obj_info[rsrc_idx].max_cnt = (u32)temp;
++ size = (u32)RS_64_1(temp, 32);
++ obj_info[rsrc_idx].size = LS_64_1(1, size);
++
++ return temp;
++}
++
++/**
+ * i40iw_sc_parse_fpm_query_buf() - parses fpm query buffer
+ * @buf: ptr to fpm query buffer
+ * @info: ptr to i40iw_hmc_obj_info struct
+@@ -168,9 +205,9 @@ static enum i40iw_status_code i40iw_sc_parse_fpm_query_buf(
+ struct i40iw_hmc_info *hmc_info,
+ struct i40iw_hmc_fpm_misc *hmc_fpm_misc)
+ {
+- u64 temp;
+ struct i40iw_hmc_obj_info *obj_info;
+- u32 i, j, size;
++ u64 temp;
++ u32 size;
+ u16 max_pe_sds;
+
+ obj_info = hmc_info->hmc_obj;
+@@ -185,41 +222,52 @@ static enum i40iw_status_code i40iw_sc_parse_fpm_query_buf(
+ hmc_fpm_misc->max_sds = max_pe_sds;
+ hmc_info->sd_table.sd_cnt = max_pe_sds + hmc_info->first_sd_index;
+
+- for (i = I40IW_HMC_IW_QP, j = 8;
+- i <= I40IW_HMC_IW_ARP; i++, j += 8) {
+- get_64bit_val(buf, j, &temp);
+- if (i == I40IW_HMC_IW_QP)
+- obj_info[i].max_cnt = (u32)RS_64(temp, I40IW_QUERY_FPM_MAX_QPS);
+- else if (i == I40IW_HMC_IW_CQ)
+- obj_info[i].max_cnt = (u32)RS_64(temp, I40IW_QUERY_FPM_MAX_CQS);
+- else
+- obj_info[i].max_cnt = (u32)temp;
++ get_64bit_val(buf, 8, &temp);
++ obj_info[I40IW_HMC_IW_QP].max_cnt = (u32)RS_64(temp, I40IW_QUERY_FPM_MAX_QPS);
++ size = (u32)RS_64_1(temp, 32);
++ obj_info[I40IW_HMC_IW_QP].size = LS_64_1(1, size);
+
+- size = (u32)RS_64_1(temp, 32);
+- obj_info[i].size = ((u64)1 << size);
+- }
+- for (i = I40IW_HMC_IW_MR, j = 48;
+- i <= I40IW_HMC_IW_PBLE; i++, j += 8) {
+- get_64bit_val(buf, j, &temp);
+- obj_info[i].max_cnt = (u32)temp;
+- size = (u32)RS_64_1(temp, 32);
+- obj_info[i].size = LS_64_1(1, size);
+- }
++ get_64bit_val(buf, 16, &temp);
++ obj_info[I40IW_HMC_IW_CQ].max_cnt = (u32)RS_64(temp, I40IW_QUERY_FPM_MAX_CQS);
++ size = (u32)RS_64_1(temp, 32);
++ obj_info[I40IW_HMC_IW_CQ].size = LS_64_1(1, size);
++
++ i40iw_sc_decode_fpm_query(buf, 32, obj_info, I40IW_HMC_IW_HTE);
++ i40iw_sc_decode_fpm_query(buf, 40, obj_info, I40IW_HMC_IW_ARP);
++
++ obj_info[I40IW_HMC_IW_APBVT_ENTRY].size = 8192;
++ obj_info[I40IW_HMC_IW_APBVT_ENTRY].max_cnt = 1;
++
++ i40iw_sc_decode_fpm_query(buf, 48, obj_info, I40IW_HMC_IW_MR);
++ i40iw_sc_decode_fpm_query(buf, 56, obj_info, I40IW_HMC_IW_XF);
+
+- get_64bit_val(buf, 120, &temp);
+- hmc_fpm_misc->max_ceqs = (u8)RS_64(temp, I40IW_QUERY_FPM_MAX_CEQS);
+- get_64bit_val(buf, 120, &temp);
+- hmc_fpm_misc->ht_multiplier = RS_64(temp, I40IW_QUERY_FPM_HTMULTIPLIER);
+- get_64bit_val(buf, 120, &temp);
+- hmc_fpm_misc->timer_bucket = RS_64(temp, I40IW_QUERY_FPM_TIMERBUCKET);
+ get_64bit_val(buf, 64, &temp);
++ obj_info[I40IW_HMC_IW_XFFL].max_cnt = (u32)temp;
++ obj_info[I40IW_HMC_IW_XFFL].size = 4;
+ hmc_fpm_misc->xf_block_size = RS_64(temp, I40IW_QUERY_FPM_XFBLOCKSIZE);
+ if (!hmc_fpm_misc->xf_block_size)
+ return I40IW_ERR_INVALID_SIZE;
++
++ i40iw_sc_decode_fpm_query(buf, 72, obj_info, I40IW_HMC_IW_Q1);
++
+ get_64bit_val(buf, 80, &temp);
++ obj_info[I40IW_HMC_IW_Q1FL].max_cnt = (u32)temp;
++ obj_info[I40IW_HMC_IW_Q1FL].size = 4;
+ hmc_fpm_misc->q1_block_size = RS_64(temp, I40IW_QUERY_FPM_Q1BLOCKSIZE);
+ if (!hmc_fpm_misc->q1_block_size)
+ return I40IW_ERR_INVALID_SIZE;
++
++ i40iw_sc_decode_fpm_query(buf, 88, obj_info, I40IW_HMC_IW_TIMER);
++
++ get_64bit_val(buf, 112, &temp);
++ obj_info[I40IW_HMC_IW_PBLE].max_cnt = (u32)temp;
++ obj_info[I40IW_HMC_IW_PBLE].size = 8;
++
++ get_64bit_val(buf, 120, &temp);
++ hmc_fpm_misc->max_ceqs = (u8)RS_64(temp, I40IW_QUERY_FPM_MAX_CEQS);
++ hmc_fpm_misc->ht_multiplier = RS_64(temp, I40IW_QUERY_FPM_HTMULTIPLIER);
++ hmc_fpm_misc->timer_bucket = RS_64(temp, I40IW_QUERY_FPM_TIMERBUCKET);
++
+ return 0;
+ }
+
+@@ -3392,13 +3440,6 @@ enum i40iw_status_code i40iw_sc_init_iw_hmc(struct i40iw_sc_dev *dev, u8 hmc_fn_
+ hmc_info->sd_table.sd_entry = virt_mem.va;
+ }
+
+- /* fill size of objects which are fixed */
+- hmc_info->hmc_obj[I40IW_HMC_IW_XFFL].size = 4;
+- hmc_info->hmc_obj[I40IW_HMC_IW_Q1FL].size = 4;
+- hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].size = 8;
+- hmc_info->hmc_obj[I40IW_HMC_IW_APBVT_ENTRY].size = 8192;
+- hmc_info->hmc_obj[I40IW_HMC_IW_APBVT_ENTRY].max_cnt = 1;
+-
+ return ret_code;
+ }
+
+diff --git a/drivers/infiniband/hw/i40iw/i40iw_d.h b/drivers/infiniband/hw/i40iw/i40iw_d.h
+index a39ac12..2ebaadb 100644
+--- a/drivers/infiniband/hw/i40iw/i40iw_d.h
++++ b/drivers/infiniband/hw/i40iw/i40iw_d.h
+@@ -1507,8 +1507,8 @@ enum {
+ I40IW_CQ0_ALIGNMENT_MASK = (256 - 1),
+ I40IW_HOST_CTX_ALIGNMENT_MASK = (4 - 1),
+ I40IW_SHADOWAREA_MASK = (128 - 1),
+- I40IW_FPM_QUERY_BUF_ALIGNMENT_MASK = 0,
+- I40IW_FPM_COMMIT_BUF_ALIGNMENT_MASK = 0
++ I40IW_FPM_QUERY_BUF_ALIGNMENT_MASK = (4 - 1),
++ I40IW_FPM_COMMIT_BUF_ALIGNMENT_MASK = (4 - 1)
+ };
+
+ enum i40iw_alignment {
+--
+2.1.3
+
--- /dev/null
+From 8129331f01a683ed8d9a9a65ed01b5c6ad26403a Mon Sep 17 00:00:00 2001
+From: Mustafa Ismail <mustafa.ismail@intel.com>
+Date: Tue, 8 Aug 2017 20:38:44 -0500
+Subject: [PATCH 12861/13040] i40iw: Correct variable names
+
+Fix incorrect naming of status code and struct. Use inline
+instead of immediate.
+
+Signed-off-by: Mustafa Ismail <mustafa.ismail@intel.com>
+Signed-off-by: Henry Orosco <henry.orosco@intel.com>
+Signed-off-by: Doug Ledford <dledford@redhat.com>
+---
+ drivers/infiniband/hw/i40iw/i40iw_status.h | 2 +-
+ drivers/infiniband/hw/i40iw/i40iw_uk.c | 6 +++---
+ 2 files changed, 4 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/infiniband/hw/i40iw/i40iw_status.h b/drivers/infiniband/hw/i40iw/i40iw_status.h
+index 91c4217..f7013f1 100644
+--- a/drivers/infiniband/hw/i40iw/i40iw_status.h
++++ b/drivers/infiniband/hw/i40iw/i40iw_status.h
+@@ -62,7 +62,7 @@ enum i40iw_status_code {
+ I40IW_ERR_INVALID_ALIGNMENT = -23,
+ I40IW_ERR_FLUSHED_QUEUE = -24,
+ I40IW_ERR_INVALID_PUSH_PAGE_INDEX = -25,
+- I40IW_ERR_INVALID_IMM_DATA_SIZE = -26,
++ I40IW_ERR_INVALID_INLINE_DATA_SIZE = -26,
+ I40IW_ERR_TIMEOUT = -27,
+ I40IW_ERR_OPCODE_MISMATCH = -28,
+ I40IW_ERR_CQP_COMPL_ERROR = -29,
+diff --git a/drivers/infiniband/hw/i40iw/i40iw_uk.c b/drivers/infiniband/hw/i40iw/i40iw_uk.c
+index b0d3a0e..70a6b41 100644
+--- a/drivers/infiniband/hw/i40iw/i40iw_uk.c
++++ b/drivers/infiniband/hw/i40iw/i40iw_uk.c
+@@ -435,7 +435,7 @@ static enum i40iw_status_code i40iw_inline_rdma_write(struct i40iw_qp_uk *qp,
+
+ op_info = &info->op.inline_rdma_write;
+ if (op_info->len > I40IW_MAX_INLINE_DATA_SIZE)
+- return I40IW_ERR_INVALID_IMM_DATA_SIZE;
++ return I40IW_ERR_INVALID_INLINE_DATA_SIZE;
+
+ ret_code = i40iw_inline_data_size_to_wqesize(op_info->len, &wqe_size);
+ if (ret_code)
+@@ -511,7 +511,7 @@ static enum i40iw_status_code i40iw_inline_send(struct i40iw_qp_uk *qp,
+
+ op_info = &info->op.inline_send;
+ if (op_info->len > I40IW_MAX_INLINE_DATA_SIZE)
+- return I40IW_ERR_INVALID_IMM_DATA_SIZE;
++ return I40IW_ERR_INVALID_INLINE_DATA_SIZE;
+
+ ret_code = i40iw_inline_data_size_to_wqesize(op_info->len, &wqe_size);
+ if (ret_code)
+@@ -1187,7 +1187,7 @@ enum i40iw_status_code i40iw_inline_data_size_to_wqesize(u32 data_size,
+ u8 *wqe_size)
+ {
+ if (data_size > I40IW_MAX_INLINE_DATA_SIZE)
+- return I40IW_ERR_INVALID_IMM_DATA_SIZE;
++ return I40IW_ERR_INVALID_INLINE_DATA_SIZE;
+
+ if (data_size <= 16)
+ *wqe_size = I40IW_QP_WQE_MIN_SIZE;
+--
+2.1.3
+
--- /dev/null
+From 29c2415a6669bab354f0aa3445777fe147c7a05d Mon Sep 17 00:00:00 2001
+From: Mustafa Ismail <mustafa.ismail@intel.com>
+Date: Tue, 8 Aug 2017 20:38:46 -0500
+Subject: [PATCH 12862/13040] i40iw: Fix typecast of tcp_seq_num
+
+The typecast of tcp_seq_num incorrectly uses u8. Fix by
+casting to u32.
+
+Signed-off-by: Mustafa Ismail <mustafa.ismail@intel.com>
+Signed-off-by: Henry Orosco <henry.orosco@intel.com>
+Signed-off-by: Doug Ledford <dledford@redhat.com>
+---
+ drivers/infiniband/hw/i40iw/i40iw_uk.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/infiniband/hw/i40iw/i40iw_uk.c b/drivers/infiniband/hw/i40iw/i40iw_uk.c
+index 70a6b41..1060725 100644
+--- a/drivers/infiniband/hw/i40iw/i40iw_uk.c
++++ b/drivers/infiniband/hw/i40iw/i40iw_uk.c
+@@ -784,7 +784,7 @@ static enum i40iw_status_code i40iw_cq_poll_completion(struct i40iw_cq_uk *cq,
+ get_64bit_val(cqe, 0, &qword0);
+ get_64bit_val(cqe, 16, &qword2);
+
+- info->tcp_seq_num = (u8)RS_64(qword0, I40IWCQ_TCPSEQNUM);
++ info->tcp_seq_num = (u32)RS_64(qword0, I40IWCQ_TCPSEQNUM);
+
+ info->qp_id = (u32)RS_64(qword2, I40IWCQ_QPID);
+
+--
+2.1.3
+
--- /dev/null
+From a28f047e5f9b987d614eeee34388087ffdda3e53 Mon Sep 17 00:00:00 2001
+From: Christopher N Bednarz <christopher.n.bednarz@intel.com>
+Date: Tue, 8 Aug 2017 20:38:47 -0500
+Subject: [PATCH 12863/13040] i40iw: Use correct alignment for CQ0 memory
+
+Utilize correct alignment variable when allocating
+DMA memory for CQ0.
+
+Signed-off-by: Christopher N Bednarz <christopher.n.bednarz@intel.com>
+Signed-off-by: Henry Orosco <henry.orosco@intel.com>
+Signed-off-by: Doug Ledford <dledford@redhat.com>
+---
+ drivers/infiniband/hw/i40iw/i40iw_puda.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/infiniband/hw/i40iw/i40iw_puda.c b/drivers/infiniband/hw/i40iw/i40iw_puda.c
+index 71050c5..7f5583d 100644
+--- a/drivers/infiniband/hw/i40iw/i40iw_puda.c
++++ b/drivers/infiniband/hw/i40iw/i40iw_puda.c
+@@ -685,7 +685,7 @@ static enum i40iw_status_code i40iw_puda_cq_create(struct i40iw_puda_rsrc *rsrc)
+ cqsize = rsrc->cq_size * (sizeof(struct i40iw_cqe));
+ tsize = cqsize + sizeof(struct i40iw_cq_shadow_area);
+ ret = i40iw_allocate_dma_mem(dev->hw, &rsrc->cqmem, tsize,
+- I40IW_CQ0_ALIGNMENT_MASK);
++ I40IW_CQ0_ALIGNMENT);
+ if (ret)
+ return ret;
+
+--
+2.1.3
+
--- /dev/null
+From aa939c12ab8a0c094420ad1b909a957ac590e43e Mon Sep 17 00:00:00 2001
+From: Christopher N Bednarz <christopher.n.bednarz@intel.com>
+Date: Tue, 8 Aug 2017 20:38:48 -0500
+Subject: [PATCH 12864/13040] i40iw: Fix potential fcn_id_array out of bounds
+
+Avoid out of bounds error by utilizing I40IW_MAX_STATS_COUNT
+instead of I40IW_INVALID_FCN_ID.
+
+Signed-off-by: Christopher N Bednarz <christopher.n.bednarz@intel.com>
+Signed-off-by: Henry Orosco <henry.orosco@intel.com>
+Signed-off-by: Doug Ledford <dledford@redhat.com>
+---
+ drivers/infiniband/hw/i40iw/i40iw_ctrl.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/infiniband/hw/i40iw/i40iw_ctrl.c b/drivers/infiniband/hw/i40iw/i40iw_ctrl.c
+index ef4a73c..a49ff2e 100644
+--- a/drivers/infiniband/hw/i40iw/i40iw_ctrl.c
++++ b/drivers/infiniband/hw/i40iw/i40iw_ctrl.c
+@@ -4881,7 +4881,7 @@ void i40iw_vsi_stats_free(struct i40iw_sc_vsi *vsi)
+ {
+ u8 fcn_id = vsi->fcn_id;
+
+- if ((vsi->stats_fcn_id_alloc) && (fcn_id != I40IW_INVALID_FCN_ID))
++ if (vsi->stats_fcn_id_alloc && fcn_id < I40IW_MAX_STATS_COUNT)
+ vsi->dev->fcn_id_array[fcn_id] = false;
+ i40iw_hw_stats_stop_timer(vsi);
+ }
+--
+2.1.3
+
--- /dev/null
+From 02654b5ae1a45c0e31060816231086685cfcd841 Mon Sep 17 00:00:00 2001
+From: Christophe Jaillet <christophe.jaillet@wanadoo.fr>
+Date: Sun, 16 Jul 2017 13:09:23 +0200
+Subject: [PATCH 12882/13040] i40iw: Simplify code
+
+Axe a few lines of code and re-use existing error handling path to avoid
+code duplication.
+
+Signed-off-by: Christophe JAILLET <christophe.jaillet@wanadoo.fr>
+Acked-by: Shiraz Saleem <shiraz.saleem@intel.com>
+Signed-off-by: Doug Ledford <dledford@redhat.com>
+---
+ drivers/infiniband/hw/i40iw/i40iw_pble.c | 9 +++------
+ 1 file changed, 3 insertions(+), 6 deletions(-)
+
+diff --git a/drivers/infiniband/hw/i40iw/i40iw_pble.c b/drivers/infiniband/hw/i40iw/i40iw_pble.c
+index c87ba16..540aab5 100644
+--- a/drivers/infiniband/hw/i40iw/i40iw_pble.c
++++ b/drivers/infiniband/hw/i40iw/i40iw_pble.c
+@@ -269,10 +269,8 @@ static enum i40iw_status_code add_bp_pages(struct i40iw_sc_dev *dev,
+ status = i40iw_add_sd_table_entry(dev->hw, hmc_info,
+ info->idx.sd_idx, I40IW_SD_TYPE_PAGED,
+ I40IW_HMC_DIRECT_BP_SIZE);
+- if (status) {
+- i40iw_free_vmalloc_mem(dev->hw, chunk);
+- return status;
+- }
++ if (status)
++ goto error;
+ if (!dev->is_pf) {
+ status = i40iw_vchnl_vf_add_hmc_objs(dev, I40IW_HMC_IW_PBLE,
+ fpm_to_idx(pble_rsrc,
+@@ -280,8 +278,7 @@ static enum i40iw_status_code add_bp_pages(struct i40iw_sc_dev *dev,
+ (info->pages << PBLE_512_SHIFT));
+ if (status) {
+ i40iw_pr_err("allocate PBLEs in the PF. Error %i\n", status);
+- i40iw_free_vmalloc_mem(dev->hw, chunk);
+- return status;
++ goto error;
+ }
+ }
+ addr = chunk->vaddr;
+--
+2.1.3
+
--- /dev/null
+From 83fb1c89e7ee5bb16397b294ccfbd65a9a22e402 Mon Sep 17 00:00:00 2001
+From: Shiraz Saleem <shiraz.saleem@intel.com>
+Date: Wed, 19 Jul 2017 13:55:26 -0500
+Subject: [PATCH 12883/13040] i40iw: Fixes for static checker warnings
+
+Remove NULL check for cm_node->listener in i40iw_accept
+as listener is always present at this point.
+
+Remove the check for cm_node->accept_pend and related code
+in i40iw_cm_event_connected as the cm_node in this context
+is only pertinent to active node and cm_node->accept_pend
+is always 0.
+
+This fixes the following smatch warnings,
+
+drivers/infiniband/hw/i40iw/i40iw_cm.c:3691 i40iw_accept()
+error: we previously assumed 'cm_node->listener' could be null
+
+drivers/infiniband/hw/i40iw/i40iw_cm.c:4061 i40iw_cm_event_connected()
+error: we previously assumed 'cm_node->listener' could be null
+
+Reported-by: Dan Carpenter <dan.carpenter@oracle.com>
+Signed-off-by: Shiraz Saleem <shiraz.saleem@intel.com>
+Signed-off-by: Doug Ledford <dledford@redhat.com>
+---
+ drivers/infiniband/hw/i40iw/i40iw_cm.c | 9 +--------
+ 1 file changed, 1 insertion(+), 8 deletions(-)
+
+diff --git a/drivers/infiniband/hw/i40iw/i40iw_cm.c b/drivers/infiniband/hw/i40iw/i40iw_cm.c
+index 5a2fa74..a2b1350 100644
+--- a/drivers/infiniband/hw/i40iw/i40iw_cm.c
++++ b/drivers/infiniband/hw/i40iw/i40iw_cm.c
+@@ -3687,8 +3687,6 @@ int i40iw_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
+
+ cm_node->accelerated = 1;
+ if (cm_node->accept_pend) {
+- if (!cm_node->listener)
+- i40iw_pr_err("cm_node->listener NULL for passive node\n");
+ atomic_dec(&cm_node->listener->pend_accepts_cnt);
+ cm_node->accept_pend = 0;
+ }
+@@ -4056,12 +4054,7 @@ static void i40iw_cm_event_connected(struct i40iw_cm_event *event)
+ i40iw_modify_qp(&iwqp->ibqp, &attr, IB_QP_STATE, NULL);
+
+ cm_node->accelerated = 1;
+- if (cm_node->accept_pend) {
+- if (!cm_node->listener)
+- i40iw_pr_err("listener is null for passive node\n");
+- atomic_dec(&cm_node->listener->pend_accepts_cnt);
+- cm_node->accept_pend = 0;
+- }
++
+ return;
+
+ error:
+--
+2.1.3
+
--- /dev/null
+From 5a5a3d0cfe6b3c99585d7763cd966ec1654bc4e3 Mon Sep 17 00:00:00 2001
+From: Colin Ian King <colin.king@canonical.com>
+Date: Fri, 21 Jul 2017 23:19:33 +0100
+Subject: [PATCH 12886/13040] i40iw: fix spelling mistake: "allloc_buf" ->
+ "alloc_buf"
+
+Trivial fix to spelling mistake in i40iw_debug message and
+also split up a couple of lines that are too long and cause
+checkpatch warnings
+
+Signed-off-by: Colin Ian King <colin.king@canonical.com>
+Signed-off-by: Doug Ledford <dledford@redhat.com>
+---
+ drivers/infiniband/hw/i40iw/i40iw_puda.c | 6 ++++--
+ 1 file changed, 4 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/infiniband/hw/i40iw/i40iw_puda.c b/drivers/infiniband/hw/i40iw/i40iw_puda.c
+index 71050c5..40c3137 100644
+--- a/drivers/infiniband/hw/i40iw/i40iw_puda.c
++++ b/drivers/infiniband/hw/i40iw/i40iw_puda.c
+@@ -949,14 +949,16 @@ enum i40iw_status_code i40iw_puda_create_rsrc(struct i40iw_sc_vsi *vsi,
+ ret = i40iw_puda_qp_create(rsrc);
+ }
+ if (ret) {
+- i40iw_debug(dev, I40IW_DEBUG_PUDA, "[%s] error qp_create\n", __func__);
++ i40iw_debug(dev, I40IW_DEBUG_PUDA, "[%s] error qp_create\n",
++ __func__);
+ goto error;
+ }
+ rsrc->completion = PUDA_QP_CREATED;
+
+ ret = i40iw_puda_allocbufs(rsrc, info->tx_buf_cnt + info->rq_size);
+ if (ret) {
+- i40iw_debug(dev, I40IW_DEBUG_PUDA, "[%s] error allloc_buf\n", __func__);
++ i40iw_debug(dev, I40IW_DEBUG_PUDA, "[%s] error alloc_buf\n",
++ __func__);
+ goto error;
+ }
+
+--
+2.1.3
+
--- /dev/null
+From d26875b43d45644e87f4c0b6bb2d7abf3c61d529 Mon Sep 17 00:00:00 2001
+From: Shiraz Saleem <shiraz.saleem@intel.com>
+Date: Tue, 8 Aug 2017 20:38:45 -0500
+Subject: [PATCH 12925/13040] i40iw: Improve CQP timeout logic
+
+The current timeout logic for Control Queue-Pair (CQP) OPs
+does not take into account whether CQP makes progress but
+rather blindly waits for a large timeout value, 100000 jiffies
+for the completion event. Improve this by setting the timeout
+based on whether the CQP is making progress or not. If the CQP
+is hung, the timeout will happen sooner, in 5000 jiffies. Each
+time the CQP progress is detected, the timeout extends by 5000
+jiffies.
+
+Signed-off-by: Shiraz Saleem <shiraz.saleem@intel.com>
+Signed-off-by: Christopher N Bednarz <christopher.n.bednarz@intel.com>
+Signed-off-by: Henry Orosco <henry.orosco@intel.com>
+Signed-off-by: Doug Ledford <dledford@redhat.com>
+---
+ drivers/infiniband/hw/i40iw/i40iw_ctrl.c | 11 +++++++++++
+ drivers/infiniband/hw/i40iw/i40iw_p.h | 14 +++++++++-----
+ drivers/infiniband/hw/i40iw/i40iw_type.h | 5 +++++
+ drivers/infiniband/hw/i40iw/i40iw_utils.c | 22 ++++++++++++++--------
+ 4 files changed, 39 insertions(+), 13 deletions(-)
+
+diff --git a/drivers/infiniband/hw/i40iw/i40iw_ctrl.c b/drivers/infiniband/hw/i40iw/i40iw_ctrl.c
+index a49ff2e..d1f5345 100644
+--- a/drivers/infiniband/hw/i40iw/i40iw_ctrl.c
++++ b/drivers/infiniband/hw/i40iw/i40iw_ctrl.c
+@@ -54,6 +54,17 @@ static inline void i40iw_insert_wqe_hdr(u64 *wqe, u64 header)
+ set_64bit_val(wqe, 24, header);
+ }
+
++void i40iw_check_cqp_progress(struct i40iw_cqp_timeout *cqp_timeout, struct i40iw_sc_dev *dev)
++{
++ if (cqp_timeout->compl_cqp_cmds != dev->cqp_cmd_stats[OP_COMPLETED_COMMANDS]) {
++ cqp_timeout->compl_cqp_cmds = dev->cqp_cmd_stats[OP_COMPLETED_COMMANDS];
++ cqp_timeout->count = 0;
++ } else {
++ if (dev->cqp_cmd_stats[OP_REQUESTED_COMMANDS] != cqp_timeout->compl_cqp_cmds)
++ cqp_timeout->count++;
++ }
++}
++
+ /**
+ * i40iw_get_cqp_reg_info - get head and tail for cqp using registers
+ * @cqp: struct for cqp hw
+diff --git a/drivers/infiniband/hw/i40iw/i40iw_p.h b/drivers/infiniband/hw/i40iw/i40iw_p.h
+index 28a92fe..e217a12 100644
+--- a/drivers/infiniband/hw/i40iw/i40iw_p.h
++++ b/drivers/infiniband/hw/i40iw/i40iw_p.h
+@@ -35,11 +35,13 @@
+ #ifndef I40IW_P_H
+ #define I40IW_P_H
+
+-#define PAUSE_TIMER_VALUE 0xFFFF
+-#define REFRESH_THRESHOLD 0x7FFF
+-#define HIGH_THRESHOLD 0x800
+-#define LOW_THRESHOLD 0x200
+-#define ALL_TC2PFC 0xFF
++#define PAUSE_TIMER_VALUE 0xFFFF
++#define REFRESH_THRESHOLD 0x7FFF
++#define HIGH_THRESHOLD 0x800
++#define LOW_THRESHOLD 0x200
++#define ALL_TC2PFC 0xFF
++#define CQP_COMPL_WAIT_TIME 0x3E8
++#define CQP_TIMEOUT_THRESHOLD 5
+
+ void i40iw_debug_buf(struct i40iw_sc_dev *dev, enum i40iw_debug_flag mask,
+ char *desc, u64 *buf, u32 size);
+@@ -51,6 +53,8 @@ void i40iw_sc_cqp_post_sq(struct i40iw_sc_cqp *cqp);
+
+ u64 *i40iw_sc_cqp_get_next_send_wqe(struct i40iw_sc_cqp *cqp, u64 scratch);
+
++void i40iw_check_cqp_progress(struct i40iw_cqp_timeout *cqp_timeout, struct i40iw_sc_dev *dev);
++
+ enum i40iw_status_code i40iw_sc_mr_fast_register(struct i40iw_sc_qp *qp,
+ struct i40iw_fast_reg_stag_info *info,
+ bool post_sq);
+diff --git a/drivers/infiniband/hw/i40iw/i40iw_type.h b/drivers/infiniband/hw/i40iw/i40iw_type.h
+index 959ec81..63118f6 100644
+--- a/drivers/infiniband/hw/i40iw/i40iw_type.h
++++ b/drivers/infiniband/hw/i40iw/i40iw_type.h
+@@ -1345,4 +1345,9 @@ struct i40iw_virtchnl_work_info {
+ void *worker_vf_dev;
+ };
+
++struct i40iw_cqp_timeout {
++ u64 compl_cqp_cmds;
++ u8 count;
++};
++
+ #endif
+diff --git a/drivers/infiniband/hw/i40iw/i40iw_utils.c b/drivers/infiniband/hw/i40iw/i40iw_utils.c
+index e311ec5..62f1f45 100644
+--- a/drivers/infiniband/hw/i40iw/i40iw_utils.c
++++ b/drivers/infiniband/hw/i40iw/i40iw_utils.c
+@@ -445,23 +445,29 @@ static int i40iw_wait_event(struct i40iw_device *iwdev,
+ {
+ struct cqp_commands_info *info = &cqp_request->info;
+ struct i40iw_cqp *iwcqp = &iwdev->cqp;
++ struct i40iw_cqp_timeout cqp_timeout;
+ bool cqp_error = false;
+ int err_code = 0;
+- int timeout_ret = 0;
++ memset(&cqp_timeout, 0, sizeof(cqp_timeout));
++ cqp_timeout.compl_cqp_cmds = iwdev->sc_dev.cqp_cmd_stats[OP_COMPLETED_COMMANDS];
++ do {
++ if (wait_event_timeout(cqp_request->waitq,
++ cqp_request->request_done, CQP_COMPL_WAIT_TIME))
++ break;
+
+- timeout_ret = wait_event_timeout(cqp_request->waitq,
+- cqp_request->request_done,
+- I40IW_EVENT_TIMEOUT);
+- if (!timeout_ret) {
+- i40iw_pr_err("error cqp command 0x%x timed out ret = %d\n",
+- info->cqp_cmd, timeout_ret);
++ i40iw_check_cqp_progress(&cqp_timeout, &iwdev->sc_dev);
++
++ if (cqp_timeout.count < CQP_TIMEOUT_THRESHOLD)
++ continue;
++
++ i40iw_pr_err("error cqp command 0x%x timed out", info->cqp_cmd);
+ err_code = -ETIME;
+ if (!iwdev->reset) {
+ iwdev->reset = true;
+ i40iw_request_reset(iwdev);
+ }
+ goto done;
+- }
++ } while (1);
+ cqp_error = cqp_request->compl_info.error;
+ if (cqp_error) {
+ i40iw_pr_err("error cqp command 0x%x completion maj = 0x%x min=0x%x\n",
+--
+2.1.3
+
--- /dev/null
+From accbef5cc624be745c1de903dd3a05681aaa0ac1 Mon Sep 17 00:00:00 2001
+From: Yuval Shaia <yuval.shaia@oracle.com>
+Date: Thu, 24 Aug 2017 20:11:42 +0300
+Subject: [PATCH 12963/13040] RDMA/i40iw: Remove unused argument
+
+None of the calls to i40iw_netdev_vlan_ipv6 are using mac so let's
+remove it from func's args-list.
+
+Signed-off-by: Yuval Shaia <yuval.shaia@oracle.com>
+Signed-off-by: Doug Ledford <dledford@redhat.com>
+---
+ drivers/infiniband/hw/i40iw/i40iw_cm.c | 18 ++++++------------
+ 1 file changed, 6 insertions(+), 12 deletions(-)
+
+diff --git a/drivers/infiniband/hw/i40iw/i40iw_cm.c b/drivers/infiniband/hw/i40iw/i40iw_cm.c
+index a2b1350..14f36ba 100644
+--- a/drivers/infiniband/hw/i40iw/i40iw_cm.c
++++ b/drivers/infiniband/hw/i40iw/i40iw_cm.c
+@@ -1582,15 +1582,14 @@ static enum i40iw_status_code i40iw_del_multiple_qhash(
+ }
+
+ /**
+- * i40iw_netdev_vlan_ipv6 - Gets the netdev and mac
++ * i40iw_netdev_vlan_ipv6 - Gets the netdev and vlan
+ * @addr: local IPv6 address
+ * @vlan_id: vlan id for the given IPv6 address
+- * @mac: mac address for the given IPv6 address
+ *
+ * Returns the net_device of the IPv6 address and also sets the
+- * vlan id and mac for that address.
++ * vlan id for that address.
+ */
+-static struct net_device *i40iw_netdev_vlan_ipv6(u32 *addr, u16 *vlan_id, u8 *mac)
++static struct net_device *i40iw_netdev_vlan_ipv6(u32 *addr, u16 *vlan_id)
+ {
+ struct net_device *ip_dev = NULL;
+ struct in6_addr laddr6;
+@@ -1600,15 +1599,11 @@ static struct net_device *i40iw_netdev_vlan_ipv6(u32 *addr, u16 *vlan_id, u8 *ma
+ i40iw_copy_ip_htonl(laddr6.in6_u.u6_addr32, addr);
+ if (vlan_id)
+ *vlan_id = I40IW_NO_VLAN;
+- if (mac)
+- eth_zero_addr(mac);
+ rcu_read_lock();
+ for_each_netdev_rcu(&init_net, ip_dev) {
+ if (ipv6_chk_addr(&init_net, &laddr6, ip_dev, 1)) {
+ if (vlan_id)
+ *vlan_id = rdma_vlan_dev_vlan_id(ip_dev);
+- if (ip_dev->dev_addr && mac)
+- ether_addr_copy(mac, ip_dev->dev_addr);
+ break;
+ }
+ }
+@@ -3588,7 +3583,7 @@ int i40iw_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
+ cm_node->vlan_id = i40iw_get_vlan_ipv4(cm_node->loc_addr);
+ } else {
+ cm_node->ipv4 = false;
+- i40iw_netdev_vlan_ipv6(cm_node->loc_addr, &cm_node->vlan_id, NULL);
++ i40iw_netdev_vlan_ipv6(cm_node->loc_addr, &cm_node->vlan_id);
+ }
+ i40iw_debug(cm_node->dev,
+ I40IW_DEBUG_CM,
+@@ -3787,7 +3782,7 @@ int i40iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
+ raddr6->sin6_addr.in6_u.u6_addr32);
+ cm_info.loc_port = ntohs(laddr6->sin6_port);
+ cm_info.rem_port = ntohs(raddr6->sin6_port);
+- i40iw_netdev_vlan_ipv6(cm_info.loc_addr, &cm_info.vlan_id, NULL);
++ i40iw_netdev_vlan_ipv6(cm_info.loc_addr, &cm_info.vlan_id);
+ }
+ cm_info.cm_id = cm_id;
+ cm_info.tos = cm_id->tos;
+@@ -3929,8 +3924,7 @@ int i40iw_create_listen(struct iw_cm_id *cm_id, int backlog)
+ cm_info.loc_port = ntohs(laddr6->sin6_port);
+ if (ipv6_addr_type(&laddr6->sin6_addr) != IPV6_ADDR_ANY)
+ i40iw_netdev_vlan_ipv6(cm_info.loc_addr,
+- &cm_info.vlan_id,
+- NULL);
++ &cm_info.vlan_id);
+ else
+ wildcard = true;
+ }
+--
+2.1.3
+
--- /dev/null
+From 7f6856b789ff13ccfd317c936e4b161c0bbd88a3 Mon Sep 17 00:00:00 2001
+From: Kees Cook <keescook@chromium.org>
+Date: Fri, 16 Dec 2016 17:05:42 -0800
+Subject: [PATCH] RDMA/i40iw: use designated initializers
+
+Prepare to mark sensitive kernel structures for randomization by making
+sure they're using designated initializers. These were identified during
+allyesconfig builds of x86, arm, and arm64, with most initializer fixes
+extracted from grsecurity.
+
+Signed-off-by: Kees Cook <keescook@chromium.org>
+Acked-by: Shiraz Saleem <shiraz.saleem@intel.com>
+Signed-off-by: Doug Ledford <dledford@redhat.com>
+---
+ drivers/infiniband/hw/i40iw/i40iw_ctrl.c | 128 +++++++++++++++----------------
+ drivers/infiniband/hw/i40iw/i40iw_uk.c | 34 ++++----
+ 2 files changed, 80 insertions(+), 82 deletions(-)
+
+diff --git a/drivers/infiniband/hw/i40iw/i40iw_ctrl.c b/drivers/infiniband/hw/i40iw/i40iw_ctrl.c
+index 98923a8..dced4f4 100644
+--- a/drivers/infiniband/hw/i40iw/i40iw_ctrl.c
++++ b/drivers/infiniband/hw/i40iw/i40iw_ctrl.c
+@@ -4851,46 +4851,46 @@ void i40iw_vsi_stats_free(struct i40iw_sc_vsi *vsi)
+ }
+
+ static struct i40iw_cqp_ops iw_cqp_ops = {
+- i40iw_sc_cqp_init,
+- i40iw_sc_cqp_create,
+- i40iw_sc_cqp_post_sq,
+- i40iw_sc_cqp_get_next_send_wqe,
+- i40iw_sc_cqp_destroy,
+- i40iw_sc_poll_for_cqp_op_done
++ .cqp_init = i40iw_sc_cqp_init,
++ .cqp_create = i40iw_sc_cqp_create,
++ .cqp_post_sq = i40iw_sc_cqp_post_sq,
++ .cqp_get_next_send_wqe = i40iw_sc_cqp_get_next_send_wqe,
++ .cqp_destroy = i40iw_sc_cqp_destroy,
++ .poll_for_cqp_op_done = i40iw_sc_poll_for_cqp_op_done
+ };
+
+ static struct i40iw_ccq_ops iw_ccq_ops = {
+- i40iw_sc_ccq_init,
+- i40iw_sc_ccq_create,
+- i40iw_sc_ccq_destroy,
+- i40iw_sc_ccq_create_done,
+- i40iw_sc_ccq_get_cqe_info,
+- i40iw_sc_ccq_arm
++ .ccq_init = i40iw_sc_ccq_init,
++ .ccq_create = i40iw_sc_ccq_create,
++ .ccq_destroy = i40iw_sc_ccq_destroy,
++ .ccq_create_done = i40iw_sc_ccq_create_done,
++ .ccq_get_cqe_info = i40iw_sc_ccq_get_cqe_info,
++ .ccq_arm = i40iw_sc_ccq_arm
+ };
+
+ static struct i40iw_ceq_ops iw_ceq_ops = {
+- i40iw_sc_ceq_init,
+- i40iw_sc_ceq_create,
+- i40iw_sc_cceq_create_done,
+- i40iw_sc_cceq_destroy_done,
+- i40iw_sc_cceq_create,
+- i40iw_sc_ceq_destroy,
+- i40iw_sc_process_ceq
++ .ceq_init = i40iw_sc_ceq_init,
++ .ceq_create = i40iw_sc_ceq_create,
++ .cceq_create_done = i40iw_sc_cceq_create_done,
++ .cceq_destroy_done = i40iw_sc_cceq_destroy_done,
++ .cceq_create = i40iw_sc_cceq_create,
++ .ceq_destroy = i40iw_sc_ceq_destroy,
++ .process_ceq = i40iw_sc_process_ceq
+ };
+
+ static struct i40iw_aeq_ops iw_aeq_ops = {
+- i40iw_sc_aeq_init,
+- i40iw_sc_aeq_create,
+- i40iw_sc_aeq_destroy,
+- i40iw_sc_get_next_aeqe,
+- i40iw_sc_repost_aeq_entries,
+- i40iw_sc_aeq_create_done,
+- i40iw_sc_aeq_destroy_done
++ .aeq_init = i40iw_sc_aeq_init,
++ .aeq_create = i40iw_sc_aeq_create,
++ .aeq_destroy = i40iw_sc_aeq_destroy,
++ .get_next_aeqe = i40iw_sc_get_next_aeqe,
++ .repost_aeq_entries = i40iw_sc_repost_aeq_entries,
++ .aeq_create_done = i40iw_sc_aeq_create_done,
++ .aeq_destroy_done = i40iw_sc_aeq_destroy_done
+ };
+
+ /* iwarp pd ops */
+ static struct i40iw_pd_ops iw_pd_ops = {
+- i40iw_sc_pd_init,
++ .pd_init = i40iw_sc_pd_init,
+ };
+
+ static struct i40iw_priv_qp_ops iw_priv_qp_ops = {
+@@ -4909,53 +4909,51 @@ static struct i40iw_priv_qp_ops iw_priv_qp_ops = {
+ };
+
+ static struct i40iw_priv_cq_ops iw_priv_cq_ops = {
+- i40iw_sc_cq_init,
+- i40iw_sc_cq_create,
+- i40iw_sc_cq_destroy,
+- i40iw_sc_cq_modify,
++ .cq_init = i40iw_sc_cq_init,
++ .cq_create = i40iw_sc_cq_create,
++ .cq_destroy = i40iw_sc_cq_destroy,
++ .cq_modify = i40iw_sc_cq_modify,
+ };
+
+ static struct i40iw_mr_ops iw_mr_ops = {
+- i40iw_sc_alloc_stag,
+- i40iw_sc_mr_reg_non_shared,
+- i40iw_sc_mr_reg_shared,
+- i40iw_sc_dealloc_stag,
+- i40iw_sc_query_stag,
+- i40iw_sc_mw_alloc
++ .alloc_stag = i40iw_sc_alloc_stag,
++ .mr_reg_non_shared = i40iw_sc_mr_reg_non_shared,
++ .mr_reg_shared = i40iw_sc_mr_reg_shared,
++ .dealloc_stag = i40iw_sc_dealloc_stag,
++ .query_stag = i40iw_sc_query_stag,
++ .mw_alloc = i40iw_sc_mw_alloc
+ };
+
+ static struct i40iw_cqp_misc_ops iw_cqp_misc_ops = {
+- i40iw_sc_manage_push_page,
+- i40iw_sc_manage_hmc_pm_func_table,
+- i40iw_sc_set_hmc_resource_profile,
+- i40iw_sc_commit_fpm_values,
+- i40iw_sc_query_fpm_values,
+- i40iw_sc_static_hmc_pages_allocated,
+- i40iw_sc_add_arp_cache_entry,
+- i40iw_sc_del_arp_cache_entry,
+- i40iw_sc_query_arp_cache_entry,
+- i40iw_sc_manage_apbvt_entry,
+- i40iw_sc_manage_qhash_table_entry,
+- i40iw_sc_alloc_local_mac_ipaddr_entry,
+- i40iw_sc_add_local_mac_ipaddr_entry,
+- i40iw_sc_del_local_mac_ipaddr_entry,
+- i40iw_sc_cqp_nop,
+- i40iw_sc_commit_fpm_values_done,
+- i40iw_sc_query_fpm_values_done,
+- i40iw_sc_manage_hmc_pm_func_table_done,
+- i40iw_sc_suspend_qp,
+- i40iw_sc_resume_qp
++ .manage_push_page = i40iw_sc_manage_push_page,
++ .manage_hmc_pm_func_table = i40iw_sc_manage_hmc_pm_func_table,
++ .set_hmc_resource_profile = i40iw_sc_set_hmc_resource_profile,
++ .commit_fpm_values = i40iw_sc_commit_fpm_values,
++ .query_fpm_values = i40iw_sc_query_fpm_values,
++ .static_hmc_pages_allocated = i40iw_sc_static_hmc_pages_allocated,
++ .add_arp_cache_entry = i40iw_sc_add_arp_cache_entry,
++ .del_arp_cache_entry = i40iw_sc_del_arp_cache_entry,
++ .query_arp_cache_entry = i40iw_sc_query_arp_cache_entry,
++ .manage_apbvt_entry = i40iw_sc_manage_apbvt_entry,
++ .manage_qhash_table_entry = i40iw_sc_manage_qhash_table_entry,
++ .alloc_local_mac_ipaddr_table_entry = i40iw_sc_alloc_local_mac_ipaddr_entry,
++ .add_local_mac_ipaddr_entry = i40iw_sc_add_local_mac_ipaddr_entry,
++ .del_local_mac_ipaddr_entry = i40iw_sc_del_local_mac_ipaddr_entry,
++ .cqp_nop = i40iw_sc_cqp_nop,
++ .commit_fpm_values_done = i40iw_sc_commit_fpm_values_done,
++ .query_fpm_values_done = i40iw_sc_query_fpm_values_done,
++ .manage_hmc_pm_func_table_done = i40iw_sc_manage_hmc_pm_func_table_done,
++ .update_suspend_qp = i40iw_sc_suspend_qp,
++ .update_resume_qp = i40iw_sc_resume_qp
+ };
+
+ static struct i40iw_hmc_ops iw_hmc_ops = {
+- i40iw_sc_init_iw_hmc,
+- i40iw_sc_parse_fpm_query_buf,
+- i40iw_sc_configure_iw_fpm,
+- i40iw_sc_parse_fpm_commit_buf,
+- i40iw_sc_create_hmc_obj,
+- i40iw_sc_del_hmc_obj,
+- NULL,
+- NULL
++ .init_iw_hmc = i40iw_sc_init_iw_hmc,
++ .parse_fpm_query_buf = i40iw_sc_parse_fpm_query_buf,
++ .configure_iw_fpm = i40iw_sc_configure_iw_fpm,
++ .parse_fpm_commit_buf = i40iw_sc_parse_fpm_commit_buf,
++ .create_hmc_object = i40iw_sc_create_hmc_obj,
++ .del_hmc_object = i40iw_sc_del_hmc_obj
+ };
+
+ /**
+diff --git a/drivers/infiniband/hw/i40iw/i40iw_uk.c b/drivers/infiniband/hw/i40iw/i40iw_uk.c
+index 2800f79..b0d3a0e 100644
+--- a/drivers/infiniband/hw/i40iw/i40iw_uk.c
++++ b/drivers/infiniband/hw/i40iw/i40iw_uk.c
+@@ -913,29 +913,29 @@ enum i40iw_status_code i40iw_get_wqe_shift(u32 wqdepth, u32 sge, u32 inline_data
+ }
+
+ static struct i40iw_qp_uk_ops iw_qp_uk_ops = {
+- i40iw_qp_post_wr,
+- i40iw_qp_ring_push_db,
+- i40iw_rdma_write,
+- i40iw_rdma_read,
+- i40iw_send,
+- i40iw_inline_rdma_write,
+- i40iw_inline_send,
+- i40iw_stag_local_invalidate,
+- i40iw_mw_bind,
+- i40iw_post_receive,
+- i40iw_nop
++ .iw_qp_post_wr = i40iw_qp_post_wr,
++ .iw_qp_ring_push_db = i40iw_qp_ring_push_db,
++ .iw_rdma_write = i40iw_rdma_write,
++ .iw_rdma_read = i40iw_rdma_read,
++ .iw_send = i40iw_send,
++ .iw_inline_rdma_write = i40iw_inline_rdma_write,
++ .iw_inline_send = i40iw_inline_send,
++ .iw_stag_local_invalidate = i40iw_stag_local_invalidate,
++ .iw_mw_bind = i40iw_mw_bind,
++ .iw_post_receive = i40iw_post_receive,
++ .iw_post_nop = i40iw_nop
+ };
+
+ static struct i40iw_cq_ops iw_cq_ops = {
+- i40iw_cq_request_notification,
+- i40iw_cq_poll_completion,
+- i40iw_cq_post_entries,
+- i40iw_clean_cq
++ .iw_cq_request_notification = i40iw_cq_request_notification,
++ .iw_cq_poll_completion = i40iw_cq_poll_completion,
++ .iw_cq_post_entries = i40iw_cq_post_entries,
++ .iw_cq_clean = i40iw_clean_cq
+ };
+
+ static struct i40iw_device_uk_ops iw_device_uk_ops = {
+- i40iw_cq_uk_init,
+- i40iw_qp_uk_init,
++ .iwarp_cq_uk_init = i40iw_cq_uk_init,
++ .iwarp_qp_uk_init = i40iw_qp_uk_init,
+ };
+
+ /**
+--
+2.1.3
+
--- /dev/null
+From cfeca08faf452acaf807576859275968cdb7e7a2 Mon Sep 17 00:00:00 2001
+From: Bhumika Goyal <bhumirks@gmail.com>
+Date: Mon, 28 Aug 2017 21:51:23 +0530
+Subject: [PATCH 12991/13040] i40iw: make some structures const
+
+Make some structures const as they are only used during a copy
+operation.
+
+Signed-off-by: Bhumika Goyal <bhumirks@gmail.com>
+Signed-off-by: Doug Ledford <dledford@redhat.com>
+---
+ drivers/infiniband/hw/i40iw/i40iw_uk.c | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/infiniband/hw/i40iw/i40iw_uk.c b/drivers/infiniband/hw/i40iw/i40iw_uk.c
+index 1060725..0aadb7a 100644
+--- a/drivers/infiniband/hw/i40iw/i40iw_uk.c
++++ b/drivers/infiniband/hw/i40iw/i40iw_uk.c
+@@ -912,7 +912,7 @@ enum i40iw_status_code i40iw_get_wqe_shift(u32 wqdepth, u32 sge, u32 inline_data
+ return 0;
+ }
+
+-static struct i40iw_qp_uk_ops iw_qp_uk_ops = {
++static const struct i40iw_qp_uk_ops iw_qp_uk_ops = {
+ .iw_qp_post_wr = i40iw_qp_post_wr,
+ .iw_qp_ring_push_db = i40iw_qp_ring_push_db,
+ .iw_rdma_write = i40iw_rdma_write,
+@@ -926,14 +926,14 @@ static struct i40iw_qp_uk_ops iw_qp_uk_ops = {
+ .iw_post_nop = i40iw_nop
+ };
+
+-static struct i40iw_cq_ops iw_cq_ops = {
++static const struct i40iw_cq_ops iw_cq_ops = {
+ .iw_cq_request_notification = i40iw_cq_request_notification,
+ .iw_cq_poll_completion = i40iw_cq_poll_completion,
+ .iw_cq_post_entries = i40iw_cq_post_entries,
+ .iw_cq_clean = i40iw_clean_cq
+ };
+
+-static struct i40iw_device_uk_ops iw_device_uk_ops = {
++static const struct i40iw_device_uk_ops iw_device_uk_ops = {
+ .iwarp_cq_uk_init = i40iw_cq_uk_init,
+ .iwarp_qp_uk_init = i40iw_qp_uk_init,
+ };
+--
+2.1.3
+