git.openfabrics.org - ~shefty/rdma-win.git/commitdiff
branches/winverbs: remove files no longer in trunk
author shefty <shefty@ad392aa1-c5ef-ae45-8dd8-e69d62a5ef86>
Thu, 16 Apr 2009 22:29:07 +0000 (22:29 +0000)
committer shefty <shefty@ad392aa1-c5ef-ae45-8dd8-e69d62a5ef86>
Thu, 16 Apr 2009 22:29:07 +0000 (22:29 +0000)
git-svn-id: svn://openib.tc.cornell.edu/gen1@2106 ad392aa1-c5ef-ae45-8dd8-e69d62a5ef86

branches/winverbs/hw/mlx4/kernel/hca/verbs.c [deleted file]
branches/winverbs/hw/mlx4/kernel/hca/verbs.h [deleted file]

diff --git a/branches/winverbs/hw/mlx4/kernel/hca/verbs.c b/branches/winverbs/hw/mlx4/kernel/hca/verbs.c
deleted file mode 100644 (file)
index cb35339..0000000
+++ /dev/null
@@ -1,673 +0,0 @@
-/*\r
- * Copyright (c) 2005 SilverStorm Technologies.  All rights reserved.\r
- * Copyright (c) 2004-2005 Mellanox Technologies, Inc. All rights reserved. \r
- * Portions Copyright (c) 2008 Microsoft Corporation.  All rights reserved.\r
- *\r
- * This software is available to you under the OpenIB.org BSD license\r
- * below:\r
- *\r
- *     Redistribution and use in source and binary forms, with or\r
- *     without modification, are permitted provided that the following\r
- *     conditions are met:\r
- *\r
- *      - Redistributions of source code must retain the above\r
- *        copyright notice, this list of conditions and the following\r
- *        disclaimer.\r
- *\r
- *      - Redistributions in binary form must reproduce the above\r
- *        copyright notice, this list of conditions and the following\r
- *        disclaimer in the documentation and/or other materials\r
- *        provided with the distribution.\r
- *\r
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,\r
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\r
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND\r
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS\r
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN\r
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN\r
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\r
- * SOFTWARE.\r
- *\r
- * $Id: hca_verbs.c 2073 2007-11-13 11:38:40Z leonid $\r
- */\r
-\r
-\r
-#include "precomp.h"\r
-\r
-#if defined(EVENT_TRACING)\r
-#ifdef offsetof\r
-#undef offsetof\r
-#endif\r
-#include "verbs.tmh"\r
-#endif\r
-\r
-\r
-/* Memory regions */\r
-\r
-struct ib_mr *ib_get_dma_mr(struct ib_pd *pd, enum ib_access_flags mr_access_flags)\r
-{\r
-       struct ib_mr *mr;\r
-\r
-       mr = pd->device->get_dma_mr(pd, mr_access_flags);\r
-\r
-       if (!IS_ERR(mr)) {\r
-               mr->device  = pd->device;\r
-               mr->pd      = pd;\r
-               mr->p_uctx = pd->p_uctx;\r
-               atomic_inc(&pd->usecnt);\r
-               atomic_set(&mr->usecnt, 0);\r
-               HCA_PRINT(TRACE_LEVEL_INFORMATION ,HCA_DBG_MEMORY ,("pdn %d, usecnt %d \n", \r
-                       ((struct mlx4_ib_pd*)pd)->pdn, pd->usecnt));\r
-       }\r
-\r
-       return mr;\r
-}\r
-\r
-struct ib_mr *ib_reg_phys_mr(struct ib_pd *pd,\r
-                                 struct ib_phys_buf *phys_buf_array,\r
-                                 int num_phys_buf,\r
-                                 enum ib_access_flags mr_access_flags,\r
-                                 u64 *iova_start)\r
-{\r
-       struct ib_mr *mr;\r
-\r
-       if ( pd->device->reg_phys_mr )\r
-               mr = pd->device->reg_phys_mr(pd, phys_buf_array, num_phys_buf,\r
-                       mr_access_flags, iova_start);\r
-       else\r
-               mr = ERR_PTR(-ENOSYS);\r
-\r
-       if (!IS_ERR(mr)) {\r
-               mr->device  = pd->device;\r
-               mr->pd   = pd;\r
-               mr->p_uctx = pd->p_uctx;\r
-               atomic_inc(&pd->usecnt);\r
-               atomic_set(&mr->usecnt, 0);\r
-               HCA_PRINT(TRACE_LEVEL_INFORMATION ,HCA_DBG_MEMORY ,("PD%d use cnt %d \n", \r
-                       ((struct mlx4_ib_pd*)pd)->pdn, pd->usecnt));\r
-       }\r
-\r
-       return mr;\r
-}\r
-\r
-\r
- struct ib_mr *ibv_reg_mr(struct ib_pd *pd, \r
-       u64 start, u64 length,\r
-       u64 virt_addr,\r
-       int mr_access_flags,\r
-       ci_umv_buf_t* const p_umv_buf )\r
-{\r
-       struct ib_mr *ib_mr;\r
-       int err;\r
-       HCA_ENTER(HCA_DBG_MEMORY);\r
-\r
-       if (p_umv_buf  && p_umv_buf->command) {\r
-               err = -ENOSYS;\r
-               goto err_not_supported;\r
-       }\r
-\r
-       ib_mr = pd->device->reg_user_mr(pd, start, length, virt_addr, mr_access_flags, NULL);\r
-       if (IS_ERR(ib_mr)) {\r
-               err = PTR_ERR(ib_mr);\r
-               HCA_PRINT(TRACE_LEVEL_ERROR,HCA_DBG_MEMORY ,("mthca_reg_user_mr failed (%d)\n", err));\r
-               goto err_reg_user_mr;\r
-       }\r
-\r
-       ib_mr->device  = pd->device;\r
-       ib_mr->pd      = pd;\r
-       atomic_inc(&pd->usecnt);\r
-       atomic_set(&ib_mr->usecnt, 0);\r
-       HCA_PRINT(TRACE_LEVEL_INFORMATION ,HCA_DBG_MEMORY ,("PD%d use cnt %d, pd_handle %p, ctx %p \n", \r
-               ((struct mlx4_ib_pd*)pd)->pdn, pd->usecnt, pd, pd->p_uctx));\r
-       HCA_EXIT(HCA_DBG_MEMORY);\r
-       return ib_mr;\r
-\r
-err_reg_user_mr:\r
-err_not_supported:\r
-       HCA_EXIT(HCA_DBG_MEMORY);\r
-       return ERR_PTR(err);\r
-}\r
-\r
-int ib_dereg_mr(struct ib_mr *mr)\r
-{\r
-       int ret;\r
-       struct ib_pd *pd;\r
-       struct ib_device *p_ibdev;\r
-\r
-       if (atomic_read(&mr->usecnt))\r
-               return -EBUSY;\r
-\r
-       p_ibdev = mr->device;\r
-       pd = mr->pd;\r
-       ret = p_ibdev->dereg_mr(mr);\r
-       if (!ret) {\r
-               atomic_dec(&pd->usecnt);\r
-               HCA_PRINT(TRACE_LEVEL_INFORMATION  ,HCA_DBG_MEMORY ,("pdn %d, usecnt %d, pd_handle %p, ctx %p \n", \r
-                       ((struct mlx4_ib_pd*)pd)->pdn, pd->usecnt, pd, pd->p_uctx));\r
-       }\r
-\r
-       return ret;\r
-}\r
-\r
-static void release_user_cq_qp_resources(\r
-       struct ib_ucontext      *p_uctx)\r
-{\r
-       if (p_uctx) {\r
-               atomic_dec(&p_uctx->x.usecnt);\r
-               if (!atomic_read(&p_uctx->x.usecnt) && p_uctx->closing) {\r
-                       HCA_PRINT(TRACE_LEVEL_ERROR  ,HCA_DBG_SHIM      ,("User resources are released. Removing context\n"));\r
-                       ibv_um_close(p_uctx);\r
-               }\r
-       }\r
-}\r
-\r
-//\r
-// Completion queues\r
-//\r
-\r
-struct ib_cq *ibv_create_cq(struct ib_device *p_ibdev,\r
-                          ib_comp_handler comp_handler,\r
-                          void (*event_handler)(ib_event_rec_t *),\r
-                          void *cq_context, int cqe, \r
-                          struct ib_ucontext *p_uctx, ci_umv_buf_t* const p_umv_buf)\r
-{\r
-       int err;\r
-       struct ib_cq *cq;\r
-       struct ib_udata udata, *p_udata = &udata;\r
-       struct ibv_create_cq *p_req;\r
-       struct ibv_create_cq_resp *p_resp = NULL;\r
-\r
-       if ( p_uctx && p_umv_buf && p_umv_buf->p_inout_buf ) {\r
-               // prepare user parameters\r
-               p_req = (struct ibv_create_cq*)(ULONG_PTR)p_umv_buf->p_inout_buf;\r
-               p_resp = (struct ibv_create_cq_resp*)(ULONG_PTR)\r
-                       p_umv_buf->p_inout_buf;\r
-               INIT_UDATA(&udata, &p_req->buf_addr, &p_resp->cqn, \r
-                       sizeof(struct mlx4_ib_create_cq), sizeof(struct mlx4_ib_create_cq_resp));\r
-       }\r
-       else \r
-               p_udata = NULL;\r
-\r
-       // create cq\r
-       cq = p_ibdev->create_cq(p_ibdev, cqe, 0, p_uctx, p_udata);\r
-       if (IS_ERR(cq)) {\r
-               err = PTR_ERR(cq);\r
-               HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_CQ ,("create_cq failed (%d)\n", err));\r
-               goto err_create_cq;\r
-       }\r
-\r
-       cq->device        = p_ibdev;\r
-       cq->p_uctx        = p_uctx;\r
-       cq->comp_handler  = comp_handler;\r
-       cq->event_handler = event_handler;\r
-       cq->cq_context    = cq_context;\r
-       atomic_set(&cq->usecnt, 0);\r
-       if (p_uctx)\r
-               atomic_inc(&p_uctx->x.usecnt);\r
-\r
-       HCA_PRINT(TRACE_LEVEL_INFORMATION, HCA_DBG_CQ ,\r
-               ("created CQ: cqn %#x:%#x \n", ((struct mlx4_ib_cq*)cq)->mcq.cqn, cq->cqe ));\r
-\r
-       // fill results\r
-       if (p_umv_buf) {\r
-               p_resp->cq_handle = (u64)(ULONG_PTR)cq;\r
-               p_resp->cqe = cq->cqe;\r
-               p_umv_buf->output_size = sizeof(struct ibv_create_cq_resp);\r
-       }\r
-       \r
-       return cq;\r
-\r
-err_create_cq:\r
-       if( p_umv_buf && p_umv_buf->command ) \r
-               p_umv_buf->status = IB_ERROR;\r
-       return ERR_PTR(err);\r
-}\r
-\r
-int ib_destroy_cq(struct ib_cq *cq)\r
-{\r
-       int ret;\r
-       struct ib_ucontext      *p_uctx = cq->p_uctx;\r
-       \r
-       if (atomic_read(&cq->usecnt))\r
-               return -EBUSY;\r
-\r
-       HCA_PRINT(TRACE_LEVEL_INFORMATION, HCA_DBG_CQ ,\r
-               ("destroying CQ: cqn %#x:%#x \n", ((struct mlx4_ib_cq*)cq)->mcq.cqn, cq->cqe ));\r
-\r
-       ret = cq->device->destroy_cq(cq);\r
-       release_user_cq_qp_resources(p_uctx);\r
-       return ret;\r
-}\r
-\r
-//\r
-// Queue pairs \r
-//\r
-\r
-static char *__print_qtype(enum ib_qp_type qtype)\r
-{\r
-       char *str = NULL;\r
-       switch (qtype) {\r
-               case IB_QPT_SMI: str = "SMI"; break;\r
-               case IB_QPT_GSI: str = "GSI"; break;\r
-               case IB_QPT_RC: str = "RC"; break;\r
-               case IB_QPT_UC: str = "UC"; break;\r
-               case IB_QPT_UD: str = "UD"; break;\r
-               case IB_QPT_RAW_IP_V6: str = "IP_V6"; break;\r
-               case IB_QPT_RAW_ETY: str = "ETY"; break;\r
-               default: str = "UKNWN"; break;\r
-       }\r
-       return str;\r
-}\r
-\r
-struct ib_qp *ibv_create_qp(struct ib_pd *pd,\r
-       struct ib_qp_init_attr *qp_init_attr,\r
-       struct ib_ucontext *p_uctx, ci_umv_buf_t* const p_umv_buf)\r
-{\r
-       int err;\r
-       struct ib_qp *p_ib_qp;\r
-       struct ib_udata udata, *p_udata = &udata;\r
-       struct ibv_create_qp *p_req = NULL;\r
-       struct ibv_create_qp_resp *p_resp= NULL;\r
-\r
-       HCA_ENTER(HCA_DBG_QP);\r
-\r
-       if ( p_uctx && p_umv_buf && p_umv_buf->command ) {\r
-               // prepare user parameters\r
-               p_req = (struct ibv_create_qp*)(ULONG_PTR)p_umv_buf->p_inout_buf;\r
-               p_resp = (struct ibv_create_qp_resp*)(ULONG_PTR)p_umv_buf->p_inout_buf;\r
-               INIT_UDATA(&udata, &p_req->buf_addr, NULL, \r
-                       sizeof(struct mlx4_ib_create_qp), 0);\r
-       }\r
-       else \r
-               p_udata = NULL;\r
-\r
-       p_ib_qp = pd->device->create_qp( pd, qp_init_attr, p_udata );\r
-\r
-       if (IS_ERR(p_ib_qp)) {\r
-               err = PTR_ERR(p_ib_qp);\r
-               HCA_PRINT(TRACE_LEVEL_ERROR,HCA_DBG_QP ,("create_qp failed (%d)\n", err));\r
-               goto err_create_qp;\r
-       }\r
-\r
-       // fill results\r
-       p_ib_qp->device                         = pd->device;\r
-       p_ib_qp->pd                             = pd;\r
-       p_ib_qp->send_cq                        = qp_init_attr->send_cq;\r
-       p_ib_qp->recv_cq                        = qp_init_attr->recv_cq;\r
-       p_ib_qp->srq                            = qp_init_attr->srq;\r
-       p_ib_qp->p_uctx                                 = p_uctx;\r
-       p_ib_qp->event_handler                  = qp_init_attr->event_handler;\r
-       p_ib_qp->qp_context                     = qp_init_attr->qp_context;\r
-       p_ib_qp->qp_type                                = qp_init_attr->qp_type;\r
-       atomic_inc(&pd->usecnt);\r
-       atomic_inc(&qp_init_attr->send_cq->usecnt);\r
-       atomic_inc(&qp_init_attr->recv_cq->usecnt);\r
-       if (qp_init_attr->srq)\r
-               atomic_inc(&qp_init_attr->srq->usecnt);\r
-       if (p_uctx)\r
-               atomic_inc(&p_uctx->x.usecnt);\r
-       HCA_PRINT(TRACE_LEVEL_INFORMATION ,HCA_DBG_QP ,("pdn %d, usecnt %d, pd_handle %p, ctx %p \n", \r
-               ((struct mlx4_ib_pd*)pd)->pdn, pd->usecnt, pd, pd->p_uctx));\r
-\r
-       HCA_PRINT(TRACE_LEVEL_INFORMATION, HCA_DBG_QP ,\r
-               ("qtype %s (%d), qnum %#x, q_num  %#x, ssz %d, rsz %d, scq %#x:%#x, rcq %#x:%#x, port_num %d \n",\r
-               __print_qtype(p_ib_qp->qp_type), p_ib_qp->qp_type,\r
-               ((struct mlx4_ib_qp*)p_ib_qp)->mqp.qpn, p_ib_qp->qp_num, \r
-               qp_init_attr->cap.max_send_wr, qp_init_attr->cap.max_recv_wr,\r
-               ((struct mlx4_ib_cq*)p_ib_qp->send_cq)->mcq.cqn, p_ib_qp->send_cq->cqe,\r
-               ((struct mlx4_ib_cq*)p_ib_qp->recv_cq)->mcq.cqn, p_ib_qp->recv_cq->cqe,\r
-               qp_init_attr->port_num\r
-               ) );\r
-\r
-       // fill results for user\r
-       if (p_uctx && p_umv_buf && p_umv_buf->p_inout_buf) {\r
-               struct mlx4_ib_qp *p_mib_qp = (struct mlx4_ib_qp *)p_ib_qp;\r
-               p_resp->qp_handle = (__u64)(ULONG_PTR)p_ib_qp;\r
-               p_resp->qpn = p_mib_qp->mqp.qpn;\r
-               p_resp->max_send_wr = p_mib_qp->sq.max_post;\r
-               p_resp->max_recv_wr = p_mib_qp->rq.max_post;\r
-               p_resp->max_send_sge = p_mib_qp->sq.max_gs;\r
-               p_resp->max_recv_sge = p_mib_qp->rq.max_gs;\r
-               /*\r
-                * We don't support inline sends for kernel QPs (yet), and we\r
-                * don't know what userspace's value should be.\r
-                */\r
-               p_resp->max_inline_data = 0;\r
-               p_umv_buf->output_size = sizeof(struct ibv_create_qp_resp);\r
-       }\r
-\r
-       return p_ib_qp;\r
-\r
-err_create_qp:\r
-       if( p_umv_buf && p_umv_buf->command ) \r
-               p_umv_buf->status = IB_ERROR;\r
-       HCA_EXIT(HCA_DBG_QP);\r
-       return ERR_PTR(err);\r
-}\r
-\r
-int ib_destroy_qp(struct ib_qp *qp)\r
-{\r
-       struct ib_pd *p_ib_pd;\r
-       struct ib_cq *scq, *rcq;\r
-       struct ib_srq *srq;\r
-       struct ib_ucontext      *p_uctx;\r
-       int ret;\r
-\r
-       p_ib_pd  = qp->pd;\r
-       scq = qp->send_cq;\r
-       rcq = qp->recv_cq;\r
-       srq = qp->srq;\r
-       p_uctx = p_ib_pd->p_uctx;\r
-\r
-       ret = qp->device->destroy_qp(qp);\r
-       if (!ret) {\r
-               atomic_dec(&p_ib_pd->usecnt);\r
-               atomic_dec(&scq->usecnt);\r
-               atomic_dec(&rcq->usecnt);\r
-               HCA_PRINT(TRACE_LEVEL_INFORMATION  ,HCA_DBG_QP ,("PD%d use cnt %d, pd_handle %p, ctx %p \n", \r
-                       ((struct mlx4_ib_pd*)p_ib_pd)->pdn, p_ib_pd->usecnt, p_ib_pd, p_ib_pd->p_uctx));\r
-               if (srq)\r
-                       atomic_dec(&srq->usecnt);\r
-               release_user_cq_qp_resources(p_uctx);\r
-       }\r
-\r
-       return ret;\r
-}\r
-\r
-//\r
-// Shared receive queues\r
-//\r
-\r
-\r
-struct ib_srq *ibv_create_srq(struct ib_pd *pd,\r
-       struct ib_srq_init_attr *srq_init_attr,\r
-       struct ib_ucontext *p_uctx, ci_umv_buf_t* const p_umv_buf)\r
-{\r
-       int err;\r
-       struct ib_srq *p_ib_srq;\r
-       struct ib_udata udata, *p_udata = &udata;\r
-       struct ibv_create_srq *p_req = NULL;\r
-       struct ibv_create_srq_resp *p_resp= NULL;\r
-\r
-       if ( p_uctx && p_umv_buf && p_umv_buf->p_inout_buf) {\r
-               // prepare user parameters\r
-               p_req = (struct ibv_create_srq*)(ULONG_PTR)p_umv_buf->p_inout_buf;\r
-               p_resp = (struct ibv_create_srq_resp*)(ULONG_PTR)p_umv_buf->p_inout_buf;\r
-               INIT_UDATA(&udata, &p_req->buf_addr, &p_resp->srqn, \r
-                       sizeof(struct ibv_create_srq), sizeof(struct ibv_create_srq_resp));\r
-       }\r
-       else \r
-               p_udata = NULL;\r
-\r
-       p_ib_srq = pd->device->create_srq( pd, srq_init_attr, p_udata );\r
-       if (IS_ERR(p_ib_srq)) {\r
-               err = PTR_ERR(p_ib_srq);\r
-               HCA_PRINT(TRACE_LEVEL_ERROR,HCA_DBG_QP ,("create_srq failed (%d)\n", err));\r
-               goto err_create_srq;\r
-       }\r
-\r
-       // fill results\r
-       p_ib_srq->device                        = pd->device;\r
-       p_ib_srq->pd                            = pd;\r
-       p_ib_srq->p_uctx                                = p_uctx;\r
-       p_ib_srq->event_handler                 = srq_init_attr->event_handler;\r
-       p_ib_srq->srq_context                   = srq_init_attr->srq_context;\r
-       atomic_inc(&pd->usecnt);\r
-       atomic_set(&p_ib_srq->usecnt, 0);\r
-       if (p_uctx)\r
-               atomic_inc(&p_uctx->x.usecnt);\r
-       HCA_PRINT(TRACE_LEVEL_INFORMATION ,HCA_DBG_QP ,("PD%d use cnt %d, pd_handle %p, ctx %p \n", \r
-               ((struct mlx4_ib_pd*)pd)->pdn, pd->usecnt, pd, pd->p_uctx));\r
-\r
-       HCA_PRINT(TRACE_LEVEL_INFORMATION, HCA_DBG_SRQ ,\r
-               ("uctx %p, qhndl %p, qnum %#x \n", \r
-               pd->p_uctx, p_ib_srq, ((struct mlx4_ib_srq*)p_ib_srq)->msrq.srqn ) );\r
-\r
-       // fill results for user\r
-       if (p_uctx && p_umv_buf && p_umv_buf->p_inout_buf) {\r
-               struct mlx4_ib_srq* p_mib_srq = (struct mlx4_ib_srq*)p_ib_srq;\r
-               p_resp->srq_handle = (__u64)(ULONG_PTR)p_ib_srq;\r
-               p_resp->max_wr = p_mib_srq->msrq.max - 1;\r
-               p_resp->max_sge = p_mib_srq->msrq.max_gs;\r
-               p_umv_buf->output_size = sizeof(struct ibv_create_srq_resp);\r
-               HCA_PRINT(TRACE_LEVEL_INFORMATION ,HCA_DBG_QP ,("PD%d use cnt %d \n", \r
-                       ((struct mlx4_ib_pd*)pd)->pdn, pd->usecnt));\r
-       }\r
-\r
-       return p_ib_srq;\r
-       \r
-err_create_srq:\r
-       if( p_umv_buf && p_umv_buf->command ) \r
-               p_umv_buf->status = IB_ERROR;\r
-       HCA_EXIT(HCA_DBG_QP);\r
-       return ERR_PTR(err);\r
-}\r
-\r
-int ib_destroy_srq(struct ib_srq *srq)\r
-{\r
-       int ret;\r
-       struct ib_pd *p_ib_pd = srq->pd;\r
-       struct ib_ucontext      *p_uctx = p_ib_pd->p_uctx;\r
-\r
-       ret = srq->device->destroy_srq(srq);\r
-       if (!ret) {\r
-               atomic_dec(&p_ib_pd->usecnt);\r
-               HCA_PRINT(TRACE_LEVEL_INFORMATION  ,HCA_DBG_SRQ ,("PD%d use cnt %d, pd_handle %p, ctx %p \n", \r
-                       ((struct mlx4_ib_pd*)p_ib_pd)->pdn, p_ib_pd->usecnt, p_ib_pd, p_ib_pd->p_uctx));\r
-               release_user_cq_qp_resources(p_uctx);\r
-       }\r
-\r
-       return ret;\r
-}\r
-\r
-//\r
-// User context\r
-//\r
-static NTSTATUS __map_memory_for_user(\r
-       IN              io_addr_t       addr,\r
-       IN              SIZE_T          size,\r
-       IN              MEMORY_CACHING_TYPE mem_type,\r
-       OUT             umap_t  *       p_map\r
-       )\r
-{\r
-       NTSTATUS status;\r
-\r
-       HCA_ENTER(HCA_DBG_SHIM);\r
-\r
-       p_map->mapped = 0;\r
-       \r
-       // map UAR to kernel \r
-       p_map->kva = ioremap(addr, size);\r
-       if (!p_map->kva) {\r
-               HCA_PRINT( TRACE_LEVEL_ERROR, HCA_DBG_LOW ,\r
-                       ("Couldn't map kernel access region, aborting.\n") );\r
-               status = IB_INSUFFICIENT_MEMORY;\r
-               goto err_ioremap;\r
-       }\r
-\r
-       // build MDL \r
-       p_map->mdl = IoAllocateMdl( p_map->kva, (ULONG)size,\r
-               FALSE, TRUE, NULL );\r
-       if( !p_map->mdl ) {\r
-               status = IB_INSUFFICIENT_MEMORY;\r
-               goto err_alloc_mdl;\r
-       }\r
-       MmBuildMdlForNonPagedPool( p_map->mdl );\r
-\r
-       /* Map the memory into the calling process's address space. */\r
-       __try   {\r
-               p_map->uva = MmMapLockedPagesSpecifyCache( p_map->mdl,\r
-                       UserMode, mem_type, NULL, FALSE, NormalPagePriority );\r
-       }\r
-       __except(EXCEPTION_EXECUTE_HANDLER) {\r
-               status = IB_INVALID_PERMISSION;\r
-               goto err_map;\r
-       }\r
-\r
-       p_map->mapped = 1;\r
-       status = STATUS_SUCCESS;\r
-       goto done;\r
-\r
-err_map:\r
-       IoFreeMdl(p_map->mdl);\r
-\r
-err_alloc_mdl: \r
-       iounmap(p_map->kva, PAGE_SIZE);\r
-\r
-err_ioremap:\r
-done:  \r
-       HCA_EXIT(HCA_DBG_SHIM);\r
-       return status;\r
-}\r
-\r
-static void __unmap_memory_for_user(\r
-       IN              umap_t  *       p_map\r
-       )\r
-{\r
-       if (p_map->mapped) {\r
-               p_map->mapped = 0;\r
-               MmUnmapLockedPages( p_map->uva, p_map->mdl );\r
-               IoFreeMdl(p_map->mdl);\r
-               iounmap(p_map->kva, PAGE_SIZE);\r
-       }\r
-}\r
-\r
-ib_api_status_t ibv_um_open(   \r
-       IN                      struct ib_device                *       p_ibdev,\r
-       IN      OUT             ci_umv_buf_t* const                     p_umv_buf,\r
-       OUT                     struct ib_ucontext              **      pp_uctx )\r
-{\r
-       int err;\r
-       ib_api_status_t         status;\r
-       struct mlx4_ib_ucontext *p_muctx;\r
-       struct ibv_get_context_resp *p_uresp;\r
-       struct mlx4_ib_alloc_ucontext_resp ib_alloc_ucontext_resp;\r
-       struct ib_ucontext              *p_uctx;\r
-       struct ib_udata udata;\r
-\r
-       HCA_ENTER(HCA_DBG_SHIM);\r
-\r
-       // create user context in kernel\r
-       INIT_UDATA(&udata, NULL, &ib_alloc_ucontext_resp, \r
-               0, sizeof(struct mlx4_ib_alloc_ucontext_resp));\r
-\r
-       p_uctx = p_ibdev->alloc_ucontext(p_ibdev, &udata);\r
-       if (IS_ERR(p_uctx)) {\r
-               err = PTR_ERR(p_uctx);\r
-               HCA_PRINT(TRACE_LEVEL_ERROR  ,HCA_DBG_SHIM,\r
-                       ("mthca_alloc_ucontext failed (%d)\n", err));\r
-               status = errno_to_iberr(err);\r
-               goto err_alloc_ucontext;\r
-       }\r
-       p_muctx = to_mucontext(p_uctx);\r
-       p_uresp = (struct ibv_get_context_resp *)(ULONG_PTR)p_umv_buf->p_inout_buf;\r
-\r
-       // fill the rest of ib_ucontext fields \r
-       p_uctx->device = p_ibdev;\r
-       p_uctx->closing = 0;\r
-\r
-       // livefish\r
-       if (hca_is_livefish(p_ibdev->x.p_fdo))\r
-               goto done;\r
-       \r
-       // map uar to user space\r
-       status = __map_memory_for_user( \r
-               (io_addr_t)p_muctx->uar.pfn << PAGE_SHIFT, \r
-               PAGE_SIZE, MmNonCached, &p_uctx->x.uar );\r
-       if( status != IB_SUCCESS ) {\r
-               goto err_map_uar;\r
-       }\r
-       p_uresp->uar_addr        = (u64)(ULONG_PTR)p_uctx->x.uar.uva;\r
-\r
-       // map BF to user space\r
-       if (ib_alloc_ucontext_resp.bf_reg_size) {\r
-               status = __map_memory_for_user( \r
-                       (io_addr_t)(p_muctx->uar.pfn + \r
-                       to_mdev(p_ibdev)->dev->caps.num_uars) << PAGE_SHIFT, \r
-                       PAGE_SIZE, MmWriteCombined, &p_uctx->x.bf );\r
-               if( !NT_SUCCESS(status) ) {\r
-                       HCA_PRINT(TRACE_LEVEL_WARNING ,HCA_DBG_SHIM,\r
-                               ("BlueFlame available, but failed to be mapped (%#x)\n", status));\r
-                       p_uresp->bf_page         = 0;\r
-                       p_uresp->bf_buf_size = 0;\r
-               } \r
-               else {\r
-                       p_uresp->bf_page         = (u64)(ULONG_PTR)p_uctx->x.bf.uva;\r
-                       p_uresp->bf_buf_size = ib_alloc_ucontext_resp.bf_reg_size / 2;\r
-                       p_uresp->bf_offset       = 0;\r
-               }\r
-       }\r
-       else {\r
-                       p_uresp->bf_page         = 0;\r
-                       p_uresp->bf_buf_size = 0;\r
-       }\r
-\r
-done:\r
-       // fill the response\r
-       p_uresp->bf_reg_size             = ib_alloc_ucontext_resp.bf_reg_size;\r
-       p_uresp->bf_regs_per_page        = ib_alloc_ucontext_resp.bf_regs_per_page;\r
-       p_uresp->qp_tab_size             = ib_alloc_ucontext_resp.qp_tab_size;\r
-\r
-       *pp_uctx = p_uctx;\r
-       status = IB_SUCCESS;\r
-       goto end;\r
-\r
-err_map_uar:\r
-       p_ibdev->dealloc_ucontext(p_uctx);\r
-err_alloc_ucontext: \r
-end:\r
-       HCA_EXIT(HCA_DBG_SHIM);\r
-       return status;\r
-}\r
-\r
-\r
-void ibv_um_close(     struct ib_ucontext * h_um_ca )\r
-{\r
-       int err;\r
-       ib_api_status_t         status;\r
-       struct ib_ucontext *p_uctx = (struct ib_ucontext *)h_um_ca;\r
-       PFDO_DEVICE_DATA p_fdo = p_uctx->device->x.p_fdo;\r
-\r
-       HCA_ENTER(HCA_DBG_SHIM);\r
-\r
-       p_uctx->closing = 1;\r
-\r
-       if (atomic_read(&p_uctx->x.usecnt)) {\r
-               HCA_PRINT(TRACE_LEVEL_ERROR,HCA_DBG_SHIM,\r
-                       ("resources are not released (cnt %d)\n", p_uctx->x.usecnt));\r
-               status = IB_RESOURCE_BUSY;\r
-               goto err_usage;\r
-       }\r
-       \r
-       if ( !hca_is_livefish(p_fdo)) {\r
-               __unmap_memory_for_user( &p_uctx->x.bf );\r
-               __unmap_memory_for_user( &p_uctx->x.uar );\r
-       }\r
-\r
-       err = p_fdo->bus_ib_ifc.p_ibdev->dealloc_ucontext(p_uctx);\r
-       if (err) {\r
-               HCA_PRINT(TRACE_LEVEL_ERROR,HCA_DBG_SHIM,\r
-                       ("mthca_dealloc_ucontext failed (%d)\n", err));\r
-               status = errno_to_iberr(err);\r
-               goto err_dealloc_ucontext;\r
-       }\r
-\r
-       HCA_PRINT(TRACE_LEVEL_INFORMATION,HCA_DBG_SHIM,\r
-               ("pcs %p\n", PsGetCurrentProcess()) );\r
-       status = IB_SUCCESS;\r
-       goto end;\r
-       \r
-err_dealloc_ucontext: \r
-err_usage:\r
-end:\r
-       if (status != IB_SUCCESS)\r
-       {\r
-               HCA_PRINT(TRACE_LEVEL_ERROR,HCA_DBG_SHIM,\r
-                       ("completes with ERROR status %x\n", status));\r
-       }\r
-       HCA_EXIT(HCA_DBG_SHIM);\r
-       return;\r
-}\r
-\r
diff --git a/branches/winverbs/hw/mlx4/kernel/hca/verbs.h b/branches/winverbs/hw/mlx4/kernel/hca/verbs.h
deleted file mode 100644 (file)
index 8085257..0000000
+++ /dev/null
@@ -1,72 +0,0 @@
-/*
- * Copyright (c) 2004 Mellanox Technologies Ltd.  All rights reserved.
- * Copyright (c) 2004 Infinicon Corporation.  All rights reserved.
- * Copyright (c) 2004 Intel Corporation.  All rights reserved.
- * Copyright (c) 2004 Topspin Corporation.  All rights reserved.
- * Copyright (c) 2004 Voltaire Corporation.  All rights reserved.
- * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
- * Copyright (c) 2005 Cisco Systems.  All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
- * $Id: ib_verbs.h 1889 2006-12-31 08:33:06Z sleybo $
- */
-
-#pragma once
-
-#include "ib_verbs.h"
-
-struct ib_mr *ibv_reg_mr(struct ib_pd *pd, 
-       u64 start, u64 length,
-       u64 virt_addr,
-       int mr_access_flags,
-       ci_umv_buf_t* const p_umv_buf );
-
-struct ib_cq *ibv_create_cq(struct ib_device *p_ibdev,
-                          ib_comp_handler comp_handler,
-                          void (*event_handler)(ib_event_rec_t *),
-                          void *cq_context, int cqe, 
-                          struct ib_ucontext *p_uctx, ci_umv_buf_t* const p_umv_buf);
-
-struct ib_qp *ibv_create_qp(struct ib_pd *pd,
-       struct ib_qp_init_attr *qp_init_attr,
-       struct ib_ucontext *context, ci_umv_buf_t* const p_umv_buf);
-
-struct ib_srq *ibv_create_srq(struct ib_pd *pd,
-       struct ib_srq_init_attr *srq_init_attr,
-       struct ib_ucontext *context, ci_umv_buf_t* const p_umv_buf);
-
-ib_api_status_t ibv_um_open(   
-       IN                      struct ib_device                *       p_ibdev,
-       IN      OUT             ci_umv_buf_t* const                     p_umv_buf,
-       OUT                     struct ib_ucontext              **      pp_uctx );
-
-void ibv_um_close(     struct ib_ucontext * h_um_ca );
-
-