mro_p->iobuf = iobuf;\r
#endif\r
*p_lkey = mr_p->lkey;\r
- *p_rkey = mr_p->rkey;\r
+ *p_rkey = cl_hton32( mr_p->rkey );\r
if (ph_mr) *ph_mr = (ib_mr_handle_t)mr_p;\r
status = IB_SUCCESS;\r
\r
// results\r
if (ph_mr) *ph_mr = (ib_mr_handle_t)mr_p;\r
*p_lkey = mr_p->lkey;\r
- *p_rkey = mr_p->rkey;\r
+ *p_rkey = cl_hton32( mr_p->rkey );\r
//NB: p_vaddr was not changed\r
status = IB_SUCCESS;\r
\r
\r
// debug print\r
{\r
- HCA_PRINT( TRACE_LEVEL_WARNING, HCA_DBG_MAD, \r
+ HCA_PRINT( TRACE_LEVEL_INFORMATION, HCA_DBG_MAD, \r
("MAD: Class %02x, Method %02x, Attr %02x, HopPtr %d, HopCnt %d, \n",\r
(uint32_t)((ib_smp_t *)p_mad_in)->mgmt_class, \r
(uint32_t)((ib_smp_t *)p_mad_in)->method, \r
// return the result\r
if (ph_av) *ph_av = (ib_av_handle_t)ib_av_p;\r
\r
- if( p_context )\r
- {\r
- struct ibv_create_ah_resp *create_ah_resp = (struct ibv_create_ah_resp *)(void*)p_umv_buf->p_inout_buf;\r
- cl_memcpy( &create_ah_resp->av_attr, p_addr_vector, sizeof(create_ah_resp->av_attr) );\r
- p_umv_buf->status = IB_SUCCESS;\r
- }\r
-\r
status = IB_SUCCESS;\r
\r
err_alloc_av: \r
err_inval_params:\r
+ if (p_umv_buf && p_umv_buf->command) \r
+ p_umv_buf->status = status;\r
HCA_PRINT_EXIT(TRACE_LEVEL_ERROR ,HCA_DBG_AV,\r
("completes with ERROR status %s\n", IB_GET_ERR_STR(status)));\r
return status;\r
};
struct ib_pd {
+ struct list_head list; /* for chaining AV MRs (for user mode only) */
struct ib_device *device;
struct ib_ucontext *ucontext;
atomic_t usecnt; /* count all resources */
+ KMUTEX mutex; /* for chaining AV MRs (for user mode only) */
};
struct ib_ah {
struct ib_device *device;
struct ib_pd *pd;
struct ib_ucontext *ucontext;
- struct ib_mr *ib_mr;
};
typedef void (*ib_comp_handler)(struct ib_cq *cq, void *cq_context);
};
struct ib_mr {
+ struct list_head list; /* for chaining AV MRs (for user mode only) */
struct ib_device *device;
struct ib_pd *pd;
u32 lkey;
#define PCI_VENDOR_ID_TOPSPIN 0x1867
#endif
+/* "livefish" (flash-recovery) mode device IDs: normal device ID + 1 */
+#define PCI_DEVICE_ID_MELLANOX_TAVOR_BD 0x5a45
+#define PCI_DEVICE_ID_MELLANOX_ARBEL_BD 0x6279
// ===========================================
// TYPES
pd->device = device;
pd->ucontext = context;
atomic_set(&pd->usecnt, 0);
+ KeInitializeMutex( &pd->mutex, 0 );
+ INIT_LIST_HEAD( &pd->list );
HCA_PRINT(TRACE_LEVEL_INFORMATION ,HCA_DBG_CQ ,("PD%d use cnt %d, pd_handle %p, ctx %p \n",
((struct mthca_pd*)pd)->pd_num, pd->usecnt, pd, pd->ucontext));
}
int ibv_dealloc_pd(struct ib_pd *pd)
{
+	// first release the list of AV MRs, so that pd->usecnt gets decremented
+ if (pd->ucontext) {
+ struct ib_mr *ib_mr, *tmp;
+ down(&pd->mutex );
+ list_for_each_entry_safe(ib_mr, tmp, &pd->list, list,struct ib_mr,struct ib_mr) {
+ ibv_dereg_mr( ib_mr );
+ }
+ up(&pd->mutex );
+ }
+
if (atomic_read(&pd->usecnt)) {
HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_CQ,("resources are not released (cnt %d)\n", pd->usecnt));
return -EBUSY;
struct ib_ah *ah;
struct ib_mr *ib_mr = NULL;
u64 start = 0;
+ u64 user_handle = 0;
// for user call we need also allocate MR
if (context && p_umv_buf && p_umv_buf->p_inout_buf) {
struct ibv_create_ah *create_ah = (struct ibv_create_ah *)(void*)p_umv_buf->p_inout_buf;
- // create region
+ // create region; destroy will be done on dealloc_pd
ib_mr = ibv_reg_mr(
pd,
create_ah->mr.access_flags,
}
start = create_ah->mr.start;
+ user_handle = create_ah->user_handle;
+
+ // chain this MR to PD list
+ down(&pd->mutex );
+ list_add_tail(&ib_mr->list, &pd->list);
+ up(&pd->mutex );
}
ah = pd->device->create_ah(pd, ah_attr);
atomic_inc(&pd->usecnt);
HCA_PRINT(TRACE_LEVEL_INFORMATION ,HCA_DBG_AV ,("PD%d use cnt %d, pd_handle %p, ctx %p \n",
((struct mthca_pd*)pd)->pd_num, pd->usecnt, pd, pd->ucontext));
- if (context)
- atomic_inc(&context->usecnt);
// fill results for user
if (context && p_umv_buf && p_umv_buf->p_inout_buf) {
struct ibv_create_ah_resp *create_ah_resp = (struct ibv_create_ah_resp *)(void*)p_umv_buf->p_inout_buf;
- ah->ib_mr = ib_mr;
create_ah_resp->start = start;
+ create_ah_resp->user_handle = user_handle;
create_ah_resp->mr.lkey = ib_mr->lkey;
create_ah_resp->mr.rkey = ib_mr->rkey;
create_ah_resp->mr.mr_handle = (u64)(ULONG_PTR)ib_mr;
{
struct ib_pd *pd;
int ret;
- struct ib_ucontext *ucontext;
- struct ib_mr * ib_mr;
HCA_ENTER(HCA_DBG_AV);
pd = ah->pd;
- ucontext = ah->ucontext;
- ib_mr = ah->ib_mr;
ret = ah->device->destroy_ah(ah);
if (!ret) {
HCA_PRINT(TRACE_LEVEL_INFORMATION ,HCA_DBG_AV ,("PD%d use cnt %d, pd_handle %p, ctx %p \n",
((struct mthca_pd*)pd)->pd_num, pd->usecnt, pd, pd->ucontext));
}
- release_user_cq_qp_resources(ucontext, ib_mr);
HCA_EXIT(HCA_DBG_AV);
return ret;
}
Signature="$Windows NT$"\r
Class=InfiniBandHca\r
ClassGUID={58517E00-D3CF-40c9-A679-CEE5752F4491}\r
-Provider=%MTL%\r
-CatalogFile=mthca.cat\r
+Provider=%OPENIB%\r
; must be synchronized with MTHCA_DEV.H\r
DriverVer=04/10/2006,1.0.0000.308\r
\r
DefaultDestDir=%DIRID_DRIVERS%\r
ClassCopyFiles=%DIRID_SYSTEM%\r
MTHCA.UMCopyFiles=%DIRID_SYSTEM%\r
-MTHCA.WOW64CopyFiles=%DIRID_WINDOWS%\SysWOW64\r
+MTHCA.WOW64CopyFiles=%DIRID_SYSTEM_X86%\r
\r
; ================= Class Install section =====================\r
\r
mthca.sys=1\r
mthcau.dll=1\r
mthcaud.dll=1\r
-;uvpd32.dll=1\r
-;uvpd32d.dll=1\r
+mthca32.dll=1\r
+mthca32d.dll=1\r
\r
[SourceDisksFiles.ia64]\r
IbInstaller.dll=1\r
mthca.sys=1\r
mthcau.dll=1\r
mthcaud.dll=1\r
-;uvpd32.dll=1\r
-;uvpd32d.dll=1\r
+mthca32.dll=1\r
+mthca32d.dll=1\r
\r
[Manufacturer]\r
%MTL% = HCA.DeviceSection,ntx86,ntamd64,ntia64\r
%MT24204.DeviceDesc% = MTHCA.DDInstall, PCI\VEN_15B3&DEV_5E8C\r
%MT25204.DeviceDesc% = MTHCA.DDInstall, PCI\VEN_15B3&DEV_6274\r
\r
-\r
-[HCA.DeviceSection.ntamd64]\r
+ [HCA.DeviceSection.ntamd64]\r
%MT23108.DeviceDesc% = MTHCA.DDInstall, PCI\VEN_15B3&DEV_5A44\r
%MT25208.DeviceDesc% = MTHCA.DDInstall, PCI\VEN_15B3&DEV_6278\r
%MT25218.DeviceDesc% = MTHCA.DDInstall, PCI\VEN_15B3&DEV_6282\r
mthca.sys\r
\r
[MTHCA.UMCopyFiles]\r
-; 2 lines excluded temporary\r
-mthcau.dll,,,2\r
+ mthcau.dll,,,2\r
mthcaud.dll,,,2\r
\r
[MTHCA.WOW64CopyFiles]\r
-mthcau.dll,uvpd32.dll,,2\r
-mthcaud.dll,uvpd32d.dll,,2\r
+ mthcau.dll,mthca32.dll,,2\r
+mthcaud.dll,mthca32d.dll,,2\r
\r
;\r
; ============= Service Install section ==============\r
\r
[Strings]\r
HcaClassGuid = "{58517E00-D3CF-40c9-A679-CEE5752F4491}"\r
+OPENIB = "OpenIB Alliance"\r
MTL="Mellanox Technologies Ltd."\r
MTHCA.ServiceDesc = "Driver for Mellanox InfiniHost Devices"\r
MT23108.DeviceDesc="InfiniHost (MT23108) - Mellanox InfiniBand HCA"\r
REG_MULTI_SZ_APPEND = 0x00010008\r
DIRID_SYSTEM = 11\r
DIRID_DRIVERS = 12\r
+DIRID_SYSTEM_X86 = 16425\r
+\r
((struct mthca_raddr_seg *) wqe)->raddr =
cl_hton64(wr->remote_ops.vaddr);
((struct mthca_raddr_seg *) wqe)->rkey =
- cl_hton32(wr->remote_ops.rkey);
+ wr->remote_ops.rkey;
((struct mthca_raddr_seg *) wqe)->reserved = 0;
wqe += sizeof (struct mthca_raddr_seg);
((struct mthca_raddr_seg *) wqe)->raddr =
cl_hton64(wr->remote_ops.vaddr);
((struct mthca_raddr_seg *) wqe)->rkey =
- cl_hton32(wr->remote_ops.rkey);
+ wr->remote_ops.rkey;
((struct mthca_raddr_seg *) wqe)->reserved = 0;
wqe += sizeof (struct mthca_raddr_seg);
size += sizeof (struct mthca_raddr_seg) / 16;
((struct mthca_raddr_seg *) wqe)->raddr =
cl_hton64(wr->remote_ops.vaddr);
((struct mthca_raddr_seg *) wqe)->rkey =
- cl_hton32(wr->remote_ops.rkey);
+ wr->remote_ops.rkey;
((struct mthca_raddr_seg *) wqe)->reserved = 0;
wqe += sizeof (struct mthca_raddr_seg);
size += sizeof (struct mthca_raddr_seg) / 16;
((struct mthca_raddr_seg *) wqe)->raddr =
cl_hton64(wr->remote_ops.vaddr);
((struct mthca_raddr_seg *) wqe)->rkey =
- cl_hton32(wr->remote_ops.rkey);
+ wr->remote_ops.rkey;
((struct mthca_raddr_seg *) wqe)->reserved = 0;
wqe += sizeof (struct mthca_raddr_seg);
((struct mthca_raddr_seg *) wqe)->raddr =
cl_hton64(wr->remote_ops.vaddr);
((struct mthca_raddr_seg *) wqe)->rkey =
- cl_hton32(wr->remote_ops.rkey);
+ wr->remote_ops.rkey;
((struct mthca_raddr_seg *) wqe)->reserved = 0;
wqe += sizeof (struct mthca_raddr_seg);
size += sizeof (struct mthca_raddr_seg) / 16;
((struct mthca_raddr_seg *) wqe)->raddr =
cl_hton64(wr->remote_ops.vaddr);
((struct mthca_raddr_seg *) wqe)->rkey =
- cl_hton32(wr->remote_ops.rkey);
+ wr->remote_ops.rkey;
((struct mthca_raddr_seg *) wqe)->reserved = 0;
wqe += sizeof (struct mthca_raddr_seg);
size += sizeof (struct mthca_raddr_seg) / 16;
uint8_t reserved[3];
};
+struct __ibv_ah {
+ uint64_t user_handle;
+ int use_mr;
+};
+
struct ibv_create_ah {
+ struct __ibv_ah;
struct ibv_reg_mr mr;
};
struct ibv_create_ah_resp {
+ struct __ibv_ah;
uint64_t start;
struct ibv_reg_mr_resp mr;
- ib_av_attr_t av_attr;
};
+++ /dev/null
-/*
- * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
- * Copyright (c) 2005 Cisco Systems. All rights reserved.
- * Copyright (c) 2005 PathScale, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
- * $Id: driver.h 3825 2005-10-19 21:25:34Z roland $
- */
-
-#ifndef INFINIBAND_DRIVER_H
-#define INFINIBAND_DRIVER_H
-
-#include <iba/ib_types.h>
-#include <verbs.h>
-#include <kern-abi.h>
-
-#ifdef __cplusplus
-# define BEGIN_C_DECLS extern "C" {
-# define END_C_DECLS }
-#else /* !__cplusplus */
-# define BEGIN_C_DECLS
-# define END_C_DECLS
-#endif /* __cplusplus */
-
-/*
- * Device-specific drivers should declare their device init function
- * as below (the name must be "openib_driver_init"):
- *
- * struct ibv_device *openib_driver_init(struct sysfs_class_device *);
- *
- * libibverbs will call each driver's openib_driver_init() function
- * once for each InfiniBand device. If the device is one that the
- * driver can support, it should return a struct ibv_device * with the
- * ops member filled in. If the driver does not support the device,
- * it should return NULL from openib_driver_init().
- */
-
-typedef struct ibv_device *(*ibv_driver_init_func)(struct sysfs_class_device *);
-
- int ibv_cmd_get_context(struct ibv_context *context, struct ibv_get_context *cmd,
- size_t cmd_size, struct ibv_get_context_resp *resp,
- size_t resp_size);
- int ibv_cmd_query_device(struct ibv_context *context,
- struct ibv_device_attr *device_attr,
- uint64_t *raw_fw_ver,
- struct ibv_query_device *cmd, size_t cmd_size);
- int ibv_cmd_query_port(struct ibv_context *context, uint8_t port_num,
- struct ibv_port_attr *port_attr,
- struct ibv_query_port *cmd, size_t cmd_size);
- int ibv_cmd_query_gid(struct ibv_context *context, uint8_t port_num,
- int index, union ibv_gid *gid);
- int ibv_cmd_query_pkey(struct ibv_context *context, uint8_t port_num,
- int index, uint16_t *pkey);
- int ibv_cmd_alloc_pd(struct ibv_context *context, struct ibv_pd *pd,
- struct ibv_alloc_pd *cmd, size_t cmd_size,
- struct ibv_alloc_pd_resp *resp, size_t resp_size);
- int ibv_cmd_dealloc_pd(struct ibv_pd *pd);
- int ibv_cmd_reg_mr(struct ibv_pd *pd, void *addr, size_t length,
- uint64_t hca_va, enum ibv_access_flags access,
- struct ibv_mr *mr, struct ibv_reg_mr *cmd,
- size_t cmd_size);
- int ibv_cmd_dereg_mr(struct ibv_mr *mr);
- int ibv_cmd_create_cq(struct ibv_context *context, int cqe,
- struct ibv_comp_channel *channel,
- int comp_vector, struct ibv_cq *cq,
- struct ibv_create_cq *cmd, size_t cmd_size,
- struct ibv_create_cq_resp *resp, size_t resp_size);
- int ibv_cmd_poll_cq(struct ibv_cq *cq, int ne, struct ibv_wc *wc);
- int ibv_cmd_req_notify_cq(struct ibv_cq *cq, int solicited_only);
- int ibv_cmd_destroy_cq(struct ibv_cq *cq);
-
- int ibv_cmd_create_srq(struct ibv_pd *pd,
- struct ibv_srq *srq, struct ibv_srq_init_attr *attr,
- struct ibv_create_srq *cmd, size_t cmd_size,
- struct ibv_create_srq_resp *resp, size_t resp_size);
- int ibv_cmd_modify_srq(struct ibv_srq *srq,
- struct ibv_srq_attr *srq_attr,
- enum ibv_srq_attr_mask srq_attr_mask,
- struct ibv_modify_srq *cmd, size_t cmd_size);
- int ibv_cmd_destroy_srq(struct ibv_srq *srq);
-
- int ibv_cmd_create_qp(struct ibv_pd *pd,
- struct ibv_qp *qp, struct ibv_qp_init_attr *attr,
- struct ibv_create_qp *cmd, size_t cmd_size);
- int ibv_cmd_modify_qp(struct ibv_qp *qp, struct ibv_qp_attr *attr,
- enum ibv_qp_attr_mask attr_mask,
- struct ibv_modify_qp *cmd, size_t cmd_size);
- int ibv_cmd_destroy_qp(struct ibv_qp *qp);
- int ibv_cmd_post_send(struct ibv_qp *ibqp, struct ibv_send_wr *wr,
- struct ibv_send_wr **bad_wr);
- int ibv_cmd_post_recv(struct ibv_qp *ibqp, struct ibv_recv_wr *wr,
- struct ibv_recv_wr **bad_wr);
- int ibv_cmd_post_srq_recv(struct ibv_srq *srq, struct ibv_recv_wr *wr,
- struct ibv_recv_wr **bad_wr);
- int ibv_cmd_create_ah(struct ibv_pd *pd, struct ibv_ah *ah,
- struct ibv_ah_attr *attr);
- int ibv_cmd_destroy_ah(struct ibv_ah *ah);
- int ibv_cmd_attach_mcast(struct ibv_qp *qp, union ibv_gid *gid, uint16_t lid);
- int ibv_cmd_detach_mcast(struct ibv_qp *qp, union ibv_gid *gid, uint16_t lid);
-
-#endif /* INFINIBAND_DRIVER_H */
IN OUT ci_umv_buf_t *p_umv_buf)\r
{\r
int err;\r
+ struct mthca_ah *ah;\r
+ struct ibv_ah_attr attr;\r
struct ibv_create_ah *p_create_av;\r
ib_api_status_t status = IB_SUCCESS;\r
+ int AV_created = TRUE;\r
size_t size = max( sizeof(struct ibv_create_ah), sizeof(struct ibv_create_ah_resp) );\r
mlnx_ual_pd_info_t *p_pd = (mlnx_ual_pd_info_t *)h_uvp_pd;\r
mlnx_ual_hobul_t *p_hobul = p_pd->p_hobul;\r
\r
CL_ASSERT(p_umv_buf);\r
\r
- if( !p_umv_buf->p_inout_buf )\r
- {\r
+ // sanity check\r
+ if (p_av_attr->port_num == 0 || p_av_attr->port_num > p_hobul->p_hca_attr->num_ports) {\r
+ UVP_PRINT(TRACE_LEVEL_WARNING ,UVP_DBG_AV ,\r
+ (" invalid port number specified (%d)\n",p_av_attr->port_num));\r
+ status = IB_INVALID_PORT;\r
+ goto end;\r
+ }\r
+\r
+ // convert parameters \r
+ cl_memset( &attr, 0, sizeof(attr));\r
+ status = map_itom_av_attr (p_hobul->p_hca_attr, p_av_attr, &attr);\r
+ if(status != IB_SUCCESS ) \r
+ goto end;\r
+\r
+ // allocate Ah object\r
+ ah = cl_zalloc( sizeof *ah );\r
+ if( !ah ) {\r
+ status = IB_INSUFFICIENT_MEMORY;\r
+ goto end;\r
+ }\r
+\r
+ // fill AH partly\r
+ ah->h_uvp_pd = h_uvp_pd;\r
+ cl_memcpy( &ah->av_attr, p_av_attr, sizeof(ah->av_attr) );\r
+\r
+ // try to create AV\r
+ err = mthca_alloc_av(to_mpd(p_pd->ibv_pd), &attr, ah, NULL);\r
+ if (err == -EAGAIN) \r
+ AV_created = FALSE;\r
+ else\r
+ if (err) {\r
+ UVP_PRINT(TRACE_LEVEL_ERROR ,UVP_DBG_AV , ("mthca_alloc_av failed (%d)\n", err));\r
+ status = errno_to_iberr(err);\r
+ goto err_alloc_av;\r
+ }\r
+\r
+ // allocate parameters\r
+ if( !p_umv_buf->p_inout_buf ) {\r
p_umv_buf->p_inout_buf = cl_zalloc( size );\r
if( !p_umv_buf->p_inout_buf )\r
{\r
status = IB_INSUFFICIENT_MEMORY;\r
- goto err_memory;\r
+ goto err_mem;\r
}\r
}\r
\r
- if (p_av_attr->port_num == 0 || p_av_attr->port_num > p_hobul->p_hca_attr->num_ports) {\r
- UVP_PRINT(TRACE_LEVEL_WARNING ,UVP_DBG_AV ,\r
- (" invalid port number specified (%d)\n",p_av_attr->port_num));\r
- return IB_INVALID_PORT;\r
- }\r
-\r
+ // fill the parameters\r
p_umv_buf->input_size = sizeof(struct ibv_create_ah);\r
p_umv_buf->output_size = sizeof(struct ibv_create_ah_resp);\r
p_umv_buf->command = TRUE;\r
-\r
- /* allocate ibv_ah */\r
p_create_av = (struct ibv_create_ah *)p_umv_buf->p_inout_buf;\r
- err = p_hobul->ibv_ctx->ops.create_ah_pre(p_pd->ibv_pd, p_create_av);\r
- if (err) {\r
- UVP_PRINT(TRACE_LEVEL_ERROR ,UVP_DBG_AV , ("mthca_alloc_av_pre failed (%d)\n", err));\r
- status = errno_to_iberr(err);\r
- goto err_alloc_av;\r
+ p_create_av->user_handle = (uint64_t)(ULONG_PTR)ah;\r
+ if (!AV_created) {\r
+ struct mthca_ah_page *page = ah->page;\r
+ p_create_av->mr.start = (uint64_t)(ULONG_PTR)page->buf;\r
+ p_create_av->mr.length = g_page_size;\r
+ p_create_av->mr.hca_va = (uint64_t)(ULONG_PTR)page->buf;\r
+ p_create_av->mr.pd_handle = p_pd->ibv_pd->handle;\r
+ p_create_av->mr.pdn = to_mpd(p_pd->ibv_pd)->pdn;\r
+ p_create_av->mr.access_flags = 0; //local read\r
+ p_create_av->use_mr = TRUE;\r
+ status = IB_SUCCESS;\r
}\r
+ else\r
+ status = IB_VERBS_PROCESSING_DONE;\r
\r
goto end;\r
- \r
-err_alloc_av:\r
- cl_free(p_umv_buf->p_inout_buf);\r
-err_memory:\r
+\r
+err_mem: \r
+ mthca_free_av(ah);\r
+err_alloc_av: \r
+ cl_free(ah);\r
end:\r
- UVP_EXIT(UVP_DBG_AV);\r
- return status;\r
+ UVP_EXIT(UVP_DBG_AV);\r
+ return status;\r
}\r
\r
\r
IN OUT ci_umv_buf_t *p_umv_buf)\r
{\r
int err;\r
- struct ibv_ah_attr attr;\r
- struct ibv_ah *ibv_ah;\r
+ struct mthca_ah *ah;\r
+ struct mthca_ah_page *page;\r
struct ibv_create_ah_resp *p_resp;\r
- mlnx_ual_av_info_t *av_info;\r
ib_api_status_t status = IB_SUCCESS;\r
mlnx_ual_pd_info_t *p_pd = (mlnx_ual_pd_info_t *)h_uvp_pd;\r
- mlnx_ual_hobul_t *p_hobul = p_pd->p_hobul;\r
\r
UVP_ENTER(UVP_DBG_AV);\r
\r
- CL_ASSERT(p_hobul);\r
CL_ASSERT(p_umv_buf);\r
\r
p_resp = (struct ibv_create_ah_resp *)p_umv_buf->p_inout_buf;\r
+ ah = (struct mthca_ah *)(ULONG_PTR)p_resp->user_handle;\r
\r
if (IB_SUCCESS == ioctl_status) {\r
\r
- /* convert parameters */\r
- cl_memset( &attr, 0, sizeof(attr));\r
- status = map_itom_av_attr (p_hobul->p_hca_attr, &p_resp->av_attr, &attr);\r
- if(status != IB_SUCCESS ) \r
- goto err_map_itom;\r
- /* allocate ibv_av */\r
- ibv_ah = p_hobul->ibv_ctx->ops.create_ah_post(p_pd->ibv_pd, &attr, p_resp);\r
- if (IS_ERR(ibv_ah)) {\r
- err = PTR_ERR(ibv_ah);\r
- UVP_PRINT(TRACE_LEVEL_ERROR ,UVP_DBG_AV , ("mthca_create_av_post failed (%d)\n", err));\r
- status = errno_to_iberr(err);\r
- goto err_create_ah;\r
- }\r
-\r
- /* allocate av */\r
- av_info = (mlnx_ual_av_info_t *)cl_zalloc( sizeof(mlnx_ual_av_info_t) );\r
- if( !av_info ) {\r
- status = IB_INSUFFICIENT_MEMORY;\r
- goto err_alloc_av_info;\r
+ page = ah->page;\r
+ if (p_resp->use_mr) {\r
+ // fill mr parameters\r
+ page->mr.handle = p_resp->mr.mr_handle;\r
+ page->mr.lkey = p_resp->mr.lkey;\r
+ page->mr.rkey = p_resp->mr.rkey;\r
+ page->mr.pd = p_pd->ibv_pd;\r
+ page->mr.context = p_pd->ibv_pd->context;\r
}\r
-\r
- /* return results */\r
- cl_memcpy( &av_info->av_attr, &p_resp->av_attr, sizeof(av_info->av_attr) );\r
- av_info->h_uvp_pd = p_pd;\r
- av_info->ibv_ah = ibv_ah;\r
- *ph_uvp_av = (ib_av_handle_t)av_info;\r
-\r
+ ah->key = page->mr.lkey;\r
+ *ph_uvp_av = (ib_av_handle_t)ah;\r
}\r
goto end;\r
\r
-err_alloc_av_info: \r
- p_hobul->ibv_ctx->ops.destroy_ah(ibv_ah);\r
-err_map_itom: \r
-err_create_ah:\r
end: \r
if (p_resp)\r
cl_free( p_resp );\r
UVP_EXIT(UVP_DBG_AV);\r
- return;\r
}\r
\r
ib_api_status_t\r
IN OUT ib_pd_handle_t *ph_pd,\r
IN OUT ci_umv_buf_t *p_umv_buf)\r
{\r
- mlnx_ual_av_info_t *av_info = (mlnx_ual_av_info_t *)h_uvp_av;\r
+ struct mthca_ah *ah = (struct mthca_ah *)h_uvp_av;\r
UNREFERENCED_PARAMETER(p_umv_buf);\r
\r
UVP_ENTER(UVP_DBG_AV);\r
\r
if (ioctl_status == IB_SUCCESS)\r
{\r
- cl_memcpy (p_addr_vector, &av_info->av_attr, sizeof (ib_av_attr_t));\r
+ cl_memcpy (p_addr_vector, &ah->av_attr, sizeof (ib_av_attr_t));\r
if (ph_pd)\r
- *ph_pd = (ib_pd_handle_t)av_info->h_uvp_pd;\r
+ *ph_pd = (ib_pd_handle_t)ah->h_uvp_pd;\r
}\r
\r
UVP_EXIT(UVP_DBG_AV);\r
}\r
\r
-void mthca_set_av_params( struct mthca_ah *ah_p, struct ibv_ah_attr *ah_attr );\r
+void mthca_set_av_params( struct mthca_ah *ah_p, struct ibv_ah_attr *ah_attr );\r
\r
ib_api_status_t\r
mlnx_pre_modify_av (\r
IN OUT ci_umv_buf_t *p_umv_buf)\r
{\r
ib_api_status_t status ;\r
- struct ibv_ah *ibv_ah = (struct ibv_ah *)h_uvp_av->ibv_ah;\r
- struct mthca_ah *mthca_ah = (struct mthca_ah *)ibv_ah;\r
+ struct mthca_ah *mthca_ah = (struct mthca_ah *)h_uvp_av;\r
mlnx_ual_pd_info_t *p_pd_info;\r
mlnx_ual_hobul_t *p_hobul;\r
struct ibv_ah_attr attr;\r
- int err;\r
\r
UNREFERENCED_PARAMETER(p_umv_buf);\r
\r
\r
CL_ASSERT(p_umv_buf);\r
\r
- p_pd_info = h_uvp_av->h_uvp_pd;\r
+ p_pd_info = mthca_ah->h_uvp_pd;\r
CL_ASSERT (p_pd_info);\r
\r
p_hobul = p_pd_info->p_hobul;\r
if(status != IB_SUCCESS) return status;\r
\r
mthca_set_av_params( mthca_ah, &attr);\r
- cl_memcpy (&h_uvp_av->av_attr, p_addr_vector, sizeof(ib_av_attr_t));\r
+ cl_memcpy (&mthca_ah->av_attr, p_addr_vector, sizeof(ib_av_attr_t));\r
\r
UVP_EXIT(UVP_DBG_AV);\r
\r
IN const ib_av_handle_t h_uvp_av,\r
IN ib_api_status_t ioctl_status)\r
{\r
+ struct mthca_ah *mthca_ah = (struct mthca_ah *)h_uvp_av;\r
UNREFERENCED_PARAMETER(ioctl_status);\r
\r
- UVP_ENTER(UVP_DBG_AV);\r
- CL_ASSERT (h_uvp_av || h_uvp_av->ibv_ah);\r
- h_uvp_av->ibv_ah->pd->context->ops.destroy_ah(h_uvp_av->ibv_ah);\r
- UVP_EXIT(UVP_DBG_AV);\r
- return;\r
+ UVP_ENTER(UVP_DBG_AV);\r
+ CL_ASSERT (h_uvp_av);\r
+ mthca_free_av(mthca_ah);\r
+ cl_free(mthca_ah);\r
+ UVP_EXIT(UVP_DBG_AV);\r
+ return;\r
}\r
\r
uint32_t rkey;\r
} mlnx_ual_mw_info_t;\r
\r
-\r
-typedef struct _ib_av\r
-{\r
- struct ibv_ah *ibv_ah;\r
- ib_pd_handle_t h_uvp_pd; \r
- ib_av_attr_t av_attr;\r
-} mlnx_ual_av_info_t;\r
mthca_destroy_qp,
NULL, /* post_send */
NULL, /* post_recv */
- mthca_create_ah_pre,
- mthca_create_ah_post,
- mthca_destroy_ah,
mthca_attach_mcast,
mthca_detach_mcast
};
};
struct mthca_ah {
- struct ibv_ah ibv_ah;
struct mthca_av *av;
+ ib_av_attr_t av_attr;
+ ib_pd_handle_t h_uvp_pd;
struct mthca_ah_page *page;
uint32_t key;
};
+struct mthca_ah_page {
+ struct mthca_ah_page *prev, *next;
+ void *buf;
+ struct ibv_mr mr;
+ int use_cnt;
+ unsigned free[0];
+};
+
static inline unsigned long align(unsigned long val, unsigned long align)
{
return (val + align - 1) & ~(align - 1);
return to_mxxx(qp, qp);
}
-static inline struct mthca_ah *to_mah(struct ibv_ah *ibah)
-{
- return to_mxxx(ah, ah);
-}
-
static inline int mthca_is_memfree(struct ibv_context *ibctx)
{
return to_mctx(ibctx)->hca_type == MTHCA_ARBEL;
void mthca_clear_qp(struct mthca_context *ctx, uint32_t qpn);
int mthca_free_err_wqe(struct mthca_qp *qp, int is_send,
int index, int *dbd, uint32_t *new_wqe);
-int mthca_create_ah_pre(struct ibv_pd *pd, struct ibv_create_ah *req);
- struct ibv_ah *mthca_create_ah_post(struct ibv_pd *pd,
- struct ibv_ah_attr *attr, struct ibv_create_ah_resp *resp);
-int mthca_destroy_ah(struct ibv_ah *ah);
int mthca_alloc_av(struct mthca_pd *pd, struct ibv_ah_attr *attr,
struct mthca_ah *ah, struct ibv_create_ah_resp *resp);
void mthca_free_av(struct mthca_ah *ah);
#include "mt_l2w.h"
#include "mlnx_uvp.h"
+#include "mlnx_ual_data.h"
#include "mx_abi.h"
-struct mthca_ah_page {
- struct mthca_ah_page *prev, *next;
- void *buf;
- struct ibv_mr mr;
- int use_cnt;
- unsigned free[0];
-};
-
-static struct mthca_ah_page *__add_page(struct mthca_pd *pd, int per_page)
+static struct mthca_ah_page *__add_page(
+ struct mthca_pd *pd, int page_size, int per_page)
{
struct mthca_ah_page *page;
int i;
if (!page)
return NULL;
+ if (posix_memalign(&page->buf, page_size, page_size)) {
+ cl_free(page);
+ return NULL;
+ }
+
page->use_cnt = 0;
for (i = 0; i < per_page; ++i)
page->free[i] = ~0;
int mthca_alloc_av(struct mthca_pd *pd, struct ibv_ah_attr *attr,
struct mthca_ah *ah, struct ibv_create_ah_resp *resp)
{
+ int added_page = FALSE;
if (mthca_is_memfree(pd->ibv_pd.context)) {
ah->av = cl_malloc(sizeof *ah->av);
if (!ah->av)
if (page->free[i])
goto found;
- page = __add_page(pd, pp);
+ page = __add_page(pd, ps, pp);
if (!page) {
ReleaseMutex( pd->ah_mutex );
return -ENOMEM;
}
+ added_page = TRUE;
found:
-
- // fill mr parameters
- page->buf = (void*)(ULONG_PTR)resp->start;
- page->mr.handle = resp->mr.mr_handle;
- page->mr.lkey = resp->mr.lkey;
- page->mr.rkey = resp->mr.rkey;
- page->mr.pd = (struct ibv_pd*)pd;
- page->mr.context = pd->ibv_pd.context;
-
++page->use_cnt;
for (i = 0, j = -1; i < pp; ++i)
break;
}
- ah->key = page->mr.lkey;
ah->page = page;
ReleaseMutex( pd->ah_mutex );
ah->av->dgid[3] = cl_hton32(2);
}
- return 0;
+ if (added_page)
+ return -EAGAIN;
+ else
+ return 0;
}
void mthca_free_av(struct mthca_ah *ah)
{
- if (mthca_is_memfree(ah->ibv_ah.pd->context)) {
+ mlnx_ual_pd_info_t *p_pd = (mlnx_ual_pd_info_t *)ah->h_uvp_pd;
+ if (mthca_is_memfree(p_pd->ibv_pd->context)) {
cl_free(ah->av);
} else {
- struct mthca_pd *pd = to_mpd(ah->ibv_ah.pd);
+ struct mthca_pd *pd = to_mpd(p_pd->ibv_pd);
struct mthca_ah_page *page;
int i;
void mthca_set_av_params( struct mthca_ah *ah_p, struct ibv_ah_attr *ah_attr )
{
struct mthca_av *av = ah_p->av;
- struct ibv_ah *ib_ah_p = (struct ibv_ah *)ah_p;
- struct mthca_pd *pd = (struct mthca_pd *)ib_ah_p->pd;
+ mlnx_ual_pd_info_t *p_pd = (mlnx_ual_pd_info_t *)ah_p->h_uvp_pd;
+ struct mthca_pd *pd =to_mpd(p_pd->ibv_pd);
// taken from mthca_alloc_av
//TODO: why cl_hton32 ?
((struct mthca_raddr_seg *) wqe)->raddr =
cl_hton64(wr->remote_ops.vaddr);
((struct mthca_raddr_seg *) wqe)->rkey =
- cl_hton32(wr->remote_ops.rkey);
+ wr->remote_ops.rkey;
((struct mthca_raddr_seg *) wqe)->reserved = 0;
wqe += sizeof (struct mthca_raddr_seg);
((struct mthca_raddr_seg *) wqe)->raddr =
cl_hton64(wr->remote_ops.vaddr);
((struct mthca_raddr_seg *) wqe)->rkey =
- cl_hton32(wr->remote_ops.rkey);
+ wr->remote_ops.rkey;
((struct mthca_raddr_seg *) wqe)->reserved = 0;
wqe += sizeof (struct mthca_raddr_seg);
size += sizeof (struct mthca_raddr_seg) / 16;
((struct mthca_raddr_seg *) wqe)->raddr =
cl_hton64(wr->remote_ops.vaddr);
((struct mthca_raddr_seg *) wqe)->rkey =
- cl_hton32(wr->remote_ops.rkey);
+ wr->remote_ops.rkey;
((struct mthca_raddr_seg *) wqe)->reserved = 0;
wqe += sizeof (struct mthca_raddr_seg);
size += sizeof (struct mthca_raddr_seg) / 16;
case IB_QPT_UNRELIABLE_DGRM:
{
- struct ibv_ah *ibv_ah = ((mlnx_ual_av_info_t*)wr->dgrm.ud.h_av)->ibv_ah;
+ struct mthca_ah *ah = ((struct mthca_ah *)wr->dgrm.ud.h_av);
((struct mthca_tavor_ud_seg *) wqe)->lkey =
- cl_hton32(to_mah(ibv_ah)->key);
+ cl_hton32(ah->key);
((struct mthca_tavor_ud_seg *) wqe)->av_addr =
- cl_hton64((uint64_t)to_mah(ibv_ah)->av);
+ cl_hton64((uint64_t)ah->av);
((struct mthca_tavor_ud_seg *) wqe)->dqpn = wr->dgrm.ud.remote_qp;
((struct mthca_tavor_ud_seg *) wqe)->qkey = wr->dgrm.ud.remote_qkey;
((struct mthca_raddr_seg *) wqe)->raddr =
cl_hton64(wr->remote_ops.vaddr);
((struct mthca_raddr_seg *) wqe)->rkey =
- cl_hton32((wr->remote_ops.rkey));
+ wr->remote_ops.rkey;
((struct mthca_raddr_seg *) wqe)->reserved = 0;
wqe += sizeof (struct mthca_raddr_seg);
((struct mthca_raddr_seg *) wqe)->raddr =
cl_hton64(wr->remote_ops.vaddr);
((struct mthca_raddr_seg *) wqe)->rkey =
- cl_hton32(wr->remote_ops.rkey);
+ wr->remote_ops.rkey;
((struct mthca_raddr_seg *) wqe)->reserved = 0;
wqe += sizeof (struct mthca_raddr_seg);
size += sizeof (struct mthca_raddr_seg) / 16;
((struct mthca_raddr_seg *) wqe)->raddr =
cl_hton64(wr->remote_ops.vaddr);
((struct mthca_raddr_seg *) wqe)->rkey =
- cl_hton32(wr->remote_ops.rkey);
+ wr->remote_ops.rkey;
((struct mthca_raddr_seg *) wqe)->reserved = 0;
wqe += sizeof (struct mthca_raddr_seg);
size += sizeof (struct mthca_raddr_seg) / 16;
case IB_QPT_UNRELIABLE_DGRM:
{
- struct ibv_ah *ibv_ah = ((mlnx_ual_av_info_t*)wr->dgrm.ud.h_av)->ibv_ah;
+ struct mthca_ah *ah = ((struct mthca_ah *)wr->dgrm.ud.h_av);
memcpy(((struct mthca_arbel_ud_seg *) wqe)->av,
- to_mah(ibv_ah)->av, sizeof ( struct mthca_av));
+ ah->av, sizeof ( struct mthca_av));
((struct mthca_arbel_ud_seg *) wqe)->dqpn = wr->dgrm.ud.remote_qp;
((struct mthca_arbel_ud_seg *) wqe)->qkey = wr->dgrm.ud.remote_qkey;
next->ee_nds = sz;
for (scatter = (void *) (next + 1);
- (void *) scatter < (void *) ((char*)next + (1 << qp->rq.wqe_shift));
+ (void *) scatter < (void *) ((char *)next + (1 << qp->rq.wqe_shift));
++scatter)
scatter->lkey = cl_hton32(MTHCA_INVAL_LKEY);
}
req->mr.pdn = to_mpd(to_mctx(context)->pd)->pdn;
req->mr.access_flags = MTHCA_ACCESS_LOCAL_WRITE;
req->user_handle = (uint64_t)(ULONG_PTR)cq;
+#if 1
req->cqe = *p_cqe;
*p_cqe = nent-1;
// *p_cqe = *p_cqe; // return the same value
// cq->ibv_cq.cqe = nent -1;
+#else
+ req->cqe = nent;
+ *p_cqe = *p_cqe; // return the same value
+#endif
return &cq->ibv_cq;
err_set_db:
return 0;
}
-
-int mthca_create_ah_pre(struct ibv_pd *pd, struct ibv_create_ah *req)
-{
- void *buf;
-
- if (posix_memalign(&buf, g_page_size, g_page_size))
- return -ENOMEM;
-
- req->mr.start = (uint64_t)(ULONG_PTR)buf;
- req->mr.length = g_page_size;
- req->mr.hca_va = (uint64_t)(ULONG_PTR)buf;
- req->mr.pd_handle = pd->handle;
- req->mr.pdn = to_mpd(pd)->pdn;
- req->mr.access_flags = 0; //local read
- return 0;
-}
-
-struct ibv_ah *mthca_create_ah_post(struct ibv_pd *pd,
- struct ibv_ah_attr *attr, struct ibv_create_ah_resp *resp)
-{
- struct mthca_ah *ah;
-
- ah = cl_malloc(sizeof *ah);
- if (!ah)
- return NULL;
-
- if (mthca_alloc_av(to_mpd(pd), attr, ah, resp)) {
- cl_free(ah);
- return NULL;
- }
- ah->ibv_ah.pd = pd;
-
- return &ah->ibv_ah;
-}
-
-int mthca_destroy_ah(struct ibv_ah *ah)
-{
- mthca_free_av(to_mah(ah));
- cl_free(to_mah(ah));
-
- return 0;
-}
-
int mthca_attach_mcast(struct ibv_qp *qp, union ibv_gid *gid, uint16_t lid)
{
#ifdef WIN_TO_BE_CHANGED
uint32_t rkey;
} atomic;
struct {
- struct ibv_ah *ah;
+ struct mthca_ah *ah;
uint32_t remote_qpn;
uint32_t remote_qkey;
} ud;
struct _ib_send_wr **bad_wr);
int (*post_recv)(struct ibv_qp *qp, struct _ib_recv_wr *wr,
struct _ib_recv_wr **bad_wr);
- int (*create_ah_pre)(struct ibv_pd *pd, struct ibv_create_ah *req);
- struct ibv_ah *(*create_ah_post)(struct ibv_pd *pd,
- struct ibv_ah_attr *attr, struct ibv_create_ah_resp *resp);
- int (*destroy_ah)(struct ibv_ah *ah);
int (*attach_mcast)(struct ibv_qp *qp, union ibv_gid *gid,
uint16_t lid);
int (*detach_mcast)(struct ibv_qp *qp, union ibv_gid *gid,