void\r
setup_ci_interface(\r
IN const ib_net64_t ca_guid,\r
+ IN const int is_livefish,\r
OUT ci_interface_t *p_interface );\r
\r
void\r
mlnx_memory_if(\r
IN OUT ci_interface_t *p_interface );\r
\r
+void\r
+mlnx_memory_if_livefish(\r
+ IN OUT ci_interface_t *p_interface );\r
+\r
void\r
mlnx_ecc_if(\r
IN OUT ci_interface_t *p_interface );\r
} hca_dev_ext_t;\r
\r
#define EXT_FROM_HOB(hob_p) (container_of(hob_p, hca_dev_ext_t, hca.hob))\r
+#define HCA_FROM_HOB(hob_p) (container_of(hob_p, mlnx_hca_t, hob))\r
+#define MDEV_FROM_HOB(hob_p) (HCA_FROM_HOB(hob_p)->mdev)\r
#define IBDEV_FROM_HOB(hob_p) (&EXT_FROM_HOB(hob_p)->hca.mdev->ib_dev)\r
#define HOBUL_FROM_HOB(hob_p) (&EXT_FROM_HOB(hob_p)->hca.hobul)\r
#define HOB_FROM_IBDEV(dev_p) (mlnx_hob_t *)&dev_p->mdev->ext->hca.hob\r
//NB: p_pmr_create->hca_page_size is not used; it is assumed to always be the same
\r
// register pmr \r
- if (p_pmr_create->length == (uint64_t)-1LL)\r
+ if (p_pmr_create->length == (uint64_t)-1i64)\r
{\r
mr_p = ibv_get_dma_mr( ib_pd_p,\r
map_qp_ibal_acl(p_pmr_create->access_ctrl) );\r
p_interface->destroy_mw = mlnx_destroy_mw;\r
}\r
\r
+/* Livefish-mode CI memory interface: exposes only the verbs needed for
+ * flash access (register_pmr / deregister_mr).  All other interface
+ * slots stay unset, since a livefish device has no operational HCA core. */
+void
+mlnx_memory_if_livefish(
+	IN	OUT			ci_interface_t				*p_interface )
+{
+	p_interface->register_pmr = mlnx_register_pmr;
+	p_interface->deregister_mr = mlnx_deregister_mr;
+}
+\r
void hca_disable_pci(PBUS_INTERFACE_STANDARD phcaBusIfc)
{
	// no need to disable the card, so just release the PCI bus i/f
-	phcaBusIfc->InterfaceDereference( phcaBusIfc->Context );
+	if (phcaBusIfc) {
+		phcaBusIfc->InterfaceDereference( phcaBusIfc->Context );
+		// NOTE(review): this NULLs only the local parameter copy; the
+		// caller's pointer is unchanged -- confirm that is intentional.
+		phcaBusIfc = NULL;
+	}
}
\r
\r
extern const char *mthca_version;\r
\r
-\r
static NTSTATUS\r
hca_start(\r
IN DEVICE_OBJECT* const p_dev_obj,\r
return NULL;\r
}\r
\r
- setup_ci_interface( p_ext->hca.guid, pIfc );\r
+ setup_ci_interface( p_ext->hca.guid,\r
+ !!mthca_is_livefish(p_ext->hca.mdev),\r
+ pIfc );\r
\r
pIfc->p_hca_dev = p_ext->cl_ext.p_pdo;\r
pIfc->vend_id = (uint32_t)p_ext->hcaConfig.VendorID;\r
}\r
\r
\r
+/* Livefish substitute for mthca_get_dev_info(): the firmware cannot be
+ * queried in flash-recovery mode, so fabricate a per-device pseudo node
+ * GUID from the kernel address of the mthca_dev object, mirror it into
+ * ib_dev.node_guid, and report hw_id 0.  Always returns 0 (success). */
+static int mthca_get_livefish_info(struct mthca_dev *mdev, __be64 *node_guid, u32 *hw_id)
+{
+	*node_guid = cl_hton64((uint64_t)(ULONG_PTR)mdev);
+	mdev->ib_dev.node_guid = *node_guid;
+	*hw_id = 0;
+	return 0;
+}
+\r
static NTSTATUS\r
hca_start(\r
IN DEVICE_OBJECT* const p_dev_obj,\r
\r
/*leo: get node GUID */\r
{\r
- int err = mthca_get_dev_info( p_ext->hca.mdev, &p_ext->hca.guid, &p_ext->hca.hw_ver );\r
+ int err;\r
+ if (mthca_is_livefish(p_ext->hca.mdev)) \r
+ err = mthca_get_livefish_info( p_ext->hca.mdev, &p_ext->hca.guid, &p_ext->hca.hw_ver );\r
+ else\r
+ err = mthca_get_dev_info( p_ext->hca.mdev, &p_ext->hca.guid, &p_ext->hca.hw_ver );\r
if (err) {\r\r
//TODO: no cleanup on error\r
HCA_PRINT( TRACE_LEVEL_ERROR,HCA_DBG_PNP, \r
\r
ib_dev = &p_hca->mdev->ib_dev;\r
\r
+ if (mthca_is_livefish(p_hca->mdev)) \r
+ goto done;\r
+\r
HCA_PRINT(TRACE_LEVEL_INFORMATION ,HCA_DBG_SHIM,\r
("context 0x%p\n", ca_context));\r
status = mlnx_hobs_set_cb(&p_hca->hob,\r
//TODO: do we need something for kernel users ?\r
\r
// Return pointer to HOB object\r
+done: \r
if (ph_ca) *ph_ca = &p_hca->hob;\r
status = IB_SUCCESS;\r
\r
HCA_PRINT(TRACE_LEVEL_ERROR , HCA_DBG_SHIM ,("mthca_modify_port failed (%d) \n",err));\r
goto err_modify_port;\r
}\r
- \r
+\r
status = IB_SUCCESS;\r
\r
err_modify_port:\r
mlnx_close_ca (\r
IN ib_ca_handle_t h_ca)\r
{\r
+ mlnx_hob_t *hob_p = (mlnx_hob_t *)h_ca;\r
HCA_ENTER(HCA_DBG_SHIM);\r
\r
- // release HOB resources\r
- mlnx_hobs_remove(h_ca);\r
+ if (mthca_is_livefish(MDEV_FROM_HOB( hob_p ))) \r
+ goto done;\r
\r
- //TODO: release HOBUL resources\r
+ mlnx_hobs_remove(h_ca);\r
\r
+done:\r
HCA_EXIT(HCA_DBG_SHIM);\r
\r
return IB_SUCCESS;\r
struct ib_ucontext *p_ucontext = (struct ib_ucontext *)h_um_ca;\r
UNREFERENCED_PARAMETER(h_ca);\r
\r
+ if (mthca_is_livefish(to_mdev(p_ucontext->device)))\r
+ goto done;\r
unmap_crspace_for_all(p_ucontext);\r
+done: \r
if( !p_ucontext->pd )\r
cl_free( h_um_ca );\r
else\r
if (ph_av) *ph_av = (ib_av_handle_t)ib_av_p;\r
\r
status = IB_SUCCESS;\r
- \r
+\r
err_alloc_av: \r
err_inval_params:\r
if (p_umv_buf && p_umv_buf->command) \r
OUT ib_qp_handle_t *ph_qp,\r
IN OUT ci_umv_buf_t *p_umv_buf )\r
{\r
- int err;\r
- ib_api_status_t status;\r
- struct ib_qp * ib_qp_p;\r
- struct mthca_qp *qp_p;\r
- struct ib_qp_init_attr qp_init_attr;\r
- struct ib_ucontext *p_context = NULL;\r
- struct ib_pd *ib_pd_p = (struct ib_pd *)h_pd;\r
- struct ib_device *ib_dev = ib_pd_p->device;\r
- mlnx_hob_t *hob_p = HOB_FROM_IBDEV(ib_dev);\r
- \r
- HCA_ENTER(HCA_DBG_QP);\r
-\r
- \r
- if( p_umv_buf && p_umv_buf->command ) {\r
- // sanity checks \r
- if (p_umv_buf->input_size < sizeof(struct ibv_create_qp) ||\r
- p_umv_buf->output_size < sizeof(struct ibv_create_qp_resp) ||\r
- !p_umv_buf->p_inout_buf) {\r
- status = IB_INVALID_PARAMETER;\r
- goto err_inval_params;\r
- }\r
- p_context = ib_pd_p->ucontext;\r
- }\r
- else \r
- p_context = NULL;\r
-\r
- // prepare the parameters\r
- RtlZeroMemory(&qp_init_attr, sizeof(qp_init_attr));\r
- qp_init_attr.qp_type = p_create_attr->qp_type;\r
- qp_init_attr.event_handler = qp_event_handler;\r
- qp_init_attr.qp_context = hob_p;\r
- qp_init_attr.recv_cq = (struct ib_cq *)p_create_attr->h_rq_cq;\r
- qp_init_attr.send_cq = (struct ib_cq *)p_create_attr->h_sq_cq;\r
- qp_init_attr.cap.max_recv_sge = p_create_attr->rq_sge;\r
- qp_init_attr.cap.max_send_sge = p_create_attr->sq_sge;\r
- qp_init_attr.cap.max_recv_wr = p_create_attr->rq_depth;\r
- qp_init_attr.cap.max_send_wr = p_create_attr->sq_depth;\r
- qp_init_attr.sq_sig_type = (p_create_attr->sq_signaled) ? IB_SIGNAL_ALL_WR : IB_SIGNAL_REQ_WR;\r
- qp_init_attr.port_num = port_num;\r
-\r
-\r
- // create qp \r
- ib_qp_p = ibv_create_qp( ib_pd_p, &qp_init_attr, p_context, p_umv_buf );\r
- if (IS_ERR(ib_qp_p)) {\r
- err = PTR_ERR(ib_qp_p);\r
- HCA_PRINT(TRACE_LEVEL_ERROR , HCA_DBG_QP,\r
- ("ibv_create_qp failed (%d)\n", err));\r
- status = errno_to_iberr(err);\r
- goto err_create_qp;\r
- }\r
- \r
- // fill the object\r
- qp_p = (struct mthca_qp *)ib_qp_p;\r
- qp_p->qp_context = (void*)qp_context;\r
- qp_p->qp_init_attr = qp_init_attr;\r
+ int err;\r
+ ib_api_status_t status;\r
+ struct ib_qp * ib_qp_p;\r
+ struct mthca_qp *qp_p;\r
+ struct ib_qp_init_attr qp_init_attr;\r
+ struct ib_ucontext *p_context = NULL;\r
+ struct ib_pd *ib_pd_p = (struct ib_pd *)h_pd;\r
+ struct ib_device *ib_dev = ib_pd_p->device;\r
+ mlnx_hob_t *hob_p = HOB_FROM_IBDEV(ib_dev);\r
\r
- // Query QP to obtain requested attributes\r
- if (p_qp_attr) {\r
- status = mlnx_query_qp ((ib_qp_handle_t)ib_qp_p, p_qp_attr, p_umv_buf);\r
- if (status != IB_SUCCESS)\r
- goto err_query_qp;\r
+ HCA_ENTER(HCA_DBG_QP);\r
+\r
+ if( p_umv_buf && p_umv_buf->command ) {\r
+ // sanity checks \r
+ if (p_umv_buf->input_size < sizeof(struct ibv_create_qp) ||\r
+ p_umv_buf->output_size < sizeof(struct ibv_create_qp_resp) ||\r
+ !p_umv_buf->p_inout_buf) {\r
+ status = IB_INVALID_PARAMETER;\r
+ goto err_inval_params;\r
}\r
- \r
- // return the results\r
- if (ph_qp) *ph_qp = (ib_qp_handle_t)ib_qp_p;\r
- \r
- status = IB_SUCCESS;\r
- goto end;\r
+ p_context = ib_pd_p->ucontext;\r
+ }\r
+ else \r
+ p_context = NULL;\r
+\r
+ // prepare the parameters\r
+ RtlZeroMemory(&qp_init_attr, sizeof(qp_init_attr));\r
+ qp_init_attr.qp_type = p_create_attr->qp_type;\r
+ qp_init_attr.event_handler = qp_event_handler;\r
+ qp_init_attr.qp_context = hob_p;\r
+ qp_init_attr.recv_cq = (struct ib_cq *)p_create_attr->h_rq_cq;\r
+ qp_init_attr.send_cq = (struct ib_cq *)p_create_attr->h_sq_cq;\r
+ qp_init_attr.cap.max_recv_sge = p_create_attr->rq_sge;\r
+ qp_init_attr.cap.max_send_sge = p_create_attr->sq_sge;\r
+ qp_init_attr.cap.max_recv_wr = p_create_attr->rq_depth;\r
+ qp_init_attr.cap.max_send_wr = p_create_attr->sq_depth;\r
+ qp_init_attr.sq_sig_type = (p_create_attr->sq_signaled) ? IB_SIGNAL_ALL_WR : IB_SIGNAL_REQ_WR;\r
+ qp_init_attr.port_num = port_num;\r
+\r
+\r
+ // create qp \r
+ ib_qp_p = ibv_create_qp( ib_pd_p, &qp_init_attr, p_context, p_umv_buf );\r
+ if (IS_ERR(ib_qp_p)) {\r
+ err = PTR_ERR(ib_qp_p);\r
+ HCA_PRINT(TRACE_LEVEL_ERROR , HCA_DBG_QP,\r
+ ("ibv_create_qp failed (%d)\n", err));\r
+ status = errno_to_iberr(err);\r
+ goto err_create_qp;\r
+ }\r
+\r
+ // fill the object\r
+ qp_p = (struct mthca_qp *)ib_qp_p;\r
+ qp_p->qp_context = (void*)qp_context;\r
+ qp_p->qp_init_attr = qp_init_attr;\r
+\r
+ // Query QP to obtain requested attributes\r
+ if (p_qp_attr) {\r
+ status = mlnx_query_qp ((ib_qp_handle_t)ib_qp_p, p_qp_attr, p_umv_buf);\r
+ if (status != IB_SUCCESS)\r
+ goto err_query_qp;\r
+ }\r
\r
- err_query_qp:\r
- ibv_destroy_qp( ib_qp_p );\r
- err_create_qp:\r
- err_inval_params:\r
- end:\r
- if (p_umv_buf && p_umv_buf->command) \r
- p_umv_buf->status = status;\r
- HCA_PRINT_EXIT(TRACE_LEVEL_ERROR ,HCA_DBG_QP ,\r
- ("completes with ERROR status %s\n", IB_GET_ERR_STR(status)));\r
- return status;\r
+ // return the results\r
+ if (ph_qp) *ph_qp = (ib_qp_handle_t)ib_qp_p;\r
+\r
+ status = IB_SUCCESS;\r
+ goto end;\r
+\r
+err_query_qp:\r
+ ibv_destroy_qp( ib_qp_p );\r
+err_create_qp:\r
+err_inval_params:\r
+end:\r
+ if (p_umv_buf && p_umv_buf->command) \r
+ p_umv_buf->status = status;\r
+ HCA_PRINT_EXIT(TRACE_LEVEL_ERROR ,HCA_DBG_QP ,\r
+ ("completes with ERROR status %s\n", IB_GET_ERR_STR(status)));\r
+ return status;\r
}\r
\r
ib_api_status_t\r
mlnx_hob_t *hob_p;\r
struct ib_device *ib_dev;\r
struct ib_ucontext *p_context;\r
- \r
+\r
HCA_ENTER(HCA_DBG_CQ);\r
\r
if( p_umv_buf ) {\r
ib_dev = IBDEV_FROM_HOB( hob_p );\r
}\r
\r
+ /* sanity check */\r
+ if (*p_size > (uint32_t)ib_dev->mdev->limits.max_cqes) {\r
+ status = IB_INVALID_CQ_SIZE;\r
+ goto err_cqe;\r
+ }\r
+\r
// allocate cq \r
ib_cq_p = ibv_create_cq(ib_dev, \r
cq_comp_handler, cq_event_handler,\r
\r
err_create_cq:\r
err_inval_params:\r
+err_cqe:\r
if (p_umv_buf && p_umv_buf->command) \r
p_umv_buf->status = status;\r
HCA_PRINT_EXIT(TRACE_LEVEL_ERROR ,HCA_DBG_CQ,\r
+/* Fills the CI (channel-interface) verbs dispatch table.  In livefish
+ * (flash-recovery) mode only the minimal memory-registration interface
+ * is exposed; otherwise the complete verbs set is wired up. */
void
setup_ci_interface(
	IN		const	ib_net64_t					ca_guid,
+	IN		const	int							is_livefish,
	IN	OUT			ci_interface_t				*p_interface )
{
	cl_memclr(p_interface, sizeof(*p_interface));
	/* The real interface. */
	p_interface->open_ca = mlnx_open_ca;
	p_interface->query_ca = mlnx_query_ca;
-	p_interface->modify_ca = mlnx_modify_ca; 
	p_interface->close_ca = mlnx_close_ca;
	p_interface->um_open_ca = mlnx_um_open;
	p_interface->um_close_ca = mlnx_um_close;

	p_interface->allocate_pd = mlnx_allocate_pd;
	p_interface->deallocate_pd = mlnx_deallocate_pd;
-
-	p_interface->create_av = mlnx_create_av;
-	p_interface->query_av = mlnx_query_av;
-	p_interface->modify_av = mlnx_modify_av;
-	p_interface->destroy_av = mlnx_destroy_av;
-
-	p_interface->create_qp = mlnx_create_qp;
-	p_interface->create_spl_qp = mlnx_create_spl_qp;
-	p_interface->modify_qp = mlnx_modify_qp;
-	p_interface->query_qp = mlnx_query_qp;
-	p_interface->destroy_qp = mlnx_destroy_qp;
-
-	p_interface->create_cq = mlnx_create_cq;
-	p_interface->resize_cq = mlnx_resize_cq;
-	p_interface->query_cq = mlnx_query_cq;
-	p_interface->destroy_cq = mlnx_destroy_cq;
-
-	p_interface->local_mad = mlnx_local_mad;
-	
	p_interface->vendor_call = fw_access_ctrl;

-	mlnx_memory_if(p_interface);
-	mlnx_direct_if(p_interface);
-	mlnx_mcast_if(p_interface);
+	/* livefish device: only the memory-registration verbs are usable */
+	if (is_livefish) {
+		mlnx_memory_if_livefish(p_interface);
+	}
+	else { 
+		/* fully operational HCA: wire up the complete verbs set */
+		p_interface->modify_ca = mlnx_modify_ca; 
+		
+		p_interface->create_av = mlnx_create_av;
+		p_interface->query_av = mlnx_query_av;
+		p_interface->modify_av = mlnx_modify_av;
+		p_interface->destroy_av = mlnx_destroy_av;
+
+		p_interface->create_qp = mlnx_create_qp;
+		p_interface->create_spl_qp = mlnx_create_spl_qp;
+		p_interface->modify_qp = mlnx_modify_qp;
+		p_interface->query_qp = mlnx_query_qp;
+		p_interface->destroy_qp = mlnx_destroy_qp;
+
+		p_interface->create_cq = mlnx_create_cq;
+		p_interface->resize_cq = mlnx_resize_cq;
+		p_interface->query_cq = mlnx_query_cq;
+		p_interface->destroy_cq = mlnx_destroy_cq;
+
+		p_interface->local_mad = mlnx_local_mad;
+	

+		mlnx_memory_if(p_interface);
+		mlnx_direct_if(p_interface);
+		mlnx_mcast_if(p_interface);
+	}

	return;
}
u64 page_size_cap;
u32 vendor_id;
u32 vendor_part_id;
- u8 board_id[16];
u32 hw_ver;
int max_qp;
int max_qp_wr;
continue;
if (i < 0 || i >= PAGE_SIZE * 8)
continue;
- snprintf(buf, sizeof buf, name, i);
+ snprintf(buf, sizeof(buf)-1, name, i);
+ buf[sizeof(buf)-1] = '\0';
if (!strncmp(buf, device->name, IB_DEVICE_NAME_MAX))
set_bit(i, inuse);
}
i = find_first_zero_bit((const unsigned long *)inuse, PAGE_SIZE * 8);
free_page(inuse);
- snprintf(buf, sizeof buf, name, i);
+ snprintf(buf, sizeof(buf)-1, name, i);
+ buf[sizeof(buf)-1] = '\0';
if (__ib_device_get_by_name(buf))
return -ENFILE;
else
val = 0;
- mask = cl_hton32(((1ull << desc[i].size_bits) - 1) << shift);
+ mask = cl_hton32(((1Ui64 << desc[i].size_bits) - 1) << shift);
addr = (__be32 *) buf + desc[i].offset_words;
*addr = (*addr & ~mask) | (cl_hton32(val) & mask);
} else if (desc[i].size_bits <= 64) {
else
val = 0;
- mask = CPU_2_BE64((~0ull >> (64 - desc[i].size_bits)) << shift);
+ mask = CPU_2_BE64((~0Ui64 >> (64 - desc[i].size_bits)) << shift);
addr = (__be64 *) ((__be32 *) buf + desc[i].offset_words);
*addr = (*addr & ~mask) | (cl_hton64(val) & mask);
} else {
__be32 *addr;
shift = 32 - desc[i].offset_bits - desc[i].size_bits;
- mask = ((1ull << desc[i].size_bits) - 1) << shift;
+ mask = ((1Ui64 << desc[i].size_bits) - 1) << shift;
addr = (__be32 *) buf + desc[i].offset_words;
val = (cl_ntoh32(*addr) & mask) >> shift;
value_write(desc[i].struct_offset_bytes,
__be64 *addr;
shift = 64 - desc[i].offset_bits - desc[i].size_bits;
- mask = (~0ull >> (64 - desc[i].size_bits)) << shift;
+ mask = (~0Ui64 >> (64 - desc[i].size_bits)) << shift;
addr = (__be64 *) buf + desc[i].offset_words;
val = (cl_ntoh64(*addr) & mask) >> shift;
value_write(desc[i].struct_offset_bytes,
#endif
/* live fishes */
+#ifndef PCI_DEVICE_ID_MELLANOX_TAVOR_BD
#define PCI_DEVICE_ID_MELLANOX_TAVOR_BD 0x5a45
+#endif
+
+#ifndef PCI_DEVICE_ID_MELLANOX_ARBEL_BD
#define PCI_DEVICE_ID_MELLANOX_ARBEL_BD 0x6279
+#endif
+
+#ifndef PCI_DEVICE_ID_MELLANOX_SINAI_OLD_BD
+#define PCI_DEVICE_ID_MELLANOX_SINAI_OLD_BD 0x5e8d
+#endif
+
+#ifndef PCI_DEVICE_ID_MELLANOX_SINAI_BD
+#define PCI_DEVICE_ID_MELLANOX_SINAI_BD 0x6275
+#endif
// ===========================================
// TYPES
/* Get the device number */
l_Status = IoGetDeviceProperty(pi_pPdo,
- DevicePropertyLocationInformation, sizeof(l_Buffer), &l_Buffer, &l_ResultLength);
+ DevicePropertyLocationInformation, sizeof(l_Buffer), l_Buffer, &l_ResultLength);
/* Verify if the function was successful */
if ( !NT_SUCCESS(l_Status) || !l_ResultLength ) {
// suppose that there is no more than N_PCI_DEVICES, belonging to PCI.SYS
#define N_PCI_DEVICES 256
// suppose that the PDO objects, once created, never get moved
- PDEVICE_OBJECT pdo[N_PCI_DEVICES];
+ PDEVICE_OBJECT *pdo;
int i, n_pdos = 0;
+ KIRQL irql;
+
+
+ pdo = (PDEVICE_OBJECT *)ExAllocatePoolWithTag(
+ NonPagedPool,
+ N_PCI_DEVICES * sizeof(PDEVICE_OBJECT),
+ MT_TAG_KERNEL );
+ if (!pdo)
+ return FALSE;
// suppose, that PDOs are added only at PASSIVE_LEVEL
- KIRQL irql = KeRaiseIrqlToDpcLevel();
+ irql = KeRaiseIrqlToDpcLevel();
// get to the PCI.SYS driver
l_pDrv = pi_pPdo->DriverObject;
}
}
- // check whether we found the PDO
- if (!l_pPdo)
- return FALSE;
*po_pPdo = l_pPdo;
- return TRUE;
+ ExFreePool(pdo);
+ return (BOOLEAN)!!*po_pPdo;
}
/*----------------------------------------------------------------*/
/* build sg list */
npages = (unsigned long)(NEXT_PAGE_ALIGN(size + mem->offset) >> PAGE_SHIFT);
+ if (!npages) {
+ ret = -EINVAL;
+ goto err_inval;
+ }
cur_base = (u64)(UINT_PTR)addr & PAGE_MASK;
while (npages) {
/* allocate a max large chunk (it's <= PAGE_SIZE) */
out:
ibv_umem_release(dev, mem);
-err_kmalloc:
+err_kmalloc: err_inval:
exit:
return ret;
}
lg = ffs(i) - 1;
if (lg < 12) {
HCA_PRINT(TRACE_LEVEL_WARNING ,HCA_DBG_LOW ,("Got FW area not aligned to 4K (%I64x/%lx).\n",
- (unsigned long long) mthca_icm_addr(&iter),
+ (u64) mthca_icm_addr(&iter),
mthca_icm_size(&iter)));
err = -EINVAL;
goto out;
for (i = 0; i < mthca_icm_size(&iter) >> lg; ++i) {
if (virt != -1) {
pages[nent * 2] = cl_hton64(virt);
- virt += 1ULL << lg;
+ virt += 1Ui64 << lg;
}
pages[nent * 2 + 1] = CPU_2_BE64((mthca_icm_addr(&iter) +
(i << lg)) | (lg - 12));
break;
case CMD_MAP_ICM:
HCA_PRINT(TRACE_LEVEL_VERBOSE,HCA_DBG_LOW,("Mapped %d chunks/%d KB at %I64x for ICM.\n",
- tc, ts, (unsigned long long) virt - (ts << 10)));
+ tc, ts, (u64) virt - (ts << 10)));
break;
}
* FW subSIZE_Tor version is at more signifant bits than minor
* version, so swap here.
*/
- dev->fw_ver = (dev->fw_ver & 0xffff00000000ull) |
- ((dev->fw_ver & 0xffff0000ull) >> 16) |
- ((dev->fw_ver & 0x0000ffffull) << 16);
+ dev->fw_ver = (dev->fw_ver & 0xffff00000000Ui64) |
+ ((dev->fw_ver & 0xffff0000Ui64) >> 16) |
+ ((dev->fw_ver & 0x0000ffffUi64) << 16);
MTHCA_GET(lg, outbox, QUERY_FW_MAX_CMD_OFFSET);
dev->cmd.max_cmds = 1 << lg;
MTHCA_GET(dev->catas_err.size, outbox, QUERY_FW_ERR_SIZE_OFFSET);
HCA_PRINT(TRACE_LEVEL_VERBOSE,HCA_DBG_LOW,("FW version %012I64x, max commands %d\n",
- (unsigned long long) dev->fw_ver, dev->cmd.max_cmds));
+ (u64) dev->fw_ver, dev->cmd.max_cmds));
HCA_PRINT(TRACE_LEVEL_VERBOSE,HCA_DBG_LOW,("Catastrophic error buffer at 0x%I64x, size 0x%x\n",
- (unsigned long long) dev->catas_err.addr, dev->catas_err.size));
+ (u64) dev->catas_err.addr, dev->catas_err.size));
if (mthca_is_memfree(dev)) {
(PAGE_SHIFT - 12);
HCA_PRINT(TRACE_LEVEL_VERBOSE,HCA_DBG_LOW,("Clear int @ %I64x, EQ arm @ %I64x, EQ set CI @ %I64x\n",
- (unsigned long long) dev->fw.arbel.clr_int_base,
- (unsigned long long) dev->fw.arbel.eq_arm_base,
- (unsigned long long) dev->fw.arbel.eq_set_ci_base));
+ (u64) dev->fw.arbel.clr_int_base,
+ (u64) dev->fw.arbel.eq_arm_base,
+ (u64) dev->fw.arbel.eq_set_ci_base));
} else {
MTHCA_GET(dev->fw.tavor.fw_start, outbox, QUERY_FW_START_OFFSET);
MTHCA_GET(dev->fw.tavor.fw_end, outbox, QUERY_FW_END_OFFSET);
HCA_PRINT(TRACE_LEVEL_VERBOSE,HCA_DBG_LOW,("FW size %d KB (start %I64x, end %I64x)\n",
(int) ((dev->fw.tavor.fw_end - dev->fw.tavor.fw_start) >> 10),
- (unsigned long long) dev->fw.tavor.fw_start,
- (unsigned long long) dev->fw.tavor.fw_end));
+ (u64) dev->fw.tavor.fw_start,
+ (u64) dev->fw.tavor.fw_end));
}
out:
HCA_PRINT(TRACE_LEVEL_VERBOSE,HCA_DBG_LOW,("HCA memory size %d KB (start %I64x, end %I64x)\n",
(int) ((dev->ddr_end - dev->ddr_start) >> 10),
- (unsigned long long) dev->ddr_start,
- (unsigned long long) dev->ddr_end));
+ (u64) dev->ddr_start,
+ (u64) dev->ddr_end));
out:
mthca_free_mailbox(dev, mailbox);
HCA_PRINT(TRACE_LEVEL_VERBOSE,HCA_DBG_LOW,("HCA memory size %d KB (start %I64x, end %I64x)\n",
(int) ((dev->ddr_end - dev->ddr_start) >> 10),
- (unsigned long long) dev->ddr_start,
- (unsigned long long) dev->ddr_end));
+ (u64) dev->ddr_start,
+ (u64) dev->ddr_end));
out:
mthca_free_mailbox(dev, mailbox);
}
HCA_PRINT(TRACE_LEVEL_VERBOSE,HCA_DBG_LOW,("Max ICM size %I64d MB\n",
- (unsigned long long) dev_lim->hca.arbel.max_icm_sz >> 20));
+ (u64) dev_lim->hca.arbel.max_icm_sz >> 20));
}
else {
MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_SRQ_SZ_OFFSET);
if (!err)
HCA_PRINT(TRACE_LEVEL_VERBOSE,HCA_DBG_LOW,("Mapped page at %I64x to %I64x for ICM.\n",
- (unsigned long long) dma_addr, (unsigned long long) virt));
+ (u64) dma_addr, (u64) virt));
return err;
}
int mthca_UNMAP_ICM(struct mthca_dev *dev, u64 virt, u32 page_count, u8 *status)
{
HCA_PRINT(TRACE_LEVEL_VERBOSE,HCA_DBG_LOW,("Unmapping %d pages at %I64x from ICM.\n",
- page_count, (unsigned long long) virt));
+ page_count, (u64) virt));
return mthca_cmd(dev, virt, page_count, 0, CMD_UNMAP_ICM, CMD_TIME_CLASS_B, status);
}
{
HCA_PRINT(TRACE_LEVEL_VERBOSE,HCA_DBG_LOW,("%s mask %016I64x for eqn %d\n",
unmap ? "Clearing" : "Setting",
- (unsigned long long) event_mask, eq_num));
+ (u64) event_mask, eq_num));
return mthca_cmd(dev, event_mask, (unmap << 31) | eq_num,
0, CMD_MAP_EQ, CMD_TIME_CLASS_B, status);
}
MTHCA_FLAG_NO_LAM = 1 << 5,
MTHCA_FLAG_FMR = 1 << 6,
MTHCA_FLAG_MEMFREE = 1 << 7,
- MTHCA_FLAG_PCIE = 1 << 8
+ MTHCA_FLAG_PCIE = 1 << 8,
+ MTHCA_FLAG_LIVEFISH = 1 << 9
};
enum {
return dev->mthca_flags & MTHCA_FLAG_MEMFREE;
}
-void mthca_get_av_params( struct mthca_ah *ah_p, u8 *port_num, __be16 *dlid, u8 *sr, u8 *path_bits );
-
-void mthca_set_av_params( struct mthca_dev *dev, struct mthca_ah *ah_p, struct ib_ah_attr *ah_attr );
-
-int ib_uverbs_init(void);
-void ib_uverbs_cleanup(void);
-int mthca_ah_grh_present(struct mthca_ah *ah);
-
-
-
-
-
VOID
WriteEventLogEntry(
PVOID pi_pIoObject,
...
);
+
+/* Returns nonzero if the device was probed in livefish (flash-recovery)
+ * mode, i.e. MTHCA_FLAG_LIVEFISH was set at probe time. */
+static inline int mthca_is_livefish(struct mthca_dev *mdev)
+{
+	return mdev->mthca_flags & MTHCA_FLAG_LIVEFISH;
+}
+
+void mthca_get_av_params( struct mthca_ah *ah_p, u8 *port_num, __be16 *dlid, u8 *sr, u8 *path_bits );
+
+void mthca_set_av_params( struct mthca_dev *dev, struct mthca_ah *ah_p, struct ib_ah_attr *ah_attr );
+
+int ib_uverbs_init(void);
+void ib_uverbs_cleanup(void);
+int mthca_ah_grh_present(struct mthca_ah *ah);
+
+
#endif /* MTHCA_DEV_H */
MTHCA_EVENT_TYPE_SRQ_LIMIT = 0x14
};
-#define MTHCA_ASYNC_EVENT_MASK ((1ULL << MTHCA_EVENT_TYPE_PATH_MIG) | \
- (1ULL << MTHCA_EVENT_TYPE_COMM_EST) | \
- (1ULL << MTHCA_EVENT_TYPE_SQ_DRAINED) | \
- (1ULL << MTHCA_EVENT_TYPE_CQ_ERROR) | \
- (1ULL << MTHCA_EVENT_TYPE_WQ_CATAS_ERROR) | \
- (1ULL << MTHCA_EVENT_TYPE_EEC_CATAS_ERROR) | \
- (1ULL << MTHCA_EVENT_TYPE_PATH_MIG_FAILED) | \
- (1ULL << MTHCA_EVENT_TYPE_WQ_INVAL_REQ_ERROR) | \
- (1ULL << MTHCA_EVENT_TYPE_WQ_ACCESS_ERROR) | \
- (1ULL << MTHCA_EVENT_TYPE_LOCAL_CATAS_ERROR) | \
- (1ULL << MTHCA_EVENT_TYPE_PORT_CHANGE) | \
- (1ULL << MTHCA_EVENT_TYPE_ECC_DETECT))
-#define MTHCA_SRQ_EVENT_MASK ((1ULL << MTHCA_EVENT_TYPE_SRQ_CATAS_ERROR) | \
- (1ULL << MTHCA_EVENT_TYPE_SRQ_QP_LAST_WQE) | \
- (1ULL << MTHCA_EVENT_TYPE_SRQ_LIMIT))
-
-#define MTHCA_CMD_EVENT_MASK (1ULL << MTHCA_EVENT_TYPE_CMD)
+#define MTHCA_ASYNC_EVENT_MASK ((1Ui64 << MTHCA_EVENT_TYPE_PATH_MIG) | \
+ (1Ui64 << MTHCA_EVENT_TYPE_COMM_EST) | \
+ (1Ui64 << MTHCA_EVENT_TYPE_SQ_DRAINED) | \
+ (1Ui64 << MTHCA_EVENT_TYPE_CQ_ERROR) | \
+ (1Ui64 << MTHCA_EVENT_TYPE_WQ_CATAS_ERROR) | \
+ (1Ui64 << MTHCA_EVENT_TYPE_EEC_CATAS_ERROR) | \
+ (1Ui64 << MTHCA_EVENT_TYPE_PATH_MIG_FAILED) | \
+ (1Ui64 << MTHCA_EVENT_TYPE_WQ_INVAL_REQ_ERROR) | \
+ (1Ui64 << MTHCA_EVENT_TYPE_WQ_ACCESS_ERROR) | \
+ (1Ui64 << MTHCA_EVENT_TYPE_LOCAL_CATAS_ERROR) | \
+ (1Ui64 << MTHCA_EVENT_TYPE_PORT_CHANGE) | \
+ (1Ui64 << MTHCA_EVENT_TYPE_ECC_DETECT))
+#define MTHCA_SRQ_EVENT_MASK ((1Ui64 << MTHCA_EVENT_TYPE_SRQ_CATAS_ERROR) | \
+ (1Ui64 << MTHCA_EVENT_TYPE_SRQ_QP_LAST_WQE) | \
+ (1Ui64 << MTHCA_EVENT_TYPE_SRQ_LIMIT))
+
+#define MTHCA_CMD_EVENT_MASK (1Ui64 << MTHCA_EVENT_TYPE_CMD)
#define MTHCA_EQ_DB_INC_CI (1 << 24)
#define MTHCA_EQ_DB_REQ_NOT (2 << 24)
enum __hca_type {
TAVOR, /* MT23108 */
ARBEL_COMPAT, /* MT25208 in Tavor compat mode */
- ARBEL_NATIVE, /* MT25208 with extended features */
- SINAI /* MT25204 */
+ ARBEL_NATIVE, /* MT25218 with extended features */
+ SINAI, /* MT25204 */
+ LIVEFISH /* a burning device */
};
#define MTHCA_FW_VER(major, minor, subminor) \
int is_memfree;
int is_pcie;
} mthca_hca_table[] = {
- { MTHCA_FW_VER(3, 2, 0), MTHCA_FW_VER(3, 3, 3), 0, 0 },
- { MTHCA_FW_VER(4, 6, 0), MTHCA_FW_VER(4, 7, 0), 0, 1 },
- { MTHCA_FW_VER(5, 0, 0), MTHCA_FW_VER(5, 1, 300), 1, 1 },
- { MTHCA_FW_VER(1, 0, 0), MTHCA_FW_VER(1, 0, 1), 1, 1 }
+ { MTHCA_FW_VER(3, 2, 0), MTHCA_FW_VER(3, 3, 3), 0, 0 }, /* TAVOR */
+ { MTHCA_FW_VER(4, 6, 0), MTHCA_FW_VER(4, 7, 0), 0, 1 }, /* ARBEL_COMPAT */
+ { MTHCA_FW_VER(5, 0, 0), MTHCA_FW_VER(5, 1, 300), 1, 1 }, /* ARBEL_NATIVE */
+ { MTHCA_FW_VER(1, 0, 0), MTHCA_FW_VER(1, 0, 1), 1, 1 }, /* SINAI */
+ { MTHCA_FW_VER(0, 0, 0), MTHCA_FW_VER(0, 0, 0), 0, 0 } /* LIVEFISH */
};
HCA(TOPSPIN, ARBEL, ARBEL_NATIVE),
HCA(TOPSPIN, SINAI_OLD, SINAI),
HCA(TOPSPIN, SINAI, SINAI),
+ // live fishes
+ HCA(MELLANOX, TAVOR_BD, LIVEFISH),
+ HCA(MELLANOX, ARBEL_BD, LIVEFISH),
+ HCA(MELLANOX, SINAI_OLD_BD, LIVEFISH),
+ HCA(MELLANOX, SINAI_BD, LIVEFISH),
+ HCA(TOPSPIN, TAVOR_BD, LIVEFISH),
+ HCA(TOPSPIN, ARBEL_BD, LIVEFISH),
+ HCA(TOPSPIN, SINAI_OLD_BD, LIVEFISH),
+ HCA(TOPSPIN, SINAI_BD, LIVEFISH),
};
#define MTHCA_PCI_TABLE_SIZE (sizeof(mthca_pci_table)/sizeof(struct pci_device_id))
}
HCA_PRINT(TRACE_LEVEL_INFORMATION ,HCA_DBG_LOW , ("%I64d KB of HCA context requires %I64d KB aux memory.\n",
- (unsigned long long) icm_size >> 10,
- (unsigned long long) aux_pages << 2));
+ (u64) icm_size >> 10,
+ (u64) aux_pages << 2));
mdev->fw.arbel.aux_icm = mthca_alloc_icm(mdev, (int)aux_pages,
GFP_HIGHUSER | __GFP_NOWARN);
goto err_uar_table_free;
}
- mdev->kar = ioremap(mdev->driver_uar.pfn << PAGE_SHIFT, PAGE_SIZE,&mdev->kar_size);
+ mdev->kar = ioremap((io_addr_t)mdev->driver_uar.pfn << PAGE_SHIFT, PAGE_SIZE,&mdev->kar_size);
if (!mdev->kar) {
HCA_PRINT_EV(TRACE_LEVEL_ERROR,HCA_DBG_LOW,("Couldn't map kernel access region, "
"aborting.\n"));
/* allocate mdev structure */
mdev = kmalloc(sizeof *mdev, GFP_KERNEL);
if (!mdev) {
- HCA_PRINT_EV(TRACE_LEVEL_ERROR ,HCA_DBG_LOW ,("Device struct alloc failed, "
+ // can't use HCA_PRINT_EV here !
+ HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_LOW ,("Device struct alloc failed, "
"aborting.\n"));
status = STATUS_INSUFFICIENT_RESOURCES;
goto end;
/* set some fields */
RtlZeroMemory(mdev, sizeof *mdev);
mdev->ext = ext; /* pointer to DEVICE OBJECT extension */
+ mdev->hca_type = p_id->driver_data;
+ mdev->ib_dev.mdev = mdev;
+ if (p_id->driver_data == LIVEFISH)
+ mdev->mthca_flags |= MTHCA_FLAG_LIVEFISH;
+ if (mthca_is_livefish(mdev))
+ goto done;
if (ext->hca_hidden)
mdev->mthca_flags |= MTHCA_FLAG_DDR_HIDDEN;
if (mthca_hca_table[p_id->driver_data].is_memfree)
mdev->mthca_flags |= MTHCA_FLAG_MEMFREE;
if (mthca_hca_table[p_id->driver_data].is_pcie)
mdev->mthca_flags |= MTHCA_FLAG_PCIE;
-
+
//TODO: after we have a FW, capable of reset,
// write a routine, that only presses the button
goto err_cleanup;
}
+ done:
ext->hca.mdev = mdev;
mdev->state = MTHCA_DEV_INITIALIZED;
return 0;
int p;
if (mdev) {
+ if (mthca_is_livefish(mdev))
+ goto done;
mthca_unregister_device(mdev);
for (p = 1; p <= mdev->limits.num_ports; ++p)
mthca_cleanup_uar_table(mdev);
mthca_close_hca(mdev);
mthca_cmd_cleanup(mdev);
-
+done:
kfree(mdev);
ext->hca.mdev = NULL;
}
HCA_PRINT(TRACE_LEVEL_VERBOSE,HCA_DBG_LOW,
("Allocated/max chunks %d:%d, reserved/max objects %#x:%#x, one/total size %#x:%#x at %I64x \n",
- i, num_icm, reserved, nobj, obj_size, nobj * obj_size, (unsigned long long) virt));
+ i, num_icm, reserved, nobj, obj_size, nobj * obj_size, (u64) virt));
return table;
HCA_PRINT(TRACE_LEVEL_VERBOSE,HCA_DBG_LOW,
( "Released chunks %d, objects %#x, one/total size %#x:%#x at %I64x \n",
table->num_icm, table->num_obj, table->obj_size,
- table->num_obj * table->obj_size, (unsigned long long) table->virt));
+ table->num_obj * table->obj_size, (u64) table->virt));
kfree(table);
}
mthca_mpt_access_t access, struct mthca_mr *mr)
{
mr->mtt = NULL;
- return mthca_mr_alloc(dev, pd, 12, 0, ~0ULL, access, mr);
+ return mthca_mr_alloc(dev, pd, 12, 0, ~0Ui64, access, mr);
}
int mthca_mr_alloc_phys(struct mthca_dev *dev, u32 pd,
}
mpt_entry.lkey = cl_hton32(key);
- mpt_entry.length = CPU_2_BE64(list_len * (1ull << fmr->attr.page_shift));
+ mpt_entry.length = CPU_2_BE64(list_len * (1Ui64 << fmr->attr.page_shift));
mpt_entry.start = cl_hton64(iova);
__raw_writel((u32) mpt_entry.lkey, &fmr->mem.tavor.mpt->key);
fmr->mem.arbel.mpt->key = cl_hton32(key);
fmr->mem.arbel.mpt->lkey = cl_hton32(key);
- fmr->mem.arbel.mpt->length = CPU_2_BE64(list_len * (1ull << fmr->attr.page_shift));
+ fmr->mem.arbel.mpt->length = CPU_2_BE64(list_len * (1Ui64 << fmr->attr.page_shift));
fmr->mem.arbel.mpt->start = cl_hton64(iova);
wmb();
if (total_size > mem_avail) {
HCA_PRINT(TRACE_LEVEL_ERROR,HCA_DBG_LOW,("Profile requires 0x%I64x bytes; "
"won't in 0x%I64x bytes of context memory.\n",
- (unsigned long long) total_size,
- (unsigned long long) mem_avail));
+ (u64) total_size,
+ (u64) mem_avail));
kfree(profile);
return (u64)-ENOMEM;
}
HCA_PRINT(TRACE_LEVEL_VERBOSE,HCA_DBG_LOW,("profile[%2d]--%2d/%2d @ 0x%16I64x "
"(size 0x%8I64x)\n",
i, profile[i].type, profile[i].log_num,
- (unsigned long long) profile[i].start,
- (unsigned long long) profile[i].size));
+ (u64) profile[i].start,
+ (u64) profile[i].size));
}
if (mthca_is_memfree(dev)){
u8 status;
+ RtlZeroMemory(props, sizeof *props);
+
+ if (mthca_is_livefish(mdev)) {
+ props->max_pd = 1;
+ props->vendor_id = mdev->ext->hcaConfig.VendorID;
+ props->vendor_part_id = mdev->ext->hcaConfig.DeviceID;
+ return 0;
+ }
+
in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL);
out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
if (!in_mad || !out_mad)
goto out;
}
- RtlZeroMemory(props, sizeof *props);
props->fw_ver = mdev->fw_ver;
props->device_cap_flags = mdev->device_cap_flags;
props->vendor_id = cl_ntoh32(*(__be32 *) (out_mad->data + 36)) &
props->vendor_part_id = cl_ntoh16(*(__be16 *) (out_mad->data + 30));
props->hw_ver = cl_ntoh32(*(__be32 *) (out_mad->data + 32));
memcpy(&props->sys_image_guid, out_mad->data + 4, 8);
- props->max_mr_size = ~0ull;
+ props->max_mr_size = ~0Ui64;
props->page_size_cap = mdev->limits.page_size_cap;
props->max_qp = mdev->limits.num_qps - mdev->limits.reserved_qps;
props->max_qp_wr = mdev->limits.max_wqes;
err = -ENOMEM;
goto err_nomem;
}
+
+ if (mthca_is_livefish(to_mdev(ibdev)))
+ goto done;
err = mthca_uar_alloc(to_mdev(ibdev), &context->uar);
if (err)
*/
/* map UAR to kernel */
- context->kva = ioremap(context->uar.pfn << PAGE_SHIFT, PAGE_SIZE,&context->uar_size);
+ context->kva = ioremap((io_addr_t)context->uar.pfn << PAGE_SHIFT, PAGE_SIZE,&context->uar_size);
if (!context->kva) {
HCA_PRINT( TRACE_LEVEL_ERROR, HCA_DBG_LOW ,("Couldn't map kernel access region, aborting.\n") );
err = -ENOMEM;
goto err_init_user;
}
+done:
err = ib_copy_to_umv_buf(p_umv_buf, &uresp, sizeof uresp);
if (err)
goto err_copy_to_umv_buf;
{
struct mthca_ucontext *mucontext = to_mucontext(context);
+ if (mthca_is_livefish(to_mdev(context->device)))
+ goto done;
mthca_cleanup_user_db_tab(to_mdev(context->device), &mucontext->uar,
mucontext->db_tab);
MmUnmapLockedPages( mucontext->ibucontext.user_uar, mucontext->mdl );
IoFreeMdl(mucontext->mdl);
iounmap(mucontext->kva, PAGE_SIZE);
mthca_uar_free(to_mdev(context->device), &mucontext->uar);
+done:
kfree(mucontext);
return 0;
goto err_mem;
}
+ if (mthca_is_livefish(to_mdev(ibdev)))
+ goto done;
+
err = mthca_pd_alloc(to_mdev(ibdev), !context, pd);
if (err) {
goto err_pd_alloc;
}
+done:
if (p_umv_buf && p_umv_buf->command) {
resp.pd_handle = (u64)(UINT_PTR)pd;
resp.pdn = pd->pd_num;
int mthca_dealloc_pd(struct ib_pd *pd)
{
+ if (mthca_is_livefish(to_mdev(pd->device)))
+ goto done;
+
mthca_pd_free(to_mdev(pd->device), to_mpd(pd));
- kfree(pd);
+done:
+ kfree(pd);
return 0;
}
/* Find largest page shift we can use to cover buffers */
for (shift = PAGE_SHIFT; shift < 31; ++shift)
if (num_phys_buf > 1) {
- if ((1ULL << shift) & mask)
+ if ((1Ui64 << shift) & mask)
break;
} else {
- if (1ULL << shift >=
+ if (1Ui64 << shift >=
buffer_list[0].size +
- (buffer_list[0].addr & ((1ULL << shift) - 1)))
+ (buffer_list[0].addr & ((1Ui64 << shift) - 1)))
break;
}
- buffer_list[0].size += buffer_list[0].addr & ((1ULL << shift) - 1);
- buffer_list[0].addr &= ~0ull << shift;
+ buffer_list[0].size += buffer_list[0].addr & ((1Ui64 << shift) - 1);
+ buffer_list[0].addr &= ~0Ui64 << shift;
mr = kmalloc(sizeof *mr, GFP_KERNEL);
if (!mr)
npages = 0;
for (i = 0; i < num_phys_buf; ++i)
- npages += (int)((buffer_list[i].size + (1ULL << shift) - 1) >> shift);
+ npages += (int)((buffer_list[i].size + (1Ui64 << shift) - 1) >> shift);
if (!npages)
return &mr->ibmr;
n = 0;
for (i = 0; i < num_phys_buf; ++i)
for (j = 0;
- j < (buffer_list[i].size + (1ULL << shift) - 1) >> shift;
+ j < (buffer_list[i].size + (1Ui64 << shift) - 1) >> shift;
++j)
page_list[n++] = buffer_list[i].addr + ((u64) j << shift);
HCA_PRINT( TRACE_LEVEL_VERBOSE ,HCA_DBG_LOW ,("Registering memory at %I64x (iova %I64x) "
"in PD %x; shift %d, npages %d.\n",
- (unsigned long long) buffer_list[0].addr,
- (unsigned long long) *iova_start,
+ (u64) buffer_list[0].addr,
+ (u64) *iova_start,
to_mpd(pd)->pd_num,
shift, npages));
int err;
SPIN_LOCK_PREP(lh);
- err = mthca_set_qp_size(dev, cap, pd, qp);
- if (err)
- return err;
-
switch (type) {
case IB_QPT_RELIABLE_CONN: qp->transport = RC; break;
case IB_QPT_UNRELIABLE_CONN: qp->transport = UC; break;
default: return -EINVAL;
}
+ err = mthca_set_qp_size(dev, cap, pd, qp);
+ if (err)
+ return err;
+
qp->qpn = mthca_alloc(&dev->qp_table.alloc);
if (qp->qpn == -1)
return -ENOMEM;
RtlZeroMemory(context, sizeof *context);
- context->wqe_base_ds = CPU_2_BE64(1ULL << (srq->wqe_shift - 4));
+ context->wqe_base_ds = CPU_2_BE64(1Ui64 << (srq->wqe_shift - 4));
context->state_pd = cl_hton32(pd->pd_num);
context->lkey = cl_hton32(srq->mr.ibmr.lkey);
uint8_t reserved[3];
};
-struct __ibv_ah {
-	uint64_t user_handle;
-	int use_mr;
-};
-
+/* NOTE: the shared __ibv_ah base (and its use_mr flag) is removed;
+ * whether the AV lives in kernel-registered memory is now tracked by
+ * the mthca_ah.in_kernel field instead of being carried in the ABI. */
struct ibv_create_ah {
+	uint64_t user_handle;
	struct ibv_reg_mr mr;
};
struct ibv_create_ah_resp {
+	uint64_t user_handle;
	uint64_t start;
	struct ibv_reg_mr_resp mr;
};
struct ibv_ah_attr attr;\r
struct ibv_create_ah *p_create_av;\r
ib_api_status_t status = IB_SUCCESS;\r
- int AV_created = TRUE;\r
size_t size = max( sizeof(struct ibv_create_ah), sizeof(struct ibv_create_ah_resp) );\r
mlnx_ual_pd_info_t *p_pd = (mlnx_ual_pd_info_t *)h_uvp_pd;\r
mlnx_ual_hobul_t *p_hobul = p_pd->p_hobul;\r
\r
// try to create AV\r
err = mthca_alloc_av(to_mpd(p_pd->ibv_pd), &attr, ah, NULL);\r
- if (err == -EAGAIN) \r
- AV_created = FALSE;\r
- else\r
if (err) {\r
UVP_PRINT(TRACE_LEVEL_ERROR ,UVP_DBG_AV , ("mthca_alloc_av failed (%d)\n", err));\r
status = errno_to_iberr(err);\r
p_umv_buf->command = TRUE;\r
p_create_av = (struct ibv_create_ah *)p_umv_buf->p_inout_buf;\r
p_create_av->user_handle = (uint64_t)(ULONG_PTR)ah;\r
- if (!AV_created) {\r
+ if (ah->in_kernel) {\r
struct mthca_ah_page *page = ah->page;\r
p_create_av->mr.start = (uint64_t)(ULONG_PTR)page->buf;\r
p_create_av->mr.length = g_page_size;\r
p_create_av->mr.pd_handle = p_pd->ibv_pd->handle;\r
p_create_av->mr.pdn = to_mpd(p_pd->ibv_pd)->pdn;\r
p_create_av->mr.access_flags = 0; //local read\r
- p_create_av->use_mr = TRUE;\r
status = IB_SUCCESS;\r
}\r
else\r
\r
if (!mthca_is_memfree(p_pd->ibv_pd->context)) {\r
page = ah->page;\r
- if (p_resp->use_mr) {\r
+ if (ah->in_kernel) {\r
// fill mr parameters\r
page->mr.handle = p_resp->mr.mr_handle;\r
page->mr.lkey = p_resp->mr.lkey;\r
mlnx_pre_destroy_av (\r
IN const ib_av_handle_t h_uvp_av)\r
{\r
+ ib_api_status_t status ;\r
+ struct mthca_ah *mthca_ah = (struct mthca_ah *)h_uvp_av;\r
UVP_ENTER(UVP_DBG_AV);\r
+ if (mthca_ah->in_kernel)\r
+ status = IB_SUCCESS;\r
+ else\r
+ status = IB_VERBS_PROCESSING_DONE;\r
UVP_EXIT(UVP_DBG_AV);\r
- return IB_SUCCESS;\r
+ return status;\r
}\r
\r
void\r
{\r
ib_api_status_t status = ioctl_status;\r
mlnx_ual_hobul_t *new_ca;\r
- struct ibv_get_context_resp *resp_p;\r
+ struct ibv_get_context_resp *p_resp;\r
struct ibv_context * ibvcontext;\r
int err;\r
\r
UVP_ENTER(UVP_DBG_SHIM);\r
\r
+ p_resp = (struct ibv_get_context_resp *)p_umv_buf->p_inout_buf;\r
+\r
if (IB_SUCCESS == status) {\r
/* allocate ibv context */\r
- resp_p = (struct ibv_get_context_resp *)p_umv_buf->p_inout_buf;\r
- ibvcontext = mthca_alloc_context(resp_p);\r
+ ibvcontext = mthca_alloc_context(p_resp);\r
if (IS_ERR(ibvcontext)) {\r
err = PTR_ERR(ibvcontext);\r
UVP_PRINT(TRACE_LEVEL_ERROR ,UVP_DBG_SHIM ,("mthca_alloc_context failed (%d)\n", err));\r
\r
err_memory: \r
err_alloc_context:\r
- cl_free( resp_p );\r
+ if (p_resp)\r
+ cl_free( p_resp );\r
UVP_EXIT(UVP_DBG_SHIM);\r
return status;\r
}\r
size_t size = max( sizeof(struct ibv_create_qp), sizeof(struct ibv_create_qp_resp) );\r
mlnx_ual_pd_info_t *p_pd = (mlnx_ual_pd_info_t *)h_uvp_pd;\r
mlnx_ual_hobul_t *p_hobul = p_pd->p_hobul;\r
+ ib_ca_attr_t *p_hca_attr = p_hobul->p_hca_attr;\r
\r
UVP_ENTER(UVP_DBG_QP);\r
\r
+ /* sanity checks */\r
+ if(p_create_attr->sq_depth > p_hca_attr->max_wrs ||p_create_attr->rq_depth > p_hca_attr->max_wrs )\r
+ status = IB_INVALID_MAX_WRS;\r
+ else \r
+ if(p_create_attr->sq_sge> p_hca_attr->max_sges ||p_create_attr->rq_sge> p_hca_attr->max_sges )\r
+ status = IB_INVALID_MAX_SGE;\r
+ if (status)\r
+ goto err_params;\r
+\r
CL_ASSERT(p_umv_buf);\r
\r
if( !p_umv_buf->p_inout_buf )\r
p_umv_buf->command = TRUE;\r
\r
/* convert attributes */\r
- attr.send_cq = p_create_attr->h_sq_cq->ibv_cq;\r
- attr.recv_cq = p_create_attr->h_rq_cq->ibv_cq;\r
- attr.srq = NULL; /* absent in IBAL */\r
+ attr.send_cq = p_create_attr->h_sq_cq->ibv_cq;\r
+ attr.recv_cq = p_create_attr->h_rq_cq->ibv_cq;\r
+ attr.srq = NULL; /* absent in IBAL */\r
attr.cap.max_send_wr = p_create_attr->sq_depth;\r
attr.cap.max_recv_wr = p_create_attr->rq_depth;\r
attr.cap.max_send_sge = p_create_attr->sq_sge;\r
if (IS_ERR(ibv_qp)) {\r
err = PTR_ERR(ibv_qp);\r
UVP_PRINT(TRACE_LEVEL_ERROR ,UVP_DBG_QP ,("mthca_create_qp_pre failed (%d)\n", err));\r
- //fix return values\r
- if(err == -EINVAL && (attr.cap.max_send_wr > 65536 ||attr.cap.max_recv_wr > 65536 ))\r
- status = IB_INVALID_MAX_WRS;\r
- else if(err == -EINVAL && (attr.cap.max_send_sge> 64 ||attr.cap.max_recv_sge> 64 ))\r
- status = IB_INVALID_MAX_SGE;\r
- else if(err == -ENOMEM && (attr.cap.max_send_sge == 0 ||attr.cap.max_recv_sge == 0|| \r
+ if(err == -ENOMEM && (attr.cap.max_send_sge == 0 ||attr.cap.max_recv_sge == 0|| \r
attr.cap.max_send_wr == 0 || attr.cap.max_recv_wr == 0))\r
status = IB_INVALID_SETTING;\r
else\r
status = errno_to_iberr(err);\r
-\r
goto err_alloc_qp;\r
}\r
\r
err_alloc_qp:\r
cl_free(p_umv_buf->p_inout_buf);\r
err_memory:\r
+err_params:\r
end:\r
UVP_EXIT(UVP_DBG_QP);\r
return status;\r
{\r
memset( &attr, 0, sizeof(attr));\r
attr.qp_state = p_resp->qp_state;\r
- if (p_qp_info->ibv_qp)\r
+ if (p_qp_info->ibv_qp) {\r
err = p_qp_info->h_uvp_pd->p_hobul->ibv_ctx->ops.modify_qp(\r
h_uvp_qp->ibv_qp, &attr, p_resp->attr_mask);\r
- if (err) {\r
- UVP_PRINT(TRACE_LEVEL_ERROR ,UVP_DBG_SHIM , ("mthca_modify_qp failed (%d)\n", err));\r
- status = errno_to_iberr(err);\r
- goto err_modify_qp;\r
+ if (err) {\r
+ UVP_PRINT(TRACE_LEVEL_ERROR ,UVP_DBG_SHIM , ("mthca_modify_qp failed (%d)\n", err));\r
+ status = errno_to_iberr(err);\r
+ goto err_modify_qp;\r
+ }\r
}\r
UVP_PRINT(TRACE_LEVEL_INFORMATION ,UVP_DBG_SHIM ,\r
("Committed to modify QP to state %d\n", p_resp->qp_state));\r
#define PCI_VENDOR_ID_TOPSPIN 0x1867
#endif
+/* "Livefish" PCI device IDs: these appear to be the flash-recovery
+ * ("burner device", _BD) IDs the HCA enumerates with when its normal
+ * firmware image is unavailable -- TODO confirm against PRM.  Devices
+ * matching these IDs are driven as MTHCA_LIVEFISH. */
+#ifndef PCI_DEVICE_ID_MELLANOX_TAVOR_BD
+#define PCI_DEVICE_ID_MELLANOX_TAVOR_BD				0x5a45
+#endif
+
+#ifndef PCI_DEVICE_ID_MELLANOX_ARBEL_BD
+#define PCI_DEVICE_ID_MELLANOX_ARBEL_BD				0x6279
+#endif
+
+#ifndef PCI_DEVICE_ID_MELLANOX_SINAI_OLD_BD
+#define PCI_DEVICE_ID_MELLANOX_SINAI_OLD_BD		0x5e8d
+#endif
+
+#ifndef PCI_DEVICE_ID_MELLANOX_SINAI_BD
+#define PCI_DEVICE_ID_MELLANOX_SINAI_BD			0x6275
+#endif
+
+
#define HCA(v, d, t) \
{ PCI_VENDOR_ID_##v, PCI_DEVICE_ID_MELLANOX_##d, MTHCA_##t }
HCA( TOPSPIN, ARBEL, ARBEL),
HCA( TOPSPIN, SINAI_OLD, ARBEL),
HCA( TOPSPIN, SINAI, ARBEL),
+ // live fishes
+ HCA(MELLANOX, TAVOR_BD, LIVEFISH),
+ HCA(MELLANOX, ARBEL_BD, LIVEFISH),
+ HCA(MELLANOX, SINAI_OLD_BD, LIVEFISH),
+ HCA(MELLANOX, SINAI_BD, LIVEFISH),
+ HCA(TOPSPIN, TAVOR_BD, LIVEFISH),
+ HCA(TOPSPIN, ARBEL_BD, LIVEFISH),
+ HCA(TOPSPIN, SINAI_OLD_BD, LIVEFISH),
+ HCA(TOPSPIN, SINAI_BD, LIVEFISH),
};
static struct ibv_context_ops mthca_ctx_ops = {
enum mthca_hca_type {
	MTHCA_TAVOR,
-	MTHCA_ARBEL
+	MTHCA_ARBEL,
+	/* HCA detected via a flash-recovery ("livefish") device ID;
+	 * the driver runs a reduced verb set in this mode (see the
+	 * livefish skips in the PD alloc/dealloc paths). */
+	MTHCA_LIVEFISH
};
enum {
ib_pd_handle_t h_uvp_pd;
struct mthca_ah_page *page;
uint32_t key;
+ int in_kernel;
};
struct mthca_ah_page {
int mthca_alloc_av(struct mthca_pd *pd, struct ibv_ah_attr *attr,
struct mthca_ah *ah, struct ibv_create_ah_resp *resp)
{
- int added_page = FALSE;
if (mthca_is_memfree(pd->ibv_pd.context)) {
ah->av = cl_malloc(sizeof *ah->av);
if (!ah->av)
ReleaseMutex( pd->ah_mutex );
return -ENOMEM;
}
- added_page = TRUE;
+ ah->in_kernel = TRUE;
found:
++page->use_cnt;
/* Arbel workaround -- low byte of GID must be 2 */
ah->av->dgid[3] = cl_hton32(2);
}
-
- if (added_page)
- return -EAGAIN;
- else
- return 0;
+ return 0;
}
void mthca_free_av(struct mthca_ah *ah)
page = ah->page;
i = ((uint8_t *)ah->av - (uint8_t *)page->buf) / sizeof *ah->av;
page->free[i / (8 * sizeof (int))] |= 1 << (i % (8 * sizeof (int)));
-
- if (!--page->use_cnt) {
- if (page->prev)
- page->prev->next = page->next;
- else
- pd->ah_list = page->next;
- if (page->next)
- page->next->prev = page->prev;
-
-#ifdef NOT_USE_VIRTUAL_ALLOC
- cl_free(page->buf);
-#else
- VirtualFree( page->buf, 0, MEM_RELEASE);
-#endif
-
- cl_free(page);
- }
-
+ --page->use_cnt;
ReleaseMutex( pd->ah_mutex );
}
}
//NB: temporary, for support of modify_qp
-void mthca_set_av_params( struct mthca_ah *ah_p, struct ibv_ah_attr *ah_attr )
+void mthca_set_av_params( struct mthca_ah *ah_p, struct ibv_ah_attr *ah_attr )
{
struct mthca_av *av = ah_p->av;
mlnx_ual_pd_info_t *p_pd = (mlnx_ual_pd_info_t *)ah_p->h_uvp_pd;
((struct mthca_next_seg *) wqe)->flags =
((wr->send_opt & IB_SEND_OPT_SIGNALED) ?
cl_hton32(MTHCA_NEXT_CQ_UPDATE) : 0) |
- ((wr->send_opt & IB_SEND_OPT_SIGNALED) ?
+ ((wr->send_opt & IB_SEND_OPT_SOLICITED) ?
cl_hton32(MTHCA_NEXT_SOLICIT) : 0) |
cl_hton32(1);
if (opcode == MTHCA_OPCODE_SEND_IMM||
int mthca_free_pd(struct ibv_pd *ibv_pd)
{
	struct mthca_pd *pd = to_mpd(ibv_pd);
-	if (!mthca_is_memfree(ibv_pd->context))
+	if (!mthca_is_memfree(ibv_pd->context)) {
+		struct mthca_ah_page *page, *next_page;
+		/* AV pages are no longer freed when their use count drops to
+		 * zero (mthca_free_av now only decrements use_cnt, keeping the
+		 * page cached for reuse); tear down the whole page list here,
+		 * under the mutex, before the mutex handle is closed. */
+		WaitForSingleObject( pd->ah_mutex, INFINITE );
+		for (page = pd->ah_list; page; page = next_page) {
+			next_page = page->next;
+	#ifdef NOT_USE_VIRTUAL_ALLOC
+			cl_free(page->buf);
+	#else
+			VirtualFree( page->buf, 0, MEM_RELEASE);
+	#endif
+			cl_free(page);
+		}
+		ReleaseMutex( pd->ah_mutex );
		CloseHandle(pd->ah_mutex);
+	}
	cl_free(pd);
	return 0;
}
{
struct mthca_qp *qp;
struct ibv_context *context = pd->context;
- int ret;
+ int ret = -ENOMEM;
UVP_ENTER(UVP_DBG_QP);
/* Sanity check QP size before proceeding */
qp = cl_malloc(sizeof *qp);
if (!qp) {
- ret = -ENOMEM;
UVP_PRINT(TRACE_LEVEL_ERROR ,UVP_DBG_QP ,("cl_malloc failed (%d)\n",ret));
goto err_nomem;
}
qp->rq.max = align_queue_size(context, attr->cap.max_recv_wr, 0);
if (mthca_alloc_qp_buf(pd, &attr->cap, attr->qp_type, qp)) {
- ret = -ENOMEM;
UVP_PRINT(TRACE_LEVEL_ERROR ,UVP_DBG_QP ,("mthca_alloc_qp_buf failed (%d)\n",ret));
goto err_nomem;
}